-rw-r--r--  Documentation/ABI/testing/evm | 54
-rw-r--r--  Documentation/ABI/testing/ima_policy | 3
-rw-r--r--  Documentation/ABI/testing/sysfs-bus-iio-dfsdm-adc-stm32 | 16
-rw-r--r--  Documentation/ABI/testing/sysfs-class-led-trigger-netdev | 45
-rw-r--r--  Documentation/ABI/testing/sysfs-fs-f2fs | 6
-rw-r--r--  Documentation/ABI/testing/sysfs-kernel-livepatch | 26
-rw-r--r--  Documentation/IRQ-domain.txt | 36
-rw-r--r--  Documentation/RCU/Design/Data-Structures/Data-Structures.html | 49
-rw-r--r--  Documentation/RCU/Design/Requirements/Requirements.html | 7
-rw-r--r--  Documentation/RCU/rcu_dereference.txt | 6
-rw-r--r--  Documentation/RCU/stallwarn.txt | 10
-rw-r--r--  Documentation/RCU/whatisRCU.txt | 3
-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt | 18
-rw-r--r--  Documentation/arm64/cpu-feature-registers.txt | 4
-rw-r--r--  Documentation/arm64/elf_hwcaps.txt | 4
-rw-r--r--  Documentation/arm64/silicon-errata.txt | 2
-rw-r--r--  Documentation/cgroup-v1/cgroups.txt | 7
-rw-r--r--  Documentation/cgroup-v1/memory.txt | 4
-rw-r--r--  Documentation/cgroup-v2.txt | 77
-rw-r--r--  Documentation/circular-buffers.txt | 3
-rw-r--r--  Documentation/device-mapper/cache-policies.txt | 4
-rw-r--r--  Documentation/device-mapper/cache.txt | 9
-rw-r--r--  Documentation/device-mapper/dm-raid.txt | 5
-rw-r--r--  Documentation/device-mapper/snapshot.txt | 4
-rw-r--r--  Documentation/device-mapper/thin-provisioning.txt | 14
-rw-r--r--  Documentation/device-mapper/unstriped.txt | 124
-rw-r--r--  Documentation/devicetree/bindings/arm/arm-dsu-pmu.txt | 27
-rw-r--r--  Documentation/devicetree/bindings/arm/firmware/sdei.txt | 42
-rw-r--r--  Documentation/devicetree/bindings/arm/marvell/armada-37xx.txt | 19
-rw-r--r--  Documentation/devicetree/bindings/crypto/arm-cryptocell.txt | 22
-rw-r--r--  Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/crypto/st,stm32-cryp.txt | 19
-rw-r--r--  Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/gpio/gpio-axp209.txt | 49
-rw-r--r--  Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/hwmon/aspeed-pwm-tacho.txt | 14
-rw-r--r--  Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.txt | 13
-rw-r--r--  Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.txt | 128
-rw-r--r--  Documentation/devicetree/bindings/input/hid-over-i2c.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2836-l1-intc.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/interrupt-controller/google,goldfish-pic.txt | 30
-rw-r--r--  Documentation/devicetree/bindings/leds/leds-lm3692x.txt | 49
-rw-r--r--  Documentation/devicetree/bindings/leds/leds-lp8860.txt | 32
-rw-r--r--  Documentation/devicetree/bindings/mfd/mc13xxx.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/mfd/syscon.txt | 8
-rw-r--r--  Documentation/devicetree/bindings/mmc/mtk-sd.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/mmc/tmio_mmc.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/mtd/fsl-quadspi.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/mtd/gpmc-onenand.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/mtd/marvell-nand.txt | 123
-rw-r--r--  Documentation/devicetree/bindings/mtd/mtk-nand.txt | 11
-rw-r--r--  Documentation/devicetree/bindings/mtd/nand.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/opp/opp.txt | 13
-rw-r--r--  Documentation/devicetree/bindings/opp/ti-omap5-opp-supply.txt | 63
-rw-r--r--  Documentation/devicetree/bindings/power/power_domain.txt | 65
-rw-r--r--  Documentation/devicetree/bindings/power/reset/imx-snvs-poweroff.txt | 23
-rw-r--r--  Documentation/devicetree/bindings/power/supply/bq27xxx.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/regulator/regulator.txt | 12
-rw-r--r--  Documentation/devicetree/bindings/regulator/sprd,sc2731-regulator.txt | 43
-rw-r--r--  Documentation/devicetree/bindings/rng/brcm,bcm2835.txt | 22
-rw-r--r--  Documentation/devicetree/bindings/rng/brcm,bcm6368.txt | 17
-rw-r--r--  Documentation/devicetree/bindings/scsi/hisilicon-sas.txt | 5
-rw-r--r--  Documentation/devicetree/bindings/sound/dmic.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/sound/max98373.txt | 40
-rw-r--r--  Documentation/devicetree/bindings/sound/mt2701-afe-pcm.txt | 266
-rw-r--r--  Documentation/devicetree/bindings/sound/mxs-audio-sgtl5000.txt | 33
-rw-r--r--  Documentation/devicetree/bindings/sound/nau8825.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/sound/pcm186x.txt | 42
-rw-r--r--  Documentation/devicetree/bindings/sound/renesas,rsnd.txt | 15
-rw-r--r--  Documentation/devicetree/bindings/sound/simple-card.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/sound/st,stm32-adfsdm.txt | 63
-rw-r--r--  Documentation/devicetree/bindings/sound/st,stm32-sai.txt | 12
-rw-r--r--  Documentation/devicetree/bindings/sound/sun4i-i2s.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/sound/tas5720.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/sound/tfa9879.txt | 8
-rw-r--r--  Documentation/devicetree/bindings/sound/ti,tas6424.txt | 20
-rw-r--r--  Documentation/devicetree/bindings/sound/tlv320aic31xx.txt | 9
-rw-r--r--  Documentation/devicetree/bindings/sound/tlv320aic3x.txt | 10
-rw-r--r--  Documentation/devicetree/bindings/sound/tscs42xx.txt | 16
-rw-r--r--  Documentation/devicetree/bindings/sound/uniphier,evea.txt | 26
-rw-r--r--  Documentation/devicetree/bindings/spi/sh-msiof.txt | 16
-rw-r--r--  Documentation/devicetree/bindings/spi/spi-meson.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/spi/spi-orion.txt | 9
-rw-r--r--  Documentation/devicetree/bindings/spi/spi-xilinx.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/timer/actions,owl-timer.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/timer/spreadtrum,sprd-timer.txt | 20
-rw-r--r--  Documentation/devicetree/bindings/vendor-prefixes.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/watchdog/zii,rave-sp-wdt.txt | 39
-rw-r--r--  Documentation/driver-api/dmaengine/provider.rst | 38
-rw-r--r--  Documentation/driver-api/iio/hw-consumer.rst | 51
-rw-r--r--  Documentation/driver-api/iio/index.rst | 1
-rw-r--r--  Documentation/driver-api/pm/devices.rst | 54
-rw-r--r--  Documentation/driver-api/scsi.rst | 10
-rw-r--r--  Documentation/driver-model/devres.txt | 3
-rw-r--r--  Documentation/features/debug/KASAN/arch-support.txt | 2
-rw-r--r--  Documentation/features/debug/stackprotector/arch-support.txt | 2
-rw-r--r--  Documentation/filesystems/nfs/Exporting | 27
-rw-r--r--  Documentation/gpio/board.txt | 14
-rw-r--r--  Documentation/gpio/consumer.txt | 107
-rw-r--r--  Documentation/gpio/driver.txt | 4
-rw-r--r--  Documentation/gpio/sysfs.txt | 11
-rw-r--r--  Documentation/hwmon/lm25066 | 20
-rw-r--r--  Documentation/hwmon/max31785 | 15
-rw-r--r--  Documentation/hwmon/w83773g | 33
-rw-r--r--  Documentation/input/multi-touch-protocol.rst | 9
-rw-r--r--  Documentation/livepatch/livepatch.txt | 114
-rw-r--r--  Documentation/locking/locktorture.txt | 5
-rw-r--r--  Documentation/md/raid5-ppl.txt | 7
-rw-r--r--  Documentation/memory-barriers.txt | 22
-rw-r--r--  Documentation/mtd/spi-nor.txt | 3
-rw-r--r--  Documentation/perf/arm_dsu_pmu.txt | 28
-rw-r--r--  Documentation/power/pci.txt | 11
-rw-r--r--  Documentation/power/regulator/machine.txt | 36
-rw-r--r--  Documentation/process/kernel-enforcement-statement.rst | 1
-rw-r--r--  Documentation/thermal/cpu-cooling-api.txt | 115
-rw-r--r--  Documentation/w1/masters/w1-gpio | 17
-rw-r--r--  Documentation/x86/intel_rdt_ui.txt | 13
-rw-r--r--  Documentation/xtensa/mmu.txt | 78
-rw-r--r--  MAINTAINERS | 156
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/Kconfig | 8
-rw-r--r--  arch/alpha/Kconfig | 1
-rw-r--r--  arch/alpha/include/asm/thread_info.h | 3
-rw-r--r--  arch/alpha/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/alpha/include/uapi/asm/poll.h | 2
-rw-r--r--  arch/alpha/kernel/osf_sys.c | 72
-rw-r--r--  arch/arc/Kconfig | 3
-rw-r--r--  arch/arc/include/asm/dma-mapping.h | 7
-rw-r--r--  arch/arc/include/asm/thread_info.h | 3
-rw-r--r--  arch/arc/kernel/traps.c | 14
-rw-r--r--  arch/arc/mm/dma.c | 14
-rw-r--r--  arch/arm/Kconfig | 3
-rw-r--r--  arch/arm/boot/dts/bcm2836.dtsi | 14
-rw-r--r--  arch/arm/boot/dts/bcm2837.dtsi | 12
-rw-r--r--  arch/arm/boot/dts/bcm283x.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/imx6ul.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/omap2420-n8x0-common.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/omap3-igep.dtsi | 30
-rw-r--r--  arch/arm/boot/dts/omap3-n900.dts | 1
-rw-r--r--  arch/arm/boot/dts/omap3-n950-n9.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/omap3430-sdp.dts | 1
-rw-r--r--  arch/arm/configs/aspeed_g4_defconfig | 1
-rw-r--r--  arch/arm/configs/aspeed_g5_defconfig | 1
-rw-r--r--  arch/arm/configs/hisi_defconfig | 1
-rw-r--r--  arch/arm/configs/multi_v7_defconfig | 1
-rw-r--r--  arch/arm/configs/mvebu_v7_defconfig | 3
-rw-r--r--  arch/arm/configs/pxa_defconfig | 1
-rw-r--r--  arch/arm/configs/sama5_defconfig | 1
-rw-r--r--  arch/arm/configs/tegra_defconfig | 1
-rw-r--r--  arch/arm/configs/vt8500_v6_v7_defconfig | 1
-rw-r--r--  arch/arm/crypto/aes-neonbs-glue.c | 10
-rw-r--r--  arch/arm/crypto/crc32-ce-glue.c | 2
-rw-r--r--  arch/arm/include/asm/dma-direct.h | 36
-rw-r--r--  arch/arm/include/asm/dma-mapping.h | 35
-rw-r--r--  arch/arm/include/asm/kvm_host.h | 5
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h | 17
-rw-r--r--  arch/arm/include/asm/thread_info.h | 3
-rw-r--r--  arch/arm/include/uapi/asm/siginfo.h | 13
-rw-r--r--  arch/arm/kernel/ptrace.c | 8
-rw-r--r--  arch/arm/mach-ixp4xx/vulcan-setup.c | 13
-rw-r--r--  arch/arm/mach-omap2/Makefile | 3
-rw-r--r--  arch/arm/mach-omap2/gpmc-onenand.c | 409
-rw-r--r--  arch/arm/mach-pxa/raumfeld.c | 16
-rw-r--r--  arch/arm/mm/dma-mapping-nommu.c | 13
-rw-r--r--  arch/arm/vfp/vfpmodule.c | 2
-rw-r--r--  arch/arm64/Kconfig | 94
-rw-r--r--  arch/arm64/configs/defconfig | 2
-rw-r--r--  arch/arm64/crypto/Kconfig | 18
-rw-r--r--  arch/arm64/crypto/Makefile | 11
-rw-r--r--  arch/arm64/crypto/aes-ce-core.S | 87
-rw-r--r--  arch/arm64/crypto/aes-ce-glue.c (renamed from arch/arm64/crypto/aes-ce-cipher.c) | 115
-rw-r--r--  arch/arm64/crypto/aes-cipher-core.S | 19
-rw-r--r--  arch/arm64/crypto/aes-glue.c | 1
-rw-r--r--  arch/arm64/crypto/aes-neon.S | 8
-rw-r--r--  arch/arm64/crypto/crc32-ce-core.S | 7
-rw-r--r--  arch/arm64/crypto/crc32-ce-glue.c | 2
-rw-r--r--  arch/arm64/crypto/crct10dif-ce-core.S | 17
-rw-r--r--  arch/arm64/crypto/sha1-ce-core.S | 20
-rw-r--r--  arch/arm64/crypto/sha2-ce-core.S | 4
-rw-r--r--  arch/arm64/crypto/sha3-ce-core.S | 210
-rw-r--r--  arch/arm64/crypto/sha3-ce-glue.c | 161
-rw-r--r--  arch/arm64/crypto/sha512-ce-core.S | 204
-rw-r--r--  arch/arm64/crypto/sha512-ce-glue.c | 119
-rw-r--r--  arch/arm64/crypto/sha512-glue.c | 1
-rw-r--r--  arch/arm64/crypto/sm3-ce-core.S | 141
-rw-r--r--  arch/arm64/crypto/sm3-ce-glue.c | 92
-rw-r--r--  arch/arm64/include/asm/alternative.h | 2
-rw-r--r--  arch/arm64/include/asm/arm_dsu_pmu.h | 129
-rw-r--r--  arch/arm64/include/asm/asm-uaccess.h | 42
-rw-r--r--  arch/arm64/include/asm/assembler.h | 75
-rw-r--r--  arch/arm64/include/asm/compat.h | 64
-rw-r--r--  arch/arm64/include/asm/cpucaps.h | 6
-rw-r--r--  arch/arm64/include/asm/cputype.h | 9
-rw-r--r--  arch/arm64/include/asm/dma-mapping.h | 35
-rw-r--r--  arch/arm64/include/asm/efi.h | 12
-rw-r--r--  arch/arm64/include/asm/esr.h | 20
-rw-r--r--  arch/arm64/include/asm/exception.h | 14
-rw-r--r--  arch/arm64/include/asm/fixmap.h | 5
-rw-r--r--  arch/arm64/include/asm/fpsimd.h | 2
-rw-r--r--  arch/arm64/include/asm/kernel-pgtable.h | 59
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h | 2
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h | 2
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h | 17
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 19
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h | 59
-rw-r--r--  arch/arm64/include/asm/mmu.h | 49
-rw-r--r--  arch/arm64/include/asm/mmu_context.h | 27
-rw-r--r--  arch/arm64/include/asm/percpu.h | 11
-rw-r--r--  arch/arm64/include/asm/pgalloc.h | 6
-rw-r--r--  arch/arm64/include/asm/pgtable-hwdef.h | 32
-rw-r--r--  arch/arm64/include/asm/pgtable-prot.h | 21
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 57
-rw-r--r--  arch/arm64/include/asm/proc-fns.h | 6
-rw-r--r--  arch/arm64/include/asm/processor.h | 1
-rw-r--r--  arch/arm64/include/asm/sdei.h | 57
-rw-r--r--  arch/arm64/include/asm/sections.h | 1
-rw-r--r--  arch/arm64/include/asm/sparsemem.h | 2
-rw-r--r--  arch/arm64/include/asm/stacktrace.h | 3
-rw-r--r--  arch/arm64/include/asm/sysreg.h | 92
-rw-r--r--  arch/arm64/include/asm/thread_info.h | 2
-rw-r--r--  arch/arm64/include/asm/tlbflush.h | 16
-rw-r--r--  arch/arm64/include/asm/traps.h | 54
-rw-r--r--  arch/arm64/include/asm/uaccess.h | 40
-rw-r--r--  arch/arm64/include/asm/vmap_stack.h | 28
-rw-r--r--  arch/arm64/include/uapi/asm/hwcap.h | 1
-rw-r--r--  arch/arm64/include/uapi/asm/siginfo.h | 21
-rw-r--r--  arch/arm64/kernel/Makefile | 5
-rw-r--r--  arch/arm64/kernel/acpi.c | 2
-rw-r--r--  arch/arm64/kernel/alternative.c | 9
-rw-r--r--  arch/arm64/kernel/asm-offsets.c | 12
-rw-r--r--  arch/arm64/kernel/bpi.S | 87
-rw-r--r--  arch/arm64/kernel/cpu_errata.c | 192
-rw-r--r--  arch/arm64/kernel/cpufeature.c | 146
-rw-r--r--  arch/arm64/kernel/cpuidle.c | 8
-rw-r--r--  arch/arm64/kernel/cpuinfo.c | 1
-rw-r--r--  arch/arm64/kernel/debug-monitors.c | 13
-rw-r--r--  arch/arm64/kernel/efi.c | 4
-rw-r--r--  arch/arm64/kernel/entry.S | 396
-rw-r--r--  arch/arm64/kernel/fpsimd.c | 6
-rw-r--r--  arch/arm64/kernel/head.S | 245
-rw-r--r--  arch/arm64/kernel/hibernate-asm.S | 12
-rw-r--r--  arch/arm64/kernel/hibernate.c | 5
-rw-r--r--  arch/arm64/kernel/irq.c | 13
-rw-r--r--  arch/arm64/kernel/process.c | 12
-rw-r--r--  arch/arm64/kernel/ptrace.c | 42
-rw-r--r--  arch/arm64/kernel/sdei.c | 235
-rw-r--r--  arch/arm64/kernel/signal.c | 7
-rw-r--r--  arch/arm64/kernel/signal32.c | 85
-rw-r--r--  arch/arm64/kernel/smp.c | 11
-rw-r--r--  arch/arm64/kernel/suspend.c | 4
-rw-r--r--  arch/arm64/kernel/topology.c | 16
-rw-r--r--  arch/arm64/kernel/traps.c | 51
-rw-r--r--  arch/arm64/kernel/vmlinux.lds.S | 27
-rw-r--r--  arch/arm64/kvm/handle_exit.c | 32
-rw-r--r--  arch/arm64/kvm/hyp-init.S | 30
-rw-r--r--  arch/arm64/kvm/hyp/entry.S | 35
-rw-r--r--  arch/arm64/kvm/hyp/hyp-entry.S | 18
-rw-r--r--  arch/arm64/kvm/hyp/s2-setup.c | 2
-rw-r--r--  arch/arm64/kvm/hyp/switch.c | 60
-rw-r--r--  arch/arm64/kvm/hyp/sysreg-sr.c | 22
-rw-r--r--  arch/arm64/kvm/inject_fault.c | 13
-rw-r--r--  arch/arm64/kvm/sys_regs.c | 11
-rw-r--r--  arch/arm64/lib/clear_user.S | 4
-rw-r--r--  arch/arm64/lib/copy_from_user.S | 4
-rw-r--r--  arch/arm64/lib/copy_in_user.S | 4
-rw-r--r--  arch/arm64/lib/copy_to_user.S | 4
-rw-r--r--  arch/arm64/lib/tishift.S | 8
-rw-r--r--  arch/arm64/mm/cache.S | 4
-rw-r--r--  arch/arm64/mm/context.c | 67
-rw-r--r--  arch/arm64/mm/dma-mapping.c | 54
-rw-r--r--  arch/arm64/mm/fault.c | 131
-rw-r--r--  arch/arm64/mm/init.c | 62
-rw-r--r--  arch/arm64/mm/mmu.c | 47
-rw-r--r--  arch/arm64/mm/pgd.c | 8
-rw-r--r--  arch/arm64/mm/proc.S | 66
-rw-r--r--  arch/arm64/xen/hypercall.S | 4
-rw-r--r--  arch/blackfin/include/asm/thread_info.h | 2
-rw-r--r--  arch/blackfin/include/uapi/asm/poll.h | 21
-rw-r--r--  arch/blackfin/include/uapi/asm/siginfo.h | 34
-rw-r--r--  arch/c6x/include/asm/thread_info.h | 3
-rw-r--r--  arch/cris/Kconfig | 4
-rw-r--r--  arch/cris/arch-v10/drivers/gpio.c | 6
-rw-r--r--  arch/cris/arch-v10/drivers/sync_serial.c | 8
-rw-r--r--  arch/cris/arch-v32/drivers/cryptocop.c | 23
-rw-r--r--  arch/cris/arch-v32/drivers/pci/Makefile | 2
-rw-r--r--  arch/cris/arch-v32/drivers/pci/dma.c | 80
-rw-r--r--  arch/cris/arch-v32/drivers/sync_serial.c | 8
-rw-r--r--  arch/cris/include/asm/Kbuild | 1
-rw-r--r--  arch/cris/include/asm/dma-mapping.h | 20
-rw-r--r--  arch/cris/include/asm/processor.h | 9
-rw-r--r--  arch/cris/include/asm/thread_info.h | 9
-rw-r--r--  arch/cris/kernel/vmlinux.lds.S | 1
-rw-r--r--  arch/frv/include/asm/thread_info.h | 3
-rw-r--r--  arch/frv/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/frv/include/uapi/asm/poll.h | 21
-rw-r--r--  arch/frv/include/uapi/asm/siginfo.h | 13
-rw-r--r--  arch/h8300/Kconfig | 1
-rw-r--r--  arch/h8300/include/asm/Kbuild | 1
-rw-r--r--  arch/h8300/include/asm/dma-mapping.h | 12
-rw-r--r--  arch/h8300/include/asm/thread_info.h | 3
-rw-r--r--  arch/h8300/kernel/Makefile | 2
-rw-r--r--  arch/h8300/kernel/dma.c | 69
-rw-r--r--  arch/hexagon/include/asm/dma-mapping.h | 7
-rw-r--r--  arch/hexagon/include/asm/io.h | 2
-rw-r--r--  arch/hexagon/include/asm/thread_info.h | 3
-rw-r--r--  arch/hexagon/kernel/dma.c | 1
-rw-r--r--  arch/hexagon/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/ia64/Kconfig | 10
-rw-r--r--  arch/ia64/Makefile | 2
-rw-r--r--  arch/ia64/hp/common/hwsw_iommu.c | 2
-rw-r--r--  arch/ia64/include/asm/dma-mapping.h | 19
-rw-r--r--  arch/ia64/include/asm/dma.h | 2
-rw-r--r--  arch/ia64/include/asm/swiotlb.h | 18
-rw-r--r--  arch/ia64/include/asm/thread_info.h | 4
-rw-r--r--  arch/ia64/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/ia64/include/uapi/asm/poll.h | 2
-rw-r--r--  arch/ia64/include/uapi/asm/siginfo.h | 96
-rw-r--r--  arch/ia64/kernel/Makefile | 2
-rw-r--r--  arch/ia64/kernel/acpi.c | 5
-rw-r--r--  arch/ia64/kernel/dma-mapping.c | 9
-rw-r--r--  arch/ia64/kernel/init_task.c | 44
-rw-r--r--  arch/ia64/kernel/pci-dma.c | 19
-rw-r--r--  arch/ia64/kernel/pci-swiotlb.c | 68
-rw-r--r--  arch/ia64/kernel/perfmon.c | 4
-rw-r--r--  arch/ia64/kernel/signal.c | 52
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S | 1
-rw-r--r--  arch/ia64/mm/contig.c | 4
-rw-r--r--  arch/ia64/mm/discontig.c | 8
-rw-r--r--  arch/m32r/Kconfig | 2
-rw-r--r--  arch/m32r/include/asm/Kbuild | 1
-rw-r--r--  arch/m32r/include/asm/dma-mapping.h | 24
-rw-r--r--  arch/m32r/include/asm/io.h | 2
-rw-r--r--  arch/m32r/include/asm/thread_info.h | 3
-rw-r--r--  arch/m32r/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/m32r/include/uapi/asm/poll.h | 2
-rw-r--r--  arch/m68k/configs/amiga_defconfig | 4
-rw-r--r--  arch/m68k/configs/apollo_defconfig | 4
-rw-r--r--  arch/m68k/configs/atari_defconfig | 4
-rw-r--r--  arch/m68k/configs/bvme6000_defconfig | 4
-rw-r--r--  arch/m68k/configs/hp300_defconfig | 4
-rw-r--r--  arch/m68k/configs/mac_defconfig | 4
-rw-r--r--  arch/m68k/configs/multi_defconfig | 4
-rw-r--r--  arch/m68k/configs/mvme147_defconfig | 4
-rw-r--r--  arch/m68k/configs/mvme16x_defconfig | 4
-rw-r--r--  arch/m68k/configs/q40_defconfig | 4
-rw-r--r--  arch/m68k/configs/sun3_defconfig | 4
-rw-r--r--  arch/m68k/configs/sun3x_defconfig | 4
-rw-r--r--  arch/m68k/include/asm/macintosh.h | 9
-rw-r--r--  arch/m68k/include/asm/thread_info.h | 4
-rw-r--r--  arch/m68k/include/uapi/asm/poll.h | 19
-rw-r--r--  arch/m68k/kernel/dma.c | 2
-rw-r--r--  arch/m68k/mac/config.c | 110
-rw-r--r--  arch/m68k/mac/oss.c | 67
-rw-r--r--  arch/m68k/mm/fault.c | 3
-rw-r--r--  arch/metag/include/asm/thread_info.h | 3
-rw-r--r--  arch/metag/include/uapi/asm/siginfo.h | 7
-rw-r--r--  arch/metag/kernel/traps.c | 2
-rw-r--r--  arch/microblaze/include/asm/dma-mapping.h | 4
-rw-r--r--  arch/microblaze/include/asm/thread_info.h | 3
-rw-r--r--  arch/microblaze/kernel/dma.c | 78
-rw-r--r--  arch/mips/Kconfig | 2
-rw-r--r--  arch/mips/cavium-octeon/Kconfig | 1
-rw-r--r--  arch/mips/cavium-octeon/dma-octeon.c | 29
-rw-r--r--  arch/mips/include/asm/compat.h | 73
-rw-r--r--  arch/mips/include/asm/dma-direct.h | 1
-rw-r--r--  arch/mips/include/asm/dma-mapping.h | 10
-rw-r--r--  arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h | 8
-rw-r--r--  arch/mips/include/asm/mach-generic/dma-coherence.h | 12
-rw-r--r--  arch/mips/include/asm/mach-loongson64/dma-coherence.h | 8
-rw-r--r--  arch/mips/include/asm/netlogic/common.h | 3
-rw-r--r--  arch/mips/include/asm/thread_info.h | 3
-rw-r--r--  arch/mips/include/uapi/asm/poll.h | 19
-rw-r--r--  arch/mips/include/uapi/asm/siginfo.h | 86
-rw-r--r--  arch/mips/kernel/rtlx.c | 4
-rw-r--r--  arch/mips/kernel/signal32.c | 67
-rw-r--r--  arch/mips/kernel/traps.c | 29
-rw-r--r--  arch/mips/loongson64/Kconfig | 1
-rw-r--r--  arch/mips/loongson64/common/dma-swiotlb.c | 24
-rw-r--r--  arch/mips/mm/dma-default.c | 3
-rw-r--r--  arch/mips/netlogic/Kconfig | 5
-rw-r--r--  arch/mips/netlogic/common/Makefile | 1
-rw-r--r--  arch/mips/netlogic/common/nlm-dma.c | 97
-rw-r--r--  arch/mn10300/include/asm/thread_info.h | 2
-rw-r--r--  arch/mn10300/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/mn10300/include/uapi/asm/poll.h | 2
-rw-r--r--  arch/mn10300/kernel/mn10300-serial.c | 7
-rw-r--r--  arch/mn10300/mm/dma-alloc.c | 3
-rw-r--r--  arch/mn10300/mm/misalignment.c | 2
-rw-r--r--  arch/nios2/include/asm/thread_info.h | 3
-rw-r--r--  arch/nios2/mm/dma-mapping.c | 3
-rw-r--r--  arch/openrisc/include/asm/processor.h | 2
-rw-r--r--  arch/openrisc/include/asm/thread_info.h | 2
-rw-r--r--  arch/openrisc/kernel/traps.c | 10
-rw-r--r--  arch/openrisc/kernel/vmlinux.lds.S | 1
-rw-r--r--  arch/parisc/include/asm/compat.h | 64
-rw-r--r--  arch/parisc/include/asm/thread_info.h | 3
-rw-r--r--  arch/parisc/include/uapi/asm/siginfo.h | 7
-rw-r--r--  arch/parisc/kernel/pci-dma.c | 7
-rw-r--r--  arch/parisc/kernel/pdt.c | 2
-rw-r--r--  arch/parisc/kernel/signal32.c | 106
-rw-r--r--  arch/parisc/kernel/signal32.h | 3
-rw-r--r--  arch/parisc/kernel/traps.c | 2
-rw-r--r--  arch/powerpc/Kconfig | 1
-rw-r--r--  arch/powerpc/configs/fsl-emb-nonhw.config | 1
-rw-r--r--  arch/powerpc/configs/powernv_defconfig | 1
-rw-r--r--  arch/powerpc/configs/ppc64_defconfig | 1
-rw-r--r--  arch/powerpc/configs/pseries_defconfig | 1
-rw-r--r--  arch/powerpc/crypto/crc32c-vpmsum_glue.c | 1
-rw-r--r--  arch/powerpc/include/asm/compat.h | 65
-rw-r--r--  arch/powerpc/include/asm/debug.h | 2
-rw-r--r--  arch/powerpc/include/asm/dma-direct.h | 29
-rw-r--r--  arch/powerpc/include/asm/dma-mapping.h | 36
-rw-r--r--  arch/powerpc/include/asm/swiotlb.h | 4
-rw-r--r--  arch/powerpc/include/asm/thread_info.h | 3
-rw-r--r--  arch/powerpc/include/uapi/asm/siginfo.h | 16
-rw-r--r--  arch/powerpc/kernel/dma-iommu.c | 2
-rw-r--r--  arch/powerpc/kernel/dma-swiotlb.c | 12
-rw-r--r--  arch/powerpc/kernel/dma.c | 73
-rw-r--r--  arch/powerpc/kernel/mce.c | 2
-rw-r--r--  arch/powerpc/kernel/pci-common.c | 2
-rw-r--r--  arch/powerpc/kernel/process.c | 13
-rw-r--r--  arch/powerpc/kernel/rtasd.c | 2
-rw-r--r--  arch/powerpc/kernel/setup-common.c | 2
-rw-r--r--  arch/powerpc/kernel/signal.c | 6
-rw-r--r--  arch/powerpc/kernel/signal_32.c | 66
-rw-r--r--  arch/powerpc/kernel/traps.c | 22
-rw-r--r--  arch/powerpc/platforms/cell/iommu.c | 28
-rw-r--r--  arch/powerpc/platforms/cell/spufs/backing_ops.c | 6
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c | 16
-rw-r--r--  arch/powerpc/platforms/cell/spufs/hw_ops.c | 5
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spufs.h | 3
-rw-r--r--  arch/powerpc/platforms/pasemi/iommu.c | 2
-rw-r--r--  arch/powerpc/platforms/pasemi/setup.c | 2
-rw-r--r--  arch/powerpc/platforms/powernv/opal-memory-errors.c | 2
-rw-r--r--  arch/powerpc/platforms/powernv/opal-prd.c | 2
-rw-r--r--  arch/powerpc/platforms/powernv/pci-ioda.c | 4
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/vio.c | 2
-rw-r--r--  arch/powerpc/sysdev/dart_iommu.c | 4
-rw-r--r--  arch/powerpc/sysdev/fsl_pci.c | 4
-rw-r--r--  arch/riscv/Kconfig | 2
-rw-r--r--  arch/riscv/include/asm/Kbuild | 1
-rw-r--r--  arch/riscv/include/asm/dma-mapping.h | 38
-rw-r--r--  arch/riscv/include/asm/thread_info.h | 2
-rw-r--r--  arch/s390/Kconfig | 2
-rw-r--r--  arch/s390/crypto/crc32-vx.c | 3
-rw-r--r--  arch/s390/include/asm/Kbuild | 1
-rw-r--r--  arch/s390/include/asm/compat.h | 73
-rw-r--r--  arch/s390/include/asm/dma-mapping.h | 26
-rw-r--r--  arch/s390/include/asm/pci_dma.h | 3
-rw-r--r--  arch/s390/include/asm/thread_info.h | 2
-rw-r--r--  arch/s390/kernel/compat_signal.c | 100
-rw-r--r--  arch/score/include/asm/thread_info.h | 3
-rw-r--r--  arch/score/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/score/include/uapi/asm/poll.h | 7
-rw-r--r--  arch/sh/include/asm/thread_info.h | 3
-rw-r--r--  arch/sh/kernel/traps_32.c | 3
-rw-r--r--  arch/sparc/crypto/crc32c_glue.c | 1
-rw-r--r--  arch/sparc/include/asm/compat.h | 59
-rw-r--r--  arch/sparc/include/asm/thread_info_32.h | 3
-rw-r--r--  arch/sparc/include/asm/thread_info_64.h | 3
-rw-r--r--  arch/sparc/include/uapi/asm/poll.h | 28
-rw-r--r--  arch/sparc/kernel/signal32.c | 69
-rw-r--r--  arch/tile/Kconfig | 3
-rw-r--r--  arch/tile/include/asm/compat.h | 62
-rw-r--r--  arch/tile/include/asm/dma-mapping.h | 20
-rw-r--r--  arch/tile/include/asm/thread_info.h | 3
-rw-r--r--  arch/tile/include/uapi/asm/siginfo.h | 8
-rw-r--r--  arch/tile/kernel/compat_signal.c | 73
-rw-r--r--  arch/tile/kernel/pci-dma.c | 38
-rw-r--r--  arch/tile/kernel/setup.c | 8
-rw-r--r--  arch/tile/kernel/single_step.c | 24
-rw-r--r--  arch/tile/kernel/traps.c | 4
-rw-r--r--  arch/tile/kernel/unaligned.c | 46
-rw-r--r--  arch/um/Makefile | 7
-rw-r--r--  arch/um/drivers/hostaudio_kern.c | 6
-rw-r--r--  arch/um/include/asm/processor-generic.h | 5
-rw-r--r--  arch/um/include/asm/thread_info.h | 9
-rw-r--r--  arch/um/include/asm/vmlinux.lds.h | 2
-rw-r--r--  arch/um/kernel/dyn.lds.S | 3
-rw-r--r--  arch/um/kernel/trap.c | 2
-rw-r--r--  arch/um/kernel/um_arch.c | 2
-rw-r--r--  arch/um/kernel/uml.lds.S | 2
-rw-r--r--  arch/unicore32/include/asm/dma-mapping.h | 29
-rw-r--r--  arch/unicore32/include/asm/thread_info.h | 3
-rw-r--r--  arch/unicore32/mm/Kconfig | 1
-rw-r--r--  arch/unicore32/mm/Makefile | 2
-rw-r--r--  arch/unicore32/mm/dma-swiotlb.c | 48
-rw-r--r--  arch/x86/Kconfig | 10
-rw-r--r--  arch/x86/Kconfig.debug | 8
-rw-r--r--  arch/x86/boot/compressed/eboot.c | 1
-rw-r--r--  arch/x86/crypto/aesni-intel_asm.S | 199
-rw-r--r--  arch/x86/crypto/aesni-intel_glue.c | 70
-rw-r--r--  arch/x86/crypto/chacha20_glue.c | 1
-rw-r--r--  arch/x86/crypto/crc32-pclmul_glue.c | 1
-rw-r--r--  arch/x86/crypto/crc32c-intel_glue.c | 1
-rw-r--r--  arch/x86/crypto/poly1305_glue.c | 2
-rw-r--r--  arch/x86/crypto/salsa20-i586-asm_32.S | 184
-rw-r--r--  arch/x86/crypto/salsa20-x86_64-asm_64.S | 114
-rw-r--r--  arch/x86/crypto/salsa20_glue.c | 105
-rw-r--r--  arch/x86/crypto/twofish-x86_64-asm_64-3way.S | 112
-rw-r--r--  arch/x86/entry/common.c | 6
-rw-r--r--  arch/x86/entry/entry_32.S | 3
-rw-r--r--  arch/x86/entry/entry_64.S | 3
-rw-r--r--  arch/x86/events/amd/power.c | 2
-rw-r--r--  arch/x86/events/intel/ds.c | 33
-rw-r--r--  arch/x86/events/msr.c | 70
-rw-r--r--  arch/x86/hyperv/mmu.c | 12
-rw-r--r--  arch/x86/include/asm/acpi.h | 2
-rw-r--r--  arch/x86/include/asm/asm-prototypes.h | 4
-rw-r--r--  arch/x86/include/asm/compat.h | 86
-rw-r--r--  arch/x86/include/asm/cpufeature.h | 7
-rw-r--r--  arch/x86/include/asm/cpufeatures.h | 23
-rw-r--r--  arch/x86/include/asm/disabled-features.h | 3
-rw-r--r--  arch/x86/include/asm/dma-direct.h | 30
-rw-r--r--  arch/x86/include/asm/dma-mapping.h | 29
-rw-r--r--  arch/x86/include/asm/fpu/signal.h | 6
-rw-r--r--  arch/x86/include/asm/hypervisor.h | 1
-rw-r--r--  arch/x86/include/asm/i8259.h | 5
-rw-r--r--  arch/x86/include/asm/jailhouse_para.h | 26
-rw-r--r--  arch/x86/include/asm/mce.h | 2
-rw-r--r--  arch/x86/include/asm/mpspec_def.h | 14
-rw-r--r--  arch/x86/include/asm/msr-index.h | 12
-rw-r--r--  arch/x86/include/asm/nospec-branch.h | 84
-rw-r--r--  arch/x86/include/asm/processor.h | 3
-rw-r--r--  arch/x86/include/asm/required-features.h | 3
-rw-r--r--  arch/x86/include/asm/swiotlb.h | 2
-rw-r--r--  arch/x86/include/asm/thread_info.h | 2
-rw-r--r--  arch/x86/include/asm/uprobes.h | 4
-rw-r--r--  arch/x86/include/asm/uv/uv_bau.h | 1
-rw-r--r--  arch/x86/include/asm/uv/uv_hub.h | 14
-rw-r--r--  arch/x86/include/asm/uv/uv_mmrs.h | 749
-rw-r--r--  arch/x86/include/asm/x86_init.h | 1
-rw-r--r--  arch/x86/include/uapi/asm/Kbuild | 1
-rw-r--r--  arch/x86/include/uapi/asm/bootparam.h | 22
-rw-r--r--  arch/x86/include/uapi/asm/poll.h | 1
-rw-r--r--  arch/x86/kernel/Makefile | 2
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 35
-rw-r--r--  arch/x86/kernel/acpi/sleep.c | 2
-rw-r--r--  arch/x86/kernel/alternative.c | 14
-rw-r--r--  arch/x86/kernel/amd_gart_64.c | 1
-rw-r--r--  arch/x86/kernel/aperture_64.c | 46
-rw-r--r--  arch/x86/kernel/apic/apic_flat_64.c | 24
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 20
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c | 84
-rw-r--r--  arch/x86/kernel/apm_32.c | 2
-rw-r--r--  arch/x86/kernel/cpu/bugs.c | 40
-rw-r--r--  arch/x86/kernel/cpu/centaur.c | 4
-rw-r--r--  arch/x86/kernel/cpu/common.c | 49
-rw-r--r--  arch/x86/kernel/cpu/hypervisor.c | 4
-rw-r--r--  arch/x86/kernel/cpu/intel.c | 77
-rw-r--r--  arch/x86/kernel/cpu/intel_rdt.c | 68
-rw-r--r--  arch/x86/kernel/cpu/intel_rdt.h | 5
-rw-r--r--  arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 117
-rw-r--r--  arch/x86/kernel/cpu/mcheck/dev-mcelog.c | 2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce-severity.c | 26
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c | 23
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd.c | 29
-rw-r--r--  arch/x86/kernel/cpu/microcode/core.c | 2
-rw-r--r--  arch/x86/kernel/cpu/microcode/intel.c | 20
-rw-r--r--  arch/x86/kernel/cpu/scattered.c | 3
-rw-r--r--  arch/x86/kernel/ftrace_64.S | 2
-rw-r--r--  arch/x86/kernel/itmt.c | 1
-rw-r--r--  arch/x86/kernel/jailhouse.c | 211
-rw-r--r--  arch/x86/kernel/mpparse.c | 23
-rw-r--r--  arch/x86/kernel/pci-dma.c | 23
-rw-r--r--  arch/x86/kernel/pci-nommu.c | 2
-rw-r--r--  arch/x86/kernel/pci-swiotlb.c | 8
-rw-r--r--  arch/x86/kernel/platform-quirks.c | 1
-rw-r--r--  arch/x86/kernel/process.c | 1
-rw-r--r--  arch/x86/kernel/setup.c | 1
-rw-r--r--  arch/x86/kernel/signal_compat.c | 123
-rw-r--r--  arch/x86/kernel/smpboot.c | 5
-rw-r--r--  arch/x86/kernel/time.c | 9
-rw-r--r--  arch/x86/kernel/tsc.c | 61
-rw-r--r--  arch/x86/kernel/uprobes.c | 107
-rw-r--r--  arch/x86/kvm/emulate.c | 9
-rw-r--r--  arch/x86/kvm/vmx.c | 4
-rw-r--r--  arch/x86/lib/Makefile | 1
-rw-r--r--  arch/x86/lib/delay.c | 2
-rw-r--r--  arch/x86/lib/retpoline.S | 57
-rw-r--r--  arch/x86/mm/extable.c | 34
-rw-r--r--  arch/x86/mm/fault.c | 22
-rw-r--r--  arch/x86/mm/mem_encrypt.c | 2
-rw-r--r--  arch/x86/mm/tlb.c | 34
-rw-r--r--  arch/x86/pci/intel_mid_pci.c | 1
-rw-r--r--  arch/x86/pci/sta2x11-fixup.c | 1
-rw-r--r--  arch/x86/platform/efi/efi_64.c | 1
-rw-r--r--  arch/x86/platform/intel-mid/intel-mid.c | 2
-rw-r--r--  arch/x86/platform/intel-mid/sfi.c | 5
-rw-r--r--  arch/x86/platform/uv/tlb_uv.c | 3
-rw-r--r--  arch/x86/tools/Makefile | 12
-rw-r--r--  arch/x86/tools/insn_decoder_test.c (renamed from arch/x86/tools/test_get_len.c) | 43
-rw-r--r--  arch/x86/tools/objdump_reformat.awk (renamed from arch/x86/tools/distill.awk) | 4
-rw-r--r--  arch/x86/xen/mmu_hvm.c | 2
-rw-r--r--  arch/x86/xen/spinlock.c | 2
-rw-r--r--  arch/xtensa/Kconfig | 7
-rw-r--r--  arch/xtensa/Makefile | 7
-rw-r--r--  arch/xtensa/boot/boot-redboot/bootstrap.S | 1
-rw-r--r--  arch/xtensa/boot/lib/Makefile | 6
-rw-r--r--  arch/xtensa/configs/audio_kc705_defconfig | 1
-rw-r--r--  arch/xtensa/configs/cadence_csp_defconfig | 1
-rw-r--r--  arch/xtensa/configs/generic_kc705_defconfig | 1
-rw-r--r--  arch/xtensa/configs/nommu_kc705_defconfig | 1
-rw-r--r--  arch/xtensa/configs/smp_lx200_defconfig | 1
-rw-r--r--  arch/xtensa/include/asm/asmmacro.h | 40
-rw-r--r--  arch/xtensa/include/asm/current.h | 4
-rw-r--r--  arch/xtensa/include/asm/dma-mapping.h | 10
-rw-r--r--  arch/xtensa/include/asm/fixmap.h | 4
-rw-r--r--  arch/xtensa/include/asm/futex.h | 23
-rw-r--r--  arch/xtensa/include/asm/highmem.h | 2
-rw-r--r--  arch/xtensa/include/asm/kasan.h | 37
-rw-r--r--  arch/xtensa/include/asm/kmem_layout.h | 7
-rw-r--r--  arch/xtensa/include/asm/linkage.h | 9
-rw-r--r--  arch/xtensa/include/asm/mmu_context.h | 1
-rw-r--r--  arch/xtensa/include/asm/nommu_context.h | 4
-rw-r--r--  arch/xtensa/include/asm/page.h | 2
-rw-r--r--  arch/xtensa/include/asm/pgtable.h | 3
-rw-r--r--  arch/xtensa/include/asm/ptrace.h | 15
-rw-r--r--  arch/xtensa/include/asm/regs.h | 1
-rw-r--r--  arch/xtensa/include/asm/stackprotector.h | 40
-rw-r--r--  arch/xtensa/include/asm/string.h | 23
-rw-r--r--  arch/xtensa/include/asm/thread_info.h | 16
-rw-r--r--  arch/xtensa/include/asm/traps.h | 35
-rw-r--r--  arch/xtensa/include/asm/uaccess.h | 9
-rw-r--r--  arch/xtensa/include/uapi/asm/poll.h | 21
-rw-r--r--  arch/xtensa/kernel/Makefile | 3
-rw-r--r--  arch/xtensa/kernel/align.S | 7
-rw-r--r--  arch/xtensa/kernel/asm-offsets.c | 16
-rw-r--r--  arch/xtensa/kernel/coprocessor.S | 3
-rw-r--r--  arch/xtensa/kernel/entry.S | 103
-rw-r--r--  arch/xtensa/kernel/head.S | 10
-rw-r--r--  arch/xtensa/kernel/module.c | 19
-rw-r--r--  arch/xtensa/kernel/pci.c | 30
-rw-r--r--  arch/xtensa/kernel/process.c | 6
-rw-r--r--  arch/xtensa/kernel/ptrace.c | 8
-rw-r--r--  arch/xtensa/kernel/setup.c | 49
-rw-r--r--  arch/xtensa/kernel/signal.c | 8
-rw-r--r--  arch/xtensa/kernel/traps.c | 64
-rw-r--r--  arch/xtensa/kernel/vectors.S | 17
-rw-r--r--  arch/xtensa/kernel/vmlinux.lds.S | 90
-rw-r--r--  arch/xtensa/kernel/xtensa_ksyms.c | 5
-rw-r--r--  arch/xtensa/lib/checksum.S | 74
-rw-r--r--  arch/xtensa/lib/memcopy.S | 81
-rw-r--r--  arch/xtensa/lib/memset.S | 45
-rw-r--r--  arch/xtensa/lib/pci-auto.c | 45
-rw-r--r--  arch/xtensa/lib/strncpy_user.S | 60
-rw-r--r--  arch/xtensa/lib/strnlen_user.S | 28
-rw-r--r--  arch/xtensa/lib/usercopy.S | 138
-rw-r--r--  arch/xtensa/mm/Makefile | 5
-rw-r--r--  arch/xtensa/mm/cache.c | 3
-rw-r--r--  arch/xtensa/mm/fault.c | 22
-rw-r--r--  arch/xtensa/mm/init.c | 36
-rw-r--r--  arch/xtensa/mm/kasan_init.c | 95
-rw-r--r--  arch/xtensa/mm/mmu.c | 31
-rw-r--r--  arch/xtensa/mm/tlb.c | 6
-rw-r--r--  arch/xtensa/platforms/iss/console.c | 4
-rw-r--r--  arch/xtensa/platforms/iss/network.c | 14
-rw-r--r--  block/bfq-cgroup.c | 7
-rw-r--r--  block/bfq-iosched.c | 529
-rw-r--r--  block/bfq-iosched.h | 19
-rw-r--r--  block/bfq-wf2q.c | 7
-rw-r--r--  block/bio-integrity.c | 1
-rw-r--r--  block/bio.c | 30
-rw-r--r--  block/blk-core.c | 87
-rw-r--r--  block/blk-exec.c | 2
-rw-r--r--  block/blk-lib.c | 12
-rw-r--r--  block/blk-map.c | 4
-rw-r--r--  block/blk-merge.c | 13
-rw-r--r--  block/blk-mq-debugfs.c | 22
-rw-r--r--  block/blk-mq-sched.c | 3
-rw-r--r--  block/blk-mq-sched.h | 2
-rw-r--r--  block/blk-mq-sysfs.c | 9
-rw-r--r--  block/blk-mq-tag.c | 13
-rw-r--r--  block/blk-mq.c | 667
-rw-r--r--  block/blk-mq.h | 52
-rw-r--r--  block/blk-sysfs.c | 47
-rw-r--r--  block/blk-throttle.c | 146
-rw-r--r--  block/blk-timeout.c | 26
-rw-r--r--  block/blk-zoned.c | 42
-rw-r--r--  block/blk.h | 46
-rw-r--r--  block/bounce.c | 33
-rw-r--r--  block/bsg-lib.c | 3
-rw-r--r--  block/bsg.c | 44
-rw-r--r--  block/deadline-iosched.c | 114
-rw-r--r--  block/elevator.c | 12
-rw-r--r--  block/genhd.c | 23
-rw-r--r--  block/mq-deadline.c | 141
-rw-r--r--  block/partitions/msdos.c | 4
-rw-r--r--  block/scsi_ioctl.c | 34
-rw-r--r--  crypto/Kconfig | 5
-rw-r--r--  crypto/Makefile | 1
-rw-r--r--  crypto/ablk_helper.c | 5
-rw-r--r--  crypto/aead.c | 19
-rw-r--r--  crypto/af_alg.c | 14
-rw-r--r--  crypto/ahash.c | 33
-rw-r--r--  crypto/algapi.c | 13
-rw-r--r--  crypto/algif_aead.c | 15
-rw-r--r--  crypto/algif_hash.c | 52
-rw-r--r--  crypto/algif_skcipher.c | 60
-rw-r--r--  crypto/api.c | 6
-rw-r--r--  crypto/authenc.c | 4
-rw-r--r--  crypto/authencesn.c | 4
-rw-r--r--  crypto/blkcipher.c | 1
-rw-r--r--  crypto/camellia_generic.c | 3
-rw-r--r--  crypto/cast5_generic.c | 3
-rw-r--r--  crypto/cast6_generic.c | 3
-rw-r--r--  crypto/chacha20_generic.c | 33
-rw-r--r--  crypto/crc32_generic.c | 1
-rw-r--r--  crypto/crc32c_generic.c | 1
-rw-r--r--  crypto/cryptd.c | 17
-rw-r--r--  crypto/crypto_user.c | 4
-rw-r--r--  crypto/ecc.c | 2
-rw-r--r--  crypto/echainiv.c | 5
-rw-r--r--  crypto/gcm.c | 4
-rw-r--r--  crypto/gf128mul.c | 2
-rw-r--r--  crypto/ghash-generic.c | 6
-rw-r--r--  crypto/internal.h | 8
-rw-r--r--  crypto/keywrap.c | 4
-rw-r--r--  crypto/mcryptd.c | 11
-rw-r--r--  crypto/poly1305_generic.c | 27
-rw-r--r--  crypto/proc.c | 2
-rw-r--r--  crypto/salsa20_generic.c | 240
-rw-r--r--  crypto/scompress.c | 51
-rw-r--r--  crypto/seqiv.c | 5
-rw-r--r--  crypto/sha3_generic.c | 332
-rw-r--r--  crypto/shash.c | 25
-rw-r--r--  crypto/simd.c | 4
-rw-r--r--  crypto/skcipher.c | 30
-rw-r--r--  crypto/tcrypt.c | 1083
-rw-r--r--  crypto/testmgr.c | 41
-rw-r--r--  crypto/testmgr.h | 550
-rw-r--r--  crypto/twofish_common.c | 5
-rw-r--r--  crypto/twofish_generic.c | 5
-rw-r--r--  crypto/xcbc.c | 3
-rw-r--r--  drivers/acpi/Kconfig | 32
-rw-r--r--  drivers/acpi/acpi_dbg.c | 4
-rw-r--r--  drivers/acpi/acpi_lpss.c | 139
-rw-r--r--  drivers/acpi/acpi_video.c | 14
-rw-r--r--  drivers/acpi/acpica/acapps.h | 3
-rw-r--r--  drivers/acpi/acpica/acdebug.h | 4
-rw-r--r--  drivers/acpi/acpica/acglobal.h | 82
-rw-r--r--  drivers/acpi/acpica/aclocal.h | 15
-rw-r--r--  drivers/acpi/acpica/acmacros.h | 2
-rw-r--r--  drivers/acpi/acpica/acnamesp.h | 3
-rw-r--r--  drivers/acpi/acpica/acutils.h | 23
-rw-r--r--  drivers/acpi/acpica/dbexec.c | 110
-rw-r--r--  drivers/acpi/acpica/dbfileio.c | 4
-rw-r--r--  drivers/acpi/acpica/dbinput.c | 145
-rw-r--r--  drivers/acpi/acpica/dscontrol.c | 18
-rw-r--r--  drivers/acpi/acpica/dsfield.c | 28
-rw-r--r--  drivers/acpi/acpica/dsobject.c | 4
-rw-r--r--  drivers/acpi/acpica/dspkginit.c | 21
-rw-r--r--  drivers/acpi/acpica/dsutils.c | 3
-rw-r--r--  drivers/acpi/acpica/dswload.c | 6
-rw-r--r--  drivers/acpi/acpica/dswload2.c | 13
-rw-r--r--  drivers/acpi/acpica/evregion.c | 10
-rw-r--r--  drivers/acpi/acpica/exdump.c | 11
-rw-r--r--  drivers/acpi/acpica/hwtimer.c | 29
-rw-r--r--  drivers/acpi/acpica/hwvalid.c | 14
-rw-r--r--  drivers/acpi/acpica/nsaccess.c | 13
-rw-r--r--  drivers/acpi/acpica/nsconvert.c | 3
-rw-r--r--  drivers/acpi/acpica/nsnames.c | 149
-rw-r--r--  drivers/acpi/acpica/nssearch.c | 1
-rw-r--r--  drivers/acpi/acpica/nsxfeval.c | 9
-rw-r--r--  drivers/acpi/acpica/psargs.c | 2
-rw-r--r--  drivers/acpi/acpica/psobject.c | 10
-rw-r--r--  drivers/acpi/acpica/psutils.c | 14
-rw-r--r--  drivers/acpi/acpica/utdebug.c | 18
-rw-r--r--  drivers/acpi/acpica/utdecode.c | 11
-rw-r--r--  drivers/acpi/acpica/uterror.c | 73
-rw-r--r--  drivers/acpi/acpica/utinit.c | 1
-rw-r--r--  drivers/acpi/acpica/utmath.c | 4
-rw-r--r--  drivers/acpi/acpica/utmutex.c | 9
-rw-r--r--  drivers/acpi/acpica/utnonansi.c | 11
-rw-r--r--  drivers/acpi/acpica/utosi.c | 2
-rw-r--r--  drivers/acpi/acpica/utstrsuppt.c | 42
-rw-r--r--  drivers/acpi/acpica/uttrack.c | 6
-rw-r--r--  drivers/acpi/acpica/utxferror.c | 8
-rw-r--r--  drivers/acpi/apei/ghes.c | 81
-rw-r--r--  drivers/acpi/battery.c | 36
-rw-r--r--  drivers/acpi/bus.c | 18
-rw-r--r--  drivers/acpi/button.c | 22
-rw-r--r--  drivers/acpi/device_pm.c | 27
-rw-r--r--  drivers/acpi/ec.c | 2
-rw-r--r--  drivers/acpi/ec_sys.c | 2
-rw-r--r--  drivers/acpi/evged.c | 47
-rw-r--r--  drivers/acpi/internal.h | 2
-rw-r--r--  drivers/acpi/numa.c | 3
-rw-r--r--  drivers/acpi/pci_link.c | 2
-rw-r--r--  drivers/acpi/pmic/intel_pmic_bxtwc.c | 9
-rw-r--r--  drivers/acpi/pmic/intel_pmic_chtdc_ti.c | 5
-rw-r--r--  drivers/acpi/pmic/intel_pmic_chtwc.c | 9
-rw-r--r--  drivers/acpi/pmic/intel_pmic_crc.c | 7
-rw-r--r--  drivers/acpi/pmic/intel_pmic_xpower.c | 7
-rw-r--r--  drivers/acpi/processor_idle.c | 1
-rw-r--r--  drivers/acpi/property.c | 8
-rw-r--r--  drivers/acpi/sleep.c | 16
-rw-r--r--  drivers/acpi/sysfs.c | 26
-rw-r--r--  drivers/acpi/utils.c | 41
-rw-r--r--  drivers/android/binder.c | 2
-rw-r--r--  drivers/ata/Kconfig | 28
-rw-r--r--  drivers/ata/Makefile | 1
-rw-r--r--  drivers/ata/ahci.c | 94
-rw-r--r--  drivers/ata/ahci.h | 3
-rw-r--r--  drivers/ata/ahci_brcm.c | 120
-rw-r--r--  drivers/ata/ata_piix.c | 2
-rw-r--r--  drivers/ata/pata_at32.c | 400
-rw-r--r--  drivers/ata/pata_atiixp.c | 4
-rw-r--r--  drivers/ata/pata_it821x.c | 2
-rw-r--r--  drivers/ata/pata_pdc2027x.c | 4
-rw-r--r--  drivers/ata/sata_mv.c | 2
-rw-r--r--  drivers/base/memory.c | 2
-rw-r--r--  drivers/base/power/domain.c | 69
-rw-r--r--  drivers/base/power/main.c | 415
-rw-r--r--  drivers/base/power/power.h | 11
-rw-r--r--  drivers/base/power/runtime.c | 84
-rw-r--r--  drivers/base/power/sysfs.c | 182
-rw-r--r--  drivers/base/power/wakeirq.c | 8
-rw-r--r--  drivers/base/power/wakeup.c | 26
-rw-r--r--  drivers/base/property.c | 7
-rw-r--r--  drivers/base/regmap/Kconfig | 6
-rw-r--r--  drivers/base/regmap/Makefile | 1
-rw-r--r--  drivers/base/regmap/internal.h | 8
-rw-r--r--  drivers/base/regmap/regcache-flat.c | 15
-rw-r--r--  drivers/base/regmap/regmap-debugfs.c | 12
-rw-r--r--  drivers/base/regmap/regmap-sdw.c | 88
-rw-r--r--  drivers/base/regmap/regmap.c | 47
-rw-r--r--  drivers/block/DAC960.c | 160
-rw-r--r--  drivers/block/Kconfig | 4
-rw-r--r--  drivers/block/aoe/aoe.h | 3
-rw-r--r--  drivers/block/aoe/aoecmd.c | 48
-rw-r--r--  drivers/block/drbd/drbd_bitmap.c | 2
-rw-r--r--  drivers/block/drbd/drbd_main.c | 8
-rw-r--r--  drivers/block/drbd/drbd_receiver.c | 3
-rw-r--r--  drivers/block/null_blk.c | 290
-rw-r--r--  drivers/block/pktcdvd.c | 12
-rw-r--r--  drivers/block/smart1,2.h | 278
-rw-r--r--  drivers/block/zram/zram_drv.c | 2
-rw-r--r--  drivers/bluetooth/hci_ldisc.c | 2
-rw-r--r--  drivers/bluetooth/hci_vhci.c | 2
-rw-r--r--  drivers/bus/Kconfig | 2
-rw-r--r--  drivers/char/apm-emulation.c | 2
-rw-r--r--  drivers/char/dsp56k.c | 2
-rw-r--r--  drivers/char/dtlk.c | 6
-rw-r--r--  drivers/char/hpet.c | 2
-rw-r--r--  drivers/char/hw_random/Kconfig | 45
-rw-r--r--  drivers/char/hw_random/Makefile | 3
-rw-r--r--  drivers/char/hw_random/bcm2835-rng.c | 169
-rw-r--r--  drivers/char/hw_random/bcm63xx-rng.c | 154
-rw-r--r--  drivers/char/hw_random/core.c | 4
-rw-r--r--  drivers/char/hw_random/exynos-trng.c | 235
-rw-r--r--  drivers/char/hw_random/imx-rngc.c | 13
-rw-r--r--  drivers/char/hw_random/mtk-rng.c | 1
-rw-r--r--  drivers/char/hw_random/tpm-rng.c | 50
-rw-r--r--  drivers/char/ipmi/bt-bmc.c | 4
-rw-r--r--  drivers/char/ipmi/ipmi_devintf.c | 4
-rw-r--r--  drivers/char/ipmi/ipmi_dmi.c | 5
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c | 2
-rw-r--r--  drivers/char/ipmi/ipmi_powernv.c | 6
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 13
-rw-r--r--  drivers/char/ipmi/ipmi_si_platform.c | 2
-rw-r--r--  drivers/char/ipmi/ipmi_ssif.c | 3
-rw-r--r--  drivers/char/ipmi/ipmi_watchdog.c | 8
-rw-r--r--  drivers/char/pcmcia/cm4040_cs.c | 4
-rw-r--r--  drivers/char/ppdev.c | 4
-rw-r--r--  drivers/char/random.c | 28
-rw-r--r--  drivers/char/rtc.c | 4
-rw-r--r--  drivers/char/snsc.c | 4
-rw-r--r--  drivers/char/sonypi.c | 2
-rw-r--r--  drivers/char/tpm/Kconfig | 11
-rw-r--r--  drivers/char/tpm/Makefile | 5
-rw-r--r--  drivers/char/tpm/tpm-chip.c | 67
-rw-r--r--  drivers/char/tpm/tpm-interface.c | 231
-rw-r--r--  drivers/char/tpm/tpm.h | 52
-rw-r--r--  drivers/char/tpm/tpm1_eventlog.c | 13
-rw-r--r--  drivers/char/tpm/tpm2-cmd.c | 12
-rw-r--r--  drivers/char/tpm/tpm2_eventlog.c | 2
-rw-r--r--  drivers/char/tpm/tpm_eventlog_acpi.c (renamed from drivers/char/tpm/tpm_acpi.c) | 4
-rw-r--r--  drivers/char/tpm/tpm_eventlog_efi.c | 66
-rw-r--r--  drivers/char/tpm/tpm_eventlog_of.c (renamed from drivers/char/tpm/tpm_of.c) | 6
-rw-r--r--  drivers/char/tpm/tpm_i2c_infineon.c | 27
-rw-r--r--  drivers/char/tpm/tpm_tis.c | 108
-rw-r--r--  drivers/char/tpm/tpm_tis_core.c | 193
-rw-r--r--  drivers/char/tpm/tpm_tis_core.h | 16
-rw-r--r--  drivers/char/tpm/tpm_vtpm_proxy.c | 4
-rw-r--r--  drivers/char/tpm/xen-tpmfront.c | 61
-rw-r--r--  drivers/char/virtio_console.c | 4
-rw-r--r--  drivers/char/xillybus/xillybus_core.c | 4
-rw-r--r--  drivers/clocksource/Kconfig | 8
-rw-r--r--  drivers/clocksource/Makefile | 1
-rw-r--r--  drivers/clocksource/owl-timer.c | 5
-rw-r--r--  drivers/clocksource/tcb_clksrc.c | 2
-rw-r--r--  drivers/clocksource/timer-of.c | 84
-rw-r--r--  drivers/clocksource/timer-of.h | 1
-rw-r--r--  drivers/clocksource/timer-sprd.c | 159
-rw-r--r--  drivers/clocksource/timer-stm32.c | 358
-rw-r--r--  drivers/cpufreq/Kconfig.arm | 88
-rw-r--r--  drivers/cpufreq/Makefile | 9
-rw-r--r--  drivers/cpufreq/arm_big_little.c | 23
-rw-r--r--  drivers/cpufreq/armada-37xx-cpufreq.c | 241
-rw-r--r--  drivers/cpufreq/cpufreq-dt-platdev.c | 8
-rw-r--r--  drivers/cpufreq/cpufreq-dt.c | 27
-rw-r--r--  drivers/cpufreq/cpufreq.c | 55
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c | 3
-rw-r--r--  drivers/cpufreq/imx6q-cpufreq.c | 171
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 14
-rw-r--r--  drivers/cpufreq/longhaul.c | 2
-rw-r--r--  drivers/cpufreq/mediatek-cpufreq.c | 23
-rw-r--r--  drivers/cpufreq/mvebu-cpufreq.c | 16
-rw-r--r--  drivers/cpufreq/powernv-cpufreq.c | 143
-rw-r--r--  drivers/cpufreq/qoriq-cpufreq.c | 14
-rw-r--r--  drivers/cpufreq/scpi-cpufreq.c | 193
-rw-r--r--  drivers/cpufreq/ti-cpufreq.c | 51
-rw-r--r--  drivers/cpuidle/governor.c | 5
-rw-r--r--  drivers/crypto/Kconfig | 1
-rw-r--r--  drivers/crypto/amcc/crypto4xx_alg.c | 6
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.c | 131
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.h | 4
-rw-r--r--  drivers/crypto/amcc/crypto4xx_reg_def.h | 4
-rw-r--r--  drivers/crypto/amcc/crypto4xx_trng.c | 2
-rw-r--r--  drivers/crypto/axis/artpec6_crypto.c | 8
-rw-r--r--  drivers/crypto/bcm/cipher.c | 1
-rw-r--r--  drivers/crypto/bfin_crc.c | 3
-rw-r--r--  drivers/crypto/caam/caamalg.c | 120
-rw-r--r--  drivers/crypto/caam/caamalg_desc.c | 182
-rw-r--r--  drivers/crypto/caam/caamalg_desc.h | 10
-rw-r--r--  drivers/crypto/caam/caamalg_qi.c | 68
-rw-r--r--  drivers/crypto/caam/caamhash.c | 73
-rw-r--r--  drivers/crypto/caam/ctrl.c | 4
-rw-r--r--  drivers/crypto/caam/desc.h | 29
-rw-r--r--  drivers/crypto/caam/desc_constr.h | 51
-rw-r--r--  drivers/crypto/caam/intern.h | 1
-rw-r--r--  drivers/crypto/caam/key_gen.c | 30
-rw-r--r--  drivers/crypto/caam/key_gen.h | 30
-rw-r--r--  drivers/crypto/cavium/cpt/cptvf_reqmanager.c | 3
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_reqmgr.c | 1
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes-galois.c | 1
-rw-r--r--  drivers/crypto/chelsio/Kconfig | 10
-rw-r--r--  drivers/crypto/chelsio/Makefile | 1
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.c | 540
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.h | 15
-rw-r--r--  drivers/crypto/chelsio/chcr_core.c | 14
-rw-r--r--  drivers/crypto/chelsio/chcr_core.h | 38
-rw-r--r--  drivers/crypto/chelsio/chcr_crypto.h | 76
-rw-r--r--  drivers/crypto/chelsio/chcr_ipsec.c | 654
-rw-r--r--  drivers/crypto/exynos-rng.c | 108
-rw-r--r--  drivers/crypto/hifn_795x.c | 1
-rw-r--r--  drivers/crypto/inside-secure/safexcel.c | 370
-rw-r--r--  drivers/crypto/inside-secure/safexcel.h | 173
-rw-r--r--  drivers/crypto/inside-secure/safexcel_cipher.c | 53
-rw-r--r--  drivers/crypto/inside-secure/safexcel_hash.c | 125
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c | 7
-rw-r--r--  drivers/crypto/marvell/cesa.c | 20
-rw-r--r--  drivers/crypto/nx/nx-842-powernv.c | 4
-rw-r--r--  drivers/crypto/picoxcell_crypto.c | 27
-rw-r--r--  drivers/crypto/qat/qat_common/qat_hal.c | 133
-rw-r--r--  drivers/crypto/s5p-sss.c | 26
-rw-r--r--  drivers/crypto/stm32/Kconfig | 13
-rw-r--r--  drivers/crypto/stm32/Makefile | 5
-rw-r--r--  drivers/crypto/stm32/stm32-cryp.c | 1170
-rw-r--r--  drivers/crypto/stm32/stm32_crc32.c | 2
-rw-r--r--  drivers/devfreq/devfreq.c | 5
-rw-r--r--  drivers/dma-buf/dma-buf.c | 6
-rw-r--r--  drivers/dma-buf/sync_file.c | 2
-rw-r--r--  drivers/dma/amba-pl08x.c | 11
-rw-r--r--  drivers/dma/bcm2835-dma.c | 10
-rw-r--r--  drivers/dma/cppi41.c | 2
-rw-r--r--  drivers/dma/dma-jz4780.c | 10
-rw-r--r--  drivers/dma/dmatest.c | 2
-rw-r--r--  drivers/dma/edma.c | 7
-rw-r--r--  drivers/dma/img-mdc-dma.c | 17
-rw-r--r--  drivers/dma/imx-sdma.c | 6
-rw-r--r--  drivers/dma/ioat/dma.c | 2
-rw-r--r--  drivers/dma/k3dma.c | 10
-rw-r--r--  drivers/dma/mic_x100_dma.c | 4
-rw-r--r--  drivers/dma/omap-dma.c | 2
-rw-r--r--  drivers/dma/qcom/hidma.c | 41
-rw-r--r--  drivers/dma/qcom/hidma_ll.c | 9
-rw-r--r--  drivers/dma/qcom/hidma_mgmt.c | 61
-rw-r--r--  drivers/dma/s3c24xx-dma.c | 11
-rw-r--r--  drivers/dma/sh/rcar-dmac.c | 68
-rw-r--r--  drivers/dma/sprd-dma.c | 2
-rw-r--r--  drivers/dma/stm32-dmamux.c | 3
-rw-r--r--  drivers/dma/tegra20-apb-dma.c | 19
-rw-r--r--  drivers/dma/ti-dma-crossbar.c | 10
-rw-r--r--  drivers/dma/timb_dma.c | 2
-rw-r--r--  drivers/dma/virt-dma.c | 5
-rw-r--r--  drivers/dma/virt-dma.h | 44
-rw-r--r--  drivers/dma/xilinx/xilinx_dma.c | 302
-rw-r--r--  drivers/dma/xilinx/zynqmp_dma.c | 179
-rw-r--r--  drivers/edac/Kconfig | 7
-rw-r--r--  drivers/edac/Makefile | 1
-rw-r--r--  drivers/edac/mv64x60_edac.c | 2
-rw-r--r--  drivers/edac/octeon_edac-lmc.c | 1
-rw-r--r--  drivers/edac/ti_edac.c | 341
-rw-r--r--  drivers/extcon/extcon-axp288.c | 39
-rw-r--r--  drivers/extcon/extcon-usbc-cros-ec.c | 142
-rw-r--r--  drivers/firewire/core-cdev.c | 4
-rw-r--r--  drivers/firewire/nosy.c | 4
-rw-r--r--  drivers/firmware/Kconfig | 8
-rw-r--r--  drivers/firmware/Makefile | 1
-rw-r--r--  drivers/firmware/arm_sdei.c | 1092
-rw-r--r--  drivers/firmware/efi/Kconfig | 10
-rw-r--r--  drivers/firmware/efi/Makefile | 3
-rw-r--r--  drivers/firmware/efi/capsule-loader.c | 2
-rw-r--r--  drivers/firmware/efi/cper-arm.c | 356
-rw-r--r--  drivers/firmware/efi/cper.c | 122
-rw-r--r--  drivers/firmware/efi/efi.c | 6
-rw-r--r--  drivers/firmware/efi/libstub/Makefile | 3
-rw-r--r--  drivers/firmware/efi/libstub/tpm.c | 81
-rw-r--r--  drivers/firmware/efi/tpm.c | 40
-rw-r--r--  drivers/firmware/psci.c | 2
-rw-r--r--  drivers/firmware/psci_checker.c | 46
-rw-r--r--  drivers/gpio/Kconfig | 32
-rw-r--r--  drivers/gpio/Makefile | 3
-rw-r--r--  drivers/gpio/devres.c | 42
-rw-r--r--  drivers/gpio/gpio-74x164.c | 5
-rw-r--r--  drivers/gpio/gpio-adp5520.c | 3
-rw-r--r--  drivers/gpio/gpio-adp5588.c | 2
-rw-r--r--  drivers/gpio/gpio-altera.c | 3
-rw-r--r--  drivers/gpio/gpio-amd8111.c | 2
-rw-r--r--  drivers/gpio/gpio-arizona.c | 2
-rw-r--r--  drivers/gpio/gpio-aspeed.c | 41
-rw-r--r--  drivers/gpio/gpio-ath79.c | 3
-rw-r--r--  drivers/gpio/gpio-axp209.c | 188
-rw-r--r--  drivers/gpio/gpio-bcm-kona.c | 8
-rw-r--r--  drivers/gpio/gpio-brcmstb.c | 1
-rw-r--r--  drivers/gpio/gpio-bt8xx.c | 2
-rw-r--r--  drivers/gpio/gpio-crystalcove.c | 2
-rw-r--r--  drivers/gpio/gpio-cs5535.c | 2
-rw-r--r--  drivers/gpio/gpio-da9052.c | 2
-rw-r--r--  drivers/gpio/gpio-da9055.c | 2
-rw-r--r--  drivers/gpio/gpio-davinci.c | 2
-rw-r--r--  drivers/gpio/gpio-ftgpio010.c | 4
-rw-r--r--  drivers/gpio/gpio-iop.c | 4
-rw-r--r--  drivers/gpio/gpio-it87.c | 2
-rw-r--r--  drivers/gpio/gpio-max732x.c | 6
-rw-r--r--  drivers/gpio/gpio-merrifield.c | 11
-rw-r--r--  drivers/gpio/gpio-mockup.c | 288
-rw-r--r--  drivers/gpio/gpio-omap.c | 39
-rw-r--r--  drivers/gpio/gpio-pcie-idio-24.c | 447
-rw-r--r--  drivers/gpio/gpio-stmpe.c | 54
-rw-r--r--  drivers/gpio/gpio-thunderx.c | 4
-rw-r--r--  drivers/gpio/gpio-winbond.c | 732
-rw-r--r--  drivers/gpio/gpiolib-acpi.c | 57
-rw-r--r--  drivers/gpio/gpiolib-of.c | 119
-rw-r--r--  drivers/gpio/gpiolib-sysfs.c | 33
-rw-r--r--  drivers/gpio/gpiolib.c | 334
-rw-r--r--  drivers/gpio/gpiolib.h | 18
-rw-r--r--  drivers/gpu/drm/drm_file.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 8
-rw-r--r--  drivers/gpu/drm/r128/r128_state.c | 23
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 2
-rw-r--r--  drivers/gpu/vga/vgaarb.c | 2
-rw-r--r--  drivers/hid/Kconfig | 12
-rw-r--r--  drivers/hid/Makefile | 3
-rw-r--r--  drivers/hid/hid-asus.c | 41
-rw-r--r--  drivers/hid/hid-core.c | 932
-rw-r--r--  drivers/hid/hid-debug.c | 2
-rw-r--r--  drivers/hid/hid-elecom.c | 78
-rw-r--r--  drivers/hid/hid-elo.c | 6
-rw-r--r--  drivers/hid/hid-generic.c | 68
-rw-r--r--  drivers/hid/hid-ids.h | 7
-rw-r--r--  drivers/hid/hid-jabra.c | 58
-rw-r--r--  drivers/hid/hid-multitouch.c | 137
-rw-r--r--  drivers/hid/hid-quirks.c | 1276
-rw-r--r--  drivers/hid/hid-rmi.c | 1
-rw-r--r--  drivers/hid/hid-roccat-kovaplus.c | 2
-rw-r--r--  drivers/hid/hid-roccat.c | 2
-rw-r--r--  drivers/hid/hid-sensor-custom.c | 4
-rw-r--r--  drivers/hid/hid-sony.c | 93
-rw-r--r--  drivers/hid/hidraw.c | 2
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid.c | 18
-rw-r--r--  drivers/hid/intel-ish-hid/ipc/hw-ish.h | 1
-rw-r--r--  drivers/hid/intel-ish-hid/ipc/pci-ish.c | 1
-rw-r--r--  drivers/hid/uhid.c | 2
-rw-r--r--  drivers/hid/usbhid/Makefile | 2
-rw-r--r--  drivers/hid/usbhid/hid-core.c | 11
-rw-r--r--  drivers/hid/usbhid/hid-quirks.c | 402
-rw-r--r--  drivers/hid/usbhid/hiddev.c | 2
-rw-r--r--  drivers/hid/wacom_sys.c | 134
-rw-r--r--  drivers/hid/wacom_wac.c | 67
-rw-r--r--  drivers/hid/wacom_wac.h | 6
-rw-r--r--  drivers/hsi/clients/cmt_speech.c | 8
-rw-r--r--  drivers/hv/hv_utils_transport.c | 2
-rw-r--r--  drivers/hwmon/Kconfig | 26
-rw-r--r--  drivers/hwmon/Makefile | 1
-rw-r--r--  drivers/hwmon/aspeed-pwm-tacho.c | 22
-rw-r--r--  drivers/hwmon/coretemp.c | 3
-rw-r--r--  drivers/hwmon/dell-smm-hwmon.c | 54
-rw-r--r--  drivers/hwmon/hih6130.c | 2
-rw-r--r--  drivers/hwmon/hwmon.c | 12
-rw-r--r--  drivers/hwmon/iio_hwmon.c | 3
-rw-r--r--  drivers/hwmon/ina2xx.c | 90
-rw-r--r--  drivers/hwmon/k10temp.c | 1
-rw-r--r--  drivers/hwmon/lm75.c | 2
-rw-r--r--  drivers/hwmon/pmbus/Kconfig | 1
-rw-r--r--  drivers/hwmon/pmbus/ibm-cffps.c | 288
-rw-r--r--  drivers/hwmon/pmbus/ir35221.c | 189
-rw-r--r--  drivers/hwmon/pmbus/lm25066.c | 67
-rw-r--r--  drivers/hwmon/pmbus/max31785.c | 294
-rw-r--r--  drivers/hwmon/pmbus/pmbus.h | 41
-rw-r--r--  drivers/hwmon/pmbus/pmbus_core.c | 273
-rw-r--r--  drivers/hwmon/sht15.c | 1
-rw-r--r--  drivers/hwmon/sht21.c | 2
-rw-r--r--  drivers/hwmon/sht3x.c | 7
-rw-r--r--  drivers/hwmon/w83773g.c | 329
-rw-r--r--  drivers/hwtracing/coresight/of_coresight.c | 15
-rw-r--r--  drivers/i2c/busses/i2c-designware-core.h | 2
-rw-r--r--  drivers/i2c/busses/i2c-designware-platdrv.c | 39
-rw-r--r--  drivers/iio/adc/Kconfig | 37
-rw-r--r--  drivers/iio/adc/Makefile | 3
-rw-r--r--  drivers/iio/adc/sd_adc_modulator.c | 68
-rw-r--r--  drivers/iio/adc/stm32-dfsdm-adc.c | 1205
-rw-r--r--  drivers/iio/adc/stm32-dfsdm-core.c | 302
-rw-r--r--  drivers/iio/adc/stm32-dfsdm.h | 310
-rw-r--r--  drivers/iio/buffer/Kconfig | 10
-rw-r--r--  drivers/iio/buffer/Makefile | 1
-rw-r--r--  drivers/iio/buffer/industrialio-buffer-cb.c | 11
-rw-r--r--  drivers/iio/buffer/industrialio-hw-consumer.c | 247
-rw-r--r--  drivers/iio/iio_core.h | 2
-rw-r--r--  drivers/iio/industrialio-buffer.c | 2
-rw-r--r--  drivers/iio/industrialio-event.c | 4
-rw-r--r--  drivers/iio/inkern.c | 17
-rw-r--r--  drivers/infiniband/Kconfig | 1
-rw-r--r--  drivers/infiniband/core/Makefile | 2
-rw-r--r--  drivers/infiniband/core/addr.c | 65
-rw-r--r--  drivers/infiniband/core/cache.c | 23
-rw-r--r--  drivers/infiniband/core/cm.c | 227
-rw-r--r--  drivers/infiniband/core/cma.c | 252
-rw-r--r--  drivers/infiniband/core/cma_configfs.c | 2
-rw-r--r--  drivers/infiniband/core/core_priv.h | 52
-rw-r--r--  drivers/infiniband/core/cq.c | 39
-rw-r--r--  drivers/infiniband/core/device.c | 42
-rw-r--r--  drivers/infiniband/core/fmr_pool.c | 12
-rw-r--r--  drivers/infiniband/core/iwpm_util.c | 1
-rw-r--r--  drivers/infiniband/core/mad.c | 1
-rw-r--r--  drivers/infiniband/core/netlink.c | 10
-rw-r--r--  drivers/infiniband/core/nldev.c | 394
-rw-r--r--  drivers/infiniband/core/restrack.c | 164
-rw-r--r--  drivers/infiniband/core/roce_gid_mgmt.c | 13
-rw-r--r--  drivers/infiniband/core/sa_query.c | 18
-rw-r--r--  drivers/infiniband/core/security.c | 10
-rw-r--r--  drivers/infiniband/core/sysfs.c | 1
-rw-r--r--  drivers/infiniband/core/ucm.c | 77
-rw-r--r--  drivers/infiniband/core/ucma.c | 23
-rw-r--r--  drivers/infiniband/core/umem.c | 2
-rw-r--r--  drivers/infiniband/core/user_mad.c | 127
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 14
-rw-r--r--  drivers/infiniband/core/uverbs_ioctl.c | 19
-rw-r--r--  drivers/infiniband/core/uverbs_main.c | 103
-rw-r--r--  drivers/infiniband/core/uverbs_std_types.c | 3
-rw-r--r--  drivers/infiniband/core/verbs.c | 312
-rw-r--r--  drivers/infiniband/hw/bnxt_re/bnxt_re.h | 43
-rw-r--r--  drivers/infiniband/hw/bnxt_re/hw_counters.c | 145
-rw-r--r--  drivers/infiniband/hw/bnxt_re/hw_counters.h | 39
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.c | 404
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.h | 20
-rw-r--r--  drivers/infiniband/hw/bnxt_re/main.c | 251
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.c | 463
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.h | 78
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | 5
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_rcfw.h | 7
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_res.c | 9
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_sp.c | 141
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_sp.h | 91
-rw-r--r--  drivers/infiniband/hw/bnxt_re/roce_hsi.h | 127
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c | 27
-rw-r--r--  drivers/infiniband/hw/cxgb4/device.c | 36
-rw-r--r--  drivers/infiniband/hw/cxgb4/ev.c | 2
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 4
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c | 6
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4.h | 4
-rw-r--r--  drivers/infiniband/hw/hfi1/chip.c | 87
-rw-r--r--  drivers/infiniband/hw/hfi1/chip.h | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/driver.c | 16
-rw-r--r--  drivers/infiniband/hw/hfi1/file_ops.c | 18
-rw-r--r--  drivers/infiniband/hw/hfi1/firmware.c | 64
-rw-r--r--  drivers/infiniband/hw/hfi1/hfi.h | 25
-rw-r--r--  drivers/infiniband/hw/hfi1/init.c | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/mad.c | 6
-rw-r--r--  drivers/infiniband/hw/hfi1/qp.c | 10
-rw-r--r--  drivers/infiniband/hw/hfi1/rc.c | 12
-rw-r--r--  drivers/infiniband/hw/hfi1/ruc.c | 1
-rw-r--r--  drivers/infiniband/hw/hfi1/sdma.c | 1
-rw-r--r--  drivers/infiniband/hw/hfi1/uc.c | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/ud.c | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs.c | 6
-rw-r--r--  drivers/infiniband/hw/hns/Makefile | 2
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_cmd.c | 1
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_cmd.h | 10
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_common.h | 11
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_cq.c | 19
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_device.h | 103
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_eq.c | 759
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_eq.h | 134
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 758
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v1.h | 44
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 1837
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 283
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_main.c | 16
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_qp.c | 72
-rw-r--r--  drivers/infiniband/hw/i40iw/Kconfig | 1
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw.h | 3
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_cm.c | 68
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_cm.h | 8
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_ctrl.c | 25
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_d.h | 1
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_hw.c | 3
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_main.c | 13
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_puda.c | 5
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_puda.h | 1
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_uk.c | 18
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_user.h | 3
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_utils.c | 50
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_verbs.c | 5
-rw-r--r--  drivers/infiniband/hw/mlx4/cq.c | 4
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 19
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c | 20
-rw-r--r--  drivers/infiniband/hw/mlx5/cong.c | 83
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/mad.c | 23
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 1353
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h | 111
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 3
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c | 9
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c | 432
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_memfree.c | 7
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_user.h | 112
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.c | 2
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.h | 3
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_hw.c | 19
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_stats.c | 8
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 10
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.c | 15
-rw-r--r--  drivers/infiniband/hw/qib/qib.h | 8
-rw-r--r--  drivers/infiniband/hw/qib/qib_driver.c | 16
-rw-r--r--  drivers/infiniband/hw/qib/qib_eeprom.c | 3
-rw-r--r--  drivers/infiniband/hw/qib/qib_file_ops.c | 82
-rw-r--r--  drivers/infiniband/hw/qib/qib_init.c | 2
-rw-r--r--  drivers/infiniband/hw/qib/qib_keys.c | 235
-rw-r--r--  drivers/infiniband/hw/qib/qib_rc.c | 9
-rw-r--r--  drivers/infiniband/hw/qib/qib_ruc.c | 1
-rw-r--r--  drivers/infiniband/hw/qib/qib_uc.c | 2
-rw-r--r--  drivers/infiniband/hw/qib/qib_ud.c | 2
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.c | 2
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib_sysfs.c | 1
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib_verbs.c | 1
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma.h | 4
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c | 13
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c | 21
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c | 15
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c | 11
-rw-r--r--  drivers/infiniband/sw/rdmavt/cq.c | 15
-rw-r--r--  drivers/infiniband/sw/rdmavt/mcast.c | 4
-rw-r--r--  drivers/infiniband/sw/rdmavt/mr.c | 2
-rw-r--r--  drivers/infiniband/sw/rdmavt/qp.c | 28
-rw-r--r--  drivers/infiniband/sw/rdmavt/srq.c | 16
-rw-r--r--  drivers/infiniband/sw/rdmavt/trace.h | 4
-rw-r--r--  drivers/infiniband/sw/rdmavt/trace_qp.h | 42
-rw-r--r--  drivers/infiniband/sw/rdmavt/vt.c | 10
-rw-r--r--  drivers/infiniband/sw/rdmavt/vt.h | 6
-rw-r--r--  drivers/infiniband/sw/rxe/Kconfig | 4
-rw-r--r--  drivers/infiniband/sw/rxe/rxe.c | 6
-rw-r--r--  drivers/infiniband/sw/rxe/rxe.h | 6
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_loc.h | 1
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_net.c | 18
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_net.h | 1
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_qp.c | 12
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_recv.c | 3
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_req.c | 9
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_resp.c | 5
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.c | 2
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.h | 3
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c | 18
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c | 5
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 98
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_verbs.c | 6
-rw-r--r--  drivers/infiniband/ulp/iser/iser_initiator.c | 16
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c | 7
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.h | 1
-rw-r--r--  drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c | 2
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 795
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.h | 43
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c | 962
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.h | 100
-rw-r--r--  drivers/input/evdev.c | 4
-rw-r--r--  drivers/input/input.c | 2
-rw-r--r--  drivers/input/joydev.c | 2
-rw-r--r--  drivers/input/misc/hp_sdc_rtc.c | 2
-rw-r--r--  drivers/input/misc/uinput.c | 2
-rw-r--r--  drivers/input/mousedev.c | 4
-rw-r--r--  drivers/input/serio/serio_raw.c | 4
-rw-r--r--  drivers/input/serio/userio.c | 2
-rw-r--r--  drivers/iommu/intel-iommu.c | 2
-rw-r--r--  drivers/irqchip/Kconfig | 8
-rw-r--r--  drivers/irqchip/Makefile | 1
-rw-r--r--  drivers/irqchip/irq-bcm2836.c | 46
-rw-r--r--  drivers/irqchip/irq-gic-v3.c | 40
-rw-r--r--  drivers/irqchip/irq-goldfish-pic.c | 139
-rw-r--r--  drivers/irqchip/irq-ompic.c | 4
-rw-r--r--  drivers/isdn/capi/capi.c | 4
-rw-r--r--  drivers/isdn/divert/divert_procfs.c | 4
-rw-r--r--  drivers/isdn/hardware/eicon/divamnt.c | 4
-rw-r--r--  drivers/isdn/hardware/eicon/divasi.c | 4
-rw-r--r--  drivers/isdn/hardware/eicon/divasmain.c | 2
-rw-r--r--  drivers/isdn/hardware/eicon/divasproc.c | 2
-rw-r--r--  drivers/isdn/hysdn/hysdn_proclog.c | 4
-rw-r--r--  drivers/isdn/i4l/isdn_common.c | 4
-rw-r--r--  drivers/isdn/i4l/isdn_ppp.c | 4
-rw-r--r--  drivers/isdn/i4l/isdn_ppp.h | 2
-rw-r--r--  drivers/isdn/mISDN/l1oip_core.c | 22
-rw-r--r--  drivers/isdn/mISDN/timerdev.c | 4
-rw-r--r--  drivers/leds/Kconfig | 9
-rw-r--r--  drivers/leds/Makefile | 1
-rw-r--r--  drivers/leds/leds-as3645a.c | 7
-rw-r--r--  drivers/leds/leds-blinkm.c | 4
-rw-r--r--  drivers/leds/leds-lm3692x.c | 393
-rw-r--r--  drivers/leds/leds-lp8860.c | 66
-rw-r--r--  drivers/leds/leds-pm8058.c | 2
-rw-r--r--  drivers/leds/leds-pwm.c | 1
-rw-r--r--  drivers/leds/trigger/Kconfig | 9
-rw-r--r--  drivers/leds/trigger/Makefile | 1
-rw-r--r--  drivers/leds/trigger/ledtrig-netdev.c | 496
-rw-r--r--  drivers/leds/trigger/ledtrig-transient.c | 33
-rw-r--r--  drivers/leds/uleds.c | 2
-rw-r--r--  drivers/lightnvm/Kconfig | 7
-rw-r--r--  drivers/lightnvm/Makefile | 1
-rw-r--r--  drivers/lightnvm/core.c | 462
-rw-r--r--  drivers/lightnvm/pblk-cache.c | 5
-rw-r--r--  drivers/lightnvm/pblk-core.c | 55
-rw-r--r--  drivers/lightnvm/pblk-gc.c | 23
-rw-r--r--  drivers/lightnvm/pblk-init.c | 104
-rw-r--r--  drivers/lightnvm/pblk-map.c | 2
-rw-r--r--  drivers/lightnvm/pblk-rb.c | 111
-rw-r--r--  drivers/lightnvm/pblk-read.c | 35
-rw-r--r--  drivers/lightnvm/pblk-recovery.c | 43
-rw-r--r--  drivers/lightnvm/pblk-rl.c | 54
-rw-r--r--  drivers/lightnvm/pblk-sysfs.c | 15
-rw-r--r--  drivers/lightnvm/pblk-write.c | 23
-rw-r--r--  drivers/lightnvm/pblk.h | 163
-rw-r--r--  drivers/lightnvm/rrpc.c | 1625
-rw-r--r--  drivers/lightnvm/rrpc.h | 290
-rw-r--r--  drivers/macintosh/smu.c | 4
-rw-r--r--  drivers/macintosh/via-pmu.c | 4
-rw-r--r--  drivers/mailbox/mailbox-test.c | 2
-rw-r--r--  drivers/md/Kconfig | 7
-rw-r--r--  drivers/md/Makefile | 1
-rw-r--r--  drivers/md/bcache/alloc.c | 19
-rw-r--r--  drivers/md/bcache/bcache.h | 24
-rw-r--r--  drivers/md/bcache/btree.c | 10
-rw-r--r--  drivers/md/bcache/closure.c | 47
-rw-r--r--  drivers/md/bcache/closure.h | 60
-rw-r--r--  drivers/md/bcache/debug.c | 7
-rw-r--r--  drivers/md/bcache/io.c | 13
-rw-r--r--  drivers/md/bcache/movinggc.c | 2
-rw-r--r--  drivers/md/bcache/request.c | 29
-rw-r--r--  drivers/md/bcache/super.c | 27
-rw-r--r--  drivers/md/bcache/util.c | 34
-rw-r--r--  drivers/md/bcache/util.h | 1
-rw-r--r--  drivers/md/bcache/writeback.c | 203
-rw-r--r--  drivers/md/bcache/writeback.h | 12
-rw-r--r--  drivers/md/dm-bufio.c | 37
-rw-r--r--  drivers/md/dm-core.h | 5
-rw-r--r--  drivers/md/dm-crypt.c | 6
-rw-r--r--  drivers/md/dm-delay.c | 2
-rw-r--r--  drivers/md/dm-flakey.c | 5
-rw-r--r--  drivers/md/dm-io.c | 3
-rw-r--r--  drivers/md/dm-ioctl.c | 4
-rw-r--r--  drivers/md/dm-kcopyd.c | 6
-rw-r--r--  drivers/md/dm-log-writes.c | 2
-rw-r--r--  drivers/md/dm-mpath.c | 316
-rw-r--r--  drivers/md/dm-queue-length.c | 6
-rw-r--r--  drivers/md/dm-raid.c | 389
-rw-r--r--  drivers/md/dm-rq.c | 34
-rw-r--r--  drivers/md/dm-service-time.c | 6
-rw-r--r--  drivers/md/dm-snap.c | 84
-rw-r--r--  drivers/md/dm-stats.c | 1
-rw-r--r--drivers/md/dm-table.c114
-rw-r--r--drivers/md/dm-thin.c9
-rw-r--r--drivers/md/dm-unstripe.c219
-rw-r--r--drivers/md/dm-zoned-metadata.c3
-rw-r--r--drivers/md/dm-zoned-target.c3
-rw-r--r--drivers/md/dm.c680
-rw-r--r--drivers/md/dm.h4
-rw-r--r--drivers/md/md.c35
-rw-r--r--drivers/md/md.h9
-rw-r--r--drivers/md/raid1.c11
-rw-r--r--drivers/md/raid10.c12
-rw-r--r--drivers/md/raid5-cache.c31
-rw-r--r--drivers/md/raid5-log.h30
-rw-r--r--drivers/md/raid5-ppl.c167
-rw-r--r--drivers/md/raid5.c16
-rw-r--r--drivers/media/cec/cec-api.c4
-rw-r--r--drivers/media/common/saa7146/saa7146_fops.c8
-rw-r--r--drivers/media/common/siano/smsdvb-debugfs.c7
-rw-r--r--drivers/media/dvb-core/dmxdev.c8
-rw-r--r--drivers/media/dvb-core/dvb_ca_en50221.c4
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c2
-rw-r--r--drivers/media/firewire/firedtv-ci.c2
-rw-r--r--drivers/media/media-devnode.c2
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c12
-rw-r--r--drivers/media/pci/cx18/cx18-fileops.c8
-rw-r--r--drivers/media/pci/cx18/cx18-fileops.h2
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-core.c4
-rw-r--r--drivers/media/pci/ivtv/ivtv-fileops.c10
-rw-r--r--drivers/media/pci/ivtv/ivtv-fileops.h4
-rw-r--r--drivers/media/pci/meye/meye.c4
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c4
-rw-r--r--drivers/media/pci/saa7164/saa7164-encoder.c6
-rw-r--r--drivers/media/pci/saa7164/saa7164-vbi.c4
-rw-r--r--drivers/media/pci/ttpci/av7110_av.c8
-rw-r--r--drivers/media/pci/ttpci/av7110_ca.c4
-rw-r--r--drivers/media/pci/zoran/zoran_driver.c4
-rw-r--r--drivers/media/platform/davinci/vpfe_capture.c2
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-m2m.c4
-rw-r--r--drivers/media/platform/fsl-viu.c6
-rw-r--r--drivers/media/platform/m2m-deinterlace.c4
-rw-r--r--drivers/media/platform/mx2_emmaprp.c4
-rw-r--r--drivers/media/platform/omap/omap_vout.c2
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c4
-rw-r--r--drivers/media/platform/s3c-camif/camif-capture.c4
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c4
-rw-r--r--drivers/media/platform/sh_veu.c2
-rw-r--r--drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c2
-rw-r--r--drivers/media/platform/soc_camera/soc_camera.c4
-rw-r--r--drivers/media/platform/via-camera.c2
-rw-r--r--drivers/media/platform/vivid/vivid-core.c2
-rw-r--r--drivers/media/platform/vivid/vivid-radio-rx.c2
-rw-r--r--drivers/media/platform/vivid/vivid-radio-rx.h2
-rw-r--r--drivers/media/platform/vivid/vivid-radio-tx.c2
-rw-r--r--drivers/media/platform/vivid/vivid-radio-tx.h2
-rw-r--r--drivers/media/radio/radio-cadet.c6
-rw-r--r--drivers/media/radio/radio-si476x.c6
-rw-r--r--drivers/media/radio/radio-wl1273.c2
-rw-r--r--drivers/media/radio/si470x/radio-si470x-common.c6
-rw-r--r--drivers/media/radio/wl128x/fmdrv_v4l2.c2
-rw-r--r--drivers/media/rc/lirc_dev.c4
-rw-r--r--drivers/media/usb/cpia2/cpia2.h2
-rw-r--r--drivers/media/usb/cpia2/cpia2_core.c4
-rw-r--r--drivers/media/usb/cpia2/cpia2_v4l.c4
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-417.c6
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-video.c6
-rw-r--r--drivers/media/usb/gspca/gspca.c6
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-video.c6
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c4
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.c4
-rw-r--r--drivers/media/usb/tm6000/tm6000-video.c10
-rw-r--r--drivers/media/usb/uvc/uvc_queue.c4
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c57
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h2
-rw-r--r--drivers/media/usb/zr364xx/zr364xx.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c10
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c2
-rw-r--r--drivers/media/v4l2-core/videobuf-core.c6
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c4
-rw-r--r--drivers/media/v4l2-core/videobuf2-v4l2.c10
-rw-r--r--drivers/memory/omap-gpmc.c163
-rw-r--r--drivers/memstick/host/Kconfig4
-rw-r--r--drivers/memstick/host/rtsx_pci_ms.c2
-rw-r--r--drivers/memstick/host/rtsx_usb_ms.c2
-rw-r--r--drivers/message/fusion/mptbase.c59
-rw-r--r--drivers/message/fusion/mptctl.c25
-rw-r--r--drivers/message/fusion/mptsas.c1
-rw-r--r--drivers/mfd/Kconfig41
-rw-r--r--drivers/mfd/Makefile7
-rw-r--r--drivers/mfd/ab8500-debugfs.c439
-rw-r--r--drivers/mfd/atmel-flexcom.c63
-rw-r--r--drivers/mfd/axp20x.c4
-rw-r--r--drivers/mfd/cros_ec.c4
-rw-r--r--drivers/mfd/cros_ec_dev.c (renamed from drivers/platform/chrome/cros_ec_dev.c)8
-rw-r--r--drivers/mfd/cros_ec_dev.h (renamed from drivers/platform/chrome/cros_ec_dev.h)0
-rw-r--r--drivers/mfd/cros_ec_spi.c25
-rw-r--r--drivers/mfd/intel-lpss.c6
-rw-r--r--drivers/mfd/intel_soc_pmic_core.c1
-rw-r--r--drivers/mfd/kempld-core.c2
-rw-r--r--drivers/mfd/lpc_ich.c5
-rw-r--r--drivers/mfd/max77843.c1
-rw-r--r--drivers/mfd/palmas.c10
-rw-r--r--drivers/mfd/pcf50633-core.c2
-rw-r--r--drivers/mfd/rave-sp.c710
-rw-r--r--drivers/mfd/stm32-lptimer.c6
-rw-r--r--drivers/mfd/stm32-timers.c4
-rw-r--r--drivers/mfd/syscon.c19
-rw-r--r--drivers/mfd/ti_am335x_tscadc.c2
-rw-r--r--drivers/mfd/tmio_core.c20
-rw-r--r--drivers/misc/Kconfig5
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/cardreader/Kconfig20
-rw-r--r--drivers/misc/cardreader/Makefile4
-rw-r--r--drivers/misc/cardreader/rtl8411.c (renamed from drivers/mfd/rtl8411.c)2
-rw-r--r--drivers/misc/cardreader/rts5209.c (renamed from drivers/mfd/rts5209.c)2
-rw-r--r--drivers/misc/cardreader/rts5227.c (renamed from drivers/mfd/rts5227.c)2
-rw-r--r--drivers/misc/cardreader/rts5229.c (renamed from drivers/mfd/rts5229.c)2
-rw-r--r--drivers/misc/cardreader/rts5249.c (renamed from drivers/mfd/rts5249.c)3
-rw-r--r--drivers/misc/cardreader/rts5260.c748
-rw-r--r--drivers/misc/cardreader/rts5260.h45
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c (renamed from drivers/mfd/rtsx_pcr.c)125
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.h (renamed from drivers/mfd/rtsx_pcr.h)12
-rw-r--r--drivers/misc/cardreader/rtsx_usb.c (renamed from drivers/mfd/rtsx_usb.c)2
-rw-r--r--drivers/misc/cxl/api.c2
-rw-r--r--drivers/misc/cxl/cxl.h2
-rw-r--r--drivers/misc/cxl/file.c4
-rw-r--r--drivers/misc/cxl/vphb.c2
-rw-r--r--drivers/misc/hpilo.c2
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.c2
-rw-r--r--drivers/misc/mei/main.c6
-rw-r--r--drivers/misc/mic/scif/scif_api.c7
-rw-r--r--drivers/misc/mic/scif/scif_epd.h2
-rw-r--r--drivers/misc/mic/scif/scif_fd.c2
-rw-r--r--drivers/misc/mic/vop/vop_vringh.c4
-rw-r--r--drivers/misc/phantom.c4
-rw-r--r--drivers/misc/vmw_vmci/vmci_host.c4
-rw-r--r--drivers/mmc/core/block.c1385
-rw-r--r--drivers/mmc/core/block.h12
-rw-r--r--drivers/mmc/core/bus.c2
-rw-r--r--drivers/mmc/core/core.c228
-rw-r--r--drivers/mmc/core/core.h47
-rw-r--r--drivers/mmc/core/host.h6
-rw-r--r--drivers/mmc/core/mmc_test.c221
-rw-r--r--drivers/mmc/core/queue.c504
-rw-r--r--drivers/mmc/core/queue.h64
-rw-r--r--drivers/mmc/core/slot-gpio.c22
-rw-r--r--drivers/mmc/host/Kconfig26
-rw-r--r--drivers/mmc/host/Makefile11
-rw-r--r--drivers/mmc/host/android-goldfish.c2
-rw-r--r--drivers/mmc/host/cqhci.c1150
-rw-r--r--drivers/mmc/host/cqhci.h240
-rw-r--r--drivers/mmc/host/davinci_mmc.c17
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c2
-rw-r--r--drivers/mmc/host/mmci.c127
-rw-r--r--drivers/mmc/host/mmci.h6
-rw-r--r--drivers/mmc/host/renesas_sdhi.h22
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c49
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c15
-rw-r--r--drivers/mmc/host/renesas_sdhi_sys_dmac.c37
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c2
-rw-r--r--drivers/mmc/host/rtsx_usb_sdmmc.c2
-rw-r--r--drivers/mmc/host/s3cmci.c2
-rw-r--r--drivers/mmc/host/sdhci-acpi.c132
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c22
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c149
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c8
-rw-r--r--drivers/mmc/host/sdhci-pci-arasan.c331
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c218
-rw-r--r--drivers/mmc/host/sdhci-pci.h7
-rw-r--r--drivers/mmc/host/sdhci-spear.c4
-rw-r--r--drivers/mmc/host/sdhci-xenon.c7
-rw-r--r--drivers/mmc/host/sdhci.c57
-rw-r--r--drivers/mmc/host/sdhci.h2
-rw-r--r--drivers/mmc/host/sdhci_f_sdh30.c52
-rw-r--r--drivers/mmc/host/sh_mmcif.c2
-rw-r--r--drivers/mmc/host/sunxi-mmc.c9
-rw-r--r--drivers/mmc/host/tmio_mmc.c23
-rw-r--r--drivers/mmc/host/tmio_mmc.h43
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c152
-rw-r--r--drivers/mtd/devices/docg3.c70
-rw-r--r--drivers/mtd/devices/m25p80.c9
-rw-r--r--drivers/mtd/devices/mchp23k256.c18
-rw-r--r--drivers/mtd/mtdcore.c36
-rw-r--r--drivers/mtd/mtdpart.c43
-rw-r--r--drivers/mtd/mtdswap.c5
-rw-r--r--drivers/mtd/nand/Kconfig17
-rw-r--r--drivers/mtd/nand/Makefile1
-rw-r--r--drivers/mtd/nand/atmel/nand-controller.c9
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c6
-rw-r--r--drivers/mtd/nand/brcmnand/brcmnand.c38
-rw-r--r--drivers/mtd/nand/cafe_nand.c52
-rw-r--r--drivers/mtd/nand/denali.c84
-rw-r--r--drivers/mtd/nand/denali.h4
-rw-r--r--drivers/mtd/nand/denali_pci.c4
-rw-r--r--drivers/mtd/nand/diskonchip.c4
-rw-r--r--drivers/mtd/nand/docg4.c21
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c10
-rw-r--r--drivers/mtd/nand/fsl_ifc_nand.c13
-rw-r--r--drivers/mtd/nand/fsmc_nand.c9
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c111
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.h46
-rw-r--r--drivers/mtd/nand/hisi504_nand.c9
-rw-r--r--drivers/mtd/nand/jz4740_nand.c16
-rw-r--r--drivers/mtd/nand/lpc32xx_mlc.c7
-rw-r--r--drivers/mtd/nand/lpc32xx_slc.c33
-rw-r--r--drivers/mtd/nand/marvell_nand.c2896
-rw-r--r--drivers/mtd/nand/mtk_ecc.c126
-rw-r--r--drivers/mtd/nand/mtk_ecc.h3
-rw-r--r--drivers/mtd/nand/mtk_nand.c76
-rw-r--r--drivers/mtd/nand/nand_base.c2309
-rw-r--r--drivers/mtd/nand/nand_bbt.c2
-rw-r--r--drivers/mtd/nand/nand_hynix.c129
-rw-r--r--drivers/mtd/nand/nand_micron.c83
-rw-r--r--drivers/mtd/nand/nand_samsung.c19
-rw-r--r--drivers/mtd/nand/nand_timings.c21
-rw-r--r--drivers/mtd/nand/omap2.c28
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c14
-rw-r--r--drivers/mtd/nand/qcom_nandc.c31
-rw-r--r--drivers/mtd/nand/r852.c11
-rw-r--r--drivers/mtd/nand/sh_flctl.c6
-rw-r--r--drivers/mtd/nand/sm_common.h2
-rw-r--r--drivers/mtd/nand/sunxi_nand.c111
-rw-r--r--drivers/mtd/nand/tango_nand.c27
-rw-r--r--drivers/mtd/nand/tmio_nand.c5
-rw-r--r--drivers/mtd/nand/vf610_nfc.c6
-rw-r--r--drivers/mtd/onenand/Kconfig7
-rw-r--r--drivers/mtd/onenand/omap2.c577
-rw-r--r--drivers/mtd/onenand/onenand_base.c81
-rw-r--r--drivers/mtd/onenand/samsung.c185
-rw-r--r--drivers/mtd/parsers/sharpslpart.c6
-rw-r--r--drivers/mtd/spi-nor/cadence-quadspi.c55
-rw-r--r--drivers/mtd/spi-nor/fsl-quadspi.c8
-rw-r--r--drivers/mtd/spi-nor/intel-spi.c6
-rw-r--r--drivers/mtd/spi-nor/mtk-quadspi.c240
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c75
-rw-r--r--drivers/mtd/tests/nandbiterrs.c2
-rw-r--r--drivers/mtd/tests/oobtest.c21
-rw-r--r--drivers/mtd/ubi/block.c42
-rw-r--r--drivers/mtd/ubi/build.c13
-rw-r--r--drivers/mtd/ubi/eba.c2
-rw-r--r--drivers/mtd/ubi/fastmap-wl.c2
-rw-r--r--drivers/mtd/ubi/fastmap.c5
-rw-r--r--drivers/mtd/ubi/vmt.c15
-rw-r--r--drivers/mtd/ubi/wl.c87
-rw-r--r--drivers/mtd/ubi/wl.h2
-rw-r--r--drivers/net/ethernet/8390/mac8390.c33
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h23
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c102
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h7
-rw-r--r--drivers/net/ethernet/cirrus/mac89x0.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c55
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c125
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c91
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c38
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c4
-rw-r--r--drivers/net/ieee802154/ca8210.c4
-rw-r--r--drivers/net/ppp/ppp_async.c2
-rw-r--r--drivers/net/ppp/ppp_generic.c4
-rw-r--r--drivers/net/ppp/ppp_synctty.c2
-rw-r--r--drivers/net/tap.c4
-rw-r--r--drivers/net/tun.c4
-rw-r--r--drivers/net/wan/cosa.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00debug.c2
-rw-r--r--drivers/nubus/Makefile2
-rw-r--r--drivers/nubus/bus.c117
-rw-r--r--drivers/nubus/nubus.c542
-rw-r--r--drivers/nubus/proc.c281
-rw-r--r--drivers/nvme/host/Makefile4
-rw-r--r--drivers/nvme/host/core.c134
-rw-r--r--drivers/nvme/host/fabrics.c22
-rw-r--r--drivers/nvme/host/fabrics.h2
-rw-r--r--drivers/nvme/host/fc.c7
-rw-r--r--drivers/nvme/host/lightnvm.c185
-rw-r--r--drivers/nvme/host/multipath.c44
-rw-r--r--drivers/nvme/host/nvme.h9
-rw-r--r--drivers/nvme/host/pci.c216
-rw-r--r--drivers/nvme/host/rdma.c6
-rw-r--r--drivers/nvme/host/trace.c130
-rw-r--r--drivers/nvme/host/trace.h165
-rw-r--r--drivers/nvme/target/Kconfig2
-rw-r--r--drivers/nvme/target/core.c14
-rw-r--r--drivers/nvme/target/fabrics-cmd.c2
-rw-r--r--drivers/nvme/target/fc.c60
-rw-r--r--drivers/nvme/target/fcloop.c244
-rw-r--r--drivers/nvme/target/loop.c3
-rw-r--r--drivers/nvme/target/rdma.c83
-rw-r--r--drivers/of/base.c26
-rw-r--r--drivers/of/property.c8
-rw-r--r--drivers/opp/Makefile1
-rw-r--r--drivers/opp/ti-opp-supply.c425
-rw-r--r--drivers/pci/pci-driver.c21
-rw-r--r--drivers/pci/pcie/portdrv_pci.c3
-rw-r--r--drivers/pci/switch/switchtec.c4
-rw-r--r--drivers/perf/Kconfig9
-rw-r--r--drivers/perf/Makefile1
-rw-r--r--drivers/perf/arm_dsu_pmu.c843
-rw-r--r--drivers/perf/arm_pmu_platform.c15
-rw-r--r--drivers/perf/arm_spe_pmu.c9
-rw-r--r--drivers/phy/broadcom/phy-brcm-sata.c31
-rw-r--r--drivers/pinctrl/Kconfig10
-rw-r--r--drivers/pinctrl/Makefile1
-rw-r--r--drivers/pinctrl/pinctrl-axp209.c476
-rw-r--r--drivers/platform/chrome/Kconfig10
-rw-r--r--drivers/platform/chrome/Makefile7
-rw-r--r--drivers/platform/chrome/cros_ec_debugfs.c9
-rw-r--r--drivers/platform/chrome/cros_ec_debugfs.h27
-rw-r--r--drivers/platform/chrome/cros_ec_lightbar.c6
-rw-r--r--drivers/platform/chrome/cros_ec_sysfs.c5
-rw-r--r--drivers/platform/chrome/cros_ec_vbc.c1
-rw-r--r--drivers/platform/goldfish/goldfish_pipe.c4
-rw-r--r--drivers/platform/x86/sony-laptop.c2
-rw-r--r--drivers/platform/x86/surfacepro3_button.c4
-rw-r--r--drivers/pnp/pnpbios/core.c5
-rw-r--r--drivers/pnp/quirks.c1
-rw-r--r--drivers/power/avs/rockchip-io-domain.c24
-rw-r--r--drivers/power/reset/Kconfig9
-rw-r--r--drivers/power/reset/Makefile1
-rw-r--r--drivers/power/reset/at91-sama5d2_shdwc.c4
-rw-r--r--drivers/power/reset/imx-snvs-poweroff.c66
-rw-r--r--drivers/power/reset/msm-poweroff.c7
-rw-r--r--drivers/power/reset/zx-reboot.c4
-rw-r--r--drivers/power/supply/ab8500_charger.c6
-rw-r--r--drivers/power/supply/axp20x_ac_power.c8
-rw-r--r--drivers/power/supply/axp288_charger.c290
-rw-r--r--drivers/power/supply/axp288_fuel_gauge.c203
-rw-r--r--drivers/power/supply/bq24190_charger.c126
-rw-r--r--drivers/power/supply/bq27xxx_battery.c39
-rw-r--r--drivers/power/supply/bq27xxx_battery_i2c.c2
-rw-r--r--drivers/power/supply/charger-manager.c2
-rw-r--r--drivers/power/supply/cpcap-battery.c4
-rw-r--r--drivers/power/supply/ltc2941-battery-gauge.c25
-rw-r--r--drivers/power/supply/max17042_battery.c54
-rw-r--r--drivers/power/supply/sbs-manager.c2
-rw-r--r--drivers/powercap/intel_rapl.c99
-rw-r--r--drivers/powercap/powercap_sys.c6
-rw-r--r--drivers/pps/pps.c2
-rw-r--r--drivers/ptp/ptp_chardev.c2
-rw-r--r--drivers/ptp/ptp_private.h2
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c2
-rw-r--r--drivers/ras/cec.c2
-rw-r--r--drivers/regulator/Kconfig7
-rw-r--r--drivers/regulator/Makefile1
-rw-r--r--drivers/regulator/core.c383
-rw-r--r--drivers/regulator/internal.h27
-rw-r--r--drivers/regulator/of_regulator.c34
-rw-r--r--drivers/regulator/qcom_spmi-regulator.c84
-rw-r--r--drivers/regulator/sc2731-regulator.c256
-rw-r--r--drivers/regulator/tps65218-regulator.c5
-rw-r--r--drivers/rpmsg/qcom_smd.c4
-rw-r--r--drivers/rpmsg/rpmsg_char.c4
-rw-r--r--drivers/rpmsg/rpmsg_core.c2
-rw-r--r--drivers/rpmsg/rpmsg_internal.h2
-rw-r--r--drivers/rtc/rtc-dev.c2
-rw-r--r--drivers/s390/block/dasd_eer.c4
-rw-r--r--drivers/s390/char/monreader.c2
-rw-r--r--drivers/scsi/3w-9xxx.c26
-rw-r--r--drivers/scsi/3w-9xxx.h2
-rw-r--r--drivers/scsi/3w-sas.c15
-rw-r--r--drivers/scsi/aacraid/aachba.c473
-rw-r--r--drivers/scsi/aacraid/aacraid.h54
-rw-r--r--drivers/scsi/aacraid/commctrl.c6
-rw-r--r--drivers/scsi/aacraid/comminit.c49
-rw-r--r--drivers/scsi/aacraid/commsup.c220
-rw-r--r--drivers/scsi/aacraid/linit.c26
-rw-r--r--drivers/scsi/aacraid/sa.c32
-rw-r--r--drivers/scsi/arcmsr/arcmsr.h567
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c1318
-rw-r--r--drivers/scsi/arm/fas216.c2
-rw-r--r--drivers/scsi/bfa/bfa_core.c2
-rw-r--r--drivers/scsi/bfa/bfa_cs.h6
-rw-r--r--drivers/scsi/bfa/bfa_defs_svc.h3
-rw-r--r--drivers/scsi/bfa/bfa_fcbuild.c8
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.c3
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.h4
-rw-r--r--drivers/scsi/bfa/bfa_fcs.c78
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c62
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c10
-rw-r--r--drivers/scsi/bfa/bfa_port.c15
-rw-r--r--drivers/scsi/bfa/bfa_port.h2
-rw-r--r--drivers/scsi/bfa/bfa_svc.c51
-rw-r--r--drivers/scsi/bfa/bfa_svc.h2
-rw-r--r--drivers/scsi/bfa/bfad.c23
-rw-r--r--drivers/scsi/bfa/bfad_attr.c5
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c10
-rw-r--r--drivers/scsi/bfa/bfad_debugfs.c8
-rw-r--r--drivers/scsi/bfa/bfad_im.h32
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c4
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c60
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c51
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c19
-rw-r--r--drivers/scsi/csiostor/csio_init.c2
-rw-r--r--drivers/scsi/csiostor/csio_init.h1
-rw-r--r--drivers/scsi/csiostor/csio_mb.c6
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c2
-rw-r--r--drivers/scsi/cxlflash/Makefile2
-rw-r--r--drivers/scsi/cxlflash/backend.h41
-rw-r--r--drivers/scsi/cxlflash/common.h8
-rw-r--r--drivers/scsi/cxlflash/cxl_hw.c168
-rw-r--r--drivers/scsi/cxlflash/main.c100
-rw-r--r--drivers/scsi/cxlflash/superpipe.c64
-rw-r--r--drivers/scsi/cxlflash/superpipe.h4
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c37
-rw-r--r--drivers/scsi/fnic/fnic_debugfs.c22
-rw-r--r--drivers/scsi/fnic/fnic_fcs.c4
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c2
-rw-r--r--drivers/scsi/fnic/fnic_stats.h4
-rw-r--r--drivers/scsi/fnic/fnic_trace.c58
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h46
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c267
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c6
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c194
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c332
-rw-r--r--drivers/scsi/hosts.c6
-rw-r--r--drivers/scsi/hpsa.c18
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c34
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c320
-rw-r--r--drivers/scsi/ipr.c4
-rw-r--r--drivers/scsi/iscsi_tcp.c2
-rw-r--r--drivers/scsi/libiscsi.c28
-rw-r--r--drivers/scsi/libiscsi_tcp.c9
-rw-r--r--drivers/scsi/libsas/sas_ata.c1
-rw-r--r--drivers/scsi/libsas/sas_discover.c34
-rw-r--r--drivers/scsi/libsas/sas_event.c86
-rw-r--r--drivers/scsi/libsas/sas_expander.c12
-rw-r--r--drivers/scsi/libsas/sas_init.c107
-rw-r--r--drivers/scsi/libsas/sas_internal.h7
-rw-r--r--drivers/scsi/libsas/sas_phy.c69
-rw-r--r--drivers/scsi/libsas/sas_port.c25
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c20
-rw-r--r--drivers/scsi/lpfc/lpfc.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c59
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c20
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c65
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c116
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c29
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c294
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c88
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c356
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.h17
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c71
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.h10
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c193
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h14
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h37
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c175
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c28
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c99
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h6
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c207
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h29
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c33
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c332
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_warpdrive.c33
-rw-r--r--drivers/scsi/pmcraid.c2
-rw-r--r--drivers/scsi/ppa.c4
-rw-r--r--drivers/scsi/qedf/qedf_main.c3
-rw-r--r--drivers/scsi/qedi/qedi_fw.c2
-rw-r--r--drivers/scsi/qedi/qedi_main.c46
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c9
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h173
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c39
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h32
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c1497
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c1044
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c65
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c56
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c151
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c140
-rw-r--r--drivers/scsi/qla2xxx/qla_nx2.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c528
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c761
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c40
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c5
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c5
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c21
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c5
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c12
-rw-r--r--drivers/scsi/scsi_common.c14
-rw-r--r--drivers/scsi/scsi_debug.c724
-rw-r--r--drivers/scsi/scsi_devinfo.c39
-rw-r--r--drivers/scsi/scsi_dh.c5
-rw-r--r--drivers/scsi/scsi_error.c21
-rw-r--r--drivers/scsi/scsi_lib.c50
-rw-r--r--drivers/scsi/scsi_priv.h15
-rw-r--r--drivers/scsi/scsi_sysfs.c3
-rw-r--r--drivers/scsi/scsi_transport_fc.c4
-rw-r--r--drivers/scsi/scsi_transport_spi.c16
-rw-r--r--drivers/scsi/sd.c6
-rw-r--r--drivers/scsi/ses.c11
-rw-r--r--drivers/scsi/sg.c6
-rw-r--r--drivers/scsi/smartpqi/Makefile2
-rw-r--r--drivers/scsi/st.c2
-rw-r--r--drivers/scsi/storvsc_drv.c4
-rw-r--r--drivers/scsi/ufs/ufshci.h16
-rw-r--r--drivers/scsi/wd719x.c4
-rw-r--r--drivers/spi/spi-armada-3700.c110
-rw-r--r--drivers/spi/spi-atmel.c113
-rw-r--r--drivers/spi/spi-bcm53xx.c26
-rw-r--r--drivers/spi/spi-davinci.c4
-rw-r--r--drivers/spi/spi-dw.c2
-rw-r--r--drivers/spi/spi-fsl-dspi.c9
-rw-r--r--drivers/spi/spi-imx.c26
-rw-r--r--drivers/spi/spi-jcore.c4
-rw-r--r--drivers/spi/spi-meson-spicc.c1
-rw-r--r--drivers/spi/spi-orion.c21
-rw-r--r--drivers/spi/spi-pxa2xx.c4
-rw-r--r--drivers/spi/spi-s3c64xx.c18
-rw-r--r--drivers/spi/spi-sh-msiof.c128
-rw-r--r--drivers/spi/spi-sirf.c4
-rw-r--r--drivers/spi/spi-sun6i.c2
-rw-r--r--drivers/spi/spi-xilinx.c1
-rw-r--r--drivers/staging/comedi/comedi_fops.c4
-rw-r--r--drivers/staging/comedi/drivers/serial2002.c2
-rw-r--r--drivers/staging/irda/net/af_irda.c4
-rw-r--r--drivers/staging/irda/net/irnet/irnet_ppp.c8
-rw-r--r--drivers/staging/irda/net/irnet/irnet_ppp.h2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c1
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-socket.c24
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c3
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h10
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c2
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.c4
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c2
-rw-r--r--drivers/staging/media/lirc/lirc_zilog.c4
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c2
-rw-r--r--drivers/staging/most/aim-cdev/cdev.c4
-rw-r--r--drivers/staging/most/aim-v4l2/video.c4
-rw-r--r--drivers/staging/mt29f_spinand/mt29f_spinand.c5
-rw-r--r--drivers/staging/speakup/speakup_soft.c4
-rw-r--r--drivers/staging/vme/devices/vme_user.c8
-rw-r--r--drivers/target/Kconfig1
-rw-r--r--drivers/target/target_core_transport.c46
-rw-r--r--drivers/thermal/cpu_cooling.c201
-rw-r--r--drivers/tty/n_gsm.c4
-rw-r--r--drivers/tty/n_hdlc.c6
-rw-r--r--drivers/tty/n_r3964.c6
-rw-r--r--drivers/tty/n_tty.c4
-rw-r--r--drivers/tty/serdev/core.c31
-rw-r--r--drivers/tty/tty_io.c8
-rw-r--r--drivers/tty/vt/vc_screen.c4
-rw-r--r--drivers/uio/uio.c2
-rw-r--r--drivers/usb/class/cdc-wdm.c4
-rw-r--r--drivers/usb/class/usblp.c4
-rw-r--r--drivers/usb/class/usbtmc.c4
-rw-r--r--drivers/usb/core/devices.c2
-rw-r--r--drivers/usb/core/devio.c8
-rw-r--r--drivers/usb/gadget/function/f_fs.c4
-rw-r--r--drivers/usb/gadget/function/f_hid.c4
-rw-r--r--drivers/usb/gadget/function/f_ncm.c30
-rw-r--r--drivers/usb/gadget/function/f_printer.c4
-rw-r--r--drivers/usb/gadget/function/uvc_queue.c2
-rw-r--r--drivers/usb/gadget/function/uvc_queue.h2
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.c2
-rw-r--r--drivers/usb/gadget/legacy/inode.c4
-rw-r--r--drivers/usb/misc/iowarrior.c4
-rw-r--r--drivers/usb/misc/ldusb.c4
-rw-r--r--drivers/usb/misc/legousbtower.c6
-rw-r--r--drivers/usb/mon/mon_bin.c4
-rw-r--r--drivers/vfio/virqfd.c4
-rw-r--r--drivers/vhost/net.c2
-rw-r--r--drivers/vhost/vhost.c19
-rw-r--r--drivers/vhost/vhost.h6
-rw-r--r--drivers/video/backlight/apple_bl.c2
-rw-r--r--drivers/video/backlight/corgi_lcd.c2
-rw-r--r--drivers/video/backlight/tdo24m.c2
-rw-r--r--drivers/video/backlight/tosa_lcd.c2
-rw-r--r--drivers/video/fbdev/macfb.c10
-rw-r--r--drivers/virt/fsl_hypervisor.c4
-rw-r--r--drivers/w1/masters/w1-gpio.c149
-rw-r--r--drivers/watchdog/Kconfig7
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/rave-sp-wdt.c337
-rw-r--r--drivers/xen/evtchn.c4
-rw-r--r--drivers/xen/mcelog.c2
-rw-r--r--drivers/xen/pvcalls-front.c10
-rw-r--r--drivers/xen/swiotlb-xen.c2
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c2
-rw-r--r--fs/affs/amigaffs.c5
-rw-r--r--fs/affs/dir.c5
-rw-r--r--fs/affs/super.c3
-rw-r--r--fs/afs/fsclient.c3
-rw-r--r--fs/afs/inode.c5
-rw-r--r--fs/autofs4/waitq.c4
-rw-r--r--fs/btrfs/Makefile2
-rw-r--r--fs/btrfs/backref.c3
-rw-r--r--fs/btrfs/compression.c141
-rw-r--r--fs/btrfs/compression.h4
-rw-r--r--fs/btrfs/ctree.c56
-rw-r--r--fs/btrfs/ctree.h12
-rw-r--r--fs/btrfs/delayed-inode.c66
-rw-r--r--fs/btrfs/delayed-ref.c2
-rw-r--r--fs/btrfs/delayed-ref.h2
-rw-r--r--fs/btrfs/dev-replace.c30
-rw-r--r--fs/btrfs/dir-item.c108
-rw-r--r--fs/btrfs/disk-io.c75
-rw-r--r--fs/btrfs/disk-io.h3
-rw-r--r--fs/btrfs/export.c5
-rw-r--r--fs/btrfs/extent-tree.c21
-rw-r--r--fs/btrfs/extent_io.c154
-rw-r--r--fs/btrfs/extent_io.h50
-rw-r--r--fs/btrfs/extent_map.c132
-rw-r--r--fs/btrfs/extent_map.h3
-rw-r--r--fs/btrfs/file.c392
-rw-r--r--fs/btrfs/free-space-cache.c13
-rw-r--r--fs/btrfs/inode.c298
-rw-r--r--fs/btrfs/ioctl.c45
-rw-r--r--fs/btrfs/props.c13
-rw-r--r--fs/btrfs/qgroup.c3
-rw-r--r--fs/btrfs/raid56.c119
-rw-r--r--fs/btrfs/ref-verify.c6
-rw-r--r--fs/btrfs/root-tree.c7
-rw-r--r--fs/btrfs/scrub.c95
-rw-r--r--fs/btrfs/send.c6
-rw-r--r--fs/btrfs/super.c348
-rw-r--r--fs/btrfs/sysfs.c2
-rw-r--r--fs/btrfs/tests/btrfs-tests.c3
-rw-r--r--fs/btrfs/tests/btrfs-tests.h1
-rw-r--r--fs/btrfs/tests/extent-map-tests.c366
-rw-r--r--fs/btrfs/tests/inode-tests.c17
-rw-r--r--fs/btrfs/transaction.c22
-rw-r--r--fs/btrfs/transaction.h11
-rw-r--r--fs/btrfs/tree-checker.c142
-rw-r--r--fs/btrfs/tree-log.c58
-rw-r--r--fs/btrfs/volumes.c689
-rw-r--r--fs/btrfs/volumes.h45
-rw-r--r--fs/btrfs/xattr.c7
-rw-r--r--fs/btrfs/zstd.c132
-rw-r--r--fs/buffer.c12
-rw-r--r--fs/cachefiles/daemon.c6
-rw-r--r--fs/cifs/Kconfig8
-rw-r--r--fs/cifs/Makefile2
-rw-r--r--fs/cifs/cifs_debug.c199
-rw-r--r--fs/cifs/cifsacl.c2
-rw-r--r--fs/cifs/cifsencrypt.c3
-rw-r--r--fs/cifs/cifsfs.c8
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/cifsglob.h35
-rw-r--r--fs/cifs/cifsproto.h4
-rw-r--r--fs/cifs/cifssmb.c22
-rw-r--r--fs/cifs/connect.c251
-rw-r--r--fs/cifs/file.c43
-rw-r--r--fs/cifs/inode.c2
-rw-r--r--fs/cifs/misc.c14
-rw-r--r--fs/cifs/smb1ops.c4
-rw-r--r--fs/cifs/smb2file.c2
-rw-r--r--fs/cifs/smb2misc.c2
-rw-r--r--fs/cifs/smb2ops.c77
-rw-r--r--fs/cifs/smb2pdu.c578
-rw-r--r--fs/cifs/smb2pdu.h60
-rw-r--r--fs/cifs/smb2proto.h3
-rw-r--r--fs/cifs/smbdirect.c2610
-rw-r--r--fs/cifs/smbdirect.h338
-rw-r--r--fs/cifs/transport.c69
-rw-r--r--fs/coda/psdev.c4
-rw-r--r--fs/dcache.c42
-rw-r--r--fs/debugfs/file.c4
-rw-r--r--fs/devpts/inode.c4
-rw-r--r--fs/direct-io.c24
-rw-r--r--fs/dlm/lowcomms.c4
-rw-r--r--fs/dlm/plock.c4
-rw-r--r--fs/dlm/user.c2
-rw-r--r--fs/ecryptfs/miscdev.c4
-rw-r--r--fs/eventfd.c131
-rw-r--r--fs/eventpoll.c28
-rw-r--r--fs/exofs/dir.c9
-rw-r--r--fs/exofs/super.c3
-rw-r--r--fs/ext2/dir.c9
-rw-r--r--fs/ext2/super.c5
-rw-r--r--fs/ext4/dir.c9
-rw-r--r--fs/ext4/inline.c7
-rw-r--r--fs/ext4/inode.c13
-rw-r--r--fs/ext4/ioctl.c3
-rw-r--r--fs/ext4/namei.c5
-rw-r--r--fs/ext4/super.c3
-rw-r--r--fs/ext4/xattr.c5
-rw-r--r--fs/f2fs/acl.c2
-rw-r--r--fs/f2fs/checkpoint.c10
-rw-r--r--fs/f2fs/data.c301
-rw-r--r--fs/f2fs/debug.c12
-rw-r--r--fs/f2fs/dir.c6
-rw-r--r--fs/f2fs/f2fs.h201
-rw-r--r--fs/f2fs/file.c248
-rw-r--r--fs/f2fs/gc.c18
-rw-r--r--fs/f2fs/gc.h2
-rw-r--r--fs/f2fs/inode.c34
-rw-r--r--fs/f2fs/namei.c67
-rw-r--r--fs/f2fs/node.c149
-rw-r--r--fs/f2fs/node.h4
-rw-r--r--fs/f2fs/recovery.c27
-rw-r--r--fs/f2fs/segment.c129
-rw-r--r--fs/f2fs/segment.h92
-rw-r--r--fs/f2fs/super.c142
-rw-r--r--fs/f2fs/sysfs.c14
-rw-r--r--fs/f2fs/trace.c12
-rw-r--r--fs/f2fs/xattr.c12
-rw-r--r--fs/fat/dir.c3
-rw-r--r--fs/fat/inode.c9
-rw-r--r--fs/fat/namei_msdos.c7
-rw-r--r--fs/fat/namei_vfat.c22
-rw-r--r--fs/fcntl.c5
-rw-r--r--fs/file.c7
-rw-r--r--fs/file_table.c1
-rw-r--r--fs/fs-writeback.c2
-rw-r--r--fs/fuse/dev.c4
-rw-r--r--fs/fuse/file.c6
-rw-r--r--fs/fuse/fuse_i.h2
-rw-r--r--fs/gfs2/Kconfig2
-rw-r--r--fs/gfs2/aops.c35
-rw-r--r--fs/gfs2/bmap.c583
-rw-r--r--fs/gfs2/bmap.h1
-rw-r--r--fs/gfs2/dir.c3
-rw-r--r--fs/gfs2/file.c23
-rw-r--r--fs/gfs2/glops.c19
-rw-r--r--fs/gfs2/incore.h6
-rw-r--r--fs/gfs2/inode.c11
-rw-r--r--fs/gfs2/lock_dlm.c4
-rw-r--r--fs/gfs2/log.c124
-rw-r--r--fs/gfs2/log.h10
-rw-r--r--fs/gfs2/lops.c18
-rw-r--r--fs/gfs2/lops.h3
-rw-r--r--fs/gfs2/main.c90
-rw-r--r--fs/gfs2/ops_fstype.c2
-rw-r--r--fs/gfs2/quota.c3
-rw-r--r--fs/gfs2/recovery.c110
-rw-r--r--fs/gfs2/rgrp.c38
-rw-r--r--fs/gfs2/super.c19
-rw-r--r--fs/gfs2/sys.c4
-rw-r--r--fs/gfs2/trace_gfs2.h11
-rw-r--r--fs/gfs2/trans.c4
-rw-r--r--fs/inode.c11
-rw-r--r--fs/iomap.c14
-rw-r--r--fs/jffs2/fs.c1
-rw-r--r--fs/kernfs/file.c2
-rw-r--r--fs/lockd/clntproc.c14
-rw-r--r--fs/lockd/host.c22
-rw-r--r--fs/lockd/mon.c14
-rw-r--r--fs/lockd/svcproc.c2
-rw-r--r--fs/namei.c95
-rw-r--r--fs/ncpfs/sock.c3
-rw-r--r--fs/nfs/blocklayout/blocklayout.c94
-rw-r--r--fs/nfs/blocklayout/blocklayout.h7
-rw-r--r--fs/nfs/blocklayout/dev.c7
-rw-r--r--fs/nfs/delegation.c3
-rw-r--r--fs/nfs/direct.c4
-rw-r--r--fs/nfs/export.c5
-rw-r--r--fs/nfs/filelayout/filelayout.c4
-rw-r--r--fs/nfs/fscache-index.c5
-rw-r--r--fs/nfs/inode.c71
-rw-r--r--fs/nfs/io.c2
-rw-r--r--fs/nfs/nfs4client.c24
-rw-r--r--fs/nfs/nfs4file.c1
-rw-r--r--fs/nfs/nfs4idmap.c6
-rw-r--r--fs/nfs/nfs4namespace.c2
-rw-r--r--fs/nfs/nfs4proc.c52
-rw-r--r--fs/nfs/nfs4state.c5
-rw-r--r--fs/nfs/nfs4sysctl.c2
-rw-r--r--fs/nfs/nfs4xdr.c64
-rw-r--r--fs/nfs/nfstrace.h27
-rw-r--r--fs/nfs/pagelist.c8
-rw-r--r--fs/nfs/pnfs.c6
-rw-r--r--fs/nfs/pnfs.h6
-rw-r--r--fs/nfs/pnfs_dev.c1
-rw-r--r--fs/nfs/write.c10
-rw-r--r--fs/nfsd/nfsfh.h3
-rw-r--r--fs/notify/fanotify/fanotify_user.c4
-rw-r--r--fs/notify/inotify/inotify_user.c4
-rw-r--r--fs/ntfs/inode.c9
-rw-r--r--fs/ntfs/mft.c6
-rw-r--r--fs/ocfs2/cluster/tcp.c3
-rw-r--r--fs/ocfs2/dir.c15
-rw-r--r--fs/ocfs2/dlmfs/dlmfs.c4
-rw-r--r--fs/ocfs2/inode.c3
-rw-r--r--fs/ocfs2/namei.c3
-rw-r--r--fs/ocfs2/quota_global.c3
-rw-r--r--fs/orangefs/devorangefs-req.c6
-rw-r--r--fs/pipe.c4
-rw-r--r--fs/posix_acl.c6
-rw-r--r--fs/proc/inode.c6
-rw-r--r--fs/proc/kmsg.c2
-rw-r--r--fs/proc/proc_sysctl.c4
-rw-r--r--fs/proc_namespace.c4
-rw-r--r--fs/select.c27
-rw-r--r--fs/signalfd.c4
-rw-r--r--fs/super.c2
-rw-r--r--fs/timerfd.c4
-rw-r--r--fs/ubifs/dir.c43
-rw-r--r--fs/ubifs/file.c41
-rw-r--r--fs/ubifs/tnc.c21
-rw-r--r--fs/ubifs/xattr.c2
-rw-r--r--fs/ufs/dir.c9
-rw-r--r--fs/ufs/inode.c3
-rw-r--r--fs/ufs/super.c3
-rw-r--r--fs/userfaultfd.c4
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c124
-rw-r--r--fs/xfs/libxfs/xfs_alloc.h10
-rw-r--r--fs/xfs/libxfs/xfs_alloc_btree.c47
-rw-r--r--fs/xfs/libxfs/xfs_attr.c4
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.c148
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.h1
-rw-r--r--fs/xfs/libxfs/xfs_attr_remote.c104
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c120
-rw-r--r--fs/xfs/libxfs/xfs_bmap_btree.c58
-rw-r--r--fs/xfs/libxfs/xfs_btree.c117
-rw-r--r--fs/xfs/libxfs/xfs_btree.h16
-rw-r--r--fs/xfs/libxfs/xfs_da_btree.c70
-rw-r--r--fs/xfs/libxfs/xfs_da_format.h6
-rw-r--r--fs/xfs/libxfs/xfs_dir2.c5
-rw-r--r--fs/xfs/libxfs/xfs_dir2.h2
-rw-r--r--fs/xfs/libxfs/xfs_dir2_block.c39
-rw-r--r--fs/xfs/libxfs/xfs_dir2_data.c208
-rw-r--r--fs/xfs/libxfs/xfs_dir2_leaf.c89
-rw-r--r--fs/xfs/libxfs/xfs_dir2_node.c89
-rw-r--r--fs/xfs/libxfs/xfs_dir2_priv.h12
-rw-r--r--fs/xfs/libxfs/xfs_dir2_sf.c30
-rw-r--r--fs/xfs/libxfs/xfs_dquot_buf.c174
-rw-r--r--fs/xfs/libxfs/xfs_fs.h7
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.c143
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.h6
-rw-r--r--fs/xfs/libxfs/xfs_ialloc_btree.c65
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.c135
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.h4
-rw-r--r--fs/xfs/libxfs/xfs_inode_fork.c152
-rw-r--r--fs/xfs/libxfs/xfs_inode_fork.h14
-rw-r--r--fs/xfs/libxfs/xfs_log_rlimit.c2
-rw-r--r--fs/xfs/libxfs/xfs_quota_defs.h9
-rw-r--r--fs/xfs/libxfs/xfs_refcount.c19
-rw-r--r--fs/xfs/libxfs/xfs_refcount.h3
-rw-r--r--fs/xfs/libxfs/xfs_refcount_btree.c40
-rw-r--r--fs/xfs/libxfs/xfs_rmap.c67
-rw-r--r--fs/xfs/libxfs/xfs_rmap.h5
-rw-r--r--fs/xfs/libxfs/xfs_rmap_btree.c40
-rw-r--r--fs/xfs/libxfs/xfs_rtbitmap.c21
-rw-r--r--fs/xfs/libxfs/xfs_sb.c113
-rw-r--r--fs/xfs/libxfs/xfs_sb.h4
-rw-r--r--fs/xfs/libxfs/xfs_shared.h4
-rw-r--r--fs/xfs/libxfs/xfs_symlink_remote.c75
-rw-r--r--fs/xfs/libxfs/xfs_trans_resv.c199
-rw-r--r--fs/xfs/scrub/agheader.c340
-rw-r--r--fs/xfs/scrub/alloc.c81
-rw-r--r--fs/xfs/scrub/bmap.c219
-rw-r--r--fs/xfs/scrub/btree.c184
-rw-r--r--fs/xfs/scrub/btree.h9
-rw-r--r--fs/xfs/scrub/common.c255
-rw-r--r--fs/xfs/scrub/common.h23
-rw-r--r--fs/xfs/scrub/dabtree.c22
-rw-r--r--fs/xfs/scrub/dir.c44
-rw-r--r--fs/xfs/scrub/ialloc.c194
-rw-r--r--fs/xfs/scrub/inode.c178
-rw-r--r--fs/xfs/scrub/parent.c8
-rw-r--r--fs/xfs/scrub/quota.c7
-rw-r--r--fs/xfs/scrub/refcount.c420
-rw-r--r--fs/xfs/scrub/rmap.c123
-rw-r--r--fs/xfs/scrub/rtbitmap.c35
-rw-r--r--fs/xfs/scrub/scrub.c203
-rw-r--r--fs/xfs/scrub/scrub.h37
-rw-r--r--fs/xfs/scrub/trace.h44
-rw-r--r--fs/xfs/xfs_aops.c15
-rw-r--r--fs/xfs/xfs_bmap_util.c4
-rw-r--r--fs/xfs/xfs_buf.c22
-rw-r--r--fs/xfs/xfs_buf.h8
-rw-r--r--fs/xfs/xfs_buf_item.c156
-rw-r--r--fs/xfs/xfs_buf_item.h7
-rw-r--r--fs/xfs/xfs_dir2_readdir.c4
-rw-r--r--fs/xfs/xfs_dquot.c62
-rw-r--r--fs/xfs/xfs_dquot_item.c9
-rw-r--r--fs/xfs/xfs_error.c64
-rw-r--r--fs/xfs/xfs_error.h14
-rw-r--r--fs/xfs/xfs_fsops.c79
-rw-r--r--fs/xfs/xfs_fsops.h1
-rw-r--r--fs/xfs/xfs_icache.c75
-rw-r--r--fs/xfs/xfs_inode.c107
-rw-r--r--fs/xfs/xfs_inode.h5
-rw-r--r--fs/xfs/xfs_inode_item.c44
-rw-r--r--fs/xfs/xfs_ioctl.c5
-rw-r--r--fs/xfs/xfs_ioctl32.c3
-rw-r--r--fs/xfs/xfs_linux.h14
-rw-r--r--fs/xfs/xfs_log.c17
-rw-r--r--fs/xfs/xfs_log_recover.c58
-rw-r--r--fs/xfs/xfs_mount.c3
-rw-r--r--fs/xfs/xfs_qm.c33
-rw-r--r--fs/xfs/xfs_reflink.c95
-rw-r--r--fs/xfs/xfs_rtalloc.h4
-rw-r--r--fs/xfs/xfs_super.c14
-rw-r--r--fs/xfs/xfs_trace.h68
-rw-r--r--fs/xfs/xfs_trans.c22
-rw-r--r--fs/xfs/xfs_trans.h2
-rw-r--r--fs/xfs/xfs_trans_buf.c98
-rw-r--r--fs/xfs/xfs_trans_inode.c16
-rw-r--r--include/acpi/acconfig.h4
-rw-r--r--include/acpi/acexcep.h12
-rw-r--r--include/acpi/acpi_bus.h3
-rw-r--r--include/acpi/acpixf.h6
-rw-r--r--include/acpi/actbl1.h159
-rw-r--r--include/acpi/actbl2.h15
-rw-r--r--include/acpi/actypes.h4
-rw-r--r--include/asm-generic/dma-mapping.h10
-rw-r--r--include/asm-generic/vmlinux.lds.h4
-rw-r--r--include/crypto/aead.h10
-rw-r--r--include/crypto/chacha20.h3
-rw-r--r--include/crypto/hash.h46
-rw-r--r--include/crypto/if_alg.h2
-rw-r--r--include/crypto/internal/hash.h2
-rw-r--r--include/crypto/internal/scompress.h11
-rw-r--r--include/crypto/null.h10
-rw-r--r--include/crypto/poly1305.h2
-rw-r--r--include/crypto/salsa20.h27
-rw-r--r--include/crypto/sha3.h6
-rw-r--r--include/crypto/skcipher.h11
-rw-r--r--include/drm/drm_file.h2
-rw-r--r--include/dt-bindings/gpio/gpio.h6
-rw-r--r--include/linux/acpi.h18
-rw-r--r--include/linux/arch_topology.h2
-rw-r--r--include/linux/arm_sdei.h79
-rw-r--r--include/linux/ata.h2
-rw-r--r--include/linux/backing-dev.h2
-rw-r--r--include/linux/bio.h24
-rw-r--r--include/linux/bitfield.h46
-rw-r--r--include/linux/blk-cgroup.h8
-rw-r--r--include/linux/blk-mq.h3
-rw-r--r--include/linux/blk_types.h28
-rw-r--r--include/linux/blkdev.h172
-rw-r--r--include/linux/buffer_head.h1
-rw-r--r--include/linux/bvec.h9
-rw-r--r--include/linux/cgroup-defs.h2
-rw-r--r--include/linux/compat.h100
-rw-r--r--include/linux/cper.h48
-rw-r--r--include/linux/cpu_cooling.h75
-rw-r--r--include/linux/cpuhotplug.h1
-rw-r--r--include/linux/cpuidle.h40
-rw-r--r--include/linux/crc-ccitt.h7
-rw-r--r--include/linux/crypto.h10
-rw-r--r--include/linux/device-mapper.h56
-rw-r--r--include/linux/device.h3
-rw-r--r--include/linux/dma-buf.h2
-rw-r--r--include/linux/dma-direct.h47
-rw-r--r--include/linux/dma-mapping.h23
-rw-r--r--include/linux/efi.h46
-rw-r--r--include/linux/elevator.h2
-rw-r--r--include/linux/eventfd.h14
-rw-r--r--include/linux/f2fs_fs.h14
-rw-r--r--include/linux/fs.h34
-rw-r--r--include/linux/ftrace.h12
-rw-r--r--include/linux/fwnode.h4
-rw-r--r--include/linux/genetlink.h3
-rw-r--r--include/linux/genhd.h5
-rw-r--r--include/linux/gpio.h10
-rw-r--r--include/linux/gpio/consumer.h25
-rw-r--r--include/linux/gpio/driver.h3
-rw-r--r--include/linux/gpio/machine.h4
-rw-r--r--include/linux/hid.h22
-rw-r--r--include/linux/hrtimer.h113
-rw-r--r--include/linux/iio/adc/stm32-dfsdm-adc.h18
-rw-r--r--include/linux/iio/consumer.h37
-rw-r--r--include/linux/iio/hw-consumer.h21
-rw-r--r--include/linux/iio/iio.h28
-rw-r--r--include/linux/iio/types.h28
-rw-r--r--include/linux/init_task.h258
-rw-r--r--include/linux/integrity.h1
-rw-r--r--include/linux/irq_work.h11
-rw-r--r--include/linux/irqflags.h2
-rw-r--r--include/linux/iversion.h337
-rw-r--r--include/linux/jump_label.h7
-rw-r--r--include/linux/lightnvm.h125
-rw-r--r--include/linux/livepatch.h4
-rw-r--r--include/linux/lockd/lockd.h9
-rw-r--r--include/linux/lockdep.h7
-rw-r--r--include/linux/mfd/axp20x.h5
-rw-r--r--include/linux/mfd/cros_ec.h4
-rw-r--r--include/linux/mfd/cros_ec_commands.h17
-rw-r--r--include/linux/mfd/palmas.h3
-rw-r--r--include/linux/mfd/rave-sp.h60
-rw-r--r--include/linux/mfd/stm32-lptimer.h6
-rw-r--r--include/linux/mfd/stm32-timers.h4
-rw-r--r--include/linux/mfd/tmio.h20
-rw-r--r--include/linux/mlx5/device.h16
-rw-r--r--include/linux/mlx5/driver.h43
-rw-r--r--include/linux/mlx5/mlx5_ifc.h27
-rw-r--r--include/linux/mlx5/qp.h12
-rw-r--r--include/linux/mlx5/vport.h4
-rw-r--r--include/linux/mm.h4
-rw-r--r--include/linux/mmc/host.h5
-rw-r--r--include/linux/mmc/slot-gpio.h1
-rw-r--r--include/linux/module.h9
-rw-r--r--include/linux/mtd/map.h130
-rw-r--r--include/linux/mtd/mtd.h28
-rw-r--r--include/linux/mtd/rawnand.h443
-rw-r--r--include/linux/mtd/spi-nor.h12
-rw-r--r--include/linux/net.h2
-rw-r--r--include/linux/netfilter/nfnetlink.h3
-rw-r--r--include/linux/nfs4.h12
-rw-r--r--include/linux/nubus.h189
-rw-r--r--include/linux/nvme.h22
-rw-r--r--include/linux/of.h7
-rw-r--r--include/linux/of_gpio.h2
-rw-r--r--include/linux/omap-gpmc.h28
-rw-r--r--include/linux/percpu-refcount.h6
-rw-r--r--include/linux/percpu_counter.h6
-rw-r--r--include/linux/pinctrl/pinconf-generic.h2
-rw-r--r--include/linux/platform_data/mtd-onenand-omap2.h34
-rw-r--r--include/linux/platform_data/spi-s3c64xx.h6
-rw-r--r--include/linux/pm.h16
-rw-r--r--include/linux/pm_wakeup.h7
-rw-r--r--include/linux/poll.h10
-rw-r--r--include/linux/posix-clock.h2
-rw-r--r--include/linux/posix-timers.h25
-rw-r--r--include/linux/posix_acl.h7
-rw-r--r--include/linux/power/bq27xxx_battery.h1
-rw-r--r--include/linux/property.h2
-rw-r--r--include/linux/psci.h4
-rw-r--r--include/linux/rcupdate.h25
-rw-r--r--include/linux/rcutiny.h1
-rw-r--r--include/linux/rcutree.h1
-rw-r--r--include/linux/regmap.h50
-rw-r--r--include/linux/regulator/driver.h2
-rw-r--r--include/linux/regulator/machine.h37
-rw-r--r--include/linux/ring_buffer.h2
-rw-r--r--include/linux/rpmsg.h4
-rw-r--r--include/linux/rtnetlink.h3
-rw-r--r--include/linux/rtsx_common.h (renamed from include/linux/mfd/rtsx_common.h)0
-rw-r--r--include/linux/rtsx_pci.h (renamed from include/linux/mfd/rtsx_pci.h)236
-rw-r--r--include/linux/rtsx_usb.h (renamed from include/linux/mfd/rtsx_usb.h)0
-rw-r--r--include/linux/scatterlist.h11
-rw-r--r--include/linux/sched.h14
-rw-r--r--include/linux/sched/cpufreq.h2
-rw-r--r--include/linux/sched/signal.h28
-rw-r--r--include/linux/sched/task_stack.h2
-rw-r--r--include/linux/sched/topology.h12
-rw-r--r--include/linux/scif.h4
-rw-r--r--include/linux/seccomp.h8
-rw-r--r--include/linux/seqlock.h3
-rw-r--r--include/linux/serdev.h1
-rw-r--r--include/linux/signal.h15
-rw-r--r--include/linux/skbuff.h2
-rw-r--r--include/linux/sound.h2
-rw-r--r--include/linux/srcu.h4
-rw-r--r--include/linux/srcutree.h8
-rw-r--r--include/linux/string.h3
-rw-r--r--include/linux/sunrpc/clnt.h1
-rw-r--r--include/linux/sunrpc/xprtrdma.h2
-rw-r--r--include/linux/suspend.h28
-rw-r--r--include/linux/swiotlb.h12
-rw-r--r--include/linux/torture.h8
-rw-r--r--include/linux/tpm.h39
-rw-r--r--include/linux/tpm_eventlog.h (renamed from drivers/char/tpm/tpm_eventlog.h)34
-rw-r--r--include/linux/tracepoint.h5
-rw-r--r--include/linux/tty_ldisc.h2
-rw-r--r--include/linux/w1-gpio.h9
-rw-r--r--include/linux/wait.h10
-rw-r--r--include/media/lirc_dev.h2
-rw-r--r--include/media/media-devnode.h2
-rw-r--r--include/media/soc_camera.h2
-rw-r--r--include/media/v4l2-ctrls.h2
-rw-r--r--include/media/v4l2-dev.h2
-rw-r--r--include/media/v4l2-mem2mem.h4
-rw-r--r--include/media/videobuf-core.h2
-rw-r--r--include/media/videobuf2-core.h2
-rw-r--r--include/media/videobuf2-v4l2.h5
-rw-r--r--include/misc/cxl.h2
-rw-r--r--include/net/bluetooth/bluetooth.h2
-rw-r--r--include/net/inet_connection_sock.h2
-rw-r--r--include/net/iucv/af_iucv.h2
-rw-r--r--include/net/sctp/sctp.h2
-rw-r--r--include/net/sock.h6
-rw-r--r--include/net/tcp.h2
-rw-r--r--include/net/udp.h2
-rw-r--r--include/rdma/ib_addr.h38
-rw-r--r--include/rdma/ib_sa.h10
-rw-r--r--include/rdma/ib_verbs.h64
-rw-r--r--include/rdma/opa_addr.h16
-rw-r--r--include/rdma/rdma_cm.h19
-rw-r--r--include/rdma/rdma_cm_ib.h8
-rw-r--r--include/rdma/rdma_vt.h31
-rw-r--r--include/rdma/restrack.h157
-rw-r--r--include/scsi/libsas.h30
-rw-r--r--include/scsi/scsi_cmnd.h1
-rw-r--r--include/scsi/scsi_host.h2
-rw-r--r--include/scsi/scsi_proto.h1
-rw-r--r--include/scsi/scsi_transport_fc.h4
-rw-r--r--include/scsi/scsi_transport_sas.h1
-rw-r--r--include/scsi/srp.h17
-rw-r--r--include/sound/hdaudio_ext.h4
-rw-r--r--include/sound/hwdep.h2
-rw-r--r--include/sound/info.h2
-rw-r--r--include/sound/pcm.h8
-rw-r--r--include/sound/rt5514.h2
-rw-r--r--include/sound/rt5645.h3
-rw-r--r--include/sound/soc-acpi-intel-match.h1
-rw-r--r--include/sound/soc-acpi.h14
-rw-r--r--include/sound/soc-dai.h5
-rw-r--r--include/sound/soc.h9
-rw-r--r--include/trace/events/btrfs.h1
-rw-r--r--include/trace/events/f2fs.h3
-rw-r--r--include/trace/events/rcu.h75
-rw-r--r--include/trace/events/rdma.h129
-rw-r--r--include/trace/events/rpcrdma.h890
-rw-r--r--include/trace/events/sunrpc.h12
-rw-r--r--include/trace/events/thermal.h10
-rw-r--r--include/trace/events/timer.h37
-rw-r--r--include/uapi/asm-generic/poll.h44
-rw-r--r--include/uapi/asm-generic/siginfo.h109
-rw-r--r--include/uapi/linux/arm_sdei.h73
-rw-r--r--include/uapi/linux/btrfs.h11
-rw-r--r--include/uapi/linux/btrfs_tree.h2
-rw-r--r--include/uapi/linux/fs.h6
-rw-r--r--include/uapi/linux/gfs2_ondisk.h62
-rw-r--r--include/uapi/linux/lightnvm.h9
-rw-r--r--include/uapi/linux/nfs.h1
-rw-r--r--include/uapi/linux/nubus.h23
-rw-r--r--include/uapi/linux/perf_event.h10
-rw-r--r--include/uapi/linux/ptrace.h6
-rw-r--r--include/uapi/linux/sched.h5
-rw-r--r--include/uapi/linux/types.h6
-rw-r--r--include/uapi/rdma/bnxt_re-abi.h9
-rw-r--r--include/uapi/rdma/ib_user_verbs.h11
-rw-r--r--include/uapi/rdma/mlx4-abi.h7
-rw-r--r--include/uapi/rdma/mlx5-abi.h53
-rw-r--r--include/uapi/rdma/rdma_netlink.h49
-rw-r--r--include/uapi/rdma/vmw_pvrdma-abi.h12
-rw-r--r--include/uapi/sound/asound.h9
-rw-r--r--include/uapi/sound/snd_sst_tokens.h17
-rw-r--r--init/Makefile2
-rw-r--r--init/init_task.c170
-rw-r--r--ipc/mqueue.c246
-rw-r--r--kernel/bpf/inode.c50
-rw-r--r--kernel/cgroup/cgroup.c6
-rw-r--r--kernel/configs/nopm.config15
-rw-r--r--kernel/debug/kdb/kdb_main.c10
-rw-r--r--kernel/debug/kdb/kdb_private.h2
-rw-r--r--kernel/events/callchain.c15
-rw-r--r--kernel/events/core.c106
-rw-r--r--kernel/events/internal.h4
-rw-r--r--kernel/events/uprobes.c12
-rw-r--r--kernel/futex.c6
-rw-r--r--kernel/irq/Kconfig10
-rw-r--r--kernel/irq/affinity.c30
-rw-r--r--kernel/irq/irqdomain.c118
-rw-r--r--kernel/irq_work.c2
-rw-r--r--kernel/jump_label.c12
-rw-r--r--kernel/livepatch/core.c76
-rw-r--r--kernel/livepatch/transition.c116
-rw-r--r--kernel/livepatch/transition.h2
-rw-r--r--kernel/locking/lockdep.c91
-rw-r--r--kernel/locking/locktorture.c108
-rw-r--r--kernel/locking/qspinlock.c12
-rw-r--r--kernel/module.c11
-rw-r--r--kernel/padata.c1
-rw-r--r--kernel/pid.c14
-rw-r--r--kernel/power/main.c29
-rw-r--r--kernel/power/snapshot.c6
-rw-r--r--kernel/power/swap.c6
-rw-r--r--kernel/printk/printk.c4
-rw-r--r--kernel/ptrace.c9
-rw-r--r--kernel/rcu/rcu.h27
-rw-r--r--kernel/rcu/rcuperf.c6
-rw-r--r--kernel/rcu/rcutorture.c12
-rw-r--r--kernel/rcu/srcutree.c109
-rw-r--r--kernel/rcu/tree.c355
-rw-r--r--kernel/rcu/tree.h5
-rw-r--r--kernel/rcu/tree_plugin.h13
-rw-r--r--kernel/rcu/update.c2
-rw-r--r--kernel/relay.c4
-rw-r--r--kernel/sched/core.c70
-rw-r--r--kernel/sched/cpufreq_schedutil.c93
-rw-r--r--kernel/sched/deadline.c143
-rw-r--r--kernel/sched/fair.c43
-rw-r--r--kernel/sched/rt.c2
-rw-r--r--kernel/sched/sched.h112
-rw-r--r--kernel/seccomp.c108
-rw-r--r--kernel/signal.c354
-rw-r--r--kernel/softirq.c2
-rw-r--r--kernel/sys.c2
-rw-r--r--kernel/time/hrtimer.c654
-rw-r--r--kernel/time/posix-clock.c6
-rw-r--r--kernel/time/posix-cpu-timers.c25
-rw-r--r--kernel/time/posix-timers.c2
-rw-r--r--kernel/time/tick-internal.h13
-rw-r--r--kernel/time/tick-sched.c2
-rw-r--r--kernel/time/timer.c90
-rw-r--r--kernel/torture.c39
-rw-r--r--kernel/trace/ring_buffer.c2
-rw-r--r--kernel/trace/trace.c17
-rw-r--r--kernel/trace/trace_benchmark.c2
-rw-r--r--kernel/tracepoint.c9
-rw-r--r--kernel/workqueue.c66
-rw-r--r--lib/Kconfig6
-rw-r--r--lib/Kconfig.debug2
-rw-r--r--lib/Makefile2
-rw-r--r--lib/assoc_array.c37
-rw-r--r--lib/chacha20.c71
-rw-r--r--lib/crc-ccitt.c58
-rw-r--r--lib/dma-direct.c156
-rw-r--r--lib/dma-noop.c68
-rw-r--r--lib/kobject.c2
-rw-r--r--lib/percpu-refcount.c8
-rw-r--r--lib/sbitmap.c2
-rw-r--r--lib/scatterlist.c127
-rw-r--r--lib/swiotlb.c205
-rw-r--r--lib/usercopy.c2
-rw-r--r--mm/gup.c46
-rw-r--r--mm/hwpoison-inject.c2
-rw-r--r--mm/ksm.c9
-rw-r--r--mm/madvise.c2
-rw-r--r--mm/memcontrol.c2
-rw-r--r--mm/memory-failure.c48
-rw-r--r--mm/mlock.c2
-rw-r--r--mm/page_io.c4
-rw-r--r--mm/swapfile.c2
-rw-r--r--mm/util.c36
-rw-r--r--net/9p/trans_fd.c60
-rw-r--r--net/atm/common.c4
-rw-r--r--net/atm/common.h2
-rw-r--r--net/batman-adv/icmp_socket.c2
-rw-r--r--net/batman-adv/log.c2
-rw-r--r--net/bluetooth/af_bluetooth.c6
-rw-r--r--net/bluetooth/hidp/core.c2
-rw-r--r--net/caif/caif_socket.c4
-rw-r--r--net/core/datagram.c8
-rw-r--r--net/core/sock.c2
-rw-r--r--net/dccp/dccp.h2
-rw-r--r--net/dccp/proto.c4
-rw-r--r--net/decnet/af_decnet.c4
-rw-r--r--net/ipv4/netfilter/arp_tables.c7
-rw-r--r--net/ipv4/netfilter/ip_tables.c7
-rw-r--r--net/ipv4/tcp.c4
-rw-r--r--net/ipv4/udp.c4
-rw-r--r--net/ipv6/netfilter/ip6_tables.c7
-rw-r--r--net/iucv/af_iucv.c6
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c9
-rw-r--r--net/netfilter/nf_conntrack_core.c2
-rw-r--r--net/nfc/llcp_sock.c6
-rw-r--r--net/nfc/nci/uart.c2
-rw-r--r--net/packet/af_packet.c4
-rw-r--r--net/phonet/socket.c4
-rw-r--r--net/rds/af_rds.c4
-rw-r--r--net/rds/ib.c6
-rw-r--r--net/rfkill/core.c4
-rw-r--r--net/rxrpc/af_rxrpc.c4
-rw-r--r--net/sctp/socket.c63
-rw-r--r--net/smc/af_smc.c6
-rw-r--r--net/smc/smc_clc.c18
-rw-r--r--net/socket.c6
-rw-r--r--net/sunrpc/cache.c8
-rw-r--r--net/sunrpc/clnt.c16
-rw-r--r--net/sunrpc/rpc_pipe.c4
-rw-r--r--net/sunrpc/sched.c26
-rw-r--r--net/sunrpc/svcsock.c4
-rw-r--r--net/sunrpc/xprt.c2
-rw-r--r--net/sunrpc/xprtrdma/backchannel.c78
-rw-r--r--net/sunrpc/xprtrdma/fmr_ops.c157
-rw-r--r--net/sunrpc/xprtrdma/frwr_ops.c329
-rw-r--r--net/sunrpc/xprtrdma/module.c12
-rw-r--r--net/sunrpc/xprtrdma/rpc_rdma.c162
-rw-r--r--net/sunrpc/xprtrdma/transport.c128
-rw-r--r--net/sunrpc/xprtrdma/verbs.c280
-rw-r--r--net/sunrpc/xprtrdma/xprt_rdma.h116
-rw-r--r--net/sunrpc/xprtsock.c36
-rw-r--r--net/tipc/server.c4
-rw-r--r--net/tipc/socket.c4
-rw-r--r--net/unix/af_unix.c15
-rw-r--r--net/vmw_vsock/af_vsock.c4
-rw-r--r--samples/livepatch/livepatch-callbacks-demo.c15
-rw-r--r--samples/livepatch/livepatch-sample.c15
-rw-r--r--samples/livepatch/livepatch-shadow-fix1.c15
-rw-r--r--samples/livepatch/livepatch-shadow-fix2.c15
-rwxr-xr-xscripts/checkpatch.pl6
-rw-r--r--scripts/mod/modpost.c9
-rw-r--r--security/apparmor/apparmorfs.c4
-rw-r--r--security/integrity/evm/evm.h9
-rw-r--r--security/integrity/evm/evm_crypto.c75
-rw-r--r--security/integrity/evm/evm_main.c67
-rw-r--r--security/integrity/evm/evm_secfs.c20
-rw-r--r--security/integrity/iint.c4
-rw-r--r--security/integrity/ima/ima_api.c5
-rw-r--r--security/integrity/ima/ima_appraise.c46
-rw-r--r--security/integrity/ima/ima_crypto.c2
-rw-r--r--security/integrity/ima/ima_init.c2
-rw-r--r--security/integrity/ima/ima_main.c96
-rw-r--r--security/integrity/ima/ima_policy.c32
-rw-r--r--security/integrity/ima/ima_queue.c2
-rw-r--r--security/integrity/ima/ima_template.c11
-rw-r--r--security/integrity/integrity.h41
-rw-r--r--security/keys/keyring.c7
-rw-r--r--security/keys/trusted.c35
-rw-r--r--security/selinux/include/netlabel.h3
-rw-r--r--security/selinux/netlabel.c3
-rw-r--r--security/selinux/ss/services.c21
-rw-r--r--security/smack/smack.h1
-rw-r--r--security/smack/smack_access.c40
-rw-r--r--security/smack/smack_lsm.c10
-rw-r--r--security/tomoyo/audit.c2
-rw-r--r--security/tomoyo/common.c4
-rw-r--r--security/tomoyo/common.h6
-rw-r--r--security/tomoyo/securityfs_if.c2
-rw-r--r--sound/core/compress_offload.c6
-rw-r--r--sound/core/control.c19
-rw-r--r--sound/core/hwdep.c4
-rw-r--r--sound/core/info.c4
-rw-r--r--sound/core/init.c2
-rw-r--r--sound/core/oss/pcm_oss.c16
-rw-r--r--sound/core/pcm_lib.c4
-rw-r--r--sound/core/pcm_misc.c19
-rw-r--r--sound/core/pcm_native.c10
-rw-r--r--sound/core/rawmidi.c4
-rw-r--r--sound/core/seq/oss/seq_oss.c4
-rw-r--r--sound/core/seq/oss/seq_oss_device.h2
-rw-r--r--sound/core/seq/oss/seq_oss_rw.c4
-rw-r--r--sound/core/seq/seq_clientmgr.c4
-rw-r--r--sound/core/seq/seq_queue.c4
-rw-r--r--sound/core/seq/seq_timer.c13
-rw-r--r--sound/core/seq/seq_timer.h2
-rw-r--r--sound/core/timer.c4
-rw-r--r--sound/drivers/dummy.c29
-rw-r--r--sound/firewire/bebob/bebob_hwdep.c4
-rw-r--r--sound/firewire/dice/dice-hwdep.c4
-rw-r--r--sound/firewire/digi00x/digi00x-hwdep.c4
-rw-r--r--sound/firewire/fireface/ff-hwdep.c4
-rw-r--r--sound/firewire/fireworks/fireworks_hwdep.c4
-rw-r--r--sound/firewire/motu/motu-hwdep.c4
-rw-r--r--sound/firewire/oxfw/oxfw-hwdep.c4
-rw-r--r--sound/firewire/tascam/tascam-hwdep.c4
-rw-r--r--sound/hda/ext/hdac_ext_bus.c2
-rw-r--r--sound/isa/gus/gus_dma.c5
-rw-r--r--sound/mips/hal2.c2
-rw-r--r--sound/mips/sgio2audio.c2
-rw-r--r--sound/oss/dmasound/dmasound_core.c4
-rw-r--r--sound/pci/hda/Kconfig1
-rw-r--r--sound/pci/hda/patch_realtek.c137
-rw-r--r--sound/pci/ice1712/prodigy_hifi.c131
-rw-r--r--sound/pci/korg1212/korg1212.c1
-rw-r--r--sound/soc/Kconfig1
-rw-r--r--sound/soc/Makefile1
-rw-r--r--sound/soc/amd/acp-pcm-dma.c35
-rw-r--r--sound/soc/atmel/atmel-classd.c6
-rw-r--r--sound/soc/au1x/ac97c.c6
-rw-r--r--sound/soc/bcm/bcm2835-i2s.c20
-rw-r--r--sound/soc/cirrus/ep93xx-ac97.c6
-rw-r--r--sound/soc/codecs/88pm860x-codec.c9
-rw-r--r--sound/soc/codecs/Kconfig46
-rw-r--r--sound/soc/codecs/Makefile13
-rw-r--r--sound/soc/codecs/cq93vc.c10
-rw-r--r--sound/soc/codecs/cs35l32.c18
-rw-r--r--sound/soc/codecs/cs35l34.c19
-rw-r--r--sound/soc/codecs/cs42l52.c13
-rw-r--r--sound/soc/codecs/cs42l56.c13
-rw-r--r--sound/soc/codecs/cs42l73.c15
-rw-r--r--sound/soc/codecs/cs47l24.c12
-rw-r--r--sound/soc/codecs/cx20442.c46
-rw-r--r--sound/soc/codecs/da7213.c7
-rw-r--r--sound/soc/codecs/da7218.c9
-rw-r--r--sound/soc/codecs/dmic.c24
-rw-r--r--sound/soc/codecs/hdac_hdmi.c358
-rw-r--r--sound/soc/codecs/max98373.c976
-rw-r--r--sound/soc/codecs/max98373.h212
-rw-r--r--sound/soc/codecs/max98926.c2
-rw-r--r--sound/soc/codecs/max98927.c1
-rw-r--r--sound/soc/codecs/mc13783.c9
-rw-r--r--sound/soc/codecs/msm8916-wcd-analog.c8
-rw-r--r--sound/soc/codecs/nau8540.c98
-rw-r--r--sound/soc/codecs/nau8540.h20
-rw-r--r--sound/soc/codecs/nau8824.c18
-rw-r--r--sound/soc/codecs/nau8825.c101
-rw-r--r--sound/soc/codecs/nau8825.h3
-rw-r--r--sound/soc/codecs/pcm186x-i2c.c69
-rw-r--r--sound/soc/codecs/pcm186x-spi.c69
-rw-r--r--sound/soc/codecs/pcm186x.c719
-rw-r--r--sound/soc/codecs/pcm186x.h220
-rw-r--r--sound/soc/codecs/pcm512x-spi.c4
-rw-r--r--sound/soc/codecs/rl6231.c93
-rw-r--r--sound/soc/codecs/rt5514-spi.c1
-rw-r--r--sound/soc/codecs/rt5514.c85
-rw-r--r--sound/soc/codecs/rt5514.h5
-rw-r--r--sound/soc/codecs/rt5645.c187
-rw-r--r--sound/soc/codecs/rt5645.h6
-rw-r--r--sound/soc/codecs/sgtl5000.c5
-rw-r--r--sound/soc/codecs/si476x.c9
-rw-r--r--sound/soc/codecs/sn95031.c936
-rw-r--r--sound/soc/codecs/sn95031.h133
-rw-r--r--sound/soc/codecs/spdif_receiver.c5
-rw-r--r--sound/soc/codecs/spdif_transmitter.c5
-rw-r--r--sound/soc/codecs/tas5720.c61
-rw-r--r--sound/soc/codecs/tas5720.h31
-rw-r--r--sound/soc/codecs/tas6424.c707
-rw-r--r--sound/soc/codecs/tas6424.h144
-rw-r--r--sound/soc/codecs/tfa9879.c1
-rw-r--r--sound/soc/codecs/tlv320aic31xx.c310
-rw-r--r--sound/soc/codecs/tlv320aic31xx.h335
-rw-r--r--sound/soc/codecs/tlv320aic32x4.c182
-rw-r--r--sound/soc/codecs/tlv320aic32x4.h308
-rw-r--r--sound/soc/codecs/tlv320aic3x.c15
-rw-r--r--sound/soc/codecs/tlv320dac33.c32
-rw-r--r--sound/soc/codecs/ts3a227e.c2
-rw-r--r--sound/soc/codecs/tscs42xx.c1456
-rw-r--r--sound/soc/codecs/tscs42xx.h2693
-rw-r--r--sound/soc/codecs/twl4030.c11
-rw-r--r--sound/soc/codecs/twl6040.c20
-rw-r--r--sound/soc/codecs/uda1380.c42
-rw-r--r--sound/soc/codecs/wm0010.c5
-rw-r--r--sound/soc/codecs/wm2000.c6
-rw-r--r--sound/soc/codecs/wm2200.c9
-rw-r--r--sound/soc/codecs/wm5102.c11
-rw-r--r--sound/soc/codecs/wm5110.c12
-rw-r--r--sound/soc/codecs/wm8350.c10
-rw-r--r--sound/soc/codecs/wm8400.c9
-rw-r--r--sound/soc/codecs/wm8903.c12
-rw-r--r--sound/soc/codecs/wm8994.c10
-rw-r--r--sound/soc/codecs/wm8997.c11
-rw-r--r--sound/soc/codecs/wm8998.c12
-rw-r--r--sound/soc/davinci/davinci-mcasp.c19
-rw-r--r--sound/soc/fsl/eukrea-tlv320.c1
-rw-r--r--sound/soc/fsl/fsl-asoc-card.c4
-rw-r--r--sound/soc/fsl/fsl_asrc.h2
-rw-r--r--sound/soc/fsl/fsl_dma.c4
-rw-r--r--sound/soc/fsl/fsl_ssi.c1367
-rw-r--r--sound/soc/fsl/fsl_ssi.h427
-rw-r--r--sound/soc/fsl/fsl_ssi_dbg.c59
-rw-r--r--sound/soc/hisilicon/hi6210-i2s.c1
-rw-r--r--sound/soc/intel/Kconfig115
-rw-r--r--sound/soc/intel/Makefile2
-rw-r--r--sound/soc/intel/atom/sst/sst_acpi.c3
-rw-r--r--sound/soc/intel/atom/sst/sst_stream.c8
-rw-r--r--sound/soc/intel/boards/Kconfig195
-rw-r--r--sound/soc/intel/boards/bytcht_da7213.c4
-rw-r--r--sound/soc/intel/boards/bytcht_es8316.c26
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c4
-rw-r--r--sound/soc/intel/boards/bytcr_rt5651.c50
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5645.c13
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5672.c4
-rw-r--r--sound/soc/intel/boards/haswell.c2
-rw-r--r--sound/soc/intel/boards/kbl_rt5663_max98927.c2
-rw-r--r--sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c2
-rw-r--r--sound/soc/intel/boards/mfld_machine.c428
-rw-r--r--sound/soc/intel/common/sst-dsp.c4
-rw-r--r--sound/soc/intel/skylake/bxt-sst.c2
-rw-r--r--sound/soc/intel/skylake/cnl-sst.c2
-rw-r--r--sound/soc/intel/skylake/skl-i2s.h64
-rw-r--r--sound/soc/intel/skylake/skl-messages.c22
-rw-r--r--sound/soc/intel/skylake/skl-nhlt.c158
-rw-r--r--sound/soc/intel/skylake/skl-pcm.c14
-rw-r--r--sound/soc/intel/skylake/skl-ssp-clk.h79
-rw-r--r--sound/soc/intel/skylake/skl-sst-dsp.c14
-rw-r--r--sound/soc/intel/skylake/skl-sst-dsp.h4
-rw-r--r--sound/soc/intel/skylake/skl-sst-utils.c6
-rw-r--r--sound/soc/intel/skylake/skl-sst.c2
-rw-r--r--sound/soc/intel/skylake/skl-topology.c45
-rw-r--r--sound/soc/intel/skylake/skl.c150
-rw-r--r--sound/soc/intel/skylake/skl.h22
-rw-r--r--sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.c552
-rw-r--r--sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.h15
-rw-r--r--sound/soc/mediatek/mt2701/mt2701-afe-common.h87
-rw-r--r--sound/soc/mediatek/mt2701/mt2701-afe-pcm.c213
-rw-r--r--sound/soc/mediatek/mt2701/mt2701-reg.h42
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-afe-pcm.c6
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c2
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c2
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-rt5650.c2
-rw-r--r--sound/soc/mxs/mxs-sgtl5000.c25
-rw-r--r--sound/soc/nuc900/nuc900-ac97.c11
-rw-r--r--sound/soc/omap/ams-delta.c4
-rw-r--r--sound/soc/qcom/apq8016_sbc.c10
-rw-r--r--sound/soc/rockchip/rk3399_gru_sound.c3
-rw-r--r--sound/soc/rockchip/rockchip_i2s.c11
-rw-r--r--sound/soc/samsung/bells.c40
-rw-r--r--sound/soc/sh/rcar/core.c143
-rw-r--r--sound/soc/sh/rcar/dma.c18
-rw-r--r--sound/soc/sh/rcar/rsnd.h15
-rw-r--r--sound/soc/sh/rcar/ssi.c163
-rw-r--r--sound/soc/soc-acpi.c73
-rw-r--r--sound/soc/soc-compress.c4
-rw-r--r--sound/soc/soc-core.c143
-rw-r--r--sound/soc/soc-io.c6
-rw-r--r--sound/soc/soc-ops.c4
-rw-r--r--sound/soc/soc-utils.c2
-rw-r--r--sound/soc/stm/Kconfig12
-rw-r--r--sound/soc/stm/Makefile3
-rw-r--r--sound/soc/stm/stm32_adfsdm.c347
-rw-r--r--sound/soc/stm/stm32_sai.c114
-rw-r--r--sound/soc/sunxi/sun4i-codec.c29
-rw-r--r--sound/soc/sunxi/sun4i-i2s.c57
-rw-r--r--sound/soc/uniphier/Kconfig19
-rw-r--r--sound/soc/uniphier/Makefile3
-rw-r--r--sound/soc/uniphier/evea.c567
-rw-r--r--sound/soc/ux500/mop500.c4
-rw-r--r--sound/soc/ux500/ux500_pcm.c5
-rw-r--r--sound/sound_core.c42
-rw-r--r--sound/usb/card.c21
-rw-r--r--sound/usb/mixer.c8
-rw-r--r--sound/usb/mixer_quirks.c84
-rw-r--r--sound/usb/quirks-table.h48
-rw-r--r--sound/usb/usx2y/us122l.c47
-rw-r--r--sound/usb/usx2y/usX2Yhwdep.c32
-rw-r--r--tools/arch/alpha/include/uapi/asm/errno.h128
-rw-r--r--tools/arch/mips/include/asm/errno.h17
-rw-r--r--tools/arch/mips/include/uapi/asm/errno.h130
-rw-r--r--tools/arch/parisc/include/uapi/asm/errno.h128
-rw-r--r--tools/arch/powerpc/include/uapi/asm/errno.h10
-rw-r--r--tools/arch/s390/include/uapi/asm/unistd.h412
-rw-r--r--tools/arch/sparc/include/uapi/asm/errno.h118
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h4
-rw-r--r--tools/arch/x86/include/asm/disabled-features.h8
-rw-r--r--tools/arch/x86/include/uapi/asm/errno.h1
-rw-r--r--tools/build/Makefile.feature4
-rw-r--r--tools/build/feature/Makefile11
-rw-r--r--tools/build/feature/test-all.c10
-rw-r--r--tools/build/feature/test-libopencsd.c8
-rw-r--r--tools/build/feature/test-pthread-barrier.c12
-rw-r--r--tools/gpio/gpio-event-mon.c10
-rw-r--r--tools/include/uapi/asm-generic/errno-base.h40
-rw-r--r--tools/include/uapi/asm-generic/errno.h123
-rw-r--r--tools/include/uapi/linux/perf_event.h10
-rw-r--r--tools/lib/traceevent/event-parse.c62
-rw-r--r--tools/lib/traceevent/event-plugin.c24
-rw-r--r--tools/lib/traceevent/kbuffer-parse.c4
-rw-r--r--tools/lib/traceevent/parse-filter.c22
-rw-r--r--tools/perf/Build4
-rw-r--r--tools/perf/Documentation/perf-buildid-cache.txt3
-rw-r--r--tools/perf/Documentation/perf-evlist.txt4
-rw-r--r--tools/perf/Documentation/perf-inject.txt4
-rw-r--r--tools/perf/Documentation/perf-lock.txt4
-rw-r--r--tools/perf/Documentation/perf-probe.txt18
-rw-r--r--tools/perf/Documentation/perf-record.txt3
-rw-r--r--tools/perf/Documentation/perf-report.txt37
-rw-r--r--tools/perf/Documentation/perf-sched.txt4
-rw-r--r--tools/perf/Documentation/perf-script.txt47
-rw-r--r--tools/perf/Documentation/perf-timechart.txt4
-rw-r--r--tools/perf/Documentation/perf-top.txt6
-rw-r--r--tools/perf/Documentation/perf-trace.txt20
-rw-r--r--tools/perf/Documentation/perf.data-file-format.txt27
-rw-r--r--tools/perf/Documentation/tips.txt2
-rw-r--r--tools/perf/Makefile.config65
-rw-r--r--tools/perf/Makefile.perf17
-rw-r--r--tools/perf/arch/arm/util/auxtrace.c77
-rw-r--r--tools/perf/arch/arm/util/pmu.c6
-rw-r--r--tools/perf/arch/arm64/util/Build5
-rw-r--r--tools/perf/arch/arm64/util/arm-spe.c225
-rw-r--r--tools/perf/arch/arm64/util/header.c65
-rw-r--r--tools/perf/arch/arm64/util/sym-handling.c22
-rw-r--r--tools/perf/arch/common.c44
-rw-r--r--tools/perf/arch/common.h1
-rw-r--r--tools/perf/arch/powerpc/util/header.c2
-rw-r--r--tools/perf/arch/powerpc/util/sym-handling.c8
-rw-r--r--tools/perf/arch/s390/Makefile21
-rw-r--r--tools/perf/arch/s390/annotate/instructions.c3
-rwxr-xr-xtools/perf/arch/s390/entry/syscalls/mksyscalltbl36
-rw-r--r--tools/perf/arch/x86/tests/perf-time-to-tsc.c2
-rw-r--r--tools/perf/arch/x86/util/header.c4
-rw-r--r--tools/perf/arch/x86/util/unwind-libunwind.c2
-rw-r--r--tools/perf/bench/futex-hash.c20
-rw-r--r--tools/perf/bench/futex-lock-pi.c23
-rw-r--r--tools/perf/bench/futex-requeue.c22
-rw-r--r--tools/perf/bench/futex-wake-parallel.c46
-rw-r--r--tools/perf/bench/futex-wake.c18
-rw-r--r--tools/perf/builtin-buildid-cache.c4
-rw-r--r--tools/perf/builtin-c2c.c16
-rw-r--r--tools/perf/builtin-help.c2
-rw-r--r--tools/perf/builtin-inject.c3
-rw-r--r--tools/perf/builtin-kvm.c13
-rw-r--r--tools/perf/builtin-record.c63
-rw-r--r--tools/perf/builtin-report.c283
-rw-r--r--tools/perf/builtin-script.c266
-rw-r--r--tools/perf/builtin-stat.c230
-rw-r--r--tools/perf/builtin-top.c10
-rw-r--r--tools/perf/builtin-trace.c122
-rwxr-xr-xtools/perf/check-headers.sh11
-rw-r--r--tools/perf/perf-completion.sh47
-rw-r--r--tools/perf/perf.c4
-rw-r--r--tools/perf/pmu-events/arch/arm64/cavium/thunderx2-imp-def.json62
-rw-r--r--tools/perf/pmu-events/arch/arm64/mapfile.csv15
-rw-r--r--tools/perf/pmu-events/arch/powerpc/mapfile.csv12
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/cache.json5
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/frontend.json7
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/marked.json27
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/other.json276
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/pipeline.json14
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/pmc.json2
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/translation.json5
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/cache.json555
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/floating-point.json108
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/frontend.json138
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/memory.json210
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/other.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/pipeline.json1156
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/virtual-memory.json150
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/cache.json389
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/floating-point.json108
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/frontend.json138
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/memory.json9
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/other.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json1156
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/virtual-memory.json150
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/cache.json383
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json108
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/frontend.json138
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/memory.json40
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/other.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json1156
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/virtual-memory.json150
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/cache.json1050
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/memory.json280
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/other.json54
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/pipeline.json506
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json60
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/cache.json365
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/floating-point.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/frontend.json132
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/memory.json21
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/other.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/pipeline.json981
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/virtual-memory.json212
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/cache.json377
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/floating-point.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/frontend.json132
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/memory.json28
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/other.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/pipeline.json981
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/virtual-memory.json212
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/cache.json243
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/frontend.json122
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/memory.json24
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/other.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json812
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/virtual-memory.json60
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/cache.json236
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/frontend.json122
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/memory.json24
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/other.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/pipeline.json812
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/virtual-memory.json60
-rw-r--r--tools/perf/pmu-events/arch/x86/mapfile.csv5
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/cache.json3
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/cache.json4180
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/floating-point.json5
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/frontend.json232
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/memory.json2008
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/other.json40
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/pipeline.json973
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/virtual-memory.json262
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/cache.json257
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/floating-point.json3
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/frontend.json48
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/memory.json231
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/other.json94
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/pipeline.json44
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/virtual-memory.json42
-rw-r--r--tools/perf/pmu-events/jevents.c39
-rw-r--r--tools/perf/scripts/python/bin/mem-phys-addr-record19
-rw-r--r--tools/perf/scripts/python/bin/mem-phys-addr-report3
-rw-r--r--tools/perf/scripts/python/mem-phys-addr.py95
-rw-r--r--tools/perf/tests/attr.c6
-rw-r--r--tools/perf/tests/backward-ring-buffer.c6
-rw-r--r--tools/perf/tests/bp_signal.c2
-rw-r--r--tools/perf/tests/bpf-script-example.c4
-rw-r--r--tools/perf/tests/bpf.c68
-rw-r--r--tools/perf/tests/builtin-test.c10
-rw-r--r--tools/perf/tests/code-reading.c2
-rw-r--r--tools/perf/tests/dwarf-unwind.c1
-rw-r--r--tools/perf/tests/keep-tracking.c2
-rw-r--r--tools/perf/tests/mmap-basic.c2
-rw-r--r--tools/perf/tests/openat-syscall-tp-fields.c5
-rw-r--r--tools/perf/tests/parse-events.c1
-rw-r--r--tools/perf/tests/perf-record.c2
-rw-r--r--tools/perf/tests/sample-parsing.c2
-rwxr-xr-xtools/perf/tests/shell/trace+probe_vfs_getname.sh7
-rw-r--r--tools/perf/tests/sw-clock.c2
-rw-r--r--tools/perf/tests/switch-tracking.c2
-rw-r--r--tools/perf/tests/task-exit.c2
-rw-r--r--tools/perf/tests/thread-map.c2
-rw-r--r--tools/perf/trace/beauty/Build1
-rw-r--r--tools/perf/trace/beauty/arch_errno_names.c1
-rwxr-xr-xtools/perf/trace/beauty/arch_errno_names.sh100
-rw-r--r--tools/perf/trace/beauty/beauty.h5
-rw-r--r--tools/perf/trace/beauty/flock.c10
-rw-r--r--tools/perf/trace/beauty/futex_val3.c18
-rw-r--r--tools/perf/ui/browsers/annotate.c399
-rw-r--r--tools/perf/ui/gtk/annotate.c25
-rw-r--r--tools/perf/util/Build10
-rw-r--r--tools/perf/util/annotate.c652
-rw-r--r--tools/perf/util/annotate.h76
-rw-r--r--tools/perf/util/arm-spe-pkt-decoder.c462
-rw-r--r--tools/perf/util/arm-spe-pkt-decoder.h43
-rw-r--r--tools/perf/util/arm-spe.c231
-rw-r--r--tools/perf/util/arm-spe.h31
-rw-r--r--tools/perf/util/auxtrace.c8
-rw-r--r--tools/perf/util/auxtrace.h1
-rw-r--r--tools/perf/util/bpf-loader.c4
-rw-r--r--tools/perf/util/callchain.c10
-rw-r--r--tools/perf/util/callchain.h2
-rw-r--r--tools/perf/util/cgroup.c3
-rw-r--r--tools/perf/util/cs-etm-decoder/Build1
-rw-r--r--tools/perf/util/cs-etm-decoder/cs-etm-decoder.c513
-rw-r--r--tools/perf/util/cs-etm-decoder/cs-etm-decoder.h105
-rw-r--r--tools/perf/util/cs-etm.c1023
-rw-r--r--tools/perf/util/cs-etm.h18
-rw-r--r--tools/perf/util/data.c10
-rw-r--r--tools/perf/util/dso.c2
-rw-r--r--tools/perf/util/env.c47
-rw-r--r--tools/perf/util/env.h2
-rw-r--r--tools/perf/util/event.c8
-rw-r--r--tools/perf/util/event.h4
-rw-r--r--tools/perf/util/evlist.c70
-rw-r--r--tools/perf/util/evlist.h15
-rw-r--r--tools/perf/util/evsel.c227
-rw-r--r--tools/perf/util/evsel.h12
-rwxr-xr-xtools/perf/util/generate-cmdlist.sh2
-rw-r--r--tools/perf/util/header.c130
-rw-r--r--tools/perf/util/header.h9
-rw-r--r--tools/perf/util/intel-bts.c6
-rw-r--r--tools/perf/util/intel-pt-decoder/Build24
-rw-r--r--tools/perf/util/intel-pt.c11
-rw-r--r--tools/perf/util/machine.c4
-rw-r--r--tools/perf/util/map.c2
-rw-r--r--tools/perf/util/metricgroup.c10
-rw-r--r--tools/perf/util/mmap.c73
-rw-r--r--tools/perf/util/mmap.h4
-rw-r--r--tools/perf/util/ordered-events.c3
-rw-r--r--tools/perf/util/ordered-events.h2
-rw-r--r--tools/perf/util/parse-events.c3
-rw-r--r--tools/perf/util/path.c14
-rw-r--r--tools/perf/util/path.h3
-rw-r--r--tools/perf/util/pmu.c87
-rw-r--r--tools/perf/util/pmu.h2
-rw-r--r--tools/perf/util/probe-event.c85
-rw-r--r--tools/perf/util/python-ext-sources1
-rw-r--r--tools/perf/util/python.c2
-rw-r--r--tools/perf/util/rblist.c19
-rw-r--r--tools/perf/util/rblist.h1
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c3
-rw-r--r--tools/perf/util/session.c54
-rw-r--r--tools/perf/util/session.h2
-rw-r--r--tools/perf/util/sort.c20
-rw-r--r--tools/perf/util/srcline.c9
-rw-r--r--tools/perf/util/srcline.h5
-rw-r--r--tools/perf/util/stat-shadow.c426
-rw-r--r--tools/perf/util/stat.c15
-rw-r--r--tools/perf/util/stat.h63
-rw-r--r--tools/perf/util/string.c46
-rw-r--r--tools/perf/util/string2.h2
-rw-r--r--tools/perf/util/symbol.c5
-rw-r--r--tools/perf/util/symbol.h1
-rw-r--r--tools/perf/util/syscalltbl.c4
-rw-r--r--tools/perf/util/target.h7
-rw-r--r--tools/perf/util/thread_map.c27
-rw-r--r--tools/perf/util/thread_map.h3
-rw-r--r--tools/perf/util/time-utils.c301
-rw-r--r--tools/perf/util/time-utils.h8
-rw-r--r--tools/perf/util/tool.h1
-rw-r--r--tools/perf/util/unwind-libunwind-local.c9
-rw-r--r--tools/perf/util/unwind-libunwind.c4
-rw-r--r--tools/perf/util/util.c2
-rw-r--r--tools/perf/util/util.h10
-rw-r--r--tools/power/acpi/tools/acpidump/apmain.c28
-rw-r--r--tools/power/cpupower/lib/cpufreq.h4
-rwxr-xr-xtools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py15
-rw-r--r--tools/testing/selftests/ptp/testptp.c4
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/config2frag.sh25
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/configinit.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-build.sh5
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh4
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh4
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh6
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm.sh42
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/parse-torture.sh2
-rw-r--r--tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh1
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh1
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh1
-rw-r--r--virt/kvm/arm/arm.c35
-rw-r--r--virt/kvm/arm/mmu.c12
-rw-r--r--virt/kvm/eventfd.c4
-rw-r--r--virt/kvm/kvm_main.c43
3175 files changed, 124652 insertions, 62282 deletions
diff --git a/Documentation/ABI/testing/evm b/Documentation/ABI/testing/evm
index 9578247e1792..d12cb2eae9ee 100644
--- a/Documentation/ABI/testing/evm
+++ b/Documentation/ABI/testing/evm
@@ -14,30 +14,46 @@ Description:
generated either locally or remotely using an
asymmetric key. These keys are loaded onto root's
keyring using keyctl, and EVM is then enabled by
- echoing a value to <securityfs>/evm:
+ echoing a value to <securityfs>/evm made up of the
+ following bits:
- 1: enable HMAC validation and creation
- 2: enable digital signature validation
- 3: enable HMAC and digital signature validation and HMAC
- creation
+ Bit Effect
+ 0 Enable HMAC validation and creation
+ 1 Enable digital signature validation
+ 2 Permit modification of EVM-protected metadata at
+ runtime. Not supported if HMAC validation and
+ creation is enabled.
+ 31 Disable further runtime modification of EVM policy
- Further writes will be blocked if HMAC support is enabled or
- if bit 32 is set:
+ For example:
- echo 0x80000002 ><securityfs>/evm
+ echo 1 ><securityfs>/evm
- will enable digital signature validation and block
- further writes to <securityfs>/evm.
+ will enable HMAC validation and creation
- Until this is done, EVM can not create or validate the
- 'security.evm' xattr, but returns INTEGRITY_UNKNOWN.
- Loading keys and signaling EVM should be done as early
- as possible. Normally this is done in the initramfs,
- which has already been measured as part of the trusted
- boot. For more information on creating and loading
- existing trusted/encrypted keys, refer to:
+ echo 0x80000003 ><securityfs>/evm
- Documentation/security/keys/trusted-encrypted.rst. Both dracut
- (via 97masterkey and 98integrity) and systemd (via
+ will enable HMAC and digital signature validation and
+ HMAC creation and disable all further modification of policy.
+
+ echo 0x80000006 ><securityfs>/evm
+
+ will enable digital signature validation, permit
+ modification of EVM-protected metadata and
+ disable all further modification of policy
+
+ Note that once a key has been loaded, it will no longer be
+ possible to enable metadata modification.
+
+	Until key loading has been signaled, EVM can not create
+	or validate the 'security.evm' xattr, but returns
+	INTEGRITY_UNKNOWN.  Loading keys and signaling EVM
+ should be done as early as possible. Normally this is
+ done in the initramfs, which has already been measured
+ as part of the trusted boot. For more information on
+ creating and loading existing trusted/encrypted keys,
+ refer to:
+ Documentation/security/keys/trusted-encrypted.rst. Both
+ dracut (via 97masterkey and 98integrity) and systemd (via
core/ima-setup) have support for loading keys at boot
time.
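+
+	As an illustrative sketch (assuming securityfs is mounted at
+	/sys/kernel/security), the 0x80000006 value above can be
+	composed from its individual bits:
+
+	# bit 1 (signatures) | bit 2 (metadata writes) | bit 31 (lock policy)
+	echo $(( (1 << 1) | (1 << 2) | (1 << 31) )) > /sys/kernel/security/evm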
diff --git a/Documentation/ABI/testing/ima_policy b/Documentation/ABI/testing/ima_policy
index e76432b9954d..2028f2d093b2 100644
--- a/Documentation/ABI/testing/ima_policy
+++ b/Documentation/ABI/testing/ima_policy
@@ -17,7 +17,8 @@ Description:
rule format: action [condition ...]
- action: measure | dont_measure | appraise | dont_appraise | audit
+ action: measure | dont_measure | appraise | dont_appraise |
+ audit | hash | dont_hash
condition:= base | lsm [option]
base: [[func=] [mask=] [fsmagic=] [fsuuid=] [uid=]
[euid=] [fowner=]]
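+
+		For illustration, two rules in this format (the values are
+		examples, not a recommended policy):
+
+		measure func=BPRM_CHECK mask=MAY_EXEC
+		dont_measure fsmagic=0x9fa0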
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-dfsdm-adc-stm32 b/Documentation/ABI/testing/sysfs-bus-iio-dfsdm-adc-stm32
new file mode 100644
index 000000000000..da9822309f07
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-dfsdm-adc-stm32
@@ -0,0 +1,16 @@
+What: /sys/bus/iio/devices/iio:deviceX/in_voltage_spi_clk_freq
+KernelVersion: 4.14
+Contact: arnaud.pouliquen@st.com
+Description:
+		For audio purposes only.
+		Used by the audio driver to set/get the SPI input frequency.
+		This is mandatory if the DFSDM is a slave on the SPI bus, to
+		provide the SPI clock frequency at runtime.
+		Note that the SPI frequency should be a multiple of the sample
+		frequency to preserve precision.
+		If the DFSDM input is SPI master:
+		reading returns the SPI clkout frequency;
+		writing returns an error.
+		If the DFSDM input is SPI slave:
+		reading returns the value previously set;
+		the value must be written before starting conversions.
\ No newline at end of file
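+
+		A usage sketch (the device index is illustrative):
+
+		# DFSDM as SPI master: read the clkout frequency
+		cat /sys/bus/iio/devices/iio:device0/in_voltage_spi_clk_freq
+		# DFSDM as SPI slave: set the frequency before starting conversions
+		echo 2048000 > /sys/bus/iio/devices/iio:device0/in_voltage_spi_clk_freq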
diff --git a/Documentation/ABI/testing/sysfs-class-led-trigger-netdev b/Documentation/ABI/testing/sysfs-class-led-trigger-netdev
new file mode 100644
index 000000000000..451af6d6768c
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-led-trigger-netdev
@@ -0,0 +1,45 @@
+What: /sys/class/leds/<led>/device_name
+Date: Dec 2017
+KernelVersion: 4.16
+Contact: linux-leds@vger.kernel.org
+Description:
+ Specifies the network device name to monitor.
+
+What: /sys/class/leds/<led>/interval
+Date: Dec 2017
+KernelVersion: 4.16
+Contact: linux-leds@vger.kernel.org
+Description:
+ Specifies the duration of the LED blink in milliseconds.
+ Defaults to 50 ms.
+
+What: /sys/class/leds/<led>/link
+Date: Dec 2017
+KernelVersion: 4.16
+Contact: linux-leds@vger.kernel.org
+Description:
+ Signal the link state of the named network device.
+ If set to 0 (default), the LED's normal state is off.
+ If set to 1, the LED's normal state reflects the link state
+ of the named network device.
+ Setting this value also immediately changes the LED state.
+
+What: /sys/class/leds/<led>/tx
+Date: Dec 2017
+KernelVersion: 4.16
+Contact: linux-leds@vger.kernel.org
+Description:
+ Signal transmission of data on the named network device.
+ If set to 0 (default), the LED will not blink on transmission.
+ If set to 1, the LED will blink for the milliseconds specified
+ in interval to signal transmission.
+
+What: /sys/class/leds/<led>/rx
+Date: Dec 2017
+KernelVersion: 4.16
+Contact: linux-leds@vger.kernel.org
+Description:
+ Signal reception of data on the named network device.
+ If set to 0 (default), the LED will not blink on reception.
+ If set to 1, the LED will blink for the milliseconds specified
+ in interval to signal reception.
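+
+		A usage sketch covering the attributes above (LED and
+		interface names are illustrative):
+
+		echo netdev > /sys/class/leds/led0/trigger
+		echo eth0 > /sys/class/leds/led0/device_name
+		echo 1 > /sys/class/leds/led0/link
+		echo 1 > /sys/class/leds/led0/rx
+		echo 100 > /sys/class/leds/led0/interval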
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index a7799c2fca28..d870b5514d15 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -186,3 +186,9 @@ Date: August 2017
Contact: "Jaegeuk Kim" <jaegeuk@kernel.org>
Description:
Controls sleep time of GC urgent mode
+
+What: /sys/fs/f2fs/<disk>/readdir_ra
+Date: November 2017
+Contact: "Sheng Yong" <shengyong1@huawei.com>
+Description:
+		Controls readahead of inode blocks during readdir.
diff --git a/Documentation/ABI/testing/sysfs-kernel-livepatch b/Documentation/ABI/testing/sysfs-kernel-livepatch
index d5d39748382f..dac7e1e62a8b 100644
--- a/Documentation/ABI/testing/sysfs-kernel-livepatch
+++ b/Documentation/ABI/testing/sysfs-kernel-livepatch
@@ -33,6 +33,32 @@ Description:
An attribute which indicates whether the patch is currently in
transition.
+What: /sys/kernel/livepatch/<patch>/signal
+Date: Nov 2017
+KernelVersion: 4.15.0
+Contact: live-patching@vger.kernel.org
+Description:
+		A writable attribute that allows the administrator to affect
+		the course of an existing transition. Writing 1 sends a fake
+		signal to all tasks still blocking the transition. The fake
+		signal means that no proper signal is delivered (there is no
+		data in the signal pending structures). Tasks are interrupted
+		or woken up, and forced to change their patched state.
+
+What: /sys/kernel/livepatch/<patch>/force
+Date: Nov 2017
+KernelVersion: 4.15.0
+Contact: live-patching@vger.kernel.org
+Description:
+		A writable attribute that allows the administrator to affect
+		the course of an existing transition. Writing 1 clears the
+		TIF_PATCH_PENDING flag of all tasks and thus forces them into
+		the patched or unpatched state. The administrator should not
+		use this feature without clearance from the patch
+		distributor. Removal (rmmod) of patch modules is permanently
+		disabled once this feature is used. See
+		Documentation/livepatch/livepatch.txt for more information.
+
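+		For example (the patch name is illustrative), an administrator
+		might first try the fake signal and only force the transition
+		if tasks remain stuck:
+
+		echo 1 > /sys/kernel/livepatch/my_patch/signal
+		echo 1 > /sys/kernel/livepatch/my_patch/force
+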
What: /sys/kernel/livepatch/<patch>/<object>
Date: Nov 2014
KernelVersion: 3.19.0
diff --git a/Documentation/IRQ-domain.txt b/Documentation/IRQ-domain.txt
index 4a1cd7645d85..507775cce753 100644
--- a/Documentation/IRQ-domain.txt
+++ b/Documentation/IRQ-domain.txt
@@ -265,37 +265,5 @@ support other architectures, such as ARM, ARM64 etc.
=== Debugging ===
-If you switch on CONFIG_IRQ_DOMAIN_DEBUG (which depends on
-CONFIG_IRQ_DOMAIN and CONFIG_DEBUG_FS), you will find a new file in
-your debugfs mount point, called irq_domain_mapping. This file
-contains a live snapshot of all the IRQ domains in the system:
-
- name mapped linear-max direct-max devtree-node
- pl061 8 8 0 /smb/gpio@e0080000
- pl061 8 8 0 /smb/gpio@e1050000
- pMSI 0 0 0 /interrupt-controller@e1101000/v2m@e0080000
- MSI 37 0 0 /interrupt-controller@e1101000/v2m@e0080000
- GICv2m 37 0 0 /interrupt-controller@e1101000/v2m@e0080000
- GICv2 448 448 0 /interrupt-controller@e1101000
-
-it also iterates over the interrupts to display their mapping in the
-domains, and makes the domain stacking visible:
-
-
-irq hwirq chip name chip data active type domain
- 1 0x00019 GICv2 0xffff00000916bfd8 * LINEAR GICv2
- 2 0x0001d GICv2 0xffff00000916bfd8 LINEAR GICv2
- 3 0x0001e GICv2 0xffff00000916bfd8 * LINEAR GICv2
- 4 0x0001b GICv2 0xffff00000916bfd8 * LINEAR GICv2
- 5 0x0001a GICv2 0xffff00000916bfd8 LINEAR GICv2
-[...]
- 96 0x81808 MSI 0x (null) RADIX MSI
- 96+ 0x00063 GICv2m 0xffff8003ee116980 RADIX GICv2m
- 96+ 0x00063 GICv2 0xffff00000916bfd8 LINEAR GICv2
- 97 0x08800 MSI 0x (null) * RADIX MSI
- 97+ 0x00064 GICv2m 0xffff8003ee116980 * RADIX GICv2m
- 97+ 0x00064 GICv2 0xffff00000916bfd8 * LINEAR GICv2
-
-Here, interrupts 1-5 are only using a single domain, while 96 and 97
-are build out of a stack of three domain, each level performing a
-particular function.
+Most of the internals of the IRQ subsystem are exposed in debugfs by
+turning CONFIG_GENERIC_IRQ_DEBUGFS on.
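+
+A sketch of inspecting that state (assuming debugfs is mounted at
+/sys/kernel/debug; exact file names may vary by kernel version):
+
+	mount -t debugfs none /sys/kernel/debug
+	ls /sys/kernel/debug/irq/domains/
+	cat /sys/kernel/debug/irq/irqs/<nr>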
diff --git a/Documentation/RCU/Design/Data-Structures/Data-Structures.html b/Documentation/RCU/Design/Data-Structures/Data-Structures.html
index 38d6d800761f..6c06e10bd04b 100644
--- a/Documentation/RCU/Design/Data-Structures/Data-Structures.html
+++ b/Documentation/RCU/Design/Data-Structures/Data-Structures.html
@@ -1097,7 +1097,8 @@ will cause the CPU to disregard the values of its counters on
its next exit from idle.
Finally, the <tt>rcu_qs_ctr_snap</tt> field is used to detect
cases where a given operation has resulted in a quiescent state
-for all flavors of RCU, for example, <tt>cond_resched_rcu_qs()</tt>.
+for all flavors of RCU, for example, <tt>cond_resched()</tt>
+when RCU has indicated a need for quiescent states.
<h5>RCU Callback Handling</h5>
@@ -1182,8 +1183,8 @@ CPU (and from tracing) unless otherwise stated.
Its fields are as follows:
<pre>
- 1 int dynticks_nesting;
- 2 int dynticks_nmi_nesting;
+ 1 long dynticks_nesting;
+ 2 long dynticks_nmi_nesting;
3 atomic_t dynticks;
4 bool rcu_need_heavy_qs;
5 unsigned long rcu_qs_ctr;
@@ -1191,15 +1192,31 @@ Its fields are as follows:
</pre>
<p>The <tt>-&gt;dynticks_nesting</tt> field counts the
-nesting depth of normal interrupts.
-In addition, this counter is incremented when exiting dyntick-idle
-mode and decremented when entering it.
+nesting depth of process execution, so that in normal circumstances
+this counter has value zero or one.
+NMIs, irqs, and tracers are counted by the <tt>-&gt;dynticks_nmi_nesting</tt>
+field.
+Because NMIs cannot be masked, changes to this variable have to be
+undertaken carefully using an algorithm provided by Andy Lutomirski.
+The initial transition from idle adds one, and nested transitions
+add two, so that a nesting level of five is represented by a
+<tt>-&gt;dynticks_nmi_nesting</tt> value of nine (1 + 2&times;4 = 9).
This counter can therefore be thought of as counting the number
of reasons why this CPU cannot be permitted to enter dyntick-idle
-mode, aside from non-maskable interrupts (NMIs).
-NMIs are counted by the <tt>-&gt;dynticks_nmi_nesting</tt>
-field, except that NMIs that interrupt non-dyntick-idle execution
-are not counted.
+mode, aside from process-level transitions.
+
+<p>However, it turns out that when running in non-idle kernel context,
+the Linux kernel is fully capable of entering interrupt handlers that
+never exit and perhaps also vice versa.
+Therefore, whenever the <tt>-&gt;dynticks_nesting</tt> field is
+incremented up from zero, the <tt>-&gt;dynticks_nmi_nesting</tt> field
+is set to a large positive number, and whenever the
+<tt>-&gt;dynticks_nesting</tt> field is decremented down to zero,
+the <tt>-&gt;dynticks_nmi_nesting</tt> field is set to zero.
+Assuming that the number of misnested interrupts is not sufficient
+to overflow the counter, this approach corrects the
+<tt>-&gt;dynticks_nmi_nesting</tt> field every time the corresponding
+CPU enters the idle loop from process context.
</p><p>The <tt>-&gt;dynticks</tt> field counts the corresponding
CPU's transitions to and from dyntick-idle mode, so that this counter
@@ -1231,14 +1248,16 @@ in response.
<tr><th>&nbsp;</th></tr>
<tr><th align="left">Quick Quiz:</th></tr>
<tr><td>
- Why not just count all NMIs?
- Wouldn't that be simpler and less error prone?
+ Why not simply combine the <tt>-&gt;dynticks_nesting</tt>
+ and <tt>-&gt;dynticks_nmi_nesting</tt> counters into a
+ single counter that just counts the number of reasons that
+ the corresponding CPU is non-idle?
</td></tr>
<tr><th align="left">Answer:</th></tr>
<tr><td bgcolor="#ffffff"><font color="ffffff">
- It seems simpler only until you think hard about how to go about
- updating the <tt>rcu_dynticks</tt> structure's
- <tt>-&gt;dynticks</tt> field.
+ Because this would fail in the presence of interrupts whose
+ handlers never return and of handlers that manage to return
+ from a made-up interrupt.
</font></td></tr>
<tr><td>&nbsp;</td></tr>
</table>
diff --git a/Documentation/RCU/Design/Requirements/Requirements.html b/Documentation/RCU/Design/Requirements/Requirements.html
index 62e847bcdcdd..49690228b1c6 100644
--- a/Documentation/RCU/Design/Requirements/Requirements.html
+++ b/Documentation/RCU/Design/Requirements/Requirements.html
@@ -581,7 +581,8 @@ This guarantee was only partially premeditated.
DYNIX/ptx used an explicit memory barrier for publication, but had nothing
resembling <tt>rcu_dereference()</tt> for subscription, nor did it
have anything resembling the <tt>smp_read_barrier_depends()</tt>
-that was later subsumed into <tt>rcu_dereference()</tt>.
+that was later subsumed into <tt>rcu_dereference()</tt> and later
+still into <tt>READ_ONCE()</tt>.
The need for these operations made itself known quite suddenly at a
late-1990s meeting with the DEC Alpha architects, back in the days when
DEC was still a free-standing company.
@@ -2797,7 +2798,7 @@ RCU must avoid degrading real-time response for CPU-bound threads, whether
executing in usermode (which is one use case for
<tt>CONFIG_NO_HZ_FULL=y</tt>) or in the kernel.
That said, CPU-bound loops in the kernel must execute
-<tt>cond_resched_rcu_qs()</tt> at least once per few tens of milliseconds
+<tt>cond_resched()</tt> at least once per few tens of milliseconds
in order to avoid receiving an IPI from RCU.
<p>
@@ -3128,7 +3129,7 @@ The solution, in the form of
is to have implicit
read-side critical sections that are delimited by voluntary context
switches, that is, calls to <tt>schedule()</tt>,
-<tt>cond_resched_rcu_qs()</tt>, and
+<tt>cond_resched()</tt>, and
<tt>synchronize_rcu_tasks()</tt>.
In addition, transitions to and from userspace execution also delimit
tasks-RCU read-side critical sections.
diff --git a/Documentation/RCU/rcu_dereference.txt b/Documentation/RCU/rcu_dereference.txt
index 1acb26b09b48..ab96227bad42 100644
--- a/Documentation/RCU/rcu_dereference.txt
+++ b/Documentation/RCU/rcu_dereference.txt
@@ -122,11 +122,7 @@ o Be very careful about comparing pointers obtained from
Note that if checks for being within an RCU read-side
critical section are not required and the pointer is never
dereferenced, rcu_access_pointer() should be used in place
- of rcu_dereference(). The rcu_access_pointer() primitive
- does not require an enclosing read-side critical section,
- and also omits the smp_read_barrier_depends() included in
- rcu_dereference(), which in turn should provide a small
- performance gain in some CPUs (e.g., the DEC Alpha).
+ of rcu_dereference().
o The comparison is against a pointer that references memory
that was initialized "a long time ago." The reason
diff --git a/Documentation/RCU/stallwarn.txt b/Documentation/RCU/stallwarn.txt
index a08f928c8557..4259f95c3261 100644
--- a/Documentation/RCU/stallwarn.txt
+++ b/Documentation/RCU/stallwarn.txt
@@ -23,12 +23,10 @@ o A CPU looping with preemption disabled. This condition can
o A CPU looping with bottom halves disabled. This condition can
result in RCU-sched and RCU-bh stalls.
-o For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the
- kernel without invoking schedule(). Note that cond_resched()
- does not necessarily prevent RCU CPU stall warnings. Therefore,
- if the looping in the kernel is really expected and desirable
- behavior, you might need to replace some of the cond_resched()
- calls with calls to cond_resched_rcu_qs().
+o For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the kernel
+ without invoking schedule(). If the looping in the kernel is
+ really expected and desirable behavior, you might need to add
+ some calls to cond_resched().
o Booting Linux using a console connection that is too slow to
keep up with the boot-time console-message rate. For example,
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index df62466da4e0..a27fbfb0efb8 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -600,8 +600,7 @@ don't forget about them when submitting patches making use of RCU!]
#define rcu_dereference(p) \
({ \
- typeof(p) _________p1 = p; \
- smp_read_barrier_depends(); \
+ typeof(p) _________p1 = READ_ONCE(p); \
(_________p1); \
})
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 46b26bfee27b..b98048b56ada 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -114,7 +114,6 @@
This facility can be used to prevent such uncontrolled
GPE floodings.
Format: <int>
- Support masking of GPEs numbered from 0x00 to 0x7f.
acpi_no_auto_serialize [HW,ACPI]
Disable auto-serialization of AML methods
@@ -223,7 +222,7 @@
acpi_sleep= [HW,ACPI] Sleep options
Format: { s3_bios, s3_mode, s3_beep, s4_nohwsig,
- old_ordering, nonvs, sci_force_enable }
+ old_ordering, nonvs, sci_force_enable, nobl }
See Documentation/power/video.txt for information on
s3_bios and s3_mode.
s3_beep is for debugging; it makes the PC's speaker beep
@@ -239,6 +238,9 @@
sci_force_enable causes the kernel to set SCI_EN directly
on resume from S1/S3 (which is against the ACPI spec,
but some broken systems don't work without it).
+ nobl causes the internal blacklist of systems known to
+ behave incorrectly in some ways with respect to system
+ suspend and resume to be ignored (use wisely).
acpi_use_timer_override [HW,ACPI]
Use timer override. For some broken Nvidia NF5 boards
@@ -2050,9 +2052,6 @@
This tests the locking primitive's ability to
transition abruptly to and from idle.
- locktorture.torture_runnable= [BOOT]
- Start locktorture running at boot time.
-
locktorture.torture_type= [KNL]
Specify the locking implementation to test.
@@ -3486,9 +3485,6 @@
the same as for rcuperf.nreaders.
N, where N is the number of CPUs
- rcuperf.perf_runnable= [BOOT]
- Start rcuperf running at boot time.
-
rcuperf.perf_type= [KNL]
Specify the RCU implementation to test.
@@ -3622,9 +3618,6 @@
Test RCU's dyntick-idle handling. See also the
rcutorture.shuffle_interval parameter.
- rcutorture.torture_runnable= [BOOT]
- Start rcutorture running at boot time.
-
rcutorture.torture_type= [KNL]
Specify the RCU implementation to test.
@@ -3682,7 +3675,8 @@
rdt= [HW,X86,RDT]
Turn on/off individual RDT features. List is:
- cmt, mbmtotal, mbmlocal, l3cat, l3cdp, l2cat, mba.
+ cmt, mbmtotal, mbmlocal, l3cat, l3cdp, l2cat, l2cdp,
+ mba.
E.g. to turn on cmt and turn off mba use:
rdt=cmt,!mba
diff --git a/Documentation/arm64/cpu-feature-registers.txt b/Documentation/arm64/cpu-feature-registers.txt
index bd9b3faab2c4..a70090b28b07 100644
--- a/Documentation/arm64/cpu-feature-registers.txt
+++ b/Documentation/arm64/cpu-feature-registers.txt
@@ -110,7 +110,9 @@ infrastructure:
x--------------------------------------------------x
| Name | bits | visible |
|--------------------------------------------------|
- | RES0 | [63-48] | n |
+ | RES0 | [63-52] | n |
+ |--------------------------------------------------|
+ | FHM | [51-48] | y |
|--------------------------------------------------|
| DP | [47-44] | y |
|--------------------------------------------------|
diff --git a/Documentation/arm64/elf_hwcaps.txt b/Documentation/arm64/elf_hwcaps.txt
index 89edba12a9e0..57324ee55ecc 100644
--- a/Documentation/arm64/elf_hwcaps.txt
+++ b/Documentation/arm64/elf_hwcaps.txt
@@ -158,3 +158,7 @@ HWCAP_SHA512
HWCAP_SVE
Functionality implied by ID_AA64PFR0_EL1.SVE == 0b0001.
+
+HWCAP_ASIMDFHM
+
+ Functionality implied by ID_AA64ISAR0_EL1.FHM == 0b0001.
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
index fc1c884fea10..c1d520de6dfe 100644
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -72,7 +72,7 @@ stable kernels.
| Hisilicon | Hip0{6,7} | #161010701 | N/A |
| Hisilicon | Hip07 | #161600802 | HISILICON_ERRATUM_161600802 |
| | | | |
-| Qualcomm Tech. | Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 |
+| Qualcomm Tech. | Kryo/Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 |
| Qualcomm Tech. | Falkor v1 | E1009 | QCOM_FALKOR_ERRATUM_1009 |
| Qualcomm Tech. | QDF2400 ITS | E0065 | QCOM_QDF2400_ERRATUM_0065 |
| Qualcomm Tech. | Falkor v{1,2} | E1041 | QCOM_FALKOR_ERRATUM_1041 |
diff --git a/Documentation/cgroup-v1/cgroups.txt b/Documentation/cgroup-v1/cgroups.txt
index 308e5ff7207a..059f7063eea6 100644
--- a/Documentation/cgroup-v1/cgroups.txt
+++ b/Documentation/cgroup-v1/cgroups.txt
@@ -523,12 +523,7 @@ Accessing a task's cgroup pointer may be done in the following ways:
Each subsystem should:
- add an entry in linux/cgroup_subsys.h
-- define a cgroup_subsys object called <name>_subsys
-
-If a subsystem can be compiled as a module, it should also have in its
-module initcall a call to cgroup_load_subsys(), and in its exitcall a
-call to cgroup_unload_subsys(). It should also set its_subsys.module =
-THIS_MODULE in its .c file.
+- define a cgroup_subsys object called <name>_cgrp_subsys
Each subsystem may export the following methods. The only mandatory
methods are css_alloc/free. Any others that are null are presumed to
diff --git a/Documentation/cgroup-v1/memory.txt b/Documentation/cgroup-v1/memory.txt
index cefb63639070..a4af2e124e24 100644
--- a/Documentation/cgroup-v1/memory.txt
+++ b/Documentation/cgroup-v1/memory.txt
@@ -524,9 +524,9 @@ Note:
Only anonymous and swap cache memory is listed as part of 'rss' stat.
This should not be confused with the true 'resident set size' or the
amount of physical memory used by the cgroup.
- 'rss + file_mapped" will give you resident set size of cgroup.
+ 'rss + mapped_file" will give you resident set size of cgroup.
(Note: file and shmem may be shared among other cgroups. In that case,
- file_mapped is accounted only when the memory cgroup is owner of page
+ mapped_file is accounted only when the memory cgroup is owner of page
cache.)
5.3 swappiness
diff --git a/Documentation/cgroup-v2.txt b/Documentation/cgroup-v2.txt
index 2cddab7efb20..74cdeaed9f7a 100644
--- a/Documentation/cgroup-v2.txt
+++ b/Documentation/cgroup-v2.txt
@@ -53,10 +53,14 @@ v1 is available under Documentation/cgroup-v1/.
5-3-2. Writeback
5-4. PID
5-4-1. PID Interface Files
- 5-5. RDMA
- 5-5-1. RDMA Interface Files
- 5-6. Misc
- 5-6-1. perf_event
+ 5-5. Device
+ 5-6. RDMA
+ 5-6-1. RDMA Interface Files
+ 5-7. Misc
+ 5-7-1. perf_event
+ 5-N. Non-normative information
+ 5-N-1. CPU controller root cgroup process behaviour
+ 5-N-2. IO controller root cgroup process behaviour
6. Namespace
6-1. Basics
6-2. The Root and Views
@@ -279,7 +283,7 @@ thread mode, the following conditions must be met.
exempt from this requirement.
Topology-wise, a cgroup can be in an invalid state. Please consider
-the following toplogy::
+the following topology::
A (threaded domain) - B (threaded) - C (domain, just created)
@@ -420,7 +424,9 @@ The root cgroup is exempt from this restriction. Root contains
processes and anonymous resource consumption which can't be associated
with any other cgroups and requires special treatment from most
controllers. How resource consumption in the root cgroup is governed
-is up to each controller.
+is up to each controller (for more information on this topic please
+refer to the Non-normative information section in the Controllers
+chapter).
Note that the restriction doesn't get in the way if there is no
enabled controller in the cgroup's "cgroup.subtree_control". This is
@@ -1063,10 +1069,10 @@ PAGE_SIZE multiple when read back.
reached the limit and allocation was about to fail.
Depending on context result could be invocation of OOM
- killer and retrying allocation or failing alloction.
+ killer and retrying allocation or failing allocation.
Failed allocation in its turn could be returned into
- userspace as -ENOMEM or siletly ignored in cases like
+ userspace as -ENOMEM or silently ignored in cases like
disk readahead. For now OOM in memory cgroup kills
tasks iff shortage has happened inside page fault.
@@ -1191,7 +1197,7 @@ PAGE_SIZE multiple when read back.
cgroups. The default is "max".
Swap usage hard limit. If a cgroup's swap usage reaches this
- limit, anonymous meomry of the cgroup will not be swapped out.
+ limit, anonymous memory of the cgroup will not be swapped out.
Usage Guidelines
@@ -1429,6 +1435,30 @@ through fork() or clone(). These will return -EAGAIN if the creation
of a new process would cause a cgroup policy to be violated.
+Device controller
+-----------------
+
+The device controller manages access to device files. It covers both
+the creation of new device files (using mknod) and access to
+existing device files.
+
+The cgroup v2 device controller has no interface files and is
+implemented on top of cgroup BPF. To control access to device files,
+a user may create BPF programs of the BPF_CGROUP_DEVICE type and
+attach them to cgroups. On an attempt to access a device file, the
+corresponding BPF programs are executed, and depending on the return
+value the attempt succeeds or fails with -EPERM.
+
+A BPF_CGROUP_DEVICE program takes a pointer to the bpf_cgroup_dev_ctx
+structure, which describes the device access attempt: the access type
+(mknod/read/write) and the device (type, major and minor numbers).
+If the program returns 0, the attempt fails with -EPERM; otherwise
+it succeeds.
+
+An example of a BPF_CGROUP_DEVICE program can be found in the kernel
+source tree in tools/testing/selftests/bpf/dev_cgroup.c.
+
+
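+A minimal sketch of such a program, modeled on that selftest (the
+SEC() macro and license section follow the conventions of the
+selftests' bpf_helpers.h):
+
+  #include <linux/bpf.h>
+  #include "bpf_helpers.h"  /* from tools/testing/selftests/bpf */
+
+  SEC("cgroup/dev")
+  int allow_zero_urandom(struct bpf_cgroup_dev_ctx *ctx)
+  {
+          /* allow /dev/zero (1:5) and /dev/urandom (1:9) only */
+          if (ctx->major == 1 && (ctx->minor == 5 || ctx->minor == 9))
+                  return 1;  /* access allowed */
+          return 0;          /* access denied with -EPERM */
+  }
+
+  char _license[] SEC("license") = "GPL";
+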
RDMA
----
@@ -1481,6 +1511,35 @@ always be filtered by cgroup v2 path. The controller can still be
moved to a legacy hierarchy after v2 hierarchy is populated.
+Non-normative information
+-------------------------
+
+This section contains information that isn't considered to be a part of
+the stable kernel API and so is subject to change.
+
+
+CPU controller root cgroup process behaviour
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When distributing CPU cycles in the root cgroup, each thread in this
+cgroup is treated as if it were hosted in a separate child cgroup of
+the root cgroup. This child cgroup's weight is derived from its
+thread's nice level.
+
+For details of this mapping see the sched_prio_to_weight array in
+kernel/sched/core.c (values from this array should be scaled
+appropriately so that the neutral value - nice 0 - is 100 instead
+of 1024; see the sketch below).
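+
+As an illustrative sketch of that scaling (array values quoted from
+kernel/sched/core.c and subject to change):
+
+	weight = sched_prio_to_weight[nice + 20] * 100 / 1024
+
+so a nice 0 thread maps to 1024 * 100 / 1024 = 100, and a nice -20
+thread maps to roughly 88761 * 100 / 1024 ~= 8668.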
+
+
+IO controller root cgroup process behaviour
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Root cgroup processes are hosted in an implicit leaf child node.
+When distributing IO resources, this implicit child node is taken into
+account as if it were a normal child cgroup of the root cgroup with a
+weight value of 200.
+
+
Namespace
=========
diff --git a/Documentation/circular-buffers.txt b/Documentation/circular-buffers.txt
index d4628174b7c5..53e51caa3347 100644
--- a/Documentation/circular-buffers.txt
+++ b/Documentation/circular-buffers.txt
@@ -220,8 +220,7 @@ before it writes the new tail pointer, which will erase the item.
Note the use of READ_ONCE() and smp_load_acquire() to read the
opposition index. This prevents the compiler from discarding and
-reloading its cached value - which some compilers will do across
-smp_read_barrier_depends(). This isn't strictly needed if you can
+reloading its cached value. This isn't strictly needed if you can
be sure that the opposition index will _only_ be used the once.
The smp_load_acquire() additionally forces the CPU to order against
subsequent memory references. Similarly, smp_store_release() is used
diff --git a/Documentation/device-mapper/cache-policies.txt b/Documentation/device-mapper/cache-policies.txt
index d3ca8af21a31..86786d87d9a8 100644
--- a/Documentation/device-mapper/cache-policies.txt
+++ b/Documentation/device-mapper/cache-policies.txt
@@ -60,7 +60,7 @@ Memory usage:
The mq policy used a lot of memory; 88 bytes per cache block on a 64
bit machine.
-smq uses 28bit indexes to implement it's data structures rather than
+smq uses 28-bit indexes to implement its data structures rather than
pointers. It avoids storing an explicit hit count for each block. It
has a 'hotspot' queue, rather than a pre-cache, which uses a quarter of
the entries (each hotspot block covers a larger area than a single
@@ -84,7 +84,7 @@ resulting in better promotion/demotion decisions.
Adaptability:
The mq policy maintained a hit count for each cache block. For a
-different block to get promoted to the cache it's hit count has to
+different block to get promoted to the cache, its hit count has to
exceed the lowest currently in the cache. This meant it could take a
long time for the cache to adapt between varying IO patterns.
diff --git a/Documentation/device-mapper/cache.txt b/Documentation/device-mapper/cache.txt
index cdfd0feb294e..ff0841711fd5 100644
--- a/Documentation/device-mapper/cache.txt
+++ b/Documentation/device-mapper/cache.txt
@@ -59,7 +59,7 @@ Fixed block size
The origin is divided up into blocks of a fixed size. This block size
is configurable when you first create the cache. Typically we've been
using block sizes of 256KB - 1024KB. The block size must be between 64
-(32KB) and 2097152 (1GB) and a multiple of 64 (32KB).
+sectors (32KB) and 2097152 sectors (1GB) and a multiple of 64 sectors (32KB).
Having a fixed block size simplifies the target a lot. But it is
something of a compromise. For instance, a small part of a block may be
@@ -119,7 +119,7 @@ doing here to avoid migrating during those peak io moments.
For the time being, a message "migration_threshold <#sectors>"
can be used to set the maximum number of sectors being migrated,
-the default being 204800 sectors (or 100MB).
+the default being 2048 sectors (1MB).
Updating on-disk metadata
-------------------------
@@ -143,11 +143,6 @@ the policy how big this chunk is, but it should be kept small. Like the
dirty flags this data is lost if there's a crash so a safe fallback
value should always be possible.
-For instance, the 'mq' policy, which is currently the default policy,
-uses this facility to store the hit count of the cache blocks. If
-there's a crash this information will be lost, which means the cache
-may be less efficient until those hit counts are regenerated.
-
Policy hints affect performance, not correctness.
Policy messaging
diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt
index 32df07e29f68..390c145f01d7 100644
--- a/Documentation/device-mapper/dm-raid.txt
+++ b/Documentation/device-mapper/dm-raid.txt
@@ -343,5 +343,8 @@ Version History
1.11.0 Fix table line argument order
(wrong raid10_copies/raid10_format sequence)
1.11.1 Add raid4/5/6 journal write-back support via journal_mode option
-1.12.1 fix for MD deadlock between mddev_suspend() and md_write_start() available
+1.12.1 Fix for MD deadlock between mddev_suspend() and md_write_start() available
1.13.0 Fix dev_health status at end of "recover" (was 'a', now 'A')
+1.13.1 Fix deadlock caused by early md_stop_writes(). Also fix size and
+ state races.
+1.13.2 Fix raid redundancy validation and avoid keeping raid set frozen
diff --git a/Documentation/device-mapper/snapshot.txt b/Documentation/device-mapper/snapshot.txt
index ad6949bff2e3..b8bbb516f989 100644
--- a/Documentation/device-mapper/snapshot.txt
+++ b/Documentation/device-mapper/snapshot.txt
@@ -49,6 +49,10 @@ The difference between persistent and transient is with transient
snapshots less metadata must be saved on disk - they can be kept in
memory by the kernel.
+When loading or unloading the snapshot target, the corresponding
+snapshot-origin or snapshot-merge target must be suspended. A failure to
+suspend the origin target could result in data corruption.
+
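+A sketch of the required ordering when removing a snapshot (device
+names are illustrative):
+
+	dmsetup suspend base   # suspend the snapshot-origin target first
+	dmsetup remove snap1   # the snapshot target can now be unloaded
+	dmsetup resume base
+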
* snapshot-merge <origin> <COW device> <persistent> <chunksize>
diff --git a/Documentation/device-mapper/thin-provisioning.txt b/Documentation/device-mapper/thin-provisioning.txt
index 1699a55b7b70..4bcd4b7f79f9 100644
--- a/Documentation/device-mapper/thin-provisioning.txt
+++ b/Documentation/device-mapper/thin-provisioning.txt
@@ -112,9 +112,11 @@ $low_water_mark is expressed in blocks of size $data_block_size. If
free space on the data device drops below this level then a dm event
will be triggered which a userspace daemon should catch allowing it to
extend the pool device. Only one such event will be sent.
-Resuming a device with a new table itself triggers an event so the
-userspace daemon can use this to detect a situation where a new table
-already exceeds the threshold.
+
+No special event is triggered if a just-resumed device's free space is
+the low water mark. However, resuming a device always triggers an
+event; a userspace daemon should verify that free space exceeds the low
+water mark when handling this event.
A low water mark for the metadata device is maintained in the kernel and
will trigger a dm event if free space on the metadata device drops below
@@ -274,7 +276,8 @@ ii) Status
<transaction id> <used metadata blocks>/<total metadata blocks>
<used data blocks>/<total data blocks> <held metadata root>
- [no_]discard_passdown ro|rw
+ ro|rw|out_of_data_space [no_]discard_passdown [error|queue]_if_no_space
+ needs_check|-
transaction id:
A 64-bit number used by userspace to help synchronise with metadata
@@ -394,3 +397,6 @@ ii) Status
If the pool has encountered device errors and failed, the status
will just contain the string 'Fail'. The userspace recovery
tools should then be used.
+
+ In the case where <nr mapped sectors> is 0, there is no highest
+ mapped sector and the value of <highest mapped sector> is unspecified.
diff --git a/Documentation/device-mapper/unstriped.txt b/Documentation/device-mapper/unstriped.txt
new file mode 100644
index 000000000000..0b2a306c54ee
--- /dev/null
+++ b/Documentation/device-mapper/unstriped.txt
@@ -0,0 +1,124 @@
+Introduction
+============
+
+The device-mapper "unstriped" target provides a transparent mechanism to
+unstripe a device-mapper "striped" target to access the underlying disks
+without having to touch the true backing block-device. It can also be
+used to unstripe a hardware RAID-0 to access backing disks.
+
+Parameters:
+<number of stripes> <chunk size> <stripe #> <dev_path> <offset>
+
+<number of stripes>
+        The number of stripes in the RAID 0.
+
+<chunk size>
+        The number of 512B sectors in the chunk striping.
+
+<stripe #>
+        The stripe number within the device that corresponds to the
+        physical drive you wish to unstripe. This must be 0 indexed.
+
+<dev_path>
+        The block device you wish to unstripe.
+
+<offset>
+        The offset, in 512B sectors, within <dev_path> at which the
+        striped data begins.
+
+
+Why use this module?
+====================
+
+An example of undoing an existing dm-stripe
+-------------------------------------------
+
+This small bash script will set up 4 loop devices and use the existing
+striped target to combine the 4 devices into one. It will then use
+the unstriped target on top of the striped device to access the
+individual backing loop devices. We write data to the newly exposed
+unstriped devices and verify that the data written matches the correct
+underlying device on the striped array.
+
+#!/bin/bash
+
+MEMBER_SIZE=$((128 * 1024 * 1024))
+NUM=4
+SEQ_END=$((${NUM}-1))
+CHUNK=256
+BS=4096
+
+RAID_SIZE=$((${MEMBER_SIZE}*${NUM}/512))
+DM_PARMS="0 ${RAID_SIZE} striped ${NUM} ${CHUNK}"
+COUNT=$((${MEMBER_SIZE} / ${BS}))
+
+for i in $(seq 0 ${SEQ_END}); do
+ dd if=/dev/zero of=member-${i} bs=${MEMBER_SIZE} count=1 oflag=direct
+ losetup /dev/loop${i} member-${i}
+ DM_PARMS+=" /dev/loop${i} 0"
+done
+
+echo $DM_PARMS | dmsetup create raid0
+for i in $(seq 0 ${SEQ_END}); do
+  echo "0 $((${RAID_SIZE}/${NUM})) unstriped ${NUM} ${CHUNK} ${i} /dev/mapper/raid0 0" | dmsetup create set-${i}
+done;
+
+for i in $(seq 0 ${SEQ_END}); do
+ dd if=/dev/urandom of=/dev/mapper/set-${i} bs=${BS} count=${COUNT} oflag=direct
+ diff /dev/mapper/set-${i} member-${i}
+done;
+
+for i in $(seq 0 ${SEQ_END}); do
+ dmsetup remove set-${i}
+done
+
+dmsetup remove raid0
+
+for i in $(seq 0 ${SEQ_END}); do
+ losetup -d /dev/loop${i}
+ rm -f member-${i}
+done
+
+Another example
+---------------
+
+Intel NVMe drives contain two cores on the physical device.
+Each core of the drive has segregated access to its LBA range.
+The current LBA model has a RAID 0 128k chunk on each core, resulting
+in a 256k stripe across the two cores:
+
+ Core 0: Core 1:
+ __________ __________
+ | LBA 512| | LBA 768|
+ | LBA 0 | | LBA 256|
+ ---------- ----------
+
+The purpose of this unstriping is to provide better QoS in noisy
+neighbor environments. When two partitions are created on the
+aggregate drive without this unstriping, reads on one partition
+can affect writes on another partition. This is because the partitions
+are striped across the two cores. When we unstripe this hardware RAID 0
+and create partitions on each newly exposed device, the two partitions
+are physically separated.
+
+With the dm-unstriped target we can segregate an fio script with
+read and write jobs that are independent of each other. Compared to
+running the test on a combined drive with partitions, we were able
+to get a 92% reduction in read latency using this device mapper target.
+
+
+Example dmsetup usage
+=====================
+
+unstriped on top of an Intel NVMe device that has 2 cores
+-----------------------------------------------------
+dmsetup create nvmset0 --table '0 512 unstriped 2 256 0 /dev/nvme0n1 0'
+dmsetup create nvmset1 --table '0 512 unstriped 2 256 1 /dev/nvme0n1 0'
+
+There will now be two devices that expose Intel NVMe core 0 and 1
+respectively:
+/dev/mapper/nvmset0
+/dev/mapper/nvmset1
+
+unstriped on top of striped with 4 drives using a 128K chunk size
+--------------------------------------------------------------
+dmsetup create raid_disk0 --table '0 512 unstriped 4 256 0 /dev/mapper/striped 0'
+dmsetup create raid_disk1 --table '0 512 unstriped 4 256 1 /dev/mapper/striped 0'
+dmsetup create raid_disk2 --table '0 512 unstriped 4 256 2 /dev/mapper/striped 0'
+dmsetup create raid_disk3 --table '0 512 unstriped 4 256 3 /dev/mapper/striped 0'
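+
+The '512' sector counts in the tables above are illustrative. A short
+sketch for deriving the per-stripe length from the parent device instead
+(the device path and chunk size are assumptions, not from this document):
+
+NUM=4
+CHUNK=256                                # 128K chunk in 512B sectors
+DEV=/dev/mapper/striped
+SIZE=$(blockdev --getsz ${DEV})          # parent size in 512B sectors
+LEN=$((${SIZE} / ${NUM}))                # sectors exposed per stripe
+
+for i in $(seq 0 $((${NUM}-1))); do
+    echo "0 ${LEN} unstriped ${NUM} ${CHUNK} ${i} ${DEV} 0" | dmsetup create raid_disk${i}
+done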
diff --git a/Documentation/devicetree/bindings/arm/arm-dsu-pmu.txt b/Documentation/devicetree/bindings/arm/arm-dsu-pmu.txt
new file mode 100644
index 000000000000..6efabba530f1
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/arm-dsu-pmu.txt
@@ -0,0 +1,27 @@
+* ARM DynamIQ Shared Unit (DSU) Performance Monitor Unit (PMU)
+
+ARM DynamIQ Shared Unit (DSU) integrates one or more CPU cores
+with a shared L3 memory system, control logic and external interfaces to
+form a multicore cluster. The PMU enables gathering various statistics on
+the operation of the DSU. The PMU provides independent 32-bit counters that
+can count any of the supported events, along with a 64-bit cycle counter.
+The PMU is accessed via CPU system registers and has no MMIO component.
+
+** DSU PMU required properties:
+
+- compatible : should be one of :
+
+ "arm,dsu-pmu"
+
+- interrupts : Exactly 1 SPI must be listed.
+
+- cpus : List of phandles for the CPUs connected to this DSU instance.
+
+
+** Example:
+
+dsu-pmu-0 {
+ compatible = "arm,dsu-pmu";
+ interrupts = <GIC_SPI 02 IRQ_TYPE_LEVEL_HIGH>;
+ cpus = <&cpu_0>, <&cpu_1>;
+};
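+
+Once the driver has probed, the counters are typically used from
+userspace via perf. A hedged usage sketch (the PMU instance name
+"arm_dsu_0" and the "cycles" event are assumptions; check
+/sys/bus/event_source/devices on the target):
+
+  perf stat -a -e arm_dsu_0/cycles/ sleep 1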
diff --git a/Documentation/devicetree/bindings/arm/firmware/sdei.txt b/Documentation/devicetree/bindings/arm/firmware/sdei.txt
new file mode 100644
index 000000000000..ee3f0ff49889
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/firmware/sdei.txt
@@ -0,0 +1,42 @@
+* Software Delegated Exception Interface (SDEI)
+
+Firmware implementing the SDEI functions described in ARM document number
+ARM DEN 0054A ("Software Delegated Exception Interface") can be used by
+Linux to receive notification of events such as those generated by
+firmware-first error handling, or from an IRQ that has been promoted to
+a firmware-assisted NMI.
+
+The interface provides a number of API functions for registering callbacks
+and enabling/disabling events. Functions are invoked by trapping to the
+privilege level of the SDEI firmware (specified as part of the binding
+below) and passing arguments in a manner specified by the "SMC Calling
+Convention" (ARM DEN 0028B):
+
+ r0 => 32-bit Function ID / return value
+ {r1 - r3} => Parameters
+
+Note that the immediate field of the trapping instruction must be set
+to #0.
+
+The SDEI_EVENT_REGISTER function registers a callback in the kernel
+text to handle the specified event number.
+
+The sdei node should be a child node of '/firmware' and have required
+properties:
+
+ - compatible : should contain:
+ * "arm,sdei-1.0" : For implementations complying to SDEI version 1.x.
+
+ - method : The method of calling the SDEI firmware. Permitted
+ values are:
+ * "smc" : SMC #0, with the register assignments specified in this
+ binding.
+ * "hvc" : HVC #0, with the register assignments specified in this
+ binding.
+
+Example:
+ firmware {
+ sdei {
+ compatible = "arm,sdei-1.0";
+ method = "smc";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/arm/marvell/armada-37xx.txt b/Documentation/devicetree/bindings/arm/marvell/armada-37xx.txt
index 51336e5fc761..35c3c3460d17 100644
--- a/Documentation/devicetree/bindings/arm/marvell/armada-37xx.txt
+++ b/Documentation/devicetree/bindings/arm/marvell/armada-37xx.txt
@@ -14,3 +14,22 @@ following property before the previous one:
Example:
compatible = "marvell,armada-3720-db", "marvell,armada3720", "marvell,armada3710";
+
+
+Power management
+----------------
+
+For power management (particularly DVFS and AVS), the North Bridge
+Power Management component is needed:
+
+Required properties:
+- compatible : should contain "marvell,armada-3700-nb-pm", "syscon";
+- reg : the register start and length for the North Bridge
+ Power Management
+
+Example:
+
+nb_pm: syscon@14000 {
+ compatible = "marvell,armada-3700-nb-pm", "syscon";
+ reg = <0x14000 0x60>;
+};
diff --git a/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt b/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt
new file mode 100644
index 000000000000..cec8d5d74e26
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt
@@ -0,0 +1,22 @@
+Arm TrustZone CryptoCell cryptographic engine
+
+Required properties:
+- compatible: Should be "arm,cryptocell-712-ree".
+- reg: Base physical address of the engine and length of memory mapped region.
+- interrupts: Interrupt number for the device.
+
+Optional properties:
+- interrupt-parent: The phandle for the interrupt controller that services
+ interrupts for this device.
+- clocks: Reference to the crypto engine clock.
+- dma-coherent: Present if dma operations are coherent.
+
+Examples:
+
+ arm_cc712: crypto@80000000 {
+ compatible = "arm,cryptocell-712-ree";
+ interrupt-parent = <&intc>;
+ interrupts = < 0 30 4 >;
+ reg = < 0x80000000 0x10000 >;
+	};
diff --git a/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt b/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt
index fbc07d12322f..30c3ce6b502e 100644
--- a/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt
+++ b/Documentation/devicetree/bindings/crypto/inside-secure-safexcel.txt
@@ -1,7 +1,8 @@
Inside Secure SafeXcel cryptographic engine
Required properties:
-- compatible: Should be "inside-secure,safexcel-eip197".
+- compatible: Should be "inside-secure,safexcel-eip197" or
+ "inside-secure,safexcel-eip97".
- reg: Base physical address of the engine and length of memory mapped region.
- interrupts: Interrupt numbers for the rings and engine.
- interrupt-names: Should be "ring0", "ring1", "ring2", "ring3", "eip", "mem".
diff --git a/Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt b/Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt
index 4ca8dd4d7e66..a13fbdb4bd88 100644
--- a/Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt
+++ b/Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt
@@ -2,7 +2,9 @@ Exynos Pseudo Random Number Generator
Required properties:
-- compatible : Should be "samsung,exynos4-rng".
+- compatible : One of:
+ - "samsung,exynos4-rng" for Exynos4210 and Exynos4412
+ - "samsung,exynos5250-prng" for Exynos5250+
- reg : Specifies base physical address and size of the registers map.
- clocks : Phandle to clock-controller plus clock-specifier pair.
- clock-names : "secss" as a clock name.
diff --git a/Documentation/devicetree/bindings/crypto/st,stm32-cryp.txt b/Documentation/devicetree/bindings/crypto/st,stm32-cryp.txt
new file mode 100644
index 000000000000..970487fa40b8
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/st,stm32-cryp.txt
@@ -0,0 +1,19 @@
+* STMicroelectronics STM32 CRYP
+
+Required properties:
+- compatible: Should be "st,stm32f756-cryp".
+- reg: The address and length of the peripheral registers space
+- clocks: The input clock of the CRYP instance
+- interrupts: The CRYP interrupt
+
+Optional properties:
+- resets: The input reset of the CRYP instance
+
+Example:
+crypto@50060000 {
+ compatible = "st,stm32f756-cryp";
+ reg = <0x50060000 0x400>;
+ interrupts = <79>;
+ clocks = <&rcc 0 STM32F7_AHB2_CLOCK(CRYP)>;
+ resets = <&rcc STM32F7_AHB2_RESET(CRYP)>;
+};
diff --git a/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt b/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt
index b3408cc57be6..1ae4748730a8 100644
--- a/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt
+++ b/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt
@@ -47,8 +47,8 @@ When the OS is not in control of the management interface (i.e. it's a guest),
the channel nodes appear on their own, not under a management node.
Required properties:
-- compatible: must contain "qcom,hidma-1.0" for initial HW or "qcom,hidma-1.1"
-for MSI capable HW.
+- compatible: must contain "qcom,hidma-1.0" for initial HW or
+ "qcom,hidma-1.1"/"qcom,hidma-1.2" for MSI capable HW.
- reg: Addresses for the transfer and event channel
- interrupts: Should contain the event interrupt
- desc-count: Number of asynchronous requests this channel can handle
diff --git a/Documentation/devicetree/bindings/gpio/gpio-axp209.txt b/Documentation/devicetree/bindings/gpio/gpio-axp209.txt
index a6611304dd3c..fc42b2caa06d 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-axp209.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-axp209.txt
@@ -1,10 +1,17 @@
-AXP209 GPIO controller
+AXP209 GPIO & pinctrl controller
This driver follows the usual GPIO bindings found in
Documentation/devicetree/bindings/gpio/gpio.txt
+This driver follows the usual pinctrl bindings found in
+Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+
+This driver employs the per-pin muxing pattern.
+
Required properties:
-- compatible: Should be "x-powers,axp209-gpio"
+- compatible: Should be one of:
+ - "x-powers,axp209-gpio"
+ - "x-powers,axp813-gpio"
- #gpio-cells: Should be two. The first cell is the pin number and the
second is the GPIO flags.
- gpio-controller: Marks the device node as a GPIO controller.
@@ -28,3 +35,41 @@ axp209: pmic@34 {
#gpio-cells = <2>;
};
};
+
+The GPIOs can be muxed to other functions; the pin configuration nodes
+must therefore be subnodes of axp_gpio.
+
+Example:
+
+&axp_gpio {
+ gpio0_adc: gpio0-adc {
+ pins = "GPIO0";
+ function = "adc";
+ };
+};
+
+&example_node {
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio0_adc>;
+};
+
+GPIOs and their functions
+-------------------------
+
+Each GPIO is independent of the others (i.e. GPIO0 in the gpio_in function
+does not force GPIO1 and GPIO2 to be in the gpio_in function as well).
+
+axp209
+------
+GPIO | Functions
+------------------------
+GPIO0 | gpio_in, gpio_out, ldo, adc
+GPIO1 | gpio_in, gpio_out, ldo, adc
+GPIO2 | gpio_in, gpio_out
+
+axp813
+------
+GPIO | Functions
+------------------------
+GPIO0 | gpio_in, gpio_out, ldo, adc
+GPIO1 | gpio_in, gpio_out, ldo
diff --git a/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt b/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
index a7ac460ad657..9474138d776e 100644
--- a/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
+++ b/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
@@ -5,7 +5,7 @@ Required Properties:
- compatible: should contain one or more of the following:
- "renesas,gpio-r8a7743": for R8A7743 (RZ/G1M) compatible GPIO controller.
- "renesas,gpio-r8a7745": for R8A7745 (RZ/G1E) compatible GPIO controller.
- - "renesas,gpio-r8a7778": for R8A7778 (R-Mobile M1) compatible GPIO controller.
+ - "renesas,gpio-r8a7778": for R8A7778 (R-Car M1) compatible GPIO controller.
- "renesas,gpio-r8a7779": for R8A7779 (R-Car H1) compatible GPIO controller.
- "renesas,gpio-r8a7790": for R8A7790 (R-Car H2) compatible GPIO controller.
- "renesas,gpio-r8a7791": for R8A7791 (R-Car M2-W) compatible GPIO controller.
diff --git a/Documentation/devicetree/bindings/hwmon/aspeed-pwm-tacho.txt b/Documentation/devicetree/bindings/hwmon/aspeed-pwm-tacho.txt
index 367c8203213b..3ac02988a1a5 100644
--- a/Documentation/devicetree/bindings/hwmon/aspeed-pwm-tacho.txt
+++ b/Documentation/devicetree/bindings/hwmon/aspeed-pwm-tacho.txt
@@ -22,8 +22,9 @@ Required properties for pwm-tacho node:
- compatible : should be "aspeed,ast2400-pwm-tacho" for AST2400 and
"aspeed,ast2500-pwm-tacho" for AST2500.
-- clocks : a fixed clock providing input clock frequency(PWM
- and Fan Tach clock)
+- clocks : phandle to clock provider with the clock number in the second cell
+
+- resets : phandle to reset controller with the reset number in the second cell
fan subnode format:
===================
@@ -48,19 +49,14 @@ Required properties for each child node:
Examples:
-pwm_tacho_fixed_clk: fixedclk {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <24000000>;
-};
-
pwm_tacho: pwmtachocontroller@1e786000 {
#address-cells = <1>;
#size-cells = <1>;
#cooling-cells = <2>;
reg = <0x1E786000 0x1000>;
compatible = "aspeed,ast2500-pwm-tacho";
- clocks = <&pwm_tacho_fixed_clk>;
+ clocks = <&syscon ASPEED_CLK_APB>;
+ resets = <&syscon ASPEED_RESET_PWM>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_pwm0_default &pinctrl_pwm1_default>;
diff --git a/Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.txt b/Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.txt
new file mode 100644
index 000000000000..e9ebb8a20e0d
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/sigma-delta-modulator.txt
@@ -0,0 +1,13 @@
+Device-Tree bindings for sigma delta modulator
+
+Required properties:
+- compatible: should be "ads1201" or "sd-modulator". "sd-modulator" can be
+  used as a generic SD modulator if the modulator is not listed in the
+  compatible list.
+- #io-channel-cells = <1>: See the IIO bindings section "IIO consumers".
+
+Example node:
+
+ ads1202: adc@0 {
+ compatible = "sd-modulator";
+ #io-channel-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.txt b/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.txt
new file mode 100644
index 000000000000..911492da48f3
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.txt
@@ -0,0 +1,128 @@
+STMicroelectronics STM32 DFSDM ADC device driver
+
+
+STM32 DFSDM ADC is a sigma delta analog-to-digital converter dedicated to
+interfacing external sigma delta modulators to STM32 microcontrollers.
+It is mainly targeted for:
+- Sigma delta modulators (motor control, metering...)
+- PDM microphones (audio digital microphone)
+
+It features up to 8 serial digital interfaces (SPI or Manchester) and
+up to 4 filters on stm32h7.
+
+Each child node matches a filter instance.
+
+Contents of a STM32 DFSDM root node:
+------------------------------------
+Required properties:
+- compatible: Should be "st,stm32h7-dfsdm".
+- reg: Offset and length of the DFSDM block register set.
+- clocks: IP and serial interfaces clocking. Should be set according
+ to rcc clock ID and "clock-names".
+- clock-names: Input clock name "dfsdm" must be defined; "audio" is
+  optional. If defined, CLKOUT is based on the audio clock, else
+  "dfsdm" is used.
+- #interrupt-cells = <1>;
+- #address-cells = <1>;
+- #size-cells = <0>;
+
+Optional properties:
+- spi-max-frequency: Requested only for SPI master mode.
+ SPI clock OUT frequency (Hz). This clock must be set according
+ to "clock" property. Frequency must be a multiple of the rcc
+ clock frequency. If not, SPI CLKOUT frequency will not be
+ accurate.
+
+Contents of the STM32 DFSDM child nodes:
+--------------------------------------
+
+Required properties:
+- compatible: Must be:
+ "st,stm32-dfsdm-adc" for sigma delta ADCs
+ "st,stm32-dfsdm-dmic" for audio digital microphone.
+- reg: Specifies the DFSDM filter instance used.
+- interrupts: IRQ lines connected to each DFSDM filter instance.
+- st,adc-channels: List of single-ended channels muxed for this ADC.
+ valid values:
+ "st,stm32h7-dfsdm" compatibility: 0 to 7.
+- st,adc-channel-names: List of single-ended channel names.
+- st,filter-order: SinC filter order from 0 to 5.
+ 0: FastSinC
+ [1-5]: order 1 to 5.
+  For audio purposes it is recommended to use order 3 to 5.
+- #io-channel-cells = <1>: See the IIO bindings section "IIO consumers".
+
+Required properties for "st,stm32-dfsdm-adc" compatibility:
+- io-channels: From common IIO binding. Used to pipe external sigma delta
+ modulator or internal ADC output to DFSDM channel.
+  This is not required for "st,stm32-dfsdm-dmic" compatibility, as the
+  PDM microphone is bound in the audio DT node.
+
+Required properties for "st,stm32-dfsdm-dmic" compatibility:
+- #sound-dai-cells: Must be set to 0.
+- dmas: DMA controller phandle and DMA request line associated with the
+  filter instance (specified by the "reg" property)
+- dma-names: Must be "rx"
+
+Optional properties:
+- st,adc-channel-types: Single-ended channel input type.
+ - "SPI_R": SPI with data on rising edge (default)
+ - "SPI_F": SPI with data on falling edge
+ - "MANCH_R": manchester codec, rising edge = logic 0
+ - "MANCH_F": manchester codec, falling edge = logic 1
+- st,adc-channel-clk-src: Conversion clock source.
+ - "CLKIN": external SPI clock (CLKIN x)
+ - "CLKOUT": internal SPI clock (CLKOUT) (default)
+ - "CLKOUT_F": internal SPI clock divided by 2 (falling edge).
+ - "CLKOUT_R": internal SPI clock divided by 2 (rising edge).
+
+- st,adc-alt-channel: Must be defined if two sigma delta modulators are
+ connected on same SPI input.
+ If not set, channel n is connected to SPI input n.
+ If set, channel n is connected to SPI input n + 1.
+
+- st,filter0-sync: Set to 1 to synchronize with DFSDM filter instance 0.
+ Used for multi microphones synchronization.
+
+Example of a sigma delta ADC connected on DFSDM SPI port 0
+and a PDM microphone connected on DFSDM SPI port 1:
+
+ ads1202: simple_sd_adc@0 {
+ compatible = "ads1202";
+ #io-channel-cells = <1>;
+ };
+
+ dfsdm: dfsdm@40017000 {
+ compatible = "st,stm32h7-dfsdm";
+ reg = <0x40017000 0x400>;
+ clocks = <&rcc DFSDM1_CK>;
+ clock-names = "dfsdm";
+ #interrupt-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dfsdm_adc0: filter@0 {
+ compatible = "st,stm32-dfsdm-adc";
+ #io-channel-cells = <1>;
+ reg = <0>;
+ interrupts = <110>;
+ st,adc-channels = <0>;
+ st,adc-channel-names = "sd_adc0";
+ st,adc-channel-types = "SPI_F";
+ st,adc-channel-clk-src = "CLKOUT";
+ io-channels = <&ads1202 0>;
+ st,filter-order = <3>;
+ };
+ dfsdm_pdm1: filter@1 {
+ compatible = "st,stm32-dfsdm-dmic";
+ reg = <1>;
+ interrupts = <111>;
+ dmas = <&dmamux1 102 0x400 0x00>;
+ dma-names = "rx";
+ st,adc-channels = <1>;
+ st,adc-channel-names = "dmic1";
+ st,adc-channel-types = "SPI_R";
+ st,adc-channel-clk-src = "CLKOUT";
+ st,filter-order = <5>;
+ };
+	};
diff --git a/Documentation/devicetree/bindings/input/hid-over-i2c.txt b/Documentation/devicetree/bindings/input/hid-over-i2c.txt
index 28e8bd8b7d64..4d3da9d91de4 100644
--- a/Documentation/devicetree/bindings/input/hid-over-i2c.txt
+++ b/Documentation/devicetree/bindings/input/hid-over-i2c.txt
@@ -31,7 +31,7 @@ device-specific compatible properties, which should be used in addition to the
- vdd-supply: phandle of the regulator that provides the supply voltage.
- post-power-on-delay-ms: time required by the device after enabling its regulators
- before it is ready for communication. Must be used with 'vdd-supply'.
+ or powering it on, before it is ready for communication.
Example:
diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2836-l1-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2836-l1-intc.txt
index f320dcd6e69b..8ced1696c325 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2836-l1-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2836-l1-intc.txt
@@ -12,7 +12,7 @@ Required properties:
registers
- interrupt-controller: Identifies the node as an interrupt controller
- #interrupt-cells: Specifies the number of cells needed to encode an
- interrupt source. The value shall be 1
+ interrupt source. The value shall be 2
Please refer to interrupts.txt in this directory for details of the common
Interrupt Controllers bindings used by client devices.
@@ -32,6 +32,6 @@ local_intc: local_intc {
compatible = "brcm,bcm2836-l1-intc";
reg = <0x40000000 0x100>;
interrupt-controller;
- #interrupt-cells = <1>;
+ #interrupt-cells = <2>;
interrupt-parent = <&local_intc>;
};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/google,goldfish-pic.txt b/Documentation/devicetree/bindings/interrupt-controller/google,goldfish-pic.txt
new file mode 100644
index 000000000000..35f752706e7d
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/google,goldfish-pic.txt
@@ -0,0 +1,30 @@
+Android Goldfish PIC
+
+Android Goldfish programmable interrupt device used by the Android
+emulator.
+
+Required properties:
+
+- compatible : should contain "google,goldfish-pic"
+- reg : <registers mapping>
+- interrupts : <interrupt mapping>
+
+Example for MIPS when used in cascade mode:
+
+ cpuintc {
+ #interrupt-cells = <0x1>;
+ #address-cells = <0>;
+ interrupt-controller;
+ compatible = "mti,cpu-interrupt-controller";
+ };
+
+ interrupt-controller@1f000000 {
+ compatible = "google,goldfish-pic";
+ reg = <0x1f000000 0x1000>;
+
+ interrupt-controller;
+ #interrupt-cells = <0x1>;
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <0x2>;
+ };
diff --git a/Documentation/devicetree/bindings/leds/leds-lm3692x.txt b/Documentation/devicetree/bindings/leds/leds-lm3692x.txt
new file mode 100644
index 000000000000..6c9074f84a51
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-lm3692x.txt
@@ -0,0 +1,49 @@
+* Texas Instruments - LM3692x Highly Efficient White LED Driver
+
+The LM3692x is an ultra-compact, highly efficient,
+white-LED driver designed for LCD display backlighting.
+
+The main difference between the LM36922 and LM36923 is the number of
+LED strings supported: the LM36922 supports two strings while the LM36923
+supports three.
+
+Required properties:
+ - compatible:
+ "ti,lm36922"
+ "ti,lm36923"
+ - reg : I2C slave address
+ - #address-cells : 1
+ - #size-cells : 0
+
+Optional properties:
+ - enable-gpios : gpio pin to enable/disable the device.
+ - vled-supply : LED supply
+
+Required child properties:
+ - reg : 0
+
+Optional child properties:
+ - label : see Documentation/devicetree/bindings/leds/common.txt
+ - linux,default-trigger :
+ see Documentation/devicetree/bindings/leds/common.txt
+
+Example:
+
+led-controller@36 {
+	compatible = "ti,lm36922";
+ reg = <0x36>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ enable-gpios = <&gpio1 28 GPIO_ACTIVE_HIGH>;
+ vled-supply = <&vbatt>;
+
+ led@0 {
+ reg = <0>;
+ label = "white:backlight_cluster";
+ linux,default-trigger = "backlight";
+ };
+};
+
+For more product information please see the link below:
+http://www.ti.com/lit/ds/snvsa29/snvsa29.pdf
diff --git a/Documentation/devicetree/bindings/leds/leds-lp8860.txt b/Documentation/devicetree/bindings/leds/leds-lp8860.txt
index aad38dd94d4b..5f0e892ad759 100644
--- a/Documentation/devicetree/bindings/leds/leds-lp8860.txt
+++ b/Documentation/devicetree/bindings/leds/leds-lp8860.txt
@@ -6,23 +6,39 @@ current sinks that can be controlled by a PWM input
signal, a SPI/I2C master, or both.
Required properties:
- - compatible:
+ - compatible :
"ti,lp8860"
- - reg - I2C slave address
- - label - Used for naming LEDs
+ - reg : I2C slave address
+ - #address-cells : 1
+ - #size-cells : 0
Optional properties:
- - enable-gpio - gpio pin to enable/disable the device.
- - supply - "vled" - LED supply
+ - enable-gpios : gpio pin to enable (active high)/disable the device.
+ - vled-supply : LED supply
+
+Required child properties:
+ - reg : 0
+
+Optional child properties:
+ - label : see Documentation/devicetree/bindings/leds/common.txt
+ - linux,default-trigger :
+ see Documentation/devicetree/bindings/leds/common.txt
Example:
-leds: leds@6 {
+led-controller@2d {
compatible = "ti,lp8860";
+ #address-cells = <1>;
+ #size-cells = <0>;
reg = <0x2d>;
- label = "display_cluster";
- enable-gpio = <&gpio1 28 GPIO_ACTIVE_HIGH>;
+ enable-gpios = <&gpio1 28 GPIO_ACTIVE_HIGH>;
vled-supply = <&vbatt>;
+
+ led@0 {
+ reg = <0>;
+ label = "white:backlight";
+ linux,default-trigger = "backlight";
+ };
}
For more product information please see the link below:
diff --git a/Documentation/devicetree/bindings/mfd/mc13xxx.txt b/Documentation/devicetree/bindings/mfd/mc13xxx.txt
index ac235fe385fc..8261ea73278a 100644
--- a/Documentation/devicetree/bindings/mfd/mc13xxx.txt
+++ b/Documentation/devicetree/bindings/mfd/mc13xxx.txt
@@ -130,7 +130,7 @@ ecspi@70010000 { /* ECSPI1 */
#size-cells = <0>;
led-control = <0x000 0x000 0x0e0 0x000>;
- sysled {
+ sysled@3 {
reg = <3>;
label = "system:red:live";
linux,default-trigger = "heartbeat";
diff --git a/Documentation/devicetree/bindings/mfd/syscon.txt b/Documentation/devicetree/bindings/mfd/syscon.txt
index 8b92d4576c42..25d9e9c2fd53 100644
--- a/Documentation/devicetree/bindings/mfd/syscon.txt
+++ b/Documentation/devicetree/bindings/mfd/syscon.txt
@@ -16,9 +16,17 @@ Required properties:
Optional property:
- reg-io-width: the size (in bytes) of the IO accesses that should be
performed on the device.
+- hwlocks: phandle and hwlock specifier referencing a hardware spinlock
+  provider node.
Examples:
gpr: iomuxc-gpr@20e0000 {
compatible = "fsl,imx6q-iomuxc-gpr", "syscon";
reg = <0x020e0000 0x38>;
+ hwlocks = <&hwlock1 1>;
+};
+
+hwlock1: hwspinlock@40500000 {
+ ...
+ reg = <0x40500000 0x1000>;
+ #hwlock-cells = <1>;
};
diff --git a/Documentation/devicetree/bindings/mmc/mtk-sd.txt b/Documentation/devicetree/bindings/mmc/mtk-sd.txt
index 72d2a734ab85..9b8017670870 100644
--- a/Documentation/devicetree/bindings/mmc/mtk-sd.txt
+++ b/Documentation/devicetree/bindings/mmc/mtk-sd.txt
@@ -12,6 +12,8 @@ Required properties:
"mediatek,mt8173-mmc": for mmc host ip compatible with mt8173
"mediatek,mt2701-mmc": for mmc host ip compatible with mt2701
"mediatek,mt2712-mmc": for mmc host ip compatible with mt2712
+ "mediatek,mt7623-mmc", "mediatek,mt2701-mmc": for MT7623 SoC
+
- reg: physical base address of the controller and length
- interrupts: Should contain MSDC interrupt number
- clocks: Should contain phandle for the clock feeding the MMC controller
diff --git a/Documentation/devicetree/bindings/mmc/tmio_mmc.txt b/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
index 3c6762430fd9..d8685cb83325 100644
--- a/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/tmio_mmc.txt
@@ -26,6 +26,7 @@ Required properties:
"renesas,sdhi-r8a7794" - SDHI IP on R8A7794 SoC
"renesas,sdhi-r8a7795" - SDHI IP on R8A7795 SoC
"renesas,sdhi-r8a7796" - SDHI IP on R8A7796 SoC
+ "renesas,sdhi-r8a77995" - SDHI IP on R8A77995 SoC
"renesas,sdhi-shmobile" - a generic sh-mobile SDHI controller
"renesas,rcar-gen1-sdhi" - a generic R-Car Gen1 SDHI controller
"renesas,rcar-gen2-sdhi" - a generic R-Car Gen2 or RZ/G1
diff --git a/Documentation/devicetree/bindings/mtd/fsl-quadspi.txt b/Documentation/devicetree/bindings/mtd/fsl-quadspi.txt
index c34aa6f8a424..63d4d626fbd5 100644
--- a/Documentation/devicetree/bindings/mtd/fsl-quadspi.txt
+++ b/Documentation/devicetree/bindings/mtd/fsl-quadspi.txt
@@ -12,7 +12,7 @@ Required properties:
- reg-names: Should contain the reg names "QuadSPI" and "QuadSPI-memory"
- interrupts : Should contain the interrupt for the device
- clocks : The clocks needed by the QuadSPI controller
- - clock-names : the name of the clocks
+ - clock-names : Should contain the name of the clocks: "qspi_en" and "qspi".
Optional properties:
- fsl,qspi-has-second-chip: The controller has two buses, bus A and bus B.
diff --git a/Documentation/devicetree/bindings/mtd/gpmc-onenand.txt b/Documentation/devicetree/bindings/mtd/gpmc-onenand.txt
index b6e8bfd024f4..e9f01a963a0a 100644
--- a/Documentation/devicetree/bindings/mtd/gpmc-onenand.txt
+++ b/Documentation/devicetree/bindings/mtd/gpmc-onenand.txt
@@ -9,13 +9,14 @@ Documentation/devicetree/bindings/memory-controllers/omap-gpmc.txt
Required properties:
+ - compatible: "ti,omap2-onenand"
- reg: The CS line the peripheral is connected to
- - gpmc,device-width Width of the ONENAND device connected to the GPMC
+ - gpmc,device-width: Width of the ONENAND device connected to the GPMC
in bytes. Must be 1 or 2.
Optional properties:
- - dma-channel: DMA Channel index
+ - int-gpios: GPIO specifier for the INT pin.
For inline partition table parsing (optional):
@@ -35,6 +36,7 @@ Example for an OMAP3430 board:
#size-cells = <1>;
onenand@0 {
+ compatible = "ti,omap2-onenand";
reg = <0 0 0>; /* CS0, offset 0 */
gpmc,device-width = <2>;
diff --git a/Documentation/devicetree/bindings/mtd/marvell-nand.txt b/Documentation/devicetree/bindings/mtd/marvell-nand.txt
new file mode 100644
index 000000000000..c08fb477b3c6
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/marvell-nand.txt
@@ -0,0 +1,123 @@
+Marvell NAND Flash Controller (NFC)
+
+Required properties:
+- compatible: can be one of the following:
+ * "marvell,armada-8k-nand-controller"
+ * "marvell,armada370-nand-controller"
+ * "marvell,pxa3xx-nand-controller"
+ * "marvell,armada-8k-nand" (deprecated)
+ * "marvell,armada370-nand" (deprecated)
+ * "marvell,pxa3xx-nand" (deprecated)
+ Compatibles marked deprecated support only the old bindings described
+ at the bottom.
+- reg: NAND flash controller memory area.
+- #address-cells: shall be set to 1. Encode the NAND CS.
+- #size-cells: shall be set to 0.
+- interrupts: shall define the NAND controller interrupt.
+- clocks: shall reference the NAND controller clock.
+- marvell,system-controller: Set to retrieve the syscon node that handles
+ NAND controller related registers (only required with the
+ "marvell,armada-8k-nand[-controller]" compatibles).
+
+Optional properties:
+- label: see partition.txt. New platforms shall omit this property.
+- dmas: shall reference DMA channel associated to the NAND controller.
+ This property is only used with "marvell,pxa3xx-nand[-controller]"
+ compatible strings.
+- dma-names: shall be "rxtx".
+ This property is only used with "marvell,pxa3xx-nand[-controller]"
+ compatible strings.
+
+Optional children nodes:
+Children nodes represent the available NAND chips.
+
+Required properties:
+- reg: shall contain the native Chip Select ids (0-3).
+- nand-rb: see nand.txt (0-1).
+
+Optional properties:
+- marvell,nand-keep-config: orders the driver not to take the timings
+  from the core and to leave them completely untouched. The bootloader
+  timings will then be used.
+- label: MTD name.
+- nand-on-flash-bbt: see nand.txt.
+- nand-ecc-mode: see nand.txt. Will use hardware ECC if not specified.
+- nand-ecc-algo: see nand.txt. This property is mainly useful when not
+  using hardware ECC. However, it may be added when using hardware ECC
+  for clarification, but will be ignored by the driver because the ECC
+  mode is chosen depending on the page size and the strength required by
+  the NAND chip. This value may be overwritten by the nand-ecc-strength
+  property.
+- nand-ecc-strength: see nand.txt.
+- nand-ecc-step-size: see nand.txt. Marvell's NAND flash controller uses
+  a fixed strength (1-bit for Hamming, 16-bit for BCH), so the actual
+  step size will shrink or grow in order to fit the required strength.
+  Step sizes are not arbitrary and follow the patterns described in
+  AN-379, "Marvell SoC NFC ECC".
+
+See Documentation/devicetree/bindings/mtd/nand.txt for more details on
+generic bindings.
+
+
+Example:
+nand_controller: nand-controller@d0000 {
+ compatible = "marvell,armada370-nand-controller";
+ reg = <0xd0000 0x54>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&coredivclk 0>;
+
+ nand@0 {
+ reg = <0>;
+ label = "main-storage";
+ nand-rb = <0>;
+ nand-ecc-mode = "hw";
+ marvell,nand-keep-config;
+ nand-on-flash-bbt;
+ nand-ecc-strength = <4>;
+ nand-ecc-step-size = <512>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "Rootfs";
+ reg = <0x00000000 0x40000000>;
+ };
+ };
+ };
+};
+
+
+Note on legacy bindings: in not-yet-updated device trees one can find
+bindings slightly different from those described above, with the extra
+properties described below, as well as the partitions node at the root
+of a so-called "nand" node (without a clear controller/chip separation).
+
+Legacy properties:
+- marvell,nand-enable-arbiter: enables the bus arbiter. All boards
+  blindly used this property; the bit was set by the bootloader on many
+  boards and, even though it is marked reserved in several datasheets,
+  setting it may be needed (and is otherwise harmless), so the driver
+  sets the bit whether or not this property is present.
+- num-cs: number of chip-select lines to use. All boards blindly set
+  this to 1, and for good reason: other values would have failed. The
+  value of this property is ignored.
+
+Example:
+
+ nand0: nand@43100000 {
+ compatible = "marvell,pxa3xx-nand";
+ reg = <0x43100000 90>;
+ interrupts = <45>;
+ dmas = <&pdma 97 0>;
+ dma-names = "rxtx";
+ #address-cells = <1>;
+ marvell,nand-keep-config;
+ marvell,nand-enable-arbiter;
+ num-cs = <1>;
+ /* Partitions (optional) */
+ };
diff --git a/Documentation/devicetree/bindings/mtd/mtk-nand.txt b/Documentation/devicetree/bindings/mtd/mtk-nand.txt
index 0431841de781..1c88526dedfc 100644
--- a/Documentation/devicetree/bindings/mtd/mtk-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/mtk-nand.txt
@@ -12,8 +12,10 @@ tree nodes.
The first part of NFC is NAND Controller Interface (NFI) HW.
Required NFI properties:
-- compatible: Should be one of "mediatek,mt2701-nfc",
- "mediatek,mt2712-nfc".
+- compatible: Should be one of
+ "mediatek,mt2701-nfc",
+ "mediatek,mt2712-nfc",
+ "mediatek,mt7622-nfc".
- reg: Base physical address and size of NFI.
- interrupts: Interrupts of NFI.
- clocks: NFI required clocks.
@@ -142,7 +144,10 @@ Example:
==============
Required BCH properties:
-- compatible: Should be one of "mediatek,mt2701-ecc", "mediatek,mt2712-ecc".
+- compatible: Should be one of
+ "mediatek,mt2701-ecc",
+ "mediatek,mt2712-ecc",
+ "mediatek,mt7622-ecc".
- reg: Base physical address and size of ECC.
- interrupts: Interrupts of ECC.
- clocks: ECC required clocks.
diff --git a/Documentation/devicetree/bindings/mtd/nand.txt b/Documentation/devicetree/bindings/mtd/nand.txt
index 133f3813719c..8bb11d809429 100644
--- a/Documentation/devicetree/bindings/mtd/nand.txt
+++ b/Documentation/devicetree/bindings/mtd/nand.txt
@@ -43,6 +43,7 @@ Optional NAND chip properties:
This is particularly useful when only the in-band area is
used by the upper layers, and you want to make your NAND
as reliable as possible.
+- nand-rb: shall contain the native Ready/Busy ids.
The ECC strength and ECC step size properties define the correction capability
of a controller. Together, they say a controller can correct "{strength} bit
diff --git a/Documentation/devicetree/bindings/opp/opp.txt b/Documentation/devicetree/bindings/opp/opp.txt
index 9d733af26be7..4e4f30288c8b 100644
--- a/Documentation/devicetree/bindings/opp/opp.txt
+++ b/Documentation/devicetree/bindings/opp/opp.txt
@@ -45,6 +45,11 @@ Devices supporting OPPs must set their "operating-points-v2" property with
phandle to a OPP table in their DT node. The OPP core will use this phandle to
find the operating points for the device.
+This can contain more than one phandle for power domain providers that provide
+multiple power domains. That is, one phandle for each power domain. If only one
+phandle is available, then the same OPP table will be used for all power domains
+provided by the power domain provider.
+
If required, this can be extended for SoC vendor specific bindings. Such bindings
should be documented as Documentation/devicetree/bindings/power/<vendor>-opp.txt
and should have a compatible description like: "operating-points-v2-<vendor>".
@@ -154,6 +159,14 @@ Optional properties:
- status: Marks the node enabled/disabled.
+- required-opp: This contains a phandle to an OPP node in another device's
+  OPP table. It may contain an array of phandles, where each phandle points
+  to an OPP of a different device. It should not contain multiple phandles
+  to OPP nodes in the same OPP table. This specifies the minimum OPP required
+  of each device whose OPP phandle is listed here, for the current device to
+  function at the OPP in which this property appears.
+
Example 1: Single cluster Dual-core ARM cortex A9, switch DVFS states together.
/ {
diff --git a/Documentation/devicetree/bindings/opp/ti-omap5-opp-supply.txt b/Documentation/devicetree/bindings/opp/ti-omap5-opp-supply.txt
new file mode 100644
index 000000000000..832346e489a3
--- /dev/null
+++ b/Documentation/devicetree/bindings/opp/ti-omap5-opp-supply.txt
@@ -0,0 +1,63 @@
+Texas Instruments OMAP compatible OPP supply description
+
+OMAP5, DRA7, and AM57 family of SoCs have Class0 AVS eFuse registers which
+contain data that can be used to adjust voltages programmed for some of their
+supplies for more efficient operation. This binding provides the information
+needed to read these values and use them to program the main regulator during
+an OPP transition.
+
+Also, some supplies may have an associated vbb-supply, an Adaptive Body
+Bias regulator which must be transitioned in a specific sequence with regard
+to the vdd-supply and clk when making an OPP transition. By supplying two
+regulators to the device that will undergo OPP transitions we can make use
+of the multi regulator binding that is part of the OPP core described here [1]
+to describe both regulators needed by the platform.
+
+[1] Documentation/devicetree/bindings/opp/opp.txt
+
+Required Properties for Device Node:
+- vdd-supply: phandle to regulator controlling VDD supply
+- vbb-supply: phandle to regulator controlling Body Bias supply
+ (Usually Adaptive Body Bias regulator)
+
+Required Properties for opp-supply node:
+- compatible: Should be one of:
+ "ti,omap-opp-supply" - basic OPP supply controlling VDD and VBB
+ "ti,omap5-opp-supply" - OMAP5+ optimized voltages in efuse(class0)VDD
+ along with VBB
+ "ti,omap5-core-opp-supply" - OMAP5+ optimized voltages in efuse(class0) VDD
+ but no VBB.
+- reg: Address and length of the efuse register set for the device (mandatory
+ only for "ti,omap5-opp-supply")
+- ti,efuse-settings: An array of u32 tuple items providing information about
+ optimized efuse configuration. Each item consists of the following:
+ volt: voltage in uV - reference voltage (OPP voltage)
+  efuse_offset: efuse offset from reg where the optimized voltage is stored.
+- ti,absolute-max-voltage-uv: absolute maximum voltage for the OPP supply.
+
+Example:
+
+/* Device Node (CPU) */
+cpus {
+ cpu0: cpu@0 {
+ device_type = "cpu";
+
+ ...
+
+ vdd-supply = <&vcc>;
+ vbb-supply = <&abb_mpu>;
+ };
+};
+
+/* OMAP OPP Supply with Class0 registers */
+opp_supply_mpu: opp_supply@4a003b20 {
+ compatible = "ti,omap5-opp-supply";
+ reg = <0x4a003b20 0x8>;
+ ti,efuse-settings = <
+ /* uV offset */
+ 1060000 0x0
+ 1160000 0x4
+ 1210000 0x8
+ >;
+ ti,absolute-max-voltage-uv = <1500000>;
+};
diff --git a/Documentation/devicetree/bindings/power/power_domain.txt b/Documentation/devicetree/bindings/power/power_domain.txt
index 14bd9e945ff6..f3355313c020 100644
--- a/Documentation/devicetree/bindings/power/power_domain.txt
+++ b/Documentation/devicetree/bindings/power/power_domain.txt
@@ -40,6 +40,12 @@ Optional properties:
domain's idle states. In the absence of this property, the domain would be
considered as capable of being powered-on or powered-off.
+- operating-points-v2 : Phandles to the OPP tables of power domains provided by
+ a power domain provider. If the provider provides a single power domain only
+ or all the power domains provided by the provider have identical OPP tables,
+ then this shall contain a single phandle. Refer to ../opp/opp.txt for more
+ information.
+
Example:
power: power-controller@12340000 {
@@ -120,4 +126,63 @@ The node above defines a typical PM domain consumer device, which is located
inside a PM domain with index 0 of a power controller represented by a node
with the label "power".
+Optional properties:
+- required-opp: This contains a phandle to an OPP node in another device's
+  OPP table. It may contain an array of phandles, where each phandle points
+  to an OPP of a different device. It should not contain multiple phandles
+  to OPP nodes in the same OPP table. This specifies the minimum OPP required
+  of each device whose OPP phandle is listed here, for the current device to
+  function at the OPP in which this property appears.
+
+Example:
+- OPP table for domain provider that provides two domains.
+
+ domain0_opp_table: opp-table0 {
+ compatible = "operating-points-v2";
+
+ domain0_opp_0: opp-1000000000 {
+ opp-hz = /bits/ 64 <1000000000>;
+ opp-microvolt = <975000 970000 985000>;
+ };
+ domain0_opp_1: opp-1100000000 {
+ opp-hz = /bits/ 64 <1100000000>;
+ opp-microvolt = <1000000 980000 1010000>;
+ };
+ };
+
+ domain1_opp_table: opp-table1 {
+ compatible = "operating-points-v2";
+
+ domain1_opp_0: opp-1200000000 {
+ opp-hz = /bits/ 64 <1200000000>;
+ opp-microvolt = <975000 970000 985000>;
+ };
+ domain1_opp_1: opp-1300000000 {
+ opp-hz = /bits/ 64 <1300000000>;
+ opp-microvolt = <1000000 980000 1010000>;
+ };
+ };
+
+ power: power-controller@12340000 {
+ compatible = "foo,power-controller";
+ reg = <0x12340000 0x1000>;
+ #power-domain-cells = <1>;
+ operating-points-v2 = <&domain0_opp_table>, <&domain1_opp_table>;
+ };
+
+ leaky-device0@12350000 {
+ compatible = "foo,i-leak-current";
+ reg = <0x12350000 0x1000>;
+ power-domains = <&power 0>;
+ required-opp = <&domain0_opp_0>;
+ };
+
+ leaky-device1@12350000 {
+ compatible = "foo,i-leak-current";
+ reg = <0x12350000 0x1000>;
+ power-domains = <&power 1>;
+ required-opp = <&domain1_opp_1>;
+ };
+
[1]. Documentation/devicetree/bindings/power/domain-idle-state.txt
diff --git a/Documentation/devicetree/bindings/power/reset/imx-snvs-poweroff.txt b/Documentation/devicetree/bindings/power/reset/imx-snvs-poweroff.txt
deleted file mode 100644
index 1b81fcd9fb72..000000000000
--- a/Documentation/devicetree/bindings/power/reset/imx-snvs-poweroff.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-i.mx6 Poweroff Driver
-
-SNVS_LPCR in SNVS module can power off the whole system by pull
-PMIC_ON_REQ low if PMIC_ON_REQ is connected with external PMIC.
-If you don't want to use PMIC_ON_REQ as power on/off control,
-please set status='disabled' to disable this driver.
-
-Required Properties:
--compatible: "fsl,sec-v4.0-poweroff"
--reg: Specifies the physical address of the SNVS_LPCR register
-
-Example:
- snvs@20cc000 {
- compatible = "fsl,sec-v4.0-mon", "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0 0x020cc000 0x4000>;
- .....
- snvs_poweroff: snvs-poweroff@38 {
- compatible = "fsl,sec-v4.0-poweroff";
- reg = <0x38 0x4>;
- };
- }
diff --git a/Documentation/devicetree/bindings/power/supply/bq27xxx.txt b/Documentation/devicetree/bindings/power/supply/bq27xxx.txt
index 6858e1a804ad..615c1cb6889f 100644
--- a/Documentation/devicetree/bindings/power/supply/bq27xxx.txt
+++ b/Documentation/devicetree/bindings/power/supply/bq27xxx.txt
@@ -15,6 +15,7 @@ Required properties:
* "ti,bq27520g2" - BQ27520-g2
* "ti,bq27520g3" - BQ27520-g3
* "ti,bq27520g4" - BQ27520-g4
+ * "ti,bq27521" - BQ27521
* "ti,bq27530" - BQ27530
* "ti,bq27531" - BQ27531
* "ti,bq27541" - BQ27541
diff --git a/Documentation/devicetree/bindings/regulator/regulator.txt b/Documentation/devicetree/bindings/regulator/regulator.txt
index 3cbf56ce66ea..2babe15b618d 100644
--- a/Documentation/devicetree/bindings/regulator/regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/regulator.txt
@@ -42,8 +42,16 @@ Optional properties:
- regulator-state-[mem/disk] node has following common properties:
- regulator-on-in-suspend: regulator should be on in suspend state.
- regulator-off-in-suspend: regulator should be off in suspend state.
- - regulator-suspend-microvolt: regulator should be set to this voltage
- in suspend.
+	- regulator-suspend-min-microvolt: minimum voltage that may be set in
+	  the suspend state.
+	- regulator-suspend-max-microvolt: maximum voltage that may be set in
+	  the suspend state.
+	- regulator-suspend-microvolt: the default voltage to which the
+	  regulator is set in suspend. This property is now deprecated;
+	  setting the suspend voltage via the API the regulator driver
+	  provides is recommended instead.
+	- regulator-changeable-in-suspend: whether the default voltage and
+	  the regulator's on/off state in suspend can be changed at runtime.
- regulator-mode: operating mode in the given suspend state.
The set of possible operating modes depends on the capabilities of
every hardware so the valid modes are documented on each regulator
diff --git a/Documentation/devicetree/bindings/regulator/sprd,sc2731-regulator.txt b/Documentation/devicetree/bindings/regulator/sprd,sc2731-regulator.txt
new file mode 100644
index 000000000000..63dc07877cd6
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/sprd,sc2731-regulator.txt
@@ -0,0 +1,43 @@
+Spreadtrum SC2731 Voltage regulators
+
+The SC2731 integrates low-voltage and low quiescent current DCDCs/LDOs.
+14 LDOs and 3 DCDCs are designed for external use. All DCDCs/LDOs have
+their own bypass (power-down) control signals. External tantalum or MLCC
+ceramic capacitors are recommended for use with these LDOs.
+
+Required properties:
+ - compatible: should be "sprd,sc27xx-regulator".
+
+The regulators provided by this controller are named according to their
+regulator type: BUCK_<name> and LDO_<name>. Each of these nodes is
+defined using the standard binding for regulators at
+Documentation/devicetree/bindings/regulator/regulator.txt.
+
+The valid names for regulators are:
+BUCK:
+ BUCK_CPU0, BUCK_CPU1, BUCK_RF
+LDO:
+ LDO_CAMA0, LDO_CAMA1, LDO_CAMMOT, LDO_VLDO, LDO_EMMCCORE, LDO_SDCORE,
+ LDO_SDIO, LDO_WIFIPA, LDO_USB33, LDO_CAMD0, LDO_CAMD1, LDO_CON,
+ LDO_CAMIO, LDO_SRAM
+
+Example:
+ regulators {
+ compatible = "sprd,sc27xx-regulator";
+
+ vddarm0: BUCK_CPU0 {
+ regulator-name = "vddarm0";
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1996875>;
+ regulator-ramp-delay = <25000>;
+ regulator-always-on;
+ };
+
+ vddcama0: LDO_CAMA0 {
+ regulator-name = "vddcama0";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3750000>;
+ regulator-enable-ramp-delay = <100>;
+ };
+ ...
+ };
diff --git a/Documentation/devicetree/bindings/rng/brcm,bcm2835.txt b/Documentation/devicetree/bindings/rng/brcm,bcm2835.txt
index 26542690b578..627b29531a32 100644
--- a/Documentation/devicetree/bindings/rng/brcm,bcm2835.txt
+++ b/Documentation/devicetree/bindings/rng/brcm,bcm2835.txt
@@ -1,11 +1,19 @@
-BCM2835 Random number generator
+BCM2835/6368 Random number generator
Required properties:
-- compatible : should be "brcm,bcm2835-rng" or "brcm,bcm-nsp-rng" or
- "brcm,bcm5301x-rng"
+- compatible : should be one of
+ "brcm,bcm2835-rng"
+ "brcm,bcm-nsp-rng"
+ "brcm,bcm5301x-rng" or
+ "brcm,bcm6368-rng"
- reg : Specifies base physical address and size of the registers.
+Optional properties:
+
+- clocks : phandle to clock-controller plus clock-specifier pair
+- clock-names : "ipsec" as a clock name
+
Example:
rng {
@@ -17,3 +25,11 @@ rng@18033000 {
compatible = "brcm,bcm-nsp-rng";
reg = <0x18033000 0x14>;
};
+
+random: rng@10004180 {
+ compatible = "brcm,bcm6368-rng";
+ reg = <0x10004180 0x14>;
+
+ clocks = <&periph_clk 18>;
+ clock-names = "ipsec";
+};
diff --git a/Documentation/devicetree/bindings/rng/brcm,bcm6368.txt b/Documentation/devicetree/bindings/rng/brcm,bcm6368.txt
deleted file mode 100644
index 4b5ac600bfbd..000000000000
--- a/Documentation/devicetree/bindings/rng/brcm,bcm6368.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-BCM6368 Random number generator
-
-Required properties:
-
-- compatible : should be "brcm,bcm6368-rng"
-- reg : Specifies base physical address and size of the registers
-- clocks : phandle to clock-controller plus clock-specifier pair
-- clock-names : "ipsec" as a clock name
-
-Example:
- random: rng@10004180 {
- compatible = "brcm,bcm6368-rng";
- reg = <0x10004180 0x14>;
-
- clocks = <&periph_clk 18>;
- clock-names = "ipsec";
- };
diff --git a/Documentation/devicetree/bindings/scsi/hisilicon-sas.txt b/Documentation/devicetree/bindings/scsi/hisilicon-sas.txt
index b6a869f97715..df3bef7998fa 100644
--- a/Documentation/devicetree/bindings/scsi/hisilicon-sas.txt
+++ b/Documentation/devicetree/bindings/scsi/hisilicon-sas.txt
@@ -8,7 +8,10 @@ Main node required properties:
(b) "hisilicon,hip06-sas-v2" for v2 hw in hip06 chipset
(c) "hisilicon,hip07-sas-v2" for v2 hw in hip07 chipset
- sas-addr : array of 8 bytes for host SAS address
- - reg : Address and length of the SAS register
+  - reg : Contains two regions. The first is the address and length of the SAS
+	  registers. The second is the address and length of the CPLD register
+	  for SGPIO control; it is optional, and should be set only when a
+	  CPLD is used for directly attached disk LED control.
- hisilicon,sas-syscon: phandle of syscon used for sas control
- ctrl-reset-reg : offset to controller reset register in ctrl reg
- ctrl-reset-sts-reg : offset to controller reset status register in ctrl reg
diff --git a/Documentation/devicetree/bindings/sound/dmic.txt b/Documentation/devicetree/bindings/sound/dmic.txt
index 54c8ef6498a8..f7bf65611453 100644
--- a/Documentation/devicetree/bindings/sound/dmic.txt
+++ b/Documentation/devicetree/bindings/sound/dmic.txt
@@ -7,10 +7,12 @@ Required properties:
Optional properties:
- dmicen-gpios: GPIO specifier for dmic to control start and stop
+ - num-channels: Number of microphones on this DAI
Example node:
dmic_codec: dmic@0 {
compatible = "dmic-codec";
dmicen-gpios = <&gpio4 3 GPIO_ACTIVE_HIGH>;
+ num-channels = <1>;
};
diff --git a/Documentation/devicetree/bindings/sound/max98373.txt b/Documentation/devicetree/bindings/sound/max98373.txt
new file mode 100644
index 000000000000..456cb1c59353
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/max98373.txt
@@ -0,0 +1,40 @@
+Maxim Integrated MAX98373 Speaker Amplifier
+
+This device supports I2C.
+
+Required properties:
+
+ - compatible : "maxim,max98373"
+
+ - reg : the I2C address of the device.
+
+Optional properties:
+
+  - maxim,vmon-slot-no : slot number used to send voltage information,
+                         or, in interleave mode, the slot used for
+                         interleaving.
+ slot range : 0 ~ 15, Default : 0
+
+ - maxim,imon-slot-no : slot number used to send current information
+ slot range : 0 ~ 15, Default : 0
+
+ - maxim,spkfb-slot-no : slot number used to send speaker feedback information
+ slot range : 0 ~ 15, Default : 0
+
+ - maxim,interleave-mode : For cases where a single combined channel
+ for the I/V sense data is not sufficient, the device can also be configured
+ to share a single data output channel on alternating frames.
+ In this configuration, the current and voltage data will be frame interleaved
+ on a single output channel.
+ Boolean, define to enable the interleave mode, Default : false
+
+Example:
+
+codec: max98373@31 {
+ compatible = "maxim,max98373";
+ reg = <0x31>;
+ maxim,vmon-slot-no = <0>;
+ maxim,imon-slot-no = <1>;
+ maxim,spkfb-slot-no = <2>;
+ maxim,interleave-mode;
+};
diff --git a/Documentation/devicetree/bindings/sound/mt2701-afe-pcm.txt b/Documentation/devicetree/bindings/sound/mt2701-afe-pcm.txt
index 77a57f84bed4..6df87b97f7cb 100644
--- a/Documentation/devicetree/bindings/sound/mt2701-afe-pcm.txt
+++ b/Documentation/devicetree/bindings/sound/mt2701-afe-pcm.txt
@@ -2,153 +2,143 @@ Mediatek AFE PCM controller for mt2701
Required properties:
- compatible = "mediatek,mt2701-audio";
-- reg: register location and size
- interrupts: should contain AFE and ASYS interrupts
- interrupt-names: should be "afe" and "asys"
- power-domains: should define the power domain
+- clocks: Must contain an entry for each entry in clock-names
+ See ../clocks/clock-bindings.txt for details
- clock-names: should have these clock names:
"infra_sys_audio_clk",
"top_audio_mux1_sel",
"top_audio_mux2_sel",
- "top_audio_mux1_div",
- "top_audio_mux2_div",
- "top_audio_48k_timing",
- "top_audio_44k_timing",
- "top_audpll_mux_sel",
- "top_apll_sel",
- "top_aud1_pll_98M",
- "top_aud2_pll_90M",
- "top_hadds2_pll_98M",
- "top_hadds2_pll_294M",
- "top_audpll",
- "top_audpll_d4",
- "top_audpll_d8",
- "top_audpll_d16",
- "top_audpll_d24",
- "top_audintbus_sel",
- "clk_26m",
- "top_syspll1_d4",
- "top_aud_k1_src_sel",
- "top_aud_k2_src_sel",
- "top_aud_k3_src_sel",
- "top_aud_k4_src_sel",
- "top_aud_k5_src_sel",
- "top_aud_k6_src_sel",
- "top_aud_k1_src_div",
- "top_aud_k2_src_div",
- "top_aud_k3_src_div",
- "top_aud_k4_src_div",
- "top_aud_k5_src_div",
- "top_aud_k6_src_div",
- "top_aud_i2s1_mclk",
- "top_aud_i2s2_mclk",
- "top_aud_i2s3_mclk",
- "top_aud_i2s4_mclk",
- "top_aud_i2s5_mclk",
- "top_aud_i2s6_mclk",
- "top_asm_m_sel",
- "top_asm_h_sel",
- "top_univpll2_d4",
- "top_univpll2_d2",
- "top_syspll_d5";
+ "top_audio_a1sys_hp",
+ "top_audio_a2sys_hp",
+ "i2s0_src_sel",
+ "i2s1_src_sel",
+ "i2s2_src_sel",
+ "i2s3_src_sel",
+ "i2s0_src_div",
+ "i2s1_src_div",
+ "i2s2_src_div",
+ "i2s3_src_div",
+ "i2s0_mclk_en",
+ "i2s1_mclk_en",
+ "i2s2_mclk_en",
+ "i2s3_mclk_en",
+ "i2so0_hop_ck",
+ "i2so1_hop_ck",
+ "i2so2_hop_ck",
+ "i2so3_hop_ck",
+ "i2si0_hop_ck",
+ "i2si1_hop_ck",
+ "i2si2_hop_ck",
+ "i2si3_hop_ck",
+ "asrc0_out_ck",
+ "asrc1_out_ck",
+ "asrc2_out_ck",
+ "asrc3_out_ck",
+ "audio_afe_pd",
+ "audio_afe_conn_pd",
+ "audio_a1sys_pd",
+ "audio_a2sys_pd",
+ "audio_mrgif_pd";
+- assigned-clocks: list of input clocks and dividers for the audio system.
+ See ../clocks/clock-bindings.txt for details.
+- assigned-clock-parents: parents of the assigned input clocks.
+- assigned-clock-rates: list of clock frequencies of assigned clocks.
+
+This node must be a subnode of the MediaTek audsys device tree node.
+See ../arm/mediatek/mediatek,audsys.txt for details about the parent node.
Example:
- afe: mt2701-afe-pcm@11220000 {
- compatible = "mediatek,mt2701-audio";
- reg = <0 0x11220000 0 0x2000>,
- <0 0x112A0000 0 0x20000>;
- interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_LOW>,
- <GIC_SPI 132 IRQ_TYPE_LEVEL_LOW>;
- interrupt-names = "afe", "asys";
- power-domains = <&scpsys MT2701_POWER_DOMAIN_IFR_MSC>;
- clocks = <&infracfg CLK_INFRA_AUDIO>,
- <&topckgen CLK_TOP_AUD_MUX1_SEL>,
- <&topckgen CLK_TOP_AUD_MUX2_SEL>,
- <&topckgen CLK_TOP_AUD_MUX1_DIV>,
- <&topckgen CLK_TOP_AUD_MUX2_DIV>,
- <&topckgen CLK_TOP_AUD_48K_TIMING>,
- <&topckgen CLK_TOP_AUD_44K_TIMING>,
- <&topckgen CLK_TOP_AUDPLL_MUX_SEL>,
- <&topckgen CLK_TOP_APLL_SEL>,
- <&topckgen CLK_TOP_AUD1PLL_98M>,
- <&topckgen CLK_TOP_AUD2PLL_90M>,
- <&topckgen CLK_TOP_HADDS2PLL_98M>,
- <&topckgen CLK_TOP_HADDS2PLL_294M>,
- <&topckgen CLK_TOP_AUDPLL>,
- <&topckgen CLK_TOP_AUDPLL_D4>,
- <&topckgen CLK_TOP_AUDPLL_D8>,
- <&topckgen CLK_TOP_AUDPLL_D16>,
- <&topckgen CLK_TOP_AUDPLL_D24>,
- <&topckgen CLK_TOP_AUDINTBUS_SEL>,
- <&clk26m>,
- <&topckgen CLK_TOP_SYSPLL1_D4>,
- <&topckgen CLK_TOP_AUD_K1_SRC_SEL>,
- <&topckgen CLK_TOP_AUD_K2_SRC_SEL>,
- <&topckgen CLK_TOP_AUD_K3_SRC_SEL>,
- <&topckgen CLK_TOP_AUD_K4_SRC_SEL>,
- <&topckgen CLK_TOP_AUD_K5_SRC_SEL>,
- <&topckgen CLK_TOP_AUD_K6_SRC_SEL>,
- <&topckgen CLK_TOP_AUD_K1_SRC_DIV>,
- <&topckgen CLK_TOP_AUD_K2_SRC_DIV>,
- <&topckgen CLK_TOP_AUD_K3_SRC_DIV>,
- <&topckgen CLK_TOP_AUD_K4_SRC_DIV>,
- <&topckgen CLK_TOP_AUD_K5_SRC_DIV>,
- <&topckgen CLK_TOP_AUD_K6_SRC_DIV>,
- <&topckgen CLK_TOP_AUD_I2S1_MCLK>,
- <&topckgen CLK_TOP_AUD_I2S2_MCLK>,
- <&topckgen CLK_TOP_AUD_I2S3_MCLK>,
- <&topckgen CLK_TOP_AUD_I2S4_MCLK>,
- <&topckgen CLK_TOP_AUD_I2S5_MCLK>,
- <&topckgen CLK_TOP_AUD_I2S6_MCLK>,
- <&topckgen CLK_TOP_ASM_M_SEL>,
- <&topckgen CLK_TOP_ASM_H_SEL>,
- <&topckgen CLK_TOP_UNIVPLL2_D4>,
- <&topckgen CLK_TOP_UNIVPLL2_D2>,
- <&topckgen CLK_TOP_SYSPLL_D5>;
+ audsys: audio-subsystem@11220000 {
+ compatible = "mediatek,mt2701-audsys", "syscon", "simple-mfd";
+ ...
+
+ afe: audio-controller {
+ compatible = "mediatek,mt2701-audio";
+ interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 132 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "afe", "asys";
+ power-domains = <&scpsys MT2701_POWER_DOMAIN_IFR_MSC>;
+
+ clocks = <&infracfg CLK_INFRA_AUDIO>,
+ <&topckgen CLK_TOP_AUD_MUX1_SEL>,
+ <&topckgen CLK_TOP_AUD_MUX2_SEL>,
+ <&topckgen CLK_TOP_AUD_48K_TIMING>,
+ <&topckgen CLK_TOP_AUD_44K_TIMING>,
+ <&topckgen CLK_TOP_AUD_K1_SRC_SEL>,
+ <&topckgen CLK_TOP_AUD_K2_SRC_SEL>,
+ <&topckgen CLK_TOP_AUD_K3_SRC_SEL>,
+ <&topckgen CLK_TOP_AUD_K4_SRC_SEL>,
+ <&topckgen CLK_TOP_AUD_K1_SRC_DIV>,
+ <&topckgen CLK_TOP_AUD_K2_SRC_DIV>,
+ <&topckgen CLK_TOP_AUD_K3_SRC_DIV>,
+ <&topckgen CLK_TOP_AUD_K4_SRC_DIV>,
+ <&topckgen CLK_TOP_AUD_I2S1_MCLK>,
+ <&topckgen CLK_TOP_AUD_I2S2_MCLK>,
+ <&topckgen CLK_TOP_AUD_I2S3_MCLK>,
+ <&topckgen CLK_TOP_AUD_I2S4_MCLK>,
+ <&audsys CLK_AUD_I2SO1>,
+ <&audsys CLK_AUD_I2SO2>,
+ <&audsys CLK_AUD_I2SO3>,
+ <&audsys CLK_AUD_I2SO4>,
+ <&audsys CLK_AUD_I2SIN1>,
+ <&audsys CLK_AUD_I2SIN2>,
+ <&audsys CLK_AUD_I2SIN3>,
+ <&audsys CLK_AUD_I2SIN4>,
+ <&audsys CLK_AUD_ASRCO1>,
+ <&audsys CLK_AUD_ASRCO2>,
+ <&audsys CLK_AUD_ASRCO3>,
+ <&audsys CLK_AUD_ASRCO4>,
+ <&audsys CLK_AUD_AFE>,
+ <&audsys CLK_AUD_AFE_CONN>,
+ <&audsys CLK_AUD_A1SYS>,
+ <&audsys CLK_AUD_A2SYS>,
+ <&audsys CLK_AUD_AFE_MRGIF>;
+
+ clock-names = "infra_sys_audio_clk",
+ "top_audio_mux1_sel",
+ "top_audio_mux2_sel",
+ "top_audio_a1sys_hp",
+ "top_audio_a2sys_hp",
+ "i2s0_src_sel",
+ "i2s1_src_sel",
+ "i2s2_src_sel",
+ "i2s3_src_sel",
+ "i2s0_src_div",
+ "i2s1_src_div",
+ "i2s2_src_div",
+ "i2s3_src_div",
+ "i2s0_mclk_en",
+ "i2s1_mclk_en",
+ "i2s2_mclk_en",
+ "i2s3_mclk_en",
+ "i2so0_hop_ck",
+ "i2so1_hop_ck",
+ "i2so2_hop_ck",
+ "i2so3_hop_ck",
+ "i2si0_hop_ck",
+ "i2si1_hop_ck",
+ "i2si2_hop_ck",
+ "i2si3_hop_ck",
+ "asrc0_out_ck",
+ "asrc1_out_ck",
+ "asrc2_out_ck",
+ "asrc3_out_ck",
+ "audio_afe_pd",
+ "audio_afe_conn_pd",
+ "audio_a1sys_pd",
+ "audio_a2sys_pd",
+ "audio_mrgif_pd";
- clock-names = "infra_sys_audio_clk",
- "top_audio_mux1_sel",
- "top_audio_mux2_sel",
- "top_audio_mux1_div",
- "top_audio_mux2_div",
- "top_audio_48k_timing",
- "top_audio_44k_timing",
- "top_audpll_mux_sel",
- "top_apll_sel",
- "top_aud1_pll_98M",
- "top_aud2_pll_90M",
- "top_hadds2_pll_98M",
- "top_hadds2_pll_294M",
- "top_audpll",
- "top_audpll_d4",
- "top_audpll_d8",
- "top_audpll_d16",
- "top_audpll_d24",
- "top_audintbus_sel",
- "clk_26m",
- "top_syspll1_d4",
- "top_aud_k1_src_sel",
- "top_aud_k2_src_sel",
- "top_aud_k3_src_sel",
- "top_aud_k4_src_sel",
- "top_aud_k5_src_sel",
- "top_aud_k6_src_sel",
- "top_aud_k1_src_div",
- "top_aud_k2_src_div",
- "top_aud_k3_src_div",
- "top_aud_k4_src_div",
- "top_aud_k5_src_div",
- "top_aud_k6_src_div",
- "top_aud_i2s1_mclk",
- "top_aud_i2s2_mclk",
- "top_aud_i2s3_mclk",
- "top_aud_i2s4_mclk",
- "top_aud_i2s5_mclk",
- "top_aud_i2s6_mclk",
- "top_asm_m_sel",
- "top_asm_h_sel",
- "top_univpll2_d4",
- "top_univpll2_d2",
- "top_syspll_d5";
+ assigned-clocks = <&topckgen CLK_TOP_AUD_MUX1_SEL>,
+ <&topckgen CLK_TOP_AUD_MUX2_SEL>,
+ <&topckgen CLK_TOP_AUD_MUX1_DIV>,
+ <&topckgen CLK_TOP_AUD_MUX2_DIV>;
+ assigned-clock-parents = <&topckgen CLK_TOP_AUD1PLL_98M>,
+ <&topckgen CLK_TOP_AUD2PLL_90M>;
+ assigned-clock-rates = <0>, <0>, <49152000>, <45158400>;
+ };
};
diff --git a/Documentation/devicetree/bindings/sound/mxs-audio-sgtl5000.txt b/Documentation/devicetree/bindings/sound/mxs-audio-sgtl5000.txt
index 601c518eddaa..4eb980bd0287 100644
--- a/Documentation/devicetree/bindings/sound/mxs-audio-sgtl5000.txt
+++ b/Documentation/devicetree/bindings/sound/mxs-audio-sgtl5000.txt
@@ -1,10 +1,31 @@
* Freescale MXS audio complex with SGTL5000 codec
Required properties:
-- compatible: "fsl,mxs-audio-sgtl5000"
-- model: The user-visible name of this sound complex
-- saif-controllers: The phandle list of the MXS SAIF controller
-- audio-codec: The phandle of the SGTL5000 audio codec
+- compatible : "fsl,mxs-audio-sgtl5000"
+- model : The user-visible name of this sound complex
+- saif-controllers : The phandle list of the MXS SAIF controller
+- audio-codec : The phandle of the SGTL5000 audio codec
+- audio-routing : A list of the connections between audio components.
+ Each entry is a pair of strings, the first being the
+ connection's sink, the second being the connection's
+ source. Valid names could be power supplies, SGTL5000
+ pins, and the jacks on the board:
+
+ Power supplies:
+ * Mic Bias
+
+ SGTL5000 pins:
+ * MIC_IN
+ * LINE_IN
+ * HP_OUT
+ * LINE_OUT
+
+ Board connectors:
+ * Mic Jack
+ * Line In Jack
+ * Headphone Jack
+ * Line Out Jack
+ * Ext Spk
Example:
@@ -14,4 +35,8 @@ sound {
model = "imx28-evk-sgtl5000";
saif-controllers = <&saif0 &saif1>;
audio-codec = <&sgtl5000>;
+ audio-routing =
+ "MIC_IN", "Mic Jack",
+ "Mic Jack", "Mic Bias",
+ "Headphone Jack", "HP_OUT";
};
diff --git a/Documentation/devicetree/bindings/sound/nau8825.txt b/Documentation/devicetree/bindings/sound/nau8825.txt
index 2f5e973285a6..d16d96839bcb 100644
--- a/Documentation/devicetree/bindings/sound/nau8825.txt
+++ b/Documentation/devicetree/bindings/sound/nau8825.txt
@@ -69,7 +69,7 @@ Optional properties:
- nuvoton,jack-insert-debounce: number from 0 to 7 that sets debounce time to 2^(n+2) ms
- nuvoton,jack-eject-debounce: number from 0 to 7 that sets debounce time to 2^(n+2) ms
- - nuvoton,crosstalk-bypass: make crosstalk function bypass if set.
+ - nuvoton,crosstalk-enable: enable the crosstalk function if set.
- clocks: list of phandle and clock specifier pairs according to common clock bindings for the
clocks described in clock-names
@@ -98,7 +98,7 @@ Example:
nuvoton,short-key-debounce = <2>;
nuvoton,jack-insert-debounce = <7>;
nuvoton,jack-eject-debounce = <7>;
- nuvoton,crosstalk-bypass;
+ nuvoton,crosstalk-enable;
clock-names = "mclk";
clocks = <&tegra_car TEGRA210_CLK_CLK_OUT_2>;
diff --git a/Documentation/devicetree/bindings/sound/pcm186x.txt b/Documentation/devicetree/bindings/sound/pcm186x.txt
new file mode 100644
index 000000000000..1087f4855980
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/pcm186x.txt
@@ -0,0 +1,42 @@
+Texas Instruments PCM186x Universal Audio ADC
+
+These devices support both I2C and SPI (configured with pin strapping
+on the board).
+
+Required properties:
+
+ - compatible : "ti,pcm1862",
+ "ti,pcm1863",
+ "ti,pcm1864",
+ "ti,pcm1865"
+
+ - reg : The I2C address of the device for I2C, the chip select
+ number for SPI.
+
+ - avdd-supply: Analog core power supply (3.3V)
+ - dvdd-supply: Digital core power supply
+ - iovdd-supply: Digital IO power supply
+ See regulator/regulator.txt for more information
+
+CODEC input pins:
+ * VINL1
+ * VINR1
+ * VINL2
+ * VINR2
+ * VINL3
+ * VINR3
+ * VINL4
+ * VINR4
+
+The pins can be referred to in the sound node's audio-routing property.
+
+Example:
+
+ pcm186x: audio-codec@4a {
+ compatible = "ti,pcm1865";
+ reg = <0x4a>;
+
+ avdd-supply = <&reg_3v3_analog>;
+ dvdd-supply = <&reg_3v3>;
+ iovdd-supply = <&reg_1v8>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
index 085bec364caf..5bed9a595772 100644
--- a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
+++ b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
@@ -4,7 +4,7 @@ Renesas R-Car sound
* Modules
=============================================
-Renesas R-Car sound is constructed from below modules
+Renesas R-Car and RZ/G sound is constructed from the modules below
(for Gen2 or later)
SCU : Sampling Rate Converter Unit
@@ -197,12 +197,17 @@ Ex)
[MEM] -> [SRC2] -> [CTU03] -+
sound {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
compatible = "simple-scu-audio-card";
...
- simple-audio-card,cpu-0 {
+ simple-audio-card,cpu@0 {
+ reg = <0>;
sound-dai = <&rcar_sound 0>;
};
- simple-audio-card,cpu-1 {
+ simple-audio-card,cpu@1 {
+ reg = <1>;
sound-dai = <&rcar_sound 1>;
};
simple-audio-card,codec {
@@ -334,9 +339,11 @@ Required properties:
- compatible : "renesas,rcar_sound-<soctype>", fallbacks
"renesas,rcar_sound-gen1" if generation1, and
- "renesas,rcar_sound-gen2" if generation2
+ "renesas,rcar_sound-gen2" if generation2 (or RZ/G1)
"renesas,rcar_sound-gen3" if generation3
Examples with soctypes are:
+ - "renesas,rcar_sound-r8a7743" (RZ/G1M)
+ - "renesas,rcar_sound-r8a7745" (RZ/G1E)
- "renesas,rcar_sound-r8a7778" (R-Car M1A)
- "renesas,rcar_sound-r8a7779" (R-Car H1)
- "renesas,rcar_sound-r8a7790" (R-Car H2)
diff --git a/Documentation/devicetree/bindings/sound/simple-card.txt b/Documentation/devicetree/bindings/sound/simple-card.txt
index 166f2290233b..17c13e74667d 100644
--- a/Documentation/devicetree/bindings/sound/simple-card.txt
+++ b/Documentation/devicetree/bindings/sound/simple-card.txt
@@ -140,6 +140,7 @@ sound {
simple-audio-card,name = "Cubox Audio";
simple-audio-card,dai-link@0 { /* I2S - HDMI */
+ reg = <0>;
format = "i2s";
cpu {
sound-dai = <&audio1 0>;
@@ -150,6 +151,7 @@ sound {
};
simple-audio-card,dai-link@1 { /* S/PDIF - HDMI */
+ reg = <1>;
cpu {
sound-dai = <&audio1 1>;
};
@@ -159,6 +161,7 @@ sound {
};
simple-audio-card,dai-link@2 { /* S/PDIF - S/PDIF */
+ reg = <2>;
cpu {
sound-dai = <&audio1 1>;
};
diff --git a/Documentation/devicetree/bindings/sound/st,stm32-adfsdm.txt b/Documentation/devicetree/bindings/sound/st,stm32-adfsdm.txt
new file mode 100644
index 000000000000..864f5b00b031
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/st,stm32-adfsdm.txt
@@ -0,0 +1,63 @@
+STMicroelectronics Audio Digital Filter Sigma Delta Modulators (DFSDM)
+
+The DFSDM allows capture from PDM microphones through an SPI interface. The
+audio interface is seen as a sub-block of the DFSDM device.
+For details on the DFSDM bindings refer to ../iio/adc/st,stm32-dfsdm-adc.txt
+
+Required properties:
+ - compatible: "st,stm32h7-dfsdm-dai".
+
+ - #sound-dai-cells : Must be equal to 0
+
+ - io-channels : phandle to iio dfsdm instance node.
+
+Example of a sound card using the audio DFSDM node:
+
+ sound_card {
+ compatible = "audio-graph-card";
+
+ dais = <&cpu_port>;
+ };
+
+ dfsdm: dfsdm@40017000 {
+ compatible = "st,stm32h7-dfsdm";
+ reg = <0x40017000 0x400>;
+ clocks = <&rcc DFSDM1_CK>;
+ clock-names = "dfsdm";
+ #interrupt-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dfsdm_adc0: filter@0 {
+ compatible = "st,stm32-dfsdm-dmic";
+ reg = <0>;
+ interrupts = <110>;
+ dmas = <&dmamux1 101 0x400 0x00>;
+ dma-names = "rx";
+ st,adc-channels = <1>;
+ st,adc-channel-names = "dmic0";
+ st,adc-channel-types = "SPI_R";
+ st,adc-channel-clk-src = "CLKOUT";
+ st,filter-order = <5>;
+
+ dfsdm_dai0: dfsdm-dai {
+ compatible = "st,stm32h7-dfsdm-dai";
+ #sound-dai-cells = <0>;
+ io-channels = <&dfsdm_adc0 0>;
+ cpu_port: port {
+ dfsdm_endpoint: endpoint {
+ remote-endpoint = <&dmic0_endpoint>;
+ };
+ };
+ };
+ };
+
+ dmic0: dmic@0 {
+ compatible = "dmic-codec";
+ #sound-dai-cells = <0>;
+ port {
+ dmic0_endpoint: endpoint {
+ remote-endpoint = <&dfsdm_endpoint>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sound/st,stm32-sai.txt b/Documentation/devicetree/bindings/sound/st,stm32-sai.txt
index 1f9cd7095337..b1acc1a256ba 100644
--- a/Documentation/devicetree/bindings/sound/st,stm32-sai.txt
+++ b/Documentation/devicetree/bindings/sound/st,stm32-sai.txt
@@ -20,11 +20,6 @@ Required properties:
Optional properties:
- resets: Reference to a reset controller asserting the SAI
- - st,sync: specify synchronization mode.
- By default SAI sub-block is in asynchronous mode.
- This property sets SAI sub-block as slave of another SAI sub-block.
- Must contain the phandle and index of the sai sub-block providing
- the synchronization.
SAI subnodes:
Two subnodes corresponding to SAI sub-block instances A and B can be defined.
@@ -44,6 +39,13 @@ SAI subnodes required properties:
- pinctrl-names: should contain only value "default"
- pinctrl-0: see Documentation/devicetree/bindings/pinctrl/pinctrl-stm32.txt
+SAI subnodes optional properties:
+ - st,sync: specify synchronization mode.
+ By default SAI sub-block is in asynchronous mode.
+ This property sets SAI sub-block as slave of another SAI sub-block.
+ Must contain the phandle and index of the sai sub-block providing
+ the synchronization.
+
The device node should contain one 'port' child node with one child 'endpoint'
node, according to the bindings defined in Documentation/devicetree/bindings/
graph.txt.
diff --git a/Documentation/devicetree/bindings/sound/sun4i-i2s.txt b/Documentation/devicetree/bindings/sound/sun4i-i2s.txt
index 05d7135a8d2f..b9d50d6cdef3 100644
--- a/Documentation/devicetree/bindings/sound/sun4i-i2s.txt
+++ b/Documentation/devicetree/bindings/sound/sun4i-i2s.txt
@@ -8,6 +8,7 @@ Required properties:
- compatible: should be one of the following:
- "allwinner,sun4i-a10-i2s"
- "allwinner,sun6i-a31-i2s"
+ - "allwinner,sun8i-a83t-i2s"
- "allwinner,sun8i-h3-i2s"
- reg: physical base address of the controller and length of memory mapped
region.
@@ -23,6 +24,7 @@ Required properties:
Required properties for the following compatibles:
- "allwinner,sun6i-a31-i2s"
+ - "allwinner,sun8i-a83t-i2s"
- "allwinner,sun8i-h3-i2s"
- resets: phandle to the reset line for this codec
diff --git a/Documentation/devicetree/bindings/sound/tas5720.txt b/Documentation/devicetree/bindings/sound/tas5720.txt
index 40d94f82beb3..7481653fe8e3 100644
--- a/Documentation/devicetree/bindings/sound/tas5720.txt
+++ b/Documentation/devicetree/bindings/sound/tas5720.txt
@@ -6,10 +6,12 @@ audio playback. For more product information please see the links below:
http://www.ti.com/product/TAS5720L
http://www.ti.com/product/TAS5720M
+http://www.ti.com/product/TAS5722L
Required properties:
-- compatible : "ti,tas5720"
+- compatible : "ti,tas5720",
+ "ti,tas5722"
- reg : I2C slave address
- dvdd-supply : phandle to a 3.3-V supply for the digital circuitry
- pvdd-supply : phandle to a supply used for the Class-D amp and the analog
diff --git a/Documentation/devicetree/bindings/sound/tfa9879.txt b/Documentation/devicetree/bindings/sound/tfa9879.txt
index 23ba522d9e2b..1620e6848436 100644
--- a/Documentation/devicetree/bindings/sound/tfa9879.txt
+++ b/Documentation/devicetree/bindings/sound/tfa9879.txt
@@ -6,18 +6,18 @@ Required properties:
- reg : the I2C address of the device
+- #sound-dai-cells : must be 0.
+
Example:
&i2c1 {
- clock-frequency = <100000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c1>;
- status = "okay";
- codec: tfa9879@6c {
+ amp: amp@6c {
#sound-dai-cells = <0>;
compatible = "nxp,tfa9879";
reg = <0x6c>;
- };
+ };
};
diff --git a/Documentation/devicetree/bindings/sound/ti,tas6424.txt b/Documentation/devicetree/bindings/sound/ti,tas6424.txt
new file mode 100644
index 000000000000..1c4ada0eef4e
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/ti,tas6424.txt
@@ -0,0 +1,20 @@
+Texas Instruments TAS6424 Quad-Channel Audio amplifier
+
+The TAS6424 serial control bus communicates through the I2C protocol.
+
+Required properties:
+ - compatible: "ti,tas6424" - TAS6424
+ - reg: I2C slave address
+ - #sound-dai-cells: must be equal to 0
+
+Example:
+
+tas6424: tas6424@6a {
+ compatible = "ti,tas6424";
+ reg = <0x6a>;
+
+ #sound-dai-cells = <0>;
+};
+
+For more product information please see the link below:
+http://www.ti.com/product/TAS6424-Q1
diff --git a/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt b/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
index 6fbba562eaa7..5b3c33bb99e5 100644
--- a/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
+++ b/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
@@ -22,7 +22,7 @@ Required properties:
Optional properties:
-- gpio-reset - gpio pin number used for codec reset
+- reset-gpios - GPIO specification for the active low RESET input.
- ai31xx-micbias-vg - MicBias Voltage setting
1 or MICBIAS_2_0V - MICBIAS output is powered to 2.0V
2 or MICBIAS_2_5V - MICBIAS output is powered to 2.5V
@@ -30,6 +30,10 @@ Optional properties:
If this node is not mentioned or if the value is unknown, then
micbias is set to 2.0V.
+Deprecated properties:
+
+- gpio-reset - gpio pin number used for codec reset
+
CODEC output pins:
* HPL
* HPR
@@ -48,6 +52,7 @@ CODEC input pins:
The pins can be used in referring sound node's audio-routing property.
Example:
+#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/sound/tlv320aic31xx-micbias.h>
tlv320aic31xx: tlv320aic31xx@18 {
@@ -56,6 +61,8 @@ tlv320aic31xx: tlv320aic31xx@18 {
ai31xx-micbias-vg = <MICBIAS_OFF>;
+ reset-gpios = <&gpio1 17 GPIO_ACTIVE_LOW>;
+
HPVDD-supply = <&regulator>;
SPRVDD-supply = <&regulator>;
SPLVDD-supply = <&regulator>;
diff --git a/Documentation/devicetree/bindings/sound/tlv320aic3x.txt b/Documentation/devicetree/bindings/sound/tlv320aic3x.txt
index ba5b45c483f5..9796c4639262 100644
--- a/Documentation/devicetree/bindings/sound/tlv320aic3x.txt
+++ b/Documentation/devicetree/bindings/sound/tlv320aic3x.txt
@@ -17,7 +17,7 @@ Required properties:
Optional properties:
-- gpio-reset - gpio pin number used for codec reset
+- reset-gpios - GPIO specification for the active low RESET input.
- ai3x-gpio-func - <array of 2 int> - AIC3X_GPIO1 & AIC3X_GPIO2 Functionality
- Not supported on tlv320aic3104
- ai3x-micbias-vg - MicBias Voltage required.
@@ -34,6 +34,10 @@ Optional properties:
- AVDD-supply, IOVDD-supply, DRVDD-supply, DVDD-supply : power supplies for the
device as covered in Documentation/devicetree/bindings/regulator/regulator.txt
+Deprecated properties:
+
+- gpio-reset - gpio pin number used for codec reset
+
CODEC output pins:
* LLOUT
* RLOUT
@@ -61,10 +65,14 @@ The pins can be used in referring sound node's audio-routing property.
Example:
+#include <dt-bindings/gpio/gpio.h>
+
tlv320aic3x: tlv320aic3x@1b {
compatible = "ti,tlv320aic3x";
reg = <0x1b>;
+ reset-gpios = <&gpio1 17 GPIO_ACTIVE_LOW>;
+
AVDD-supply = <&regulator>;
IOVDD-supply = <&regulator>;
DRVDD-supply = <&regulator>;
diff --git a/Documentation/devicetree/bindings/sound/tscs42xx.txt b/Documentation/devicetree/bindings/sound/tscs42xx.txt
new file mode 100644
index 000000000000..2ac2f0996697
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/tscs42xx.txt
@@ -0,0 +1,16 @@
+TSCS42XX Audio CODEC
+
+Required Properties:
+
+ - compatible : "tempo,tscs42A1" for analog mic
+ "tempo,tscs42A2" for digital mic
+
+ - reg : <0x71> for analog mic
+ <0x69> for digital mic
+
+Example:
+
+wookie: codec@69 {
+ compatible = "tempo,tscs42A2";
+ reg = <0x69>;
+};
diff --git a/Documentation/devicetree/bindings/sound/uniphier,evea.txt b/Documentation/devicetree/bindings/sound/uniphier,evea.txt
new file mode 100644
index 000000000000..3f31b235f18b
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/uniphier,evea.txt
@@ -0,0 +1,26 @@
+Socionext EVEA - UniPhier SoC internal codec driver
+
+Required properties:
+- compatible : should be "socionext,uniphier-evea".
+- reg : offset and length of the register set for the device.
+- clock-names : should include the following entries:
+ "evea", "exiv"
+- clocks : a list of phandles; should contain an entry for each
+ entry in clock-names.
+- reset-names : should include the following entries:
+ "evea", "exiv", "adamv"
+- resets : a list of phandles; should contain an entry for each
+ entry in reset-names.
+- #sound-dai-cells: should be 1.
+
+Example:
+
+ codec {
+ compatible = "socionext,uniphier-evea";
+ reg = <0x57900000 0x1000>;
+ clock-names = "evea", "exiv";
+ clocks = <&sys_clk 41>, <&sys_clk 42>;
+ reset-names = "evea", "exiv", "adamv";
+ resets = <&sys_rst 41>, <&sys_rst 42>, <&adamv_rst 0>;
+ #sound-dai-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/spi/sh-msiof.txt b/Documentation/devicetree/bindings/spi/sh-msiof.txt
index bdd83959019c..80710f0f0448 100644
--- a/Documentation/devicetree/bindings/spi/sh-msiof.txt
+++ b/Documentation/devicetree/bindings/spi/sh-msiof.txt
@@ -36,7 +36,21 @@ Required properties:
Optional properties:
- clocks : Must contain a reference to the functional clock.
-- num-cs : Total number of chip-selects (default is 1)
+- num-cs : Total number of chip selects (default is 1).
+ Up to 3 native chip selects are supported:
+ 0: MSIOF_SYNC
+ 1: MSIOF_SS1
+ 2: MSIOF_SS2
+ Hardware limitations related to chip selects:
+ - Native chip selects are always deasserted in
+ between transfers that are part of the same
+ message. Use cs-gpios to work around this.
+ - All slaves using native chip selects must use the
+ same spi-cs-high configuration. Use cs-gpios to
+ work around this.
+ - When using GPIO chip selects, at least one native
+ chip select must be left unused, as it will be
+ driven anyway.
- dmas : Must contain a list of two references to DMA
specifiers, one for transmission, and one for
reception.
diff --git a/Documentation/devicetree/bindings/spi/spi-meson.txt b/Documentation/devicetree/bindings/spi/spi-meson.txt
index 825c39cae74a..b7f5e86fed22 100644
--- a/Documentation/devicetree/bindings/spi/spi-meson.txt
+++ b/Documentation/devicetree/bindings/spi/spi-meson.txt
@@ -27,7 +27,9 @@ The Meson SPICC is generic SPI controller for general purpose Full-Duplex
communications with dedicated 16 words RX/TX PIO FIFOs.
Required properties:
- - compatible: should be "amlogic,meson-gx-spicc" on Amlogic GX SoCs.
+ - compatible: should be:
+ "amlogic,meson-gx-spicc" on Amlogic GX and compatible SoCs.
+ "amlogic,meson-axg-spicc" on Amlogic AXG and compatible SoCs
- reg: physical base address and length of the controller registers
- interrupts: The interrupt specifier
- clock-names: Must contain "core"
diff --git a/Documentation/devicetree/bindings/spi/spi-orion.txt b/Documentation/devicetree/bindings/spi/spi-orion.txt
index df8ec31f2f07..8434a65fc12a 100644
--- a/Documentation/devicetree/bindings/spi/spi-orion.txt
+++ b/Documentation/devicetree/bindings/spi/spi-orion.txt
@@ -18,8 +18,17 @@ Required properties:
The eight register sets following the control registers refer to
chip-select lines 0 through 7 respectively.
- cell-index : Which of multiple SPI controllers is this.
+- clocks : phandles to the reference clocks for this device. The first
+ one is used as the clock on the SPI bus; the
+ second one is optional and clocks the
+ functional part of the controller.
+
Optional properties:
- interrupts : Is currently not used.
+- clock-names : names of the used clocks; mandatory if the second clock
+ is used. The names must be "core" and "axi" (the
+ latter only on Armada 7K/8K).
+
Example:
spi@10600 {
diff --git a/Documentation/devicetree/bindings/spi/spi-xilinx.txt b/Documentation/devicetree/bindings/spi/spi-xilinx.txt
index c7b7856bd528..7bf61efc66c8 100644
--- a/Documentation/devicetree/bindings/spi/spi-xilinx.txt
+++ b/Documentation/devicetree/bindings/spi/spi-xilinx.txt
@@ -2,7 +2,7 @@ Xilinx SPI controller Device Tree Bindings
-------------------------------------------------
Required properties:
-- compatible : Should be "xlnx,xps-spi-2.00.a" or "xlnx,xps-spi-2.00.b"
+- compatible : Should be "xlnx,xps-spi-2.00.a", "xlnx,xps-spi-2.00.b" or "xlnx,axi-quad-spi-1.00.a"
- reg : Physical base address and size of SPI registers map.
- interrupts : Property with a value describing the interrupt
number.
diff --git a/Documentation/devicetree/bindings/timer/actions,owl-timer.txt b/Documentation/devicetree/bindings/timer/actions,owl-timer.txt
index e3c28da80cb2..977054f87563 100644
--- a/Documentation/devicetree/bindings/timer/actions,owl-timer.txt
+++ b/Documentation/devicetree/bindings/timer/actions,owl-timer.txt
@@ -2,6 +2,7 @@ Actions Semi Owl Timer
Required properties:
- compatible : "actions,s500-timer" for S500
+ "actions,s700-timer" for S700
"actions,s900-timer" for S900
- reg : Offset and length of the register set for the device.
- interrupts : Should contain the interrupts.
diff --git a/Documentation/devicetree/bindings/timer/spreadtrum,sprd-timer.txt b/Documentation/devicetree/bindings/timer/spreadtrum,sprd-timer.txt
new file mode 100644
index 000000000000..6d97e7d0f6e8
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/spreadtrum,sprd-timer.txt
@@ -0,0 +1,20 @@
+Spreadtrum timers
+
+The Spreadtrum SC9860 platform provides 3 general-purpose timers.
+These timers support a 32-bit or 64-bit counter, as well as periodic
+mode and one-shot mode, and they can be a wakeup source
+during deep sleep.
+
+Required properties:
+- compatible: should be "sprd,sc9860-timer" for SC9860 platform.
+- reg: The register address of the timer device.
+- interrupts: Should contain the interrupt for the timer device.
+- clocks: The phandle to the source clock (usually a 32.768 kHz fixed clock).
+
+Example:
+ timer@40050000 {
+ compatible = "sprd,sc9860-timer";
+ reg = <0 0x40050000 0 0x20>;
+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ext_32k>;
+ };
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 0994bdd82cd3..f776fb804a8c 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -347,6 +347,7 @@ tcg Trusted Computing Group
tcl Toby Churchill Ltd.
technexion TechNexion
technologic Technologic Systems
+tempo Tempo Semiconductor
terasic Terasic Inc.
thine THine Electronics, Inc.
ti Texas Instruments
diff --git a/Documentation/devicetree/bindings/watchdog/zii,rave-sp-wdt.txt b/Documentation/devicetree/bindings/watchdog/zii,rave-sp-wdt.txt
new file mode 100644
index 000000000000..3de96186e92e
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/zii,rave-sp-wdt.txt
@@ -0,0 +1,39 @@
+Zodiac Inflight Innovations RAVE Supervisory Processor Watchdog Bindings
+
+The RAVE SP watchdog device is an "MFD cell" device corresponding to
+the watchdog functionality of the RAVE Supervisory Processor. It is
+expected that its Device Tree node is specified as a child of the node
+corresponding to the parent RAVE SP device (as documented in
+Documentation/devicetree/bindings/mfd/zii,rave-sp.txt).
+
+Required properties:
+
+- compatible: Depending on wire protocol implemented by RAVE SP
+ firmware, should be one of:
+ - "zii,rave-sp-watchdog"
+ - "zii,rave-sp-watchdog-legacy"
+
+Optional properties:
+
+- wdt-timeout: Two-byte nvmem cell specified as per
+ Documentation/devicetree/bindings/nvmem/nvmem.txt
+
+Example:
+
+ rave-sp {
+ compatible = "zii,rave-sp-rdu1";
+ current-speed = <38400>;
+
+ eeprom {
+ wdt_timeout: wdt-timeout@8E {
+ reg = <0x8E 2>;
+ };
+ };
+
+ watchdog {
+ compatible = "zii,rave-sp-watchdog";
+ nvmem-cells = <&wdt_timeout>;
+ nvmem-cell-names = "wdt-timeout";
+ };
+ };
+
diff --git a/Documentation/driver-api/dmaengine/provider.rst b/Documentation/driver-api/dmaengine/provider.rst
index 814acb4d2294..dfc4486b5743 100644
--- a/Documentation/driver-api/dmaengine/provider.rst
+++ b/Documentation/driver-api/dmaengine/provider.rst
@@ -111,40 +111,36 @@ The first thing you need to do in your driver is to allocate this
structure. Any of the usual memory allocators will do, but you'll also
need to initialize a few fields in there:
-- channels: should be initialized as a list using the
+- ``channels``: should be initialized as a list using the
INIT_LIST_HEAD macro for example
-- src_addr_widths:
+- ``src_addr_widths``:
should contain a bitmask of the supported source transfer width
-- dst_addr_widths:
+- ``dst_addr_widths``:
should contain a bitmask of the supported destination transfer width
-- directions:
+- ``directions``:
should contain a bitmask of the supported slave directions
(i.e. excluding mem2mem transfers)
-- residue_granularity:
+- ``residue_granularity``:
+ granularity of the transfer residue reported to dma_set_residue.
+ This can be either:
- - Granularity of the transfer residue reported to dma_set_residue.
- This can be either:
+ - Descriptor:
+ your device doesn't support any kind of residue
+ reporting. The framework will only know that a particular
+ transaction descriptor is done.
- - Descriptor
+ - Segment:
+ your device is able to report which chunks have been transferred
- - Your device doesn't support any kind of residue
- reporting. The framework will only know that a particular
- transaction descriptor is done.
+ - Burst:
+ your device is able to report which bursts have been transferred
- - Segment
-
- - Your device is able to report which chunks have been transferred
-
- - Burst
-
- - Your device is able to report which burst have been transferred
-
- - dev: should hold the pointer to the ``struct device`` associated
- to your current driver instance.
+- ``dev``: should hold the pointer to the ``struct device`` associated
+ to your current driver instance.
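+
+As a minimal sketch (assuming a hypothetical ``foo`` driver and the usual
+definitions from ``linux/dmaengine.h``), the initialization could look
+like::
+
+	struct dma_device *dd = &foo->ddev;
+
+	INIT_LIST_HEAD(&dd->channels);
+	/* 32-bit wide accesses on both ends of a transfer */
+	dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	/* slave directions only, mem2mem excluded */
+	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+	dd->dev = &pdev->dev;
+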
Supported transaction types
---------------------------
diff --git a/Documentation/driver-api/iio/hw-consumer.rst b/Documentation/driver-api/iio/hw-consumer.rst
new file mode 100644
index 000000000000..8facce6a6733
--- /dev/null
+++ b/Documentation/driver-api/iio/hw-consumer.rst
@@ -0,0 +1,51 @@
+===========
+HW consumer
+===========
+An IIO device can be directly connected to another device in hardware. In this
+case the buffers between IIO provider and IIO consumer are handled by hardware.
+The Industrial I/O HW consumer offers a way to bond these IIO devices without
+a software buffer for data. The implementation can be found under
+:file:`drivers/iio/buffer/industrialio-hw-consumer.c`
+
+
+* struct :c:type:`iio_hw_consumer` — Hardware consumer structure
+* :c:func:`iio_hw_consumer_alloc` — Allocate IIO hardware consumer
+* :c:func:`iio_hw_consumer_free` — Free IIO hardware consumer
+* :c:func:`iio_hw_consumer_enable` — Enable IIO hardware consumer
+* :c:func:`iio_hw_consumer_disable` — Disable IIO hardware consumer
+
+
+HW consumer setup
+=================
+
+As with a standard IIO device, the implementation is based on the IIO
+provider/consumer model.
+A typical IIO HW consumer setup looks like this::
+
+    static struct iio_hw_consumer *hwc;
+
+    /* forward declaration, so that adc_info can reference the callback */
+    static int adc_read_raw(struct iio_dev *indio_dev,
+			struct iio_chan_spec const *chan, int *val,
+			int *val2, long mask);
+
+    static const struct iio_info adc_info = {
+	.read_raw = adc_read_raw,
+    };
+
+    static int adc_read_raw(struct iio_dev *indio_dev,
+			struct iio_chan_spec const *chan, int *val,
+			int *val2, long mask)
+    {
+	int ret;
+
+	ret = iio_hw_consumer_enable(hwc);
+	if (ret)
+		return ret;
+
+	/* Acquire data */
+
+	iio_hw_consumer_disable(hwc);
+	return 0;
+    }
+
+    static int adc_probe(struct platform_device *pdev)
+    {
+	hwc = devm_iio_hw_consumer_alloc(&pdev->dev);
+	if (IS_ERR(hwc))
+		return PTR_ERR(hwc);
+
+	return 0;
+    }
+
+More details
+============
+.. kernel-doc:: include/linux/iio/hw-consumer.h
+.. kernel-doc:: drivers/iio/buffer/industrialio-hw-consumer.c
+ :export:
+
diff --git a/Documentation/driver-api/iio/index.rst b/Documentation/driver-api/iio/index.rst
index e5c3922d1b6f..7fba341bd8b2 100644
--- a/Documentation/driver-api/iio/index.rst
+++ b/Documentation/driver-api/iio/index.rst
@@ -15,3 +15,4 @@ Contents:
buffers
triggers
triggered-buffers
+ hw-consumer
diff --git a/Documentation/driver-api/pm/devices.rst b/Documentation/driver-api/pm/devices.rst
index 53c1b0b06da5..1128705a5731 100644
--- a/Documentation/driver-api/pm/devices.rst
+++ b/Documentation/driver-api/pm/devices.rst
@@ -777,17 +777,51 @@ The driver can indicate that by setting ``DPM_FLAG_SMART_SUSPEND`` in
runtime suspend at the beginning of the ``suspend_late`` phase of system-wide
suspend (or in the ``poweroff_late`` phase of hibernation), when runtime PM
has been disabled for it, under the assumption that its state should not change
-after that point until the system-wide transition is over. If that happens, the
-driver's system-wide resume callbacks, if present, may still be invoked during
-the subsequent system-wide resume transition and the device's runtime power
-management status may be set to "active" before enabling runtime PM for it,
-so the driver must be prepared to cope with the invocation of its system-wide
-resume callbacks back-to-back with its ``->runtime_suspend`` one (without the
-intervening ``->runtime_resume`` and so on) and the final state of the device
-must reflect the "active" status for runtime PM in that case.
+after that point until the system-wide transition is over (the PM core itself
+does that for devices whose "noirq", "late" and "early" system-wide PM callbacks
+are executed directly by it). If that happens, the driver's system-wide resume
+callbacks, if present, may still be invoked during the subsequent system-wide
+resume transition and the device's runtime power management status may be set
+to "active" before enabling runtime PM for it, so the driver must be prepared to
+cope with the invocation of its system-wide resume callbacks back-to-back with
+its ``->runtime_suspend`` one (without the intervening ``->runtime_resume`` and
+so on) and the final state of the device must reflect the "active" runtime PM
+status in that case.
During system-wide resume from a sleep state it's easiest to put devices into
the full-power state, as explained in :file:`Documentation/power/runtime_pm.txt`.
-Refer to that document for more information regarding this particular issue as
+[Refer to that document for more information regarding this particular issue as
well as for information on the device runtime power management framework in
-general.
+general.]
+
+However, it often is desirable to leave devices in suspend after system
+transitions to the working state, especially if those devices had been in
+runtime suspend before the preceding system-wide suspend (or analogous)
+transition. Device drivers can use the ``DPM_FLAG_LEAVE_SUSPENDED`` flag to
+indicate to the PM core (and middle-layer code) that they prefer the specific
+devices handled by them to be left suspended and they have no problems with
+skipping their system-wide resume callbacks for this reason. Whether or not the
+devices will actually be left in suspend may depend on their state before the
+given system suspend-resume cycle and on the type of the system transition under
+way. In particular, devices are not left suspended if that transition is a
+restore from hibernation, as device states are not guaranteed to be reflected
+by the information stored in the hibernation image in that case.
+
+The middle-layer code involved in the handling of the device is expected to
+indicate to the PM core if the device may be left in suspend by setting its
+:c:member:`power.may_skip_resume` status bit which is checked by the PM core
+during the "noirq" phase of the preceding system-wide suspend (or analogous)
+transition. The middle layer is then responsible for handling the device as
+appropriate in its "noirq" resume callback, which is executed regardless of
+whether or not the device is left suspended, but the other resume callbacks
+(except for ``->complete``) will be skipped automatically by the PM core if the
+device really can be left in suspend.
+
+For devices whose "noirq", "late" and "early" driver callbacks are invoked
+directly by the PM core, all of the system-wide resume callbacks are skipped if
+``DPM_FLAG_LEAVE_SUSPENDED`` is set and the device is in runtime suspend during
+the ``suspend_noirq`` (or analogous) phase or the transition under way is a
+proper system suspend (rather than anything related to hibernation) and the
+device's wakeup settings are suitable for runtime PM (that is, it cannot
+generate wakeup signals at all or it is allowed to wake up the system from
+sleep).
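+
+As a hedged sketch (the driver name is hypothetical), a driver would
+typically opt in from its probe routine::
+
+	static int foo_probe(struct platform_device *pdev)
+	{
+		/* ... */
+		dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_SUSPEND |
+					DPM_FLAG_LEAVE_SUSPENDED);
+		return 0;
+	}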
diff --git a/Documentation/driver-api/scsi.rst b/Documentation/driver-api/scsi.rst
index 9ae03171daca..3ae337929721 100644
--- a/Documentation/driver-api/scsi.rst
+++ b/Documentation/driver-api/scsi.rst
@@ -224,6 +224,14 @@ mid to lowlevel SCSI driver interface
.. kernel-doc:: drivers/scsi/hosts.c
:export:
+drivers/scsi/scsi_common.c
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+general support functions
+
+.. kernel-doc:: drivers/scsi/scsi_common.c
+ :export:
+
Transport classes
-----------------
@@ -332,5 +340,5 @@ todo
~~~~
Parallel (fast/wide/ultra) SCSI, USB, SATA, SAS, Fibre Channel,
-FireWire, ATAPI devices, Infiniband, I20, iSCSI, Parallel ports,
+FireWire, ATAPI devices, Infiniband, I2O, iSCSI, Parallel ports,
netlink...
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index c180045eb43b..7c1bb3d0c222 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -384,6 +384,9 @@ RESET
devm_reset_control_get()
devm_reset_controller_register()
+SERDEV
+ devm_serdev_device_open()
+
SLAVE DMA ENGINE
devm_acpi_dma_controller_register()
diff --git a/Documentation/features/debug/KASAN/arch-support.txt b/Documentation/features/debug/KASAN/arch-support.txt
index f377290fe48e..3406fae833c3 100644
--- a/Documentation/features/debug/KASAN/arch-support.txt
+++ b/Documentation/features/debug/KASAN/arch-support.txt
@@ -35,5 +35,5 @@
| um: | TODO |
| unicore32: | TODO |
| x86: | ok | 64-bit only
- | xtensa: | TODO |
+ | xtensa: | ok |
-----------------------
diff --git a/Documentation/features/debug/stackprotector/arch-support.txt b/Documentation/features/debug/stackprotector/arch-support.txt
index d7acd7bd3619..59a4c9ffb7f3 100644
--- a/Documentation/features/debug/stackprotector/arch-support.txt
+++ b/Documentation/features/debug/stackprotector/arch-support.txt
@@ -35,5 +35,5 @@
| um: | TODO |
| unicore32: | TODO |
| x86: | ok |
- | xtensa: | TODO |
+ | xtensa: | ok |
-----------------------
diff --git a/Documentation/filesystems/nfs/Exporting b/Documentation/filesystems/nfs/Exporting
index 520a4becb75c..63889149f532 100644
--- a/Documentation/filesystems/nfs/Exporting
+++ b/Documentation/filesystems/nfs/Exporting
@@ -56,13 +56,25 @@ a/ A dentry flag DCACHE_DISCONNECTED which is set on
any dentry that might not be part of the proper prefix.
This is set when anonymous dentries are created, and cleared when a
dentry is noticed to be a child of a dentry which is in the proper
- prefix.
-
-b/ A per-superblock list "s_anon" of dentries which are the roots of
- subtrees that are not in the proper prefix. These dentries, as
- well as the proper prefix, need to be released at unmount time. As
- these dentries will not be hashed, they are linked together on the
- d_hash list_head.
+ prefix. If the refcount on a dentry with this flag set
+ becomes zero, the dentry is immediately discarded, rather than being
+ kept in the dcache. If a dentry that is not already in the dcache
+ is repeatedly accessed by filehandle (as NFSD might do), a new dentry
+ will be allocated for each access, and discarded at the end of
+ the access.
+
+ Note that such a dentry can acquire children, name, ancestors, etc.
+ without losing DCACHE_DISCONNECTED - that flag is only cleared when
+ the subtree is successfully reconnected to root. Until then, dentries
+ in such a subtree are retained only as long as there are references;
+ refcount reaching zero means immediate eviction, same as for unhashed
+ dentries. That guarantees that we won't need to hunt them down upon
+ umount.
+
+b/ A primitive for creation of secondary roots - d_obtain_root(inode).
+ Those do _not_ bear DCACHE_DISCONNECTED. They are placed on the
+ per-superblock list (->s_roots), so they can be located at umount
+ time for eviction purposes.
c/ Helper routines to allocate anonymous dentries, and to help attach
loose directory dentries at lookup time. They are:
@@ -77,7 +89,6 @@ c/ Helper routines to allocate anonymous dentries, and to help attach
(such as an anonymous one created by d_obtain_alias), if appropriate.
It returns NULL when the passed-in dentry is used, following the calling
convention of ->lookup.
-
Filesystem Issues
-----------------
diff --git a/Documentation/gpio/board.txt b/Documentation/gpio/board.txt
index a0f61898d493..659bb19f5b3c 100644
--- a/Documentation/gpio/board.txt
+++ b/Documentation/gpio/board.txt
@@ -2,6 +2,7 @@ GPIO Mappings
=============
This document explains how GPIOs can be assigned to given devices and functions.
+
Note that it only applies to the new descriptor-based interface. For a
description of the deprecated integer-based GPIO interface please refer to
gpio-legacy.txt (actually, there is no real mapping possible with the old
@@ -49,7 +50,7 @@ This property will make GPIOs 15, 16 and 17 available to the driver under the
power = gpiod_get(dev, "power", GPIOD_OUT_HIGH);
-The led GPIOs will be active-high, while the power GPIO will be active-low (i.e.
+The led GPIOs will be active high, while the power GPIO will be active low (i.e.
gpiod_is_active_low(power) will be true).
The second parameter of the gpiod_get() functions, the con_id string, has to be
@@ -122,9 +123,14 @@ where
can be NULL, in which case it will match any function.
- idx is the index of the GPIO within the function.
- flags is defined to specify the following properties:
- * GPIOF_ACTIVE_LOW - to configure the GPIO as active-low
- * GPIOF_OPEN_DRAIN - GPIO pin is open drain type.
- * GPIOF_OPEN_SOURCE - GPIO pin is open source type.
+ * GPIO_ACTIVE_HIGH - GPIO line is active high
+ * GPIO_ACTIVE_LOW - GPIO line is active low
+ * GPIO_OPEN_DRAIN - GPIO line is set up as open drain
+ * GPIO_OPEN_SOURCE - GPIO line is set up as open source
+ * GPIO_PERSISTENT - GPIO line is persistent during
+ suspend/resume and maintains its value
+ * GPIO_TRANSITORY - GPIO line is transitory and may lose its
+ electrical state during suspend/resume
In the future, these flags might be extended to support more properties.
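+
+A minimal sketch of a lookup table using these flags (the chip label, device
+id and line numbers are illustrative, not taken from a real board):
+
+	static struct gpiod_lookup_table foo_gpios_table = {
+		.dev_id = "foo.0",
+		.table = {
+			GPIO_LOOKUP_IDX("gpio.0", 15, "led", 0, GPIO_ACTIVE_HIGH),
+			GPIO_LOOKUP("gpio.0", 1, "power", GPIO_ACTIVE_LOW),
+			{ },
+		},
+	};
+
+The table is then registered from board setup code with
+gpiod_add_lookup_table(&foo_gpios_table).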
diff --git a/Documentation/gpio/consumer.txt b/Documentation/gpio/consumer.txt
index 63e1bd1d88e3..d53e5b5cfc9c 100644
--- a/Documentation/gpio/consumer.txt
+++ b/Documentation/gpio/consumer.txt
@@ -66,6 +66,15 @@ for the GPIO. Values can be:
* GPIOD_IN to initialize the GPIO as input.
* GPIOD_OUT_LOW to initialize the GPIO as output with a value of 0.
* GPIOD_OUT_HIGH to initialize the GPIO as output with a value of 1.
+* GPIOD_OUT_LOW_OPEN_DRAIN same as GPIOD_OUT_LOW but also enforces the line
+ to be electrically used with open drain.
+* GPIOD_OUT_HIGH_OPEN_DRAIN same as GPIOD_OUT_HIGH but also enforces the line
+ to be electrically used with open drain.
+
+The last two flags are used for cases where open drain is mandatory, such
+as I2C: if the line is not already configured as open drain in the mappings
+(see board.txt), then open drain will be enforced anyway and a warning will be
+printed that the board configuration needs to be updated to match the use case.
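+
+As an illustration (the "sda" function name is hypothetical, not from any
+real mapping), a bit-banged bus line that must be open drain could be
+requested with:
+
+	struct gpio_desc *sda;
+
+	sda = gpiod_get(dev, "sda", GPIOD_OUT_HIGH_OPEN_DRAIN);
+	if (IS_ERR(sda))
+		return PTR_ERR(sda);
+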
Both functions return either a valid GPIO descriptor, or an error code checkable
with IS_ERR() (they will never return a NULL pointer). -ENOENT will be returned
@@ -184,7 +193,7 @@ A driver can also query the current direction of a GPIO:
int gpiod_get_direction(const struct gpio_desc *desc)
-This function will return either GPIOF_DIR_IN or GPIOF_DIR_OUT.
+This function returns 0 for output, 1 for input, or an error code in case of error.
Be aware that there is no default direction for GPIOs. Therefore, **using a GPIO
without setting its direction first is illegal and will result in undefined
@@ -240,59 +249,71 @@ that can't be accessed from hardIRQ handlers, these calls act the same as the
spinlock-safe calls.
-Active-low State and Raw GPIO Values
-------------------------------------
-Device drivers like to manage the logical state of a GPIO, i.e. the value their
-device will actually receive, no matter what lies between it and the GPIO line.
-In some cases, it might make sense to control the actual GPIO line value. The
-following set of calls ignore the active-low property of a GPIO and work on the
-raw line value:
-
- int gpiod_get_raw_value(const struct gpio_desc *desc)
- void gpiod_set_raw_value(struct gpio_desc *desc, int value)
- int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
- void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value)
- int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
-
-The active-low state of a GPIO can also be queried using the following call:
-
- int gpiod_is_active_low(const struct gpio_desc *desc)
-
-Note that these functions should only be used with great moderation ; a driver
-should not have to care about the physical line level.
-
-
-The active-low property
------------------------
-
-As a driver should not have to care about the physical line level, all of the
+The active low and open drain semantics
+---------------------------------------
+As a consumer should not have to care about the physical line level, all of the
gpiod_set_value_xxx() or gpiod_set_array_value_xxx() functions operate with
-the *logical* value. With this they take the active-low property into account.
-This means that they check whether the GPIO is configured to be active-low,
+the *logical* value. With this they take the active low property into account.
+This means that they check whether the GPIO is configured to be active low,
and if so, they manipulate the passed value before the physical line level is
driven.
+The same is applicable for open drain or open source output lines: those do not
+actively drive their output high (open drain) or low (open source), they just
+switch their output to a high impedance value. The consumer should not need to
+care. (For details read about open drain in driver.txt.)
+
With this, all the gpiod_set_(array)_value_xxx() functions interpret the
-parameter "value" as "active" ("1") or "inactive" ("0"). The physical line
+parameter "value" as "asserted" ("1") or "de-asserted" ("0"). The physical line
level will be driven accordingly.
-As an example, if the active-low property for a dedicated GPIO is set, and the
-gpiod_set_(array)_value_xxx() passes "active" ("1"), the physical line level
+As an example, if the active low property for a dedicated GPIO is set, and the
+gpiod_set_(array)_value_xxx() passes "asserted" ("1"), the physical line level
will be driven low.
To summarize:
-Function (example) active-low property physical line
-gpiod_set_raw_value(desc, 0); don't care low
-gpiod_set_raw_value(desc, 1); don't care high
-gpiod_set_value(desc, 0); default (active-high) low
-gpiod_set_value(desc, 1); default (active-high) high
-gpiod_set_value(desc, 0); active-low high
-gpiod_set_value(desc, 1); active-low low
-
-Please note again that the set_raw/get_raw functions should be avoided as much
-as possible, especially by drivers which should not care about the actual
-physical line level and worry about the logical value instead.
+Function (example) line property physical line
+gpiod_set_raw_value(desc, 0); don't care low
+gpiod_set_raw_value(desc, 1); don't care high
+gpiod_set_value(desc, 0); default (active high) low
+gpiod_set_value(desc, 1); default (active high) high
+gpiod_set_value(desc, 0); active low high
+gpiod_set_value(desc, 1); active low low
+gpiod_set_value(desc, 0); open drain low
+gpiod_set_value(desc, 1); open drain high impedance
+gpiod_set_value(desc, 0); open source high impedance
+gpiod_set_value(desc, 1); open source high
+
+It is possible to override these semantics using the set_raw/get_raw functions,
+but this should be avoided as much as possible, especially by system-agnostic
+drivers which should not need to care about the actual physical line level, and
+should worry about the logical value instead.
+
+
+Accessing raw GPIO values
+-------------------------
+Consumers exist that need to manage the logical state of a GPIO line, i.e. the value
+their device will actually receive, no matter what lies between it and the GPIO
+line.
+
+The following set of calls ignore the active-low or open drain property of a GPIO and
+work on the raw line value:
+
+ int gpiod_get_raw_value(const struct gpio_desc *desc)
+ void gpiod_set_raw_value(struct gpio_desc *desc, int value)
+ int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
+ void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value)
+ int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
+
+The active low state of a GPIO can also be queried using the following call:
+
+ int gpiod_is_active_low(const struct gpio_desc *desc)
+
+Note that these functions should only be used with great moderation; a driver
+should not have to care about the physical line level or open drain semantics.
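+
+As a brief illustration (assuming a descriptor whose line is flagged active
+low in the mapping):
+
+	gpiod_set_value(desc, 1);	/* logical: asserted, line driven low */
+	gpiod_set_raw_value(desc, 1);	/* raw: line driven high regardless */
+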
Access multiple GPIOs with a single function call
diff --git a/Documentation/gpio/driver.txt b/Documentation/gpio/driver.txt
index d8de1c7de85a..3392a0fd4c23 100644
--- a/Documentation/gpio/driver.txt
+++ b/Documentation/gpio/driver.txt
@@ -88,6 +88,10 @@ ending up in the pin control back-end "behind" the GPIO controller, usually
closer to the actual pins. This way the pin controller can manage the below
listed GPIO configurations.
+If a pin controller back-end is used, the GPIO controller or hardware
+description needs to provide "GPIO ranges" mapping the GPIO line offsets to pin
+numbers on the pin controller so they can properly cross-reference each other.
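+
+When the ranges do not come from the hardware description, the GPIO driver
+may register them itself; a hedged sketch (controller names and numbers are
+illustrative):
+
+	/* GPIOs 0..15 of this chip map to pins 32..47 of "foo-pinctrl" */
+	ret = gpiochip_add_pin_range(&foo->chip, "foo-pinctrl", 0, 32, 16);
+	if (ret)
+		return ret;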
+
GPIOs with debounce support
---------------------------
diff --git a/Documentation/gpio/sysfs.txt b/Documentation/gpio/sysfs.txt
index aeab01aa4d00..6cdeab8650cd 100644
--- a/Documentation/gpio/sysfs.txt
+++ b/Documentation/gpio/sysfs.txt
@@ -1,6 +1,17 @@
GPIO Sysfs Interface for Userspace
==================================
+THIS ABI IS DEPRECATED, THE ABI DOCUMENTATION HAS BEEN MOVED TO
+Documentation/ABI/obsolete/sysfs-gpio AND NEW USERSPACE CONSUMERS
+ARE SUPPOSED TO USE THE CHARACTER DEVICE ABI. THIS OLD SYSFS ABI WILL
+NOT BE DEVELOPED (NO NEW FEATURES), IT WILL JUST BE MAINTAINED.
+
+Refer to the examples in tools/gpio/* for an introduction to the new
+character device ABI. Also see the userspace header in
+include/uapi/linux/gpio.h
+
+The deprecated sysfs ABI
+------------------------
Platforms which use the "gpiolib" implementors framework may choose to
configure a sysfs user interface to GPIOs. This is different from the
debugfs interface, since it provides control over GPIO direction and
diff --git a/Documentation/hwmon/lm25066 b/Documentation/hwmon/lm25066
index 3fa6bf820c88..51b32aa203a8 100644
--- a/Documentation/hwmon/lm25066
+++ b/Documentation/hwmon/lm25066
@@ -8,11 +8,6 @@ Supported chips:
Datasheets:
http://www.ti.com/lit/gpn/lm25056
http://www.ti.com/lit/gpn/lm25056a
- * TI LM25063
- Prefix: 'lm25063'
- Addresses scanned: -
- Datasheet:
- To be announced
* National Semiconductor LM25066
Prefix: 'lm25066'
Addresses scanned: -
@@ -42,7 +37,7 @@ Description
-----------
This driver supports hardware monitoring for National Semiconductor / TI LM25056,
-LM25063, LM25066, LM5064, and LM5066/LM5066I Power Management, Monitoring,
+LM25066, LM5064, and LM5066/LM5066I Power Management, Monitoring,
Control, and Protection ICs.
The driver is a client driver to the core PMBus driver. Please see
@@ -74,12 +69,8 @@ in1_input Measured input voltage.
in1_average Average measured input voltage.
in1_min Minimum input voltage.
in1_max Maximum input voltage.
-in1_crit Critical high input voltage (LM25063 only).
-in1_lcrit Critical low input voltage (LM25063 only).
in1_min_alarm Input voltage low alarm.
in1_max_alarm Input voltage high alarm.
-in1_lcrit_alarm Input voltage critical low alarm (LM25063 only).
-in1_crit_alarm Input voltage critical high alarm. (LM25063 only).
in2_label "vmon"
in2_input Measured voltage on VAUX pin
@@ -94,16 +85,12 @@ in3_input Measured output voltage.
in3_average Average measured output voltage.
in3_min Minimum output voltage.
in3_min_alarm Output voltage low alarm.
-in3_highest Historical minimum output voltage (LM25063 only).
-in3_lowest Historical maximum output voltage (LM25063 only).
curr1_label "iin"
curr1_input Measured input current.
curr1_average Average measured input current.
curr1_max Maximum input current.
-curr1_crit Critical input current (LM25063 only).
curr1_max_alarm Input current high alarm.
-curr1_crit_alarm Input current critical high alarm (LM25063 only).
power1_label "pin"
power1_input Measured input power.
@@ -113,11 +100,6 @@ power1_alarm Input power alarm
power1_input_highest Historical maximum power.
power1_reset_history Write any value to reset maximum power history.
-power2_label "pout". LM25063 only.
-power2_input Measured output power.
-power2_max Maximum output power limit.
-power2_crit Critical output power limit.
-
temp1_input Measured temperature.
temp1_max Maximum temperature.
temp1_crit Critical high temperature.
diff --git a/Documentation/hwmon/max31785 b/Documentation/hwmon/max31785
index 45fb6093dec2..270c5f865261 100644
--- a/Documentation/hwmon/max31785
+++ b/Documentation/hwmon/max31785
@@ -17,8 +17,9 @@ management with temperature and remote voltage sensing. Various fan control
features are provided, including PWM frequency control, temperature hysteresis,
dual tachometer measurements, and fan health monitoring.
-For dual rotor fan configuration, the MAX31785 exposes the slowest rotor of the
-two in the fan[1-4]_input attributes.
+For dual-rotor configurations the MAX31785A exposes the second rotor tachometer
+readings in attributes fan[5-8]_input. By contrast the MAX31785 only exposes
+the slowest rotor measurement, and does so in the fan[1-4]_input attributes.
Usage Notes
-----------
@@ -31,7 +32,9 @@ Sysfs attributes
fan[1-4]_alarm Fan alarm.
fan[1-4]_fault Fan fault.
-fan[1-4]_input Fan RPM.
+fan[1-8]_input Fan RPM. On the MAX31785A, inputs 5-8 correspond to the
+ second rotor of fans 1-4.
+fan[1-4]_target Fan input target
in[1-6]_crit Critical maximum output voltage
in[1-6]_crit_alarm Output voltage critical high alarm
@@ -44,6 +47,12 @@ in[1-6]_max_alarm Output voltage high alarm
in[1-6]_min Minimum output voltage
in[1-6]_min_alarm Output voltage low alarm
+pwm[1-4] Fan target duty cycle (0..255)
+pwm[1-4]_enable 0: Full-speed
+ 1: Manual PWM control
+ 2: Automatic PWM (tach-feedback RPM fan-control)
+ 3: Automatic closed-loop (temp-feedback fan-control)
+
temp[1-11]_crit Critical high temperature
temp[1-11]_crit_alarm Chip temperature critical high alarm
temp[1-11]_input Measured temperature
diff --git a/Documentation/hwmon/w83773g b/Documentation/hwmon/w83773g
new file mode 100644
index 000000000000..4cc6c0b8257f
--- /dev/null
+++ b/Documentation/hwmon/w83773g
@@ -0,0 +1,33 @@
+Kernel driver w83773g
+=====================
+
+Supported chips:
+ * Nuvoton W83773G
+ Prefix: 'w83773g'
+ Addresses scanned: I2C 0x4c and 0x4d
+ Datasheet: https://www.nuvoton.com/resource-files/W83773G_SG_DatasheetV1_2.pdf
+
+Authors:
+ Lei YU <mine260309@gmail.com>
+
+Description
+-----------
+
+This driver implements support for the Nuvoton W83773G temperature sensor
+chip. This chip implements one local and two remote sensors.
+The chip also features offsets for the two remote sensors which get added to
+the input readings. The chip does all the scaling by itself and the driver
+therefore reports true temperatures that don't need any user-space adjustments.
+Temperature is measured in degrees Celsius.
+The chip is wired over I2C/SMBus and specified over a temperature
+range of -40 to +125 degrees Celsius (for local sensor) and -40 to +127
+degrees Celsius (for remote sensors).
+Resolution for both the local and remote channels is 0.125 degree C.
+
+The chip supports only temperature measurement. The driver exports
+the temperature values via the following sysfs files:
+
+temp[1-3]_input
+temp[2-3]_fault
+temp[2-3]_offset
+update_interval
diff --git a/Documentation/input/multi-touch-protocol.rst b/Documentation/input/multi-touch-protocol.rst
index 8035868c56bc..b51751a0cd5d 100644
--- a/Documentation/input/multi-touch-protocol.rst
+++ b/Documentation/input/multi-touch-protocol.rst
@@ -269,10 +269,11 @@ ABS_MT_ORIENTATION
The orientation of the touching ellipse. The value should describe a signed
quarter of a revolution clockwise around the touch center. The signed value
range is arbitrary, but zero should be returned for an ellipse aligned with
- the Y axis of the surface, a negative value when the ellipse is turned to
- the left, and a positive value when the ellipse is turned to the
- right. When completely aligned with the X axis, the range max should be
- returned.
+ the Y axis (north) of the surface, a negative value when the ellipse is
+ turned to the left, and a positive value when the ellipse is turned to the
+ right. When aligned with the X axis in the positive direction, the range
+ max should be returned; when aligned with the X axis in the negative
+ direction, the range -max should be returned.
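To make the revised mapping concrete, a hedged sketch of how a driver that
measures the ellipse angle in degrees clockwise from the Y axis might
compute the reported value (the helper name and degree-based input are
assumptions, not from the patch):

  /* 0 degrees (aligned with Y, north) -> 0; +90 degrees (+X) -> +max;
   * -90 degrees (-X) -> -max. "max" is the ABS_MT_ORIENTATION axis
   * maximum advertised by the device. */
  static int mt_orientation_from_degrees(int degrees, int max)
  {
      return degrees * max / 90;
  }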
Touch ellipsis are symmetrical by default. For devices capable of true 360
degree orientation, the reported orientation must exceed the range max to
diff --git a/Documentation/livepatch/livepatch.txt b/Documentation/livepatch/livepatch.txt
index ecdb18104ab0..1ae2de758c08 100644
--- a/Documentation/livepatch/livepatch.txt
+++ b/Documentation/livepatch/livepatch.txt
@@ -72,8 +72,7 @@ example, they add a NULL pointer or a boundary check, fix a race by adding
a missing memory barrier, or add some locking around a critical section.
Most of these changes are self contained and the function presents itself
the same way to the rest of the system. In this case, the functions might
-be updated independently one by one. (This can be done by setting the
-'immediate' flag in the klp_patch struct.)
+be updated independently one by one.
But there are more complex fixes. For example, a patch might change
ordering of locking in multiple functions at the same time. Or a patch
@@ -125,12 +124,6 @@ safe to patch tasks:
b) Patching CPU-bound user tasks. If the task is highly CPU-bound
then it will get patched the next time it gets interrupted by an
IRQ.
- c) In the future it could be useful for applying patches for
- architectures which don't yet have HAVE_RELIABLE_STACKTRACE. In
- this case you would have to signal most of the tasks on the
- system. However this isn't supported yet because there's
- currently no way to patch kthreads without
- HAVE_RELIABLE_STACKTRACE.
3. For idle "swapper" tasks, since they don't ever exit the kernel, they
instead have a klp_update_patch_state() call in the idle loop which
@@ -138,27 +131,16 @@ safe to patch tasks:
(Note there's not yet such an approach for kthreads.)
-All the above approaches may be skipped by setting the 'immediate' flag
-in the 'klp_patch' struct, which will disable per-task consistency and
-patch all tasks immediately. This can be useful if the patch doesn't
-change any function or data semantics. Note that, even with this flag
-set, it's possible that some tasks may still be running with an old
-version of the function, until that function returns.
+Architectures which don't have HAVE_RELIABLE_STACKTRACE rely solely on
+the second approach. It is highly likely that some tasks will still be
+running with an old version of a function until that function returns;
+in that case the tasks have to be signaled. This especially applies to
+kthreads, which may not be woken up and would need to be forced. See
+below for more information.
-There's also an 'immediate' flag in the 'klp_func' struct which allows
-you to specify that certain functions in the patch can be applied
-without per-task consistency. This might be useful if you want to patch
-a common function like schedule(), and the function change doesn't need
-consistency but the rest of the patch does.
-
-For architectures which don't have HAVE_RELIABLE_STACKTRACE, the user
-must set patch->immediate which causes all tasks to be patched
-immediately. This option should be used with care, only when the patch
-doesn't change any function or data semantics.
-
-In the future, architectures which don't have HAVE_RELIABLE_STACKTRACE
-may be allowed to use per-task consistency if we can come up with
-another way to patch kthreads.
+Unless we can come up with another way to patch kthreads, architectures
+without HAVE_RELIABLE_STACKTRACE are not considered fully supported by
+kernel livepatching.
The /sys/kernel/livepatch/<patch>/transition file shows whether a patch
is in transition. Only a single patch (the topmost patch on the stack)
@@ -176,8 +158,31 @@ If a patch is in transition, this file shows 0 to indicate the task is
unpatched and 1 to indicate it's patched. Otherwise, if no patch is in
transition, it shows -1. Any tasks which are blocking the transition
can be signaled with SIGSTOP and SIGCONT to force them to change their
-patched state.
-
+patched state. This may be harmful to the system, though. The
+/sys/kernel/livepatch/<patch>/signal attribute provides a better alternative.
+Writing 1 to the attribute sends a fake signal to all remaining blocking
+tasks: no proper signal is actually delivered (there is no data in the
+signal pending structures), but the tasks are interrupted or woken up and
+forced to change their patched state.
+
+The administrator can also affect a transition through the
+/sys/kernel/livepatch/<patch>/force attribute. Writing 1 there clears the
+TIF_PATCH_PENDING flag of all tasks and thus forces them into the patched
+state. Important note! The force attribute is intended for cases when the
+transition gets stuck for a long time because of a blocking task. The
+administrator is expected to collect all necessary data (namely stack
+traces of such blocking tasks) and request clearance from the patch
+distributor before forcing the transition. Unauthorized usage may cause
+harm to the system; how much depends on the nature of the patch, which
+functions are (un)patched, and which functions the blocking tasks are
+sleeping in (/proc/<pid>/stack may help here). Removal (rmmod) of patch
+modules is permanently disabled once the force feature is used, because it
+cannot be guaranteed that no task is sleeping in such a module. This
+implies an unbounded reference count if a patch module were disabled and
+enabled in a loop.
+
+Moreover, using force may also affect future applications of live patches
+and cause even more harm to the system. The administrator should first
+consider simply cancelling the transition (see above). If force is used, a
+reboot should be planned and no further live patches applied.
3.1 Adding consistency model support to new architectures
---------------------------------------------------------
@@ -216,13 +221,6 @@ few options:
a good backup option for those architectures which don't have
reliable stack traces yet.
-In the meantime, patches for such architectures can bypass the
-consistency model by setting klp_patch.immediate to true. This option
-is perfectly fine for patches which don't change the semantics of the
-patched functions. In practice, this is usable for ~90% of security
-fixes. Use of this option also means the patch can't be unloaded after
-it has been disabled.
-
4. Livepatch module
===================
@@ -278,9 +276,6 @@ into three levels:
only for a particular object ( vmlinux or a kernel module ). Note that
kallsyms allows for searching symbols according to the object name.
- There's also an 'immediate' flag which, when set, patches the
- function immediately, bypassing the consistency model safety checks.
-
+ struct klp_object defines an array of patched functions (struct
klp_func) in the same object. Where the object is either vmlinux
(NULL) or a module name.
@@ -299,9 +294,6 @@ into three levels:
symbols are found. The only exception are symbols from objects
(kernel modules) that have not been loaded yet.
- Setting the 'immediate' flag applies the patch to all tasks
- immediately, bypassing the consistency model safety checks.
-
For more details on how the patch is applied on a per-task basis,
see the "Consistency model" section.
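To tie the three levels together, here is a minimal sketch of the nesting,
mirroring samples/livepatch/livepatch-sample.c (the patched function and its
replacement are illustrative only):

  #include <linux/module.h>
  #include <linux/livepatch.h>
  #include <linux/seq_file.h>

  /* Replacement for the patched function; illustrative only. */
  static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
  {
      seq_printf(m, "%s\n", "this has been live patched");
      return 0;
  }

  static struct klp_func funcs[] = {
      {
          .old_name = "cmdline_proc_show",
          .new_func = livepatch_cmdline_proc_show,
      }, { }
  };

  static struct klp_object objs[] = {
      {
          /* name being NULL means vmlinux */
          .funcs = funcs,
      }, { }
  };

  static struct klp_patch patch = {
      .mod = THIS_MODULE,
      .objs = objs,
  };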
@@ -316,14 +308,12 @@ section "Livepatch life-cycle" below for more details about these
two operations.
Module removal is only safe when there are no users of the underlying
-functions. The immediate consistency model is not able to detect this. The
-code just redirects the functions at the very beginning and it does not
-check if the functions are in use. In other words, it knows when the
-functions get called but it does not know when the functions return.
-Therefore it cannot be decided when the livepatch module can be safely
-removed. This is solved by a hybrid consistency model. When the system is
-transitioned to a new patch state (patched/unpatched) it is guaranteed that
-no task sleeps or runs in the old code.
+functions. This is the reason why the force feature permanently disables
+removal: the forced tasks entered the functions, but we cannot tell
+whether they have returned, so it cannot be decided when the
+livepatch module can be safely removed. When the system is successfully
+transitioned to a new patch state (patched/unpatched) without being
+forced, it is guaranteed that no task sleeps or runs in the old code.
5. Livepatch life-cycle
@@ -337,19 +327,12 @@ First, the patch is applied only when all patched symbols for already
loaded objects are found. The error handling is much easier if this
check is done before particular functions get redirected.
-Second, the immediate consistency model does not guarantee that anyone is not
-sleeping in the new code after the patch is reverted. This means that the new
-code needs to stay around "forever". If the code is there, one could apply it
-again. Therefore it makes sense to separate the operations that might be done
-once and those that need to be repeated when the patch is enabled (applied)
-again.
-
-Third, it might take some time until the entire system is migrated
-when a more complex consistency model is used. The patch revert might
-block the livepatch module removal for too long. Therefore it is useful
-to revert the patch using a separate operation that might be called
-explicitly. But it does not make sense to remove all information
-until the livepatch module is really removed.
+Second, when the hybrid consistency model is used it might take some time
+until the entire system is migrated. The patch revert might block
+the livepatch module removal for too long. Therefore it is useful to
+revert the patch using a separate operation that might be called
+explicitly. But it does not make sense to remove all information until
+the livepatch module is really removed.
5.1. Registration
@@ -435,6 +418,9 @@ Information about the registered patches can be found under
/sys/kernel/livepatch. The patches could be enabled and disabled
by writing there.
+The /sys/kernel/livepatch/<patch>/signal and /sys/kernel/livepatch/<patch>/force
+attributes allow the administrator to affect a patching operation.
+
See Documentation/ABI/testing/sysfs-kernel-livepatch for more details.
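As a usage illustration (not part of the patch; "livepatch_sample" is a
placeholder patch name), the signal attribute can be driven from a trivial
userspace helper, which is gentler than SIGSTOP/SIGCONT:

  #include <stdio.h>

  /* Minimal sketch: nudge all tasks still blocking a transition.
   * Writing 1 to "signal" sends the fake signal described above;
   * writing 1 to "force" instead would clear TIF_PATCH_PENDING and
   * permanently disable removal of the patch module. */
  int main(void)
  {
      FILE *f = fopen("/sys/kernel/livepatch/livepatch_sample/signal", "w");

      if (!f)
          return 1;
      fprintf(f, "1\n");
      fclose(f);
      return 0;
  }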
diff --git a/Documentation/locking/locktorture.txt b/Documentation/locking/locktorture.txt
index a2ef3a929bf1..6a8df4cd19bf 100644
--- a/Documentation/locking/locktorture.txt
+++ b/Documentation/locking/locktorture.txt
@@ -57,11 +57,6 @@ torture_type Type of lock to torture. By default, only spinlocks will
o "rwsem_lock": read/write down() and up() semaphore pairs.
-torture_runnable Start locktorture at boot time in the case where the
- module is built into the kernel, otherwise wait for
- torture_runnable to be set via sysfs before starting.
- By default it will begin once the module is loaded.
-
** Torture-framework (RCU + locking) **
diff --git a/Documentation/md/raid5-ppl.txt b/Documentation/md/raid5-ppl.txt
index 127072b09363..bfa092589e00 100644
--- a/Documentation/md/raid5-ppl.txt
+++ b/Documentation/md/raid5-ppl.txt
@@ -39,6 +39,7 @@ case the behavior is the same as in plain raid5.
PPL is available for md version-1 metadata and external (specifically IMSM)
metadata arrays. It can be enabled using mdadm option --consistency-policy=ppl.
-Currently, volatile write-back cache should be disabled on all member drives
-when using PPL. Otherwise it cannot guarantee consistency in case of power
-failure.
+PPL is limited to arrays with a maximum of 64 disks. This keeps the data
+structures and implementation simple. RAID5 arrays with that many disks are
+unlikely anyway, given the high risk of multiple disk failures, so the
+restriction should not be a limitation in real life.
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index 479ecec80593..a863009849a3 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -227,17 +227,20 @@ There are some minimal guarantees that may be expected of a CPU:
(*) On any given CPU, dependent memory accesses will be issued in order, with
respect to itself. This means that for:
- Q = READ_ONCE(P); smp_read_barrier_depends(); D = READ_ONCE(*Q);
+ Q = READ_ONCE(P); D = READ_ONCE(*Q);
the CPU will issue the following memory operations:
Q = LOAD P, D = LOAD *Q
- and always in that order. On most systems, smp_read_barrier_depends()
- does nothing, but it is required for DEC Alpha. The READ_ONCE()
- is required to prevent compiler mischief. Please note that you
- should normally use something like rcu_dereference() instead of
- open-coding smp_read_barrier_depends().
+ and always in that order. However, on DEC Alpha, READ_ONCE() also
+ emits a memory-barrier instruction, so that a DEC Alpha CPU will
+ instead issue the following memory operations:
+
+ Q = LOAD P, MEMORY_BARRIER, D = LOAD *Q, MEMORY_BARRIER
+
+ Whether on DEC Alpha or not, the READ_ONCE() also prevents compiler
+ mischief.
(*) Overlapping loads and stores within a particular CPU will appear to be
ordered within that CPU. This means that for:
@@ -1815,7 +1818,7 @@ The Linux kernel has eight basic CPU memory barriers:
GENERAL mb() smp_mb()
WRITE wmb() smp_wmb()
READ rmb() smp_rmb()
- DATA DEPENDENCY read_barrier_depends() smp_read_barrier_depends()
+ DATA DEPENDENCY READ_ONCE()
All memory barriers except the data dependency barriers imply a compiler
@@ -2864,7 +2867,10 @@ access depends on a read, not all do, so it may not be relied on.
Other CPUs may also have split caches, but must coordinate between the various
cachelets for normal memory accesses. The semantics of the Alpha removes the
-need for coordination in the absence of memory barriers.
+need for hardware coordination in the absence of memory barriers, which
+permitted Alpha to sport higher CPU clock rates back in the day. However,
+please note that smp_read_barrier_depends() should not be used except in
+Alpha arch-specific code and within the READ_ONCE() macro.
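To make the revised guarantee concrete, a hedged kernel-style sketch of the
address-dependent load pattern the section describes (the structure and
variable names are invented for illustration):

  #include <linux/compiler.h>

  struct foo {
      int a;
  };

  struct foo *gp;    /* pointer published by some writer */

  /* READ_ONCE() on the pointer is enough to order the dependent load
   * of p->a, even on DEC Alpha, where READ_ONCE() also emits the
   * needed memory-barrier instruction. A plain "p = gp" would permit
   * compiler mischief and, on Alpha, hardware reordering. */
  int reader(void)
  {
      struct foo *p = READ_ONCE(gp);

      return p ? READ_ONCE(p->a) : -1;
  }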
CACHE COHERENCY VS DMA
diff --git a/Documentation/mtd/spi-nor.txt b/Documentation/mtd/spi-nor.txt
index 548d6306ebca..da1fbff5a24c 100644
--- a/Documentation/mtd/spi-nor.txt
+++ b/Documentation/mtd/spi-nor.txt
@@ -60,3 +60,6 @@ The main API is spi_nor_scan(). Before you call the hook, a driver should
initialize the necessary fields for spi_nor{}. Please see
drivers/mtd/spi-nor/spi-nor.c for detail. Please also refer to fsl-quadspi.c
when you want to write a new driver for a SPI NOR controller.
+Another API is spi_nor_restore(). It is used to restore the state of the
+SPI flash chip, such as its addressing mode, and should be called whenever
+the driver is detached from the device or the system reboots.
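As a hedged sketch of where a controller driver might call it (the "foo"
driver structure below is invented for illustration, not from the patch):

  #include <linux/mtd/spi-nor.h>
  #include <linux/platform_device.h>

  struct foo_flash {
      struct spi_nor nor;
  };

  static void foo_shutdown(struct platform_device *pdev)
  {
      struct foo_flash *flash = platform_get_drvdata(pdev);

      /* Return the chip to its power-on state (e.g. leave 4-byte
       * addressing mode) so a subsequent boot loader or re-probe
       * finds it in a sane state. */
      spi_nor_restore(&flash->nor);
  }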
diff --git a/Documentation/perf/arm_dsu_pmu.txt b/Documentation/perf/arm_dsu_pmu.txt
new file mode 100644
index 000000000000..d611e15f5add
--- /dev/null
+++ b/Documentation/perf/arm_dsu_pmu.txt
@@ -0,0 +1,28 @@
+ARM DynamIQ Shared Unit (DSU) PMU
+==================================
+
+The ARM DynamIQ Shared Unit (DSU) integrates one or more cores with an L3
+memory system, control logic and external interfaces to form a multicore
+cluster. The PMU allows counting various events related to the L3 cache,
+the Snoop Control Unit, etc., using independent 32-bit counters. It also
+provides a 64-bit cycle counter.
+
+The PMU can only be accessed via CPU system registers, which are common to
+the cores connected to the same DSU. Like most other uncore PMUs, the DSU
+PMU doesn't support process-specific events and cannot be used in sampling
+mode.
+
+The DSU provides a bitmap for a subset of implemented events via hardware
+registers. There is no way for the driver to determine if the other events
+are available or not. Hence the driver exposes only those events advertised
+by the DSU, in the "events" directory under:
+
+ /sys/bus/event_source/devices/arm_dsu_<N>/
+
+The user should refer to the TRM of the product to figure out the supported events
+and use the raw event code for the unlisted events.
+
+The driver also exposes the CPUs connected to the DSU instance in "associated_cpus".
+
+
+Example usage:
+
+ perf stat -a -e arm_dsu_0/cycles/
diff --git a/Documentation/power/pci.txt b/Documentation/power/pci.txt
index 704cd36079b8..8eaf9ee24d43 100644
--- a/Documentation/power/pci.txt
+++ b/Documentation/power/pci.txt
@@ -994,6 +994,17 @@ into D0 going forward), but if it is in runtime suspend in pci_pm_thaw_noirq(),
the function will set the power.direct_complete flag for it (to make the PM core
skip the subsequent "thaw" callbacks for it) and return.
+Setting the DPM_FLAG_LEAVE_SUSPENDED flag means that the driver prefers the
+device to be left in suspend after system-wide transitions to the working state.
+This flag is checked by the PM core. The PCI bus type informs the PM core
+which devices may be left in suspend from its perspective; that happens
+during the "noirq" phase of system-wide suspend and analogous transitions.
+The bus type then uses the dev_pm_may_skip_resume() helper to decide whether
+or not to return from pci_pm_resume_noirq() early: if the helper returns
+"true" for the device, the PM core will skip the remaining resume callbacks
+for it during the transition under way and will set its runtime PM status
+to "suspended".
+
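To illustrate the driver side, a minimal sketch, assuming a PCI driver
whose device can stay suspended across system resume (the "foo" names are
invented for illustration):

  #include <linux/pci.h>
  #include <linux/pm_runtime.h>

  static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
  {
      /* Tell the PM core this driver prefers the device to be left
       * in suspend after system-wide transitions to the working
       * state. */
      dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_LEAVE_SUSPENDED);
      return 0;
  }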
3.2. Device Runtime Power Management
------------------------------------
In addition to providing device power management callbacks PCI device drivers
diff --git a/Documentation/power/regulator/machine.txt b/Documentation/power/regulator/machine.txt
index 757e3b53dc11..eff4dcaaa252 100644
--- a/Documentation/power/regulator/machine.txt
+++ b/Documentation/power/regulator/machine.txt
@@ -23,16 +23,12 @@ struct regulator_consumer_supply {
e.g. for the machine above
static struct regulator_consumer_supply regulator1_consumers[] = {
-{
- .dev_name = "dev_name(consumer B)",
- .supply = "Vcc",
-},};
+ REGULATOR_SUPPLY("Vcc", "consumer B"),
+};
static struct regulator_consumer_supply regulator2_consumers[] = {
-{
- .dev = "dev_name(consumer A"),
- .supply = "Vcc",
-},};
+ REGULATOR_SUPPLY("Vcc", "consumer A"),
+};
This maps Regulator-1 to the 'Vcc' supply for Consumer B and maps Regulator-2
to the 'Vcc' supply for Consumer A.
@@ -78,20 +74,20 @@ static struct regulator_init_data regulator2_data = {
Finally the regulator devices must be registered in the usual manner.
static struct platform_device regulator_devices[] = {
-{
- .name = "regulator",
- .id = DCDC_1,
- .dev = {
- .platform_data = &regulator1_data,
+ {
+ .name = "regulator",
+ .id = DCDC_1,
+ .dev = {
+ .platform_data = &regulator1_data,
+ },
},
-},
-{
- .name = "regulator",
- .id = DCDC_2,
- .dev = {
- .platform_data = &regulator2_data,
+ {
+ .name = "regulator",
+ .id = DCDC_2,
+ .dev = {
+ .platform_data = &regulator2_data,
+ },
},
-},
};
/* register regulator 1 device */
platform_device_register(&regulator_devices[0]);
diff --git a/Documentation/process/kernel-enforcement-statement.rst b/Documentation/process/kernel-enforcement-statement.rst
index b3170671a1df..bfa6a78103d8 100644
--- a/Documentation/process/kernel-enforcement-statement.rst
+++ b/Documentation/process/kernel-enforcement-statement.rst
@@ -118,6 +118,7 @@ we might work for today, have in the past, or will in the future.
- Mike Marshall
- Chris Mason
- Paul E. McKenney
+ - Arnaldo Carvalho de Melo
- David S. Miller
- Ingo Molnar
- Kuninori Morimoto
diff --git a/Documentation/thermal/cpu-cooling-api.txt b/Documentation/thermal/cpu-cooling-api.txt
index 71653584cd03..7df567eaea1a 100644
--- a/Documentation/thermal/cpu-cooling-api.txt
+++ b/Documentation/thermal/cpu-cooling-api.txt
@@ -26,39 +26,16 @@ the user. The registration APIs returns the cooling device pointer.
clip_cpus: cpumask of cpus where the frequency constraints will happen.
1.1.2 struct thermal_cooling_device *of_cpufreq_cooling_register(
- struct device_node *np, const struct cpumask *clip_cpus)
+ struct cpufreq_policy *policy)
This interface function registers the cpufreq cooling device with
the name "thermal-cpufreq-%x" linking it with a device tree node, in
order to bind it via the thermal DT code. This api can support multiple
instances of cpufreq cooling devices.
- np: pointer to the cooling device device tree node
- clip_cpus: cpumask of cpus where the frequency constraints will happen.
+ policy: CPUFreq policy.
-1.1.3 struct thermal_cooling_device *cpufreq_power_cooling_register(
- const struct cpumask *clip_cpus, u32 capacitance,
- get_static_t plat_static_func)
-
-Similar to cpufreq_cooling_register, this function registers a cpufreq
-cooling device. Using this function, the cooling device will
-implement the power extensions by using a simple cpu power model. The
-cpus must have registered their OPPs using the OPP library.
-
-The additional parameters are needed for the power model (See 2. Power
-models). "capacitance" is the dynamic power coefficient (See 2.1
-Dynamic power). "plat_static_func" is a function to calculate the
-static power consumed by these cpus (See 2.2 Static power).
-
-1.1.4 struct thermal_cooling_device *of_cpufreq_power_cooling_register(
- struct device_node *np, const struct cpumask *clip_cpus, u32 capacitance,
- get_static_t plat_static_func)
-
-Similar to cpufreq_power_cooling_register, this function register a
-cpufreq cooling device with power extensions using the device tree
-information supplied by the np parameter.
-
-1.1.5 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
+1.1.3 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
This interface function unregisters the "thermal-cpufreq-%x" cooling device.
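A hedged sketch of the revised flow, typically from a cpufreq driver's
.ready() callback once the policy exists (the "foo" names are invented;
with the new signature the policy alone identifies the affected CPUs):

  #include <linux/cpu_cooling.h>
  #include <linux/cpufreq.h>

  static struct thermal_cooling_device *foo_cdev;

  static void foo_cpufreq_ready(struct cpufreq_policy *policy)
  {
      /* Registers "thermal-cpufreq-%x" and binds it via thermal DT. */
      foo_cdev = of_cpufreq_cooling_register(policy);
  }

  static int foo_cpufreq_exit(struct cpufreq_policy *policy)
  {
      cpufreq_cooling_unregister(foo_cdev);
      return 0;
  }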
@@ -67,20 +44,14 @@ information supplied by the np parameter.
2. Power models
The power API registration functions provide a simple power model for
-CPUs. The current power is calculated as dynamic + (optionally)
-static power. This power model requires that the operating-points of
+CPUs. The current power is calculated as dynamic power (static power isn't
+supported currently). This power model requires that the operating-points of
the CPUs are registered using the kernel's opp library and the
`cpufreq_frequency_table` is assigned to the `struct device` of the
cpu. If you are using CONFIG_CPUFREQ_DT then the
`cpufreq_frequency_table` should already be assigned to the cpu
device.
-The `plat_static_func` parameter of `cpufreq_power_cooling_register()`
-and `of_cpufreq_power_cooling_register()` is optional. If you don't
-provide it, only dynamic power will be considered.
-
-2.1 Dynamic power
-
The dynamic power consumption of a processor depends on many factors.
For a given processor implementation the primary factors are:
@@ -119,79 +90,3 @@ mW/MHz/uVolt^2. Typical values for mobile CPUs might lie in range
from 100 to 500. For reference, the approximate values for the SoC in
ARM's Juno Development Platform are 530 for the Cortex-A57 cluster and
140 for the Cortex-A53 cluster.
-
-
-2.2 Static power
-
-Static leakage power consumption depends on a number of factors. For a
-given circuit implementation the primary factors are:
-
-- Time the circuit spends in each 'power state'
-- Temperature
-- Operating voltage
-- Process grade
-
-The time the circuit spends in each 'power state' for a given
-evaluation period at first order means OFF or ON. However,
-'retention' states can also be supported that reduce power during
-inactive periods without loss of context.
-
-Note: The visibility of state entries to the OS can vary, according to
-platform specifics, and this can then impact the accuracy of a model
-based on OS state information alone. It might be possible in some
-cases to extract more accurate information from system resources.
-
-The temperature, operating voltage and process 'grade' (slow to fast)
-of the circuit are all significant factors in static leakage power
-consumption. All of these have complex relationships to static power.
-
-Circuit implementation specific factors include the chosen silicon
-process as well as the type, number and size of transistors in both
-the logic gates and any RAM elements included.
-
-The static power consumption modelling must take into account the
-power managed regions that are implemented. Taking the example of an
-ARM processor cluster, the modelling would take into account whether
-each CPU can be powered OFF separately or if only a single power
-region is implemented for the complete cluster.
-
-In one view, there are others, a static power consumption model can
-then start from a set of reference values for each power managed
-region (e.g. CPU, Cluster/L2) in each state (e.g. ON, OFF) at an
-arbitrary process grade, voltage and temperature point. These values
-are then scaled for all of the following: the time in each state, the
-process grade, the current temperature and the operating voltage.
-However, since both implementation specific and complex relationships
-dominate the estimate, the appropriate interface to the model from the
-cpu cooling device is to provide a function callback that calculates
-the static power in this platform. When registering the cpu cooling
-device pass a function pointer that follows the `get_static_t`
-prototype:
-
- int plat_get_static(cpumask_t *cpumask, int interval,
- unsigned long voltage, u32 &power);
-
-`cpumask` is the cpumask of the cpus involved in the calculation.
-`voltage` is the voltage at which they are operating. The function
-should calculate the average static power for the last `interval`
-milliseconds. It returns 0 on success, -E* on error. If it
-succeeds, it should store the static power in `power`. Reading the
-temperature of the cpus described by `cpumask` is left for
-plat_get_static() to do as the platform knows best which thermal
-sensor is closest to the cpu.
-
-If `plat_static_func` is NULL, static power is considered to be
-negligible for this platform and only dynamic power is considered.
-
-The platform specific callback can then use any combination of tables
-and/or equations to permute the estimated value. Process grade
-information is not passed to the model since access to such data, from
-on-chip measurement capability or manufacture time data, is platform
-specific.
-
-Note: the significance of static power for CPUs in comparison to
-dynamic power is highly dependent on implementation. Given the
-potential complexity in implementation, the importance and accuracy of
-its inclusion when using cpu cooling devices should be assessed on a
-case by case basis.
-
diff --git a/Documentation/w1/masters/w1-gpio b/Documentation/w1/masters/w1-gpio
index af5d3b4aa851..623961d9e83f 100644
--- a/Documentation/w1/masters/w1-gpio
+++ b/Documentation/w1/masters/w1-gpio
@@ -8,17 +8,27 @@ Description
-----------
GPIO 1-wire bus master driver. The driver uses the GPIO API to control the
-wire and the GPIO pin can be specified using platform data.
+wire and the GPIO pin can be specified using GPIO machine descriptor tables.
+It is also possible to define the master using device tree, see
+Documentation/devicetree/bindings/w1/w1-gpio.txt
Example (mach-at91)
-------------------
+#include <linux/gpio/machine.h>
#include <linux/w1-gpio.h>
+static struct gpiod_lookup_table foo_w1_gpiod_table = {
+ .dev_id = "w1-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX("at91-gpio", AT91_PIN_PB20, NULL, 0,
+ GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN),
+ },
+};
+
static struct w1_gpio_platform_data foo_w1_gpio_pdata = {
- .pin = AT91_PIN_PB20,
- .is_open_drain = 1,
+ .ext_pullup_enable_pin = -EINVAL,
};
static struct platform_device foo_w1_device = {
@@ -30,4 +40,5 @@ static struct platform_device foo_w1_device = {
...
at91_set_GPIO_periph(foo_w1_gpio_pdata.pin, 1);
at91_set_multi_drive(foo_w1_gpio_pdata.pin, 1);
+ gpiod_add_lookup_table(&foo_w1_gpiod_table);
platform_device_register(&foo_w1_device);
diff --git a/Documentation/x86/intel_rdt_ui.txt b/Documentation/x86/intel_rdt_ui.txt
index 6851854cf69d..756fd76b78a6 100644
--- a/Documentation/x86/intel_rdt_ui.txt
+++ b/Documentation/x86/intel_rdt_ui.txt
@@ -7,15 +7,24 @@ Tony Luck <tony.luck@intel.com>
Vikas Shivappa <vikas.shivappa@intel.com>
This feature is enabled by the CONFIG_INTEL_RDT Kconfig and the
-X86 /proc/cpuinfo flag bits "rdt", "cqm", "cat_l3" and "cdp_l3".
+X86 /proc/cpuinfo flag bits:
+RDT (Resource Director Technology) Allocation - "rdt_a"
+CAT (Cache Allocation Technology) - "cat_l3", "cat_l2"
+CDP (Code and Data Prioritization) - "cdp_l3", "cdp_l2"
+CQM (Cache QoS Monitoring) - "cqm_llc", "cqm_occup_llc"
+MBM (Memory Bandwidth Monitoring) - "cqm_mbm_total", "cqm_mbm_local"
+MBA (Memory Bandwidth Allocation) - "mba"
To use the feature mount the file system:
- # mount -t resctrl resctrl [-o cdp] /sys/fs/resctrl
+ # mount -t resctrl resctrl [-o cdp[,cdpl2]] /sys/fs/resctrl
mount options are:
"cdp": Enable code/data prioritization in L3 cache allocations.
+"cdpl2": Enable code/data prioritization in L2 cache allocations.
+
+L2 and L3 CDP are controlled separately.
RDT features are orthogonal. A particular system may support only
monitoring, only control, or both monitoring and control.
diff --git a/Documentation/xtensa/mmu.txt b/Documentation/xtensa/mmu.txt
index 5de8715d5bec..318114de63f3 100644
--- a/Documentation/xtensa/mmu.txt
+++ b/Documentation/xtensa/mmu.txt
@@ -69,19 +69,10 @@ Default MMUv2-compatible layout.
| Userspace | 0x00000000 TASK_SIZE
+------------------+ 0x40000000
+------------------+
-| Page table | 0x80000000
-+------------------+ 0x80400000
+| Page table | XCHAL_PAGE_TABLE_VADDR 0x80000000 XCHAL_PAGE_TABLE_SIZE
+------------------+
-| KMAP area | PKMAP_BASE PTRS_PER_PTE *
-| | DCACHE_N_COLORS *
-| | PAGE_SIZE
-| | (4MB * DCACHE_N_COLORS)
-+------------------+
-| Atomic KMAP area | FIXADDR_START KM_TYPE_NR *
-| | NR_CPUS *
-| | DCACHE_N_COLORS *
-| | PAGE_SIZE
-+------------------+ FIXADDR_TOP 0xbffff000
+| KASAN shadow map | KASAN_SHADOW_START 0x80400000 KASAN_SHADOW_SIZE
++------------------+ 0x8e400000
+------------------+
| VMALLOC area | VMALLOC_START 0xc0000000 128MB - 64KB
+------------------+ VMALLOC_END
@@ -92,6 +83,17 @@ Default MMUv2-compatible layout.
| remap area 2 |
+------------------+
+------------------+
+| KMAP area | PKMAP_BASE PTRS_PER_PTE *
+| | DCACHE_N_COLORS *
+| | PAGE_SIZE
+| | (4MB * DCACHE_N_COLORS)
++------------------+
+| Atomic KMAP area | FIXADDR_START KM_TYPE_NR *
+| | NR_CPUS *
+| | DCACHE_N_COLORS *
+| | PAGE_SIZE
++------------------+ FIXADDR_TOP 0xcffff000
++------------------+
| Cached KSEG | XCHAL_KSEG_CACHED_VADDR 0xd0000000 128MB
+------------------+
| Uncached KSEG | XCHAL_KSEG_BYPASS_VADDR 0xd8000000 128MB
@@ -109,19 +111,10 @@ Default MMUv2-compatible layout.
| Userspace | 0x00000000 TASK_SIZE
+------------------+ 0x40000000
+------------------+
-| Page table | 0x80000000
-+------------------+ 0x80400000
+| Page table | XCHAL_PAGE_TABLE_VADDR 0x80000000 XCHAL_PAGE_TABLE_SIZE
+------------------+
-| KMAP area | PKMAP_BASE PTRS_PER_PTE *
-| | DCACHE_N_COLORS *
-| | PAGE_SIZE
-| | (4MB * DCACHE_N_COLORS)
-+------------------+
-| Atomic KMAP area | FIXADDR_START KM_TYPE_NR *
-| | NR_CPUS *
-| | DCACHE_N_COLORS *
-| | PAGE_SIZE
-+------------------+ FIXADDR_TOP 0x9ffff000
+| KASAN shadow map | KASAN_SHADOW_START 0x80400000 KASAN_SHADOW_SIZE
++------------------+ 0x8e400000
+------------------+
| VMALLOC area | VMALLOC_START 0xa0000000 128MB - 64KB
+------------------+ VMALLOC_END
@@ -132,6 +125,17 @@ Default MMUv2-compatible layout.
| remap area 2 |
+------------------+
+------------------+
+| KMAP area | PKMAP_BASE PTRS_PER_PTE *
+| | DCACHE_N_COLORS *
+| | PAGE_SIZE
+| | (4MB * DCACHE_N_COLORS)
++------------------+
+| Atomic KMAP area | FIXADDR_START KM_TYPE_NR *
+| | NR_CPUS *
+| | DCACHE_N_COLORS *
+| | PAGE_SIZE
++------------------+ FIXADDR_TOP 0xaffff000
++------------------+
| Cached KSEG | XCHAL_KSEG_CACHED_VADDR 0xb0000000 256MB
+------------------+
| Uncached KSEG | XCHAL_KSEG_BYPASS_VADDR 0xc0000000 256MB
@@ -150,19 +154,10 @@ Default MMUv2-compatible layout.
| Userspace | 0x00000000 TASK_SIZE
+------------------+ 0x40000000
+------------------+
-| Page table | 0x80000000
-+------------------+ 0x80400000
+| Page table | XCHAL_PAGE_TABLE_VADDR 0x80000000 XCHAL_PAGE_TABLE_SIZE
+------------------+
-| KMAP area | PKMAP_BASE PTRS_PER_PTE *
-| | DCACHE_N_COLORS *
-| | PAGE_SIZE
-| | (4MB * DCACHE_N_COLORS)
-+------------------+
-| Atomic KMAP area | FIXADDR_START KM_TYPE_NR *
-| | NR_CPUS *
-| | DCACHE_N_COLORS *
-| | PAGE_SIZE
-+------------------+ FIXADDR_TOP 0x8ffff000
+| KASAN shadow map | KASAN_SHADOW_START 0x80400000 KASAN_SHADOW_SIZE
++------------------+ 0x8e400000
+------------------+
| VMALLOC area | VMALLOC_START 0x90000000 128MB - 64KB
+------------------+ VMALLOC_END
@@ -173,6 +168,17 @@ Default MMUv2-compatible layout.
| remap area 2 |
+------------------+
+------------------+
+| KMAP area | PKMAP_BASE PTRS_PER_PTE *
+| | DCACHE_N_COLORS *
+| | PAGE_SIZE
+| | (4MB * DCACHE_N_COLORS)
++------------------+
+| Atomic KMAP area | FIXADDR_START KM_TYPE_NR *
+| | NR_CPUS *
+| | DCACHE_N_COLORS *
+| | PAGE_SIZE
++------------------+ FIXADDR_TOP 0x9ffff000
++------------------+
| Cached KSEG | XCHAL_KSEG_CACHED_VADDR 0xa0000000 512MB
+------------------+
| Uncached KSEG | XCHAL_KSEG_BYPASS_VADDR 0xc0000000 512MB
diff --git a/MAINTAINERS b/MAINTAINERS
index 884ee9601707..e262e234d76d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -278,6 +278,12 @@ L: linux-gpio@vger.kernel.org
S: Maintained
F: drivers/gpio/gpio-pci-idio-16.c
+ACCES PCIe-IDIO-24 GPIO DRIVER
+M: William Breathitt Gray <vilhelm.gray@gmail.com>
+L: linux-gpio@vger.kernel.org
+S: Maintained
+F: drivers/gpio/gpio-pcie-idio-24.c
+
ACENIC DRIVER
M: Jes Sorensen <jes@trained-monkey.org>
L: linux-acenic@sunsite.dk
@@ -329,7 +335,7 @@ F: drivers/acpi/apei/
ACPI COMPONENT ARCHITECTURE (ACPICA)
M: Robert Moore <robert.moore@intel.com>
-M: Lv Zheng <lv.zheng@intel.com>
+M: Erik Schmauss <erik.schmauss@intel.com>
M: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
L: linux-acpi@vger.kernel.org
L: devel@acpica.org
@@ -875,6 +881,12 @@ S: Supported
F: drivers/android/
F: drivers/staging/android/
+ANDROID GOLDFISH PIC DRIVER
+M: Miodrag Dinic <miodrag.dinic@mips.com>
+S: Supported
+F: Documentation/devicetree/bindings/interrupt-controller/google,goldfish-pic.txt
+F: drivers/irqchip/irq-goldfish-pic.c
+
ANDROID GOLDFISH RTC DRIVER
M: Miodrag Dinic <miodrag.dinic@mips.com>
S: Supported
@@ -1321,7 +1333,8 @@ F: tools/perf/arch/arm/util/pmu.c
F: tools/perf/arch/arm/util/auxtrace.c
F: tools/perf/arch/arm/util/cs-etm.c
F: tools/perf/arch/arm/util/cs-etm.h
-F: tools/perf/util/cs-etm.h
+F: tools/perf/util/cs-etm.*
+F: tools/perf/util/cs-etm-decoder/*
ARM/CORGI MACHINE SUPPORT
M: Richard Purdie <rpurdie@rpsys.net>
@@ -1593,6 +1606,7 @@ F: arch/arm/boot/dts/kirkwood*
F: arch/arm/configs/mvebu_*_defconfig
F: arch/arm/mach-mvebu/
F: arch/arm64/boot/dts/marvell/armada*
+F: drivers/cpufreq/armada-37xx-cpufreq.c
F: drivers/cpufreq/mvebu-cpufreq.c
F: drivers/irqchip/irq-armada-370-xp.c
F: drivers/irqchip/irq-mvebu-*
@@ -2393,13 +2407,6 @@ F: Documentation/devicetree/bindings/input/atmel,maxtouch.txt
F: drivers/input/touchscreen/atmel_mxt_ts.c
F: include/linux/platform_data/atmel_mxt_ts.h
-ATMEL NAND DRIVER
-M: Wenyou Yang <wenyou.yang@atmel.com>
-M: Josh Wu <rainyfeeling@outlook.com>
-L: linux-mtd@lists.infradead.org
-S: Supported
-F: drivers/mtd/nand/atmel/*
-
ATMEL SAMA5D2 ADC DRIVER
M: Ludovic Desroches <ludovic.desroches@microchip.com>
L: linux-iio@vger.kernel.org
@@ -4347,10 +4354,12 @@ T: git git://git.infradead.org/users/hch/dma-mapping.git
W: http://git.infradead.org/users/hch/dma-mapping.git
S: Supported
F: lib/dma-debug.c
-F: lib/dma-noop.c
+F: lib/dma-direct.c
F: lib/dma-virt.c
F: drivers/base/dma-mapping.c
F: drivers/base/dma-coherent.c
+F: include/asm-generic/dma-mapping.h
+F: include/linux/dma-direct.h
F: include/linux/dma-mapping.h
DME1737 HARDWARE MONITOR DRIVER
@@ -5157,6 +5166,12 @@ L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/skx_edac.c
+EDAC-TI
+M: Tero Kristo <t-kristo@ti.com>
+L: linux-edac@vger.kernel.org
+S: Maintained
+F: drivers/edac/ti_edac.c
+
EDIROL UA-101/UA-1000 DRIVER
M: Clemens Ladisch <clemens@ladisch.de>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
@@ -5980,6 +5995,7 @@ F: drivers/media/rc/gpio-ir-tx.c
GPIO MOCKUP DRIVER
M: Bamvor Jian Zhang <bamvor.zhangjian@linaro.org>
+R: Bartosz Golaszewski <brgl@bgdev.pl>
L: linux-gpio@vger.kernel.org
S: Maintained
F: drivers/gpio/gpio-mockup.c
@@ -6627,16 +6643,6 @@ L: linux-i2c@vger.kernel.org
S: Maintained
F: drivers/i2c/i2c-stub.c
-i386 BOOT CODE
-M: "H. Peter Anvin" <hpa@zytor.com>
-S: Maintained
-F: arch/x86/boot/
-
-i386 SETUP CODE / CPU ERRATA WORKAROUNDS
-M: "H. Peter Anvin" <hpa@zytor.com>
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/hpa/linux-2.6-x86setup.git
-S: Maintained
-
IA64 (Itanium) PLATFORM
M: Tony Luck <tony.luck@intel.com>
M: Fenghua Yu <fenghua.yu@intel.com>
@@ -6903,7 +6909,7 @@ M: Jason Gunthorpe <jgg@mellanox.com>
L: linux-rdma@vger.kernel.org
W: http://www.openfabrics.org/
Q: http://patchwork.kernel.org/project/linux-rdma/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git
S: Supported
F: Documentation/devicetree/bindings/infiniband/
F: Documentation/infiniband/
@@ -8210,6 +8216,7 @@ F: arch/*/include/asm/rwsem.h
F: include/linux/seqlock.h
F: lib/locking*.[ch]
F: kernel/locking/
+X: kernel/locking/locktorture.c
LOGICAL DISK MANAGER SUPPORT (LDM, Windows 2000/XP/Vista Dynamic Disks)
M: "Richard Russon (FlatCap)" <ldm@flatcap.org>
@@ -8425,6 +8432,13 @@ L: linux-wireless@vger.kernel.org
S: Odd Fixes
F: drivers/net/wireless/marvell/mwl8k.c
+MARVELL NAND CONTROLLER DRIVER
+M: Miquel Raynal <miquel.raynal@free-electrons.com>
+L: linux-mtd@lists.infradead.org
+S: Maintained
+F: drivers/mtd/nand/marvell_nand.c
+F: Documentation/devicetree/bindings/mtd/marvell-nand.txt
+
MARVELL SOC MMC/SD/SDIO CONTROLLER DRIVER
M: Nicolas Pitre <nico@fluxnic.net>
S: Odd Fixes
@@ -8979,7 +8993,7 @@ L: linux-mtd@lists.infradead.org
W: http://www.linux-mtd.infradead.org/
Q: http://patchwork.ozlabs.org/project/linux-mtd/list/
T: git git://git.infradead.org/linux-mtd.git master
-T: git git://git.infradead.org/l2-mtd.git master
+T: git git://git.infradead.org/linux-mtd.git mtd/next
S: Maintained
F: Documentation/devicetree/bindings/mtd/
F: drivers/mtd/
@@ -9068,6 +9082,14 @@ F: drivers/media/platform/atmel/atmel-isc.c
F: drivers/media/platform/atmel/atmel-isc-regs.h
F: devicetree/bindings/media/atmel-isc.txt
+MICROCHIP / ATMEL NAND DRIVER
+M: Wenyou Yang <wenyou.yang@microchip.com>
+M: Josh Wu <rainyfeeling@outlook.com>
+L: linux-mtd@lists.infradead.org
+S: Supported
+F: drivers/mtd/nand/atmel/*
+F: Documentation/devicetree/bindings/mtd/atmel-nand.txt
+
MICROCHIP KSZ SERIES ETHERNET SWITCH DRIVER
M: Woojung Huh <Woojung.Huh@microchip.com>
M: Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
@@ -9368,7 +9390,7 @@ L: linux-mtd@lists.infradead.org
W: http://www.linux-mtd.infradead.org/
Q: http://patchwork.ozlabs.org/project/linux-mtd/list/
T: git git://git.infradead.org/linux-mtd.git nand/fixes
-T: git git://git.infradead.org/l2-mtd.git nand/next
+T: git git://git.infradead.org/linux-mtd.git nand/next
S: Maintained
F: drivers/mtd/nand/
F: include/linux/mtd/*nand*.h
@@ -9774,6 +9796,15 @@ S: Supported
F: Documentation/filesystems/ntfs.txt
F: fs/ntfs/
+NUBUS SUBSYSTEM
+M: Finn Thain <fthain@telegraphics.com.au>
+L: linux-m68k@lists.linux-m68k.org
+S: Maintained
+F: arch/*/include/asm/nubus.h
+F: drivers/nubus/
+F: include/linux/nubus.h
+F: include/uapi/linux/nubus.h
+
NVIDIA (rivafb and nvidiafb) FRAMEBUFFER DRIVER
M: Antonino Daplas <adaplas@gmail.com>
L: linux-fbdev@vger.kernel.org
@@ -9834,6 +9865,7 @@ NXP TFA9879 DRIVER
M: Peter Rosin <peda@axentia.se>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Maintained
+F: Documentation/devicetree/bindings/sound/tfa9879.txt
F: sound/soc/codecs/tfa9879*
NXP-NCI NFC DRIVER
@@ -10920,6 +10952,7 @@ F: include/linux/pm.h
F: include/linux/pm_*
F: include/linux/powercap.h
F: drivers/powercap/
+F: kernel/configs/nopm.config
POWER STATE COORDINATION INTERFACE (PSCI)
M: Mark Rutland <mark.rutland@arm.com>
@@ -11214,7 +11247,8 @@ S: Maintained
F: drivers/firmware/qemu_fw_cfg.c
QIB DRIVER
-M: Mike Marciniszyn <infinipath@intel.com>
+M: Dennis Dalessandro <dennis.dalessandro@intel.com>
+M: Mike Marciniszyn <mike.marciniszyn@intel.com>
L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/hw/qib/
@@ -11241,7 +11275,6 @@ F: include/linux/qed/
F: drivers/net/ethernet/qlogic/qede/
QLOGIC QL4xxx RDMA DRIVER
-M: Ram Amrani <Ram.Amrani@cavium.com>
M: Michal Kalderon <Michal.Kalderon@cavium.com>
M: Ariel Elior <Ariel.Elior@cavium.com>
L: linux-rdma@vger.kernel.org
@@ -11480,15 +11513,6 @@ L: linux-wireless@vger.kernel.org
S: Orphan
F: drivers/net/wireless/ray*
-RCUTORTURE MODULE
-M: Josh Triplett <josh@joshtriplett.org>
-M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
-L: linux-kernel@vger.kernel.org
-S: Supported
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
-F: Documentation/RCU/torture.txt
-F: kernel/rcu/rcutorture.c
-
RCUTORTURE TEST FRAMEWORK
M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
M: Josh Triplett <josh@joshtriplett.org>
@@ -11512,6 +11536,7 @@ F: drivers/net/ethernet/rdc/r6040.c
RDMAVT - RDMA verbs software
M: Dennis Dalessandro <dennis.dalessandro@intel.com>
+M: Mike Marciniszyn <mike.marciniszyn@intel.com>
L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/sw/rdmavt
@@ -11681,8 +11706,8 @@ F: drivers/mtd/nand/r852.h
RISC-V ARCHITECTURE
M: Palmer Dabbelt <palmer@sifive.com>
M: Albert Ou <albert@sifive.com>
-L: patches@groups.riscv.org
-T: git https://github.com/riscv/riscv-linux
+L: linux-riscv@lists.infradead.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/palmer/riscv-linux.git
S: Supported
F: arch/riscv/
K: riscv
@@ -11959,6 +11984,13 @@ S: Maintained
F: drivers/crypto/exynos-rng.c
F: Documentation/devicetree/bindings/crypto/samsung,exynos-rng4.txt
+SAMSUNG EXYNOS TRUE RANDOM NUMBER GENERATOR (TRNG) DRIVER
+M: Łukasz Stelmach <l.stelmach@samsung.com>
+L: linux-samsung-soc@vger.kernel.org
+S: Maintained
+F: drivers/char/hw_random/exynos-trng.c
+F: Documentation/devicetree/bindings/rng/samsung,exynos5250-trng.txt
+
SAMSUNG FRAMEBUFFER DRIVER
M: Jingoo Han <jingoohan1@gmail.com>
L: linux-fbdev@vger.kernel.org
@@ -12021,6 +12053,7 @@ F: drivers/media/i2c/s5k5baf.c
SAMSUNG S5P Security SubSystem (SSS) DRIVER
M: Krzysztof Kozlowski <krzk@kernel.org>
M: Vladimir Zapolskiy <vz@mleia.com>
+M: Kamil Konieczny <k.konieczny@partner.samsung.com>
L: linux-crypto@vger.kernel.org
L: linux-samsung-soc@vger.kernel.org
S: Maintained
@@ -12620,6 +12653,12 @@ F: include/media/soc*
F: drivers/media/i2c/soc_camera/
F: drivers/media/platform/soc_camera/
+SOCIONEXT UNIPHIER SOUND DRIVER
+M: Katsuhiro Suzuki <suzuki.katsuhiro@socionext.com>
+L: alsa-devel@alsa-project.org (moderated for non-subscribers)
+S: Maintained
+F: sound/soc/uniphier/
+
SOEKRIS NET48XX LED SUPPORT
M: Chris Boot <bootc@bootc.net>
S: Maintained
@@ -12644,6 +12683,15 @@ L: linux-media@vger.kernel.org
S: Supported
F: drivers/media/pci/solo6x10/
+SOFTWARE DELEGATED EXCEPTION INTERFACE (SDEI)
+M: James Morse <james.morse@arm.com>
+L: linux-arm-kernel@lists.infradead.org
+S: Maintained
+F: Documentation/devicetree/bindings/arm/firmware/sdei.txt
+F: drivers/firmware/arm_sdei.c
+F: include/linux/sdei.h
+F: include/uapi/linux/sdei.h
+
SOFTWARE RAID (Multiple Disks) SUPPORT
M: Shaohua Li <shli@kernel.org>
L: linux-raid@vger.kernel.org
@@ -12815,7 +12863,7 @@ L: linux-mtd@lists.infradead.org
W: http://www.linux-mtd.infradead.org/
Q: http://patchwork.ozlabs.org/project/linux-mtd/list/
T: git git://git.infradead.org/linux-mtd.git spi-nor/fixes
-T: git git://git.infradead.org/l2-mtd.git spi-nor/next
+T: git git://git.infradead.org/linux-mtd.git spi-nor/next
S: Maintained
F: drivers/mtd/spi-nor/
F: include/linux/mtd/spi-nor.h
@@ -13068,7 +13116,7 @@ F: arch/x86/boot/video*
SWIOTLB SUBSYSTEM
M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-L: linux-kernel@vger.kernel.org
+L: iommu@lists.linux-foundation.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb.git
S: Supported
F: lib/swiotlb.c
@@ -13802,6 +13850,18 @@ L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/topstar-laptop.c
+TORTURE-TEST MODULES
+M: Davidlohr Bueso <dave@stgolabs.net>
+M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+M: Josh Triplett <josh@joshtriplett.org>
+L: linux-kernel@vger.kernel.org
+S: Supported
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
+F: Documentation/RCU/torture.txt
+F: kernel/torture.c
+F: kernel/rcu/rcutorture.c
+F: kernel/locking/locktorture.c
+
TOSHIBA ACPI EXTRAS DRIVER
M: Azael Avalos <coproscefalo@gmail.com>
L: platform-driver-x86@vger.kernel.org
@@ -13844,9 +13904,10 @@ F: drivers/platform/x86/toshiba-wmi.c
TPM DEVICE DRIVER
M: Peter Huewe <peterhuewe@gmx.de>
M: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
-R: Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
+R: Jason Gunthorpe <jgg@ziepe.ca>
L: linux-integrity@vger.kernel.org
Q: https://patchwork.kernel.org/project/linux-integrity/list/
+W: https://kernsec.org/wiki/index.php/Linux_Kernel_Integrity
T: git git://git.infradead.org/users/jjs/linux-tpmdd.git
S: Maintained
F: drivers/char/tpm/
@@ -13885,6 +13946,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/trivial.git
S: Maintained
K: ^Subject:.*(?i)trivial
+TEMPO SEMICONDUCTOR DRIVERS
+M: Steven Eckhoff <steven.eckhoff.opensource@gmail.com>
+S: Maintained
+F: sound/soc/codecs/tscs*.c
+F: sound/soc/codecs/tscs*.h
+F: Documentation/devicetree/bindings/sound/tscs*.txt
+
TTY LAYER
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
M: Jiri Slaby <jslaby@suse.com>
@@ -14683,6 +14751,7 @@ W: http://www.slimlogic.co.uk/?p=48
T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regulator.git
S: Supported
F: Documentation/devicetree/bindings/regulator/
+F: Documentation/power/regulator/
F: drivers/regulator/
F: include/dt-bindings/regulator/
F: include/linux/regulator/
@@ -14876,6 +14945,12 @@ F: include/linux/workqueue.h
F: kernel/workqueue.c
F: Documentation/core-api/workqueue.rst
+X-POWERS AXP288 PMIC DRIVERS
+M: Hans de Goede <hdegoede@redhat.com>
+S: Maintained
+N: axp288
+F: drivers/acpi/pmic/intel_pmic_xpower.c
+
X-POWERS MULTIFUNCTION PMIC DEVICE DRIVERS
M: Chen-Yu Tsai <wens@csie.org>
L: linux-kernel@vger.kernel.org
@@ -14893,7 +14968,7 @@ F: net/x25/
X86 ARCHITECTURE (32-BIT AND 64-BIT)
M: Thomas Gleixner <tglx@linutronix.de>
M: Ingo Molnar <mingo@redhat.com>
-M: "H. Peter Anvin" <hpa@zytor.com>
+R: "H. Peter Anvin" <hpa@zytor.com>
M: x86@kernel.org
L: linux-kernel@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core
@@ -15003,6 +15078,7 @@ F: include/xen/interface/io/vscsiif.h
XEN SWIOTLB SUBSYSTEM
M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
+L: iommu@lists.linux-foundation.org
S: Supported
F: arch/x86/xen/*swiotlb*
F: drivers/xen/*swiotlb*
diff --git a/Makefile b/Makefile
index 339397b838d3..c8b8e902d5a4 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 4
PATCHLEVEL = 15
SUBLEVEL = 0
-EXTRAVERSION = -rc9
+EXTRAVERSION =
NAME = Fearless Coyote
# *DOCUMENTATION*
diff --git a/arch/Kconfig b/arch/Kconfig
index 97376accfb14..d007b2a15b22 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -237,8 +237,8 @@ config ARCH_HAS_FORTIFY_SOURCE
config ARCH_HAS_SET_MEMORY
bool
-# Select if arch init_task initializer is different to init/init_task.c
-config ARCH_INIT_TASK
+# Select if arch init_task must go in the __init_task_data section
+config ARCH_TASK_STRUCT_ON_STACK
bool
# Select if arch has its private alloc_task_struct() function
@@ -941,6 +941,10 @@ config STRICT_MODULE_RWX
and non-text memory will be made non-executable. This provides
protection against certain security exploits (e.g. writing to text)
+# select if the architecture provides an asm/dma-direct.h header
+config ARCH_HAS_PHYS_TO_DMA
+ bool
+
config ARCH_HAS_REFCOUNT
bool
help
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index b31b974a03cb..e96adcbcab41 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -209,6 +209,7 @@ config ALPHA_EIGER
config ALPHA_JENSEN
bool "Jensen"
+ depends on BROKEN
help
DEC PC 150 AXP (aka Jensen): This is a very old Digital system - one
of the first-generation Alpha systems. A number of these systems
diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h
index 8c20c5e35432..807d7b9a1860 100644
--- a/arch/alpha/include/asm/thread_info.h
+++ b/arch/alpha/include/asm/thread_info.h
@@ -39,9 +39,6 @@ struct thread_info {
.preempt_count = INIT_PREEMPT_COUNT, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
/* How to get the thread information struct from C. */
register struct thread_info *__current_thread_info __asm__("$8");
#define current_thread_info() __current_thread_info
diff --git a/arch/alpha/include/uapi/asm/Kbuild b/arch/alpha/include/uapi/asm/Kbuild
index 14a2e9af97e9..9afaba5e5503 100644
--- a/arch/alpha/include/uapi/asm/Kbuild
+++ b/arch/alpha/include/uapi/asm/Kbuild
@@ -2,3 +2,4 @@
include include/uapi/asm-generic/Kbuild.asm
generic-y += bpf_perf_event.h
+generic-y += poll.h
diff --git a/arch/alpha/include/uapi/asm/poll.h b/arch/alpha/include/uapi/asm/poll.h
deleted file mode 100644
index b7132a305a47..000000000000
--- a/arch/alpha/include/uapi/asm/poll.h
+++ /dev/null
@@ -1,2 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#include <asm-generic/poll.h>
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index ce3a675c0c4b..fa1a392ca9a2 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -950,22 +950,31 @@ struct itimerval32
};
static inline long
-get_tv32(struct timeval *o, struct timeval32 __user *i)
+get_tv32(struct timespec64 *o, struct timeval32 __user *i)
{
struct timeval32 tv;
if (copy_from_user(&tv, i, sizeof(struct timeval32)))
return -EFAULT;
o->tv_sec = tv.tv_sec;
- o->tv_usec = tv.tv_usec;
+ o->tv_nsec = tv.tv_usec * NSEC_PER_USEC;
return 0;
}
static inline long
-put_tv32(struct timeval32 __user *o, struct timeval *i)
+put_tv32(struct timeval32 __user *o, struct timespec64 *i)
{
return copy_to_user(o, &(struct timeval32){
- .tv_sec = o->tv_sec,
- .tv_usec = o->tv_usec},
+ .tv_sec = i->tv_sec,
+ .tv_usec = i->tv_nsec / NSEC_PER_USEC},
+ sizeof(struct timeval32));
+}
+
+static inline long
+put_tv_to_tv32(struct timeval32 __user *o, struct timeval *i)
+{
+ return copy_to_user(o, &(struct timeval32){
+ .tv_sec = i->tv_sec,
+ .tv_usec = i->tv_usec},
sizeof(struct timeval32));
}
@@ -1004,9 +1013,10 @@ SYSCALL_DEFINE2(osf_gettimeofday, struct timeval32 __user *, tv,
struct timezone __user *, tz)
{
if (tv) {
- struct timeval ktv;
- do_gettimeofday(&ktv);
- if (put_tv32(tv, &ktv))
+ struct timespec64 kts;
+
+ ktime_get_real_ts64(&kts);
+ if (put_tv32(tv, &kts))
return -EFAULT;
}
if (tz) {
@@ -1019,22 +1029,19 @@ SYSCALL_DEFINE2(osf_gettimeofday, struct timeval32 __user *, tv,
SYSCALL_DEFINE2(osf_settimeofday, struct timeval32 __user *, tv,
struct timezone __user *, tz)
{
- struct timespec64 kts64;
- struct timespec kts;
+ struct timespec64 kts;
struct timezone ktz;
if (tv) {
- if (get_tv32((struct timeval *)&kts, tv))
+ if (get_tv32(&kts, tv))
return -EFAULT;
- kts.tv_nsec *= 1000;
- kts64 = timespec_to_timespec64(kts);
}
if (tz) {
if (copy_from_user(&ktz, tz, sizeof(*tz)))
return -EFAULT;
}
- return do_sys_settimeofday64(tv ? &kts64 : NULL, tz ? &ktz : NULL);
+ return do_sys_settimeofday64(tv ? &kts : NULL, tz ? &ktz : NULL);
}
asmlinkage long sys_ni_posix_timers(void);
@@ -1083,22 +1090,16 @@ SYSCALL_DEFINE3(osf_setitimer, int, which, struct itimerval32 __user *, in,
SYSCALL_DEFINE2(osf_utimes, const char __user *, filename,
struct timeval32 __user *, tvs)
{
- struct timespec tv[2];
+ struct timespec64 tv[2];
if (tvs) {
- struct timeval ktvs[2];
- if (get_tv32(&ktvs[0], &tvs[0]) ||
- get_tv32(&ktvs[1], &tvs[1]))
+ if (get_tv32(&tv[0], &tvs[0]) ||
+ get_tv32(&tv[1], &tvs[1]))
return -EFAULT;
- if (ktvs[0].tv_usec < 0 || ktvs[0].tv_usec >= 1000000 ||
- ktvs[1].tv_usec < 0 || ktvs[1].tv_usec >= 1000000)
+ if (tv[0].tv_nsec < 0 || tv[0].tv_nsec >= 1000000000 ||
+ tv[1].tv_nsec < 0 || tv[1].tv_nsec >= 1000000000)
return -EINVAL;
-
- tv[0].tv_sec = ktvs[0].tv_sec;
- tv[0].tv_nsec = 1000 * ktvs[0].tv_usec;
- tv[1].tv_sec = ktvs[1].tv_sec;
- tv[1].tv_nsec = 1000 * ktvs[1].tv_usec;
}
return do_utimes(AT_FDCWD, filename, tvs ? tv : NULL, 0);
@@ -1107,19 +1108,18 @@ SYSCALL_DEFINE2(osf_utimes, const char __user *, filename,
SYSCALL_DEFINE5(osf_select, int, n, fd_set __user *, inp, fd_set __user *, outp,
fd_set __user *, exp, struct timeval32 __user *, tvp)
{
- struct timespec end_time, *to = NULL;
+ struct timespec64 end_time, *to = NULL;
if (tvp) {
- struct timeval tv;
+ struct timespec64 tv;
to = &end_time;
if (get_tv32(&tv, tvp))
return -EFAULT;
- if (tv.tv_sec < 0 || tv.tv_usec < 0)
+ if (tv.tv_sec < 0 || tv.tv_nsec < 0)
return -EINVAL;
- if (poll_select_set_timeout(to, tv.tv_sec,
- tv.tv_usec * NSEC_PER_USEC))
+ if (poll_select_set_timeout(to, tv.tv_sec, tv.tv_nsec))
return -EINVAL;
}
@@ -1192,9 +1192,9 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options,
return -EFAULT;
if (!ur)
return err;
- if (put_tv32(&ur->ru_utime, &r.ru_utime))
+ if (put_tv_to_tv32(&ur->ru_utime, &r.ru_utime))
return -EFAULT;
- if (put_tv32(&ur->ru_stime, &r.ru_stime))
+ if (put_tv_to_tv32(&ur->ru_stime, &r.ru_stime))
return -EFAULT;
if (copy_to_user(&ur->ru_maxrss, &r.ru_maxrss,
sizeof(struct rusage32) - offsetof(struct rusage32, ru_maxrss)))
@@ -1210,18 +1210,18 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options,
SYSCALL_DEFINE2(osf_usleep_thread, struct timeval32 __user *, sleep,
struct timeval32 __user *, remain)
{
- struct timeval tmp;
+ struct timespec64 tmp;
unsigned long ticks;
if (get_tv32(&tmp, sleep))
goto fault;
- ticks = timeval_to_jiffies(&tmp);
+ ticks = timespec64_to_jiffies(&tmp);
ticks = schedule_timeout_interruptible(ticks);
if (remain) {
- jiffies_to_timeval(ticks, &tmp);
+ jiffies_to_timespec64(ticks, &tmp);
if (put_tv32(remain, &tmp))
goto fault;
}
@@ -1280,7 +1280,7 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
if (copy_to_user(txc_p, &txc, offsetof(struct timex32, time)) ||
(copy_to_user(&txc_p->tick, &txc.tick, sizeof(struct timex32) -
offsetof(struct timex32, tick))) ||
- (put_tv32(&txc_p->time, &txc.time)))
+ (put_tv_to_tv32(&txc_p->time, &txc.time)))
return -EFAULT;
return ret;
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 9d5fd00d9e91..f3a80cf164cc 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -463,9 +463,6 @@ config ARCH_PHYS_ADDR_T_64BIT
config ARCH_DMA_ADDR_T_64BIT
bool
-config ARC_PLAT_NEEDS_PHYS_TO_DMA
- bool
-
config ARC_KVADDR_SIZE
int "Kernel Virtual Address Space size (MB)"
range 0 512
diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h
index 94285031c4fb..7a16824bfe98 100644
--- a/arch/arc/include/asm/dma-mapping.h
+++ b/arch/arc/include/asm/dma-mapping.h
@@ -11,13 +11,6 @@
#ifndef ASM_ARC_DMA_MAPPING_H
#define ASM_ARC_DMA_MAPPING_H
-#ifndef CONFIG_ARC_PLAT_NEEDS_PHYS_TO_DMA
-#define plat_dma_to_phys(dev, dma_handle) ((phys_addr_t)(dma_handle))
-#define plat_phys_to_dma(dev, paddr) ((dma_addr_t)(paddr))
-#else
-#include <plat/dma.h>
-#endif
-
extern const struct dma_map_ops arc_dma_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h
index 2d79e527fa50..c85947bac5e5 100644
--- a/arch/arc/include/asm/thread_info.h
+++ b/arch/arc/include/asm/thread_info.h
@@ -62,9 +62,6 @@ struct thread_info {
.addr_limit = KERNEL_DS, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
static inline __attribute_const__ struct thread_info *current_thread_info(void)
{
register unsigned long sp asm("sp");
diff --git a/arch/arc/kernel/traps.c b/arch/arc/kernel/traps.c
index 133a4dae41fe..b123558bf0bb 100644
--- a/arch/arc/kernel/traps.c
+++ b/arch/arc/kernel/traps.c
@@ -65,12 +65,14 @@ unhandled_exception(const char *str, struct pt_regs *regs, siginfo_t *info)
#define DO_ERROR_INFO(signr, str, name, sicode) \
int name(unsigned long address, struct pt_regs *regs) \
{ \
- siginfo_t info = { \
- .si_signo = signr, \
- .si_errno = 0, \
- .si_code = sicode, \
- .si_addr = (void __user *)address, \
- }; \
+ siginfo_t info; \
+ \
+ clear_siginfo(&info); \
+ info.si_signo = signr; \
+ info.si_errno = 0; \
+ info.si_code = sicode; \
+ info.si_addr = (void __user *)address; \
+ \
return unhandled_exception(str, regs, &info);\
}
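
The DO_ERROR_INFO() change above follows a tree-wide pattern: siginfo_t
is a large union, and a designated initializer only zeroes its named
members, so uninitialized padding could leak kernel stack contents to
userspace when the signal is delivered. clear_siginfo() wipes the whole
structure before the fields are filled in. A distilled sketch of the
pattern; the signal number and code here are illustrative:

	#include <linux/ptrace.h>
	#include <linux/sched/signal.h>

	static void raise_fault_signal(struct pt_regs *regs)
	{
		siginfo_t info;

		clear_siginfo(&info);	/* zeroes padding as well as fields */
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code  = SEGV_MAPERR;
		info.si_addr  = (void __user *)instruction_pointer(regs);
		force_sig_info(info.si_signo, &info, current);
	}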
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index e9d93604ad0f..1dcc404b5aec 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -60,7 +60,7 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
/* This is linear addr (0x8000_0000 based) */
paddr = page_to_phys(page);
- *dma_handle = plat_phys_to_dma(dev, paddr);
+ *dma_handle = paddr;
/* This is kernel Virtual address (0x7000_0000 based) */
if (need_kvaddr) {
@@ -92,7 +92,7 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs)
{
- phys_addr_t paddr = plat_dma_to_phys(dev, dma_handle);
+ phys_addr_t paddr = dma_handle;
struct page *page = virt_to_page(paddr);
int is_non_coh = 1;
@@ -111,7 +111,7 @@ static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma,
{
unsigned long user_count = vma_pages(vma);
unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
- unsigned long pfn = __phys_to_pfn(plat_dma_to_phys(dev, dma_addr));
+ unsigned long pfn = __phys_to_pfn(dma_addr);
unsigned long off = vma->vm_pgoff;
int ret = -ENXIO;
@@ -175,7 +175,7 @@ static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
_dma_cache_sync(paddr, size, dir);
- return plat_phys_to_dma(dev, paddr);
+ return paddr;
}
/*
@@ -190,7 +190,7 @@ static void arc_dma_unmap_page(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
- phys_addr_t paddr = plat_dma_to_phys(dev, handle);
+ phys_addr_t paddr = handle;
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
_dma_cache_sync(paddr, size, dir);
@@ -224,13 +224,13 @@ static void arc_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
static void arc_dma_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
- _dma_cache_sync(plat_dma_to_phys(dev, dma_handle), size, DMA_FROM_DEVICE);
+ _dma_cache_sync(dma_handle, size, DMA_FROM_DEVICE);
}
static void arc_dma_sync_single_for_device(struct device *dev,
dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
- _dma_cache_sync(plat_dma_to_phys(dev, dma_handle), size, DMA_TO_DEVICE);
+ _dma_cache_sync(dma_handle, size, DMA_TO_DEVICE);
}
static void arc_dma_sync_sg_for_cpu(struct device *dev,
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 51c8df561077..430a0aa710d6 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -8,6 +8,7 @@ config ARM
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_SET_MEMORY
+ select ARCH_HAS_PHYS_TO_DMA
select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
select ARCH_HAS_STRICT_MODULE_RWX if MMU
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
@@ -24,7 +25,7 @@ config ARM
select CLONE_BACKWARDS
select CPU_PM if (SUSPEND || CPU_IDLE)
select DCACHE_WORD_ACCESS if HAVE_EFFICIENT_UNALIGNED_ACCESS
- select DMA_NOOP_OPS if !MMU
+ select DMA_DIRECT_OPS if !MMU
select EDAC_SUPPORT
select EDAC_ATOMIC_SCRUB
select GENERIC_ALLOCATOR
diff --git a/arch/arm/boot/dts/bcm2836.dtsi b/arch/arm/boot/dts/bcm2836.dtsi
index 61e158003509..1dfd76442777 100644
--- a/arch/arm/boot/dts/bcm2836.dtsi
+++ b/arch/arm/boot/dts/bcm2836.dtsi
@@ -13,24 +13,24 @@
compatible = "brcm,bcm2836-l1-intc";
reg = <0x40000000 0x100>;
interrupt-controller;
- #interrupt-cells = <1>;
+ #interrupt-cells = <2>;
interrupt-parent = <&local_intc>;
};
arm-pmu {
compatible = "arm,cortex-a7-pmu";
interrupt-parent = <&local_intc>;
- interrupts = <9>;
+ interrupts = <9 IRQ_TYPE_LEVEL_HIGH>;
};
};
timer {
compatible = "arm,armv7-timer";
interrupt-parent = <&local_intc>;
- interrupts = <0>, // PHYS_SECURE_PPI
- <1>, // PHYS_NONSECURE_PPI
- <3>, // VIRT_PPI
- <2>; // HYP_PPI
+ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>, // PHYS_SECURE_PPI
+ <1 IRQ_TYPE_LEVEL_HIGH>, // PHYS_NONSECURE_PPI
+ <3 IRQ_TYPE_LEVEL_HIGH>, // VIRT_PPI
+ <2 IRQ_TYPE_LEVEL_HIGH>; // HYP_PPI
always-on;
};
@@ -76,7 +76,7 @@
compatible = "brcm,bcm2836-armctrl-ic";
reg = <0x7e00b200 0x200>;
interrupt-parent = <&local_intc>;
- interrupts = <8>;
+ interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
};
&cpu_thermal {
diff --git a/arch/arm/boot/dts/bcm2837.dtsi b/arch/arm/boot/dts/bcm2837.dtsi
index bc1cca5cf43c..efa7d3387ab2 100644
--- a/arch/arm/boot/dts/bcm2837.dtsi
+++ b/arch/arm/boot/dts/bcm2837.dtsi
@@ -12,7 +12,7 @@
compatible = "brcm,bcm2836-l1-intc";
reg = <0x40000000 0x100>;
interrupt-controller;
- #interrupt-cells = <1>;
+ #interrupt-cells = <2>;
interrupt-parent = <&local_intc>;
};
};
@@ -20,10 +20,10 @@
timer {
compatible = "arm,armv7-timer";
interrupt-parent = <&local_intc>;
- interrupts = <0>, // PHYS_SECURE_PPI
- <1>, // PHYS_NONSECURE_PPI
- <3>, // VIRT_PPI
- <2>; // HYP_PPI
+ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>, // PHYS_SECURE_PPI
+ <1 IRQ_TYPE_LEVEL_HIGH>, // PHYS_NONSECURE_PPI
+ <3 IRQ_TYPE_LEVEL_HIGH>, // VIRT_PPI
+ <2 IRQ_TYPE_LEVEL_HIGH>; // HYP_PPI
always-on;
};
@@ -73,7 +73,7 @@
compatible = "brcm,bcm2836-armctrl-ic";
reg = <0x7e00b200 0x200>;
interrupt-parent = <&local_intc>;
- interrupts = <8>;
+ interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
};
&cpu_thermal {
diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
index dcde93c85c2d..18db25a5a66e 100644
--- a/arch/arm/boot/dts/bcm283x.dtsi
+++ b/arch/arm/boot/dts/bcm283x.dtsi
@@ -2,6 +2,7 @@
#include <dt-bindings/clock/bcm2835.h>
#include <dt-bindings/clock/bcm2835-aux.h>
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
/* firmware-provided startup stubs live here, where the secondary CPUs are
* spinning.
diff --git a/arch/arm/boot/dts/imx6ul.dtsi b/arch/arm/boot/dts/imx6ul.dtsi
index d5181f85ca9c..963e1698fe1d 100644
--- a/arch/arm/boot/dts/imx6ul.dtsi
+++ b/arch/arm/boot/dts/imx6ul.dtsi
@@ -68,12 +68,14 @@
clock-latency = <61036>; /* two CLK32 periods */
operating-points = <
/* kHz uV */
+ 696000 1275000
528000 1175000
396000 1025000
198000 950000
>;
fsl,soc-operating-points = <
/* KHz uV */
+ 696000 1275000
528000 1175000
396000 1175000
198000 1175000
diff --git a/arch/arm/boot/dts/omap2420-n8x0-common.dtsi b/arch/arm/boot/dts/omap2420-n8x0-common.dtsi
index 1df3ace3af92..63b0b4921e4e 100644
--- a/arch/arm/boot/dts/omap2420-n8x0-common.dtsi
+++ b/arch/arm/boot/dts/omap2420-n8x0-common.dtsi
@@ -52,6 +52,7 @@
onenand@0,0 {
#address-cells = <1>;
#size-cells = <1>;
+ compatible = "ti,omap2-onenand";
reg = <0 0 0x20000>; /* CS0, offset 0, IO size 128K */
gpmc,sync-read;
diff --git a/arch/arm/boot/dts/omap3-igep.dtsi b/arch/arm/boot/dts/omap3-igep.dtsi
index 4ad7d5565906..f33cc80c9dbc 100644
--- a/arch/arm/boot/dts/omap3-igep.dtsi
+++ b/arch/arm/boot/dts/omap3-igep.dtsi
@@ -147,32 +147,32 @@
gpmc,sync-read;
gpmc,sync-write;
gpmc,burst-length = <16>;
- gpmc,burst-read;
gpmc,burst-wrap;
+ gpmc,burst-read;
gpmc,burst-write;
gpmc,device-width = <2>; /* GPMC_DEVWIDTH_16BIT */
gpmc,mux-add-data = <2>; /* GPMC_MUX_AD */
gpmc,cs-on-ns = <0>;
- gpmc,cs-rd-off-ns = <87>;
- gpmc,cs-wr-off-ns = <87>;
+ gpmc,cs-rd-off-ns = <96>;
+ gpmc,cs-wr-off-ns = <96>;
gpmc,adv-on-ns = <0>;
- gpmc,adv-rd-off-ns = <10>;
- gpmc,adv-wr-off-ns = <10>;
- gpmc,oe-on-ns = <15>;
- gpmc,oe-off-ns = <87>;
+ gpmc,adv-rd-off-ns = <12>;
+ gpmc,adv-wr-off-ns = <12>;
+ gpmc,oe-on-ns = <18>;
+ gpmc,oe-off-ns = <96>;
gpmc,we-on-ns = <0>;
- gpmc,we-off-ns = <87>;
- gpmc,rd-cycle-ns = <112>;
- gpmc,wr-cycle-ns = <112>;
- gpmc,access-ns = <81>;
- gpmc,page-burst-access-ns = <15>;
+ gpmc,we-off-ns = <96>;
+ gpmc,rd-cycle-ns = <114>;
+ gpmc,wr-cycle-ns = <114>;
+ gpmc,access-ns = <90>;
+ gpmc,page-burst-access-ns = <12>;
gpmc,bus-turnaround-ns = <0>;
gpmc,cycle2cycle-delay-ns = <0>;
gpmc,wait-monitoring-ns = <0>;
- gpmc,clk-activation-ns = <5>;
+ gpmc,clk-activation-ns = <6>;
gpmc,wr-data-mux-bus-ns = <30>;
- gpmc,wr-access-ns = <81>;
- gpmc,sync-clk-ps = <15000>;
+ gpmc,wr-access-ns = <90>;
+ gpmc,sync-clk-ps = <12000>;
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index 669c51c00c00..e7c7b8e50703 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -838,6 +838,7 @@
onenand@0,0 {
#address-cells = <1>;
#size-cells = <1>;
+ compatible = "ti,omap2-onenand";
reg = <0 0 0x20000>; /* CS0, offset 0, IO size 128K */
gpmc,sync-read;
diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi
index 12fbb3da5fce..0d9b85317529 100644
--- a/arch/arm/boot/dts/omap3-n950-n9.dtsi
+++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi
@@ -367,6 +367,7 @@
onenand@0,0 {
#address-cells = <1>;
#size-cells = <1>;
+ compatible = "ti,omap2-onenand";
reg = <0 0 0x20000>; /* CS0, offset 0, IO size 128K */
gpmc,sync-read;
diff --git a/arch/arm/boot/dts/omap3430-sdp.dts b/arch/arm/boot/dts/omap3430-sdp.dts
index 908951eb5943..d652708f6bef 100644
--- a/arch/arm/boot/dts/omap3430-sdp.dts
+++ b/arch/arm/boot/dts/omap3430-sdp.dts
@@ -154,6 +154,7 @@
linux,mtd-name= "samsung,kfm2g16q2m-deb8";
#address-cells = <1>;
#size-cells = <1>;
+ compatible = "ti,omap2-onenand";
reg = <2 0 0x20000>; /* CS2, offset 0, IO size 4 */
gpmc,device-width = <2>;
diff --git a/arch/arm/configs/aspeed_g4_defconfig b/arch/arm/configs/aspeed_g4_defconfig
index d23b9d56a88b..95946dee9c77 100644
--- a/arch/arm/configs/aspeed_g4_defconfig
+++ b/arch/arm/configs/aspeed_g4_defconfig
@@ -1,7 +1,6 @@
CONFIG_KERNEL_XZ=y
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
diff --git a/arch/arm/configs/aspeed_g5_defconfig b/arch/arm/configs/aspeed_g5_defconfig
index c0ad7b82086b..8c7ea033cdc2 100644
--- a/arch/arm/configs/aspeed_g5_defconfig
+++ b/arch/arm/configs/aspeed_g5_defconfig
@@ -1,7 +1,6 @@
CONFIG_KERNEL_XZ=y
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
diff --git a/arch/arm/configs/hisi_defconfig b/arch/arm/configs/hisi_defconfig
index b2e340b272ee..74d611e41e02 100644
--- a/arch/arm/configs/hisi_defconfig
+++ b/arch/arm/configs/hisi_defconfig
@@ -1,4 +1,3 @@
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BLK_DEV_INITRD=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 61509c4b769f..b659244902cd 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -1,6 +1,5 @@
CONFIG_SYSVIPC=y
CONFIG_FHANDLE=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_CGROUPS=y
diff --git a/arch/arm/configs/mvebu_v7_defconfig b/arch/arm/configs/mvebu_v7_defconfig
index 69553704f2dc..ddaeda4f2e82 100644
--- a/arch/arm/configs/mvebu_v7_defconfig
+++ b/arch/arm/configs/mvebu_v7_defconfig
@@ -1,6 +1,5 @@
CONFIG_SYSVIPC=y
CONFIG_FHANDLE=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
@@ -57,7 +56,7 @@ CONFIG_MTD_CFI_STAA=y
CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_M25P80=y
CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_PXA3xx=y
+CONFIG_MTD_NAND_MARVELL=y
CONFIG_MTD_SPI_NOR=y
CONFIG_SRAM=y
CONFIG_MTD_UBI=y
diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig
index 830e817a028a..837d0c9c8b0e 100644
--- a/arch/arm/configs/pxa_defconfig
+++ b/arch/arm/configs/pxa_defconfig
@@ -1,7 +1,6 @@
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_FHANDLE=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BSD_PROCESS_ACCT=y
diff --git a/arch/arm/configs/sama5_defconfig b/arch/arm/configs/sama5_defconfig
index 6529cb43e0fd..2080025556b5 100644
--- a/arch/arm/configs/sama5_defconfig
+++ b/arch/arm/configs/sama5_defconfig
@@ -2,7 +2,6 @@
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_FHANDLE=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
index 6678f2929356..c819be04187e 100644
--- a/arch/arm/configs/tegra_defconfig
+++ b/arch/arm/configs/tegra_defconfig
@@ -1,5 +1,4 @@
CONFIG_SYSVIPC=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
diff --git a/arch/arm/configs/vt8500_v6_v7_defconfig b/arch/arm/configs/vt8500_v6_v7_defconfig
index 1bfaa7bfc392..9b85326ba287 100644
--- a/arch/arm/configs/vt8500_v6_v7_defconfig
+++ b/arch/arm/configs/vt8500_v6_v7_defconfig
@@ -1,4 +1,3 @@
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BLK_DEV_INITRD=y
diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c
index 18768f330449..07e31941dc67 100644
--- a/arch/arm/crypto/aes-neonbs-glue.c
+++ b/arch/arm/crypto/aes-neonbs-glue.c
@@ -181,9 +181,8 @@ static int cbc_init(struct crypto_tfm *tfm)
struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
ctx->enc_tfm = crypto_alloc_cipher("aes", 0, 0);
- if (IS_ERR(ctx->enc_tfm))
- return PTR_ERR(ctx->enc_tfm);
- return 0;
+
+ return PTR_ERR_OR_ZERO(ctx->enc_tfm);
}
static void cbc_exit(struct crypto_tfm *tfm)
@@ -258,9 +257,8 @@ static int xts_init(struct crypto_tfm *tfm)
struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm);
ctx->tweak_tfm = crypto_alloc_cipher("aes", 0, 0);
- if (IS_ERR(ctx->tweak_tfm))
- return PTR_ERR(ctx->tweak_tfm);
- return 0;
+
+ return PTR_ERR_OR_ZERO(ctx->tweak_tfm);
}
static void xts_exit(struct crypto_tfm *tfm)
diff --git a/arch/arm/crypto/crc32-ce-glue.c b/arch/arm/crypto/crc32-ce-glue.c
index 1b0e0e86ee9c..96e62ec105d0 100644
--- a/arch/arm/crypto/crc32-ce-glue.c
+++ b/arch/arm/crypto/crc32-ce-glue.c
@@ -188,6 +188,7 @@ static struct shash_alg crc32_pmull_algs[] = { {
.base.cra_name = "crc32",
.base.cra_driver_name = "crc32-arm-ce",
.base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.base.cra_blocksize = 1,
.base.cra_module = THIS_MODULE,
}, {
@@ -203,6 +204,7 @@ static struct shash_alg crc32_pmull_algs[] = { {
.base.cra_name = "crc32c",
.base.cra_driver_name = "crc32c-arm-ce",
.base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.base.cra_blocksize = 1,
.base.cra_module = THIS_MODULE,
} };
diff --git a/arch/arm/include/asm/dma-direct.h b/arch/arm/include/asm/dma-direct.h
new file mode 100644
index 000000000000..5b0a8a421894
--- /dev/null
+++ b/arch/arm/include/asm/dma-direct.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ASM_ARM_DMA_DIRECT_H
+#define ASM_ARM_DMA_DIRECT_H 1
+
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ unsigned int offset = paddr & ~PAGE_MASK;
+ return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
+{
+ unsigned int offset = dev_addr & ~PAGE_MASK;
+ return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
+}
+
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+ u64 limit, mask;
+
+ if (!dev->dma_mask)
+ return 0;
+
+ mask = *dev->dma_mask;
+
+ limit = (mask + 1) & ~mask;
+ if (limit && size > limit)
+ return 0;
+
+ if ((addr | (addr + size - 1)) & ~mask)
+ return 0;
+
+ return 1;
+}
+
+#endif /* ASM_ARM_DMA_DIRECT_H */
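
The dma_capable() helper moved above rejects transfers that the
device's DMA mask cannot address. The (mask + 1) & ~mask expression
computes a size limit: the carry out of the lowest run of set bits
lands on the first cleared bit, so a contiguous mask yields simply
mask + 1, a mask with a hole yields the largest transfer that fits
below the hole, and an all-ones 64-bit mask wraps to 0, meaning no
limit. A small user-space demonstration of the arithmetic
(illustration only, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const uint64_t masks[] = {
			0xffffffffffffffffULL,	/* all ones: wraps to 0, no limit */
			0x00000000ffffffffULL,	/* contiguous 32-bit mask: limit 2^32 */
			0x00000000f0ffffffULL,	/* hole at bits 24-27: limit 2^24 */
		};

		for (unsigned int i = 0; i < 3; i++) {
			uint64_t mask = masks[i];
			uint64_t limit = (mask + 1) & ~mask;

			printf("mask=%#018llx limit=%#llx\n",
			       (unsigned long long)mask,
			       (unsigned long long)limit);
		}
		return 0;
	}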
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index daf837423a76..8436f6ade57d 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -18,7 +18,7 @@ extern const struct dma_map_ops arm_coherent_dma_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
- return IS_ENABLED(CONFIG_MMU) ? &arm_dma_ops : &dma_noop_ops;
+ return IS_ENABLED(CONFIG_MMU) ? &arm_dma_ops : &dma_direct_ops;
}
#ifdef __arch_page_to_dma
@@ -109,39 +109,6 @@ static inline bool is_device_dma_coherent(struct device *dev)
return dev->archdata.dma_coherent;
}
-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
- unsigned int offset = paddr & ~PAGE_MASK;
- return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
-}
-
-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
-{
- unsigned int offset = dev_addr & ~PAGE_MASK;
- return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
-}
-
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
- u64 limit, mask;
-
- if (!dev->dma_mask)
- return 0;
-
- mask = *dev->dma_mask;
-
- limit = (mask + 1) & ~mask;
- if (limit && size > limit)
- return 0;
-
- if ((addr | (addr + size - 1)) & ~mask)
- return 0;
-
- return 1;
-}
-
-static inline void dma_mark_clean(void *addr, size_t size) { }
-
/**
* arm_dma_alloc - allocate consistent memory for DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index a9f7d3f47134..acbf9ec7b396 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -238,6 +238,9 @@ int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
int exception_index);
+static inline void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ int exception_index) {}
+
static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
unsigned long hyp_stack_ptr,
unsigned long vector_ptr)
@@ -301,4 +304,6 @@ int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
/* All host FP/SIMD state is restored on guest exit, so nothing to save: */
static inline void kvm_fpsimd_flush_cpu_state(void) {}
+static inline void kvm_arm_vhe_guest_enter(void) {}
+static inline void kvm_arm_vhe_guest_exit(void) {}
#endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index fa6f2174276b..a2d176a308bd 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -211,6 +211,11 @@ static inline bool __kvm_cpu_uses_extended_idmap(void)
return false;
}
+static inline unsigned long __kvm_idmap_ptrs_per_pgd(void)
+{
+ return PTRS_PER_PGD;
+}
+
static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
pgd_t *hyp_pgd,
pgd_t *merged_hyp_pgd,
@@ -221,6 +226,18 @@ static inline unsigned int kvm_get_vmid_bits(void)
return 8;
}
+static inline void *kvm_get_hyp_vector(void)
+{
+ return kvm_ksym_ref(__kvm_hyp_vector);
+}
+
+static inline int kvm_map_vectors(void)
+{
+ return 0;
+}
+
+#define kvm_phys_to_vttbr(addr) (addr)
+
#endif /* !__ASSEMBLY__ */
#endif /* __ARM_KVM_MMU_H__ */
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 776757d1604a..e71cc35de163 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -75,9 +75,6 @@ struct thread_info {
.addr_limit = KERNEL_DS, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
/*
* how to get the current stack pointer in C
*/
diff --git a/arch/arm/include/uapi/asm/siginfo.h b/arch/arm/include/uapi/asm/siginfo.h
new file mode 100644
index 000000000000..d0513880be21
--- /dev/null
+++ b/arch/arm/include/uapi/asm/siginfo.h
@@ -0,0 +1,13 @@
+#ifndef __ASM_SIGINFO_H
+#define __ASM_SIGINFO_H
+
+#include <asm-generic/siginfo.h>
+
+/*
+ * SIGFPE si_codes
+ */
+#ifdef __KERNEL__
+#define FPE_FIXME 0 /* Broken dup of SI_USER */
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 58e3771e4c5b..7724b0f661b3 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -390,7 +390,6 @@ static void ptrace_hbptriggered(struct perf_event *bp,
struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
long num;
int i;
- siginfo_t info;
for (i = 0; i < ARM_MAX_HBP_SLOTS; ++i)
if (current->thread.debug.hbp[i] == bp)
@@ -398,12 +397,7 @@ static void ptrace_hbptriggered(struct perf_event *bp,
num = (i == ARM_MAX_HBP_SLOTS) ? 0 : ptrace_hbp_idx_to_num(i);
- info.si_signo = SIGTRAP;
- info.si_errno = (int)num;
- info.si_code = TRAP_HWBKPT;
- info.si_addr = (void __user *)(bkpt->trigger);
-
- force_sig_info(SIGTRAP, &info, current);
+ force_sig_ptrace_errno_trap((int)num, (void __user *)(bkpt->trigger));
}
/*
diff --git a/arch/arm/mach-ixp4xx/vulcan-setup.c b/arch/arm/mach-ixp4xx/vulcan-setup.c
index 731fb2019ecb..2c03d2f6b647 100644
--- a/arch/arm/mach-ixp4xx/vulcan-setup.c
+++ b/arch/arm/mach-ixp4xx/vulcan-setup.c
@@ -16,6 +16,7 @@
#include <linux/serial_8250.h>
#include <linux/io.h>
#include <linux/w1-gpio.h>
+#include <linux/gpio/machine.h>
#include <linux/mtd/plat-ram.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -162,9 +163,16 @@ static struct platform_device vulcan_max6369 = {
.num_resources = 1,
};
+static struct gpiod_lookup_table vulcan_w1_gpiod_table = {
+ .dev_id = "w1-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX("IXP4XX_GPIO_CHIP", 14, NULL, 0,
+ GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ },
+};
+
static struct w1_gpio_platform_data vulcan_w1_gpio_pdata = {
- .pin = 14,
- .ext_pullup_enable_pin = -EINVAL,
+ /* Intentionally left blank */
};
static struct platform_device vulcan_w1_gpio = {
@@ -233,6 +241,7 @@ static void __init vulcan_init(void)
IXP4XX_EXP_BUS_WR_EN |
IXP4XX_EXP_BUS_BYTE_EN;
+ gpiod_add_lookup_table(&vulcan_w1_gpiod_table);
platform_add_devices(vulcan_devices, ARRAY_SIZE(vulcan_devices));
}
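
Both this board file and the raumfeld one further down drop hardcoded
pin numbers from w1_gpio_platform_data in favour of a GPIO descriptor
lookup table: the board registers a table keyed by the consumer device
name, and the driver resolves its descriptor without ever seeing a pin
number. A condensed sketch of the two halves; the driver side and the
chip label are assumptions for illustration:

	#include <linux/err.h>
	#include <linux/gpio/consumer.h>
	#include <linux/gpio/machine.h>
	#include <linux/init.h>
	#include <linux/platform_device.h>

	/* board side: hand line 14 of "gpiochip0" to the device
	 * that will be registered as "w1-gpio" */
	static struct gpiod_lookup_table example_w1_gpiod_table = {
		.dev_id = "w1-gpio",
		.table = {
			GPIO_LOOKUP_IDX("gpiochip0", 14, NULL, 0,
					GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
			{ },	/* sentinel */
		},
	};

	static void __init example_board_init(void)
	{
		gpiod_add_lookup_table(&example_w1_gpiod_table);
	}

	/* driver side: the lookup supplies the pin, no platform data */
	static int example_probe(struct platform_device *pdev)
	{
		struct gpio_desc *gpiod;

		gpiod = devm_gpiod_get_index(&pdev->dev, NULL, 0,
					     GPIOD_OUT_LOW);
		return PTR_ERR_OR_ZERO(gpiod);
	}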
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 2f722a805948..c15bbcad5f67 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -232,6 +232,3 @@ obj-y += $(omap-hsmmc-m) $(omap-hsmmc-y)
obj-y += omap_phy_internal.o
obj-$(CONFIG_MACH_OMAP2_TUSB6010) += usb-tusb6010.o
-
-onenand-$(CONFIG_MTD_ONENAND_OMAP2) := gpmc-onenand.o
-obj-y += $(onenand-m) $(onenand-y)
diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c
deleted file mode 100644
index 2944af820558..000000000000
--- a/arch/arm/mach-omap2/gpmc-onenand.c
+++ /dev/null
@@ -1,409 +0,0 @@
-/*
- * linux/arch/arm/mach-omap2/gpmc-onenand.c
- *
- * Copyright (C) 2006 - 2009 Nokia Corporation
- * Contacts: Juha Yrjola
- * Tony Lindgren
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/mtd/onenand_regs.h>
-#include <linux/io.h>
-#include <linux/omap-gpmc.h>
-#include <linux/platform_data/mtd-onenand-omap2.h>
-#include <linux/err.h>
-
-#include <asm/mach/flash.h>
-
-#include "soc.h"
-
-#define ONENAND_IO_SIZE SZ_128K
-
-#define ONENAND_FLAG_SYNCREAD (1 << 0)
-#define ONENAND_FLAG_SYNCWRITE (1 << 1)
-#define ONENAND_FLAG_HF (1 << 2)
-#define ONENAND_FLAG_VHF (1 << 3)
-
-static unsigned onenand_flags;
-static unsigned latency;
-
-static struct omap_onenand_platform_data *gpmc_onenand_data;
-
-static struct resource gpmc_onenand_resource = {
- .flags = IORESOURCE_MEM,
-};
-
-static struct platform_device gpmc_onenand_device = {
- .name = "omap2-onenand",
- .id = -1,
- .num_resources = 1,
- .resource = &gpmc_onenand_resource,
-};
-
-static struct gpmc_settings onenand_async = {
- .device_width = GPMC_DEVWIDTH_16BIT,
- .mux_add_data = GPMC_MUX_AD,
-};
-
-static struct gpmc_settings onenand_sync = {
- .burst_read = true,
- .burst_wrap = true,
- .burst_len = GPMC_BURST_16,
- .device_width = GPMC_DEVWIDTH_16BIT,
- .mux_add_data = GPMC_MUX_AD,
- .wait_pin = 0,
-};
-
-static void omap2_onenand_calc_async_timings(struct gpmc_timings *t)
-{
- struct gpmc_device_timings dev_t;
- const int t_cer = 15;
- const int t_avdp = 12;
- const int t_aavdh = 7;
- const int t_ce = 76;
- const int t_aa = 76;
- const int t_oe = 20;
- const int t_cez = 20; /* max of t_cez, t_oez */
- const int t_wpl = 40;
- const int t_wph = 30;
-
- memset(&dev_t, 0, sizeof(dev_t));
-
- dev_t.t_avdp_r = max_t(int, t_avdp, t_cer) * 1000;
- dev_t.t_avdp_w = dev_t.t_avdp_r;
- dev_t.t_aavdh = t_aavdh * 1000;
- dev_t.t_aa = t_aa * 1000;
- dev_t.t_ce = t_ce * 1000;
- dev_t.t_oe = t_oe * 1000;
- dev_t.t_cez_r = t_cez * 1000;
- dev_t.t_cez_w = dev_t.t_cez_r;
- dev_t.t_wpl = t_wpl * 1000;
- dev_t.t_wph = t_wph * 1000;
-
- gpmc_calc_timings(t, &onenand_async, &dev_t);
-}
-
-static void omap2_onenand_set_async_mode(void __iomem *onenand_base)
-{
- u32 reg;
-
- /* Ensure sync read and sync write are disabled */
- reg = readw(onenand_base + ONENAND_REG_SYS_CFG1);
- reg &= ~ONENAND_SYS_CFG1_SYNC_READ & ~ONENAND_SYS_CFG1_SYNC_WRITE;
- writew(reg, onenand_base + ONENAND_REG_SYS_CFG1);
-}
-
-static void set_onenand_cfg(void __iomem *onenand_base)
-{
- u32 reg = ONENAND_SYS_CFG1_RDY | ONENAND_SYS_CFG1_INT;
-
- reg |= (latency << ONENAND_SYS_CFG1_BRL_SHIFT) |
- ONENAND_SYS_CFG1_BL_16;
- if (onenand_flags & ONENAND_FLAG_SYNCREAD)
- reg |= ONENAND_SYS_CFG1_SYNC_READ;
- else
- reg &= ~ONENAND_SYS_CFG1_SYNC_READ;
- if (onenand_flags & ONENAND_FLAG_SYNCWRITE)
- reg |= ONENAND_SYS_CFG1_SYNC_WRITE;
- else
- reg &= ~ONENAND_SYS_CFG1_SYNC_WRITE;
- if (onenand_flags & ONENAND_FLAG_HF)
- reg |= ONENAND_SYS_CFG1_HF;
- else
- reg &= ~ONENAND_SYS_CFG1_HF;
- if (onenand_flags & ONENAND_FLAG_VHF)
- reg |= ONENAND_SYS_CFG1_VHF;
- else
- reg &= ~ONENAND_SYS_CFG1_VHF;
-
- writew(reg, onenand_base + ONENAND_REG_SYS_CFG1);
-}
-
-static int omap2_onenand_get_freq(struct omap_onenand_platform_data *cfg,
- void __iomem *onenand_base)
-{
- u16 ver = readw(onenand_base + ONENAND_REG_VERSION_ID);
- int freq;
-
- switch ((ver >> 4) & 0xf) {
- case 0:
- freq = 40;
- break;
- case 1:
- freq = 54;
- break;
- case 2:
- freq = 66;
- break;
- case 3:
- freq = 83;
- break;
- case 4:
- freq = 104;
- break;
- default:
- pr_err("onenand rate not detected, bad GPMC async timings?\n");
- freq = 0;
- }
-
- return freq;
-}
-
-static void omap2_onenand_calc_sync_timings(struct gpmc_timings *t,
- unsigned int flags,
- int freq)
-{
- struct gpmc_device_timings dev_t;
- const int t_cer = 15;
- const int t_avdp = 12;
- const int t_cez = 20; /* max of t_cez, t_oez */
- const int t_wpl = 40;
- const int t_wph = 30;
- int min_gpmc_clk_period, t_ces, t_avds, t_avdh, t_ach, t_aavdh, t_rdyo;
- int div, gpmc_clk_ns;
-
- if (flags & ONENAND_SYNC_READ)
- onenand_flags = ONENAND_FLAG_SYNCREAD;
- else if (flags & ONENAND_SYNC_READWRITE)
- onenand_flags = ONENAND_FLAG_SYNCREAD | ONENAND_FLAG_SYNCWRITE;
-
- switch (freq) {
- case 104:
- min_gpmc_clk_period = 9600; /* 104 MHz */
- t_ces = 3;
- t_avds = 4;
- t_avdh = 2;
- t_ach = 3;
- t_aavdh = 6;
- t_rdyo = 6;
- break;
- case 83:
- min_gpmc_clk_period = 12000; /* 83 MHz */
- t_ces = 5;
- t_avds = 4;
- t_avdh = 2;
- t_ach = 6;
- t_aavdh = 6;
- t_rdyo = 9;
- break;
- case 66:
- min_gpmc_clk_period = 15000; /* 66 MHz */
- t_ces = 6;
- t_avds = 5;
- t_avdh = 2;
- t_ach = 6;
- t_aavdh = 6;
- t_rdyo = 11;
- break;
- default:
- min_gpmc_clk_period = 18500; /* 54 MHz */
- t_ces = 7;
- t_avds = 7;
- t_avdh = 7;
- t_ach = 9;
- t_aavdh = 7;
- t_rdyo = 15;
- onenand_flags &= ~ONENAND_FLAG_SYNCWRITE;
- break;
- }
-
- div = gpmc_calc_divider(min_gpmc_clk_period);
- gpmc_clk_ns = gpmc_ticks_to_ns(div);
- if (gpmc_clk_ns < 15) /* >66MHz */
- onenand_flags |= ONENAND_FLAG_HF;
- else
- onenand_flags &= ~ONENAND_FLAG_HF;
- if (gpmc_clk_ns < 12) /* >83MHz */
- onenand_flags |= ONENAND_FLAG_VHF;
- else
- onenand_flags &= ~ONENAND_FLAG_VHF;
- if (onenand_flags & ONENAND_FLAG_VHF)
- latency = 8;
- else if (onenand_flags & ONENAND_FLAG_HF)
- latency = 6;
- else if (gpmc_clk_ns >= 25) /* 40 MHz*/
- latency = 3;
- else
- latency = 4;
-
- /* Set synchronous read timings */
- memset(&dev_t, 0, sizeof(dev_t));
-
- if (onenand_flags & ONENAND_FLAG_SYNCREAD)
- onenand_sync.sync_read = true;
- if (onenand_flags & ONENAND_FLAG_SYNCWRITE) {
- onenand_sync.sync_write = true;
- onenand_sync.burst_write = true;
- } else {
- dev_t.t_avdp_w = max(t_avdp, t_cer) * 1000;
- dev_t.t_wpl = t_wpl * 1000;
- dev_t.t_wph = t_wph * 1000;
- dev_t.t_aavdh = t_aavdh * 1000;
- }
- dev_t.ce_xdelay = true;
- dev_t.avd_xdelay = true;
- dev_t.oe_xdelay = true;
- dev_t.we_xdelay = true;
- dev_t.clk = min_gpmc_clk_period;
- dev_t.t_bacc = dev_t.clk;
- dev_t.t_ces = t_ces * 1000;
- dev_t.t_avds = t_avds * 1000;
- dev_t.t_avdh = t_avdh * 1000;
- dev_t.t_ach = t_ach * 1000;
- dev_t.cyc_iaa = (latency + 1);
- dev_t.t_cez_r = t_cez * 1000;
- dev_t.t_cez_w = dev_t.t_cez_r;
- dev_t.cyc_aavdh_oe = 1;
- dev_t.t_rdyo = t_rdyo * 1000 + min_gpmc_clk_period;
-
- gpmc_calc_timings(t, &onenand_sync, &dev_t);
-}
-
-static int omap2_onenand_setup_async(void __iomem *onenand_base)
-{
- struct gpmc_timings t;
- int ret;
-
- /*
- * Note that we need to keep sync_write set for the call to
- * omap2_onenand_set_async_mode() to work to detect the onenand
- * supported clock rate for the sync timings.
- */
- if (gpmc_onenand_data->of_node) {
- gpmc_read_settings_dt(gpmc_onenand_data->of_node,
- &onenand_async);
- if (onenand_async.sync_read || onenand_async.sync_write) {
- if (onenand_async.sync_write)
- gpmc_onenand_data->flags |=
- ONENAND_SYNC_READWRITE;
- else
- gpmc_onenand_data->flags |= ONENAND_SYNC_READ;
- onenand_async.sync_read = false;
- }
- }
-
- onenand_async.sync_write = true;
- omap2_onenand_calc_async_timings(&t);
-
- ret = gpmc_cs_program_settings(gpmc_onenand_data->cs, &onenand_async);
- if (ret < 0)
- return ret;
-
- ret = gpmc_cs_set_timings(gpmc_onenand_data->cs, &t, &onenand_async);
- if (ret < 0)
- return ret;
-
- omap2_onenand_set_async_mode(onenand_base);
-
- return 0;
-}
-
-static int omap2_onenand_setup_sync(void __iomem *onenand_base, int *freq_ptr)
-{
- int ret, freq = *freq_ptr;
- struct gpmc_timings t;
-
- if (!freq) {
- /* Very first call freq is not known */
- freq = omap2_onenand_get_freq(gpmc_onenand_data, onenand_base);
- if (!freq)
- return -ENODEV;
- set_onenand_cfg(onenand_base);
- }
-
- if (gpmc_onenand_data->of_node) {
- gpmc_read_settings_dt(gpmc_onenand_data->of_node,
- &onenand_sync);
- } else {
- /*
- * FIXME: Appears to be legacy code from initial ONENAND commit.
- * Unclear what boards this is for and if this can be removed.
- */
- if (!cpu_is_omap34xx())
- onenand_sync.wait_on_read = true;
- }
-
- omap2_onenand_calc_sync_timings(&t, gpmc_onenand_data->flags, freq);
-
- ret = gpmc_cs_program_settings(gpmc_onenand_data->cs, &onenand_sync);
- if (ret < 0)
- return ret;
-
- ret = gpmc_cs_set_timings(gpmc_onenand_data->cs, &t, &onenand_sync);
- if (ret < 0)
- return ret;
-
- set_onenand_cfg(onenand_base);
-
- *freq_ptr = freq;
-
- return 0;
-}
-
-static int gpmc_onenand_setup(void __iomem *onenand_base, int *freq_ptr)
-{
- struct device *dev = &gpmc_onenand_device.dev;
- unsigned l = ONENAND_SYNC_READ | ONENAND_SYNC_READWRITE;
- int ret;
-
- ret = omap2_onenand_setup_async(onenand_base);
- if (ret) {
- dev_err(dev, "unable to set to async mode\n");
- return ret;
- }
-
- if (!(gpmc_onenand_data->flags & l))
- return 0;
-
- ret = omap2_onenand_setup_sync(onenand_base, freq_ptr);
- if (ret)
- dev_err(dev, "unable to set to sync mode\n");
- return ret;
-}
-
-int gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
-{
- int err;
- struct device *dev = &gpmc_onenand_device.dev;
-
- gpmc_onenand_data = _onenand_data;
- gpmc_onenand_data->onenand_setup = gpmc_onenand_setup;
- gpmc_onenand_device.dev.platform_data = gpmc_onenand_data;
-
- if (cpu_is_omap24xx() &&
- (gpmc_onenand_data->flags & ONENAND_SYNC_READWRITE)) {
- dev_warn(dev, "OneNAND using only SYNC_READ on 24xx\n");
- gpmc_onenand_data->flags &= ~ONENAND_SYNC_READWRITE;
- gpmc_onenand_data->flags |= ONENAND_SYNC_READ;
- }
-
- if (cpu_is_omap34xx())
- gpmc_onenand_data->flags |= ONENAND_IN_OMAP34XX;
- else
- gpmc_onenand_data->flags &= ~ONENAND_IN_OMAP34XX;
-
- err = gpmc_cs_request(gpmc_onenand_data->cs, ONENAND_IO_SIZE,
- (unsigned long *)&gpmc_onenand_resource.start);
- if (err < 0) {
- dev_err(dev, "Cannot request GPMC CS %d, error %d\n",
- gpmc_onenand_data->cs, err);
- return err;
- }
-
- gpmc_onenand_resource.end = gpmc_onenand_resource.start +
- ONENAND_IO_SIZE - 1;
-
- err = platform_device_register(&gpmc_onenand_device);
- if (err) {
- dev_err(dev, "Unable to register OneNAND device\n");
- gpmc_cs_free(gpmc_onenand_data->cs);
- }
-
- return err;
-}
diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c
index 9d662fed03ec..feddca7f3540 100644
--- a/arch/arm/mach-pxa/raumfeld.c
+++ b/arch/arm/mach-pxa/raumfeld.c
@@ -506,11 +506,16 @@ static void w1_enable_external_pullup(int enable)
msleep(100);
}
+static struct gpiod_lookup_table raumfeld_w1_gpiod_table = {
+ .dev_id = "w1-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX("gpio-pxa", GPIO_ONE_WIRE, NULL, 0,
+ GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+ },
+};
+
static struct w1_gpio_platform_data w1_gpio_platform_data = {
- .pin = GPIO_ONE_WIRE,
- .is_open_drain = 0,
- .enable_external_pullup = w1_enable_external_pullup,
- .ext_pullup_enable_pin = -EINVAL,
+ .enable_external_pullup = w1_enable_external_pullup,
};
static struct platform_device raumfeld_w1_gpio_device = {
@@ -523,13 +528,14 @@ static struct platform_device raumfeld_w1_gpio_device = {
static void __init raumfeld_w1_init(void)
{
int ret = gpio_request(GPIO_W1_PULLUP_ENABLE,
- "W1 external pullup enable");
+ "W1 external pullup enable");
if (ret < 0)
pr_warn("Unable to request GPIO_W1_PULLUP_ENABLE\n");
else
gpio_direction_output(GPIO_W1_PULLUP_ENABLE, 0);
+ gpiod_add_lookup_table(&raumfeld_w1_gpiod_table);
platform_device_register(&raumfeld_w1_gpio_device);
}
diff --git a/arch/arm/mm/dma-mapping-nommu.c b/arch/arm/mm/dma-mapping-nommu.c
index 6db5fc26d154..619f24a42d09 100644
--- a/arch/arm/mm/dma-mapping-nommu.c
+++ b/arch/arm/mm/dma-mapping-nommu.c
@@ -11,7 +11,7 @@
#include <linux/export.h>
#include <linux/mm.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
#include <linux/scatterlist.h>
#include <asm/cachetype.h>
@@ -22,7 +22,7 @@
#include "dma.h"
/*
- * dma_noop_ops is used if
+ * dma_direct_ops is used if
* - MMU/MPU is off
* - cpu is v7m w/o cache support
* - device is coherent
@@ -39,7 +39,6 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
unsigned long attrs)
{
- const struct dma_map_ops *ops = &dma_noop_ops;
void *ret;
/*
@@ -48,7 +47,7 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
*/
if (attrs & DMA_ATTR_NON_CONSISTENT)
- return ops->alloc(dev, size, dma_handle, gfp, attrs);
+ return dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
ret = dma_alloc_from_global_coherent(size, dma_handle);
@@ -70,10 +69,8 @@ static void arm_nommu_dma_free(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_addr,
unsigned long attrs)
{
- const struct dma_map_ops *ops = &dma_noop_ops;
-
if (attrs & DMA_ATTR_NON_CONSISTENT) {
- ops->free(dev, size, cpu_addr, dma_addr, attrs);
+ dma_direct_free(dev, size, cpu_addr, dma_addr, attrs);
} else {
int ret = dma_release_from_global_coherent(get_order(size),
cpu_addr);
@@ -213,7 +210,7 @@ EXPORT_SYMBOL(arm_nommu_dma_ops);
static const struct dma_map_ops *arm_nommu_get_dma_map_ops(bool coherent)
{
- return coherent ? &dma_noop_ops : &arm_nommu_dma_ops;
+ return coherent ? &dma_direct_ops : &arm_nommu_dma_ops;
}
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index a71a48e71fff..03c6a3c72f9c 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -257,7 +257,7 @@ static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_
if (exceptions == VFP_EXCEPTION_ERROR) {
vfp_panic("unhandled bounce", inst);
- vfp_raise_sigfpe(0, regs);
+ vfp_raise_sigfpe(FPE_FIXME, regs);
return;
}
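
FPE_FIXME documents a long-standing wart rather than fixing it: si_code
0 is numerically SI_USER, so a SIGFPE raised by the VFP bounce path
with si_code 0 is indistinguishable, from userspace, from one sent via
kill(2). A user-space illustration of the ambiguity a handler faces
(printf in a signal handler is acceptable only in a demo):

	#include <signal.h>
	#include <stdio.h>
	#include <unistd.h>

	static void handler(int sig, siginfo_t *info, void *ctx)
	{
		(void)ctx;
		if (info->si_code == SI_USER)	/* SI_USER == 0 */
			printf("SIGFPE %d: looks user-sent\n", sig);
		else
			printf("SIGFPE %d: hardware fault, si_code=%d\n",
			       sig, info->si_code);
	}

	int main(void)
	{
		struct sigaction sa = { 0 };

		sa.sa_sigaction = handler;
		sa.sa_flags = SA_SIGINFO;
		sigaction(SIGFPE, &sa, NULL);

		kill(getpid(), SIGFPE);	/* arrives with si_code == SI_USER */
		return 0;
	}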
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index c9a7e9e1414f..b2b95f79c746 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -59,6 +59,7 @@ config ARM64
select COMMON_CLK
select CPU_PM if (SUSPEND || CPU_IDLE)
select DCACHE_WORD_ACCESS
+ select DMA_DIRECT_OPS
select EDAC_SUPPORT
select FRAME_POINTER
select GENERIC_ALLOCATOR
@@ -227,7 +228,7 @@ config GENERIC_CSUM
config GENERIC_CALIBRATE_DELAY
def_bool y
-config ZONE_DMA
+config ZONE_DMA32
def_bool y
config HAVE_GENERIC_GUP
@@ -522,20 +523,13 @@ config CAVIUM_ERRATUM_30115
config QCOM_FALKOR_ERRATUM_1003
bool "Falkor E1003: Incorrect translation due to ASID change"
default y
- select ARM64_PAN if ARM64_SW_TTBR0_PAN
help
On Falkor v1, an incorrect ASID may be cached in the TLB when ASID
- and BADDR are changed together in TTBRx_EL1. The workaround for this
- issue is to use a reserved ASID in cpu_do_switch_mm() before
- switching to the new ASID. Saying Y here selects ARM64_PAN if
- ARM64_SW_TTBR0_PAN is selected. This is done because implementing and
- maintaining the E1003 workaround in the software PAN emulation code
- would be an unnecessary complication. The affected Falkor v1 CPU
- implements ARMv8.1 hardware PAN support and using hardware PAN
- support versus software PAN emulation is mutually exclusive at
- runtime.
-
- If unsure, say Y.
+ and BADDR are changed together in TTBRx_EL1. Since we keep the ASID
+ in TTBR1_EL1, this situation only occurs in the entry trampoline and
+ then only for entries in the walk cache, since the leaf translation
+ is unchanged. Work around the erratum by invalidating the walk cache
+ entries for the trampoline before entering the kernel proper.
config QCOM_FALKOR_ERRATUM_1009
bool "Falkor E1009: Prematurely complete a DSB after a TLBI"
@@ -656,6 +650,35 @@ config ARM64_VA_BITS
default 47 if ARM64_VA_BITS_47
default 48 if ARM64_VA_BITS_48
+choice
+ prompt "Physical address space size"
+ default ARM64_PA_BITS_48
+ help
+ Choose the maximum physical address range that the kernel will
+ support.
+
+config ARM64_PA_BITS_48
+ bool "48-bit"
+
+config ARM64_PA_BITS_52
+ bool "52-bit (ARMv8.2)"
+ depends on ARM64_64K_PAGES
+ depends on ARM64_PAN || !ARM64_SW_TTBR0_PAN
+ help
+ Enable support for a 52-bit physical address space, introduced as
+ part of the ARMv8.2-LPA extension.
+
+ With this enabled, the kernel will also continue to work on CPUs that
+ do not support ARMv8.2-LPA, but with some added memory overhead (and
+ minor performance overhead).
+
+endchoice
+
+config ARM64_PA_BITS
+ int
+ default 48 if ARM64_PA_BITS_48
+ default 52 if ARM64_PA_BITS_52
+
config CPU_BIG_ENDIAN
bool "Build big-endian kernel"
help
@@ -850,6 +873,35 @@ config FORCE_MAX_ZONEORDER
However for 4K, we choose a higher default value, 11 as opposed to 10, giving us
4M allocations matching the default size used by generic code.
+config UNMAP_KERNEL_AT_EL0
+ bool "Unmap kernel when running in userspace (aka \"KAISER\")" if EXPERT
+ default y
+ help
+ Speculation attacks against some high-performance processors can
+ be used to bypass MMU permission checks and leak kernel data to
+ userspace. This can be defended against by unmapping the kernel
+ when running in userspace, mapping it back in on exception entry
+ via a trampoline page in the vector table.
+
+ If unsure, say Y.
+
+config HARDEN_BRANCH_PREDICTOR
+ bool "Harden the branch predictor against aliasing attacks" if EXPERT
+ default y
+ help
+ Speculation attacks against some high-performance processors rely on
+ being able to manipulate the branch predictor for a victim context by
+ executing aliasing branches in the attacker context. Such attacks
+ can be partially mitigated by clearing internal branch
+ predictor state and limiting the prediction logic in some situations.
+
+ This config option will take CPU-specific actions to harden the
+ branch predictor against aliasing attacks and may rely on specific
+ instruction sequences or control bits being set by the system
+ firmware.
+
+ If unsure, say Y.
+
menuconfig ARMV8_DEPRECATED
bool "Emulate deprecated/obsolete ARMv8 instructions"
depends on COMPAT
@@ -1021,6 +1073,22 @@ config ARM64_PMEM
operations if DC CVAP is not supported (following the behaviour of
DC CVAP itself if the system does not define a point of persistence).
+config ARM64_RAS_EXTN
+ bool "Enable support for RAS CPU Extensions"
+ default y
+ help
+ CPUs that support the Reliability, Availability and Serviceability
+ (RAS) Extensions, part of ARMv8.2, are able to track faults and
+ errors, classify them and report them to software.
+
+ On CPUs with these extensions system software can use additional
+ barriers to determine if faults are pending and read the
+ classification from a new set of registers.
+
+ Selecting this feature will allow the kernel to use these barriers
+ and access the new registers if the system supports the extension.
+ Platform RAS features may additionally depend on firmware support.
+
endmenu
config ARM64_SVE
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 6356c6da34ea..b20fa9b31efe 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -161,7 +161,7 @@ CONFIG_MTD_BLOCK=y
CONFIG_MTD_M25P80=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_DENALI_DT=y
-CONFIG_MTD_NAND_PXA3xx=y
+CONFIG_MTD_NAND_MARVELL=y
CONFIG_MTD_SPI_NOR=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=m
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index 70c517aa4501..285c36c7b408 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -29,6 +29,24 @@ config CRYPTO_SHA2_ARM64_CE
select CRYPTO_HASH
select CRYPTO_SHA256_ARM64
+config CRYPTO_SHA512_ARM64_CE
+ tristate "SHA-384/SHA-512 digest algorithm (ARMv8 Crypto Extensions)"
+ depends on KERNEL_MODE_NEON
+ select CRYPTO_HASH
+ select CRYPTO_SHA512_ARM64
+
+config CRYPTO_SHA3_ARM64
+ tristate "SHA3 digest algorithm (ARMv8.2 Crypto Extensions)"
+ depends on KERNEL_MODE_NEON
+ select CRYPTO_HASH
+ select CRYPTO_SHA3
+
+config CRYPTO_SM3_ARM64_CE
+ tristate "SM3 digest algorithm (ARMv8.2 Crypto Extensions)"
+ depends on KERNEL_MODE_NEON
+ select CRYPTO_HASH
+ select CRYPTO_SM3
+
config CRYPTO_GHASH_ARM64_CE
tristate "GHASH/AES-GCM using ARMv8 Crypto Extensions"
depends on KERNEL_MODE_NEON
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
index b5edc5918c28..cee9b8d9830b 100644
--- a/arch/arm64/crypto/Makefile
+++ b/arch/arm64/crypto/Makefile
@@ -14,6 +14,15 @@ sha1-ce-y := sha1-ce-glue.o sha1-ce-core.o
obj-$(CONFIG_CRYPTO_SHA2_ARM64_CE) += sha2-ce.o
sha2-ce-y := sha2-ce-glue.o sha2-ce-core.o
+obj-$(CONFIG_CRYPTO_SHA512_ARM64_CE) += sha512-ce.o
+sha512-ce-y := sha512-ce-glue.o sha512-ce-core.o
+
+obj-$(CONFIG_CRYPTO_SHA3_ARM64) += sha3-ce.o
+sha3-ce-y := sha3-ce-glue.o sha3-ce-core.o
+
+obj-$(CONFIG_CRYPTO_SM3_ARM64_CE) += sm3-ce.o
+sm3-ce-y := sm3-ce-glue.o sm3-ce-core.o
+
obj-$(CONFIG_CRYPTO_GHASH_ARM64_CE) += ghash-ce.o
ghash-ce-y := ghash-ce-glue.o ghash-ce-core.o
@@ -24,7 +33,7 @@ obj-$(CONFIG_CRYPTO_CRC32_ARM64_CE) += crc32-ce.o
crc32-ce-y:= crc32-ce-core.o crc32-ce-glue.o
obj-$(CONFIG_CRYPTO_AES_ARM64_CE) += aes-ce-cipher.o
-CFLAGS_aes-ce-cipher.o += -march=armv8-a+crypto
+aes-ce-cipher-y := aes-ce-core.o aes-ce-glue.o
obj-$(CONFIG_CRYPTO_AES_ARM64_CE_CCM) += aes-ce-ccm.o
aes-ce-ccm-y := aes-ce-ccm-glue.o aes-ce-ccm-core.o
diff --git a/arch/arm64/crypto/aes-ce-core.S b/arch/arm64/crypto/aes-ce-core.S
new file mode 100644
index 000000000000..8efdfdade393
--- /dev/null
+++ b/arch/arm64/crypto/aes-ce-core.S
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2013 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+ .arch armv8-a+crypto
+
+ENTRY(__aes_ce_encrypt)
+ sub w3, w3, #2
+ ld1 {v0.16b}, [x2]
+ ld1 {v1.4s}, [x0], #16
+ cmp w3, #10
+ bmi 0f
+ bne 3f
+ mov v3.16b, v1.16b
+ b 2f
+0: mov v2.16b, v1.16b
+ ld1 {v3.4s}, [x0], #16
+1: aese v0.16b, v2.16b
+ aesmc v0.16b, v0.16b
+2: ld1 {v1.4s}, [x0], #16
+ aese v0.16b, v3.16b
+ aesmc v0.16b, v0.16b
+3: ld1 {v2.4s}, [x0], #16
+ subs w3, w3, #3
+ aese v0.16b, v1.16b
+ aesmc v0.16b, v0.16b
+ ld1 {v3.4s}, [x0], #16
+ bpl 1b
+ aese v0.16b, v2.16b
+ eor v0.16b, v0.16b, v3.16b
+ st1 {v0.16b}, [x1]
+ ret
+ENDPROC(__aes_ce_encrypt)
+
+ENTRY(__aes_ce_decrypt)
+ sub w3, w3, #2
+ ld1 {v0.16b}, [x2]
+ ld1 {v1.4s}, [x0], #16
+ cmp w3, #10
+ bmi 0f
+ bne 3f
+ mov v3.16b, v1.16b
+ b 2f
+0: mov v2.16b, v1.16b
+ ld1 {v3.4s}, [x0], #16
+1: aesd v0.16b, v2.16b
+ aesimc v0.16b, v0.16b
+2: ld1 {v1.4s}, [x0], #16
+ aesd v0.16b, v3.16b
+ aesimc v0.16b, v0.16b
+3: ld1 {v2.4s}, [x0], #16
+ subs w3, w3, #3
+ aesd v0.16b, v1.16b
+ aesimc v0.16b, v0.16b
+ ld1 {v3.4s}, [x0], #16
+ bpl 1b
+ aesd v0.16b, v2.16b
+ eor v0.16b, v0.16b, v3.16b
+ st1 {v0.16b}, [x1]
+ ret
+ENDPROC(__aes_ce_decrypt)
+
+/*
+ * __aes_ce_sub() - use the aese instruction to perform the AES sbox
+ * substitution on each byte in 'input'
+ */
+ENTRY(__aes_ce_sub)
+ dup v1.4s, w0
+ movi v0.16b, #0
+ aese v0.16b, v1.16b
+ umov w0, v0.s[0]
+ ret
+ENDPROC(__aes_ce_sub)
+
+ENTRY(__aes_ce_invert)
+ ld1 {v0.4s}, [x1]
+ aesimc v1.16b, v0.16b
+ st1 {v1.4s}, [x0]
+ ret
+ENDPROC(__aes_ce_invert)
diff --git a/arch/arm64/crypto/aes-ce-cipher.c b/arch/arm64/crypto/aes-ce-glue.c
index 6a75cd75ed11..e6b3227bbf57 100644
--- a/arch/arm64/crypto/aes-ce-cipher.c
+++ b/arch/arm64/crypto/aes-ce-glue.c
@@ -29,6 +29,13 @@ struct aes_block {
u8 b[AES_BLOCK_SIZE];
};
+asmlinkage void __aes_ce_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
+asmlinkage void __aes_ce_decrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
+
+asmlinkage u32 __aes_ce_sub(u32 l);
+asmlinkage void __aes_ce_invert(struct aes_block *out,
+ const struct aes_block *in);
+
static int num_rounds(struct crypto_aes_ctx *ctx)
{
/*
@@ -44,10 +51,6 @@ static int num_rounds(struct crypto_aes_ctx *ctx)
static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
{
struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
- struct aes_block *out = (struct aes_block *)dst;
- struct aes_block const *in = (struct aes_block *)src;
- void *dummy0;
- int dummy1;
if (!may_use_simd()) {
__aes_arm64_encrypt(ctx->key_enc, dst, src, num_rounds(ctx));
@@ -55,49 +58,13 @@ static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
}
kernel_neon_begin();
-
- __asm__(" ld1 {v0.16b}, %[in] ;"
- " ld1 {v1.4s}, [%[key]], #16 ;"
- " cmp %w[rounds], #10 ;"
- " bmi 0f ;"
- " bne 3f ;"
- " mov v3.16b, v1.16b ;"
- " b 2f ;"
- "0: mov v2.16b, v1.16b ;"
- " ld1 {v3.4s}, [%[key]], #16 ;"
- "1: aese v0.16b, v2.16b ;"
- " aesmc v0.16b, v0.16b ;"
- "2: ld1 {v1.4s}, [%[key]], #16 ;"
- " aese v0.16b, v3.16b ;"
- " aesmc v0.16b, v0.16b ;"
- "3: ld1 {v2.4s}, [%[key]], #16 ;"
- " subs %w[rounds], %w[rounds], #3 ;"
- " aese v0.16b, v1.16b ;"
- " aesmc v0.16b, v0.16b ;"
- " ld1 {v3.4s}, [%[key]], #16 ;"
- " bpl 1b ;"
- " aese v0.16b, v2.16b ;"
- " eor v0.16b, v0.16b, v3.16b ;"
- " st1 {v0.16b}, %[out] ;"
-
- : [out] "=Q"(*out),
- [key] "=r"(dummy0),
- [rounds] "=r"(dummy1)
- : [in] "Q"(*in),
- "1"(ctx->key_enc),
- "2"(num_rounds(ctx) - 2)
- : "cc");
-
+ __aes_ce_encrypt(ctx->key_enc, dst, src, num_rounds(ctx));
kernel_neon_end();
}
static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
{
struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
- struct aes_block *out = (struct aes_block *)dst;
- struct aes_block const *in = (struct aes_block *)src;
- void *dummy0;
- int dummy1;
if (!may_use_simd()) {
__aes_arm64_decrypt(ctx->key_dec, dst, src, num_rounds(ctx));
@@ -105,62 +72,10 @@ static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
}
kernel_neon_begin();
-
- __asm__(" ld1 {v0.16b}, %[in] ;"
- " ld1 {v1.4s}, [%[key]], #16 ;"
- " cmp %w[rounds], #10 ;"
- " bmi 0f ;"
- " bne 3f ;"
- " mov v3.16b, v1.16b ;"
- " b 2f ;"
- "0: mov v2.16b, v1.16b ;"
- " ld1 {v3.4s}, [%[key]], #16 ;"
- "1: aesd v0.16b, v2.16b ;"
- " aesimc v0.16b, v0.16b ;"
- "2: ld1 {v1.4s}, [%[key]], #16 ;"
- " aesd v0.16b, v3.16b ;"
- " aesimc v0.16b, v0.16b ;"
- "3: ld1 {v2.4s}, [%[key]], #16 ;"
- " subs %w[rounds], %w[rounds], #3 ;"
- " aesd v0.16b, v1.16b ;"
- " aesimc v0.16b, v0.16b ;"
- " ld1 {v3.4s}, [%[key]], #16 ;"
- " bpl 1b ;"
- " aesd v0.16b, v2.16b ;"
- " eor v0.16b, v0.16b, v3.16b ;"
- " st1 {v0.16b}, %[out] ;"
-
- : [out] "=Q"(*out),
- [key] "=r"(dummy0),
- [rounds] "=r"(dummy1)
- : [in] "Q"(*in),
- "1"(ctx->key_dec),
- "2"(num_rounds(ctx) - 2)
- : "cc");
-
+ __aes_ce_decrypt(ctx->key_dec, dst, src, num_rounds(ctx));
kernel_neon_end();
}
-/*
- * aes_sub() - use the aese instruction to perform the AES sbox substitution
- * on each byte in 'input'
- */
-static u32 aes_sub(u32 input)
-{
- u32 ret;
-
- __asm__("dup v1.4s, %w[in] ;"
- "movi v0.16b, #0 ;"
- "aese v0.16b, v1.16b ;"
- "umov %w[out], v0.4s[0] ;"
-
- : [out] "=r"(ret)
- : [in] "r"(input)
- : "v0","v1");
-
- return ret;
-}
-
int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
unsigned int key_len)
{
@@ -189,7 +104,7 @@ int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
u32 *rki = ctx->key_enc + (i * kwords);
u32 *rko = rki + kwords;
- rko[0] = ror32(aes_sub(rki[kwords - 1]), 8) ^ rcon[i] ^ rki[0];
+ rko[0] = ror32(__aes_ce_sub(rki[kwords - 1]), 8) ^ rcon[i] ^ rki[0];
rko[1] = rko[0] ^ rki[1];
rko[2] = rko[1] ^ rki[2];
rko[3] = rko[2] ^ rki[3];
@@ -202,7 +117,7 @@ int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
} else if (key_len == AES_KEYSIZE_256) {
if (i >= 6)
break;
- rko[4] = aes_sub(rko[3]) ^ rki[4];
+ rko[4] = __aes_ce_sub(rko[3]) ^ rki[4];
rko[5] = rko[4] ^ rki[5];
rko[6] = rko[5] ^ rki[6];
rko[7] = rko[6] ^ rki[7];
@@ -221,13 +136,7 @@ int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
key_dec[0] = key_enc[j];
for (i = 1, j--; j > 0; i++, j--)
- __asm__("ld1 {v0.4s}, %[in] ;"
- "aesimc v1.16b, v0.16b ;"
- "st1 {v1.4s}, %[out] ;"
-
- : [out] "=Q"(key_dec[i])
- : [in] "Q"(key_enc[j])
- : "v0","v1");
+ __aes_ce_invert(key_dec + i, key_enc + j);
key_dec[i] = key_enc[0];
kernel_neon_end();
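
The rewritten glue above calls __aes_ce_sub() for the SubWord step of
the FIPS-197 key schedule instead of open-coding the aese instruction.
For reference, a plain-C sketch of the same step; the S-box table is
the standard FIPS-197 one, elided here, and this is an illustration,
not the kernel's code path:

	#include <stdint.h>

	extern const uint8_t aes_sbox[256];	/* FIPS-197 S-box, elided */

	/* what __aes_ce_sub() computes: the S-box applied per byte */
	static uint32_t sub_word(uint32_t w)
	{
		return (uint32_t)aes_sbox[w & 0xff] |
		       (uint32_t)aes_sbox[(w >> 8) & 0xff] << 8 |
		       (uint32_t)aes_sbox[(w >> 16) & 0xff] << 16 |
		       (uint32_t)aes_sbox[w >> 24] << 24;
	}

	/* first word of the next AES-128 round key, mirroring
	 * rko[0] = ror32(__aes_ce_sub(rki[3]), 8) ^ rcon[i] ^ rki[0];
	 * with little-endian words, ror32(x, 8) is RotWord() */
	static uint32_t next_round_key_word0(const uint32_t rki[4],
					     uint32_t rcon)
	{
		uint32_t t = sub_word(rki[3]);

		t = (t >> 8) | (t << 24);	/* ror32(t, 8) */
		return t ^ rcon ^ rki[0];
	}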
diff --git a/arch/arm64/crypto/aes-cipher-core.S b/arch/arm64/crypto/aes-cipher-core.S
index 6d2445d603cc..3a44eada2347 100644
--- a/arch/arm64/crypto/aes-cipher-core.S
+++ b/arch/arm64/crypto/aes-cipher-core.S
@@ -125,6 +125,16 @@ CPU_BE( rev w7, w7 )
ret
.endm
+ENTRY(__aes_arm64_encrypt)
+ do_crypt fround, crypto_ft_tab, crypto_ft_tab + 1, 2
+ENDPROC(__aes_arm64_encrypt)
+
+ .align 5
+ENTRY(__aes_arm64_decrypt)
+ do_crypt iround, crypto_it_tab, __aes_arm64_inverse_sbox, 0
+ENDPROC(__aes_arm64_decrypt)
+
+ .section ".rodata", "a"
.align L1_CACHE_SHIFT
.type __aes_arm64_inverse_sbox, %object
__aes_arm64_inverse_sbox:
@@ -161,12 +171,3 @@ __aes_arm64_inverse_sbox:
.byte 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
.byte 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
.size __aes_arm64_inverse_sbox, . - __aes_arm64_inverse_sbox
-
-ENTRY(__aes_arm64_encrypt)
- do_crypt fround, crypto_ft_tab, crypto_ft_tab + 1, 2
-ENDPROC(__aes_arm64_encrypt)
-
- .align 5
-ENTRY(__aes_arm64_decrypt)
- do_crypt iround, crypto_it_tab, __aes_arm64_inverse_sbox, 0
-ENDPROC(__aes_arm64_decrypt)
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 998ba519a026..2fa850e86aa8 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -665,6 +665,7 @@ static int __init aes_init(void)
unregister_simds:
aes_exit();
+ return err;
unregister_ciphers:
crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
return err;
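
The one-line aes_init() fix above plugs a fall-through between two
independent error labels: aes_exit() already unregisters the
skciphers, so running on into unregister_ciphers would unregister them
a second time. The shape of the bug, distilled; every function name
here is invented for illustration:

	int register_skciphers(void);	/* declarations stand in for */
	int register_simds(void);	/* the real registration calls */
	int register_mac(void);
	void example_exit(void);	/* full teardown, ciphers included */
	void unregister_skciphers_only(void);

	static int example_init(void)
	{
		int err;

		err = register_skciphers();
		if (err)
			return err;

		err = register_simds();
		if (err)
			goto unregister_ciphers;

		err = register_mac();
		if (err)
			goto unregister_simds;

		return 0;

	unregister_simds:
		example_exit();
		return err;	/* the fixed line: without it, control falls
				 * into unregister_ciphers and the ciphers
				 * are torn down twice */
	unregister_ciphers:
		unregister_skciphers_only();
		return err;
	}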
diff --git a/arch/arm64/crypto/aes-neon.S b/arch/arm64/crypto/aes-neon.S
index f1e3aa2732f9..1c7b45b7268e 100644
--- a/arch/arm64/crypto/aes-neon.S
+++ b/arch/arm64/crypto/aes-neon.S
@@ -32,10 +32,10 @@
/* preload the entire Sbox */
.macro prepare, sbox, shiftrows, temp
- adr \temp, \sbox
movi v12.16b, #0x1b
- ldr q13, \shiftrows
- ldr q14, .Lror32by8
+ ldr_l q13, \shiftrows, \temp
+ ldr_l q14, .Lror32by8, \temp
+ adr_l \temp, \sbox
ld1 {v16.16b-v19.16b}, [\temp], #64
ld1 {v20.16b-v23.16b}, [\temp], #64
ld1 {v24.16b-v27.16b}, [\temp], #64
@@ -272,7 +272,7 @@
#include "aes-modes.S"
- .text
+ .section ".rodata", "a"
.align 6
.LForward_Sbox:
.byte 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
diff --git a/arch/arm64/crypto/crc32-ce-core.S b/arch/arm64/crypto/crc32-ce-core.S
index 18f5a8442276..16ed3c7ebd37 100644
--- a/arch/arm64/crypto/crc32-ce-core.S
+++ b/arch/arm64/crypto/crc32-ce-core.S
@@ -50,7 +50,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
- .text
+ .section ".rodata", "a"
.align 6
.cpu generic+crypto+crc
@@ -115,12 +115,13 @@
* uint crc32_pmull_le(unsigned char const *buffer,
* size_t len, uint crc32)
*/
+ .text
ENTRY(crc32_pmull_le)
- adr x3, .Lcrc32_constants
+ adr_l x3, .Lcrc32_constants
b 0f
ENTRY(crc32c_pmull_le)
- adr x3, .Lcrc32c_constants
+ adr_l x3, .Lcrc32c_constants
0: bic LEN, LEN, #15
ld1 {v1.16b-v4.16b}, [BUF], #0x40
diff --git a/arch/arm64/crypto/crc32-ce-glue.c b/arch/arm64/crypto/crc32-ce-glue.c
index 624f4137918c..34b4e3d46aab 100644
--- a/arch/arm64/crypto/crc32-ce-glue.c
+++ b/arch/arm64/crypto/crc32-ce-glue.c
@@ -185,6 +185,7 @@ static struct shash_alg crc32_pmull_algs[] = { {
.base.cra_name = "crc32",
.base.cra_driver_name = "crc32-arm64-ce",
.base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.base.cra_blocksize = 1,
.base.cra_module = THIS_MODULE,
}, {
@@ -200,6 +201,7 @@ static struct shash_alg crc32_pmull_algs[] = { {
.base.cra_name = "crc32c",
.base.cra_driver_name = "crc32c-arm64-ce",
.base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.base.cra_blocksize = 1,
.base.cra_module = THIS_MODULE,
} };
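
The new CRYPTO_ALG_OPTIONAL_KEY flag on both CRC algorithms (here and
in the 32-bit ARM glue earlier) tells the crypto API that their "key",
the 4-byte initial CRC state, may be set but is not required, so a
digest without a prior setkey() remains legal. A hedged sketch of a
kernel-side caller, with error handling trimmed; the wrapper function
is an assumption for illustration:

	#include <crypto/hash.h>

	static int crc32_of(const u8 *data, unsigned int len, u32 *out)
	{
		struct crypto_shash *tfm = crypto_alloc_shash("crc32", 0, 0);
		int err;

		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		{
			SHASH_DESC_ON_STACK(desc, tfm);

			desc->tfm = tfm;
			desc->flags = 0;
			/* no crypto_shash_setkey(): the default seed applies */
			err = crypto_shash_digest(desc, data, len, (u8 *)out);
		}

		crypto_free_shash(tfm);
		return err;
	}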
diff --git a/arch/arm64/crypto/crct10dif-ce-core.S b/arch/arm64/crypto/crct10dif-ce-core.S
index d5b5a8c038c8..f179c01bd55c 100644
--- a/arch/arm64/crypto/crct10dif-ce-core.S
+++ b/arch/arm64/crypto/crct10dif-ce-core.S
@@ -128,7 +128,7 @@ CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
// XOR the initial_crc value
eor v0.16b, v0.16b, v10.16b
- ldr q10, rk3 // xmm10 has rk3 and rk4
+ ldr_l q10, rk3, x8 // xmm10 has rk3 and rk4
// type of pmull instruction
// will determine which constant to use
@@ -184,13 +184,13 @@ CPU_LE( ext v12.16b, v12.16b, v12.16b, #8 )
// fold the 8 vector registers to 1 vector register with different
// constants
- ldr q10, rk9
+ ldr_l q10, rk9, x8
.macro fold16, reg, rk
pmull v8.1q, \reg\().1d, v10.1d
pmull2 \reg\().1q, \reg\().2d, v10.2d
.ifnb \rk
- ldr q10, \rk
+ ldr_l q10, \rk, x8
.endif
eor v7.16b, v7.16b, v8.16b
eor v7.16b, v7.16b, \reg\().16b
@@ -251,7 +251,7 @@ CPU_LE( ext v1.16b, v1.16b, v1.16b, #8 )
// get rid of the extra data that was loaded before
// load the shift constant
- adr x4, tbl_shf_table + 16
+ adr_l x4, tbl_shf_table + 16
sub x4, x4, arg3
ld1 {v0.16b}, [x4]
@@ -275,7 +275,7 @@ CPU_LE( ext v1.16b, v1.16b, v1.16b, #8 )
_128_done:
// compute crc of a 128-bit value
- ldr q10, rk5 // rk5 and rk6 in xmm10
+ ldr_l q10, rk5, x8 // rk5 and rk6 in xmm10
// 64b fold
ext v0.16b, vzr.16b, v7.16b, #8
@@ -291,7 +291,7 @@ _128_done:
// barrett reduction
_barrett:
- ldr q10, rk7
+ ldr_l q10, rk7, x8
mov v0.d[0], v7.d[1]
pmull v0.1q, v0.1d, v10.1d
@@ -321,7 +321,7 @@ CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
b.eq _128_done // exactly 16 left
b.lt _less_than_16_left
- ldr q10, rk1 // rk1 and rk2 in xmm10
+ ldr_l q10, rk1, x8 // rk1 and rk2 in xmm10
// update the counter. subtract 32 instead of 16 to save one
// instruction from the loop
@@ -333,7 +333,7 @@ CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
_less_than_16_left:
// shl r9, 4
- adr x0, tbl_shf_table + 16
+ adr_l x0, tbl_shf_table + 16
sub x0, x0, arg3
ld1 {v0.16b}, [x0]
movi v9.16b, #0x80
@@ -345,6 +345,7 @@ ENDPROC(crc_t10dif_pmull)
// precomputed constants
// these constants are precomputed from the poly:
// 0x8bb70000 (0x8bb7 scaled to 32 bits)
+ .section ".rodata", "a"
.align 4
// Q = 0x18BB70000
// rk1 = 2^(32*3) mod Q << 32
diff --git a/arch/arm64/crypto/sha1-ce-core.S b/arch/arm64/crypto/sha1-ce-core.S
index 8550408735a0..46049850727d 100644
--- a/arch/arm64/crypto/sha1-ce-core.S
+++ b/arch/arm64/crypto/sha1-ce-core.S
@@ -58,12 +58,11 @@
sha1su1 v\s0\().4s, v\s3\().4s
.endm
- /*
- * The SHA1 round constants
- */
- .align 4
-.Lsha1_rcon:
- .word 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6
+ .macro loadrc, k, val, tmp
+ movz \tmp, :abs_g0_nc:\val
+ movk \tmp, :abs_g1:\val
+ dup \k, \tmp
+ .endm
/*
* void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
@@ -71,11 +70,10 @@
*/
ENTRY(sha1_ce_transform)
/* load round constants */
- adr x6, .Lsha1_rcon
- ld1r {k0.4s}, [x6], #4
- ld1r {k1.4s}, [x6], #4
- ld1r {k2.4s}, [x6], #4
- ld1r {k3.4s}, [x6]
+ loadrc k0.4s, 0x5a827999, w6
+ loadrc k1.4s, 0x6ed9eba1, w6
+ loadrc k2.4s, 0x8f1bbcdc, w6
+ loadrc k3.4s, 0xca62c1d6, w6
/* load state */
ld1 {dgav.4s}, [x0]
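
The loadrc macro replaces the literal-pool load of the SHA-1 round constants with a movz/movk pair, so the constants no longer need to live next to the code at all (consistent with the .text-to-.rodata moves elsewhere in this series). A standalone sketch of the two-halves construction it performs; the value is the first round constant from the deleted table:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t k = 0x5a827999;	/* first SHA-1 round constant */
	uint32_t lo = k & 0xffff;	/* movz wN, :abs_g0_nc:val */
	uint32_t hi = k >> 16;		/* movk wN, :abs_g1:val    */
	uint32_t rebuilt = (hi << 16) | lo;

	printf("rebuilt = 0x%08x (%s)\n", rebuilt,
	       rebuilt == k ? "ok" : "mismatch");
	return 0;
}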
diff --git a/arch/arm64/crypto/sha2-ce-core.S b/arch/arm64/crypto/sha2-ce-core.S
index 679c6c002f4f..4c3c89b812ce 100644
--- a/arch/arm64/crypto/sha2-ce-core.S
+++ b/arch/arm64/crypto/sha2-ce-core.S
@@ -53,6 +53,7 @@
/*
* The SHA-256 round constants
*/
+ .section ".rodata", "a"
.align 4
.Lsha2_rcon:
.word 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5
@@ -76,9 +77,10 @@
* void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
* int blocks)
*/
+ .text
ENTRY(sha2_ce_transform)
/* load round constants */
- adr x8, .Lsha2_rcon
+ adr_l x8, .Lsha2_rcon
ld1 { v0.4s- v3.4s}, [x8], #64
ld1 { v4.4s- v7.4s}, [x8], #64
ld1 { v8.4s-v11.4s}, [x8], #64
diff --git a/arch/arm64/crypto/sha3-ce-core.S b/arch/arm64/crypto/sha3-ce-core.S
new file mode 100644
index 000000000000..332ad7530690
--- /dev/null
+++ b/arch/arm64/crypto/sha3-ce-core.S
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * sha3-ce-core.S - core SHA-3 transform using v8.2 Crypto Extensions
+ *
+ * Copyright (C) 2018 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+ .irp b,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
+ .set .Lv\b\().2d, \b
+ .set .Lv\b\().16b, \b
+ .endr
+
+ /*
+ * ARMv8.2 Crypto Extensions instructions
+ */
+ .macro eor3, rd, rn, rm, ra
+ .inst 0xce000000 | .L\rd | (.L\rn << 5) | (.L\ra << 10) | (.L\rm << 16)
+ .endm
+
+ .macro rax1, rd, rn, rm
+ .inst 0xce608c00 | .L\rd | (.L\rn << 5) | (.L\rm << 16)
+ .endm
+
+ .macro bcax, rd, rn, rm, ra
+ .inst 0xce200000 | .L\rd | (.L\rn << 5) | (.L\ra << 10) | (.L\rm << 16)
+ .endm
+
+ .macro xar, rd, rn, rm, imm6
+ .inst 0xce800000 | .L\rd | (.L\rn << 5) | ((\imm6) << 10) | (.L\rm << 16)
+ .endm
+
+ /*
+ * sha3_ce_transform(u64 *st, const u8 *data, int blocks, int dg_size)
+ */
+ .text
+ENTRY(sha3_ce_transform)
+ /* load state */
+ add x8, x0, #32
+ ld1 { v0.1d- v3.1d}, [x0]
+ ld1 { v4.1d- v7.1d}, [x8], #32
+ ld1 { v8.1d-v11.1d}, [x8], #32
+ ld1 {v12.1d-v15.1d}, [x8], #32
+ ld1 {v16.1d-v19.1d}, [x8], #32
+ ld1 {v20.1d-v23.1d}, [x8], #32
+ ld1 {v24.1d}, [x8]
+
+0: sub w2, w2, #1
+ mov w8, #24
+ adr_l x9, .Lsha3_rcon
+
+ /* load input */
+ ld1 {v25.8b-v28.8b}, [x1], #32
+ ld1 {v29.8b-v31.8b}, [x1], #24
+ eor v0.8b, v0.8b, v25.8b
+ eor v1.8b, v1.8b, v26.8b
+ eor v2.8b, v2.8b, v27.8b
+ eor v3.8b, v3.8b, v28.8b
+ eor v4.8b, v4.8b, v29.8b
+ eor v5.8b, v5.8b, v30.8b
+ eor v6.8b, v6.8b, v31.8b
+
+ tbnz x3, #6, 2f // SHA3-512
+
+ ld1 {v25.8b-v28.8b}, [x1], #32
+ ld1 {v29.8b-v30.8b}, [x1], #16
+ eor v7.8b, v7.8b, v25.8b
+ eor v8.8b, v8.8b, v26.8b
+ eor v9.8b, v9.8b, v27.8b
+ eor v10.8b, v10.8b, v28.8b
+ eor v11.8b, v11.8b, v29.8b
+ eor v12.8b, v12.8b, v30.8b
+
+ tbnz x3, #4, 1f // SHA3-384 or SHA3-224
+
+ // SHA3-256
+ ld1 {v25.8b-v28.8b}, [x1], #32
+ eor v13.8b, v13.8b, v25.8b
+ eor v14.8b, v14.8b, v26.8b
+ eor v15.8b, v15.8b, v27.8b
+ eor v16.8b, v16.8b, v28.8b
+ b 3f
+
+1: tbz x3, #2, 3f // bit 2 cleared? SHA3-384
+
+ // SHA3-224
+ ld1 {v25.8b-v28.8b}, [x1], #32
+ ld1 {v29.8b}, [x1], #8
+ eor v13.8b, v13.8b, v25.8b
+ eor v14.8b, v14.8b, v26.8b
+ eor v15.8b, v15.8b, v27.8b
+ eor v16.8b, v16.8b, v28.8b
+ eor v17.8b, v17.8b, v29.8b
+ b 3f
+
+ // SHA3-512
+2: ld1 {v25.8b-v26.8b}, [x1], #16
+ eor v7.8b, v7.8b, v25.8b
+ eor v8.8b, v8.8b, v26.8b
+
+3: sub w8, w8, #1
+
+ eor3 v29.16b, v4.16b, v9.16b, v14.16b
+ eor3 v26.16b, v1.16b, v6.16b, v11.16b
+ eor3 v28.16b, v3.16b, v8.16b, v13.16b
+ eor3 v25.16b, v0.16b, v5.16b, v10.16b
+ eor3 v27.16b, v2.16b, v7.16b, v12.16b
+ eor3 v29.16b, v29.16b, v19.16b, v24.16b
+ eor3 v26.16b, v26.16b, v16.16b, v21.16b
+ eor3 v28.16b, v28.16b, v18.16b, v23.16b
+ eor3 v25.16b, v25.16b, v15.16b, v20.16b
+ eor3 v27.16b, v27.16b, v17.16b, v22.16b
+
+ rax1 v30.2d, v29.2d, v26.2d // bc[0]
+ rax1 v26.2d, v26.2d, v28.2d // bc[2]
+ rax1 v28.2d, v28.2d, v25.2d // bc[4]
+ rax1 v25.2d, v25.2d, v27.2d // bc[1]
+ rax1 v27.2d, v27.2d, v29.2d // bc[3]
+
+ eor v0.16b, v0.16b, v30.16b
+ xar v29.2d, v1.2d, v25.2d, (64 - 1)
+ xar v1.2d, v6.2d, v25.2d, (64 - 44)
+ xar v6.2d, v9.2d, v28.2d, (64 - 20)
+ xar v9.2d, v22.2d, v26.2d, (64 - 61)
+ xar v22.2d, v14.2d, v28.2d, (64 - 39)
+ xar v14.2d, v20.2d, v30.2d, (64 - 18)
+ xar v31.2d, v2.2d, v26.2d, (64 - 62)
+ xar v2.2d, v12.2d, v26.2d, (64 - 43)
+ xar v12.2d, v13.2d, v27.2d, (64 - 25)
+ xar v13.2d, v19.2d, v28.2d, (64 - 8)
+ xar v19.2d, v23.2d, v27.2d, (64 - 56)
+ xar v23.2d, v15.2d, v30.2d, (64 - 41)
+ xar v15.2d, v4.2d, v28.2d, (64 - 27)
+ xar v28.2d, v24.2d, v28.2d, (64 - 14)
+ xar v24.2d, v21.2d, v25.2d, (64 - 2)
+ xar v8.2d, v8.2d, v27.2d, (64 - 55)
+ xar v4.2d, v16.2d, v25.2d, (64 - 45)
+ xar v16.2d, v5.2d, v30.2d, (64 - 36)
+ xar v5.2d, v3.2d, v27.2d, (64 - 28)
+ xar v27.2d, v18.2d, v27.2d, (64 - 21)
+ xar v3.2d, v17.2d, v26.2d, (64 - 15)
+ xar v25.2d, v11.2d, v25.2d, (64 - 10)
+ xar v26.2d, v7.2d, v26.2d, (64 - 6)
+ xar v30.2d, v10.2d, v30.2d, (64 - 3)
+
+ bcax v20.16b, v31.16b, v22.16b, v8.16b
+ bcax v21.16b, v8.16b, v23.16b, v22.16b
+ bcax v22.16b, v22.16b, v24.16b, v23.16b
+ bcax v23.16b, v23.16b, v31.16b, v24.16b
+ bcax v24.16b, v24.16b, v8.16b, v31.16b
+
+ ld1r {v31.2d}, [x9], #8
+
+ bcax v17.16b, v25.16b, v19.16b, v3.16b
+ bcax v18.16b, v3.16b, v15.16b, v19.16b
+ bcax v19.16b, v19.16b, v16.16b, v15.16b
+ bcax v15.16b, v15.16b, v25.16b, v16.16b
+ bcax v16.16b, v16.16b, v3.16b, v25.16b
+
+ bcax v10.16b, v29.16b, v12.16b, v26.16b
+ bcax v11.16b, v26.16b, v13.16b, v12.16b
+ bcax v12.16b, v12.16b, v14.16b, v13.16b
+ bcax v13.16b, v13.16b, v29.16b, v14.16b
+ bcax v14.16b, v14.16b, v26.16b, v29.16b
+
+ bcax v7.16b, v30.16b, v9.16b, v4.16b
+ bcax v8.16b, v4.16b, v5.16b, v9.16b
+ bcax v9.16b, v9.16b, v6.16b, v5.16b
+ bcax v5.16b, v5.16b, v30.16b, v6.16b
+ bcax v6.16b, v6.16b, v4.16b, v30.16b
+
+ bcax v3.16b, v27.16b, v0.16b, v28.16b
+ bcax v4.16b, v28.16b, v1.16b, v0.16b
+ bcax v0.16b, v0.16b, v2.16b, v1.16b
+ bcax v1.16b, v1.16b, v27.16b, v2.16b
+ bcax v2.16b, v2.16b, v28.16b, v27.16b
+
+ eor v0.16b, v0.16b, v31.16b
+
+ cbnz w8, 3b
+ cbnz w2, 0b
+
+ /* save state */
+ st1 { v0.1d- v3.1d}, [x0], #32
+ st1 { v4.1d- v7.1d}, [x0], #32
+ st1 { v8.1d-v11.1d}, [x0], #32
+ st1 {v12.1d-v15.1d}, [x0], #32
+ st1 {v16.1d-v19.1d}, [x0], #32
+ st1 {v20.1d-v23.1d}, [x0], #32
+ st1 {v24.1d}, [x0]
+ ret
+ENDPROC(sha3_ce_transform)
+
+ .section ".rodata", "a"
+ .align 8
+.Lsha3_rcon:
+ .quad 0x0000000000000001, 0x0000000000008082, 0x800000000000808a
+ .quad 0x8000000080008000, 0x000000000000808b, 0x0000000080000001
+ .quad 0x8000000080008081, 0x8000000000008009, 0x000000000000008a
+ .quad 0x0000000000000088, 0x0000000080008009, 0x000000008000000a
+ .quad 0x000000008000808b, 0x800000000000008b, 0x8000000000008089
+ .quad 0x8000000000008003, 0x8000000000008002, 0x8000000000000080
+ .quad 0x000000000000800a, 0x800000008000000a, 0x8000000080008081
+ .quad 0x8000000000008080, 0x0000000080000001, 0x8000000080008008
diff --git a/arch/arm64/crypto/sha3-ce-glue.c b/arch/arm64/crypto/sha3-ce-glue.c
new file mode 100644
index 000000000000..da8222e528bd
--- /dev/null
+++ b/arch/arm64/crypto/sha3-ce-glue.c
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * sha3-ce-glue.c - core SHA-3 transform using v8.2 Crypto Extensions
+ *
+ * Copyright (C) 2018 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/hwcap.h>
+#include <asm/neon.h>
+#include <asm/simd.h>
+#include <asm/unaligned.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha3.h>
+#include <linux/cpufeature.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+MODULE_DESCRIPTION("SHA3 secure hash using ARMv8 Crypto Extensions");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+
+asmlinkage void sha3_ce_transform(u64 *st, const u8 *data, int blocks,
+ int md_len);
+
+static int sha3_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ struct sha3_state *sctx = shash_desc_ctx(desc);
+ unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
+
+ if (!may_use_simd())
+ return crypto_sha3_update(desc, data, len);
+
+ if ((sctx->partial + len) >= sctx->rsiz) {
+ int blocks;
+
+ if (sctx->partial) {
+ int p = sctx->rsiz - sctx->partial;
+
+ memcpy(sctx->buf + sctx->partial, data, p);
+ kernel_neon_begin();
+ sha3_ce_transform(sctx->st, sctx->buf, 1, digest_size);
+ kernel_neon_end();
+
+ data += p;
+ len -= p;
+ sctx->partial = 0;
+ }
+
+ blocks = len / sctx->rsiz;
+ len %= sctx->rsiz;
+
+ if (blocks) {
+ kernel_neon_begin();
+ sha3_ce_transform(sctx->st, data, blocks, digest_size);
+ kernel_neon_end();
+ data += blocks * sctx->rsiz;
+ }
+ }
+
+ if (len) {
+ memcpy(sctx->buf + sctx->partial, data, len);
+ sctx->partial += len;
+ }
+ return 0;
+}
+
+static int sha3_final(struct shash_desc *desc, u8 *out)
+{
+ struct sha3_state *sctx = shash_desc_ctx(desc);
+ unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
+ __le64 *digest = (__le64 *)out;
+ int i;
+
+ if (!may_use_simd())
+ return crypto_sha3_final(desc, out);
+
+ sctx->buf[sctx->partial++] = 0x06;
+ memset(sctx->buf + sctx->partial, 0, sctx->rsiz - sctx->partial);
+ sctx->buf[sctx->rsiz - 1] |= 0x80;
+
+ kernel_neon_begin();
+ sha3_ce_transform(sctx->st, sctx->buf, 1, digest_size);
+ kernel_neon_end();
+
+ for (i = 0; i < digest_size / 8; i++)
+ put_unaligned_le64(sctx->st[i], digest++);
+
+ if (digest_size & 4)
+ put_unaligned_le32(sctx->st[i], (__le32 *)digest);
+
+ *sctx = (struct sha3_state){};
+ return 0;
+}
+
+static struct shash_alg algs[] = { {
+ .digestsize = SHA3_224_DIGEST_SIZE,
+ .init = crypto_sha3_init,
+ .update = sha3_update,
+ .final = sha3_final,
+ .descsize = sizeof(struct sha3_state),
+ .base.cra_name = "sha3-224",
+ .base.cra_driver_name = "sha3-224-ce",
+ .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_blocksize = SHA3_224_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+ .base.cra_priority = 200,
+}, {
+ .digestsize = SHA3_256_DIGEST_SIZE,
+ .init = crypto_sha3_init,
+ .update = sha3_update,
+ .final = sha3_final,
+ .descsize = sizeof(struct sha3_state),
+ .base.cra_name = "sha3-256",
+ .base.cra_driver_name = "sha3-256-ce",
+ .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_blocksize = SHA3_256_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+ .base.cra_priority = 200,
+}, {
+ .digestsize = SHA3_384_DIGEST_SIZE,
+ .init = crypto_sha3_init,
+ .update = sha3_update,
+ .final = sha3_final,
+ .descsize = sizeof(struct sha3_state),
+ .base.cra_name = "sha3-384",
+ .base.cra_driver_name = "sha3-384-ce",
+ .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_blocksize = SHA3_384_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+ .base.cra_priority = 200,
+}, {
+ .digestsize = SHA3_512_DIGEST_SIZE,
+ .init = crypto_sha3_init,
+ .update = sha3_update,
+ .final = sha3_final,
+ .descsize = sizeof(struct sha3_state),
+ .base.cra_name = "sha3-512",
+ .base.cra_driver_name = "sha3-512-ce",
+ .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_blocksize = SHA3_512_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+ .base.cra_priority = 200,
+} };
+
+static int __init sha3_neon_mod_init(void)
+{
+ return crypto_register_shashes(algs, ARRAY_SIZE(algs));
+}
+
+static void __exit sha3_neon_mod_fini(void)
+{
+ crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+}
+
+module_cpu_feature_match(SHA3, sha3_neon_mod_init);
+module_exit(sha3_neon_mod_fini);
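
sha3_update() buffers sub-block input in sctx->buf and only hands whole rsiz-byte blocks to the NEON transform, falling back to the generic crypto_sha3_*() helpers whenever may_use_simd() rules NEON out. A hedged sketch of a caller exercising that partial-block path through the shash API, using a deliberately unaligned split across two updates:

#include <crypto/hash.h>
#include <crypto/sha3.h>
#include <linux/err.h>

/* Sketch only: streaming SHA3-256. On CPUs with the v8.2 SHA-3
 * instructions the "sha3-256-ce" driver above wins on cra_priority;
 * elsewhere the generic implementation serves the same name. */
static int sha3_256_stream_example(const u8 *data, unsigned int len,
				   u8 out[SHA3_256_DIGEST_SIZE])
{
	struct crypto_shash *tfm = crypto_alloc_shash("sha3-256", 0, 0);
	unsigned int half = len / 2;	/* not a multiple of rsiz */
	int ret;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;
		ret = crypto_shash_init(desc);
		if (!ret)
			ret = crypto_shash_update(desc, data, half);
		if (!ret)
			ret = crypto_shash_update(desc, data + half,
						  len - half);
		if (!ret)
			ret = crypto_shash_final(desc, out);
	}

	crypto_free_shash(tfm);
	return ret;
}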
diff --git a/arch/arm64/crypto/sha512-ce-core.S b/arch/arm64/crypto/sha512-ce-core.S
new file mode 100644
index 000000000000..7f3bca5c59a2
--- /dev/null
+++ b/arch/arm64/crypto/sha512-ce-core.S
@@ -0,0 +1,204 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * sha512-ce-core.S - core SHA-384/SHA-512 transform using v8 Crypto Extensions
+ *
+ * Copyright (C) 2018 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+ .irp b,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19
+ .set .Lq\b, \b
+ .set .Lv\b\().2d, \b
+ .endr
+
+ .macro sha512h, rd, rn, rm
+ .inst 0xce608000 | .L\rd | (.L\rn << 5) | (.L\rm << 16)
+ .endm
+
+ .macro sha512h2, rd, rn, rm
+ .inst 0xce608400 | .L\rd | (.L\rn << 5) | (.L\rm << 16)
+ .endm
+
+ .macro sha512su0, rd, rn
+ .inst 0xcec08000 | .L\rd | (.L\rn << 5)
+ .endm
+
+ .macro sha512su1, rd, rn, rm
+ .inst 0xce608800 | .L\rd | (.L\rn << 5) | (.L\rm << 16)
+ .endm
+
+ /*
+ * The SHA-512 round constants
+ */
+ .section ".rodata", "a"
+ .align 4
+.Lsha512_rcon:
+ .quad 0x428a2f98d728ae22, 0x7137449123ef65cd
+ .quad 0xb5c0fbcfec4d3b2f, 0xe9b5dba58189dbbc
+ .quad 0x3956c25bf348b538, 0x59f111f1b605d019
+ .quad 0x923f82a4af194f9b, 0xab1c5ed5da6d8118
+ .quad 0xd807aa98a3030242, 0x12835b0145706fbe
+ .quad 0x243185be4ee4b28c, 0x550c7dc3d5ffb4e2
+ .quad 0x72be5d74f27b896f, 0x80deb1fe3b1696b1
+ .quad 0x9bdc06a725c71235, 0xc19bf174cf692694
+ .quad 0xe49b69c19ef14ad2, 0xefbe4786384f25e3
+ .quad 0x0fc19dc68b8cd5b5, 0x240ca1cc77ac9c65
+ .quad 0x2de92c6f592b0275, 0x4a7484aa6ea6e483
+ .quad 0x5cb0a9dcbd41fbd4, 0x76f988da831153b5
+ .quad 0x983e5152ee66dfab, 0xa831c66d2db43210
+ .quad 0xb00327c898fb213f, 0xbf597fc7beef0ee4
+ .quad 0xc6e00bf33da88fc2, 0xd5a79147930aa725
+ .quad 0x06ca6351e003826f, 0x142929670a0e6e70
+ .quad 0x27b70a8546d22ffc, 0x2e1b21385c26c926
+ .quad 0x4d2c6dfc5ac42aed, 0x53380d139d95b3df
+ .quad 0x650a73548baf63de, 0x766a0abb3c77b2a8
+ .quad 0x81c2c92e47edaee6, 0x92722c851482353b
+ .quad 0xa2bfe8a14cf10364, 0xa81a664bbc423001
+ .quad 0xc24b8b70d0f89791, 0xc76c51a30654be30
+ .quad 0xd192e819d6ef5218, 0xd69906245565a910
+ .quad 0xf40e35855771202a, 0x106aa07032bbd1b8
+ .quad 0x19a4c116b8d2d0c8, 0x1e376c085141ab53
+ .quad 0x2748774cdf8eeb99, 0x34b0bcb5e19b48a8
+ .quad 0x391c0cb3c5c95a63, 0x4ed8aa4ae3418acb
+ .quad 0x5b9cca4f7763e373, 0x682e6ff3d6b2b8a3
+ .quad 0x748f82ee5defb2fc, 0x78a5636f43172f60
+ .quad 0x84c87814a1f0ab72, 0x8cc702081a6439ec
+ .quad 0x90befffa23631e28, 0xa4506cebde82bde9
+ .quad 0xbef9a3f7b2c67915, 0xc67178f2e372532b
+ .quad 0xca273eceea26619c, 0xd186b8c721c0c207
+ .quad 0xeada7dd6cde0eb1e, 0xf57d4f7fee6ed178
+ .quad 0x06f067aa72176fba, 0x0a637dc5a2c898a6
+ .quad 0x113f9804bef90dae, 0x1b710b35131c471b
+ .quad 0x28db77f523047d84, 0x32caab7b40c72493
+ .quad 0x3c9ebe0a15c9bebc, 0x431d67c49c100d4c
+ .quad 0x4cc5d4becb3e42b6, 0x597f299cfc657e2a
+ .quad 0x5fcb6fab3ad6faec, 0x6c44198c4a475817
+
+ .macro dround, i0, i1, i2, i3, i4, rc0, rc1, in0, in1, in2, in3, in4
+ .ifnb \rc1
+ ld1 {v\rc1\().2d}, [x4], #16
+ .endif
+ add v5.2d, v\rc0\().2d, v\in0\().2d
+ ext v6.16b, v\i2\().16b, v\i3\().16b, #8
+ ext v5.16b, v5.16b, v5.16b, #8
+ ext v7.16b, v\i1\().16b, v\i2\().16b, #8
+ add v\i3\().2d, v\i3\().2d, v5.2d
+ .ifnb \in1
+ ext v5.16b, v\in3\().16b, v\in4\().16b, #8
+ sha512su0 v\in0\().2d, v\in1\().2d
+ .endif
+ sha512h q\i3, q6, v7.2d
+ .ifnb \in1
+ sha512su1 v\in0\().2d, v\in2\().2d, v5.2d
+ .endif
+ add v\i4\().2d, v\i1\().2d, v\i3\().2d
+ sha512h2 q\i3, q\i1, v\i0\().2d
+ .endm
+
+ /*
+ * void sha512_ce_transform(struct sha512_state *sst, u8 const *src,
+ * int blocks)
+ */
+ .text
+ENTRY(sha512_ce_transform)
+ /* load state */
+ ld1 {v8.2d-v11.2d}, [x0]
+
+ /* load first 4 round constants */
+ adr_l x3, .Lsha512_rcon
+ ld1 {v20.2d-v23.2d}, [x3], #64
+
+ /* load input */
+0: ld1 {v12.2d-v15.2d}, [x1], #64
+ ld1 {v16.2d-v19.2d}, [x1], #64
+ sub w2, w2, #1
+
+CPU_LE( rev64 v12.16b, v12.16b )
+CPU_LE( rev64 v13.16b, v13.16b )
+CPU_LE( rev64 v14.16b, v14.16b )
+CPU_LE( rev64 v15.16b, v15.16b )
+CPU_LE( rev64 v16.16b, v16.16b )
+CPU_LE( rev64 v17.16b, v17.16b )
+CPU_LE( rev64 v18.16b, v18.16b )
+CPU_LE( rev64 v19.16b, v19.16b )
+
+ mov x4, x3 // rc pointer
+
+ mov v0.16b, v8.16b
+ mov v1.16b, v9.16b
+ mov v2.16b, v10.16b
+ mov v3.16b, v11.16b
+
+ // v0 ab cd -- ef gh ab
+ // v1 cd -- ef gh ab cd
+ // v2 ef gh ab cd -- ef
+ // v3 gh ab cd -- ef gh
+ // v4 -- ef gh ab cd --
+
+ dround 0, 1, 2, 3, 4, 20, 24, 12, 13, 19, 16, 17
+ dround 3, 0, 4, 2, 1, 21, 25, 13, 14, 12, 17, 18
+ dround 2, 3, 1, 4, 0, 22, 26, 14, 15, 13, 18, 19
+ dround 4, 2, 0, 1, 3, 23, 27, 15, 16, 14, 19, 12
+ dround 1, 4, 3, 0, 2, 24, 28, 16, 17, 15, 12, 13
+
+ dround 0, 1, 2, 3, 4, 25, 29, 17, 18, 16, 13, 14
+ dround 3, 0, 4, 2, 1, 26, 30, 18, 19, 17, 14, 15
+ dround 2, 3, 1, 4, 0, 27, 31, 19, 12, 18, 15, 16
+ dround 4, 2, 0, 1, 3, 28, 24, 12, 13, 19, 16, 17
+ dround 1, 4, 3, 0, 2, 29, 25, 13, 14, 12, 17, 18
+
+ dround 0, 1, 2, 3, 4, 30, 26, 14, 15, 13, 18, 19
+ dround 3, 0, 4, 2, 1, 31, 27, 15, 16, 14, 19, 12
+ dround 2, 3, 1, 4, 0, 24, 28, 16, 17, 15, 12, 13
+ dround 4, 2, 0, 1, 3, 25, 29, 17, 18, 16, 13, 14
+ dround 1, 4, 3, 0, 2, 26, 30, 18, 19, 17, 14, 15
+
+ dround 0, 1, 2, 3, 4, 27, 31, 19, 12, 18, 15, 16
+ dround 3, 0, 4, 2, 1, 28, 24, 12, 13, 19, 16, 17
+ dround 2, 3, 1, 4, 0, 29, 25, 13, 14, 12, 17, 18
+ dround 4, 2, 0, 1, 3, 30, 26, 14, 15, 13, 18, 19
+ dround 1, 4, 3, 0, 2, 31, 27, 15, 16, 14, 19, 12
+
+ dround 0, 1, 2, 3, 4, 24, 28, 16, 17, 15, 12, 13
+ dround 3, 0, 4, 2, 1, 25, 29, 17, 18, 16, 13, 14
+ dround 2, 3, 1, 4, 0, 26, 30, 18, 19, 17, 14, 15
+ dround 4, 2, 0, 1, 3, 27, 31, 19, 12, 18, 15, 16
+ dround 1, 4, 3, 0, 2, 28, 24, 12, 13, 19, 16, 17
+
+ dround 0, 1, 2, 3, 4, 29, 25, 13, 14, 12, 17, 18
+ dround 3, 0, 4, 2, 1, 30, 26, 14, 15, 13, 18, 19
+ dround 2, 3, 1, 4, 0, 31, 27, 15, 16, 14, 19, 12
+ dround 4, 2, 0, 1, 3, 24, 28, 16, 17, 15, 12, 13
+ dround 1, 4, 3, 0, 2, 25, 29, 17, 18, 16, 13, 14
+
+ dround 0, 1, 2, 3, 4, 26, 30, 18, 19, 17, 14, 15
+ dround 3, 0, 4, 2, 1, 27, 31, 19, 12, 18, 15, 16
+ dround 2, 3, 1, 4, 0, 28, 24, 12
+ dround 4, 2, 0, 1, 3, 29, 25, 13
+ dround 1, 4, 3, 0, 2, 30, 26, 14
+
+ dround 0, 1, 2, 3, 4, 31, 27, 15
+ dround 3, 0, 4, 2, 1, 24, , 16
+ dround 2, 3, 1, 4, 0, 25, , 17
+ dround 4, 2, 0, 1, 3, 26, , 18
+ dround 1, 4, 3, 0, 2, 27, , 19
+
+ /* update state */
+ add v8.2d, v8.2d, v0.2d
+ add v9.2d, v9.2d, v1.2d
+ add v10.2d, v10.2d, v2.2d
+ add v11.2d, v11.2d, v3.2d
+
+ /* handled all input blocks? */
+ cbnz w2, 0b
+
+ /* store new state */
+3: st1 {v8.2d-v11.2d}, [x0]
+ ret
+ENDPROC(sha512_ce_transform)
diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c
new file mode 100644
index 000000000000..a77c8632a589
--- /dev/null
+++ b/arch/arm64/crypto/sha512-ce-glue.c
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * sha512-ce-glue.c - SHA-384/SHA-512 using ARMv8 Crypto Extensions
+ *
+ * Copyright (C) 2018 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/neon.h>
+#include <asm/simd.h>
+#include <asm/unaligned.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <crypto/sha512_base.h>
+#include <linux/cpufeature.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+MODULE_DESCRIPTION("SHA-384/SHA-512 secure hash using ARMv8 Crypto Extensions");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+
+asmlinkage void sha512_ce_transform(struct sha512_state *sst, u8 const *src,
+ int blocks);
+
+asmlinkage void sha512_block_data_order(u64 *digest, u8 const *src, int blocks);
+
+static int sha512_ce_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ if (!may_use_simd())
+ return sha512_base_do_update(desc, data, len,
+ (sha512_block_fn *)sha512_block_data_order);
+
+ kernel_neon_begin();
+ sha512_base_do_update(desc, data, len,
+ (sha512_block_fn *)sha512_ce_transform);
+ kernel_neon_end();
+
+ return 0;
+}
+
+static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ if (!may_use_simd()) {
+ if (len)
+ sha512_base_do_update(desc, data, len,
+ (sha512_block_fn *)sha512_block_data_order);
+ sha512_base_do_finalize(desc,
+ (sha512_block_fn *)sha512_block_data_order);
+ return sha512_base_finish(desc, out);
+ }
+
+ kernel_neon_begin();
+ sha512_base_do_update(desc, data, len,
+ (sha512_block_fn *)sha512_ce_transform);
+ sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_ce_transform);
+ kernel_neon_end();
+ return sha512_base_finish(desc, out);
+}
+
+static int sha512_ce_final(struct shash_desc *desc, u8 *out)
+{
+ if (!may_use_simd()) {
+ sha512_base_do_finalize(desc,
+ (sha512_block_fn *)sha512_block_data_order);
+ return sha512_base_finish(desc, out);
+ }
+
+ kernel_neon_begin();
+ sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_ce_transform);
+ kernel_neon_end();
+ return sha512_base_finish(desc, out);
+}
+
+static struct shash_alg algs[] = { {
+ .init = sha384_base_init,
+ .update = sha512_ce_update,
+ .final = sha512_ce_final,
+ .finup = sha512_ce_finup,
+ .descsize = sizeof(struct sha512_state),
+ .digestsize = SHA384_DIGEST_SIZE,
+ .base.cra_name = "sha384",
+ .base.cra_driver_name = "sha384-ce",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_blocksize = SHA512_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+}, {
+ .init = sha512_base_init,
+ .update = sha512_ce_update,
+ .final = sha512_ce_final,
+ .finup = sha512_ce_finup,
+ .descsize = sizeof(struct sha512_state),
+ .digestsize = SHA512_DIGEST_SIZE,
+ .base.cra_name = "sha512",
+ .base.cra_driver_name = "sha512-ce",
+ .base.cra_priority = 200,
+ .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_blocksize = SHA512_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+} };
+
+static int __init sha512_ce_mod_init(void)
+{
+ return crypto_register_shashes(algs, ARRAY_SIZE(algs));
+}
+
+static void __exit sha512_ce_mod_fini(void)
+{
+ crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+}
+
+module_cpu_feature_match(SHA512, sha512_ce_mod_init);
+module_exit(sha512_ce_mod_fini);
diff --git a/arch/arm64/crypto/sha512-glue.c b/arch/arm64/crypto/sha512-glue.c
index aff35c9992a4..27db4851e380 100644
--- a/arch/arm64/crypto/sha512-glue.c
+++ b/arch/arm64/crypto/sha512-glue.c
@@ -27,6 +27,7 @@ MODULE_ALIAS_CRYPTO("sha512");
asmlinkage void sha512_block_data_order(u32 *digest, const void *data,
unsigned int num_blks);
+EXPORT_SYMBOL(sha512_block_data_order);
static int sha512_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
diff --git a/arch/arm64/crypto/sm3-ce-core.S b/arch/arm64/crypto/sm3-ce-core.S
new file mode 100644
index 000000000000..27169fe07a68
--- /dev/null
+++ b/arch/arm64/crypto/sm3-ce-core.S
@@ -0,0 +1,141 @@
+/*
+ * sm3-ce-core.S - SM3 secure hash using ARMv8.2 Crypto Extensions
+ *
+ * Copyright (C) 2018 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+ .irp b, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12
+ .set .Lv\b\().4s, \b
+ .endr
+
+ .macro sm3partw1, rd, rn, rm
+ .inst 0xce60c000 | .L\rd | (.L\rn << 5) | (.L\rm << 16)
+ .endm
+
+ .macro sm3partw2, rd, rn, rm
+ .inst 0xce60c400 | .L\rd | (.L\rn << 5) | (.L\rm << 16)
+ .endm
+
+ .macro sm3ss1, rd, rn, rm, ra
+ .inst 0xce400000 | .L\rd | (.L\rn << 5) | (.L\ra << 10) | (.L\rm << 16)
+ .endm
+
+ .macro sm3tt1a, rd, rn, rm, imm2
+ .inst 0xce408000 | .L\rd | (.L\rn << 5) | ((\imm2) << 12) | (.L\rm << 16)
+ .endm
+
+ .macro sm3tt1b, rd, rn, rm, imm2
+ .inst 0xce408400 | .L\rd | (.L\rn << 5) | ((\imm2) << 12) | (.L\rm << 16)
+ .endm
+
+ .macro sm3tt2a, rd, rn, rm, imm2
+ .inst 0xce408800 | .L\rd | (.L\rn << 5) | ((\imm2) << 12) | (.L\rm << 16)
+ .endm
+
+ .macro sm3tt2b, rd, rn, rm, imm2
+ .inst 0xce408c00 | .L\rd | (.L\rn << 5) | ((\imm2) << 12) | (.L\rm << 16)
+ .endm
+
+ .macro round, ab, s0, t0, t1, i
+ sm3ss1 v5.4s, v8.4s, \t0\().4s, v9.4s
+ shl \t1\().4s, \t0\().4s, #1
+ sri \t1\().4s, \t0\().4s, #31
+ sm3tt1\ab v8.4s, v5.4s, v10.4s, \i
+ sm3tt2\ab v9.4s, v5.4s, \s0\().4s, \i
+ .endm
+
+ .macro qround, ab, s0, s1, s2, s3, s4
+ .ifnb \s4
+ ext \s4\().16b, \s1\().16b, \s2\().16b, #12
+ ext v6.16b, \s0\().16b, \s1\().16b, #12
+ ext v7.16b, \s2\().16b, \s3\().16b, #8
+ sm3partw1 \s4\().4s, \s0\().4s, \s3\().4s
+ .endif
+
+ eor v10.16b, \s0\().16b, \s1\().16b
+
+ round \ab, \s0, v11, v12, 0
+ round \ab, \s0, v12, v11, 1
+ round \ab, \s0, v11, v12, 2
+ round \ab, \s0, v12, v11, 3
+
+ .ifnb \s4
+ sm3partw2 \s4\().4s, v7.4s, v6.4s
+ .endif
+ .endm
+
+ /*
+ * void sm3_ce_transform(struct sm3_state *sst, u8 const *src,
+ * int blocks)
+ */
+ .text
+ENTRY(sm3_ce_transform)
+ /* load state */
+ ld1 {v8.4s-v9.4s}, [x0]
+ rev64 v8.4s, v8.4s
+ rev64 v9.4s, v9.4s
+ ext v8.16b, v8.16b, v8.16b, #8
+ ext v9.16b, v9.16b, v9.16b, #8
+
+ adr_l x8, .Lt
+ ldp s13, s14, [x8]
+
+ /* load input */
+0: ld1 {v0.16b-v3.16b}, [x1], #64
+ sub w2, w2, #1
+
+ mov v15.16b, v8.16b
+ mov v16.16b, v9.16b
+
+CPU_LE( rev32 v0.16b, v0.16b )
+CPU_LE( rev32 v1.16b, v1.16b )
+CPU_LE( rev32 v2.16b, v2.16b )
+CPU_LE( rev32 v3.16b, v3.16b )
+
+ ext v11.16b, v13.16b, v13.16b, #4
+
+ qround a, v0, v1, v2, v3, v4
+ qround a, v1, v2, v3, v4, v0
+ qround a, v2, v3, v4, v0, v1
+ qround a, v3, v4, v0, v1, v2
+
+ ext v11.16b, v14.16b, v14.16b, #4
+
+ qround b, v4, v0, v1, v2, v3
+ qround b, v0, v1, v2, v3, v4
+ qround b, v1, v2, v3, v4, v0
+ qround b, v2, v3, v4, v0, v1
+ qround b, v3, v4, v0, v1, v2
+ qround b, v4, v0, v1, v2, v3
+ qround b, v0, v1, v2, v3, v4
+ qround b, v1, v2, v3, v4, v0
+ qround b, v2, v3, v4, v0, v1
+ qround b, v3, v4
+ qround b, v4, v0
+ qround b, v0, v1
+
+ eor v8.16b, v8.16b, v15.16b
+ eor v9.16b, v9.16b, v16.16b
+
+ /* handled all input blocks? */
+ cbnz w2, 0b
+
+ /* save state */
+ rev64 v8.4s, v8.4s
+ rev64 v9.4s, v9.4s
+ ext v8.16b, v8.16b, v8.16b, #8
+ ext v9.16b, v9.16b, v9.16b, #8
+ st1 {v8.4s-v9.4s}, [x0]
+ ret
+ENDPROC(sm3_ce_transform)
+
+ .section ".rodata", "a"
+ .align 3
+.Lt: .word 0x79cc4519, 0x9d8a7a87
diff --git a/arch/arm64/crypto/sm3-ce-glue.c b/arch/arm64/crypto/sm3-ce-glue.c
new file mode 100644
index 000000000000..3b4948f7e26f
--- /dev/null
+++ b/arch/arm64/crypto/sm3-ce-glue.c
@@ -0,0 +1,92 @@
+/*
+ * sm3-ce-glue.c - SM3 secure hash using ARMv8.2 Crypto Extensions
+ *
+ * Copyright (C) 2018 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/neon.h>
+#include <asm/simd.h>
+#include <asm/unaligned.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sm3.h>
+#include <crypto/sm3_base.h>
+#include <linux/cpufeature.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+MODULE_DESCRIPTION("SM3 secure hash using ARMv8 Crypto Extensions");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+
+asmlinkage void sm3_ce_transform(struct sm3_state *sst, u8 const *src,
+ int blocks);
+
+static int sm3_ce_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ if (!may_use_simd())
+ return crypto_sm3_update(desc, data, len);
+
+ kernel_neon_begin();
+ sm3_base_do_update(desc, data, len, sm3_ce_transform);
+ kernel_neon_end();
+
+ return 0;
+}
+
+static int sm3_ce_final(struct shash_desc *desc, u8 *out)
+{
+ if (!may_use_simd())
+ return crypto_sm3_finup(desc, NULL, 0, out);
+
+ kernel_neon_begin();
+ sm3_base_do_finalize(desc, sm3_ce_transform);
+ kernel_neon_end();
+
+ return sm3_base_finish(desc, out);
+}
+
+static int sm3_ce_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ if (!may_use_simd())
+ return crypto_sm3_finup(desc, data, len, out);
+
+ kernel_neon_begin();
+ sm3_base_do_update(desc, data, len, sm3_ce_transform);
+ kernel_neon_end();
+
+ return sm3_ce_final(desc, out);
+}
+
+static struct shash_alg sm3_alg = {
+ .digestsize = SM3_DIGEST_SIZE,
+ .init = sm3_base_init,
+ .update = sm3_ce_update,
+ .final = sm3_ce_final,
+ .finup = sm3_ce_finup,
+ .descsize = sizeof(struct sm3_state),
+ .base.cra_name = "sm3",
+ .base.cra_driver_name = "sm3-ce",
+ .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_blocksize = SM3_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+ .base.cra_priority = 200,
+};
+
+static int __init sm3_ce_mod_init(void)
+{
+ return crypto_register_shash(&sm3_alg);
+}
+
+static void __exit sm3_ce_mod_fini(void)
+{
+ crypto_unregister_shash(&sm3_alg);
+}
+
+module_cpu_feature_match(SM3, sm3_ce_mod_init);
+module_exit(sm3_ce_mod_fini);
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index 4a85c6952a22..669028172fd6 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -12,6 +12,8 @@
#include <linux/stddef.h>
#include <linux/stringify.h>
+extern int alternatives_applied;
+
struct alt_instr {
s32 orig_offset; /* offset to original instruction */
s32 alt_offset; /* offset to replacement instruction */
diff --git a/arch/arm64/include/asm/arm_dsu_pmu.h b/arch/arm64/include/asm/arm_dsu_pmu.h
new file mode 100644
index 000000000000..82e5cc3356bf
--- /dev/null
+++ b/arch/arm64/include/asm/arm_dsu_pmu.h
@@ -0,0 +1,129 @@
+/*
+ * ARM DynamIQ Shared Unit (DSU) PMU Low level register access routines.
+ *
+ * Copyright (C) ARM Limited, 2017.
+ *
+ * Author: Suzuki K Poulose <suzuki.poulose@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/build_bug.h>
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <asm/barrier.h>
+#include <asm/sysreg.h>
+
+
+#define CLUSTERPMCR_EL1 sys_reg(3, 0, 15, 5, 0)
+#define CLUSTERPMCNTENSET_EL1 sys_reg(3, 0, 15, 5, 1)
+#define CLUSTERPMCNTENCLR_EL1 sys_reg(3, 0, 15, 5, 2)
+#define CLUSTERPMOVSSET_EL1 sys_reg(3, 0, 15, 5, 3)
+#define CLUSTERPMOVSCLR_EL1 sys_reg(3, 0, 15, 5, 4)
+#define CLUSTERPMSELR_EL1 sys_reg(3, 0, 15, 5, 5)
+#define CLUSTERPMINTENSET_EL1 sys_reg(3, 0, 15, 5, 6)
+#define CLUSTERPMINTENCLR_EL1 sys_reg(3, 0, 15, 5, 7)
+#define CLUSTERPMCCNTR_EL1 sys_reg(3, 0, 15, 6, 0)
+#define CLUSTERPMXEVTYPER_EL1 sys_reg(3, 0, 15, 6, 1)
+#define CLUSTERPMXEVCNTR_EL1 sys_reg(3, 0, 15, 6, 2)
+#define CLUSTERPMMDCR_EL1 sys_reg(3, 0, 15, 6, 3)
+#define CLUSTERPMCEID0_EL1 sys_reg(3, 0, 15, 6, 4)
+#define CLUSTERPMCEID1_EL1 sys_reg(3, 0, 15, 6, 5)
+
+static inline u32 __dsu_pmu_read_pmcr(void)
+{
+ return read_sysreg_s(CLUSTERPMCR_EL1);
+}
+
+static inline void __dsu_pmu_write_pmcr(u32 val)
+{
+ write_sysreg_s(val, CLUSTERPMCR_EL1);
+ isb();
+}
+
+static inline u32 __dsu_pmu_get_reset_overflow(void)
+{
+ u32 val = read_sysreg_s(CLUSTERPMOVSCLR_EL1);
+ /* Clear the bit */
+ write_sysreg_s(val, CLUSTERPMOVSCLR_EL1);
+ isb();
+ return val;
+}
+
+static inline void __dsu_pmu_select_counter(int counter)
+{
+ write_sysreg_s(counter, CLUSTERPMSELR_EL1);
+ isb();
+}
+
+static inline u64 __dsu_pmu_read_counter(int counter)
+{
+ __dsu_pmu_select_counter(counter);
+ return read_sysreg_s(CLUSTERPMXEVCNTR_EL1);
+}
+
+static inline void __dsu_pmu_write_counter(int counter, u64 val)
+{
+ __dsu_pmu_select_counter(counter);
+ write_sysreg_s(val, CLUSTERPMXEVCNTR_EL1);
+ isb();
+}
+
+static inline void __dsu_pmu_set_event(int counter, u32 event)
+{
+ __dsu_pmu_select_counter(counter);
+ write_sysreg_s(event, CLUSTERPMXEVTYPER_EL1);
+ isb();
+}
+
+static inline u64 __dsu_pmu_read_pmccntr(void)
+{
+ return read_sysreg_s(CLUSTERPMCCNTR_EL1);
+}
+
+static inline void __dsu_pmu_write_pmccntr(u64 val)
+{
+ write_sysreg_s(val, CLUSTERPMCCNTR_EL1);
+ isb();
+}
+
+static inline void __dsu_pmu_disable_counter(int counter)
+{
+ write_sysreg_s(BIT(counter), CLUSTERPMCNTENCLR_EL1);
+ isb();
+}
+
+static inline void __dsu_pmu_enable_counter(int counter)
+{
+ write_sysreg_s(BIT(counter), CLUSTERPMCNTENSET_EL1);
+ isb();
+}
+
+static inline void __dsu_pmu_counter_interrupt_enable(int counter)
+{
+ write_sysreg_s(BIT(counter), CLUSTERPMINTENSET_EL1);
+ isb();
+}
+
+static inline void __dsu_pmu_counter_interrupt_disable(int counter)
+{
+ write_sysreg_s(BIT(counter), CLUSTERPMINTENCLR_EL1);
+ isb();
+}
+
+
+static inline u32 __dsu_pmu_read_pmceid(int n)
+{
+ switch (n) {
+ case 0:
+ return read_sysreg_s(CLUSTERPMCEID0_EL1);
+ case 1:
+ return read_sysreg_s(CLUSTERPMCEID1_EL1);
+ default:
+ BUILD_BUG();
+ return 0;
+ }
+}
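
Every event-counter accessor above follows the same select-then-access pattern: CLUSTERPMSELR_EL1 picks the counter, the following CLUSTERPMXEVTYPER/CLUSTERPMXEVCNTR access targets it, and each write is followed by an isb() so the system-register update takes effect before subsequent instructions. A hedged sketch of how the eventual driver might drive one counter; the counter index and event number are placeholders, not values defined by this header:

/* Sketch only, assuming this header is included: program a counter,
 * run the measured work, and collect the count. Counter 0 and event
 * 0x11 are hypothetical. */
static u64 dsu_pmu_example(void)
{
	const int counter = 0;
	const u32 event = 0x11;

	__dsu_pmu_set_event(counter, event);	/* select + set type */
	__dsu_pmu_write_counter(counter, 0);	/* select + zero     */
	__dsu_pmu_enable_counter(counter);

	/* ... measured work runs here ... */

	__dsu_pmu_disable_counter(counter);
	return __dsu_pmu_read_counter(counter);
}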
diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
index b3da6c886835..4128bec033f6 100644
--- a/arch/arm64/include/asm/asm-uaccess.h
+++ b/arch/arm64/include/asm/asm-uaccess.h
@@ -4,6 +4,7 @@
#include <asm/alternative.h>
#include <asm/kernel-pgtable.h>
+#include <asm/mmu.h>
#include <asm/sysreg.h>
#include <asm/assembler.h>
@@ -12,52 +13,63 @@
*/
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
.macro __uaccess_ttbr0_disable, tmp1
- mrs \tmp1, ttbr1_el1 // swapper_pg_dir
- add \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir
- msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1
+ mrs \tmp1, ttbr1_el1 // swapper_pg_dir
+ bic \tmp1, \tmp1, #TTBR_ASID_MASK
+ sub \tmp1, \tmp1, #RESERVED_TTBR0_SIZE // reserved_ttbr0 just before swapper_pg_dir
+ msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1
+ isb
+ add \tmp1, \tmp1, #RESERVED_TTBR0_SIZE
+ msr ttbr1_el1, \tmp1 // set reserved ASID
isb
.endm
- .macro __uaccess_ttbr0_enable, tmp1
+ .macro __uaccess_ttbr0_enable, tmp1, tmp2
get_thread_info \tmp1
ldr \tmp1, [\tmp1, #TSK_TI_TTBR0] // load saved TTBR0_EL1
+ mrs \tmp2, ttbr1_el1
+ extr \tmp2, \tmp2, \tmp1, #48
+ ror \tmp2, \tmp2, #16
+ msr ttbr1_el1, \tmp2 // set the active ASID
+ isb
msr ttbr0_el1, \tmp1 // set the non-PAN TTBR0_EL1
isb
.endm
- .macro uaccess_ttbr0_disable, tmp1
+ .macro uaccess_ttbr0_disable, tmp1, tmp2
alternative_if_not ARM64_HAS_PAN
+ save_and_disable_irq \tmp2 // avoid preemption
__uaccess_ttbr0_disable \tmp1
+ restore_irq \tmp2
alternative_else_nop_endif
.endm
- .macro uaccess_ttbr0_enable, tmp1, tmp2
+ .macro uaccess_ttbr0_enable, tmp1, tmp2, tmp3
alternative_if_not ARM64_HAS_PAN
- save_and_disable_irq \tmp2 // avoid preemption
- __uaccess_ttbr0_enable \tmp1
- restore_irq \tmp2
+ save_and_disable_irq \tmp3 // avoid preemption
+ __uaccess_ttbr0_enable \tmp1, \tmp2
+ restore_irq \tmp3
alternative_else_nop_endif
.endm
#else
- .macro uaccess_ttbr0_disable, tmp1
+ .macro uaccess_ttbr0_disable, tmp1, tmp2
.endm
- .macro uaccess_ttbr0_enable, tmp1, tmp2
+ .macro uaccess_ttbr0_enable, tmp1, tmp2, tmp3
.endm
#endif
/*
* These macros are no-ops when UAO is present.
*/
- .macro uaccess_disable_not_uao, tmp1
- uaccess_ttbr0_disable \tmp1
+ .macro uaccess_disable_not_uao, tmp1, tmp2
+ uaccess_ttbr0_disable \tmp1, \tmp2
alternative_if ARM64_ALT_PAN_NOT_UAO
SET_PSTATE_PAN(1)
alternative_else_nop_endif
.endm
- .macro uaccess_enable_not_uao, tmp1, tmp2
- uaccess_ttbr0_enable \tmp1, \tmp2
+ .macro uaccess_enable_not_uao, tmp1, tmp2, tmp3
+ uaccess_ttbr0_enable \tmp1, \tmp2, \tmp3
alternative_if ARM64_ALT_PAN_NOT_UAO
SET_PSTATE_PAN(0)
alternative_else_nop_endif
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 8b168280976f..3873dd7b5a32 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -26,7 +26,6 @@
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
-#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
@@ -110,6 +109,13 @@
.endm
/*
+ * RAS Error Synchronization barrier
+ */
+ .macro esb
+ hint #16
+ .endm
+
+/*
* NOP sequence
*/
.macro nops, num
@@ -255,7 +261,11 @@ lr .req x30 // link register
#else
adr_l \dst, \sym
#endif
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
mrs \tmp, tpidr_el1
+alternative_else
+ mrs \tmp, tpidr_el2
+alternative_endif
add \dst, \dst, \tmp
.endm
@@ -266,7 +276,11 @@ lr .req x30 // link register
*/
.macro ldr_this_cpu dst, sym, tmp
adr_l \dst, \sym
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
mrs \tmp, tpidr_el1
+alternative_else
+ mrs \tmp, tpidr_el2
+alternative_endif
ldr \dst, [\dst, \tmp]
.endm
@@ -344,10 +358,26 @@ alternative_endif
* tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map
*/
.macro tcr_set_idmap_t0sz, valreg, tmpreg
-#ifndef CONFIG_ARM64_VA_BITS_48
ldr_l \tmpreg, idmap_t0sz
bfi \valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
-#endif
+ .endm
+
+/*
+ * tcr_compute_pa_size - set TCR.(I)PS to the highest supported
+ * ID_AA64MMFR0_EL1.PARange value
+ *
+ * tcr: register with the TCR_ELx value to be updated
+ * pos: IPS or PS bitfield position
+ * tmp{0,1}: temporary registers
+ */
+ .macro tcr_compute_pa_size, tcr, pos, tmp0, tmp1
+ mrs \tmp0, ID_AA64MMFR0_EL1
+ // Narrow PARange to fit the PS field in TCR_ELx
+ ubfx \tmp0, \tmp0, #ID_AA64MMFR0_PARANGE_SHIFT, #3
+ mov \tmp1, #ID_AA64MMFR0_PARANGE_MAX
+ cmp \tmp0, \tmp1
+ csel \tmp0, \tmp1, \tmp0, hi
+ bfi \tcr, \tmp0, \pos, #3
.endm
/*
@@ -478,37 +508,18 @@ alternative_endif
.endm
/*
- * Errata workaround prior to TTBR0_EL1 update
+ * Arrange a physical address in a TTBR register, taking care of 52-bit
+ * addresses.
*
- * val: TTBR value with new BADDR, preserved
- * tmp0: temporary register, clobbered
- * tmp1: other temporary register, clobbered
+ * phys: physical address, preserved
+ * ttbr: returns the TTBR value
*/
- .macro pre_ttbr0_update_workaround, val, tmp0, tmp1
-#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
-alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
- mrs \tmp0, ttbr0_el1
- mov \tmp1, #FALKOR_RESERVED_ASID
- bfi \tmp0, \tmp1, #48, #16 // reserved ASID + old BADDR
- msr ttbr0_el1, \tmp0
- isb
- bfi \tmp0, \val, #0, #48 // reserved ASID + new BADDR
- msr ttbr0_el1, \tmp0
- isb
-alternative_else_nop_endif
-#endif
- .endm
-
-/*
- * Errata workaround post TTBR0_EL1 update.
- */
- .macro post_ttbr0_update_workaround
-#ifdef CONFIG_CAVIUM_ERRATUM_27456
-alternative_if ARM64_WORKAROUND_CAVIUM_27456
- ic iallu
- dsb nsh
- isb
-alternative_else_nop_endif
+ .macro phys_to_ttbr, phys, ttbr
+#ifdef CONFIG_ARM64_PA_BITS_52
+ orr \ttbr, \phys, \phys, lsr #46
+ and \ttbr, \ttbr, #TTBR_BADDR_MASK_52
+#else
+ mov \ttbr, \phys
#endif
.endm
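
With 52-bit physical addresses, TTBR_ELx cannot hold PA bits 51:48 in their natural position; the architecture stores them in TTBR bits 5:2 instead, which is what the orr with "\phys, lsr #46" accomplishes (bit 48 lands on bit 2). A standalone check of the bit gymnastics; TTBR_BADDR_MASK_52 is restated here as bits 47:2, an assumption mirroring its pgtable-hwdef.h definition rather than something in this hunk:

#include <stdint.h>
#include <stdio.h>

#define TTBR_BADDR_MASK_52	((((uint64_t)1 << 46) - 1) << 2)	/* assumed: bits 47:2 */

static uint64_t phys_to_ttbr(uint64_t phys)
{
	return (phys | (phys >> 46)) & TTBR_BADDR_MASK_52;
}

int main(void)
{
	uint64_t phys = (uint64_t)0xf << 48;	/* PA bits 51:48 set */

	/* expect 0x3c: bits 51:48 folded down to TTBR bits 5:2 */
	printf("ttbr = 0x%016llx\n",
	       (unsigned long long)phys_to_ttbr(phys));
	return 0;
}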
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index a3c7f271ad4c..c00c62e1a4a3 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -150,70 +150,6 @@ typedef u32 compat_old_sigset_t;
typedef u32 compat_sigset_word;
-typedef union compat_sigval {
- compat_int_t sival_int;
- compat_uptr_t sival_ptr;
-} compat_sigval_t;
-
-typedef struct compat_siginfo {
- int si_signo;
- int si_errno;
- int si_code;
-
- union {
- int _pad[128/sizeof(int) - 3];
-
- /* kill() */
- struct {
- compat_pid_t _pid; /* sender's pid */
- __compat_uid32_t _uid; /* sender's uid */
- } _kill;
-
- /* POSIX.1b timers */
- struct {
- compat_timer_t _tid; /* timer id */
- int _overrun; /* overrun count */
- compat_sigval_t _sigval; /* same as below */
- int _sys_private; /* not to be passed to user */
- } _timer;
-
- /* POSIX.1b signals */
- struct {
- compat_pid_t _pid; /* sender's pid */
- __compat_uid32_t _uid; /* sender's uid */
- compat_sigval_t _sigval;
- } _rt;
-
- /* SIGCHLD */
- struct {
- compat_pid_t _pid; /* which child */
- __compat_uid32_t _uid; /* sender's uid */
- int _status; /* exit code */
- compat_clock_t _utime;
- compat_clock_t _stime;
- } _sigchld;
-
- /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
- struct {
- compat_uptr_t _addr; /* faulting insn/memory ref. */
- short _addr_lsb; /* LSB of the reported address */
- } _sigfault;
-
- /* SIGPOLL */
- struct {
- compat_long_t _band; /* POLL_IN, POLL_OUT, POLL_MSG */
- int _fd;
- } _sigpoll;
-
- /* SIGSYS */
- struct {
- compat_uptr_t _call_addr; /* calling user insn */
- int _syscall; /* triggering system call number */
- compat_uint_t _arch; /* AUDIT_ARCH_* of syscall */
- } _sigsys;
- } _sifields;
-} compat_siginfo_t;
-
#define COMPAT_OFF_T_MAX 0x7fffffff
/*
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 2ff7c5e8efab..bb263820de13 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -41,7 +41,11 @@
#define ARM64_WORKAROUND_CAVIUM_30115 20
#define ARM64_HAS_DCPOP 21
#define ARM64_SVE 22
+#define ARM64_UNMAP_KERNEL_AT_EL0 23
+#define ARM64_HARDEN_BRANCH_PREDICTOR 24
+#define ARM64_HARDEN_BP_POST_GUEST_EXIT 25
+#define ARM64_HAS_RAS_EXTN 26
-#define ARM64_NCAPS 23
+#define ARM64_NCAPS 27
#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index cbf08d7cbf30..be7bd19c87ec 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -79,28 +79,37 @@
#define ARM_CPU_PART_AEM_V8 0xD0F
#define ARM_CPU_PART_FOUNDATION 0xD00
#define ARM_CPU_PART_CORTEX_A57 0xD07
+#define ARM_CPU_PART_CORTEX_A72 0xD08
#define ARM_CPU_PART_CORTEX_A53 0xD03
#define ARM_CPU_PART_CORTEX_A73 0xD09
+#define ARM_CPU_PART_CORTEX_A75 0xD0A
#define APM_CPU_PART_POTENZA 0x000
#define CAVIUM_CPU_PART_THUNDERX 0x0A1
#define CAVIUM_CPU_PART_THUNDERX_81XX 0x0A2
#define CAVIUM_CPU_PART_THUNDERX_83XX 0x0A3
+#define CAVIUM_CPU_PART_THUNDERX2 0x0AF
#define BRCM_CPU_PART_VULCAN 0x516
#define QCOM_CPU_PART_FALKOR_V1 0x800
#define QCOM_CPU_PART_FALKOR 0xC00
+#define QCOM_CPU_PART_KRYO 0x200
#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
+#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
#define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73)
+#define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
+#define MIDR_CAVIUM_THUNDERX2 MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX2)
+#define MIDR_BRCM_VULCAN MIDR_CPU_MODEL(ARM_CPU_IMP_BRCM, BRCM_CPU_PART_VULCAN)
#define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
#define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
+#define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
#ifndef __ASSEMBLY__
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 0df756b24863..b7847eb8a7bb 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -50,40 +50,5 @@ static inline bool is_device_dma_coherent(struct device *dev)
return dev->archdata.dma_coherent;
}
-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
- dma_addr_t dev_addr = (dma_addr_t)paddr;
-
- return dev_addr - ((dma_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
-}
-
-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
-{
- phys_addr_t paddr = (phys_addr_t)dev_addr;
-
- return paddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
-}
-
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
- if (!dev->dma_mask)
- return false;
-
- return addr + size - 1 <= *dev->dma_mask;
-}
-
-static inline void dma_mark_clean(void *addr, size_t size)
-{
-}
-
-/* Override for dma_max_pfn() */
-static inline unsigned long dma_max_pfn(struct device *dev)
-{
- dma_addr_t dma_max = (dma_addr_t)*dev->dma_mask;
-
- return (ulong)dma_to_phys(dev, dma_max) >> PAGE_SHIFT;
-}
-#define dma_max_pfn(dev) dma_max_pfn(dev)
-
#endif /* __KERNEL__ */
#endif /* __ASM_DMA_MAPPING_H */
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index c4cd5081d78b..8389050328bb 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -121,19 +121,21 @@ static inline void efi_set_pgd(struct mm_struct *mm)
if (mm != current->active_mm) {
/*
* Update the current thread's saved ttbr0 since it is
- * restored as part of a return from exception. Set
- * the hardware TTBR0_EL1 using cpu_switch_mm()
- * directly to enable potential errata workarounds.
+ * restored as part of a return from exception. Enable
+ * access to the valid TTBR0_EL1 and invoke the errata
+ * workaround directly since there is no return from
+ * exception when invoking the EFI run-time services.
*/
update_saved_ttbr0(current, mm);
- cpu_switch_mm(mm->pgd, mm);
+ uaccess_ttbr0_enable();
+ post_ttbr_update_workaround();
} else {
/*
* Defer the switch to the current thread's TTBR0_EL1
* until uaccess_enable(). Restore the current
* thread's saved ttbr0 corresponding to its active_mm
*/
- cpu_set_reserved_ttbr0();
+ uaccess_ttbr0_disable();
update_saved_ttbr0(current, current->active_mm);
}
}
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 014d7d8edcf9..803443d74926 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -86,6 +86,18 @@
#define ESR_ELx_WNR_SHIFT (6)
#define ESR_ELx_WNR (UL(1) << ESR_ELx_WNR_SHIFT)
+/* Asynchronous Error Type */
+#define ESR_ELx_IDS_SHIFT (24)
+#define ESR_ELx_IDS (UL(1) << ESR_ELx_IDS_SHIFT)
+#define ESR_ELx_AET_SHIFT (10)
+#define ESR_ELx_AET (UL(0x7) << ESR_ELx_AET_SHIFT)
+
+#define ESR_ELx_AET_UC (UL(0) << ESR_ELx_AET_SHIFT)
+#define ESR_ELx_AET_UEU (UL(1) << ESR_ELx_AET_SHIFT)
+#define ESR_ELx_AET_UEO (UL(2) << ESR_ELx_AET_SHIFT)
+#define ESR_ELx_AET_UER (UL(3) << ESR_ELx_AET_SHIFT)
+#define ESR_ELx_AET_CE (UL(6) << ESR_ELx_AET_SHIFT)
+
/* Shared ISS field definitions for Data/Instruction aborts */
#define ESR_ELx_SET_SHIFT (11)
#define ESR_ELx_SET_MASK (UL(3) << ESR_ELx_SET_SHIFT)
@@ -100,6 +112,7 @@
#define ESR_ELx_FSC (0x3F)
#define ESR_ELx_FSC_TYPE (0x3C)
#define ESR_ELx_FSC_EXTABT (0x10)
+#define ESR_ELx_FSC_SERROR (0x11)
#define ESR_ELx_FSC_ACCESS (0x08)
#define ESR_ELx_FSC_FAULT (0x04)
#define ESR_ELx_FSC_PERM (0x0C)
@@ -127,6 +140,13 @@
#define ESR_ELx_WFx_ISS_WFE (UL(1) << 0)
#define ESR_ELx_xVC_IMM_MASK ((1UL << 16) - 1)
+#define DISR_EL1_IDS (UL(1) << 24)
+/*
+ * DISR_EL1 and ESR_ELx share the bottom 13 bits, but the RES0 bits may mean
+ * different things in the future...
+ */
+#define DISR_EL1_ESR_MASK (ESR_ELx_AET | ESR_ELx_EA | ESR_ELx_FSC)
+
/* ESR value templates for specific events */
/* BRK instruction trap from AArch64 state */
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index 0c2eec490abf..bc30429d8e91 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -18,6 +18,8 @@
#ifndef __ASM_EXCEPTION_H
#define __ASM_EXCEPTION_H
+#include <asm/esr.h>
+
#include <linux/interrupt.h>
#define __exception __attribute__((section(".exception.text")))
@@ -27,4 +29,16 @@
#define __exception_irq_entry __exception
#endif
+static inline u32 disr_to_esr(u64 disr)
+{
+ unsigned int esr = ESR_ELx_EC_SERROR << ESR_ELx_EC_SHIFT;
+
+ if ((disr & DISR_EL1_IDS) == 0)
+ esr |= (disr & DISR_EL1_ESR_MASK);
+ else
+ esr |= (disr & ESR_ELx_ISS_MASK);
+
+ return esr;
+}
+
#endif /* __ASM_EXCEPTION_H */
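
disr_to_esr() synthesizes an ESR-format syndrome from a deferred DISR_EL1 value: the EC field is forced to SError, and the syndrome bits come through DISR_EL1_ESR_MASK when IDS is clear (architecturally assigned format) or the whole ISS when IDS is set (implementation defined). A standalone restatement of the conversion; the EC/ISS constants are mirrored from esr.h rather than this hunk, and the sample DISR value is made up:

#include <stdint.h>
#include <stdio.h>

#define ESR_ELx_EC_SERROR	0x2fUL
#define ESR_ELx_EC_SHIFT	26
#define ESR_ELx_ISS_MASK	0x01ffffffUL
#define ESR_ELx_AET		(0x7UL << 10)
#define ESR_ELx_EA		(1UL << 9)
#define ESR_ELx_FSC		0x3fUL
#define DISR_EL1_IDS		(1UL << 24)
#define DISR_EL1_ESR_MASK	(ESR_ELx_AET | ESR_ELx_EA | ESR_ELx_FSC)

static uint32_t disr_to_esr(uint64_t disr)
{
	uint32_t esr = ESR_ELx_EC_SERROR << ESR_ELx_EC_SHIFT;

	if ((disr & DISR_EL1_IDS) == 0)
		esr |= (uint32_t)(disr & DISR_EL1_ESR_MASK);
	else
		esr |= (uint32_t)(disr & ESR_ELx_ISS_MASK);
	return esr;
}

int main(void)
{
	uint64_t disr = (6UL << 10) | 0x11;	/* hypothetical: AET=CE, FSC=SError */

	printf("esr = 0x%08x\n", disr_to_esr(disr));
	return 0;
}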
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index 4052ec39e8db..ec1e6d6fa14c 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -58,6 +58,11 @@ enum fixed_addresses {
FIX_APEI_GHES_NMI,
#endif /* CONFIG_ACPI_APEI_GHES */
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ FIX_ENTRY_TRAMP_DATA,
+ FIX_ENTRY_TRAMP_TEXT,
+#define TRAMP_VALIAS (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT))
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
__end_of_permanent_fixed_addresses,
/*
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index 74f34392a531..8857a0f0d0f7 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -71,7 +71,7 @@ extern void fpsimd_flush_thread(void);
extern void fpsimd_signal_preserve_current_state(void);
extern void fpsimd_preserve_current_state(void);
extern void fpsimd_restore_current_state(void);
-extern void fpsimd_update_current_state(struct fpsimd_state *state);
+extern void fpsimd_update_current_state(struct user_fpsimd_state const *state);
extern void fpsimd_flush_task_state(struct task_struct *target);
extern void sve_flush_cpu_state(void);
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index 7803343e5881..82386e860dd2 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -52,7 +52,52 @@
#define IDMAP_PGTABLE_LEVELS (ARM64_HW_PGTABLE_LEVELS(PHYS_MASK_SHIFT))
#endif
-#define SWAPPER_DIR_SIZE (SWAPPER_PGTABLE_LEVELS * PAGE_SIZE)
+
+/*
+ * If KASLR is enabled, then an offset K is added to the kernel address
+ * space. The bottom 21 bits of this offset are zero to guarantee 2MB
+ * alignment for PA and VA.
+ *
+ * For each pagetable level of the swapper, we know that the shift will
+ * be larger than 21 (for the 4KB granule case we use section maps thus
+ * the smallest shift is actually 30) thus there is the possibility that
+ * KASLR can increase the number of pagetable entries by 1, so we make
+ * room for this extra entry.
+ *
+ * Note KASLR cannot increase the number of required entries for a level
+ * by more than one because it increments both the virtual start and end
+ * addresses equally (the extra entry comes from the case where the end
+ * address is just pushed over a boundary and the start address isn't).
+ */
+
+#ifdef CONFIG_RANDOMIZE_BASE
+#define EARLY_KASLR (1)
+#else
+#define EARLY_KASLR (0)
+#endif
+
+#define EARLY_ENTRIES(vstart, vend, shift) (((vend) >> (shift)) \
+ - ((vstart) >> (shift)) + 1 + EARLY_KASLR)
+
+#define EARLY_PGDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, PGDIR_SHIFT))
+
+#if SWAPPER_PGTABLE_LEVELS > 3
+#define EARLY_PUDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, PUD_SHIFT))
+#else
+#define EARLY_PUDS(vstart, vend) (0)
+#endif
+
+#if SWAPPER_PGTABLE_LEVELS > 2
+#define EARLY_PMDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, SWAPPER_TABLE_SHIFT))
+#else
+#define EARLY_PMDS(vstart, vend) (0)
+#endif
+
+#define EARLY_PAGES(vstart, vend) ( 1 /* PGDIR page */ \
+ + EARLY_PGDS((vstart), (vend)) /* each PGDIR needs a next level page table */ \
+ + EARLY_PUDS((vstart), (vend)) /* each PUD needs a next level page table */ \
+ + EARLY_PMDS((vstart), (vend))) /* each PMD needs a next level page table */
+#define SWAPPER_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR + TEXT_OFFSET, _end))
#define IDMAP_DIR_SIZE (IDMAP_PGTABLE_LEVELS * PAGE_SIZE)
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
@@ -78,8 +123,16 @@
/*
* Initial memory map attributes.
*/
-#define SWAPPER_PTE_FLAGS (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
-#define SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+#define _SWAPPER_PTE_FLAGS (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
+#define _SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+#define SWAPPER_PTE_FLAGS (_SWAPPER_PTE_FLAGS | PTE_NG)
+#define SWAPPER_PMD_FLAGS (_SWAPPER_PMD_FLAGS | PMD_SECT_NG)
+#else
+#define SWAPPER_PTE_FLAGS _SWAPPER_PTE_FLAGS
+#define SWAPPER_PMD_FLAGS _SWAPPER_PMD_FLAGS
+#endif
#if ARM64_SWAPPER_USES_SECTION_MAPS
#define SWAPPER_MM_MMUFLAGS (PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS)
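
The block comment above reasons that KASLR can add at most one extra entry per pagetable level, because the randomization offset shifts the start and end addresses by the same amount. A standalone restatement of EARLY_ENTRIES() with made-up addresses (hypothetical values, not the kernel's real KIMAGE_VADDR/_end; the shifts are for a 4K-granule, 48-bit VA build):

#include <stdio.h>

#define EARLY_KASLR 1	/* as if CONFIG_RANDOMIZE_BASE=y */
#define EARLY_ENTRIES(vstart, vend, shift) \
	(((vend) >> (shift)) - ((vstart) >> (shift)) + 1 + EARLY_KASLR)

int main(void)
{
	unsigned long long vstart = 0xffff000010080000ULL;	/* hypothetical */
	unsigned long long vend   = 0xffff000011e00000ULL;	/* hypothetical */

	/* PGDIR_SHIFT = 39 and PMD-level shift = 21 for 4K pages */
	printf("PGD entries: %llu\n", EARLY_ENTRIES(vstart, vend, 39));
	printf("PMD entries: %llu\n", EARLY_ENTRIES(vstart, vend, 21));
	return 0;
}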
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 715d395ef45b..b0c84171e6a3 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -23,6 +23,8 @@
#include <asm/types.h>
/* Hyp Configuration Register (HCR) bits */
+#define HCR_TEA (UL(1) << 37)
+#define HCR_TERR (UL(1) << 36)
#define HCR_E2H (UL(1) << 34)
#define HCR_ID (UL(1) << 33)
#define HCR_CD (UL(1) << 32)
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index ab4d0a926043..24961b732e65 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -68,6 +68,8 @@ extern u32 __kvm_get_mdcr_el2(void);
extern u32 __init_stage2_translation(void);
+extern void __qcom_hyp_sanitize_btac_predictors(void);
+
#endif
#endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 5f28dfa14cee..413dc82b1e89 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -50,6 +50,13 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
if (is_kernel_in_hyp_mode())
vcpu->arch.hcr_el2 |= HCR_E2H;
+ if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
+ /* route synchronous external abort exceptions to EL2 */
+ vcpu->arch.hcr_el2 |= HCR_TEA;
+ /* trap error record accesses */
+ vcpu->arch.hcr_el2 |= HCR_TERR;
+ }
+
if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
vcpu->arch.hcr_el2 &= ~HCR_RW;
}
@@ -64,6 +71,11 @@ static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
vcpu->arch.hcr_el2 = hcr;
}
+static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
+{
+ vcpu->arch.vsesr_el2 = vsesr;
+}
+
static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
@@ -171,6 +183,11 @@ static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}
+static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.fault.disr_el1;
+}
+
static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index ea6cb5b24258..4485ae8e98de 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -25,6 +25,7 @@
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <asm/cpufeature.h>
+#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
@@ -89,6 +90,7 @@ struct kvm_vcpu_fault_info {
u32 esr_el2; /* Hyp Syndrom Register */
u64 far_el2; /* Hyp Fault Address Register */
u64 hpfar_el2; /* Hyp IPA Fault Address Register */
+ u64 disr_el1; /* Deferred [SError] Status Register */
};
/*
@@ -120,6 +122,7 @@ enum vcpu_sysreg {
PAR_EL1, /* Physical Address Register */
MDSCR_EL1, /* Monitor Debug System Control Register */
MDCCINT_EL1, /* Monitor Debug Comms Channel Interrupt Enable Reg */
+ DISR_EL1, /* Deferred Interrupt Status Register */
/* Performance Monitors Registers */
PMCR_EL0, /* Control Register */
@@ -192,6 +195,8 @@ struct kvm_cpu_context {
u64 sys_regs[NR_SYS_REGS];
u32 copro[NR_COPRO_REGS];
};
+
+ struct kvm_vcpu *__hyp_running_vcpu;
};
typedef struct kvm_cpu_context kvm_cpu_context_t;
@@ -277,6 +282,9 @@ struct kvm_vcpu_arch {
/* Detect first run of a vcpu */
bool has_run_once;
+
+ /* Virtual SError ESR to restore when HCR_EL2.VSE is set */
+ u64 vsesr_el2;
};
#define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs)
@@ -340,6 +348,8 @@ void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
int exception_index);
+void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ int exception_index);
int kvm_perf_init(void);
int kvm_perf_teardown(void);
@@ -396,4 +406,13 @@ static inline void kvm_fpsimd_flush_cpu_state(void)
sve_flush_cpu_state();
}
+static inline void kvm_arm_vhe_guest_enter(void)
+{
+ local_daif_mask();
+}
+
+static inline void kvm_arm_vhe_guest_exit(void)
+{
+ local_daif_restore(DAIF_PROCCTX_NOIRQ);
+}
#endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 672c8684d5c2..72e279dbae5f 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -273,15 +273,26 @@ void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
static inline bool __kvm_cpu_uses_extended_idmap(void)
{
- return __cpu_uses_extended_idmap();
+ return __cpu_uses_extended_idmap_level();
}
+static inline unsigned long __kvm_idmap_ptrs_per_pgd(void)
+{
+ return idmap_ptrs_per_pgd;
+}
+
+/*
+ * Can't use pgd_populate here, because the extended idmap adds an extra level
+ * above CONFIG_PGTABLE_LEVELS (which is 2 or 3 if we're using the extended
+ * idmap), and pgd_populate is only available if CONFIG_PGTABLE_LEVELS = 4.
+ */
static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
pgd_t *hyp_pgd,
pgd_t *merged_hyp_pgd,
unsigned long hyp_idmap_start)
{
int idmap_idx;
+ u64 pgd_addr;
/*
* Use the first entry to access the HYP mappings. It is
@@ -289,7 +300,8 @@ static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
* extended idmap.
*/
VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
- merged_hyp_pgd[0] = __pgd(__pa(hyp_pgd) | PMD_TYPE_TABLE);
+ pgd_addr = __phys_to_pgd_val(__pa(hyp_pgd));
+ merged_hyp_pgd[0] = __pgd(pgd_addr | PMD_TYPE_TABLE);
/*
* Create another extended level entry that points to the boot HYP map,
@@ -299,7 +311,8 @@ static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
*/
idmap_idx = hyp_idmap_start >> VA_BITS;
VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
- merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
+ pgd_addr = __phys_to_pgd_val(__pa(boot_hyp_pgd));
+ merged_hyp_pgd[idmap_idx] = __pgd(pgd_addr | PMD_TYPE_TABLE);
}
static inline unsigned int kvm_get_vmid_bits(void)
@@ -309,5 +322,45 @@ static inline unsigned int kvm_get_vmid_bits(void)
return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
}
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+#include <asm/mmu.h>
+
+static inline void *kvm_get_hyp_vector(void)
+{
+ struct bp_hardening_data *data = arm64_get_bp_hardening_data();
+ void *vect = kvm_ksym_ref(__kvm_hyp_vector);
+
+ if (data->fn) {
+ vect = __bp_harden_hyp_vecs_start +
+ data->hyp_vectors_slot * SZ_2K;
+
+ if (!has_vhe())
+ vect = lm_alias(vect);
+ }
+
+ return vect;
+}
+
+static inline int kvm_map_vectors(void)
+{
+ return create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start),
+ kvm_ksym_ref(__bp_harden_hyp_vecs_end),
+ PAGE_HYP_EXEC);
+}
+
+#else
+static inline void *kvm_get_hyp_vector(void)
+{
+ return kvm_ksym_ref(__kvm_hyp_vector);
+}
+
+static inline int kvm_map_vectors(void)
+{
+ return 0;
+}
+#endif
+
+#define kvm_phys_to_vttbr(addr) phys_to_ttbr(addr)
+
#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 0d34bf0a89c7..a050d4f3615d 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -17,6 +17,11 @@
#define __ASM_MMU_H
#define MMCF_AARCH32 0x1 /* mm context flag for AArch32 executables */
+#define USER_ASID_BIT 48
+#define USER_ASID_FLAG (UL(1) << USER_ASID_BIT)
+#define TTBR_ASID_MASK (UL(0xffff) << 48)
+
+#ifndef __ASSEMBLY__
typedef struct {
atomic64_t id;
@@ -31,6 +36,49 @@ typedef struct {
*/
#define ASID(mm) ((mm)->context.id.counter & 0xffff)
+static inline bool arm64_kernel_unmapped_at_el0(void)
+{
+ return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) &&
+ cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
+}
+
+typedef void (*bp_hardening_cb_t)(void);
+
+struct bp_hardening_data {
+ int hyp_vectors_slot;
+ bp_hardening_cb_t fn;
+};
+
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[];
+
+DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
+
+static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
+{
+ return this_cpu_ptr(&bp_hardening_data);
+}
+
+static inline void arm64_apply_bp_hardening(void)
+{
+ struct bp_hardening_data *d;
+
+ if (!cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
+ return;
+
+ d = arm64_get_bp_hardening_data();
+ if (d->fn)
+ d->fn();
+}
+#else
+static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
+{
+ return NULL;
+}
+
+static inline void arm64_apply_bp_hardening(void) { }
+#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
+
extern void paging_init(void);
extern void bootmem_init(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
@@ -41,4 +89,5 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
extern void mark_linear_text_alias_ro(void);
+#endif /* !__ASSEMBLY__ */
#endif
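
The bp_hardening_data machinery above reduces to an optional per-CPU callback: arm64_apply_bp_hardening() is a no-op until an enable method installs a vendor-specific invalidation sequence. A minimal userspace sketch of that dispatch pattern (plain globals stand in for per-CPU storage; the callback body is illustrative):

#include <stdio.h>

typedef void (*bp_hardening_cb_t)(void);

struct bp_hardening_data {
	int hyp_vectors_slot;
	bp_hardening_cb_t fn;
};

static struct bp_hardening_data this_cpu_data;	/* per-CPU in the kernel */

static void psci_version_cb(void)
{
	puts("invalidate branch predictor");
}

static void apply_bp_hardening(void)
{
	if (this_cpu_data.fn)
		this_cpu_data.fn();	/* unaffected CPUs never install one */
}

int main(void)
{
	apply_bp_hardening();			/* no-op: nothing installed yet */
	this_cpu_data.fn = psci_version_cb;
	apply_bp_hardening();			/* now runs the sequence */
	return 0;
}
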
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 9d155fa9a507..8d3331985d2e 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -19,8 +19,6 @@
#ifndef __ASM_MMU_CONTEXT_H
#define __ASM_MMU_CONTEXT_H
-#define FALKOR_RESERVED_ASID 1
-
#ifndef __ASSEMBLY__
#include <linux/compiler.h>
@@ -51,23 +49,39 @@ static inline void contextidr_thread_switch(struct task_struct *next)
*/
static inline void cpu_set_reserved_ttbr0(void)
{
- unsigned long ttbr = __pa_symbol(empty_zero_page);
+ unsigned long ttbr = phys_to_ttbr(__pa_symbol(empty_zero_page));
write_sysreg(ttbr, ttbr0_el1);
isb();
}
+static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
+{
+ BUG_ON(pgd == swapper_pg_dir);
+ cpu_set_reserved_ttbr0();
+ cpu_do_switch_mm(virt_to_phys(pgd), mm);
+}
+
/*
* TCR.T0SZ value to use when the ID map is active. Usually equals
* TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
* physical memory, in which case it will be smaller.
*/
extern u64 idmap_t0sz;
+extern u64 idmap_ptrs_per_pgd;
static inline bool __cpu_uses_extended_idmap(void)
{
- return (!IS_ENABLED(CONFIG_ARM64_VA_BITS_48) &&
- unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS)));
+ return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS));
+}
+
+/*
+ * True if the extended ID map requires an extra level of translation table
+ * to be configured.
+ */
+static inline bool __cpu_uses_extended_idmap_level(void)
+{
+ return ARM64_HW_PGTABLE_LEVELS(64 - idmap_t0sz) > CONFIG_PGTABLE_LEVELS;
}
/*
@@ -170,7 +184,7 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
else
ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;
- task_thread_info(tsk)->ttbr0 = ttbr;
+ WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
}
#else
static inline void update_saved_ttbr0(struct task_struct *tsk,
@@ -225,6 +239,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
#define activate_mm(prev,next) switch_mm(prev, next, current)
void verify_cpu_asid_bits(void);
+void post_ttbr_update_workaround(void);
#endif /* !__ASSEMBLY__ */
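
__cpu_uses_extended_idmap_level() compares the number of translation levels the idmap requires against what the kernel was configured with. A standalone sketch of that calculation, mirroring the ARM64_HW_PGTABLE_LEVELS formula and assuming 4KB pages with a 39-bit kernel VA space (illustrative numbers):

#include <stdio.h>

#define PAGE_SHIFT 12
/* Resolve (va_bits - PAGE_SHIFT) bits, (PAGE_SHIFT - 3) bits per level. */
#define HW_PGTABLE_LEVELS(va_bits) (((va_bits) - 4) / (PAGE_SHIFT - 3))

int main(void)
{
	int config_levels = HW_PGTABLE_LEVELS(39);	/* kernel: 3 levels */
	int idmap_va_bits = 48;		/* RAM placed high: idmap needs 48 bits */
	int idmap_levels = HW_PGTABLE_LEVELS(idmap_va_bits);

	printf("config: %d levels, idmap: %d levels, extended idmap: %s\n",
	       config_levels, idmap_levels,
	       idmap_levels > config_levels ? "yes" : "no");
	return 0;
}
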
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 3bd498e4de4c..43393208229e 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -16,11 +16,15 @@
#ifndef __ASM_PERCPU_H
#define __ASM_PERCPU_H
+#include <asm/alternative.h>
#include <asm/stack_pointer.h>
static inline void set_my_cpu_offset(unsigned long off)
{
- asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
+ asm volatile(ALTERNATIVE("msr tpidr_el1, %0",
+ "msr tpidr_el2, %0",
+ ARM64_HAS_VIRT_HOST_EXTN)
+ :: "r" (off) : "memory");
}
static inline unsigned long __my_cpu_offset(void)
@@ -31,7 +35,10 @@ static inline unsigned long __my_cpu_offset(void)
* We want to allow caching the value, so avoid using volatile and
* instead use a fake stack read to hazard against barrier().
*/
- asm("mrs %0, tpidr_el1" : "=r" (off) :
+ asm(ALTERNATIVE("mrs %0, tpidr_el1",
+ "mrs %0, tpidr_el2",
+ ARM64_HAS_VIRT_HOST_EXTN)
+ : "=r" (off) :
"Q" (*(const unsigned long *)current_stack_pointer));
return off;
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index 5ca6a573a701..e9d9f1b006ef 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -44,7 +44,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
static inline void __pud_populate(pud_t *pud, phys_addr_t pmd, pudval_t prot)
{
- set_pud(pud, __pud(pmd | prot));
+ set_pud(pud, __pud(__phys_to_pud_val(pmd) | prot));
}
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
@@ -73,7 +73,7 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pud, pgdval_t prot)
{
- set_pgd(pgdp, __pgd(pud | prot));
+ set_pgd(pgdp, __pgd(__phys_to_pgd_val(pud) | prot));
}
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
@@ -129,7 +129,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
pmdval_t prot)
{
- set_pmd(pmdp, __pmd(pte | prot));
+ set_pmd(pmdp, __pmd(__phys_to_pmd_val(pte) | prot));
}
/*
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index eb0c2bd90de9..f42836da8723 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -16,6 +16,8 @@
#ifndef __ASM_PGTABLE_HWDEF_H
#define __ASM_PGTABLE_HWDEF_H
+#include <asm/memory.h>
+
/*
* Number of page-table levels required to address 'va_bits' wide
* address, without section mapping. We resolve the top (va_bits - PAGE_SHIFT)
@@ -116,9 +118,9 @@
* Level 1 descriptor (PUD).
*/
#define PUD_TYPE_TABLE (_AT(pudval_t, 3) << 0)
-#define PUD_TABLE_BIT (_AT(pgdval_t, 1) << 1)
-#define PUD_TYPE_MASK (_AT(pgdval_t, 3) << 0)
-#define PUD_TYPE_SECT (_AT(pgdval_t, 1) << 0)
+#define PUD_TABLE_BIT (_AT(pudval_t, 1) << 1)
+#define PUD_TYPE_MASK (_AT(pudval_t, 3) << 0)
+#define PUD_TYPE_SECT (_AT(pudval_t, 1) << 0)
/*
* Level 2 descriptor (PMD).
@@ -166,6 +168,14 @@
#define PTE_UXN (_AT(pteval_t, 1) << 54) /* User XN */
#define PTE_HYP_XN (_AT(pteval_t, 1) << 54) /* HYP XN */
+#define PTE_ADDR_LOW (((_AT(pteval_t, 1) << (48 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
+#ifdef CONFIG_ARM64_PA_BITS_52
+#define PTE_ADDR_HIGH (_AT(pteval_t, 0xf) << 12)
+#define PTE_ADDR_MASK (PTE_ADDR_LOW | PTE_ADDR_HIGH)
+#else
+#define PTE_ADDR_MASK PTE_ADDR_LOW
+#endif
+
/*
* AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
*/
@@ -196,7 +206,7 @@
/*
* Highest possible physical address supported.
*/
-#define PHYS_MASK_SHIFT (48)
+#define PHYS_MASK_SHIFT (CONFIG_ARM64_PA_BITS)
#define PHYS_MASK ((UL(1) << PHYS_MASK_SHIFT) - 1)
/*
@@ -272,9 +282,23 @@
#define TCR_TG1_4K (UL(2) << TCR_TG1_SHIFT)
#define TCR_TG1_64K (UL(3) << TCR_TG1_SHIFT)
+#define TCR_IPS_SHIFT 32
+#define TCR_IPS_MASK (UL(7) << TCR_IPS_SHIFT)
+#define TCR_A1 (UL(1) << 22)
#define TCR_ASID16 (UL(1) << 36)
#define TCR_TBI0 (UL(1) << 37)
#define TCR_HA (UL(1) << 39)
#define TCR_HD (UL(1) << 40)
+/*
+ * TTBR.
+ */
+#ifdef CONFIG_ARM64_PA_BITS_52
+/*
+ * This should be GENMASK_ULL(47, 2).
+ * TTBR_ELx[1] is RES0 in this configuration.
+ */
+#define TTBR_BADDR_MASK_52 (((UL(1) << 46) - 1) << 2)
+#endif
+
#endif
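
TTBR_BADDR_MASK_52 underpins the phys_to_ttbr() conversion added later in pgtable.h: bits [51:48] of a 52-bit table address are folded into TTBR bits [5:2] (a shift right by 46 moves bit 48 to bit 2), which page alignment leaves free. A runnable sketch of the folding, with an illustrative address:

#include <stdint.h>
#include <stdio.h>

#define TTBR_BADDR_MASK_52 (((1ULL << 46) - 1) << 2)	/* bits [47:2] */

static uint64_t phys_to_ttbr52(uint64_t addr)
{
	/* Fold PA bits [51:48] down into [5:2], keep [47:2] as-is. */
	return (addr | (addr >> 46)) & TTBR_BADDR_MASK_52;
}

int main(void)
{
	uint64_t pgd = (0xAULL << 48) | 0x10000;  /* 52-bit, 64KB-aligned */

	printf("phys %#llx -> ttbr baddr %#llx\n",
	       (unsigned long long)pgd,
	       (unsigned long long)phys_to_ttbr52(pgd));
	return 0;
}
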
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 0a5635fb0ef9..22a926825e3f 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -34,8 +34,16 @@
#include <asm/pgtable-types.h>
-#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
-#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+#define _PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
+#define _PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+#define PROT_DEFAULT (_PROT_DEFAULT | PTE_NG)
+#define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_SECT_NG)
+#else
+#define PROT_DEFAULT _PROT_DEFAULT
+#define PROT_SECT_DEFAULT _PROT_SECT_DEFAULT
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
#define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
@@ -48,6 +56,7 @@
#define PROT_SECT_NORMAL_EXEC (PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
#define _PAGE_DEFAULT (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
+#define _HYP_PAGE_DEFAULT (_PAGE_DEFAULT & ~PTE_NG)
#define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
#define PAGE_KERNEL_RO __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
@@ -55,15 +64,15 @@
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
#define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
-#define PAGE_HYP __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN)
-#define PAGE_HYP_EXEC __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY)
-#define PAGE_HYP_RO __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
+#define PAGE_HYP __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN)
+#define PAGE_HYP_EXEC __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY)
+#define PAGE_HYP_RO __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
#define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
#define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
#define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
-#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN)
+#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
#define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
#define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index bdcc7f1c9d06..89167c43ebb5 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -59,9 +59,22 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte))
-#define pte_pfn(pte) ((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
+/*
+ * Macros to convert between a physical address and its placement in a
+ * page table entry, taking care of 52-bit addresses.
+ */
+#ifdef CONFIG_ARM64_PA_BITS_52
+#define __pte_to_phys(pte) \
+ ((pte_val(pte) & PTE_ADDR_LOW) | ((pte_val(pte) & PTE_ADDR_HIGH) << 36))
+#define __phys_to_pte_val(phys) (((phys) | ((phys) >> 36)) & PTE_ADDR_MASK)
+#else
+#define __pte_to_phys(pte) (pte_val(pte) & PTE_ADDR_MASK)
+#define __phys_to_pte_val(phys) (phys)
+#endif
-#define pfn_pte(pfn,prot) (__pte(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+#define pte_pfn(pte) (__pte_to_phys(pte) >> PAGE_SHIFT)
+#define pfn_pte(pfn,prot) \
+ __pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pte_none(pte) (!pte_val(pte))
#define pte_clear(mm,addr,ptep) set_pte(ptep, __pte(0))
@@ -292,6 +305,11 @@ static inline int pte_same(pte_t pte_a, pte_t pte_b)
#define __HAVE_ARCH_PTE_SPECIAL
+static inline pte_t pgd_pte(pgd_t pgd)
+{
+ return __pte(pgd_val(pgd));
+}
+
static inline pte_t pud_pte(pud_t pud)
{
return __pte(pud_val(pud));
@@ -357,15 +375,24 @@ static inline int pmd_protnone(pmd_t pmd)
#define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
-#define pmd_pfn(pmd) (((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
-#define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+#define __pmd_to_phys(pmd) __pte_to_phys(pmd_pte(pmd))
+#define __phys_to_pmd_val(phys) __phys_to_pte_val(phys)
+#define pmd_pfn(pmd) ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
+#define pfn_pmd(pfn,prot) __pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
#define pud_write(pud) pte_write(pud_pte(pud))
-#define pud_pfn(pud) (((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
+
+#define __pud_to_phys(pud) __pte_to_phys(pud_pte(pud))
+#define __phys_to_pud_val(phys) __phys_to_pte_val(phys)
+#define pud_pfn(pud) ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
+#define pfn_pud(pfn,prot) __pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define set_pmd_at(mm, addr, pmdp, pmd) set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))
+#define __pgd_to_phys(pgd) __pte_to_phys(pgd_pte(pgd))
+#define __phys_to_pgd_val(phys) __phys_to_pte_val(phys)
+
#define __pgprot_modify(prot,mask,bits) \
__pgprot((pgprot_val(prot) & ~(mask)) | (bits))
@@ -416,7 +443,7 @@ static inline void pmd_clear(pmd_t *pmdp)
static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
{
- return pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK;
+ return __pmd_to_phys(pmd);
}
/* Find an entry in the third-level page table. */
@@ -434,7 +461,7 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
#define pte_set_fixmap_offset(pmd, addr) pte_set_fixmap(pte_offset_phys(pmd, addr))
#define pte_clear_fixmap() clear_fixmap(FIX_PTE)
-#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
+#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(__pmd_to_phys(pmd)))
/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr) ((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))
@@ -467,7 +494,7 @@ static inline void pud_clear(pud_t *pudp)
static inline phys_addr_t pud_page_paddr(pud_t pud)
{
- return pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK;
+ return __pud_to_phys(pud);
}
/* Find an entry in the second-level page table. */
@@ -480,7 +507,7 @@ static inline phys_addr_t pud_page_paddr(pud_t pud)
#define pmd_set_fixmap_offset(pud, addr) pmd_set_fixmap(pmd_offset_phys(pud, addr))
#define pmd_clear_fixmap() clear_fixmap(FIX_PMD)
-#define pud_page(pud) pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))
+#define pud_page(pud) pfn_to_page(__phys_to_pfn(__pud_to_phys(pud)))
/* use ONLY for statically allocated translation tables */
#define pmd_offset_kimg(dir,addr) ((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))
@@ -519,7 +546,7 @@ static inline void pgd_clear(pgd_t *pgdp)
static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
{
- return pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK;
+ return __pgd_to_phys(pgd);
}
/* Find an entry in the first-level page table. */
@@ -532,7 +559,7 @@ static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
#define pud_set_fixmap_offset(pgd, addr) pud_set_fixmap(pud_offset_phys(pgd, addr))
#define pud_clear_fixmap() clear_fixmap(FIX_PUD)
-#define pgd_page(pgd) pfn_to_page(__phys_to_pfn(pgd_val(pgd) & PHYS_MASK))
+#define pgd_page(pgd) pfn_to_page(__phys_to_pfn(__pgd_to_phys(pgd)))
/* use ONLY for statically allocated translation tables */
#define pud_offset_kimg(dir,addr) ((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))
@@ -682,7 +709,9 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
#endif
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+extern pgd_t swapper_pg_end[];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
+extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
/*
* Encode and decode a swap entry:
@@ -736,6 +765,12 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
#define kc_vaddr_to_offset(v) ((v) & ~VA_START)
#define kc_offset_to_vaddr(o) ((o) | VA_START)
+#ifdef CONFIG_ARM64_PA_BITS_52
+#define phys_to_ttbr(addr) (((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
+#else
+#define phys_to_ttbr(addr) (addr)
+#endif
+
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_PGTABLE_H */
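
The new __phys_to_pte_val()/__pte_to_phys() pair is easiest to see as a round trip: with 52-bit PAs (which require 64KB pages), PA bits [51:48] are stashed in PTE bits [15:12], beneath the usual [47:16] output-address field. A self-contained sketch of both directions:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	16				/* 64KB pages */
#define PTE_ADDR_LOW	(((1ULL << (48 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
#define PTE_ADDR_HIGH	(0xFULL << 12)			/* PA[51:48] home */
#define PTE_ADDR_MASK	(PTE_ADDR_LOW | PTE_ADDR_HIGH)

static uint64_t phys_to_pte_val(uint64_t phys)
{
	return (phys | (phys >> 36)) & PTE_ADDR_MASK;	/* 48 - 12 = 36 */
}

static uint64_t pte_to_phys(uint64_t pte)
{
	return (pte & PTE_ADDR_LOW) | ((pte & PTE_ADDR_HIGH) << 36);
}

int main(void)
{
	uint64_t phys = (0xFULL << 48) | (0x1234ULL << PAGE_SHIFT);
	uint64_t pte = phys_to_pte_val(phys);

	printf("phys %#llx -> pte addr bits %#llx -> phys %#llx\n",
	       (unsigned long long)phys, (unsigned long long)pte,
	       (unsigned long long)pte_to_phys(pte));
	return 0;
}
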
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 14ad6e4e87d1..16cef2e8449e 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -35,12 +35,6 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
#include <asm/memory.h>
-#define cpu_switch_mm(pgd,mm) \
-do { \
- BUG_ON(pgd == swapper_pg_dir); \
- cpu_do_switch_mm(virt_to_phys(pgd),mm); \
-} while (0)
-
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* __ASM_PROCFNS_H */
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 023cacb946c3..cee4ae25a5d1 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -216,6 +216,7 @@ static inline void spin_lock_prefetch(const void *ptr)
int cpu_enable_pan(void *__unused);
int cpu_enable_cache_maint_trap(void *__unused);
+int cpu_clear_disr(void *__unused);
/* Userspace interface for PR_SVE_{SET,GET}_VL prctl()s: */
#define SVE_SET_VL(arg) sve_set_current_vl(arg)
diff --git a/arch/arm64/include/asm/sdei.h b/arch/arm64/include/asm/sdei.h
new file mode 100644
index 000000000000..e073e6886685
--- /dev/null
+++ b/arch/arm64/include/asm/sdei.h
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2017 Arm Ltd.
+#ifndef __ASM_SDEI_H
+#define __ASM_SDEI_H
+
+/* Values for sdei_exit_mode */
+#define SDEI_EXIT_HVC 0
+#define SDEI_EXIT_SMC 1
+
+#define SDEI_STACK_SIZE IRQ_STACK_SIZE
+
+#ifndef __ASSEMBLY__
+
+#include <linux/linkage.h>
+#include <linux/preempt.h>
+#include <linux/types.h>
+
+#include <asm/virt.h>
+
+extern unsigned long sdei_exit_mode;
+
+/* Software Delegated Exception entry point from firmware */
+asmlinkage void __sdei_asm_handler(unsigned long event_num, unsigned long arg,
+ unsigned long pc, unsigned long pstate);
+
+/* and its CONFIG_UNMAP_KERNEL_AT_EL0 trampoline */
+asmlinkage void __sdei_asm_entry_trampoline(unsigned long event_num,
+ unsigned long arg,
+ unsigned long pc,
+ unsigned long pstate);
+
+/*
+ * The above entry point does the minimum to call C code. This function does
+ * anything else, before calling the driver.
+ */
+struct sdei_registered_event;
+asmlinkage unsigned long __sdei_handler(struct pt_regs *regs,
+ struct sdei_registered_event *arg);
+
+unsigned long sdei_arch_get_entry_point(int conduit);
+#define sdei_arch_get_entry_point(x) sdei_arch_get_entry_point(x)
+
+bool _on_sdei_stack(unsigned long sp);
+static inline bool on_sdei_stack(unsigned long sp)
+{
+ if (!IS_ENABLED(CONFIG_VMAP_STACK))
+ return false;
+ if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
+ return false;
+ if (in_nmi())
+ return _on_sdei_stack(sp);
+
+ return false;
+}
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_SDEI_H */
diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h
index 941267caa39c..caab039d6305 100644
--- a/arch/arm64/include/asm/sections.h
+++ b/arch/arm64/include/asm/sections.h
@@ -28,5 +28,6 @@ extern char __initdata_begin[], __initdata_end[];
extern char __inittext_begin[], __inittext_end[];
extern char __irqentry_text_start[], __irqentry_text_end[];
extern char __mmuoff_data_start[], __mmuoff_data_end[];
+extern char __entry_tramp_text_start[], __entry_tramp_text_end[];
#endif /* __ASM_SECTIONS_H */
diff --git a/arch/arm64/include/asm/sparsemem.h b/arch/arm64/include/asm/sparsemem.h
index 74a9d301819f..b299929fe56c 100644
--- a/arch/arm64/include/asm/sparsemem.h
+++ b/arch/arm64/include/asm/sparsemem.h
@@ -17,7 +17,7 @@
#define __ASM_SPARSEMEM_H
#ifdef CONFIG_SPARSEMEM
-#define MAX_PHYSMEM_BITS 48
+#define MAX_PHYSMEM_BITS CONFIG_ARM64_PA_BITS
#define SECTION_SIZE_BITS 30
#endif
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index 6ad30776e984..472ef944e932 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -22,6 +22,7 @@
#include <asm/memory.h>
#include <asm/ptrace.h>
+#include <asm/sdei.h>
struct stackframe {
unsigned long fp;
@@ -85,6 +86,8 @@ static inline bool on_accessible_stack(struct task_struct *tsk, unsigned long sp
return true;
if (on_overflow_stack(sp))
return true;
+ if (on_sdei_stack(sp))
+ return true;
return false;
}
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 08cc88574659..0e1960c59197 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -20,6 +20,7 @@
#ifndef __ASM_SYSREG_H
#define __ASM_SYSREG_H
+#include <asm/compiler.h>
#include <linux/stringify.h>
/*
@@ -175,6 +176,16 @@
#define SYS_AFSR0_EL1 sys_reg(3, 0, 5, 1, 0)
#define SYS_AFSR1_EL1 sys_reg(3, 0, 5, 1, 1)
#define SYS_ESR_EL1 sys_reg(3, 0, 5, 2, 0)
+
+#define SYS_ERRIDR_EL1 sys_reg(3, 0, 5, 3, 0)
+#define SYS_ERRSELR_EL1 sys_reg(3, 0, 5, 3, 1)
+#define SYS_ERXFR_EL1 sys_reg(3, 0, 5, 4, 0)
+#define SYS_ERXCTLR_EL1 sys_reg(3, 0, 5, 4, 1)
+#define SYS_ERXSTATUS_EL1 sys_reg(3, 0, 5, 4, 2)
+#define SYS_ERXADDR_EL1 sys_reg(3, 0, 5, 4, 3)
+#define SYS_ERXMISC0_EL1 sys_reg(3, 0, 5, 5, 0)
+#define SYS_ERXMISC1_EL1 sys_reg(3, 0, 5, 5, 1)
+
#define SYS_FAR_EL1 sys_reg(3, 0, 6, 0, 0)
#define SYS_PAR_EL1 sys_reg(3, 0, 7, 4, 0)
@@ -278,6 +289,7 @@
#define SYS_AMAIR_EL1 sys_reg(3, 0, 10, 3, 0)
#define SYS_VBAR_EL1 sys_reg(3, 0, 12, 0, 0)
+#define SYS_DISR_EL1 sys_reg(3, 0, 12, 1, 1)
#define SYS_ICC_IAR0_EL1 sys_reg(3, 0, 12, 8, 0)
#define SYS_ICC_EOIR0_EL1 sys_reg(3, 0, 12, 8, 1)
@@ -353,8 +365,10 @@
#define SYS_DACR32_EL2 sys_reg(3, 4, 3, 0, 0)
#define SYS_IFSR32_EL2 sys_reg(3, 4, 5, 0, 1)
+#define SYS_VSESR_EL2 sys_reg(3, 4, 5, 2, 3)
#define SYS_FPEXC32_EL2 sys_reg(3, 4, 5, 3, 0)
+#define SYS_VDISR_EL2 sys_reg(3, 4, 12, 1, 1)
#define __SYS__AP0Rx_EL2(x) sys_reg(3, 4, 12, 8, x)
#define SYS_ICH_AP0R0_EL2 __SYS__AP0Rx_EL2(0)
#define SYS_ICH_AP0R1_EL2 __SYS__AP0Rx_EL2(1)
@@ -398,27 +412,85 @@
/* Common SCTLR_ELx flags. */
#define SCTLR_ELx_EE (1 << 25)
+#define SCTLR_ELx_IESB (1 << 21)
+#define SCTLR_ELx_WXN (1 << 19)
#define SCTLR_ELx_I (1 << 12)
#define SCTLR_ELx_SA (1 << 3)
#define SCTLR_ELx_C (1 << 2)
#define SCTLR_ELx_A (1 << 1)
#define SCTLR_ELx_M 1
+#define SCTLR_ELx_FLAGS (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
+ SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_IESB)
+
+/* SCTLR_EL2 specific flags. */
#define SCTLR_EL2_RES1 ((1 << 4) | (1 << 5) | (1 << 11) | (1 << 16) | \
(1 << 18) | (1 << 22) | (1 << 23) | (1 << 28) | \
(1 << 29))
+#define SCTLR_EL2_RES0 ((1 << 6) | (1 << 7) | (1 << 8) | (1 << 9) | \
+ (1 << 10) | (1 << 13) | (1 << 14) | (1 << 15) | \
+ (1 << 17) | (1 << 20) | (1 << 24) | (1 << 26) | \
+ (1 << 27) | (1 << 30) | (1 << 31))
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define ENDIAN_SET_EL2 SCTLR_ELx_EE
+#define ENDIAN_CLEAR_EL2 0
+#else
+#define ENDIAN_SET_EL2 0
+#define ENDIAN_CLEAR_EL2 SCTLR_ELx_EE
+#endif
+
+/* SCTLR_EL2 value used for the hyp-stub */
+#define SCTLR_EL2_SET (SCTLR_ELx_IESB | ENDIAN_SET_EL2 | SCTLR_EL2_RES1)
+#define SCTLR_EL2_CLEAR (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
+ SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_WXN | \
+ ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0)
+
+/* Check all the bits are accounted for */
+#define SCTLR_EL2_BUILD_BUG_ON_MISSING_BITS BUILD_BUG_ON((SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != ~0)
-#define SCTLR_ELx_FLAGS (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
- SCTLR_ELx_SA | SCTLR_ELx_I)
/* SCTLR_EL1 specific flags. */
#define SCTLR_EL1_UCI (1 << 26)
+#define SCTLR_EL1_E0E (1 << 24)
#define SCTLR_EL1_SPAN (1 << 23)
+#define SCTLR_EL1_NTWE (1 << 18)
+#define SCTLR_EL1_NTWI (1 << 16)
#define SCTLR_EL1_UCT (1 << 15)
+#define SCTLR_EL1_DZE (1 << 14)
+#define SCTLR_EL1_UMA (1 << 9)
#define SCTLR_EL1_SED (1 << 8)
+#define SCTLR_EL1_ITD (1 << 7)
#define SCTLR_EL1_CP15BEN (1 << 5)
+#define SCTLR_EL1_SA0 (1 << 4)
+
+#define SCTLR_EL1_RES1 ((1 << 11) | (1 << 20) | (1 << 22) | (1 << 28) | \
+ (1 << 29))
+#define SCTLR_EL1_RES0 ((1 << 6) | (1 << 10) | (1 << 13) | (1 << 17) | \
+ (1 << 27) | (1 << 30) | (1 << 31))
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define ENDIAN_SET_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE)
+#define ENDIAN_CLEAR_EL1 0
+#else
+#define ENDIAN_SET_EL1 0
+#define ENDIAN_CLEAR_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE)
+#endif
+
+#define SCTLR_EL1_SET (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA |\
+ SCTLR_EL1_SA0 | SCTLR_EL1_SED | SCTLR_ELx_I |\
+ SCTLR_EL1_DZE | SCTLR_EL1_UCT | SCTLR_EL1_NTWI |\
+ SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN |\
+ ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_RES1)
+#define SCTLR_EL1_CLEAR (SCTLR_ELx_A | SCTLR_EL1_CP15BEN | SCTLR_EL1_ITD |\
+ SCTLR_EL1_UMA | SCTLR_ELx_WXN | ENDIAN_CLEAR_EL1 |\
+ SCTLR_EL1_RES0)
+
+/* Check all the bits are accounted for */
+#define SCTLR_EL1_BUILD_BUG_ON_MISSING_BITS BUILD_BUG_ON((SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != ~0)
/* id_aa64isar0 */
+#define ID_AA64ISAR0_FHM_SHIFT 48
#define ID_AA64ISAR0_DP_SHIFT 44
#define ID_AA64ISAR0_SM4_SHIFT 40
#define ID_AA64ISAR0_SM3_SHIFT 36
@@ -437,7 +509,10 @@
#define ID_AA64ISAR1_DPB_SHIFT 0
/* id_aa64pfr0 */
+#define ID_AA64PFR0_CSV3_SHIFT 60
+#define ID_AA64PFR0_CSV2_SHIFT 56
#define ID_AA64PFR0_SVE_SHIFT 32
+#define ID_AA64PFR0_RAS_SHIFT 28
#define ID_AA64PFR0_GIC_SHIFT 24
#define ID_AA64PFR0_ASIMD_SHIFT 20
#define ID_AA64PFR0_FP_SHIFT 16
@@ -447,6 +522,7 @@
#define ID_AA64PFR0_EL0_SHIFT 0
#define ID_AA64PFR0_SVE 0x1
+#define ID_AA64PFR0_RAS_V1 0x1
#define ID_AA64PFR0_FP_NI 0xf
#define ID_AA64PFR0_FP_SUPPORTED 0x0
#define ID_AA64PFR0_ASIMD_NI 0xf
@@ -471,6 +547,14 @@
#define ID_AA64MMFR0_TGRAN64_SUPPORTED 0x0
#define ID_AA64MMFR0_TGRAN16_NI 0x0
#define ID_AA64MMFR0_TGRAN16_SUPPORTED 0x1
+#define ID_AA64MMFR0_PARANGE_48 0x5
+#define ID_AA64MMFR0_PARANGE_52 0x6
+
+#ifdef CONFIG_ARM64_PA_BITS_52
+#define ID_AA64MMFR0_PARANGE_MAX ID_AA64MMFR0_PARANGE_52
+#else
+#define ID_AA64MMFR0_PARANGE_MAX ID_AA64MMFR0_PARANGE_48
+#endif
/* id_aa64mmfr1 */
#define ID_AA64MMFR1_PAN_SHIFT 20
@@ -582,6 +666,7 @@
#else
+#include <linux/build_bug.h>
#include <linux/types.h>
asm(
@@ -638,6 +723,9 @@ static inline void config_sctlr_el1(u32 clear, u32 set)
{
u32 val;
+ SCTLR_EL2_BUILD_BUG_ON_MISSING_BITS;
+ SCTLR_EL1_BUILD_BUG_ON_MISSING_BITS;
+
val = read_sysreg(sctlr_el1);
val &= ~clear;
val |= set;
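
The *_BUILD_BUG_ON_MISSING_BITS checks rest on a small identity: XORing the SET and CLEAR masks yields all-ones exactly when every register bit appears in one mask and only one. A toy compile-time version of the same check, shrunk to 8 bits:

#include <stdint.h>

#define TOY_SET		0x5AU	/* bits required to be 1 */
#define TOY_CLEAR	0xA5U	/* bits required to be 0 */

/* Fails to build if any bit is missing from both masks, or in both. */
_Static_assert(((TOY_SET ^ TOY_CLEAR) & 0xFFU) == 0xFFU,
	       "every register bit must be claimed by exactly one mask");

int main(void)
{
	return 0;
}
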
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index eb431286bacd..740aa03c5f0d 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -51,8 +51,6 @@ struct thread_info {
.addr_limit = KERNEL_DS, \
}
-#define init_stack (init_thread_union.stack)
-
#define thread_saved_pc(tsk) \
((unsigned long)(tsk->thread.cpu_context.pc))
#define thread_saved_sp(tsk) \
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index af1c76981911..9e82dd79c7db 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -23,6 +23,7 @@
#include <linux/sched.h>
#include <asm/cputype.h>
+#include <asm/mmu.h>
/*
* Raw TLBI operations.
@@ -54,6 +55,11 @@
#define __tlbi(op, ...) __TLBI_N(op, ##__VA_ARGS__, 1, 0)
+#define __tlbi_user(op, arg) do { \
+ if (arm64_kernel_unmapped_at_el0()) \
+ __tlbi(op, (arg) | USER_ASID_FLAG); \
+} while (0)
+
/*
* TLB Management
* ==============
@@ -115,6 +121,7 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
dsb(ishst);
__tlbi(aside1is, asid);
+ __tlbi_user(aside1is, asid);
dsb(ish);
}
@@ -125,6 +132,7 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
dsb(ishst);
__tlbi(vale1is, addr);
+ __tlbi_user(vale1is, addr);
dsb(ish);
}
@@ -151,10 +159,13 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
dsb(ishst);
for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
- if (last_level)
+ if (last_level) {
__tlbi(vale1is, addr);
- else
+ __tlbi_user(vale1is, addr);
+ } else {
__tlbi(vae1is, addr);
+ __tlbi_user(vae1is, addr);
+ }
}
dsb(ish);
}
@@ -194,6 +205,7 @@ static inline void __flush_tlb_pgtable(struct mm_struct *mm,
unsigned long addr = uaddr >> 12 | (ASID(mm) << 48);
__tlbi(vae1is, addr);
+ __tlbi_user(vae1is, addr);
dsb(ish);
}
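
__tlbi_user() works because KPTI hands out ASIDs in kernel/user pairs and a TLBI argument carries the ASID in bits [63:48]; ORing in USER_ASID_FLAG (bit 48) re-aims the same invalidation at the user half of the pair. A sketch of the encoding, with an illustrative ASID:

#include <stdint.h>
#include <stdio.h>

#define USER_ASID_FLAG	(1ULL << 48)

int main(void)
{
	uint64_t asid = 0x2a;			/* kernel ASID (even) */
	uint64_t arg = asid << 48;		/* e.g. an aside1is argument */

	printf("kernel-ASID TLBI arg: %#llx\n", (unsigned long long)arg);
	printf("user-ASID   TLBI arg: %#llx\n",
	       (unsigned long long)(arg | USER_ASID_FLAG));
	return 0;
}
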
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
index 1696f9de9359..178e338d2889 100644
--- a/arch/arm64/include/asm/traps.h
+++ b/arch/arm64/include/asm/traps.h
@@ -19,6 +19,7 @@
#define __ASM_TRAP_H
#include <linux/list.h>
+#include <asm/esr.h>
#include <asm/sections.h>
struct pt_regs;
@@ -66,4 +67,57 @@ static inline int in_entry_text(unsigned long ptr)
return ptr >= (unsigned long)&__entry_text_start &&
ptr < (unsigned long)&__entry_text_end;
}
+
+/*
+ * CPUs with the RAS extensions have an Implementation-Defined-Syndrome bit
+ * to indicate whether this ESR has a RAS encoding. CPUs without this feature
+ * have an ISS-Valid bit in the same position.
+ * If this bit is set, we know it's not a RAS SError.
+ * If it's clear, we need to know if the CPU supports RAS. Uncategorized RAS
+ * errors share the same encoding as an all-zeros encoding from a CPU that
+ * doesn't support RAS.
+ */
+static inline bool arm64_is_ras_serror(u32 esr)
+{
+ WARN_ON(preemptible());
+
+ if (esr & ESR_ELx_IDS)
+ return false;
+
+ if (this_cpu_has_cap(ARM64_HAS_RAS_EXTN))
+ return true;
+ else
+ return false;
+}
+
+/*
+ * Return the AET bits from a RAS SError's ESR.
+ *
+ * It is implementation defined whether Uncategorized errors are containable.
+ * We treat them as Uncontainable.
+ * Non-RAS SErrors are reported as Uncontained/Uncategorized.
+ */
+static inline u32 arm64_ras_serror_get_severity(u32 esr)
+{
+ u32 aet = esr & ESR_ELx_AET;
+
+ if (!arm64_is_ras_serror(esr)) {
+ /* Not a RAS error, we can't interpret the ESR. */
+ return ESR_ELx_AET_UC;
+ }
+
+ /*
+ * AET is RES0 if 'the value returned in the DFSC field is not
+ * [ESR_ELx_FSC_SERROR]'
+ */
+ if ((esr & ESR_ELx_FSC) != ESR_ELx_FSC_SERROR) {
+ /* No severity information: Uncategorized */
+ return ESR_ELx_AET_UC;
+ }
+
+ return aet;
+}
+
+bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr);
+void __noreturn arm64_serror_panic(struct pt_regs *regs, u32 esr);
#endif
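
The severity logic above collapses to a short decision chain: anything we cannot interpret is treated as worst case. A userspace sketch using the ARMv8.2 RAS ESR layout (IDS at bit 24, AET at bits [12:10], DFSC at bits [5:0]; take the exact constants as assumptions of this sketch):

#include <stdint.h>
#include <stdio.h>

#define ESR_IDS		(1U << 24)
#define ESR_AET		(0x7U << 10)
#define ESR_FSC		0x3FU
#define ESR_FSC_SERROR	0x11U
#define AET_UC		(0U << 10)	/* Uncontainable */
#define AET_CE		(6U << 10)	/* Corrected */

static uint32_t serror_severity(uint32_t esr, int cpu_has_ras)
{
	if ((esr & ESR_IDS) || !cpu_has_ras)
		return AET_UC;		/* can't interpret: assume the worst */
	if ((esr & ESR_FSC) != ESR_FSC_SERROR)
		return AET_UC;		/* AET is RES0: Uncategorized */
	return esr & ESR_AET;
}

int main(void)
{
	uint32_t esr = AET_CE | ESR_FSC_SERROR;	/* a corrected RAS error */

	printf("severity bits: %#x\n", serror_severity(esr, 1));
	return 0;
}
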
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index fc0f9eb66039..59fda5292936 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -105,17 +105,23 @@ static inline void set_fs(mm_segment_t fs)
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void __uaccess_ttbr0_disable(void)
{
- unsigned long ttbr;
+ unsigned long flags, ttbr;
- /* reserved_ttbr0 placed at the end of swapper_pg_dir */
- ttbr = read_sysreg(ttbr1_el1) + SWAPPER_DIR_SIZE;
- write_sysreg(ttbr, ttbr0_el1);
+ local_irq_save(flags);
+ ttbr = read_sysreg(ttbr1_el1);
+ ttbr &= ~TTBR_ASID_MASK;
+ /* reserved_ttbr0 placed before swapper_pg_dir */
+ write_sysreg(ttbr - RESERVED_TTBR0_SIZE, ttbr0_el1);
+ isb();
+ /* Set reserved ASID */
+ write_sysreg(ttbr, ttbr1_el1);
isb();
+ local_irq_restore(flags);
}
static inline void __uaccess_ttbr0_enable(void)
{
- unsigned long flags;
+ unsigned long flags, ttbr0, ttbr1;
/*
* Disable interrupts to avoid preemption between reading the 'ttbr0'
@@ -123,7 +129,17 @@ static inline void __uaccess_ttbr0_enable(void)
* roll-over and an update of 'ttbr0'.
*/
local_irq_save(flags);
- write_sysreg(current_thread_info()->ttbr0, ttbr0_el1);
+ ttbr0 = READ_ONCE(current_thread_info()->ttbr0);
+
+ /* Restore active ASID */
+ ttbr1 = read_sysreg(ttbr1_el1);
+ ttbr1 &= ~TTBR_ASID_MASK; /* safety measure */
+ ttbr1 |= ttbr0 & TTBR_ASID_MASK;
+ write_sysreg(ttbr1, ttbr1_el1);
+ isb();
+
+ /* Restore user page table */
+ write_sysreg(ttbr0, ttbr0_el1);
isb();
local_irq_restore(flags);
}
@@ -155,6 +171,18 @@ static inline bool uaccess_ttbr0_enable(void)
}
#endif
+static inline void __uaccess_disable_hw_pan(void)
+{
+ asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,
+ CONFIG_ARM64_PAN));
+}
+
+static inline void __uaccess_enable_hw_pan(void)
+{
+ asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
+ CONFIG_ARM64_PAN));
+}
+
#define __uaccess_disable(alt) \
do { \
if (!uaccess_ttbr0_disable()) \
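
The reworked __uaccess_ttbr0_enable() keeps two registers consistent: the saved per-thread ttbr0 carries both the page-table address and the ASID in bits [63:48], and the ASID half is merged into TTBR1_EL1 so kernel accesses run under the current address space's ASID. A sketch of just the bit bookkeeping, with illustrative addresses:

#include <stdint.h>
#include <stdio.h>

#define TTBR_ASID_MASK	(0xFFFFULL << 48)

int main(void)
{
	uint64_t saved_ttbr0 = (0x2aULL << 48) | 0x40001000; /* ASID | pgd */
	uint64_t ttbr1 = 0x40200000;		/* swapper_pg_dir, no ASID */

	ttbr1 &= ~TTBR_ASID_MASK;		/* drop any stale ASID */
	ttbr1 |= saved_ttbr0 & TTBR_ASID_MASK;	/* adopt the active ASID */

	printf("ttbr0=%#llx ttbr1=%#llx\n",
	       (unsigned long long)saved_ttbr0, (unsigned long long)ttbr1);
	return 0;
}
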
diff --git a/arch/arm64/include/asm/vmap_stack.h b/arch/arm64/include/asm/vmap_stack.h
new file mode 100644
index 000000000000..0b5ec6e08c10
--- /dev/null
+++ b/arch/arm64/include/asm/vmap_stack.h
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2017 Arm Ltd.
+#ifndef __ASM_VMAP_STACK_H
+#define __ASM_VMAP_STACK_H
+
+#include <linux/bug.h>
+#include <linux/gfp.h>
+#include <linux/kconfig.h>
+#include <linux/vmalloc.h>
+#include <asm/memory.h>
+#include <asm/pgtable.h>
+#include <asm/thread_info.h>
+
+/*
+ * To ensure that VMAP'd stack overflow detection works correctly, all VMAP'd
+ * stacks need to have the same alignment.
+ */
+static inline unsigned long *arch_alloc_vmap_stack(size_t stack_size, int node)
+{
+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_VMAP_STACK));
+
+ return __vmalloc_node_range(stack_size, THREAD_ALIGN,
+ VMALLOC_START, VMALLOC_END,
+ THREADINFO_GFP, PAGE_KERNEL, 0, node,
+ __builtin_return_address(0));
+}
+
+#endif /* __ASM_VMAP_STACK_H */
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index cda76fa8b9b2..f018c3deea3b 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -43,5 +43,6 @@
#define HWCAP_ASIMDDP (1 << 20)
#define HWCAP_SHA512 (1 << 21)
#define HWCAP_SVE (1 << 22)
+#define HWCAP_ASIMDFHM (1 << 23)
#endif /* _UAPI__ASM_HWCAP_H */
diff --git a/arch/arm64/include/uapi/asm/siginfo.h b/arch/arm64/include/uapi/asm/siginfo.h
index 574d12f86039..9b4d91277742 100644
--- a/arch/arm64/include/uapi/asm/siginfo.h
+++ b/arch/arm64/include/uapi/asm/siginfo.h
@@ -21,4 +21,25 @@
#include <asm-generic/siginfo.h>
+/*
+ * SIGFPE si_codes
+ */
+#ifdef __KERNEL__
+#define FPE_FIXME 0 /* Broken dup of SI_USER */
+#endif /* __KERNEL__ */
+
+/*
+ * SIGBUS si_codes
+ */
+#ifdef __KERNEL__
+#define BUS_FIXME 0 /* Broken dup of SI_USER */
+#endif /* __KERNEL__ */
+
+/*
+ * SIGTRAP si_codes
+ */
+#ifdef __KERNEL__
+#define TRAP_FIXME 0 /* Broken dup of SI_USER */
+#endif /* __KERNEL__ */
+
#endif
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 067baace74a0..b87541360f43 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -52,6 +52,11 @@ arm64-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o \
arm64-obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o
arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
arm64-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+arm64-obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o
+
+ifeq ($(CONFIG_KVM),y)
+arm64-obj-$(CONFIG_HARDEN_BRANCH_PREDICTOR) += bpi.o
+endif
obj-y += $(arm64-obj-y) vdso/ probes/
obj-m += $(arm64-obj-m)
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index b3162715ed78..252396a96c78 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -117,7 +117,7 @@ bool __init acpi_psci_present(void)
}
/* Whether HVC must be used instead of SMC as the PSCI conduit */
-bool __init acpi_psci_use_hvc(void)
+bool acpi_psci_use_hvc(void)
{
return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_USE_HVC;
}
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index 6dd0a3a3e5c9..414288a558c8 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -32,6 +32,8 @@
#define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset)
#define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset)
+int alternatives_applied;
+
struct alt_region {
struct alt_instr *begin;
struct alt_instr *end;
@@ -143,7 +145,6 @@ static void __apply_alternatives(void *alt_region, bool use_linear_alias)
*/
static int __apply_alternatives_multi_stop(void *unused)
{
- static int patched = 0;
struct alt_region region = {
.begin = (struct alt_instr *)__alt_instructions,
.end = (struct alt_instr *)__alt_instructions_end,
@@ -151,14 +152,14 @@ static int __apply_alternatives_multi_stop(void *unused)
/* We always have a CPU 0 at this point (__init) */
if (smp_processor_id()) {
- while (!READ_ONCE(patched))
+ while (!READ_ONCE(alternatives_applied))
cpu_relax();
isb();
} else {
- BUG_ON(patched);
+ BUG_ON(alternatives_applied);
__apply_alternatives(&region, true);
/* Barriers provided by the cache flushing */
- WRITE_ONCE(patched, 1);
+ WRITE_ONCE(alternatives_applied, 1);
}
return 0;
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 71bf088f1e4b..1303e04110cd 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -18,12 +18,14 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/arm_sdei.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kvm_host.h>
#include <linux/suspend.h>
#include <asm/cpufeature.h>
+#include <asm/fixmap.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/smp_plat.h>
@@ -130,6 +132,7 @@ int main(void)
BLANK();
#ifdef CONFIG_KVM_ARM_HOST
DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt));
+ DEFINE(VCPU_FAULT_DISR, offsetof(struct kvm_vcpu, arch.fault.disr_el1));
DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs));
DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs));
DEFINE(CPU_FP_REGS, offsetof(struct kvm_regs, fp_regs));
@@ -148,11 +151,18 @@ int main(void)
DEFINE(ARM_SMCCC_RES_X2_OFFS, offsetof(struct arm_smccc_res, a2));
DEFINE(ARM_SMCCC_QUIRK_ID_OFFS, offsetof(struct arm_smccc_quirk, id));
DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS, offsetof(struct arm_smccc_quirk, state));
-
BLANK();
DEFINE(HIBERN_PBE_ORIG, offsetof(struct pbe, orig_address));
DEFINE(HIBERN_PBE_ADDR, offsetof(struct pbe, address));
DEFINE(HIBERN_PBE_NEXT, offsetof(struct pbe, next));
DEFINE(ARM64_FTR_SYSVAL, offsetof(struct arm64_ftr_reg, sys_val));
+ BLANK();
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ DEFINE(TRAMP_VALIAS, TRAMP_VALIAS);
+#endif
+#ifdef CONFIG_ARM_SDE_INTERFACE
+ DEFINE(SDEI_EVENT_INTREGS, offsetof(struct sdei_registered_event, interrupted_regs));
+ DEFINE(SDEI_EVENT_PRIORITY, offsetof(struct sdei_registered_event, priority));
+#endif
return 0;
}
diff --git a/arch/arm64/kernel/bpi.S b/arch/arm64/kernel/bpi.S
new file mode 100644
index 000000000000..76225c2611ea
--- /dev/null
+++ b/arch/arm64/kernel/bpi.S
@@ -0,0 +1,87 @@
+/*
+ * Contains CPU specific branch predictor invalidation sequences
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+
+.macro ventry target
+ .rept 31
+ nop
+ .endr
+ b \target
+.endm
+
+.macro vectors target
+ ventry \target + 0x000
+ ventry \target + 0x080
+ ventry \target + 0x100
+ ventry \target + 0x180
+
+ ventry \target + 0x200
+ ventry \target + 0x280
+ ventry \target + 0x300
+ ventry \target + 0x380
+
+ ventry \target + 0x400
+ ventry \target + 0x480
+ ventry \target + 0x500
+ ventry \target + 0x580
+
+ ventry \target + 0x600
+ ventry \target + 0x680
+ ventry \target + 0x700
+ ventry \target + 0x780
+.endm
+
+ .align 11
+ENTRY(__bp_harden_hyp_vecs_start)
+ .rept 4
+ vectors __kvm_hyp_vector
+ .endr
+ENTRY(__bp_harden_hyp_vecs_end)
+ENTRY(__psci_hyp_bp_inval_start)
+ sub sp, sp, #(8 * 18)
+ stp x16, x17, [sp, #(16 * 0)]
+ stp x14, x15, [sp, #(16 * 1)]
+ stp x12, x13, [sp, #(16 * 2)]
+ stp x10, x11, [sp, #(16 * 3)]
+ stp x8, x9, [sp, #(16 * 4)]
+ stp x6, x7, [sp, #(16 * 5)]
+ stp x4, x5, [sp, #(16 * 6)]
+ stp x2, x3, [sp, #(16 * 7)]
+ stp x0, x1, [sp, #(16 * 8)]
+ mov x0, #0x84000000
+ smc #0
+ ldp x16, x17, [sp, #(16 * 0)]
+ ldp x14, x15, [sp, #(16 * 1)]
+ ldp x12, x13, [sp, #(16 * 2)]
+ ldp x10, x11, [sp, #(16 * 3)]
+ ldp x8, x9, [sp, #(16 * 4)]
+ ldp x6, x7, [sp, #(16 * 5)]
+ ldp x4, x5, [sp, #(16 * 6)]
+ ldp x2, x3, [sp, #(16 * 7)]
+ ldp x0, x1, [sp, #(16 * 8)]
+ add sp, sp, #(8 * 18)
+ENTRY(__psci_hyp_bp_inval_end)
+
+ENTRY(__qcom_hyp_sanitize_link_stack_start)
+ stp x29, x30, [sp, #-16]!
+ .rept 16
+ bl . + 4
+ .endr
+ ldp x29, x30, [sp], #16
+ENTRY(__qcom_hyp_sanitize_link_stack_end)
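
The sizing in these templates is deliberate: each ventry pads to 0x80 bytes, sixteen entries form one 2KB vector set, and __copy_hyp_vect_bpi() in cpu_errata.c later stamps a hardening preamble over the leading nops of every entry, leaving the trailing branch intact. A quick check of the arithmetic:

#include <stdio.h>

int main(void)
{
	int insn_bytes = 4;
	int insns_per_entry = 31 + 1;		/* 31 nops + 1 branch */
	int entry_bytes = insn_bytes * insns_per_entry;
	int entries = 16;

	printf("entry size: %#x, vector set size: %#x\n",
	       entry_bytes, entry_bytes * entries);	/* 0x80 and 0x800 */
	return 0;
}
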
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 0e27f86ee709..ed6881882231 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -30,6 +30,20 @@ is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
entry->midr_range_max);
}
+static bool __maybe_unused
+is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
+{
+ u32 model;
+
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+
+ model = read_cpuid_id();
+ model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
+ MIDR_ARCHITECTURE_MASK;
+
+ return model == entry->midr_model;
+}
+
static bool
has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
int scope)
@@ -46,6 +60,127 @@ static int cpu_enable_trap_ctr_access(void *__unused)
return 0;
}
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+#include <asm/mmu_context.h>
+#include <asm/cacheflush.h>
+
+DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
+
+#ifdef CONFIG_KVM
+extern char __psci_hyp_bp_inval_start[], __psci_hyp_bp_inval_end[];
+extern char __qcom_hyp_sanitize_link_stack_start[];
+extern char __qcom_hyp_sanitize_link_stack_end[];
+
+static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
+ const char *hyp_vecs_end)
+{
+ void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
+ int i;
+
+ for (i = 0; i < SZ_2K; i += 0x80)
+ memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);
+
+ flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
+}
+
+static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
+ const char *hyp_vecs_start,
+ const char *hyp_vecs_end)
+{
+ static int last_slot = -1;
+ static DEFINE_SPINLOCK(bp_lock);
+ int cpu, slot = -1;
+
+ spin_lock(&bp_lock);
+ for_each_possible_cpu(cpu) {
+ if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
+ slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
+ break;
+ }
+ }
+
+ if (slot == -1) {
+ last_slot++;
+ BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
+ / SZ_2K) <= last_slot);
+ slot = last_slot;
+ __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
+ }
+
+ __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
+ __this_cpu_write(bp_hardening_data.fn, fn);
+ spin_unlock(&bp_lock);
+}
+#else
+#define __psci_hyp_bp_inval_start NULL
+#define __psci_hyp_bp_inval_end NULL
+#define __qcom_hyp_sanitize_link_stack_start NULL
+#define __qcom_hyp_sanitize_link_stack_end NULL
+
+static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
+ const char *hyp_vecs_start,
+ const char *hyp_vecs_end)
+{
+ __this_cpu_write(bp_hardening_data.fn, fn);
+}
+#endif /* CONFIG_KVM */
+
+static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
+ bp_hardening_cb_t fn,
+ const char *hyp_vecs_start,
+ const char *hyp_vecs_end)
+{
+ u64 pfr0;
+
+ if (!entry->matches(entry, SCOPE_LOCAL_CPU))
+ return;
+
+ pfr0 = read_cpuid(ID_AA64PFR0_EL1);
+ if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
+ return;
+
+ __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
+}
+
+#include <linux/psci.h>
+
+static int enable_psci_bp_hardening(void *data)
+{
+ const struct arm64_cpu_capabilities *entry = data;
+
+ if (psci_ops.get_version)
+ install_bp_hardening_cb(entry,
+ (bp_hardening_cb_t)psci_ops.get_version,
+ __psci_hyp_bp_inval_start,
+ __psci_hyp_bp_inval_end);
+
+ return 0;
+}
+
+static void qcom_link_stack_sanitization(void)
+{
+ u64 tmp;
+
+ asm volatile("mov %0, x30 \n"
+ ".rept 16 \n"
+ "bl . + 4 \n"
+ ".endr \n"
+ "mov x30, %0 \n"
+ : "=&r" (tmp));
+}
+
+static int qcom_enable_link_stack_sanitization(void *data)
+{
+ const struct arm64_cpu_capabilities *entry = data;
+
+ install_bp_hardening_cb(entry, qcom_link_stack_sanitization,
+ __qcom_hyp_sanitize_link_stack_start,
+ __qcom_hyp_sanitize_link_stack_end);
+
+ return 0;
+}
+#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
+
#define MIDR_RANGE(model, min, max) \
.def_scope = SCOPE_LOCAL_CPU, \
.matches = is_affected_midr_range, \
@@ -169,6 +304,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
MIDR_CPU_VAR_REV(0, 0),
MIDR_CPU_VAR_REV(0, 0)),
},
+ {
+ .desc = "Qualcomm Technologies Kryo erratum 1003",
+ .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
+ .def_scope = SCOPE_LOCAL_CPU,
+ .midr_model = MIDR_QCOM_KRYO,
+ .matches = is_kryo_midr,
+ },
#endif
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
{
@@ -187,6 +329,47 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
},
#endif
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+ {
+ .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+ .enable = enable_psci_bp_hardening,
+ },
+ {
+ .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+ .enable = enable_psci_bp_hardening,
+ },
+ {
+ .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+ .enable = enable_psci_bp_hardening,
+ },
+ {
+ .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
+ .enable = enable_psci_bp_hardening,
+ },
+ {
+ .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+ MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
+ .enable = qcom_enable_link_stack_sanitization,
+ },
+ {
+ .capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
+ MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
+ },
+ {
+ .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+ MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+ .enable = enable_psci_bp_hardening,
+ },
+ {
+ .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+ MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
+ .enable = enable_psci_bp_hardening,
+ },
+#endif
{
}
};
@@ -200,15 +383,18 @@ void verify_local_cpu_errata_workarounds(void)
{
const struct arm64_cpu_capabilities *caps = arm64_errata;
- for (; caps->matches; caps++)
- if (!cpus_have_cap(caps->capability) &&
- caps->matches(caps, SCOPE_LOCAL_CPU)) {
+ for (; caps->matches; caps++) {
+ if (cpus_have_cap(caps->capability)) {
+ if (caps->enable)
+ caps->enable((void *)caps);
+ } else if (caps->matches(caps, SCOPE_LOCAL_CPU)) {
pr_crit("CPU%d: Requires work around for %s, not detected"
" at boot time\n",
smp_processor_id(),
caps->desc ? : "an erratum");
cpu_die_early();
}
+ }
}
void update_cpu_errata_workarounds(void)
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index a73a5928f09b..0fb6a3151443 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -123,6 +123,7 @@ cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
* sync with the documentation of the CPU feature register ABI.
*/
static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM4_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM3_SHIFT, 4, 0),
@@ -145,8 +146,11 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
};
static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_RAS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
@@ -846,6 +850,67 @@ static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unus
ID_AA64PFR0_FP_SHIFT) < 0;
}
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
+
+static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
+ int __unused)
+{
+ u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+
+ /* Forced on command line? */
+ if (__kpti_forced) {
+ pr_info_once("kernel page table isolation forced %s by command line option\n",
+ __kpti_forced > 0 ? "ON" : "OFF");
+ return __kpti_forced > 0;
+ }
+
+ /* Useful for KASLR robustness */
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+ return true;
+
+ /* Don't force KPTI for CPUs that are not vulnerable */
+ switch (read_cpuid_id() & MIDR_CPU_MODEL_MASK) {
+ case MIDR_CAVIUM_THUNDERX2:
+ case MIDR_BRCM_VULCAN:
+ return false;
+ }
+
+ /* Defer to CPU feature registers */
+ return !cpuid_feature_extract_unsigned_field(pfr0,
+ ID_AA64PFR0_CSV3_SHIFT);
+}
+
+static int __init parse_kpti(char *str)
+{
+ bool enabled;
+ int ret = strtobool(str, &enabled);
+
+ if (ret)
+ return ret;
+
+ __kpti_forced = enabled ? 1 : -1;
+ return 0;
+}
+__setup("kpti=", parse_kpti);
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+
+static int cpu_copy_el2regs(void *__unused)
+{
+ /*
+ * Copy register values that aren't redirected by hardware.
+ *
+ * Before code patching, we only set tpidr_el1, so all CPUs need to copy
+ * this value to tpidr_el2 before we patch the code. Once we've done
+ * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to
+ * do anything here.
+ */
+ if (!alternatives_applied)
+ write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
+
+ return 0;
+}
+
static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "GIC system register CPU interface",
@@ -915,6 +980,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.capability = ARM64_HAS_VIRT_HOST_EXTN,
.def_scope = SCOPE_SYSTEM,
.matches = runs_at_el2,
+ .enable = cpu_copy_el2regs,
},
{
.desc = "32-bit EL0 Support",
@@ -932,6 +998,14 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.def_scope = SCOPE_SYSTEM,
.matches = hyp_offset_low,
},
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ {
+ .desc = "Kernel page table isolation (KPTI)",
+ .capability = ARM64_UNMAP_KERNEL_AT_EL0,
+ .def_scope = SCOPE_SYSTEM,
+ .matches = unmap_kernel_at_el0,
+ },
+#endif
{
/* FP/SIMD is not implemented */
.capability = ARM64_HAS_NO_FPSIMD,
@@ -963,6 +1037,19 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.enable = sve_kernel_enable,
},
#endif /* CONFIG_ARM64_SVE */
+#ifdef CONFIG_ARM64_RAS_EXTN
+ {
+ .desc = "RAS Extension Support",
+ .capability = ARM64_HAS_RAS_EXTN,
+ .def_scope = SCOPE_SYSTEM,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64PFR0_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64PFR0_RAS_SHIFT,
+ .min_field_value = ID_AA64PFR0_RAS_V1,
+ .enable = cpu_clear_disr,
+ },
+#endif /* CONFIG_ARM64_RAS_EXTN */
{},
};
@@ -992,6 +1079,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM3),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM4),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDDP),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDFHM),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP),
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD),
@@ -1071,6 +1159,25 @@ static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
cap_set_elf_hwcap(hwcaps);
}
+/*
+ * Check if the current CPU has a given feature capability.
+ * Should be called from non-preemptible context.
+ */
+static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
+ unsigned int cap)
+{
+ const struct arm64_cpu_capabilities *caps;
+
+ if (WARN_ON(preemptible()))
+ return false;
+
+ for (caps = cap_array; caps->matches; caps++)
+ if (caps->capability == cap &&
+ caps->matches(caps, SCOPE_LOCAL_CPU))
+ return true;
+ return false;
+}
+
void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
const char *info)
{
@@ -1106,7 +1213,7 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
* uses an IPI, giving us a PSTATE that disappears when
* we return.
*/
- stop_machine(caps->enable, NULL, cpu_online_mask);
+ stop_machine(caps->enable, (void *)caps, cpu_online_mask);
}
}
}
@@ -1134,8 +1241,9 @@ verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
}
static void
-verify_local_cpu_features(const struct arm64_cpu_capabilities *caps)
+verify_local_cpu_features(const struct arm64_cpu_capabilities *caps_list)
{
+ const struct arm64_cpu_capabilities *caps = caps_list;
for (; caps->matches; caps++) {
if (!cpus_have_cap(caps->capability))
continue;
@@ -1143,13 +1251,13 @@ verify_local_cpu_features(const struct arm64_cpu_capabilities *caps)
* If the new CPU misses an advertised feature, we cannot proceed
* further, park the cpu.
*/
- if (!caps->matches(caps, SCOPE_LOCAL_CPU)) {
+ if (!__this_cpu_has_cap(caps_list, caps->capability)) {
pr_crit("CPU%d: missing feature: %s\n",
smp_processor_id(), caps->desc);
cpu_die_early();
}
if (caps->enable)
- caps->enable(NULL);
+ caps->enable((void *)caps);
}
}
@@ -1189,6 +1297,9 @@ static void verify_local_cpu_capabilities(void)
if (system_supports_sve())
verify_sve_features();
+
+ if (system_uses_ttbr0_pan())
+ pr_info("Emulating Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
}
void check_local_cpu_capabilities(void)
@@ -1225,25 +1336,6 @@ static void __init mark_const_caps_ready(void)
static_branch_enable(&arm64_const_caps_ready);
}
-/*
- * Check if the current CPU has a given feature capability.
- * Should be called from non-preemptible context.
- */
-static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
- unsigned int cap)
-{
- const struct arm64_cpu_capabilities *caps;
-
- if (WARN_ON(preemptible()))
- return false;
-
- for (caps = cap_array; caps->desc; caps++)
- if (caps->capability == cap && caps->matches)
- return caps->matches(caps, SCOPE_LOCAL_CPU);
-
- return false;
-}
-
extern const struct arm64_cpu_capabilities arm64_errata[];
bool this_cpu_has_cap(unsigned int cap)
@@ -1387,3 +1479,11 @@ static int __init enable_mrs_emulation(void)
}
core_initcall(enable_mrs_emulation);
+
+int cpu_clear_disr(void *__unused)
+{
+ /* Firmware may have left a deferred SError in this register. */
+ write_sysreg_s(0, SYS_DISR_EL1);
+
+ return 0;
+}
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
index fd691087dc9a..f2d13810daa8 100644
--- a/arch/arm64/kernel/cpuidle.c
+++ b/arch/arm64/kernel/cpuidle.c
@@ -47,6 +47,8 @@ int arm_cpuidle_suspend(int index)
#include <acpi/processor.h>
+#define ARM64_LPI_IS_RETENTION_STATE(arch_flags) (!(arch_flags))
+
int acpi_processor_ffh_lpi_probe(unsigned int cpu)
{
return arm_cpuidle_init(cpu);
@@ -54,6 +56,10 @@ int acpi_processor_ffh_lpi_probe(unsigned int cpu)
int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
{
- return CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, lpi->index);
+ if (ARM64_LPI_IS_RETENTION_STATE(lpi->arch_flags))
+ return CPU_PM_CPU_IDLE_ENTER_RETENTION(arm_cpuidle_suspend,
+ lpi->index);
+ else
+ return CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, lpi->index);
}
#endif
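A hedged sketch of what the new retention-state test buys: with no architecture flags set, the LPI state preserves CPU context, so the CPU_PM_CPU_IDLE_ENTER_RETENTION path can skip the CPU PM notifier save/restore work that the plain CPU_PM_CPU_IDLE_ENTER path performs.

```c
#include <stdbool.h>
#include <stdint.h>

/* Mirrors ARM64_LPI_IS_RETENTION_STATE(): zero arch_flags == retention. */
static bool lpi_is_retention_state(uint32_t arch_flags)
{
	return arch_flags == 0;
}
```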
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 1e2554543506..7f94623df8a5 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -76,6 +76,7 @@ static const char *const hwcap_str[] = {
"asimddp",
"sha512",
"sve",
+ "asimdfhm",
NULL
};
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index a88b6ccebbb4..53781f5687c5 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -209,12 +209,13 @@ NOKPROBE_SYMBOL(call_step_hook);
static void send_user_sigtrap(int si_code)
{
struct pt_regs *regs = current_pt_regs();
- siginfo_t info = {
- .si_signo = SIGTRAP,
- .si_errno = 0,
- .si_code = si_code,
- .si_addr = (void __user *)instruction_pointer(regs),
- };
+ siginfo_t info;
+
+ clear_siginfo(&info);
+ info.si_signo = SIGTRAP;
+ info.si_errno = 0;
+ info.si_code = si_code;
+ info.si_addr = (void __user *)instruction_pointer(regs);
if (WARN_ON(!user_mode(regs)))
return;
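The switch away from a designated initializer matters because an initializer only zeroes named members: padding inside siginfo_t's union can keep stale stack bytes that later reach userspace. A userspace sketch of the clear-then-fill pattern that clear_siginfo() enforces (clear_siginfo() essentially reduces to the memset shown):

```c
#include <signal.h>
#include <string.h>

static void fill_sigtrap_info(siginfo_t *info, void *addr, int code)
{
	memset(info, 0, sizeof(*info));	/* what clear_siginfo() boils down to */
	info->si_signo = SIGTRAP;
	info->si_errno = 0;
	info->si_code = code;
	info->si_addr = addr;
}
```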
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 82cd07592519..f85ac58d08a3 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -48,7 +48,9 @@ static __init pteval_t create_mapping_protection(efi_memory_desc_t *md)
return pgprot_val(PAGE_KERNEL_ROX);
/* RW- */
- if (attr & EFI_MEMORY_XP || type != EFI_RUNTIME_SERVICES_CODE)
+ if (((attr & (EFI_MEMORY_RP | EFI_MEMORY_WP | EFI_MEMORY_XP)) ==
+ EFI_MEMORY_XP) ||
+ type != EFI_RUNTIME_SERVICES_CODE)
return pgprot_val(PAGE_KERNEL);
/* RWX */
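The tightened condition maps a region RW only when XP is the sole protection attribute set (or the region is not runtime-services code); previously any region with XP set, even together with RP or WP, matched the RW case. A sketch of the predicate; the flag values are assumptions for the sketch, matching the UEFI spec rather than this diff:

```c
#include <stdbool.h>
#include <stdint.h>

#define EFI_MEMORY_WP	(1ULL << 12)	/* write-protected */
#define EFI_MEMORY_RP	(1ULL << 13)	/* read-protected */
#define EFI_MEMORY_XP	(1ULL << 14)	/* execute-protected */

static bool attrs_say_rw(uint64_t attr)
{
	/* Only XP among the protection bits: an ordinary data region. */
	return (attr & (EFI_MEMORY_RP | EFI_MEMORY_WP | EFI_MEMORY_XP))
		== EFI_MEMORY_XP;
}
```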
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 6d14b8f29b5f..b34e717d7597 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -28,6 +28,8 @@
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
+#include <asm/memory.h>
+#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
@@ -69,8 +71,21 @@
#define BAD_FIQ 2
#define BAD_ERROR 3
- .macro kernel_ventry label
+ .macro kernel_ventry, el, label, regsize = 64
.align 7
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+alternative_if ARM64_UNMAP_KERNEL_AT_EL0
+ .if \el == 0
+ .if \regsize == 64
+ mrs x30, tpidrro_el0
+ msr tpidrro_el0, xzr
+ .else
+ mov x30, xzr
+ .endif
+ .endif
+alternative_else_nop_endif
+#endif
+
sub sp, sp, #S_FRAME_SIZE
#ifdef CONFIG_VMAP_STACK
/*
@@ -82,7 +97,7 @@
tbnz x0, #THREAD_SHIFT, 0f
sub x0, sp, x0 // x0'' = sp' - x0' = (sp + x0) - sp = x0
sub sp, sp, x0 // sp'' = sp' - x0 = (sp + x0) - x0 = sp
- b \label
+ b el\()\el\()_\label
0:
/*
@@ -114,7 +129,12 @@
sub sp, sp, x0
mrs x0, tpidrro_el0
#endif
- b \label
+ b el\()\el\()_\label
+ .endm
+
+ .macro tramp_alias, dst, sym
+ mov_q \dst, TRAMP_VALIAS
+ add \dst, \dst, #(\sym - .entry.tramp.text)
.endm
.macro kernel_entry, el, regsize = 64
@@ -185,7 +205,7 @@ alternative_else_nop_endif
.if \el != 0
mrs x21, ttbr0_el1
- tst x21, #0xffff << 48 // Check for the reserved ASID
+ tst x21, #TTBR_ASID_MASK // Check for the reserved ASID
orr x23, x23, #PSR_PAN_BIT // Set the emulated PAN in the saved SPSR
b.eq 1f // TTBR0 access already disabled
and x23, x23, #~PSR_PAN_BIT // Clear the emulated PAN in the saved SPSR
@@ -248,7 +268,7 @@ alternative_else_nop_endif
tbnz x22, #22, 1f // Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
.endif
- __uaccess_ttbr0_enable x0
+ __uaccess_ttbr0_enable x0, x1
.if \el == 0
/*
@@ -257,7 +277,7 @@ alternative_else_nop_endif
* Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
* corruption).
*/
- post_ttbr0_update_workaround
+ bl post_ttbr_update_workaround
.endif
1:
.if \el != 0
@@ -269,18 +289,20 @@ alternative_else_nop_endif
.if \el == 0
ldr x23, [sp, #S_SP] // load return stack pointer
msr sp_el0, x23
+ tst x22, #PSR_MODE32_BIT // native task?
+ b.eq 3f
+
#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
- tbz x22, #4, 1f
#ifdef CONFIG_PID_IN_CONTEXTIDR
mrs x29, contextidr_el1
msr contextidr_el1, x29
#else
msr contextidr_el1, xzr
#endif
-1:
alternative_else_nop_endif
#endif
+3:
.endif
msr elr_el1, x21 // set up the return data
@@ -302,7 +324,21 @@ alternative_else_nop_endif
ldp x28, x29, [sp, #16 * 14]
ldr lr, [sp, #S_LR]
add sp, sp, #S_FRAME_SIZE // restore sp
- eret // return to kernel
+
+ .if \el == 0
+alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ bne 4f
+ msr far_el1, x30
+ tramp_alias x30, tramp_exit_native
+ br x30
+4:
+ tramp_alias x30, tramp_exit_compat
+ br x30
+#endif
+ .else
+ eret
+ .endif
.endm
.macro irq_stack_entry
@@ -367,31 +403,31 @@ tsk .req x28 // current thread_info
.align 11
ENTRY(vectors)
- kernel_ventry el1_sync_invalid // Synchronous EL1t
- kernel_ventry el1_irq_invalid // IRQ EL1t
- kernel_ventry el1_fiq_invalid // FIQ EL1t
- kernel_ventry el1_error_invalid // Error EL1t
+ kernel_ventry 1, sync_invalid // Synchronous EL1t
+ kernel_ventry 1, irq_invalid // IRQ EL1t
+ kernel_ventry 1, fiq_invalid // FIQ EL1t
+ kernel_ventry 1, error_invalid // Error EL1t
- kernel_ventry el1_sync // Synchronous EL1h
- kernel_ventry el1_irq // IRQ EL1h
- kernel_ventry el1_fiq_invalid // FIQ EL1h
- kernel_ventry el1_error // Error EL1h
+ kernel_ventry 1, sync // Synchronous EL1h
+ kernel_ventry 1, irq // IRQ EL1h
+ kernel_ventry 1, fiq_invalid // FIQ EL1h
+ kernel_ventry 1, error // Error EL1h
- kernel_ventry el0_sync // Synchronous 64-bit EL0
- kernel_ventry el0_irq // IRQ 64-bit EL0
- kernel_ventry el0_fiq_invalid // FIQ 64-bit EL0
- kernel_ventry el0_error // Error 64-bit EL0
+ kernel_ventry 0, sync // Synchronous 64-bit EL0
+ kernel_ventry 0, irq // IRQ 64-bit EL0
+ kernel_ventry 0, fiq_invalid // FIQ 64-bit EL0
+ kernel_ventry 0, error // Error 64-bit EL0
#ifdef CONFIG_COMPAT
- kernel_ventry el0_sync_compat // Synchronous 32-bit EL0
- kernel_ventry el0_irq_compat // IRQ 32-bit EL0
- kernel_ventry el0_fiq_invalid_compat // FIQ 32-bit EL0
- kernel_ventry el0_error_compat // Error 32-bit EL0
+ kernel_ventry 0, sync_compat, 32 // Synchronous 32-bit EL0
+ kernel_ventry 0, irq_compat, 32 // IRQ 32-bit EL0
+ kernel_ventry 0, fiq_invalid_compat, 32 // FIQ 32-bit EL0
+ kernel_ventry 0, error_compat, 32 // Error 32-bit EL0
#else
- kernel_ventry el0_sync_invalid // Synchronous 32-bit EL0
- kernel_ventry el0_irq_invalid // IRQ 32-bit EL0
- kernel_ventry el0_fiq_invalid // FIQ 32-bit EL0
- kernel_ventry el0_error_invalid // Error 32-bit EL0
+ kernel_ventry 0, sync_invalid, 32 // Synchronous 32-bit EL0
+ kernel_ventry 0, irq_invalid, 32 // IRQ 32-bit EL0
+ kernel_ventry 0, fiq_invalid, 32 // FIQ 32-bit EL0
+ kernel_ventry 0, error_invalid, 32 // Error 32-bit EL0
#endif
END(vectors)
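kernel_ventry now takes the exception level as a separate argument, and the branch target el\()\el\()_\label is assembled by token pasting, so `kernel_ventry 0, sync` still lands on the historical el0_sync handler. A rough C-preprocessor analogue, for illustration only:

```c
#include <stdio.h>

/* Rough analogue of the assembler's \() token pasting. */
#define VENTRY_TARGET(n, label)	el ## n ## _ ## label
#define STR_(x)			#x
#define STR(x)			STR_(x)

int main(void)
{
	puts(STR(VENTRY_TARGET(0, sync)));	/* prints "el0_sync" */
	puts(STR(VENTRY_TARGET(1, irq)));	/* prints "el1_irq" */
	return 0;
}
```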
@@ -685,12 +721,15 @@ el0_ia:
* Instruction abort handling
*/
mrs x26, far_el1
- enable_daif
+ enable_da_f
+#ifdef CONFIG_TRACE_IRQFLAGS
+ bl trace_hardirqs_off
+#endif
ct_user_exit
mov x0, x26
mov x1, x25
mov x2, sp
- bl do_mem_abort
+ bl do_el0_ia_bp_hardening
b ret_to_user
el0_fpsimd_acc:
/*
@@ -943,6 +982,124 @@ __ni_sys_trace:
.popsection // .entry.text
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+/*
+ * Exception vectors trampoline.
+ */
+ .pushsection ".entry.tramp.text", "ax"
+
+ .macro tramp_map_kernel, tmp
+ mrs \tmp, ttbr1_el1
+ add \tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
+ bic \tmp, \tmp, #USER_ASID_FLAG
+ msr ttbr1_el1, \tmp
+#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
+alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
+ /* ASID already in \tmp[63:48] */
+ movk \tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
+ movk \tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
+ /* 2MB boundary containing the vectors, so we nobble the walk cache */
+ movk \tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
+ isb
+ tlbi vae1, \tmp
+ dsb nsh
+alternative_else_nop_endif
+#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
+ .endm
+
+ .macro tramp_unmap_kernel, tmp
+ mrs \tmp, ttbr1_el1
+ sub \tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
+ orr \tmp, \tmp, #USER_ASID_FLAG
+ msr ttbr1_el1, \tmp
+ /*
+ * We avoid running the post_ttbr_update_workaround here because the
+ * user and kernel ASIDs don't have conflicting mappings, so any
+ * "blessing" as described in:
+ *
+ * http://lkml.kernel.org/r/56BB848A.6060603@caviumnetworks.com
+ *
+ * will not hurt correctness. Whilst this may partially defeat the
+ * point of using split ASIDs in the first place, it avoids
+ * the hit of invalidating the entire I-cache on every return to
+ * userspace.
+ */
+ .endm
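The two macros above are each other's inverse: hop between tramp_pg_dir and swapper_pg_dir (which the linker script below places a fixed distance apart) while toggling the user ASID bit. A sketch under assumed constants — RESERVED_TTBR0_SIZE is zero unless CONFIG_ARM64_SW_TTBR0_PAN reserves a page, and the user ASID is taken to live at bit 48:

```c
#include <stdint.h>

#define PAGE_SIZE		4096ULL
#define RESERVED_TTBR0_SIZE	0ULL		/* one page with SW_TTBR0_PAN */
#define USER_ASID_FLAG		(1ULL << 48)	/* assumed USER_ASID_BIT */

static uint64_t do_tramp_map_kernel(uint64_t ttbr1)
{
	/* tramp_pg_dir -> swapper_pg_dir, kernel ASID */
	return (ttbr1 + PAGE_SIZE + RESERVED_TTBR0_SIZE) & ~USER_ASID_FLAG;
}

static uint64_t do_tramp_unmap_kernel(uint64_t ttbr1)
{
	/* swapper_pg_dir -> tramp_pg_dir, user ASID */
	return (ttbr1 - (PAGE_SIZE + RESERVED_TTBR0_SIZE)) | USER_ASID_FLAG;
}
```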
+
+ .macro tramp_ventry, regsize = 64
+ .align 7
+1:
+ .if \regsize == 64
+ msr tpidrro_el0, x30 // Restored in kernel_ventry
+ .endif
+ /*
+ * Defend against branch aliasing attacks by pushing a dummy
+ * entry onto the return stack and using a RET instruction to
+ * enter the full-fat kernel vectors.
+ */
+ bl 2f
+ b .
+2:
+ tramp_map_kernel x30
+#ifdef CONFIG_RANDOMIZE_BASE
+ adr x30, tramp_vectors + PAGE_SIZE
+alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
+ ldr x30, [x30]
+#else
+ ldr x30, =vectors
+#endif
+ prfm plil1strm, [x30, #(1b - tramp_vectors)]
+ msr vbar_el1, x30
+ add x30, x30, #(1b - tramp_vectors)
+ isb
+ ret
+ .endm
+
+ .macro tramp_exit, regsize = 64
+ adr x30, tramp_vectors
+ msr vbar_el1, x30
+ tramp_unmap_kernel x30
+ .if \regsize == 64
+ mrs x30, far_el1
+ .endif
+ eret
+ .endm
+
+ .align 11
+ENTRY(tramp_vectors)
+ .space 0x400
+
+ tramp_ventry
+ tramp_ventry
+ tramp_ventry
+ tramp_ventry
+
+ tramp_ventry 32
+ tramp_ventry 32
+ tramp_ventry 32
+ tramp_ventry 32
+END(tramp_vectors)
+
+ENTRY(tramp_exit_native)
+ tramp_exit
+END(tramp_exit_native)
+
+ENTRY(tramp_exit_compat)
+ tramp_exit 32
+END(tramp_exit_compat)
+
+ .ltorg
+ .popsection // .entry.tramp.text
+#ifdef CONFIG_RANDOMIZE_BASE
+ .pushsection ".rodata", "a"
+ .align PAGE_SHIFT
+ .globl __entry_tramp_data_start
+__entry_tramp_data_start:
+ .quad vectors
+ .popsection // .rodata
+#endif /* CONFIG_RANDOMIZE_BASE */
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+
/*
* Special system call wrappers.
*/
@@ -996,3 +1153,180 @@ ENTRY(ret_from_fork)
b ret_to_user
ENDPROC(ret_from_fork)
NOKPROBE(ret_from_fork)
+
+#ifdef CONFIG_ARM_SDE_INTERFACE
+
+#include <asm/sdei.h>
+#include <uapi/linux/arm_sdei.h>
+
+.macro sdei_handler_exit exit_mode
+ /* On success, this call never returns... */
+ cmp \exit_mode, #SDEI_EXIT_SMC
+ b.ne 99f
+ smc #0
+ b .
+99: hvc #0
+ b .
+.endm
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+/*
+ * The regular SDEI entry point may have been unmapped along with the rest of
+ * the kernel. This trampoline restores the kernel mapping to make the x1 memory
+ * argument accessible.
+ *
+ * This clobbers x4; __sdei_handler() will restore it from firmware's
+ * copy.
+ */
+.ltorg
+.pushsection ".entry.tramp.text", "ax"
+ENTRY(__sdei_asm_entry_trampoline)
+ mrs x4, ttbr1_el1
+ tbz x4, #USER_ASID_BIT, 1f
+
+ tramp_map_kernel tmp=x4
+ isb
+ mov x4, xzr
+
+ /*
+ * Use reg->interrupted_regs.addr_limit to remember whether to unmap
+ * the kernel on exit.
+ */
+1: str x4, [x1, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
+
+#ifdef CONFIG_RANDOMIZE_BASE
+ adr x4, tramp_vectors + PAGE_SIZE
+ add x4, x4, #:lo12:__sdei_asm_trampoline_next_handler
+ ldr x4, [x4]
+#else
+ ldr x4, =__sdei_asm_handler
+#endif
+ br x4
+ENDPROC(__sdei_asm_entry_trampoline)
+NOKPROBE(__sdei_asm_entry_trampoline)
+
+/*
+ * Make the exit call and restore the original ttbr1_el1
+ *
+ * x0 & x1: setup for the exit API call
+ * x2: exit_mode
+ * x4: struct sdei_registered_event argument from registration time.
+ */
+ENTRY(__sdei_asm_exit_trampoline)
+ ldr x4, [x4, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
+ cbnz x4, 1f
+
+ tramp_unmap_kernel tmp=x4
+
+1: sdei_handler_exit exit_mode=x2
+ENDPROC(__sdei_asm_exit_trampoline)
+NOKPROBE(__sdei_asm_exit_trampoline)
+ .ltorg
+.popsection // .entry.tramp.text
+#ifdef CONFIG_RANDOMIZE_BASE
+.pushsection ".rodata", "a"
+__sdei_asm_trampoline_next_handler:
+ .quad __sdei_asm_handler
+.popsection // .rodata
+#endif /* CONFIG_RANDOMIZE_BASE */
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+
+/*
+ * Software Delegated Exception entry point.
+ *
+ * x0: Event number
+ * x1: struct sdei_registered_event argument from registration time.
+ * x2: interrupted PC
+ * x3: interrupted PSTATE
+ * x4: maybe clobbered by the trampoline
+ *
+ * Firmware has preserved x0->x17 for us; we must save/restore the rest to
+ * follow SMC-CC. We save (or retrieve) all the registers as the handler may
+ * want them.
+ */
+ENTRY(__sdei_asm_handler)
+ stp x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
+ stp x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
+ stp x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
+ stp x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
+ stp x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
+ stp x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
+ stp x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
+ stp x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
+ stp x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
+ stp x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
+ stp x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
+ stp x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
+ stp x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
+ stp x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
+ mov x4, sp
+ stp lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]
+
+ mov x19, x1
+
+#ifdef CONFIG_VMAP_STACK
+ /*
+ * entry.S may have been using sp as a scratch register; find whether
+ * this is a normal or critical event and switch to the appropriate
+ * stack for this CPU.
+ */
+ ldrb w4, [x19, #SDEI_EVENT_PRIORITY]
+ cbnz w4, 1f
+ ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
+ b 2f
+1: ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
+2: mov x6, #SDEI_STACK_SIZE
+ add x5, x5, x6
+ mov sp, x5
+#endif
+
+ /*
+ * We may have interrupted userspace, or a guest, or exit-from or
+ * return-to either of these. We can't trust sp_el0, restore it.
+ */
+ mrs x28, sp_el0
+ ldr_this_cpu dst=x0, sym=__entry_task, tmp=x1
+ msr sp_el0, x0
+
+ /* If we interrupted the kernel, point to the previous stack/frame. */
+ and x0, x3, #0xc
+ mrs x1, CurrentEL
+ cmp x0, x1
+ csel x29, x29, xzr, eq // fp, or zero
+ csel x4, x2, xzr, eq // elr, or zero
+
+ stp x29, x4, [sp, #-16]!
+ mov x29, sp
+
+ add x0, x19, #SDEI_EVENT_INTREGS
+ mov x1, x19
+ bl __sdei_handler
+
+ msr sp_el0, x28
+ /* restore regs >x17 that we clobbered */
+ mov x4, x19 // keep x4 for __sdei_asm_exit_trampoline
+ ldp x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
+ ldp x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
+ ldp lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
+ mov sp, x1
+
+ mov x1, x0 // address to complete_and_resume
+ /* x0 = (x0 <= 1) ? EVENT_COMPLETE : EVENT_COMPLETE_AND_RESUME */
+ cmp x0, #1
+ mov_q x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
+ mov_q x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
+ csel x0, x2, x3, ls
+
+ ldr_l x2, sdei_exit_mode
+
+alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
+ sdei_handler_exit exit_mode=x2
+alternative_else_nop_endif
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ tramp_alias dst=x5, sym=__sdei_asm_exit_trampoline
+ br x5
+#endif
+ENDPROC(__sdei_asm_handler)
+NOKPROBE(__sdei_asm_handler)
+#endif /* CONFIG_ARM_SDE_INTERFACE */
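At the end of __sdei_asm_handler, the csel picks the completion call from the handler's return value: 0 (SDEI_EV_HANDLED) and 1 (SDEI_EV_FAILED) complete in place, while anything larger is a vector address to resume at. A sketch of that selection; the SMC64 function IDs are assumptions for the sketch, taken from the SDEI 1.0 spec rather than this diff:

```c
#include <stdint.h>

/* SMC64 function IDs: assumptions for the sketch, per SDEI 1.0. */
#define SDEI_1_0_FN_SDEI_EVENT_COMPLETE			0xC4000025ULL
#define SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME	0xC4000026ULL

struct sdei_exit_call {
	uint64_t fn;		/* goes in x0 for the SMC/HVC */
	uint64_t resume_pc;	/* x1: only used by ..._AND_RESUME */
};

static struct sdei_exit_call pick_exit_call(uint64_t handler_ret)
{
	struct sdei_exit_call c = { .resume_pc = handler_ret };

	/* 0 = HANDLED, 1 = FAILED: plain COMPLETE. Anything larger is a
	 * vector address to resume at: COMPLETE_AND_RESUME. */
	c.fn = (handler_ret <= 1) ? SDEI_1_0_FN_SDEI_EVENT_COMPLETE
				  : SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME;
	return c;
}
```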
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index fae81f7964b4..e7226c4c7493 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -867,7 +867,7 @@ asmlinkage void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
asmlinkage void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
{
siginfo_t info;
- unsigned int si_code = 0;
+ unsigned int si_code = FPE_FIXME;
if (esr & FPEXC_IOF)
si_code = FPE_FLTINV;
@@ -1036,14 +1036,14 @@ void fpsimd_restore_current_state(void)
* flag that indicates that the FPSIMD register contents are the most recent
* FPSIMD state of 'current'
*/
-void fpsimd_update_current_state(struct fpsimd_state *state)
+void fpsimd_update_current_state(struct user_fpsimd_state const *state)
{
if (!system_supports_fpsimd())
return;
local_bh_disable();
- current->thread.fpsimd_state.user_fpsimd = state->user_fpsimd;
+ current->thread.fpsimd_state.user_fpsimd = *state;
if (system_supports_sve() && test_thread_flag(TIF_SVE))
fpsimd_to_sve(current);
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index e3cb9fbf96b6..ba3ab04788dc 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -148,6 +148,26 @@ preserve_boot_args:
ENDPROC(preserve_boot_args)
/*
+ * Macro to arrange a physical address in a page table entry, taking care of
+ * 52-bit addresses.
+ *
+ * Preserves: phys
+ * Returns: pte
+ */
+ .macro phys_to_pte, phys, pte
+#ifdef CONFIG_ARM64_PA_BITS_52
+ /*
+ * We assume \phys is 64K aligned and this is guaranteed by only
+ * supporting this configuration with 64K pages.
+ */
+ orr \pte, \phys, \phys, lsr #36
+ and \pte, \pte, #PTE_ADDR_MASK
+#else
+ mov \pte, \phys
+#endif
+ .endm
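With CONFIG_ARM64_PA_BITS_52, bits [51:48] of the physical address are folded into PTE bits [15:12], which the 64K-page alignment guarantees are otherwise zero; hence the single orr-by-shift. A sketch with an assumed PTE_ADDR_MASK value (bits [47:16] plus [15:12] for 64K pages):

```c
#include <stdint.h>

#define PTE_ADDR_MASK	0x0000fffffffff000ULL	/* assumed: PA[47:16] | PA[51:48] slots */

static uint64_t phys_to_pte52(uint64_t phys)
{
	/* phys >> 36 moves PA[51:48] down into PTE bits [15:12]. */
	return (phys | (phys >> 36)) & PTE_ADDR_MASK;
}
```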
+
+/*
* Macro to create a table entry to the next page.
*
* tbl: page table address
@@ -156,54 +176,124 @@ ENDPROC(preserve_boot_args)
* ptrs: #imm pointers per table page
*
* Preserves: virt
- * Corrupts: tmp1, tmp2
+ * Corrupts: ptrs, tmp1, tmp2
* Returns: tbl -> next level table page address
*/
.macro create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
- lsr \tmp1, \virt, #\shift
- and \tmp1, \tmp1, #\ptrs - 1 // table index
- add \tmp2, \tbl, #PAGE_SIZE
+ add \tmp1, \tbl, #PAGE_SIZE
+ phys_to_pte \tmp1, \tmp2
orr \tmp2, \tmp2, #PMD_TYPE_TABLE // address of next table and entry type
+ lsr \tmp1, \virt, #\shift
+ sub \ptrs, \ptrs, #1
+ and \tmp1, \tmp1, \ptrs // table index
str \tmp2, [\tbl, \tmp1, lsl #3]
add \tbl, \tbl, #PAGE_SIZE // next level table page
.endm
/*
- * Macro to populate the PGD (and possibily PUD) for the corresponding
- * block entry in the next level (tbl) for the given virtual address.
+ * Macro to populate page table entries; these can be pointers to the next
+ * level or last-level entries pointing to physical memory.
+ *
+ * tbl: page table address
+ * rtbl: pointer to page table or physical memory
+ * index: start index to write
+ * eindex: end index to write - [index, eindex] written to
+ * flags: flags for pagetable entry to or in
+ * inc: increment to rtbl between each entry
+ * tmp1: temporary variable
*
- * Preserves: tbl, next, virt
- * Corrupts: tmp1, tmp2
+ * Preserves: tbl, eindex, flags, inc
+ * Corrupts: index, tmp1
+ * Returns: rtbl
*/
- .macro create_pgd_entry, tbl, virt, tmp1, tmp2
- create_table_entry \tbl, \virt, PGDIR_SHIFT, PTRS_PER_PGD, \tmp1, \tmp2
-#if SWAPPER_PGTABLE_LEVELS > 3
- create_table_entry \tbl, \virt, PUD_SHIFT, PTRS_PER_PUD, \tmp1, \tmp2
-#endif
-#if SWAPPER_PGTABLE_LEVELS > 2
- create_table_entry \tbl, \virt, SWAPPER_TABLE_SHIFT, PTRS_PER_PTE, \tmp1, \tmp2
-#endif
+ .macro populate_entries, tbl, rtbl, index, eindex, flags, inc, tmp1
+.Lpe\@: phys_to_pte \rtbl, \tmp1
+ orr \tmp1, \tmp1, \flags // tmp1 = table entry
+ str \tmp1, [\tbl, \index, lsl #3]
+ add \rtbl, \rtbl, \inc // rtbl = pa next level
+ add \index, \index, #1
+ cmp \index, \eindex
+ b.ls .Lpe\@
+ .endm
+
+/*
+ * Compute indices of table entries from virtual address range. If multiple entries
+ * were needed in the previous page table level then the next page table level is assumed
+ * to be composed of multiple pages. (This effectively scales the end index).
+ *
+ * vstart: virtual address of start of range
+ * vend: virtual address of end of range
+ * shift: shift used to transform virtual address into index
+ * ptrs: number of entries in page table
+ * istart: index in table corresponding to vstart
+ * iend: index in table corresponding to vend
+ * count: On entry: how many extra entries were required in previous level, scales
+ * our end index.
+ * On exit: returns how many extra entries required for next page table level
+ *
+ * Preserves: vstart, vend, shift, ptrs
+ * Returns: istart, iend, count
+ */
+ .macro compute_indices, vstart, vend, shift, ptrs, istart, iend, count
+ lsr \iend, \vend, \shift
+ mov \istart, \ptrs
+ sub \istart, \istart, #1
+ and \iend, \iend, \istart // iend = (vend >> shift) & (ptrs - 1)
+ mov \istart, \ptrs
+ mul \istart, \istart, \count
+ add \iend, \iend, \istart // iend += count * ptrs
+ // our entries span multiple tables
+
+ lsr \istart, \vstart, \shift
+ mov \count, \ptrs
+ sub \count, \count, #1
+ and \istart, \istart, \count
+
+ sub \count, \iend, \istart
.endm
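In C, compute_indices reduces to three lines; count carries the number of extra entries (hence extra next-level table pages) from one level to the next, as in this sketch:

```c
#include <stdint.h>

static void compute_indices(uint64_t vstart, uint64_t vend,
			    unsigned int shift, uint64_t ptrs,
			    uint64_t *istart, uint64_t *iend, uint64_t *count)
{
	/* End slot, shifted up by one whole table per extra previous-level
	 * entry, since that level's tables are laid out consecutively. */
	*iend = ((vend >> shift) & (ptrs - 1)) + *count * ptrs;
	*istart = (vstart >> shift) & (ptrs - 1);
	*count = *iend - *istart;	/* extra entries needed at this level */
}
```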
/*
- * Macro to populate block entries in the page table for the start..end
- * virtual range (inclusive).
+ * Map memory for the specified virtual address range. Each level of page table needed supports
+ * multiple entries. If a level requires n entries the next page table level is assumed to be
+ * formed from n pages.
+ *
+ * tbl: location of page table
+ * rtbl: address to be used for first level page table entry (typically tbl + PAGE_SIZE)
+ * vstart: start address to map
+ * vend: end address to map - we map [vstart, vend]
+ * flags: flags to use to map last level entries
+ * phys: physical address corresponding to vstart - physical memory is contiguous
+ * pgds: the number of pgd entries
*
- * Preserves: tbl, flags
- * Corrupts: phys, start, end, pstate
+ * Temporaries: istart, iend, tmp, count, sv - these need to be different registers
+ * Preserves: vstart, vend, flags
+ * Corrupts: tbl, rtbl, istart, iend, tmp, count, sv
*/
- .macro create_block_map, tbl, flags, phys, start, end
- lsr \phys, \phys, #SWAPPER_BLOCK_SHIFT
- lsr \start, \start, #SWAPPER_BLOCK_SHIFT
- and \start, \start, #PTRS_PER_PTE - 1 // table index
- orr \phys, \flags, \phys, lsl #SWAPPER_BLOCK_SHIFT // table entry
- lsr \end, \end, #SWAPPER_BLOCK_SHIFT
- and \end, \end, #PTRS_PER_PTE - 1 // table end index
-9999: str \phys, [\tbl, \start, lsl #3] // store the entry
- add \start, \start, #1 // next entry
- add \phys, \phys, #SWAPPER_BLOCK_SIZE // next block
- cmp \start, \end
- b.ls 9999b
+ .macro map_memory, tbl, rtbl, vstart, vend, flags, phys, pgds, istart, iend, tmp, count, sv
+ add \rtbl, \tbl, #PAGE_SIZE
+ mov \sv, \rtbl
+ mov \count, #0
+ compute_indices \vstart, \vend, #PGDIR_SHIFT, \pgds, \istart, \iend, \count
+ populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
+ mov \tbl, \sv
+ mov \sv, \rtbl
+
+#if SWAPPER_PGTABLE_LEVELS > 3
+ compute_indices \vstart, \vend, #PUD_SHIFT, #PTRS_PER_PUD, \istart, \iend, \count
+ populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
+ mov \tbl, \sv
+ mov \sv, \rtbl
+#endif
+
+#if SWAPPER_PGTABLE_LEVELS > 2
+ compute_indices \vstart, \vend, #SWAPPER_TABLE_SHIFT, #PTRS_PER_PMD, \istart, \iend, \count
+ populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
+ mov \tbl, \sv
+#endif
+
+ compute_indices \vstart, \vend, #SWAPPER_BLOCK_SHIFT, #PTRS_PER_PTE, \istart, \iend, \count
+ bic \count, \phys, #SWAPPER_BLOCK_SIZE - 1
+ populate_entries \tbl, \count, \istart, \iend, \flags, #SWAPPER_BLOCK_SIZE, \tmp
.endm
/*
@@ -221,14 +311,16 @@ __create_page_tables:
* dirty cache lines being evicted.
*/
adrp x0, idmap_pg_dir
- ldr x1, =(IDMAP_DIR_SIZE + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
+ adrp x1, swapper_pg_end
+ sub x1, x1, x0
bl __inval_dcache_area
/*
* Clear the idmap and swapper page tables.
*/
adrp x0, idmap_pg_dir
- ldr x1, =(IDMAP_DIR_SIZE + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
+ adrp x1, swapper_pg_end
+ sub x1, x1, x0
1: stp xzr, xzr, [x0], #16
stp xzr, xzr, [x0], #16
stp xzr, xzr, [x0], #16
@@ -244,26 +336,13 @@ __create_page_tables:
adrp x0, idmap_pg_dir
adrp x3, __idmap_text_start // __pa(__idmap_text_start)
-#ifndef CONFIG_ARM64_VA_BITS_48
-#define EXTRA_SHIFT (PGDIR_SHIFT + PAGE_SHIFT - 3)
-#define EXTRA_PTRS (1 << (48 - EXTRA_SHIFT))
-
- /*
- * If VA_BITS < 48, it may be too small to allow for an ID mapping to be
- * created that covers system RAM if that is located sufficiently high
- * in the physical address space. So for the ID map, use an extended
- * virtual range in that case, by configuring an additional translation
- * level.
- * First, we have to verify our assumption that the current value of
- * VA_BITS was chosen such that all translation levels are fully
- * utilised, and that lowering T0SZ will always result in an additional
- * translation level to be configured.
- */
-#if VA_BITS != EXTRA_SHIFT
-#error "Mismatch between VA_BITS and page size/number of translation levels"
-#endif
-
/*
+ * VA_BITS may be too small to allow for an ID mapping to be created
+ * that covers system RAM if that is located sufficiently high in the
+ * physical address space. So for the ID map, use an extended virtual
+ * range in that case, and configure an additional translation level
+ * if needed.
+ *
* Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
* entire ID map region can be mapped. As T0SZ == (64 - #bits used),
* this number conveniently equals the number of leading zeroes in
@@ -272,21 +351,44 @@ __create_page_tables:
adrp x5, __idmap_text_end
clz x5, x5
cmp x5, TCR_T0SZ(VA_BITS) // default T0SZ small enough?
- b.ge 1f // .. then skip additional level
+ b.ge 1f // .. then skip VA range extension
adr_l x6, idmap_t0sz
str x5, [x6]
dmb sy
dc ivac, x6 // Invalidate potentially stale cache line
- create_table_entry x0, x3, EXTRA_SHIFT, EXTRA_PTRS, x5, x6
-1:
+#if (VA_BITS < 48)
+#define EXTRA_SHIFT (PGDIR_SHIFT + PAGE_SHIFT - 3)
+#define EXTRA_PTRS (1 << (PHYS_MASK_SHIFT - EXTRA_SHIFT))
+
+ /*
+ * If VA_BITS < 48, we have to configure an additional table level.
+ * First, we have to verify our assumption that the current value of
+ * VA_BITS was chosen such that all translation levels are fully
+ * utilised, and that lowering T0SZ will always result in an additional
+ * translation level to be configured.
+ */
+#if VA_BITS != EXTRA_SHIFT
+#error "Mismatch between VA_BITS and page size/number of translation levels"
#endif
- create_pgd_entry x0, x3, x5, x6
+ mov x4, EXTRA_PTRS
+ create_table_entry x0, x3, EXTRA_SHIFT, x4, x5, x6
+#else
+ /*
+ * If VA_BITS == 48, we don't have to configure an additional
+ * translation level, but the top-level table has more entries.
+ */
+ mov x4, #1 << (PHYS_MASK_SHIFT - PGDIR_SHIFT)
+ str_l x4, idmap_ptrs_per_pgd, x5
+#endif
+1:
+ ldr_l x4, idmap_ptrs_per_pgd
mov x5, x3 // __pa(__idmap_text_start)
adr_l x6, __idmap_text_end // __pa(__idmap_text_end)
- create_block_map x0, x7, x3, x5, x6
+
+ map_memory x0, x1, x3, x6, x7, x3, x4, x10, x11, x12, x13, x14
/*
* Map the kernel image (starting with PHYS_OFFSET).
@@ -294,12 +396,13 @@ __create_page_tables:
adrp x0, swapper_pg_dir
mov_q x5, KIMAGE_VADDR + TEXT_OFFSET // compile time __va(_text)
add x5, x5, x23 // add KASLR displacement
- create_pgd_entry x0, x5, x3, x6
+ mov x4, PTRS_PER_PGD
adrp x6, _end // runtime __pa(_end)
adrp x3, _text // runtime __pa(_text)
sub x6, x6, x3 // _end - _text
add x6, x6, x5 // runtime __va(_end)
- create_block_map x0, x7, x3, x5, x6
+
+ map_memory x0, x1, x5, x6, x7, x3, x4, x10, x11, x12, x13, x14
/*
* Since the page tables have been populated with non-cacheable
@@ -307,7 +410,8 @@ __create_page_tables:
* tables again to remove any speculatively loaded cache lines.
*/
adrp x0, idmap_pg_dir
- ldr x1, =(IDMAP_DIR_SIZE + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
+ adrp x1, swapper_pg_end
+ sub x1, x1, x0
dmb sy
bl __inval_dcache_area
@@ -388,17 +492,13 @@ ENTRY(el2_setup)
mrs x0, CurrentEL
cmp x0, #CurrentEL_EL2
b.eq 1f
- mrs x0, sctlr_el1
-CPU_BE( orr x0, x0, #(3 << 24) ) // Set the EE and E0E bits for EL1
-CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1
+ mov_q x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1)
msr sctlr_el1, x0
mov w0, #BOOT_CPU_MODE_EL1 // This cpu booted in EL1
isb
ret
-1: mrs x0, sctlr_el2
-CPU_BE( orr x0, x0, #(1 << 25) ) // Set the EE bit for EL2
-CPU_LE( bic x0, x0, #(1 << 25) ) // Clear the EE bit for EL2
+1: mov_q x0, (SCTLR_EL2_RES1 | ENDIAN_SET_EL2)
msr sctlr_el2, x0
#ifdef CONFIG_ARM64_VHE
@@ -514,10 +614,7 @@ install_el2_stub:
* requires no configuration, and all non-hyp-specific EL2 setup
* will be done via the _EL1 system register aliases in __cpu_setup.
*/
- /* sctlr_el1 */
- mov x0, #0x0800 // Set/clear RES{1,0} bits
-CPU_BE( movk x0, #0x33d0, lsl #16 ) // Set EE and E0E on BE systems
-CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
+ mov_q x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1)
msr sctlr_el1, x0
/* Coprocessor traps. */
@@ -679,8 +776,10 @@ ENTRY(__enable_mmu)
update_early_cpu_boot_status 0, x1, x2
adrp x1, idmap_pg_dir
adrp x2, swapper_pg_dir
- msr ttbr0_el1, x1 // load TTBR0
- msr ttbr1_el1, x2 // load TTBR1
+ phys_to_ttbr x1, x3
+ phys_to_ttbr x2, x4
+ msr ttbr0_el1, x3 // load TTBR0
+ msr ttbr1_el1, x4 // load TTBR1
isb
msr sctlr_el1, x0
isb
diff --git a/arch/arm64/kernel/hibernate-asm.S b/arch/arm64/kernel/hibernate-asm.S
index e56d848b6466..84f5d52fddda 100644
--- a/arch/arm64/kernel/hibernate-asm.S
+++ b/arch/arm64/kernel/hibernate-asm.S
@@ -33,12 +33,14 @@
* Even switching to our copied tables will cause a changed output address at
* each stage of the walk.
*/
-.macro break_before_make_ttbr_switch zero_page, page_table
- msr ttbr1_el1, \zero_page
+.macro break_before_make_ttbr_switch zero_page, page_table, tmp
+ phys_to_ttbr \zero_page, \tmp
+ msr ttbr1_el1, \tmp
isb
tlbi vmalle1
dsb nsh
- msr ttbr1_el1, \page_table
+ phys_to_ttbr \page_table, \tmp
+ msr ttbr1_el1, \tmp
isb
.endm
@@ -78,7 +80,7 @@ ENTRY(swsusp_arch_suspend_exit)
* We execute from ttbr0, change ttbr1 to our copied linear map tables
* with a break-before-make via the zero page
*/
- break_before_make_ttbr_switch x5, x0
+ break_before_make_ttbr_switch x5, x0, x6
mov x21, x1
mov x30, x2
@@ -109,7 +111,7 @@ ENTRY(swsusp_arch_suspend_exit)
dsb ish /* wait for PoU cleaning to finish */
/* switch to the restored kernels page tables */
- break_before_make_ttbr_switch x25, x21
+ break_before_make_ttbr_switch x25, x21, x6
ic ialluis
dsb ish
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 3009b8b80f08..f20cf7e99249 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -247,8 +247,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
}
pte = pte_offset_kernel(pmd, dst_addr);
- set_pte(pte, __pte(virt_to_phys((void *)dst) |
- pgprot_val(PAGE_KERNEL_EXEC)));
+ set_pte(pte, pfn_pte(virt_to_pfn(dst), PAGE_KERNEL_EXEC));
/*
* Load our new page tables. A strict BBM approach requires that we
@@ -264,7 +263,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
*/
cpu_set_reserved_ttbr0();
local_flush_tlb_all();
- write_sysreg(virt_to_phys(pgd), ttbr0_el1);
+ write_sysreg(phys_to_ttbr(virt_to_phys(pgd)), ttbr0_el1);
isb();
*phys_dst_addr = virt_to_phys((void *)dst);
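phys_to_ttbr() is the TTBR-side counterpart of phys_to_pte in head.S: with 52-bit PAs, PA[51:48] are folded into TTBR bits [5:2], and below 52 bits the helper is an identity. A sketch under that assumption:

```c
#include <stdint.h>

#define TTBR_BADDR_MASK_52	(((1ULL << 46) - 1) << 2)	/* bits [47:2] */

static uint64_t phys_to_ttbr52(uint64_t phys)
{
	/* phys >> 46 moves PA[51:48] down into TTBR bits [5:2]. */
	return (phys | (phys >> 46)) & TTBR_BADDR_MASK_52;
}
```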
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 713561e5bcab..60e5fc661f74 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -29,6 +29,7 @@
#include <linux/irqchip.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
+#include <asm/vmap_stack.h>
unsigned long irq_err_count;
@@ -58,17 +59,7 @@ static void init_irq_stacks(void)
unsigned long *p;
for_each_possible_cpu(cpu) {
- /*
- * To ensure that VMAP'd stack overflow detection works
- * correctly, the IRQ stacks need to have the same
- * alignment as other stacks.
- */
- p = __vmalloc_node_range(IRQ_STACK_SIZE, THREAD_ALIGN,
- VMALLOC_START, VMALLOC_END,
- THREADINFO_GFP, PAGE_KERNEL,
- 0, cpu_to_node(cpu),
- __builtin_return_address(0));
-
+ p = arch_alloc_vmap_stack(IRQ_STACK_SIZE, cpu_to_node(cpu));
per_cpu(irq_stack_ptr, cpu) = p;
}
}
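arch_alloc_vmap_stack() factors out the allocation that the removed lines performed inline, so the IRQ and SDEI stacks share it. A hedged reconstruction from those removed lines (the real helper lives in asm/vmap_stack.h):

```c
#include <linux/thread_info.h>
#include <linux/vmalloc.h>

static inline unsigned long *arch_alloc_vmap_stack(size_t stack_size, int node)
{
	/* THREAD_ALIGN keeps VMAP'd stack-overflow detection working. */
	return __vmalloc_node_range(stack_size, THREAD_ALIGN,
				    VMALLOC_START, VMALLOC_END,
				    THREADINFO_GFP, PAGE_KERNEL, 0, node,
				    __builtin_return_address(0));
}
```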
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 6b7dcf4310ac..583fd8154695 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -370,16 +370,14 @@ void tls_preserve_current_state(void)
static void tls_thread_switch(struct task_struct *next)
{
- unsigned long tpidr, tpidrro;
-
tls_preserve_current_state();
- tpidr = *task_user_tls(next);
- tpidrro = is_compat_thread(task_thread_info(next)) ?
- next->thread.tp_value : 0;
+ if (is_compat_thread(task_thread_info(next)))
+ write_sysreg(next->thread.tp_value, tpidrro_el0);
+ else if (!arm64_kernel_unmapped_at_el0())
+ write_sysreg(0, tpidrro_el0);
- write_sysreg(tpidr, tpidr_el0);
- write_sysreg(tpidrro, tpidrro_el0);
+ write_sysreg(*task_user_tls(next), tpidr_el0);
}
/* Restore the UAO state depending on next's addr_limit */
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 7c44658b316d..6618036ae6d4 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -180,34 +180,34 @@ static void ptrace_hbptriggered(struct perf_event *bp,
struct pt_regs *regs)
{
struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
- siginfo_t info = {
- .si_signo = SIGTRAP,
- .si_errno = 0,
- .si_code = TRAP_HWBKPT,
- .si_addr = (void __user *)(bkpt->trigger),
- };
+ siginfo_t info;
-#ifdef CONFIG_COMPAT
- int i;
+ clear_siginfo(&info);
+ info.si_signo = SIGTRAP;
+ info.si_errno = 0;
+ info.si_code = TRAP_HWBKPT;
+ info.si_addr = (void __user *)(bkpt->trigger);
- if (!is_compat_task())
- goto send_sig;
+#ifdef CONFIG_COMPAT
+ if (is_compat_task()) {
+ int si_errno = 0;
+ int i;
- for (i = 0; i < ARM_MAX_BRP; ++i) {
- if (current->thread.debug.hbp_break[i] == bp) {
- info.si_errno = (i << 1) + 1;
- break;
+ for (i = 0; i < ARM_MAX_BRP; ++i) {
+ if (current->thread.debug.hbp_break[i] == bp) {
+ si_errno = (i << 1) + 1;
+ break;
+ }
}
- }
- for (i = 0; i < ARM_MAX_WRP; ++i) {
- if (current->thread.debug.hbp_watch[i] == bp) {
- info.si_errno = -((i << 1) + 1);
- break;
+ for (i = 0; i < ARM_MAX_WRP; ++i) {
+ if (current->thread.debug.hbp_watch[i] == bp) {
+ si_errno = -((i << 1) + 1);
+ break;
+ }
}
+ force_sig_ptrace_errno_trap(si_errno, (void __user *)bkpt->trigger);
}
-
-send_sig:
#endif
force_sig_info(SIGTRAP, &info, current);
}
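The reworked compat path still encodes which debug register fired into si_errno before force_sig_ptrace_errno_trap(); the scheme, sketched below, shifts the slot index so the value is never zero and uses the sign to distinguish breakpoints from watchpoints:

```c
#include <stdbool.h>

/* Breakpoint slot i -> (i << 1) + 1; watchpoint slot i -> -((i << 1) + 1);
 * 0 stays "no slot found". */
static int hbp_si_errno(int slot, bool is_watchpoint)
{
	int v = (slot << 1) + 1;

	return is_watchpoint ? -v : v;
}
```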
diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c
new file mode 100644
index 000000000000..6b8d90d5ceae
--- /dev/null
+++ b/arch/arm64/kernel/sdei.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2017 Arm Ltd.
+#define pr_fmt(fmt) "sdei: " fmt
+
+#include <linux/arm_sdei.h>
+#include <linux/hardirq.h>
+#include <linux/irqflags.h>
+#include <linux/sched/task_stack.h>
+#include <linux/uaccess.h>
+
+#include <asm/alternative.h>
+#include <asm/kprobes.h>
+#include <asm/mmu.h>
+#include <asm/ptrace.h>
+#include <asm/sections.h>
+#include <asm/sysreg.h>
+#include <asm/vmap_stack.h>
+
+unsigned long sdei_exit_mode;
+
+/*
+ * VMAP'd stacks check for stack overflow on exception using sp as a scratch
+ * register, so SDEI has to switch to its own stack. We need two stacks as a
+ * critical event may interrupt a normal event that has just taken a
+ * synchronous exception and is using sp as a scratch register. For a critical
+ * event interrupting a normal event, we can't reliably tell if we were on the
+ * sdei stack.
+ * For now, we allocate stacks when the driver is probed.
+ */
+DECLARE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
+DECLARE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);
+
+#ifdef CONFIG_VMAP_STACK
+DEFINE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
+DEFINE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);
+#endif
+
+static void _free_sdei_stack(unsigned long * __percpu *ptr, int cpu)
+{
+ unsigned long *p;
+
+ p = per_cpu(*ptr, cpu);
+ if (p) {
+ per_cpu(*ptr, cpu) = NULL;
+ vfree(p);
+ }
+}
+
+static void free_sdei_stacks(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ _free_sdei_stack(&sdei_stack_normal_ptr, cpu);
+ _free_sdei_stack(&sdei_stack_critical_ptr, cpu);
+ }
+}
+
+static int _init_sdei_stack(unsigned long * __percpu *ptr, int cpu)
+{
+ unsigned long *p;
+
+ p = arch_alloc_vmap_stack(SDEI_STACK_SIZE, cpu_to_node(cpu));
+ if (!p)
+ return -ENOMEM;
+ per_cpu(*ptr, cpu) = p;
+
+ return 0;
+}
+
+static int init_sdei_stacks(void)
+{
+ int cpu;
+ int err = 0;
+
+ for_each_possible_cpu(cpu) {
+ err = _init_sdei_stack(&sdei_stack_normal_ptr, cpu);
+ if (err)
+ break;
+ err = _init_sdei_stack(&sdei_stack_critical_ptr, cpu);
+ if (err)
+ break;
+ }
+
+ if (err)
+ free_sdei_stacks();
+
+ return err;
+}
+
+bool _on_sdei_stack(unsigned long sp)
+{
+ unsigned long low, high;
+
+ if (!IS_ENABLED(CONFIG_VMAP_STACK))
+ return false;
+
+ low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
+ high = low + SDEI_STACK_SIZE;
+
+ if (low <= sp && sp < high)
+ return true;
+
+ low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
+ high = low + SDEI_STACK_SIZE;
+
+ return (low <= sp && sp < high);
+}
+
+unsigned long sdei_arch_get_entry_point(int conduit)
+{
+ /*
+ * SDEI works between adjacent exception levels. If we booted at EL1 we
+ * assume a hypervisor is marshalling events. If we booted at EL2 and
+ * dropped to EL1 because we don't support VHE, then we can't support
+ * SDEI.
+ */
+ if (is_hyp_mode_available() && !is_kernel_in_hyp_mode()) {
+ pr_err("Not supported on this hardware/boot configuration\n");
+ return 0;
+ }
+
+ if (IS_ENABLED(CONFIG_VMAP_STACK)) {
+ if (init_sdei_stacks())
+ return 0;
+ }
+
+ sdei_exit_mode = (conduit == CONDUIT_HVC) ? SDEI_EXIT_HVC : SDEI_EXIT_SMC;
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ if (arm64_kernel_unmapped_at_el0()) {
+ unsigned long offset;
+
+ offset = (unsigned long)__sdei_asm_entry_trampoline -
+ (unsigned long)__entry_tramp_text_start;
+ return TRAMP_VALIAS + offset;
+ } else
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+ return (unsigned long)__sdei_asm_handler;
+
+}
+
+/*
+ * __sdei_handler() returns one of:
+ * SDEI_EV_HANDLED - success, return to the interrupted context.
+ * SDEI_EV_FAILED - failure, return this error code to firmware.
+ * virtual-address - success, return to this address.
+ */
+static __kprobes unsigned long _sdei_handler(struct pt_regs *regs,
+ struct sdei_registered_event *arg)
+{
+ u32 mode;
+ int i, err = 0;
+ int clobbered_registers = 4;
+ u64 elr = read_sysreg(elr_el1);
+ u32 kernel_mode = read_sysreg(CurrentEL) | 1; /* +SPSel */
+ unsigned long vbar = read_sysreg(vbar_el1);
+
+ if (arm64_kernel_unmapped_at_el0())
+ clobbered_registers++;
+
+ /* Retrieve the missing register values */
+ for (i = 0; i < clobbered_registers; i++) {
+ /* from within the handler, this call always succeeds */
+ sdei_api_event_context(i, &regs->regs[i]);
+ }
+
+ /*
+ * We didn't take an exception to get here, so set PAN. UAO will be
+ * cleared by sdei_event_handler()'s set_fs(USER_DS) call.
+ */
+ __uaccess_enable_hw_pan();
+
+ err = sdei_event_handler(regs, arg);
+ if (err)
+ return SDEI_EV_FAILED;
+
+ if (elr != read_sysreg(elr_el1)) {
+ /*
+ * We took a synchronous exception from the SDEI handler.
+ * This could deadlock, and if you interrupt KVM it will
+ * hyp-panic instead.
+ */
+ pr_warn("unsafe: exception during handler\n");
+ }
+
+ mode = regs->pstate & (PSR_MODE32_BIT | PSR_MODE_MASK);
+
+ /*
+ * If we interrupted the kernel with interrupts masked, we always go
+ * back to wherever we came from.
+ */
+ if (mode == kernel_mode && !interrupts_enabled(regs))
+ return SDEI_EV_HANDLED;
+
+ /*
+ * Otherwise, we pretend this was an IRQ. This lets user space tasks
+ * receive signals before we return to them, and lets KVM invoke its
+ * world switch to do the same.
+ *
+ * See DDI0487B.a Table D1-7 'Vector offsets from vector table base
+ * address'.
+ */
+ if (mode == kernel_mode)
+ return vbar + 0x280;
+ else if (mode & PSR_MODE32_BIT)
+ return vbar + 0x680;
+
+ return vbar + 0x480;
+}
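The three return paths at the bottom of _sdei_handler() are VBAR_EL1 plus the architectural IRQ vector offsets from the table the comment cites: 0x280 (current EL, SPx), 0x480 (lower EL, AArch64) and 0x680 (lower EL, AArch32). A sketch of the selection; the PSR_MODE32_BIT value is an assumption for the sketch:

```c
#include <stdint.h>

#define PSR_MODE32_BIT	0x10	/* assumed value for the sketch */

static uint64_t fake_irq_vector(uint64_t vbar, uint32_t mode,
				uint32_t kernel_mode)
{
	if (mode == kernel_mode)
		return vbar + 0x280;	/* IRQ, current EL with SPx */
	if (mode & PSR_MODE32_BIT)
		return vbar + 0x680;	/* IRQ, lower EL, AArch32 */
	return vbar + 0x480;		/* IRQ, lower EL, AArch64 */
}
```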
+
+
+asmlinkage __kprobes notrace unsigned long
+__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
+{
+ unsigned long ret;
+ bool do_nmi_exit = false;
+
+ /*
+ * nmi_enter() deals with printk() re-entrance and use of RCU when
+ * RCU believed this CPU was idle. Because critical events can
+ * interrupt normal events, we may already be in_nmi().
+ */
+ if (!in_nmi()) {
+ nmi_enter();
+ do_nmi_exit = true;
+ }
+
+ ret = _sdei_handler(regs, arg);
+
+ if (do_nmi_exit)
+ nmi_exit();
+
+ return ret;
+}
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index b120111a46be..f60c052e8d1c 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -178,7 +178,8 @@ static void __user *apply_user_offset(
static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
{
- struct fpsimd_state *fpsimd = &current->thread.fpsimd_state;
+ struct user_fpsimd_state const *fpsimd =
+ &current->thread.fpsimd_state.user_fpsimd;
int err;
/* copy the FP and status/control registers */
@@ -195,7 +196,7 @@ static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
static int restore_fpsimd_context(struct fpsimd_context __user *ctx)
{
- struct fpsimd_state fpsimd;
+ struct user_fpsimd_state fpsimd;
__u32 magic, size;
int err = 0;
@@ -266,7 +267,7 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
int err;
unsigned int vq;
- struct fpsimd_state fpsimd;
+ struct user_fpsimd_state fpsimd;
struct sve_context sve;
if (__copy_from_user(&sve, user->sve, sizeof(sve)))
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 22711ee8e36c..79feb861929b 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -125,86 +125,6 @@ static inline int get_sigset_t(sigset_t *set,
return 0;
}
-int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
-{
- int err;
-
- if (!access_ok(VERIFY_WRITE, to, sizeof(*to)))
- return -EFAULT;
-
- /* If you change siginfo_t structure, please be sure
- * this code is fixed accordingly.
- * It should never copy any pad contained in the structure
- * to avoid security leaks, but must copy the generic
- * 3 ints plus the relevant union member.
- * This routine must convert siginfo from 64bit to 32bit as well
- * at the same time.
- */
- err = __put_user(from->si_signo, &to->si_signo);
- err |= __put_user(from->si_errno, &to->si_errno);
- err |= __put_user(from->si_code, &to->si_code);
- if (from->si_code < 0)
- err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad,
- SI_PAD_SIZE);
- else switch (siginfo_layout(from->si_signo, from->si_code)) {
- case SIL_KILL:
- err |= __put_user(from->si_pid, &to->si_pid);
- err |= __put_user(from->si_uid, &to->si_uid);
- break;
- case SIL_TIMER:
- err |= __put_user(from->si_tid, &to->si_tid);
- err |= __put_user(from->si_overrun, &to->si_overrun);
- err |= __put_user(from->si_int, &to->si_int);
- break;
- case SIL_POLL:
- err |= __put_user(from->si_band, &to->si_band);
- err |= __put_user(from->si_fd, &to->si_fd);
- break;
- case SIL_FAULT:
- err |= __put_user((compat_uptr_t)(unsigned long)from->si_addr,
- &to->si_addr);
-#ifdef BUS_MCEERR_AO
- /*
- * Other callers might not initialize the si_lsb field,
- * so check explicitly for the right codes here.
- */
- if (from->si_signo == SIGBUS &&
- (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
- err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
-#endif
- break;
- case SIL_CHLD:
- err |= __put_user(from->si_pid, &to->si_pid);
- err |= __put_user(from->si_uid, &to->si_uid);
- err |= __put_user(from->si_status, &to->si_status);
- err |= __put_user(from->si_utime, &to->si_utime);
- err |= __put_user(from->si_stime, &to->si_stime);
- break;
- case SIL_RT:
- err |= __put_user(from->si_pid, &to->si_pid);
- err |= __put_user(from->si_uid, &to->si_uid);
- err |= __put_user(from->si_int, &to->si_int);
- break;
- case SIL_SYS:
- err |= __put_user((compat_uptr_t)(unsigned long)
- from->si_call_addr, &to->si_call_addr);
- err |= __put_user(from->si_syscall, &to->si_syscall);
- err |= __put_user(from->si_arch, &to->si_arch);
- break;
- }
- return err;
-}
-
-int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
-{
- if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) ||
- copy_from_user(to->_sifields._pad,
- from->_sifields._pad, SI_PAD_SIZE))
- return -EFAULT;
-
- return 0;
-}
-
/*
* VFP save/restore code.
*
@@ -228,7 +148,8 @@ union __fpsimd_vreg {
static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
{
- struct fpsimd_state *fpsimd = &current->thread.fpsimd_state;
+ struct user_fpsimd_state const *fpsimd =
+ &current->thread.fpsimd_state.user_fpsimd;
compat_ulong_t magic = VFP_MAGIC;
compat_ulong_t size = VFP_STORAGE_SIZE;
compat_ulong_t fpscr, fpexc;
@@ -277,7 +198,7 @@ static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame)
{
- struct fpsimd_state fpsimd;
+ struct user_fpsimd_state fpsimd;
compat_ulong_t magic = VFP_MAGIC;
compat_ulong_t size = VFP_STORAGE_SIZE;
compat_ulong_t fpscr;
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 551eb07c53b6..3b8ad7be9c33 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -18,6 +18,7 @@
*/
#include <linux/acpi.h>
+#include <linux/arm_sdei.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
@@ -836,6 +837,7 @@ static void ipi_cpu_stop(unsigned int cpu)
set_cpu_online(cpu, false);
local_daif_mask();
+ sdei_mask_local_cpu();
while (1)
cpu_relax();
@@ -853,6 +855,7 @@ static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
atomic_dec(&waiting_for_crash_ipi);
local_irq_disable();
+ sdei_mask_local_cpu();
#ifdef CONFIG_HOTPLUG_CPU
if (cpu_ops[cpu]->cpu_die)
@@ -972,6 +975,8 @@ void smp_send_stop(void)
if (num_online_cpus() > 1)
pr_warning("SMP: failed to stop secondary CPUs %*pbl\n",
cpumask_pr_args(cpu_online_mask));
+
+ sdei_mask_local_cpu();
}
#ifdef CONFIG_KEXEC_CORE
@@ -990,8 +995,10 @@ void crash_smp_send_stop(void)
cpus_stopped = 1;
- if (num_online_cpus() == 1)
+ if (num_online_cpus() == 1) {
+ sdei_mask_local_cpu();
return;
+ }
cpumask_copy(&mask, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &mask);
@@ -1009,6 +1016,8 @@ void crash_smp_send_stop(void)
if (atomic_read(&waiting_for_crash_ipi) > 0)
pr_warning("SMP: failed to stop secondary CPUs %*pbl\n",
cpumask_pr_args(&mask));
+
+ sdei_mask_local_cpu();
}
bool smp_crash_stop_failed(void)
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 3fe5ad884418..a307b9e13392 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -2,6 +2,7 @@
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/slab.h>
+#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
@@ -51,8 +52,7 @@ void notrace __cpu_suspend_exit(void)
* PSTATE was not saved over suspend/resume, re-enable any detected
* features that might not have been set correctly.
*/
- asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
- CONFIG_ARM64_PAN));
+ __uaccess_enable_hw_pan();
uao_thread_switch(current);
/*
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 8d48b233e6ce..21868530018e 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -37,18 +37,14 @@ static int __init get_cpu_for_node(struct device_node *node)
if (!cpu_node)
return -1;
- for_each_possible_cpu(cpu) {
- if (of_get_cpu_node(cpu, NULL) == cpu_node) {
- topology_parse_cpu_capacity(cpu_node, cpu);
- of_node_put(cpu_node);
- return cpu;
- }
- }
-
- pr_crit("Unable to find CPU node for %pOF\n", cpu_node);
+ cpu = of_cpu_node_to_id(cpu_node);
+ if (cpu >= 0)
+ topology_parse_cpu_capacity(cpu_node, cpu);
+ else
+ pr_crit("Unable to find CPU node for %pOF\n", cpu_node);
of_node_put(cpu_node);
- return -1;
+ return cpu;
}
static int __init parse_core(struct device_node *core, int cluster_id,
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 3d3588fcd1c7..bbb0fde2780e 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -662,17 +662,58 @@ asmlinkage void handle_bad_stack(struct pt_regs *regs)
}
#endif
-asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr)
+void __noreturn arm64_serror_panic(struct pt_regs *regs, u32 esr)
{
- nmi_enter();
-
console_verbose();
pr_crit("SError Interrupt on CPU%d, code 0x%08x -- %s\n",
smp_processor_id(), esr, esr_get_class_string(esr));
- __show_regs(regs);
+ if (regs)
+ __show_regs(regs);
+
+ nmi_panic(regs, "Asynchronous SError Interrupt");
+
+ cpu_park_loop();
+ unreachable();
+}
+
+bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr)
+{
+ u32 aet = arm64_ras_serror_get_severity(esr);
+
+ switch (aet) {
+ case ESR_ELx_AET_CE: /* corrected error */
+ case ESR_ELx_AET_UEO: /* restartable, not yet consumed */
+ /*
+ * The CPU can make progress. We may take UEO again as
+ * a more severe error.
+ */
+ return false;
+
+ case ESR_ELx_AET_UEU: /* Uncorrected Unrecoverable */
+ case ESR_ELx_AET_UER: /* Uncorrected Recoverable */
+ /*
+ * The CPU can't make progress. The exception may have
+ * been imprecise.
+ */
+ return true;
+
+ case ESR_ELx_AET_UC: /* Uncontainable or Uncategorized error */
+ default:
+ /* Error has been silently propagated */
+ arm64_serror_panic(regs, esr);
+ }
+}
+
+asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr)
+{
+ nmi_enter();
+
+ /* non-RAS errors are not containable */
+ if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
+ arm64_serror_panic(regs, esr);
- panic("Asynchronous SError Interrupt");
+ nmi_exit();
}
void __pte_error(const char *file, int line, unsigned long val)
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 7da3e5c366a0..0221aca6493d 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -57,6 +57,17 @@ jiffies = jiffies_64;
#define HIBERNATE_TEXT
#endif
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+#define TRAMP_TEXT \
+ . = ALIGN(PAGE_SIZE); \
+ VMLINUX_SYMBOL(__entry_tramp_text_start) = .; \
+ *(.entry.tramp.text) \
+ . = ALIGN(PAGE_SIZE); \
+ VMLINUX_SYMBOL(__entry_tramp_text_end) = .;
+#else
+#define TRAMP_TEXT
+#endif
+
/*
* The size of the PE/COFF section that covers the kernel image, which
* runs from stext to _edata, must be a round multiple of the PE/COFF
@@ -113,6 +124,7 @@ SECTIONS
HYPERVISOR_TEXT
IDMAP_TEXT
HIBERNATE_TEXT
+ TRAMP_TEXT
*(.fixup)
*(.gnu.warning)
. = ALIGN(16);
@@ -206,13 +218,19 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
idmap_pg_dir = .;
. += IDMAP_DIR_SIZE;
- swapper_pg_dir = .;
- . += SWAPPER_DIR_SIZE;
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ tramp_pg_dir = .;
+ . += PAGE_SIZE;
+#endif
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
reserved_ttbr0 = .;
. += RESERVED_TTBR0_SIZE;
#endif
+ swapper_pg_dir = .;
+ . += SWAPPER_DIR_SIZE;
+ swapper_pg_end = .;
__pecoff_data_size = ABSOLUTE(. - __initdata_begin);
_end = .;
@@ -234,7 +252,10 @@ ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1))
<= SZ_4K, "Hibernate exit text too big or misaligned")
#endif
-
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE,
+ "Entry trampoline text too big")
+#endif
/*
* If padding is applied before .head.text, virt<->phys conversions will fail.
*/
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index e60494f1eef9..520b0dad3c62 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -23,18 +23,26 @@
#include <linux/kvm_host.h>
#include <asm/esr.h>
+#include <asm/exception.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_psci.h>
#include <asm/debug-monitors.h>
+#include <asm/traps.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
+static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u32 esr)
+{
+ if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(NULL, esr))
+ kvm_inject_vabt(vcpu);
+}
+
static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
int ret;
@@ -242,7 +250,6 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
*vcpu_pc(vcpu) -= adj;
}
- kvm_inject_vabt(vcpu);
return 1;
}
@@ -252,7 +259,6 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
case ARM_EXCEPTION_IRQ:
return 1;
case ARM_EXCEPTION_EL1_SERROR:
- kvm_inject_vabt(vcpu);
/* We may still need to return for single-step */
if (!(*vcpu_cpsr(vcpu) & DBG_SPSR_SS)
&& kvm_arm_handle_step_debug(vcpu, run))
@@ -275,3 +281,25 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
return 0;
}
}
+
+/* For exit types that need handling before we can be preempted */
+void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ int exception_index)
+{
+ if (ARM_SERROR_PENDING(exception_index)) {
+ if (this_cpu_has_cap(ARM64_HAS_RAS_EXTN)) {
+ u64 disr = kvm_vcpu_get_disr(vcpu);
+
+ kvm_handle_guest_serror(vcpu, disr_to_esr(disr));
+ } else {
+ kvm_inject_vabt(vcpu);
+ }
+
+ return;
+ }
+
+ exception_index = ARM_EXCEPTION_CODE(exception_index);
+
+ if (exception_index == ARM_EXCEPTION_EL1_SERROR)
+ kvm_handle_guest_serror(vcpu, kvm_vcpu_get_hsr(vcpu));
+}
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index 870828c364c5..e086c6eff8c6 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -63,7 +63,8 @@ __do_hyp_init:
cmp x0, #HVC_STUB_HCALL_NR
b.lo __kvm_handle_stub_hvc
- msr ttbr0_el2, x0
+ phys_to_ttbr x0, x4
+ msr ttbr0_el2, x4
mrs x4, tcr_el1
ldr x5, =TCR_EL2_MASK
@@ -71,30 +72,27 @@ __do_hyp_init:
mov x5, #TCR_EL2_RES1
orr x4, x4, x5
-#ifndef CONFIG_ARM64_VA_BITS_48
/*
- * If we are running with VA_BITS < 48, we may be running with an extra
- * level of translation in the ID map. This is only the case if system
- * RAM is out of range for the currently configured page size and number
- * of translation levels, in which case we will also need the extra
- * level for the HYP ID map, or we won't be able to enable the EL2 MMU.
+ * The ID map may be configured to use an extended virtual address
+ * range. This is only the case if system RAM is out of range for the
+ * currently configured page size and VA_BITS, in which case we will
+ * also need the extended virtual range for the HYP ID map, or we won't
+ * be able to enable the EL2 MMU.
*
* However, at EL2, there is only one TTBR register, and we can't switch
* between translation tables *and* update TCR_EL2.T0SZ at the same
- * time. Bottom line: we need the extra level in *both* our translation
- * tables.
+ * time. Bottom line: we need to use the extended range with *both* our
+ * translation tables.
*
* So use the same T0SZ value we use for the ID map.
*/
ldr_l x5, idmap_t0sz
bfi x4, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
-#endif
+
/*
- * Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS bits in
- * TCR_EL2.
+ * Set the PS bits in TCR_EL2.
*/
- mrs x5, ID_AA64MMFR0_EL1
- bfi x4, x5, #16, #3
+ tcr_compute_pa_size x4, #TCR_EL2_PS_SHIFT, x5, x6
msr tcr_el2, x4
@@ -122,6 +120,10 @@ CPU_BE( orr x4, x4, #SCTLR_ELx_EE)
kern_hyp_va x2
msr vbar_el2, x2
+ /* copy tpidr_el1 into tpidr_el2 for use by HYP */
+ mrs x1, tpidr_el1
+ msr tpidr_el2, x1
+
/* Hello, World! */
eret
ENDPROC(__kvm_hyp_init)
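
phys_to_ttbr exists because, with 52-bit physical addresses configured, a table base can no longer be written into a TTBR verbatim: the register keeps PA[47:2] in place but carries PA[51:48] down in bits [5:2] (hence the 64-byte table alignment requirement seen later in pgd.c). A C rendition of the packing, as a sketch with an illustrative mask name; without CONFIG_ARM64_PA_BITS_52 the macro is a plain register move:

#include <stdint.h>

#define GENMASK_ULL(h, l) \
	((~UINT64_C(0) << (l)) & (~UINT64_C(0) >> (63 - (h))))
#define TTBR_BADDR_MASK_52	GENMASK_ULL(47, 2)

static inline uint64_t pack_ttbr_baddr(uint64_t phys)
{
	/* PA[51:48] >> 46 lands in bits [5:2]; PA[47:2] stays in place */
	return (phys | (phys >> 46)) & TTBR_BADDR_MASK_52;
}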
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index 12ee62d6d410..fdd1068ee3a5 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -62,8 +62,8 @@ ENTRY(__guest_enter)
// Store the host regs
save_callee_saved_regs x1
- // Store the host_ctxt for use at exit time
- str x1, [sp, #-16]!
+ // Store host_ctxt and vcpu for use at exit time
+ stp x1, x0, [sp, #-16]!
add x18, x0, #VCPU_CONTEXT
@@ -124,6 +124,17 @@ ENTRY(__guest_exit)
// Now restore the host regs
restore_callee_saved_regs x2
+alternative_if ARM64_HAS_RAS_EXTN
+ // If we have the RAS extensions we can consume a pending error
+ // without an unmask-SError and isb.
+ esb
+ mrs_s x2, SYS_DISR_EL1
+ str x2, [x1, #(VCPU_FAULT_DISR - VCPU_CONTEXT)]
+ cbz x2, 1f
+ msr_s SYS_DISR_EL1, xzr
+ orr x0, x0, #(1<<ARM_EXIT_WITH_SERROR_BIT)
+1: ret
+alternative_else
// If we have a pending asynchronous abort, now is the
// time to find out. From your VAXorcist book, page 666:
// "Threaten me not, oh Evil one! For I speak with
@@ -134,7 +145,9 @@ ENTRY(__guest_exit)
mov x5, x0
dsb sy // Synchronize against in-flight ld/st
+ nop
msr daifclr, #4 // Unmask aborts
+alternative_endif
// This is our single instruction exception window. A pending
// SError is guaranteed to occur at the earliest when we unmask
@@ -159,6 +172,10 @@ abort_guest_exit_end:
ENDPROC(__guest_exit)
ENTRY(__fpsimd_guest_restore)
+ // x0: esr
+ // x1: vcpu
+ // x2-x29,lr: vcpu regs
+ // vcpu x0-x1 on the stack
stp x2, x3, [sp, #-16]!
stp x4, lr, [sp, #-16]!
@@ -173,7 +190,7 @@ alternative_else
alternative_endif
isb
- mrs x3, tpidr_el2
+ mov x3, x1
ldr x0, [x3, #VCPU_HOST_CONTEXT]
kern_hyp_va x0
@@ -196,3 +213,15 @@ alternative_endif
eret
ENDPROC(__fpsimd_guest_restore)
+
+ENTRY(__qcom_hyp_sanitize_btac_predictors)
+ /*
+ * Call SMC64 with Silicon provider service ID 23<<8 (0xc2001700).
+ * 0xC2000000-0xC200FFFF is assigned to SiP Service Calls;
+ * bits [15:0] carry the SiP function ID.
+ */
+ movz x0, #0x1700
+ movk x0, #0xc200, lsl #16
+ smc #0
+ ret
+ENDPROC(__qcom_hyp_sanitize_btac_predictors)
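
The movz/movk pair above builds a standard SMC Calling Convention fast-call ID: bit 31 marks a fast call, bit 30 selects SMC64, bits [29:24] name the owning entity (2 = SiP), and bits [15:0] hold the function number, 23<<8 here. Sketched in C for clarity (constant names are illustrative, not the kernel's arm-smccc.h spellings):

#include <stdint.h>

#define SMCCC_FAST_CALL		(UINT32_C(1) << 31)
#define SMCCC_SMC64		(UINT32_C(1) << 30)
#define SMCCC_OWNER_SHIFT	24
#define SMCCC_OWNER_SIP		UINT32_C(2)

static inline uint32_t smccc_sip_fn(uint16_t fn)
{
	return SMCCC_FAST_CALL | SMCCC_SMC64 |
	       (SMCCC_OWNER_SIP << SMCCC_OWNER_SHIFT) | fn;
}

/* smccc_sip_fn(23 << 8) == 0xc2001700, matching the movz/movk pair above */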
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 5170ce1021da..e4f37b9dd47c 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -104,6 +104,7 @@ el1_trap:
/*
* x0: ESR_EC
*/
+ ldr x1, [sp, #16 + 8] // vcpu stored by __guest_enter
/*
* We trap the first access to the FP/SIMD to save the host context
@@ -116,19 +117,18 @@ alternative_if_not ARM64_HAS_NO_FPSIMD
b.eq __fpsimd_guest_restore
alternative_else_nop_endif
- mrs x1, tpidr_el2
mov x0, #ARM_EXCEPTION_TRAP
b __guest_exit
el1_irq:
stp x0, x1, [sp, #-16]!
- mrs x1, tpidr_el2
+ ldr x1, [sp, #16 + 8]
mov x0, #ARM_EXCEPTION_IRQ
b __guest_exit
el1_error:
stp x0, x1, [sp, #-16]!
- mrs x1, tpidr_el2
+ ldr x1, [sp, #16 + 8]
mov x0, #ARM_EXCEPTION_EL1_SERROR
b __guest_exit
@@ -163,6 +163,18 @@ ENTRY(__hyp_do_panic)
eret
ENDPROC(__hyp_do_panic)
+ENTRY(__hyp_panic)
+ /*
+ * '=kvm_host_cpu_state' is a host VA from the constant pool; it may
+ * not be accessible at this address from EL2, so hyp_panic() converts
+ * it with kern_hyp_va() before use.
+ */
+ ldr x0, =kvm_host_cpu_state
+ mrs x1, tpidr_el2
+ add x0, x0, x1
+ b hyp_panic
+ENDPROC(__hyp_panic)
+
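
The three-instruction sequence in __hyp_panic is the assembly spelling of a per-CPU pointer lookup: now that tpidr_el2 mirrors the host's tpidr_el1 per-CPU offset, any per-CPU symbol can be resolved at EL2 as base address plus offset. Roughly (a sketch, not the kernel's this_cpu_ptr() implementation):

#include <stdint.h>

/* per-CPU variable at EL2 = address of the symbol + this CPU's offset */
static inline void *this_cpu_ptr_el2(void *sym, uint64_t tpidr_el2)
{
	return (char *)sym + tpidr_el2;
}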
.macro invalid_vector label, target = __hyp_panic
.align 2
\label:
diff --git a/arch/arm64/kvm/hyp/s2-setup.c b/arch/arm64/kvm/hyp/s2-setup.c
index a81f5e10fc8c..603e1ee83e89 100644
--- a/arch/arm64/kvm/hyp/s2-setup.c
+++ b/arch/arm64/kvm/hyp/s2-setup.c
@@ -32,6 +32,8 @@ u32 __hyp_text __init_stage2_translation(void)
* PS is only 3. Fortunately, bit 19 is RES0 in VTCR_EL2...
*/
parange = read_sysreg(id_aa64mmfr0_el1) & 7;
+ if (parange > ID_AA64MMFR0_PARANGE_MAX)
+ parange = ID_AA64MMFR0_PARANGE_MAX;
val |= parange << 16;
/* Compute the actual PARange... */
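
The clamp is needed because ID_AA64MMFR0_EL1.PARange is a 4-bit enumeration rather than a bit count, and a future CPU may report a value this kernel does not understand; copying it blindly into VTCR_EL2.PS or using it as an index would be wrong. For reference, the architected encoding (a sketch; ID_AA64MMFR0_PARANGE_MAX corresponds to 6, i.e. 52 bits, at this point):

#include <stdint.h>

/* ID_AA64MMFR0_EL1.PARange encoding -> physical address bits */
static const uint8_t parange_to_pa_bits[] = {
	[0] = 32, [1] = 36, [2] = 40, [3] = 42,
	[4] = 44, [5] = 48, [6] = 52,
};

#define PARANGE_MAX 6	/* largest encoding this kernel knows about */

static inline uint8_t pa_bits(uint64_t mmfr0)
{
	uint64_t parange = mmfr0 & 7;

	if (parange > PARANGE_MAX)	/* future encoding: clamp, don't index OOB */
		parange = PARANGE_MAX;
	return parange_to_pa_bits[parange];
}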
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index f7c651f3a8c0..036e1f3d77a6 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -17,6 +17,7 @@
#include <linux/types.h>
#include <linux/jump_label.h>
+#include <uapi/linux/psci.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
@@ -52,7 +53,7 @@ static void __hyp_text __activate_traps_vhe(void)
val &= ~(CPACR_EL1_FPEN | CPACR_EL1_ZEN);
write_sysreg(val, cpacr_el1);
- write_sysreg(__kvm_hyp_vector, vbar_el1);
+ write_sysreg(kvm_get_hyp_vector(), vbar_el1);
}
static void __hyp_text __activate_traps_nvhe(void)
@@ -93,6 +94,9 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
write_sysreg(val, hcr_el2);
+ if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (val & HCR_VSE))
+ write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
+
/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
write_sysreg(1 << 15, hstr_el2);
/*
@@ -235,11 +239,12 @@ static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
{
- u64 esr = read_sysreg_el2(esr);
- u8 ec = ESR_ELx_EC(esr);
+ u8 ec;
+ u64 esr;
u64 hpfar, far;
- vcpu->arch.fault.esr_el2 = esr;
+ esr = vcpu->arch.fault.esr_el2;
+ ec = ESR_ELx_EC(esr);
if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
return true;
@@ -305,9 +310,9 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
u64 exit_code;
vcpu = kern_hyp_va(vcpu);
- write_sysreg(vcpu, tpidr_el2);
host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+ host_ctxt->__hyp_running_vcpu = vcpu;
guest_ctxt = &vcpu->arch.ctxt;
__sysreg_save_host_state(host_ctxt);
@@ -332,6 +337,8 @@ again:
exit_code = __guest_enter(vcpu, host_ctxt);
/* And we're baaack! */
+ if (ARM_EXCEPTION_CODE(exit_code) != ARM_EXCEPTION_IRQ)
+ vcpu->arch.fault.esr_el2 = read_sysreg_el2(esr);
/*
* We're using the raw exception code in order to only process
* the trap if no SError is pending. We will come back to the
@@ -341,6 +348,18 @@ again:
if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu))
goto again;
+ if (exit_code == ARM_EXCEPTION_TRAP &&
+ (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC64 ||
+ kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC32) &&
+ vcpu_get_reg(vcpu, 0) == PSCI_0_2_FN_PSCI_VERSION) {
+ u64 val = PSCI_RET_NOT_SUPPORTED;
+ if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
+ val = 2;
+
+ vcpu_set_reg(vcpu, 0, val);
+ goto again;
+ }
+
if (static_branch_unlikely(&vgic_v2_cpuif_trap) &&
exit_code == ARM_EXCEPTION_TRAP) {
bool valid;
@@ -393,6 +412,14 @@ again:
/* 0 falls through to be handled out of EL2 */
}
+ if (cpus_have_const_cap(ARM64_HARDEN_BP_POST_GUEST_EXIT)) {
+ u32 midr = read_cpuid_id();
+
+ /* Apply BTAC predictors mitigation to all Falkor chips */
+ if ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)
+ __qcom_hyp_sanitize_btac_predictors();
+ }
+
fp_enabled = __fpsimd_enabled();
__sysreg_save_guest_state(guest_ctxt);
@@ -422,7 +449,8 @@ again:
static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";
-static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par)
+static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par,
+ struct kvm_vcpu *vcpu)
{
unsigned long str_va;
@@ -436,35 +464,35 @@ static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par)
__hyp_do_panic(str_va,
spsr, elr,
read_sysreg(esr_el2), read_sysreg_el2(far),
- read_sysreg(hpfar_el2), par,
- (void *)read_sysreg(tpidr_el2));
+ read_sysreg(hpfar_el2), par, vcpu);
}
-static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par)
+static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
+ struct kvm_vcpu *vcpu)
{
panic(__hyp_panic_string,
spsr, elr,
read_sysreg_el2(esr), read_sysreg_el2(far),
- read_sysreg(hpfar_el2), par,
- (void *)read_sysreg(tpidr_el2));
+ read_sysreg(hpfar_el2), par, vcpu);
}
static hyp_alternate_select(__hyp_call_panic,
__hyp_call_panic_nvhe, __hyp_call_panic_vhe,
ARM64_HAS_VIRT_HOST_EXTN);
-void __hyp_text __noreturn __hyp_panic(void)
+void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *__host_ctxt)
{
+ struct kvm_vcpu *vcpu = NULL;
+
u64 spsr = read_sysreg_el2(spsr);
u64 elr = read_sysreg_el2(elr);
u64 par = read_sysreg(par_el1);
if (read_sysreg(vttbr_el2)) {
- struct kvm_vcpu *vcpu;
struct kvm_cpu_context *host_ctxt;
- vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2);
- host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+ host_ctxt = kern_hyp_va(__host_ctxt);
+ vcpu = host_ctxt->__hyp_running_vcpu;
__timer_disable_traps(vcpu);
__deactivate_traps(vcpu);
__deactivate_vm(vcpu);
@@ -472,7 +500,7 @@ void __hyp_text __noreturn __hyp_panic(void)
}
/* Call panic for real */
- __hyp_call_panic()(spsr, elr, par);
+ __hyp_call_panic()(spsr, elr, par, vcpu);
unreachable();
}
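
The early-return above answers PSCI_VERSION without a full world switch, which keeps the Spectre-v2 mitigation's frequent PSCI probing cheap. The bare value 2 is simply PSCI version 0.2 in the standard encoding, major number in bits [31:16] and minor in bits [15:0]; as a sketch:

#include <stdint.h>

#define PSCI_VERSION_MAJOR_SHIFT	16

static inline uint32_t psci_version(uint16_t major, uint16_t minor)
{
	return ((uint32_t)major << PSCI_VERSION_MAJOR_SHIFT) | minor;
}

/* psci_version(0, 2) == 2: the value the hyp fast path returns above */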
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index 934137647837..2c17afd2be96 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -27,8 +27,8 @@ static void __hyp_text __sysreg_do_nothing(struct kvm_cpu_context *ctxt) { }
/*
* Non-VHE: Both host and guest must save everything.
*
- * VHE: Host must save tpidr*_el[01], actlr_el1, mdscr_el1, sp0, pc,
- * pstate, and guest must save everything.
+ * VHE: Host must save tpidr*_el0, actlr_el1, mdscr_el1, sp_el0,
+ * and guest must save everything.
*/
static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
@@ -36,11 +36,8 @@ static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1);
ctxt->sys_regs[TPIDR_EL0] = read_sysreg(tpidr_el0);
ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0);
- ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1);
ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1);
ctxt->gp_regs.regs.sp = read_sysreg(sp_el0);
- ctxt->gp_regs.regs.pc = read_sysreg_el2(elr);
- ctxt->gp_regs.regs.pstate = read_sysreg_el2(spsr);
}
static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
@@ -62,10 +59,16 @@ static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
ctxt->sys_regs[AMAIR_EL1] = read_sysreg_el1(amair);
ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg_el1(cntkctl);
ctxt->sys_regs[PAR_EL1] = read_sysreg(par_el1);
+ ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1);
ctxt->gp_regs.sp_el1 = read_sysreg(sp_el1);
ctxt->gp_regs.elr_el1 = read_sysreg_el1(elr);
ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg_el1(spsr);
+ ctxt->gp_regs.regs.pc = read_sysreg_el2(elr);
+ ctxt->gp_regs.regs.pstate = read_sysreg_el2(spsr);
+
+ if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
+ ctxt->sys_regs[DISR_EL1] = read_sysreg_s(SYS_VDISR_EL2);
}
static hyp_alternate_select(__sysreg_call_save_host_state,
@@ -89,11 +92,8 @@ static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctx
write_sysreg(ctxt->sys_regs[ACTLR_EL1], actlr_el1);
write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0);
write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0);
- write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);
write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1);
write_sysreg(ctxt->gp_regs.regs.sp, sp_el0);
- write_sysreg_el2(ctxt->gp_regs.regs.pc, elr);
- write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr);
}
static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
@@ -115,10 +115,16 @@ static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1], amair);
write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1], cntkctl);
write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1);
+ write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);
write_sysreg(ctxt->gp_regs.sp_el1, sp_el1);
write_sysreg_el1(ctxt->gp_regs.elr_el1, elr);
write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1],spsr);
+ write_sysreg_el2(ctxt->gp_regs.regs.pc, elr);
+ write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr);
+
+ if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
+ write_sysreg_s(ctxt->sys_regs[DISR_EL1], SYS_VDISR_EL2);
}
static hyp_alternate_select(__sysreg_call_restore_host_state,
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index 8ecbcb40e317..60666a056944 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -164,14 +164,25 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
inject_undef64(vcpu);
}
+static void pend_guest_serror(struct kvm_vcpu *vcpu, u64 esr)
+{
+ vcpu_set_vsesr(vcpu, esr);
+ vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) | HCR_VSE);
+}
+
/**
* kvm_inject_vabt - inject an async abort / SError into the guest
* @vcpu: The VCPU to receive the exception
*
* It is assumed that this code is called from the VCPU thread and that the
* VCPU therefore is not currently executing guest code.
+ *
+ * On systems with the RAS Extensions we specify an imp-def ESR (ISV/IDS = 1)
+ * with the remaining ISS all-zeros, so that this error is not interpreted as
+ * an uncategorized RAS error. Without the RAS Extensions we can't specify an
+ * ESR value; the CPU generates an imp-def one instead.
*/
void kvm_inject_vabt(struct kvm_vcpu *vcpu)
{
- vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) | HCR_VSE);
+ pend_guest_serror(vcpu, ESR_ELx_ISV);
}
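
The 'ISV/IDS = 1' in the comment is deliberate: ESR bit 24 is named ISV for data aborts but IDS for SErrors, so passing ESR_ELx_ISV here sets the SError's IDS bit. What pend_guest_serror() arranges can be pictured as the following sketch (HCR_EL2.VSE is bit 8; struct and function names are illustrative):

#include <stdint.h>

#define HCR_VSE		(UINT64_C(1) << 8)	/* virtual SError pending */
#define ESR_ISV_IDS	(UINT64_C(1) << 24)	/* ISV for aborts, IDS for SErrors */

struct vserror_state { uint64_t hcr_el2, vsesr_el2; };

static inline void pend_vserror(struct vserror_state *s)
{
	s->vsesr_el2 = ESR_ISV_IDS;	/* imp-def syndrome, remaining ISS zero */
	s->hcr_el2  |= HCR_VSE;		/* delivered once the guest unmasks SError */
}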
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 1830ebc227d1..50a43c7b97ca 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1159,6 +1159,16 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },
+
+ { SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
+ { SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
+ { SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
+ { SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
+ { SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
+ { SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
+ { SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
+ { SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },
+
{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
@@ -1169,6 +1179,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
{ SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
+ { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
{ SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
{ SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
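
Exposing the new ERRIDR/ERRSELR/ERX* error-record registers as RAZ/WI gives a v8.2 guest an architecturally clean 'no error records' view instead of an UNDEF. A trap_raz_wi-style handler amounts to little more than the following (a sketch of the idiom with a stubbed-out parameter struct, not KVM's exact types):

#include <stdbool.h>
#include <stdint.h>

struct sysreg_params { bool is_write; uint64_t regval; };

/* read-as-zero / write-ignored emulation for a trapped system register */
static inline bool emulate_raz_wi(struct sysreg_params *p)
{
	if (!p->is_write)
		p->regval = 0;	/* reads return zero */
	/* writes are silently discarded */
	return true;		/* handled: the guest's PC is advanced */
}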
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index e88fb99c1561..3d69a8d41fa5 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -30,7 +30,7 @@
* Alignment fixed up by hardware.
*/
ENTRY(__clear_user)
- uaccess_enable_not_uao x2, x3
+ uaccess_enable_not_uao x2, x3, x4
mov x2, x1 // save the size for fixup return
subs x1, x1, #8
b.mi 2f
@@ -50,7 +50,7 @@ uao_user_alternative 9f, strh, sttrh, wzr, x0, 2
b.mi 5f
uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
5: mov x0, #0
- uaccess_disable_not_uao x2
+ uaccess_disable_not_uao x2, x3
ret
ENDPROC(__clear_user)
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 4b5d826895ff..20305d485046 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -64,10 +64,10 @@
end .req x5
ENTRY(__arch_copy_from_user)
- uaccess_enable_not_uao x3, x4
+ uaccess_enable_not_uao x3, x4, x5
add end, x0, x2
#include "copy_template.S"
- uaccess_disable_not_uao x3
+ uaccess_disable_not_uao x3, x4
mov x0, #0 // Nothing to copy
ret
ENDPROC(__arch_copy_from_user)
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index b24a830419ad..fbb090f431a5 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -65,10 +65,10 @@
end .req x5
ENTRY(raw_copy_in_user)
- uaccess_enable_not_uao x3, x4
+ uaccess_enable_not_uao x3, x4, x5
add end, x0, x2
#include "copy_template.S"
- uaccess_disable_not_uao x3
+ uaccess_disable_not_uao x3, x4
mov x0, #0
ret
ENDPROC(raw_copy_in_user)
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 351f0766f7a6..fda6172d6b88 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -63,10 +63,10 @@
end .req x5
ENTRY(__arch_copy_to_user)
- uaccess_enable_not_uao x3, x4
+ uaccess_enable_not_uao x3, x4, x5
add end, x0, x2
#include "copy_template.S"
- uaccess_disable_not_uao x3
+ uaccess_disable_not_uao x3, x4
mov x0, #0
ret
ENDPROC(__arch_copy_to_user)
diff --git a/arch/arm64/lib/tishift.S b/arch/arm64/lib/tishift.S
index 0179a43cc045..d3db9b2cd479 100644
--- a/arch/arm64/lib/tishift.S
+++ b/arch/arm64/lib/tishift.S
@@ -38,19 +38,19 @@ ENTRY(__ashlti3)
ENDPROC(__ashlti3)
ENTRY(__ashrti3)
- cbz x2, 3f
+ cbz x2, 1f
mov x3, #64
sub x3, x3, x2
cmp x3, #0
- b.le 4f
+ b.le 2f
lsr x0, x0, x2
lsl x3, x1, x3
asr x2, x1, x2
orr x0, x0, x3
mov x1, x2
-3:
+1:
ret
-4:
+2:
neg w0, w3
asr x2, x1, #63
asr x0, x1, x0
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 7f1dbe962cf5..91464e7f77cc 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -49,7 +49,7 @@ ENTRY(flush_icache_range)
* - end - virtual end address of region
*/
ENTRY(__flush_cache_user_range)
- uaccess_ttbr0_enable x2, x3
+ uaccess_ttbr0_enable x2, x3, x4
dcache_line_size x2, x3
sub x3, x2, #1
bic x4, x0, x3
@@ -72,7 +72,7 @@ USER(9f, ic ivau, x4 ) // invalidate I line PoU
isb
mov x0, #0
1:
- uaccess_ttbr0_disable x1
+ uaccess_ttbr0_disable x1, x2
ret
9:
mov x0, #-EFAULT
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 6f4017046323..301417ae2ba8 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -39,7 +39,16 @@ static cpumask_t tlb_flush_pending;
#define ASID_MASK (~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION (1UL << asid_bits)
-#define NUM_USER_ASIDS ASID_FIRST_VERSION
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+#define NUM_USER_ASIDS (ASID_FIRST_VERSION >> 1)
+#define asid2idx(asid) (((asid) & ~ASID_MASK) >> 1)
+#define idx2asid(idx) (((idx) << 1) & ~ASID_MASK)
+#else
+#define NUM_USER_ASIDS (ASID_FIRST_VERSION)
+#define asid2idx(asid) ((asid) & ~ASID_MASK)
+#define idx2asid(idx) asid2idx(idx)
+#endif
/* Get the ASIDBits supported by the current CPU */
static u32 get_cpu_asid_bits(void)
@@ -79,13 +88,6 @@ void verify_cpu_asid_bits(void)
}
}
-static void set_reserved_asid_bits(void)
-{
- if (IS_ENABLED(CONFIG_QCOM_FALKOR_ERRATUM_1003) &&
- cpus_have_const_cap(ARM64_WORKAROUND_QCOM_FALKOR_E1003))
- __set_bit(FALKOR_RESERVED_ASID, asid_map);
-}
-
static void flush_context(unsigned int cpu)
{
int i;
@@ -94,8 +96,6 @@ static void flush_context(unsigned int cpu)
/* Update the list of reserved ASIDs and the ASID bitmap. */
bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
- set_reserved_asid_bits();
-
for_each_possible_cpu(i) {
asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
/*
@@ -107,7 +107,7 @@ static void flush_context(unsigned int cpu)
*/
if (asid == 0)
asid = per_cpu(reserved_asids, i);
- __set_bit(asid & ~ASID_MASK, asid_map);
+ __set_bit(asid2idx(asid), asid_map);
per_cpu(reserved_asids, i) = asid;
}
@@ -162,16 +162,16 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
* We had a valid ASID in a previous life, so try to re-use
* it if possible.
*/
- asid &= ~ASID_MASK;
- if (!__test_and_set_bit(asid, asid_map))
+ if (!__test_and_set_bit(asid2idx(asid), asid_map))
return newasid;
}
/*
* Allocate a free ASID. If we can't find one, take a note of the
- * currently active ASIDs and mark the TLBs as requiring flushes.
- * We always count from ASID #1, as we use ASID #0 when setting a
- * reserved TTBR0 for the init_mm.
+ * currently active ASIDs and mark the TLBs as requiring flushes. We
+ * always count from ASID #2 (index 1), as we use ASID #0 when setting
+ * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
+ * pairs.
*/
asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
if (asid != NUM_USER_ASIDS)
@@ -188,32 +188,35 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
set_asid:
__set_bit(asid, asid_map);
cur_idx = asid;
- return asid | generation;
+ return idx2asid(asid) | generation;
}
void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
{
unsigned long flags;
- u64 asid;
+ u64 asid, old_active_asid;
asid = atomic64_read(&mm->context.id);
/*
* The memory ordering here is subtle.
- * If our ASID matches the current generation, then we update
- * our active_asids entry with a relaxed xchg. Racing with a
- * concurrent rollover means that either:
+ * If our active_asids is non-zero and the ASID matches the current
+ * generation, then we update the active_asids entry with a relaxed
+ * cmpxchg. Racing with a concurrent rollover means that either:
*
- * - We get a zero back from the xchg and end up waiting on the
+ * - We get a zero back from the cmpxchg and end up waiting on the
* lock. Taking the lock synchronises with the rollover and so
* we are forced to see the updated generation.
*
- * - We get a valid ASID back from the xchg, which means the
+ * - We get a valid ASID back from the cmpxchg, which means the
* relaxed xchg in flush_context will treat us as reserved
* because atomic RmWs are totally ordered for a given location.
*/
- if (!((asid ^ atomic64_read(&asid_generation)) >> asid_bits)
- && atomic64_xchg_relaxed(&per_cpu(active_asids, cpu), asid))
+ old_active_asid = atomic64_read(&per_cpu(active_asids, cpu));
+ if (old_active_asid &&
+ !((asid ^ atomic64_read(&asid_generation)) >> asid_bits) &&
+ atomic64_cmpxchg_relaxed(&per_cpu(active_asids, cpu),
+ old_active_asid, asid))
goto switch_mm_fastpath;
raw_spin_lock_irqsave(&cpu_asid_lock, flags);
@@ -231,6 +234,9 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
switch_mm_fastpath:
+
+ arm64_apply_bp_hardening();
+
/*
* Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
* emulating PAN.
@@ -239,6 +245,15 @@ switch_mm_fastpath:
cpu_switch_mm(mm->pgd, mm);
}
+/* Errata workaround post TTBRx_EL1 update. */
+asmlinkage void post_ttbr_update_workaround(void)
+{
+ asm(ALTERNATIVE("nop; nop; nop",
+ "ic iallu; dsb nsh; isb",
+ ARM64_WORKAROUND_CAVIUM_27456,
+ CONFIG_CAVIUM_ERRATUM_27456));
+}
+
static int asids_init(void)
{
asid_bits = get_cpu_asid_bits();
@@ -254,8 +269,6 @@ static int asids_init(void)
panic("Failed to allocate bitmap for %lu ASIDs\n",
NUM_USER_ASIDS);
- set_reserved_asid_bits();
-
pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
return 0;
}
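
With kernel unmap (KPTI) enabled, userspace only ever receives even ASIDs so that asid|1 is free to tag the matching trampoline mappings; asid2idx()/idx2asid() keep the allocator's bitmap dense despite handing out every other value. A standalone demonstration of the round trip, assuming 16-bit ASIDs for concreteness:

#include <assert.h>
#include <stdint.h>

#define ASID_BITS	16
#define ASID_MASK	(~((UINT64_C(1) << ASID_BITS) - 1))

/* KPTI case: even ASIDs for userspace, odd twins for the trampoline */
#define asid2idx(asid)	(((asid) & ~ASID_MASK) >> 1)
#define idx2asid(idx)	(((idx) << 1) & ~ASID_MASK)

int main(void)
{
	for (uint64_t idx = 0; idx < 256; idx++) {
		uint64_t asid = idx2asid(idx);

		assert((asid & 1) == 0);	/* userspace ASIDs stay even */
		assert(asid2idx(asid) == idx);	/* bitmap index round-trips */
	}
	return 0;
}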
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index b45c5bcaeccb..a96ec0181818 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -24,7 +24,7 @@
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
@@ -91,46 +91,6 @@ static int __free_from_pool(void *start, size_t size)
return 1;
}
-static void *__dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags,
- unsigned long attrs)
-{
- if (IS_ENABLED(CONFIG_ZONE_DMA) &&
- dev->coherent_dma_mask <= DMA_BIT_MASK(32))
- flags |= GFP_DMA;
- if (dev_get_cma_area(dev) && gfpflags_allow_blocking(flags)) {
- struct page *page;
- void *addr;
-
- page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
- get_order(size), flags);
- if (!page)
- return NULL;
-
- *dma_handle = phys_to_dma(dev, page_to_phys(page));
- addr = page_address(page);
- memset(addr, 0, size);
- return addr;
- } else {
- return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
- }
-}
-
-static void __dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle,
- unsigned long attrs)
-{
- bool freed;
- phys_addr_t paddr = dma_to_phys(dev, dma_handle);
-
-
- freed = dma_release_from_contiguous(dev,
- phys_to_page(paddr),
- size >> PAGE_SHIFT);
- if (!freed)
- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
-}
-
static void *__dma_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
unsigned long attrs)
@@ -152,7 +112,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
return addr;
}
- ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
+ ptr = swiotlb_alloc(dev, size, dma_handle, flags, attrs);
if (!ptr)
goto no_mem;
@@ -173,7 +133,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
return coherent_ptr;
no_map:
- __dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
+ swiotlb_free(dev, size, ptr, *dma_handle, attrs);
no_mem:
return NULL;
}
@@ -191,7 +151,7 @@ static void __dma_free(struct device *dev, size_t size,
return;
vunmap(vaddr);
}
- __dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
+ swiotlb_free(dev, size, swiotlb_addr, dma_handle, attrs);
}
static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
@@ -368,7 +328,7 @@ static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr)
return 0;
}
-static const struct dma_map_ops swiotlb_dma_ops = {
+static const struct dma_map_ops arm64_swiotlb_dma_ops = {
.alloc = __dma_alloc,
.free = __dma_free,
.mmap = __swiotlb_mmap,
@@ -397,7 +357,7 @@ static int __init atomic_pool_init(void)
page = dma_alloc_from_contiguous(NULL, nr_pages,
pool_size_order, GFP_KERNEL);
else
- page = alloc_pages(GFP_DMA, pool_size_order);
+ page = alloc_pages(GFP_DMA32, pool_size_order);
if (page) {
int ret;
@@ -923,7 +883,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent)
{
if (!dev->dma_ops)
- dev->dma_ops = &swiotlb_dma_ops;
+ dev->dma_ops = &arm64_swiotlb_dma_ops;
dev->archdata.dma_coherent = coherent;
__iommu_setup_dma_ops(dev, dma_base, size, iommu);
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 9b7f89df49db..ce441d29e7f6 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -596,7 +596,7 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
info.si_signo = SIGBUS;
info.si_errno = 0;
- info.si_code = 0;
+ info.si_code = BUS_FIXME;
if (esr & ESR_ELx_FnV)
info.si_addr = NULL;
else
@@ -607,70 +607,70 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
}
static const struct fault_info fault_info[] = {
- { do_bad, SIGBUS, 0, "ttbr address size fault" },
- { do_bad, SIGBUS, 0, "level 1 address size fault" },
- { do_bad, SIGBUS, 0, "level 2 address size fault" },
- { do_bad, SIGBUS, 0, "level 3 address size fault" },
+ { do_bad, SIGBUS, BUS_FIXME, "ttbr address size fault" },
+ { do_bad, SIGBUS, BUS_FIXME, "level 1 address size fault" },
+ { do_bad, SIGBUS, BUS_FIXME, "level 2 address size fault" },
+ { do_bad, SIGBUS, BUS_FIXME, "level 3 address size fault" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 0 translation fault" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
- { do_bad, SIGBUS, 0, "unknown 8" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 8" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" },
- { do_bad, SIGBUS, 0, "unknown 12" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 12" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" },
- { do_sea, SIGBUS, 0, "synchronous external abort" },
- { do_bad, SIGBUS, 0, "unknown 17" },
- { do_bad, SIGBUS, 0, "unknown 18" },
- { do_bad, SIGBUS, 0, "unknown 19" },
- { do_sea, SIGBUS, 0, "level 0 (translation table walk)" },
- { do_sea, SIGBUS, 0, "level 1 (translation table walk)" },
- { do_sea, SIGBUS, 0, "level 2 (translation table walk)" },
- { do_sea, SIGBUS, 0, "level 3 (translation table walk)" },
- { do_sea, SIGBUS, 0, "synchronous parity or ECC error" }, // Reserved when RAS is implemented
- { do_bad, SIGBUS, 0, "unknown 25" },
- { do_bad, SIGBUS, 0, "unknown 26" },
- { do_bad, SIGBUS, 0, "unknown 27" },
- { do_sea, SIGBUS, 0, "level 0 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented
- { do_sea, SIGBUS, 0, "level 1 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented
- { do_sea, SIGBUS, 0, "level 2 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented
- { do_sea, SIGBUS, 0, "level 3 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented
- { do_bad, SIGBUS, 0, "unknown 32" },
+ { do_sea, SIGBUS, BUS_FIXME, "synchronous external abort" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 17" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 18" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 19" },
+ { do_sea, SIGBUS, BUS_FIXME, "level 0 (translation table walk)" },
+ { do_sea, SIGBUS, BUS_FIXME, "level 1 (translation table walk)" },
+ { do_sea, SIGBUS, BUS_FIXME, "level 2 (translation table walk)" },
+ { do_sea, SIGBUS, BUS_FIXME, "level 3 (translation table walk)" },
+ { do_sea, SIGBUS, BUS_FIXME, "synchronous parity or ECC error" }, // Reserved when RAS is implemented
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 25" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 26" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 27" },
+ { do_sea, SIGBUS, BUS_FIXME, "level 0 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented
+ { do_sea, SIGBUS, BUS_FIXME, "level 1 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented
+ { do_sea, SIGBUS, BUS_FIXME, "level 2 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented
+ { do_sea, SIGBUS, BUS_FIXME, "level 3 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 32" },
{ do_alignment_fault, SIGBUS, BUS_ADRALN, "alignment fault" },
- { do_bad, SIGBUS, 0, "unknown 34" },
- { do_bad, SIGBUS, 0, "unknown 35" },
- { do_bad, SIGBUS, 0, "unknown 36" },
- { do_bad, SIGBUS, 0, "unknown 37" },
- { do_bad, SIGBUS, 0, "unknown 38" },
- { do_bad, SIGBUS, 0, "unknown 39" },
- { do_bad, SIGBUS, 0, "unknown 40" },
- { do_bad, SIGBUS, 0, "unknown 41" },
- { do_bad, SIGBUS, 0, "unknown 42" },
- { do_bad, SIGBUS, 0, "unknown 43" },
- { do_bad, SIGBUS, 0, "unknown 44" },
- { do_bad, SIGBUS, 0, "unknown 45" },
- { do_bad, SIGBUS, 0, "unknown 46" },
- { do_bad, SIGBUS, 0, "unknown 47" },
- { do_bad, SIGBUS, 0, "TLB conflict abort" },
- { do_bad, SIGBUS, 0, "Unsupported atomic hardware update fault" },
- { do_bad, SIGBUS, 0, "unknown 50" },
- { do_bad, SIGBUS, 0, "unknown 51" },
- { do_bad, SIGBUS, 0, "implementation fault (lockdown abort)" },
- { do_bad, SIGBUS, 0, "implementation fault (unsupported exclusive)" },
- { do_bad, SIGBUS, 0, "unknown 54" },
- { do_bad, SIGBUS, 0, "unknown 55" },
- { do_bad, SIGBUS, 0, "unknown 56" },
- { do_bad, SIGBUS, 0, "unknown 57" },
- { do_bad, SIGBUS, 0, "unknown 58" },
- { do_bad, SIGBUS, 0, "unknown 59" },
- { do_bad, SIGBUS, 0, "unknown 60" },
- { do_bad, SIGBUS, 0, "section domain fault" },
- { do_bad, SIGBUS, 0, "page domain fault" },
- { do_bad, SIGBUS, 0, "unknown 63" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 34" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 35" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 36" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 37" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 38" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 39" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 40" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 41" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 42" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 43" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 44" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 45" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 46" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 47" },
+ { do_bad, SIGBUS, BUS_FIXME, "TLB conflict abort" },
+ { do_bad, SIGBUS, BUS_FIXME, "Unsupported atomic hardware update fault" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 50" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 51" },
+ { do_bad, SIGBUS, BUS_FIXME, "implementation fault (lockdown abort)" },
+ { do_bad, SIGBUS, BUS_FIXME, "implementation fault (unsupported exclusive)" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 54" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 55" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 56" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 57" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 58" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 59" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 60" },
+ { do_bad, SIGBUS, BUS_FIXME, "section domain fault" },
+ { do_bad, SIGBUS, BUS_FIXME, "page domain fault" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 63" },
};
int handle_guest_sea(phys_addr_t addr, unsigned int esr)
@@ -707,6 +707,23 @@ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
arm64_notify_die("", regs, &info, esr);
}
+asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr,
+ unsigned int esr,
+ struct pt_regs *regs)
+{
+ /*
+ * We've taken an instruction abort from userspace and not yet
+ * re-enabled IRQs. If the address is a kernel address, apply
+ * BP hardening prior to enabling IRQs and pre-emption.
+ */
+ if (addr > TASK_SIZE)
+ arm64_apply_bp_hardening();
+
+ local_irq_enable();
+ do_mem_abort(addr, esr, regs);
+}
+
+
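
arm64_apply_bp_hardening(), called here and from the switch_mm fast path above, is on affected CPUs just an indirect call through a per-CPU slot that the errata framework populates at boot; unaffected CPUs see a NULL hook and pay almost nothing. The shape of it, as a sketch rather than the kernel's exact definition:

/* per-CPU branch-predictor hardening hook, installed by the errata code */
struct bp_hardening_data {
	void (*fn)(void);	/* CPU-specific invalidation sequence, or NULL */
};

static inline void apply_bp_hardening(struct bp_hardening_data *d)
{
	if (d->fn)		/* no-op on CPUs that don't need it */
		d->fn();
}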
asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
unsigned int esr,
struct pt_regs *regs)
@@ -739,11 +756,11 @@ static struct fault_info __refdata debug_fault_info[] = {
{ do_bad, SIGTRAP, TRAP_HWBKPT, "hardware breakpoint" },
{ do_bad, SIGTRAP, TRAP_HWBKPT, "hardware single-step" },
{ do_bad, SIGTRAP, TRAP_HWBKPT, "hardware watchpoint" },
- { do_bad, SIGBUS, 0, "unknown 3" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 3" },
{ do_bad, SIGTRAP, TRAP_BRKPT, "aarch32 BKPT" },
- { do_bad, SIGTRAP, 0, "aarch32 vector catch" },
+ { do_bad, SIGTRAP, TRAP_FIXME, "aarch32 vector catch" },
{ early_brk64, SIGTRAP, TRAP_BRKPT, "aarch64 BRK" },
- { do_bad, SIGBUS, 0, "unknown 7" },
+ { do_bad, SIGBUS, BUS_FIXME, "unknown 7" },
};
void __init hook_debug_fault_code(int nr,
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 00e7b900ca41..9f3c47acf8ff 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -217,7 +217,7 @@ static void __init reserve_elfcorehdr(void)
}
#endif /* CONFIG_CRASH_DUMP */
/*
- * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
+ * Return the maximum physical address for ZONE_DMA32 (DMA_BIT_MASK(32)). It
* currently assumes that for memory starting above 4G, 32-bit devices will
* use a DMA offset.
*/
@@ -233,8 +233,8 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
- if (IS_ENABLED(CONFIG_ZONE_DMA))
- max_zone_pfns[ZONE_DMA] = PFN_DOWN(max_zone_dma_phys());
+ if (IS_ENABLED(CONFIG_ZONE_DMA32))
+ max_zone_pfns[ZONE_DMA32] = PFN_DOWN(max_zone_dma_phys());
max_zone_pfns[ZONE_NORMAL] = max;
free_area_init_nodes(max_zone_pfns);
@@ -251,9 +251,9 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
memset(zone_size, 0, sizeof(zone_size));
/* 4GB maximum for 32-bit only capable devices */
-#ifdef CONFIG_ZONE_DMA
+#ifdef CONFIG_ZONE_DMA32
max_dma = PFN_DOWN(arm64_dma_phys_limit);
- zone_size[ZONE_DMA] = max_dma - min;
+ zone_size[ZONE_DMA32] = max_dma - min;
#endif
zone_size[ZONE_NORMAL] = max - max_dma;
@@ -266,10 +266,10 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
if (start >= max)
continue;
-#ifdef CONFIG_ZONE_DMA
+#ifdef CONFIG_ZONE_DMA32
if (start < max_dma) {
unsigned long dma_end = min(end, max_dma);
- zhole_size[ZONE_DMA] -= dma_end - start;
+ zhole_size[ZONE_DMA32] -= dma_end - start;
}
#endif
if (end > max_dma) {
@@ -366,6 +366,9 @@ void __init arm64_memblock_init(void)
/* Handle linux,usable-memory-range property */
fdt_enforce_memory_region();
+ /* Remove memory above our supported physical address size */
+ memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX);
+
/*
* Ensure that the linear region takes up exactly half of the kernel
* virtual address space. This way, we can distinguish a linear address
@@ -467,7 +470,7 @@ void __init arm64_memblock_init(void)
early_init_fdt_scan_reserved_mem();
/* 4GB maximum for 32-bit only capable devices */
- if (IS_ENABLED(CONFIG_ZONE_DMA))
+ if (IS_ENABLED(CONFIG_ZONE_DMA32))
arm64_dma_phys_limit = max_zone_dma_phys();
else
arm64_dma_phys_limit = PHYS_MASK + 1;
@@ -600,49 +603,6 @@ void __init mem_init(void)
mem_init_print_info(NULL);
-#define MLK(b, t) b, t, ((t) - (b)) >> 10
-#define MLM(b, t) b, t, ((t) - (b)) >> 20
-#define MLG(b, t) b, t, ((t) - (b)) >> 30
-#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
-
- pr_notice("Virtual kernel memory layout:\n");
-#ifdef CONFIG_KASAN
- pr_notice(" kasan : 0x%16lx - 0x%16lx (%6ld GB)\n",
- MLG(KASAN_SHADOW_START, KASAN_SHADOW_END));
-#endif
- pr_notice(" modules : 0x%16lx - 0x%16lx (%6ld MB)\n",
- MLM(MODULES_VADDR, MODULES_END));
- pr_notice(" vmalloc : 0x%16lx - 0x%16lx (%6ld GB)\n",
- MLG(VMALLOC_START, VMALLOC_END));
- pr_notice(" .text : 0x%p" " - 0x%p" " (%6ld KB)\n",
- MLK_ROUNDUP(_text, _etext));
- pr_notice(" .rodata : 0x%p" " - 0x%p" " (%6ld KB)\n",
- MLK_ROUNDUP(__start_rodata, __init_begin));
- pr_notice(" .init : 0x%p" " - 0x%p" " (%6ld KB)\n",
- MLK_ROUNDUP(__init_begin, __init_end));
- pr_notice(" .data : 0x%p" " - 0x%p" " (%6ld KB)\n",
- MLK_ROUNDUP(_sdata, _edata));
- pr_notice(" .bss : 0x%p" " - 0x%p" " (%6ld KB)\n",
- MLK_ROUNDUP(__bss_start, __bss_stop));
- pr_notice(" fixed : 0x%16lx - 0x%16lx (%6ld KB)\n",
- MLK(FIXADDR_START, FIXADDR_TOP));
- pr_notice(" PCI I/O : 0x%16lx - 0x%16lx (%6ld MB)\n",
- MLM(PCI_IO_START, PCI_IO_END));
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
- pr_notice(" vmemmap : 0x%16lx - 0x%16lx (%6ld GB maximum)\n",
- MLG(VMEMMAP_START, VMEMMAP_START + VMEMMAP_SIZE));
- pr_notice(" 0x%16lx - 0x%16lx (%6ld MB actual)\n",
- MLM((unsigned long)phys_to_page(memblock_start_of_DRAM()),
- (unsigned long)virt_to_page(high_memory)));
-#endif
- pr_notice(" memory : 0x%16lx - 0x%16lx (%6ld MB)\n",
- MLM(__phys_to_virt(memblock_start_of_DRAM()),
- (unsigned long)high_memory));
-
-#undef MLK
-#undef MLM
-#undef MLK_ROUNDUP
-
/*
* Check boundaries twice: Some fundamental inconsistencies can be
* detected at build time already.
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 267d2b79d52d..b44992ec9643 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -50,6 +50,7 @@
#define NO_CONT_MAPPINGS BIT(1)
u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
+u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
u64 kimage_voffset __ro_after_init;
EXPORT_SYMBOL(kimage_voffset);
@@ -525,6 +526,35 @@ static int __init parse_rodata(char *arg)
}
early_param("rodata", parse_rodata);
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+static int __init map_entry_trampoline(void)
+{
+ pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
+ phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);
+
+ /* The trampoline is always mapped and can therefore be global */
+ pgprot_val(prot) &= ~PTE_NG;
+
+ /* Map only the text into the trampoline page table */
+ memset(tramp_pg_dir, 0, PGD_SIZE);
+ __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
+ prot, pgd_pgtable_alloc, 0);
+
+ /* Map both the text and data into the kernel page table */
+ __set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+ extern char __entry_tramp_data_start[];
+
+ __set_fixmap(FIX_ENTRY_TRAMP_DATA,
+ __pa_symbol(__entry_tramp_data_start),
+ PAGE_KERNEL_RO);
+ }
+
+ return 0;
+}
+core_initcall(map_entry_trampoline);
+#endif
+
/*
* Create fine-grained mappings for the kernel.
*/
@@ -570,8 +600,8 @@ static void __init map_kernel(pgd_t *pgd)
* entry instead.
*/
BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
- set_pud(pud_set_fixmap_offset(pgd, FIXADDR_START),
- __pud(__pa_symbol(bm_pmd) | PUD_TYPE_TABLE));
+ pud_populate(&init_mm, pud_set_fixmap_offset(pgd, FIXADDR_START),
+ lm_alias(bm_pmd));
pud_clear_fixmap();
} else {
BUG();
@@ -612,7 +642,8 @@ void __init paging_init(void)
* allocated with it.
*/
memblock_free(__pa_symbol(swapper_pg_dir) + PAGE_SIZE,
- SWAPPER_DIR_SIZE - PAGE_SIZE);
+ __pa_symbol(swapper_pg_end) - __pa_symbol(swapper_pg_dir)
+ - PAGE_SIZE);
}
/*
@@ -686,7 +717,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
if (!p)
return -ENOMEM;
- set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
+ pmd_set_huge(pmd, __pa(p), __pgprot(PROT_SECT_NORMAL));
} else
vmemmap_verify((pte_t *)pmd, node, addr, next);
} while (addr = next, addr != end);
@@ -879,15 +910,19 @@ int __init arch_ioremap_pmd_supported(void)
int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
{
+ pgprot_t sect_prot = __pgprot(PUD_TYPE_SECT |
+ pgprot_val(mk_sect_prot(prot)));
BUG_ON(phys & ~PUD_MASK);
- set_pud(pud, __pud(phys | PUD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
+ set_pud(pud, pfn_pud(__phys_to_pfn(phys), sect_prot));
return 1;
}
int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
{
+ pgprot_t sect_prot = __pgprot(PMD_TYPE_SECT |
+ pgprot_val(mk_sect_prot(prot)));
BUG_ON(phys & ~PMD_MASK);
- set_pmd(pmd, __pmd(phys | PMD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
+ set_pmd(pmd, pfn_pmd(__phys_to_pfn(phys), sect_prot));
return 1;
}
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
index 051e71ec3335..289f9113a27a 100644
--- a/arch/arm64/mm/pgd.c
+++ b/arch/arm64/mm/pgd.c
@@ -49,6 +49,14 @@ void __init pgd_cache_init(void)
if (PGD_SIZE == PAGE_SIZE)
return;
+#ifdef CONFIG_ARM64_PA_BITS_52
+ /*
+ * With 52-bit physical addresses, the architecture requires the
+ * top-level table to be aligned to at least 64 bytes.
+ */
+ BUILD_BUG_ON(PGD_SIZE < 64);
+#endif
+
/*
* Naturally aligned pgds required by the architecture.
*/
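
BUILD_BUG_ON() turns the 64-byte alignment requirement into a compile-time failure rather than a runtime surprise. The classic freestanding formulation builds a type that is ill-formed exactly when the condition is true (the kernel's real macro, in linux/build_bug.h, is more elaborate but equivalent in spirit):

/* fails to compile iff cond is true: the array size would go negative */
#define BUILD_BUG_ON_SKETCH(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

static inline void alignment_checks(void)
{
	BUILD_BUG_ON_SKETCH(sizeof(long) < 4);		/* compiles */
	/* BUILD_BUG_ON_SKETCH(sizeof(long) < 100); would not compile */
}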
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 95233dfc4c39..9f177aac6390 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -70,7 +70,11 @@ ENTRY(cpu_do_suspend)
mrs x8, mdscr_el1
mrs x9, oslsr_el1
mrs x10, sctlr_el1
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
mrs x11, tpidr_el1
+alternative_else
+ mrs x11, tpidr_el2
+alternative_endif
mrs x12, sp_el0
stp x2, x3, [x0]
stp x4, xzr, [x0, #16]
@@ -116,7 +120,11 @@ ENTRY(cpu_do_resume)
msr mdscr_el1, x10
msr sctlr_el1, x12
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
msr tpidr_el1, x13
+alternative_else
+ msr tpidr_el2, x13
+alternative_endif
msr sp_el0, x14
/*
* Restore oslsr_el1 by writing oslar_el1
@@ -124,6 +132,11 @@ ENTRY(cpu_do_resume)
ubfx x11, x11, #1, #1
msr oslar_el1, x11
reset_pmuserenr_el0 x0 // Disable PMU access from EL0
+
+alternative_if ARM64_HAS_RAS_EXTN
+ msr_s SYS_DISR_EL1, xzr
+alternative_else_nop_endif
+
isb
ret
ENDPROC(cpu_do_resume)
@@ -138,13 +151,18 @@ ENDPROC(cpu_do_resume)
* - pgd_phys - physical address of new TTB
*/
ENTRY(cpu_do_switch_mm)
- pre_ttbr0_update_workaround x0, x2, x3
+ mrs x2, ttbr1_el1
mmid x1, x1 // get mm->context.id
- bfi x0, x1, #48, #16 // set the ASID
- msr ttbr0_el1, x0 // set TTBR0
+ phys_to_ttbr x0, x3
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+ bfi x3, x1, #48, #16 // set the ASID field in TTBR0
+#endif
+ bfi x2, x1, #48, #16 // set the ASID
+ msr ttbr1_el1, x2 // in TTBR1 (since TCR.A1 is set)
isb
- post_ttbr0_update_workaround
- ret
+ msr ttbr0_el1, x3 // now update TTBR0
+ isb
+ b post_ttbr_update_workaround // Back to C code...
ENDPROC(cpu_do_switch_mm)
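
Because __cpu_setup now sets TCR_EL1.A1 (see the hunk below), the current ASID is taken from TTBR1_EL1, which permanently points at the kernel tables; the switch can therefore update the ASID and the user table base as two separate writes with an ISB between them, so the new tables are never walked under the old ASID. Each bfi above is, in C terms (a sketch, 16-bit ASIDs assumed):

#include <stdint.h>

#define TTBR_ASID_SHIFT	48

static inline uint64_t ttbr_set_asid(uint64_t ttbr, uint16_t asid)
{
	ttbr &= ~(UINT64_C(0xffff) << TTBR_ASID_SHIFT);	/* clear ASID[63:48] */
	return ttbr | ((uint64_t)asid << TTBR_ASID_SHIFT);
}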
.pushsection ".idmap.text", "ax"
@@ -158,14 +176,16 @@ ENTRY(idmap_cpu_replace_ttbr1)
save_and_disable_daif flags=x2
adrp x1, empty_zero_page
- msr ttbr1_el1, x1
+ phys_to_ttbr x1, x3
+ msr ttbr1_el1, x3
isb
tlbi vmalle1
dsb nsh
isb
- msr ttbr1_el1, x0
+ phys_to_ttbr x0, x3
+ msr ttbr1_el1, x3
isb
restore_daif x2
@@ -214,25 +234,19 @@ ENTRY(__cpu_setup)
/*
* Prepare SCTLR
*/
- adr x5, crval
- ldp w5, w6, [x5]
- mrs x0, sctlr_el1
- bic x0, x0, x5 // clear bits
- orr x0, x0, x6 // set bits
+ mov_q x0, SCTLR_EL1_SET
/*
* Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
* both user and kernel.
*/
ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
- TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0
+ TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0 | TCR_A1
tcr_set_idmap_t0sz x10, x9
/*
- * Read the PARange bits from ID_AA64MMFR0_EL1 and set the IPS bits in
- * TCR_EL1.
+ * Set the IPS bits in TCR_EL1.
*/
- mrs x9, ID_AA64MMFR0_EL1
- bfi x10, x9, #32, #3
+ tcr_compute_pa_size x10, #TCR_IPS_SHIFT, x5, x6
#ifdef CONFIG_ARM64_HW_AFDBM
/*
* Hardware update of the Access and Dirty bits.
@@ -249,21 +263,3 @@ ENTRY(__cpu_setup)
msr tcr_el1, x10
ret // return to head.S
ENDPROC(__cpu_setup)
-
- /*
- * We set the desired value explicitly, including those of the
- * reserved bits. The values of bits EE & E0E were set early in
- * el2_setup, which are left untouched below.
- *
- * n n T
- * U E WT T UD US IHBS
- * CE0 XWHW CZ ME TEEA S
- * .... .IEE .... NEAI TE.I ..AD DEN0 ACAM
- * 0011 0... 1101 ..0. ..0. 10.. .0.. .... < hardware reserved
- * .... .1.. .... 01.1 11.1 ..01 0.01 1101 < software settings
- */
- .type crval, #object
-crval:
- .word 0xfcffffff // clear
- .word 0x34d5d91d // set
- .popsection
diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S
index 401ceb71540c..c5f05c4a4d00 100644
--- a/arch/arm64/xen/hypercall.S
+++ b/arch/arm64/xen/hypercall.S
@@ -101,12 +101,12 @@ ENTRY(privcmd_call)
* need the explicit uaccess_enable/disable if the TTBR0 PAN emulation
* is enabled (it implies that hardware UAO and PAN are disabled).
*/
- uaccess_ttbr0_enable x6, x7
+ uaccess_ttbr0_enable x6, x7, x8
hvc XEN_IMM
/*
* Disable userspace access from kernel once the hyp call completed.
*/
- uaccess_ttbr0_disable x6
+ uaccess_ttbr0_disable x6, x7
ret
ENDPROC(privcmd_call);
diff --git a/arch/blackfin/include/asm/thread_info.h b/arch/blackfin/include/asm/thread_info.h
index 2966b93850a1..a5aeab4e5f2d 100644
--- a/arch/blackfin/include/asm/thread_info.h
+++ b/arch/blackfin/include/asm/thread_info.h
@@ -56,8 +56,6 @@ struct thread_info {
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
/* Given a task stack pointer, you can find its corresponding
* thread_info structure just by masking it to the THREAD_SIZE
diff --git a/arch/blackfin/include/uapi/asm/poll.h b/arch/blackfin/include/uapi/asm/poll.h
index 8b094d43e9b7..3b162f2d2970 100644
--- a/arch/blackfin/include/uapi/asm/poll.h
+++ b/arch/blackfin/include/uapi/asm/poll.h
@@ -9,8 +9,25 @@
#ifndef _UAPI__BFIN_POLL_H
#define _UAPI__BFIN_POLL_H
-#define POLLWRNORM 4 /* POLLOUT */
-#define POLLWRBAND 256
+#ifndef __KERNEL__
+#define POLLWRNORM POLLOUT
+#define POLLWRBAND (__force __poll_t)256
+#else
+#define __ARCH_HAS_MANGLED_POLL
+static inline __u16 mangle_poll(__poll_t val)
+{
+ __u16 v = (__force __u16)val;
+ /* bit 9 -> bit 8, bit 8 -> bit 2 */
+ return (v & ~0x300) | ((v & 0x200) >> 1) | ((v & 0x100) >> 6);
+}
+
+static inline __poll_t demangle_poll(__u16 v)
+{
+ /* bit 8 -> bit 9, bit 2 -> bits 2 and 8 */
+ return (__force __poll_t)((v & ~0x100) | ((v & 0x100) << 1) |
+ ((v & 4) << 6));
+}
+#endif
#include <asm-generic/poll.h>
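
The mangle/demangle pair papers over blackfin's historical flag values, POLLWRNORM = 4 (aliasing POLLOUT) and POLLWRBAND = 256, versus the generic 256 and 512. The essential property is that any mask userspace can legally hand in survives a demangle/mangle round trip; that is easy to check standalone (plain uint16_t stands in for __poll_t here):

#include <assert.h>
#include <stdint.h>

static uint16_t mangle(uint16_t v)	/* kernel mask -> blackfin user mask */
{
	return (v & ~0x300) | ((v & 0x200) >> 1) | ((v & 0x100) >> 6);
}

static uint16_t demangle(uint16_t v)	/* blackfin user mask -> kernel mask */
{
	return (v & ~0x100) | ((v & 0x100) << 1) | ((v & 4) << 6);
}

int main(void)
{
	/* all user-visible masks built from POLLOUT (4) and POLLWRBAND (256) */
	const uint16_t masks[] = { 0x000, 0x004, 0x100, 0x104 };

	for (unsigned int i = 0; i < 4; i++)
		assert(mangle(demangle(masks[i])) == masks[i]);
	return 0;
}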
diff --git a/arch/blackfin/include/uapi/asm/siginfo.h b/arch/blackfin/include/uapi/asm/siginfo.h
index b1db506c8d2e..2dd8c9c39248 100644
--- a/arch/blackfin/include/uapi/asm/siginfo.h
+++ b/arch/blackfin/include/uapi/asm/siginfo.h
@@ -11,40 +11,6 @@
#include <linux/types.h>
#include <asm-generic/siginfo.h>
-#define UID16_SIGINFO_COMPAT_NEEDED
-
#define si_uid16 _sifields._kill._uid
-#define ILL_ILLPARAOP 2 /* illegal opcode combine ********** */
-#define ILL_ILLEXCPT 4 /* unrecoverable exception ********** */
-#define ILL_CPLB_VI 9 /* D/I CPLB protect violation ******** */
-#define ILL_CPLB_MISS 10 /* D/I CPLB miss ******** */
-#define ILL_CPLB_MULHIT 11 /* D/I CPLB multiple hit ******** */
-#undef NSIGILL
-#define NSIGILL 11
-
-/*
- * SIGBUS si_codes
- */
-#define BUS_OPFETCH 4 /* error from instruction fetch ******** */
-#undef NSIGBUS
-#define NSIGBUS 4
-
-/*
- * SIGTRAP si_codes
- */
-#define TRAP_STEP 1 /* single-step breakpoint************* */
-#define TRAP_TRACEFLOW 2 /* trace buffer overflow ************* */
-#define TRAP_WATCHPT 3 /* watchpoint match ************* */
-#define TRAP_ILLTRAP 4 /* illegal trap ************* */
-#undef NSIGTRAP
-#define NSIGTRAP 4
-
-/*
- * SIGSEGV si_codes
- */
-#define SEGV_STACKFLOW 3 /* stack overflow */
-#undef NSIGSEGV
-#define NSIGSEGV 3
-
#endif /* _UAPI_BFIN_SIGINFO_H */
diff --git a/arch/c6x/include/asm/thread_info.h b/arch/c6x/include/asm/thread_info.h
index acc70c135ab8..59a5697fe0f3 100644
--- a/arch/c6x/include/asm/thread_info.h
+++ b/arch/c6x/include/asm/thread_info.h
@@ -60,9 +60,6 @@ struct thread_info {
.addr_limit = KERNEL_DS, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
/* get the thread information struct of current task */
static inline __attribute__((const))
struct thread_info *current_thread_info(void)
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index 54d3f426763b..cd5a0865c97f 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -33,6 +33,9 @@ config GENERIC_CALIBRATE_DELAY
config NO_IOPORT_MAP
def_bool y if !PCI
+config NO_DMA
+ def_bool y if !PCI
+
config FORCE_MAX_ZONEORDER
int
default 6
@@ -72,6 +75,7 @@ config CRIS
select GENERIC_SCHED_CLOCK if ETRAX_ARCH_V32
select HAVE_DEBUG_BUGVERBOSE if ETRAX_ARCH_V32
select HAVE_NMI
+ select DMA_DIRECT_OPS if PCI
config HZ
int
diff --git a/arch/cris/arch-v10/drivers/gpio.c b/arch/cris/arch-v10/drivers/gpio.c
index 68dbe261dc57..a2986c60aaac 100644
--- a/arch/cris/arch-v10/drivers/gpio.c
+++ b/arch/cris/arch-v10/drivers/gpio.c
@@ -50,7 +50,7 @@ static ssize_t gpio_write(struct file *file, const char __user *buf,
size_t count, loff_t *off);
static int gpio_open(struct inode *inode, struct file *filp);
static int gpio_release(struct inode *inode, struct file *filp);
-static unsigned int gpio_poll(struct file *filp, struct poll_table_struct *wait);
+static __poll_t gpio_poll(struct file *filp, struct poll_table_struct *wait);
/* private data per open() of this driver */
@@ -141,9 +141,9 @@ static unsigned long dir_g_shadow; /* 1=output */
#define USE_PORTS(priv) ((priv)->minor <= GPIO_MINOR_B)
-static unsigned int gpio_poll(struct file *file, poll_table *wait)
+static __poll_t gpio_poll(struct file *file, poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
struct gpio_private *priv = file->private_data;
unsigned long data;
unsigned long flags;
diff --git a/arch/cris/arch-v10/drivers/sync_serial.c b/arch/cris/arch-v10/drivers/sync_serial.c
index cfe9176f2205..177843c64071 100644
--- a/arch/cris/arch-v10/drivers/sync_serial.c
+++ b/arch/cris/arch-v10/drivers/sync_serial.c
@@ -157,7 +157,7 @@ static inline int sync_data_avail(struct sync_port *port);
static int sync_serial_open(struct inode *inode, struct file *file);
static int sync_serial_release(struct inode *inode, struct file *file);
-static unsigned int sync_serial_poll(struct file *filp, poll_table *wait);
+static __poll_t sync_serial_poll(struct file *filp, poll_table *wait);
static long sync_serial_ioctl(struct file *file,
unsigned int cmd, unsigned long arg);
@@ -654,12 +654,12 @@ static int sync_serial_release(struct inode *inode, struct file *file)
-static unsigned int sync_serial_poll(struct file *file, poll_table *wait)
+static __poll_t sync_serial_poll(struct file *file, poll_table *wait)
{
int dev = MINOR(file_inode(file)->i_rdev);
- unsigned int mask = 0;
+ __poll_t mask = 0;
struct sync_port *port;
- DEBUGPOLL(static unsigned int prev_mask = 0);
+ DEBUGPOLL(static __poll_t prev_mask = 0);
port = &ports[dev];
poll_wait(file, &port->out_wait_q, wait);
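
The unsigned int to __poll_t churn through these drivers is a pure static-typing exercise: __poll_t is a sparse 'bitwise' type, so mixing a poll mask with an ordinary integer now warns under a C=1 build while compiling to identical code. The mechanism in miniature, assuming the usual __CHECKER__ convention from linux/types.h (type and flag names here are stand-ins):

#ifdef __CHECKER__
#define __bitwise	__attribute__((bitwise))
#define __force		__attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned __bitwise my_poll_t;	/* stand-in for __poll_t */

#define MY_POLLIN	((__force my_poll_t)0x0001)

static inline my_poll_t poll_mask_example(int readable)
{
	my_poll_t mask = 0;	/* sparse tracks this separately from plain int */

	if (readable)
		mask |= MY_POLLIN;
	return mask;
}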
diff --git a/arch/cris/arch-v32/drivers/cryptocop.c b/arch/cris/arch-v32/drivers/cryptocop.c
index d688fe117dca..a3c353472a8c 100644
--- a/arch/cris/arch-v32/drivers/cryptocop.c
+++ b/arch/cris/arch-v32/drivers/cryptocop.c
@@ -2717,37 +2717,28 @@ static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsig
}
}
- /* Acquire the mm page semaphore. */
- down_read(&current->mm->mmap_sem);
-
- err = get_user_pages((unsigned long int)(oper.indata + prev_ix),
+ err = get_user_pages_fast((unsigned long)(oper.indata + prev_ix),
noinpages,
- 0, /* read access only for in data */
- inpages,
- NULL);
+ false, /* read access only for in data */
+ inpages);
if (err < 0) {
- up_read(&current->mm->mmap_sem);
nooutpages = noinpages = 0;
DEBUG_API(printk("cryptocop_ioctl_process: get_user_pages indata\n"));
goto error_cleanup;
}
noinpages = err;
- if (oper.do_cipher){
- err = get_user_pages((unsigned long int)oper.cipher_outdata,
+ if (oper.do_cipher) {
+ err = get_user_pages_fast((unsigned long)oper.cipher_outdata,
nooutpages,
- FOLL_WRITE, /* write access for out data */
- outpages,
- NULL);
- up_read(&current->mm->mmap_sem);
+ true, /* write access for out data */
+ outpages);
if (err < 0) {
nooutpages = 0;
DEBUG_API(printk("cryptocop_ioctl_process: get_user_pages outdata\n"));
goto error_cleanup;
}
nooutpages = err;
- } else {
- up_read(&current->mm->mmap_sem);
}
/* Add 6 to nooutpages to make room for possibly inserted buffers for storing digest and
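
get_user_pages_fast() manages mmap_sem itself, taking it only when the lockless walk fails, which is why the explicit down_read()/up_read() bracketing could be deleted above. The caller keeps the usual obligations: treat a short return as failure and release whatever did get pinned. Sketched with this era's (start, nr_pages, write, pages) signature:

#include <linux/mm.h>

/* sketch of the caller-side contract around get_user_pages_fast() */
static int pin_user_buffer(unsigned long uaddr, int nr_pages,
			   struct page **pages)
{
	int pinned = get_user_pages_fast(uaddr, nr_pages, false, pages);

	if (pinned < 0)
		return pinned;		/* hard error: nothing was pinned */
	if (pinned < nr_pages) {	/* partial pin: back out and fail */
		while (pinned--)
			put_page(pages[pinned]);
		return -EFAULT;
	}
	return 0;			/* caller put_page()s each page when done */
}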
diff --git a/arch/cris/arch-v32/drivers/pci/Makefile b/arch/cris/arch-v32/drivers/pci/Makefile
index bff7482f2444..93c8be6170b1 100644
--- a/arch/cris/arch-v32/drivers/pci/Makefile
+++ b/arch/cris/arch-v32/drivers/pci/Makefile
@@ -2,4 +2,4 @@
# Makefile for Etrax cardbus driver
#
-obj-$(CONFIG_ETRAX_CARDBUS) += bios.o dma.o
+obj-$(CONFIG_ETRAX_CARDBUS) += bios.o
diff --git a/arch/cris/arch-v32/drivers/pci/dma.c b/arch/cris/arch-v32/drivers/pci/dma.c
deleted file mode 100644
index dbbd3816cc0b..000000000000
--- a/arch/cris/arch-v32/drivers/pci/dma.c
+++ /dev/null
@@ -1,80 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Dynamic DMA mapping support.
- *
- * On cris there is no hardware dynamic DMA address translation,
- * so consistent alloc/free are merely page allocation/freeing.
- * The rest of the dynamic DMA mapping interface is implemented
- * in asm/pci.h.
- *
- * Borrowed from i386.
- */
-
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/pci.h>
-#include <linux/gfp.h>
-#include <asm/io.h>
-
-static void *v32_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
-{
- void *ret;
-
- /* ignore region specifiers */
- gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
-
- if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
- gfp |= GFP_DMA;
-
- ret = (void *)__get_free_pages(gfp, get_order(size));
-
- if (ret != NULL) {
- memset(ret, 0, size);
- *dma_handle = virt_to_phys(ret);
- }
- return ret;
-}
-
-static void v32_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, unsigned long attrs)
-{
- free_pages((unsigned long)vaddr, get_order(size));
-}
-
-static inline dma_addr_t v32_dma_map_page(struct device *dev,
- struct page *page, unsigned long offset, size_t size,
- enum dma_data_direction direction, unsigned long attrs)
-{
- return page_to_phys(page) + offset;
-}
-
-static inline int v32_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction,
- unsigned long attrs)
-{
- printk("Map sg\n");
- return nents;
-}
-
-static inline int v32_dma_supported(struct device *dev, u64 mask)
-{
- /*
- * we fall back to GFP_DMA when the mask isn't all 1s,
- * so we can't guarantee allocations that must be
- * within a tighter range than GFP_DMA..
- */
- if (mask < 0x00ffffff)
- return 0;
- return 1;
-}
-
-const struct dma_map_ops v32_dma_ops = {
- .alloc = v32_dma_alloc,
- .free = v32_dma_free,
- .map_page = v32_dma_map_page,
- .map_sg = v32_dma_map_sg,
- .dma_supported = v32_dma_supported,
-};
-EXPORT_SYMBOL(v32_dma_ops);
diff --git a/arch/cris/arch-v32/drivers/sync_serial.c b/arch/cris/arch-v32/drivers/sync_serial.c
index 8efcc1a899a8..e20e0b9a3a5c 100644
--- a/arch/cris/arch-v32/drivers/sync_serial.c
+++ b/arch/cris/arch-v32/drivers/sync_serial.c
@@ -178,7 +178,7 @@ static inline int sync_data_avail(struct sync_port *port);
static int sync_serial_open(struct inode *, struct file *);
static int sync_serial_release(struct inode *, struct file *);
-static unsigned int sync_serial_poll(struct file *filp, poll_table *wait);
+static __poll_t sync_serial_poll(struct file *filp, poll_table *wait);
static long sync_serial_ioctl(struct file *file,
unsigned int cmd, unsigned long arg);
@@ -555,13 +555,13 @@ static int sync_serial_release(struct inode *inode, struct file *file)
return 0;
}
-static unsigned int sync_serial_poll(struct file *file, poll_table *wait)
+static __poll_t sync_serial_poll(struct file *file, poll_table *wait)
{
int dev = iminor(file_inode(file));
- unsigned int mask = 0;
+ __poll_t mask = 0;
struct sync_port *port;
DEBUGPOLL(
- static unsigned int prev_mask;
+ static __poll_t prev_mask;
);
port = &ports[dev];
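This series retypes poll methods from unsigned int to __poll_t, here and in ia64's pfm_poll() further down. A minimal sketch of the resulting convention, assuming the 4.16-era EPOLL* bit constants; example_wq and data_ready are hypothetical names, not from the patch:

static __poll_t example_poll(struct file *file, poll_table *wait)
{
	__poll_t mask = 0;	/* bitwise type that sparse can check */

	poll_wait(file, &example_wq, wait);
	if (data_ready)
		mask |= EPOLLIN | EPOLLRDNORM;
	return mask;
}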
diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
index 460349cb147f..8cf45ac30c1b 100644
--- a/arch/cris/include/asm/Kbuild
+++ b/arch/cris/include/asm/Kbuild
@@ -5,6 +5,7 @@ generic-y += cmpxchg.h
generic-y += current.h
generic-y += device.h
generic-y += div64.h
+generic-y += dma-mapping.h
generic-y += emergency-restart.h
generic-y += exec.h
generic-y += extable.h
diff --git a/arch/cris/include/asm/dma-mapping.h b/arch/cris/include/asm/dma-mapping.h
deleted file mode 100644
index 1553bdb30a0c..000000000000
--- a/arch/cris/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_CRIS_DMA_MAPPING_H
-#define _ASM_CRIS_DMA_MAPPING_H
-
-#ifdef CONFIG_PCI
-extern const struct dma_map_ops v32_dma_ops;
-
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
- return &v32_dma_ops;
-}
-#else
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
- BUG();
- return NULL;
-}
-#endif
-
-#endif
diff --git a/arch/cris/include/asm/processor.h b/arch/cris/include/asm/processor.h
index 124dd5ec7f65..ee4d8b03d048 100644
--- a/arch/cris/include/asm/processor.h
+++ b/arch/cris/include/asm/processor.h
@@ -26,13 +26,6 @@ struct task_struct;
*/
#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
-/* THREAD_SIZE is the size of the thread_info/kernel_stack combo.
- * normally, the stack is found by doing something like p + THREAD_SIZE
- * in CRIS, a page is 8192 bytes, which seems like a sane size
- */
-#define THREAD_SIZE PAGE_SIZE
-#define THREAD_SIZE_ORDER (0)
-
/*
* At user->kernel entry, the pt_regs struct is stacked on the top of the kernel-stack.
* This macro allows us to find those regs for a task.
@@ -59,8 +52,6 @@ static inline void release_thread(struct task_struct *dead_task)
/* Nothing needs to be done. */
}
-#define init_stack (init_thread_union.stack)
-
#define cpu_relax() barrier()
void default_idle(void);
diff --git a/arch/cris/include/asm/thread_info.h b/arch/cris/include/asm/thread_info.h
index 472830c90997..996fef3be1d5 100644
--- a/arch/cris/include/asm/thread_info.h
+++ b/arch/cris/include/asm/thread_info.h
@@ -20,6 +20,13 @@
#endif
+/* THREAD_SIZE is the size of the thread_info/kernel_stack combo.
+ * normally, the stack is found by doing something like p + THREAD_SIZE
+ * in CRIS, a page is 8192 bytes, which seems like a sane size
+ */
+#define THREAD_SIZE PAGE_SIZE
+#define THREAD_SIZE_ORDER (0)
+
/*
* low level task data that entry.S needs immediate access to
* - this struct should fit entirely inside of one cache line
@@ -56,8 +63,6 @@ struct thread_info {
.addr_limit = KERNEL_DS, \
}
-#define init_thread_info (init_thread_union.thread_info)
-
#endif /* !__ASSEMBLY__ */
/*
diff --git a/arch/cris/kernel/vmlinux.lds.S b/arch/cris/kernel/vmlinux.lds.S
index 6d1dbc1ba767..9b232e0f673e 100644
--- a/arch/cris/kernel/vmlinux.lds.S
+++ b/arch/cris/kernel/vmlinux.lds.S
@@ -11,6 +11,7 @@
#include <asm-generic/vmlinux.lds.h>
#include <asm/page.h>
+#include <asm/thread_info.h>
#ifdef CONFIG_ETRAX_VMEM_SIZE
#define __CONFIG_ETRAX_VMEM_SIZE CONFIG_ETRAX_VMEM_SIZE
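The linker script gains this include because the init_thread_info/init_stack macros removed in the hunks above no longer exist; the initial task stack is instead carved out by size in the generic linker macro, so the script must be able to see THREAD_SIZE. Paraphrased from the asm-generic/vmlinux.lds.h of this era, not quoted verbatim; the hexagon and ia64 scripts below gain the same include for the same reason:

/* Paraphrase: the initial task stack is now laid out by size, not via
 * an init_thread_union macro, so THREAD_SIZE must be visible here. */
#define INIT_TASK_DATA(align)					\
	. = ALIGN(align);					\
	__start_init_task = .;					\
	init_stack = .;						\
	KEEP(*(.data..init_task))				\
	KEEP(*(.data..init_thread_info))			\
	. = __start_init_task + THREAD_SIZE;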
diff --git a/arch/frv/include/asm/thread_info.h b/arch/frv/include/asm/thread_info.h
index ccba3b6ce918..0f950845fad9 100644
--- a/arch/frv/include/asm/thread_info.h
+++ b/arch/frv/include/asm/thread_info.h
@@ -64,9 +64,6 @@ struct thread_info {
.addr_limit = KERNEL_DS, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
/* how to get the thread information struct from C */
register struct thread_info *__current_thread_info asm("gr15");
diff --git a/arch/frv/include/uapi/asm/Kbuild b/arch/frv/include/uapi/asm/Kbuild
index 14a2e9af97e9..5354b0f84d41 100644
--- a/arch/frv/include/uapi/asm/Kbuild
+++ b/arch/frv/include/uapi/asm/Kbuild
@@ -1,4 +1,5 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+generic-y += siginfo.h
generic-y += bpf_perf_event.h
diff --git a/arch/frv/include/uapi/asm/poll.h b/arch/frv/include/uapi/asm/poll.h
index 887b67288340..a44c8f0ebee7 100644
--- a/arch/frv/include/uapi/asm/poll.h
+++ b/arch/frv/include/uapi/asm/poll.h
@@ -2,12 +2,27 @@
#ifndef _ASM_POLL_H
#define _ASM_POLL_H
+#ifndef __KERNEL__
#define POLLWRNORM POLLOUT
-#define POLLWRBAND 256
+#define POLLWRBAND (__force __poll_t)256
+#else
+#define __ARCH_HAS_MANGLED_POLL
+static inline __u16 mangle_poll(__poll_t val)
+{
+ __u16 v = (__force __u16)val;
+ /* bit 9 -> bit 8, bit 8 -> bit 2 */
+ return (v & ~0x300) | ((v & 0x200) >> 1) | ((v & 0x100) >> 6);
+}
-#include <asm-generic/poll.h>
+static inline __poll_t demangle_poll(__u16 v)
+{
+ /* bit 8 -> bit 9, bit 2 -> bits 2 and 8 */
+ return (__force __poll_t)((v & ~0x100) | ((v & 0x100) << 1) |
+ ((v & 4) << 6));
+}
+#endif
+#include <asm-generic/poll.h>
#undef POLLREMOVE
#endif
-
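frv keeps nonstandard POLLWRNORM/POLLWRBAND bit positions, so the new mangle_poll()/demangle_poll() pair translates between the generic in-kernel poll encoding and the arch's user-visible one (m68k gets the identical pair later in this diff). A standalone, hedged illustration of the remapping; the test value 0x200 is the generic POLLWRBAND bit, everything else is scaffolding:

#include <assert.h>

static unsigned short mangle(unsigned short v)
{
	/* bit 9 -> bit 8, bit 8 -> bit 2, mirroring mangle_poll() */
	return (v & ~0x300) | ((v & 0x200) >> 1) | ((v & 0x100) >> 6);
}

static unsigned short demangle(unsigned short v)
{
	/* bit 8 -> bit 9, bit 2 -> bits 2 and 8, mirroring demangle_poll() */
	return (v & ~0x100) | ((v & 0x100) << 1) | ((v & 4) << 6);
}

int main(void)
{
	/* generic bit 9 must land on the arch's bit 8 (256), and back */
	assert(mangle(0x200) == 0x100);
	assert(demangle(mangle(0x200)) == 0x200);
	return 0;
}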
diff --git a/arch/frv/include/uapi/asm/siginfo.h b/arch/frv/include/uapi/asm/siginfo.h
deleted file mode 100644
index 4c8c975747ac..000000000000
--- a/arch/frv/include/uapi/asm/siginfo.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _ASM_SIGINFO_H
-#define _ASM_SIGINFO_H
-
-#include <linux/types.h>
-#include <asm-generic/siginfo.h>
-
-#define FPE_MDAOVF 9 /* media overflow */
-#undef NSIGFPE
-#define NSIGFPE 9
-
-#endif
-
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index f8d3fde08190..091d6d04b5e5 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -23,6 +23,7 @@ config H8300
select HAVE_ARCH_KGDB
select HAVE_ARCH_HASH
select CPU_NO_EFFICIENT_FFS
+ select DMA_DIRECT_OPS
config CPU_BIG_ENDIAN
def_bool y
diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild
index bc077491d299..642752c94306 100644
--- a/arch/h8300/include/asm/Kbuild
+++ b/arch/h8300/include/asm/Kbuild
@@ -9,6 +9,7 @@ generic-y += delay.h
generic-y += device.h
generic-y += div64.h
generic-y += dma.h
+generic-y += dma-mapping.h
generic-y += emergency-restart.h
generic-y += exec.h
generic-y += extable.h
diff --git a/arch/h8300/include/asm/dma-mapping.h b/arch/h8300/include/asm/dma-mapping.h
deleted file mode 100644
index 21bb1fc3a6f1..000000000000
--- a/arch/h8300/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _H8300_DMA_MAPPING_H
-#define _H8300_DMA_MAPPING_H
-
-extern const struct dma_map_ops h8300_dma_map_ops;
-
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
- return &h8300_dma_map_ops;
-}
-
-#endif
diff --git a/arch/h8300/include/asm/thread_info.h b/arch/h8300/include/asm/thread_info.h
index 072b92c0d8b5..0cdaa302d3d2 100644
--- a/arch/h8300/include/asm/thread_info.h
+++ b/arch/h8300/include/asm/thread_info.h
@@ -46,9 +46,6 @@ struct thread_info {
.addr_limit = KERNEL_DS, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
diff --git a/arch/h8300/kernel/Makefile b/arch/h8300/kernel/Makefile
index b62e830525c6..307aa51576dd 100644
--- a/arch/h8300/kernel/Makefile
+++ b/arch/h8300/kernel/Makefile
@@ -7,7 +7,7 @@ extra-y := vmlinux.lds
obj-y := process.o traps.o ptrace.o \
signal.o setup.o syscalls.o \
- irq.o entry.o dma.o
+ irq.o entry.o
obj-$(CONFIG_ROMKERNEL) += head_rom.o
obj-$(CONFIG_RAMKERNEL) += head_ram.o
diff --git a/arch/h8300/kernel/dma.c b/arch/h8300/kernel/dma.c
deleted file mode 100644
index 225dd0a188dc..000000000000
--- a/arch/h8300/kernel/dma.c
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- */
-
-#include <linux/dma-mapping.h>
-#include <linux/kernel.h>
-#include <linux/scatterlist.h>
-#include <linux/module.h>
-#include <asm/pgalloc.h>
-
-static void *dma_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp,
- unsigned long attrs)
-{
- void *ret;
-
- /* ignore region specifiers */
- gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
-
- if (dev == NULL || (*dev->dma_mask < 0xffffffff))
- gfp |= GFP_DMA;
- ret = (void *)__get_free_pages(gfp, get_order(size));
-
- if (ret != NULL) {
- memset(ret, 0, size);
- *dma_handle = virt_to_phys(ret);
- }
- return ret;
-}
-
-static void dma_free(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle,
- unsigned long attrs)
-
-{
- free_pages((unsigned long)vaddr, get_order(size));
-}
-
-static dma_addr_t map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction,
- unsigned long attrs)
-{
- return page_to_phys(page) + offset;
-}
-
-static int map_sg(struct device *dev, struct scatterlist *sgl,
- int nents, enum dma_data_direction direction,
- unsigned long attrs)
-{
- struct scatterlist *sg;
- int i;
-
- for_each_sg(sgl, sg, nents, i) {
- sg->dma_address = sg_phys(sg);
- }
-
- return nents;
-}
-
-const struct dma_map_ops h8300_dma_map_ops = {
- .alloc = dma_alloc,
- .free = dma_free,
- .map_page = map_page,
- .map_sg = map_sg,
-};
-EXPORT_SYMBOL(h8300_dma_map_ops);
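Every trivial dma_map_ops implementation deleted here (cris above, h8300 just now) encodes the same fact: without an IOMMU, the DMA address of a page is simply its physical address, which is what the generic DMA_DIRECT_OPS selected in the Kconfig hunks provide. A hedged distillation of what each deleted map_page callback computed, not the generic implementation verbatim:

/* Identity mapping: dma address == physical address. */
static dma_addr_t direct_map_page(struct page *page, unsigned long offset)
{
	return page_to_phys(page) + offset;
}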
diff --git a/arch/hexagon/include/asm/dma-mapping.h b/arch/hexagon/include/asm/dma-mapping.h
index 5208de242e79..263f6acbfb0f 100644
--- a/arch/hexagon/include/asm/dma-mapping.h
+++ b/arch/hexagon/include/asm/dma-mapping.h
@@ -37,11 +37,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
return dma_ops;
}
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
- if (!dev->dma_mask)
- return 0;
- return addr + size - 1 <= *dev->dma_mask;
-}
-
#endif
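hexagon here, and ia64 further down, delete identical local copies of dma_capable(); both now pick up the common helper instead. As best I recall the <linux/dma-direct.h> of this era, that helper performs the same mask check, quoted from memory rather than from this patch:

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	if (!dev->dma_mask)
		return false;
	return addr + size - 1 <= *dev->dma_mask;
}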
diff --git a/arch/hexagon/include/asm/io.h b/arch/hexagon/include/asm/io.h
index 66f5e9a61efc..9e8621d94ee9 100644
--- a/arch/hexagon/include/asm/io.h
+++ b/arch/hexagon/include/asm/io.h
@@ -330,8 +330,6 @@ static inline void outsl(unsigned long port, const void *buffer, int count)
}
}
-#define flush_write_buffers() do { } while (0)
-
#endif /* __KERNEL__ */
#endif
diff --git a/arch/hexagon/include/asm/thread_info.h b/arch/hexagon/include/asm/thread_info.h
index b80fe1db7b64..f41f9c6f0e31 100644
--- a/arch/hexagon/include/asm/thread_info.h
+++ b/arch/hexagon/include/asm/thread_info.h
@@ -84,9 +84,6 @@ struct thread_info {
.regs = NULL, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
/* Tacky preprocessor trickery */
#define qqstr(s) qstr(s)
#define qstr(s) #s
diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c
index 546792d176a4..ad8347c29dcf 100644
--- a/arch/hexagon/kernel/dma.c
+++ b/arch/hexagon/kernel/dma.c
@@ -19,6 +19,7 @@
*/
#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
#include <linux/bootmem.h>
#include <linux/genalloc.h>
#include <asm/dma-mapping.h>
diff --git a/arch/hexagon/kernel/vmlinux.lds.S b/arch/hexagon/kernel/vmlinux.lds.S
index ec87e67feb19..ad69d181c939 100644
--- a/arch/hexagon/kernel/vmlinux.lds.S
+++ b/arch/hexagon/kernel/vmlinux.lds.S
@@ -22,6 +22,8 @@
#include <asm/asm-offsets.h> /* Most of the kernel defines are here */
#include <asm/mem-layout.h> /* except for page_offset */
#include <asm/cache.h> /* and now we're pulling cache line size */
+#include <asm/thread_info.h> /* and we need THREAD_SIZE too */
+
OUTPUT_ARCH(hexagon)
ENTRY(stext)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 49583c5a5d44..bbe12a038d21 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -33,6 +33,7 @@ config IA64
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_VIRT_CPU_ACCOUNTING
+ select ARCH_HAS_DMA_MARK_CLEAN
select ARCH_HAS_SG_CHAIN
select VIRT_TO_BUS
select ARCH_DISCARD_MEMBLOCK
@@ -43,7 +44,7 @@ config IA64
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_IOMAP
select GENERIC_SMP_IDLE_THREAD
- select ARCH_INIT_TASK
+ select ARCH_TASK_STRUCT_ON_STACK
select ARCH_TASK_STRUCT_ALLOCATOR
select ARCH_THREAD_STACK_ALLOCATOR
select ARCH_CLOCKSOURCE_DATA
@@ -65,7 +66,7 @@ config 64BIT
select ATA_NONSTANDARD if ATA
default y
-config ZONE_DMA
+config ZONE_DMA32
def_bool y
depends on !IA64_SGI_SN2
@@ -145,6 +146,7 @@ config IA64_GENERIC
bool "generic"
select NUMA
select ACPI_NUMA
+ select DMA_DIRECT_OPS
select SWIOTLB
select PCI_MSI
help
@@ -165,6 +167,7 @@ config IA64_GENERIC
config IA64_DIG
bool "DIG-compliant"
+ select DMA_DIRECT_OPS
select SWIOTLB
config IA64_DIG_VTD
@@ -180,6 +183,7 @@ config IA64_HP_ZX1
config IA64_HP_ZX1_SWIOTLB
bool "HP-zx1/sx1000 with software I/O TLB"
+ select DMA_DIRECT_OPS
select SWIOTLB
help
Build a kernel that runs on HP zx1 and sx1000 systems even when they
@@ -203,6 +207,7 @@ config IA64_SGI_UV
bool "SGI-UV"
select NUMA
select ACPI_NUMA
+ select DMA_DIRECT_OPS
select SWIOTLB
help
Selecting this option will optimize the kernel for use on UV based
@@ -213,6 +218,7 @@ config IA64_SGI_UV
config IA64_HP_SIM
bool "Ski-simulator"
+ select DMA_DIRECT_OPS
select SWIOTLB
depends on !PM
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index c100d780f1eb..2dd7f519ad0b 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -42,7 +42,7 @@ $(error Sorry, you need a newer version of the assembler, one that is built from
endif
KBUILD_CFLAGS += $(cflags-y)
-head-y := arch/ia64/kernel/head.o arch/ia64/kernel/init_task.o
+head-y := arch/ia64/kernel/head.o
libs-y += arch/ia64/lib/
core-y += arch/ia64/kernel/ arch/ia64/mm/
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index 63d8e1d2477f..58969039bed2 100644
--- a/arch/ia64/hp/common/hwsw_iommu.c
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -19,7 +19,7 @@
#include <linux/export.h>
#include <asm/machvec.h>
-extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
+extern const struct dma_map_ops sba_dma_ops;
/* swiotlb declarations & definitions: */
extern int swiotlb_late_init_with_default_size (size_t size);
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index c1bab526a046..76e4d6632d68 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -8,7 +8,6 @@
*/
#include <asm/machvec.h>
#include <linux/scatterlist.h>
-#include <asm/swiotlb.h>
#include <linux/dma-debug.h>
#define ARCH_HAS_DMA_GET_REQUIRED_MASK
@@ -27,22 +26,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
return platform_dma_get_ops(NULL);
}
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
- if (!dev->dma_mask)
- return 0;
-
- return addr + size - 1 <= *dev->dma_mask;
-}
-
-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
- return paddr;
-}
-
-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
-{
- return daddr;
-}
-
#endif /* _ASM_IA64_DMA_MAPPING_H */
diff --git a/arch/ia64/include/asm/dma.h b/arch/ia64/include/asm/dma.h
index 186850eec934..23604d6a2cb2 100644
--- a/arch/ia64/include/asm/dma.h
+++ b/arch/ia64/include/asm/dma.h
@@ -20,6 +20,4 @@ extern unsigned long MAX_DMA_ADDRESS;
#define free_dma(x)
-void dma_mark_clean(void *addr, size_t size);
-
#endif /* _ASM_IA64_DMA_H */
diff --git a/arch/ia64/include/asm/swiotlb.h b/arch/ia64/include/asm/swiotlb.h
deleted file mode 100644
index 841e2c7d0b21..000000000000
--- a/arch/ia64/include/asm/swiotlb.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ASM_IA64__SWIOTLB_H
-#define ASM_IA64__SWIOTLB_H
-
-#include <linux/dma-mapping.h>
-#include <linux/swiotlb.h>
-
-#ifdef CONFIG_SWIOTLB
-extern int swiotlb;
-extern void pci_swiotlb_init(void);
-#else
-#define swiotlb 0
-static inline void pci_swiotlb_init(void)
-{
-}
-#endif
-
-#endif /* ASM_IA64__SWIOTLB_H */
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index 1d172a4119a7..64a1011f6812 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -12,6 +12,8 @@
#include <asm/processor.h>
#include <asm/ptrace.h>
+#define THREAD_SIZE KERNEL_STACK_SIZE
+
#ifndef __ASSEMBLY__
/*
@@ -41,8 +43,6 @@ struct thread_info {
#endif
};
-#define THREAD_SIZE KERNEL_STACK_SIZE
-
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
diff --git a/arch/ia64/include/uapi/asm/Kbuild b/arch/ia64/include/uapi/asm/Kbuild
index f5c6967a93bb..c0527cfc48f0 100644
--- a/arch/ia64/include/uapi/asm/Kbuild
+++ b/arch/ia64/include/uapi/asm/Kbuild
@@ -3,3 +3,4 @@ include include/uapi/asm-generic/Kbuild.asm
generic-y += bpf_perf_event.h
generic-y += kvm_para.h
+generic-y += poll.h
diff --git a/arch/ia64/include/uapi/asm/poll.h b/arch/ia64/include/uapi/asm/poll.h
deleted file mode 100644
index b7132a305a47..000000000000
--- a/arch/ia64/include/uapi/asm/poll.h
+++ /dev/null
@@ -1,2 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#include <asm-generic/poll.h>
diff --git a/arch/ia64/include/uapi/asm/siginfo.h b/arch/ia64/include/uapi/asm/siginfo.h
index f3a02a10c3a3..5aa454ed89db 100644
--- a/arch/ia64/include/uapi/asm/siginfo.h
+++ b/arch/ia64/include/uapi/asm/siginfo.h
@@ -11,77 +11,8 @@
#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
-#define HAVE_ARCH_SIGINFO_T
-#define HAVE_ARCH_COPY_SIGINFO_TO_USER
-
#include <asm-generic/siginfo.h>
-typedef struct siginfo {
- int si_signo;
- int si_errno;
- int si_code;
- int __pad0;
-
- union {
- int _pad[SI_PAD_SIZE];
-
- /* kill() */
- struct {
- pid_t _pid; /* sender's pid */
- uid_t _uid; /* sender's uid */
- } _kill;
-
- /* POSIX.1b timers */
- struct {
- timer_t _tid; /* timer id */
- int _overrun; /* overrun count */
- char _pad[sizeof(__ARCH_SI_UID_T) - sizeof(int)];
- sigval_t _sigval; /* must overlay ._rt._sigval! */
- int _sys_private; /* not to be passed to user */
- } _timer;
-
- /* POSIX.1b signals */
- struct {
- pid_t _pid; /* sender's pid */
- uid_t _uid; /* sender's uid */
- sigval_t _sigval;
- } _rt;
-
- /* SIGCHLD */
- struct {
- pid_t _pid; /* which child */
- uid_t _uid; /* sender's uid */
- int _status; /* exit code */
- clock_t _utime;
- clock_t _stime;
- } _sigchld;
-
- /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
- struct {
- void __user *_addr; /* faulting insn/memory ref. */
- int _imm; /* immediate value for "break" */
- unsigned int _flags; /* see below */
- unsigned long _isr; /* isr */
- short _addr_lsb; /* lsb of faulting address */
- union {
- /* used when si_code=SEGV_BNDERR */
- struct {
- void __user *_lower;
- void __user *_upper;
- } _addr_bnd;
- /* used when si_code=SEGV_PKUERR */
- __u32 _pkey;
- };
- } _sigfault;
-
- /* SIGPOLL */
- struct {
- long _band; /* POLL_IN, POLL_OUT, POLL_MSG (XPG requires a "long") */
- int _fd;
- } _sigpoll;
- } _sifields;
-} siginfo_t;
-
#define si_imm _sifields._sigfault._imm /* as per UNIX SysV ABI spec */
#define si_flags _sifields._sigfault._flags
/*
@@ -97,37 +28,10 @@ typedef struct siginfo {
#define __ISR_VALID (1 << __ISR_VALID_BIT)
/*
- * SIGILL si_codes
- */
-#define ILL_BADIADDR 9 /* unimplemented instruction address */
-#define __ILL_BREAK 10 /* illegal break */
-#define __ILL_BNDMOD 11 /* bundle-update (modification) in progress */
-#undef NSIGILL
-#define NSIGILL 11
-
-/*
* SIGFPE si_codes
*/
#ifdef __KERNEL__
#define FPE_FIXME 0 /* Broken dup of SI_USER */
#endif /* __KERNEL__ */
-#define __FPE_DECOVF 9 /* decimal overflow */
-#define __FPE_DECDIV 10 /* decimal division by zero */
-#define __FPE_DECERR 11 /* packed decimal error */
-#define __FPE_INVASC 12 /* invalid ASCII digit */
-#define __FPE_INVDEC 13 /* invalid decimal digit */
-#undef NSIGFPE
-#define NSIGFPE 13
-
-/*
- * SIGSEGV si_codes
- */
-#define __SEGV_PSTKOVF 4 /* paragraph stack overflow */
-#undef NSIGSEGV
-#define NSIGSEGV 4
-
-#undef NSIGTRAP
-#define NSIGTRAP 4
-
#endif /* _UAPI_ASM_IA64_SIGINFO_H */
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 14ad79f394e5..0b4c65a1af25 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -7,7 +7,7 @@ ifdef CONFIG_DYNAMIC_FTRACE
CFLAGS_REMOVE_ftrace.o = -pg
endif
-extra-y := head.o init_task.o vmlinux.lds
+extra-y := head.o vmlinux.lds
obj-y := entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \
irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 1d29b2f8726b..1dacbf5e9e09 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -504,6 +504,11 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
return -1;
+ if (num_node_memblks >= NR_NODE_MEMBLKS) {
+ pr_err("NUMA: too many memblk ranges\n");
+ return -EINVAL;
+ }
+
/* record this node in proximity bitmap */
pxm_bit_set(pxm);
diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
index 7a82c9259609..f2d57e66fd86 100644
--- a/arch/ia64/kernel/dma-mapping.c
+++ b/arch/ia64/kernel/dma-mapping.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/dma-mapping.h>
+#include <linux/swiotlb.h>
#include <linux/export.h>
/* Set this to 1 if there is a HW IOMMU in the system */
@@ -23,3 +24,11 @@ const struct dma_map_ops *dma_get_ops(struct device *dev)
return dma_ops;
}
EXPORT_SYMBOL(dma_get_ops);
+
+#ifdef CONFIG_SWIOTLB
+void __init swiotlb_dma_init(void)
+{
+ dma_ops = &swiotlb_dma_ops;
+ swiotlb_init(1);
+}
+#endif
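With the ia64-private swiotlb glue removed (see the pci-swiotlb.c deletion below), this small swiotlb_dma_init() is all that remains: it points dma_ops at the generic swiotlb_dma_ops and runs swiotlb_init(), whose argument is the verbose flag in this era. A hedged sketch of the fallback path that pci_iommu_alloc() takes further down; the wrapper name arch_setup_dma is hypothetical:

static void __init arch_setup_dma(void)
{
	if (!iommu_detected)
		swiotlb_dma_init();	/* dma_ops = &swiotlb_dma_ops */
}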
diff --git a/arch/ia64/kernel/init_task.c b/arch/ia64/kernel/init_task.c
deleted file mode 100644
index 8df9245e29d9..000000000000
--- a/arch/ia64/kernel/init_task.c
+++ /dev/null
@@ -1,44 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This is where we statically allocate and initialize the initial
- * task.
- *
- * Copyright (C) 1999, 2002-2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/fs.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/init_task.h>
-#include <linux/mqueue.h>
-
-#include <linux/uaccess.h>
-#include <asm/pgtable.h>
-
-static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
-static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-/*
- * Initial task structure.
- *
- * We need to make sure that this is properly aligned due to the way process stacks are
- * handled. This is done by having a special ".data..init_task" section...
- */
-#define init_thread_info init_task_mem.s.thread_info
-#define init_stack init_task_mem.stack
-
-union {
- struct {
- struct task_struct task;
- struct thread_info thread_info;
- } s;
- unsigned long stack[KERNEL_STACK_SIZE/sizeof (unsigned long)];
-} init_task_mem asm ("init_task") __init_task_data =
- {{
- .task = INIT_TASK(init_task_mem.s.task),
- .thread_info = INIT_THREAD_INFO(init_task_mem.s.task)
-}};
-
-EXPORT_SYMBOL(init_task);
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
index 3ba87c22dfbc..b5df084c0af4 100644
--- a/arch/ia64/kernel/pci-dma.c
+++ b/arch/ia64/kernel/pci-dma.c
@@ -12,12 +12,7 @@
#include <asm/iommu.h>
#include <asm/machvec.h>
#include <linux/dma-mapping.h>
-
-
-#ifdef CONFIG_INTEL_IOMMU
-
#include <linux/kernel.h>
-
#include <asm/page.h>
dma_addr_t bad_dma_address __read_mostly;
@@ -104,8 +99,14 @@ void __init pci_iommu_alloc(void)
detect_intel_iommu();
#ifdef CONFIG_SWIOTLB
- pci_swiotlb_init();
-#endif
+ if (!iommu_detected) {
+#ifdef CONFIG_IA64_GENERIC
+ printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n");
+ machvec_init("dig");
+ swiotlb_dma_init();
+#else
+ panic("Unable to find Intel IOMMU");
+#endif /* CONFIG_IA64_GENERIC */
+ }
+#endif /* CONFIG_SWIOTLB */
}
-
-#endif
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
deleted file mode 100644
index 5e50939aa03e..000000000000
--- a/arch/ia64/kernel/pci-swiotlb.c
+++ /dev/null
@@ -1,68 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Glue code to lib/swiotlb.c */
-
-#include <linux/pci.h>
-#include <linux/gfp.h>
-#include <linux/cache.h>
-#include <linux/module.h>
-#include <linux/dma-mapping.h>
-
-#include <asm/swiotlb.h>
-#include <asm/dma.h>
-#include <asm/iommu.h>
-#include <asm/machvec.h>
-
-int swiotlb __read_mostly;
-EXPORT_SYMBOL(swiotlb);
-
-static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp,
- unsigned long attrs)
-{
- if (dev->coherent_dma_mask != DMA_BIT_MASK(64))
- gfp |= GFP_DMA;
- return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
-}
-
-static void ia64_swiotlb_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_addr,
- unsigned long attrs)
-{
- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
-}
-
-const struct dma_map_ops swiotlb_dma_ops = {
- .alloc = ia64_swiotlb_alloc_coherent,
- .free = ia64_swiotlb_free_coherent,
- .map_page = swiotlb_map_page,
- .unmap_page = swiotlb_unmap_page,
- .map_sg = swiotlb_map_sg_attrs,
- .unmap_sg = swiotlb_unmap_sg_attrs,
- .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
- .sync_single_for_device = swiotlb_sync_single_for_device,
- .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
- .sync_sg_for_device = swiotlb_sync_sg_for_device,
- .dma_supported = swiotlb_dma_supported,
- .mapping_error = swiotlb_dma_mapping_error,
-};
-
-void __init swiotlb_dma_init(void)
-{
- dma_ops = &swiotlb_dma_ops;
- swiotlb_init(1);
-}
-
-void __init pci_swiotlb_init(void)
-{
- if (!iommu_detected) {
-#ifdef CONFIG_IA64_GENERIC
- swiotlb = 1;
- printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n");
- machvec_init("dig");
- swiotlb_init(1);
- dma_ops = &swiotlb_dma_ops;
-#else
- panic("Unable to find Intel IOMMU");
-#endif
- }
-}
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 09f86ebfcc7b..c44f002e8f6b 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -1644,12 +1644,12 @@ pfm_write(struct file *file, const char __user *ubuf,
return -EINVAL;
}
-static unsigned int
+static __poll_t
pfm_poll(struct file *filp, poll_table * wait)
{
pfm_context_t *ctx;
unsigned long flags;
- unsigned int mask = 0;
+ __poll_t mask = 0;
if (PFM_IS_FILE(filp) == 0) {
printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index a254cc98f95c..54547c7cf8a2 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -105,58 +105,6 @@ restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr)
return err;
}
-int
-copy_siginfo_to_user (siginfo_t __user *to, const siginfo_t *from)
-{
- if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
- return -EFAULT;
- if (from->si_code < 0) {
- if (__copy_to_user(to, from, sizeof(siginfo_t)))
- return -EFAULT;
- return 0;
- } else {
- int err;
-
- /*
- * If you change siginfo_t structure, please be sure this code is fixed
- * accordingly. It should never copy any pad contained in the structure
- * to avoid security leaks, but must copy the generic 3 ints plus the
- * relevant union member.
- */
- err = __put_user(from->si_signo, &to->si_signo);
- err |= __put_user(from->si_errno, &to->si_errno);
- err |= __put_user(from->si_code, &to->si_code);
- switch (siginfo_layout(from->si_signo, from->si_code)) {
- case SIL_FAULT:
- err |= __put_user(from->si_flags, &to->si_flags);
- err |= __put_user(from->si_isr, &to->si_isr);
- case SIL_POLL:
- err |= __put_user(from->si_addr, &to->si_addr);
- err |= __put_user(from->si_imm, &to->si_imm);
- break;
- case SIL_TIMER:
- err |= __put_user(from->si_tid, &to->si_tid);
- err |= __put_user(from->si_overrun, &to->si_overrun);
- err |= __put_user(from->si_ptr, &to->si_ptr);
- break;
- case SIL_RT:
- err |= __put_user(from->si_uid, &to->si_uid);
- err |= __put_user(from->si_pid, &to->si_pid);
- err |= __put_user(from->si_ptr, &to->si_ptr);
- break;
- case SIL_CHLD:
- err |= __put_user(from->si_utime, &to->si_utime);
- err |= __put_user(from->si_stime, &to->si_stime);
- err |= __put_user(from->si_status, &to->si_status);
- case SIL_KILL:
- err |= __put_user(from->si_uid, &to->si_uid);
- err |= __put_user(from->si_pid, &to->si_pid);
- break;
- }
- return err;
- }
-}
-
long
ia64_rt_sigreturn (struct sigscratch *scr)
{
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 58db59da0bd8..b0b2070e0591 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -3,6 +3,7 @@
#include <asm/cache.h>
#include <asm/ptrace.h>
#include <asm/pgtable.h>
+#include <asm/thread_info.h>
#include <asm-generic/vmlinux.lds.h>
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 52715a71aede..7d64b30913d1 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -237,9 +237,9 @@ paging_init (void)
unsigned long max_zone_pfns[MAX_NR_ZONES];
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-#ifdef CONFIG_ZONE_DMA
+#ifdef CONFIG_ZONE_DMA32
max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
- max_zone_pfns[ZONE_DMA] = max_dma;
+ max_zone_pfns[ZONE_DMA32] = max_dma;
#endif
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 9b2d994cddf6..ac46f0d60b66 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -38,7 +38,7 @@ struct early_node_data {
struct ia64_node_data *node_data;
unsigned long pernode_addr;
unsigned long pernode_size;
-#ifdef CONFIG_ZONE_DMA
+#ifdef CONFIG_ZONE_DMA32
unsigned long num_dma_physpages;
#endif
unsigned long min_pfn;
@@ -669,7 +669,7 @@ static __init int count_node_pages(unsigned long start, unsigned long len, int n
{
unsigned long end = start + len;
-#ifdef CONFIG_ZONE_DMA
+#ifdef CONFIG_ZONE_DMA32
if (start <= __pa(MAX_DMA_ADDRESS))
mem_data[node].num_dma_physpages +=
(min(end, __pa(MAX_DMA_ADDRESS)) - start) >>PAGE_SHIFT;
@@ -724,8 +724,8 @@ void __init paging_init(void)
}
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-#ifdef CONFIG_ZONE_DMA
- max_zone_pfns[ZONE_DMA] = max_dma;
+#ifdef CONFIG_ZONE_DMA32
+ max_zone_pfns[ZONE_DMA32] = max_dma;
#endif
max_zone_pfns[ZONE_NORMAL] = max_pfn;
free_area_init_nodes(max_zone_pfns);
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index 498398d915c1..dd84ee194579 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -19,7 +19,7 @@ config M32R
select MODULES_USE_ELF_RELA
select HAVE_DEBUG_STACKOVERFLOW
select CPU_NO_EFFICIENT_FFS
- select DMA_NOOP_OPS
+ select DMA_DIRECT_OPS
select ARCH_NO_COHERENT_DMA_MMAP if !MMU
config SBUS
diff --git a/arch/m32r/include/asm/Kbuild b/arch/m32r/include/asm/Kbuild
index 7e11b125c35e..ca83fda8177b 100644
--- a/arch/m32r/include/asm/Kbuild
+++ b/arch/m32r/include/asm/Kbuild
@@ -1,5 +1,6 @@
generic-y += clkdev.h
generic-y += current.h
+generic-y += dma-mapping.h
generic-y += exec.h
generic-y += extable.h
generic-y += irq_work.h
diff --git a/arch/m32r/include/asm/dma-mapping.h b/arch/m32r/include/asm/dma-mapping.h
deleted file mode 100644
index 336ffe60814b..000000000000
--- a/arch/m32r/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_M32R_DMA_MAPPING_H
-#define _ASM_M32R_DMA_MAPPING_H
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-debug.h>
-#include <linux/io.h>
-
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
- return &dma_noop_ops;
-}
-
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
- if (!dev->dma_mask)
- return false;
- return addr + size - 1 <= *dev->dma_mask;
-}
-
-#endif /* _ASM_M32R_DMA_MAPPING_H */
diff --git a/arch/m32r/include/asm/io.h b/arch/m32r/include/asm/io.h
index 1b653bb16f9a..a4272d8f0d9c 100644
--- a/arch/m32r/include/asm/io.h
+++ b/arch/m32r/include/asm/io.h
@@ -191,8 +191,6 @@ static inline void _writel(unsigned long l, unsigned long addr)
#define mmiowb()
-#define flush_write_buffers() do { } while (0) /* M32R_FIXME */
-
static inline void
memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
diff --git a/arch/m32r/include/asm/thread_info.h b/arch/m32r/include/asm/thread_info.h
index b3a215b0ce0a..ba00f1032587 100644
--- a/arch/m32r/include/asm/thread_info.h
+++ b/arch/m32r/include/asm/thread_info.h
@@ -56,9 +56,6 @@ struct thread_info {
.addr_limit = KERNEL_DS, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
diff --git a/arch/m32r/include/uapi/asm/Kbuild b/arch/m32r/include/uapi/asm/Kbuild
index 451bf6071c6e..c3df55aeefe7 100644
--- a/arch/m32r/include/uapi/asm/Kbuild
+++ b/arch/m32r/include/uapi/asm/Kbuild
@@ -3,4 +3,5 @@ include include/uapi/asm-generic/Kbuild.asm
generic-y += bpf_perf_event.h
generic-y += kvm_para.h
+generic-y += poll.h
generic-y += siginfo.h
diff --git a/arch/m32r/include/uapi/asm/poll.h b/arch/m32r/include/uapi/asm/poll.h
deleted file mode 100644
index b7132a305a47..000000000000
--- a/arch/m32r/include/uapi/asm/poll.h
+++ /dev/null
@@ -1,2 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#include <asm-generic/poll.h>
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 5b5fa9831b4d..e0b285e1e75f 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -454,7 +454,6 @@ CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PPS_CLIENT_PARPORT=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
-# CONFIG_RC_CORE is not set
CONFIG_FB=y
CONFIG_FB_CIRRUS=y
CONFIG_FB_AMIGA=y
@@ -595,6 +594,7 @@ CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
+CONFIG_TEST_FIND_BIT=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_TEST_UDELAY=m
@@ -624,6 +624,7 @@ CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
+CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
@@ -653,3 +654,4 @@ CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
+CONFIG_STRING_SELFTEST=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 72a7764b74ed..3281026a3e15 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -422,7 +422,6 @@ CONFIG_NTP_PPS=y
CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
-# CONFIG_RC_CORE is not set
CONFIG_FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
@@ -554,6 +553,7 @@ CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
+CONFIG_TEST_FIND_BIT=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_TEST_UDELAY=m
@@ -583,6 +583,7 @@ CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
+CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
@@ -612,3 +613,4 @@ CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
+CONFIG_STRING_SELFTEST=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 884b43a2f0d9..e943fad480cf 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -437,7 +437,6 @@ CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PPS_CLIENT_PARPORT=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
-# CONFIG_RC_CORE is not set
CONFIG_FB=y
CONFIG_FB_ATARI=y
CONFIG_FRAMEBUFFER_CONSOLE=y
@@ -576,6 +575,7 @@ CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
+CONFIG_TEST_FIND_BIT=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_TEST_UDELAY=m
@@ -605,6 +605,7 @@ CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
+CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
@@ -634,3 +635,4 @@ CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
+CONFIG_STRING_SELFTEST=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index fcfa60d31499..700c2310c336 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -420,7 +420,6 @@ CONFIG_NTP_PPS=y
CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
-# CONFIG_RC_CORE is not set
CONFIG_HID=m
CONFIG_HIDRAW=y
CONFIG_UHID=m
@@ -546,6 +545,7 @@ CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
+CONFIG_TEST_FIND_BIT=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_TEST_UDELAY=m
@@ -575,6 +575,7 @@ CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
+CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
@@ -604,3 +605,4 @@ CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
+CONFIG_STRING_SELFTEST=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index 9d597bbbbbfe..271d57fa4301 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -425,7 +425,6 @@ CONFIG_NTP_PPS=y
CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
-# CONFIG_RC_CORE is not set
CONFIG_FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
@@ -556,6 +555,7 @@ CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
+CONFIG_TEST_FIND_BIT=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_TEST_UDELAY=m
@@ -585,6 +585,7 @@ CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
+CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
@@ -614,3 +615,4 @@ CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
+CONFIG_STRING_SELFTEST=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 45da20d1286c..88761b867975 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -447,7 +447,6 @@ CONFIG_NTP_PPS=y
CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
-# CONFIG_RC_CORE is not set
CONFIG_FB=y
CONFIG_FB_VALKYRIE=y
CONFIG_FB_MAC=y
@@ -578,6 +577,7 @@ CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
+CONFIG_TEST_FIND_BIT=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_TEST_UDELAY=m
@@ -607,6 +607,7 @@ CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
+CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
@@ -636,3 +637,4 @@ CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
+CONFIG_STRING_SELFTEST=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index fda880c10861..7cb35dadf03b 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -504,7 +504,6 @@ CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PPS_CLIENT_PARPORT=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
-# CONFIG_RC_CORE is not set
CONFIG_FB=y
CONFIG_FB_CIRRUS=y
CONFIG_FB_AMIGA=y
@@ -658,6 +657,7 @@ CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
+CONFIG_TEST_FIND_BIT=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_TEST_UDELAY=m
@@ -687,6 +687,7 @@ CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
+CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
@@ -716,3 +717,4 @@ CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
+CONFIG_STRING_SELFTEST=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 7d5e4863efec..b139d7b68393 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -420,7 +420,6 @@ CONFIG_NTP_PPS=y
CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
-# CONFIG_RC_CORE is not set
CONFIG_HID=m
CONFIG_HIDRAW=y
CONFIG_UHID=m
@@ -546,6 +545,7 @@ CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
+CONFIG_TEST_FIND_BIT=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_TEST_UDELAY=m
@@ -575,6 +575,7 @@ CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
+CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
@@ -604,3 +605,4 @@ CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
+CONFIG_STRING_SELFTEST=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 7763b71a9c49..398346138769 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -420,7 +420,6 @@ CONFIG_NTP_PPS=y
CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
-# CONFIG_RC_CORE is not set
CONFIG_HID=m
CONFIG_HIDRAW=y
CONFIG_UHID=m
@@ -546,6 +545,7 @@ CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
+CONFIG_TEST_FIND_BIT=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_TEST_UDELAY=m
@@ -575,6 +575,7 @@ CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
+CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
@@ -604,3 +605,4 @@ CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
+CONFIG_STRING_SELFTEST=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index 17eaebfa3e19..14c608326f6d 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -437,7 +437,6 @@ CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PPS_CLIENT_PARPORT=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
-# CONFIG_RC_CORE is not set
CONFIG_FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
@@ -569,6 +568,7 @@ CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
+CONFIG_TEST_FIND_BIT=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_TEST_UDELAY=m
@@ -598,6 +598,7 @@ CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
+CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
@@ -627,3 +628,4 @@ CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
+CONFIG_STRING_SELFTEST=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index d1cb7a04ae1d..97dec0bf52f1 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -419,7 +419,6 @@ CONFIG_NTP_PPS=y
CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
-# CONFIG_RC_CORE is not set
CONFIG_FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
@@ -548,6 +547,7 @@ CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
+CONFIG_TEST_FIND_BIT=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_TEST_UDELAY=m
@@ -576,6 +576,7 @@ CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
+CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
@@ -605,3 +606,4 @@ CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
+CONFIG_STRING_SELFTEST=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index ea3a331c62d5..56df28d6d91d 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -419,7 +419,6 @@ CONFIG_NTP_PPS=y
CONFIG_PPS_CLIENT_LDISC=m
CONFIG_PTP_1588_CLOCK=m
# CONFIG_HWMON is not set
-# CONFIG_RC_CORE is not set
CONFIG_FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
@@ -548,6 +547,7 @@ CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_HASH=m
CONFIG_TEST_USER_COPY=m
CONFIG_TEST_BPF=m
+CONFIG_TEST_FIND_BIT=m
CONFIG_TEST_FIRMWARE=m
CONFIG_TEST_SYSCTL=m
CONFIG_TEST_UDELAY=m
@@ -577,6 +577,7 @@ CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
+CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
@@ -606,3 +607,4 @@ CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
CONFIG_XZ_DEC_TEST=m
+CONFIG_STRING_SELFTEST=m
diff --git a/arch/m68k/include/asm/macintosh.h b/arch/m68k/include/asm/macintosh.h
index f42c27400dbc..9b840c03ebb7 100644
--- a/arch/m68k/include/asm/macintosh.h
+++ b/arch/m68k/include/asm/macintosh.h
@@ -33,7 +33,7 @@ struct mac_model
char ide_type;
char scc_type;
char ether_type;
- char nubus_type;
+ char expansion_type;
char floppy_type;
};
@@ -73,8 +73,11 @@ struct mac_model
#define MAC_ETHER_SONIC 1
#define MAC_ETHER_MACE 2
-#define MAC_NO_NUBUS 0
-#define MAC_NUBUS 1
+#define MAC_EXP_NONE 0
+#define MAC_EXP_PDS 1 /* Accepts only a PDS card */
+#define MAC_EXP_NUBUS 2 /* Accepts only NuBus card(s) */
+#define MAC_EXP_PDS_NUBUS 3 /* Accepts PDS card and/or NuBus card(s) */
+#define MAC_EXP_PDS_COMM 4 /* Accepts PDS card or Comm Slot card */
#define MAC_FLOPPY_IWM 0
#define MAC_FLOPPY_SWIM_ADDR1 1
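Replacing the boolean nubus_type with a five-way expansion_type lets slot-specific code distinguish PDS-only, NuBus-only, mixed and Comm Slot machines instead of assuming NuBus everywhere, which is why several models below lose their blanket ether_type = MAC_ETHER_SONIC entries. A hedged sketch of a consumer; only macintosh_config and the MAC_EXP_* values come from this patch, the helper itself is illustrative:

/* Illustrative only: gate a Comm Slot probe on the new field. */
static bool machine_has_comm_slot(void)
{
	return macintosh_config->expansion_type == MAC_EXP_PDS_COMM;
}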
diff --git a/arch/m68k/include/asm/thread_info.h b/arch/m68k/include/asm/thread_info.h
index 928035591f2e..015f1ca38305 100644
--- a/arch/m68k/include/asm/thread_info.h
+++ b/arch/m68k/include/asm/thread_info.h
@@ -41,8 +41,6 @@ struct thread_info {
.preempt_count = INIT_PREEMPT_COUNT, \
}
-#define init_stack (init_thread_union.stack)
-
#ifndef __ASSEMBLY__
/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
@@ -58,8 +56,6 @@ static inline struct thread_info *current_thread_info(void)
}
#endif
-#define init_thread_info (init_thread_union.thread_info)
-
/* entry.S relies on these definitions!
* bits 0-7 are tested at every exception exit
* bits 8-15 are also tested at syscall exit
diff --git a/arch/m68k/include/uapi/asm/poll.h b/arch/m68k/include/uapi/asm/poll.h
index c3e3fcc15e1d..d8be239e8141 100644
--- a/arch/m68k/include/uapi/asm/poll.h
+++ b/arch/m68k/include/uapi/asm/poll.h
@@ -2,8 +2,25 @@
#ifndef __m68k_POLL_H
#define __m68k_POLL_H
+#ifndef __KERNEL__
#define POLLWRNORM POLLOUT
-#define POLLWRBAND 256
+#define POLLWRBAND (__force __poll_t)256
+#else
+#define __ARCH_HAS_MANGLED_POLL
+static inline __u16 mangle_poll(__poll_t val)
+{
+ __u16 v = (__force __u16)val;
+ /* bit 9 -> bit 8, bit 8 -> bit 2 */
+ return (v & ~0x300) | ((v & 0x200) >> 1) | ((v & 0x100) >> 6);
+}
+
+static inline __poll_t demangle_poll(__u16 v)
+{
+ /* bit 8 -> bit 9, bit 2 -> bits 2 and 8 */
+ return (__force __poll_t)((v & ~0x100) | ((v & 0x100) << 1) |
+ ((v & 4) << 6));
+}
+#endif
#include <asm-generic/poll.h>
diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c
index 87ef73a93856..c01b9b8f97bf 100644
--- a/arch/m68k/kernel/dma.c
+++ b/arch/m68k/kernel/dma.c
@@ -76,8 +76,6 @@ static void *m68k_dma_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
void *ret;
- /* ignore region specifiers */
- gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
if (dev == NULL || (*dev->dma_mask < 0xffffffff))
gfp |= GFP_DMA;
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index 16cd5cea5207..d3d435248a24 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -212,7 +212,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_II,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_NUBUS,
.floppy_type = MAC_FLOPPY_IWM,
},
@@ -227,7 +227,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_II,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_NUBUS,
.floppy_type = MAC_FLOPPY_IWM,
}, {
.ident = MAC_MODEL_IIX,
@@ -236,7 +236,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_II,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_IICX,
@@ -245,7 +245,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_II,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_SE30,
@@ -254,7 +254,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_II,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
},
@@ -272,7 +272,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_IIFX,
@@ -281,7 +281,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_IIFX,
.scc_type = MAC_SCC_IOP,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_IOP,
}, {
.ident = MAC_MODEL_IISI,
@@ -290,7 +290,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_IIVI,
@@ -299,7 +299,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_LC,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_IIVX,
@@ -308,7 +308,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_LC,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
},
@@ -323,7 +323,6 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_LC,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_CCL,
@@ -332,7 +331,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_LC,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_CCLII,
@@ -341,7 +340,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_LC,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
},
@@ -356,7 +355,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_LC,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_LCII,
@@ -365,7 +364,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_LC,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_LCIII,
@@ -374,7 +373,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_LC,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
},
@@ -395,7 +394,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_QUADRA,
.scsi_type = MAC_SCSI_QUADRA,
.scc_type = MAC_SCC_QUADRA,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR1,
}, {
.ident = MAC_MODEL_Q605_ACC,
@@ -404,7 +403,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_QUADRA,
.scsi_type = MAC_SCSI_QUADRA,
.scc_type = MAC_SCC_QUADRA,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR1,
}, {
.ident = MAC_MODEL_Q610,
@@ -414,7 +413,7 @@ static struct mac_model mac_data_table[] = {
.scsi_type = MAC_SCSI_QUADRA,
.scc_type = MAC_SCC_QUADRA,
.ether_type = MAC_ETHER_SONIC,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR1,
}, {
.ident = MAC_MODEL_Q630,
@@ -424,8 +423,7 @@ static struct mac_model mac_data_table[] = {
.scsi_type = MAC_SCSI_QUADRA,
.ide_type = MAC_IDE_QUADRA,
.scc_type = MAC_SCC_QUADRA,
- .ether_type = MAC_ETHER_SONIC,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS_COMM,
.floppy_type = MAC_FLOPPY_SWIM_ADDR1,
}, {
.ident = MAC_MODEL_Q650,
@@ -435,7 +433,7 @@ static struct mac_model mac_data_table[] = {
.scsi_type = MAC_SCSI_QUADRA,
.scc_type = MAC_SCC_QUADRA,
.ether_type = MAC_ETHER_SONIC,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR1,
},
/* The Q700 does have a NS Sonic */
@@ -447,7 +445,7 @@ static struct mac_model mac_data_table[] = {
.scsi_type = MAC_SCSI_QUADRA2,
.scc_type = MAC_SCC_QUADRA,
.ether_type = MAC_ETHER_SONIC,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR1,
}, {
.ident = MAC_MODEL_Q800,
@@ -457,7 +455,7 @@ static struct mac_model mac_data_table[] = {
.scsi_type = MAC_SCSI_QUADRA,
.scc_type = MAC_SCC_QUADRA,
.ether_type = MAC_ETHER_SONIC,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR1,
}, {
.ident = MAC_MODEL_Q840,
@@ -467,7 +465,7 @@ static struct mac_model mac_data_table[] = {
.scsi_type = MAC_SCSI_QUADRA3,
.scc_type = MAC_SCC_PSC,
.ether_type = MAC_ETHER_MACE,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_NUBUS,
.floppy_type = MAC_FLOPPY_AV,
}, {
.ident = MAC_MODEL_Q900,
@@ -477,7 +475,7 @@ static struct mac_model mac_data_table[] = {
.scsi_type = MAC_SCSI_QUADRA2,
.scc_type = MAC_SCC_IOP,
.ether_type = MAC_ETHER_SONIC,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_IOP,
}, {
.ident = MAC_MODEL_Q950,
@@ -487,7 +485,7 @@ static struct mac_model mac_data_table[] = {
.scsi_type = MAC_SCSI_QUADRA2,
.scc_type = MAC_SCC_IOP,
.ether_type = MAC_ETHER_SONIC,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_IOP,
},
@@ -502,7 +500,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_LC,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_P475,
@@ -511,7 +509,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_QUADRA,
.scsi_type = MAC_SCSI_QUADRA,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR1,
}, {
.ident = MAC_MODEL_P475F,
@@ -520,7 +518,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_QUADRA,
.scsi_type = MAC_SCSI_QUADRA,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR1,
}, {
.ident = MAC_MODEL_P520,
@@ -529,7 +527,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_LC,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_P550,
@@ -538,7 +536,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_LC,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
},
/* These have the comm slot, and therefore possibly SONIC ethernet */
@@ -549,8 +547,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_QUADRA,
.scsi_type = MAC_SCSI_QUADRA,
.scc_type = MAC_SCC_II,
- .ether_type = MAC_ETHER_SONIC,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS_COMM,
.floppy_type = MAC_FLOPPY_SWIM_ADDR1,
}, {
.ident = MAC_MODEL_P588,
@@ -560,8 +557,7 @@ static struct mac_model mac_data_table[] = {
.scsi_type = MAC_SCSI_QUADRA,
.ide_type = MAC_IDE_QUADRA,
.scc_type = MAC_SCC_II,
- .ether_type = MAC_ETHER_SONIC,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS_COMM,
.floppy_type = MAC_FLOPPY_SWIM_ADDR1,
}, {
.ident = MAC_MODEL_TV,
@@ -570,7 +566,6 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_LC,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_P600,
@@ -579,7 +574,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_LC,
.scc_type = MAC_SCC_II,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
},
@@ -596,7 +591,7 @@ static struct mac_model mac_data_table[] = {
.scsi_type = MAC_SCSI_QUADRA,
.scc_type = MAC_SCC_QUADRA,
.ether_type = MAC_ETHER_SONIC,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR1,
}, {
.ident = MAC_MODEL_C650,
@@ -606,7 +601,7 @@ static struct mac_model mac_data_table[] = {
.scsi_type = MAC_SCSI_QUADRA,
.scc_type = MAC_SCC_QUADRA,
.ether_type = MAC_ETHER_SONIC,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR1,
}, {
.ident = MAC_MODEL_C660,
@@ -616,7 +611,7 @@ static struct mac_model mac_data_table[] = {
.scsi_type = MAC_SCSI_QUADRA3,
.scc_type = MAC_SCC_PSC,
.ether_type = MAC_ETHER_MACE,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_PDS_NUBUS,
.floppy_type = MAC_FLOPPY_AV,
},
@@ -633,7 +628,6 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_QUADRA,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_QUADRA,
- .nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_PB145,
@@ -642,7 +636,6 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_QUADRA,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_QUADRA,
- .nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_PB150,
@@ -652,7 +645,6 @@ static struct mac_model mac_data_table[] = {
.scsi_type = MAC_SCSI_OLD,
.ide_type = MAC_IDE_PB,
.scc_type = MAC_SCC_QUADRA,
- .nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_PB160,
@@ -661,7 +653,6 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_QUADRA,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_QUADRA,
- .nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_PB165,
@@ -670,7 +661,6 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_QUADRA,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_QUADRA,
- .nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_PB165C,
@@ -679,7 +669,6 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_QUADRA,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_QUADRA,
- .nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_PB170,
@@ -688,7 +677,6 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_QUADRA,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_QUADRA,
- .nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_PB180,
@@ -697,7 +685,6 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_QUADRA,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_QUADRA,
- .nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_PB180C,
@@ -706,7 +693,6 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_QUADRA,
.scsi_type = MAC_SCSI_OLD,
.scc_type = MAC_SCC_QUADRA,
- .nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_PB190,
@@ -716,7 +702,6 @@ static struct mac_model mac_data_table[] = {
.scsi_type = MAC_SCSI_LATE,
.ide_type = MAC_IDE_BABOON,
.scc_type = MAC_SCC_QUADRA,
- .nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_PB520,
@@ -726,7 +711,6 @@ static struct mac_model mac_data_table[] = {
.scsi_type = MAC_SCSI_LATE,
.scc_type = MAC_SCC_QUADRA,
.ether_type = MAC_ETHER_SONIC,
- .nubus_type = MAC_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
},
@@ -743,7 +727,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_DUO,
.scc_type = MAC_SCC_QUADRA,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_PB230,
@@ -752,7 +736,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_DUO,
.scc_type = MAC_SCC_QUADRA,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_PB250,
@@ -761,7 +745,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_DUO,
.scc_type = MAC_SCC_QUADRA,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_PB270C,
@@ -770,7 +754,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_DUO,
.scc_type = MAC_SCC_QUADRA,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_PB280,
@@ -779,7 +763,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_DUO,
.scc_type = MAC_SCC_QUADRA,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
}, {
.ident = MAC_MODEL_PB280C,
@@ -788,7 +772,7 @@ static struct mac_model mac_data_table[] = {
.via_type = MAC_VIA_IICI,
.scsi_type = MAC_SCSI_DUO,
.scc_type = MAC_SCC_QUADRA,
- .nubus_type = MAC_NUBUS,
+ .expansion_type = MAC_EXP_NUBUS,
.floppy_type = MAC_FLOPPY_SWIM_ADDR2,
},
@@ -1100,14 +1084,12 @@ int __init mac_platform_init(void)
* Ethernet device
*/
- switch (macintosh_config->ether_type) {
- case MAC_ETHER_SONIC:
+ if (macintosh_config->ether_type == MAC_ETHER_SONIC ||
+ macintosh_config->expansion_type == MAC_EXP_PDS_COMM)
platform_device_register_simple("macsonic", -1, NULL, 0);
- break;
- case MAC_ETHER_MACE:
+
+ if (macintosh_config->ether_type == MAC_ETHER_MACE)
platform_device_register_simple("macmace", -1, NULL, 0);
- break;
- }
return 0;
}
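The table rework above folds two separate facts (a NuBus flag plus an assumed on-board SONIC) into one expansion_type value, and mac_platform_init() now registers the macsonic platform device for comm-slot machines too, leaving the driver probe to decide whether a card is actually fitted. A minimal userspace sketch of that lookup logic follows; the enum names mirror the patch, but the table entries are illustrative, not the authoritative model list.

/* Standalone sketch (not kernel code). */
#include <stdio.h>

enum mac_expansion { EXP_NONE, EXP_PDS, EXP_NUBUS, EXP_PDS_NUBUS, EXP_PDS_COMM };
enum mac_ether { ETHER_NONE, ETHER_SONIC, ETHER_MACE };

struct model {
        const char *name;
        enum mac_expansion expansion_type;
        enum mac_ether ether_type;
};

static const struct model models[] = {
        { "Quadra 650",   EXP_PDS_NUBUS, ETHER_SONIC },
        { "Performa 580", EXP_PDS_COMM,  ETHER_NONE }, /* SONIC only if a comm card is fitted */
        { "Macintosh TV", EXP_NONE,      ETHER_NONE },
};

static int wants_sonic(const struct model *m)
{
        /* Same condition as the reworked mac_platform_init(). */
        return m->ether_type == ETHER_SONIC || m->expansion_type == EXP_PDS_COMM;
}

int main(void)
{
        for (size_t i = 0; i < sizeof(models) / sizeof(models[0]); i++)
                printf("%s: macsonic %s\n", models[i].name,
                       wants_sonic(&models[i]) ? "registered" : "skipped");
        return 0;
}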
diff --git a/arch/m68k/mac/oss.c b/arch/m68k/mac/oss.c
index 3f81892527ad..921e6c092f2c 100644
--- a/arch/m68k/mac/oss.c
+++ b/arch/m68k/mac/oss.c
@@ -53,56 +53,41 @@ void __init oss_init(void)
}
/*
- * Handle miscellaneous OSS interrupts.
+ * Handle OSS interrupts.
+ * XXX How do you clear a pending IRQ? Is it even necessary?
*/
-static void oss_irq(struct irq_desc *desc)
+static void oss_iopism_irq(struct irq_desc *desc)
{
- int events = oss->irq_pending &
- (OSS_IP_IOPSCC | OSS_IP_SCSI | OSS_IP_IOPISM);
-
- if (events & OSS_IP_IOPSCC) {
- oss->irq_pending &= ~OSS_IP_IOPSCC;
- generic_handle_irq(IRQ_MAC_SCC);
- }
-
- if (events & OSS_IP_SCSI) {
- oss->irq_pending &= ~OSS_IP_SCSI;
- generic_handle_irq(IRQ_MAC_SCSI);
- }
-
- if (events & OSS_IP_IOPISM) {
- oss->irq_pending &= ~OSS_IP_IOPISM;
- generic_handle_irq(IRQ_MAC_ADB);
- }
+ generic_handle_irq(IRQ_MAC_ADB);
}
-/*
- * Nubus IRQ handler, OSS style
- *
- * Unlike the VIA/RBV this is on its own autovector interrupt level.
- */
+static void oss_scsi_irq(struct irq_desc *desc)
+{
+ generic_handle_irq(IRQ_MAC_SCSI);
+}
static void oss_nubus_irq(struct irq_desc *desc)
{
- int events, irq_bit, i;
+ u16 events, irq_bit;
+ int irq_num;
events = oss->irq_pending & OSS_IP_NUBUS;
- if (!events)
- return;
-
- /* There are only six slots on the OSS, not seven */
-
- i = 6;
- irq_bit = 0x40;
+ irq_num = NUBUS_SOURCE_BASE + 5;
+ irq_bit = OSS_IP_NUBUS5;
do {
- --i;
- irq_bit >>= 1;
if (events & irq_bit) {
- oss->irq_pending &= ~irq_bit;
- generic_handle_irq(NUBUS_SOURCE_BASE + i);
+ events &= ~irq_bit;
+ generic_handle_irq(irq_num);
}
- } while(events & (irq_bit - 1));
+ --irq_num;
+ irq_bit >>= 1;
+ } while (events);
+}
+
+static void oss_iopscc_irq(struct irq_desc *desc)
+{
+ generic_handle_irq(IRQ_MAC_SCC);
}
/*
@@ -122,14 +107,14 @@ static void oss_nubus_irq(struct irq_desc *desc)
void __init oss_register_interrupts(void)
{
- irq_set_chained_handler(OSS_IRQLEV_IOPISM, oss_irq);
- irq_set_chained_handler(OSS_IRQLEV_SCSI, oss_irq);
+ irq_set_chained_handler(OSS_IRQLEV_IOPISM, oss_iopism_irq);
+ irq_set_chained_handler(OSS_IRQLEV_SCSI, oss_scsi_irq);
irq_set_chained_handler(OSS_IRQLEV_NUBUS, oss_nubus_irq);
- irq_set_chained_handler(OSS_IRQLEV_IOPSCC, oss_irq);
+ irq_set_chained_handler(OSS_IRQLEV_IOPSCC, oss_iopscc_irq);
irq_set_chained_handler(OSS_IRQLEV_VIA1, via1_irq);
/* OSS_VIA1 gets enabled here because it has no machspec interrupt. */
- oss->irq_level[OSS_VIA1] = IRQ_AUTO_6;
+ oss->irq_level[OSS_VIA1] = OSS_IRQLEV_VIA1;
}
/*
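The rewritten oss_nubus_irq() scans the pending mask from the highest slot (5) downward and dispatches one IRQ per set bit. A standalone sketch of the same scan; OSS_IP_NUBUS5 and NUBUS_SOURCE_BASE are given illustrative values here rather than the kernel's.

#include <stdio.h>

#define NUBUS_SOURCE_BASE 56   /* illustrative IRQ base */
#define OSS_IP_NUBUS5     0x20 /* bits 5..0 = NuBus slots 5..0 */

static void handle_irq(int irq)
{
        printf("dispatch IRQ %d\n", irq);
}

static void oss_nubus_dispatch(unsigned short pending)
{
        unsigned short irq_bit = OSS_IP_NUBUS5;
        int irq_num = NUBUS_SOURCE_BASE + 5;

        /* The caller must pre-mask pending to the slot bits, as the
         * kernel does with OSS_IP_NUBUS, or the loop never terminates. */
        do {
                if (pending & irq_bit) {
                        pending &= ~irq_bit;
                        handle_irq(irq_num);
                }
                --irq_num;
                irq_bit >>= 1;
        } while (pending);
}

int main(void)
{
        oss_nubus_dispatch(0x22); /* slots 5 and 1 pending */
        return 0;
}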
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index 127d7c1f2090..03253c4f8e6a 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -21,8 +21,9 @@ extern void die_if_kernel(char *, struct pt_regs *, long);
int send_fault_sig(struct pt_regs *regs)
{
- siginfo_t siginfo = { 0, 0, 0, };
+ siginfo_t siginfo;
+ clear_siginfo(&siginfo);
siginfo.si_signo = current->thread.signo;
siginfo.si_code = current->thread.code;
siginfo.si_addr = (void *)current->thread.faddr;
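The switch from a partial initializer to clear_siginfo() matters because "= { 0, 0, 0, }" zeroes only the named members and leaves padding inside the struct and its union indeterminate, which can leak kernel stack bytes when the structure is copied to userspace. A standalone illustration with a stand-in type (not the kernel's siginfo_t):

#include <string.h>
#include <stdio.h>

struct siginfo_demo {
        int si_signo;
        int si_code;
        union {
                char pad[16];
                void *si_addr;
        } fields;
};

static void clear_siginfo_demo(struct siginfo_demo *info)
{
        /* Mirrors the kernel helper: zero everything, padding included. */
        memset(info, 0, sizeof(*info));
}

int main(void)
{
        struct siginfo_demo info;

        clear_siginfo_demo(&info);
        info.si_signo = 11; /* SIGSEGV */
        info.fields.si_addr = NULL;
        printf("si_signo=%d\n", info.si_signo);
        return 0;
}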
diff --git a/arch/metag/include/asm/thread_info.h b/arch/metag/include/asm/thread_info.h
index 554f73a77e6e..a1a9c7f5ca8c 100644
--- a/arch/metag/include/asm/thread_info.h
+++ b/arch/metag/include/asm/thread_info.h
@@ -74,9 +74,6 @@ struct thread_info {
.addr_limit = KERNEL_DS, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
/* how to get the current stack pointer from C */
register unsigned long current_stack_pointer asm("A0StP") __used;
diff --git a/arch/metag/include/uapi/asm/siginfo.h b/arch/metag/include/uapi/asm/siginfo.h
index b54ef7186ca3..9a3f6cde9487 100644
--- a/arch/metag/include/uapi/asm/siginfo.h
+++ b/arch/metag/include/uapi/asm/siginfo.h
@@ -6,4 +6,11 @@
#include <asm-generic/siginfo.h>
+/*
+ * SIGFPE si_codes
+ */
+#ifdef __KERNEL__
+#define FPE_FIXME 0 /* Broken dup of SI_USER */
+#endif /* __KERNEL__ */
+
#endif
diff --git a/arch/metag/kernel/traps.c b/arch/metag/kernel/traps.c
index 444851e510d5..3b62b1b0c0b5 100644
--- a/arch/metag/kernel/traps.c
+++ b/arch/metag/kernel/traps.c
@@ -735,7 +735,7 @@ TBIRES fpe_handler(TBIRES State, int SigNum, int Triggers, int Inst, PTBI pTBI)
else if (error_state & TXSTAT_FPE_INEXACT_BIT)
info.si_code = FPE_FLTRES;
else
- info.si_code = 0;
+ info.si_code = FPE_FIXME;
info.si_errno = 0;
info.si_addr = (__force void __user *)regs->ctx.CurrPC;
force_sig_info(SIGFPE, &info, current);
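FPE_FIXME documents that an si_code of 0 collides with SI_USER; the handler now states that explicitly instead of writing a bare 0. A compact userspace sketch of the translation pattern, with illustrative status bits (the FPE_* numbers follow the generic siginfo values, but treat them as assumptions here):

#include <stdio.h>

#define FPE_FIXME  0 /* broken duplicate of SI_USER */
#define FPE_FLTRES 6 /* inexact result */
#define FPE_FLTINV 7 /* invalid operation */

#define STAT_INVALID_BIT 0x1 /* illustrative hardware bits */
#define STAT_INEXACT_BIT 0x2

static int fpe_si_code(unsigned int status)
{
        if (status & STAT_INVALID_BIT)
                return FPE_FLTINV;
        if (status & STAT_INEXACT_BIT)
                return FPE_FLTRES;
        return FPE_FIXME; /* nothing recognisable: flag it, don't fake SI_USER */
}

int main(void)
{
        printf("si_code=%d\n", fpe_si_code(STAT_INEXACT_BIT));
        return 0;
}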
diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h
index 6b9ea39405b8..add50c1373bf 100644
--- a/arch/microblaze/include/asm/dma-mapping.h
+++ b/arch/microblaze/include/asm/dma-mapping.h
@@ -18,11 +18,11 @@
/*
* Available generic sets of operations
*/
-extern const struct dma_map_ops dma_direct_ops;
+extern const struct dma_map_ops dma_nommu_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
- return &dma_direct_ops;
+ return &dma_nommu_ops;
}
#endif /* _ASM_MICROBLAZE_DMA_MAPPING_H */
diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h
index e7e8954e9815..9afe4b5bd6c8 100644
--- a/arch/microblaze/include/asm/thread_info.h
+++ b/arch/microblaze/include/asm/thread_info.h
@@ -86,9 +86,6 @@ struct thread_info {
.addr_limit = KERNEL_DS, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index 990bf9ea0ec6..c91e8cef98dd 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -15,42 +15,18 @@
#include <linux/bug.h>
#include <asm/cacheflush.h>
-#define NOT_COHERENT_CACHE
-
-static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
+static void *dma_nommu_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
unsigned long attrs)
{
-#ifdef NOT_COHERENT_CACHE
return consistent_alloc(flag, size, dma_handle);
-#else
- void *ret;
- struct page *page;
- int node = dev_to_node(dev);
-
- /* ignore region specifiers */
- flag &= ~(__GFP_HIGHMEM);
-
- page = alloc_pages_node(node, flag, get_order(size));
- if (page == NULL)
- return NULL;
- ret = page_address(page);
- memset(ret, 0, size);
- *dma_handle = virt_to_phys(ret);
-
- return ret;
-#endif
}
-static void dma_direct_free_coherent(struct device *dev, size_t size,
+static void dma_nommu_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
unsigned long attrs)
{
-#ifdef NOT_COHERENT_CACHE
consistent_free(size, vaddr);
-#else
- free_pages((unsigned long)vaddr, get_order(size));
-#endif
}
static inline void __dma_sync(unsigned long paddr,
@@ -69,7 +45,7 @@ static inline void __dma_sync(unsigned long paddr,
}
}
-static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
+static int dma_nommu_map_sg(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction direction,
unsigned long attrs)
{
@@ -89,12 +65,7 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
return nents;
}
-static int dma_direct_dma_supported(struct device *dev, u64 mask)
-{
- return 1;
-}
-
-static inline dma_addr_t dma_direct_map_page(struct device *dev,
+static inline dma_addr_t dma_nommu_map_page(struct device *dev,
struct page *page,
unsigned long offset,
size_t size,
@@ -106,7 +77,7 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
return page_to_phys(page) + offset;
}
-static inline void dma_direct_unmap_page(struct device *dev,
+static inline void dma_nommu_unmap_page(struct device *dev,
dma_addr_t dma_address,
size_t size,
enum dma_data_direction direction,
@@ -122,7 +93,7 @@ static inline void dma_direct_unmap_page(struct device *dev,
}
static inline void
-dma_direct_sync_single_for_cpu(struct device *dev,
+dma_nommu_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
@@ -136,7 +107,7 @@ dma_direct_sync_single_for_cpu(struct device *dev,
}
static inline void
-dma_direct_sync_single_for_device(struct device *dev,
+dma_nommu_sync_single_for_device(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
@@ -150,7 +121,7 @@ dma_direct_sync_single_for_device(struct device *dev,
}
static inline void
-dma_direct_sync_sg_for_cpu(struct device *dev,
+dma_nommu_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sgl, int nents,
enum dma_data_direction direction)
{
@@ -164,7 +135,7 @@ dma_direct_sync_sg_for_cpu(struct device *dev,
}
static inline void
-dma_direct_sync_sg_for_device(struct device *dev,
+dma_nommu_sync_sg_for_device(struct device *dev,
struct scatterlist *sgl, int nents,
enum dma_data_direction direction)
{
@@ -178,7 +149,7 @@ dma_direct_sync_sg_for_device(struct device *dev,
}
static
-int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+int dma_nommu_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t handle, size_t size,
unsigned long attrs)
{
@@ -191,12 +162,8 @@ int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
if (off >= count || user_count > (count - off))
return -ENXIO;
-#ifdef NOT_COHERENT_CACHE
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
pfn = consistent_virt_to_pfn(cpu_addr);
-#else
- pfn = virt_to_pfn(cpu_addr);
-#endif
return remap_pfn_range(vma, vma->vm_start, pfn + off,
vma->vm_end - vma->vm_start, vma->vm_page_prot);
#else
@@ -204,20 +171,19 @@ int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
#endif
}
-const struct dma_map_ops dma_direct_ops = {
- .alloc = dma_direct_alloc_coherent,
- .free = dma_direct_free_coherent,
- .mmap = dma_direct_mmap_coherent,
- .map_sg = dma_direct_map_sg,
- .dma_supported = dma_direct_dma_supported,
- .map_page = dma_direct_map_page,
- .unmap_page = dma_direct_unmap_page,
- .sync_single_for_cpu = dma_direct_sync_single_for_cpu,
- .sync_single_for_device = dma_direct_sync_single_for_device,
- .sync_sg_for_cpu = dma_direct_sync_sg_for_cpu,
- .sync_sg_for_device = dma_direct_sync_sg_for_device,
+const struct dma_map_ops dma_nommu_ops = {
+ .alloc = dma_nommu_alloc_coherent,
+ .free = dma_nommu_free_coherent,
+ .mmap = dma_nommu_mmap_coherent,
+ .map_sg = dma_nommu_map_sg,
+ .map_page = dma_nommu_map_page,
+ .unmap_page = dma_nommu_unmap_page,
+ .sync_single_for_cpu = dma_nommu_sync_single_for_cpu,
+ .sync_single_for_device = dma_nommu_sync_single_for_device,
+ .sync_sg_for_cpu = dma_nommu_sync_sg_for_cpu,
+ .sync_sg_for_device = dma_nommu_sync_sg_for_device,
};
-EXPORT_SYMBOL(dma_direct_ops);
+EXPORT_SYMBOL(dma_nommu_ops);
/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
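Besides the rename, which frees the dma_direct_ prefix for the generic implementation, the ops table drops its .dma_supported hook that always returned 1: the DMA core treats a missing callback as "every mask is acceptable". A tiny sketch of that convention using a stand-in ops struct:

#include <stdio.h>

struct demo_dma_ops {
        int (*dma_supported)(unsigned long long mask);
};

static int demo_dma_supported(const struct demo_dma_ops *ops,
                              unsigned long long mask)
{
        if (!ops->dma_supported)
                return 1; /* no hook: accept any mask */
        return ops->dma_supported(mask);
}

int main(void)
{
        struct demo_dma_ops ops = { .dma_supported = NULL };

        printf("mask ok: %d\n", demo_dma_supported(&ops, 0xffffffffULL));
        return 0;
}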
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 8e0b3702f1c0..ab98569994f0 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -431,6 +431,7 @@ config MACH_LOONGSON32
config MACH_LOONGSON64
bool "Loongson-2/3 family of machines"
+ select ARCH_HAS_PHYS_TO_DMA
select SYS_SUPPORTS_ZBOOT
help
This enables the support of Loongson-2/3 family of machines.
@@ -880,6 +881,7 @@ config MIKROTIK_RB532
config CAVIUM_OCTEON_SOC
bool "Cavium Networks Octeon SoC based boards"
select CEVT_R4K
+ select ARCH_HAS_PHYS_TO_DMA
select ARCH_PHYS_ADDR_T_64BIT
select DMA_COHERENT
select SYS_SUPPORTS_64BIT_KERNEL
diff --git a/arch/mips/cavium-octeon/Kconfig b/arch/mips/cavium-octeon/Kconfig
index 204a1670fd9b..b5eee1a57d6c 100644
--- a/arch/mips/cavium-octeon/Kconfig
+++ b/arch/mips/cavium-octeon/Kconfig
@@ -75,6 +75,7 @@ config NEED_SG_DMA_LENGTH
config SWIOTLB
def_bool y
+ select DMA_DIRECT_OPS
select IOMMU_HELPER
select NEED_SG_DMA_LENGTH
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
index c64bd87f0b6e..c7bb8a407041 100644
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -159,36 +159,13 @@ static void octeon_dma_sync_sg_for_device(struct device *dev,
static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
- void *ret;
-
- /* ignore region specifiers */
- gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
-
- if (IS_ENABLED(CONFIG_ZONE_DMA) && dev == NULL)
- gfp |= __GFP_DMA;
- else if (IS_ENABLED(CONFIG_ZONE_DMA) &&
- dev->coherent_dma_mask <= DMA_BIT_MASK(24))
- gfp |= __GFP_DMA;
- else if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
- dev->coherent_dma_mask <= DMA_BIT_MASK(32))
- gfp |= __GFP_DMA32;
-
- /* Don't invoke OOM killer */
- gfp |= __GFP_NORETRY;
-
- ret = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
+ void *ret = swiotlb_alloc(dev, size, dma_handle, gfp, attrs);
mb();
return ret;
}
-static void octeon_dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle, unsigned long attrs)
-{
- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
-}
-
static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
return paddr;
@@ -228,7 +205,7 @@ EXPORT_SYMBOL(dma_to_phys);
static struct octeon_dma_map_ops octeon_linear_dma_map_ops = {
.dma_map_ops = {
.alloc = octeon_dma_alloc_coherent,
- .free = octeon_dma_free_coherent,
+ .free = swiotlb_free,
.map_page = octeon_dma_map_page,
.unmap_page = swiotlb_unmap_page,
.map_sg = octeon_dma_map_sg,
@@ -314,7 +291,7 @@ void __init plat_swiotlb_setup(void)
static struct octeon_dma_map_ops _octeon_pci_dma_map_ops = {
.dma_map_ops = {
.alloc = octeon_dma_alloc_coherent,
- .free = octeon_dma_free_coherent,
+ .free = swiotlb_free,
.map_page = octeon_dma_map_page,
.unmap_page = swiotlb_unmap_page,
.map_sg = octeon_dma_map_sg,
diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h
index 49691331ada4..946681db8dc3 100644
--- a/arch/mips/include/asm/compat.h
+++ b/arch/mips/include/asm/compat.h
@@ -126,79 +126,6 @@ typedef u32 compat_old_sigset_t; /* at least 32 bits */
typedef u32 compat_sigset_word;
-typedef union compat_sigval {
- compat_int_t sival_int;
- compat_uptr_t sival_ptr;
-} compat_sigval_t;
-
-/* Can't use the generic version because si_code and si_errno are swapped */
-
-#define SI_PAD_SIZE32 (128/sizeof(int) - 3)
-
-typedef struct compat_siginfo {
- int si_signo;
- int si_code;
- int si_errno;
-
- union {
- int _pad[128 / sizeof(int) - 3];
-
- /* kill() */
- struct {
- compat_pid_t _pid; /* sender's pid */
- __compat_uid32_t _uid; /* sender's uid */
- } _kill;
-
- /* POSIX.1b timers */
- struct {
- compat_timer_t _tid; /* timer id */
- int _overrun; /* overrun count */
- compat_sigval_t _sigval; /* same as below */
- } _timer;
-
- /* POSIX.1b signals */
- struct {
- compat_pid_t _pid; /* sender's pid */
- __compat_uid32_t _uid; /* sender's uid */
- compat_sigval_t _sigval;
- } _rt;
-
- /* SIGCHLD */
- struct {
- compat_pid_t _pid; /* which child */
- __compat_uid32_t _uid; /* sender's uid */
- int _status; /* exit code */
- compat_clock_t _utime;
- compat_clock_t _stime;
- } _sigchld;
-
- /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
- struct {
- compat_uptr_t _addr; /* faulting insn/memory ref. */
-#ifdef __ARCH_SI_TRAPNO
- int _trapno; /* TRAP # which caused the signal */
-#endif
- short _addr_lsb; /* LSB of the reported address */
- struct {
- compat_uptr_t _lower;
- compat_uptr_t _upper;
- } _addr_bnd;
- } _sigfault;
-
- /* SIGPOLL */
- struct {
- compat_long_t _band; /* POLL_IN, POLL_OUT, POLL_MSG */
- int _fd;
- } _sigpoll;
-
- struct {
- compat_uptr_t _call_addr; /* calling insn */
- int _syscall; /* triggering system call number */
- compat_uint_t _arch; /* AUDIT_ARCH_* of syscall */
- } _sigsys;
- } _sifields;
-} compat_siginfo_t;
-
#define COMPAT_OFF_T_MAX 0x7fffffff
/*
diff --git a/arch/mips/include/asm/dma-direct.h b/arch/mips/include/asm/dma-direct.h
new file mode 100644
index 000000000000..f32f15530aba
--- /dev/null
+++ b/arch/mips/include/asm/dma-direct.h
@@ -0,0 +1 @@
+#include <asm/dma-coherence.h>
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index 0d9418d264f9..886e75a383f2 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -17,16 +17,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
return mips_dma_map_ops;
}
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
- if (!dev->dma_mask)
- return false;
-
- return addr + size <= *dev->dma_mask;
-}
-
-static inline void dma_mark_clean(void *addr, size_t size) {}
-
#define arch_setup_dma_ops arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
u64 size, const struct iommu_ops *iommu,
diff --git a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
index 9110988b92a1..138edf6b5b48 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
@@ -61,6 +61,14 @@ static inline void plat_post_dma_flush(struct device *dev)
{
}
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+ if (!dev->dma_mask)
+ return false;
+
+ return addr + size - 1 <= *dev->dma_mask;
+}
+
dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
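Note that the helpers copied into the platform headers also fix an off-by-one relative to the removed generic test: the last byte of the buffer, addr + size - 1, is what must fall within the mask. A standalone comparison of the two predicates:

#include <stdio.h>
#include <stdint.h>

static int dma_capable_old(uint64_t addr, uint64_t size, uint64_t mask)
{
        return addr + size <= mask; /* removed form: rejects a buffer ending at the limit */
}

static int dma_capable_new(uint64_t addr, uint64_t size, uint64_t mask)
{
        return addr + size - 1 <= mask; /* copied form */
}

int main(void)
{
        uint64_t mask = 0xffffffffULL; /* 32-bit DMA mask */

        /* A 4 GiB buffer at 0 ends at 0xffffffff, which is addressable. */
        printf("old: %d, new: %d\n",
               dma_capable_old(0, 1ULL << 32, mask),
               dma_capable_new(0, 1ULL << 32, mask));
        return 0;
}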
diff --git a/arch/mips/include/asm/mach-generic/dma-coherence.h b/arch/mips/include/asm/mach-generic/dma-coherence.h
index 61addb1677e9..8ad7a40ca786 100644
--- a/arch/mips/include/asm/mach-generic/dma-coherence.h
+++ b/arch/mips/include/asm/mach-generic/dma-coherence.h
@@ -70,16 +70,4 @@ static inline void plat_post_dma_flush(struct device *dev)
}
#endif
-#ifdef CONFIG_SWIOTLB
-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
- return paddr;
-}
-
-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
-{
- return daddr;
-}
-#endif
-
#endif /* __ASM_MACH_GENERIC_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-loongson64/dma-coherence.h b/arch/mips/include/asm/mach-loongson64/dma-coherence.h
index 1602a9e9e8c2..b1b575f5c6c1 100644
--- a/arch/mips/include/asm/mach-loongson64/dma-coherence.h
+++ b/arch/mips/include/asm/mach-loongson64/dma-coherence.h
@@ -17,6 +17,14 @@
struct device;
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+ if (!dev->dma_mask)
+ return false;
+
+ return addr + size - 1 <= *dev->dma_mask;
+}
+
extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
diff --git a/arch/mips/include/asm/netlogic/common.h b/arch/mips/include/asm/netlogic/common.h
index a6e6cbebe046..57616649b4f3 100644
--- a/arch/mips/include/asm/netlogic/common.h
+++ b/arch/mips/include/asm/netlogic/common.h
@@ -87,9 +87,6 @@ unsigned int nlm_get_cpu_frequency(void);
extern const struct plat_smp_ops nlm_smp_ops;
extern char nlm_reset_entry[], nlm_reset_entry_end[];
-/* SWIOTLB */
-extern const struct dma_map_ops nlm_swiotlb_dma_ops;
-
extern unsigned int nlm_threads_per_core;
extern cpumask_t nlm_cpumask;
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 5e8927f99a76..4993db40482c 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -49,9 +49,6 @@ struct thread_info {
.addr_limit = KERNEL_DS, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
/* How to get the thread information struct from C. */
register struct thread_info *__current_thread_info __asm__("$28");
diff --git a/arch/mips/include/uapi/asm/poll.h b/arch/mips/include/uapi/asm/poll.h
index ad289d7b7434..3173f8917128 100644
--- a/arch/mips/include/uapi/asm/poll.h
+++ b/arch/mips/include/uapi/asm/poll.h
@@ -2,8 +2,25 @@
#ifndef __ASM_POLL_H
#define __ASM_POLL_H
+#ifndef __KERNEL__
#define POLLWRNORM POLLOUT
-#define POLLWRBAND 0x0100
+#define POLLWRBAND (__force __poll_t)0x0100
+#else
+#define __ARCH_HAS_MANGLED_POLL
+static inline __u16 mangle_poll(__poll_t val)
+{
+ __u16 v = (__force __u16)val;
+ /* bit 9 -> bit 8, bit 8 -> bit 2 */
+ return (v & ~0x300) | ((v & 0x200) >> 1) | ((v & 0x100) >> 6);
+}
+
+static inline __poll_t demangle_poll(__u16 v)
+{
+ /* bit 8 -> bit 9, bit 2 -> bits 2 and 8 */
+ return (__force __poll_t)((v & ~0x100) | ((v & 0x100) << 1) |
+ ((v & 4) << 6));
+}
+#endif
#include <asm-generic/poll.h>
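mangle_poll()/demangle_poll() exist because the MIPS ABI puts POLLWRBAND at 0x0100 and aliases POLLWRNORM to POLLOUT (0x0004), while generic kernel code uses 0x0100/0x0200 for POLLWRNORM/POLLWRBAND. A standalone check of the same bit shuffling, with plain integers standing in for __poll_t:

#include <assert.h>
#include <stdio.h>

static unsigned short mangle_poll(unsigned int val)
{
        unsigned short v = (unsigned short)val;

        /* bit 9 -> bit 8, bit 8 -> bit 2 */
        return (v & ~0x300) | ((v & 0x200) >> 1) | ((v & 0x100) >> 6);
}

static unsigned int demangle_poll(unsigned short v)
{
        /* bit 8 -> bit 9, bit 2 -> bits 2 and 8 */
        return (v & ~0x100) | ((v & 0x100) << 1) | ((v & 4) << 6);
}

int main(void)
{
        assert(mangle_poll(0x200) == 0x100); /* generic POLLWRBAND -> MIPS value */
        assert(mangle_poll(0x100) == 0x004); /* generic POLLWRNORM -> POLLOUT */
        assert(demangle_poll(0x100) == 0x200);
        assert(demangle_poll(0x004) == 0x104); /* POLLOUT implies POLLWRNORM */
        printf("poll bit mapping ok\n");
        return 0;
}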
diff --git a/arch/mips/include/uapi/asm/siginfo.h b/arch/mips/include/uapi/asm/siginfo.h
index f17d8163dec6..262504bd59a5 100644
--- a/arch/mips/include/uapi/asm/siginfo.h
+++ b/arch/mips/include/uapi/asm/siginfo.h
@@ -14,8 +14,6 @@
#define __ARCH_SIGEV_PREAMBLE_SIZE (sizeof(long) + 2*sizeof(int))
#undef __ARCH_SI_TRAPNO /* exception code needs to fill this ... */
-#define HAVE_ARCH_SIGINFO_T
-
/*
* Careful to keep union _sifields from shifting ...
*/
@@ -27,92 +25,10 @@
#error _MIPS_SZLONG neither 32 nor 64
#endif
-#define __ARCH_SIGSYS
+#define __ARCH_HAS_SWAPPED_SIGINFO
#include <asm-generic/siginfo.h>
-/* We can't use generic siginfo_t, because our si_code and si_errno are swapped */
-typedef struct siginfo {
- int si_signo;
- int si_code;
- int si_errno;
- int __pad0[SI_MAX_SIZE / sizeof(int) - SI_PAD_SIZE - 3];
-
- union {
- int _pad[SI_PAD_SIZE];
-
- /* kill() */
- struct {
- __kernel_pid_t _pid; /* sender's pid */
- __ARCH_SI_UID_T _uid; /* sender's uid */
- } _kill;
-
- /* POSIX.1b timers */
- struct {
- __kernel_timer_t _tid; /* timer id */
- int _overrun; /* overrun count */
- char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)];
- sigval_t _sigval; /* same as below */
- int _sys_private; /* not to be passed to user */
- } _timer;
-
- /* POSIX.1b signals */
- struct {
- __kernel_pid_t _pid; /* sender's pid */
- __ARCH_SI_UID_T _uid; /* sender's uid */
- sigval_t _sigval;
- } _rt;
-
- /* SIGCHLD */
- struct {
- __kernel_pid_t _pid; /* which child */
- __ARCH_SI_UID_T _uid; /* sender's uid */
- int _status; /* exit code */
- __kernel_clock_t _utime;
- __kernel_clock_t _stime;
- } _sigchld;
-
- /* IRIX SIGCHLD */
- struct {
- __kernel_pid_t _pid; /* which child */
- __kernel_clock_t _utime;
- int _status; /* exit code */
- __kernel_clock_t _stime;
- } _irix_sigchld;
-
- /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
- struct {
- void __user *_addr; /* faulting insn/memory ref. */
-#ifdef __ARCH_SI_TRAPNO
- int _trapno; /* TRAP # which caused the signal */
-#endif
- short _addr_lsb;
- union {
- /* used when si_code=SEGV_BNDERR */
- struct {
- void __user *_lower;
- void __user *_upper;
- } _addr_bnd;
- /* used when si_code=SEGV_PKUERR */
- __u32 _pkey;
- };
- } _sigfault;
-
- /* SIGPOLL, SIGXFSZ (To do ...) */
- struct {
- __ARCH_SI_BAND_T _band; /* POLL_IN, POLL_OUT, POLL_MSG */
- int _fd;
- } _sigpoll;
-
- /* SIGSYS */
- struct {
- void __user *_call_addr; /* calling user insn */
- int _syscall; /* triggering system call number */
- unsigned int _arch; /* AUDIT_ARCH_* of syscall */
- } _sigsys;
- } _sifields;
-} siginfo_t;
-
/*
* si_code values
* Again these have been chosen to be IRIX compatible.
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index b80dd8b17a76..bbb0f4770c0d 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -336,10 +336,10 @@ static int file_release(struct inode *inode, struct file *filp)
return rtlx_release(iminor(inode));
}
-static unsigned int file_poll(struct file *file, poll_table *wait)
+static __poll_t file_poll(struct file *file, poll_table *wait)
{
int minor = iminor(file_inode(file));
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &channel_wqs[minor].rt_queue, wait);
poll_wait(file, &channel_wqs[minor].lx_queue, wait);
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index cf5c7c05e5a3..c4db910a8794 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -76,70 +76,3 @@ SYSCALL_DEFINE3(32_sigaction, long, sig, const struct compat_sigaction __user *,
return ret;
}
-
-int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
-{
- int err;
-
- if (!access_ok (VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
- return -EFAULT;
-
- /* If you change siginfo_t structure, please be sure
- this code is fixed accordingly.
- It should never copy any pad contained in the structure
- to avoid security leaks, but must copy the generic
- 3 ints plus the relevant union member.
- This routine must convert siginfo from 64bit to 32bit as well
- at the same time. */
- err = __put_user(from->si_signo, &to->si_signo);
- err |= __put_user(from->si_errno, &to->si_errno);
- err |= __put_user(from->si_code, &to->si_code);
- if (from->si_code < 0)
- err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
- else {
- switch (siginfo_layout(from->si_signo, from->si_code)) {
- case SIL_TIMER:
- err |= __put_user(from->si_tid, &to->si_tid);
- err |= __put_user(from->si_overrun, &to->si_overrun);
- err |= __put_user(from->si_int, &to->si_int);
- break;
- case SIL_CHLD:
- err |= __put_user(from->si_utime, &to->si_utime);
- err |= __put_user(from->si_stime, &to->si_stime);
- err |= __put_user(from->si_status, &to->si_status);
- case SIL_KILL:
- err |= __put_user(from->si_pid, &to->si_pid);
- err |= __put_user(from->si_uid, &to->si_uid);
- break;
- case SIL_FAULT:
- err |= __put_user((unsigned long)from->si_addr, &to->si_addr);
- break;
- case SIL_POLL:
- err |= __put_user(from->si_band, &to->si_band);
- err |= __put_user(from->si_fd, &to->si_fd);
- break;
- case SIL_RT:
- err |= __put_user(from->si_pid, &to->si_pid);
- err |= __put_user(from->si_uid, &to->si_uid);
- err |= __put_user(from->si_int, &to->si_int);
- break;
- case SIL_SYS:
- err |= __copy_to_user(&to->si_call_addr, &from->si_call_addr,
- sizeof(compat_uptr_t));
- err |= __put_user(from->si_syscall, &to->si_syscall);
- err |= __put_user(from->si_arch, &to->si_arch);
- break;
- }
- }
- return err;
-}
-
-int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
-{
- if (copy_from_user(to, from, 3*sizeof(int)) ||
- copy_from_user(to->_sifields._pad,
- from->_sifields._pad, SI_PAD_SIZE32))
- return -EFAULT;
-
- return 0;
-}
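With __ARCH_HAS_SWAPPED_SIGINFO defined, the generic compat code now understands the MIPS layout, so these hand-rolled copy routines can go. The quirk being abstracted is simply field order in the header, as this standalone snippet shows:

#include <stdio.h>
#include <stddef.h>

struct generic_hdr { int si_signo, si_errno, si_code; };
struct mips_hdr    { int si_signo, si_code, si_errno; };

int main(void)
{
        printf("si_code offset: generic %zu, mips %zu\n",
               offsetof(struct generic_hdr, si_code),
               offsetof(struct mips_hdr, si_code));
        return 0;
}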
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 5d19ed07e99d..0ae4a731cc12 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -699,11 +699,12 @@ static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
asmlinkage void do_ov(struct pt_regs *regs)
{
enum ctx_state prev_state;
- siginfo_t info = {
- .si_signo = SIGFPE,
- .si_code = FPE_INTOVF,
- .si_addr = (void __user *)regs->cp0_epc,
- };
+ siginfo_t info;
+
+ clear_siginfo(&info);
+ info.si_signo = SIGFPE;
+ info.si_code = FPE_INTOVF;
+ info.si_addr = (void __user *)regs->cp0_epc;
prev_state = exception_enter();
die_if_kernel("Integer overflow", regs);
@@ -721,7 +722,11 @@ asmlinkage void do_ov(struct pt_regs *regs)
void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
struct task_struct *tsk)
{
- struct siginfo si = { .si_addr = fault_addr, .si_signo = SIGFPE };
+ struct siginfo si;
+
+ clear_siginfo(&si);
+ si.si_addr = fault_addr;
+ si.si_signo = SIGFPE;
if (fcr31 & FPU_CSR_INV_X)
si.si_code = FPE_FLTINV;
@@ -739,9 +744,10 @@ void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
{
- struct siginfo si = { 0 };
+ struct siginfo si;
struct vm_area_struct *vma;
+ clear_siginfo(&si);
switch (sig) {
case 0:
return 0;
@@ -890,9 +896,10 @@ out:
void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
const char *str)
{
- siginfo_t info = { 0 };
+ siginfo_t info;
char b[40];
+ clear_siginfo(&info);
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr,
SIGTRAP) == NOTIFY_STOP)
@@ -1499,9 +1506,13 @@ asmlinkage void do_mdmx(struct pt_regs *regs)
*/
asmlinkage void do_watch(struct pt_regs *regs)
{
- siginfo_t info = { .si_signo = SIGTRAP, .si_code = TRAP_HWBKPT };
+ siginfo_t info;
enum ctx_state prev_state;
+ clear_siginfo(&info);
+ info.si_signo = SIGTRAP;
+ info.si_code = TRAP_HWBKPT;
+
prev_state = exception_enter();
/*
* Clear WP (bit 22) bit of cause register so we don't loop
diff --git a/arch/mips/loongson64/Kconfig b/arch/mips/loongson64/Kconfig
index 0d249fc3cfe9..6f109bb54cdb 100644
--- a/arch/mips/loongson64/Kconfig
+++ b/arch/mips/loongson64/Kconfig
@@ -136,6 +136,7 @@ config SWIOTLB
bool "Soft IOMMU Support for All-Memory DMA"
default y
depends on CPU_LOONGSON3
+ select DMA_DIRECT_OPS
select IOMMU_HELPER
select NEED_SG_DMA_LENGTH
select NEED_DMA_MAP_STATE
diff --git a/arch/mips/loongson64/common/dma-swiotlb.c b/arch/mips/loongson64/common/dma-swiotlb.c
index ef07740cee61..7bbcf89475f3 100644
--- a/arch/mips/loongson64/common/dma-swiotlb.c
+++ b/arch/mips/loongson64/common/dma-swiotlb.c
@@ -13,32 +13,12 @@
static void *loongson_dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
- void *ret;
+ void *ret = swiotlb_alloc(dev, size, dma_handle, gfp, attrs);
- /* ignore region specifiers */
- gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
-
- if ((IS_ENABLED(CONFIG_ISA) && dev == NULL) ||
- (IS_ENABLED(CONFIG_ZONE_DMA) &&
- dev->coherent_dma_mask < DMA_BIT_MASK(32)))
- gfp |= __GFP_DMA;
- else if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
- dev->coherent_dma_mask < DMA_BIT_MASK(40))
- gfp |= __GFP_DMA32;
-
- gfp |= __GFP_NORETRY;
-
- ret = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
mb();
return ret;
}
-static void loongson_dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle, unsigned long attrs)
-{
- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
-}
-
static dma_addr_t loongson_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
@@ -109,7 +89,7 @@ phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
static const struct dma_map_ops loongson_dma_map_ops = {
.alloc = loongson_dma_alloc_coherent,
- .free = loongson_dma_free_coherent,
+ .free = swiotlb_free,
.map_page = loongson_dma_map_page,
.unmap_page = swiotlb_unmap_page,
.map_sg = loongson_dma_map_sg,
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index e3e94d05f0fd..237532e89919 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -93,9 +93,6 @@ static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
gfp_t dma_flag;
- /* ignore region specifiers */
- gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
-
#ifdef CONFIG_ISA
if (dev == NULL)
dma_flag = __GFP_DMA;
diff --git a/arch/mips/netlogic/Kconfig b/arch/mips/netlogic/Kconfig
index 8296b13affd2..7fcfc7fe9f14 100644
--- a/arch/mips/netlogic/Kconfig
+++ b/arch/mips/netlogic/Kconfig
@@ -89,9 +89,4 @@ config IOMMU_HELPER
config NEED_SG_DMA_LENGTH
bool
-config SWIOTLB
- def_bool y
- select NEED_SG_DMA_LENGTH
- select IOMMU_HELPER
-
endif
diff --git a/arch/mips/netlogic/common/Makefile b/arch/mips/netlogic/common/Makefile
index 60d00b5d748e..89f6e3f39fed 100644
--- a/arch/mips/netlogic/common/Makefile
+++ b/arch/mips/netlogic/common/Makefile
@@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
obj-y += irq.o time.o
-obj-y += nlm-dma.o
obj-y += reset.o
obj-$(CONFIG_SMP) += smp.o smpboot.o
obj-$(CONFIG_EARLY_PRINTK) += earlycons.o
diff --git a/arch/mips/netlogic/common/nlm-dma.c b/arch/mips/netlogic/common/nlm-dma.c
deleted file mode 100644
index 0ec9d9da6d51..000000000000
--- a/arch/mips/netlogic/common/nlm-dma.c
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
-* Copyright (C) 2003-2013 Broadcom Corporation
-* All Rights Reserved
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the Broadcom
- * license below:
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
- * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#include <linux/dma-mapping.h>
-#include <linux/scatterlist.h>
-#include <linux/bootmem.h>
-#include <linux/export.h>
-#include <linux/swiotlb.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/mm.h>
-
-#include <asm/bootinfo.h>
-
-static char *nlm_swiotlb;
-
-static void *nlm_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
-{
- /* ignore region specifiers */
- gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
-
-#ifdef CONFIG_ZONE_DMA32
- if (dev->coherent_dma_mask <= DMA_BIT_MASK(32))
- gfp |= __GFP_DMA32;
-#endif
-
- /* Don't invoke OOM killer */
- gfp |= __GFP_NORETRY;
-
- return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
-}
-
-static void nlm_dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle, unsigned long attrs)
-{
- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
-}
-
-const struct dma_map_ops nlm_swiotlb_dma_ops = {
- .alloc = nlm_dma_alloc_coherent,
- .free = nlm_dma_free_coherent,
- .map_page = swiotlb_map_page,
- .unmap_page = swiotlb_unmap_page,
- .map_sg = swiotlb_map_sg_attrs,
- .unmap_sg = swiotlb_unmap_sg_attrs,
- .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
- .sync_single_for_device = swiotlb_sync_single_for_device,
- .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
- .sync_sg_for_device = swiotlb_sync_sg_for_device,
- .mapping_error = swiotlb_dma_mapping_error,
- .dma_supported = swiotlb_dma_supported
-};
-
-void __init plat_swiotlb_setup(void)
-{
- size_t swiotlbsize;
- unsigned long swiotlb_nslabs;
-
- swiotlbsize = 1 << 20; /* 1 MB for now */
- swiotlb_nslabs = swiotlbsize >> IO_TLB_SHIFT;
- swiotlb_nslabs = ALIGN(swiotlb_nslabs, IO_TLB_SEGSIZE);
- swiotlbsize = swiotlb_nslabs << IO_TLB_SHIFT;
-
- nlm_swiotlb = alloc_bootmem_low_pages(swiotlbsize);
- swiotlb_init_with_tbl(nlm_swiotlb, swiotlb_nslabs, 1);
-}
diff --git a/arch/mn10300/include/asm/thread_info.h b/arch/mn10300/include/asm/thread_info.h
index f5f90bbf019d..1748a7b25bf8 100644
--- a/arch/mn10300/include/asm/thread_info.h
+++ b/arch/mn10300/include/asm/thread_info.h
@@ -79,8 +79,6 @@ struct thread_info {
.addr_limit = KERNEL_DS, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
#define init_uregs \
((struct pt_regs *) \
((unsigned long) init_stack + THREAD_SIZE - sizeof(struct pt_regs)))
diff --git a/arch/mn10300/include/uapi/asm/Kbuild b/arch/mn10300/include/uapi/asm/Kbuild
index 81271d3af47c..b04fd1632051 100644
--- a/arch/mn10300/include/uapi/asm/Kbuild
+++ b/arch/mn10300/include/uapi/asm/Kbuild
@@ -2,4 +2,5 @@
include include/uapi/asm-generic/Kbuild.asm
generic-y += bpf_perf_event.h
+generic-y += poll.h
generic-y += siginfo.h
diff --git a/arch/mn10300/include/uapi/asm/poll.h b/arch/mn10300/include/uapi/asm/poll.h
deleted file mode 100644
index b7132a305a47..000000000000
--- a/arch/mn10300/include/uapi/asm/poll.h
+++ /dev/null
@@ -1,2 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#include <asm-generic/poll.h>
diff --git a/arch/mn10300/kernel/mn10300-serial.c b/arch/mn10300/kernel/mn10300-serial.c
index d7ef1232a82a..4994b570dfd9 100644
--- a/arch/mn10300/kernel/mn10300-serial.c
+++ b/arch/mn10300/kernel/mn10300-serial.c
@@ -550,7 +550,7 @@ try_again:
return;
}
- smp_read_barrier_depends();
+	/* READ_ONCE() enforces the dependency, but it is unsafe through an integer index! */
ch = port->rx_buffer[ix++];
st = port->rx_buffer[ix++];
smp_mb();
@@ -1728,7 +1728,10 @@ static int mn10300_serial_poll_get_char(struct uart_port *_port)
if (CIRC_CNT(port->rx_inp, ix, MNSC_BUFFER_SIZE) == 0)
return NO_POLL_CHAR;
- smp_read_barrier_depends();
+ /*
+	 * READ_ONCE() enforces the dependency, but it is
+	 * unsafe through an integer index!
+ */
ch = port->rx_buffer[ix++];
st = port->rx_buffer[ix++];
smp_mb();
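These comments track the kernel-wide retirement of smp_read_barrier_depends(): READ_ONCE() now implies it, but dependency ordering only helps when the consumer dereferences a published pointer, not when it indexes an array through an integer, hence the warning left in place of the barrier. A C11 sketch of one safe way to pair such a producer and consumer, with standard atomics standing in for the kernel primitives (an illustration under those assumptions, not the driver's actual scheme):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int rx_inp; /* producer-published index */
static unsigned char rx_buffer[64];

static int consume(unsigned int ix)
{
        /* The acquire load pairs with the producer's release store,
         * ordering the buffer write before the index update. */
        unsigned int inp = atomic_load_explicit(&rx_inp, memory_order_acquire);

        if (ix == inp)
                return -1; /* ring empty */
        return rx_buffer[ix];
}

int main(void)
{
        rx_buffer[0] = 'A';
        atomic_store_explicit(&rx_inp, 1, memory_order_release);
        printf("%c\n", consume(0));
        return 0;
}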
diff --git a/arch/mn10300/mm/dma-alloc.c b/arch/mn10300/mm/dma-alloc.c
index 86108d2496b3..e3910d4db102 100644
--- a/arch/mn10300/mm/dma-alloc.c
+++ b/arch/mn10300/mm/dma-alloc.c
@@ -37,9 +37,6 @@ static void *mn10300_dma_alloc(struct device *dev, size_t size,
goto done;
}
- /* ignore region specifiers */
- gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
-
if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
gfp |= GFP_DMA;
diff --git a/arch/mn10300/mm/misalignment.c b/arch/mn10300/mm/misalignment.c
index b39a388825ae..8ace89617c1c 100644
--- a/arch/mn10300/mm/misalignment.c
+++ b/arch/mn10300/mm/misalignment.c
@@ -437,7 +437,7 @@ transfer_failed:
info.si_signo = SIGSEGV;
info.si_errno = 0;
- info.si_code = 0;
+ info.si_code = SEGV_MAPERR;
info.si_addr = (void *) regs->pc;
force_sig_info(SIGSEGV, &info, current);
return;
diff --git a/arch/nios2/include/asm/thread_info.h b/arch/nios2/include/asm/thread_info.h
index d69c338bd19c..7349a4fa635b 100644
--- a/arch/nios2/include/asm/thread_info.h
+++ b/arch/nios2/include/asm/thread_info.h
@@ -63,9 +63,6 @@ struct thread_info {
.addr_limit = KERNEL_DS, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
diff --git a/arch/nios2/mm/dma-mapping.c b/arch/nios2/mm/dma-mapping.c
index 7040c1adbb5e..4be815519dd4 100644
--- a/arch/nios2/mm/dma-mapping.c
+++ b/arch/nios2/mm/dma-mapping.c
@@ -63,9 +63,6 @@ static void *nios2_dma_alloc(struct device *dev, size_t size,
{
void *ret;
- /* ignore region specifiers */
- gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
-
/* optimized page clearing */
gfp |= __GFP_ZERO;
diff --git a/arch/openrisc/include/asm/processor.h b/arch/openrisc/include/asm/processor.h
index 396d8f306c21..af31a9fe736a 100644
--- a/arch/openrisc/include/asm/processor.h
+++ b/arch/openrisc/include/asm/processor.h
@@ -84,8 +84,6 @@ void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp);
void release_thread(struct task_struct *);
unsigned long get_wchan(struct task_struct *p);
-#define init_stack (init_thread_union.stack)
-
#define cpu_relax() barrier()
#endif /* __ASSEMBLY__ */
diff --git a/arch/openrisc/include/asm/thread_info.h b/arch/openrisc/include/asm/thread_info.h
index c229aa6bb502..5c15dfa2fd4f 100644
--- a/arch/openrisc/include/asm/thread_info.h
+++ b/arch/openrisc/include/asm/thread_info.h
@@ -79,8 +79,6 @@ struct thread_info {
.ksp = 0, \
}
-#define init_thread_info (init_thread_union.thread_info)
-
/* how to get the thread information struct from C */
register struct thread_info *current_thread_info_reg asm("r10");
#define current_thread_info() (current_thread_info_reg)
diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c
index 4085d72fa5ae..9e38dc66c9e4 100644
--- a/arch/openrisc/kernel/traps.c
+++ b/arch/openrisc/kernel/traps.c
@@ -266,12 +266,12 @@ asmlinkage void do_unaligned_access(struct pt_regs *regs, unsigned long address)
siginfo_t info;
if (user_mode(regs)) {
- /* Send a SIGSEGV */
- info.si_signo = SIGSEGV;
+ /* Send a SIGBUS */
+ info.si_signo = SIGBUS;
info.si_errno = 0;
- /* info.si_code has been set above */
- info.si_addr = (void *)address;
- force_sig_info(SIGSEGV, &info, current);
+ info.si_code = BUS_ADRALN;
+ info.si_addr = (void __user *)address;
+ force_sig_info(SIGBUS, &info, current);
} else {
printk("KERNEL: Unaligned Access 0x%.8lx\n", address);
show_registers(regs);
diff --git a/arch/openrisc/kernel/vmlinux.lds.S b/arch/openrisc/kernel/vmlinux.lds.S
index 00ddb7804be4..953bdcd54efe 100644
--- a/arch/openrisc/kernel/vmlinux.lds.S
+++ b/arch/openrisc/kernel/vmlinux.lds.S
@@ -28,6 +28,7 @@
#include <asm/page.h>
#include <asm/cache.h>
+#include <asm/thread_info.h>
#include <asm-generic/vmlinux.lds.h>
#ifdef __OR1K__
diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h
index acf8aa07cbe0..c22db5323244 100644
--- a/arch/parisc/include/asm/compat.h
+++ b/arch/parisc/include/asm/compat.h
@@ -130,70 +130,6 @@ typedef u32 compat_old_sigset_t; /* at least 32 bits */
typedef u32 compat_sigset_word;
-typedef union compat_sigval {
- compat_int_t sival_int;
- compat_uptr_t sival_ptr;
-} compat_sigval_t;
-
-typedef struct compat_siginfo {
- int si_signo;
- int si_errno;
- int si_code;
-
- union {
- int _pad[128/sizeof(int) - 3];
-
- /* kill() */
- struct {
- unsigned int _pid; /* sender's pid */
- unsigned int _uid; /* sender's uid */
- } _kill;
-
- /* POSIX.1b timers */
- struct {
- compat_timer_t _tid; /* timer id */
- int _overrun; /* overrun count */
- char _pad[sizeof(unsigned int) - sizeof(int)];
- compat_sigval_t _sigval; /* same as below */
- int _sys_private; /* not to be passed to user */
- } _timer;
-
- /* POSIX.1b signals */
- struct {
- unsigned int _pid; /* sender's pid */
- unsigned int _uid; /* sender's uid */
- compat_sigval_t _sigval;
- } _rt;
-
- /* SIGCHLD */
- struct {
- unsigned int _pid; /* which child */
- unsigned int _uid; /* sender's uid */
- int _status; /* exit code */
- compat_clock_t _utime;
- compat_clock_t _stime;
- } _sigchld;
-
- /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
- struct {
- unsigned int _addr; /* faulting insn/memory ref. */
- } _sigfault;
-
- /* SIGPOLL */
- struct {
- int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
- int _fd;
- } _sigpoll;
-
- /* SIGSYS */
- struct {
- compat_uptr_t _call_addr; /* calling user insn */
- int _syscall; /* triggering system call number */
- compat_uint_t _arch; /* AUDIT_ARCH_* of syscall */
- } _sigsys;
- } _sifields;
-} compat_siginfo_t;
-
#define COMPAT_OFF_T_MAX 0x7fffffff
struct compat_ipc64_perm {
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index 598c8d60fa5e..285757544cca 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -25,9 +25,6 @@ struct thread_info {
.preempt_count = INIT_PREEMPT_COUNT, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
/* how to get the thread information struct from C */
#define current_thread_info() ((struct thread_info *)mfctl(30))
diff --git a/arch/parisc/include/uapi/asm/siginfo.h b/arch/parisc/include/uapi/asm/siginfo.h
index 4a1062e05aaf..be40331f757d 100644
--- a/arch/parisc/include/uapi/asm/siginfo.h
+++ b/arch/parisc/include/uapi/asm/siginfo.h
@@ -8,4 +8,11 @@
#include <asm-generic/siginfo.h>
+/*
+ * SIGFPE si_codes
+ */
+#ifdef __KERNEL__
+#define FPE_FIXME 0 /* Broken dup of SI_USER */
+#endif /* __KERNEL__ */
+
#endif
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index c0dfd892f70c..91bc0cac03a1 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -75,11 +75,6 @@ void dump_resmap(void)
static inline void dump_resmap(void) {;}
#endif
-static int pa11_dma_supported( struct device *dev, u64 mask)
-{
- return 1;
-}
-
static inline int map_pte_uncached(pte_t * pte,
unsigned long vaddr,
unsigned long size, unsigned long *paddr_ptr)
@@ -579,7 +574,6 @@ static void pa11_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
}
const struct dma_map_ops pcxl_dma_ops = {
- .dma_supported = pa11_dma_supported,
.alloc = pa11_dma_alloc,
.free = pa11_dma_free,
.map_page = pa11_dma_map_page,
@@ -616,7 +610,6 @@ static void pcx_dma_free(struct device *dev, size_t size, void *vaddr,
}
const struct dma_map_ops pcx_dma_ops = {
- .dma_supported = pa11_dma_supported,
.alloc = pcx_dma_alloc,
.free = pcx_dma_free,
.map_page = pa11_dma_map_page,
diff --git a/arch/parisc/kernel/pdt.c b/arch/parisc/kernel/pdt.c
index e07eb34c8750..36434d4da381 100644
--- a/arch/parisc/kernel/pdt.c
+++ b/arch/parisc/kernel/pdt.c
@@ -325,7 +325,7 @@ static int pdt_mainloop(void *unused)
#ifdef CONFIG_MEMORY_FAILURE
if ((pde & PDT_ADDR_PERM_ERR) ||
((pde & PDT_ADDR_SINGLE_ERR) == 0))
- memory_failure(pde >> PAGE_SHIFT, 0, 0);
+ memory_failure(pde >> PAGE_SHIFT, 0);
else
soft_offline_page(
pfn_to_page(pde >> PAGE_SHIFT), 0);
diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c
index 41afa9cd1f55..e8ef3eb69449 100644
--- a/arch/parisc/kernel/signal32.c
+++ b/arch/parisc/kernel/signal32.c
@@ -260,109 +260,3 @@ setup_sigcontext32(struct compat_sigcontext __user *sc, struct compat_regfile __
return err;
}
-
-int
-copy_siginfo_from_user32 (siginfo_t *to, compat_siginfo_t __user *from)
-{
- compat_uptr_t addr;
- int err;
-
- if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t)))
- return -EFAULT;
-
- err = __get_user(to->si_signo, &from->si_signo);
- err |= __get_user(to->si_errno, &from->si_errno);
- err |= __get_user(to->si_code, &from->si_code);
-
- if (to->si_code < 0)
- err |= __copy_from_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
- else {
- switch (siginfo_layout(to->si_signo, to->si_code)) {
- case SIL_CHLD:
- err |= __get_user(to->si_utime, &from->si_utime);
- err |= __get_user(to->si_stime, &from->si_stime);
- err |= __get_user(to->si_status, &from->si_status);
- default:
- case SIL_KILL:
- err |= __get_user(to->si_pid, &from->si_pid);
- err |= __get_user(to->si_uid, &from->si_uid);
- break;
- case SIL_FAULT:
- err |= __get_user(addr, &from->si_addr);
- to->si_addr = compat_ptr(addr);
- break;
- case SIL_POLL:
- err |= __get_user(to->si_band, &from->si_band);
- err |= __get_user(to->si_fd, &from->si_fd);
- break;
- case SIL_RT:
- err |= __get_user(to->si_pid, &from->si_pid);
- err |= __get_user(to->si_uid, &from->si_uid);
- err |= __get_user(to->si_int, &from->si_int);
- break;
- }
- }
- return err;
-}
-
-int
-copy_siginfo_to_user32 (compat_siginfo_t __user *to, const siginfo_t *from)
-{
- compat_uptr_t addr;
- compat_int_t val;
- int err;
-
- if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
- return -EFAULT;
-
- /* If you change siginfo_t structure, please be sure
- this code is fixed accordingly.
- It should never copy any pad contained in the structure
- to avoid security leaks, but must copy the generic
- 3 ints plus the relevant union member.
- This routine must convert siginfo from 64bit to 32bit as well
- at the same time. */
- err = __put_user(from->si_signo, &to->si_signo);
- err |= __put_user(from->si_errno, &to->si_errno);
- err |= __put_user(from->si_code, &to->si_code);
- if (from->si_code < 0)
- err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
- else {
- switch (siginfo_layout(from->si_signo, from->si_code)) {
- case SIL_CHLD:
- err |= __put_user(from->si_utime, &to->si_utime);
- err |= __put_user(from->si_stime, &to->si_stime);
- err |= __put_user(from->si_status, &to->si_status);
- case SIL_KILL:
- err |= __put_user(from->si_pid, &to->si_pid);
- err |= __put_user(from->si_uid, &to->si_uid);
- break;
- case SIL_FAULT:
- addr = ptr_to_compat(from->si_addr);
- err |= __put_user(addr, &to->si_addr);
- break;
- case SIL_POLL:
- err |= __put_user(from->si_band, &to->si_band);
- err |= __put_user(from->si_fd, &to->si_fd);
- break;
- case SIL_TIMER:
- err |= __put_user(from->si_tid, &to->si_tid);
- err |= __put_user(from->si_overrun, &to->si_overrun);
- val = (compat_int_t)from->si_int;
- err |= __put_user(val, &to->si_int);
- break;
- case SIL_RT:
- err |= __put_user(from->si_uid, &to->si_uid);
- err |= __put_user(from->si_pid, &to->si_pid);
- val = (compat_int_t)from->si_int;
- err |= __put_user(val, &to->si_int);
- break;
- case SIL_SYS:
- err |= __put_user(ptr_to_compat(from->si_call_addr), &to->si_call_addr);
- err |= __put_user(from->si_syscall, &to->si_syscall);
- err |= __put_user(from->si_arch, &to->si_arch);
- break;
- }
- }
- return err;
-}
diff --git a/arch/parisc/kernel/signal32.h b/arch/parisc/kernel/signal32.h
index 719e7417732c..a271dc0976ce 100644
--- a/arch/parisc/kernel/signal32.h
+++ b/arch/parisc/kernel/signal32.h
@@ -34,9 +34,6 @@ struct compat_ucontext {
/* ELF32 signal handling */
-int copy_siginfo_to_user32 (compat_siginfo_t __user *to, const siginfo_t *from);
-int copy_siginfo_from_user32 (siginfo_t *to, compat_siginfo_t __user *from);
-
/* In a deft move of uber-hackery, we decide to carry the top half of all
* 64-bit registers in a non-portable, non-ABI, hidden structure.
* Userspace can read the hidden structure if it *wants* but is never
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 8453724b8009..c919e6c0a687 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -629,7 +629,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
si.si_signo = SIGFPE;
/* Set to zero, and let the userspace app figure it out from
the insn pointed to by si_addr */
- si.si_code = 0;
+ si.si_code = FPE_FIXME;
si.si_addr = (void __user *) regs->iaoq[0];
force_sig_info(SIGFPE, &si, current);
return;
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 2ed525a44734..e92432ae9737 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -139,6 +139,7 @@ config PPC
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
+ select ARCH_HAS_PHYS_TO_DMA
select ARCH_HAS_PMEM_API if PPC64
select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
select ARCH_HAS_SG_CHAIN
diff --git a/arch/powerpc/configs/fsl-emb-nonhw.config b/arch/powerpc/configs/fsl-emb-nonhw.config
index cc49c95494da..e0567dc41968 100644
--- a/arch/powerpc/configs/fsl-emb-nonhw.config
+++ b/arch/powerpc/configs/fsl-emb-nonhw.config
@@ -71,7 +71,6 @@ CONFIG_IP_ROUTE_MULTIPATH=y
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_SCTP=m
CONFIG_IPV6=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_ISO9660_FS=m
CONFIG_JFFS2_FS_DEBUG=1
CONFIG_JFFS2_FS=y
diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig
index 4891bbed6258..73dab7a37386 100644
--- a/arch/powerpc/configs/powernv_defconfig
+++ b/arch/powerpc/configs/powernv_defconfig
@@ -4,7 +4,6 @@ CONFIG_CPU_LITTLE_ENDIAN=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_AUDIT=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_TASKSTATS=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 6ddca80c52c3..5033e630afea 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -1,7 +1,6 @@
CONFIG_PPC64=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_TASKSTATS=y
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index bde2cd1005a2..0dd5cf7b566d 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -3,7 +3,6 @@ CONFIG_NR_CPUS=2048
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_AUDIT=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_TASKSTATS=y
diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
index f058e0c3e4d4..fd1d6c83f0c0 100644
--- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c
+++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c
@@ -141,6 +141,7 @@ static struct shash_alg alg = {
.cra_name = "crc32c",
.cra_driver_name = "crc32c-vpmsum",
.cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_ctxsize = sizeof(u32),
.cra_module = THIS_MODULE,
diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h
index 8a2aecfe9b02..62168e1158f1 100644
--- a/arch/powerpc/include/asm/compat.h
+++ b/arch/powerpc/include/asm/compat.h
@@ -119,71 +119,6 @@ typedef u32 compat_old_sigset_t;
typedef u32 compat_sigset_word;
-typedef union compat_sigval {
- compat_int_t sival_int;
- compat_uptr_t sival_ptr;
-} compat_sigval_t;
-
-#define SI_PAD_SIZE32 (128/sizeof(int) - 3)
-
-typedef struct compat_siginfo {
- int si_signo;
- int si_errno;
- int si_code;
-
- union {
- int _pad[SI_PAD_SIZE32];
-
- /* kill() */
- struct {
- compat_pid_t _pid; /* sender's pid */
- __compat_uid_t _uid; /* sender's uid */
- } _kill;
-
- /* POSIX.1b timers */
- struct {
- compat_timer_t _tid; /* timer id */
- int _overrun; /* overrun count */
- compat_sigval_t _sigval; /* same as below */
- int _sys_private; /* not to be passed to user */
- } _timer;
-
- /* POSIX.1b signals */
- struct {
- compat_pid_t _pid; /* sender's pid */
- __compat_uid_t _uid; /* sender's uid */
- compat_sigval_t _sigval;
- } _rt;
-
- /* SIGCHLD */
- struct {
- compat_pid_t _pid; /* which child */
- __compat_uid_t _uid; /* sender's uid */
- int _status; /* exit code */
- compat_clock_t _utime;
- compat_clock_t _stime;
- } _sigchld;
-
- /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGEMT */
- struct {
- unsigned int _addr; /* faulting insn/memory ref. */
- } _sigfault;
-
- /* SIGPOLL */
- struct {
- int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
- int _fd;
- } _sigpoll;
-
- /* SIGSYS */
- struct {
- unsigned int _call_addr; /* calling insn */
- int _syscall; /* triggering system call number */
- unsigned int _arch; /* AUDIT_ARCH_* of syscall */
- } _sigsys;
- } _sifields;
-} compat_siginfo_t;
-
#define COMPAT_OFF_T_MAX 0x7fffffff
/*
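A note on the compat_siginfo definitions deleted here (and again in the s390 and sparc hunks further down): they are replaced by a single generic definition, so each architecture no longer carries a drifting private copy. A trimmed sketch of the consolidated shape follows; field layout is abbreviated, and the authoritative version is the generic <linux/compat.h> this series introduces:

/* Sketch only: abbreviated view of the generic compat_siginfo that
 * supersedes the per-arch copies. See <linux/compat.h> for the full
 * _timer, _rt, _sigchld, _sigfault, _sigpoll and _sigsys members.
 */
typedef struct compat_siginfo {
	int si_signo;
	int si_errno;
	int si_code;
	union {
		int _pad[128 / sizeof(int) - 3];
		struct {
			compat_pid_t _pid;	/* sender's pid */
			__compat_uid32_t _uid;	/* sender's uid */
		} _kill;
		/* ... remaining union members elided ... */
	} _sifields;
} compat_siginfo_t;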
diff --git a/arch/powerpc/include/asm/debug.h b/arch/powerpc/include/asm/debug.h
index 14e71ff6579e..fc97404de0a3 100644
--- a/arch/powerpc/include/asm/debug.h
+++ b/arch/powerpc/include/asm/debug.h
@@ -49,7 +49,7 @@ void set_breakpoint(struct arch_hw_breakpoint *brk);
void __set_breakpoint(struct arch_hw_breakpoint *brk);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
extern void do_send_trap(struct pt_regs *regs, unsigned long address,
- unsigned long error_code, int signal_code, int brkpt);
+ unsigned long error_code, int brkpt);
#else
extern void do_break(struct pt_regs *regs, unsigned long address,
diff --git a/arch/powerpc/include/asm/dma-direct.h b/arch/powerpc/include/asm/dma-direct.h
new file mode 100644
index 000000000000..a5b59c765426
--- /dev/null
+++ b/arch/powerpc/include/asm/dma-direct.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ASM_POWERPC_DMA_DIRECT_H
+#define ASM_POWERPC_DMA_DIRECT_H 1
+
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+#ifdef CONFIG_SWIOTLB
+ struct dev_archdata *sd = &dev->archdata;
+
+ if (sd->max_direct_dma_addr && addr + size > sd->max_direct_dma_addr)
+ return false;
+#endif
+
+ if (!dev->dma_mask)
+ return false;
+
+ return addr + size - 1 <= *dev->dma_mask;
+}
+
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ return paddr + get_dma_offset(dev);
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+ return daddr - get_dma_offset(dev);
+}
+#endif /* ASM_POWERPC_DMA_DIRECT_H */
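For context on the new dma-direct.h above: with ARCH_HAS_PHYS_TO_DMA selected in the powerpc Kconfig hunk earlier, the generic DMA code calls phys_to_dma()/dma_to_phys() to translate between CPU physical and bus addresses, and dma_capable() to range-check the result. A minimal illustration of how the three compose; map_buffer() is hypothetical and not part of this patch:

/* Hypothetical caller, for illustration only. Assumes 'dev' had
 * set_dma_offset() applied during bus setup, so get_dma_offset()
 * returns a constant per-device offset.
 */
static dma_addr_t map_buffer(struct device *dev, struct page *page,
			     size_t size)
{
	phys_addr_t paddr = page_to_phys(page);
	dma_addr_t daddr = phys_to_dma(dev, paddr); /* paddr + offset */

	if (!dma_capable(dev, daddr, size))
		return 0; /* a real caller would bounce via swiotlb */
	return daddr;
}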
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 5a6cbe11db6f..8fa394520af6 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -19,13 +19,13 @@
#include <asm/swiotlb.h>
/* Some dma direct funcs must be visible for use in other dma_ops */
-extern void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
+extern void *__dma_nommu_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
unsigned long attrs);
-extern void __dma_direct_free_coherent(struct device *dev, size_t size,
+extern void __dma_nommu_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
unsigned long attrs);
-extern int dma_direct_mmap_coherent(struct device *dev,
+extern int dma_nommu_mmap_coherent(struct device *dev,
struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t handle,
size_t size, unsigned long attrs);
@@ -73,7 +73,7 @@ static inline unsigned long device_to_mask(struct device *dev)
#ifdef CONFIG_PPC64
extern struct dma_map_ops dma_iommu_ops;
#endif
-extern const struct dma_map_ops dma_direct_ops;
+extern const struct dma_map_ops dma_nommu_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
@@ -107,39 +107,11 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
dev->archdata.dma_offset = off;
}
-/* this will be removed soon */
-#define flush_write_buffers()
-
#define HAVE_ARCH_DMA_SET_MASK 1
extern int dma_set_mask(struct device *dev, u64 dma_mask);
extern u64 __dma_get_required_mask(struct device *dev);
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
-#ifdef CONFIG_SWIOTLB
- struct dev_archdata *sd = &dev->archdata;
-
- if (sd->max_direct_dma_addr && addr + size > sd->max_direct_dma_addr)
- return false;
-#endif
-
- if (!dev->dma_mask)
- return false;
-
- return addr + size - 1 <= *dev->dma_mask;
-}
-
-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
- return paddr + get_dma_offset(dev);
-}
-
-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
-{
- return daddr - get_dma_offset(dev);
-}
-
#define ARCH_HAS_DMA_MMAP_COHERENT
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h
index 01d45a5fd00b..f65ecf57b66c 100644
--- a/arch/powerpc/include/asm/swiotlb.h
+++ b/arch/powerpc/include/asm/swiotlb.h
@@ -13,9 +13,7 @@
#include <linux/swiotlb.h>
-extern const struct dma_map_ops swiotlb_dma_ops;
-
-static inline void dma_mark_clean(void *addr, size_t size) {}
+extern const struct dma_map_ops powerpc_swiotlb_dma_ops;
extern unsigned int ppc_swiotlb_enable;
int __init swiotlb_setup_bus_notifier(void);
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index a264c3ad366b..4a12c00f8de3 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -58,9 +58,6 @@ struct thread_info {
.flags = 0, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
/* how to get the thread information struct from C */
diff --git a/arch/powerpc/include/uapi/asm/siginfo.h b/arch/powerpc/include/uapi/asm/siginfo.h
index 1a691141e49f..9f142451a01f 100644
--- a/arch/powerpc/include/uapi/asm/siginfo.h
+++ b/arch/powerpc/include/uapi/asm/siginfo.h
@@ -15,7 +15,19 @@
#include <asm-generic/siginfo.h>
-#undef NSIGTRAP
-#define NSIGTRAP 4
+/*
+ * SIGFPE si_codes
+ */
+#ifdef __KERNEL__
+#define FPE_FIXME 0 /* Broken dup of SI_USER */
+#endif /* __KERNEL__ */
+
+/*
+ * SIGTRAP si_codes
+ */
+#ifdef __KERNEL__
+#define TRAP_FIXME 0 /* Broken dup of SI_USER */
+#endif /* __KERNEL__ */
+
#endif /* _ASM_POWERPC_SIGINFO_H */
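FPE_FIXME and TRAP_FIXME above make an existing bug greppable rather than fixing it: si_code 0 is numerically identical to SI_USER, so userspace cannot tell a hardware trap from a kill(2). An illustrative userspace handler (not from this patch) shows the ambiguity:

/* Userspace illustration only: because SI_USER == 0 == FPE_FIXME,
 * this test also matches hardware faults delivered with the
 * placeholder si_code.
 */
static void fpe_handler(int sig, siginfo_t *info, void *ucontext)
{
	if (info->si_code == SI_USER) {
		/* a genuine kill()/raise() ... or an FPE_FIXME trap */
	}
}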
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 66f33e7f8d40..f9fe2080ceb9 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -114,7 +114,7 @@ int dma_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
struct dma_map_ops dma_iommu_ops = {
.alloc = dma_iommu_alloc_coherent,
.free = dma_iommu_free_coherent,
- .mmap = dma_direct_mmap_coherent,
+ .mmap = dma_nommu_mmap_coherent,
.map_sg = dma_iommu_map_sg,
.unmap_sg = dma_iommu_unmap_sg,
.dma_supported = dma_iommu_dma_supported,
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index d0ea7860e02b..88f3963ca30f 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -46,10 +46,10 @@ static u64 swiotlb_powerpc_get_required(struct device *dev)
* map_page, and unmap_page on highmem, use normal dma_ops
* for everything else.
*/
-const struct dma_map_ops swiotlb_dma_ops = {
- .alloc = __dma_direct_alloc_coherent,
- .free = __dma_direct_free_coherent,
- .mmap = dma_direct_mmap_coherent,
+const struct dma_map_ops powerpc_swiotlb_dma_ops = {
+ .alloc = __dma_nommu_alloc_coherent,
+ .free = __dma_nommu_free_coherent,
+ .mmap = dma_nommu_mmap_coherent,
.map_sg = swiotlb_map_sg_attrs,
.unmap_sg = swiotlb_unmap_sg_attrs,
.dma_supported = swiotlb_dma_supported,
@@ -89,7 +89,7 @@ static int ppc_swiotlb_bus_notify(struct notifier_block *nb,
/* May need to bounce if the device can't address all of DRAM */
if ((dma_get_mask(dev) + 1) < memblock_end_of_DRAM())
- set_dma_ops(dev, &swiotlb_dma_ops);
+ set_dma_ops(dev, &powerpc_swiotlb_dma_ops);
return NOTIFY_DONE;
}
@@ -121,7 +121,7 @@ static int __init check_swiotlb_enabled(void)
if (ppc_swiotlb_enable)
swiotlb_print_info();
else
- swiotlb_free();
+ swiotlb_exit();
return 0;
}
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 4194bbbbdb10..da20569de9d4 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -33,14 +33,14 @@ static u64 __maybe_unused get_pfn_limit(struct device *dev)
struct dev_archdata __maybe_unused *sd = &dev->archdata;
#ifdef CONFIG_SWIOTLB
- if (sd->max_direct_dma_addr && dev->dma_ops == &swiotlb_dma_ops)
+ if (sd->max_direct_dma_addr && dev->dma_ops == &powerpc_swiotlb_dma_ops)
pfn = min_t(u64, pfn, sd->max_direct_dma_addr >> PAGE_SHIFT);
#endif
return pfn;
}
-static int dma_direct_dma_supported(struct device *dev, u64 mask)
+static int dma_nommu_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
u64 limit = get_dma_offset(dev) + (memblock_end_of_DRAM() - 1);
@@ -62,7 +62,7 @@ static int dma_direct_dma_supported(struct device *dev, u64 mask)
#endif
}
-void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
+void *__dma_nommu_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
unsigned long attrs)
{
@@ -105,9 +105,6 @@ void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
};
#endif /* CONFIG_FSL_SOC */
- /* ignore region specifiers */
- flag &= ~(__GFP_HIGHMEM);
-
page = alloc_pages_node(node, flag, get_order(size));
if (page == NULL)
return NULL;
@@ -119,7 +116,7 @@ void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
#endif
}
-void __dma_direct_free_coherent(struct device *dev, size_t size,
+void __dma_nommu_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
unsigned long attrs)
{
@@ -130,7 +127,7 @@ void __dma_direct_free_coherent(struct device *dev, size_t size,
#endif
}
-static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
+static void *dma_nommu_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
unsigned long attrs)
{
@@ -139,8 +136,8 @@ static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
/* The coherent mask may be smaller than the real mask, check if
* we can really use the direct ops
*/
- if (dma_direct_dma_supported(dev, dev->coherent_dma_mask))
- return __dma_direct_alloc_coherent(dev, size, dma_handle,
+ if (dma_nommu_dma_supported(dev, dev->coherent_dma_mask))
+ return __dma_nommu_alloc_coherent(dev, size, dma_handle,
flag, attrs);
/* Ok we can't ... do we have an iommu ? If not, fail */
@@ -154,15 +151,15 @@ static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
dev_to_node(dev));
}
-static void dma_direct_free_coherent(struct device *dev, size_t size,
+static void dma_nommu_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
unsigned long attrs)
{
struct iommu_table *iommu;
- /* See comments in dma_direct_alloc_coherent() */
- if (dma_direct_dma_supported(dev, dev->coherent_dma_mask))
- return __dma_direct_free_coherent(dev, size, vaddr, dma_handle,
+ /* See comments in dma_nommu_alloc_coherent() */
+ if (dma_nommu_dma_supported(dev, dev->coherent_dma_mask))
+ return __dma_nommu_free_coherent(dev, size, vaddr, dma_handle,
attrs);
/* Maybe we used an iommu ... */
iommu = get_iommu_table_base(dev);
@@ -175,7 +172,7 @@ static void dma_direct_free_coherent(struct device *dev, size_t size,
iommu_free_coherent(iommu, size, vaddr, dma_handle);
}
-int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+int dma_nommu_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t handle, size_t size,
unsigned long attrs)
{
@@ -193,7 +190,7 @@ int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
vma->vm_page_prot);
}
-static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
+static int dma_nommu_map_sg(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction direction,
unsigned long attrs)
{
@@ -213,13 +210,13 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
return nents;
}
-static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
+static void dma_nommu_unmap_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction direction,
unsigned long attrs)
{
}
-static u64 dma_direct_get_required_mask(struct device *dev)
+static u64 dma_nommu_get_required_mask(struct device *dev)
{
u64 end, mask;
@@ -231,7 +228,7 @@ static u64 dma_direct_get_required_mask(struct device *dev)
return mask;
}
-static inline dma_addr_t dma_direct_map_page(struct device *dev,
+static inline dma_addr_t dma_nommu_map_page(struct device *dev,
struct page *page,
unsigned long offset,
size_t size,
@@ -246,7 +243,7 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
return page_to_phys(page) + offset + get_dma_offset(dev);
}
-static inline void dma_direct_unmap_page(struct device *dev,
+static inline void dma_nommu_unmap_page(struct device *dev,
dma_addr_t dma_address,
size_t size,
enum dma_data_direction direction,
@@ -255,7 +252,7 @@ static inline void dma_direct_unmap_page(struct device *dev,
}
#ifdef CONFIG_NOT_COHERENT_CACHE
-static inline void dma_direct_sync_sg(struct device *dev,
+static inline void dma_nommu_sync_sg(struct device *dev,
struct scatterlist *sgl, int nents,
enum dma_data_direction direction)
{
@@ -266,7 +263,7 @@ static inline void dma_direct_sync_sg(struct device *dev,
__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}
-static inline void dma_direct_sync_single(struct device *dev,
+static inline void dma_nommu_sync_single(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
@@ -274,24 +271,24 @@ static inline void dma_direct_sync_single(struct device *dev,
}
#endif
-const struct dma_map_ops dma_direct_ops = {
- .alloc = dma_direct_alloc_coherent,
- .free = dma_direct_free_coherent,
- .mmap = dma_direct_mmap_coherent,
- .map_sg = dma_direct_map_sg,
- .unmap_sg = dma_direct_unmap_sg,
- .dma_supported = dma_direct_dma_supported,
- .map_page = dma_direct_map_page,
- .unmap_page = dma_direct_unmap_page,
- .get_required_mask = dma_direct_get_required_mask,
+const struct dma_map_ops dma_nommu_ops = {
+ .alloc = dma_nommu_alloc_coherent,
+ .free = dma_nommu_free_coherent,
+ .mmap = dma_nommu_mmap_coherent,
+ .map_sg = dma_nommu_map_sg,
+ .unmap_sg = dma_nommu_unmap_sg,
+ .dma_supported = dma_nommu_dma_supported,
+ .map_page = dma_nommu_map_page,
+ .unmap_page = dma_nommu_unmap_page,
+ .get_required_mask = dma_nommu_get_required_mask,
#ifdef CONFIG_NOT_COHERENT_CACHE
- .sync_single_for_cpu = dma_direct_sync_single,
- .sync_single_for_device = dma_direct_sync_single,
- .sync_sg_for_cpu = dma_direct_sync_sg,
- .sync_sg_for_device = dma_direct_sync_sg,
+ .sync_single_for_cpu = dma_nommu_sync_single,
+ .sync_single_for_device = dma_nommu_sync_single,
+ .sync_sg_for_cpu = dma_nommu_sync_sg,
+ .sync_sg_for_device = dma_nommu_sync_sg,
#endif
};
-EXPORT_SYMBOL(dma_direct_ops);
+EXPORT_SYMBOL(dma_nommu_ops);
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
@@ -302,7 +299,7 @@ int dma_set_coherent_mask(struct device *dev, u64 mask)
* is no dma_op->set_coherent_mask() so we have to do
* things the hard way:
*/
- if (get_dma_ops(dev) != &dma_direct_ops ||
+ if (get_dma_ops(dev) != &dma_nommu_ops ||
get_iommu_table_base(dev) == NULL ||
!dma_iommu_dma_supported(dev, mask))
return -EIO;
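On the rename running through dma.c: the ops still perform direct, offset-based mapping; calling them "nommu" frees the dma_direct_* namespace for the generic direct-mapping implementation this series introduces. Restating the address math from the map_page hunk above as a sketch:

/* Sketch of what dma_nommu_map_page() computes: the bus address is
 * the page's physical address plus the device's constant DMA offset.
 * No IOMMU translation is involved.
 */
static dma_addr_t nommu_bus_addr(struct device *dev, struct page *page,
				 unsigned long offset)
{
	return page_to_phys(page) + offset + get_dma_offset(dev);
}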
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index 742e4658c5dc..71e8a1b8c86e 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -273,7 +273,7 @@ static void machine_process_ue_event(struct work_struct *work)
pfn = evt->u.ue_error.physical_address >>
PAGE_SHIFT;
- memory_failure(pfn, SIGBUS, 0);
+ memory_failure(pfn, 0);
} else
pr_warn("Failed to identify bad address from "
"where the uncorrectable error (UE) "
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 0ac7aa346c69..590f4d0a6cb1 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -60,7 +60,7 @@ resource_size_t isa_mem_base;
EXPORT_SYMBOL(isa_mem_base);
-static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
+static const struct dma_map_ops *pci_dma_ops = &dma_nommu_ops;
void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
{
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 72be0c32e902..4208cbe2fb7f 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -601,21 +601,16 @@ EXPORT_SYMBOL(flush_all_to_thread);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
- unsigned long error_code, int signal_code, int breakpt)
+ unsigned long error_code, int breakpt)
{
- siginfo_t info;
-
- current->thread.trap_nr = signal_code;
+ current->thread.trap_nr = TRAP_HWBKPT;
if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
11, SIGSEGV) == NOTIFY_STOP)
return;
/* Deliver the signal to userspace */
- info.si_signo = SIGTRAP;
- info.si_errno = breakpt; /* breakpoint or watchpoint id */
- info.si_code = signal_code;
- info.si_addr = (void __user *)address;
- force_sig_info(SIGTRAP, &info, current);
+ force_sig_ptrace_errno_trap(breakpt, /* breakpoint or watchpoint id */
+ (void __user *)address);
}
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
void do_break (struct pt_regs *regs, unsigned long address,
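The force_sig_ptrace_errno_trap() call above replaces the open-coded siginfo setup; the signal_code parameter could be dropped because the helper always uses TRAP_HWBKPT, matching every caller in the traps.c hunks below. Roughly, the generic helper does the following (an approximation of kernel/signal.c, not quoted from this patch):

/* Approximate shape of the generic helper: */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;		/* breakpoint or watchpoint id */
	info.si_code  = TRAP_HWBKPT;
	info.si_addr  = addr;
	return force_sig_info(info.si_signo, &info, current);
}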
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
index 0f0b1b2f3b60..1da8b7d8c6ca 100644
--- a/arch/powerpc/kernel/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -388,7 +388,7 @@ out:
return error;
}
-static unsigned int rtas_log_poll(struct file *file, poll_table * wait)
+static __poll_t rtas_log_poll(struct file *file, poll_table * wait)
{
poll_wait(file, &rtas_log_wait, wait);
if (rtas_log_size)
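The unsigned int to __poll_t conversions that begin here, and continue through the spufs and OPAL hunks below, are mechanical but not cosmetic: __poll_t is a sparse bitwise type, so mixing poll masks with plain integers now warns under 'make C=1'. In essence (per <linux/types.h> in this series):

/* The annotated type, approximately as defined in <linux/types.h>: */
typedef unsigned __bitwise __poll_t;

/* Conversions to and from plain integers must now be explicit: */
__poll_t mask = (__force __poll_t)0;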
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 8fd3a70047f1..3f33869c6486 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -780,7 +780,7 @@ void arch_setup_pdev_archdata(struct platform_device *pdev)
{
pdev->archdata.dma_mask = DMA_BIT_MASK(32);
pdev->dev.dma_mask = &pdev->archdata.dma_mask;
- set_dma_ops(&pdev->dev, &dma_direct_ops);
+ set_dma_ops(&pdev->dev, &dma_nommu_ops);
}
static __init void print_system_info(void)
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index 3d7539b90010..61db86ecd318 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -153,6 +153,9 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
if (thread_info_flags & _TIF_UPROBE)
uprobe_notify_resume(regs);
+ if (thread_info_flags & _TIF_PATCH_PENDING)
+ klp_update_patch_state(current);
+
if (thread_info_flags & _TIF_SIGPENDING) {
BUG_ON(regs != current->thread.regs);
do_signal(current);
@@ -163,9 +166,6 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
tracehook_notify_resume(regs);
}
- if (thread_info_flags & _TIF_PATCH_PENDING)
- klp_update_patch_state(current);
-
user_enter();
}
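The reorder in do_notify_resume() above moves the livepatch check ahead of signal handling, apparently so that a pending patch-state transition completes before any signal handler runs. The resulting order, restated:

/* Per the two hunks above: patch-state migration first, then signal
 * delivery, so a handler never runs while the task is still in the
 * old patch state.
 */
if (thread_info_flags & _TIF_PATCH_PENDING)
	klp_update_patch_state(current);

if (thread_info_flags & _TIF_SIGPENDING) {
	BUG_ON(regs != current->thread.regs);
	do_signal(current);
}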
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 9ffd73296f64..aded81169648 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -873,75 +873,9 @@ static long restore_tm_user_regs(struct pt_regs *regs,
#endif
#ifdef CONFIG_PPC64
-int copy_siginfo_to_user32(struct compat_siginfo __user *d, const siginfo_t *s)
-{
- int err;
-
- if (!access_ok (VERIFY_WRITE, d, sizeof(*d)))
- return -EFAULT;
-
- /* If you change siginfo_t structure, please be sure
- * this code is fixed accordingly.
- * It should never copy any pad contained in the structure
- * to avoid security leaks, but must copy the generic
- * 3 ints plus the relevant union member.
- * This routine must convert siginfo from 64bit to 32bit as well
- * at the same time.
- */
- err = __put_user(s->si_signo, &d->si_signo);
- err |= __put_user(s->si_errno, &d->si_errno);
- err |= __put_user(s->si_code, &d->si_code);
- if (s->si_code < 0)
- err |= __copy_to_user(&d->_sifields._pad, &s->_sifields._pad,
- SI_PAD_SIZE32);
- else switch(siginfo_layout(s->si_signo, s->si_code)) {
- case SIL_CHLD:
- err |= __put_user(s->si_pid, &d->si_pid);
- err |= __put_user(s->si_uid, &d->si_uid);
- err |= __put_user(s->si_utime, &d->si_utime);
- err |= __put_user(s->si_stime, &d->si_stime);
- err |= __put_user(s->si_status, &d->si_status);
- break;
- case SIL_FAULT:
- err |= __put_user((unsigned int)(unsigned long)s->si_addr,
- &d->si_addr);
- break;
- case SIL_POLL:
- err |= __put_user(s->si_band, &d->si_band);
- err |= __put_user(s->si_fd, &d->si_fd);
- break;
- case SIL_TIMER:
- err |= __put_user(s->si_tid, &d->si_tid);
- err |= __put_user(s->si_overrun, &d->si_overrun);
- err |= __put_user(s->si_int, &d->si_int);
- break;
- case SIL_SYS:
- err |= __put_user(ptr_to_compat(s->si_call_addr), &d->si_call_addr);
- err |= __put_user(s->si_syscall, &d->si_syscall);
- err |= __put_user(s->si_arch, &d->si_arch);
- break;
- case SIL_RT:
- err |= __put_user(s->si_int, &d->si_int);
- /* fallthrough */
- case SIL_KILL:
- err |= __put_user(s->si_pid, &d->si_pid);
- err |= __put_user(s->si_uid, &d->si_uid);
- break;
- }
- return err;
-}
#define copy_siginfo_to_user copy_siginfo_to_user32
-int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
-{
- if (copy_from_user(to, from, 3*sizeof(int)) ||
- copy_from_user(to->_sifields._pad,
- from->_sifields._pad, SI_PAD_SIZE32))
- return -EFAULT;
-
- return 0;
-}
#endif /* CONFIG_PPC64 */
/*
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index f3eb61be0d30..c93f1e6a9fff 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -917,7 +917,7 @@ void unknown_exception(struct pt_regs *regs)
printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
regs->nip, regs->msr, regs->trap);
- _exception(SIGTRAP, regs, 0, 0);
+ _exception(SIGTRAP, regs, TRAP_FIXME, 0);
exception_exit(prev_state);
}
@@ -939,7 +939,7 @@ bail:
void RunModeException(struct pt_regs *regs)
{
- _exception(SIGTRAP, regs, 0, 0);
+ _exception(SIGTRAP, regs, TRAP_FIXME, 0);
}
void single_step_exception(struct pt_regs *regs)
@@ -978,7 +978,7 @@ static void emulate_single_step(struct pt_regs *regs)
static inline int __parse_fpscr(unsigned long fpscr)
{
- int ret = 0;
+ int ret = FPE_FIXME;
/* Invalid operation */
if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
@@ -1750,34 +1750,34 @@ static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
#endif
- do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT,
+ do_send_trap(regs, mfspr(SPRN_DAC1), debug_status,
5);
changed |= 0x01;
} else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
- do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT,
+ do_send_trap(regs, mfspr(SPRN_DAC2), debug_status,
6);
changed |= 0x01;
} else if (debug_status & DBSR_IAC1) {
current->thread.debug.dbcr0 &= ~DBCR0_IAC1;
dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
- do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT,
+ do_send_trap(regs, mfspr(SPRN_IAC1), debug_status,
1);
changed |= 0x01;
} else if (debug_status & DBSR_IAC2) {
current->thread.debug.dbcr0 &= ~DBCR0_IAC2;
- do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT,
+ do_send_trap(regs, mfspr(SPRN_IAC2), debug_status,
2);
changed |= 0x01;
} else if (debug_status & DBSR_IAC3) {
current->thread.debug.dbcr0 &= ~DBCR0_IAC3;
dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
- do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT,
+ do_send_trap(regs, mfspr(SPRN_IAC3), debug_status,
3);
changed |= 0x01;
} else if (debug_status & DBSR_IAC4) {
current->thread.debug.dbcr0 &= ~DBCR0_IAC4;
- do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT,
+ do_send_trap(regs, mfspr(SPRN_IAC4), debug_status,
4);
changed |= 0x01;
}
@@ -1929,7 +1929,7 @@ void SPEFloatingPointException(struct pt_regs *regs)
extern int do_spe_mathemu(struct pt_regs *regs);
unsigned long spefscr;
int fpexc_mode;
- int code = 0;
+ int code = FPE_FIXME;
int err;
flush_spe_to_thread(current);
@@ -1998,7 +1998,7 @@ void SPEFloatingPointRoundException(struct pt_regs *regs)
printk(KERN_ERR "unrecognized spe instruction "
"in %s at %lx\n", current->comm, regs->nip);
} else {
- _exception(SIGFPE, regs, 0, regs->nip);
+ _exception(SIGFPE, regs, FPE_FIXME, regs->nip);
return;
}
}
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 4b91ad08eefd..12352a58072a 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -541,7 +541,7 @@ static struct cbe_iommu *cell_iommu_for_node(int nid)
return NULL;
}
-static unsigned long cell_dma_direct_offset;
+static unsigned long cell_dma_nommu_offset;
static unsigned long dma_iommu_fixed_base;
@@ -580,7 +580,7 @@ static void *dma_fixed_alloc_coherent(struct device *dev, size_t size,
device_to_mask(dev), flag,
dev_to_node(dev));
else
- return dma_direct_ops.alloc(dev, size, dma_handle, flag,
+ return dma_nommu_ops.alloc(dev, size, dma_handle, flag,
attrs);
}
@@ -592,7 +592,7 @@ static void dma_fixed_free_coherent(struct device *dev, size_t size,
iommu_free_coherent(cell_get_iommu_table(dev), size, vaddr,
dma_handle);
else
- dma_direct_ops.free(dev, size, vaddr, dma_handle, attrs);
+ dma_nommu_ops.free(dev, size, vaddr, dma_handle, attrs);
}
static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page,
@@ -601,7 +601,7 @@ static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page,
unsigned long attrs)
{
if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
- return dma_direct_ops.map_page(dev, page, offset, size,
+ return dma_nommu_ops.map_page(dev, page, offset, size,
direction, attrs);
else
return iommu_map_page(dev, cell_get_iommu_table(dev), page,
@@ -614,7 +614,7 @@ static void dma_fixed_unmap_page(struct device *dev, dma_addr_t dma_addr,
unsigned long attrs)
{
if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
- dma_direct_ops.unmap_page(dev, dma_addr, size, direction,
+ dma_nommu_ops.unmap_page(dev, dma_addr, size, direction,
attrs);
else
iommu_unmap_page(cell_get_iommu_table(dev), dma_addr, size,
@@ -626,7 +626,7 @@ static int dma_fixed_map_sg(struct device *dev, struct scatterlist *sg,
unsigned long attrs)
{
if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
- return dma_direct_ops.map_sg(dev, sg, nents, direction, attrs);
+ return dma_nommu_ops.map_sg(dev, sg, nents, direction, attrs);
else
return ppc_iommu_map_sg(dev, cell_get_iommu_table(dev), sg,
nents, device_to_mask(dev),
@@ -638,7 +638,7 @@ static void dma_fixed_unmap_sg(struct device *dev, struct scatterlist *sg,
unsigned long attrs)
{
if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
- dma_direct_ops.unmap_sg(dev, sg, nents, direction, attrs);
+ dma_nommu_ops.unmap_sg(dev, sg, nents, direction, attrs);
else
ppc_iommu_unmap_sg(cell_get_iommu_table(dev), sg, nents,
direction, attrs);
@@ -661,8 +661,8 @@ static void cell_dma_dev_setup(struct device *dev)
{
if (get_pci_dma_ops() == &dma_iommu_ops)
set_iommu_table_base(dev, cell_get_iommu_table(dev));
- else if (get_pci_dma_ops() == &dma_direct_ops)
- set_dma_offset(dev, cell_dma_direct_offset);
+ else if (get_pci_dma_ops() == &dma_nommu_ops)
+ set_dma_offset(dev, cell_dma_nommu_offset);
else
BUG();
}
@@ -810,14 +810,14 @@ static int __init cell_iommu_init_disabled(void)
unsigned long base = 0, size;
/* When no iommu is present, we use direct DMA ops */
- set_pci_dma_ops(&dma_direct_ops);
+ set_pci_dma_ops(&dma_nommu_ops);
/* First make sure all IOC translation is turned off */
cell_disable_iommus();
/* If we have no Axon, we set up the spider DMA magic offset */
if (of_find_node_by_name(NULL, "axon") == NULL)
- cell_dma_direct_offset = SPIDER_DMA_OFFSET;
+ cell_dma_nommu_offset = SPIDER_DMA_OFFSET;
/* Now we need to check to see where the memory is mapped
* in PCI space. We assume that all busses use the same dma
@@ -851,13 +851,13 @@ static int __init cell_iommu_init_disabled(void)
return -ENODEV;
}
- cell_dma_direct_offset += base;
+ cell_dma_nommu_offset += base;
- if (cell_dma_direct_offset != 0)
+ if (cell_dma_nommu_offset != 0)
cell_pci_controller_ops.dma_dev_setup = cell_pci_dma_dev_setup;
printk("iommu: disabled, direct DMA offset is 0x%lx\n",
- cell_dma_direct_offset);
+ cell_dma_nommu_offset);
return 0;
}
diff --git a/arch/powerpc/platforms/cell/spufs/backing_ops.c b/arch/powerpc/platforms/cell/spufs/backing_ops.c
index 6e8a9ef8590e..1a9a756b0b2f 100644
--- a/arch/powerpc/platforms/cell/spufs/backing_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/backing_ops.c
@@ -86,10 +86,10 @@ static u32 spu_backing_mbox_stat_read(struct spu_context *ctx)
return ctx->csa.prob.mb_stat_R;
}
-static unsigned int spu_backing_mbox_stat_poll(struct spu_context *ctx,
- unsigned int events)
+static __poll_t spu_backing_mbox_stat_poll(struct spu_context *ctx,
+ __poll_t events)
{
- int ret;
+ __poll_t ret;
u32 stat;
ret = 0;
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 5ffcdeb1eb17..fc7772c3d068 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -762,10 +762,10 @@ out:
return count;
}
-static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
+static __poll_t spufs_ibox_poll(struct file *file, poll_table *wait)
{
struct spu_context *ctx = file->private_data;
- unsigned int mask;
+ __poll_t mask;
poll_wait(file, &ctx->ibox_wq, wait);
@@ -898,10 +898,10 @@ out:
return count;
}
-static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
+static __poll_t spufs_wbox_poll(struct file *file, poll_table *wait)
{
struct spu_context *ctx = file->private_data;
- unsigned int mask;
+ __poll_t mask;
poll_wait(file, &ctx->wbox_wq, wait);
@@ -1690,11 +1690,11 @@ out:
return ret;
}
-static unsigned int spufs_mfc_poll(struct file *file,poll_table *wait)
+static __poll_t spufs_mfc_poll(struct file *file,poll_table *wait)
{
struct spu_context *ctx = file->private_data;
u32 free_elements, tagstatus;
- unsigned int mask;
+ __poll_t mask;
poll_wait(file, &ctx->mfc_wq, wait);
@@ -2455,11 +2455,11 @@ static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
return cnt == 0 ? error : cnt;
}
-static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait)
+static __poll_t spufs_switch_log_poll(struct file *file, poll_table *wait)
{
struct inode *inode = file_inode(file);
struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
- unsigned int mask = 0;
+ __poll_t mask = 0;
int rc;
poll_wait(file, &ctx->switch_log->wait, wait);
diff --git a/arch/powerpc/platforms/cell/spufs/hw_ops.c b/arch/powerpc/platforms/cell/spufs/hw_ops.c
index 8655c4cbefc2..fff58198b5b6 100644
--- a/arch/powerpc/platforms/cell/spufs/hw_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/hw_ops.c
@@ -56,11 +56,10 @@ static u32 spu_hw_mbox_stat_read(struct spu_context *ctx)
return in_be32(&ctx->spu->problem->mb_stat_R);
}
-static unsigned int spu_hw_mbox_stat_poll(struct spu_context *ctx,
- unsigned int events)
+static __poll_t spu_hw_mbox_stat_poll(struct spu_context *ctx, __poll_t events)
{
struct spu *spu = ctx->spu;
- int ret = 0;
+ __poll_t ret = 0;
u32 stat;
spin_lock_irq(&spu->register_lock);
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 5e59f80e95db..2d0479ad3af4 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -185,8 +185,7 @@ struct mfc_dma_command {
struct spu_context_ops {
int (*mbox_read) (struct spu_context * ctx, u32 * data);
u32(*mbox_stat_read) (struct spu_context * ctx);
- unsigned int (*mbox_stat_poll)(struct spu_context *ctx,
- unsigned int events);
+ __poll_t (*mbox_stat_poll)(struct spu_context *ctx, __poll_t events);
int (*ibox_read) (struct spu_context * ctx, u32 * data);
int (*wbox_write) (struct spu_context * ctx, u32 data);
u32(*signal1_read) (struct spu_context * ctx);
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index 7fec04de27fc..78b80cbd9768 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -186,7 +186,7 @@ static void pci_dma_dev_setup_pasemi(struct pci_dev *dev)
*/
if (dev->vendor == 0x1959 && dev->device == 0xa007 &&
!firmware_has_feature(FW_FEATURE_LPAR)) {
- dev->dev.dma_ops = &dma_direct_ops;
+ dev->dev.dma_ops = &dma_nommu_ops;
/*
* Set the coherent DMA mask to prevent the iommu
* being used unnecessarily
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index c4a3e93dc324..d0b8ae53660d 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -363,7 +363,7 @@ static int pcmcia_notify(struct notifier_block *nb, unsigned long action,
return 0;
/* We use the direct ops for localbus */
- dev->dma_ops = &dma_direct_ops;
+ dev->dma_ops = &dma_nommu_ops;
return 0;
}
diff --git a/arch/powerpc/platforms/powernv/opal-memory-errors.c b/arch/powerpc/platforms/powernv/opal-memory-errors.c
index d9916ea62305..8ddc1accf199 100644
--- a/arch/powerpc/platforms/powernv/opal-memory-errors.c
+++ b/arch/powerpc/platforms/powernv/opal-memory-errors.c
@@ -60,7 +60,7 @@ static void handle_memory_error_event(struct OpalMemoryErrorData *merr_evt)
}
for (; paddr_start < paddr_end; paddr_start += PAGE_SIZE) {
- memory_failure(paddr_start >> PAGE_SHIFT, 0, 0);
+ memory_failure(paddr_start >> PAGE_SHIFT, 0);
}
}
diff --git a/arch/powerpc/platforms/powernv/opal-prd.c b/arch/powerpc/platforms/powernv/opal-prd.c
index de4dd09f4a15..c18de0a9b1bd 100644
--- a/arch/powerpc/platforms/powernv/opal-prd.c
+++ b/arch/powerpc/platforms/powernv/opal-prd.c
@@ -147,7 +147,7 @@ static bool opal_msg_queue_empty(void)
return ret;
}
-static unsigned int opal_prd_poll(struct file *file,
+static __poll_t opal_prd_poll(struct file *file,
struct poll_table_struct *wait)
{
poll_wait(file, &opal_prd_msg_wait, wait);
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 749055553064..9582aeb1fe4c 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -1850,7 +1850,7 @@ static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
if (bypass) {
dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n");
- set_dma_ops(&pdev->dev, &dma_direct_ops);
+ set_dma_ops(&pdev->dev, &dma_nommu_ops);
} else {
/*
* If the device can't set the TCE bypass bit but still wants
@@ -1868,7 +1868,7 @@ static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
return rc;
/* 4GB offset bypasses 32-bit space */
set_dma_offset(&pdev->dev, (1ULL << 32));
- set_dma_ops(&pdev->dev, &dma_direct_ops);
+ set_dma_ops(&pdev->dev, &dma_nommu_ops);
} else if (dma_mask >> 32 && dma_mask != DMA_BIT_MASK(64)) {
/*
* Fail the request if a DMA mask between 32 and 64 bits
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 69921f72e2da..eaa11334fc8c 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -1231,7 +1231,7 @@ static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
if (dma_offset != 0) {
dev_info(dev, "Using 64-bit direct DMA at offset %llx\n", dma_offset);
set_dma_offset(dev, dma_offset);
- set_dma_ops(dev, &dma_direct_ops);
+ set_dma_ops(dev, &dma_nommu_ops);
ddw_enabled = true;
}
}
diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
index d86938260a86..49e04ec19238 100644
--- a/arch/powerpc/platforms/pseries/vio.c
+++ b/arch/powerpc/platforms/pseries/vio.c
@@ -618,7 +618,7 @@ static u64 vio_dma_get_required_mask(struct device *dev)
static const struct dma_map_ops vio_dma_mapping_ops = {
.alloc = vio_dma_iommu_alloc_coherent,
.free = vio_dma_iommu_free_coherent,
- .mmap = dma_direct_mmap_coherent,
+ .mmap = dma_nommu_mmap_coherent,
.map_sg = vio_dma_iommu_map_sg,
.unmap_sg = vio_dma_iommu_unmap_sg,
.map_page = vio_dma_iommu_map_page,
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index 3573d54b2770..a6198d4f0f03 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -402,7 +402,7 @@ static int dart_dma_set_mask(struct device *dev, u64 dma_mask)
*/
if (dart_device_on_pcie(dev) && dma_mask >= DMA_BIT_MASK(40)) {
dev_info(dev, "Using 64-bit DMA iommu bypass\n");
- set_dma_ops(dev, &dma_direct_ops);
+ set_dma_ops(dev, &dma_nommu_ops);
} else {
dev_info(dev, "Using 32-bit DMA via iommu\n");
set_dma_ops(dev, &dma_iommu_ops);
@@ -446,7 +446,7 @@ void __init iommu_init_early_dart(struct pci_controller_ops *controller_ops)
controller_ops->dma_bus_setup = NULL;
/* Setup pci_dma ops */
- set_pci_dma_ops(&dma_direct_ops);
+ set_pci_dma_ops(&dma_nommu_ops);
}
#ifdef CONFIG_PM
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 22d98057f773..61e07c78d64f 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -118,7 +118,7 @@ static void setup_swiotlb_ops(struct pci_controller *hose)
{
if (ppc_swiotlb_enable) {
hose->controller_ops.dma_dev_setup = pci_dma_dev_setup_swiotlb;
- set_pci_dma_ops(&swiotlb_dma_ops);
+ set_pci_dma_ops(&powerpc_swiotlb_dma_ops);
}
}
#else
@@ -135,7 +135,7 @@ static int fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask)
* mapping that allows addressing any RAM address from across PCI.
*/
if (dev_is_pci(dev) && dma_mask >= pci64_dma_offset * 2 - 1) {
- set_dma_ops(dev, &dma_direct_ops);
+ set_dma_ops(dev, &dma_nommu_ops);
set_dma_offset(dev, pci64_dma_offset);
}
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 2c6adf12713a..865e14f50c14 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -83,7 +83,7 @@ config PGTABLE_LEVELS
config HAVE_KPROBES
def_bool n
-config DMA_NOOP_OPS
+config DMA_DIRECT_OPS
def_bool y
menu "Platform type"
diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
index 970460a0b492..197460ccbf21 100644
--- a/arch/riscv/include/asm/Kbuild
+++ b/arch/riscv/include/asm/Kbuild
@@ -7,6 +7,7 @@ generic-y += device.h
generic-y += div64.h
generic-y += dma.h
generic-y += dma-contiguous.h
+generic-y += dma-mapping.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h
diff --git a/arch/riscv/include/asm/dma-mapping.h b/arch/riscv/include/asm/dma-mapping.h
deleted file mode 100644
index 3eec1000196d..000000000000
--- a/arch/riscv/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (C) 2003-2004 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- * Copyright (C) 2012 ARM Ltd.
- * Copyright (C) 2016 SiFive, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __ASM_RISCV_DMA_MAPPING_H
-#define __ASM_RISCV_DMA_MAPPING_H
-
-/* Use ops->dma_mapping_error (if it exists) or assume success */
-// #undef DMA_ERROR_CODE
-
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
- return &dma_noop_ops;
-}
-
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
- if (!dev->dma_mask)
- return false;
-
- return addr + size - 1 <= *dev->dma_mask;
-}
-
-#endif /* __ASM_RISCV_DMA_MAPPING_H */
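With the riscv header deleted above, the 'generic-y += dma-mapping.h' line added in the Kbuild hunk pulls in the asm-generic version instead, which under the new DMA_DIRECT_OPS naming reduces to approximately:

/* Approximate content of <asm-generic/dma-mapping.h> that riscv now
 * inherits; shown for context, not part of this diff.
 */
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
	return &dma_direct_ops;
}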
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
index 22c3536ed281..f8fa1cd2dad9 100644
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -64,8 +64,6 @@ struct thread_info {
.addr_limit = KERNEL_DS, \
}
-#define init_stack (init_thread_union.stack)
-
#endif /* !__ASSEMBLY__ */
/*
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 829c67986db7..9376637229c9 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -140,7 +140,7 @@ config S390
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_API_DEBUG
select HAVE_DMA_CONTIGUOUS
- select DMA_NOOP_OPS
+ select DMA_DIRECT_OPS
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_EFFICIENT_UNALIGNED_ACCESS
diff --git a/arch/s390/crypto/crc32-vx.c b/arch/s390/crypto/crc32-vx.c
index 436865926c26..423ee05887e6 100644
--- a/arch/s390/crypto/crc32-vx.c
+++ b/arch/s390/crypto/crc32-vx.c
@@ -239,6 +239,7 @@ static struct shash_alg crc32_vx_algs[] = {
.cra_name = "crc32",
.cra_driver_name = "crc32-vx",
.cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CRC32_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crc_ctx),
.cra_module = THIS_MODULE,
@@ -259,6 +260,7 @@ static struct shash_alg crc32_vx_algs[] = {
.cra_name = "crc32be",
.cra_driver_name = "crc32be-vx",
.cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CRC32_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crc_ctx),
.cra_module = THIS_MODULE,
@@ -279,6 +281,7 @@ static struct shash_alg crc32_vx_algs[] = {
.cra_name = "crc32c",
.cra_driver_name = "crc32c-vx",
.cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CRC32_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crc_ctx),
.cra_module = THIS_MODULE,
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 048450869328..dade72be127b 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -4,6 +4,7 @@ generic-y += cacheflush.h
generic-y += clkdev.h
generic-y += device.h
generic-y += dma-contiguous.h
+generic-y += dma-mapping.h
generic-y += div64.h
generic-y += emergency-restart.h
generic-y += export.h
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index 5e6a63641a5f..9830fb6b076e 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -189,79 +189,6 @@ typedef u32 compat_old_sigset_t; /* at least 32 bits */
typedef u32 compat_sigset_word;
-typedef union compat_sigval {
- compat_int_t sival_int;
- compat_uptr_t sival_ptr;
-} compat_sigval_t;
-
-typedef struct compat_siginfo {
- int si_signo;
- int si_errno;
- int si_code;
-
- union {
- int _pad[128/sizeof(int) - 3];
-
- /* kill() */
- struct {
- pid_t _pid; /* sender's pid */
- uid_t _uid; /* sender's uid */
- } _kill;
-
- /* POSIX.1b timers */
- struct {
- compat_timer_t _tid; /* timer id */
- int _overrun; /* overrun count */
- compat_sigval_t _sigval; /* same as below */
- int _sys_private; /* not to be passed to user */
- } _timer;
-
- /* POSIX.1b signals */
- struct {
- pid_t _pid; /* sender's pid */
- uid_t _uid; /* sender's uid */
- compat_sigval_t _sigval;
- } _rt;
-
- /* SIGCHLD */
- struct {
- pid_t _pid; /* which child */
- uid_t _uid; /* sender's uid */
- int _status;/* exit code */
- compat_clock_t _utime;
- compat_clock_t _stime;
- } _sigchld;
-
- /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
- struct {
- __u32 _addr; /* faulting insn/memory ref. - pointer */
- } _sigfault;
-
- /* SIGPOLL */
- struct {
- int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
- int _fd;
- } _sigpoll;
- } _sifields;
-} compat_siginfo_t;
-
-/*
- * How these fields are to be accessed.
- */
-#define si_pid _sifields._kill._pid
-#define si_uid _sifields._kill._uid
-#define si_status _sifields._sigchld._status
-#define si_utime _sifields._sigchld._utime
-#define si_stime _sifields._sigchld._stime
-#define si_value _sifields._rt._sigval
-#define si_int _sifields._rt._sigval.sival_int
-#define si_ptr _sifields._rt._sigval.sival_ptr
-#define si_addr _sifields._sigfault._addr
-#define si_band _sifields._sigpoll._band
-#define si_fd _sifields._sigpoll._fd
-#define si_tid _sifields._timer._tid
-#define si_overrun _sifields._timer._overrun
-
#define COMPAT_OFF_T_MAX 0x7fffffff
/*
diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h
deleted file mode 100644
index eaf490f9c5bc..000000000000
--- a/arch/s390/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_S390_DMA_MAPPING_H
-#define _ASM_S390_DMA_MAPPING_H
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-debug.h>
-#include <linux/io.h>
-
-extern const struct dma_map_ops s390_pci_dma_ops;
-
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
- return &dma_noop_ops;
-}
-
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
- if (!dev->dma_mask)
- return false;
- return addr + size - 1 <= *dev->dma_mask;
-}
-
-#endif /* _ASM_S390_DMA_MAPPING_H */
diff --git a/arch/s390/include/asm/pci_dma.h b/arch/s390/include/asm/pci_dma.h
index e8d9161fa17a..419fac7a62c0 100644
--- a/arch/s390/include/asm/pci_dma.h
+++ b/arch/s390/include/asm/pci_dma.h
@@ -201,4 +201,7 @@ void dma_cleanup_tables(unsigned long *);
unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr);
void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags);
+extern const struct dma_map_ops s390_pci_dma_ops;
+
#endif
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 0880a37b6d3b..25d6ec3aaddd 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -42,8 +42,6 @@ struct thread_info {
.flags = 0, \
}
-#define init_stack (init_thread_union.stack)
-
void arch_release_task_struct(struct task_struct *tsk);
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index ef246940b44c..18c1eeb847b2 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -50,106 +50,6 @@ typedef struct
struct ucontext32 uc;
} rt_sigframe32;
-int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
-{
- int err;
-
- /* If you change siginfo_t structure, please be sure
- this code is fixed accordingly.
- It should never copy any pad contained in the structure
- to avoid security leaks, but must copy the generic
- 3 ints plus the relevant union member.
- This routine must convert siginfo from 64bit to 32bit as well
- at the same time. */
- err = __put_user(from->si_signo, &to->si_signo);
- err |= __put_user(from->si_errno, &to->si_errno);
- err |= __put_user(from->si_code, &to->si_code);
- if (from->si_code < 0)
- err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
- else {
- switch (siginfo_layout(from->si_signo, from->si_code)) {
- case SIL_RT:
- err |= __put_user(from->si_int, &to->si_int);
- /* fallthrough */
- case SIL_KILL:
- err |= __put_user(from->si_pid, &to->si_pid);
- err |= __put_user(from->si_uid, &to->si_uid);
- break;
- case SIL_CHLD:
- err |= __put_user(from->si_pid, &to->si_pid);
- err |= __put_user(from->si_uid, &to->si_uid);
- err |= __put_user(from->si_utime, &to->si_utime);
- err |= __put_user(from->si_stime, &to->si_stime);
- err |= __put_user(from->si_status, &to->si_status);
- break;
- case SIL_FAULT:
- err |= __put_user((unsigned long) from->si_addr,
- &to->si_addr);
- break;
- case SIL_POLL:
- err |= __put_user(from->si_band, &to->si_band);
- err |= __put_user(from->si_fd, &to->si_fd);
- break;
- case SIL_TIMER:
- err |= __put_user(from->si_tid, &to->si_tid);
- err |= __put_user(from->si_overrun, &to->si_overrun);
- err |= __put_user(from->si_int, &to->si_int);
- break;
- default:
- break;
- }
- }
- return err ? -EFAULT : 0;
-}
-
-int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
-{
- int err;
- u32 tmp;
-
- err = __get_user(to->si_signo, &from->si_signo);
- err |= __get_user(to->si_errno, &from->si_errno);
- err |= __get_user(to->si_code, &from->si_code);
-
- if (to->si_code < 0)
- err |= __copy_from_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
- else {
- switch (siginfo_layout(to->si_signo, to->si_code)) {
- case SIL_RT:
- err |= __get_user(to->si_int, &from->si_int);
- /* fallthrough */
- case SIL_KILL:
- err |= __get_user(to->si_pid, &from->si_pid);
- err |= __get_user(to->si_uid, &from->si_uid);
- break;
- case SIL_CHLD:
- err |= __get_user(to->si_pid, &from->si_pid);
- err |= __get_user(to->si_uid, &from->si_uid);
- err |= __get_user(to->si_utime, &from->si_utime);
- err |= __get_user(to->si_stime, &from->si_stime);
- err |= __get_user(to->si_status, &from->si_status);
- break;
- case SIL_FAULT:
- err |= __get_user(tmp, &from->si_addr);
- to->si_addr = (void __force __user *)
- (u64) (tmp & PSW32_ADDR_INSN);
- break;
- case SIL_POLL:
- err |= __get_user(to->si_band, &from->si_band);
- err |= __get_user(to->si_fd, &from->si_fd);
- break;
- case SIL_TIMER:
- err |= __get_user(to->si_tid, &from->si_tid);
- err |= __get_user(to->si_overrun, &from->si_overrun);
- err |= __get_user(to->si_int, &from->si_int);
- break;
- default:
- break;
- }
- }
- return err ? -EFAULT : 0;
-}
-
/* Store registers needed to create the signal frame */
static void store_sigregs(void)
{
diff --git a/arch/score/include/asm/thread_info.h b/arch/score/include/asm/thread_info.h
index ad51b56e51bd..bc4c7c90550f 100644
--- a/arch/score/include/asm/thread_info.h
+++ b/arch/score/include/asm/thread_info.h
@@ -58,9 +58,6 @@ struct thread_info {
.addr_limit = KERNEL_DS, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
/* How to get the thread information struct from C. */
register struct thread_info *__current_thread_info __asm__("r28");
#define current_thread_info() __current_thread_info
diff --git a/arch/score/include/uapi/asm/Kbuild b/arch/score/include/uapi/asm/Kbuild
index 81271d3af47c..b04fd1632051 100644
--- a/arch/score/include/uapi/asm/Kbuild
+++ b/arch/score/include/uapi/asm/Kbuild
@@ -2,4 +2,5 @@
include include/uapi/asm-generic/Kbuild.asm
generic-y += bpf_perf_event.h
+generic-y += poll.h
generic-y += siginfo.h
diff --git a/arch/score/include/uapi/asm/poll.h b/arch/score/include/uapi/asm/poll.h
deleted file mode 100644
index c636b85843cd..000000000000
--- a/arch/score/include/uapi/asm/poll.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _ASM_SCORE_POLL_H
-#define _ASM_SCORE_POLL_H
-
-#include <asm-generic/poll.h>
-
-#endif /* _ASM_SCORE_POLL_H */
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h
index becb798f1b04..cf5c792bf70b 100644
--- a/arch/sh/include/asm/thread_info.h
+++ b/arch/sh/include/asm/thread_info.h
@@ -63,9 +63,6 @@ struct thread_info {
.addr_limit = KERNEL_DS, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
/* how to get the current stack pointer from C */
register unsigned long current_stack_pointer asm("r15") __used;
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index 57cff00cad17..b3770bb26211 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -609,7 +609,8 @@ asmlinkage void do_divide_error(unsigned long r4)
break;
}
- force_sig_info(SIGFPE, &info, current);
+ info.si_signo = SIGFPE;
+ force_sig_info(info.si_signo, &info, current);
}
#endif
diff --git a/arch/sparc/crypto/crc32c_glue.c b/arch/sparc/crypto/crc32c_glue.c
index d1064e46efe8..8aa664638c3c 100644
--- a/arch/sparc/crypto/crc32c_glue.c
+++ b/arch/sparc/crypto/crc32c_glue.c
@@ -133,6 +133,7 @@ static struct shash_alg alg = {
.cra_name = "crc32c",
.cra_driver_name = "crc32c-sparc64",
.cra_priority = SPARC_CR_OPCODE_PRIORITY,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_ctxsize = sizeof(u32),
.cra_alignmask = 7,
diff --git a/arch/sparc/include/asm/compat.h b/arch/sparc/include/asm/compat.h
index fa38c78de0f0..615283e16f22 100644
--- a/arch/sparc/include/asm/compat.h
+++ b/arch/sparc/include/asm/compat.h
@@ -149,65 +149,6 @@ typedef u32 compat_old_sigset_t;
typedef u32 compat_sigset_word;
-typedef union compat_sigval {
- compat_int_t sival_int;
- compat_uptr_t sival_ptr;
-} compat_sigval_t;
-
-#define SI_PAD_SIZE32 (128/sizeof(int) - 3)
-
-typedef struct compat_siginfo {
- int si_signo;
- int si_errno;
- int si_code;
-
- union {
- int _pad[SI_PAD_SIZE32];
-
- /* kill() */
- struct {
- compat_pid_t _pid; /* sender's pid */
- unsigned int _uid; /* sender's uid */
- } _kill;
-
- /* POSIX.1b timers */
- struct {
- compat_timer_t _tid; /* timer id */
- int _overrun; /* overrun count */
- compat_sigval_t _sigval; /* same as below */
- int _sys_private; /* not to be passed to user */
- } _timer;
-
- /* POSIX.1b signals */
- struct {
- compat_pid_t _pid; /* sender's pid */
- unsigned int _uid; /* sender's uid */
- compat_sigval_t _sigval;
- } _rt;
-
- /* SIGCHLD */
- struct {
- compat_pid_t _pid; /* which child */
- unsigned int _uid; /* sender's uid */
- int _status; /* exit code */
- compat_clock_t _utime;
- compat_clock_t _stime;
- } _sigchld;
-
- /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGEMT */
- struct {
- u32 _addr; /* faulting insn/memory ref. */
- int _trapno;
- } _sigfault;
-
- /* SIGPOLL */
- struct {
- int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
- int _fd;
- } _sigpoll;
- } _sifields;
-} compat_siginfo_t;
-
#define COMPAT_OFF_T_MAX 0x7fffffff
/*
diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
index febaaeb1a0fe..548b366165dd 100644
--- a/arch/sparc/include/asm/thread_info_32.h
+++ b/arch/sparc/include/asm/thread_info_32.h
@@ -63,9 +63,6 @@ struct thread_info {
.preempt_count = INIT_PREEMPT_COUNT, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
/* how to get the thread information struct from C */
register struct thread_info *current_thread_info_reg asm("g6");
#define current_thread_info() (current_thread_info_reg)
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index caf915321ba9..f7e7b0baec9f 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -120,9 +120,6 @@ struct thread_info {
.preempt_count = INIT_PREEMPT_COUNT, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
/* how to get the thread information struct from C */
register struct thread_info *current_thread_info_reg asm("g6");
#define current_thread_info() (current_thread_info_reg)
diff --git a/arch/sparc/include/uapi/asm/poll.h b/arch/sparc/include/uapi/asm/poll.h
index 72356c999125..2a81e79aa3ea 100644
--- a/arch/sparc/include/uapi/asm/poll.h
+++ b/arch/sparc/include/uapi/asm/poll.h
@@ -2,11 +2,31 @@
#ifndef __SPARC_POLL_H
#define __SPARC_POLL_H
+#ifndef __KERNEL__
#define POLLWRNORM POLLOUT
-#define POLLWRBAND 256
-#define POLLMSG 512
-#define POLLREMOVE 1024
-#define POLLRDHUP 2048
+#define POLLWRBAND (__force __poll_t)256
+#define POLLMSG (__force __poll_t)512
+#define POLLREMOVE (__force __poll_t)1024
+#define POLLRDHUP (__force __poll_t)2048
+#else
+#define __ARCH_HAS_MANGLED_POLL
+static inline __u16 mangle_poll(__poll_t val)
+{
+ __u16 v = (__force __u16)val;
+ /* bit 9 -> bit 8, bit 8 -> bit 2, bit 13 -> bit 11 */
+ return (v & ~0x300) | ((v & 0x200) >> 1) | ((v & 0x100) >> 6) |
+ ((v & 0x2000) >> 2);
+}
+
+static inline __poll_t demangle_poll(__u16 v)
+{
+ /* bit 8 -> bit 9, bit 2 -> bits 2 and 8, bit 11 -> bits 11 and 13 */
+ return (__force __poll_t)((v & ~0x100) | ((v & 0x100) << 1) |
+ ((v & 4) << 6) | ((v & 0x800) << 2));
+}
+#endif
#include <asm-generic/poll.h>
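
The kernel-side mangle/demangle pair above translates between the generic poll bit layout used internally and sparc's historical ABI values (POLLWRBAND 256, POLLMSG 512, POLLRDHUP 2048). A compilable sketch of the same arithmetic; the two test values follow from those defines:

#include <assert.h>
#include <stdint.h>

/* generic layout -> sparc ABI layout: bit 9 -> 8, bit 8 -> 2, bit 13 -> 11 */
static uint16_t mangle(uint16_t v)
{
	return (v & ~0x300) | ((v & 0x200) >> 1) | ((v & 0x100) >> 6) |
	       ((v & 0x2000) >> 2);
}

/* sparc ABI layout -> generic layout (inverse of the above) */
static uint16_t demangle(uint16_t v)
{
	return (v & ~0x100) | ((v & 0x100) << 1) |
	       ((v & 4) << 6) | ((v & 0x800) << 2);
}

int main(void)
{
	/* generic POLLWRBAND (0x200) reaches sparc userspace as 256 */
	assert(mangle(0x200) == 0x100);
	/* and a sparc-side 256 comes back as the generic bit */
	assert(demangle(0x100) == 0x200);
	return 0;
}
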
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index 54a6159b9cd8..44d379db3f64 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -70,75 +70,6 @@ struct rt_signal_frame32 {
/* __siginfo_rwin_t * */u32 rwin_save;
} __attribute__((aligned(8)));
-int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
-{
- int err;
-
- if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
- return -EFAULT;
-
- /* If you change siginfo_t structure, please be sure
- this code is fixed accordingly.
- It should never copy any pad contained in the structure
- to avoid security leaks, but must copy the generic
- 3 ints plus the relevant union member.
- This routine must convert siginfo from 64bit to 32bit as well
- at the same time. */
- err = __put_user(from->si_signo, &to->si_signo);
- err |= __put_user(from->si_errno, &to->si_errno);
- err |= __put_user(from->si_code, &to->si_code);
- if (from->si_code < 0)
- err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
- else {
- switch (siginfo_layout(from->si_signo, from->si_code)) {
- case SIL_TIMER:
- err |= __put_user(from->si_tid, &to->si_tid);
- err |= __put_user(from->si_overrun, &to->si_overrun);
- err |= __put_user(from->si_int, &to->si_int);
- break;
- case SIL_CHLD:
- err |= __put_user(from->si_utime, &to->si_utime);
- err |= __put_user(from->si_stime, &to->si_stime);
- err |= __put_user(from->si_status, &to->si_status);
- default:
- case SIL_KILL:
- err |= __put_user(from->si_pid, &to->si_pid);
- err |= __put_user(from->si_uid, &to->si_uid);
- break;
- case SIL_FAULT:
- err |= __put_user(from->si_trapno, &to->si_trapno);
- err |= __put_user((unsigned long)from->si_addr, &to->si_addr);
- break;
- case SIL_POLL:
- err |= __put_user(from->si_band, &to->si_band);
- err |= __put_user(from->si_fd, &to->si_fd);
- break;
- case SIL_RT:
- err |= __put_user(from->si_pid, &to->si_pid);
- err |= __put_user(from->si_uid, &to->si_uid);
- err |= __put_user(from->si_int, &to->si_int);
- break;
- }
- }
- return err;
-}
-
-/* CAUTION: This is just a very minimalist implementation for the
- * sake of compat_sys_rt_sigqueueinfo()
- */
-int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
-{
- if (!access_ok(VERIFY_WRITE, from, sizeof(compat_siginfo_t)))
- return -EFAULT;
-
- if (copy_from_user(to, from, 3*sizeof(int)) ||
- copy_from_user(to->_sifields._pad, from->_sifields._pad,
- SI_PAD_SIZE))
- return -EFAULT;
-
- return 0;
-}
-
/* Checks if the fp is valid. We always build signal frames which are
* 16-byte aligned, therefore we can always enforce that the restore
* frame has that property as well.
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 02f269cfa538..ef9d403cbbe4 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -249,7 +249,7 @@ config HIGHMEM
If unsure, say "true".
-config ZONE_DMA
+config ZONE_DMA32
def_bool y
config IOMMU_HELPER
@@ -261,6 +261,7 @@ config NEED_SG_DMA_LENGTH
config SWIOTLB
bool
default TILEGX
+ select DMA_DIRECT_OPS
select IOMMU_HELPER
select NEED_SG_DMA_LENGTH
select ARCH_HAS_DMA_SET_COHERENT_MASK
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h
index 62a7b83025dd..769ff6ac0bf5 100644
--- a/arch/tile/include/asm/compat.h
+++ b/arch/tile/include/asm/compat.h
@@ -110,68 +110,6 @@ struct compat_flock64 {
typedef u32 compat_sigset_word;
-typedef union compat_sigval {
- compat_int_t sival_int;
- compat_uptr_t sival_ptr;
-} compat_sigval_t;
-
-#define COMPAT_SI_PAD_SIZE (128/sizeof(int) - 3)
-
-typedef struct compat_siginfo {
- int si_signo;
- int si_errno;
- int si_code;
-
- union {
- int _pad[COMPAT_SI_PAD_SIZE];
-
- /* kill() */
- struct {
- unsigned int _pid; /* sender's pid */
- unsigned int _uid; /* sender's uid */
- } _kill;
-
- /* POSIX.1b timers */
- struct {
- compat_timer_t _tid; /* timer id */
- int _overrun; /* overrun count */
- compat_sigval_t _sigval; /* same as below */
- int _sys_private; /* not to be passed to user */
- int _overrun_incr; /* amount to add to overrun */
- } _timer;
-
- /* POSIX.1b signals */
- struct {
- unsigned int _pid; /* sender's pid */
- unsigned int _uid; /* sender's uid */
- compat_sigval_t _sigval;
- } _rt;
-
- /* SIGCHLD */
- struct {
- unsigned int _pid; /* which child */
- unsigned int _uid; /* sender's uid */
- int _status; /* exit code */
- compat_clock_t _utime;
- compat_clock_t _stime;
- } _sigchld;
-
- /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
- struct {
- unsigned int _addr; /* faulting insn/memory ref. */
-#ifdef __ARCH_SI_TRAPNO
- int _trapno; /* TRAP # which caused the signal */
-#endif
- } _sigfault;
-
- /* SIGPOLL */
- struct {
- int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
- int _fd;
- } _sigpoll;
- } _sifields;
-} compat_siginfo_t;
-
#define COMPAT_OFF_T_MAX 0x7fffffff
struct compat_ipc64_perm {
diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h
index 97ad62878290..d25fce101fc0 100644
--- a/arch/tile/include/asm/dma-mapping.h
+++ b/arch/tile/include/asm/dma-mapping.h
@@ -44,26 +44,6 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
dev->archdata.dma_offset = off;
}
-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
- return paddr;
-}
-
-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
-{
- return daddr;
-}
-
-static inline void dma_mark_clean(void *addr, size_t size) {}
-
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
- if (!dev->dma_mask)
- return 0;
-
- return addr + size - 1 <= *dev->dma_mask;
-}
-
#define HAVE_ARCH_DMA_SET_MASK 1
int dma_set_mask(struct device *dev, u64 mask);
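
The helpers removed here were identical to the generic dma-direct versions, which is what lets the header shrink once the architecture selects the common code. Reduced to plain C, with the device represented only by a pointer to its DMA mask, the capability test is a range check:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef uint64_t dma_addr_t;

/* sketch of the removed dma_capable(): a bus address range is usable
 * only if the whole [addr, addr + size) span fits under the mask */
static bool dma_capable(const uint64_t *dma_mask, dma_addr_t addr, size_t size)
{
	if (!dma_mask)
		return false;
	return addr + size - 1 <= *dma_mask;
}
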
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h
index b7659b8f1117..2adcacd85749 100644
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -59,9 +59,6 @@ struct thread_info {
.align_ctl = 0, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
#endif /* !__ASSEMBLY__ */
#if PAGE_SIZE < 8192
diff --git a/arch/tile/include/uapi/asm/siginfo.h b/arch/tile/include/uapi/asm/siginfo.h
index f234d24fff55..a812fcbf4267 100644
--- a/arch/tile/include/uapi/asm/siginfo.h
+++ b/arch/tile/include/uapi/asm/siginfo.h
@@ -24,12 +24,4 @@
#include <asm-generic/siginfo.h>
-/*
- * Additional Tile-specific SIGILL si_codes
- */
-#define ILL_DBLFLT 9 /* double fault */
-#define ILL_HARDWALL 10 /* user networks hardwall violation */
-#undef NSIGILL
-#define NSIGILL 10
-
#endif /* _ASM_TILE_SIGINFO_H */
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
index 971d87a1d8cf..a703bd0e0488 100644
--- a/arch/tile/kernel/compat_signal.c
+++ b/arch/tile/kernel/compat_signal.c
@@ -50,79 +50,6 @@ struct compat_rt_sigframe {
struct compat_ucontext uc;
};
-int copy_siginfo_to_user32(struct compat_siginfo __user *to, const siginfo_t *from)
-{
- int err;
-
- if (!access_ok(VERIFY_WRITE, to, sizeof(struct compat_siginfo)))
- return -EFAULT;
-
- /* If you change siginfo_t structure, please make sure that
- this code is fixed accordingly.
- It should never copy any pad contained in the structure
- to avoid security leaks, but must copy the generic
- 3 ints plus the relevant union member. */
- err = __put_user(from->si_signo, &to->si_signo);
- err |= __put_user(from->si_errno, &to->si_errno);
- err |= __put_user(from->si_code, &to->si_code);
-
- if (from->si_code < 0) {
- err |= __put_user(from->si_pid, &to->si_pid);
- err |= __put_user(from->si_uid, &to->si_uid);
- err |= __put_user(from->si_int, &to->si_int);
- } else {
- /*
- * First 32bits of unions are always present:
- * si_pid === si_band === si_tid === si_addr(LS half)
- */
- err |= __put_user(from->_sifields._pad[0],
- &to->_sifields._pad[0]);
- switch (siginfo_layout(from->si_signo, from->si_code)) {
- case SIL_FAULT:
- break;
- case SIL_CHLD:
- err |= __put_user(from->si_utime, &to->si_utime);
- err |= __put_user(from->si_stime, &to->si_stime);
- err |= __put_user(from->si_status, &to->si_status);
- /* FALL THROUGH */
- default:
- case SIL_KILL:
- err |= __put_user(from->si_uid, &to->si_uid);
- break;
- case SIL_POLL:
- err |= __put_user(from->si_fd, &to->si_fd);
- break;
- case SIL_TIMER:
- err |= __put_user(from->si_overrun, &to->si_overrun);
- err |= __put_user(from->si_int, &to->si_int);
- break;
- case SIL_RT:
- err |= __put_user(from->si_uid, &to->si_uid);
- err |= __put_user(from->si_int, &to->si_int);
- break;
- }
- }
- return err;
-}
-
-int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
-{
- int err;
-
- if (!access_ok(VERIFY_READ, from, sizeof(struct compat_siginfo)))
- return -EFAULT;
-
- err = __get_user(to->si_signo, &from->si_signo);
- err |= __get_user(to->si_errno, &from->si_errno);
- err |= __get_user(to->si_code, &from->si_code);
-
- err |= __get_user(to->si_pid, &from->si_pid);
- err |= __get_user(to->si_uid, &from->si_uid);
- err |= __get_user(to->si_int, &from->si_int);
-
- return err;
-}
-
/* The assembly shim for this function arranges to ignore the return value. */
long compat_sys_rt_sigreturn(void)
{
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c
index f2abedc8a080..6a1efe5543fa 100644
--- a/arch/tile/kernel/pci-dma.c
+++ b/arch/tile/kernel/pci-dma.c
@@ -54,7 +54,7 @@ static void *tile_dma_alloc_coherent(struct device *dev, size_t size,
* which case we will return NULL. But such devices are uncommon.
*/
if (dma_mask <= DMA_BIT_MASK(32)) {
- gfp |= GFP_DMA;
+ gfp |= GFP_DMA32;
node = 0;
}
@@ -509,39 +509,9 @@ EXPORT_SYMBOL(gx_pci_dma_map_ops);
/* PCI DMA mapping functions for legacy PCI devices */
#ifdef CONFIG_SWIOTLB
-static void *tile_swiotlb_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp,
- unsigned long attrs)
-{
- gfp |= GFP_DMA;
- return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
-}
-
-static void tile_swiotlb_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_addr,
- unsigned long attrs)
-{
- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
-}
-
-static const struct dma_map_ops pci_swiotlb_dma_ops = {
- .alloc = tile_swiotlb_alloc_coherent,
- .free = tile_swiotlb_free_coherent,
- .map_page = swiotlb_map_page,
- .unmap_page = swiotlb_unmap_page,
- .map_sg = swiotlb_map_sg_attrs,
- .unmap_sg = swiotlb_unmap_sg_attrs,
- .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
- .sync_single_for_device = swiotlb_sync_single_for_device,
- .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
- .sync_sg_for_device = swiotlb_sync_sg_for_device,
- .dma_supported = swiotlb_dma_supported,
- .mapping_error = swiotlb_dma_mapping_error,
-};
-
static const struct dma_map_ops pci_hybrid_dma_ops = {
- .alloc = tile_swiotlb_alloc_coherent,
- .free = tile_swiotlb_free_coherent,
+ .alloc = swiotlb_alloc,
+ .free = swiotlb_free,
.map_page = tile_pci_dma_map_page,
.unmap_page = tile_pci_dma_unmap_page,
.map_sg = tile_pci_dma_map_sg,
@@ -552,7 +522,7 @@ static const struct dma_map_ops pci_hybrid_dma_ops = {
.sync_sg_for_device = tile_pci_dma_sync_sg_for_device,
};
-const struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops;
+const struct dma_map_ops *gx_legacy_pci_dma_map_ops = &swiotlb_dma_ops;
const struct dma_map_ops *gx_hybrid_pci_dma_map_ops = &pci_hybrid_dma_ops;
#else
const struct dma_map_ops *gx_legacy_pci_dma_map_ops;
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index ad83c1e66dbd..eb4e198f6f93 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -814,11 +814,11 @@ static void __init zone_sizes_init(void)
#endif
if (start < dma_end) {
- zones_size[ZONE_DMA] = min(zones_size[ZONE_NORMAL],
+ zones_size[ZONE_DMA32] = min(zones_size[ZONE_NORMAL],
dma_end - start);
- zones_size[ZONE_NORMAL] -= zones_size[ZONE_DMA];
+ zones_size[ZONE_NORMAL] -= zones_size[ZONE_DMA32];
} else {
- zones_size[ZONE_DMA] = 0;
+ zones_size[ZONE_DMA32] = 0;
}
/* Take zone metadata from controller 0 if we're isolnode. */
@@ -830,7 +830,7 @@ static void __init zone_sizes_init(void)
PFN_UP(node_percpu[i]));
/* Track the type of memory on each node */
- if (zones_size[ZONE_NORMAL] || zones_size[ZONE_DMA])
+ if (zones_size[ZONE_NORMAL] || zones_size[ZONE_DMA32])
node_set_state(i, N_NORMAL_MEMORY);
#ifdef CONFIG_HIGHMEM
if (end != start)
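
Tile's DMA zone always meant "memory a 32-bit device can reach", so the rename to ZONE_DMA32 aligns the arch with the generic meaning of that zone, and allocations now ask for it with GFP_DMA32. An illustrative allocation targeting the renamed zone (sketch only; alloc_pages and the GFP flags are the standard kernel APIs, the helper name is made up):

#include <linux/gfp.h>
#include <linux/mm.h>

/* constrain a zeroed buffer below 4 GiB for a 32-bit-capable device */
static void *alloc_below_4g(unsigned int order)
{
	struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
					order);

	return page ? page_address(page) : NULL;
}
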
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c
index de3eae813e52..479d8033a801 100644
--- a/arch/tile/kernel/single_step.c
+++ b/arch/tile/kernel/single_step.c
@@ -163,11 +163,13 @@ static tilepro_bundle_bits rewrite_load_store_unaligned(
* actual bad address in an SPR, which it doesn't.
*/
if (align_ctl == 0) {
- siginfo_t info = {
- .si_signo = SIGBUS,
- .si_code = BUS_ADRALN,
- .si_addr = addr
- };
+ siginfo_t info;
+
+ clear_siginfo(&info);
+ info.si_signo = SIGBUS;
+ info.si_code = BUS_ADRALN;
+ info.si_addr = addr;
+
trace_unhandled_signal("unaligned trap", regs,
(unsigned long)addr, SIGBUS);
force_sig_info(info.si_signo, &info, current);
@@ -210,11 +212,13 @@ static tilepro_bundle_bits rewrite_load_store_unaligned(
}
if (err) {
- siginfo_t info = {
- .si_signo = SIGBUS,
- .si_code = BUS_ADRALN,
- .si_addr = addr
- };
+ siginfo_t info;
+
+ clear_siginfo(&info);
+ info.si_signo = SIGBUS;
+ info.si_code = BUS_ADRALN;
+ info.si_addr = addr;
+
trace_unhandled_signal("bad address for unaligned fixup", regs,
(unsigned long)addr, SIGBUS);
force_sig_info(info.si_signo, &info, current);
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index 9b08c6055f15..83a7186198d7 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -256,12 +256,14 @@ static int do_bpt(struct pt_regs *regs)
void __kprobes do_trap(struct pt_regs *regs, int fault_num,
unsigned long reason)
{
- siginfo_t info = { 0 };
+ siginfo_t info;
int signo, code;
unsigned long address = 0;
tile_bundle_bits instr;
int is_kernel = !user_mode(regs);
+ clear_siginfo(&info);
+
/* Handle breakpoints, etc. */
if (is_kernel && fault_num == INT_ILL && do_bpt(regs))
return;
diff --git a/arch/tile/kernel/unaligned.c b/arch/tile/kernel/unaligned.c
index 8149c38f67b6..77a0b6b6a2a1 100644
--- a/arch/tile/kernel/unaligned.c
+++ b/arch/tile/kernel/unaligned.c
@@ -980,11 +980,13 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle,
}
if ((align_ctl == 0) || unexpected) {
- siginfo_t info = {
- .si_signo = SIGBUS,
- .si_code = BUS_ADRALN,
- .si_addr = (unsigned char __user *)0
- };
+ siginfo_t info;
+
+ clear_siginfo(&info);
+ info.si_signo = SIGBUS;
+ info.si_code = BUS_ADRALN;
+ info.si_addr = (unsigned char __user *)0;
+
if (unaligned_printk)
pr_info("Unalign bundle: unexp @%llx, %llx\n",
(unsigned long long)regs->pc,
@@ -1396,11 +1398,12 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle,
&frag, sizeof(frag));
if (status) {
/* Fail to copy JIT into user land. send SIGSEGV. */
- siginfo_t info = {
- .si_signo = SIGSEGV,
- .si_code = SEGV_MAPERR,
- .si_addr = (void __user *)&jit_code_area[idx]
- };
+ siginfo_t info;
+
+ clear_siginfo(&info);
+ info.si_signo = SIGSEGV;
+ info.si_code = SEGV_MAPERR;
+ info.si_addr = (void __user *)&jit_code_area[idx];
pr_warn("Unalign fixup: pid=%d %s jit_code_area=%llx\n",
current->pid, current->comm,
@@ -1511,11 +1514,12 @@ void do_unaligned(struct pt_regs *regs, int vecnum)
* If so, we will trigger SIGBUS.
*/
if ((regs->sp & 0x7) || (regs->ex1) || (align_ctl < 0)) {
- siginfo_t info = {
- .si_signo = SIGBUS,
- .si_code = BUS_ADRALN,
- .si_addr = (unsigned char __user *)0
- };
+ siginfo_t info;
+
+ clear_siginfo(&info);
+ info.si_signo = SIGBUS;
+ info.si_code = BUS_ADRALN;
+ info.si_addr = (unsigned char __user *)0;
if (unaligned_printk)
pr_info("Unalign fixup: %d %llx @%llx\n",
@@ -1535,11 +1539,13 @@ void do_unaligned(struct pt_regs *regs, int vecnum)
pc = (tilegx_bundle_bits __user *)(regs->pc);
if (get_user(bundle, pc) != 0) {
/* Probably never be here since pc is valid user address.*/
- siginfo_t info = {
- .si_signo = SIGSEGV,
- .si_code = SEGV_MAPERR,
- .si_addr = (void __user *)pc
- };
+ siginfo_t info;
+
+ clear_siginfo(&info);
+ info.si_signo = SIGSEGV;
+ info.si_code = SEGV_MAPERR;
+ info.si_addr = (void __user *)pc;
+
pr_err("Couldn't read instruction at %p trying to step\n", pc);
trace_unhandled_signal("segfault in unalign fixup", regs,
(unsigned long)info.si_addr, SIGSEGV);
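
The recurring rewrite in these tile files replaces designated-initializer zeroing of an on-stack siginfo_t with clear_siginfo(). A brace initializer is only required to zero the named members, not the padding inside the struct and union, and the whole object is later copied to userspace, so uninitialized pad bytes could leak kernel stack contents. clear_siginfo() boils down to a memset of the entire object. A userspace sketch of the safe pattern (helper name invented for illustration):

#include <signal.h>
#include <string.h>

/* memset() zeroes every byte, padding included, before the fields
 * are filled in; this is what clear_siginfo() does in the kernel */
static void prepare_bus_error(siginfo_t *info, void *addr)
{
	memset(info, 0, sizeof(*info));
	info->si_signo = SIGBUS;
	info->si_code = BUS_ADRALN;
	info->si_addr = addr;
}
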
diff --git a/arch/um/Makefile b/arch/um/Makefile
index b76fcce397a1..e54dda8a0363 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -116,8 +116,15 @@ endef
KBUILD_KCONFIG := $(HOST_DIR)/um/Kconfig
archheaders:
+ $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.asm-generic \
+ kbuild-file=$(HOST_DIR)/include/asm/Kbuild \
+ obj=$(HOST_DIR)/include/generated/asm
+ $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.asm-generic \
+ kbuild-file=$(HOST_DIR)/include/uapi/asm/Kbuild \
+ obj=$(HOST_DIR)/include/generated/uapi/asm
$(Q)$(MAKE) KBUILD_SRC= ARCH=$(HEADER_ARCH) archheaders
+
archprepare: include/generated/user_constants.h
LINK-$(CONFIG_LD_SCRIPT_STATIC) += -static
diff --git a/arch/um/drivers/hostaudio_kern.c b/arch/um/drivers/hostaudio_kern.c
index 12bdb5996bf5..7f9dbdbc4eb7 100644
--- a/arch/um/drivers/hostaudio_kern.c
+++ b/arch/um/drivers/hostaudio_kern.c
@@ -119,10 +119,10 @@ static ssize_t hostaudio_write(struct file *file, const char __user *buffer,
return err;
}
-static unsigned int hostaudio_poll(struct file *file,
- struct poll_table_struct *wait)
+static __poll_t hostaudio_poll(struct file *file,
+ struct poll_table_struct *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
#ifdef DEBUG
printk(KERN_DEBUG "hostaudio: poll called (unimplemented)\n");
diff --git a/arch/um/include/asm/processor-generic.h b/arch/um/include/asm/processor-generic.h
index 86942a492454..b58b746d3f2c 100644
--- a/arch/um/include/asm/processor-generic.h
+++ b/arch/um/include/asm/processor-generic.h
@@ -58,7 +58,10 @@ static inline void release_thread(struct task_struct *task)
{
}
-#define init_stack (init_thread_union.stack)
+static inline void mm_copy_segments(struct mm_struct *from_mm,
+ struct mm_struct *new_mm)
+{
+}
/*
* User space process size: 3GB (default).
diff --git a/arch/um/include/asm/thread_info.h b/arch/um/include/asm/thread_info.h
index 9300f7630d2a..4eecd960ee8c 100644
--- a/arch/um/include/asm/thread_info.h
+++ b/arch/um/include/asm/thread_info.h
@@ -6,6 +6,9 @@
#ifndef __UM_THREAD_INFO_H
#define __UM_THREAD_INFO_H
+#define THREAD_SIZE_ORDER CONFIG_KERNEL_STACK_ORDER
+#define THREAD_SIZE ((1 << CONFIG_KERNEL_STACK_ORDER) * PAGE_SIZE)
+
#ifndef __ASSEMBLY__
#include <asm/types.h>
@@ -37,10 +40,6 @@ struct thread_info {
.real_thread = NULL, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
-#define THREAD_SIZE ((1 << CONFIG_KERNEL_STACK_ORDER) * PAGE_SIZE)
/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
@@ -53,8 +52,6 @@ static inline struct thread_info *current_thread_info(void)
return ti;
}
-#define THREAD_SIZE_ORDER CONFIG_KERNEL_STACK_ORDER
-
#endif
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
diff --git a/arch/um/include/asm/vmlinux.lds.h b/arch/um/include/asm/vmlinux.lds.h
new file mode 100644
index 000000000000..149494ae78ea
--- /dev/null
+++ b/arch/um/include/asm/vmlinux.lds.h
@@ -0,0 +1,2 @@
+#include <asm/thread_info.h>
+#include <asm-generic/vmlinux.lds.h>
diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S
index d417e3899700..5568cf882371 100644
--- a/arch/um/kernel/dyn.lds.S
+++ b/arch/um/kernel/dyn.lds.S
@@ -1,5 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#include <asm-generic/vmlinux.lds.h>
+#include <asm/vmlinux.lds.h>
#include <asm/page.h>
OUTPUT_FORMAT(ELF_FORMAT)
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 428644175956..b2b02df9896e 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -306,7 +306,7 @@ void relay_signal(int sig, struct siginfo *si, struct uml_pt_regs *regs)
arch_examine_signal(sig, regs);
- memset(&clean_si, 0, sizeof(clean_si));
+ clear_siginfo(&clean_si);
clean_si.si_signo = si->si_signo;
clean_si.si_errno = si->si_errno;
clean_si.si_code = si->si_code;
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index f433690b9b37..a818ccef30ca 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -54,7 +54,7 @@ struct cpuinfo_um boot_cpu_data = {
union thread_union cpu0_irqstack
__attribute__((__section__(".data..init_irqstack"))) =
- { INIT_THREAD_INFO(init_task) };
+ { .thread_info = INIT_THREAD_INFO(init_task) };
/* Changed in setup_arch, which is called in early boot */
static char host_info[(__NEW_UTS_LEN + 1) * 5];
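
The irqstack initializer now names its member because the init-task rework makes thread_info's position in thread_union configuration-dependent, and a positional initializer binds to whichever member happens to come first. A minimal sketch with made-up types:

/* with a positional initializer the braces bind to the first member,
 * so naming the member keeps the initialization correct even if the
 * union's layout changes under a config option */
union stack_union {
	unsigned long stack[1024];
	struct { int cpu; } thread_info;
};

static union stack_union irqstack = {
	.thread_info = { .cpu = 0 },	/* unambiguous regardless of order */
};
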
diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S
index 3d6ed6ba5b78..36b07ec09742 100644
--- a/arch/um/kernel/uml.lds.S
+++ b/arch/um/kernel/uml.lds.S
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#include <asm-generic/vmlinux.lds.h>
+#include <asm/vmlinux.lds.h>
#include <asm/page.h>
OUTPUT_FORMAT(ELF_FORMAT)
diff --git a/arch/unicore32/include/asm/dma-mapping.h b/arch/unicore32/include/asm/dma-mapping.h
index ac608c2f6af6..790bc2ef4af2 100644
--- a/arch/unicore32/include/asm/dma-mapping.h
+++ b/arch/unicore32/include/asm/dma-mapping.h
@@ -12,38 +12,11 @@
#ifndef __UNICORE_DMA_MAPPING_H__
#define __UNICORE_DMA_MAPPING_H__
-#ifdef __KERNEL__
-
-#include <linux/mm_types.h>
-#include <linux/scatterlist.h>
#include <linux/swiotlb.h>
-extern const struct dma_map_ops swiotlb_dma_map_ops;
-
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
- return &swiotlb_dma_map_ops;
+ return &swiotlb_dma_ops;
}
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
- if (dev && dev->dma_mask)
- return addr + size - 1 <= *dev->dma_mask;
-
- return 1;
-}
-
-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
- return paddr;
-}
-
-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
-{
- return daddr;
-}
-
-static inline void dma_mark_clean(void *addr, size_t size) {}
-
-#endif /* __KERNEL__ */
#endif
diff --git a/arch/unicore32/include/asm/thread_info.h b/arch/unicore32/include/asm/thread_info.h
index e79ad6d5b5b2..5fb728f3b49a 100644
--- a/arch/unicore32/include/asm/thread_info.h
+++ b/arch/unicore32/include/asm/thread_info.h
@@ -87,9 +87,6 @@ struct thread_info {
.addr_limit = KERNEL_DS, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
/*
* how to get the thread information struct from C
*/
diff --git a/arch/unicore32/mm/Kconfig b/arch/unicore32/mm/Kconfig
index c256460cd363..e9154a59d561 100644
--- a/arch/unicore32/mm/Kconfig
+++ b/arch/unicore32/mm/Kconfig
@@ -42,6 +42,7 @@ config CPU_TLB_SINGLE_ENTRY_DISABLE
config SWIOTLB
def_bool y
+ select DMA_DIRECT_OPS
config IOMMU_HELPER
def_bool SWIOTLB
diff --git a/arch/unicore32/mm/Makefile b/arch/unicore32/mm/Makefile
index 681c0ef5ec9e..8106260583ab 100644
--- a/arch/unicore32/mm/Makefile
+++ b/arch/unicore32/mm/Makefile
@@ -6,8 +6,6 @@
obj-y := extable.o fault.o init.o pgd.o mmu.o
obj-y += flush.o ioremap.o
-obj-$(CONFIG_SWIOTLB) += dma-swiotlb.o
-
obj-$(CONFIG_MODULES) += proc-syms.o
obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o
diff --git a/arch/unicore32/mm/dma-swiotlb.c b/arch/unicore32/mm/dma-swiotlb.c
deleted file mode 100644
index 525413d6690e..000000000000
--- a/arch/unicore32/mm/dma-swiotlb.c
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Contains routines needed to support swiotlb for UniCore32.
- *
- * Copyright (C) 2010 Guan Xuetao
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-#include <linux/pci.h>
-#include <linux/cache.h>
-#include <linux/module.h>
-#include <linux/dma-mapping.h>
-#include <linux/swiotlb.h>
-#include <linux/bootmem.h>
-
-#include <asm/dma.h>
-
-static void *unicore_swiotlb_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags,
- unsigned long attrs)
-{
- return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
-}
-
-static void unicore_swiotlb_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_addr,
- unsigned long attrs)
-{
- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
-}
-
-const struct dma_map_ops swiotlb_dma_map_ops = {
- .alloc = unicore_swiotlb_alloc_coherent,
- .free = unicore_swiotlb_free_coherent,
- .map_sg = swiotlb_map_sg_attrs,
- .unmap_sg = swiotlb_unmap_sg_attrs,
- .dma_supported = swiotlb_dma_supported,
- .map_page = swiotlb_map_page,
- .unmap_page = swiotlb_unmap_page,
- .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
- .sync_single_for_device = swiotlb_sync_single_for_device,
- .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
- .sync_sg_for_device = swiotlb_sync_sg_for_device,
- .mapping_error = swiotlb_dma_mapping_error,
-};
-EXPORT_SYMBOL(swiotlb_dma_map_ops);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index bc2204f829d3..fcd3b4d24eea 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -54,6 +54,7 @@ config X86
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_KCOV if X86_64
+ select ARCH_HAS_PHYS_TO_DMA
select ARCH_HAS_PMEM_API if X86_64
select ARCH_HAS_REFCOUNT
select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64
@@ -811,6 +812,15 @@ config PARAVIRT_TIME_ACCOUNTING
config PARAVIRT_CLOCK
bool
+config JAILHOUSE_GUEST
+ bool "Jailhouse non-root cell support"
+ depends on X86_64 && PCI
+ select X86_PM_TIMER
+ ---help---
+ This option allows Linux to run as a guest in a Jailhouse non-root
+ cell. You can leave this option disabled if you only want to start
+ Jailhouse and run Linux afterwards in the root cell.
+
endif #HYPERVISOR_GUEST
config NO_BOOTMEM
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 672441c008c7..192e4d2f9efc 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -169,14 +169,6 @@ config IOMMU_DEBUG
options. See Documentation/x86/x86_64/boot-options.txt for more
details.
-config IOMMU_STRESS
- bool "Enable IOMMU stress-test mode"
- ---help---
- This option disables various optimizations in IOMMU related
- code to do real stress testing of the IOMMU code. This option
- will cause a performance drop and should only be enabled for
- testing.
-
config IOMMU_LEAK
bool "IOMMU leak tracing"
depends on IOMMU_DEBUG && DMA_API_DEBUG
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index e56dbc67e837..353e20c3f114 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -999,6 +999,7 @@ struct boot_params *efi_main(struct efi_config *c,
/* Ask the firmware to clear memory on unclean shutdown */
efi_enable_reset_attack_mitigation(sys_table);
+ efi_retrieve_tpm2_eventlog(sys_table);
setup_graphics(boot_params);
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 3d09e3aca18d..12e8484a8ee7 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -90,30 +90,6 @@ SHIFT_MASK: .octa 0x0f0e0d0c0b0a09080706050403020100
ALL_F: .octa 0xffffffffffffffffffffffffffffffff
.octa 0x00000000000000000000000000000000
-.section .rodata
-.align 16
-.type aad_shift_arr, @object
-.size aad_shift_arr, 272
-aad_shift_arr:
- .octa 0xffffffffffffffffffffffffffffffff
- .octa 0xffffffffffffffffffffffffffffff0C
- .octa 0xffffffffffffffffffffffffffff0D0C
- .octa 0xffffffffffffffffffffffffff0E0D0C
- .octa 0xffffffffffffffffffffffff0F0E0D0C
- .octa 0xffffffffffffffffffffff0C0B0A0908
- .octa 0xffffffffffffffffffff0D0C0B0A0908
- .octa 0xffffffffffffffffff0E0D0C0B0A0908
- .octa 0xffffffffffffffff0F0E0D0C0B0A0908
- .octa 0xffffffffffffff0C0B0A090807060504
- .octa 0xffffffffffff0D0C0B0A090807060504
- .octa 0xffffffffff0E0D0C0B0A090807060504
- .octa 0xffffffff0F0E0D0C0B0A090807060504
- .octa 0xffffff0C0B0A09080706050403020100
- .octa 0xffff0D0C0B0A09080706050403020100
- .octa 0xff0E0D0C0B0A09080706050403020100
- .octa 0x0F0E0D0C0B0A09080706050403020100
-
-
.text
@@ -257,6 +233,37 @@ aad_shift_arr:
pxor \TMP1, \GH # result is in TMP1
.endm
+# Reads DLEN bytes starting at DPTR and stores in XMMDst
+# where 0 < DLEN < 16
+# Clobbers %rax, DLEN and XMM1
+.macro READ_PARTIAL_BLOCK DPTR DLEN XMM1 XMMDst
+ cmp $8, \DLEN
+ jl _read_lt8_\@
+ mov (\DPTR), %rax
+ MOVQ_R64_XMM %rax, \XMMDst
+ sub $8, \DLEN
+ jz _done_read_partial_block_\@
+ xor %eax, %eax
+_read_next_byte_\@:
+ shl $8, %rax
+ mov 7(\DPTR, \DLEN, 1), %al
+ dec \DLEN
+ jnz _read_next_byte_\@
+ MOVQ_R64_XMM %rax, \XMM1
+ pslldq $8, \XMM1
+ por \XMM1, \XMMDst
+ jmp _done_read_partial_block_\@
+_read_lt8_\@:
+ xor %eax, %eax
+_read_next_byte_lt8_\@:
+ shl $8, %rax
+ mov -1(\DPTR, \DLEN, 1), %al
+ dec \DLEN
+ jnz _read_next_byte_lt8_\@
+ MOVQ_R64_XMM %rax, \XMMDst
+_done_read_partial_block_\@:
+.endm
+
/*
* if a = number of total plaintext bytes
* b = floor(a/16)
@@ -273,62 +280,30 @@ aad_shift_arr:
XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
MOVADQ SHUF_MASK(%rip), %xmm14
mov arg7, %r10 # %r10 = AAD
- mov arg8, %r12 # %r12 = aadLen
- mov %r12, %r11
+ mov arg8, %r11 # %r11 = aadLen
pxor %xmm\i, %xmm\i
pxor \XMM2, \XMM2
cmp $16, %r11
- jl _get_AAD_rest8\num_initial_blocks\operation
+ jl _get_AAD_rest\num_initial_blocks\operation
_get_AAD_blocks\num_initial_blocks\operation:
movdqu (%r10), %xmm\i
PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
pxor %xmm\i, \XMM2
GHASH_MUL \XMM2, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
add $16, %r10
- sub $16, %r12
sub $16, %r11
cmp $16, %r11
jge _get_AAD_blocks\num_initial_blocks\operation
movdqu \XMM2, %xmm\i
+
+ /* read the last <16B of AAD */
+_get_AAD_rest\num_initial_blocks\operation:
cmp $0, %r11
je _get_AAD_done\num_initial_blocks\operation
- pxor %xmm\i,%xmm\i
-
- /* read the last <16B of AAD. since we have at least 4B of
- data right after the AAD (the ICV, and maybe some CT), we can
- read 4B/8B blocks safely, and then get rid of the extra stuff */
-_get_AAD_rest8\num_initial_blocks\operation:
- cmp $4, %r11
- jle _get_AAD_rest4\num_initial_blocks\operation
- movq (%r10), \TMP1
- add $8, %r10
- sub $8, %r11
- pslldq $8, \TMP1
- psrldq $8, %xmm\i
- pxor \TMP1, %xmm\i
- jmp _get_AAD_rest8\num_initial_blocks\operation
-_get_AAD_rest4\num_initial_blocks\operation:
- cmp $0, %r11
- jle _get_AAD_rest0\num_initial_blocks\operation
- mov (%r10), %eax
- movq %rax, \TMP1
- add $4, %r10
- sub $4, %r10
- pslldq $12, \TMP1
- psrldq $4, %xmm\i
- pxor \TMP1, %xmm\i
-_get_AAD_rest0\num_initial_blocks\operation:
- /* finalize: shift out the extra bytes we read, and align
- left. since pslldq can only shift by an immediate, we use
- vpshufb and an array of shuffle masks */
- movq %r12, %r11
- salq $4, %r11
- movdqu aad_shift_arr(%r11), \TMP1
- PSHUFB_XMM \TMP1, %xmm\i
-_get_AAD_rest_final\num_initial_blocks\operation:
+ READ_PARTIAL_BLOCK %r10, %r11, \TMP1, %xmm\i
PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
pxor \XMM2, %xmm\i
GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
@@ -532,62 +507,30 @@ _initial_blocks_done\num_initial_blocks\operation:
XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
MOVADQ SHUF_MASK(%rip), %xmm14
mov arg7, %r10 # %r10 = AAD
- mov arg8, %r12 # %r12 = aadLen
- mov %r12, %r11
+ mov arg8, %r11 # %r11 = aadLen
pxor %xmm\i, %xmm\i
pxor \XMM2, \XMM2
cmp $16, %r11
- jl _get_AAD_rest8\num_initial_blocks\operation
+ jl _get_AAD_rest\num_initial_blocks\operation
_get_AAD_blocks\num_initial_blocks\operation:
movdqu (%r10), %xmm\i
PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
pxor %xmm\i, \XMM2
GHASH_MUL \XMM2, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
add $16, %r10
- sub $16, %r12
sub $16, %r11
cmp $16, %r11
jge _get_AAD_blocks\num_initial_blocks\operation
movdqu \XMM2, %xmm\i
+
+ /* read the last <16B of AAD */
+_get_AAD_rest\num_initial_blocks\operation:
cmp $0, %r11
je _get_AAD_done\num_initial_blocks\operation
- pxor %xmm\i,%xmm\i
-
- /* read the last <16B of AAD. since we have at least 4B of
- data right after the AAD (the ICV, and maybe some PT), we can
- read 4B/8B blocks safely, and then get rid of the extra stuff */
-_get_AAD_rest8\num_initial_blocks\operation:
- cmp $4, %r11
- jle _get_AAD_rest4\num_initial_blocks\operation
- movq (%r10), \TMP1
- add $8, %r10
- sub $8, %r11
- pslldq $8, \TMP1
- psrldq $8, %xmm\i
- pxor \TMP1, %xmm\i
- jmp _get_AAD_rest8\num_initial_blocks\operation
-_get_AAD_rest4\num_initial_blocks\operation:
- cmp $0, %r11
- jle _get_AAD_rest0\num_initial_blocks\operation
- mov (%r10), %eax
- movq %rax, \TMP1
- add $4, %r10
- sub $4, %r10
- pslldq $12, \TMP1
- psrldq $4, %xmm\i
- pxor \TMP1, %xmm\i
-_get_AAD_rest0\num_initial_blocks\operation:
- /* finalize: shift out the extra bytes we read, and align
- left. since pslldq can only shift by an immediate, we use
- vpshufb and an array of shuffle masks */
- movq %r12, %r11
- salq $4, %r11
- movdqu aad_shift_arr(%r11), \TMP1
- PSHUFB_XMM \TMP1, %xmm\i
-_get_AAD_rest_final\num_initial_blocks\operation:
+ READ_PARTIAL_BLOCK %r10, %r11, \TMP1, %xmm\i
PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
pxor \XMM2, %xmm\i
GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
@@ -1386,14 +1329,6 @@ _esb_loop_\@:
*
* AAD Format with 64-bit Extended Sequence Number
*
-* aadLen:
-* from the definition of the spec, aadLen can only be 8 or 12 bytes.
-* The code supports 16 too but for other sizes, the code will fail.
-*
-* TLen:
-* from the definition of the spec, TLen can only be 8, 12 or 16 bytes.
-* For other sizes, the code will fail.
-*
* poly = x^128 + x^127 + x^126 + x^121 + 1
*
*****************************************************************************/
@@ -1487,19 +1422,16 @@ _zero_cipher_left_decrypt:
PSHUFB_XMM %xmm10, %xmm0
ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # E(K, Yn)
- sub $16, %r11
- add %r13, %r11
- movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
- lea SHIFT_MASK+16(%rip), %r12
- sub %r13, %r12
-# adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
-# (%r13 is the number of bytes in plaintext mod 16)
- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
- PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 butes
+ lea (%arg3,%r11,1), %r10
+ mov %r13, %r12
+ READ_PARTIAL_BLOCK %r10 %r12 %xmm2 %xmm1
+
+ lea ALL_F+16(%rip), %r12
+ sub %r13, %r12
movdqa %xmm1, %xmm2
pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
+ movdqu (%r12), %xmm1
# get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
pand %xmm1, %xmm2
@@ -1508,9 +1440,6 @@ _zero_cipher_left_decrypt:
pxor %xmm2, %xmm8
GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
- # GHASH computation for the last <16 byte block
- sub %r13, %r11
- add $16, %r11
# output %r13 bytes
MOVQ_R64_XMM %xmm0, %rax
@@ -1664,14 +1593,6 @@ ENDPROC(aesni_gcm_dec)
*
* AAD Format with 64-bit Extended Sequence Number
*
-* aadLen:
-* from the definition of the spec, aadLen can only be 8 or 12 bytes.
-* The code supports 16 too but for other sizes, the code will fail.
-*
-* TLen:
-* from the definition of the spec, TLen can only be 8, 12 or 16 bytes.
-* For other sizes, the code will fail.
-*
* poly = x^128 + x^127 + x^126 + x^121 + 1
***************************************************************************/
ENTRY(aesni_gcm_enc)
@@ -1764,19 +1685,16 @@ _zero_cipher_left_encrypt:
movdqa SHUF_MASK(%rip), %xmm10
PSHUFB_XMM %xmm10, %xmm0
-
ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # Encrypt(K, Yn)
- sub $16, %r11
- add %r13, %r11
- movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
- lea SHIFT_MASK+16(%rip), %r12
+
+ lea (%arg3,%r11,1), %r10
+ mov %r13, %r12
+ READ_PARTIAL_BLOCK %r10 %r12 %xmm2 %xmm1
+
+ lea ALL_F+16(%rip), %r12
sub %r13, %r12
- # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
- # (%r13 is the number of bytes in plaintext mod 16)
- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
- PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte
pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
+ movdqu (%r12), %xmm1
# get the appropriate mask to mask out top 16-r13 bytes of xmm0
pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
movdqa SHUF_MASK(%rip), %xmm10
@@ -1785,9 +1703,6 @@ _zero_cipher_left_encrypt:
pxor %xmm0, %xmm8
GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
# GHASH computation for the last <16 byte block
- sub %r13, %r11
- add $16, %r11
-
movdqa SHUF_MASK(%rip), %xmm10
PSHUFB_XMM %xmm10, %xmm0
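
The READ_PARTIAL_BLOCK macro introduced above replaces the old shuffle-mask tail handling, which read the final partial AAD block in 4- and 8-byte granules and could therefore touch memory past the end of the buffer. The new code reads one whole qword only when at least eight bytes remain and otherwise proceeds byte by byte, never past dptr[dlen-1]. A C model of the value it computes (a sketch; the function name is invented):

#include <stdint.h>
#include <string.h>

/* copy 1..15 bytes into a zero-filled 16-byte block without ever
 * reading past the input's end */
static void read_partial_block(const uint8_t *dptr, size_t dlen,
			       uint8_t dst[16])
{
	size_t i = 0;

	memset(dst, 0, 16);
	if (dlen >= 8) {	/* the macro grabs a whole qword first */
		memcpy(dst, dptr, 8);
		i = 8;
	}
	while (i < dlen) {	/* then strictly in-bounds single bytes */
		dst[i] = dptr[i];
		i++;
	}
}
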
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 3bf3dcf29825..34cf1c1f8c98 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -690,8 +690,8 @@ static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}
-static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
- unsigned int key_len)
+static int gcmaes_wrapper_set_key(struct crypto_aead *parent, const u8 *key,
+ unsigned int key_len)
{
struct cryptd_aead **ctx = crypto_aead_ctx(parent);
struct cryptd_aead *cryptd_tfm = *ctx;
@@ -716,8 +716,8 @@ static int common_rfc4106_set_authsize(struct crypto_aead *aead,
/* This is the Integrity Check Value (aka the authentication tag length and can
* be 8, 12 or 16 bytes long. */
-static int rfc4106_set_authsize(struct crypto_aead *parent,
- unsigned int authsize)
+static int gcmaes_wrapper_set_authsize(struct crypto_aead *parent,
+ unsigned int authsize)
{
struct cryptd_aead **ctx = crypto_aead_ctx(parent);
struct cryptd_aead *cryptd_tfm = *ctx;
@@ -824,7 +824,7 @@ static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
if (sg_is_last(req->src) &&
(!PageHighMem(sg_page(req->src)) ||
req->src->offset + req->src->length <= PAGE_SIZE) &&
- sg_is_last(req->dst) &&
+ sg_is_last(req->dst) && req->dst->length &&
(!PageHighMem(sg_page(req->dst)) ||
req->dst->offset + req->dst->length <= PAGE_SIZE)) {
one_entry_in_sg = 1;
@@ -929,7 +929,7 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
aes_ctx);
}
-static int rfc4106_encrypt(struct aead_request *req)
+static int gcmaes_wrapper_encrypt(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
@@ -945,7 +945,7 @@ static int rfc4106_encrypt(struct aead_request *req)
return crypto_aead_encrypt(req);
}
-static int rfc4106_decrypt(struct aead_request *req)
+static int gcmaes_wrapper_decrypt(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
@@ -1117,7 +1117,7 @@ static int generic_gcmaes_decrypt(struct aead_request *req)
{
__be32 counter = cpu_to_be32(1);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
+ struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
void *aes_ctx = &(ctx->aes_key_expanded);
u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
@@ -1128,6 +1128,30 @@ static int generic_gcmaes_decrypt(struct aead_request *req)
aes_ctx);
}
+static int generic_gcmaes_init(struct crypto_aead *aead)
+{
+ struct cryptd_aead *cryptd_tfm;
+ struct cryptd_aead **ctx = crypto_aead_ctx(aead);
+
+ cryptd_tfm = cryptd_alloc_aead("__driver-generic-gcm-aes-aesni",
+ CRYPTO_ALG_INTERNAL,
+ CRYPTO_ALG_INTERNAL);
+ if (IS_ERR(cryptd_tfm))
+ return PTR_ERR(cryptd_tfm);
+
+ *ctx = cryptd_tfm;
+ crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
+
+ return 0;
+}
+
+static void generic_gcmaes_exit(struct crypto_aead *aead)
+{
+ struct cryptd_aead **ctx = crypto_aead_ctx(aead);
+
+ cryptd_free_aead(*ctx);
+}
+
static struct aead_alg aesni_aead_algs[] = { {
.setkey = common_rfc4106_set_key,
.setauthsize = common_rfc4106_set_authsize,
@@ -1147,10 +1171,10 @@ static struct aead_alg aesni_aead_algs[] = { {
}, {
.init = rfc4106_init,
.exit = rfc4106_exit,
- .setkey = rfc4106_set_key,
- .setauthsize = rfc4106_set_authsize,
- .encrypt = rfc4106_encrypt,
- .decrypt = rfc4106_decrypt,
+ .setkey = gcmaes_wrapper_set_key,
+ .setauthsize = gcmaes_wrapper_set_authsize,
+ .encrypt = gcmaes_wrapper_encrypt,
+ .decrypt = gcmaes_wrapper_decrypt,
.ivsize = GCM_RFC4106_IV_SIZE,
.maxauthsize = 16,
.base = {
@@ -1170,13 +1194,31 @@ static struct aead_alg aesni_aead_algs[] = { {
.ivsize = GCM_AES_IV_SIZE,
.maxauthsize = 16,
.base = {
+ .cra_name = "__generic-gcm-aes-aesni",
+ .cra_driver_name = "__driver-generic-gcm-aes-aesni",
+ .cra_priority = 0,
+ .cra_flags = CRYPTO_ALG_INTERNAL,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct generic_gcmaes_ctx),
+ .cra_alignmask = AESNI_ALIGN - 1,
+ .cra_module = THIS_MODULE,
+ },
+}, {
+ .init = generic_gcmaes_init,
+ .exit = generic_gcmaes_exit,
+ .setkey = gcmaes_wrapper_set_key,
+ .setauthsize = gcmaes_wrapper_set_authsize,
+ .encrypt = gcmaes_wrapper_encrypt,
+ .decrypt = gcmaes_wrapper_decrypt,
+ .ivsize = GCM_AES_IV_SIZE,
+ .maxauthsize = 16,
+ .base = {
.cra_name = "gcm(aes)",
.cra_driver_name = "generic-gcm-aesni",
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct generic_gcmaes_ctx),
- .cra_alignmask = AESNI_ALIGN - 1,
+ .cra_ctxsize = sizeof(struct cryptd_aead *),
.cra_module = THIS_MODULE,
},
} };
diff --git a/arch/x86/crypto/chacha20_glue.c b/arch/x86/crypto/chacha20_glue.c
index 1e6af1b35f7b..dce7c5d39c2f 100644
--- a/arch/x86/crypto/chacha20_glue.c
+++ b/arch/x86/crypto/chacha20_glue.c
@@ -107,7 +107,6 @@ static struct skcipher_alg alg = {
.base.cra_priority = 300,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct chacha20_ctx),
- .base.cra_alignmask = sizeof(u32) - 1,
.base.cra_module = THIS_MODULE,
.min_keysize = CHACHA20_KEY_SIZE,
diff --git a/arch/x86/crypto/crc32-pclmul_glue.c b/arch/x86/crypto/crc32-pclmul_glue.c
index 27226df3f7d8..c8d9cdacbf10 100644
--- a/arch/x86/crypto/crc32-pclmul_glue.c
+++ b/arch/x86/crypto/crc32-pclmul_glue.c
@@ -162,6 +162,7 @@ static struct shash_alg alg = {
.cra_name = "crc32",
.cra_driver_name = "crc32-pclmul",
.cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_ctxsize = sizeof(u32),
.cra_module = THIS_MODULE,
diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
index c194d5717ae5..5773e1161072 100644
--- a/arch/x86/crypto/crc32c-intel_glue.c
+++ b/arch/x86/crypto/crc32c-intel_glue.c
@@ -226,6 +226,7 @@ static struct shash_alg alg = {
.cra_name = "crc32c",
.cra_driver_name = "crc32c-intel",
.cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_ctxsize = sizeof(u32),
.cra_module = THIS_MODULE,
diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c
index e32142bc071d..790377797544 100644
--- a/arch/x86/crypto/poly1305_glue.c
+++ b/arch/x86/crypto/poly1305_glue.c
@@ -164,14 +164,12 @@ static struct shash_alg alg = {
.init = poly1305_simd_init,
.update = poly1305_simd_update,
.final = crypto_poly1305_final,
- .setkey = crypto_poly1305_setkey,
.descsize = sizeof(struct poly1305_simd_desc_ctx),
.base = {
.cra_name = "poly1305",
.cra_driver_name = "poly1305-simd",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
- .cra_alignmask = sizeof(u32) - 1,
.cra_blocksize = POLY1305_BLOCK_SIZE,
.cra_module = THIS_MODULE,
},
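
Dropping .setkey from the poly1305-simd shash matches the kernel's Poly1305 convention: the 32-byte one-time key is per-message material, absorbed from the front of the data stream by update() rather than stored as transform state, so a keyed-hash setkey method has nothing to do. A hedged usage sketch with the regular shash calls (desc setup and error checks omitted for brevity):

#include <crypto/hash.h>

/* the one-time key travels in-band, ahead of the message bytes */
static void poly1305_digest(struct shash_desc *desc, const u8 key[32],
			    const u8 *msg, unsigned int len, u8 tag[16])
{
	crypto_shash_init(desc);
	crypto_shash_update(desc, key, 32);	/* consumed as the key pair */
	crypto_shash_update(desc, msg, len);
	crypto_shash_final(desc, tag);
}
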
diff --git a/arch/x86/crypto/salsa20-i586-asm_32.S b/arch/x86/crypto/salsa20-i586-asm_32.S
index 329452b8f794..6014b7b9e52a 100644
--- a/arch/x86/crypto/salsa20-i586-asm_32.S
+++ b/arch/x86/crypto/salsa20-i586-asm_32.S
@@ -1,6 +1,7 @@
-# salsa20_pm.s version 20051229
-# D. J. Bernstein
-# Public domain.
+# Derived from:
+# salsa20_pm.s version 20051229
+# D. J. Bernstein
+# Public domain.
#include <linux/linkage.h>
@@ -935,180 +936,3 @@ ENTRY(salsa20_encrypt_bytes)
# goto bytesatleast1
jmp ._bytesatleast1
ENDPROC(salsa20_encrypt_bytes)
-
-# enter salsa20_keysetup
-ENTRY(salsa20_keysetup)
- mov %esp,%eax
- and $31,%eax
- add $256,%eax
- sub %eax,%esp
- # eax_stack = eax
- movl %eax,64(%esp)
- # ebx_stack = ebx
- movl %ebx,68(%esp)
- # esi_stack = esi
- movl %esi,72(%esp)
- # edi_stack = edi
- movl %edi,76(%esp)
- # ebp_stack = ebp
- movl %ebp,80(%esp)
- # k = arg2
- movl 8(%esp,%eax),%ecx
- # kbits = arg3
- movl 12(%esp,%eax),%edx
- # x = arg1
- movl 4(%esp,%eax),%eax
- # in1 = *(uint32 *) (k + 0)
- movl 0(%ecx),%ebx
- # in2 = *(uint32 *) (k + 4)
- movl 4(%ecx),%esi
- # in3 = *(uint32 *) (k + 8)
- movl 8(%ecx),%edi
- # in4 = *(uint32 *) (k + 12)
- movl 12(%ecx),%ebp
- # *(uint32 *) (x + 4) = in1
- movl %ebx,4(%eax)
- # *(uint32 *) (x + 8) = in2
- movl %esi,8(%eax)
- # *(uint32 *) (x + 12) = in3
- movl %edi,12(%eax)
- # *(uint32 *) (x + 16) = in4
- movl %ebp,16(%eax)
- # kbits - 256
- cmp $256,%edx
- # goto kbits128 if unsigned<
- jb ._kbits128
-._kbits256:
- # in11 = *(uint32 *) (k + 16)
- movl 16(%ecx),%edx
- # in12 = *(uint32 *) (k + 20)
- movl 20(%ecx),%ebx
- # in13 = *(uint32 *) (k + 24)
- movl 24(%ecx),%esi
- # in14 = *(uint32 *) (k + 28)
- movl 28(%ecx),%ecx
- # *(uint32 *) (x + 44) = in11
- movl %edx,44(%eax)
- # *(uint32 *) (x + 48) = in12
- movl %ebx,48(%eax)
- # *(uint32 *) (x + 52) = in13
- movl %esi,52(%eax)
- # *(uint32 *) (x + 56) = in14
- movl %ecx,56(%eax)
- # in0 = 1634760805
- mov $1634760805,%ecx
- # in5 = 857760878
- mov $857760878,%edx
- # in10 = 2036477234
- mov $2036477234,%ebx
- # in15 = 1797285236
- mov $1797285236,%esi
- # *(uint32 *) (x + 0) = in0
- movl %ecx,0(%eax)
- # *(uint32 *) (x + 20) = in5
- movl %edx,20(%eax)
- # *(uint32 *) (x + 40) = in10
- movl %ebx,40(%eax)
- # *(uint32 *) (x + 60) = in15
- movl %esi,60(%eax)
- # goto keysetupdone
- jmp ._keysetupdone
-._kbits128:
- # in11 = *(uint32 *) (k + 0)
- movl 0(%ecx),%edx
- # in12 = *(uint32 *) (k + 4)
- movl 4(%ecx),%ebx
- # in13 = *(uint32 *) (k + 8)
- movl 8(%ecx),%esi
- # in14 = *(uint32 *) (k + 12)
- movl 12(%ecx),%ecx
- # *(uint32 *) (x + 44) = in11
- movl %edx,44(%eax)
- # *(uint32 *) (x + 48) = in12
- movl %ebx,48(%eax)
- # *(uint32 *) (x + 52) = in13
- movl %esi,52(%eax)
- # *(uint32 *) (x + 56) = in14
- movl %ecx,56(%eax)
- # in0 = 1634760805
- mov $1634760805,%ecx
- # in5 = 824206446
- mov $824206446,%edx
- # in10 = 2036477238
- mov $2036477238,%ebx
- # in15 = 1797285236
- mov $1797285236,%esi
- # *(uint32 *) (x + 0) = in0
- movl %ecx,0(%eax)
- # *(uint32 *) (x + 20) = in5
- movl %edx,20(%eax)
- # *(uint32 *) (x + 40) = in10
- movl %ebx,40(%eax)
- # *(uint32 *) (x + 60) = in15
- movl %esi,60(%eax)
-._keysetupdone:
- # eax = eax_stack
- movl 64(%esp),%eax
- # ebx = ebx_stack
- movl 68(%esp),%ebx
- # esi = esi_stack
- movl 72(%esp),%esi
- # edi = edi_stack
- movl 76(%esp),%edi
- # ebp = ebp_stack
- movl 80(%esp),%ebp
- # leave
- add %eax,%esp
- ret
-ENDPROC(salsa20_keysetup)
-
-# enter salsa20_ivsetup
-ENTRY(salsa20_ivsetup)
- mov %esp,%eax
- and $31,%eax
- add $256,%eax
- sub %eax,%esp
- # eax_stack = eax
- movl %eax,64(%esp)
- # ebx_stack = ebx
- movl %ebx,68(%esp)
- # esi_stack = esi
- movl %esi,72(%esp)
- # edi_stack = edi
- movl %edi,76(%esp)
- # ebp_stack = ebp
- movl %ebp,80(%esp)
- # iv = arg2
- movl 8(%esp,%eax),%ecx
- # x = arg1
- movl 4(%esp,%eax),%eax
- # in6 = *(uint32 *) (iv + 0)
- movl 0(%ecx),%edx
- # in7 = *(uint32 *) (iv + 4)
- movl 4(%ecx),%ecx
- # in8 = 0
- mov $0,%ebx
- # in9 = 0
- mov $0,%esi
- # *(uint32 *) (x + 24) = in6
- movl %edx,24(%eax)
- # *(uint32 *) (x + 28) = in7
- movl %ecx,28(%eax)
- # *(uint32 *) (x + 32) = in8
- movl %ebx,32(%eax)
- # *(uint32 *) (x + 36) = in9
- movl %esi,36(%eax)
- # eax = eax_stack
- movl 64(%esp),%eax
- # ebx = ebx_stack
- movl 68(%esp),%ebx
- # esi = esi_stack
- movl 72(%esp),%esi
- # edi = edi_stack
- movl 76(%esp),%edi
- # ebp = ebp_stack
- movl 80(%esp),%ebp
- # leave
- add %eax,%esp
- ret
-ENDPROC(salsa20_ivsetup)
diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
index 10db30d58006..03a4918f41ee 100644
--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
@@ -803,117 +803,3 @@ ENTRY(salsa20_encrypt_bytes)
# goto bytesatleast1
jmp ._bytesatleast1
ENDPROC(salsa20_encrypt_bytes)
-
-# enter salsa20_keysetup
-ENTRY(salsa20_keysetup)
- mov %rsp,%r11
- and $31,%r11
- add $256,%r11
- sub %r11,%rsp
- # k = arg2
- mov %rsi,%rsi
- # kbits = arg3
- mov %rdx,%rdx
- # x = arg1
- mov %rdi,%rdi
- # in0 = *(uint64 *) (k + 0)
- movq 0(%rsi),%r8
- # in2 = *(uint64 *) (k + 8)
- movq 8(%rsi),%r9
- # *(uint64 *) (x + 4) = in0
- movq %r8,4(%rdi)
- # *(uint64 *) (x + 12) = in2
- movq %r9,12(%rdi)
- # unsigned<? kbits - 256
- cmp $256,%rdx
- # comment:fp stack unchanged by jump
- # goto kbits128 if unsigned<
- jb ._kbits128
-# kbits256:
-._kbits256:
- # in10 = *(uint64 *) (k + 16)
- movq 16(%rsi),%rdx
- # in12 = *(uint64 *) (k + 24)
- movq 24(%rsi),%rsi
- # *(uint64 *) (x + 44) = in10
- movq %rdx,44(%rdi)
- # *(uint64 *) (x + 52) = in12
- movq %rsi,52(%rdi)
- # in0 = 1634760805
- mov $1634760805,%rsi
- # in4 = 857760878
- mov $857760878,%rdx
- # in10 = 2036477234
- mov $2036477234,%rcx
- # in14 = 1797285236
- mov $1797285236,%r8
- # *(uint32 *) (x + 0) = in0
- movl %esi,0(%rdi)
- # *(uint32 *) (x + 20) = in4
- movl %edx,20(%rdi)
- # *(uint32 *) (x + 40) = in10
- movl %ecx,40(%rdi)
- # *(uint32 *) (x + 60) = in14
- movl %r8d,60(%rdi)
- # comment:fp stack unchanged by jump
- # goto keysetupdone
- jmp ._keysetupdone
-# kbits128:
-._kbits128:
- # in10 = *(uint64 *) (k + 0)
- movq 0(%rsi),%rdx
- # in12 = *(uint64 *) (k + 8)
- movq 8(%rsi),%rsi
- # *(uint64 *) (x + 44) = in10
- movq %rdx,44(%rdi)
- # *(uint64 *) (x + 52) = in12
- movq %rsi,52(%rdi)
- # in0 = 1634760805
- mov $1634760805,%rsi
- # in4 = 824206446
- mov $824206446,%rdx
- # in10 = 2036477238
- mov $2036477238,%rcx
- # in14 = 1797285236
- mov $1797285236,%r8
- # *(uint32 *) (x + 0) = in0
- movl %esi,0(%rdi)
- # *(uint32 *) (x + 20) = in4
- movl %edx,20(%rdi)
- # *(uint32 *) (x + 40) = in10
- movl %ecx,40(%rdi)
- # *(uint32 *) (x + 60) = in14
- movl %r8d,60(%rdi)
-# keysetupdone:
-._keysetupdone:
- # leave
- add %r11,%rsp
- mov %rdi,%rax
- mov %rsi,%rdx
- ret
-ENDPROC(salsa20_keysetup)
-
-# enter salsa20_ivsetup
-ENTRY(salsa20_ivsetup)
- mov %rsp,%r11
- and $31,%r11
- add $256,%r11
- sub %r11,%rsp
- # iv = arg2
- mov %rsi,%rsi
- # x = arg1
- mov %rdi,%rdi
- # in6 = *(uint64 *) (iv + 0)
- movq 0(%rsi),%rsi
- # in8 = 0
- mov $0,%r8
- # *(uint64 *) (x + 24) = in6
- movq %rsi,24(%rdi)
- # *(uint64 *) (x + 32) = in8
- movq %r8,32(%rdi)
- # leave
- add %r11,%rsp
- mov %rdi,%rax
- mov %rsi,%rdx
- ret
-ENDPROC(salsa20_ivsetup)
diff --git a/arch/x86/crypto/salsa20_glue.c b/arch/x86/crypto/salsa20_glue.c
index cb91a64a99e7..b07d7d959806 100644
--- a/arch/x86/crypto/salsa20_glue.c
+++ b/arch/x86/crypto/salsa20_glue.c
@@ -11,6 +11,9 @@
* - x86-64 version, renamed as salsa20-x86_64-asm_64.S
* available from <http://cr.yp.to/snuffle/salsa20/amd64-3/salsa20.s>
*
+ * Also modified to set up the initial state using the generic C code rather
+ * than in assembly.
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
@@ -18,93 +21,65 @@
*
*/
-#include <crypto/algapi.h>
+#include <asm/unaligned.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/salsa20.h>
#include <linux/module.h>
-#include <linux/crypto.h>
-
-#define SALSA20_IV_SIZE 8U
-#define SALSA20_MIN_KEY_SIZE 16U
-#define SALSA20_MAX_KEY_SIZE 32U
-
-struct salsa20_ctx
-{
- u32 input[16];
-};
-asmlinkage void salsa20_keysetup(struct salsa20_ctx *ctx, const u8 *k,
- u32 keysize, u32 ivsize);
-asmlinkage void salsa20_ivsetup(struct salsa20_ctx *ctx, const u8 *iv);
-asmlinkage void salsa20_encrypt_bytes(struct salsa20_ctx *ctx,
- const u8 *src, u8 *dst, u32 bytes);
+asmlinkage void salsa20_encrypt_bytes(u32 state[16], const u8 *src, u8 *dst,
+ u32 bytes);
-static int setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keysize)
+static int salsa20_asm_crypt(struct skcipher_request *req)
{
- struct salsa20_ctx *ctx = crypto_tfm_ctx(tfm);
- salsa20_keysetup(ctx, key, keysize*8, SALSA20_IV_SIZE*8);
- return 0;
-}
-
-static int encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
-{
- struct blkcipher_walk walk;
- struct crypto_blkcipher *tfm = desc->tfm;
- struct salsa20_ctx *ctx = crypto_blkcipher_ctx(tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct salsa20_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ u32 state[16];
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt_block(desc, &walk, 64);
+ err = skcipher_walk_virt(&walk, req, true);
- salsa20_ivsetup(ctx, walk.iv);
+ crypto_salsa20_init(state, ctx, walk.iv);
- while (walk.nbytes >= 64) {
- salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
- walk.dst.virt.addr,
- walk.nbytes - (walk.nbytes % 64));
- err = blkcipher_walk_done(desc, &walk, walk.nbytes % 64);
- }
+ while (walk.nbytes > 0) {
+ unsigned int nbytes = walk.nbytes;
- if (walk.nbytes) {
- salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
- walk.dst.virt.addr, walk.nbytes);
- err = blkcipher_walk_done(desc, &walk, 0);
+ if (nbytes < walk.total)
+ nbytes = round_down(nbytes, walk.stride);
+
+ salsa20_encrypt_bytes(state, walk.src.virt.addr,
+ walk.dst.virt.addr, nbytes);
+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
}
return err;
}
-static struct crypto_alg alg = {
- .cra_name = "salsa20",
- .cra_driver_name = "salsa20-asm",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_type = &crypto_blkcipher_type,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct salsa20_ctx),
- .cra_alignmask = 3,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .setkey = setkey,
- .encrypt = encrypt,
- .decrypt = encrypt,
- .min_keysize = SALSA20_MIN_KEY_SIZE,
- .max_keysize = SALSA20_MAX_KEY_SIZE,
- .ivsize = SALSA20_IV_SIZE,
- }
- }
+static struct skcipher_alg alg = {
+ .base.cra_name = "salsa20",
+ .base.cra_driver_name = "salsa20-asm",
+ .base.cra_priority = 200,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct salsa20_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = SALSA20_MIN_KEY_SIZE,
+ .max_keysize = SALSA20_MAX_KEY_SIZE,
+ .ivsize = SALSA20_IV_SIZE,
+ .chunksize = SALSA20_BLOCK_SIZE,
+ .setkey = crypto_salsa20_setkey,
+ .encrypt = salsa20_asm_crypt,
+ .decrypt = salsa20_asm_crypt,
};
static int __init init(void)
{
- return crypto_register_alg(&alg);
+ return crypto_register_skcipher(&alg);
}
static void __exit fini(void)
{
- crypto_unregister_alg(&alg);
+ crypto_unregister_skcipher(&alg);
}
module_init(init);
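
The new walk loop keeps one invariant from the old 64-byte version: every chunk passed to salsa20_encrypt_bytes() except the last is a whole number of 64-byte blocks, so the keystream counter never ends up mid-block. A minimal user-space sketch of that rounding rule, with fabricated sizes (none of the kernel symbols are used):

    #include <stdio.h>

    #define STRIDE 64 /* Salsa20 block size in bytes */

    int main(void)
    {
        unsigned int total = 200;  /* hypothetical request size */
        unsigned int cap = 96;     /* hypothetical per-iteration walk limit */
        unsigned int walked = 0;

        while (walked < total) {
            unsigned int nbytes = total - walked;

            if (nbytes > cap)
                nbytes = cap;
            /* every chunk except the final one is rounded down to a
             * whole number of 64-byte blocks, as in the loop above */
            if (walked + nbytes < total)
                nbytes -= nbytes % STRIDE;

            printf("process %u bytes\n", nbytes);
            walked += nbytes;
        }
        return 0;
    }

For total = 200 this prints 64, 64, 72: only the final chunk may be partial.
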
diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
index 1c3b7ceb36d2..e7273a606a07 100644
--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
@@ -55,29 +55,31 @@
#define RAB1bl %bl
#define RAB2bl %cl
+#define CD0 0x0(%rsp)
+#define CD1 0x8(%rsp)
+#define CD2 0x10(%rsp)
+
+# used only before/after all rounds
#define RCD0 %r8
#define RCD1 %r9
#define RCD2 %r10
-#define RCD0d %r8d
-#define RCD1d %r9d
-#define RCD2d %r10d
-
-#define RX0 %rbp
-#define RX1 %r11
-#define RX2 %r12
+# used only during rounds
+#define RX0 %r8
+#define RX1 %r9
+#define RX2 %r10
-#define RX0d %ebp
-#define RX1d %r11d
-#define RX2d %r12d
+#define RX0d %r8d
+#define RX1d %r9d
+#define RX2d %r10d
-#define RY0 %r13
-#define RY1 %r14
-#define RY2 %r15
+#define RY0 %r11
+#define RY1 %r12
+#define RY2 %r13
-#define RY0d %r13d
-#define RY1d %r14d
-#define RY2d %r15d
+#define RY0d %r11d
+#define RY1d %r12d
+#define RY2d %r13d
#define RT0 %rdx
#define RT1 %rsi
@@ -85,6 +87,8 @@
#define RT0d %edx
#define RT1d %esi
+#define RT1bl %sil
+
#define do16bit_ror(rot, op1, op2, T0, T1, tmp1, tmp2, ab, dst) \
movzbl ab ## bl, tmp2 ## d; \
movzbl ab ## bh, tmp1 ## d; \
@@ -92,6 +96,11 @@
op1##l T0(CTX, tmp2, 4), dst ## d; \
op2##l T1(CTX, tmp1, 4), dst ## d;
+#define swap_ab_with_cd(ab, cd, tmp) \
+ movq cd, tmp; \
+ movq ab, cd; \
+ movq tmp, ab;
+
/*
 * Combined G1 & G2 function. Reordered with the help of rotates to have moves
 * at the beginning.
@@ -110,15 +119,15 @@
/* G1,2 && G2,2 */ \
do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 0, x ## 0); \
do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 0, y ## 0); \
- xchgq cd ## 0, ab ## 0; \
+ swap_ab_with_cd(ab ## 0, cd ## 0, RT0); \
\
do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 1, x ## 1); \
do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 1, y ## 1); \
- xchgq cd ## 1, ab ## 1; \
+ swap_ab_with_cd(ab ## 1, cd ## 1, RT0); \
\
do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 2, x ## 2); \
do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 2, y ## 2); \
- xchgq cd ## 2, ab ## 2;
+ swap_ab_with_cd(ab ## 2, cd ## 2, RT0);
#define enc_round_end(ab, x, y, n) \
addl y ## d, x ## d; \
@@ -168,6 +177,16 @@
decrypt_round3(ba, dc, (n*2)+1); \
decrypt_round3(ba, dc, (n*2));
+#define push_cd() \
+ pushq RCD2; \
+ pushq RCD1; \
+ pushq RCD0;
+
+#define pop_cd() \
+ popq RCD0; \
+ popq RCD1; \
+ popq RCD2;
+
#define inpack3(in, n, xy, m) \
movq 4*(n)(in), xy ## 0; \
xorq w+4*m(CTX), xy ## 0; \
@@ -223,11 +242,8 @@ ENTRY(__twofish_enc_blk_3way)
* %rdx: src, RIO
* %rcx: bool, if true: xor output
*/
- pushq %r15;
- pushq %r14;
pushq %r13;
pushq %r12;
- pushq %rbp;
pushq %rbx;
pushq %rcx; /* bool xor */
@@ -235,40 +251,36 @@ ENTRY(__twofish_enc_blk_3way)
inpack_enc3();
- encrypt_cycle3(RAB, RCD, 0);
- encrypt_cycle3(RAB, RCD, 1);
- encrypt_cycle3(RAB, RCD, 2);
- encrypt_cycle3(RAB, RCD, 3);
- encrypt_cycle3(RAB, RCD, 4);
- encrypt_cycle3(RAB, RCD, 5);
- encrypt_cycle3(RAB, RCD, 6);
- encrypt_cycle3(RAB, RCD, 7);
+ push_cd();
+ encrypt_cycle3(RAB, CD, 0);
+ encrypt_cycle3(RAB, CD, 1);
+ encrypt_cycle3(RAB, CD, 2);
+ encrypt_cycle3(RAB, CD, 3);
+ encrypt_cycle3(RAB, CD, 4);
+ encrypt_cycle3(RAB, CD, 5);
+ encrypt_cycle3(RAB, CD, 6);
+ encrypt_cycle3(RAB, CD, 7);
+ pop_cd();
popq RIO; /* dst */
- popq %rbp; /* bool xor */
+ popq RT1; /* bool xor */
- testb %bpl, %bpl;
+ testb RT1bl, RT1bl;
jnz .L__enc_xor3;
outunpack_enc3(mov);
popq %rbx;
- popq %rbp;
popq %r12;
popq %r13;
- popq %r14;
- popq %r15;
ret;
.L__enc_xor3:
outunpack_enc3(xor);
popq %rbx;
- popq %rbp;
popq %r12;
popq %r13;
- popq %r14;
- popq %r15;
ret;
ENDPROC(__twofish_enc_blk_3way)
@@ -278,35 +290,31 @@ ENTRY(twofish_dec_blk_3way)
* %rsi: dst
* %rdx: src, RIO
*/
- pushq %r15;
- pushq %r14;
pushq %r13;
pushq %r12;
- pushq %rbp;
pushq %rbx;
pushq %rsi; /* dst */
inpack_dec3();
- decrypt_cycle3(RAB, RCD, 7);
- decrypt_cycle3(RAB, RCD, 6);
- decrypt_cycle3(RAB, RCD, 5);
- decrypt_cycle3(RAB, RCD, 4);
- decrypt_cycle3(RAB, RCD, 3);
- decrypt_cycle3(RAB, RCD, 2);
- decrypt_cycle3(RAB, RCD, 1);
- decrypt_cycle3(RAB, RCD, 0);
+ push_cd();
+ decrypt_cycle3(RAB, CD, 7);
+ decrypt_cycle3(RAB, CD, 6);
+ decrypt_cycle3(RAB, CD, 5);
+ decrypt_cycle3(RAB, CD, 4);
+ decrypt_cycle3(RAB, CD, 3);
+ decrypt_cycle3(RAB, CD, 2);
+ decrypt_cycle3(RAB, CD, 1);
+ decrypt_cycle3(RAB, CD, 0);
+ pop_cd();
popq RIO; /* dst */
outunpack_dec3();
popq %rbx;
- popq %rbp;
popq %r12;
popq %r13;
- popq %r14;
- popq %r15;
ret;
ENDPROC(twofish_dec_blk_3way)
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index d7d3cc24baf4..1e3883e45687 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -153,6 +153,9 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
if (cached_flags & _TIF_UPROBE)
uprobe_notify_resume(regs);
+ if (cached_flags & _TIF_PATCH_PENDING)
+ klp_update_patch_state(current);
+
/* deal with pending signal delivery */
if (cached_flags & _TIF_SIGPENDING)
do_signal(regs);
@@ -165,9 +168,6 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
if (cached_flags & _TIF_USER_RETURN_NOTIFY)
fire_user_return_notifiers();
- if (cached_flags & _TIF_PATCH_PENDING)
- klp_update_patch_state(current);
-
/* Disable IRQs and retry */
local_irq_disable();
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 60c4c342316c..2a35b1e0fb90 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -252,7 +252,8 @@ ENTRY(__switch_to_asm)
* exist, overwrite the RSB with entries which capture
* speculative execution to prevent attack.
*/
- FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
+ /* Clobbers %ebx */
+ FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
#endif
/* restore callee-saved registers */
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index ff6f8022612c..a83570495162 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -499,7 +499,8 @@ ENTRY(__switch_to_asm)
* exist, overwrite the RSB with entries which capture
* speculative execution to prevent attack.
*/
- FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
+ /* Clobbers %rbx */
+ FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
#endif
/* restore callee-saved registers */
diff --git a/arch/x86/events/amd/power.c b/arch/x86/events/amd/power.c
index a6eee5ac4f58..2aefacf5c5b2 100644
--- a/arch/x86/events/amd/power.c
+++ b/arch/x86/events/amd/power.c
@@ -277,7 +277,7 @@ static int __init amd_power_pmu_init(void)
int ret;
if (!x86_match_cpu(cpu_match))
- return 0;
+ return -ENODEV;
if (!boot_cpu_has(X86_FEATURE_ACC_POWER))
return -ENODEV;
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 8156e47da7ba..18c25ab28557 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -372,10 +372,9 @@ static int alloc_pebs_buffer(int cpu)
static void release_pebs_buffer(int cpu)
{
struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
- struct debug_store *ds = hwev->ds;
void *cea;
- if (!ds || !x86_pmu.pebs)
+ if (!x86_pmu.pebs)
return;
kfree(per_cpu(insn_buffer, cpu));
@@ -384,7 +383,6 @@ static void release_pebs_buffer(int cpu)
/* Clear the fixmap */
cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer;
ds_clear_cea(cea, x86_pmu.pebs_buffer_size);
- ds->pebs_buffer_base = 0;
dsfree_pages(hwev->ds_pebs_vaddr, x86_pmu.pebs_buffer_size);
hwev->ds_pebs_vaddr = NULL;
}
@@ -419,16 +417,14 @@ static int alloc_bts_buffer(int cpu)
static void release_bts_buffer(int cpu)
{
struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
- struct debug_store *ds = hwev->ds;
void *cea;
- if (!ds || !x86_pmu.bts)
+ if (!x86_pmu.bts)
return;
/* Clear the fixmap */
cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer;
ds_clear_cea(cea, BTS_BUFFER_SIZE);
- ds->bts_buffer_base = 0;
dsfree_pages(hwev->ds_bts_vaddr, BTS_BUFFER_SIZE);
hwev->ds_bts_vaddr = NULL;
}
@@ -454,16 +450,22 @@ void release_ds_buffers(void)
if (!x86_pmu.bts && !x86_pmu.pebs)
return;
- get_online_cpus();
- for_each_online_cpu(cpu)
+ for_each_possible_cpu(cpu)
+ release_ds_buffer(cpu);
+
+ for_each_possible_cpu(cpu) {
+ /*
+ * Again, ignore errors from offline CPUs; they will no longer
+ * observe cpu_hw_events.ds and will not program the DS_AREA when
+ * they come up.
+ */
fini_debug_store_on_cpu(cpu);
+ }
for_each_possible_cpu(cpu) {
release_pebs_buffer(cpu);
release_bts_buffer(cpu);
- release_ds_buffer(cpu);
}
- put_online_cpus();
}
void reserve_ds_buffers(void)
@@ -483,8 +485,6 @@ void reserve_ds_buffers(void)
if (!x86_pmu.pebs)
pebs_err = 1;
- get_online_cpus();
-
for_each_possible_cpu(cpu) {
if (alloc_ds_buffer(cpu)) {
bts_err = 1;
@@ -521,11 +521,14 @@ void reserve_ds_buffers(void)
if (x86_pmu.pebs && !pebs_err)
x86_pmu.pebs_active = 1;
- for_each_online_cpu(cpu)
+ for_each_possible_cpu(cpu) {
+ /*
+ * Ignore wrmsr_on_cpu() errors for offline CPUs; they
+ * will get this call through intel_pmu_cpu_starting().
+ */
init_debug_store_on_cpu(cpu);
+ }
}
-
- put_online_cpus();
}
/*
diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
index 14efaa0e8684..18e2628e2d8f 100644
--- a/arch/x86/events/msr.c
+++ b/arch/x86/events/msr.c
@@ -10,7 +10,9 @@ enum perf_msr_id {
PERF_MSR_SMI = 4,
PERF_MSR_PTSC = 5,
PERF_MSR_IRPERF = 6,
-
+ PERF_MSR_THERM = 7,
+ PERF_MSR_THERM_SNAP = 8,
+ PERF_MSR_THERM_UNIT = 9,
PERF_MSR_EVENT_MAX,
};
@@ -29,6 +31,11 @@ static bool test_irperf(int idx)
return boot_cpu_has(X86_FEATURE_IRPERF);
}
+static bool test_therm_status(int idx)
+{
+ return boot_cpu_has(X86_FEATURE_DTHERM);
+}
+
static bool test_intel(int idx)
{
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
@@ -95,22 +102,28 @@ struct perf_msr {
bool (*test)(int idx);
};
-PMU_EVENT_ATTR_STRING(tsc, evattr_tsc, "event=0x00");
-PMU_EVENT_ATTR_STRING(aperf, evattr_aperf, "event=0x01");
-PMU_EVENT_ATTR_STRING(mperf, evattr_mperf, "event=0x02");
-PMU_EVENT_ATTR_STRING(pperf, evattr_pperf, "event=0x03");
-PMU_EVENT_ATTR_STRING(smi, evattr_smi, "event=0x04");
-PMU_EVENT_ATTR_STRING(ptsc, evattr_ptsc, "event=0x05");
-PMU_EVENT_ATTR_STRING(irperf, evattr_irperf, "event=0x06");
+PMU_EVENT_ATTR_STRING(tsc, evattr_tsc, "event=0x00" );
+PMU_EVENT_ATTR_STRING(aperf, evattr_aperf, "event=0x01" );
+PMU_EVENT_ATTR_STRING(mperf, evattr_mperf, "event=0x02" );
+PMU_EVENT_ATTR_STRING(pperf, evattr_pperf, "event=0x03" );
+PMU_EVENT_ATTR_STRING(smi, evattr_smi, "event=0x04" );
+PMU_EVENT_ATTR_STRING(ptsc, evattr_ptsc, "event=0x05" );
+PMU_EVENT_ATTR_STRING(irperf, evattr_irperf, "event=0x06" );
+PMU_EVENT_ATTR_STRING(cpu_thermal_margin, evattr_therm, "event=0x07" );
+PMU_EVENT_ATTR_STRING(cpu_thermal_margin.snapshot, evattr_therm_snap, "1" );
+PMU_EVENT_ATTR_STRING(cpu_thermal_margin.unit, evattr_therm_unit, "C" );
static struct perf_msr msr[] = {
- [PERF_MSR_TSC] = { 0, &evattr_tsc, NULL, },
- [PERF_MSR_APERF] = { MSR_IA32_APERF, &evattr_aperf, test_aperfmperf, },
- [PERF_MSR_MPERF] = { MSR_IA32_MPERF, &evattr_mperf, test_aperfmperf, },
- [PERF_MSR_PPERF] = { MSR_PPERF, &evattr_pperf, test_intel, },
- [PERF_MSR_SMI] = { MSR_SMI_COUNT, &evattr_smi, test_intel, },
- [PERF_MSR_PTSC] = { MSR_F15H_PTSC, &evattr_ptsc, test_ptsc, },
- [PERF_MSR_IRPERF] = { MSR_F17H_IRPERF, &evattr_irperf, test_irperf, },
+ [PERF_MSR_TSC] = { 0, &evattr_tsc, NULL, },
+ [PERF_MSR_APERF] = { MSR_IA32_APERF, &evattr_aperf, test_aperfmperf, },
+ [PERF_MSR_MPERF] = { MSR_IA32_MPERF, &evattr_mperf, test_aperfmperf, },
+ [PERF_MSR_PPERF] = { MSR_PPERF, &evattr_pperf, test_intel, },
+ [PERF_MSR_SMI] = { MSR_SMI_COUNT, &evattr_smi, test_intel, },
+ [PERF_MSR_PTSC] = { MSR_F15H_PTSC, &evattr_ptsc, test_ptsc, },
+ [PERF_MSR_IRPERF] = { MSR_F17H_IRPERF, &evattr_irperf, test_irperf, },
+ [PERF_MSR_THERM] = { MSR_IA32_THERM_STATUS, &evattr_therm, test_therm_status, },
+ [PERF_MSR_THERM_SNAP] = { MSR_IA32_THERM_STATUS, &evattr_therm_snap, test_therm_status, },
+ [PERF_MSR_THERM_UNIT] = { MSR_IA32_THERM_STATUS, &evattr_therm_unit, test_therm_status, },
};
static struct attribute *events_attrs[PERF_MSR_EVENT_MAX + 1] = {
@@ -161,9 +174,9 @@ static int msr_event_init(struct perf_event *event)
if (!msr[cfg].attr)
return -EINVAL;
- event->hw.idx = -1;
- event->hw.event_base = msr[cfg].msr;
- event->hw.config = cfg;
+ event->hw.idx = -1;
+ event->hw.event_base = msr[cfg].msr;
+ event->hw.config = cfg;
return 0;
}
@@ -184,7 +197,7 @@ static void msr_event_update(struct perf_event *event)
u64 prev, now;
s64 delta;
- /* Careful, an NMI might modify the previous event value. */
+ /* Careful, an NMI might modify the previous event value: */
again:
prev = local64_read(&event->hw.prev_count);
now = msr_read_counter(event);
@@ -193,17 +206,22 @@ again:
goto again;
delta = now - prev;
- if (unlikely(event->hw.event_base == MSR_SMI_COUNT))
+ if (unlikely(event->hw.event_base == MSR_SMI_COUNT)) {
delta = sign_extend64(delta, 31);
-
- local64_add(delta, &event->count);
+ local64_add(delta, &event->count);
+ } else if (unlikely(event->hw.event_base == MSR_IA32_THERM_STATUS)) {
+ /* If valid, extract digital readout, otherwise set to -1: */
+ now = now & (1ULL << 31) ? (now >> 16) & 0x3f : -1;
+ local64_set(&event->count, now);
+ } else {
+ local64_add(delta, &event->count);
+ }
}
static void msr_event_start(struct perf_event *event, int flags)
{
- u64 now;
+ u64 now = msr_read_counter(event);
- now = msr_read_counter(event);
local64_set(&event->hw.prev_count, now);
}
@@ -250,9 +268,7 @@ static int __init msr_init(void)
for (i = PERF_MSR_TSC + 1; i < PERF_MSR_EVENT_MAX; i++) {
u64 val;
- /*
- * Virt sucks arse; you cannot tell if a R/O MSR is present :/
- */
+ /* Virt sucks; you cannot tell if an R/O MSR is present :/ */
if (!msr[i].test(i) || rdmsrl_safe(msr[i].msr, &val))
msr[i].attr = NULL;
}
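
The IA32_THERM_STATUS branch above treats the MSR image as a snapshot rather than a counter: bit 31 says whether the reading is valid, and bits 21:16 are reported as the thermal margin in degrees C (hence the "C" unit attribute). A self-contained sketch of the same decode on a fabricated value:

    #include <stdint.h>
    #include <stdio.h>

    /* same decode as above: bit 31 flags a valid reading, bits 21:16
     * carry the margin in degrees C; return -1 when invalid */
    static int64_t therm_margin(uint64_t msr)
    {
        return (msr & (1ULL << 31)) ? (int64_t)((msr >> 16) & 0x3f) : -1;
    }

    int main(void)
    {
        uint64_t sample = (1ULL << 31) | (25ULL << 16); /* fabricated image */

        printf("valid reading: %lld C\n", (long long)therm_margin(sample));
        printf("invalid reading: %lld\n", (long long)therm_margin(0));
        return 0;
    }
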
diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c
index 9cc9e1c1e2db..56c9ebac946f 100644
--- a/arch/x86/hyperv/mmu.c
+++ b/arch/x86/hyperv/mmu.c
@@ -137,7 +137,12 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
}
if (info->mm) {
+ /*
+ * AddressSpace argument must match the CR3 with PCID bits
+ * stripped out.
+ */
flush->address_space = virt_to_phys(info->mm->pgd);
+ flush->address_space &= CR3_ADDR_MASK;
flush->flags = 0;
} else {
flush->address_space = 0;
@@ -219,7 +224,12 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
}
if (info->mm) {
+ /*
+ * AddressSpace argument must match the CR3 with PCID bits
+ * stripped out.
+ */
flush->address_space = virt_to_phys(info->mm->pgd);
+ flush->address_space &= CR3_ADDR_MASK;
flush->flags = 0;
} else {
flush->address_space = 0;
@@ -278,8 +288,6 @@ void hyperv_setup_mmu_ops(void)
if (!(ms_hyperv.hints & HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED))
return;
- setup_clear_cpu_cap(X86_FEATURE_PCID);
-
if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED)) {
pr_info("Using hypercall for remote TLB flush\n");
pv_mmu_ops.flush_tlb_others = hyperv_flush_tlb_others;
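
With PCID no longer force-disabled, the guest's CR3 can carry an ASID in its low bits, so the hypercall argument has to be masked back to the bare page-directory address. A standalone sketch of that masking with an invented CR3 image (the mask is restated locally rather than pulled from kernel headers):

    #include <stdint.h>
    #include <stdio.h>

    /* mirrors CR3_ADDR_MASK: keep the page-aligned PGD address,
     * drop the low 12 PCID/flag bits */
    #define LOCAL_CR3_ADDR_MASK 0x7FFFFFFFFFFFF000ULL

    int main(void)
    {
        uint64_t cr3 = 0x1234567000ULL | 0x005; /* invented PGD | PCID 5 */

        printf("address_space = %#llx\n",
               (unsigned long long)(cr3 & LOCAL_CR3_ADDR_MASK));
        return 0;
    }
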
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 8d0ec9df1cbe..44f5d79d5105 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -49,7 +49,7 @@ extern int acpi_fix_pin2_polarity;
extern int acpi_disable_cmcff;
extern u8 acpi_sci_flags;
-extern int acpi_sci_override_gsi;
+extern u32 acpi_sci_override_gsi;
void acpi_pic_sci_set_trigger(unsigned int, u16);
struct device;
diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h
index 0927cdc4f946..4d111616524b 100644
--- a/arch/x86/include/asm/asm-prototypes.h
+++ b/arch/x86/include/asm/asm-prototypes.h
@@ -38,5 +38,7 @@ INDIRECT_THUNK(dx)
INDIRECT_THUNK(si)
INDIRECT_THUNK(di)
INDIRECT_THUNK(bp)
-INDIRECT_THUNK(sp)
+asmlinkage void __fill_rsb(void);
+asmlinkage void __clear_rsb(void);
+
#endif /* CONFIG_RETPOLINE */
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index 2cbd75dd2fd3..e1c8dab86670 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -127,88 +127,6 @@ typedef u32 compat_old_sigset_t; /* at least 32 bits */
typedef u32 compat_sigset_word;
-typedef union compat_sigval {
- compat_int_t sival_int;
- compat_uptr_t sival_ptr;
-} compat_sigval_t;
-
-typedef struct compat_siginfo {
- int si_signo;
- int si_errno;
- int si_code;
-
- union {
- int _pad[128/sizeof(int) - 3];
-
- /* kill() */
- struct {
- unsigned int _pid; /* sender's pid */
- unsigned int _uid; /* sender's uid */
- } _kill;
-
- /* POSIX.1b timers */
- struct {
- compat_timer_t _tid; /* timer id */
- int _overrun; /* overrun count */
- compat_sigval_t _sigval; /* same as below */
- int _sys_private; /* not to be passed to user */
- int _overrun_incr; /* amount to add to overrun */
- } _timer;
-
- /* POSIX.1b signals */
- struct {
- unsigned int _pid; /* sender's pid */
- unsigned int _uid; /* sender's uid */
- compat_sigval_t _sigval;
- } _rt;
-
- /* SIGCHLD */
- struct {
- unsigned int _pid; /* which child */
- unsigned int _uid; /* sender's uid */
- int _status; /* exit code */
- compat_clock_t _utime;
- compat_clock_t _stime;
- } _sigchld;
-
- /* SIGCHLD (x32 version) */
- struct {
- unsigned int _pid; /* which child */
- unsigned int _uid; /* sender's uid */
- int _status; /* exit code */
- compat_s64 _utime;
- compat_s64 _stime;
- } _sigchld_x32;
-
- /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
- struct {
- unsigned int _addr; /* faulting insn/memory ref. */
- short int _addr_lsb; /* Valid LSB of the reported address. */
- union {
- /* used when si_code=SEGV_BNDERR */
- struct {
- compat_uptr_t _lower;
- compat_uptr_t _upper;
- } _addr_bnd;
- /* used when si_code=SEGV_PKUERR */
- compat_u32 _pkey;
- };
- } _sigfault;
-
- /* SIGPOLL */
- struct {
- int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
- int _fd;
- } _sigpoll;
-
- struct {
- unsigned int _call_addr; /* calling insn */
- int _syscall; /* triggering system call number */
- unsigned int _arch; /* AUDIT_ARCH_* of syscall */
- } _sigsys;
- } _sifields;
-} compat_siginfo_t;
-
#define COMPAT_OFF_T_MAX 0x7fffffff
struct compat_ipc64_perm {
@@ -331,4 +249,8 @@ static inline bool in_compat_syscall(void)
}
#define in_compat_syscall in_compat_syscall /* override the generic impl */
+struct compat_siginfo;
+int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
+ const siginfo_t *from, bool x32_ABI);
+
#endif /* _ASM_X86_COMPAT_H */
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index ea9a7dde62e5..70eddb3922ff 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -29,6 +29,7 @@ enum cpuid_leafs
CPUID_8000_000A_EDX,
CPUID_7_ECX,
CPUID_8000_0007_EBX,
+ CPUID_7_EDX,
};
#ifdef CONFIG_X86_FEATURE_NAMES
@@ -79,8 +80,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 15, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 16, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 17, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 18, feature_bit) || \
REQUIRED_MASK_CHECK || \
- BUILD_BUG_ON_ZERO(NCAPINTS != 18))
+ BUILD_BUG_ON_ZERO(NCAPINTS != 19))
#define DISABLED_MASK_BIT_SET(feature_bit) \
( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 0, feature_bit) || \
@@ -101,8 +103,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 15, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 16, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 17, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 18, feature_bit) || \
DISABLED_MASK_CHECK || \
- BUILD_BUG_ON_ZERO(NCAPINTS != 18))
+ BUILD_BUG_ON_ZERO(NCAPINTS != 19))
#define cpu_has(c, bit) \
(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 25b9375c1484..1d9199e1c2ad 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -13,7 +13,7 @@
/*
* Defines x86 CPU feature bits
*/
-#define NCAPINTS 18 /* N 32-bit words worth of info */
+#define NCAPINTS 19 /* N 32-bit words worth of info */
#define NBUGINTS 1 /* N 32-bit bug flags */
/*
@@ -203,14 +203,15 @@
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
#define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */
-#define X86_FEATURE_RETPOLINE ( 7*32+12) /* Generic Retpoline mitigation for Spectre variant 2 */
-#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* AMD Retpoline mitigation for Spectre variant 2 */
+#define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
+#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
-#define X86_FEATURE_AVX512_4VNNIW ( 7*32+16) /* AVX-512 Neural Network Instructions */
-#define X86_FEATURE_AVX512_4FMAPS ( 7*32+17) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_CDP_L2 ( 7*32+15) /* Code and Data Prioritization L2 */
#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
-#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* Fill RSB on context switches */
+#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
+
+#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
@@ -271,6 +272,9 @@
#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
#define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */
#define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */
+#define X86_FEATURE_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */
+#define X86_FEATURE_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */
+#define X86_FEATURE_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */
/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
@@ -319,6 +323,13 @@
#define X86_FEATURE_SUCCOR (17*32+ 1) /* Uncorrectable error containment and recovery */
#define X86_FEATURE_SMCA (17*32+ 3) /* Scalable MCA */
+/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
+#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
+#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
+#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
+
/*
* BUG word(s)
*/
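
Each X86_FEATURE_* constant encodes word*32 + bit, so extending the capability table to word 18 is mechanical: bump NCAPINTS and give the new word matching REQUIRED/DISABLED mask entries. A quick sketch of the encoding arithmetic, reusing one definition from above:

    #include <stdio.h>

    #define X86_FEATURE_SPEC_CTRL (18*32 + 26) /* as defined above */

    int main(void)
    {
        unsigned int feature = X86_FEATURE_SPEC_CTRL;

        /* word selects the 32-bit capability array entry,
         * bit selects the flag inside that word */
        printf("word %u, bit %u\n", feature / 32, feature % 32);
        return 0;
    }
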
diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
index b027633e7300..33833d1909af 100644
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -77,6 +77,7 @@
#define DISABLED_MASK15 0
#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57|DISABLE_UMIP)
#define DISABLED_MASK17 0
-#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
+#define DISABLED_MASK18 0
+#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)
#endif /* _ASM_X86_DISABLED_FEATURES_H */
diff --git a/arch/x86/include/asm/dma-direct.h b/arch/x86/include/asm/dma-direct.h
new file mode 100644
index 000000000000..1295bc622ebe
--- /dev/null
+++ b/arch/x86/include/asm/dma-direct.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ASM_X86_DMA_DIRECT_H
+#define ASM_X86_DMA_DIRECT_H 1
+
+#include <linux/mem_encrypt.h>
+
+#ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */
+bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
+#else
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+ if (!dev->dma_mask)
+ return 0;
+
+ return addr + size - 1 <= *dev->dma_mask;
+}
+
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ return __sme_set(paddr);
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+ return __sme_clr(daddr);
+}
+#endif /* CONFIG_X86_DMA_REMAP */
+#endif /* ASM_X86_DMA_DIRECT_H */
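
The moved dma_capable() helper is an end-of-range check: the last byte of the mapping, addr + size - 1, must not exceed the device's DMA mask, and a device with no mask at all cannot do DMA. A user-space sketch of the same predicate, with the device structure replaced by a bare mask pointer:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t dma_addr_t;

    /* same check as dma_capable(), with the mask passed directly */
    static bool range_capable(const uint64_t *dma_mask,
                              dma_addr_t addr, size_t size)
    {
        if (!dma_mask)
            return false;
        return addr + size - 1 <= *dma_mask;
    }

    int main(void)
    {
        uint64_t mask32 = 0xFFFFFFFFULL; /* 32-bit-capable device */

        printf("%d\n", range_capable(&mask32, 0xFFFFF000, 0x1000)); /* 1 */
        printf("%d\n", range_capable(&mask32, 0xFFFFF000, 0x2000)); /* 0 */
        return 0;
    }
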
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 0350d99bb8fd..6277c83c0eb1 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -12,7 +12,6 @@
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <linux/dma-contiguous.h>
-#include <linux/mem_encrypt.h>
#ifdef CONFIG_ISA
# define ISA_DMA_BIT_MASK DMA_BIT_MASK(24)
@@ -31,6 +30,9 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
return dma_ops;
}
+int arch_dma_supported(struct device *dev, u64 mask);
+#define arch_dma_supported arch_dma_supported
+
bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp);
#define arch_dma_alloc_attrs arch_dma_alloc_attrs
@@ -42,31 +44,6 @@ extern void dma_generic_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_addr,
unsigned long attrs);
-#ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */
-extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
-extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
-extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
-#else
-
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
- if (!dev->dma_mask)
- return 0;
-
- return addr + size - 1 <= *dev->dma_mask;
-}
-
-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
- return __sme_set(paddr);
-}
-
-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
-{
- return __sme_clr(daddr);
-}
-#endif /* CONFIG_X86_DMA_REMAP */
-
static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
gfp_t gfp)
{
diff --git a/arch/x86/include/asm/fpu/signal.h b/arch/x86/include/asm/fpu/signal.h
index 4df2754ef380..44bbc39a57b3 100644
--- a/arch/x86/include/asm/fpu/signal.h
+++ b/arch/x86/include/asm/fpu/signal.h
@@ -20,12 +20,6 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
# define ia32_setup_rt_frame __setup_rt_frame
#endif
-#ifdef CONFIG_COMPAT
-int __copy_siginfo_to_user32(compat_siginfo_t __user *to,
- const siginfo_t *from, bool x32_ABI);
-#endif
-
-
extern void convert_from_fxsr(struct user_i387_ia32_struct *env,
struct task_struct *tsk);
extern void convert_to_fxsr(struct task_struct *tsk,
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
index 96aa6b9884dc..8c5aaba6633f 100644
--- a/arch/x86/include/asm/hypervisor.h
+++ b/arch/x86/include/asm/hypervisor.h
@@ -28,6 +28,7 @@ enum x86_hypervisor_type {
X86_HYPER_XEN_PV,
X86_HYPER_XEN_HVM,
X86_HYPER_KVM,
+ X86_HYPER_JAILHOUSE,
};
#ifdef CONFIG_HYPERVISOR_GUEST
diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
index c8376b40e882..5cdcdbd4d892 100644
--- a/arch/x86/include/asm/i8259.h
+++ b/arch/x86/include/asm/i8259.h
@@ -69,6 +69,11 @@ struct legacy_pic {
extern struct legacy_pic *legacy_pic;
extern struct legacy_pic null_legacy_pic;
+static inline bool has_legacy_pic(void)
+{
+ return legacy_pic != &null_legacy_pic;
+}
+
static inline int nr_legacy_irqs(void)
{
return legacy_pic->nr_legacy_irqs;
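
has_legacy_pic() packages the legacy_pic != &null_legacy_pic test so platform code can probe for the PC/AT 8259 in one call. A hedged sketch of the intended call pattern; the probing function itself is hypothetical:

    /* hypothetical probe: bail out on platforms without the 8259 PIC */
    static int legacy_timer_probe(void)
    {
        if (!has_legacy_pic())
            return -ENODEV;
        /* ... program the legacy timer ... */
        return 0;
    }
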
diff --git a/arch/x86/include/asm/jailhouse_para.h b/arch/x86/include/asm/jailhouse_para.h
new file mode 100644
index 000000000000..875b54376689
--- /dev/null
+++ b/arch/x86/include/asm/jailhouse_para.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Jailhouse paravirt_ops implementation
+ *
+ * Copyright (c) Siemens AG, 2015-2017
+ *
+ * Authors:
+ * Jan Kiszka <jan.kiszka@siemens.com>
+ */
+
+#ifndef _ASM_X86_JAILHOUSE_PARA_H
+#define _ASM_X86_JAILHOUSE_PARA_H
+
+#include <linux/types.h>
+
+#ifdef CONFIG_JAILHOUSE_GUEST
+bool jailhouse_paravirt(void);
+#else
+static inline bool jailhouse_paravirt(void)
+{
+ return false;
+}
+#endif
+
+#endif /* _ASM_X86_JAILHOUSE_PARA_H */
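
jailhouse_paravirt() collapses to a constant false unless CONFIG_JAILHOUSE_GUEST is enabled, letting callers guard cell-specific paths without #ifdef clutter. A hedged usage sketch; the surrounding probe function is hypothetical:

    /* hypothetical: skip hardware a Jailhouse cell never exposes */
    static int platform_hw_probe(void)
    {
        if (jailhouse_paravirt())
            return -ENODEV;
        /* ... probe the real hardware ... */
        return 0;
    }
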
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index b1e8d8db921f..96ea4b5ba658 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -376,6 +376,7 @@ struct smca_bank {
extern struct smca_bank smca_banks[MAX_NR_BANKS];
extern const char *smca_get_long_name(enum smca_bank_types t);
+extern bool amd_mce_is_memory_error(struct mce *m);
extern int mce_threshold_create_device(unsigned int cpu);
extern int mce_threshold_remove_device(unsigned int cpu);
@@ -384,6 +385,7 @@ extern int mce_threshold_remove_device(unsigned int cpu);
static inline int mce_threshold_create_device(unsigned int cpu) { return 0; };
static inline int mce_threshold_remove_device(unsigned int cpu) { return 0; };
+static inline bool amd_mce_is_memory_error(struct mce *m) { return false; };
#endif
diff --git a/arch/x86/include/asm/mpspec_def.h b/arch/x86/include/asm/mpspec_def.h
index a6bec8028480..6fb923a34309 100644
--- a/arch/x86/include/asm/mpspec_def.h
+++ b/arch/x86/include/asm/mpspec_def.h
@@ -128,9 +128,17 @@ enum mp_irq_source_types {
mp_ExtINT = 3
};
-#define MP_IRQDIR_DEFAULT 0
-#define MP_IRQDIR_HIGH 1
-#define MP_IRQDIR_LOW 3
+#define MP_IRQPOL_DEFAULT 0x0
+#define MP_IRQPOL_ACTIVE_HIGH 0x1
+#define MP_IRQPOL_RESERVED 0x2
+#define MP_IRQPOL_ACTIVE_LOW 0x3
+#define MP_IRQPOL_MASK 0x3
+
+#define MP_IRQTRIG_DEFAULT 0x0
+#define MP_IRQTRIG_EDGE 0x4
+#define MP_IRQTRIG_RESERVED 0x8
+#define MP_IRQTRIG_LEVEL 0xc
+#define MP_IRQTRIG_MASK 0xc
#define MP_APIC_ALL 0xFF
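
The renaming makes explicit that the MP-table IRQ flag byte holds two independent 2-bit fields, polarity in bits 1:0 and trigger mode in bits 3:2, which is what the new _MASK values select. A standalone decode of a fabricated flag byte, with the relevant constants restated:

    #include <stdio.h>

    #define MP_IRQPOL_ACTIVE_LOW 0x3
    #define MP_IRQPOL_MASK       0x3
    #define MP_IRQTRIG_LEVEL     0xc
    #define MP_IRQTRIG_MASK      0xc

    int main(void)
    {
        unsigned int irqflag = 0xf; /* fabricated: active low, level */

        printf("active low: %d\n",
               (irqflag & MP_IRQPOL_MASK) == MP_IRQPOL_ACTIVE_LOW);
        printf("level triggered: %d\n",
               (irqflag & MP_IRQTRIG_MASK) == MP_IRQTRIG_LEVEL);
        return 0;
    }
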
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index e7b983a35506..e520a1e6fc11 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -39,6 +39,13 @@
/* Intel MSRs. Some also available on other CPUs */
+#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
+#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
+#define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */
+
+#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
+#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
+
#define MSR_PPIN_CTL 0x0000004e
#define MSR_PPIN 0x0000004f
@@ -57,6 +64,11 @@
#define SNB_C3_AUTO_UNDEMOTE (1UL << 28)
#define MSR_MTRRcap 0x000000fe
+
+#define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
+#define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */
+#define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */
+
#define MSR_IA32_BBL_CR_CTL 0x00000119
#define MSR_IA32_BBL_CR_CTL3 0x0000011e
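
ARCH_CAP_RDCL_NO is the CPU asserting that it is not vulnerable to Meltdown, and ARCH_CAP_IBRS_ALL advertises enhanced IBRS; both live in the new IA32_ARCH_CAPABILITIES MSR, which is only safe to read once the matching CPUID bit is set. A hedged kernel-style sketch of such a probe (the surrounding code is assumed, not shown in this excerpt):

    u64 ia32_cap = 0;

    if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
        rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);

    if (ia32_cap & ARCH_CAP_RDCL_NO) {
        /* hardware claims immunity to Meltdown (RDCL) */
    }
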
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 4ad41087ce0e..d15d471348b8 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -1,56 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __NOSPEC_BRANCH_H__
-#define __NOSPEC_BRANCH_H__
+#ifndef _ASM_X86_NOSPEC_BRANCH_H_
+#define _ASM_X86_NOSPEC_BRANCH_H_
#include <asm/alternative.h>
#include <asm/alternative-asm.h>
#include <asm/cpufeatures.h>
-/*
- * Fill the CPU return stack buffer.
- *
- * Each entry in the RSB, if used for a speculative 'ret', contains an
- * infinite 'pause; lfence; jmp' loop to capture speculative execution.
- *
- * This is required in various cases for retpoline and IBRS-based
- * mitigations for the Spectre variant 2 vulnerability. Sometimes to
- * eliminate potentially bogus entries from the RSB, and sometimes
- * purely to ensure that it doesn't get empty, which on some CPUs would
- * allow predictions from other (unwanted!) sources to be used.
- *
- * We define a CPP macro such that it can be used from both .S files and
- * inline assembly. It's possible to do a .macro and then include that
- * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
- */
-
-#define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */
-#define RSB_FILL_LOOPS 16 /* To avoid underflow */
-
-/*
- * Google experimented with loop-unrolling and this turned out to be
- * the optimal version — two calls, each with their own speculation
- * trap should their return address end up getting used, in a loop.
- */
-#define __FILL_RETURN_BUFFER(reg, nr, sp) \
- mov $(nr/2), reg; \
-771: \
- call 772f; \
-773: /* speculation trap */ \
- pause; \
- lfence; \
- jmp 773b; \
-772: \
- call 774f; \
-775: /* speculation trap */ \
- pause; \
- lfence; \
- jmp 775b; \
-774: \
- dec reg; \
- jnz 771b; \
- add $(BITS_PER_LONG/8) * nr, sp;
-
#ifdef __ASSEMBLY__
/*
@@ -121,17 +77,10 @@
#endif
.endm
- /*
- * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
- * monstrosity above, manually.
- */
-.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
+/* This clobbers the BX register */
+.macro FILL_RETURN_BUFFER nr:req ftr:req
#ifdef CONFIG_RETPOLINE
- ANNOTATE_NOSPEC_ALTERNATIVE
- ALTERNATIVE "jmp .Lskip_rsb_\@", \
- __stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)) \
- \ftr
-.Lskip_rsb_\@:
+ ALTERNATIVE "", "call __clear_rsb", \ftr
#endif
.endm
@@ -206,17 +155,20 @@ extern char __indirect_thunk_end[];
static inline void vmexit_fill_RSB(void)
{
#ifdef CONFIG_RETPOLINE
- unsigned long loops;
-
- asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
- ALTERNATIVE("jmp 910f",
- __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
- X86_FEATURE_RETPOLINE)
- "910:"
- : "=r" (loops), ASM_CALL_CONSTRAINT
- : : "memory" );
+ alternative_input("",
+ "call __fill_rsb",
+ X86_FEATURE_RETPOLINE,
+ ASM_NO_INPUT_CLOBBER(_ASM_BX, "memory"));
#endif
}
+static inline void indirect_branch_prediction_barrier(void)
+{
+ alternative_input("",
+ "call __ibp_barrier",
+ X86_FEATURE_USE_IBPB,
+ ASM_NO_INPUT_CLOBBER("eax", "ecx", "edx", "memory"));
+}
+
#endif /* __ASSEMBLY__ */
-#endif /* __NOSPEC_BRANCH_H__ */
+#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */
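
indirect_branch_prediction_barrier() is patched to a call to __ibp_barrier() only when X86_FEATURE_USE_IBPB is set; the helper's body lies outside this excerpt. Given the MSR constants added to msr-index.h above, it plausibly reduces to a single MSR write, roughly as in this sketch (not the verified implementation):

    /* sketch only, assuming the msr-index.h constants above; the real
     * body of __ibp_barrier() is not shown in this excerpt */
    void __ibp_barrier(void)
    {
        __wrmsr(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, 0);
    }
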
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index d3a67fba200a..efbde088a718 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -971,4 +971,7 @@ bool xen_set_default_idle(void);
void stop_this_cpu(void *dummy);
void df_debug(struct pt_regs *regs, long error_code);
+
+void __ibp_barrier(void);
+
#endif /* _ASM_X86_PROCESSOR_H */
diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h
index d91ba04dd007..fb3a6de7440b 100644
--- a/arch/x86/include/asm/required-features.h
+++ b/arch/x86/include/asm/required-features.h
@@ -106,6 +106,7 @@
#define REQUIRED_MASK15 0
#define REQUIRED_MASK16 (NEED_LA57)
#define REQUIRED_MASK17 0
-#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18)
+#define REQUIRED_MASK18 0
+#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)
#endif /* _ASM_X86_REQUIRED_FEATURES_H */
diff --git a/arch/x86/include/asm/swiotlb.h b/arch/x86/include/asm/swiotlb.h
index bdf9aed40403..1c6a6cb230ff 100644
--- a/arch/x86/include/asm/swiotlb.h
+++ b/arch/x86/include/asm/swiotlb.h
@@ -28,8 +28,6 @@ static inline void pci_swiotlb_late_init(void)
}
#endif
-static inline void dma_mark_clean(void *addr, size_t size) {}
-
extern void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
unsigned long attrs);
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 00223333821a..d25a638a2720 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -62,8 +62,6 @@ struct thread_info {
.flags = 0, \
}
-#define init_stack (init_thread_union.stack)
-
#else /* !__ASSEMBLY__ */
#include <asm/asm-offsets.h>
diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h
index 74f4c2ff6427..d8bfa98fca98 100644
--- a/arch/x86/include/asm/uprobes.h
+++ b/arch/x86/include/asm/uprobes.h
@@ -53,6 +53,10 @@ struct arch_uprobe {
u8 fixups;
u8 ilen;
} defparam;
+ struct {
+ u8 reg_offset; /* to the start of pt_regs */
+ u8 ilen;
+ } push;
};
};
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index 7cac79802ad2..7803114aa140 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -48,7 +48,6 @@
#define UV2_NET_ENDPOINT_INTD 0x28
#define UV_NET_ENDPOINT_INTD (is_uv1_hub() ? \
UV1_NET_ENDPOINT_INTD : UV2_NET_ENDPOINT_INTD)
-#define UV_DESC_PSHIFT 49
#define UV_PAYLOADQ_GNODE_SHIFT 49
#define UV_PTC_BASENAME "sgi_uv/ptc_statistics"
#define UV_BAU_BASENAME "sgi_uv/bau_tunables"
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 036e26d63d9a..44cf6d6deb7a 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -241,6 +241,7 @@ static inline int uv_hub_info_check(int version)
#define UV2_HUB_REVISION_BASE 3
#define UV3_HUB_REVISION_BASE 5
#define UV4_HUB_REVISION_BASE 7
+#define UV4A_HUB_REVISION_BASE 8 /* UV4 (fixed) rev 2 */
#ifdef UV1_HUB_IS_SUPPORTED
static inline int is_uv1_hub(void)
@@ -280,6 +281,19 @@ static inline int is_uv3_hub(void)
}
#endif
+/* First test "is UV4A", then "is UV4" */
+#ifdef UV4A_HUB_IS_SUPPORTED
+static inline int is_uv4a_hub(void)
+{
+ return (uv_hub_info->hub_revision >= UV4A_HUB_REVISION_BASE);
+}
+#else
+static inline int is_uv4a_hub(void)
+{
+ return 0;
+}
+#endif
+
#ifdef UV4_HUB_IS_SUPPORTED
static inline int is_uv4_hub(void)
{
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index 548d684a7960..ecb9ddef128f 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -39,9 +39,11 @@
* #define UV2Hxxx b
* #define UV3Hxxx c
* #define UV4Hxxx d
+ * #define UV4AHxxx e
* #define UVHxxx (is_uv1_hub() ? UV1Hxxx :
* (is_uv2_hub() ? UV2Hxxx :
* (is_uv3_hub() ? UV3Hxxx :
+ * (is_uv4a_hub() ? UV4AHxxx :
* UV4Hxxx))
*
 * If the MMR exists on all hub types > 1 but has different addresses, the
@@ -49,8 +51,10 @@
* #define UV2Hxxx b
* #define UV3Hxxx c
* #define UV4Hxxx d
+ * #define UV4AHxxx e
* #define UVHxxx (is_uv2_hub() ? UV2Hxxx :
* (is_uv3_hub() ? UV3Hxxx :
+ * (is_uv4a_hub() ? UV4AHxxx :
* UV4Hxxx))
*
* union uvh_xxx {
@@ -63,6 +67,7 @@
* } s2;
* struct uv3h_xxx_s { # Full UV3 definition (*)
* } s3;
+ * (NOTE: No struct uv4ah_xxx_s members exist)
* struct uv4h_xxx_s { # Full UV4 definition (*)
* } s4;
* };
@@ -99,6 +104,7 @@
#define UV2_HUB_IS_SUPPORTED 1
#define UV3_HUB_IS_SUPPORTED 1
#define UV4_HUB_IS_SUPPORTED 1
+#define UV4A_HUB_IS_SUPPORTED 1
/* Error function to catch undefined references */
extern unsigned long uv_undefined(char *str);
@@ -2779,35 +2785,47 @@ union uvh_lb_bau_sb_activation_status_1_u {
/*is_uv4_hub*/ UV4H_LB_BAU_SB_DESCRIPTOR_BASE_32)
#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_SHFT 12
-#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 49
-#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK 0x7ffe000000000000UL
+#define UV1H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 49
#define UV1H_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x000007fffffff000UL
+#define UV1H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK 0x7ffe000000000000UL
-
+#define UV2H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 49
#define UV2H_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x000007fffffff000UL
+#define UV2H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK 0x7ffe000000000000UL
+#define UV3H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 49
#define UV3H_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x000007fffffff000UL
+#define UV3H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK 0x7ffe000000000000UL
+#define UV4H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 49
#define UV4H_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x00003ffffffff000UL
-
-
-union uvh_lb_bau_sb_descriptor_base_u {
- unsigned long v;
- struct uvh_lb_bau_sb_descriptor_base_s {
- unsigned long rsvd_0_11:12;
- unsigned long rsvd_12_48:37;
- unsigned long node_id:14; /* RW */
- unsigned long rsvd_63:1;
- } s;
- struct uv4h_lb_bau_sb_descriptor_base_s {
- unsigned long rsvd_0_11:12;
- unsigned long page_address:34; /* RW */
- unsigned long rsvd_46_48:3;
- unsigned long node_id:14; /* RW */
- unsigned long rsvd_63:1;
- } s4;
-};
+#define UV4H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK 0x7ffe000000000000UL
+
+#define UV4AH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 53
+#define UV4AH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK 0x000ffffffffff000UL
+#define UV4AH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK 0xffe0000000000000UL
+
+#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT ( \
+ is_uv1_hub() ? UV1H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT : \
+ is_uv2_hub() ? UV2H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT : \
+ is_uv3_hub() ? UV3H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT : \
+ is_uv4a_hub() ? UV4AH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT : \
+ /*is_uv4_hub*/ UV4H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT)
+
+#define UVH_LB_BAU_SB_DESCRIPTOR_PAGE_ADDRESS_MASK ( \
+ is_uv1_hub() ? UV1H_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK : \
+ is_uv2_hub() ? UV2H_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK : \
+ is_uv3_hub() ? UV3H_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK : \
+ is_uv4a_hub() ? UV4AH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK : \
+ /*is_uv4_hub*/ UV4H_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_MASK)
+
+#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK ( \
+ is_uv1_hub() ? UV1H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK : \
+ is_uv2_hub() ? UV2H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK : \
+ is_uv3_hub() ? UV3H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK : \
+ is_uv4a_hub() ? UV4AH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK : \
+ /*is_uv4_hub*/ UV4H_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_MASK)
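
Every UVH_* selector above resolves to per-hub SHFT/MASK pairs, and consumers extract fields with the usual (v & MASK) >> SHFT idiom. A standalone sketch of that extraction using the UV4A node_id layout, with a fabricated MMR image (constants copied from the UV4AH defines above):

    #include <stdint.h>
    #include <stdio.h>

    #define NODE_ID_SHFT 53                    /* UV4AH ... NODE_ID_SHFT */
    #define NODE_ID_MASK 0xffe0000000000000ULL /* UV4AH ... NODE_ID_MASK */

    int main(void)
    {
        uint64_t mmr = (uint64_t)42 << NODE_ID_SHFT; /* fabricated image */

        printf("node_id = %llu\n",
               (unsigned long long)((mmr & NODE_ID_MASK) >> NODE_ID_SHFT));
        return 0;
    }
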
/* ========================================================================= */
/* UVH_NODE_ID */
@@ -3031,6 +3049,41 @@ union uvh_node_present_table_u {
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL
+#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
+#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
+#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63
+#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL
+#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
+#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
+#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63
+#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL
+#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
+#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
+#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63
+#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL
+#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
+#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
+#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63
+#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL
+#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
+#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
+#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63
+#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL
+#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL
+
union uvh_rh_gam_alias210_overlay_config_0_mmr_u {
unsigned long v;
@@ -3042,6 +3095,46 @@ union uvh_rh_gam_alias210_overlay_config_0_mmr_u {
unsigned long rsvd_53_62:10;
unsigned long enable:1; /* RW */
} s;
+ struct uv1h_rh_gam_alias210_overlay_config_0_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
+ } s1;
+ struct uvxh_rh_gam_alias210_overlay_config_0_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
+ } sx;
+ struct uv2h_rh_gam_alias210_overlay_config_0_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
+ } s2;
+ struct uv3h_rh_gam_alias210_overlay_config_0_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
+ } s3;
+ struct uv4h_rh_gam_alias210_overlay_config_0_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
+ } s4;
};
/* ========================================================================= */
@@ -3064,6 +3157,41 @@ union uvh_rh_gam_alias210_overlay_config_0_mmr_u {
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL
+#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
+#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
+#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63
+#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL
+#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
+#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
+#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63
+#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL
+#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
+#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
+#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63
+#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL
+#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
+#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
+#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63
+#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL
+#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
+#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
+#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63
+#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL
+#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL
+
union uvh_rh_gam_alias210_overlay_config_1_mmr_u {
unsigned long v;
@@ -3075,6 +3203,46 @@ union uvh_rh_gam_alias210_overlay_config_1_mmr_u {
unsigned long rsvd_53_62:10;
unsigned long enable:1; /* RW */
} s;
+ struct uv1h_rh_gam_alias210_overlay_config_1_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
+ } s1;
+ struct uvxh_rh_gam_alias210_overlay_config_1_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
+ } sx;
+ struct uv2h_rh_gam_alias210_overlay_config_1_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
+ } s2;
+ struct uv3h_rh_gam_alias210_overlay_config_1_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
+ } s3;
+ struct uv4h_rh_gam_alias210_overlay_config_1_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
+ } s4;
};
/* ========================================================================= */
@@ -3097,6 +3265,41 @@ union uvh_rh_gam_alias210_overlay_config_1_mmr_u {
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL
+#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
+#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
+#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63
+#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL
+#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UV1H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
+#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
+#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63
+#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL
+#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UVXH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
+#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
+#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63
+#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL
+#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UV2H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
+#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
+#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63
+#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL
+#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UV3H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
+#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
+#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63
+#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL
+#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UV4H_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL
+
union uvh_rh_gam_alias210_overlay_config_2_mmr_u {
unsigned long v;
@@ -3108,6 +3311,46 @@ union uvh_rh_gam_alias210_overlay_config_2_mmr_u {
unsigned long rsvd_53_62:10;
unsigned long enable:1; /* RW */
} s;
+ struct uv1h_rh_gam_alias210_overlay_config_2_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
+ } s1;
+ struct uvxh_rh_gam_alias210_overlay_config_2_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
+ } sx;
+ struct uv2h_rh_gam_alias210_overlay_config_2_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
+ } s2;
+ struct uv3h_rh_gam_alias210_overlay_config_2_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
+ } s3;
+ struct uv4h_rh_gam_alias210_overlay_config_2_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long base:8; /* RW */
+ unsigned long rsvd_32_47:16;
+ unsigned long m_alias:5; /* RW */
+ unsigned long rsvd_53_62:10;
+ unsigned long enable:1; /* RW */
+ } s4;
};
/* ========================================================================= */
@@ -3126,6 +3369,21 @@ union uvh_rh_gam_alias210_overlay_config_2_mmr_u {
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+#define UV1H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
+#define UV1H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
+#define UVXH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
+#define UVXH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
+#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
+#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
+#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
+#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
+#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
+#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
union uvh_rh_gam_alias210_redirect_config_0_mmr_u {
unsigned long v;
@@ -3134,6 +3392,31 @@ union uvh_rh_gam_alias210_redirect_config_0_mmr_u {
unsigned long dest_base:22; /* RW */
unsigned long rsvd_46_63:18;
} s;
+ struct uv1h_rh_gam_alias210_redirect_config_0_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } s1;
+ struct uvxh_rh_gam_alias210_redirect_config_0_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } sx;
+ struct uv2h_rh_gam_alias210_redirect_config_0_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } s2;
+ struct uv3h_rh_gam_alias210_redirect_config_0_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } s3;
+ struct uv4h_rh_gam_alias210_redirect_config_0_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } s4;
};
/* ========================================================================= */
@@ -3152,6 +3435,21 @@ union uvh_rh_gam_alias210_redirect_config_0_mmr_u {
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+#define UV1H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
+#define UV1H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
+#define UVXH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
+#define UVXH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
+#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
+#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
+#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
+#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
+#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
+#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
union uvh_rh_gam_alias210_redirect_config_1_mmr_u {
unsigned long v;
@@ -3160,6 +3458,31 @@ union uvh_rh_gam_alias210_redirect_config_1_mmr_u {
unsigned long dest_base:22; /* RW */
unsigned long rsvd_46_63:18;
} s;
+ struct uv1h_rh_gam_alias210_redirect_config_1_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } s1;
+ struct uvxh_rh_gam_alias210_redirect_config_1_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } sx;
+ struct uv2h_rh_gam_alias210_redirect_config_1_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } s2;
+ struct uv3h_rh_gam_alias210_redirect_config_1_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } s3;
+ struct uv4h_rh_gam_alias210_redirect_config_1_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } s4;
};
/* ========================================================================= */
@@ -3178,6 +3501,21 @@ union uvh_rh_gam_alias210_redirect_config_1_mmr_u {
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+#define UV1H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
+#define UV1H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
+#define UVXH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
+#define UVXH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
+#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
+#define UV2H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
+#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
+#define UV3H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
+#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
+#define UV4H_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
+
union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
unsigned long v;
@@ -3186,6 +3524,31 @@ union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
unsigned long dest_base:22; /* RW */
unsigned long rsvd_46_63:18;
} s;
+ struct uv1h_rh_gam_alias210_redirect_config_2_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } s1;
+ struct uvxh_rh_gam_alias210_redirect_config_2_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } sx;
+ struct uv2h_rh_gam_alias210_redirect_config_2_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } s2;
+ struct uv3h_rh_gam_alias210_redirect_config_2_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } s3;
+ struct uv4h_rh_gam_alias210_redirect_config_2_mmr_s {
+ unsigned long rsvd_0_23:24;
+ unsigned long dest_base:22; /* RW */
+ unsigned long rsvd_46_63:18;
+ } s4;
};
/* ========================================================================= */
@@ -3384,6 +3747,162 @@ union uvh_rh_gam_gru_overlay_config_mmr_u {
};
/* ========================================================================= */
+/* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR */
+/* ========================================================================= */
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR uv_undefined("UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR")
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR uv_undefined("UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR")
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR 0x1603000UL
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR 0x483000UL
+#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR ( \
+ is_uv1_hub() ? UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR : \
+ is_uv2_hub() ? UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR : \
+ is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR : \
+ /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR)
+
+
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_SHFT 26
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT 46
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_SHFT 63
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK 0x00003ffffc000000UL
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK 0x000fc00000000000UL
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_SHFT 26
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT 46
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_SHFT 63
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK 0x00003ffffc000000UL
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK 0x000fc00000000000UL
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT 52
+#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK 0x000ffffffc000000UL
+#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK 0x03f0000000000000UL
+#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT ( \
+ is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT : \
+ is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT : \
+ /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT)
+
+#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK ( \
+ is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK : \
+ is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK : \
+ /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK)
+
+#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK ( \
+ is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK : \
+ is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK : \
+ /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK)
+
+#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK ( \
+ is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK : \
+ is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK : \
+ /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK)
+
+union uvh_rh_gam_mmioh_overlay_config0_mmr_u {
+ unsigned long v;
+ struct uv3h_rh_gam_mmioh_overlay_config0_mmr_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:20; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4;
+ unsigned long rsvd_56_62:7;
+ unsigned long enable:1; /* RW */
+ } s3;
+ struct uv4h_rh_gam_mmioh_overlay_config0_mmr_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:20; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4;
+ unsigned long rsvd_56_62:7;
+ unsigned long enable:1; /* RW */
+ } s4;
+ struct uv4ah_rh_gam_mmioh_overlay_config0_mmr_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:26; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4;
+ unsigned long undef_62:1; /* Undefined */
+ unsigned long enable:1; /* RW */
+ } s4a;
+};
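All three bitfield views above overlay the same 64-bit register, so a caller reads the MMR once and decodes it with the hub-neutral UVH_* selector macros, which resolve to the UV3, UV4 or UV4A constants at run time. A minimal sketch of that pattern (the helper is illustrative, not part of the patch):

	/* Illustrative only: decode an MMIOH0 overlay value read elsewhere. */
	static unsigned long mmioh0_decode(unsigned long v, int *m_io)
	{
		if (!(v & UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK))
			return 0;		/* overlay disabled */
		*m_io = (v & UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK)
			>> UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT;
		return v & UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK;
	}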
+
+/* ========================================================================= */
+/* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR */
+/* ========================================================================= */
+#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR uv_undefined("UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR")
+#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR uv_undefined("UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR")
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR 0x1604000UL
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR 0x484000UL
+#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR ( \
+ is_uv1_hub() ? UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR : \
+ is_uv2_hub() ? UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR : \
+ is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR : \
+ /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR)
+
+
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_SHFT 26
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT 46
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_ENABLE_SHFT 63
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK 0x00003ffffc000000UL
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK 0x000fc00000000000UL
+#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_SHFT 26
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT 46
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_ENABLE_SHFT 63
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK 0x00003ffffc000000UL
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK 0x000fc00000000000UL
+#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_ENABLE_MASK 0x8000000000000000UL
+
+#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT 52
+#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK 0x000ffffffc000000UL
+#define UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK 0x03f0000000000000UL
+
+#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT ( \
+ is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT : \
+ is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT : \
+ /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT)
+
+#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK ( \
+ is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK : \
+ is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK : \
+ /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK)
+
+#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK ( \
+ is_uv3_hub() ? UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK : \
+ is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK : \
+ /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK)
+
+union uvh_rh_gam_mmioh_overlay_config1_mmr_u {
+ unsigned long v;
+ struct uv3h_rh_gam_mmioh_overlay_config1_mmr_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:20; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4;
+ unsigned long rsvd_56_62:7;
+ unsigned long enable:1; /* RW */
+ } s3;
+ struct uv4h_rh_gam_mmioh_overlay_config1_mmr_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:20; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4;
+ unsigned long rsvd_56_62:7;
+ unsigned long enable:1; /* RW */
+ } s4;
+ struct uv4ah_rh_gam_mmioh_overlay_config1_mmr_s {
+ unsigned long rsvd_0_25:26;
+ unsigned long base:26; /* RW */
+ unsigned long m_io:6; /* RW */
+ unsigned long n_io:4;
+ unsigned long undef_62:1; /* Undefined */
+ unsigned long enable:1; /* RW */
+ } s4a;
+};
+
+/* ========================================================================= */
/* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR */
/* ========================================================================= */
#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR 0x1600030UL
@@ -3438,6 +3957,112 @@ union uvh_rh_gam_mmioh_overlay_config_mmr_u {
};
/* ========================================================================= */
+/* UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR */
+/* ========================================================================= */
+#define UV1H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR uv_undefined("UV1H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR")
+#define UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR uv_undefined("UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR")
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR 0x1603800UL
+#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR 0x483800UL
+#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR ( \
+ is_uv1_hub() ? UV1H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR : \
+ is_uv2_hub() ? UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR : \
+ is_uv3_hub() ? UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR : \
+ /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR)
+
+#define UV1H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH uv_undefined("UV1H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH")
+#define UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH uv_undefined("UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH")
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH 128
+#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH 128
+#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH ( \
+ is_uv1_hub() ? UV1H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH : \
+ is_uv2_hub() ? UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH : \
+ is_uv3_hub() ? UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH : \
+ /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH)
+
+
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_SHFT 0
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK 0x0000000000007fffUL
+
+#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_SHFT 0
+#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK 0x0000000000007fffUL
+
+#define UV4AH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK 0x0000000000000fffUL
+
+#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK ( \
+ is_uv3_hub() ? UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK : \
+ is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK : \
+ /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK)
+
+union uvh_rh_gam_mmioh_redirect_config0_mmr_u {
+ unsigned long v;
+ struct uv3h_rh_gam_mmioh_redirect_config0_mmr_s {
+ unsigned long nasid:15; /* RW */
+ unsigned long rsvd_15_63:49;
+ } s3;
+ struct uv4h_rh_gam_mmioh_redirect_config0_mmr_s {
+ unsigned long nasid:15; /* RW */
+ unsigned long rsvd_15_63:49;
+ } s4;
+ struct uv4ah_rh_gam_mmioh_redirect_config0_mmr_s {
+ unsigned long nasid:12; /* RW */
+ unsigned long rsvd_12_63:52;
+ } s4a;
+};
+
+/* ========================================================================= */
+/* UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR */
+/* ========================================================================= */
+#define UV1H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR uv_undefined("UV1H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR")
+#define UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR uv_undefined("UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR")
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR 0x1604800UL
+#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR 0x484800UL
+#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR ( \
+ is_uv1_hub() ? UV1H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR : \
+ is_uv2_hub() ? UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR : \
+ is_uv3_hub() ? UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR : \
+ /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR)
+
+#define UV1H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH uv_undefined("UV1H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH")
+#define UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH uv_undefined("UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH")
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH 128
+#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH 128
+#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH ( \
+ is_uv1_hub() ? UV1H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH : \
+ is_uv2_hub() ? UV2H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH : \
+ is_uv3_hub() ? UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH : \
+ /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH)
+
+
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_SHFT 0
+#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK 0x0000000000007fffUL
+
+#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_SHFT 0
+#define UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK 0x0000000000007fffUL
+
+#define UV4AH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK 0x0000000000000fffUL
+
+#define UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK ( \
+ is_uv3_hub() ? UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK : \
+ is_uv4a_hub() ? UV4AH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK : \
+ /*is_uv4_hub*/ UV4H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK)
+
+union uvh_rh_gam_mmioh_redirect_config1_mmr_u {
+ unsigned long v;
+ struct uv3h_rh_gam_mmioh_redirect_config1_mmr_s {
+ unsigned long nasid:15; /* RW */
+ unsigned long rsvd_15_63:49;
+ } s3;
+ struct uv4h_rh_gam_mmioh_redirect_config1_mmr_s {
+ unsigned long nasid:15; /* RW */
+ unsigned long rsvd_15_63:49;
+ } s4;
+ struct uv4ah_rh_gam_mmioh_redirect_config1_mmr_s {
+ unsigned long nasid:12; /* RW */
+ unsigned long rsvd_12_63:52;
+ } s4a;
+};
+
+/* ========================================================================= */
/* UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR */
/* ========================================================================= */
#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
@@ -4138,88 +4763,6 @@ union uv3h_gr0_gam_gr_config_u {
};
/* ========================================================================= */
-/* UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR */
-/* ========================================================================= */
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR 0x1603000UL
-
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_SHFT 26
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT 46
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_SHFT 63
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK 0x00003ffffc000000UL
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK 0x000fc00000000000UL
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK 0x8000000000000000UL
-
-union uv3h_rh_gam_mmioh_overlay_config0_mmr_u {
- unsigned long v;
- struct uv3h_rh_gam_mmioh_overlay_config0_mmr_s {
- unsigned long rsvd_0_25:26;
- unsigned long base:20; /* RW */
- unsigned long m_io:6; /* RW */
- unsigned long n_io:4;
- unsigned long rsvd_56_62:7;
- unsigned long enable:1; /* RW */
- } s3;
-};
-
-/* ========================================================================= */
-/* UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR */
-/* ========================================================================= */
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR 0x1604000UL
-
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_SHFT 26
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT 46
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_ENABLE_SHFT 63
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK 0x00003ffffc000000UL
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK 0x000fc00000000000UL
-#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_ENABLE_MASK 0x8000000000000000UL
-
-union uv3h_rh_gam_mmioh_overlay_config1_mmr_u {
- unsigned long v;
- struct uv3h_rh_gam_mmioh_overlay_config1_mmr_s {
- unsigned long rsvd_0_25:26;
- unsigned long base:20; /* RW */
- unsigned long m_io:6; /* RW */
- unsigned long n_io:4;
- unsigned long rsvd_56_62:7;
- unsigned long enable:1; /* RW */
- } s3;
-};
-
-/* ========================================================================= */
-/* UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR */
-/* ========================================================================= */
-#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR 0x1603800UL
-#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH 128
-
-#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_SHFT 0
-#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK 0x0000000000007fffUL
-
-union uv3h_rh_gam_mmioh_redirect_config0_mmr_u {
- unsigned long v;
- struct uv3h_rh_gam_mmioh_redirect_config0_mmr_s {
- unsigned long nasid:15; /* RW */
- unsigned long rsvd_15_63:49;
- } s3;
-};
-
-/* ========================================================================= */
-/* UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR */
-/* ========================================================================= */
-#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR 0x1604800UL
-#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH 128
-
-#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_SHFT 0
-#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK 0x0000000000007fffUL
-
-union uv3h_rh_gam_mmioh_redirect_config1_mmr_u {
- unsigned long v;
- struct uv3h_rh_gam_mmioh_redirect_config1_mmr_s {
- unsigned long nasid:15; /* RW */
- unsigned long rsvd_15_63:49;
- } s3;
-};
-
-/* ========================================================================= */
/* UV4H_LB_PROC_INTD_QUEUE_FIRST */
/* ========================================================================= */
#define UV4H_LB_PROC_INTD_QUEUE_FIRST 0xa4100UL
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index aa4747569e23..fc2f082ac635 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -212,6 +212,7 @@ enum x86_legacy_i8042_state {
struct x86_legacy_features {
enum x86_legacy_i8042_state i8042;
int rtc;
+ int warm_reset;
int no_vga;
int reserve_bios_regions;
struct x86_legacy_devices devices;
diff --git a/arch/x86/include/uapi/asm/Kbuild b/arch/x86/include/uapi/asm/Kbuild
index 1e901e421f2d..322681622d1e 100644
--- a/arch/x86/include/uapi/asm/Kbuild
+++ b/arch/x86/include/uapi/asm/Kbuild
@@ -5,3 +5,4 @@ generic-y += bpf_perf_event.h
generated-y += unistd_32.h
generated-y += unistd_64.h
generated-y += unistd_x32.h
+generic-y += poll.h
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index afdd5ae0fcc4..aebf60357758 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -9,6 +9,7 @@
#define SETUP_PCI 3
#define SETUP_EFI 4
#define SETUP_APPLE_PROPERTIES 5
+#define SETUP_JAILHOUSE 6
/* ram_size flags */
#define RAMDISK_IMAGE_START_MASK 0x07FF
@@ -126,6 +127,27 @@ struct boot_e820_entry {
__u32 type;
} __attribute__((packed));
+/*
+ * Smallest compatible version of jailhouse_setup_data required by this kernel.
+ */
+#define JAILHOUSE_SETUP_REQUIRED_VERSION 1
+
+/*
+ * The boot loader is passing platform information via this Jailhouse-specific
+ * setup data structure.
+ */
+struct jailhouse_setup_data {
+ __u16 version;
+ __u16 compatible_version;
+ __u16 pm_timer_address;
+ __u16 num_cpus;
+ __u64 pci_mmconfig_base;
+ __u32 tsc_khz;
+ __u32 apic_khz;
+ __u8 standard_ioapic;
+ __u8 cpu_ids[255];
+} __attribute__((packed));
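The compatible_version field names the oldest structure revision this data can still be parsed as, so a consumer only has to reject data that is newer than what it implements. A hedged sketch of such a check (function name and message are illustrative, not the patch's actual jailhouse.c code):

	static void __init jailhouse_check_setup_data(const struct jailhouse_setup_data *d)
	{
		/* The loader's data needs a newer parser than this kernel has. */
		if (d->compatible_version > JAILHOUSE_SETUP_REQUIRED_VERSION)
			panic("Jailhouse: unsupported setup data version %u",
			      d->version);
	}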
+
/* The so-called "zeropage" */
struct boot_params {
struct screen_info screen_info; /* 0x000 */
diff --git a/arch/x86/include/uapi/asm/poll.h b/arch/x86/include/uapi/asm/poll.h
deleted file mode 100644
index c98509d3149e..000000000000
--- a/arch/x86/include/uapi/asm/poll.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/poll.h>
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 7e2baf7304ae..29786c87e864 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -115,6 +115,8 @@ obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= paravirt-spinlocks.o
obj-$(CONFIG_PARAVIRT_CLOCK) += pvclock.o
obj-$(CONFIG_X86_PMEM_LEGACY_DEVICE) += pmem.o
+obj-$(CONFIG_JAILHOUSE_GUEST) += jailhouse.o
+
obj-$(CONFIG_EISA) += eisa.o
obj-$(CONFIG_PCSPKR_PLATFORM) += pcspeaker.o
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index f4c463df8b08..ec3a286163c3 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -68,8 +68,9 @@ int acpi_ioapic;
int acpi_strict;
int acpi_disable_cmcff;
+/* ACPI SCI override configuration */
u8 acpi_sci_flags __initdata;
-int acpi_sci_override_gsi __initdata;
+u32 acpi_sci_override_gsi __initdata = INVALID_ACPI_IRQ;
int acpi_skip_timer_override __initdata;
int acpi_use_timer_override __initdata;
int acpi_fix_pin2_polarity __initdata;
@@ -112,8 +113,6 @@ static u32 isa_irq_to_gsi[NR_IRQS_LEGACY] __read_mostly = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
};
-#define ACPI_INVALID_GSI INT_MIN
-
/*
* This is just a simple wrapper around early_memremap(),
* with sanity checks for phys == 0 and size == 0.
@@ -372,7 +371,7 @@ static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
* and acpi_isa_irq_to_gsi() may give wrong result.
*/
if (gsi < nr_legacy_irqs() && isa_irq_to_gsi[gsi] == gsi)
- isa_irq_to_gsi[gsi] = ACPI_INVALID_GSI;
+ isa_irq_to_gsi[gsi] = INVALID_ACPI_IRQ;
isa_irq_to_gsi[bus_irq] = gsi;
}
@@ -620,24 +619,24 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
}
rc = acpi_get_override_irq(gsi, &trigger, &polarity);
- if (rc == 0) {
- trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
- polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
- irq = acpi_register_gsi(NULL, gsi, trigger, polarity);
- if (irq >= 0) {
- *irqp = irq;
- return 0;
- }
- }
+ if (rc)
+ return rc;
- return -1;
+ trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
+ polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
+ irq = acpi_register_gsi(NULL, gsi, trigger, polarity);
+ if (irq < 0)
+ return irq;
+
+ *irqp = irq;
+ return 0;
}
EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi)
{
if (isa_irq < nr_legacy_irqs() &&
- isa_irq_to_gsi[isa_irq] != ACPI_INVALID_GSI) {
+ isa_irq_to_gsi[isa_irq] != INVALID_ACPI_IRQ) {
*gsi = isa_irq_to_gsi[isa_irq];
return 0;
}
@@ -676,8 +675,7 @@ static int acpi_register_gsi_ioapic(struct device *dev, u32 gsi,
mutex_lock(&acpi_ioapic_lock);
irq = mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC, &info);
/* Don't set up the ACPI SCI because it's already set up */
- if (irq >= 0 && enable_update_mptable &&
- acpi_gbl_FADT.sci_interrupt != gsi)
+ if (irq >= 0 && enable_update_mptable && gsi != acpi_gbl_FADT.sci_interrupt)
mp_config_acpi_gsi(dev, gsi, trigger, polarity);
mutex_unlock(&acpi_ioapic_lock);
#endif
@@ -1211,8 +1209,9 @@ static int __init acpi_parse_madt_ioapic_entries(void)
/*
* If BIOS did not supply an INT_SRC_OVR for the SCI
* pretend we got one so we can set the SCI flags.
+ * But ignore setting up SCI on hardware reduced platforms.
*/
- if (!acpi_sci_override_gsi)
+ if (acpi_sci_override_gsi == INVALID_ACPI_IRQ && !acpi_gbl_reduced_hardware)
acpi_sci_ioapic_setup(acpi_gbl_FADT.sci_interrupt, 0, 0,
acpi_gbl_FADT.sci_interrupt);
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 7188aea91549..f1915b744052 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -138,6 +138,8 @@ static int __init acpi_sleep_setup(char *str)
acpi_nvs_nosave_s3();
if (strncmp(str, "old_ordering", 12) == 0)
acpi_old_suspend_ordering();
+ if (strncmp(str, "nobl", 4) == 0)
+ acpi_sleep_no_blacklist();
str = strchr(str, ',');
if (str != NULL)
str += strspn(str, ", \t");
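The new branch wires acpi_sleep=nobl into the existing parser; like the other options it can be combined in a comma-separated list (e.g. acpi_sleep=old_ordering,nobl) and lets users skip the internal S3 suspend blacklist when their machine is wrongly matched by it.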
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 4817d743c263..30571fdaaf6f 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -298,7 +298,7 @@ recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
tgt_rip = next_rip + o_dspl;
n_dspl = tgt_rip - orig_insn;
- DPRINTK("target RIP: %p, new_displ: 0x%x", tgt_rip, n_dspl);
+ DPRINTK("target RIP: %px, new_displ: 0x%x", tgt_rip, n_dspl);
if (tgt_rip - orig_insn >= 0) {
if (n_dspl - 2 <= 127)
@@ -355,7 +355,7 @@ static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *ins
add_nops(instr + (a->instrlen - a->padlen), a->padlen);
local_irq_restore(flags);
- DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ",
+ DUMP_BYTES(instr, a->instrlen, "%px: [%d:%d) optimized NOPs: ",
instr, a->instrlen - a->padlen, a->padlen);
}
@@ -376,7 +376,7 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
u8 *instr, *replacement;
u8 insnbuf[MAX_PATCH_LEN];
- DPRINTK("alt table %p -> %p", start, end);
+ DPRINTK("alt table %px, -> %px", start, end);
/*
* The scan order should be from start to end. A later scanned
* alternative code can overwrite previously scanned alternative code.
@@ -400,14 +400,14 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
continue;
}
- DPRINTK("feat: %d*32+%d, old: (%p, len: %d), repl: (%p, len: %d), pad: %d",
+ DPRINTK("feat: %d*32+%d, old: (%px len: %d), repl: (%px, len: %d), pad: %d",
a->cpuid >> 5,
a->cpuid & 0x1f,
instr, a->instrlen,
replacement, a->replacementlen, a->padlen);
- DUMP_BYTES(instr, a->instrlen, "%p: old_insn: ", instr);
- DUMP_BYTES(replacement, a->replacementlen, "%p: rpl_insn: ", replacement);
+ DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
+ DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);
memcpy(insnbuf, replacement, a->replacementlen);
insnbuf_sz = a->replacementlen;
@@ -433,7 +433,7 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
a->instrlen - a->replacementlen);
insnbuf_sz += a->instrlen - a->replacementlen;
}
- DUMP_BYTES(insnbuf, insnbuf_sz, "%p: final_insn: ", instr);
+ DUMP_BYTES(insnbuf, insnbuf_sz, "%px: final_insn: ", instr);
text_poke_early(instr, insnbuf, insnbuf_sz);
}
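Since %p started hashing pointer values, these debug-only printouts could no longer be correlated with the actual patch sites; %px prints the raw address, which is acceptable here because DPRINTK and DUMP_BYTES are compiled in only when alternatives debugging is enabled.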
diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
index cc0e8bc0ea3f..ecd486cb06ab 100644
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -31,6 +31,7 @@
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/atomic.h>
+#include <linux/dma-direct.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index f5d92bc3b884..2c4d5ece7456 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -30,6 +30,7 @@
#include <asm/dma.h>
#include <asm/amd_nb.h>
#include <asm/x86_init.h>
+#include <linux/crash_dump.h>
/*
* Using 512M as goal, in case kexec will load kernel_big
@@ -56,6 +57,33 @@ int fallback_aper_force __initdata;
int fix_aperture __initdata = 1;
+#ifdef CONFIG_PROC_VMCORE
+/*
+ * If the first kernel maps the aperture over e820 RAM, the kdump kernel will
+ * use the same range because it will remain configured in the northbridge.
+ * Trying to dump this area via /proc/vmcore may crash the machine, so exclude
+ * it from vmcore.
+ */
+static unsigned long aperture_pfn_start, aperture_page_count;
+
+static int gart_oldmem_pfn_is_ram(unsigned long pfn)
+{
+ return likely((pfn < aperture_pfn_start) ||
+ (pfn >= aperture_pfn_start + aperture_page_count));
+}
+
+static void exclude_from_vmcore(u64 aper_base, u32 aper_order)
+{
+ aperture_pfn_start = aper_base >> PAGE_SHIFT;
+ aperture_page_count = (32 * 1024 * 1024) << aper_order >> PAGE_SHIFT;
+ WARN_ON(register_oldmem_pfn_is_ram(&gart_oldmem_pfn_is_ram));
+}
+#else
+static void exclude_from_vmcore(u64 aper_base, u32 aper_order)
+{
+}
+#endif
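The excluded window tracks the aperture size exactly; a worked example under the usual 4 KiB pages (values illustrative):

	/*
	 * aper_base = 0x80000000, aper_order = 1 (a 64 MiB aperture):
	 *   aperture_pfn_start  = 0x80000000 >> 12    = 0x80000
	 *   aperture_page_count = (32 MiB << 1) >> 12 = 16384
	 * so gart_oldmem_pfn_is_ram() reports PFNs 0x80000-0x83fff as
	 * not RAM and /proc/vmcore returns zeroes for them instead of
	 * touching the aperture.
	 */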
+
/* This code runs before the PCI subsystem is initialized, so just
access the northbridge directly. */
@@ -435,8 +463,16 @@ int __init gart_iommu_hole_init(void)
out:
if (!fix && !fallback_aper_force) {
- if (last_aper_base)
+ if (last_aper_base) {
+ /*
+ * If this is the kdump kernel, the first kernel
+ * may have allocated the range over its e820 RAM
+ * and fixed up the northbridge
+ */
+ exclude_from_vmcore(last_aper_base, last_aper_order);
+
return 1;
+ }
return 0;
}
@@ -473,6 +509,14 @@ out:
return 0;
}
+ /*
+ * If this is the kdump kernel _and_ the first kernel did not
+ * configure the aperture in the northbridge, this range may
+ * overlap with the first kernel's memory. We can't access the
+ * range through vmcore even though it should be part of the dump.
+ */
+ exclude_from_vmcore(aper_alloc, aper_order);
+
/* Fix up the north bridges */
for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) {
int bus, dev_base, dev_limit;
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index 25a87028cb3f..e84c9eb4e5b4 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -19,6 +19,7 @@
#include <asm/smp.h>
#include <asm/apic.h>
#include <asm/ipi.h>
+#include <asm/jailhouse_para.h>
#include <linux/acpi.h>
@@ -84,12 +85,8 @@ flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector)
static void flat_send_IPI_allbutself(int vector)
{
int cpu = smp_processor_id();
-#ifdef CONFIG_HOTPLUG_CPU
- int hotplug = 1;
-#else
- int hotplug = 0;
-#endif
- if (hotplug || vector == NMI_VECTOR) {
+
+ if (IS_ENABLED(CONFIG_HOTPLUG_CPU) || vector == NMI_VECTOR) {
if (!cpumask_equal(cpu_online_mask, cpumask_of(cpu))) {
unsigned long mask = cpumask_bits(cpu_online_mask)[0];
@@ -218,6 +215,15 @@ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
return 0;
}
+static void physflat_init_apic_ldr(void)
+{
+ /*
+ * LDR and DFR are not involved in physflat mode, rather:
+ * "In physical destination mode, the destination processor is
+ * specified by its local APIC ID [...]." (Intel SDM, 10.6.2.1)
+ */
+}
+
static void physflat_send_IPI_allbutself(int vector)
{
default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
@@ -230,7 +236,8 @@ static void physflat_send_IPI_all(int vector)
static int physflat_probe(void)
{
- if (apic == &apic_physflat || num_possible_cpus() > 8)
+ if (apic == &apic_physflat || num_possible_cpus() > 8 ||
+ jailhouse_paravirt())
return 1;
return 0;
@@ -251,8 +258,7 @@ static struct apic apic_physflat __ro_after_init = {
.dest_logical = 0,
.check_apicid_used = NULL,
- /* not needed, but shouldn't hurt: */
- .init_apic_ldr = flat_init_apic_ldr,
+ .init_apic_ldr = physflat_init_apic_ldr,
.ioapic_phys_id_map = NULL,
.setup_apic_routing = NULL,
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 8a7963421460..8ad2e410974f 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -800,18 +800,18 @@ static int irq_polarity(int idx)
/*
* Determine IRQ line polarity (high active or low active):
*/
- switch (mp_irqs[idx].irqflag & 0x03) {
- case 0:
+ switch (mp_irqs[idx].irqflag & MP_IRQPOL_MASK) {
+ case MP_IRQPOL_DEFAULT:
/* conforms to spec, ie. bus-type dependent polarity */
if (test_bit(bus, mp_bus_not_pci))
return default_ISA_polarity(idx);
else
return default_PCI_polarity(idx);
- case 1:
+ case MP_IRQPOL_ACTIVE_HIGH:
return IOAPIC_POL_HIGH;
- case 2:
+ case MP_IRQPOL_RESERVED:
pr_warn("IOAPIC: Invalid polarity: 2, defaulting to low\n");
- case 3:
+ case MP_IRQPOL_ACTIVE_LOW:
default: /* Pointless default required due to gcc stupidity */
return IOAPIC_POL_LOW;
}
@@ -845,8 +845,8 @@ static int irq_trigger(int idx)
/*
* Determine IRQ trigger mode (edge or level sensitive):
*/
- switch ((mp_irqs[idx].irqflag >> 2) & 0x03) {
- case 0:
+ switch (mp_irqs[idx].irqflag & MP_IRQTRIG_MASK) {
+ case MP_IRQTRIG_DEFAULT:
/* conforms to spec, ie. bus-type dependent trigger mode */
if (test_bit(bus, mp_bus_not_pci))
trigger = default_ISA_trigger(idx);
@@ -854,11 +854,11 @@ static int irq_trigger(int idx)
trigger = default_PCI_trigger(idx);
/* Take EISA into account */
return eisa_irq_trigger(idx, bus, trigger);
- case 1:
+ case MP_IRQTRIG_EDGE:
return IOAPIC_EDGE;
- case 2:
+ case MP_IRQTRIG_RESERVED:
pr_warn("IOAPIC: Invalid trigger mode 2 defaulting to level\n");
- case 3:
+ case MP_IRQTRIG_LEVEL:
default: /* Pointless default required due to gcc stupidity */
return IOAPIC_LEVEL;
}
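The MP_IRQPOL_*/MP_IRQTRIG_* names replace the bare 0-3 case values. Judging by the unshifted masking above (polarity lives in irqflag bits 1:0, trigger in bits 3:2), the series presumably defines them along these lines in mpspec_def.h; the exact location and spelling are assumptions here:

	#define MP_IRQPOL_DEFAULT	0x0
	#define MP_IRQPOL_ACTIVE_HIGH	0x1
	#define MP_IRQPOL_RESERVED	0x2
	#define MP_IRQPOL_ACTIVE_LOW	0x3
	#define MP_IRQPOL_MASK		0x3

	#define MP_IRQTRIG_DEFAULT	0x0
	#define MP_IRQTRIG_EDGE		0x4
	#define MP_IRQTRIG_RESERVED	0x8
	#define MP_IRQTRIG_LEVEL	0xc
	#define MP_IRQTRIG_MASK		0xc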
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index e1b8e8bf6b3c..46b675aaf20b 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -137,6 +137,8 @@ static int __init early_get_pnodeid(void)
case UV3_HUB_PART_NUMBER_X:
uv_min_hub_revision_id += UV3_HUB_REVISION_BASE;
break;
+
+ /* Update: UV4A has only a modified revision to indicate HUB fixes */
case UV4_HUB_PART_NUMBER:
uv_min_hub_revision_id += UV4_HUB_REVISION_BASE - 1;
uv_cpuid.gnode_shift = 2; /* min partition is 4 sockets */
@@ -316,6 +318,7 @@ static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
} else if (!strcmp(oem_table_id, "UVH")) {
/* Only UV1 systems: */
uv_system_type = UV_NON_UNIQUE_APIC;
+ x86_platform.legacy.warm_reset = 0;
__this_cpu_write(x2apic_extra_bits, pnodeid << uvh_apicid.s.pnode_shift);
uv_set_apicid_hibit();
uv_apic = 1;
@@ -767,6 +770,7 @@ static __init void map_gru_high(int max_pnode)
return;
}
+ /* Only UV3 has distributed GRU mode */
if (is_uv3_hub() && gru.s3.mode) {
map_gru_distributed(gru.v);
return;
@@ -790,63 +794,61 @@ static __init void map_mmr_high(int max_pnode)
pr_info("UV: MMR disabled\n");
}
-/*
- * This commonality works because both 0 & 1 versions of the MMIOH OVERLAY
- * and REDIRECT MMR regs are exactly the same on UV3.
- */
-struct mmioh_config {
- unsigned long overlay;
- unsigned long redirect;
- char *id;
-};
-
-static __initdata struct mmioh_config mmiohs[] = {
- {
- UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR,
- UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR,
- "MMIOH0"
- },
- {
- UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR,
- UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR,
- "MMIOH1"
- },
-};
-
-/* UV3 & UV4 have identical MMIOH overlay configs */
-static __init void map_mmioh_high_uv3(int index, int min_pnode, int max_pnode)
+/* UV3/4 have identical MMIOH overlay configs, UV4A is slightly different */
+static __init void map_mmioh_high_uv34(int index, int min_pnode, int max_pnode)
{
- union uv3h_rh_gam_mmioh_overlay_config0_mmr_u overlay;
+ unsigned long overlay;
unsigned long mmr;
unsigned long base;
+ unsigned long nasid_mask;
+ unsigned long m_overlay;
int i, n, shift, m_io, max_io;
int nasid, lnasid, fi, li;
char *id;
- id = mmiohs[index].id;
- overlay.v = uv_read_local_mmr(mmiohs[index].overlay);
-
- pr_info("UV: %s overlay 0x%lx base:0x%x m_io:%d\n", id, overlay.v, overlay.s3.base, overlay.s3.m_io);
- if (!overlay.s3.enable) {
+ if (index == 0) {
+ id = "MMIOH0";
+ m_overlay = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR;
+ overlay = uv_read_local_mmr(m_overlay);
+ base = overlay & UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK;
+ mmr = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR;
+ m_io = (overlay & UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK)
+ >> UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT;
+ shift = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT;
+ n = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH;
+ nasid_mask = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK;
+ } else {
+ id = "MMIOH1";
+ m_overlay = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR;
+ overlay = uv_read_local_mmr(m_overlay);
+ base = overlay & UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK;
+ mmr = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR;
+ m_io = (overlay & UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK)
+ >> UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT;
+ shift = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT;
+ n = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH;
+ nasid_mask = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK;
+ }
+ pr_info("UV: %s overlay 0x%lx base:0x%lx m_io:%d\n", id, overlay, base, m_io);
+ if (!(overlay & UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK)) {
pr_info("UV: %s disabled\n", id);
return;
}
- shift = UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_SHFT;
- base = (unsigned long)overlay.s3.base;
- m_io = overlay.s3.m_io;
- mmr = mmiohs[index].redirect;
- n = UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH;
/* Convert to NASID: */
min_pnode *= 2;
max_pnode *= 2;
max_io = lnasid = fi = li = -1;
for (i = 0; i < n; i++) {
- union uv3h_rh_gam_mmioh_redirect_config0_mmr_u redirect;
+ unsigned long m_redirect = mmr + i * 8;
+ unsigned long redirect = uv_read_local_mmr(m_redirect);
+
+ nasid = redirect & nasid_mask;
+ if (i == 0)
+ pr_info("UV: %s redirect base 0x%lx(@0x%lx) 0x%04x\n",
+ id, redirect, m_redirect, nasid);
- redirect.v = uv_read_local_mmr(mmr + i * 8);
- nasid = redirect.s3.nasid;
/* Invalid NASID: */
if (nasid < min_pnode || max_pnode < nasid)
nasid = -1;
@@ -894,8 +896,8 @@ static __init void map_mmioh_high(int min_pnode, int max_pnode)
if (is_uv3_hub() || is_uv4_hub()) {
/* Map both MMIOH regions: */
- map_mmioh_high_uv3(0, min_pnode, max_pnode);
- map_mmioh_high_uv3(1, min_pnode, max_pnode);
+ map_mmioh_high_uv34(0, min_pnode, max_pnode);
+ map_mmioh_high_uv34(1, min_pnode, max_pnode);
return;
}
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index e4b0d92b3ae0..ab1865342002 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -1506,7 +1506,7 @@ static ssize_t do_read(struct file *fp, char __user *buf, size_t count, loff_t *
return 0;
}
-static unsigned int do_poll(struct file *fp, poll_table *wait)
+static __poll_t do_poll(struct file *fp, poll_table *wait)
{
struct apm_user *as;
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 390b3dc3d438..3bfb2b23d79c 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
+#include <linux/module.h>
#include <asm/nospec-branch.h>
#include <asm/cmdline.h>
@@ -90,10 +91,31 @@ static const char *spectre_v2_strings[] = {
};
#undef pr_fmt
-#define pr_fmt(fmt) "Spectre V2 mitigation: " fmt
+#define pr_fmt(fmt) "Spectre V2 : " fmt
static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
+#ifdef RETPOLINE
+static bool spectre_v2_bad_module;
+
+bool retpoline_module_ok(bool has_retpoline)
+{
+ if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
+ return true;
+
+ pr_err("System may be vunerable to spectre v2\n");
+ spectre_v2_bad_module = true;
+ return false;
+}
+
+static inline const char *spectre_v2_module_string(void)
+{
+ return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
+}
+#else
+static inline const char *spectre_v2_module_string(void) { return ""; }
+#endif
+
static void __init spec2_print_if_insecure(const char *reason)
{
if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
@@ -249,6 +271,12 @@ retpoline_auto:
setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
pr_info("Filling RSB on context switch\n");
}
+
+ /* Initialize Indirect Branch Prediction Barrier if supported */
+ if (boot_cpu_has(X86_FEATURE_IBPB)) {
+ setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
+ pr_info("Enabling Indirect Branch Prediction Barrier\n");
+ }
}
#undef pr_fmt
@@ -278,6 +306,14 @@ ssize_t cpu_show_spectre_v2(struct device *dev,
if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
return sprintf(buf, "Not affected\n");
- return sprintf(buf, "%s\n", spectre_v2_strings[spectre_v2_enabled]);
+ return sprintf(buf, "%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+ boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
+ spectre_v2_module_string());
}
#endif
+
+void __ibp_barrier(void)
+{
+ __wrmsr(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, 0);
+}
+EXPORT_SYMBOL_GPL(__ibp_barrier);
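Calling __ibp_barrier() directly would bypass the mitigation selection, so callers presumably go through a feature-gated wrapper; a sketch under that assumption (the wrapper shown is illustrative, not necessarily the patch's own helper):

	static inline void indirect_branch_prediction_barrier(void)
	{
		/* Only issue IBPB when the mitigation enabled its use. */
		if (static_cpu_has(X86_FEATURE_USE_IBPB))
			__ibp_barrier();
	}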
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index 68bc6d9b3132..c578cd29c2d2 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -106,6 +106,10 @@ static void early_init_centaur(struct cpuinfo_x86 *c)
#ifdef CONFIG_X86_64
set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#endif
+ if (c->x86_power & (1 << 8)) {
+ set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+ set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
+ }
}
static void init_centaur(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index ef29ad001991..c7c996a692fd 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -47,6 +47,8 @@
#include <asm/pat.h>
#include <asm/microcode.h>
#include <asm/microcode_intel.h>
+#include <asm/intel-family.h>
+#include <asm/cpu_device_id.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/uv/uv.h>
@@ -769,6 +771,7 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
c->x86_capability[CPUID_7_0_EBX] = ebx;
c->x86_capability[CPUID_7_ECX] = ecx;
+ c->x86_capability[CPUID_7_EDX] = edx;
}
/* Extended state features: level 0x0000000d */
@@ -876,6 +879,41 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
#endif
}
+static const __initdata struct x86_cpu_id cpu_no_speculation[] = {
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW, X86_FEATURE_ANY },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW, X86_FEATURE_ANY },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT, X86_FEATURE_ANY },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL, X86_FEATURE_ANY },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW, X86_FEATURE_ANY },
+ { X86_VENDOR_CENTAUR, 5 },
+ { X86_VENDOR_INTEL, 5 },
+ { X86_VENDOR_NSC, 5 },
+ { X86_VENDOR_ANY, 4 },
+ {}
+};
+
+static const __initdata struct x86_cpu_id cpu_no_meltdown[] = {
+ { X86_VENDOR_AMD },
+ {}
+};
+
+static bool __init cpu_vulnerable_to_meltdown(struct cpuinfo_x86 *c)
+{
+ u64 ia32_cap = 0;
+
+ if (x86_match_cpu(cpu_no_meltdown))
+ return false;
+
+ if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
+ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+
+ /* Rogue Data Cache Load? No! */
+ if (ia32_cap & ARCH_CAP_RDCL_NO)
+ return false;
+
+ return true;
+}
+
/*
* Do minimum CPU detection early.
* Fields really needed: vendor, cpuid_level, family, model, mask,
@@ -923,11 +961,12 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
setup_force_cpu_cap(X86_FEATURE_ALWAYS);
- if (c->x86_vendor != X86_VENDOR_AMD)
- setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
-
- setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
- setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+ if (!x86_match_cpu(cpu_no_speculation)) {
+ if (cpu_vulnerable_to_meltdown(c))
+ setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
+ setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
+ setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+ }
fpu__init_system(c);
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
index bea8d3e24f50..479ca4728de0 100644
--- a/arch/x86/kernel/cpu/hypervisor.c
+++ b/arch/x86/kernel/cpu/hypervisor.c
@@ -31,6 +31,7 @@ extern const struct hypervisor_x86 x86_hyper_ms_hyperv;
extern const struct hypervisor_x86 x86_hyper_xen_pv;
extern const struct hypervisor_x86 x86_hyper_xen_hvm;
extern const struct hypervisor_x86 x86_hyper_kvm;
+extern const struct hypervisor_x86 x86_hyper_jailhouse;
static const __initconst struct hypervisor_x86 * const hypervisors[] =
{
@@ -45,6 +46,9 @@ static const __initconst struct hypervisor_x86 * const hypervisors[] =
#ifdef CONFIG_KVM_GUEST
&x86_hyper_kvm,
#endif
+#ifdef CONFIG_JAILHOUSE_GUEST
+ &x86_hyper_jailhouse,
+#endif
};
enum x86_hypervisor_type x86_hyper_type;
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index b1af22073e28..6936d14d4c77 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -102,6 +102,59 @@ static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
ELF_HWCAP2 |= HWCAP2_RING3MWAIT;
}
+/*
+ * Early microcode releases for the Spectre v2 mitigation were broken.
+ * Information taken from;
+ * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/01/microcode-update-guidance.pdf
+ * - https://kb.vmware.com/s/article/52345
+ * - Microcode revisions observed in the wild
+ * - Release note from 20180108 microcode release
+ */
+struct sku_microcode {
+ u8 model;
+ u8 stepping;
+ u32 microcode;
+};
+static const struct sku_microcode spectre_bad_microcodes[] = {
+ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0B, 0x84 },
+ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0A, 0x84 },
+ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x84 },
+ { INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x84 },
+ { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x84 },
+ { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e },
+ { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c },
+ { INTEL_FAM6_SKYLAKE_MOBILE, 0x03, 0xc2 },
+ { INTEL_FAM6_SKYLAKE_DESKTOP, 0x03, 0xc2 },
+ { INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 },
+ { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b },
+ { INTEL_FAM6_BROADWELL_XEON_D, 0x02, 0x14 },
+ { INTEL_FAM6_BROADWELL_XEON_D, 0x03, 0x07000011 },
+ { INTEL_FAM6_BROADWELL_X, 0x01, 0x0b000025 },
+ { INTEL_FAM6_HASWELL_ULT, 0x01, 0x21 },
+ { INTEL_FAM6_HASWELL_GT3E, 0x01, 0x18 },
+ { INTEL_FAM6_HASWELL_CORE, 0x03, 0x23 },
+ { INTEL_FAM6_HASWELL_X, 0x02, 0x3b },
+ { INTEL_FAM6_HASWELL_X, 0x04, 0x10 },
+ { INTEL_FAM6_IVYBRIDGE_X, 0x04, 0x42a },
+ /* Updated in the 20180108 release; blacklist until we know otherwise */
+ { INTEL_FAM6_ATOM_GEMINI_LAKE, 0x01, 0x22 },
+ /* Observed in the wild */
+ { INTEL_FAM6_SANDYBRIDGE_X, 0x06, 0x61b },
+ { INTEL_FAM6_SANDYBRIDGE_X, 0x07, 0x712 },
+};
+
+static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
+ if (c->x86_model == spectre_bad_microcodes[i].model &&
+ c->x86_mask == spectre_bad_microcodes[i].stepping)
+ return (c->microcode <= spectre_bad_microcodes[i].microcode);
+ }
+ return false;
+}
+
static void early_init_intel(struct cpuinfo_x86 *c)
{
u64 misc_enable;
@@ -123,6 +176,30 @@ static void early_init_intel(struct cpuinfo_x86 *c)
c->microcode = intel_get_microcode_revision();
/*
+ * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support,
+ * and they also have a different bit for STIBP support. Also,
+ * a hypervisor might have set the individual AMD bits even on
+ * Intel CPUs, for finer-grained selection of what's available.
+ */
+ if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
+ set_cpu_cap(c, X86_FEATURE_IBRS);
+ set_cpu_cap(c, X86_FEATURE_IBPB);
+ }
+ if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
+ set_cpu_cap(c, X86_FEATURE_STIBP);
+
+ /* Now if any of them are set, check the blacklist and clear the lot */
+ if ((cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) ||
+ cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) {
+ pr_warn("Intel Spectre v2 broken microcode detected; disabling Speculation Control\n");
+ clear_cpu_cap(c, X86_FEATURE_IBRS);
+ clear_cpu_cap(c, X86_FEATURE_IBPB);
+ clear_cpu_cap(c, X86_FEATURE_STIBP);
+ clear_cpu_cap(c, X86_FEATURE_SPEC_CTRL);
+ clear_cpu_cap(c, X86_FEATURE_INTEL_STIBP);
+ }
+
+ /*
* Atom erratum AAE44/AAF40/AAG38/AAH41:
*
* A race condition between speculative fetches and invalidating
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index 99442370de40..410629f10ad3 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -135,6 +135,40 @@ struct rdt_resource rdt_resources_all[] = {
.format_str = "%d=%0*x",
.fflags = RFTYPE_RES_CACHE,
},
+ [RDT_RESOURCE_L2DATA] =
+ {
+ .rid = RDT_RESOURCE_L2DATA,
+ .name = "L2DATA",
+ .domains = domain_init(RDT_RESOURCE_L2DATA),
+ .msr_base = IA32_L2_CBM_BASE,
+ .msr_update = cat_wrmsr,
+ .cache_level = 2,
+ .cache = {
+ .min_cbm_bits = 1,
+ .cbm_idx_mult = 2,
+ .cbm_idx_offset = 0,
+ },
+ .parse_ctrlval = parse_cbm,
+ .format_str = "%d=%0*x",
+ .fflags = RFTYPE_RES_CACHE,
+ },
+ [RDT_RESOURCE_L2CODE] =
+ {
+ .rid = RDT_RESOURCE_L2CODE,
+ .name = "L2CODE",
+ .domains = domain_init(RDT_RESOURCE_L2CODE),
+ .msr_base = IA32_L2_CBM_BASE,
+ .msr_update = cat_wrmsr,
+ .cache_level = 2,
+ .cache = {
+ .min_cbm_bits = 1,
+ .cbm_idx_mult = 2,
+ .cbm_idx_offset = 1,
+ },
+ .parse_ctrlval = parse_cbm,
+ .format_str = "%d=%0*x",
+ .fflags = RFTYPE_RES_CACHE,
+ },
[RDT_RESOURCE_MBA] =
{
.rid = RDT_RESOURCE_MBA,
@@ -259,15 +293,15 @@ static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
r->alloc_enabled = true;
}
-static void rdt_get_cdp_l3_config(int type)
+static void rdt_get_cdp_config(int level, int type)
{
- struct rdt_resource *r_l3 = &rdt_resources_all[RDT_RESOURCE_L3];
+ struct rdt_resource *r_l = &rdt_resources_all[level];
struct rdt_resource *r = &rdt_resources_all[type];
- r->num_closid = r_l3->num_closid / 2;
- r->cache.cbm_len = r_l3->cache.cbm_len;
- r->default_ctrl = r_l3->default_ctrl;
- r->cache.shareable_bits = r_l3->cache.shareable_bits;
+ r->num_closid = r_l->num_closid / 2;
+ r->cache.cbm_len = r_l->cache.cbm_len;
+ r->default_ctrl = r_l->default_ctrl;
+ r->cache.shareable_bits = r_l->cache.shareable_bits;
r->data_width = (r->cache.cbm_len + 3) / 4;
r->alloc_capable = true;
/*
@@ -277,6 +311,18 @@ static void rdt_get_cdp_l3_config(int type)
r->alloc_enabled = false;
}
+static void rdt_get_cdp_l3_config(void)
+{
+ rdt_get_cdp_config(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA);
+ rdt_get_cdp_config(RDT_RESOURCE_L3, RDT_RESOURCE_L3CODE);
+}
+
+static void rdt_get_cdp_l2_config(void)
+{
+ rdt_get_cdp_config(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA);
+ rdt_get_cdp_config(RDT_RESOURCE_L2, RDT_RESOURCE_L2CODE);
+}
+
static int get_cache_id(int cpu, int level)
{
struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
@@ -645,6 +691,7 @@ enum {
RDT_FLAG_L3_CAT,
RDT_FLAG_L3_CDP,
RDT_FLAG_L2_CAT,
+ RDT_FLAG_L2_CDP,
RDT_FLAG_MBA,
};
@@ -667,6 +714,7 @@ static struct rdt_options rdt_options[] __initdata = {
RDT_OPT(RDT_FLAG_L3_CAT, "l3cat", X86_FEATURE_CAT_L3),
RDT_OPT(RDT_FLAG_L3_CDP, "l3cdp", X86_FEATURE_CDP_L3),
RDT_OPT(RDT_FLAG_L2_CAT, "l2cat", X86_FEATURE_CAT_L2),
+ RDT_OPT(RDT_FLAG_L2_CDP, "l2cdp", X86_FEATURE_CDP_L2),
RDT_OPT(RDT_FLAG_MBA, "mba", X86_FEATURE_MBA),
};
#define NUM_RDT_OPTIONS ARRAY_SIZE(rdt_options)
@@ -729,15 +777,15 @@ static __init bool get_rdt_alloc_resources(void)
if (rdt_cpu_has(X86_FEATURE_CAT_L3)) {
rdt_get_cache_alloc_cfg(1, &rdt_resources_all[RDT_RESOURCE_L3]);
- if (rdt_cpu_has(X86_FEATURE_CDP_L3)) {
- rdt_get_cdp_l3_config(RDT_RESOURCE_L3DATA);
- rdt_get_cdp_l3_config(RDT_RESOURCE_L3CODE);
- }
+ if (rdt_cpu_has(X86_FEATURE_CDP_L3))
+ rdt_get_cdp_l3_config();
ret = true;
}
if (rdt_cpu_has(X86_FEATURE_CAT_L2)) {
/* CPUID 0x10.2 fields are same format as 0x10.1 */
rdt_get_cache_alloc_cfg(2, &rdt_resources_all[RDT_RESOURCE_L2]);
+ if (rdt_cpu_has(X86_FEATURE_CDP_L2))
+ rdt_get_cdp_l2_config();
ret = true;
}
diff --git a/arch/x86/kernel/cpu/intel_rdt.h b/arch/x86/kernel/cpu/intel_rdt.h
index 3397244984f5..3fd7a70ee04a 100644
--- a/arch/x86/kernel/cpu/intel_rdt.h
+++ b/arch/x86/kernel/cpu/intel_rdt.h
@@ -7,12 +7,15 @@
#include <linux/jump_label.h>
#define IA32_L3_QOS_CFG 0xc81
+#define IA32_L2_QOS_CFG 0xc82
#define IA32_L3_CBM_BASE 0xc90
#define IA32_L2_CBM_BASE 0xd10
#define IA32_MBA_THRTL_BASE 0xd50
#define L3_QOS_CDP_ENABLE 0x01ULL
+#define L2_QOS_CDP_ENABLE 0x01ULL
+
/*
* Event IDs are used to program IA32_QM_EVTSEL before reading event
* counter from IA32_QM_CTR
@@ -357,6 +360,8 @@ enum {
RDT_RESOURCE_L3DATA,
RDT_RESOURCE_L3CODE,
RDT_RESOURCE_L2,
+ RDT_RESOURCE_L2DATA,
+ RDT_RESOURCE_L2CODE,
RDT_RESOURCE_MBA,
/* Must be the last */
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index 64c5ff97ee0d..bdab7d2f51af 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -990,6 +990,7 @@ out_destroy:
kernfs_remove(kn);
return ret;
}
+
static void l3_qos_cfg_update(void *arg)
{
bool *enable = arg;
@@ -997,8 +998,17 @@ static void l3_qos_cfg_update(void *arg)
wrmsrl(IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL);
}
-static int set_l3_qos_cfg(struct rdt_resource *r, bool enable)
+static void l2_qos_cfg_update(void *arg)
{
+ bool *enable = arg;
+
+ wrmsrl(IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL);
+}
+
+static int set_cache_qos_cfg(int level, bool enable)
+{
+ void (*update)(void *arg);
+ struct rdt_resource *r_l;
cpumask_var_t cpu_mask;
struct rdt_domain *d;
int cpu;
@@ -1006,16 +1016,24 @@ static int set_l3_qos_cfg(struct rdt_resource *r, bool enable)
if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
return -ENOMEM;
- list_for_each_entry(d, &r->domains, list) {
+ if (level == RDT_RESOURCE_L3)
+ update = l3_qos_cfg_update;
+ else if (level == RDT_RESOURCE_L2)
+ update = l2_qos_cfg_update;
+ else
+ return -EINVAL;
+
+ r_l = &rdt_resources_all[level];
+ list_for_each_entry(d, &r_l->domains, list) {
/* Pick one CPU from each domain instance to update MSR */
cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
}
cpu = get_cpu();
/* Update QOS_CFG MSR on this cpu if it's in cpu_mask. */
if (cpumask_test_cpu(cpu, cpu_mask))
- l3_qos_cfg_update(&enable);
+ update(&enable);
/* Update QOS_CFG MSR on all other cpus in cpu_mask. */
- smp_call_function_many(cpu_mask, l3_qos_cfg_update, &enable, 1);
+ smp_call_function_many(cpu_mask, update, &enable, 1);
put_cpu();
free_cpumask_var(cpu_mask);
@@ -1023,52 +1041,99 @@ static int set_l3_qos_cfg(struct rdt_resource *r, bool enable)
return 0;
}
-static int cdp_enable(void)
+static int cdp_enable(int level, int data_type, int code_type)
{
- struct rdt_resource *r_l3data = &rdt_resources_all[RDT_RESOURCE_L3DATA];
- struct rdt_resource *r_l3code = &rdt_resources_all[RDT_RESOURCE_L3CODE];
- struct rdt_resource *r_l3 = &rdt_resources_all[RDT_RESOURCE_L3];
+ struct rdt_resource *r_ldata = &rdt_resources_all[data_type];
+ struct rdt_resource *r_lcode = &rdt_resources_all[code_type];
+ struct rdt_resource *r_l = &rdt_resources_all[level];
int ret;
- if (!r_l3->alloc_capable || !r_l3data->alloc_capable ||
- !r_l3code->alloc_capable)
+ if (!r_l->alloc_capable || !r_ldata->alloc_capable ||
+ !r_lcode->alloc_capable)
return -EINVAL;
- ret = set_l3_qos_cfg(r_l3, true);
+ ret = set_cache_qos_cfg(level, true);
if (!ret) {
- r_l3->alloc_enabled = false;
- r_l3data->alloc_enabled = true;
- r_l3code->alloc_enabled = true;
+ r_l->alloc_enabled = false;
+ r_ldata->alloc_enabled = true;
+ r_lcode->alloc_enabled = true;
}
return ret;
}
-static void cdp_disable(void)
+static int cdpl3_enable(void)
{
- struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
+ return cdp_enable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA,
+ RDT_RESOURCE_L3CODE);
+}
+
+static int cdpl2_enable(void)
+{
+ return cdp_enable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA,
+ RDT_RESOURCE_L2CODE);
+}
+
+static void cdp_disable(int level, int data_type, int code_type)
+{
+ struct rdt_resource *r = &rdt_resources_all[level];
r->alloc_enabled = r->alloc_capable;
- if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled) {
- rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled = false;
- rdt_resources_all[RDT_RESOURCE_L3CODE].alloc_enabled = false;
- set_l3_qos_cfg(r, false);
+ if (rdt_resources_all[data_type].alloc_enabled) {
+ rdt_resources_all[data_type].alloc_enabled = false;
+ rdt_resources_all[code_type].alloc_enabled = false;
+ set_cache_qos_cfg(level, false);
}
}
+static void cdpl3_disable(void)
+{
+ cdp_disable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA, RDT_RESOURCE_L3CODE);
+}
+
+static void cdpl2_disable(void)
+{
+ cdp_disable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA, RDT_RESOURCE_L2CODE);
+}
+
+static void cdp_disable_all(void)
+{
+ if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
+ cdpl3_disable();
+ if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled)
+ cdpl2_disable();
+}
+
static int parse_rdtgroupfs_options(char *data)
{
char *token, *o = data;
int ret = 0;
while ((token = strsep(&o, ",")) != NULL) {
- if (!*token)
- return -EINVAL;
+ if (!*token) {
+ ret = -EINVAL;
+ goto out;
+ }
- if (!strcmp(token, "cdp"))
- ret = cdp_enable();
+ if (!strcmp(token, "cdp")) {
+ ret = cdpl3_enable();
+ if (ret)
+ goto out;
+ } else if (!strcmp(token, "cdpl2")) {
+ ret = cdpl2_enable();
+ if (ret)
+ goto out;
+ } else {
+ ret = -EINVAL;
+ goto out;
+ }
}
+ return 0;
+
+out:
+ pr_err("Invalid mount option \"%s\"\n", token);
+
return ret;
}
@@ -1223,7 +1288,7 @@ out_mongrp:
out_info:
kernfs_remove(kn_info);
out_cdp:
- cdp_disable();
+ cdp_disable_all();
out:
rdt_last_cmd_clear();
mutex_unlock(&rdtgroup_mutex);
@@ -1383,7 +1448,7 @@ static void rdt_kill_sb(struct super_block *sb)
/* Put everything back to default values. */
for_each_alloc_enabled_rdt_resource(r)
reset_all_ctrls(r);
- cdp_disable();
+ cdp_disable_all();
rmdir_all_sub();
static_branch_disable_cpuslocked(&rdt_alloc_enable_key);
static_branch_disable_cpuslocked(&rdt_mon_enable_key);
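For reference, the two mount options parsed above can be driven from user space with mount(2). A minimal sketch, assuming the conventional resctrl mount point; only the option string relates to this patch:

/* Hedged illustration, not kernel code: enable L3 CDP plus the new L2 CDP. */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* "cdp" reaches cdpl3_enable(), "cdpl2" the new cdpl2_enable(). */
	if (mount("resctrl", "/sys/fs/resctrl", "resctrl", 0, "cdp,cdpl2")) {
		perror("mount");
		return 1;
	}
	return 0;
}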
diff --git a/arch/x86/kernel/cpu/mcheck/dev-mcelog.c b/arch/x86/kernel/cpu/mcheck/dev-mcelog.c
index 7f85b76f43bc..213e8c2ca702 100644
--- a/arch/x86/kernel/cpu/mcheck/dev-mcelog.c
+++ b/arch/x86/kernel/cpu/mcheck/dev-mcelog.c
@@ -243,7 +243,7 @@ out:
return err ? err : buf - ubuf;
}
-static unsigned int mce_chrdev_poll(struct file *file, poll_table *wait)
+static __poll_t mce_chrdev_poll(struct file *file, poll_table *wait)
{
poll_wait(file, &mce_chrdev_wait, wait);
if (READ_ONCE(mcelog.next))
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
index 4ca632a06e0b..5bbd06f38ff6 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
@@ -59,6 +59,7 @@ static struct severity {
#define MCGMASK(x, y) .mcgmask = x, .mcgres = y
#define MASK(x, y) .mask = x, .result = y
#define MCI_UC_S (MCI_STATUS_UC|MCI_STATUS_S)
+#define MCI_UC_AR (MCI_STATUS_UC|MCI_STATUS_AR)
#define MCI_UC_SAR (MCI_STATUS_UC|MCI_STATUS_S|MCI_STATUS_AR)
#define MCI_ADDR (MCI_STATUS_ADDRV|MCI_STATUS_MISCV)
@@ -101,6 +102,22 @@ static struct severity {
NOSER, BITCLR(MCI_STATUS_UC)
),
+ /*
+ * known AO MCACODs reported via MCE or CMC:
+ *
+ * SRAO could be signaled either via a machine check exception or
+ * CMCI, with the corresponding bit S set or clear. So we don't need to
+ * check bit S for SRAO.
+ */
+ MCESEV(
+ AO, "Action optional: memory scrubbing error",
+ SER, MASK(MCI_STATUS_OVER|MCI_UC_AR|MCACOD_SCRUBMSK, MCI_STATUS_UC|MCACOD_SCRUB)
+ ),
+ MCESEV(
+ AO, "Action optional: last level cache writeback error",
+ SER, MASK(MCI_STATUS_OVER|MCI_UC_AR|MCACOD, MCI_STATUS_UC|MCACOD_L3WB)
+ ),
+
/* ignore OVER for UCNA */
MCESEV(
UCNA, "Uncorrected no action required",
@@ -149,15 +166,6 @@ static struct severity {
SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_UC_SAR)
),
- /* known AO MCACODs: */
- MCESEV(
- AO, "Action optional: memory scrubbing error",
- SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCACOD_SCRUBMSK, MCI_UC_S|MCACOD_SCRUB)
- ),
- MCESEV(
- AO, "Action optional: last level cache writeback error",
- SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCACOD, MCI_UC_S|MCACOD_L3WB)
- ),
MCESEV(
SOME, "Action optional: unknown MCACOD",
SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_UC_S)
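A severity table entry applies when (status & mask) == result. The point of relocating the AO entries above is that bit S no longer appears on either side, so an SRAO error signaled via CMCI (S=0) matches too. A small sketch of that predicate; the bit positions and MCACOD values are assumptions of this illustration, taken from the MCE headers rather than from this hunk:

#include <stdint.h>
#include <stdio.h>

#define MCI_STATUS_OVER	(1ULL << 62)	/* error overflowed */
#define MCI_STATUS_UC	(1ULL << 61)	/* uncorrected */
#define MCI_STATUS_AR	(1ULL << 55)	/* action required */
#define MCACOD_SCRUBMSK	0xfff0ULL	/* MCA code mask for scrub errors */
#define MCACOD_SCRUB	0x00c0ULL	/* memory scrubbing error */

/* Table semantics: an entry fires iff (status & mask) == result. */
static int matches(uint64_t status, uint64_t mask, uint64_t result)
{
	return (status & mask) == result;
}

int main(void)
{
	uint64_t mask = MCI_STATUS_OVER | MCI_STATUS_UC | MCI_STATUS_AR |
			MCACOD_SCRUBMSK;
	uint64_t result = MCI_STATUS_UC | MCACOD_SCRUB;
	/* UC scrub error delivered by CMCI: S=0, AR=0, OVER=0. */
	uint64_t status = MCI_STATUS_UC | 0x00c5;

	printf("SRAO scrub entry matches: %d\n", matches(status, mask, result));
	return 0;
}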
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 868e412b4f0c..ba1f9555fbc5 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -503,10 +503,8 @@ static int mce_usable_address(struct mce *m)
bool mce_is_memory_error(struct mce *m)
{
if (m->cpuvendor == X86_VENDOR_AMD) {
- /* ErrCodeExt[20:16] */
- u8 xec = (m->status >> 16) & 0x1f;
+ return amd_mce_is_memory_error(m);
- return (xec == 0x0 || xec == 0x8);
} else if (m->cpuvendor == X86_VENDOR_INTEL) {
/*
* Intel SDM Volume 3B - 15.9.2 Compound Error Codes
@@ -530,6 +528,17 @@ bool mce_is_memory_error(struct mce *m)
}
EXPORT_SYMBOL_GPL(mce_is_memory_error);
+static bool mce_is_correctable(struct mce *m)
+{
+ if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED)
+ return false;
+
+ if (m->status & MCI_STATUS_UC)
+ return false;
+
+ return true;
+}
+
static bool cec_add_mce(struct mce *m)
{
if (!m)
@@ -537,7 +546,7 @@ static bool cec_add_mce(struct mce *m)
/* We eat only correctable DRAM errors with usable addresses. */
if (mce_is_memory_error(m) &&
- !(m->status & MCI_STATUS_UC) &&
+ mce_is_correctable(m) &&
mce_usable_address(m))
if (!cec_add_elem(m->addr >> PAGE_SHIFT))
return true;
@@ -582,7 +591,7 @@ static int srao_decode_notifier(struct notifier_block *nb, unsigned long val,
if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) {
pfn = mce->addr >> PAGE_SHIFT;
- memory_failure(pfn, MCE_VECTOR, 0);
+ memory_failure(pfn, 0);
}
return NOTIFY_OK;
@@ -1046,7 +1055,7 @@ static int do_memory_failure(struct mce *m)
pr_err("Uncorrected hardware memory error in user-access at %llx", m->addr);
if (!(m->mcgstatus & MCG_STATUS_RIPV))
flags |= MF_MUST_KILL;
- ret = memory_failure(m->addr >> PAGE_SHIFT, MCE_VECTOR, flags);
+ ret = memory_failure(m->addr >> PAGE_SHIFT, flags);
if (ret)
pr_err("Memory error not recovered");
return ret;
@@ -1325,7 +1334,7 @@ out_ist:
EXPORT_SYMBOL_GPL(do_machine_check);
#ifndef CONFIG_MEMORY_FAILURE
-int memory_failure(unsigned long pfn, int vector, int flags)
+int memory_failure(unsigned long pfn, int flags)
{
/* mce_severity() should not hand us an ACTION_REQUIRED error */
BUG_ON(flags & MF_ACTION_REQUIRED);
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 486f640b02ef..0f32ad242324 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -110,6 +110,20 @@ const char *smca_get_long_name(enum smca_bank_types t)
}
EXPORT_SYMBOL_GPL(smca_get_long_name);
+static enum smca_bank_types smca_get_bank_type(struct mce *m)
+{
+ struct smca_bank *b;
+
+ if (m->bank >= N_SMCA_BANK_TYPES)
+ return N_SMCA_BANK_TYPES;
+
+ b = &smca_banks[m->bank];
+ if (!b->hwid)
+ return N_SMCA_BANK_TYPES;
+
+ return b->hwid->bank_type;
+}
+
static struct smca_hwid smca_hwid_mcatypes[] = {
/* { bank_type, hwid_mcatype, xec_bitmap } */
@@ -407,7 +421,9 @@ static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c)
(deferred_error_int_vector != amd_deferred_error_interrupt))
deferred_error_int_vector = amd_deferred_error_interrupt;
- low = (low & ~MASK_DEF_INT_TYPE) | DEF_INT_TYPE_APIC;
+ if (!mce_flags.smca)
+ low = (low & ~MASK_DEF_INT_TYPE) | DEF_INT_TYPE_APIC;
+
wrmsr(MSR_CU_DEF_ERR, low, high);
}
@@ -738,6 +754,17 @@ out_err:
}
EXPORT_SYMBOL_GPL(umc_normaddr_to_sysaddr);
+bool amd_mce_is_memory_error(struct mce *m)
+{
+ /* ErrCodeExt[20:16] */
+ u8 xec = (m->status >> 16) & 0x1f;
+
+ if (mce_flags.smca)
+ return smca_get_bank_type(m) == SMCA_UMC && xec == 0x0;
+
+ return m->bank == 4 && xec == 0x8;
+}
+
static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc)
{
struct mce m;
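ErrCodeExt is plain bit arithmetic on MCi_STATUS. A minimal sketch of the legacy (pre-SMCA) rule from amd_mce_is_memory_error() above; the status value is made up for illustration:

#include <stdint.h>
#include <stdio.h>

/* ErrCodeExt lives in MCi_STATUS[20:16] on AMD. */
static uint8_t xec(uint64_t status)
{
	return (status >> 16) & 0x1f;
}

int main(void)
{
	int bank = 4;			/* legacy northbridge bank */
	uint64_t status = 0x8ULL << 16;	/* illustrative: ErrCodeExt = 0x8 */

	printf("legacy memory error: %d\n", bank == 4 && xec(status) == 0x8);
	return 0;
}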
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index c4fa4a85d4cb..e4fc595cd6ea 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -239,7 +239,7 @@ static int __init save_microcode_in_initrd(void)
break;
case X86_VENDOR_AMD:
if (c->x86 >= 0x10)
- return save_microcode_in_initrd_amd(cpuid_eax(1));
+ ret = save_microcode_in_initrd_amd(cpuid_eax(1));
break;
default:
break;
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index d9e460fc7a3b..f7c55b0e753a 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -45,6 +45,9 @@ static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";
/* Current microcode patch used in early patching on the APs. */
static struct microcode_intel *intel_ucode_patch;
+/* last level cache size per core */
+static int llc_size_per_core;
+
static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
unsigned int s2, unsigned int p2)
{
@@ -912,12 +915,14 @@ static bool is_blacklisted(unsigned int cpu)
/*
* Late loading on model 79 with microcode revision less than 0x0b000021
- * may result in a system hang. This behavior is documented in item
- * BDF90, #334165 (Intel Xeon Processor E7-8800/4800 v4 Product Family).
+ * and LLC size per core bigger than 2.5MB may result in a system hang.
+ * This behavior is documented in item BDF90, #334165 (Intel Xeon
+ * Processor E7-8800/4800 v4 Product Family).
*/
if (c->x86 == 6 &&
c->x86_model == INTEL_FAM6_BROADWELL_X &&
c->x86_mask == 0x01 &&
+ llc_size_per_core > 2621440 &&
c->microcode < 0x0b000021) {
pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
@@ -975,6 +980,15 @@ static struct microcode_ops microcode_intel_ops = {
.apply_microcode = apply_microcode_intel,
};
+static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
+{
+ u64 llc_size = c->x86_cache_size * 1024;
+
+ do_div(llc_size, c->x86_max_cores);
+
+ return (int)llc_size;
+}
+
struct microcode_ops * __init init_intel_microcode(void)
{
struct cpuinfo_x86 *c = &boot_cpu_data;
@@ -985,5 +999,7 @@ struct microcode_ops * __init init_intel_microcode(void)
return NULL;
}
+ llc_size_per_core = calc_llc_size_per_core(c);
+
return &microcode_intel_ops;
}
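The 2621440 threshold above is exactly 2.5 MB (2.5 * 1024 * 1024 bytes). A worked check of the per-core computation, assuming x86_cache_size is in KB as in struct cpuinfo_x86; the cache and core counts are illustrative, not taken from the erratum:

#include <stdio.h>

int main(void)
{
	unsigned int cache_size_kb = 61440;	/* e.g. a 60 MB LLC */
	unsigned int max_cores = 20;
	unsigned long long llc_per_core =
		(unsigned long long)cache_size_kb * 1024 / max_cores;

	/* Mirrors the new check: blacklist only when > 2.5 MB per core. */
	printf("llc/core = %llu bytes, blacklisted = %d\n",
	       llc_per_core, llc_per_core > 2621440ULL);
	return 0;
}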
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index d0e69769abfd..4075d2be5357 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -21,11 +21,10 @@ struct cpuid_bit {
static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 },
{ X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 },
- { X86_FEATURE_AVX512_4VNNIW, CPUID_EDX, 2, 0x00000007, 0 },
- { X86_FEATURE_AVX512_4FMAPS, CPUID_EDX, 3, 0x00000007, 0 },
{ X86_FEATURE_CAT_L3, CPUID_EBX, 1, 0x00000010, 0 },
{ X86_FEATURE_CAT_L2, CPUID_EBX, 2, 0x00000010, 0 },
{ X86_FEATURE_CDP_L3, CPUID_ECX, 2, 0x00000010, 1 },
+ { X86_FEATURE_CDP_L2, CPUID_ECX, 2, 0x00000010, 2 },
{ X86_FEATURE_MBA, CPUID_EBX, 3, 0x00000010, 0 },
{ X86_FEATURE_HW_PSTATE, CPUID_EDX, 7, 0x80000007, 0 },
{ X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 },
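Per the table row added above, the new bit is ECX bit 2 of CPUID leaf 0x10, sub-leaf 2. It can be probed from user space; a sketch assuming GCC/clang's cpuid.h:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Leaf 0x10 (RDT allocation), sub-leaf 2 (L2 CAT): ECX bit 2 = CDP_L2. */
	if (!__get_cpuid_count(0x10, 2, &eax, &ebx, &ecx, &edx))
		return 1;
	printf("CDP_L2 supported: %u\n", (ecx >> 2) & 1);
	return 0;
}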
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index ef61f540cf0a..91b2cff4b79a 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -295,7 +295,7 @@ trace:
restore_mcount_regs
jmp fgraph_trace
-END(function_hook)
+ENDPROC(function_hook)
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
diff --git a/arch/x86/kernel/itmt.c b/arch/x86/kernel/itmt.c
index f73f475d0573..d177940aa090 100644
--- a/arch/x86/kernel/itmt.c
+++ b/arch/x86/kernel/itmt.c
@@ -24,7 +24,6 @@
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
-#include <linux/sched.h>
#include <linux/sysctl.h>
#include <linux/nodemask.h>
diff --git a/arch/x86/kernel/jailhouse.c b/arch/x86/kernel/jailhouse.c
new file mode 100644
index 000000000000..b68fd895235a
--- /dev/null
+++ b/arch/x86/kernel/jailhouse.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Jailhouse paravirt_ops implementation
+ *
+ * Copyright (c) Siemens AG, 2015-2017
+ *
+ * Authors:
+ * Jan Kiszka <jan.kiszka@siemens.com>
+ */
+
+#include <linux/acpi_pmtmr.h>
+#include <linux/kernel.h>
+#include <linux/reboot.h>
+#include <asm/apic.h>
+#include <asm/cpu.h>
+#include <asm/hypervisor.h>
+#include <asm/i8259.h>
+#include <asm/irqdomain.h>
+#include <asm/pci_x86.h>
+#include <asm/reboot.h>
+#include <asm/setup.h>
+
+static __initdata struct jailhouse_setup_data setup_data;
+static unsigned int precalibrated_tsc_khz;
+
+static uint32_t jailhouse_cpuid_base(void)
+{
+ if (boot_cpu_data.cpuid_level < 0 ||
+ !boot_cpu_has(X86_FEATURE_HYPERVISOR))
+ return 0;
+
+ return hypervisor_cpuid_base("Jailhouse\0\0\0", 0);
+}
+
+static uint32_t __init jailhouse_detect(void)
+{
+ return jailhouse_cpuid_base();
+}
+
+static void jailhouse_get_wallclock(struct timespec *now)
+{
+ memset(now, 0, sizeof(*now));
+}
+
+static void __init jailhouse_timer_init(void)
+{
+ lapic_timer_frequency = setup_data.apic_khz * (1000 / HZ);
+}
+
+static unsigned long jailhouse_get_tsc(void)
+{
+ return precalibrated_tsc_khz;
+}
+
+static void __init jailhouse_x2apic_init(void)
+{
+#ifdef CONFIG_X86_X2APIC
+ if (!x2apic_enabled())
+ return;
+ /*
+ * We do not have access to IR inside Jailhouse non-root cells. So
+ * we have to run in physical mode.
+ */
+ x2apic_phys = 1;
+ /*
+ * This will trigger the switch to apic_x2apic_phys. Empty OEM IDs
+ * ensure that only this APIC driver picks up the call.
+ */
+ default_acpi_madt_oem_check("", "");
+#endif
+}
+
+static void __init jailhouse_get_smp_config(unsigned int early)
+{
+ struct ioapic_domain_cfg ioapic_cfg = {
+ .type = IOAPIC_DOMAIN_STRICT,
+ .ops = &mp_ioapic_irqdomain_ops,
+ };
+ struct mpc_intsrc mp_irq = {
+ .type = MP_INTSRC,
+ .irqtype = mp_INT,
+ .irqflag = MP_IRQPOL_ACTIVE_HIGH | MP_IRQTRIG_EDGE,
+ };
+ unsigned int cpu;
+
+ jailhouse_x2apic_init();
+
+ register_lapic_address(0xfee00000);
+
+ for (cpu = 0; cpu < setup_data.num_cpus; cpu++) {
+ generic_processor_info(setup_data.cpu_ids[cpu],
+ boot_cpu_apic_version);
+ }
+
+ smp_found_config = 1;
+
+ if (setup_data.standard_ioapic) {
+ mp_register_ioapic(0, 0xfec00000, gsi_top, &ioapic_cfg);
+
+ /* Register 1:1 mapping for legacy UART IRQs 3 and 4 */
+ mp_irq.srcbusirq = mp_irq.dstirq = 3;
+ mp_save_irq(&mp_irq);
+
+ mp_irq.srcbusirq = mp_irq.dstirq = 4;
+ mp_save_irq(&mp_irq);
+ }
+}
+
+static void jailhouse_no_restart(void)
+{
+ pr_notice("Jailhouse: Restart not supported, halting\n");
+ machine_halt();
+}
+
+static int __init jailhouse_pci_arch_init(void)
+{
+ pci_direct_init(1);
+
+ /*
+ * There are no bridges on the virtual PCI root bus under Jailhouse,
+ * thus no other way to discover all devices than a full scan.
+ * Respect any overrides via the command line, though.
+ */
+ if (pcibios_last_bus < 0)
+ pcibios_last_bus = 0xff;
+
+ return 0;
+}
+
+static void __init jailhouse_init_platform(void)
+{
+ u64 pa_data = boot_params.hdr.setup_data;
+ struct setup_data header;
+ void *mapping;
+
+ x86_init.irqs.pre_vector_init = x86_init_noop;
+ x86_init.timers.timer_init = jailhouse_timer_init;
+ x86_init.mpparse.get_smp_config = jailhouse_get_smp_config;
+ x86_init.pci.arch_init = jailhouse_pci_arch_init;
+
+ x86_platform.calibrate_cpu = jailhouse_get_tsc;
+ x86_platform.calibrate_tsc = jailhouse_get_tsc;
+ x86_platform.get_wallclock = jailhouse_get_wallclock;
+ x86_platform.legacy.rtc = 0;
+ x86_platform.legacy.warm_reset = 0;
+ x86_platform.legacy.i8042 = X86_LEGACY_I8042_PLATFORM_ABSENT;
+
+ legacy_pic = &null_legacy_pic;
+
+ machine_ops.emergency_restart = jailhouse_no_restart;
+
+ while (pa_data) {
+ mapping = early_memremap(pa_data, sizeof(header));
+ memcpy(&header, mapping, sizeof(header));
+ early_memunmap(mapping, sizeof(header));
+
+ if (header.type == SETUP_JAILHOUSE &&
+ header.len >= sizeof(setup_data)) {
+ pa_data += offsetof(struct setup_data, data);
+
+ mapping = early_memremap(pa_data, sizeof(setup_data));
+ memcpy(&setup_data, mapping, sizeof(setup_data));
+ early_memunmap(mapping, sizeof(setup_data));
+
+ break;
+ }
+
+ pa_data = header.next;
+ }
+
+ if (!pa_data)
+ panic("Jailhouse: No valid setup data found");
+
+ if (setup_data.compatible_version > JAILHOUSE_SETUP_REQUIRED_VERSION)
+ panic("Jailhouse: Unsupported setup data structure");
+
+ pmtmr_ioport = setup_data.pm_timer_address;
+ pr_debug("Jailhouse: PM-Timer IO Port: %#x\n", pmtmr_ioport);
+
+ precalibrated_tsc_khz = setup_data.tsc_khz;
+ setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
+
+ pci_probe = 0;
+
+ /*
+ * Keep the kernel from complaining about missing ACPI tables - there
+ * are none in a non-root cell.
+ */
+ disable_acpi();
+}
+
+bool jailhouse_paravirt(void)
+{
+ return jailhouse_cpuid_base() != 0;
+}
+
+static bool jailhouse_x2apic_available(void)
+{
+ /*
+ * The x2APIC is only available if the root cell enabled it. Jailhouse
+ * does not support switching between xAPIC and x2APIC.
+ */
+ return x2apic_enabled();
+}
+
+const struct hypervisor_x86 x86_hyper_jailhouse __refconst = {
+ .name = "Jailhouse",
+ .detect = jailhouse_detect,
+ .init.init_platform = jailhouse_init_platform,
+ .init.x2apic_available = jailhouse_x2apic_available,
+};
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 3a4b12809ab5..27d0a1712663 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -281,7 +281,7 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type)
int ELCR_fallback = 0;
intsrc.type = MP_INTSRC;
- intsrc.irqflag = 0; /* conforming */
+ intsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT;
intsrc.srcbus = 0;
intsrc.dstapic = mpc_ioapic_id(0);
@@ -324,10 +324,13 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type)
* copy that information over to the MP table in the
* irqflag field (level sensitive, active high polarity).
*/
- if (ELCR_trigger(i))
- intsrc.irqflag = 13;
- else
- intsrc.irqflag = 0;
+ if (ELCR_trigger(i)) {
+ intsrc.irqflag = MP_IRQTRIG_LEVEL |
+ MP_IRQPOL_ACTIVE_HIGH;
+ } else {
+ intsrc.irqflag = MP_IRQTRIG_DEFAULT |
+ MP_IRQPOL_DEFAULT;
+ }
}
intsrc.srcbusirq = i;
@@ -419,7 +422,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
construct_ioapic_table(mpc_default_type);
lintsrc.type = MP_LINTSRC;
- lintsrc.irqflag = 0; /* conforming */
+ lintsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT;
lintsrc.srcbusid = 0;
lintsrc.srcbusirq = 0;
lintsrc.destapic = MP_APIC_ALL;
@@ -664,7 +667,7 @@ static int __init get_MP_intsrc_index(struct mpc_intsrc *m)
if (m->irqtype != mp_INT)
return 0;
- if (m->irqflag != 0x0f)
+ if (m->irqflag != (MP_IRQTRIG_LEVEL | MP_IRQPOL_ACTIVE_LOW))
return 0;
/* not legacy */
@@ -673,7 +676,8 @@ static int __init get_MP_intsrc_index(struct mpc_intsrc *m)
if (mp_irqs[i].irqtype != mp_INT)
continue;
- if (mp_irqs[i].irqflag != 0x0f)
+ if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL |
+ MP_IRQPOL_ACTIVE_LOW))
continue;
if (mp_irqs[i].srcbus != m->srcbus)
@@ -784,7 +788,8 @@ static int __init replace_intsrc_all(struct mpc_table *mpc,
if (mp_irqs[i].irqtype != mp_INT)
continue;
- if (mp_irqs[i].irqflag != 0x0f)
+ if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL |
+ MP_IRQPOL_ACTIVE_LOW))
continue;
if (nr_m_spare > 0) {
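The literals replaced throughout this file encode polarity in bits 0-1 and trigger mode in bits 2-3 of irqflag. A quick sketch asserting that the new macro spellings equal the old magic numbers; the macro values are assumed from mpspec_def.h:

#include <assert.h>
#include <stdio.h>

/* Assumed values, as in arch/x86/include/asm/mpspec_def.h. */
#define MP_IRQPOL_DEFAULT	0x0
#define MP_IRQPOL_ACTIVE_HIGH	0x1
#define MP_IRQPOL_ACTIVE_LOW	0x3
#define MP_IRQTRIG_DEFAULT	0x0
#define MP_IRQTRIG_EDGE		0x4
#define MP_IRQTRIG_LEVEL	0xc

int main(void)
{
	assert((MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT) == 0);     /* "conforming" */
	assert((MP_IRQTRIG_LEVEL | MP_IRQPOL_ACTIVE_HIGH) == 13);  /* old ELCR value */
	assert((MP_IRQTRIG_LEVEL | MP_IRQPOL_ACTIVE_LOW) == 0x0f); /* legacy ISA */
	assert((MP_IRQTRIG_EDGE | MP_IRQPOL_ACTIVE_HIGH) == 5);    /* old SFI value */
	printf("all irqflag encodings match the old literals\n");
	return 0;
}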
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 599d7462eccc..df7ab02f959f 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/export.h>
@@ -87,7 +87,6 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
dma_mask = dma_alloc_coherent_mask(dev, flag);
- flag &= ~__GFP_ZERO;
again:
page = NULL;
/* CMA can be used only in the context which permits sleeping */
@@ -139,7 +138,6 @@ bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp)
if (!*dev)
*dev = &x86_dma_fallback_dev;
- *gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
*gfp = dma_alloc_coherent_gfp_flags(*dev, *gfp);
if (!is_device_dma_capable(*dev))
@@ -217,7 +215,7 @@ static __init int iommu_setup(char *p)
}
early_param("iommu", iommu_setup);
-int x86_dma_supported(struct device *dev, u64 mask)
+int arch_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PCI
if (mask > 0xffffffff && forbid_dac > 0) {
@@ -226,12 +224,6 @@ int x86_dma_supported(struct device *dev, u64 mask)
}
#endif
- /* Copied from i386. Doesn't make much sense, because it will
- only work for pci_alloc_coherent.
- The caller just has to use GFP_DMA in this case. */
- if (mask < DMA_BIT_MASK(24))
- return 0;
-
/* Tell the device to use SAC when IOMMU force is on. This
allows the driver to use cheaper accesses in some cases.
@@ -251,6 +243,17 @@ int x86_dma_supported(struct device *dev, u64 mask)
return 1;
}
+EXPORT_SYMBOL(arch_dma_supported);
+
+int x86_dma_supported(struct device *dev, u64 mask)
+{
+ /* Copied from i386. Doesn't make much sense, because it will
+ only work for pci_alloc_coherent.
+ The caller just has to use GFP_DMA in this case. */
+ if (mask < DMA_BIT_MASK(24))
+ return 0;
+ return 1;
+}
static int __init pci_iommu_init(void)
{
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index b0caae27e1b7..618285e475c6 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Fallback functions when the main IOMMU code is not compiled in. This
code is roughly equivalent to i386. */
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/gfp.h>
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index 53bd05ea90d8..0ee0f8f34251 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -6,7 +6,7 @@
#include <linux/init.h>
#include <linux/swiotlb.h>
#include <linux/bootmem.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
#include <linux/mem_encrypt.h>
#include <asm/iommu.h>
@@ -48,7 +48,7 @@ void x86_swiotlb_free_coherent(struct device *dev, size_t size,
dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
}
-static const struct dma_map_ops swiotlb_dma_ops = {
+static const struct dma_map_ops x86_swiotlb_dma_ops = {
.mapping_error = swiotlb_dma_mapping_error,
.alloc = x86_swiotlb_alloc_coherent,
.free = x86_swiotlb_free_coherent,
@@ -112,7 +112,7 @@ void __init pci_swiotlb_init(void)
{
if (swiotlb) {
swiotlb_init(0);
- dma_ops = &swiotlb_dma_ops;
+ dma_ops = &x86_swiotlb_dma_ops;
}
}
@@ -120,7 +120,7 @@ void __init pci_swiotlb_late_init(void)
{
/* An IOMMU turned us off. */
if (!swiotlb)
- swiotlb_free();
+ swiotlb_exit();
else {
printk(KERN_INFO "PCI-DMA: "
"Using software bounce buffering for IO (SWIOTLB)\n");
diff --git a/arch/x86/kernel/platform-quirks.c b/arch/x86/kernel/platform-quirks.c
index 39a59299bfa0..235fe6008ac8 100644
--- a/arch/x86/kernel/platform-quirks.c
+++ b/arch/x86/kernel/platform-quirks.c
@@ -9,6 +9,7 @@ void __init x86_early_init_platform_quirks(void)
{
x86_platform.legacy.i8042 = X86_LEGACY_I8042_EXPECTED_PRESENT;
x86_platform.legacy.rtc = 1;
+ x86_platform.legacy.warm_reset = 1;
x86_platform.legacy.reserve_bios_regions = 0;
x86_platform.legacy.devices.pnpbios = 1;
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index cb368c2a22ab..03408b942adb 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -21,7 +21,6 @@
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
-#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 68d7ab81c62f..1ae67e982af7 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -114,7 +114,6 @@
#include <asm/alternative.h>
#include <asm/prom.h>
#include <asm/microcode.h>
-#include <asm/mmu_context.h>
#include <asm/kaslr.h>
#include <asm/unwind.h>
diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c
index 8c6da1a643da..ac057f9b0763 100644
--- a/arch/x86/kernel/signal_compat.c
+++ b/arch/x86/kernel/signal_compat.c
@@ -25,8 +25,8 @@ static inline void signal_compat_build_tests(void)
* limits also have to look at this code. Make sure any
* new fields are handled in copy_siginfo_to_user32()!
*/
- BUILD_BUG_ON(NSIGILL != 8);
- BUILD_BUG_ON(NSIGFPE != 8);
+ BUILD_BUG_ON(NSIGILL != 11);
+ BUILD_BUG_ON(NSIGFPE != 13);
BUILD_BUG_ON(NSIGSEGV != 4);
BUILD_BUG_ON(NSIGBUS != 5);
BUILD_BUG_ON(NSIGTRAP != 4);
@@ -64,7 +64,7 @@ static inline void signal_compat_build_tests(void)
CHECK_SI_SIZE (_kill, 2*sizeof(int));
CHECK_CSI_OFFSET(_timer);
- CHECK_CSI_SIZE (_timer, 5*sizeof(int));
+ CHECK_CSI_SIZE (_timer, 3*sizeof(int));
CHECK_SI_SIZE (_timer, 6*sizeof(int));
CHECK_CSI_OFFSET(_rt);
@@ -75,9 +75,11 @@ static inline void signal_compat_build_tests(void)
CHECK_CSI_SIZE (_sigchld, 5*sizeof(int));
CHECK_SI_SIZE (_sigchld, 8*sizeof(int));
+#ifdef CONFIG_X86_X32_ABI
CHECK_CSI_OFFSET(_sigchld_x32);
CHECK_CSI_SIZE (_sigchld_x32, 7*sizeof(int));
/* no _sigchld_x32 in the generic siginfo_t */
+#endif
CHECK_CSI_OFFSET(_sigfault);
CHECK_CSI_SIZE (_sigfault, 4*sizeof(int));
@@ -96,6 +98,8 @@ static inline void signal_compat_build_tests(void)
void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact)
{
+ signal_compat_build_tests();
+
/* Don't leak in-kernel non-uapi flags to user-space */
if (oact)
oact->sa.sa_flags &= ~(SA_IA32_ABI | SA_X32_ABI);
@@ -111,116 +115,3 @@ void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact)
if (in_x32_syscall())
act->sa.sa_flags |= SA_X32_ABI;
}
-
-int __copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from,
- bool x32_ABI)
-{
- int err = 0;
-
- signal_compat_build_tests();
-
- if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
- return -EFAULT;
-
- put_user_try {
- /* If you change siginfo_t structure, please make sure that
- this code is fixed accordingly.
- It should never copy any pad contained in the structure
- to avoid security leaks, but must copy the generic
- 3 ints plus the relevant union member. */
- put_user_ex(from->si_signo, &to->si_signo);
- put_user_ex(from->si_errno, &to->si_errno);
- put_user_ex(from->si_code, &to->si_code);
-
- if (from->si_code < 0) {
- put_user_ex(from->si_pid, &to->si_pid);
- put_user_ex(from->si_uid, &to->si_uid);
- put_user_ex(ptr_to_compat(from->si_ptr), &to->si_ptr);
- } else {
- /*
- * First 32bits of unions are always present:
- * si_pid === si_band === si_tid === si_addr(LS half)
- */
- put_user_ex(from->_sifields._pad[0],
- &to->_sifields._pad[0]);
- switch (siginfo_layout(from->si_signo, from->si_code)) {
- case SIL_FAULT:
- if (from->si_signo == SIGBUS &&
- (from->si_code == BUS_MCEERR_AR ||
- from->si_code == BUS_MCEERR_AO))
- put_user_ex(from->si_addr_lsb, &to->si_addr_lsb);
-
- if (from->si_signo == SIGSEGV) {
- if (from->si_code == SEGV_BNDERR) {
- compat_uptr_t lower = (unsigned long)from->si_lower;
- compat_uptr_t upper = (unsigned long)from->si_upper;
- put_user_ex(lower, &to->si_lower);
- put_user_ex(upper, &to->si_upper);
- }
- if (from->si_code == SEGV_PKUERR)
- put_user_ex(from->si_pkey, &to->si_pkey);
- }
- break;
- case SIL_SYS:
- put_user_ex(from->si_syscall, &to->si_syscall);
- put_user_ex(from->si_arch, &to->si_arch);
- break;
- case SIL_CHLD:
- if (!x32_ABI) {
- put_user_ex(from->si_utime, &to->si_utime);
- put_user_ex(from->si_stime, &to->si_stime);
- } else {
- put_user_ex(from->si_utime, &to->_sifields._sigchld_x32._utime);
- put_user_ex(from->si_stime, &to->_sifields._sigchld_x32._stime);
- }
- put_user_ex(from->si_status, &to->si_status);
- /* FALL THROUGH */
- case SIL_KILL:
- put_user_ex(from->si_uid, &to->si_uid);
- break;
- case SIL_POLL:
- put_user_ex(from->si_fd, &to->si_fd);
- break;
- case SIL_TIMER:
- put_user_ex(from->si_overrun, &to->si_overrun);
- put_user_ex(ptr_to_compat(from->si_ptr),
- &to->si_ptr);
- break;
- case SIL_RT:
- put_user_ex(from->si_uid, &to->si_uid);
- put_user_ex(from->si_int, &to->si_int);
- break;
- }
- }
- } put_user_catch(err);
-
- return err;
-}
-
-/* from syscall's path, where we know the ABI */
-int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
-{
- return __copy_siginfo_to_user32(to, from, in_x32_syscall());
-}
-
-int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
-{
- int err = 0;
- u32 ptr32;
-
- if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t)))
- return -EFAULT;
-
- get_user_try {
- get_user_ex(to->si_signo, &from->si_signo);
- get_user_ex(to->si_errno, &from->si_errno);
- get_user_ex(to->si_code, &from->si_code);
-
- get_user_ex(to->si_pid, &from->si_pid);
- get_user_ex(to->si_uid, &from->si_uid);
- get_user_ex(ptr32, &from->si_ptr);
- to->si_ptr = compat_ptr(ptr32);
- } get_user_catch(err);
-
- return err;
-}
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index ed556d50d7ed..6f27facbaa9b 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -75,7 +75,6 @@
#include <asm/uv/uv.h>
#include <linux/mc146818rtc.h>
#include <asm/i8259.h>
-#include <asm/realmode.h>
#include <asm/misc.h>
#include <asm/qspinlock.h>
@@ -934,7 +933,7 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle,
* the targeted processor.
*/
- if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
+ if (x86_platform.legacy.warm_reset) {
pr_debug("Setting warm reset code and vector.\n");
@@ -1006,7 +1005,7 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle,
/* mark "stuck" area as not stuck */
*trampoline_status = 0;
- if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
+ if (x86_platform.legacy.warm_reset) {
/*
* Cleanup possible dangling ends...
*/
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index 749d189f8cd4..774ebafa97c4 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -69,9 +69,12 @@ static struct irqaction irq0 = {
static void __init setup_default_timer_irq(void)
{
- if (!nr_legacy_irqs())
- return;
- setup_irq(0, &irq0);
+ /*
+ * Unconditionally register the legacy timer; even without legacy
+ * PIC/PIT we need this for the HPET0 in legacy replacement mode.
+ */
+ if (setup_irq(0, &irq0))
+ pr_info("Failed to register legacy timer interrupt\n");
}
/* Default timer init function */
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index e169e85db434..fb4302738410 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -25,6 +25,7 @@
#include <asm/geode.h>
#include <asm/apic.h>
#include <asm/intel-family.h>
+#include <asm/i8259.h>
unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);
@@ -363,6 +364,20 @@ static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
unsigned long tscmin, tscmax;
int pitcnt;
+ if (!has_legacy_pic()) {
+ /*
+ * Relies on tsc_early_delay_calibrate() to have given us a
+ * semi-usable udelay(); wait for the same 50ms we would have
+ * spent in the PIT loop below.
+ */
+ udelay(10 * USEC_PER_MSEC);
+ udelay(10 * USEC_PER_MSEC);
+ udelay(10 * USEC_PER_MSEC);
+ udelay(10 * USEC_PER_MSEC);
+ udelay(10 * USEC_PER_MSEC);
+ return ULONG_MAX;
+ }
+
/* Set the Gate high, disable speaker */
outb((inb(0x61) & ~0x02) | 0x01, 0x61);
@@ -487,6 +502,9 @@ static unsigned long quick_pit_calibrate(void)
u64 tsc, delta;
unsigned long d1, d2;
+ if (!has_legacy_pic())
+ return 0;
+
/* Set the Gate high, disable speaker */
outb((inb(0x61) & ~0x02) | 0x01, 0x61);
@@ -988,8 +1006,6 @@ static void __init detect_art(void)
/* clocksource code */
-static struct clocksource clocksource_tsc;
-
static void tsc_resume(struct clocksource *cs)
{
tsc_verify_tsc_adjust(true);
@@ -1040,12 +1056,31 @@ static void tsc_cs_tick_stable(struct clocksource *cs)
/*
* .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc()
*/
+static struct clocksource clocksource_tsc_early = {
+ .name = "tsc-early",
+ .rating = 299,
+ .read = read_tsc,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS |
+ CLOCK_SOURCE_MUST_VERIFY,
+ .archdata = { .vclock_mode = VCLOCK_TSC },
+ .resume = tsc_resume,
+ .mark_unstable = tsc_cs_mark_unstable,
+ .tick_stable = tsc_cs_tick_stable,
+};
+
+/*
+ * Must mark VALID_FOR_HRES early such that when we unregister tsc_early
+ * this one will immediately take over. We will only register if TSC has
+ * been found good.
+ */
static struct clocksource clocksource_tsc = {
.name = "tsc",
.rating = 300,
.read = read_tsc,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS |
+ CLOCK_SOURCE_VALID_FOR_HRES |
CLOCK_SOURCE_MUST_VERIFY,
.archdata = { .vclock_mode = VCLOCK_TSC },
.resume = tsc_resume,
@@ -1169,8 +1204,8 @@ static void tsc_refine_calibration_work(struct work_struct *work)
int cpu;
/* Don't bother refining TSC on unstable systems */
- if (check_tsc_unstable())
- goto out;
+ if (tsc_unstable)
+ return;
/*
* Since the work is started early in boot, we may be
@@ -1222,9 +1257,13 @@ static void tsc_refine_calibration_work(struct work_struct *work)
set_cyc2ns_scale(tsc_khz, cpu, tsc_stop);
out:
+ if (tsc_unstable)
+ return;
+
if (boot_cpu_has(X86_FEATURE_ART))
art_related_clocksource = &clocksource_tsc;
clocksource_register_khz(&clocksource_tsc, tsc_khz);
+ clocksource_unregister(&clocksource_tsc_early);
}
@@ -1233,13 +1272,11 @@ static int __init init_tsc_clocksource(void)
if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_disabled > 0 || !tsc_khz)
return 0;
+ if (check_tsc_unstable())
+ return 0;
+
if (tsc_clocksource_reliable)
clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
- /* lower the rating if we already know its unstable: */
- if (check_tsc_unstable()) {
- clocksource_tsc.rating = 0;
- clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
- }
if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
@@ -1252,6 +1289,7 @@ static int __init init_tsc_clocksource(void)
if (boot_cpu_has(X86_FEATURE_ART))
art_related_clocksource = &clocksource_tsc;
clocksource_register_khz(&clocksource_tsc, tsc_khz);
+ clocksource_unregister(&clocksource_tsc_early);
return 0;
}
@@ -1356,9 +1394,12 @@ void __init tsc_init(void)
check_system_tsc_reliable();
- if (unsynchronized_tsc())
+ if (unsynchronized_tsc()) {
mark_tsc_unstable("TSCs unsynchronized");
+ return;
+ }
+ clocksource_register_khz(&clocksource_tsc_early, tsc_khz);
detect_art();
}
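The tsc-early/tsc handover works because the clocksource core prefers the highest-rated registered source: "tsc" registers at rating 300 before "tsc-early" (299) is unregistered, so there is no gap. A toy sketch of that selection rule, not the kernel's implementation:

#include <stdio.h>

struct cs { const char *name; int rating; };

int main(void)
{
	/* tsc-early (299) registers first; tsc (300) later displaces it. */
	struct cs reg[] = { { "tsc-early", 299 }, { "hpet", 250 }, { "tsc", 300 } };
	struct cs *best = &reg[0];

	for (unsigned int i = 1; i < sizeof(reg) / sizeof(reg[0]); i++)
		if (reg[i].rating > best->rating)
			best = &reg[i];
	printf("selected clocksource: %s\n", best->name);
	return 0;
}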
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index a3755d293a48..85c7ef23d99f 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -528,11 +528,11 @@ static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
return 0;
}
-static int push_ret_address(struct pt_regs *regs, unsigned long ip)
+static int emulate_push_stack(struct pt_regs *regs, unsigned long val)
{
unsigned long new_sp = regs->sp - sizeof_long();
- if (copy_to_user((void __user *)new_sp, &ip, sizeof_long()))
+ if (copy_to_user((void __user *)new_sp, &val, sizeof_long()))
return -EFAULT;
regs->sp = new_sp;
@@ -566,7 +566,7 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs
regs->ip += correction;
} else if (auprobe->defparam.fixups & UPROBE_FIX_CALL) {
regs->sp += sizeof_long(); /* Pop incorrect return address */
- if (push_ret_address(regs, utask->vaddr + auprobe->defparam.ilen))
+ if (emulate_push_stack(regs, utask->vaddr + auprobe->defparam.ilen))
return -ERESTART;
}
/* popf; tell the caller to not touch TF */
@@ -655,7 +655,7 @@ static bool branch_emulate_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
*
* But there is corner case, see the comment in ->post_xol().
*/
- if (push_ret_address(regs, new_ip))
+ if (emulate_push_stack(regs, new_ip))
return false;
} else if (!check_jmp_cond(auprobe, regs)) {
offs = 0;
@@ -665,6 +665,16 @@ static bool branch_emulate_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
return true;
}
+static bool push_emulate_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ unsigned long *src_ptr = (void *)regs + auprobe->push.reg_offset;
+
+ if (emulate_push_stack(regs, *src_ptr))
+ return false;
+ regs->ip += auprobe->push.ilen;
+ return true;
+}
+
static int branch_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
BUG_ON(!branch_is_call(auprobe));
@@ -703,6 +713,10 @@ static const struct uprobe_xol_ops branch_xol_ops = {
.post_xol = branch_post_xol_op,
};
+static const struct uprobe_xol_ops push_xol_ops = {
+ .emulate = push_emulate_op,
+};
+
/* Returns -ENOSYS if branch_xol_ops doesn't handle this insn */
static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
{
@@ -750,6 +764,87 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
return 0;
}
+/* Returns -ENOSYS if push_xol_ops doesn't handle this insn */
+static int push_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
+{
+ u8 opc1 = OPCODE1(insn), reg_offset = 0;
+
+ if (opc1 < 0x50 || opc1 > 0x57)
+ return -ENOSYS;
+
+ if (insn->length > 2)
+ return -ENOSYS;
+ if (insn->length == 2) {
+ /* only support rex_prefix 0x41 (x64 only) */
+#ifdef CONFIG_X86_64
+ if (insn->rex_prefix.nbytes != 1 ||
+ insn->rex_prefix.bytes[0] != 0x41)
+ return -ENOSYS;
+
+ switch (opc1) {
+ case 0x50:
+ reg_offset = offsetof(struct pt_regs, r8);
+ break;
+ case 0x51:
+ reg_offset = offsetof(struct pt_regs, r9);
+ break;
+ case 0x52:
+ reg_offset = offsetof(struct pt_regs, r10);
+ break;
+ case 0x53:
+ reg_offset = offsetof(struct pt_regs, r11);
+ break;
+ case 0x54:
+ reg_offset = offsetof(struct pt_regs, r12);
+ break;
+ case 0x55:
+ reg_offset = offsetof(struct pt_regs, r13);
+ break;
+ case 0x56:
+ reg_offset = offsetof(struct pt_regs, r14);
+ break;
+ case 0x57:
+ reg_offset = offsetof(struct pt_regs, r15);
+ break;
+ }
+#else
+ return -ENOSYS;
+#endif
+ } else {
+ switch (opc1) {
+ case 0x50:
+ reg_offset = offsetof(struct pt_regs, ax);
+ break;
+ case 0x51:
+ reg_offset = offsetof(struct pt_regs, cx);
+ break;
+ case 0x52:
+ reg_offset = offsetof(struct pt_regs, dx);
+ break;
+ case 0x53:
+ reg_offset = offsetof(struct pt_regs, bx);
+ break;
+ case 0x54:
+ reg_offset = offsetof(struct pt_regs, sp);
+ break;
+ case 0x55:
+ reg_offset = offsetof(struct pt_regs, bp);
+ break;
+ case 0x56:
+ reg_offset = offsetof(struct pt_regs, si);
+ break;
+ case 0x57:
+ reg_offset = offsetof(struct pt_regs, di);
+ break;
+ }
+ }
+
+ auprobe->push.reg_offset = reg_offset;
+ auprobe->push.ilen = insn->length;
+ auprobe->ops = &push_xol_ops;
+ return 0;
+}
+
/**
* arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
* @mm: the probed address space.
@@ -771,6 +866,10 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
if (ret != -ENOSYS)
return ret;
+ ret = push_setup_xol_ops(auprobe, &insn);
+ if (ret != -ENOSYS)
+ return ret;
+
/*
* Figure out which fixups default_post_xol_op() will need to perform,
* and annotate defparam->fixups accordingly.
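The decode rule implemented by push_setup_xol_ops() above: single-byte pushes are opcodes 0x50+reg, and a 0x41 REX.B prefix selects r8-r15 on 64-bit. A hedged stand-alone sketch of just that mapping (register names only; this is not the kernel's instruction decoder):

#include <stdio.h>

static const char *push_reg(const unsigned char *insn, int len)
{
	static const char *low[]  = { "ax", "cx", "dx", "bx",
				      "sp", "bp", "si", "di" };
	static const char *high[] = { "r8", "r9", "r10", "r11",
				      "r12", "r13", "r14", "r15" };
	unsigned char opc = insn[len - 1];

	if (opc < 0x50 || opc > 0x57)
		return NULL;
	if (len == 2 && insn[0] == 0x41)	/* REX.B: upper register bank */
		return high[opc - 0x50];
	if (len == 1)
		return low[opc - 0x50];
	return NULL;
}

int main(void)
{
	unsigned char a[] = { 0x55 };		/* push %bp */
	unsigned char b[] = { 0x41, 0x51 };	/* push %r9 */

	printf("push %%%s, push %%%s\n", push_reg(a, 1), push_reg(b, 2));
	return 0;
}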
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index b514b2b2845a..290ecf711aec 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -25,6 +25,7 @@
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
#include <asm/debugreg.h>
+#include <asm/nospec-branch.h>
#include "x86.h"
#include "tss.h"
@@ -1021,8 +1022,8 @@ static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
- asm("push %[flags]; popf; call *%[fastop]"
- : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
+ asm("push %[flags]; popf; " CALL_NOSPEC
+ : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));
return rc;
}
@@ -5335,9 +5336,9 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
if (!(ctxt->d & ByteOp))
fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
- asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
+ asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
: "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
- [fastop]"+S"(fop), ASM_CALL_CONSTRAINT
+ [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
: "c"(ctxt->src2.val));
ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c829d89e2e63..a8b96dc4cd83 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -9129,14 +9129,14 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
#endif
"pushf\n\t"
__ASM_SIZE(push) " $%c[cs]\n\t"
- "call *%[entry]\n\t"
+ CALL_NOSPEC
:
#ifdef CONFIG_X86_64
[sp]"=&r"(tmp),
#endif
ASM_CALL_CONSTRAINT
:
- [entry]"r"(entry),
+ THUNK_TARGET(entry),
[ss]"i"(__KERNEL_DS),
[cs]"i"(__KERNEL_CS)
);
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 25a972c61b0a..91e9700cc6dc 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -28,6 +28,7 @@ lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o
lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
lib-$(CONFIG_RETPOLINE) += retpoline.o
+OBJECT_FILES_NON_STANDARD_retpoline.o :=y
obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index 4846eff7e4c8..f5b7f1b3b6d7 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -162,7 +162,7 @@ void __delay(unsigned long loops)
}
EXPORT_SYMBOL(__delay);
-inline void __const_udelay(unsigned long xloops)
+void __const_udelay(unsigned long xloops)
{
unsigned long lpj = this_cpu_read(cpu_info.loops_per_jiffy) ? : loops_per_jiffy;
int d0;
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index dfb2ba91b670..480edc3a5e03 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -7,6 +7,7 @@
#include <asm/alternative-asm.h>
#include <asm/export.h>
#include <asm/nospec-branch.h>
+#include <asm/bitsperlong.h>
.macro THUNK reg
.section .text.__x86.indirect_thunk
@@ -36,7 +37,6 @@ GENERATE_THUNK(_ASM_DX)
GENERATE_THUNK(_ASM_SI)
GENERATE_THUNK(_ASM_DI)
GENERATE_THUNK(_ASM_BP)
-GENERATE_THUNK(_ASM_SP)
#ifdef CONFIG_64BIT
GENERATE_THUNK(r8)
GENERATE_THUNK(r9)
@@ -47,3 +47,58 @@ GENERATE_THUNK(r13)
GENERATE_THUNK(r14)
GENERATE_THUNK(r15)
#endif
+
+/*
+ * Fill the CPU return stack buffer.
+ *
+ * Each entry in the RSB, if used for a speculative 'ret', contains an
+ * infinite 'pause; lfence; jmp' loop to capture speculative execution.
+ *
+ * This is required in various cases for retpoline and IBRS-based
+ * mitigations for the Spectre variant 2 vulnerability. Sometimes to
+ * eliminate potentially bogus entries from the RSB, and sometimes
+ * purely to ensure that it doesn't get empty, which on some CPUs would
+ * allow predictions from other (unwanted!) sources to be used.
+ *
+ * Google experimented with loop-unrolling and this turned out to be
+ * the optimal version - two calls, each with their own speculation
+ * trap should their return address end up getting used, in a loop.
+ */
+.macro STUFF_RSB nr:req sp:req
+ mov $(\nr / 2), %_ASM_BX
+ .align 16
+771:
+ call 772f
+773: /* speculation trap */
+ pause
+ lfence
+ jmp 773b
+ .align 16
+772:
+ call 774f
+775: /* speculation trap */
+ pause
+ lfence
+ jmp 775b
+ .align 16
+774:
+ dec %_ASM_BX
+ jnz 771b
+ add $((BITS_PER_LONG/8) * \nr), \sp
+.endm
+
+#define RSB_FILL_LOOPS 16 /* To avoid underflow */
+
+ENTRY(__fill_rsb)
+ STUFF_RSB RSB_FILL_LOOPS, %_ASM_SP
+ ret
+END(__fill_rsb)
+EXPORT_SYMBOL_GPL(__fill_rsb)
+
+#define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */
+
+ENTRY(__clear_rsb)
+ STUFF_RSB RSB_CLEAR_LOOPS, %_ASM_SP
+ ret
+END(__clear_rsb)
+EXPORT_SYMBOL_GPL(__clear_rsb)
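The closing add in STUFF_RSB reclaims the stack consumed by the \nr calls, none of which return: one word of BITS_PER_LONG/8 bytes per call. A worked check of that arithmetic for the two loop counts above:

#include <stdio.h>

int main(void)
{
	int word = 64 / 8;	/* BITS_PER_LONG/8 on x86-64 */

	/* 16 calls for __fill_rsb, 32 for __clear_rsb. */
	printf("__fill_rsb reclaims %d bytes\n", word * 16);
	printf("__clear_rsb reclaims %d bytes\n", word * 32);
	return 0;
}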
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 9fe656c42aa5..45f5d6cf65ae 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -21,16 +21,16 @@ ex_fixup_handler(const struct exception_table_entry *x)
return (ex_handler_t)((unsigned long)&x->handler + x->handler);
}
-bool ex_handler_default(const struct exception_table_entry *fixup,
- struct pt_regs *regs, int trapnr)
+__visible bool ex_handler_default(const struct exception_table_entry *fixup,
+ struct pt_regs *regs, int trapnr)
{
regs->ip = ex_fixup_addr(fixup);
return true;
}
EXPORT_SYMBOL(ex_handler_default);
-bool ex_handler_fault(const struct exception_table_entry *fixup,
- struct pt_regs *regs, int trapnr)
+__visible bool ex_handler_fault(const struct exception_table_entry *fixup,
+ struct pt_regs *regs, int trapnr)
{
regs->ip = ex_fixup_addr(fixup);
regs->ax = trapnr;
@@ -42,8 +42,8 @@ EXPORT_SYMBOL_GPL(ex_handler_fault);
* Handler for UD0 exception following a failed test against the
* result of a refcount inc/dec/add/sub.
*/
-bool ex_handler_refcount(const struct exception_table_entry *fixup,
- struct pt_regs *regs, int trapnr)
+__visible bool ex_handler_refcount(const struct exception_table_entry *fixup,
+ struct pt_regs *regs, int trapnr)
{
/* First unconditionally saturate the refcount. */
*(int *)regs->cx = INT_MIN / 2;
@@ -95,8 +95,8 @@ EXPORT_SYMBOL(ex_handler_refcount);
* of vulnerability by restoring from the initial state (essentially, zeroing
* out all the FPU registers) if we can't restore from the task's FPU state.
*/
-bool ex_handler_fprestore(const struct exception_table_entry *fixup,
- struct pt_regs *regs, int trapnr)
+__visible bool ex_handler_fprestore(const struct exception_table_entry *fixup,
+ struct pt_regs *regs, int trapnr)
{
regs->ip = ex_fixup_addr(fixup);
@@ -108,8 +108,8 @@ bool ex_handler_fprestore(const struct exception_table_entry *fixup,
}
EXPORT_SYMBOL_GPL(ex_handler_fprestore);
-bool ex_handler_ext(const struct exception_table_entry *fixup,
- struct pt_regs *regs, int trapnr)
+__visible bool ex_handler_ext(const struct exception_table_entry *fixup,
+ struct pt_regs *regs, int trapnr)
{
/* Special hack for uaccess_err */
current->thread.uaccess_err = 1;
@@ -118,8 +118,8 @@ bool ex_handler_ext(const struct exception_table_entry *fixup,
}
EXPORT_SYMBOL(ex_handler_ext);
-bool ex_handler_rdmsr_unsafe(const struct exception_table_entry *fixup,
- struct pt_regs *regs, int trapnr)
+__visible bool ex_handler_rdmsr_unsafe(const struct exception_table_entry *fixup,
+ struct pt_regs *regs, int trapnr)
{
if (pr_warn_once("unchecked MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pF)\n",
(unsigned int)regs->cx, regs->ip, (void *)regs->ip))
@@ -133,8 +133,8 @@ bool ex_handler_rdmsr_unsafe(const struct exception_table_entry *fixup,
}
EXPORT_SYMBOL(ex_handler_rdmsr_unsafe);
-bool ex_handler_wrmsr_unsafe(const struct exception_table_entry *fixup,
- struct pt_regs *regs, int trapnr)
+__visible bool ex_handler_wrmsr_unsafe(const struct exception_table_entry *fixup,
+ struct pt_regs *regs, int trapnr)
{
if (pr_warn_once("unchecked MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pF)\n",
(unsigned int)regs->cx, (unsigned int)regs->dx,
@@ -147,8 +147,8 @@ bool ex_handler_wrmsr_unsafe(const struct exception_table_entry *fixup,
}
EXPORT_SYMBOL(ex_handler_wrmsr_unsafe);
-bool ex_handler_clear_fs(const struct exception_table_entry *fixup,
- struct pt_regs *regs, int trapnr)
+__visible bool ex_handler_clear_fs(const struct exception_table_entry *fixup,
+ struct pt_regs *regs, int trapnr)
{
if (static_cpu_has(X86_BUG_NULL_SEG))
asm volatile ("mov %0, %%fs" : : "rm" (__USER_DS));
@@ -157,7 +157,7 @@ bool ex_handler_clear_fs(const struct exception_table_entry *fixup,
}
EXPORT_SYMBOL(ex_handler_clear_fs);
-bool ex_has_fault_handler(unsigned long ip)
+__visible bool ex_has_fault_handler(unsigned long ip)
{
const struct exception_table_entry *e;
ex_handler_t handler;
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index b3e40773dce0..800de815519c 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -439,18 +439,13 @@ static noinline int vmalloc_fault(unsigned long address)
if (pgd_none(*pgd_ref))
return -1;
- if (pgd_none(*pgd)) {
- set_pgd(pgd, *pgd_ref);
- arch_flush_lazy_mmu_mode();
- } else if (CONFIG_PGTABLE_LEVELS > 4) {
- /*
- * With folded p4d, pgd_none() is always false, so the pgd may
- * point to an empty page table entry and pgd_page_vaddr()
- * will return garbage.
- *
- * We will do the correct sanity check on the p4d level.
- */
- BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+ if (CONFIG_PGTABLE_LEVELS > 4) {
+ if (pgd_none(*pgd)) {
+ set_pgd(pgd, *pgd_ref);
+ arch_flush_lazy_mmu_mode();
+ } else {
+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+ }
}
/* With 4-level paging, copying happens on the p4d level. */
@@ -459,7 +454,7 @@ static noinline int vmalloc_fault(unsigned long address)
if (p4d_none(*p4d_ref))
return -1;
- if (p4d_none(*p4d)) {
+ if (p4d_none(*p4d) && CONFIG_PGTABLE_LEVELS == 4) {
set_p4d(p4d, *p4d_ref);
arch_flush_lazy_mmu_mode();
} else {
@@ -470,6 +465,7 @@ static noinline int vmalloc_fault(unsigned long address)
* Below here mismatches are bugs because these lower tables
* are shared:
*/
+ BUILD_BUG_ON(CONFIG_PGTABLE_LEVELS < 4);
pud = pud_offset(p4d, address);
pud_ref = pud_offset(p4d_ref, address);
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index e1d61e8500f9..1a53071e2e17 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -15,7 +15,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/mm.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
#include <linux/swiotlb.h>
#include <linux/mem_encrypt.h>
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index a1561957dccb..5bfe61a5e8e3 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -151,6 +151,34 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
local_irq_restore(flags);
}
+static void sync_current_stack_to_mm(struct mm_struct *mm)
+{
+ unsigned long sp = current_stack_pointer;
+ pgd_t *pgd = pgd_offset(mm, sp);
+
+ if (CONFIG_PGTABLE_LEVELS > 4) {
+ if (unlikely(pgd_none(*pgd))) {
+ pgd_t *pgd_ref = pgd_offset_k(sp);
+
+ set_pgd(pgd, *pgd_ref);
+ }
+ } else {
+ /*
+ * "pgd" is faked. The top level entries are "p4d"s, so sync
+ * the p4d. This compiles to approximately the same code as
+ * the 5-level case.
+ */
+ p4d_t *p4d = p4d_offset(pgd, sp);
+
+ if (unlikely(p4d_none(*p4d))) {
+ pgd_t *pgd_ref = pgd_offset_k(sp);
+ p4d_t *p4d_ref = p4d_offset(pgd_ref, sp);
+
+ set_p4d(p4d, *p4d_ref);
+ }
+ }
+}
+
void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
@@ -226,11 +254,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
* mapped in the new pgd, we'll double-fault. Forcibly
* map it.
*/
- unsigned int index = pgd_index(current_stack_pointer);
- pgd_t *pgd = next->pgd + index;
-
- if (unlikely(pgd_none(*pgd)))
- set_pgd(pgd, init_mm.pgd[index]);
+ sync_current_stack_to_mm(next);
}
/* Stop remote flushes for the previous mm */
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
index 511921045312..43867bc85368 100644
--- a/arch/x86/pci/intel_mid_pci.c
+++ b/arch/x86/pci/intel_mid_pci.c
@@ -300,6 +300,7 @@ int __init intel_mid_pci_init(void)
pci_root_ops = intel_mid_pci_ops;
pci_soc_mode = 1;
/* Continue with standard init */
+ acpi_noirq_set();
return 1;
}
diff --git a/arch/x86/pci/sta2x11-fixup.c b/arch/x86/pci/sta2x11-fixup.c
index 53d600217973..75577c1490c4 100644
--- a/arch/x86/pci/sta2x11-fixup.c
+++ b/arch/x86/pci/sta2x11-fixup.c
@@ -26,6 +26,7 @@
#include <linux/pci_ids.h>
#include <linux/export.h>
#include <linux/list.h>
+#include <linux/dma-direct.h>
#include <asm/iommu.h>
#define STA2X11_SWIOTLB_SIZE (4*1024*1024)
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 2dd15e967c3f..c310a8284358 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -25,7 +25,6 @@
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/ioport.h>
-#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/efi.h>
#include <linux/uaccess.h>
diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
index 86676cec99a1..2c67bae6bb53 100644
--- a/arch/x86/platform/intel-mid/intel-mid.c
+++ b/arch/x86/platform/intel-mid/intel-mid.c
@@ -194,7 +194,7 @@ void __init x86_intel_mid_early_setup(void)
x86_platform.calibrate_tsc = intel_mid_calibrate_tsc;
x86_platform.get_nmi_reason = intel_mid_get_nmi_reason;
- x86_init.pci.init = intel_mid_pci_init;
+ x86_init.pci.arch_init = intel_mid_pci_init;
x86_init.pci.fixup_irqs = x86_init_noop;
legacy_pic = &null_legacy_pic;
diff --git a/arch/x86/platform/intel-mid/sfi.c b/arch/x86/platform/intel-mid/sfi.c
index 19b43e3a9f0f..7be1e1fe9ae3 100644
--- a/arch/x86/platform/intel-mid/sfi.c
+++ b/arch/x86/platform/intel-mid/sfi.c
@@ -96,8 +96,7 @@ int __init sfi_parse_mtmr(struct sfi_table_header *table)
pentry->freq_hz, pentry->irq);
mp_irq.type = MP_INTSRC;
mp_irq.irqtype = mp_INT;
- /* triggering mode edge bit 2-3, active high polarity bit 0-1 */
- mp_irq.irqflag = 5;
+ mp_irq.irqflag = MP_IRQTRIG_EDGE | MP_IRQPOL_ACTIVE_HIGH;
mp_irq.srcbus = MP_BUS_ISA;
mp_irq.srcbusirq = pentry->irq; /* IRQ */
mp_irq.dstapic = MP_APIC_ALL;
@@ -168,7 +167,7 @@ int __init sfi_parse_mrtc(struct sfi_table_header *table)
totallen, (u32)pentry->phys_addr, pentry->irq);
mp_irq.type = MP_INTSRC;
mp_irq.irqtype = mp_INT;
- mp_irq.irqflag = 0xf; /* level trigger and active low */
+ mp_irq.irqflag = MP_IRQTRIG_LEVEL | MP_IRQPOL_ACTIVE_LOW;
mp_irq.srcbus = MP_BUS_ISA;
mp_irq.srcbusirq = pentry->irq; /* IRQ */
mp_irq.dstapic = MP_APIC_ALL;
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 8538a6723171..c2e9285d1bf1 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1751,7 +1751,8 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
uv1 = 1;
/* the 14-bit pnode */
- write_mmr_descriptor_base(pnode, (n << UV_DESC_PSHIFT | m));
+ write_mmr_descriptor_base(pnode,
+ (n << UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT | m));
/*
* Initializing all 8 (ITEMS_PER_DESC) descriptors for each
* cpu even though we only use the first one; one descriptor can
diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
index 972b8e8d939c..09af7ff53044 100644
--- a/arch/x86/tools/Makefile
+++ b/arch/x86/tools/Makefile
@@ -13,28 +13,28 @@ else
posttest_64bit = -n
endif
-distill_awk = $(srctree)/arch/x86/tools/distill.awk
+reformatter = $(srctree)/arch/x86/tools/objdump_reformat.awk
chkobjdump = $(srctree)/arch/x86/tools/chkobjdump.awk
quiet_cmd_posttest = TEST $@
- cmd_posttest = ($(OBJDUMP) -v | $(AWK) -f $(chkobjdump)) || $(OBJDUMP) -d -j .text $(objtree)/vmlinux | $(AWK) -f $(distill_awk) | $(obj)/test_get_len $(posttest_64bit) $(posttest_verbose)
+ cmd_posttest = ($(OBJDUMP) -v | $(AWK) -f $(chkobjdump)) || $(OBJDUMP) -d -j .text $(objtree)/vmlinux | $(AWK) -f $(reformatter) | $(obj)/insn_decoder_test $(posttest_64bit) $(posttest_verbose)
quiet_cmd_sanitytest = TEST $@
cmd_sanitytest = $(obj)/insn_sanity $(posttest_64bit) -m 1000000
-posttest: $(obj)/test_get_len vmlinux $(obj)/insn_sanity
+posttest: $(obj)/insn_decoder_test vmlinux $(obj)/insn_sanity
$(call cmd,posttest)
$(call cmd,sanitytest)
-hostprogs-y += test_get_len insn_sanity
+hostprogs-y += insn_decoder_test insn_sanity
# -I needed for generated C source and C source which is in the kernel tree.
-HOSTCFLAGS_test_get_len.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x86/include/uapi/ -I$(srctree)/arch/x86/include/ -I$(srctree)/arch/x86/lib/ -I$(srctree)/include/uapi/
+HOSTCFLAGS_insn_decoder_test.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x86/include/uapi/ -I$(srctree)/arch/x86/include/ -I$(srctree)/arch/x86/lib/ -I$(srctree)/include/uapi/
HOSTCFLAGS_insn_sanity.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x86/include/ -I$(srctree)/arch/x86/lib/ -I$(srctree)/include/
# Dependencies are also needed.
-$(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
+$(obj)/insn_decoder_test.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
$(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
diff --git a/arch/x86/tools/test_get_len.c b/arch/x86/tools/insn_decoder_test.c
index ecf31e0358c8..a3b4fd954931 100644
--- a/arch/x86/tools/test_get_len.c
+++ b/arch/x86/tools/insn_decoder_test.c
@@ -9,10 +9,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
* Copyright (C) IBM Corporation, 2009
*/
@@ -21,6 +17,7 @@
#include <string.h>
#include <assert.h>
#include <unistd.h>
+#include <stdarg.h>
#define unlikely(cond) (cond)
@@ -33,7 +30,7 @@
* particular. See if insn_get_length() and the disassembler agree
* on the length of each instruction in an ELF disassembly.
*
- * Usage: objdump -d a.out | awk -f distill.awk | ./test_get_len
+ * Usage: objdump -d a.out | awk -f objdump_reformat.awk | ./insn_decoder_test
*/
const char *prog;
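
In other words, the test pipes reformatted objdump output through the kernel's own instruction decoder and compares lengths. A condensed sketch of the per-instruction check (struct insn, insn_init() and insn_get_length() come from arch/x86/lib/insn.c via asm/insn.h; check_one() is an illustrative name, not a function in this file):

    /* Condensed sketch of the check performed in main() below; "nb" is
     * the byte count objdump printed, insn_buf holds the raw bytes.
     * Requires asm/insn.h and the decoder sources listed in the Makefile. */
    static int check_one(unsigned char *insn_buf, int buf_len, int nb, int x86_64)
    {
        struct insn insn;

        insn_init(&insn, insn_buf, buf_len, x86_64);
        insn_get_length(&insn);
        return insn.length == nb;  /* 0 means decoder and objdump disagree */
    }
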
@@ -42,8 +39,8 @@ static int x86_64;
static void usage(void)
{
- fprintf(stderr, "Usage: objdump -d a.out | awk -f distill.awk |"
- " %s [-y|-n] [-v]\n", prog);
+ fprintf(stderr, "Usage: objdump -d a.out | awk -f objdump_reformat.awk"
+ " | %s [-y|-n] [-v]\n", prog);
fprintf(stderr, "\t-y 64bit mode\n");
fprintf(stderr, "\t-n 32bit mode\n");
fprintf(stderr, "\t-v verbose mode\n");
@@ -52,10 +49,21 @@ static void usage(void)
static void malformed_line(const char *line, int line_nr)
{
- fprintf(stderr, "%s: malformed line %d:\n%s", prog, line_nr, line);
+ fprintf(stderr, "%s: error: malformed line %d:\n%s",
+ prog, line_nr, line);
exit(3);
}
+static void pr_warn(const char *fmt, ...)
+{
+ va_list ap;
+
+ fprintf(stderr, "%s: warning: ", prog);
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ va_end(ap);
+}
+
static void dump_field(FILE *fp, const char *name, const char *indent,
struct insn_field *field)
{
@@ -153,21 +161,20 @@ int main(int argc, char **argv)
insn_get_length(&insn);
if (insn.length != nb) {
warnings++;
- fprintf(stderr, "Warning: %s found difference at %s\n",
- prog, sym);
- fprintf(stderr, "Warning: %s", line);
- fprintf(stderr, "Warning: objdump says %d bytes, but "
- "insn_get_length() says %d\n", nb,
- insn.length);
+ pr_warn("Found an x86 instruction decoder bug, "
+ "please report this.\n", sym);
+ pr_warn("%s", line);
+ pr_warn("objdump says %d bytes, but insn_get_length() "
+ "says %d\n", nb, insn.length);
if (verbose)
dump_insn(stderr, &insn);
}
}
if (warnings)
- fprintf(stderr, "Warning: decoded and checked %d"
- " instructions with %d warnings\n", insns, warnings);
+ pr_warn("Decoded and checked %d instructions with %d "
+ "failures\n", insns, warnings);
else
- fprintf(stdout, "Success: decoded and checked %d"
- " instructions\n", insns);
+ fprintf(stdout, "%s: success: Decoded and checked %d"
+ " instructions\n", prog, insns);
return 0;
}
diff --git a/arch/x86/tools/distill.awk b/arch/x86/tools/objdump_reformat.awk
index e0edeccc1429..f418c91b71f0 100644
--- a/arch/x86/tools/distill.awk
+++ b/arch/x86/tools/objdump_reformat.awk
@@ -1,7 +1,7 @@
#!/bin/awk -f
# SPDX-License-Identifier: GPL-2.0
-# Usage: objdump -d a.out | awk -f distill.awk | ./test_get_len
-# Distills the disassembly as follows:
+# Usage: objdump -d a.out | awk -f objdump_reformat.awk | ./insn_decoder_test
+# Reformats the disassembly as follows:
# - Removes all lines except the disassembled instructions.
# - For instructions that exceed 1 line (7 bytes), crams all the hex bytes
# into a single line.
diff --git a/arch/x86/xen/mmu_hvm.c b/arch/x86/xen/mmu_hvm.c
index 2cfcfe4f6b2a..dd2ad82eee80 100644
--- a/arch/x86/xen/mmu_hvm.c
+++ b/arch/x86/xen/mmu_hvm.c
@@ -75,6 +75,6 @@ void __init xen_hvm_init_mmu_ops(void)
if (is_pagetable_dying_supported())
pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
#ifdef CONFIG_PROC_VMCORE
- register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram);
+ WARN_ON(register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram));
#endif
}
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 02f3445a2b5f..cd97a62394e7 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -23,8 +23,6 @@ static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(char *, irq_name);
static bool xen_pvspin = true;
-#include <asm/qspinlock.h>
-
static void xen_qlock_kick(int cpu)
{
int irq = per_cpu(lock_kicker_irq, cpu);
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 8bc52f749f20..c921e8bccdc8 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -15,6 +15,9 @@ config XTENSA
select GENERIC_IRQ_SHOW
select GENERIC_PCI_IOMAP
select GENERIC_SCHED_CLOCK
+ select GENERIC_STRNCPY_FROM_USER if KASAN
+ select HAVE_ARCH_KASAN if MMU
+ select HAVE_CC_STACKPROTECTOR
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_API_DEBUG
select HAVE_DMA_CONTIGUOUS
@@ -79,6 +82,10 @@ config VARIANT_IRQ_SWITCH
config HAVE_XTENSA_GPIO32
def_bool n
+config KASAN_SHADOW_OFFSET
+ hex
+ default 0x6e400000
+
menu "Processor type and features"
choice
diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile
index 7ee02fe4a63d..3a934b72a272 100644
--- a/arch/xtensa/Makefile
+++ b/arch/xtensa/Makefile
@@ -42,10 +42,11 @@ export PLATFORM
# temporarily until string.h is fixed
KBUILD_CFLAGS += -ffreestanding -D__linux__
-
-KBUILD_CFLAGS += -pipe -mlongcalls
-
+KBUILD_CFLAGS += -pipe -mlongcalls -mtext-section-literals
KBUILD_CFLAGS += $(call cc-option,-mforce-no-pic,)
+KBUILD_CFLAGS += $(call cc-option,-mno-serialize-volatile,)
+
+KBUILD_AFLAGS += -mlongcalls -mtext-section-literals
ifneq ($(CONFIG_LD_NO_RELAX),)
LDFLAGS := --no-relax
diff --git a/arch/xtensa/boot/boot-redboot/bootstrap.S b/arch/xtensa/boot/boot-redboot/bootstrap.S
index bf7fabe6310d..bbf3b4b080cd 100644
--- a/arch/xtensa/boot/boot-redboot/bootstrap.S
+++ b/arch/xtensa/boot/boot-redboot/bootstrap.S
@@ -42,6 +42,7 @@ __start_a0:
.align 4
.section .text, "ax"
+ .literal_position
.begin literal_prefix .text
/* put literals in here! */
diff --git a/arch/xtensa/boot/lib/Makefile b/arch/xtensa/boot/lib/Makefile
index d2a7f48564a4..355127faade1 100644
--- a/arch/xtensa/boot/lib/Makefile
+++ b/arch/xtensa/boot/lib/Makefile
@@ -15,6 +15,12 @@ CFLAGS_REMOVE_inftrees.o = -pg
CFLAGS_REMOVE_inffast.o = -pg
endif
+KASAN_SANITIZE := n
+
+CFLAGS_REMOVE_inflate.o += -fstack-protector -fstack-protector-strong
+CFLAGS_REMOVE_zmem.o += -fstack-protector -fstack-protector-strong
+CFLAGS_REMOVE_inftrees.o += -fstack-protector -fstack-protector-strong
+CFLAGS_REMOVE_inffast.o += -fstack-protector -fstack-protector-strong
quiet_cmd_copy_zlib = COPY $@
cmd_copy_zlib = cat $< > $@
diff --git a/arch/xtensa/configs/audio_kc705_defconfig b/arch/xtensa/configs/audio_kc705_defconfig
index 8d16925765cb..2bf964df37ba 100644
--- a/arch/xtensa/configs/audio_kc705_defconfig
+++ b/arch/xtensa/configs/audio_kc705_defconfig
@@ -1,7 +1,6 @@
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_FHANDLE=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IRQ_TIME_ACCOUNTING=y
diff --git a/arch/xtensa/configs/cadence_csp_defconfig b/arch/xtensa/configs/cadence_csp_defconfig
index f2d3094aa1d1..3221b7053fa3 100644
--- a/arch/xtensa/configs/cadence_csp_defconfig
+++ b/arch/xtensa/configs/cadence_csp_defconfig
@@ -1,7 +1,6 @@
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_USELIB=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IRQ_TIME_ACCOUNTING=y
diff --git a/arch/xtensa/configs/generic_kc705_defconfig b/arch/xtensa/configs/generic_kc705_defconfig
index 744adeaf2945..985fa8546e4e 100644
--- a/arch/xtensa/configs/generic_kc705_defconfig
+++ b/arch/xtensa/configs/generic_kc705_defconfig
@@ -1,7 +1,6 @@
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_FHANDLE=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IRQ_TIME_ACCOUNTING=y
diff --git a/arch/xtensa/configs/nommu_kc705_defconfig b/arch/xtensa/configs/nommu_kc705_defconfig
index 78c2529d0459..624f9b3a3878 100644
--- a/arch/xtensa/configs/nommu_kc705_defconfig
+++ b/arch/xtensa/configs/nommu_kc705_defconfig
@@ -1,7 +1,6 @@
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_FHANDLE=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IRQ_TIME_ACCOUNTING=y
diff --git a/arch/xtensa/configs/smp_lx200_defconfig b/arch/xtensa/configs/smp_lx200_defconfig
index 14e3ca353ac8..11fed6c06a7c 100644
--- a/arch/xtensa/configs/smp_lx200_defconfig
+++ b/arch/xtensa/configs/smp_lx200_defconfig
@@ -1,7 +1,6 @@
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_FHANDLE=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IRQ_TIME_ACCOUNTING=y
diff --git a/arch/xtensa/include/asm/asmmacro.h b/arch/xtensa/include/asm/asmmacro.h
index 746dcc8b5abc..7f2ae5872151 100644
--- a/arch/xtensa/include/asm/asmmacro.h
+++ b/arch/xtensa/include/asm/asmmacro.h
@@ -150,5 +150,45 @@
__endl \ar \as
.endm
+/* Load or store instructions that may cause exceptions use the EX macro. */
+
+#define EX(handler) \
+ .section __ex_table, "a"; \
+ .word 97f, handler; \
+ .previous \
+97:
+
+
+/*
+ * Extract the unaligned word that is split between the two registers w0 and
+ * w1 into r, regardless of machine endianness. SAR must be loaded with the
+ * starting bit of the word (see __ssa8).
+ */
+
+ .macro __src_b r, w0, w1
+#ifdef __XTENSA_EB__
+ src \r, \w0, \w1
+#else
+ src \r, \w1, \w0
+#endif
+ .endm
+
+/*
+ * Load the 2 lowest address bits of r into SAR so that __src_b can extract
+ * the unaligned word starting at r from two registers loaded from the
+ * consecutive aligned addresses covering r, regardless of machine endianness.
+ *
+ * r 0 1 2 3
+ * LE SAR 0 8 16 24
+ * BE SAR 32 24 16 8
+ */
+
+ .macro __ssa8 r
+#ifdef __XTENSA_EB__
+ ssa8b \r
+#else
+ ssa8l \r
+#endif
+ .endm
#endif /* _XTENSA_ASMMACRO_H */
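
In C terms, the __ssa8/__src_b pair performs an endian-aware unaligned load out of two aligned words. A minimal sketch of the little-endian case (load_unaligned() is an illustrative name, not a kernel function; on big-endian the shift directions swap, which is what the ssa8b/src operand flip handles):

    #include <stdint.h>

    /* Rough C model of __ssa8 + __src_b, little-endian: extract the
     * 32-bit word starting at unaligned address p from the two aligned
     * words that cover it. w1 is unused when p is already aligned. */
    static uint32_t load_unaligned(const uint8_t *p)
    {
        const uint32_t *a = (const uint32_t *)((uintptr_t)p & ~(uintptr_t)3);
        uint32_t w0 = a[0], w1 = a[1];
        unsigned int sar = ((uintptr_t)p & 3) * 8;  /* what ssa8l loads */

        return sar ? (w0 >> sar) | (w1 << (32 - sar)) : w0;
    }
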
diff --git a/arch/xtensa/include/asm/current.h b/arch/xtensa/include/asm/current.h
index 47e46dcf5d49..5d98a7ad4251 100644
--- a/arch/xtensa/include/asm/current.h
+++ b/arch/xtensa/include/asm/current.h
@@ -11,6 +11,8 @@
#ifndef _XTENSA_CURRENT_H
#define _XTENSA_CURRENT_H
+#include <asm/thread_info.h>
+
#ifndef __ASSEMBLY__
#include <linux/thread_info.h>
@@ -26,8 +28,6 @@ static inline struct task_struct *get_current(void)
#else
-#define CURRENT_SHIFT 13
-
#define GET_CURRENT(reg,sp) \
GET_THREAD_INFO(reg,sp); \
l32i reg, reg, TI_TASK \
diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h
index 153bf2370988..44098800dad7 100644
--- a/arch/xtensa/include/asm/dma-mapping.h
+++ b/arch/xtensa/include/asm/dma-mapping.h
@@ -23,14 +23,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
return &xtensa_dma_map_ops;
}
-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
- return (dma_addr_t)paddr;
-}
-
-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
-{
- return (phys_addr_t)daddr;
-}
-
#endif /* _XTENSA_DMA_MAPPING_H */
diff --git a/arch/xtensa/include/asm/fixmap.h b/arch/xtensa/include/asm/fixmap.h
index 0d30403b6c95..7e25c1b50ac0 100644
--- a/arch/xtensa/include/asm/fixmap.h
+++ b/arch/xtensa/include/asm/fixmap.h
@@ -44,7 +44,7 @@ enum fixed_addresses {
__end_of_fixed_addresses
};
-#define FIXADDR_TOP (VMALLOC_START - PAGE_SIZE)
+#define FIXADDR_TOP (XCHAL_KSEG_CACHED_VADDR - PAGE_SIZE)
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK)
@@ -63,7 +63,7 @@ static __always_inline unsigned long fix_to_virt(const unsigned int idx)
* table.
*/
BUILD_BUG_ON(FIXADDR_START <
- XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE);
+ TLBTEMP_BASE_1 + TLBTEMP_SIZE);
BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
return __fix_to_virt(idx);
}
diff --git a/arch/xtensa/include/asm/futex.h b/arch/xtensa/include/asm/futex.h
index eaaf1ebcc7a4..5bfbc1c401d4 100644
--- a/arch/xtensa/include/asm/futex.h
+++ b/arch/xtensa/include/asm/futex.h
@@ -92,7 +92,6 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
int ret = 0;
- u32 prev;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
@@ -103,26 +102,24 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
__asm__ __volatile__ (
" # futex_atomic_cmpxchg_inatomic\n"
- "1: l32i %1, %3, 0\n"
- " mov %0, %5\n"
- " wsr %1, scompare1\n"
- "2: s32c1i %0, %3, 0\n"
- "3:\n"
+ " wsr %5, scompare1\n"
+ "1: s32c1i %1, %4, 0\n"
+ " s32i %1, %6, 0\n"
+ "2:\n"
" .section .fixup,\"ax\"\n"
" .align 4\n"
- "4: .long 3b\n"
- "5: l32r %1, 4b\n"
- " movi %0, %6\n"
+ "3: .long 2b\n"
+ "4: l32r %1, 3b\n"
+ " movi %0, %7\n"
" jx %1\n"
" .previous\n"
" .section __ex_table,\"a\"\n"
- " .long 1b,5b,2b,5b\n"
+ " .long 1b,4b\n"
" .previous\n"
- : "+r" (ret), "=&r" (prev), "+m" (*uaddr)
- : "r" (uaddr), "r" (oldval), "r" (newval), "I" (-EFAULT)
+ : "+r" (ret), "+r" (newval), "+m" (*uaddr), "+m" (*uval)
+ : "r" (uaddr), "r" (oldval), "r" (uval), "I" (-EFAULT)
: "memory");
- *uval = prev;
return ret;
}
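
The rewrite leans on the s32c1i instruction, which atomically stores the new value only when the current memory word matches SCOMPARE1, and always leaves the old value in the source register. Roughly, in C (fault handling omitted; cmpxchg_model() is an illustrative name for what the asm block computes):

    #include <stdint.h>

    /* C model of the s32c1i fast path: atomically, if (*uaddr == oldval)
     * then *uaddr = newval; the old memory value is always written back
     * through uval, which is what the added "s32i %1, %6, 0" does. */
    static int cmpxchg_model(uint32_t *uval, uint32_t *uaddr,
                             uint32_t oldval, uint32_t newval)
    {
        uint32_t old = *uaddr;  /* s32c1i returns the old value */

        if (old == oldval)
            *uaddr = newval;
        *uval = old;
        return 0;
    }
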
diff --git a/arch/xtensa/include/asm/highmem.h b/arch/xtensa/include/asm/highmem.h
index 6e070db1022e..04e9340eac4b 100644
--- a/arch/xtensa/include/asm/highmem.h
+++ b/arch/xtensa/include/asm/highmem.h
@@ -72,7 +72,7 @@ static inline void *kmap(struct page *page)
* page table.
*/
BUILD_BUG_ON(PKMAP_BASE <
- XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE);
+ TLBTEMP_BASE_1 + TLBTEMP_SIZE);
BUG_ON(in_interrupt());
if (!PageHighMem(page))
return page_address(page);
diff --git a/arch/xtensa/include/asm/kasan.h b/arch/xtensa/include/asm/kasan.h
new file mode 100644
index 000000000000..54be80876e57
--- /dev/null
+++ b/arch/xtensa/include/asm/kasan.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_KASAN_H
+#define __ASM_KASAN_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_KASAN
+
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <asm/kmem_layout.h>
+
+/* Start of area covered by KASAN */
+#define KASAN_START_VADDR __XTENSA_UL_CONST(0x90000000)
+/* Start of the shadow map */
+#define KASAN_SHADOW_START (XCHAL_PAGE_TABLE_VADDR + XCHAL_PAGE_TABLE_SIZE)
+/* Size of the shadow map */
+#define KASAN_SHADOW_SIZE (-KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT)
+/* Offset for mem to shadow address transformation */
+#define KASAN_SHADOW_OFFSET __XTENSA_UL_CONST(CONFIG_KASAN_SHADOW_OFFSET)
+
+void __init kasan_early_init(void);
+void __init kasan_init(void);
+
+#else
+
+static inline void kasan_early_init(void)
+{
+}
+
+static inline void kasan_init(void)
+{
+}
+
+#endif
+#endif
+#endif
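
These constants feed the usual KASAN address-to-shadow translation: one shadow byte per 8 bytes of memory, with the generic KASAN_SHADOW_SCALE_SHIFT of 3. A sketch of the translation they plug into (this mirrors the generic kasan_mem_to_shadow(); shown here only for orientation):

    /* Sketch: generic KASAN address-to-shadow mapping, scale shift 3. */
    static inline void *mem_to_shadow(const void *addr)
    {
        return (void *)(((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
                        + KASAN_SHADOW_OFFSET);
    }
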
diff --git a/arch/xtensa/include/asm/kmem_layout.h b/arch/xtensa/include/asm/kmem_layout.h
index 561f8729bcde..2317c835a4db 100644
--- a/arch/xtensa/include/asm/kmem_layout.h
+++ b/arch/xtensa/include/asm/kmem_layout.h
@@ -71,4 +71,11 @@
#endif
+#ifndef CONFIG_KASAN
+#define KERNEL_STACK_SHIFT 13
+#else
+#define KERNEL_STACK_SHIFT 15
+#endif
+#define KERNEL_STACK_SIZE (1 << KERNEL_STACK_SHIFT)
+
#endif
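
That is, kernel stacks grow from 8 KiB to 32 KiB under KASAN, whose instrumentation inflates stack frames; combined with THREAD_SIZE_ORDER in thread_info.h this yields order-3 (8-page) stack allocations. A sketch of the arithmetic, assuming PAGE_SHIFT is 12:

    #include <assert.h>

    /* Sketch of the resulting stack geometry in the KASAN case. */
    #define PAGE_SHIFT          12
    #define KERNEL_STACK_SHIFT  15
    #define KERNEL_STACK_SIZE   (1 << KERNEL_STACK_SHIFT)
    #define THREAD_SIZE_ORDER   (KERNEL_STACK_SHIFT - PAGE_SHIFT)

    int main(void)
    {
        assert(KERNEL_STACK_SIZE == 32768);  /* 32 KiB */
        assert(THREAD_SIZE_ORDER == 3);      /* 8 pages of 4 KiB */
        return 0;
    }
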
diff --git a/arch/xtensa/include/asm/linkage.h b/arch/xtensa/include/asm/linkage.h
new file mode 100644
index 000000000000..0ba9973235d9
--- /dev/null
+++ b/arch/xtensa/include/asm/linkage.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+#define __ALIGN .align 4
+#define __ALIGN_STR ".align 4"
+
+#endif
diff --git a/arch/xtensa/include/asm/mmu_context.h b/arch/xtensa/include/asm/mmu_context.h
index f7e186dfc4e4..de5e6cbbafe4 100644
--- a/arch/xtensa/include/asm/mmu_context.h
+++ b/arch/xtensa/include/asm/mmu_context.h
@@ -52,6 +52,7 @@ DECLARE_PER_CPU(unsigned long, asid_cache);
#define ASID_INSERT(x) (0x03020001 | (((x) & ASID_MASK) << 8))
void init_mmu(void);
+void init_kio(void);
static inline void set_rasid_register (unsigned long val)
{
diff --git a/arch/xtensa/include/asm/nommu_context.h b/arch/xtensa/include/asm/nommu_context.h
index 2cebdbbdb633..37251b2ef871 100644
--- a/arch/xtensa/include/asm/nommu_context.h
+++ b/arch/xtensa/include/asm/nommu_context.h
@@ -3,6 +3,10 @@ static inline void init_mmu(void)
{
}
+static inline void init_kio(void)
+{
+}
+
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h
index 4ddbfd57a7c8..5d69c11c01b8 100644
--- a/arch/xtensa/include/asm/page.h
+++ b/arch/xtensa/include/asm/page.h
@@ -36,8 +36,6 @@
#define MAX_LOW_PFN PHYS_PFN(0xfffffffful)
#endif
-#define PGTABLE_START 0x80000000
-
/*
* Cache aliasing:
*
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index 30dd5b2e4ad5..38802259978f 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -12,9 +12,9 @@
#define _XTENSA_PGTABLE_H
#define __ARCH_USE_5LEVEL_HACK
-#include <asm-generic/pgtable-nopmd.h>
#include <asm/page.h>
#include <asm/kmem_layout.h>
+#include <asm-generic/pgtable-nopmd.h>
/*
* We only use two ring levels, user and kernel space.
@@ -170,6 +170,7 @@
#define PAGE_SHARED_EXEC \
__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE | _PAGE_HW_EXEC)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_HW_WRITE)
+#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT|_PAGE_HW_WRITE|_PAGE_HW_EXEC)
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
diff --git a/arch/xtensa/include/asm/ptrace.h b/arch/xtensa/include/asm/ptrace.h
index e2d9c5eb10bd..3a5c5918aea3 100644
--- a/arch/xtensa/include/asm/ptrace.h
+++ b/arch/xtensa/include/asm/ptrace.h
@@ -10,6 +10,7 @@
#ifndef _XTENSA_PTRACE_H
#define _XTENSA_PTRACE_H
+#include <asm/kmem_layout.h>
#include <uapi/asm/ptrace.h>
/*
@@ -38,20 +39,6 @@
* +-----------------------+ --------
*/
-#define KERNEL_STACK_SIZE (2 * PAGE_SIZE)
-
-/* Offsets for exception_handlers[] (3 x 64-entries x 4-byte tables). */
-
-#define EXC_TABLE_KSTK 0x004 /* Kernel Stack */
-#define EXC_TABLE_DOUBLE_SAVE 0x008 /* Double exception save area for a0 */
-#define EXC_TABLE_FIXUP 0x00c /* Fixup handler */
-#define EXC_TABLE_PARAM 0x010 /* For passing a parameter to fixup */
-#define EXC_TABLE_SYSCALL_SAVE 0x014 /* For fast syscall handler */
-#define EXC_TABLE_FAST_USER 0x100 /* Fast user exception handler */
-#define EXC_TABLE_FAST_KERNEL 0x200 /* Fast kernel exception handler */
-#define EXC_TABLE_DEFAULT 0x300 /* Default C-Handler */
-#define EXC_TABLE_SIZE 0x400
-
#ifndef __ASSEMBLY__
#include <asm/coprocessor.h>
diff --git a/arch/xtensa/include/asm/regs.h b/arch/xtensa/include/asm/regs.h
index 881a1134a4b4..477594e5817f 100644
--- a/arch/xtensa/include/asm/regs.h
+++ b/arch/xtensa/include/asm/regs.h
@@ -76,6 +76,7 @@
#define EXCCAUSE_COPROCESSOR5_DISABLED 37
#define EXCCAUSE_COPROCESSOR6_DISABLED 38
#define EXCCAUSE_COPROCESSOR7_DISABLED 39
+#define EXCCAUSE_N 64
/* PS register fields. */
diff --git a/arch/xtensa/include/asm/stackprotector.h b/arch/xtensa/include/asm/stackprotector.h
new file mode 100644
index 000000000000..e368f94fd2af
--- /dev/null
+++ b/arch/xtensa/include/asm/stackprotector.h
@@ -0,0 +1,40 @@
+/*
+ * GCC stack protector support.
+ *
+ * (This is directly adapted from the ARM implementation)
+ *
+ * The stack protector works by putting a predefined pattern at the start of
+ * the stack frame and verifying that it hasn't been overwritten when
+ * returning from the function. The pattern is called the stack canary,
+ * and gcc expects it to be defined by a global variable called
+ * "__stack_chk_guard" on Xtensa. This unfortunately means that on SMP
+ * we cannot have a different canary value per task.
+ */
+
+#ifndef _ASM_STACKPROTECTOR_H
+#define _ASM_STACKPROTECTOR_H 1
+
+#include <linux/random.h>
+#include <linux/version.h>
+
+extern unsigned long __stack_chk_guard;
+
+/*
+ * Initialize the stackprotector canary value.
+ *
+ * NOTE: this must only be called from functions that never return,
+ * and it must always be inlined.
+ */
+static __always_inline void boot_init_stack_canary(void)
+{
+ unsigned long canary;
+
+ /* Try to get a semi random initial value. */
+ get_random_bytes(&canary, sizeof(canary));
+ canary ^= LINUX_VERSION_CODE;
+
+ current->stack_canary = canary;
+ __stack_chk_guard = current->stack_canary;
+}
+
+#endif /* _ASM_STACKPROTECTOR_H */
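
For illustration, a rough C equivalent of the checking code gcc emits into every instrumented function when -fstack-protector is enabled. The real check is compiler-generated against a reserved stack slot; instrumented_function() is a made-up example, and the local "canary" models that slot:

    #include <string.h>

    extern unsigned long __stack_chk_guard;
    void __stack_chk_fail(void);

    /* Rough equivalent of compiler-emitted stack protection. */
    int instrumented_function(const char *src)
    {
        unsigned long canary = __stack_chk_guard;  /* prologue */
        char buf[64];

        strncpy(buf, src, sizeof(buf) - 1);        /* body that might overflow */
        buf[sizeof(buf) - 1] = '\0';

        if (canary != __stack_chk_guard)           /* epilogue */
            __stack_chk_fail();                    /* never returns */
        return buf[0];
    }
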
diff --git a/arch/xtensa/include/asm/string.h b/arch/xtensa/include/asm/string.h
index 8d5d9dfadb09..89b51a0c752f 100644
--- a/arch/xtensa/include/asm/string.h
+++ b/arch/xtensa/include/asm/string.h
@@ -53,7 +53,7 @@ static inline char *strncpy(char *__dest, const char *__src, size_t __n)
"bne %1, %5, 1b\n"
"2:"
: "=r" (__dest), "=r" (__src), "=&r" (__dummy)
- : "0" (__dest), "1" (__src), "r" (__src+__n)
+ : "0" (__dest), "1" (__src), "r" ((uintptr_t)__src+__n)
: "memory");
return __xdest;
@@ -101,21 +101,40 @@ static inline int strncmp(const char *__cs, const char *__ct, size_t __n)
"2:\n\t"
"sub %2, %2, %3"
: "=r" (__cs), "=r" (__ct), "=&r" (__res), "=&r" (__dummy)
- : "0" (__cs), "1" (__ct), "r" (__cs+__n));
+ : "0" (__cs), "1" (__ct), "r" ((uintptr_t)__cs+__n));
return __res;
}
#define __HAVE_ARCH_MEMSET
extern void *memset(void *__s, int __c, size_t __count);
+extern void *__memset(void *__s, int __c, size_t __count);
#define __HAVE_ARCH_MEMCPY
extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
+extern void *__memcpy(void *__to, __const__ void *__from, size_t __n);
#define __HAVE_ARCH_MEMMOVE
extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
+extern void *__memmove(void *__dest, __const__ void *__src, size_t __n);
/* Don't build bcopy at all ... */
#define __HAVE_ARCH_BCOPY
+#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+
+/*
+ * For files that are not instrumented (e.g. mm/slub.c) we
+ * should use the non-instrumented versions of the mem* functions.
+ */
+
+#define memcpy(dst, src, len) __memcpy(dst, src, len)
+#define memmove(dst, src, len) __memmove(dst, src, len)
+#define memset(s, c, n) __memset(s, c, n)
+
+#ifndef __NO_FORTIFY
+#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
+#endif
+#endif
+
#endif /* _XTENSA_STRING_H */
diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h
index 7be2400f745a..2bd19ae61e47 100644
--- a/arch/xtensa/include/asm/thread_info.h
+++ b/arch/xtensa/include/asm/thread_info.h
@@ -11,7 +11,9 @@
#ifndef _XTENSA_THREAD_INFO_H
#define _XTENSA_THREAD_INFO_H
-#ifdef __KERNEL__
+#include <asm/kmem_layout.h>
+
+#define CURRENT_SHIFT KERNEL_STACK_SHIFT
#ifndef __ASSEMBLY__
# include <asm/processor.h>
@@ -77,14 +79,11 @@ struct thread_info {
.addr_limit = KERNEL_DS, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
struct thread_info *ti;
- __asm__("extui %0,a1,0,13\n\t"
+ __asm__("extui %0, a1, 0, "__stringify(CURRENT_SHIFT)"\n\t"
"xor %0, a1, %0" : "=&r" (ti) : );
return ti;
}
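
The extui/xor pair is a two-instruction way of clearing the low CURRENT_SHIFT bits of the stack pointer: thread_info sits at the base of the THREAD_SIZE-aligned kernel stack, and THREAD_SIZE == 1 << CURRENT_SHIFT. Equivalently, as a sketch (ti_from_sp() is an illustrative name):

    #include <stdint.h>

    struct thread_info;

    /* C equivalent of extui + xor: round sp down to the start of the
     * THREAD_SIZE-aligned stack, where thread_info lives. */
    static inline struct thread_info *ti_from_sp(uintptr_t sp,
                                                 uintptr_t thread_size)
    {
        return (struct thread_info *)(sp & ~(thread_size - 1));
    }
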
@@ -93,7 +92,7 @@ static inline struct thread_info *current_thread_info(void)
/* how to get the thread information struct from ASM */
#define GET_THREAD_INFO(reg,sp) \
- extui reg, sp, 0, 13; \
+ extui reg, sp, 0, CURRENT_SHIFT; \
xor reg, sp, reg
#endif
@@ -130,8 +129,7 @@ static inline struct thread_info *current_thread_info(void)
*/
#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */
-#define THREAD_SIZE 8192 //(2*PAGE_SIZE)
-#define THREAD_SIZE_ORDER 1
+#define THREAD_SIZE KERNEL_STACK_SIZE
+#define THREAD_SIZE_ORDER (KERNEL_STACK_SHIFT - PAGE_SHIFT)
-#endif /* __KERNEL__ */
#endif /* _XTENSA_THREAD_INFO */
diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h
index 2e69aa4b843f..f5cd7a7e65e0 100644
--- a/arch/xtensa/include/asm/traps.h
+++ b/arch/xtensa/include/asm/traps.h
@@ -13,12 +13,47 @@
#include <asm/ptrace.h>
/*
+ * Per-CPU exception handling data structure.
+ * EXCSAVE1 points to it.
+ */
+struct exc_table {
+ /* Kernel Stack */
+ void *kstk;
+ /* Double exception save area for a0 */
+ unsigned long double_save;
+ /* Fixup handler */
+ void *fixup;
+ /* For passing a parameter to fixup */
+ void *fixup_param;
+ /* For fast syscall handler */
+ unsigned long syscall_save;
+ /* Fast user exception handlers */
+ void *fast_user_handler[EXCCAUSE_N];
+ /* Fast kernel exception handlers */
+ void *fast_kernel_handler[EXCCAUSE_N];
+ /* Default C-Handlers */
+ void *default_handler[EXCCAUSE_N];
+};
+
+/*
* handler must be either of the following:
* void (*)(struct pt_regs *regs);
* void (*)(struct pt_regs *regs, unsigned long exccause);
*/
extern void * __init trap_set_handler(int cause, void *handler);
extern void do_unhandled(struct pt_regs *regs, unsigned long exccause);
+void fast_second_level_miss(void);
+
+/* Initialize minimal exc_table structure sufficient for basic paging */
+static inline void __init early_trap_init(void)
+{
+ static struct exc_table exc_table __initdata = {
+ .fast_kernel_handler[EXCCAUSE_DTLB_MISS] =
+ fast_second_level_miss,
+ };
+ __asm__ __volatile__("wsr %0, excsave1\n" : : "a" (&exc_table));
+}
+
void secondary_trap_init(void);
static inline void spill_registers(void)
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index b8f152b6aaa5..f1158b4c629c 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -44,6 +44,8 @@
#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size))
+#define user_addr_max() (uaccess_kernel() ? ~0UL : TASK_SIZE)
+
/*
* These are the main single-value transfer routines. They
* automatically use the right size if we just have the right pointer
@@ -261,7 +263,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
static inline unsigned long
__xtensa_clear_user(void *addr, unsigned long size)
{
- if ( ! memset(addr, 0, size) )
+ if (!__memset(addr, 0, size))
return size;
return 0;
}
@@ -277,6 +279,8 @@ clear_user(void *addr, unsigned long size)
#define __clear_user __xtensa_clear_user
+#ifndef CONFIG_GENERIC_STRNCPY_FROM_USER
+
extern long __strncpy_user(char *, const char *, long);
static inline long
@@ -286,6 +290,9 @@ strncpy_from_user(char *dst, const char *src, long count)
return __strncpy_user(dst, src, count);
return -EFAULT;
}
+#else
+long strncpy_from_user(char *dst, const char *src, long count);
+#endif
/*
* Return the size of a string (including the ending 0!)
diff --git a/arch/xtensa/include/uapi/asm/poll.h b/arch/xtensa/include/uapi/asm/poll.h
index 4d249040b33d..e3246d41182c 100644
--- a/arch/xtensa/include/uapi/asm/poll.h
+++ b/arch/xtensa/include/uapi/asm/poll.h
@@ -12,9 +12,26 @@
#ifndef _XTENSA_POLL_H
#define _XTENSA_POLL_H
+#ifndef __KERNEL__
#define POLLWRNORM POLLOUT
-#define POLLWRBAND 0x0100
-#define POLLREMOVE 0x0800
+#define POLLWRBAND (__force __poll_t)0x0100
+#define POLLREMOVE (__force __poll_t)0x0800
+#else
+#define __ARCH_HAS_MANGLED_POLL
+static inline __u16 mangle_poll(__poll_t val)
+{
+ __u16 v = (__force __u16)val;
+ /* bit 9 -> bit 8, bit 8 -> bit 2 */
+ return (v & ~0x300) | ((v & 0x200) >> 1) | ((v & 0x100) >> 6);
+}
+
+static inline __poll_t demangle_poll(__u16 v)
+{
+ /* bit 8 -> bit 9, bit 2 -> bits 2 and 8 */
+ return (__force __poll_t)((v & ~0x100) | ((v & 0x100) << 1) |
+ ((v & 4) << 6));
+}
+#endif
#include <asm-generic/poll.h>
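
A concrete check of the remapping, using the generic <asm-generic/poll.h> values (POLLWRNORM 0x100, POLLWRBAND 0x200) against xtensa's historical user-visible ones (POLLWRNORM == POLLOUT == 0x004, POLLWRBAND 0x100). A sketch with plain integers rather than the sparse __poll_t types, with the mangle body copied from above:

    #include <assert.h>

    /* Same expression as mangle_poll() above, minus the sparse types. */
    static unsigned short mangle(unsigned short v)
    {
        return (v & ~0x300) | ((v & 0x200) >> 1) | ((v & 0x100) >> 6);
    }

    int main(void)
    {
        assert(mangle(0x100) == 0x004);  /* generic POLLWRNORM -> xtensa POLLOUT */
        assert(mangle(0x200) == 0x100);  /* generic POLLWRBAND -> xtensa value */
        return 0;
    }
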
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile
index bb8d55775a97..91907590d183 100644
--- a/arch/xtensa/kernel/Makefile
+++ b/arch/xtensa/kernel/Makefile
@@ -17,9 +17,6 @@ obj-$(CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS) += perf_event.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_S32C1I_SELFTEST) += s32c1i_selftest.o
-AFLAGS_head.o += -mtext-section-literals
-AFLAGS_mxhead.o += -mtext-section-literals
-
# In the Xtensa architecture, assembly generates literals which must always
# precede the L32R instruction with a relative offset less than 256 kB.
# Therefore, the .text and .literal sections must be combined in parentheses
diff --git a/arch/xtensa/kernel/align.S b/arch/xtensa/kernel/align.S
index 890004af03a9..9301452e521e 100644
--- a/arch/xtensa/kernel/align.S
+++ b/arch/xtensa/kernel/align.S
@@ -19,6 +19,7 @@
#include <linux/linkage.h>
#include <asm/current.h>
#include <asm/asm-offsets.h>
+#include <asm/asmmacro.h>
#include <asm/processor.h>
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
@@ -66,8 +67,6 @@
#define INSN_T 24
#define INSN_OP1 16
-.macro __src_b r, w0, w1; src \r, \w0, \w1; .endm
-.macro __ssa8 r; ssa8b \r; .endm
.macro __ssa8r r; ssa8l \r; .endm
.macro __sh r, s; srl \r, \s; .endm
.macro __sl r, s; sll \r, \s; .endm
@@ -81,8 +80,6 @@
#define INSN_T 4
#define INSN_OP1 12
-.macro __src_b r, w0, w1; src \r, \w1, \w0; .endm
-.macro __ssa8 r; ssa8l \r; .endm
.macro __ssa8r r; ssa8b \r; .endm
.macro __sh r, s; sll \r, \s; .endm
.macro __sl r, s; srl \r, \s; .endm
@@ -155,7 +152,7 @@
* < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
*/
-
+ .literal_position
ENTRY(fast_unaligned)
/* Note: We don't expect the address to be aligned on a word
diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
index bcb5beb81177..022cf918ec20 100644
--- a/arch/xtensa/kernel/asm-offsets.c
+++ b/arch/xtensa/kernel/asm-offsets.c
@@ -76,6 +76,9 @@ int main(void)
DEFINE(TASK_PID, offsetof (struct task_struct, pid));
DEFINE(TASK_THREAD, offsetof (struct task_struct, thread));
DEFINE(TASK_THREAD_INFO, offsetof (struct task_struct, stack));
+#ifdef CONFIG_CC_STACKPROTECTOR
+ DEFINE(TASK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
+#endif
DEFINE(TASK_STRUCT_SIZE, sizeof (struct task_struct));
/* offsets in thread_info struct */
@@ -129,5 +132,18 @@ int main(void)
offsetof(struct debug_table, icount_level_save));
#endif
+ /* struct exc_table */
+ DEFINE(EXC_TABLE_KSTK, offsetof(struct exc_table, kstk));
+ DEFINE(EXC_TABLE_DOUBLE_SAVE, offsetof(struct exc_table, double_save));
+ DEFINE(EXC_TABLE_FIXUP, offsetof(struct exc_table, fixup));
+ DEFINE(EXC_TABLE_PARAM, offsetof(struct exc_table, fixup_param));
+ DEFINE(EXC_TABLE_SYSCALL_SAVE,
+ offsetof(struct exc_table, syscall_save));
+ DEFINE(EXC_TABLE_FAST_USER,
+ offsetof(struct exc_table, fast_user_handler));
+ DEFINE(EXC_TABLE_FAST_KERNEL,
+ offsetof(struct exc_table, fast_kernel_handler));
+ DEFINE(EXC_TABLE_DEFAULT, offsetof(struct exc_table, default_handler));
+
return 0;
}
diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
index 3a98503ad11a..4f8b52d575a2 100644
--- a/arch/xtensa/kernel/coprocessor.S
+++ b/arch/xtensa/kernel/coprocessor.S
@@ -212,8 +212,7 @@ ENDPROC(coprocessor_restore)
ENTRY(fast_coprocessor_double)
wsr a0, excsave1
- movi a0, unrecoverable_exception
- callx0 a0
+ call0 unrecoverable_exception
ENDPROC(fast_coprocessor_double)
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 37a239556889..5caff0744f3c 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -14,6 +14,7 @@
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
+#include <asm/asmmacro.h>
#include <asm/processor.h>
#include <asm/coprocessor.h>
#include <asm/thread_info.h>
@@ -125,6 +126,7 @@
*
* Note: _user_exception might be at an odd address. Don't use call0..call12
*/
+ .literal_position
ENTRY(user_exception)
@@ -475,8 +477,7 @@ common_exception_return:
1:
irq_save a2, a3
#ifdef CONFIG_TRACE_IRQFLAGS
- movi a4, trace_hardirqs_off
- callx4 a4
+ call4 trace_hardirqs_off
#endif
/* Jump if we are returning from kernel exceptions. */
@@ -503,24 +504,20 @@ common_exception_return:
/* Call do_signal() */
#ifdef CONFIG_TRACE_IRQFLAGS
- movi a4, trace_hardirqs_on
- callx4 a4
+ call4 trace_hardirqs_on
#endif
rsil a2, 0
- movi a4, do_notify_resume # int do_notify_resume(struct pt_regs*)
mov a6, a1
- callx4 a4
+ call4 do_notify_resume # int do_notify_resume(struct pt_regs*)
j 1b
3: /* Reschedule */
#ifdef CONFIG_TRACE_IRQFLAGS
- movi a4, trace_hardirqs_on
- callx4 a4
+ call4 trace_hardirqs_on
#endif
rsil a2, 0
- movi a4, schedule # void schedule (void)
- callx4 a4
+ call4 schedule # void schedule (void)
j 1b
#ifdef CONFIG_PREEMPT
@@ -531,8 +528,7 @@ common_exception_return:
l32i a4, a2, TI_PRE_COUNT
bnez a4, 4f
- movi a4, preempt_schedule_irq
- callx4 a4
+ call4 preempt_schedule_irq
j 1b
#endif
@@ -545,23 +541,20 @@ common_exception_return:
5:
#ifdef CONFIG_HAVE_HW_BREAKPOINT
_bbci.l a4, TIF_DB_DISABLED, 7f
- movi a4, restore_dbreak
- callx4 a4
+ call4 restore_dbreak
7:
#endif
#ifdef CONFIG_DEBUG_TLB_SANITY
l32i a4, a1, PT_DEPC
bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
- movi a4, check_tlb_sanity
- callx4 a4
+ call4 check_tlb_sanity
#endif
6:
4:
#ifdef CONFIG_TRACE_IRQFLAGS
extui a4, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
bgei a4, LOCKLEVEL, 1f
- movi a4, trace_hardirqs_on
- callx4 a4
+ call4 trace_hardirqs_on
1:
#endif
/* Restore optional registers. */
@@ -777,6 +770,8 @@ ENDPROC(kernel_exception)
* When we get here, a0 is trashed and saved to excsave[debuglevel]
*/
+ .literal_position
+
ENTRY(debug_exception)
rsr a0, SREG_EPS + XCHAL_DEBUGLEVEL
@@ -916,6 +911,8 @@ ENDPROC(debug_exception)
unrecoverable_text:
.ascii "Unrecoverable error in exception handler\0"
+ .literal_position
+
ENTRY(unrecoverable_exception)
movi a0, 1
@@ -933,10 +930,8 @@ ENTRY(unrecoverable_exception)
movi a0, 0
addi a1, a1, PT_REGS_OFFSET
- movi a4, panic
movi a6, unrecoverable_text
-
- callx4 a4
+ call4 panic
1: j 1b
@@ -1073,8 +1068,7 @@ ENTRY(fast_syscall_unrecoverable)
xsr a2, depc # restore a2, depc
wsr a0, excsave1
- movi a0, unrecoverable_exception
- callx0 a0
+ call0 unrecoverable_exception
ENDPROC(fast_syscall_unrecoverable)
@@ -1101,32 +1095,11 @@ ENDPROC(fast_syscall_unrecoverable)
* < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
*
* Note: we don't have to save a2; a2 holds the return value
- *
- * We use the two macros TRY and CATCH:
- *
- * TRY adds an entry to the __ex_table fixup table for the immediately
- * following instruction.
- *
- * CATCH catches any exception that occurred at one of the preceding TRY
- * statements and continues from there
- *
- * Usage TRY l32i a0, a1, 0
- * <other code>
- * done: rfe
- * CATCH <set return code>
- * j done
*/
-#ifdef CONFIG_FAST_SYSCALL_XTENSA
-
-#define TRY \
- .section __ex_table, "a"; \
- .word 66f, 67f; \
- .text; \
-66:
+ .literal_position
-#define CATCH \
-67:
+#ifdef CONFIG_FAST_SYSCALL_XTENSA
ENTRY(fast_syscall_xtensa)
@@ -1141,9 +1114,9 @@ ENTRY(fast_syscall_xtensa)
.Lswp: /* Atomic compare and swap */
-TRY l32i a0, a3, 0 # read old value
+EX(.Leac) l32i a0, a3, 0 # read old value
bne a0, a4, 1f # same as old value? jump
-TRY s32i a5, a3, 0 # different, modify value
+EX(.Leac) s32i a5, a3, 0 # different, modify value
l32i a7, a2, PT_AREG7 # restore a7
l32i a0, a2, PT_AREG0 # restore a0
movi a2, 1 # and return 1
@@ -1156,12 +1129,12 @@ TRY s32i a5, a3, 0 # different, modify value
.Lnswp: /* Atomic set, add, and exg_add. */
-TRY l32i a7, a3, 0 # orig
+EX(.Leac) l32i a7, a3, 0 # orig
addi a6, a6, -SYS_XTENSA_ATOMIC_SET
add a0, a4, a7 # + arg
moveqz a0, a4, a6 # set
addi a6, a6, SYS_XTENSA_ATOMIC_SET
-TRY s32i a0, a3, 0 # write new value
+EX(.Leac) s32i a0, a3, 0 # write new value
mov a0, a2
mov a2, a7
@@ -1169,7 +1142,6 @@ TRY s32i a0, a3, 0 # write new value
l32i a0, a0, PT_AREG0 # restore a0
rfe
-CATCH
.Leac: l32i a7, a2, PT_AREG7 # restore a7
l32i a0, a2, PT_AREG0 # restore a0
movi a2, -EFAULT
@@ -1411,14 +1383,12 @@ ENTRY(fast_syscall_spill_registers)
rsync
movi a6, SIGSEGV
- movi a4, do_exit
- callx4 a4
+ call4 do_exit
/* shouldn't return, so panic */
wsr a0, excsave1
- movi a0, unrecoverable_exception
- callx0 a0 # should not return
+ call0 unrecoverable_exception # should not return
1: j 1b
@@ -1564,8 +1534,8 @@ ENDPROC(fast_syscall_spill_registers)
ENTRY(fast_second_level_miss_double_kernel)
-1: movi a0, unrecoverable_exception
- callx0 a0 # should not return
+1:
+ call0 unrecoverable_exception # should not return
1: j 1b
ENDPROC(fast_second_level_miss_double_kernel)
@@ -1887,6 +1857,7 @@ ENDPROC(fast_store_prohibited)
* void system_call (struct pt_regs* regs, int exccause)
* a2 a3
*/
+ .literal_position
ENTRY(system_call)
@@ -1896,9 +1867,8 @@ ENTRY(system_call)
l32i a3, a2, PT_AREG2
mov a6, a2
- movi a4, do_syscall_trace_enter
s32i a3, a2, PT_SYSCALL
- callx4 a4
+ call4 do_syscall_trace_enter
mov a3, a6
/* syscall = sys_call_table[syscall_nr] */
@@ -1930,9 +1900,8 @@ ENTRY(system_call)
1: /* regs->areg[2] = return_value */
s32i a6, a2, PT_AREG2
- movi a4, do_syscall_trace_leave
mov a6, a2
- callx4 a4
+ call4 do_syscall_trace_leave
retw
ENDPROC(system_call)
@@ -2002,6 +1971,12 @@ ENTRY(_switch_to)
s32i a1, a2, THREAD_SP # save stack pointer
#endif
+#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+ movi a6, __stack_chk_guard
+ l32i a8, a3, TASK_STACK_CANARY
+ s32i a8, a6, 0
+#endif
+
/* Disable ints while we manipulate the stack pointer. */
irq_save a14, a3
@@ -2048,12 +2023,10 @@ ENTRY(ret_from_fork)
/* void schedule_tail (struct task_struct *prev)
* Note: prev is still in a6 (return value from fake call4 frame)
*/
- movi a4, schedule_tail
- callx4 a4
+ call4 schedule_tail
- movi a4, do_syscall_trace_leave
mov a6, a1
- callx4 a4
+ call4 do_syscall_trace_leave
j common_exception_return
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index 23ce62e60435..9c4e9433e536 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -264,11 +264,8 @@ ENTRY(_startup)
/* init_arch kick-starts the linux kernel */
- movi a4, init_arch
- callx4 a4
-
- movi a4, start_kernel
- callx4 a4
+ call4 init_arch
+ call4 start_kernel
should_never_return:
j should_never_return
@@ -294,8 +291,7 @@ should_never_return:
movi a6, 0
wsr a6, excsave1
- movi a4, secondary_start_kernel
- callx4 a4
+ call4 secondary_start_kernel
j should_never_return
#endif /* CONFIG_SMP */
diff --git a/arch/xtensa/kernel/module.c b/arch/xtensa/kernel/module.c
index b715237bae61..902845ddacb7 100644
--- a/arch/xtensa/kernel/module.c
+++ b/arch/xtensa/kernel/module.c
@@ -22,8 +22,6 @@
#include <linux/kernel.h>
#include <linux/cache.h>
-#undef DEBUG_RELOCATE
-
static int
decode_calln_opcode (unsigned char *location)
{
@@ -58,10 +56,9 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
unsigned char *location;
uint32_t value;
-#ifdef DEBUG_RELOCATE
- printk("Applying relocate section %u to %u\n", relsec,
- sechdrs[relsec].sh_info);
-#endif
+ pr_debug("Applying relocate section %u to %u\n", relsec,
+ sechdrs[relsec].sh_info);
+
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
location = (char *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rela[i].r_offset;
@@ -87,7 +84,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
value -= ((unsigned long)location & -4) + 4;
if ((value & 3) != 0 ||
((value + (1 << 19)) >> 20) != 0) {
- printk("%s: relocation out of range, "
+ pr_err("%s: relocation out of range, "
"section %d reloc %d "
"sym '%s'\n",
mod->name, relsec, i,
@@ -111,7 +108,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
value -= (((unsigned long)location + 3) & -4);
if ((value & 3) != 0 ||
(signed int)value >> 18 != -1) {
- printk("%s: relocation out of range, "
+ pr_err("%s: relocation out of range, "
"section %d reloc %d "
"sym '%s'\n",
mod->name, relsec, i,
@@ -156,7 +153,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
case R_XTENSA_SLOT12_OP:
case R_XTENSA_SLOT13_OP:
case R_XTENSA_SLOT14_OP:
- printk("%s: unexpected FLIX relocation: %u\n",
+ pr_err("%s: unexpected FLIX relocation: %u\n",
mod->name,
ELF32_R_TYPE(rela[i].r_info));
return -ENOEXEC;
@@ -176,13 +173,13 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
case R_XTENSA_SLOT12_ALT:
case R_XTENSA_SLOT13_ALT:
case R_XTENSA_SLOT14_ALT:
- printk("%s: unexpected ALT relocation: %u\n",
+ pr_err("%s: unexpected ALT relocation: %u\n",
mod->name,
ELF32_R_TYPE(rela[i].r_info));
return -ENOEXEC;
default:
- printk("%s: unexpected relocation: %u\n",
+ pr_err("%s: unexpected relocation: %u\n",
mod->name,
ELF32_R_TYPE(rela[i].r_info));
return -ENOEXEC;
diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c
index 903963ee495d..d981f01c8d89 100644
--- a/arch/xtensa/kernel/pci.c
+++ b/arch/xtensa/kernel/pci.c
@@ -29,14 +29,6 @@
#include <asm/pci-bridge.h>
#include <asm/platform.h>
-#undef DEBUG
-
-#ifdef DEBUG
-#define DBG(x...) printk(x)
-#else
-#define DBG(x...)
-#endif
-
/* PCI Controller */
@@ -101,8 +93,8 @@ pcibios_enable_resources(struct pci_dev *dev, int mask)
for(idx=0; idx<6; idx++) {
r = &dev->resource[idx];
if (!r->start && r->end) {
- printk (KERN_ERR "PCI: Device %s not available because "
- "of resource collisions\n", pci_name(dev));
+ pr_err("PCI: Device %s not available because "
+ "of resource collisions\n", pci_name(dev));
return -EINVAL;
}
if (r->flags & IORESOURCE_IO)
@@ -113,7 +105,7 @@ pcibios_enable_resources(struct pci_dev *dev, int mask)
if (dev->resource[PCI_ROM_RESOURCE].start)
cmd |= PCI_COMMAND_MEMORY;
if (cmd != old_cmd) {
- printk("PCI: Enabling device %s (%04x -> %04x)\n",
+ pr_info("PCI: Enabling device %s (%04x -> %04x)\n",
pci_name(dev), old_cmd, cmd);
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
@@ -144,8 +136,8 @@ static void __init pci_controller_apertures(struct pci_controller *pci_ctrl,
res = &pci_ctrl->io_resource;
if (!res->flags) {
if (io_offset)
- printk (KERN_ERR "I/O resource not set for host"
- " bridge %d\n", pci_ctrl->index);
+ pr_err("I/O resource not set for host bridge %d\n",
+ pci_ctrl->index);
res->start = 0;
res->end = IO_SPACE_LIMIT;
res->flags = IORESOURCE_IO;
@@ -159,8 +151,8 @@ static void __init pci_controller_apertures(struct pci_controller *pci_ctrl,
if (!res->flags) {
if (i > 0)
continue;
- printk(KERN_ERR "Memory resource not set for "
- "host bridge %d\n", pci_ctrl->index);
+ pr_err("Memory resource not set for host bridge %d\n",
+ pci_ctrl->index);
res->start = 0;
res->end = ~0U;
res->flags = IORESOURCE_MEM;
@@ -176,7 +168,7 @@ static int __init pcibios_init(void)
struct pci_bus *bus;
int next_busno = 0, ret;
- printk("PCI: Probing PCI hardware\n");
+ pr_info("PCI: Probing PCI hardware\n");
/* Scan all of the recorded PCI controllers. */
for (pci_ctrl = pci_ctrl_head; pci_ctrl; pci_ctrl = pci_ctrl->next) {
@@ -232,7 +224,7 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
for (idx=0; idx<6; idx++) {
r = &dev->resource[idx];
if (!r->start && r->end) {
- printk(KERN_ERR "PCI: Device %s not available because "
+ pr_err("PCI: Device %s not available because "
"of resource collisions\n", pci_name(dev));
return -EINVAL;
}
@@ -242,8 +234,8 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
cmd |= PCI_COMMAND_MEMORY;
}
if (cmd != old_cmd) {
- printk("PCI: Enabling device %s (%04x -> %04x)\n",
- pci_name(dev), old_cmd, cmd);
+ pr_info("PCI: Enabling device %s (%04x -> %04x)\n",
+ pci_name(dev), old_cmd, cmd);
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index ff4f0ecb03dd..8dd0593fb2c4 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -58,6 +58,12 @@ void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);
+#ifdef CONFIG_CC_STACKPROTECTOR
+#include <linux/stackprotector.h>
+unsigned long __stack_chk_guard __read_mostly;
+EXPORT_SYMBOL(__stack_chk_guard);
+#endif
+
#if XTENSA_HAVE_COPROCESSORS
void coprocessor_release_all(struct thread_info *ti)
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
index e2461968efb2..c0845cb1cbb9 100644
--- a/arch/xtensa/kernel/ptrace.c
+++ b/arch/xtensa/kernel/ptrace.c
@@ -278,7 +278,6 @@ static void ptrace_hbptriggered(struct perf_event *bp,
struct pt_regs *regs)
{
int i;
- siginfo_t info;
struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
if (bp->attr.bp_type & HW_BREAKPOINT_X) {
@@ -293,12 +292,7 @@ static void ptrace_hbptriggered(struct perf_event *bp,
i = (i << 1) | 1;
}
- info.si_signo = SIGTRAP;
- info.si_errno = i;
- info.si_code = TRAP_HWBKPT;
- info.si_addr = (void __user *)bkpt->address;
-
- force_sig_info(SIGTRAP, &info, current);
+ force_sig_ptrace_errno_trap(i, (void __user *)bkpt->address);
}
static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type)
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 08175df7a69e..a931af9075f2 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -36,6 +36,7 @@
#endif
#include <asm/bootparam.h>
+#include <asm/kasan.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
@@ -156,7 +157,7 @@ static int __init parse_bootparam(const bp_tag_t* tag)
/* Boot parameters must start with a BP_TAG_FIRST tag. */
if (tag->id != BP_TAG_FIRST) {
- printk(KERN_WARNING "Invalid boot parameters!\n");
+ pr_warn("Invalid boot parameters!\n");
return 0;
}
@@ -165,15 +166,14 @@ static int __init parse_bootparam(const bp_tag_t* tag)
/* Parse all tags. */
while (tag != NULL && tag->id != BP_TAG_LAST) {
- for (t = &__tagtable_begin; t < &__tagtable_end; t++) {
+ for (t = &__tagtable_begin; t < &__tagtable_end; t++) {
if (tag->id == t->tag) {
t->parse(tag);
break;
}
}
if (t == &__tagtable_end)
- printk(KERN_WARNING "Ignoring tag "
- "0x%08x\n", tag->id);
+ pr_warn("Ignoring tag 0x%08x\n", tag->id);
tag = (bp_tag_t*)((unsigned long)(tag + 1) + tag->size);
}
@@ -208,6 +208,8 @@ static int __init xtensa_dt_io_area(unsigned long node, const char *uname,
/* round down to nearest 256MB boundary */
xtensa_kio_paddr &= 0xf0000000;
+ init_kio();
+
return 1;
}
#else
@@ -246,6 +248,14 @@ void __init early_init_devtree(void *params)
void __init init_arch(bp_tag_t *bp_start)
{
+ /* Initialize MMU. */
+
+ init_mmu();
+
+ /* Initialize initial KASAN shadow map */
+
+ kasan_early_init();
+
/* Parse boot parameters */
if (bp_start)
@@ -263,10 +273,6 @@ void __init init_arch(bp_tag_t *bp_start)
/* Early hook for platforms */
platform_init(bp_start);
-
- /* Initialize MMU. */
-
- init_mmu();
}
/*
@@ -277,13 +283,13 @@ extern char _end[];
extern char _stext[];
extern char _WindowVectors_text_start;
extern char _WindowVectors_text_end;
-extern char _DebugInterruptVector_literal_start;
+extern char _DebugInterruptVector_text_start;
extern char _DebugInterruptVector_text_end;
-extern char _KernelExceptionVector_literal_start;
+extern char _KernelExceptionVector_text_start;
extern char _KernelExceptionVector_text_end;
-extern char _UserExceptionVector_literal_start;
+extern char _UserExceptionVector_text_start;
extern char _UserExceptionVector_text_end;
-extern char _DoubleExceptionVector_literal_start;
+extern char _DoubleExceptionVector_text_start;
extern char _DoubleExceptionVector_text_end;
#if XCHAL_EXCM_LEVEL >= 2
extern char _Level2InterruptVector_text_start;
@@ -317,6 +323,13 @@ static inline int mem_reserve(unsigned long start, unsigned long end)
void __init setup_arch(char **cmdline_p)
{
+ pr_info("config ID: %08x:%08x\n",
+ get_sr(SREG_EPC), get_sr(SREG_EXCSAVE));
+ if (get_sr(SREG_EPC) != XCHAL_HW_CONFIGID0 ||
+ get_sr(SREG_EXCSAVE) != XCHAL_HW_CONFIGID1)
+ pr_info("built for config ID: %08x:%08x\n",
+ XCHAL_HW_CONFIGID0, XCHAL_HW_CONFIGID1);
+
*cmdline_p = command_line;
platform_setup(cmdline_p);
strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
@@ -339,16 +352,16 @@ void __init setup_arch(char **cmdline_p)
mem_reserve(__pa(&_WindowVectors_text_start),
__pa(&_WindowVectors_text_end));
- mem_reserve(__pa(&_DebugInterruptVector_literal_start),
+ mem_reserve(__pa(&_DebugInterruptVector_text_start),
__pa(&_DebugInterruptVector_text_end));
- mem_reserve(__pa(&_KernelExceptionVector_literal_start),
+ mem_reserve(__pa(&_KernelExceptionVector_text_start),
__pa(&_KernelExceptionVector_text_end));
- mem_reserve(__pa(&_UserExceptionVector_literal_start),
+ mem_reserve(__pa(&_UserExceptionVector_text_start),
__pa(&_UserExceptionVector_text_end));
- mem_reserve(__pa(&_DoubleExceptionVector_literal_start),
+ mem_reserve(__pa(&_DoubleExceptionVector_text_start),
__pa(&_DoubleExceptionVector_text_end));
#if XCHAL_EXCM_LEVEL >= 2
@@ -380,7 +393,7 @@ void __init setup_arch(char **cmdline_p)
#endif
parse_early_param();
bootmem_init();
-
+ kasan_init();
unflatten_and_copy_device_tree();
#ifdef CONFIG_SMP
@@ -582,12 +595,14 @@ c_show(struct seq_file *f, void *slot)
"model\t\t: Xtensa " XCHAL_HW_VERSION_NAME "\n"
"core ID\t\t: " XCHAL_CORE_ID "\n"
"build ID\t: 0x%x\n"
+ "config ID\t: %08x:%08x\n"
"byte order\t: %s\n"
"cpu MHz\t\t: %lu.%02lu\n"
"bogomips\t: %lu.%02lu\n",
num_online_cpus(),
cpumask_pr_args(cpu_online_mask),
XCHAL_BUILD_UNIQUE_ID,
+ get_sr(SREG_EPC), get_sr(SREG_EXCSAVE),
XCHAL_HAVE_BE ? "big" : "little",
ccount_freq/1000000,
(ccount_freq/10000) % 100,
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index d427e784ab44..f88e7a0b232c 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -28,8 +28,6 @@
#include <asm/coprocessor.h>
#include <asm/unistd.h>
-#define DEBUG_SIG 0
-
extern struct task_struct *coproc_owners[];
struct rt_sigframe
@@ -399,10 +397,8 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
regs->areg[8] = (unsigned long) &frame->uc;
regs->threadptr = tp;
-#if DEBUG_SIG
- printk("SIG rt deliver (%s:%d): signal=%d sp=%p pc=%08x\n",
- current->comm, current->pid, sig, frame, regs->pc);
-#endif
+ pr_debug("SIG rt deliver (%s:%d): signal=%d sp=%p pc=%08lx\n",
+ current->comm, current->pid, sig, frame, regs->pc);
return 0;
}
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index bae697a06a98..32c5207f1226 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -33,6 +33,7 @@
#include <linux/kallsyms.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
+#include <linux/ratelimit.h>
#include <asm/stacktrace.h>
#include <asm/ptrace.h>
@@ -158,8 +159,7 @@ COPROCESSOR(7),
* 2. it is a temporary memory buffer for the exception handlers.
*/
-DEFINE_PER_CPU(unsigned long, exc_table[EXC_TABLE_SIZE/4]);
-
+DEFINE_PER_CPU(struct exc_table, exc_table);
DEFINE_PER_CPU(struct debug_table, debug_table);
void die(const char*, struct pt_regs*, long);
@@ -178,13 +178,14 @@ __die_if_kernel(const char *str, struct pt_regs *regs, long err)
void do_unhandled(struct pt_regs *regs, unsigned long exccause)
{
__die_if_kernel("Caught unhandled exception - should not happen",
- regs, SIGKILL);
+ regs, SIGKILL);
/* If in user mode, send SIGILL signal to current process */
- printk("Caught unhandled exception in '%s' "
- "(pid = %d, pc = %#010lx) - should not happen\n"
- "\tEXCCAUSE is %ld\n",
- current->comm, task_pid_nr(current), regs->pc, exccause);
+ pr_info_ratelimited("Caught unhandled exception in '%s' "
+ "(pid = %d, pc = %#010lx) - should not happen\n"
+ "\tEXCCAUSE is %ld\n",
+ current->comm, task_pid_nr(current), regs->pc,
+ exccause);
force_sig(SIGILL, current);
}
@@ -305,8 +306,8 @@ do_illegal_instruction(struct pt_regs *regs)
/* If in user mode, send SIGILL signal to current process. */
- printk("Illegal Instruction in '%s' (pid = %d, pc = %#010lx)\n",
- current->comm, task_pid_nr(current), regs->pc);
+ pr_info_ratelimited("Illegal Instruction in '%s' (pid = %d, pc = %#010lx)\n",
+ current->comm, task_pid_nr(current), regs->pc);
force_sig(SIGILL, current);
}
@@ -325,13 +326,14 @@ do_unaligned_user (struct pt_regs *regs)
siginfo_t info;
__die_if_kernel("Unhandled unaligned exception in kernel",
- regs, SIGKILL);
+ regs, SIGKILL);
current->thread.bad_vaddr = regs->excvaddr;
current->thread.error_code = -3;
- printk("Unaligned memory access to %08lx in '%s' "
- "(pid = %d, pc = %#010lx)\n",
- regs->excvaddr, current->comm, task_pid_nr(current), regs->pc);
+ pr_info_ratelimited("Unaligned memory access to %08lx in '%s' "
+ "(pid = %d, pc = %#010lx)\n",
+ regs->excvaddr, current->comm,
+ task_pid_nr(current), regs->pc);
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRALN;
@@ -365,28 +367,28 @@ do_debug(struct pt_regs *regs)
}
-static void set_handler(int idx, void *handler)
-{
- unsigned int cpu;
-
- for_each_possible_cpu(cpu)
- per_cpu(exc_table, cpu)[idx] = (unsigned long)handler;
-}
+#define set_handler(type, cause, handler) \
+ do { \
+ unsigned int cpu; \
+ \
+ for_each_possible_cpu(cpu) \
+ per_cpu(exc_table, cpu).type[cause] = (handler);\
+ } while (0)
/* Set exception C handler - for temporary use when probing exceptions */
void * __init trap_set_handler(int cause, void *handler)
{
- void *previous = (void *)per_cpu(exc_table, 0)[
- EXC_TABLE_DEFAULT / 4 + cause];
- set_handler(EXC_TABLE_DEFAULT / 4 + cause, handler);
+ void *previous = per_cpu(exc_table, 0).default_handler[cause];
+
+ set_handler(default_handler, cause, handler);
return previous;
}
static void trap_init_excsave(void)
{
- unsigned long excsave1 = (unsigned long)this_cpu_ptr(exc_table);
+ unsigned long excsave1 = (unsigned long)this_cpu_ptr(&exc_table);
__asm__ __volatile__("wsr %0, excsave1\n" : : "a" (excsave1));
}
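
trap_set_handler() keeps its contract of returning the previous C handler so probing code can restore it afterwards. A hypothetical sketch (my_probe_handler and run_probe are made-up names, not part of this patch; EXCCAUSE_LOAD_STORE_ERROR comes from asm/regs.h):

    /* Hypothetical probe sequence around a temporarily swapped handler. */
    static void probe_with_temporary_handler(void)
    {
        void *old = trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR,
                                     my_probe_handler);

        run_probe();  /* may trigger the exception being probed */
        trap_set_handler(EXCCAUSE_LOAD_STORE_ERROR, old);
    }
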
@@ -418,10 +420,10 @@ void __init trap_init(void)
/* Setup default vectors. */
- for(i = 0; i < 64; i++) {
- set_handler(EXC_TABLE_FAST_USER/4 + i, user_exception);
- set_handler(EXC_TABLE_FAST_KERNEL/4 + i, kernel_exception);
- set_handler(EXC_TABLE_DEFAULT/4 + i, do_unhandled);
+ for (i = 0; i < EXCCAUSE_N; i++) {
+ set_handler(fast_user_handler, i, user_exception);
+ set_handler(fast_kernel_handler, i, kernel_exception);
+ set_handler(default_handler, i, do_unhandled);
}
/* Setup specific handlers. */
@@ -433,11 +435,11 @@ void __init trap_init(void)
void *handler = dispatch_init_table[i].handler;
if (fast == 0)
- set_handler (EXC_TABLE_DEFAULT/4 + cause, handler);
+ set_handler(default_handler, cause, handler);
if (fast && fast & USER)
- set_handler (EXC_TABLE_FAST_USER/4 + cause, handler);
+ set_handler(fast_user_handler, cause, handler);
if (fast && fast & KRNL)
- set_handler (EXC_TABLE_FAST_KERNEL/4 + cause, handler);
+ set_handler(fast_kernel_handler, cause, handler);
}
/* Initialize EXCSAVE_1 to hold the address of the exception table. */
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
index 332e9d635fb6..841503d3307c 100644
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -205,9 +205,6 @@ ENDPROC(_KernelExceptionVector)
*/
.section .DoubleExceptionVector.text, "ax"
- .begin literal_prefix .DoubleExceptionVector
- .globl _DoubleExceptionVector_WindowUnderflow
- .globl _DoubleExceptionVector_WindowOverflow
ENTRY(_DoubleExceptionVector)
@@ -217,8 +214,12 @@ ENTRY(_DoubleExceptionVector)
/* Check for kernel double exception (usually fatal). */
rsr a2, ps
- _bbci.l a2, PS_UM_BIT, .Lksp
+ _bbsi.l a2, PS_UM_BIT, 1f
+ j .Lksp
+ .align 4
+ .literal_position
+1:
/* Check if we are currently handling a window exception. */
/* Note: We don't need to indicate that we enter a critical section. */
@@ -304,8 +305,7 @@ _DoubleExceptionVector_WindowUnderflow:
.Lunrecoverable:
rsr a3, excsave1
wsr a0, excsave1
- movi a0, unrecoverable_exception
- callx0 a0
+ call0 unrecoverable_exception
.Lfixup:/* Check for a fixup handler or if we were in a critical section. */
@@ -475,11 +475,8 @@ _DoubleExceptionVector_handle_exception:
rotw -3
j 1b
-
ENDPROC(_DoubleExceptionVector)
- .end literal_prefix
-
.text
/*
* Fixup handler for TLB miss in double exception handler for window overflow.
@@ -508,6 +505,8 @@ ENDPROC(_DoubleExceptionVector)
* a3: exctable, original value in excsave1
*/
+ .literal_position
+
ENTRY(window_overflow_restore_a0_fixup)
rsr a0, ps
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index 162c77e53ca8..70b731edc7b8 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -45,24 +45,16 @@ jiffies = jiffies_64;
LONG(sym ## _end); \
LONG(LOADADDR(section))
-/* Macro to define a section for a vector.
- *
- * Use of the MIN function catches the types of errors illustrated in
- * the following example:
- *
- * Assume the section .DoubleExceptionVector.literal is completely
- * full. Then a programmer adds code to .DoubleExceptionVector.text
- * that produces another literal. The final literal position will
- * overlay onto the first word of the adjacent code section
- * .DoubleExceptionVector.text. (In practice, the literals will
- * overwrite the code, and the first few instructions will be
- * garbage.)
+/*
+ * Macro to define a section for a vector. When CONFIG_VECTORS_OFFSET is
+ * defined, code for every vector is located with other init data. At startup
+ * time head.S copies the code for every vector to its final position,
+ * according to the description recorded in the corresponding RELOCATE_ENTRY.
*/
#ifdef CONFIG_VECTORS_OFFSET
-#define SECTION_VECTOR(sym, section, addr, max_prevsec_size, prevsec) \
- section addr : AT((MIN(LOADADDR(prevsec) + max_prevsec_size, \
- LOADADDR(prevsec) + SIZEOF(prevsec)) + 3) & ~ 3) \
+#define SECTION_VECTOR(sym, section, addr, prevsec) \
+ section addr : AT(((LOADADDR(prevsec) + SIZEOF(prevsec)) + 3) & ~ 3) \
{ \
. = ALIGN(4); \
sym ## _start = ABSOLUTE(.); \
@@ -112,26 +104,19 @@ SECTIONS
#if XCHAL_EXCM_LEVEL >= 6
SECTION_VECTOR (.Level6InterruptVector.text, INTLEVEL6_VECTOR_VADDR)
#endif
- SECTION_VECTOR (.DebugInterruptVector.literal, DEBUG_VECTOR_VADDR - 4)
SECTION_VECTOR (.DebugInterruptVector.text, DEBUG_VECTOR_VADDR)
- SECTION_VECTOR (.KernelExceptionVector.literal, KERNEL_VECTOR_VADDR - 4)
SECTION_VECTOR (.KernelExceptionVector.text, KERNEL_VECTOR_VADDR)
- SECTION_VECTOR (.UserExceptionVector.literal, USER_VECTOR_VADDR - 4)
SECTION_VECTOR (.UserExceptionVector.text, USER_VECTOR_VADDR)
- SECTION_VECTOR (.DoubleExceptionVector.literal, DOUBLEEXC_VECTOR_VADDR - 20)
SECTION_VECTOR (.DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR)
#endif
+ IRQENTRY_TEXT
+ SOFTIRQENTRY_TEXT
+ ENTRY_TEXT
TEXT_TEXT
- VMLINUX_SYMBOL(__sched_text_start) = .;
- *(.sched.literal .sched.text)
- VMLINUX_SYMBOL(__sched_text_end) = .;
- VMLINUX_SYMBOL(__cpuidle_text_start) = .;
- *(.cpuidle.literal .cpuidle.text)
- VMLINUX_SYMBOL(__cpuidle_text_end) = .;
- VMLINUX_SYMBOL(__lock_text_start) = .;
- *(.spinlock.literal .spinlock.text)
- VMLINUX_SYMBOL(__lock_text_end) = .;
+ SCHED_TEXT
+ CPUIDLE_TEXT
+ LOCK_TEXT
}
_etext = .;
@@ -196,8 +181,6 @@ SECTIONS
.KernelExceptionVector.text);
RELOCATE_ENTRY(_UserExceptionVector_text,
.UserExceptionVector.text);
- RELOCATE_ENTRY(_DoubleExceptionVector_literal,
- .DoubleExceptionVector.literal);
RELOCATE_ENTRY(_DoubleExceptionVector_text,
.DoubleExceptionVector.text);
RELOCATE_ENTRY(_DebugInterruptVector_text,
@@ -230,25 +213,19 @@ SECTIONS
SECTION_VECTOR (_WindowVectors_text,
.WindowVectors.text,
- WINDOW_VECTORS_VADDR, 4,
+ WINDOW_VECTORS_VADDR,
.dummy)
- SECTION_VECTOR (_DebugInterruptVector_literal,
- .DebugInterruptVector.literal,
- DEBUG_VECTOR_VADDR - 4,
- SIZEOF(.WindowVectors.text),
- .WindowVectors.text)
SECTION_VECTOR (_DebugInterruptVector_text,
.DebugInterruptVector.text,
DEBUG_VECTOR_VADDR,
- 4,
- .DebugInterruptVector.literal)
+ .WindowVectors.text)
#undef LAST
#define LAST .DebugInterruptVector.text
#if XCHAL_EXCM_LEVEL >= 2
SECTION_VECTOR (_Level2InterruptVector_text,
.Level2InterruptVector.text,
INTLEVEL2_VECTOR_VADDR,
- SIZEOF(LAST), LAST)
+ LAST)
# undef LAST
# define LAST .Level2InterruptVector.text
#endif
@@ -256,7 +233,7 @@ SECTIONS
SECTION_VECTOR (_Level3InterruptVector_text,
.Level3InterruptVector.text,
INTLEVEL3_VECTOR_VADDR,
- SIZEOF(LAST), LAST)
+ LAST)
# undef LAST
# define LAST .Level3InterruptVector.text
#endif
@@ -264,7 +241,7 @@ SECTIONS
SECTION_VECTOR (_Level4InterruptVector_text,
.Level4InterruptVector.text,
INTLEVEL4_VECTOR_VADDR,
- SIZEOF(LAST), LAST)
+ LAST)
# undef LAST
# define LAST .Level4InterruptVector.text
#endif
@@ -272,7 +249,7 @@ SECTIONS
SECTION_VECTOR (_Level5InterruptVector_text,
.Level5InterruptVector.text,
INTLEVEL5_VECTOR_VADDR,
- SIZEOF(LAST), LAST)
+ LAST)
# undef LAST
# define LAST .Level5InterruptVector.text
#endif
@@ -280,40 +257,23 @@ SECTIONS
SECTION_VECTOR (_Level6InterruptVector_text,
.Level6InterruptVector.text,
INTLEVEL6_VECTOR_VADDR,
- SIZEOF(LAST), LAST)
+ LAST)
# undef LAST
# define LAST .Level6InterruptVector.text
#endif
- SECTION_VECTOR (_KernelExceptionVector_literal,
- .KernelExceptionVector.literal,
- KERNEL_VECTOR_VADDR - 4,
- SIZEOF(LAST), LAST)
-#undef LAST
SECTION_VECTOR (_KernelExceptionVector_text,
.KernelExceptionVector.text,
KERNEL_VECTOR_VADDR,
- 4,
- .KernelExceptionVector.literal)
- SECTION_VECTOR (_UserExceptionVector_literal,
- .UserExceptionVector.literal,
- USER_VECTOR_VADDR - 4,
- SIZEOF(.KernelExceptionVector.text),
- .KernelExceptionVector.text)
+ LAST)
+#undef LAST
SECTION_VECTOR (_UserExceptionVector_text,
.UserExceptionVector.text,
USER_VECTOR_VADDR,
- 4,
- .UserExceptionVector.literal)
- SECTION_VECTOR (_DoubleExceptionVector_literal,
- .DoubleExceptionVector.literal,
- DOUBLEEXC_VECTOR_VADDR - 20,
- SIZEOF(.UserExceptionVector.text),
- .UserExceptionVector.text)
+ .KernelExceptionVector.text)
SECTION_VECTOR (_DoubleExceptionVector_text,
.DoubleExceptionVector.text,
DOUBLEEXC_VECTOR_VADDR,
- 20,
- .DoubleExceptionVector.literal)
+ .UserExceptionVector.text)
. = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
@@ -323,7 +283,6 @@ SECTIONS
SECTION_VECTOR (_SecondaryResetVector_text,
.SecondaryResetVector.text,
RESET_VECTOR1_VADDR,
- SIZEOF(.DoubleExceptionVector.text),
.DoubleExceptionVector.text)
. = LOADADDR(.SecondaryResetVector.text)+SIZEOF(.SecondaryResetVector.text);
@@ -373,5 +332,4 @@ SECTIONS
/* Sections to be discarded */
DISCARDS
- /DISCARD/ : { *(.exit.literal) }
}
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
index 672391003e40..04f19de46700 100644
--- a/arch/xtensa/kernel/xtensa_ksyms.c
+++ b/arch/xtensa/kernel/xtensa_ksyms.c
@@ -41,7 +41,12 @@
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(__memset);
+EXPORT_SYMBOL(__memcpy);
+EXPORT_SYMBOL(__memmove);
+#ifndef CONFIG_GENERIC_STRNCPY_FROM_USER
EXPORT_SYMBOL(__strncpy_user);
+#endif
EXPORT_SYMBOL(clear_page);
EXPORT_SYMBOL(copy_page);
diff --git a/arch/xtensa/lib/checksum.S b/arch/xtensa/lib/checksum.S
index 4eb573d2720e..528fe0dd9339 100644
--- a/arch/xtensa/lib/checksum.S
+++ b/arch/xtensa/lib/checksum.S
@@ -14,9 +14,10 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <asm/errno.h>
+#include <linux/errno.h>
#include <linux/linkage.h>
#include <variant/core.h>
+#include <asm/asmmacro.h>
/*
* computes a partial checksum, e.g. for TCP/UDP fragments
@@ -175,23 +176,8 @@ ENDPROC(csum_partial)
/*
* Copy from ds while checksumming, otherwise like csum_partial
- *
- * The macros SRC and DST specify the type of access for the instruction.
- * thus we can call a custom exception handler for each access type.
*/
-#define SRC(y...) \
- 9999: y; \
- .section __ex_table, "a"; \
- .long 9999b, 6001f ; \
- .previous
-
-#define DST(y...) \
- 9999: y; \
- .section __ex_table, "a"; \
- .long 9999b, 6002f ; \
- .previous
-
/*
unsigned int csum_partial_copy_generic (const char *src, char *dst, int len,
int sum, int *src_err_ptr, int *dst_err_ptr)
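
The SRC()/DST() wrappers removed above are superseded by the EX() annotation now provided by <asm/asmmacro.h>. A sketch of the assumed macro (hypothetical rendering; the real definition lives in arch/xtensa/include/asm/asmmacro.h), which records the address of the instruction that follows, paired with the given fixup label, in the __ex_table section:

#define EX(handler)				\
	.section __ex_table, "a";	\
	.word	97f, handler;		\
	.previous;			\
97:

So "EX(10f) l32i a9, a2, 0" arranges for a fault in that load to branch to the local label 10:, which stores -EFAULT through src_err_ptr.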
@@ -244,28 +230,28 @@ ENTRY(csum_partial_copy_generic)
add a10, a10, a2 /* a10 = end of last 32-byte src chunk */
.Loop5:
#endif
-SRC( l32i a9, a2, 0 )
-SRC( l32i a8, a2, 4 )
-DST( s32i a9, a3, 0 )
-DST( s32i a8, a3, 4 )
+EX(10f) l32i a9, a2, 0
+EX(10f) l32i a8, a2, 4
+EX(11f) s32i a9, a3, 0
+EX(11f) s32i a8, a3, 4
ONES_ADD(a5, a9)
ONES_ADD(a5, a8)
-SRC( l32i a9, a2, 8 )
-SRC( l32i a8, a2, 12 )
-DST( s32i a9, a3, 8 )
-DST( s32i a8, a3, 12 )
+EX(10f) l32i a9, a2, 8
+EX(10f) l32i a8, a2, 12
+EX(11f) s32i a9, a3, 8
+EX(11f) s32i a8, a3, 12
ONES_ADD(a5, a9)
ONES_ADD(a5, a8)
-SRC( l32i a9, a2, 16 )
-SRC( l32i a8, a2, 20 )
-DST( s32i a9, a3, 16 )
-DST( s32i a8, a3, 20 )
+EX(10f) l32i a9, a2, 16
+EX(10f) l32i a8, a2, 20
+EX(11f) s32i a9, a3, 16
+EX(11f) s32i a8, a3, 20
ONES_ADD(a5, a9)
ONES_ADD(a5, a8)
-SRC( l32i a9, a2, 24 )
-SRC( l32i a8, a2, 28 )
-DST( s32i a9, a3, 24 )
-DST( s32i a8, a3, 28 )
+EX(10f) l32i a9, a2, 24
+EX(10f) l32i a8, a2, 28
+EX(11f) s32i a9, a3, 24
+EX(11f) s32i a8, a3, 28
ONES_ADD(a5, a9)
ONES_ADD(a5, a8)
addi a2, a2, 32
@@ -284,8 +270,8 @@ DST( s32i a8, a3, 28 )
add a10, a10, a2 /* a10 = end of last 4-byte src chunk */
.Loop6:
#endif
-SRC( l32i a9, a2, 0 )
-DST( s32i a9, a3, 0 )
+EX(10f) l32i a9, a2, 0
+EX(11f) s32i a9, a3, 0
ONES_ADD(a5, a9)
addi a2, a2, 4
addi a3, a3, 4
@@ -315,8 +301,8 @@ DST( s32i a9, a3, 0 )
add a10, a10, a2 /* a10 = end of last 2-byte src chunk */
.Loop7:
#endif
-SRC( l16ui a9, a2, 0 )
-DST( s16i a9, a3, 0 )
+EX(10f) l16ui a9, a2, 0
+EX(11f) s16i a9, a3, 0
ONES_ADD(a5, a9)
addi a2, a2, 2
addi a3, a3, 2
@@ -326,8 +312,8 @@ DST( s16i a9, a3, 0 )
4:
/* This section processes a possible trailing odd byte. */
_bbci.l a4, 0, 8f /* 1-byte chunk */
-SRC( l8ui a9, a2, 0 )
-DST( s8i a9, a3, 0 )
+EX(10f) l8ui a9, a2, 0
+EX(11f) s8i a9, a3, 0
#ifdef __XTENSA_EB__
slli a9, a9, 8 /* shift byte to bits 8..15 */
#endif
@@ -350,10 +336,10 @@ DST( s8i a9, a3, 0 )
add a10, a10, a2 /* a10 = end of last odd-aligned, 2-byte src chunk */
.Loop8:
#endif
-SRC( l8ui a9, a2, 0 )
-SRC( l8ui a8, a2, 1 )
-DST( s8i a9, a3, 0 )
-DST( s8i a8, a3, 1 )
+EX(10f) l8ui a9, a2, 0
+EX(10f) l8ui a8, a2, 1
+EX(11f) s8i a9, a3, 0
+EX(11f) s8i a8, a3, 1
#ifdef __XTENSA_EB__
slli a9, a9, 8 /* combine into a single 16-bit value */
#else /* for checksum computation */
@@ -381,7 +367,7 @@ ENDPROC(csum_partial_copy_generic)
a12 = original dst for exception handling
*/
-6001:
+10:
_movi a2, -EFAULT
s32i a2, a6, 0 /* src_err_ptr */
@@ -403,7 +389,7 @@ ENDPROC(csum_partial_copy_generic)
2:
retw
-6002:
+11:
movi a2, -EFAULT
s32i a2, a7, 0 /* dst_err_ptr */
movi a2, 0
diff --git a/arch/xtensa/lib/memcopy.S b/arch/xtensa/lib/memcopy.S
index b1c219acabe7..c0f6981719d6 100644
--- a/arch/xtensa/lib/memcopy.S
+++ b/arch/xtensa/lib/memcopy.S
@@ -9,23 +9,9 @@
* Copyright (C) 2002 - 2012 Tensilica Inc.
*/
+#include <linux/linkage.h>
#include <variant/core.h>
-
- .macro src_b r, w0, w1
-#ifdef __XTENSA_EB__
- src \r, \w0, \w1
-#else
- src \r, \w1, \w0
-#endif
- .endm
-
- .macro ssa8 r
-#ifdef __XTENSA_EB__
- ssa8b \r
-#else
- ssa8l \r
-#endif
- .endm
+#include <asm/asmmacro.h>
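
The deleted src_b/ssa8 helpers reappear below as __src_b/__ssa8, presumably hoisted into <asm/asmmacro.h> so that all lib/ routines share one endianness dispatch. A sketch of the assumed shared macros, mirroring the removed local ones:

	.macro	__src_b	r, w0, w1
#ifdef __XTENSA_EB__
		src	\r, \w0, \w1
#else
		src	\r, \w1, \w0
#endif
	.endm

	.macro	__ssa8	r
#ifdef __XTENSA_EB__
		ssa8b	\r
#else
		ssa8l	\r
#endif
	.endm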
/*
* void *memcpy(void *dst, const void *src, size_t len);
@@ -123,10 +109,8 @@
addi a5, a5, 2
j .Ldstaligned # dst is now aligned, return to main algorithm
- .align 4
- .global memcpy
- .type memcpy,@function
-memcpy:
+ENTRY(__memcpy)
+WEAK(memcpy)
entry sp, 16 # minimal stack frame
# a2/ dst, a3/ src, a4/ len
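
The ENTRY()/WEAK() pair above is what lets an instrumented memcpy (for example KASAN's) take over: __memcpy is the strong, always-uninstrumented implementation (now exported in xtensa_ksyms.c), while memcpy becomes a weak alias at the same address. Assuming the classic linux/linkage.h shape, WEAK() expands to roughly:

#define WEAK(name)	\
	.weak name;	\
	name:

so a strong memcpy defined elsewhere silently wins over this alias, while callers of __memcpy still reach the raw copy.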
@@ -209,7 +193,7 @@ memcpy:
.Lsrcunaligned:
_beqz a4, .Ldone # avoid loading anything for zero-length copies
# copy 16 bytes per iteration for word-aligned dst and unaligned src
- ssa8 a3 # set shift amount from byte offset
+ __ssa8 a3 # set shift amount from byte offset
/* set to 1 when running on ISS (simulator) with the
lint or ferret client, or 0 to save a few cycles */
@@ -229,16 +213,16 @@ memcpy:
.Loop2:
l32i a7, a3, 4
l32i a8, a3, 8
- src_b a6, a6, a7
+ __src_b a6, a6, a7
s32i a6, a5, 0
l32i a9, a3, 12
- src_b a7, a7, a8
+ __src_b a7, a7, a8
s32i a7, a5, 4
l32i a6, a3, 16
- src_b a8, a8, a9
+ __src_b a8, a8, a9
s32i a8, a5, 8
addi a3, a3, 16
- src_b a9, a9, a6
+ __src_b a9, a9, a6
s32i a9, a5, 12
addi a5, a5, 16
#if !XCHAL_HAVE_LOOPS
@@ -249,10 +233,10 @@ memcpy:
# copy 8 bytes
l32i a7, a3, 4
l32i a8, a3, 8
- src_b a6, a6, a7
+ __src_b a6, a6, a7
s32i a6, a5, 0
addi a3, a3, 8
- src_b a7, a7, a8
+ __src_b a7, a7, a8
s32i a7, a5, 4
addi a5, a5, 8
mov a6, a8
@@ -261,7 +245,7 @@ memcpy:
# copy 4 bytes
l32i a7, a3, 4
addi a3, a3, 4
- src_b a6, a6, a7
+ __src_b a6, a6, a7
s32i a6, a5, 0
addi a5, a5, 4
mov a6, a7
@@ -288,14 +272,14 @@ memcpy:
s8i a6, a5, 0
retw
+ENDPROC(__memcpy)
/*
* void bcopy(const void *src, void *dest, size_t n);
*/
- .align 4
- .global bcopy
- .type bcopy,@function
-bcopy:
+
+ENTRY(bcopy)
+
entry sp, 16 # minimal stack frame
# a2=src, a3=dst, a4=len
mov a5, a3
@@ -303,6 +287,8 @@ bcopy:
mov a2, a5
j .Lmovecommon # go to common code for memmove+bcopy
+ENDPROC(bcopy)
+
/*
* void *memmove(void *dst, const void *src, size_t len);
*
@@ -391,10 +377,8 @@ bcopy:
j .Lbackdstaligned # dst is now aligned,
# return to main algorithm
- .align 4
- .global memmove
- .type memmove,@function
-memmove:
+ENTRY(__memmove)
+WEAK(memmove)
entry sp, 16 # minimal stack frame
# a2/ dst, a3/ src, a4/ len
@@ -485,7 +469,7 @@ memmove:
.Lbacksrcunaligned:
_beqz a4, .Lbackdone # avoid loading anything for zero-length copies
# copy 16 bytes per iteration for word-aligned dst and unaligned src
- ssa8 a3 # set shift amount from byte offset
+ __ssa8 a3 # set shift amount from byte offset
#define SIM_CHECKS_ALIGNMENT 1 /* set to 1 when running on ISS with
* the lint or ferret client, or 0
* to save a few cycles */
@@ -506,15 +490,15 @@ memmove:
l32i a7, a3, 12
l32i a8, a3, 8
addi a5, a5, -16
- src_b a6, a7, a6
+ __src_b a6, a7, a6
s32i a6, a5, 12
l32i a9, a3, 4
- src_b a7, a8, a7
+ __src_b a7, a8, a7
s32i a7, a5, 8
l32i a6, a3, 0
- src_b a8, a9, a8
+ __src_b a8, a9, a8
s32i a8, a5, 4
- src_b a9, a6, a9
+ __src_b a9, a6, a9
s32i a9, a5, 0
#if !XCHAL_HAVE_LOOPS
bne a3, a10, .backLoop2 # continue loop if a3:src != a10:src_start
@@ -526,9 +510,9 @@ memmove:
l32i a7, a3, 4
l32i a8, a3, 0
addi a5, a5, -8
- src_b a6, a7, a6
+ __src_b a6, a7, a6
s32i a6, a5, 4
- src_b a7, a8, a7
+ __src_b a7, a8, a7
s32i a7, a5, 0
mov a6, a8
.Lback12:
@@ -537,7 +521,7 @@ memmove:
addi a3, a3, -4
l32i a7, a3, 0
addi a5, a5, -4
- src_b a6, a7, a6
+ __src_b a6, a7, a6
s32i a6, a5, 0
mov a6, a7
.Lback13:
@@ -566,11 +550,4 @@ memmove:
s8i a6, a5, 0
retw
-
-/*
- * Local Variables:
- * mode:fundamental
- * comment-start: "# "
- * comment-start-skip: "# *"
- * End:
- */
+ENDPROC(__memmove)
diff --git a/arch/xtensa/lib/memset.S b/arch/xtensa/lib/memset.S
index 10b8c400f175..276747dec300 100644
--- a/arch/xtensa/lib/memset.S
+++ b/arch/xtensa/lib/memset.S
@@ -11,7 +11,9 @@
* Copyright (C) 2002 Tensilica Inc.
*/
+#include <linux/linkage.h>
#include <variant/core.h>
+#include <asm/asmmacro.h>
/*
* void *memset(void *dst, int c, size_t length)
@@ -28,20 +30,10 @@
* the alignment labels).
*/
-/* Load or store instructions that may cause exceptions use the EX macro. */
-
-#define EX(insn,reg1,reg2,offset,handler) \
-9: insn reg1, reg2, offset; \
- .section __ex_table, "a"; \
- .word 9b, handler; \
- .previous
-
-
.text
-.align 4
-.global memset
-.type memset,@function
-memset:
+ENTRY(__memset)
+WEAK(memset)
+
entry sp, 16 # minimal stack frame
# a2/ dst, a3/ c, a4/ length
extui a3, a3, 0, 8 # mask to just 8 bits
@@ -73,10 +65,10 @@ memset:
add a6, a6, a5 # a6 = end of last 16B chunk
#endif /* !XCHAL_HAVE_LOOPS */
.Loop1:
- EX(s32i, a3, a5, 0, memset_fixup)
- EX(s32i, a3, a5, 4, memset_fixup)
- EX(s32i, a3, a5, 8, memset_fixup)
- EX(s32i, a3, a5, 12, memset_fixup)
+EX(10f) s32i a3, a5, 0
+EX(10f) s32i a3, a5, 4
+EX(10f) s32i a3, a5, 8
+EX(10f) s32i a3, a5, 12
addi a5, a5, 16
#if !XCHAL_HAVE_LOOPS
blt a5, a6, .Loop1
@@ -84,23 +76,23 @@ memset:
.Loop1done:
bbci.l a4, 3, .L2
# set 8 bytes
- EX(s32i, a3, a5, 0, memset_fixup)
- EX(s32i, a3, a5, 4, memset_fixup)
+EX(10f) s32i a3, a5, 0
+EX(10f) s32i a3, a5, 4
addi a5, a5, 8
.L2:
bbci.l a4, 2, .L3
# set 4 bytes
- EX(s32i, a3, a5, 0, memset_fixup)
+EX(10f) s32i a3, a5, 0
addi a5, a5, 4
.L3:
bbci.l a4, 1, .L4
# set 2 bytes
- EX(s16i, a3, a5, 0, memset_fixup)
+EX(10f) s16i a3, a5, 0
addi a5, a5, 2
.L4:
bbci.l a4, 0, .L5
# set 1 byte
- EX(s8i, a3, a5, 0, memset_fixup)
+EX(10f) s8i a3, a5, 0
.L5:
.Lret1:
retw
@@ -114,7 +106,7 @@ memset:
bbci.l a5, 0, .L20 # branch if dst alignment half-aligned
# dst is only byte aligned
# set 1 byte
- EX(s8i, a3, a5, 0, memset_fixup)
+EX(10f) s8i a3, a5, 0
addi a5, a5, 1
addi a4, a4, -1
# now retest if dst aligned
@@ -122,7 +114,7 @@ memset:
.L20:
# dst half-aligned
# set 2 bytes
- EX(s16i, a3, a5, 0, memset_fixup)
+EX(10f) s16i a3, a5, 0
addi a5, a5, 2
addi a4, a4, -2
j .L0 # dst is now aligned, return to main algorithm
@@ -141,7 +133,7 @@ memset:
add a6, a5, a4 # a6 = ending address
#endif /* !XCHAL_HAVE_LOOPS */
.Lbyteloop:
- EX(s8i, a3, a5, 0, memset_fixup)
+EX(10f) s8i a3, a5, 0
addi a5, a5, 1
#if !XCHAL_HAVE_LOOPS
blt a5, a6, .Lbyteloop
@@ -149,12 +141,13 @@ memset:
.Lbytesetdone:
retw
+ENDPROC(__memset)
.section .fixup, "ax"
.align 4
/* We return zero if a failure occurred. */
-memset_fixup:
+10:
movi a2, 0
retw
diff --git a/arch/xtensa/lib/pci-auto.c b/arch/xtensa/lib/pci-auto.c
index 34d05abbd921..a2b558161d6d 100644
--- a/arch/xtensa/lib/pci-auto.c
+++ b/arch/xtensa/lib/pci-auto.c
@@ -49,17 +49,6 @@
*
*/
-
-/* define DEBUG to print some debugging messages. */
-
-#undef DEBUG
-
-#ifdef DEBUG
-# define DBG(x...) printk(x)
-#else
-# define DBG(x...)
-#endif
-
static int pciauto_upper_iospc;
static int pciauto_upper_memspc;
@@ -97,7 +86,7 @@ pciauto_setup_bars(struct pci_dev *dev, int bar_limit)
{
bar_size &= PCI_BASE_ADDRESS_IO_MASK;
upper_limit = &pciauto_upper_iospc;
- DBG("PCI Autoconfig: BAR %d, I/O, ", bar_nr);
+ pr_debug("PCI Autoconfig: BAR %d, I/O, ", bar_nr);
}
else
{
@@ -107,7 +96,7 @@ pciauto_setup_bars(struct pci_dev *dev, int bar_limit)
bar_size &= PCI_BASE_ADDRESS_MEM_MASK;
upper_limit = &pciauto_upper_memspc;
- DBG("PCI Autoconfig: BAR %d, Mem, ", bar_nr);
+ pr_debug("PCI Autoconfig: BAR %d, Mem, ", bar_nr);
}
/* Allocate a base address (bar_size is negative!) */
@@ -125,7 +114,8 @@ pciauto_setup_bars(struct pci_dev *dev, int bar_limit)
if (found_mem64)
pci_write_config_dword(dev, (bar+=4), 0x00000000);
- DBG("size=0x%x, address=0x%x\n", ~bar_size + 1, *upper_limit);
+ pr_debug("size=0x%x, address=0x%x\n",
+ ~bar_size + 1, *upper_limit);
}
}
@@ -150,7 +140,7 @@ pciauto_setup_irq(struct pci_controller* pci_ctrl,struct pci_dev *dev,int devfn)
if (irq == -1)
irq = 0;
- DBG("PCI Autoconfig: Interrupt %d, pin %d\n", irq, pin);
+ pr_debug("PCI Autoconfig: Interrupt %d, pin %d\n", irq, pin);
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
}
@@ -289,8 +279,8 @@ int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
int iosave, memsave;
- DBG("PCI Autoconfig: Found P2P bridge, device %d\n",
- PCI_SLOT(pci_devfn));
+ pr_debug("PCI Autoconfig: Found P2P bridge, device %d\n",
+ PCI_SLOT(pci_devfn));
/* Allocate PCI I/O and/or memory space */
pciauto_setup_bars(dev, PCI_BASE_ADDRESS_1);
@@ -306,23 +296,6 @@ int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
}
-
-#if 0
- /* Skip legacy mode IDE controller */
-
- if ((pci_class >> 16) == PCI_CLASS_STORAGE_IDE) {
-
- unsigned char prg_iface;
- pci_read_config_byte(dev, PCI_CLASS_PROG, &prg_iface);
-
- if (!(prg_iface & PCIAUTO_IDE_MODE_MASK)) {
- DBG("PCI Autoconfig: Skipping legacy mode "
- "IDE controller\n");
- continue;
- }
- }
-#endif
-
/*
* Found a peripheral, enable some standard
* settings
@@ -337,8 +310,8 @@ int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x80);
/* Allocate PCI I/O and/or memory space */
- DBG("PCI Autoconfig: Found Bus %d, Device %d, Function %d\n",
- current_bus, PCI_SLOT(pci_devfn), PCI_FUNC(pci_devfn) );
+ pr_debug("PCI Autoconfig: Found Bus %d, Device %d, Function %d\n",
+ current_bus, PCI_SLOT(pci_devfn), PCI_FUNC(pci_devfn));
pciauto_setup_bars(dev, PCI_BASE_ADDRESS_5);
pciauto_setup_irq(pci_ctrl, dev, pci_devfn);
diff --git a/arch/xtensa/lib/strncpy_user.S b/arch/xtensa/lib/strncpy_user.S
index 1ad0ecf45368..5fce16b67dca 100644
--- a/arch/xtensa/lib/strncpy_user.S
+++ b/arch/xtensa/lib/strncpy_user.S
@@ -11,16 +11,10 @@
* Copyright (C) 2002 Tensilica Inc.
*/
-#include <variant/core.h>
#include <linux/errno.h>
-
-/* Load or store instructions that may cause exceptions use the EX macro. */
-
-#define EX(insn,reg1,reg2,offset,handler) \
-9: insn reg1, reg2, offset; \
- .section __ex_table, "a"; \
- .word 9b, handler; \
- .previous
+#include <linux/linkage.h>
+#include <variant/core.h>
+#include <asm/asmmacro.h>
/*
* char *__strncpy_user(char *dst, const char *src, size_t len)
@@ -54,10 +48,8 @@
# a12/ tmp
.text
-.align 4
-.global __strncpy_user
-.type __strncpy_user,@function
-__strncpy_user:
+ENTRY(__strncpy_user)
+
entry sp, 16 # minimal stack frame
# a2/ dst, a3/ src, a4/ len
mov a11, a2 # leave dst in return value register
@@ -75,9 +67,9 @@ __strncpy_user:
j .Ldstunaligned
.Lsrc1mod2: # src address is odd
- EX(l8ui, a9, a3, 0, fixup_l) # get byte 0
+EX(11f) l8ui a9, a3, 0 # get byte 0
addi a3, a3, 1 # advance src pointer
- EX(s8i, a9, a11, 0, fixup_s) # store byte 0
+EX(10f) s8i a9, a11, 0 # store byte 0
beqz a9, .Lret # if byte 0 is zero
addi a11, a11, 1 # advance dst pointer
addi a4, a4, -1 # decrement len
@@ -85,16 +77,16 @@ __strncpy_user:
bbci.l a3, 1, .Lsrcaligned # if src is now word-aligned
.Lsrc2mod4: # src address is 2 mod 4
- EX(l8ui, a9, a3, 0, fixup_l) # get byte 0
+EX(11f) l8ui a9, a3, 0 # get byte 0
/* 1-cycle interlock */
- EX(s8i, a9, a11, 0, fixup_s) # store byte 0
+EX(10f) s8i a9, a11, 0 # store byte 0
beqz a9, .Lret # if byte 0 is zero
addi a11, a11, 1 # advance dst pointer
addi a4, a4, -1 # decrement len
beqz a4, .Lret # if len is zero
- EX(l8ui, a9, a3, 1, fixup_l) # get byte 0
+EX(11f) l8ui a9, a3, 1 # get byte 0
addi a3, a3, 2 # advance src pointer
- EX(s8i, a9, a11, 0, fixup_s) # store byte 0
+EX(10f) s8i a9, a11, 0 # store byte 0
beqz a9, .Lret # if byte 0 is zero
addi a11, a11, 1 # advance dst pointer
addi a4, a4, -1 # decrement len
@@ -117,12 +109,12 @@ __strncpy_user:
	add	a12, a12, a11	# a12 = end of last 4B chunk
#endif
.Loop1:
- EX(l32i, a9, a3, 0, fixup_l) # get word from src
+EX(11f) l32i a9, a3, 0 # get word from src
addi a3, a3, 4 # advance src pointer
bnone a9, a5, .Lz0 # if byte 0 is zero
bnone a9, a6, .Lz1 # if byte 1 is zero
bnone a9, a7, .Lz2 # if byte 2 is zero
- EX(s32i, a9, a11, 0, fixup_s) # store word to dst
+EX(10f) s32i a9, a11, 0 # store word to dst
bnone a9, a8, .Lz3 # if byte 3 is zero
addi a11, a11, 4 # advance dst pointer
#if !XCHAL_HAVE_LOOPS
@@ -132,7 +124,7 @@ __strncpy_user:
.Loop1done:
bbci.l a4, 1, .L100
# copy 2 bytes
- EX(l16ui, a9, a3, 0, fixup_l)
+EX(11f) l16ui a9, a3, 0
addi a3, a3, 2 # advance src pointer
#ifdef __XTENSA_EB__
bnone a9, a7, .Lz0 # if byte 2 is zero
@@ -141,13 +133,13 @@ __strncpy_user:
bnone a9, a5, .Lz0 # if byte 0 is zero
bnone a9, a6, .Lz1 # if byte 1 is zero
#endif
- EX(s16i, a9, a11, 0, fixup_s)
+EX(10f) s16i a9, a11, 0
addi a11, a11, 2 # advance dst pointer
.L100:
bbci.l a4, 0, .Lret
- EX(l8ui, a9, a3, 0, fixup_l)
+EX(11f) l8ui a9, a3, 0
/* slot */
- EX(s8i, a9, a11, 0, fixup_s)
+EX(10f) s8i a9, a11, 0
beqz a9, .Lret # if byte is zero
addi a11, a11, 1-3 # advance dst ptr 1, but also cancel
# the effect of adding 3 in .Lz3 code
@@ -161,14 +153,14 @@ __strncpy_user:
#ifdef __XTENSA_EB__
movi a9, 0
#endif /* __XTENSA_EB__ */
- EX(s8i, a9, a11, 0, fixup_s)
+EX(10f) s8i a9, a11, 0
sub a2, a11, a2 # compute strlen
retw
.Lz1: # byte 1 is zero
#ifdef __XTENSA_EB__
extui a9, a9, 16, 16
#endif /* __XTENSA_EB__ */
- EX(s16i, a9, a11, 0, fixup_s)
+EX(10f) s16i a9, a11, 0
addi a11, a11, 1 # advance dst pointer
sub a2, a11, a2 # compute strlen
retw
@@ -176,9 +168,9 @@ __strncpy_user:
#ifdef __XTENSA_EB__
extui a9, a9, 16, 16
#endif /* __XTENSA_EB__ */
- EX(s16i, a9, a11, 0, fixup_s)
+EX(10f) s16i a9, a11, 0
movi a9, 0
- EX(s8i, a9, a11, 2, fixup_s)
+EX(10f) s8i a9, a11, 2
addi a11, a11, 2 # advance dst pointer
sub a2, a11, a2 # compute strlen
retw
@@ -196,9 +188,9 @@ __strncpy_user:
add a12, a11, a4 # a12 = ending address
#endif /* XCHAL_HAVE_LOOPS */
.Lnextbyte:
- EX(l8ui, a9, a3, 0, fixup_l)
+EX(11f) l8ui a9, a3, 0
addi a3, a3, 1
- EX(s8i, a9, a11, 0, fixup_s)
+EX(10f) s8i a9, a11, 0
beqz a9, .Lunalignedend
addi a11, a11, 1
#if !XCHAL_HAVE_LOOPS
@@ -209,6 +201,7 @@ __strncpy_user:
sub a2, a11, a2 # compute strlen
retw
+ENDPROC(__strncpy_user)
.section .fixup, "ax"
.align 4
@@ -218,8 +211,7 @@ __strncpy_user:
* implementation in memset(). Thus, we differentiate between
* load/store fixups. */
-fixup_s:
-fixup_l:
+10:
+11:
movi a2, -EFAULT
retw
-
diff --git a/arch/xtensa/lib/strnlen_user.S b/arch/xtensa/lib/strnlen_user.S
index 4c03b1e581e9..0b956ce7f386 100644
--- a/arch/xtensa/lib/strnlen_user.S
+++ b/arch/xtensa/lib/strnlen_user.S
@@ -11,15 +11,9 @@
* Copyright (C) 2002 Tensilica Inc.
*/
+#include <linux/linkage.h>
#include <variant/core.h>
-
-/* Load or store instructions that may cause exceptions use the EX macro. */
-
-#define EX(insn,reg1,reg2,offset,handler) \
-9: insn reg1, reg2, offset; \
- .section __ex_table, "a"; \
- .word 9b, handler; \
- .previous
+#include <asm/asmmacro.h>
/*
* size_t __strnlen_user(const char *s, size_t len)
@@ -49,10 +43,8 @@
# a10/ tmp
.text
-.align 4
-.global __strnlen_user
-.type __strnlen_user,@function
-__strnlen_user:
+ENTRY(__strnlen_user)
+
entry sp, 16 # minimal stack frame
# a2/ s, a3/ len
addi a4, a2, -4 # because we overincrement at the end;
@@ -77,7 +69,7 @@ __strnlen_user:
add a10, a10, a4 # a10 = end of last 4B chunk
#endif /* XCHAL_HAVE_LOOPS */
.Loop:
- EX(l32i, a9, a4, 4, lenfixup) # get next word of string
+EX(10f) l32i a9, a4, 4 # get next word of string
addi a4, a4, 4 # advance string pointer
bnone a9, a5, .Lz0 # if byte 0 is zero
bnone a9, a6, .Lz1 # if byte 1 is zero
@@ -88,7 +80,7 @@ __strnlen_user:
#endif
.Ldone:
- EX(l32i, a9, a4, 4, lenfixup) # load 4 bytes for remaining checks
+EX(10f) l32i a9, a4, 4 # load 4 bytes for remaining checks
bbci.l a3, 1, .L100
# check two more bytes (bytes 0, 1 of word)
@@ -125,14 +117,14 @@ __strnlen_user:
retw
.L1mod2: # address is odd
- EX(l8ui, a9, a4, 4, lenfixup) # get byte 0
+EX(10f) l8ui a9, a4, 4 # get byte 0
addi a4, a4, 1 # advance string pointer
beqz a9, .Lz3 # if byte 0 is zero
bbci.l a4, 1, .Laligned # if string pointer is now word-aligned
.L2mod4: # address is 2 mod 4
addi a4, a4, 2 # advance ptr for aligned access
- EX(l32i, a9, a4, 0, lenfixup) # get word with first two bytes of string
+EX(10f) l32i a9, a4, 0 # get word with first two bytes of string
bnone a9, a7, .Lz2 # if byte 2 (of word, not string) is zero
bany a9, a8, .Laligned # if byte 3 (of word, not string) is nonzero
# byte 3 is zero
@@ -140,8 +132,10 @@ __strnlen_user:
sub a2, a4, a2 # subtract to get length
retw
+ENDPROC(__strnlen_user)
+
.section .fixup, "ax"
.align 4
-lenfixup:
+10:
movi a2, 0
retw
diff --git a/arch/xtensa/lib/usercopy.S b/arch/xtensa/lib/usercopy.S
index d9cd766bde3e..64ab1971324f 100644
--- a/arch/xtensa/lib/usercopy.S
+++ b/arch/xtensa/lib/usercopy.S
@@ -53,30 +53,13 @@
* a11/ original length
*/
+#include <linux/linkage.h>
#include <variant/core.h>
-
-#ifdef __XTENSA_EB__
-#define ALIGN(R, W0, W1) src R, W0, W1
-#define SSA8(R) ssa8b R
-#else
-#define ALIGN(R, W0, W1) src R, W1, W0
-#define SSA8(R) ssa8l R
-#endif
-
-/* Load or store instructions that may cause exceptions use the EX macro. */
-
-#define EX(insn,reg1,reg2,offset,handler) \
-9: insn reg1, reg2, offset; \
- .section __ex_table, "a"; \
- .word 9b, handler; \
- .previous
-
+#include <asm/asmmacro.h>
.text
- .align 4
- .global __xtensa_copy_user
- .type __xtensa_copy_user,@function
-__xtensa_copy_user:
+ENTRY(__xtensa_copy_user)
+
entry sp, 16 # minimal stack frame
# a2/ dst, a3/ src, a4/ len
mov a5, a2 # copy dst so that a2 is return value
@@ -89,7 +72,7 @@ __xtensa_copy_user:
# per iteration
movi a8, 3 # if source is also aligned,
bnone a3, a8, .Laligned # then use word copy
- SSA8( a3) # set shift amount from byte offset
+ __ssa8 a3 # set shift amount from byte offset
bnez a4, .Lsrcunaligned
movi a2, 0 # return success for len==0
retw
@@ -102,9 +85,9 @@ __xtensa_copy_user:
bltui a4, 7, .Lbytecopy # do short copies byte by byte
# copy 1 byte
- EX(l8ui, a6, a3, 0, fixup)
+EX(10f) l8ui a6, a3, 0
addi a3, a3, 1
- EX(s8i, a6, a5, 0, fixup)
+EX(10f) s8i a6, a5, 0
addi a5, a5, 1
addi a4, a4, -1
bbci.l a5, 1, .Ldstaligned # if dst is now aligned, then
@@ -112,11 +95,11 @@ __xtensa_copy_user:
.Ldst2mod4: # dst 16-bit aligned
# copy 2 bytes
bltui a4, 6, .Lbytecopy # do short copies byte by byte
- EX(l8ui, a6, a3, 0, fixup)
- EX(l8ui, a7, a3, 1, fixup)
+EX(10f) l8ui a6, a3, 0
+EX(10f) l8ui a7, a3, 1
addi a3, a3, 2
- EX(s8i, a6, a5, 0, fixup)
- EX(s8i, a7, a5, 1, fixup)
+EX(10f) s8i a6, a5, 0
+EX(10f) s8i a7, a5, 1
addi a5, a5, 2
addi a4, a4, -2
j .Ldstaligned # dst is now aligned, return to main algorithm
@@ -135,9 +118,9 @@ __xtensa_copy_user:
add a7, a3, a4 # a7 = end address for source
#endif /* !XCHAL_HAVE_LOOPS */
.Lnextbyte:
- EX(l8ui, a6, a3, 0, fixup)
+EX(10f) l8ui a6, a3, 0
addi a3, a3, 1
- EX(s8i, a6, a5, 0, fixup)
+EX(10f) s8i a6, a5, 0
addi a5, a5, 1
#if !XCHAL_HAVE_LOOPS
blt a3, a7, .Lnextbyte
@@ -161,15 +144,15 @@ __xtensa_copy_user:
add a8, a8, a3 # a8 = end of last 16B source chunk
#endif /* !XCHAL_HAVE_LOOPS */
.Loop1:
- EX(l32i, a6, a3, 0, fixup)
- EX(l32i, a7, a3, 4, fixup)
- EX(s32i, a6, a5, 0, fixup)
- EX(l32i, a6, a3, 8, fixup)
- EX(s32i, a7, a5, 4, fixup)
- EX(l32i, a7, a3, 12, fixup)
- EX(s32i, a6, a5, 8, fixup)
+EX(10f) l32i a6, a3, 0
+EX(10f) l32i a7, a3, 4
+EX(10f) s32i a6, a5, 0
+EX(10f) l32i a6, a3, 8
+EX(10f) s32i a7, a5, 4
+EX(10f) l32i a7, a3, 12
+EX(10f) s32i a6, a5, 8
addi a3, a3, 16
- EX(s32i, a7, a5, 12, fixup)
+EX(10f) s32i a7, a5, 12
addi a5, a5, 16
#if !XCHAL_HAVE_LOOPS
blt a3, a8, .Loop1
@@ -177,31 +160,31 @@ __xtensa_copy_user:
.Loop1done:
bbci.l a4, 3, .L2
# copy 8 bytes
- EX(l32i, a6, a3, 0, fixup)
- EX(l32i, a7, a3, 4, fixup)
+EX(10f) l32i a6, a3, 0
+EX(10f) l32i a7, a3, 4
addi a3, a3, 8
- EX(s32i, a6, a5, 0, fixup)
- EX(s32i, a7, a5, 4, fixup)
+EX(10f) s32i a6, a5, 0
+EX(10f) s32i a7, a5, 4
addi a5, a5, 8
.L2:
bbci.l a4, 2, .L3
# copy 4 bytes
- EX(l32i, a6, a3, 0, fixup)
+EX(10f) l32i a6, a3, 0
addi a3, a3, 4
- EX(s32i, a6, a5, 0, fixup)
+EX(10f) s32i a6, a5, 0
addi a5, a5, 4
.L3:
bbci.l a4, 1, .L4
# copy 2 bytes
- EX(l16ui, a6, a3, 0, fixup)
+EX(10f) l16ui a6, a3, 0
addi a3, a3, 2
- EX(s16i, a6, a5, 0, fixup)
+EX(10f) s16i a6, a5, 0
addi a5, a5, 2
.L4:
bbci.l a4, 0, .L5
# copy 1 byte
- EX(l8ui, a6, a3, 0, fixup)
- EX(s8i, a6, a5, 0, fixup)
+EX(10f) l8ui a6, a3, 0
+EX(10f) s8i a6, a5, 0
.L5:
movi a2, 0 # return success for len bytes copied
retw
@@ -217,7 +200,7 @@ __xtensa_copy_user:
# copy 16 bytes per iteration for word-aligned dst and unaligned src
and a10, a3, a8 # save unalignment offset for below
sub a3, a3, a10 # align a3 (to avoid sim warnings only; not needed for hardware)
- EX(l32i, a6, a3, 0, fixup) # load first word
+EX(10f) l32i a6, a3, 0 # load first word
#if XCHAL_HAVE_LOOPS
loopnez a7, .Loop2done
#else /* !XCHAL_HAVE_LOOPS */
@@ -226,19 +209,19 @@ __xtensa_copy_user:
add a12, a12, a3 # a12 = end of last 16B source chunk
#endif /* !XCHAL_HAVE_LOOPS */
.Loop2:
- EX(l32i, a7, a3, 4, fixup)
- EX(l32i, a8, a3, 8, fixup)
- ALIGN( a6, a6, a7)
- EX(s32i, a6, a5, 0, fixup)
- EX(l32i, a9, a3, 12, fixup)
- ALIGN( a7, a7, a8)
- EX(s32i, a7, a5, 4, fixup)
- EX(l32i, a6, a3, 16, fixup)
- ALIGN( a8, a8, a9)
- EX(s32i, a8, a5, 8, fixup)
+EX(10f) l32i a7, a3, 4
+EX(10f) l32i a8, a3, 8
+ __src_b a6, a6, a7
+EX(10f) s32i a6, a5, 0
+EX(10f) l32i a9, a3, 12
+ __src_b a7, a7, a8
+EX(10f) s32i a7, a5, 4
+EX(10f) l32i a6, a3, 16
+ __src_b a8, a8, a9
+EX(10f) s32i a8, a5, 8
addi a3, a3, 16
- ALIGN( a9, a9, a6)
- EX(s32i, a9, a5, 12, fixup)
+ __src_b a9, a9, a6
+EX(10f) s32i a9, a5, 12
addi a5, a5, 16
#if !XCHAL_HAVE_LOOPS
blt a3, a12, .Loop2
@@ -246,43 +229,44 @@ __xtensa_copy_user:
.Loop2done:
bbci.l a4, 3, .L12
# copy 8 bytes
- EX(l32i, a7, a3, 4, fixup)
- EX(l32i, a8, a3, 8, fixup)
- ALIGN( a6, a6, a7)
- EX(s32i, a6, a5, 0, fixup)
+EX(10f) l32i a7, a3, 4
+EX(10f) l32i a8, a3, 8
+ __src_b a6, a6, a7
+EX(10f) s32i a6, a5, 0
addi a3, a3, 8
- ALIGN( a7, a7, a8)
- EX(s32i, a7, a5, 4, fixup)
+ __src_b a7, a7, a8
+EX(10f) s32i a7, a5, 4
addi a5, a5, 8
mov a6, a8
.L12:
bbci.l a4, 2, .L13
# copy 4 bytes
- EX(l32i, a7, a3, 4, fixup)
+EX(10f) l32i a7, a3, 4
addi a3, a3, 4
- ALIGN( a6, a6, a7)
- EX(s32i, a6, a5, 0, fixup)
+ __src_b a6, a6, a7
+EX(10f) s32i a6, a5, 0
addi a5, a5, 4
mov a6, a7
.L13:
add a3, a3, a10 # readjust a3 with correct misalignment
bbci.l a4, 1, .L14
# copy 2 bytes
- EX(l8ui, a6, a3, 0, fixup)
- EX(l8ui, a7, a3, 1, fixup)
+EX(10f) l8ui a6, a3, 0
+EX(10f) l8ui a7, a3, 1
addi a3, a3, 2
- EX(s8i, a6, a5, 0, fixup)
- EX(s8i, a7, a5, 1, fixup)
+EX(10f) s8i a6, a5, 0
+EX(10f) s8i a7, a5, 1
addi a5, a5, 2
.L14:
bbci.l a4, 0, .L15
# copy 1 byte
- EX(l8ui, a6, a3, 0, fixup)
- EX(s8i, a6, a5, 0, fixup)
+EX(10f) l8ui a6, a3, 0
+EX(10f) s8i a6, a5, 0
.L15:
movi a2, 0 # return success for len bytes copied
retw
+ENDPROC(__xtensa_copy_user)
.section .fixup, "ax"
.align 4
@@ -294,7 +278,7 @@ __xtensa_copy_user:
*/
-fixup:
+10:
sub a2, a5, a2 /* a2 <-- bytes copied */
sub a2, a11, a2 /* a2 <-- bytes not copied */
retw
diff --git a/arch/xtensa/mm/Makefile b/arch/xtensa/mm/Makefile
index 0b3d296a016a..734888a00dc8 100644
--- a/arch/xtensa/mm/Makefile
+++ b/arch/xtensa/mm/Makefile
@@ -5,3 +5,8 @@
obj-y := init.o misc.o
obj-$(CONFIG_MMU) += cache.o fault.o ioremap.o mmu.o tlb.o
obj-$(CONFIG_HIGHMEM) += highmem.o
+obj-$(CONFIG_KASAN) += kasan_init.o
+
+KASAN_SANITIZE_fault.o := n
+KASAN_SANITIZE_kasan_init.o := n
+KASAN_SANITIZE_mmu.o := n
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
index 3c75c4e597da..57dc231a0709 100644
--- a/arch/xtensa/mm/cache.c
+++ b/arch/xtensa/mm/cache.c
@@ -33,9 +33,6 @@
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
-//#define printd(x...) printk(x)
-#define printd(x...) do { } while(0)
-
/*
* Note:
* The kernel provides one architecture bit PG_arch_1 in the page flags that
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index a14df5aa98c8..8b9b6f44bb06 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -25,8 +25,6 @@
DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;
void bad_page_fault(struct pt_regs*, unsigned long, int);
-#undef DEBUG_PAGE_FAULT
-
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
@@ -68,10 +66,10 @@ void do_page_fault(struct pt_regs *regs)
exccause == EXCCAUSE_ITLB_MISS ||
exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;
-#ifdef DEBUG_PAGE_FAULT
- printk("[%s:%d:%08x:%d:%08x:%s%s]\n", current->comm, current->pid,
- address, exccause, regs->pc, is_write? "w":"", is_exec? "x":"");
-#endif
+ pr_debug("[%s:%d:%08x:%d:%08lx:%s%s]\n",
+ current->comm, current->pid,
+ address, exccause, regs->pc,
+ is_write ? "w" : "", is_exec ? "x" : "");
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
@@ -247,10 +245,8 @@ bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
/* Are we prepared to handle this kernel fault? */
if ((entry = search_exception_tables(regs->pc)) != NULL) {
-#ifdef DEBUG_PAGE_FAULT
- printk(KERN_DEBUG "%s: Exception at pc=%#010lx (%lx)\n",
- current->comm, regs->pc, entry->fixup);
-#endif
+ pr_debug("%s: Exception at pc=%#010lx (%lx)\n",
+ current->comm, regs->pc, entry->fixup);
current->thread.bad_uaddr = address;
regs->pc = entry->fixup;
return;
@@ -259,9 +255,9 @@ bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
/* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
- printk(KERN_ALERT "Unable to handle kernel paging request at virtual "
- "address %08lx\n pc = %08lx, ra = %08lx\n",
- address, regs->pc, regs->areg[0]);
+ pr_alert("Unable to handle kernel paging request at virtual "
+ "address %08lx\n pc = %08lx, ra = %08lx\n",
+ address, regs->pc, regs->areg[0]);
die("Oops", regs, sig);
do_exit(sig);
}
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 720fe4e8b497..d776ec0d7b22 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -100,29 +100,51 @@ void __init mem_init(void)
mem_init_print_info(NULL);
pr_info("virtual kernel memory layout:\n"
+#ifdef CONFIG_KASAN
+ " kasan : 0x%08lx - 0x%08lx (%5lu MB)\n"
+#endif
+#ifdef CONFIG_MMU
+ " vmalloc : 0x%08lx - 0x%08lx (%5lu MB)\n"
+#endif
#ifdef CONFIG_HIGHMEM
" pkmap : 0x%08lx - 0x%08lx (%5lu kB)\n"
" fixmap : 0x%08lx - 0x%08lx (%5lu kB)\n"
#endif
-#ifdef CONFIG_MMU
- " vmalloc : 0x%08lx - 0x%08lx (%5lu MB)\n"
+ " lowmem : 0x%08lx - 0x%08lx (%5lu MB)\n"
+ " .text : 0x%08lx - 0x%08lx (%5lu kB)\n"
+ " .rodata : 0x%08lx - 0x%08lx (%5lu kB)\n"
+ " .data : 0x%08lx - 0x%08lx (%5lu kB)\n"
+ " .init : 0x%08lx - 0x%08lx (%5lu kB)\n"
+ " .bss : 0x%08lx - 0x%08lx (%5lu kB)\n",
+#ifdef CONFIG_KASAN
+ KASAN_SHADOW_START, KASAN_SHADOW_START + KASAN_SHADOW_SIZE,
+ KASAN_SHADOW_SIZE >> 20,
#endif
- " lowmem : 0x%08lx - 0x%08lx (%5lu MB)\n",
+#ifdef CONFIG_MMU
+ VMALLOC_START, VMALLOC_END,
+ (VMALLOC_END - VMALLOC_START) >> 20,
#ifdef CONFIG_HIGHMEM
PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE,
(LAST_PKMAP*PAGE_SIZE) >> 10,
FIXADDR_START, FIXADDR_TOP,
(FIXADDR_TOP - FIXADDR_START) >> 10,
#endif
-#ifdef CONFIG_MMU
- VMALLOC_START, VMALLOC_END,
- (VMALLOC_END - VMALLOC_START) >> 20,
PAGE_OFFSET, PAGE_OFFSET +
(max_low_pfn - min_low_pfn) * PAGE_SIZE,
#else
min_low_pfn * PAGE_SIZE, max_low_pfn * PAGE_SIZE,
#endif
- ((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20);
+ ((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20,
+ (unsigned long)_text, (unsigned long)_etext,
+ (unsigned long)(_etext - _text) >> 10,
+ (unsigned long)__start_rodata, (unsigned long)_sdata,
+ (unsigned long)(_sdata - __start_rodata) >> 10,
+ (unsigned long)_sdata, (unsigned long)_edata,
+ (unsigned long)(_edata - _sdata) >> 10,
+ (unsigned long)__init_begin, (unsigned long)__init_end,
+ (unsigned long)(__init_end - __init_begin) >> 10,
+ (unsigned long)__bss_start, (unsigned long)__bss_stop,
+ (unsigned long)(__bss_stop - __bss_start) >> 10);
}
#ifdef CONFIG_BLK_DEV_INITRD
diff --git a/arch/xtensa/mm/kasan_init.c b/arch/xtensa/mm/kasan_init.c
new file mode 100644
index 000000000000..6b532b6bd785
--- /dev/null
+++ b/arch/xtensa/mm/kasan_init.c
@@ -0,0 +1,95 @@
+/*
+ * Xtensa KASAN shadow map initialization
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2017 Cadence Design Systems Inc.
+ */
+
+#include <linux/bootmem.h>
+#include <linux/init_task.h>
+#include <linux/kasan.h>
+#include <linux/kernel.h>
+#include <linux/memblock.h>
+#include <asm/initialize_mmu.h>
+#include <asm/tlbflush.h>
+#include <asm/traps.h>
+
+void __init kasan_early_init(void)
+{
+ unsigned long vaddr = KASAN_SHADOW_START;
+ pgd_t *pgd = pgd_offset_k(vaddr);
+ pmd_t *pmd = pmd_offset(pgd, vaddr);
+ int i;
+
+ for (i = 0; i < PTRS_PER_PTE; ++i)
+ set_pte(kasan_zero_pte + i,
+ mk_pte(virt_to_page(kasan_zero_page), PAGE_KERNEL));
+
+ for (vaddr = 0; vaddr < KASAN_SHADOW_SIZE; vaddr += PMD_SIZE, ++pmd) {
+ BUG_ON(!pmd_none(*pmd));
+ set_pmd(pmd, __pmd((unsigned long)kasan_zero_pte));
+ }
+ early_trap_init();
+}
+
+static void __init populate(void *start, void *end)
+{
+ unsigned long n_pages = (end - start) / PAGE_SIZE;
+ unsigned long n_pmds = n_pages / PTRS_PER_PTE;
+ unsigned long i, j;
+ unsigned long vaddr = (unsigned long)start;
+ pgd_t *pgd = pgd_offset_k(vaddr);
+ pmd_t *pmd = pmd_offset(pgd, vaddr);
+ pte_t *pte = memblock_virt_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
+
+ pr_debug("%s: %p - %p\n", __func__, start, end);
+
+ for (i = j = 0; i < n_pmds; ++i) {
+ int k;
+
+ for (k = 0; k < PTRS_PER_PTE; ++k, ++j) {
+ phys_addr_t phys =
+ memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
+ MEMBLOCK_ALLOC_ANYWHERE);
+
+ set_pte(pte + j, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
+ }
+ }
+
+ for (i = 0; i < n_pmds ; ++i, pte += PTRS_PER_PTE)
+ set_pmd(pmd + i, __pmd((unsigned long)pte));
+
+ local_flush_tlb_all();
+ memset(start, 0, end - start);
+}
+
+void __init kasan_init(void)
+{
+ int i;
+
+ BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_START -
+ (KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT));
+ BUILD_BUG_ON(VMALLOC_START < KASAN_START_VADDR);
+
+ /*
+ * Replace shadow map pages that cover addresses from VMALLOC area
+ * start to the end of KSEG with clean writable pages.
+ */
+ populate(kasan_mem_to_shadow((void *)VMALLOC_START),
+ kasan_mem_to_shadow((void *)XCHAL_KSEG_BYPASS_VADDR));
+
+ /* Write protect kasan_zero_page and zero-initialize it again. */
+ for (i = 0; i < PTRS_PER_PTE; ++i)
+ set_pte(kasan_zero_pte + i,
+ mk_pte(virt_to_page(kasan_zero_page), PAGE_KERNEL_RO));
+
+ local_flush_tlb_all();
+ memset(kasan_zero_page, 0, PAGE_SIZE);
+
+ /* At this point kasan is fully initialized. Enable error messages. */
+ current->kasan_depth = 0;
+ pr_info("KernelAddressSanitizer initialized\n");
+}
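
A note on the shadow arithmetic that the BUILD_BUG_ONs above pin down: generic KASAN maps each 2^KASAN_SHADOW_SCALE_SHIFT bytes of kernel address space (presumably 8 bytes here, as on other 32-bit ports) to one shadow byte. A hedged sketch of the mapping; kasan_shadow_of() is a hypothetical local name for what <linux/kasan.h> provides as kasan_mem_to_shadow():

static inline void *kasan_shadow_of(const void *addr)
{
	return (void *)(((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
			+ KASAN_SHADOW_OFFSET);
}

The first BUILD_BUG_ON then simply asserts that kasan_shadow_of(KASAN_START_VADDR) equals KASAN_SHADOW_START, i.e. that the shadow of the lowest covered address lands exactly at the start of the shadow region.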
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index 358d748d9083..9d1ecfc53670 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -56,7 +56,6 @@ static void __init fixedrange_init(void)
void __init paging_init(void)
{
- memset(swapper_pg_dir, 0, PAGE_SIZE);
#ifdef CONFIG_HIGHMEM
fixedrange_init();
pkmap_page_table = init_pmd(PKMAP_BASE, LAST_PKMAP);
@@ -82,6 +81,23 @@ void init_mmu(void)
set_itlbcfg_register(0);
set_dtlbcfg_register(0);
#endif
+ init_kio();
+ local_flush_tlb_all();
+
+ /* Set rasid register to a known value. */
+
+ set_rasid_register(ASID_INSERT(ASID_USER_FIRST));
+
+ /* Set PTEVADDR special register to the start of the page
+ * table, which is in kernel mappable space (ie. not
+ * statically mapped). This register's value is undefined on
+ * reset.
+ */
+ set_ptevaddr_register(XCHAL_PAGE_TABLE_VADDR);
+}
+
+void init_kio(void)
+{
#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
/*
* Update the IO area mapping in case xtensa_kio_paddr has changed
@@ -95,17 +111,4 @@ void init_mmu(void)
write_itlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS),
XCHAL_KIO_BYPASS_VADDR + 6);
#endif
-
- local_flush_tlb_all();
-
- /* Set rasid register to a known value. */
-
- set_rasid_register(ASID_INSERT(ASID_USER_FIRST));
-
- /* Set PTEVADDR special register to the start of the page
- * table, which is in kernel mappable space (ie. not
- * statically mapped). This register's value is undefined on
- * reset.
- */
- set_ptevaddr_register(PGTABLE_START);
}
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
index 35c822286bbe..59153d0aa890 100644
--- a/arch/xtensa/mm/tlb.c
+++ b/arch/xtensa/mm/tlb.c
@@ -95,10 +95,8 @@ void local_flush_tlb_range(struct vm_area_struct *vma,
if (mm->context.asid[cpu] == NO_CONTEXT)
return;
-#if 0
- printk("[tlbrange<%02lx,%08lx,%08lx>]\n",
- (unsigned long)mm->context.asid[cpu], start, end);
-#endif
+ pr_debug("[tlbrange<%02lx,%08lx,%08lx>]\n",
+ (unsigned long)mm->context.asid[cpu], start, end);
local_irq_save(flags);
if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) {
diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c
index 464c2684c4f1..92f567f9a21e 100644
--- a/arch/xtensa/platforms/iss/console.c
+++ b/arch/xtensa/platforms/iss/console.c
@@ -185,7 +185,7 @@ int __init rs_init(void)
serial_driver = alloc_tty_driver(SERIAL_MAX_NUM_LINES);
- printk ("%s %s\n", serial_name, serial_version);
+ pr_info("%s %s\n", serial_name, serial_version);
/* Initialize the tty_driver structure */
@@ -214,7 +214,7 @@ static __exit void rs_exit(void)
int error;
if ((error = tty_unregister_driver(serial_driver)))
- printk("ISS_SERIAL: failed to unregister serial driver (%d)\n",
+ pr_err("ISS_SERIAL: failed to unregister serial driver (%d)\n",
error);
put_tty_driver(serial_driver);
tty_port_destroy(&serial_port);
diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c
index 6363b18e5b8c..d027dddc41ca 100644
--- a/arch/xtensa/platforms/iss/network.c
+++ b/arch/xtensa/platforms/iss/network.c
@@ -16,6 +16,8 @@
*
*/
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
#include <linux/list.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
@@ -606,8 +608,6 @@ struct iss_net_init {
* those fields. They will be later initialized in iss_net_init.
*/
-#define ERR KERN_ERR "iss_net_setup: "
-
static int __init iss_net_setup(char *str)
{
struct iss_net_private *device = NULL;
@@ -619,14 +619,14 @@ static int __init iss_net_setup(char *str)
end = strchr(str, '=');
if (!end) {
- printk(ERR "Expected '=' after device number\n");
+ pr_err("Expected '=' after device number\n");
return 1;
}
*end = 0;
rc = kstrtouint(str, 0, &n);
*end = '=';
if (rc < 0) {
- printk(ERR "Failed to parse '%s'\n", str);
+ pr_err("Failed to parse '%s'\n", str);
return 1;
}
str = end;
@@ -642,13 +642,13 @@ static int __init iss_net_setup(char *str)
spin_unlock(&devices_lock);
if (device && device->index == n) {
- printk(ERR "Device %u already configured\n", n);
+ pr_err("Device %u already configured\n", n);
return 1;
}
new = alloc_bootmem(sizeof(*new));
if (new == NULL) {
- printk(ERR "Alloc_bootmem failed\n");
+ pr_err("Alloc_bootmem failed\n");
return 1;
}
@@ -660,8 +660,6 @@ static int __init iss_net_setup(char *str)
return 1;
}
-#undef ERR
-
__setup("eth", iss_net_setup);
/*
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index da1525ec4c87..d819dc77fe65 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -775,10 +775,11 @@ static void bfq_pd_offline(struct blkg_policy_data *pd)
unsigned long flags;
int i;
+ spin_lock_irqsave(&bfqd->lock, flags);
+
if (!entity) /* root group */
- return;
+ goto put_async_queues;
- spin_lock_irqsave(&bfqd->lock, flags);
/*
* Empty all service_trees belonging to this group before
* deactivating the group itself.
@@ -809,6 +810,8 @@ static void bfq_pd_offline(struct blkg_policy_data *pd)
}
__bfq_deactivate_entity(entity, false);
+
+put_async_queues:
bfq_put_async_queues(bfqd, bfqg);
spin_unlock_irqrestore(&bfqd->lock, flags);
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index bcb6d21baf12..47e6ec7427c4 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -166,6 +166,20 @@ static const int bfq_async_charge_factor = 10;
/* Default timeout values, in jiffies, approximating CFQ defaults. */
const int bfq_timeout = HZ / 8;
+/*
+ * Time limit for merging (see comments in bfq_setup_cooperator). Set
+ * to the slowest value that, in our tests, proved to be effective in
+ * removing false positives, while not causing true positives to miss
+ * queue merging.
+ *
+ * As can be deduced from the low time limit below, queue merging, if
+ * successful, happens at the very beginning of the I/O of the involved
+ * cooperating processes, as a consequence of the arrival of the very
+ * first requests from each cooperator. After that, there is very
+ * little chance to find cooperators.
+ */
+static const unsigned long bfq_merge_time_limit = HZ/10;
+
static struct kmem_cache *bfq_pool;
/* Below this threshold (in ns), we consider thinktime immediate. */
@@ -178,7 +192,7 @@ static struct kmem_cache *bfq_pool;
#define BFQQ_SEEK_THR (sector_t)(8 * 100)
#define BFQQ_SECT_THR_NONROT (sector_t)(2 * 32)
#define BFQQ_CLOSE_THR (sector_t)(8 * 1024)
-#define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 32/8)
+#define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 19)
/* Min number of samples required to perform peak-rate update */
#define BFQ_RATE_MIN_SAMPLES 32
@@ -195,15 +209,17 @@ static struct kmem_cache *bfq_pool;
* interactive applications automatically, using the following formula:
* duration = (R / r) * T, where r is the peak rate of the device, and
* R and T are two reference parameters.
- * In particular, R is the peak rate of the reference device (see below),
- * and T is a reference time: given the systems that are likely to be
- * installed on the reference device according to its speed class, T is
- * about the maximum time needed, under BFQ and while reading two files in
- * parallel, to load typical large applications on these systems.
- * In practice, the slower/faster the device at hand is, the more/less it
- * takes to load applications with respect to the reference device.
- * Accordingly, the longer/shorter BFQ grants weight raising to interactive
- * applications.
+ * In particular, R is the peak rate of the reference device (see
+ * below), and T is a reference time: given the systems that are
+ * likely to be installed on the reference device according to its
+ * speed class, T is about the maximum time needed, under BFQ and
+ * while reading two files in parallel, to load typical large
+ * applications on these systems (see the comments on
+ * max_service_from_wr below, for more details on how T is obtained).
+ * In practice, the slower/faster the device at hand is, the more/less
+ * it takes to load applications with respect to the reference device.
+ * Accordingly, the longer/shorter BFQ grants weight raising to
+ * interactive applications.
*
* BFQ uses four different reference pairs (R, T), depending on:
* . whether the device is rotational or non-rotational;
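
In code form, the duration rule above reads as follows. A minimal sketch, assuming names taken from the comment (the (R, T) reference pair and the measured peak rate r) rather than bfq's actual field names:

static unsigned long wr_duration(u64 ref_rate, unsigned long ref_time,
				 u32 peak_rate)
{
	u64 dur = ref_rate * ref_time;	/* R * T */

	do_div(dur, peak_rate);		/* duration = (R / r) * T */
	return (unsigned long)dur;
}

A device twice as fast as the reference one is thus granted half the weight-raising duration, and vice versa.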
@@ -240,6 +256,60 @@ static int T_slow[2];
static int T_fast[2];
static int device_speed_thresh[2];
+/*
+ * BFQ uses the above-detailed, time-based weight-raising mechanism to
+ * privilege interactive tasks. This mechanism is vulnerable to the
+ * following false positives: I/O-bound applications that will go on
+ * doing I/O for much longer than the duration of weight
+ * raising. These applications have basically no benefit from being
+ * weight-raised at the beginning of their I/O. On the opposite end,
+ * while being weight-raised, these applications
+ * a) unjustly steal throughput from applications that may actually need
+ * low latency;
+ * b) make BFQ uselessly perform device idling; device idling results
+ * in loss of device throughput with most flash-based storage, and may
+ * increase latencies when used purposelessly.
+ *
+ * BFQ tries to reduce these problems, by adopting the following
+ * countermeasure. To introduce this countermeasure, we need first to
+ * finish explaining how the duration of weight-raising for
+ * interactive tasks is computed.
+ *
+ * For a bfq_queue deemed as interactive, the duration of weight
+ * raising is dynamically adjusted, as a function of the estimated
+ * peak rate of the device, so as to be equal to the time needed to
+ * execute the 'largest' interactive task we benchmarked so far. By
+ * largest task, we mean the task for which each involved process has
+ * to do more I/O than for any of the other tasks we benchmarked. This
+ * reference interactive task is the start-up of LibreOffice Writer,
+ * and in this task each process/bfq_queue needs to have at most ~110K
+ * sectors transferred.
+ *
+ * This last piece of information enables BFQ to reduce the actual
+ * duration of weight-raising for at least one class of I/O-bound
+ * applications: those doing sequential or quasi-sequential I/O. An
+ * example is file copy. In fact, once started, the main I/O-bound
+ * processes of these applications usually consume the above 110K
+ * sectors in much less time than the processes of an application that
+ * is starting, because these I/O-bound processes will greedily devote
+ * almost all their CPU cycles only to their target,
+ * throughput-friendly I/O operations. This is even more true if BFQ
+ * happens to be underestimating the device peak rate, and thus
+ * overestimating the duration of weight raising. But, according to
+ * our measurements, once they have transferred 110K sectors, these processes
+ * have no right to be weight-raised any longer.
+ *
+ * Based on the last consideration, BFQ ends weight-raising for a
+ * bfq_queue if the latter happens to have received an amount of
+ * service at least equal to the following constant. The constant is
+ * set to slightly more than 110K, to have a minimum safety margin.
+ *
+ * This early ending of weight-raising reduces the amount of time
+ * during which interactive false positives cause the two problems
+ * described at the beginning of these comments.
+ */
+static const unsigned long max_service_from_wr = 120000;
+
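
A sketch of where this constant presumably bites; the actual check lives in bfq's service-accounting path, not in this hunk. bfq_bfqq_end_wr() is an existing bfq helper, while the wrapper name here is hypothetical:

static void maybe_end_wr_early(struct bfq_queue *bfqq)
{
	if (bfqq->wr_coeff > 1 &&
	    bfqq->service_from_wr > max_service_from_wr)
		bfq_bfqq_end_wr(bfqq);
}

service_from_wr is the counter reset to 0 at the start of each interactive weight-raising period (see bfq_update_bfqq_wr_on_rq_arrival below).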
#define RQ_BIC(rq) icq_to_bic((rq)->elv.priv[0])
#define RQ_BFQQ(rq) ((rq)->elv.priv[1])
@@ -403,6 +473,82 @@ static struct request *bfq_choose_req(struct bfq_data *bfqd,
}
}
+/*
+ * See the comments on bfq_limit_depth for the purpose of
+ * the depths set in the function.
+ */
+static void bfq_update_depths(struct bfq_data *bfqd, struct sbitmap_queue *bt)
+{
+ bfqd->sb_shift = bt->sb.shift;
+
+ /*
+ * In-word depths if no bfq_queue is being weight-raised:
+ * leaving 25% of tags only for sync reads.
+ *
+ * In next formulas, right-shift the value
+ * (1U<<bfqd->sb_shift), instead of computing directly
+ * (1U<<(bfqd->sb_shift - something)), to be robust against
+ * any possible value of bfqd->sb_shift, without having to
+ * limit 'something'.
+ */
+ /* no more than 50% of tags for async I/O */
+ bfqd->word_depths[0][0] = max((1U<<bfqd->sb_shift)>>1, 1U);
+ /*
+ * no more than 75% of tags for sync writes (25% extra tags
+ * w.r.t. async I/O, to prevent async I/O from starving sync
+ * writes)
+ */
+ bfqd->word_depths[0][1] = max(((1U<<bfqd->sb_shift) * 3)>>2, 1U);
+
+ /*
+ * In-word depths in case some bfq_queue is being weight-
+ * raised: leaving ~63% of tags for sync reads. This is the
+ * highest percentage for which, in our tests, application
+ * start-up times didn't suffer from any regression due to tag
+ * shortage.
+ */
+ /* no more than ~18% of tags for async I/O */
+ bfqd->word_depths[1][0] = max(((1U<<bfqd->sb_shift) * 3)>>4, 1U);
+ /* no more than ~37% of tags for sync writes (~20% extra tags) */
+ bfqd->word_depths[1][1] = max(((1U<<bfqd->sb_shift) * 6)>>4, 1U);
+}
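
Worked numbers for the depths just computed, assuming sb_shift == 6, i.e. 64 tags per sbitmap word:

	word_depths[0][0] = 64 >> 1       = 32	(~50%: async, no queue weight-raised)
	word_depths[0][1] = (64 * 3) >> 2 = 48	(~75%: sync writes, no wr)
	word_depths[1][0] = (64 * 3) >> 4 = 12	(~18%: async, some queue weight-raised)
	word_depths[1][1] = (64 * 6) >> 4 = 24	(~37%: sync writes, some wr)

Sync reads are never limited, so they keep whatever headroom remains in each case.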
+
+/*
+ * Async I/O can easily starve sync I/O (both sync reads and sync
+ * writes), by consuming all tags. Similarly, storms of sync writes,
+ * such as those that sync(2) may trigger, can starve sync reads.
+ * Limit depths of async I/O and sync writes so as to counter both
+ * problems.
+ */
+static void bfq_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
+{
+ struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
+ struct bfq_data *bfqd = data->q->elevator->elevator_data;
+ struct sbitmap_queue *bt;
+
+ if (op_is_sync(op) && !op_is_write(op))
+ return;
+
+ if (data->flags & BLK_MQ_REQ_RESERVED) {
+ if (unlikely(!tags->nr_reserved_tags)) {
+ WARN_ON_ONCE(1);
+ return;
+ }
+ bt = &tags->breserved_tags;
+ } else
+ bt = &tags->bitmap_tags;
+
+ if (unlikely(bfqd->sb_shift != bt->sb.shift))
+ bfq_update_depths(bfqd, bt);
+
+ data->shallow_depth =
+ bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(op)];
+
+ bfq_log(bfqd, "[%s] wr_busy %d sync %d depth %u",
+ __func__, bfqd->wr_busy_queues, op_is_sync(op),
+ data->shallow_depth);
+}
+
static struct bfq_queue *
bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
sector_t sector, struct rb_node **ret_parent,
@@ -444,6 +590,13 @@ bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
return bfqq;
}
+static bool bfq_too_late_for_merging(struct bfq_queue *bfqq)
+{
+ return bfqq->service_from_backlogged > 0 &&
+ time_is_before_jiffies(bfqq->first_IO_time +
+ bfq_merge_time_limit);
+}
+
void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
struct rb_node **p, *parent;
@@ -454,6 +607,14 @@ void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
bfqq->pos_root = NULL;
}
+ /*
+ * bfqq cannot be merged any longer (see comments in
+ * bfq_setup_cooperator): no point in adding bfqq into the
+ * position tree.
+ */
+ if (bfq_too_late_for_merging(bfqq))
+ return;
+
if (bfq_class_idle(bfqq))
return;
if (!bfqq->next_rq)
@@ -1247,6 +1408,7 @@ static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd,
if (old_wr_coeff == 1 && wr_or_deserves_wr) {
/* start a weight-raising period */
if (interactive) {
+ bfqq->service_from_wr = 0;
bfqq->wr_coeff = bfqd->bfq_wr_coeff;
bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
} else {
@@ -1627,6 +1789,8 @@ static void bfq_remove_request(struct request_queue *q,
rb_erase(&bfqq->pos_node, bfqq->pos_root);
bfqq->pos_root = NULL;
}
+ } else {
+ bfq_pos_tree_add_move(bfqd, bfqq);
}
if (rq->cmd_flags & REQ_META)
@@ -1933,6 +2097,9 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
struct bfq_queue *new_bfqq)
{
+ if (bfq_too_late_for_merging(new_bfqq))
+ return false;
+
if (bfq_class_idle(bfqq) || bfq_class_idle(new_bfqq) ||
(bfqq->ioprio_class != new_bfqq->ioprio_class))
return false;
@@ -1957,20 +2124,6 @@ static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
}
/*
- * If this function returns true, then bfqq cannot be merged. The idea
- * is that true cooperation happens very early after processes start
- * to do I/O. Usually, late cooperations are just accidental false
- * positives. In case bfqq is weight-raised, such false positives
- * would evidently degrade latency guarantees for bfqq.
- */
-static bool wr_from_too_long(struct bfq_queue *bfqq)
-{
- return bfqq->wr_coeff > 1 &&
- time_is_before_jiffies(bfqq->last_wr_start_finish +
- msecs_to_jiffies(100));
-}
-
-/*
* Attempt to schedule a merge of bfqq with the currently in-service
* queue or with a close queue among the scheduled queues. Return
* NULL if no merge was scheduled, a pointer to the shared bfq_queue
@@ -1983,11 +2136,6 @@ static bool wr_from_too_long(struct bfq_queue *bfqq)
* to maintain. Besides, in such a critical condition as an out of memory,
* the benefits of queue merging may be little relevant, or even negligible.
*
- * Weight-raised queues can be merged only if their weight-raising
- * period has just started. In fact cooperating processes are usually
- * started together. Thus, with this filter we avoid false positives
- * that would jeopardize low-latency guarantees.
- *
* WARNING: queue merging may impair fairness among non-weight raised
* queues, for at least two reasons: 1) the original weight of a
* merged queue may change during the merged state, 2) even being the
@@ -2001,12 +2149,24 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
{
struct bfq_queue *in_service_bfqq, *new_bfqq;
+ /*
+ * Prevent bfqq from being merged if it has been created too
+ * long ago. The idea is that true cooperating processes, and
+ * thus their associated bfq_queues, are supposed to be
+ * created shortly after each other. This is the case, e.g.,
+ * for KVM/QEMU and dump I/O threads. Based on this
+ * assumption, the following filtering greatly reduces the
+ * probability that two non-cooperating processes, which just
+ * happen to do close I/O for some short time interval, have
+ * their queues merged by mistake.
+ */
+ if (bfq_too_late_for_merging(bfqq))
+ return NULL;
+
if (bfqq->new_bfqq)
return bfqq->new_bfqq;
- if (!io_struct ||
- wr_from_too_long(bfqq) ||
- unlikely(bfqq == &bfqd->oom_bfqq))
+ if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
return NULL;
/* If there is only one backlogged queue, don't search. */
@@ -2015,12 +2175,9 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
in_service_bfqq = bfqd->in_service_queue;
- if (!in_service_bfqq || in_service_bfqq == bfqq
- || wr_from_too_long(in_service_bfqq) ||
- unlikely(in_service_bfqq == &bfqd->oom_bfqq))
- goto check_scheduled;
-
- if (bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
+ if (in_service_bfqq && in_service_bfqq != bfqq &&
+ likely(in_service_bfqq != &bfqd->oom_bfqq) &&
+ bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
bfqq->entity.parent == in_service_bfqq->entity.parent &&
bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) {
new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
@@ -2032,12 +2189,10 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
* queues. The only thing we need is that the bio/request is not
* NULL, as we need it to establish whether a cooperator exists.
*/
-check_scheduled:
new_bfqq = bfq_find_close_cooperator(bfqd, bfqq,
bfq_io_struct_pos(io_struct, request));
- if (new_bfqq && !wr_from_too_long(new_bfqq) &&
- likely(new_bfqq != &bfqd->oom_bfqq) &&
+ if (new_bfqq && likely(new_bfqq != &bfqd->oom_bfqq) &&
bfq_may_be_close_cooperator(bfqq, new_bfqq))
return bfq_setup_merge(bfqq, new_bfqq);
@@ -2062,7 +2217,8 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node);
if (unlikely(bfq_bfqq_just_created(bfqq) &&
- !bfq_bfqq_in_large_burst(bfqq))) {
+ !bfq_bfqq_in_large_burst(bfqq) &&
+ bfqq->bfqd->low_latency)) {
/*
* bfqq being merged right after being created: bfqq
* would have deserved interactive weight raising, but
@@ -2917,45 +3073,87 @@ static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
* whereas soft_rt_next_start is set to infinity for applications that do
* not.
*
- * Unfortunately, even a greedy application may happen to behave in an
- * isochronous way if the CPU load is high. In fact, the application may
- * stop issuing requests while the CPUs are busy serving other processes,
- * then restart, then stop again for a while, and so on. In addition, if
- * the disk achieves a low enough throughput with the request pattern
- * issued by the application (e.g., because the request pattern is random
- * and/or the device is slow), then the application may meet the above
- * bandwidth requirement too. To prevent such a greedy application to be
- * deemed as soft real-time, a further rule is used in the computation of
- * soft_rt_next_start: soft_rt_next_start must be higher than the current
- * time plus the maximum time for which the arrival of a request is waited
- * for when a sync queue becomes idle, namely bfqd->bfq_slice_idle.
- * This filters out greedy applications, as the latter issue instead their
- * next request as soon as possible after the last one has been completed
- * (in contrast, when a batch of requests is completed, a soft real-time
- * application spends some time processing data).
+ * Unfortunately, even a greedy (i.e., I/O-bound) application may
+ * happen to meet, occasionally or systematically, both the above
+ * bandwidth and isochrony requirements. This may happen at least in
+ * the following circumstances. First, if the CPU load is high. The
+ * application may stop issuing requests while the CPUs are busy
+ * serving other processes, then restart, then stop again for a while,
+ * and so on. The other circumstances are related to the storage
+ * device: the storage device is highly loaded or reaches a low-enough
+ * throughput with the I/O of the application (e.g., because the I/O
+ * is random and/or the device is slow). In all these cases, the
+ * I/O of the application may be simply slowed down enough to meet
+ * the bandwidth and isochrony requirements. To reduce the probability
+ * that greedy applications are deemed as soft real-time in these
+ * corner cases, a further rule is used in the computation of
+ * soft_rt_next_start: the return value of this function is forced to
+ * be higher than the maximum between the following two quantities.
+ *
+ * (a) Current time plus: (1) the maximum time for which the arrival
+ * of a request is waited for when a sync queue becomes idle,
+ * namely bfqd->bfq_slice_idle, and (2) a few extra jiffies. We
+ * postpone for a moment the reason for adding a few extra
+ * jiffies; we get back to it after item (b). Lower-bounding
+ * the return value of this function with the current time plus
+ * bfqd->bfq_slice_idle tends to filter out greedy applications,
+ * because the latter issue their next request as soon as possible
+ * after the last one has been completed. In contrast, a soft
+ * real-time application spends some time processing data, after a
+ * batch of its requests has been completed.
*
- * Unfortunately, the last filter may easily generate false positives if
- * only bfqd->bfq_slice_idle is used as a reference time interval and one
- * or both the following cases occur:
- * 1) HZ is so low that the duration of a jiffy is comparable to or higher
- * than bfqd->bfq_slice_idle. This happens, e.g., on slow devices with
- * HZ=100.
+ * (b) Current value of bfqq->soft_rt_next_start. As pointed out
+ * above, greedy applications may happen to meet both the
+ * bandwidth and isochrony requirements under heavy CPU or
+ * storage-device load. In more detail, in these scenarios, these
+ * applications happen, only for limited time periods, to do I/O
+ * slowly enough to meet all the requirements described so far,
+ * including the filtering in above item (a). These slow-speed
+ * time intervals are usually interspersed between other time
+ * intervals during which these applications do I/O at a very high
+ * speed. Fortunately, precisely because of the high speed of the
+ * I/O in the high-speed intervals, the values returned by this
+ * function near the end of any such high-speed interval happen
+ * to be so high that they are likely to fall *after* the end of
+ * the low-speed time interval that follows. These high values are
+ * stored in bfqq->soft_rt_next_start after each invocation of
+ * this function. As a consequence, if the last value of
+ * bfqq->soft_rt_next_start is constantly used to lower-bound the
+ * next value that this function may return, then, from the very
+ * beginning of a low-speed interval, bfqq->soft_rt_next_start is
+ * likely to be constantly kept so high that any I/O request
+ * issued during the low-speed interval is considered as arriving
+ * too soon for the application to be deemed as soft
+ * real-time. Then, in the high-speed interval that follows, the
+ * application will not be deemed as soft real-time, just because
+ * it will do I/O at a high speed. And so on.
+ *
+ * Getting back to the filtering in item (a), in the following two
+ * cases this filtering might be easily passed by a greedy
+ * application, if the reference quantity was just
+ * bfqd->bfq_slice_idle:
+ * 1) HZ is so low that the duration of a jiffy is comparable to or
+ * higher than bfqd->bfq_slice_idle. This happens, e.g., on slow
+ * devices with HZ=100. The time granularity may be so coarse
+ * that the approximation, in jiffies, of bfqd->bfq_slice_idle
+ * is rather lower than the exact value.
* 2) jiffies, instead of increasing at a constant rate, may stop increasing
* for a while, then suddenly 'jump' by several units to recover the lost
* increments. This seems to happen, e.g., inside virtual machines.
- * To address this issue, we do not use as a reference time interval just
- * bfqd->bfq_slice_idle, but bfqd->bfq_slice_idle plus a few jiffies. In
- * particular we add the minimum number of jiffies for which the filter
- * seems to be quite precise also in embedded systems and KVM/QEMU virtual
- * machines.
+ * To address this issue, in the filtering in (a) we do not use as a
+ * reference time interval just bfqd->bfq_slice_idle, but
+ * bfqd->bfq_slice_idle plus a few jiffies. In particular, we add the
+ * minimum number of jiffies for which the filter seems to be quite
+ * precise also in embedded systems and KVM/QEMU virtual machines.
*/
static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
struct bfq_queue *bfqq)
{
- return max(bfqq->last_idle_bklogged +
- HZ * bfqq->service_from_backlogged /
- bfqd->bfq_wr_max_softrt_rate,
- jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
+ return max3(bfqq->soft_rt_next_start,
+ bfqq->last_idle_bklogged +
+ HZ * bfqq->service_from_backlogged /
+ bfqd->bfq_wr_max_softrt_rate,
+ jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
}
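A minimal sketch of how the three max3() terms interact, with invented jiffies values: the soft_rt_next_start cached during a past high-speed interval dominates, so I/O in the low-speed interval that follows still arrives "too soon" to qualify as soft real-time:

#include <stdio.h>

#define max3(a, b, c) ((a) > (b) ? ((a) > (c) ? (a) : (c)) \
				 : ((b) > (c) ? (b) : (c)))

int main(void)
{
	unsigned long cached   = 2000; /* bfqq->soft_rt_next_start, item (b) */
	unsigned long bw_based = 1500; /* last_idle_bklogged + HZ*service/rate */
	unsigned long idle_gap = 1100; /* jiffies + slice_idle + 4, item (a) */

	printf("next_start = %lu\n", max3(cached, bw_based, idle_gap)); /* 2000 */
	return 0;
}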
/**
@@ -3000,17 +3198,6 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, reason, &delta);
/*
- * Increase service_from_backlogged before next statement,
- * because the possible next invocation of
- * bfq_bfqq_charge_time would likely inflate
- * entity->service. In contrast, service_from_backlogged must
- * contain real service, to enable the soft real-time
- * heuristic to correctly compute the bandwidth consumed by
- * bfqq.
- */
- bfqq->service_from_backlogged += entity->service;
-
- /*
* As above explained, charge slow (typically seeky) and
* timed-out queues with the time and not the service
* received, to favor sequential workloads.
@@ -3535,6 +3722,12 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
bfqq->entity.prio_changed = 1;
}
}
+ if (bfqq->wr_coeff > 1 &&
+ bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time &&
+ bfqq->service_from_wr > max_service_from_wr) {
+ /* see comments on max_service_from_wr */
+ bfq_bfqq_end_wr(bfqq);
+ }
}
/*
* To improve latency (for this or other queues), immediately
@@ -3630,8 +3823,8 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
}
/*
- * We exploit the put_rq_private hook to decrement
- * rq_in_driver, but put_rq_private will not be
+ * We exploit the bfq_finish_request hook to decrement
+ * rq_in_driver, but bfq_finish_request will not be
* invoked on this request. So, to avoid unbalance,
* just start this request, without incrementing
* rq_in_driver. As a negative consequence,
@@ -3640,14 +3833,14 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
* bfq_schedule_dispatch to be invoked uselessly.
*
* As for implementing an exact solution, the
- * put_request hook, if defined, is probably invoked
- * also on this request. So, by exploiting this hook,
- * we could 1) increment rq_in_driver here, and 2)
- * decrement it in put_request. Such a solution would
- * let the value of the counter be always accurate,
- * but it would entail using an extra interface
- * function. This cost seems higher than the benefit,
- * being the frequency of non-elevator-private
+ * bfq_finish_request hook, if defined, is probably
+ * invoked also on this request. So, by exploiting
+ * this hook, we could 1) increment rq_in_driver here,
+ * and 2) decrement it in bfq_finish_request. Such a
+ * solution would let the value of the counter be
+ * always accurate, but it would entail using an extra
+ * interface function. This cost seems higher than the
+ * benefit, as non-elevator-private requests are very
+ * infrequent.
*/
goto start_rq;
@@ -3689,35 +3882,16 @@ exit:
return rq;
}
-static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
-{
- struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
- struct request *rq;
#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
- struct bfq_queue *in_serv_queue, *bfqq;
- bool waiting_rq, idle_timer_disabled;
-#endif
-
- spin_lock_irq(&bfqd->lock);
-
-#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
- in_serv_queue = bfqd->in_service_queue;
- waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue);
-
- rq = __bfq_dispatch_request(hctx);
-
- idle_timer_disabled =
- waiting_rq && !bfq_bfqq_wait_request(in_serv_queue);
-
-#else
- rq = __bfq_dispatch_request(hctx);
-#endif
- spin_unlock_irq(&bfqd->lock);
+static void bfq_update_dispatch_stats(struct request_queue *q,
+ struct request *rq,
+ struct bfq_queue *in_serv_queue,
+ bool idle_timer_disabled)
+{
+ struct bfq_queue *bfqq = rq ? RQ_BFQQ(rq) : NULL;
-#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
- bfqq = rq ? RQ_BFQQ(rq) : NULL;
if (!idle_timer_disabled && !bfqq)
- return rq;
+ return;
/*
* rq and bfqq are guaranteed to exist until this function
@@ -3732,7 +3906,7 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
* In addition, the following queue lock guarantees that
* bfqq_group(bfqq) exists as well.
*/
- spin_lock_irq(hctx->queue->queue_lock);
+ spin_lock_irq(q->queue_lock);
if (idle_timer_disabled)
/*
* Since the idle timer has been disabled,
@@ -3751,9 +3925,37 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
bfqg_stats_set_start_empty_time(bfqg);
bfqg_stats_update_io_remove(bfqg, rq->cmd_flags);
}
- spin_unlock_irq(hctx->queue->queue_lock);
+ spin_unlock_irq(q->queue_lock);
+}
+#else
+static inline void bfq_update_dispatch_stats(struct request_queue *q,
+ struct request *rq,
+ struct bfq_queue *in_serv_queue,
+ bool idle_timer_disabled) {}
#endif
+static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
+{
+ struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
+ struct request *rq;
+ struct bfq_queue *in_serv_queue;
+ bool waiting_rq, idle_timer_disabled;
+
+ spin_lock_irq(&bfqd->lock);
+
+ in_serv_queue = bfqd->in_service_queue;
+ waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue);
+
+ rq = __bfq_dispatch_request(hctx);
+
+ idle_timer_disabled =
+ waiting_rq && !bfq_bfqq_wait_request(in_serv_queue);
+
+ spin_unlock_irq(&bfqd->lock);
+
+ bfq_update_dispatch_stats(hctx->queue, rq, in_serv_queue,
+ idle_timer_disabled);
+
return rq;
}
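The refactoring above relies on the usual compile-out idiom: a real helper under the config options, an empty static inline stub otherwise, so the caller needs no #ifdef. A generic runnable sketch, with CONFIG_EXAMPLE_STATS and all names hypothetical, not from this patch:

#include <stdio.h>

#ifdef CONFIG_EXAMPLE_STATS
static void update_stats(int value)
{
	printf("stat: %d\n", value);   /* real bookkeeping lives here */
}
#else
static inline void update_stats(int value) { (void)value; } /* compiles to nothing */
#endif

int main(void)
{
	update_stats(42);   /* call site stays free of #ifdef clutter */
	return 0;
}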
@@ -4002,10 +4204,15 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfqq->split_time = bfq_smallest_from_now();
/*
- * Set to the value for which bfqq will not be deemed as
- * soft rt when it becomes backlogged.
+ * To not forget the possibly high bandwidth consumed by a
+ * process/queue in the recent past,
+ * bfq_bfqq_softrt_next_start() returns a value at least equal
+ * to the current value of bfqq->soft_rt_next_start (see
+ * comments on bfq_bfqq_softrt_next_start). Set
+ * soft_rt_next_start to now, to mean that bfqq has consumed
+ * no bandwidth so far.
*/
- bfqq->soft_rt_next_start = bfq_greatest_from_now();
+ bfqq->soft_rt_next_start = jiffies;
/* first request is almost certainly seeky */
bfqq->seek_history = 1;
@@ -4276,16 +4483,46 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
return idle_timer_disabled;
}
+#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+static void bfq_update_insert_stats(struct request_queue *q,
+ struct bfq_queue *bfqq,
+ bool idle_timer_disabled,
+ unsigned int cmd_flags)
+{
+ if (!bfqq)
+ return;
+
+ /*
+ * bfqq still exists, because it can disappear only after
+ * either it is merged with another queue, or the process it
+ * is associated with exits. But both actions must be taken by
+ * the same process currently executing this flow of
+ * instructions.
+ *
+ * In addition, the following queue lock guarantees that
+ * bfqq_group(bfqq) exists as well.
+ */
+ spin_lock_irq(q->queue_lock);
+ bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
+ if (idle_timer_disabled)
+ bfqg_stats_update_idle_time(bfqq_group(bfqq));
+ spin_unlock_irq(q->queue_lock);
+}
+#else
+static inline void bfq_update_insert_stats(struct request_queue *q,
+ struct bfq_queue *bfqq,
+ bool idle_timer_disabled,
+ unsigned int cmd_flags) {}
+#endif
+
static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
bool at_head)
{
struct request_queue *q = hctx->queue;
struct bfq_data *bfqd = q->elevator->elevator_data;
-#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
struct bfq_queue *bfqq = RQ_BFQQ(rq);
bool idle_timer_disabled = false;
unsigned int cmd_flags;
-#endif
spin_lock_irq(&bfqd->lock);
if (blk_mq_sched_try_insert_merge(q, rq)) {
@@ -4304,7 +4541,6 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
else
list_add_tail(&rq->queuelist, &bfqd->dispatch);
} else {
-#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
idle_timer_disabled = __bfq_insert_request(bfqd, rq);
/*
* Update bfqq, because, if a queue merge has occurred
@@ -4312,9 +4548,6 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
* redirected into a new queue.
*/
bfqq = RQ_BFQQ(rq);
-#else
- __bfq_insert_request(bfqd, rq);
-#endif
if (rq_mergeable(rq)) {
elv_rqhash_add(q, rq);
@@ -4323,35 +4556,17 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
}
}
-#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
/*
* Cache cmd_flags before releasing scheduler lock, because rq
* may disappear afterwards (for example, because of a request
* merge).
*/
cmd_flags = rq->cmd_flags;
-#endif
+
spin_unlock_irq(&bfqd->lock);
-#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
- if (!bfqq)
- return;
- /*
- * bfqq still exists, because it can disappear only after
- * either it is merged with another queue, or the process it
- * is associated with exits. But both actions must be taken by
- * the same process currently executing this flow of
- * instruction.
- *
- * In addition, the following queue lock guarantees that
- * bfqq_group(bfqq) exists as well.
- */
- spin_lock_irq(q->queue_lock);
- bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
- if (idle_timer_disabled)
- bfqg_stats_update_idle_time(bfqq_group(bfqq));
- spin_unlock_irq(q->queue_lock);
-#endif
+ bfq_update_insert_stats(q, bfqq, idle_timer_disabled,
+ cmd_flags);
}
static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
@@ -4482,7 +4697,7 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
bfq_schedule_dispatch(bfqd);
}
-static void bfq_put_rq_priv_body(struct bfq_queue *bfqq)
+static void bfq_finish_request_body(struct bfq_queue *bfqq)
{
bfqq->allocated--;
@@ -4512,7 +4727,7 @@ static void bfq_finish_request(struct request *rq)
spin_lock_irqsave(&bfqd->lock, flags);
bfq_completed_request(bfqq, bfqd);
- bfq_put_rq_priv_body(bfqq);
+ bfq_finish_request_body(bfqq);
spin_unlock_irqrestore(&bfqd->lock, flags);
} else {
@@ -4533,7 +4748,7 @@ static void bfq_finish_request(struct request *rq)
bfqg_stats_update_io_remove(bfqq_group(bfqq),
rq->cmd_flags);
}
- bfq_put_rq_priv_body(bfqq);
+ bfq_finish_request_body(bfqq);
}
rq->elv.priv[0] = NULL;
@@ -4818,6 +5033,9 @@ static void bfq_exit_queue(struct elevator_queue *e)
hrtimer_cancel(&bfqd->idle_slice_timer);
#ifdef CONFIG_BFQ_GROUP_IOSCHED
+ /* release oom-queue reference to root group */
+ bfqg_and_blkg_put(bfqd->root_group);
+
blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq);
#else
spin_lock_irq(&bfqd->lock);
@@ -5206,6 +5424,7 @@ static struct elv_fs_entry bfq_attrs[] = {
static struct elevator_type iosched_bfq_mq = {
.ops.mq = {
+ .limit_depth = bfq_limit_depth,
.prepare_request = bfq_prepare_request,
.finish_request = bfq_finish_request,
.exit_icq = bfq_exit_icq,
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index 91c4390903a1..350c39ae2896 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -337,6 +337,11 @@ struct bfq_queue {
* last transition from idle to backlogged.
*/
unsigned long service_from_backlogged;
+ /*
+ * Cumulative service received from the @bfq_queue since its
+ * last transition to weight-raised state.
+ */
+ unsigned long service_from_wr;
/*
* Value of wr start time when switching to soft rt
@@ -344,6 +349,8 @@ struct bfq_queue {
unsigned long wr_start_at_switch_to_srt;
unsigned long split_time; /* time of last split */
+
+ unsigned long first_IO_time; /* time of first I/O for this queue */
};
/**
@@ -627,6 +634,18 @@ struct bfq_data {
struct bfq_io_cq *bio_bic;
/* bfqq associated with the task issuing current bio for merging */
struct bfq_queue *bio_bfqq;
+
+ /*
+ * Cached sbitmap shift, used to compute depth limits in
+ * bfq_update_depths.
+ */
+ unsigned int sb_shift;
+
+ /*
+ * Depth limits used in bfq_limit_depth (see comments on the
+ * function)
+ */
+ unsigned int word_depths[2][2];
};
enum bfqq_state_flags {
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
index e495d3f9b4b0..4498c43245e2 100644
--- a/block/bfq-wf2q.c
+++ b/block/bfq-wf2q.c
@@ -835,6 +835,13 @@ void bfq_bfqq_served(struct bfq_queue *bfqq, int served)
struct bfq_entity *entity = &bfqq->entity;
struct bfq_service_tree *st;
+ if (!bfqq->service_from_backlogged)
+ bfqq->first_IO_time = jiffies;
+
+ if (bfqq->wr_coeff > 1)
+ bfqq->service_from_wr += served;
+
+ bfqq->service_from_backlogged += served;
for_each_entity(entity) {
st = bfq_entity_service_tree(entity);
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 23b42e8aa03e..9cfdd6c83b5b 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -374,7 +374,6 @@ static void bio_integrity_verify_fn(struct work_struct *work)
/**
* __bio_integrity_endio - Integrity I/O completion function
* @bio: Protected bio
- * @error: Pointer to errno
*
* Description: Completion for integrity I/O
*
diff --git a/block/bio.c b/block/bio.c
index 9ef6cf3addb3..e1708db48258 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -971,34 +971,6 @@ void bio_advance(struct bio *bio, unsigned bytes)
EXPORT_SYMBOL(bio_advance);
/**
- * bio_alloc_pages - allocates a single page for each bvec in a bio
- * @bio: bio to allocate pages for
- * @gfp_mask: flags for allocation
- *
- * Allocates pages up to @bio->bi_vcnt.
- *
- * Returns 0 on success, -ENOMEM on failure. On failure, any allocated pages are
- * freed.
- */
-int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
-{
- int i;
- struct bio_vec *bv;
-
- bio_for_each_segment_all(bv, bio, i) {
- bv->bv_page = alloc_page(gfp_mask);
- if (!bv->bv_page) {
- while (--bv >= bio->bi_io_vec)
- __free_page(bv->bv_page);
- return -ENOMEM;
- }
- }
-
- return 0;
-}
-EXPORT_SYMBOL(bio_alloc_pages);
-
-/**
* bio_copy_data - copy contents of data buffers from one chain of bios to
* another
* @src: source bio list
@@ -1838,7 +1810,7 @@ struct bio *bio_split(struct bio *bio, int sectors,
bio_advance(bio, split->bi_iter.bi_size);
if (bio_flagged(bio, BIO_TRACE_COMPLETION))
- bio_set_flag(bio, BIO_TRACE_COMPLETION);
+ bio_set_flag(split, BIO_TRACE_COMPLETION);
return split;
}
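The removed bio_alloc_pages() above is a textbook allocate-all-or-unwind loop; a user-space sketch of the same pattern, with malloc/free standing in for alloc_page()/__free_page():

#include <stdlib.h>
#include <stdio.h>

/* On the first failure, walk backwards and free what was already
 * allocated, then report the failure (-ENOMEM in the original).
 */
static int alloc_all(void **slots, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		slots[i] = malloc(4096);
		if (!slots[i]) {
			while (--i >= 0)        /* unwind partial allocation */
				free(slots[i]);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	void *pages[4];
	int i;

	if (alloc_all(pages, 4) == 0) {
		printf("all pages allocated\n");
		for (i = 0; i < 4; i++)
			free(pages[i]);
	}
	return 0;
}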
diff --git a/block/blk-core.c b/block/blk-core.c
index 3ba4326a63b5..a2005a485335 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -126,6 +126,8 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
rq->start_time = jiffies;
set_start_time_ns(rq);
rq->part = NULL;
+ seqcount_init(&rq->gstate_seq);
+ u64_stats_init(&rq->aborted_gstate_sync);
}
EXPORT_SYMBOL(blk_rq_init);
@@ -699,6 +701,15 @@ void blk_cleanup_queue(struct request_queue *q)
queue_flag_set(QUEUE_FLAG_DEAD, q);
spin_unlock_irq(lock);
+ /*
+ * Make sure all in-progress dispatches have completed:
+ * blk_freeze_queue() only waits for requests to complete, and
+ * dispatch may still be in progress since we dispatch requests
+ * from more than one context.
+ */
+ if (q->mq_ops)
+ blk_mq_quiesce_queue(q);
+
/* for synchronous bio-based driver finish in-flight integrity i/o */
blk_flush_integrity();
@@ -1646,6 +1657,7 @@ void __blk_put_request(struct request_queue *q, struct request *req)
lockdep_assert_held(q->queue_lock);
+ blk_req_zone_write_unlock(req);
blk_pm_put_request(req);
elv_completed_request(q, req);
@@ -2055,6 +2067,21 @@ static inline bool should_fail_request(struct hd_struct *part,
#endif /* CONFIG_FAIL_MAKE_REQUEST */
+static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
+{
+ if (part->policy && op_is_write(bio_op(bio))) {
+ char b[BDEVNAME_SIZE];
+
+ printk(KERN_ERR
+ "generic_make_request: Trying to write "
+ "to read-only block-device %s (partno %d)\n",
+ bio_devname(bio, b), part->partno);
+ return true;
+ }
+
+ return false;
+}
+
/*
* Remap block n of partition p to block n+start(p) of the disk.
*/
@@ -2063,27 +2090,28 @@ static inline int blk_partition_remap(struct bio *bio)
struct hd_struct *p;
int ret = 0;
+ rcu_read_lock();
+ p = __disk_get_part(bio->bi_disk, bio->bi_partno);
+ if (unlikely(!p || should_fail_request(p, bio->bi_iter.bi_size) ||
+ bio_check_ro(bio, p))) {
+ ret = -EIO;
+ goto out;
+ }
+
/*
* Zone reset does not include bi_size so bio_sectors() is always 0.
* Include a test for the reset op code and perform the remap if needed.
*/
- if (!bio->bi_partno ||
- (!bio_sectors(bio) && bio_op(bio) != REQ_OP_ZONE_RESET))
- return 0;
+ if (!bio_sectors(bio) && bio_op(bio) != REQ_OP_ZONE_RESET)
+ goto out;
- rcu_read_lock();
- p = __disk_get_part(bio->bi_disk, bio->bi_partno);
- if (likely(p && !should_fail_request(p, bio->bi_iter.bi_size))) {
- bio->bi_iter.bi_sector += p->start_sect;
- bio->bi_partno = 0;
- trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
- bio->bi_iter.bi_sector - p->start_sect);
- } else {
- printk("%s: fail for partition %d\n", __func__, bio->bi_partno);
- ret = -EIO;
- }
- rcu_read_unlock();
+ bio->bi_iter.bi_sector += p->start_sect;
+ bio->bi_partno = 0;
+ trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
+ bio->bi_iter.bi_sector - p->start_sect);
+out:
+ rcu_read_unlock();
return ret;
}
@@ -2142,15 +2170,19 @@ generic_make_request_checks(struct bio *bio)
* For a REQ_NOWAIT based request, return -EOPNOTSUPP
* if queue is not a request based queue.
*/
-
if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_rq_based(q))
goto not_supported;
if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size))
goto end_io;
- if (blk_partition_remap(bio))
- goto end_io;
+ if (!bio->bi_partno) {
+ if (unlikely(bio_check_ro(bio, &bio->bi_disk->part0)))
+ goto end_io;
+ } else {
+ if (blk_partition_remap(bio))
+ goto end_io;
+ }
if (bio_check_eod(bio, nr_sectors))
goto end_io;
@@ -2493,8 +2525,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
* bypass a potential scheduler on the bottom device for
* insert.
*/
- blk_mq_request_bypass_insert(rq, true);
- return BLK_STS_OK;
+ return blk_mq_request_issue_directly(rq);
}
spin_lock_irqsave(q->queue_lock, flags);
@@ -2846,7 +2877,7 @@ void blk_start_request(struct request *req)
wbt_issue(req->q->rq_wb, &req->issue_stat);
}
- BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
+ BUG_ON(blk_rq_is_complete(req));
blk_add_timer(req);
}
EXPORT_SYMBOL(blk_start_request);
@@ -3415,20 +3446,6 @@ int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
}
EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
-int kblockd_schedule_delayed_work(struct delayed_work *dwork,
- unsigned long delay)
-{
- return queue_delayed_work(kblockd_workqueue, dwork, delay);
-}
-EXPORT_SYMBOL(kblockd_schedule_delayed_work);
-
-int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
- unsigned long delay)
-{
- return queue_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
-}
-EXPORT_SYMBOL(kblockd_schedule_delayed_work_on);
-
/**
* blk_start_plug - initialize blk_plug and track it inside the task_struct
* @plug: The &struct blk_plug that needs to be initialized
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 5c0f3dc446dc..f7b292f12449 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -61,7 +61,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
* be reused after dying flag is set
*/
if (q->mq_ops) {
- blk_mq_sched_insert_request(rq, at_head, true, false, false);
+ blk_mq_sched_insert_request(rq, at_head, true, false);
return;
}
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 2bc544ce3d2e..a676084d4740 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -37,6 +37,9 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
if (!q)
return -ENXIO;
+ if (bdev_read_only(bdev))
+ return -EPERM;
+
if (flags & BLKDEV_DISCARD_SECURE) {
if (!blk_queue_secure_erase(q))
return -EOPNOTSUPP;
@@ -156,6 +159,9 @@ static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
if (!q)
return -ENXIO;
+ if (bdev_read_only(bdev))
+ return -EPERM;
+
bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
if ((sector | nr_sects) & bs_mask)
return -EINVAL;
@@ -233,6 +239,9 @@ static int __blkdev_issue_write_zeroes(struct block_device *bdev,
if (!q)
return -ENXIO;
+ if (bdev_read_only(bdev))
+ return -EPERM;
+
/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);
@@ -287,6 +296,9 @@ static int __blkdev_issue_zero_pages(struct block_device *bdev,
if (!q)
return -ENXIO;
+ if (bdev_read_only(bdev))
+ return -EPERM;
+
while (nr_sects != 0) {
bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
gfp_mask);
diff --git a/block/blk-map.c b/block/blk-map.c
index d3a94719f03f..db9373bd31ac 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -119,7 +119,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
struct bio *bio = NULL;
struct iov_iter i;
- int ret;
+ int ret = -EINVAL;
if (!iter_is_iovec(iter))
goto fail;
@@ -148,7 +148,7 @@ unmap_rq:
__blk_rq_unmap_user(bio);
fail:
rq->bio = NULL;
- return -EINVAL;
+ return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index f5dedd57dff6..8452fc7164cc 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -128,9 +128,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
nsegs++;
sectors = max_sectors;
}
- if (sectors)
- goto split;
- /* Make this single bvec as the 1st segment */
+ goto split;
}
if (bvprvp && blk_queue_cluster(q)) {
@@ -146,22 +144,21 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
bvprvp = &bvprv;
sectors += bv.bv_len >> 9;
- if (nsegs == 1 && seg_size > front_seg_size)
- front_seg_size = seg_size;
continue;
}
new_segment:
if (nsegs == queue_max_segments(q))
goto split;
+ if (nsegs == 1 && seg_size > front_seg_size)
+ front_seg_size = seg_size;
+
nsegs++;
bvprv = bv;
bvprvp = &bvprv;
seg_size = bv.bv_len;
sectors += bv.bv_len >> 9;
- if (nsegs == 1 && seg_size > front_seg_size)
- front_seg_size = seg_size;
}
do_split = false;
@@ -174,6 +171,8 @@ split:
bio = new;
}
+ if (nsegs == 1 && seg_size > front_seg_size)
+ front_seg_size = seg_size;
bio->bi_seg_front_size = front_seg_size;
if (seg_size > bio->bi_seg_back_size)
bio->bi_seg_back_size = seg_size;
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index b56a4f35720d..21cbc1f071c6 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -289,17 +289,12 @@ static const char *const rqf_name[] = {
RQF_NAME(HASHED),
RQF_NAME(STATS),
RQF_NAME(SPECIAL_PAYLOAD),
+ RQF_NAME(ZONE_WRITE_LOCKED),
+ RQF_NAME(MQ_TIMEOUT_EXPIRED),
+ RQF_NAME(MQ_POLL_SLEPT),
};
#undef RQF_NAME
-#define RQAF_NAME(name) [REQ_ATOM_##name] = #name
-static const char *const rqaf_name[] = {
- RQAF_NAME(COMPLETE),
- RQAF_NAME(STARTED),
- RQAF_NAME(POLL_SLEPT),
-};
-#undef RQAF_NAME
-
int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
@@ -316,8 +311,7 @@ int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
seq_puts(m, ", .rq_flags=");
blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
ARRAY_SIZE(rqf_name));
- seq_puts(m, ", .atomic_flags=");
- blk_flags_show(m, rq->atomic_flags, rqaf_name, ARRAY_SIZE(rqaf_name));
+ seq_printf(m, ", complete=%d", blk_rq_is_complete(rq));
seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
rq->internal_tag);
if (mq_ops->show_rq)
@@ -409,7 +403,7 @@ static void hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
const struct show_busy_params *params = data;
if (blk_mq_map_queue(rq->q, rq->mq_ctx->cpu) == params->hctx &&
- test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+ blk_mq_rq_state(rq) != MQ_RQ_IDLE)
__blk_mq_debugfs_rq_show(params->m,
list_entry_rq(&rq->queuelist));
}
@@ -703,7 +697,11 @@ static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
const struct blk_mq_debugfs_attr *attr = m->private;
void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
- if (!attr->write)
+ /*
+ * Attributes that only implement .seq_ops are read-only and 'attr' is
+ * the same as 'data' in this case.
+ */
+ if (attr == data || !attr->write)
return -EPERM;
return attr->write(data, buf, count, ppos);
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index c117bd8fd1f6..55c0a745b427 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -172,7 +172,6 @@ static void blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
WRITE_ONCE(hctx->dispatch_from, ctx);
}
-/* return true if hw queue need to be run again */
void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
struct request_queue *q = hctx->queue;
@@ -428,7 +427,7 @@ done:
}
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
- bool run_queue, bool async, bool can_block)
+ bool run_queue, bool async)
{
struct request_queue *q = rq->q;
struct elevator_queue *e = q->elevator;
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index ba1d1418a96d..1e9c9018ace1 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -18,7 +18,7 @@ bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
- bool run_queue, bool async, bool can_block);
+ bool run_queue, bool async);
void blk_mq_sched_insert_requests(struct request_queue *q,
struct blk_mq_ctx *ctx,
struct list_head *list, bool run_queue_async);
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 79969c3c234f..a54b4b070f1c 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -248,7 +248,7 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
return ret;
}
-static void __blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
+void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx;
int i;
@@ -265,13 +265,6 @@ static void __blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
q->mq_sysfs_init_done = false;
}
-void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
-{
- mutex_lock(&q->sysfs_lock);
- __blk_mq_unregister_dev(dev, q);
- mutex_unlock(&q->sysfs_lock);
-}
-
void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index c81b40ecd3f1..336dde07b230 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -134,12 +134,6 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
ws = bt_wait_ptr(bt, data->hctx);
drop_ctx = data->ctx == NULL;
do {
- prepare_to_wait(&ws->wait, &wait, TASK_UNINTERRUPTIBLE);
-
- tag = __blk_mq_get_tag(data, bt);
- if (tag != -1)
- break;
-
/*
* We're out of tags on this hardware queue, kick any
* pending IO submits before going to sleep waiting for
@@ -155,6 +149,13 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
if (tag != -1)
break;
+ prepare_to_wait_exclusive(&ws->wait, &wait,
+ TASK_UNINTERRUPTIBLE);
+
+ tag = __blk_mq_get_tag(data, bt);
+ if (tag != -1)
+ break;
+
if (data->ctx)
blk_mq_put_ctx(data->ctx);
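The reordering above restores the canonical sleep/wakeup discipline: retry the allocation, register as an exclusive waiter, then re-check before sleeping, so a tag freed in the window is not missed. A runnable user-space sketch of the shape of that loop; all helpers are stand-ins for the sbitmap/waitqueue primitives, not kernel APIs:

#include <stdbool.h>
#include <stdio.h>

static int free_tags = 0;

static bool try_get_tag(void)          /* stand-in for __blk_mq_get_tag() */
{
	if (free_tags > 0) { free_tags--; return true; }
	return false;
}

static void register_exclusive_waiter(void) { } /* prepare_to_wait_exclusive() */
static void sleep_until_woken(void) { free_tags = 1; /* simulate a wakeup */ }
static void unregister_waiter(void) { }         /* finish_wait() */

static void get_tag_slowpath(void)
{
	for (;;) {
		if (try_get_tag())           /* kick pending I/O, then retry */
			break;
		register_exclusive_waiter(); /* register before re-checking */
		if (try_get_tag())           /* a tag freed in the window    */
			break;               /* between check and sleep      */
		sleep_until_woken();         /* io_schedule() in the kernel  */
	}
	unregister_waiter();
	printf("got a tag\n");
}

int main(void) { get_tag_slowpath(); return 0; }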
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 3d3797327491..01f271d40825 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -95,8 +95,7 @@ static void blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
{
struct mq_inflight *mi = priv;
- if (test_bit(REQ_ATOM_STARTED, &rq->atomic_flags) &&
- !test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags)) {
+ if (blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT) {
/*
* index[0] counts the specific partition that was asked
* for. index[1] counts the ones that are active on the
@@ -222,7 +221,7 @@ void blk_mq_quiesce_queue(struct request_queue *q)
queue_for_each_hw_ctx(q, hctx, i) {
if (hctx->flags & BLK_MQ_F_BLOCKING)
- synchronize_srcu(hctx->queue_rq_srcu);
+ synchronize_srcu(hctx->srcu);
else
rcu = true;
}
@@ -272,15 +271,14 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
{
struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
struct request *rq = tags->static_rqs[tag];
-
- rq->rq_flags = 0;
+ req_flags_t rq_flags = 0;
if (data->flags & BLK_MQ_REQ_INTERNAL) {
rq->tag = -1;
rq->internal_tag = tag;
} else {
if (blk_mq_tag_busy(data->hctx)) {
- rq->rq_flags = RQF_MQ_INFLIGHT;
+ rq_flags = RQF_MQ_INFLIGHT;
atomic_inc(&data->hctx->nr_active);
}
rq->tag = tag;
@@ -288,27 +286,22 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
data->hctx->tags->rqs[rq->tag] = rq;
}
- INIT_LIST_HEAD(&rq->queuelist);
/* csd/requeue_work/fifo_time is initialized before use */
rq->q = data->q;
rq->mq_ctx = data->ctx;
+ rq->rq_flags = rq_flags;
+ rq->cpu = -1;
rq->cmd_flags = op;
if (data->flags & BLK_MQ_REQ_PREEMPT)
rq->rq_flags |= RQF_PREEMPT;
if (blk_queue_io_stat(data->q))
rq->rq_flags |= RQF_IO_STAT;
- /* do not touch atomic flags, it needs atomic ops against the timer */
- rq->cpu = -1;
+ INIT_LIST_HEAD(&rq->queuelist);
INIT_HLIST_NODE(&rq->hash);
RB_CLEAR_NODE(&rq->rb_node);
rq->rq_disk = NULL;
rq->part = NULL;
rq->start_time = jiffies;
-#ifdef CONFIG_BLK_CGROUP
- rq->rl = NULL;
- set_start_time_ns(rq);
- rq->io_start_time_ns = 0;
-#endif
rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
rq->nr_integrity_segments = 0;
@@ -316,6 +309,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
rq->special = NULL;
/* tag was already set */
rq->extra_len = 0;
+ rq->__deadline = 0;
INIT_LIST_HEAD(&rq->timeout_list);
rq->timeout = 0;
@@ -324,6 +318,12 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
rq->end_io_data = NULL;
rq->next_rq = NULL;
+#ifdef CONFIG_BLK_CGROUP
+ rq->rl = NULL;
+ set_start_time_ns(rq);
+ rq->io_start_time_ns = 0;
+#endif
+
data->ctx->rq_dispatched[op_is_sync(op)]++;
return rq;
}
@@ -443,7 +443,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
blk_queue_exit(q);
return ERR_PTR(-EXDEV);
}
- cpu = cpumask_first(alloc_data.hctx->cpumask);
+ cpu = cpumask_first_and(alloc_data.hctx->cpumask, cpu_online_mask);
alloc_data.ctx = __blk_mq_get_ctx(q, cpu);
rq = blk_mq_get_request(q, NULL, op, &alloc_data);
@@ -485,8 +485,7 @@ void blk_mq_free_request(struct request *rq)
if (blk_rq_rl(rq))
blk_put_rl(blk_rq_rl(rq));
- clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
- clear_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
+ blk_mq_rq_update_state(rq, MQ_RQ_IDLE);
if (rq->tag != -1)
blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
if (sched_tag != -1)
@@ -532,6 +531,9 @@ static void __blk_mq_complete_request(struct request *rq)
bool shared = false;
int cpu;
+ WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT);
+ blk_mq_rq_update_state(rq, MQ_RQ_COMPLETE);
+
if (rq->internal_tag != -1)
blk_mq_sched_completed_request(rq);
if (rq->rq_flags & RQF_STATS) {
@@ -559,6 +561,56 @@ static void __blk_mq_complete_request(struct request *rq)
put_cpu();
}
+static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
+ __releases(hctx->srcu)
+{
+ if (!(hctx->flags & BLK_MQ_F_BLOCKING))
+ rcu_read_unlock();
+ else
+ srcu_read_unlock(hctx->srcu, srcu_idx);
+}
+
+static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
+ __acquires(hctx->srcu)
+{
+ if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
+ /* shut up gcc false positive */
+ *srcu_idx = 0;
+ rcu_read_lock();
+ } else
+ *srcu_idx = srcu_read_lock(hctx->srcu);
+}
+
+static void blk_mq_rq_update_aborted_gstate(struct request *rq, u64 gstate)
+{
+ unsigned long flags;
+
+ /*
+ * blk_mq_rq_aborted_gstate() is used from the completion path and
+ * can thus be called from irq context. A u64_stats fetch that
+ * interrupts an update on the same CPU leads to a lockup. Disable
+ * irqs while updating.
+ */
+ local_irq_save(flags);
+ u64_stats_update_begin(&rq->aborted_gstate_sync);
+ rq->aborted_gstate = gstate;
+ u64_stats_update_end(&rq->aborted_gstate_sync);
+ local_irq_restore(flags);
+}
+
+static u64 blk_mq_rq_aborted_gstate(struct request *rq)
+{
+ unsigned int start;
+ u64 aborted_gstate;
+
+ do {
+ start = u64_stats_fetch_begin(&rq->aborted_gstate_sync);
+ aborted_gstate = rq->aborted_gstate;
+ } while (u64_stats_fetch_retry(&rq->aborted_gstate_sync, start));
+
+ return aborted_gstate;
+}
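On 32-bit machines the u64_stats helpers above degenerate to a sequence-counter retry loop; a simplified single-threaded sketch of that protocol (no memory barriers, purely illustrative, not the kernel implementation):

#include <stdint.h>
#include <stdio.h>

static unsigned int seq;     /* odd while an update is in progress */
static uint64_t value;

static void write_value(uint64_t v)
{
	seq++;          /* begin: sequence goes odd  */
	value = v;
	seq++;          /* end: sequence goes even   */
}

static uint64_t read_value(void)
{
	unsigned int start;
	uint64_t v;

	do {
		start = seq;                     /* u64_stats_fetch_begin() */
		v = value;
	} while (start & 1 || seq != start);     /* u64_stats_fetch_retry() */
	return v;
}

int main(void)
{
	write_value(42);
	printf("%llu\n", (unsigned long long)read_value());
	return 0;
}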
+
/**
* blk_mq_complete_request - end I/O on a request
* @rq: the request being processed
@@ -570,17 +622,33 @@ static void __blk_mq_complete_request(struct request *rq)
void blk_mq_complete_request(struct request *rq)
{
struct request_queue *q = rq->q;
+ struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
+ int srcu_idx;
if (unlikely(blk_should_fake_timeout(q)))
return;
- if (!blk_mark_rq_complete(rq))
+
+ /*
+ * If @rq->aborted_gstate equals the current instance, timeout is
+ * claiming @rq and we lost. This is synchronized through
+ * hctx_lock(). See blk_mq_timeout_work() for details.
+ *
+ * Completion path never blocks and we can directly use RCU here
+ * instead of hctx_lock() which can be either RCU or SRCU.
+ * However, that would complicate paths which want to synchronize
+ * against us. Let's stay in sync with the issue path so that
+ * hctx_lock() covers both issue and completion paths.
+ */
+ hctx_lock(hctx, &srcu_idx);
+ if (blk_mq_rq_aborted_gstate(rq) != rq->gstate)
__blk_mq_complete_request(rq);
+ hctx_unlock(hctx, srcu_idx);
}
EXPORT_SYMBOL(blk_mq_complete_request);
int blk_mq_request_started(struct request *rq)
{
- return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+ return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
}
EXPORT_SYMBOL_GPL(blk_mq_request_started);
@@ -598,34 +666,27 @@ void blk_mq_start_request(struct request *rq)
wbt_issue(q->rq_wb, &rq->issue_stat);
}
- blk_add_timer(rq);
-
- WARN_ON_ONCE(test_bit(REQ_ATOM_STARTED, &rq->atomic_flags));
+ WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);
/*
- * Mark us as started and clear complete. Complete might have been
- * set if requeue raced with timeout, which then marked it as
- * complete. So be sure to clear complete again when we start
- * the request, otherwise we'll ignore the completion event.
+ * Mark @rq in-flight which also advances the generation number,
+ * and register for timeout. Protect with a seqcount to allow the
+ * timeout path to read both @rq->gstate and @rq->deadline
+ * coherently.
*
- * Ensure that ->deadline is visible before we set STARTED, such that
- * blk_mq_check_expired() is guaranteed to observe our ->deadline when
- * it observes STARTED.
+ * This is the only place where a request is marked in-flight. If
+ * the timeout path reads an in-flight @rq->gstate, the
+ * @rq->deadline it reads together under @rq->gstate_seq is
+ * guaranteed to be the matching one.
*/
- smp_wmb();
- set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
- if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags)) {
- /*
- * Coherence order guarantees these consecutive stores to a
- * single variable propagate in the specified order. Thus the
- * clear_bit() is ordered _after_ the set bit. See
- * blk_mq_check_expired().
- *
- * (the bits must be part of the same byte for this to be
- * true).
- */
- clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
- }
+ preempt_disable();
+ write_seqcount_begin(&rq->gstate_seq);
+
+ blk_mq_rq_update_state(rq, MQ_RQ_IN_FLIGHT);
+ blk_add_timer(rq);
+
+ write_seqcount_end(&rq->gstate_seq);
+ preempt_enable();
if (q->dma_drain_size && blk_rq_bytes(rq)) {
/*
@@ -639,13 +700,9 @@ void blk_mq_start_request(struct request *rq)
EXPORT_SYMBOL(blk_mq_start_request);
/*
- * When we reach here because queue is busy, REQ_ATOM_COMPLETE
- * flag isn't set yet, so there may be race with timeout handler,
- * but given rq->deadline is just set in .queue_rq() under
- * this situation, the race won't be possible in reality because
- * rq->timeout should be set as big enough to cover the window
- * between blk_mq_start_request() called from .queue_rq() and
- * clearing REQ_ATOM_STARTED here.
+ * When we reach here because queue is busy, it's safe to change the state
+ * to IDLE without checking @rq->aborted_gstate because we should still be
+ * holding the RCU read lock and thus protected against timeout.
*/
static void __blk_mq_requeue_request(struct request *rq)
{
@@ -657,7 +714,8 @@ static void __blk_mq_requeue_request(struct request *rq)
wbt_requeue(q->rq_wb, &rq->issue_stat);
blk_mq_sched_requeue_request(rq);
- if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
+ if (blk_mq_rq_state(rq) != MQ_RQ_IDLE) {
+ blk_mq_rq_update_state(rq, MQ_RQ_IDLE);
if (q->dma_drain_size && blk_rq_bytes(rq))
rq->nr_phys_segments--;
}
@@ -689,13 +747,13 @@ static void blk_mq_requeue_work(struct work_struct *work)
rq->rq_flags &= ~RQF_SOFTBARRIER;
list_del_init(&rq->queuelist);
- blk_mq_sched_insert_request(rq, true, false, false, true);
+ blk_mq_sched_insert_request(rq, true, false, false);
}
while (!list_empty(&rq_list)) {
rq = list_entry(rq_list.next, struct request, queuelist);
list_del_init(&rq->queuelist);
- blk_mq_sched_insert_request(rq, false, false, false, true);
+ blk_mq_sched_insert_request(rq, false, false, false);
}
blk_mq_run_hw_queues(q, false);
@@ -729,7 +787,7 @@ EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q)
{
- kblockd_schedule_delayed_work(&q->requeue_work, 0);
+ kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);
@@ -755,24 +813,15 @@ EXPORT_SYMBOL(blk_mq_tag_to_rq);
struct blk_mq_timeout_data {
unsigned long next;
unsigned int next_set;
+ unsigned int nr_expired;
};
-void blk_mq_rq_timed_out(struct request *req, bool reserved)
+static void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
const struct blk_mq_ops *ops = req->q->mq_ops;
enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
- /*
- * We know that complete is set at this point. If STARTED isn't set
- * anymore, then the request isn't active and the "timeout" should
- * just be ignored. This can happen due to the bitflag ordering.
- * Timeout first checks if STARTED is set, and if it is, assumes
- * the request is active. But if we race with completion, then
- * both flags will get cleared. So check here again, and ignore
- * a timeout event with a request that isn't active.
- */
- if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
- return;
+ req->rq_flags |= RQF_MQ_TIMEOUT_EXPIRED;
if (ops->timeout)
ret = ops->timeout(req, reserved);
@@ -782,8 +831,13 @@ void blk_mq_rq_timed_out(struct request *req, bool reserved)
__blk_mq_complete_request(req);
break;
case BLK_EH_RESET_TIMER:
+ /*
+ * As nothing prevents a completion from happening while
+ * ->aborted_gstate is set, this may lead to ignored
+ * completions and further spurious timeouts.
+ */
+ blk_mq_rq_update_aborted_gstate(req, 0);
blk_add_timer(req);
- blk_clear_rq_complete(req);
break;
case BLK_EH_NOT_HANDLED:
break;
@@ -797,50 +851,51 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
struct request *rq, void *priv, bool reserved)
{
struct blk_mq_timeout_data *data = priv;
- unsigned long deadline;
+ unsigned long gstate, deadline;
+ int start;
- if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
- return;
+ might_sleep();
- /*
- * Ensures that if we see STARTED we must also see our
- * up-to-date deadline, see blk_mq_start_request().
- */
- smp_rmb();
+ if (rq->rq_flags & RQF_MQ_TIMEOUT_EXPIRED)
+ return;
- deadline = READ_ONCE(rq->deadline);
+ /* read coherent snapshots of @rq->gstate and @rq->deadline */
+ while (true) {
+ start = read_seqcount_begin(&rq->gstate_seq);
+ gstate = READ_ONCE(rq->gstate);
+ deadline = blk_rq_deadline(rq);
+ if (!read_seqcount_retry(&rq->gstate_seq, start))
+ break;
+ cond_resched();
+ }
- /*
- * The rq being checked may have been freed and reallocated
- * out already here, we avoid this race by checking rq->deadline
- * and REQ_ATOM_COMPLETE flag together:
- *
- * - if rq->deadline is observed as new value because of
- * reusing, the rq won't be timed out because of timing.
- * - if rq->deadline is observed as previous value,
- * REQ_ATOM_COMPLETE flag won't be cleared in reuse path
- * because we put a barrier between setting rq->deadline
- * and clearing the flag in blk_mq_start_request(), so
- * this rq won't be timed out too.
- */
- if (time_after_eq(jiffies, deadline)) {
- if (!blk_mark_rq_complete(rq)) {
- /*
- * Again coherence order ensures that consecutive reads
- * from the same variable must be in that order. This
- * ensures that if we see COMPLETE clear, we must then
- * see STARTED set and we'll ignore this timeout.
- *
- * (There's also the MB implied by the test_and_clear())
- */
- blk_mq_rq_timed_out(rq, reserved);
- }
+ /* if in-flight && overdue, mark for abortion */
+ if ((gstate & MQ_RQ_STATE_MASK) == MQ_RQ_IN_FLIGHT &&
+ time_after_eq(jiffies, deadline)) {
+ blk_mq_rq_update_aborted_gstate(rq, gstate);
+ data->nr_expired++;
+ hctx->nr_expired++;
} else if (!data->next_set || time_after(data->next, deadline)) {
data->next = deadline;
data->next_set = 1;
}
}
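A toy sketch of the generation/state packing that @rq->gstate uses: the state sits in the low bits, a generation number above them, bumped on each transition to in-flight. The bit-width constants here are illustrative, since the header defining them is not part of this hunk:

#include <stdio.h>

#define MQ_RQ_STATE_BITS   2
#define MQ_RQ_STATE_MASK   ((1UL << MQ_RQ_STATE_BITS) - 1)
#define MQ_RQ_IDLE         0UL
#define MQ_RQ_IN_FLIGHT    1UL

static unsigned long set_state(unsigned long gstate, unsigned long state)
{
	/* advancing to in-flight also bumps the generation */
	unsigned long gen = (gstate & ~MQ_RQ_STATE_MASK)
		+ (state == MQ_RQ_IN_FLIGHT ? (1UL << MQ_RQ_STATE_BITS) : 0);
	return gen | state;
}

int main(void)
{
	unsigned long gstate = MQ_RQ_IDLE;

	gstate = set_state(gstate, MQ_RQ_IN_FLIGHT);
	printf("state=%lu gen=%lu\n", gstate & MQ_RQ_STATE_MASK,
	       gstate >> MQ_RQ_STATE_BITS);  /* state=1 gen=1 */
	return 0;
}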
+static void blk_mq_terminate_expired(struct blk_mq_hw_ctx *hctx,
+ struct request *rq, void *priv, bool reserved)
+{
+ /*
+ * We marked @rq->aborted_gstate and waited for RCU. If there were
+ * completions that we lost to, they would have finished and
+ * updated @rq->gstate by now; otherwise, the completion path is
+ * now guaranteed to see @rq->aborted_gstate and yield. If
+ * @rq->aborted_gstate still matches @rq->gstate, @rq is ours.
+ */
+ if (!(rq->rq_flags & RQF_MQ_TIMEOUT_EXPIRED) &&
+ READ_ONCE(rq->gstate) == rq->aborted_gstate)
+ blk_mq_rq_timed_out(rq, reserved);
+}
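A compact sketch of the handshake the comment above describes: the timeout path publishes the generation it sampled, and a completion that slipped in bumps the live generation, so the comparison fails and the timeout yields. Single-threaded toy with an invented generation increment:

#include <stdio.h>

static unsigned long gstate;           /* live generation+state */
static unsigned long aborted_gstate;   /* snapshot taken by timeout path */

static void complete_request(void)
{
	gstate += 4;                   /* completion advances the generation */
}

int main(void)
{
	aborted_gstate = gstate;       /* timeout marks the request */
	complete_request();            /* completion wins the race  */
	if (gstate == aborted_gstate)
		printf("timeout owns the request\n");
	else
		printf("completion won; timeout yields\n"); /* printed */
	return 0;
}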
+
static void blk_mq_timeout_work(struct work_struct *work)
{
struct request_queue *q =
@@ -848,7 +903,9 @@ static void blk_mq_timeout_work(struct work_struct *work)
struct blk_mq_timeout_data data = {
.next = 0,
.next_set = 0,
+ .nr_expired = 0,
};
+ struct blk_mq_hw_ctx *hctx;
int i;
/* A deadlock might occur if a request is stuck requiring a
@@ -867,14 +924,46 @@ static void blk_mq_timeout_work(struct work_struct *work)
if (!percpu_ref_tryget(&q->q_usage_counter))
return;
+ /* scan for the expired ones and set their ->aborted_gstate */
blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);
+ if (data.nr_expired) {
+ bool has_rcu = false;
+
+ /*
+ * Wait till everyone sees ->aborted_gstate. The
+ * sequential waits for SRCUs aren't ideal. If this ever
+ * becomes a problem, we can add per-hw_ctx rcu_head and
+ * wait in parallel.
+ */
+ queue_for_each_hw_ctx(q, hctx, i) {
+ if (!hctx->nr_expired)
+ continue;
+
+ if (!(hctx->flags & BLK_MQ_F_BLOCKING))
+ has_rcu = true;
+ else
+ synchronize_srcu(hctx->srcu);
+
+ hctx->nr_expired = 0;
+ }
+ if (has_rcu)
+ synchronize_rcu();
+
+ /* terminate the ones we won */
+ blk_mq_queue_tag_busy_iter(q, blk_mq_terminate_expired, NULL);
+ }
+
if (data.next_set) {
data.next = blk_rq_timeout(round_jiffies_up(data.next));
mod_timer(&q->timeout, data.next);
} else {
- struct blk_mq_hw_ctx *hctx;
-
+ /*
+ * Request timeouts are handled as a forward rolling timer. If
+ * we end up here it means that no requests are pending and
+ * also that no request has been pending for a while. Mark
+ * each hctx as idle.
+ */
queue_for_each_hw_ctx(q, hctx, i) {
/* the hctx may be unmapped, so check it here */
if (blk_mq_hw_queue_mapped(hctx))
@@ -1010,66 +1099,67 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
/*
* Mark us waiting for a tag. For shared tags, this involves hooking us into
- * the tag wakeups. For non-shared tags, we can simply mark us nedeing a
- * restart. For both caes, take care to check the condition again after
+ * the tag wakeups. For non-shared tags, we can simply mark us needing a
+ * restart. For both cases, take care to check the condition again after
* marking us as waiting.
*/
static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx **hctx,
struct request *rq)
{
struct blk_mq_hw_ctx *this_hctx = *hctx;
- bool shared_tags = (this_hctx->flags & BLK_MQ_F_TAG_SHARED) != 0;
struct sbq_wait_state *ws;
wait_queue_entry_t *wait;
bool ret;
- if (!shared_tags) {
+ if (!(this_hctx->flags & BLK_MQ_F_TAG_SHARED)) {
if (!test_bit(BLK_MQ_S_SCHED_RESTART, &this_hctx->state))
set_bit(BLK_MQ_S_SCHED_RESTART, &this_hctx->state);
- } else {
- wait = &this_hctx->dispatch_wait;
- if (!list_empty_careful(&wait->entry))
- return false;
- spin_lock(&this_hctx->lock);
- if (!list_empty(&wait->entry)) {
- spin_unlock(&this_hctx->lock);
- return false;
- }
+ /*
+ * It's possible that a tag was freed in the window between the
+ * allocation failure and adding the hardware queue to the wait
+ * queue.
+ *
+ * Don't clear RESTART here, someone else could have set it.
+ * At most this will cost an extra queue run.
+ */
+ return blk_mq_get_driver_tag(rq, hctx, false);
+ }
- ws = bt_wait_ptr(&this_hctx->tags->bitmap_tags, this_hctx);
- add_wait_queue(&ws->wait, wait);
+ wait = &this_hctx->dispatch_wait;
+ if (!list_empty_careful(&wait->entry))
+ return false;
+
+ spin_lock(&this_hctx->lock);
+ if (!list_empty(&wait->entry)) {
+ spin_unlock(&this_hctx->lock);
+ return false;
}
+ ws = bt_wait_ptr(&this_hctx->tags->bitmap_tags, this_hctx);
+ add_wait_queue(&ws->wait, wait);
+
/*
* It's possible that a tag was freed in the window between the
* allocation failure and adding the hardware queue to the wait
* queue.
*/
ret = blk_mq_get_driver_tag(rq, hctx, false);
-
- if (!shared_tags) {
- /*
- * Don't clear RESTART here, someone else could have set it.
- * At most this will cost an extra queue run.
- */
- return ret;
- } else {
- if (!ret) {
- spin_unlock(&this_hctx->lock);
- return false;
- }
-
- /*
- * We got a tag, remove ourselves from the wait queue to ensure
- * someone else gets the wakeup.
- */
- spin_lock_irq(&ws->wait.lock);
- list_del_init(&wait->entry);
- spin_unlock_irq(&ws->wait.lock);
+ if (!ret) {
spin_unlock(&this_hctx->lock);
- return true;
+ return false;
}
+
+ /*
+ * We got a tag, remove ourselves from the wait queue to ensure
+ * someone else gets the wakeup.
+ */
+ spin_lock_irq(&ws->wait.lock);
+ list_del_init(&wait->entry);
+ spin_unlock_irq(&ws->wait.lock);
+ spin_unlock(&this_hctx->lock);
+
+ return true;
}
bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
@@ -1206,9 +1296,27 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
/*
* We should be running this queue from one of the CPUs that
* are mapped to it.
+ *
+ * There are at least two related races now between setting
+ * hctx->next_cpu from blk_mq_hctx_next_cpu() and running
+ * __blk_mq_run_hw_queue():
+ *
+ * - hctx->next_cpu is found offline in blk_mq_hctx_next_cpu(),
+ * but it later comes online; in that case this warning is
+ * entirely harmless
+ *
+ * - hctx->next_cpu is found online in blk_mq_hctx_next_cpu(),
+ * but it later goes offline; then the warning can't be
+ * triggered, and we rely on the blk-mq timeout handler to
+ * handle requests dispatched to this hctx
*/
- WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
- cpu_online(hctx->next_cpu));
+ if (!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
+ cpu_online(hctx->next_cpu)) {
+ printk(KERN_WARNING "run queue from wrong CPU %d, hctx %s\n",
+ raw_smp_processor_id(),
+ cpumask_empty(hctx->cpumask) ? "inactive" : "active");
+ dump_stack();
+ }
/*
* We can't run the queue inline with ints disabled. Ensure that
@@ -1216,17 +1324,11 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
*/
WARN_ON_ONCE(in_interrupt());
- if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
- rcu_read_lock();
- blk_mq_sched_dispatch_requests(hctx);
- rcu_read_unlock();
- } else {
- might_sleep();
+ might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
- srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
- blk_mq_sched_dispatch_requests(hctx);
- srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
- }
+ hctx_lock(hctx, &srcu_idx);
+ blk_mq_sched_dispatch_requests(hctx);
+ hctx_unlock(hctx, srcu_idx);
}
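The hctx_lock()/hctx_unlock() helpers used above are introduced by an earlier hunk of this patch that is not shown here. A plausible reconstruction from the call sites — plain RCU for non-blocking queues, SRCU for BLK_MQ_F_BLOCKING ones, matching the queue_rq_srcu -> srcu rename later in this diff:

static inline void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
{
	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
		/* srcu_idx is unused in the RCU case; keep it initialized */
		*srcu_idx = 0;
		rcu_read_lock();
	} else
		*srcu_idx = srcu_read_lock(hctx->srcu);
}

static inline void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
{
	if (!(hctx->flags & BLK_MQ_F_BLOCKING))
		rcu_read_unlock();
	else
		srcu_read_unlock(hctx->srcu, srcu_idx);
}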
/*
@@ -1237,20 +1339,47 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
*/
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{
+ bool tried = false;
+
if (hctx->queue->nr_hw_queues == 1)
return WORK_CPU_UNBOUND;
if (--hctx->next_cpu_batch <= 0) {
int next_cpu;
-
- next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
+select_cpu:
+ next_cpu = cpumask_next_and(hctx->next_cpu, hctx->cpumask,
+ cpu_online_mask);
if (next_cpu >= nr_cpu_ids)
- next_cpu = cpumask_first(hctx->cpumask);
+ next_cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
- hctx->next_cpu = next_cpu;
+ /*
+ * If no online CPU is found, we still have to make sure
+ * hctx->next_cpu points at a valid CPU so that the workqueue
+ * machinery is not broken.
+ */
+ if (next_cpu >= nr_cpu_ids)
+ hctx->next_cpu = cpumask_first(hctx->cpumask);
+ else
+ hctx->next_cpu = next_cpu;
hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
}
+ /*
+ * Schedule unbound if we can't find an online CPU for this hctx;
+ * this should only happen while CPU DEAD is being handled.
+ */
+ if (!cpu_online(hctx->next_cpu)) {
+ if (!tried) {
+ tried = true;
+ goto select_cpu;
+ }
+
+ /*
+ * Make sure to re-select the CPU next time once the CPUs
+ * in hctx->cpumask come online again.
+ */
+ hctx->next_cpu_batch = 1;
+ return WORK_CPU_UNBOUND;
+ }
return hctx->next_cpu;
}
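Stripped of kernel types, the selection policy above is: round-robin through the hctx's mask restricted to online CPUs, retry the selection once if the cached next_cpu went offline, and fall back to an unbound work item when nothing is online. A self-contained user-space model with small bitmasks (sizes and names are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8
#define WORK_CPU_UNBOUND (-1)
#define BATCH 8

static unsigned int cpumask;     /* CPUs this hctx may use */
static unsigned int online_mask; /* CPUs currently online */
static int next_cpu;
static int next_cpu_batch = 1;

/* like cpumask_next(): first set bit strictly after 'after' */
static int next_set_bit(unsigned int mask, int after)
{
	for (int cpu = after + 1; cpu < NR_CPUS; cpu++)
		if (mask & (1u << cpu))
			return cpu;
	return NR_CPUS; /* plays the role of nr_cpu_ids */
}

static int pick_next_cpu(void)
{
	bool tried = false;

	if (--next_cpu_batch <= 0) {
		int cpu;
select_cpu:
		/* round-robin, restricted to online CPUs */
		cpu = next_set_bit(cpumask & online_mask, next_cpu);
		if (cpu >= NR_CPUS)
			cpu = next_set_bit(cpumask & online_mask, -1);
		/* keep next_cpu valid even if nothing is online */
		next_cpu = cpu >= NR_CPUS ? next_set_bit(cpumask, -1) : cpu;
		next_cpu_batch = BATCH;
	}

	if (!(online_mask & (1u << next_cpu))) {
		if (!tried) {
			tried = true;
			goto select_cpu;
		}
		/* force a re-selection once CPUs come back online */
		next_cpu_batch = 1;
		return WORK_CPU_UNBOUND;
	}
	return next_cpu;
}

int main(void)
{
	cpumask = 0x0f;     /* CPUs 0-3 belong to this hctx */
	online_mask = 0x0a; /* only CPUs 1 and 3 are online */

	for (int i = 0; i < 3; i++)
		printf("run %d -> cpu %d\n", i, pick_next_cpu());
	online_mask = 0;    /* everything went offline */
	printf("offline -> cpu %d\n", pick_next_cpu());
	return 0;
}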
@@ -1274,9 +1403,8 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
put_cpu();
}
- kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
- &hctx->run_work,
- msecs_to_jiffies(msecs));
+ kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
+ msecs_to_jiffies(msecs));
}
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
@@ -1287,7 +1415,23 @@ EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
- if (blk_mq_hctx_has_pending(hctx)) {
+ int srcu_idx;
+ bool need_run;
+
+ /*
+ * When the queue is quiesced, we may be switching the io
+ * scheduler, updating nr_hw_queues, or doing other work during
+ * which the queue can't be run; not even
+ * blk_mq_hctx_has_pending() can be called safely.
+ *
+ * If the queue is quiesced, it will be rerun by
+ * blk_mq_unquiesce_queue().
+ */
+ hctx_lock(hctx, &srcu_idx);
+ need_run = !blk_queue_quiesced(hctx->queue) &&
+ blk_mq_hctx_has_pending(hctx);
+ hctx_unlock(hctx, srcu_idx);
+
+ if (need_run) {
__blk_mq_delay_run_hw_queue(hctx, async, 0);
return true;
}
@@ -1595,9 +1739,9 @@ static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
}
-static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
- struct request *rq,
- blk_qc_t *cookie, bool may_sleep)
+static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
+ struct request *rq,
+ blk_qc_t *cookie)
{
struct request_queue *q = rq->q;
struct blk_mq_queue_data bd = {
@@ -1606,15 +1750,52 @@ static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
};
blk_qc_t new_cookie;
blk_status_t ret;
+
+ new_cookie = request_to_qc_t(hctx, rq);
+
+ /*
+ * If the driver returns OK, we are done. On error, the caller
+ * may kill the request. Any other status (busy) means the
+ * request is added back to our list, as before.
+ */
+ ret = q->mq_ops->queue_rq(hctx, &bd);
+ switch (ret) {
+ case BLK_STS_OK:
+ *cookie = new_cookie;
+ break;
+ case BLK_STS_RESOURCE:
+ __blk_mq_requeue_request(rq);
+ break;
+ default:
+ *cookie = BLK_QC_T_NONE;
+ break;
+ }
+
+ return ret;
+}
+
+static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+ struct request *rq,
+ blk_qc_t *cookie,
+ bool bypass_insert)
+{
+ struct request_queue *q = rq->q;
bool run_queue = true;
- /* RCU or SRCU read lock is needed before checking quiesced flag */
+ /*
+ * An RCU or SRCU read lock is needed before checking the
+ * quiesced flag.
+ *
+ * When the queue is stopped or quiesced, ignore 'bypass_insert'
+ * from blk_mq_request_issue_directly() and return BLK_STS_OK to
+ * the caller, so the driver won't try to dispatch again.
+ */
if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
run_queue = false;
+ bypass_insert = false;
goto insert;
}
- if (q->elevator)
+ if (q->elevator && !bypass_insert)
goto insert;
if (!blk_mq_get_driver_tag(rq, NULL, false))
@@ -1625,47 +1806,47 @@ static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
goto insert;
}
- new_cookie = request_to_qc_t(hctx, rq);
-
- /*
- * For OK queue, we are done. For error, kill it. Any other
- * error (busy), just add it to our list as we previously
- * would have done
- */
- ret = q->mq_ops->queue_rq(hctx, &bd);
- switch (ret) {
- case BLK_STS_OK:
- *cookie = new_cookie;
- return;
- case BLK_STS_RESOURCE:
- __blk_mq_requeue_request(rq);
- goto insert;
- default:
- *cookie = BLK_QC_T_NONE;
- blk_mq_end_request(rq, ret);
- return;
- }
-
+ return __blk_mq_issue_directly(hctx, rq, cookie);
insert:
- blk_mq_sched_insert_request(rq, false, run_queue, false, may_sleep);
+ if (bypass_insert)
+ return BLK_STS_RESOURCE;
+
+ blk_mq_sched_insert_request(rq, false, run_queue, false);
+ return BLK_STS_OK;
}
static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
struct request *rq, blk_qc_t *cookie)
{
- if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
- rcu_read_lock();
- __blk_mq_try_issue_directly(hctx, rq, cookie, false);
- rcu_read_unlock();
- } else {
- unsigned int srcu_idx;
+ blk_status_t ret;
+ int srcu_idx;
- might_sleep();
+ might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
- srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
- __blk_mq_try_issue_directly(hctx, rq, cookie, true);
- srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
- }
+ hctx_lock(hctx, &srcu_idx);
+
+ ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false);
+ if (ret == BLK_STS_RESOURCE)
+ blk_mq_sched_insert_request(rq, false, true, false);
+ else if (ret != BLK_STS_OK)
+ blk_mq_end_request(rq, ret);
+
+ hctx_unlock(hctx, srcu_idx);
+}
+
+blk_status_t blk_mq_request_issue_directly(struct request *rq)
+{
+ blk_status_t ret;
+ int srcu_idx;
+ blk_qc_t unused_cookie;
+ struct blk_mq_ctx *ctx = rq->mq_ctx;
+ struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);
+
+ hctx_lock(hctx, &srcu_idx);
+ ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true);
+ hctx_unlock(hctx, srcu_idx);
+
+ return ret;
}
static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
@@ -1776,7 +1957,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
} else if (q->elevator) {
blk_mq_put_ctx(data.ctx);
blk_mq_bio_to_request(rq, bio);
- blk_mq_sched_insert_request(rq, false, true, true, true);
+ blk_mq_sched_insert_request(rq, false, true, true);
} else {
blk_mq_put_ctx(data.ctx);
blk_mq_bio_to_request(rq, bio);
@@ -1869,6 +2050,22 @@ static size_t order_to_size(unsigned int order)
return (size_t)PAGE_SIZE << order;
}
+static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
+ unsigned int hctx_idx, int node)
+{
+ int ret;
+
+ if (set->ops->init_request) {
+ ret = set->ops->init_request(set, rq, hctx_idx, node);
+ if (ret)
+ return ret;
+ }
+
+ seqcount_init(&rq->gstate_seq);
+ u64_stats_init(&rq->aborted_gstate_sync);
+ return 0;
+}
+
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
unsigned int hctx_idx, unsigned int depth)
{
@@ -1930,12 +2127,9 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
struct request *rq = p;
tags->static_rqs[i] = rq;
- if (set->ops->init_request) {
- if (set->ops->init_request(set, rq, hctx_idx,
- node)) {
- tags->static_rqs[i] = NULL;
- goto fail;
- }
+ if (blk_mq_init_request(set, rq, hctx_idx, node)) {
+ tags->static_rqs[i] = NULL;
+ goto fail;
}
p += rq_size;
@@ -1994,7 +2188,8 @@ static void blk_mq_exit_hctx(struct request_queue *q,
{
blk_mq_debugfs_unregister_hctx(hctx);
- blk_mq_tag_idle(hctx);
+ if (blk_mq_hw_queue_mapped(hctx))
+ blk_mq_tag_idle(hctx);
if (set->ops->exit_request)
set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
@@ -2005,7 +2200,7 @@ static void blk_mq_exit_hctx(struct request_queue *q,
set->ops->exit_hctx(hctx, hctx_idx);
if (hctx->flags & BLK_MQ_F_BLOCKING)
- cleanup_srcu_struct(hctx->queue_rq_srcu);
+ cleanup_srcu_struct(hctx->srcu);
blk_mq_remove_cpuhp(hctx);
blk_free_flush_queue(hctx->fq);
@@ -2074,13 +2269,11 @@ static int blk_mq_init_hctx(struct request_queue *q,
if (!hctx->fq)
goto sched_exit_hctx;
- if (set->ops->init_request &&
- set->ops->init_request(set, hctx->fq->flush_rq, hctx_idx,
- node))
+ if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, node))
goto free_fq;
if (hctx->flags & BLK_MQ_F_BLOCKING)
- init_srcu_struct(hctx->queue_rq_srcu);
+ init_srcu_struct(hctx->srcu);
blk_mq_debugfs_register_hctx(q, hctx);
@@ -2116,16 +2309,11 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
INIT_LIST_HEAD(&__ctx->rq_list);
__ctx->queue = q;
- /* If the cpu isn't present, the cpu is mapped to first hctx */
- if (!cpu_present(i))
- continue;
-
- hctx = blk_mq_map_queue(q, i);
-
/*
* Set local node, IFF we have more than one hw queue. If
* not, we remain on the home node of the device
*/
+ hctx = blk_mq_map_queue(q, i);
if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
hctx->numa_node = local_memory_node(cpu_to_node(i));
}
@@ -2182,7 +2370,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
*
* If the cpu isn't present, the cpu is mapped to first hctx.
*/
- for_each_present_cpu(i) {
+ for_each_possible_cpu(i) {
hctx_idx = q->mq_map[i];
/* unmapped hw queue can be remapped after CPU topo changed */
if (!set->tags[hctx_idx] &&
@@ -2236,7 +2424,8 @@ static void blk_mq_map_swqueue(struct request_queue *q)
/*
* Initialize batch roundrobin counts
*/
- hctx->next_cpu = cpumask_first(hctx->cpumask);
+ hctx->next_cpu = cpumask_first_and(hctx->cpumask,
+ cpu_online_mask);
hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
}
}
@@ -2369,7 +2558,7 @@ static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
{
int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
- BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, queue_rq_srcu),
+ BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
__alignof__(struct blk_mq_hw_ctx)) !=
sizeof(struct blk_mq_hw_ctx));
@@ -2386,6 +2575,9 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
blk_mq_sysfs_unregister(q);
+
+ /* protect against switching io scheduler */
+ mutex_lock(&q->sysfs_lock);
for (i = 0; i < set->nr_hw_queues; i++) {
int node;
@@ -2430,6 +2622,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
}
}
q->nr_hw_queues = i;
+ mutex_unlock(&q->sysfs_lock);
blk_mq_sysfs_register(q);
}
@@ -2601,9 +2794,27 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
{
- if (set->ops->map_queues)
+ if (set->ops->map_queues) {
+ int cpu;
+ /*
+ * A transport's .map_queues is usually implemented in the
+ * following way:
+ *
+ * for (queue = 0; queue < set->nr_hw_queues; queue++) {
+ * mask = get_cpu_mask(queue)
+ * for_each_cpu(cpu, mask)
+ * set->mq_map[cpu] = queue;
+ * }
+ *
+ * When we need to remap, the table has to be cleared first
+ * to kill stale mappings, since a CPU may end up not mapped
+ * to any hw queue.
+ */
+ for_each_possible_cpu(cpu)
+ set->mq_map[cpu] = 0;
+
return set->ops->map_queues(set);
- else
+ } else
return blk_mq_map_queues(set);
}
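A compact user-space model of why the clearing matters: a transport's map_queues() only writes the CPUs it maps, so without zeroing first, a CPU dropped by the new topology would silently keep its stale queue assignment (all values hypothetical):

#include <stdio.h>

#define NR_CPUS 8
#define NR_HW_QUEUES 2

static int mq_map[NR_CPUS];

/* maps only the even CPUs, as a driver with per-node queues might */
static void transport_map_queues(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu += 2)
		mq_map[cpu] = (cpu / 2) % NR_HW_QUEUES;
}

static void update_queue_map(void)
{
	/* kill stale mappings: an unmapped CPU must fall back to queue 0 */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		mq_map[cpu] = 0;
	transport_map_queues();
}

int main(void)
{
	mq_map[7] = 1;          /* stale entry from an earlier topology */
	update_queue_map();
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %d -> hw queue %d\n", cpu, mq_map[cpu]);
	return 0;
}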
@@ -2712,6 +2923,7 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
return -EINVAL;
blk_mq_freeze_queue(q);
+ blk_mq_quiesce_queue(q);
ret = 0;
queue_for_each_hw_ctx(q, hctx, i) {
@@ -2735,6 +2947,7 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
if (!ret)
q->nr_requests = nr;
+ blk_mq_unquiesce_queue(q);
blk_mq_unfreeze_queue(q);
return ret;
@@ -2850,7 +3063,7 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
unsigned int nsecs;
ktime_t kt;
- if (test_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags))
+ if (rq->rq_flags & RQF_MQ_POLL_SLEPT)
return false;
/*
@@ -2870,7 +3083,7 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
if (!nsecs)
return false;
- set_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
+ rq->rq_flags |= RQF_MQ_POLL_SLEPT;
/*
* This will be replaced with the stats tracking code, using
@@ -2884,7 +3097,7 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
hrtimer_init_sleeper(&hs, current);
do {
- if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
+ if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
break;
set_current_state(TASK_UNINTERRUPTIBLE);
hrtimer_start_expires(&hs.timer, mode);
@@ -2970,12 +3183,6 @@ static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
static int __init blk_mq_init(void)
{
- /*
- * See comment in block/blk.h rq_atomic_flags enum
- */
- BUILD_BUG_ON((REQ_ATOM_STARTED / BITS_PER_BYTE) !=
- (REQ_ATOM_COMPLETE / BITS_PER_BYTE));
-
cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
blk_mq_hctx_notify_dead);
return 0;
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 6c7c3ff5bf62..88c558f71819 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -27,6 +27,20 @@ struct blk_mq_ctx {
struct kobject kobj;
} ____cacheline_aligned_in_smp;
+/*
+ * Bits for request->gstate. The lower two bits carry MQ_RQ_* state value
+ * and the upper bits the generation number.
+ */
+enum mq_rq_state {
+ MQ_RQ_IDLE = 0,
+ MQ_RQ_IN_FLIGHT = 1,
+ MQ_RQ_COMPLETE = 2,
+
+ MQ_RQ_STATE_BITS = 2,
+ MQ_RQ_STATE_MASK = (1 << MQ_RQ_STATE_BITS) - 1,
+ MQ_RQ_GEN_INC = 1 << MQ_RQ_STATE_BITS,
+};
+
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
@@ -60,6 +74,9 @@ void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
struct list_head *list);
+/* Used by blk_insert_cloned_request() to issue request directly */
+blk_status_t blk_mq_request_issue_directly(struct request *rq);
+
/*
* CPU -> queue mappings
*/
@@ -81,10 +98,41 @@ extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
-extern void blk_mq_rq_timed_out(struct request *req, bool reserved);
-
void blk_mq_release(struct request_queue *q);
+/**
+ * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
+ * @rq: target request.
+ */
+static inline int blk_mq_rq_state(struct request *rq)
+{
+ return READ_ONCE(rq->gstate) & MQ_RQ_STATE_MASK;
+}
+
+/**
+ * blk_mq_rq_update_state() - set the current MQ_RQ_* state of a request
+ * @rq: target request.
+ * @state: new state to set.
+ *
+ * Set @rq's state to @state. The caller is responsible for ensuring that
+ * there are no other updaters. A request can transition into IN_FLIGHT
+ * only from IDLE and doing so increments the generation number.
+ */
+static inline void blk_mq_rq_update_state(struct request *rq,
+ enum mq_rq_state state)
+{
+ u64 old_val = READ_ONCE(rq->gstate);
+ u64 new_val = (old_val & ~MQ_RQ_STATE_MASK) | state;
+
+ if (state == MQ_RQ_IN_FLIGHT) {
+ WARN_ON_ONCE((old_val & MQ_RQ_STATE_MASK) != MQ_RQ_IDLE);
+ new_val += MQ_RQ_GEN_INC;
+ }
+
+ /* avoid exposing interim values */
+ WRITE_ONCE(rq->gstate, new_val);
+}
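The generation counter is what lets readers such as the timeout code tell two lives of the same request apart: recycling a request through IDLE -> IN_FLIGHT changes gstate even though the state bits repeat. A user-space model of the encoding these helpers implement (names hypothetical, and without the READ_ONCE/WRITE_ONCE ordering the kernel needs):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

enum { S_IDLE = 0, S_IN_FLIGHT = 1, S_COMPLETE = 2 };
#define STATE_BITS 2
#define STATE_MASK ((1u << STATE_BITS) - 1)
#define GEN_INC    (1u << STATE_BITS)

static uint64_t gstate; /* low 2 bits: state, rest: generation */

static int rq_state(void)
{
	return gstate & STATE_MASK;
}

static void rq_update_state(int state)
{
	uint64_t new_val = (gstate & ~(uint64_t)STATE_MASK) | state;

	/* IDLE -> IN_FLIGHT is the only transition that bumps the generation */
	if (state == S_IN_FLIGHT) {
		assert(rq_state() == S_IDLE);
		new_val += GEN_INC;
	}
	gstate = new_val;
}

int main(void)
{
	rq_update_state(S_IN_FLIGHT);
	rq_update_state(S_COMPLETE);
	rq_update_state(S_IDLE);
	rq_update_state(S_IN_FLIGHT); /* second trip bumps the generation again */
	printf("state=%d generation=%llu\n", rq_state(),
	       (unsigned long long)(gstate >> STATE_BITS));
	return 0;
}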
+
static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
unsigned int cpu)
{
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 870484eaed1f..cbea895a5547 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -853,6 +853,10 @@ struct kobj_type blk_queue_ktype = {
.release = blk_release_queue,
};
+/**
+ * blk_register_queue - register a block layer queue with sysfs
+ * @disk: Disk whose request queue should be registered with sysfs.
+ */
int blk_register_queue(struct gendisk *disk)
{
int ret;
@@ -909,11 +913,12 @@ int blk_register_queue(struct gendisk *disk)
if (q->request_fn || (q->mq_ops && q->elevator)) {
ret = elv_register_queue(q);
if (ret) {
+ mutex_unlock(&q->sysfs_lock);
kobject_uevent(&q->kobj, KOBJ_REMOVE);
kobject_del(&q->kobj);
blk_trace_remove_sysfs(dev);
kobject_put(&dev->kobj);
- goto unlock;
+ return ret;
}
}
ret = 0;
@@ -921,7 +926,15 @@ unlock:
mutex_unlock(&q->sysfs_lock);
return ret;
}
+EXPORT_SYMBOL_GPL(blk_register_queue);
+/**
+ * blk_unregister_queue - counterpart of blk_register_queue()
+ * @disk: Disk whose request queue should be unregistered from sysfs.
+ *
+ * Note: the caller is responsible for guaranteeing that this function is called
+ * after blk_register_queue() has finished.
+ */
void blk_unregister_queue(struct gendisk *disk)
{
struct request_queue *q = disk->queue;
@@ -929,21 +942,39 @@ void blk_unregister_queue(struct gendisk *disk)
if (WARN_ON(!q))
return;
- mutex_lock(&q->sysfs_lock);
- queue_flag_clear_unlocked(QUEUE_FLAG_REGISTERED, q);
- mutex_unlock(&q->sysfs_lock);
+ /* Return early if disk->queue was never registered. */
+ if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
+ return;
- wbt_exit(q);
+ /*
+ * Since sysfs_remove_dir() prevents adding new directory entries
+ * before removal of existing entries starts, protect against
+ * concurrent elv_iosched_store() calls.
+ */
+ mutex_lock(&q->sysfs_lock);
+ spin_lock_irq(q->queue_lock);
+ queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
+ spin_unlock_irq(q->queue_lock);
+ /*
+ * Remove the sysfs attributes before unregistering the queue data
+ * structures that can be modified through sysfs.
+ */
if (q->mq_ops)
blk_mq_unregister_dev(disk_to_dev(disk), q);
-
- if (q->request_fn || (q->mq_ops && q->elevator))
- elv_unregister_queue(q);
+ mutex_unlock(&q->sysfs_lock);
kobject_uevent(&q->kobj, KOBJ_REMOVE);
kobject_del(&q->kobj);
blk_trace_remove_sysfs(disk_to_dev(disk));
+
+ wbt_exit(q);
+
+ mutex_lock(&q->sysfs_lock);
+ if (q->request_fn || (q->mq_ops && q->elevator))
+ elv_unregister_queue(q);
+ mutex_unlock(&q->sysfs_lock);
+
kobject_put(&disk_to_dev(disk)->kobj);
}
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index d19f416d6101..c5a131673733 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -216,9 +216,9 @@ struct throtl_data
unsigned int scale;
- struct latency_bucket tmp_buckets[LATENCY_BUCKET_SIZE];
- struct avg_latency_bucket avg_buckets[LATENCY_BUCKET_SIZE];
- struct latency_bucket __percpu *latency_buckets;
+ struct latency_bucket tmp_buckets[2][LATENCY_BUCKET_SIZE];
+ struct avg_latency_bucket avg_buckets[2][LATENCY_BUCKET_SIZE];
+ struct latency_bucket __percpu *latency_buckets[2];
unsigned long last_calculate_time;
unsigned long filtered_latency;
@@ -1511,10 +1511,20 @@ static struct cftype throtl_legacy_files[] = {
.seq_show = blkg_print_stat_bytes,
},
{
+ .name = "throttle.io_service_bytes_recursive",
+ .private = (unsigned long)&blkcg_policy_throtl,
+ .seq_show = blkg_print_stat_bytes_recursive,
+ },
+ {
.name = "throttle.io_serviced",
.private = (unsigned long)&blkcg_policy_throtl,
.seq_show = blkg_print_stat_ios,
},
+ {
+ .name = "throttle.io_serviced_recursive",
+ .private = (unsigned long)&blkcg_policy_throtl,
+ .seq_show = blkg_print_stat_ios_recursive,
+ },
{ } /* terminate */
};
@@ -2040,10 +2050,10 @@ static void blk_throtl_update_idletime(struct throtl_grp *tg)
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static void throtl_update_latency_buckets(struct throtl_data *td)
{
- struct avg_latency_bucket avg_latency[LATENCY_BUCKET_SIZE];
- int i, cpu;
- unsigned long last_latency = 0;
- unsigned long latency;
+ struct avg_latency_bucket avg_latency[2][LATENCY_BUCKET_SIZE];
+ int i, cpu, rw;
+ unsigned long last_latency[2] = { 0 };
+ unsigned long latency[2];
if (!blk_queue_nonrot(td->queue))
return;
@@ -2052,56 +2062,67 @@ static void throtl_update_latency_buckets(struct throtl_data *td)
td->last_calculate_time = jiffies;
memset(avg_latency, 0, sizeof(avg_latency));
- for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
- struct latency_bucket *tmp = &td->tmp_buckets[i];
-
- for_each_possible_cpu(cpu) {
- struct latency_bucket *bucket;
-
- /* this isn't race free, but ok in practice */
- bucket = per_cpu_ptr(td->latency_buckets, cpu);
- tmp->total_latency += bucket[i].total_latency;
- tmp->samples += bucket[i].samples;
- bucket[i].total_latency = 0;
- bucket[i].samples = 0;
- }
+ for (rw = READ; rw <= WRITE; rw++) {
+ for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
+ struct latency_bucket *tmp = &td->tmp_buckets[rw][i];
+
+ for_each_possible_cpu(cpu) {
+ struct latency_bucket *bucket;
+
+ /* this isn't race free, but ok in practice */
+ bucket = per_cpu_ptr(td->latency_buckets[rw],
+ cpu);
+ tmp->total_latency += bucket[i].total_latency;
+ tmp->samples += bucket[i].samples;
+ bucket[i].total_latency = 0;
+ bucket[i].samples = 0;
+ }
- if (tmp->samples >= 32) {
- int samples = tmp->samples;
+ if (tmp->samples >= 32) {
+ int samples = tmp->samples;
- latency = tmp->total_latency;
+ latency[rw] = tmp->total_latency;
- tmp->total_latency = 0;
- tmp->samples = 0;
- latency /= samples;
- if (latency == 0)
- continue;
- avg_latency[i].latency = latency;
+ tmp->total_latency = 0;
+ tmp->samples = 0;
+ latency[rw] /= samples;
+ if (latency[rw] == 0)
+ continue;
+ avg_latency[rw][i].latency = latency[rw];
+ }
}
}
- for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
- if (!avg_latency[i].latency) {
- if (td->avg_buckets[i].latency < last_latency)
- td->avg_buckets[i].latency = last_latency;
- continue;
- }
+ for (rw = READ; rw <= WRITE; rw++) {
+ for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
+ if (!avg_latency[rw][i].latency) {
+ if (td->avg_buckets[rw][i].latency < last_latency[rw])
+ td->avg_buckets[rw][i].latency =
+ last_latency[rw];
+ continue;
+ }
- if (!td->avg_buckets[i].valid)
- latency = avg_latency[i].latency;
- else
- latency = (td->avg_buckets[i].latency * 7 +
- avg_latency[i].latency) >> 3;
+ if (!td->avg_buckets[rw][i].valid)
+ latency[rw] = avg_latency[rw][i].latency;
+ else
+ latency[rw] = (td->avg_buckets[rw][i].latency * 7 +
+ avg_latency[rw][i].latency) >> 3;
- td->avg_buckets[i].latency = max(latency, last_latency);
- td->avg_buckets[i].valid = true;
- last_latency = td->avg_buckets[i].latency;
+ td->avg_buckets[rw][i].latency = max(latency[rw],
+ last_latency[rw]);
+ td->avg_buckets[rw][i].valid = true;
+ last_latency[rw] = td->avg_buckets[rw][i].latency;
+ }
}
for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
throtl_log(&td->service_queue,
- "Latency bucket %d: latency=%ld, valid=%d", i,
- td->avg_buckets[i].latency, td->avg_buckets[i].valid);
+ "Latency bucket %d: read latency=%ld, read valid=%d, "
+ "write latency=%ld, write valid=%d", i,
+ td->avg_buckets[READ][i].latency,
+ td->avg_buckets[READ][i].valid,
+ td->avg_buckets[WRITE][i].latency,
+ td->avg_buckets[WRITE][i].valid);
}
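The per-bucket arithmetic above is a 7/8-weighted moving average plus a monotonic clamp: a bucket for larger IOs is never allowed to report a lower latency than the bucket before it. A standalone sketch with made-up samples (the valid flag is approximated by a zero check):

#include <stdio.h>

#define NBUCKETS 4

int main(void)
{
	unsigned long avg[NBUCKETS] = { 0 };
	unsigned long sample[NBUCKETS] = { 100, 80, 300, 500 };
	unsigned long last_latency = 0;

	for (int i = 0; i < NBUCKETS; i++) {
		/* blend the new per-bucket average into the estimate */
		unsigned long latency = avg[i] ?
			(avg[i] * 7 + sample[i]) >> 3 : sample[i];

		/* a larger bucket must not report lower latency */
		avg[i] = latency > last_latency ? latency : last_latency;
		last_latency = avg[i];
		printf("bucket %d: latency=%lu\n", i, avg[i]);
	}
	/* bucket 1's low sample (80) gets raised to bucket 0's 100 */
	return 0;
}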
#else
static inline void throtl_update_latency_buckets(struct throtl_data *td)
@@ -2242,16 +2263,17 @@ static void throtl_track_latency(struct throtl_data *td, sector_t size,
struct latency_bucket *latency;
int index;
- if (!td || td->limit_index != LIMIT_LOW || op != REQ_OP_READ ||
+ if (!td || td->limit_index != LIMIT_LOW ||
+ !(op == REQ_OP_READ || op == REQ_OP_WRITE) ||
!blk_queue_nonrot(td->queue))
return;
index = request_bucket_index(size);
- latency = get_cpu_ptr(td->latency_buckets);
+ latency = get_cpu_ptr(td->latency_buckets[op]);
latency[index].total_latency += time;
latency[index].samples++;
- put_cpu_ptr(td->latency_buckets);
+ put_cpu_ptr(td->latency_buckets[op]);
}
void blk_throtl_stat_add(struct request *rq, u64 time_ns)
@@ -2270,6 +2292,7 @@ void blk_throtl_bio_endio(struct bio *bio)
unsigned long finish_time;
unsigned long start_time;
unsigned long lat;
+ int rw = bio_data_dir(bio);
tg = bio->bi_cg_private;
if (!tg)
@@ -2298,7 +2321,7 @@ void blk_throtl_bio_endio(struct bio *bio)
bucket = request_bucket_index(
blk_stat_size(&bio->bi_issue_stat));
- threshold = tg->td->avg_buckets[bucket].latency +
+ threshold = tg->td->avg_buckets[rw][bucket].latency +
tg->latency_target;
if (lat > threshold)
tg->bad_bio_cnt++;
@@ -2391,9 +2414,16 @@ int blk_throtl_init(struct request_queue *q)
td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
if (!td)
return -ENOMEM;
- td->latency_buckets = __alloc_percpu(sizeof(struct latency_bucket) *
+ td->latency_buckets[READ] = __alloc_percpu(sizeof(struct latency_bucket) *
LATENCY_BUCKET_SIZE, __alignof__(u64));
- if (!td->latency_buckets) {
+ if (!td->latency_buckets[READ]) {
+ kfree(td);
+ return -ENOMEM;
+ }
+ td->latency_buckets[WRITE] = __alloc_percpu(sizeof(struct latency_bucket) *
+ LATENCY_BUCKET_SIZE, __alignof__(u64));
+ if (!td->latency_buckets[WRITE]) {
+ free_percpu(td->latency_buckets[READ]);
kfree(td);
return -ENOMEM;
}
@@ -2412,7 +2442,8 @@ int blk_throtl_init(struct request_queue *q)
/* activate policy */
ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
if (ret) {
- free_percpu(td->latency_buckets);
+ free_percpu(td->latency_buckets[READ]);
+ free_percpu(td->latency_buckets[WRITE]);
kfree(td);
}
return ret;
@@ -2423,7 +2454,8 @@ void blk_throtl_exit(struct request_queue *q)
BUG_ON(!q->td);
throtl_shutdown_wq(q);
blkcg_deactivate_policy(q, &blkcg_policy_throtl);
- free_percpu(q->td->latency_buckets);
+ free_percpu(q->td->latency_buckets[READ]);
+ free_percpu(q->td->latency_buckets[WRITE]);
kfree(q->td);
}
@@ -2441,15 +2473,17 @@ void blk_throtl_register_queue(struct request_queue *q)
} else {
td->throtl_slice = DFL_THROTL_SLICE_HD;
td->filtered_latency = LATENCY_FILTERED_HD;
- for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
- td->avg_buckets[i].latency = DFL_HD_BASELINE_LATENCY;
+ for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
+ td->avg_buckets[READ][i].latency = DFL_HD_BASELINE_LATENCY;
+ td->avg_buckets[WRITE][i].latency = DFL_HD_BASELINE_LATENCY;
+ }
}
#ifndef CONFIG_BLK_DEV_THROTTLING_LOW
/* if no low limit, use previous default */
td->throtl_slice = DFL_THROTL_SLICE_HD;
#endif
- td->track_bio_latency = !q->mq_ops && !q->request_fn;
+ td->track_bio_latency = !queue_is_rq_based(q);
if (!td->track_bio_latency)
blk_stat_enable_accounting(q);
}
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 764ecf9aeb30..a05e3676d24a 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -112,7 +112,9 @@ static void blk_rq_timed_out(struct request *req)
static void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout,
unsigned int *next_set)
{
- if (time_after_eq(jiffies, rq->deadline)) {
+ const unsigned long deadline = blk_rq_deadline(rq);
+
+ if (time_after_eq(jiffies, deadline)) {
list_del_init(&rq->timeout_list);
/*
@@ -120,8 +122,8 @@ static void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout
*/
if (!blk_mark_rq_complete(rq))
blk_rq_timed_out(rq);
- } else if (!*next_set || time_after(*next_timeout, rq->deadline)) {
- *next_timeout = rq->deadline;
+ } else if (!*next_set || time_after(*next_timeout, deadline)) {
+ *next_timeout = deadline;
*next_set = 1;
}
}
@@ -156,12 +158,17 @@ void blk_timeout_work(struct work_struct *work)
*/
void blk_abort_request(struct request *req)
{
- if (blk_mark_rq_complete(req))
- return;
-
if (req->q->mq_ops) {
- blk_mq_rq_timed_out(req, false);
+ /*
+ * All we need to ensure is that the timeout scan takes place
+ * immediately and that the scan sees the new timeout value.
+ * No need for fancy synchronization.
+ */
+ blk_rq_set_deadline(req, jiffies);
+ mod_timer(&req->q->timeout, 0);
} else {
+ if (blk_mark_rq_complete(req))
+ return;
blk_delete_timer(req);
blk_rq_timed_out(req);
}
@@ -208,7 +215,8 @@ void blk_add_timer(struct request *req)
if (!req->timeout)
req->timeout = q->rq_timeout;
- WRITE_ONCE(req->deadline, jiffies + req->timeout);
+ blk_rq_set_deadline(req, jiffies + req->timeout);
+ req->rq_flags &= ~RQF_MQ_TIMEOUT_EXPIRED;
/*
* Only the non-mq case needs to add the request to a protected list.
@@ -222,7 +230,7 @@ void blk_add_timer(struct request *req)
* than an existing one, modify the timer. Round up to next nearest
* second.
*/
- expiry = blk_rq_timeout(round_jiffies_up(req->deadline));
+ expiry = blk_rq_timeout(round_jiffies_up(blk_rq_deadline(req)));
if (!timer_pending(&q->timeout) ||
time_before(expiry, q->timeout.expires)) {
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index ff57fb51b338..acb7252c7e81 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -22,6 +22,48 @@ static inline sector_t blk_zone_start(struct request_queue *q,
}
/*
+ * Return true if a request is a write request that needs zone write locking.
+ */
+bool blk_req_needs_zone_write_lock(struct request *rq)
+{
+ if (!rq->q->seq_zones_wlock)
+ return false;
+
+ if (blk_rq_is_passthrough(rq))
+ return false;
+
+ switch (req_op(rq)) {
+ case REQ_OP_WRITE_ZEROES:
+ case REQ_OP_WRITE_SAME:
+ case REQ_OP_WRITE:
+ return blk_rq_zone_is_seq(rq);
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL_GPL(blk_req_needs_zone_write_lock);
+
+void __blk_req_zone_write_lock(struct request *rq)
+{
+ if (WARN_ON_ONCE(test_and_set_bit(blk_rq_zone_no(rq),
+ rq->q->seq_zones_wlock)))
+ return;
+
+ WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
+ rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;
+}
+EXPORT_SYMBOL_GPL(__blk_req_zone_write_lock);
+
+void __blk_req_zone_write_unlock(struct request *rq)
+{
+ rq->rq_flags &= ~RQF_ZONE_WRITE_LOCKED;
+ if (rq->q->seq_zones_wlock)
+ WARN_ON_ONCE(!test_and_clear_bit(blk_rq_zone_no(rq),
+ rq->q->seq_zones_wlock));
+}
+EXPORT_SYMBOL_GPL(__blk_req_zone_write_unlock);
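Conceptually the zone write lock is one test-and-set bit per sequential zone. A self-contained user-space model of the same semantics using C11 atomics — the kernel does this with test_and_set_bit() on q->seq_zones_wlock; the zone count and names below are hypothetical:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_ZONES 64 /* fits in one unsigned long for this sketch */

static atomic_ulong seq_zones_wlock;

static bool zone_write_trylock(unsigned int zno)
{
	unsigned long bit = 1UL << zno;

	/* test-and-set: succeed only if the bit was previously clear */
	return !(atomic_fetch_or(&seq_zones_wlock, bit) & bit);
}

static void zone_write_unlock(unsigned int zno)
{
	atomic_fetch_and(&seq_zones_wlock, ~(1UL << zno));
}

int main(void)
{
	printf("first lock of zone 3: %d\n", zone_write_trylock(3));  /* 1 */
	printf("second lock of zone 3: %d\n", zone_write_trylock(3)); /* 0 */
	zone_write_unlock(3);
	printf("after unlock: %d\n", zone_write_trylock(3));          /* 1 */
	return 0;
}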
+
+/*
* Check that a zone report belongs to the partition.
* If yes, fix its start sector and write pointer, copy it in the
* zone information array and return true. Return false otherwise.
diff --git a/block/blk.h b/block/blk.h
index 442098aa9463..46db5dc83dcb 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -120,33 +120,23 @@ void blk_account_io_completion(struct request *req, unsigned int bytes);
void blk_account_io_done(struct request *req);
/*
- * Internal atomic flags for request handling
- */
-enum rq_atomic_flags {
- /*
- * Keep these two bits first - not because we depend on the
- * value of them, but we do depend on them being in the same
- * byte of storage to ensure ordering on writes. Keeping them
- * first will achieve that nicely.
- */
- REQ_ATOM_COMPLETE = 0,
- REQ_ATOM_STARTED,
-
- REQ_ATOM_POLL_SLEPT,
-};
-
-/*
* EH timer and IO completion will both attempt to 'grab' the request, make
- * sure that only one of them succeeds
+ * sure that only one of them succeeds. Steal the bottom bit of the
+ * __deadline field for this.
*/
static inline int blk_mark_rq_complete(struct request *rq)
{
- return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
+ return test_and_set_bit(0, &rq->__deadline);
}
static inline void blk_clear_rq_complete(struct request *rq)
{
- clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
+ clear_bit(0, &rq->__deadline);
+}
+
+static inline bool blk_rq_is_complete(struct request *rq)
+{
+ return test_bit(0, &rq->__deadline);
}
/*
@@ -172,6 +162,9 @@ static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq
e->type->ops.sq.elevator_deactivate_req_fn(q, rq);
}
+int elv_register_queue(struct request_queue *q);
+void elv_unregister_queue(struct request_queue *q);
+
struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);
#ifdef CONFIG_FAIL_IO_TIMEOUT
@@ -246,6 +239,21 @@ static inline void req_set_nomerge(struct request_queue *q, struct request *req)
}
/*
+ * Steal a bit from this field for legacy IO path atomic IO marking. Note that
+ * setting the deadline clears the bottom bit, potentially clearing the
+ * completed bit. The user has to be OK with this (current ones are fine).
+ */
+static inline void blk_rq_set_deadline(struct request *rq, unsigned long time)
+{
+ rq->__deadline = time & ~0x1UL;
+}
+
+static inline unsigned long blk_rq_deadline(struct request *rq)
+{
+ return rq->__deadline & ~0x1UL;
+}
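A user-space model of the bit-stealing trick: bit 0 of __deadline doubles as the completion mark, so storing a new deadline implicitly clears it, which is exactly the caveat the comment above spells out. The kernel marks completion with an atomic test_and_set_bit(); this single-threaded sketch only models the layout:

#include <stdio.h>

static unsigned long __deadline;

static int mark_rq_complete(void)
{
	int was_set = __deadline & 1UL;

	__deadline |= 1UL;
	return was_set;
}

static void set_deadline(unsigned long time)
{
	__deadline = time & ~1UL; /* clears the completed bit too */
}

static unsigned long get_deadline(void)
{
	return __deadline & ~1UL;
}

int main(void)
{
	set_deadline(1000);
	printf("complete already? %d\n", mark_rq_complete()); /* 0: we won */
	printf("complete already? %d\n", mark_rq_complete()); /* 1: lost race */
	set_deadline(2000);        /* resets the stolen bit as well */
	printf("deadline=%lu complete=%lu\n", get_deadline(), __deadline & 1UL);
	return 0;
}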
+
+/*
* Internal io_context interface
*/
void get_io_context(struct io_context *ioc);
diff --git a/block/bounce.c b/block/bounce.c
index 1d05c422c932..6a3e68292273 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -113,45 +113,50 @@ int init_emergency_isa_pool(void)
static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
{
unsigned char *vfrom;
- struct bio_vec tovec, *fromvec = from->bi_io_vec;
+ struct bio_vec tovec, fromvec;
struct bvec_iter iter;
+ /*
+ * The bio of @from is created by bounce, so we can iterate
+ * its bvec from start to end, but @from->bi_iter can't be
+ * trusted because it may have been changed by splitting.
+ */
+ struct bvec_iter from_iter = BVEC_ITER_ALL_INIT;
bio_for_each_segment(tovec, to, iter) {
- if (tovec.bv_page != fromvec->bv_page) {
+ fromvec = bio_iter_iovec(from, from_iter);
+ if (tovec.bv_page != fromvec.bv_page) {
/*
* fromvec->bv_offset and fromvec->bv_len might have
* been modified by the block layer, so use the original
* copy, bounce_copy_vec already uses tovec->bv_len
*/
- vfrom = page_address(fromvec->bv_page) +
+ vfrom = page_address(fromvec.bv_page) +
tovec.bv_offset;
bounce_copy_vec(&tovec, vfrom);
flush_dcache_page(tovec.bv_page);
}
-
- fromvec++;
+ bio_advance_iter(from, &from_iter, tovec.bv_len);
}
}
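The essence of the fix is to stop indexing @from's bvec array via its possibly-split-advanced iterator and instead walk the source with an independent cursor starting at offset zero. A toy model of copying across mismatched segment boundaries with such a cursor (data and layout invented for illustration):

#include <stdio.h>
#include <string.h>

struct vec { const char *data; size_t len; };

int main(void)
{
	/* source segments, as the bounce bio's bvec array would hold them */
	const struct vec from[] = {
		{ "hello ", 6 }, { "bounce ", 7 }, { "world", 5 }
	};
	/* destination segment lengths, deliberately misaligned */
	const size_t to_len[] = { 4, 9, 5 };
	size_t fi = 0, foff = 0; /* independent cursor over 'from' */
	char out[32] = { 0 }, *p = out;

	for (size_t ti = 0; ti < 3; ti++) {
		size_t want = to_len[ti];

		while (want) {
			size_t n = from[fi].len - foff;

			if (n > want)
				n = want;
			memcpy(p, from[fi].data + foff, n);
			p += n;
			foff += n;
			want -= n;
			if (foff == from[fi].len) {
				fi++; /* bio_advance_iter() in miniature */
				foff = 0;
			}
		}
	}
	printf("%s\n", out);
	return 0;
}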
static void bounce_end_io(struct bio *bio, mempool_t *pool)
{
struct bio *bio_orig = bio->bi_private;
- struct bio_vec *bvec, *org_vec;
+ struct bio_vec *bvec, orig_vec;
int i;
- int start = bio_orig->bi_iter.bi_idx;
+ struct bvec_iter orig_iter = bio_orig->bi_iter;
/*
* free up bounce indirect pages used
*/
bio_for_each_segment_all(bvec, bio, i) {
- org_vec = bio_orig->bi_io_vec + i + start;
-
- if (bvec->bv_page == org_vec->bv_page)
- continue;
-
- dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
- mempool_free(bvec->bv_page, pool);
+ orig_vec = bio_iter_iovec(bio_orig, orig_iter);
+ if (bvec->bv_page != orig_vec.bv_page) {
+ dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
+ mempool_free(bvec->bv_page, pool);
+ }
+ bio_advance_iter(bio_orig, &orig_iter, orig_vec.bv_len);
}
bio_orig->bi_status = bio->bi_status;
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index 15d25ccd51a5..1474153f73e3 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -30,7 +30,7 @@
/**
* bsg_teardown_job - routine to teardown a bsg job
- * @job: bsg_job that is to be torn down
+ * @kref: kref inside bsg_job that is to be torn down
*/
static void bsg_teardown_job(struct kref *kref)
{
@@ -251,6 +251,7 @@ static void bsg_exit_rq(struct request_queue *q, struct request *req)
* @name: device to give bsg device
* @job_fn: bsg job handler
* @dd_job_size: size of LLD data needed for each job
+ * @release: @dev release function
*/
struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
bsg_job_fn *job_fn, int dd_job_size,
diff --git a/block/bsg.c b/block/bsg.c
index 452f94f1c5d4..2e2c1e222209 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -32,6 +32,9 @@
#define BSG_DESCRIPTION "Block layer SCSI generic (bsg) driver"
#define BSG_VERSION "0.4"
+#define bsg_dbg(bd, fmt, ...) \
+ pr_debug("%s: " fmt, (bd)->name, ##__VA_ARGS__)
+
struct bsg_device {
struct request_queue *queue;
spinlock_t lock;
@@ -55,14 +58,6 @@ enum {
#define BSG_DEFAULT_CMDS 64
#define BSG_MAX_DEVS 32768
-#undef BSG_DEBUG
-
-#ifdef BSG_DEBUG
-#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ##args)
-#else
-#define dprintk(fmt, args...)
-#endif
-
static DEFINE_MUTEX(bsg_mutex);
static DEFINE_IDR(bsg_minor_idr);
@@ -123,7 +118,7 @@ static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
bc->bd = bd;
INIT_LIST_HEAD(&bc->list);
- dprintk("%s: returning free cmd %p\n", bd->name, bc);
+ bsg_dbg(bd, "returning free cmd %p\n", bc);
return bc;
out:
spin_unlock_irq(&bd->lock);
@@ -222,7 +217,8 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t mode)
if (!bcd->class_dev)
return ERR_PTR(-ENXIO);
- dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
+ bsg_dbg(bd, "map hdr %llx/%u %llx/%u\n",
+ (unsigned long long) hdr->dout_xferp,
hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
hdr->din_xfer_len);
@@ -299,8 +295,8 @@ static void bsg_rq_end_io(struct request *rq, blk_status_t status)
struct bsg_device *bd = bc->bd;
unsigned long flags;
- dprintk("%s: finished rq %p bc %p, bio %p\n",
- bd->name, rq, bc, bc->bio);
+ bsg_dbg(bd, "finished rq %p bc %p, bio %p\n",
+ rq, bc, bc->bio);
bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);
@@ -333,7 +329,7 @@ static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
list_add_tail(&bc->list, &bd->busy_list);
spin_unlock_irq(&bd->lock);
- dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);
+ bsg_dbg(bd, "queueing rq %p, bc %p\n", rq, bc);
rq->end_io_data = bc;
blk_execute_rq_nowait(q, NULL, rq, at_head, bsg_rq_end_io);
@@ -379,7 +375,7 @@ static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
}
} while (1);
- dprintk("%s: returning done %p\n", bd->name, bc);
+ bsg_dbg(bd, "returning done %p\n", bc);
return bc;
}
@@ -390,7 +386,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
struct scsi_request *req = scsi_req(rq);
int ret = 0;
- dprintk("rq %p bio %p 0x%x\n", rq, bio, req->result);
+ pr_debug("rq %p bio %p 0x%x\n", rq, bio, req->result);
/*
* fill in all the output members
*/
@@ -469,7 +465,7 @@ static int bsg_complete_all_commands(struct bsg_device *bd)
struct bsg_command *bc;
int ret, tret;
- dprintk("%s: entered\n", bd->name);
+ bsg_dbg(bd, "entered\n");
/*
* wait for all commands to complete
@@ -572,7 +568,7 @@ bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
int ret;
ssize_t bytes_read;
- dprintk("%s: read %zd bytes\n", bd->name, count);
+ bsg_dbg(bd, "read %zd bytes\n", count);
bsg_set_block(bd, file);
@@ -646,7 +642,7 @@ bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
ssize_t bytes_written;
int ret;
- dprintk("%s: write %zd bytes\n", bd->name, count);
+ bsg_dbg(bd, "write %zd bytes\n", count);
if (unlikely(uaccess_kernel()))
return -EINVAL;
@@ -664,7 +660,7 @@ bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
if (!bytes_written || err_block_err(ret))
bytes_written = ret;
- dprintk("%s: returning %zd\n", bd->name, bytes_written);
+ bsg_dbg(bd, "returning %zd\n", bytes_written);
return bytes_written;
}
@@ -717,7 +713,7 @@ static int bsg_put_device(struct bsg_device *bd)
hlist_del(&bd->dev_list);
mutex_unlock(&bsg_mutex);
- dprintk("%s: tearing down\n", bd->name);
+ bsg_dbg(bd, "tearing down\n");
/*
* close can always block
@@ -744,9 +740,7 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
struct file *file)
{
struct bsg_device *bd;
-#ifdef BSG_DEBUG
unsigned char buf[32];
-#endif
if (!blk_queue_scsi_passthrough(rq)) {
WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
@@ -771,7 +765,7 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));
strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
- dprintk("bound to <%s>, max queue %d\n",
+ bsg_dbg(bd, "bound to <%s>, max queue %d\n",
format_dev_t(buf, inode->i_rdev), bd->max_queue);
mutex_unlock(&bsg_mutex);
@@ -845,10 +839,10 @@ static int bsg_release(struct inode *inode, struct file *file)
return bsg_put_device(bd);
}
-static unsigned int bsg_poll(struct file *file, poll_table *wait)
+static __poll_t bsg_poll(struct file *file, poll_table *wait)
{
struct bsg_device *bd = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &bd->wq_done, wait);
poll_wait(file, &bd->wq_free, wait);
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index b83f77460d28..9de9f156e203 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -50,8 +50,6 @@ struct deadline_data {
int front_merges;
};
-static void deadline_move_request(struct deadline_data *, struct request *);
-
static inline struct rb_root *
deadline_rb_root(struct deadline_data *dd, struct request *rq)
{
@@ -100,6 +98,12 @@ deadline_add_request(struct request_queue *q, struct request *rq)
struct deadline_data *dd = q->elevator->elevator_data;
const int data_dir = rq_data_dir(rq);
+ /*
+ * This may be a requeue of a write request that has locked its
+ * target zone. If that is the case, this releases the zone lock.
+ */
+ blk_req_zone_write_unlock(rq);
+
deadline_add_rq_rb(dd, rq);
/*
@@ -190,6 +194,12 @@ deadline_move_to_dispatch(struct deadline_data *dd, struct request *rq)
{
struct request_queue *q = rq->q;
+ /*
+ * For a zoned block device, write requests must write lock their
+ * target zone.
+ */
+ blk_req_zone_write_lock(rq);
+
deadline_remove_request(q, rq);
elv_dispatch_add_tail(q, rq);
}
@@ -231,6 +241,69 @@ static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
}
/*
+ * For the specified data direction, return the next request to dispatch using
+ * arrival ordered lists.
+ */
+static struct request *
+deadline_fifo_request(struct deadline_data *dd, int data_dir)
+{
+ struct request *rq;
+
+ if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
+ return NULL;
+
+ if (list_empty(&dd->fifo_list[data_dir]))
+ return NULL;
+
+ rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
+ if (data_dir == READ || !blk_queue_is_zoned(rq->q))
+ return rq;
+
+ /*
+ * Look for a write request that can be dispatched, that is one with
+ * an unlocked target zone.
+ */
+ list_for_each_entry(rq, &dd->fifo_list[WRITE], queuelist) {
+ if (blk_req_can_dispatch_to_zone(rq))
+ return rq;
+ }
+
+ return NULL;
+}
+
+/*
+ * For the specified data direction, return the next request to dispatch using
+ * sector position sorted lists.
+ */
+static struct request *
+deadline_next_request(struct deadline_data *dd, int data_dir)
+{
+ struct request *rq;
+
+ if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
+ return NULL;
+
+ rq = dd->next_rq[data_dir];
+ if (!rq)
+ return NULL;
+
+ if (data_dir == READ || !blk_queue_is_zoned(rq->q))
+ return rq;
+
+ /*
+ * Look for a write request that can be dispatched, that is one with
+ * an unlocked target zone.
+ */
+ while (rq) {
+ if (blk_req_can_dispatch_to_zone(rq))
+ return rq;
+ rq = deadline_latter_request(rq);
+ }
+
+ return NULL;
+}
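In short: reads still dispatch in strict FIFO order, while writes skip any request whose target zone is currently write-locked. A minimal user-space model of that scan (data layout hypothetical):

#include <stdbool.h>
#include <stdio.h>

struct rq { int zone; bool is_write; };

static bool zone_locked[8];

static const struct rq *fifo_next(const struct rq *fifo, int n, bool writes)
{
	for (int i = 0; i < n; i++) {
		const struct rq *rq = &fifo[i];

		if (rq->is_write != writes)
			continue;
		/* reads always dispatch; writes need an unlocked zone */
		if (!writes || !zone_locked[rq->zone])
			return rq;
	}
	return NULL;
}

int main(void)
{
	const struct rq fifo[] = {
		{ 2, true }, { 2, true }, { 5, true },
	};

	zone_locked[2] = true; /* zone 2 has a write in flight */
	const struct rq *rq = fifo_next(fifo, 3, true);
	printf("dispatch write to zone %d\n", rq ? rq->zone : -1); /* 5 */
	return 0;
}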
+
+/*
* deadline_dispatch_requests selects the best request according to
* read/write expire, fifo_batch, etc
*/
@@ -239,16 +312,15 @@ static int deadline_dispatch_requests(struct request_queue *q, int force)
struct deadline_data *dd = q->elevator->elevator_data;
const int reads = !list_empty(&dd->fifo_list[READ]);
const int writes = !list_empty(&dd->fifo_list[WRITE]);
- struct request *rq;
+ struct request *rq, *next_rq;
int data_dir;
/*
* batches are currently reads XOR writes
*/
- if (dd->next_rq[WRITE])
- rq = dd->next_rq[WRITE];
- else
- rq = dd->next_rq[READ];
+ rq = deadline_next_request(dd, WRITE);
+ if (!rq)
+ rq = deadline_next_request(dd, READ);
if (rq && dd->batching < dd->fifo_batch)
/* we have a next request and are still entitled to batch */
@@ -262,7 +334,8 @@ static int deadline_dispatch_requests(struct request_queue *q, int force)
if (reads) {
BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));
- if (writes && (dd->starved++ >= dd->writes_starved))
+ if (deadline_fifo_request(dd, WRITE) &&
+ (dd->starved++ >= dd->writes_starved))
goto dispatch_writes;
data_dir = READ;
@@ -291,21 +364,29 @@ dispatch_find_request:
/*
* we are not running a batch, find best request for selected data_dir
*/
- if (deadline_check_fifo(dd, data_dir) || !dd->next_rq[data_dir]) {
+ next_rq = deadline_next_request(dd, data_dir);
+ if (deadline_check_fifo(dd, data_dir) || !next_rq) {
/*
* A deadline has expired, the last request was in the other
* direction, or we have run out of higher-sectored requests.
* Start again from the request with the earliest expiry time.
*/
- rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
+ rq = deadline_fifo_request(dd, data_dir);
} else {
/*
* The last req was the same dir and we have a next request in
* sort order. No expired requests so continue on from here.
*/
- rq = dd->next_rq[data_dir];
+ rq = next_rq;
}
+ /*
+ * For a zoned block device, if we only have writes queued and none of
+ * them can be dispatched, rq will be NULL.
+ */
+ if (!rq)
+ return 0;
+
dd->batching = 0;
dispatch_request:
@@ -318,6 +399,16 @@ dispatch_request:
return 1;
}
+/*
+ * For zoned block devices, write unlock the target zone of completed
+ * write requests.
+ */
+static void
+deadline_completed_request(struct request_queue *q, struct request *rq)
+{
+ blk_req_zone_write_unlock(rq);
+}
+
static void deadline_exit_queue(struct elevator_queue *e)
{
struct deadline_data *dd = e->elevator_data;
@@ -439,6 +530,7 @@ static struct elevator_type iosched_deadline = {
.elevator_merged_fn = deadline_merged_request,
.elevator_merge_req_fn = deadline_merged_requests,
.elevator_dispatch_fn = deadline_dispatch_requests,
+ .elevator_completed_req_fn = deadline_completed_request,
.elevator_add_req_fn = deadline_add_request,
.elevator_former_req_fn = elv_rb_former_request,
.elevator_latter_req_fn = elv_rb_latter_request,
diff --git a/block/elevator.c b/block/elevator.c
index 7bda083d5968..e87e9b43aba0 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -869,6 +869,8 @@ int elv_register_queue(struct request_queue *q)
struct elevator_queue *e = q->elevator;
int error;
+ lockdep_assert_held(&q->sysfs_lock);
+
error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
if (!error) {
struct elv_fs_entry *attr = e->type->elevator_attrs;
@@ -886,10 +888,11 @@ int elv_register_queue(struct request_queue *q)
}
return error;
}
-EXPORT_SYMBOL(elv_register_queue);
void elv_unregister_queue(struct request_queue *q)
{
+ lockdep_assert_held(&q->sysfs_lock);
+
if (q) {
struct elevator_queue *e = q->elevator;
@@ -900,7 +903,6 @@ void elv_unregister_queue(struct request_queue *q)
wbt_enable_default(q);
}
}
-EXPORT_SYMBOL(elv_unregister_queue);
int elv_register(struct elevator_type *e)
{
@@ -967,7 +969,10 @@ static int elevator_switch_mq(struct request_queue *q,
{
int ret;
+ lockdep_assert_held(&q->sysfs_lock);
+
blk_mq_freeze_queue(q);
+ blk_mq_quiesce_queue(q);
if (q->elevator) {
if (q->elevator->registered)
@@ -994,6 +999,7 @@ static int elevator_switch_mq(struct request_queue *q,
blk_add_trace_msg(q, "elv switch: none");
out:
+ blk_mq_unquiesce_queue(q);
blk_mq_unfreeze_queue(q);
return ret;
}
@@ -1010,6 +1016,8 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
bool old_registered = false;
int err;
+ lockdep_assert_held(&q->sysfs_lock);
+
if (q->mq_ops)
return elevator_switch_mq(q, new_e);
diff --git a/block/genhd.c b/block/genhd.c
index 96a66f671720..88a53c188cb7 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -629,16 +629,18 @@ exit:
}
/**
- * device_add_disk - add partitioning information to kernel list
+ * __device_add_disk - add disk information to kernel list
* @parent: parent device for the disk
* @disk: per-device partitioning information
+ * @register_queue: register the queue if set to true
*
* This function registers the partitioning information in @disk
* with the kernel.
*
* FIXME: error handling
*/
-void device_add_disk(struct device *parent, struct gendisk *disk)
+static void __device_add_disk(struct device *parent, struct gendisk *disk,
+ bool register_queue)
{
dev_t devt;
int retval;
@@ -682,7 +684,8 @@ void device_add_disk(struct device *parent, struct gendisk *disk)
exact_match, exact_lock, disk);
}
register_disk(parent, disk);
- blk_register_queue(disk);
+ if (register_queue)
+ blk_register_queue(disk);
/*
* Take an extra ref on queue which will be put on disk_release()
@@ -693,8 +696,19 @@ void device_add_disk(struct device *parent, struct gendisk *disk)
disk_add_events(disk);
blk_integrity_add(disk);
}
+
+void device_add_disk(struct device *parent, struct gendisk *disk)
+{
+ __device_add_disk(parent, disk, true);
+}
EXPORT_SYMBOL(device_add_disk);
+void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk)
+{
+ __device_add_disk(parent, disk, false);
+}
+EXPORT_SYMBOL(device_add_disk_no_queue_reg);
+
void del_gendisk(struct gendisk *disk)
{
struct disk_part_iter piter;
@@ -725,7 +739,8 @@ void del_gendisk(struct gendisk *disk)
* Unregister bdi before releasing device numbers (as they can
* get reused and we'd get clashes in sysfs).
*/
- bdi_unregister(disk->queue->backing_dev_info);
+ if (!(disk->flags & GENHD_FL_HIDDEN))
+ bdi_unregister(disk->queue->backing_dev_info);
blk_unregister_queue(disk);
} else {
WARN_ON(1);
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 0179e484ec98..c56f211c8440 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -59,6 +59,7 @@ struct deadline_data {
int front_merges;
spinlock_t lock;
+ spinlock_t zone_lock;
struct list_head dispatch;
};
@@ -192,13 +193,83 @@ static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
}
/*
+ * For the specified data direction, return the next request to
+ * dispatch using arrival ordered lists.
+ */
+static struct request *
+deadline_fifo_request(struct deadline_data *dd, int data_dir)
+{
+ struct request *rq;
+ unsigned long flags;
+
+ if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
+ return NULL;
+
+ if (list_empty(&dd->fifo_list[data_dir]))
+ return NULL;
+
+ rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
+ if (data_dir == READ || !blk_queue_is_zoned(rq->q))
+ return rq;
+
+ /*
+ * Look for a write request that can be dispatched, that is one with
+ * an unlocked target zone.
+ */
+ spin_lock_irqsave(&dd->zone_lock, flags);
+ list_for_each_entry(rq, &dd->fifo_list[WRITE], queuelist) {
+ if (blk_req_can_dispatch_to_zone(rq))
+ goto out;
+ }
+ rq = NULL;
+out:
+ spin_unlock_irqrestore(&dd->zone_lock, flags);
+
+ return rq;
+}
+
+/*
+ * For the specified data direction, return the next request to
+ * dispatch using sector position sorted lists.
+ */
+static struct request *
+deadline_next_request(struct deadline_data *dd, int data_dir)
+{
+ struct request *rq;
+ unsigned long flags;
+
+ if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
+ return NULL;
+
+ rq = dd->next_rq[data_dir];
+ if (!rq)
+ return NULL;
+
+ if (data_dir == READ || !blk_queue_is_zoned(rq->q))
+ return rq;
+
+ /*
+ * Look for a write request that can be dispatched, that is one with
+ * an unlocked target zone.
+ */
+ spin_lock_irqsave(&dd->zone_lock, flags);
+ while (rq) {
+ if (blk_req_can_dispatch_to_zone(rq))
+ break;
+ rq = deadline_latter_request(rq);
+ }
+ spin_unlock_irqrestore(&dd->zone_lock, flags);
+
+ return rq;
+}
+
+/*
* deadline_dispatch_requests selects the best request according to
* read/write expire, fifo_batch, etc
*/
-static struct request *__dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
+static struct request *__dd_dispatch_request(struct deadline_data *dd)
{
- struct deadline_data *dd = hctx->queue->elevator->elevator_data;
- struct request *rq;
+ struct request *rq, *next_rq;
bool reads, writes;
int data_dir;
@@ -214,10 +285,9 @@ static struct request *__dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
/*
* batches are currently reads XOR writes
*/
- if (dd->next_rq[WRITE])
- rq = dd->next_rq[WRITE];
- else
- rq = dd->next_rq[READ];
+ rq = deadline_next_request(dd, WRITE);
+ if (!rq)
+ rq = deadline_next_request(dd, READ);
if (rq && dd->batching < dd->fifo_batch)
/* we have a next request and are still entitled to batch */
@@ -231,7 +301,8 @@ static struct request *__dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
if (reads) {
BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));
- if (writes && (dd->starved++ >= dd->writes_starved))
+ if (deadline_fifo_request(dd, WRITE) &&
+ (dd->starved++ >= dd->writes_starved))
goto dispatch_writes;
data_dir = READ;
@@ -260,21 +331,29 @@ dispatch_find_request:
/*
* we are not running a batch, find best request for selected data_dir
*/
- if (deadline_check_fifo(dd, data_dir) || !dd->next_rq[data_dir]) {
+ next_rq = deadline_next_request(dd, data_dir);
+ if (deadline_check_fifo(dd, data_dir) || !next_rq) {
/*
* A deadline has expired, the last request was in the other
* direction, or we have run out of higher-sectored requests.
* Start again from the request with the earliest expiry time.
*/
- rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
+ rq = deadline_fifo_request(dd, data_dir);
} else {
/*
* The last req was the same dir and we have a next request in
* sort order. No expired requests so continue on from here.
*/
- rq = dd->next_rq[data_dir];
+ rq = next_rq;
}
+ /*
+ * For a zoned block device, if we only have writes queued and none of
+ * them can be dispatched, rq will be NULL.
+ */
+ if (!rq)
+ return NULL;
+
dd->batching = 0;
dispatch_request:
@@ -284,17 +363,27 @@ dispatch_request:
dd->batching++;
deadline_move_request(dd, rq);
done:
+ /*
+ * If the request needs its target zone locked, do it.
+ */
+ blk_req_zone_write_lock(rq);
rq->rq_flags |= RQF_STARTED;
return rq;
}
+/*
+ * One confusing aspect here is that we get called for a specific
+ * hardware queue, but we may return a request that is for a
+ * different hardware queue. This is because mq-deadline has shared
+ * state for all hardware queues, in terms of sorting, FIFOs, etc.
+ */
static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
struct deadline_data *dd = hctx->queue->elevator->elevator_data;
struct request *rq;
spin_lock(&dd->lock);
- rq = __dd_dispatch_request(hctx);
+ rq = __dd_dispatch_request(dd);
spin_unlock(&dd->lock);
return rq;
@@ -339,6 +428,7 @@ static int dd_init_queue(struct request_queue *q, struct elevator_type *e)
dd->front_merges = 1;
dd->fifo_batch = fifo_batch;
spin_lock_init(&dd->lock);
+ spin_lock_init(&dd->zone_lock);
INIT_LIST_HEAD(&dd->dispatch);
q->elevator = eq;
@@ -395,6 +485,12 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
struct deadline_data *dd = q->elevator->elevator_data;
const int data_dir = rq_data_dir(rq);
+ /*
+ * This may be a requeue of a write request that has locked its
+ * target zone. If that is the case, this releases the zone lock.
+ */
+ blk_req_zone_write_unlock(rq);
+
if (blk_mq_sched_try_insert_merge(q, rq))
return;
@@ -439,6 +535,26 @@ static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
spin_unlock(&dd->lock);
}
+/*
+ * For zoned block devices, write unlock the target zone of
+ * completed write requests. Do this while holding the zone lock
+ * spinlock so that the zone is never unlocked while deadline_fifo_request()
+ * or deadline_next_request() are executing.
+ */
+static void dd_completed_request(struct request *rq)
+{
+ struct request_queue *q = rq->q;
+
+ if (blk_queue_is_zoned(q)) {
+ struct deadline_data *dd = q->elevator->elevator_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->zone_lock, flags);
+ blk_req_zone_write_unlock(rq);
+ spin_unlock_irqrestore(&dd->zone_lock, flags);
+ }
+}
+
static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
{
struct deadline_data *dd = hctx->queue->elevator->elevator_data;
@@ -640,6 +756,7 @@ static struct elevator_type mq_deadline = {
.ops.mq = {
.insert_requests = dd_insert_requests,
.dispatch_request = dd_dispatch_request,
+ .completed_request = dd_completed_request,
.next_request = elv_rb_latter_request,
.former_request = elv_rb_former_request,
.bio_merge = dd_bio_merge,
diff --git a/block/partitions/msdos.c b/block/partitions/msdos.c
index 0af3a3db6fb0..82c44f7df911 100644
--- a/block/partitions/msdos.c
+++ b/block/partitions/msdos.c
@@ -301,7 +301,9 @@ static void parse_bsd(struct parsed_partitions *state,
continue;
bsd_start = le32_to_cpu(p->p_offset);
bsd_size = le32_to_cpu(p->p_size);
- if (memcmp(flavour, "bsd\0", 4) == 0)
+ /* FreeBSD has relative offset if C partition offset is zero */
+ if (memcmp(flavour, "bsd\0", 4) == 0 &&
+ le32_to_cpu(l->d_partitions[2].p_offset) == 0)
bsd_start += offset;
if (offset == bsd_start && size == bsd_size)
/* full parent partition, we have it already */
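A worked example of the new condition (numbers hypothetical): take an MBR slice starting at LBA 2048 that carries a FreeBSD disklabel. If the label's C ("whole slice") entry records p_offset == 0, the remaining p_offset values are slice-relative, so an entry at p_offset == 1024 maps to absolute LBA 1024 + 2048 = 3072 and the bsd_start += offset adjustment is needed. If the C entry instead holds the absolute slice start (p_offset == 2048), the entries are already absolute and must not be shifted again.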
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index edcfff974527..60b471f8621b 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -384,9 +384,10 @@ out_put_request:
/**
* sg_scsi_ioctl -- handle deprecated SCSI_IOCTL_SEND_COMMAND ioctl
- * @file: file this ioctl operates on (optional)
* @q: request queue to send scsi commands down
 * @disk: gendisk to operate on (optional)
+ * @mode: mode used to open the file through which the ioctl has been
+ * submitted
* @sic: userspace structure describing the command to perform
*
* Send down the scsi command described by @sic to the device below
@@ -415,10 +416,10 @@ out_put_request:
* Positive numbers returned are the compacted SCSI error codes (4
* bytes in one int) where the lowest byte is the SCSI status.
*/
-#define OMAX_SB_LEN 16 /* For backward compatibility */
int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
struct scsi_ioctl_command __user *sic)
{
+ enum { OMAX_SB_LEN = 16 }; /* For backward compatibility */
struct request *rq;
struct scsi_request *req;
int err;
@@ -692,38 +693,9 @@ int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
if (bd && bd == bd->bd_contains)
return 0;
- /* Actually none of these is particularly useful on a partition,
- * but they are safe.
- */
- switch (cmd) {
- case SCSI_IOCTL_GET_IDLUN:
- case SCSI_IOCTL_GET_BUS_NUMBER:
- case SCSI_IOCTL_GET_PCI:
- case SCSI_IOCTL_PROBE_HOST:
- case SG_GET_VERSION_NUM:
- case SG_SET_TIMEOUT:
- case SG_GET_TIMEOUT:
- case SG_GET_RESERVED_SIZE:
- case SG_SET_RESERVED_SIZE:
- case SG_EMULATED_HOST:
- return 0;
- case CDROM_GET_CAPABILITY:
- /* Keep this until we remove the printk below. udev sends it
- * and we do not want to spam dmesg about it. CD-ROMs do
- * not have partitions, so we get here only for disks.
- */
- return -ENOIOCTLCMD;
- default:
- break;
- }
-
if (capable(CAP_SYS_RAWIO))
return 0;
- /* In particular, rule out all resets and host-specific ioctls. */
- printk_ratelimited(KERN_WARNING
- "%s: sending ioctl %x to a partition!\n", current->comm, cmd);
-
return -ENOIOCTLCMD;
}
EXPORT_SYMBOL(scsi_verify_blk_ioctl);
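With the whitelist and the ratelimited warning removed, the function reduces to three cases (reconstructed from the hunk above):

int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
{
	if (bd && bd == bd->bd_contains)
		return 0;		/* whole device: always allowed */
	if (capable(CAP_SYS_RAWIO))
		return 0;		/* privileged: partitions allowed too */
	return -ENOIOCTLCMD;		/* unprivileged access to a partition */
}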
diff --git a/crypto/Kconfig b/crypto/Kconfig
index f7911963bb79..b75264b09a46 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -106,6 +106,7 @@ config CRYPTO_KPP
config CRYPTO_ACOMP2
tristate
select CRYPTO_ALGAPI2
+ select SGL_ALLOC
config CRYPTO_ACOMP
tristate
@@ -130,7 +131,7 @@ config CRYPTO_DH
config CRYPTO_ECDH
tristate "ECDH algorithm"
- select CRYTPO_KPP
+ select CRYPTO_KPP
select CRYPTO_RNG_DEFAULT
help
Generic implementation of the ECDH algorithm
@@ -1339,6 +1340,7 @@ config CRYPTO_SALSA20_586
tristate "Salsa20 stream cipher algorithm (i586)"
depends on (X86 || UML_X86) && !64BIT
select CRYPTO_BLKCIPHER
+ select CRYPTO_SALSA20
help
Salsa20 stream cipher algorithm.
@@ -1352,6 +1354,7 @@ config CRYPTO_SALSA20_X86_64
tristate "Salsa20 stream cipher algorithm (x86_64)"
depends on (X86 || UML_X86) && 64BIT
select CRYPTO_BLKCIPHER
+ select CRYPTO_SALSA20
help
Salsa20 stream cipher algorithm.
diff --git a/crypto/Makefile b/crypto/Makefile
index d674884b2d51..cdbc03b35510 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -99,6 +99,7 @@ obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o
obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o
CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
obj-$(CONFIG_CRYPTO_AES) += aes_generic.o
+CFLAGS_aes_generic.o := $(call cc-option,-fno-code-hoisting) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=83356
obj-$(CONFIG_CRYPTO_AES_TI) += aes_ti.o
obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o
obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o
diff --git a/crypto/ablk_helper.c b/crypto/ablk_helper.c
index 1441f07d0a19..09776bb1360e 100644
--- a/crypto/ablk_helper.c
+++ b/crypto/ablk_helper.c
@@ -18,9 +18,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
@@ -28,7 +26,6 @@
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/hardirq.h>
#include <crypto/algapi.h>
#include <crypto/cryptd.h>
#include <crypto/ablk_helper.h>
diff --git a/crypto/aead.c b/crypto/aead.c
index f794b30a9407..60b3bbe973e7 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -54,11 +54,18 @@ int crypto_aead_setkey(struct crypto_aead *tfm,
const u8 *key, unsigned int keylen)
{
unsigned long alignmask = crypto_aead_alignmask(tfm);
+ int err;
if ((unsigned long)key & alignmask)
- return setkey_unaligned(tfm, key, keylen);
+ err = setkey_unaligned(tfm, key, keylen);
+ else
+ err = crypto_aead_alg(tfm)->setkey(tfm, key, keylen);
+
+ if (err)
+ return err;
- return crypto_aead_alg(tfm)->setkey(tfm, key, keylen);
+ crypto_aead_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
+ return 0;
}
EXPORT_SYMBOL_GPL(crypto_aead_setkey);
@@ -93,6 +100,8 @@ static int crypto_aead_init_tfm(struct crypto_tfm *tfm)
struct crypto_aead *aead = __crypto_aead_cast(tfm);
struct aead_alg *alg = crypto_aead_alg(aead);
+ crypto_aead_set_flags(aead, CRYPTO_TFM_NEED_KEY);
+
aead->authsize = alg->maxauthsize;
if (alg->exit)
@@ -295,7 +304,7 @@ int aead_init_geniv(struct crypto_aead *aead)
if (err)
goto out;
- ctx->sknull = crypto_get_default_null_skcipher2();
+ ctx->sknull = crypto_get_default_null_skcipher();
err = PTR_ERR(ctx->sknull);
if (IS_ERR(ctx->sknull))
goto out;
@@ -315,7 +324,7 @@ out:
return err;
drop_null:
- crypto_put_default_null_skcipher2();
+ crypto_put_default_null_skcipher();
goto out;
}
EXPORT_SYMBOL_GPL(aead_init_geniv);
@@ -325,7 +334,7 @@ void aead_exit_geniv(struct crypto_aead *tfm)
struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_aead(ctx->child);
- crypto_put_default_null_skcipher2();
+ crypto_put_default_null_skcipher();
}
EXPORT_SYMBOL_GPL(aead_exit_geniv);
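The setkey changes in this file, and the matching ones in ahash.c further down, implement a single lifecycle: CRYPTO_TFM_NEED_KEY is set when the tfm is initialised, a successful ->setkey() clears it, and data-path entry points fail with -ENOKEY while it is still set. A minimal sketch of that state machine, using a hypothetical struct toy_tfm rather than the kernel structures:

#include <errno.h>

#define NEED_KEY	0x1

struct toy_tfm {
	unsigned int flags;
	int (*setkey)(struct toy_tfm *t, const void *key, unsigned int len);
};

static int toy_setkey(struct toy_tfm *t, const void *key, unsigned int len)
{
	int err = t->setkey(t, key, len);

	if (err)
		return err;		/* flag stays set, tfm stays unusable */
	t->flags &= ~NEED_KEY;		/* mirrors crypto_aead_setkey() above */
	return 0;
}

static int toy_crypt(struct toy_tfm *t)
{
	if (t->flags & NEED_KEY)
		return -ENOKEY;		/* mirrors crypto_ahash_digest() below */
	return 0;			/* ... do the real work ... */
}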
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 35d4dcea381f..0f8d8d5523c3 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -150,7 +150,7 @@ EXPORT_SYMBOL_GPL(af_alg_release_parent);
static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
- const u32 forbidden = CRYPTO_ALG_INTERNAL;
+ const u32 allowed = CRYPTO_ALG_KERN_DRIVER_ONLY;
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct sockaddr_alg *sa = (void *)uaddr;
@@ -158,6 +158,10 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
void *private;
int err;
+ /* Reject the bind if the caller sets any flag that is not allowed. */
+ if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed))
+ return -EINVAL;
+
if (sock->state == SS_CONNECTED)
return -EINVAL;
@@ -176,9 +180,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
if (IS_ERR(type))
return PTR_ERR(type);
- private = type->bind(sa->salg_name,
- sa->salg_feat & ~forbidden,
- sa->salg_mask & ~forbidden);
+ private = type->bind(sa->salg_name, sa->salg_feat, sa->salg_mask);
if (IS_ERR(private)) {
module_put(type->owner);
return PTR_ERR(private);
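The bind change above is behavioural: CRYPTO_ALG_INTERNAL used to be masked off silently, whereas now any flag bit other than CRYPTO_ALG_KERN_DRIVER_ONLY fails the bind with -EINVAL. A hedged userspace sketch — the CRYPTO_ALG_* values live in the kernel's internal <linux/crypto.h>, not a uapi header, so the constant is spelled out:

#include <sys/socket.h>
#include <linux/if_alg.h>

struct sockaddr_alg sa = {
	.salg_family = AF_ALG,
	.salg_type   = "skcipher",
	.salg_name   = "cbc(aes)",
	.salg_feat   = 0x00001000,	/* CRYPTO_ALG_KERN_DRIVER_ONLY: accepted */
	.salg_mask   = 0x00001000,
};
/* any other bit in salg_feat/salg_mask now makes bind() return -EINVAL */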
@@ -1062,13 +1064,13 @@ EXPORT_SYMBOL_GPL(af_alg_async_cb);
/**
* af_alg_poll - poll system call handler
*/
-unsigned int af_alg_poll(struct file *file, struct socket *sock,
+__poll_t af_alg_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct af_alg_ctx *ctx = ask->private;
- unsigned int mask;
+ __poll_t mask;
sock_poll_wait(file, sk_sleep(sk), wait);
mask = 0;
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 3a35d67de7d9..266fc1d64f61 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -193,11 +193,18 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
unsigned long alignmask = crypto_ahash_alignmask(tfm);
+ int err;
if ((unsigned long)key & alignmask)
- return ahash_setkey_unaligned(tfm, key, keylen);
+ err = ahash_setkey_unaligned(tfm, key, keylen);
+ else
+ err = tfm->setkey(tfm, key, keylen);
+
+ if (err)
+ return err;
- return tfm->setkey(tfm, key, keylen);
+ crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
+ return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
@@ -368,7 +375,12 @@ EXPORT_SYMBOL_GPL(crypto_ahash_finup);
int crypto_ahash_digest(struct ahash_request *req)
{
- return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+ if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
+ return crypto_ahash_op(req, tfm->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
@@ -450,7 +462,6 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
struct ahash_alg *alg = crypto_ahash_alg(hash);
hash->setkey = ahash_nosetkey;
- hash->has_setkey = false;
hash->export = ahash_no_export;
hash->import = ahash_no_import;
@@ -465,7 +476,8 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
if (alg->setkey) {
hash->setkey = alg->setkey;
- hash->has_setkey = true;
+ if (!(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
+ crypto_ahash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
}
if (alg->export)
hash->export = alg->export;
@@ -649,5 +661,16 @@ struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);
+bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
+{
+ struct crypto_alg *alg = &halg->base;
+
+ if (alg->cra_type != &crypto_ahash_type)
+ return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));
+
+ return __crypto_ahash_alg(alg)->setkey != NULL;
+}
+EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 9a636f961572..395b082d03a9 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -62,7 +62,7 @@ static int crypto_check_alg(struct crypto_alg *alg)
if (alg->cra_priority < 0)
return -EINVAL;
- atomic_set(&alg->cra_refcnt, 1);
+ refcount_set(&alg->cra_refcnt, 1);
return crypto_set_driver_name(alg);
}
@@ -123,7 +123,6 @@ static void crypto_remove_instance(struct crypto_instance *inst,
if (!tmpl || !crypto_tmpl_get(tmpl))
return;
- crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, &inst->alg);
list_move(&inst->alg.cra_list, list);
hlist_del(&inst->list);
inst->alg.cra_destroy = crypto_destroy_instance;
@@ -236,7 +235,7 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg)
if (!larval->adult)
goto free_larval;
- atomic_set(&larval->alg.cra_refcnt, 1);
+ refcount_set(&larval->alg.cra_refcnt, 1);
memcpy(larval->alg.cra_driver_name, alg->cra_driver_name,
CRYPTO_MAX_ALG_NAME);
larval->alg.cra_priority = alg->cra_priority;
@@ -392,7 +391,6 @@ static int crypto_remove_alg(struct crypto_alg *alg, struct list_head *list)
alg->cra_flags |= CRYPTO_ALG_DEAD;
- crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, alg);
list_del_init(&alg->cra_list);
crypto_remove_spawns(alg, list, NULL);
@@ -411,7 +409,7 @@ int crypto_unregister_alg(struct crypto_alg *alg)
if (ret)
return ret;
- BUG_ON(atomic_read(&alg->cra_refcnt) != 1);
+ BUG_ON(refcount_read(&alg->cra_refcnt) != 1);
if (alg->cra_destroy)
alg->cra_destroy(alg);
@@ -470,7 +468,6 @@ int crypto_register_template(struct crypto_template *tmpl)
}
list_add(&tmpl->list, &crypto_template_list);
- crypto_notify(CRYPTO_MSG_TMPL_REGISTER, tmpl);
err = 0;
out:
up_write(&crypto_alg_sem);
@@ -497,12 +494,10 @@ void crypto_unregister_template(struct crypto_template *tmpl)
BUG_ON(err);
}
- crypto_notify(CRYPTO_MSG_TMPL_UNREGISTER, tmpl);
-
up_write(&crypto_alg_sem);
hlist_for_each_entry_safe(inst, n, list, list) {
- BUG_ON(atomic_read(&inst->alg.cra_refcnt) != 1);
+ BUG_ON(refcount_read(&inst->alg.cra_refcnt) != 1);
crypto_free_instance(inst);
}
crypto_remove_final(&users);
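The atomic_t to refcount_t conversion keeps the call shape but adds saturation: refcount_t refuses to wrap, so a leaked or double-dropped reference becomes a warning instead of a use-after-free. The lifecycle as used in this file and in internal.h below:

refcount_set(&alg->cra_refcnt, 1);		/* crypto_check_alg(): initial ref */
refcount_inc(&alg->cra_refcnt);			/* crypto_alg_get() */
if (refcount_dec_and_test(&alg->cra_refcnt))	/* crypto_alg_put() */
	alg->cra_destroy(alg);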
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index e9885a35ef6e..4b07edd5a9ff 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -42,7 +42,6 @@
struct aead_tfm {
struct crypto_aead *aead;
- bool has_key;
struct crypto_skcipher *null_tfm;
};
@@ -398,7 +397,7 @@ static int aead_check_key(struct socket *sock)
err = -ENOKEY;
lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
- if (!tfm->has_key)
+ if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
goto unlock;
if (!pask->refcnt++)
@@ -491,7 +490,7 @@ static void *aead_bind(const char *name, u32 type, u32 mask)
return ERR_CAST(aead);
}
- null_tfm = crypto_get_default_null_skcipher2();
+ null_tfm = crypto_get_default_null_skcipher();
if (IS_ERR(null_tfm)) {
crypto_free_aead(aead);
kfree(tfm);
@@ -509,7 +508,7 @@ static void aead_release(void *private)
struct aead_tfm *tfm = private;
crypto_free_aead(tfm->aead);
- crypto_put_default_null_skcipher2();
+ crypto_put_default_null_skcipher();
kfree(tfm);
}
@@ -523,12 +522,8 @@ static int aead_setauthsize(void *private, unsigned int authsize)
static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
{
struct aead_tfm *tfm = private;
- int err;
-
- err = crypto_aead_setkey(tfm->aead, key, keylen);
- tfm->has_key = !err;
- return err;
+ return crypto_aead_setkey(tfm->aead, key, keylen);
}
static void aead_sock_destruct(struct sock *sk)
@@ -589,7 +584,7 @@ static int aead_accept_parent(void *private, struct sock *sk)
{
struct aead_tfm *tfm = private;
- if (!tfm->has_key)
+ if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
return aead_accept_parent_nokey(private, sk);
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 76d2e716c792..6c9b1927a520 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -34,11 +34,6 @@ struct hash_ctx {
struct ahash_request req;
};
-struct algif_hash_tfm {
- struct crypto_ahash *hash;
- bool has_key;
-};
-
static int hash_alloc_result(struct sock *sk, struct hash_ctx *ctx)
{
unsigned ds;
@@ -307,7 +302,7 @@ static int hash_check_key(struct socket *sock)
int err = 0;
struct sock *psk;
struct alg_sock *pask;
- struct algif_hash_tfm *tfm;
+ struct crypto_ahash *tfm;
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
@@ -321,7 +316,7 @@ static int hash_check_key(struct socket *sock)
err = -ENOKEY;
lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
- if (!tfm->has_key)
+ if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
goto unlock;
if (!pask->refcnt++)
@@ -412,41 +407,17 @@ static struct proto_ops algif_hash_ops_nokey = {
static void *hash_bind(const char *name, u32 type, u32 mask)
{
- struct algif_hash_tfm *tfm;
- struct crypto_ahash *hash;
-
- tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
- if (!tfm)
- return ERR_PTR(-ENOMEM);
-
- hash = crypto_alloc_ahash(name, type, mask);
- if (IS_ERR(hash)) {
- kfree(tfm);
- return ERR_CAST(hash);
- }
-
- tfm->hash = hash;
-
- return tfm;
+ return crypto_alloc_ahash(name, type, mask);
}
static void hash_release(void *private)
{
- struct algif_hash_tfm *tfm = private;
-
- crypto_free_ahash(tfm->hash);
- kfree(tfm);
+ crypto_free_ahash(private);
}
static int hash_setkey(void *private, const u8 *key, unsigned int keylen)
{
- struct algif_hash_tfm *tfm = private;
- int err;
-
- err = crypto_ahash_setkey(tfm->hash, key, keylen);
- tfm->has_key = !err;
-
- return err;
+ return crypto_ahash_setkey(private, key, keylen);
}
static void hash_sock_destruct(struct sock *sk)
@@ -461,11 +432,10 @@ static void hash_sock_destruct(struct sock *sk)
static int hash_accept_parent_nokey(void *private, struct sock *sk)
{
- struct hash_ctx *ctx;
+ struct crypto_ahash *tfm = private;
struct alg_sock *ask = alg_sk(sk);
- struct algif_hash_tfm *tfm = private;
- struct crypto_ahash *hash = tfm->hash;
- unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(hash);
+ struct hash_ctx *ctx;
+ unsigned int len = sizeof(*ctx) + crypto_ahash_reqsize(tfm);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
@@ -478,7 +448,7 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk)
ask->private = ctx;
- ahash_request_set_tfm(&ctx->req, hash);
+ ahash_request_set_tfm(&ctx->req, tfm);
ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &ctx->wait);
@@ -489,9 +459,9 @@ static int hash_accept_parent_nokey(void *private, struct sock *sk)
static int hash_accept_parent(void *private, struct sock *sk)
{
- struct algif_hash_tfm *tfm = private;
+ struct crypto_ahash *tfm = private;
- if (!tfm->has_key && crypto_ahash_has_setkey(tfm->hash))
+ if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
return hash_accept_parent_nokey(private, sk);
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index c5c47b680152..c4e885df4564 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -38,11 +38,6 @@
#include <linux/net.h>
#include <net/sock.h>
-struct skcipher_tfm {
- struct crypto_skcipher *skcipher;
- bool has_key;
-};
-
static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
size_t size)
{
@@ -50,8 +45,7 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
struct alg_sock *ask = alg_sk(sk);
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
- struct skcipher_tfm *skc = pask->private;
- struct crypto_skcipher *tfm = skc->skcipher;
+ struct crypto_skcipher *tfm = pask->private;
unsigned ivsize = crypto_skcipher_ivsize(tfm);
return af_alg_sendmsg(sock, msg, size, ivsize);
@@ -65,8 +59,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
struct af_alg_ctx *ctx = ask->private;
- struct skcipher_tfm *skc = pask->private;
- struct crypto_skcipher *tfm = skc->skcipher;
+ struct crypto_skcipher *tfm = pask->private;
unsigned int bs = crypto_skcipher_blocksize(tfm);
struct af_alg_async_req *areq;
int err = 0;
@@ -193,7 +186,6 @@ out:
return ret;
}
-
static struct proto_ops algif_skcipher_ops = {
.family = PF_ALG,
@@ -221,7 +213,7 @@ static int skcipher_check_key(struct socket *sock)
int err = 0;
struct sock *psk;
struct alg_sock *pask;
- struct skcipher_tfm *tfm;
+ struct crypto_skcipher *tfm;
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
@@ -235,7 +227,7 @@ static int skcipher_check_key(struct socket *sock)
err = -ENOKEY;
lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
- if (!tfm->has_key)
+ if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
goto unlock;
if (!pask->refcnt++)
@@ -314,41 +306,17 @@ static struct proto_ops algif_skcipher_ops_nokey = {
static void *skcipher_bind(const char *name, u32 type, u32 mask)
{
- struct skcipher_tfm *tfm;
- struct crypto_skcipher *skcipher;
-
- tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
- if (!tfm)
- return ERR_PTR(-ENOMEM);
-
- skcipher = crypto_alloc_skcipher(name, type, mask);
- if (IS_ERR(skcipher)) {
- kfree(tfm);
- return ERR_CAST(skcipher);
- }
-
- tfm->skcipher = skcipher;
-
- return tfm;
+ return crypto_alloc_skcipher(name, type, mask);
}
static void skcipher_release(void *private)
{
- struct skcipher_tfm *tfm = private;
-
- crypto_free_skcipher(tfm->skcipher);
- kfree(tfm);
+ crypto_free_skcipher(private);
}
static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
{
- struct skcipher_tfm *tfm = private;
- int err;
-
- err = crypto_skcipher_setkey(tfm->skcipher, key, keylen);
- tfm->has_key = !err;
-
- return err;
+ return crypto_skcipher_setkey(private, key, keylen);
}
static void skcipher_sock_destruct(struct sock *sk)
@@ -357,8 +325,7 @@ static void skcipher_sock_destruct(struct sock *sk)
struct af_alg_ctx *ctx = ask->private;
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
- struct skcipher_tfm *skc = pask->private;
- struct crypto_skcipher *tfm = skc->skcipher;
+ struct crypto_skcipher *tfm = pask->private;
af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
@@ -370,22 +337,21 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
{
struct af_alg_ctx *ctx;
struct alg_sock *ask = alg_sk(sk);
- struct skcipher_tfm *tfm = private;
- struct crypto_skcipher *skcipher = tfm->skcipher;
+ struct crypto_skcipher *tfm = private;
unsigned int len = sizeof(*ctx);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
return -ENOMEM;
- ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(skcipher),
+ ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(tfm),
GFP_KERNEL);
if (!ctx->iv) {
sock_kfree_s(sk, ctx, len);
return -ENOMEM;
}
- memset(ctx->iv, 0, crypto_skcipher_ivsize(skcipher));
+ memset(ctx->iv, 0, crypto_skcipher_ivsize(tfm));
INIT_LIST_HEAD(&ctx->tsgl_list);
ctx->len = len;
@@ -405,9 +371,9 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
static int skcipher_accept_parent(void *private, struct sock *sk)
{
- struct skcipher_tfm *tfm = private;
+ struct crypto_skcipher *tfm = private;
- if (!tfm->has_key && crypto_skcipher_has_setkey(tfm->skcipher))
+ if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
return skcipher_accept_parent_nokey(private, sk);
diff --git a/crypto/api.c b/crypto/api.c
index 2a2479d168aa..70a894e52ff3 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -137,7 +137,7 @@ static struct crypto_alg *crypto_larval_add(const char *name, u32 type,
if (IS_ERR(larval))
return ERR_CAST(larval);
- atomic_set(&larval->alg.cra_refcnt, 2);
+ refcount_set(&larval->alg.cra_refcnt, 2);
down_write(&crypto_alg_sem);
alg = __crypto_alg_lookup(name, type, mask);
@@ -205,7 +205,8 @@ struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(crypto_alg_lookup);
-struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
+static struct crypto_alg *crypto_larval_lookup(const char *name, u32 type,
+ u32 mask)
{
struct crypto_alg *alg;
@@ -231,7 +232,6 @@ struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
return crypto_larval_add(name, type, mask);
}
-EXPORT_SYMBOL_GPL(crypto_larval_lookup);
int crypto_probing_notify(unsigned long val, void *v)
{
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 875470b0e026..d3d6d72fe649 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -329,7 +329,7 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
if (IS_ERR(enc))
goto err_free_ahash;
- null = crypto_get_default_null_skcipher2();
+ null = crypto_get_default_null_skcipher();
err = PTR_ERR(null);
if (IS_ERR(null))
goto err_free_skcipher;
@@ -363,7 +363,7 @@ static void crypto_authenc_exit_tfm(struct crypto_aead *tfm)
crypto_free_ahash(ctx->auth);
crypto_free_skcipher(ctx->enc);
- crypto_put_default_null_skcipher2();
+ crypto_put_default_null_skcipher();
}
static void crypto_authenc_free(struct aead_instance *inst)
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 0cf5fefdb859..15f91ddd7f0e 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -352,7 +352,7 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
if (IS_ERR(enc))
goto err_free_ahash;
- null = crypto_get_default_null_skcipher2();
+ null = crypto_get_default_null_skcipher();
err = PTR_ERR(null);
if (IS_ERR(null))
goto err_free_skcipher;
@@ -389,7 +389,7 @@ static void crypto_authenc_esn_exit_tfm(struct crypto_aead *tfm)
crypto_free_ahash(ctx->auth);
crypto_free_skcipher(ctx->enc);
- crypto_put_default_null_skcipher2();
+ crypto_put_default_null_skcipher();
}
static void crypto_authenc_esn_free(struct aead_instance *inst)
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 6c43a0a17a55..01c0d4aa2563 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -18,7 +18,6 @@
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/errno.h>
-#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c
index a02286bf319e..32ddd4836ff5 100644
--- a/crypto/camellia_generic.c
+++ b/crypto/camellia_generic.c
@@ -13,8 +13,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
diff --git a/crypto/cast5_generic.c b/crypto/cast5_generic.c
index df5c72629383..66169c178314 100644
--- a/crypto/cast5_generic.c
+++ b/crypto/cast5_generic.c
@@ -16,8 +16,7 @@
* any later version.
*
* You should have received a copy of the GNU General Public License
-* along with this program; if not, write to the Free Software
-* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c
index 058c8d755d03..c8e5ec69790e 100644
--- a/crypto/cast6_generic.c
+++ b/crypto/cast6_generic.c
@@ -13,8 +13,7 @@
* any later version.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
diff --git a/crypto/chacha20_generic.c b/crypto/chacha20_generic.c
index 4a45fa4890c0..e451c3cb6a56 100644
--- a/crypto/chacha20_generic.c
+++ b/crypto/chacha20_generic.c
@@ -9,44 +9,38 @@
* (at your option) any later version.
*/
+#include <asm/unaligned.h>
#include <crypto/algapi.h>
#include <crypto/chacha20.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>
-static inline u32 le32_to_cpuvp(const void *p)
-{
- return le32_to_cpup(p);
-}
-
static void chacha20_docrypt(u32 *state, u8 *dst, const u8 *src,
unsigned int bytes)
{
- u8 stream[CHACHA20_BLOCK_SIZE];
+ u32 stream[CHACHA20_BLOCK_WORDS];
if (dst != src)
memcpy(dst, src, bytes);
while (bytes >= CHACHA20_BLOCK_SIZE) {
chacha20_block(state, stream);
- crypto_xor(dst, stream, CHACHA20_BLOCK_SIZE);
+ crypto_xor(dst, (const u8 *)stream, CHACHA20_BLOCK_SIZE);
bytes -= CHACHA20_BLOCK_SIZE;
dst += CHACHA20_BLOCK_SIZE;
}
if (bytes) {
chacha20_block(state, stream);
- crypto_xor(dst, stream, bytes);
+ crypto_xor(dst, (const u8 *)stream, bytes);
}
}
void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv)
{
- static const char constant[16] = "expand 32-byte k";
-
- state[0] = le32_to_cpuvp(constant + 0);
- state[1] = le32_to_cpuvp(constant + 4);
- state[2] = le32_to_cpuvp(constant + 8);
- state[3] = le32_to_cpuvp(constant + 12);
+ state[0] = 0x61707865; /* "expa" */
+ state[1] = 0x3320646e; /* "nd 3" */
+ state[2] = 0x79622d32; /* "2-by" */
+ state[3] = 0x6b206574; /* "te k" */
state[4] = ctx->key[0];
state[5] = ctx->key[1];
state[6] = ctx->key[2];
@@ -55,10 +49,10 @@ void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv)
state[9] = ctx->key[5];
state[10] = ctx->key[6];
state[11] = ctx->key[7];
- state[12] = le32_to_cpuvp(iv + 0);
- state[13] = le32_to_cpuvp(iv + 4);
- state[14] = le32_to_cpuvp(iv + 8);
- state[15] = le32_to_cpuvp(iv + 12);
+ state[12] = get_unaligned_le32(iv + 0);
+ state[13] = get_unaligned_le32(iv + 4);
+ state[14] = get_unaligned_le32(iv + 8);
+ state[15] = get_unaligned_le32(iv + 12);
}
EXPORT_SYMBOL_GPL(crypto_chacha20_init);
@@ -72,7 +66,7 @@ int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(ctx->key); i++)
- ctx->key[i] = le32_to_cpuvp(key + i * sizeof(u32));
+ ctx->key[i] = get_unaligned_le32(key + i * sizeof(u32));
return 0;
}
@@ -111,7 +105,6 @@ static struct skcipher_alg alg = {
.base.cra_priority = 100,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct chacha20_ctx),
- .base.cra_alignmask = sizeof(u32) - 1,
.base.cra_module = THIS_MODULE,
.min_keysize = CHACHA20_KEY_SIZE,
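The four magic words introduced above are just the little-endian reading of the old 16-byte constant; a throwaway check (plain userspace C, little-endian host assumed):

#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	static const char c[16] = "expand 32-byte k";
	uint32_t w[4];

	memcpy(w, c, sizeof(w));	/* byte-for-byte, LE host */
	assert(w[0] == 0x61707865);	/* "expa" */
	assert(w[1] == 0x3320646e);	/* "nd 3" */
	assert(w[2] == 0x79622d32);	/* "2-by" */
	assert(w[3] == 0x6b206574);	/* "te k" */
	return 0;
}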
diff --git a/crypto/crc32_generic.c b/crypto/crc32_generic.c
index aa2a25fc7482..718cbce8d169 100644
--- a/crypto/crc32_generic.c
+++ b/crypto/crc32_generic.c
@@ -133,6 +133,7 @@ static struct shash_alg alg = {
.cra_name = "crc32",
.cra_driver_name = "crc32-generic",
.cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_ctxsize = sizeof(u32),
.cra_module = THIS_MODULE,
diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c
index 4c0a0e271876..372320399622 100644
--- a/crypto/crc32c_generic.c
+++ b/crypto/crc32c_generic.c
@@ -146,6 +146,7 @@ static struct shash_alg alg = {
.cra_name = "crc32c",
.cra_driver_name = "crc32c-generic",
.cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_alignmask = 3,
.cra_ctxsize = sizeof(struct chksum_ctx),
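CRYPTO_ALG_OPTIONAL_KEY, set here for both CRC drivers, marks hashes that implement ->setkey() (used to seed the starting CRC value) yet stay usable without it, so tfm init does not raise CRYPTO_TFM_NEED_KEY for them. For instance crc32c, whose default key is ~0 and whose final step inverts the result, still yields the standard check value 0xe3069283 for the ASCII string "123456789" without any setkey() call.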
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index bd43cf5be14c..addca7bae33f 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -32,7 +32,9 @@
#include <linux/sched.h>
#include <linux/slab.h>
-#define CRYPTD_MAX_CPU_QLEN 1000
+static unsigned int cryptd_max_cpu_qlen = 1000;
+module_param(cryptd_max_cpu_qlen, uint, 0);
+MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");
struct cryptd_cpu_queue {
struct crypto_queue queue;
@@ -116,6 +118,7 @@ static int cryptd_init_queue(struct cryptd_queue *queue,
crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
}
+ pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
return 0;
}
@@ -893,10 +896,9 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
if (err)
goto out_free_inst;
- type = CRYPTO_ALG_ASYNC;
- if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
- type |= CRYPTO_ALG_INTERNAL;
- inst->alg.halg.base.cra_flags = type;
+ inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
+ (alg->cra_flags & (CRYPTO_ALG_INTERNAL |
+ CRYPTO_ALG_OPTIONAL_KEY));
inst->alg.halg.digestsize = salg->digestsize;
inst->alg.halg.statesize = salg->statesize;
@@ -911,7 +913,8 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
inst->alg.finup = cryptd_hash_finup_enqueue;
inst->alg.export = cryptd_hash_export;
inst->alg.import = cryptd_hash_import;
- inst->alg.setkey = cryptd_hash_setkey;
+ if (crypto_shash_alg_has_setkey(salg))
+ inst->alg.setkey = cryptd_hash_setkey;
inst->alg.digest = cryptd_hash_digest_enqueue;
err = ahash_register_instance(tmpl, inst);
@@ -1372,7 +1375,7 @@ static int __init cryptd_init(void)
{
int err;
- err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
+ err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
if (err)
return err;
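Because the queue depth is now a module parameter (permissions 0, so load-time only), it can be tuned with, for example, "modprobe cryptd cryptd_max_cpu_qlen=2000", or with cryptd.cryptd_max_cpu_qlen=2000 on the kernel command line when cryptd is built in; the pr_info() added to cryptd_init_queue() reports the value actually used.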
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index 0dbe2be7f783..5c291eedaa70 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -169,7 +169,7 @@ static int crypto_report_one(struct crypto_alg *alg,
ualg->cru_type = 0;
ualg->cru_mask = 0;
ualg->cru_flags = alg->cra_flags;
- ualg->cru_refcnt = atomic_read(&alg->cra_refcnt);
+ ualg->cru_refcnt = refcount_read(&alg->cra_refcnt);
if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority))
goto nla_put_failure;
@@ -387,7 +387,7 @@ static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
goto drop_alg;
err = -EBUSY;
- if (atomic_read(&alg->cra_refcnt) > 2)
+ if (refcount_read(&alg->cra_refcnt) > 2)
goto drop_alg;
err = crypto_unregister_instance((struct crypto_instance *)alg);
diff --git a/crypto/ecc.c b/crypto/ecc.c
index 633a9bcdc574..18f32f2a5e1c 100644
--- a/crypto/ecc.c
+++ b/crypto/ecc.c
@@ -964,7 +964,7 @@ int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, u64 *privkey)
* DRBG with a security strength of 256.
*/
if (crypto_get_default_rng())
- err = -EFAULT;
+ return -EFAULT;
err = crypto_rng_get_bytes(crypto_default_rng, (u8 *)priv, nbytes);
crypto_put_default_rng();
diff --git a/crypto/echainiv.c b/crypto/echainiv.c
index e3d889b122e0..45819e6015bf 100644
--- a/crypto/echainiv.c
+++ b/crypto/echainiv.c
@@ -118,8 +118,6 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
struct aead_instance *inst;
- struct crypto_aead_spawn *spawn;
- struct aead_alg *alg;
int err;
inst = aead_geniv_alloc(tmpl, tb, 0, 0);
@@ -127,9 +125,6 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
if (IS_ERR(inst))
return PTR_ERR(inst);
- spawn = aead_instance_ctx(inst);
- alg = crypto_spawn_aead_alg(spawn);
-
err = -EINVAL;
if (inst->alg.ivsize & (sizeof(u64) - 1) || !inst->alg.ivsize)
goto free_inst;
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 8589681fb9f6..0ad879e1f9b2 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -1101,7 +1101,7 @@ static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm)
if (IS_ERR(aead))
return PTR_ERR(aead);
- null = crypto_get_default_null_skcipher2();
+ null = crypto_get_default_null_skcipher();
err = PTR_ERR(null);
if (IS_ERR(null))
goto err_free_aead;
@@ -1129,7 +1129,7 @@ static void crypto_rfc4543_exit_tfm(struct crypto_aead *tfm)
struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_aead(ctx->child);
- crypto_put_default_null_skcipher2();
+ crypto_put_default_null_skcipher();
}
static void crypto_rfc4543_free(struct aead_instance *inst)
diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c
index 24e601954c7a..a4b1c026aaee 100644
--- a/crypto/gf128mul.c
+++ b/crypto/gf128mul.c
@@ -160,8 +160,6 @@ void gf128mul_x8_ble(le128 *r, const le128 *x)
{
u64 a = le64_to_cpu(x->a);
u64 b = le64_to_cpu(x->b);
-
- /* equivalent to gf128mul_table_be[b >> 63] (see crypto/gf128mul.c): */
u64 _tt = gf128mul_table_be[a >> 56];
r->a = cpu_to_le64((a << 8) | (b >> 56));
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
index 12ad3e3a84e3..1bffb3f712dd 100644
--- a/crypto/ghash-generic.c
+++ b/crypto/ghash-generic.c
@@ -56,9 +56,6 @@ static int ghash_update(struct shash_desc *desc,
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
u8 *dst = dctx->buffer;
- if (!ctx->gf128)
- return -ENOKEY;
-
if (dctx->bytes) {
int n = min(srclen, dctx->bytes);
u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
@@ -111,9 +108,6 @@ static int ghash_final(struct shash_desc *desc, u8 *dst)
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
u8 *buf = dctx->buffer;
- if (!ctx->gf128)
- return -ENOKEY;
-
ghash_flush(ctx, dctx);
memcpy(dst, buf, GHASH_BLOCK_SIZE);
diff --git a/crypto/internal.h b/crypto/internal.h
index f07320423191..5ac27fba10e8 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -30,9 +30,6 @@
enum {
CRYPTO_MSG_ALG_REQUEST,
CRYPTO_MSG_ALG_REGISTER,
- CRYPTO_MSG_ALG_UNREGISTER,
- CRYPTO_MSG_TMPL_REGISTER,
- CRYPTO_MSG_TMPL_UNREGISTER,
};
struct crypto_instance;
@@ -78,7 +75,6 @@ int crypto_init_compress_ops(struct crypto_tfm *tfm);
struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask);
void crypto_larval_kill(struct crypto_alg *alg);
-struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask);
void crypto_alg_tested(const char *name, int err);
void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
@@ -106,13 +102,13 @@ int crypto_type_has_alg(const char *name, const struct crypto_type *frontend,
static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
{
- atomic_inc(&alg->cra_refcnt);
+ refcount_inc(&alg->cra_refcnt);
return alg;
}
static inline void crypto_alg_put(struct crypto_alg *alg)
{
- if (atomic_dec_and_test(&alg->cra_refcnt) && alg->cra_destroy)
+ if (refcount_dec_and_test(&alg->cra_refcnt) && alg->cra_destroy)
alg->cra_destroy(alg);
}
diff --git a/crypto/keywrap.c b/crypto/keywrap.c
index 744e35134c45..ec5c6a087c90 100644
--- a/crypto/keywrap.c
+++ b/crypto/keywrap.c
@@ -188,7 +188,7 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
}
/* Perform authentication check */
- if (block.A != cpu_to_be64(0xa6a6a6a6a6a6a6a6))
+ if (block.A != cpu_to_be64(0xa6a6a6a6a6a6a6a6ULL))
ret = -EBADMSG;
memzero_explicit(&block, sizeof(struct crypto_kw_block));
@@ -221,7 +221,7 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
* Place the predefined IV into block A -- for encrypt, the caller
 * does not need to provide an IV, but must fetch the final IV.
*/
- block.A = cpu_to_be64(0xa6a6a6a6a6a6a6a6);
+ block.A = cpu_to_be64(0xa6a6a6a6a6a6a6a6ULL);
/*
* src scatterlist is read-only. dst scatterlist is r/w. During the
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
index eca04d3729b3..fe5129d6ff4e 100644
--- a/crypto/mcryptd.c
+++ b/crypto/mcryptd.c
@@ -26,7 +26,6 @@
#include <linux/sched.h>
#include <linux/sched/stat.h>
#include <linux/slab.h>
-#include <linux/hardirq.h>
#define MCRYPTD_MAX_CPU_QLEN 100
#define MCRYPTD_BATCH 9
@@ -517,10 +516,9 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
if (err)
goto out_free_inst;
- type = CRYPTO_ALG_ASYNC;
- if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
- type |= CRYPTO_ALG_INTERNAL;
- inst->alg.halg.base.cra_flags = type;
+ inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
+ (alg->cra_flags & (CRYPTO_ALG_INTERNAL |
+ CRYPTO_ALG_OPTIONAL_KEY));
inst->alg.halg.digestsize = halg->digestsize;
inst->alg.halg.statesize = halg->statesize;
@@ -535,7 +533,8 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
inst->alg.finup = mcryptd_hash_finup_enqueue;
inst->alg.export = mcryptd_hash_export;
inst->alg.import = mcryptd_hash_import;
- inst->alg.setkey = mcryptd_hash_setkey;
+ if (crypto_hash_alg_has_setkey(halg))
+ inst->alg.setkey = mcryptd_hash_setkey;
inst->alg.digest = mcryptd_hash_digest_enqueue;
err = ahash_register_instance(tmpl, inst);
diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c
index b1c2d57dc734..b7a3a0613a30 100644
--- a/crypto/poly1305_generic.c
+++ b/crypto/poly1305_generic.c
@@ -47,17 +47,6 @@ int crypto_poly1305_init(struct shash_desc *desc)
}
EXPORT_SYMBOL_GPL(crypto_poly1305_init);
-int crypto_poly1305_setkey(struct crypto_shash *tfm,
- const u8 *key, unsigned int keylen)
-{
- /* Poly1305 requires a unique key for each tag, which implies that
- * we can't set it on the tfm that gets accessed by multiple users
- * simultaneously. Instead we expect the key as the first 32 bytes in
- * the update() call. */
- return -ENOTSUPP;
-}
-EXPORT_SYMBOL_GPL(crypto_poly1305_setkey);
-
static void poly1305_setrkey(struct poly1305_desc_ctx *dctx, const u8 *key)
{
/* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
@@ -76,6 +65,11 @@ static void poly1305_setskey(struct poly1305_desc_ctx *dctx, const u8 *key)
dctx->s[3] = get_unaligned_le32(key + 12);
}
+/*
+ * Poly1305 requires a unique key for each tag, which implies that we can't set
+ * it on the tfm that gets accessed by multiple users simultaneously. Instead we
+ * expect the key as the first 32 bytes in the update() call.
+ */
unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
const u8 *src, unsigned int srclen)
{
@@ -210,7 +204,6 @@ EXPORT_SYMBOL_GPL(crypto_poly1305_update);
int crypto_poly1305_final(struct shash_desc *desc, u8 *dst)
{
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
- __le32 *mac = (__le32 *)dst;
u32 h0, h1, h2, h3, h4;
u32 g0, g1, g2, g3, g4;
u32 mask;
@@ -267,10 +260,10 @@ int crypto_poly1305_final(struct shash_desc *desc, u8 *dst)
h3 = (h3 >> 18) | (h4 << 8);
/* mac = (h + s) % (2^128) */
- f = (f >> 32) + h0 + dctx->s[0]; mac[0] = cpu_to_le32(f);
- f = (f >> 32) + h1 + dctx->s[1]; mac[1] = cpu_to_le32(f);
- f = (f >> 32) + h2 + dctx->s[2]; mac[2] = cpu_to_le32(f);
- f = (f >> 32) + h3 + dctx->s[3]; mac[3] = cpu_to_le32(f);
+ f = (f >> 32) + h0 + dctx->s[0]; put_unaligned_le32(f, dst + 0);
+ f = (f >> 32) + h1 + dctx->s[1]; put_unaligned_le32(f, dst + 4);
+ f = (f >> 32) + h2 + dctx->s[2]; put_unaligned_le32(f, dst + 8);
+ f = (f >> 32) + h3 + dctx->s[3]; put_unaligned_le32(f, dst + 12);
return 0;
}
@@ -281,14 +274,12 @@ static struct shash_alg poly1305_alg = {
.init = crypto_poly1305_init,
.update = crypto_poly1305_update,
.final = crypto_poly1305_final,
- .setkey = crypto_poly1305_setkey,
.descsize = sizeof(struct poly1305_desc_ctx),
.base = {
.cra_name = "poly1305",
.cra_driver_name = "poly1305-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
- .cra_alignmask = sizeof(u32) - 1,
.cra_blocksize = POLY1305_BLOCK_SIZE,
.cra_module = THIS_MODULE,
},
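With the always-failing ->setkey() removed, the one-time key travels in-band: crypto_poly1305_setdesckey() consumes the first 32 bytes fed to update() (16 bytes of r, then 16 bytes of s) and returns how much payload remains. The caller pattern, mirroring crypto_poly1305_update() above:

	unsigned int datalen;

	if (unlikely(!dctx->sset)) {
		datalen = crypto_poly1305_setdesckey(dctx, src, srclen);
		src += srclen - datalen;	/* step over the consumed key bytes */
		srclen = datalen;
	}
	/* src/srclen now describe message bytes only */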
diff --git a/crypto/proc.c b/crypto/proc.c
index 2cc10c96d753..822fcef6d91c 100644
--- a/crypto/proc.c
+++ b/crypto/proc.c
@@ -46,7 +46,7 @@ static int c_show(struct seq_file *m, void *p)
seq_printf(m, "driver : %s\n", alg->cra_driver_name);
seq_printf(m, "module : %s\n", module_name(alg->cra_module));
seq_printf(m, "priority : %d\n", alg->cra_priority);
- seq_printf(m, "refcnt : %d\n", atomic_read(&alg->cra_refcnt));
+ seq_printf(m, "refcnt : %u\n", refcount_read(&alg->cra_refcnt));
seq_printf(m, "selftest : %s\n",
(alg->cra_flags & CRYPTO_ALG_TESTED) ?
"passed" : "unknown");
diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c
index d7da0eea5622..5074006a56c3 100644
--- a/crypto/salsa20_generic.c
+++ b/crypto/salsa20_generic.c
@@ -19,49 +19,19 @@
*
*/
-#include <linux/init.h>
+#include <asm/unaligned.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/salsa20.h>
#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/crypto.h>
-#include <linux/types.h>
-#include <linux/bitops.h>
-#include <crypto/algapi.h>
-#include <asm/byteorder.h>
-#define SALSA20_IV_SIZE 8U
-#define SALSA20_MIN_KEY_SIZE 16U
-#define SALSA20_MAX_KEY_SIZE 32U
-
-/*
- * Start of code taken from D. J. Bernstein's reference implementation.
- * With some modifications and optimizations made to suit our needs.
- */
-
-/*
-salsa20-ref.c version 20051118
-D. J. Bernstein
-Public domain.
-*/
-
-#define U32TO8_LITTLE(p, v) \
- { (p)[0] = (v >> 0) & 0xff; (p)[1] = (v >> 8) & 0xff; \
- (p)[2] = (v >> 16) & 0xff; (p)[3] = (v >> 24) & 0xff; }
-#define U8TO32_LITTLE(p) \
- (((u32)((p)[0]) ) | ((u32)((p)[1]) << 8) | \
- ((u32)((p)[2]) << 16) | ((u32)((p)[3]) << 24) )
-
-struct salsa20_ctx
-{
- u32 input[16];
-};
-
-static void salsa20_wordtobyte(u8 output[64], const u32 input[16])
+static void salsa20_block(u32 *state, __le32 *stream)
{
u32 x[16];
int i;
- memcpy(x, input, sizeof(x));
- for (i = 20; i > 0; i -= 2) {
+ memcpy(x, state, sizeof(x));
+
+ for (i = 0; i < 20; i += 2) {
x[ 4] ^= rol32((x[ 0] + x[12]), 7);
x[ 8] ^= rol32((x[ 4] + x[ 0]), 9);
x[12] ^= rol32((x[ 8] + x[ 4]), 13);
@@ -95,145 +65,137 @@ static void salsa20_wordtobyte(u8 output[64], const u32 input[16])
x[14] ^= rol32((x[13] + x[12]), 13);
x[15] ^= rol32((x[14] + x[13]), 18);
}
- for (i = 0; i < 16; ++i)
- x[i] += input[i];
- for (i = 0; i < 16; ++i)
- U32TO8_LITTLE(output + 4 * i,x[i]);
-}
-static const char sigma[16] = "expand 32-byte k";
-static const char tau[16] = "expand 16-byte k";
+ for (i = 0; i < 16; i++)
+ stream[i] = cpu_to_le32(x[i] + state[i]);
+
+ if (++state[8] == 0)
+ state[9]++;
+}
-static void salsa20_keysetup(struct salsa20_ctx *ctx, const u8 *k, u32 kbytes)
+static void salsa20_docrypt(u32 *state, u8 *dst, const u8 *src,
+ unsigned int bytes)
{
- const char *constants;
+ __le32 stream[SALSA20_BLOCK_SIZE / sizeof(__le32)];
- ctx->input[1] = U8TO32_LITTLE(k + 0);
- ctx->input[2] = U8TO32_LITTLE(k + 4);
- ctx->input[3] = U8TO32_LITTLE(k + 8);
- ctx->input[4] = U8TO32_LITTLE(k + 12);
- if (kbytes == 32) { /* recommended */
- k += 16;
- constants = sigma;
- } else { /* kbytes == 16 */
- constants = tau;
+ if (dst != src)
+ memcpy(dst, src, bytes);
+
+ while (bytes >= SALSA20_BLOCK_SIZE) {
+ salsa20_block(state, stream);
+ crypto_xor(dst, (const u8 *)stream, SALSA20_BLOCK_SIZE);
+ bytes -= SALSA20_BLOCK_SIZE;
+ dst += SALSA20_BLOCK_SIZE;
+ }
+ if (bytes) {
+ salsa20_block(state, stream);
+ crypto_xor(dst, (const u8 *)stream, bytes);
}
- ctx->input[11] = U8TO32_LITTLE(k + 0);
- ctx->input[12] = U8TO32_LITTLE(k + 4);
- ctx->input[13] = U8TO32_LITTLE(k + 8);
- ctx->input[14] = U8TO32_LITTLE(k + 12);
- ctx->input[0] = U8TO32_LITTLE(constants + 0);
- ctx->input[5] = U8TO32_LITTLE(constants + 4);
- ctx->input[10] = U8TO32_LITTLE(constants + 8);
- ctx->input[15] = U8TO32_LITTLE(constants + 12);
}
-static void salsa20_ivsetup(struct salsa20_ctx *ctx, const u8 *iv)
+void crypto_salsa20_init(u32 *state, const struct salsa20_ctx *ctx,
+ const u8 *iv)
{
- ctx->input[6] = U8TO32_LITTLE(iv + 0);
- ctx->input[7] = U8TO32_LITTLE(iv + 4);
- ctx->input[8] = 0;
- ctx->input[9] = 0;
+ memcpy(state, ctx->initial_state, sizeof(ctx->initial_state));
+ state[6] = get_unaligned_le32(iv + 0);
+ state[7] = get_unaligned_le32(iv + 4);
}
+EXPORT_SYMBOL_GPL(crypto_salsa20_init);
-static void salsa20_encrypt_bytes(struct salsa20_ctx *ctx, u8 *dst,
- const u8 *src, unsigned int bytes)
+int crypto_salsa20_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keysize)
{
- u8 buf[64];
-
- if (dst != src)
- memcpy(dst, src, bytes);
-
- while (bytes) {
- salsa20_wordtobyte(buf, ctx->input);
-
- ctx->input[8]++;
- if (!ctx->input[8])
- ctx->input[9]++;
+ static const char sigma[16] = "expand 32-byte k";
+ static const char tau[16] = "expand 16-byte k";
+ struct salsa20_ctx *ctx = crypto_skcipher_ctx(tfm);
+ const char *constants;
- if (bytes <= 64) {
- crypto_xor(dst, buf, bytes);
- return;
- }
+ if (keysize != SALSA20_MIN_KEY_SIZE &&
+ keysize != SALSA20_MAX_KEY_SIZE)
+ return -EINVAL;
- crypto_xor(dst, buf, 64);
- bytes -= 64;
- dst += 64;
+ ctx->initial_state[1] = get_unaligned_le32(key + 0);
+ ctx->initial_state[2] = get_unaligned_le32(key + 4);
+ ctx->initial_state[3] = get_unaligned_le32(key + 8);
+ ctx->initial_state[4] = get_unaligned_le32(key + 12);
+ if (keysize == 32) { /* recommended */
+ key += 16;
+ constants = sigma;
+ } else { /* keysize == 16 */
+ constants = tau;
}
-}
-
-/*
- * End of code taken from D. J. Bernstein's reference implementation.
- */
+ ctx->initial_state[11] = get_unaligned_le32(key + 0);
+ ctx->initial_state[12] = get_unaligned_le32(key + 4);
+ ctx->initial_state[13] = get_unaligned_le32(key + 8);
+ ctx->initial_state[14] = get_unaligned_le32(key + 12);
+ ctx->initial_state[0] = get_unaligned_le32(constants + 0);
+ ctx->initial_state[5] = get_unaligned_le32(constants + 4);
+ ctx->initial_state[10] = get_unaligned_le32(constants + 8);
+ ctx->initial_state[15] = get_unaligned_le32(constants + 12);
+
+ /* space for the nonce; it will be overridden for each request */
+ ctx->initial_state[6] = 0;
+ ctx->initial_state[7] = 0;
+
+ /* initial block number */
+ ctx->initial_state[8] = 0;
+ ctx->initial_state[9] = 0;
-static int setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keysize)
-{
- struct salsa20_ctx *ctx = crypto_tfm_ctx(tfm);
- salsa20_keysetup(ctx, key, keysize);
return 0;
}
+EXPORT_SYMBOL_GPL(crypto_salsa20_setkey);
-static int encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+static int salsa20_crypt(struct skcipher_request *req)
{
- struct blkcipher_walk walk;
- struct crypto_blkcipher *tfm = desc->tfm;
- struct salsa20_ctx *ctx = crypto_blkcipher_ctx(tfm);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct salsa20_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ u32 state[16];
int err;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt_block(desc, &walk, 64);
+ err = skcipher_walk_virt(&walk, req, true);
- salsa20_ivsetup(ctx, walk.iv);
+ crypto_salsa20_init(state, ctx, walk.iv);
- while (walk.nbytes >= 64) {
- salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
- walk.src.virt.addr,
- walk.nbytes - (walk.nbytes % 64));
- err = blkcipher_walk_done(desc, &walk, walk.nbytes % 64);
- }
+ while (walk.nbytes > 0) {
+ unsigned int nbytes = walk.nbytes;
- if (walk.nbytes) {
- salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
- walk.src.virt.addr, walk.nbytes);
- err = blkcipher_walk_done(desc, &walk, 0);
+ if (nbytes < walk.total)
+ nbytes = round_down(nbytes, walk.stride);
+
+ salsa20_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes);
+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
}
return err;
}
-static struct crypto_alg alg = {
- .cra_name = "salsa20",
- .cra_driver_name = "salsa20-generic",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_type = &crypto_blkcipher_type,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct salsa20_ctx),
- .cra_alignmask = 3,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .setkey = setkey,
- .encrypt = encrypt,
- .decrypt = encrypt,
- .min_keysize = SALSA20_MIN_KEY_SIZE,
- .max_keysize = SALSA20_MAX_KEY_SIZE,
- .ivsize = SALSA20_IV_SIZE,
- }
- }
+static struct skcipher_alg alg = {
+ .base.cra_name = "salsa20",
+ .base.cra_driver_name = "salsa20-generic",
+ .base.cra_priority = 100,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct salsa20_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = SALSA20_MIN_KEY_SIZE,
+ .max_keysize = SALSA20_MAX_KEY_SIZE,
+ .ivsize = SALSA20_IV_SIZE,
+ .chunksize = SALSA20_BLOCK_SIZE,
+ .setkey = crypto_salsa20_setkey,
+ .encrypt = salsa20_crypt,
+ .decrypt = salsa20_crypt,
};
static int __init salsa20_generic_mod_init(void)
{
- return crypto_register_alg(&alg);
+ return crypto_register_skcipher(&alg);
}
static void __exit salsa20_generic_mod_fini(void)
{
- crypto_unregister_alg(&alg);
+ crypto_unregister_skcipher(&alg);
}
module_init(salsa20_generic_mod_init);
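Both .encrypt and .decrypt point at salsa20_crypt() because, like ChaCha20 earlier in this series, Salsa20 only XORs a keystream into the data, and XOR with the same keystream is its own inverse. In miniature:

static void xor_stream(u8 *buf, const u8 *stream, unsigned int n)
{
	while (n--)
		*buf++ ^= *stream++;
}
/* xor_stream(msg, ks, n); xor_stream(msg, ks, n); -- msg is plaintext again */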
diff --git a/crypto/scompress.c b/crypto/scompress.c
index 2075e2c4e7df..968bbcf65c94 100644
--- a/crypto/scompress.c
+++ b/crypto/scompress.c
@@ -140,53 +140,6 @@ static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
return ret;
}
-static void crypto_scomp_sg_free(struct scatterlist *sgl)
-{
- int i, n;
- struct page *page;
-
- if (!sgl)
- return;
-
- n = sg_nents(sgl);
- for_each_sg(sgl, sgl, n, i) {
- page = sg_page(sgl);
- if (page)
- __free_page(page);
- }
-
- kfree(sgl);
-}
-
-static struct scatterlist *crypto_scomp_sg_alloc(size_t size, gfp_t gfp)
-{
- struct scatterlist *sgl;
- struct page *page;
- int i, n;
-
- n = ((size - 1) >> PAGE_SHIFT) + 1;
-
- sgl = kmalloc_array(n, sizeof(struct scatterlist), gfp);
- if (!sgl)
- return NULL;
-
- sg_init_table(sgl, n);
-
- for (i = 0; i < n; i++) {
- page = alloc_page(gfp);
- if (!page)
- goto err;
- sg_set_page(sgl + i, page, PAGE_SIZE, 0);
- }
-
- return sgl;
-
-err:
- sg_mark_end(sgl + i);
- crypto_scomp_sg_free(sgl);
- return NULL;
-}
-
static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
@@ -220,7 +173,7 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
scratch_dst, &req->dlen, *ctx);
if (!ret) {
if (!req->dst) {
- req->dst = crypto_scomp_sg_alloc(req->dlen, GFP_ATOMIC);
+ req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL);
if (!req->dst)
goto out;
}
@@ -274,7 +227,7 @@ int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
crt->compress = scomp_acomp_compress;
crt->decompress = scomp_acomp_decompress;
- crt->dst_free = crypto_scomp_sg_free;
+ crt->dst_free = sgl_free;
crt->reqsize = sizeof(void *);
return 0;
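The two open-coded helpers deleted above are replaced by the generic lib/scatterlist allocators pulled in through the SGL_ALLOC select in the Kconfig hunk earlier. The replacement usage, as in scomp_acomp_comp_decomp():

	struct scatterlist *sgl;

	sgl = sgl_alloc(len, GFP_ATOMIC, NULL);	/* NULL: nent count not wanted */
	if (!sgl)
		return -ENOMEM;
	/* ... attach sgl to the request ... */
	sgl_free(sgl);				/* frees the pages and the table */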
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index 570b7d1aa0ca..39dbf2f7e5f5 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -144,8 +144,6 @@ static int seqiv_aead_decrypt(struct aead_request *req)
static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct aead_instance *inst;
- struct crypto_aead_spawn *spawn;
- struct aead_alg *alg;
int err;
inst = aead_geniv_alloc(tmpl, tb, 0, 0);
@@ -153,9 +151,6 @@ static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb)
if (IS_ERR(inst))
return PTR_ERR(inst);
- spawn = aead_instance_ctx(inst);
- alg = crypto_spawn_aead_alg(spawn);
-
err = -EINVAL;
if (inst->alg.ivsize != sizeof(u64))
goto free_inst;
diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c
index 7e8ed96236ce..a965b9d80559 100644
--- a/crypto/sha3_generic.c
+++ b/crypto/sha3_generic.c
@@ -5,6 +5,7 @@
* http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf
*
* SHA-3 code by Jeff Garzik <jeff@garzik.org>
+ * Ard Biesheuvel <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -17,12 +18,10 @@
#include <linux/module.h>
#include <linux/types.h>
#include <crypto/sha3.h>
-#include <asm/byteorder.h>
+#include <asm/unaligned.h>
#define KECCAK_ROUNDS 24
-#define ROTL64(x, y) (((x) << (y)) | ((x) >> (64 - (y))))
-
static const u64 keccakf_rndc[24] = {
0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808aULL,
0x8000000080008000ULL, 0x000000000000808bULL, 0x0000000080000001ULL,
@@ -34,100 +33,133 @@ static const u64 keccakf_rndc[24] = {
0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL
};
-static const int keccakf_rotc[24] = {
- 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14,
- 27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44
-};
-
-static const int keccakf_piln[24] = {
- 10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4,
- 15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1
-};
-
/* update the state with given number of rounds */
-static void keccakf(u64 st[25])
+static void __attribute__((__optimize__("O3"))) keccakf(u64 st[25])
{
- int i, j, round;
- u64 t, bc[5];
+ u64 t[5], tt, bc[5];
+ int round;
for (round = 0; round < KECCAK_ROUNDS; round++) {
/* Theta */
- for (i = 0; i < 5; i++)
- bc[i] = st[i] ^ st[i + 5] ^ st[i + 10] ^ st[i + 15]
- ^ st[i + 20];
-
- for (i = 0; i < 5; i++) {
- t = bc[(i + 4) % 5] ^ ROTL64(bc[(i + 1) % 5], 1);
- for (j = 0; j < 25; j += 5)
- st[j + i] ^= t;
- }
+ bc[0] = st[0] ^ st[5] ^ st[10] ^ st[15] ^ st[20];
+ bc[1] = st[1] ^ st[6] ^ st[11] ^ st[16] ^ st[21];
+ bc[2] = st[2] ^ st[7] ^ st[12] ^ st[17] ^ st[22];
+ bc[3] = st[3] ^ st[8] ^ st[13] ^ st[18] ^ st[23];
+ bc[4] = st[4] ^ st[9] ^ st[14] ^ st[19] ^ st[24];
+
+ t[0] = bc[4] ^ rol64(bc[1], 1);
+ t[1] = bc[0] ^ rol64(bc[2], 1);
+ t[2] = bc[1] ^ rol64(bc[3], 1);
+ t[3] = bc[2] ^ rol64(bc[4], 1);
+ t[4] = bc[3] ^ rol64(bc[0], 1);
+
+ st[0] ^= t[0];
/* Rho Pi */
- t = st[1];
- for (i = 0; i < 24; i++) {
- j = keccakf_piln[i];
- bc[0] = st[j];
- st[j] = ROTL64(t, keccakf_rotc[i]);
- t = bc[0];
- }
+ tt = st[1];
+ st[ 1] = rol64(st[ 6] ^ t[1], 44);
+ st[ 6] = rol64(st[ 9] ^ t[4], 20);
+ st[ 9] = rol64(st[22] ^ t[2], 61);
+ st[22] = rol64(st[14] ^ t[4], 39);
+ st[14] = rol64(st[20] ^ t[0], 18);
+ st[20] = rol64(st[ 2] ^ t[2], 62);
+ st[ 2] = rol64(st[12] ^ t[2], 43);
+ st[12] = rol64(st[13] ^ t[3], 25);
+ st[13] = rol64(st[19] ^ t[4], 8);
+ st[19] = rol64(st[23] ^ t[3], 56);
+ st[23] = rol64(st[15] ^ t[0], 41);
+ st[15] = rol64(st[ 4] ^ t[4], 27);
+ st[ 4] = rol64(st[24] ^ t[4], 14);
+ st[24] = rol64(st[21] ^ t[1], 2);
+ st[21] = rol64(st[ 8] ^ t[3], 55);
+ st[ 8] = rol64(st[16] ^ t[1], 45);
+ st[16] = rol64(st[ 5] ^ t[0], 36);
+ st[ 5] = rol64(st[ 3] ^ t[3], 28);
+ st[ 3] = rol64(st[18] ^ t[3], 21);
+ st[18] = rol64(st[17] ^ t[2], 15);
+ st[17] = rol64(st[11] ^ t[1], 10);
+ st[11] = rol64(st[ 7] ^ t[2], 6);
+ st[ 7] = rol64(st[10] ^ t[0], 3);
+ st[10] = rol64( tt ^ t[1], 1);
/* Chi */
- for (j = 0; j < 25; j += 5) {
- for (i = 0; i < 5; i++)
- bc[i] = st[j + i];
- for (i = 0; i < 5; i++)
- st[j + i] ^= (~bc[(i + 1) % 5]) &
- bc[(i + 2) % 5];
- }
+ bc[ 0] = ~st[ 1] & st[ 2];
+ bc[ 1] = ~st[ 2] & st[ 3];
+ bc[ 2] = ~st[ 3] & st[ 4];
+ bc[ 3] = ~st[ 4] & st[ 0];
+ bc[ 4] = ~st[ 0] & st[ 1];
+ st[ 0] ^= bc[ 0];
+ st[ 1] ^= bc[ 1];
+ st[ 2] ^= bc[ 2];
+ st[ 3] ^= bc[ 3];
+ st[ 4] ^= bc[ 4];
+
+ bc[ 0] = ~st[ 6] & st[ 7];
+ bc[ 1] = ~st[ 7] & st[ 8];
+ bc[ 2] = ~st[ 8] & st[ 9];
+ bc[ 3] = ~st[ 9] & st[ 5];
+ bc[ 4] = ~st[ 5] & st[ 6];
+ st[ 5] ^= bc[ 0];
+ st[ 6] ^= bc[ 1];
+ st[ 7] ^= bc[ 2];
+ st[ 8] ^= bc[ 3];
+ st[ 9] ^= bc[ 4];
+
+ bc[ 0] = ~st[11] & st[12];
+ bc[ 1] = ~st[12] & st[13];
+ bc[ 2] = ~st[13] & st[14];
+ bc[ 3] = ~st[14] & st[10];
+ bc[ 4] = ~st[10] & st[11];
+ st[10] ^= bc[ 0];
+ st[11] ^= bc[ 1];
+ st[12] ^= bc[ 2];
+ st[13] ^= bc[ 3];
+ st[14] ^= bc[ 4];
+
+ bc[ 0] = ~st[16] & st[17];
+ bc[ 1] = ~st[17] & st[18];
+ bc[ 2] = ~st[18] & st[19];
+ bc[ 3] = ~st[19] & st[15];
+ bc[ 4] = ~st[15] & st[16];
+ st[15] ^= bc[ 0];
+ st[16] ^= bc[ 1];
+ st[17] ^= bc[ 2];
+ st[18] ^= bc[ 3];
+ st[19] ^= bc[ 4];
+
+ bc[ 0] = ~st[21] & st[22];
+ bc[ 1] = ~st[22] & st[23];
+ bc[ 2] = ~st[23] & st[24];
+ bc[ 3] = ~st[24] & st[20];
+ bc[ 4] = ~st[20] & st[21];
+ st[20] ^= bc[ 0];
+ st[21] ^= bc[ 1];
+ st[22] ^= bc[ 2];
+ st[23] ^= bc[ 3];
+ st[24] ^= bc[ 4];
/* Iota */
st[0] ^= keccakf_rndc[round];
}
}
-static void sha3_init(struct sha3_state *sctx, unsigned int digest_sz)
-{
- memset(sctx, 0, sizeof(*sctx));
- sctx->md_len = digest_sz;
- sctx->rsiz = 200 - 2 * digest_sz;
- sctx->rsizw = sctx->rsiz / 8;
-}
-
-static int sha3_224_init(struct shash_desc *desc)
-{
- struct sha3_state *sctx = shash_desc_ctx(desc);
-
- sha3_init(sctx, SHA3_224_DIGEST_SIZE);
- return 0;
-}
-
-static int sha3_256_init(struct shash_desc *desc)
+int crypto_sha3_init(struct shash_desc *desc)
{
struct sha3_state *sctx = shash_desc_ctx(desc);
+ unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
- sha3_init(sctx, SHA3_256_DIGEST_SIZE);
- return 0;
-}
-
-static int sha3_384_init(struct shash_desc *desc)
-{
- struct sha3_state *sctx = shash_desc_ctx(desc);
-
- sha3_init(sctx, SHA3_384_DIGEST_SIZE);
- return 0;
-}
-
-static int sha3_512_init(struct shash_desc *desc)
-{
- struct sha3_state *sctx = shash_desc_ctx(desc);
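+ /*
+ * The sponge rate is the 200-byte state minus twice the digest
+ * size (the capacity); rsizw is the same rate in 64-bit words.
+ */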
+ sctx->rsiz = 200 - 2 * digest_size;
+ sctx->rsizw = sctx->rsiz / 8;
+ sctx->partial = 0;
- sha3_init(sctx, SHA3_512_DIGEST_SIZE);
+ memset(sctx->st, 0, sizeof(sctx->st));
return 0;
}
+EXPORT_SYMBOL(crypto_sha3_init);
-static int sha3_update(struct shash_desc *desc, const u8 *data,
+int crypto_sha3_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct sha3_state *sctx = shash_desc_ctx(desc);
@@ -149,7 +181,7 @@ static int sha3_update(struct shash_desc *desc, const u8 *data,
unsigned int i;
for (i = 0; i < sctx->rsizw; i++)
- sctx->st[i] ^= ((u64 *) src)[i];
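+ /* Absorb as little-endian words; the input may be unaligned */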
+ sctx->st[i] ^= get_unaligned_le64(src + 8 * i);
keccakf(sctx->st);
done += sctx->rsiz;
@@ -163,125 +195,89 @@ static int sha3_update(struct shash_desc *desc, const u8 *data,
return 0;
}
+EXPORT_SYMBOL(crypto_sha3_update);
-static int sha3_final(struct shash_desc *desc, u8 *out)
+int crypto_sha3_final(struct shash_desc *desc, u8 *out)
{
struct sha3_state *sctx = shash_desc_ctx(desc);
unsigned int i, inlen = sctx->partial;
+ unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
+ __le64 *digest = (__le64 *)out;
sctx->buf[inlen++] = 0x06;
memset(sctx->buf + inlen, 0, sctx->rsiz - inlen);
sctx->buf[sctx->rsiz - 1] |= 0x80;
for (i = 0; i < sctx->rsizw; i++)
- sctx->st[i] ^= ((u64 *) sctx->buf)[i];
+ sctx->st[i] ^= get_unaligned_le64(sctx->buf + 8 * i);
keccakf(sctx->st);
- for (i = 0; i < sctx->rsizw; i++)
- sctx->st[i] = cpu_to_le64(sctx->st[i]);
+ for (i = 0; i < digest_size / 8; i++)
+ put_unaligned_le64(sctx->st[i], digest++);
- memcpy(out, sctx->st, sctx->md_len);
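+ /* SHA3-224's 28-byte digest leaves a trailing 32-bit word */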
+ if (digest_size & 4)
+ put_unaligned_le32(sctx->st[i], (__le32 *)digest);
memset(sctx, 0, sizeof(*sctx));
return 0;
}
-
-static struct shash_alg sha3_224 = {
- .digestsize = SHA3_224_DIGEST_SIZE,
- .init = sha3_224_init,
- .update = sha3_update,
- .final = sha3_final,
- .descsize = sizeof(struct sha3_state),
- .base = {
- .cra_name = "sha3-224",
- .cra_driver_name = "sha3-224-generic",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
- .cra_blocksize = SHA3_224_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-};
-
-static struct shash_alg sha3_256 = {
- .digestsize = SHA3_256_DIGEST_SIZE,
- .init = sha3_256_init,
- .update = sha3_update,
- .final = sha3_final,
- .descsize = sizeof(struct sha3_state),
- .base = {
- .cra_name = "sha3-256",
- .cra_driver_name = "sha3-256-generic",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
- .cra_blocksize = SHA3_256_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-};
-
-static struct shash_alg sha3_384 = {
- .digestsize = SHA3_384_DIGEST_SIZE,
- .init = sha3_384_init,
- .update = sha3_update,
- .final = sha3_final,
- .descsize = sizeof(struct sha3_state),
- .base = {
- .cra_name = "sha3-384",
- .cra_driver_name = "sha3-384-generic",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
- .cra_blocksize = SHA3_384_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-};
-
-static struct shash_alg sha3_512 = {
- .digestsize = SHA3_512_DIGEST_SIZE,
- .init = sha3_512_init,
- .update = sha3_update,
- .final = sha3_final,
- .descsize = sizeof(struct sha3_state),
- .base = {
- .cra_name = "sha3-512",
- .cra_driver_name = "sha3-512-generic",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
- .cra_blocksize = SHA3_512_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- }
-};
+EXPORT_SYMBOL(crypto_sha3_final);
+
+static struct shash_alg algs[] = { {
+ .digestsize = SHA3_224_DIGEST_SIZE,
+ .init = crypto_sha3_init,
+ .update = crypto_sha3_update,
+ .final = crypto_sha3_final,
+ .descsize = sizeof(struct sha3_state),
+ .base.cra_name = "sha3-224",
+ .base.cra_driver_name = "sha3-224-generic",
+ .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_blocksize = SHA3_224_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+}, {
+ .digestsize = SHA3_256_DIGEST_SIZE,
+ .init = crypto_sha3_init,
+ .update = crypto_sha3_update,
+ .final = crypto_sha3_final,
+ .descsize = sizeof(struct sha3_state),
+ .base.cra_name = "sha3-256",
+ .base.cra_driver_name = "sha3-256-generic",
+ .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_blocksize = SHA3_256_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+}, {
+ .digestsize = SHA3_384_DIGEST_SIZE,
+ .init = crypto_sha3_init,
+ .update = crypto_sha3_update,
+ .final = crypto_sha3_final,
+ .descsize = sizeof(struct sha3_state),
+ .base.cra_name = "sha3-384",
+ .base.cra_driver_name = "sha3-384-generic",
+ .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_blocksize = SHA3_384_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+}, {
+ .digestsize = SHA3_512_DIGEST_SIZE,
+ .init = crypto_sha3_init,
+ .update = crypto_sha3_update,
+ .final = crypto_sha3_final,
+ .descsize = sizeof(struct sha3_state),
+ .base.cra_name = "sha3-512",
+ .base.cra_driver_name = "sha3-512-generic",
+ .base.cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .base.cra_blocksize = SHA3_512_BLOCK_SIZE,
+ .base.cra_module = THIS_MODULE,
+} };
static int __init sha3_generic_mod_init(void)
{
- int ret;
-
- ret = crypto_register_shash(&sha3_224);
- if (ret < 0)
- goto err_out;
- ret = crypto_register_shash(&sha3_256);
- if (ret < 0)
- goto err_out_224;
- ret = crypto_register_shash(&sha3_384);
- if (ret < 0)
- goto err_out_256;
- ret = crypto_register_shash(&sha3_512);
- if (ret < 0)
- goto err_out_384;
-
- return 0;
-
-err_out_384:
- crypto_unregister_shash(&sha3_384);
-err_out_256:
- crypto_unregister_shash(&sha3_256);
-err_out_224:
- crypto_unregister_shash(&sha3_224);
-err_out:
- return ret;
+ return crypto_register_shashes(algs, ARRAY_SIZE(algs));
}
static void __exit sha3_generic_mod_fini(void)
{
- crypto_unregister_shash(&sha3_224);
- crypto_unregister_shash(&sha3_256);
- crypto_unregister_shash(&sha3_384);
- crypto_unregister_shash(&sha3_512);
+ crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
}
module_init(sha3_generic_mod_init);
diff --git a/crypto/shash.c b/crypto/shash.c
index e849d3ee2e27..5d732c6bb4b2 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -58,11 +58,18 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
{
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
+ int err;
if ((unsigned long)key & alignmask)
- return shash_setkey_unaligned(tfm, key, keylen);
+ err = shash_setkey_unaligned(tfm, key, keylen);
+ else
+ err = shash->setkey(tfm, key, keylen);
+
+ if (err)
+ return err;
- return shash->setkey(tfm, key, keylen);
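+ /* A key is now set: clear NEED_KEY so hashing may proceed */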
+ crypto_shash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
+ return 0;
}
EXPORT_SYMBOL_GPL(crypto_shash_setkey);
@@ -181,6 +188,9 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
+ if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
if (((unsigned long)data | (unsigned long)out) & alignmask)
return shash_digest_unaligned(desc, data, len, out);
@@ -360,7 +370,8 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
crt->digest = shash_async_digest;
crt->setkey = shash_async_setkey;
- crt->has_setkey = alg->setkey != shash_no_setkey;
+ crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) &
+ CRYPTO_TFM_NEED_KEY);
if (alg->export)
crt->export = shash_async_export;
@@ -375,8 +386,14 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
static int crypto_shash_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_shash *hash = __crypto_shash_cast(tfm);
+ struct shash_alg *alg = crypto_shash_alg(hash);
+
+ hash->descsize = alg->descsize;
+
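+ /*
+ * Keyed hashes without CRYPTO_ALG_OPTIONAL_KEY start in the
+ * NEED_KEY state and fail with -ENOKEY until setkey() succeeds.
+ */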
+ if (crypto_shash_alg_has_setkey(alg) &&
+ !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
+ crypto_shash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
- hash->descsize = crypto_shash_alg(hash)->descsize;
return 0;
}
diff --git a/crypto/simd.c b/crypto/simd.c
index 88203370a62f..208226d7f908 100644
--- a/crypto/simd.c
+++ b/crypto/simd.c
@@ -19,9 +19,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 11af5fd6a443..0fe2a2923ad0 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -598,8 +598,11 @@ static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
err = crypto_blkcipher_setkey(blkcipher, key, keylen);
crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
CRYPTO_TFM_RES_MASK);
+ if (err)
+ return err;
- return err;
+ crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
+ return 0;
}
static int skcipher_crypt_blkcipher(struct skcipher_request *req,
@@ -674,6 +677,9 @@ static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
skcipher->keysize = calg->cra_blkcipher.max_keysize;
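+ /* A non-zero max keysize means the cipher cannot run unkeyed */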
+ if (skcipher->keysize)
+ crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);
+
return 0;
}
@@ -692,8 +698,11 @@ static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
crypto_skcipher_set_flags(tfm,
crypto_ablkcipher_get_flags(ablkcipher) &
CRYPTO_TFM_RES_MASK);
+ if (err)
+ return err;
- return err;
+ crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
+ return 0;
}
static int skcipher_crypt_ablkcipher(struct skcipher_request *req,
@@ -767,6 +776,9 @@ static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
sizeof(struct ablkcipher_request);
skcipher->keysize = calg->cra_ablkcipher.max_keysize;
+ if (skcipher->keysize)
+ crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);
+
return 0;
}
@@ -796,6 +808,7 @@ static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
{
struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
unsigned long alignmask = crypto_skcipher_alignmask(tfm);
+ int err;
if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
@@ -803,9 +816,15 @@ static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
}
if ((unsigned long)key & alignmask)
- return skcipher_setkey_unaligned(tfm, key, keylen);
+ err = skcipher_setkey_unaligned(tfm, key, keylen);
+ else
+ err = cipher->setkey(tfm, key, keylen);
+
+ if (err)
+ return err;
- return cipher->setkey(tfm, key, keylen);
+ crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
+ return 0;
}
static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
@@ -834,6 +853,9 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
skcipher->ivsize = alg->ivsize;
skcipher->keysize = alg->max_keysize;
+ if (skcipher->keysize)
+ crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);
+
if (alg->exit)
skcipher->base.exit = crypto_skcipher_exit_tfm;
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 9267cbdb14d2..14213a096fd2 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -67,6 +67,7 @@ static char *alg = NULL;
static u32 type;
static u32 mask;
static int mode;
+static u32 num_mb = 8;
static char *tvmem[TVMEMSIZE];
static char *check[] = {
@@ -79,6 +80,66 @@ static char *check[] = {
NULL
};
+static u32 block_sizes[] = { 16, 64, 256, 1024, 8192, 0 };
+static u32 aead_sizes[] = { 16, 64, 256, 512, 1024, 2048, 4096, 8192, 0 };
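+/* Both size lists are walked until their terminating 0 entry */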
+
+#define XBUFSIZE 8
+#define MAX_IVLEN 32
+
+static int testmgr_alloc_buf(char *buf[XBUFSIZE])
+{
+ int i;
+
+ for (i = 0; i < XBUFSIZE; i++) {
+ buf[i] = (void *)__get_free_page(GFP_KERNEL);
+ if (!buf[i])
+ goto err_free_buf;
+ }
+
+ return 0;
+
+err_free_buf:
+ while (i-- > 0)
+ free_page((unsigned long)buf[i]);
+
+ return -ENOMEM;
+}
+
+static void testmgr_free_buf(char *buf[XBUFSIZE])
+{
+ int i;
+
+ for (i = 0; i < XBUFSIZE; i++)
+ free_page((unsigned long)buf[i]);
+}
+
+static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE],
+ unsigned int buflen, const void *assoc,
+ unsigned int aad_size)
+{
+ int np = (buflen + PAGE_SIZE - 1)/PAGE_SIZE;
+ int k, rem;
+
+ if (np > XBUFSIZE) {
+ rem = PAGE_SIZE;
+ np = XBUFSIZE;
+ } else {
+ rem = buflen % PAGE_SIZE;
+ }
+
+ sg_init_table(sg, np + 1);
+
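+ /* Slot 0 carries the associated data; payload pages follow */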
+ sg_set_buf(&sg[0], assoc, aad_size);
+
+ if (rem)
+ np--;
+ for (k = 0; k < np; k++)
+ sg_set_buf(&sg[k + 1], xbuf[k], PAGE_SIZE);
+
+ if (rem)
+ sg_set_buf(&sg[k + 1], xbuf[k], rem);
+}
+
static inline int do_one_aead_op(struct aead_request *req, int ret)
{
struct crypto_wait *wait = req->base.data;
@@ -86,6 +147,298 @@ static inline int do_one_aead_op(struct aead_request *req, int ret)
return crypto_wait_req(ret, wait);
}
+struct test_mb_aead_data {
+ struct scatterlist sg[XBUFSIZE];
+ struct scatterlist sgout[XBUFSIZE];
+ struct aead_request *req;
+ struct crypto_wait wait;
+ char *xbuf[XBUFSIZE];
+ char *xoutbuf[XBUFSIZE];
+ char *axbuf[XBUFSIZE];
+};
+
+static int do_mult_aead_op(struct test_mb_aead_data *data, int enc,
+ u32 num_mb)
+{
+ int i, rc[num_mb], err = 0;
+
+ /* Fire up a bunch of concurrent requests */
+ for (i = 0; i < num_mb; i++) {
+ if (enc == ENCRYPT)
+ rc[i] = crypto_aead_encrypt(data[i].req);
+ else
+ rc[i] = crypto_aead_decrypt(data[i].req);
+ }
+
+ /* Wait for all requests to finish */
+ for (i = 0; i < num_mb; i++) {
+ rc[i] = crypto_wait_req(rc[i], &data[i].wait);
+
+ if (rc[i]) {
+ pr_info("concurrent request %d error %d\n", i, rc[i]);
+ err = rc[i];
+ }
+ }
+
+ return err;
+}
+
+static int test_mb_aead_jiffies(struct test_mb_aead_data *data, int enc,
+ int blen, int secs, u32 num_mb)
+{
+ unsigned long start, end;
+ int bcount;
+ int ret;
+
+ for (start = jiffies, end = start + secs * HZ, bcount = 0;
+ time_before(jiffies, end); bcount++) {
+ ret = do_mult_aead_op(data, enc, num_mb);
+ if (ret)
+ return ret;
+ }
+
+ pr_cont("%d operations in %d seconds (%ld bytes)\n",
+ bcount * num_mb, secs, (long)bcount * blen * num_mb);
+ return 0;
+}
+
+static int test_mb_aead_cycles(struct test_mb_aead_data *data, int enc,
+ int blen, u32 num_mb)
+{
+ unsigned long cycles = 0;
+ int ret = 0;
+ int i;
+
+ /* Warm-up run. */
+ for (i = 0; i < 4; i++) {
+ ret = do_mult_aead_op(data, enc, num_mb);
+ if (ret)
+ goto out;
+ }
+
+ /* The real thing. */
+ for (i = 0; i < 8; i++) {
+ cycles_t start, end;
+
+ start = get_cycles();
+ ret = do_mult_aead_op(data, enc, num_mb);
+ end = get_cycles();
+
+ if (ret)
+ goto out;
+
+ cycles += end - start;
+ }
+
+out:
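+ /* Per-request average over the 8 timed batches of num_mb requests */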
+ if (ret == 0)
+ pr_cont("1 operation in %lu cycles (%d bytes)\n",
+ (cycles + 4) / (8 * num_mb), blen);
+
+ return ret;
+}
+
+static void test_mb_aead_speed(const char *algo, int enc, int secs,
+ struct aead_speed_template *template,
+ unsigned int tcount, u8 authsize,
+ unsigned int aad_size, u8 *keysize, u32 num_mb)
+{
+ struct test_mb_aead_data *data;
+ struct crypto_aead *tfm;
+ unsigned int i, j, iv_len;
+ const char *key;
+ const char *e;
+ void *assoc;
+ u32 *b_size;
+ char *iv;
+ int ret;
+
+ if (aad_size >= PAGE_SIZE) {
+ pr_err("associate data length (%u) too big\n", aad_size);
+ return;
+ }
+
+ iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
+ if (!iv)
+ return;
+
+ if (enc == ENCRYPT)
+ e = "encryption";
+ else
+ e = "decryption";
+
+ data = kcalloc(num_mb, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ goto out_free_iv;
+
+ tfm = crypto_alloc_aead(algo, 0, 0);
+ if (IS_ERR(tfm)) {
+ pr_err("failed to load transform for %s: %ld\n",
+ algo, PTR_ERR(tfm));
+ goto out_free_data;
+ }
+
+ ret = crypto_aead_setauthsize(tfm, authsize);
+
+ for (i = 0; i < num_mb; ++i)
+ if (testmgr_alloc_buf(data[i].xbuf)) {
+ while (i--)
+ testmgr_free_buf(data[i].xbuf);
+ goto out_free_tfm;
+ }
+
+ for (i = 0; i < num_mb; ++i)
+ if (testmgr_alloc_buf(data[i].axbuf)) {
+ while (i--)
+ testmgr_free_buf(data[i].axbuf);
+ goto out_free_xbuf;
+ }
+
+ for (i = 0; i < num_mb; ++i)
+ if (testmgr_alloc_buf(data[i].xoutbuf)) {
+ while (i--)
+ testmgr_free_buf(data[i].xoutbuf);
+ goto out_free_axbuf;
+ }
+
+ for (i = 0; i < num_mb; ++i) {
+ data[i].req = aead_request_alloc(tfm, GFP_KERNEL);
+ if (!data[i].req) {
+ pr_err("alg: skcipher: Failed to allocate request for %s\n",
+ algo);
+ while (i--)
+ aead_request_free(data[i].req);
+ goto out_free_xoutbuf;
+ }
+ }
+
+ for (i = 0; i < num_mb; ++i) {
+ crypto_init_wait(&data[i].wait);
+ aead_request_set_callback(data[i].req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &data[i].wait);
+ }
+
+ pr_info("\ntesting speed of multibuffer %s (%s) %s\n", algo,
+ get_driver_name(crypto_aead, tfm), e);
+
+ i = 0;
+ do {
+ b_size = aead_sizes;
+ do {
+ if (*b_size + authsize > XBUFSIZE * PAGE_SIZE) {
+ pr_err("template (%u) too big for buffer (%lu)\n",
+ authsize + *b_size,
+ XBUFSIZE * PAGE_SIZE);
+ goto out;
+ }
+
+ pr_info("test %u (%d bit key, %d byte blocks): ", i,
+ *keysize * 8, *b_size);
+
+ /* Set up tfm global state, i.e. the key */
+
+ memset(tvmem[0], 0xff, PAGE_SIZE);
+ key = tvmem[0];
+ for (j = 0; j < tcount; j++) {
+ if (template[j].klen == *keysize) {
+ key = template[j].key;
+ break;
+ }
+ }
+
+ crypto_aead_clear_flags(tfm, ~0);
+
+ ret = crypto_aead_setkey(tfm, key, *keysize);
+ if (ret) {
+ pr_err("setkey() failed flags=%x\n",
+ crypto_aead_get_flags(tfm));
+ goto out;
+ }
+
+ iv_len = crypto_aead_ivsize(tfm);
+ if (iv_len)
+ memset(iv, 0xff, iv_len);
+
+ /* Now set up per-request state, i.e. the buffers */
+
+ for (j = 0; j < num_mb; ++j) {
+ struct test_mb_aead_data *cur = &data[j];
+
+ assoc = cur->axbuf[0];
+ memset(assoc, 0xff, aad_size);
+
+ sg_init_aead(cur->sg, cur->xbuf,
+ *b_size + (enc ? 0 : authsize),
+ assoc, aad_size);
+
+ sg_init_aead(cur->sgout, cur->xoutbuf,
+ *b_size + (enc ? authsize : 0),
+ assoc, aad_size);
+
+ aead_request_set_ad(cur->req, aad_size);
+
+ if (!enc) {
+
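+ /*
+ * For decryption we need a proper auth so
+ * we do the encryption path once with buffers
+ * reversed (input <-> output) to calculate it
+ */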
+ aead_request_set_crypt(cur->req,
+ cur->sgout,
+ cur->sg,
+ *b_size, iv);
+ ret = crypto_aead_encrypt(cur->req);
+ ret = do_one_aead_op(cur->req, ret);
+
+ if (ret) {
+ pr_err("calculating auth failed failed (%d)\n",
+ ret);
+ break;
+ }
+ }
+
+ aead_request_set_crypt(cur->req, cur->sg,
+ cur->sgout, *b_size +
+ (enc ? 0 : authsize),
+ iv);
+
+ }
+
+ if (secs)
+ ret = test_mb_aead_jiffies(data, enc, *b_size,
+ secs, num_mb);
+ else
+ ret = test_mb_aead_cycles(data, enc, *b_size,
+ num_mb);
+
+ if (ret) {
+ pr_err("%s() failed return code=%d\n", e, ret);
+ break;
+ }
+ b_size++;
+ i++;
+ } while (*b_size);
+ keysize++;
+ } while (*keysize);
+
+out:
+ for (i = 0; i < num_mb; ++i)
+ aead_request_free(data[i].req);
+out_free_xoutbuf:
+ for (i = 0; i < num_mb; ++i)
+ testmgr_free_buf(data[i].xoutbuf);
+out_free_axbuf:
+ for (i = 0; i < num_mb; ++i)
+ testmgr_free_buf(data[i].axbuf);
+out_free_xbuf:
+ for (i = 0; i < num_mb; ++i)
+ testmgr_free_buf(data[i].xbuf);
+out_free_tfm:
+ crypto_free_aead(tfm);
+out_free_data:
+ kfree(data);
+out_free_iv:
+ kfree(iv);
+}
+
static int test_aead_jiffies(struct aead_request *req, int enc,
int blen, int secs)
{
@@ -151,60 +504,6 @@ out:
return ret;
}
-static u32 block_sizes[] = { 16, 64, 256, 1024, 8192, 0 };
-static u32 aead_sizes[] = { 16, 64, 256, 512, 1024, 2048, 4096, 8192, 0 };
-
-#define XBUFSIZE 8
-#define MAX_IVLEN 32
-
-static int testmgr_alloc_buf(char *buf[XBUFSIZE])
-{
- int i;
-
- for (i = 0; i < XBUFSIZE; i++) {
- buf[i] = (void *)__get_free_page(GFP_KERNEL);
- if (!buf[i])
- goto err_free_buf;
- }
-
- return 0;
-
-err_free_buf:
- while (i-- > 0)
- free_page((unsigned long)buf[i]);
-
- return -ENOMEM;
-}
-
-static void testmgr_free_buf(char *buf[XBUFSIZE])
-{
- int i;
-
- for (i = 0; i < XBUFSIZE; i++)
- free_page((unsigned long)buf[i]);
-}
-
-static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE],
- unsigned int buflen)
-{
- int np = (buflen + PAGE_SIZE - 1)/PAGE_SIZE;
- int k, rem;
-
- if (np > XBUFSIZE) {
- rem = PAGE_SIZE;
- np = XBUFSIZE;
- } else {
- rem = buflen % PAGE_SIZE;
- }
-
- sg_init_table(sg, np + 1);
- np--;
- for (k = 0; k < np; k++)
- sg_set_buf(&sg[k + 1], xbuf[k], PAGE_SIZE);
-
- sg_set_buf(&sg[k + 1], xbuf[k], rem);
-}
-
static void test_aead_speed(const char *algo, int enc, unsigned int secs,
struct aead_speed_template *template,
unsigned int tcount, u8 authsize,
@@ -316,19 +615,37 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
goto out;
}
- sg_init_aead(sg, xbuf,
- *b_size + (enc ? 0 : authsize));
+ sg_init_aead(sg, xbuf, *b_size + (enc ? 0 : authsize),
+ assoc, aad_size);
sg_init_aead(sgout, xoutbuf,
- *b_size + (enc ? authsize : 0));
+ *b_size + (enc ? authsize : 0), assoc,
+ aad_size);
- sg_set_buf(&sg[0], assoc, aad_size);
- sg_set_buf(&sgout[0], assoc, aad_size);
+ aead_request_set_ad(req, aad_size);
+
+ if (!enc) {
+
+ /*
+ * For decryption we need a proper auth so
+ * we do the encryption path once with buffers
+ * reversed (input <-> output) to calculate it
+ */
+ aead_request_set_crypt(req, sgout, sg,
+ *b_size, iv);
+ ret = do_one_aead_op(req,
+ crypto_aead_encrypt(req));
+
+ if (ret) {
+ pr_err("calculating auth failed failed (%d)\n",
+ ret);
+ break;
+ }
+ }
aead_request_set_crypt(req, sg, sgout,
*b_size + (enc ? 0 : authsize),
iv);
- aead_request_set_ad(req, aad_size);
if (secs)
ret = test_aead_jiffies(req, enc, *b_size,
@@ -381,24 +698,98 @@ static inline int do_one_ahash_op(struct ahash_request *req, int ret)
}
struct test_mb_ahash_data {
- struct scatterlist sg[TVMEMSIZE];
+ struct scatterlist sg[XBUFSIZE];
char result[64];
struct ahash_request *req;
struct crypto_wait wait;
char *xbuf[XBUFSIZE];
};
-static void test_mb_ahash_speed(const char *algo, unsigned int sec,
- struct hash_speed *speed)
+static inline int do_mult_ahash_op(struct test_mb_ahash_data *data, u32 num_mb)
+{
+ int i, rc[num_mb], err = 0;
+
+ /* Fire up a bunch of concurrent requests */
+ for (i = 0; i < num_mb; i++)
+ rc[i] = crypto_ahash_digest(data[i].req);
+
+ /* Wait for all requests to finish */
+ for (i = 0; i < num_mb; i++) {
+ rc[i] = crypto_wait_req(rc[i], &data[i].wait);
+
+ if (rc[i]) {
+ pr_info("concurrent request %d error %d\n", i, rc[i]);
+ err = rc[i];
+ }
+ }
+
+ return err;
+}
+
+static int test_mb_ahash_jiffies(struct test_mb_ahash_data *data, int blen,
+ int secs, u32 num_mb)
+{
+ unsigned long start, end;
+ int bcount;
+ int ret;
+
+ for (start = jiffies, end = start + secs * HZ, bcount = 0;
+ time_before(jiffies, end); bcount++) {
+ ret = do_mult_ahash_op(data, num_mb);
+ if (ret)
+ return ret;
+ }
+
+ pr_cont("%d operations in %d seconds (%ld bytes)\n",
+ bcount * num_mb, secs, (long)bcount * blen * num_mb);
+ return 0;
+}
+
+static int test_mb_ahash_cycles(struct test_mb_ahash_data *data, int blen,
+ u32 num_mb)
+{
+ unsigned long cycles = 0;
+ int ret = 0;
+ int i;
+
+ /* Warm-up run. */
+ for (i = 0; i < 4; i++) {
+ ret = do_mult_ahash_op(data, num_mb);
+ if (ret)
+ goto out;
+ }
+
+ /* The real thing. */
+ for (i = 0; i < 8; i++) {
+ cycles_t start, end;
+
+ start = get_cycles();
+ ret = do_mult_ahash_op(data, num_mb);
+ end = get_cycles();
+
+ if (ret)
+ goto out;
+
+ cycles += end - start;
+ }
+
+out:
+ if (ret == 0)
+ pr_cont("1 operation in %lu cycles (%d bytes)\n",
+ (cycles + 4) / (8 * num_mb), blen);
+
+ return ret;
+}
+
+static void test_mb_ahash_speed(const char *algo, unsigned int secs,
+ struct hash_speed *speed, u32 num_mb)
{
struct test_mb_ahash_data *data;
struct crypto_ahash *tfm;
- unsigned long start, end;
- unsigned long cycles;
unsigned int i, j, k;
int ret;
- data = kzalloc(sizeof(*data) * 8, GFP_KERNEL);
+ data = kcalloc(num_mb, sizeof(*data), GFP_KERNEL);
if (!data)
return;
@@ -409,7 +800,7 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec,
goto free_data;
}
- for (i = 0; i < 8; ++i) {
+ for (i = 0; i < num_mb; ++i) {
if (testmgr_alloc_buf(data[i].xbuf))
goto out;
@@ -424,7 +815,12 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec,
ahash_request_set_callback(data[i].req, 0, crypto_req_done,
&data[i].wait);
- test_hash_sg_init(data[i].sg);
+
+ sg_init_table(data[i].sg, XBUFSIZE);
+ for (j = 0; j < XBUFSIZE; j++) {
+ sg_set_buf(data[i].sg + j, data[i].xbuf[j], PAGE_SIZE);
+ memset(data[i].xbuf[j], 0xff, PAGE_SIZE);
+ }
}
pr_info("\ntesting speed of multibuffer %s (%s)\n", algo,
@@ -435,16 +831,16 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec,
if (speed[i].blen != speed[i].plen)
continue;
- if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) {
+ if (speed[i].blen > XBUFSIZE * PAGE_SIZE) {
pr_err("template (%u) too big for tvmem (%lu)\n",
- speed[i].blen, TVMEMSIZE * PAGE_SIZE);
+ speed[i].blen, XBUFSIZE * PAGE_SIZE);
goto out;
}
if (speed[i].klen)
crypto_ahash_setkey(tfm, tvmem[0], speed[i].klen);
- for (k = 0; k < 8; k++)
+ for (k = 0; k < num_mb; k++)
ahash_request_set_crypt(data[k].req, data[k].sg,
data[k].result, speed[i].blen);
@@ -453,34 +849,12 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec,
i, speed[i].blen, speed[i].plen,
speed[i].blen / speed[i].plen);
- start = get_cycles();
-
- for (k = 0; k < 8; k++) {
- ret = crypto_ahash_digest(data[k].req);
- if (ret == -EINPROGRESS) {
- ret = 0;
- continue;
- }
-
- if (ret)
- break;
-
- crypto_req_done(&data[k].req->base, 0);
- }
-
- for (j = 0; j < k; j++) {
- struct crypto_wait *wait = &data[j].wait;
- int wait_ret;
-
- wait_ret = crypto_wait_req(-EINPROGRESS, wait);
- if (wait_ret)
- ret = wait_ret;
- }
+ if (secs)
+ ret = test_mb_ahash_jiffies(data, speed[i].blen, secs,
+ num_mb);
+ else
+ ret = test_mb_ahash_cycles(data, speed[i].blen, num_mb);
- end = get_cycles();
- cycles = end - start;
- pr_cont("%6lu cycles/operation, %4lu cycles/byte\n",
- cycles, cycles / (8 * speed[i].blen));
if (ret) {
pr_err("At least one hashing failed ret=%d\n", ret);
@@ -489,10 +863,10 @@ static void test_mb_ahash_speed(const char *algo, unsigned int sec,
}
out:
- for (k = 0; k < 8; ++k)
+ for (k = 0; k < num_mb; ++k)
ahash_request_free(data[k].req);
- for (k = 0; k < 8; ++k)
+ for (k = 0; k < num_mb; ++k)
testmgr_free_buf(data[k].xbuf);
crypto_free_ahash(tfm);
@@ -736,6 +1110,254 @@ static void test_hash_speed(const char *algo, unsigned int secs,
return test_ahash_speed_common(algo, secs, speed, CRYPTO_ALG_ASYNC);
}
+struct test_mb_skcipher_data {
+ struct scatterlist sg[XBUFSIZE];
+ struct skcipher_request *req;
+ struct crypto_wait wait;
+ char *xbuf[XBUFSIZE];
+};
+
+static int do_mult_acipher_op(struct test_mb_skcipher_data *data, int enc,
+ u32 num_mb)
+{
+ int i, rc[num_mb], err = 0;
+
+ /* Fire up a bunch of concurrent requests */
+ for (i = 0; i < num_mb; i++) {
+ if (enc == ENCRYPT)
+ rc[i] = crypto_skcipher_encrypt(data[i].req);
+ else
+ rc[i] = crypto_skcipher_decrypt(data[i].req);
+ }
+
+ /* Wait for all requests to finish */
+ for (i = 0; i < num_mb; i++) {
+ rc[i] = crypto_wait_req(rc[i], &data[i].wait);
+
+ if (rc[i]) {
+ pr_info("concurrent request %d error %d\n", i, rc[i]);
+ err = rc[i];
+ }
+ }
+
+ return err;
+}
+
+static int test_mb_acipher_jiffies(struct test_mb_skcipher_data *data, int enc,
+ int blen, int secs, u32 num_mb)
+{
+ unsigned long start, end;
+ int bcount;
+ int ret;
+
+ for (start = jiffies, end = start + secs * HZ, bcount = 0;
+ time_before(jiffies, end); bcount++) {
+ ret = do_mult_acipher_op(data, enc, num_mb);
+ if (ret)
+ return ret;
+ }
+
+ pr_cont("%d operations in %d seconds (%ld bytes)\n",
+ bcount * num_mb, secs, (long)bcount * blen * num_mb);
+ return 0;
+}
+
+static int test_mb_acipher_cycles(struct test_mb_skcipher_data *data, int enc,
+ int blen, u32 num_mb)
+{
+ unsigned long cycles = 0;
+ int ret = 0;
+ int i;
+
+ /* Warm-up run. */
+ for (i = 0; i < 4; i++) {
+ ret = do_mult_acipher_op(data, enc, num_mb);
+ if (ret)
+ goto out;
+ }
+
+ /* The real thing. */
+ for (i = 0; i < 8; i++) {
+ cycles_t start, end;
+
+ start = get_cycles();
+ ret = do_mult_acipher_op(data, enc, num_mb);
+ end = get_cycles();
+
+ if (ret)
+ goto out;
+
+ cycles += end - start;
+ }
+
+out:
+ if (ret == 0)
+ pr_cont("1 operation in %lu cycles (%d bytes)\n",
+ (cycles + 4) / (8 * num_mb), blen);
+
+ return ret;
+}
+
+static void test_mb_skcipher_speed(const char *algo, int enc, int secs,
+ struct cipher_speed_template *template,
+ unsigned int tcount, u8 *keysize, u32 num_mb)
+{
+ struct test_mb_skcipher_data *data;
+ struct crypto_skcipher *tfm;
+ unsigned int i, j, iv_len;
+ const char *key;
+ const char *e;
+ u32 *b_size;
+ char iv[128];
+ int ret;
+
+ if (enc == ENCRYPT)
+ e = "encryption";
+ else
+ e = "decryption";
+
+ data = kcalloc(num_mb, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return;
+
+ tfm = crypto_alloc_skcipher(algo, 0, 0);
+ if (IS_ERR(tfm)) {
+ pr_err("failed to load transform for %s: %ld\n",
+ algo, PTR_ERR(tfm));
+ goto out_free_data;
+ }
+
+ for (i = 0; i < num_mb; ++i)
+ if (testmgr_alloc_buf(data[i].xbuf)) {
+ while (i--)
+ testmgr_free_buf(data[i].xbuf);
+ goto out_free_tfm;
+ }
+
+ for (i = 0; i < num_mb; ++i) {
+ data[i].req = skcipher_request_alloc(tfm, GFP_KERNEL);
+ if (!data[i].req) {
+ pr_err("alg: skcipher: Failed to allocate request for %s\n",
+ algo);
+ while (i--)
+ skcipher_request_free(data[i].req);
+ goto out_free_xbuf;
+ }
+ }
+
+ for (i = 0; i < num_mb; ++i) {
+ skcipher_request_set_callback(data[i].req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &data[i].wait);
+ crypto_init_wait(&data[i].wait);
+ }
+
+ pr_info("\ntesting speed of multibuffer %s (%s) %s\n", algo,
+ get_driver_name(crypto_skcipher, tfm), e);
+
+ i = 0;
+ do {
+ b_size = block_sizes;
+ do {
+ if (*b_size > XBUFSIZE * PAGE_SIZE) {
+ pr_err("template (%u) too big for buffer (%lu)\n",
+ *b_size, XBUFSIZE * PAGE_SIZE);
+ goto out;
+ }
+
+ pr_info("test %u (%d bit key, %d byte blocks): ", i,
+ *keysize * 8, *b_size);
+
+ /* Set up tfm global state, i.e. the key */
+
+ memset(tvmem[0], 0xff, PAGE_SIZE);
+ key = tvmem[0];
+ for (j = 0; j < tcount; j++) {
+ if (template[j].klen == *keysize) {
+ key = template[j].key;
+ break;
+ }
+ }
+
+ crypto_skcipher_clear_flags(tfm, ~0);
+
+ ret = crypto_skcipher_setkey(tfm, key, *keysize);
+ if (ret) {
+ pr_err("setkey() failed flags=%x\n",
+ crypto_skcipher_get_flags(tfm));
+ goto out;
+ }
+
+ iv_len = crypto_skcipher_ivsize(tfm);
+ if (iv_len)
+ memset(&iv, 0xff, iv_len);
+
+ /* Now set up per-request state, i.e. the buffers */
+
+ for (j = 0; j < num_mb; ++j) {
+ struct test_mb_skcipher_data *cur = &data[j];
+ unsigned int k = *b_size;
+ unsigned int pages = DIV_ROUND_UP(k, PAGE_SIZE);
+ unsigned int p = 0;
+
+ sg_init_table(cur->sg, pages);
+
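+ /* Whole 0xff-filled pages first, then the final partial page */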
+ while (k > PAGE_SIZE) {
+ sg_set_buf(cur->sg + p, cur->xbuf[p],
+ PAGE_SIZE);
+ memset(cur->xbuf[p], 0xff, PAGE_SIZE);
+ p++;
+ k -= PAGE_SIZE;
+ }
+
+ sg_set_buf(cur->sg + p, cur->xbuf[p], k);
+ memset(cur->xbuf[p], 0xff, k);
+
+ skcipher_request_set_crypt(cur->req, cur->sg,
+ cur->sg, *b_size,
+ iv);
+ }
+
+ if (secs)
+ ret = test_mb_acipher_jiffies(data, enc,
+ *b_size, secs,
+ num_mb);
+ else
+ ret = test_mb_acipher_cycles(data, enc,
+ *b_size, num_mb);
+
+ if (ret) {
+ pr_err("%s() failed flags=%x\n", e,
+ crypto_skcipher_get_flags(tfm));
+ break;
+ }
+ b_size++;
+ i++;
+ } while (*b_size);
+ keysize++;
+ } while (*keysize);
+
+out:
+ for (i = 0; i < num_mb; ++i)
+ skcipher_request_free(data[i].req);
+out_free_xbuf:
+ for (i = 0; i < num_mb; ++i)
+ testmgr_free_buf(data[i].xbuf);
+out_free_tfm:
+ crypto_free_skcipher(tfm);
+out_free_data:
+ kfree(data);
+}
+
static inline int do_one_acipher_op(struct skcipher_request *req, int ret)
{
struct crypto_wait *wait = req->base.data;
@@ -1557,16 +2179,24 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
NULL, 0, 16, 16, aead_speed_template_20);
test_aead_speed("gcm(aes)", ENCRYPT, sec,
NULL, 0, 16, 8, speed_template_16_24_32);
+ test_aead_speed("rfc4106(gcm(aes))", DECRYPT, sec,
+ NULL, 0, 16, 16, aead_speed_template_20);
+ test_aead_speed("gcm(aes)", DECRYPT, sec,
+ NULL, 0, 16, 8, speed_template_16_24_32);
break;
case 212:
test_aead_speed("rfc4309(ccm(aes))", ENCRYPT, sec,
NULL, 0, 16, 16, aead_speed_template_19);
+ test_aead_speed("rfc4309(ccm(aes))", DECRYPT, sec,
+ NULL, 0, 16, 16, aead_speed_template_19);
break;
case 213:
test_aead_speed("rfc7539esp(chacha20,poly1305)", ENCRYPT, sec,
NULL, 0, 16, 8, aead_speed_template_36);
+ test_aead_speed("rfc7539esp(chacha20,poly1305)", DECRYPT, sec,
+ NULL, 0, 16, 8, aead_speed_template_36);
break;
case 214:
@@ -1574,6 +2204,33 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
speed_template_32);
break;
+ case 215:
+ test_mb_aead_speed("rfc4106(gcm(aes))", ENCRYPT, sec, NULL,
+ 0, 16, 16, aead_speed_template_20, num_mb);
+ test_mb_aead_speed("gcm(aes)", ENCRYPT, sec, NULL, 0, 16, 8,
+ speed_template_16_24_32, num_mb);
+ test_mb_aead_speed("rfc4106(gcm(aes))", DECRYPT, sec, NULL,
+ 0, 16, 16, aead_speed_template_20, num_mb);
+ test_mb_aead_speed("gcm(aes)", DECRYPT, sec, NULL, 0, 16, 8,
+ speed_template_16_24_32, num_mb);
+ break;
+
+ case 216:
+ test_mb_aead_speed("rfc4309(ccm(aes))", ENCRYPT, sec, NULL, 0,
+ 16, 16, aead_speed_template_19, num_mb);
+ test_mb_aead_speed("rfc4309(ccm(aes))", DECRYPT, sec, NULL, 0,
+ 16, 16, aead_speed_template_19, num_mb);
+ break;
+
+ case 217:
+ test_mb_aead_speed("rfc7539esp(chacha20,poly1305)", ENCRYPT,
+ sec, NULL, 0, 16, 8, aead_speed_template_36,
+ num_mb);
+ test_mb_aead_speed("rfc7539esp(chacha20,poly1305)", DECRYPT,
+ sec, NULL, 0, 16, 8, aead_speed_template_36,
+ num_mb);
+ break;
+
case 300:
if (alg) {
test_hash_speed(alg, sec, generic_hash_speed_template);
@@ -1778,19 +2435,23 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
if (mode > 400 && mode < 500) break;
/* fall through */
case 422:
- test_mb_ahash_speed("sha1", sec, generic_hash_speed_template);
+ test_mb_ahash_speed("sha1", sec, generic_hash_speed_template,
+ num_mb);
if (mode > 400 && mode < 500) break;
/* fall through */
case 423:
- test_mb_ahash_speed("sha256", sec, generic_hash_speed_template);
+ test_mb_ahash_speed("sha256", sec, generic_hash_speed_template,
+ num_mb);
if (mode > 400 && mode < 500) break;
/* fall through */
case 424:
- test_mb_ahash_speed("sha512", sec, generic_hash_speed_template);
+ test_mb_ahash_speed("sha512", sec, generic_hash_speed_template,
+ num_mb);
if (mode > 400 && mode < 500) break;
/* fall through */
case 425:
- test_mb_ahash_speed("sm3", sec, generic_hash_speed_template);
+ test_mb_ahash_speed("sm3", sec, generic_hash_speed_template,
+ num_mb);
if (mode > 400 && mode < 500) break;
/* fall through */
case 499:
@@ -2008,6 +2669,218 @@ static int do_test(const char *alg, u32 type, u32 mask, int m)
speed_template_8_32);
break;
+ case 600:
+ test_mb_skcipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("ecb(aes)", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("cbc(aes)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("cbc(aes)", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("lrw(aes)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_40_48, num_mb);
+ test_mb_skcipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
+ speed_template_32_40_48, num_mb);
+ test_mb_skcipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_64, num_mb);
+ test_mb_skcipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
+ speed_template_32_64, num_mb);
+ test_mb_skcipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("ctr(aes)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("cfb(aes)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("cfb(aes)", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("ofb(aes)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("ofb(aes)", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("rfc3686(ctr(aes))", ENCRYPT, sec, NULL,
+ 0, speed_template_20_28_36, num_mb);
+ test_mb_skcipher_speed("rfc3686(ctr(aes))", DECRYPT, sec, NULL,
+ 0, speed_template_20_28_36, num_mb);
+ break;
+
+ case 601:
+ test_mb_skcipher_speed("ecb(des3_ede)", ENCRYPT, sec,
+ des3_speed_template, DES3_SPEED_VECTORS,
+ speed_template_24, num_mb);
+ test_mb_skcipher_speed("ecb(des3_ede)", DECRYPT, sec,
+ des3_speed_template, DES3_SPEED_VECTORS,
+ speed_template_24, num_mb);
+ test_mb_skcipher_speed("cbc(des3_ede)", ENCRYPT, sec,
+ des3_speed_template, DES3_SPEED_VECTORS,
+ speed_template_24, num_mb);
+ test_mb_skcipher_speed("cbc(des3_ede)", DECRYPT, sec,
+ des3_speed_template, DES3_SPEED_VECTORS,
+ speed_template_24, num_mb);
+ test_mb_skcipher_speed("cfb(des3_ede)", ENCRYPT, sec,
+ des3_speed_template, DES3_SPEED_VECTORS,
+ speed_template_24, num_mb);
+ test_mb_skcipher_speed("cfb(des3_ede)", DECRYPT, sec,
+ des3_speed_template, DES3_SPEED_VECTORS,
+ speed_template_24, num_mb);
+ test_mb_skcipher_speed("ofb(des3_ede)", ENCRYPT, sec,
+ des3_speed_template, DES3_SPEED_VECTORS,
+ speed_template_24, num_mb);
+ test_mb_skcipher_speed("ofb(des3_ede)", DECRYPT, sec,
+ des3_speed_template, DES3_SPEED_VECTORS,
+ speed_template_24, num_mb);
+ break;
+
+ case 602:
+ test_mb_skcipher_speed("ecb(des)", ENCRYPT, sec, NULL, 0,
+ speed_template_8, num_mb);
+ test_mb_skcipher_speed("ecb(des)", DECRYPT, sec, NULL, 0,
+ speed_template_8, num_mb);
+ test_mb_skcipher_speed("cbc(des)", ENCRYPT, sec, NULL, 0,
+ speed_template_8, num_mb);
+ test_mb_skcipher_speed("cbc(des)", DECRYPT, sec, NULL, 0,
+ speed_template_8, num_mb);
+ test_mb_skcipher_speed("cfb(des)", ENCRYPT, sec, NULL, 0,
+ speed_template_8, num_mb);
+ test_mb_skcipher_speed("cfb(des)", DECRYPT, sec, NULL, 0,
+ speed_template_8, num_mb);
+ test_mb_skcipher_speed("ofb(des)", ENCRYPT, sec, NULL, 0,
+ speed_template_8, num_mb);
+ test_mb_skcipher_speed("ofb(des)", DECRYPT, sec, NULL, 0,
+ speed_template_8, num_mb);
+ break;
+
+ case 603:
+ test_mb_skcipher_speed("ecb(serpent)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("ecb(serpent)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("cbc(serpent)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("cbc(serpent)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("ctr(serpent)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("ctr(serpent)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("lrw(serpent)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_48, num_mb);
+ test_mb_skcipher_speed("lrw(serpent)", DECRYPT, sec, NULL, 0,
+ speed_template_32_48, num_mb);
+ test_mb_skcipher_speed("xts(serpent)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_64, num_mb);
+ test_mb_skcipher_speed("xts(serpent)", DECRYPT, sec, NULL, 0,
+ speed_template_32_64, num_mb);
+ break;
+
+ case 604:
+ test_mb_skcipher_speed("ecb(twofish)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("ecb(twofish)", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("cbc(twofish)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("cbc(twofish)", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("ctr(twofish)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("ctr(twofish)", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32, num_mb);
+ test_mb_skcipher_speed("lrw(twofish)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_40_48, num_mb);
+ test_mb_skcipher_speed("lrw(twofish)", DECRYPT, sec, NULL, 0,
+ speed_template_32_40_48, num_mb);
+ test_mb_skcipher_speed("xts(twofish)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_48_64, num_mb);
+ test_mb_skcipher_speed("xts(twofish)", DECRYPT, sec, NULL, 0,
+ speed_template_32_48_64, num_mb);
+ break;
+
+ case 605:
+ test_mb_skcipher_speed("ecb(arc4)", ENCRYPT, sec, NULL, 0,
+ speed_template_8, num_mb);
+ break;
+
+ case 606:
+ test_mb_skcipher_speed("ecb(cast5)", ENCRYPT, sec, NULL, 0,
+ speed_template_8_16, num_mb);
+ test_mb_skcipher_speed("ecb(cast5)", DECRYPT, sec, NULL, 0,
+ speed_template_8_16, num_mb);
+ test_mb_skcipher_speed("cbc(cast5)", ENCRYPT, sec, NULL, 0,
+ speed_template_8_16, num_mb);
+ test_mb_skcipher_speed("cbc(cast5)", DECRYPT, sec, NULL, 0,
+ speed_template_8_16, num_mb);
+ test_mb_skcipher_speed("ctr(cast5)", ENCRYPT, sec, NULL, 0,
+ speed_template_8_16, num_mb);
+ test_mb_skcipher_speed("ctr(cast5)", DECRYPT, sec, NULL, 0,
+ speed_template_8_16, num_mb);
+ break;
+
+ case 607:
+ test_mb_skcipher_speed("ecb(cast6)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("ecb(cast6)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("cbc(cast6)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("cbc(cast6)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("ctr(cast6)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("ctr(cast6)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("lrw(cast6)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_48, num_mb);
+ test_mb_skcipher_speed("lrw(cast6)", DECRYPT, sec, NULL, 0,
+ speed_template_32_48, num_mb);
+ test_mb_skcipher_speed("xts(cast6)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_64, num_mb);
+ test_mb_skcipher_speed("xts(cast6)", DECRYPT, sec, NULL, 0,
+ speed_template_32_64, num_mb);
+ break;
+
+ case 608:
+ test_mb_skcipher_speed("ecb(camellia)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("ecb(camellia)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("cbc(camellia)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("cbc(camellia)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("ctr(camellia)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("ctr(camellia)", DECRYPT, sec, NULL, 0,
+ speed_template_16_32, num_mb);
+ test_mb_skcipher_speed("lrw(camellia)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_48, num_mb);
+ test_mb_skcipher_speed("lrw(camellia)", DECRYPT, sec, NULL, 0,
+ speed_template_32_48, num_mb);
+ test_mb_skcipher_speed("xts(camellia)", ENCRYPT, sec, NULL, 0,
+ speed_template_32_64, num_mb);
+ test_mb_skcipher_speed("xts(camellia)", DECRYPT, sec, NULL, 0,
+ speed_template_32_64, num_mb);
+ break;
+
+ case 609:
+ test_mb_skcipher_speed("ecb(blowfish)", ENCRYPT, sec, NULL, 0,
+ speed_template_8_32, num_mb);
+ test_mb_skcipher_speed("ecb(blowfish)", DECRYPT, sec, NULL, 0,
+ speed_template_8_32, num_mb);
+ test_mb_skcipher_speed("cbc(blowfish)", ENCRYPT, sec, NULL, 0,
+ speed_template_8_32, num_mb);
+ test_mb_skcipher_speed("cbc(blowfish)", DECRYPT, sec, NULL, 0,
+ speed_template_8_32, num_mb);
+ test_mb_skcipher_speed("ctr(blowfish)", ENCRYPT, sec, NULL, 0,
+ speed_template_8_32, num_mb);
+ test_mb_skcipher_speed("ctr(blowfish)", DECRYPT, sec, NULL, 0,
+ speed_template_8_32, num_mb);
+ break;
+
case 1000:
test_available();
break;
@@ -2069,6 +2942,8 @@ module_param(mode, int, 0);
module_param(sec, uint, 0);
MODULE_PARM_DESC(sec, "Length in seconds of speed tests "
"(defaults to zero which uses CPU cycles instead)");
+module_param(num_mb, uint, 0000);
+MODULE_PARM_DESC(num_mb, "Number of concurrent requests to be used in mb speed tests (defaults to 8)");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Quick & dirty crypto testing module");
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 29d7020b8826..d5e23a142a04 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -177,6 +177,18 @@ static void testmgr_free_buf(char *buf[XBUFSIZE])
free_page((unsigned long)buf[i]);
}
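+/*
+ * Return 0 iff the first @size bytes of @result still hold the
+ * guard byte @c, i.e. the buffer was never written to.
+ */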
+static int ahash_guard_result(char *result, char c, int size)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ if (result[i] != c)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int ahash_partial_update(struct ahash_request **preq,
struct crypto_ahash *tfm, const struct hash_testvec *template,
void *hash_buff, int k, int temp, struct scatterlist *sg,
@@ -185,7 +197,8 @@ static int ahash_partial_update(struct ahash_request **preq,
char *state;
struct ahash_request *req;
int statesize, ret = -EINVAL;
- const char guard[] = { 0x00, 0xba, 0xad, 0x00 };
+ static const unsigned char guard[] = { 0x00, 0xba, 0xad, 0x00 };
+ int digestsize = crypto_ahash_digestsize(tfm);
req = *preq;
statesize = crypto_ahash_statesize(
@@ -196,12 +209,19 @@ static int ahash_partial_update(struct ahash_request **preq,
goto out_nostate;
}
memcpy(state + statesize, guard, sizeof(guard));
+ memset(result, 1, digestsize);
ret = crypto_ahash_export(req, state);
WARN_ON(memcmp(state + statesize, guard, sizeof(guard)));
if (ret) {
pr_err("alg: hash: Failed to export() for %s\n", algo);
goto out;
}
+ ret = ahash_guard_result(result, 1, digestsize);
+ if (ret) {
+ pr_err("alg: hash: Failed, export used req->result for %s\n",
+ algo);
+ goto out;
+ }
ahash_request_free(req);
req = ahash_request_alloc(tfm, GFP_KERNEL);
if (!req) {
@@ -221,6 +241,12 @@ static int ahash_partial_update(struct ahash_request **preq,
pr_err("alg: hash: Failed to import() for %s\n", algo);
goto out;
}
+ ret = ahash_guard_result(result, 1, digestsize);
+ if (ret) {
+ pr_err("alg: hash: Failed, import used req->result for %s\n",
+ algo);
+ goto out;
+ }
ret = crypto_wait_req(crypto_ahash_update(req), wait);
if (ret)
goto out;
@@ -316,18 +342,31 @@ static int __test_hash(struct crypto_ahash *tfm,
goto out;
}
} else {
+ memset(result, 1, digest_size);
ret = crypto_wait_req(crypto_ahash_init(req), &wait);
if (ret) {
pr_err("alg: hash: init failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
+ ret = ahash_guard_result(result, 1, digest_size);
+ if (ret) {
+ pr_err("alg: hash: init failed on test %d "
+ "for %s: used req->result\n", j, algo);
+ goto out;
+ }
ret = crypto_wait_req(crypto_ahash_update(req), &wait);
if (ret) {
pr_err("alg: hash: update failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
+ ret = ahash_guard_result(result, 1, digest_size);
+ if (ret) {
+ pr_err("alg: hash: update failed on test %d "
+ "for %s: used req->result\n", j, algo);
+ goto out;
+ }
ret = crypto_wait_req(crypto_ahash_final(req), &wait);
if (ret) {
pr_err("alg: hash: final failed on test %d "
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index a714b6293959..6044f6906bd6 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -1052,6 +1052,142 @@ static const struct hash_testvec sha3_224_tv_template[] = {
"\xc9\xfd\x55\x74\x49\x44\x79\xba"
"\x5c\x7e\x7a\xb7\x6e\xf2\x64\xea"
"\xd0\xfc\xce\x33",
+ .np = 2,
+ .tap = { 28, 28 },
+ }, {
+ .plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3"
+ "\x7a\x11\x85\x1c\xb3\x27\xbe\x55"
+ "\xec\x60\xf7\x8e\x02\x99\x30\xc7"
+ "\x3b\xd2\x69\x00\x74\x0b\xa2\x16"
+ "\xad\x44\xdb\x4f\xe6\x7d\x14\x88"
+ "\x1f\xb6\x2a\xc1\x58\xef\x63\xfa"
+ "\x91\x05\x9c\x33\xca\x3e\xd5\x6c"
+ "\x03\x77\x0e\xa5\x19\xb0\x47\xde"
+ "\x52\xe9\x80\x17\x8b\x22\xb9\x2d"
+ "\xc4\x5b\xf2\x66\xfd\x94\x08\x9f"
+ "\x36\xcd\x41\xd8\x6f\x06\x7a\x11"
+ "\xa8\x1c\xb3\x4a\xe1\x55\xec\x83"
+ "\x1a\x8e\x25\xbc\x30\xc7\x5e\xf5"
+ "\x69\x00\x97\x0b\xa2\x39\xd0\x44"
+ "\xdb\x72\x09\x7d\x14\xab\x1f\xb6"
+ "\x4d\xe4\x58\xef\x86\x1d\x91\x28"
+ "\xbf\x33\xca\x61\xf8\x6c\x03\x9a"
+ "\x0e\xa5\x3c\xd3\x47\xde\x75\x0c"
+ "\x80\x17\xae\x22\xb9\x50\xe7\x5b"
+ "\xf2\x89\x20\x94\x2b\xc2\x36\xcd"
+ "\x64\xfb\x6f\x06\x9d\x11\xa8\x3f"
+ "\xd6\x4a\xe1\x78\x0f\x83\x1a\xb1"
+ "\x25\xbc\x53\xea\x5e\xf5\x8c\x00"
+ "\x97\x2e\xc5\x39\xd0\x67\xfe\x72"
+ "\x09\xa0\x14\xab\x42\xd9\x4d\xe4"
+ "\x7b\x12\x86\x1d\xb4\x28\xbf\x56"
+ "\xed\x61\xf8\x8f\x03\x9a\x31\xc8"
+ "\x3c\xd3\x6a\x01\x75\x0c\xa3\x17"
+ "\xae\x45\xdc\x50\xe7\x7e\x15\x89"
+ "\x20\xb7\x2b\xc2\x59\xf0\x64\xfb"
+ "\x92\x06\x9d\x34\xcb\x3f\xd6\x6d"
+ "\x04\x78\x0f\xa6\x1a\xb1\x48\xdf"
+ "\x53\xea\x81\x18\x8c\x23\xba\x2e"
+ "\xc5\x5c\xf3\x67\xfe\x95\x09\xa0"
+ "\x37\xce\x42\xd9\x70\x07\x7b\x12"
+ "\xa9\x1d\xb4\x4b\xe2\x56\xed\x84"
+ "\x1b\x8f\x26\xbd\x31\xc8\x5f\xf6"
+ "\x6a\x01\x98\x0c\xa3\x3a\xd1\x45"
+ "\xdc\x73\x0a\x7e\x15\xac\x20\xb7"
+ "\x4e\xe5\x59\xf0\x87\x1e\x92\x29"
+ "\xc0\x34\xcb\x62\xf9\x6d\x04\x9b"
+ "\x0f\xa6\x3d\xd4\x48\xdf\x76\x0d"
+ "\x81\x18\xaf\x23\xba\x51\xe8\x5c"
+ "\xf3\x8a\x21\x95\x2c\xc3\x37\xce"
+ "\x65\xfc\x70\x07\x9e\x12\xa9\x40"
+ "\xd7\x4b\xe2\x79\x10\x84\x1b\xb2"
+ "\x26\xbd\x54\xeb\x5f\xf6\x8d\x01"
+ "\x98\x2f\xc6\x3a\xd1\x68\xff\x73"
+ "\x0a\xa1\x15\xac\x43\xda\x4e\xe5"
+ "\x7c\x13\x87\x1e\xb5\x29\xc0\x57"
+ "\xee\x62\xf9\x90\x04\x9b\x32\xc9"
+ "\x3d\xd4\x6b\x02\x76\x0d\xa4\x18"
+ "\xaf\x46\xdd\x51\xe8\x7f\x16\x8a"
+ "\x21\xb8\x2c\xc3\x5a\xf1\x65\xfc"
+ "\x93\x07\x9e\x35\xcc\x40\xd7\x6e"
+ "\x05\x79\x10\xa7\x1b\xb2\x49\xe0"
+ "\x54\xeb\x82\x19\x8d\x24\xbb\x2f"
+ "\xc6\x5d\xf4\x68\xff\x96\x0a\xa1"
+ "\x38\xcf\x43\xda\x71\x08\x7c\x13"
+ "\xaa\x1e\xb5\x4c\xe3\x57\xee\x85"
+ "\x1c\x90\x27\xbe\x32\xc9\x60\xf7"
+ "\x6b\x02\x99\x0d\xa4\x3b\xd2\x46"
+ "\xdd\x74\x0b\x7f\x16\xad\x21\xb8"
+ "\x4f\xe6\x5a\xf1\x88\x1f\x93\x2a"
+ "\xc1\x35\xcc\x63\xfa\x6e\x05\x9c"
+ "\x10\xa7\x3e\xd5\x49\xe0\x77\x0e"
+ "\x82\x19\xb0\x24\xbb\x52\xe9\x5d"
+ "\xf4\x8b\x22\x96\x2d\xc4\x38\xcf"
+ "\x66\xfd\x71\x08\x9f\x13\xaa\x41"
+ "\xd8\x4c\xe3\x7a\x11\x85\x1c\xb3"
+ "\x27\xbe\x55\xec\x60\xf7\x8e\x02"
+ "\x99\x30\xc7\x3b\xd2\x69\x00\x74"
+ "\x0b\xa2\x16\xad\x44\xdb\x4f\xe6"
+ "\x7d\x14\x88\x1f\xb6\x2a\xc1\x58"
+ "\xef\x63\xfa\x91\x05\x9c\x33\xca"
+ "\x3e\xd5\x6c\x03\x77\x0e\xa5\x19"
+ "\xb0\x47\xde\x52\xe9\x80\x17\x8b"
+ "\x22\xb9\x2d\xc4\x5b\xf2\x66\xfd"
+ "\x94\x08\x9f\x36\xcd\x41\xd8\x6f"
+ "\x06\x7a\x11\xa8\x1c\xb3\x4a\xe1"
+ "\x55\xec\x83\x1a\x8e\x25\xbc\x30"
+ "\xc7\x5e\xf5\x69\x00\x97\x0b\xa2"
+ "\x39\xd0\x44\xdb\x72\x09\x7d\x14"
+ "\xab\x1f\xb6\x4d\xe4\x58\xef\x86"
+ "\x1d\x91\x28\xbf\x33\xca\x61\xf8"
+ "\x6c\x03\x9a\x0e\xa5\x3c\xd3\x47"
+ "\xde\x75\x0c\x80\x17\xae\x22\xb9"
+ "\x50\xe7\x5b\xf2\x89\x20\x94\x2b"
+ "\xc2\x36\xcd\x64\xfb\x6f\x06\x9d"
+ "\x11\xa8\x3f\xd6\x4a\xe1\x78\x0f"
+ "\x83\x1a\xb1\x25\xbc\x53\xea\x5e"
+ "\xf5\x8c\x00\x97\x2e\xc5\x39\xd0"
+ "\x67\xfe\x72\x09\xa0\x14\xab\x42"
+ "\xd9\x4d\xe4\x7b\x12\x86\x1d\xb4"
+ "\x28\xbf\x56\xed\x61\xf8\x8f\x03"
+ "\x9a\x31\xc8\x3c\xd3\x6a\x01\x75"
+ "\x0c\xa3\x17\xae\x45\xdc\x50\xe7"
+ "\x7e\x15\x89\x20\xb7\x2b\xc2\x59"
+ "\xf0\x64\xfb\x92\x06\x9d\x34\xcb"
+ "\x3f\xd6\x6d\x04\x78\x0f\xa6\x1a"
+ "\xb1\x48\xdf\x53\xea\x81\x18\x8c"
+ "\x23\xba\x2e\xc5\x5c\xf3\x67\xfe"
+ "\x95\x09\xa0\x37\xce\x42\xd9\x70"
+ "\x07\x7b\x12\xa9\x1d\xb4\x4b\xe2"
+ "\x56\xed\x84\x1b\x8f\x26\xbd\x31"
+ "\xc8\x5f\xf6\x6a\x01\x98\x0c\xa3"
+ "\x3a\xd1\x45\xdc\x73\x0a\x7e\x15"
+ "\xac\x20\xb7\x4e\xe5\x59\xf0\x87"
+ "\x1e\x92\x29\xc0\x34\xcb\x62\xf9"
+ "\x6d\x04\x9b\x0f\xa6\x3d\xd4\x48"
+ "\xdf\x76\x0d\x81\x18\xaf\x23\xba"
+ "\x51\xe8\x5c\xf3\x8a\x21\x95\x2c"
+ "\xc3\x37\xce\x65\xfc\x70\x07\x9e"
+ "\x12\xa9\x40\xd7\x4b\xe2\x79\x10"
+ "\x84\x1b\xb2\x26\xbd\x54\xeb\x5f"
+ "\xf6\x8d\x01\x98\x2f\xc6\x3a\xd1"
+ "\x68\xff\x73\x0a\xa1\x15\xac\x43"
+ "\xda\x4e\xe5\x7c\x13\x87\x1e\xb5"
+ "\x29\xc0\x57\xee\x62\xf9\x90\x04"
+ "\x9b\x32\xc9\x3d\xd4\x6b\x02\x76"
+ "\x0d\xa4\x18\xaf\x46\xdd\x51\xe8"
+ "\x7f\x16\x8a\x21\xb8\x2c\xc3\x5a"
+ "\xf1\x65\xfc\x93\x07\x9e\x35\xcc"
+ "\x40\xd7\x6e\x05\x79\x10\xa7\x1b"
+ "\xb2\x49\xe0\x54\xeb\x82\x19\x8d"
+ "\x24\xbb\x2f\xc6\x5d\xf4\x68\xff"
+ "\x96\x0a\xa1\x38\xcf\x43\xda\x71"
+ "\x08\x7c\x13\xaa\x1e\xb5\x4c",
+ .psize = 1023,
+ .digest = "\x7d\x0f\x2f\xb7\x65\x3b\xa7\x26"
+ "\xc3\x88\x20\x71\x15\x06\xe8\x2d"
+ "\xa3\x92\x44\xab\x3e\xe7\xff\x86"
+ "\xb6\x79\x10\x72",
},
};
@@ -1077,6 +1213,142 @@ static const struct hash_testvec sha3_256_tv_template[] = {
"\x49\x10\x03\x76\xa8\x23\x5e\x2c"
"\x82\xe1\xb9\x99\x8a\x99\x9e\x21"
"\xdb\x32\xdd\x97\x49\x6d\x33\x76",
+ .np = 2,
+ .tap = { 28, 28 },
+ }, {
+ .plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3"
+ "\x7a\x11\x85\x1c\xb3\x27\xbe\x55"
+ "\xec\x60\xf7\x8e\x02\x99\x30\xc7"
+ "\x3b\xd2\x69\x00\x74\x0b\xa2\x16"
+ "\xad\x44\xdb\x4f\xe6\x7d\x14\x88"
+ "\x1f\xb6\x2a\xc1\x58\xef\x63\xfa"
+ "\x91\x05\x9c\x33\xca\x3e\xd5\x6c"
+ "\x03\x77\x0e\xa5\x19\xb0\x47\xde"
+ "\x52\xe9\x80\x17\x8b\x22\xb9\x2d"
+ "\xc4\x5b\xf2\x66\xfd\x94\x08\x9f"
+ "\x36\xcd\x41\xd8\x6f\x06\x7a\x11"
+ "\xa8\x1c\xb3\x4a\xe1\x55\xec\x83"
+ "\x1a\x8e\x25\xbc\x30\xc7\x5e\xf5"
+ "\x69\x00\x97\x0b\xa2\x39\xd0\x44"
+ "\xdb\x72\x09\x7d\x14\xab\x1f\xb6"
+ "\x4d\xe4\x58\xef\x86\x1d\x91\x28"
+ "\xbf\x33\xca\x61\xf8\x6c\x03\x9a"
+ "\x0e\xa5\x3c\xd3\x47\xde\x75\x0c"
+ "\x80\x17\xae\x22\xb9\x50\xe7\x5b"
+ "\xf2\x89\x20\x94\x2b\xc2\x36\xcd"
+ "\x64\xfb\x6f\x06\x9d\x11\xa8\x3f"
+ "\xd6\x4a\xe1\x78\x0f\x83\x1a\xb1"
+ "\x25\xbc\x53\xea\x5e\xf5\x8c\x00"
+ "\x97\x2e\xc5\x39\xd0\x67\xfe\x72"
+ "\x09\xa0\x14\xab\x42\xd9\x4d\xe4"
+ "\x7b\x12\x86\x1d\xb4\x28\xbf\x56"
+ "\xed\x61\xf8\x8f\x03\x9a\x31\xc8"
+ "\x3c\xd3\x6a\x01\x75\x0c\xa3\x17"
+ "\xae\x45\xdc\x50\xe7\x7e\x15\x89"
+ "\x20\xb7\x2b\xc2\x59\xf0\x64\xfb"
+ "\x92\x06\x9d\x34\xcb\x3f\xd6\x6d"
+ "\x04\x78\x0f\xa6\x1a\xb1\x48\xdf"
+ "\x53\xea\x81\x18\x8c\x23\xba\x2e"
+ "\xc5\x5c\xf3\x67\xfe\x95\x09\xa0"
+ "\x37\xce\x42\xd9\x70\x07\x7b\x12"
+ "\xa9\x1d\xb4\x4b\xe2\x56\xed\x84"
+ "\x1b\x8f\x26\xbd\x31\xc8\x5f\xf6"
+ "\x6a\x01\x98\x0c\xa3\x3a\xd1\x45"
+ "\xdc\x73\x0a\x7e\x15\xac\x20\xb7"
+ "\x4e\xe5\x59\xf0\x87\x1e\x92\x29"
+ "\xc0\x34\xcb\x62\xf9\x6d\x04\x9b"
+ "\x0f\xa6\x3d\xd4\x48\xdf\x76\x0d"
+ "\x81\x18\xaf\x23\xba\x51\xe8\x5c"
+ "\xf3\x8a\x21\x95\x2c\xc3\x37\xce"
+ "\x65\xfc\x70\x07\x9e\x12\xa9\x40"
+ "\xd7\x4b\xe2\x79\x10\x84\x1b\xb2"
+ "\x26\xbd\x54\xeb\x5f\xf6\x8d\x01"
+ "\x98\x2f\xc6\x3a\xd1\x68\xff\x73"
+ "\x0a\xa1\x15\xac\x43\xda\x4e\xe5"
+ "\x7c\x13\x87\x1e\xb5\x29\xc0\x57"
+ "\xee\x62\xf9\x90\x04\x9b\x32\xc9"
+ "\x3d\xd4\x6b\x02\x76\x0d\xa4\x18"
+ "\xaf\x46\xdd\x51\xe8\x7f\x16\x8a"
+ "\x21\xb8\x2c\xc3\x5a\xf1\x65\xfc"
+ "\x93\x07\x9e\x35\xcc\x40\xd7\x6e"
+ "\x05\x79\x10\xa7\x1b\xb2\x49\xe0"
+ "\x54\xeb\x82\x19\x8d\x24\xbb\x2f"
+ "\xc6\x5d\xf4\x68\xff\x96\x0a\xa1"
+ "\x38\xcf\x43\xda\x71\x08\x7c\x13"
+ "\xaa\x1e\xb5\x4c\xe3\x57\xee\x85"
+ "\x1c\x90\x27\xbe\x32\xc9\x60\xf7"
+ "\x6b\x02\x99\x0d\xa4\x3b\xd2\x46"
+ "\xdd\x74\x0b\x7f\x16\xad\x21\xb8"
+ "\x4f\xe6\x5a\xf1\x88\x1f\x93\x2a"
+ "\xc1\x35\xcc\x63\xfa\x6e\x05\x9c"
+ "\x10\xa7\x3e\xd5\x49\xe0\x77\x0e"
+ "\x82\x19\xb0\x24\xbb\x52\xe9\x5d"
+ "\xf4\x8b\x22\x96\x2d\xc4\x38\xcf"
+ "\x66\xfd\x71\x08\x9f\x13\xaa\x41"
+ "\xd8\x4c\xe3\x7a\x11\x85\x1c\xb3"
+ "\x27\xbe\x55\xec\x60\xf7\x8e\x02"
+ "\x99\x30\xc7\x3b\xd2\x69\x00\x74"
+ "\x0b\xa2\x16\xad\x44\xdb\x4f\xe6"
+ "\x7d\x14\x88\x1f\xb6\x2a\xc1\x58"
+ "\xef\x63\xfa\x91\x05\x9c\x33\xca"
+ "\x3e\xd5\x6c\x03\x77\x0e\xa5\x19"
+ "\xb0\x47\xde\x52\xe9\x80\x17\x8b"
+ "\x22\xb9\x2d\xc4\x5b\xf2\x66\xfd"
+ "\x94\x08\x9f\x36\xcd\x41\xd8\x6f"
+ "\x06\x7a\x11\xa8\x1c\xb3\x4a\xe1"
+ "\x55\xec\x83\x1a\x8e\x25\xbc\x30"
+ "\xc7\x5e\xf5\x69\x00\x97\x0b\xa2"
+ "\x39\xd0\x44\xdb\x72\x09\x7d\x14"
+ "\xab\x1f\xb6\x4d\xe4\x58\xef\x86"
+ "\x1d\x91\x28\xbf\x33\xca\x61\xf8"
+ "\x6c\x03\x9a\x0e\xa5\x3c\xd3\x47"
+ "\xde\x75\x0c\x80\x17\xae\x22\xb9"
+ "\x50\xe7\x5b\xf2\x89\x20\x94\x2b"
+ "\xc2\x36\xcd\x64\xfb\x6f\x06\x9d"
+ "\x11\xa8\x3f\xd6\x4a\xe1\x78\x0f"
+ "\x83\x1a\xb1\x25\xbc\x53\xea\x5e"
+ "\xf5\x8c\x00\x97\x2e\xc5\x39\xd0"
+ "\x67\xfe\x72\x09\xa0\x14\xab\x42"
+ "\xd9\x4d\xe4\x7b\x12\x86\x1d\xb4"
+ "\x28\xbf\x56\xed\x61\xf8\x8f\x03"
+ "\x9a\x31\xc8\x3c\xd3\x6a\x01\x75"
+ "\x0c\xa3\x17\xae\x45\xdc\x50\xe7"
+ "\x7e\x15\x89\x20\xb7\x2b\xc2\x59"
+ "\xf0\x64\xfb\x92\x06\x9d\x34\xcb"
+ "\x3f\xd6\x6d\x04\x78\x0f\xa6\x1a"
+ "\xb1\x48\xdf\x53\xea\x81\x18\x8c"
+ "\x23\xba\x2e\xc5\x5c\xf3\x67\xfe"
+ "\x95\x09\xa0\x37\xce\x42\xd9\x70"
+ "\x07\x7b\x12\xa9\x1d\xb4\x4b\xe2"
+ "\x56\xed\x84\x1b\x8f\x26\xbd\x31"
+ "\xc8\x5f\xf6\x6a\x01\x98\x0c\xa3"
+ "\x3a\xd1\x45\xdc\x73\x0a\x7e\x15"
+ "\xac\x20\xb7\x4e\xe5\x59\xf0\x87"
+ "\x1e\x92\x29\xc0\x34\xcb\x62\xf9"
+ "\x6d\x04\x9b\x0f\xa6\x3d\xd4\x48"
+ "\xdf\x76\x0d\x81\x18\xaf\x23\xba"
+ "\x51\xe8\x5c\xf3\x8a\x21\x95\x2c"
+ "\xc3\x37\xce\x65\xfc\x70\x07\x9e"
+ "\x12\xa9\x40\xd7\x4b\xe2\x79\x10"
+ "\x84\x1b\xb2\x26\xbd\x54\xeb\x5f"
+ "\xf6\x8d\x01\x98\x2f\xc6\x3a\xd1"
+ "\x68\xff\x73\x0a\xa1\x15\xac\x43"
+ "\xda\x4e\xe5\x7c\x13\x87\x1e\xb5"
+ "\x29\xc0\x57\xee\x62\xf9\x90\x04"
+ "\x9b\x32\xc9\x3d\xd4\x6b\x02\x76"
+ "\x0d\xa4\x18\xaf\x46\xdd\x51\xe8"
+ "\x7f\x16\x8a\x21\xb8\x2c\xc3\x5a"
+ "\xf1\x65\xfc\x93\x07\x9e\x35\xcc"
+ "\x40\xd7\x6e\x05\x79\x10\xa7\x1b"
+ "\xb2\x49\xe0\x54\xeb\x82\x19\x8d"
+ "\x24\xbb\x2f\xc6\x5d\xf4\x68\xff"
+ "\x96\x0a\xa1\x38\xcf\x43\xda\x71"
+ "\x08\x7c\x13\xaa\x1e\xb5\x4c",
+ .psize = 1023,
+ .digest = "\xde\x41\x04\xbd\xda\xda\xd9\x71"
+ "\xf7\xfa\x80\xf5\xea\x11\x03\xb1"
+ "\x3b\x6a\xbc\x5f\xb9\x66\x26\xf7"
+ "\x8a\x97\xbb\xf2\x07\x08\x38\x30",
},
};
@@ -1109,6 +1381,144 @@ static const struct hash_testvec sha3_384_tv_template[] = {
"\x9b\xfd\xbc\x32\xb9\xd4\xad\x5a"
"\xa0\x4a\x1f\x07\x6e\x62\xfe\xa1"
"\x9e\xef\x51\xac\xd0\x65\x7c\x22",
+ .np = 2,
+ .tap = { 28, 28 },
+ }, {
+ .plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3"
+ "\x7a\x11\x85\x1c\xb3\x27\xbe\x55"
+ "\xec\x60\xf7\x8e\x02\x99\x30\xc7"
+ "\x3b\xd2\x69\x00\x74\x0b\xa2\x16"
+ "\xad\x44\xdb\x4f\xe6\x7d\x14\x88"
+ "\x1f\xb6\x2a\xc1\x58\xef\x63\xfa"
+ "\x91\x05\x9c\x33\xca\x3e\xd5\x6c"
+ "\x03\x77\x0e\xa5\x19\xb0\x47\xde"
+ "\x52\xe9\x80\x17\x8b\x22\xb9\x2d"
+ "\xc4\x5b\xf2\x66\xfd\x94\x08\x9f"
+ "\x36\xcd\x41\xd8\x6f\x06\x7a\x11"
+ "\xa8\x1c\xb3\x4a\xe1\x55\xec\x83"
+ "\x1a\x8e\x25\xbc\x30\xc7\x5e\xf5"
+ "\x69\x00\x97\x0b\xa2\x39\xd0\x44"
+ "\xdb\x72\x09\x7d\x14\xab\x1f\xb6"
+ "\x4d\xe4\x58\xef\x86\x1d\x91\x28"
+ "\xbf\x33\xca\x61\xf8\x6c\x03\x9a"
+ "\x0e\xa5\x3c\xd3\x47\xde\x75\x0c"
+ "\x80\x17\xae\x22\xb9\x50\xe7\x5b"
+ "\xf2\x89\x20\x94\x2b\xc2\x36\xcd"
+ "\x64\xfb\x6f\x06\x9d\x11\xa8\x3f"
+ "\xd6\x4a\xe1\x78\x0f\x83\x1a\xb1"
+ "\x25\xbc\x53\xea\x5e\xf5\x8c\x00"
+ "\x97\x2e\xc5\x39\xd0\x67\xfe\x72"
+ "\x09\xa0\x14\xab\x42\xd9\x4d\xe4"
+ "\x7b\x12\x86\x1d\xb4\x28\xbf\x56"
+ "\xed\x61\xf8\x8f\x03\x9a\x31\xc8"
+ "\x3c\xd3\x6a\x01\x75\x0c\xa3\x17"
+ "\xae\x45\xdc\x50\xe7\x7e\x15\x89"
+ "\x20\xb7\x2b\xc2\x59\xf0\x64\xfb"
+ "\x92\x06\x9d\x34\xcb\x3f\xd6\x6d"
+ "\x04\x78\x0f\xa6\x1a\xb1\x48\xdf"
+ "\x53\xea\x81\x18\x8c\x23\xba\x2e"
+ "\xc5\x5c\xf3\x67\xfe\x95\x09\xa0"
+ "\x37\xce\x42\xd9\x70\x07\x7b\x12"
+ "\xa9\x1d\xb4\x4b\xe2\x56\xed\x84"
+ "\x1b\x8f\x26\xbd\x31\xc8\x5f\xf6"
+ "\x6a\x01\x98\x0c\xa3\x3a\xd1\x45"
+ "\xdc\x73\x0a\x7e\x15\xac\x20\xb7"
+ "\x4e\xe5\x59\xf0\x87\x1e\x92\x29"
+ "\xc0\x34\xcb\x62\xf9\x6d\x04\x9b"
+ "\x0f\xa6\x3d\xd4\x48\xdf\x76\x0d"
+ "\x81\x18\xaf\x23\xba\x51\xe8\x5c"
+ "\xf3\x8a\x21\x95\x2c\xc3\x37\xce"
+ "\x65\xfc\x70\x07\x9e\x12\xa9\x40"
+ "\xd7\x4b\xe2\x79\x10\x84\x1b\xb2"
+ "\x26\xbd\x54\xeb\x5f\xf6\x8d\x01"
+ "\x98\x2f\xc6\x3a\xd1\x68\xff\x73"
+ "\x0a\xa1\x15\xac\x43\xda\x4e\xe5"
+ "\x7c\x13\x87\x1e\xb5\x29\xc0\x57"
+ "\xee\x62\xf9\x90\x04\x9b\x32\xc9"
+ "\x3d\xd4\x6b\x02\x76\x0d\xa4\x18"
+ "\xaf\x46\xdd\x51\xe8\x7f\x16\x8a"
+ "\x21\xb8\x2c\xc3\x5a\xf1\x65\xfc"
+ "\x93\x07\x9e\x35\xcc\x40\xd7\x6e"
+ "\x05\x79\x10\xa7\x1b\xb2\x49\xe0"
+ "\x54\xeb\x82\x19\x8d\x24\xbb\x2f"
+ "\xc6\x5d\xf4\x68\xff\x96\x0a\xa1"
+ "\x38\xcf\x43\xda\x71\x08\x7c\x13"
+ "\xaa\x1e\xb5\x4c\xe3\x57\xee\x85"
+ "\x1c\x90\x27\xbe\x32\xc9\x60\xf7"
+ "\x6b\x02\x99\x0d\xa4\x3b\xd2\x46"
+ "\xdd\x74\x0b\x7f\x16\xad\x21\xb8"
+ "\x4f\xe6\x5a\xf1\x88\x1f\x93\x2a"
+ "\xc1\x35\xcc\x63\xfa\x6e\x05\x9c"
+ "\x10\xa7\x3e\xd5\x49\xe0\x77\x0e"
+ "\x82\x19\xb0\x24\xbb\x52\xe9\x5d"
+ "\xf4\x8b\x22\x96\x2d\xc4\x38\xcf"
+ "\x66\xfd\x71\x08\x9f\x13\xaa\x41"
+ "\xd8\x4c\xe3\x7a\x11\x85\x1c\xb3"
+ "\x27\xbe\x55\xec\x60\xf7\x8e\x02"
+ "\x99\x30\xc7\x3b\xd2\x69\x00\x74"
+ "\x0b\xa2\x16\xad\x44\xdb\x4f\xe6"
+ "\x7d\x14\x88\x1f\xb6\x2a\xc1\x58"
+ "\xef\x63\xfa\x91\x05\x9c\x33\xca"
+ "\x3e\xd5\x6c\x03\x77\x0e\xa5\x19"
+ "\xb0\x47\xde\x52\xe9\x80\x17\x8b"
+ "\x22\xb9\x2d\xc4\x5b\xf2\x66\xfd"
+ "\x94\x08\x9f\x36\xcd\x41\xd8\x6f"
+ "\x06\x7a\x11\xa8\x1c\xb3\x4a\xe1"
+ "\x55\xec\x83\x1a\x8e\x25\xbc\x30"
+ "\xc7\x5e\xf5\x69\x00\x97\x0b\xa2"
+ "\x39\xd0\x44\xdb\x72\x09\x7d\x14"
+ "\xab\x1f\xb6\x4d\xe4\x58\xef\x86"
+ "\x1d\x91\x28\xbf\x33\xca\x61\xf8"
+ "\x6c\x03\x9a\x0e\xa5\x3c\xd3\x47"
+ "\xde\x75\x0c\x80\x17\xae\x22\xb9"
+ "\x50\xe7\x5b\xf2\x89\x20\x94\x2b"
+ "\xc2\x36\xcd\x64\xfb\x6f\x06\x9d"
+ "\x11\xa8\x3f\xd6\x4a\xe1\x78\x0f"
+ "\x83\x1a\xb1\x25\xbc\x53\xea\x5e"
+ "\xf5\x8c\x00\x97\x2e\xc5\x39\xd0"
+ "\x67\xfe\x72\x09\xa0\x14\xab\x42"
+ "\xd9\x4d\xe4\x7b\x12\x86\x1d\xb4"
+ "\x28\xbf\x56\xed\x61\xf8\x8f\x03"
+ "\x9a\x31\xc8\x3c\xd3\x6a\x01\x75"
+ "\x0c\xa3\x17\xae\x45\xdc\x50\xe7"
+ "\x7e\x15\x89\x20\xb7\x2b\xc2\x59"
+ "\xf0\x64\xfb\x92\x06\x9d\x34\xcb"
+ "\x3f\xd6\x6d\x04\x78\x0f\xa6\x1a"
+ "\xb1\x48\xdf\x53\xea\x81\x18\x8c"
+ "\x23\xba\x2e\xc5\x5c\xf3\x67\xfe"
+ "\x95\x09\xa0\x37\xce\x42\xd9\x70"
+ "\x07\x7b\x12\xa9\x1d\xb4\x4b\xe2"
+ "\x56\xed\x84\x1b\x8f\x26\xbd\x31"
+ "\xc8\x5f\xf6\x6a\x01\x98\x0c\xa3"
+ "\x3a\xd1\x45\xdc\x73\x0a\x7e\x15"
+ "\xac\x20\xb7\x4e\xe5\x59\xf0\x87"
+ "\x1e\x92\x29\xc0\x34\xcb\x62\xf9"
+ "\x6d\x04\x9b\x0f\xa6\x3d\xd4\x48"
+ "\xdf\x76\x0d\x81\x18\xaf\x23\xba"
+ "\x51\xe8\x5c\xf3\x8a\x21\x95\x2c"
+ "\xc3\x37\xce\x65\xfc\x70\x07\x9e"
+ "\x12\xa9\x40\xd7\x4b\xe2\x79\x10"
+ "\x84\x1b\xb2\x26\xbd\x54\xeb\x5f"
+ "\xf6\x8d\x01\x98\x2f\xc6\x3a\xd1"
+ "\x68\xff\x73\x0a\xa1\x15\xac\x43"
+ "\xda\x4e\xe5\x7c\x13\x87\x1e\xb5"
+ "\x29\xc0\x57\xee\x62\xf9\x90\x04"
+ "\x9b\x32\xc9\x3d\xd4\x6b\x02\x76"
+ "\x0d\xa4\x18\xaf\x46\xdd\x51\xe8"
+ "\x7f\x16\x8a\x21\xb8\x2c\xc3\x5a"
+ "\xf1\x65\xfc\x93\x07\x9e\x35\xcc"
+ "\x40\xd7\x6e\x05\x79\x10\xa7\x1b"
+ "\xb2\x49\xe0\x54\xeb\x82\x19\x8d"
+ "\x24\xbb\x2f\xc6\x5d\xf4\x68\xff"
+ "\x96\x0a\xa1\x38\xcf\x43\xda\x71"
+ "\x08\x7c\x13\xaa\x1e\xb5\x4c",
+ .psize = 1023,
+ .digest = "\x1b\x19\x4d\x8f\xd5\x36\x87\x71"
+ "\xcf\xca\x30\x85\x9b\xc1\x25\xc7"
+ "\x00\xcb\x73\x8a\x8e\xd4\xfe\x2b"
+ "\x1a\xa2\xdc\x2e\x41\xfd\x52\x51"
+ "\xd2\x21\xae\x2d\xc7\xae\x8c\x40"
+ "\xb9\xe6\x56\x48\x03\xcd\x88\x6b",
},
};
@@ -1147,6 +1557,146 @@ static const struct hash_testvec sha3_512_tv_template[] = {
"\xba\x1b\x0d\x8d\xc7\x8c\x08\x63"
"\x46\xb5\x33\xb4\x9c\x03\x0d\x99"
"\xa2\x7d\xaf\x11\x39\xd6\xe7\x5e",
+ .np = 2,
+ .tap = { 28, 28 },
+ }, {
+ .plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3"
+ "\x7a\x11\x85\x1c\xb3\x27\xbe\x55"
+ "\xec\x60\xf7\x8e\x02\x99\x30\xc7"
+ "\x3b\xd2\x69\x00\x74\x0b\xa2\x16"
+ "\xad\x44\xdb\x4f\xe6\x7d\x14\x88"
+ "\x1f\xb6\x2a\xc1\x58\xef\x63\xfa"
+ "\x91\x05\x9c\x33\xca\x3e\xd5\x6c"
+ "\x03\x77\x0e\xa5\x19\xb0\x47\xde"
+ "\x52\xe9\x80\x17\x8b\x22\xb9\x2d"
+ "\xc4\x5b\xf2\x66\xfd\x94\x08\x9f"
+ "\x36\xcd\x41\xd8\x6f\x06\x7a\x11"
+ "\xa8\x1c\xb3\x4a\xe1\x55\xec\x83"
+ "\x1a\x8e\x25\xbc\x30\xc7\x5e\xf5"
+ "\x69\x00\x97\x0b\xa2\x39\xd0\x44"
+ "\xdb\x72\x09\x7d\x14\xab\x1f\xb6"
+ "\x4d\xe4\x58\xef\x86\x1d\x91\x28"
+ "\xbf\x33\xca\x61\xf8\x6c\x03\x9a"
+ "\x0e\xa5\x3c\xd3\x47\xde\x75\x0c"
+ "\x80\x17\xae\x22\xb9\x50\xe7\x5b"
+ "\xf2\x89\x20\x94\x2b\xc2\x36\xcd"
+ "\x64\xfb\x6f\x06\x9d\x11\xa8\x3f"
+ "\xd6\x4a\xe1\x78\x0f\x83\x1a\xb1"
+ "\x25\xbc\x53\xea\x5e\xf5\x8c\x00"
+ "\x97\x2e\xc5\x39\xd0\x67\xfe\x72"
+ "\x09\xa0\x14\xab\x42\xd9\x4d\xe4"
+ "\x7b\x12\x86\x1d\xb4\x28\xbf\x56"
+ "\xed\x61\xf8\x8f\x03\x9a\x31\xc8"
+ "\x3c\xd3\x6a\x01\x75\x0c\xa3\x17"
+ "\xae\x45\xdc\x50\xe7\x7e\x15\x89"
+ "\x20\xb7\x2b\xc2\x59\xf0\x64\xfb"
+ "\x92\x06\x9d\x34\xcb\x3f\xd6\x6d"
+ "\x04\x78\x0f\xa6\x1a\xb1\x48\xdf"
+ "\x53\xea\x81\x18\x8c\x23\xba\x2e"
+ "\xc5\x5c\xf3\x67\xfe\x95\x09\xa0"
+ "\x37\xce\x42\xd9\x70\x07\x7b\x12"
+ "\xa9\x1d\xb4\x4b\xe2\x56\xed\x84"
+ "\x1b\x8f\x26\xbd\x31\xc8\x5f\xf6"
+ "\x6a\x01\x98\x0c\xa3\x3a\xd1\x45"
+ "\xdc\x73\x0a\x7e\x15\xac\x20\xb7"
+ "\x4e\xe5\x59\xf0\x87\x1e\x92\x29"
+ "\xc0\x34\xcb\x62\xf9\x6d\x04\x9b"
+ "\x0f\xa6\x3d\xd4\x48\xdf\x76\x0d"
+ "\x81\x18\xaf\x23\xba\x51\xe8\x5c"
+ "\xf3\x8a\x21\x95\x2c\xc3\x37\xce"
+ "\x65\xfc\x70\x07\x9e\x12\xa9\x40"
+ "\xd7\x4b\xe2\x79\x10\x84\x1b\xb2"
+ "\x26\xbd\x54\xeb\x5f\xf6\x8d\x01"
+ "\x98\x2f\xc6\x3a\xd1\x68\xff\x73"
+ "\x0a\xa1\x15\xac\x43\xda\x4e\xe5"
+ "\x7c\x13\x87\x1e\xb5\x29\xc0\x57"
+ "\xee\x62\xf9\x90\x04\x9b\x32\xc9"
+ "\x3d\xd4\x6b\x02\x76\x0d\xa4\x18"
+ "\xaf\x46\xdd\x51\xe8\x7f\x16\x8a"
+ "\x21\xb8\x2c\xc3\x5a\xf1\x65\xfc"
+ "\x93\x07\x9e\x35\xcc\x40\xd7\x6e"
+ "\x05\x79\x10\xa7\x1b\xb2\x49\xe0"
+ "\x54\xeb\x82\x19\x8d\x24\xbb\x2f"
+ "\xc6\x5d\xf4\x68\xff\x96\x0a\xa1"
+ "\x38\xcf\x43\xda\x71\x08\x7c\x13"
+ "\xaa\x1e\xb5\x4c\xe3\x57\xee\x85"
+ "\x1c\x90\x27\xbe\x32\xc9\x60\xf7"
+ "\x6b\x02\x99\x0d\xa4\x3b\xd2\x46"
+ "\xdd\x74\x0b\x7f\x16\xad\x21\xb8"
+ "\x4f\xe6\x5a\xf1\x88\x1f\x93\x2a"
+ "\xc1\x35\xcc\x63\xfa\x6e\x05\x9c"
+ "\x10\xa7\x3e\xd5\x49\xe0\x77\x0e"
+ "\x82\x19\xb0\x24\xbb\x52\xe9\x5d"
+ "\xf4\x8b\x22\x96\x2d\xc4\x38\xcf"
+ "\x66\xfd\x71\x08\x9f\x13\xaa\x41"
+ "\xd8\x4c\xe3\x7a\x11\x85\x1c\xb3"
+ "\x27\xbe\x55\xec\x60\xf7\x8e\x02"
+ "\x99\x30\xc7\x3b\xd2\x69\x00\x74"
+ "\x0b\xa2\x16\xad\x44\xdb\x4f\xe6"
+ "\x7d\x14\x88\x1f\xb6\x2a\xc1\x58"
+ "\xef\x63\xfa\x91\x05\x9c\x33\xca"
+ "\x3e\xd5\x6c\x03\x77\x0e\xa5\x19"
+ "\xb0\x47\xde\x52\xe9\x80\x17\x8b"
+ "\x22\xb9\x2d\xc4\x5b\xf2\x66\xfd"
+ "\x94\x08\x9f\x36\xcd\x41\xd8\x6f"
+ "\x06\x7a\x11\xa8\x1c\xb3\x4a\xe1"
+ "\x55\xec\x83\x1a\x8e\x25\xbc\x30"
+ "\xc7\x5e\xf5\x69\x00\x97\x0b\xa2"
+ "\x39\xd0\x44\xdb\x72\x09\x7d\x14"
+ "\xab\x1f\xb6\x4d\xe4\x58\xef\x86"
+ "\x1d\x91\x28\xbf\x33\xca\x61\xf8"
+ "\x6c\x03\x9a\x0e\xa5\x3c\xd3\x47"
+ "\xde\x75\x0c\x80\x17\xae\x22\xb9"
+ "\x50\xe7\x5b\xf2\x89\x20\x94\x2b"
+ "\xc2\x36\xcd\x64\xfb\x6f\x06\x9d"
+ "\x11\xa8\x3f\xd6\x4a\xe1\x78\x0f"
+ "\x83\x1a\xb1\x25\xbc\x53\xea\x5e"
+ "\xf5\x8c\x00\x97\x2e\xc5\x39\xd0"
+ "\x67\xfe\x72\x09\xa0\x14\xab\x42"
+ "\xd9\x4d\xe4\x7b\x12\x86\x1d\xb4"
+ "\x28\xbf\x56\xed\x61\xf8\x8f\x03"
+ "\x9a\x31\xc8\x3c\xd3\x6a\x01\x75"
+ "\x0c\xa3\x17\xae\x45\xdc\x50\xe7"
+ "\x7e\x15\x89\x20\xb7\x2b\xc2\x59"
+ "\xf0\x64\xfb\x92\x06\x9d\x34\xcb"
+ "\x3f\xd6\x6d\x04\x78\x0f\xa6\x1a"
+ "\xb1\x48\xdf\x53\xea\x81\x18\x8c"
+ "\x23\xba\x2e\xc5\x5c\xf3\x67\xfe"
+ "\x95\x09\xa0\x37\xce\x42\xd9\x70"
+ "\x07\x7b\x12\xa9\x1d\xb4\x4b\xe2"
+ "\x56\xed\x84\x1b\x8f\x26\xbd\x31"
+ "\xc8\x5f\xf6\x6a\x01\x98\x0c\xa3"
+ "\x3a\xd1\x45\xdc\x73\x0a\x7e\x15"
+ "\xac\x20\xb7\x4e\xe5\x59\xf0\x87"
+ "\x1e\x92\x29\xc0\x34\xcb\x62\xf9"
+ "\x6d\x04\x9b\x0f\xa6\x3d\xd4\x48"
+ "\xdf\x76\x0d\x81\x18\xaf\x23\xba"
+ "\x51\xe8\x5c\xf3\x8a\x21\x95\x2c"
+ "\xc3\x37\xce\x65\xfc\x70\x07\x9e"
+ "\x12\xa9\x40\xd7\x4b\xe2\x79\x10"
+ "\x84\x1b\xb2\x26\xbd\x54\xeb\x5f"
+ "\xf6\x8d\x01\x98\x2f\xc6\x3a\xd1"
+ "\x68\xff\x73\x0a\xa1\x15\xac\x43"
+ "\xda\x4e\xe5\x7c\x13\x87\x1e\xb5"
+ "\x29\xc0\x57\xee\x62\xf9\x90\x04"
+ "\x9b\x32\xc9\x3d\xd4\x6b\x02\x76"
+ "\x0d\xa4\x18\xaf\x46\xdd\x51\xe8"
+ "\x7f\x16\x8a\x21\xb8\x2c\xc3\x5a"
+ "\xf1\x65\xfc\x93\x07\x9e\x35\xcc"
+ "\x40\xd7\x6e\x05\x79\x10\xa7\x1b"
+ "\xb2\x49\xe0\x54\xeb\x82\x19\x8d"
+ "\x24\xbb\x2f\xc6\x5d\xf4\x68\xff"
+ "\x96\x0a\xa1\x38\xcf\x43\xda\x71"
+ "\x08\x7c\x13\xaa\x1e\xb5\x4c",
+ .psize = 1023,
+ .digest = "\x59\xda\x30\xe3\x90\xe4\x3d\xde"
+ "\xf0\xc6\x42\x17\xd7\xb2\x26\x47"
+ "\x90\x28\xa6\x84\xe8\x49\x7a\x86"
+ "\xd6\xb8\x9e\xf8\x07\x59\x21\x03"
+ "\xad\xd2\xed\x48\xa3\xb9\xa5\xf0"
+ "\xb3\xae\x02\x2b\xb8\xaf\xc3\x3b"
+ "\xd6\xb0\x8f\xcb\x76\x8b\xa7\x41"
+ "\x32\xc2\x8e\x50\x91\x86\x90\xfb",
},
};
diff --git a/crypto/twofish_common.c b/crypto/twofish_common.c
index 5f62c4f9f6e0..f3a0dd25f871 100644
--- a/crypto/twofish_common.c
+++ b/crypto/twofish_common.c
@@ -24,9 +24,8 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
*
* This code is a "clean room" implementation, written from the paper
* _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey,
diff --git a/crypto/twofish_generic.c b/crypto/twofish_generic.c
index ebf7a3efb572..07e62433fbfb 100644
--- a/crypto/twofish_generic.c
+++ b/crypto/twofish_generic.c
@@ -23,9 +23,8 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
*
* This code is a "clean room" implementation, written from the paper
* _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey,
diff --git a/crypto/xcbc.c b/crypto/xcbc.c
index df90b332554c..25c75af50d3f 100644
--- a/crypto/xcbc.c
+++ b/crypto/xcbc.c
@@ -12,8 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* Author:
* Kazunori Miyazawa <miyazawa@linux-ipv6.org>
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 46505396869e..d650c5b6ec90 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -361,22 +361,6 @@ config ACPI_PCI_SLOT
i.e., segment/bus/device/function tuples, with physical slots in
the system. If you are unsure, say N.
-config X86_PM_TIMER
- bool "Power Management Timer Support" if EXPERT
- depends on X86
- default y
- help
- The Power Management Timer is available on all ACPI-capable,
- in most cases even if ACPI is unusable or blacklisted.
-
- This timing source is not affected by power management features
- like aggressive processor idling, throttling, frequency and/or
- voltage scaling, unlike the commonly used Time Stamp Counter
- (TSC) timing source.
-
- You should nearly always say Y here because many modern
- systems require this timer.
-
config ACPI_CONTAINER
bool "Container and Module Devices"
default (ACPI_HOTPLUG_MEMORY || ACPI_HOTPLUG_CPU)
@@ -564,3 +548,19 @@ config TPS68470_PMIC_OPREGION
using this, are probed.
endif # ACPI
+
+config X86_PM_TIMER
+ bool "Power Management Timer Support" if EXPERT
+ depends on X86 && (ACPI || JAILHOUSE_GUEST)
+ default y
+ help
+ The Power Management Timer is available on all ACPI-capable systems,
+ in most cases even if ACPI is unusable or blacklisted.
+
+ This timing source is not affected by power management features
+ like aggressive processor idling, throttling, frequency and/or
+ voltage scaling, unlike the commonly used Time Stamp Counter
+ (TSC) timing source.
+
+ You should nearly always say Y here because many modern
+ systems require this timer.
diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c
index 3ec05aa1a903..2ff5c8c04e3b 100644
--- a/drivers/acpi/acpi_dbg.c
+++ b/drivers/acpi/acpi_dbg.c
@@ -718,9 +718,9 @@ again:
return size > 0 ? size : ret;
}
-static unsigned int acpi_aml_poll(struct file *file, poll_table *wait)
+static __poll_t acpi_aml_poll(struct file *file, poll_table *wait)
{
- int masks = 0;
+ __poll_t masks = 0;
poll_wait(file, &acpi_aml_io.wait, wait);
if (acpi_aml_user_readable())
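
The __poll_t conversion above is part of a tree-wide cleanup: __poll_t is a sparse "__bitwise" type, so mixing it with plain int in poll handlers now triggers static type-checking warnings. A minimal sketch of the resulting handler shape, assuming a hypothetical device with its own wait queue and state tests — poll_wait() and the EPOLL* masks are the real kernel API, the example_* names are not:

#include <linux/poll.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_waitq);
static bool example_readable(void);	/* hypothetical device state tests */
static bool example_writable(void);

static __poll_t example_poll(struct file *file, poll_table *wait)
{
	__poll_t mask = 0;

	/* Register on the wait queue; poll_wait() itself never blocks. */
	poll_wait(file, &example_waitq, wait);

	/* Build the event mask from __poll_t-typed EPOLL* constants. */
	if (example_readable())
		mask |= EPOLLIN | EPOLLRDNORM;
	if (example_writable())
		mask |= EPOLLOUT | EPOLLWRNORM;

	return mask;
}
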
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 7f2b02cc8ea1..2bcffec8dbf0 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -427,6 +427,142 @@ out:
return 0;
}
+struct lpss_device_links {
+ const char *supplier_hid;
+ const char *supplier_uid;
+ const char *consumer_hid;
+ const char *consumer_uid;
+ u32 flags;
+};
+
+/*
+ * The _DEP method is used to identify dependencies but instead of creating
+ * device links for every handle in _DEP, only links in the following list are
+ * created. That is necessary because, in the general case, _DEP can refer to
+ * devices that might not have drivers, or that are on different buses, or where
+ * the supplier is not enumerated until after the consumer is probed.
+ */
+static const struct lpss_device_links lpss_device_links[] = {
+ {"808622C1", "7", "80860F14", "3", DL_FLAG_PM_RUNTIME},
+};
+
+static bool hid_uid_match(const char *hid1, const char *uid1,
+ const char *hid2, const char *uid2)
+{
+ return !strcmp(hid1, hid2) && uid1 && uid2 && !strcmp(uid1, uid2);
+}
+
+static bool acpi_lpss_is_supplier(struct acpi_device *adev,
+ const struct lpss_device_links *link)
+{
+ return hid_uid_match(acpi_device_hid(adev), acpi_device_uid(adev),
+ link->supplier_hid, link->supplier_uid);
+}
+
+static bool acpi_lpss_is_consumer(struct acpi_device *adev,
+ const struct lpss_device_links *link)
+{
+ return hid_uid_match(acpi_device_hid(adev), acpi_device_uid(adev),
+ link->consumer_hid, link->consumer_uid);
+}
+
+struct hid_uid {
+ const char *hid;
+ const char *uid;
+};
+
+static int match_hid_uid(struct device *dev, void *data)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+ struct hid_uid *id = data;
+
+ if (!adev)
+ return 0;
+
+ return hid_uid_match(acpi_device_hid(adev), acpi_device_uid(adev),
+ id->hid, id->uid);
+}
+
+static struct device *acpi_lpss_find_device(const char *hid, const char *uid)
+{
+ struct hid_uid data = {
+ .hid = hid,
+ .uid = uid,
+ };
+
+ return bus_find_device(&platform_bus_type, NULL, &data, match_hid_uid);
+}
+
+static bool acpi_lpss_dep(struct acpi_device *adev, acpi_handle handle)
+{
+ struct acpi_handle_list dep_devices;
+ acpi_status status;
+ int i;
+
+ if (!acpi_has_method(adev->handle, "_DEP"))
+ return false;
+
+ status = acpi_evaluate_reference(adev->handle, "_DEP", NULL,
+ &dep_devices);
+ if (ACPI_FAILURE(status)) {
+ dev_dbg(&adev->dev, "Failed to evaluate _DEP.\n");
+ return false;
+ }
+
+ for (i = 0; i < dep_devices.count; i++) {
+ if (dep_devices.handles[i] == handle)
+ return true;
+ }
+
+ return false;
+}
+
+static void acpi_lpss_link_consumer(struct device *dev1,
+ const struct lpss_device_links *link)
+{
+ struct device *dev2;
+
+ dev2 = acpi_lpss_find_device(link->consumer_hid, link->consumer_uid);
+ if (!dev2)
+ return;
+
+ if (acpi_lpss_dep(ACPI_COMPANION(dev2), ACPI_HANDLE(dev1)))
+ device_link_add(dev2, dev1, link->flags);
+
+ put_device(dev2);
+}
+
+static void acpi_lpss_link_supplier(struct device *dev1,
+ const struct lpss_device_links *link)
+{
+ struct device *dev2;
+
+ dev2 = acpi_lpss_find_device(link->supplier_hid, link->supplier_uid);
+ if (!dev2)
+ return;
+
+ if (acpi_lpss_dep(ACPI_COMPANION(dev1), ACPI_HANDLE(dev2)))
+ device_link_add(dev1, dev2, link->flags);
+
+ put_device(dev2);
+}
+
+static void acpi_lpss_create_device_links(struct acpi_device *adev,
+ struct platform_device *pdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(lpss_device_links); i++) {
+ const struct lpss_device_links *link = &lpss_device_links[i];
+
+ if (acpi_lpss_is_supplier(adev, link))
+ acpi_lpss_link_consumer(&pdev->dev, link);
+
+ if (acpi_lpss_is_consumer(adev, link))
+ acpi_lpss_link_supplier(&pdev->dev, link);
+ }
+}
+
static int acpi_lpss_create_device(struct acpi_device *adev,
const struct acpi_device_id *id)
{
@@ -465,6 +601,8 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
acpi_dev_free_resource_list(&resource_list);
if (!pdata->mmio_base) {
+ /* Avoid acpi_bus_attach() instantiating a pdev for this dev. */
+ adev->pnp.type.platform_id = 0;
/* Skip the device, but continue the namespace scan. */
ret = 0;
goto err_out;
@@ -500,6 +638,7 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
adev->driver_data = pdata;
pdev = acpi_create_platform_device(adev, dev_desc->properties);
if (!IS_ERR_OR_NULL(pdev)) {
+ acpi_lpss_create_device_links(adev, pdev);
return 1;
}
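
Condensing the helpers above: for the single table entry, the probe path looks up the other end of the link by HID/UID, verifies via the consumer's _DEP list that the firmware really declares the dependency, and only then creates the link. The core of that, as a sketch — device_link_add() is the real driver-core API; supplier_dev and consumer_dev stand in for the devices found by acpi_lpss_find_device():

	/* Link only when firmware declares the dependency via _DEP. */
	if (acpi_lpss_dep(ACPI_COMPANION(consumer_dev), ACPI_HANDLE(supplier_dev))) {
		struct device_link *link;

		link = device_link_add(consumer_dev, supplier_dev,
				       DL_FLAG_PM_RUNTIME);
		if (!link)
			dev_warn(consumer_dev, "device link creation failed\n");
	}

With DL_FLAG_PM_RUNTIME, runtime-resuming the consumer resumes the supplier first, and the supplier is kept active for as long as the consumer is.
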
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 0972ec0e2eb8..f53ccc680238 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -80,8 +80,8 @@ MODULE_PARM_DESC(report_key_events,
static bool device_id_scheme = false;
module_param(device_id_scheme, bool, 0444);
-static bool only_lcd = false;
-module_param(only_lcd, bool, 0444);
+static int only_lcd = -1;
+module_param(only_lcd, int, 0444);
static int register_count;
static DEFINE_MUTEX(register_count_mutex);
@@ -2136,6 +2136,16 @@ int acpi_video_register(void)
goto leave;
}
+ /*
+ * We're seeing a lot of bogus backlight interfaces on newer machines
+ * without an LCD such as desktops, servers and HDMI sticks. Checking
+ * the lcd flag fixes this, so enable this on any machines which are
+ * win8 ready (where we also prefer the native backlight driver, so
+ * normally the acpi_video code should not register there anyway).
+ */
+ if (only_lcd == -1)
+ only_lcd = acpi_osi_is_win8();
+
dmi_check_system(video_dmi_table);
ret = acpi_bus_register_driver(&acpi_video_bus);
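
The switch from bool to int makes only_lcd a tri-state module parameter: -1 means "decide automatically", while an explicit 0 or 1 on the command line is preserved as a user override — something a bool cannot express. Condensed from the two hunks above (no new behavior is assumed here):

static int only_lcd = -1;	/* -1 = auto, 0/1 = explicit user override */
module_param(only_lcd, int, 0444);

	/* In acpi_video_register(): resolve "auto" exactly once, so a
	 * command-line 0 or 1 always wins over the Windows 8 heuristic. */
	if (only_lcd == -1)
		only_lcd = acpi_osi_is_win8();
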
diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h
index 7a1a68b5ac5c..2243c8164b34 100644
--- a/drivers/acpi/acpica/acapps.h
+++ b/drivers/acpi/acpica/acapps.h
@@ -80,6 +80,9 @@
prefix, ACPICA_COPYRIGHT, \
prefix
+#define ACPI_COMMON_BUILD_TIME \
+ "Build date/time: %s %s\n", __DATE__, __TIME__
+
/* Macros for usage messages */
#define ACPI_USAGE_HEADER(usage) \
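
The new macro expands to a format string followed by its two arguments, so a single identifier can feed an entire printf-style call — presumably as acpi_os_printf(ACPI_COMMON_BUILD_TIME) in the ACPICA tools. A standalone C demonstration of the idiom, with printf() standing in for acpi_os_printf():

#include <stdio.h>

#define COMMON_BUILD_TIME \
	"Build date/time: %s %s\n", __DATE__, __TIME__

int main(void)
{
	/* The macro supplies both the format and its two arguments. */
	printf(COMMON_BUILD_TIME);
	return 0;
}
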
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 71743e5252f5..54b8d9df9423 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -223,6 +223,10 @@ void
acpi_db_execute(char *name, char **args, acpi_object_type *types, u32 flags);
void
+acpi_db_create_execution_thread(char *method_name_arg,
+ char **arguments, acpi_object_type *types);
+
+void
acpi_db_create_execution_threads(char *num_threads_arg,
char *num_loops_arg, char *method_name_arg);
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 95eed442703f..45ef3f5dc9ad 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -46,7 +46,7 @@
/*****************************************************************************
*
- * Globals related to the ACPI tables
+ * Globals related to the incoming ACPI tables
*
****************************************************************************/
@@ -87,7 +87,7 @@ ACPI_GLOBAL(u8, acpi_gbl_integer_nybble_width);
/*****************************************************************************
*
- * Mutual exclusion within ACPICA subsystem
+ * Mutual exclusion within the ACPICA subsystem
*
****************************************************************************/
@@ -167,7 +167,7 @@ ACPI_GLOBAL(u8, acpi_gbl_next_owner_id_offset);
ACPI_INIT_GLOBAL(u8, acpi_gbl_namespace_initialized, FALSE);
-/* Misc */
+/* Miscellaneous */
ACPI_GLOBAL(u32, acpi_gbl_original_mode);
ACPI_GLOBAL(u32, acpi_gbl_ns_lookup_count);
@@ -191,10 +191,9 @@ extern const char acpi_gbl_lower_hex_digits[];
extern const char acpi_gbl_upper_hex_digits[];
extern const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES];
-#ifdef ACPI_DBG_TRACK_ALLOCATIONS
-
/* Lists for tracking memory allocations (debug only) */
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
ACPI_GLOBAL(struct acpi_memory_list *, acpi_gbl_global_list);
ACPI_GLOBAL(struct acpi_memory_list *, acpi_gbl_ns_node_list);
ACPI_GLOBAL(u8, acpi_gbl_display_final_mem_stats);
@@ -203,7 +202,7 @@ ACPI_GLOBAL(u8, acpi_gbl_disable_mem_tracking);
/*****************************************************************************
*
- * Namespace globals
+ * ACPI Namespace
*
****************************************************************************/
@@ -234,15 +233,20 @@ ACPI_INIT_GLOBAL(u32, acpi_gbl_nesting_level, 0);
/*****************************************************************************
*
- * Interpreter globals
+ * Interpreter/Parser globals
*
****************************************************************************/
-ACPI_GLOBAL(struct acpi_thread_state *, acpi_gbl_current_walk_list);
-
/* Control method single step flag */
ACPI_GLOBAL(u8, acpi_gbl_cm_single_step);
+ACPI_GLOBAL(struct acpi_thread_state *, acpi_gbl_current_walk_list);
+ACPI_INIT_GLOBAL(union acpi_parse_object, *acpi_gbl_current_scope, NULL);
+
+/* ASL/ASL+ converter */
+
+ACPI_INIT_GLOBAL(u8, acpi_gbl_capture_comments, FALSE);
+ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_last_list_head, NULL);
/*****************************************************************************
*
@@ -252,7 +256,6 @@ ACPI_GLOBAL(u8, acpi_gbl_cm_single_step);
extern struct acpi_bit_register_info
acpi_gbl_bit_register_info[ACPI_NUM_BITREG];
-
ACPI_GLOBAL(u8, acpi_gbl_sleep_type_a);
ACPI_GLOBAL(u8, acpi_gbl_sleep_type_b);
@@ -263,7 +266,6 @@ ACPI_GLOBAL(u8, acpi_gbl_sleep_type_b);
****************************************************************************/
#if (!ACPI_REDUCED_HARDWARE)
-
ACPI_GLOBAL(u8, acpi_gbl_all_gpes_initialized);
ACPI_GLOBAL(struct acpi_gpe_xrupt_info *, acpi_gbl_gpe_xrupt_list_head);
ACPI_GLOBAL(struct acpi_gpe_block_info *,
@@ -272,10 +274,8 @@ ACPI_GLOBAL(acpi_gbl_event_handler, acpi_gbl_global_event_handler);
ACPI_GLOBAL(void *, acpi_gbl_global_event_handler_context);
ACPI_GLOBAL(struct acpi_fixed_event_handler,
acpi_gbl_fixed_event_handlers[ACPI_NUM_FIXED_EVENTS]);
-
extern struct acpi_fixed_event_info
acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS];
-
#endif /* !ACPI_REDUCED_HARDWARE */
/*****************************************************************************
@@ -291,14 +291,14 @@ ACPI_GLOBAL(u32, acpi_gpe_count);
ACPI_GLOBAL(u32, acpi_sci_count);
ACPI_GLOBAL(u32, acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS]);
-/* Support for dynamic control method tracing mechanism */
+/* Dynamic control method tracing mechanism */
ACPI_GLOBAL(u32, acpi_gbl_original_dbg_level);
ACPI_GLOBAL(u32, acpi_gbl_original_dbg_layer);
/*****************************************************************************
*
- * Debugger and Disassembler globals
+ * Debugger and Disassembler
*
****************************************************************************/
@@ -326,7 +326,6 @@ ACPI_GLOBAL(struct acpi_external_file *, acpi_gbl_external_file_list);
#endif
#ifdef ACPI_DEBUGGER
-
ACPI_INIT_GLOBAL(u8, acpi_gbl_abort_method, FALSE);
ACPI_INIT_GLOBAL(acpi_thread_id, acpi_gbl_db_thread_id, ACPI_INVALID_THREAD_ID);
@@ -340,7 +339,6 @@ ACPI_GLOBAL(u32, acpi_gbl_db_console_debug_level);
ACPI_GLOBAL(struct acpi_namespace_node *, acpi_gbl_db_scope_node);
ACPI_GLOBAL(u8, acpi_gbl_db_terminate_loop);
ACPI_GLOBAL(u8, acpi_gbl_db_threads_terminated);
-
ACPI_GLOBAL(char *, acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS]);
ACPI_GLOBAL(acpi_object_type, acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS]);
@@ -350,32 +348,33 @@ ACPI_GLOBAL(char, acpi_gbl_db_parsed_buf[ACPI_DB_LINE_BUFFER_SIZE]);
ACPI_GLOBAL(char, acpi_gbl_db_scope_buf[ACPI_DB_LINE_BUFFER_SIZE]);
ACPI_GLOBAL(char, acpi_gbl_db_debug_filename[ACPI_DB_LINE_BUFFER_SIZE]);
-/*
- * Statistic globals
- */
+/* Statistics globals */
+
ACPI_GLOBAL(u16, acpi_gbl_obj_type_count[ACPI_TOTAL_TYPES]);
ACPI_GLOBAL(u16, acpi_gbl_node_type_count[ACPI_TOTAL_TYPES]);
ACPI_GLOBAL(u16, acpi_gbl_obj_type_count_misc);
ACPI_GLOBAL(u16, acpi_gbl_node_type_count_misc);
ACPI_GLOBAL(u32, acpi_gbl_num_nodes);
ACPI_GLOBAL(u32, acpi_gbl_num_objects);
-
#endif /* ACPI_DEBUGGER */
#if defined (ACPI_DISASSEMBLER) || defined (ACPI_ASL_COMPILER)
-
ACPI_GLOBAL(const char, *acpi_gbl_pld_panel_list[]);
ACPI_GLOBAL(const char, *acpi_gbl_pld_vertical_position_list[]);
ACPI_GLOBAL(const char, *acpi_gbl_pld_horizontal_position_list[]);
ACPI_GLOBAL(const char, *acpi_gbl_pld_shape_list[]);
-
ACPI_INIT_GLOBAL(u8, acpi_gbl_disasm_flag, FALSE);
-
#endif
-/*
- * Meant for the -ca option.
- */
+/*****************************************************************************
+ *
+ * ACPICA application-specific globals
+ *
+ ****************************************************************************/
+
+/* ASL-to-ASL+ conversion utility (implemented within the iASL compiler) */
+
+#ifdef ACPI_ASL_COMPILER
ACPI_INIT_GLOBAL(char *, acpi_gbl_current_inline_comment, NULL);
ACPI_INIT_GLOBAL(char *, acpi_gbl_current_end_node_comment, NULL);
ACPI_INIT_GLOBAL(char *, acpi_gbl_current_open_brace_comment, NULL);
@@ -386,23 +385,18 @@ ACPI_INIT_GLOBAL(char *, acpi_gbl_current_filename, NULL);
ACPI_INIT_GLOBAL(char *, acpi_gbl_current_parent_filename, NULL);
ACPI_INIT_GLOBAL(char *, acpi_gbl_current_include_filename, NULL);
-ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_last_list_head, NULL);
-
ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_def_blk_comment_list_head,
NULL);
ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_def_blk_comment_list_tail,
NULL);
-
ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_reg_comment_list_head,
NULL);
ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_reg_comment_list_tail,
NULL);
-
ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_inc_comment_list_head,
NULL);
ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_inc_comment_list_tail,
NULL);
-
ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_end_blk_comment_list_head,
NULL);
ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_end_blk_comment_list_tail,
@@ -410,30 +404,18 @@ ACPI_INIT_GLOBAL(struct acpi_comment_node, *acpi_gbl_end_blk_comment_list_tail,
ACPI_INIT_GLOBAL(struct acpi_comment_addr_node,
*acpi_gbl_comment_addr_list_head, NULL);
-
-ACPI_INIT_GLOBAL(union acpi_parse_object, *acpi_gbl_current_scope, NULL);
-
ACPI_INIT_GLOBAL(struct acpi_file_node, *acpi_gbl_file_tree_root, NULL);
ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_reg_comment_cache);
ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_comment_addr_cache);
ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_file_cache);
-ACPI_INIT_GLOBAL(u8, gbl_capture_comments, FALSE);
-
ACPI_INIT_GLOBAL(u8, acpi_gbl_debug_asl_conversion, FALSE);
ACPI_INIT_GLOBAL(ACPI_FILE, acpi_gbl_conv_debug_file, NULL);
-
ACPI_GLOBAL(char, acpi_gbl_table_sig[4]);
-
-/*****************************************************************************
- *
- * Application globals
- *
- ****************************************************************************/
+#endif
#ifdef ACPI_APPLICATION
-
ACPI_INIT_GLOBAL(ACPI_FILE, acpi_gbl_debug_file, NULL);
ACPI_INIT_GLOBAL(ACPI_FILE, acpi_gbl_output_file, NULL);
ACPI_INIT_GLOBAL(u8, acpi_gbl_debug_timeout, FALSE);
@@ -442,16 +424,6 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_debug_timeout, FALSE);
ACPI_GLOBAL(acpi_spinlock, acpi_gbl_print_lock); /* For print buffer */
ACPI_GLOBAL(char, acpi_gbl_print_buffer[1024]);
-
#endif /* ACPI_APPLICATION */
-/*****************************************************************************
- *
- * Info/help support
- *
- ****************************************************************************/
-
-extern const struct ah_predefined_name asl_predefined_info[];
-extern const struct ah_device_id asl_device_ids[];
-
#endif /* __ACGLOBAL_H__ */
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 0d45b8bb1678..a56675f0661e 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -622,7 +622,7 @@ struct acpi_control_state {
union acpi_parse_object *predicate_op;
u8 *aml_predicate_start; /* Start of if/while predicate */
u8 *package_end; /* End of if/while block */
- u32 loop_count; /* While() loop counter */
+ u64 loop_timeout; /* While() loop timeout */
};
/*
@@ -1218,16 +1218,17 @@ struct acpi_db_method_info {
acpi_object_type *types;
/*
- * Arguments to be passed to method for the command
- * Threads -
- * the Number of threads, ID of current thread and
- * Index of current thread inside all them created.
+ * Arguments to be passed to method for the commands Threads and
+ * Background. Note, ACPI specifies a maximum of 7 arguments (0 - 6).
+ *
+ * For the Threads command: the number of threads, the ID of the
+ * current thread, and its index among all created threads.
*/
char init_args;
#ifdef ACPI_DEBUGGER
- acpi_object_type arg_types[4];
+ acpi_object_type arg_types[ACPI_METHOD_NUM_ARGS];
#endif
- char *arguments[4];
+ char *arguments[ACPI_METHOD_NUM_ARGS];
char num_threads_str[11];
char id_of_thread_str[11];
char index_of_thread_str[11];
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index c7f0c96cc00f..128a3d71b598 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -455,7 +455,7 @@
* the plist contains a set of parens to allow variable-length lists.
* These macros are used for both the debug and non-debug versions of the code.
*/
-#define ACPI_ERROR_NAMESPACE(s, e) acpi_ut_namespace_error (AE_INFO, s, e);
+#define ACPI_ERROR_NAMESPACE(s, p, e) acpi_ut_prefixed_namespace_error (AE_INFO, s, p, e);
#define ACPI_ERROR_METHOD(s, n, p, e) acpi_ut_method_error (AE_INFO, s, n, p, e);
#define ACPI_WARN_PREDEFINED(plist) acpi_ut_predefined_warning plist
#define ACPI_INFO_PREDEFINED(plist) acpi_ut_predefined_info plist
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 54a0c51b3e37..2fb1bb78d85c 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -289,6 +289,9 @@ acpi_ns_build_normalized_path(struct acpi_namespace_node *node,
char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node,
u8 no_trailing);
+char *acpi_ns_build_prefixed_pathname(union acpi_generic_state *prefix_scope,
+ const char *internal_path);
+
char *acpi_ns_name_of_current_scope(struct acpi_walk_state *walk_state);
acpi_status
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 83b75e9db7ef..b6b29d717824 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -118,9 +118,6 @@ extern const char *acpi_gbl_ptyp_decode[];
#ifndef ACPI_MSG_ERROR
#define ACPI_MSG_ERROR "ACPI Error: "
#endif
-#ifndef ACPI_MSG_EXCEPTION
-#define ACPI_MSG_EXCEPTION "ACPI Exception: "
-#endif
#ifndef ACPI_MSG_WARNING
#define ACPI_MSG_WARNING "ACPI Warning: "
#endif
@@ -129,10 +126,10 @@ extern const char *acpi_gbl_ptyp_decode[];
#endif
#ifndef ACPI_MSG_BIOS_ERROR
-#define ACPI_MSG_BIOS_ERROR "ACPI BIOS Error (bug): "
+#define ACPI_MSG_BIOS_ERROR "Firmware Error (ACPI): "
#endif
#ifndef ACPI_MSG_BIOS_WARNING
-#define ACPI_MSG_BIOS_WARNING "ACPI BIOS Warning (bug): "
+#define ACPI_MSG_BIOS_WARNING "Firmware Warning (ACPI): "
#endif
/*
@@ -233,10 +230,10 @@ u64 acpi_ut_implicit_strtoul64(char *string);
*/
acpi_status acpi_ut_init_globals(void);
-#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
-
const char *acpi_ut_get_mutex_name(u32 mutex_id);
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+
const char *acpi_ut_get_notify_name(u32 notify_value, acpi_object_type type);
#endif
@@ -641,9 +638,11 @@ void ut_convert_backslashes(char *pathname);
void acpi_ut_repair_name(char *name);
-#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION)
+#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION) || defined (ACPI_DEBUG_OUTPUT)
u8 acpi_ut_safe_strcpy(char *dest, acpi_size dest_size, char *source);
+void acpi_ut_safe_strncpy(char *dest, char *source, acpi_size dest_size);
+
u8 acpi_ut_safe_strcat(char *dest, acpi_size dest_size, char *source);
u8
@@ -737,9 +736,11 @@ acpi_ut_predefined_bios_error(const char *module_name,
u8 node_flags, const char *format, ...);
void
-acpi_ut_namespace_error(const char *module_name,
- u32 line_number,
- const char *internal_name, acpi_status lookup_status);
+acpi_ut_prefixed_namespace_error(const char *module_name,
+ u32 line_number,
+ union acpi_generic_state *prefix_scope,
+ const char *internal_name,
+ acpi_status lookup_status);
void
acpi_ut_method_error(const char *module_name,
diff --git a/drivers/acpi/acpica/dbexec.c b/drivers/acpi/acpica/dbexec.c
index 3b30319752f0..ed088fceb18d 100644
--- a/drivers/acpi/acpica/dbexec.c
+++ b/drivers/acpi/acpica/dbexec.c
@@ -67,6 +67,8 @@ static acpi_status
acpi_db_execution_walk(acpi_handle obj_handle,
u32 nesting_level, void *context, void **return_value);
+static void ACPI_SYSTEM_XFACE acpi_db_single_execution_thread(void *context);
+
/*******************************************************************************
*
* FUNCTION: acpi_db_delete_objects
@@ -229,7 +231,7 @@ static acpi_status acpi_db_execute_setup(struct acpi_db_method_info *info)
ACPI_FUNCTION_NAME(db_execute_setup);
- /* Catenate the current scope to the supplied name */
+ /* Concatenate the current scope to the supplied name */
info->pathname[0] = 0;
if ((info->name[0] != '\\') && (info->name[0] != '/')) {
@@ -611,6 +613,112 @@ static void ACPI_SYSTEM_XFACE acpi_db_method_thread(void *context)
/*******************************************************************************
*
+ * FUNCTION: acpi_db_single_execution_thread
+ *
+ * PARAMETERS: context - Method info struct
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Create one thread and execute a method
+ *
+ ******************************************************************************/
+
+static void ACPI_SYSTEM_XFACE acpi_db_single_execution_thread(void *context)
+{
+ struct acpi_db_method_info *info = context;
+ acpi_status status;
+ struct acpi_buffer return_obj;
+
+ acpi_os_printf("\n");
+
+ status = acpi_db_execute_method(info, &return_obj);
+ if (ACPI_FAILURE(status)) {
+ acpi_os_printf("%s During evaluation of %s\n",
+ acpi_format_exception(status), info->pathname);
+ return;
+ }
+
+ /* Display a return object, if any */
+
+ if (return_obj.length) {
+ acpi_os_printf("Evaluation of %s returned object %p, "
+ "external buffer length %X\n",
+ acpi_gbl_db_method_info.pathname,
+ return_obj.pointer, (u32)return_obj.length);
+
+ acpi_db_dump_external_object(return_obj.pointer, 1);
+ }
+
+ acpi_os_printf("\nBackground thread completed\n%c ",
+ ACPI_DEBUGGER_COMMAND_PROMPT);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_db_create_execution_thread
+ *
+ * PARAMETERS: method_name_arg - Control method to execute
+ * arguments - Array of arguments to the method
+ * types - Corresponding array of object types
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Create a single thread to evaluate a namespace object. Handles
+ * arguments passed on command line for control methods.
+ *
+ ******************************************************************************/
+
+void
+acpi_db_create_execution_thread(char *method_name_arg,
+ char **arguments, acpi_object_type *types)
+{
+ acpi_status status;
+ u32 i;
+
+ memset(&acpi_gbl_db_method_info, 0, sizeof(struct acpi_db_method_info));
+ acpi_gbl_db_method_info.name = method_name_arg;
+ acpi_gbl_db_method_info.init_args = 1;
+ acpi_gbl_db_method_info.args = acpi_gbl_db_method_info.arguments;
+ acpi_gbl_db_method_info.types = acpi_gbl_db_method_info.arg_types;
+
+ /* Set up method arguments, up to 7 (0-6) */
+
+ for (i = 0; (i < ACPI_METHOD_NUM_ARGS) && *arguments; i++) {
+ acpi_gbl_db_method_info.arguments[i] = *arguments;
+ arguments++;
+
+ acpi_gbl_db_method_info.arg_types[i] = *types;
+ types++;
+ }
+
+ status = acpi_db_execute_setup(&acpi_gbl_db_method_info);
+ if (ACPI_FAILURE(status)) {
+ return;
+ }
+
+ /* Get the NS node; this also determines whether the object exists */
+
+ status = acpi_get_handle(NULL, acpi_gbl_db_method_info.pathname,
+ &acpi_gbl_db_method_info.method);
+ if (ACPI_FAILURE(status)) {
+ acpi_os_printf("%s Could not get handle for %s\n",
+ acpi_format_exception(status),
+ acpi_gbl_db_method_info.pathname);
+ return;
+ }
+
+ status = acpi_os_execute(OSL_DEBUGGER_EXEC_THREAD,
+ acpi_db_single_execution_thread,
+ &acpi_gbl_db_method_info);
+ if (ACPI_FAILURE(status)) {
+ return;
+ }
+
+ acpi_os_printf("\nBackground thread started\n");
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_db_create_execution_threads
*
* PARAMETERS: num_threads_arg - Number of threads to create
diff --git a/drivers/acpi/acpica/dbfileio.c b/drivers/acpi/acpica/dbfileio.c
index 4d81ea291d93..cf9607945704 100644
--- a/drivers/acpi/acpica/dbfileio.c
+++ b/drivers/acpi/acpica/dbfileio.c
@@ -99,8 +99,8 @@ void acpi_db_open_debug_file(char *name)
}
acpi_os_printf("Debug output file %s opened\n", name);
- strncpy(acpi_gbl_db_debug_filename, name,
- sizeof(acpi_gbl_db_debug_filename));
+ acpi_ut_safe_strncpy(acpi_gbl_db_debug_filename, name,
+ sizeof(acpi_gbl_db_debug_filename));
acpi_gbl_db_output_to_file = TRUE;
}
#endif
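
strncpy() does not NUL-terminate the destination when the source is dest_size bytes or longer, which could leave acpi_gbl_db_debug_filename unterminated; that is what the new helper guards against. Its ACPICA implementation is not part of this hunk, so the following is only a minimal standalone sketch of the guarantee it must provide, not the actual code:

#include <string.h>

/* Copy at most dest_size - 1 characters and always NUL-terminate,
 * unlike strncpy(), which omits the terminator on truncation. */
static void safe_strncpy(char *dest, const char *source, size_t dest_size)
{
	strncpy(dest, source, dest_size - 1);
	dest[dest_size - 1] = '\0';
}
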
diff --git a/drivers/acpi/acpica/dbinput.c b/drivers/acpi/acpica/dbinput.c
index 2626d79db064..954ca3b981a7 100644
--- a/drivers/acpi/acpica/dbinput.c
+++ b/drivers/acpi/acpica/dbinput.c
@@ -136,6 +136,7 @@ enum acpi_ex_debugger_commands {
CMD_UNLOAD,
CMD_TERMINATE,
+ CMD_BACKGROUND,
CMD_THREADS,
CMD_TEST,
@@ -212,6 +213,7 @@ static const struct acpi_db_command_info acpi_gbl_db_commands[] = {
{"UNLOAD", 1},
{"TERMINATE", 0},
+ {"BACKGROUND", 1},
{"THREADS", 3},
{"TEST", 1},
@@ -222,9 +224,56 @@ static const struct acpi_db_command_info acpi_gbl_db_commands[] = {
/*
* Help for all debugger commands. First argument is the number of lines
* of help to output for the command.
+ *
+ * Note: Some commands are not supported by the kernel-level version of
+ * the debugger.
*/
static const struct acpi_db_command_help acpi_gbl_db_command_help[] = {
- {0, "\nGeneral-Purpose Commands:", "\n"},
+ {0, "\nNamespace Access:", "\n"},
+ {1, " Businfo", "Display system bus info\n"},
+ {1, " Disassemble <Method>", "Disassemble a control method\n"},
+ {1, " Find <AcpiName> (? is wildcard)",
+ "Find ACPI name(s) with wildcards\n"},
+ {1, " Integrity", "Validate namespace integrity\n"},
+ {1, " Methods", "Display list of loaded control methods\n"},
+ {1, " Namespace [Object] [Depth]",
+ "Display loaded namespace tree/subtree\n"},
+ {1, " Notify <Object> <Value>", "Send a notification on Object\n"},
+ {1, " Objects [ObjectType]",
+ "Display summary of all objects or just given type\n"},
+ {1, " Owner <OwnerId> [Depth]",
+ "Display loaded namespace by object owner\n"},
+ {1, " Paths", "Display full pathnames of namespace objects\n"},
+ {1, " Predefined", "Check all predefined names\n"},
+ {1, " Prefix [<Namepath>]", "Set or Get current execution prefix\n"},
+ {1, " References <Addr>", "Find all references to object at addr\n"},
+ {1, " Resources [DeviceName]",
+ "Display Device resources (no arg = all devices)\n"},
+ {1, " Set N <NamedObject> <Value>", "Set value for named integer\n"},
+ {1, " Template <Object>", "Format/dump a Buffer/ResourceTemplate\n"},
+ {1, " Type <Object>", "Display object type\n"},
+
+ {0, "\nControl Method Execution:", "\n"},
+ {1, " Evaluate <Namepath> [Arguments]",
+ "Evaluate object or control method\n"},
+ {1, " Execute <Namepath> [Arguments]", "Synonym for Evaluate\n"},
+#ifdef ACPI_APPLICATION
+ {1, " Background <Namepath> [Arguments]",
+ "Evaluate object/method in a separate thread\n"},
+ {1, " Thread <Threads><Loops><NamePath>",
+ "Spawn threads to execute method(s)\n"},
+#endif
+ {1, " Debug <Namepath> [Arguments]", "Single-Step a control method\n"},
+ {7, " [Arguments] formats:", "Control method argument formats\n"},
+ {1, " Hex Integer", "Integer\n"},
+ {1, " \"Ascii String\"", "String\n"},
+ {1, " (Hex Byte List)", "Buffer\n"},
+ {1, " (01 42 7A BF)", "Buffer example (4 bytes)\n"},
+ {1, " [Package Element List]", "Package\n"},
+ {1, " [0x01 0x1234 \"string\"]",
+ "Package example (3 elements)\n"},
+
+ {0, "\nMiscellaneous:", "\n"},
{1, " Allocations", "Display list of current memory allocations\n"},
{2, " Dump <Address>|<Namepath>", "\n"},
{0, " [Byte|Word|Dword|Qword]",
@@ -248,46 +297,30 @@ static const struct acpi_db_command_help acpi_gbl_db_command_help[] = {
{1, " Stack", "Display CPU stack usage\n"},
{1, " Tables", "Info about current ACPI table(s)\n"},
{1, " Tables", "Display info about loaded ACPI tables\n"},
+#ifdef ACPI_APPLICATION
+ {1, " Terminate", "Delete namespace and all internal objects\n"},
+#endif
{1, " ! <CommandNumber>", "Execute command from history buffer\n"},
{1, " !!", "Execute last command again\n"},
- {0, "\nNamespace Access Commands:", "\n"},
- {1, " Businfo", "Display system bus info\n"},
- {1, " Disassemble <Method>", "Disassemble a control method\n"},
- {1, " Find <AcpiName> (? is wildcard)",
- "Find ACPI name(s) with wildcards\n"},
- {1, " Integrity", "Validate namespace integrity\n"},
- {1, " Methods", "Display list of loaded control methods\n"},
- {1, " Namespace [Object] [Depth]",
- "Display loaded namespace tree/subtree\n"},
- {1, " Notify <Object> <Value>", "Send a notification on Object\n"},
- {1, " Objects [ObjectType]",
- "Display summary of all objects or just given type\n"},
- {1, " Owner <OwnerId> [Depth]",
- "Display loaded namespace by object owner\n"},
- {1, " Paths", "Display full pathnames of namespace objects\n"},
- {1, " Predefined", "Check all predefined names\n"},
- {1, " Prefix [<Namepath>]", "Set or Get current execution prefix\n"},
- {1, " References <Addr>", "Find all references to object at addr\n"},
- {1, " Resources [DeviceName]",
- "Display Device resources (no arg = all devices)\n"},
- {1, " Set N <NamedObject> <Value>", "Set value for named integer\n"},
- {1, " Template <Object>", "Format/dump a Buffer/ResourceTemplate\n"},
- {1, " Type <Object>", "Display object type\n"},
+ {0, "\nMethod and Namespace Debugging:", "\n"},
+ {5, " Trace <State> [<Namepath>] [Once]",
+ "Trace control method execution\n"},
+ {1, " Enable", "Enable all messages\n"},
+ {1, " Disable", "Disable tracing\n"},
+ {1, " Method", "Enable method execution messages\n"},
+ {1, " Opcode", "Enable opcode execution messages\n"},
+ {3, " Test <TestName>", "Invoke a debug test\n"},
+ {1, " Objects", "Read/write/compare all namespace data objects\n"},
+ {1, " Predefined",
+ "Validate all ACPI predefined names (_STA, etc.)\n"},
+ {1, " Execute predefined",
+ "Execute all predefined (public) methods\n"},
- {0, "\nControl Method Execution Commands:", "\n"},
+ {0, "\nControl Method Single-Step Execution:", "\n"},
{1, " Arguments (or Args)", "Display method arguments\n"},
{1, " Breakpoint <AmlOffset>", "Set an AML execution breakpoint\n"},
{1, " Call", "Run to next control method invocation\n"},
- {1, " Debug <Namepath> [Arguments]", "Single Step a control method\n"},
- {6, " Evaluate", "Synonym for Execute\n"},
- {5, " Execute <Namepath> [Arguments]", "Execute control method\n"},
- {1, " Hex Integer", "Integer method argument\n"},
- {1, " \"Ascii String\"", "String method argument\n"},
- {1, " (Hex Byte List)", "Buffer method argument\n"},
- {1, " [Package Element List]", "Package method argument\n"},
- {5, " Execute predefined",
- "Execute all predefined (public) methods\n"},
{1, " Go", "Allow method to run to completion\n"},
{1, " Information", "Display info about the current method\n"},
{1, " Into", "Step into (not over) a method call\n"},
@@ -296,41 +329,24 @@ static const struct acpi_db_command_help acpi_gbl_db_command_help[] = {
{1, " Results", "Display method result stack\n"},
{1, " Set <A|L> <#> <Value>", "Set method data (Arguments/Locals)\n"},
{1, " Stop", "Terminate control method\n"},
- {5, " Trace <State> [<Namepath>] [Once]",
- "Trace control method execution\n"},
- {1, " Enable", "Enable all messages\n"},
- {1, " Disable", "Disable tracing\n"},
- {1, " Method", "Enable method execution messages\n"},
- {1, " Opcode", "Enable opcode execution messages\n"},
{1, " Tree", "Display control method calling tree\n"},
{1, " <Enter>", "Single step next AML opcode (over calls)\n"},
#ifdef ACPI_APPLICATION
- {0, "\nHardware Simulation Commands:", "\n"},
- {1, " EnableAcpi", "Enable ACPI (hardware) mode\n"},
- {1, " Event <F|G> <Value>", "Generate AcpiEvent (Fixed/GPE)\n"},
- {1, " Gpe <GpeNum> [GpeBlockDevice]", "Simulate a GPE\n"},
- {1, " Gpes", "Display info on all GPE devices\n"},
- {1, " Sci", "Generate an SCI\n"},
- {1, " Sleep [SleepState]", "Simulate sleep/wake sequence(s) (0-5)\n"},
-
- {0, "\nFile I/O Commands:", "\n"},
+ {0, "\nFile Operations:", "\n"},
{1, " Close", "Close debug output file\n"},
{1, " Load <Input Filename>", "Load ACPI table from a file\n"},
{1, " Open <Output Filename>", "Open a file for debug output\n"},
{1, " Unload <Namepath>",
"Unload an ACPI table via namespace object\n"},
- {0, "\nUser Space Commands:", "\n"},
- {1, " Terminate", "Delete namespace and all internal objects\n"},
- {1, " Thread <Threads><Loops><NamePath>",
- "Spawn threads to execute method(s)\n"},
-
- {0, "\nDebug Test Commands:", "\n"},
- {3, " Test <TestName>", "Invoke a debug test\n"},
- {1, " Objects", "Read/write/compare all namespace data objects\n"},
- {1, " Predefined",
- "Execute all ACPI predefined names (_STA, etc.)\n"},
+ {0, "\nHardware Simulation:", "\n"},
+ {1, " EnableAcpi", "Enable ACPI (hardware) mode\n"},
+ {1, " Event <F|G> <Value>", "Generate AcpiEvent (Fixed/GPE)\n"},
+ {1, " Gpe <GpeNum> [GpeBlockDevice]", "Simulate a GPE\n"},
+ {1, " Gpes", "Display info on all GPE devices\n"},
+ {1, " Sci", "Generate an SCI\n"},
+ {1, " Sleep [SleepState]", "Simulate sleep/wake sequence(s) (0-5)\n"},
#endif
{0, NULL, NULL}
};
@@ -442,11 +458,15 @@ static void acpi_db_display_help(char *command)
/* No argument to help, display help for all commands */
+ acpi_os_printf("\nSummary of AML Debugger Commands\n\n");
+
while (next->invocation) {
acpi_os_printf("%-38s%s", next->invocation,
next->description);
next++;
}
+ acpi_os_printf("\n");
+
} else {
/* Display help for all commands that match the substring */
@@ -1087,6 +1107,13 @@ acpi_db_command_dispatch(char *input_buffer,
/* acpi_initialize (NULL); */
break;
+ case CMD_BACKGROUND:
+
+ acpi_db_create_execution_thread(acpi_gbl_db_args[1],
+ &acpi_gbl_db_args[2],
+ &acpi_gbl_db_arg_types[2]);
+ break;
+
case CMD_THREADS:
acpi_db_create_execution_threads(acpi_gbl_db_args[1],
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index f470e81b0499..4b6ebc2a2851 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -118,6 +118,8 @@ acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
control_state->control.package_end =
walk_state->parser_state.pkg_end;
control_state->control.opcode = op->common.aml_opcode;
+ control_state->control.loop_timeout = acpi_os_get_timer() +
+ (u64)(acpi_gbl_max_loop_iterations * ACPI_100NSEC_PER_SEC);
/* Push the control state on this walk's control stack */
@@ -206,15 +208,15 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state *walk_state,
/* Predicate was true, the body of the loop was just executed */
/*
- * This loop counter mechanism allows the interpreter to escape
- * possibly infinite loops. This can occur in poorly written AML
- * when the hardware does not respond within a while loop and the
- * loop does not implement a timeout.
+ * This infinite loop detection mechanism allows the interpreter
+ * to escape possibly infinite loops. This can occur in poorly
+ * written AML when the hardware does not respond within a while
+ * loop and the loop does not implement a timeout.
*/
- control_state->control.loop_count++;
- if (control_state->control.loop_count >
- acpi_gbl_max_loop_iterations) {
- status = AE_AML_INFINITE_LOOP;
+ if (ACPI_TIME_AFTER(acpi_os_get_timer(),
+ control_state->control.
+ loop_timeout)) {
+ status = AE_AML_LOOP_TIMEOUT;
break;
}
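
The hunk above replaces the old iteration counter with a wall-clock deadline: acpi_ds_exec_begin_control_op() (earlier in this file's diff) stores now + acpi_gbl_max_loop_iterations seconds, scaled by ACPI_100NSEC_PER_SEC to match acpi_os_get_timer()'s 100 ns units, and each time a While() predicate re-evaluates true the interpreter fails with AE_AML_LOOP_TIMEOUT once ACPI_TIME_AFTER() says the deadline has passed. A standalone sketch of the same pattern on POSIX clocks (all names and the 2 s budget here are illustrative, not ACPICA's):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t monotonic_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

int main(void)
{
	/* Deadline computed once at loop entry, checked on every pass. */
	uint64_t deadline = monotonic_ns() + 2ULL * 1000000000ULL;

	for (;;) {	/* stands in for a While() that never goes false */
		if (monotonic_ns() > deadline) {
			fprintf(stderr, "loop timed out (AE_AML_LOOP_TIMEOUT analogue)\n");
			return 1;
		}
	}
}
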
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index 7bcf5f5ea029..0cab34a593d5 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -209,7 +209,8 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
ACPI_IMODE_LOAD_PASS1, flags,
walk_state, &node);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ arg->common.value.string, status);
return_ACPI_STATUS(status);
}
}
@@ -383,7 +384,9 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
walk_state,
&info->connection_node);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(child->common.
+ ACPI_ERROR_NAMESPACE(walk_state->
+ scope_info,
+ child->common.
value.name,
status);
return_ACPI_STATUS(status);
@@ -402,7 +405,8 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
ACPI_NS_DONT_OPEN_SCOPE,
walk_state, &info->field_node);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE((char *)&arg->named.name,
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ (char *)&arg->named.name,
status);
return_ACPI_STATUS(status);
} else {
@@ -498,7 +502,8 @@ acpi_ds_create_field(union acpi_parse_object *op,
&region_node);
#endif
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(arg->common.value.name, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ arg->common.value.name, status);
return_ACPI_STATUS(status);
}
}
@@ -618,7 +623,8 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
ACPI_IMODE_LOAD_PASS1, flags,
walk_state, &node);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE((char *)&arg->named.name,
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ (char *)&arg->named.name,
status);
if (status != AE_ALREADY_EXISTS) {
return_ACPI_STATUS(status);
@@ -681,7 +687,8 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
&region_node);
#endif
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(arg->common.value.name, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ arg->common.value.name, status);
return_ACPI_STATUS(status);
}
}
@@ -695,7 +702,8 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
ACPI_NS_SEARCH_PARENT, walk_state,
&info.register_node);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ arg->common.value.string, status);
return_ACPI_STATUS(status);
}
@@ -765,7 +773,8 @@ acpi_ds_create_index_field(union acpi_parse_object *op,
ACPI_NS_SEARCH_PARENT, walk_state,
&info.register_node);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ arg->common.value.string, status);
return_ACPI_STATUS(status);
}
@@ -778,7 +787,8 @@ acpi_ds_create_index_field(union acpi_parse_object *op,
ACPI_NS_SEARCH_PARENT, walk_state,
&info.data_register_node);
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ arg->common.value.string, status);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 82448551781b..b21fe084ffc8 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -112,7 +112,9 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
acpi_namespace_node,
&(op->common.node)));
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(op->common.value.
+ ACPI_ERROR_NAMESPACE(walk_state->
+ scope_info,
+ op->common.value.
string, status);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/dspkginit.c b/drivers/acpi/acpica/dspkginit.c
index 6d487edfe2de..5a602b75084e 100644
--- a/drivers/acpi/acpica/dspkginit.c
+++ b/drivers/acpi/acpica/dspkginit.c
@@ -297,8 +297,10 @@ acpi_ds_init_package_element(u8 object_type,
{
union acpi_operand_object **element_ptr;
+ ACPI_FUNCTION_TRACE(ds_init_package_element);
+
if (!source_object) {
- return (AE_OK);
+ return_ACPI_STATUS(AE_OK);
}
/*
@@ -329,7 +331,7 @@ acpi_ds_init_package_element(u8 object_type,
source_object->package.flags |= AOPOBJ_DATA_VALID;
}
- return (AE_OK);
+ return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
@@ -352,6 +354,7 @@ acpi_ds_resolve_package_element(union acpi_operand_object **element_ptr)
union acpi_generic_state scope_info;
union acpi_operand_object *element = *element_ptr;
struct acpi_namespace_node *resolved_node;
+ struct acpi_namespace_node *original_node;
char *external_path = NULL;
acpi_object_type type;
@@ -441,6 +444,7 @@ acpi_ds_resolve_package_element(union acpi_operand_object **element_ptr)
* will remain as named references. This behavior is not described
* in the ACPI spec, but it appears to be an oversight.
*/
+ original_node = resolved_node;
status = acpi_ex_resolve_node_to_value(&resolved_node, NULL);
if (ACPI_FAILURE(status)) {
return_VOID;
@@ -468,26 +472,27 @@ acpi_ds_resolve_package_element(union acpi_operand_object **element_ptr)
*/
case ACPI_TYPE_DEVICE:
case ACPI_TYPE_THERMAL:
-
- /* TBD: This may not be necesssary */
-
- acpi_ut_add_reference(resolved_node->object);
+ case ACPI_TYPE_METHOD:
break;
case ACPI_TYPE_MUTEX:
- case ACPI_TYPE_METHOD:
case ACPI_TYPE_POWER:
case ACPI_TYPE_PROCESSOR:
case ACPI_TYPE_EVENT:
case ACPI_TYPE_REGION:
+ /* acpi_ex_resolve_node_to_value gave these an extra reference */
+
+ acpi_ut_remove_reference(original_node->object);
break;
default:
/*
* For all other types - the node was resolved to an actual
- * operand object with a value, return the object
+ * operand object with a value, return the object. Remove
+ * a reference on the existing object.
*/
+ acpi_ut_remove_reference(element);
*element_ptr = (union acpi_operand_object *)resolved_node;
break;
}
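
The reference-count fixes above follow the standard get/put discipline: when a resolver hands back an object with an extra reference taken, the caller must drop the surplus, and overwriting a stored pointer means releasing the reference held through the old value. A generic standalone sketch of that replace-and-put pattern (plain C, not ACPICA code):

#include <stdlib.h>

struct obj {
	int refcount;
};

static struct obj *obj_get(struct obj *o)
{
	o->refcount++;
	return o;
}

static void obj_put(struct obj *o)
{
	if (o && --o->refcount == 0)
		free(o);
}

/* Replace *slot, taking a reference on the new object and dropping
 * the one held through the old pointer (cf. acpi_ut_remove_reference). */
static void slot_replace(struct obj **slot, struct obj *new_obj)
{
	struct obj *old = *slot;

	*slot = obj_get(new_obj);
	obj_put(old);
}

int main(void)
{
	struct obj *a = calloc(1, sizeof(*a));
	struct obj *b = calloc(1, sizeof(*b));
	struct obj *slot = NULL;

	a->refcount = b->refcount = 1;	/* creation references */
	slot_replace(&slot, a);
	slot_replace(&slot, b);		/* drops the slot's reference on a */
	obj_put(a);			/* drop creation references */
	obj_put(b);
	obj_put(slot);			/* slot still holds b */
	return 0;
}
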
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index 0dabd9b95684..4c5faf629a83 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -583,7 +583,8 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
}
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(name_string, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ name_string, status);
}
}
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index eaa859a89702..5771e4e4a99a 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -207,7 +207,8 @@ acpi_ds_load1_begin_op(struct acpi_walk_state *walk_state,
}
#endif
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(path, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info, path,
+ status);
return_ACPI_STATUS(status);
}
@@ -375,7 +376,8 @@ acpi_ds_load1_begin_op(struct acpi_walk_state *walk_state,
}
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(path, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ path, status);
return_ACPI_STATUS(status);
}
}
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index aad83ef5a4ec..b3d0aaec8203 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -184,11 +184,14 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
if (status == AE_NOT_FOUND) {
status = AE_OK;
} else {
- ACPI_ERROR_NAMESPACE(buffer_ptr,
+ ACPI_ERROR_NAMESPACE(walk_state->
+ scope_info,
+ buffer_ptr,
status);
}
#else
- ACPI_ERROR_NAMESPACE(buffer_ptr, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ buffer_ptr, status);
#endif
return_ACPI_STATUS(status);
}
@@ -343,7 +346,8 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
}
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(buffer_ptr, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ buffer_ptr, status);
return_ACPI_STATUS(status);
}
@@ -719,7 +723,8 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
*/
op->common.node = new_node;
} else {
- ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info,
+ arg->common.value.string, status);
}
break;
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 28b447ff92df..bb58419f0d61 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -298,6 +298,16 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
ACPI_EXCEPTION((AE_INFO, status, "Returned by Handler for [%s]",
acpi_ut_get_region_name(region_obj->region.
space_id)));
+
+ /*
+ * Special case for an EC timeout. These are seen so frequently
+ * that an additional error message is helpful
+ */
+ if ((region_obj->region.space_id == ACPI_ADR_SPACE_EC) &&
+ (status == AE_TIME)) {
+ ACPI_ERROR((AE_INFO,
+ "Timeout from EC hardware or EC device driver"));
+ }
}
if (!(handler_desc->address_space.handler_flags &
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 83398dc4b7c2..b2ff61bdb9a8 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -617,10 +617,11 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
u32 length;
u32 index;
- ACPI_FUNCTION_NAME(ex_dump_operand)
+ ACPI_FUNCTION_NAME(ex_dump_operand);
- /* Check if debug output enabled */
- if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_EXEC, _COMPONENT)) {
+ /* Check if debug output enabled */
+
+ if (!ACPI_IS_DEBUG_ENABLED(ACPI_LV_EXEC, _COMPONENT)) {
return;
}
@@ -904,7 +905,7 @@ void
acpi_ex_dump_operands(union acpi_operand_object **operands,
const char *opcode_name, u32 num_operands)
{
- ACPI_FUNCTION_NAME(ex_dump_operands);
+ ACPI_FUNCTION_TRACE(ex_dump_operands);
if (!opcode_name) {
opcode_name = "UNKNOWN";
@@ -928,7 +929,7 @@ acpi_ex_dump_operands(union acpi_operand_object **operands,
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"**** End operand dump for [%s]\n", opcode_name));
- return;
+ return_VOID;
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index a2f4e25d45b1..5b4282902a83 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -150,10 +150,10 @@ ACPI_EXPORT_SYMBOL(acpi_get_timer)
*
******************************************************************************/
acpi_status
-acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
+acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 *time_elapsed)
{
acpi_status status;
- u32 delta_ticks;
+ u64 delta_ticks;
u64 quotient;
ACPI_FUNCTION_TRACE(acpi_get_timer_duration);
@@ -168,30 +168,29 @@ acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
return_ACPI_STATUS(AE_SUPPORT);
}
+ if (start_ticks == end_ticks) {
+ *time_elapsed = 0;
+ return_ACPI_STATUS(AE_OK);
+ }
+
/*
* Compute Tick Delta:
* Handle (max one) timer rollovers on 24-bit versus 32-bit timers.
*/
- if (start_ticks < end_ticks) {
- delta_ticks = end_ticks - start_ticks;
- } else if (start_ticks > end_ticks) {
+ delta_ticks = end_ticks;
+ if (start_ticks > end_ticks) {
if ((acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER) == 0) {
/* 24-bit Timer */
- delta_ticks =
- (((0x00FFFFFF - start_ticks) +
- end_ticks) & 0x00FFFFFF);
+ delta_ticks |= (u64)1 << 24;
} else {
/* 32-bit Timer */
- delta_ticks = (0xFFFFFFFF - start_ticks) + end_ticks;
+ delta_ticks |= (u64)1 << 32;
}
- } else { /* start_ticks == end_ticks */
-
- *time_elapsed = 0;
- return_ACPI_STATUS(AE_OK);
}
+ delta_ticks -= start_ticks;
/*
* Compute Duration (Requires a 64-bit multiply and divide):
@@ -199,10 +198,10 @@ acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
* time_elapsed (microseconds) =
* (delta_ticks * ACPI_USEC_PER_SEC) / ACPI_PM_TIMER_FREQUENCY;
*/
- status = acpi_ut_short_divide(((u64)delta_ticks) * ACPI_USEC_PER_SEC,
+ status = acpi_ut_short_divide(delta_ticks * ACPI_USEC_PER_SEC,
ACPI_PM_TIMER_FREQUENCY, &quotient, NULL);
- *time_elapsed = (u32) quotient;
+ *time_elapsed = (u32)quotient;
return_ACPI_STATUS(status);
}
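
The rollover arithmetic is the interesting part of this hunk: instead of two masked subtraction formulas, the wrapped end count simply gains the timer-width bit before a single 64-bit subtraction (which also quietly fixes an off-by-one, since the old formulas subtracted from the maximum count rather than from 2^24 or 2^32). A minimal userspace sketch with illustrative names:

#include <stdint.h>
#include <stdio.h>

/* Sketch: tick delta with at most one rollover on a 24-bit or
 * 32-bit ACPI PM timer. */
static uint64_t pm_timer_delta(uint32_t start, uint32_t end, int timer_is_32bit)
{
	uint64_t delta = end;

	if (start == end)
		return 0;

	if (start > end)	/* the counter wrapped exactly once */
		delta |= (uint64_t)1 << (timer_is_32bit ? 32 : 24);

	return delta - start;
}

int main(void)
{
	/* 24-bit timer wrapped: 0x00FFFF00 -> 0x00000100 is 0x200 ticks */
	printf("0x%llx\n",
	       (unsigned long long)pm_timer_delta(0x00FFFF00, 0x00000100, 0));
	return 0;
}
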
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index 3094cec4eab4..d1679035d5f3 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -128,14 +128,14 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
acpi_io_address last_address;
const struct acpi_port_info *port_info;
- ACPI_FUNCTION_NAME(hw_validate_io_request);
+ ACPI_FUNCTION_TRACE(hw_validate_io_request);
/* Supported widths are 8/16/32 */
if ((bit_width != 8) && (bit_width != 16) && (bit_width != 32)) {
ACPI_ERROR((AE_INFO,
"Bad BitWidth parameter: %8.8X", bit_width));
- return (AE_BAD_PARAMETER);
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
}
port_info = acpi_protected_ports;
@@ -153,13 +153,13 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
ACPI_ERROR((AE_INFO,
"Illegal I/O port address/length above 64K: %8.8X%8.8X/0x%X",
ACPI_FORMAT_UINT64(address), byte_width));
- return (AE_LIMIT);
+ return_ACPI_STATUS(AE_LIMIT);
}
/* Exit if requested address is not within the protected port table */
if (address > acpi_protected_ports[ACPI_PORT_INFO_ENTRIES - 1].end) {
- return (AE_OK);
+ return_ACPI_STATUS(AE_OK);
}
/* Check request against the list of protected I/O ports */
@@ -180,8 +180,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
/* Port illegality may depend on the _OSI calls made by the BIOS */
if (acpi_gbl_osi_data >= port_info->osi_dependency) {
- ACPI_DEBUG_PRINT((ACPI_DB_IO,
- "Denied AML access to port 0x%8.8X%8.8X/%X (%s 0x%.4X-0x%.4X)",
+ ACPI_DEBUG_PRINT((ACPI_DB_VALUES,
+ "Denied AML access to port 0x%8.8X%8.8X/%X (%s 0x%.4X-0x%.4X)\n",
ACPI_FORMAT_UINT64(address),
byte_width, port_info->name,
port_info->start,
@@ -198,7 +198,7 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
}
}
- return (AE_OK);
+ return_ACPI_STATUS(AE_OK);
}
/******************************************************************************
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index f2733f51ca8d..33e652a12fca 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -644,17 +644,18 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
this_node->object;
}
}
-#ifdef ACPI_ASL_COMPILER
- if (!acpi_gbl_disasm_flag &&
- (this_node->flags & ANOBJ_IS_EXTERNAL)) {
- this_node->flags |= IMPLICIT_EXTERNAL;
- }
-#endif
}
/* Special handling for the last segment (num_segments == 0) */
else {
+#ifdef ACPI_ASL_COMPILER
+ if (!acpi_gbl_disasm_flag
+ && (this_node->flags & ANOBJ_IS_EXTERNAL)) {
+ this_node->flags &= ~IMPLICIT_EXTERNAL;
+ }
+#endif
+
/*
* Sanity typecheck of the target object:
*
diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c
index 539d775bbc92..d55dcc82f434 100644
--- a/drivers/acpi/acpica/nsconvert.c
+++ b/drivers/acpi/acpica/nsconvert.c
@@ -495,7 +495,8 @@ acpi_ns_convert_to_reference(struct acpi_namespace_node *scope,
/* Check if we are resolving a named reference within a package */
- ACPI_ERROR_NAMESPACE(original_object->string.pointer, status);
+ ACPI_ERROR_NAMESPACE(&scope_info,
+ original_object->string.pointer, status);
goto error_exit;
}
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index a410760a0308..22c92d1a24d8 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -49,6 +49,9 @@
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsnames")
+/* Local Prototypes */
+static void acpi_ns_normalize_pathname(char *original_path);
+
/*******************************************************************************
*
* FUNCTION: acpi_ns_get_external_pathname
@@ -63,6 +66,7 @@ ACPI_MODULE_NAME("nsnames")
* for error and debug statements.
*
******************************************************************************/
+
char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
{
char *name_buffer;
@@ -352,3 +356,148 @@ char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node,
return_PTR(name_buffer);
}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_build_prefixed_pathname
+ *
+ * PARAMETERS: prefix_scope - Scope/Path that prefixes the internal path
+ * internal_path - Name or path of the namespace node
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Construct a fully qualified pathname from a concatenation of:
+ * 1) Path associated with the prefix_scope namespace node
+ * 2) External path representation of the Internal path
+ *
+ ******************************************************************************/
+
+char *acpi_ns_build_prefixed_pathname(union acpi_generic_state *prefix_scope,
+ const char *internal_path)
+{
+ acpi_status status;
+ char *full_path = NULL;
+ char *external_path = NULL;
+ char *prefix_path = NULL;
+ u32 prefix_path_length = 0;
+
+ /* If there is a prefix, get the pathname to it */
+
+ if (prefix_scope && prefix_scope->scope.node) {
+ prefix_path =
+ acpi_ns_get_normalized_pathname(prefix_scope->scope.node,
+ TRUE);
+ if (prefix_path) {
+ prefix_path_length = strlen(prefix_path);
+ }
+ }
+
+ status = acpi_ns_externalize_name(ACPI_UINT32_MAX, internal_path,
+ NULL, &external_path);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ /* Merge the prefix path and the path. 2 is for one dot and trailing null */
+
+ full_path =
+ ACPI_ALLOCATE_ZEROED(prefix_path_length + strlen(external_path) +
+ 2);
+ if (!full_path) {
+ goto cleanup;
+ }
+
+ /* Don't merge if the External path is already fully qualified */
+
+ if (prefix_path && (*external_path != '\\') && (*external_path != '^')) {
+ strcat(full_path, prefix_path);
+ if (prefix_path[1]) {
+ strcat(full_path, ".");
+ }
+ }
+
+ acpi_ns_normalize_pathname(external_path);
+ strcat(full_path, external_path);
+
+cleanup:
+ if (prefix_path) {
+ ACPI_FREE(prefix_path);
+ }
+ if (external_path) {
+ ACPI_FREE(external_path);
+ }
+
+ return (full_path);
+}
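
The concatenation rules are easy to check in isolation. Below is a userspace sketch of just the joining logic; it deliberately omits the externalize and normalize steps the real function performs:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Sketch: join a scope path and an already-externalized sub-path, unless
 * the sub-path is fully qualified ('\') or parent-prefixed ('^'). */
static char *build_prefixed_path(const char *prefix, const char *path)
{
	char *full = calloc(strlen(prefix) + strlen(path) + 2, 1);

	if (!full)
		return NULL;

	if (*path != '\\' && *path != '^') {
		strcat(full, prefix);
		if (prefix[1])	/* no dot right after a bare "\" root */
			strcat(full, ".");
	}
	strcat(full, path);
	return full;
}

int main(void)
{
	char *p = build_prefixed_path("\\_SB.PCI0", "ISA_.EC0_");

	printf("%s\n", p);	/* \_SB.PCI0.ISA_.EC0_ */
	free(p);
	return 0;
}
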
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ns_normalize_pathname
+ *
+ * PARAMETERS: original_path - Path to be normalized, in External format
+ *
+ * RETURN: The original path is processed in-place
+ *
+ * DESCRIPTION: Remove trailing underscores from each element of a path.
+ *
+ * For example: \A___.B___.C___ becomes \A.B.C
+ *
+ ******************************************************************************/
+
+static void acpi_ns_normalize_pathname(char *original_path)
+{
+ char *input_path = original_path;
+ char *new_path_buffer;
+ char *new_path;
+ u32 i;
+
+ /* Allocate a temp buffer in which to construct the new path */
+
+ new_path_buffer = ACPI_ALLOCATE_ZEROED(strlen(input_path) + 1);
+ new_path = new_path_buffer;
+ if (!new_path_buffer) {
+ return;
+ }
+
+ /* Special characters may appear at the beginning of the path */
+
+ if (*input_path == '\\') {
+ *new_path = *input_path;
+ new_path++;
+ input_path++;
+ }
+
+ while (*input_path == '^') {
+ *new_path = *input_path;
+ new_path++;
+ input_path++;
+ }
+
+ /* Remainder of the path */
+
+ while (*input_path) {
+
+ /* Do one nameseg at a time */
+
+ for (i = 0; (i < ACPI_NAME_SIZE) && *input_path; i++) {
+ if ((i == 0) || (*input_path != '_')) { /* First char is allowed to be underscore */
+ *new_path = *input_path;
+ new_path++;
+ }
+
+ input_path++;
+ }
+
+ /* Dot means that there are more namesegs to come */
+
+ if (*input_path == '.') {
+ *new_path = *input_path;
+ new_path++;
+ input_path++;
+ }
+ }
+
+ *new_path = 0;
+ strcpy(original_path, new_path_buffer);
+ ACPI_FREE(new_path_buffer);
+}
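
A standalone rendering makes the per-nameseg rule visible: keep the first character, drop any later underscores, and preserve '\', '^' and '.'. (Valid namesegs only pad with trailing underscores, so dropping every non-leading underscore is equivalent to removing trailing ones.)

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NAME_SIZE 4	/* ACPI namesegs are four characters */

static void normalize_pathname(char *path)
{
	char *buf = calloc(strlen(path) + 1, 1);
	char *out = buf;
	char *in = path;
	int i;

	if (!buf)
		return;

	if (*in == '\\')
		*out++ = *in++;
	while (*in == '^')
		*out++ = *in++;

	while (*in) {
		for (i = 0; i < NAME_SIZE && *in; i++, in++) {
			if (i == 0 || *in != '_')
				*out++ = *in;
		}
		if (*in == '.')
			*out++ = *in++;
	}

	*out = 0;
	strcpy(path, buf);
	free(buf);
}

int main(void)
{
	char path[] = "\\_SB_.PCI0.LPCB";

	normalize_pathname(path);
	printf("%s\n", path);	/* \_SB.PCI0.LPCB */
	return 0;
}
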
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index 5de8957f5ef0..e91dbee9235f 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -417,6 +417,7 @@ acpi_ns_search_and_enter(u32 target_name,
if (flags & ACPI_NS_EXTERNAL ||
(walk_state && walk_state->opcode == AML_SCOPE_OP)) {
new_node->flags |= ANOBJ_IS_EXTERNAL;
+ new_node->flags |= IMPLICIT_EXTERNAL;
}
#endif
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index 783f4c838aee..9b51f65823b2 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -61,10 +61,10 @@ static void acpi_ns_resolve_references(struct acpi_evaluate_info *info);
*
* PARAMETERS: handle - Object handle (optional)
* pathname - Object pathname (optional)
- * external_params - List of parameters to pass to method,
+ * external_params - List of parameters to pass to a method,
* terminated by NULL. May be NULL
* if no parameters are being passed.
- * return_buffer - Where to put method's return value (if
+ * return_buffer - Where to put the object's return value (if
* any). If NULL, no value is returned.
* return_type - Expected type of return object
*
@@ -100,13 +100,14 @@ acpi_evaluate_object_typed(acpi_handle handle,
free_buffer_on_error = TRUE;
}
+ /* Get a handle here, in order to build an error message if needed */
+
+ target_handle = handle;
if (pathname) {
status = acpi_get_handle(handle, pathname, &target_handle);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
- } else {
- target_handle = handle;
}
full_pathname = acpi_ns_get_external_pathname(target_handle);
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index eb9dfaca555f..171e2faa7c50 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -361,7 +361,7 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
/* Final exception check (may have been changed from code above) */
if (ACPI_FAILURE(status)) {
- ACPI_ERROR_NAMESPACE(path, status);
+ ACPI_ERROR_NAMESPACE(walk_state->scope_info, path, status);
if ((walk_state->parse_flags & ACPI_PARSE_MODE_MASK) ==
ACPI_PARSE_EXECUTE) {
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index 0bef6df71bba..c0b179883ff2 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -372,16 +372,10 @@ acpi_ps_create_op(struct acpi_walk_state *walk_state,
* external declaration opcode. Setting walk_state->Aml to
 * walk_state->parser_state.Aml + 2 increments the
* walk_state->Aml past the object type and the paramcount of the
- * external opcode. For the error message, only print the AML
- * offset. We could attempt to print the name but this may cause
- * a segmentation fault when printing the namepath because the
- * AML may be incorrect.
+ * external opcode.
*/
- acpi_os_printf
- ("// Invalid external declaration at AML offset 0x%x.\n",
- walk_state->aml -
- walk_state->parser_state.aml_start);
walk_state->aml = walk_state->parser_state.aml + 2;
+ walk_state->parser_state.aml = walk_state->aml;
return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
}
#endif
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index 02642760cb93..cd59dfe6a47d 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -94,9 +94,11 @@ void acpi_ps_init_op(union acpi_parse_object *op, u16 opcode)
op->common.descriptor_type = ACPI_DESC_TYPE_PARSER;
op->common.aml_opcode = opcode;
- ACPI_DISASM_ONLY_MEMBERS(strncpy(op->common.aml_op_name,
- (acpi_ps_get_opcode_info(opcode))->
- name, sizeof(op->common.aml_op_name)));
+ ACPI_DISASM_ONLY_MEMBERS(acpi_ut_safe_strncpy(op->common.aml_op_name,
+ (acpi_ps_get_opcode_info
+ (opcode))->name,
+ sizeof(op->common.
+ aml_op_name)));
}
/*******************************************************************************
@@ -158,10 +160,10 @@ union acpi_parse_object *acpi_ps_alloc_op(u16 opcode, u8 *aml)
if (opcode == AML_SCOPE_OP) {
acpi_gbl_current_scope = op;
}
- }
- if (gbl_capture_comments) {
- ASL_CV_TRANSFER_COMMENTS(op);
+ if (acpi_gbl_capture_comments) {
+ ASL_CV_TRANSFER_COMMENTS(op);
+ }
}
return (op);
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 615a885e2ca3..cff7154b7fee 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -163,6 +163,9 @@ acpi_debug_print(u32 requested_debug_level,
{
acpi_thread_id thread_id;
va_list args;
+#ifdef ACPI_APPLICATION
+ int fill_count;
+#endif
/* Check if debug output enabled */
@@ -202,10 +205,21 @@ acpi_debug_print(u32 requested_debug_level,
acpi_os_printf("[%u] ", (u32)thread_id);
}
- acpi_os_printf("[%02ld] ", acpi_gbl_nesting_level);
-#endif
+ fill_count = 48 - acpi_gbl_nesting_level -
+ strlen(acpi_ut_trim_function_name(function_name));
+ if (fill_count < 0) {
+ fill_count = 0;
+ }
+
+ acpi_os_printf("[%02ld] %*s",
+ acpi_gbl_nesting_level, acpi_gbl_nesting_level + 1, " ");
+ acpi_os_printf("%s%*s: ",
+ acpi_ut_trim_function_name(function_name), fill_count,
+ " ");
+#else
acpi_os_printf("%-22.22s: ", acpi_ut_trim_function_name(function_name));
+#endif
va_start(args, format);
acpi_os_vprintf(format, args);
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 02cd2c2d961a..55debbad487d 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -395,11 +395,6 @@ const char *acpi_ut_get_reference_name(union acpi_operand_object *object)
return (acpi_gbl_ref_class_names[object->reference.class]);
}
-#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
-/*
- * Strings and procedures used for debug only
- */
-
/*******************************************************************************
*
* FUNCTION: acpi_ut_get_mutex_name
@@ -433,6 +428,12 @@ const char *acpi_ut_get_mutex_name(u32 mutex_id)
return (acpi_gbl_mutex_names[mutex_id]);
}
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+
+/*
+ * Strings and procedures used for debug only
+ */
+
/*******************************************************************************
*
* FUNCTION: acpi_ut_get_notify_name
diff --git a/drivers/acpi/acpica/uterror.c b/drivers/acpi/acpica/uterror.c
index e3368186e1c1..42388dcb5ccc 100644
--- a/drivers/acpi/acpica/uterror.c
+++ b/drivers/acpi/acpica/uterror.c
@@ -182,6 +182,78 @@ acpi_ut_predefined_bios_error(const char *module_name,
/*******************************************************************************
*
+ * FUNCTION: acpi_ut_prefixed_namespace_error
+ *
+ * PARAMETERS: module_name - Caller's module name (for error output)
+ * line_number - Caller's line number (for error output)
+ * prefix_scope - Scope/Path that prefixes the internal path
+ * internal_path - Name or path of the namespace node
+ * lookup_status - Exception code from NS lookup
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Print error message with the full pathname constructed this way:
+ *
+ * prefix_scope_node_full_path.externalized_internal_path
+ *
+ * NOTE: 10/2017: Treat the major ns_lookup errors as firmware errors
+ *
+ ******************************************************************************/
+
+void
+acpi_ut_prefixed_namespace_error(const char *module_name,
+ u32 line_number,
+ union acpi_generic_state *prefix_scope,
+ const char *internal_path,
+ acpi_status lookup_status)
+{
+ char *full_path;
+ const char *message;
+
+ /*
+ * Main cases:
+ * 1) Object creation, object must not already exist
+ * 2) Object lookup, object must exist
+ */
+ switch (lookup_status) {
+ case AE_ALREADY_EXISTS:
+
+ acpi_os_printf(ACPI_MSG_BIOS_ERROR);
+ message = "Failure creating";
+ break;
+
+ case AE_NOT_FOUND:
+
+ acpi_os_printf(ACPI_MSG_BIOS_ERROR);
+ message = "Failure looking up";
+ break;
+
+ default:
+
+ acpi_os_printf(ACPI_MSG_ERROR);
+ message = "Failure looking up";
+ break;
+ }
+
+ /* Concatenate the prefix path and the internal path */
+
+ full_path =
+ acpi_ns_build_prefixed_pathname(prefix_scope, internal_path);
+
+ acpi_os_printf("%s [%s], %s", message,
+ full_path ? full_path : "Could not get pathname",
+ acpi_format_exception(lookup_status));
+
+ if (full_path) {
+ ACPI_FREE(full_path);
+ }
+
+ ACPI_MSG_SUFFIX;
+}
+
+#ifdef __OBSOLETE_FUNCTION
+/*******************************************************************************
+ *
* FUNCTION: acpi_ut_namespace_error
*
* PARAMETERS: module_name - Caller's module name (for error output)
@@ -240,6 +312,7 @@ acpi_ut_namespace_error(const char *module_name,
ACPI_MSG_SUFFIX;
ACPI_MSG_REDIRECT_END;
}
+#endif
/*******************************************************************************
*
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index 23e766d1691d..45eeb0dcf283 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -206,7 +206,6 @@ acpi_status acpi_ut_init_globals(void)
acpi_gbl_next_owner_id_offset = 0;
acpi_gbl_debugger_configuration = DEBUGGER_THREADING;
acpi_gbl_osi_mutex = NULL;
- acpi_gbl_max_loop_iterations = ACPI_MAX_LOOP_COUNT;
/* Hardware oriented */
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c
index 5f9c680076c4..2055a858e5f5 100644
--- a/drivers/acpi/acpica/utmath.c
+++ b/drivers/acpi/acpica/utmath.c
@@ -134,7 +134,7 @@ acpi_status acpi_ut_short_shift_left(u64 operand, u32 count, u64 *out_result)
if ((count & 63) >= 32) {
operand_ovl.part.hi = operand_ovl.part.lo;
- operand_ovl.part.lo ^= operand_ovl.part.lo;
+ operand_ovl.part.lo = 0;
count = (count & 63) - 32;
}
ACPI_SHIFT_LEFT_64_BY_32(operand_ovl.part.hi,
@@ -171,7 +171,7 @@ acpi_status acpi_ut_short_shift_right(u64 operand, u32 count, u64 *out_result)
if ((count & 63) >= 32) {
operand_ovl.part.lo = operand_ovl.part.hi;
- operand_ovl.part.hi ^= operand_ovl.part.hi;
+ operand_ovl.part.hi = 0;
count = (count & 63) - 32;
}
ACPI_SHIFT_RIGHT_64_BY_32(operand_ovl.part.hi,
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index 586354788018..524ba931d5e8 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -286,8 +286,9 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
acpi_gbl_mutex_info[mutex_id].thread_id = this_thread_id;
} else {
ACPI_EXCEPTION((AE_INFO, status,
- "Thread %u could not acquire Mutex [0x%X]",
- (u32)this_thread_id, mutex_id));
+ "Thread %u could not acquire Mutex [%s] (0x%X)",
+ (u32)this_thread_id,
+ acpi_ut_get_mutex_name(mutex_id), mutex_id));
}
return (status);
@@ -322,8 +323,8 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
*/
if (acpi_gbl_mutex_info[mutex_id].thread_id == ACPI_MUTEX_NOT_ACQUIRED) {
ACPI_ERROR((AE_INFO,
- "Mutex [0x%X] is not acquired, cannot release",
- mutex_id));
+ "Mutex [%s] (0x%X) is not acquired, cannot release",
+ acpi_ut_get_mutex_name(mutex_id), mutex_id));
return (AE_NOT_ACQUIRED);
}
diff --git a/drivers/acpi/acpica/utnonansi.c b/drivers/acpi/acpica/utnonansi.c
index 792664982ea3..33a0970646df 100644
--- a/drivers/acpi/acpica/utnonansi.c
+++ b/drivers/acpi/acpica/utnonansi.c
@@ -140,7 +140,7 @@ int acpi_ut_stricmp(char *string1, char *string2)
return (c1 - c2);
}
-#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION)
+#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION) || defined (ACPI_DEBUG_OUTPUT)
/*******************************************************************************
*
* FUNCTION: acpi_ut_safe_strcpy, acpi_ut_safe_strcat, acpi_ut_safe_strncat
@@ -199,4 +199,13 @@ acpi_ut_safe_strncat(char *dest,
strncat(dest, source, max_transfer_length);
return (FALSE);
}
+
+void acpi_ut_safe_strncpy(char *dest, char *source, acpi_size dest_size)
+{
+ /* Always terminate destination string */
+
+ strncpy(dest, source, dest_size);
+ dest[dest_size - 1] = 0;
+}
+
#endif
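
The new helper is a one-line wrapper, but the guarantee matters: strncpy() leaves the destination unterminated whenever the source is at least dest_size characters long. A userspace sketch:

#include <stdio.h>
#include <string.h>

/* Sketch of the always-terminating copy */
static void safe_strncpy(char *dest, const char *source, size_t dest_size)
{
	strncpy(dest, source, dest_size);
	dest[dest_size - 1] = 0;
}

int main(void)
{
	char buf[4];

	safe_strncpy(buf, "Device", sizeof(buf));
	printf("%s\n", buf);	/* "Dev" -- truncated but terminated */
	return 0;
}
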
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 3175b133c0e4..f6b8dd24b006 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -101,6 +101,8 @@ static struct acpi_interface_info acpi_default_supported_interfaces[] = {
{"Windows 2012", NULL, 0, ACPI_OSI_WIN_8}, /* Windows 8 and Server 2012 - Added 08/2012 */
{"Windows 2013", NULL, 0, ACPI_OSI_WIN_8}, /* Windows 8.1 and Server 2012 R2 - Added 01/2014 */
{"Windows 2015", NULL, 0, ACPI_OSI_WIN_10}, /* Windows 10 - Added 03/2015 */
+ {"Windows 2016", NULL, 0, ACPI_OSI_WIN_10_RS1}, /* Windows 10 version 1607 - Added 12/2017 */
+ {"Windows 2017", NULL, 0, ACPI_OSI_WIN_10_RS2}, /* Windows 10 version 1703 - Added 12/2017 */
/* Feature Group Strings */
diff --git a/drivers/acpi/acpica/utstrsuppt.c b/drivers/acpi/acpica/utstrsuppt.c
index 965fb5cec94f..97f48d71f9e6 100644
--- a/drivers/acpi/acpica/utstrsuppt.c
+++ b/drivers/acpi/acpica/utstrsuppt.c
@@ -52,10 +52,9 @@ static acpi_status
acpi_ut_insert_digit(u64 *accumulated_value, u32 base, int ascii_digit);
static acpi_status
-acpi_ut_strtoul_multiply64(u64 multiplicand, u64 multiplier, u64 *out_product);
+acpi_ut_strtoul_multiply64(u64 multiplicand, u32 base, u64 *out_product);
-static acpi_status
-acpi_ut_strtoul_add64(u64 addend1, u64 addend2, u64 *out_sum);
+static acpi_status acpi_ut_strtoul_add64(u64 addend1, u32 digit, u64 *out_sum);
/*******************************************************************************
*
@@ -357,7 +356,7 @@ acpi_ut_insert_digit(u64 *accumulated_value, u32 base, int ascii_digit)
* FUNCTION: acpi_ut_strtoul_multiply64
*
* PARAMETERS: multiplicand - Current accumulated converted integer
- * multiplier - Base/Radix
+ * base - Base/Radix
* out_product - Where the product is returned
*
* RETURN: Status and 64-bit product
@@ -369,33 +368,40 @@ acpi_ut_insert_digit(u64 *accumulated_value, u32 base, int ascii_digit)
******************************************************************************/
static acpi_status
-acpi_ut_strtoul_multiply64(u64 multiplicand, u64 multiplier, u64 *out_product)
+acpi_ut_strtoul_multiply64(u64 multiplicand, u32 base, u64 *out_product)
{
- u64 val;
+ u64 product;
+ u64 quotient;
/* Exit if either operand is zero */
*out_product = 0;
- if (!multiplicand || !multiplier) {
+ if (!multiplicand || !base) {
return (AE_OK);
}
- /* Check for 64-bit overflow before the actual multiplication */
-
- acpi_ut_short_divide(ACPI_UINT64_MAX, (u32)multiplier, &val, NULL);
- if (multiplicand > val) {
+ /*
+ * Check for 64-bit overflow before the actual multiplication.
+ *
+	 * Notes: 64-bit division is often not supported on 32-bit platforms
+	 * (it requires a library function), so ACPICA has a local 64-bit
+	 * divide function. Also, the base is currently only ever the radix
+	 * (8/10/16), so the 64/32 divide will always work.
+ */
+ acpi_ut_short_divide(ACPI_UINT64_MAX, base, &quotient, NULL);
+ if (multiplicand > quotient) {
return (AE_NUMERIC_OVERFLOW);
}
- val = multiplicand * multiplier;
+ product = multiplicand * base;
/* Check for 32-bit overflow if necessary */
- if ((acpi_gbl_integer_bit_width == 32) && (val > ACPI_UINT32_MAX)) {
+ if ((acpi_gbl_integer_bit_width == 32) && (product > ACPI_UINT32_MAX)) {
return (AE_NUMERIC_OVERFLOW);
}
- *out_product = val;
+ *out_product = product;
return (AE_OK);
}
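
The guard relies on the identity that a * b overflows 64 bits exactly when a > UINT64_MAX / b (for b > 0). Checked standalone (the extra 32-bit check for 32-bit AML integer width is omitted here):

#include <stdint.h>
#include <stdio.h>

static int mul_check_overflow(uint64_t multiplicand, uint32_t base,
			      uint64_t *product)
{
	*product = 0;
	if (!multiplicand || !base)
		return 0;

	if (multiplicand > UINT64_MAX / base)
		return -1;	/* would overflow */

	*product = multiplicand * base;
	return 0;
}

int main(void)
{
	uint64_t p;

	if (mul_check_overflow(UINT64_MAX / 16 + 1, 16, &p))
		printf("overflow detected\n");
	return 0;
}
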
@@ -404,7 +410,7 @@ acpi_ut_strtoul_multiply64(u64 multiplicand, u64 multiplier, u64 *out_product)
* FUNCTION: acpi_ut_strtoul_add64
*
* PARAMETERS: addend1 - Current accumulated converted integer
- * addend2 - New hex value/char
+ * digit - New hex value/char
* out_sum - Where sum is returned (Accumulator)
*
* RETURN: Status and 64-bit sum
@@ -415,17 +421,17 @@ acpi_ut_strtoul_multiply64(u64 multiplicand, u64 multiplier, u64 *out_product)
*
******************************************************************************/
-static acpi_status acpi_ut_strtoul_add64(u64 addend1, u64 addend2, u64 *out_sum)
+static acpi_status acpi_ut_strtoul_add64(u64 addend1, u32 digit, u64 *out_sum)
{
u64 sum;
/* Check for 64-bit overflow before the actual addition */
- if ((addend1 > 0) && (addend2 > (ACPI_UINT64_MAX - addend1))) {
+ if ((addend1 > 0) && (digit > (ACPI_UINT64_MAX - addend1))) {
return (AE_NUMERIC_OVERFLOW);
}
- sum = addend1 + addend2;
+ sum = addend1 + digit;
/* Check for 32-bit overflow if necessary */
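
The companion addition guard uses the same trick in subtractive form: a + d overflows exactly when d > UINT64_MAX - a. Sketched:

#include <stdint.h>
#include <stdio.h>

static int add_check_overflow(uint64_t accumulated, uint32_t digit,
			      uint64_t *sum)
{
	if (accumulated > 0 && digit > UINT64_MAX - accumulated)
		return -1;	/* would overflow */

	*sum = accumulated + digit;
	return 0;
}

int main(void)
{
	uint64_t s;

	if (add_check_overflow(UINT64_MAX, 1, &s))
		printf("overflow detected\n");
	return 0;
}
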
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index 3c8de88ecbd5..633b4e2c669f 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -402,8 +402,8 @@ acpi_ut_track_allocation(struct acpi_debug_mem_block *allocation,
allocation->component = component;
allocation->line = line;
- strncpy(allocation->module, module, ACPI_MAX_MODULE_NAME);
- allocation->module[ACPI_MAX_MODULE_NAME - 1] = 0;
+ acpi_ut_safe_strncpy(allocation->module, (char *)module,
+ ACPI_MAX_MODULE_NAME);
if (!element) {
@@ -717,7 +717,7 @@ exit:
if (!num_outstanding) {
ACPI_INFO(("No outstanding allocations"));
} else {
- ACPI_ERROR((AE_INFO, "%u(0x%X) Outstanding allocations",
+ ACPI_ERROR((AE_INFO, "%u (0x%X) Outstanding cache allocations",
num_outstanding, num_outstanding));
}
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index 950a1e500bfa..9da4f8ef2e77 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -96,8 +96,8 @@ ACPI_EXPORT_SYMBOL(acpi_error)
*
* RETURN: None
*
- * DESCRIPTION: Print "ACPI Exception" message with module/line/version info
- * and decoded acpi_status.
+ * DESCRIPTION: Print an "ACPI Error" message with module/line/version
+ * info as well as decoded acpi_status.
*
******************************************************************************/
void ACPI_INTERNAL_VAR_XFACE
@@ -111,10 +111,10 @@ acpi_exception(const char *module_name,
/* For AE_OK, just print the message */
if (ACPI_SUCCESS(status)) {
- acpi_os_printf(ACPI_MSG_EXCEPTION);
+ acpi_os_printf(ACPI_MSG_ERROR);
} else {
- acpi_os_printf(ACPI_MSG_EXCEPTION "%s, ",
+ acpi_os_printf(ACPI_MSG_ERROR "%s, ",
acpi_format_exception(status));
}
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 6402f7fad3bb..1efefe919555 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -410,7 +410,52 @@ static void ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, int
flags = 0;
if (flags != -1)
- memory_failure_queue(pfn, 0, flags);
+ memory_failure_queue(pfn, flags);
+#endif
+}
+
+/*
+ * PCIe AER errors need to be sent to the AER driver for reporting and
+ * recovery. The GHES severities map to the following AER severities and
+ * require the following handling:
+ *
+ * GHES_SEV_CORRECTABLE -> AER_CORRECTABLE
+ * These need to be reported by the AER driver but no recovery is
+ * necessary.
+ * GHES_SEV_RECOVERABLE -> AER_NONFATAL
+ * GHES_SEV_RECOVERABLE && CPER_SEC_RESET -> AER_FATAL
+ * These both need to be reported and recovered from by the AER driver.
+ * GHES_SEV_PANIC does not make it to this handling since the kernel must
+ * panic.
+ */
+static void ghes_handle_aer(struct acpi_hest_generic_data *gdata)
+{
+#ifdef CONFIG_ACPI_APEI_PCIEAER
+ struct cper_sec_pcie *pcie_err = acpi_hest_get_payload(gdata);
+
+ if (pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
+ pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
+ unsigned int devfn;
+ int aer_severity;
+
+ devfn = PCI_DEVFN(pcie_err->device_id.device,
+ pcie_err->device_id.function);
+ aer_severity = cper_severity_to_aer(gdata->error_severity);
+
+ /*
+ * If firmware reset the component to contain
+ * the error, we must reinitialize it before
+ * use, so treat it as a fatal AER error.
+ */
+ if (gdata->flags & CPER_SEC_RESET)
+ aer_severity = AER_FATAL;
+
+ aer_recover_queue(pcie_err->device_id.segment,
+ pcie_err->device_id.bus,
+ devfn, aer_severity,
+ (struct aer_capability_regs *)
+ pcie_err->aer_info);
+ }
#endif
}
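
The severity mapping described in the comment fits in a few lines. This is an illustrative userspace sketch only; the kernel derives the base severity with cper_severity_to_aer() and then applies the CPER_SEC_RESET override, and the enum values here are abbreviated stand-ins:

#include <stdio.h>

enum ghes_sev { GHES_SEV_NO, GHES_SEV_CORRECTED, GHES_SEV_RECOVERABLE };
enum aer_sev { AER_CORRECTABLE, AER_NONFATAL, AER_FATAL };

static enum aer_sev ghes_to_aer(enum ghes_sev sev, int sec_reset)
{
	if (sev == GHES_SEV_CORRECTED)
		return AER_CORRECTABLE;

	/* firmware reset the component: must be treated as fatal */
	return sec_reset ? AER_FATAL : AER_NONFATAL;
}

int main(void)
{
	printf("%d\n", ghes_to_aer(GHES_SEV_RECOVERABLE, 1)); /* 2 = AER_FATAL */
	return 0;
}
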
@@ -441,38 +486,9 @@ static void ghes_do_proc(struct ghes *ghes,
arch_apei_report_mem_error(sev, mem_err);
ghes_handle_memory_failure(gdata, sev);
}
-#ifdef CONFIG_ACPI_APEI_PCIEAER
else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
- struct cper_sec_pcie *pcie_err = acpi_hest_get_payload(gdata);
-
- if (sev == GHES_SEV_RECOVERABLE &&
- sec_sev == GHES_SEV_RECOVERABLE &&
- pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
- pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
- unsigned int devfn;
- int aer_severity;
-
- devfn = PCI_DEVFN(pcie_err->device_id.device,
- pcie_err->device_id.function);
- aer_severity = cper_severity_to_aer(gdata->error_severity);
-
- /*
- * If firmware reset the component to contain
- * the error, we must reinitialize it before
- * use, so treat it as a fatal AER error.
- */
- if (gdata->flags & CPER_SEC_RESET)
- aer_severity = AER_FATAL;
-
- aer_recover_queue(pcie_err->device_id.segment,
- pcie_err->device_id.bus,
- devfn, aer_severity,
- (struct aer_capability_regs *)
- pcie_err->aer_info);
- }
-
+ ghes_handle_aer(gdata);
}
-#endif
else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata);
@@ -870,7 +886,6 @@ static void ghes_print_queued_estatus(void)
struct ghes_estatus_node *estatus_node;
struct acpi_hest_generic *generic;
struct acpi_hest_generic_status *estatus;
- u32 len, node_len;
llnode = llist_del_all(&ghes_estatus_llist);
/*
@@ -882,8 +897,6 @@ static void ghes_print_queued_estatus(void)
estatus_node = llist_entry(llnode, struct ghes_estatus_node,
llnode);
estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
- len = cper_estatus_len(estatus);
- node_len = GHES_ESTATUS_NODE_LEN(len);
generic = estatus_node->generic;
ghes_print_estatus(NULL, generic, estatus);
llnode = llnode->next;
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 13e7b56e33ae..19bc440820e6 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -70,6 +70,7 @@ static async_cookie_t async_cookie;
static bool battery_driver_registered;
static int battery_bix_broken_package;
static int battery_notification_delay_ms;
+static int battery_full_discharging;
static unsigned int cache_time = 1000;
module_param(cache_time, uint, 0644);
MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
@@ -214,9 +215,12 @@ static int acpi_battery_get_property(struct power_supply *psy,
return -ENODEV;
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
- if (battery->state & ACPI_BATTERY_STATE_DISCHARGING)
- val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
- else if (battery->state & ACPI_BATTERY_STATE_CHARGING)
+ if (battery->state & ACPI_BATTERY_STATE_DISCHARGING) {
+ if (battery_full_discharging && battery->rate_now == 0)
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ else
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ } else if (battery->state & ACPI_BATTERY_STATE_CHARGING)
val->intval = POWER_SUPPLY_STATUS_CHARGING;
else if (acpi_battery_is_charged(battery))
val->intval = POWER_SUPPLY_STATUS_FULL;
@@ -1166,6 +1170,12 @@ battery_notification_delay_quirk(const struct dmi_system_id *d)
return 0;
}
+static int __init battery_full_discharging_quirk(const struct dmi_system_id *d)
+{
+ battery_full_discharging = 1;
+ return 0;
+}
+
static const struct dmi_system_id bat_dmi_table[] __initconst = {
{
.callback = battery_bix_broken_package_quirk,
@@ -1183,6 +1193,22 @@ static const struct dmi_system_id bat_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-573G"),
},
},
+ {
+ .callback = battery_full_discharging_quirk,
+ .ident = "ASUS GL502VSK",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "GL502VSK"),
+ },
+ },
+ {
+ .callback = battery_full_discharging_quirk,
+ .ident = "ASUS UX305LA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "UX305LA"),
+ },
+ },
{},
};
@@ -1237,13 +1263,11 @@ static int acpi_battery_add(struct acpi_device *device)
#ifdef CONFIG_ACPI_PROCFS_POWER
result = acpi_battery_add_fs(device);
-#endif
if (result) {
-#ifdef CONFIG_ACPI_PROCFS_POWER
acpi_battery_remove_fs(device);
-#endif
goto fail;
}
+#endif
printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 4d0979e02a28..f87ed3be779a 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -785,6 +785,24 @@ const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
}
EXPORT_SYMBOL_GPL(acpi_match_device);
+void *acpi_get_match_data(const struct device *dev)
+{
+ const struct acpi_device_id *match;
+
+ if (!dev->driver)
+ return NULL;
+
+ if (!dev->driver->acpi_match_table)
+ return NULL;
+
+ match = acpi_match_device(dev->driver->acpi_match_table, dev);
+ if (!match)
+ return NULL;
+
+ return (void *)match->driver_data;
+}
+EXPORT_SYMBOL_GPL(acpi_get_match_data);
+
int acpi_match_device_ids(struct acpi_device *device,
const struct acpi_device_id *ids)
{
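
A hypothetical consumer of the new helper, sketched for illustration only (the foo_* names and the "XYZ0001" HID are made up, not a real driver or device):

struct foo_cfg {
	unsigned long flags;
};

static const struct foo_cfg foo_model_a = { .flags = 1 };

static const struct acpi_device_id foo_acpi_ids[] = {
	{ "XYZ0001", (kernel_ulong_t)&foo_model_a },
	{ }
};

static int foo_probe(struct platform_device *pdev)
{
	const struct foo_cfg *cfg = acpi_get_match_data(&pdev->dev);

	if (!cfg)
		return -ENODEV;
	/* ... apply cfg->flags ... */
	return 0;
}
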
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index bf8e4d371fa7..e1eee7a60fad 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -30,6 +30,7 @@
#include <linux/input.h>
#include <linux/slab.h>
#include <linux/acpi.h>
+#include <linux/dmi.h>
#include <acpi/button.h>
#define PREFIX "ACPI: "
@@ -76,6 +77,22 @@ static const struct acpi_device_id button_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, button_device_ids);
+/*
+ * Some devices that do not even have a lid still expose a broken _LID
+ * method (e.g. pointing to a floating gpio pin), causing spurious LID events.
+ */
+static const struct dmi_system_id lid_blacklst[] = {
+ {
+ /* GP-electronic T701 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "T701"),
+ DMI_MATCH(DMI_BIOS_VERSION, "BYT70A.YNCHENG.WIN.007"),
+ },
+ },
+ {}
+};
+
static int acpi_button_add(struct acpi_device *device);
static int acpi_button_remove(struct acpi_device *device);
static void acpi_button_notify(struct acpi_device *device, u32 event);
@@ -210,6 +227,8 @@ static int acpi_lid_notify_state(struct acpi_device *device, int state)
}
/* Send the platform triggered reliable event */
if (do_update) {
+ acpi_handle_debug(device->handle, "ACPI LID %s\n",
+ state ? "open" : "closed");
input_report_switch(button->input, SW_LID, !state);
input_sync(button->input);
button->last_state = !!state;
@@ -473,6 +492,9 @@ static int acpi_button_add(struct acpi_device *device)
char *name, *class;
int error;
+ if (!strcmp(hid, ACPI_BUTTON_HID_LID) && dmi_check_system(lid_blacklst))
+ return -ENODEV;
+
button = kzalloc(sizeof(struct acpi_button), GFP_KERNEL);
if (!button)
return -ENOMEM;
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index a4c8ad98560d..c4d0a1c912f0 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -990,7 +990,7 @@ void acpi_subsys_complete(struct device *dev)
* the sleep state it is going out of and it has never been resumed till
* now, resume it in case the firmware powered it up.
*/
- if (dev->power.direct_complete && pm_resume_via_firmware())
+ if (pm_runtime_suspended(dev) && pm_resume_via_firmware())
pm_request_resume(dev);
}
EXPORT_SYMBOL_GPL(acpi_subsys_complete);
@@ -1039,10 +1039,28 @@ EXPORT_SYMBOL_GPL(acpi_subsys_suspend_late);
*/
int acpi_subsys_suspend_noirq(struct device *dev)
{
- if (dev_pm_smart_suspend_and_suspended(dev))
+ int ret;
+
+ if (dev_pm_smart_suspend_and_suspended(dev)) {
+ dev->power.may_skip_resume = true;
return 0;
+ }
+
+ ret = pm_generic_suspend_noirq(dev);
+ if (ret)
+ return ret;
+
+ /*
+ * If the target system sleep state is suspend-to-idle, it is sufficient
+ * to check whether or not the device's wakeup settings are good for
+ * runtime PM. Otherwise, the pm_resume_via_firmware() check will cause
+ * acpi_subsys_complete() to take care of fixing up the device's state
+ * anyway, if need be.
+ */
+ dev->power.may_skip_resume = device_may_wakeup(dev) ||
+ !device_can_wakeup(dev);
- return pm_generic_suspend_noirq(dev);
+ return 0;
}
EXPORT_SYMBOL_GPL(acpi_subsys_suspend_noirq);
@@ -1052,6 +1070,9 @@ EXPORT_SYMBOL_GPL(acpi_subsys_suspend_noirq);
*/
int acpi_subsys_resume_noirq(struct device *dev)
{
+ if (dev_pm_may_skip_resume(dev))
+ return 0;
+
/*
* Devices with DPM_FLAG_SMART_SUSPEND may be left in runtime suspend
* during system suspend, so update their runtime PM status to "active"
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 0252c9b9af3d..d9f38c645e4a 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1516,7 +1516,7 @@ static int acpi_ec_setup(struct acpi_ec *ec, bool handle_events)
}
acpi_handle_info(ec->handle,
- "GPE=0x%lx, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n",
+ "GPE=0x%x, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n",
ec->gpe, ec->command_addr, ec->data_addr);
return ret;
}
diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
index 6c7dd7af789e..dd70d6c2bca0 100644
--- a/drivers/acpi/ec_sys.c
+++ b/drivers/acpi/ec_sys.c
@@ -128,7 +128,7 @@ static int acpi_ec_add_debugfs(struct acpi_ec *ec, unsigned int ec_device_count)
return -ENOMEM;
}
- if (!debugfs_create_x32("gpe", 0444, dev_dir, (u32 *)&first_ec->gpe))
+ if (!debugfs_create_x32("gpe", 0444, dev_dir, &first_ec->gpe))
goto error;
if (!debugfs_create_bool("use_global_lock", 0444, dev_dir,
&first_ec->global_lock))
diff --git a/drivers/acpi/evged.c b/drivers/acpi/evged.c
index 46f060356a22..f13ba2c07667 100644
--- a/drivers/acpi/evged.c
+++ b/drivers/acpi/evged.c
@@ -49,6 +49,11 @@
#define MODULE_NAME "acpi-ged"
+struct acpi_ged_device {
+ struct device *dev;
+ struct list_head event_list;
+};
+
struct acpi_ged_event {
struct list_head node;
struct device *dev;
@@ -76,7 +81,8 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
unsigned int irq;
unsigned int gsi;
unsigned int irqflags = IRQF_ONESHOT;
- struct device *dev = context;
+ struct acpi_ged_device *geddev = context;
+ struct device *dev = geddev->dev;
acpi_handle handle = ACPI_HANDLE(dev);
acpi_handle evt_handle;
struct resource r;
@@ -102,8 +108,6 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
return AE_ERROR;
}
- dev_info(dev, "GED listening GSI %u @ IRQ %u\n", gsi, irq);
-
event = devm_kzalloc(dev, sizeof(*event), GFP_KERNEL);
if (!event)
return AE_ERROR;
@@ -116,29 +120,58 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
if (r.flags & IORESOURCE_IRQ_SHAREABLE)
irqflags |= IRQF_SHARED;
- if (devm_request_threaded_irq(dev, irq, NULL, acpi_ged_irq_handler,
- irqflags, "ACPI:Ged", event)) {
+ if (request_threaded_irq(irq, NULL, acpi_ged_irq_handler,
+ irqflags, "ACPI:Ged", event)) {
dev_err(dev, "failed to setup event handler for irq %u\n", irq);
return AE_ERROR;
}
+ dev_dbg(dev, "GED listening GSI %u @ IRQ %u\n", gsi, irq);
+ list_add_tail(&event->node, &geddev->event_list);
return AE_OK;
}
static int ged_probe(struct platform_device *pdev)
{
+ struct acpi_ged_device *geddev;
acpi_status acpi_ret;
+ geddev = devm_kzalloc(&pdev->dev, sizeof(*geddev), GFP_KERNEL);
+ if (!geddev)
+ return -ENOMEM;
+
+ geddev->dev = &pdev->dev;
+ INIT_LIST_HEAD(&geddev->event_list);
acpi_ret = acpi_walk_resources(ACPI_HANDLE(&pdev->dev), "_CRS",
- acpi_ged_request_interrupt, &pdev->dev);
+ acpi_ged_request_interrupt, geddev);
if (ACPI_FAILURE(acpi_ret)) {
dev_err(&pdev->dev, "unable to parse the _CRS record\n");
return -EINVAL;
}
+ platform_set_drvdata(pdev, geddev);
return 0;
}
+static void ged_shutdown(struct platform_device *pdev)
+{
+ struct acpi_ged_device *geddev = platform_get_drvdata(pdev);
+ struct acpi_ged_event *event, *next;
+
+ list_for_each_entry_safe(event, next, &geddev->event_list, node) {
+ free_irq(event->irq, event);
+ list_del(&event->node);
+ dev_dbg(geddev->dev, "GED releasing GSI %u @ IRQ %u\n",
+ event->gsi, event->irq);
+ }
+}
+
+static int ged_remove(struct platform_device *pdev)
+{
+ ged_shutdown(pdev);
+ return 0;
+}
+
static const struct acpi_device_id ged_acpi_ids[] = {
{"ACPI0013"},
{},
@@ -146,6 +179,8 @@ static const struct acpi_device_id ged_acpi_ids[] = {
static struct platform_driver ged_driver = {
.probe = ged_probe,
+ .remove = ged_remove,
+ .shutdown = ged_shutdown,
.driver = {
.name = MODULE_NAME,
.acpi_match_table = ACPI_PTR(ged_acpi_ids),
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 7f43423de43c..1d0a501bc7f0 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -159,7 +159,7 @@ static inline void acpi_early_processor_osc(void) {}
-------------------------------------------------------------------------- */
struct acpi_ec {
acpi_handle handle;
- unsigned long gpe;
+ u32 gpe;
unsigned long command_addr;
unsigned long data_addr;
bool global_lock;
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 917f1cc0fda4..8ccaae3550d2 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -460,8 +460,7 @@ int __init acpi_numa_init(void)
srat_proc, ARRAY_SIZE(srat_proc), 0);
cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
- acpi_parse_memory_affinity,
- NR_NODE_MEMBLKS);
+ acpi_parse_memory_affinity, 0);
}
/* SLIT: System Locality Information Table */
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index bc3d914dfc3e..85ad679390e3 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -612,7 +612,7 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
acpi_isa_irq_penalty[link->irq.active] +=
PIRQ_PENALTY_PCI_USING;
- printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n",
+ pr_info("%s [%s] enabled at IRQ %d\n",
acpi_device_name(link->device),
acpi_device_bid(link->device), link->irq.active);
}
diff --git a/drivers/acpi/pmic/intel_pmic_bxtwc.c b/drivers/acpi/pmic/intel_pmic_bxtwc.c
index 90011aad4d20..886ac8b93cd0 100644
--- a/drivers/acpi/pmic/intel_pmic_bxtwc.c
+++ b/drivers/acpi/pmic/intel_pmic_bxtwc.c
@@ -400,7 +400,7 @@ static int intel_bxtwc_pmic_opregion_probe(struct platform_device *pdev)
&intel_bxtwc_pmic_opregion_data);
}
-static struct platform_device_id bxt_wc_opregion_id_table[] = {
+static const struct platform_device_id bxt_wc_opregion_id_table[] = {
{ .name = "bxt_wcove_region" },
{},
};
@@ -412,9 +412,4 @@ static struct platform_driver intel_bxtwc_pmic_opregion_driver = {
},
.id_table = bxt_wc_opregion_id_table,
};
-
-static int __init intel_bxtwc_pmic_opregion_driver_init(void)
-{
- return platform_driver_register(&intel_bxtwc_pmic_opregion_driver);
-}
-device_initcall(intel_bxtwc_pmic_opregion_driver_init);
+builtin_platform_driver(intel_bxtwc_pmic_opregion_driver);
diff --git a/drivers/acpi/pmic/intel_pmic_chtdc_ti.c b/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
index 109c1e9c9c7a..f6d73a243d80 100644
--- a/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
+++ b/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
@@ -131,7 +131,4 @@ static struct platform_driver chtdc_ti_pmic_opregion_driver = {
},
.id_table = chtdc_ti_pmic_opregion_id_table,
};
-module_platform_driver(chtdc_ti_pmic_opregion_driver);
-
-MODULE_DESCRIPTION("Dollar Cove TI PMIC opregion driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(chtdc_ti_pmic_opregion_driver);
diff --git a/drivers/acpi/pmic/intel_pmic_chtwc.c b/drivers/acpi/pmic/intel_pmic_chtwc.c
index 85636d7a9d39..9912422c8185 100644
--- a/drivers/acpi/pmic/intel_pmic_chtwc.c
+++ b/drivers/acpi/pmic/intel_pmic_chtwc.c
@@ -260,11 +260,10 @@ static int intel_cht_wc_pmic_opregion_probe(struct platform_device *pdev)
&intel_cht_wc_pmic_opregion_data);
}
-static struct platform_device_id cht_wc_opregion_id_table[] = {
+static const struct platform_device_id cht_wc_opregion_id_table[] = {
{ .name = "cht_wcove_region" },
{},
};
-MODULE_DEVICE_TABLE(platform, cht_wc_opregion_id_table);
static struct platform_driver intel_cht_wc_pmic_opregion_driver = {
.probe = intel_cht_wc_pmic_opregion_probe,
@@ -273,8 +272,4 @@ static struct platform_driver intel_cht_wc_pmic_opregion_driver = {
},
.id_table = cht_wc_opregion_id_table,
};
-module_platform_driver(intel_cht_wc_pmic_opregion_driver);
-
-MODULE_DESCRIPTION("Intel CHT Whiskey Cove PMIC operation region driver");
-MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(intel_cht_wc_pmic_opregion_driver);
diff --git a/drivers/acpi/pmic/intel_pmic_crc.c b/drivers/acpi/pmic/intel_pmic_crc.c
index d7f1761ab1bc..7ffa74048107 100644
--- a/drivers/acpi/pmic/intel_pmic_crc.c
+++ b/drivers/acpi/pmic/intel_pmic_crc.c
@@ -201,9 +201,4 @@ static struct platform_driver intel_crc_pmic_opregion_driver = {
.name = "crystal_cove_pmic",
},
};
-
-static int __init intel_crc_pmic_opregion_driver_init(void)
-{
- return platform_driver_register(&intel_crc_pmic_opregion_driver);
-}
-device_initcall(intel_crc_pmic_opregion_driver_init);
+builtin_platform_driver(intel_crc_pmic_opregion_driver);
diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c
index 6c99d3f81095..316e55174aa9 100644
--- a/drivers/acpi/pmic/intel_pmic_xpower.c
+++ b/drivers/acpi/pmic/intel_pmic_xpower.c
@@ -278,9 +278,4 @@ static struct platform_driver intel_xpower_pmic_opregion_driver = {
.name = "axp288_pmic_acpi",
},
};
-
-static int __init intel_xpower_pmic_opregion_driver_init(void)
-{
- return platform_driver_register(&intel_xpower_pmic_opregion_driver);
-}
-device_initcall(intel_xpower_pmic_opregion_driver_init);
+builtin_platform_driver(intel_xpower_pmic_opregion_driver);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index d50a7b6ccddd..5f0071c7e2e1 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -207,6 +207,7 @@ static void tsc_check_state(int state)
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD:
case X86_VENDOR_INTEL:
+ case X86_VENDOR_CENTAUR:
/*
* AMD Fam10h TSC will tick in all
* C/P/S0/S1 states when this bit is set.
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index e26ea209b63e..466d1503aba0 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -1271,9 +1271,17 @@ static int acpi_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
return 0;
}
+static void *
+acpi_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
+ const struct device *dev)
+{
+ return acpi_get_match_data(dev);
+}
+
#define DECLARE_ACPI_FWNODE_OPS(ops) \
const struct fwnode_operations ops = { \
.device_is_available = acpi_fwnode_device_is_available, \
+ .device_get_match_data = acpi_fwnode_device_get_match_data, \
.property_present = acpi_fwnode_property_present, \
.property_read_int_array = \
acpi_fwnode_property_read_int_array, \
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 8082871b409a..46cde0912762 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -367,10 +367,20 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
{},
};
+static bool ignore_blacklist;
+
+void __init acpi_sleep_no_blacklist(void)
+{
+ ignore_blacklist = true;
+}
+
static void __init acpi_sleep_dmi_check(void)
{
int year;
+ if (ignore_blacklist)
+ return;
+
if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year >= 2012)
acpi_nvs_nosave_s3();
@@ -697,7 +707,8 @@ static const struct acpi_device_id lps0_device_ids[] = {
#define ACPI_LPS0_ENTRY 5
#define ACPI_LPS0_EXIT 6
-#define ACPI_S2IDLE_FUNC_MASK ((1 << ACPI_LPS0_ENTRY) | (1 << ACPI_LPS0_EXIT))
+#define ACPI_LPS0_SCREEN_MASK ((1 << ACPI_LPS0_SCREEN_OFF) | (1 << ACPI_LPS0_SCREEN_ON))
+#define ACPI_LPS0_PLATFORM_MASK ((1 << ACPI_LPS0_ENTRY) | (1 << ACPI_LPS0_EXIT))
static acpi_handle lps0_device_handle;
static guid_t lps0_dsm_guid;
@@ -900,7 +911,8 @@ static int lps0_device_attach(struct acpi_device *adev,
if (out_obj && out_obj->type == ACPI_TYPE_BUFFER) {
char bitmask = *(char *)out_obj->buffer.pointer;
- if ((bitmask & ACPI_S2IDLE_FUNC_MASK) == ACPI_S2IDLE_FUNC_MASK) {
+ if ((bitmask & ACPI_LPS0_PLATFORM_MASK) == ACPI_LPS0_PLATFORM_MASK ||
+ (bitmask & ACPI_LPS0_SCREEN_MASK) == ACPI_LPS0_SCREEN_MASK) {
lps0_dsm_func_mask = bitmask;
lps0_device_handle = adev->handle;
/*
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 06a150bb35bf..4fc59c3bc673 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -816,14 +816,8 @@ end:
* interface:
* echo unmask > /sys/firmware/acpi/interrupts/gpe00
*/
-
-/*
- * Currently, the GPE flooding prevention only supports to mask the GPEs
- * numbered from 00 to 7f.
- */
-#define ACPI_MASKABLE_GPE_MAX 0x80
-
-static u64 __initdata acpi_masked_gpes;
+#define ACPI_MASKABLE_GPE_MAX 0xFF
+static DECLARE_BITMAP(acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) __initdata;
static int __init acpi_gpe_set_masked_gpes(char *val)
{
@@ -831,7 +825,7 @@ static int __init acpi_gpe_set_masked_gpes(char *val)
if (kstrtou8(val, 0, &gpe) || gpe > ACPI_MASKABLE_GPE_MAX)
return -EINVAL;
- acpi_masked_gpes |= ((u64)1<<gpe);
+ set_bit(gpe, acpi_masked_gpes_map);
return 1;
}
@@ -843,15 +837,11 @@ void __init acpi_gpe_apply_masked_gpes(void)
acpi_status status;
u8 gpe;
- for (gpe = 0;
- gpe < min_t(u8, ACPI_MASKABLE_GPE_MAX, acpi_current_gpe_count);
- gpe++) {
- if (acpi_masked_gpes & ((u64)1<<gpe)) {
- status = acpi_get_gpe_device(gpe, &handle);
- if (ACPI_SUCCESS(status)) {
- pr_info("Masking GPE 0x%x.\n", gpe);
- (void)acpi_mask_gpe(handle, gpe, TRUE);
- }
+ for_each_set_bit(gpe, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) {
+ status = acpi_get_gpe_device(gpe, &handle);
+ if (ACPI_SUCCESS(status)) {
+ pr_info("Masking GPE 0x%x.\n", gpe);
+ (void)acpi_mask_gpe(handle, gpe, TRUE);
}
}
}
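
A userspace sketch of the bitmap pattern that replaces the old u64 mask: a u64 can only describe GPEs 0x00-0x3f (shifting by 64 or more is undefined), while the bitmap covers the full 0x00-0xff range:

#include <limits.h>
#include <stdio.h>

#define GPE_MAX 0x100
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static unsigned long gpe_map[(GPE_MAX + BITS_PER_LONG - 1) / BITS_PER_LONG];

static void set_gpe(unsigned int gpe)
{
	gpe_map[gpe / BITS_PER_LONG] |= 1UL << (gpe % BITS_PER_LONG);
}

static int test_gpe(unsigned int gpe)
{
	return !!(gpe_map[gpe / BITS_PER_LONG] & (1UL << (gpe % BITS_PER_LONG)));
}

int main(void)
{
	unsigned int gpe;

	set_gpe(0x9c);		/* out of reach for a u64 mask */
	for (gpe = 0; gpe < GPE_MAX; gpe++)
		if (test_gpe(gpe))
			printf("masking GPE 0x%02x\n", gpe);
	return 0;
}
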
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 9d49a1acebe3..78db97687f26 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -737,16 +737,17 @@ bool acpi_dev_found(const char *hid)
}
EXPORT_SYMBOL(acpi_dev_found);
-struct acpi_dev_present_info {
+struct acpi_dev_match_info {
+ const char *dev_name;
struct acpi_device_id hid[2];
const char *uid;
s64 hrv;
};
-static int acpi_dev_present_cb(struct device *dev, void *data)
+static int acpi_dev_match_cb(struct device *dev, void *data)
{
struct acpi_device *adev = to_acpi_device(dev);
- struct acpi_dev_present_info *match = data;
+ struct acpi_dev_match_info *match = data;
unsigned long long hrv;
acpi_status status;
@@ -757,6 +758,8 @@ static int acpi_dev_present_cb(struct device *dev, void *data)
strcmp(adev->pnp.unique_id, match->uid)))
return 0;
+ match->dev_name = acpi_dev_name(adev);
+
if (match->hrv == -1)
return 1;
@@ -789,20 +792,44 @@ static int acpi_dev_present_cb(struct device *dev, void *data)
*/
bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
{
- struct acpi_dev_present_info match = {};
+ struct acpi_dev_match_info match = {};
struct device *dev;
strlcpy(match.hid[0].id, hid, sizeof(match.hid[0].id));
match.uid = uid;
match.hrv = hrv;
- dev = bus_find_device(&acpi_bus_type, NULL, &match,
- acpi_dev_present_cb);
-
+ dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb);
return !!dev;
}
EXPORT_SYMBOL(acpi_dev_present);
+/**
+ * acpi_dev_get_first_match_name - Return name of first match of ACPI device
+ * @hid: Hardware ID of the device.
+ * @uid: Unique ID of the device, pass NULL to not check _UID
+ * @hrv: Hardware Revision of the device, pass -1 to not check _HRV
+ *
+ * Return device name if a matching device was present
+ * at the moment of invocation, or NULL otherwise.
+ *
+ * See additional information in acpi_dev_present() as well.
+ */
+const char *
+acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv)
+{
+ struct acpi_dev_match_info match = {};
+ struct device *dev;
+
+ strlcpy(match.hid[0].id, hid, sizeof(match.hid[0].id));
+ match.uid = uid;
+ match.hrv = hrv;
+
+ dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb);
+ return dev ? match.dev_name : NULL;
+}
+EXPORT_SYMBOL(acpi_dev_get_first_match_name);
+
/*
* acpi_backlight= handling, this is done here rather then in video_detect.c
* because __setup cannot be used in modules.
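
acpi_dev_present() only reports existence; the new acpi_dev_get_first_match_name() additionally hands back the matched device's name, e.g. for use as a GPIO or regulator lookup key. A hypothetical caller sketch (the HID "ABCD0001" is invented for illustration):

#include <linux/acpi.h>

static int example_find_dev(void)
{
	const char *name;

	/* NULL _UID and -1 _HRV: match on the hardware ID alone. */
	name = acpi_dev_get_first_match_name("ABCD0001", NULL, -1);
	if (!name)
		return -ENODEV;

	pr_info("matched ACPI device: %s\n", name);
	return 0;
}
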
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index a7ecfde66b7b..cc89d0d2b965 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -4311,7 +4311,7 @@ static int binder_thread_release(struct binder_proc *proc,
return active_transactions;
}
-static unsigned int binder_poll(struct file *filp,
+static __poll_t binder_poll(struct file *filp,
struct poll_table_struct *wait)
{
struct binder_proc *proc = filp->private_data;
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index cb5339166563..a7120d621154 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -92,6 +92,25 @@ config SATA_AHCI
If unsure, say N.
+config SATA_MOBILE_LPM_POLICY
+ int "Default SATA Link Power Management policy for mobile chipsets"
+ range 0 4
+ default 0
+ depends on SATA_AHCI
+ help
+	  Select the default SATA Link Power Management (LPM) policy to use
+ for mobile / laptop variants of chipsets / "South Bridges".
+
+ The value set has the following meanings:
+ 0 => Keep firmware settings
+ 1 => Maximum performance
+ 2 => Medium power
+ 3 => Medium power with Device Initiated PM enabled
+ 4 => Minimum power
+
+	  Note: "Minimum power" is known to cause issues with some disks,
+	  including disk corruption, and should not be used.
+
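
The default can be chosen at build time and, because ahci also gains a matching module parameter (see the ahci.c hunk below), overridden at boot without rebuilding. Illustrative settings:

	# .config fragment: default to med_power_with_dipm
	CONFIG_SATA_MOBILE_LPM_POLICY=3

	# or override at boot on the kernel command line:
	ahci.mobile_lpm_policy=3
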
config SATA_AHCI_PLATFORM
tristate "Platform AHCI SATA support"
help
@@ -925,15 +944,6 @@ endif # ATA_BMDMA
comment "PIO-only SFF controllers"
-config PATA_AT32
- tristate "Atmel AVR32 PATA support (Experimental)"
- depends on AVR32 && PLATFORM_AT32AP
- help
- This option enables support for the IDE devices on the
- Atmel AT32AP platform.
-
- If unsure, say N.
-
config PATA_CMD640_PCI
tristate "CMD640 PCI PATA support (Experimental)"
depends on PCI
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 8daec3e657f8..f1f5a3fbc777 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -96,7 +96,6 @@ obj-$(CONFIG_PATA_VIA) += pata_via.o
obj-$(CONFIG_PATA_WINBOND) += pata_sl82c105.o
# SFF PIO only
-obj-$(CONFIG_PATA_AT32) += pata_at32.o
obj-$(CONFIG_PATA_CMD640_PCI) += pata_cmd640.o
obj-$(CONFIG_PATA_FALCON) += pata_falcon.o
obj-$(CONFIG_PATA_ISAPNP) += pata_isapnp.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 5443cb71d7ba..355a95a83a34 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -65,6 +65,7 @@ enum board_ids {
/* board IDs by feature in alphabetical order */
board_ahci,
board_ahci_ign_iferr,
+ board_ahci_mobile,
board_ahci_nomsi,
board_ahci_noncq,
board_ahci_nosntf,
@@ -140,6 +141,13 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
+ [board_ahci_mobile] = {
+ AHCI_HFLAGS (AHCI_HFLAG_IS_MOBILE),
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ },
[board_ahci_nomsi] = {
AHCI_HFLAGS (AHCI_HFLAG_NO_MSI),
.flags = AHCI_FLAG_COMMON,
@@ -252,13 +260,13 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
{ PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
{ PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
- { PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */
- { PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x2929), board_ahci_mobile }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292a), board_ahci_mobile }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292b), board_ahci_mobile }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292c), board_ahci_mobile }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x292f), board_ahci_mobile }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
- { PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
+ { PCI_VDEVICE(INTEL, 0x294e), board_ahci_mobile }, /* ICH9M */
{ PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
{ PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
{ PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
@@ -268,9 +276,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */
{ PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
{ PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
- { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH AHCI */
+ { PCI_VDEVICE(INTEL, 0x3b29), board_ahci_mobile }, /* PCH M AHCI */
{ PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
- { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
+ { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci_mobile }, /* PCH M RAID */
{ PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
{ PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */
@@ -293,9 +301,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
- { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */
+ { PCI_VDEVICE(INTEL, 0x1c03), board_ahci_mobile }, /* CPT M AHCI */
{ PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
- { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */
+ { PCI_VDEVICE(INTEL, 0x1c05), board_ahci_mobile }, /* CPT M RAID */
{ PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
{ PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
{ PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
@@ -304,28 +312,28 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG RAID */
{ PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */
{ PCI_VDEVICE(INTEL, 0x1e02), board_ahci }, /* Panther Point AHCI */
- { PCI_VDEVICE(INTEL, 0x1e03), board_ahci }, /* Panther Point AHCI */
+ { PCI_VDEVICE(INTEL, 0x1e03), board_ahci_mobile }, /* Panther M AHCI */
{ PCI_VDEVICE(INTEL, 0x1e04), board_ahci }, /* Panther Point RAID */
{ PCI_VDEVICE(INTEL, 0x1e05), board_ahci }, /* Panther Point RAID */
{ PCI_VDEVICE(INTEL, 0x1e06), board_ahci }, /* Panther Point RAID */
- { PCI_VDEVICE(INTEL, 0x1e07), board_ahci }, /* Panther Point RAID */
+ { PCI_VDEVICE(INTEL, 0x1e07), board_ahci_mobile }, /* Panther M RAID */
{ PCI_VDEVICE(INTEL, 0x1e0e), board_ahci }, /* Panther Point RAID */
{ PCI_VDEVICE(INTEL, 0x8c02), board_ahci }, /* Lynx Point AHCI */
- { PCI_VDEVICE(INTEL, 0x8c03), board_ahci }, /* Lynx Point AHCI */
+ { PCI_VDEVICE(INTEL, 0x8c03), board_ahci_mobile }, /* Lynx M AHCI */
{ PCI_VDEVICE(INTEL, 0x8c04), board_ahci }, /* Lynx Point RAID */
- { PCI_VDEVICE(INTEL, 0x8c05), board_ahci }, /* Lynx Point RAID */
+ { PCI_VDEVICE(INTEL, 0x8c05), board_ahci_mobile }, /* Lynx M RAID */
{ PCI_VDEVICE(INTEL, 0x8c06), board_ahci }, /* Lynx Point RAID */
- { PCI_VDEVICE(INTEL, 0x8c07), board_ahci }, /* Lynx Point RAID */
+ { PCI_VDEVICE(INTEL, 0x8c07), board_ahci_mobile }, /* Lynx M RAID */
{ PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */
- { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci }, /* Lynx Point RAID */
- { PCI_VDEVICE(INTEL, 0x9c02), board_ahci }, /* Lynx Point-LP AHCI */
- { PCI_VDEVICE(INTEL, 0x9c03), board_ahci }, /* Lynx Point-LP AHCI */
- { PCI_VDEVICE(INTEL, 0x9c04), board_ahci }, /* Lynx Point-LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c05), board_ahci }, /* Lynx Point-LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c06), board_ahci }, /* Lynx Point-LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c07), board_ahci }, /* Lynx Point-LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci }, /* Lynx Point-LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci }, /* Lynx Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci_mobile }, /* Lynx M RAID */
+ { PCI_VDEVICE(INTEL, 0x9c02), board_ahci_mobile }, /* Lynx LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c03), board_ahci_mobile }, /* Lynx LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c04), board_ahci_mobile }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c05), board_ahci_mobile }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c06), board_ahci_mobile }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c07), board_ahci_mobile }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci_mobile }, /* Lynx LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci_mobile }, /* Lynx LP RAID */
{ PCI_VDEVICE(INTEL, 0x1f22), board_ahci }, /* Avoton AHCI */
{ PCI_VDEVICE(INTEL, 0x1f23), board_ahci }, /* Avoton AHCI */
{ PCI_VDEVICE(INTEL, 0x1f24), board_ahci }, /* Avoton RAID */
@@ -353,26 +361,26 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x8d66), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x8d6e), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x23a3), board_ahci }, /* Coleto Creek AHCI */
- { PCI_VDEVICE(INTEL, 0x9c83), board_ahci }, /* Wildcat Point-LP AHCI */
- { PCI_VDEVICE(INTEL, 0x9c85), board_ahci }, /* Wildcat Point-LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c87), board_ahci }, /* Wildcat Point-LP RAID */
- { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci }, /* Wildcat Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c83), board_ahci_mobile }, /* Wildcat LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c85), board_ahci_mobile }, /* Wildcat LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c87), board_ahci_mobile }, /* Wildcat LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci_mobile }, /* Wildcat LP RAID */
{ PCI_VDEVICE(INTEL, 0x8c82), board_ahci }, /* 9 Series AHCI */
- { PCI_VDEVICE(INTEL, 0x8c83), board_ahci }, /* 9 Series AHCI */
+ { PCI_VDEVICE(INTEL, 0x8c83), board_ahci_mobile }, /* 9 Series M AHCI */
{ PCI_VDEVICE(INTEL, 0x8c84), board_ahci }, /* 9 Series RAID */
- { PCI_VDEVICE(INTEL, 0x8c85), board_ahci }, /* 9 Series RAID */
+ { PCI_VDEVICE(INTEL, 0x8c85), board_ahci_mobile }, /* 9 Series M RAID */
{ PCI_VDEVICE(INTEL, 0x8c86), board_ahci }, /* 9 Series RAID */
- { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */
+ { PCI_VDEVICE(INTEL, 0x8c87), board_ahci_mobile }, /* 9 Series M RAID */
{ PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
- { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */
- { PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */
- { PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */
- { PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci_mobile }, /* 9 Series M RAID */
+ { PCI_VDEVICE(INTEL, 0x9d03), board_ahci_mobile }, /* Sunrise LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9d05), board_ahci_mobile }, /* Sunrise LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9d07), board_ahci_mobile }, /* Sunrise LP RAID */
{ PCI_VDEVICE(INTEL, 0xa102), board_ahci }, /* Sunrise Point-H AHCI */
- { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
+ { PCI_VDEVICE(INTEL, 0xa103), board_ahci_mobile }, /* Sunrise M AHCI */
{ PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa106), board_ahci }, /* Sunrise Point-H RAID */
- { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
+ { PCI_VDEVICE(INTEL, 0xa107), board_ahci_mobile }, /* Sunrise M RAID */
{ PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Lewisburg AHCI*/
@@ -386,6 +394,11 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/
+ { PCI_VDEVICE(INTEL, 0xa356), board_ahci }, /* Cannon Lake PCH-H RAID */
+ { PCI_VDEVICE(INTEL, 0x0f22), board_ahci_mobile }, /* Bay Trail AHCI */
+ { PCI_VDEVICE(INTEL, 0x0f23), board_ahci_mobile }, /* Bay Trail AHCI */
+ { PCI_VDEVICE(INTEL, 0x22a3), board_ahci_mobile }, /* Cherry Tr. AHCI */
+ { PCI_VDEVICE(INTEL, 0x5ae3), board_ahci_mobile }, /* ApolloLake AHCI */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -593,6 +606,9 @@ static int marvell_enable = 1;
module_param(marvell_enable, int, 0644);
MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
+static int mobile_lpm_policy = CONFIG_SATA_MOBILE_LPM_POLICY;
+module_param(mobile_lpm_policy, int, 0644);
+MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets");
static void ahci_pci_save_initial_config(struct pci_dev *pdev,
struct ahci_host_priv *hpriv)
@@ -1728,6 +1744,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ap->flags & ATA_FLAG_EM)
ap->em_message_type = hpriv->em_msg_type;
+ if ((hpriv->flags & AHCI_HFLAG_IS_MOBILE) &&
+ mobile_lpm_policy >= ATA_LPM_UNKNOWN &&
+ mobile_lpm_policy <= ATA_LPM_MIN_POWER)
+ ap->target_lpm_policy = mobile_lpm_policy;
/* disabled/not-implemented port */
if (!(hpriv->port_map & (1 << i)))
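
Condensing the plumbing above: the Kconfig value seeds the writable mobile_lpm_policy module parameter, which is applied per port only when the board entry carries AHCI_HFLAG_IS_MOBILE and the value lies inside the ata_lpm_policy enum. A hedged restatement of that check as a helper (not part of the patch; mobile_lpm_policy refers to the module parameter above):

static void ahci_apply_mobile_lpm(struct ahci_host_priv *hpriv,
				  struct ata_port *ap)
{
	/* Desktop/server chipsets keep their firmware defaults. */
	if (!(hpriv->flags & AHCI_HFLAG_IS_MOBILE))
		return;

	/* Out-of-range module parameter values are silently ignored. */
	if (mobile_lpm_policy < ATA_LPM_UNKNOWN ||
	    mobile_lpm_policy > ATA_LPM_MIN_POWER)
		return;

	ap->target_lpm_policy = mobile_lpm_policy;
}
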
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 749fd94441b0..a9d996e17d75 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -251,6 +251,9 @@ enum {
AHCI_HFLAG_YES_ALPM = (1 << 23), /* force ALPM cap on */
AHCI_HFLAG_NO_WRITE_TO_RO = (1 << 24), /* don't write to read
only registers */
+ AHCI_HFLAG_IS_MOBILE = (1 << 25), /* mobile chipset, use
+ SATA_MOBILE_LPM_POLICY
+ as default lpm_policy */
/* ap->flags bits */
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
index 5936d1679bf3..ea430819c80b 100644
--- a/drivers/ata/ahci_brcm.c
+++ b/drivers/ata/ahci_brcm.c
@@ -70,6 +70,13 @@
(DATA_ENDIAN << DMADESC_ENDIAN_SHIFT) | \
(MMIO_ENDIAN << MMIO_ENDIAN_SHIFT))
+#define BUS_CTRL_ENDIAN_NSP_CONF \
+ (0x02 << DMADATA_ENDIAN_SHIFT | 0x02 << DMADESC_ENDIAN_SHIFT)
+
+#define BUS_CTRL_ENDIAN_CONF_MASK \
+ (0x3 << MMIO_ENDIAN_SHIFT | 0x3 << DMADESC_ENDIAN_SHIFT | \
+ 0x3 << DMADATA_ENDIAN_SHIFT | 0x3 << PIODATA_ENDIAN_SHIFT)
+
enum brcm_ahci_version {
BRCM_SATA_BCM7425 = 1,
BRCM_SATA_BCM7445,
@@ -89,14 +96,6 @@ struct brcm_ahci_priv {
enum brcm_ahci_version version;
};
-static const struct ata_port_info ahci_brcm_port_info = {
- .flags = AHCI_FLAG_COMMON | ATA_FLAG_NO_DIPM,
- .link_flags = ATA_LFLAG_NO_DB_DELAY,
- .pio_mask = ATA_PIO4,
- .udma_mask = ATA_UDMA6,
- .port_ops = &ahci_platform_ops,
-};
-
static inline u32 brcm_sata_readreg(void __iomem *addr)
{
/*
@@ -250,20 +249,105 @@ static u32 brcm_ahci_get_portmask(struct platform_device *pdev,
static void brcm_sata_init(struct brcm_ahci_priv *priv)
{
void __iomem *ctrl = priv->top_ctrl + SATA_TOP_CTRL_BUS_CTRL;
+ u32 data;
/* Configure endianness */
- if (priv->version == BRCM_SATA_NSP) {
- u32 data = brcm_sata_readreg(ctrl);
-
- data &= ~((0x03 << DMADATA_ENDIAN_SHIFT) |
- (0x03 << DMADESC_ENDIAN_SHIFT));
- data |= (0x02 << DMADATA_ENDIAN_SHIFT) |
- (0x02 << DMADESC_ENDIAN_SHIFT);
- brcm_sata_writereg(data, ctrl);
- } else
- brcm_sata_writereg(BUS_CTRL_ENDIAN_CONF, ctrl);
+ data = brcm_sata_readreg(ctrl);
+ data &= ~BUS_CTRL_ENDIAN_CONF_MASK;
+ if (priv->version == BRCM_SATA_NSP)
+ data |= BUS_CTRL_ENDIAN_NSP_CONF;
+ else
+ data |= BUS_CTRL_ENDIAN_CONF;
+ brcm_sata_writereg(data, ctrl);
+}
+
+static unsigned int brcm_ahci_read_id(struct ata_device *dev,
+ struct ata_taskfile *tf, u16 *id)
+{
+ struct ata_port *ap = dev->link->ap;
+ struct ata_host *host = ap->host;
+ struct ahci_host_priv *hpriv = host->private_data;
+ struct brcm_ahci_priv *priv = hpriv->plat_data;
+ void __iomem *mmio = hpriv->mmio;
+ unsigned int err_mask;
+ unsigned long flags;
+ int i, rc;
+ u32 ctl;
+
+ /* Try to read the device ID and, if this fails, proceed with the
+ * recovery sequence below
+ */
+ err_mask = ata_do_dev_read_id(dev, tf, id);
+ if (likely(!err_mask))
+ return err_mask;
+
+ /* Disable host interrupts */
+ spin_lock_irqsave(&host->lock, flags);
+ ctl = readl(mmio + HOST_CTL);
+ ctl &= ~HOST_IRQ_EN;
+ writel(ctl, mmio + HOST_CTL);
+ readl(mmio + HOST_CTL); /* flush */
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ /* Perform the SATA PHY reset sequence */
+ brcm_sata_phy_disable(priv, ap->port_no);
+
+ /* Bring the PHY back on */
+ brcm_sata_phy_enable(priv, ap->port_no);
+
+ /* Re-initialize and calibrate the PHY */
+ for (i = 0; i < hpriv->nports; i++) {
+ rc = phy_init(hpriv->phys[i]);
+ if (rc)
+ goto disable_phys;
+
+ rc = phy_calibrate(hpriv->phys[i]);
+ if (rc) {
+ phy_exit(hpriv->phys[i]);
+ goto disable_phys;
+ }
+ }
+
+ /* Re-enable host interrupts */
+ spin_lock_irqsave(&host->lock, flags);
+ ctl = readl(mmio + HOST_CTL);
+ ctl |= HOST_IRQ_EN;
+ writel(ctl, mmio + HOST_CTL);
+ readl(mmio + HOST_CTL); /* flush */
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return ata_do_dev_read_id(dev, tf, id);
+
+disable_phys:
+ while (--i >= 0) {
+ phy_power_off(hpriv->phys[i]);
+ phy_exit(hpriv->phys[i]);
+ }
+
+ return AC_ERR_OTHER;
+}
+
+static void brcm_ahci_host_stop(struct ata_host *host)
+{
+ struct ahci_host_priv *hpriv = host->private_data;
+
+ ahci_platform_disable_resources(hpriv);
}
+static struct ata_port_operations ahci_brcm_platform_ops = {
+ .inherits = &ahci_ops,
+ .host_stop = brcm_ahci_host_stop,
+ .read_id = brcm_ahci_read_id,
+};
+
+static const struct ata_port_info ahci_brcm_port_info = {
+ .flags = AHCI_FLAG_COMMON | ATA_FLAG_NO_DIPM,
+ .link_flags = ATA_LFLAG_NO_DB_DELAY,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_brcm_platform_ops,
+};
+
#ifdef CONFIG_PM_SLEEP
static int brcm_ahci_suspend(struct device *dev)
{
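
brcm_ahci_read_id() above uses the standard reverse-iteration cleanup idiom: on a mid-loop failure, unwind exactly the PHYs initialised so far. A condensed, generic sketch of that idiom (the helper name is invented):

#include <linux/phy/phy.h>

static int init_all_phys(struct phy **phys, int n)
{
	int i, rc;

	for (i = 0; i < n; i++) {
		rc = phy_init(phys[i]);
		if (rc)
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)		/* undo only what succeeded */
		phy_exit(phys[i]);
	return rc;
}
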
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index b702c20fbc2b..7ecb1322a514 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -458,7 +458,7 @@ static const struct piix_map_db *piix_map_db_table[] = {
[ich8_2port_sata_byt] = &ich8_2port_map_db,
};
-static struct pci_bits piix_enable_bits[] = {
+static const struct pci_bits piix_enable_bits[] = {
{ 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */
{ 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */
};
diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c
deleted file mode 100644
index 9aeb7a6dd4d4..000000000000
--- a/drivers/ata/pata_at32.c
+++ /dev/null
@@ -1,400 +0,0 @@
-/*
- * AVR32 SMC/CFC PATA Driver
- *
- * Copyright (C) 2007 Atmel Norway
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- */
-
-#define DEBUG
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/slab.h>
-#include <scsi/scsi_host.h>
-#include <linux/ata.h>
-#include <linux/libata.h>
-#include <linux/err.h>
-#include <linux/io.h>
-
-#include <mach/board.h>
-#include <mach/smc.h>
-
-#define DRV_NAME "pata_at32"
-#define DRV_VERSION "0.0.3"
-
-/*
- * CompactFlash controller memory layout relative to the base address:
- *
- * Attribute memory: 0000 0000 -> 003f ffff
- * Common memory: 0040 0000 -> 007f ffff
- * I/O memory: 0080 0000 -> 00bf ffff
- * True IDE Mode: 00c0 0000 -> 00df ffff
- * Alt IDE Mode: 00e0 0000 -> 00ff ffff
- *
- * Only True IDE and Alt True IDE mode are needed for this driver.
- *
- * True IDE mode => CS0 = 0, CS1 = 1 (cmd, error, stat, etc)
- * Alt True IDE mode => CS0 = 1, CS1 = 0 (ctl, alt_stat)
- */
-#define CF_IDE_OFFSET 0x00c00000
-#define CF_ALT_IDE_OFFSET 0x00e00000
-#define CF_RES_SIZE 2048
-
-/*
- * Define DEBUG_BUS if you are doing debugging of your own EBI -> PATA
- * adaptor with a logic analyzer or similar.
- */
-#undef DEBUG_BUS
-
-/*
- * ATA PIO modes
- *
- * Name | Mb/s | Min cycle time | Mask
- * --------+-------+----------------+--------
- * Mode 0 | 3.3 | 600 ns | 0x01
- * Mode 1 | 5.2 | 383 ns | 0x03
- * Mode 2 | 8.3 | 240 ns | 0x07
- * Mode 3 | 11.1 | 180 ns | 0x0f
- * Mode 4 | 16.7 | 120 ns | 0x1f
- *
- * Alter PIO_MASK below according to table to set maximal PIO mode.
- */
-enum {
- PIO_MASK = ATA_PIO4,
-};
-
-/*
- * Struct containing private information about device.
- */
-struct at32_ide_info {
- unsigned int irq;
- struct resource res_ide;
- struct resource res_alt;
- void __iomem *ide_addr;
- void __iomem *alt_addr;
- unsigned int cs;
- struct smc_config smc;
-};
-
-/*
- * Setup SMC for the given ATA timing.
- */
-static int pata_at32_setup_timing(struct device *dev,
- struct at32_ide_info *info,
- const struct ata_timing *ata)
-{
- struct smc_config *smc = &info->smc;
- struct smc_timing timing;
-
- int active;
- int recover;
-
- memset(&timing, 0, sizeof(struct smc_timing));
-
- /* Total cycle time */
- timing.read_cycle = ata->cyc8b;
-
- /* DIOR <= CFIOR timings */
- timing.nrd_setup = ata->setup;
- timing.nrd_pulse = ata->act8b;
- timing.nrd_recover = ata->rec8b;
-
- /* Convert nanosecond timing to clock cycles */
- smc_set_timing(smc, &timing);
-
- /* Add one extra cycle setup due to signal ring */
- smc->nrd_setup = smc->nrd_setup + 1;
-
- active = smc->nrd_setup + smc->nrd_pulse;
- recover = smc->read_cycle - active;
-
- /* Need at least two cycles recovery */
- if (recover < 2)
- smc->read_cycle = active + 2;
-
- /* (CS0, CS1, DIR, OE) <= (CFCE1, CFCE2, CFRNW, NCSX) timings */
- smc->ncs_read_setup = 1;
- smc->ncs_read_pulse = smc->read_cycle - 2;
-
- /* Write timings same as read timings */
- smc->write_cycle = smc->read_cycle;
- smc->nwe_setup = smc->nrd_setup;
- smc->nwe_pulse = smc->nrd_pulse;
- smc->ncs_write_setup = smc->ncs_read_setup;
- smc->ncs_write_pulse = smc->ncs_read_pulse;
-
- /* Do some debugging output of ATA and SMC timings */
- dev_dbg(dev, "ATA: C=%d S=%d P=%d R=%d\n",
- ata->cyc8b, ata->setup, ata->act8b, ata->rec8b);
-
- dev_dbg(dev, "SMC: C=%d S=%d P=%d NS=%d NP=%d\n",
- smc->read_cycle, smc->nrd_setup, smc->nrd_pulse,
- smc->ncs_read_setup, smc->ncs_read_pulse);
-
- /* Finally, configure the SMC */
- return smc_set_configuration(info->cs, smc);
-}
-
-/*
- * Procedures for libATA.
- */
-static void pata_at32_set_piomode(struct ata_port *ap, struct ata_device *adev)
-{
- struct ata_timing timing;
- struct at32_ide_info *info = ap->host->private_data;
-
- int ret;
-
- /* Compute ATA timing */
- ret = ata_timing_compute(adev, adev->pio_mode, &timing, 1000, 0);
- if (ret) {
- dev_warn(ap->dev, "Failed to compute ATA timing %d\n", ret);
- return;
- }
-
- /* Setup SMC to ATA timing */
- ret = pata_at32_setup_timing(ap->dev, info, &timing);
- if (ret) {
- dev_warn(ap->dev, "Failed to setup ATA timing %d\n", ret);
- return;
- }
-}
-
-static struct scsi_host_template at32_sht = {
- ATA_PIO_SHT(DRV_NAME),
-};
-
-static struct ata_port_operations at32_port_ops = {
- .inherits = &ata_sff_port_ops,
- .cable_detect = ata_cable_40wire,
- .set_piomode = pata_at32_set_piomode,
-};
-
-static int __init pata_at32_init_one(struct device *dev,
- struct at32_ide_info *info)
-{
- struct ata_host *host;
- struct ata_port *ap;
-
- host = ata_host_alloc(dev, 1);
- if (!host)
- return -ENOMEM;
-
- ap = host->ports[0];
-
- /* Setup ATA bindings */
- ap->ops = &at32_port_ops;
- ap->pio_mask = PIO_MASK;
- ap->flags |= ATA_FLAG_SLAVE_POSS;
-
- /*
- * Since all 8-bit taskfile transfers has to go on the lower
- * byte of the data bus and there is a bug in the SMC that
- * makes it impossible to alter the bus width during runtime,
- * we need to hardwire the address signals as follows:
- *
- * A_IDE(2:0) <= A_EBI(3:1)
- *
- * This makes all addresses on the EBI even, thus all data
- * will be on the lower byte of the data bus. All addresses
- * used by libATA need to be altered according to this.
- */
- ap->ioaddr.altstatus_addr = info->alt_addr + (0x06 << 1);
- ap->ioaddr.ctl_addr = info->alt_addr + (0x06 << 1);
-
- ap->ioaddr.data_addr = info->ide_addr + (ATA_REG_DATA << 1);
- ap->ioaddr.error_addr = info->ide_addr + (ATA_REG_ERR << 1);
- ap->ioaddr.feature_addr = info->ide_addr + (ATA_REG_FEATURE << 1);
- ap->ioaddr.nsect_addr = info->ide_addr + (ATA_REG_NSECT << 1);
- ap->ioaddr.lbal_addr = info->ide_addr + (ATA_REG_LBAL << 1);
- ap->ioaddr.lbam_addr = info->ide_addr + (ATA_REG_LBAM << 1);
- ap->ioaddr.lbah_addr = info->ide_addr + (ATA_REG_LBAH << 1);
- ap->ioaddr.device_addr = info->ide_addr + (ATA_REG_DEVICE << 1);
- ap->ioaddr.status_addr = info->ide_addr + (ATA_REG_STATUS << 1);
- ap->ioaddr.command_addr = info->ide_addr + (ATA_REG_CMD << 1);
-
- /* Set info as private data of ATA host */
- host->private_data = info;
-
- /* Register ATA device and return */
- return ata_host_activate(host, info->irq, ata_sff_interrupt,
- IRQF_SHARED | IRQF_TRIGGER_RISING,
- &at32_sht);
-}
-
-/*
- * This function may come in handy for people analyzing their own
- * EBI -> PATA adaptors.
- */
-#ifdef DEBUG_BUS
-
-static void __init pata_at32_debug_bus(struct device *dev,
- struct at32_ide_info *info)
-{
- const int d1 = 0xff;
- const int d2 = 0x00;
-
- int i;
-
- /* Write 8-bit values (registers) */
- iowrite8(d1, info->alt_addr + (0x06 << 1));
- iowrite8(d2, info->alt_addr + (0x06 << 1));
-
- for (i = 0; i < 8; i++) {
- iowrite8(d1, info->ide_addr + (i << 1));
- iowrite8(d2, info->ide_addr + (i << 1));
- }
-
- /* Write 16 bit values (data) */
- iowrite16(d1, info->ide_addr);
- iowrite16(d1 << 8, info->ide_addr);
-
- iowrite16(d1, info->ide_addr);
- iowrite16(d1 << 8, info->ide_addr);
-}
-
-#endif
-
-static int __init pata_at32_probe(struct platform_device *pdev)
-{
- const struct ata_timing initial_timing =
- {XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0};
-
- struct device *dev = &pdev->dev;
- struct at32_ide_info *info;
- struct ide_platform_data *board = dev_get_platdata(&pdev->dev);
- struct resource *res;
-
- int irq;
- int ret;
-
- if (!board)
- return -ENXIO;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENXIO;
-
- /* Retrive IRQ */
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
- /* Setup struct containing private information */
- info = kzalloc(sizeof(struct at32_ide_info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- info->irq = irq;
- info->cs = board->cs;
-
- /* Request memory resources */
- info->res_ide.start = res->start + CF_IDE_OFFSET;
- info->res_ide.end = info->res_ide.start + CF_RES_SIZE - 1;
- info->res_ide.name = "ide";
- info->res_ide.flags = IORESOURCE_MEM;
-
- ret = request_resource(res, &info->res_ide);
- if (ret)
- goto err_req_res_ide;
-
- info->res_alt.start = res->start + CF_ALT_IDE_OFFSET;
- info->res_alt.end = info->res_alt.start + CF_RES_SIZE - 1;
- info->res_alt.name = "alt";
- info->res_alt.flags = IORESOURCE_MEM;
-
- ret = request_resource(res, &info->res_alt);
- if (ret)
- goto err_req_res_alt;
-
- /* Setup non-timing elements of SMC */
- info->smc.bus_width = 2; /* 16 bit data bus */
- info->smc.nrd_controlled = 1; /* Sample data on rising edge of NRD */
- info->smc.nwe_controlled = 0; /* Drive data on falling edge of NCS */
- info->smc.nwait_mode = 3; /* NWAIT is in READY mode */
- info->smc.byte_write = 0; /* Byte select access type */
- info->smc.tdf_mode = 0; /* TDF optimization disabled */
- info->smc.tdf_cycles = 0; /* No TDF wait cycles */
-
- /* Setup SMC to ATA timing */
- ret = pata_at32_setup_timing(dev, info, &initial_timing);
- if (ret)
- goto err_setup_timing;
-
- /* Map ATA address space */
- ret = -ENOMEM;
- info->ide_addr = devm_ioremap(dev, info->res_ide.start, 16);
- info->alt_addr = devm_ioremap(dev, info->res_alt.start, 16);
- if (!info->ide_addr || !info->alt_addr)
- goto err_ioremap;
-
-#ifdef DEBUG_BUS
- pata_at32_debug_bus(dev, info);
-#endif
-
- /* Setup and register ATA device */
- ret = pata_at32_init_one(dev, info);
- if (ret)
- goto err_ata_device;
-
- return 0;
-
- err_ata_device:
- err_ioremap:
- err_setup_timing:
- release_resource(&info->res_alt);
- err_req_res_alt:
- release_resource(&info->res_ide);
- err_req_res_ide:
- kfree(info);
-
- return ret;
-}
-
-static int __exit pata_at32_remove(struct platform_device *pdev)
-{
- struct ata_host *host = platform_get_drvdata(pdev);
- struct at32_ide_info *info;
-
- if (!host)
- return 0;
-
- info = host->private_data;
- ata_host_detach(host);
-
- if (!info)
- return 0;
-
- release_resource(&info->res_ide);
- release_resource(&info->res_alt);
-
- kfree(info);
-
- return 0;
-}
-
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:at32_ide");
-
-static struct platform_driver pata_at32_driver = {
- .remove = __exit_p(pata_at32_remove),
- .driver = {
- .name = "at32_ide",
- },
-};
-
-module_platform_driver_probe(pata_at32_driver, pata_at32_probe);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("AVR32 SMC/CFC PATA Driver");
-MODULE_AUTHOR("Kristoffer Nyborg Gregertsen <kngregertsen@norway.atmel.com>");
-MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 49d705c9f0f7..4d49fd3c927b 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -278,6 +278,10 @@ static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
};
const struct ata_port_info *ppi[] = { &info, &info };
+ /* SB600 doesn't have secondary port wired */
+	if (pdev->device == PCI_DEVICE_ID_ATI_IXP600_IDE)
+ ppi[1] = &ata_dummy_port_info;
+
return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL,
ATA_HOST_PARALLEL_SCAN);
}
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index 7a21edf89e72..8468b300193b 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -683,7 +683,7 @@ static u8 *it821x_firmware_command(struct ata_port *ap, u8 cmd, int len)
ioread16_rep(ap->ioaddr.data_addr, buf, len/2);
return (u8 *)buf;
}
- mdelay(1);
+ usleep_range(500, 1000);
}
kfree(buf);
printk(KERN_ERR "it821x_firmware_command: timeout\n");
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index 6db2e34bd52f..1a18e675ba9f 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -580,7 +580,7 @@ static void pdc_adjust_pll(struct ata_host *host, long pll_clock, unsigned int b
ioread16(mmio_base + PDC_PLL_CTL); /* flush */
/* Wait the PLL circuit to be stable */
- mdelay(30);
+ msleep(30);
#ifdef PDC_DEBUG
/*
@@ -620,7 +620,7 @@ static long pdc_detect_pll_input_clock(struct ata_host *host)
start_time = ktime_get();
/* Let the counter run for 100 ms. */
- mdelay(100);
+ msleep(100);
/* Read the counter values again */
end_count = pdc_read_counter(host);
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index cc208b72b199..42d4589b43d4 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -3596,7 +3596,7 @@ static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
hpriv->ops->phy_errata(hpriv, mmio, port_no);
if (IS_GEN_I(hpriv))
- mdelay(1);
+ usleep_range(500, 1000);
}
static void mv_pmp_select(struct ata_port *ap, int pmp)
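
The three mdelay() conversions above (it821x, pdc2027x, sata_mv) all follow the delay-API rule of thumb from Documentation/timers/timers-howto.txt: in non-atomic context a driver should sleep rather than busy-wait. A schematic summary (illustrative only):

#include <linux/delay.h>

static void delay_style_examples(void)
{
	usleep_range(500, 1000);	/* short pause, may sleep: ~0.5-1 ms */
	msleep(30);			/* >= 10 ms pause, timer slack is fine */
	mdelay(1);			/* busy-wait: only for atomic context */
}
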
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 1d60b58a8c19..fe4b24f05f6a 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -569,7 +569,7 @@ store_hard_offline_page(struct device *dev,
if (kstrtoull(buf, 0, &pfn) < 0)
return -EINVAL;
pfn >>= PAGE_SHIFT;
- ret = memory_failure(pfn, 0, 0);
+ ret = memory_failure(pfn, 0);
return ret ? ret : count;
}
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 0c80bea05bcb..528b24149bc7 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -1032,15 +1032,12 @@ static int genpd_prepare(struct device *dev)
static int genpd_finish_suspend(struct device *dev, bool poweroff)
{
struct generic_pm_domain *genpd;
- int ret;
+ int ret = 0;
genpd = dev_to_genpd(dev);
if (IS_ERR(genpd))
return -EINVAL;
- if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
- return 0;
-
if (poweroff)
ret = pm_generic_poweroff_noirq(dev);
else
@@ -1048,10 +1045,19 @@ static int genpd_finish_suspend(struct device *dev, bool poweroff)
if (ret)
return ret;
- if (genpd->dev_ops.stop && genpd->dev_ops.start) {
- ret = pm_runtime_force_suspend(dev);
- if (ret)
+ if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
+ return 0;
+
+ if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+ !pm_runtime_status_suspended(dev)) {
+ ret = genpd_stop_dev(genpd, dev);
+ if (ret) {
+ if (poweroff)
+ pm_generic_restore_noirq(dev);
+ else
+ pm_generic_resume_noirq(dev);
return ret;
+ }
}
genpd_lock(genpd);
@@ -1085,7 +1091,7 @@ static int genpd_suspend_noirq(struct device *dev)
static int genpd_resume_noirq(struct device *dev)
{
struct generic_pm_domain *genpd;
- int ret = 0;
+ int ret;
dev_dbg(dev, "%s()\n", __func__);
@@ -1094,21 +1100,21 @@ static int genpd_resume_noirq(struct device *dev)
return -EINVAL;
if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd))
- return 0;
+ return pm_generic_resume_noirq(dev);
genpd_lock(genpd);
genpd_sync_power_on(genpd, true, 0);
genpd->suspended_count--;
genpd_unlock(genpd);
- if (genpd->dev_ops.stop && genpd->dev_ops.start)
- ret = pm_runtime_force_resume(dev);
-
- ret = pm_generic_resume_noirq(dev);
- if (ret)
- return ret;
+ if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+ !pm_runtime_status_suspended(dev)) {
+ ret = genpd_start_dev(genpd, dev);
+ if (ret)
+ return ret;
+ }
- return ret;
+ return pm_generic_resume_noirq(dev);
}
/**
@@ -1135,8 +1141,9 @@ static int genpd_freeze_noirq(struct device *dev)
if (ret)
return ret;
- if (genpd->dev_ops.stop && genpd->dev_ops.start)
- ret = pm_runtime_force_suspend(dev);
+ if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+ !pm_runtime_status_suspended(dev))
+ ret = genpd_stop_dev(genpd, dev);
return ret;
}
@@ -1159,8 +1166,9 @@ static int genpd_thaw_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- if (genpd->dev_ops.stop && genpd->dev_ops.start) {
- ret = pm_runtime_force_resume(dev);
+ if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+ !pm_runtime_status_suspended(dev)) {
+ ret = genpd_start_dev(genpd, dev);
if (ret)
return ret;
}
@@ -1217,8 +1225,9 @@ static int genpd_restore_noirq(struct device *dev)
genpd_sync_power_on(genpd, true, 0);
genpd_unlock(genpd);
- if (genpd->dev_ops.stop && genpd->dev_ops.start) {
- ret = pm_runtime_force_resume(dev);
+ if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+ !pm_runtime_status_suspended(dev)) {
+ ret = genpd_start_dev(genpd, dev);
if (ret)
return ret;
}
@@ -2199,20 +2208,8 @@ int genpd_dev_pm_attach(struct device *dev)
ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
"#power-domain-cells", 0, &pd_args);
- if (ret < 0) {
- if (ret != -ENOENT)
- return ret;
-
- /*
- * Try legacy Samsung-specific bindings
- * (for backwards compatibility of DT ABI)
- */
- pd_args.args_count = 0;
- pd_args.np = of_parse_phandle(dev->of_node,
- "samsung,power-domain", 0);
- if (!pd_args.np)
- return -ENOENT;
- }
+ if (ret < 0)
+ return ret;
mutex_lock(&gpd_list_lock);
pd = genpd_get_from_provider(&pd_args);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 08744b572af6..02a497e7c785 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -18,7 +18,6 @@
*/
#include <linux/device.h>
-#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
@@ -541,6 +540,73 @@ void dev_pm_skip_next_resume_phases(struct device *dev)
}
/**
+ * suspend_event - Return a "suspend" message for given "resume" one.
+ * @resume_msg: PM message representing a system-wide resume transition.
+ */
+static pm_message_t suspend_event(pm_message_t resume_msg)
+{
+ switch (resume_msg.event) {
+ case PM_EVENT_RESUME:
+ return PMSG_SUSPEND;
+ case PM_EVENT_THAW:
+ case PM_EVENT_RESTORE:
+ return PMSG_FREEZE;
+ case PM_EVENT_RECOVER:
+ return PMSG_HIBERNATE;
+ }
+ return PMSG_ON;
+}
+
+/**
+ * dev_pm_may_skip_resume - System-wide device resume optimization check.
+ * @dev: Target device.
+ *
+ * Checks whether or not the device may be left in suspend after a system-wide
+ * transition to the working state.
+ */
+bool dev_pm_may_skip_resume(struct device *dev)
+{
+ return !dev->power.must_resume && pm_transition.event != PM_EVENT_RESTORE;
+}
+
+static pm_callback_t dpm_subsys_resume_noirq_cb(struct device *dev,
+ pm_message_t state,
+ const char **info_p)
+{
+ pm_callback_t callback;
+ const char *info;
+
+ if (dev->pm_domain) {
+ info = "noirq power domain ";
+ callback = pm_noirq_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "noirq type ";
+ callback = pm_noirq_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "noirq class ";
+ callback = pm_noirq_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "noirq bus ";
+ callback = pm_noirq_op(dev->bus->pm, state);
+ } else {
+ return NULL;
+ }
+
+ if (info_p)
+ *info_p = info;
+
+ return callback;
+}
+
+static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
+ pm_message_t state,
+ const char **info_p);
+
+static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
+ pm_message_t state,
+ const char **info_p);
+
+/**
* device_resume_noirq - Execute a "noirq resume" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
@@ -551,8 +617,9 @@ void dev_pm_skip_next_resume_phases(struct device *dev)
*/
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
- pm_callback_t callback = NULL;
- const char *info = NULL;
+ pm_callback_t callback;
+ const char *info;
+ bool skip_resume;
int error = 0;
TRACE_DEVICE(dev);
@@ -566,29 +633,61 @@ static int device_resume_noirq(struct device *dev, pm_message_t state, bool asyn
dpm_wait_for_superior(dev, async);
- if (dev->pm_domain) {
- info = "noirq power domain ";
- callback = pm_noirq_op(&dev->pm_domain->ops, state);
- } else if (dev->type && dev->type->pm) {
- info = "noirq type ";
- callback = pm_noirq_op(dev->type->pm, state);
- } else if (dev->class && dev->class->pm) {
- info = "noirq class ";
- callback = pm_noirq_op(dev->class->pm, state);
- } else if (dev->bus && dev->bus->pm) {
- info = "noirq bus ";
- callback = pm_noirq_op(dev->bus->pm, state);
+ skip_resume = dev_pm_may_skip_resume(dev);
+
+ callback = dpm_subsys_resume_noirq_cb(dev, state, &info);
+ if (callback)
+ goto Run;
+
+ if (skip_resume)
+ goto Skip;
+
+ if (dev_pm_smart_suspend_and_suspended(dev)) {
+ pm_message_t suspend_msg = suspend_event(state);
+
+ /*
+ * If "freeze" callbacks have been skipped during a transition
+ * related to hibernation, the subsequent "thaw" callbacks must
+ * be skipped too or bad things may happen. Otherwise, resume
+ * callbacks are going to be run for the device, so its runtime
+ * PM status must be changed to reflect the new state after the
+ * transition under way.
+ */
+ if (!dpm_subsys_suspend_late_cb(dev, suspend_msg, NULL) &&
+ !dpm_subsys_suspend_noirq_cb(dev, suspend_msg, NULL)) {
+ if (state.event == PM_EVENT_THAW) {
+ skip_resume = true;
+ goto Skip;
+ } else {
+ pm_runtime_set_active(dev);
+ }
+ }
}
- if (!callback && dev->driver && dev->driver->pm) {
+ if (dev->driver && dev->driver->pm) {
info = "noirq driver ";
callback = pm_noirq_op(dev->driver->pm, state);
}
+Run:
error = dpm_run_callback(callback, dev, state, info);
+
+Skip:
dev->power.is_noirq_suspended = false;
- Out:
+ if (skip_resume) {
+ /*
+ * The device is going to be left in suspend, but it might not
+ * have been in runtime suspend before the system suspended, so
+ * its runtime PM status needs to be updated to avoid confusing
+ * the runtime PM framework when runtime PM is enabled for the
+ * device again.
+ */
+ pm_runtime_set_suspended(dev);
+ dev_pm_skip_next_resume_phases(dev);
+ }
+
+Out:
complete_all(&dev->power.completion);
TRACE_RESUME(error);
return error;
@@ -681,6 +780,35 @@ void dpm_resume_noirq(pm_message_t state)
dpm_noirq_end();
}
+static pm_callback_t dpm_subsys_resume_early_cb(struct device *dev,
+ pm_message_t state,
+ const char **info_p)
+{
+ pm_callback_t callback;
+ const char *info;
+
+ if (dev->pm_domain) {
+ info = "early power domain ";
+ callback = pm_late_early_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "early type ";
+ callback = pm_late_early_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "early class ";
+ callback = pm_late_early_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "early bus ";
+ callback = pm_late_early_op(dev->bus->pm, state);
+ } else {
+ return NULL;
+ }
+
+ if (info_p)
+ *info_p = info;
+
+ return callback;
+}
+
/**
* device_resume_early - Execute an "early resume" callback for given device.
* @dev: Device to handle.
@@ -691,8 +819,8 @@ void dpm_resume_noirq(pm_message_t state)
*/
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
- pm_callback_t callback = NULL;
- const char *info = NULL;
+ pm_callback_t callback;
+ const char *info;
int error = 0;
TRACE_DEVICE(dev);
@@ -706,19 +834,7 @@ static int device_resume_early(struct device *dev, pm_message_t state, bool asyn
dpm_wait_for_superior(dev, async);
- if (dev->pm_domain) {
- info = "early power domain ";
- callback = pm_late_early_op(&dev->pm_domain->ops, state);
- } else if (dev->type && dev->type->pm) {
- info = "early type ";
- callback = pm_late_early_op(dev->type->pm, state);
- } else if (dev->class && dev->class->pm) {
- info = "early class ";
- callback = pm_late_early_op(dev->class->pm, state);
- } else if (dev->bus && dev->bus->pm) {
- info = "early bus ";
- callback = pm_late_early_op(dev->bus->pm, state);
- }
+ callback = dpm_subsys_resume_early_cb(dev, state, &info);
if (!callback && dev->driver && dev->driver->pm) {
info = "early driver ";
@@ -1089,6 +1205,77 @@ static pm_message_t resume_event(pm_message_t sleep_state)
return PMSG_ON;
}
+static void dpm_superior_set_must_resume(struct device *dev)
+{
+ struct device_link *link;
+ int idx;
+
+ if (dev->parent)
+ dev->parent->power.must_resume = true;
+
+ idx = device_links_read_lock();
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+ link->supplier->power.must_resume = true;
+
+ device_links_read_unlock(idx);
+}
+
+static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
+ pm_message_t state,
+ const char **info_p)
+{
+ pm_callback_t callback;
+ const char *info;
+
+ if (dev->pm_domain) {
+ info = "noirq power domain ";
+ callback = pm_noirq_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "noirq type ";
+ callback = pm_noirq_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "noirq class ";
+ callback = pm_noirq_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "noirq bus ";
+ callback = pm_noirq_op(dev->bus->pm, state);
+ } else {
+ return NULL;
+ }
+
+ if (info_p)
+ *info_p = info;
+
+ return callback;
+}
+
+static bool device_must_resume(struct device *dev, pm_message_t state,
+ bool no_subsys_suspend_noirq)
+{
+ pm_message_t resume_msg = resume_event(state);
+
+ /*
+ * If all of the device driver's "noirq", "late" and "early" callbacks
+ * are invoked directly by the core, the decision to allow the device to
+ * stay in suspend can be based on its current runtime PM status and its
+ * wakeup settings.
+ */
+ if (no_subsys_suspend_noirq &&
+ !dpm_subsys_suspend_late_cb(dev, state, NULL) &&
+ !dpm_subsys_resume_early_cb(dev, resume_msg, NULL) &&
+ !dpm_subsys_resume_noirq_cb(dev, resume_msg, NULL))
+ return !pm_runtime_status_suspended(dev) &&
+ (resume_msg.event != PM_EVENT_RESUME ||
+ (device_can_wakeup(dev) && !device_may_wakeup(dev)));
+
+ /*
+ * The only safe strategy here is to require that if the device may not
+ * be left in suspend, resume callbacks must be invoked for it.
+ */
+ return !dev->power.may_skip_resume;
+}
+
/**
* __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
* @dev: Device to handle.
@@ -1100,8 +1287,9 @@ static pm_message_t resume_event(pm_message_t sleep_state)
*/
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
- pm_callback_t callback = NULL;
- const char *info = NULL;
+ pm_callback_t callback;
+ const char *info;
+ bool no_subsys_cb = false;
int error = 0;
TRACE_DEVICE(dev);
@@ -1120,30 +1308,40 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
- if (dev->pm_domain) {
- info = "noirq power domain ";
- callback = pm_noirq_op(&dev->pm_domain->ops, state);
- } else if (dev->type && dev->type->pm) {
- info = "noirq type ";
- callback = pm_noirq_op(dev->type->pm, state);
- } else if (dev->class && dev->class->pm) {
- info = "noirq class ";
- callback = pm_noirq_op(dev->class->pm, state);
- } else if (dev->bus && dev->bus->pm) {
- info = "noirq bus ";
- callback = pm_noirq_op(dev->bus->pm, state);
- }
+ callback = dpm_subsys_suspend_noirq_cb(dev, state, &info);
+ if (callback)
+ goto Run;
- if (!callback && dev->driver && dev->driver->pm) {
+ no_subsys_cb = !dpm_subsys_suspend_late_cb(dev, state, NULL);
+
+ if (dev_pm_smart_suspend_and_suspended(dev) && no_subsys_cb)
+ goto Skip;
+
+ if (dev->driver && dev->driver->pm) {
info = "noirq driver ";
callback = pm_noirq_op(dev->driver->pm, state);
}
+Run:
error = dpm_run_callback(callback, dev, state, info);
- if (!error)
- dev->power.is_noirq_suspended = true;
- else
+ if (error) {
async_error = error;
+ goto Complete;
+ }
+
+Skip:
+ dev->power.is_noirq_suspended = true;
+
+ if (dev_pm_test_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED)) {
+ dev->power.must_resume = dev->power.must_resume ||
+ atomic_read(&dev->power.usage_count) > 1 ||
+ device_must_resume(dev, state, no_subsys_cb);
+ } else {
+ dev->power.must_resume = true;
+ }
+
+ if (dev->power.must_resume)
+ dpm_superior_set_must_resume(dev);
Complete:
complete_all(&dev->power.completion);
@@ -1249,6 +1447,50 @@ int dpm_suspend_noirq(pm_message_t state)
return ret;
}
+static void dpm_propagate_wakeup_to_parent(struct device *dev)
+{
+ struct device *parent = dev->parent;
+
+ if (!parent)
+ return;
+
+ spin_lock_irq(&parent->power.lock);
+
+ if (dev->power.wakeup_path && !parent->power.ignore_children)
+ parent->power.wakeup_path = true;
+
+ spin_unlock_irq(&parent->power.lock);
+}
+
+static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
+ pm_message_t state,
+ const char **info_p)
+{
+ pm_callback_t callback;
+ const char *info;
+
+ if (dev->pm_domain) {
+ info = "late power domain ";
+ callback = pm_late_early_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "late type ";
+ callback = pm_late_early_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "late class ";
+ callback = pm_late_early_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "late bus ";
+ callback = pm_late_early_op(dev->bus->pm, state);
+ } else {
+ return NULL;
+ }
+
+ if (info_p)
+ *info_p = info;
+
+ return callback;
+}
+
/**
* __device_suspend_late - Execute a "late suspend" callback for given device.
* @dev: Device to handle.
@@ -1259,8 +1501,8 @@ int dpm_suspend_noirq(pm_message_t state)
*/
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
- pm_callback_t callback = NULL;
- const char *info = NULL;
+ pm_callback_t callback;
+ const char *info;
int error = 0;
TRACE_DEVICE(dev);
@@ -1281,30 +1523,29 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
- if (dev->pm_domain) {
- info = "late power domain ";
- callback = pm_late_early_op(&dev->pm_domain->ops, state);
- } else if (dev->type && dev->type->pm) {
- info = "late type ";
- callback = pm_late_early_op(dev->type->pm, state);
- } else if (dev->class && dev->class->pm) {
- info = "late class ";
- callback = pm_late_early_op(dev->class->pm, state);
- } else if (dev->bus && dev->bus->pm) {
- info = "late bus ";
- callback = pm_late_early_op(dev->bus->pm, state);
- }
+ callback = dpm_subsys_suspend_late_cb(dev, state, &info);
+ if (callback)
+ goto Run;
- if (!callback && dev->driver && dev->driver->pm) {
+ if (dev_pm_smart_suspend_and_suspended(dev) &&
+ !dpm_subsys_suspend_noirq_cb(dev, state, NULL))
+ goto Skip;
+
+ if (dev->driver && dev->driver->pm) {
info = "late driver ";
callback = pm_late_early_op(dev->driver->pm, state);
}
+Run:
error = dpm_run_callback(callback, dev, state, info);
- if (!error)
- dev->power.is_late_suspended = true;
- else
+ if (error) {
async_error = error;
+ goto Complete;
+ }
+ dpm_propagate_wakeup_to_parent(dev);
+
+Skip:
+ dev->power.is_late_suspended = true;
Complete:
TRACE_SUSPEND(error);
@@ -1435,11 +1676,17 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
return error;
}
-static void dpm_clear_suppliers_direct_complete(struct device *dev)
+static void dpm_clear_superiors_direct_complete(struct device *dev)
{
struct device_link *link;
int idx;
+ if (dev->parent) {
+ spin_lock_irq(&dev->parent->power.lock);
+ dev->parent->power.direct_complete = false;
+ spin_unlock_irq(&dev->parent->power.lock);
+ }
+
idx = device_links_read_lock();
list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
@@ -1500,6 +1747,9 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
dev->power.direct_complete = false;
}
+ dev->power.may_skip_resume = false;
+ dev->power.must_resume = false;
+
dpm_watchdog_set(&wd, dev);
device_lock(dev);
@@ -1543,20 +1793,12 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
End:
if (!error) {
- struct device *parent = dev->parent;
-
dev->power.is_suspended = true;
- if (parent) {
- spin_lock_irq(&parent->power.lock);
-
- dev->parent->power.direct_complete = false;
- if (dev->power.wakeup_path
- && !dev->parent->power.ignore_children)
- dev->parent->power.wakeup_path = true;
+ if (device_may_wakeup(dev))
+ dev->power.wakeup_path = true;
- spin_unlock_irq(&parent->power.lock);
- }
- dpm_clear_suppliers_direct_complete(dev);
+ dpm_propagate_wakeup_to_parent(dev);
+ dpm_clear_superiors_direct_complete(dev);
}
device_unlock(dev);
@@ -1665,8 +1907,9 @@ static int device_prepare(struct device *dev, pm_message_t state)
if (dev->power.syscore)
return 0;
- WARN_ON(dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
- !pm_runtime_enabled(dev));
+ WARN_ON(!pm_runtime_enabled(dev) &&
+ dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
+ DPM_FLAG_LEAVE_SUSPENDED));
/*
* If a device's parent goes into runtime suspend at the wrong time,
@@ -1678,7 +1921,7 @@ static int device_prepare(struct device *dev, pm_message_t state)
device_lock(dev);
- dev->power.wakeup_path = device_may_wakeup(dev);
+ dev->power.wakeup_path = false;
if (dev->power.no_pm_callbacks) {
ret = 1; /* Let device go direct_complete */
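
The main.c rework above is driven by the new DPM_FLAG_LEAVE_SUSPENDED driver flag: a driver that can tolerate being left suspended across a system resume opts in, typically alongside DPM_FLAG_SMART_SUSPEND. A hypothetical probe sketch (function name invented):

#include <linux/pm_runtime.h>

static int example_probe(struct device *dev)
{
	/*
	 * Let the core skip suspend and resume callbacks when runtime
	 * PM already has the device in the right state.
	 */
	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
				     DPM_FLAG_LEAVE_SUSPENDED);
	return 0;
}
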
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index 7beee75399d4..21244c53e377 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -41,20 +41,15 @@ extern void dev_pm_disable_wake_irq_check(struct device *dev);
#ifdef CONFIG_PM_SLEEP
-extern int device_wakeup_attach_irq(struct device *dev,
- struct wake_irq *wakeirq);
+extern void device_wakeup_attach_irq(struct device *dev, struct wake_irq *wakeirq);
extern void device_wakeup_detach_irq(struct device *dev);
extern void device_wakeup_arm_wake_irqs(void);
extern void device_wakeup_disarm_wake_irqs(void);
#else
-static inline int
-device_wakeup_attach_irq(struct device *dev,
- struct wake_irq *wakeirq)
-{
- return 0;
-}
+static inline void device_wakeup_attach_irq(struct device *dev,
+ struct wake_irq *wakeirq) {}
static inline void device_wakeup_detach_irq(struct device *dev)
{
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 6e89b51ea3d9..8bef3cb2424d 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1613,22 +1613,34 @@ void pm_runtime_drop_link(struct device *dev)
spin_unlock_irq(&dev->power.lock);
}
+static bool pm_runtime_need_not_resume(struct device *dev)
+{
+ return atomic_read(&dev->power.usage_count) <= 1 &&
+ (atomic_read(&dev->power.child_count) == 0 ||
+ dev->power.ignore_children);
+}
+
/**
* pm_runtime_force_suspend - Force a device into suspend state if needed.
* @dev: Device to suspend.
*
* Disable runtime PM so we safely can check the device's runtime PM status and
- * if it is active, invoke it's .runtime_suspend callback to bring it into
- * suspend state. Keep runtime PM disabled to preserve the state unless we
- * encounter errors.
+ * if it is active, invoke its ->runtime_suspend callback to suspend it and
+ * change its runtime PM status field to RPM_SUSPENDED. Also, if the device's
+ * usage and children counters don't indicate that the device was in use before
+ * the system-wide transition under way, decrement its parent's children counter
+ * (if there is a parent). Keep runtime PM disabled to preserve the state
+ * unless we encounter errors.
*
* Typically this function may be invoked from a system suspend callback to make
- * sure the device is put into low power state.
+ * sure the device is put into low power state and it should only be used during
+ * system-wide PM transitions to sleep states. It assumes that the analogous
+ * pm_runtime_force_resume() will be used to resume the device.
*/
int pm_runtime_force_suspend(struct device *dev)
{
int (*callback)(struct device *);
- int ret = 0;
+ int ret;
pm_runtime_disable(dev);
if (pm_runtime_status_suspended(dev))
@@ -1636,27 +1648,23 @@ int pm_runtime_force_suspend(struct device *dev)
callback = RPM_GET_CALLBACK(dev, runtime_suspend);
- if (!callback) {
- ret = -ENOSYS;
- goto err;
- }
-
- ret = callback(dev);
+ ret = callback ? callback(dev) : 0;
if (ret)
goto err;
/*
- * Increase the runtime PM usage count for the device's parent, in case
- * when we find the device being used when system suspend was invoked.
- * This informs pm_runtime_force_resume() to resume the parent
- * immediately, which is needed to be able to resume its children,
- * when not deferring the resume to be managed via runtime PM.
+ * If the device can stay in suspend after the system-wide transition
+ * to the working state that will follow, drop the children counter of
+ * its parent, but set its status to RPM_SUSPENDED anyway in case this
+ * function will be called again for it in the meantime.
*/
- if (dev->parent && atomic_read(&dev->power.usage_count) > 1)
- pm_runtime_get_noresume(dev->parent);
+ if (pm_runtime_need_not_resume(dev))
+ pm_runtime_set_suspended(dev);
+ else
+ __update_runtime_status(dev, RPM_SUSPENDED);
- pm_runtime_set_suspended(dev);
return 0;
+
err:
pm_runtime_enable(dev);
return ret;
@@ -1669,13 +1677,9 @@ EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
*
* Prior invoking this function we expect the user to have brought the device
* into low power state by a call to pm_runtime_force_suspend(). Here we reverse
- * those actions and brings the device into full power, if it is expected to be
- * used on system resume. To distinguish that, we check whether the runtime PM
- * usage count is greater than 1 (the PM core increases the usage count in the
- * system PM prepare phase), as that indicates a real user (such as a subsystem,
- * driver, userspace, etc.) is using it. If that is the case, the device is
- * expected to be used on system resume as well, so then we resume it. In the
- * other case, we defer the resume to be managed via runtime PM.
+ * those actions and bring the device into full power, if it is expected to be
+ * used on system resume. In the other case, we defer the resume to be managed
+ * via runtime PM.
*
* Typically this function may be invoked from a system resume callback.
*/
@@ -1684,32 +1688,18 @@ int pm_runtime_force_resume(struct device *dev)
int (*callback)(struct device *);
int ret = 0;
- callback = RPM_GET_CALLBACK(dev, runtime_resume);
-
- if (!callback) {
- ret = -ENOSYS;
- goto out;
- }
-
- if (!pm_runtime_status_suspended(dev))
+ if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
goto out;
/*
- * Decrease the parent's runtime PM usage count, if we increased it
- * during system suspend in pm_runtime_force_suspend().
- */
- if (atomic_read(&dev->power.usage_count) > 1) {
- if (dev->parent)
- pm_runtime_put_noidle(dev->parent);
- } else {
- goto out;
- }
+ * The value of the parent's children counter is correct already, so
+ * just update the status of the device.
+ */
+ __update_runtime_status(dev, RPM_ACTIVE);
- ret = pm_runtime_set_active(dev);
- if (ret)
- goto out;
+ callback = RPM_GET_CALLBACK(dev, runtime_resume);
- ret = callback(dev);
+ ret = callback ? callback(dev) : 0;
if (ret) {
pm_runtime_set_suspended(dev);
goto out;
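
With the pm_runtime_need_not_resume() handling in place, these two helpers form a matched pair that drivers can plug straight into their system sleep callbacks when the runtime PM callbacks already do the right thing. A minimal sketch, assuming a hypothetical "foo" driver (only pm_runtime_force_suspend()/pm_runtime_force_resume() and the SET_*_PM_OPS macros are real kernel API):

    #include <linux/pm.h>
    #include <linux/pm_runtime.h>

    static int foo_runtime_suspend(struct device *dev)
    {
            /* put the (hypothetical) device into its low power state */
            return 0;
    }

    static int foo_runtime_resume(struct device *dev)
    {
            /* bring the device back to full power */
            return 0;
    }

    static const struct dev_pm_ops foo_pm_ops = {
            SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                    pm_runtime_force_resume)
            SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
    };

If the device was already runtime-suspended before the transition and nothing needs it afterwards, the resume side can now skip waking it up entirely.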
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index e153e28b1857..0f651efc58a1 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -108,16 +108,10 @@ static ssize_t control_show(struct device *dev, struct device_attribute *attr,
static ssize_t control_store(struct device * dev, struct device_attribute *attr,
const char * buf, size_t n)
{
- char *cp;
- int len = n;
-
- cp = memchr(buf, '\n', n);
- if (cp)
- len = cp - buf;
device_lock(dev);
- if (len == sizeof ctrl_auto - 1 && strncmp(buf, ctrl_auto, len) == 0)
+ if (sysfs_streq(buf, ctrl_auto))
pm_runtime_allow(dev);
- else if (len == sizeof ctrl_on - 1 && strncmp(buf, ctrl_on, len) == 0)
+ else if (sysfs_streq(buf, ctrl_on))
pm_runtime_forbid(dev);
else
n = -EINVAL;
@@ -125,9 +119,9 @@ static ssize_t control_store(struct device * dev, struct device_attribute *attr,
return n;
}
-static DEVICE_ATTR(control, 0644, control_show, control_store);
+static DEVICE_ATTR_RW(control);
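
sysfs_streq() matches a sysfs input buffer against a constant while tolerating the trailing newline that echo appends, which is exactly what the deleted memchr()/strncmp() sequence open-coded. The DEVICE_ATTR_RW()/DEVICE_ATTR_RO() macros additionally require the callbacks to be named <attr>_show/<attr>_store, which is why the helpers below get renamed. A rough, simplified equivalent of the sysfs_streq() behaviour relied on here (hypothetical helper, not the kernel's implementation):

    /* true for both "auto" and "auto\n" */
    static bool foo_streq(const char *buf, const char *s)
    {
            size_t len = strlen(s);

            if (strncmp(buf, s, len))
                    return false;
            return buf[len] == '\0' ||
                   (buf[len] == '\n' && buf[len + 1] == '\0');
    }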
-static ssize_t rtpm_active_time_show(struct device *dev,
+static ssize_t runtime_active_time_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret;
@@ -138,9 +132,9 @@ static ssize_t rtpm_active_time_show(struct device *dev,
return ret;
}
-static DEVICE_ATTR(runtime_active_time, 0444, rtpm_active_time_show, NULL);
+static DEVICE_ATTR_RO(runtime_active_time);
-static ssize_t rtpm_suspended_time_show(struct device *dev,
+static ssize_t runtime_suspended_time_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret;
@@ -152,9 +146,9 @@ static ssize_t rtpm_suspended_time_show(struct device *dev,
return ret;
}
-static DEVICE_ATTR(runtime_suspended_time, 0444, rtpm_suspended_time_show, NULL);
+static DEVICE_ATTR_RO(runtime_suspended_time);
-static ssize_t rtpm_status_show(struct device *dev,
+static ssize_t runtime_status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
const char *p;
@@ -184,7 +178,7 @@ static ssize_t rtpm_status_show(struct device *dev,
return sprintf(buf, p);
}
-static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
+static DEVICE_ATTR_RO(runtime_status);
static ssize_t autosuspend_delay_ms_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -211,26 +205,25 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev,
return n;
}
-static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show,
- autosuspend_delay_ms_store);
+static DEVICE_ATTR_RW(autosuspend_delay_ms);
-static ssize_t pm_qos_resume_latency_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t pm_qos_resume_latency_us_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
s32 value = dev_pm_qos_requested_resume_latency(dev);
if (value == 0)
return sprintf(buf, "n/a\n");
- else if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+ if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
value = 0;
return sprintf(buf, "%d\n", value);
}
-static ssize_t pm_qos_resume_latency_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t n)
+static ssize_t pm_qos_resume_latency_us_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
{
s32 value;
int ret;
@@ -245,7 +238,7 @@ static ssize_t pm_qos_resume_latency_store(struct device *dev,
if (value == 0)
value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
- } else if (!strcmp(buf, "n/a") || !strcmp(buf, "n/a\n")) {
+ } else if (sysfs_streq(buf, "n/a")) {
value = 0;
} else {
return -EINVAL;
@@ -256,26 +249,25 @@ static ssize_t pm_qos_resume_latency_store(struct device *dev,
return ret < 0 ? ret : n;
}
-static DEVICE_ATTR(pm_qos_resume_latency_us, 0644,
- pm_qos_resume_latency_show, pm_qos_resume_latency_store);
+static DEVICE_ATTR_RW(pm_qos_resume_latency_us);
-static ssize_t pm_qos_latency_tolerance_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t pm_qos_latency_tolerance_us_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
s32 value = dev_pm_qos_get_user_latency_tolerance(dev);
if (value < 0)
return sprintf(buf, "auto\n");
- else if (value == PM_QOS_LATENCY_ANY)
+ if (value == PM_QOS_LATENCY_ANY)
return sprintf(buf, "any\n");
return sprintf(buf, "%d\n", value);
}
-static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t n)
+static ssize_t pm_qos_latency_tolerance_us_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
{
s32 value;
int ret;
@@ -285,9 +277,9 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
if (value < 0)
return -EINVAL;
} else {
- if (!strcmp(buf, "auto") || !strcmp(buf, "auto\n"))
+ if (sysfs_streq(buf, "auto"))
value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
- else if (!strcmp(buf, "any") || !strcmp(buf, "any\n"))
+ else if (sysfs_streq(buf, "any"))
value = PM_QOS_LATENCY_ANY;
else
return -EINVAL;
@@ -296,8 +288,7 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
return ret < 0 ? ret : n;
}
-static DEVICE_ATTR(pm_qos_latency_tolerance_us, 0644,
- pm_qos_latency_tolerance_show, pm_qos_latency_tolerance_store);
+static DEVICE_ATTR_RW(pm_qos_latency_tolerance_us);
static ssize_t pm_qos_no_power_off_show(struct device *dev,
struct device_attribute *attr,
@@ -323,49 +314,39 @@ static ssize_t pm_qos_no_power_off_store(struct device *dev,
return ret < 0 ? ret : n;
}
-static DEVICE_ATTR(pm_qos_no_power_off, 0644,
- pm_qos_no_power_off_show, pm_qos_no_power_off_store);
+static DEVICE_ATTR_RW(pm_qos_no_power_off);
#ifdef CONFIG_PM_SLEEP
static const char _enabled[] = "enabled";
static const char _disabled[] = "disabled";
-static ssize_t
-wake_show(struct device * dev, struct device_attribute *attr, char * buf)
+static ssize_t wakeup_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
return sprintf(buf, "%s\n", device_can_wakeup(dev)
? (device_may_wakeup(dev) ? _enabled : _disabled)
: "");
}
-static ssize_t
-wake_store(struct device * dev, struct device_attribute *attr,
- const char * buf, size_t n)
+static ssize_t wakeup_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t n)
{
- char *cp;
- int len = n;
-
if (!device_can_wakeup(dev))
return -EINVAL;
- cp = memchr(buf, '\n', n);
- if (cp)
- len = cp - buf;
- if (len == sizeof _enabled - 1
- && strncmp(buf, _enabled, sizeof _enabled - 1) == 0)
+ if (sysfs_streq(buf, _enabled))
device_set_wakeup_enable(dev, 1);
- else if (len == sizeof _disabled - 1
- && strncmp(buf, _disabled, sizeof _disabled - 1) == 0)
+ else if (sysfs_streq(buf, _disabled))
device_set_wakeup_enable(dev, 0);
else
return -EINVAL;
return n;
}
-static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store);
+static DEVICE_ATTR_RW(wakeup);
static ssize_t wakeup_count_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
unsigned long count = 0;
bool enabled = false;
@@ -379,10 +360,11 @@ static ssize_t wakeup_count_show(struct device *dev,
return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_count, 0444, wakeup_count_show, NULL);
+static DEVICE_ATTR_RO(wakeup_count);
static ssize_t wakeup_active_count_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
unsigned long count = 0;
bool enabled = false;
@@ -396,11 +378,11 @@ static ssize_t wakeup_active_count_show(struct device *dev,
return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_active_count, 0444, wakeup_active_count_show, NULL);
+static DEVICE_ATTR_RO(wakeup_active_count);
static ssize_t wakeup_abort_count_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
unsigned long count = 0;
bool enabled = false;
@@ -414,7 +396,7 @@ static ssize_t wakeup_abort_count_show(struct device *dev,
return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_abort_count, 0444, wakeup_abort_count_show, NULL);
+static DEVICE_ATTR_RO(wakeup_abort_count);
static ssize_t wakeup_expire_count_show(struct device *dev,
struct device_attribute *attr,
@@ -432,10 +414,10 @@ static ssize_t wakeup_expire_count_show(struct device *dev,
return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_expire_count, 0444, wakeup_expire_count_show, NULL);
+static DEVICE_ATTR_RO(wakeup_expire_count);
static ssize_t wakeup_active_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
unsigned int active = 0;
bool enabled = false;
@@ -449,10 +431,11 @@ static ssize_t wakeup_active_show(struct device *dev,
return enabled ? sprintf(buf, "%u\n", active) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_active, 0444, wakeup_active_show, NULL);
+static DEVICE_ATTR_RO(wakeup_active);
-static ssize_t wakeup_total_time_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t wakeup_total_time_ms_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
s64 msec = 0;
bool enabled = false;
@@ -466,10 +449,10 @@ static ssize_t wakeup_total_time_show(struct device *dev,
return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_total_time_ms, 0444, wakeup_total_time_show, NULL);
+static DEVICE_ATTR_RO(wakeup_total_time_ms);
-static ssize_t wakeup_max_time_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t wakeup_max_time_ms_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
s64 msec = 0;
bool enabled = false;
@@ -483,10 +466,11 @@ static ssize_t wakeup_max_time_show(struct device *dev,
return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_max_time_ms, 0444, wakeup_max_time_show, NULL);
+static DEVICE_ATTR_RO(wakeup_max_time_ms);
-static ssize_t wakeup_last_time_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t wakeup_last_time_ms_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
s64 msec = 0;
bool enabled = false;
@@ -500,12 +484,12 @@ static ssize_t wakeup_last_time_show(struct device *dev,
return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_last_time_ms, 0444, wakeup_last_time_show, NULL);
+static DEVICE_ATTR_RO(wakeup_last_time_ms);
#ifdef CONFIG_PM_AUTOSLEEP
-static ssize_t wakeup_prevent_sleep_time_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t wakeup_prevent_sleep_time_ms_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
s64 msec = 0;
bool enabled = false;
@@ -519,40 +503,39 @@ static ssize_t wakeup_prevent_sleep_time_show(struct device *dev,
return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
}
-static DEVICE_ATTR(wakeup_prevent_sleep_time_ms, 0444,
- wakeup_prevent_sleep_time_show, NULL);
+static DEVICE_ATTR_RO(wakeup_prevent_sleep_time_ms);
#endif /* CONFIG_PM_AUTOSLEEP */
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM_ADVANCED_DEBUG
-static ssize_t rtpm_usagecount_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t runtime_usage_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", atomic_read(&dev->power.usage_count));
}
+static DEVICE_ATTR_RO(runtime_usage);
-static ssize_t rtpm_children_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t runtime_active_kids_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
return sprintf(buf, "%d\n", dev->power.ignore_children ?
0 : atomic_read(&dev->power.child_count));
}
+static DEVICE_ATTR_RO(runtime_active_kids);
-static ssize_t rtpm_enabled_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t runtime_enabled_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- if ((dev->power.disable_depth) && (dev->power.runtime_auto == false))
+ if (dev->power.disable_depth && (dev->power.runtime_auto == false))
return sprintf(buf, "disabled & forbidden\n");
- else if (dev->power.disable_depth)
+ if (dev->power.disable_depth)
return sprintf(buf, "disabled\n");
- else if (dev->power.runtime_auto == false)
+ if (dev->power.runtime_auto == false)
return sprintf(buf, "forbidden\n");
return sprintf(buf, "enabled\n");
}
-
-static DEVICE_ATTR(runtime_usage, 0444, rtpm_usagecount_show, NULL);
-static DEVICE_ATTR(runtime_active_kids, 0444, rtpm_children_show, NULL);
-static DEVICE_ATTR(runtime_enabled, 0444, rtpm_enabled_show, NULL);
+static DEVICE_ATTR_RO(runtime_enabled);
#ifdef CONFIG_PM_SLEEP
static ssize_t async_show(struct device *dev, struct device_attribute *attr,
@@ -566,23 +549,16 @@ static ssize_t async_show(struct device *dev, struct device_attribute *attr,
static ssize_t async_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t n)
{
- char *cp;
- int len = n;
-
- cp = memchr(buf, '\n', n);
- if (cp)
- len = cp - buf;
- if (len == sizeof _enabled - 1 && strncmp(buf, _enabled, len) == 0)
+ if (sysfs_streq(buf, _enabled))
device_enable_async_suspend(dev);
- else if (len == sizeof _disabled - 1 &&
- strncmp(buf, _disabled, len) == 0)
+ else if (sysfs_streq(buf, _disabled))
device_disable_async_suspend(dev);
else
return -EINVAL;
return n;
}
-static DEVICE_ATTR(async, 0644, async_show, async_store);
+static DEVICE_ATTR_RW(async);
#endif /* CONFIG_PM_SLEEP */
#endif /* CONFIG_PM_ADVANCED_DEBUG */
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index ae0429827f31..a8ac86e4d79e 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -33,7 +33,6 @@ static int dev_pm_attach_wake_irq(struct device *dev, int irq,
struct wake_irq *wirq)
{
unsigned long flags;
- int err;
if (!dev || !wirq)
return -EINVAL;
@@ -45,12 +44,11 @@ static int dev_pm_attach_wake_irq(struct device *dev, int irq,
return -EEXIST;
}
- err = device_wakeup_attach_irq(dev, wirq);
- if (!err)
- dev->power.wakeirq = wirq;
+ dev->power.wakeirq = wirq;
+ device_wakeup_attach_irq(dev, wirq);
spin_unlock_irqrestore(&dev->power.lock, flags);
- return err;
+ return 0;
}
/**
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 38559f04db2c..ea01621ed769 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -19,6 +19,11 @@
#include "power.h"
+#ifndef CONFIG_SUSPEND
+suspend_state_t pm_suspend_target_state;
+#define pm_suspend_target_state (PM_SUSPEND_ON)
+#endif
+
/*
* If set, the suspend/hibernate code will abort transitions to a sleep state
* if wakeup events are registered during or immediately before the transition.
@@ -268,6 +273,9 @@ int device_wakeup_enable(struct device *dev)
if (!dev || !dev->power.can_wakeup)
return -EINVAL;
+ if (pm_suspend_target_state != PM_SUSPEND_ON)
+ dev_dbg(dev, "Suspicious %s() during system transition!\n", __func__);
+
ws = wakeup_source_register(dev_name(dev));
if (!ws)
return -ENOMEM;
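
Because pm_suspend_target_state is shadowed by a constant macro when CONFIG_SUSPEND is unset, the new check above costs nothing in such configurations: after preprocessing it reduces to a comparison of PM_SUSPEND_ON with itself, which the compiler discards as dead code (illustrative expansion, not code from the patch):

    /* !CONFIG_SUSPEND: the macro turns the test into dead code */
    if ((PM_SUSPEND_ON) != PM_SUSPEND_ON)
            dev_dbg(dev, "Suspicious %s() during system transition!\n",
                    __func__);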
@@ -291,22 +299,19 @@ EXPORT_SYMBOL_GPL(device_wakeup_enable);
*
* Call under the device's power.lock lock.
*/
-int device_wakeup_attach_irq(struct device *dev,
+void device_wakeup_attach_irq(struct device *dev,
struct wake_irq *wakeirq)
{
struct wakeup_source *ws;
ws = dev->power.wakeup;
- if (!ws) {
- dev_err(dev, "forgot to call call device_init_wakeup?\n");
- return -EINVAL;
- }
+ if (!ws)
+ return;
if (ws->wakeirq)
- return -EEXIST;
+ dev_err(dev, "Leftover wakeup IRQ found, overriding\n");
ws->wakeirq = wakeirq;
- return 0;
}
/**
@@ -448,9 +453,7 @@ int device_init_wakeup(struct device *dev, bool enable)
device_set_wakeup_capable(dev, true);
ret = device_wakeup_enable(dev);
} else {
- if (dev->power.can_wakeup)
- device_wakeup_disable(dev);
-
+ device_wakeup_disable(dev);
device_set_wakeup_capable(dev, false);
}
@@ -464,9 +467,6 @@ EXPORT_SYMBOL_GPL(device_init_wakeup);
*/
int device_set_wakeup_enable(struct device *dev, bool enable)
{
- if (!dev || !dev->power.can_wakeup)
- return -EINVAL;
-
return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
}
EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 613ba820f545..96aa71c93daf 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -1418,3 +1418,10 @@ int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
return fwnode_call_int_op(fwnode, graph_parse_endpoint, endpoint);
}
EXPORT_SYMBOL(fwnode_graph_parse_endpoint);
+
+void *device_get_match_data(struct device *dev)
+{
+ return fwnode_call_ptr_op(dev_fwnode(dev), device_get_match_data,
+ dev);
+}
+EXPORT_SYMBOL_GPL(device_get_match_data);
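
device_get_match_data() forwards to the firmware-node backend (OF, ACPI, ...) so drivers can fetch their match data without caring which firmware interface matched them. A sketch of a typical consumer, with a hypothetical chip-info structure and compatible string (and assuming the OF backend implements the new fwnode op):

    struct foo_chip_info {
            u32 max_freq_hz;
    };

    static const struct foo_chip_info foo_info = {
            .max_freq_hz = 100000,
    };

    static const struct of_device_id foo_of_match[] = {
            { .compatible = "vendor,foo", .data = &foo_info },
            { /* sentinel */ }
    };

    static int foo_probe(struct platform_device *pdev)
    {
            const struct foo_chip_info *info;

            info = device_get_match_data(&pdev->dev);
            if (!info)
                    return -ENODEV;

            /* configure the device from info->max_freq_hz ... */
            return 0;
    }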
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index 3a1535d812d8..067073e4beb1 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -6,7 +6,6 @@
config REGMAP
default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ)
select IRQ_DOMAIN if REGMAP_IRQ
- select REGMAP_HWSPINLOCK if HWSPINLOCK=y
bool
config REGCACHE_COMPRESSED
@@ -39,5 +38,6 @@ config REGMAP_MMIO
config REGMAP_IRQ
bool
-config REGMAP_HWSPINLOCK
- bool
+config REGMAP_SOUNDWIRE
+ tristate
+ depends on SOUNDWIRE_BUS
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
index 0d298c446108..22d263cca395 100644
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_REGMAP_SPMI) += regmap-spmi.o
obj-$(CONFIG_REGMAP_MMIO) += regmap-mmio.o
obj-$(CONFIG_REGMAP_IRQ) += regmap-irq.o
obj-$(CONFIG_REGMAP_W1) += regmap-w1.o
+obj-$(CONFIG_REGMAP_SOUNDWIRE) += regmap-sdw.o
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 8641183cac2f..53785e0e297a 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -77,6 +77,7 @@ struct regmap {
int async_ret;
#ifdef CONFIG_DEBUG_FS
+ bool debugfs_disable;
struct dentry *debugfs;
const char *debugfs_name;
@@ -215,10 +216,17 @@ struct regmap_field {
extern void regmap_debugfs_initcall(void);
extern void regmap_debugfs_init(struct regmap *map, const char *name);
extern void regmap_debugfs_exit(struct regmap *map);
+
+static inline void regmap_debugfs_disable(struct regmap *map)
+{
+ map->debugfs_disable = true;
+}
+
#else
static inline void regmap_debugfs_initcall(void) { }
static inline void regmap_debugfs_init(struct regmap *map, const char *name) { }
static inline void regmap_debugfs_exit(struct regmap *map) { }
+static inline void regmap_debugfs_disable(struct regmap *map) { }
#endif
/* regcache core declarations */
diff --git a/drivers/base/regmap/regcache-flat.c b/drivers/base/regmap/regcache-flat.c
index 4d2e50bfc726..bc6cd88b8cc6 100644
--- a/drivers/base/regmap/regcache-flat.c
+++ b/drivers/base/regmap/regcache-flat.c
@@ -37,9 +37,12 @@ static int regcache_flat_init(struct regmap *map)
cache = map->cache;
- for (i = 0; i < map->num_reg_defaults; i++)
- cache[regcache_flat_get_index(map, map->reg_defaults[i].reg)] =
- map->reg_defaults[i].def;
+ for (i = 0; i < map->num_reg_defaults; i++) {
+ unsigned int reg = map->reg_defaults[i].reg;
+ unsigned int index = regcache_flat_get_index(map, reg);
+
+ cache[index] = map->reg_defaults[i].def;
+ }
return 0;
}
@@ -56,8 +59,9 @@ static int regcache_flat_read(struct regmap *map,
unsigned int reg, unsigned int *value)
{
unsigned int *cache = map->cache;
+ unsigned int index = regcache_flat_get_index(map, reg);
- *value = cache[regcache_flat_get_index(map, reg)];
+ *value = cache[index];
return 0;
}
@@ -66,8 +70,9 @@ static int regcache_flat_write(struct regmap *map, unsigned int reg,
unsigned int value)
{
unsigned int *cache = map->cache;
+ unsigned int index = regcache_flat_get_index(map, reg);
- cache[regcache_flat_get_index(map, reg)] = value;
+ cache[index] = value;
return 0;
}
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 36ce3511c733..f3266334063e 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -529,6 +529,18 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
struct regmap_range_node *range_node;
const char *devname = "dummy";
+ /*
+ * Userspace can initiate reads from the hardware over debugfs.
+ * Normally internal regmap structures and buffers are protected with
+ * a mutex or a spinlock, but if the regmap owner decided to disable
+ * all locking mechanisms, this is no longer the case. For safety:
+ * don't create the debugfs entries if locking is disabled.
+ */
+ if (map->debugfs_disable) {
+ dev_dbg(map->dev, "regmap locking disabled - not creating debugfs entries\n");
+ return;
+ }
+
/* If we don't have the debugfs root yet, postpone init */
if (!regmap_debugfs_root) {
struct regmap_debugfs_node *node;
diff --git a/drivers/base/regmap/regmap-sdw.c b/drivers/base/regmap/regmap-sdw.c
new file mode 100644
index 000000000000..50a66382d87d
--- /dev/null
+++ b/drivers/base/regmap/regmap-sdw.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright(c) 2015-17 Intel Corporation.
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/soundwire/sdw.h>
+#include "internal.h"
+
+static int regmap_sdw_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct device *dev = context;
+ struct sdw_slave *slave = dev_to_sdw_dev(dev);
+
+ return sdw_write(slave, reg, val);
+}
+
+static int regmap_sdw_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct device *dev = context;
+ struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ int read;
+
+ read = sdw_read(slave, reg);
+ if (read < 0)
+ return read;
+
+ *val = read;
+ return 0;
+}
+
+static struct regmap_bus regmap_sdw = {
+ .reg_read = regmap_sdw_read,
+ .reg_write = regmap_sdw_write,
+ .reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian_default = REGMAP_ENDIAN_LITTLE,
+};
+
+static int regmap_sdw_config_check(const struct regmap_config *config)
+{
+ /* All register values are 8 bits wide, as per the MIPI SoundWire 1.0 spec */
+ if (config->val_bits != 8)
+ return -ENOTSUPP;
+
+ /* Register addresses are 32 bits wide */
+ if (config->reg_bits != 32)
+ return -ENOTSUPP;
+
+ if (config->pad_bits != 0)
+ return -ENOTSUPP;
+
+ return 0;
+}
+
+struct regmap *__regmap_init_sdw(struct sdw_slave *sdw,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ int ret;
+
+ ret = regmap_sdw_config_check(config);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return __regmap_init(&sdw->dev, &regmap_sdw,
+ &sdw->dev, config, lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__regmap_init_sdw);
+
+struct regmap *__devm_regmap_init_sdw(struct sdw_slave *sdw,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ int ret;
+
+ ret = regmap_sdw_config_check(config);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return __devm_regmap_init(&sdw->dev, &regmap_sdw,
+ &sdw->dev, config, lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_sdw);
+
+MODULE_DESCRIPTION("Regmap SoundWire Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 8d516a9bfc01..ee302ccdfbc8 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -414,7 +414,6 @@ static unsigned int regmap_parse_64_native(const void *buf)
}
#endif
-#ifdef REGMAP_HWSPINLOCK
static void regmap_lock_hwlock(void *__map)
{
struct regmap *map = __map;
@@ -457,7 +456,11 @@ static void regmap_unlock_hwlock_irqrestore(void *__map)
hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
}
-#endif
+
+static void regmap_lock_unlock_none(void *__map)
+{
+
+}
static void regmap_lock_mutex(void *__map)
{
@@ -669,16 +672,26 @@ struct regmap *__regmap_init(struct device *dev,
goto err;
}
- if (config->lock && config->unlock) {
+ if (config->name) {
+ map->name = kstrdup_const(config->name, GFP_KERNEL);
+ if (!map->name) {
+ ret = -ENOMEM;
+ goto err_map;
+ }
+ }
+
+ if (config->disable_locking) {
+ map->lock = map->unlock = regmap_lock_unlock_none;
+ regmap_debugfs_disable(map);
+ } else if (config->lock && config->unlock) {
map->lock = config->lock;
map->unlock = config->unlock;
map->lock_arg = config->lock_arg;
- } else if (config->hwlock_id) {
-#ifdef REGMAP_HWSPINLOCK
+ } else if (config->use_hwlock) {
map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
if (!map->hwlock) {
ret = -ENXIO;
- goto err_map;
+ goto err_name;
}
switch (config->hwlock_mode) {
@@ -697,10 +710,6 @@ struct regmap *__regmap_init(struct device *dev,
}
map->lock_arg = map;
-#else
- ret = -EINVAL;
- goto err_map;
-#endif
} else {
if ((bus && bus->fast_io) ||
config->fast_io) {
@@ -762,14 +771,15 @@ struct regmap *__regmap_init(struct device *dev,
map->volatile_reg = config->volatile_reg;
map->precious_reg = config->precious_reg;
map->cache_type = config->cache_type;
- map->name = config->name;
spin_lock_init(&map->async_lock);
INIT_LIST_HEAD(&map->async_list);
INIT_LIST_HEAD(&map->async_free);
init_waitqueue_head(&map->async_waitq);
- if (config->read_flag_mask || config->write_flag_mask) {
+ if (config->read_flag_mask ||
+ config->write_flag_mask ||
+ config->zero_flag_mask) {
map->read_flag_mask = config->read_flag_mask;
map->write_flag_mask = config->write_flag_mask;
} else if (bus) {
@@ -1116,8 +1126,10 @@ err_range:
regmap_range_exit(map);
kfree(map->work_buf);
err_hwlock:
- if (IS_ENABLED(REGMAP_HWSPINLOCK) && map->hwlock)
+ if (map->hwlock)
hwspin_lock_free(map->hwlock);
+err_name:
+ kfree_const(map->name);
err_map:
kfree(map);
err:
@@ -1305,8 +1317,9 @@ void regmap_exit(struct regmap *map)
kfree(async->work_buf);
kfree(async);
}
- if (IS_ENABLED(REGMAP_HWSPINLOCK) && map->hwlock)
+ if (map->hwlock)
hwspin_lock_free(map->hwlock);
+ kfree_const(map->name);
kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);
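
The new disable_locking path, combined with regmap_debugfs_disable(), is for callers that guarantee all accesses are already serialized: regmap then takes no lock at all and refuses to expose the map through debugfs (see the regmap-debugfs.c hunk above). A sketch of such a configuration, where the serialization guarantee is entirely the caller's responsibility:

    static const struct regmap_config foo_unlocked_config = {
            .reg_bits = 8,
            .val_bits = 8,
            /*
             * No regmap-internal locking; every read/write must already
             * be serialized by the caller.  Debugfs entries are skipped.
             */
            .disable_locking = true,
    };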
@@ -2423,13 +2436,15 @@ static int _regmap_bus_read(void *context, unsigned int reg,
{
int ret;
struct regmap *map = context;
+ void *work_val = map->work_buf + map->format.reg_bytes +
+ map->format.pad_bytes;
if (!map->format.parse_val)
return -EINVAL;
- ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
+ ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes);
if (ret == 0)
- *val = map->format.parse_val(map->work_buf);
+ *val = map->format.parse_val(work_val);
return ret;
}
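
The _regmap_bus_read() change above is a correctness fix: map->work_buf holds the formatted register address first, then padding, then the value, so reading into the start of the buffer would overwrite the address bytes that the raw read still needs. Schematically (illustrative comment, not from the patch):

    /*
     * map->work_buf layout during a register read:
     *
     *   +-----------+-----------+-----------+
     *   | reg_bytes | pad_bytes | val_bytes |
     *   +-----------+-----------+-----------+
     *   ^                       ^
     *   work_buf                work_val = work_buf + reg_bytes + pad_bytes
     */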
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 442e777bdfb2..728075214959 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -6619,43 +6619,27 @@ static void DAC960_DestroyProcEntries(DAC960_Controller_T *Controller)
#ifdef DAC960_GAM_MINOR
-/*
- * DAC960_gam_ioctl is the ioctl function for performing RAID operations.
-*/
-
-static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
- unsigned long Argument)
+static long DAC960_gam_get_controller_info(DAC960_ControllerInfo_T __user *UserSpaceControllerInfo)
{
- long ErrorCode = 0;
- if (!capable(CAP_SYS_ADMIN)) return -EACCES;
-
- mutex_lock(&DAC960_mutex);
- switch (Request)
- {
- case DAC960_IOCTL_GET_CONTROLLER_COUNT:
- ErrorCode = DAC960_ControllerCount;
- break;
- case DAC960_IOCTL_GET_CONTROLLER_INFO:
- {
- DAC960_ControllerInfo_T __user *UserSpaceControllerInfo =
- (DAC960_ControllerInfo_T __user *) Argument;
DAC960_ControllerInfo_T ControllerInfo;
DAC960_Controller_T *Controller;
int ControllerNumber;
+ long ErrorCode;
+
if (UserSpaceControllerInfo == NULL)
ErrorCode = -EINVAL;
else ErrorCode = get_user(ControllerNumber,
&UserSpaceControllerInfo->ControllerNumber);
if (ErrorCode != 0)
- break;
+ goto out;
ErrorCode = -ENXIO;
if (ControllerNumber < 0 ||
ControllerNumber > DAC960_ControllerCount - 1) {
- break;
+ goto out;
}
Controller = DAC960_Controllers[ControllerNumber];
if (Controller == NULL)
- break;
+ goto out;
memset(&ControllerInfo, 0, sizeof(DAC960_ControllerInfo_T));
ControllerInfo.ControllerNumber = ControllerNumber;
ControllerInfo.FirmwareType = Controller->FirmwareType;
@@ -6670,12 +6654,12 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
strcpy(ControllerInfo.FirmwareVersion, Controller->FirmwareVersion);
ErrorCode = (copy_to_user(UserSpaceControllerInfo, &ControllerInfo,
sizeof(DAC960_ControllerInfo_T)) ? -EFAULT : 0);
- break;
- }
- case DAC960_IOCTL_V1_EXECUTE_COMMAND:
- {
- DAC960_V1_UserCommand_T __user *UserSpaceUserCommand =
- (DAC960_V1_UserCommand_T __user *) Argument;
+out:
+ return ErrorCode;
+}
+
+static long DAC960_gam_v1_execute_command(DAC960_V1_UserCommand_T __user *UserSpaceUserCommand)
+{
DAC960_V1_UserCommand_T UserCommand;
DAC960_Controller_T *Controller;
DAC960_Command_T *Command = NULL;
@@ -6688,39 +6672,41 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
int ControllerNumber, DataTransferLength;
unsigned char *DataTransferBuffer = NULL;
dma_addr_t DataTransferBufferDMA;
+ long ErrorCode;
+
if (UserSpaceUserCommand == NULL) {
ErrorCode = -EINVAL;
- break;
+ goto out;
}
if (copy_from_user(&UserCommand, UserSpaceUserCommand,
sizeof(DAC960_V1_UserCommand_T))) {
ErrorCode = -EFAULT;
- break;
+ goto out;
}
ControllerNumber = UserCommand.ControllerNumber;
ErrorCode = -ENXIO;
if (ControllerNumber < 0 ||
ControllerNumber > DAC960_ControllerCount - 1)
- break;
+ goto out;
Controller = DAC960_Controllers[ControllerNumber];
if (Controller == NULL)
- break;
+ goto out;
ErrorCode = -EINVAL;
if (Controller->FirmwareType != DAC960_V1_Controller)
- break;
+ goto out;
CommandOpcode = UserCommand.CommandMailbox.Common.CommandOpcode;
DataTransferLength = UserCommand.DataTransferLength;
if (CommandOpcode & 0x80)
- break;
+ goto out;
if (CommandOpcode == DAC960_V1_DCDB)
{
if (copy_from_user(&DCDB, UserCommand.DCDB,
sizeof(DAC960_V1_DCDB_T))) {
ErrorCode = -EFAULT;
- break;
+ goto out;
}
if (DCDB.Channel >= DAC960_V1_MaxChannels)
- break;
+ goto out;
if (!((DataTransferLength == 0 &&
DCDB.Direction
== DAC960_V1_DCDB_NoDataTransfer) ||
@@ -6730,15 +6716,15 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
(DataTransferLength < 0 &&
DCDB.Direction
== DAC960_V1_DCDB_DataTransferSystemToDevice)))
- break;
+ goto out;
if (((DCDB.TransferLengthHigh4 << 16) | DCDB.TransferLength)
!= abs(DataTransferLength))
- break;
+ goto out;
DCDB_IOBUF = pci_alloc_consistent(Controller->PCIDevice,
sizeof(DAC960_V1_DCDB_T), &DCDB_IOBUFDMA);
if (DCDB_IOBUF == NULL) {
ErrorCode = -ENOMEM;
- break;
+ goto out;
}
}
ErrorCode = -ENOMEM;
@@ -6748,19 +6734,19 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
DataTransferLength,
&DataTransferBufferDMA);
if (DataTransferBuffer == NULL)
- break;
+ goto out;
}
else if (DataTransferLength < 0)
{
DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice,
-DataTransferLength, &DataTransferBufferDMA);
if (DataTransferBuffer == NULL)
- break;
+ goto out;
if (copy_from_user(DataTransferBuffer,
UserCommand.DataTransferBuffer,
-DataTransferLength)) {
ErrorCode = -EFAULT;
- break;
+ goto out;
}
}
if (CommandOpcode == DAC960_V1_DCDB)
@@ -6837,12 +6823,12 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
if (DCDB_IOBUF != NULL)
pci_free_consistent(Controller->PCIDevice, sizeof(DAC960_V1_DCDB_T),
DCDB_IOBUF, DCDB_IOBUFDMA);
- break;
- }
- case DAC960_IOCTL_V2_EXECUTE_COMMAND:
- {
- DAC960_V2_UserCommand_T __user *UserSpaceUserCommand =
- (DAC960_V2_UserCommand_T __user *) Argument;
+ out:
+ return ErrorCode;
+}
+
+static long DAC960_gam_v2_execute_command(DAC960_V2_UserCommand_T __user *UserSpaceUserCommand)
+{
DAC960_V2_UserCommand_T UserCommand;
DAC960_Controller_T *Controller;
DAC960_Command_T *Command = NULL;
@@ -6855,26 +6841,26 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
dma_addr_t DataTransferBufferDMA;
unsigned char *RequestSenseBuffer = NULL;
dma_addr_t RequestSenseBufferDMA;
+ long ErrorCode = -EINVAL;
- ErrorCode = -EINVAL;
if (UserSpaceUserCommand == NULL)
- break;
+ goto out;
if (copy_from_user(&UserCommand, UserSpaceUserCommand,
sizeof(DAC960_V2_UserCommand_T))) {
ErrorCode = -EFAULT;
- break;
+ goto out;
}
ErrorCode = -ENXIO;
ControllerNumber = UserCommand.ControllerNumber;
if (ControllerNumber < 0 ||
ControllerNumber > DAC960_ControllerCount - 1)
- break;
+ goto out;
Controller = DAC960_Controllers[ControllerNumber];
if (Controller == NULL)
- break;
+ goto out;
if (Controller->FirmwareType != DAC960_V2_Controller){
ErrorCode = -EINVAL;
- break;
+ goto out;
}
DataTransferLength = UserCommand.DataTransferLength;
ErrorCode = -ENOMEM;
@@ -6884,14 +6870,14 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
DataTransferLength,
&DataTransferBufferDMA);
if (DataTransferBuffer == NULL)
- break;
+ goto out;
}
else if (DataTransferLength < 0)
{
DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice,
-DataTransferLength, &DataTransferBufferDMA);
if (DataTransferBuffer == NULL)
- break;
+ goto out;
if (copy_from_user(DataTransferBuffer,
UserCommand.DataTransferBuffer,
-DataTransferLength)) {
@@ -7001,42 +6987,44 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
if (RequestSenseBuffer != NULL)
pci_free_consistent(Controller->PCIDevice, RequestSenseLength,
RequestSenseBuffer, RequestSenseBufferDMA);
- break;
- }
- case DAC960_IOCTL_V2_GET_HEALTH_STATUS:
- {
- DAC960_V2_GetHealthStatus_T __user *UserSpaceGetHealthStatus =
- (DAC960_V2_GetHealthStatus_T __user *) Argument;
+out:
+ return ErrorCode;
+}
+
+static long DAC960_gam_v2_get_health_status(DAC960_V2_GetHealthStatus_T __user *UserSpaceGetHealthStatus)
+{
DAC960_V2_GetHealthStatus_T GetHealthStatus;
DAC960_V2_HealthStatusBuffer_T HealthStatusBuffer;
DAC960_Controller_T *Controller;
int ControllerNumber;
+ long ErrorCode;
+
if (UserSpaceGetHealthStatus == NULL) {
ErrorCode = -EINVAL;
- break;
+ goto out;
}
if (copy_from_user(&GetHealthStatus, UserSpaceGetHealthStatus,
sizeof(DAC960_V2_GetHealthStatus_T))) {
ErrorCode = -EFAULT;
- break;
+ goto out;
}
ErrorCode = -ENXIO;
ControllerNumber = GetHealthStatus.ControllerNumber;
if (ControllerNumber < 0 ||
ControllerNumber > DAC960_ControllerCount - 1)
- break;
+ goto out;
Controller = DAC960_Controllers[ControllerNumber];
if (Controller == NULL)
- break;
+ goto out;
if (Controller->FirmwareType != DAC960_V2_Controller) {
ErrorCode = -EINVAL;
- break;
+ goto out;
}
if (copy_from_user(&HealthStatusBuffer,
GetHealthStatus.HealthStatusBuffer,
sizeof(DAC960_V2_HealthStatusBuffer_T))) {
ErrorCode = -EFAULT;
- break;
+ goto out;
}
ErrorCode = wait_event_interruptible_timeout(Controller->HealthStatusWaitQueue,
!(Controller->V2.HealthStatusBuffer->StatusChangeCounter
@@ -7046,7 +7034,7 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
DAC960_MonitoringTimerInterval);
if (ErrorCode == -ERESTARTSYS) {
ErrorCode = -EINTR;
- break;
+ goto out;
}
if (copy_to_user(GetHealthStatus.HealthStatusBuffer,
Controller->V2.HealthStatusBuffer,
@@ -7054,7 +7042,39 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
ErrorCode = -EFAULT;
else
ErrorCode = 0;
- }
+
+out:
+ return ErrorCode;
+}
+
+/*
+ * DAC960_gam_ioctl is the ioctl function for performing RAID operations.
+*/
+
+static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
+ unsigned long Argument)
+{
+ long ErrorCode = 0;
+ void __user *argp = (void __user *)Argument;
+ if (!capable(CAP_SYS_ADMIN)) return -EACCES;
+
+ mutex_lock(&DAC960_mutex);
+ switch (Request)
+ {
+ case DAC960_IOCTL_GET_CONTROLLER_COUNT:
+ ErrorCode = DAC960_ControllerCount;
+ break;
+ case DAC960_IOCTL_GET_CONTROLLER_INFO:
+ ErrorCode = DAC960_gam_get_controller_info(argp);
+ break;
+ case DAC960_IOCTL_V1_EXECUTE_COMMAND:
+ ErrorCode = DAC960_gam_v1_execute_command(argp);
+ break;
+ case DAC960_IOCTL_V2_EXECUTE_COMMAND:
+ ErrorCode = DAC960_gam_v2_execute_command(argp);
+ break;
+ case DAC960_IOCTL_V2_GET_HEALTH_STATUS:
+ ErrorCode = DAC960_gam_v2_get_health_status(argp);
break;
default:
ErrorCode = -ENOTTY;
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 40579d0cb3d1..ad9b687a236a 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -20,6 +20,10 @@ config BLK_DEV_NULL_BLK
tristate "Null test block driver"
select CONFIGFS_FS
+config BLK_DEV_NULL_BLK_FAULT_INJECTION
+ bool "Support fault injection for Null test block driver"
+ depends on BLK_DEV_NULL_BLK && FAULT_INJECTION
+
config BLK_DEV_FD
tristate "Normal floppy disk support"
depends on ARCH_MAY_HAVE_PC_FDC
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 9220f8e833d0..c0ebda1283cc 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -112,8 +112,7 @@ enum frame_flags {
struct frame {
struct list_head head;
u32 tag;
- struct timeval sent; /* high-res time packet was sent */
- u32 sent_jiffs; /* low-res jiffies-based sent time */
+ ktime_t sent; /* high-res time packet was sent */
ulong waited;
ulong waited_total;
struct aoetgt *t; /* parent target I belong to */
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 812fed069708..540bb60cd071 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -398,8 +398,7 @@ aoecmd_ata_rw(struct aoedev *d)
skb = skb_clone(f->skb, GFP_ATOMIC);
if (skb) {
- do_gettimeofday(&f->sent);
- f->sent_jiffs = (u32) jiffies;
+ f->sent = ktime_get();
__skb_queue_head_init(&queue);
__skb_queue_tail(&queue, skb);
aoenet_xmit(&queue);
@@ -489,8 +488,7 @@ resend(struct aoedev *d, struct frame *f)
skb = skb_clone(skb, GFP_ATOMIC);
if (skb == NULL)
return;
- do_gettimeofday(&f->sent);
- f->sent_jiffs = (u32) jiffies;
+ f->sent = ktime_get();
__skb_queue_head_init(&queue);
__skb_queue_tail(&queue, skb);
aoenet_xmit(&queue);
@@ -499,33 +497,17 @@ resend(struct aoedev *d, struct frame *f)
static int
tsince_hr(struct frame *f)
{
- struct timeval now;
- int n;
+ u64 delta = ktime_to_ns(ktime_sub(ktime_get(), f->sent));
- do_gettimeofday(&now);
- n = now.tv_usec - f->sent.tv_usec;
- n += (now.tv_sec - f->sent.tv_sec) * USEC_PER_SEC;
+ /* delta is normally under 4.2 seconds, avoid 64-bit division */
+ if (likely(delta <= UINT_MAX))
+ return (u32)delta / NSEC_PER_USEC;
- if (n < 0)
- n = -n;
+ /* cap at INT_MAX microseconds (~35.8 min) to avoid int overflow */
+ if (delta > ((u64)INT_MAX * NSEC_PER_USEC))
+ return INT_MAX;
- /* For relatively long periods, use jiffies to avoid
- * discrepancies caused by updates to the system time.
- *
- * On system with HZ of 1000, 32-bits is over 49 days
- * worth of jiffies, or over 71 minutes worth of usecs.
- *
- * Jiffies overflow is handled by subtraction of unsigned ints:
- * (gdb) print (unsigned) 2 - (unsigned) 0xfffffffe
- * $3 = 4
- * (gdb)
- */
- if (n > USEC_PER_SEC / 4) {
- n = ((u32) jiffies) - f->sent_jiffs;
- n *= USEC_PER_SEC / HZ;
- }
-
- return n;
+ return div_u64(delta, NSEC_PER_USEC);
}
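
The bounds in tsince_hr() follow directly from the unit conversion (a quick sanity check, not part of the patch):

    /*
     * delta <= UINT_MAX ns (~4.29 s):          fast path, 32-bit division
     * delta >  INT_MAX * 1000 ns (~35.8 min):  clamp, result must fit an int
     * otherwise:                               div_u64(), 64-by-32 division
     */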
static int
@@ -589,7 +571,6 @@ reassign_frame(struct frame *f)
nf->waited = 0;
nf->waited_total = f->waited_total;
nf->sent = f->sent;
- nf->sent_jiffs = f->sent_jiffs;
f->skb = skb;
return nf;
@@ -633,8 +614,7 @@ probe(struct aoetgt *t)
skb = skb_clone(f->skb, GFP_ATOMIC);
if (skb) {
- do_gettimeofday(&f->sent);
- f->sent_jiffs = (u32) jiffies;
+ f->sent = ktime_get();
__skb_queue_head_init(&queue);
__skb_queue_tail(&queue, skb);
aoenet_xmit(&queue);
@@ -1432,10 +1412,8 @@ aoecmd_ata_id(struct aoedev *d)
d->timer.function = rexmit_timer;
skb = skb_clone(skb, GFP_ATOMIC);
- if (skb) {
- do_gettimeofday(&f->sent);
- f->sent_jiffs = (u32) jiffies;
- }
+ if (skb)
+ f->sent = ktime_get();
return skb;
}
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index bd97908c766f..9f4e6f502b84 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -953,7 +953,7 @@ static void drbd_bm_endio(struct bio *bio)
struct drbd_bm_aio_ctx *ctx = bio->bi_private;
struct drbd_device *device = ctx->device;
struct drbd_bitmap *b = device->bitmap;
- unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page);
+ unsigned int idx = bm_page_to_idx(bio_first_page_all(bio));
if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 &&
!bm_test_page_unchanged(b->bm_pages[idx]))
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 4b4697a1f963..0a0394aa1b9c 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1847,7 +1847,7 @@ int drbd_send(struct drbd_connection *connection, struct socket *sock,
void *buf, size_t size, unsigned msg_flags)
{
struct kvec iov = {.iov_base = buf, .iov_len = size};
- struct msghdr msg;
+ struct msghdr msg = {.msg_flags = msg_flags | MSG_NOSIGNAL};
int rv, sent = 0;
if (!sock)
@@ -1855,12 +1855,6 @@ int drbd_send(struct drbd_connection *connection, struct socket *sock,
/* THINK if (signal_pending) return ... ? */
- msg.msg_name = NULL;
- msg.msg_namelen = 0;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_flags = msg_flags | MSG_NOSIGNAL;
-
iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, size);
if (sock == connection->data.socket) {
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index cb2fa63f6bc0..c72dee0ef083 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -516,7 +516,8 @@ static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flag
struct msghdr msg = {
.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
};
- return kernel_recvmsg(sock, &msg, &iov, 1, size, msg.msg_flags);
+ iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, size);
+ return sock_recvmsg(sock, &msg, msg.msg_flags);
}
static int drbd_recv(struct drbd_connection *connection, void *buf, size_t size)
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index ad0477ae820f..6655893a3a7a 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -12,9 +12,9 @@
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
-#include <linux/lightnvm.h>
#include <linux/configfs.h>
#include <linux/badblocks.h>
+#include <linux/fault-inject.h>
#define SECTOR_SHIFT 9
#define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
@@ -27,6 +27,10 @@
#define TICKS_PER_SEC 50ULL
#define TIMER_INTERVAL (NSEC_PER_SEC / TICKS_PER_SEC)
+#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
+static DECLARE_FAULT_ATTR(null_timeout_attr);
+#endif
+
static inline u64 mb_per_tick(int mbps)
{
return (1 << 20) / TICKS_PER_SEC * ((u64) mbps);
@@ -107,7 +111,6 @@ struct nullb_device {
unsigned int hw_queue_depth; /* queue depth */
unsigned int index; /* index of the disk, only valid with a disk */
unsigned int mbps; /* Bandwidth throttle cap (in MB/s) */
- bool use_lightnvm; /* register as a LightNVM device */
bool blocking; /* blocking blk-mq device */
bool use_per_node_hctx; /* use per-node allocation for hardware context */
bool power; /* power on/off the device */
@@ -121,7 +124,6 @@ struct nullb {
unsigned int index;
struct request_queue *q;
struct gendisk *disk;
- struct nvm_dev *ndev;
struct blk_mq_tag_set *tag_set;
struct blk_mq_tag_set __tag_set;
unsigned int queue_depth;
@@ -139,7 +141,6 @@ static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static DEFINE_IDA(nullb_indexes);
-static struct kmem_cache *ppa_cache;
static struct blk_mq_tag_set tag_set;
enum {
@@ -166,6 +167,11 @@ static int g_home_node = NUMA_NO_NODE;
module_param_named(home_node, g_home_node, int, S_IRUGO);
MODULE_PARM_DESC(home_node, "Home node for the device");
+#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
+static char g_timeout_str[80];
+module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), S_IRUGO);
+#endif
+
static int g_queue_mode = NULL_Q_MQ;
static int null_param_store_val(const char *str, int *val, int min, int max)
@@ -208,10 +214,6 @@ static int nr_devices = 1;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");
-static bool g_use_lightnvm;
-module_param_named(use_lightnvm, g_use_lightnvm, bool, S_IRUGO);
-MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device");
-
static bool g_blocking;
module_param_named(blocking, g_blocking, bool, S_IRUGO);
MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");
@@ -345,7 +347,6 @@ NULLB_DEVICE_ATTR(blocksize, uint);
NULLB_DEVICE_ATTR(irqmode, uint);
NULLB_DEVICE_ATTR(hw_queue_depth, uint);
NULLB_DEVICE_ATTR(index, uint);
-NULLB_DEVICE_ATTR(use_lightnvm, bool);
NULLB_DEVICE_ATTR(blocking, bool);
NULLB_DEVICE_ATTR(use_per_node_hctx, bool);
NULLB_DEVICE_ATTR(memory_backed, bool);
@@ -455,7 +456,6 @@ static struct configfs_attribute *nullb_device_attrs[] = {
&nullb_device_attr_irqmode,
&nullb_device_attr_hw_queue_depth,
&nullb_device_attr_index,
- &nullb_device_attr_use_lightnvm,
&nullb_device_attr_blocking,
&nullb_device_attr_use_per_node_hctx,
&nullb_device_attr_power,
@@ -573,7 +573,6 @@ static struct nullb_device *null_alloc_dev(void)
dev->blocksize = g_bs;
dev->irqmode = g_irqmode;
dev->hw_queue_depth = g_hw_queue_depth;
- dev->use_lightnvm = g_use_lightnvm;
dev->blocking = g_blocking;
dev->use_per_node_hctx = g_use_per_node_hctx;
return dev;
@@ -1352,6 +1351,12 @@ static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
return BLK_QC_T_NONE;
}
+static enum blk_eh_timer_return null_rq_timed_out_fn(struct request *rq)
+{
+ pr_info("null: rq %p timed out\n", rq);
+ return BLK_EH_HANDLED;
+}
+
static int null_rq_prep_fn(struct request_queue *q, struct request *req)
{
struct nullb *nullb = q->queuedata;
@@ -1369,6 +1374,16 @@ static int null_rq_prep_fn(struct request_queue *q, struct request *req)
return BLKPREP_DEFER;
}
+static bool should_timeout_request(struct request *rq)
+{
+#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
+ if (g_timeout_str[0])
+ return should_fail(&null_timeout_attr, 1);
+#endif
+
+ return false;
+}
+
static void null_request_fn(struct request_queue *q)
{
struct request *rq;
@@ -1376,12 +1391,20 @@ static void null_request_fn(struct request_queue *q)
while ((rq = blk_fetch_request(q)) != NULL) {
struct nullb_cmd *cmd = rq->special;
- spin_unlock_irq(q->queue_lock);
- null_handle_cmd(cmd);
- spin_lock_irq(q->queue_lock);
+ if (!should_timeout_request(rq)) {
+ spin_unlock_irq(q->queue_lock);
+ null_handle_cmd(cmd);
+ spin_lock_irq(q->queue_lock);
+ }
}
}
+static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
+{
+ pr_info("null: rq %p timed out\n", rq);
+ return BLK_EH_HANDLED;
+}
+
static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -1399,12 +1422,16 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_mq_start_request(bd->rq);
- return null_handle_cmd(cmd);
+ if (!should_timeout_request(bd->rq))
+ return null_handle_cmd(cmd);
+
+ return BLK_STS_OK;
}
static const struct blk_mq_ops null_mq_ops = {
.queue_rq = null_queue_rq,
.complete = null_softirq_done_fn,
+ .timeout = null_timeout_rq,
};
static void cleanup_queue(struct nullb_queue *nq)
@@ -1423,170 +1450,6 @@ static void cleanup_queues(struct nullb *nullb)
kfree(nullb->queues);
}
-#ifdef CONFIG_NVM
-
-static void null_lnvm_end_io(struct request *rq, blk_status_t status)
-{
- struct nvm_rq *rqd = rq->end_io_data;
-
- /* XXX: lighnvm core seems to expect NVM_RSP_* values here.. */
- rqd->error = status ? -EIO : 0;
- nvm_end_io(rqd);
-
- blk_put_request(rq);
-}
-
-static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
-{
- struct request_queue *q = dev->q;
- struct request *rq;
- struct bio *bio = rqd->bio;
-
- rq = blk_mq_alloc_request(q,
- op_is_write(bio_op(bio)) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
- if (IS_ERR(rq))
- return -ENOMEM;
-
- blk_init_request_from_bio(rq, bio);
-
- rq->end_io_data = rqd;
-
- blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io);
-
- return 0;
-}
-
-static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
-{
- struct nullb *nullb = dev->q->queuedata;
- sector_t size = (sector_t)nullb->dev->size * 1024 * 1024ULL;
- sector_t blksize;
- struct nvm_id_group *grp;
-
- id->ver_id = 0x1;
- id->vmnt = 0;
- id->cap = 0x2;
- id->dom = 0x1;
-
- id->ppaf.blk_offset = 0;
- id->ppaf.blk_len = 16;
- id->ppaf.pg_offset = 16;
- id->ppaf.pg_len = 16;
- id->ppaf.sect_offset = 32;
- id->ppaf.sect_len = 8;
- id->ppaf.pln_offset = 40;
- id->ppaf.pln_len = 8;
- id->ppaf.lun_offset = 48;
- id->ppaf.lun_len = 8;
- id->ppaf.ch_offset = 56;
- id->ppaf.ch_len = 8;
-
- sector_div(size, nullb->dev->blocksize); /* convert size to pages */
- size >>= 8; /* concert size to pgs pr blk */
- grp = &id->grp;
- grp->mtype = 0;
- grp->fmtype = 0;
- grp->num_ch = 1;
- grp->num_pg = 256;
- blksize = size;
- size >>= 16;
- grp->num_lun = size + 1;
- sector_div(blksize, grp->num_lun);
- grp->num_blk = blksize;
- grp->num_pln = 1;
-
- grp->fpg_sz = nullb->dev->blocksize;
- grp->csecs = nullb->dev->blocksize;
- grp->trdt = 25000;
- grp->trdm = 25000;
- grp->tprt = 500000;
- grp->tprm = 500000;
- grp->tbet = 1500000;
- grp->tbem = 1500000;
- grp->mpos = 0x010101; /* single plane rwe */
- grp->cpar = nullb->dev->hw_queue_depth;
-
- return 0;
-}
-
-static void *null_lnvm_create_dma_pool(struct nvm_dev *dev, char *name)
-{
- mempool_t *virtmem_pool;
-
- virtmem_pool = mempool_create_slab_pool(64, ppa_cache);
- if (!virtmem_pool) {
- pr_err("null_blk: Unable to create virtual memory pool\n");
- return NULL;
- }
-
- return virtmem_pool;
-}
-
-static void null_lnvm_destroy_dma_pool(void *pool)
-{
- mempool_destroy(pool);
-}
-
-static void *null_lnvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
- gfp_t mem_flags, dma_addr_t *dma_handler)
-{
- return mempool_alloc(pool, mem_flags);
-}
-
-static void null_lnvm_dev_dma_free(void *pool, void *entry,
- dma_addr_t dma_handler)
-{
- mempool_free(entry, pool);
-}
-
-static struct nvm_dev_ops null_lnvm_dev_ops = {
- .identity = null_lnvm_id,
- .submit_io = null_lnvm_submit_io,
-
- .create_dma_pool = null_lnvm_create_dma_pool,
- .destroy_dma_pool = null_lnvm_destroy_dma_pool,
- .dev_dma_alloc = null_lnvm_dev_dma_alloc,
- .dev_dma_free = null_lnvm_dev_dma_free,
-
- /* Simulate nvme protocol restriction */
- .max_phys_sect = 64,
-};
-
-static int null_nvm_register(struct nullb *nullb)
-{
- struct nvm_dev *dev;
- int rv;
-
- dev = nvm_alloc_dev(0);
- if (!dev)
- return -ENOMEM;
-
- dev->q = nullb->q;
- memcpy(dev->name, nullb->disk_name, DISK_NAME_LEN);
- dev->ops = &null_lnvm_dev_ops;
-
- rv = nvm_register(dev);
- if (rv) {
- kfree(dev);
- return rv;
- }
- nullb->ndev = dev;
- return 0;
-}
-
-static void null_nvm_unregister(struct nullb *nullb)
-{
- nvm_unregister(nullb->ndev);
-}
-#else
-static int null_nvm_register(struct nullb *nullb)
-{
- pr_err("null_blk: CONFIG_NVM needs to be enabled for LightNVM\n");
- return -EINVAL;
-}
-static void null_nvm_unregister(struct nullb *nullb) {}
-#endif /* CONFIG_NVM */
-
static void null_del_dev(struct nullb *nullb)
{
struct nullb_device *dev = nullb->dev;
@@ -1595,10 +1458,7 @@ static void null_del_dev(struct nullb *nullb)
list_del_init(&nullb->list);
- if (dev->use_lightnvm)
- null_nvm_unregister(nullb);
- else
- del_gendisk(nullb->disk);
+ del_gendisk(nullb->disk);
if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) {
hrtimer_cancel(&nullb->bw_timer);
@@ -1610,8 +1470,7 @@ static void null_del_dev(struct nullb *nullb)
if (dev->queue_mode == NULL_Q_MQ &&
nullb->tag_set == &nullb->__tag_set)
blk_mq_free_tag_set(nullb->tag_set);
- if (!dev->use_lightnvm)
- put_disk(nullb->disk);
+ put_disk(nullb->disk);
cleanup_queues(nullb);
if (null_cache_active(nullb))
null_free_device_storage(nullb->dev, true);
@@ -1775,11 +1634,6 @@ static void null_validate_conf(struct nullb_device *dev)
{
dev->blocksize = round_down(dev->blocksize, 512);
dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);
- if (dev->use_lightnvm && dev->blocksize != 4096)
- dev->blocksize = 4096;
-
- if (dev->use_lightnvm && dev->queue_mode != NULL_Q_MQ)
- dev->queue_mode = NULL_Q_MQ;
if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) {
if (dev->submit_queues != nr_online_nodes)
@@ -1805,6 +1659,20 @@ static void null_validate_conf(struct nullb_device *dev)
dev->mbps = 0;
}
+static bool null_setup_fault(void)
+{
+#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
+ if (!g_timeout_str[0])
+ return true;
+
+ if (!setup_fault_attr(&null_timeout_attr, g_timeout_str))
+ return false;
+
+ null_timeout_attr.verbose = 0;
+#endif
+ return true;
+}
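
When CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION is enabled, the timeout module parameter takes the standard fault-injection descriptor "<interval>,<probability>,<space>,<times>" consumed by setup_fault_attr(). For instance (a hypothetical invocation):

    # every request "times out": interval=1, probability=100%, unlimited
    modprobe null_blk timeout="1,100,0,-1"

should_timeout_request() then deliberately skips handling such requests, and the 5-second timeout configured below completes them through the new timeout handlers instead.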
+
static int null_add_dev(struct nullb_device *dev)
{
struct nullb *nullb;
@@ -1838,6 +1706,10 @@ static int null_add_dev(struct nullb_device *dev)
if (rv)
goto out_cleanup_queues;
+ if (!null_setup_fault())
+ goto out_cleanup_queues;
+
+ nullb->tag_set->timeout = 5 * HZ;
nullb->q = blk_mq_init_queue(nullb->tag_set);
if (IS_ERR(nullb->q)) {
rv = -ENOMEM;
@@ -1861,8 +1733,14 @@ static int null_add_dev(struct nullb_device *dev)
rv = -ENOMEM;
goto out_cleanup_queues;
}
+
+ if (!null_setup_fault())
+ goto out_cleanup_blk_queue;
+
blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
+ blk_queue_rq_timed_out(nullb->q, null_rq_timed_out_fn);
+ nullb->q->rq_timeout = 5 * HZ;
rv = init_driver_queues(nullb);
if (rv)
goto out_cleanup_blk_queue;
@@ -1895,11 +1773,7 @@ static int null_add_dev(struct nullb_device *dev)
sprintf(nullb->disk_name, "nullb%d", nullb->index);
- if (dev->use_lightnvm)
- rv = null_nvm_register(nullb);
- else
- rv = null_gendisk_register(nullb);
-
+ rv = null_gendisk_register(nullb);
if (rv)
goto out_cleanup_blk_queue;
@@ -1938,18 +1812,6 @@ static int __init null_init(void)
g_bs = PAGE_SIZE;
}
- if (g_use_lightnvm && g_bs != 4096) {
- pr_warn("null_blk: LightNVM only supports 4k block size\n");
- pr_warn("null_blk: defaults block size to 4k\n");
- g_bs = 4096;
- }
-
- if (g_use_lightnvm && g_queue_mode != NULL_Q_MQ) {
- pr_warn("null_blk: LightNVM only supported for blk-mq\n");
- pr_warn("null_blk: defaults queue mode to blk-mq\n");
- g_queue_mode = NULL_Q_MQ;
- }
-
if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
if (g_submit_queues != nr_online_nodes) {
pr_warn("null_blk: submit_queues param is set to %u.\n",
@@ -1982,16 +1844,6 @@ static int __init null_init(void)
goto err_conf;
}
- if (g_use_lightnvm) {
- ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64),
- 0, 0, NULL);
- if (!ppa_cache) {
- pr_err("null_blk: unable to create ppa cache\n");
- ret = -ENOMEM;
- goto err_ppa;
- }
- }
-
for (i = 0; i < nr_devices; i++) {
dev = null_alloc_dev();
if (!dev) {
@@ -2015,8 +1867,6 @@ err_dev:
null_del_dev(nullb);
null_free_dev(dev);
}
- kmem_cache_destroy(ppa_cache);
-err_ppa:
unregister_blkdev(null_major, "nullb");
err_conf:
configfs_unregister_subsystem(&nullb_subsys);
@@ -2047,8 +1897,6 @@ static void __exit null_exit(void)
if (g_queue_mode == NULL_Q_MQ && shared_tags)
blk_mq_free_tag_set(&tag_set);
-
- kmem_cache_destroy(ppa_cache);
}
module_init(null_init);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 67974796c350..531a0915066b 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2579,14 +2579,14 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
bdev = bdget(dev);
if (!bdev)
return -ENOMEM;
+ ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL);
+ if (ret)
+ return ret;
if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) {
WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
- bdput(bdev);
+ blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
return -EINVAL;
}
- ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL);
- if (ret)
- return ret;
/* This is safe, since we have a reference from open(). */
__module_get(THIS_MODULE);
@@ -2745,7 +2745,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
pd->pkt_dev = MKDEV(pktdev_major, idx);
ret = pkt_new_dev(pd, dev);
if (ret)
- goto out_new_dev;
+ goto out_mem2;
/* inherit events of the host device */
disk->events = pd->bdev->bd_disk->events;
@@ -2763,8 +2763,6 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
mutex_unlock(&ctl_mutex);
return 0;
-out_new_dev:
- blk_cleanup_queue(disk->queue);
out_mem2:
put_disk(disk);
out_mem:
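
Both pktcdvd changes are bug fixes. bdev_get_queue() only yields a usable queue once the device has actually been opened, so the SCSI-passthrough check must run after blkdev_get(), and its failure path must undo that with blkdev_put() rather than bdput(). Likewise, the pkt_setup_dev() error path now jumps to out_mem2 so the queue is torn down via put_disk() instead of an extra blk_cleanup_queue() on a queue the gendisk still references. The corrected open sequence, schematically (comment only):

    /*
     * bdget(dev)                          take a struct block_device ref
     * blkdev_get(bdev, ...)               actually open the device
     * blk_queue_scsi_passthrough(queue)   only now inspect its queue
     * blkdev_put(bdev, ...)               full undo on failure
     */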
diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
deleted file mode 100644
index e5565fbaeb30..000000000000
--- a/drivers/block/smart1,2.h
+++ /dev/null
@@ -1,278 +0,0 @@
-/*
- * Disk Array driver for Compaq SMART2 Controllers
- * Copyright 1998 Compaq Computer Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Questions/Comments/Bugfixes to iss_storagedev@hp.com
- *
- * If you want to make changes, improve or add functionality to this
- * driver, you'll probably need the Compaq Array Controller Interface
- * Specificiation (Document number ECG086/1198)
- */
-
-/*
- * This file contains the controller communication implementation for
- * Compaq SMART-1 and SMART-2 controllers. To the best of my knowledge,
- * this should support:
- *
- * PCI:
- * SMART-2/P, SMART-2DH, SMART-2SL, SMART-221, SMART-3100ES, SMART-3200
- * Integerated SMART Array Controller, SMART-4200, SMART-4250ES
- *
- * EISA:
- * SMART-2/E, SMART, IAES, IDA-2, IDA
- */
-
-/*
- * Memory mapped FIFO interface (SMART 42xx cards)
- */
-static void smart4_submit_command(ctlr_info_t *h, cmdlist_t *c)
-{
- writel(c->busaddr, h->vaddr + S42XX_REQUEST_PORT_OFFSET);
-}
-
-/*
- * This card is the opposite of the other cards.
- * 0 turns interrupts on...
- * 0x08 turns them off...
- */
-static void smart4_intr_mask(ctlr_info_t *h, unsigned long val)
-{
- if (val)
- { /* Turn interrupts on */
- writel(0, h->vaddr + S42XX_REPLY_INTR_MASK_OFFSET);
- } else /* Turn them off */
- {
- writel( S42XX_INTR_OFF,
- h->vaddr + S42XX_REPLY_INTR_MASK_OFFSET);
- }
-}
-
-/*
- * For older cards FIFO Full = 0.
- * On this card 0 means there is room, anything else FIFO Full.
- *
- */
-static unsigned long smart4_fifo_full(ctlr_info_t *h)
-{
-
- return (!readl(h->vaddr + S42XX_REQUEST_PORT_OFFSET));
-}
-
-/* This type of controller returns -1 if the fifo is empty,
- * Not 0 like the others.
- * And we need to let it know we read a value out
- */
-static unsigned long smart4_completed(ctlr_info_t *h)
-{
- long register_value
- = readl(h->vaddr + S42XX_REPLY_PORT_OFFSET);
-
- /* Fifo is empty */
- if( register_value == 0xffffffff)
- return 0;
-
- /* Need to let it know we got the reply */
- /* We do this by writing a 0 to the port we just read from */
- writel(0, h->vaddr + S42XX_REPLY_PORT_OFFSET);
-
- return ((unsigned long) register_value);
-}
-
- /*
- * This hardware returns interrupt pending at a different place and
- * it does not tell us if the fifo is empty, we will have check
- * that by getting a 0 back from the command_completed call.
- */
-static unsigned long smart4_intr_pending(ctlr_info_t *h)
-{
- unsigned long register_value =
- readl(h->vaddr + S42XX_INTR_STATUS);
-
- if( register_value & S42XX_INTR_PENDING)
- return FIFO_NOT_EMPTY;
- return 0 ;
-}
-
-static struct access_method smart4_access = {
- smart4_submit_command,
- smart4_intr_mask,
- smart4_fifo_full,
- smart4_intr_pending,
- smart4_completed,
-};
-
-/*
- * Memory mapped FIFO interface (PCI SMART2 and SMART 3xxx cards)
- */
-static void smart2_submit_command(ctlr_info_t *h, cmdlist_t *c)
-{
- writel(c->busaddr, h->vaddr + COMMAND_FIFO);
-}
-
-static void smart2_intr_mask(ctlr_info_t *h, unsigned long val)
-{
- writel(val, h->vaddr + INTR_MASK);
-}
-
-static unsigned long smart2_fifo_full(ctlr_info_t *h)
-{
- return readl(h->vaddr + COMMAND_FIFO);
-}
-
-static unsigned long smart2_completed(ctlr_info_t *h)
-{
- return readl(h->vaddr + COMMAND_COMPLETE_FIFO);
-}
-
-static unsigned long smart2_intr_pending(ctlr_info_t *h)
-{
- return readl(h->vaddr + INTR_PENDING);
-}
-
-static struct access_method smart2_access = {
- smart2_submit_command,
- smart2_intr_mask,
- smart2_fifo_full,
- smart2_intr_pending,
- smart2_completed,
-};
-
-/*
- * IO access for SMART-2/E cards
- */
-static void smart2e_submit_command(ctlr_info_t *h, cmdlist_t *c)
-{
- outl(c->busaddr, h->io_mem_addr + COMMAND_FIFO);
-}
-
-static void smart2e_intr_mask(ctlr_info_t *h, unsigned long val)
-{
- outl(val, h->io_mem_addr + INTR_MASK);
-}
-
-static unsigned long smart2e_fifo_full(ctlr_info_t *h)
-{
- return inl(h->io_mem_addr + COMMAND_FIFO);
-}
-
-static unsigned long smart2e_completed(ctlr_info_t *h)
-{
- return inl(h->io_mem_addr + COMMAND_COMPLETE_FIFO);
-}
-
-static unsigned long smart2e_intr_pending(ctlr_info_t *h)
-{
- return inl(h->io_mem_addr + INTR_PENDING);
-}
-
-static struct access_method smart2e_access = {
- smart2e_submit_command,
- smart2e_intr_mask,
- smart2e_fifo_full,
- smart2e_intr_pending,
- smart2e_completed,
-};
-
-/*
- * IO access for older SMART-1 type cards
- */
-#define SMART1_SYSTEM_MASK 0xC8E
-#define SMART1_SYSTEM_DOORBELL 0xC8F
-#define SMART1_LOCAL_MASK 0xC8C
-#define SMART1_LOCAL_DOORBELL 0xC8D
-#define SMART1_INTR_MASK 0xC89
-#define SMART1_LISTADDR 0xC90
-#define SMART1_LISTLEN 0xC94
-#define SMART1_TAG 0xC97
-#define SMART1_COMPLETE_ADDR 0xC98
-#define SMART1_LISTSTATUS 0xC9E
-
-#define CHANNEL_BUSY 0x01
-#define CHANNEL_CLEAR 0x02
-
-static void smart1_submit_command(ctlr_info_t *h, cmdlist_t *c)
-{
- /*
- * This __u16 is actually a bunch of control flags on SMART
- * and below. We want them all to be zero.
- */
- c->hdr.size = 0;
-
- outb(CHANNEL_CLEAR, h->io_mem_addr + SMART1_SYSTEM_DOORBELL);
-
- outl(c->busaddr, h->io_mem_addr + SMART1_LISTADDR);
- outw(c->size, h->io_mem_addr + SMART1_LISTLEN);
-
- outb(CHANNEL_BUSY, h->io_mem_addr + SMART1_LOCAL_DOORBELL);
-}
-
-static void smart1_intr_mask(ctlr_info_t *h, unsigned long val)
-{
- if (val == 1) {
- outb(0xFD, h->io_mem_addr + SMART1_SYSTEM_DOORBELL);
- outb(CHANNEL_BUSY, h->io_mem_addr + SMART1_LOCAL_DOORBELL);
- outb(0x01, h->io_mem_addr + SMART1_INTR_MASK);
- outb(0x01, h->io_mem_addr + SMART1_SYSTEM_MASK);
- } else {
- outb(0, h->io_mem_addr + 0xC8E);
- }
-}
-
-static unsigned long smart1_fifo_full(ctlr_info_t *h)
-{
- unsigned char chan;
- chan = inb(h->io_mem_addr + SMART1_SYSTEM_DOORBELL) & CHANNEL_CLEAR;
- return chan;
-}
-
-static unsigned long smart1_completed(ctlr_info_t *h)
-{
- unsigned char status;
- unsigned long cmd;
-
- if (inb(h->io_mem_addr + SMART1_SYSTEM_DOORBELL) & CHANNEL_BUSY) {
- outb(CHANNEL_BUSY, h->io_mem_addr + SMART1_SYSTEM_DOORBELL);
-
- cmd = inl(h->io_mem_addr + SMART1_COMPLETE_ADDR);
- status = inb(h->io_mem_addr + SMART1_LISTSTATUS);
-
- outb(CHANNEL_CLEAR, h->io_mem_addr + SMART1_LOCAL_DOORBELL);
-
- /*
- * this is x86 (actually compaq x86) only, so it's ok
- */
- if (cmd) ((cmdlist_t*)bus_to_virt(cmd))->req.hdr.rcode = status;
- } else {
- cmd = 0;
- }
- return cmd;
-}
-
-static unsigned long smart1_intr_pending(ctlr_info_t *h)
-{
- unsigned char chan;
- chan = inb(h->io_mem_addr + SMART1_SYSTEM_DOORBELL) & CHANNEL_BUSY;
- return chan;
-}
-
-static struct access_method smart1_access = {
- smart1_submit_command,
- smart1_intr_mask,
- smart1_fifo_full,
- smart1_intr_pending,
- smart1_completed,
-};
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index d70eba30003a..0afa6c8c3857 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -430,7 +430,7 @@ static void put_entry_bdev(struct zram *zram, unsigned long entry)
static void zram_page_end_io(struct bio *bio)
{
- struct page *page = bio->bi_io_vec[0].bv_page;
+ struct page *page = bio_first_page_all(bio);
page_endio(page, op_is_write(bio_op(bio)),
blk_status_to_errno(bio->bi_status));
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index c823914b3a80..b6a71705b7d6 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -794,7 +794,7 @@ static ssize_t hci_uart_tty_write(struct tty_struct *tty, struct file *file,
return 0;
}
-static unsigned int hci_uart_tty_poll(struct tty_struct *tty,
+static __poll_t hci_uart_tty_poll(struct tty_struct *tty,
struct file *filp, poll_table *wait)
{
return 0;
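This and the many poll-handler hunks that follow are part of the tree-wide conversion from unsigned int to the bitwise __poll_t type, which lets sparse flag event masks mixed up with ordinary integers. The converted handlers all share one shape; a minimal sketch (device type and readiness test hypothetical):

#include <linux/poll.h>
#include <linux/wait.h>

struct foo_dev {
        wait_queue_head_t read_wait;
        bool data_ready;
};

static __poll_t foo_poll(struct file *file, poll_table *wait)
{
        struct foo_dev *dev = file->private_data;
        __poll_t mask = 0;

        poll_wait(file, &dev->read_wait, wait);
        if (dev->data_ready)            /* hypothetical readiness test */
                mask |= EPOLLIN | EPOLLRDNORM;
        return mask;
}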
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index e6f6dbc04131..0521748a1972 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -299,7 +299,7 @@ static ssize_t vhci_write(struct kiocb *iocb, struct iov_iter *from)
return vhci_get_user(data, from);
}
-static unsigned int vhci_poll(struct file *file, poll_table *wait)
+static __poll_t vhci_poll(struct file *file, poll_table *wait)
{
struct vhci_data *data = file->private_data;
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index dc7b3c7b7d42..57e011d36a79 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -120,7 +120,7 @@ config QCOM_EBI2
SRAM, ethernet adapters, FPGAs and LCD displays.
config SIMPLE_PM_BUS
- bool "Simple Power-Managed Bus Driver"
+ tristate "Simple Power-Managed Bus Driver"
depends on OF && PM
help
Driver for transparent busses that don't need a real driver, but
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index 1dfb9f8de171..a2a1c1478cd0 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -236,7 +236,7 @@ static ssize_t apm_read(struct file *fp, char __user *buf, size_t count, loff_t
return ret;
}
-static unsigned int apm_poll(struct file *fp, poll_table * wait)
+static __poll_t apm_poll(struct file *fp, poll_table * wait)
{
struct apm_user *as = fp->private_data;
diff --git a/drivers/char/dsp56k.c b/drivers/char/dsp56k.c
index 0d7b577e0ff0..2f92cc46698b 100644
--- a/drivers/char/dsp56k.c
+++ b/drivers/char/dsp56k.c
@@ -406,7 +406,7 @@ static long dsp56k_ioctl(struct file *file, unsigned int cmd,
* Do I need this function at all???
*/
#if 0
-static unsigned int dsp56k_poll(struct file *file, poll_table *wait)
+static __poll_t dsp56k_poll(struct file *file, poll_table *wait)
{
int dev = iminor(file_inode(file)) & 0x0f;
diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c
index 839ee61d352a..2697c22e3be2 100644
--- a/drivers/char/dtlk.c
+++ b/drivers/char/dtlk.c
@@ -91,7 +91,7 @@ static ssize_t dtlk_read(struct file *, char __user *,
size_t nbytes, loff_t * ppos);
static ssize_t dtlk_write(struct file *, const char __user *,
size_t nbytes, loff_t * ppos);
-static unsigned int dtlk_poll(struct file *, poll_table *);
+static __poll_t dtlk_poll(struct file *, poll_table *);
static int dtlk_open(struct inode *, struct file *);
static int dtlk_release(struct inode *, struct file *);
static long dtlk_ioctl(struct file *file,
@@ -228,9 +228,9 @@ static ssize_t dtlk_write(struct file *file, const char __user *buf,
return -EAGAIN;
}
-static unsigned int dtlk_poll(struct file *file, poll_table * wait)
+static __poll_t dtlk_poll(struct file *file, poll_table * wait)
{
- int mask = 0;
+ __poll_t mask = 0;
unsigned long expires;
TRACE_TEXT(" dtlk_poll");
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index b941e6d59fd6..dbed4953f86c 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -342,7 +342,7 @@ out:
return retval;
}
-static unsigned int hpet_poll(struct file *file, poll_table * wait)
+static __poll_t hpet_poll(struct file *file, poll_table * wait)
{
unsigned long v;
struct hpet_dev *devp;
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index f6e3e5abc117..4d0f571c15f9 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -73,26 +73,14 @@ config HW_RANDOM_ATMEL
If unsure, say Y.
-config HW_RANDOM_BCM63XX
- tristate "Broadcom BCM63xx Random Number Generator support"
- depends on BCM63XX || BMIPS_GENERIC
- default HW_RANDOM
- ---help---
- This driver provides kernel-side support for the Random Number
- Generator hardware found on the Broadcom BCM63xx SoCs.
-
- To compile this driver as a module, choose M here: the
- module will be called bcm63xx-rng
-
- If unusure, say Y.
-
config HW_RANDOM_BCM2835
- tristate "Broadcom BCM2835 Random Number Generator support"
- depends on ARCH_BCM2835 || ARCH_BCM_NSP || ARCH_BCM_5301X
+ tristate "Broadcom BCM2835/BCM63xx Random Number Generator support"
+ depends on ARCH_BCM2835 || ARCH_BCM_NSP || ARCH_BCM_5301X || \
+ ARCH_BCM_63XX || BCM63XX || BMIPS_GENERIC
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
- Generator hardware found on the Broadcom BCM2835 SoCs.
+ Generator hardware found on the Broadcom BCM2835 and BCM63xx SoCs.
To compile this driver as a module, choose M here: the
module will be called bcm2835-rng
@@ -306,19 +294,6 @@ config HW_RANDOM_POWERNV
If unsure, say Y.
-config HW_RANDOM_TPM
- tristate "TPM HW Random Number Generator support"
- depends on TCG_TPM
- default HW_RANDOM
- ---help---
- This driver provides kernel-side support for the Random Number
- Generator in the Trusted Platform Module
-
- To compile this driver as a module, choose M here: the
- module will be called tpm-rng.
-
- If unsure, say Y.
-
config HW_RANDOM_HISI
tristate "Hisilicon Random Number Generator support"
depends on HW_RANDOM && ARCH_HISI
@@ -449,6 +424,18 @@ config HW_RANDOM_S390
If unsure, say Y.
+config HW_RANDOM_EXYNOS
+ tristate "Samsung Exynos True Random Number Generator support"
+ depends on ARCH_EXYNOS || COMPILE_TEST
+ default HW_RANDOM
+ ---help---
+ This driver provides support for the True Random Number
+ Generator available in Exynos SoCs.
+
+ To compile this driver as a module, choose M here: the module
+ will be called exynos-trng.
+
+ If unsure, say Y.
endif # HW_RANDOM
config UML_RANDOM
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index f3728d008fff..b780370bd4eb 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -9,11 +9,11 @@ obj-$(CONFIG_HW_RANDOM_TIMERIOMEM) += timeriomem-rng.o
obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o
obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o
obj-$(CONFIG_HW_RANDOM_ATMEL) += atmel-rng.o
-obj-$(CONFIG_HW_RANDOM_BCM63XX) += bcm63xx-rng.o
obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o
obj-$(CONFIG_HW_RANDOM_N2RNG) += n2-rng.o
n2-rng-y := n2-drv.o n2-asm.o
obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o
+obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-trng.o
obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o
obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o
obj-$(CONFIG_HW_RANDOM_OMAP3_ROM) += omap3-rom-rng.o
@@ -27,7 +27,6 @@ obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o
obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
obj-$(CONFIG_HW_RANDOM_HISI) += hisi-rng.o
-obj-$(CONFIG_HW_RANDOM_TPM) += tpm-rng.o
obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o
obj-$(CONFIG_HW_RANDOM_MSM) += msm-rng.o
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index 574211a49549..7a84cec30c3a 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -15,6 +15,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
+#include <linux/clk.h>
#define RNG_CTRL 0x0
#define RNG_STATUS 0x4
@@ -29,116 +30,180 @@
#define RNG_INT_OFF 0x1
-static void __init nsp_rng_init(void __iomem *base)
+struct bcm2835_rng_priv {
+ struct hwrng rng;
+ void __iomem *base;
+ bool mask_interrupts;
+ struct clk *clk;
+};
+
+static inline struct bcm2835_rng_priv *to_rng_priv(struct hwrng *rng)
{
- u32 val;
+ return container_of(rng, struct bcm2835_rng_priv, rng);
+}
+
+static inline u32 rng_readl(struct bcm2835_rng_priv *priv, u32 offset)
+{
+ /* MIPS chips strapped for BE will automagically configure the
+ * peripheral registers for CPU-native byte order.
+ */
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ return __raw_readl(priv->base + offset);
+ else
+ return readl(priv->base + offset);
+}
- /* mask the interrupt */
- val = readl(base + RNG_INT_MASK);
- val |= RNG_INT_OFF;
- writel(val, base + RNG_INT_MASK);
+static inline void rng_writel(struct bcm2835_rng_priv *priv, u32 val,
+ u32 offset)
+{
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ __raw_writel(val, priv->base + offset);
+ else
+ writel(val, priv->base + offset);
}
static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max,
bool wait)
{
- void __iomem *rng_base = (void __iomem *)rng->priv;
+ struct bcm2835_rng_priv *priv = to_rng_priv(rng);
u32 max_words = max / sizeof(u32);
u32 num_words, count;
- while ((__raw_readl(rng_base + RNG_STATUS) >> 24) == 0) {
+ while ((rng_readl(priv, RNG_STATUS) >> 24) == 0) {
if (!wait)
return 0;
cpu_relax();
}
- num_words = readl(rng_base + RNG_STATUS) >> 24;
+ num_words = rng_readl(priv, RNG_STATUS) >> 24;
if (num_words > max_words)
num_words = max_words;
for (count = 0; count < num_words; count++)
- ((u32 *)buf)[count] = readl(rng_base + RNG_DATA);
+ ((u32 *)buf)[count] = rng_readl(priv, RNG_DATA);
return num_words * sizeof(u32);
}
-static struct hwrng bcm2835_rng_ops = {
- .name = "bcm2835",
- .read = bcm2835_rng_read,
+static int bcm2835_rng_init(struct hwrng *rng)
+{
+ struct bcm2835_rng_priv *priv = to_rng_priv(rng);
+ int ret = 0;
+ u32 val;
+
+ if (!IS_ERR(priv->clk)) {
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+ }
+
+ if (priv->mask_interrupts) {
+ /* mask the interrupt */
+ val = rng_readl(priv, RNG_INT_MASK);
+ val |= RNG_INT_OFF;
+ rng_writel(priv, val, RNG_INT_MASK);
+ }
+
+ /* set warm-up count & enable */
+ rng_writel(priv, RNG_WARMUP_COUNT, RNG_STATUS);
+ rng_writel(priv, RNG_RBGEN, RNG_CTRL);
+
+ return ret;
+}
+
+static void bcm2835_rng_cleanup(struct hwrng *rng)
+{
+ struct bcm2835_rng_priv *priv = to_rng_priv(rng);
+
+ /* disable rng hardware */
+ rng_writel(priv, 0, RNG_CTRL);
+
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
+}
+
+struct bcm2835_rng_of_data {
+ bool mask_interrupts;
+};
+
+static const struct bcm2835_rng_of_data nsp_rng_of_data = {
+ .mask_interrupts = true,
};
static const struct of_device_id bcm2835_rng_of_match[] = {
{ .compatible = "brcm,bcm2835-rng"},
- { .compatible = "brcm,bcm-nsp-rng", .data = nsp_rng_init},
- { .compatible = "brcm,bcm5301x-rng", .data = nsp_rng_init},
+ { .compatible = "brcm,bcm-nsp-rng", .data = &nsp_rng_of_data },
+ { .compatible = "brcm,bcm5301x-rng", .data = &nsp_rng_of_data },
+ { .compatible = "brcm,bcm6368-rng"},
{},
};
static int bcm2835_rng_probe(struct platform_device *pdev)
{
+ const struct bcm2835_rng_of_data *of_data;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- void (*rng_setup)(void __iomem *base);
const struct of_device_id *rng_id;
- void __iomem *rng_base;
+ struct bcm2835_rng_priv *priv;
+ struct resource *r;
int err;
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
/* map peripheral */
- rng_base = of_iomap(np, 0);
- if (!rng_base) {
- dev_err(dev, "failed to remap rng regs");
- return -ENODEV;
- }
- bcm2835_rng_ops.priv = (unsigned long)rng_base;
+ priv->base = devm_ioremap_resource(dev, r);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ /* Clock is optional on most platforms */
+ priv->clk = devm_clk_get(dev, NULL);
+
+ priv->rng.name = pdev->name;
+ priv->rng.init = bcm2835_rng_init;
+ priv->rng.read = bcm2835_rng_read;
+ priv->rng.cleanup = bcm2835_rng_cleanup;
rng_id = of_match_node(bcm2835_rng_of_match, np);
- if (!rng_id) {
- iounmap(rng_base);
+ if (!rng_id)
return -EINVAL;
- }
- /* Check for rng init function, execute it */
- rng_setup = rng_id->data;
- if (rng_setup)
- rng_setup(rng_base);
- /* set warm-up count & enable */
- __raw_writel(RNG_WARMUP_COUNT, rng_base + RNG_STATUS);
- __raw_writel(RNG_RBGEN, rng_base + RNG_CTRL);
+ /* Pick up match data, if any (NSP-style parts mask the interrupt) */
+ of_data = rng_id->data;
+ if (of_data)
+ priv->mask_interrupts = of_data->mask_interrupts;
/* register driver */
- err = hwrng_register(&bcm2835_rng_ops);
- if (err) {
+ err = devm_hwrng_register(dev, &priv->rng);
+ if (err)
dev_err(dev, "hwrng registration failed\n");
- iounmap(rng_base);
- } else
+ else
dev_info(dev, "hwrng registered\n");
return err;
}
-static int bcm2835_rng_remove(struct platform_device *pdev)
-{
- void __iomem *rng_base = (void __iomem *)bcm2835_rng_ops.priv;
-
- /* disable rng hardware */
- __raw_writel(0, rng_base + RNG_CTRL);
-
- /* unregister driver */
- hwrng_unregister(&bcm2835_rng_ops);
- iounmap(rng_base);
-
- return 0;
-}
-
MODULE_DEVICE_TABLE(of, bcm2835_rng_of_match);
+static struct platform_device_id bcm2835_rng_devtype[] = {
+ { .name = "bcm2835-rng" },
+ { .name = "bcm63xx-rng" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, bcm2835_rng_devtype);
+
static struct platform_driver bcm2835_rng_driver = {
.driver = {
.name = "bcm2835-rng",
.of_match_table = bcm2835_rng_of_match,
},
.probe = bcm2835_rng_probe,
- .remove = bcm2835_rng_remove,
+ .id_table = bcm2835_rng_devtype,
};
module_platform_driver(bcm2835_rng_driver);
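The heart of the bcm2835-rng rewrite is replacing the file-scope struct hwrng and its (void __iomem *)rng->priv cast with per-device state recovered through container_of(), so multiple instances can coexist and devm_hwrng_register() can retire the remove() callback entirely. Reduced to its core (names hypothetical):

#include <linux/hw_random.h>
#include <linux/io.h>
#include <linux/kernel.h>

struct foo_rng_priv {
        struct hwrng rng;       /* embedded, so no global singleton */
        void __iomem *base;
};

static inline struct foo_rng_priv *to_foo_priv(struct hwrng *rng)
{
        return container_of(rng, struct foo_rng_priv, rng);
}

static int foo_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
        struct foo_rng_priv *priv = to_foo_priv(rng);

        /* illustrative single-word read from a hypothetical data register */
        *(u32 *)buf = readl(priv->base);
        return sizeof(u32);
}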
diff --git a/drivers/char/hw_random/bcm63xx-rng.c b/drivers/char/hw_random/bcm63xx-rng.c
deleted file mode 100644
index 5132c9cde50d..000000000000
--- a/drivers/char/hw_random/bcm63xx-rng.c
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Broadcom BCM63xx Random Number Generator support
- *
- * Copyright (C) 2011, Florian Fainelli <florian@openwrt.org>
- * Copyright (C) 2009, Broadcom Corporation
- *
- */
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-#include <linux/hw_random.h>
-#include <linux/of.h>
-
-#define RNG_CTRL 0x00
-#define RNG_EN (1 << 0)
-
-#define RNG_STAT 0x04
-#define RNG_AVAIL_MASK (0xff000000)
-
-#define RNG_DATA 0x08
-#define RNG_THRES 0x0c
-#define RNG_MASK 0x10
-
-struct bcm63xx_rng_priv {
- struct hwrng rng;
- struct clk *clk;
- void __iomem *regs;
-};
-
-#define to_rng_priv(rng) container_of(rng, struct bcm63xx_rng_priv, rng)
-
-static int bcm63xx_rng_init(struct hwrng *rng)
-{
- struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
- u32 val;
- int error;
-
- error = clk_prepare_enable(priv->clk);
- if (error)
- return error;
-
- val = __raw_readl(priv->regs + RNG_CTRL);
- val |= RNG_EN;
- __raw_writel(val, priv->regs + RNG_CTRL);
-
- return 0;
-}
-
-static void bcm63xx_rng_cleanup(struct hwrng *rng)
-{
- struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
- u32 val;
-
- val = __raw_readl(priv->regs + RNG_CTRL);
- val &= ~RNG_EN;
- __raw_writel(val, priv->regs + RNG_CTRL);
-
- clk_disable_unprepare(priv->clk);
-}
-
-static int bcm63xx_rng_data_present(struct hwrng *rng, int wait)
-{
- struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
-
- return __raw_readl(priv->regs + RNG_STAT) & RNG_AVAIL_MASK;
-}
-
-static int bcm63xx_rng_data_read(struct hwrng *rng, u32 *data)
-{
- struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
-
- *data = __raw_readl(priv->regs + RNG_DATA);
-
- return 4;
-}
-
-static int bcm63xx_rng_probe(struct platform_device *pdev)
-{
- struct resource *r;
- int ret;
- struct bcm63xx_rng_priv *priv;
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- dev_err(&pdev->dev, "no iomem resource\n");
- return -ENXIO;
- }
-
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->rng.name = pdev->name;
- priv->rng.init = bcm63xx_rng_init;
- priv->rng.cleanup = bcm63xx_rng_cleanup;
- priv->rng.data_present = bcm63xx_rng_data_present;
- priv->rng.data_read = bcm63xx_rng_data_read;
-
- priv->clk = devm_clk_get(&pdev->dev, "ipsec");
- if (IS_ERR(priv->clk)) {
- ret = PTR_ERR(priv->clk);
- dev_err(&pdev->dev, "no clock for device: %d\n", ret);
- return ret;
- }
-
- if (!devm_request_mem_region(&pdev->dev, r->start,
- resource_size(r), pdev->name)) {
- dev_err(&pdev->dev, "request mem failed");
- return -EBUSY;
- }
-
- priv->regs = devm_ioremap_nocache(&pdev->dev, r->start,
- resource_size(r));
- if (!priv->regs) {
- dev_err(&pdev->dev, "ioremap failed");
- return -ENOMEM;
- }
-
- ret = devm_hwrng_register(&pdev->dev, &priv->rng);
- if (ret) {
- dev_err(&pdev->dev, "failed to register rng device: %d\n",
- ret);
- return ret;
- }
-
- dev_info(&pdev->dev, "registered RNG driver\n");
-
- return 0;
-}
-
-#ifdef CONFIG_OF
-static const struct of_device_id bcm63xx_rng_of_match[] = {
- { .compatible = "brcm,bcm6368-rng", },
- {},
-};
-MODULE_DEVICE_TABLE(of, bcm63xx_rng_of_match);
-#endif
-
-static struct platform_driver bcm63xx_rng_driver = {
- .probe = bcm63xx_rng_probe,
- .driver = {
- .name = "bcm63xx-rng",
- .of_match_table = of_match_ptr(bcm63xx_rng_of_match),
- },
-};
-
-module_platform_driver(bcm63xx_rng_driver);
-
-MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
-MODULE_DESCRIPTION("Broadcom BCM63xx RNG driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 657b8770b6b9..91bb98c42a1c 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -306,6 +306,10 @@ static int enable_best_rng(void)
ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
if (!ret)
cur_rng_set_by_user = 0;
+ } else {
+ drop_current_rng();
+ cur_rng_set_by_user = 0;
+ ret = 0;
}
return ret;
diff --git a/drivers/char/hw_random/exynos-trng.c b/drivers/char/hw_random/exynos-trng.c
new file mode 100644
index 000000000000..1947aed7c044
--- /dev/null
+++ b/drivers/char/hw_random/exynos-trng.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RNG driver for Exynos TRNGs
+ *
+ * Author: Łukasz Stelmach <l.stelmach@samsung.com>
+ *
+ * Copyright 2017 (c) Samsung Electronics Software, Inc.
+ *
+ * Based on the Exynos PRNG driver drivers/crypto/exynos-rng by
+ * Krzysztof Kozłowski <krzk@kernel.org>
+ */
+
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#define EXYNOS_TRNG_CLKDIV (0x0)
+
+#define EXYNOS_TRNG_CTRL (0x20)
+#define EXYNOS_TRNG_CTRL_RNGEN BIT(31)
+
+#define EXYNOS_TRNG_POST_CTRL (0x30)
+#define EXYNOS_TRNG_ONLINE_CTRL (0x40)
+#define EXYNOS_TRNG_ONLINE_STAT (0x44)
+#define EXYNOS_TRNG_ONLINE_MAXCHI2 (0x48)
+#define EXYNOS_TRNG_FIFO_CTRL (0x50)
+#define EXYNOS_TRNG_FIFO_0 (0x80)
+#define EXYNOS_TRNG_FIFO_1 (0x84)
+#define EXYNOS_TRNG_FIFO_2 (0x88)
+#define EXYNOS_TRNG_FIFO_3 (0x8c)
+#define EXYNOS_TRNG_FIFO_4 (0x90)
+#define EXYNOS_TRNG_FIFO_5 (0x94)
+#define EXYNOS_TRNG_FIFO_6 (0x98)
+#define EXYNOS_TRNG_FIFO_7 (0x9c)
+#define EXYNOS_TRNG_FIFO_LEN (8)
+#define EXYNOS_TRNG_CLOCK_RATE (500000)
+
+
+struct exynos_trng_dev {
+ struct device *dev;
+ void __iomem *mem;
+ struct clk *clk;
+ struct hwrng rng;
+};
+
+static int exynos_trng_do_read(struct hwrng *rng, void *data, size_t max,
+ bool wait)
+{
+ struct exynos_trng_dev *trng;
+ int val;
+
+ max = min_t(size_t, max, (EXYNOS_TRNG_FIFO_LEN * 4));
+
+ trng = (struct exynos_trng_dev *)rng->priv;
+
+ writel_relaxed(max * 8, trng->mem + EXYNOS_TRNG_FIFO_CTRL);
+ val = readl_poll_timeout(trng->mem + EXYNOS_TRNG_FIFO_CTRL, val,
+ val == 0, 200, 1000000);
+ if (val < 0)
+ return val;
+
+ memcpy_fromio(data, trng->mem + EXYNOS_TRNG_FIFO_0, max);
+
+ return max;
+}
+
+static int exynos_trng_init(struct hwrng *rng)
+{
+ struct exynos_trng_dev *trng = (struct exynos_trng_dev *)rng->priv;
+ unsigned long sss_rate;
+ u32 val;
+
+ sss_rate = clk_get_rate(trng->clk);
+
+ /*
+ * For most TRNG circuits a clock frequency below 500 kHz
+ * is safe.
+ */
+ val = sss_rate / (EXYNOS_TRNG_CLOCK_RATE * 2);
+ if (val > 0x7fff) {
+ dev_err(trng->dev, "clock divider too large: %d", val);
+ return -ERANGE;
+ }
+ val = val << 1;
+ writel_relaxed(val, trng->mem + EXYNOS_TRNG_CLKDIV);
+
+ /* Enable the generator. */
+ val = EXYNOS_TRNG_CTRL_RNGEN;
+ writel_relaxed(val, trng->mem + EXYNOS_TRNG_CTRL);
+
+ /*
+ * Disable post-processing. /dev/hwrng is supposed to deliver
+ * unprocessed data.
+ */
+ writel_relaxed(0, trng->mem + EXYNOS_TRNG_POST_CTRL);
+
+ return 0;
+}
+
+static int exynos_trng_probe(struct platform_device *pdev)
+{
+ struct exynos_trng_dev *trng;
+ struct resource *res;
+ int ret = -ENOMEM;
+
+ trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL);
+ if (!trng)
+ return ret;
+
+ trng->rng.name = devm_kstrdup(&pdev->dev, dev_name(&pdev->dev),
+ GFP_KERNEL);
+ if (!trng->rng.name)
+ return ret;
+
+ trng->rng.init = exynos_trng_init;
+ trng->rng.read = exynos_trng_do_read;
+ trng->rng.priv = (unsigned long) trng;
+
+ platform_set_drvdata(pdev, trng);
+ trng->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ trng->mem = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(trng->mem))
+ return PTR_ERR(trng->mem);
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Could not get runtime PM.\n");
+ goto err_pm_get;
+ }
+
+ trng->clk = devm_clk_get(&pdev->dev, "secss");
+ if (IS_ERR(trng->clk)) {
+ ret = PTR_ERR(trng->clk);
+ dev_err(&pdev->dev, "Could not get clock.\n");
+ goto err_clock;
+ }
+
+ ret = clk_prepare_enable(trng->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not enable the clk.\n");
+ goto err_clock;
+ }
+
+ ret = hwrng_register(&trng->rng);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not register hwrng device.\n");
+ goto err_register;
+ }
+
+ dev_info(&pdev->dev, "Exynos True Random Number Generator.\n");
+
+ return 0;
+
+err_register:
+ clk_disable_unprepare(trng->clk);
+
+err_clock:
+ pm_runtime_put_sync(&pdev->dev);
+
+err_pm_get:
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static int exynos_trng_remove(struct platform_device *pdev)
+{
+ struct exynos_trng_dev *trng = platform_get_drvdata(pdev);
+
+ hwrng_unregister(&trng->rng);
+ clk_disable_unprepare(trng->clk);
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static int __maybe_unused exynos_trng_suspend(struct device *dev)
+{
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static int __maybe_unused exynos_trng_resume(struct device *dev)
+{
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "Could not get runtime PM.\n");
+ pm_runtime_put_noidle(dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(exynos_trng_pm_ops, exynos_trng_suspend,
+ exynos_trng_resume);
+
+static const struct of_device_id exynos_trng_dt_match[] = {
+ {
+ .compatible = "samsung,exynos5250-trng",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, exynos_trng_dt_match);
+
+static struct platform_driver exynos_trng_driver = {
+ .driver = {
+ .name = "exynos-trng",
+ .pm = &exynos_trng_pm_ops,
+ .of_match_table = exynos_trng_dt_match,
+ },
+ .probe = exynos_trng_probe,
+ .remove = exynos_trng_remove,
+};
+
+module_platform_driver(exynos_trng_driver);
+MODULE_AUTHOR("Łukasz Stelmach");
+MODULE_DESCRIPTION("H/W TRNG driver for Exynos chips");
+MODULE_LICENSE("GPL v2");
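exynos_trng_do_read() above leans on readl_poll_timeout() instead of an open-coded wait loop; the macro takes the register address, a result variable, the exit condition, a poll interval and a timeout (both in microseconds), and returns 0 or -ETIMEDOUT. The same idiom in isolation (register layout hypothetical):

#include <linux/bitops.h>
#include <linux/iopoll.h>

#define FOO_STATUS      0x10            /* hypothetical status register */
#define FOO_BUSY        BIT(0)

static int foo_wait_idle(void __iomem *base)
{
        u32 val;

        /* sleep-poll every 200 us, give up after one second */
        return readl_poll_timeout(base + FOO_STATUS, val,
                                  !(val & FOO_BUSY), 200, 1000000);
}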
diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c
index 88db42d30760..eca87249bcff 100644
--- a/drivers/char/hw_random/imx-rngc.c
+++ b/drivers/char/hw_random/imx-rngc.c
@@ -282,8 +282,7 @@ static int __exit imx_rngc_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int imx_rngc_suspend(struct device *dev)
+static int __maybe_unused imx_rngc_suspend(struct device *dev)
{
struct imx_rngc *rngc = dev_get_drvdata(dev);
@@ -292,7 +291,7 @@ static int imx_rngc_suspend(struct device *dev)
return 0;
}
-static int imx_rngc_resume(struct device *dev)
+static int __maybe_unused imx_rngc_resume(struct device *dev)
{
struct imx_rngc *rngc = dev_get_drvdata(dev);
@@ -301,11 +300,7 @@ static int imx_rngc_resume(struct device *dev)
return 0;
}
-static const struct dev_pm_ops imx_rngc_pm_ops = {
- .suspend = imx_rngc_suspend,
- .resume = imx_rngc_resume,
-};
-#endif
+static SIMPLE_DEV_PM_OPS(imx_rngc_pm_ops, imx_rngc_suspend, imx_rngc_resume);
static const struct of_device_id imx_rngc_dt_ids[] = {
{ .compatible = "fsl,imx25-rngb", .data = NULL, },
@@ -316,9 +311,7 @@ MODULE_DEVICE_TABLE(of, imx_rngc_dt_ids);
static struct platform_driver imx_rngc_driver = {
.driver = {
.name = "imx_rngc",
-#ifdef CONFIG_PM
.pm = &imx_rngc_pm_ops,
-#endif
.of_match_table = imx_rngc_dt_ids,
},
.remove = __exit_p(imx_rngc_remove),
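The imx-rngc change is the standard recipe for dropping #ifdef CONFIG_PM blocks: tag the callbacks __maybe_unused and define the ops table unconditionally with SIMPLE_DEV_PM_OPS(), which only wires the callbacks in when CONFIG_PM_SLEEP is enabled. In outline (names hypothetical):

#include <linux/device.h>
#include <linux/pm.h>

static int __maybe_unused foo_suspend(struct device *dev)
{
        /* quiesce the hardware; compiled but unreferenced without PM_SLEEP */
        return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
        /* bring the hardware back up */
        return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);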
diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c
index 8da7bcf54105..7f99cd52b40e 100644
--- a/drivers/char/hw_random/mtk-rng.c
+++ b/drivers/char/hw_random/mtk-rng.c
@@ -135,6 +135,7 @@ static int mtk_rng_probe(struct platform_device *pdev)
#endif
priv->rng.read = mtk_rng_read;
priv->rng.priv = (unsigned long)&pdev->dev;
+ priv->rng.quality = 900;
priv->clk = devm_clk_get(&pdev->dev, "rng");
if (IS_ERR(priv->clk)) {
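The new .quality field is, per the hwrng core's definition, the estimated entropy in bits per 1024 bits of input, so 900 here declares roughly 900/1024 ≈ 0.88 bits of entropy per output bit; a non-zero quality is also what lets the in-kernel hwrng thread credit this source to the input pool without user-space help.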
diff --git a/drivers/char/hw_random/tpm-rng.c b/drivers/char/hw_random/tpm-rng.c
deleted file mode 100644
index d6d448266f07..000000000000
--- a/drivers/char/hw_random/tpm-rng.c
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (C) 2012 Kent Yoder IBM Corporation
- *
- * HWRNG interfaces to pull RNG data from a TPM
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/module.h>
-#include <linux/hw_random.h>
-#include <linux/tpm.h>
-
-#define MODULE_NAME "tpm-rng"
-
-static int tpm_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
-{
- return tpm_get_random(TPM_ANY_NUM, data, max);
-}
-
-static struct hwrng tpm_rng = {
- .name = MODULE_NAME,
- .read = tpm_rng_read,
-};
-
-static int __init rng_init(void)
-{
- return hwrng_register(&tpm_rng);
-}
-module_init(rng_init);
-
-static void __exit rng_exit(void)
-{
- hwrng_unregister(&tpm_rng);
-}
-module_exit(rng_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Kent Yoder <key@linux.vnet.ibm.com>");
-MODULE_DESCRIPTION("RNG driver for TPM devices");
diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c
index 6edfaa72b98b..7992c870b0a2 100644
--- a/drivers/char/ipmi/bt-bmc.c
+++ b/drivers/char/ipmi/bt-bmc.c
@@ -338,10 +338,10 @@ static int bt_bmc_release(struct inode *inode, struct file *file)
return 0;
}
-static unsigned int bt_bmc_poll(struct file *file, poll_table *wait)
+static __poll_t bt_bmc_poll(struct file *file, poll_table *wait)
{
struct bt_bmc *bt_bmc = file_bt_bmc(file);
- unsigned int mask = 0;
+ __poll_t mask = 0;
u8 ctrl;
poll_wait(file, &bt_bmc->queue, wait);
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 2ffca4232686..a011a7739f5e 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -78,10 +78,10 @@ static void file_receive_handler(struct ipmi_recv_msg *msg,
spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
}
-static unsigned int ipmi_poll(struct file *file, poll_table *wait)
+static __poll_t ipmi_poll(struct file *file, poll_table *wait)
{
struct ipmi_file_private *priv = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
unsigned long flags;
poll_wait(file, &priv->wait, wait);
diff --git a/drivers/char/ipmi/ipmi_dmi.c b/drivers/char/ipmi/ipmi_dmi.c
index ab78b3be7e33..c5112b17d7ea 100644
--- a/drivers/char/ipmi/ipmi_dmi.c
+++ b/drivers/char/ipmi/ipmi_dmi.c
@@ -106,7 +106,10 @@ static void __init dmi_add_platform_ipmi(unsigned long base_addr,
pr_err("ipmi:dmi: Error allocation IPMI platform device\n");
return;
}
- pdev->driver_override = override;
+ pdev->driver_override = kasprintf(GFP_KERNEL, "%s",
+ override);
+ if (!pdev->driver_override)
+ goto err;
if (type == IPMI_DMI_TYPE_SSIF) {
set_prop_entry(p[pidx++], "i2c-addr", u16, base_addr);
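The driver_override fix matters because the driver core kfree()s that string when the platform device is released; the old assignment handed it a pointer it did not own. The rule in isolation (function name hypothetical; the kasprintf("%s", ...) used above and kstrdup() are equivalent here):

#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/string.h>

static int foo_set_override(struct platform_device *pdev, const char *name)
{
        /* must be heap memory: the device core kfree()s driver_override
         * in its release path */
        pdev->driver_override = kstrdup(name, GFP_KERNEL);
        if (!pdev->driver_override)
                return -ENOMEM;
        return 0;
}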
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index f45732a2cb3e..01fbffb3168e 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -84,7 +84,7 @@ static int panic_op_write_handler(const char *val,
char valcp[16];
char *s;
- strncpy(valcp, val, 16);
+ strncpy(valcp, val, 15);
valcp[15] = '\0';
s = strstrip(valcp);
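This hunk, and the matching ones in ipmi_watchdog.c further down, address the classic strncpy() pitfall: with a bound equal to the full buffer size, a source of 16 or more characters leaves the copy unterminated (here the explicit valcp[15] = '\0' already guarded against that, so the change hardens the copy itself). Two safe spellings of the same copy (wrapper hypothetical):

#include <linux/string.h>

static void foo_copy_param(const char *val)
{
        char valcp[16];

        /* copy at most size - 1 bytes, then terminate explicitly... */
        strncpy(valcp, val, sizeof(valcp) - 1);
        valcp[sizeof(valcp) - 1] = '\0';

        /* ...or let strscpy() terminate for you; it returns -E2BIG
         * when the source had to be truncated */
        strscpy(valcp, val, sizeof(valcp));
}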
diff --git a/drivers/char/ipmi/ipmi_powernv.c b/drivers/char/ipmi/ipmi_powernv.c
index 07fddbefefe4..bcf493d8e238 100644
--- a/drivers/char/ipmi/ipmi_powernv.c
+++ b/drivers/char/ipmi/ipmi_powernv.c
@@ -250,8 +250,9 @@ static int ipmi_powernv_probe(struct platform_device *pdev)
ipmi->irq = opal_event_request(prop);
}
- if (request_irq(ipmi->irq, ipmi_opal_event, IRQ_TYPE_LEVEL_HIGH,
- "opal-ipmi", ipmi)) {
+ rc = request_irq(ipmi->irq, ipmi_opal_event, IRQ_TYPE_LEVEL_HIGH,
+ "opal-ipmi", ipmi);
+ if (rc) {
dev_warn(dev, "Unable to request irq\n");
goto err_dispose;
}
@@ -264,7 +265,6 @@ static int ipmi_powernv_probe(struct platform_device *pdev)
goto err_unregister;
}
- /* todo: query actual ipmi_device_id */
rc = ipmi_register_smi(&ipmi_powernv_smi_handlers, ipmi, dev, 0);
if (rc) {
dev_warn(dev, "IPMI SMI registration failed (%d)\n", rc);
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 71fad747c0c7..6768cb2dd740 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -1938,8 +1938,10 @@ static void check_for_broken_irqs(struct smi_info *smi_info)
static inline void stop_timer_and_thread(struct smi_info *smi_info)
{
- if (smi_info->thread != NULL)
+ if (smi_info->thread != NULL) {
kthread_stop(smi_info->thread);
+ smi_info->thread = NULL;
+ }
smi_info->timer_can_start = false;
if (smi_info->timer_running)
@@ -2045,6 +2047,7 @@ static int try_smi_init(struct smi_info *new_smi)
int rv = 0;
int i;
char *init_name = NULL;
+ bool platform_device_registered = false;
pr_info(PFX "Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
ipmi_addr_src_to_str(new_smi->io.addr_source),
@@ -2173,6 +2176,7 @@ static int try_smi_init(struct smi_info *new_smi)
rv);
goto out_err;
}
+ platform_device_registered = true;
}
dev_set_drvdata(new_smi->io.dev, new_smi);
@@ -2279,10 +2283,11 @@ out_err:
}
if (new_smi->pdev) {
- platform_device_unregister(new_smi->pdev);
+ if (platform_device_registered)
+ platform_device_unregister(new_smi->pdev);
+ else
+ platform_device_put(new_smi->pdev);
new_smi->pdev = NULL;
- } else if (new_smi->pdev) {
- platform_device_put(new_smi->pdev);
}
kfree(init_name);
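The new platform_device_registered flag encodes a rule the old code got wrong (its else-if arm repeated the same condition and was dead): a platform device that was successfully added must be torn down with platform_device_unregister(), while one that was merely allocated must only drop its reference with platform_device_put(). In outline (names hypothetical):

#include <linux/platform_device.h>

static int foo_create(void)
{
        struct platform_device *pdev;
        int err;

        pdev = platform_device_alloc("foo", -1);
        if (!pdev)
                return -ENOMEM;

        err = platform_device_add(pdev);
        if (err) {
                platform_device_put(pdev);      /* never _unregister() here */
                return err;
        }

        /* ...and on the teardown path, only after a successful add: */
        platform_device_unregister(pdev);
        return 0;
}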
diff --git a/drivers/char/ipmi/ipmi_si_platform.c b/drivers/char/ipmi/ipmi_si_platform.c
index 9573f1116450..f4214870d726 100644
--- a/drivers/char/ipmi/ipmi_si_platform.c
+++ b/drivers/char/ipmi/ipmi_si_platform.c
@@ -40,7 +40,7 @@ MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the"
#endif
#ifdef CONFIG_OF
module_param_named(tryopenfirmware, si_tryopenfirmware, bool, 0);
-MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the"
+MODULE_PARM_DESC(tryopenfirmware, "Setting this to zero will disable the"
" default scan of the interfaces identified via OpenFirmware");
#endif
#ifdef CONFIG_DMI
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 3cfaec728604..f929e72bdac8 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -2071,8 +2071,7 @@ static int ssif_platform_remove(struct platform_device *dev)
return 0;
mutex_lock(&ssif_infos_mutex);
- if (addr_info->client)
- i2c_unregister_device(addr_info->client);
+ i2c_unregister_device(addr_info->client);
list_del(&addr_info->link);
kfree(addr_info);
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 76b270678b50..34bc1f3ca414 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -232,7 +232,7 @@ static int set_param_str(const char *val, const struct kernel_param *kp)
char valcp[16];
char *s;
- strncpy(valcp, val, 16);
+ strncpy(valcp, val, 15);
valcp[15] = '\0';
s = strstrip(valcp);
@@ -298,7 +298,7 @@ module_param(pretimeout, timeout, 0644);
MODULE_PARM_DESC(pretimeout, "Pretimeout value in seconds.");
module_param(panic_wdt_timeout, timeout, 0644);
-MODULE_PARM_DESC(timeout, "Timeout value on kernel panic in seconds.");
+MODULE_PARM_DESC(panic_wdt_timeout, "Timeout value on kernel panic in seconds.");
module_param_cb(action, &param_ops_str, action_op, 0644);
MODULE_PARM_DESC(action, "Timeout action. One of: "
@@ -887,9 +887,9 @@ static int ipmi_open(struct inode *ino, struct file *filep)
}
}
-static unsigned int ipmi_poll(struct file *file, poll_table *wait)
+static __poll_t ipmi_poll(struct file *file, poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &read_q, wait);
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index 9a1aaf538758..819fe37a3683 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -415,10 +415,10 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf,
return count;
}
-static unsigned int cm4040_poll(struct file *filp, poll_table *wait)
+static __poll_t cm4040_poll(struct file *filp, poll_table *wait)
{
struct reader_dev *dev = filp->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(filp, &dev->poll_wait, wait);
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index d256110ba672..7a56d1a13ec3 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -769,10 +769,10 @@ static int pp_release(struct inode *inode, struct file *file)
}
/* No kernel lock held - fine */
-static unsigned int pp_poll(struct file *file, poll_table *wait)
+static __poll_t pp_poll(struct file *file, poll_table *wait)
{
struct pp_struct *pp = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &pp->irq_wait, wait);
if (atomic_read(&pp->irqc))
diff --git a/drivers/char/random.c b/drivers/char/random.c
index ec42c8bb9b0d..80f2c326db47 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -431,9 +431,9 @@ static int crng_init = 0;
static int crng_init_cnt = 0;
#define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE)
static void _extract_crng(struct crng_state *crng,
- __u8 out[CHACHA20_BLOCK_SIZE]);
+ __u32 out[CHACHA20_BLOCK_WORDS]);
static void _crng_backtrack_protect(struct crng_state *crng,
- __u8 tmp[CHACHA20_BLOCK_SIZE], int used);
+ __u32 tmp[CHACHA20_BLOCK_WORDS], int used);
static void process_random_ready_list(void);
static void _get_random_bytes(void *buf, int nbytes);
@@ -817,7 +817,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
unsigned long flags;
int i, num;
union {
- __u8 block[CHACHA20_BLOCK_SIZE];
+ __u32 block[CHACHA20_BLOCK_WORDS];
__u32 key[8];
} buf;
@@ -851,7 +851,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
}
static void _extract_crng(struct crng_state *crng,
- __u8 out[CHACHA20_BLOCK_SIZE])
+ __u32 out[CHACHA20_BLOCK_WORDS])
{
unsigned long v, flags;
@@ -867,7 +867,7 @@ static void _extract_crng(struct crng_state *crng,
spin_unlock_irqrestore(&crng->lock, flags);
}
-static void extract_crng(__u8 out[CHACHA20_BLOCK_SIZE])
+static void extract_crng(__u32 out[CHACHA20_BLOCK_WORDS])
{
struct crng_state *crng = NULL;
@@ -885,7 +885,7 @@ static void extract_crng(__u8 out[CHACHA20_BLOCK_SIZE])
* enough) to mutate the CRNG key to provide backtracking protection.
*/
static void _crng_backtrack_protect(struct crng_state *crng,
- __u8 tmp[CHACHA20_BLOCK_SIZE], int used)
+ __u32 tmp[CHACHA20_BLOCK_WORDS], int used)
{
unsigned long flags;
__u32 *s, *d;
@@ -897,14 +897,14 @@ static void _crng_backtrack_protect(struct crng_state *crng,
used = 0;
}
spin_lock_irqsave(&crng->lock, flags);
- s = (__u32 *) &tmp[used];
+ s = &tmp[used / sizeof(__u32)];
d = &crng->state[4];
for (i=0; i < 8; i++)
*d++ ^= *s++;
spin_unlock_irqrestore(&crng->lock, flags);
}
-static void crng_backtrack_protect(__u8 tmp[CHACHA20_BLOCK_SIZE], int used)
+static void crng_backtrack_protect(__u32 tmp[CHACHA20_BLOCK_WORDS], int used)
{
struct crng_state *crng = NULL;
@@ -920,7 +920,7 @@ static void crng_backtrack_protect(__u8 tmp[CHACHA20_BLOCK_SIZE], int used)
static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
{
ssize_t ret = 0, i = CHACHA20_BLOCK_SIZE;
- __u8 tmp[CHACHA20_BLOCK_SIZE];
+ __u32 tmp[CHACHA20_BLOCK_WORDS];
int large_request = (nbytes > 256);
while (nbytes) {
@@ -1507,7 +1507,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
*/
static void _get_random_bytes(void *buf, int nbytes)
{
- __u8 tmp[CHACHA20_BLOCK_SIZE];
+ __u32 tmp[CHACHA20_BLOCK_WORDS];
trace_get_random_bytes(nbytes, _RET_IP_);
@@ -1784,10 +1784,10 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
return ret;
}
-static unsigned int
+static __poll_t
random_poll(struct file *file, poll_table * wait)
{
- unsigned int mask;
+ __poll_t mask;
poll_wait(file, &random_read_wait, wait);
poll_wait(file, &random_write_wait, wait);
@@ -2114,7 +2114,7 @@ u64 get_random_u64(void)
if (use_lock)
read_lock_irqsave(&batched_entropy_reset_lock, flags);
if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
- extract_crng((u8 *)batch->entropy_u64);
+ extract_crng((__u32 *)batch->entropy_u64);
batch->position = 0;
}
ret = batch->entropy_u64[batch->position++];
@@ -2144,7 +2144,7 @@ u32 get_random_u32(void)
if (use_lock)
read_lock_irqsave(&batched_entropy_reset_lock, flags);
if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
- extract_crng((u8 *)batch->entropy_u32);
+ extract_crng(batch->entropy_u32);
batch->position = 0;
}
ret = batch->entropy_u32[batch->position++];
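A note on the type change that runs through this whole random.c hunk: CHACHA20_BLOCK_WORDS is the 64-byte ChaCha20 block expressed in 32-bit words, i.e. 64 / 4 = 16. Declaring the scratch buffers as __u32 arrays guarantees the 4-byte alignment the keystream generator expects and lets _crng_backtrack_protect() index words directly, as in &tmp[used / sizeof(__u32)], instead of casting a byte pointer.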
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index 5542a438bbd0..c6a317120a55 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -147,7 +147,7 @@ static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
static void rtc_get_rtc_time(struct rtc_time *rtc_tm);
#ifdef RTC_IRQ
-static unsigned int rtc_poll(struct file *file, poll_table *wait);
+static __poll_t rtc_poll(struct file *file, poll_table *wait);
#endif
static void get_rtc_alm_time(struct rtc_time *alm_tm);
@@ -790,7 +790,7 @@ no_irq:
}
#ifdef RTC_IRQ
-static unsigned int rtc_poll(struct file *file, poll_table *wait)
+static __poll_t rtc_poll(struct file *file, poll_table *wait)
{
unsigned long l;
diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c
index 6aa32679fd58..7f49fa0f41d7 100644
--- a/drivers/char/snsc.c
+++ b/drivers/char/snsc.c
@@ -321,10 +321,10 @@ scdrv_write(struct file *file, const char __user *buf,
return status;
}
-static unsigned int
+static __poll_t
scdrv_poll(struct file *file, struct poll_table_struct *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
int status = 0;
struct subch_data_s *sd = (struct subch_data_s *) file->private_data;
unsigned long flags;
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index d3a979e25724..fc041c462aa4 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -940,7 +940,7 @@ static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
return ret;
}
-static unsigned int sonypi_misc_poll(struct file *file, poll_table *wait)
+static __poll_t sonypi_misc_poll(struct file *file, poll_table *wait)
{
poll_wait(file, &sonypi_device.fifo_proc_list, wait);
if (kfifo_len(&sonypi_device.fifo))
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index a30352202f1f..18c81cbe4704 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -26,6 +26,17 @@ menuconfig TCG_TPM
if TCG_TPM
+config HW_RANDOM_TPM
+ bool "TPM HW Random Number Generator support"
+ depends on TCG_TPM && HW_RANDOM && !(TCG_TPM=y && HW_RANDOM=m)
+ default y
+ ---help---
+ This setting exposes the TPM's Random Number Generator as a hwrng
+ device. This allows the kernel to collect randomness from the TPM at
+ boot, and provides the TPM's randomness in /dev/hwrng.
+
+ If unsure, say Y.
+
config TCG_TIS_CORE
tristate
---help---
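A gloss on the new depends expression: !(TCG_TPM=y && HW_RANDOM=m) excludes exactly the combination where built-in TPM code would have to call into a modular hwrng core, which cannot link; every other y/m pairing is allowed. The option moves here from drivers/char/hw_random because, as the Makefile and tpm-chip.c hunks below show, the standalone tpm-rng module is gone and the hwrng is now registered by the TPM core itself.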
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index 34b4bcf46f43..acd758381c58 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -6,8 +6,9 @@ obj-$(CONFIG_TCG_TPM) += tpm.o
tpm-y := tpm-interface.o tpm-dev.o tpm-sysfs.o tpm-chip.o tpm2-cmd.o \
tpm-dev-common.o tpmrm-dev.o tpm1_eventlog.o tpm2_eventlog.o \
tpm2-space.o
-tpm-$(CONFIG_ACPI) += tpm_ppi.o tpm_acpi.o
-tpm-$(CONFIG_OF) += tpm_of.o
+tpm-$(CONFIG_ACPI) += tpm_ppi.o tpm_eventlog_acpi.o
+tpm-$(CONFIG_EFI) += tpm_eventlog_efi.o
+tpm-$(CONFIG_OF) += tpm_eventlog_of.o
obj-$(CONFIG_TCG_TIS_CORE) += tpm_tis_core.o
obj-$(CONFIG_TCG_TIS) += tpm_tis.o
obj-$(CONFIG_TCG_TIS_SPI) += tpm_tis_spi.o
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 0eca20c5a80c..0a62c19937b6 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -26,8 +26,9 @@
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include <linux/major.h>
+#include <linux/tpm_eventlog.h>
+#include <linux/hw_random.h>
#include "tpm.h"
-#include "tpm_eventlog.h"
DEFINE_IDR(dev_nums_idr);
static DEFINE_MUTEX(idr_lock);
@@ -80,21 +81,26 @@ void tpm_put_ops(struct tpm_chip *chip)
EXPORT_SYMBOL_GPL(tpm_put_ops);
/**
- * tpm_chip_find_get() - return tpm_chip for a given chip number
- * @chip_num: id to find
+ * tpm_chip_find_get() - find and reserve a TPM chip
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
*
- * The return'd chip has been tpm_try_get_ops'd and must be released via
- * tpm_put_ops
+ * Finds a TPM chip and reserves its class device and operations. The chip must
+ * be released with tpm_put_ops() after use.
+ *
+ * Return:
+ * A reserved &struct tpm_chip instance.
+ * %NULL if a chip is not found.
+ * %NULL if the chip is not available.
*/
-struct tpm_chip *tpm_chip_find_get(int chip_num)
+struct tpm_chip *tpm_chip_find_get(struct tpm_chip *chip)
{
- struct tpm_chip *chip, *res = NULL;
+ struct tpm_chip *res = NULL;
+ int chip_num = 0;
int chip_prev;
mutex_lock(&idr_lock);
- if (chip_num == TPM_ANY_NUM) {
- chip_num = 0;
+ if (!chip) {
do {
chip_prev = chip_num;
chip = idr_get_next(&dev_nums_idr, &chip_num);
@@ -104,8 +110,7 @@ struct tpm_chip *tpm_chip_find_get(int chip_num)
}
} while (chip_prev != chip_num);
} else {
- chip = idr_find(&dev_nums_idr, chip_num);
- if (chip && !tpm_try_get_ops(chip))
+ if (!tpm_try_get_ops(chip))
res = chip;
}
@@ -387,6 +392,26 @@ static int tpm_add_legacy_sysfs(struct tpm_chip *chip)
return 0;
}
+
+static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ struct tpm_chip *chip = container_of(rng, struct tpm_chip, hwrng);
+
+ return tpm_get_random(chip, data, max);
+}
+
+static int tpm_add_hwrng(struct tpm_chip *chip)
+{
+ if (!IS_ENABLED(CONFIG_HW_RANDOM_TPM))
+ return 0;
+
+ snprintf(chip->hwrng_name, sizeof(chip->hwrng_name),
+ "tpm-rng-%d", chip->dev_num);
+ chip->hwrng.name = chip->hwrng_name;
+ chip->hwrng.read = tpm_hwrng_read;
+ return hwrng_register(&chip->hwrng);
+}
+
/*
* tpm_chip_register() - create a character device for the TPM chip
* @chip: TPM chip to use.
@@ -419,11 +444,13 @@ int tpm_chip_register(struct tpm_chip *chip)
tpm_add_ppi(chip);
+ rc = tpm_add_hwrng(chip);
+ if (rc)
+ goto out_ppi;
+
rc = tpm_add_char_device(chip);
- if (rc) {
- tpm_bios_log_teardown(chip);
- return rc;
- }
+ if (rc)
+ goto out_hwrng;
rc = tpm_add_legacy_sysfs(chip);
if (rc) {
@@ -432,6 +459,14 @@ int tpm_chip_register(struct tpm_chip *chip)
}
return 0;
+
+out_hwrng:
+ if (IS_ENABLED(CONFIG_HW_RANDOM_TPM))
+ hwrng_unregister(&chip->hwrng);
+out_ppi:
+ tpm_bios_log_teardown(chip);
+
+ return rc;
}
EXPORT_SYMBOL_GPL(tpm_chip_register);
@@ -451,6 +486,8 @@ EXPORT_SYMBOL_GPL(tpm_chip_register);
void tpm_chip_unregister(struct tpm_chip *chip)
{
tpm_del_legacy_sysfs(chip);
+ if (IS_ENABLED(CONFIG_HW_RANDOM_TPM))
+ hwrng_unregister(&chip->hwrng);
tpm_bios_log_teardown(chip);
if (chip->flags & TPM_CHIP_FLAG_TPM2)
cdev_device_del(&chip->cdevs, &chip->devs);
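tpm_chip_register() now follows the kernel's usual goto-unwind convention: each facility that registers successfully gains a label that tears it down, and a failure at step N jumps to the label undoing step N-1, so reverse-order cleanup falls out automatically. The skeleton, with trivial stand-ins so it compiles (all names hypothetical):

struct foo { int a, b; };

static int foo_add_a(struct foo *f) { f->a = 1; return 0; }
static void foo_del_a(struct foo *f) { f->a = 0; }
static int foo_add_b(struct foo *f) { f->b = 1; return 0; }
static void foo_del_b(struct foo *f) { f->b = 0; }
static int foo_add_c(struct foo *f) { return -1; /* say this step fails */ }

static int foo_register(struct foo *f)
{
        int rc;

        rc = foo_add_a(f);
        if (rc)
                return rc;

        rc = foo_add_b(f);
        if (rc)
                goto out_a;

        rc = foo_add_c(f);
        if (rc)
                goto out_b;

        return 0;

out_b:
        foo_del_b(f);           /* undo step 2 */
out_a:
        foo_del_a(f);           /* undo step 1 */
        return rc;
}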
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 1d6729be4cd6..76df4fbcf089 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -30,9 +30,9 @@
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include <linux/pm_runtime.h>
+#include <linux/tpm_eventlog.h>
#include "tpm.h"
-#include "tpm_eventlog.h"
#define TPM_MAX_ORDINAL 243
#define TSC_MAX_ORDINAL 12
@@ -328,7 +328,7 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip,
}
EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
-static bool tpm_validate_command(struct tpm_chip *chip,
+static int tpm_validate_command(struct tpm_chip *chip,
struct tpm_space *space,
const u8 *cmd,
size_t len)
@@ -340,10 +340,10 @@ static bool tpm_validate_command(struct tpm_chip *chip,
unsigned int nr_handles;
if (len < TPM_HEADER_SIZE)
- return false;
+ return -EINVAL;
if (!space)
- return true;
+ return 0;
if (chip->flags & TPM_CHIP_FLAG_TPM2 && chip->nr_commands) {
cc = be32_to_cpu(header->ordinal);
@@ -352,7 +352,7 @@ static bool tpm_validate_command(struct tpm_chip *chip,
if (i < 0) {
dev_dbg(&chip->dev, "0x%04X is an invalid command\n",
cc);
- return false;
+ return -EOPNOTSUPP;
}
attrs = chip->cc_attrs_tbl[i];
@@ -362,11 +362,11 @@ static bool tpm_validate_command(struct tpm_chip *chip,
goto err_len;
}
- return true;
+ return 0;
err_len:
dev_dbg(&chip->dev,
"%s: insufficient command length %zu", __func__, len);
- return false;
+ return -EINVAL;
}
/**
@@ -391,8 +391,20 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
unsigned long stop;
bool need_locality;
- if (!tpm_validate_command(chip, space, buf, bufsiz))
- return -EINVAL;
+ rc = tpm_validate_command(chip, space, buf, bufsiz);
+ if (rc == -EINVAL)
+ return rc;
+ /*
+ * If the command is not implemented by the TPM, synthesize a
+ * response with a TPM2_RC_COMMAND_CODE return for user-space.
+ */
+ if (rc == -EOPNOTSUPP) {
+ header->length = cpu_to_be32(sizeof(*header));
+ header->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS);
+ header->return_code = cpu_to_be32(TPM2_RC_COMMAND_CODE |
+ TSS2_RESMGR_TPM_RC_LAYER);
+ return bufsiz;
+ }
if (bufsiz > TPM_BUFSIZE)
bufsiz = TPM_BUFSIZE;
@@ -413,6 +425,9 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
if (chip->dev.parent)
pm_runtime_get_sync(chip->dev.parent);
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, true);
+
/* Store the decision as chip->locality will be changed. */
need_locality = chip->locality == -1;
@@ -489,6 +504,9 @@ out:
chip->locality = -1;
}
out_no_locality:
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, false);
+
if (chip->dev.parent)
pm_runtime_put_sync(chip->dev.parent);
@@ -809,19 +827,20 @@ int tpm_pcr_read_dev(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
}
/**
- * tpm_is_tpm2 - is the chip a TPM2 chip?
- * @chip_num: tpm idx # or ANY
+ * tpm_is_tpm2 - do we have a TPM2 chip?
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
*
- * Returns < 0 on error, and 1 or 0 on success depending whether the chip
- * is a TPM2 chip.
+ * Return:
+ * 1 if we have a TPM2 chip.
+ * 0 if we don't have a TPM2 chip.
+ * A negative number for system errors (errno).
*/
-int tpm_is_tpm2(u32 chip_num)
+int tpm_is_tpm2(struct tpm_chip *chip)
{
- struct tpm_chip *chip;
int rc;
- chip = tpm_chip_find_get(chip_num);
- if (chip == NULL)
+ chip = tpm_chip_find_get(chip);
+ if (!chip)
return -ENODEV;
rc = (chip->flags & TPM_CHIP_FLAG_TPM2) != 0;
@@ -833,23 +852,19 @@ int tpm_is_tpm2(u32 chip_num)
EXPORT_SYMBOL_GPL(tpm_is_tpm2);
/**
- * tpm_pcr_read - read a pcr value
- * @chip_num: tpm idx # or ANY
- * @pcr_idx: pcr idx to retrieve
- * @res_buf: TPM_PCR value
- * size of res_buf is 20 bytes (or NULL if you don't care)
+ * tpm_pcr_read - read a PCR value from SHA1 bank
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
+ * @pcr_idx: the PCR to be retrieved
+ * @res_buf: the value of the PCR
*
- * The TPM driver should be built-in, but for whatever reason it
- * isn't, protect against the chip disappearing, by incrementing
- * the module usage count.
+ * Return: same as with tpm_transmit_cmd()
*/
-int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf)
+int tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
{
- struct tpm_chip *chip;
int rc;
- chip = tpm_chip_find_get(chip_num);
- if (chip == NULL)
+ chip = tpm_chip_find_get(chip);
+ if (!chip)
return -ENODEV;
if (chip->flags & TPM_CHIP_FLAG_TPM2)
rc = tpm2_pcr_read(chip, pcr_idx, res_buf);
@@ -889,25 +904,26 @@ static int tpm1_pcr_extend(struct tpm_chip *chip, int pcr_idx, const u8 *hash,
}
/**
- * tpm_pcr_extend - extend pcr value with hash
- * @chip_num: tpm idx # or ANY
- * @pcr_idx: pcr idx to extend
- * @hash: hash value used to extend pcr value
+ * tpm_pcr_extend - extend a PCR value in the SHA1 bank
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
+ * @pcr_idx: the PCR to be extended
+ * @hash: the hash value used to extend the PCR value
*
- * The TPM driver should be built-in, but for whatever reason it
- * isn't, protect against the chip disappearing, by incrementing
- * the module usage count.
+ * Note: with TPM 2.0, this also extends those banks whose digest sizes are
+ * known to the crypto subsystem, in order to prevent malicious use of those
+ * PCR banks. In the future we should determine digest sizes dynamically.
+ *
+ * Return: same as with tpm_transmit_cmd()
*/
-int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash)
+int tpm_pcr_extend(struct tpm_chip *chip, int pcr_idx, const u8 *hash)
{
int rc;
- struct tpm_chip *chip;
struct tpm2_digest digest_list[ARRAY_SIZE(chip->active_banks)];
u32 count = 0;
int i;
- chip = tpm_chip_find_get(chip_num);
- if (chip == NULL)
+ chip = tpm_chip_find_get(chip);
+ if (!chip)
return -ENODEV;
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
@@ -1019,82 +1035,29 @@ out:
return rc;
}
-int tpm_send(u32 chip_num, void *cmd, size_t buflen)
+/**
+ * tpm_send - send a TPM command
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
+ * @cmd: a TPM command buffer
+ * @buflen: the length of the TPM command buffer
+ *
+ * Return: same as with tpm_transmit_cmd()
+ */
+int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen)
{
- struct tpm_chip *chip;
int rc;
- chip = tpm_chip_find_get(chip_num);
- if (chip == NULL)
+ chip = tpm_chip_find_get(chip);
+ if (!chip)
return -ENODEV;
rc = tpm_transmit_cmd(chip, NULL, cmd, buflen, 0, 0,
- "attempting tpm_cmd");
+ "attempting to a send a command");
tpm_put_ops(chip);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_send);
-static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
- bool check_cancel, bool *canceled)
-{
- u8 status = chip->ops->status(chip);
-
- *canceled = false;
- if ((status & mask) == mask)
- return true;
- if (check_cancel && chip->ops->req_canceled(chip, status)) {
- *canceled = true;
- return true;
- }
- return false;
-}
-
-int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
- wait_queue_head_t *queue, bool check_cancel)
-{
- unsigned long stop;
- long rc;
- u8 status;
- bool canceled = false;
-
- /* check current status */
- status = chip->ops->status(chip);
- if ((status & mask) == mask)
- return 0;
-
- stop = jiffies + timeout;
-
- if (chip->flags & TPM_CHIP_FLAG_IRQ) {
-again:
- timeout = stop - jiffies;
- if ((long)timeout <= 0)
- return -ETIME;
- rc = wait_event_interruptible_timeout(*queue,
- wait_for_tpm_stat_cond(chip, mask, check_cancel,
- &canceled),
- timeout);
- if (rc > 0) {
- if (canceled)
- return -ECANCELED;
- return 0;
- }
- if (rc == -ERESTARTSYS && freezing(current)) {
- clear_thread_flag(TIF_SIGPENDING);
- goto again;
- }
- } else {
- do {
- tpm_msleep(TPM_TIMEOUT);
- status = chip->ops->status(chip);
- if ((status & mask) == mask)
- return 0;
- } while (time_before(jiffies, stop));
- }
- return -ETIME;
-}
-EXPORT_SYMBOL_GPL(wait_for_tpm_stat);
-
#define TPM_ORD_SAVESTATE 152
#define SAVESTATE_RESULT_SIZE 10
@@ -1187,16 +1150,15 @@ static const struct tpm_input_header tpm_getrandom_header = {
};
/**
- * tpm_get_random() - Get random bytes from the tpm's RNG
- * @chip_num: A specific chip number for the request or TPM_ANY_NUM
- * @out: destination buffer for the random bytes
- * @max: the max number of bytes to write to @out
+ * tpm_get_random() - get random bytes from the TPM's RNG
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
+ * @out: destination buffer for the random bytes
+ * @max: the max number of bytes to write to @out
*
- * Returns < 0 on error and the number of bytes read on success
+ * Return: number of random bytes read on success, a negative errno on failure
*/
-int tpm_get_random(u32 chip_num, u8 *out, size_t max)
+int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max)
{
- struct tpm_chip *chip;
struct tpm_cmd_t tpm_cmd;
u32 recd, num_bytes = min_t(u32, max, TPM_MAX_RNG_DATA), rlength;
int err, total = 0, retries = 5;
@@ -1205,8 +1167,8 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
if (!out || !num_bytes || max > TPM_MAX_RNG_DATA)
return -EINVAL;
- chip = tpm_chip_find_get(chip_num);
- if (chip == NULL)
+ chip = tpm_chip_find_get(chip);
+ if (!chip)
return -ENODEV;
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
@@ -1248,22 +1210,23 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
EXPORT_SYMBOL_GPL(tpm_get_random);
/**
- * tpm_seal_trusted() - seal a trusted key
- * @chip_num: A specific chip number for the request or TPM_ANY_NUM
- * @options: authentication values and other options
- * @payload: the key data in clear and encrypted form
+ * tpm_seal_trusted() - seal a trusted key payload
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
+ * @options: authentication values and other options
+ * @payload: the key data in clear and encrypted form
*
- * Returns < 0 on error and 0 on success. At the moment, only TPM 2.0 chips
- * are supported.
+ * Note: only TPM 2.0 chips are supported. The TPM 1.x implementation lives in
+ * the keyring subsystem.
+ *
+ * Return: same as with tpm_transmit_cmd()
*/
-int tpm_seal_trusted(u32 chip_num, struct trusted_key_payload *payload,
+int tpm_seal_trusted(struct tpm_chip *chip, struct trusted_key_payload *payload,
struct trusted_key_options *options)
{
- struct tpm_chip *chip;
int rc;
- chip = tpm_chip_find_get(chip_num);
- if (chip == NULL || !(chip->flags & TPM_CHIP_FLAG_TPM2))
+ chip = tpm_chip_find_get(chip);
+ if (!chip || !(chip->flags & TPM_CHIP_FLAG_TPM2))
return -ENODEV;
rc = tpm2_seal_trusted(chip, payload, options);
@@ -1275,21 +1238,23 @@ EXPORT_SYMBOL_GPL(tpm_seal_trusted);
/**
* tpm_unseal_trusted() - unseal a trusted key
- * @chip_num: A specific chip number for the request or TPM_ANY_NUM
- * @options: authentication values and other options
- * @payload: the key data in clear and encrypted form
+ * @chip: a &struct tpm_chip instance, %NULL for the default chip
+ * @options: authentication values and other options
+ * @payload: the key data in clear and encrypted form
+ *
+ * Note: only TPM 2.0 chips are supported. The TPM 1.x implementation lives in
+ * the keyring subsystem.
*
- * Returns < 0 on error and 0 on success. At the moment, only TPM 2.0 chips
- * are supported.
+ * Return: same as with tpm_transmit_cmd()
*/
-int tpm_unseal_trusted(u32 chip_num, struct trusted_key_payload *payload,
+int tpm_unseal_trusted(struct tpm_chip *chip,
+ struct trusted_key_payload *payload,
struct trusted_key_options *options)
{
- struct tpm_chip *chip;
int rc;
- chip = tpm_chip_find_get(chip_num);
- if (chip == NULL || !(chip->flags & TPM_CHIP_FLAG_TPM2))
+ chip = tpm_chip_find_get(chip);
+ if (!chip || !(chip->flags & TPM_CHIP_FLAG_TPM2))
return -ENODEV;
rc = tpm2_unseal_trusted(chip, payload, options);
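The synthesized response in the tpm_transmit() hunk above packs two values into the 32-bit return_code field: the TPM 2.0 return code in the low 16 bits and a TSS layer indicator (resource manager, layer 11) in the high 16 bits, using the TSS2_* constants added to tpm.h below. A minimal user-space sketch of decoding such a layered code (the constants are copied from this patch; everything else is illustrative):

	#include <stdint.h>
	#include <stdio.h>

	#define TSS2_RC_LAYER_SHIFT      16
	#define TSS2_RESMGR_TPM_RC_LAYER (11 << TSS2_RC_LAYER_SHIFT)
	#define TPM2_RC_COMMAND_CODE     0x0143

	int main(void)
	{
		uint32_t rc = TPM2_RC_COMMAND_CODE | TSS2_RESMGR_TPM_RC_LAYER;

		/* High 16 bits name the layer, low 16 bits carry the TPM RC. */
		printf("layer %u, rc 0x%04x\n",
		       rc >> TSS2_RC_LAYER_SHIFT, rc & 0xffff);
		return 0;
	}

Run against the values above this prints "layer 11, rc 0x0143", which is what a user-space resource manager stack expects for an unimplemented command.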
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 528cffbd49d3..f895fba4e20d 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -26,6 +26,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/fs.h>
+#include <linux/hw_random.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/platform_device.h>
@@ -34,6 +35,7 @@
#include <linux/acpi.h>
#include <linux/cdev.h>
#include <linux/highmem.h>
+#include <linux/tpm_eventlog.h>
#include <crypto/hash_info.h>
#ifdef CONFIG_X86
@@ -93,12 +95,17 @@ enum tpm2_structures {
TPM2_ST_SESSIONS = 0x8002,
};
+/* Indicates which layer of the software stack the error comes from */
+#define TSS2_RC_LAYER_SHIFT 16
+#define TSS2_RESMGR_TPM_RC_LAYER (11 << TSS2_RC_LAYER_SHIFT)
+
enum tpm2_return_codes {
TPM2_RC_SUCCESS = 0x0000,
TPM2_RC_HASH = 0x0083, /* RC_FMT1 */
TPM2_RC_HANDLE = 0x008B,
TPM2_RC_INITIALIZE = 0x0100, /* RC_VER1 */
TPM2_RC_DISABLED = 0x0120,
+ TPM2_RC_COMMAND_CODE = 0x0143,
TPM2_RC_TESTING = 0x090A, /* RC_WARN */
TPM2_RC_REFERENCE_H0 = 0x0910,
};
@@ -210,6 +217,9 @@ struct tpm_chip {
int dev_num; /* /dev/tpm# */
unsigned long is_open; /* only one allowed */
+ char hwrng_name[64];
+ struct hwrng hwrng;
+
struct mutex tpm_mutex; /* tpm is processing */
unsigned long timeout_a; /* jiffies */
@@ -385,10 +395,6 @@ struct tpm_cmd_t {
tpm_cmd_params params;
} __packed;
-struct tpm2_digest {
- u16 alg_id;
- u8 digest[SHA512_DIGEST_SIZE];
-} __packed;
/* A string buffer type for constructing TPM commands. This is based on the
* ideas of string buffer code in security/keys/trusted.h but is heap based
@@ -512,16 +518,14 @@ int tpm_do_selftest(struct tpm_chip *chip);
unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
int tpm_pm_suspend(struct device *dev);
int tpm_pm_resume(struct device *dev);
-int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
- wait_queue_head_t *queue, bool check_cancel);
static inline void tpm_msleep(unsigned int delay_msec)
{
- usleep_range(delay_msec * 1000,
- (delay_msec * 1000) + TPM_TIMEOUT_RANGE_US);
+ usleep_range((delay_msec * 1000) - TPM_TIMEOUT_RANGE_US,
+ delay_msec * 1000);
};
-struct tpm_chip *tpm_chip_find_get(int chip_num);
+struct tpm_chip *tpm_chip_find_get(struct tpm_chip *chip);
__must_check int tpm_try_get_ops(struct tpm_chip *chip);
void tpm_put_ops(struct tpm_chip *chip);
@@ -575,4 +579,34 @@ int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u32 cc,
u8 *cmd);
int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space,
u32 cc, u8 *buf, size_t *bufsiz);
+
+extern const struct seq_operations tpm2_binary_b_measurements_seqops;
+
+#if defined(CONFIG_ACPI)
+int tpm_read_log_acpi(struct tpm_chip *chip);
+#else
+static inline int tpm_read_log_acpi(struct tpm_chip *chip)
+{
+ return -ENODEV;
+}
+#endif
+#if defined(CONFIG_OF)
+int tpm_read_log_of(struct tpm_chip *chip);
+#else
+static inline int tpm_read_log_of(struct tpm_chip *chip)
+{
+ return -ENODEV;
+}
+#endif
+#if defined(CONFIG_EFI)
+int tpm_read_log_efi(struct tpm_chip *chip);
+#else
+static inline int tpm_read_log_efi(struct tpm_chip *chip)
+{
+ return -ENODEV;
+}
+#endif
+
+int tpm_bios_log_setup(struct tpm_chip *chip);
+void tpm_bios_log_teardown(struct tpm_chip *chip);
#endif
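Note the inverted tpm_msleep() window above: usleep_range() now treats the requested delay as an upper bound rather than a lower one, shaving up to TPM_TIMEOUT_RANGE_US off every poll. A worked example, assuming the TPM_TIMEOUT_RANGE_US value of 300 defined elsewhere in this header (not visible in this hunk):

	tpm_msleep(5);
	/* before: usleep_range(5000, 5300) -- at least 5 ms per poll */
	/* after:  usleep_range(4700, 5000) -- at most 5 ms per poll  */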
diff --git a/drivers/char/tpm/tpm1_eventlog.c b/drivers/char/tpm/tpm1_eventlog.c
index 9a8605e500b5..add798bd69d0 100644
--- a/drivers/char/tpm/tpm1_eventlog.c
+++ b/drivers/char/tpm/tpm1_eventlog.c
@@ -21,13 +21,14 @@
*/
#include <linux/seq_file.h>
+#include <linux/efi.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/tpm_eventlog.h>
#include "tpm.h"
-#include "tpm_eventlog.h"
static const char* tcpa_event_type_strings[] = {
@@ -371,6 +372,10 @@ static int tpm_read_log(struct tpm_chip *chip)
if (rc != -ENODEV)
return rc;
+ rc = tpm_read_log_efi(chip);
+ if (rc != -ENODEV)
+ return rc;
+
return tpm_read_log_of(chip);
}
@@ -388,11 +393,13 @@ int tpm_bios_log_setup(struct tpm_chip *chip)
{
const char *name = dev_name(&chip->dev);
unsigned int cnt;
+ int log_version;
int rc = 0;
rc = tpm_read_log(chip);
- if (rc)
+ if (rc < 0)
return rc;
+ log_version = rc;
cnt = 0;
chip->bios_dir[cnt] = securityfs_create_dir(name, NULL);
@@ -404,7 +411,7 @@ int tpm_bios_log_setup(struct tpm_chip *chip)
cnt++;
chip->bin_log_seqops.chip = chip;
- if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ if (log_version == EFI_TCG2_EVENT_LOG_FORMAT_TCG_2)
chip->bin_log_seqops.seqops =
&tpm2_binary_b_measurements_seqops;
else
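After this change the tpm_read_log_*() backends no longer return a bare 0 on success: they return the EFI_TCG2_EVENT_LOG_FORMAT_* identifier of the log they found (see the ACPI and OF hunks below), and tpm_bios_log_setup() stores that positive value in log_version to pick the seq_file operations. A hypothetical additional backend following the same convention might look like this sketch, where log_source_present() and the copy step are placeholders:

	int tpm_read_log_example(struct tpm_chip *chip)
	{
		if (!log_source_present())	/* placeholder check */
			return -ENODEV;		/* lets the next backend run */

		/* ... copy the measurement log into chip->log ... */

		/* On success, report the log format rather than 0. */
		return EFI_TCG2_EVENT_LOG_FORMAT_TCG_2;
	}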
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index f40d20671a78..c17e75348a99 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -849,28 +849,26 @@ static const struct tpm_input_header tpm2_selftest_header = {
static int tpm2_do_selftest(struct tpm_chip *chip)
{
int rc;
- unsigned int delay_msec = 20;
+ unsigned int delay_msec = 10;
long duration;
struct tpm2_cmd cmd;
duration = jiffies_to_msecs(
tpm2_calc_ordinal_duration(chip, TPM2_CC_SELF_TEST));
- while (duration > 0) {
+ while (1) {
cmd.header.in = tpm2_selftest_header;
cmd.params.selftest_in.full_test = 0;
rc = tpm_transmit_cmd(chip, NULL, &cmd, TPM2_SELF_TEST_IN_SIZE,
0, 0, "continue selftest");
- if (rc != TPM2_RC_TESTING)
+ if (rc != TPM2_RC_TESTING || delay_msec >= duration)
break;
- tpm_msleep(delay_msec);
- duration -= delay_msec;
-
- /* wait longer the next round */
+ /* wait longer than before */
delay_msec *= 2;
+ tpm_msleep(delay_msec);
}
return rc;
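The reworked self-test loop above keeps the exponential backoff but starts at 10 ms instead of 20 ms, doubles the delay before sleeping rather than after, and gives up once the delay reaches the ordinal duration instead of decrementing a time budget. The same shape as a self-contained user-space sketch, where self_test_done() stands in for the rc != TPM2_RC_TESTING test:

	#include <stdbool.h>
	#include <unistd.h>

	static bool self_test_done(int round)
	{
		return round >= 3;	/* pretend the device finishes on round 3 */
	}

	static void poll_with_backoff(unsigned int duration_msec)
	{
		unsigned int delay_msec = 10;	/* same initial delay as above */
		int round = 0;

		while (1) {
			if (self_test_done(round++) || delay_msec >= duration_msec)
				break;
			/* wait longer than before */
			delay_msec *= 2;
			usleep(delay_msec * 1000);
		}
	}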
diff --git a/drivers/char/tpm/tpm2_eventlog.c b/drivers/char/tpm/tpm2_eventlog.c
index 34a8afa69138..1ce4411292ba 100644
--- a/drivers/char/tpm/tpm2_eventlog.c
+++ b/drivers/char/tpm/tpm2_eventlog.c
@@ -21,9 +21,9 @@
#include <linux/security.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/tpm_eventlog.h>
#include "tpm.h"
-#include "tpm_eventlog.h"
/*
* calc_tpm2_event_size() - calculate the event size, where event
diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_eventlog_acpi.c
index 169edf3ce86d..66f19e93c216 100644
--- a/drivers/char/tpm/tpm_acpi.c
+++ b/drivers/char/tpm/tpm_eventlog_acpi.c
@@ -25,9 +25,9 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/acpi.h>
+#include <linux/tpm_eventlog.h>
#include "tpm.h"
-#include "tpm_eventlog.h"
struct acpi_tcpa {
struct acpi_table_header hdr;
@@ -102,7 +102,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
memcpy_fromio(log->bios_event_log, virt, len);
acpi_os_unmap_iomem(virt, len);
- return 0;
+ return EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
err:
kfree(log->bios_event_log);
diff --git a/drivers/char/tpm/tpm_eventlog_efi.c b/drivers/char/tpm/tpm_eventlog_efi.c
new file mode 100644
index 000000000000..e3f9ffd341d2
--- /dev/null
+++ b/drivers/char/tpm/tpm_eventlog_efi.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2017 Google
+ *
+ * Authors:
+ * Thiebaud Weksteen <tweek@google.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/efi.h>
+#include <linux/tpm_eventlog.h>
+
+#include "tpm.h"
+
+/* read binary bios log from EFI configuration table */
+int tpm_read_log_efi(struct tpm_chip *chip)
+{
+
+ struct linux_efi_tpm_eventlog *log_tbl;
+ struct tpm_bios_log *log;
+ u32 log_size;
+ u8 tpm_log_version;
+
+ if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
+ return -ENODEV;
+
+ if (efi.tpm_log == EFI_INVALID_TABLE_ADDR)
+ return -ENODEV;
+
+ log = &chip->log;
+
+ log_tbl = memremap(efi.tpm_log, sizeof(*log_tbl), MEMREMAP_WB);
+ if (!log_tbl) {
+ pr_err("Could not map UEFI TPM log table !\n");
+ return -ENOMEM;
+ }
+
+ log_size = log_tbl->size;
+ memunmap(log_tbl);
+
+ log_tbl = memremap(efi.tpm_log, sizeof(*log_tbl) + log_size,
+ MEMREMAP_WB);
+ if (!log_tbl) {
+ pr_err("Could not map UEFI TPM log table payload!\n");
+ return -ENOMEM;
+ }
+
+ /* malloc EventLog space */
+ log->bios_event_log = kmalloc(log_size, GFP_KERNEL);
+ if (!log->bios_event_log)
+ goto err_memunmap;
+ memcpy(log->bios_event_log, log_tbl->log, log_size);
+ log->bios_event_log_end = log->bios_event_log + log_size;
+
+ tpm_log_version = log_tbl->version;
+ memunmap(log_tbl);
+ return tpm_log_version;
+
+err_memunmap:
+ memunmap(log_tbl);
+ return -ENOMEM;
+}
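tpm_read_log_efi() above maps the EFI table twice: a first memremap() of just the fixed-size header to discover the payload length, then a second, full-size mapping before copying the log out. The pattern in isolation, as a kernel-style sketch built on the same APIs:

	#include <linux/efi.h>
	#include <linux/io.h>

	/* Sketch: two-stage mapping when the payload size sits in the header. */
	static void *map_header_and_payload(phys_addr_t pa, u32 *payload_size)
	{
		struct linux_efi_tpm_eventlog *hdr;
		u32 size;

		hdr = memremap(pa, sizeof(*hdr), MEMREMAP_WB);	/* header only */
		if (!hdr)
			return NULL;
		size = hdr->size;
		memunmap(hdr);

		/* Remap with the payload now that its size is known. */
		hdr = memremap(pa, sizeof(*hdr) + size, MEMREMAP_WB);
		if (hdr)
			*payload_size = size;
		return hdr;
	}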
diff --git a/drivers/char/tpm/tpm_of.c b/drivers/char/tpm/tpm_eventlog_of.c
index aadb7f464076..96fd5646f866 100644
--- a/drivers/char/tpm/tpm_of.c
+++ b/drivers/char/tpm/tpm_eventlog_of.c
@@ -17,9 +17,9 @@
#include <linux/slab.h>
#include <linux/of.h>
+#include <linux/tpm_eventlog.h>
#include "tpm.h"
-#include "tpm_eventlog.h"
int tpm_read_log_of(struct tpm_chip *chip)
{
@@ -76,5 +76,7 @@ int tpm_read_log_of(struct tpm_chip *chip)
memcpy(log->bios_event_log, __va(base), size);
- return 0;
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ return EFI_TCG2_EVENT_LOG_FORMAT_TCG_2;
+ return EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
}
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index 79d6bbb58e39..c1dd39eaaeeb 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -665,9 +665,9 @@ out_err:
}
static const struct i2c_device_id tpm_tis_i2c_table[] = {
- {"tpm_i2c_infineon", 0},
- {"slb9635tt", 0},
- {"slb9645tt", 1},
+ {"tpm_i2c_infineon"},
+ {"slb9635tt"},
+ {"slb9645tt"},
{},
};
@@ -675,24 +675,9 @@ MODULE_DEVICE_TABLE(i2c, tpm_tis_i2c_table);
#ifdef CONFIG_OF
static const struct of_device_id tpm_tis_i2c_of_match[] = {
- {
- .name = "tpm_i2c_infineon",
- .type = "tpm",
- .compatible = "infineon,tpm_i2c_infineon",
- .data = (void *)0
- },
- {
- .name = "slb9635tt",
- .type = "tpm",
- .compatible = "infineon,slb9635tt",
- .data = (void *)0
- },
- {
- .name = "slb9645tt",
- .type = "tpm",
- .compatible = "infineon,slb9645tt",
- .data = (void *)1
- },
+ {.compatible = "infineon,tpm_i2c_infineon"},
+ {.compatible = "infineon,slb9635tt"},
+ {.compatible = "infineon,slb9645tt"},
{},
};
MODULE_DEVICE_TABLE(of, tpm_tis_i2c_of_match);
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index e2d1055fb814..f08949a5f678 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -133,93 +133,14 @@ static int check_acpi_tpm2(struct device *dev)
}
#endif
-#ifdef CONFIG_X86
-#define INTEL_LEGACY_BLK_BASE_ADDR 0xFED08000
-#define ILB_REMAP_SIZE 0x100
-#define LPC_CNTRL_REG_OFFSET 0x84
-#define LPC_CLKRUN_EN (1 << 2)
-
-static void __iomem *ilb_base_addr;
-
-static inline bool is_bsw(void)
-{
- return ((boot_cpu_data.x86_model == INTEL_FAM6_ATOM_AIRMONT) ? 1 : 0);
-}
-
-/**
- * tpm_platform_begin_xfer() - clear LPC CLKRUN_EN i.e. clocks will be running
- */
-static void tpm_platform_begin_xfer(void)
-{
- u32 clkrun_val;
-
- if (!is_bsw())
- return;
-
- clkrun_val = ioread32(ilb_base_addr + LPC_CNTRL_REG_OFFSET);
-
- /* Disable LPC CLKRUN# */
- clkrun_val &= ~LPC_CLKRUN_EN;
- iowrite32(clkrun_val, ilb_base_addr + LPC_CNTRL_REG_OFFSET);
-
- /*
- * Write any random value on port 0x80 which is on LPC, to make
- * sure LPC clock is running before sending any TPM command.
- */
- outb(0xCC, 0x80);
-
-}
-
-/**
- * tpm_platform_end_xfer() - set LPC CLKRUN_EN i.e. clocks can be turned off
- */
-static void tpm_platform_end_xfer(void)
-{
- u32 clkrun_val;
-
- if (!is_bsw())
- return;
-
- clkrun_val = ioread32(ilb_base_addr + LPC_CNTRL_REG_OFFSET);
-
- /* Enable LPC CLKRUN# */
- clkrun_val |= LPC_CLKRUN_EN;
- iowrite32(clkrun_val, ilb_base_addr + LPC_CNTRL_REG_OFFSET);
-
- /*
- * Write any random value on port 0x80 which is on LPC, to make
- * sure LPC clock is running before sending any TPM command.
- */
- outb(0xCC, 0x80);
-
-}
-#else
-static inline bool is_bsw(void)
-{
- return false;
-}
-
-static void tpm_platform_begin_xfer(void)
-{
-}
-
-static void tpm_platform_end_xfer(void)
-{
-}
-#endif
-
static int tpm_tcg_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
u8 *result)
{
struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
- tpm_platform_begin_xfer();
-
while (len--)
*result++ = ioread8(phy->iobase + addr);
- tpm_platform_end_xfer();
-
return 0;
}
@@ -228,13 +149,9 @@ static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
{
struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
- tpm_platform_begin_xfer();
-
while (len--)
iowrite8(*value++, phy->iobase + addr);
- tpm_platform_end_xfer();
-
return 0;
}
@@ -242,12 +159,8 @@ static int tpm_tcg_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
{
struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
- tpm_platform_begin_xfer();
-
*result = ioread16(phy->iobase + addr);
- tpm_platform_end_xfer();
-
return 0;
}
@@ -255,12 +168,8 @@ static int tpm_tcg_read32(struct tpm_tis_data *data, u32 addr, u32 *result)
{
struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
- tpm_platform_begin_xfer();
-
*result = ioread32(phy->iobase + addr);
- tpm_platform_end_xfer();
-
return 0;
}
@@ -268,12 +177,8 @@ static int tpm_tcg_write32(struct tpm_tis_data *data, u32 addr, u32 value)
{
struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
- tpm_platform_begin_xfer();
-
iowrite32(value, phy->iobase + addr);
- tpm_platform_end_xfer();
-
return 0;
}
@@ -461,11 +366,6 @@ static int __init init_tis(void)
if (rc)
goto err_force;
-#ifdef CONFIG_X86
- if (is_bsw())
- ilb_base_addr = ioremap(INTEL_LEGACY_BLK_BASE_ADDR,
- ILB_REMAP_SIZE);
-#endif
rc = platform_driver_register(&tis_drv);
if (rc)
goto err_platform;
@@ -484,10 +384,6 @@ err_pnp:
err_platform:
if (force_pdev)
platform_device_unregister(force_pdev);
-#ifdef CONFIG_X86
- if (is_bsw())
- iounmap(ilb_base_addr);
-#endif
err_force:
return rc;
}
@@ -497,10 +393,6 @@ static void __exit cleanup_tis(void)
pnp_unregister_driver(&tis_pnp_driver);
platform_driver_unregister(&tis_drv);
-#ifdef CONFIG_X86
- if (is_bsw())
- iounmap(ilb_base_addr);
-#endif
if (force_pdev)
platform_device_unregister(force_pdev);
}
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index fdde971bc810..183a5f54d875 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -31,6 +31,74 @@
#include "tpm.h"
#include "tpm_tis_core.h"
+/* This is a polling delay to check for status and burstcount.
+ * As per DDWG input, the expectation is that the status and burstcount
+ * checks should return within a few usecs.
+ */
+#define TPM_POLL_SLEEP 1 /* msec */
+
+static void tpm_tis_clkrun_enable(struct tpm_chip *chip, bool value);
+
+static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
+ bool check_cancel, bool *canceled)
+{
+ u8 status = chip->ops->status(chip);
+
+ *canceled = false;
+ if ((status & mask) == mask)
+ return true;
+ if (check_cancel && chip->ops->req_canceled(chip, status)) {
+ *canceled = true;
+ return true;
+ }
+ return false;
+}
+
+static int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask,
+ unsigned long timeout, wait_queue_head_t *queue,
+ bool check_cancel)
+{
+ unsigned long stop;
+ long rc;
+ u8 status;
+ bool canceled = false;
+
+ /* check current status */
+ status = chip->ops->status(chip);
+ if ((status & mask) == mask)
+ return 0;
+
+ stop = jiffies + timeout;
+
+ if (chip->flags & TPM_CHIP_FLAG_IRQ) {
+again:
+ timeout = stop - jiffies;
+ if ((long)timeout <= 0)
+ return -ETIME;
+ rc = wait_event_interruptible_timeout(*queue,
+ wait_for_tpm_stat_cond(chip, mask, check_cancel,
+ &canceled),
+ timeout);
+ if (rc > 0) {
+ if (canceled)
+ return -ECANCELED;
+ return 0;
+ }
+ if (rc == -ERESTARTSYS && freezing(current)) {
+ clear_thread_flag(TIF_SIGPENDING);
+ goto again;
+ }
+ } else {
+ do {
+ tpm_msleep(TPM_POLL_SLEEP);
+ status = chip->ops->status(chip);
+ if ((status & mask) == mask)
+ return 0;
+ } while (time_before(jiffies, stop));
+ }
+ return -ETIME;
+}
+
/* Before we attempt to access the TPM we must see that the valid bit is set.
* The specification says that this bit is 0 at reset and remains 0 until the
* 'TPM has gone through its self test and initialization and has established
@@ -164,7 +232,7 @@ static int get_burstcount(struct tpm_chip *chip)
burstcnt = (value >> 8) & 0xFFFF;
if (burstcnt)
return burstcnt;
- tpm_msleep(TPM_TIMEOUT);
+ tpm_msleep(TPM_POLL_SLEEP);
} while (time_before(jiffies, stop));
return -EBUSY;
}
@@ -421,19 +489,28 @@ static bool tpm_tis_update_timeouts(struct tpm_chip *chip,
int i, rc;
u32 did_vid;
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, true);
+
rc = tpm_tis_read32(priv, TPM_DID_VID(0), &did_vid);
if (rc < 0)
- return rc;
+ goto out;
for (i = 0; i != ARRAY_SIZE(vendor_timeout_overrides); i++) {
if (vendor_timeout_overrides[i].did_vid != did_vid)
continue;
memcpy(timeout_cap, vendor_timeout_overrides[i].timeout_us,
sizeof(vendor_timeout_overrides[i].timeout_us));
- return true;
+ rc = true;
+ goto out;
}
- return false;
+ rc = false;
+
+out:
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, false);
+
+ return rc;
}
/*
@@ -653,14 +730,73 @@ void tpm_tis_remove(struct tpm_chip *chip)
u32 interrupt;
int rc;
+ tpm_tis_clkrun_enable(chip, true);
+
rc = tpm_tis_read32(priv, reg, &interrupt);
if (rc < 0)
interrupt = 0;
tpm_tis_write32(priv, reg, ~TPM_GLOBAL_INT_ENABLE & interrupt);
+
+ tpm_tis_clkrun_enable(chip, false);
+
+ if (priv->ilb_base_addr)
+ iounmap(priv->ilb_base_addr);
}
EXPORT_SYMBOL_GPL(tpm_tis_remove);
+/**
+ * tpm_tis_clkrun_enable() - Keep clkrun protocol disabled for entire duration
+ * of a single TPM command
+ * @chip: TPM chip to use
+ * @value: 1 - Disable CLKRUN protocol, so that clocks are free running
+ * 0 - Enable CLKRUN protocol
+ * Call this function directly in tpm_tis_remove() in error or driver removal
+ * path, since the chip->ops is set to NULL in tpm_chip_unregister().
+ */
+static void tpm_tis_clkrun_enable(struct tpm_chip *chip, bool value)
+{
+ struct tpm_tis_data *data = dev_get_drvdata(&chip->dev);
+ u32 clkrun_val;
+
+ if (!IS_ENABLED(CONFIG_X86) || !is_bsw() ||
+ !data->ilb_base_addr)
+ return;
+
+ if (value) {
+ data->clkrun_enabled++;
+ if (data->clkrun_enabled > 1)
+ return;
+ clkrun_val = ioread32(data->ilb_base_addr + LPC_CNTRL_OFFSET);
+
+ /* Disable LPC CLKRUN# */
+ clkrun_val &= ~LPC_CLKRUN_EN;
+ iowrite32(clkrun_val, data->ilb_base_addr + LPC_CNTRL_OFFSET);
+
+ /*
+ * Write any random value on port 0x80 which is on LPC, to make
+ * sure LPC clock is running before sending any TPM command.
+ */
+ outb(0xCC, 0x80);
+ } else {
+ data->clkrun_enabled--;
+ if (data->clkrun_enabled)
+ return;
+
+ clkrun_val = ioread32(data->ilb_base_addr + LPC_CNTRL_OFFSET);
+
+ /* Enable LPC CLKRUN# */
+ clkrun_val |= LPC_CLKRUN_EN;
+ iowrite32(clkrun_val, data->ilb_base_addr + LPC_CNTRL_OFFSET);
+
+ /*
+ * Write any random value on port 0x80 which is on LPC, to make
+ * sure LPC clock is running before sending any TPM command.
+ */
+ outb(0xCC, 0x80);
+ }
+}
+
static const struct tpm_class_ops tpm_tis = {
.flags = TPM_OPS_AUTO_STARTUP,
.status = tpm_tis_status,
@@ -673,13 +809,17 @@ static const struct tpm_class_ops tpm_tis = {
.req_canceled = tpm_tis_req_canceled,
.request_locality = request_locality,
.relinquish_locality = release_locality,
+ .clk_enable = tpm_tis_clkrun_enable,
};
int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
const struct tpm_tis_phy_ops *phy_ops,
acpi_handle acpi_dev_handle)
{
- u32 vendor, intfcaps, intmask;
+ u32 vendor;
+ u32 intfcaps;
+ u32 intmask;
+ u32 clkrun_val;
u8 rid;
int rc, probe;
struct tpm_chip *chip;
@@ -700,6 +840,23 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
priv->phy_ops = phy_ops;
dev_set_drvdata(&chip->dev, priv);
+ if (is_bsw()) {
+ priv->ilb_base_addr = ioremap(INTEL_LEGACY_BLK_BASE_ADDR,
+ ILB_REMAP_SIZE);
+ if (!priv->ilb_base_addr)
+ return -ENOMEM;
+
+ clkrun_val = ioread32(priv->ilb_base_addr + LPC_CNTRL_OFFSET);
+ /* If CLKRUN# is not enabled on the LPC bus, there is nothing to toggle */
+ if (!(clkrun_val & LPC_CLKRUN_EN)) {
+ iounmap(priv->ilb_base_addr);
+ priv->ilb_base_addr = NULL;
+ }
+ }
+
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, true);
+
if (wait_startup(chip, 0) != 0) {
rc = -ENODEV;
goto out_err;
@@ -790,9 +947,20 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
}
}
- return tpm_chip_register(chip);
+ rc = tpm_chip_register(chip);
+ if (rc)
+ goto out_err;
+
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, false);
+
+ return 0;
out_err:
+ if ((chip->ops != NULL) && (chip->ops->clk_enable != NULL))
+ chip->ops->clk_enable(chip, false);
+
tpm_tis_remove(chip);
+
return rc;
}
EXPORT_SYMBOL_GPL(tpm_tis_core_init);
@@ -804,22 +972,31 @@ static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
u32 intmask;
int rc;
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, true);
+
/* reenable interrupts that device may have lost or
* BIOS/firmware may have disabled
*/
rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), priv->irq);
if (rc < 0)
- return;
+ goto out;
rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask);
if (rc < 0)
- return;
+ goto out;
intmask |= TPM_INTF_CMD_READY_INT
| TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
| TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE;
tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
+
+out:
+ if (chip->ops->clk_enable != NULL)
+ chip->ops->clk_enable(chip, false);
+
+ return;
}
int tpm_tis_resume(struct device *dev)
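tpm_tis_clkrun_enable() above behaves as a refcount: only the 0 -> 1 transition actually clears LPC_CLKRUN_EN and only the final 1 -> 0 transition sets it again, so the nested clk_enable(true)/clk_enable(false) pairs issued by tpm_transmit() and the TIS helpers compose without glitching the bus. The counting skeleton on its own, with the hardware pokes elided:

	static unsigned int clkrun_users;	/* mirrors data->clkrun_enabled */

	static void clkrun_ref(bool value)
	{
		if (value) {
			if (clkrun_users++ > 0)
				return;		/* CLKRUN# already held off */
			/* first user: disable CLKRUN# on the LPC bus */
		} else {
			if (--clkrun_users > 0)
				return;		/* an outer caller still needs it */
			/* last user: re-enable CLKRUN# */
		}
	}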
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index 6bbac319ff3b..d5c6a2e952b3 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -79,6 +79,11 @@ enum tis_defaults {
#define TPM_DID_VID(l) (0x0F00 | ((l) << 12))
#define TPM_RID(l) (0x0F04 | ((l) << 12))
+#define LPC_CNTRL_OFFSET 0x84
+#define LPC_CLKRUN_EN (1 << 2)
+#define INTEL_LEGACY_BLK_BASE_ADDR 0xFED08000
+#define ILB_REMAP_SIZE 0x100
+
enum tpm_tis_flags {
TPM_TIS_ITPM_WORKAROUND = BIT(0),
};
@@ -89,6 +94,8 @@ struct tpm_tis_data {
int irq;
bool irq_tested;
unsigned int flags;
+ void __iomem *ilb_base_addr;
+ u16 clkrun_enabled;
wait_queue_head_t int_queue;
wait_queue_head_t read_queue;
const struct tpm_tis_phy_ops *phy_ops;
@@ -144,6 +151,15 @@ static inline int tpm_tis_write32(struct tpm_tis_data *data, u32 addr,
return data->phy_ops->write32(data, addr, value);
}
+static inline bool is_bsw(void)
+{
+#ifdef CONFIG_X86
+ return boot_cpu_data.x86_model == INTEL_FAM6_ATOM_AIRMONT;
+#else
+ return false;
+#endif
+}
+
void tpm_tis_remove(struct tpm_chip *chip);
int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
const struct tpm_tis_phy_ops *phy_ops,
diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c
index 1d877cc9af97..674218b50b13 100644
--- a/drivers/char/tpm/tpm_vtpm_proxy.c
+++ b/drivers/char/tpm/tpm_vtpm_proxy.c
@@ -173,10 +173,10 @@ static ssize_t vtpm_proxy_fops_write(struct file *filp, const char __user *buf,
*
* Return: Poll flags
*/
-static unsigned int vtpm_proxy_fops_poll(struct file *filp, poll_table *wait)
+static __poll_t vtpm_proxy_fops_poll(struct file *filp, poll_table *wait)
{
struct proxy_dev *proxy_dev = filp->private_data;
- unsigned ret;
+ __poll_t ret;
poll_wait(filp, &proxy_dev->wq, wait);
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index 656e8af95d52..911475d36800 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -10,6 +10,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/interrupt.h>
+#include <linux/freezer.h>
#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/io/tpmif.h>
@@ -39,6 +40,66 @@ enum status_bits {
VTPM_STATUS_CANCELED = 0x8,
};
+static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
+ bool check_cancel, bool *canceled)
+{
+ u8 status = chip->ops->status(chip);
+
+ *canceled = false;
+ if ((status & mask) == mask)
+ return true;
+ if (check_cancel && chip->ops->req_canceled(chip, status)) {
+ *canceled = true;
+ return true;
+ }
+ return false;
+}
+
+static int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask,
+ unsigned long timeout, wait_queue_head_t *queue,
+ bool check_cancel)
+{
+ unsigned long stop;
+ long rc;
+ u8 status;
+ bool canceled = false;
+
+ /* check current status */
+ status = chip->ops->status(chip);
+ if ((status & mask) == mask)
+ return 0;
+
+ stop = jiffies + timeout;
+
+ if (chip->flags & TPM_CHIP_FLAG_IRQ) {
+again:
+ timeout = stop - jiffies;
+ if ((long)timeout <= 0)
+ return -ETIME;
+ rc = wait_event_interruptible_timeout(*queue,
+ wait_for_tpm_stat_cond(chip, mask, check_cancel,
+ &canceled),
+ timeout);
+ if (rc > 0) {
+ if (canceled)
+ return -ECANCELED;
+ return 0;
+ }
+ if (rc == -ERESTARTSYS && freezing(current)) {
+ clear_thread_flag(TIF_SIGPENDING);
+ goto again;
+ }
+ } else {
+ do {
+ tpm_msleep(TPM_TIMEOUT);
+ status = chip->ops->status(chip);
+ if ((status & mask) == mask)
+ return 0;
+ } while (time_before(jiffies, stop));
+ }
+ return -ETIME;
+}
+
static u8 vtpm_status(struct tpm_chip *chip)
{
struct tpm_private *priv = dev_get_drvdata(&chip->dev);
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index d1aed2513bd9..813a2e46824d 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -982,10 +982,10 @@ error_out:
return ret;
}
-static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
+static __poll_t port_fops_poll(struct file *filp, poll_table *wait)
{
struct port *port;
- unsigned int ret;
+ __poll_t ret;
port = filp->private_data;
poll_wait(filp, &port->waitqueue, wait);
diff --git a/drivers/char/xillybus/xillybus_core.c b/drivers/char/xillybus/xillybus_core.c
index b6c9cdead7f3..88e1cf475d3f 100644
--- a/drivers/char/xillybus/xillybus_core.c
+++ b/drivers/char/xillybus/xillybus_core.c
@@ -1736,10 +1736,10 @@ end:
return pos;
}
-static unsigned int xillybus_poll(struct file *filp, poll_table *wait)
+static __poll_t xillybus_poll(struct file *filp, poll_table *wait)
{
struct xilly_channel *channel = filp->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
unsigned long flags;
poll_wait(filp, &channel->endpoint->ep_wait, wait);
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index c729a88007d0..b3b4ed9b6874 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -269,6 +269,7 @@ config CLKSRC_STM32
bool "Clocksource for STM32 SoCs" if !ARCH_STM32
depends on OF && ARM && (ARCH_STM32 || COMPILE_TEST)
select CLKSRC_MMIO
+ select TIMER_OF
config CLKSRC_MPS2
bool "Clocksource for MPS2 SoCs" if COMPILE_TEST
@@ -441,6 +442,13 @@ config MTK_TIMER
help
Support for Mediatek timer driver.
+config SPRD_TIMER
+ bool "Spreadtrum timer driver" if COMPILE_TEST
+ depends on HAS_IOMEM
+ select TIMER_OF
+ help
+ Enables support for the Spreadtrum timer driver.
+
config SYS_SUPPORTS_SH_MTU2
bool
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 72711f1491e3..d6dec4489d66 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -54,6 +54,7 @@ obj-$(CONFIG_CLKSRC_TI_32K) += timer-ti-32k.o
obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o
obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o
obj-$(CONFIG_OWL_TIMER) += owl-timer.o
+obj-$(CONFIG_SPRD_TIMER) += timer-sprd.o
obj-$(CONFIG_ARC_TIMERS) += arc_timer.o
obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
diff --git a/drivers/clocksource/owl-timer.c b/drivers/clocksource/owl-timer.c
index c68630565079..ea00a5e8f95d 100644
--- a/drivers/clocksource/owl-timer.c
+++ b/drivers/clocksource/owl-timer.c
@@ -168,5 +168,6 @@ static int __init owl_timer_init(struct device_node *node)
return 0;
}
-CLOCKSOURCE_OF_DECLARE(owl_s500, "actions,s500-timer", owl_timer_init);
-CLOCKSOURCE_OF_DECLARE(owl_s900, "actions,s900-timer", owl_timer_init);
+TIMER_OF_DECLARE(owl_s500, "actions,s500-timer", owl_timer_init);
+TIMER_OF_DECLARE(owl_s700, "actions,s700-timer", owl_timer_init);
+TIMER_OF_DECLARE(owl_s900, "actions,s900-timer", owl_timer_init);
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 9de47d4d2d9e..43f4d5c4d6fa 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -384,7 +384,7 @@ static int __init tcb_clksrc_init(void)
printk(bootinfo, clksrc.name, CONFIG_ATMEL_TCB_CLKSRC_BLOCK,
divided_rate / 1000000,
- ((divided_rate + 500000) % 1000000) / 1000);
+ ((divided_rate % 1000000) + 500) / 1000);
if (tc->tcb_config && tc->tcb_config->counter_width == 32) {
/* use appropriate function to read 32-bit counter */
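The tcb_clksrc one-liner above fixes the rounded kHz fraction printed at boot: adding 500000 before the modulo destroys the fractional part instead of rounding it. A worked check with a hypothetical divided_rate of 2500678 Hz (2.500678 MHz, so the fraction should round to 501):

	/* old: ((2500678 + 500000) % 1000000) / 1000 = 678    / 1000 = 0   */
	/* new: ((2500678 % 1000000) + 500)    / 1000 = 501178 / 1000 = 501 */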
diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
index a31990408153..06ed88a2a8a0 100644
--- a/drivers/clocksource/timer-of.c
+++ b/drivers/clocksource/timer-of.c
@@ -24,7 +24,13 @@
#include "timer-of.h"
-static __init void timer_irq_exit(struct of_timer_irq *of_irq)
+/**
+ * timer_of_irq_exit - Release the interrupt
+ * @of_irq: an of_timer_irq structure pointer
+ *
+ * Free the irq resource
+ */
+static __init void timer_of_irq_exit(struct of_timer_irq *of_irq)
{
struct timer_of *to = container_of(of_irq, struct timer_of, of_irq);
@@ -34,8 +40,24 @@ static __init void timer_irq_exit(struct of_timer_irq *of_irq)
free_irq(of_irq->irq, clkevt);
}
-static __init int timer_irq_init(struct device_node *np,
- struct of_timer_irq *of_irq)
+/**
+ * timer_of_irq_init - Request the interrupt
+ * @np: a device tree node pointer
+ * @of_irq: an of_timer_irq structure pointer
+ *
+ * Get the interrupt number from the DT definition and request it. The
+ * interrupt is looked up with the following fallback order:
+ *
+ * - Get the interrupt number by name
+ * - Get the interrupt number by index
+ *
+ * When the interrupt is per CPU, 'request_percpu_irq()' is called;
+ * otherwise 'request_irq()' is used.
+ *
+ * Returns 0 on success, < 0 otherwise
+ */
+static __init int timer_of_irq_init(struct device_node *np,
+ struct of_timer_irq *of_irq)
{
int ret;
struct timer_of *to = container_of(of_irq, struct timer_of, of_irq);
@@ -72,15 +94,30 @@ static __init int timer_irq_init(struct device_node *np,
return 0;
}
-static __init void timer_clk_exit(struct of_timer_clk *of_clk)
+/**
+ * timer_of_clk_exit - Release the clock resources
+ * @of_clk: an of_timer_clk structure pointer
+ *
+ * Disable the clock and release its refcount
+ */
+static __init void timer_of_clk_exit(struct of_timer_clk *of_clk)
{
of_clk->rate = 0;
clk_disable_unprepare(of_clk->clk);
clk_put(of_clk->clk);
}
-static __init int timer_clk_init(struct device_node *np,
- struct of_timer_clk *of_clk)
+/**
+ * timer_of_clk_init - Initialize the clock resources
+ * @np: a device tree node pointer
+ * @of_clk: an of_timer_clk structure pointer
+ *
+ * Get the clock by name or by index, enable it and get the rate
+ *
+ * Returns 0 on success, < 0 otherwise
+ */
+static __init int timer_of_clk_init(struct device_node *np,
+ struct of_timer_clk *of_clk)
{
int ret;
@@ -116,19 +153,19 @@ out_clk_put:
goto out;
}
-static __init void timer_base_exit(struct of_timer_base *of_base)
+static __init void timer_of_base_exit(struct of_timer_base *of_base)
{
iounmap(of_base->base);
}
-static __init int timer_base_init(struct device_node *np,
- struct of_timer_base *of_base)
+static __init int timer_of_base_init(struct device_node *np,
+ struct of_timer_base *of_base)
{
- const char *name = of_base->name ? of_base->name : np->full_name;
-
- of_base->base = of_io_request_and_map(np, of_base->index, name);
+ of_base->base = of_base->name ?
+ of_io_request_and_map(np, of_base->index, of_base->name) :
+ of_iomap(np, of_base->index);
if (IS_ERR(of_base->base)) {
- pr_err("Failed to iomap (%s)\n", name);
+ pr_err("Failed to iomap (%s)\n", of_base->name);
return PTR_ERR(of_base->base);
}
@@ -141,21 +178,21 @@ int __init timer_of_init(struct device_node *np, struct timer_of *to)
int flags = 0;
if (to->flags & TIMER_OF_BASE) {
- ret = timer_base_init(np, &to->of_base);
+ ret = timer_of_base_init(np, &to->of_base);
if (ret)
goto out_fail;
flags |= TIMER_OF_BASE;
}
if (to->flags & TIMER_OF_CLOCK) {
- ret = timer_clk_init(np, &to->of_clk);
+ ret = timer_of_clk_init(np, &to->of_clk);
if (ret)
goto out_fail;
flags |= TIMER_OF_CLOCK;
}
if (to->flags & TIMER_OF_IRQ) {
- ret = timer_irq_init(np, &to->of_irq);
+ ret = timer_of_irq_init(np, &to->of_irq);
if (ret)
goto out_fail;
flags |= TIMER_OF_IRQ;
@@ -163,17 +200,20 @@ int __init timer_of_init(struct device_node *np, struct timer_of *to)
if (!to->clkevt.name)
to->clkevt.name = np->name;
+
+ to->np = np;
+
return ret;
out_fail:
if (flags & TIMER_OF_IRQ)
- timer_irq_exit(&to->of_irq);
+ timer_of_irq_exit(&to->of_irq);
if (flags & TIMER_OF_CLOCK)
- timer_clk_exit(&to->of_clk);
+ timer_of_clk_exit(&to->of_clk);
if (flags & TIMER_OF_BASE)
- timer_base_exit(&to->of_base);
+ timer_of_base_exit(&to->of_base);
return ret;
}
@@ -187,11 +227,11 @@ out_fail:
void __init timer_of_cleanup(struct timer_of *to)
{
if (to->flags & TIMER_OF_IRQ)
- timer_irq_exit(&to->of_irq);
+ timer_of_irq_exit(&to->of_irq);
if (to->flags & TIMER_OF_CLOCK)
- timer_clk_exit(&to->of_clk);
+ timer_of_clk_exit(&to->of_clk);
if (to->flags & TIMER_OF_BASE)
- timer_base_exit(&to->of_base);
+ timer_of_base_exit(&to->of_base);
}
diff --git a/drivers/clocksource/timer-of.h b/drivers/clocksource/timer-of.h
index 3f708f1be43d..a5478f3e8589 100644
--- a/drivers/clocksource/timer-of.h
+++ b/drivers/clocksource/timer-of.h
@@ -33,6 +33,7 @@ struct of_timer_clk {
struct timer_of {
unsigned int flags;
+ struct device_node *np;
struct clock_event_device clkevt;
struct of_timer_base of_base;
struct of_timer_irq of_irq;
diff --git a/drivers/clocksource/timer-sprd.c b/drivers/clocksource/timer-sprd.c
new file mode 100644
index 000000000000..ef9ebeafb3ed
--- /dev/null
+++ b/drivers/clocksource/timer-sprd.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Spreadtrum Communications Inc.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+
+#include "timer-of.h"
+
+#define TIMER_NAME "sprd_timer"
+
+#define TIMER_LOAD_LO 0x0
+#define TIMER_LOAD_HI 0x4
+#define TIMER_VALUE_LO 0x8
+#define TIMER_VALUE_HI 0xc
+
+#define TIMER_CTL 0x10
+#define TIMER_CTL_PERIOD_MODE BIT(0)
+#define TIMER_CTL_ENABLE BIT(1)
+#define TIMER_CTL_64BIT_WIDTH BIT(16)
+
+#define TIMER_INT 0x14
+#define TIMER_INT_EN BIT(0)
+#define TIMER_INT_RAW_STS BIT(1)
+#define TIMER_INT_MASK_STS BIT(2)
+#define TIMER_INT_CLR BIT(3)
+
+#define TIMER_VALUE_SHDW_LO 0x18
+#define TIMER_VALUE_SHDW_HI 0x1c
+
+#define TIMER_VALUE_LO_MASK GENMASK(31, 0)
+
+static void sprd_timer_enable(void __iomem *base, u32 flag)
+{
+ u32 val = readl_relaxed(base + TIMER_CTL);
+
+ val |= TIMER_CTL_ENABLE;
+ if (flag & TIMER_CTL_64BIT_WIDTH)
+ val |= TIMER_CTL_64BIT_WIDTH;
+ else
+ val &= ~TIMER_CTL_64BIT_WIDTH;
+
+ if (flag & TIMER_CTL_PERIOD_MODE)
+ val |= TIMER_CTL_PERIOD_MODE;
+ else
+ val &= ~TIMER_CTL_PERIOD_MODE;
+
+ writel_relaxed(val, base + TIMER_CTL);
+}
+
+static void sprd_timer_disable(void __iomem *base)
+{
+ u32 val = readl_relaxed(base + TIMER_CTL);
+
+ val &= ~TIMER_CTL_ENABLE;
+ writel_relaxed(val, base + TIMER_CTL);
+}
+
+static void sprd_timer_update_counter(void __iomem *base, unsigned long cycles)
+{
+ writel_relaxed(cycles & TIMER_VALUE_LO_MASK, base + TIMER_LOAD_LO);
+ writel_relaxed(0, base + TIMER_LOAD_HI);
+}
+
+static void sprd_timer_enable_interrupt(void __iomem *base)
+{
+ writel_relaxed(TIMER_INT_EN, base + TIMER_INT);
+}
+
+static void sprd_timer_clear_interrupt(void __iomem *base)
+{
+ u32 val = readl_relaxed(base + TIMER_INT);
+
+ val |= TIMER_INT_CLR;
+ writel_relaxed(val, base + TIMER_INT);
+}
+
+static int sprd_timer_set_next_event(unsigned long cycles,
+ struct clock_event_device *ce)
+{
+ struct timer_of *to = to_timer_of(ce);
+
+ sprd_timer_disable(timer_of_base(to));
+ sprd_timer_update_counter(timer_of_base(to), cycles);
+ sprd_timer_enable(timer_of_base(to), 0);
+
+ return 0;
+}
+
+static int sprd_timer_set_periodic(struct clock_event_device *ce)
+{
+ struct timer_of *to = to_timer_of(ce);
+
+ sprd_timer_disable(timer_of_base(to));
+ sprd_timer_update_counter(timer_of_base(to), timer_of_period(to));
+ sprd_timer_enable(timer_of_base(to), TIMER_CTL_PERIOD_MODE);
+
+ return 0;
+}
+
+static int sprd_timer_shutdown(struct clock_event_device *ce)
+{
+ struct timer_of *to = to_timer_of(ce);
+
+ sprd_timer_disable(timer_of_base(to));
+ return 0;
+}
+
+static irqreturn_t sprd_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *ce = (struct clock_event_device *)dev_id;
+ struct timer_of *to = to_timer_of(ce);
+
+ sprd_timer_clear_interrupt(timer_of_base(to));
+
+ if (clockevent_state_oneshot(ce))
+ sprd_timer_disable(timer_of_base(to));
+
+ ce->event_handler(ce);
+ return IRQ_HANDLED;
+}
+
+static struct timer_of to = {
+ .flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK,
+
+ .clkevt = {
+ .name = TIMER_NAME,
+ .rating = 300,
+ .features = CLOCK_EVT_FEAT_DYNIRQ | CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT,
+ .set_state_shutdown = sprd_timer_shutdown,
+ .set_state_periodic = sprd_timer_set_periodic,
+ .set_next_event = sprd_timer_set_next_event,
+ .cpumask = cpu_possible_mask,
+ },
+
+ .of_irq = {
+ .handler = sprd_timer_interrupt,
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
+ },
+};
+
+static int __init sprd_timer_init(struct device_node *np)
+{
+ int ret;
+
+ ret = timer_of_init(np, &to);
+ if (ret)
+ return ret;
+
+ sprd_timer_enable_interrupt(timer_of_base(&to));
+ clockevents_config_and_register(&to.clkevt, timer_of_rate(&to),
+ 1, UINT_MAX);
+
+ return 0;
+}
+
+TIMER_OF_DECLARE(sc9860_timer, "sprd,sc9860-timer", sprd_timer_init);
diff --git a/drivers/clocksource/timer-stm32.c b/drivers/clocksource/timer-stm32.c
index 8f2423789ba9..e5cdc3af684c 100644
--- a/drivers/clocksource/timer-stm32.c
+++ b/drivers/clocksource/timer-stm32.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
+#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/of.h>
@@ -16,175 +17,318 @@
#include <linux/of_irq.h>
#include <linux/clk.h>
#include <linux/reset.h>
+#include <linux/sched_clock.h>
+#include <linux/slab.h>
+
+#include "timer-of.h"
#define TIM_CR1 0x00
#define TIM_DIER 0x0c
#define TIM_SR 0x10
#define TIM_EGR 0x14
+#define TIM_CNT 0x24
#define TIM_PSC 0x28
#define TIM_ARR 0x2c
+#define TIM_CCR1 0x34
#define TIM_CR1_CEN BIT(0)
+#define TIM_CR1_UDIS BIT(1)
#define TIM_CR1_OPM BIT(3)
#define TIM_CR1_ARPE BIT(7)
#define TIM_DIER_UIE BIT(0)
+#define TIM_DIER_CC1IE BIT(1)
#define TIM_SR_UIF BIT(0)
#define TIM_EGR_UG BIT(0)
-struct stm32_clock_event_ddata {
- struct clock_event_device evtdev;
- unsigned periodic_top;
- void __iomem *base;
+#define TIM_PSC_MAX USHRT_MAX
+#define TIM_PSC_CLKRATE 10000
+
+struct stm32_timer_private {
+ int bits;
};
-static int stm32_clock_event_shutdown(struct clock_event_device *evtdev)
+/**
+ * stm32_timer_of_bits_set - set accessor helper
+ * @to: a timer_of structure pointer
+ * @bits: the number of bits (16 or 32)
+ *
+ * Accessor helper to set the number of bits in the timer-of private
+ * structure.
+ *
+ */
+static void stm32_timer_of_bits_set(struct timer_of *to, int bits)
{
- struct stm32_clock_event_ddata *data =
- container_of(evtdev, struct stm32_clock_event_ddata, evtdev);
- void *base = data->base;
+ struct stm32_timer_private *pd = to->private_data;
- writel_relaxed(0, base + TIM_CR1);
- return 0;
+ pd->bits = bits;
+}
+
+/**
+ * stm32_timer_of_bits_get - get accessor helper
+ * @to: a timer_of structure pointer
+ *
+ * Accessor helper to get the number of bits in the timer-of private
+ * structure.
+ *
+ * Returns an integer corresponding to the number of bits.
+ */
+static int stm32_timer_of_bits_get(struct timer_of *to)
+{
+ struct stm32_timer_private *pd = to->private_data;
+
+ return pd->bits;
+}
+
+static void __iomem *stm32_timer_cnt __read_mostly;
+
+static u64 notrace stm32_read_sched_clock(void)
+{
+ return readl_relaxed(stm32_timer_cnt);
+}
+
+static struct delay_timer stm32_timer_delay;
+
+static unsigned long stm32_read_delay(void)
+{
+ return readl_relaxed(stm32_timer_cnt);
}
-static int stm32_clock_event_set_periodic(struct clock_event_device *evtdev)
+static void stm32_clock_event_disable(struct timer_of *to)
{
- struct stm32_clock_event_ddata *data =
- container_of(evtdev, struct stm32_clock_event_ddata, evtdev);
- void *base = data->base;
+ writel_relaxed(0, timer_of_base(to) + TIM_DIER);
+}
+
+/**
+ * stm32_timer_start - Start the counter without event
+ * @to: a timer_of structure pointer
+ *
+ * Start the timer in order to have the counter reset and start
+ * incrementing, but disable the interrupt event on counter overflow.
+ * The counter is configured as an upcounter by default.
+ */
+static void stm32_timer_start(struct timer_of *to)
+{
+ writel_relaxed(TIM_CR1_UDIS | TIM_CR1_CEN, timer_of_base(to) + TIM_CR1);
+}
+
+static int stm32_clock_event_shutdown(struct clock_event_device *clkevt)
+{
+ struct timer_of *to = to_timer_of(clkevt);
+
+ stm32_clock_event_disable(to);
- writel_relaxed(data->periodic_top, base + TIM_ARR);
- writel_relaxed(TIM_CR1_ARPE | TIM_CR1_CEN, base + TIM_CR1);
return 0;
}
static int stm32_clock_event_set_next_event(unsigned long evt,
- struct clock_event_device *evtdev)
+ struct clock_event_device *clkevt)
{
- struct stm32_clock_event_ddata *data =
- container_of(evtdev, struct stm32_clock_event_ddata, evtdev);
+ struct timer_of *to = to_timer_of(clkevt);
+ unsigned long now, next;
+
+ next = readl_relaxed(timer_of_base(to) + TIM_CNT) + evt;
+ writel_relaxed(next, timer_of_base(to) + TIM_CCR1);
+ now = readl_relaxed(timer_of_base(to) + TIM_CNT);
+
+ if ((next - now) > evt)
+ return -ETIME;
- writel_relaxed(evt, data->base + TIM_ARR);
- writel_relaxed(TIM_CR1_ARPE | TIM_CR1_OPM | TIM_CR1_CEN,
- data->base + TIM_CR1);
+ writel_relaxed(TIM_DIER_CC1IE, timer_of_base(to) + TIM_DIER);
+
+ return 0;
+}
+
+static int stm32_clock_event_set_periodic(struct clock_event_device *clkevt)
+{
+ struct timer_of *to = to_timer_of(clkevt);
+
+ stm32_timer_start(to);
+
+ return stm32_clock_event_set_next_event(timer_of_period(to), clkevt);
+}
+
+static int stm32_clock_event_set_oneshot(struct clock_event_device *clkevt)
+{
+ struct timer_of *to = to_timer_of(clkevt);
+
+ stm32_timer_start(to);
return 0;
}
static irqreturn_t stm32_clock_event_handler(int irq, void *dev_id)
{
- struct stm32_clock_event_ddata *data = dev_id;
+ struct clock_event_device *clkevt = (struct clock_event_device *)dev_id;
+ struct timer_of *to = to_timer_of(clkevt);
+
+ writel_relaxed(0, timer_of_base(to) + TIM_SR);
- writel_relaxed(0, data->base + TIM_SR);
+ if (clockevent_state_periodic(clkevt))
+ stm32_clock_event_set_periodic(clkevt);
+ else
+ stm32_clock_event_shutdown(clkevt);
- data->evtdev.event_handler(&data->evtdev);
+ clkevt->event_handler(clkevt);
return IRQ_HANDLED;
}
-static struct stm32_clock_event_ddata clock_event_ddata = {
- .evtdev = {
- .name = "stm32 clockevent",
- .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
- .set_state_shutdown = stm32_clock_event_shutdown,
- .set_state_periodic = stm32_clock_event_set_periodic,
- .set_state_oneshot = stm32_clock_event_shutdown,
- .tick_resume = stm32_clock_event_shutdown,
- .set_next_event = stm32_clock_event_set_next_event,
- .rating = 200,
- },
-};
+/**
+ * stm32_timer_set_width - Sort out the timer width (32/16)
+ * @to: a pointer to a timer-of structure
+ *
+ * Write the 32-bit max value to TIM_ARR and read it back. If the timer
+ * is 32 bits wide, the result will be UINT_MAX; otherwise it will
+ * be truncated to USHRT_MAX by the 16-bit register.
+ *
+ */
+static void __init stm32_timer_set_width(struct timer_of *to)
+{
+ u32 width;
+
+ writel_relaxed(UINT_MAX, timer_of_base(to) + TIM_ARR);
+
+ width = readl_relaxed(timer_of_base(to) + TIM_ARR);
+
+ stm32_timer_of_bits_set(to, width == UINT_MAX ? 32 : 16);
+}
-static int __init stm32_clockevent_init(struct device_node *np)
+/**
+ * stm32_timer_set_prescaler - Compute and set the prescaler register
+ * @to: a pointer to a timer-of structure
+ *
+ * Depending on the timer width, compute the prescaler to always
+ * target a 10 kHz timer rate (TIM_PSC_CLKRATE) for 16-bit timers.
+ * 32-bit timers are considered precise and long enough to not use
+ * the prescaler.
+ */
+static void __init stm32_timer_set_prescaler(struct timer_of *to)
{
- struct stm32_clock_event_ddata *data = &clock_event_ddata;
- struct clk *clk;
- struct reset_control *rstc;
- unsigned long rate, max_delta;
- int irq, ret, bits, prescaler = 1;
-
- clk = of_clk_get(np, 0);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- pr_err("failed to get clock for clockevent (%d)\n", ret);
- goto err_clk_get;
+ int prescaler = 1;
+
+ if (stm32_timer_of_bits_get(to) != 32) {
+ prescaler = DIV_ROUND_CLOSEST(timer_of_rate(to),
+ TIM_PSC_CLKRATE);
+ /*
+ * The prescaler register is a u16, so the value must not
+ * be greater than TIM_PSC_MAX; cap it in that case.
+ */
+ prescaler = prescaler < TIM_PSC_MAX ? prescaler : TIM_PSC_MAX;
}
- ret = clk_prepare_enable(clk);
- if (ret) {
- pr_err("failed to enable timer clock for clockevent (%d)\n",
- ret);
- goto err_clk_enable;
- }
+ writel_relaxed(prescaler - 1, timer_of_base(to) + TIM_PSC);
+ writel_relaxed(TIM_EGR_UG, timer_of_base(to) + TIM_EGR);
+ writel_relaxed(0, timer_of_base(to) + TIM_SR);
- rate = clk_get_rate(clk);
+ /* Adjust rate and period given the prescaler value */
+ to->of_clk.rate = DIV_ROUND_CLOSEST(to->of_clk.rate, prescaler);
+ to->of_clk.period = DIV_ROUND_UP(to->of_clk.rate, HZ);
+}
- rstc = of_reset_control_get(np, NULL);
- if (!IS_ERR(rstc)) {
- reset_control_assert(rstc);
- reset_control_deassert(rstc);
+static int __init stm32_clocksource_init(struct timer_of *to)
+{
+ u32 bits = stm32_timer_of_bits_get(to);
+ const char *name = to->np->full_name;
+
+ /*
+ * This driver allows several timers to be registered and relies
+ * on the generic time framework to select the right one.
+ * However, there is no equivalent mechanism for the sched_clock.
+ * We are not interested in a sched_clock for the 16-bit timers,
+ * only for the 32-bit one, so if no 32-bit timer has been
+ * registered yet, select this one as the sched_clock.
+ */
+ if (bits == 32 && !stm32_timer_cnt) {
+
+ /*
+ * Start the counter immediately as we will be using
+ * it right away.
+ */
+ stm32_timer_start(to);
+
+ stm32_timer_cnt = timer_of_base(to) + TIM_CNT;
+ sched_clock_register(stm32_read_sched_clock, bits, timer_of_rate(to));
+ pr_info("%s: STM32 sched_clock registered\n", name);
+
+ stm32_timer_delay.read_current_timer = stm32_read_delay;
+ stm32_timer_delay.freq = timer_of_rate(to);
+ register_current_timer_delay(&stm32_timer_delay);
+ pr_info("%s: STM32 delay timer registered\n", name);
}
- data->base = of_iomap(np, 0);
- if (!data->base) {
- ret = -ENXIO;
- pr_err("failed to map registers for clockevent\n");
- goto err_iomap;
- }
+ return clocksource_mmio_init(timer_of_base(to) + TIM_CNT, name,
+ timer_of_rate(to), bits == 32 ? 250 : 100,
+ bits, clocksource_mmio_readl_up);
+}
- irq = irq_of_parse_and_map(np, 0);
- if (!irq) {
- ret = -EINVAL;
- pr_err("%pOF: failed to get irq.\n", np);
- goto err_get_irq;
- }
+static void __init stm32_clockevent_init(struct timer_of *to)
+{
+ u32 bits = stm32_timer_of_bits_get(to);
- /* Detect whether the timer is 16 or 32 bits */
- writel_relaxed(~0U, data->base + TIM_ARR);
- max_delta = readl_relaxed(data->base + TIM_ARR);
- if (max_delta == ~0U) {
- prescaler = 1;
- bits = 32;
- } else {
- prescaler = 1024;
- bits = 16;
- }
- writel_relaxed(0, data->base + TIM_ARR);
+ to->clkevt.name = to->np->full_name;
+ to->clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ to->clkevt.set_state_shutdown = stm32_clock_event_shutdown;
+ to->clkevt.set_state_periodic = stm32_clock_event_set_periodic;
+ to->clkevt.set_state_oneshot = stm32_clock_event_set_oneshot;
+ to->clkevt.tick_resume = stm32_clock_event_shutdown;
+ to->clkevt.set_next_event = stm32_clock_event_set_next_event;
+ to->clkevt.rating = bits == 32 ? 250 : 100;
+
+ clockevents_config_and_register(&to->clkevt, timer_of_rate(to), 0x1,
+ bits == 32 ? UINT_MAX : (1U << bits) - 1);
+
+ pr_info("%pOF: STM32 clockevent driver initialized (%d bits)\n",
+ to->np, bits);
+}
+
+static int __init stm32_timer_init(struct device_node *node)
+{
+ struct reset_control *rstc;
+ struct timer_of *to;
+ int ret;
+
+ to = kzalloc(sizeof(*to), GFP_KERNEL);
+ if (!to)
+ return -ENOMEM;
- writel_relaxed(prescaler - 1, data->base + TIM_PSC);
- writel_relaxed(TIM_EGR_UG, data->base + TIM_EGR);
- writel_relaxed(TIM_DIER_UIE, data->base + TIM_DIER);
- writel_relaxed(0, data->base + TIM_SR);
+ to->flags = TIMER_OF_IRQ | TIMER_OF_CLOCK | TIMER_OF_BASE;
+ to->of_irq.handler = stm32_clock_event_handler;
- data->periodic_top = DIV_ROUND_CLOSEST(rate, prescaler * HZ);
+ ret = timer_of_init(node, to);
+ if (ret)
+ goto err;
- clockevents_config_and_register(&data->evtdev,
- DIV_ROUND_CLOSEST(rate, prescaler),
- 0x1, max_delta);
+ to->private_data = kzalloc(sizeof(struct stm32_timer_private),
+ GFP_KERNEL);
+ if (!to->private_data) {
+ ret = -ENOMEM;
+ goto deinit;
+ }
- ret = request_irq(irq, stm32_clock_event_handler, IRQF_TIMER,
- "stm32 clockevent", data);
- if (ret) {
- pr_err("%pOF: failed to request irq.\n", np);
- goto err_get_irq;
+ rstc = of_reset_control_get(node, NULL);
+ if (!IS_ERR(rstc)) {
+ reset_control_assert(rstc);
+ reset_control_deassert(rstc);
}
- pr_info("%pOF: STM32 clockevent driver initialized (%d bits)\n",
- np, bits);
+ stm32_timer_set_width(to);
- return ret;
+ stm32_timer_set_prescaler(to);
+
+ ret = stm32_clocksource_init(to);
+ if (ret)
+ goto deinit;
+
+ stm32_clockevent_init(to);
+ return 0;
-err_get_irq:
- iounmap(data->base);
-err_iomap:
- clk_disable_unprepare(clk);
-err_clk_enable:
- clk_put(clk);
-err_clk_get:
+deinit:
+ timer_of_cleanup(to);
+err:
+ kfree(to);
return ret;
}
-TIMER_OF_DECLARE(stm32, "st,stm32-timer", stm32_clockevent_init);
+TIMER_OF_DECLARE(stm32, "st,stm32-timer", stm32_timer_init);
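
The probe rework above folds the old open-coded width detection into stm32_timer_set_width(). The idiom: write all-ones to the auto-reload register and read it back; a 16-bit counter can only latch 0xffff. A minimal sketch of that helper under that assumption (its body sits outside this hunk, and stm32_timer_of_bits_set() is a guessed name for whatever records the result):

    /* Sketch: tell a 16-bit STM32 timer apart from a 32-bit one. */
    static void __init stm32_timer_set_width(struct timer_of *to)
    {
            u32 width;

            writel_relaxed(UINT_MAX, timer_of_base(to) + TIM_ARR);
            width = readl_relaxed(timer_of_base(to) + TIM_ARR);

            /* Assumed helper: stores 32 or 16 in the driver private data. */
            stm32_timer_of_bits_set(to, width == UINT_MAX ? 32 : 16);
    }
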
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index bdce4488ded1..3a88e33b0cfe 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -2,6 +2,29 @@
# ARM CPU Frequency scaling drivers
#
+config ACPI_CPPC_CPUFREQ
+ tristate "CPUFreq driver based on the ACPI CPPC spec"
+ depends on ACPI_PROCESSOR
+ select ACPI_CPPC_LIB
+ help
+ This adds a CPUFreq driver which uses CPPC methods
+ as described in the ACPIv5.1 spec. CPPC stands for
+ Collaborative Processor Performance Controls. It
+ is based on an abstract continuous scale of CPU
+ performance values which allows the remote power
+ processor to flexibly optimize for power and
+ performance. CPPC relies on power management firmware
+ support for its operation.
+
+ If in doubt, say N.
+
+config ARM_ARMADA_37XX_CPUFREQ
+ tristate "Armada 37xx CPUFreq support"
+ depends on ARCH_MVEBU
+ help
+ This adds the CPUFreq driver support for Marvell Armada 37xx SoCs.
+ The Armada 37xx PMU supports four frequency and VDD levels.
+
# big LITTLE core layer and glue drivers
config ARM_BIG_LITTLE_CPUFREQ
tristate "Generic ARM big LITTLE CPUfreq driver"
@@ -12,6 +35,30 @@ config ARM_BIG_LITTLE_CPUFREQ
help
This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
+config ARM_DT_BL_CPUFREQ
+ tristate "Generic probing via DT for ARM big LITTLE CPUfreq driver"
+ depends on ARM_BIG_LITTLE_CPUFREQ && OF
+ help
+ This enables probing via DT for the generic CPUfreq driver for ARM
+ big.LITTLE platforms. The frequency tables are obtained from DT.
+
+config ARM_SCPI_CPUFREQ
+ tristate "SCPI based CPUfreq driver"
+ depends on ARM_BIG_LITTLE_CPUFREQ && ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI
+ help
+ This adds the CPUfreq driver support for ARM big.LITTLE platforms
+ using SCPI protocol for CPU power management.
+
+ This driver uses SCPI Message Protocol driver to interact with the
+ firmware providing the CPU DVFS functionality.
+
+config ARM_VEXPRESS_SPC_CPUFREQ
+ tristate "Versatile Express SPC based CPUfreq driver"
+ depends on ARM_BIG_LITTLE_CPUFREQ && ARCH_VEXPRESS_SPC
+ help
+ This adds the CPUfreq driver support for Versatile Express
+ big.LITTLE platforms using SPC for power management.
+
config ARM_BRCMSTB_AVS_CPUFREQ
tristate "Broadcom STB AVS CPUfreq driver"
depends on ARCH_BRCMSTB || COMPILE_TEST
@@ -33,20 +80,6 @@ config ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
If in doubt, say N.
-config ARM_DT_BL_CPUFREQ
- tristate "Generic probing via DT for ARM big LITTLE CPUfreq driver"
- depends on ARM_BIG_LITTLE_CPUFREQ && OF
- help
- This enables probing via DT for Generic CPUfreq driver for ARM
- big.LITTLE platform. This gets frequency tables from DT.
-
-config ARM_VEXPRESS_SPC_CPUFREQ
- tristate "Versatile Express SPC based CPUfreq driver"
- depends on ARM_BIG_LITTLE_CPUFREQ && ARCH_VEXPRESS_SPC
- help
- This add the CPUfreq driver support for Versatile Express
- big.LITTLE platforms using SPC for power management.
-
config ARM_EXYNOS5440_CPUFREQ
tristate "SAMSUNG EXYNOS5440"
depends on SOC_EXYNOS5440
@@ -205,16 +238,6 @@ config ARM_SA1100_CPUFREQ
config ARM_SA1110_CPUFREQ
bool
-config ARM_SCPI_CPUFREQ
- tristate "SCPI based CPUfreq driver"
- depends on ARM_BIG_LITTLE_CPUFREQ && ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI
- help
- This adds the CPUfreq driver support for ARM big.LITTLE platforms
- using SCPI protocol for CPU power management.
-
- This driver uses SCPI Message Protocol driver to interact with the
- firmware providing the CPU DVFS functionality.
-
config ARM_SPEAR_CPUFREQ
bool "SPEAr CPUFreq support"
depends on PLAT_SPEAR
@@ -275,20 +298,3 @@ config ARM_PXA2xx_CPUFREQ
This add the CPUFreq driver support for Intel PXA2xx SOCs.
If in doubt, say N.
-
-config ACPI_CPPC_CPUFREQ
- tristate "CPUFreq driver based on the ACPI CPPC spec"
- depends on ACPI_PROCESSOR
- select ACPI_CPPC_LIB
- default n
- help
- This adds a CPUFreq driver which uses CPPC methods
- as described in the ACPIv5.1 spec. CPPC stands for
- Collaborative Processor Performance Controls. It
- is based on an abstract continuous scale of CPU
- performance values which allows the remote power
- processor to flexibly optimize for power and
- performance. CPPC relies on power management firmware
- support for its operation.
-
- If in doubt, say N.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 812f9e0d01a3..e07715ce8844 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -52,23 +52,26 @@ obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ) += arm_big_little.o
# LITTLE drivers, so that it is probed last.
obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o
+obj-$(CONFIG_ARM_ARMADA_37XX_CPUFREQ) += armada-37xx-cpufreq.o
obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ) += brcmstb-avs-cpufreq.o
+obj-$(CONFIG_ACPI_CPPC_CPUFREQ) += cppc_cpufreq.o
obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o
obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o
obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o
obj-$(CONFIG_ARM_MEDIATEK_CPUFREQ) += mediatek-cpufreq.o
+obj-$(CONFIG_MACH_MVEBU_V7) += mvebu-cpufreq.o
obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
obj-$(CONFIG_ARM_PXA2xx_CPUFREQ) += pxa2xx-cpufreq.o
obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o
-obj-$(CONFIG_ARM_S3C24XX_CPUFREQ) += s3c24xx-cpufreq.o
-obj-$(CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS) += s3c24xx-cpufreq-debugfs.o
obj-$(CONFIG_ARM_S3C2410_CPUFREQ) += s3c2410-cpufreq.o
obj-$(CONFIG_ARM_S3C2412_CPUFREQ) += s3c2412-cpufreq.o
obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o
obj-$(CONFIG_ARM_S3C2440_CPUFREQ) += s3c2440-cpufreq.o
obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o
+obj-$(CONFIG_ARM_S3C24XX_CPUFREQ) += s3c24xx-cpufreq.o
+obj-$(CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS) += s3c24xx-cpufreq-debugfs.o
obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o
obj-$(CONFIG_ARM_SA1100_CPUFREQ) += sa1100-cpufreq.o
obj-$(CONFIG_ARM_SA1110_CPUFREQ) += sa1110-cpufreq.o
@@ -81,8 +84,6 @@ obj-$(CONFIG_ARM_TEGRA124_CPUFREQ) += tegra124-cpufreq.o
obj-$(CONFIG_ARM_TEGRA186_CPUFREQ) += tegra186-cpufreq.o
obj-$(CONFIG_ARM_TI_CPUFREQ) += ti-cpufreq.o
obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o
-obj-$(CONFIG_ACPI_CPPC_CPUFREQ) += cppc_cpufreq.o
-obj-$(CONFIG_MACH_MVEBU_V7) += mvebu-cpufreq.o
##################################################################################
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index 65ec5f01aa8d..c56b57dcfda5 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -526,34 +526,13 @@ static int bL_cpufreq_exit(struct cpufreq_policy *policy)
static void bL_cpufreq_ready(struct cpufreq_policy *policy)
{
- struct device *cpu_dev = get_cpu_device(policy->cpu);
int cur_cluster = cpu_to_cluster(policy->cpu);
- struct device_node *np;
/* Do not register a cpu_cooling device if we are in IKS mode */
if (cur_cluster >= MAX_CLUSTERS)
return;
- np = of_node_get(cpu_dev->of_node);
- if (WARN_ON(!np))
- return;
-
- if (of_find_property(np, "#cooling-cells", NULL)) {
- u32 power_coefficient = 0;
-
- of_property_read_u32(np, "dynamic-power-coefficient",
- &power_coefficient);
-
- cdev[cur_cluster] = of_cpufreq_power_cooling_register(np,
- policy, power_coefficient, NULL);
- if (IS_ERR(cdev[cur_cluster])) {
- dev_err(cpu_dev,
- "running cpufreq without cooling device: %ld\n",
- PTR_ERR(cdev[cur_cluster]));
- cdev[cur_cluster] = NULL;
- }
- }
- of_node_put(np);
+ cdev[cur_cluster] = of_cpufreq_cooling_register(policy);
}
static struct cpufreq_driver bL_cpufreq_driver = {
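
This is the first of several conversions in this series (cpufreq-dt, mediatek, qoriq and scpi below follow suit) to the policy-only of_cpufreq_cooling_register(), which now performs the #cooling-cells and dynamic-power-coefficient lookups internally. For a driver that wants to stay defensive about the return value, the scpi pattern below is the one to copy; a sketch (struct example_data and its cdev field are hypothetical):

    static void example_cpufreq_ready(struct cpufreq_policy *policy)
    {
            struct example_data *priv = policy->driver_data;
            struct thermal_cooling_device *cdev;

            /* The helper digs the cooling DT properties out itself. */
            cdev = of_cpufreq_cooling_register(policy);
            if (!IS_ERR(cdev))
                    priv->cdev = cdev;
    }
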
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
new file mode 100644
index 000000000000..c6ebc88a7d8d
--- /dev/null
+++ b/drivers/cpufreq/armada-37xx-cpufreq.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * CPU frequency scaling support for Armada 37xx platform.
+ *
+ * Copyright (C) 2017 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+/* Power management in North Bridge register set */
+#define ARMADA_37XX_NB_L0L1 0x18
+#define ARMADA_37XX_NB_L2L3 0x1C
+#define ARMADA_37XX_NB_TBG_DIV_OFF 13
+#define ARMADA_37XX_NB_TBG_DIV_MASK 0x7
+#define ARMADA_37XX_NB_CLK_SEL_OFF 11
+#define ARMADA_37XX_NB_CLK_SEL_MASK 0x1
+#define ARMADA_37XX_NB_CLK_SEL_TBG 0x1
+#define ARMADA_37XX_NB_TBG_SEL_OFF 9
+#define ARMADA_37XX_NB_TBG_SEL_MASK 0x3
+#define ARMADA_37XX_NB_VDD_SEL_OFF 6
+#define ARMADA_37XX_NB_VDD_SEL_MASK 0x3
+#define ARMADA_37XX_NB_CONFIG_SHIFT 16
+#define ARMADA_37XX_NB_DYN_MOD 0x24
+#define ARMADA_37XX_NB_CLK_SEL_EN BIT(26)
+#define ARMADA_37XX_NB_TBG_EN BIT(28)
+#define ARMADA_37XX_NB_DIV_EN BIT(29)
+#define ARMADA_37XX_NB_VDD_EN BIT(30)
+#define ARMADA_37XX_NB_DFS_EN BIT(31)
+#define ARMADA_37XX_NB_CPU_LOAD 0x30
+#define ARMADA_37XX_NB_CPU_LOAD_MASK 0x3
+#define ARMADA_37XX_DVFS_LOAD_0 0
+#define ARMADA_37XX_DVFS_LOAD_1 1
+#define ARMADA_37XX_DVFS_LOAD_2 2
+#define ARMADA_37XX_DVFS_LOAD_3 3
+
+/*
+ * On Armada 37xx the power management unit manages four levels of
+ * CPU load; each level can be associated with a CPU clock source,
+ * a CPU divider, a VDD level, etc.
+ */
+#define LOAD_LEVEL_NR 4
+
+struct armada_37xx_dvfs {
+ u32 cpu_freq_max;
+ u8 divider[LOAD_LEVEL_NR];
+};
+
+static struct armada_37xx_dvfs armada_37xx_dvfs[] = {
+ {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} },
+ {.cpu_freq_max = 1000*1000*1000, .divider = {1, 2, 4, 5} },
+ {.cpu_freq_max = 800*1000*1000, .divider = {1, 2, 3, 4} },
+ {.cpu_freq_max = 600*1000*1000, .divider = {2, 4, 5, 6} },
+};
+
+static struct armada_37xx_dvfs *armada_37xx_cpu_freq_info_get(u32 freq)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(armada_37xx_dvfs); i++) {
+ if (freq == armada_37xx_dvfs[i].cpu_freq_max)
+ return &armada_37xx_dvfs[i];
+ }
+
+ pr_err("Unsupported CPU frequency %d MHz\n", freq/1000000);
+ return NULL;
+}
+
+/*
+ * Set up the four levels managed by the hardware. Once the four
+ * levels are configured, the DVFS can be enabled.
+ */
+static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base,
+ struct clk *clk, u8 *divider)
+{
+ int load_lvl;
+ struct clk *parent;
+
+ for (load_lvl = 0; load_lvl < LOAD_LEVEL_NR; load_lvl++) {
+ unsigned int reg, mask, val, offset = 0;
+
+ if (load_lvl <= ARMADA_37XX_DVFS_LOAD_1)
+ reg = ARMADA_37XX_NB_L0L1;
+ else
+ reg = ARMADA_37XX_NB_L2L3;
+
+ if (load_lvl == ARMADA_37XX_DVFS_LOAD_0 ||
+ load_lvl == ARMADA_37XX_DVFS_LOAD_2)
+ offset += ARMADA_37XX_NB_CONFIG_SHIFT;
+
+ /* Set the CPU clock source; for all the levels we use TBG */
+ val = ARMADA_37XX_NB_CLK_SEL_TBG << ARMADA_37XX_NB_CLK_SEL_OFF;
+ mask = (ARMADA_37XX_NB_CLK_SEL_MASK
+ << ARMADA_37XX_NB_CLK_SEL_OFF);
+
+ /*
+ * Set the CPU divider based on the pre-computed array in
+ * order to have balanced steps.
+ */
+ val |= divider[load_lvl] << ARMADA_37XX_NB_TBG_DIV_OFF;
+ mask |= (ARMADA_37XX_NB_TBG_DIV_MASK
+ << ARMADA_37XX_NB_TBG_DIV_OFF);
+
+ /* Set VDD divider which is actually the load level. */
+ val |= load_lvl << ARMADA_37XX_NB_VDD_SEL_OFF;
+ mask |= (ARMADA_37XX_NB_VDD_SEL_MASK
+ << ARMADA_37XX_NB_VDD_SEL_OFF);
+
+ val <<= offset;
+ mask <<= offset;
+
+ regmap_update_bits(base, reg, mask, val);
+ }
+
+ /*
+ * Set the CPU clock source: for all the levels we keep the
+ * same clock source as the one already configured. For this
+ * we need to go through the clock framework.
+ */
+ parent = clk_get_parent(clk);
+ clk_set_parent(clk, parent);
+}
+
+static void __init armada37xx_cpufreq_disable_dvfs(struct regmap *base)
+{
+ unsigned int reg = ARMADA_37XX_NB_DYN_MOD,
+ mask = ARMADA_37XX_NB_DFS_EN;
+
+ regmap_update_bits(base, reg, mask, 0);
+}
+
+static void __init armada37xx_cpufreq_enable_dvfs(struct regmap *base)
+{
+ unsigned int val, reg = ARMADA_37XX_NB_CPU_LOAD,
+ mask = ARMADA_37XX_NB_CPU_LOAD_MASK;
+
+ /* Start with the highest load (0) */
+ val = ARMADA_37XX_DVFS_LOAD_0;
+ regmap_update_bits(base, reg, mask, val);
+
+ /* Now enable DVFS for the CPUs */
+ reg = ARMADA_37XX_NB_DYN_MOD;
+ mask = ARMADA_37XX_NB_CLK_SEL_EN | ARMADA_37XX_NB_TBG_EN |
+ ARMADA_37XX_NB_DIV_EN | ARMADA_37XX_NB_VDD_EN |
+ ARMADA_37XX_NB_DFS_EN;
+
+ regmap_update_bits(base, reg, mask, mask);
+}
+
+static int __init armada37xx_cpufreq_driver_init(void)
+{
+ struct armada_37xx_dvfs *dvfs;
+ struct platform_device *pdev;
+ unsigned int cur_frequency;
+ struct regmap *nb_pm_base;
+ struct device *cpu_dev;
+ int load_lvl, ret;
+ struct clk *clk;
+
+ nb_pm_base =
+ syscon_regmap_lookup_by_compatible("marvell,armada-3700-nb-pm");
+
+ if (IS_ERR(nb_pm_base))
+ return -ENODEV;
+
+ /* Disable the DVFS before doing any configuration on it */
+ armada37xx_cpufreq_disable_dvfs(nb_pm_base);
+
+ /*
+ * On CPU 0 register the operating points supported (which are
+ * the nominal CPU frequency and full integer divisions of
+ * it).
+ */
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev) {
+ pr_err("Cannot get the CPU device\n");
+ return -ENODEV;
+ }
+
+ clk = clk_get(cpu_dev, 0);
+ if (IS_ERR(clk)) {
+ dev_err(cpu_dev, "Cannot get clock for CPU0\n");
+ return PTR_ERR(clk);
+ }
+
+ /* Get nominal (current) CPU frequency */
+ cur_frequency = clk_get_rate(clk);
+ if (!cur_frequency) {
+ dev_err(cpu_dev, "Failed to get clock rate for CPU\n");
+ return -EINVAL;
+ }
+
+ dvfs = armada_37xx_cpu_freq_info_get(cur_frequency);
+ if (!dvfs)
+ return -EINVAL;
+
+ armada37xx_cpufreq_dvfs_setup(nb_pm_base, clk, dvfs->divider);
+
+ for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR;
+ load_lvl++) {
+ unsigned long freq = cur_frequency / dvfs->divider[load_lvl];
+
+ ret = dev_pm_opp_add(cpu_dev, freq, 0);
+ if (ret) {
+ /* clean up the already added OPPs before leaving */
+ while (load_lvl-- > ARMADA_37XX_DVFS_LOAD_0) {
+ freq = cur_frequency / dvfs->divider[load_lvl];
+ dev_pm_opp_remove(cpu_dev, freq);
+ }
+ return ret;
+ }
+ }
+
+ /* Now that everything is setup, enable the DVFS at hardware level */
+ armada37xx_cpufreq_enable_dvfs(nb_pm_base);
+
+ pdev = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
+
+ return PTR_ERR_OR_ZERO(pdev);
+}
+/* late_initcall, to guarantee the driver is loaded after the A37xx clock driver */
+late_initcall(armada37xx_cpufreq_driver_init);
+
+MODULE_AUTHOR("Gregory CLEMENT <gregory.clement@free-electrons.com>");
+MODULE_DESCRIPTION("Armada 37xx cpufreq driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index ecc56e26f8f6..3b585e4bfac5 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -108,6 +108,14 @@ static const struct of_device_id blacklist[] __initconst = {
{ .compatible = "marvell,armadaxp", },
+ { .compatible = "mediatek,mt2701", },
+ { .compatible = "mediatek,mt2712", },
+ { .compatible = "mediatek,mt7622", },
+ { .compatible = "mediatek,mt7623", },
+ { .compatible = "mediatek,mt817x", },
+ { .compatible = "mediatek,mt8173", },
+ { .compatible = "mediatek,mt8176", },
+
{ .compatible = "nvidia,tegra124", },
{ .compatible = "st,stih407", },
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 545946ad0752..de3d104c25d7 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -319,33 +319,8 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
static void cpufreq_ready(struct cpufreq_policy *policy)
{
struct private_data *priv = policy->driver_data;
- struct device_node *np = of_node_get(priv->cpu_dev->of_node);
- if (WARN_ON(!np))
- return;
-
- /*
- * For now, just loading the cooling device;
- * thermal DT code takes care of matching them.
- */
- if (of_find_property(np, "#cooling-cells", NULL)) {
- u32 power_coefficient = 0;
-
- of_property_read_u32(np, "dynamic-power-coefficient",
- &power_coefficient);
-
- priv->cdev = of_cpufreq_power_cooling_register(np,
- policy, power_coefficient, NULL);
- if (IS_ERR(priv->cdev)) {
- dev_err(priv->cpu_dev,
- "running cpufreq without cooling device: %ld\n",
- PTR_ERR(priv->cdev));
-
- priv->cdev = NULL;
- }
- }
-
- of_node_put(np);
+ priv->cdev = of_cpufreq_cooling_register(policy);
}
static struct cpufreq_driver dt_cpufreq_driver = {
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 41d148af7748..421f318c0e66 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -601,19 +601,18 @@ static struct cpufreq_governor *find_governor(const char *str_governor)
/**
* cpufreq_parse_governor - parse a governor string
*/
-static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
- struct cpufreq_governor **governor)
+static int cpufreq_parse_governor(char *str_governor,
+ struct cpufreq_policy *policy)
{
- int err = -EINVAL;
-
if (cpufreq_driver->setpolicy) {
if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
- *policy = CPUFREQ_POLICY_PERFORMANCE;
- err = 0;
- } else if (!strncasecmp(str_governor, "powersave",
- CPUFREQ_NAME_LEN)) {
- *policy = CPUFREQ_POLICY_POWERSAVE;
- err = 0;
+ policy->policy = CPUFREQ_POLICY_PERFORMANCE;
+ return 0;
+ }
+
+ if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) {
+ policy->policy = CPUFREQ_POLICY_POWERSAVE;
+ return 0;
}
} else {
struct cpufreq_governor *t;
@@ -621,26 +620,31 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
mutex_lock(&cpufreq_governor_mutex);
t = find_governor(str_governor);
-
- if (t == NULL) {
+ if (!t) {
int ret;
mutex_unlock(&cpufreq_governor_mutex);
+
ret = request_module("cpufreq_%s", str_governor);
- mutex_lock(&cpufreq_governor_mutex);
+ if (ret)
+ return -EINVAL;
- if (ret == 0)
- t = find_governor(str_governor);
- }
+ mutex_lock(&cpufreq_governor_mutex);
- if (t != NULL) {
- *governor = t;
- err = 0;
+ t = find_governor(str_governor);
}
+ if (t && !try_module_get(t->owner))
+ t = NULL;
mutex_unlock(&cpufreq_governor_mutex);
+
+ if (t) {
+ policy->governor = t;
+ return 0;
+ }
}
- return err;
+
+ return -EINVAL;
}
/**
@@ -760,11 +764,14 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
if (ret != 1)
return -EINVAL;
- if (cpufreq_parse_governor(str_governor, &new_policy.policy,
- &new_policy.governor))
+ if (cpufreq_parse_governor(str_governor, &new_policy))
return -EINVAL;
ret = cpufreq_set_policy(policy, &new_policy);
+
+ if (new_policy.governor)
+ module_put(new_policy.governor->owner);
+
return ret ? ret : count;
}
@@ -1044,8 +1051,7 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy)
if (policy->last_policy)
new_policy.policy = policy->last_policy;
else
- cpufreq_parse_governor(gov->name, &new_policy.policy,
- NULL);
+ cpufreq_parse_governor(gov->name, &new_policy);
}
/* set default policy */
return cpufreq_set_policy(policy, &new_policy);
@@ -2160,7 +2166,6 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
mutex_lock(&cpufreq_governor_mutex);
list_del(&governor->governor_list);
mutex_unlock(&cpufreq_governor_mutex);
- return;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
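
After this change cpufreq_parse_governor() returns holding a reference on the governor's module (taken with try_module_get()), so a caller that only needed the governor temporarily must drop it, as store_scaling_governor() above now does after cpufreq_set_policy(). The pairing in isolation, as a sketch (example_switch_governor is hypothetical):

    static int example_switch_governor(struct cpufreq_policy *policy, char *name)
    {
            struct cpufreq_policy new_policy = *policy;
            int ret;

            ret = cpufreq_parse_governor(name, &new_policy);  /* takes a ref */
            if (ret)
                    return ret;

            ret = cpufreq_set_policy(policy, &new_policy);

            /* cpufreq_set_policy() took its own reference if it needed one. */
            if (new_policy.governor)
                    module_put(new_policy.governor->owner);

            return ret;
    }
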
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 1e55b5790853..1572129844a5 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -27,7 +27,7 @@ struct cpufreq_stats {
unsigned int *trans_table;
};
-static int cpufreq_stats_update(struct cpufreq_stats *stats)
+static void cpufreq_stats_update(struct cpufreq_stats *stats)
{
unsigned long long cur_time = get_jiffies_64();
@@ -35,7 +35,6 @@ static int cpufreq_stats_update(struct cpufreq_stats *stats)
stats->time_in_state[stats->last_index] += cur_time - stats->last_time;
stats->last_time = cur_time;
spin_unlock(&cpufreq_stats_lock);
- return 0;
}
static void cpufreq_stats_clear_table(struct cpufreq_stats *stats)
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index d9b2c2de49c4..741f22e5cee3 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -25,15 +25,29 @@ static struct regulator *arm_reg;
static struct regulator *pu_reg;
static struct regulator *soc_reg;
-static struct clk *arm_clk;
-static struct clk *pll1_sys_clk;
-static struct clk *pll1_sw_clk;
-static struct clk *step_clk;
-static struct clk *pll2_pfd2_396m_clk;
-
-/* clk used by i.MX6UL */
-static struct clk *pll2_bus_clk;
-static struct clk *secondary_sel_clk;
+enum IMX6_CPUFREQ_CLKS {
+ ARM,
+ PLL1_SYS,
+ STEP,
+ PLL1_SW,
+ PLL2_PFD2_396M,
+ /* MX6UL requires two more clks */
+ PLL2_BUS,
+ SECONDARY_SEL,
+};
+#define IMX6Q_CPUFREQ_CLK_NUM 5
+#define IMX6UL_CPUFREQ_CLK_NUM 7
+
+static int num_clks;
+static struct clk_bulk_data clks[] = {
+ { .id = "arm" },
+ { .id = "pll1_sys" },
+ { .id = "step" },
+ { .id = "pll1_sw" },
+ { .id = "pll2_pfd2_396m" },
+ { .id = "pll2_bus" },
+ { .id = "secondary_sel" },
+};
static struct device *cpu_dev;
static bool free_opp;
@@ -53,7 +67,7 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
new_freq = freq_table[index].frequency;
freq_hz = new_freq * 1000;
- old_freq = clk_get_rate(arm_clk) / 1000;
+ old_freq = clk_get_rate(clks[ARM].clk) / 1000;
opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz);
if (IS_ERR(opp)) {
@@ -112,29 +126,35 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
* voltage of 528MHz, so lower the CPU frequency to one
* half before changing CPU frequency.
*/
- clk_set_rate(arm_clk, (old_freq >> 1) * 1000);
- clk_set_parent(pll1_sw_clk, pll1_sys_clk);
- if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk))
- clk_set_parent(secondary_sel_clk, pll2_bus_clk);
+ clk_set_rate(clks[ARM].clk, (old_freq >> 1) * 1000);
+ clk_set_parent(clks[PLL1_SW].clk, clks[PLL1_SYS].clk);
+ if (freq_hz > clk_get_rate(clks[PLL2_PFD2_396M].clk))
+ clk_set_parent(clks[SECONDARY_SEL].clk,
+ clks[PLL2_BUS].clk);
else
- clk_set_parent(secondary_sel_clk, pll2_pfd2_396m_clk);
- clk_set_parent(step_clk, secondary_sel_clk);
- clk_set_parent(pll1_sw_clk, step_clk);
+ clk_set_parent(clks[SECONDARY_SEL].clk,
+ clks[PLL2_PFD2_396M].clk);
+ clk_set_parent(clks[STEP].clk, clks[SECONDARY_SEL].clk);
+ clk_set_parent(clks[PLL1_SW].clk, clks[STEP].clk);
+ if (freq_hz > clk_get_rate(clks[PLL2_BUS].clk)) {
+ clk_set_rate(clks[PLL1_SYS].clk, new_freq * 1000);
+ clk_set_parent(clks[PLL1_SW].clk, clks[PLL1_SYS].clk);
+ }
} else {
- clk_set_parent(step_clk, pll2_pfd2_396m_clk);
- clk_set_parent(pll1_sw_clk, step_clk);
- if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) {
- clk_set_rate(pll1_sys_clk, new_freq * 1000);
- clk_set_parent(pll1_sw_clk, pll1_sys_clk);
+ clk_set_parent(clks[STEP].clk, clks[PLL2_PFD2_396M].clk);
+ clk_set_parent(clks[PLL1_SW].clk, clks[STEP].clk);
+ if (freq_hz > clk_get_rate(clks[PLL2_PFD2_396M].clk)) {
+ clk_set_rate(clks[PLL1_SYS].clk, new_freq * 1000);
+ clk_set_parent(clks[PLL1_SW].clk, clks[PLL1_SYS].clk);
} else {
/* pll1_sys needs to be enabled for divider rate change to work. */
pll1_sys_temp_enabled = true;
- clk_prepare_enable(pll1_sys_clk);
+ clk_prepare_enable(clks[PLL1_SYS].clk);
}
}
/* Ensure the arm clock divider is what we expect */
- ret = clk_set_rate(arm_clk, new_freq * 1000);
+ ret = clk_set_rate(clks[ARM].clk, new_freq * 1000);
if (ret) {
dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
regulator_set_voltage_tol(arm_reg, volt_old, 0);
@@ -143,7 +163,7 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
/* PLL1 is only needed until after ARM-PODF is set. */
if (pll1_sys_temp_enabled)
- clk_disable_unprepare(pll1_sys_clk);
+ clk_disable_unprepare(clks[PLL1_SYS].clk);
/* scaling down? scale voltage after frequency */
if (new_freq < old_freq) {
@@ -174,7 +194,7 @@ static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
{
int ret;
- policy->clk = arm_clk;
+ policy->clk = clks[ARM].clk;
ret = cpufreq_generic_init(policy, freq_table, transition_latency);
policy->suspend_freq = policy->max;
@@ -244,6 +264,43 @@ put_node:
of_node_put(np);
}
+#define OCOTP_CFG3_6UL_SPEED_696MHZ 0x2
+
+static void imx6ul_opp_check_speed_grading(struct device *dev)
+{
+ struct device_node *np;
+ void __iomem *base;
+ u32 val;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-ocotp");
+ if (!np)
+ return;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ dev_err(dev, "failed to map ocotp\n");
+ goto put_node;
+ }
+
+ /*
+ * Speed GRADING[1:0] defines the max speed of ARM:
+ * 2b'00: Reserved;
+ * 2b'01: 528000000Hz;
+ * 2b'10: 696000000Hz;
+ * 2b'11: Reserved;
+ * We need to set the max speed of the ARM core according to the fuse map.
+ */
+ val = readl_relaxed(base + OCOTP_CFG3);
+ val >>= OCOTP_CFG3_SPEED_SHIFT;
+ val &= 0x3;
+ if (val != OCOTP_CFG3_6UL_SPEED_696MHZ)
+ if (dev_pm_opp_disable(dev, 696000000))
+ dev_warn(dev, "failed to disable 696MHz OPP\n");
+ iounmap(base);
+put_node:
+ of_node_put(np);
+}
+
static int imx6q_cpufreq_probe(struct platform_device *pdev)
{
struct device_node *np;
@@ -266,28 +323,15 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
return -ENOENT;
}
- arm_clk = clk_get(cpu_dev, "arm");
- pll1_sys_clk = clk_get(cpu_dev, "pll1_sys");
- pll1_sw_clk = clk_get(cpu_dev, "pll1_sw");
- step_clk = clk_get(cpu_dev, "step");
- pll2_pfd2_396m_clk = clk_get(cpu_dev, "pll2_pfd2_396m");
- if (IS_ERR(arm_clk) || IS_ERR(pll1_sys_clk) || IS_ERR(pll1_sw_clk) ||
- IS_ERR(step_clk) || IS_ERR(pll2_pfd2_396m_clk)) {
- dev_err(cpu_dev, "failed to get clocks\n");
- ret = -ENOENT;
- goto put_clk;
- }
-
if (of_machine_is_compatible("fsl,imx6ul") ||
- of_machine_is_compatible("fsl,imx6ull")) {
- pll2_bus_clk = clk_get(cpu_dev, "pll2_bus");
- secondary_sel_clk = clk_get(cpu_dev, "secondary_sel");
- if (IS_ERR(pll2_bus_clk) || IS_ERR(secondary_sel_clk)) {
- dev_err(cpu_dev, "failed to get clocks specific to imx6ul\n");
- ret = -ENOENT;
- goto put_clk;
- }
- }
+ of_machine_is_compatible("fsl,imx6ull"))
+ num_clks = IMX6UL_CPUFREQ_CLK_NUM;
+ else
+ num_clks = IMX6Q_CPUFREQ_CLK_NUM;
+
+ ret = clk_bulk_get(cpu_dev, num_clks, clks);
+ if (ret)
+ goto put_node;
arm_reg = regulator_get(cpu_dev, "arm");
pu_reg = regulator_get_optional(cpu_dev, "pu");
@@ -311,7 +355,10 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
goto put_reg;
}
- imx6q_opp_check_speed_grading(cpu_dev);
+ if (of_machine_is_compatible("fsl,imx6ul"))
+ imx6ul_opp_check_speed_grading(cpu_dev);
+ else
+ imx6q_opp_check_speed_grading(cpu_dev);
/* Because we have added the OPPs here, we must free them */
free_opp = true;
@@ -424,22 +471,11 @@ put_reg:
regulator_put(pu_reg);
if (!IS_ERR(soc_reg))
regulator_put(soc_reg);
-put_clk:
- if (!IS_ERR(arm_clk))
- clk_put(arm_clk);
- if (!IS_ERR(pll1_sys_clk))
- clk_put(pll1_sys_clk);
- if (!IS_ERR(pll1_sw_clk))
- clk_put(pll1_sw_clk);
- if (!IS_ERR(step_clk))
- clk_put(step_clk);
- if (!IS_ERR(pll2_pfd2_396m_clk))
- clk_put(pll2_pfd2_396m_clk);
- if (!IS_ERR(pll2_bus_clk))
- clk_put(pll2_bus_clk);
- if (!IS_ERR(secondary_sel_clk))
- clk_put(secondary_sel_clk);
+
+ clk_bulk_put(num_clks, clks);
+put_node:
of_node_put(np);
+
return ret;
}
@@ -453,13 +489,8 @@ static int imx6q_cpufreq_remove(struct platform_device *pdev)
if (!IS_ERR(pu_reg))
regulator_put(pu_reg);
regulator_put(soc_reg);
- clk_put(arm_clk);
- clk_put(pll1_sys_clk);
- clk_put(pll1_sw_clk);
- clk_put(step_clk);
- clk_put(pll2_pfd2_396m_clk);
- clk_put(pll2_bus_clk);
- clk_put(secondary_sel_clk);
+
+ clk_bulk_put(num_clks, clks);
return 0;
}
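
The conversion above leans on the clk_bulk API: one table of { .id = ... } entries, one clk_bulk_get(), one clk_bulk_put(), instead of seven individual get/put pairs with per-clock error handling. The shape in miniature, reusing three of the i.MX6 clock names from the table above:

    static struct clk_bulk_data example_clks[] = {
            { .id = "arm" },
            { .id = "pll1_sys" },
            { .id = "step" },
    };

    static int example_get_clocks(struct device *dev)
    {
            int ret;

            /* Either every clock is acquired or none is left held. */
            ret = clk_bulk_get(dev, ARRAY_SIZE(example_clks), example_clks);
            if (ret)
                    return ret;

            clk_set_rate(example_clks[0].clk, 792000000);

            clk_bulk_put(ARRAY_SIZE(example_clks), example_clks);
            return 0;
    }
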
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 93a0e88bef76..7edf7a0e5a96 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1595,15 +1595,6 @@ static const struct pstate_funcs knl_funcs = {
.get_val = core_get_val,
};
-static const struct pstate_funcs bxt_funcs = {
- .get_max = core_get_max_pstate,
- .get_max_physical = core_get_max_pstate_physical,
- .get_min = core_get_min_pstate,
- .get_turbo = core_get_turbo_pstate,
- .get_scaling = core_get_scaling,
- .get_val = core_get_val,
-};
-
#define ICPU(model, policy) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
(unsigned long)&policy }
@@ -1627,8 +1618,9 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_funcs),
ICPU(INTEL_FAM6_XEON_PHI_KNL, knl_funcs),
ICPU(INTEL_FAM6_XEON_PHI_KNM, knl_funcs),
- ICPU(INTEL_FAM6_ATOM_GOLDMONT, bxt_funcs),
- ICPU(INTEL_FAM6_ATOM_GEMINI_LAKE, bxt_funcs),
+ ICPU(INTEL_FAM6_ATOM_GOLDMONT, core_funcs),
+ ICPU(INTEL_FAM6_ATOM_GEMINI_LAKE, core_funcs),
+ ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index c46a12df40dd..5faa37c5b091 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -894,7 +894,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
if ((longhaul_version != TYPE_LONGHAUL_V1) && (scale_voltage != 0))
longhaul_setup_voltagescaling();
- policy->cpuinfo.transition_latency = 200000; /* nsec */
+ policy->transition_delay_us = 200000; /* usec */
return cpufreq_table_validate_and_show(policy, longhaul_table);
}
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index e0d5090b303d..8c04dddd3c28 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -310,28 +310,8 @@ static int mtk_cpufreq_set_target(struct cpufreq_policy *policy,
static void mtk_cpufreq_ready(struct cpufreq_policy *policy)
{
struct mtk_cpu_dvfs_info *info = policy->driver_data;
- struct device_node *np = of_node_get(info->cpu_dev->of_node);
- u32 capacitance = 0;
- if (WARN_ON(!np))
- return;
-
- if (of_find_property(np, "#cooling-cells", NULL)) {
- of_property_read_u32(np, DYNAMIC_POWER, &capacitance);
-
- info->cdev = of_cpufreq_power_cooling_register(np,
- policy, capacitance, NULL);
-
- if (IS_ERR(info->cdev)) {
- dev_err(info->cpu_dev,
- "running cpufreq without cooling device: %ld\n",
- PTR_ERR(info->cdev));
-
- info->cdev = NULL;
- }
- }
-
- of_node_put(np);
+ info->cdev = of_cpufreq_cooling_register(policy);
}
static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
@@ -574,6 +554,7 @@ static struct platform_driver mtk_cpufreq_platdrv = {
/* List of machines supported by this driver */
static const struct of_device_id mtk_cpufreq_machines[] __initconst = {
{ .compatible = "mediatek,mt2701", },
+ { .compatible = "mediatek,mt2712", },
{ .compatible = "mediatek,mt7622", },
{ .compatible = "mediatek,mt7623", },
{ .compatible = "mediatek,mt817x", },
diff --git a/drivers/cpufreq/mvebu-cpufreq.c b/drivers/cpufreq/mvebu-cpufreq.c
index ed915ee85dd9..31513bd42705 100644
--- a/drivers/cpufreq/mvebu-cpufreq.c
+++ b/drivers/cpufreq/mvebu-cpufreq.c
@@ -76,12 +76,6 @@ static int __init armada_xp_pmsu_cpufreq_init(void)
return PTR_ERR(clk);
}
- /*
- * In case of a failure of dev_pm_opp_add(), we don't
- * bother with cleaning up the registered OPP (there's
- * no function to do so), and simply cancel the
- * registration of the cpufreq device.
- */
ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk), 0);
if (ret) {
clk_put(clk);
@@ -91,7 +85,8 @@ static int __init armada_xp_pmsu_cpufreq_init(void)
ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk) / 2, 0);
if (ret) {
clk_put(clk);
- return ret;
+ dev_err(cpu_dev, "Failed to register OPPs\n");
+ goto opp_register_failed;
}
ret = dev_pm_opp_set_sharing_cpus(cpu_dev,
@@ -99,9 +94,16 @@ static int __init armada_xp_pmsu_cpufreq_init(void)
if (ret)
dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
__func__, ret);
+ clk_put(clk);
}
platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
return 0;
+
+opp_register_failed:
+ /* As registration has failed, remove all the OPPs for all CPUs */
+ dev_pm_opp_cpumask_remove_table(cpu_possible_mask);
+
+ return ret;
}
device_initcall(armada_xp_pmsu_cpufreq_init);
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index b6d7c4c98d0a..29cdec198657 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -29,6 +29,7 @@
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/cpu.h>
+#include <linux/hashtable.h>
#include <trace/events/power.h>
#include <asm/cputhreads.h>
@@ -38,14 +39,13 @@
#include <asm/opal.h>
#include <linux/timer.h>
-#define POWERNV_MAX_PSTATES 256
+#define POWERNV_MAX_PSTATES_ORDER 8
+#define POWERNV_MAX_PSTATES (1UL << (POWERNV_MAX_PSTATES_ORDER))
#define PMSR_PSAFE_ENABLE (1UL << 30)
#define PMSR_SPR_EM_DISABLE (1UL << 31)
-#define PMSR_MAX(x) ((x >> 32) & 0xFF)
+#define MAX_PSTATE_SHIFT 32
#define LPSTATE_SHIFT 48
#define GPSTATE_SHIFT 56
-#define GET_LPSTATE(x) (((x) >> LPSTATE_SHIFT) & 0xFF)
-#define GET_GPSTATE(x) (((x) >> GPSTATE_SHIFT) & 0xFF)
#define MAX_RAMP_DOWN_TIME 5120
/*
@@ -94,6 +94,27 @@ struct global_pstate_info {
};
static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
+
+static DEFINE_HASHTABLE(pstate_revmap, POWERNV_MAX_PSTATES_ORDER);
+/**
+ * struct pstate_idx_revmap_data - Entry in the pstate_revmap hashtable,
+ * indexed by a function of the pstate id.
+ *
+ * @pstate_id: pstate id for this entry.
+ *
+ * @cpufreq_table_idx: Index into the powernv_freqs
+ * cpufreq_frequency_table for frequency
+ * corresponding to pstate_id.
+ *
+ * @hentry: hlist_node that hooks this entry into the pstate_revmap
+ * hashtable
+ */
+struct pstate_idx_revmap_data {
+ u8 pstate_id;
+ unsigned int cpufreq_table_idx;
+ struct hlist_node hentry;
+};
+
static bool rebooting, throttled, occ_reset;
static const char * const throttle_reason[] = {
@@ -148,39 +169,56 @@ static struct powernv_pstate_info {
bool wof_enabled;
} powernv_pstate_info;
-/* Use following macros for conversions between pstate_id and index */
-static inline int idx_to_pstate(unsigned int i)
+static inline u8 extract_pstate(u64 pmsr_val, unsigned int shift)
+{
+ return ((pmsr_val >> shift) & 0xFF);
+}
+
+#define extract_local_pstate(x) extract_pstate(x, LPSTATE_SHIFT)
+#define extract_global_pstate(x) extract_pstate(x, GPSTATE_SHIFT)
+#define extract_max_pstate(x) extract_pstate(x, MAX_PSTATE_SHIFT)
+
+/* Use following functions for conversions between pstate_id and index */
+
+/**
+ * idx_to_pstate : Returns the pstate id corresponding to the
+ * frequency in the cpufreq frequency table
+ * powernv_freqs indexed by @i.
+ *
+ * If @i is out of bound, this will return the pstate
+ * corresponding to the nominal frequency.
+ */
+static inline u8 idx_to_pstate(unsigned int i)
{
if (unlikely(i >= powernv_pstate_info.nr_pstates)) {
- pr_warn_once("index %u is out of bound\n", i);
+ pr_warn_once("idx_to_pstate: index %u is out of bound\n", i);
return powernv_freqs[powernv_pstate_info.nominal].driver_data;
}
return powernv_freqs[i].driver_data;
}
-static inline unsigned int pstate_to_idx(int pstate)
+/**
+ * pstate_to_idx : Returns the index in the cpufreq frequencytable
+ * powernv_freqs for the frequency whose corresponding
+ * pstate id is @pstate.
+ *
+ * If no frequency corresponding to @pstate is found,
+ * this will return the index of the nominal
+ * frequency.
+ */
+static unsigned int pstate_to_idx(u8 pstate)
{
- int min = powernv_freqs[powernv_pstate_info.min].driver_data;
- int max = powernv_freqs[powernv_pstate_info.max].driver_data;
+ unsigned int key = pstate % POWERNV_MAX_PSTATES;
+ struct pstate_idx_revmap_data *revmap_data;
- if (min > 0) {
- if (unlikely((pstate < max) || (pstate > min))) {
- pr_warn_once("pstate %d is out of bound\n", pstate);
- return powernv_pstate_info.nominal;
- }
- } else {
- if (unlikely((pstate > max) || (pstate < min))) {
- pr_warn_once("pstate %d is out of bound\n", pstate);
- return powernv_pstate_info.nominal;
- }
+ hash_for_each_possible(pstate_revmap, revmap_data, hentry, key) {
+ if (revmap_data->pstate_id == pstate)
+ return revmap_data->cpufreq_table_idx;
}
- /*
- * abs() is deliberately used so that is works with
- * both monotonically increasing and decreasing
- * pstate values
- */
- return abs(pstate - idx_to_pstate(powernv_pstate_info.max));
+
+ pr_warn_once("pstate_to_idx: pstate 0x%x not found\n", pstate);
+ return powernv_pstate_info.nominal;
}
static inline void reset_gpstates(struct cpufreq_policy *policy)
@@ -247,7 +285,7 @@ static int init_powernv_pstates(void)
powernv_pstate_info.wof_enabled = true;
next:
- pr_info("cpufreq pstate min %d nominal %d max %d\n", pstate_min,
+ pr_info("cpufreq pstate min 0x%x nominal 0x%x max 0x%x\n", pstate_min,
pstate_nominal, pstate_max);
pr_info("Workload Optimized Frequency is %s in the platform\n",
(powernv_pstate_info.wof_enabled) ? "enabled" : "disabled");
@@ -278,19 +316,30 @@ next:
powernv_pstate_info.nr_pstates = nr_pstates;
pr_debug("NR PStates %d\n", nr_pstates);
+
for (i = 0; i < nr_pstates; i++) {
u32 id = be32_to_cpu(pstate_ids[i]);
u32 freq = be32_to_cpu(pstate_freqs[i]);
+ struct pstate_idx_revmap_data *revmap_data;
+ unsigned int key;
pr_debug("PState id %d freq %d MHz\n", id, freq);
powernv_freqs[i].frequency = freq * 1000; /* kHz */
- powernv_freqs[i].driver_data = id;
+ powernv_freqs[i].driver_data = id & 0xFF;
+
+ /* if allocation fails, pstate_to_idx() falls back to nominal */
+ revmap_data = kmalloc(sizeof(*revmap_data), GFP_KERNEL);
+ if (revmap_data) {
+ revmap_data->pstate_id = id & 0xFF;
+ revmap_data->cpufreq_table_idx = i;
+ key = revmap_data->pstate_id % POWERNV_MAX_PSTATES;
+ hash_add(pstate_revmap, &revmap_data->hentry, key);
+ }
if (id == pstate_max)
powernv_pstate_info.max = i;
- else if (id == pstate_nominal)
+ if (id == pstate_nominal)
powernv_pstate_info.nominal = i;
- else if (id == pstate_min)
+ if (id == pstate_min)
powernv_pstate_info.min = i;
if (powernv_pstate_info.wof_enabled && id == pstate_turbo) {
@@ -307,14 +356,13 @@ next:
}
/* Returns the CPU frequency corresponding to the pstate_id. */
-static unsigned int pstate_id_to_freq(int pstate_id)
+static unsigned int pstate_id_to_freq(u8 pstate_id)
{
int i;
i = pstate_to_idx(pstate_id);
if (i >= powernv_pstate_info.nr_pstates || i < 0) {
- pr_warn("PState id %d outside of PState table, "
- "reporting nominal id %d instead\n",
+ pr_warn("PState id 0x%x outside of PState table, reporting nominal id 0x%x instead\n",
pstate_id, idx_to_pstate(powernv_pstate_info.nominal));
i = powernv_pstate_info.nominal;
}
@@ -420,8 +468,8 @@ static inline void set_pmspr(unsigned long sprn, unsigned long val)
*/
struct powernv_smp_call_data {
unsigned int freq;
- int pstate_id;
- int gpstate_id;
+ u8 pstate_id;
+ u8 gpstate_id;
};
/*
@@ -438,22 +486,15 @@ struct powernv_smp_call_data {
static void powernv_read_cpu_freq(void *arg)
{
unsigned long pmspr_val;
- s8 local_pstate_id;
struct powernv_smp_call_data *freq_data = arg;
pmspr_val = get_pmspr(SPRN_PMSR);
-
- /*
- * The local pstate id corresponds bits 48..55 in the PMSR.
- * Note: Watch out for the sign!
- */
- local_pstate_id = (pmspr_val >> 48) & 0xFF;
- freq_data->pstate_id = local_pstate_id;
+ freq_data->pstate_id = extract_local_pstate(pmspr_val);
freq_data->freq = pstate_id_to_freq(freq_data->pstate_id);
- pr_debug("cpu %d pmsr %016lX pstate_id %d frequency %d kHz\n",
- raw_smp_processor_id(), pmspr_val, freq_data->pstate_id,
- freq_data->freq);
+ pr_debug("cpu %d pmsr %016lX pstate_id 0x%x frequency %d kHz\n",
+ raw_smp_processor_id(), pmspr_val, freq_data->pstate_id,
+ freq_data->freq);
}
/*
@@ -515,21 +556,21 @@ static void powernv_cpufreq_throttle_check(void *data)
struct chip *chip;
unsigned int cpu = smp_processor_id();
unsigned long pmsr;
- int pmsr_pmax;
+ u8 pmsr_pmax;
unsigned int pmsr_pmax_idx;
pmsr = get_pmspr(SPRN_PMSR);
chip = this_cpu_read(chip_info);
/* Check for Pmax Capping */
- pmsr_pmax = (s8)PMSR_MAX(pmsr);
+ pmsr_pmax = extract_max_pstate(pmsr);
pmsr_pmax_idx = pstate_to_idx(pmsr_pmax);
if (pmsr_pmax_idx != powernv_pstate_info.max) {
if (chip->throttled)
goto next;
chip->throttled = true;
if (pmsr_pmax_idx > powernv_pstate_info.nominal) {
- pr_warn_once("CPU %d on Chip %u has Pmax(%d) reduced below nominal frequency(%d)\n",
+ pr_warn_once("CPU %d on Chip %u has Pmax(0x%x) reduced below that of nominal frequency(0x%x)\n",
cpu, chip->id, pmsr_pmax,
idx_to_pstate(powernv_pstate_info.nominal));
chip->throttle_sub_turbo++;
@@ -645,8 +686,8 @@ void gpstate_timer_handler(struct timer_list *t)
* value. Hence, read from PMCR to get correct data.
*/
val = get_pmspr(SPRN_PMCR);
- freq_data.gpstate_id = (s8)GET_GPSTATE(val);
- freq_data.pstate_id = (s8)GET_LPSTATE(val);
+ freq_data.gpstate_id = extract_global_pstate(val);
+ freq_data.pstate_id = extract_local_pstate(val);
if (freq_data.gpstate_id == freq_data.pstate_id) {
reset_gpstates(policy);
spin_unlock(&gpstates->gpstate_lock);
diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c
index 4ada55b8856e..0562761a3dec 100644
--- a/drivers/cpufreq/qoriq-cpufreq.c
+++ b/drivers/cpufreq/qoriq-cpufreq.c
@@ -275,20 +275,8 @@ static int qoriq_cpufreq_target(struct cpufreq_policy *policy,
static void qoriq_cpufreq_ready(struct cpufreq_policy *policy)
{
struct cpu_data *cpud = policy->driver_data;
- struct device_node *np = of_get_cpu_node(policy->cpu, NULL);
- if (of_find_property(np, "#cooling-cells", NULL)) {
- cpud->cdev = of_cpufreq_cooling_register(np, policy);
-
- if (IS_ERR(cpud->cdev) && PTR_ERR(cpud->cdev) != -ENOSYS) {
- pr_err("cpu%d is not running as cooling device: %ld\n",
- policy->cpu, PTR_ERR(cpud->cdev));
-
- cpud->cdev = NULL;
- }
- }
-
- of_node_put(np);
+ cpud->cdev = of_cpufreq_cooling_register(policy);
}
static struct cpufreq_driver qoriq_cpufreq_driver = {
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index 05d299052c5c..247fcbfa4cb5 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -18,27 +18,89 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/cpu_cooling.h>
+#include <linux/export.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
+#include <linux/of_platform.h>
#include <linux/pm_opp.h>
#include <linux/scpi_protocol.h>
+#include <linux/slab.h>
#include <linux/types.h>
-#include "arm_big_little.h"
+struct scpi_data {
+ struct clk *clk;
+ struct device *cpu_dev;
+ struct thermal_cooling_device *cdev;
+};
static struct scpi_ops *scpi_ops;
-static int scpi_get_transition_latency(struct device *cpu_dev)
+static unsigned int scpi_cpufreq_get_rate(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
+ struct scpi_data *priv = policy->driver_data;
+ unsigned long rate = clk_get_rate(priv->clk);
+
+ return rate / 1000;
+}
+
+static int
+scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
+{
+ struct scpi_data *priv = policy->driver_data;
+ u64 rate = policy->freq_table[index].frequency * 1000;
+ int ret;
+
+ ret = clk_set_rate(priv->clk, rate);
+ if (!ret && (clk_get_rate(priv->clk) != rate))
+ ret = -EIO;
+
+ return ret;
+}
+
+static int
+scpi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
{
- return scpi_ops->get_transition_latency(cpu_dev);
+ int cpu, domain, tdomain;
+ struct device *tcpu_dev;
+
+ domain = scpi_ops->device_domain_id(cpu_dev);
+ if (domain < 0)
+ return domain;
+
+ for_each_possible_cpu(cpu) {
+ if (cpu == cpu_dev->id)
+ continue;
+
+ tcpu_dev = get_cpu_device(cpu);
+ if (!tcpu_dev)
+ continue;
+
+ tdomain = scpi_ops->device_domain_id(tcpu_dev);
+ if (tdomain == domain)
+ cpumask_set_cpu(cpu, cpumask);
+ }
+
+ return 0;
}
-static int scpi_init_opp_table(const struct cpumask *cpumask)
+static int scpi_cpufreq_init(struct cpufreq_policy *policy)
{
int ret;
- struct device *cpu_dev = get_cpu_device(cpumask_first(cpumask));
+ unsigned int latency;
+ struct device *cpu_dev;
+ struct scpi_data *priv;
+ struct cpufreq_frequency_table *freq_table;
+
+ cpu_dev = get_cpu_device(policy->cpu);
+ if (!cpu_dev) {
+ pr_err("failed to get cpu%d device\n", policy->cpu);
+ return -ENODEV;
+ }
ret = scpi_ops->add_opps_to_device(cpu_dev);
if (ret) {
@@ -46,32 +108,133 @@ static int scpi_init_opp_table(const struct cpumask *cpumask)
return ret;
}
- ret = dev_pm_opp_set_sharing_cpus(cpu_dev, cpumask);
- if (ret)
+ ret = scpi_get_sharing_cpus(cpu_dev, policy->cpus);
+ if (ret) {
+ dev_warn(cpu_dev, "failed to get sharing cpumask\n");
+ return ret;
+ }
+
+ ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
+ if (ret) {
dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
__func__, ret);
+ return ret;
+ }
+
+ ret = dev_pm_opp_get_opp_count(cpu_dev);
+ if (ret <= 0) {
+ dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
+ ret = -EPROBE_DEFER;
+ goto out_free_opp;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto out_free_opp;
+ }
+
+ ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
+ if (ret) {
+ dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
+ goto out_free_priv;
+ }
+
+ priv->cpu_dev = cpu_dev;
+ priv->clk = clk_get(cpu_dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d\n",
+ __func__, cpu_dev->id);
+ ret = PTR_ERR(priv->clk);
+ goto out_free_cpufreq_table;
+ }
+
+ policy->driver_data = priv;
+
+ ret = cpufreq_table_validate_and_show(policy, freq_table);
+ if (ret) {
+ dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__,
+ ret);
+ goto out_put_clk;
+ }
+
+ /* scpi allows DVFS requests for any domain from any CPU */
+ policy->dvfs_possible_from_any_cpu = true;
+
+ latency = scpi_ops->get_transition_latency(cpu_dev);
+ if (!latency)
+ latency = CPUFREQ_ETERNAL;
+
+ policy->cpuinfo.transition_latency = latency;
+
+ policy->fast_switch_possible = false;
+ return 0;
+
+out_put_clk:
+ clk_put(priv->clk);
+out_free_cpufreq_table:
+ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
+out_free_priv:
+ kfree(priv);
+out_free_opp:
+ dev_pm_opp_cpumask_remove_table(policy->cpus);
+
return ret;
}
-static const struct cpufreq_arm_bL_ops scpi_cpufreq_ops = {
- .name = "scpi",
- .get_transition_latency = scpi_get_transition_latency,
- .init_opp_table = scpi_init_opp_table,
- .free_opp_table = dev_pm_opp_cpumask_remove_table,
+static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
+{
+ struct scpi_data *priv = policy->driver_data;
+
+ cpufreq_cooling_unregister(priv->cdev);
+ clk_put(priv->clk);
+ dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
+ kfree(priv);
+ dev_pm_opp_cpumask_remove_table(policy->related_cpus);
+
+ return 0;
+}
+
+static void scpi_cpufreq_ready(struct cpufreq_policy *policy)
+{
+ struct scpi_data *priv = policy->driver_data;
+ struct thermal_cooling_device *cdev;
+
+ cdev = of_cpufreq_cooling_register(policy);
+ if (!IS_ERR(cdev))
+ priv->cdev = cdev;
+}
+
+static struct cpufreq_driver scpi_cpufreq_driver = {
+ .name = "scpi-cpufreq",
+ .flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
+ CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .attr = cpufreq_generic_attr,
+ .get = scpi_cpufreq_get_rate,
+ .init = scpi_cpufreq_init,
+ .exit = scpi_cpufreq_exit,
+ .ready = scpi_cpufreq_ready,
+ .target_index = scpi_cpufreq_set_target,
};
static int scpi_cpufreq_probe(struct platform_device *pdev)
{
+ int ret;
+
scpi_ops = get_scpi_ops();
if (!scpi_ops)
return -EIO;
- return bL_cpufreq_register(&scpi_cpufreq_ops);
+ ret = cpufreq_register_driver(&scpi_cpufreq_driver);
+ if (ret)
+ dev_err(&pdev->dev, "%s: registering cpufreq failed, err: %d\n",
+ __func__, ret);
+ return ret;
}
static int scpi_cpufreq_remove(struct platform_device *pdev)
{
- bL_cpufreq_unregister(&scpi_cpufreq_ops);
+ cpufreq_unregister_driver(&scpi_cpufreq_driver);
scpi_ops = NULL;
return 0;
}
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index 923317f03b4b..a099b7bf74cd 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -17,6 +17,7 @@
#include <linux/cpu.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -50,6 +51,7 @@ struct ti_cpufreq_soc_data {
unsigned long efuse_mask;
unsigned long efuse_shift;
unsigned long rev_offset;
+ bool multi_regulator;
};
struct ti_cpufreq_data {
@@ -57,6 +59,7 @@ struct ti_cpufreq_data {
struct device_node *opp_node;
struct regmap *syscon;
const struct ti_cpufreq_soc_data *soc_data;
+ struct opp_table *opp_table;
};
static unsigned long amx3_efuse_xlate(struct ti_cpufreq_data *opp_data,
@@ -95,6 +98,7 @@ static struct ti_cpufreq_soc_data am3x_soc_data = {
.efuse_offset = 0x07fc,
.efuse_mask = 0x1fff,
.rev_offset = 0x600,
+ .multi_regulator = false,
};
static struct ti_cpufreq_soc_data am4x_soc_data = {
@@ -103,6 +107,7 @@ static struct ti_cpufreq_soc_data am4x_soc_data = {
.efuse_offset = 0x0610,
.efuse_mask = 0x3f,
.rev_offset = 0x600,
+ .multi_regulator = false,
};
static struct ti_cpufreq_soc_data dra7_soc_data = {
@@ -111,6 +116,7 @@ static struct ti_cpufreq_soc_data dra7_soc_data = {
.efuse_mask = 0xf80000,
.efuse_shift = 19,
.rev_offset = 0x204,
+ .multi_regulator = true,
};
/**
@@ -195,12 +201,14 @@ static const struct of_device_id ti_cpufreq_of_match[] = {
{},
};
-static int ti_cpufreq_init(void)
+static int ti_cpufreq_probe(struct platform_device *pdev)
{
u32 version[VERSION_COUNT];
struct device_node *np;
const struct of_device_id *match;
+ struct opp_table *ti_opp_table;
struct ti_cpufreq_data *opp_data;
+ const char * const reg_names[] = {"vdd", "vbb"};
int ret;
np = of_find_node_by_path("/");
@@ -247,16 +255,29 @@ static int ti_cpufreq_init(void)
if (ret)
goto fail_put_node;
- ret = PTR_ERR_OR_ZERO(dev_pm_opp_set_supported_hw(opp_data->cpu_dev,
- version, VERSION_COUNT));
- if (ret) {
+ ti_opp_table = dev_pm_opp_set_supported_hw(opp_data->cpu_dev,
+ version, VERSION_COUNT);
+ if (IS_ERR(ti_opp_table)) {
dev_err(opp_data->cpu_dev,
"Failed to set supported hardware\n");
+ ret = PTR_ERR(ti_opp_table);
goto fail_put_node;
}
- of_node_put(opp_data->opp_node);
+ opp_data->opp_table = ti_opp_table;
+
+ if (opp_data->soc_data->multi_regulator) {
+ ti_opp_table = dev_pm_opp_set_regulators(opp_data->cpu_dev,
+ reg_names,
+ ARRAY_SIZE(reg_names));
+ if (IS_ERR(ti_opp_table)) {
+ dev_pm_opp_put_supported_hw(opp_data->opp_table);
+ ret = PTR_ERR(ti_opp_table);
+ goto fail_put_node;
+ }
+ }
+ of_node_put(opp_data->opp_node);
register_cpufreq_dt:
platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
@@ -269,4 +290,22 @@ free_opp_data:
return ret;
}
-device_initcall(ti_cpufreq_init);
+
+static int ti_cpufreq_init(void)
+{
+ platform_device_register_simple("ti-cpufreq", -1, NULL, 0);
+ return 0;
+}
+module_init(ti_cpufreq_init);
+
+static struct platform_driver ti_cpufreq_driver = {
+ .probe = ti_cpufreq_probe,
+ .driver = {
+ .name = "ti-cpufreq",
+ },
+};
+module_platform_driver(ti_cpufreq_driver);
+
+MODULE_DESCRIPTION("TI CPUFreq/OPP hw-supported driver");
+MODULE_AUTHOR("Dave Gerlach <d-gerlach@ti.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
index 4e78263e34a4..5d359aff3cc5 100644
--- a/drivers/cpuidle/governor.c
+++ b/drivers/cpuidle/governor.c
@@ -36,14 +36,15 @@ static struct cpuidle_governor * __cpuidle_find_governor(const char *str)
/**
* cpuidle_switch_governor - changes the governor
* @gov: the new target governor
- *
- * NOTE: "gov" can be NULL to specify disabled
* Must be called with cpuidle_lock acquired.
*/
int cpuidle_switch_governor(struct cpuidle_governor *gov)
{
struct cpuidle_device *dev;
+ if (!gov)
+ return -EINVAL;
+
if (gov == cpuidle_curr_governor)
return 0;
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 47ec920d5b71..4b741b83e23f 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -723,7 +723,6 @@ config CRYPTO_DEV_ARTPEC6
select CRYPTO_HASH
select CRYPTO_SHA1
select CRYPTO_SHA256
- select CRYPTO_SHA384
select CRYPTO_SHA512
help
Enables the driver for the on-chip crypto accelerator
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index eeaf27859d80..ea83d0bff0e9 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -256,10 +256,6 @@ static inline bool crypto4xx_aead_need_fallback(struct aead_request *req,
if (is_ccm && !(req->iv[0] == 1 || req->iv[0] == 3))
return true;
- /* CCM - fix CBC MAC mismatch in special case */
- if (is_ccm && decrypt && !req->assoclen)
- return true;
-
return false;
}
@@ -330,7 +326,7 @@ int crypto4xx_setkey_aes_ccm(struct crypto_aead *cipher, const u8 *key,
sa = (struct dynamic_sa_ctl *) ctx->sa_in;
sa->sa_contents.w = SA_AES_CCM_CONTENTS | (keylen << 2);
- set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
+ set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
SA_NO_HEADER_PROC, SA_HASH_ALG_CBC_MAC,
SA_CIPHER_ALG_AES,
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index c44954e274bc..76f459ad2821 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -128,7 +128,14 @@ static void crypto4xx_hw_init(struct crypto4xx_device *dev)
writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG);
- writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
+ if (dev->is_revb) {
+ writel(PPC4XX_INT_TIMEOUT_CNT_REVB << 10,
+ dev->ce_base + CRYPTO4XX_INT_TIMEOUT_CNT);
+ writel(PPC4XX_PD_DONE_INT | PPC4XX_TMO_ERR_INT,
+ dev->ce_base + CRYPTO4XX_INT_EN);
+ } else {
+ writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
+ }
}
int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
@@ -275,14 +282,12 @@ static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
*/
static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
{
- dev->gdr = dma_alloc_coherent(dev->core_dev->device,
- sizeof(struct ce_gd) * PPC4XX_NUM_GD,
- &dev->gdr_pa, GFP_ATOMIC);
+ dev->gdr = dma_zalloc_coherent(dev->core_dev->device,
+ sizeof(struct ce_gd) * PPC4XX_NUM_GD,
+ &dev->gdr_pa, GFP_ATOMIC);
if (!dev->gdr)
return -ENOMEM;
- memset(dev->gdr, 0, sizeof(struct ce_gd) * PPC4XX_NUM_GD);
-
return 0;
}
@@ -570,15 +575,14 @@ static void crypto4xx_aead_done(struct crypto4xx_device *dev,
struct pd_uinfo *pd_uinfo,
struct ce_pd *pd)
{
- struct aead_request *aead_req;
- struct crypto4xx_ctx *ctx;
+ struct aead_request *aead_req = container_of(pd_uinfo->async_req,
+ struct aead_request, base);
struct scatterlist *dst = pd_uinfo->dest_va;
+ size_t cp_len = crypto_aead_authsize(
+ crypto_aead_reqtfm(aead_req));
+ u32 icv[cp_len];
int err = 0;
- aead_req = container_of(pd_uinfo->async_req, struct aead_request,
- base);
- ctx = crypto_tfm_ctx(aead_req->base.tfm);
-
if (pd_uinfo->using_sd) {
crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
pd->pd_ctl_len.bf.pkt_len,
@@ -590,38 +594,39 @@ static void crypto4xx_aead_done(struct crypto4xx_device *dev,
if (pd_uinfo->sa_va->sa_command_0.bf.dir == DIR_OUTBOUND) {
/* append icv at the end */
- size_t cp_len = crypto_aead_authsize(
- crypto_aead_reqtfm(aead_req));
- u32 icv[cp_len];
-
crypto4xx_memcpy_from_le32(icv, pd_uinfo->sr_va->save_digest,
cp_len);
scatterwalk_map_and_copy(icv, dst, aead_req->cryptlen,
cp_len, 1);
+ } else {
+ /* check icv at the end */
+ scatterwalk_map_and_copy(icv, aead_req->src,
+ aead_req->assoclen + aead_req->cryptlen -
+ cp_len, cp_len, 0);
+
+ crypto4xx_memcpy_from_le32(icv, icv, cp_len);
+
+ if (crypto_memneq(icv, pd_uinfo->sr_va->save_digest, cp_len))
+ err = -EBADMSG;
}
crypto4xx_ret_sg_desc(dev, pd_uinfo);
if (pd->pd_ctl.bf.status & 0xff) {
- if (pd->pd_ctl.bf.status & 0x1) {
- /* authentication error */
- err = -EBADMSG;
- } else {
- if (!__ratelimit(&dev->aead_ratelimit)) {
- if (pd->pd_ctl.bf.status & 2)
- pr_err("pad fail error\n");
- if (pd->pd_ctl.bf.status & 4)
- pr_err("seqnum fail\n");
- if (pd->pd_ctl.bf.status & 8)
- pr_err("error _notify\n");
- pr_err("aead return err status = 0x%02x\n",
- pd->pd_ctl.bf.status & 0xff);
- pr_err("pd pad_ctl = 0x%08x\n",
- pd->pd_ctl.bf.pd_pad_ctl);
- }
- err = -EINVAL;
+ if (!__ratelimit(&dev->aead_ratelimit)) {
+ if (pd->pd_ctl.bf.status & 2)
+ pr_err("pad fail error\n");
+ if (pd->pd_ctl.bf.status & 4)
+ pr_err("seqnum fail\n");
+ if (pd->pd_ctl.bf.status & 8)
+ pr_err("error _notify\n");
+ pr_err("aead return err status = 0x%02x\n",
+ pd->pd_ctl.bf.status & 0xff);
+ pr_err("pd pad_ctl = 0x%08x\n",
+ pd->pd_ctl.bf.pd_pad_ctl);
}
+ err = -EINVAL;
}
if (pd_uinfo->state & PD_ENTRY_BUSY)
@@ -1070,21 +1075,29 @@ static void crypto4xx_bh_tasklet_cb(unsigned long data)
/**
* Top Half of isr.
*/
-static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
+static inline irqreturn_t crypto4xx_interrupt_handler(int irq, void *data,
+ u32 clr_val)
{
struct device *dev = (struct device *)data;
struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
- if (!core_dev->dev->ce_base)
- return 0;
-
- writel(PPC4XX_INTERRUPT_CLR,
- core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
+ writel(clr_val, core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
tasklet_schedule(&core_dev->tasklet);
return IRQ_HANDLED;
}
+static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
+{
+ return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR);
+}
+
+static irqreturn_t crypto4xx_ce_interrupt_handler_revb(int irq, void *data)
+{
+ return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR |
+ PPC4XX_TMO_ERR_INT);
+}
+
/**
* Supported Crypto Algorithms
*/
@@ -1266,6 +1279,8 @@ static int crypto4xx_probe(struct platform_device *ofdev)
struct resource res;
struct device *dev = &ofdev->dev;
struct crypto4xx_core_device *core_dev;
+ u32 pvr;
+ bool is_revb = true;
rc = of_address_to_resource(ofdev->dev.of_node, 0, &res);
if (rc)
@@ -1282,6 +1297,7 @@ static int crypto4xx_probe(struct platform_device *ofdev)
mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
mtdcri(SDR0, PPC405EX_SDR0_SRST,
mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
+ is_revb = false;
} else if (of_find_compatible_node(NULL, NULL,
"amcc,ppc460sx-crypto")) {
mtdcri(SDR0, PPC460SX_SDR0_SRST,
@@ -1304,7 +1320,22 @@ static int crypto4xx_probe(struct platform_device *ofdev)
if (!core_dev->dev)
goto err_alloc_dev;
+	/*
+	 * Older versions of the 460EX/GT have a hardware bug and hence
+	 * do not support H/W-based security interrupt coalescing.
+	 */
+ pvr = mfspr(SPRN_PVR);
+ if (is_revb && ((pvr >> 4) == 0x130218A)) {
+ u32 min = PVR_MIN(pvr);
+
+ if (min < 4) {
+ dev_info(dev, "RevA detected - disable interrupt coalescing\n");
+ is_revb = false;
+ }
+ }
+
core_dev->dev->core_dev = core_dev;
+ core_dev->dev->is_revb = is_revb;
core_dev->device = dev;
spin_lock_init(&core_dev->lock);
INIT_LIST_HEAD(&core_dev->dev->alg_list);
@@ -1325,13 +1356,6 @@ static int crypto4xx_probe(struct platform_device *ofdev)
tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
(unsigned long) dev);
- /* Register for Crypto isr, Crypto Engine IRQ */
- core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
- rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0,
- core_dev->dev->name, dev);
- if (rc)
- goto err_request_irq;
-
core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0);
if (!core_dev->dev->ce_base) {
dev_err(dev, "failed to of_iomap\n");
@@ -1339,6 +1363,15 @@ static int crypto4xx_probe(struct platform_device *ofdev)
goto err_iomap;
}
+ /* Register for Crypto isr, Crypto Engine IRQ */
+ core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
+ rc = request_irq(core_dev->irq, is_revb ?
+ crypto4xx_ce_interrupt_handler_revb :
+ crypto4xx_ce_interrupt_handler, 0,
+ KBUILD_MODNAME, dev);
+ if (rc)
+ goto err_request_irq;
+
/* need to setup pdr, rdr, gdr and sdr before this */
crypto4xx_hw_init(core_dev->dev);
@@ -1352,11 +1385,11 @@ static int crypto4xx_probe(struct platform_device *ofdev)
return 0;
err_start_dev:
- iounmap(core_dev->dev->ce_base);
-err_iomap:
free_irq(core_dev->irq, dev);
err_request_irq:
irq_dispose_mapping(core_dev->irq);
+ iounmap(core_dev->dev->ce_base);
+err_iomap:
tasklet_kill(&core_dev->tasklet);
err_build_sdr:
crypto4xx_destroy_sdr(core_dev->dev);
@@ -1397,7 +1430,7 @@ MODULE_DEVICE_TABLE(of, crypto4xx_match);
static struct platform_driver crypto4xx_driver = {
.driver = {
- .name = MODULE_NAME,
+ .name = KBUILD_MODNAME,
.of_match_table = crypto4xx_match,
},
.probe = crypto4xx_probe,
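
A sketch (assumptions noted) of the revision check crypto4xx_probe() now
performs: mfspr(SPRN_PVR) reads the powerpc processor version register and
PVR_MIN() extracts the minor revision, both from <asm/reg.h>; the
0x130218A identifier and the "minor < 4" cutoff are taken from the patch
itself.

	#include <linux/types.h>
	#include <asm/reg.h>	/* mfspr(), SPRN_PVR, PVR_MIN() */

	static bool crypto4xx_pvr_is_revb(void)
	{
		u32 pvr = mfspr(SPRN_PVR);

		/* 460EX/GT parts with minor revision < 4 are RevA */
		if ((pvr >> 4) == 0x130218A && PVR_MIN(pvr) < 4)
			return false;
		return true;
	}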
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index 8ac3bd37203b..23b726da6534 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -28,8 +28,6 @@
#include "crypto4xx_reg_def.h"
#include "crypto4xx_sa.h"
-#define MODULE_NAME "crypto4xx"
-
#define PPC460SX_SDR0_SRST 0x201
#define PPC405EX_SDR0_SRST 0x200
#define PPC460EX_SDR0_SRST 0x201
@@ -82,7 +80,6 @@ struct pd_uinfo {
struct crypto4xx_device {
struct crypto4xx_core_device *core_dev;
- char *name;
void __iomem *ce_base;
void __iomem *trng_base;
@@ -109,6 +106,7 @@ struct crypto4xx_device {
struct list_head alg_list; /* List of algorithm supported
by this device */
struct ratelimit_state aead_ratelimit;
+ bool is_revb;
};
struct crypto4xx_core_device {
diff --git a/drivers/crypto/amcc/crypto4xx_reg_def.h b/drivers/crypto/amcc/crypto4xx_reg_def.h
index 0a22ec5d1a96..472331787e04 100644
--- a/drivers/crypto/amcc/crypto4xx_reg_def.h
+++ b/drivers/crypto/amcc/crypto4xx_reg_def.h
@@ -121,13 +121,15 @@
#define PPC4XX_PD_SIZE 6
#define PPC4XX_CTX_DONE_INT 0x2000
#define PPC4XX_PD_DONE_INT 0x8000
+#define PPC4XX_TMO_ERR_INT 0x40000
#define PPC4XX_BYTE_ORDER 0x22222
#define PPC4XX_INTERRUPT_CLR 0x3ffff
#define PPC4XX_PRNG_CTRL_AUTO_EN 0x3
#define PPC4XX_DC_3DES_EN 1
#define PPC4XX_TRNG_EN 0x00020000
-#define PPC4XX_INT_DESCR_CNT 4
+#define PPC4XX_INT_DESCR_CNT 7
#define PPC4XX_INT_TIMEOUT_CNT 0
+#define PPC4XX_INT_TIMEOUT_CNT_REVB 0x3FF
#define PPC4XX_INT_CFG 1
/**
* all following defines are ad hoc
diff --git a/drivers/crypto/amcc/crypto4xx_trng.c b/drivers/crypto/amcc/crypto4xx_trng.c
index 677ca17fd223..5e63742b0d22 100644
--- a/drivers/crypto/amcc/crypto4xx_trng.c
+++ b/drivers/crypto/amcc/crypto4xx_trng.c
@@ -92,7 +92,7 @@ void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev)
if (!rng)
goto err_out;
- rng->name = MODULE_NAME;
+ rng->name = KBUILD_MODNAME;
rng->data_present = ppc4xx_trng_data_present;
rng->data_read = ppc4xx_trng_data_read;
rng->priv = (unsigned long) dev;
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 456278440863..0fb8bbf41a8d 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -22,6 +22,7 @@
#include <linux/slab.h>
#include <crypto/aes.h>
+#include <crypto/gcm.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
@@ -1934,7 +1935,7 @@ static int artpec6_crypto_prepare_aead(struct aead_request *areq)
memcpy(req_ctx->hw_ctx.J0, areq->iv, crypto_aead_ivsize(cipher));
// The HW omits the initial increment of the counter field.
- crypto_inc(req_ctx->hw_ctx.J0+12, 4);
+ memcpy(req_ctx->hw_ctx.J0 + GCM_AES_IV_SIZE, "\x00\x00\x00\x01", 4);
ret = artpec6_crypto_setup_out_descr(common, &req_ctx->hw_ctx,
sizeof(struct artpec6_crypto_aead_hw_ctx), false, false);
@@ -2956,7 +2957,7 @@ static struct aead_alg aead_algos[] = {
.setkey = artpec6_crypto_aead_set_key,
.encrypt = artpec6_crypto_aead_encrypt,
.decrypt = artpec6_crypto_aead_decrypt,
- .ivsize = AES_BLOCK_SIZE,
+ .ivsize = GCM_AES_IV_SIZE,
.maxauthsize = AES_BLOCK_SIZE,
.base = {
@@ -3041,9 +3042,6 @@ static int artpec6_crypto_probe(struct platform_device *pdev)
variant = (enum artpec6_crypto_variant)match->data;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
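
For the 96-bit IVs GCM uses (GCM_AES_IV_SIZE == 12), the pre-counter
block J0 is defined as IV || 0x00000001, so the counter field can be
written directly instead of incrementing a zeroed field as the old code
did. A minimal sketch, assuming only the standard GCM definition:

	#include <linux/string.h>
	#include <linux/types.h>
	#include <crypto/gcm.h>		/* GCM_AES_IV_SIZE == 12 */

	static void gcm_build_j0(u8 j0[16], const u8 *iv)
	{
		memcpy(j0, iv, GCM_AES_IV_SIZE);
		/* 32-bit counter starts at 1 for the first cipher block */
		memcpy(j0 + GCM_AES_IV_SIZE, "\x00\x00\x00\x01", 4);
	}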
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index ce70b44d0fb6..2b75f95bbe1b 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -42,7 +42,6 @@
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
-#include <crypto/aes.h>
#include <crypto/sha3.h>
#include "util.h"
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
index a118b9bed669..bfbf8bf77f03 100644
--- a/drivers/crypto/bfin_crc.c
+++ b/drivers/crypto/bfin_crc.c
@@ -494,7 +494,8 @@ static struct ahash_alg algs = {
.cra_driver_name = DRIVER_NAME,
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
- CRYPTO_ALG_ASYNC,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct bfin_crypto_crc_ctx),
.cra_alignmask = 3,
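
For context (not stated in the patch): CRYPTO_ALG_OPTIONAL_KEY marks a
hash algorithm whose ->setkey() may be called but is not required, as
with CRC-style checksums seeded by an optional key. Without the flag,
the crypto API treats any algorithm that implements ->setkey() as one
that must be keyed before use.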
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index baa8dd52472d..2188235be02d 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -108,6 +108,7 @@ struct caam_ctx {
dma_addr_t sh_desc_dec_dma;
dma_addr_t sh_desc_givenc_dma;
dma_addr_t key_dma;
+ enum dma_data_direction dir;
struct device *jrdev;
struct alginfo adata;
struct alginfo cdata;
@@ -118,6 +119,7 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 *desc;
int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
ctx->adata.keylen_pad;
@@ -136,9 +138,10 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
/* aead_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
- cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
+ cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize,
+ ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/*
* Job Descriptor and Shared Descriptors
@@ -154,9 +157,10 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
/* aead_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
- cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize);
+ cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize,
+ ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
return 0;
}
@@ -168,6 +172,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 ctx1_iv_off = 0;
u32 *desc, *nonce = NULL;
u32 inl_mask;
@@ -234,9 +239,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_enc;
cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
- false);
+ false, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
skip_enc:
/*
@@ -266,9 +271,9 @@ skip_enc:
desc = ctx->sh_desc_dec;
cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
ctx->authsize, alg->caam.geniv, is_rfc3686,
- nonce, ctx1_iv_off, false);
+ nonce, ctx1_iv_off, false, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
if (!alg->caam.geniv)
goto skip_givenc;
@@ -300,9 +305,9 @@ skip_enc:
desc = ctx->sh_desc_enc;
cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
ctx->authsize, is_rfc3686, nonce,
- ctx1_iv_off, false);
+ ctx1_iv_off, false, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
skip_givenc:
return 0;
@@ -346,7 +351,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_enc;
cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/*
* Job Descriptor and Shared Descriptors
@@ -363,7 +368,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_dec;
cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
return 0;
}
@@ -405,7 +410,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_enc;
cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/*
* Job Descriptor and Shared Descriptors
@@ -422,7 +427,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_dec;
cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
return 0;
}
@@ -465,7 +470,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_enc;
cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/*
* Job Descriptor and Shared Descriptors
@@ -482,7 +487,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
desc = ctx->sh_desc_dec;
cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
return 0;
}
@@ -503,6 +508,7 @@ static int aead_setkey(struct crypto_aead *aead,
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
struct crypto_authenc_keys keys;
int ret = 0;
@@ -517,6 +523,27 @@ static int aead_setkey(struct crypto_aead *aead,
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
+ /*
+ * If DKP is supported, use it in the shared descriptor to generate
+ * the split key.
+ */
+ if (ctrlpriv->era >= 6) {
+ ctx->adata.keylen = keys.authkeylen;
+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+ OP_ALG_ALGSEL_MASK);
+
+ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+ goto badkey;
+
+ memcpy(ctx->key, keys.authkey, keys.authkeylen);
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
+ keys.enckeylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma,
+ ctx->adata.keylen_pad +
+ keys.enckeylen, ctx->dir);
+ goto skip_split_key;
+ }
+
ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
keys.authkeylen, CAAM_MAX_KEY_SIZE -
keys.enckeylen);
@@ -527,12 +554,14 @@ static int aead_setkey(struct crypto_aead *aead,
/* append encryption key to auth split key */
memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
- keys.enckeylen, DMA_TO_DEVICE);
+ keys.enckeylen, ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif
+
+skip_split_key:
ctx->cdata.keylen = keys.enckeylen;
return aead_set_sh_desc(aead);
badkey:
@@ -552,7 +581,7 @@ static int gcm_setkey(struct crypto_aead *aead,
#endif
memcpy(ctx->key, key, keylen);
- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
ctx->cdata.keylen = keylen;
return gcm_set_sh_desc(aead);
@@ -580,7 +609,7 @@ static int rfc4106_setkey(struct crypto_aead *aead,
*/
ctx->cdata.keylen = keylen - 4;
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
- DMA_TO_DEVICE);
+ ctx->dir);
return rfc4106_set_sh_desc(aead);
}
@@ -606,7 +635,7 @@ static int rfc4543_setkey(struct crypto_aead *aead,
*/
ctx->cdata.keylen = keylen - 4;
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
- DMA_TO_DEVICE);
+ ctx->dir);
return rfc4543_set_sh_desc(aead);
}
@@ -625,7 +654,6 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
const bool is_rfc3686 = (ctr_mode &&
(strstr(alg_name, "rfc3686") != NULL));
- memcpy(ctx->key, key, keylen);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -648,9 +676,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
keylen -= CTR_RFC3686_NONCE_SIZE;
}
- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
ctx->cdata.keylen = keylen;
- ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
/* ablkcipher_encrypt shared descriptor */
@@ -658,21 +685,21 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
ctx1_iv_off);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/* ablkcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
ctx1_iv_off);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/* ablkcipher_givencrypt shared descriptor */
desc = ctx->sh_desc_givenc;
cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
ctx1_iv_off);
dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
return 0;
}
@@ -691,23 +718,21 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
return -EINVAL;
}
- memcpy(ctx->key, key, keylen);
- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
ctx->cdata.keylen = keylen;
- ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
/* xts_ablkcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
/* xts_ablkcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
return 0;
}
@@ -979,9 +1004,6 @@ static void init_aead_job(struct aead_request *req,
append_seq_out_ptr(desc, dst_dma,
req->assoclen + req->cryptlen - authsize,
out_options);
-
- /* REG3 = assoclen */
- append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
}
static void init_gcm_job(struct aead_request *req,
@@ -996,6 +1018,7 @@ static void init_gcm_job(struct aead_request *req,
unsigned int last;
init_aead_job(req, edesc, all_contig, encrypt);
+ append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
/* BUG This should not be specific to generic GCM. */
last = 0;
@@ -1022,6 +1045,7 @@ static void init_authenc_job(struct aead_request *req,
struct caam_aead_alg, aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
@@ -1045,6 +1069,15 @@ static void init_authenc_job(struct aead_request *req,
init_aead_job(req, edesc, all_contig, encrypt);
+ /*
+	 * {REG3, DPOVRD} = assoclen, depending on whether the MATH command
+	 * supports having DPOVRD as destination.
+ */
+ if (ctrlpriv->era < 3)
+ append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
+ else
+ append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen);
+
if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
append_load_as_imm(desc, req->iv, ivsize,
LDST_CLASS_1_CCB |
@@ -3228,9 +3261,11 @@ struct caam_crypto_alg {
struct caam_alg_entry caam;
};
-static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
+static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
+ bool uses_dkp)
{
dma_addr_t dma_addr;
+ struct caam_drv_private *priv;
ctx->jrdev = caam_jr_alloc();
if (IS_ERR(ctx->jrdev)) {
@@ -3238,10 +3273,16 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
return PTR_ERR(ctx->jrdev);
}
+ priv = dev_get_drvdata(ctx->jrdev->parent);
+ if (priv->era >= 6 && uses_dkp)
+ ctx->dir = DMA_BIDIRECTIONAL;
+ else
+ ctx->dir = DMA_TO_DEVICE;
+
dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
offsetof(struct caam_ctx,
sh_desc_enc_dma),
- DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(ctx->jrdev, dma_addr)) {
dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
caam_jr_free(ctx->jrdev);
@@ -3269,7 +3310,7 @@ static int caam_cra_init(struct crypto_tfm *tfm)
container_of(alg, struct caam_crypto_alg, crypto_alg);
struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
- return caam_init_common(ctx, &caam_alg->caam);
+ return caam_init_common(ctx, &caam_alg->caam, false);
}
static int caam_aead_init(struct crypto_aead *tfm)
@@ -3279,14 +3320,15 @@ static int caam_aead_init(struct crypto_aead *tfm)
container_of(alg, struct caam_aead_alg, aead);
struct caam_ctx *ctx = crypto_aead_ctx(tfm);
- return caam_init_common(ctx, &caam_alg->caam);
+ return caam_init_common(ctx, &caam_alg->caam,
+ alg->setkey == aead_setkey);
}
static void caam_exit_common(struct caam_ctx *ctx)
{
dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
offsetof(struct caam_ctx, sh_desc_enc_dma),
- DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
caam_jr_free(ctx->jrdev);
}
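
The direction rule this patch threads through the driver, as a small
sketch (the helper name is hypothetical): with DKP on SEC Era >= 6 the
device derives the split key inside the shared descriptor and writes it
back, so the mapping covering the key and descriptors must be
bidirectional; otherwise a to-device mapping suffices.

	#include <linux/dma-direction.h>

	static enum dma_data_direction caam_ctx_dir(int era, bool uses_dkp)
	{
		return (era >= 6 && uses_dkp) ? DMA_BIDIRECTIONAL
					      : DMA_TO_DEVICE;
	}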
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index 530c14ee32de..ceb93fbb76e6 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -45,16 +45,16 @@ static inline void append_dec_op1(u32 *desc, u32 type)
* cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor
* (non-protocol) with no (null) encryption.
* @desc: pointer to buffer used for descriptor construction
- * @adata: pointer to authentication transform definitions. Note that since a
- * split key is to be used, the size of the split key itself is
- * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
- * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @adata: pointer to authentication transform definitions.
+ * A split key is required for SEC Era < 6; the size of the split key
+ * is specified in this case. Valid algorithm values - one of
+ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
+ * with OP_ALG_AAI_HMAC_PRECOMP.
* @icvsize: integrity check value (ICV) size (truncated or full)
- *
- * Note: Requires an MDHA split key.
+ * @era: SEC Era
*/
void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
- unsigned int icvsize)
+ unsigned int icvsize, int era)
{
u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
@@ -63,13 +63,18 @@ void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
/* Skip if already shared */
key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
JUMP_COND_SHRD);
- if (adata->key_inline)
- append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
- adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
- KEY_ENC);
- else
- append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ if (era < 6) {
+ if (adata->key_inline)
+ append_key_as_imm(desc, adata->key_virt,
+ adata->keylen_pad, adata->keylen,
+ CLASS_2 | KEY_DEST_MDHA_SPLIT |
+ KEY_ENC);
+ else
+ append_key(desc, adata->key_dma, adata->keylen,
+ CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ } else {
+ append_proto_dkp(desc, adata);
+ }
set_jump_tgt_here(desc, key_jump_cmd);
/* assoclen + cryptlen = seqinlen */
@@ -121,16 +126,16 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap);
* cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor
* (non-protocol) with no (null) decryption.
* @desc: pointer to buffer used for descriptor construction
- * @adata: pointer to authentication transform definitions. Note that since a
- * split key is to be used, the size of the split key itself is
- * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
- * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @adata: pointer to authentication transform definitions.
+ * A split key is required for SEC Era < 6; the size of the split key
+ * is specified in this case. Valid algorithm values - one of
+ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
+ * with OP_ALG_AAI_HMAC_PRECOMP.
* @icvsize: integrity check value (ICV) size (truncated or full)
- *
- * Note: Requires an MDHA split key.
+ * @era: SEC Era
*/
void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
- unsigned int icvsize)
+ unsigned int icvsize, int era)
{
u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd;
@@ -139,13 +144,18 @@ void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
/* Skip if already shared */
key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
JUMP_COND_SHRD);
- if (adata->key_inline)
- append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
- adata->keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- else
- append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ if (era < 6) {
+ if (adata->key_inline)
+ append_key_as_imm(desc, adata->key_virt,
+ adata->keylen_pad, adata->keylen,
+ CLASS_2 | KEY_DEST_MDHA_SPLIT |
+ KEY_ENC);
+ else
+ append_key(desc, adata->key_dma, adata->keylen,
+ CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ } else {
+ append_proto_dkp(desc, adata);
+ }
set_jump_tgt_here(desc, key_jump_cmd);
/* Class 2 operation */
@@ -204,7 +214,7 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap);
static void init_sh_desc_key_aead(u32 * const desc,
struct alginfo * const cdata,
struct alginfo * const adata,
- const bool is_rfc3686, u32 *nonce)
+ const bool is_rfc3686, u32 *nonce, int era)
{
u32 *key_jump_cmd;
unsigned int enckeylen = cdata->keylen;
@@ -224,13 +234,18 @@ static void init_sh_desc_key_aead(u32 * const desc,
if (is_rfc3686)
enckeylen -= CTR_RFC3686_NONCE_SIZE;
- if (adata->key_inline)
- append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
- adata->keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- else
- append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ if (era < 6) {
+ if (adata->key_inline)
+ append_key_as_imm(desc, adata->key_virt,
+ adata->keylen_pad, adata->keylen,
+ CLASS_2 | KEY_DEST_MDHA_SPLIT |
+ KEY_ENC);
+ else
+ append_key(desc, adata->key_dma, adata->keylen,
+ CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ } else {
+ append_proto_dkp(desc, adata);
+ }
if (cdata->key_inline)
append_key_as_imm(desc, cdata->key_virt, enckeylen,
@@ -261,26 +276,27 @@ static void init_sh_desc_key_aead(u32 * const desc,
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
* with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
- * @adata: pointer to authentication transform definitions. Note that since a
- * split key is to be used, the size of the split key itself is
- * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
- * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @adata: pointer to authentication transform definitions.
+ * A split key is required for SEC Era < 6; the size of the split key
+ * is specified in this case. Valid algorithm values - one of
+ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
+ * with OP_ALG_AAI_HMAC_PRECOMP.
* @ivsize: initialization vector size
* @icvsize: integrity check value (ICV) size (truncated or full)
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @nonce: pointer to rfc3686 nonce
* @ctx1_iv_off: IV offset in CONTEXT1 register
* @is_qi: true when called from caam/qi
- *
- * Note: Requires an MDHA split key.
+ * @era: SEC Era
*/
void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool is_rfc3686,
- u32 *nonce, const u32 ctx1_iv_off, const bool is_qi)
+ u32 *nonce, const u32 ctx1_iv_off, const bool is_qi,
+ int era)
{
/* Note: Context registers are saved. */
- init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
/* Class 2 operation */
append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
@@ -306,8 +322,13 @@ void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
}
/* Read and write assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ if (is_qi || era < 3) {
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ } else {
+ append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
+ }
/* Skip assoc data */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
@@ -350,27 +371,27 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
* with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
- * @adata: pointer to authentication transform definitions. Note that since a
- * split key is to be used, the size of the split key itself is
- * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
- * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @adata: pointer to authentication transform definitions.
+ * A split key is required for SEC Era < 6; the size of the split key
+ * is specified in this case. Valid algorithm values - one of
+ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
+ * with OP_ALG_AAI_HMAC_PRECOMP.
* @ivsize: initialization vector size
* @icvsize: integrity check value (ICV) size (truncated or full)
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @nonce: pointer to rfc3686 nonce
* @ctx1_iv_off: IV offset in CONTEXT1 register
* @is_qi: true when called from caam/qi
- *
- * Note: Requires an MDHA split key.
+ * @era: SEC Era
*/
void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool geniv,
const bool is_rfc3686, u32 *nonce,
- const u32 ctx1_iv_off, const bool is_qi)
+ const u32 ctx1_iv_off, const bool is_qi, int era)
{
/* Note: Context registers are saved. */
- init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
/* Class 2 operation */
append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
@@ -397,11 +418,23 @@ void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
}
/* Read and write assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- if (geniv)
- append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
- else
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ if (is_qi || era < 3) {
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ if (geniv)
+ append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM,
+ ivsize);
+ else
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3,
+ CAAM_CMD_SZ);
+ } else {
+ append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
+ if (geniv)
+ append_math_add_imm_u32(desc, VARSEQOUTLEN, DPOVRD, IMM,
+ ivsize);
+ else
+ append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD,
+ CAAM_CMD_SZ);
+ }
/* Skip assoc data */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
@@ -456,29 +489,29 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
* with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
- * @adata: pointer to authentication transform definitions. Note that since a
- * split key is to be used, the size of the split key itself is
- * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
- * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @adata: pointer to authentication transform definitions.
+ * A split key is required for SEC Era < 6; the size of the split key
+ * is specified in this case. Valid algorithm values - one of
+ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
+ * with OP_ALG_AAI_HMAC_PRECOMP.
* @ivsize: initialization vector size
* @icvsize: integrity check value (ICV) size (truncated or full)
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @nonce: pointer to rfc3686 nonce
* @ctx1_iv_off: IV offset in CONTEXT1 register
* @is_qi: true when called from caam/qi
- *
- * Note: Requires an MDHA split key.
+ * @era: SEC Era
*/
void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool is_rfc3686,
u32 *nonce, const u32 ctx1_iv_off,
- const bool is_qi)
+ const bool is_qi, int era)
{
u32 geniv, moveiv;
/* Note: Context registers are saved. */
- init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
if (is_qi) {
u32 *wait_load_cmd;
@@ -528,8 +561,13 @@ copy_iv:
OP_ALG_ENCRYPT);
/* Read and write assoclen bytes */
- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ if (is_qi || era < 3) {
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ } else {
+ append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
+ }
/* Skip assoc data */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
@@ -1075,7 +1113,7 @@ void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
/* Load nonce into CONTEXT1 reg */
if (is_rfc3686) {
- u8 *nonce = cdata->key_virt + cdata->keylen;
+ const u8 *nonce = cdata->key_virt + cdata->keylen;
append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
LDST_CLASS_IND_CCB |
@@ -1140,7 +1178,7 @@ void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
/* Load nonce into CONTEXT1 reg */
if (is_rfc3686) {
- u8 *nonce = cdata->key_virt + cdata->keylen;
+ const u8 *nonce = cdata->key_virt + cdata->keylen;
append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
LDST_CLASS_IND_CCB |
@@ -1209,7 +1247,7 @@ void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
/* Load Nonce into CONTEXT1 reg */
if (is_rfc3686) {
- u8 *nonce = cdata->key_virt + cdata->keylen;
+ const u8 *nonce = cdata->key_virt + cdata->keylen;
append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
LDST_CLASS_IND_CCB |
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
index e412ec8f7005..5f9445ae2114 100644
--- a/drivers/crypto/caam/caamalg_desc.h
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -43,28 +43,28 @@
15 * CAAM_CMD_SZ)
void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
- unsigned int icvsize);
+ unsigned int icvsize, int era);
void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
- unsigned int icvsize);
+ unsigned int icvsize, int era);
void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool is_rfc3686,
u32 *nonce, const u32 ctx1_iv_off,
- const bool is_qi);
+ const bool is_qi, int era);
void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool geniv,
const bool is_rfc3686, u32 *nonce,
- const u32 ctx1_iv_off, const bool is_qi);
+ const u32 ctx1_iv_off, const bool is_qi, int era);
void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
struct alginfo *adata, unsigned int ivsize,
unsigned int icvsize, const bool is_rfc3686,
u32 *nonce, const u32 ctx1_iv_off,
- const bool is_qi);
+ const bool is_qi, int era);
void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
unsigned int icvsize);
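
The REG3/DPOVRD branch added to the encap/decap/givencap descriptors
above follows one rule; a hypothetical helper consolidating it (names
mirror the descriptor-construction API used in these files):

	static void append_assoclen_math(u32 * const desc, bool is_qi,
					 int era)
	{
		/*
		 * On SEC Era >= 3 the MATH command can target DPOVRD, so
		 * the job descriptor passes assoclen there and the shared
		 * descriptor reads it back; QI and older eras use REG3.
		 */
		if (is_qi || era < 3) {
			append_math_add(desc, VARSEQINLEN, ZERO, REG3,
					CAAM_CMD_SZ);
			append_math_add(desc, VARSEQOUTLEN, ZERO, REG3,
					CAAM_CMD_SZ);
		} else {
			append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD,
					CAAM_CMD_SZ);
			append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD,
					CAAM_CMD_SZ);
		}
	}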
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index f9f08fce4356..4aecc9435f69 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -53,6 +53,7 @@ struct caam_ctx {
u32 sh_desc_givenc[DESC_MAX_USED_LEN];
u8 key[CAAM_MAX_KEY_SIZE];
dma_addr_t key_dma;
+ enum dma_data_direction dir;
struct alginfo adata;
struct alginfo cdata;
unsigned int authsize;
@@ -74,6 +75,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
if (!ctx->cdata.keylen || !ctx->authsize)
return 0;
@@ -124,7 +126,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
ivsize, ctx->authsize, is_rfc3686, nonce,
- ctx1_iv_off, true);
+ ctx1_iv_off, true, ctrlpriv->era);
skip_enc:
/* aead_decrypt shared descriptor */
@@ -149,7 +151,8 @@ skip_enc:
cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
ivsize, ctx->authsize, alg->caam.geniv,
- is_rfc3686, nonce, ctx1_iv_off, true);
+ is_rfc3686, nonce, ctx1_iv_off, true,
+ ctrlpriv->era);
if (!alg->caam.geniv)
goto skip_givenc;
@@ -176,7 +179,7 @@ skip_enc:
cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
ivsize, ctx->authsize, is_rfc3686, nonce,
- ctx1_iv_off, true);
+ ctx1_iv_off, true, ctrlpriv->era);
skip_givenc:
return 0;
@@ -197,6 +200,7 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
struct crypto_authenc_keys keys;
int ret = 0;
@@ -211,6 +215,27 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
+ /*
+ * If DKP is supported, use it in the shared descriptor to generate
+ * the split key.
+ */
+ if (ctrlpriv->era >= 6) {
+ ctx->adata.keylen = keys.authkeylen;
+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+ OP_ALG_ALGSEL_MASK);
+
+ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+ goto badkey;
+
+ memcpy(ctx->key, keys.authkey, keys.authkeylen);
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
+ keys.enckeylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma,
+ ctx->adata.keylen_pad +
+ keys.enckeylen, ctx->dir);
+ goto skip_split_key;
+ }
+
ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
keys.authkeylen, CAAM_MAX_KEY_SIZE -
keys.enckeylen);
@@ -220,13 +245,14 @@ static int aead_setkey(struct crypto_aead *aead, const u8 *key,
/* append encryption key to auth split key */
memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
- keys.enckeylen, DMA_TO_DEVICE);
+ keys.enckeylen, ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif
+skip_split_key:
ctx->cdata.keylen = keys.enckeylen;
ret = aead_set_sh_desc(aead);
@@ -272,7 +298,6 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
int ret = 0;
- memcpy(ctx->key, key, keylen);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -295,9 +320,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
keylen -= CTR_RFC3686_NONCE_SIZE;
}
- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
ctx->cdata.keylen = keylen;
- ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
/* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
@@ -356,10 +380,8 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
return -EINVAL;
}
- memcpy(ctx->key, key, keylen);
- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
ctx->cdata.keylen = keylen;
- ctx->cdata.key_virt = ctx->key;
+ ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
/* xts ablkcipher encrypt, decrypt shared descriptors */
@@ -668,7 +690,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
(mapped_dst_nents > 1 ? mapped_dst_nents : 0);
if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) {
- dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
+ dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
iv_dma, ivsize, op_type, 0, 0);
@@ -905,7 +927,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
- dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
+ dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
iv_dma, ivsize, op_type, 0, 0);
@@ -1058,7 +1080,7 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
}
if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
- dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
+ dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
iv_dma, ivsize, GIVENCRYPT, 0, 0);
@@ -2123,7 +2145,8 @@ struct caam_crypto_alg {
struct caam_alg_entry caam;
};
-static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
+static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
+ bool uses_dkp)
{
struct caam_drv_private *priv;
@@ -2137,8 +2160,14 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
return PTR_ERR(ctx->jrdev);
}
+ priv = dev_get_drvdata(ctx->jrdev->parent);
+ if (priv->era >= 6 && uses_dkp)
+ ctx->dir = DMA_BIDIRECTIONAL;
+ else
+ ctx->dir = DMA_TO_DEVICE;
+
ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
- DMA_TO_DEVICE);
+ ctx->dir);
if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
dev_err(ctx->jrdev, "unable to map key\n");
caam_jr_free(ctx->jrdev);
@@ -2149,7 +2178,6 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
- priv = dev_get_drvdata(ctx->jrdev->parent);
ctx->qidev = priv->qidev;
spin_lock_init(&ctx->lock);
@@ -2167,7 +2195,7 @@ static int caam_cra_init(struct crypto_tfm *tfm)
crypto_alg);
struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
- return caam_init_common(ctx, &caam_alg->caam);
+ return caam_init_common(ctx, &caam_alg->caam, false);
}
static int caam_aead_init(struct crypto_aead *tfm)
@@ -2177,7 +2205,8 @@ static int caam_aead_init(struct crypto_aead *tfm)
aead);
struct caam_ctx *ctx = crypto_aead_ctx(tfm);
- return caam_init_common(ctx, &caam_alg->caam);
+ return caam_init_common(ctx, &caam_alg->caam,
+ alg->setkey == aead_setkey);
}
static void caam_exit_common(struct caam_ctx *ctx)
@@ -2186,8 +2215,7 @@ static void caam_exit_common(struct caam_ctx *ctx)
caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
- dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key),
- DMA_TO_DEVICE);
+ dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
caam_jr_free(ctx->jrdev);
}
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 616720a04e7a..0beb28196e20 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -107,6 +107,7 @@ struct caam_hash_ctx {
dma_addr_t sh_desc_update_first_dma;
dma_addr_t sh_desc_fin_dma;
dma_addr_t sh_desc_digest_dma;
+ enum dma_data_direction dir;
struct device *jrdev;
u8 key[CAAM_MAX_HASH_KEY_SIZE];
int ctx_len;
@@ -241,7 +242,8 @@ static inline int ctx_map_to_sec4_sg(struct device *jrdev,
* read and write to seqout
*/
static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
- struct caam_hash_ctx *ctx, bool import_ctx)
+ struct caam_hash_ctx *ctx, bool import_ctx,
+ int era)
{
u32 op = ctx->adata.algtype;
u32 *skip_key_load;
@@ -254,9 +256,12 @@ static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
JUMP_COND_SHRD);
- append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
- ctx->adata.keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ if (era < 6)
+ append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
+ ctx->adata.keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ else
+ append_proto_dkp(desc, &ctx->adata);
set_jump_tgt_here(desc, skip_key_load);
@@ -289,13 +294,17 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
int digestsize = crypto_ahash_digestsize(ahash);
struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 *desc;
+ ctx->adata.key_virt = ctx->key;
+
/* ahash_update shared descriptor */
desc = ctx->sh_desc_update;
- ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
+ ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true,
+ ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ahash update shdesc@"__stringify(__LINE__)": ",
@@ -304,9 +313,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_update_first shared descriptor */
desc = ctx->sh_desc_update_first;
- ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
+ ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false,
+ ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ahash update first shdesc@"__stringify(__LINE__)": ",
@@ -315,9 +325,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_final shared descriptor */
desc = ctx->sh_desc_fin;
- ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
+ ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true,
+ ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc,
@@ -326,9 +337,10 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_digest shared descriptor */
desc = ctx->sh_desc_digest;
- ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
+ ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false,
+ ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
- desc_bytes(desc), DMA_TO_DEVICE);
+ desc_bytes(desc), ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ahash digest shdesc@"__stringify(__LINE__)": ",
@@ -421,6 +433,7 @@ static int ahash_setkey(struct crypto_ahash *ahash,
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
int digestsize = crypto_ahash_digestsize(ahash);
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
int ret;
u8 *hashed_key = NULL;
@@ -441,16 +454,26 @@ static int ahash_setkey(struct crypto_ahash *ahash,
key = hashed_key;
}
- ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
- CAAM_MAX_HASH_KEY_SIZE);
- if (ret)
- goto bad_free_key;
+ /*
+ * If DKP is supported, use it in the shared descriptor to generate
+ * the split key.
+ */
+ if (ctrlpriv->era >= 6) {
+ ctx->adata.key_inline = true;
+ ctx->adata.keylen = keylen;
+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+ OP_ALG_ALGSEL_MASK);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
- ctx->adata.keylen_pad, 1);
-#endif
+ if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
+ goto bad_free_key;
+
+ memcpy(ctx->key, key, keylen);
+ } else {
+ ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
+ keylen, CAAM_MAX_HASH_KEY_SIZE);
+ if (ret)
+ goto bad_free_key;
+ }
kfree(hashed_key);
return ahash_set_sh_desc(ahash);
@@ -1715,6 +1738,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
HASH_MSG_LEN + 64,
HASH_MSG_LEN + SHA512_DIGEST_SIZE };
dma_addr_t dma_addr;
+ struct caam_drv_private *priv;
/*
* Get a Job ring from Job Ring driver to ensure in-order
@@ -1726,10 +1750,13 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
return PTR_ERR(ctx->jrdev);
}
+ priv = dev_get_drvdata(ctx->jrdev->parent);
+ ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+
dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
offsetof(struct caam_hash_ctx,
sh_desc_update_dma),
- DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(ctx->jrdev, dma_addr)) {
dev_err(ctx->jrdev, "unable to map shared descriptors\n");
caam_jr_free(ctx->jrdev);
@@ -1764,7 +1791,7 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
offsetof(struct caam_hash_ctx,
sh_desc_update_dma),
- DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
caam_jr_free(ctx->jrdev);
}
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 027e121c6f70..75d280cb2dc0 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -611,6 +611,8 @@ static int caam_probe(struct platform_device *pdev)
goto iounmap_ctrl;
}
+ ctrlpriv->era = caam_get_era();
+
ret = of_platform_populate(nprop, caam_match, NULL, dev);
if (ret) {
dev_err(dev, "JR platform devices creation error\n");
@@ -742,7 +744,7 @@ static int caam_probe(struct platform_device *pdev)
/* Report "alive" for developer to see */
dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
- caam_get_era());
+ ctrlpriv->era);
dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
ctrlpriv->total_jobrs, ctrlpriv->qi_present,
caam_dpaa2 ? "yes" : "no");
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 8142de7ba050..f76ff160a02c 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -444,6 +444,18 @@
#define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
#define OP_PCLID_RSAENC_PUBKEY (0x18 << OP_PCLID_SHIFT)
#define OP_PCLID_RSADEC_PRVKEY (0x19 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_MD5 (0x20 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA1 (0x21 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA224 (0x22 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA256 (0x23 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA384 (0x24 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA512 (0x25 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_MD5 (0x60 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_SHA1 (0x61 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_SHA224 (0x62 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_SHA256 (0x63 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_SHA384 (0x64 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_RIF_SHA512 (0x65 << OP_PCLID_SHIFT)
/* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
#define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT)
@@ -1093,6 +1105,22 @@
/* MacSec protinfos */
#define OP_PCL_MACSEC 0x0001
+/* Derived Key Protocol (DKP) Protinfo */
+#define OP_PCL_DKP_SRC_SHIFT 14
+#define OP_PCL_DKP_SRC_MASK (3 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_IMM (0 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_SEQ (1 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_PTR (2 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_SGF (3 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_DST_SHIFT 12
+#define OP_PCL_DKP_DST_MASK (3 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_IMM (0 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_SEQ (1 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_PTR (2 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_SGF (3 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_KEY_SHIFT 0
+#define OP_PCL_DKP_KEY_MASK (0xfff << OP_PCL_DKP_KEY_SHIFT)
+
/* PKI unidirectional protocol protinfo bits */
#define OP_PCL_PKPROT_TEST 0x0008
#define OP_PCL_PKPROT_DECRYPT 0x0004
@@ -1452,6 +1480,7 @@
#define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT)
#define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT)
#define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT)
+#define MATH_DEST_DPOVRD (0x07 << MATH_DEST_SHIFT)
#define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT)
#define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT)
#define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT)
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index ba1ca0806f0a..d4256fa4a1d6 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -109,7 +109,7 @@ static inline void init_job_desc_shared(u32 * const desc, dma_addr_t ptr,
append_ptr(desc, ptr);
}
-static inline void append_data(u32 * const desc, void *data, int len)
+static inline void append_data(u32 * const desc, const void *data, int len)
{
u32 *offset = desc_end(desc);
@@ -172,7 +172,7 @@ static inline void append_cmd_ptr_extlen(u32 * const desc, dma_addr_t ptr,
append_cmd(desc, len);
}
-static inline void append_cmd_data(u32 * const desc, void *data, int len,
+static inline void append_cmd_data(u32 * const desc, const void *data, int len,
u32 command)
{
append_cmd(desc, command | IMMEDIATE | len);
@@ -271,7 +271,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN)
APPEND_SEQ_PTR_INTLEN(out, OUT)
#define APPEND_CMD_PTR_TO_IMM(cmd, op) \
-static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
+static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
unsigned int len, u32 options) \
{ \
PRINT_POS; \
@@ -312,7 +312,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_PTR, u32)
* from length of immediate data provided, e.g., split keys
*/
#define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
-static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
+static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
unsigned int data_len, \
unsigned int len, u32 options) \
{ \
@@ -452,7 +452,7 @@ struct alginfo {
unsigned int keylen_pad;
union {
dma_addr_t key_dma;
- void *key_virt;
+ const void *key_virt;
};
bool key_inline;
};
@@ -496,4 +496,45 @@ static inline int desc_inline_query(unsigned int sd_base_len,
return (rem_bytes >= 0) ? 0 : -1;
}
+/**
+ * append_proto_dkp - Derived Key Protocol (DKP): key -> split key
+ * @desc: pointer to buffer used for descriptor construction
+ * @adata: pointer to authentication transform definitions.
+ * keylen should be the length of initial key, while keylen_pad
+ * keylen should be the length of the initial key, while keylen_pad
+ * should be the length of the derived (split) key.
+ * SHA256, SHA384, SHA512}.
+ */
+static inline void append_proto_dkp(u32 * const desc, struct alginfo *adata)
+{
+ u32 protid;
+
+ /*
+ * Quick & dirty translation from OP_ALG_ALGSEL_{MD5, SHA*}
+ * to OP_PCLID_DKP_{MD5, SHA*}
+ */
+ protid = (adata->algtype & OP_ALG_ALGSEL_SUBMASK) |
+ (0x20 << OP_ALG_ALGSEL_SHIFT);
+
+ if (adata->key_inline) {
+ int words;
+
+ append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
+ OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM |
+ adata->keylen);
+ append_data(desc, adata->key_virt, adata->keylen);
+
+ /* Reserve space in descriptor buffer for the derived key */
+ words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) -
+ ALIGN(adata->keylen, CAAM_CMD_SZ)) / CAAM_CMD_SZ;
+ if (words)
+ (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + words);
+ } else {
+ append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
+ OP_PCL_DKP_SRC_PTR | OP_PCL_DKP_DST_PTR |
+ adata->keylen);
+ append_ptr(desc, adata->key_dma);
+ }
+}
+
#endif /* DESC_CONSTR_H */
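
A usage sketch for append_proto_dkp() (hypothetical caller; the SHA-256
selection is just an example): with an inline key, DKP derives the split
key in place inside the descriptor, so no separate gen_split_key() job
is needed. split_key_len() is assumed available from key_gen.h, where
this patch moves it.

	static void example_dkp_key(u32 * const desc, struct alginfo *adata,
				    const u8 *raw_key, unsigned int keylen)
	{
		adata->algtype    = OP_ALG_ALGSEL_SHA256 |
				    OP_ALG_AAI_HMAC_PRECOMP;
		adata->key_virt   = raw_key;
		adata->keylen     = keylen;
		/* 64 bytes for SHA-256, per split_key_len() */
		adata->keylen_pad = split_key_len(OP_ALG_ALGSEL_SHA256);
		adata->key_inline = true;

		append_proto_dkp(desc, adata);
	}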
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 91f1107276e5..7696a774a362 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -84,6 +84,7 @@ struct caam_drv_private {
u8 qi_present; /* Nonzero if QI present in device */
int secvio_irq; /* Security violation interrupt number */
int virt_en; /* Virtualization enabled in CAAM */
+ int era; /* CAAM Era (internal HW revision) */
#define RNG4_MAX_HANDLES 2
/* RNG4 block */
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index 8c79c3a153dc..312b5f042f31 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -11,36 +11,6 @@
#include "desc_constr.h"
#include "key_gen.h"
-/**
- * split_key_len - Compute MDHA split key length for a given algorithm
- * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
- * SHA224, SHA384, SHA512.
- *
- * Return: MDHA split key length
- */
-static inline u32 split_key_len(u32 hash)
-{
- /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
- static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
- u32 idx;
-
- idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
-
- return (u32)(mdpadlen[idx] * 2);
-}
-
-/**
- * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
- * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
- * SHA224, SHA384, SHA512.
- *
- * Return: MDHA split key pad length
- */
-static inline u32 split_key_pad_len(u32 hash)
-{
- return ALIGN(split_key_len(hash), 16);
-}
-
void split_key_done(struct device *dev, u32 *desc, u32 err,
void *context)
{
diff --git a/drivers/crypto/caam/key_gen.h b/drivers/crypto/caam/key_gen.h
index 5db055c25bd2..818f78f6fc1a 100644
--- a/drivers/crypto/caam/key_gen.h
+++ b/drivers/crypto/caam/key_gen.h
@@ -6,6 +6,36 @@
*
*/
+/**
+ * split_key_len - Compute MDHA split key length for a given algorithm
+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
+ *        SHA224, SHA256, SHA384, SHA512.
+ *
+ * Return: MDHA split key length
+ */
+static inline u32 split_key_len(u32 hash)
+{
+ /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
+ static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
+ u32 idx;
+
+ idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
+
+ return (u32)(mdpadlen[idx] * 2);
+}
+
+/**
+ * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
+ *        SHA224, SHA256, SHA384, SHA512.
+ *
+ * Return: MDHA split key pad length
+ */
+static inline u32 split_key_pad_len(u32 hash)
+{
+ return ALIGN(split_key_len(hash), 16);
+}
+
struct split_key_result {
struct completion completion;
int err;
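
The helpers moved into this header encode a simple rule: the MDHA split key is twice the digest-side pad size, rounded up to 16 bytes for the padded form. A standalone dump of the values, mirroring the mdpadlen table above:

    #include <stdio.h>
    #include <stdint.h>

    #define ALIGN16(x) (((x) + 15u) & ~15u)

    int main(void)
    {
        /* MDHA pad sizes (*not* key sizes), as in split_key_len() */
        const uint8_t mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
        const char *names[] = { "MD5", "SHA1", "SHA224",
                                "SHA256", "SHA384", "SHA512" };

        for (int i = 0; i < 6; i++) {
            uint32_t split = mdpadlen[i] * 2;   /* split_key_len()     */
            printf("%-7s split=%u pad=%u\n",    /* split_key_pad_len() */
                   names[i], split, ALIGN16(split));
        }
        return 0;   /* e.g. SHA1: split=40 pad=48; SHA256: split=64 pad=64 */
    }
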
diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
index 169e66231bcf..b0ba4331944b 100644
--- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
+++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
@@ -459,7 +459,8 @@ int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req)
info->completion_addr = kzalloc(sizeof(union cpt_res_s), GFP_KERNEL);
if (unlikely(!info->completion_addr)) {
dev_err(&pdev->dev, "Unable to allocate memory for completion_addr\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto request_cleanup;
}
result = (union cpt_res_s *)info->completion_addr;
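
The fix above matters because process_request() has already set up state before this allocation, so a bare return leaked it on failure. The goto-based unwind idiom being restored, in miniature (the resources here are hypothetical stand-ins, not the driver's actual ones):

    #include <stdlib.h>

    int do_request(void)
    {
        int ret = 0;
        void *a = NULL, *b = NULL;

        a = malloc(64);
        if (!a)
            return -1;          /* nothing to unwind yet */
        b = malloc(64);
        if (!b) {
            ret = -1;
            goto cleanup;       /* must not return directly: 'a' is live */
        }
        /* ... use a and b ... */
    cleanup:
        free(b);
        free(a);
        return ret;
    }
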
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
index 4addc238a6ef..deaefd532aaa 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
@@ -6,7 +6,6 @@
#include "nitrox_dev.h"
#include "nitrox_req.h"
#include "nitrox_csr.h"
-#include "nitrox_req.h"
/* SLC_STORE_INFO */
#define MIN_UDD_LEN 16
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
index ff02b713c6f6..ca1f0d780b61 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
@@ -21,7 +21,6 @@
#include <crypto/ctr.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
-#include <linux/delay.h>
#include "ccp-crypto.h"
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
index b56b3f711d94..5ae9f8706f17 100644
--- a/drivers/crypto/chelsio/Kconfig
+++ b/drivers/crypto/chelsio/Kconfig
@@ -19,3 +19,13 @@ config CRYPTO_DEV_CHELSIO
To compile this driver as a module, choose M here: the module
will be called chcr.
+
+config CHELSIO_IPSEC_INLINE
+ bool "Chelsio IPSec XFRM Tx crypto offload"
+ depends on CHELSIO_T4
+ depends on CRYPTO_DEV_CHELSIO
+ depends on XFRM_OFFLOAD
+ depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
+ default n
+ ---help---
+	  Enable support for IPsec Tx inline crypto offload on Chelsio
+	  T6 adapters.
diff --git a/drivers/crypto/chelsio/Makefile b/drivers/crypto/chelsio/Makefile
index bebdf06687ad..eaecaf1ebcf3 100644
--- a/drivers/crypto/chelsio/Makefile
+++ b/drivers/crypto/chelsio/Makefile
@@ -2,3 +2,4 @@ ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4
obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chcr.o
chcr-objs := chcr_core.o chcr_algo.o
+chcr-$(CONFIG_CHELSIO_IPSEC_INLINE) += chcr_ipsec.o
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 4eed7171e2ae..34a02d690548 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -73,6 +73,29 @@
#define IV AES_BLOCK_SIZE
+static unsigned int sgl_ent_len[] = {
+ 0, 0, 16, 24, 40, 48, 64, 72, 88,
+ 96, 112, 120, 136, 144, 160, 168, 184,
+ 192, 208, 216, 232, 240, 256, 264, 280,
+ 288, 304, 312, 328, 336, 352, 360, 376
+};
+
+static unsigned int dsgl_ent_len[] = {
+ 0, 32, 32, 48, 48, 64, 64, 80, 80,
+ 112, 112, 128, 128, 144, 144, 160, 160,
+ 192, 192, 208, 208, 224, 224, 240, 240,
+ 272, 272, 288, 288, 304, 304, 320, 320
+};
+
+static u32 round_constant[11] = {
+ 0x01000000, 0x02000000, 0x04000000, 0x08000000,
+ 0x10000000, 0x20000000, 0x40000000, 0x80000000,
+ 0x1B000000, 0x36000000, 0x6C000000
+};
+
+static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
+ unsigned char *input, int err);
+
static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
return ctx->crypto_ctx->aeadctx;
@@ -108,18 +131,6 @@ static inline int is_ofld_imm(const struct sk_buff *skb)
return (skb->len <= SGE_MAX_WR_LEN);
}
-/*
- * sgl_len - calculates the size of an SGL of the given capacity
- * @n: the number of SGL entries
- * Calculates the number of flits needed for a scatter/gather list that
- * can hold the given number of entries.
- */
-static inline unsigned int sgl_len(unsigned int n)
-{
- n--;
- return (3 * n) / 2 + (n & 1) + 2;
-}
-
static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
unsigned int entlen,
unsigned int skip)
@@ -160,7 +171,6 @@ static inline void chcr_handle_ahash_resp(struct ahash_request *req,
if (input == NULL)
goto out;
- reqctx = ahash_request_ctx(req);
digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
if (reqctx->is_sg_map)
chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
@@ -183,30 +193,17 @@ static inline void chcr_handle_ahash_resp(struct ahash_request *req,
}
out:
req->base.complete(&req->base, err);
+}
- }
-
-static inline void chcr_handle_aead_resp(struct aead_request *req,
- unsigned char *input,
- int err)
+static inline int get_aead_subtype(struct crypto_aead *aead)
{
- struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
-
-
- chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
- if (reqctx->b0_dma)
- dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->b0_dma,
- reqctx->b0_len, DMA_BIDIRECTIONAL);
- if (reqctx->verify == VERIFY_SW) {
- chcr_verify_tag(req, input, &err);
- reqctx->verify = VERIFY_HW;
+ struct aead_alg *alg = crypto_aead_alg(aead);
+ struct chcr_alg_template *chcr_crypto_alg =
+ container_of(alg, struct chcr_alg_template, alg.aead);
+ return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}
- req->base.complete(&req->base, err);
-}
-static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
+void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{
u8 temp[SHA512_DIGEST_SIZE];
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -231,6 +228,25 @@ static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
*err = 0;
}
+static inline void chcr_handle_aead_resp(struct aead_request *req,
+ unsigned char *input,
+ int err)
+{
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
+
+ chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
+ if (reqctx->b0_dma)
+ dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->b0_dma,
+ reqctx->b0_len, DMA_BIDIRECTIONAL);
+ if (reqctx->verify == VERIFY_SW) {
+ chcr_verify_tag(req, input, &err);
+ reqctx->verify = VERIFY_HW;
+ }
+ req->base.complete(&req->base, err);
+}
+
/*
* chcr_handle_resp - Unmap the DMA buffers associated with the request
* @req: crypto request
@@ -558,7 +574,8 @@ static void ulptx_walk_add_sg(struct ulptx_walk *walk,
skip = 0;
}
}
- if (walk->nents == 0) {
+ WARN(!sg, "SG should not be null here\n");
+ if (sg && (walk->nents == 0)) {
small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
walk->sgl->len0 = cpu_to_be32(sgmin);
@@ -595,14 +612,6 @@ static void ulptx_walk_add_sg(struct ulptx_walk *walk,
}
}
-static inline int get_aead_subtype(struct crypto_aead *aead)
-{
- struct aead_alg *alg = crypto_aead_alg(aead);
- struct chcr_alg_template *chcr_crypto_alg =
- container_of(alg, struct chcr_alg_template, alg.aead);
- return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
-}
-
static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
{
struct crypto_alg *alg = tfm->__crt_alg;
@@ -675,7 +684,7 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src,
if (srclen <= dstlen)
break;
less = min_t(unsigned int, sg_dma_len(dst) - offset -
- dstskip, CHCR_DST_SG_SIZE);
+ dstskip, CHCR_DST_SG_SIZE);
dstlen += less;
offset += less;
if (offset == sg_dma_len(dst)) {
@@ -686,7 +695,7 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src,
dstskip = 0;
}
src = sg_next(src);
- srcskip = 0;
+ srcskip = 0;
}
return min(srclen, dstlen);
}
@@ -1008,7 +1017,8 @@ static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
return bytes;
}
-static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv)
+static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
+ u32 isfinal)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
@@ -1035,7 +1045,8 @@ static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv)
for (i = 0; i < (round % 8); i++)
gf128mul_x_ble((le128 *)iv, (le128 *)iv);
- crypto_cipher_decrypt_one(cipher, iv, iv);
+ if (!isfinal)
+ crypto_cipher_decrypt_one(cipher, iv, iv);
out:
return ret;
}
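
On the XTS path above, the driver advances the tweak between hardware chunks by repeated multiplication by x in GF(2^128), then decrypts it back into the form the next request expects; the new isfinal argument skips that last decrypt when no chunk follows. A standalone sketch of the doubling step alone, i.e. what gf128mul_x_ble() performs on a little-endian tweak, without the surrounding AES:

    #include <stdint.h>

    /* Multiply a 16-byte little-endian XTS tweak by x in GF(2^128). */
    static void gf128_x_ble(uint8_t t[16])
    {
        uint8_t carry = 0;

        for (int i = 0; i < 16; i++) {
            uint8_t next = t[i] >> 7;

            t[i] = (uint8_t)((t[i] << 1) | carry);
            carry = next;
        }
        if (carry)
            t[0] ^= 0x87;   /* XTS reduction polynomial */
    }
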
@@ -1056,7 +1067,7 @@ static int chcr_update_cipher_iv(struct ablkcipher_request *req,
CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
AES_BLOCK_SIZE) + 1);
else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
- ret = chcr_update_tweak(req, iv);
+ ret = chcr_update_tweak(req, iv, 0);
else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
if (reqctx->op)
sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv,
@@ -1087,7 +1098,7 @@ static int chcr_final_cipher_iv(struct ablkcipher_request *req,
ctr_add_iv(iv, req->info, (reqctx->processed /
AES_BLOCK_SIZE));
else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
- ret = chcr_update_tweak(req, iv);
+ ret = chcr_update_tweak(req, iv, 1);
else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
if (reqctx->op)
sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv,
@@ -1101,7 +1112,6 @@ static int chcr_final_cipher_iv(struct ablkcipher_request *req,
}
-
static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
unsigned char *input, int err)
{
@@ -1135,10 +1145,10 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 1,
SPACE_LEFT(ablkctx->enckey_len),
reqctx->src_ofst, reqctx->dst_ofst);
- if ((bytes + reqctx->processed) >= req->nbytes)
- bytes = req->nbytes - reqctx->processed;
- else
- bytes = ROUND_16(bytes);
+ if ((bytes + reqctx->processed) >= req->nbytes)
+ bytes = req->nbytes - reqctx->processed;
+ else
+ bytes = ROUND_16(bytes);
} else {
			/* CTR mode counter overflow */
bytes = req->nbytes - reqctx->processed;
@@ -1239,15 +1249,15 @@ static int process_cipher(struct ablkcipher_request *req,
MIN_CIPHER_SG,
SPACE_LEFT(ablkctx->enckey_len),
0, 0);
- if ((bytes + reqctx->processed) >= req->nbytes)
- bytes = req->nbytes - reqctx->processed;
- else
- bytes = ROUND_16(bytes);
+ if ((bytes + reqctx->processed) >= req->nbytes)
+ bytes = req->nbytes - reqctx->processed;
+ else
+ bytes = ROUND_16(bytes);
} else {
bytes = req->nbytes;
}
if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
- CRYPTO_ALG_SUB_TYPE_CTR) {
+ CRYPTO_ALG_SUB_TYPE_CTR) {
bytes = adjust_ctr_overflow(req->info, bytes);
}
if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
@@ -2014,11 +2024,8 @@ static int chcr_aead_common_init(struct aead_request *req,
struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
int error = -EINVAL;
- unsigned int dst_size;
unsigned int authsize = crypto_aead_authsize(tfm);
- dst_size = req->assoclen + req->cryptlen + (op_type ?
- -authsize : authsize);
/* validate key size */
if (aeadctx->enckey_len == 0)
goto err;
@@ -2083,7 +2090,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
struct cpl_rx_phys_dsgl *phys_cpl;
struct ulptx_sgl *ulptx;
unsigned int transhdr_len;
- unsigned int dst_size = 0, temp;
+ unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
unsigned int kctx_len = 0, dnents;
unsigned int assoclen = req->assoclen;
unsigned int authsize = crypto_aead_authsize(tfm);
@@ -2097,24 +2104,19 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
return NULL;
reqctx->b0_dma = 0;
- if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
null = 1;
assoclen = 0;
}
- dst_size = assoclen + req->cryptlen + (op_type ? -authsize :
- authsize);
error = chcr_aead_common_init(req, op_type);
if (error)
return ERR_PTR(error);
- if (dst_size) {
- dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
- dnents += sg_nents_xlen(req->dst, req->cryptlen +
- (op_type ? -authsize : authsize), CHCR_DST_SG_SIZE,
- req->assoclen);
- dnents += MIN_AUTH_SG; // For IV
- } else {
- dnents = 0;
- }
+ dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
+ dnents += sg_nents_xlen(req->dst, req->cryptlen +
+ (op_type ? -authsize : authsize), CHCR_DST_SG_SIZE,
+ req->assoclen);
+ dnents += MIN_AUTH_SG; // For IV
dst_size = get_space_for_phys_dsgl(dnents);
kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
@@ -2162,16 +2164,23 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
temp & 0xF,
null ? 0 : assoclen + IV + 1,
temp, temp);
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
+ temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
+ else
+ temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
(op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
- CHCR_SCMD_CIPHER_MODE_AES_CBC,
+ temp,
actx->auth_mode, aeadctx->hmac_ctrl,
IV >> 1);
chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
0, 0, dst_size);
chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
- if (op_type == CHCR_ENCRYPT_OP)
+ if (op_type == CHCR_ENCRYPT_OP ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
memcpy(chcr_req->key_ctx.key, aeadctx->key,
aeadctx->enckey_len);
else
@@ -2181,7 +2190,16 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) <<
4), actx->h_iopad, kctx_len -
(DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4));
- memcpy(reqctx->iv, req->iv, IV);
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
+ memcpy(reqctx->iv, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
+ memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
+ CTR_RFC3686_IV_SIZE);
+ *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
+ CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
+ } else {
+ memcpy(reqctx->iv, req->iv, IV);
+ }
phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
@@ -2202,9 +2220,9 @@ err:
return ERR_PTR(error);
}
-static int chcr_aead_dma_map(struct device *dev,
- struct aead_request *req,
- unsigned short op_type)
+int chcr_aead_dma_map(struct device *dev,
+ struct aead_request *req,
+ unsigned short op_type)
{
int error;
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
@@ -2246,9 +2264,9 @@ err:
return -ENOMEM;
}
-static void chcr_aead_dma_unmap(struct device *dev,
- struct aead_request *req,
- unsigned short op_type)
+void chcr_aead_dma_unmap(struct device *dev,
+ struct aead_request *req,
+ unsigned short op_type)
{
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -2273,10 +2291,10 @@ static void chcr_aead_dma_unmap(struct device *dev,
}
}
-static inline void chcr_add_aead_src_ent(struct aead_request *req,
- struct ulptx_sgl *ulptx,
- unsigned int assoclen,
- unsigned short op_type)
+void chcr_add_aead_src_ent(struct aead_request *req,
+ struct ulptx_sgl *ulptx,
+ unsigned int assoclen,
+ unsigned short op_type)
{
struct ulptx_walk ulp_walk;
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
@@ -2308,11 +2326,11 @@ static inline void chcr_add_aead_src_ent(struct aead_request *req,
}
}
-static inline void chcr_add_aead_dst_ent(struct aead_request *req,
- struct cpl_rx_phys_dsgl *phys_cpl,
- unsigned int assoclen,
- unsigned short op_type,
- unsigned short qid)
+void chcr_add_aead_dst_ent(struct aead_request *req,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ unsigned int assoclen,
+ unsigned short op_type,
+ unsigned short qid)
{
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -2330,9 +2348,9 @@ static inline void chcr_add_aead_dst_ent(struct aead_request *req,
dsgl_walk_end(&dsgl_walk, qid);
}
-static inline void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
- struct ulptx_sgl *ulptx,
- struct cipher_wr_param *wrparam)
+void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
+ struct ulptx_sgl *ulptx,
+ struct cipher_wr_param *wrparam)
{
struct ulptx_walk ulp_walk;
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
@@ -2355,10 +2373,10 @@ static inline void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
}
}
-static inline void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
- struct cpl_rx_phys_dsgl *phys_cpl,
- struct cipher_wr_param *wrparam,
- unsigned short qid)
+void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ struct cipher_wr_param *wrparam,
+ unsigned short qid)
{
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
struct dsgl_walk dsgl_walk;
@@ -2373,9 +2391,9 @@ static inline void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
dsgl_walk_end(&dsgl_walk, qid);
}
-static inline void chcr_add_hash_src_ent(struct ahash_request *req,
- struct ulptx_sgl *ulptx,
- struct hash_wr_param *param)
+void chcr_add_hash_src_ent(struct ahash_request *req,
+ struct ulptx_sgl *ulptx,
+ struct hash_wr_param *param)
{
struct ulptx_walk ulp_walk;
struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
@@ -2395,16 +2413,13 @@ static inline void chcr_add_hash_src_ent(struct ahash_request *req,
ulptx_walk_add_page(&ulp_walk, param->bfr_len,
&reqctx->dma_addr);
ulptx_walk_add_sg(&ulp_walk, req->src, param->sg_len,
- 0);
-// reqctx->srcsg = ulp_walk.last_sg;
-// reqctx->src_ofst = ulp_walk.last_sg_len;
- ulptx_walk_end(&ulp_walk);
+ 0);
+ ulptx_walk_end(&ulp_walk);
}
}
-
-static inline int chcr_hash_dma_map(struct device *dev,
- struct ahash_request *req)
+int chcr_hash_dma_map(struct device *dev,
+ struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
int error = 0;
@@ -2414,13 +2429,13 @@ static inline int chcr_hash_dma_map(struct device *dev,
error = dma_map_sg(dev, req->src, sg_nents(req->src),
DMA_TO_DEVICE);
if (!error)
- return error;
+ return -ENOMEM;
req_ctx->is_sg_map = 1;
return 0;
}
-static inline void chcr_hash_dma_unmap(struct device *dev,
- struct ahash_request *req)
+void chcr_hash_dma_unmap(struct device *dev,
+ struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
@@ -2433,9 +2448,8 @@ static inline void chcr_hash_dma_unmap(struct device *dev,
}
-
-static int chcr_cipher_dma_map(struct device *dev,
- struct ablkcipher_request *req)
+int chcr_cipher_dma_map(struct device *dev,
+ struct ablkcipher_request *req)
{
int error;
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
@@ -2469,8 +2483,9 @@ err:
dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
return -ENOMEM;
}
-static void chcr_cipher_dma_unmap(struct device *dev,
- struct ablkcipher_request *req)
+
+void chcr_cipher_dma_unmap(struct device *dev,
+ struct ablkcipher_request *req)
{
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
@@ -2666,8 +2681,6 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
sub_type = get_aead_subtype(tfm);
if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
assoclen -= 8;
- dst_size = assoclen + req->cryptlen + (op_type ? -authsize :
- authsize);
error = chcr_aead_common_init(req, op_type);
if (error)
return ERR_PTR(error);
@@ -2677,15 +2690,11 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type);
if (error)
goto err;
- if (dst_size) {
- dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
- dnents += sg_nents_xlen(req->dst, req->cryptlen
- + (op_type ? -authsize : authsize),
- CHCR_DST_SG_SIZE, req->assoclen);
- dnents += MIN_CCM_SG; // For IV and B0
- } else {
- dnents = 0;
- }
+ dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
+ dnents += sg_nents_xlen(req->dst, req->cryptlen
+ + (op_type ? -authsize : authsize),
+ CHCR_DST_SG_SIZE, req->assoclen);
+ dnents += MIN_CCM_SG; // For IV and B0
dst_size = get_space_for_phys_dsgl(dnents);
kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2;
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
@@ -2780,19 +2789,14 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
assoclen = req->assoclen - 8;
reqctx->b0_dma = 0;
- dst_size = assoclen + req->cryptlen + (op_type ? -authsize : authsize);
error = chcr_aead_common_init(req, op_type);
- if (error)
- return ERR_PTR(error);
- if (dst_size) {
- dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
- dnents += sg_nents_xlen(req->dst,
- req->cryptlen + (op_type ? -authsize : authsize),
+ if (error)
+ return ERR_PTR(error);
+ dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
+ dnents += sg_nents_xlen(req->dst, req->cryptlen +
+ (op_type ? -authsize : authsize),
CHCR_DST_SG_SIZE, req->assoclen);
- dnents += MIN_GCM_SG; // For IV
- } else {
- dnents = 0;
- }
+ dnents += MIN_GCM_SG; // For IV
dst_size = get_space_for_phys_dsgl(dnents);
kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) +
AEAD_H_SIZE;
@@ -2829,10 +2833,10 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
assoclen ? 1 : 0, assoclen,
assoclen + IV + 1, 0);
- chcr_req->sec_cpl.cipherstop_lo_authinsert =
+ chcr_req->sec_cpl.cipherstop_lo_authinsert =
FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1,
temp, temp);
- chcr_req->sec_cpl.seqno_numivs =
+ chcr_req->sec_cpl.seqno_numivs =
FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
CHCR_ENCRYPT_OP) ? 1 : 0,
CHCR_SCMD_CIPHER_MODE_AES_GCM,
@@ -3212,7 +3216,7 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
/* it contains auth and cipher key both*/
struct crypto_authenc_keys keys;
- unsigned int bs;
+ unsigned int bs, subtype;
unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
int err = 0, i, key_ctx_len = 0;
unsigned char ck_size = 0;
@@ -3241,6 +3245,15 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
pr_err("chcr : Unsupported digest size\n");
goto out;
}
+ subtype = get_aead_subtype(authenc);
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
+ if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
+ goto out;
+ memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
+ - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
+ keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
+ }
if (keys.enckeylen == AES_KEYSIZE_128) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
} else if (keys.enckeylen == AES_KEYSIZE_192) {
@@ -3258,9 +3271,12 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
*/
memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
aeadctx->enckey_len = keys.enckeylen;
- get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
- aeadctx->enckey_len << 3);
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
+ get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
+ aeadctx->enckey_len << 3);
+ }
base_hash = chcr_alloc_shash(max_authsize);
if (IS_ERR(base_hash)) {
pr_err("chcr : Base driver cannot be loaded\n");
@@ -3333,6 +3349,7 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
struct crypto_authenc_keys keys;
int err;
/* it contains auth and cipher key both*/
+ unsigned int subtype;
int key_ctx_len = 0;
unsigned char ck_size = 0;
@@ -3350,6 +3367,15 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
goto out;
}
+ subtype = get_aead_subtype(authenc);
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
+ if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
+ goto out;
+ memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
+ - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
+ keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
+ }
if (keys.enckeylen == AES_KEYSIZE_128) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
} else if (keys.enckeylen == AES_KEYSIZE_192) {
@@ -3357,13 +3383,16 @@ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
} else if (keys.enckeylen == AES_KEYSIZE_256) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
} else {
- pr_err("chcr : Unsupported cipher key\n");
+ pr_err("chcr : Unsupported cipher key %d\n", keys.enckeylen);
goto out;
}
memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
aeadctx->enckey_len = keys.enckeylen;
- get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
- aeadctx->enckey_len << 3);
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
+ get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
+ aeadctx->enckey_len << 3);
+ }
key_ctx_len = sizeof(struct _key_ctx)
+ ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4);
@@ -3375,6 +3404,40 @@ out:
aeadctx->enckey_len = 0;
return -EINVAL;
}
+
+static int chcr_aead_op(struct aead_request *req,
+ unsigned short op_type,
+ int size,
+ create_wr_t create_wr_fn)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct uld_ctx *u_ctx;
+ struct sk_buff *skb;
+
+ if (!a_ctx(tfm)->dev) {
+ pr_err("chcr : %s : No crypto device.\n", __func__);
+ return -ENXIO;
+ }
+ u_ctx = ULD_CTX(a_ctx(tfm));
+ if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+ a_ctx(tfm)->tx_qidx)) {
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return -EBUSY;
+ }
+
+ /* Form a WR from req */
+ skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size,
+ op_type);
+
+ if (IS_ERR(skb) || !skb)
+ return PTR_ERR(skb);
+
+ skb->dev = u_ctx->lldi.ports[0];
+ set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
+ chcr_send_wr(skb);
+ return -EINPROGRESS;
+}
+
static int chcr_aead_encrypt(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -3383,8 +3446,10 @@ static int chcr_aead_encrypt(struct aead_request *req)
reqctx->verify = VERIFY_HW;
switch (get_aead_subtype(tfm)) {
- case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
- case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
+ case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
+ case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
+ case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
+ case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
create_authenc_wr);
case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
@@ -3413,8 +3478,10 @@ static int chcr_aead_decrypt(struct aead_request *req)
}
switch (get_aead_subtype(tfm)) {
- case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
- case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
+ case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
+ case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
+ case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
+ case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
create_authenc_wr);
case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
@@ -3427,38 +3494,6 @@ static int chcr_aead_decrypt(struct aead_request *req)
}
}
-static int chcr_aead_op(struct aead_request *req,
- unsigned short op_type,
- int size,
- create_wr_t create_wr_fn)
-{
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct uld_ctx *u_ctx;
- struct sk_buff *skb;
-
- if (!a_ctx(tfm)->dev) {
- pr_err("chcr : %s : No crypto device.\n", __func__);
- return -ENXIO;
- }
- u_ctx = ULD_CTX(a_ctx(tfm));
- if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- a_ctx(tfm)->tx_qidx)) {
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -EBUSY;
- }
-
- /* Form a WR from req */
- skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size,
- op_type);
-
- if (IS_ERR(skb) || !skb)
- return PTR_ERR(skb);
-
- skb->dev = u_ctx->lldi.ports[0];
- set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
- chcr_send_wr(skb);
- return -EINPROGRESS;
-}
static struct chcr_alg_template driver_algs[] = {
/* AES-CBC */
{
@@ -3742,7 +3777,7 @@ static struct chcr_alg_template driver_algs[] = {
}
},
{
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
.is_registered = 0,
.alg.aead = {
.base = {
@@ -3763,7 +3798,7 @@ static struct chcr_alg_template driver_algs[] = {
}
},
{
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
.is_registered = 0,
.alg.aead = {
.base = {
@@ -3785,7 +3820,7 @@ static struct chcr_alg_template driver_algs[] = {
}
},
{
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
.is_registered = 0,
.alg.aead = {
.base = {
@@ -3805,7 +3840,7 @@ static struct chcr_alg_template driver_algs[] = {
}
},
{
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
.is_registered = 0,
.alg.aead = {
.base = {
@@ -3826,7 +3861,7 @@ static struct chcr_alg_template driver_algs[] = {
}
},
{
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
.is_registered = 0,
.alg.aead = {
.base = {
@@ -3847,7 +3882,7 @@ static struct chcr_alg_template driver_algs[] = {
}
},
{
- .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_NULL,
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
.is_registered = 0,
.alg.aead = {
.base = {
@@ -3867,6 +3902,133 @@ static struct chcr_alg_template driver_algs[] = {
.setauthsize = chcr_authenc_null_setauthsize,
}
},
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+
+ .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ .setkey = chcr_authenc_setkey,
+ .setauthsize = chcr_authenc_setauthsize,
+ }
+ },
+ {
+ .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
+ .is_registered = 0,
+ .alg.aead = {
+ .base = {
+ .cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
+ .cra_driver_name =
+ "authenc-digest_null-rfc3686-ctr-aes-chcr",
+ .cra_blocksize = 1,
+ .cra_priority = CHCR_AEAD_PRIORITY,
+ .cra_ctxsize = sizeof(struct chcr_context) +
+ sizeof(struct chcr_aead_ctx) +
+ sizeof(struct chcr_authenc_ctx),
+
+ },
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = 0,
+ .setkey = chcr_aead_digest_null_setkey,
+ .setauthsize = chcr_authenc_null_setauthsize,
+ }
+ },
+
};
/*
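
A thread running through the chcr_algo.c changes above: for the new rfc3686(ctr(aes)) modes, setkey strips a 4-byte nonce from the tail of the encryption key, and each request's counter block is assembled as nonce || IV || be32(1). A minimal sketch of that layout (sizes per RFC 3686):

    #include <stdint.h>
    #include <string.h>

    #define CTR_RFC3686_NONCE_SIZE 4
    #define CTR_RFC3686_IV_SIZE    8

    /* Assemble nonce || IV || be32(1) into a 16-byte AES-CTR block. */
    static void rfc3686_iv(uint8_t out[16], const uint8_t *nonce,
                           const uint8_t *iv)
    {
        memcpy(out, nonce, CTR_RFC3686_NONCE_SIZE);
        memcpy(out + CTR_RFC3686_NONCE_SIZE, iv, CTR_RFC3686_IV_SIZE);
        out[12] = 0;                /* initial block counter = 1,  */
        out[13] = 0;                /* big endian                  */
        out[14] = 0;
        out[15] = 1;
    }
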
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index 96c9335ee728..d1673a5d4bf5 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -226,15 +226,6 @@
#define SPACE_LEFT(len) \
((SGE_MAX_WR_LEN - WR_MIN_LEN - (len)))
-unsigned int sgl_ent_len[] = {0, 0, 16, 24, 40, 48, 64, 72, 88,
- 96, 112, 120, 136, 144, 160, 168, 184,
- 192, 208, 216, 232, 240, 256, 264, 280,
- 288, 304, 312, 328, 336, 352, 360, 376};
-unsigned int dsgl_ent_len[] = {0, 32, 32, 48, 48, 64, 64, 80, 80,
- 112, 112, 128, 128, 144, 144, 160, 160,
- 192, 192, 208, 208, 224, 224, 240, 240,
- 272, 272, 288, 288, 304, 304, 320, 320};
-
struct algo_param {
unsigned int auth_mode;
unsigned int mk_size;
@@ -404,10 +395,4 @@ static inline u32 aes_ks_subword(const u32 w)
return *(u32 *)(&bytes[0]);
}
-static u32 round_constant[11] = {
- 0x01000000, 0x02000000, 0x04000000, 0x08000000,
- 0x10000000, 0x20000000, 0x40000000, 0x80000000,
- 0x1B000000, 0x36000000, 0x6C000000
-};
-
#endif /* __CHCR_ALGO_H__ */
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index f5a2624081dc..04f277cade7c 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -48,6 +48,9 @@ static struct cxgb4_uld_info chcr_uld_info = {
.add = chcr_uld_add,
.state_change = chcr_uld_state_change,
.rx_handler = chcr_uld_rx_handler,
+#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+ .tx_handler = chcr_uld_tx_handler,
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
};
struct uld_ctx *assign_chcr_device(void)
@@ -164,6 +167,10 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
goto out;
}
u_ctx->lldi = *lld;
+#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+ if (lld->crypto & ULP_CRYPTO_IPSEC_INLINE)
+ chcr_add_xfrmops(lld);
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
out:
return u_ctx;
}
@@ -187,6 +194,13 @@ int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
return 0;
}
+#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev)
+{
+ return chcr_ipsec_xmit(skb, dev);
+}
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+
static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
{
struct uld_ctx *u_ctx = handle;
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index 94e7412f6164..3c29ee09b8b5 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -39,6 +39,7 @@
#include <crypto/algapi.h>
#include "t4_hw.h"
#include "cxgb4.h"
+#include "t4_msg.h"
#include "cxgb4_uld.h"
#define DRV_MODULE_NAME "chcr"
@@ -89,12 +90,49 @@ struct uld_ctx {
struct chcr_dev *dev;
};
+struct chcr_ipsec_req {
+ struct ulp_txpkt ulptx;
+ struct ulptx_idata sc_imm;
+ struct cpl_tx_sec_pdu sec_cpl;
+ struct _key_ctx key_ctx;
+};
+
+struct chcr_ipsec_wr {
+ struct fw_ulptx_wr wreq;
+ struct chcr_ipsec_req req;
+};
+
+struct ipsec_sa_entry {
+ int hmac_ctrl;
+ unsigned int enckey_len;
+ unsigned int kctx_len;
+ unsigned int authsize;
+ __be32 key_ctx_hdr;
+ char salt[MAX_SALT];
+ char key[2 * AES_MAX_KEY_SIZE];
+};
+
+/*
+ * sgl_len - calculates the size of an SGL of the given capacity
+ * @n: the number of SGL entries
+ * Calculates the number of flits needed for a scatter/gather list that
+ * can hold the given number of entries.
+ */
+static inline unsigned int sgl_len(unsigned int n)
+{
+ n--;
+ return (3 * n) / 2 + (n & 1) + 2;
+}
+
struct uld_ctx *assign_chcr_device(void);
int chcr_send_wr(struct sk_buff *skb);
int start_crypto(void);
int stop_crypto(void);
int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
const struct pkt_gl *pgl);
+int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev);
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
int err);
+int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev);
+void chcr_add_xfrmops(const struct cxgb4_lld_info *lld);
#endif /* __CHCR_CORE_H__ */
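
sgl_len(), now shared via this header, counts 8-byte flits for a ULP_TX scatter list: the first entry fits in the two header flits, and each further pair of entries costs three flits, plus one more when the leftover count is odd. A standalone check of the formula:

    #include <stdio.h>

    static unsigned int sgl_len(unsigned int n)
    {
        n--;
        return (3 * n) / 2 + (n & 1) + 2;
    }

    int main(void)
    {
        for (unsigned int n = 1; n <= 5; n++)
            printf("entries=%u flits=%u\n", n, sgl_len(n)); /* 2,4,5,7,8 */
        return 0;
    }
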
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 94a87e3ad9bc..7daf0a17a7d2 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -134,14 +134,16 @@
#define CRYPTO_ALG_SUB_TYPE_HASH_HMAC 0x01000000
#define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 0x02000000
#define CRYPTO_ALG_SUB_TYPE_AEAD_GCM 0x03000000
-#define CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC 0x04000000
+#define CRYPTO_ALG_SUB_TYPE_CBC_SHA 0x04000000
#define CRYPTO_ALG_SUB_TYPE_AEAD_CCM 0x05000000
#define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309 0x06000000
-#define CRYPTO_ALG_SUB_TYPE_AEAD_NULL 0x07000000
+#define CRYPTO_ALG_SUB_TYPE_CBC_NULL 0x07000000
#define CRYPTO_ALG_SUB_TYPE_CTR 0x08000000
#define CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 0x09000000
#define CRYPTO_ALG_SUB_TYPE_XTS 0x0a000000
#define CRYPTO_ALG_SUB_TYPE_CBC 0x0b000000
+#define CRYPTO_ALG_SUB_TYPE_CTR_SHA 0x0c000000
+#define CRYPTO_ALG_SUB_TYPE_CTR_NULL 0x0d000000
#define CRYPTO_ALG_TYPE_HMAC (CRYPTO_ALG_TYPE_AHASH |\
CRYPTO_ALG_SUB_TYPE_HASH_HMAC)
@@ -210,8 +212,6 @@ struct dsgl_walk {
struct phys_sge_pairs *to;
};
-
-
struct chcr_gcm_ctx {
u8 ghash_h[AEAD_H_SIZE];
};
@@ -227,21 +227,18 @@ struct __aead_ctx {
struct chcr_authenc_ctx authenc[0];
};
-
-
struct chcr_aead_ctx {
__be32 key_ctx_hdr;
unsigned int enckey_len;
struct crypto_aead *sw_cipher;
u8 salt[MAX_SALT];
u8 key[CHCR_AES_MAX_KEY_LEN];
+ u8 nonce[4];
u16 hmac_ctrl;
u16 mayverify;
struct __aead_ctx ctx[0];
};
-
-
struct hmac_ctx {
struct crypto_shash *base_hash;
u8 ipad[CHCR_HASH_MAX_BLOCK_SIZE_128];
@@ -307,44 +304,29 @@ typedef struct sk_buff *(*create_wr_t)(struct aead_request *req,
int size,
unsigned short op_type);
-static int chcr_aead_op(struct aead_request *req_base,
- unsigned short op_type,
- int size,
- create_wr_t create_wr_fn);
-static inline int get_aead_subtype(struct crypto_aead *aead);
-static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
- unsigned char *input, int err);
-static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err);
-static int chcr_aead_dma_map(struct device *dev, struct aead_request *req,
- unsigned short op_type);
-static void chcr_aead_dma_unmap(struct device *dev, struct aead_request
- *req, unsigned short op_type);
-static inline void chcr_add_aead_dst_ent(struct aead_request *req,
- struct cpl_rx_phys_dsgl *phys_cpl,
- unsigned int assoclen,
- unsigned short op_type,
- unsigned short qid);
-static inline void chcr_add_aead_src_ent(struct aead_request *req,
- struct ulptx_sgl *ulptx,
- unsigned int assoclen,
- unsigned short op_type);
-static inline void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
- struct ulptx_sgl *ulptx,
- struct cipher_wr_param *wrparam);
-static int chcr_cipher_dma_map(struct device *dev,
- struct ablkcipher_request *req);
-static void chcr_cipher_dma_unmap(struct device *dev,
- struct ablkcipher_request *req);
-static inline void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
- struct cpl_rx_phys_dsgl *phys_cpl,
- struct cipher_wr_param *wrparam,
- unsigned short qid);
+void chcr_verify_tag(struct aead_request *req, u8 *input, int *err);
+int chcr_aead_dma_map(struct device *dev, struct aead_request *req,
+ unsigned short op_type);
+void chcr_aead_dma_unmap(struct device *dev, struct aead_request *req,
+ unsigned short op_type);
+void chcr_add_aead_dst_ent(struct aead_request *req,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ unsigned int assoclen, unsigned short op_type,
+ unsigned short qid);
+void chcr_add_aead_src_ent(struct aead_request *req, struct ulptx_sgl *ulptx,
+ unsigned int assoclen, unsigned short op_type);
+void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
+ struct ulptx_sgl *ulptx,
+ struct cipher_wr_param *wrparam);
+int chcr_cipher_dma_map(struct device *dev, struct ablkcipher_request *req);
+void chcr_cipher_dma_unmap(struct device *dev, struct ablkcipher_request *req);
+void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+ struct cipher_wr_param *wrparam,
+ unsigned short qid);
int sg_nents_len_skip(struct scatterlist *sg, u64 len, u64 skip);
-static inline void chcr_add_hash_src_ent(struct ahash_request *req,
- struct ulptx_sgl *ulptx,
- struct hash_wr_param *param);
-static inline int chcr_hash_dma_map(struct device *dev,
- struct ahash_request *req);
-static inline void chcr_hash_dma_unmap(struct device *dev,
- struct ahash_request *req);
+void chcr_add_hash_src_ent(struct ahash_request *req, struct ulptx_sgl *ulptx,
+ struct hash_wr_param *param);
+int chcr_hash_dma_map(struct device *dev, struct ahash_request *req);
+void chcr_hash_dma_unmap(struct device *dev, struct ahash_request *req);
#endif /* __CHCR_CRYPTO_H__ */
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
new file mode 100644
index 000000000000..db1e241104ed
--- /dev/null
+++ b/drivers/crypto/chelsio/chcr_ipsec.c
@@ -0,0 +1,654 @@
+/*
+ * This file is part of the Chelsio T6 Crypto driver for Linux.
+ *
+ * Copyright (c) 2003-2017 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Written and Maintained by:
+ * Atul Gupta (atul.gupta@chelsio.com)
+ */
+
+#define pr_fmt(fmt) "chcr:" fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/cryptohash.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/highmem.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/netdevice.h>
+#include <net/esp.h>
+#include <net/xfrm.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <crypto/authenc.h>
+#include <crypto/internal/aead.h>
+#include <crypto/null.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/aead.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/hash.h>
+
+#include "chcr_core.h"
+#include "chcr_algo.h"
+#include "chcr_crypto.h"
+
+/*
+ * Max Tx descriptor space we allow for an Ethernet packet to be inlined
+ * into a WR.
+ */
+#define MAX_IMM_TX_PKT_LEN 256
+#define GCM_ESP_IV_SIZE 8
+
+static int chcr_xfrm_add_state(struct xfrm_state *x);
+static void chcr_xfrm_del_state(struct xfrm_state *x);
+static void chcr_xfrm_free_state(struct xfrm_state *x);
+static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
+
+static const struct xfrmdev_ops chcr_xfrmdev_ops = {
+ .xdo_dev_state_add = chcr_xfrm_add_state,
+ .xdo_dev_state_delete = chcr_xfrm_del_state,
+ .xdo_dev_state_free = chcr_xfrm_free_state,
+ .xdo_dev_offload_ok = chcr_ipsec_offload_ok,
+};
+
+/* Add offload xfrms to Chelsio Interface */
+void chcr_add_xfrmops(const struct cxgb4_lld_info *lld)
+{
+ struct net_device *netdev = NULL;
+ int i;
+
+ for (i = 0; i < lld->nports; i++) {
+ netdev = lld->ports[i];
+ if (!netdev)
+ continue;
+ netdev->xfrmdev_ops = &chcr_xfrmdev_ops;
+ netdev->hw_enc_features |= NETIF_F_HW_ESP;
+ netdev->features |= NETIF_F_HW_ESP;
+ rtnl_lock();
+ netdev_change_features(netdev);
+ rtnl_unlock();
+ }
+}
+
+static inline int chcr_ipsec_setauthsize(struct xfrm_state *x,
+ struct ipsec_sa_entry *sa_entry)
+{
+ int hmac_ctrl;
+ int authsize = x->aead->alg_icv_len / 8;
+
+ sa_entry->authsize = authsize;
+
+ switch (authsize) {
+ case ICV_8:
+ hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+ break;
+ case ICV_12:
+ hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ break;
+ case ICV_16:
+ hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return hmac_ctrl;
+}
+
+static inline int chcr_ipsec_setkey(struct xfrm_state *x,
+ struct ipsec_sa_entry *sa_entry)
+{
+ struct crypto_cipher *cipher;
+ int keylen = (x->aead->alg_key_len + 7) / 8;
+ unsigned char *key = x->aead->alg_key;
+ int ck_size, key_ctx_size = 0;
+ unsigned char ghash_h[AEAD_H_SIZE];
+ int ret = 0;
+
+ if (keylen > 3) {
+ keylen -= 4; /* nonce/salt is present in the last 4 bytes */
+ memcpy(sa_entry->salt, key + keylen, 4);
+ }
+
+ if (keylen == AES_KEYSIZE_128) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+ } else if (keylen == AES_KEYSIZE_192) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+ } else if (keylen == AES_KEYSIZE_256) {
+ ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+ } else {
+ pr_err("GCM: Invalid key length %d\n", keylen);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(sa_entry->key, key, keylen);
+ sa_entry->enckey_len = keylen;
+ key_ctx_size = sizeof(struct _key_ctx) +
+ ((DIV_ROUND_UP(keylen, 16)) << 4) +
+ AEAD_H_SIZE;
+
+ sa_entry->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
+ CHCR_KEYCTX_MAC_KEY_SIZE_128,
+ 0, 0,
+ key_ctx_size >> 4);
+
+ /* Calculate the H = CIPH(K, 0 repeated 16 times).
+ * It will go in key context
+ */
+ cipher = crypto_alloc_cipher("aes-generic", 0, 0);
+ if (IS_ERR(cipher)) {
+ sa_entry->enckey_len = 0;
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = crypto_cipher_setkey(cipher, key, keylen);
+ if (ret) {
+ sa_entry->enckey_len = 0;
+ goto out1;
+ }
+ memset(ghash_h, 0, AEAD_H_SIZE);
+ crypto_cipher_encrypt_one(cipher, ghash_h, ghash_h);
+ memcpy(sa_entry->key + (DIV_ROUND_UP(sa_entry->enckey_len, 16) *
+ 16), ghash_h, AEAD_H_SIZE);
+ sa_entry->kctx_len = ((DIV_ROUND_UP(sa_entry->enckey_len, 16)) << 4) +
+ AEAD_H_SIZE;
+out1:
+ crypto_free_cipher(cipher);
+out:
+ return ret;
+}
+
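
chcr_ipsec_setkey() above derives the GHASH hash key H = AES_K(0^128) in software and appends it after the 16-byte-aligned cipher key, because the hardware key context expects it precomputed. A hedged userspace sketch of the same derivation using OpenSSL as a stand-in for the in-kernel "aes-generic" cipher the driver actually allocates:

    #include <string.h>
    #include <openssl/aes.h>

    /* H = CIPH_K(0^16), placed after the padded cipher key. */
    static int derive_ghash_h(const unsigned char *key, int keylen_bits,
                              unsigned char h[16])
    {
        AES_KEY enc;

        if (AES_set_encrypt_key(key, keylen_bits, &enc) != 0)
            return -1;
        memset(h, 0, 16);
        AES_encrypt(h, h, &enc);    /* encrypt the all-zero block in place */
        return 0;
    }
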
+/*
+ * chcr_xfrm_add_state
+ * returns 0 on success, negative error otherwise
+ */
+static int chcr_xfrm_add_state(struct xfrm_state *x)
+{
+ struct ipsec_sa_entry *sa_entry;
+ int res = 0;
+
+ if (x->props.aalgo != SADB_AALG_NONE) {
+ pr_debug("CHCR: Cannot offload authenticated xfrm states\n");
+ return -EINVAL;
+ }
+ if (x->props.calgo != SADB_X_CALG_NONE) {
+ pr_debug("CHCR: Cannot offload compressed xfrm states\n");
+ return -EINVAL;
+ }
+ if (x->props.flags & XFRM_STATE_ESN) {
+ pr_debug("CHCR: Cannot offload ESN xfrm states\n");
+ return -EINVAL;
+ }
+ if (x->props.family != AF_INET &&
+ x->props.family != AF_INET6) {
+ pr_debug("CHCR: Only IPv4/6 xfrm state offloaded\n");
+ return -EINVAL;
+ }
+ if (x->props.mode != XFRM_MODE_TRANSPORT &&
+ x->props.mode != XFRM_MODE_TUNNEL) {
+ pr_debug("CHCR: Only transport and tunnel xfrm offload\n");
+ return -EINVAL;
+ }
+ if (x->id.proto != IPPROTO_ESP) {
+ pr_debug("CHCR: Only ESP xfrm state offloaded\n");
+ return -EINVAL;
+ }
+ if (x->encap) {
+ pr_debug("CHCR: Encapsulated xfrm state not offloaded\n");
+ return -EINVAL;
+ }
+ if (!x->aead) {
+ pr_debug("CHCR: Cannot offload xfrm states without aead\n");
+ return -EINVAL;
+ }
+ if (x->aead->alg_icv_len != 128 &&
+ x->aead->alg_icv_len != 96) {
+ pr_debug("CHCR: Cannot offload xfrm states with AEAD ICV length other than 96b & 128b\n");
+ return -EINVAL;
+ }
+ if ((x->aead->alg_key_len != 128 + 32) &&
+ (x->aead->alg_key_len != 256 + 32)) {
+ pr_debug("CHCR: Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
+ return -EINVAL;
+ }
+ if (x->tfcpad) {
+ pr_debug("CHCR: Cannot offload xfrm states with tfc padding\n");
+ return -EINVAL;
+ }
+ if (!x->geniv) {
+ pr_debug("CHCR: Cannot offload xfrm states without geniv\n");
+ return -EINVAL;
+ }
+ if (strcmp(x->geniv, "seqiv")) {
+ pr_debug("CHCR: Cannot offload xfrm states with geniv other than seqiv\n");
+ return -EINVAL;
+ }
+
+ sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
+ if (!sa_entry) {
+ res = -ENOMEM;
+ goto out;
+ }
+
+ sa_entry->hmac_ctrl = chcr_ipsec_setauthsize(x, sa_entry);
+ chcr_ipsec_setkey(x, sa_entry);
+ x->xso.offload_handle = (unsigned long)sa_entry;
+ try_module_get(THIS_MODULE);
+out:
+ return res;
+}
+
+static void chcr_xfrm_del_state(struct xfrm_state *x)
+{
+ /* do nothing */
+ if (!x->xso.offload_handle)
+ return;
+}
+
+static void chcr_xfrm_free_state(struct xfrm_state *x)
+{
+ struct ipsec_sa_entry *sa_entry;
+
+ if (!x->xso.offload_handle)
+ return;
+
+ sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
+ kfree(sa_entry);
+ module_put(THIS_MODULE);
+}
+
+static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
+{
+ /* Offload with IP options is not supported yet */
+ if (ip_hdr(skb)->ihl > 5)
+ return false;
+
+ return true;
+}
+
+static inline int is_eth_imm(const struct sk_buff *skb, unsigned int kctx_len)
+{
+ int hdrlen = sizeof(struct chcr_ipsec_req) + kctx_len;
+
+ hdrlen += sizeof(struct cpl_tx_pkt);
+ if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
+ return hdrlen;
+ return 0;
+}
+
+static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
+ unsigned int kctx_len)
+{
+ unsigned int flits;
+ int hdrlen = is_eth_imm(skb, kctx_len);
+
+ /* If the skb is small enough, we can pump it out as a work request
+ * with only immediate data. In that case we just have to have the
+ * TX Packet header plus the skb data in the Work Request.
+ */
+
+ if (hdrlen)
+ return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
+
+ flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
+
+ /* Otherwise, we're going to have to construct a Scatter gather list
+ * of the skb body and fragments. We also include the flits necessary
+ * for the TX Packet Work Request and CPL. We always have a firmware
+ * Write Header (incorporated as part of the cpl_tx_pkt_lso and
+ * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
+ * message or, if we're doing a Large Send Offload, an LSO CPL message
+ * with an embedded TX Packet Write CPL message.
+ */
+ flits += (sizeof(struct fw_ulptx_wr) +
+ sizeof(struct chcr_ipsec_req) +
+ kctx_len +
+ sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+ return flits;
+}
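
Putting is_eth_imm() and calc_tx_sec_flits() together: a packet is inlined when its headers plus payload fit within MAX_IMM_TX_PKT_LEN (256) bytes, in which case the flit count is just the rounded-up total; otherwise an SGL is built for the skb head and fragments and the fixed WR/CPL/key-context overhead is added. A simplified standalone model (the struct sizes here are placeholders, not the real CPL layouts):

    #include <stdio.h>

    #define MAX_IMM_TX_PKT_LEN 256

    /* Placeholder overheads standing in for the real WR/CPL struct sizes. */
    enum { IPSEC_REQ_LEN = 72, TX_PKT_LEN = 16,
           ULPTX_WR_LEN = 16, PKT_CORE_LEN = 16 };

    static unsigned int sgl_len(unsigned int n)
    {
        n--;
        return (3 * n) / 2 + (n & 1) + 2;
    }

    static unsigned int tx_sec_flits(unsigned int pktlen, unsigned int nfrags,
                                     unsigned int kctx_len)
    {
        unsigned int hdr = IPSEC_REQ_LEN + kctx_len + TX_PKT_LEN;

        if (pktlen <= MAX_IMM_TX_PKT_LEN - hdr)    /* inline: data in the WR */
            return (pktlen + hdr + 7) / 8;
        return sgl_len(nfrags + 1) +               /* SGL for head + frags   */
               (ULPTX_WR_LEN + IPSEC_REQ_LEN + kctx_len + PKT_CORE_LEN) / 8;
    }

    int main(void)
    {
        printf("small pkt: %u flits\n", tx_sec_flits(100, 0, 48));
        printf("large pkt: %u flits\n", tx_sec_flits(1500, 2, 48));
        return 0;
    }
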
+
+inline void *copy_cpltx_pktxt(struct sk_buff *skb,
+ struct net_device *dev,
+ void *pos)
+{
+ struct adapter *adap;
+ struct port_info *pi;
+ struct sge_eth_txq *q;
+ struct cpl_tx_pkt_core *cpl;
+ u64 cntrl = 0;
+ u32 ctrl0, qidx;
+
+ pi = netdev_priv(dev);
+ adap = pi->adapter;
+ qidx = skb->queue_mapping;
+ q = &adap->sge.ethtxq[qidx + pi->first_qset];
+
+ cpl = (struct cpl_tx_pkt_core *)pos;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
+ ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
+ TXPKT_PF_V(adap->pf);
+ if (skb_vlan_tag_present(skb)) {
+ q->vlan_ins++;
+ cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
+ }
+
+ cpl->ctrl0 = htonl(ctrl0);
+ cpl->pack = htons(0);
+ cpl->len = htons(skb->len);
+ cpl->ctrl1 = cpu_to_be64(cntrl);
+
+ pos += sizeof(struct cpl_tx_pkt_core);
+ return pos;
+}
+
+inline void *copy_key_cpltx_pktxt(struct sk_buff *skb,
+ struct net_device *dev,
+ void *pos,
+ struct ipsec_sa_entry *sa_entry)
+{
+ struct adapter *adap;
+ struct port_info *pi;
+ struct sge_eth_txq *q;
+ unsigned int len, qidx;
+ struct _key_ctx *key_ctx;
+ int left, eoq, key_len;
+
+ pi = netdev_priv(dev);
+ adap = pi->adapter;
+ qidx = skb->queue_mapping;
+ q = &adap->sge.ethtxq[qidx + pi->first_qset];
+ len = sa_entry->enckey_len + sizeof(struct cpl_tx_pkt_core);
+ key_len = sa_entry->kctx_len;
+
+ /* end of queue, reset pos to start of queue */
+ eoq = (void *)q->q.stat - pos;
+ left = eoq;
+ if (!eoq) {
+ pos = q->q.desc;
+ left = 64 * q->q.size;
+ }
+
+ /* Copy the Key context header */
+ key_ctx = (struct _key_ctx *)pos;
+ key_ctx->ctx_hdr = sa_entry->key_ctx_hdr;
+ memcpy(key_ctx->salt, sa_entry->salt, MAX_SALT);
+ pos += sizeof(struct _key_ctx);
+ left -= sizeof(struct _key_ctx);
+
+ if (likely(len <= left)) {
+ memcpy(key_ctx->key, sa_entry->key, key_len);
+ pos += key_len;
+ } else {
+ if (key_len <= left) {
+ memcpy(pos, sa_entry->key, key_len);
+ pos += key_len;
+ } else {
+ memcpy(pos, sa_entry->key, left);
+ memcpy(q->q.desc, sa_entry->key + left,
+ key_len - left);
+ pos = (u8 *)q->q.desc + (key_len - left);
+ }
+ }
+ /* Copy CPL TX PKT XT */
+ pos = copy_cpltx_pktxt(skb, dev, pos);
+
+ return pos;
+}
+
+inline void *chcr_crypto_wreq(struct sk_buff *skb,
+ struct net_device *dev,
+ void *pos,
+ int credits,
+ struct ipsec_sa_entry *sa_entry)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+ unsigned int immdatalen = 0;
+ unsigned int ivsize = GCM_ESP_IV_SIZE;
+ struct chcr_ipsec_wr *wr;
+ unsigned int flits;
+ u32 wr_mid;
+ int qidx = skb_get_queue_mapping(skb);
+ struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset];
+ unsigned int kctx_len = sa_entry->kctx_len;
+ int qid = q->q.cntxt_id;
+
+ atomic_inc(&adap->chcr_stats.ipsec_cnt);
+
+ flits = calc_tx_sec_flits(skb, kctx_len);
+
+ if (is_eth_imm(skb, kctx_len))
+ immdatalen = skb->len;
+
+ /* WR Header */
+ wr = (struct chcr_ipsec_wr *)pos;
+ wr->wreq.op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
+ wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
+
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ netif_tx_stop_queue(q->txq);
+ q->q.stops++;
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ }
+ wr_mid |= FW_ULPTX_WR_DATA_F;
+ wr->wreq.flowid_len16 = htonl(wr_mid);
+
+ /* ULPTX */
+ wr->req.ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(pi->port_id, qid);
+ wr->req.ulptx.len = htonl(DIV_ROUND_UP(flits, 2) - 1);
+
+ /* Sub-command */
+ wr->req.sc_imm.cmd_more = FILL_CMD_MORE(immdatalen);
+ wr->req.sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
+ sizeof(wr->req.key_ctx) +
+ kctx_len +
+ sizeof(struct cpl_tx_pkt_core) +
+ immdatalen);
+
+ /* CPL_SEC_PDU */
+ wr->req.sec_cpl.op_ivinsrtofst = htonl(
+ CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
+ CPL_TX_SEC_PDU_CPLLEN_V(2) |
+ CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
+ CPL_TX_SEC_PDU_IVINSRTOFST_V(
+ (skb_transport_offset(skb) +
+ sizeof(struct ip_esp_hdr) + 1)));
+
+ wr->req.sec_cpl.pldlen = htonl(skb->len);
+
+ wr->req.sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+ (skb_transport_offset(skb) + 1),
+ (skb_transport_offset(skb) +
+ sizeof(struct ip_esp_hdr)),
+ (skb_transport_offset(skb) +
+ sizeof(struct ip_esp_hdr) +
+ GCM_ESP_IV_SIZE + 1), 0);
+
+ wr->req.sec_cpl.cipherstop_lo_authinsert =
+ FILL_SEC_CPL_AUTHINSERT(0, skb_transport_offset(skb) +
+ sizeof(struct ip_esp_hdr) +
+ GCM_ESP_IV_SIZE + 1,
+ sa_entry->authsize,
+ sa_entry->authsize);
+ wr->req.sec_cpl.seqno_numivs =
+ FILL_SEC_CPL_SCMD0_SEQNO(CHCR_ENCRYPT_OP, 1,
+ CHCR_SCMD_CIPHER_MODE_AES_GCM,
+ CHCR_SCMD_AUTH_MODE_GHASH,
+ sa_entry->hmac_ctrl,
+ ivsize >> 1);
+ wr->req.sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
+ 0, 0, 0);
+
+ pos += sizeof(struct fw_ulptx_wr) +
+ sizeof(struct ulp_txpkt) +
+ sizeof(struct ulptx_idata) +
+ sizeof(struct cpl_tx_sec_pdu);
+
+ pos = copy_key_cpltx_pktxt(skb, dev, pos, sa_entry);
+
+ return pos;
+}
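/* Rough on-the-wire layout assembled by chcr_crypto_wreq() and the two
 * helpers above, pieced together from the sizes and copies in this file
 * (a sketch, not an authoritative hardware description):
 *
 *	fw_ulptx_wr         firmware work request header
 *	ulp_txpkt           ULPTX command / destination (port, queue)
 *	ulptx_idata         immediate-data sub-command and length
 *	cpl_tx_sec_pdu      crypto directives: IV offset, AAD/cipher/auth
 *	_key_ctx + key      key context header, salt, AES-GCM key material
 *	cpl_tx_pkt_core     classic Tx packet CPL (checksum, VLAN, length)
 *	payload or SGL      inline packet data, or a scatter/gather list
 */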
+
+/**
+ * flits_to_desc - returns the number of Tx descriptors for the given flits
+ * @n: the number of flits
+ *
+ * Returns the number of Tx descriptors needed for the supplied number
+ * of flits.
+ */
+static inline unsigned int flits_to_desc(unsigned int n)
+{
+ WARN_ON(n > SGE_MAX_WR_LEN / 8);
+ return DIV_ROUND_UP(n, 8);
+}
+
+static inline unsigned int txq_avail(const struct sge_txq *q)
+{
+ return q->size - 1 - q->in_use;
+}
+
+static void eth_txq_stop(struct sge_eth_txq *q)
+{
+ netif_tx_stop_queue(q->txq);
+ q->q.stops++;
+}
+
+static inline void txq_advance(struct sge_txq *q, unsigned int n)
+{
+ q->in_use += n;
+ q->pidx += n;
+ if (q->pidx >= q->size)
+ q->pidx -= q->size;
+}
+
+/*
+ * chcr_ipsec_xmit() is called from the ULD Tx handler
+ */
+int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct xfrm_state *x = xfrm_input_state(skb);
+ struct ipsec_sa_entry *sa_entry;
+ u64 *pos, *end, *before, *sgl;
+ int qidx, left, credits;
+ unsigned int flits = 0, ndesc, kctx_len;
+ struct adapter *adap;
+ struct sge_eth_txq *q;
+ struct port_info *pi;
+ dma_addr_t addr[MAX_SKB_FRAGS + 1];
+ bool immediate = false;
+
+ if (!x->xso.offload_handle)
+ return NETDEV_TX_BUSY;
+
+ sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
+ kctx_len = sa_entry->kctx_len;
+
+ if (skb->sp->len != 1) {
+out_free: dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ pi = netdev_priv(dev);
+ adap = pi->adapter;
+ qidx = skb->queue_mapping;
+ q = &adap->sge.ethtxq[qidx + pi->first_qset];
+
+ cxgb4_reclaim_completed_tx(adap, &q->q, true);
+
+ flits = calc_tx_sec_flits(skb, sa_entry->kctx_len);
+ ndesc = flits_to_desc(flits);
+ credits = txq_avail(&q->q) - ndesc;
+
+ if (unlikely(credits < 0)) {
+ eth_txq_stop(q);
+ dev_err(adap->pdev_dev,
+ "%s: Tx ring %u full while queue awake! cred:%d %d %d flits:%d\n",
+ dev->name, qidx, credits, ndesc, txq_avail(&q->q),
+ flits);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (is_eth_imm(skb, kctx_len))
+ immediate = true;
+
+ if (!immediate &&
+ unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
+ q->mapping_err++;
+ goto out_free;
+ }
+
+ pos = (u64 *)&q->q.desc[q->q.pidx];
+ before = (u64 *)pos;
+ end = (u64 *)pos + flits;
+ /* Setup IPSec CPL */
+ pos = (void *)chcr_crypto_wreq(skb, dev, (void *)pos,
+ credits, sa_entry);
+ if (before > (u64 *)pos) {
+ left = (u8 *)end - (u8 *)q->q.stat;
+ end = (void *)q->q.desc + left;
+ }
+ if (pos == (u64 *)q->q.stat) {
+ left = (u8 *)end - (u8 *)q->q.stat;
+ end = (void *)q->q.desc + left;
+ pos = (void *)q->q.desc;
+ }
+
+ sgl = (void *)pos;
+ if (immediate) {
+ cxgb4_inline_tx_skb(skb, &q->q, sgl);
+ dev_consume_skb_any(skb);
+ } else {
+ int last_desc;
+
+ cxgb4_write_sgl(skb, &q->q, (void *)sgl, end,
+ 0, addr);
+ skb_orphan(skb);
+
+ last_desc = q->q.pidx + ndesc - 1;
+ if (last_desc >= q->q.size)
+ last_desc -= q->q.size;
+ q->q.sdesc[last_desc].skb = skb;
+ q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)sgl;
+ }
+ txq_advance(&q->q, ndesc);
+
+ cxgb4_ring_tx_db(adap, &q->q, ndesc);
+ return NETDEV_TX_OK;
+}
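/* Back-of-envelope model of the Tx budgeting in chcr_ipsec_xmit(): flits
 * (8-byte words) round up to 8-flit hardware descriptors, and the queue
 * is stopped when fewer free descriptors than a safety threshold would
 * remain (ETHTXQ_STOP_THRES in the driver; the value here is made up).
 */
#include <stdio.h>

#define DESC_FLITS	8	/* flits per 64-byte Tx descriptor */
#define STOP_THRES	4	/* illustrative stop threshold     */

int main(void)
{
	unsigned int flits = 21;	/* e.g. WR + key context + SGL */
	unsigned int ndesc = (flits + DESC_FLITS - 1) / DESC_FLITS;
	int credits = 6 /* txq_avail() */ - (int)ndesc;

	printf("%u flits -> %u descs, %d credits left%s\n", flits, ndesc,
	       credits, credits < STOP_THRES ? " (stop the queue)" : "");
	return 0;
}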
diff --git a/drivers/crypto/exynos-rng.c b/drivers/crypto/exynos-rng.c
index 451620b475a0..86f5f459762e 100644
--- a/drivers/crypto/exynos-rng.c
+++ b/drivers/crypto/exynos-rng.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* exynos-rng.c - Random Number Generator driver for the Exynos
*
@@ -6,15 +7,6 @@
* Loosely based on old driver from drivers/char/hw_random/exynos-rng.c:
* Copyright (C) 2012 Samsung Electronics
* Jonghwa Lee <jonghwa3.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation;
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
@@ -22,12 +14,18 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <crypto/internal/rng.h>
#define EXYNOS_RNG_CONTROL 0x0
#define EXYNOS_RNG_STATUS 0x10
+
+#define EXYNOS_RNG_SEED_CONF 0x14
+#define EXYNOS_RNG_GEN_PRNG BIT(1)
+
#define EXYNOS_RNG_SEED_BASE 0x140
#define EXYNOS_RNG_SEED(n) (EXYNOS_RNG_SEED_BASE + (n * 0x4))
#define EXYNOS_RNG_OUT_BASE 0x160
@@ -43,13 +41,21 @@
#define EXYNOS_RNG_SEED_REGS 5
#define EXYNOS_RNG_SEED_SIZE (EXYNOS_RNG_SEED_REGS * 4)
+enum exynos_prng_type {
+ EXYNOS_PRNG_UNKNOWN = 0,
+ EXYNOS_PRNG_EXYNOS4,
+ EXYNOS_PRNG_EXYNOS5,
+};
+
/*
- * Driver re-seeds itself with generated random numbers to increase
- * the randomness.
+ * Driver re-seeds itself with generated random numbers to hinder
+ * backtracking of the original seed.
*
* Time for next re-seed in ms.
*/
-#define EXYNOS_RNG_RESEED_TIME 100
+#define EXYNOS_RNG_RESEED_TIME 1000
+#define EXYNOS_RNG_RESEED_BYTES 65536
+
/*
* In polling mode, do not wait infinitely for the engine to finish the work.
*/
@@ -63,13 +69,17 @@ struct exynos_rng_ctx {
/* Device associated memory */
struct exynos_rng_dev {
struct device *dev;
+ enum exynos_prng_type type;
void __iomem *mem;
struct clk *clk;
+ struct mutex lock;
/* Generated numbers stored for seeding during resume */
u8 seed_save[EXYNOS_RNG_SEED_SIZE];
unsigned int seed_save_len;
/* Time of last seeding in jiffies */
unsigned long last_seeding;
+ /* Bytes generated since last seeding */
+ unsigned long bytes_seeding;
};
static struct exynos_rng_dev *exynos_rng_dev;
@@ -114,39 +124,12 @@ static int exynos_rng_set_seed(struct exynos_rng_dev *rng,
}
rng->last_seeding = jiffies;
+ rng->bytes_seeding = 0;
return 0;
}
/*
- * Read from output registers and put the data under 'dst' array,
- * up to dlen bytes.
- *
- * Returns number of bytes actually stored in 'dst' (dlen
- * or EXYNOS_RNG_SEED_SIZE).
- */
-static unsigned int exynos_rng_copy_random(struct exynos_rng_dev *rng,
- u8 *dst, unsigned int dlen)
-{
- unsigned int cnt = 0;
- int i, j;
- u32 val;
-
- for (j = 0; j < EXYNOS_RNG_SEED_REGS; j++) {
- val = exynos_rng_readl(rng, EXYNOS_RNG_OUT(j));
-
- for (i = 0; i < 4; i++) {
- dst[cnt] = val & 0xff;
- val >>= 8;
- if (++cnt >= dlen)
- return cnt;
- }
- }
-
- return cnt;
-}
-
-/*
* Start the engine and poll for finish. Then read from output registers
* filling the 'dst' buffer up to 'dlen' bytes or up to size of generated
* random data (EXYNOS_RNG_SEED_SIZE).
@@ -160,8 +143,13 @@ static int exynos_rng_get_random(struct exynos_rng_dev *rng,
{
int retry = EXYNOS_RNG_WAIT_RETRIES;
- exynos_rng_writel(rng, EXYNOS_RNG_CONTROL_START,
- EXYNOS_RNG_CONTROL);
+ if (rng->type == EXYNOS_PRNG_EXYNOS4) {
+ exynos_rng_writel(rng, EXYNOS_RNG_CONTROL_START,
+ EXYNOS_RNG_CONTROL);
+ } else if (rng->type == EXYNOS_PRNG_EXYNOS5) {
+ exynos_rng_writel(rng, EXYNOS_RNG_GEN_PRNG,
+ EXYNOS_RNG_SEED_CONF);
+ }
while (!(exynos_rng_readl(rng,
EXYNOS_RNG_STATUS) & EXYNOS_RNG_STATUS_RNG_DONE) && --retry)
@@ -173,7 +161,9 @@ static int exynos_rng_get_random(struct exynos_rng_dev *rng,
/* Clear status bit */
exynos_rng_writel(rng, EXYNOS_RNG_STATUS_RNG_DONE,
EXYNOS_RNG_STATUS);
- *read = exynos_rng_copy_random(rng, dst, dlen);
+ *read = min_t(size_t, dlen, EXYNOS_RNG_SEED_SIZE);
+ memcpy_fromio(dst, rng->mem + EXYNOS_RNG_OUT_BASE, *read);
+ rng->bytes_seeding += *read;
return 0;
}
@@ -187,13 +177,18 @@ static void exynos_rng_reseed(struct exynos_rng_dev *rng)
unsigned int read = 0;
u8 seed[EXYNOS_RNG_SEED_SIZE];
- if (time_before(now, next_seeding))
+ if (time_before(now, next_seeding) &&
+ rng->bytes_seeding < EXYNOS_RNG_RESEED_BYTES)
return;
if (exynos_rng_get_random(rng, seed, sizeof(seed), &read))
return;
exynos_rng_set_seed(rng, seed, read);
+
+ /* Briefly drop the lock so other users can make progress. */
+ mutex_unlock(&rng->lock);
+ mutex_lock(&rng->lock);
}
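/* Plain-C model of the reseed policy above: reseed once either 1000 ms
 * have elapsed since the last seeding or 64 KiB were generated from the
 * current seed, whichever comes first (constants as defined earlier in
 * this patch; the helper name is ours).
 */
#include <stdbool.h>

#define RESEED_MS	1000
#define RESEED_BYTES	65536UL

static bool need_reseed(unsigned long now_ms, unsigned long last_seed_ms,
			unsigned long bytes_since_seed)
{
	return (now_ms - last_seed_ms >= RESEED_MS) ||
	       (bytes_since_seed >= RESEED_BYTES);
}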
static int exynos_rng_generate(struct crypto_rng *tfm,
@@ -209,6 +204,7 @@ static int exynos_rng_generate(struct crypto_rng *tfm,
if (ret)
return ret;
+ mutex_lock(&rng->lock);
do {
ret = exynos_rng_get_random(rng, dst, dlen, &read);
if (ret)
@@ -219,6 +215,7 @@ static int exynos_rng_generate(struct crypto_rng *tfm,
exynos_rng_reseed(rng);
} while (dlen > 0);
+ mutex_unlock(&rng->lock);
clk_disable_unprepare(rng->clk);
@@ -236,7 +233,9 @@ static int exynos_rng_seed(struct crypto_rng *tfm, const u8 *seed,
if (ret)
return ret;
+ mutex_lock(&rng->lock);
ret = exynos_rng_set_seed(ctx->rng, seed, slen);
+ mutex_unlock(&rng->lock);
clk_disable_unprepare(rng->clk);
@@ -259,7 +258,7 @@ static struct rng_alg exynos_rng_alg = {
.base = {
.cra_name = "stdrng",
.cra_driver_name = "exynos_rng",
- .cra_priority = 100,
+ .cra_priority = 300,
.cra_ctxsize = sizeof(struct exynos_rng_ctx),
.cra_module = THIS_MODULE,
.cra_init = exynos_rng_kcapi_init,
@@ -279,6 +278,10 @@ static int exynos_rng_probe(struct platform_device *pdev)
if (!rng)
return -ENOMEM;
+ rng->type = (enum exynos_prng_type)of_device_get_match_data(&pdev->dev);
+
+ mutex_init(&rng->lock);
+
rng->dev = &pdev->dev;
rng->clk = devm_clk_get(&pdev->dev, "secss");
if (IS_ERR(rng->clk)) {
@@ -329,9 +332,14 @@ static int __maybe_unused exynos_rng_suspend(struct device *dev)
if (ret)
return ret;
+ mutex_lock(&rng->lock);
+
/* Get new random numbers and store them for seeding on resume. */
exynos_rng_get_random(rng, rng->seed_save, sizeof(rng->seed_save),
&(rng->seed_save_len));
+
+ mutex_unlock(&rng->lock);
+
dev_dbg(rng->dev, "Stored %u bytes for seeding on system resume\n",
rng->seed_save_len);
@@ -354,8 +362,12 @@ static int __maybe_unused exynos_rng_resume(struct device *dev)
if (ret)
return ret;
+ mutex_lock(&rng->lock);
+
ret = exynos_rng_set_seed(rng, rng->seed_save, rng->seed_save_len);
+ mutex_unlock(&rng->lock);
+
clk_disable_unprepare(rng->clk);
return ret;
@@ -367,6 +379,10 @@ static SIMPLE_DEV_PM_OPS(exynos_rng_pm_ops, exynos_rng_suspend,
static const struct of_device_id exynos_rng_dt_match[] = {
{
.compatible = "samsung,exynos4-rng",
+ .data = (const void *)EXYNOS_PRNG_EXYNOS4,
+ }, {
+ .compatible = "samsung,exynos5250-prng",
+ .data = (const void *)EXYNOS_PRNG_EXYNOS5,
},
{ },
};
@@ -386,4 +402,4 @@ module_platform_driver(exynos_rng_driver);
MODULE_DESCRIPTION("Exynos H/W Random Number Generator driver");
MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
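/* The variant selection above is the stock of_device_get_match_data()
 * pattern: the enum value travels through the of_device_id .data pointer
 * and is cast back in probe(). Condensed shape (kernel context assumed,
 * the "vendor,ip-*" names are placeholders):
 *
 *	static const struct of_device_id my_dt_match[] = {
 *		{ .compatible = "vendor,ip-v1", .data = (const void *)V1 },
 *		{ .compatible = "vendor,ip-v2", .data = (const void *)V2 },
 *		{ },
 *	};
 *
 *	type = (enum my_type)of_device_get_match_data(&pdev->dev);
 *
 * An unmatched device yields NULL here, which is why the enum earlier in
 * this patch reserves 0 for EXYNOS_PRNG_UNKNOWN.
 */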
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index e09d4055b19e..a5a36fe7bf2c 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -2579,6 +2579,7 @@ err_out_unmap_bars:
for (i = 0; i < 3; ++i)
if (dev->bar[i])
iounmap(dev->bar[i]);
+ kfree(dev);
err_out_free_regions:
pci_release_regions(pdev);
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 4bcef78a08aa..225e74a7f724 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -108,10 +108,10 @@ static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
- priv->base + ctrl);
+ EIP197_PE(priv) + ctrl);
/* Enable access to the program memory */
- writel(prog_en, priv->base + EIP197_PE_ICE_RAM_CTRL);
+ writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL);
/* Write the firmware */
for (i = 0; i < fw->size / sizeof(u32); i++)
@@ -119,12 +119,12 @@ static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));
/* Disable access to the program memory */
- writel(0, priv->base + EIP197_PE_ICE_RAM_CTRL);
+ writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL);
/* Release engine from reset */
- val = readl(priv->base + ctrl);
+ val = readl(EIP197_PE(priv) + ctrl);
val &= ~EIP197_PE_ICE_x_CTRL_SW_RESET;
- writel(val, priv->base + ctrl);
+ writel(val, EIP197_PE(priv) + ctrl);
}
static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
@@ -145,14 +145,14 @@ static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
}
/* Clear the scratchpad memory */
- val = readl(priv->base + EIP197_PE_ICE_SCRATCH_CTRL);
+ val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);
val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
- writel(val, priv->base + EIP197_PE_ICE_SCRATCH_CTRL);
+ writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL);
- memset(priv->base + EIP197_PE_ICE_SCRATCH_RAM, 0,
+ memset(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM, 0,
EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));
eip197_write_firmware(priv, fw[FW_IFPP], EIP197_PE_ICE_FPP_CTRL,
@@ -173,7 +173,7 @@ static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
u32 hdw, cd_size_rnd, val;
int i;
- hdw = readl(priv->base + EIP197_HIA_OPTIONS);
+ hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
hdw &= GENMASK(27, 25);
hdw >>= 25;
@@ -182,26 +182,25 @@ static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
for (i = 0; i < priv->config.rings; i++) {
/* ring base address */
writel(lower_32_bits(priv->ring[i].cdr.base_dma),
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
writel(upper_32_bits(priv->ring[i].cdr.base_dma),
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) |
priv->config.cd_size,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_DESC_SIZE);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
writel(((EIP197_FETCH_COUNT * (cd_size_rnd << hdw)) << 16) |
(EIP197_FETCH_COUNT * priv->config.cd_offset),
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_CFG);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
/* Configure DMA tx control */
val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
- writel(val,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_DMA_CFG);
+ writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);
/* clear any pending interrupt */
writel(GENMASK(5, 0),
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_STAT);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
}
return 0;
@@ -212,7 +211,7 @@ static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
u32 hdw, rd_size_rnd, val;
int i;
- hdw = readl(priv->base + EIP197_HIA_OPTIONS);
+ hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
hdw &= GENMASK(27, 25);
hdw >>= 25;
@@ -221,33 +220,33 @@ static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
for (i = 0; i < priv->config.rings; i++) {
/* ring base address */
writel(lower_32_bits(priv->ring[i].rdr.base_dma),
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
writel(upper_32_bits(priv->ring[i].rdr.base_dma),
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) |
priv->config.rd_size,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_DESC_SIZE);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
writel(((EIP197_FETCH_COUNT * (rd_size_rnd << hdw)) << 16) |
(EIP197_FETCH_COUNT * priv->config.rd_offset),
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_CFG);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);
/* Configure DMA tx control */
val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUG;
writel(val,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_DMA_CFG);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);
/* clear any pending interrupt */
writel(GENMASK(7, 0),
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_STAT);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);
/* enable ring interrupt */
- val = readl(priv->base + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
+ val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
val |= EIP197_RDR_IRQ(i);
- writel(val, priv->base + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
+ writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
}
return 0;
@@ -259,39 +258,40 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
int i, ret;
/* Determine endianness and configure byte swap */
- version = readl(priv->base + EIP197_HIA_VERSION);
- val = readl(priv->base + EIP197_HIA_MST_CTRL);
+ version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION);
+ val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
if ((version & 0xffff) == EIP197_HIA_VERSION_BE)
val |= EIP197_MST_CTRL_BYTE_SWAP;
else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE)
val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24);
- writel(val, priv->base + EIP197_HIA_MST_CTRL);
-
+ writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
/* Configure wr/rd cache values */
writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
- priv->base + EIP197_MST_CTRL);
+ EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);
/* Interrupts reset */
/* Disable all global interrupts */
- writel(0, priv->base + EIP197_HIA_AIC_G_ENABLE_CTRL);
+ writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);
/* Clear any pending interrupt */
- writel(GENMASK(31, 0), priv->base + EIP197_HIA_AIC_G_ACK);
+ writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
/* Data Fetch Engine configuration */
/* Reset all DFE threads */
writel(EIP197_DxE_THR_CTRL_RESET_PE,
- priv->base + EIP197_HIA_DFE_THR_CTRL);
+ EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);
- /* Reset HIA input interface arbiter */
- writel(EIP197_HIA_RA_PE_CTRL_RESET,
- priv->base + EIP197_HIA_RA_PE_CTRL);
+ if (priv->version == EIP197) {
+ /* Reset HIA input interface arbiter */
+ writel(EIP197_HIA_RA_PE_CTRL_RESET,
+ EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL);
+ }
/* DMA transfer size to use */
val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
@@ -299,29 +299,32 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
- writel(val, priv->base + EIP197_HIA_DFE_CFG);
+ writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG);
/* Leave the DFE threads reset state */
- writel(0, priv->base + EIP197_HIA_DFE_THR_CTRL);
+ writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);
/* Configure the processing engine thresholds */
writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(9),
- priv->base + EIP197_PE_IN_DBUF_THRES);
+ EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES);
writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(7),
- priv->base + EIP197_PE_IN_TBUF_THRES);
+ EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES);
- /* enable HIA input interface arbiter and rings */
- writel(EIP197_HIA_RA_PE_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
- priv->base + EIP197_HIA_RA_PE_CTRL);
+ if (priv->version == EIP197) {
+ /* enable HIA input interface arbiter and rings */
+ writel(EIP197_HIA_RA_PE_CTRL_EN |
+ GENMASK(priv->config.rings - 1, 0),
+ EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL);
+ }
/* Data Store Engine configuration */
/* Reset all DSE threads */
writel(EIP197_DxE_THR_CTRL_RESET_PE,
- priv->base + EIP197_HIA_DSE_THR_CTRL);
+ EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);
/* Wait for all DSE threads to complete */
- while ((readl(priv->base + EIP197_HIA_DSE_THR_STAT) &
+ while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT) &
GENMASK(15, 12)) != GENMASK(15, 12))
;
@@ -330,15 +333,19 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
val |= EIP197_HIA_DSE_CFG_ALLWAYS_BUFFERABLE;
- val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
- writel(val, priv->base + EIP197_HIA_DSE_CFG);
+ /* FIXME: instability issues can occur on the EIP97, but disabling this
+ * impacts performance.
+ */
+ if (priv->version == EIP197)
+ val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
+ writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG);
/* Leave the DSE threads reset state */
- writel(0, priv->base + EIP197_HIA_DSE_THR_CTRL);
+ writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);
/* Configure the processing engine thresholds */
writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) | EIP197_PE_OUT_DBUF_THRES_MAX(8),
- priv->base + EIP197_PE_OUT_DBUF_THRES);
+ EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES);
/* Processing Engine configuration */
@@ -348,73 +355,75 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
val |= EIP197_ALG_SHA2;
- writel(val, priv->base + EIP197_PE_EIP96_FUNCTION_EN);
+ writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN);
/* Command Descriptor Rings prepare */
for (i = 0; i < priv->config.rings; i++) {
/* Clear interrupts for this ring */
writel(GENMASK(31, 0),
- priv->base + EIP197_HIA_AIC_R_ENABLE_CLR(i));
+ EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));
/* Disable external triggering */
- writel(0, priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_CFG);
+ writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
/* Clear the pending prepared counter */
writel(EIP197_xDR_PREP_CLR_COUNT,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PREP_COUNT);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);
/* Clear the pending processed counter */
writel(EIP197_xDR_PROC_CLR_COUNT,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PROC_COUNT);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);
writel(0,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PREP_PNTR);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
writel(0,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PROC_PNTR);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2,
- priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_RING_SIZE);
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
}
/* Result Descriptor Ring prepare */
for (i = 0; i < priv->config.rings; i++) {
/* Disable external triggering*/
- writel(0, priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_CFG);
+ writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);
/* Clear the pending prepared counter */
writel(EIP197_xDR_PREP_CLR_COUNT,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PREP_COUNT);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);
/* Clear the pending processed counter */
writel(EIP197_xDR_PROC_CLR_COUNT,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PROC_COUNT);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);
writel(0,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PREP_PNTR);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
writel(0,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PROC_PNTR);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
/* Ring size */
writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2,
- priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_RING_SIZE);
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
}
/* Enable command descriptor rings */
writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
- priv->base + EIP197_HIA_DFE_THR_CTRL);
+ EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL);
/* Enable result descriptor rings */
writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
- priv->base + EIP197_HIA_DSE_THR_CTRL);
+ EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL);
/* Clear any HIA interrupt */
- writel(GENMASK(30, 20), priv->base + EIP197_HIA_AIC_G_ACK);
+ writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
- eip197_trc_cache_init(priv);
+ if (priv->version == EIP197) {
+ eip197_trc_cache_init(priv);
- ret = eip197_load_firmwares(priv);
- if (ret)
- return ret;
+ ret = eip197_load_firmwares(priv);
+ if (ret)
+ return ret;
+ }
safexcel_hw_setup_cdesc_rings(priv);
safexcel_hw_setup_rdesc_rings(priv);
@@ -422,6 +431,23 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
return 0;
}
+/* Called with the ring's lock held */
+static int safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
+ int ring, int reqs)
+{
+ int coal = min_t(int, reqs, EIP197_MAX_BATCH_SZ);
+
+ if (!coal)
+ return 0;
+
+ /* Configure when we want an interrupt */
+ writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
+ EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
+ EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
+
+ return coal;
+}
+
void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
{
struct crypto_async_request *req, *backlog;
@@ -429,34 +455,36 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
struct safexcel_request *request;
int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;
- priv->ring[ring].need_dequeue = false;
+ /* If a request wasn't properly dequeued because of a lack of resources,
+ * process it first.
+ */
+ req = priv->ring[ring].req;
+ backlog = priv->ring[ring].backlog;
+ if (req)
+ goto handle_req;
- do {
+ while (true) {
spin_lock_bh(&priv->ring[ring].queue_lock);
backlog = crypto_get_backlog(&priv->ring[ring].queue);
req = crypto_dequeue_request(&priv->ring[ring].queue);
spin_unlock_bh(&priv->ring[ring].queue_lock);
- if (!req)
+ if (!req) {
+ priv->ring[ring].req = NULL;
+ priv->ring[ring].backlog = NULL;
goto finalize;
+ }
+handle_req:
request = kzalloc(sizeof(*request), EIP197_GFP_FLAGS(*req));
- if (!request) {
- spin_lock_bh(&priv->ring[ring].queue_lock);
- crypto_enqueue_request(&priv->ring[ring].queue, req);
- spin_unlock_bh(&priv->ring[ring].queue_lock);
-
- priv->ring[ring].need_dequeue = true;
- goto finalize;
- }
+ if (!request)
+ goto request_failed;
ctx = crypto_tfm_ctx(req->tfm);
ret = ctx->send(req, ring, request, &commands, &results);
if (ret) {
kfree(request);
- req->complete(req, ret);
- priv->ring[ring].need_dequeue = true;
- goto finalize;
+ goto request_failed;
}
if (backlog)
@@ -468,30 +496,39 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
cdesc += commands;
rdesc += results;
- } while (nreq++ < EIP197_MAX_BATCH_SZ);
+ nreq++;
+ }
+
+request_failed:
+ /* Not enough resources to handle all the requests. Bail out and save
+ * the request and the backlog for the next dequeue call (per-ring).
+ */
+ priv->ring[ring].req = req;
+ priv->ring[ring].backlog = backlog;
finalize:
- if (nreq == EIP197_MAX_BATCH_SZ)
- priv->ring[ring].need_dequeue = true;
- else if (!nreq)
+ if (!nreq)
return;
- spin_lock_bh(&priv->ring[ring].lock);
+ spin_lock_bh(&priv->ring[ring].egress_lock);
- /* Configure when we want an interrupt */
- writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
- EIP197_HIA_RDR_THRESH_PROC_PKT(nreq),
- priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_THRESH);
+ if (!priv->ring[ring].busy) {
+ nreq -= safexcel_try_push_requests(priv, ring, nreq);
+ if (nreq)
+ priv->ring[ring].busy = true;
+ }
+
+ priv->ring[ring].requests_left += nreq;
+
+ spin_unlock_bh(&priv->ring[ring].egress_lock);
/* let the RDR know we have pending descriptors */
writel((rdesc * priv->config.rd_offset) << 2,
- priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PREP_COUNT);
+ EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
/* let the CDR know we have pending descriptors */
writel((cdesc * priv->config.cd_offset) << 2,
- priv->base + EIP197_HIA_CDR(ring) + EIP197_HIA_xDR_PREP_COUNT);
-
- spin_unlock_bh(&priv->ring[ring].lock);
+ EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
}
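/* Compilable model of the busy/requests_left accounting introduced by
 * this rework (illustrative; the ring-register write is elided): the
 * dequeue path arms the result-threshold interrupt for at most one batch
 * and marks the ring busy; completions re-arm it for whatever is left.
 */
#define MAX_BATCH 64

struct ring_state { int requests_left; int busy; };

static int try_push(struct ring_state *r, int reqs)
{
	int coal = reqs < MAX_BATCH ? reqs : MAX_BATCH;

	(void)r;	/* the driver writes EIP197_HIA_xDR_THRESH here */
	return coal;
}

static void on_enqueue(struct ring_state *r, int nreq)	/* dequeue side */
{
	if (!r->busy) {
		nreq -= try_push(r, nreq);
		if (nreq)
			r->busy = 1;
	}
	r->requests_left += nreq;
}

static void on_results(struct ring_state *r)		/* IRQ thread side */
{
	int done = try_push(r, r->requests_left);

	r->requests_left -= done;
	if (!done && !r->requests_left)
		r->busy = 0;
}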
void safexcel_free_context(struct safexcel_crypto_priv *priv,
@@ -540,7 +577,6 @@ void safexcel_inv_complete(struct crypto_async_request *req, int error)
}
int safexcel_invalidate_cache(struct crypto_async_request *async,
- struct safexcel_context *ctx,
struct safexcel_crypto_priv *priv,
dma_addr_t ctxr_dma, int ring,
struct safexcel_request *request)
@@ -587,14 +623,17 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
{
struct safexcel_request *sreq;
struct safexcel_context *ctx;
- int ret, i, nreq, ndesc = 0;
+ int ret, i, nreq, ndesc, tot_descs, done;
bool should_complete;
- nreq = readl(priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PROC_COUNT);
- nreq >>= 24;
- nreq &= GENMASK(6, 0);
+handle_results:
+ tot_descs = 0;
+
+ nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
+ nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
+ nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
if (!nreq)
- return;
+ goto requests_left;
for (i = 0; i < nreq; i++) {
spin_lock_bh(&priv->ring[ring].egress_lock);
@@ -609,13 +648,9 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
if (ndesc < 0) {
kfree(sreq);
dev_err(priv->dev, "failed to handle result (%d)", ndesc);
- return;
+ goto acknowledge;
}
- writel(EIP197_xDR_PROC_xD_PKT(1) |
- EIP197_xDR_PROC_xD_COUNT(ndesc * priv->config.rd_offset),
- priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PROC_COUNT);
-
if (should_complete) {
local_bh_disable();
sreq->req->complete(sreq->req, ret);
@@ -623,19 +658,41 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
}
kfree(sreq);
+ tot_descs += ndesc;
}
+
+acknowledge:
+ if (i) {
+ writel(EIP197_xDR_PROC_xD_PKT(i) |
+ EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
+ EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
+ }
+
+ /* If the number of requests overflowed the counter, try to process more
+ * requests.
+ */
+ if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
+ goto handle_results;
+
+requests_left:
+ spin_lock_bh(&priv->ring[ring].egress_lock);
+
+ done = safexcel_try_push_requests(priv, ring,
+ priv->ring[ring].requests_left);
+
+ priv->ring[ring].requests_left -= done;
+ if (!done && !priv->ring[ring].requests_left)
+ priv->ring[ring].busy = false;
+
+ spin_unlock_bh(&priv->ring[ring].egress_lock);
}
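/* Why the handler above loops while nreq equals the packet-count mask:
 * the processed-packet field of EIP197_HIA_xDR_PROC_COUNT is only 7 bits
 * wide, so a saturated read merely means "127 or more packets pending".
 * Standalone model:
 */
#include <stdio.h>

#define PKT_MASK 0x7f	/* GENMASK(6, 0) */

static unsigned int hw_pending = 300;	/* pretend packets in the ring */

static unsigned int read_proc_count(void)
{
	unsigned int n = hw_pending > PKT_MASK ? PKT_MASK : hw_pending;

	hw_pending -= n;	/* acknowledging consumes them */
	return n;
}

int main(void)
{
	unsigned int n, total = 0;

	do {
		n = read_proc_count();
		total += n;
	} while (n == PKT_MASK);	/* saturated: poll again */

	printf("handled %u packets\n", total);
	return 0;
}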
-static void safexcel_handle_result_work(struct work_struct *work)
+static void safexcel_dequeue_work(struct work_struct *work)
{
struct safexcel_work_data *data =
container_of(work, struct safexcel_work_data, work);
- struct safexcel_crypto_priv *priv = data->priv;
-
- safexcel_handle_result_descriptor(priv, data->ring);
- if (priv->ring[data->ring].need_dequeue)
- safexcel_dequeue(data->priv, data->ring);
+ safexcel_dequeue(data->priv, data->ring);
}
struct safexcel_ring_irq_data {
@@ -647,16 +704,16 @@ static irqreturn_t safexcel_irq_ring(int irq, void *data)
{
struct safexcel_ring_irq_data *irq_data = data;
struct safexcel_crypto_priv *priv = irq_data->priv;
- int ring = irq_data->ring;
+ int ring = irq_data->ring, rc = IRQ_NONE;
u32 status, stat;
- status = readl(priv->base + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
+ status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
if (!status)
- return IRQ_NONE;
+ return rc;
/* RDR interrupts */
if (status & EIP197_RDR_IRQ(ring)) {
- stat = readl(priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_STAT);
+ stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
if (unlikely(stat & EIP197_xDR_ERR)) {
/*
@@ -666,22 +723,37 @@ static irqreturn_t safexcel_irq_ring(int irq, void *data)
*/
dev_err(priv->dev, "RDR: fatal error.");
} else if (likely(stat & EIP197_xDR_THRESH)) {
- queue_work(priv->ring[ring].workqueue, &priv->ring[ring].work_data.work);
+ rc = IRQ_WAKE_THREAD;
}
/* ACK the interrupts */
writel(stat & 0xff,
- priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_STAT);
+ EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
}
/* ACK the interrupts */
- writel(status, priv->base + EIP197_HIA_AIC_R_ACK(ring));
+ writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));
+
+ return rc;
+}
+
+static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
+{
+ struct safexcel_ring_irq_data *irq_data = data;
+ struct safexcel_crypto_priv *priv = irq_data->priv;
+ int ring = irq_data->ring;
+
+ safexcel_handle_result_descriptor(priv, ring);
+
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
return IRQ_HANDLED;
}
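/* The interrupt rework above follows the kernel's standard hard/threaded
 * IRQ split: the top half only checks and acks the ring interrupt and
 * returns IRQ_WAKE_THREAD; the threaded half then walks the result
 * descriptors in a context that may sleep. Condensed shape (kernel
 * context assumed, names ours):
 *
 *	static irqreturn_t top_half(int irq, void *data)
 *	{
 *		if (!pending(data))
 *			return IRQ_NONE;
 *		ack(data);
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 *	static irqreturn_t bottom_half(int irq, void *data)
 *	{
 *		handle_results(data);
 *		return IRQ_HANDLED;
 *	}
 *
 *	devm_request_threaded_irq(dev, irq, top_half, bottom_half,
 *				  IRQF_ONESHOT, dev_name(dev), data);
 */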
static int safexcel_request_ring_irq(struct platform_device *pdev, const char *name,
irq_handler_t handler,
+ irq_handler_t threaded_handler,
struct safexcel_ring_irq_data *ring_irq_priv)
{
int ret, irq = platform_get_irq_byname(pdev, name);
@@ -691,8 +763,9 @@ static int safexcel_request_ring_irq(struct platform_device *pdev, const char *n
return irq;
}
- ret = devm_request_irq(&pdev->dev, irq, handler, 0,
- dev_name(&pdev->dev), ring_irq_priv);
+ ret = devm_request_threaded_irq(&pdev->dev, irq, handler,
+ threaded_handler, IRQF_ONESHOT,
+ dev_name(&pdev->dev), ring_irq_priv);
if (ret) {
dev_err(&pdev->dev, "unable to request IRQ %d\n", irq);
return ret;
@@ -755,11 +828,11 @@ static void safexcel_configure(struct safexcel_crypto_priv *priv)
{
u32 val, mask;
- val = readl(priv->base + EIP197_HIA_OPTIONS);
+ val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
val = (val & GENMASK(27, 25)) >> 25;
mask = BIT(val) - 1;
- val = readl(priv->base + EIP197_HIA_OPTIONS);
+ val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);
priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32));
@@ -769,6 +842,35 @@ static void safexcel_configure(struct safexcel_crypto_priv *priv)
priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
}
+static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
+{
+ struct safexcel_register_offsets *offsets = &priv->offsets;
+
+ if (priv->version == EIP197) {
+ offsets->hia_aic = EIP197_HIA_AIC_BASE;
+ offsets->hia_aic_g = EIP197_HIA_AIC_G_BASE;
+ offsets->hia_aic_r = EIP197_HIA_AIC_R_BASE;
+ offsets->hia_aic_xdr = EIP197_HIA_AIC_xDR_BASE;
+ offsets->hia_dfe = EIP197_HIA_DFE_BASE;
+ offsets->hia_dfe_thr = EIP197_HIA_DFE_THR_BASE;
+ offsets->hia_dse = EIP197_HIA_DSE_BASE;
+ offsets->hia_dse_thr = EIP197_HIA_DSE_THR_BASE;
+ offsets->hia_gen_cfg = EIP197_HIA_GEN_CFG_BASE;
+ offsets->pe = EIP197_PE_BASE;
+ } else {
+ offsets->hia_aic = EIP97_HIA_AIC_BASE;
+ offsets->hia_aic_g = EIP97_HIA_AIC_G_BASE;
+ offsets->hia_aic_r = EIP97_HIA_AIC_R_BASE;
+ offsets->hia_aic_xdr = EIP97_HIA_AIC_xDR_BASE;
+ offsets->hia_dfe = EIP97_HIA_DFE_BASE;
+ offsets->hia_dfe_thr = EIP97_HIA_DFE_THR_BASE;
+ offsets->hia_dse = EIP97_HIA_DSE_BASE;
+ offsets->hia_dse_thr = EIP97_HIA_DSE_THR_BASE;
+ offsets->hia_gen_cfg = EIP97_HIA_GEN_CFG_BASE;
+ offsets->pe = EIP97_PE_BASE;
+ }
+}
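/* Standalone model of the register indirection introduced here: every
 * access becomes MMIO base + per-variant block offset + register offset,
 * letting EIP97 and EIP197 share one code path. The block and register
 * offsets are taken from the header changes below; the MMIO base is
 * obviously made up.
 */
#include <stdint.h>
#include <stdio.h>

struct offsets { uint32_t hia_aic_g; };

static const struct offsets eip197 = { .hia_aic_g = 0x90000 };
static const struct offsets eip97  = { .hia_aic_g = 0x00000 };

#define HIA_OPTIONS 0xfff8	/* register offset inside the AIC_G block */

int main(void)
{
	uintptr_t base = 0x10000000;	/* pretend ioremap() result */

	printf("EIP197 HIA_OPTIONS at %#lx\n",
	       (unsigned long)(base + eip197.hia_aic_g + HIA_OPTIONS));
	printf("EIP97  HIA_OPTIONS at %#lx\n",
	       (unsigned long)(base + eip97.hia_aic_g + HIA_OPTIONS));
	return 0;
}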
+
static int safexcel_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -781,6 +883,9 @@ static int safexcel_probe(struct platform_device *pdev)
return -ENOMEM;
priv->dev = dev;
+ priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);
+
+ safexcel_init_register_offsets(priv);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->base = devm_ioremap_resource(dev, res);
@@ -839,6 +944,7 @@ static int safexcel_probe(struct platform_device *pdev)
snprintf(irq_name, 6, "ring%d", i);
irq = safexcel_request_ring_irq(pdev, irq_name, safexcel_irq_ring,
+ safexcel_irq_ring_thread,
ring_irq);
if (irq < 0) {
ret = irq;
@@ -847,7 +953,7 @@ static int safexcel_probe(struct platform_device *pdev)
priv->ring[i].work_data.priv = priv;
priv->ring[i].work_data.ring = i;
- INIT_WORK(&priv->ring[i].work_data.work, safexcel_handle_result_work);
+ INIT_WORK(&priv->ring[i].work_data.work, safexcel_dequeue_work);
snprintf(wq_name, 9, "wq_ring%d", i);
priv->ring[i].workqueue = create_singlethread_workqueue(wq_name);
@@ -856,6 +962,9 @@ static int safexcel_probe(struct platform_device *pdev)
goto err_clk;
}
+ priv->ring[i].requests_left = 0;
+ priv->ring[i].busy = false;
+
crypto_init_queue(&priv->ring[i].queue,
EIP197_DEFAULT_RING_SIZE);
@@ -903,7 +1012,14 @@ static int safexcel_remove(struct platform_device *pdev)
}
static const struct of_device_id safexcel_of_match_table[] = {
- { .compatible = "inside-secure,safexcel-eip197" },
+ {
+ .compatible = "inside-secure,safexcel-eip97",
+ .data = (void *)EIP97,
+ },
+ {
+ .compatible = "inside-secure,safexcel-eip197",
+ .data = (void *)EIP197,
+ },
{},
};
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 304c5838c11a..4e219c21608b 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -19,64 +19,103 @@
#define EIP197_HIA_VERSION_BE 0x35ca
/* Static configuration */
-#define EIP197_DEFAULT_RING_SIZE 64
+#define EIP197_DEFAULT_RING_SIZE 400
#define EIP197_MAX_TOKENS 5
#define EIP197_MAX_RINGS 4
#define EIP197_FETCH_COUNT 1
-#define EIP197_MAX_BATCH_SZ EIP197_DEFAULT_RING_SIZE
+#define EIP197_MAX_BATCH_SZ 64
#define EIP197_GFP_FLAGS(base) ((base).flags & CRYPTO_TFM_REQ_MAY_SLEEP ? \
GFP_KERNEL : GFP_ATOMIC)
+/* Register base offsets */
+#define EIP197_HIA_AIC(priv) ((priv)->base + (priv)->offsets.hia_aic)
+#define EIP197_HIA_AIC_G(priv) ((priv)->base + (priv)->offsets.hia_aic_g)
+#define EIP197_HIA_AIC_R(priv) ((priv)->base + (priv)->offsets.hia_aic_r)
+#define EIP197_HIA_AIC_xDR(priv) ((priv)->base + (priv)->offsets.hia_aic_xdr)
+#define EIP197_HIA_DFE(priv) ((priv)->base + (priv)->offsets.hia_dfe)
+#define EIP197_HIA_DFE_THR(priv) ((priv)->base + (priv)->offsets.hia_dfe_thr)
+#define EIP197_HIA_DSE(priv) ((priv)->base + (priv)->offsets.hia_dse)
+#define EIP197_HIA_DSE_THR(priv) ((priv)->base + (priv)->offsets.hia_dse_thr)
+#define EIP197_HIA_GEN_CFG(priv) ((priv)->base + (priv)->offsets.hia_gen_cfg)
+#define EIP197_PE(priv) ((priv)->base + (priv)->offsets.pe)
+
+/* EIP197 base offsets */
+#define EIP197_HIA_AIC_BASE 0x90000
+#define EIP197_HIA_AIC_G_BASE 0x90000
+#define EIP197_HIA_AIC_R_BASE 0x90800
+#define EIP197_HIA_AIC_xDR_BASE 0x80000
+#define EIP197_HIA_DFE_BASE 0x8c000
+#define EIP197_HIA_DFE_THR_BASE 0x8c040
+#define EIP197_HIA_DSE_BASE 0x8d000
+#define EIP197_HIA_DSE_THR_BASE 0x8d040
+#define EIP197_HIA_GEN_CFG_BASE 0xf0000
+#define EIP197_PE_BASE 0xa0000
+
+/* EIP97 base offsets */
+#define EIP97_HIA_AIC_BASE 0x0
+#define EIP97_HIA_AIC_G_BASE 0x0
+#define EIP97_HIA_AIC_R_BASE 0x0
+#define EIP97_HIA_AIC_xDR_BASE 0x0
+#define EIP97_HIA_DFE_BASE 0xf000
+#define EIP97_HIA_DFE_THR_BASE 0xf200
+#define EIP97_HIA_DSE_BASE 0xf400
+#define EIP97_HIA_DSE_THR_BASE 0xf600
+#define EIP97_HIA_GEN_CFG_BASE 0x10000
+#define EIP97_PE_BASE 0x10000
+
/* CDR/RDR register offsets */
-#define EIP197_HIA_xDR_OFF(r) (0x80000 + (r) * 0x1000)
-#define EIP197_HIA_CDR(r) (EIP197_HIA_xDR_OFF(r))
-#define EIP197_HIA_RDR(r) (EIP197_HIA_xDR_OFF(r) + 0x800)
-#define EIP197_HIA_xDR_RING_BASE_ADDR_LO 0x0
-#define EIP197_HIA_xDR_RING_BASE_ADDR_HI 0x4
-#define EIP197_HIA_xDR_RING_SIZE 0x18
-#define EIP197_HIA_xDR_DESC_SIZE 0x1c
-#define EIP197_HIA_xDR_CFG 0x20
-#define EIP197_HIA_xDR_DMA_CFG 0x24
-#define EIP197_HIA_xDR_THRESH 0x28
-#define EIP197_HIA_xDR_PREP_COUNT 0x2c
-#define EIP197_HIA_xDR_PROC_COUNT 0x30
-#define EIP197_HIA_xDR_PREP_PNTR 0x34
-#define EIP197_HIA_xDR_PROC_PNTR 0x38
-#define EIP197_HIA_xDR_STAT 0x3c
+#define EIP197_HIA_xDR_OFF(priv, r) (EIP197_HIA_AIC_xDR(priv) + (r) * 0x1000)
+#define EIP197_HIA_CDR(priv, r) (EIP197_HIA_xDR_OFF(priv, r))
+#define EIP197_HIA_RDR(priv, r) (EIP197_HIA_xDR_OFF(priv, r) + 0x800)
+#define EIP197_HIA_xDR_RING_BASE_ADDR_LO 0x0000
+#define EIP197_HIA_xDR_RING_BASE_ADDR_HI 0x0004
+#define EIP197_HIA_xDR_RING_SIZE 0x0018
+#define EIP197_HIA_xDR_DESC_SIZE 0x001c
+#define EIP197_HIA_xDR_CFG 0x0020
+#define EIP197_HIA_xDR_DMA_CFG 0x0024
+#define EIP197_HIA_xDR_THRESH 0x0028
+#define EIP197_HIA_xDR_PREP_COUNT 0x002c
+#define EIP197_HIA_xDR_PROC_COUNT 0x0030
+#define EIP197_HIA_xDR_PREP_PNTR 0x0034
+#define EIP197_HIA_xDR_PROC_PNTR 0x0038
+#define EIP197_HIA_xDR_STAT 0x003c
/* register offsets */
-#define EIP197_HIA_DFE_CFG 0x8c000
-#define EIP197_HIA_DFE_THR_CTRL 0x8c040
-#define EIP197_HIA_DFE_THR_STAT 0x8c044
-#define EIP197_HIA_DSE_CFG 0x8d000
-#define EIP197_HIA_DSE_THR_CTRL 0x8d040
-#define EIP197_HIA_DSE_THR_STAT 0x8d044
-#define EIP197_HIA_RA_PE_CTRL 0x90010
-#define EIP197_HIA_RA_PE_STAT 0x90014
+#define EIP197_HIA_DFE_CFG 0x0000
+#define EIP197_HIA_DFE_THR_CTRL 0x0000
+#define EIP197_HIA_DFE_THR_STAT 0x0004
+#define EIP197_HIA_DSE_CFG 0x0000
+#define EIP197_HIA_DSE_THR_CTRL 0x0000
+#define EIP197_HIA_DSE_THR_STAT 0x0004
+#define EIP197_HIA_RA_PE_CTRL 0x0010
+#define EIP197_HIA_RA_PE_STAT 0x0014
#define EIP197_HIA_AIC_R_OFF(r) ((r) * 0x1000)
-#define EIP197_HIA_AIC_R_ENABLE_CTRL(r) (0x9e808 - EIP197_HIA_AIC_R_OFF(r))
-#define EIP197_HIA_AIC_R_ENABLED_STAT(r) (0x9e810 - EIP197_HIA_AIC_R_OFF(r))
-#define EIP197_HIA_AIC_R_ACK(r) (0x9e810 - EIP197_HIA_AIC_R_OFF(r))
-#define EIP197_HIA_AIC_R_ENABLE_CLR(r) (0x9e814 - EIP197_HIA_AIC_R_OFF(r))
-#define EIP197_HIA_AIC_G_ENABLE_CTRL 0x9f808
-#define EIP197_HIA_AIC_G_ENABLED_STAT 0x9f810
-#define EIP197_HIA_AIC_G_ACK 0x9f810
-#define EIP197_HIA_MST_CTRL 0x9fff4
-#define EIP197_HIA_OPTIONS 0x9fff8
-#define EIP197_HIA_VERSION 0x9fffc
-#define EIP197_PE_IN_DBUF_THRES 0xa0000
-#define EIP197_PE_IN_TBUF_THRES 0xa0100
-#define EIP197_PE_ICE_SCRATCH_RAM 0xa0800
-#define EIP197_PE_ICE_PUE_CTRL 0xa0c80
-#define EIP197_PE_ICE_SCRATCH_CTRL 0xa0d04
-#define EIP197_PE_ICE_FPP_CTRL 0xa0d80
-#define EIP197_PE_ICE_RAM_CTRL 0xa0ff0
-#define EIP197_PE_EIP96_FUNCTION_EN 0xa1004
-#define EIP197_PE_EIP96_CONTEXT_CTRL 0xa1008
-#define EIP197_PE_EIP96_CONTEXT_STAT 0xa100c
-#define EIP197_PE_OUT_DBUF_THRES 0xa1c00
-#define EIP197_PE_OUT_TBUF_THRES 0xa1d00
+#define EIP197_HIA_AIC_R_ENABLE_CTRL(r) (0xe008 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_R_ENABLED_STAT(r) (0xe010 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_R_ACK(r) (0xe010 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_R_ENABLE_CLR(r) (0xe014 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_G_ENABLE_CTRL 0xf808
+#define EIP197_HIA_AIC_G_ENABLED_STAT 0xf810
+#define EIP197_HIA_AIC_G_ACK 0xf810
+#define EIP197_HIA_MST_CTRL 0xfff4
+#define EIP197_HIA_OPTIONS 0xfff8
+#define EIP197_HIA_VERSION 0xfffc
+#define EIP197_PE_IN_DBUF_THRES 0x0000
+#define EIP197_PE_IN_TBUF_THRES 0x0100
+#define EIP197_PE_ICE_SCRATCH_RAM 0x0800
+#define EIP197_PE_ICE_PUE_CTRL 0x0c80
+#define EIP197_PE_ICE_SCRATCH_CTRL 0x0d04
+#define EIP197_PE_ICE_FPP_CTRL 0x0d80
+#define EIP197_PE_ICE_RAM_CTRL 0x0ff0
+#define EIP197_PE_EIP96_FUNCTION_EN 0x1004
+#define EIP197_PE_EIP96_CONTEXT_CTRL 0x1008
+#define EIP197_PE_EIP96_CONTEXT_STAT 0x100c
+#define EIP197_PE_OUT_DBUF_THRES 0x1c00
+#define EIP197_PE_OUT_TBUF_THRES 0x1d00
+#define EIP197_MST_CTRL 0xfff4
+
+/* EIP197-specific registers, no indirection */
#define EIP197_CLASSIFICATION_RAMS 0xe0000
#define EIP197_TRC_CTRL 0xf0800
#define EIP197_TRC_LASTRES 0xf0804
@@ -90,7 +129,6 @@
#define EIP197_TRC_ECCDATASTAT 0xf083c
#define EIP197_TRC_ECCDATA 0xf0840
#define EIP197_CS_RAM_CTRL 0xf7ff0
-#define EIP197_MST_CTRL 0xffff4
/* EIP197_HIA_xDR_DESC_SIZE */
#define EIP197_xDR_DESC_MODE_64BIT BIT(31)
@@ -117,6 +155,8 @@
#define EIP197_xDR_PREP_CLR_COUNT BIT(31)
/* EIP197_HIA_xDR_PROC_COUNT */
+#define EIP197_xDR_PROC_xD_PKT_OFFSET 24
+#define EIP197_xDR_PROC_xD_PKT_MASK GENMASK(6, 0)
#define EIP197_xDR_PROC_xD_COUNT(n) ((n) << 2)
#define EIP197_xDR_PROC_xD_PKT(n) ((n) << 24)
#define EIP197_xDR_PROC_CLR_COUNT BIT(31)
@@ -463,12 +503,33 @@ struct safexcel_work_data {
int ring;
};
+enum safexcel_eip_version {
+ EIP97,
+ EIP197,
+};
+
+struct safexcel_register_offsets {
+ u32 hia_aic;
+ u32 hia_aic_g;
+ u32 hia_aic_r;
+ u32 hia_aic_xdr;
+ u32 hia_dfe;
+ u32 hia_dfe_thr;
+ u32 hia_dse;
+ u32 hia_dse_thr;
+ u32 hia_gen_cfg;
+ u32 pe;
+};
+
struct safexcel_crypto_priv {
void __iomem *base;
struct device *dev;
struct clk *clk;
struct safexcel_config config;
+ enum safexcel_eip_version version;
+ struct safexcel_register_offsets offsets;
+
/* context DMA pool */
struct dma_pool *context_pool;
@@ -489,7 +550,20 @@ struct safexcel_crypto_priv {
/* queue */
struct crypto_queue queue;
spinlock_t queue_lock;
- bool need_dequeue;
+
+ /* Number of requests in the engine that still need the threshold
+ * interrupt to be set up.
+ */
+ int requests_left;
+
+ /* The ring is currently handling at least one request */
+ bool busy;
+
+ /* Store for the current request and backlog when bailing out of the
+ * dequeueing function because not enough resources are available.
+ */
+ struct crypto_async_request *req;
+ struct crypto_async_request *backlog;
} ring[EIP197_MAX_RINGS];
};
@@ -539,7 +613,6 @@ void safexcel_free_context(struct safexcel_crypto_priv *priv,
struct crypto_async_request *req,
int result_sz);
int safexcel_invalidate_cache(struct crypto_async_request *async,
- struct safexcel_context *ctx,
struct safexcel_crypto_priv *priv,
dma_addr_t ctxr_dma, int ring,
struct safexcel_request *request);
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index fcc0a606d748..63a8768ed2ae 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -27,7 +27,6 @@ struct safexcel_cipher_ctx {
struct safexcel_context base;
struct safexcel_crypto_priv *priv;
- enum safexcel_cipher_direction direction;
u32 mode;
__le32 key[8];
@@ -35,6 +34,7 @@ struct safexcel_cipher_ctx {
};
struct safexcel_cipher_req {
+ enum safexcel_cipher_direction direction;
bool needs_inv;
};
@@ -69,6 +69,7 @@ static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key,
{
struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct safexcel_crypto_priv *priv = ctx->priv;
struct crypto_aes_ctx aes;
int ret, i;
@@ -78,10 +79,12 @@ static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key,
return ret;
}
- for (i = 0; i < len / sizeof(u32); i++) {
- if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
- ctx->base.needs_inv = true;
- break;
+ if (priv->version == EIP197 && ctx->base.ctxr_dma) {
+ for (i = 0; i < len / sizeof(u32); i++) {
+ if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
+ ctx->base.needs_inv = true;
+ break;
+ }
}
}
@@ -95,12 +98,15 @@ static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key,
}
static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
+ struct crypto_async_request *async,
struct safexcel_command_desc *cdesc)
{
struct safexcel_crypto_priv *priv = ctx->priv;
+ struct skcipher_request *req = skcipher_request_cast(async);
+ struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
int ctrl_size;
- if (ctx->direction == SAFEXCEL_ENCRYPT)
+ if (sreq->direction == SAFEXCEL_ENCRYPT)
cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_OUT;
else
cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_IN;
@@ -243,7 +249,7 @@ static int safexcel_aes_send(struct crypto_async_request *async,
n_cdesc++;
if (n_cdesc == 1) {
- safexcel_context_control(ctx, cdesc);
+ safexcel_context_control(ctx, async, cdesc);
safexcel_cipher_token(ctx, async, cdesc, req->cryptlen);
}
@@ -353,8 +359,8 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
if (enq_ret != -EINPROGRESS)
*ret = enq_ret;
- if (!priv->ring[ring].need_dequeue)
- safexcel_dequeue(priv, ring);
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
*should_complete = false;
@@ -390,7 +396,7 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async,
struct safexcel_crypto_priv *priv = ctx->priv;
int ret;
- ret = safexcel_invalidate_cache(async, &ctx->base, priv,
+ ret = safexcel_invalidate_cache(async, priv,
ctx->base.ctxr_dma, ring, request);
if (unlikely(ret))
return ret;
@@ -406,9 +412,13 @@ static int safexcel_send(struct crypto_async_request *async,
int *commands, int *results)
{
struct skcipher_request *req = skcipher_request_cast(async);
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
+ struct safexcel_crypto_priv *priv = ctx->priv;
int ret;
+ BUG_ON(priv->version == EIP97 && sreq->needs_inv);
+
if (sreq->needs_inv)
ret = safexcel_cipher_send_inv(async, ring, request,
commands, results);
@@ -443,8 +453,8 @@ static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
spin_unlock_bh(&priv->ring[ring].queue_lock);
- if (!priv->ring[ring].need_dequeue)
- safexcel_dequeue(priv, ring);
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
wait_for_completion_interruptible(&result.completion);
@@ -467,11 +477,11 @@ static int safexcel_aes(struct skcipher_request *req,
int ret, ring;
sreq->needs_inv = false;
- ctx->direction = dir;
+ sreq->direction = dir;
ctx->mode = mode;
if (ctx->base.ctxr) {
- if (ctx->base.needs_inv) {
+ if (priv->version == EIP197 && ctx->base.needs_inv) {
sreq->needs_inv = true;
ctx->base.needs_inv = false;
}
@@ -490,8 +500,8 @@ static int safexcel_aes(struct skcipher_request *req,
ret = crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
spin_unlock_bh(&priv->ring[ring].queue_lock);
- if (!priv->ring[ring].need_dequeue)
- safexcel_dequeue(priv, ring);
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
return ret;
}
@@ -539,9 +549,14 @@ static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm)
memzero_explicit(ctx->base.ctxr->data, 8 * sizeof(u32));
- ret = safexcel_cipher_exit_inv(tfm);
- if (ret)
- dev_warn(priv->dev, "cipher: invalidation error %d\n", ret);
+ if (priv->version == EIP197) {
+ ret = safexcel_cipher_exit_inv(tfm);
+ if (ret)
+ dev_warn(priv->dev, "cipher: invalidation error %d\n", ret);
+ } else {
+ dma_pool_free(priv->context_pool, ctx->base.ctxr,
+ ctx->base.ctxr_dma);
+ }
}
struct safexcel_alg_template safexcel_alg_ecb_aes = {
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 0c5a5820b06e..122a2a58e98f 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -14,7 +14,6 @@
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
-
#include "safexcel.h"
struct safexcel_ahash_ctx {
@@ -34,6 +33,8 @@ struct safexcel_ahash_req {
bool hmac;
bool needs_inv;
+ int nents;
+
u8 state_sz; /* expected state size, only set once */
u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
@@ -152,8 +153,10 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
memcpy(areq->result, sreq->state,
crypto_ahash_digestsize(ahash));
- dma_unmap_sg(priv->dev, areq->src,
- sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE);
+ if (sreq->nents) {
+ dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
+ sreq->nents = 0;
+ }
safexcel_free_context(priv, async, sreq->state_sz);
@@ -178,7 +181,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
struct safexcel_result_desc *rdesc;
struct scatterlist *sg;
- int i, nents, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
+ int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
queued = len = req->len - req->processed;
if (queued < crypto_ahash_blocksize(ahash))
@@ -186,17 +189,31 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
else
cache_len = queued - areq->nbytes;
- /*
- * If this is not the last request and the queued data does not fit
- * into full blocks, cache it for the next send() call.
- */
- extra = queued & (crypto_ahash_blocksize(ahash) - 1);
- if (!req->last_req && extra) {
- sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
- req->cache_next, extra, areq->nbytes - extra);
-
- queued -= extra;
- len -= extra;
+ if (!req->last_req) {
+ /* If this is not the last request and the queued data does not
+ * fit into full blocks, cache it for the next send() call.
+ */
+ extra = queued & (crypto_ahash_blocksize(ahash) - 1);
+ if (!extra)
+ /* If this is not the last request and the queued data
+ * is a multiple of a block, cache the last one for now.
+ */
+ extra = queued - crypto_ahash_blocksize(ahash);
+
+ if (extra) {
+ sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
+ req->cache_next, extra,
+ areq->nbytes - extra);
+
+ queued -= extra;
+ len -= extra;
+
+ if (!queued) {
+ *commands = 0;
+ *results = 0;
+ return 0;
+ }
+ }
}
spin_lock_bh(&priv->ring[ring].egress_lock);
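/* Worked example of the hold-back logic above (64-byte block size,
 * non-final request): a block-aligned tail is cached so a later final
 * request always has data left to close the hash.
 *
 *	queued = 128:  extra = 128 & 63 = 0   ->  extra = 128 - 64 = 64
 *	queued = 100:  extra = 100 & 63 = 36  (the unaligned tail)
 *
 * "extra" bytes go to cache_next and are subtracted from what is handed
 * to the engine; if nothing remains, send() returns with zero commands
 * and zero results.
 */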
@@ -234,15 +251,15 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
}
/* Now handle the current ahash request buffer(s) */
- nents = dma_map_sg(priv->dev, areq->src,
- sg_nents_for_len(areq->src, areq->nbytes),
- DMA_TO_DEVICE);
- if (!nents) {
+ req->nents = dma_map_sg(priv->dev, areq->src,
+ sg_nents_for_len(areq->src, areq->nbytes),
+ DMA_TO_DEVICE);
+ if (!req->nents) {
ret = -ENOMEM;
goto cdesc_rollback;
}
- for_each_sg(areq->src, sg, nents, i) {
+ for_each_sg(areq->src, sg, req->nents, i) {
int sglen = sg_dma_len(sg);
/* Do not overflow the request */
@@ -382,8 +399,8 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
if (enq_ret != -EINPROGRESS)
*ret = enq_ret;
- if (!priv->ring[ring].need_dequeue)
- safexcel_dequeue(priv, ring);
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
*should_complete = false;
@@ -398,6 +415,8 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
int err;
+ BUG_ON(priv->version == EIP97 && req->needs_inv);
+
if (req->needs_inv) {
req->needs_inv = false;
err = safexcel_handle_inv_result(priv, ring, async,
@@ -418,7 +437,7 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async,
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
int ret;
- ret = safexcel_invalidate_cache(async, &ctx->base, ctx->priv,
+ ret = safexcel_invalidate_cache(async, ctx->priv,
ctx->base.ctxr_dma, ring, request);
if (unlikely(ret))
return ret;
@@ -471,8 +490,8 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
spin_unlock_bh(&priv->ring[ring].queue_lock);
- if (!priv->ring[ring].need_dequeue)
- safexcel_dequeue(priv, ring);
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
wait_for_completion_interruptible(&result.completion);
@@ -485,13 +504,23 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
return 0;
}
+/* safexcel_ahash_cache: cache data until at least one request can be sent to
+ * the engine, i.e. when there is at least one block size in the pipe.
+ */
static int safexcel_ahash_cache(struct ahash_request *areq)
{
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
int queued, cache_len;
+ /* cache_len: everything accepted by the driver but not sent yet, i.e.
+ * total size handled by update() - last request size - total size
+ * handled by send()
+ */
cache_len = req->len - areq->nbytes - req->processed;
+ /* queued: everything accepted by the driver that will be handled by
+ * the next send() calls, i.e.
+ * total size handled by update() - total size handled by send()
+ */
queued = req->len - req->processed;
/*
@@ -505,7 +534,7 @@ static int safexcel_ahash_cache(struct ahash_request *areq)
return areq->nbytes;
}
- /* We could'nt cache all the data */
+ /* We couldn't cache all the data */
return -E2BIG;
}
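
An editorial aside on the two definitions above: queued and cache_len are pure arithmetic over three counters, and the invariant queued == cache_len + areq->nbytes holds whenever both are computed. A standalone C model of that arithmetic (hypothetical values, not the driver's real structures):

#include <assert.h>
#include <stdio.h>

/* Hypothetical mirrors of the fields the comments reference:
 * len       - total bytes accepted by update() so far
 * processed - total bytes already handled by send()
 * nbytes    - size of the current (last) request
 */
int main(void)
{
    unsigned int len = 200, processed = 64, nbytes = 40;

    unsigned int queued = len - processed;
    unsigned int cache_len = len - nbytes - processed;

    assert(queued == cache_len + nbytes);
    printf("queued=%u cache_len=%u\n", queued, cache_len);
    return 0;
}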
@@ -518,10 +547,17 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
req->needs_inv = false;
- if (req->processed && ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
- ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);
-
if (ctx->base.ctxr) {
+ if (priv->version == EIP197 &&
+ !ctx->base.needs_inv && req->processed &&
+ ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
+ /* We're still setting needs_inv here, even though it is
+ * cleared right away, because the needs_inv flag can be
+ * set in other functions and we want to keep the same
+ * logic.
+ */
+ ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);
+
if (ctx->base.needs_inv) {
ctx->base.needs_inv = false;
req->needs_inv = true;
@@ -541,8 +577,8 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
spin_unlock_bh(&priv->ring[ring].queue_lock);
- if (!priv->ring[ring].need_dequeue)
- safexcel_dequeue(priv, ring);
+ queue_work(priv->ring[ring].workqueue,
+ &priv->ring[ring].work_data.work);
return ret;
}
@@ -625,7 +661,6 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out)
export->processed = req->processed;
memcpy(export->state, req->state, req->state_sz);
- memset(export->cache, 0, crypto_ahash_blocksize(ahash));
memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));
return 0;
@@ -707,9 +742,14 @@ static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
if (!ctx->base.ctxr)
return;
- ret = safexcel_ahash_exit_inv(tfm);
- if (ret)
- dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
+ if (priv->version == EIP197) {
+ ret = safexcel_ahash_exit_inv(tfm);
+ if (ret)
+ dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
+ } else {
+ dma_pool_free(priv->context_pool, ctx->base.ctxr,
+ ctx->base.ctxr_dma);
+ }
}
struct safexcel_alg_template safexcel_alg_sha1 = {
@@ -848,7 +888,7 @@ static int safexcel_hmac_init_iv(struct ahash_request *areq,
req->last_req = true;
ret = crypto_ahash_update(areq);
- if (ret && ret != -EINPROGRESS)
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY)
return ret;
wait_for_completion_interruptible(&result.completion);
@@ -913,6 +953,7 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+ struct safexcel_crypto_priv *priv = ctx->priv;
struct safexcel_ahash_export_state istate, ostate;
int ret, i;
@@ -920,11 +961,13 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
if (ret)
return ret;
- for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
- if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
- ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
- ctx->base.needs_inv = true;
- break;
+ if (priv->version == EIP197 && ctx->base.ctxr) {
+ for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
+ if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
+ ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
+ ctx->base.needs_inv = true;
+ break;
+ }
}
}
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 8705b28eb02c..717a26607bdb 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -260,12 +260,11 @@ static int setup_crypt_desc(void)
{
struct device *dev = &pdev->dev;
BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
- crypt_virt = dma_alloc_coherent(dev,
- NPE_QLEN * sizeof(struct crypt_ctl),
- &crypt_phys, GFP_ATOMIC);
+ crypt_virt = dma_zalloc_coherent(dev,
+ NPE_QLEN * sizeof(struct crypt_ctl),
+ &crypt_phys, GFP_ATOMIC);
if (!crypt_virt)
return -ENOMEM;
- memset(crypt_virt, 0, NPE_QLEN * sizeof(struct crypt_ctl));
return 0;
}
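
A note on the ixp4xx hunk above: dma_zalloc_coherent() is simply the zeroing variant of dma_alloc_coherent(), so folding the memset() away is behavior-preserving. The shape of the conversion, as a hedged kernel-style fragment (hypothetical dev/size/phys, not compilable on its own):

/* Before: allocate, then zero by hand. */
buf = dma_alloc_coherent(dev, size, &phys, GFP_ATOMIC);
if (!buf)
    return -ENOMEM;
memset(buf, 0, size);

/* After: one call, memory arrives already zeroed. */
buf = dma_zalloc_coherent(dev, size, &phys, GFP_ATOMIC);
if (!buf)
    return -ENOMEM;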
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index 293832488cc9..aca2373fa1de 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -15,6 +15,7 @@
*/
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -24,6 +25,7 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/dma-direct.h> /* XXX: drivers shall never use this directly! */
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -409,8 +411,11 @@ static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
if (IS_ERR(engine->sram))
return PTR_ERR(engine->sram);
- engine->sram_dma = phys_to_dma(cesa->dev,
- (phys_addr_t)res->start);
+ engine->sram_dma = dma_map_resource(cesa->dev, res->start,
+ cesa->sram_size,
+ DMA_BIDIRECTIONAL, 0);
+ if (dma_mapping_error(cesa->dev, engine->sram_dma))
+ return -ENOMEM;
return 0;
}
@@ -420,11 +425,12 @@ static void mv_cesa_put_sram(struct platform_device *pdev, int idx)
struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
struct mv_cesa_engine *engine = &cesa->engines[idx];
- if (!engine->pool)
- return;
-
- gen_pool_free(engine->pool, (unsigned long)engine->sram,
- cesa->sram_size);
+ if (engine->pool)
+ gen_pool_free(engine->pool, (unsigned long)engine->sram,
+ cesa->sram_size);
+ else
+ dma_unmap_resource(cesa->dev, engine->sram_dma,
+ cesa->sram_size, DMA_BIDIRECTIONAL, 0);
}
static int mv_cesa_probe(struct platform_device *pdev)
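
The CESA change above also alters the teardown contract: a resource mapped with dma_map_resource() must be released with dma_unmap_resource(), which is exactly what the reworked mv_cesa_put_sram() now does on the non-pool path. The expected pairing, sketched as a kernel-style fragment with hypothetical names:

/* Setup: map the SRAM physical window for the device ... */
dma = dma_map_resource(dev, res->start, size, DMA_BIDIRECTIONAL, 0);
if (dma_mapping_error(dev, dma))
    return -ENOMEM;

/* ... teardown: every successful map gets a matching unmap. */
dma_unmap_resource(dev, dma, size, DMA_BIDIRECTIONAL, 0);

Unlike the old phys_to_dma() shortcut, this goes through the DMA API proper, which is presumably why the remaining dma-direct include above carries an XXX warning.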
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c
index f2246a5abcf6..1e87637c412d 100644
--- a/drivers/crypto/nx/nx-842-powernv.c
+++ b/drivers/crypto/nx/nx-842-powernv.c
@@ -743,8 +743,8 @@ static int nx842_open_percpu_txwins(void)
}
if (!per_cpu(cpu_txwin, i)) {
- /* shoudn't happen, Each chip will have NX engine */
- pr_err("NX engine is not availavle for CPU %d\n", i);
+ /* shouldn't happen, each chip will have an NX engine */
+ pr_err("NX engine is not available for CPU %d\n", i);
return -EINVAL;
}
}
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 5a6dc53b2b9d..4ef52c9d72fc 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -1618,7 +1618,7 @@ MODULE_DEVICE_TABLE(of, spacc_of_id_table);
static int spacc_probe(struct platform_device *pdev)
{
- int i, err, ret = -EINVAL;
+ int i, err, ret;
struct resource *mem, *irq;
struct device_node *np = pdev->dev.of_node;
struct spacc_engine *engine = devm_kzalloc(&pdev->dev, sizeof(*engine),
@@ -1679,22 +1679,18 @@ static int spacc_probe(struct platform_device *pdev)
engine->clk = clk_get(&pdev->dev, "ref");
if (IS_ERR(engine->clk)) {
dev_info(&pdev->dev, "clk unavailable\n");
- device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
return PTR_ERR(engine->clk);
}
if (clk_prepare_enable(engine->clk)) {
dev_info(&pdev->dev, "unable to prepare/enable clk\n");
- clk_put(engine->clk);
- return -EIO;
+ ret = -EIO;
+ goto err_clk_put;
}
- err = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
- if (err) {
- clk_disable_unprepare(engine->clk);
- clk_put(engine->clk);
- return err;
- }
+ ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
+ if (ret)
+ goto err_clk_disable;
/*
@@ -1725,6 +1721,7 @@ static int spacc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, engine);
+ ret = -EINVAL;
INIT_LIST_HEAD(&engine->registered_algs);
for (i = 0; i < engine->num_algs; ++i) {
engine->algs[i].engine = engine;
@@ -1759,6 +1756,16 @@ static int spacc_probe(struct platform_device *pdev)
engine->aeads[i].alg.base.cra_name);
}
+ if (!ret)
+ return 0;
+
+ del_timer_sync(&engine->packet_timeout);
+ device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
+err_clk_disable:
+ clk_disable_unprepare(engine->clk);
+err_clk_put:
+ clk_put(engine->clk);
+
return ret;
}
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
index 8c4fd255a601..ff149e176f64 100644
--- a/drivers/crypto/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/qat/qat_common/qat_hal.c
@@ -117,19 +117,19 @@ void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
#define CSR_RETRY_TIMES 500
static int qat_hal_rd_ae_csr(struct icp_qat_fw_loader_handle *handle,
- unsigned char ae, unsigned int csr,
- unsigned int *value)
+ unsigned char ae, unsigned int csr)
{
unsigned int iterations = CSR_RETRY_TIMES;
+ int value;
do {
- *value = GET_AE_CSR(handle, ae, csr);
+ value = GET_AE_CSR(handle, ae, csr);
if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
- return 0;
+ return value;
} while (iterations--);
pr_err("QAT: Read CSR timeout\n");
- return -EFAULT;
+ return 0;
}
static int qat_hal_wr_ae_csr(struct icp_qat_fw_loader_handle *handle,
@@ -154,9 +154,9 @@ static void qat_hal_get_wakeup_event(struct icp_qat_fw_loader_handle *handle,
{
unsigned int cur_ctx;
- qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+ cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
- qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT, events);
+ *events = qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT);
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
}
@@ -169,13 +169,13 @@ static int qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle,
int times = MAX_RETRY_TIMES;
int elapsed_cycles = 0;
- qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &base_cnt);
+ base_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
base_cnt &= 0xffff;
while ((int)cycles > elapsed_cycles && times--) {
if (chk_inactive)
- qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &csr);
+ csr = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
- qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &cur_cnt);
+ cur_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
cur_cnt &= 0xffff;
elapsed_cycles = cur_cnt - base_cnt;
@@ -207,7 +207,7 @@ int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
}
/* Sets the acceleration engine context mode to either four or eight */
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
+ csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
csr = IGNORE_W1C_MASK & csr;
new_csr = (mode == 4) ?
SET_BIT(csr, CE_INUSE_CONTEXTS_BITPOS) :
@@ -221,7 +221,7 @@ int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
{
unsigned int csr, new_csr;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
+ csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
csr &= IGNORE_W1C_MASK;
new_csr = (mode) ?
@@ -240,7 +240,7 @@ int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
{
unsigned int csr, new_csr;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
+ csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
csr &= IGNORE_W1C_MASK;
switch (lm_type) {
case ICP_LMEM0:
@@ -328,7 +328,7 @@ static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle,
{
unsigned int ctx, cur_ctx;
- qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+ cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
if (!(ctx_mask & (1 << ctx)))
@@ -340,16 +340,18 @@ static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle,
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
}
-static void qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle,
+static unsigned int qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle,
unsigned char ae, unsigned char ctx,
- unsigned int ae_csr, unsigned int *csr_val)
+ unsigned int ae_csr)
{
- unsigned int cur_ctx;
+ unsigned int cur_ctx, csr_val;
- qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+ cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
- qat_hal_rd_ae_csr(handle, ae, ae_csr, csr_val);
+ csr_val = qat_hal_rd_ae_csr(handle, ae, ae_csr);
qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+
+ return csr_val;
}
static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle,
@@ -358,7 +360,7 @@ static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle,
{
unsigned int ctx, cur_ctx;
- qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+ cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
if (!(ctx_mask & (1 << ctx)))
continue;
@@ -374,7 +376,7 @@ static void qat_hal_put_wakeup_event(struct icp_qat_fw_loader_handle *handle,
{
unsigned int ctx, cur_ctx;
- qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+ cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
if (!(ctx_mask & (1 << ctx)))
continue;
@@ -392,13 +394,11 @@ static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle)
int times = MAX_RETRY_TIMES;
for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
- qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT,
- (unsigned int *)&base_cnt);
+ base_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
base_cnt &= 0xffff;
do {
- qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT,
- (unsigned int *)&cur_cnt);
+ cur_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
cur_cnt &= 0xffff;
} while (times-- && (cur_cnt == base_cnt));
@@ -416,8 +416,8 @@ int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle,
{
unsigned int enable = 0, active = 0;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &enable);
- qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &active);
+ enable = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
+ active = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
if ((enable & (0xff << CE_ENABLE_BITPOS)) ||
(active & (1 << ACS_ABO_BITPOS)))
return 1;
@@ -540,7 +540,7 @@ static void qat_hal_disable_ctx(struct icp_qat_fw_loader_handle *handle,
{
unsigned int ctx;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx);
+ ctx = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
ctx &= IGNORE_W1C_MASK &
(~((ctx_mask & ICP_QAT_UCLO_AE_ALL_CTX) << CE_ENABLE_BITPOS));
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
@@ -583,7 +583,7 @@ void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
unsigned int ustore_addr;
unsigned int i;
- qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+ ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
uaddr |= UA_ECS;
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
for (i = 0; i < words_num; i++) {
@@ -604,7 +604,7 @@ static void qat_hal_enable_ctx(struct icp_qat_fw_loader_handle *handle,
{
unsigned int ctx;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx);
+ ctx = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
ctx &= IGNORE_W1C_MASK;
ctx_mask &= (ctx & CE_INUSE_CONTEXTS) ? 0x55 : 0xFF;
ctx |= (ctx_mask << CE_ENABLE_BITPOS);
@@ -636,10 +636,10 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
int ret = 0;
for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
- qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
+ csr_val = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
csr_val &= ~(1 << MMC_SHARE_CS_BITPOS);
qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val);
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr_val);
+ csr_val = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
csr_val &= IGNORE_W1C_MASK;
csr_val |= CE_NN_MODE;
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val);
@@ -648,7 +648,7 @@ static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
handle->hal_handle->upc_mask &
INIT_PC_VALUE);
- qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
+ savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, 0);
qat_hal_put_wakeup_event(handle, ae, ctx_mask, XCWE_VOLUNTARY);
qat_hal_wr_indr_csr(handle, ae, ctx_mask,
@@ -760,7 +760,7 @@ int qat_hal_init(struct adf_accel_dev *accel_dev)
for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
unsigned int csr_val = 0;
- qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE, &csr_val);
+ csr_val = qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE);
csr_val |= 0x1;
qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val);
}
@@ -826,16 +826,16 @@ static void qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle,
unsigned int i, uwrd_lo, uwrd_hi;
unsigned int ustore_addr, misc_control;
- qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &misc_control);
+ misc_control = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL,
misc_control & 0xfffffffb);
- qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+ ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
uaddr |= UA_ECS;
for (i = 0; i < words_num; i++) {
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
uaddr++;
- qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER, &uwrd_lo);
- qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER, &uwrd_hi);
+ uwrd_lo = qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER);
+ uwrd_hi = qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER);
uword[i] = uwrd_hi;
uword[i] = (uword[i] << 0x20) | uwrd_lo;
}
@@ -849,7 +849,7 @@ void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle,
{
unsigned int i, ustore_addr;
- qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+ ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
uaddr |= UA_ECS;
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
for (i = 0; i < words_num; i++) {
@@ -890,26 +890,27 @@ static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
return -EINVAL;
}
/* save current context */
- qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT, &ind_lm_addr0);
- qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT, &ind_lm_addr1);
- qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_0_BYTE_INDEX,
- &ind_lm_addr_byte0);
- qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_1_BYTE_INDEX,
- &ind_lm_addr_byte1);
+ ind_lm_addr0 = qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT);
+ ind_lm_addr1 = qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT);
+ ind_lm_addr_byte0 = qat_hal_rd_indr_csr(handle, ae, ctx,
+ INDIRECT_LM_ADDR_0_BYTE_INDEX);
+ ind_lm_addr_byte1 = qat_hal_rd_indr_csr(handle, ae, ctx,
+ INDIRECT_LM_ADDR_1_BYTE_INDEX);
if (inst_num <= MAX_EXEC_INST)
qat_hal_get_uwords(handle, ae, 0, inst_num, savuwords);
qat_hal_get_wakeup_event(handle, ae, ctx, &wakeup_events);
- qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT, &savpc);
+ savpc = qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT);
savpc = (savpc & handle->hal_handle->upc_mask) >> 0;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+ ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
ctx_enables &= IGNORE_W1C_MASK;
- qat_hal_rd_ae_csr(handle, ae, CC_ENABLE, &savcc);
- qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
- qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_ctl);
- qat_hal_rd_indr_csr(handle, ae, ctx, FUTURE_COUNT_SIGNAL_INDIRECT,
- &ind_cnt_sig);
- qat_hal_rd_indr_csr(handle, ae, ctx, CTX_SIG_EVENTS_INDIRECT, &ind_sig);
- qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, &act_sig);
+ savcc = qat_hal_rd_ae_csr(handle, ae, CC_ENABLE);
+ savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
+ ctxarb_ctl = qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL);
+ ind_cnt_sig = qat_hal_rd_indr_csr(handle, ae, ctx,
+ FUTURE_COUNT_SIGNAL_INDIRECT);
+ ind_sig = qat_hal_rd_indr_csr(handle, ae, ctx,
+ CTX_SIG_EVENTS_INDIRECT);
+ act_sig = qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE);
/* execute micro codes */
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
qat_hal_wr_uwords(handle, ae, 0, inst_num, micro_inst);
@@ -927,8 +928,8 @@ static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
if (endpc) {
unsigned int ctx_status;
- qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT,
- &ctx_status);
+ ctx_status = qat_hal_rd_indr_csr(handle, ae, ctx,
+ CTX_STS_INDIRECT);
*endpc = ctx_status & handle->hal_handle->upc_mask;
}
/* restore to saved context */
@@ -938,7 +939,7 @@ static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
qat_hal_put_wakeup_event(handle, ae, (1 << ctx), wakeup_events);
qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT,
handle->hal_handle->upc_mask & savpc);
- qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
+ csr_val = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS);
qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val);
qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc);
@@ -986,16 +987,16 @@ static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle,
insts = (uint64_t)0xA030000000ull | ((reg_addr & 0x3ff) << 10);
break;
}
- qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
- qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_cntl);
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+ savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
+ ctxarb_cntl = qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL);
+ ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
ctx_enables &= IGNORE_W1C_MASK;
if (ctx != (savctx & ACS_ACNO))
qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
ctx & ACS_ACNO);
qat_hal_get_uwords(handle, ae, 0, 1, &savuword);
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
- qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+ ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
uaddr = UA_ECS;
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
insts = qat_hal_set_uword_ecc(insts);
@@ -1011,7 +1012,7 @@ static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle,
* the instruction should have been executed
* prior to clearing the ECS in putUwords
*/
- qat_hal_rd_ae_csr(handle, ae, ALU_OUT, data);
+ *data = qat_hal_rd_ae_csr(handle, ae, ALU_OUT);
qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
qat_hal_wr_uwords(handle, ae, 0, 1, &savuword);
if (ctx != (savctx & ACS_ACNO))
@@ -1188,7 +1189,7 @@ static int qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle *handle,
unsigned short mask;
unsigned short dr_offset = 0x10;
- status = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+ status = ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
if (CE_INUSE_CONTEXTS & ctx_enables) {
if (ctx & 0x1) {
pr_err("QAT: bad 4-ctx mode,ctx=0x%x\n", ctx);
@@ -1238,7 +1239,7 @@ static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle,
const int num_inst = ARRAY_SIZE(micro_inst), code_off = 1;
const unsigned short gprnum = 0, dly = num_inst * 0x5;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+ ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
if (CE_INUSE_CONTEXTS & ctx_enables) {
if (ctx & 0x1) {
pr_err("QAT: 4-ctx mode,ctx=0x%x\n", ctx);
@@ -1282,7 +1283,7 @@ static int qat_hal_put_rel_nn(struct icp_qat_fw_loader_handle *handle,
unsigned int ctx_enables;
int stat = 0;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+ ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
ctx_enables &= IGNORE_W1C_MASK;
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables | CE_NN_MODE);
@@ -1299,7 +1300,7 @@ static int qat_hal_convert_abs_to_rel(struct icp_qat_fw_loader_handle
{
unsigned int ctx_enables;
- qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+ ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
if (ctx_enables & CE_INUSE_CONTEXTS) {
/* 4-ctx mode */
*relreg = absreg_num & 0x1F;
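
Every qat_hal.c hunk above applies the same mechanical refactor: qat_hal_rd_ae_csr() and qat_hal_rd_indr_csr() return the CSR value instead of writing it through an out-parameter. The call-site conversion in isolation (fragment, using the real function name from this file):

/* Before: value delivered through an out-parameter. */
unsigned int csr_val;
qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr_val);

/* After: the read is an expression, so it composes in assignments. */
csr_val = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);

One consequence worth noting: the timeout path now returns 0 instead of -EFAULT, so a caller can no longer tell a timed-out read apart from a register that legitimately reads as zero.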
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 142c6020cec7..188f44b7eb27 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -1,17 +1,13 @@
-/*
- * Cryptographic API.
- *
- * Support for Samsung S5PV210 and Exynos HW acceleration.
- *
- * Copyright (C) 2011 NetUP Inc. All rights reserved.
- * Copyright (c) 2017 Samsung Electronics Co., Ltd. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Hash part based on omap-sham.c driver.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Cryptographic API.
+//
+// Support for Samsung S5PV210 and Exynos HW acceleration.
+//
+// Copyright (C) 2011 NetUP Inc. All rights reserved.
+// Copyright (c) 2017 Samsung Electronics Co., Ltd. All rights reserved.
+//
+// Hash part based on omap-sham.c driver.
#include <linux/clk.h>
#include <linux/crypto.h>
@@ -1461,7 +1457,7 @@ static void s5p_hash_tasklet_cb(unsigned long data)
&dd->hash_flags)) {
/* hash or semi-hash ready */
clear_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags);
- goto finish;
+ goto finish;
}
}
diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig
index 602332e02729..63aa78c0b12b 100644
--- a/drivers/crypto/stm32/Kconfig
+++ b/drivers/crypto/stm32/Kconfig
@@ -1,4 +1,4 @@
-config CRC_DEV_STM32
+config CRYPTO_DEV_STM32_CRC
tristate "Support for STM32 crc accelerators"
depends on ARCH_STM32
select CRYPTO_HASH
@@ -6,7 +6,7 @@ config CRC_DEV_STM32
This enables support for the CRC32 hw accelerator which can be found
on STMicroelectronics STM32 SOC.
-config HASH_DEV_STM32
+config CRYPTO_DEV_STM32_HASH
tristate "Support for STM32 hash accelerators"
depends on ARCH_STM32
depends on HAS_DMA
@@ -18,3 +18,12 @@ config HASH_DEV_STM32
help
This enables support for the HASH hw accelerator which can be found
on STMicroelectronics STM32 SOC.
+
+config CRYPTO_DEV_STM32_CRYP
+ tristate "Support for STM32 cryp accelerators"
+ depends on ARCH_STM32
+ select CRYPTO_HASH
+ select CRYPTO_ENGINE
+ help
+ This enables support for the CRYP (AES/DES/TDES) hw accelerator which
+ can be found on STMicroelectronics STM32 SOC.
diff --git a/drivers/crypto/stm32/Makefile b/drivers/crypto/stm32/Makefile
index 73cd56cad0cc..53d1bb94b221 100644
--- a/drivers/crypto/stm32/Makefile
+++ b/drivers/crypto/stm32/Makefile
@@ -1,2 +1,3 @@
-obj-$(CONFIG_CRC_DEV_STM32) += stm32_crc32.o
-obj-$(CONFIG_HASH_DEV_STM32) += stm32-hash.o
\ No newline at end of file
+obj-$(CONFIG_CRYPTO_DEV_STM32_CRC) += stm32_crc32.o
+obj-$(CONFIG_CRYPTO_DEV_STM32_HASH) += stm32-hash.o
+obj-$(CONFIG_CRYPTO_DEV_STM32_CRYP) += stm32-cryp.o
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
new file mode 100644
index 000000000000..4a06a7a665ee
--- /dev/null
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -0,0 +1,1170 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2017
+ * Author: Fabien Dessenne <fabien.dessenne@st.com>
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/engine.h>
+#include <crypto/scatterwalk.h>
+
+#define DRIVER_NAME "stm32-cryp"
+
+/* Bit [0] encrypt / decrypt */
+#define FLG_ENCRYPT BIT(0)
+/* Bit [8..1] algo & operation mode */
+#define FLG_AES BIT(1)
+#define FLG_DES BIT(2)
+#define FLG_TDES BIT(3)
+#define FLG_ECB BIT(4)
+#define FLG_CBC BIT(5)
+#define FLG_CTR BIT(6)
+/* Mode mask = bits [15..0] */
+#define FLG_MODE_MASK GENMASK(15, 0)
+
+/* Registers */
+#define CRYP_CR 0x00000000
+#define CRYP_SR 0x00000004
+#define CRYP_DIN 0x00000008
+#define CRYP_DOUT 0x0000000C
+#define CRYP_DMACR 0x00000010
+#define CRYP_IMSCR 0x00000014
+#define CRYP_RISR 0x00000018
+#define CRYP_MISR 0x0000001C
+#define CRYP_K0LR 0x00000020
+#define CRYP_K0RR 0x00000024
+#define CRYP_K1LR 0x00000028
+#define CRYP_K1RR 0x0000002C
+#define CRYP_K2LR 0x00000030
+#define CRYP_K2RR 0x00000034
+#define CRYP_K3LR 0x00000038
+#define CRYP_K3RR 0x0000003C
+#define CRYP_IV0LR 0x00000040
+#define CRYP_IV0RR 0x00000044
+#define CRYP_IV1LR 0x00000048
+#define CRYP_IV1RR 0x0000004C
+
+/* Registers values */
+#define CR_DEC_NOT_ENC 0x00000004
+#define CR_TDES_ECB 0x00000000
+#define CR_TDES_CBC 0x00000008
+#define CR_DES_ECB 0x00000010
+#define CR_DES_CBC 0x00000018
+#define CR_AES_ECB 0x00000020
+#define CR_AES_CBC 0x00000028
+#define CR_AES_CTR 0x00000030
+#define CR_AES_KP 0x00000038
+#define CR_AES_UNKNOWN 0xFFFFFFFF
+#define CR_ALGO_MASK 0x00080038
+#define CR_DATA32 0x00000000
+#define CR_DATA16 0x00000040
+#define CR_DATA8 0x00000080
+#define CR_DATA1 0x000000C0
+#define CR_KEY128 0x00000000
+#define CR_KEY192 0x00000100
+#define CR_KEY256 0x00000200
+#define CR_FFLUSH 0x00004000
+#define CR_CRYPEN 0x00008000
+
+#define SR_BUSY 0x00000010
+#define SR_OFNE 0x00000004
+
+#define IMSCR_IN BIT(0)
+#define IMSCR_OUT BIT(1)
+
+#define MISR_IN BIT(0)
+#define MISR_OUT BIT(1)
+
+/* Misc */
+#define AES_BLOCK_32 (AES_BLOCK_SIZE / sizeof(u32))
+#define _walked_in (cryp->in_walk.offset - cryp->in_sg->offset)
+#define _walked_out (cryp->out_walk.offset - cryp->out_sg->offset)
+
+struct stm32_cryp_ctx {
+ struct stm32_cryp *cryp;
+ int keylen;
+ u32 key[AES_KEYSIZE_256 / sizeof(u32)];
+ unsigned long flags;
+};
+
+struct stm32_cryp_reqctx {
+ unsigned long mode;
+};
+
+struct stm32_cryp {
+ struct list_head list;
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *clk;
+ unsigned long flags;
+ u32 irq_status;
+ struct stm32_cryp_ctx *ctx;
+
+ struct crypto_engine *engine;
+
+ struct mutex lock; /* protects req */
+ struct ablkcipher_request *req;
+
+ size_t hw_blocksize;
+
+ size_t total_in;
+ size_t total_in_save;
+ size_t total_out;
+ size_t total_out_save;
+
+ struct scatterlist *in_sg;
+ struct scatterlist *out_sg;
+ struct scatterlist *out_sg_save;
+
+ struct scatterlist in_sgl;
+ struct scatterlist out_sgl;
+ bool sgs_copied;
+
+ int in_sg_len;
+ int out_sg_len;
+
+ struct scatter_walk in_walk;
+ struct scatter_walk out_walk;
+
+ u32 last_ctr[4];
+};
+
+struct stm32_cryp_list {
+ struct list_head dev_list;
+ spinlock_t lock; /* protect dev_list */
+};
+
+static struct stm32_cryp_list cryp_list = {
+ .dev_list = LIST_HEAD_INIT(cryp_list.dev_list),
+ .lock = __SPIN_LOCK_UNLOCKED(cryp_list.lock),
+};
+
+static inline bool is_aes(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_AES;
+}
+
+static inline bool is_des(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_DES;
+}
+
+static inline bool is_tdes(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_TDES;
+}
+
+static inline bool is_ecb(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_ECB;
+}
+
+static inline bool is_cbc(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_CBC;
+}
+
+static inline bool is_ctr(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_CTR;
+}
+
+static inline bool is_encrypt(struct stm32_cryp *cryp)
+{
+ return cryp->flags & FLG_ENCRYPT;
+}
+
+static inline bool is_decrypt(struct stm32_cryp *cryp)
+{
+ return !is_encrypt(cryp);
+}
+
+static inline u32 stm32_cryp_read(struct stm32_cryp *cryp, u32 ofst)
+{
+ return readl_relaxed(cryp->regs + ofst);
+}
+
+static inline void stm32_cryp_write(struct stm32_cryp *cryp, u32 ofst, u32 val)
+{
+ writel_relaxed(val, cryp->regs + ofst);
+}
+
+static inline int stm32_cryp_wait_busy(struct stm32_cryp *cryp)
+{
+ u32 status;
+
+ return readl_relaxed_poll_timeout(cryp->regs + CRYP_SR, status,
+ !(status & SR_BUSY), 10, 100000);
+}
+
+static struct stm32_cryp *stm32_cryp_find_dev(struct stm32_cryp_ctx *ctx)
+{
+ struct stm32_cryp *tmp, *cryp = NULL;
+
+ spin_lock_bh(&cryp_list.lock);
+ if (!ctx->cryp) {
+ list_for_each_entry(tmp, &cryp_list.dev_list, list) {
+ cryp = tmp;
+ break;
+ }
+ ctx->cryp = cryp;
+ } else {
+ cryp = ctx->cryp;
+ }
+
+ spin_unlock_bh(&cryp_list.lock);
+
+ return cryp;
+}
+
+static int stm32_cryp_check_aligned(struct scatterlist *sg, size_t total,
+ size_t align)
+{
+ int len = 0;
+
+ if (!total)
+ return 0;
+
+ if (!IS_ALIGNED(total, align))
+ return -EINVAL;
+
+ while (sg) {
+ if (!IS_ALIGNED(sg->offset, sizeof(u32)))
+ return -EINVAL;
+
+ if (!IS_ALIGNED(sg->length, align))
+ return -EINVAL;
+
+ len += sg->length;
+ sg = sg_next(sg);
+ }
+
+ if (len != total)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int stm32_cryp_check_io_aligned(struct stm32_cryp *cryp)
+{
+ int ret;
+
+ ret = stm32_cryp_check_aligned(cryp->in_sg, cryp->total_in,
+ cryp->hw_blocksize);
+ if (ret)
+ return ret;
+
+ ret = stm32_cryp_check_aligned(cryp->out_sg, cryp->total_out,
+ cryp->hw_blocksize);
+
+ return ret;
+}
+
+static void sg_copy_buf(void *buf, struct scatterlist *sg,
+ unsigned int start, unsigned int nbytes, int out)
+{
+ struct scatter_walk walk;
+
+ if (!nbytes)
+ return;
+
+ scatterwalk_start(&walk, sg);
+ scatterwalk_advance(&walk, start);
+ scatterwalk_copychunks(buf, &walk, nbytes, out);
+ scatterwalk_done(&walk, out, 0);
+}
+
+static int stm32_cryp_copy_sgs(struct stm32_cryp *cryp)
+{
+ void *buf_in, *buf_out;
+ int pages, total_in, total_out;
+
+ if (!stm32_cryp_check_io_aligned(cryp)) {
+ cryp->sgs_copied = 0;
+ return 0;
+ }
+
+ total_in = ALIGN(cryp->total_in, cryp->hw_blocksize);
+ pages = total_in ? get_order(total_in) : 1;
+ buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
+
+ total_out = ALIGN(cryp->total_out, cryp->hw_blocksize);
+ pages = total_out ? get_order(total_out) : 1;
+ buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages);
+
+ if (!buf_in || !buf_out) {
+ dev_err(cryp->dev, "Can't allocate pages when unaligned\n");
+ cryp->sgs_copied = 0;
+ return -EFAULT;
+ }
+
+ sg_copy_buf(buf_in, cryp->in_sg, 0, cryp->total_in, 0);
+
+ sg_init_one(&cryp->in_sgl, buf_in, total_in);
+ cryp->in_sg = &cryp->in_sgl;
+ cryp->in_sg_len = 1;
+
+ sg_init_one(&cryp->out_sgl, buf_out, total_out);
+ cryp->out_sg_save = cryp->out_sg;
+ cryp->out_sg = &cryp->out_sgl;
+ cryp->out_sg_len = 1;
+
+ cryp->sgs_copied = 1;
+
+ return 0;
+}
+
+static void stm32_cryp_hw_write_iv(struct stm32_cryp *cryp, u32 *iv)
+{
+ if (!iv)
+ return;
+
+ stm32_cryp_write(cryp, CRYP_IV0LR, cpu_to_be32(*iv++));
+ stm32_cryp_write(cryp, CRYP_IV0RR, cpu_to_be32(*iv++));
+
+ if (is_aes(cryp)) {
+ stm32_cryp_write(cryp, CRYP_IV1LR, cpu_to_be32(*iv++));
+ stm32_cryp_write(cryp, CRYP_IV1RR, cpu_to_be32(*iv++));
+ }
+}
+
+static void stm32_cryp_hw_write_key(struct stm32_cryp *c)
+{
+ unsigned int i;
+ int r_id;
+
+ if (is_des(c)) {
+ stm32_cryp_write(c, CRYP_K1LR, cpu_to_be32(c->ctx->key[0]));
+ stm32_cryp_write(c, CRYP_K1RR, cpu_to_be32(c->ctx->key[1]));
+ } else {
+ r_id = CRYP_K3RR;
+ for (i = c->ctx->keylen / sizeof(u32); i > 0; i--, r_id -= 4)
+ stm32_cryp_write(c, r_id,
+ cpu_to_be32(c->ctx->key[i - 1]));
+ }
+}
+
+static u32 stm32_cryp_get_hw_mode(struct stm32_cryp *cryp)
+{
+ if (is_aes(cryp) && is_ecb(cryp))
+ return CR_AES_ECB;
+
+ if (is_aes(cryp) && is_cbc(cryp))
+ return CR_AES_CBC;
+
+ if (is_aes(cryp) && is_ctr(cryp))
+ return CR_AES_CTR;
+
+ if (is_des(cryp) && is_ecb(cryp))
+ return CR_DES_ECB;
+
+ if (is_des(cryp) && is_cbc(cryp))
+ return CR_DES_CBC;
+
+ if (is_tdes(cryp) && is_ecb(cryp))
+ return CR_TDES_ECB;
+
+ if (is_tdes(cryp) && is_cbc(cryp))
+ return CR_TDES_CBC;
+
+ dev_err(cryp->dev, "Unknown mode\n");
+ return CR_AES_UNKNOWN;
+}
+
+static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
+{
+ int ret;
+ u32 cfg, hw_mode;
+
+ /* Disable interrupt */
+ stm32_cryp_write(cryp, CRYP_IMSCR, 0);
+
+ /* Set key */
+ stm32_cryp_hw_write_key(cryp);
+
+ /* Set configuration */
+ cfg = CR_DATA8 | CR_FFLUSH;
+
+ switch (cryp->ctx->keylen) {
+ case AES_KEYSIZE_128:
+ cfg |= CR_KEY128;
+ break;
+
+ case AES_KEYSIZE_192:
+ cfg |= CR_KEY192;
+ break;
+
+ default:
+ case AES_KEYSIZE_256:
+ cfg |= CR_KEY256;
+ break;
+ }
+
+ hw_mode = stm32_cryp_get_hw_mode(cryp);
+ if (hw_mode == CR_AES_UNKNOWN)
+ return -EINVAL;
+
+ /* AES ECB/CBC decrypt: run key preparation first */
+ if (is_decrypt(cryp) &&
+ ((hw_mode == CR_AES_ECB) || (hw_mode == CR_AES_CBC))) {
+ stm32_cryp_write(cryp, CRYP_CR, cfg | CR_AES_KP | CR_CRYPEN);
+
+ /* Wait for end of processing */
+ ret = stm32_cryp_wait_busy(cryp);
+ if (ret) {
+ dev_err(cryp->dev, "Timeout (key preparation)\n");
+ return ret;
+ }
+ }
+
+ cfg |= hw_mode;
+
+ if (is_decrypt(cryp))
+ cfg |= CR_DEC_NOT_ENC;
+
+ /* Apply config and flush (valid when CRYPEN = 0) */
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ switch (hw_mode) {
+ case CR_DES_CBC:
+ case CR_TDES_CBC:
+ case CR_AES_CBC:
+ case CR_AES_CTR:
+ stm32_cryp_hw_write_iv(cryp, (u32 *)cryp->req->info);
+ break;
+
+ default:
+ break;
+ }
+
+ /* Enable now */
+ cfg |= CR_CRYPEN;
+
+ stm32_cryp_write(cryp, CRYP_CR, cfg);
+
+ return 0;
+}
+
+static void stm32_cryp_finish_req(struct stm32_cryp *cryp)
+{
+ int err = 0;
+
+ if (cryp->sgs_copied) {
+ void *buf_in, *buf_out;
+ int pages, len;
+
+ buf_in = sg_virt(&cryp->in_sgl);
+ buf_out = sg_virt(&cryp->out_sgl);
+
+ sg_copy_buf(buf_out, cryp->out_sg_save, 0,
+ cryp->total_out_save, 1);
+
+ len = ALIGN(cryp->total_in_save, cryp->hw_blocksize);
+ pages = len ? get_order(len) : 1;
+ free_pages((unsigned long)buf_in, pages);
+
+ len = ALIGN(cryp->total_out_save, cryp->hw_blocksize);
+ pages = len ? get_order(len) : 1;
+ free_pages((unsigned long)buf_out, pages);
+ }
+
+ crypto_finalize_cipher_request(cryp->engine, cryp->req, err);
+ cryp->req = NULL;
+
+ memset(cryp->ctx->key, 0, cryp->ctx->keylen);
+
+ mutex_unlock(&cryp->lock);
+}
+
+static int stm32_cryp_cpu_start(struct stm32_cryp *cryp)
+{
+ /* Enable interrupt and let the IRQ handler do everything */
+ stm32_cryp_write(cryp, CRYP_IMSCR, IMSCR_IN | IMSCR_OUT);
+
+ return 0;
+}
+
+static int stm32_cryp_cra_init(struct crypto_tfm *tfm)
+{
+ tfm->crt_ablkcipher.reqsize = sizeof(struct stm32_cryp_reqctx);
+
+ return 0;
+}
+
+static int stm32_cryp_crypt(struct ablkcipher_request *req, unsigned long mode)
+{
+ struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+ struct stm32_cryp_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct stm32_cryp *cryp = stm32_cryp_find_dev(ctx);
+
+ if (!cryp)
+ return -ENODEV;
+
+ rctx->mode = mode;
+
+ return crypto_transfer_cipher_request_to_engine(cryp->engine, req);
+}
+
+static int stm32_cryp_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+
+ memcpy(ctx->key, key, keylen);
+ ctx->keylen = keylen;
+
+ return 0;
+}
+
+static int stm32_cryp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
+ keylen != AES_KEYSIZE_256)
+ return -EINVAL;
+ else
+ return stm32_cryp_setkey(tfm, key, keylen);
+}
+
+static int stm32_cryp_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ if (keylen != DES_KEY_SIZE)
+ return -EINVAL;
+ else
+ return stm32_cryp_setkey(tfm, key, keylen);
+}
+
+static int stm32_cryp_tdes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ if (keylen != (3 * DES_KEY_SIZE))
+ return -EINVAL;
+ else
+ return stm32_cryp_setkey(tfm, key, keylen);
+}
+
+static int stm32_cryp_aes_ecb_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_AES | FLG_ECB | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_aes_ecb_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_AES | FLG_ECB);
+}
+
+static int stm32_cryp_aes_cbc_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_AES | FLG_CBC | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_aes_cbc_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_AES | FLG_CBC);
+}
+
+static int stm32_cryp_aes_ctr_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_AES | FLG_CTR | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_aes_ctr_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_AES | FLG_CTR);
+}
+
+static int stm32_cryp_des_ecb_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_DES | FLG_ECB | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_des_ecb_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_DES | FLG_ECB);
+}
+
+static int stm32_cryp_des_cbc_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_DES | FLG_CBC | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_des_cbc_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_DES | FLG_CBC);
+}
+
+static int stm32_cryp_tdes_ecb_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_tdes_ecb_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB);
+}
+
+static int stm32_cryp_tdes_cbc_encrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC | FLG_ENCRYPT);
+}
+
+static int stm32_cryp_tdes_cbc_decrypt(struct ablkcipher_request *req)
+{
+ return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC);
+}
+
+static int stm32_cryp_prepare_req(struct crypto_engine *engine,
+ struct ablkcipher_request *req)
+{
+ struct stm32_cryp_ctx *ctx;
+ struct stm32_cryp *cryp;
+ struct stm32_cryp_reqctx *rctx;
+ int ret;
+
+ if (!req)
+ return -EINVAL;
+
+ ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+
+ cryp = ctx->cryp;
+
+ if (!cryp)
+ return -ENODEV;
+
+ mutex_lock(&cryp->lock);
+
+ rctx = ablkcipher_request_ctx(req);
+ rctx->mode &= FLG_MODE_MASK;
+
+ ctx->cryp = cryp;
+
+ cryp->flags = (cryp->flags & ~FLG_MODE_MASK) | rctx->mode;
+ cryp->hw_blocksize = is_aes(cryp) ? AES_BLOCK_SIZE : DES_BLOCK_SIZE;
+ cryp->ctx = ctx;
+
+ cryp->req = req;
+ cryp->total_in = req->nbytes;
+ cryp->total_out = cryp->total_in;
+
+ cryp->total_in_save = cryp->total_in;
+ cryp->total_out_save = cryp->total_out;
+
+ cryp->in_sg = req->src;
+ cryp->out_sg = req->dst;
+ cryp->out_sg_save = cryp->out_sg;
+
+ cryp->in_sg_len = sg_nents_for_len(cryp->in_sg, cryp->total_in);
+ if (cryp->in_sg_len < 0) {
+ dev_err(cryp->dev, "Cannot get in_sg_len\n");
+ ret = cryp->in_sg_len;
+ goto out;
+ }
+
+ cryp->out_sg_len = sg_nents_for_len(cryp->out_sg, cryp->total_out);
+ if (cryp->out_sg_len < 0) {
+ dev_err(cryp->dev, "Cannot get out_sg_len\n");
+ ret = cryp->out_sg_len;
+ goto out;
+ }
+
+ ret = stm32_cryp_copy_sgs(cryp);
+ if (ret)
+ goto out;
+
+ scatterwalk_start(&cryp->in_walk, cryp->in_sg);
+ scatterwalk_start(&cryp->out_walk, cryp->out_sg);
+
+ ret = stm32_cryp_hw_init(cryp);
+out:
+ if (ret)
+ mutex_unlock(&cryp->lock);
+
+ return ret;
+}
+
+static int stm32_cryp_prepare_cipher_req(struct crypto_engine *engine,
+ struct ablkcipher_request *req)
+{
+ return stm32_cryp_prepare_req(engine, req);
+}
+
+static int stm32_cryp_cipher_one_req(struct crypto_engine *engine,
+ struct ablkcipher_request *req)
+{
+ struct stm32_cryp_ctx *ctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+ struct stm32_cryp *cryp = ctx->cryp;
+
+ if (!cryp)
+ return -ENODEV;
+
+ return stm32_cryp_cpu_start(cryp);
+}
+
+static u32 *stm32_cryp_next_out(struct stm32_cryp *cryp, u32 *dst,
+ unsigned int n)
+{
+ scatterwalk_advance(&cryp->out_walk, n);
+
+ if (unlikely(cryp->out_sg->length == _walked_out)) {
+ cryp->out_sg = sg_next(cryp->out_sg);
+ if (cryp->out_sg) {
+ scatterwalk_start(&cryp->out_walk, cryp->out_sg);
+ return (sg_virt(cryp->out_sg) + _walked_out);
+ }
+ }
+
+ return (u32 *)((u8 *)dst + n);
+}
+
+static u32 *stm32_cryp_next_in(struct stm32_cryp *cryp, u32 *src,
+ unsigned int n)
+{
+ scatterwalk_advance(&cryp->in_walk, n);
+
+ if (unlikely(cryp->in_sg->length == _walked_in)) {
+ cryp->in_sg = sg_next(cryp->in_sg);
+ if (cryp->in_sg) {
+ scatterwalk_start(&cryp->in_walk, cryp->in_sg);
+ return (sg_virt(cryp->in_sg) + _walked_in);
+ }
+ }
+
+ return (u32 *)((u8 *)src + n);
+}
+
+static void stm32_cryp_check_ctr_counter(struct stm32_cryp *cryp)
+{
+ u32 cr;
+
+ if (unlikely(cryp->last_ctr[3] == 0xFFFFFFFF)) {
+ cryp->last_ctr[3] = 0;
+ cryp->last_ctr[2]++;
+ if (!cryp->last_ctr[2]) {
+ cryp->last_ctr[1]++;
+ if (!cryp->last_ctr[1])
+ cryp->last_ctr[0]++;
+ }
+
+ cr = stm32_cryp_read(cryp, CRYP_CR);
+ stm32_cryp_write(cryp, CRYP_CR, cr & ~CR_CRYPEN);
+
+ stm32_cryp_hw_write_iv(cryp, (u32 *)cryp->last_ctr);
+
+ stm32_cryp_write(cryp, CRYP_CR, cr);
+ }
+
+ cryp->last_ctr[0] = stm32_cryp_read(cryp, CRYP_IV0LR);
+ cryp->last_ctr[1] = stm32_cryp_read(cryp, CRYP_IV0RR);
+ cryp->last_ctr[2] = stm32_cryp_read(cryp, CRYP_IV1LR);
+ cryp->last_ctr[3] = stm32_cryp_read(cryp, CRYP_IV1RR);
+}
+
+static bool stm32_cryp_irq_read_data(struct stm32_cryp *cryp)
+{
+ unsigned int i, j;
+ u32 d32, *dst;
+ u8 *d8;
+
+ dst = sg_virt(cryp->out_sg) + _walked_out;
+
+ for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) {
+ if (likely(cryp->total_out >= sizeof(u32))) {
+ /* Read a full u32 */
+ *dst = stm32_cryp_read(cryp, CRYP_DOUT);
+
+ dst = stm32_cryp_next_out(cryp, dst, sizeof(u32));
+ cryp->total_out -= sizeof(u32);
+ } else if (!cryp->total_out) {
+ /* Empty fifo out (data from input padding) */
+ d32 = stm32_cryp_read(cryp, CRYP_DOUT);
+ } else {
+ /* Read less than a u32 */
+ d32 = stm32_cryp_read(cryp, CRYP_DOUT);
+ d8 = (u8 *)&d32;
+
+ for (j = 0; j < cryp->total_out; j++) {
+ *((u8 *)dst) = *(d8++);
+ dst = stm32_cryp_next_out(cryp, dst, 1);
+ }
+ cryp->total_out = 0;
+ }
+ }
+
+ return !cryp->total_out || !cryp->total_in;
+}
+
+static void stm32_cryp_irq_write_block(struct stm32_cryp *cryp)
+{
+ unsigned int i, j;
+ u32 *src;
+ u8 d8[4];
+
+ src = sg_virt(cryp->in_sg) + _walked_in;
+
+ for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++) {
+ if (likely(cryp->total_in >= sizeof(u32))) {
+ /* Write a full u32 */
+ stm32_cryp_write(cryp, CRYP_DIN, *src);
+
+ src = stm32_cryp_next_in(cryp, src, sizeof(u32));
+ cryp->total_in -= sizeof(u32);
+ } else if (!cryp->total_in) {
+ /* Write padding data */
+ stm32_cryp_write(cryp, CRYP_DIN, 0);
+ } else {
+ /* Write less than a u32 */
+ memset(d8, 0, sizeof(u32));
+ for (j = 0; j < cryp->total_in; j++) {
+ d8[j] = *((u8 *)src);
+ src = stm32_cryp_next_in(cryp, src, 1);
+ }
+
+ stm32_cryp_write(cryp, CRYP_DIN, *(u32 *)d8);
+ cryp->total_in = 0;
+ }
+ }
+}
+
+static void stm32_cryp_irq_write_data(struct stm32_cryp *cryp)
+{
+ if (unlikely(!cryp->total_in)) {
+ dev_warn(cryp->dev, "No more data to process\n");
+ return;
+ }
+
+ if (is_aes(cryp) && is_ctr(cryp))
+ stm32_cryp_check_ctr_counter(cryp);
+
+ stm32_cryp_irq_write_block(cryp);
+}
+
+static irqreturn_t stm32_cryp_irq_thread(int irq, void *arg)
+{
+ struct stm32_cryp *cryp = arg;
+
+ if (cryp->irq_status & MISR_OUT)
+ /* Output FIFO IRQ: read data */
+ if (unlikely(stm32_cryp_irq_read_data(cryp))) {
+ /* All bytes processed, finish */
+ stm32_cryp_write(cryp, CRYP_IMSCR, 0);
+ stm32_cryp_finish_req(cryp);
+ return IRQ_HANDLED;
+ }
+
+ if (cryp->irq_status & MISR_IN) {
+ /* Input FIFO IRQ: write data */
+ stm32_cryp_irq_write_data(cryp);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t stm32_cryp_irq(int irq, void *arg)
+{
+ struct stm32_cryp *cryp = arg;
+
+ cryp->irq_status = stm32_cryp_read(cryp, CRYP_MISR);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static struct crypto_alg crypto_algs[] = {
+{
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "stm32-ecb-aes",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = stm32_cryp_aes_setkey,
+ .encrypt = stm32_cryp_aes_ecb_encrypt,
+ .decrypt = stm32_cryp_aes_ecb_decrypt,
+ }
+},
+{
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "stm32-cbc-aes",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = stm32_cryp_aes_setkey,
+ .encrypt = stm32_cryp_aes_cbc_encrypt,
+ .decrypt = stm32_cryp_aes_cbc_decrypt,
+ }
+},
+{
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "stm32-ctr-aes",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = stm32_cryp_aes_setkey,
+ .encrypt = stm32_cryp_aes_ctr_encrypt,
+ .decrypt = stm32_cryp_aes_ctr_decrypt,
+ }
+},
+{
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "stm32-ecb-des",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = DES_BLOCK_SIZE,
+ .max_keysize = DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_des_setkey,
+ .encrypt = stm32_cryp_des_ecb_encrypt,
+ .decrypt = stm32_cryp_des_ecb_decrypt,
+ }
+},
+{
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "stm32-cbc-des",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = DES_BLOCK_SIZE,
+ .max_keysize = DES_BLOCK_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_des_setkey,
+ .encrypt = stm32_cryp_des_cbc_encrypt,
+ .decrypt = stm32_cryp_des_cbc_decrypt,
+ }
+},
+{
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "stm32-ecb-des3",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = 3 * DES_BLOCK_SIZE,
+ .max_keysize = 3 * DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_tdes_setkey,
+ .encrypt = stm32_cryp_tdes_ecb_encrypt,
+ .decrypt = stm32_cryp_tdes_ecb_decrypt,
+ }
+},
+{
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "stm32-cbc-des3",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct stm32_cryp_ctx),
+ .cra_alignmask = 0xf,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = stm32_cryp_cra_init,
+ .cra_ablkcipher = {
+ .min_keysize = 3 * DES_BLOCK_SIZE,
+ .max_keysize = 3 * DES_BLOCK_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = stm32_cryp_tdes_setkey,
+ .encrypt = stm32_cryp_tdes_cbc_encrypt,
+ .decrypt = stm32_cryp_tdes_cbc_decrypt,
+ }
+},
+};
+
+static const struct of_device_id stm32_dt_ids[] = {
+ { .compatible = "st,stm32f756-cryp", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_dt_ids);
+
+static int stm32_cryp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct stm32_cryp *cryp;
+ struct resource *res;
+ struct reset_control *rst;
+ int irq, ret;
+
+ cryp = devm_kzalloc(dev, sizeof(*cryp), GFP_KERNEL);
+ if (!cryp)
+ return -ENOMEM;
+
+ cryp->dev = dev;
+
+ mutex_init(&cryp->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ cryp->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(cryp->regs))
+ return PTR_ERR(cryp->regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "Cannot get IRQ resource\n");
+ return irq;
+ }
+
+ ret = devm_request_threaded_irq(dev, irq, stm32_cryp_irq,
+ stm32_cryp_irq_thread, IRQF_ONESHOT,
+ dev_name(dev), cryp);
+ if (ret) {
+ dev_err(dev, "Cannot grab IRQ\n");
+ return ret;
+ }
+
+ cryp->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(cryp->clk)) {
+ dev_err(dev, "Could not get clock\n");
+ return PTR_ERR(cryp->clk);
+ }
+
+ ret = clk_prepare_enable(cryp->clk);
+ if (ret) {
+ dev_err(cryp->dev, "Failed to enable clock\n");
+ return ret;
+ }
+
+ rst = devm_reset_control_get(dev, NULL);
+ if (!IS_ERR(rst)) {
+ reset_control_assert(rst);
+ udelay(2);
+ reset_control_deassert(rst);
+ }
+
+ platform_set_drvdata(pdev, cryp);
+
+ spin_lock(&cryp_list.lock);
+ list_add(&cryp->list, &cryp_list.dev_list);
+ spin_unlock(&cryp_list.lock);
+
+ /* Initialize crypto engine */
+ cryp->engine = crypto_engine_alloc_init(dev, 1);
+ if (!cryp->engine) {
+ dev_err(dev, "Could not init crypto engine\n");
+ ret = -ENOMEM;
+ goto err_engine1;
+ }
+
+ cryp->engine->prepare_cipher_request = stm32_cryp_prepare_cipher_req;
+ cryp->engine->cipher_one_request = stm32_cryp_cipher_one_req;
+
+ ret = crypto_engine_start(cryp->engine);
+ if (ret) {
+ dev_err(dev, "Could not start crypto engine\n");
+ goto err_engine2;
+ }
+
+ ret = crypto_register_algs(crypto_algs, ARRAY_SIZE(crypto_algs));
+ if (ret) {
+ dev_err(dev, "Could not register algs\n");
+ goto err_algs;
+ }
+
+ dev_info(dev, "Initialized\n");
+
+ return 0;
+
+err_algs:
+err_engine2:
+ crypto_engine_exit(cryp->engine);
+err_engine1:
+ spin_lock(&cryp_list.lock);
+ list_del(&cryp->list);
+ spin_unlock(&cryp_list.lock);
+
+ clk_disable_unprepare(cryp->clk);
+
+ return ret;
+}
+
+static int stm32_cryp_remove(struct platform_device *pdev)
+{
+ struct stm32_cryp *cryp = platform_get_drvdata(pdev);
+
+ if (!cryp)
+ return -ENODEV;
+
+ crypto_unregister_algs(crypto_algs, ARRAY_SIZE(crypto_algs));
+
+ crypto_engine_exit(cryp->engine);
+
+ spin_lock(&cryp_list.lock);
+ list_del(&cryp->list);
+ spin_unlock(&cryp_list.lock);
+
+ clk_disable_unprepare(cryp->clk);
+
+ return 0;
+}
+
+static struct platform_driver stm32_cryp_driver = {
+ .probe = stm32_cryp_probe,
+ .remove = stm32_cryp_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = stm32_dt_ids,
+ },
+};
+
+module_platform_driver(stm32_cryp_driver);
+
+MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 CRYP hardware driver");
+MODULE_LICENSE("GPL");
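
The stm32_cryp_check_ctr_counter() helper in the new file implements a 128-bit big-endian counter carry across four u32 words, rewriting the IV registers only when the low word is about to wrap. The carry chain in isolation, as a standalone C model (hypothetical ctr[] array; ctr[3] is the least-significant word):

#include <stdint.h>
#include <stdio.h>

static void ctr_handle_wrap(uint32_t ctr[4])
{
    /* Mirrors the driver's structure: propagate the carry manually
     * once the low word reaches its maximum. */
    if (ctr[3] == 0xFFFFFFFF) {
        ctr[3] = 0;
        ctr[2]++;
        if (!ctr[2]) {
            ctr[1]++;
            if (!ctr[1])
                ctr[0]++;
        }
    }
}

int main(void)
{
    uint32_t ctr[4] = { 0, 0, 0xFFFFFFFF, 0xFFFFFFFF };

    ctr_handle_wrap(ctr);
    printf("%08x %08x %08x %08x\n", ctr[0], ctr[1], ctr[2], ctr[3]);
    return 0; /* prints 00000000 00000001 00000000 00000000 */
}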
diff --git a/drivers/crypto/stm32/stm32_crc32.c b/drivers/crypto/stm32/stm32_crc32.c
index 090582baecfe..8f09b8430893 100644
--- a/drivers/crypto/stm32/stm32_crc32.c
+++ b/drivers/crypto/stm32/stm32_crc32.c
@@ -208,6 +208,7 @@ static struct shash_alg algs[] = {
.cra_name = "crc32",
.cra_driver_name = DRIVER_NAME,
.cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_alignmask = 3,
.cra_ctxsize = sizeof(struct stm32_crc_ctx),
@@ -229,6 +230,7 @@ static struct shash_alg algs[] = {
.cra_name = "crc32c",
.cra_driver_name = DRIVER_NAME,
.cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_alignmask = 3,
.cra_ctxsize = sizeof(struct stm32_crc_ctx),
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 78fb496ecb4e..fe2af6aa88fc 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -737,7 +737,7 @@ struct devfreq *devm_devfreq_add_device(struct device *dev,
devfreq = devfreq_add_device(dev, profile, governor_name, data);
if (IS_ERR(devfreq)) {
devres_free(ptr);
- return ERR_PTR(-ENOMEM);
+ return devfreq;
}
*ptr = devfreq;
@@ -996,7 +996,8 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
if (df->governor == governor) {
ret = 0;
goto out;
- } else if (df->governor->immutable || governor->immutable) {
+ } else if ((df->governor && df->governor->immutable) ||
+ governor->immutable) {
ret = -EINVAL;
goto out;
}
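
Both devfreq fixes are defensive-return issues. The first hunk stops devm_devfreq_add_device() from rewriting whatever error devfreq_add_device() encoded (for instance -EPROBE_DEFER) into a blanket -ENOMEM; callers should propagate the ERR_PTR they were handed. The second guards df->governor against NULL before dereferencing ->immutable. A sketch of the propagation rule, with hypothetical helpers:

	static struct widget *sketch_get_widget(struct device *dev)
	{
		struct widget *w;

		w = widget_create(dev);		/* hypothetical; returns ERR_PTR */
		if (IS_ERR(w)) {
			widget_cleanup(dev);	/* hypothetical */
			return w;		/* keep the original errno; returning
						 * ERR_PTR(-ENOMEM) here would hide
						 * -EPROBE_DEFER from the caller */
		}

		return w;
	}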
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index bc1cb284111c..12b62d0aac27 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -157,13 +157,13 @@ static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
spin_unlock_irqrestore(&dcb->poll->lock, flags);
}
-static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
+static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
struct dma_buf *dmabuf;
struct reservation_object *resv;
struct reservation_object_list *fobj;
struct dma_fence *fence_excl;
- unsigned long events;
+ __poll_t events;
unsigned shared_count, seq;
dmabuf = file->private_data;
@@ -195,7 +195,7 @@ retry:
if (fence_excl && (!(events & POLLOUT) || shared_count == 0)) {
struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
- unsigned long pevents = POLLIN;
+ __poll_t pevents = POLLIN;
if (shared_count == 0)
pevents |= POLLOUT;
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 03830634e141..8e8c4a12a0bc 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -312,7 +312,7 @@ static int sync_file_release(struct inode *inode, struct file *file)
return 0;
}
-static unsigned int sync_file_poll(struct file *file, poll_table *wait)
+static __poll_t sync_file_poll(struct file *file, poll_table *wait)
{
struct sync_file *sync_file = file->private_data;
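
These two conversions are part of the tree-wide switch of poll handlers to __poll_t, a sparse-annotated bitwise type that lets static checkers flag poll masks mixed with ordinary integers. The handler shape is unchanged; a hedged sketch with a hypothetical device:

	struct sketch_dev {
		wait_queue_head_t waitq;
		bool ready;
	};

	static __poll_t sketch_poll(struct file *file, poll_table *wait)
	{
		struct sketch_dev *sdev = file->private_data;
		__poll_t events = 0;

		poll_wait(file, &sdev->waitq, wait);

		if (READ_ONCE(sdev->ready))
			events |= POLLIN | POLLRDNORM;

		return events;
	}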
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index b52b0d55247e..97483df1f82e 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -2182,7 +2182,7 @@ static int pl08x_terminate_all(struct dma_chan *chan)
}
/* Dequeue jobs and free LLIs */
if (plchan->at) {
- pl08x_desc_free(&plchan->at->vd);
+ vchan_terminate_vdesc(&plchan->at->vd);
plchan->at = NULL;
}
/* Dequeue jobs not yet fired as well */
@@ -2193,6 +2193,13 @@ static int pl08x_terminate_all(struct dma_chan *chan)
return 0;
}
+static void pl08x_synchronize(struct dma_chan *chan)
+{
+ struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+
+ vchan_synchronize(&plchan->vc);
+}
+
static int pl08x_pause(struct dma_chan *chan)
{
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
@@ -2773,6 +2780,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
pl08x->memcpy.device_pause = pl08x_pause;
pl08x->memcpy.device_resume = pl08x_resume;
pl08x->memcpy.device_terminate_all = pl08x_terminate_all;
+ pl08x->memcpy.device_synchronize = pl08x_synchronize;
pl08x->memcpy.src_addr_widths = PL80X_DMA_BUSWIDTHS;
pl08x->memcpy.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
pl08x->memcpy.directions = BIT(DMA_MEM_TO_MEM);
@@ -2802,6 +2810,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
pl08x->slave.device_pause = pl08x_pause;
pl08x->slave.device_resume = pl08x_resume;
pl08x->slave.device_terminate_all = pl08x_terminate_all;
+ pl08x->slave.device_synchronize = pl08x_synchronize;
pl08x->slave.src_addr_widths = PL80X_DMA_BUSWIDTHS;
pl08x->slave.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
pl08x->slave.directions =
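
Wiring up device_synchronize is what makes dmaengine_synchronize() work for this controller: dmaengine_terminate_async() may return while the completion tasklet is still due to run, and the synchronize hook is the only point allowed to sleep waiting for it. A consumer-side sketch, assuming the channel and buffer were set up elsewhere:

	static void sketch_teardown(struct dma_chan *chan, struct device *dev,
				    void *cpu_buf, dma_addr_t dma_buf, size_t len)
	{
		dmaengine_terminate_async(chan);  /* non-blocking; callbacks may
						   * still be pending after this */
		dmaengine_synchronize(chan);	  /* may sleep; waits for them */

		dma_free_coherent(dev, len, cpu_buf, dma_buf);
	}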
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 6204cc32d09c..847f84a41a69 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -812,7 +812,7 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
* c->desc is NULL and exit.)
*/
if (c->desc) {
- bcm2835_dma_desc_free(&c->desc->vd);
+ vchan_terminate_vdesc(&c->desc->vd);
c->desc = NULL;
bcm2835_dma_abort(c->chan_base);
@@ -836,6 +836,13 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
return 0;
}
+static void bcm2835_dma_synchronize(struct dma_chan *chan)
+{
+ struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+
+ vchan_synchronize(&c->vc);
+}
+
static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id,
int irq, unsigned int irq_flags)
{
@@ -942,6 +949,7 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
od->ddev.device_prep_dma_memcpy = bcm2835_dma_prep_dma_memcpy;
od->ddev.device_config = bcm2835_dma_slave_config;
od->ddev.device_terminate_all = bcm2835_dma_terminate_all;
+ od->ddev.device_synchronize = bcm2835_dma_synchronize;
od->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
od->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index f7e965f63274..d9bee65a18a4 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -934,7 +934,7 @@ static bool cpp41_dma_filter_fn(struct dma_chan *chan, void *param)
BUILD_BUG_ON(ARRAY_SIZE(am335x_usb_queues_rx) !=
ARRAY_SIZE(am335x_usb_queues_tx));
- if (WARN_ON(cchan->port_num > ARRAY_SIZE(am335x_usb_queues_rx)))
+ if (WARN_ON(cchan->port_num >= ARRAY_SIZE(am335x_usb_queues_rx)))
return false;
cchan->q_num = queues[cchan->port_num].submit;
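
The cppi41 change is a plain off-by-one: valid indices into an array run from 0 to ARRAY_SIZE() - 1, so the rejection test must use >=. With > the check admitted port_num == ARRAY_SIZE(), one element past the end:

	static bool sketch_lookup(unsigned int idx, u32 *out)
	{
		static const u32 table[8] = { /* ... */ };

		if (WARN_ON(idx >= ARRAY_SIZE(table)))	/* '>' admits idx == 8 */
			return false;

		*out = table[idx];	/* idx is now provably in 0..7 */
		return true;
	}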
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index 7373b7a555ec..85820a2d69d4 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -511,7 +511,7 @@ static int jz4780_dma_terminate_all(struct dma_chan *chan)
/* Clear the DMA status and stop the transfer. */
jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), 0);
if (jzchan->desc) {
- jz4780_dma_desc_free(&jzchan->desc->vdesc);
+ vchan_terminate_vdesc(&jzchan->desc->vdesc);
jzchan->desc = NULL;
}
@@ -523,6 +523,13 @@ static int jz4780_dma_terminate_all(struct dma_chan *chan)
return 0;
}
+static void jz4780_dma_synchronize(struct dma_chan *chan)
+{
+ struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
+
+ vchan_synchronize(&jzchan->vchan);
+}
+
static int jz4780_dma_config(struct dma_chan *chan,
struct dma_slave_config *config)
{
@@ -813,6 +820,7 @@ static int jz4780_dma_probe(struct platform_device *pdev)
dd->device_prep_dma_memcpy = jz4780_dma_prep_dma_memcpy;
dd->device_config = jz4780_dma_config;
dd->device_terminate_all = jz4780_dma_terminate_all;
+ dd->device_synchronize = jz4780_dma_synchronize;
dd->device_tx_status = jz4780_dma_tx_status;
dd->device_issue_pending = jz4780_dma_issue_pending;
dd->src_addr_widths = JZ_DMA_BUSWIDTHS;
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index ec5f9d2bc820..80cc2be6483c 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -355,7 +355,7 @@ static void dmatest_callback(void *arg)
{
struct dmatest_done *done = arg;
struct dmatest_thread *thread =
- container_of(arg, struct dmatest_thread, done_wait);
+ container_of(done, struct dmatest_thread, test_done);
if (!thread->done) {
done->done = true;
wake_up_all(done->wait);
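
The dmatest fix is about container_of() discipline: the callback argument really points at the thread's test_done member, but the old call named a different member, so the computed struct address was shifted by the distance between the two fields. container_of() trusts the member name it is given. A sketch with an abridged, hypothetical struct:

	struct sketch_done {
		bool done;
	};

	struct sketch_thread {
		bool			done;
		struct sketch_done	test_done;	/* callbacks get &test_done */
		wait_queue_head_t	done_wait;
	};

	static void sketch_callback(void *arg)
	{
		struct sketch_done *done = arg;
		struct sketch_thread *thread =
			container_of(done, struct sketch_thread, test_done);

		/* Naming done_wait above would silently yield a pointer offset
		 * by offsetof(test_done) - offsetof(done_wait). */
		wake_up_all(&thread->done_wait);
	}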
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 9364a3ed345a..948df1ab5f1a 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -860,11 +860,8 @@ static int edma_terminate_all(struct dma_chan *chan)
/* Move the cyclic channel back to default queue */
if (!echan->tc && echan->edesc->cyclic)
edma_assign_channel_eventq(echan, EVENTQ_DEFAULT);
- /*
- * free the running request descriptor
- * since it is not in any of the vdesc lists
- */
- edma_desc_free(&echan->edesc->vdesc);
+
+ vchan_terminate_vdesc(&echan->edesc->vdesc);
echan->edesc = NULL;
}
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
index 0391f930aecc..25cec9c243e1 100644
--- a/drivers/dma/img-mdc-dma.c
+++ b/drivers/dma/img-mdc-dma.c
@@ -694,7 +694,6 @@ static unsigned int mdc_get_new_events(struct mdc_chan *mchan)
static int mdc_terminate_all(struct dma_chan *chan)
{
struct mdc_chan *mchan = to_mdc_chan(chan);
- struct mdc_tx_desc *mdesc;
unsigned long flags;
LIST_HEAD(head);
@@ -703,21 +702,28 @@ static int mdc_terminate_all(struct dma_chan *chan)
mdc_chan_writel(mchan, MDC_CONTROL_AND_STATUS_CANCEL,
MDC_CONTROL_AND_STATUS);
- mdesc = mchan->desc;
- mchan->desc = NULL;
+ if (mchan->desc) {
+ vchan_terminate_vdesc(&mchan->desc->vd);
+ mchan->desc = NULL;
+ }
vchan_get_all_descriptors(&mchan->vc, &head);
mdc_get_new_events(mchan);
spin_unlock_irqrestore(&mchan->vc.lock, flags);
- if (mdesc)
- mdc_desc_free(&mdesc->vd);
vchan_dma_desc_free_list(&mchan->vc, &head);
return 0;
}
+static void mdc_synchronize(struct dma_chan *chan)
+{
+ struct mdc_chan *mchan = to_mdc_chan(chan);
+
+ vchan_synchronize(&mchan->vc);
+}
+
static int mdc_slave_config(struct dma_chan *chan,
struct dma_slave_config *config)
{
@@ -952,6 +958,7 @@ static int mdc_dma_probe(struct platform_device *pdev)
mdma->dma_dev.device_tx_status = mdc_tx_status;
mdma->dma_dev.device_issue_pending = mdc_issue_pending;
mdma->dma_dev.device_terminate_all = mdc_terminate_all;
+ mdma->dma_dev.device_synchronize = mdc_synchronize;
mdma->dma_dev.device_config = mdc_slave_config;
mdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 2184881afe76..e7db24c67030 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1939,4 +1939,10 @@ module_platform_driver(sdma_driver);
MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX SDMA driver");
+#if IS_ENABLED(CONFIG_SOC_IMX6Q)
+MODULE_FIRMWARE("imx/sdma/sdma-imx6q.bin");
+#endif
+#if IS_ENABLED(CONFIG_SOC_IMX7D)
+MODULE_FIRMWARE("imx/sdma/sdma-imx7d.bin");
+#endif
MODULE_LICENSE("GPL");
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 58d4ccd33672..8b5b23a8ace9 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -597,7 +597,6 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
for (i = 0; i < active && !seen_current; i++) {
struct dma_async_tx_descriptor *tx;
- smp_read_barrier_depends();
prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
desc = ioat_get_ring_ent(ioat_chan, idx + i);
dump_desc_dbg(ioat_chan, desc);
@@ -715,7 +714,6 @@ static void ioat_abort_descs(struct ioatdma_chan *ioat_chan)
for (i = 1; i < active; i++) {
struct dma_async_tx_descriptor *tx;
- smp_read_barrier_depends();
prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
desc = ioat_get_ring_ent(ioat_chan, idx + i);
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index 01d2a750a621..26b67455208f 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -719,7 +719,7 @@ static int k3_dma_terminate_all(struct dma_chan *chan)
c->phy = NULL;
p->vchan = NULL;
if (p->ds_run) {
- k3_dma_free_desc(&p->ds_run->vd);
+ vchan_terminate_vdesc(&p->ds_run->vd);
p->ds_run = NULL;
}
p->ds_done = NULL;
@@ -730,6 +730,13 @@ static int k3_dma_terminate_all(struct dma_chan *chan)
return 0;
}
+static void k3_dma_synchronize(struct dma_chan *chan)
+{
+ struct k3_dma_chan *c = to_k3_chan(chan);
+
+ vchan_synchronize(&c->vc);
+}
+
static int k3_dma_transfer_pause(struct dma_chan *chan)
{
struct k3_dma_chan *c = to_k3_chan(chan);
@@ -868,6 +875,7 @@ static int k3_dma_probe(struct platform_device *op)
d->slave.device_pause = k3_dma_transfer_pause;
d->slave.device_resume = k3_dma_transfer_resume;
d->slave.device_terminate_all = k3_dma_terminate_all;
+ d->slave.device_synchronize = k3_dma_synchronize;
d->slave.copy_align = DMAENGINE_ALIGN_8_BYTES;
/* init virtual channel */
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c
index 5ba5714d0b7c..94d7bd7d2880 100644
--- a/drivers/dma/mic_x100_dma.c
+++ b/drivers/dma/mic_x100_dma.c
@@ -480,9 +480,7 @@ static int mic_dma_setup_irq(struct mic_dma_chan *ch)
to_mbus_hw_ops(ch)->request_threaded_irq(to_mbus_device(ch),
mic_dma_intr_handler, mic_dma_thread_fn,
"mic dma_channel", ch, ch->ch_num);
- if (IS_ERR(ch->cookie))
- return PTR_ERR(ch->cookie);
- return 0;
+ return PTR_ERR_OR_ZERO(ch->cookie);
}
static inline void mic_dma_free_irq(struct mic_dma_chan *ch)
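
PTR_ERR_OR_ZERO() from <linux/err.h> collapses the usual three-line tail (IS_ERR() test, PTR_ERR() extraction, return 0) into one expression, which is why the mic_x100 hunk is a pure simplification. A sketch, assuming a hypothetical helper that returns an ERR_PTR on failure:

	struct sketch_chan {
		void *cookie;
	};

	static int sketch_setup_irq(struct sketch_chan *ch)
	{
		ch->cookie = sketch_request_irq(ch);	/* hypothetical, ERR_PTR-style */

		return PTR_ERR_OR_ZERO(ch->cookie);	/* 0 if valid, -errno if not */
	}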
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index f6dd849159d8..d21c19822feb 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -1311,7 +1311,7 @@ static int omap_dma_terminate_all(struct dma_chan *chan)
* c->desc is NULL and exit.)
*/
if (c->desc) {
- omap_dma_desc_free(&c->desc->vd);
+ vchan_terminate_vdesc(&c->desc->vd);
c->desc = NULL;
/* Avoid stopping the dma twice */
if (!c->paused)
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index e3669850aef4..963cc5228d05 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -50,6 +50,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
+#include <linux/of_device.h>
#include <linux/property.h>
#include <linux/delay.h>
#include <linux/acpi.h>
@@ -104,6 +105,10 @@ static unsigned int nr_desc_prm;
module_param(nr_desc_prm, uint, 0644);
MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0)");
+enum hidma_cap {
+ HIDMA_MSI_CAP = 1,
+ HIDMA_IDENTITY_CAP,
+};
/* process completed descriptors */
static void hidma_process_completed(struct hidma_chan *mchan)
@@ -736,25 +741,12 @@ static int hidma_request_msi(struct hidma_dev *dmadev,
#endif
}
-static bool hidma_msi_capable(struct device *dev)
+static bool hidma_test_capability(struct device *dev, enum hidma_cap test_cap)
{
- struct acpi_device *adev = ACPI_COMPANION(dev);
- const char *of_compat;
- int ret = -EINVAL;
-
- if (!adev || acpi_disabled) {
- ret = device_property_read_string(dev, "compatible",
- &of_compat);
- if (ret)
- return false;
+ enum hidma_cap cap;
- ret = strcmp(of_compat, "qcom,hidma-1.1");
- } else {
-#ifdef CONFIG_ACPI
- ret = strcmp(acpi_device_hid(adev), "QCOM8062");
-#endif
- }
- return ret == 0;
+ cap = (enum hidma_cap) device_get_match_data(dev);
+ return cap ? ((cap & test_cap) > 0) : 0;
}
static int hidma_probe(struct platform_device *pdev)
@@ -834,8 +826,7 @@ static int hidma_probe(struct platform_device *pdev)
* Determine the MSI capability of the platform. Old HW doesn't
* support MSI.
*/
- msi = hidma_msi_capable(&pdev->dev);
-
+ msi = hidma_test_capability(&pdev->dev, HIDMA_MSI_CAP);
device_property_read_u32(&pdev->dev, "desc-count",
&dmadev->nr_descriptors);
@@ -848,7 +839,10 @@ static int hidma_probe(struct platform_device *pdev)
if (!dmadev->nr_descriptors)
dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC;
- dmadev->chidx = readl(dmadev->dev_trca + 0x28);
+ if (hidma_test_capability(&pdev->dev, HIDMA_IDENTITY_CAP))
+ dmadev->chidx = readl(dmadev->dev_trca + 0x40);
+ else
+ dmadev->chidx = readl(dmadev->dev_trca + 0x28);
/* Set DMA mask to 64 bits. */
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
@@ -953,7 +947,8 @@ static int hidma_remove(struct platform_device *pdev)
#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id hidma_acpi_ids[] = {
{"QCOM8061"},
- {"QCOM8062"},
+ {"QCOM8062", HIDMA_MSI_CAP},
+ {"QCOM8063", (HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP)},
{},
};
MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
@@ -961,7 +956,9 @@ MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
static const struct of_device_id hidma_match[] = {
{.compatible = "qcom,hidma-1.0",},
- {.compatible = "qcom,hidma-1.1",},
+ {.compatible = "qcom,hidma-1.1", .data = (void *)(HIDMA_MSI_CAP),},
+ {.compatible = "qcom,hidma-1.2",
+ .data = (void *)(HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP),},
{},
};
MODULE_DEVICE_TABLE(of, hidma_match);
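
The rework keys both firmware interfaces off their match data: the OF table's .data and the ACPI table's .driver_data now carry the same capability bits, and device_get_match_data() returns whichever table actually matched, so one helper serves DT and ACPI alike. A sketch of the pattern with hypothetical capability flags:

	enum sketch_cap {
		SKETCH_CAP_MSI		= BIT(0),
		SKETCH_CAP_IDENTITY	= BIT(1),
	};

	static const struct of_device_id sketch_of_match[] = {
		{ .compatible = "vendor,ip-1.0" },		/* no extras */
		{ .compatible = "vendor,ip-1.1",
		  .data = (void *)(SKETCH_CAP_MSI | SKETCH_CAP_IDENTITY) },
		{ /* sentinel */ },
	};

	static bool sketch_has_cap(struct device *dev, enum sketch_cap cap)
	{
		unsigned long caps = (unsigned long)device_get_match_data(dev);

		return caps & cap;
	}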
diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c
index 4999e266b2de..7c6e2ff212a2 100644
--- a/drivers/dma/qcom/hidma_ll.c
+++ b/drivers/dma/qcom/hidma_ll.c
@@ -393,6 +393,8 @@ static int hidma_ll_reset(struct hidma_lldev *lldev)
*/
static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause)
{
+ unsigned long irqflags;
+
if (cause & HIDMA_ERR_INT_MASK) {
dev_err(lldev->dev, "error 0x%x, disabling...\n",
cause);
@@ -410,6 +412,10 @@ static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause)
return;
}
+ spin_lock_irqsave(&lldev->lock, irqflags);
+ writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+ spin_unlock_irqrestore(&lldev->lock, irqflags);
+
/*
* Fine tuned for this HW...
*
@@ -421,9 +427,6 @@ static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause)
* Try to consume as many EVREs as possible.
*/
hidma_handle_tre_completion(lldev);
-
- /* We consumed TREs or there are pending TREs or EVREs. */
- writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
}
irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
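
Moving the HIDMA_EVCA_IRQ_CLR_REG write ahead of completion processing follows the usual ack-then-handle rule for cause registers: if the causes were cleared only after draining the ring, a completion arriving during the drain could have its bit wiped without ever being looked at. A sketch of the ordering, with hypothetical names:

	static void sketch_isr_bottom(struct sketch_ll *ll, u32 cause)
	{
		unsigned long flags;

		spin_lock_irqsave(&ll->lock, flags);
		writel_relaxed(cause, ll->base + SKETCH_IRQ_CLR);  /* ack first */
		spin_unlock_irqrestore(&ll->lock, flags);

		/* Events from here on re-raise the interrupt instead of
		 * being silently acked by a later clear. */
		sketch_drain_completions(ll);	/* hypothetical */
	}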
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
index 7335e2eb9b72..000c7019ca7d 100644
--- a/drivers/dma/qcom/hidma_mgmt.c
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -17,6 +17,7 @@
#include <linux/acpi.h>
#include <linux/of.h>
#include <linux/property.h>
+#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/module.h>
@@ -356,67 +357,37 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
{
struct platform_device *pdev_parent = of_find_device_by_node(np);
struct platform_device_info pdevinfo;
- struct of_phandle_args out_irq;
struct device_node *child;
- struct resource *res = NULL;
- const __be32 *cell;
- int ret = 0, size, i, num;
- u64 addr, addr_size;
+ struct resource *res;
+ int ret = 0;
+
+ /* allocate a resource array */
+ res = kcalloc(3, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
for_each_available_child_of_node(np, child) {
- struct resource *res_iter;
struct platform_device *new_pdev;
- cell = of_get_property(child, "reg", &size);
- if (!cell) {
- ret = -EINVAL;
+ ret = of_address_to_resource(child, 0, &res[0]);
+ if (ret)
goto out;
- }
-
- size /= sizeof(*cell);
- num = size /
- (of_n_addr_cells(child) + of_n_size_cells(child)) + 1;
- /* allocate a resource array */
- res = kcalloc(num, sizeof(*res), GFP_KERNEL);
- if (!res) {
- ret = -ENOMEM;
+ ret = of_address_to_resource(child, 1, &res[1]);
+ if (ret)
goto out;
- }
-
- /* read each reg value */
- i = 0;
- res_iter = res;
- while (i < size) {
- addr = of_read_number(&cell[i],
- of_n_addr_cells(child));
- i += of_n_addr_cells(child);
-
- addr_size = of_read_number(&cell[i],
- of_n_size_cells(child));
- i += of_n_size_cells(child);
-
- res_iter->start = addr;
- res_iter->end = res_iter->start + addr_size - 1;
- res_iter->flags = IORESOURCE_MEM;
- res_iter++;
- }
- ret = of_irq_parse_one(child, 0, &out_irq);
- if (ret)
+ ret = of_irq_to_resource(child, 0, &res[2]);
+ if (ret <= 0)
goto out;
- res_iter->start = irq_create_of_mapping(&out_irq);
- res_iter->name = "hidma event irq";
- res_iter->flags = IORESOURCE_IRQ;
-
memset(&pdevinfo, 0, sizeof(pdevinfo));
pdevinfo.fwnode = &child->fwnode;
pdevinfo.parent = pdev_parent ? &pdev_parent->dev : NULL;
pdevinfo.name = child->name;
pdevinfo.id = object_counter++;
pdevinfo.res = res;
- pdevinfo.num_res = num;
+ pdevinfo.num_res = 3;
pdevinfo.data = NULL;
pdevinfo.size_data = 0;
pdevinfo.dma_mask = DMA_BIT_MASK(64);
@@ -434,8 +405,6 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
*/
of_msi_configure(&new_pdev->dev, child);
of_node_put(child);
- kfree(res);
- res = NULL;
}
out:
kfree(res);
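
The two OF helpers used above follow different success conventions, which the bail-out tests must mirror: of_address_to_resource() returns 0 on success and a negative errno on failure, while of_irq_to_resource() returns the Linux IRQ number, so only a value greater than zero means success. A sketch of checking both:

	static int sketch_parse_child(struct device_node *child,
				      struct resource *res)
	{
		int ret;

		ret = of_address_to_resource(child, 0, &res[0]);
		if (ret)			/* 0 == success here */
			return ret;

		ret = of_irq_to_resource(child, 0, &res[1]);
		if (ret <= 0)			/* > 0 (the IRQ number) == success */
			return ret ? ret : -EINVAL;

		return 0;
	}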
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index f04c4702d98b..cd92d696bcf9 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -732,7 +732,7 @@ static int s3c24xx_dma_terminate_all(struct dma_chan *chan)
/* Dequeue current job */
if (s3cchan->at) {
- s3c24xx_dma_desc_free(&s3cchan->at->vd);
+ vchan_terminate_vdesc(&s3cchan->at->vd);
s3cchan->at = NULL;
}
@@ -744,6 +744,13 @@ unlock:
return ret;
}
+static void s3c24xx_dma_synchronize(struct dma_chan *chan)
+{
+ struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
+
+ vchan_synchronize(&s3cchan->vc);
+}
+
static void s3c24xx_dma_free_chan_resources(struct dma_chan *chan)
{
/* Ensure all queued descriptors are freed */
@@ -1282,6 +1289,7 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending;
s3cdma->memcpy.device_config = s3c24xx_dma_set_runtime_config;
s3cdma->memcpy.device_terminate_all = s3c24xx_dma_terminate_all;
+ s3cdma->memcpy.device_synchronize = s3c24xx_dma_synchronize;
/* Initialize slave engine for SoC internal dedicated peripherals */
dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask);
@@ -1296,6 +1304,7 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic;
s3cdma->slave.device_config = s3c24xx_dma_set_runtime_config;
s3cdma->slave.device_terminate_all = s3c24xx_dma_terminate_all;
+ s3cdma->slave.device_synchronize = s3c24xx_dma_synchronize;
s3cdma->slave.filter.map = pdata->slave_map;
s3cdma->slave.filter.mapcnt = pdata->slavecnt;
s3cdma->slave.filter.fn = s3c24xx_dma_filter;
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 2b2c7db3e480..e3ff162c03fc 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -10,6 +10,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
@@ -741,6 +742,41 @@ static int rcar_dmac_fill_hwdesc(struct rcar_dmac_chan *chan,
/* -----------------------------------------------------------------------------
* Stop and reset
*/
+static void rcar_dmac_chcr_de_barrier(struct rcar_dmac_chan *chan)
+{
+ u32 chcr;
+ unsigned int i;
+
+ /*
+ * Ensure that the setting of the DE bit is actually 0 after
+ * clearing it.
+ */
+ for (i = 0; i < 1024; i++) {
+ chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
+ if (!(chcr & RCAR_DMACHCR_DE))
+ return;
+ udelay(1);
+ }
+
+ dev_err(chan->chan.device->dev, "CHCR DE check error\n");
+}
+
+static void rcar_dmac_sync_tcr(struct rcar_dmac_chan *chan)
+{
+ u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
+
+ if (!(chcr & RCAR_DMACHCR_DE))
+ return;
+
+ /* set DE=0 and flush remaining data */
+ rcar_dmac_chan_write(chan, RCAR_DMACHCR, (chcr & ~RCAR_DMACHCR_DE));
+
+ /* make sure all remaining data was flushed */
+ rcar_dmac_chcr_de_barrier(chan);
+
+ /* back DE */
+ rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr);
+}
static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan)
{
@@ -749,6 +785,7 @@ static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan)
chcr &= ~(RCAR_DMACHCR_DSE | RCAR_DMACHCR_DSIE | RCAR_DMACHCR_IE |
RCAR_DMACHCR_TE | RCAR_DMACHCR_DE);
rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr);
+ rcar_dmac_chcr_de_barrier(chan);
}
static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan)
@@ -1309,8 +1346,11 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
residue += chunk->size;
}
+ if (desc->direction == DMA_DEV_TO_MEM)
+ rcar_dmac_sync_tcr(chan);
+
/* Add the residue for the current chunk. */
- residue += rcar_dmac_chan_read(chan, RCAR_DMATCR) << desc->xfer_shift;
+ residue += rcar_dmac_chan_read(chan, RCAR_DMATCRB) << desc->xfer_shift;
return residue;
}
@@ -1481,6 +1521,8 @@ static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev)
if (chcr & RCAR_DMACHCR_TE)
mask |= RCAR_DMACHCR_DE;
rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr & ~mask);
+ if (mask & RCAR_DMACHCR_DE)
+ rcar_dmac_chcr_de_barrier(chan);
if (chcr & RCAR_DMACHCR_DSE)
ret |= rcar_dmac_isr_desc_stage_end(chan);
@@ -1615,22 +1657,6 @@ static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec,
* Power management
*/
-#ifdef CONFIG_PM_SLEEP
-static int rcar_dmac_sleep_suspend(struct device *dev)
-{
- /*
- * TODO: Wait for the current transfer to complete and stop the device.
- */
- return 0;
-}
-
-static int rcar_dmac_sleep_resume(struct device *dev)
-{
- /* TODO: Resume transfers, if any. */
- return 0;
-}
-#endif
-
#ifdef CONFIG_PM
static int rcar_dmac_runtime_suspend(struct device *dev)
{
@@ -1646,7 +1672,13 @@ static int rcar_dmac_runtime_resume(struct device *dev)
#endif
static const struct dev_pm_ops rcar_dmac_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(rcar_dmac_sleep_suspend, rcar_dmac_sleep_resume)
+ /*
+ * TODO for system sleep/resume:
+ * - Wait for the current transfer to complete and stop the device,
+ * - Resume transfers, if any.
+ */
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume,
NULL)
};
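
Rather than carrying TODO-only sleep callbacks, the driver now routes system sleep through its runtime-PM handlers with pm_runtime_force_suspend()/pm_runtime_force_resume(); the _LATE_ phase ensures DMA client drivers have already been suspended before the controller's clocks go away. A sketch of the resulting shape:

	static int sketch_runtime_suspend(struct device *dev)
	{
		/* gate clocks, save context, ... */
		return 0;
	}

	static int sketch_runtime_resume(struct device *dev)
	{
		/* ungate clocks, restore context, ... */
		return 0;
	}

	static const struct dev_pm_ops sketch_pm_ops = {
		SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
					     pm_runtime_force_resume)
		SET_RUNTIME_PM_OPS(sketch_runtime_suspend,
				   sketch_runtime_resume, NULL)
	};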
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index b652071a2096..b106e8a60af6 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -710,7 +710,7 @@ static int sprd_dma_config(struct dma_chan *chan, struct sprd_dma_desc *sdesc,
return 0;
}
-struct dma_async_tx_descriptor *
+static struct dma_async_tx_descriptor *
sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
size_t len, unsigned long flags)
{
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c
index d5db0f6e1ff8..4dbb30cf94ac 100644
--- a/drivers/dma/stm32-dmamux.c
+++ b/drivers/dma/stm32-dmamux.c
@@ -253,9 +253,6 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
iomem = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(iomem))
return PTR_ERR(iomem);
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index b9d75a54c896..9a558e30c461 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -353,7 +353,8 @@ static int tegra_dma_slave_config(struct dma_chan *dc,
}
memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
- if (tdc->slave_id == TEGRA_APBDMA_SLAVE_ID_INVALID) {
+ if (tdc->slave_id == TEGRA_APBDMA_SLAVE_ID_INVALID &&
+ sconfig->device_fc) {
if (sconfig->slave_id > TEGRA_APBDMA_CSR_REQ_SEL_MASK)
return -EINVAL;
tdc->slave_id = sconfig->slave_id;
@@ -970,8 +971,13 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;
- csr |= TEGRA_APBDMA_CSR_ONCE | TEGRA_APBDMA_CSR_FLOW;
- csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
+ csr |= TEGRA_APBDMA_CSR_ONCE;
+
+ if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) {
+ csr |= TEGRA_APBDMA_CSR_FLOW;
+ csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
+ }
+
if (flags & DMA_PREP_INTERRUPT)
csr |= TEGRA_APBDMA_CSR_IE_EOC;
@@ -1110,10 +1116,13 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;
- csr |= TEGRA_APBDMA_CSR_FLOW;
+ if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) {
+ csr |= TEGRA_APBDMA_CSR_FLOW;
+ csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
+ }
+
if (flags & DMA_PREP_INTERRUPT)
csr |= TEGRA_APBDMA_CSR_IE_EOC;
- csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index 7df910e7c348..9272b173c746 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -54,7 +54,15 @@ struct ti_am335x_xbar_map {
static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u8 val)
{
- writeb_relaxed(val, iomem + event);
+ /*
+ * TPCC_EVT_MUX_60_63 register layout is different than the
+ * rest, in the sense, that event 63 is mapped to lowest byte
+ * and event 60 is mapped to highest, handle it separately.
+ */
+ if (event >= 60 && event <= 63)
+ writeb_relaxed(val, iomem + (63 - event % 4));
+ else
+ writeb_relaxed(val, iomem + event);
}
static void ti_am335x_xbar_free(struct device *dev, void *route_data)
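
The byte swap for the TPCC_EVT_MUX_60_63 register is easiest to see worked out: event % 4 gives 0..3 for events 60..63, and 63 - (event % 4) reverses it, so event 60 lands in the register's highest byte and event 63 in its lowest:

	/* event   event % 4   63 - event % 4 (byte offset written)
	 *   60        0                63
	 *   61        1                62
	 *   62        2                61
	 *   63        3                60
	 */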
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 896bafb7a532..395c698edb4d 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -422,7 +422,7 @@ static int td_alloc_chan_resources(struct dma_chan *chan)
break;
else {
dev_err(chan2dev(chan),
- "Couldnt allocate any descriptors\n");
+ "Couldn't allocate any descriptors\n");
return -ENOMEM;
}
}
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
index 545e97279083..88ad8ed2a8d6 100644
--- a/drivers/dma/virt-dma.c
+++ b/drivers/dma/virt-dma.c
@@ -107,10 +107,7 @@ static void vchan_complete(unsigned long arg)
dmaengine_desc_get_callback(&vd->tx, &cb);
list_del(&vd->node);
- if (dmaengine_desc_test_reuse(&vd->tx))
- list_add(&vd->node, &vc->desc_allocated);
- else
- vc->desc_free(vd);
+ vchan_vdesc_fini(vd);
dmaengine_desc_callback_invoke(&cb, NULL);
}
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index 3f776a46a29c..b09b75ab0751 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -35,6 +35,7 @@ struct virt_dma_chan {
struct list_head desc_completed;
struct virt_dma_desc *cyclic;
+ struct virt_dma_desc *vd_terminated;
};
static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
@@ -104,6 +105,20 @@ static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
}
/**
+ * vchan_vdesc_fini - Free or reuse a descriptor
+ * @vd: virtual descriptor to free/reuse
+ */
+static inline void vchan_vdesc_fini(struct virt_dma_desc *vd)
+{
+ struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+
+ if (dmaengine_desc_test_reuse(&vd->tx))
+ list_add(&vd->node, &vc->desc_allocated);
+ else
+ vc->desc_free(vd);
+}
+
+/**
* vchan_cyclic_callback - report the completion of a period
* @vd: virtual descriptor
*/
@@ -116,6 +131,25 @@ static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
}
/**
+ * vchan_terminate_vdesc - Disable pending cyclic callback, defer descriptor free
+ * @vd: virtual descriptor to be terminated
+ *
+ * vc.lock must be held by caller
+ */
+static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd)
+{
+ struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+
+ /* free up stuck descriptor */
+ if (vc->vd_terminated)
+ vchan_vdesc_fini(vc->vd_terminated);
+
+ vc->vd_terminated = vd;
+ if (vc->cyclic == vd)
+ vc->cyclic = NULL;
+}
+
+/**
* vchan_next_desc - peek at the next descriptor to be processed
* @vc: virtual channel to obtain descriptor from
*
@@ -168,10 +202,20 @@ static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
* Makes sure that all scheduled or active callbacks have finished running. For
* proper operation the caller has to ensure that no new callbacks are scheduled
* after the invocation of this function started.
+ * Also free up the terminated descriptor to prevent a memory leak.
*/
static inline void vchan_synchronize(struct virt_dma_chan *vc)
{
+ unsigned long flags;
+
tasklet_kill(&vc->task);
+
+ spin_lock_irqsave(&vc->lock, flags);
+ if (vc->vd_terminated) {
+ vchan_vdesc_fini(vc->vd_terminated);
+ vc->vd_terminated = NULL;
+ }
+ spin_unlock_irqrestore(&vc->lock, flags);
}
#endif
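
Taken together, the virt-dma additions give every converted driver the same two-part shape: terminate_all() parks the in-flight descriptor with vchan_terminate_vdesc() instead of freeing it immediately (the completion tasklet may still reference it), and the device_synchronize hook reaps it via vchan_synchronize(). A driver-side sketch with hypothetical foo_* types:

	static int foo_terminate_all(struct dma_chan *chan)
	{
		struct foo_chan *fc = to_foo_chan(chan);	/* hypothetical */
		unsigned long flags;
		LIST_HEAD(head);

		spin_lock_irqsave(&fc->vc.lock, flags);
		foo_hw_stop(fc);				/* hypothetical */
		if (fc->desc) {
			vchan_terminate_vdesc(&fc->desc->vd);	/* defer the free */
			fc->desc = NULL;
		}
		vchan_get_all_descriptors(&fc->vc, &head);
		spin_unlock_irqrestore(&fc->vc.lock, flags);

		vchan_dma_desc_free_list(&fc->vc, &head);
		return 0;
	}

	static void foo_synchronize(struct dma_chan *chan)
	{
		struct foo_chan *fc = to_foo_chan(chan);

		vchan_synchronize(&fc->vc);  /* kills the tasklet, frees the
					      * descriptor parked above */
	}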
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 5eef13380ca8..27b523530c4a 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -99,7 +99,9 @@
#define XILINX_DMA_REG_FRMPTR_STS 0x0024
#define XILINX_DMA_REG_PARK_PTR 0x0028
#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT 8
+#define XILINX_DMA_PARK_PTR_WR_REF_MASK GENMASK(12, 8)
#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT 0
+#define XILINX_DMA_PARK_PTR_RD_REF_MASK GENMASK(4, 0)
#define XILINX_DMA_REG_VDMA_VERSION 0x002c
/* Register Direct Mode Registers */
@@ -163,6 +165,7 @@
#define XILINX_DMA_BD_SOP BIT(27)
#define XILINX_DMA_BD_EOP BIT(26)
#define XILINX_DMA_COALESCE_MAX 255
+#define XILINX_DMA_NUM_DESCS 255
#define XILINX_DMA_NUM_APP_WORDS 5
/* Multi-Channel DMA Descriptor offsets*/
@@ -211,8 +214,8 @@ struct xilinx_vdma_desc_hw {
* @next_desc_msb: MSB of Next Descriptor Pointer @0x04
* @buf_addr: Buffer address @0x08
* @buf_addr_msb: MSB of Buffer address @0x0C
- * @pad1: Reserved @0x10
- * @pad2: Reserved @0x14
+ * @mcdma_control: Control field for mcdma @0x10
+ * @vsize_stride: Vsize and Stride field for mcdma @0x14
* @control: Control field @0x18
* @status: Status field @0x1C
* @app: APP Fields @0x20 - 0x30
@@ -232,11 +235,11 @@ struct xilinx_axidma_desc_hw {
/**
* struct xilinx_cdma_desc_hw - Hardware Descriptor
* @next_desc: Next Descriptor Pointer @0x00
- * @next_descmsb: Next Descriptor Pointer MSB @0x04
+ * @next_desc_msb: Next Descriptor Pointer MSB @0x04
* @src_addr: Source address @0x08
- * @src_addrmsb: Source address MSB @0x0C
+ * @src_addr_msb: Source address MSB @0x0C
* @dest_addr: Destination address @0x10
- * @dest_addrmsb: Destination address MSB @0x14
+ * @dest_addr_msb: Destination address MSB @0x14
* @control: Control field @0x18
* @status: Status field @0x1C
*/
@@ -310,6 +313,7 @@ struct xilinx_dma_tx_descriptor {
* @pending_list: Descriptors waiting
* @active_list: Descriptors ready to submit
* @done_list: Complete descriptors
+ * @free_seg_list: Free descriptors
* @common: DMA common channel
* @desc_pool: Descriptors pool
* @dev: The dma device
@@ -321,6 +325,7 @@ struct xilinx_dma_tx_descriptor {
* @cyclic: Check for cyclic transfers.
* @genlock: Support genlock mode
* @err: Channel has errors
+ * @idle: Check for channel idle
* @tasklet: Cleanup work after irq
* @config: Device configuration info
* @flush_on_fsync: Flush on Frame sync
@@ -329,9 +334,12 @@ struct xilinx_dma_tx_descriptor {
* @desc_submitcount: Descriptor h/w submitted count
* @residue: Residue for AXI DMA
* @seg_v: Statically allocated segments base
+ * @seg_p: Physical allocated segments base
* @cyclic_seg_v: Statically allocated segment base for cyclic transfers
+ * @cyclic_seg_p: Physical allocated segments base for cyclic dma
* @start_transfer: Differentiate b/w DMA IP's transfer
* @stop_transfer: Differentiate b/w DMA IP's quiesce
+ * @tdest: TDEST value for mcdma
*/
struct xilinx_dma_chan {
struct xilinx_dma_device *xdev;
@@ -341,6 +349,7 @@ struct xilinx_dma_chan {
struct list_head pending_list;
struct list_head active_list;
struct list_head done_list;
+ struct list_head free_seg_list;
struct dma_chan common;
struct dma_pool *desc_pool;
struct device *dev;
@@ -352,6 +361,7 @@ struct xilinx_dma_chan {
bool cyclic;
bool genlock;
bool err;
+ bool idle;
struct tasklet_struct tasklet;
struct xilinx_vdma_config config;
bool flush_on_fsync;
@@ -360,18 +370,20 @@ struct xilinx_dma_chan {
u32 desc_submitcount;
u32 residue;
struct xilinx_axidma_tx_segment *seg_v;
+ dma_addr_t seg_p;
struct xilinx_axidma_tx_segment *cyclic_seg_v;
+ dma_addr_t cyclic_seg_p;
void (*start_transfer)(struct xilinx_dma_chan *chan);
int (*stop_transfer)(struct xilinx_dma_chan *chan);
u16 tdest;
};
/**
- * enum xdma_ip_type: DMA IP type.
+ * enum xdma_ip_type - DMA IP type.
*
- * XDMA_TYPE_AXIDMA: Axi dma ip.
- * XDMA_TYPE_CDMA: Axi cdma ip.
- * XDMA_TYPE_VDMA: Axi vdma ip.
+ * @XDMA_TYPE_AXIDMA: Axi dma ip.
+ * @XDMA_TYPE_CDMA: Axi cdma ip.
+ * @XDMA_TYPE_VDMA: Axi vdma ip.
*
*/
enum xdma_ip_type {
@@ -580,18 +592,32 @@ xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
static struct xilinx_axidma_tx_segment *
xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
- struct xilinx_axidma_tx_segment *segment;
- dma_addr_t phys;
-
- segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
- if (!segment)
- return NULL;
+ struct xilinx_axidma_tx_segment *segment = NULL;
+ unsigned long flags;
- segment->phys = phys;
+ spin_lock_irqsave(&chan->lock, flags);
+ if (!list_empty(&chan->free_seg_list)) {
+ segment = list_first_entry(&chan->free_seg_list,
+ struct xilinx_axidma_tx_segment,
+ node);
+ list_del(&segment->node);
+ }
+ spin_unlock_irqrestore(&chan->lock, flags);
return segment;
}
+static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
+{
+ u32 next_desc = hw->next_desc;
+ u32 next_desc_msb = hw->next_desc_msb;
+
+ memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));
+
+ hw->next_desc = next_desc;
+ hw->next_desc_msb = next_desc_msb;
+}
+
/**
* xilinx_dma_free_tx_segment - Free transaction segment
* @chan: Driver specific DMA channel
@@ -600,7 +626,9 @@ xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
struct xilinx_axidma_tx_segment *segment)
{
- dma_pool_free(chan->desc_pool, segment, segment->phys);
+ xilinx_dma_clean_hw_desc(&segment->hw);
+
+ list_add_tail(&segment->node, &chan->free_seg_list);
}
/**
@@ -725,16 +753,31 @@ static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
{
struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ unsigned long flags;
dev_dbg(chan->dev, "Free all channel resources.\n");
xilinx_dma_free_descriptors(chan);
+
if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
- xilinx_dma_free_tx_segment(chan, chan->cyclic_seg_v);
- xilinx_dma_free_tx_segment(chan, chan->seg_v);
+ spin_lock_irqsave(&chan->lock, flags);
+ INIT_LIST_HEAD(&chan->free_seg_list);
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ /* Free memory that is allocated for BD */
+ dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
+ XILINX_DMA_NUM_DESCS, chan->seg_v,
+ chan->seg_p);
+
+ /* Free Memory that is allocated for cyclic DMA Mode */
+ dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
+ chan->cyclic_seg_v, chan->cyclic_seg_p);
+ }
+
+ if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) {
+ dma_pool_destroy(chan->desc_pool);
+ chan->desc_pool = NULL;
}
- dma_pool_destroy(chan->desc_pool);
- chan->desc_pool = NULL;
}
/**
@@ -817,6 +860,7 @@ static void xilinx_dma_do_tasklet(unsigned long data)
static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
{
struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ int i;
/* Has this channel already been allocated? */
if (chan->desc_pool)
@@ -827,11 +871,30 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
* for meeting Xilinx VDMA specification requirement.
*/
if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
- chan->desc_pool = dma_pool_create("xilinx_dma_desc_pool",
- chan->dev,
- sizeof(struct xilinx_axidma_tx_segment),
- __alignof__(struct xilinx_axidma_tx_segment),
- 0);
+ /* Allocate the buffer descriptors. */
+ chan->seg_v = dma_zalloc_coherent(chan->dev,
+ sizeof(*chan->seg_v) *
+ XILINX_DMA_NUM_DESCS,
+ &chan->seg_p, GFP_KERNEL);
+ if (!chan->seg_v) {
+ dev_err(chan->dev,
+ "unable to allocate channel %d descriptors\n",
+ chan->id);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
+ chan->seg_v[i].hw.next_desc =
+ lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
+ ((i + 1) % XILINX_DMA_NUM_DESCS));
+ chan->seg_v[i].hw.next_desc_msb =
+ upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
+ ((i + 1) % XILINX_DMA_NUM_DESCS));
+ chan->seg_v[i].phys = chan->seg_p +
+ sizeof(*chan->seg_v) * i;
+ list_add_tail(&chan->seg_v[i].node,
+ &chan->free_seg_list);
+ }
} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
chan->dev,
@@ -846,7 +909,8 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
0);
}
- if (!chan->desc_pool) {
+ if (!chan->desc_pool &&
+ (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA)) {
dev_err(chan->dev,
"unable to allocate channel %d descriptor pool\n",
chan->id);
@@ -855,22 +919,20 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
/*
- * For AXI DMA case after submitting a pending_list, keep
- * an extra segment allocated so that the "next descriptor"
- * pointer on the tail descriptor always points to a
- * valid descriptor, even when paused after reaching taildesc.
- * This way, it is possible to issue additional
- * transfers without halting and restarting the channel.
- */
- chan->seg_v = xilinx_axidma_alloc_tx_segment(chan);
-
- /*
* For cyclic DMA mode we need to program the tail Descriptor
* register with a value which is not a part of the BD chain
* so allocating a desc segment during channel allocation for
* programming tail descriptor.
*/
- chan->cyclic_seg_v = xilinx_axidma_alloc_tx_segment(chan);
+ chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev,
+ sizeof(*chan->cyclic_seg_v),
+ &chan->cyclic_seg_p, GFP_KERNEL);
+ if (!chan->cyclic_seg_v) {
+ dev_err(chan->dev,
+ "unable to allocate desc segment for cyclic DMA\n");
+ return -ENOMEM;
+ }
+ chan->cyclic_seg_v->phys = chan->cyclic_seg_p;
}
dma_cookie_init(dchan);
@@ -936,34 +998,10 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
}
/**
- * xilinx_dma_is_running - Check if DMA channel is running
- * @chan: Driver specific DMA channel
- *
- * Return: '1' if running, '0' if not.
- */
-static bool xilinx_dma_is_running(struct xilinx_dma_chan *chan)
-{
- return !(dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
- XILINX_DMA_DMASR_HALTED) &&
- (dma_ctrl_read(chan, XILINX_DMA_REG_DMACR) &
- XILINX_DMA_DMACR_RUNSTOP);
-}
-
-/**
- * xilinx_dma_is_idle - Check if DMA channel is idle
- * @chan: Driver specific DMA channel
- *
- * Return: '1' if idle, '0' if not.
- */
-static bool xilinx_dma_is_idle(struct xilinx_dma_chan *chan)
-{
- return dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
- XILINX_DMA_DMASR_IDLE;
-}
-
-/**
* xilinx_dma_stop_transfer - Halt DMA channel
* @chan: Driver specific DMA channel
+ *
+ * Return: '0' on success and failure value on error
*/
static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
{
@@ -980,6 +1018,8 @@ static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
/**
* xilinx_cdma_stop_transfer - Wait for the current transfer to complete
* @chan: Driver specific DMA channel
+ *
+ * Return: '0' on success and failure value on error
*/
static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
{
@@ -1022,13 +1062,16 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
{
struct xilinx_vdma_config *config = &chan->config;
struct xilinx_dma_tx_descriptor *desc, *tail_desc;
- u32 reg;
+ u32 reg, j;
struct xilinx_vdma_tx_segment *tail_segment;
/* This function was invoked with lock held */
if (chan->err)
return;
+ if (!chan->idle)
+ return;
+
if (list_empty(&chan->pending_list))
return;
@@ -1040,13 +1083,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
tail_segment = list_last_entry(&tail_desc->segments,
struct xilinx_vdma_tx_segment, node);
- /* If it is SG mode and hardware is busy, cannot submit */
- if (chan->has_sg && xilinx_dma_is_running(chan) &&
- !xilinx_dma_is_idle(chan)) {
- dev_dbg(chan->dev, "DMA controller still busy\n");
- return;
- }
-
/*
* If hardware is idle, then all descriptors on the running lists are
* done, start new transfers
@@ -1063,10 +1099,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
else
reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
- /* Configure channel to allow number frame buffers */
- dma_ctrl_write(chan, XILINX_DMA_REG_FRMSTORE,
- chan->desc_pendingcount);
-
/*
* With SG, start with circular mode, so that BDs can be fetched.
* In direct register mode, if not parking, enable circular mode
@@ -1079,17 +1111,16 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
- if (config->park && (config->park_frm >= 0) &&
- (config->park_frm < chan->num_frms)) {
- if (chan->direction == DMA_MEM_TO_DEV)
- dma_write(chan, XILINX_DMA_REG_PARK_PTR,
- config->park_frm <<
- XILINX_DMA_PARK_PTR_RD_REF_SHIFT);
- else
- dma_write(chan, XILINX_DMA_REG_PARK_PTR,
- config->park_frm <<
- XILINX_DMA_PARK_PTR_WR_REF_SHIFT);
+ j = chan->desc_submitcount;
+ reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
+ if (chan->direction == DMA_MEM_TO_DEV) {
+ reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
+ reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
+ } else {
+ reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
+ reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
}
+ dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
/* Start the hardware */
xilinx_dma_start(chan);
@@ -1101,6 +1132,8 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
if (chan->has_sg) {
dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
tail_segment->phys);
+ list_splice_tail_init(&chan->pending_list, &chan->active_list);
+ chan->desc_pendingcount = 0;
} else {
struct xilinx_vdma_tx_segment *segment, *last = NULL;
int i = 0;
@@ -1130,19 +1163,16 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
last->hw.stride);
vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
- }
- if (!chan->has_sg) {
- list_del(&desc->node);
- list_add_tail(&desc->node, &chan->active_list);
chan->desc_submitcount++;
chan->desc_pendingcount--;
+ list_del(&desc->node);
+ list_add_tail(&desc->node, &chan->active_list);
if (chan->desc_submitcount == chan->num_frms)
chan->desc_submitcount = 0;
- } else {
- list_splice_tail_init(&chan->pending_list, &chan->active_list);
- chan->desc_pendingcount = 0;
}
+
+ chan->idle = false;
}
/**
@@ -1158,6 +1188,9 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
if (chan->err)
return;
+ if (!chan->idle)
+ return;
+
if (list_empty(&chan->pending_list))
return;
@@ -1176,6 +1209,12 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
}
if (chan->has_sg) {
+ dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
+ XILINX_CDMA_CR_SGMODE);
+
+ dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
+ XILINX_CDMA_CR_SGMODE);
+
xilinx_write(chan, XILINX_DMA_REG_CURDESC,
head_desc->async_tx.phys);
@@ -1203,6 +1242,7 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
list_splice_tail_init(&chan->pending_list, &chan->active_list);
chan->desc_pendingcount = 0;
+ chan->idle = false;
}
/**
@@ -1212,7 +1252,7 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
{
struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
- struct xilinx_axidma_tx_segment *tail_segment, *old_head, *new_head;
+ struct xilinx_axidma_tx_segment *tail_segment;
u32 reg;
if (chan->err)
@@ -1221,12 +1261,8 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
if (list_empty(&chan->pending_list))
return;
- /* If it is SG mode and hardware is busy, cannot submit */
- if (chan->has_sg && xilinx_dma_is_running(chan) &&
- !xilinx_dma_is_idle(chan)) {
- dev_dbg(chan->dev, "DMA controller still busy\n");
+ if (!chan->idle)
return;
- }
head_desc = list_first_entry(&chan->pending_list,
struct xilinx_dma_tx_descriptor, node);
@@ -1235,21 +1271,6 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
tail_segment = list_last_entry(&tail_desc->segments,
struct xilinx_axidma_tx_segment, node);
- if (chan->has_sg && !chan->xdev->mcdma) {
- old_head = list_first_entry(&head_desc->segments,
- struct xilinx_axidma_tx_segment, node);
- new_head = chan->seg_v;
- /* Copy Buffer Descriptor fields. */
- new_head->hw = old_head->hw;
-
- /* Swap and save new reserve */
- list_replace_init(&old_head->node, &new_head->node);
- chan->seg_v = old_head;
-
- tail_segment->hw.next_desc = chan->seg_v->phys;
- head_desc->async_tx.phys = new_head->phys;
- }
-
reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
@@ -1324,6 +1345,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
list_splice_tail_init(&chan->pending_list, &chan->active_list);
chan->desc_pendingcount = 0;
+ chan->idle = false;
}
/**
@@ -1388,6 +1410,8 @@ static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
}
chan->err = false;
+ chan->idle = true;
+ chan->desc_submitcount = 0;
return err;
}
@@ -1469,6 +1493,7 @@ static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
spin_lock(&chan->lock);
xilinx_dma_complete_descriptor(chan);
+ chan->idle = true;
chan->start_transfer(chan);
spin_unlock(&chan->lock);
}
@@ -1591,7 +1616,7 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
{
struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
struct xilinx_dma_tx_descriptor *desc;
- struct xilinx_vdma_tx_segment *segment, *prev = NULL;
+ struct xilinx_vdma_tx_segment *segment;
struct xilinx_vdma_desc_hw *hw;
if (!is_slave_direction(xt->dir))
@@ -1645,8 +1670,6 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
/* Insert the segment into the descriptor segments list. */
list_add_tail(&segment->node, &desc->segments);
- prev = segment;
-
/* Link the last hardware descriptor with the first. */
segment = list_first_entry(&desc->segments,
struct xilinx_vdma_tx_segment, node);
@@ -1733,7 +1756,7 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
{
struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
struct xilinx_dma_tx_descriptor *desc;
- struct xilinx_axidma_tx_segment *segment = NULL, *prev = NULL;
+ struct xilinx_axidma_tx_segment *segment = NULL;
u32 *app_w = (u32 *)context;
struct scatterlist *sg;
size_t copy;
@@ -1784,10 +1807,6 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
XILINX_DMA_NUM_APP_WORDS);
}
- if (prev)
- prev->hw.next_desc = segment->phys;
-
- prev = segment;
sg_used += copy;
/*
@@ -1801,7 +1820,6 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
segment = list_first_entry(&desc->segments,
struct xilinx_axidma_tx_segment, node);
desc->async_tx.phys = segment->phys;
- prev->hw.next_desc = segment->phys;
/* For the last DMA_MEM_TO_DEV transfer, set EOP */
if (chan->direction == DMA_MEM_TO_DEV) {
@@ -1821,11 +1839,14 @@ error:
/**
* xilinx_dma_prep_dma_cyclic - prepare descriptors for a DMA_SLAVE transaction
- * @chan: DMA channel
- * @sgl: scatterlist to transfer to/from
- * @sg_len: number of entries in @scatterlist
+ * @dchan: DMA channel
+ * @buf_addr: Physical address of the buffer
+ * @buf_len: Total length of the cyclic buffers
+ * @period_len: length of individual cyclic buffer
* @direction: DMA direction
* @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
*/
static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
@@ -2009,7 +2030,9 @@ error:
/**
* xilinx_dma_terminate_all - Halt the channel and free descriptors
- * @chan: Driver specific DMA Channel pointer
+ * @dchan: Driver specific DMA Channel pointer
+ *
+ * Return: '0' always.
*/
static int xilinx_dma_terminate_all(struct dma_chan *dchan)
{
@@ -2029,6 +2052,7 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)
/* Remove and free all of the descriptors in the lists */
xilinx_dma_free_descriptors(chan);
+ chan->idle = true;
if (chan->cyclic) {
reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
@@ -2037,6 +2061,10 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)
chan->cyclic = false;
}
+ if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
+ dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
+ XILINX_CDMA_CR_SGMODE);
+
return 0;
}
@@ -2323,6 +2351,7 @@ static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
*
* @xdev: Driver specific device structure
* @node: Device node
+ * @chan_id: DMA Channel id
*
* Return: '0' on success and failure value on error
*/
@@ -2344,11 +2373,18 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
chan->has_sg = xdev->has_sg;
chan->desc_pendingcount = 0x0;
chan->ext_addr = xdev->ext_addr;
+ /* This variable ensures that descriptors are not
+ * submitted when the DMA engine is in progress. It is
+ * added to avoid polling a bit in the status register to
+ * know the DMA state in the driver hot path.
+ */
+ chan->idle = true;
spin_lock_init(&chan->lock);
INIT_LIST_HEAD(&chan->pending_list);
INIT_LIST_HEAD(&chan->done_list);
INIT_LIST_HEAD(&chan->active_list);
+ INIT_LIST_HEAD(&chan->free_seg_list);
/* Retrieve the channel properties from the device tree */
has_dre = of_property_read_bool(node, "xlnx,include-dre");
@@ -2379,6 +2415,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
+ chan->config.park = 1;
if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
@@ -2395,6 +2432,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
+ chan->config.park = 1;
if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
@@ -2459,7 +2497,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
* Return: 0 always.
*/
static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
- struct device_node *node) {
+ struct device_node *node)
+{
int ret, i, nr_channels = 1;
ret = of_property_read_u32(node, "dma-channels", &nr_channels);
@@ -2654,7 +2693,12 @@ static int xilinx_dma_probe(struct platform_device *pdev)
goto error;
}
- dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
+ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
+ dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
+ else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
+ dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
+ else
+ dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
return 0;
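
With the move from dma_pool to one coherent block, the AXI DMA buffer descriptors are pre-linked into a hardware ring at allocation time: entry i's next_desc holds the bus address of entry (i + 1) % XILINX_DMA_NUM_DESCS, split into 32-bit halves for 64-bit addressing, while free_seg_list recycles entries in software. The linking arithmetic, sketched with a hypothetical seg_v/seg_p layout:

	for (i = 0; i < NUM_DESCS; i++) {
		dma_addr_t next = seg_p + sizeof(*seg_v) *
				  ((i + 1) % NUM_DESCS);	/* N-1 wraps to 0 */

		seg_v[i].hw.next_desc     = lower_32_bits(next);
		seg_v[i].hw.next_desc_msb = upper_32_bits(next);
		seg_v[i].phys = seg_p + sizeof(*seg_v) * i;

		list_add_tail(&seg_v[i].node, &free_list);	/* software free list */
	}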
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index 1ee1241ca797..f14645817ed8 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/pm_runtime.h>
#include "../dmaengine.h"
@@ -47,6 +48,7 @@
#define ZYNQMP_DMA_SRC_START_MSB 0x15C
#define ZYNQMP_DMA_DST_START_LSB 0x160
#define ZYNQMP_DMA_DST_START_MSB 0x164
+#define ZYNQMP_DMA_TOTAL_BYTE 0x188
#define ZYNQMP_DMA_RATE_CTRL 0x18C
#define ZYNQMP_DMA_IRQ_SRC_ACCT 0x190
#define ZYNQMP_DMA_IRQ_DST_ACCT 0x194
@@ -138,6 +140,8 @@
#define ZYNQMP_DMA_BUS_WIDTH_64 64
#define ZYNQMP_DMA_BUS_WIDTH_128 128
+#define ZDMA_PM_TIMEOUT 100
+
#define ZYNQMP_DMA_DESC_SIZE(chan) (chan->desc_size)
#define to_chan(chan) container_of(chan, struct zynqmp_dma_chan, \
@@ -211,8 +215,6 @@ struct zynqmp_dma_desc_sw {
* @bus_width: Bus width
* @src_burst_len: Source burst length
* @dst_burst_len: Dest burst length
- * @clk_main: Pointer to main clock
- * @clk_apb: Pointer to apb clock
*/
struct zynqmp_dma_chan {
struct zynqmp_dma_device *zdev;
@@ -237,8 +239,6 @@ struct zynqmp_dma_chan {
u32 bus_width;
u32 src_burst_len;
u32 dst_burst_len;
- struct clk *clk_main;
- struct clk *clk_apb;
};
/**
@@ -246,11 +246,15 @@ struct zynqmp_dma_chan {
* @dev: Device Structure
* @common: DMA device structure
* @chan: Driver specific DMA channel
+ * @clk_main: Pointer to main clock
+ * @clk_apb: Pointer to apb clock
*/
struct zynqmp_dma_device {
struct device *dev;
struct dma_device common;
struct zynqmp_dma_chan *chan;
+ struct clk *clk_main;
+ struct clk *clk_apb;
};
static inline void zynqmp_dma_writeq(struct zynqmp_dma_chan *chan, u32 reg,
@@ -461,7 +465,11 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
{
struct zynqmp_dma_chan *chan = to_chan(dchan);
struct zynqmp_dma_desc_sw *desc;
- int i;
+ int i, ret;
+
+ ret = pm_runtime_get_sync(chan->dev);
+ if (ret < 0)
+ return ret;
chan->sw_desc_pool = kzalloc(sizeof(*desc) * ZYNQMP_DMA_NUM_DESCS,
GFP_KERNEL);
@@ -506,6 +514,7 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
static void zynqmp_dma_start(struct zynqmp_dma_chan *chan)
{
writel(ZYNQMP_DMA_INT_EN_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IER);
+ writel(0, chan->regs + ZYNQMP_DMA_TOTAL_BYTE);
chan->idle = false;
writel(ZYNQMP_DMA_ENABLE, chan->regs + ZYNQMP_DMA_CTRL2);
}
@@ -517,12 +526,12 @@ static void zynqmp_dma_start(struct zynqmp_dma_chan *chan)
*/
static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status)
{
- u32 val;
-
+ if (status & ZYNQMP_DMA_BYTE_CNT_OVRFL)
+ writel(0, chan->regs + ZYNQMP_DMA_TOTAL_BYTE);
if (status & ZYNQMP_DMA_IRQ_DST_ACCT_ERR)
- val = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
+ readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
if (status & ZYNQMP_DMA_IRQ_SRC_ACCT_ERR)
- val = readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT);
+ readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT);
}
static void zynqmp_dma_config(struct zynqmp_dma_chan *chan)
@@ -545,6 +554,8 @@ static void zynqmp_dma_config(struct zynqmp_dma_chan *chan)
* zynqmp_dma_device_config - Zynqmp dma device configuration
* @dchan: DMA channel
* @config: DMA device config
+ *
+ * Return: 0 always
*/
static int zynqmp_dma_device_config(struct dma_chan *dchan,
struct dma_slave_config *config)
@@ -640,7 +651,7 @@ static void zynqmp_dma_issue_pending(struct dma_chan *dchan)
/**
* zynqmp_dma_free_descriptors - Free channel descriptors
- * @dchan: DMA channel pointer
+ * @chan: ZynqMP DMA channel pointer
*/
static void zynqmp_dma_free_descriptors(struct zynqmp_dma_chan *chan)
{
@@ -664,6 +675,8 @@ static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan)
(2 * ZYNQMP_DMA_DESC_SIZE(chan) * ZYNQMP_DMA_NUM_DESCS),
chan->desc_pool_v, chan->desc_pool_p);
kfree(chan->sw_desc_pool);
+ pm_runtime_mark_last_busy(chan->dev);
+ pm_runtime_put_autosuspend(chan->dev);
}
/**
@@ -715,7 +728,7 @@ static irqreturn_t zynqmp_dma_irq_handler(int irq, void *data)
if (status & ZYNQMP_DMA_INT_OVRFL) {
zynqmp_dma_handle_ovfl_int(chan, status);
- dev_info(chan->dev, "Channel %p overflow interrupt\n", chan);
+ dev_dbg(chan->dev, "Channel %p overflow interrupt\n", chan);
ret = IRQ_HANDLED;
}
@@ -838,11 +851,10 @@ static void zynqmp_dma_chan_remove(struct zynqmp_dma_chan *chan)
if (!chan)
return;
- devm_free_irq(chan->zdev->dev, chan->irq, chan);
+ if (chan->irq)
+ devm_free_irq(chan->zdev->dev, chan->irq, chan);
tasklet_kill(&chan->tasklet);
list_del(&chan->common.device_node);
- clk_disable_unprepare(chan->clk_apb);
- clk_disable_unprepare(chan->clk_main);
}
/**
@@ -907,30 +919,6 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
"zynqmp-dma", chan);
if (err)
return err;
- chan->clk_main = devm_clk_get(&pdev->dev, "clk_main");
- if (IS_ERR(chan->clk_main)) {
- dev_err(&pdev->dev, "main clock not found.\n");
- return PTR_ERR(chan->clk_main);
- }
-
- chan->clk_apb = devm_clk_get(&pdev->dev, "clk_apb");
- if (IS_ERR(chan->clk_apb)) {
- dev_err(&pdev->dev, "apb clock not found.\n");
- return PTR_ERR(chan->clk_apb);
- }
-
- err = clk_prepare_enable(chan->clk_main);
- if (err) {
- dev_err(&pdev->dev, "Unable to enable main clock.\n");
- return err;
- }
-
- err = clk_prepare_enable(chan->clk_apb);
- if (err) {
- clk_disable_unprepare(chan->clk_main);
- dev_err(&pdev->dev, "Unable to enable apb clock.\n");
- return err;
- }
chan->desc_size = sizeof(struct zynqmp_dma_desc_ll);
chan->idle = true;
@@ -953,6 +941,87 @@ static struct dma_chan *of_zynqmp_dma_xlate(struct of_phandle_args *dma_spec,
}
/**
+ * zynqmp_dma_suspend - Suspend method for the driver
+ * @dev: Address of the device structure
+ *
+ * Put the driver into low power mode.
+ * Return: 0 on success and failure value on error
+ */
+static int __maybe_unused zynqmp_dma_suspend(struct device *dev)
+{
+ if (!device_may_wakeup(dev))
+ return pm_runtime_force_suspend(dev);
+
+ return 0;
+}
+
+/**
+ * zynqmp_dma_resume - Resume from suspend
+ * @dev: Address of the device structure
+ *
+ * Resume operation after suspend.
+ * Return: 0 on success and failure value on error
+ */
+static int __maybe_unused zynqmp_dma_resume(struct device *dev)
+{
+ if (!device_may_wakeup(dev))
+ return pm_runtime_force_resume(dev);
+
+ return 0;
+}
+
+/**
+ * zynqmp_dma_runtime_suspend - Runtime suspend method for the driver
+ * @dev: Address of the device structure
+ *
+ * Put the driver into low power mode.
+ * Return: 0 always
+ */
+static int __maybe_unused zynqmp_dma_runtime_suspend(struct device *dev)
+{
+ struct zynqmp_dma_device *zdev = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(zdev->clk_main);
+ clk_disable_unprepare(zdev->clk_apb);
+
+ return 0;
+}
+
+/**
+ * zynqmp_dma_runtime_resume - Runtime resume method for the driver
+ * @dev: Address of the device structure
+ *
+ * Bring the driver out of low power mode.
+ * Return: 0 on success and error code on failure
+ */
+static int __maybe_unused zynqmp_dma_runtime_resume(struct device *dev)
+{
+ struct zynqmp_dma_device *zdev = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(zdev->clk_main);
+ if (err) {
+ dev_err(dev, "Unable to enable main clock.\n");
+ return err;
+ }
+
+ err = clk_prepare_enable(zdev->clk_apb);
+ if (err) {
+ dev_err(dev, "Unable to enable apb clock.\n");
+ clk_disable_unprepare(zdev->clk_main);
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops zynqmp_dma_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(zynqmp_dma_suspend, zynqmp_dma_resume)
+ SET_RUNTIME_PM_OPS(zynqmp_dma_runtime_suspend,
+ zynqmp_dma_runtime_resume, NULL)
+};
+
+/**
* zynqmp_dma_probe - Driver probe function
* @pdev: Pointer to the platform_device structure
*
@@ -984,12 +1053,33 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
p->device_config = zynqmp_dma_device_config;
p->dev = &pdev->dev;
+ zdev->clk_main = devm_clk_get(&pdev->dev, "clk_main");
+ if (IS_ERR(zdev->clk_main)) {
+ dev_err(&pdev->dev, "main clock not found.\n");
+ return PTR_ERR(zdev->clk_main);
+ }
+
+ zdev->clk_apb = devm_clk_get(&pdev->dev, "clk_apb");
+ if (IS_ERR(zdev->clk_apb)) {
+ dev_err(&pdev->dev, "apb clock not found.\n");
+ return PTR_ERR(zdev->clk_apb);
+ }
+
platform_set_drvdata(pdev, zdev);
+ pm_runtime_set_autosuspend_delay(zdev->dev, ZDMA_PM_TIMEOUT);
+ pm_runtime_use_autosuspend(zdev->dev);
+ pm_runtime_enable(zdev->dev);
+ pm_runtime_get_sync(zdev->dev);
+ if (!pm_runtime_enabled(zdev->dev)) {
+ ret = zynqmp_dma_runtime_resume(zdev->dev);
+ if (ret)
+ return ret;
+ }
ret = zynqmp_dma_chan_probe(zdev, pdev);
if (ret) {
dev_err(&pdev->dev, "Probing channel failed\n");
- goto free_chan_resources;
+ goto err_disable_pm;
}
p->dst_addr_widths = BIT(zdev->chan->bus_width / 8);
@@ -1005,12 +1095,19 @@ static int zynqmp_dma_probe(struct platform_device *pdev)
goto free_chan_resources;
}
+ pm_runtime_mark_last_busy(zdev->dev);
+ pm_runtime_put_sync_autosuspend(zdev->dev);
+
dev_info(&pdev->dev, "ZynqMP DMA driver Probe success\n");
return 0;
free_chan_resources:
zynqmp_dma_chan_remove(zdev->chan);
+err_disable_pm:
+ if (!pm_runtime_enabled(zdev->dev))
+ zynqmp_dma_runtime_suspend(zdev->dev);
+ pm_runtime_disable(zdev->dev);
return ret;
}
@@ -1028,6 +1125,9 @@ static int zynqmp_dma_remove(struct platform_device *pdev)
dma_async_device_unregister(&zdev->common);
zynqmp_dma_chan_remove(zdev->chan);
+ pm_runtime_disable(zdev->dev);
+ if (!pm_runtime_enabled(zdev->dev))
+ zynqmp_dma_runtime_suspend(zdev->dev);
return 0;
}
@@ -1042,6 +1142,7 @@ static struct platform_driver zynqmp_dma_driver = {
.driver = {
.name = "xilinx-zynqmp-dma",
.of_match_table = zynqmp_dma_of_match,
+ .pm = &zynqmp_dma_dev_pm_ops,
},
.probe = zynqmp_dma_probe,
.remove = zynqmp_dma_remove,
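For readers unfamiliar with the autosuspend pattern the zynqmp_dma changes adopt, a minimal sketch follows. This is hypothetical consumer code, not part of the patch; example_start_transfer is a made-up name, and only the pm_runtime_* calls are real kernel API.

#include <linux/pm_runtime.h>

/* Take a runtime PM reference around hardware access, then let the
 * autosuspend timer (ZDMA_PM_TIMEOUT ms in the patch above) power the
 * device back down once it has been idle long enough.
 */
static int example_start_transfer(struct device *dev)
{
	int ret;

	/* Resume the device (re-enabling its clocks) before touching it. */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* balance the failed get */
		return ret;
	}

	/* ... program the DMA hardware here ... */

	pm_runtime_mark_last_busy(dev);		/* restart the autosuspend timer */
	pm_runtime_put_autosuspend(dev);	/* drop the reference */
	return 0;
}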
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 96afb2aeed18..3c4017007647 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -457,4 +457,11 @@ config EDAC_XGENE
Support for error detection and correction on the
APM X-Gene family of SOCs.
+config EDAC_TI
+ tristate "Texas Instruments DDR3 ECC Controller"
+ depends on ARCH_KEYSTONE || SOC_DRA7XX
+ help
+ Support for error detection and correction on the
+ DDR3 memory controllers found in TI Keystone and DRA7xx SoCs.
+
endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 0fd9ffa63299..b54912eb39af 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -78,3 +78,4 @@ obj-$(CONFIG_EDAC_THUNDERX) += thunderx_edac.o
obj-$(CONFIG_EDAC_ALTERA) += altera_edac.o
obj-$(CONFIG_EDAC_SYNOPSYS) += synopsys_edac.o
obj-$(CONFIG_EDAC_XGENE) += xgene_edac.o
+obj-$(CONFIG_EDAC_TI) += ti_edac.o
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
index ec5d695bbb72..3c68bb525d5d 100644
--- a/drivers/edac/mv64x60_edac.c
+++ b/drivers/edac/mv64x60_edac.c
@@ -758,7 +758,7 @@ static int mv64x60_mc_err_probe(struct platform_device *pdev)
/* Non-ECC RAM? */
printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
res = -ENODEV;
- goto err2;
+ goto err;
}
edac_dbg(3, "init mci\n");
diff --git a/drivers/edac/octeon_edac-lmc.c b/drivers/edac/octeon_edac-lmc.c
index 9c1ffe3e912b..aeb222ca3ed1 100644
--- a/drivers/edac/octeon_edac-lmc.c
+++ b/drivers/edac/octeon_edac-lmc.c
@@ -78,6 +78,7 @@ static void octeon_lmc_edac_poll_o2(struct mem_ctl_info *mci)
if (!pvt->inject)
int_reg.u64 = cvmx_read_csr(CVMX_LMCX_INT(mci->mc_idx));
else {
+ int_reg.u64 = 0;
if (pvt->error_type == 1)
int_reg.s.sec_err = 1;
if (pvt->error_type == 2)
diff --git a/drivers/edac/ti_edac.c b/drivers/edac/ti_edac.c
new file mode 100644
index 000000000000..6ac26d1b929f
--- /dev/null
+++ b/drivers/edac/ti_edac.c
@@ -0,0 +1,341 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Texas Instruments DDR3 ECC error correction and detection driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/init.h>
+#include <linux/edac.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/module.h>
+
+#include "edac_module.h"
+
+/* EMIF controller registers */
+#define EMIF_SDRAM_CONFIG 0x008
+#define EMIF_IRQ_STATUS 0x0ac
+#define EMIF_IRQ_ENABLE_SET 0x0b4
+#define EMIF_ECC_CTRL 0x110
+#define EMIF_1B_ECC_ERR_CNT 0x130
+#define EMIF_1B_ECC_ERR_THRSH 0x134
+#define EMIF_1B_ECC_ERR_ADDR_LOG 0x13c
+#define EMIF_2B_ECC_ERR_ADDR_LOG 0x140
+
+/* Bit definitions for EMIF_SDRAM_CONFIG */
+#define SDRAM_TYPE_SHIFT 29
+#define SDRAM_TYPE_MASK GENMASK(31, 29)
+#define SDRAM_TYPE_DDR3 (3 << SDRAM_TYPE_SHIFT)
+#define SDRAM_TYPE_DDR2 (2 << SDRAM_TYPE_SHIFT)
+#define SDRAM_NARROW_MODE_MASK GENMASK(15, 14)
+#define SDRAM_K2_NARROW_MODE_SHIFT 12
+#define SDRAM_K2_NARROW_MODE_MASK GENMASK(13, 12)
+#define SDRAM_ROWSIZE_SHIFT 7
+#define SDRAM_ROWSIZE_MASK GENMASK(9, 7)
+#define SDRAM_IBANK_SHIFT 4
+#define SDRAM_IBANK_MASK GENMASK(6, 4)
+#define SDRAM_K2_IBANK_SHIFT 5
+#define SDRAM_K2_IBANK_MASK GENMASK(6, 5)
+#define SDRAM_K2_EBANK_SHIFT 3
+#define SDRAM_K2_EBANK_MASK BIT(SDRAM_K2_EBANK_SHIFT)
+#define SDRAM_PAGESIZE_SHIFT 0
+#define SDRAM_PAGESIZE_MASK GENMASK(2, 0)
+#define SDRAM_K2_PAGESIZE_SHIFT 0
+#define SDRAM_K2_PAGESIZE_MASK GENMASK(1, 0)
+
+#define EMIF_1B_ECC_ERR_THRSH_SHIFT 24
+
+/* IRQ bit definitions */
+#define EMIF_1B_ECC_ERR BIT(5)
+#define EMIF_2B_ECC_ERR BIT(4)
+#define EMIF_WR_ECC_ERR BIT(3)
+#define EMIF_SYS_ERR BIT(0)
+/* Bit 31 enables ECC and 28 enables RMW */
+#define ECC_ENABLED (BIT(31) | BIT(28))
+
+#define EDAC_MOD_NAME "ti-emif-edac"
+
+enum {
+ EMIF_TYPE_DRA7,
+ EMIF_TYPE_K2
+};
+
+struct ti_edac {
+ void __iomem *reg;
+};
+
+static u32 ti_edac_readl(struct ti_edac *edac, u16 offset)
+{
+ return readl_relaxed(edac->reg + offset);
+}
+
+static void ti_edac_writel(struct ti_edac *edac, u32 val, u16 offset)
+{
+ writel_relaxed(val, edac->reg + offset);
+}
+
+static irqreturn_t ti_edac_isr(int irq, void *data)
+{
+ struct mem_ctl_info *mci = data;
+ struct ti_edac *edac = mci->pvt_info;
+ u32 irq_status;
+ u32 err_addr;
+ int err_count;
+
+ irq_status = ti_edac_readl(edac, EMIF_IRQ_STATUS);
+
+ if (irq_status & EMIF_1B_ECC_ERR) {
+ err_addr = ti_edac_readl(edac, EMIF_1B_ECC_ERR_ADDR_LOG);
+ err_count = ti_edac_readl(edac, EMIF_1B_ECC_ERR_CNT);
+ ti_edac_writel(edac, err_count, EMIF_1B_ECC_ERR_CNT);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, err_count,
+ err_addr >> PAGE_SHIFT,
+ err_addr & ~PAGE_MASK, -1, 0, 0, 0,
+ mci->ctl_name, "1B");
+ }
+
+ if (irq_status & EMIF_2B_ECC_ERR) {
+ err_addr = ti_edac_readl(edac, EMIF_2B_ECC_ERR_ADDR_LOG);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
+ err_addr >> PAGE_SHIFT,
+ err_addr & ~PAGE_MASK, -1, 0, 0, 0,
+ mci->ctl_name, "2B");
+ }
+
+ if (irq_status & EMIF_WR_ECC_ERR)
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
+ 0, 0, -1, 0, 0, 0,
+ mci->ctl_name, "WR");
+
+ ti_edac_writel(edac, irq_status, EMIF_IRQ_STATUS);
+
+ return IRQ_HANDLED;
+}
+
+static void ti_edac_setup_dimm(struct mem_ctl_info *mci, u32 type)
+{
+ struct dimm_info *dimm;
+ struct ti_edac *edac = mci->pvt_info;
+ int bits;
+ u32 val;
+ u32 memsize;
+
+ dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, 0, 0, 0);
+
+ val = ti_edac_readl(edac, EMIF_SDRAM_CONFIG);
+
+ if (type == EMIF_TYPE_DRA7) {
+ bits = ((val & SDRAM_PAGESIZE_MASK) >> SDRAM_PAGESIZE_SHIFT) + 8;
+ bits += ((val & SDRAM_ROWSIZE_MASK) >> SDRAM_ROWSIZE_SHIFT) + 9;
+ bits += (val & SDRAM_IBANK_MASK) >> SDRAM_IBANK_SHIFT;
+
+ if (val & SDRAM_NARROW_MODE_MASK) {
+ bits++;
+ dimm->dtype = DEV_X16;
+ } else {
+ bits += 2;
+ dimm->dtype = DEV_X32;
+ }
+ } else {
+ bits = 16;
+ bits += ((val & SDRAM_K2_PAGESIZE_MASK) >>
+ SDRAM_K2_PAGESIZE_SHIFT) + 8;
+ bits += (val & SDRAM_K2_IBANK_MASK) >> SDRAM_K2_IBANK_SHIFT;
+ bits += (val & SDRAM_K2_EBANK_MASK) >> SDRAM_K2_EBANK_SHIFT;
+
+ val = (val & SDRAM_K2_NARROW_MODE_MASK) >>
+ SDRAM_K2_NARROW_MODE_SHIFT;
+ switch (val) {
+ case 0:
+ bits += 3;
+ dimm->dtype = DEV_X64;
+ break;
+ case 1:
+ bits += 2;
+ dimm->dtype = DEV_X32;
+ break;
+ case 2:
+ bits++;
+ dimm->dtype = DEV_X16;
+ break;
+ }
+ }
+
+ memsize = 1 << bits;
+
+ dimm->nr_pages = memsize >> PAGE_SHIFT;
+ dimm->grain = 4;
+ if ((val & SDRAM_TYPE_MASK) == SDRAM_TYPE_DDR2)
+ dimm->mtype = MEM_DDR2;
+ else
+ dimm->mtype = MEM_DDR3;
+
+ val = ti_edac_readl(edac, EMIF_ECC_CTRL);
+ if (val & ECC_ENABLED)
+ dimm->edac_mode = EDAC_SECDED;
+ else
+ dimm->edac_mode = EDAC_NONE;
+}
+
+static const struct of_device_id ti_edac_of_match[] = {
+ { .compatible = "ti,emif-keystone", .data = (void *)EMIF_TYPE_K2 },
+ { .compatible = "ti,emif-dra7xx", .data = (void *)EMIF_TYPE_DRA7 },
+ {},
+};
+
+static int _emif_get_id(struct device_node *node)
+{
+ struct device_node *np;
+ const __be32 *addrp;
+ u32 addr, my_addr;
+ int my_id = 0;
+
+ addrp = of_get_address(node, 0, NULL, NULL);
+ my_addr = (u32)of_translate_address(node, addrp);
+
+ for_each_matching_node(np, ti_edac_of_match) {
+ if (np == node)
+ continue;
+
+ addrp = of_get_address(np, 0, NULL, NULL);
+ addr = (u32)of_translate_address(np, addrp);
+
+ edac_printk(KERN_INFO, EDAC_MOD_NAME,
+ "addr=%x, my_addr=%x\n",
+ addr, my_addr);
+
+ if (addr < my_addr)
+ my_id++;
+ }
+
+ return my_id;
+}
+
+static int ti_edac_probe(struct platform_device *pdev)
+{
+ int error_irq = 0, ret = -ENODEV;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *reg;
+ struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[1];
+ const struct of_device_id *id;
+ struct ti_edac *edac;
+ int emif_id;
+
+ id = of_match_device(ti_edac_of_match, &pdev->dev);
+ if (!id)
+ return -ENODEV;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ reg = devm_ioremap_resource(dev, res);
+ if (IS_ERR(reg)) {
+ edac_printk(KERN_ERR, EDAC_MOD_NAME,
+ "EMIF controller regs not defined\n");
+ return PTR_ERR(reg);
+ }
+
+ layers[0].type = EDAC_MC_LAYER_ALL_MEM;
+ layers[0].size = 1;
+
+ /* Allocate ID number for our EMIF controller */
+ emif_id = _emif_get_id(pdev->dev.of_node);
+ if (emif_id < 0)
+ return -EINVAL;
+
+ mci = edac_mc_alloc(emif_id, 1, layers, sizeof(*edac));
+ if (!mci)
+ return -ENOMEM;
+
+ mci->pdev = &pdev->dev;
+ edac = mci->pvt_info;
+ edac->reg = reg;
+ platform_set_drvdata(pdev, mci);
+
+ mci->mtype_cap = MEM_FLAG_DDR3 | MEM_FLAG_DDR2;
+ mci->edac_ctl_cap = EDAC_FLAG_SECDED | EDAC_FLAG_NONE;
+ mci->mod_name = EDAC_MOD_NAME;
+ mci->ctl_name = id->compatible;
+ mci->dev_name = dev_name(&pdev->dev);
+
+ /* Setup memory layout */
+ ti_edac_setup_dimm(mci, (u32)(id->data));
+
+ /* add EMIF ECC error handler */
+ error_irq = platform_get_irq(pdev, 0);
+ if (error_irq <= 0) {
+ edac_printk(KERN_ERR, EDAC_MOD_NAME,
+ "EMIF irq number not defined.\n");
+ goto err;
+ }
+
+ ret = devm_request_irq(dev, error_irq, ti_edac_isr, 0,
+ "emif-edac-irq", mci);
+ if (ret) {
+ edac_printk(KERN_ERR, EDAC_MOD_NAME,
+ "request_irq fail for EMIF EDAC irq\n");
+ goto err;
+ }
+
+ ret = edac_mc_add_mc(mci);
+ if (ret) {
+ edac_printk(KERN_ERR, EDAC_MOD_NAME,
+ "Failed to register mci: %d.\n", ret);
+ goto err;
+ }
+
+ /* Generate an interrupt with each 1b error */
+ ti_edac_writel(edac, 1 << EMIF_1B_ECC_ERR_THRSH_SHIFT,
+ EMIF_1B_ECC_ERR_THRSH);
+
+ /* Enable interrupts */
+ ti_edac_writel(edac,
+ EMIF_1B_ECC_ERR | EMIF_2B_ECC_ERR | EMIF_WR_ECC_ERR,
+ EMIF_IRQ_ENABLE_SET);
+
+ return 0;
+
+err:
+ edac_mc_free(mci);
+ return ret;
+}
+
+static int ti_edac_remove(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci = platform_get_drvdata(pdev);
+
+ edac_mc_del_mc(&pdev->dev);
+ edac_mc_free(mci);
+
+ return 0;
+}
+
+static struct platform_driver ti_edac_driver = {
+ .probe = ti_edac_probe,
+ .remove = ti_edac_remove,
+ .driver = {
+ .name = EDAC_MOD_NAME,
+ .of_match_table = ti_edac_of_match,
+ },
+};
+
+module_platform_driver(ti_edac_driver);
+
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_DESCRIPTION("EDAC Driver for Texas Instruments DDR3 MC");
+MODULE_LICENSE("GPL v2");
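As a worked example of the bit counting in ti_edac_setup_dimm() above, the sketch below assumes DRA7 SDRAM_CONFIG field values chosen purely for illustration; nothing here is read from real hardware.

/* DRA7 branch of ti_edac_setup_dimm(), with assumed field values. */
int bits;

bits  = 2 + 8;	/* SDRAM_PAGESIZE field = 2 -> 10 column bits */
bits += 6 + 9;	/* SDRAM_ROWSIZE field  = 6 -> 15 row bits */
bits += 3;	/* SDRAM_IBANK field    = 3 -> 8 internal banks */
bits += 2;	/* full-width (32-bit) bus, DEV_X32 */

/* bits == 30, so memsize = 1 << 30 = 1 GiB, and
 * dimm->nr_pages = memsize >> PAGE_SHIFT = 262144 with 4K pages.
 */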
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
index 981fba56bc18..1621f2f7f129 100644
--- a/drivers/extcon/extcon-axp288.c
+++ b/drivers/extcon/extcon-axp288.c
@@ -24,8 +24,6 @@
#include <linux/notifier.h>
#include <linux/extcon-provider.h>
#include <linux/regmap.h>
-#include <linux/gpio.h>
-#include <linux/gpio/consumer.h>
#include <linux/mfd/axp20x.h>
/* Power source status register */
@@ -79,11 +77,6 @@ enum axp288_extcon_reg {
AXP288_BC_DET_STAT_REG = 0x2f,
};
-enum axp288_mux_select {
- EXTCON_GPIO_MUX_SEL_PMIC = 0,
- EXTCON_GPIO_MUX_SEL_SOC,
-};
-
enum axp288_extcon_irq {
VBUS_FALLING_IRQ = 0,
VBUS_RISING_IRQ,
@@ -104,10 +97,8 @@ struct axp288_extcon_info {
struct device *dev;
struct regmap *regmap;
struct regmap_irq_chip_data *regmap_irqc;
- struct gpio_desc *gpio_mux_cntl;
int irq[EXTCON_IRQ_END];
struct extcon_dev *edev;
- struct notifier_block extcon_nb;
unsigned int previous_cable;
};
@@ -197,15 +188,6 @@ static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info)
}
no_vbus:
- /*
- * If VBUS is absent Connect D+/D- lines to PMIC for BC
- * detection. Else connect them to SOC for USB communication.
- */
- if (info->gpio_mux_cntl)
- gpiod_set_value(info->gpio_mux_cntl,
- vbus_attach ? EXTCON_GPIO_MUX_SEL_SOC
- : EXTCON_GPIO_MUX_SEL_PMIC);
-
extcon_set_state_sync(info->edev, info->previous_cable, false);
if (info->previous_cable == EXTCON_CHG_USB_SDP)
extcon_set_state_sync(info->edev, EXTCON_USB, false);
@@ -253,8 +235,7 @@ static int axp288_extcon_probe(struct platform_device *pdev)
{
struct axp288_extcon_info *info;
struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
- struct axp288_extcon_pdata *pdata = pdev->dev.platform_data;
- int ret, i, pirq, gpio;
+ int ret, i, pirq;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
@@ -264,8 +245,6 @@ static int axp288_extcon_probe(struct platform_device *pdev)
info->regmap = axp20x->regmap;
info->regmap_irqc = axp20x->regmap_irqc;
info->previous_cable = EXTCON_NONE;
- if (pdata)
- info->gpio_mux_cntl = pdata->gpio_mux_cntl;
platform_set_drvdata(pdev, info);
@@ -286,21 +265,11 @@ static int axp288_extcon_probe(struct platform_device *pdev)
return ret;
}
- /* Set up gpio control for USB Mux */
- if (info->gpio_mux_cntl) {
- gpio = desc_to_gpio(info->gpio_mux_cntl);
- ret = devm_gpio_request(&pdev->dev, gpio, "USB_MUX");
- if (ret < 0) {
- dev_err(&pdev->dev,
- "failed to request the gpio=%d\n", gpio);
- return ret;
- }
- gpiod_direction_output(info->gpio_mux_cntl,
- EXTCON_GPIO_MUX_SEL_PMIC);
- }
-
for (i = 0; i < EXTCON_IRQ_END; i++) {
pirq = platform_get_irq(pdev, i);
+ if (pirq < 0)
+ return pirq;
+
info->irq[i] = regmap_irq_get_virq(info->regmap_irqc, pirq);
if (info->irq[i] < 0) {
dev_err(&pdev->dev,
diff --git a/drivers/extcon/extcon-usbc-cros-ec.c b/drivers/extcon/extcon-usbc-cros-ec.c
index 6187f731b29d..6721ab01fe7d 100644
--- a/drivers/extcon/extcon-usbc-cros-ec.c
+++ b/drivers/extcon/extcon-usbc-cros-ec.c
@@ -34,16 +34,26 @@ struct cros_ec_extcon_info {
struct notifier_block notifier;
+ unsigned int dr; /* data role */
+ bool pr; /* power role (true if VBUS enabled) */
bool dp; /* DisplayPort enabled */
bool mux; /* SuperSpeed (usb3) enabled */
unsigned int power_type;
};
static const unsigned int usb_type_c_cable[] = {
+ EXTCON_USB,
+ EXTCON_USB_HOST,
EXTCON_DISP_DP,
EXTCON_NONE,
};
+enum usb_data_roles {
+ DR_NONE,
+ DR_HOST,
+ DR_DEVICE,
+};
+
/**
* cros_ec_pd_command() - Send a command to the EC.
* @info: pointer to struct cros_ec_extcon_info
@@ -150,6 +160,7 @@ static int cros_ec_usb_get_role(struct cros_ec_extcon_info *info,
pd_control.port = info->port_id;
pd_control.role = USB_PD_CTRL_ROLE_NO_CHANGE;
pd_control.mux = USB_PD_CTRL_MUX_NO_CHANGE;
+ pd_control.swap = USB_PD_CTRL_SWAP_NONE;
ret = cros_ec_pd_command(info, EC_CMD_USB_PD_CONTROL, 1,
&pd_control, sizeof(pd_control),
&resp, sizeof(resp));
@@ -183,11 +194,72 @@ static int cros_ec_pd_get_num_ports(struct cros_ec_extcon_info *info)
return resp.num_ports;
}
+static const char *cros_ec_usb_role_string(unsigned int role)
+{
+ return role == DR_NONE ? "DISCONNECTED" :
+ (role == DR_HOST ? "DFP" : "UFP");
+}
+
+static const char *cros_ec_usb_power_type_string(unsigned int type)
+{
+ switch (type) {
+ case USB_CHG_TYPE_NONE:
+ return "USB_CHG_TYPE_NONE";
+ case USB_CHG_TYPE_PD:
+ return "USB_CHG_TYPE_PD";
+ case USB_CHG_TYPE_PROPRIETARY:
+ return "USB_CHG_TYPE_PROPRIETARY";
+ case USB_CHG_TYPE_C:
+ return "USB_CHG_TYPE_C";
+ case USB_CHG_TYPE_BC12_DCP:
+ return "USB_CHG_TYPE_BC12_DCP";
+ case USB_CHG_TYPE_BC12_CDP:
+ return "USB_CHG_TYPE_BC12_CDP";
+ case USB_CHG_TYPE_BC12_SDP:
+ return "USB_CHG_TYPE_BC12_SDP";
+ case USB_CHG_TYPE_OTHER:
+ return "USB_CHG_TYPE_OTHER";
+ case USB_CHG_TYPE_VBUS:
+ return "USB_CHG_TYPE_VBUS";
+ case USB_CHG_TYPE_UNKNOWN:
+ return "USB_CHG_TYPE_UNKNOWN";
+ default:
+ return "USB_CHG_TYPE_UNKNOWN";
+ }
+}
+
+static bool cros_ec_usb_power_type_is_wall_wart(unsigned int type,
+ unsigned int role)
+{
+ switch (type) {
+ /* FIXME : Guppy, Donnettes, and other chargers will be miscategorized
+ * because they identify with USB_CHG_TYPE_C, but returning true for
+ * that type here would break Suzy-Q and other kinds of USB Type-C
+ * cables and peripherals.
+ */
+ case USB_CHG_TYPE_PROPRIETARY:
+ case USB_CHG_TYPE_BC12_DCP:
+ return true;
+ case USB_CHG_TYPE_PD:
+ case USB_CHG_TYPE_C:
+ case USB_CHG_TYPE_BC12_CDP:
+ case USB_CHG_TYPE_BC12_SDP:
+ case USB_CHG_TYPE_OTHER:
+ case USB_CHG_TYPE_VBUS:
+ case USB_CHG_TYPE_UNKNOWN:
+ case USB_CHG_TYPE_NONE:
+ default:
+ return false;
+ }
+}
+
static int extcon_cros_ec_detect_cable(struct cros_ec_extcon_info *info,
bool force)
{
struct device *dev = info->dev;
int role, power_type;
+ unsigned int dr = DR_NONE;
+ bool pr = false;
bool polarity = false;
bool dp = false;
bool mux = false;
@@ -206,9 +278,12 @@ static int extcon_cros_ec_detect_cable(struct cros_ec_extcon_info *info,
dev_err(dev, "failed getting role err = %d\n", role);
return role;
}
+ dev_dbg(dev, "disconnected\n");
} else {
int pd_mux_state;
+ dr = (role & PD_CTRL_RESP_ROLE_DATA) ? DR_HOST : DR_DEVICE;
+ pr = (role & PD_CTRL_RESP_ROLE_POWER);
pd_mux_state = cros_ec_usb_get_pd_mux_state(info);
if (pd_mux_state < 0)
pd_mux_state = USB_PD_MUX_USB_ENABLED;
@@ -216,20 +291,62 @@ static int extcon_cros_ec_detect_cable(struct cros_ec_extcon_info *info,
dp = pd_mux_state & USB_PD_MUX_DP_ENABLED;
mux = pd_mux_state & USB_PD_MUX_USB_ENABLED;
hpd = pd_mux_state & USB_PD_MUX_HPD_IRQ;
- }
- if (force || info->dp != dp || info->mux != mux ||
- info->power_type != power_type) {
+ dev_dbg(dev,
+ "connected role 0x%x pwr type %d dr %d pr %d pol %d mux %d dp %d hpd %d\n",
+ role, power_type, dr, pr, polarity, mux, dp, hpd);
+ }
+ /*
+ * When there is no USB host (e.g. USB PD charger),
+ * we are not really a UFP for the AP.
+ */
+ if (dr == DR_DEVICE &&
+ cros_ec_usb_power_type_is_wall_wart(power_type, role))
+ dr = DR_NONE;
+
+ if (force || info->dr != dr || info->pr != pr || info->dp != dp ||
+ info->mux != mux || info->power_type != power_type) {
+ bool host_connected = false, device_connected = false;
+
+ dev_dbg(dev, "Type/Role switch! type = %s role = %s\n",
+ cros_ec_usb_power_type_string(power_type),
+ cros_ec_usb_role_string(dr));
+ info->dr = dr;
+ info->pr = pr;
info->dp = dp;
info->mux = mux;
info->power_type = power_type;
- extcon_set_state(info->edev, EXTCON_DISP_DP, dp);
+ if (dr == DR_DEVICE)
+ device_connected = true;
+ else if (dr == DR_HOST)
+ host_connected = true;
+ extcon_set_state(info->edev, EXTCON_USB, device_connected);
+ extcon_set_state(info->edev, EXTCON_USB_HOST, host_connected);
+ extcon_set_state(info->edev, EXTCON_DISP_DP, dp);
+ extcon_set_property(info->edev, EXTCON_USB,
+ EXTCON_PROP_USB_VBUS,
+ (union extcon_property_value)(int)pr);
+ extcon_set_property(info->edev, EXTCON_USB_HOST,
+ EXTCON_PROP_USB_VBUS,
+ (union extcon_property_value)(int)pr);
+ extcon_set_property(info->edev, EXTCON_USB,
+ EXTCON_PROP_USB_TYPEC_POLARITY,
+ (union extcon_property_value)(int)polarity);
+ extcon_set_property(info->edev, EXTCON_USB_HOST,
+ EXTCON_PROP_USB_TYPEC_POLARITY,
+ (union extcon_property_value)(int)polarity);
extcon_set_property(info->edev, EXTCON_DISP_DP,
EXTCON_PROP_USB_TYPEC_POLARITY,
(union extcon_property_value)(int)polarity);
+ extcon_set_property(info->edev, EXTCON_USB,
+ EXTCON_PROP_USB_SS,
+ (union extcon_property_value)(int)mux);
+ extcon_set_property(info->edev, EXTCON_USB_HOST,
+ EXTCON_PROP_USB_SS,
+ (union extcon_property_value)(int)mux);
extcon_set_property(info->edev, EXTCON_DISP_DP,
EXTCON_PROP_USB_SS,
(union extcon_property_value)(int)mux);
@@ -237,6 +354,8 @@ static int extcon_cros_ec_detect_cable(struct cros_ec_extcon_info *info,
EXTCON_PROP_DISP_HPD,
(union extcon_property_value)(int)hpd);
+ extcon_sync(info->edev, EXTCON_USB);
+ extcon_sync(info->edev, EXTCON_USB_HOST);
extcon_sync(info->edev, EXTCON_DISP_DP);
} else if (hpd) {
@@ -322,13 +441,28 @@ static int extcon_cros_ec_probe(struct platform_device *pdev)
return ret;
}
+ extcon_set_property_capability(info->edev, EXTCON_USB,
+ EXTCON_PROP_USB_VBUS);
+ extcon_set_property_capability(info->edev, EXTCON_USB_HOST,
+ EXTCON_PROP_USB_VBUS);
+ extcon_set_property_capability(info->edev, EXTCON_USB,
+ EXTCON_PROP_USB_TYPEC_POLARITY);
+ extcon_set_property_capability(info->edev, EXTCON_USB_HOST,
+ EXTCON_PROP_USB_TYPEC_POLARITY);
extcon_set_property_capability(info->edev, EXTCON_DISP_DP,
EXTCON_PROP_USB_TYPEC_POLARITY);
+ extcon_set_property_capability(info->edev, EXTCON_USB,
+ EXTCON_PROP_USB_SS);
+ extcon_set_property_capability(info->edev, EXTCON_USB_HOST,
+ EXTCON_PROP_USB_SS);
extcon_set_property_capability(info->edev, EXTCON_DISP_DP,
EXTCON_PROP_USB_SS);
extcon_set_property_capability(info->edev, EXTCON_DISP_DP,
EXTCON_PROP_DISP_HPD);
+ info->dr = DR_NONE;
+ info->pr = false;
+
platform_set_drvdata(pdev, info);
/* Get PD events from the EC */
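For context, a consumer of the extcon device extended above might read the new cable states and properties roughly as follows. This is a hypothetical sketch: example_read_typec_state is invented, and edev is assumed to have been obtained via extcon_get_edev_by_phandle() or similar.

#include <linux/extcon.h>

static void example_read_typec_state(struct extcon_dev *edev)
{
	union extcon_property_value prop;

	if (extcon_get_state(edev, EXTCON_USB_HOST) == 1) {
		/* DFP: a USB device is attached to us. */
		extcon_get_property(edev, EXTCON_USB_HOST,
				    EXTCON_PROP_USB_SS, &prop);
		pr_info("host mode, superspeed=%d\n", prop.intval);
	} else if (extcon_get_state(edev, EXTCON_USB) == 1) {
		/* UFP: we are attached to a USB host. */
		extcon_get_property(edev, EXTCON_USB,
				    EXTCON_PROP_USB_VBUS, &prop);
		pr_info("device mode, vbus=%d\n", prop.intval);
	}
}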
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index a301fcf46e88..523391bb3fbe 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -1784,10 +1784,10 @@ static int fw_device_op_release(struct inode *inode, struct file *file)
return 0;
}
-static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
+static __poll_t fw_device_op_poll(struct file *file, poll_table * pt)
{
struct client *client = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &client->wait, pt);
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
index 180f0a96528c..fee2e9e7ea20 100644
--- a/drivers/firewire/nosy.c
+++ b/drivers/firewire/nosy.c
@@ -328,11 +328,11 @@ nosy_release(struct inode *inode, struct file *file)
return 0;
}
-static unsigned int
+static __poll_t
nosy_poll(struct file *file, poll_table *pt)
{
struct client *client = file->private_data;
- unsigned int ret = 0;
+ __poll_t ret = 0;
poll_wait(file, &client->buffer.wait, pt);
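The unsigned int to __poll_t conversions above all follow the same shape; a generic poll method in the new style looks like this (illustrative sketch only, struct example_client and its members are invented):

#include <linux/poll.h>

static __poll_t example_poll(struct file *file, poll_table *pt)
{
	struct example_client *client = file->private_data;
	__poll_t mask = 0;

	poll_wait(file, &client->wait, pt);

	if (!list_empty(&client->event_list))
		mask |= EPOLLIN | EPOLLRDNORM;	/* data is ready to read */

	return mask;
}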
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index fa87a055905e..e77f77caa0f3 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -48,6 +48,14 @@ config ARM_SCPI_POWER_DOMAIN
This enables support for the SCPI power domains which can be
enabled or disabled via the SCP firmware
+config ARM_SDE_INTERFACE
+ bool "ARM Software Delegated Exception Interface (SDEI)"
+ depends on ARM64
+ help
+ The Software Delegated Exception Interface (SDEI) is an ARM
+ standard for registering callbacks from the platform firmware
+ into the OS. This is typically used to implement RAS notifications.
+
config EDD
tristate "BIOS Enhanced Disk Drive calls determine boot disk"
depends on X86
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index feaa890197f3..b248238ddc6a 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_ARM_PSCI_FW) += psci.o
obj-$(CONFIG_ARM_PSCI_CHECKER) += psci_checker.o
obj-$(CONFIG_ARM_SCPI_PROTOCOL) += arm_scpi.o
obj-$(CONFIG_ARM_SCPI_POWER_DOMAIN) += scpi_pm_domain.o
+obj-$(CONFIG_ARM_SDE_INTERFACE) += arm_sdei.o
obj-$(CONFIG_DMI) += dmi_scan.o
obj-$(CONFIG_DMI_SYSFS) += dmi-sysfs.o
obj-$(CONFIG_EDD) += edd.o
diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
new file mode 100644
index 000000000000..1ea71640fdc2
--- /dev/null
+++ b/drivers/firmware/arm_sdei.c
@@ -0,0 +1,1092 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2017 Arm Ltd.
+#define pr_fmt(fmt) "sdei: " fmt
+
+#include <linux/acpi.h>
+#include <linux/arm_sdei.h>
+#include <linux/arm-smccc.h>
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/cpuhotplug.h>
+#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
+#include <linux/errno.h>
+#include <linux/hardirq.h>
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+#include <linux/kvm_host.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/percpu.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/ptrace.h>
+#include <linux/preempt.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+
+/*
+ * The call to use to reach the firmware.
+ */
+static asmlinkage void (*sdei_firmware_call)(unsigned long function_id,
+ unsigned long arg0, unsigned long arg1,
+ unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, struct arm_smccc_res *res);
+
+/* entry point from firmware to arch asm code */
+static unsigned long sdei_entry_point;
+
+struct sdei_event {
+ /* These three are protected by the sdei_list_lock */
+ struct list_head list;
+ bool reregister;
+ bool reenable;
+
+ u32 event_num;
+ u8 type;
+ u8 priority;
+
+ /* This pointer is handed to firmware as the event argument. */
+ union {
+ /* Shared events */
+ struct sdei_registered_event *registered;
+
+ /* CPU private events */
+ struct sdei_registered_event __percpu *private_registered;
+ };
+};
+
+/*
+ * Take this mutex for any API call or modification; always take it
+ * before sdei_list_lock.
+ */
+static DEFINE_MUTEX(sdei_events_lock);
+
+/* and then hold this when modifying the list */
+static DEFINE_SPINLOCK(sdei_list_lock);
+static LIST_HEAD(sdei_list);
+
+/* Private events are registered/enabled via IPI passing one of these */
+struct sdei_crosscall_args {
+ struct sdei_event *event;
+ atomic_t errors;
+ int first_error;
+};
+
+#define CROSSCALL_INIT(arg, event) (arg.event = event, \
+ arg.first_error = 0, \
+ atomic_set(&arg.errors, 0))
+
+static inline int sdei_do_cross_call(void *fn, struct sdei_event *event)
+{
+ struct sdei_crosscall_args arg;
+
+ CROSSCALL_INIT(arg, event);
+ on_each_cpu(fn, &arg, true);
+
+ return arg.first_error;
+}
+
+static inline void
+sdei_cross_call_return(struct sdei_crosscall_args *arg, int err)
+{
+ if (err && (atomic_inc_return(&arg->errors) == 1))
+ arg->first_error = err;
+}
+
+static int sdei_to_linux_errno(unsigned long sdei_err)
+{
+ switch (sdei_err) {
+ case SDEI_NOT_SUPPORTED:
+ return -EOPNOTSUPP;
+ case SDEI_INVALID_PARAMETERS:
+ return -EINVAL;
+ case SDEI_DENIED:
+ return -EPERM;
+ case SDEI_PENDING:
+ return -EINPROGRESS;
+ case SDEI_OUT_OF_RESOURCE:
+ return -ENOMEM;
+ }
+
+ /* Not an error value ... */
+ return sdei_err;
+}
+
+/*
+ * If x0 is any of these values, the call failed; use sdei_to_linux_errno()
+ * to translate.
+ */
+static bool sdei_is_err(struct arm_smccc_res *res)
+{
+ switch (res->a0) {
+ case SDEI_NOT_SUPPORTED:
+ case SDEI_INVALID_PARAMETERS:
+ case SDEI_DENIED:
+ case SDEI_PENDING:
+ case SDEI_OUT_OF_RESOURCE:
+ return true;
+ }
+
+ return false;
+}
+
+static int invoke_sdei_fn(unsigned long function_id, unsigned long arg0,
+ unsigned long arg1, unsigned long arg2,
+ unsigned long arg3, unsigned long arg4,
+ u64 *result)
+{
+ int err = 0;
+ struct arm_smccc_res res;
+
+ if (sdei_firmware_call) {
+ sdei_firmware_call(function_id, arg0, arg1, arg2, arg3, arg4,
+ &res);
+ if (sdei_is_err(&res))
+ err = sdei_to_linux_errno(res.a0);
+ } else {
+ /*
+ * !sdei_firmware_call means we failed to probe or called
+ * sdei_mark_interface_broken(). -EIO is not an error returned
+ * by sdei_to_linux_errno() and is used to suppress messages
+ * from this driver.
+ */
+ err = -EIO;
+ res.a0 = SDEI_NOT_SUPPORTED;
+ }
+
+ if (result)
+ *result = res.a0;
+
+ return err;
+}
+
+static struct sdei_event *sdei_event_find(u32 event_num)
+{
+ struct sdei_event *e, *found = NULL;
+
+ lockdep_assert_held(&sdei_events_lock);
+
+ spin_lock(&sdei_list_lock);
+ list_for_each_entry(e, &sdei_list, list) {
+ if (e->event_num == event_num) {
+ found = e;
+ break;
+ }
+ }
+ spin_unlock(&sdei_list_lock);
+
+ return found;
+}
+
+int sdei_api_event_context(u32 query, u64 *result)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_CONTEXT, query, 0, 0, 0, 0,
+ result);
+}
+NOKPROBE_SYMBOL(sdei_api_event_context);
+
+static int sdei_api_event_get_info(u32 event, u32 info, u64 *result)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_GET_INFO, event, info, 0,
+ 0, 0, result);
+}
+
+static struct sdei_event *sdei_event_create(u32 event_num,
+ sdei_event_callback *cb,
+ void *cb_arg)
+{
+ int err;
+ u64 result;
+ struct sdei_event *event;
+ struct sdei_registered_event *reg;
+
+ lockdep_assert_held(&sdei_events_lock);
+
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (!event)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&event->list);
+ event->event_num = event_num;
+
+ err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY,
+ &result);
+ if (err) {
+ kfree(event);
+ return ERR_PTR(err);
+ }
+ event->priority = result;
+
+ err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_TYPE,
+ &result);
+ if (err) {
+ kfree(event);
+ return ERR_PTR(err);
+ }
+ event->type = result;
+
+ if (event->type == SDEI_EVENT_TYPE_SHARED) {
+ reg = kzalloc(sizeof(*reg), GFP_KERNEL);
+ if (!reg) {
+ kfree(event);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ reg->event_num = event_num;
+ reg->priority = event->priority;
+
+ reg->callback = cb;
+ reg->callback_arg = cb_arg;
+ event->registered = reg;
+ } else {
+ int cpu;
+ struct sdei_registered_event __percpu *regs;
+
+ regs = alloc_percpu(struct sdei_registered_event);
+ if (!regs) {
+ kfree(event);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for_each_possible_cpu(cpu) {
+ reg = per_cpu_ptr(regs, cpu);
+
+ reg->event_num = event->event_num;
+ reg->priority = event->priority;
+ reg->callback = cb;
+ reg->callback_arg = cb_arg;
+ }
+
+ event->private_registered = regs;
+ }
+
+ if (sdei_event_find(event_num)) {
+ kfree(event->registered);
+ kfree(event);
+ event = ERR_PTR(-EBUSY);
+ } else {
+ spin_lock(&sdei_list_lock);
+ list_add(&event->list, &sdei_list);
+ spin_unlock(&sdei_list_lock);
+ }
+
+ return event;
+}
+
+static void sdei_event_destroy(struct sdei_event *event)
+{
+ lockdep_assert_held(&sdei_events_lock);
+
+ spin_lock(&sdei_list_lock);
+ list_del(&event->list);
+ spin_unlock(&sdei_list_lock);
+
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ kfree(event->registered);
+ else
+ free_percpu(event->private_registered);
+
+ kfree(event);
+}
+
+static int sdei_api_get_version(u64 *version)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_VERSION, 0, 0, 0, 0, 0, version);
+}
+
+int sdei_mask_local_cpu(void)
+{
+ int err;
+
+ WARN_ON_ONCE(preemptible());
+
+ err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_MASK, 0, 0, 0, 0, 0, NULL);
+ if (err && err != -EIO) {
+ pr_warn_once("failed to mask CPU[%u]: %d\n",
+ smp_processor_id(), err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void _ipi_mask_cpu(void *ignored)
+{
+ sdei_mask_local_cpu();
+}
+
+int sdei_unmask_local_cpu(void)
+{
+ int err;
+
+ WARN_ON_ONCE(preemptible());
+
+ err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_UNMASK, 0, 0, 0, 0, 0, NULL);
+ if (err && err != -EIO) {
+ pr_warn_once("failed to unmask CPU[%u]: %d\n",
+ smp_processor_id(), err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void _ipi_unmask_cpu(void *ignored)
+{
+ sdei_unmask_local_cpu();
+}
+
+static void _ipi_private_reset(void *ignored)
+{
+ int err;
+
+ err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PRIVATE_RESET, 0, 0, 0, 0, 0,
+ NULL);
+ if (err && err != -EIO)
+ pr_warn_once("failed to reset CPU[%u]: %d\n",
+ smp_processor_id(), err);
+}
+
+static int sdei_api_shared_reset(void)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_SHARED_RESET, 0, 0, 0, 0, 0,
+ NULL);
+}
+
+static void sdei_mark_interface_broken(void)
+{
+ pr_err("disabling SDEI firmware interface\n");
+ on_each_cpu(&_ipi_mask_cpu, NULL, true);
+ sdei_firmware_call = NULL;
+}
+
+static int sdei_platform_reset(void)
+{
+ int err;
+
+ on_each_cpu(&_ipi_private_reset, NULL, true);
+ err = sdei_api_shared_reset();
+ if (err) {
+ pr_err("Failed to reset platform: %d\n", err);
+ sdei_mark_interface_broken();
+ }
+
+ return err;
+}
+
+static int sdei_api_event_enable(u32 event_num)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_ENABLE, event_num, 0, 0, 0,
+ 0, NULL);
+}
+
+/* Called directly by the hotplug callbacks */
+static void _local_event_enable(void *data)
+{
+ int err;
+ struct sdei_crosscall_args *arg = data;
+
+ WARN_ON_ONCE(preemptible());
+
+ err = sdei_api_event_enable(arg->event->event_num);
+
+ sdei_cross_call_return(arg, err);
+}
+
+int sdei_event_enable(u32 event_num)
+{
+ int err = -EINVAL;
+ struct sdei_event *event;
+
+ mutex_lock(&sdei_events_lock);
+ event = sdei_event_find(event_num);
+ if (!event) {
+ mutex_unlock(&sdei_events_lock);
+ return -ENOENT;
+ }
+
+ spin_lock(&sdei_list_lock);
+ event->reenable = true;
+ spin_unlock(&sdei_list_lock);
+
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ err = sdei_api_event_enable(event->event_num);
+ else
+ err = sdei_do_cross_call(_local_event_enable, event);
+ mutex_unlock(&sdei_events_lock);
+
+ return err;
+}
+EXPORT_SYMBOL(sdei_event_enable);
+
+static int sdei_api_event_disable(u32 event_num)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_DISABLE, event_num, 0, 0,
+ 0, 0, NULL);
+}
+
+static void _ipi_event_disable(void *data)
+{
+ int err;
+ struct sdei_crosscall_args *arg = data;
+
+ err = sdei_api_event_disable(arg->event->event_num);
+
+ sdei_cross_call_return(arg, err);
+}
+
+int sdei_event_disable(u32 event_num)
+{
+ int err = -EINVAL;
+ struct sdei_event *event;
+
+ mutex_lock(&sdei_events_lock);
+ event = sdei_event_find(event_num);
+ if (!event) {
+ mutex_unlock(&sdei_events_lock);
+ return -ENOENT;
+ }
+
+ spin_lock(&sdei_list_lock);
+ event->reenable = false;
+ spin_unlock(&sdei_list_lock);
+
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ err = sdei_api_event_disable(event->event_num);
+ else
+ err = sdei_do_cross_call(_ipi_event_disable, event);
+ mutex_unlock(&sdei_events_lock);
+
+ return err;
+}
+EXPORT_SYMBOL(sdei_event_disable);
+
+static int sdei_api_event_unregister(u32 event_num)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_UNREGISTER, event_num, 0,
+ 0, 0, 0, NULL);
+}
+
+/* Called directly by the hotplug callbacks */
+static void _local_event_unregister(void *data)
+{
+ int err;
+ struct sdei_crosscall_args *arg = data;
+
+ WARN_ON_ONCE(preemptible());
+
+ err = sdei_api_event_unregister(arg->event->event_num);
+
+ sdei_cross_call_return(arg, err);
+}
+
+static int _sdei_event_unregister(struct sdei_event *event)
+{
+ lockdep_assert_held(&sdei_events_lock);
+
+ spin_lock(&sdei_list_lock);
+ event->reregister = false;
+ event->reenable = false;
+ spin_unlock(&sdei_list_lock);
+
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ return sdei_api_event_unregister(event->event_num);
+
+ return sdei_do_cross_call(_local_event_unregister, event);
+}
+
+int sdei_event_unregister(u32 event_num)
+{
+ int err;
+ struct sdei_event *event;
+
+ WARN_ON(in_nmi());
+
+ mutex_lock(&sdei_events_lock);
+ event = sdei_event_find(event_num);
+ do {
+ if (!event) {
+ pr_warn("Event %u not registered\n", event_num);
+ err = -ENOENT;
+ break;
+ }
+
+ err = _sdei_event_unregister(event);
+ if (err)
+ break;
+
+ sdei_event_destroy(event);
+ } while (0);
+ mutex_unlock(&sdei_events_lock);
+
+ return err;
+}
+EXPORT_SYMBOL(sdei_event_unregister);
+
+/*
+ * Unregister events, but don't destroy them as they are re-registered by
+ * sdei_reregister_shared().
+ */
+static int sdei_unregister_shared(void)
+{
+ int err = 0;
+ struct sdei_event *event;
+
+ mutex_lock(&sdei_events_lock);
+ spin_lock(&sdei_list_lock);
+ list_for_each_entry(event, &sdei_list, list) {
+ if (event->type != SDEI_EVENT_TYPE_SHARED)
+ continue;
+
+ err = _sdei_event_unregister(event);
+ if (err)
+ break;
+ }
+ spin_unlock(&sdei_list_lock);
+ mutex_unlock(&sdei_events_lock);
+
+ return err;
+}
+
+static int sdei_api_event_register(u32 event_num, unsigned long entry_point,
+ void *arg, u64 flags, u64 affinity)
+{
+ return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_REGISTER, event_num,
+ (unsigned long)entry_point, (unsigned long)arg,
+ flags, affinity, NULL);
+}
+
+/* Called directly by the hotplug callbacks */
+static void _local_event_register(void *data)
+{
+ int err;
+ struct sdei_registered_event *reg;
+ struct sdei_crosscall_args *arg = data;
+
+ WARN_ON(preemptible());
+
+ reg = per_cpu_ptr(arg->event->private_registered, smp_processor_id());
+ err = sdei_api_event_register(arg->event->event_num, sdei_entry_point,
+ reg, 0, 0);
+
+ sdei_cross_call_return(arg, err);
+}
+
+static int _sdei_event_register(struct sdei_event *event)
+{
+ int err;
+
+ lockdep_assert_held(&sdei_events_lock);
+
+ spin_lock(&sdei_list_lock);
+ event->reregister = true;
+ spin_unlock(&sdei_list_lock);
+
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ return sdei_api_event_register(event->event_num,
+ sdei_entry_point,
+ event->registered,
+ SDEI_EVENT_REGISTER_RM_ANY, 0);
+
+
+ err = sdei_do_cross_call(_local_event_register, event);
+ if (err) {
+ spin_lock(&sdei_list_lock);
+ event->reregister = false;
+ event->reenable = false;
+ spin_unlock(&sdei_list_lock);
+
+ sdei_do_cross_call(_local_event_unregister, event);
+ }
+
+ return err;
+}
+
+int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg)
+{
+ int err;
+ struct sdei_event *event;
+
+ WARN_ON(in_nmi());
+
+ mutex_lock(&sdei_events_lock);
+ do {
+ if (sdei_event_find(event_num)) {
+ pr_warn("Event %u already registered\n", event_num);
+ err = -EBUSY;
+ break;
+ }
+
+ event = sdei_event_create(event_num, cb, arg);
+ if (IS_ERR(event)) {
+ err = PTR_ERR(event);
+ pr_warn("Failed to create event %u: %d\n", event_num,
+ err);
+ break;
+ }
+
+ err = _sdei_event_register(event);
+ if (err) {
+ sdei_event_destroy(event);
+ pr_warn("Failed to register event %u: %d\n", event_num,
+ err);
+ }
+ } while (0);
+ mutex_unlock(&sdei_events_lock);
+
+ return err;
+}
+EXPORT_SYMBOL(sdei_event_register);
+
+static int sdei_reregister_event(struct sdei_event *event)
+{
+ int err;
+
+ lockdep_assert_held(&sdei_events_lock);
+
+ err = _sdei_event_register(event);
+ if (err) {
+ pr_err("Failed to re-register event %u\n", event->event_num);
+ sdei_event_destroy(event);
+ return err;
+ }
+
+ if (event->reenable) {
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ err = sdei_api_event_enable(event->event_num);
+ else
+ err = sdei_do_cross_call(_local_event_enable, event);
+ }
+
+ if (err)
+ pr_err("Failed to re-enable event %u\n", event->event_num);
+
+ return err;
+}
+
+static int sdei_reregister_shared(void)
+{
+ int err = 0;
+ struct sdei_event *event;
+
+ mutex_lock(&sdei_events_lock);
+ spin_lock(&sdei_list_lock);
+ list_for_each_entry(event, &sdei_list, list) {
+ if (event->type != SDEI_EVENT_TYPE_SHARED)
+ continue;
+
+ if (event->reregister) {
+ err = sdei_reregister_event(event);
+ if (err)
+ break;
+ }
+ }
+ spin_unlock(&sdei_list_lock);
+ mutex_unlock(&sdei_events_lock);
+
+ return err;
+}
+
+static int sdei_cpuhp_down(unsigned int cpu)
+{
+ struct sdei_event *event;
+ struct sdei_crosscall_args arg;
+
+ /* un-register private events */
+ spin_lock(&sdei_list_lock);
+ list_for_each_entry(event, &sdei_list, list) {
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ continue;
+
+ CROSSCALL_INIT(arg, event);
+ /* call the cross-call function locally... */
+ _local_event_unregister(&arg);
+ if (arg.first_error)
+ pr_err("Failed to unregister event %u: %d\n",
+ event->event_num, arg.first_error);
+ }
+ spin_unlock(&sdei_list_lock);
+
+ return sdei_mask_local_cpu();
+}
+
+static int sdei_cpuhp_up(unsigned int cpu)
+{
+ struct sdei_event *event;
+ struct sdei_crosscall_args arg;
+
+ /* re-register/enable private events */
+ spin_lock(&sdei_list_lock);
+ list_for_each_entry(event, &sdei_list, list) {
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ continue;
+
+ if (event->reregister) {
+ CROSSCALL_INIT(arg, event);
+ /* call the cross-call function locally... */
+ _local_event_register(&arg);
+ if (arg.first_error)
+ pr_err("Failed to re-register event %u: %d\n",
+ event->event_num, arg.first_error);
+ }
+
+ if (event->reenable) {
+ CROSSCALL_INIT(arg, event);
+ _local_event_enable(&arg);
+ if (arg.first_error)
+ pr_err("Failed to re-enable event %u: %d\n",
+ event->event_num, arg.first_error);
+ }
+ }
+ spin_unlock(&sdei_list_lock);
+
+ return sdei_unmask_local_cpu();
+}
+
+/* When entering idle, mask/unmask events for this cpu */
+static int sdei_pm_notifier(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ int rv;
+
+ switch (action) {
+ case CPU_PM_ENTER:
+ rv = sdei_mask_local_cpu();
+ break;
+ case CPU_PM_EXIT:
+ case CPU_PM_ENTER_FAILED:
+ rv = sdei_unmask_local_cpu();
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ if (rv)
+ return notifier_from_errno(rv);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block sdei_pm_nb = {
+ .notifier_call = sdei_pm_notifier,
+};
+
+static int sdei_device_suspend(struct device *dev)
+{
+ on_each_cpu(_ipi_mask_cpu, NULL, true);
+
+ return 0;
+}
+
+static int sdei_device_resume(struct device *dev)
+{
+ on_each_cpu(_ipi_unmask_cpu, NULL, true);
+
+ return 0;
+}
+
+/*
+ * We need all events to be reregistered when we resume from hibernate.
+ *
+ * The sequence is freeze->thaw. Reboot. freeze->restore. We unregister
+ * events during freeze, then re-register and re-enable them during thaw
+ * and restore.
+ */
+static int sdei_device_freeze(struct device *dev)
+{
+ int err;
+
+ /* unregister private events */
+ cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);
+
+ err = sdei_unregister_shared();
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int sdei_device_thaw(struct device *dev)
+{
+ int err;
+
+ /* re-register shared events */
+ err = sdei_reregister_shared();
+ if (err) {
+ pr_warn("Failed to re-register shared events...\n");
+ sdei_mark_interface_broken();
+ return err;
+ }
+
+ err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
+ &sdei_cpuhp_up, &sdei_cpuhp_down);
+ if (err)
+ pr_warn("Failed to re-register CPU hotplug notifier...\n");
+
+ return err;
+}
+
+static int sdei_device_restore(struct device *dev)
+{
+ int err;
+
+ err = sdei_platform_reset();
+ if (err)
+ return err;
+
+ return sdei_device_thaw(dev);
+}
+
+static const struct dev_pm_ops sdei_pm_ops = {
+ .suspend = sdei_device_suspend,
+ .resume = sdei_device_resume,
+ .freeze = sdei_device_freeze,
+ .thaw = sdei_device_thaw,
+ .restore = sdei_device_restore,
+};
+
+/*
+ * Mask all CPUs and unregister all events on panic, reboot or kexec.
+ */
+static int sdei_reboot_notifier(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ /*
+ * We are going to reset the interface; after this there is no point
+ * doing work when we take CPUs offline.
+ */
+ cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);
+
+ sdei_platform_reset();
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block sdei_reboot_nb = {
+ .notifier_call = sdei_reboot_notifier,
+};
+
+static void sdei_smccc_smc(unsigned long function_id,
+ unsigned long arg0, unsigned long arg1,
+ unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, struct arm_smccc_res *res)
+{
+ arm_smccc_smc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
+}
+
+static void sdei_smccc_hvc(unsigned long function_id,
+ unsigned long arg0, unsigned long arg1,
+ unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, struct arm_smccc_res *res)
+{
+ arm_smccc_hvc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
+}
+
+static int sdei_get_conduit(struct platform_device *pdev)
+{
+ const char *method;
+ struct device_node *np = pdev->dev.of_node;
+
+ sdei_firmware_call = NULL;
+ if (np) {
+ if (of_property_read_string(np, "method", &method)) {
+ pr_warn("missing \"method\" property\n");
+ return CONDUIT_INVALID;
+ }
+
+ if (!strcmp("hvc", method)) {
+ sdei_firmware_call = &sdei_smccc_hvc;
+ return CONDUIT_HVC;
+ } else if (!strcmp("smc", method)) {
+ sdei_firmware_call = &sdei_smccc_smc;
+ return CONDUIT_SMC;
+ }
+
+ pr_warn("invalid \"method\" property: %s\n", method);
+ } else if (IS_ENABLED(CONFIG_ACPI) && !acpi_disabled) {
+ if (acpi_psci_use_hvc()) {
+ sdei_firmware_call = &sdei_smccc_hvc;
+ return CONDUIT_HVC;
+ } else {
+ sdei_firmware_call = &sdei_smccc_smc;
+ return CONDUIT_SMC;
+ }
+ }
+
+ return CONDUIT_INVALID;
+}
+
+static int sdei_probe(struct platform_device *pdev)
+{
+ int err;
+ u64 ver = 0;
+ int conduit;
+
+ conduit = sdei_get_conduit(pdev);
+ if (!sdei_firmware_call)
+ return 0;
+
+ err = sdei_api_get_version(&ver);
+ if (err == -EOPNOTSUPP)
+ pr_err("advertised but not implemented in platform firmware\n");
+ if (err) {
+ pr_err("Failed to get SDEI version: %d\n", err);
+ sdei_mark_interface_broken();
+ return err;
+ }
+
+ pr_info("SDEIv%d.%d (0x%x) detected in firmware.\n",
+ (int)SDEI_VERSION_MAJOR(ver), (int)SDEI_VERSION_MINOR(ver),
+ (int)SDEI_VERSION_VENDOR(ver));
+
+ if (SDEI_VERSION_MAJOR(ver) != 1) {
+ pr_warn("Conflicting SDEI version detected.\n");
+ sdei_mark_interface_broken();
+ return -EINVAL;
+ }
+
+ err = sdei_platform_reset();
+ if (err)
+ return err;
+
+ sdei_entry_point = sdei_arch_get_entry_point(conduit);
+ if (!sdei_entry_point) {
+ /* Not supported due to hardware or boot configuration */
+ sdei_mark_interface_broken();
+ return 0;
+ }
+
+ err = cpu_pm_register_notifier(&sdei_pm_nb);
+ if (err) {
+ pr_warn("Failed to register CPU PM notifier...\n");
+ goto error;
+ }
+
+ err = register_reboot_notifier(&sdei_reboot_nb);
+ if (err) {
+ pr_warn("Failed to register reboot notifier...\n");
+ goto remove_cpupm;
+ }
+
+ err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
+ &sdei_cpuhp_up, &sdei_cpuhp_down);
+ if (err) {
+ pr_warn("Failed to register CPU hotplug notifier...\n");
+ goto remove_reboot;
+ }
+
+ return 0;
+
+remove_reboot:
+ unregister_reboot_notifier(&sdei_reboot_nb);
+
+remove_cpupm:
+ cpu_pm_unregister_notifier(&sdei_pm_nb);
+
+error:
+ sdei_mark_interface_broken();
+ return err;
+}
+
+static const struct of_device_id sdei_of_match[] = {
+ { .compatible = "arm,sdei-1.0" },
+ {}
+};
+
+static struct platform_driver sdei_driver = {
+ .driver = {
+ .name = "sdei",
+ .pm = &sdei_pm_ops,
+ .of_match_table = sdei_of_match,
+ },
+ .probe = sdei_probe,
+};
+
+static bool __init sdei_present_dt(void)
+{
+ struct platform_device *pdev;
+ struct device_node *np, *fw_np;
+
+ fw_np = of_find_node_by_name(NULL, "firmware");
+ if (!fw_np)
+ return false;
+
+ np = of_find_matching_node(fw_np, sdei_of_match);
+ of_node_put(fw_np);
+ if (!np)
+ return false;
+
+ pdev = of_platform_device_create(np, sdei_driver.driver.name, NULL);
+ of_node_put(np);
+ if (!pdev)
+ return false;
+
+ return true;
+}
+
+static bool __init sdei_present_acpi(void)
+{
+ acpi_status status;
+ struct platform_device *pdev;
+ struct acpi_table_header *sdei_table_header;
+
+ if (acpi_disabled)
+ return false;
+
+ status = acpi_get_table(ACPI_SIG_SDEI, 0, &sdei_table_header);
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ const char *msg = acpi_format_exception(status);
+
+ pr_info("Failed to get ACPI:SDEI table, %s\n", msg);
+ }
+ if (ACPI_FAILURE(status))
+ return false;
+
+ pdev = platform_device_register_simple(sdei_driver.driver.name, 0, NULL,
+ 0);
+ if (IS_ERR(pdev))
+ return false;
+
+ return true;
+}
+
+static int __init sdei_init(void)
+{
+ if (sdei_present_dt() || sdei_present_acpi())
+ platform_driver_register(&sdei_driver);
+
+ return 0;
+}
+
+/*
+ * On an ACPI system SDEI needs to be ready before HEST:GHES tries to register
+ * its events. ACPI is initialised from a subsys_initcall(), while GHES is
+ * initialised by device_initcall(); we want to be called in between.
+ */
+subsys_initcall_sync(sdei_init);
+
+int sdei_event_handler(struct pt_regs *regs,
+ struct sdei_registered_event *arg)
+{
+ int err;
+ mm_segment_t orig_addr_limit;
+ u32 event_num = arg->event_num;
+
+ orig_addr_limit = get_fs();
+ set_fs(USER_DS);
+
+ err = arg->callback(event_num, regs, arg->callback_arg);
+ if (err)
+ pr_err_ratelimited("event %u on CPU %u failed with error: %d\n",
+ event_num, smp_processor_id(), err);
+
+ set_fs(orig_addr_limit);
+
+ return err;
+}
+NOKPROBE_SYMBOL(sdei_event_handler);
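A hedged sketch of how a client would consume the API exported above; the event number 42 is assumed for illustration, as real event numbers are assigned by the platform firmware.

#include <linux/arm_sdei.h>

/* Handlers run in an NMI-like context: they must not sleep or take
 * locks that are also taken from process context.
 */
static int example_sdei_cb(u32 event, struct pt_regs *regs, void *arg)
{
	pr_err("SDEI event %u fired\n", event);
	return 0;
}

static int __init example_sdei_client_init(void)
{
	int err;

	err = sdei_event_register(42, example_sdei_cb, NULL);
	if (err)
		return err;

	return sdei_event_enable(42);
}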
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 2b4c39fdfa91..6047ed4e8a3d 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -159,13 +159,21 @@ config RESET_ATTACK_MITIGATION
using the TCG Platform Reset Attack Mitigation specification. This
protects against an attacker forcibly rebooting the system while it
still contains secrets in RAM, booting another OS and extracting the
- secrets.
+ secrets. This should only be enabled when userland is configured to
+ clear the MemoryOverwriteRequest flag on clean shutdown after secrets
+ have been evicted, since otherwise it will trigger even on clean
+ reboots.
endmenu
config UEFI_CPER
bool
+config UEFI_CPER_ARM
+ bool
+ depends on UEFI_CPER && (ARM || ARM64)
+ default y
+
config EFI_DEV_PATH_PARSER
bool
depends on ACPI
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index 269501dfba53..cb805374f4bc 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -11,7 +11,7 @@
KASAN_SANITIZE_runtime-wrappers.o := n
obj-$(CONFIG_ACPI_BGRT) += efi-bgrt.o
-obj-$(CONFIG_EFI) += efi.o vars.o reboot.o memattr.o
+obj-$(CONFIG_EFI) += efi.o vars.o reboot.o memattr.o tpm.o
obj-$(CONFIG_EFI) += capsule.o memmap.o
obj-$(CONFIG_EFI_VARS) += efivars.o
obj-$(CONFIG_EFI_ESRT) += esrt.o
@@ -30,3 +30,4 @@ arm-obj-$(CONFIG_EFI) := arm-init.o arm-runtime.o
obj-$(CONFIG_ARM) += $(arm-obj-y)
obj-$(CONFIG_ARM64) += $(arm-obj-y)
obj-$(CONFIG_EFI_CAPSULE_LOADER) += capsule-loader.o
+obj-$(CONFIG_UEFI_CPER_ARM) += cper-arm.o
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
index 055e2e8f985a..e456f4602df1 100644
--- a/drivers/firmware/efi/capsule-loader.c
+++ b/drivers/firmware/efi/capsule-loader.c
@@ -45,7 +45,7 @@ int __efi_capsule_setup_info(struct capsule_info *cap_info)
pages_needed = ALIGN(cap_info->total_size, PAGE_SIZE) / PAGE_SIZE;
if (pages_needed == 0) {
- pr_err("invalid capsule size");
+ pr_err("invalid capsule size\n");
return -EINVAL;
}
diff --git a/drivers/firmware/efi/cper-arm.c b/drivers/firmware/efi/cper-arm.c
new file mode 100644
index 000000000000..698e5c8e0c8d
--- /dev/null
+++ b/drivers/firmware/efi/cper-arm.c
@@ -0,0 +1,356 @@
+/*
+ * UEFI Common Platform Error Record (CPER) support
+ *
+ * Copyright (C) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/time.h>
+#include <linux/cper.h>
+#include <linux/dmi.h>
+#include <linux/acpi.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/printk.h>
+#include <linux/bcd.h>
+#include <acpi/ghes.h>
+#include <ras/ras_event.h>
+
+#define INDENT_SP " "
+
+static const char * const arm_reg_ctx_strs[] = {
+ "AArch32 general purpose registers",
+ "AArch32 EL1 context registers",
+ "AArch32 EL2 context registers",
+ "AArch32 secure context registers",
+ "AArch64 general purpose registers",
+ "AArch64 EL1 context registers",
+ "AArch64 EL2 context registers",
+ "AArch64 EL3 context registers",
+ "Misc. system register structure",
+};
+
+static const char * const arm_err_trans_type_strs[] = {
+ "Instruction",
+ "Data Access",
+ "Generic",
+};
+
+static const char * const arm_bus_err_op_strs[] = {
+ "Generic error (type cannot be determined)",
+ "Generic read (type of instruction or data request cannot be determined)",
+	"Generic write (type of instruction or data request cannot be determined)",
+ "Data read",
+ "Data write",
+ "Instruction fetch",
+ "Prefetch",
+};
+
+static const char * const arm_cache_err_op_strs[] = {
+ "Generic error (type cannot be determined)",
+ "Generic read (type of instruction or data request cannot be determined)",
+	"Generic write (type of instruction or data request cannot be determined)",
+ "Data read",
+ "Data write",
+ "Instruction fetch",
+ "Prefetch",
+ "Eviction",
+ "Snooping (processor initiated a cache snoop that resulted in an error)",
+ "Snooped (processor raised a cache error caused by another processor or device snooping its cache)",
+ "Management",
+};
+
+static const char * const arm_tlb_err_op_strs[] = {
+ "Generic error (type cannot be determined)",
+ "Generic read (type of instruction or data request cannot be determined)",
+	"Generic write (type of instruction or data request cannot be determined)",
+ "Data read",
+ "Data write",
+ "Instruction fetch",
+ "Prefetch",
+ "Local management operation (processor initiated a TLB management operation that resulted in an error)",
+ "External management operation (processor raised a TLB error caused by another processor or device broadcasting TLB operations)",
+};
+
+static const char * const arm_bus_err_part_type_strs[] = {
+ "Local processor originated request",
+ "Local processor responded to request",
+ "Local processor observed",
+ "Generic",
+};
+
+static const char * const arm_bus_err_addr_space_strs[] = {
+ "External Memory Access",
+ "Internal Memory Access",
+ "Unknown",
+ "Device Memory Access",
+};
+
+static void cper_print_arm_err_info(const char *pfx, u32 type,
+ u64 error_info)
+{
+ u8 trans_type, op_type, level, participation_type, address_space;
+ u16 mem_attributes;
+ bool proc_context_corrupt, corrected, precise_pc, restartable_pc;
+ bool time_out, access_mode;
+
+ /* If the type is unknown, bail. */
+ if (type > CPER_ARM_MAX_TYPE)
+ return;
+
+ /*
+ * Vendor type errors have error information values that are vendor
+ * specific.
+ */
+ if (type == CPER_ARM_VENDOR_ERROR)
+ return;
+
+ if (error_info & CPER_ARM_ERR_VALID_TRANSACTION_TYPE) {
+ trans_type = ((error_info >> CPER_ARM_ERR_TRANSACTION_SHIFT)
+ & CPER_ARM_ERR_TRANSACTION_MASK);
+ if (trans_type < ARRAY_SIZE(arm_err_trans_type_strs)) {
+ printk("%stransaction type: %s\n", pfx,
+ arm_err_trans_type_strs[trans_type]);
+ }
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_OPERATION_TYPE) {
+ op_type = ((error_info >> CPER_ARM_ERR_OPERATION_SHIFT)
+ & CPER_ARM_ERR_OPERATION_MASK);
+ switch (type) {
+ case CPER_ARM_CACHE_ERROR:
+ if (op_type < ARRAY_SIZE(arm_cache_err_op_strs)) {
+ printk("%soperation type: %s\n", pfx,
+ arm_cache_err_op_strs[op_type]);
+ }
+ break;
+ case CPER_ARM_TLB_ERROR:
+ if (op_type < ARRAY_SIZE(arm_tlb_err_op_strs)) {
+ printk("%soperation type: %s\n", pfx,
+ arm_tlb_err_op_strs[op_type]);
+ }
+ break;
+ case CPER_ARM_BUS_ERROR:
+ if (op_type < ARRAY_SIZE(arm_bus_err_op_strs)) {
+ printk("%soperation type: %s\n", pfx,
+ arm_bus_err_op_strs[op_type]);
+ }
+ break;
+ }
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_LEVEL) {
+ level = ((error_info >> CPER_ARM_ERR_LEVEL_SHIFT)
+ & CPER_ARM_ERR_LEVEL_MASK);
+ switch (type) {
+ case CPER_ARM_CACHE_ERROR:
+ printk("%scache level: %d\n", pfx, level);
+ break;
+ case CPER_ARM_TLB_ERROR:
+ printk("%sTLB level: %d\n", pfx, level);
+ break;
+ case CPER_ARM_BUS_ERROR:
+ printk("%saffinity level at which the bus error occurred: %d\n",
+ pfx, level);
+ break;
+ }
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_PROC_CONTEXT_CORRUPT) {
+ proc_context_corrupt = ((error_info >> CPER_ARM_ERR_PC_CORRUPT_SHIFT)
+ & CPER_ARM_ERR_PC_CORRUPT_MASK);
+ if (proc_context_corrupt)
+ printk("%sprocessor context corrupted\n", pfx);
+ else
+ printk("%sprocessor context not corrupted\n", pfx);
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_CORRECTED) {
+ corrected = ((error_info >> CPER_ARM_ERR_CORRECTED_SHIFT)
+ & CPER_ARM_ERR_CORRECTED_MASK);
+ if (corrected)
+ printk("%sthe error has been corrected\n", pfx);
+ else
+ printk("%sthe error has not been corrected\n", pfx);
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_PRECISE_PC) {
+ precise_pc = ((error_info >> CPER_ARM_ERR_PRECISE_PC_SHIFT)
+ & CPER_ARM_ERR_PRECISE_PC_MASK);
+ if (precise_pc)
+ printk("%sPC is precise\n", pfx);
+ else
+ printk("%sPC is imprecise\n", pfx);
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_RESTARTABLE_PC) {
+ restartable_pc = ((error_info >> CPER_ARM_ERR_RESTARTABLE_PC_SHIFT)
+ & CPER_ARM_ERR_RESTARTABLE_PC_MASK);
+ if (restartable_pc)
+ printk("%sProgram execution can be restarted reliably at the PC associated with the error.\n", pfx);
+ }
+
+ /* The rest of the fields are specific to bus errors */
+ if (type != CPER_ARM_BUS_ERROR)
+ return;
+
+ if (error_info & CPER_ARM_ERR_VALID_PARTICIPATION_TYPE) {
+ participation_type = ((error_info >> CPER_ARM_ERR_PARTICIPATION_TYPE_SHIFT)
+ & CPER_ARM_ERR_PARTICIPATION_TYPE_MASK);
+ if (participation_type < ARRAY_SIZE(arm_bus_err_part_type_strs)) {
+ printk("%sparticipation type: %s\n", pfx,
+ arm_bus_err_part_type_strs[participation_type]);
+ }
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_TIME_OUT) {
+ time_out = ((error_info >> CPER_ARM_ERR_TIME_OUT_SHIFT)
+ & CPER_ARM_ERR_TIME_OUT_MASK);
+ if (time_out)
+ printk("%srequest timed out\n", pfx);
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_ADDRESS_SPACE) {
+ address_space = ((error_info >> CPER_ARM_ERR_ADDRESS_SPACE_SHIFT)
+ & CPER_ARM_ERR_ADDRESS_SPACE_MASK);
+ if (address_space < ARRAY_SIZE(arm_bus_err_addr_space_strs)) {
+ printk("%saddress space: %s\n", pfx,
+ arm_bus_err_addr_space_strs[address_space]);
+ }
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_MEM_ATTRIBUTES) {
+ mem_attributes = ((error_info >> CPER_ARM_ERR_MEM_ATTRIBUTES_SHIFT)
+ & CPER_ARM_ERR_MEM_ATTRIBUTES_MASK);
+		printk("%smemory access attributes: 0x%x\n", pfx, mem_attributes);
+ }
+
+ if (error_info & CPER_ARM_ERR_VALID_ACCESS_MODE) {
+ access_mode = ((error_info >> CPER_ARM_ERR_ACCESS_MODE_SHIFT)
+ & CPER_ARM_ERR_ACCESS_MODE_MASK);
+ if (access_mode)
+ printk("%saccess mode: normal\n", pfx);
+ else
+ printk("%saccess mode: secure\n", pfx);
+ }
+}
+
+void cper_print_proc_arm(const char *pfx,
+ const struct cper_sec_proc_arm *proc)
+{
+ int i, len, max_ctx_type;
+ struct cper_arm_err_info *err_info;
+ struct cper_arm_ctx_info *ctx_info;
+ char newpfx[64], infopfx[64];
+
+ printk("%sMIDR: 0x%016llx\n", pfx, proc->midr);
+
+ len = proc->section_length - (sizeof(*proc) +
+ proc->err_info_num * (sizeof(*err_info)));
+ if (len < 0) {
+ printk("%ssection length: %d\n", pfx, proc->section_length);
+ printk("%ssection length is too small\n", pfx);
+ printk("%sfirmware-generated error record is incorrect\n", pfx);
+ printk("%sERR_INFO_NUM is %d\n", pfx, proc->err_info_num);
+ return;
+ }
+
+ if (proc->validation_bits & CPER_ARM_VALID_MPIDR)
+ printk("%sMultiprocessor Affinity Register (MPIDR): 0x%016llx\n",
+ pfx, proc->mpidr);
+
+ if (proc->validation_bits & CPER_ARM_VALID_AFFINITY_LEVEL)
+ printk("%serror affinity level: %d\n", pfx,
+ proc->affinity_level);
+
+ if (proc->validation_bits & CPER_ARM_VALID_RUNNING_STATE) {
+ printk("%srunning state: 0x%x\n", pfx, proc->running_state);
+ printk("%sPower State Coordination Interface state: %d\n",
+ pfx, proc->psci_state);
+ }
+
+ snprintf(newpfx, sizeof(newpfx), "%s%s", pfx, INDENT_SP);
+
+ err_info = (struct cper_arm_err_info *)(proc + 1);
+ for (i = 0; i < proc->err_info_num; i++) {
+ printk("%sError info structure %d:\n", pfx, i);
+
+ printk("%snum errors: %d\n", pfx, err_info->multiple_error + 1);
+
+ if (err_info->validation_bits & CPER_ARM_INFO_VALID_FLAGS) {
+ if (err_info->flags & CPER_ARM_INFO_FLAGS_FIRST)
+ printk("%sfirst error captured\n", newpfx);
+ if (err_info->flags & CPER_ARM_INFO_FLAGS_LAST)
+ printk("%slast error captured\n", newpfx);
+ if (err_info->flags & CPER_ARM_INFO_FLAGS_PROPAGATED)
+ printk("%spropagated error captured\n",
+ newpfx);
+ if (err_info->flags & CPER_ARM_INFO_FLAGS_OVERFLOW)
+ printk("%soverflow occurred, error info is incomplete\n",
+ newpfx);
+ }
+
+ printk("%serror_type: %d, %s\n", newpfx, err_info->type,
+ err_info->type < ARRAY_SIZE(cper_proc_error_type_strs) ?
+ cper_proc_error_type_strs[err_info->type] : "unknown");
+ if (err_info->validation_bits & CPER_ARM_INFO_VALID_ERR_INFO) {
+ printk("%serror_info: 0x%016llx\n", newpfx,
+ err_info->error_info);
+ snprintf(infopfx, sizeof(infopfx), "%s%s", newpfx, INDENT_SP);
+ cper_print_arm_err_info(infopfx, err_info->type,
+ err_info->error_info);
+ }
+ if (err_info->validation_bits & CPER_ARM_INFO_VALID_VIRT_ADDR)
+ printk("%svirtual fault address: 0x%016llx\n",
+ newpfx, err_info->virt_fault_addr);
+ if (err_info->validation_bits & CPER_ARM_INFO_VALID_PHYSICAL_ADDR)
+ printk("%sphysical fault address: 0x%016llx\n",
+ newpfx, err_info->physical_fault_addr);
+ err_info += 1;
+ }
+
+ ctx_info = (struct cper_arm_ctx_info *)err_info;
+ max_ctx_type = ARRAY_SIZE(arm_reg_ctx_strs) - 1;
+ for (i = 0; i < proc->context_info_num; i++) {
+ int size = sizeof(*ctx_info) + ctx_info->size;
+
+ printk("%sContext info structure %d:\n", pfx, i);
+ if (len < size) {
+ printk("%ssection length is too small\n", newpfx);
+ printk("%sfirmware-generated error record is incorrect\n", pfx);
+ return;
+ }
+ if (ctx_info->type > max_ctx_type) {
+ printk("%sInvalid context type: %d (max: %d)\n",
+ newpfx, ctx_info->type, max_ctx_type);
+ return;
+ }
+ printk("%sregister context type: %s\n", newpfx,
+ arm_reg_ctx_strs[ctx_info->type]);
+ print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, 4,
+ (ctx_info + 1), ctx_info->size, 0);
+ len -= size;
+ ctx_info = (struct cper_arm_ctx_info *)((long)ctx_info + size);
+ }
+
+ if (len > 0) {
+ printk("%sVendor specific error info has %u bytes:\n", pfx,
+ len);
+ print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, 4, ctx_info,
+ len, true);
+ }
+}
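Every field of the 64-bit error_info word above is decoded with the same three-step pattern: a validity bit gates the field, a shift/mask pair extracts it, and the extracted value is bounds-checked against the matching string table before printing. A minimal sketch of that pattern, with illustrative constants standing in for the CPER_ARM_ERR_* definitions:

	#define ERR_VALID_FIELD	BIT(1)	/* illustrative validity bit */
	#define ERR_FIELD_SHIFT	16	/* illustrative shift */
	#define ERR_FIELD_MASK	0x3f	/* illustrative mask */

	if (error_info & ERR_VALID_FIELD) {
		u8 field = (error_info >> ERR_FIELD_SHIFT) & ERR_FIELD_MASK;

		/* never index past the end of the string table */
		if (field < ARRAY_SIZE(field_strs))
			printk("%sfield: %s\n", pfx, field_strs[field]);
	}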
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index d2fcafcea07e..c165933ebf38 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -122,7 +122,7 @@ static const char * const proc_isa_strs[] = {
"ARM A64",
};
-static const char * const proc_error_type_strs[] = {
+const char * const cper_proc_error_type_strs[] = {
"cache error",
"TLB error",
"bus error",
@@ -157,8 +157,8 @@ static void cper_print_proc_generic(const char *pfx,
if (proc->validation_bits & CPER_PROC_VALID_ERROR_TYPE) {
printk("%s""error_type: 0x%02x\n", pfx, proc->proc_error_type);
cper_print_bits(pfx, proc->proc_error_type,
- proc_error_type_strs,
- ARRAY_SIZE(proc_error_type_strs));
+ cper_proc_error_type_strs,
+ ARRAY_SIZE(cper_proc_error_type_strs));
}
if (proc->validation_bits & CPER_PROC_VALID_OPERATION)
printk("%s""operation: %d, %s\n", pfx, proc->operation,
@@ -188,122 +188,6 @@ static void cper_print_proc_generic(const char *pfx,
printk("%s""IP: 0x%016llx\n", pfx, proc->ip);
}
-#if defined(CONFIG_ARM64) || defined(CONFIG_ARM)
-static const char * const arm_reg_ctx_strs[] = {
- "AArch32 general purpose registers",
- "AArch32 EL1 context registers",
- "AArch32 EL2 context registers",
- "AArch32 secure context registers",
- "AArch64 general purpose registers",
- "AArch64 EL1 context registers",
- "AArch64 EL2 context registers",
- "AArch64 EL3 context registers",
- "Misc. system register structure",
-};
-
-static void cper_print_proc_arm(const char *pfx,
- const struct cper_sec_proc_arm *proc)
-{
- int i, len, max_ctx_type;
- struct cper_arm_err_info *err_info;
- struct cper_arm_ctx_info *ctx_info;
- char newpfx[64];
-
- printk("%sMIDR: 0x%016llx\n", pfx, proc->midr);
-
- len = proc->section_length - (sizeof(*proc) +
- proc->err_info_num * (sizeof(*err_info)));
- if (len < 0) {
- printk("%ssection length: %d\n", pfx, proc->section_length);
- printk("%ssection length is too small\n", pfx);
- printk("%sfirmware-generated error record is incorrect\n", pfx);
- printk("%sERR_INFO_NUM is %d\n", pfx, proc->err_info_num);
- return;
- }
-
- if (proc->validation_bits & CPER_ARM_VALID_MPIDR)
- printk("%sMultiprocessor Affinity Register (MPIDR): 0x%016llx\n",
- pfx, proc->mpidr);
-
- if (proc->validation_bits & CPER_ARM_VALID_AFFINITY_LEVEL)
- printk("%serror affinity level: %d\n", pfx,
- proc->affinity_level);
-
- if (proc->validation_bits & CPER_ARM_VALID_RUNNING_STATE) {
- printk("%srunning state: 0x%x\n", pfx, proc->running_state);
- printk("%sPower State Coordination Interface state: %d\n",
- pfx, proc->psci_state);
- }
-
- snprintf(newpfx, sizeof(newpfx), "%s%s", pfx, INDENT_SP);
-
- err_info = (struct cper_arm_err_info *)(proc + 1);
- for (i = 0; i < proc->err_info_num; i++) {
- printk("%sError info structure %d:\n", pfx, i);
-
- printk("%snum errors: %d\n", pfx, err_info->multiple_error + 1);
-
- if (err_info->validation_bits & CPER_ARM_INFO_VALID_FLAGS) {
- if (err_info->flags & CPER_ARM_INFO_FLAGS_FIRST)
- printk("%sfirst error captured\n", newpfx);
- if (err_info->flags & CPER_ARM_INFO_FLAGS_LAST)
- printk("%slast error captured\n", newpfx);
- if (err_info->flags & CPER_ARM_INFO_FLAGS_PROPAGATED)
- printk("%spropagated error captured\n",
- newpfx);
- if (err_info->flags & CPER_ARM_INFO_FLAGS_OVERFLOW)
- printk("%soverflow occurred, error info is incomplete\n",
- newpfx);
- }
-
- printk("%serror_type: %d, %s\n", newpfx, err_info->type,
- err_info->type < ARRAY_SIZE(proc_error_type_strs) ?
- proc_error_type_strs[err_info->type] : "unknown");
- if (err_info->validation_bits & CPER_ARM_INFO_VALID_ERR_INFO)
- printk("%serror_info: 0x%016llx\n", newpfx,
- err_info->error_info);
- if (err_info->validation_bits & CPER_ARM_INFO_VALID_VIRT_ADDR)
- printk("%svirtual fault address: 0x%016llx\n",
- newpfx, err_info->virt_fault_addr);
- if (err_info->validation_bits & CPER_ARM_INFO_VALID_PHYSICAL_ADDR)
- printk("%sphysical fault address: 0x%016llx\n",
- newpfx, err_info->physical_fault_addr);
- err_info += 1;
- }
-
- ctx_info = (struct cper_arm_ctx_info *)err_info;
- max_ctx_type = ARRAY_SIZE(arm_reg_ctx_strs) - 1;
- for (i = 0; i < proc->context_info_num; i++) {
- int size = sizeof(*ctx_info) + ctx_info->size;
-
- printk("%sContext info structure %d:\n", pfx, i);
- if (len < size) {
- printk("%ssection length is too small\n", newpfx);
- printk("%sfirmware-generated error record is incorrect\n", pfx);
- return;
- }
- if (ctx_info->type > max_ctx_type) {
- printk("%sInvalid context type: %d (max: %d)\n",
- newpfx, ctx_info->type, max_ctx_type);
- return;
- }
- printk("%sregister context type: %s\n", newpfx,
- arm_reg_ctx_strs[ctx_info->type]);
- print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, 4,
- (ctx_info + 1), ctx_info->size, 0);
- len -= size;
- ctx_info = (struct cper_arm_ctx_info *)((long)ctx_info + size);
- }
-
- if (len > 0) {
- printk("%sVendor specific error info has %u bytes:\n", pfx,
- len);
- print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, 4, ctx_info,
- len, true);
- }
-}
-#endif
-
static const char * const mem_err_type_strs[] = {
"unknown",
"no error",
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 557a47829d03..cd42f66a7c85 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -52,6 +52,7 @@ struct efi __read_mostly efi = {
.properties_table = EFI_INVALID_TABLE_ADDR,
.mem_attr_table = EFI_INVALID_TABLE_ADDR,
.rng_seed = EFI_INVALID_TABLE_ADDR,
+ .tpm_log = EFI_INVALID_TABLE_ADDR
};
EXPORT_SYMBOL(efi);
@@ -464,6 +465,7 @@ static __initdata efi_config_table_type_t common_tables[] = {
{EFI_PROPERTIES_TABLE_GUID, "PROP", &efi.properties_table},
{EFI_MEMORY_ATTRIBUTES_TABLE_GUID, "MEMATTR", &efi.mem_attr_table},
{LINUX_EFI_RANDOM_SEED_TABLE_GUID, "RNG", &efi.rng_seed},
+ {LINUX_EFI_TPM_EVENT_LOG_GUID, "TPMEventLog", &efi.tpm_log},
{NULL_GUID, NULL, NULL},
};
@@ -552,6 +554,8 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
if (efi_enabled(EFI_MEMMAP))
efi_memattr_init();
+ efi_tpm_eventlog_init();
+
/* Parse the EFI Properties table if it exists */
if (efi.properties_table != EFI_INVALID_TABLE_ADDR) {
efi_properties_table_t *tbl;
@@ -608,7 +612,7 @@ static int __init efi_load_efivars(void)
return 0;
pdev = platform_device_register_simple("efivars", 0, NULL, 0);
- return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
+ return PTR_ERR_OR_ZERO(pdev);
}
device_initcall(efi_load_efivars);
#endif
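Wiring the TPM event log into the EFI core takes only the two additions above because efi_config_parse_tables() scans the common_tables[] array and, whenever a firmware table's GUID matches an entry, stores that table's address through the entry's pointer. A simplified sketch of the matching step (field names follow efi_config_table_type_t; name printing and error handling are omitted):

	efi_config_table_type_t *t;

	for (t = common_tables; t->name; t++) {
		if (!efi_guidcmp(t->guid, guid)) {
			*t->ptr = table;	/* e.g. fills in efi.tpm_log */
			break;
		}
	}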
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index adaa4a964f0c..7b3ba40f0745 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -30,8 +30,7 @@ OBJECT_FILES_NON_STANDARD := y
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
KCOV_INSTRUMENT := n
-lib-y := efi-stub-helper.o gop.o secureboot.o
-lib-$(CONFIG_RESET_ATTACK_MITIGATION) += tpm.o
+lib-y := efi-stub-helper.o gop.o secureboot.o tpm.o
# include the stub's generic dependencies from lib/ when building for ARM/arm64
arm-deps-y := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c
diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c
index 6224cdbc9669..da661bf8cb96 100644
--- a/drivers/firmware/efi/libstub/tpm.c
+++ b/drivers/firmware/efi/libstub/tpm.c
@@ -4,15 +4,18 @@
* Copyright (C) 2016 CoreOS, Inc
* Copyright (C) 2017 Google, Inc.
* Matthew Garrett <mjg59@google.com>
+ * Thiebaud Weksteen <tweek@google.com>
*
* This file is part of the Linux kernel, and is made available under the
* terms of the GNU General Public License version 2.
*/
#include <linux/efi.h>
+#include <linux/tpm_eventlog.h>
#include <asm/efi.h>
#include "efistub.h"
+#ifdef CONFIG_RESET_ATTACK_MITIGATION
static const efi_char16_t efi_MemoryOverWriteRequest_name[] = {
'M', 'e', 'm', 'o', 'r', 'y', 'O', 'v', 'e', 'r', 'w', 'r', 'i', 't',
'e', 'R', 'e', 'q', 'u', 'e', 's', 't', 'C', 'o', 'n', 't', 'r', 'o',
@@ -56,3 +59,81 @@ void efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg)
EFI_VARIABLE_BOOTSERVICE_ACCESS |
EFI_VARIABLE_RUNTIME_ACCESS, sizeof(val), &val);
}
+
+#endif
+
+void efi_retrieve_tpm2_eventlog_1_2(efi_system_table_t *sys_table_arg)
+{
+ efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID;
+ efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID;
+ efi_status_t status;
+ efi_physical_addr_t log_location, log_last_entry;
+ struct linux_efi_tpm_eventlog *log_tbl;
+ unsigned long first_entry_addr, last_entry_addr;
+ size_t log_size, last_entry_size;
+ efi_bool_t truncated;
+ void *tcg2_protocol;
+
+ status = efi_call_early(locate_protocol, &tcg2_guid, NULL,
+ &tcg2_protocol);
+ if (status != EFI_SUCCESS)
+ return;
+
+ status = efi_call_proto(efi_tcg2_protocol, get_event_log, tcg2_protocol,
+ EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2,
+ &log_location, &log_last_entry, &truncated);
+ if (status != EFI_SUCCESS)
+ return;
+
+ if (!log_location)
+ return;
+ first_entry_addr = (unsigned long) log_location;
+
+ /*
+ * We populate the EFI table even if the logs are empty.
+ */
+ if (!log_last_entry) {
+ log_size = 0;
+ } else {
+ last_entry_addr = (unsigned long) log_last_entry;
+ /*
+ * get_event_log only returns the address of the last entry.
+ * We need to calculate its size to deduce the full size of
+ * the logs.
+ */
+ last_entry_size = sizeof(struct tcpa_event) +
+ ((struct tcpa_event *) last_entry_addr)->event_size;
+ log_size = log_last_entry - log_location + last_entry_size;
+ }
+
+ /* Allocate space for the logs and copy them. */
+ status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+ sizeof(*log_tbl) + log_size,
+ (void **) &log_tbl);
+
+ if (status != EFI_SUCCESS) {
+ efi_printk(sys_table_arg,
+ "Unable to allocate memory for event log\n");
+ return;
+ }
+
+ memset(log_tbl, 0, sizeof(*log_tbl) + log_size);
+ log_tbl->size = log_size;
+ log_tbl->version = EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
+ memcpy(log_tbl->log, (void *) first_entry_addr, log_size);
+
+ status = efi_call_early(install_configuration_table,
+ &linux_eventlog_guid, log_tbl);
+ if (status != EFI_SUCCESS)
+ goto err_free;
+ return;
+
+err_free:
+ efi_call_early(free_pool, log_tbl);
+}
+
+void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg)
+{
+ /* Only try to retrieve the logs in 1.2 format. */
+ efi_retrieve_tpm2_eventlog_1_2(sys_table_arg);
+}
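Since get_event_log() reports only the first and last entry addresses, the stub derives the total log size as the distance between them plus the size of the final entry, a TCG 1.2 entry being a fixed struct tcpa_event header followed by event_size bytes of payload. With illustrative numbers, a log starting at 0x1000 whose last entry sits at 0x1f00 and carries a 0x40-byte payload works out as:

	last_entry_size = sizeof(struct tcpa_event) + 0x40;
	log_size = (0x1f00 - 0x1000) + last_entry_size;
	/* 0xf00 bytes up to the last entry, plus that entry itself */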
diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c
new file mode 100644
index 000000000000..0cbeb3d46b18
--- /dev/null
+++ b/drivers/firmware/efi/tpm.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2017 Google, Inc.
+ * Thiebaud Weksteen <tweek@google.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/efi.h>
+#include <linux/init.h>
+#include <linux/memblock.h>
+
+#include <asm/early_ioremap.h>
+
+/*
+ * Reserve the memory associated with the TPM Event Log configuration table.
+ */
+int __init efi_tpm_eventlog_init(void)
+{
+ struct linux_efi_tpm_eventlog *log_tbl;
+ unsigned int tbl_size;
+
+ if (efi.tpm_log == EFI_INVALID_TABLE_ADDR)
+ return 0;
+
+ log_tbl = early_memremap(efi.tpm_log, sizeof(*log_tbl));
+ if (!log_tbl) {
+ pr_err("Failed to map TPM Event Log table @ 0x%lx\n",
+ efi.tpm_log);
+ efi.tpm_log = EFI_INVALID_TABLE_ADDR;
+ return -ENOMEM;
+ }
+
+ tbl_size = sizeof(*log_tbl) + log_tbl->size;
+ memblock_reserve(efi.tpm_log, tbl_size);
+ early_memunmap(log_tbl, sizeof(*log_tbl));
+ return 0;
+}
+
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
index d687ca3d5049..8b25d31e8401 100644
--- a/drivers/firmware/psci.c
+++ b/drivers/firmware/psci.c
@@ -496,6 +496,8 @@ static void __init psci_init_migrate(void)
static void __init psci_0_2_set_functions(void)
{
pr_info("Using standard PSCI v0.2 function IDs\n");
+ psci_ops.get_version = psci_get_version;
+
psci_function_id[PSCI_FN_CPU_SUSPEND] =
PSCI_FN_NATIVE(0_2, CPU_SUSPEND);
psci_ops.cpu_suspend = psci_cpu_suspend;
diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci_checker.c
index f3f4f810e5df..bb1c068bff19 100644
--- a/drivers/firmware/psci_checker.c
+++ b/drivers/firmware/psci_checker.c
@@ -77,8 +77,8 @@ static int psci_ops_check(void)
return 0;
}
-static int find_clusters(const struct cpumask *cpus,
- const struct cpumask **clusters)
+static int find_cpu_groups(const struct cpumask *cpus,
+ const struct cpumask **cpu_groups)
{
unsigned int nb = 0;
cpumask_var_t tmp;
@@ -88,11 +88,11 @@ static int find_clusters(const struct cpumask *cpus,
cpumask_copy(tmp, cpus);
while (!cpumask_empty(tmp)) {
- const struct cpumask *cluster =
+ const struct cpumask *cpu_group =
topology_core_cpumask(cpumask_any(tmp));
- clusters[nb++] = cluster;
- cpumask_andnot(tmp, tmp, cluster);
+ cpu_groups[nb++] = cpu_group;
+ cpumask_andnot(tmp, tmp, cpu_group);
}
free_cpumask_var(tmp);
@@ -170,24 +170,24 @@ static int hotplug_tests(void)
{
int err;
cpumask_var_t offlined_cpus;
- int i, nb_cluster;
- const struct cpumask **clusters;
+ int i, nb_cpu_group;
+ const struct cpumask **cpu_groups;
char *page_buf;
err = -ENOMEM;
if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL))
return err;
- /* We may have up to nb_available_cpus clusters. */
- clusters = kmalloc_array(nb_available_cpus, sizeof(*clusters),
- GFP_KERNEL);
- if (!clusters)
+ /* We may have up to nb_available_cpus cpu_groups. */
+ cpu_groups = kmalloc_array(nb_available_cpus, sizeof(*cpu_groups),
+ GFP_KERNEL);
+ if (!cpu_groups)
goto out_free_cpus;
page_buf = (char *)__get_free_page(GFP_KERNEL);
if (!page_buf)
- goto out_free_clusters;
+ goto out_free_cpu_groups;
err = 0;
- nb_cluster = find_clusters(cpu_online_mask, clusters);
+ nb_cpu_group = find_cpu_groups(cpu_online_mask, cpu_groups);
/*
* Of course the last CPU cannot be powered down and cpu_down() should
@@ -197,24 +197,22 @@ static int hotplug_tests(void)
err += down_and_up_cpus(cpu_online_mask, offlined_cpus);
/*
- * Take down CPUs by cluster this time. When the last CPU is turned
- * off, the cluster itself should shut down.
+ * Take down CPUs by cpu group this time. When the last CPU is turned
+ * off, the cpu group itself should shut down.
*/
- for (i = 0; i < nb_cluster; ++i) {
- int cluster_id =
- topology_physical_package_id(cpumask_any(clusters[i]));
+ for (i = 0; i < nb_cpu_group; ++i) {
ssize_t len = cpumap_print_to_pagebuf(true, page_buf,
- clusters[i]);
+ cpu_groups[i]);
/* Remove trailing newline. */
page_buf[len - 1] = '\0';
- pr_info("Trying to turn off and on again cluster %d "
- "(CPUs %s)\n", cluster_id, page_buf);
- err += down_and_up_cpus(clusters[i], offlined_cpus);
+ pr_info("Trying to turn off and on again group %d (CPUs %s)\n",
+ i, page_buf);
+ err += down_and_up_cpus(cpu_groups[i], offlined_cpus);
}
free_page((unsigned long)page_buf);
-out_free_clusters:
- kfree(clusters);
+out_free_cpu_groups:
+ kfree(cpu_groups);
out_free_cpus:
free_cpumask_var(offlined_cpus);
return err;
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index d6a8e851ad13..8dbb2280538d 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -122,12 +122,6 @@ config GPIO_ATH79
Select this option to enable GPIO driver for
Atheros AR71XX/AR724X/AR913X SoC devices.
-config GPIO_AXP209
- tristate "X-Powers AXP209 PMIC GPIO Support"
- depends on MFD_AXP20X
- help
- Say yes to enable GPIO support for the AXP209 PMIC
-
config GPIO_BCM_KONA
bool "Broadcom Kona GPIO"
depends on OF_GPIO && (ARCH_BCM_MOBILE || COMPILE_TEST)
@@ -704,6 +698,22 @@ config GPIO_TS5500
blocks of the TS-5500: DIO1, DIO2 and the LCD port, and the TS-5600
LCD port.
+config GPIO_WINBOND
+ tristate "Winbond Super I/O GPIO support"
+ depends on ISA_BUS_API
+ help
+ This option enables support for GPIOs found on Winbond Super I/O
+ chips.
+ Currently, only W83627UHG (also known as Nuvoton NCT6627UD) is
+ supported.
+
+ You will need to provide a module parameter "gpios", or a
+ boot-time parameter "gpio_winbond.gpios" with a bitmask of GPIO
+ ports to enable (bit 0 is GPIO1, bit 1 is GPIO2, etc.).
+
+ To compile this driver as a module, choose M here: the module will
+ be called gpio-winbond.
+
config GPIO_WS16C48
tristate "WinSystems WS16C48 GPIO support"
depends on ISA_BUS_API
@@ -1234,6 +1244,16 @@ config GPIO_PCI_IDIO_16
low). Input filter control is not supported by this driver, and the
input filters are deactivated by this driver.
+config GPIO_PCIE_IDIO_24
+ tristate "ACCES PCIe-IDIO-24 GPIO support"
+ select GPIOLIB_IRQCHIP
+ help
+ Enables GPIO support for the ACCES PCIe-IDIO-24 family (PCIe-IDIO-24,
+ PCIe-IDI-24, PCIe-IDO-24, PCIe-IDIO-12). An interrupt is generated
+ when any of the inputs change state (low to high or high to low).
+ Input filter control is not supported by this driver, and the input
+ filters are deactivated by this driver.
+
config GPIO_RDC321X
tristate "RDC R-321x GPIO support"
select MFD_CORE
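As the GPIO_WINBOND help text above notes, the gpios bitmask selects Super I/O GPIO ports by bit position, so enabling ports GPIO1 and GPIO2 means setting bits 0 and 1. As a usage sketch (the exact port set available on a given board is an assumption):

	# as a module
	modprobe gpio-winbond gpios=0x3
	# built in: on the kernel command line
	gpio_winbond.gpios=0x3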
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 4bc24febb889..cccb0d40846c 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -32,7 +32,6 @@ obj-$(CONFIG_GPIO_AMDPT) += gpio-amdpt.o
obj-$(CONFIG_GPIO_ARIZONA) += gpio-arizona.o
obj-$(CONFIG_GPIO_ATH79) += gpio-ath79.o
obj-$(CONFIG_GPIO_ASPEED) += gpio-aspeed.o
-obj-$(CONFIG_GPIO_AXP209) += gpio-axp209.o
obj-$(CONFIG_GPIO_BCM_KONA) += gpio-bcm-kona.o
obj-$(CONFIG_GPIO_BD9571MWV) += gpio-bd9571mwv.o
obj-$(CONFIG_GPIO_BRCMSTB) += gpio-brcmstb.o
@@ -96,6 +95,7 @@ obj-$(CONFIG_GPIO_PCA953X) += gpio-pca953x.o
obj-$(CONFIG_GPIO_PCF857X) += gpio-pcf857x.o
obj-$(CONFIG_GPIO_PCH) += gpio-pch.o
obj-$(CONFIG_GPIO_PCI_IDIO_16) += gpio-pci-idio-16.o
+obj-$(CONFIG_GPIO_PCIE_IDIO_24) += gpio-pcie-idio-24.o
obj-$(CONFIG_GPIO_PISOSR) += gpio-pisosr.o
obj-$(CONFIG_GPIO_PL061) += gpio-pl061.o
obj-$(CONFIG_GPIO_PXA) += gpio-pxa.o
@@ -140,6 +140,7 @@ obj-$(CONFIG_GPIO_VIPERBOARD) += gpio-viperboard.o
obj-$(CONFIG_GPIO_VR41XX) += gpio-vr41xx.o
obj-$(CONFIG_GPIO_VX855) += gpio-vx855.o
obj-$(CONFIG_GPIO_WHISKEY_COVE) += gpio-wcove.o
+obj-$(CONFIG_GPIO_WINBOND) += gpio-winbond.o
obj-$(CONFIG_GPIO_WM831X) += gpio-wm831x.o
obj-$(CONFIG_GPIO_WM8350) += gpio-wm8350.o
obj-$(CONFIG_GPIO_WM8994) += gpio-wm8994.o
diff --git a/drivers/gpio/devres.c b/drivers/gpio/devres.c
index afbff155a0ba..e82cc763633c 100644
--- a/drivers/gpio/devres.c
+++ b/drivers/gpio/devres.c
@@ -125,6 +125,48 @@ struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
EXPORT_SYMBOL(devm_gpiod_get_index);
/**
+ * devm_gpiod_get_from_of_node() - obtain a GPIO from an OF node
+ * @dev: device for lifecycle management
+ * @node: handle of the OF node
+ * @propname: name of the DT property representing the GPIO
+ * @index: index of the GPIO to obtain for the consumer
+ * @dflags: GPIO initialization flags
+ * @label: label to attach to the requested GPIO
+ *
+ * Returns:
+ * Returns:
+ * On success, a pointer to the requested GPIO descriptor; the pin is
+ * configured in accordance with the provided @dflags.
+ *
+ * In case of error an ERR_PTR() is returned.
+struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
+ struct device_node *node,
+ const char *propname, int index,
+ enum gpiod_flags dflags,
+ const char *label)
+{
+ struct gpio_desc **dr;
+ struct gpio_desc *desc;
+
+ dr = devres_alloc(devm_gpiod_release, sizeof(struct gpio_desc *),
+ GFP_KERNEL);
+ if (!dr)
+ return ERR_PTR(-ENOMEM);
+
+ desc = gpiod_get_from_of_node(node, propname, index, dflags, label);
+ if (IS_ERR(desc)) {
+ devres_free(dr);
+ return desc;
+ }
+
+ *dr = desc;
+ devres_add(dev, dr);
+
+ return desc;
+}
+EXPORT_SYMBOL(devm_gpiod_get_from_of_node);
+
+/**
* devm_fwnode_get_index_gpiod_from_child - get a GPIO descriptor from a
* device's child node
* @dev: GPIO consumer
diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
index 15a1f4b348c4..fb7b620763a2 100644
--- a/drivers/gpio/gpio-74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -9,12 +9,11 @@
* published by the Free Software Foundation.
*/
-#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/spi/spi.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/module.h>
diff --git a/drivers/gpio/gpio-adp5520.c b/drivers/gpio/gpio-adp5520.c
index abf199609546..21452622d954 100644
--- a/drivers/gpio/gpio-adp5520.c
+++ b/drivers/gpio/gpio-adp5520.c
@@ -12,8 +12,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mfd/adp5520.h>
-
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
struct adp5520_gpio {
struct device *master;
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
index e717f8dc3966..3530ccd17e04 100644
--- a/drivers/gpio/gpio-adp5588.c
+++ b/drivers/gpio/gpio-adp5588.c
@@ -12,7 +12,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index 8e76d390e653..8c3ff6e2366f 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -18,7 +18,8 @@
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/driver.h>
+#include <linux/of_gpio.h> /* For of_mm_gpio_chip */
#include <linux/platform_device.h>
#define ALTERA_GPIO_MAX_NGPIO 32
diff --git a/drivers/gpio/gpio-amd8111.c b/drivers/gpio/gpio-amd8111.c
index 30ad7d7c1678..fdcebe59510d 100644
--- a/drivers/gpio/gpio-amd8111.c
+++ b/drivers/gpio/gpio-amd8111.c
@@ -28,7 +28,7 @@
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c
index d4e6ba0301bc..ba51ea15f379 100644
--- a/drivers/gpio/gpio-arizona.c
+++ b/drivers/gpio/gpio-arizona.c
@@ -15,7 +15,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 6b3ca6601af2..77e485557498 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -60,6 +60,7 @@ struct aspeed_gpio_bank {
uint16_t val_regs;
uint16_t irq_regs;
uint16_t debounce_regs;
+ uint16_t tolerance_regs;
const char names[4][3];
};
@@ -70,48 +71,56 @@ static const struct aspeed_gpio_bank aspeed_gpio_banks[] = {
.val_regs = 0x0000,
.irq_regs = 0x0008,
.debounce_regs = 0x0040,
+ .tolerance_regs = 0x001c,
.names = { "A", "B", "C", "D" },
},
{
.val_regs = 0x0020,
.irq_regs = 0x0028,
.debounce_regs = 0x0048,
+ .tolerance_regs = 0x003c,
.names = { "E", "F", "G", "H" },
},
{
.val_regs = 0x0070,
.irq_regs = 0x0098,
.debounce_regs = 0x00b0,
+ .tolerance_regs = 0x00ac,
.names = { "I", "J", "K", "L" },
},
{
.val_regs = 0x0078,
.irq_regs = 0x00e8,
.debounce_regs = 0x0100,
+ .tolerance_regs = 0x00fc,
.names = { "M", "N", "O", "P" },
},
{
.val_regs = 0x0080,
.irq_regs = 0x0118,
.debounce_regs = 0x0130,
+ .tolerance_regs = 0x012c,
.names = { "Q", "R", "S", "T" },
},
{
.val_regs = 0x0088,
.irq_regs = 0x0148,
.debounce_regs = 0x0160,
+ .tolerance_regs = 0x015c,
.names = { "U", "V", "W", "X" },
},
{
.val_regs = 0x01E0,
.irq_regs = 0x0178,
.debounce_regs = 0x0190,
+ .tolerance_regs = 0x018c,
.names = { "Y", "Z", "AA", "AB" },
},
{
- .val_regs = 0x01E8,
- .irq_regs = 0x01A8,
+ .val_regs = 0x01e8,
+ .irq_regs = 0x01a8,
.debounce_regs = 0x01c0,
+ .tolerance_regs = 0x01bc,
.names = { "AC", "", "", "" },
},
};
@@ -140,7 +149,7 @@ static const struct aspeed_gpio_bank *to_bank(unsigned int offset)
{
unsigned int bank = GPIO_BANK(offset);
- WARN_ON(bank > ARRAY_SIZE(aspeed_gpio_banks));
+ WARN_ON(bank >= ARRAY_SIZE(aspeed_gpio_banks));
return &aspeed_gpio_banks[bank];
}
@@ -534,6 +543,30 @@ static int aspeed_gpio_setup_irqs(struct aspeed_gpio *gpio,
return 0;
}
+static int aspeed_gpio_reset_tolerance(struct gpio_chip *chip,
+ unsigned int offset, bool enable)
+{
+ struct aspeed_gpio *gpio = gpiochip_get_data(chip);
+ const struct aspeed_gpio_bank *bank;
+ unsigned long flags;
+ u32 val;
+
+ bank = to_bank(offset);
+
+ spin_lock_irqsave(&gpio->lock, flags);
+ val = readl(gpio->base + bank->tolerance_regs);
+
+ if (enable)
+ val |= GPIO_BIT(offset);
+ else
+ val &= ~GPIO_BIT(offset);
+
+ writel(val, gpio->base + bank->tolerance_regs);
+ spin_unlock_irqrestore(&gpio->lock, flags);
+
+ return 0;
+}
+
static int aspeed_gpio_request(struct gpio_chip *chip, unsigned int offset)
{
if (!have_gpio(gpiochip_get_data(chip), offset))
@@ -771,6 +804,8 @@ static int aspeed_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
param == PIN_CONFIG_DRIVE_OPEN_SOURCE)
/* Return -ENOTSUPP to trigger emulation, as per datasheet */
return -ENOTSUPP;
+ else if (param == PIN_CONFIG_PERSIST_STATE)
+ return aspeed_gpio_reset_tolerance(chip, offset, arg);
return -ENOTSUPP;
}
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
index 5fad89dfab7e..3ae7c1876bf4 100644
--- a/drivers/gpio/gpio-ath79.c
+++ b/drivers/gpio/gpio-ath79.c
@@ -324,3 +324,6 @@ static struct platform_driver ath79_gpio_driver = {
};
module_platform_driver(ath79_gpio_driver);
+
+MODULE_DESCRIPTION("Atheros AR71XX/AR724X/AR913X GPIO API support");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-axp209.c b/drivers/gpio/gpio-axp209.c
deleted file mode 100644
index 4a346b7b4172..000000000000
--- a/drivers/gpio/gpio-axp209.c
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
- * AXP20x GPIO driver
- *
- * Copyright (C) 2016 Maxime Ripard <maxime.ripard@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/bitops.h>
-#include <linux/device.h>
-#include <linux/gpio/driver.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/mfd/axp20x.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
-#include <linux/slab.h>
-
-#define AXP20X_GPIO_FUNCTIONS 0x7
-#define AXP20X_GPIO_FUNCTION_OUT_LOW 0
-#define AXP20X_GPIO_FUNCTION_OUT_HIGH 1
-#define AXP20X_GPIO_FUNCTION_INPUT 2
-
-struct axp20x_gpio {
- struct gpio_chip chip;
- struct regmap *regmap;
-};
-
-static int axp20x_gpio_get_reg(unsigned offset)
-{
- switch (offset) {
- case 0:
- return AXP20X_GPIO0_CTRL;
- case 1:
- return AXP20X_GPIO1_CTRL;
- case 2:
- return AXP20X_GPIO2_CTRL;
- }
-
- return -EINVAL;
-}
-
-static int axp20x_gpio_input(struct gpio_chip *chip, unsigned offset)
-{
- struct axp20x_gpio *gpio = gpiochip_get_data(chip);
- int reg;
-
- reg = axp20x_gpio_get_reg(offset);
- if (reg < 0)
- return reg;
-
- return regmap_update_bits(gpio->regmap, reg,
- AXP20X_GPIO_FUNCTIONS,
- AXP20X_GPIO_FUNCTION_INPUT);
-}
-
-static int axp20x_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
- struct axp20x_gpio *gpio = gpiochip_get_data(chip);
- unsigned int val;
- int ret;
-
- ret = regmap_read(gpio->regmap, AXP20X_GPIO20_SS, &val);
- if (ret)
- return ret;
-
- return !!(val & BIT(offset + 4));
-}
-
-static int axp20x_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
-{
- struct axp20x_gpio *gpio = gpiochip_get_data(chip);
- unsigned int val;
- int reg, ret;
-
- reg = axp20x_gpio_get_reg(offset);
- if (reg < 0)
- return reg;
-
- ret = regmap_read(gpio->regmap, reg, &val);
- if (ret)
- return ret;
-
- /*
- * This shouldn't really happen if the pin is in use already,
- * or if it's not in use yet, it doesn't matter since we're
- * going to change the value soon anyway. Default to output.
- */
- if ((val & AXP20X_GPIO_FUNCTIONS) > 2)
- return 0;
-
- /*
- * The GPIO directions are the three lowest values.
- * 2 is input, 0 and 1 are output
- */
- return val & 2;
-}
-
-static int axp20x_gpio_output(struct gpio_chip *chip, unsigned offset,
- int value)
-{
- struct axp20x_gpio *gpio = gpiochip_get_data(chip);
- int reg;
-
- reg = axp20x_gpio_get_reg(offset);
- if (reg < 0)
- return reg;
-
- return regmap_update_bits(gpio->regmap, reg,
- AXP20X_GPIO_FUNCTIONS,
- value ? AXP20X_GPIO_FUNCTION_OUT_HIGH
- : AXP20X_GPIO_FUNCTION_OUT_LOW);
-}
-
-static void axp20x_gpio_set(struct gpio_chip *chip, unsigned offset,
- int value)
-{
- axp20x_gpio_output(chip, offset, value);
-}
-
-static int axp20x_gpio_probe(struct platform_device *pdev)
-{
- struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
- struct axp20x_gpio *gpio;
- int ret;
-
- if (!of_device_is_available(pdev->dev.of_node))
- return -ENODEV;
-
- if (!axp20x) {
- dev_err(&pdev->dev, "Parent drvdata not set\n");
- return -EINVAL;
- }
-
- gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
- if (!gpio)
- return -ENOMEM;
-
- gpio->chip.base = -1;
- gpio->chip.can_sleep = true;
- gpio->chip.parent = &pdev->dev;
- gpio->chip.label = dev_name(&pdev->dev);
- gpio->chip.owner = THIS_MODULE;
- gpio->chip.get = axp20x_gpio_get;
- gpio->chip.get_direction = axp20x_gpio_get_direction;
- gpio->chip.set = axp20x_gpio_set;
- gpio->chip.direction_input = axp20x_gpio_input;
- gpio->chip.direction_output = axp20x_gpio_output;
- gpio->chip.ngpio = 3;
-
- gpio->regmap = axp20x->regmap;
-
- ret = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio);
- if (ret) {
- dev_err(&pdev->dev, "Failed to register GPIO chip\n");
- return ret;
- }
-
- dev_info(&pdev->dev, "AXP209 GPIO driver loaded\n");
-
- return 0;
-}
-
-static const struct of_device_id axp20x_gpio_match[] = {
- { .compatible = "x-powers,axp209-gpio" },
- { }
-};
-MODULE_DEVICE_TABLE(of, axp20x_gpio_match);
-
-static struct platform_driver axp20x_gpio_driver = {
- .probe = axp20x_gpio_probe,
- .driver = {
- .name = "axp20x-gpio",
- .of_match_table = axp20x_gpio_match,
- },
-};
-
-module_platform_driver(axp20x_gpio_driver);
-
-MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
-MODULE_DESCRIPTION("AXP20x PMIC GPIO driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 76861a00bb92..eb8369b21e90 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -17,7 +17,7 @@
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/init.h>
@@ -127,7 +127,7 @@ static int bcm_kona_gpio_get_dir(struct gpio_chip *chip, unsigned gpio)
u32 val;
val = readl(reg_base + GPIO_CONTROL(gpio)) & GPIO_GPCTR0_IOTR_MASK;
- return val ? GPIOF_DIR_IN : GPIOF_DIR_OUT;
+ return !!val;
}
static void bcm_kona_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
@@ -144,7 +144,7 @@ static void bcm_kona_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
raw_spin_lock_irqsave(&kona_gpio->lock, flags);
/* this function only applies to output pin */
- if (bcm_kona_gpio_get_dir(chip, gpio) == GPIOF_DIR_IN)
+ if (bcm_kona_gpio_get_dir(chip, gpio) == 1)
goto out;
reg_offset = value ? GPIO_OUT_SET(bank_id) : GPIO_OUT_CLEAR(bank_id);
@@ -170,7 +170,7 @@ static int bcm_kona_gpio_get(struct gpio_chip *chip, unsigned gpio)
reg_base = kona_gpio->reg_base;
raw_spin_lock_irqsave(&kona_gpio->lock, flags);
- if (bcm_kona_gpio_get_dir(chip, gpio) == GPIOF_DIR_IN)
+ if (bcm_kona_gpio_get_dir(chip, gpio) == 1)
reg_offset = GPIO_IN_STATUS(bank_id);
else
reg_offset = GPIO_OUT_STATUS(bank_id);
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index bb4f8cf18bd9..16c7f9f49416 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -19,7 +19,6 @@
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/interrupt.h>
-#include <linux/bitops.h>
enum gio_reg_index {
GIO_REG_ODEN = 0,
diff --git a/drivers/gpio/gpio-bt8xx.c b/drivers/gpio/gpio-bt8xx.c
index acefb25e8eca..b8ec75cbd4b5 100644
--- a/drivers/gpio/gpio-bt8xx.c
+++ b/drivers/gpio/gpio-bt8xx.c
@@ -46,7 +46,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/slab.h>
/* Steal the hardware definitions from the bttv driver. */
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
index b6f0f729656c..58531d8b8c6e 100644
--- a/drivers/gpio/gpio-crystalcove.c
+++ b/drivers/gpio/gpio-crystalcove.c
@@ -18,7 +18,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/seq_file.h>
#include <linux/bitops.h>
#include <linux/regmap.h>
diff --git a/drivers/gpio/gpio-cs5535.c b/drivers/gpio/gpio-cs5535.c
index 90278b19aa0e..8814c8f47e57 100644
--- a/drivers/gpio/gpio-cs5535.c
+++ b/drivers/gpio/gpio-cs5535.c
@@ -12,7 +12,7 @@
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/cs5535.h>
#include <asm/msr.h>
diff --git a/drivers/gpio/gpio-da9052.c b/drivers/gpio/gpio-da9052.c
index dd8977cf3e85..b6d3e997eb26 100644
--- a/drivers/gpio/gpio-da9052.c
+++ b/drivers/gpio/gpio-da9052.c
@@ -15,7 +15,7 @@
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/syscalls.h>
#include <linux/seq_file.h>
diff --git a/drivers/gpio/gpio-da9055.c b/drivers/gpio/gpio-da9055.c
index 82053b52cba0..2f1b5d23b10c 100644
--- a/drivers/gpio/gpio-da9055.c
+++ b/drivers/gpio/gpio-da9055.c
@@ -13,7 +13,7 @@
*/
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/mfd/da9055/core.h>
#include <linux/mfd/da9055/reg.h>
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index e4b3d7db68c9..0b951ca78ec4 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -9,7 +9,7 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/clk.h>
diff --git a/drivers/gpio/gpio-ftgpio010.c b/drivers/gpio/gpio-ftgpio010.c
index 7b3394fdc624..b7a3a2db699b 100644
--- a/drivers/gpio/gpio-ftgpio010.c
+++ b/drivers/gpio/gpio-ftgpio010.c
@@ -176,8 +176,8 @@ static int ftgpio_gpio_probe(struct platform_device *pdev)
return PTR_ERR(g->base);
irq = platform_get_irq(pdev, 0);
- if (!irq)
- return -EINVAL;
+ if (irq <= 0)
+ return irq ? irq : -EINVAL;
ret = bgpio_init(&g->gc, dev, 4,
g->base + GPIO_DATA_IN,
diff --git a/drivers/gpio/gpio-iop.c b/drivers/gpio/gpio-iop.c
index 98c7ff2a76e7..8d62db447ec1 100644
--- a/drivers/gpio/gpio-iop.c
+++ b/drivers/gpio/gpio-iop.c
@@ -58,3 +58,7 @@ static int __init iop3xx_gpio_init(void)
return platform_driver_register(&iop3xx_gpio_driver);
}
arch_initcall(iop3xx_gpio_init);
+
+MODULE_DESCRIPTION("GPIO handling for Intel IOP3xx processors");
+MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-it87.c b/drivers/gpio/gpio-it87.c
index d43d0a2cc4c5..efb46edff81f 100644
--- a/drivers/gpio/gpio-it87.c
+++ b/drivers/gpio/gpio-it87.c
@@ -414,6 +414,6 @@ static void __exit it87_gpio_exit(void)
module_init(it87_gpio_init);
module_exit(it87_gpio_exit);
-MODULE_AUTHOR("Diego Elio Pettenò <flameeyes@flameeyes.eu>");
+MODULE_AUTHOR("Diego Elio Pettenò <flameeyes@flameeyes.eu>");
MODULE_DESCRIPTION("GPIO interface for IT87xx Super I/O chips");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c
index c04fae1ba32a..9d8bcc69f245 100644
--- a/drivers/gpio/gpio-max732x.c
+++ b/drivers/gpio/gpio-max732x.c
@@ -709,8 +709,7 @@ static int max732x_probe(struct i2c_client *client,
return 0;
out_failed:
- if (chip->client_dummy)
- i2c_unregister_device(chip->client_dummy);
+ i2c_unregister_device(chip->client_dummy);
return ret;
}
@@ -734,8 +733,7 @@ static int max732x_remove(struct i2c_client *client)
gpiochip_remove(&chip->gpio_chip);
/* unregister any dummy i2c_client */
- if (chip->client_dummy)
- i2c_unregister_device(chip->client_dummy);
+ i2c_unregister_device(chip->client_dummy);
return 0;
}
diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c
index dd67a31ac337..c38624ea0251 100644
--- a/drivers/gpio/gpio-merrifield.c
+++ b/drivers/gpio/gpio-merrifield.c
@@ -9,6 +9,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
#include <linux/init.h>
@@ -380,9 +381,16 @@ static void mrfld_irq_init_hw(struct mrfld_gpio *priv)
}
}
+static const char *mrfld_gpio_get_pinctrl_dev_name(void)
+{
+ const char *dev_name = acpi_dev_get_first_match_name("INTC1002", NULL, -1);
+ return dev_name ? dev_name : "pinctrl-merrifield";
+}
+
static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
const struct mrfld_gpio_pinrange *range;
+ const char *pinctrl_dev_name;
struct mrfld_gpio *priv;
u32 gpio_base, irq_base;
void __iomem *base;
@@ -439,10 +447,11 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
return retval;
}
+ pinctrl_dev_name = mrfld_gpio_get_pinctrl_dev_name();
for (i = 0; i < ARRAY_SIZE(mrfld_gpio_ranges); i++) {
range = &mrfld_gpio_ranges[i];
retval = gpiochip_add_pin_range(&priv->chip,
- "pinctrl-merrifield",
+ pinctrl_dev_name,
range->gpio_base,
range->pin_base,
range->npins);
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 9532d86a82f7..3a545ad17817 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -27,13 +27,15 @@
#include "gpiolib.h"
#define GPIO_MOCKUP_NAME "gpio-mockup"
-#define GPIO_MOCKUP_MAX_GC 10
+#define GPIO_MOCKUP_MAX_GC 10
/*
* We're storing two values per chip: the GPIO base and the number
* of GPIO lines.
*/
#define GPIO_MOCKUP_MAX_RANGES (GPIO_MOCKUP_MAX_GC * 2)
+#define gpio_mockup_err(...) pr_err(GPIO_MOCKUP_NAME ": " __VA_ARGS__)
+
enum {
GPIO_MOCKUP_DIR_OUT = 0,
GPIO_MOCKUP_DIR_IN = 1,
@@ -46,7 +48,7 @@ enum {
*/
struct gpio_mockup_line_status {
int dir;
- bool value;
+ int value;
};
struct gpio_mockup_chip {
@@ -62,17 +64,33 @@ struct gpio_mockup_dbgfs_private {
int offset;
};
+struct gpio_mockup_platform_data {
+ int base;
+ int ngpio;
+ int index;
+ bool named_lines;
+};
+
static int gpio_mockup_ranges[GPIO_MOCKUP_MAX_RANGES];
-static int gpio_mockup_params_nr;
-module_param_array(gpio_mockup_ranges, int, &gpio_mockup_params_nr, 0400);
+static int gpio_mockup_num_ranges;
+module_param_array(gpio_mockup_ranges, int, &gpio_mockup_num_ranges, 0400);
static bool gpio_mockup_named_lines;
module_param_named(gpio_mockup_named_lines,
gpio_mockup_named_lines, bool, 0400);
-static const char gpio_mockup_name_start = 'A';
static struct dentry *gpio_mockup_dbg_dir;
+static int gpio_mockup_range_base(unsigned int index)
+{
+ return gpio_mockup_ranges[index * 2];
+}
+
+static int gpio_mockup_range_ngpio(unsigned int index)
+{
+ return gpio_mockup_ranges[index * 2 + 1];
+}
+
static int gpio_mockup_get(struct gpio_chip *gc, unsigned int offset)
{
struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
@@ -80,16 +98,26 @@ static int gpio_mockup_get(struct gpio_chip *gc, unsigned int offset)
return chip->lines[offset].value;
}
-static void gpio_mockup_set(struct gpio_chip *gc, unsigned int offset,
- int value)
+static void gpio_mockup_set(struct gpio_chip *gc,
+ unsigned int offset, int value)
{
struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
chip->lines[offset].value = !!value;
}
-static int gpio_mockup_dirout(struct gpio_chip *gc, unsigned int offset,
- int value)
+static void gpio_mockup_set_multiple(struct gpio_chip *gc,
+ unsigned long *mask, unsigned long *bits)
+{
+ unsigned int bit;
+
+ for_each_set_bit(bit, mask, gc->ngpio)
+ gpio_mockup_set(gc, bit, test_bit(bit, bits));
+
+}
+
+static int gpio_mockup_dirout(struct gpio_chip *gc,
+ unsigned int offset, int value)
{
struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
@@ -115,29 +143,6 @@ static int gpio_mockup_get_direction(struct gpio_chip *gc, unsigned int offset)
return chip->lines[offset].dir;
}
-static int gpio_mockup_name_lines(struct device *dev,
- struct gpio_mockup_chip *chip)
-{
- struct gpio_chip *gc = &chip->gc;
- char **names;
- int i;
-
- names = devm_kcalloc(dev, gc->ngpio, sizeof(char *), GFP_KERNEL);
- if (!names)
- return -ENOMEM;
-
- for (i = 0; i < gc->ngpio; i++) {
- names[i] = devm_kasprintf(dev, GFP_KERNEL,
- "%s-%d", gc->label, i);
- if (!names[i])
- return -ENOMEM;
- }
-
- gc->names = (const char *const *)names;
-
- return 0;
-}
-
static int gpio_mockup_to_irq(struct gpio_chip *gc, unsigned int offset)
{
struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
@@ -188,15 +193,21 @@ static void gpio_mockup_debugfs_setup(struct device *dev,
struct gpio_mockup_chip *chip)
{
struct gpio_mockup_dbgfs_private *priv;
- struct dentry *evfile;
+ struct dentry *evfile, *link;
struct gpio_chip *gc;
+ const char *devname;
char *name;
int i;
gc = &chip->gc;
+ devname = dev_name(&gc->gpiodev->dev);
- chip->dbg_dir = debugfs_create_dir(gc->label, gpio_mockup_dbg_dir);
- if (!chip->dbg_dir)
+ chip->dbg_dir = debugfs_create_dir(devname, gpio_mockup_dbg_dir);
+ if (IS_ERR_OR_NULL(chip->dbg_dir))
+ goto err;
+
+ link = debugfs_create_symlink(gc->label, gpio_mockup_dbg_dir, devname);
+ if (IS_ERR_OR_NULL(link))
goto err;
for (i = 0; i < gc->ngpio; i++) {
@@ -214,23 +225,63 @@ static void gpio_mockup_debugfs_setup(struct device *dev,
evfile = debugfs_create_file(name, 0200, chip->dbg_dir, priv,
&gpio_mockup_event_ops);
- if (!evfile)
+ if (IS_ERR_OR_NULL(evfile))
goto err;
}
return;
err:
- dev_err(dev, "error creating debugfs directory\n");
+ dev_err(dev, "error creating debugfs event files\n");
}
-static int gpio_mockup_add(struct device *dev,
- struct gpio_mockup_chip *chip,
- const char *name, int base, int ngpio)
+static int gpio_mockup_name_lines(struct device *dev,
+ struct gpio_mockup_chip *chip)
{
struct gpio_chip *gc = &chip->gc;
- int ret;
+ char **names;
+ int i;
+
+ names = devm_kcalloc(dev, gc->ngpio, sizeof(char *), GFP_KERNEL);
+ if (!names)
+ return -ENOMEM;
+
+ for (i = 0; i < gc->ngpio; i++) {
+ names[i] = devm_kasprintf(dev, GFP_KERNEL,
+ "%s-%d", gc->label, i);
+ if (!names[i])
+ return -ENOMEM;
+ }
+
+ gc->names = (const char *const *)names;
+
+ return 0;
+}
+
+static int gpio_mockup_probe(struct platform_device *pdev)
+{
+ struct gpio_mockup_platform_data *pdata;
+ struct gpio_mockup_chip *chip;
+ struct gpio_chip *gc;
+ int rv, base, ngpio;
+ struct device *dev;
+ char *name;
+
+ dev = &pdev->dev;
+ pdata = dev_get_platdata(dev);
+ base = pdata->base;
+ ngpio = pdata->ngpio;
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "%s-%c",
+ pdev->name, pdata->index);
+ if (!name)
+ return -ENOMEM;
+
+ gc = &chip->gc;
gc->base = base;
gc->ngpio = ngpio;
gc->label = name;
@@ -238,6 +289,7 @@ static int gpio_mockup_add(struct device *dev,
gc->parent = dev;
gc->get = gpio_mockup_get;
gc->set = gpio_mockup_set;
+ gc->set_multiple = gpio_mockup_set_multiple;
gc->direction_output = gpio_mockup_dirout;
gc->direction_input = gpio_mockup_dirin;
gc->get_direction = gpio_mockup_get_direction;
@@ -248,19 +300,19 @@ static int gpio_mockup_add(struct device *dev,
if (!chip->lines)
return -ENOMEM;
- if (gpio_mockup_named_lines) {
- ret = gpio_mockup_name_lines(dev, chip);
- if (ret)
- return ret;
+ if (pdata->named_lines) {
+ rv = gpio_mockup_name_lines(dev, chip);
+ if (rv)
+ return rv;
}
- ret = devm_irq_sim_init(dev, &chip->irqsim, gc->ngpio);
- if (ret)
- return ret;
+ rv = devm_irq_sim_init(dev, &chip->irqsim, gc->ngpio);
+ if (rv < 0)
+ return rv;
- ret = devm_gpiochip_add_data(dev, &chip->gc, chip);
- if (ret)
- return ret;
+ rv = devm_gpiochip_add_data(dev, &chip->gc, chip);
+ if (rv)
+ return rv;
if (gpio_mockup_dbg_dir)
gpio_mockup_debugfs_setup(dev, chip);
@@ -268,58 +320,6 @@ static int gpio_mockup_add(struct device *dev,
return 0;
}
-static int gpio_mockup_probe(struct platform_device *pdev)
-{
- int ret, i, base, ngpio, num_chips;
- struct device *dev = &pdev->dev;
- struct gpio_mockup_chip *chips;
- char *chip_name;
-
- if (gpio_mockup_params_nr < 2 || (gpio_mockup_params_nr % 2))
- return -EINVAL;
-
- /* Each chip is described by two values. */
- num_chips = gpio_mockup_params_nr / 2;
-
- chips = devm_kcalloc(dev, num_chips, sizeof(*chips), GFP_KERNEL);
- if (!chips)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, chips);
-
- for (i = 0; i < num_chips; i++) {
- base = gpio_mockup_ranges[i * 2];
-
- if (base == -1)
- ngpio = gpio_mockup_ranges[i * 2 + 1];
- else
- ngpio = gpio_mockup_ranges[i * 2 + 1] - base;
-
- if (ngpio >= 0) {
- chip_name = devm_kasprintf(dev, GFP_KERNEL,
- "%s-%c", GPIO_MOCKUP_NAME,
- gpio_mockup_name_start + i);
- if (!chip_name)
- return -ENOMEM;
-
- ret = gpio_mockup_add(dev, &chips[i],
- chip_name, base, ngpio);
- } else {
- ret = -EINVAL;
- }
-
- if (ret) {
- dev_err(dev,
- "adding gpiochip failed: %d (base: %d, ngpio: %d)\n",
- ret, base, base < 0 ? ngpio : base + ngpio);
-
- return ret;
- }
- }
-
- return 0;
-}
-
static struct platform_driver gpio_mockup_driver = {
.driver = {
.name = GPIO_MOCKUP_NAME,
@@ -327,44 +327,88 @@ static struct platform_driver gpio_mockup_driver = {
.probe = gpio_mockup_probe,
};
-static struct platform_device *pdev;
-static int __init mock_device_init(void)
+static struct platform_device *gpio_mockup_pdevs[GPIO_MOCKUP_MAX_GC];
+
+static void gpio_mockup_unregister_pdevs(void)
{
- int err;
+ struct platform_device *pdev;
+ int i;
- gpio_mockup_dbg_dir = debugfs_create_dir("gpio-mockup-event", NULL);
- if (!gpio_mockup_dbg_dir)
- pr_err("%s: error creating debugfs directory\n",
- GPIO_MOCKUP_NAME);
+ for (i = 0; i < GPIO_MOCKUP_MAX_GC; i++) {
+ pdev = gpio_mockup_pdevs[i];
- pdev = platform_device_alloc(GPIO_MOCKUP_NAME, -1);
- if (!pdev)
- return -ENOMEM;
+ if (pdev)
+ platform_device_unregister(pdev);
+ }
+}
- err = platform_device_add(pdev);
- if (err) {
- platform_device_put(pdev);
- return err;
+static int __init gpio_mockup_init(void)
+{
+ int i, num_chips, err = 0, index = 'A';
+ struct gpio_mockup_platform_data pdata;
+ struct platform_device *pdev;
+
+ if ((gpio_mockup_num_ranges < 2) ||
+ (gpio_mockup_num_ranges % 2) ||
+ (gpio_mockup_num_ranges > GPIO_MOCKUP_MAX_RANGES))
+ return -EINVAL;
+
+ /* Each chip is described by two values. */
+ num_chips = gpio_mockup_num_ranges / 2;
+
+ /*
+ * The second value in the <base GPIO - number of GPIOs> pair must
+ * always be greater than 0.
+ */
+ for (i = 0; i < num_chips; i++) {
+ if (gpio_mockup_range_ngpio(i) < 0)
+ return -EINVAL;
}
+ gpio_mockup_dbg_dir = debugfs_create_dir("gpio-mockup-event", NULL);
+ if (IS_ERR_OR_NULL(gpio_mockup_dbg_dir))
+ gpio_mockup_err("error creating debugfs directory\n");
+
err = platform_driver_register(&gpio_mockup_driver);
if (err) {
- platform_device_unregister(pdev);
+ gpio_mockup_err("error registering platform driver\n");
return err;
}
+ for (i = 0; i < num_chips; i++) {
+ pdata.index = index++;
+ pdata.base = gpio_mockup_range_base(i);
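+ /*
+ * With a negative base the range's second value is the line
+ * count itself; otherwise it marks the end of the GPIO number
+ * range and has to be converted to a count.
+ */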
+ pdata.ngpio = pdata.base < 0
+ ? gpio_mockup_range_ngpio(i)
+ : gpio_mockup_range_ngpio(i) - pdata.base;
+ pdata.named_lines = gpio_mockup_named_lines;
+
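+ /*
+ * The platform core copies the platform data, so the on-stack
+ * struct can safely be reused for the next chip.
+ */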
+ pdev = platform_device_register_resndata(NULL,
+ GPIO_MOCKUP_NAME,
+ i, NULL, 0, &pdata,
+ sizeof(pdata));
+ if (IS_ERR(pdev)) {
+ gpio_mockup_err("error registering device");
+ platform_driver_unregister(&gpio_mockup_driver);
+ gpio_mockup_unregister_pdevs();
+ return PTR_ERR(pdev);
+ }
+
+ gpio_mockup_pdevs[i] = pdev;
+ }
+
return 0;
}
-static void __exit mock_device_exit(void)
+static void __exit gpio_mockup_exit(void)
{
debugfs_remove_recursive(gpio_mockup_dbg_dir);
platform_driver_unregister(&gpio_mockup_driver);
- platform_device_unregister(pdev);
+ gpio_mockup_unregister_pdevs();
}
-module_init(mock_device_init);
-module_exit(mock_device_exit);
+module_init(gpio_mockup_init);
+module_exit(gpio_mockup_exit);
MODULE_AUTHOR("Kamlakant Patel <kamlakant.patel@broadcom.com>");
MODULE_AUTHOR("Bamvor Jian Zhang <bamvor.zhangjian@linaro.org>");
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index e136d666f1e5..ab5035b96886 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1058,7 +1058,9 @@ static void omap_gpio_mod_init(struct gpio_bank *bank)
static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
{
+ struct gpio_irq_chip *irq;
static int gpio;
+ const char *label;
int irq_base = 0;
int ret;
@@ -1080,21 +1082,15 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
bank->chip.parent = &omap_mpuio_device.dev;
bank->chip.base = OMAP_MPUIO(0);
} else {
- bank->chip.label = "gpio";
+ label = devm_kasprintf(bank->chip.parent, GFP_KERNEL, "gpio-%d-%d",
+ gpio, gpio + bank->width - 1);
+ if (!label)
+ return -ENOMEM;
+ bank->chip.label = label;
bank->chip.base = gpio;
}
bank->chip.ngpio = bank->width;
- ret = gpiochip_add_data(&bank->chip, bank);
- if (ret) {
- dev_err(bank->chip.parent,
- "Could not register gpio chip %d\n", ret);
- return ret;
- }
-
- if (!bank->is_mpuio)
- gpio += bank->width;
-
#ifdef CONFIG_ARCH_OMAP1
/*
* REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop
@@ -1115,25 +1111,30 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
irqc->irq_set_wake = NULL;
}
- ret = gpiochip_irqchip_add(&bank->chip, irqc,
- irq_base, handle_bad_irq,
- IRQ_TYPE_NONE);
+ irq = &bank->chip.irq;
+ irq->chip = irqc;
+ irq->handler = handle_bad_irq;
+ irq->default_type = IRQ_TYPE_NONE;
+ irq->num_parents = 1;
+ irq->parents = &bank->irq;
+ irq->first = irq_base;
+ ret = gpiochip_add_data(&bank->chip, bank);
if (ret) {
dev_err(bank->chip.parent,
- "Couldn't add irqchip to gpiochip %d\n", ret);
- gpiochip_remove(&bank->chip);
- return -ENODEV;
+ "Could not register gpio chip %d\n", ret);
+ return ret;
}
- gpiochip_set_chained_irqchip(&bank->chip, irqc, bank->irq, NULL);
-
ret = devm_request_irq(bank->chip.parent, bank->irq,
omap_gpio_irq_handler,
0, dev_name(bank->chip.parent), bank);
if (ret)
gpiochip_remove(&bank->chip);
+ if (!bank->is_mpuio)
+ gpio += bank->width;
+
return ret;
}
diff --git a/drivers/gpio/gpio-pcie-idio-24.c b/drivers/gpio/gpio-pcie-idio-24.c
new file mode 100644
index 000000000000..f666e2e69074
--- /dev/null
+++ b/drivers/gpio/gpio-pcie-idio-24.c
@@ -0,0 +1,447 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * GPIO driver for the ACCES PCIe-IDIO-24 family
+ * Copyright (C) 2018 William Breathitt Gray
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * This driver supports the following ACCES devices: PCIe-IDIO-24,
+ * PCIe-IDI-24, PCIe-IDO-24, and PCIe-IDIO-12.
+ */
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/irqdesc.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+/**
+ * struct idio_24_gpio_reg - GPIO device registers structure
+ * @out0_7: Read: FET Outputs 0-7
+ * Write: FET Outputs 0-7
+ * @out8_15: Read: FET Outputs 8-15
+ * Write: FET Outputs 8-15
+ * @out16_23: Read: FET Outputs 16-23
+ * Write: FET Outputs 16-23
+ * @ttl_out0_7: Read: TTL/CMOS Outputs 0-7
+ * Write: TTL/CMOS Outputs 0-7
+ * @in0_7: Read: Isolated Inputs 0-7
+ * Write: Reserved
+ * @in8_15: Read: Isolated Inputs 8-15
+ * Write: Reserved
+ * @in16_23: Read: Isolated Inputs 16-23
+ * Write: Reserved
+ * @ttl_in0_7: Read: TTL/CMOS Inputs 0-7
+ * Write: Reserved
+ * @cos0_7: Read: COS Status Inputs 0-7
+ * Write: COS Clear Inputs 0-7
+ * @cos8_15: Read: COS Status Inputs 8-15
+ * Write: COS Clear Inputs 8-15
+ * @cos16_23: Read: COS Status Inputs 16-23
+ * Write: COS Clear Inputs 16-23
+ * @cos_ttl0_7: Read: COS Status TTL/CMOS 0-7
+ * Write: COS Clear TTL/CMOS 0-7
+ * @ctl: Read: Control Register
+ * Write: Control Register
+ * @reserved: Read: Reserved
+ * Write: Reserved
+ * @cos_enable: Read: COS Enable
+ * Write: COS Enable
+ * @soft_reset: Read: IRQ Output Pin Status
+ * Write: Software Board Reset
+ */
+struct idio_24_gpio_reg {
+ u8 out0_7;
+ u8 out8_15;
+ u8 out16_23;
+ u8 ttl_out0_7;
+ u8 in0_7;
+ u8 in8_15;
+ u8 in16_23;
+ u8 ttl_in0_7;
+ u8 cos0_7;
+ u8 cos8_15;
+ u8 cos16_23;
+ u8 cos_ttl0_7;
+ u8 ctl;
+ u8 reserved;
+ u8 cos_enable;
+ u8 soft_reset;
+};
+
+/**
+ * struct idio_24_gpio - GPIO device private data structure
+ * @chip: instance of the gpio_chip
+ * @lock: synchronization lock to prevent I/O race conditions
+ * @reg: I/O address offset for the GPIO device registers
+ * @irq_mask: I/O bits affected by interrupts
+ */
+struct idio_24_gpio {
+ struct gpio_chip chip;
+ raw_spinlock_t lock;
+ struct idio_24_gpio_reg __iomem *reg;
+ unsigned long irq_mask;
+};
+
+static int idio_24_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
+ const unsigned long out_mode_mask = BIT(1);
+
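+ /* gpiolib expects 0 for an output line and 1 for an input line */
+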
+ /* FET Outputs */
+ if (offset < 24)
+ return 0;
+
+ /* Isolated Inputs */
+ if (offset < 48)
+ return 1;
+
+ /* TTL/CMOS I/O */
+ /* OUT MODE = 1 when TTL/CMOS Output Mode is set */
+ return !(ioread8(&idio24gpio->reg->ctl) & out_mode_mask);
+}
+
+static int idio_24_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
+ unsigned long flags;
+ unsigned int ctl_state;
+ const unsigned long out_mode_mask = BIT(1);
+
+ /* TTL/CMOS I/O */
+ if (offset > 47) {
+ raw_spin_lock_irqsave(&idio24gpio->lock, flags);
+
+ /* Clear TTL/CMOS Output Mode */
+ ctl_state = ioread8(&idio24gpio->reg->ctl) & ~out_mode_mask;
+ iowrite8(ctl_state, &idio24gpio->reg->ctl);
+
+ raw_spin_unlock_irqrestore(&idio24gpio->lock, flags);
+ }
+
+ return 0;
+}
+
+static int idio_24_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
+ unsigned long flags;
+ unsigned int ctl_state;
+ const unsigned long out_mode_mask = BIT(1);
+
+ /* TTL/CMOS I/O */
+ if (offset > 47) {
+ raw_spin_lock_irqsave(&idio24gpio->lock, flags);
+
+ /* Set TTL/CMOS Output Mode */
+ ctl_state = ioread8(&idio24gpio->reg->ctl) | out_mode_mask;
+ iowrite8(ctl_state, &idio24gpio->reg->ctl);
+
+ raw_spin_unlock_irqrestore(&idio24gpio->lock, flags);
+ }
+
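+ /* latch the requested level through the regular set() callback */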
+ chip->set(chip, offset, value);
+ return 0;
+}
+
+static int idio_24_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
+ const unsigned long offset_mask = BIT(offset % 8);
+ const unsigned long out_mode_mask = BIT(1);
+
+ /* FET Outputs */
+ if (offset < 8)
+ return !!(ioread8(&idio24gpio->reg->out0_7) & offset_mask);
+
+ if (offset < 16)
+ return !!(ioread8(&idio24gpio->reg->out8_15) & offset_mask);
+
+ if (offset < 24)
+ return !!(ioread8(&idio24gpio->reg->out16_23) & offset_mask);
+
+ /* Isolated Inputs */
+ if (offset < 32)
+ return !!(ioread8(&idio24gpio->reg->in0_7) & offset_mask);
+
+ if (offset < 40)
+ return !!(ioread8(&idio24gpio->reg->in8_15) & offset_mask);
+
+ if (offset < 48)
+ return !!(ioread8(&idio24gpio->reg->in16_23) & offset_mask);
+
+ /* TTL/CMOS Outputs */
+ if (ioread8(&idio24gpio->reg->ctl) & out_mode_mask)
+ return !!(ioread8(&idio24gpio->reg->ttl_out0_7) & offset_mask);
+
+ /* TTL/CMOS Inputs */
+ return !!(ioread8(&idio24gpio->reg->ttl_in0_7) & offset_mask);
+}
+
+static void idio_24_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
+ const unsigned long out_mode_mask = BIT(1);
+ void __iomem *base;
+ const unsigned int mask = BIT(offset % 8);
+ unsigned long flags;
+ unsigned int out_state;
+
+ /* Isolated Inputs */
+ if (offset > 23 && offset < 48)
+ return;
+
+ /* TTL/CMOS Inputs */
+ if (offset > 47 && !(ioread8(&idio24gpio->reg->ctl) & out_mode_mask))
+ return;
+
+ /* TTL/CMOS Outputs */
+ if (offset > 47)
+ base = &idio24gpio->reg->ttl_out0_7;
+ /* FET Outputs */
+ else if (offset > 15)
+ base = &idio24gpio->reg->out16_23;
+ else if (offset > 7)
+ base = &idio24gpio->reg->out8_15;
+ else
+ base = &idio24gpio->reg->out0_7;
+
+ raw_spin_lock_irqsave(&idio24gpio->lock, flags);
+
+ if (value)
+ out_state = ioread8(base) | mask;
+ else
+ out_state = ioread8(base) & ~mask;
+
+ iowrite8(out_state, base);
+
+ raw_spin_unlock_irqrestore(&idio24gpio->lock, flags);
+}
+
+static void idio_24_irq_ack(struct irq_data *data)
+{
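+ /* COS status is cleared in bulk by the interrupt handler */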
+}
+
+static void idio_24_irq_mask(struct irq_data *data)
+{
+ struct gpio_chip *const chip = irq_data_get_irq_chip_data(data);
+ struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
+ unsigned long flags;
+ const unsigned long bit_offset = irqd_to_hwirq(data) - 24;
+ unsigned char new_irq_mask;
+ const unsigned long bank_offset = bit_offset / 8;
+ unsigned char cos_enable_state;
+
+ raw_spin_lock_irqsave(&idio24gpio->lock, flags);
+
+ idio24gpio->irq_mask &= ~BIT(bit_offset);
+ new_irq_mask = idio24gpio->irq_mask >> bank_offset * 8;
+
+ if (!new_irq_mask) {
+ cos_enable_state = ioread8(&idio24gpio->reg->cos_enable);
+
+ /* Disable Rising Edge detection */
+ cos_enable_state &= ~BIT(bank_offset);
+ /* Disable Falling Edge detection */
+ cos_enable_state &= ~BIT(bank_offset + 4);
+
+ iowrite8(cos_enable_state, &idio24gpio->reg->cos_enable);
+ }
+
+ raw_spin_unlock_irqrestore(&idio24gpio->lock, flags);
+}
+
+static void idio_24_irq_unmask(struct irq_data *data)
+{
+ struct gpio_chip *const chip = irq_data_get_irq_chip_data(data);
+ struct idio_24_gpio *const idio24gpio = gpiochip_get_data(chip);
+ unsigned long flags;
+ unsigned char prev_irq_mask;
+ const unsigned long bit_offset = irqd_to_hwirq(data) - 24;
+ const unsigned long bank_offset = bit_offset / 8;
+ unsigned char cos_enable_state;
+
+ raw_spin_lock_irqsave(&idio24gpio->lock, flags);
+
+ prev_irq_mask = idio24gpio->irq_mask >> bank_offset * 8;
+ idio24gpio->irq_mask |= BIT(bit_offset);
+
+ if (!prev_irq_mask) {
+ cos_enable_state = ioread8(&idio24gpio->reg->cos_enable);
+
+ /* Enable Rising Edge detection */
+ cos_enable_state |= BIT(bank_offset);
+ /* Enable Falling Edge detection */
+ cos_enable_state |= BIT(bank_offset + 4);
+
+ iowrite8(cos_enable_state, &idio24gpio->reg->cos_enable);
+ }
+
+ raw_spin_unlock_irqrestore(&idio24gpio->lock, flags);
+}
+
+static int idio_24_irq_set_type(struct irq_data *data, unsigned int flow_type)
+{
+ /* The only valid irq types are none and both-edges */
+ if (flow_type != IRQ_TYPE_NONE &&
+ (flow_type & IRQ_TYPE_EDGE_BOTH) != IRQ_TYPE_EDGE_BOTH)
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct irq_chip idio_24_irqchip = {
+ .name = "pcie-idio-24",
+ .irq_ack = idio_24_irq_ack,
+ .irq_mask = idio_24_irq_mask,
+ .irq_unmask = idio_24_irq_unmask,
+ .irq_set_type = idio_24_irq_set_type
+};
+
+static irqreturn_t idio_24_irq_handler(int irq, void *dev_id)
+{
+ struct idio_24_gpio *const idio24gpio = dev_id;
+ unsigned long irq_status;
+ struct gpio_chip *const chip = &idio24gpio->chip;
+ unsigned long irq_mask;
+ int gpio;
+
+ raw_spin_lock(&idio24gpio->lock);
+
+ /* Read Change-Of-State status; one 32-bit read covers all four COS registers */
+ irq_status = ioread32(&idio24gpio->reg->cos0_7);
+
+ raw_spin_unlock(&idio24gpio->lock);
+
+ /* Make sure our device generated the IRQ */
+ if (!irq_status)
+ return IRQ_NONE;
+
+ /* Handle only unmasked IRQs */
+ irq_mask = idio24gpio->irq_mask & irq_status;
+
+ for_each_set_bit(gpio, &irq_mask, chip->ngpio - 24)
+ generic_handle_irq(irq_find_mapping(chip->irq.domain,
+ gpio + 24));
+
+ raw_spin_lock(&idio24gpio->lock);
+
+ /* Clear Change-Of-State status */
+ iowrite32(irq_status, &idio24gpio->reg->cos0_7);
+
+ raw_spin_unlock(&idio24gpio->lock);
+
+ return IRQ_HANDLED;
+}
+
+#define IDIO_24_NGPIO 56
+static const char *idio_24_names[IDIO_24_NGPIO] = {
+ "OUT0", "OUT1", "OUT2", "OUT3", "OUT4", "OUT5", "OUT6", "OUT7",
+ "OUT8", "OUT9", "OUT10", "OUT11", "OUT12", "OUT13", "OUT14", "OUT15",
+ "OUT16", "OUT17", "OUT18", "OUT19", "OUT20", "OUT21", "OUT22", "OUT23",
+ "IIN0", "IIN1", "IIN2", "IIN3", "IIN4", "IIN5", "IIN6", "IIN7",
+ "IIN8", "IIN9", "IIN10", "IIN11", "IIN12", "IIN13", "IIN14", "IIN15",
+ "IIN16", "IIN17", "IIN18", "IIN19", "IIN20", "IIN21", "IIN22", "IIN23",
+ "TTL0", "TTL1", "TTL2", "TTL3", "TTL4", "TTL5", "TTL6", "TTL7"
+};
+
+static int idio_24_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *const dev = &pdev->dev;
+ struct idio_24_gpio *idio24gpio;
+ int err;
+ const size_t pci_bar_index = 2;
+ const char *const name = pci_name(pdev);
+
+ idio24gpio = devm_kzalloc(dev, sizeof(*idio24gpio), GFP_KERNEL);
+ if (!idio24gpio)
+ return -ENOMEM;
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device (%d)\n", err);
+ return err;
+ }
+
+ err = pcim_iomap_regions(pdev, BIT(pci_bar_index), name);
+ if (err) {
+ dev_err(dev, "Unable to map PCI I/O addresses (%d)\n", err);
+ return err;
+ }
+
+ idio24gpio->reg = pcim_iomap_table(pdev)[pci_bar_index];
+
+ idio24gpio->chip.label = name;
+ idio24gpio->chip.parent = dev;
+ idio24gpio->chip.owner = THIS_MODULE;
+ idio24gpio->chip.base = -1;
+ idio24gpio->chip.ngpio = IDIO_24_NGPIO;
+ idio24gpio->chip.names = idio_24_names;
+ idio24gpio->chip.get_direction = idio_24_gpio_get_direction;
+ idio24gpio->chip.direction_input = idio_24_gpio_direction_input;
+ idio24gpio->chip.direction_output = idio_24_gpio_direction_output;
+ idio24gpio->chip.get = idio_24_gpio_get;
+ idio24gpio->chip.set = idio_24_gpio_set;
+
+ raw_spin_lock_init(&idio24gpio->lock);
+
+ /* Software board reset */
+ iowrite8(0, &idio24gpio->reg->soft_reset);
+
+ err = devm_gpiochip_add_data(dev, &idio24gpio->chip, idio24gpio);
+ if (err) {
+ dev_err(dev, "GPIO registering failed (%d)\n", err);
+ return err;
+ }
+
+ err = gpiochip_irqchip_add(&idio24gpio->chip, &idio_24_irqchip, 0,
+ handle_edge_irq, IRQ_TYPE_NONE);
+ if (err) {
+ dev_err(dev, "Could not add irqchip (%d)\n", err);
+ return err;
+ }
+
+ err = devm_request_irq(dev, pdev->irq, idio_24_irq_handler, IRQF_SHARED,
+ name, idio24gpio);
+ if (err) {
+ dev_err(dev, "IRQ handler registering failed (%d)\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct pci_device_id idio_24_pci_dev_id[] = {
+ { PCI_DEVICE(0x494F, 0x0FD0) }, { PCI_DEVICE(0x494F, 0x0BD0) },
+ { PCI_DEVICE(0x494F, 0x07D0) }, { PCI_DEVICE(0x494F, 0x0FC0) },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, idio_24_pci_dev_id);
+
+static struct pci_driver idio_24_driver = {
+ .name = "pcie-idio-24",
+ .id_table = idio_24_pci_dev_id,
+ .probe = idio_24_probe
+};
+
+module_pci_driver(idio_24_driver);
+
+MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
+MODULE_DESCRIPTION("ACCES PCIe-IDIO-24 GPIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index e6e5cca624a7..f8d7d1cd8488 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -190,6 +190,16 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
};
int i, j;
+ /*
+ * STMPE1600: to be able to get IRQs from the pins, a read must be
+ * done on the GPMR register, or a write to the GPSR or GPCR
+ * registers
+ */
+ if (stmpe->partnum == STMPE1600) {
+ stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_LSB]);
+ stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_CSB]);
+ }
+
for (i = 0; i < CACHE_NR_REGS; i++) {
/* STMPE801 and STMPE1600 don't have RE and FE registers */
if ((stmpe->partnum == STMPE801 ||
@@ -227,21 +237,11 @@ static void stmpe_gpio_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(gc);
- struct stmpe *stmpe = stmpe_gpio->stmpe;
int offset = d->hwirq;
int regoffset = offset / 8;
int mask = BIT(offset % 8);
stmpe_gpio->regs[REG_IE][regoffset] |= mask;
-
- /*
- * STMPE1600 workaround: to be able to get IRQ from pins,
- * a read must be done on GPMR register, or a write in
- * GPSR or GPCR registers
- */
- if (stmpe->partnum == STMPE1600)
- stmpe_reg_read(stmpe,
- stmpe->regs[STMPE_IDX_GPMR_LSB + regoffset]);
}
static void stmpe_dbg_show_one(struct seq_file *s,
@@ -273,15 +273,21 @@ static void stmpe_dbg_show_one(struct seq_file *s,
u8 fall_reg;
u8 irqen_reg;
- char *edge_det_values[] = {"edge-inactive",
- "edge-asserted",
- "not-supported"};
- char *rise_values[] = {"no-rising-edge-detection",
- "rising-edge-detection",
- "not-supported"};
- char *fall_values[] = {"no-falling-edge-detection",
- "falling-edge-detection",
- "not-supported"};
+ static const char * const edge_det_values[] = {
+ "edge-inactive",
+ "edge-asserted",
+ "not-supported"
+ };
+ static const char * const rise_values[] = {
+ "no-rising-edge-detection",
+ "rising-edge-detection",
+ "not-supported"
+ };
+ static const char * const fall_values[] = {
+ "no-falling-edge-detection",
+ "falling-edge-detection",
+ "not-supported"
+ };
#define NOT_SUPPORTED_IDX 2
u8 edge_det = NOT_SUPPORTED_IDX;
u8 rise = NOT_SUPPORTED_IDX;
@@ -344,7 +350,7 @@ static void stmpe_dbg_show(struct seq_file *s, struct gpio_chip *gc)
for (i = 0; i < gc->ngpio; i++, gpio++) {
stmpe_dbg_show_one(s, gc, i, gpio);
- seq_printf(s, "\n");
+ seq_putc(s, '\n');
}
}
@@ -426,12 +432,9 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent);
struct device_node *np = pdev->dev.of_node;
struct stmpe_gpio *stmpe_gpio;
- int ret;
- int irq = 0;
-
- irq = platform_get_irq(pdev, 0);
+ int ret, irq;
- stmpe_gpio = kzalloc(sizeof(struct stmpe_gpio), GFP_KERNEL);
+ stmpe_gpio = kzalloc(sizeof(*stmpe_gpio), GFP_KERNEL);
if (!stmpe_gpio)
return -ENOMEM;
@@ -453,6 +456,7 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
if (stmpe_gpio->norequest_mask)
stmpe_gpio->chip.irq.need_valid_mask = true;
+ irq = platform_get_irq(pdev, 0);
if (irq < 0)
dev_info(&pdev->dev,
"device configured in no-irq mode: "
diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c
index b5adb79a631a..d16e9d4a129b 100644
--- a/drivers/gpio/gpio-thunderx.c
+++ b/drivers/gpio/gpio-thunderx.c
@@ -553,8 +553,10 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
txgpio->irqd = irq_domain_create_hierarchy(irq_get_irq_data(txgpio->msix_entries[0].vector)->domain,
0, 0, of_node_to_fwnode(dev->of_node),
&thunderx_gpio_irqd_ops, txgpio);
- if (!txgpio->irqd)
+ if (!txgpio->irqd) {
+ err = -ENOMEM;
goto out;
+ }
/* Push on irq_data and the domain for each line. */
for (i = 0; i < ngpio; i++) {
diff --git a/drivers/gpio/gpio-winbond.c b/drivers/gpio/gpio-winbond.c
new file mode 100644
index 000000000000..7f8f5b02e31d
--- /dev/null
+++ b/drivers/gpio/gpio-winbond.c
@@ -0,0 +1,732 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * GPIO interface for Winbond Super I/O chips
+ * Currently, only W83627UHG (Nuvoton NCT6627UD) is supported.
+ *
+ * Author: Maciej S. Szmigiero <mail@maciej.szmigiero.name>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/gpio/driver.h>
+#include <linux/ioport.h>
+#include <linux/isa.h>
+#include <linux/module.h>
+
+#define WB_GPIO_DRIVER_NAME KBUILD_MODNAME
+
+#define WB_SIO_BASE 0x2e
+#define WB_SIO_BASE_HIGH 0x4e
+
+#define WB_SIO_EXT_ENTER_KEY 0x87
+#define WB_SIO_EXT_EXIT_KEY 0xaa
+
+/* global chip registers */
+
+#define WB_SIO_REG_LOGICAL 0x07
+
+#define WB_SIO_REG_CHIP_MSB 0x20
+#define WB_SIO_REG_CHIP_LSB 0x21
+
+#define WB_SIO_CHIP_ID_W83627UHG 0xa230
+#define WB_SIO_CHIP_ID_W83627UHG_MASK GENMASK(15, 4)
+
+#define WB_SIO_REG_DPD 0x22
+#define WB_SIO_REG_DPD_UARTA 4
+#define WB_SIO_REG_DPD_UARTB 5
+
+#define WB_SIO_REG_IDPD 0x23
+#define WB_SIO_REG_IDPD_UARTC 4
+#define WB_SIO_REG_IDPD_UARTD 5
+#define WB_SIO_REG_IDPD_UARTE 6
+#define WB_SIO_REG_IDPD_UARTF 7
+
+#define WB_SIO_REG_GLOBAL_OPT 0x24
+#define WB_SIO_REG_GO_ENFDC 1
+
+#define WB_SIO_REG_OVTGPIO3456 0x29
+#define WB_SIO_REG_OG3456_G3PP 3
+#define WB_SIO_REG_OG3456_G4PP 4
+#define WB_SIO_REG_OG3456_G5PP 5
+#define WB_SIO_REG_OG3456_G6PP 7
+
+#define WB_SIO_REG_I2C_PS 0x2a
+#define WB_SIO_REG_I2CPS_I2CFS 1
+
+#define WB_SIO_REG_GPIO1_MF 0x2c
+#define WB_SIO_REG_G1MF_G1PP 6
+#define WB_SIO_REG_G1MF_G2PP 7
+#define WB_SIO_REG_G1MF_FS_MASK GENMASK(1, 0)
+#define WB_SIO_REG_G1MF_FS_IR_OFF 0
+#define WB_SIO_REG_G1MF_FS_IR 1
+#define WB_SIO_REG_G1MF_FS_GPIO1 2
+#define WB_SIO_REG_G1MF_FS_UARTB 3
+
+/* not an actual device number, just a value meaning 'no device' */
+#define WB_SIO_DEV_NONE 0xff
+
+/* registers with offsets >= 0x30 are specific for a particular device */
+
+/* UART B logical device */
+#define WB_SIO_DEV_UARTB 0x03
+#define WB_SIO_UARTB_REG_ENABLE 0x30
+#define WB_SIO_UARTB_ENABLE_ON 0
+
+/* UART C logical device */
+#define WB_SIO_DEV_UARTC 0x06
+#define WB_SIO_UARTC_REG_ENABLE 0x30
+#define WB_SIO_UARTC_ENABLE_ON 0
+
+/* GPIO3, GPIO4 logical device */
+#define WB_SIO_DEV_GPIO34 0x07
+#define WB_SIO_GPIO34_REG_ENABLE 0x30
+#define WB_SIO_GPIO34_ENABLE_3 0
+#define WB_SIO_GPIO34_ENABLE_4 1
+#define WB_SIO_GPIO34_REG_IO3 0xe0
+#define WB_SIO_GPIO34_REG_DATA3 0xe1
+#define WB_SIO_GPIO34_REG_INV3 0xe2
+#define WB_SIO_GPIO34_REG_IO4 0xe4
+#define WB_SIO_GPIO34_REG_DATA4 0xe5
+#define WB_SIO_GPIO34_REG_INV4 0xe6
+
+/* WDTO, PLED, GPIO5, GPIO6 logical device */
+#define WB_SIO_DEV_WDGPIO56 0x08
+#define WB_SIO_WDGPIO56_REG_ENABLE 0x30
+#define WB_SIO_WDGPIO56_ENABLE_5 1
+#define WB_SIO_WDGPIO56_ENABLE_6 2
+#define WB_SIO_WDGPIO56_REG_IO5 0xe0
+#define WB_SIO_WDGPIO56_REG_DATA5 0xe1
+#define WB_SIO_WDGPIO56_REG_INV5 0xe2
+#define WB_SIO_WDGPIO56_REG_IO6 0xe4
+#define WB_SIO_WDGPIO56_REG_DATA6 0xe5
+#define WB_SIO_WDGPIO56_REG_INV6 0xe6
+
+/* GPIO1, GPIO2, SUSLED logical device */
+#define WB_SIO_DEV_GPIO12 0x09
+#define WB_SIO_GPIO12_REG_ENABLE 0x30
+#define WB_SIO_GPIO12_ENABLE_1 0
+#define WB_SIO_GPIO12_ENABLE_2 1
+#define WB_SIO_GPIO12_REG_IO1 0xe0
+#define WB_SIO_GPIO12_REG_DATA1 0xe1
+#define WB_SIO_GPIO12_REG_INV1 0xe2
+#define WB_SIO_GPIO12_REG_IO2 0xe4
+#define WB_SIO_GPIO12_REG_DATA2 0xe5
+#define WB_SIO_GPIO12_REG_INV2 0xe6
+
+/* UART D logical device */
+#define WB_SIO_DEV_UARTD 0x0d
+#define WB_SIO_UARTD_REG_ENABLE 0x30
+#define WB_SIO_UARTD_ENABLE_ON 0
+
+/* UART E logical device */
+#define WB_SIO_DEV_UARTE 0x0e
+#define WB_SIO_UARTE_REG_ENABLE 0x30
+#define WB_SIO_UARTE_ENABLE_ON 0
+
+/*
+ * for a description of what a particular field of this struct means
+ * please see the description of the relevant module parameter at the
+ * bottom of this file
+ */
+struct winbond_gpio_params {
+ unsigned long base;
+ unsigned long gpios;
+ unsigned long ppgpios;
+ unsigned long odgpios;
+ bool pledgpio;
+ bool beepgpio;
+ bool i2cgpio;
+};
+
+static struct winbond_gpio_params params;
+
+static int winbond_sio_enter(unsigned long base)
+{
+ if (!request_muxed_region(base, 2, WB_GPIO_DRIVER_NAME))
+ return -EBUSY;
+
+ /*
+ * datasheet says two successive writes of the "key" value are needed
+ * in order for the chip to enter the "Extended Function Mode"
+ */
+ outb(WB_SIO_EXT_ENTER_KEY, base);
+ outb(WB_SIO_EXT_ENTER_KEY, base);
+
+ return 0;
+}
+
+static void winbond_sio_select_logical(unsigned long base, u8 dev)
+{
+ outb(WB_SIO_REG_LOGICAL, base);
+ outb(dev, base + 1);
+}
+
+static void winbond_sio_leave(unsigned long base)
+{
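+ /* a single write of the exit key leaves the Extended Function Mode */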
+ outb(WB_SIO_EXT_EXIT_KEY, base);
+
+ release_region(base, 2);
+}
+
+static void winbond_sio_reg_write(unsigned long base, u8 reg, u8 data)
+{
+ outb(reg, base);
+ outb(data, base + 1);
+}
+
+static u8 winbond_sio_reg_read(unsigned long base, u8 reg)
+{
+ outb(reg, base);
+ return inb(base + 1);
+}
+
+static void winbond_sio_reg_bset(unsigned long base, u8 reg, u8 bit)
+{
+ u8 val;
+
+ val = winbond_sio_reg_read(base, reg);
+ val |= BIT(bit);
+ winbond_sio_reg_write(base, reg, val);
+}
+
+static void winbond_sio_reg_bclear(unsigned long base, u8 reg, u8 bit)
+{
+ u8 val;
+
+ val = winbond_sio_reg_read(base, reg);
+ val &= ~BIT(bit);
+ winbond_sio_reg_write(base, reg, val);
+}
+
+static bool winbond_sio_reg_btest(unsigned long base, u8 reg, u8 bit)
+{
+ return winbond_sio_reg_read(base, reg) & BIT(bit);
+}
+
+/**
+ * struct winbond_gpio_port_conflict - possibly conflicting device information
+ * @name: device name (NULL means no conflicting device defined)
+ * @dev: Super I/O logical device number where the testreg register
+ * is located (or WB_SIO_DEV_NONE - don't select any
+ * logical device)
+ * @testreg: register number where the testbit bit is located
+ * @testbit: index of a bit to check whether an actual conflict exists
+ * @warnonly: if set then a conflict isn't fatal (just warn about it),
+ * otherwise disable the particular GPIO port if a conflict
+ * is detected
+ */
+struct winbond_gpio_port_conflict {
+ const char *name;
+ u8 dev;
+ u8 testreg;
+ u8 testbit;
+ bool warnonly;
+};
+
+/**
+ * struct winbond_gpio_info - information about a particular GPIO port (device)
+ * @dev: Super I/O logical device number of the registers
+ * specified below
+ * @enablereg: port enable bit register number
+ * @enablebit: index of a port enable bit
+ * @outputreg: output driver mode bit register number
+ * @outputppbit: index of a push-pull output driver mode bit
+ * @ioreg: data direction register number
+ * @invreg: pin data inversion register number
+ * @datareg: pin data register number
+ * @conflict: description of a device that possibly conflicts with
+ * this port
+ */
+struct winbond_gpio_info {
+ u8 dev;
+ u8 enablereg;
+ u8 enablebit;
+ u8 outputreg;
+ u8 outputppbit;
+ u8 ioreg;
+ u8 invreg;
+ u8 datareg;
+ struct winbond_gpio_port_conflict conflict;
+};
+
+static const struct winbond_gpio_info winbond_gpio_infos[6] = {
+ { /* 0 */
+ .dev = WB_SIO_DEV_GPIO12,
+ .enablereg = WB_SIO_GPIO12_REG_ENABLE,
+ .enablebit = WB_SIO_GPIO12_ENABLE_1,
+ .outputreg = WB_SIO_REG_GPIO1_MF,
+ .outputppbit = WB_SIO_REG_G1MF_G1PP,
+ .ioreg = WB_SIO_GPIO12_REG_IO1,
+ .invreg = WB_SIO_GPIO12_REG_INV1,
+ .datareg = WB_SIO_GPIO12_REG_DATA1,
+ .conflict = {
+ .name = "UARTB",
+ .dev = WB_SIO_DEV_UARTB,
+ .testreg = WB_SIO_UARTB_REG_ENABLE,
+ .testbit = WB_SIO_UARTB_ENABLE_ON,
+ .warnonly = true
+ }
+ },
+ { /* 1 */
+ .dev = WB_SIO_DEV_GPIO12,
+ .enablereg = WB_SIO_GPIO12_REG_ENABLE,
+ .enablebit = WB_SIO_GPIO12_ENABLE_2,
+ .outputreg = WB_SIO_REG_GPIO1_MF,
+ .outputppbit = WB_SIO_REG_G1MF_G2PP,
+ .ioreg = WB_SIO_GPIO12_REG_IO2,
+ .invreg = WB_SIO_GPIO12_REG_INV2,
+ .datareg = WB_SIO_GPIO12_REG_DATA2
+ /* needs special conflict handling, so no conflict data is set */
+ },
+ { /* 2 */
+ .dev = WB_SIO_DEV_GPIO34,
+ .enablereg = WB_SIO_GPIO34_REG_ENABLE,
+ .enablebit = WB_SIO_GPIO34_ENABLE_3,
+ .outputreg = WB_SIO_REG_OVTGPIO3456,
+ .outputppbit = WB_SIO_REG_OG3456_G3PP,
+ .ioreg = WB_SIO_GPIO34_REG_IO3,
+ .invreg = WB_SIO_GPIO34_REG_INV3,
+ .datareg = WB_SIO_GPIO34_REG_DATA3,
+ .conflict = {
+ .name = "UARTC",
+ .dev = WB_SIO_DEV_UARTC,
+ .testreg = WB_SIO_UARTC_REG_ENABLE,
+ .testbit = WB_SIO_UARTC_ENABLE_ON,
+ .warnonly = true
+ }
+ },
+ { /* 3 */
+ .dev = WB_SIO_DEV_GPIO34,
+ .enablereg = WB_SIO_GPIO34_REG_ENABLE,
+ .enablebit = WB_SIO_GPIO34_ENABLE_4,
+ .outputreg = WB_SIO_REG_OVTGPIO3456,
+ .outputppbit = WB_SIO_REG_OG3456_G4PP,
+ .ioreg = WB_SIO_GPIO34_REG_IO4,
+ .invreg = WB_SIO_GPIO34_REG_INV4,
+ .datareg = WB_SIO_GPIO34_REG_DATA4,
+ .conflict = {
+ .name = "UARTD",
+ .dev = WB_SIO_DEV_UARTD,
+ .testreg = WB_SIO_UARTD_REG_ENABLE,
+ .testbit = WB_SIO_UARTD_ENABLE_ON,
+ .warnonly = true
+ }
+ },
+ { /* 4 */
+ .dev = WB_SIO_DEV_WDGPIO56,
+ .enablereg = WB_SIO_WDGPIO56_REG_ENABLE,
+ .enablebit = WB_SIO_WDGPIO56_ENABLE_5,
+ .outputreg = WB_SIO_REG_OVTGPIO3456,
+ .outputppbit = WB_SIO_REG_OG3456_G5PP,
+ .ioreg = WB_SIO_WDGPIO56_REG_IO5,
+ .invreg = WB_SIO_WDGPIO56_REG_INV5,
+ .datareg = WB_SIO_WDGPIO56_REG_DATA5,
+ .conflict = {
+ .name = "UARTE",
+ .dev = WB_SIO_DEV_UARTE,
+ .testreg = WB_SIO_UARTE_REG_ENABLE,
+ .testbit = WB_SIO_UARTE_ENABLE_ON,
+ .warnonly = true
+ }
+ },
+ { /* 5 */
+ .dev = WB_SIO_DEV_WDGPIO56,
+ .enablereg = WB_SIO_WDGPIO56_REG_ENABLE,
+ .enablebit = WB_SIO_WDGPIO56_ENABLE_6,
+ .outputreg = WB_SIO_REG_OVTGPIO3456,
+ .outputppbit = WB_SIO_REG_OG3456_G6PP,
+ .ioreg = WB_SIO_WDGPIO56_REG_IO6,
+ .invreg = WB_SIO_WDGPIO56_REG_INV6,
+ .datareg = WB_SIO_WDGPIO56_REG_DATA6,
+ .conflict = {
+ .name = "FDC",
+ .dev = WB_SIO_DEV_NONE,
+ .testreg = WB_SIO_REG_GLOBAL_OPT,
+ .testbit = WB_SIO_REG_GO_ENFDC,
+ .warnonly = false
+ }
+ }
+};
+
+/* returns whether changing a pin is allowed */
+static bool winbond_gpio_get_info(unsigned int *gpio_num,
+ const struct winbond_gpio_info **info)
+{
+ bool allow_changing = true;
+ unsigned long i;
+
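+ /*
+ * each enabled GPIO port contributes 8 lines; walk the enabled
+ * ports until the requested offset falls within one of them
+ */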
+ for_each_set_bit(i, &params.gpios, BITS_PER_LONG) {
+ if (*gpio_num < 8)
+ break;
+
+ *gpio_num -= 8;
+ }
+
+ *info = &winbond_gpio_infos[i];
+
+ /*
+ * GPIO2 (the second port) shares some pins with basic PC
+ * functionality, which is very likely controlled by the firmware.
+ * Don't allow changing these pins by default.
+ */
+ if (i == 1) {
+ if (*gpio_num == 0 && !params.pledgpio)
+ allow_changing = false;
+ else if (*gpio_num == 1 && !params.beepgpio)
+ allow_changing = false;
+ else if ((*gpio_num == 5 || *gpio_num == 6) && !params.i2cgpio)
+ allow_changing = false;
+ }
+
+ return allow_changing;
+}
+
+static int winbond_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ unsigned long *base = gpiochip_get_data(gc);
+ const struct winbond_gpio_info *info;
+ bool val;
+ int ret;
+
+ winbond_gpio_get_info(&offset, &info);
+
+ ret = winbond_sio_enter(*base);
+ if (ret)
+ return ret;
+
+ winbond_sio_select_logical(*base, info->dev);
+
+ val = winbond_sio_reg_btest(*base, info->datareg, offset);
+ if (winbond_sio_reg_btest(*base, info->invreg, offset))
+ val = !val;
+
+ winbond_sio_leave(*base);
+
+ return val;
+}
+
+static int winbond_gpio_direction_in(struct gpio_chip *gc, unsigned int offset)
+{
+ unsigned long *base = gpiochip_get_data(gc);
+ const struct winbond_gpio_info *info;
+ int ret;
+
+ if (!winbond_gpio_get_info(&offset, &info))
+ return -EACCES;
+
+ ret = winbond_sio_enter(*base);
+ if (ret)
+ return ret;
+
+ winbond_sio_select_logical(*base, info->dev);
+
+ winbond_sio_reg_bset(*base, info->ioreg, offset);
+
+ winbond_sio_leave(*base);
+
+ return 0;
+}
+
+static int winbond_gpio_direction_out(struct gpio_chip *gc,
+ unsigned int offset,
+ int val)
+{
+ unsigned long *base = gpiochip_get_data(gc);
+ const struct winbond_gpio_info *info;
+ int ret;
+
+ if (!winbond_gpio_get_info(&offset, &info))
+ return -EACCES;
+
+ ret = winbond_sio_enter(*base);
+ if (ret)
+ return ret;
+
+ winbond_sio_select_logical(*base, info->dev);
+
+ winbond_sio_reg_bclear(*base, info->ioreg, offset);
+
+ if (winbond_sio_reg_btest(*base, info->invreg, offset))
+ val = !val;
+
+ if (val)
+ winbond_sio_reg_bset(*base, info->datareg, offset);
+ else
+ winbond_sio_reg_bclear(*base, info->datareg, offset);
+
+ winbond_sio_leave(*base);
+
+ return 0;
+}
+
+static void winbond_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int val)
+{
+ unsigned long *base = gpiochip_get_data(gc);
+ const struct winbond_gpio_info *info;
+
+ if (!winbond_gpio_get_info(&offset, &info))
+ return;
+
+ if (winbond_sio_enter(*base) != 0)
+ return;
+
+ winbond_sio_select_logical(*base, info->dev);
+
+ if (winbond_sio_reg_btest(*base, info->invreg, offset))
+ val = !val;
+
+ if (val)
+ winbond_sio_reg_bset(*base, info->datareg, offset);
+ else
+ winbond_sio_reg_bclear(*base, info->datareg, offset);
+
+ winbond_sio_leave(*base);
+}
+
+static struct gpio_chip winbond_gpio_chip = {
+ .base = -1,
+ .label = WB_GPIO_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .can_sleep = true,
+ .get = winbond_gpio_get,
+ .direction_input = winbond_gpio_direction_in,
+ .set = winbond_gpio_set,
+ .direction_output = winbond_gpio_direction_out,
+};
+
+static void winbond_gpio_configure_port0_pins(unsigned long base)
+{
+ unsigned int val;
+
+ val = winbond_sio_reg_read(base, WB_SIO_REG_GPIO1_MF);
+ if ((val & WB_SIO_REG_G1MF_FS_MASK) == WB_SIO_REG_G1MF_FS_GPIO1)
+ return;
+
+ pr_warn("GPIO1 pins were connected to something else (%.2x), fixing\n",
+ val);
+
+ val &= ~WB_SIO_REG_G1MF_FS_MASK;
+ val |= WB_SIO_REG_G1MF_FS_GPIO1;
+
+ winbond_sio_reg_write(base, WB_SIO_REG_GPIO1_MF, val);
+}
+
+static void winbond_gpio_configure_port1_check_i2c(unsigned long base)
+{
+ params.i2cgpio = !winbond_sio_reg_btest(base, WB_SIO_REG_I2C_PS,
+ WB_SIO_REG_I2CPS_I2CFS);
+ if (!params.i2cgpio)
+ pr_warn("disabling GPIO2.5 and GPIO2.6 as I2C is enabled\n");
+}
+
+static bool winbond_gpio_configure_port(unsigned long base, unsigned int idx)
+{
+ const struct winbond_gpio_info *info = &winbond_gpio_infos[idx];
+ const struct winbond_gpio_port_conflict *conflict = &info->conflict;
+
+ /* is there a possible conflicting device defined? */
+ if (conflict->name != NULL) {
+ if (conflict->dev != WB_SIO_DEV_NONE)
+ winbond_sio_select_logical(base, conflict->dev);
+
+ if (winbond_sio_reg_btest(base, conflict->testreg,
+ conflict->testbit)) {
+ if (conflict->warnonly)
+ pr_warn("enabled GPIO%u share pins with active %s\n",
+ idx + 1, conflict->name);
+ else {
+ pr_warn("disabling GPIO%u as %s is enabled\n",
+ idx + 1, conflict->name);
+ return false;
+ }
+ }
+ }
+
+ /* GPIO1 and GPIO2 need some (additional) special handling */
+ if (idx == 0)
+ winbond_gpio_configure_port0_pins(base);
+ else if (idx == 1)
+ winbond_gpio_configure_port1_check_i2c(base);
+
+ winbond_sio_select_logical(base, info->dev);
+
+ winbond_sio_reg_bset(base, info->enablereg, info->enablebit);
+
+ if (params.ppgpios & BIT(idx))
+ winbond_sio_reg_bset(base, info->outputreg,
+ info->outputppbit);
+ else if (params.odgpios & BIT(idx))
+ winbond_sio_reg_bclear(base, info->outputreg,
+ info->outputppbit);
+ else
+ pr_notice("GPIO%u pins are %s\n", idx + 1,
+ winbond_sio_reg_btest(base, info->outputreg,
+ info->outputppbit) ?
+ "push-pull" :
+ "open drain");
+
+ return true;
+}
+
+static int winbond_gpio_configure(unsigned long base)
+{
+ unsigned long i;
+
+ for_each_set_bit(i, &params.gpios, BITS_PER_LONG)
+ if (!winbond_gpio_configure_port(base, i))
+ __clear_bit(i, &params.gpios);
+
+ if (!params.gpios) {
+ pr_err("please use 'gpios' module parameter to select some active GPIO ports to enable\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int winbond_gpio_check_chip(unsigned long base)
+{
+ int ret;
+ unsigned int chip;
+
+ ret = winbond_sio_enter(base);
+ if (ret)
+ return ret;
+
+ chip = winbond_sio_reg_read(base, WB_SIO_REG_CHIP_MSB) << 8;
+ chip |= winbond_sio_reg_read(base, WB_SIO_REG_CHIP_LSB);
+
+ pr_notice("chip ID at %lx is %.4x\n", base, chip);
+
+ if ((chip & WB_SIO_CHIP_ID_W83627UHG_MASK) !=
+ WB_SIO_CHIP_ID_W83627UHG) {
+ pr_err("not an our chip\n");
+ ret = -ENODEV;
+ }
+
+ winbond_sio_leave(base);
+
+ return ret;
+}
+
+static int winbond_gpio_imatch(struct device *dev, unsigned int id)
+{
+ unsigned long gpios_rem;
+ int ret;
+
+ gpios_rem = params.gpios & ~GENMASK(ARRAY_SIZE(winbond_gpio_infos) - 1,
+ 0);
+ if (gpios_rem) {
+ pr_warn("unknown ports (%lx) enabled in GPIO ports bitmask\n",
+ gpios_rem);
+ params.gpios &= ~gpios_rem;
+ }
+
+ if (params.ppgpios & params.odgpios) {
+ pr_err("some GPIO ports are set both to push-pull and open drain mode at the same time\n");
+ return 0;
+ }
+
+ if (params.base != 0)
+ return winbond_gpio_check_chip(params.base) == 0;
+
+ /*
+ * if the 'base' module parameter is unset, probe the two default
+ * chip I/O port bases
+ */
+ params.base = WB_SIO_BASE;
+ ret = winbond_gpio_check_chip(params.base);
+ if (ret == 0)
+ return 1;
+ if (ret != -ENODEV && ret != -EBUSY)
+ return 0;
+
+ params.base = WB_SIO_BASE_HIGH;
+ return winbond_gpio_check_chip(params.base) == 0;
+}
+
+static int winbond_gpio_iprobe(struct device *dev, unsigned int id)
+{
+ int ret;
+
+ if (params.base == 0)
+ return -EINVAL;
+
+ ret = winbond_sio_enter(params.base);
+ if (ret)
+ return ret;
+
+ ret = winbond_gpio_configure(params.base);
+
+ winbond_sio_leave(params.base);
+
+ if (ret)
+ return ret;
+
+ /*
+ * Add 8 GPIOs for every GPIO port that was enabled in the gpios
+ * module parameter (and that wasn't disabled earlier in
+ * winbond_gpio_configure() & co. due to, for example, a pin conflict).
+ */
+ winbond_gpio_chip.ngpio = hweight_long(params.gpios) * 8;
+
+ /*
+ * the GPIO6 port has only 5 pins, so if it is enabled the total
+ * count has to be adjusted accordingly
+ */
+ if (params.gpios & BIT(5))
+ winbond_gpio_chip.ngpio -= (8 - 5);
+
+ winbond_gpio_chip.parent = dev;
+
+ return devm_gpiochip_add_data(dev, &winbond_gpio_chip, &params.base);
+}
+
+static struct isa_driver winbond_gpio_idriver = {
+ .driver = {
+ .name = WB_GPIO_DRIVER_NAME,
+ },
+ .match = winbond_gpio_imatch,
+ .probe = winbond_gpio_iprobe,
+};
+
+module_isa_driver(winbond_gpio_idriver, 1);
+
+module_param_named(base, params.base, ulong, 0444);
+MODULE_PARM_DESC(base,
+ "I/O port base (when unset - probe chip default ones)");
+
+/* This parameter sets which GPIO devices (ports) we enable */
+module_param_named(gpios, params.gpios, ulong, 0444);
+MODULE_PARM_DESC(gpios,
+ "bitmask of GPIO ports to enable (bit 0 - GPIO1, bit 1 - GPIO2, etc.");
+
+/*
+ * These two parameters below set how we configure GPIO ports output drivers.
+ * It can't be a one bitmask since we need three values per port: push-pull,
+ * open-drain and keep as-is (this is the default).
+ */
+module_param_named(ppgpios, params.ppgpios, ulong, 0444);
+MODULE_PARM_DESC(ppgpios,
+ "bitmask of GPIO ports to set to push-pull mode (bit 0 - GPIO1, bit 1 - GPIO2, etc.");
+
+module_param_named(odgpios, params.odgpios, ulong, 0444);
+MODULE_PARM_DESC(odgpios,
+ "bitmask of GPIO ports to set to open drain mode (bit 0 - GPIO1, bit 1 - GPIO2, etc.");
+
+/*
+ * GPIO2.0 and GPIO2.1 control a basic PC functionality that we
+ * don't allow tinkering with by default (it is very likely that the
+ * firmware owns these pins).
+ * These two parameters below allow overriding these prohibitions.
+ */
+module_param_named(pledgpio, params.pledgpio, bool, 0644);
+MODULE_PARM_DESC(pledgpio,
+ "enable changing value of GPIO2.0 bit (Power LED), default no.");
+
+module_param_named(beepgpio, params.beepgpio, bool, 0644);
+MODULE_PARM_DESC(beepgpio,
+ "enable changing value of GPIO2.1 bit (BEEP), default no.");
+
+MODULE_AUTHOR("Maciej S. Szmigiero <mail@maciej.szmigiero.name>");
+MODULE_DESCRIPTION("GPIO interface for Winbond Super I/O chips");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index d6f3d9ee1350..0ecffd172a80 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -414,7 +414,8 @@ EXPORT_SYMBOL_GPL(devm_acpi_dev_remove_driver_gpios);
static bool acpi_get_driver_gpio_data(struct acpi_device *adev,
const char *name, int index,
- struct acpi_reference_args *args)
+ struct acpi_reference_args *args,
+ unsigned int *quirks)
{
const struct acpi_gpio_mapping *gm;
@@ -430,6 +431,8 @@ static bool acpi_get_driver_gpio_data(struct acpi_device *adev,
args->args[1] = par->line_index;
args->args[2] = par->active_low;
args->nargs = 3;
+
+ *quirks = gm->quirks;
return true;
}
@@ -461,8 +464,8 @@ acpi_gpio_to_gpiod_flags(const struct acpi_resource_gpio *agpio)
}
}
-int
-acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, enum gpiod_flags update)
+static int
+__acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, enum gpiod_flags update)
{
int ret = 0;
@@ -489,12 +492,31 @@ acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, enum gpiod_flags update)
return ret;
}
+int
+acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, struct acpi_gpio_info *info)
+{
+ struct device *dev = &info->adev->dev;
+ enum gpiod_flags old = *flags;
+ int ret;
+
+ ret = __acpi_gpio_update_gpiod_flags(&old, info->flags);
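+ /*
+ * With the NO_IO_RESTRICTION quirk the caller's flags are kept and
+ * a mismatch only triggers a firmware bug warning; otherwise the
+ * flags resolved from ACPI take precedence.
+ */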
+ if (info->quirks & ACPI_GPIO_QUIRK_NO_IO_RESTRICTION) {
+ if (ret)
+ dev_warn(dev, FW_BUG "GPIO not in correct mode, fixing\n");
+ } else {
+ if (ret)
+ dev_dbg(dev, "Override GPIO initialization flags\n");
+ *flags = old;
+ }
+
+ return ret;
+}
+
struct acpi_gpio_lookup {
struct acpi_gpio_info info;
int index;
int pin_index;
bool active_low;
- struct acpi_device *adev;
struct gpio_desc *desc;
int n;
};
@@ -531,8 +553,8 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
lookup->info.triggering = agpio->triggering;
} else {
lookup->info.flags = acpi_gpio_to_gpiod_flags(agpio);
+ lookup->info.polarity = lookup->active_low;
}
-
}
return 1;
@@ -541,12 +563,13 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
static int acpi_gpio_resource_lookup(struct acpi_gpio_lookup *lookup,
struct acpi_gpio_info *info)
{
+ struct acpi_device *adev = lookup->info.adev;
struct list_head res_list;
int ret;
INIT_LIST_HEAD(&res_list);
- ret = acpi_dev_get_resources(lookup->adev, &res_list,
+ ret = acpi_dev_get_resources(adev, &res_list,
acpi_populate_gpio_lookup,
lookup);
if (ret < 0)
@@ -557,11 +580,8 @@ static int acpi_gpio_resource_lookup(struct acpi_gpio_lookup *lookup,
if (!lookup->desc)
return -ENOENT;
- if (info) {
+ if (info)
*info = lookup->info;
- if (lookup->active_low)
- info->polarity = lookup->active_low;
- }
return 0;
}
@@ -570,6 +590,7 @@ static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
struct acpi_gpio_lookup *lookup)
{
struct acpi_reference_args args;
+ unsigned int quirks = 0;
int ret;
memset(&args, 0, sizeof(args));
@@ -581,14 +602,14 @@ static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
if (!adev)
return ret;
- if (!acpi_get_driver_gpio_data(adev, propname, index, &args))
+ if (!acpi_get_driver_gpio_data(adev, propname, index, &args,
+ &quirks))
return ret;
}
/*
* The property was found and resolved, so need to lookup the GPIO based
* on returned args.
*/
- lookup->adev = args.adev;
if (args.nargs != 3)
return -EPROTO;
@@ -596,6 +617,8 @@ static int acpi_gpio_property_lookup(struct fwnode_handle *fwnode,
lookup->pin_index = args.args[1];
lookup->active_low = !!args.args[2];
+ lookup->info.adev = args.adev;
+ lookup->info.quirks = quirks;
return 0;
}
@@ -643,11 +666,11 @@ static struct gpio_desc *acpi_get_gpiod_by_index(struct acpi_device *adev,
return ERR_PTR(ret);
dev_dbg(&adev->dev, "GPIO: _DSD returned %s %d %d %u\n",
- dev_name(&lookup.adev->dev), lookup.index,
+ dev_name(&lookup.info.adev->dev), lookup.index,
lookup.pin_index, lookup.active_low);
} else {
dev_dbg(&adev->dev, "GPIO: looking up %d in _CRS\n", index);
- lookup.adev = adev;
+ lookup.info.adev = adev;
}
ret = acpi_gpio_resource_lookup(&lookup, info);
@@ -664,7 +687,6 @@ struct gpio_desc *acpi_find_gpio(struct device *dev,
struct acpi_gpio_info info;
struct gpio_desc *desc;
char propname[32];
- int err;
int i;
/* Try first from _DSD */
@@ -703,10 +725,7 @@ struct gpio_desc *acpi_find_gpio(struct device *dev,
if (info.polarity == GPIO_ACTIVE_LOW)
*lookupflags |= GPIO_ACTIVE_LOW;
- err = acpi_gpio_update_gpiod_flags(dflags, info.flags);
- if (err)
- dev_dbg(dev, "Override GPIO initialization flags\n");
-
+ acpi_gpio_update_gpiod_flags(dflags, &info);
return desc;
}
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 72a0695d2ac3..564bb7a31da4 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -56,6 +56,42 @@ static struct gpio_desc *of_xlate_and_get_gpiod_flags(struct gpio_chip *chip,
return gpiochip_get_desc(chip, ret);
}
+static void of_gpio_flags_quirks(struct device_node *np,
+ enum of_gpio_flags *flags)
+{
+ /*
+ * Some GPIO fixed regulator quirks.
+ * Note that active low is the default.
+ */
+ if (IS_ENABLED(CONFIG_REGULATOR) &&
+ (of_device_is_compatible(np, "reg-fixed-voltage") ||
+ of_device_is_compatible(np, "regulator-gpio"))) {
+ /*
+ * The regulator GPIO handles are specified such that the
+ * presence or absence of "enable-active-high" solely controls
+ * the polarity of the GPIO line. Any phandle flags must
+ * be actively ignored.
+ */
+ if (*flags & OF_GPIO_ACTIVE_LOW) {
+ pr_warn("%s GPIO handle specifies active low - ignored\n",
+ of_node_full_name(np));
+ *flags &= ~OF_GPIO_ACTIVE_LOW;
+ }
+ if (!of_property_read_bool(np, "enable-active-high"))
+ *flags |= OF_GPIO_ACTIVE_LOW;
+ }
+ /*
+ * Legacy open drain handling for fixed voltage regulators.
+ */
+ if (IS_ENABLED(CONFIG_REGULATOR) &&
+ of_device_is_compatible(np, "reg-fixed-voltage") &&
+ of_property_read_bool(np, "gpio-open-drain")) {
+ *flags |= (OF_GPIO_SINGLE_ENDED | OF_GPIO_OPEN_DRAIN);
+ pr_info("%s uses legacy open drain flag - update the DTS if you can\n",
+ of_node_full_name(np));
+ }
+}
+
/**
* of_get_named_gpiod_flags() - Get a GPIO descriptor and flags for GPIO API
* @np: device node to get GPIO from
@@ -93,6 +129,9 @@ struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
if (IS_ERR(desc))
goto out;
+ if (flags)
+ of_gpio_flags_quirks(np, flags);
+
pr_debug("%s: parsed '%s' property of node '%pOF[%d]' - status (%d)\n",
__func__, propname, np, index,
PTR_ERR_OR_ZERO(desc));
@@ -117,6 +156,71 @@ int of_get_named_gpio_flags(struct device_node *np, const char *list_name,
}
EXPORT_SYMBOL(of_get_named_gpio_flags);
+/*
+ * The SPI GPIO bindings happened before we managed to establish that GPIO
+ * properties should be named "foo-gpios" so we have this special kludge for
+ * them.
+ */
+static struct gpio_desc *of_find_spi_gpio(struct device *dev, const char *con_id,
+ enum of_gpio_flags *of_flags)
+{
+ char prop_name[32]; /* 32 is max size of property name */
+ struct device_node *np = dev->of_node;
+ struct gpio_desc *desc;
+
+ /*
+ * Hopefully the compiler stubs the rest of the function if this
+ * is false.
+ */
+ if (!IS_ENABLED(CONFIG_SPI_MASTER))
+ return ERR_PTR(-ENOENT);
+
+ /* Allow this specifically for "spi-gpio" devices */
+ if (!of_device_is_compatible(np, "spi-gpio") || !con_id)
+ return ERR_PTR(-ENOENT);
+
+ /* Will be "gpio-sck", "gpio-mosi" or "gpio-miso" */
+ snprintf(prop_name, sizeof(prop_name), "%s-%s", "gpio", con_id);
+
+ desc = of_get_named_gpiod_flags(np, prop_name, 0, of_flags);
+ return desc;
+}
+
+/*
+ * Some regulator bindings happened before we managed to establish that GPIO
+ * properties should be named "foo-gpios" so we have this special kludge for
+ * them.
+ */
+static struct gpio_desc *of_find_regulator_gpio(struct device *dev, const char *con_id,
+ enum of_gpio_flags *of_flags)
+{
+ /* These are the connection IDs we accept as legacy GPIO phandles */
+ const char *whitelist[] = {
+ "wlf,ldoena", /* Arizona */
+ "wlf,ldo1ena", /* WM8994 */
+ "wlf,ldo2ena", /* WM8994 */
+ };
+ struct device_node *np = dev->of_node;
+ struct gpio_desc *desc;
+ int i;
+
+ if (!IS_ENABLED(CONFIG_REGULATOR))
+ return ERR_PTR(-ENOENT);
+
+ if (!con_id)
+ return ERR_PTR(-ENOENT);
+
+ for (i = 0; i < ARRAY_SIZE(whitelist); i++)
+ if (!strcmp(con_id, whitelist[i]))
+ break;
+
+ if (i == ARRAY_SIZE(whitelist))
+ return ERR_PTR(-ENOENT);
+
+ desc = of_get_named_gpiod_flags(np, con_id, 0, of_flags);
+ return desc;
+}
+
struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
unsigned int idx,
enum gpio_lookup_flags *flags)
@@ -126,6 +230,7 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
struct gpio_desc *desc;
unsigned int i;
+ /* Try GPIO property "foo-gpios" and "foo-gpio" */
for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) {
if (con_id)
snprintf(prop_name, sizeof(prop_name), "%s-%s", con_id,
@@ -140,6 +245,14 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
break;
}
+ /* Special handling for SPI GPIOs if used */
+ if (IS_ERR(desc))
+ desc = of_find_spi_gpio(dev, con_id, &of_flags);
+
+ /* Special handling for regulator GPIOs if used */
+ if (IS_ERR(desc))
+ desc = of_find_regulator_gpio(dev, con_id, &of_flags);
+
if (IS_ERR(desc))
return desc;
@@ -153,8 +266,8 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
*flags |= GPIO_OPEN_SOURCE;
}
- if (of_flags & OF_GPIO_SLEEP_MAY_LOSE_VALUE)
- *flags |= GPIO_SLEEP_MAY_LOSE_VALUE;
+ if (of_flags & OF_GPIO_TRANSITORY)
+ *flags |= GPIO_TRANSITORY;
return desc;
}
@@ -214,6 +327,8 @@ static struct gpio_desc *of_parse_own_gpio(struct device_node *np,
if (xlate_flags & OF_GPIO_ACTIVE_LOW)
*lflags |= GPIO_ACTIVE_LOW;
+ if (xlate_flags & OF_GPIO_TRANSITORY)
+ *lflags |= GPIO_TRANSITORY;
if (of_property_read_bool(np, "input"))
*dflags |= GPIOD_IN;
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 3f454eaf2101..3dbaf489a8a5 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -8,6 +8,7 @@
#include <linux/interrupt.h>
#include <linux/kdev_t.h>
#include <linux/slab.h>
+#include <linux/ctype.h>
#include "gpiolib.h"
@@ -106,8 +107,14 @@ static ssize_t value_show(struct device *dev,
mutex_lock(&data->mutex);
- status = sprintf(buf, "%d\n", gpiod_get_value_cansleep(desc));
+ status = gpiod_get_value_cansleep(desc);
+ if (status < 0)
+ goto err;
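+ /* the value is 0 or 1 here, so emit it directly instead of via sprintf() */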
+ buf[0] = '0' + status;
+ buf[1] = '\n';
+ status = 2;
+err:
mutex_unlock(&data->mutex);
return status;
@@ -118,7 +125,7 @@ static ssize_t value_store(struct device *dev,
{
struct gpiod_data *data = dev_get_drvdata(dev);
struct gpio_desc *desc = data->desc;
- ssize_t status;
+ ssize_t status = 0;
mutex_lock(&data->mutex);
@@ -127,7 +134,11 @@ static ssize_t value_store(struct device *dev,
} else {
long value;
- status = kstrtol(buf, 0, &value);
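+ /* fast-path plain single-digit writes, falling back to kstrtol() */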
+ if (size <= 2 && isdigit(buf[0]) &&
+ (size == 1 || buf[1] == '\n'))
+ value = buf[0] - '0';
+ else
+ status = kstrtol(buf, 0, &value);
if (status == 0) {
gpiod_set_value_cansleep(desc, value);
status = size;
@@ -138,7 +149,7 @@ static ssize_t value_store(struct device *dev,
return status;
}
-static DEVICE_ATTR_RW(value);
+static DEVICE_ATTR_PREALLOC(value, S_IWUSR | S_IRUGO, value_show, value_store);
static irqreturn_t gpio_sysfs_irq(int irq, void *priv)
{
@@ -474,11 +485,15 @@ static ssize_t export_store(struct class *class,
status = -ENODEV;
goto done;
}
- status = gpiod_export(desc, true);
- if (status < 0)
- gpiod_free(desc);
- else
- set_bit(FLAG_SYSFS, &desc->flags);
+
+ status = gpiod_set_transitory(desc, false);
+ if (!status) {
+ status = gpiod_export(desc, true);
+ if (status < 0)
+ gpiod_free(desc);
+ else
+ set_bit(FLAG_SYSFS, &desc->flags);
+ }
done:
if (status)
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 14532d9576e4..36ca5064486e 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -162,7 +162,7 @@ EXPORT_SYMBOL_GPL(desc_to_gpio);
*/
struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
{
- if (!desc || !desc->gdev || !desc->gdev->chip)
+ if (!desc || !desc->gdev)
return NULL;
return desc->gdev->chip;
}
@@ -196,7 +196,7 @@ static int gpiochip_find_base(int ngpio)
* gpiod_get_direction - return the current direction of a GPIO
* @desc: GPIO to get the direction of
*
- * Return GPIOF_DIR_IN or GPIOF_DIR_OUT, or an error code in case of error.
+ * Returns 0 for output, 1 for input, or an error code in case of error.
*
* This function may sleep if gpiod_cansleep() is true.
*/
@@ -460,6 +460,15 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
if (lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS)
return -EINVAL;
+ /*
+ * Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If
+ * the hardware actually supports enabling both at the same time the
+ * electrical result would be disastrous.
+ */
+ if ((lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) &&
+ (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
+ return -EINVAL;
+
/* OPEN_DRAIN and OPEN_SOURCE flags only make sense for output mode. */
if (!(lflags & GPIOHANDLE_REQUEST_OUTPUT) &&
((lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
@@ -506,6 +515,10 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
if (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)
set_bit(FLAG_OPEN_SOURCE, &desc->flags);
+ ret = gpiod_set_transitory(desc, false);
+ if (ret < 0)
+ goto out_free_descs;
+
/*
* Lines have to be requested explicitly for input
* or output, else the line will be treated "as is".
@@ -588,6 +601,9 @@ out_free_lh:
* @events: KFIFO for the GPIO events
* @read_lock: mutex lock to protect reads from colliding with adding
* new events to the FIFO
+ * @timestamp: cache for the timestamp, carried from the hardirq
+ * handler to the IRQ thread to keep it close to the actual
+ * event
*/
struct lineevent_state {
struct gpio_device *gdev;
@@ -598,17 +614,18 @@ struct lineevent_state {
wait_queue_head_t wait;
DECLARE_KFIFO(events, struct gpioevent_data, 16);
struct mutex read_lock;
+ u64 timestamp;
};
#define GPIOEVENT_REQUEST_VALID_FLAGS \
(GPIOEVENT_REQUEST_RISING_EDGE | \
GPIOEVENT_REQUEST_FALLING_EDGE)
-static unsigned int lineevent_poll(struct file *filep,
+static __poll_t lineevent_poll(struct file *filep,
struct poll_table_struct *wait)
{
struct lineevent_state *le = filep->private_data;
- unsigned int events = 0;
+ __poll_t events = 0;
poll_wait(filep, &le->wait, wait);
@@ -732,7 +749,10 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
struct gpioevent_data ge;
int ret, level;
- ge.timestamp = ktime_get_real_ns();
+ /* Do not leak kernel stack to userspace */
+ memset(&ge, 0, sizeof(ge));
+
+ ge.timestamp = le->timestamp;
level = gpiod_get_value_cansleep(le->desc);
if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
@@ -760,6 +780,19 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
return IRQ_HANDLED;
}
+static irqreturn_t lineevent_irq_handler(int irq, void *p)
+{
+ struct lineevent_state *le = p;
+
+ /*
+ * Just store the timestamp in hardirq context so we get it as
+ * close in time as possible to the actual event.
+ */
+ le->timestamp = ktime_get_real_ns();
+
+ return IRQ_WAKE_THREAD;
+}
+
static int lineevent_create(struct gpio_device *gdev, void __user *ip)
{
struct gpioevent_request eventreq;
@@ -852,7 +885,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
/* Request a thread to read the events */
ret = request_threaded_irq(le->irq,
- NULL,
+ lineevent_irq_handler,
lineevent_irq_thread,
irqflags,
le->label,
@@ -1050,7 +1083,7 @@ static void gpiodevice_release(struct device *dev)
list_del(&gdev->list);
ida_simple_remove(&gpio_ida, gdev->id);
- kfree(gdev->label);
+ kfree_const(gdev->label);
kfree(gdev->descs);
kfree(gdev);
}
@@ -1159,10 +1192,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
goto err_free_descs;
}
- if (chip->label)
- gdev->label = kstrdup(chip->label, GFP_KERNEL);
- else
- gdev->label = kstrdup("unknown", GFP_KERNEL);
+ gdev->label = kstrdup_const(chip->label ?: "unknown", GFP_KERNEL);
if (!gdev->label) {
status = -ENOMEM;
goto err_free_descs;
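kstrdup_const() avoids the copy when the source string lives in the kernel's .rodata section (as most chip->label literals do) and simply returns the original pointer; the paired kfree_const() only frees pointers that were really duplicated. Both ends must be converted together, which is why the kfree() call sites in gpiodevice_release() and in the error path change in the same patch. A hedged sketch of the pairing (struct is illustrative):

	struct demo_dev {
		const char *label;
	};

	static int demo_set_label(struct demo_dev *d, const char *src)
	{
		d->label = kstrdup_const(src ?: "unknown", GFP_KERNEL);
		return d->label ? 0 : -ENOMEM;
	}

	static void demo_release(struct demo_dev *d)
	{
		kfree_const(d->label);	/* no-op if label still points into .rodata */
	}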
@@ -1209,31 +1239,14 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
struct gpio_desc *desc = &gdev->descs[i];
desc->gdev = gdev;
- /*
- * REVISIT: most hardware initializes GPIOs as inputs
- * (often with pullups enabled) so power usage is
- * minimized. Linux code should set the gpio direction
- * first thing; but until it does, and in case
- * chip->get_direction is not set, we may expose the
- * wrong direction in sysfs.
- */
- if (chip->get_direction) {
- /*
- * If we have .get_direction, set up the initial
- * direction flag from the hardware.
- */
- int dir = chip->get_direction(chip, i);
-
- if (!dir)
- set_bit(FLAG_IS_OUT, &desc->flags);
- } else if (!chip->direction_input) {
- /*
- * If the chip lacks the .direction_input callback
- * we logically assume all lines are outputs.
- */
- set_bit(FLAG_IS_OUT, &desc->flags);
- }
+ /* REVISIT: most hardware initializes GPIOs as inputs (often
+ * with pullups enabled) so power usage is minimized. Linux
+ * code should set the gpio direction first thing; but until
+ * it does, and in case chip->get_direction is not set, we may
+ * expose the wrong direction in sysfs.
+ */
+ desc->flags = !chip->direction_input ? (1 << FLAG_IS_OUT) : 0;
}
#ifdef CONFIG_PINCTRL
@@ -1283,7 +1296,7 @@ err_remove_from_list:
list_del(&gdev->list);
spin_unlock_irqrestore(&gpio_lock, flags);
err_free_label:
- kfree(gdev->label);
+ kfree_const(gdev->label);
err_free_descs:
kfree(gdev->descs);
err_free_gdev:
@@ -1383,7 +1396,7 @@ static int devm_gpio_chip_match(struct device *dev, void *res, void *data)
}
/**
- * devm_gpiochip_add_data() - Resource manager piochip_add_data()
+ * devm_gpiochip_add_data() - Resource manager gpiochip_add_data()
* @dev: pointer to the device to which the irq_chip belongs.
* @chip: the chip to register, with chip->base initialized
* @data: driver-private data associated with this chip
@@ -1510,14 +1523,15 @@ static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip)
gpiochip->irq.valid_mask = NULL;
}
-static bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gpiochip,
- unsigned int offset)
+bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gpiochip,
+ unsigned int offset)
{
/* No mask means all valid */
if (likely(!gpiochip->irq.valid_mask))
return true;
return test_bit(offset, gpiochip->irq.valid_mask);
}
+EXPORT_SYMBOL_GPL(gpiochip_irqchip_irq_valid);
/**
* gpiochip_set_cascaded_irqchip() - connects a cascaded irqchip to a gpiochip
@@ -2174,40 +2188,37 @@ done:
* macro to avoid endless duplication. If the desc is NULL it is an
* optional GPIO and calls should just bail out.
*/
+static int validate_desc(const struct gpio_desc *desc, const char *func)
+{
+ if (!desc)
+ return 0;
+ if (IS_ERR(desc)) {
+ pr_warn("%s: invalid GPIO (errorpointer)\n", func);
+ return PTR_ERR(desc);
+ }
+ if (!desc->gdev) {
+ pr_warn("%s: invalid GPIO (no device)\n", func);
+ return -EINVAL;
+ }
+ if (!desc->gdev->chip) {
+ dev_warn(&desc->gdev->dev,
+ "%s: backing chip is gone\n", func);
+ return 0;
+ }
+ return 1;
+}
+
#define VALIDATE_DESC(desc) do { \
- if (!desc) \
- return 0; \
- if (IS_ERR(desc)) { \
- pr_warn("%s: invalid GPIO (errorpointer)\n", __func__); \
- return PTR_ERR(desc); \
- } \
- if (!desc->gdev) { \
- pr_warn("%s: invalid GPIO (no device)\n", __func__); \
- return -EINVAL; \
- } \
- if ( !desc->gdev->chip ) { \
- dev_warn(&desc->gdev->dev, \
- "%s: backing chip is gone\n", __func__); \
- return 0; \
- } } while (0)
+ int __valid = validate_desc(desc, __func__); \
+ if (__valid <= 0) \
+ return __valid; \
+ } while (0)
#define VALIDATE_DESC_VOID(desc) do { \
- if (!desc) \
- return; \
- if (IS_ERR(desc)) { \
- pr_warn("%s: invalid GPIO (errorpointer)\n", __func__); \
- return; \
- } \
- if (!desc->gdev) { \
- pr_warn("%s: invalid GPIO (no device)\n", __func__); \
- return; \
- } \
- if (!desc->gdev->chip) { \
- dev_warn(&desc->gdev->dev, \
- "%s: backing chip is gone\n", __func__); \
+ int __valid = validate_desc(desc, __func__); \
+ if (__valid <= 0) \
return; \
- } } while (0)
-
+ } while (0)
int gpiod_request(struct gpio_desc *desc, const char *label)
{
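Hoisting the shared checks out of the two macros into validate_desc() removes the duplication; the helper's tri-state return (negative errno = hard failure, 0 = optional/absent descriptor so the caller bails out quietly, 1 = valid) is what lets both the int-returning and the void-returning wrapper stay three lines each. The same pattern in isolation, as a sketch:

	/* sketch: one tri-state validator behind two early-return macros */
	struct thing;

	static int validate_thing(const struct thing *t, const char *func)
	{
		if (!t)
			return 0;		/* optional object: bail out quietly */
		if (IS_ERR(t)) {
			pr_warn("%s: invalid thing\n", func);
			return PTR_ERR(t);	/* hard error */
		}
		return 1;			/* valid: proceed */
	}

	#define VALIDATE_THING(t) do {					\
			int __v = validate_thing(t, __func__);		\
			if (__v <= 0)					\
				return __v;				\
		} while (0)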
@@ -2456,7 +2467,7 @@ EXPORT_SYMBOL_GPL(gpiod_direction_output_raw);
*/
int gpiod_direction_output(struct gpio_desc *desc, int value)
{
- struct gpio_chip *gc = desc->gdev->chip;
+ struct gpio_chip *gc;
int ret;
VALIDATE_DESC(desc);
@@ -2473,6 +2484,7 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
return -EIO;
}
+ gc = desc->gdev->chip;
if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) {
/* First see if we can enable open drain in hardware */
ret = gpio_set_drive_single_ended(gc, gpio_chip_hwgpio(desc),
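Note also why the gc initializer moved in this hunk: the old code dereferenced desc->gdev before VALIDATE_DESC() had run, so a NULL or ERR_PTR descriptor would crash instead of being caught. The shape to follow, as a sketch:

	/* sketch: validate the descriptor before the first dereference */
	int demo_direction_output(struct gpio_desc *desc, int value)
	{
		struct gpio_chip *gc;

		VALIDATE_DESC(desc);		/* may return early on NULL/error */
		gc = desc->gdev->chip;		/* safe only after validation */
		/* ... proceed using gc ... */
		return 0;
	}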
@@ -2530,6 +2542,50 @@ int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
EXPORT_SYMBOL_GPL(gpiod_set_debounce);
/**
+ * gpiod_set_transitory - Lose or retain GPIO state on suspend or reset
+ * @desc: descriptor of the GPIO for which to configure persistence
+ * @transitory: True to lose state on suspend or reset, false for persistence
+ *
+ * Returns:
+ * 0 on success, otherwise a negative error code.
+ */
+int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
+{
+ struct gpio_chip *chip;
+ unsigned long packed;
+ int gpio;
+ int rc;
+
+ VALIDATE_DESC(desc);
+ /*
+ * Handle FLAG_TRANSITORY first, enabling queries to gpiolib for
+ * persistence state.
+ */
+ if (transitory)
+ set_bit(FLAG_TRANSITORY, &desc->flags);
+ else
+ clear_bit(FLAG_TRANSITORY, &desc->flags);
+
+ /* If the driver supports it, set the persistence state now */
+ chip = desc->gdev->chip;
+ if (!chip->set_config)
+ return 0;
+
+ packed = pinconf_to_config_packed(PIN_CONFIG_PERSIST_STATE,
+ !transitory);
+ gpio = gpio_chip_hwgpio(desc);
+ rc = chip->set_config(chip, gpio, packed);
+ if (rc == -ENOTSUPP) {
+ dev_dbg(&desc->gdev->dev, "Persistence not supported for GPIO %d\n",
+ gpio);
+ return 0;
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(gpiod_set_transitory);
+
+/**
* gpiod_is_active_low - test whether a GPIO is active-low or not
* @desc: the gpio descriptor to test
*
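From the consumer side, the new call asks the driver (through PIN_CONFIG_PERSIST_STATE, if the chip implements .set_config) to keep or drop the line's state across suspend or reset; a chip answering -ENOTSUPP is deliberately treated as success, because persistence is best-effort. A hedged usage sketch (consumer name illustrative):

	static int demo_request_reset(struct device *dev)
	{
		struct gpio_desc *reset = gpiod_get(dev, "reset", GPIOD_OUT_LOW);
		int ret;

		if (IS_ERR(reset))
			return PTR_ERR(reset);
		/* transitory = false: try to keep the level across suspend/reset */
		ret = gpiod_set_transitory(reset, false);
		if (ret)
			gpiod_put(reset);
		return ret;
	}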
@@ -3129,8 +3185,7 @@ bool gpiochip_line_is_persistent(struct gpio_chip *chip, unsigned int offset)
if (offset >= chip->ngpio)
return false;
- return !test_bit(FLAG_SLEEP_MAY_LOSE_VALUE,
- &chip->gpiodev->descs[offset].flags);
+ return !test_bit(FLAG_TRANSITORY, &chip->gpiodev->descs[offset].flags);
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_persistent);
@@ -3565,8 +3620,10 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
if (lflags & GPIO_OPEN_SOURCE)
set_bit(FLAG_OPEN_SOURCE, &desc->flags);
- if (lflags & GPIO_SLEEP_MAY_LOSE_VALUE)
- set_bit(FLAG_SLEEP_MAY_LOSE_VALUE, &desc->flags);
+
+ status = gpiod_set_transitory(desc, (lflags & GPIO_TRANSITORY));
+ if (status < 0)
+ return status;
/* No particular flag request, return here... */
if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) {
@@ -3606,6 +3663,8 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
struct gpio_desc *desc = NULL;
int status;
enum gpio_lookup_flags lookupflags = 0;
+ /* Maybe we have a device name, maybe not */
+ const char *devname = dev ? dev_name(dev) : "?";
dev_dbg(dev, "GPIO lookup for consumer %s\n", con_id);
@@ -3634,7 +3693,11 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
return desc;
}
- status = gpiod_request(desc, con_id);
+ /*
+ * If a connection label was passed, use that; else attempt to use
+ * the device name as label
+ */
+ status = gpiod_request(desc, con_id ? con_id : devname);
if (status < 0)
return ERR_PTR(status);
@@ -3650,17 +3713,88 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
EXPORT_SYMBOL_GPL(gpiod_get_index);
/**
+ * gpiod_get_from_of_node() - obtain a GPIO from an OF node
+ * @node: handle of the OF node
+ * @propname: name of the DT property representing the GPIO
+ * @index: index of the GPIO to obtain for the consumer
+ * @dflags: GPIO initialization flags
+ * @label: label to attach to the requested GPIO
+ *
+ * Returns:
+ * On successful request the GPIO pin is configured in accordance with
+ * provided @dflags. If the node does not have the requested GPIO
+ * property, NULL is returned.
+ *
+ * In case of error an ERR_PTR() is returned.
+ */
+struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
+ const char *propname, int index,
+ enum gpiod_flags dflags,
+ const char *label)
+{
+ struct gpio_desc *desc;
+ unsigned long lflags = 0;
+ enum of_gpio_flags flags;
+ bool active_low = false;
+ bool single_ended = false;
+ bool open_drain = false;
+ bool transitory = false;
+ int ret;
+
+ desc = of_get_named_gpiod_flags(node, propname,
+ index, &flags);
+
+ if (!desc || IS_ERR(desc)) {
+ /* If it is not there, just return NULL */
+ if (PTR_ERR(desc) == -ENOENT)
+ return NULL;
+ return desc;
+ }
+
+ active_low = flags & OF_GPIO_ACTIVE_LOW;
+ single_ended = flags & OF_GPIO_SINGLE_ENDED;
+ open_drain = flags & OF_GPIO_OPEN_DRAIN;
+ transitory = flags & OF_GPIO_TRANSITORY;
+
+ ret = gpiod_request(desc, label);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (active_low)
+ lflags |= GPIO_ACTIVE_LOW;
+
+ if (single_ended) {
+ if (open_drain)
+ lflags |= GPIO_OPEN_DRAIN;
+ else
+ lflags |= GPIO_OPEN_SOURCE;
+ }
+
+ if (transitory)
+ lflags |= GPIO_TRANSITORY;
+
+ ret = gpiod_configure_flags(desc, propname, lflags, dflags);
+ if (ret < 0) {
+ gpiod_put(desc);
+ return ERR_PTR(ret);
+ }
+
+ return desc;
+}
+EXPORT_SYMBOL(gpiod_get_from_of_node);
+
+/**
* fwnode_get_named_gpiod - obtain a GPIO from firmware node
* @fwnode: handle of the firmware node
* @propname: name of the firmware property representing the GPIO
- * @index: index of the GPIO to obtain in the consumer
+ * @index: index of the GPIO to obtain for the consumer
* @dflags: GPIO initialization flags
* @label: label to attach to the requested GPIO
*
* This function can be used for drivers that get their configuration
- * from firmware.
+ * from opaque firmware.
*
- * Function properly finds the corresponding GPIO using whatever is the
+ * The function properly finds the corresponding GPIO using whatever is the
* underlying firmware interface and then makes sure that the GPIO
* descriptor is requested before it is returned to the caller.
*
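gpiod_get_from_of_node() covers the case where the GPIO property sits on an OF node other than the consumer device's own, e.g. a child node; note its three-way result: a valid descriptor, NULL when the property is simply absent, or ERR_PTR() for real failures. A hedged usage sketch (node and property names illustrative):

	static int demo_get_enable(struct device *dev, struct device_node *child)
	{
		struct gpio_desc *en;

		en = gpiod_get_from_of_node(child, "enable-gpios", 0,
					    GPIOD_OUT_HIGH, "port-enable");
		if (IS_ERR(en))
			return PTR_ERR(en);	/* lookup or request failed */
		if (!en)
			dev_dbg(dev, "no enable line, running without one\n");
		return 0;
	}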
@@ -3677,53 +3811,35 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
{
struct gpio_desc *desc = ERR_PTR(-ENODEV);
unsigned long lflags = 0;
- bool active_low = false;
- bool single_ended = false;
- bool open_drain = false;
int ret;
if (!fwnode)
return ERR_PTR(-EINVAL);
if (is_of_node(fwnode)) {
- enum of_gpio_flags flags;
-
- desc = of_get_named_gpiod_flags(to_of_node(fwnode), propname,
- index, &flags);
- if (!IS_ERR(desc)) {
- active_low = flags & OF_GPIO_ACTIVE_LOW;
- single_ended = flags & OF_GPIO_SINGLE_ENDED;
- open_drain = flags & OF_GPIO_OPEN_DRAIN;
- }
+ desc = gpiod_get_from_of_node(to_of_node(fwnode),
+ propname, index,
+ dflags,
+ label);
+ return desc;
} else if (is_acpi_node(fwnode)) {
struct acpi_gpio_info info;
desc = acpi_node_get_gpiod(fwnode, propname, index, &info);
- if (!IS_ERR(desc)) {
- active_low = info.polarity == GPIO_ACTIVE_LOW;
- ret = acpi_gpio_update_gpiod_flags(&dflags, info.flags);
- if (ret)
- pr_debug("Override GPIO initialization flags\n");
- }
- }
+ if (IS_ERR(desc))
+ return desc;
- if (IS_ERR(desc))
- return desc;
+ acpi_gpio_update_gpiod_flags(&dflags, &info);
+ if (info.polarity == GPIO_ACTIVE_LOW)
+ lflags |= GPIO_ACTIVE_LOW;
+ }
+
+ /* Currently only ACPI takes this path */
ret = gpiod_request(desc, label);
if (ret)
return ERR_PTR(ret);
- if (active_low)
- lflags |= GPIO_ACTIVE_LOW;
-
- if (single_ended) {
- if (open_drain)
- lflags |= GPIO_OPEN_DRAIN;
- else
- lflags |= GPIO_OPEN_SOURCE;
- }
-
ret = gpiod_configure_flags(desc, propname, lflags, dflags);
if (ret < 0) {
gpiod_put(desc);
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 6c44d1652139..b17ec6795c81 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -58,7 +58,7 @@ struct gpio_device {
struct gpio_desc *descs;
int base;
u16 ngpio;
- char *label;
+ const char *label;
void *data;
struct list_head list;
@@ -75,16 +75,20 @@ struct gpio_device {
/**
* struct acpi_gpio_info - ACPI GPIO specific information
+ * @adev: reference to ACPI device which consumes GPIO resource
* @flags: GPIO initialization flags
* @gpioint: if %true this GPIO is of type GpioInt otherwise type is GpioIo
* @polarity: interrupt polarity as provided by ACPI
* @triggering: triggering type as provided by ACPI
+ * @quirks: Linux specific quirks as provided by struct acpi_gpio_mapping
*/
struct acpi_gpio_info {
+ struct acpi_device *adev;
enum gpiod_flags flags;
bool gpioint;
int polarity;
int triggering;
+ unsigned int quirks;
};
/* gpio suffixes used for ACPI and device tree lookup */
@@ -124,7 +128,7 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip);
void acpi_gpiochip_free_interrupts(struct gpio_chip *chip);
int acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags,
- enum gpiod_flags update);
+ struct acpi_gpio_info *info);
struct gpio_desc *acpi_find_gpio(struct device *dev,
const char *con_id,
@@ -149,7 +153,7 @@ static inline void
acpi_gpiochip_free_interrupts(struct gpio_chip *chip) { }
static inline int
-acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, enum gpiod_flags update)
+acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, struct acpi_gpio_info *info)
{
return 0;
}
@@ -189,6 +193,12 @@ void gpiod_set_array_value_complex(bool raw, bool can_sleep,
struct gpio_desc **desc_array,
int *value_array);
+/* This is just passed between gpiolib and devres */
+struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
+ const char *propname, int index,
+ enum gpiod_flags dflags,
+ const char *label);
+
extern struct spinlock gpio_lock;
extern struct list_head gpio_devices;
@@ -205,7 +215,7 @@ struct gpio_desc {
#define FLAG_OPEN_SOURCE 8 /* Gpio is open source type */
#define FLAG_USED_AS_IRQ 9 /* GPIO is connected to an IRQ */
#define FLAG_IS_HOGGED 11 /* GPIO is hogged */
-#define FLAG_SLEEP_MAY_LOSE_VALUE 12 /* GPIO may lose value in sleep */
+#define FLAG_TRANSITORY 12 /* GPIO may lose value in sleep or reset */
/* Connection label */
const char *label;
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index b3c6e997ccdb..9a17bd3639d1 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -559,10 +559,10 @@ EXPORT_SYMBOL(drm_read);
*
* Mask of POLL flags indicating the current status of the file.
*/
-unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
+__poll_t drm_poll(struct file *filp, struct poll_table_struct *wait)
{
struct drm_file *file_priv = filp->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(filp, &file_priv->event_wait, wait);
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 59ee808f8fd9..d453756ca128 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -2331,12 +2331,12 @@ static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
*
* Returns: any poll events that are ready without sleeping
*/
-static unsigned int i915_perf_poll_locked(struct drm_i915_private *dev_priv,
+static __poll_t i915_perf_poll_locked(struct drm_i915_private *dev_priv,
struct i915_perf_stream *stream,
struct file *file,
poll_table *wait)
{
- unsigned int events = 0;
+ __poll_t events = 0;
stream->ops->poll_wait(stream, file, wait);
@@ -2365,11 +2365,11 @@ static unsigned int i915_perf_poll_locked(struct drm_i915_private *dev_priv,
*
* Returns: any poll events that are ready without sleeping
*/
-static unsigned int i915_perf_poll(struct file *file, poll_table *wait)
+static __poll_t i915_perf_poll(struct file *file, poll_table *wait)
{
struct i915_perf_stream *stream = file->private_data;
struct drm_i915_private *dev_priv = stream->dev_priv;
- int ret;
+ __poll_t ret;
mutex_lock(&dev_priv->perf.lock);
ret = i915_perf_poll_locked(dev_priv, stream, file, wait);
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
index 8fdc56c1c953..b9bfa806d346 100644
--- a/drivers/gpu/drm/r128/r128_state.c
+++ b/drivers/gpu/drm/r128/r128_state.c
@@ -982,25 +982,14 @@ static int r128_cce_dispatch_write_pixels(struct drm_device *dev,
xbuf_size = count * sizeof(*x);
ybuf_size = count * sizeof(*y);
- x = kmalloc(xbuf_size, GFP_KERNEL);
- if (x == NULL)
- return -ENOMEM;
- y = kmalloc(ybuf_size, GFP_KERNEL);
- if (y == NULL) {
- kfree(x);
- return -ENOMEM;
- }
- if (copy_from_user(x, depth->x, xbuf_size)) {
- kfree(x);
- kfree(y);
- return -EFAULT;
- }
- if (copy_from_user(y, depth->y, xbuf_size)) {
+ x = memdup_user(depth->x, xbuf_size);
+ if (IS_ERR(x))
+ return PTR_ERR(x);
+ y = memdup_user(depth->y, ybuf_size);
+ if (IS_ERR(y)) {
kfree(x);
- kfree(y);
- return -EFAULT;
+ return PTR_ERR(y);
}
-
buffer_size = depth->n * sizeof(u32);
buffer = memdup_user(depth->buffer, buffer_size);
if (IS_ERR(buffer)) {
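memdup_user() folds the kmalloc()/copy_from_user()/unwind boilerplate into one call that returns either the filled buffer or an ERR_PTR(); as a side effect, the conversion also retires the old code's copy of xbuf_size bytes into the y buffer, which was harmless here only because both sizes happen to be equal. The pattern in miniature:

	/* before: every failure path unwound by hand */
	static void *demo_dup_old(const void __user *uptr, size_t size)
	{
		void *buf = kmalloc(size, GFP_KERNEL);

		if (!buf)
			return ERR_PTR(-ENOMEM);
		if (copy_from_user(buf, uptr, size)) {
			kfree(buf);
			return ERR_PTR(-EFAULT);
		}
		return buf;
	}

	/* after: the same contract in one call */
	static void *demo_dup_new(const void __user *uptr, size_t size)
	{
		return memdup_user(uptr, size);
	}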
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 7e5f30e234b1..d08753e8fd94 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -713,7 +713,7 @@ extern int vmw_present_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern unsigned int vmw_fops_poll(struct file *filp,
+extern __poll_t vmw_fops_poll(struct file *filp,
struct poll_table_struct *wait);
extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
size_t count, loff_t *offset);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 01be355525e4..67f844678ac8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -412,7 +412,7 @@ out_clips:
* Wrapper around the drm_poll function that makes sure the device is
* processing the fifo if drm_poll decides to wait.
*/
-unsigned int vmw_fops_poll(struct file *filp, struct poll_table_struct *wait)
+__poll_t vmw_fops_poll(struct file *filp, struct poll_table_struct *wait)
{
struct drm_file *file_priv = filp->private_data;
struct vmw_private *dev_priv =
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index d35d6d271f3f..dfd8d0048980 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -1266,7 +1266,7 @@ done:
return ret_val;
}
-static unsigned int vga_arb_fpoll(struct file *file, poll_table *wait)
+static __poll_t vga_arb_fpoll(struct file *file, poll_table *wait)
{
pr_debug("%s\n", __func__);
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 779c5ae47f36..19c499f5623d 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -280,6 +280,7 @@ config HID_ELECOM
---help---
Support for ELECOM devices:
- BM084 Bluetooth Mouse
+ - EX-G Trackball (Wired and wireless)
- DEFT Trackball (Wired and wireless)
- HUGE Trackball (Wired and wireless)
@@ -396,6 +397,17 @@ config HID_ITE
---help---
Support for ITE devices not fully compliant with HID standard.
+config HID_JABRA
+ tristate "Jabra USB HID Driver"
+ depends on HID
+ ---help---
+ Support for Jabra USB HID devices.
+
+ Prevents mapping of vendor-defined HID usages to input events. Without
+ this driver, HID reports from Jabra devices may incorrectly be seen as
+ mouse button events.
+ Say M here if you may ever plug in a Jabra USB device.
+
config HID_TWINHAN
tristate "Twinhan IR remote control"
depends on HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 235bd2a7b333..eb13b9e92d85 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -2,7 +2,7 @@
#
# Makefile for the HID driver
#
-hid-y := hid-core.o hid-input.o
+hid-y := hid-core.o hid-input.o hid-quirks.o
hid-$(CONFIG_DEBUG_FS) += hid-debug.o
obj-$(CONFIG_HID) += hid.o
@@ -52,6 +52,7 @@ obj-$(CONFIG_HID_HOLTEK) += hid-holtekff.o
obj-$(CONFIG_HID_HYPERV_MOUSE) += hid-hyperv.o
obj-$(CONFIG_HID_ICADE) += hid-icade.o
obj-$(CONFIG_HID_ITE) += hid-ite.o
+obj-$(CONFIG_HID_JABRA) += hid-jabra.o
obj-$(CONFIG_HID_KENSINGTON) += hid-kensington.o
obj-$(CONFIG_HID_KEYTOUCH) += hid-keytouch.o
obj-$(CONFIG_HID_KYE) += hid-kye.o
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 1bb7b63b3150..88b9703318e4 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -26,6 +26,7 @@
* any later version.
*/
+#include <linux/dmi.h>
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/input/mt.h>
@@ -119,6 +120,24 @@ static const struct asus_touchpad_info asus_t100ta_tp = {
.max_contacts = 5,
};
+static const struct asus_touchpad_info asus_t100ha_tp = {
+ .max_x = 2640,
+ .max_y = 1320,
+ .res_x = 30, /* units/mm */
+ .res_y = 29, /* units/mm */
+ .contact_size = 5,
+ .max_contacts = 5,
+};
+
+static const struct asus_touchpad_info asus_t200ta_tp = {
+ .max_x = 3120,
+ .max_y = 1716,
+ .res_x = 30, /* units/mm */
+ .res_y = 28, /* units/mm */
+ .contact_size = 5,
+ .max_contacts = 5,
+};
+
static const struct asus_touchpad_info asus_t100chi_tp = {
.max_x = 2640,
.max_y = 1320,
@@ -606,7 +625,17 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (intf->altsetting->desc.bInterfaceNumber == T100_TPAD_INTF) {
drvdata->quirks = QUIRK_SKIP_INPUT_MAPPING;
- drvdata->tp = &asus_t100ta_tp;
+ /*
+ * The T100HA uses the same USB-ids as the T100TAF and
+ * the T200TA uses the same USB-ids as the T100TA, while
+ * both have different max x/y values from the T100TA[F].
+ */
+ if (dmi_match(DMI_PRODUCT_NAME, "T100HAN"))
+ drvdata->tp = &asus_t100ha_tp;
+ else if (dmi_match(DMI_PRODUCT_NAME, "T200TA"))
+ drvdata->tp = &asus_t200ta_tp;
+ else
+ drvdata->tp = &asus_t100ta_tp;
}
}
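Because the T100HA and T200TA reuse USB IDs already claimed by other models, the probe falls back to DMI product strings to pick the right touchpad parameters. The dispatch, distilled into a sketch:

	/* sketch: disambiguate hardware variants that share USB IDs */
	static const struct asus_touchpad_info *demo_pick_tp(void)
	{
		if (dmi_match(DMI_PRODUCT_NAME, "T100HAN"))
			return &asus_t100ha_tp;
		if (dmi_match(DMI_PRODUCT_NAME, "T200TA"))
			return &asus_t200ta_tp;
		return &asus_t100ta_tp;		/* default for the shared IDs */
	}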
@@ -686,9 +715,10 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
hid_info(hdev, "Fixing up Asus notebook report descriptor\n");
rdesc[55] = 0xdd;
}
- /* For the T100TA keyboard dock */
+ /* For the T100TA/T200TA keyboard dock */
if (drvdata->quirks & QUIRK_T100_KEYBOARD &&
- *rsize == 76 && rdesc[73] == 0x81 && rdesc[74] == 0x01) {
+ (*rsize == 76 || *rsize == 101) &&
+ rdesc[73] == 0x81 && rdesc[74] == 0x01) {
hid_info(hdev, "Fixing up Asus T100 keyb report descriptor\n");
rdesc[74] &= ~HID_MAIN_ITEM_CONSTANT;
}
@@ -751,7 +781,10 @@ static const struct hid_device_id asus_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3), QUIRK_G752_KEYBOARD },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
- USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD),
+ USB_DEVICE_ID_ASUSTEK_T100TA_KEYBOARD),
+ QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
+ USB_DEVICE_ID_ASUSTEK_T100TAF_KEYBOARD),
QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_ASUS_AK1D) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_ASUS_MD_5110) },
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 0c3f608131cf..c2560aae5542 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -830,31 +830,6 @@ static int hid_scan_report(struct hid_device *hid)
break;
}
- /* fall back to generic driver in case specific driver doesn't exist */
- switch (hid->group) {
- case HID_GROUP_MULTITOUCH_WIN_8:
- /* fall-through */
- case HID_GROUP_MULTITOUCH:
- if (!IS_ENABLED(CONFIG_HID_MULTITOUCH))
- hid->group = HID_GROUP_GENERIC;
- break;
- case HID_GROUP_SENSOR_HUB:
- if (!IS_ENABLED(CONFIG_HID_SENSOR_HUB))
- hid->group = HID_GROUP_GENERIC;
- break;
- case HID_GROUP_RMI:
- if (!IS_ENABLED(CONFIG_HID_RMI))
- hid->group = HID_GROUP_GENERIC;
- break;
- case HID_GROUP_WACOM:
- if (!IS_ENABLED(CONFIG_HID_WACOM))
- hid->group = HID_GROUP_GENERIC;
- break;
- case HID_GROUP_LOGITECH_DJ_DEVICE:
- if (!IS_ENABLED(CONFIG_HID_LOGITECH_DJ))
- hid->group = HID_GROUP_GENERIC;
- break;
- }
vfree(parser);
return 0;
}
@@ -1597,8 +1572,8 @@ unlock:
}
EXPORT_SYMBOL_GPL(hid_input_report);
-static bool hid_match_one_id(struct hid_device *hdev,
- const struct hid_device_id *id)
+bool hid_match_one_id(const struct hid_device *hdev,
+ const struct hid_device_id *id)
{
return (id->bus == HID_BUS_ANY || id->bus == hdev->bus) &&
(id->group == HID_GROUP_ANY || id->group == hdev->group) &&
@@ -1606,7 +1581,7 @@ static bool hid_match_one_id(struct hid_device *hdev,
(id->product == HID_ANY_ID || id->product == hdev->product);
}
-const struct hid_device_id *hid_match_id(struct hid_device *hdev,
+const struct hid_device_id *hid_match_id(const struct hid_device *hdev,
const struct hid_device_id *id)
{
for (; id->bus; id++)
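hid_match_one_id() compares bus, group, vendor and product, each of which may be a wildcard (HID_BUS_ANY, HID_GROUP_ANY, HID_ANY_ID), and hid_match_id() walks a zero-terminated table of such entries; un-staticing them lets hid-generic and other modules reuse the exact matching logic of the core. A sketch of the wildcard semantics (vendor ID illustrative):

	static const struct hid_device_id demo_table[] = {
		/* any product from vendor 0x046d, on any bus, in any group */
		{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, 0x046d, HID_ANY_ID) },
		{ }				/* terminator: id->bus == 0 */
	};

	static bool demo_is_listed(struct hid_device *hdev)
	{
		return hid_match_id(hdev, demo_table) != NULL;
	}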
@@ -1862,541 +1837,6 @@ void hid_hw_close(struct hid_device *hdev)
}
EXPORT_SYMBOL_GPL(hid_hw_close);
-/*
- * A list of devices for which there is a specialized driver on HID bus.
- *
- * Please note that for multitouch devices (driven by hid-multitouch driver),
- * there is a proper autodetection and autoloading in place (based on presence
- * of HID_DG_CONTACTID), so those devices don't need to be added to this list,
- * as we are doing the right thing in hid_scan_usage().
- *
- * Autodetection for (USB) HID sensor hubs exists too. If a collection of type
- * physical is found inside a usage page of type sensor, hid-sensor-hub will be
- * used as a driver. See hid_scan_report().
- */
-static const struct hid_device_id hid_have_special_driver[] = {
-#if IS_ENABLED(CONFIG_HID_A4TECH)
- { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
- { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) },
- { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ACCUTOUCH)
- { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ACRUX)
- { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0xf705) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ALPS)
- { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1) },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_T4_BTNLESS) },
-#endif
-#if IS_ENABLED(CONFIG_HID_APPLE)
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
-#endif
-#if IS_ENABLED(CONFIG_HID_APPLEIR)
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL3) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL5) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ASUS)
- { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD) },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_ASUS_MD_5112) },
- { HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_ASUS_MD_5110) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100CHI_KEYBOARD) },
-#endif
-#if IS_ENABLED(CONFIG_HID_AUREAL)
- { HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) },
-#endif
-#if IS_ENABLED(CONFIG_HID_BELKIN)
- { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
-#endif
-#if IS_ENABLED(CONFIG_HID_BETOP_FF)
- { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) },
- { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185PC, 0x5506) },
- { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2PC, 0x1850) },
- { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2BFM, 0x5500) },
-#endif
-#if IS_ENABLED(CONFIG_HID_CHERRY)
- { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
-#endif
-#if IS_ENABLED(CONFIG_HID_CHICONY)
- { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_ASUS_AK1D) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
-#endif
-#if IS_ENABLED(CONFIG_HID_CMEDIA)
- { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM6533) },
-#endif
-#if IS_ENABLED(CONFIG_HID_CORSAIR)
- { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) },
-#endif
-#if IS_ENABLED(CONFIG_HID_CP2112)
- { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) },
-#endif
-#if IS_ENABLED(CONFIG_HID_CYPRESS)
- { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
-#endif
-#if IS_ENABLED(CONFIG_HID_DRAGONRISE)
- { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ELECOM)
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRELESS) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ELO)
- { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0030) },
-#endif
-#if IS_ENABLED(CONFIG_HID_EMS_FF)
- { HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) },
-#endif
-#if IS_ENABLED(CONFIG_HID_EZKEY)
- { HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
-#endif
-#if IS_ENABLED(CONFIG_HID_GEMBIRD)
- { HID_USB_DEVICE(USB_VENDOR_ID_GEMBIRD, USB_DEVICE_ID_GEMBIRD_JPD_DUALFORCE2) },
-#endif
-#if IS_ENABLED(CONFIG_HID_GFRM)
- { HID_BLUETOOTH_DEVICE(0x58, 0x2000) },
- { HID_BLUETOOTH_DEVICE(0x471, 0x2210) },
-#endif
-#if IS_ENABLED(CONFIG_HID_GREENASIA)
- { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) },
-#endif
-#if IS_ENABLED(CONFIG_HID_GT683R)
- { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
-#endif
-#if IS_ENABLED(CONFIG_HID_GYRATION)
- { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
-#endif
-#if IS_ENABLED(CONFIG_HID_HOLTEK)
- { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) },
- { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },
- { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A070) },
- { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) },
- { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
- { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ICADE)
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ITE)
- { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) },
-#endif
-#if IS_ENABLED(CONFIG_HID_KENSINGTON)
- { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
-#endif
-#if IS_ENABLED(CONFIG_HID_KEYTOUCH)
- { HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) },
-#endif
-#if IS_ENABLED(CONFIG_HID_KYE)
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_MANTICORE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GX_IMPERATOR) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912) },
-#endif
-#if IS_ENABLED(CONFIG_HID_LCPOWER)
- { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
-#endif
-#if IS_ENABLED(CONFIG_HID_LED)
- { HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, USB_DEVICE_ID_DELCOM_VISUAL_IND) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_LUXAFOR) },
- { HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) },
-#endif
-#if IS_ENABLED(CONFIG_HID_LENOVO)
- { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPPRODOCK) },
-#endif
-#if IS_ENABLED(CONFIG_HID_LOGITECH)
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DUAL_ACTION) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G29_WHEEL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_F3D) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG ) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFP_WHEEL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G27_WHEEL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) },
-#endif
-#if IS_ENABLED(CONFIG_HID_LOGITECH_HIDPP)
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_T651) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL) },
-#endif
-#if IS_ENABLED(CONFIG_HID_LOGITECH_DJ)
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2) },
-#endif
-#if IS_ENABLED(CONFIG_HID_MAGICMOUSE)
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) },
-#endif
-#if IS_ENABLED(CONFIG_HID_MAYFLASH)
- { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2) },
-#endif
-#if IS_ENABLED(CONFIG_HID_MICROSOFT)
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_KEYBOARD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE7K) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_OFFICE_KB) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
-#endif
-#if IS_ENABLED(CONFIG_HID_MONTEREY)
- { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
-#endif
-#if IS_ENABLED(CONFIG_HID_MULTITOUCH)
- { HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MELFAS_MT) },
-#endif
-#if IS_ENABLED(CONFIG_HID_WIIMOTE)
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
-#endif
-#if IS_ENABLED(CONFIG_HID_NTI)
- { HID_USB_DEVICE(USB_VENDOR_ID_NTI, USB_DEVICE_ID_USB_SUN) },
-#endif
-#if IS_ENABLED(CONFIG_HID_NTRIG)
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_3) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_4) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_5) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_6) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_7) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_8) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_9) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_10) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_11) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_12) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_13) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_14) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_15) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ORTEK)
- { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
-#endif
-#if IS_ENABLED(CONFIG_HID_PANTHERLORD)
- { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003) },
- { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
-#endif
-#if IS_ENABLED(CONFIG_HID_PENMOUNT)
- { HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_6000) },
-#endif
-#if IS_ENABLED(CONFIG_HID_PETALYNX)
- { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
-#endif
-#if IS_ENABLED(CONFIG_HID_PICOLCD)
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
-#endif
-#if IS_ENABLED(CONFIG_HID_PLANTRONICS)
- { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
-#endif
-#if IS_ENABLED(CONFIG_HID_PRIMAX)
- { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
-#endif
-#if IS_ENABLED(CONFIG_HID_PRODIKEYS)
- { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
-#endif
-#if IS_ENABLED(CONFIG_HID_RETRODE)
- { HID_USB_DEVICE(USB_VENDOR_ID_FUTURE_TECHNOLOGY, USB_DEVICE_ID_RETRODE2) },
-#endif
-#if IS_ENABLED(CONFIG_HID_RMI)
- { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ROCCAT)
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKUFX) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPLUS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPURE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPURE_OPTICAL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEXTD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KOVAPLUS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_LUA) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRED) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK_GLOW) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK_PRO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_SAVU) },
-#endif
-#if IS_ENABLED(CONFIG_HID_SAITEK)
- { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_OLD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT9) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT5) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) },
-#endif
-#if IS_ENABLED(CONFIG_HID_SAMSUNG)
- { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
-#endif
-#if IS_ENABLED(CONFIG_HID_SMARTJOYPLUS)
- { HID_USB_DEVICE(USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) },
-#endif
-#if IS_ENABLED(CONFIG_HID_SONY)
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_PS3_BDREMOTE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_MOTION_CONTROLLER) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_MOTION_CONTROLLER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_BDREMOTE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) },
-#endif
-#if IS_ENABLED(CONFIG_HID_SPEEDLINK)
- { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
-#endif
-#if IS_ENABLED(CONFIG_HID_STEELSERIES)
- { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
-#endif
-#if IS_ENABLED(CONFIG_HID_SUNPLUS)
- { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
-#endif
-#if IS_ENABLED(CONFIG_HID_THRUSTMASTER)
- { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) },
- { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) },
- { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323) },
- { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324) },
- { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb605) },
- { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651) },
- { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653) },
- { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) },
- { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) },
-#endif
-#if IS_ENABLED(CONFIG_HID_TIVO)
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) },
- { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) },
-#endif
-#if IS_ENABLED(CONFIG_HID_TOPSEED)
- { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
- { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
-#endif
-#if IS_ENABLED(CONFIG_HID_TWINHAN)
- { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) },
-#endif
-#if IS_ENABLED(CONFIG_HID_UCLOGIC)
- { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_HUION_TABLET) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP1062) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_YIYNOVA_TABLET) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_81) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) },
- { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
-#endif
-#if IS_ENABLED(CONFIG_HID_UDRAW_PS3)
- { HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) },
-#endif
-#if IS_ENABLED(CONFIG_HID_WALTOP)
- { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_Q_PAD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_PID_0038) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) },
-#endif
-#if IS_ENABLED(CONFIG_HID_XINMO)
- { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ZEROPLUS)
- { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
-#endif
-#if IS_ENABLED(CONFIG_HID_ZYDACRON)
- { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
-#endif
- { }
-};
-
struct hid_dynid {
struct list_head list;
struct hid_device_id id;
@@ -2463,8 +1903,8 @@ static void hid_free_dynids(struct hid_driver *hdrv)
spin_unlock(&hdrv->dyn_lock);
}
-static const struct hid_device_id *hid_match_device(struct hid_device *hdev,
- struct hid_driver *hdrv)
+const struct hid_device_id *hid_match_device(struct hid_device *hdev,
+ struct hid_driver *hdrv)
{
struct hid_dynid *dynid;
@@ -2479,6 +1919,7 @@ static const struct hid_device_id *hid_match_device(struct hid_device *hdev,
return hid_match_id(hdev, hdrv->id_table);
}
+EXPORT_SYMBOL_GPL(hid_match_device);
static int hid_bus_match(struct device *dev, struct device_driver *drv)
{
@@ -2508,6 +1949,23 @@ static int hid_device_probe(struct device *dev)
goto unlock;
}
+ if (hdrv->match) {
+ if (!hdrv->match(hdev, hid_ignore_special_drivers)) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+ } else {
+ /*
+		 * hid-generic implements .match(), so a driver without
+		 * it is a special driver; if hid_ignore_special_drivers
+		 * is set, we can safely reject it here.
+ */
+ if (hid_ignore_special_drivers) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+ }
+
hdev->driver = hdrv;
if (hdrv->probe) {
ret = hdrv->probe(hdev, id);
@@ -2604,7 +2062,7 @@ static int hid_uevent(struct device *dev, struct kobj_uevent_env *env)
return 0;
}
-static struct bus_type hid_bus_type = {
+struct bus_type hid_bus_type = {
.name = "hid",
.dev_groups = hid_dev_groups,
.drv_groups = hid_drv_groups,
@@ -2613,315 +2071,7 @@ static struct bus_type hid_bus_type = {
.remove = hid_device_remove,
.uevent = hid_uevent,
};
-
-/* a list of devices that shouldn't be handled by HID core at all */
-static const struct hid_device_id hid_ignore_list[] = {
- { HID_USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_FLAIR) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_302) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ADS_TECH, USB_DEVICE_ID_ADS_TECH_RADIO_SI470X) },
- { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_01) },
- { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_10) },
- { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_20) },
- { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_21) },
- { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_22) },
- { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_23) },
- { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_24) },
- { HID_USB_DEVICE(USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)},
- { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)},
- { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) },
- { HID_USB_DEVICE(USB_VENDOR_ID_AXENTIA, USB_DEVICE_ID_AXENTIA_FM_RADIO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI470X) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI4713) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM109) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_HIDCOM) },
- { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_ULTRAMOUSE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) },
- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0401) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0004) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_SUPER_Q2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_GOGOPEN) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_PENPOWER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GRETAGMACBETH, USB_DEVICE_ID_GRETAGMACBETH_HUEY) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_POWERMATE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_SOUNDKNOB) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_RADIOSHARK) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_90) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_100) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_101) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_103) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_104) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_105) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_106) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_107) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_108) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_200) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_201) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_202) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_203) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_204) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_205) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_206) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_207) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_300) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_301) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_302) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_303) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_304) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_305) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_306) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_307) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_308) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_309) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_400) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_401) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_402) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_403) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_404) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_405) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_500) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_501) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_502) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_503) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_504) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1000) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1001) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1002) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1003) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1004) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1005) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1006) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1007) },
- { HID_USB_DEVICE(USB_VENDOR_ID_IMATION, USB_DEVICE_ID_DISC_STAKKA) },
- { HID_USB_DEVICE(USB_VENDOR_ID_JABRA, USB_DEVICE_ID_JABRA_SPEAK_410) },
- { HID_USB_DEVICE(USB_VENDOR_ID_JABRA, USB_DEVICE_ID_JABRA_SPEAK_510) },
- { HID_USB_DEVICE(USB_VENDOR_ID_JABRA, USB_DEVICE_ID_JABRA_GN9350E) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KBGEAR, USB_DEVICE_ID_KBGEAR_JAMSTUDIO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KWORLD, USB_DEVICE_ID_KWORLD_RADIO_FM700) },
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_GPEN_560) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_KYE, 0x0058) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOBILECASSY) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOBILECASSY2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYVOLTAGE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYCURRENT) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIC) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIB) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_XRAY) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_XRAY2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_VIDEOCOM) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOTOR) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_COM3LAB) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_TELEPORT) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_NETWORKANALYSER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERCONTROL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETEST) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOSTANALYSER) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOSTANALYSER2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_ABSESP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_AUTODATABUS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MCT) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) },
- { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICK16F1454) },
- { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICK16F1454_V2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_NATIONAL_SEMICONDUCTOR, USB_DEVICE_ID_N_S_HARMONY) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 20) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 30) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 100) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 108) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 118) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 200) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 300) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 400) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 500) },
- { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0001) },
- { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0002) },
- { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0003) },
- { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0004) },
- { HID_USB_DEVICE(USB_VENDOR_ID_PETZL, USB_DEVICE_ID_PETZL_HEADLAMP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) },
- { HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) },
-#if IS_ENABLED(CONFIG_MOUSE_SYNAPTICS_USB)
- { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_INT_TP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_CPAD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_STICK) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_WP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_COMP_TP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_WTP) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DPAD) },
-#endif
- { HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) },
- { }
-};
-
-/**
- * hid_mouse_ignore_list - mouse devices which should not be handled by the hid layer
- *
- * There are composite devices for which we want to ignore only a certain
- * interface. This is a list of devices for which only the mouse interface will
- * be ignored. This allows a dedicated driver to take care of the interface.
- */
-static const struct hid_device_id hid_mouse_ignore_list[] = {
- /* appletouch driver */
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
- { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
- { }
-};
-
-bool hid_ignore(struct hid_device *hdev)
-{
- if (hdev->quirks & HID_QUIRK_NO_IGNORE)
- return false;
- if (hdev->quirks & HID_QUIRK_IGNORE)
- return true;
-
- switch (hdev->vendor) {
- case USB_VENDOR_ID_CODEMERCS:
- /* ignore all Code Mercenaries IOWarrior devices */
- if (hdev->product >= USB_DEVICE_ID_CODEMERCS_IOW_FIRST &&
- hdev->product <= USB_DEVICE_ID_CODEMERCS_IOW_LAST)
- return true;
- break;
- case USB_VENDOR_ID_LOGITECH:
- if (hdev->product >= USB_DEVICE_ID_LOGITECH_HARMONY_FIRST &&
- hdev->product <= USB_DEVICE_ID_LOGITECH_HARMONY_LAST)
- return true;
- /*
- * The Keene FM transmitter USB device has the same USB ID as
- * the Logitech AudioHub Speaker, but it should ignore the hid.
- * Check if the name is that of the Keene device.
- * For reference: the name of the AudioHub is
- * "HOLTEK AudioHub Speaker".
- */
- if (hdev->product == USB_DEVICE_ID_LOGITECH_AUDIOHUB &&
- !strcmp(hdev->name, "HOLTEK B-LINK USB Audio "))
- return true;
- break;
- case USB_VENDOR_ID_SOUNDGRAPH:
- if (hdev->product >= USB_DEVICE_ID_SOUNDGRAPH_IMON_FIRST &&
- hdev->product <= USB_DEVICE_ID_SOUNDGRAPH_IMON_LAST)
- return true;
- break;
- case USB_VENDOR_ID_HANWANG:
- if (hdev->product >= USB_DEVICE_ID_HANWANG_TABLET_FIRST &&
- hdev->product <= USB_DEVICE_ID_HANWANG_TABLET_LAST)
- return true;
- break;
- case USB_VENDOR_ID_JESS:
- if (hdev->product == USB_DEVICE_ID_JESS_YUREX &&
- hdev->type == HID_TYPE_USBNONE)
- return true;
- break;
- case USB_VENDOR_ID_VELLEMAN:
- /* These are not HID devices. They are handled by comedi. */
- if ((hdev->product >= USB_DEVICE_ID_VELLEMAN_K8055_FIRST &&
- hdev->product <= USB_DEVICE_ID_VELLEMAN_K8055_LAST) ||
- (hdev->product >= USB_DEVICE_ID_VELLEMAN_K8061_FIRST &&
- hdev->product <= USB_DEVICE_ID_VELLEMAN_K8061_LAST))
- return true;
- break;
- case USB_VENDOR_ID_ATMEL_V_USB:
- /* Masterkit MA901 usb radio based on Atmel tiny85 chip and
- * it has the same USB ID as many Atmel V-USB devices. This
- * usb radio is handled by radio-ma901.c driver so we want
- * ignore the hid. Check the name, bus, product and ignore
- * if we have MA901 usb radio.
- */
- if (hdev->product == USB_DEVICE_ID_ATMEL_V_USB &&
- hdev->bus == BUS_USB &&
- strncmp(hdev->name, "www.masterkit.ru MA901", 22) == 0)
- return true;
- break;
- }
-
- if (hdev->type == HID_TYPE_USBMOUSE &&
- hid_match_id(hdev, hid_mouse_ignore_list))
- return true;
-
- return !!hid_match_id(hdev, hid_ignore_list);
-}
-EXPORT_SYMBOL_GPL(hid_ignore);
+EXPORT_SYMBOL(hid_bus_type);
int hid_add_device(struct hid_device *hdev)
{
@@ -2931,6 +2081,8 @@ int hid_add_device(struct hid_device *hdev)
if (WARN_ON(hdev->status & HID_STAT_ADDED))
return -EBUSY;
+ hdev->quirks = hid_lookup_quirk(hdev);
+
/* we need to kill them here, otherwise they will stay allocated to
* wait for coming driver */
if (hid_ignore(hdev))
@@ -2960,7 +2112,7 @@ int hid_add_device(struct hid_device *hdev)
if (hid_ignore_special_drivers) {
hdev->group = HID_GROUP_GENERIC;
} else if (!hdev->group &&
- !hid_match_id(hdev, hid_have_special_driver)) {
+ !(hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER)) {
ret = hid_scan_report(hdev);
if (ret)
hid_warn(hdev, "bad device descriptor (%d)\n", ret);
@@ -3044,6 +2196,29 @@ void hid_destroy_device(struct hid_device *hdev)
}
EXPORT_SYMBOL_GPL(hid_destroy_device);
+
+static int __bus_add_driver(struct device_driver *drv, void *data)
+{
+ struct hid_driver *added_hdrv = data;
+ struct hid_driver *hdrv = to_hid_driver(drv);
+
+ if (hdrv->bus_add_driver)
+ hdrv->bus_add_driver(added_hdrv);
+
+ return 0;
+}
+
+static int __bus_removed_driver(struct device_driver *drv, void *data)
+{
+ struct hid_driver *removed_hdrv = data;
+ struct hid_driver *hdrv = to_hid_driver(drv);
+
+ if (hdrv->bus_removed_driver)
+ hdrv->bus_removed_driver(removed_hdrv);
+
+ return 0;
+}
+
int __hid_register_driver(struct hid_driver *hdrv, struct module *owner,
const char *mod_name)
{
@@ -3055,6 +2230,8 @@ int __hid_register_driver(struct hid_driver *hdrv, struct module *owner,
INIT_LIST_HEAD(&hdrv->dyn_list);
spin_lock_init(&hdrv->dyn_lock);
+ bus_for_each_drv(&hid_bus_type, NULL, hdrv, __bus_add_driver);
+
return driver_register(&hdrv->driver);
}
EXPORT_SYMBOL_GPL(__hid_register_driver);
@@ -3063,6 +2240,8 @@ void hid_unregister_driver(struct hid_driver *hdrv)
{
driver_unregister(&hdrv->driver);
hid_free_dynids(hdrv);
+
+ bus_for_each_drv(&hid_bus_type, NULL, hdrv, __bus_removed_driver);
}
EXPORT_SYMBOL_GPL(hid_unregister_driver);
@@ -3117,6 +2296,7 @@ static void __exit hid_exit(void)
hid_debug_exit();
hidraw_exit();
bus_unregister(&hid_bus_type);
+ hid_quirks_exit(HID_BUS_ANY);
}
module_init(hid_init);
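
The probe-path changes above add an optional .match() callback to struct hid_driver and export hid_match_device() so drivers other than hid-core can use it. Below is a minimal sketch of a specialized driver adopting the new callback; the driver name and device IDs are hypothetical, only the .match() signature and the exported helper come from the hunks above:

#include <linux/hid.h>
#include <linux/module.h>

static struct hid_driver foo_driver;

static const struct hid_device_id foo_devices[] = {
	{ HID_USB_DEVICE(0x1234, 0x5678) },	/* placeholder IDs */
	{ }
};

static bool foo_match(struct hid_device *hdev, bool ignore_special_drivers)
{
	/* Special drivers step aside when hid-generic is forced on. */
	if (ignore_special_drivers)
		return false;

	/* Otherwise claim only the devices in our id_table. */
	return hid_match_device(hdev, &foo_driver) != NULL;
}

static struct hid_driver foo_driver = {
	.name		= "foo",
	.id_table	= foo_devices,
	.match		= foo_match,
};
module_hid_driver(foo_driver);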
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 5271db593478..c783fd5ef809 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -1179,7 +1179,7 @@ out:
return ret;
}
-static unsigned int hid_debug_events_poll(struct file *file, poll_table *wait)
+static __poll_t hid_debug_events_poll(struct file *file, poll_table *wait)
{
struct hid_debug_list *list = file->private_data;
diff --git a/drivers/hid/hid-elecom.c b/drivers/hid/hid-elecom.c
index 54aeea57d209..1a1ecc491c02 100644
--- a/drivers/hid/hid-elecom.c
+++ b/drivers/hid/hid-elecom.c
@@ -1,9 +1,15 @@
/*
- * HID driver for ELECOM devices.
+ * HID driver for ELECOM devices:
+ * - BM084 Bluetooth Mouse
+ * - EX-G Trackball (Wired and wireless)
+ * - DEFT Trackball (Wired and wireless)
+ * - HUGE Trackball (Wired and wireless)
+ *
* Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com>
* Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com>
* Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu>
* Copyright (c) 2017 Alex Manoussakis <amanou@gnu.org>
+ * Copyright (c) 2017 Tomasz Kramkowski <tk@the-tk.com>
*/
/*
@@ -19,6 +25,34 @@
#include "hid-ids.h"
+/*
+ * Certain ELECOM mice misreport their button count, meaning that they only
+ * work correctly with the ELECOM mouse assistant software, which is
+ * unavailable for Linux. Four extra INPUT reports and a FEATURE report are
+ * described by the report descriptor, but it does not appear that these
+ * enable software to control what the extra buttons map to. The only simple
+ * and straightforward solution seems to involve fixing up the report
+ * descriptor.
+ *
+ * Report descriptor format:
+ * Positions 13, 15, 21 and 31 store the button bit count, button usage minimum,
+ * button usage maximum and padding bit count respectively.
+ */
+#define MOUSE_BUTTONS_MAX 8
+static void mouse_button_fixup(struct hid_device *hdev,
+ __u8 *rdesc, unsigned int rsize,
+ int nbuttons)
+{
+ if (rsize < 32 || rdesc[12] != 0x95 ||
+ rdesc[14] != 0x75 || rdesc[15] != 0x01 ||
+ rdesc[20] != 0x29 || rdesc[30] != 0x75)
+ return;
+ hid_info(hdev, "Fixing up Elecom mouse button count\n");
+ nbuttons = clamp(nbuttons, 0, MOUSE_BUTTONS_MAX);
+ rdesc[13] = nbuttons;
+ rdesc[21] = nbuttons;
+ rdesc[31] = MOUSE_BUTTONS_MAX - nbuttons;
+}
+
static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
@@ -31,45 +65,15 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
rdesc[47] = 0x00;
}
break;
+ case USB_DEVICE_ID_ELECOM_EX_G_WIRED:
+ case USB_DEVICE_ID_ELECOM_EX_G_WIRELESS:
+ mouse_button_fixup(hdev, rdesc, *rsize, 6);
+ break;
case USB_DEVICE_ID_ELECOM_DEFT_WIRED:
case USB_DEVICE_ID_ELECOM_DEFT_WIRELESS:
case USB_DEVICE_ID_ELECOM_HUGE_WIRED:
case USB_DEVICE_ID_ELECOM_HUGE_WIRELESS:
- /* The DEFT/HUGE trackball has eight buttons, but its descriptor
- * only reports five, disabling the three Fn buttons on the top
- * of the mouse.
- *
- * Apply the following diff to the descriptor:
- *
- * Collection (Physical), Collection (Physical),
- * Report ID (1), Report ID (1),
- * Report Count (5), -> Report Count (8),
- * Report Size (1), Report Size (1),
- * Usage Page (Button), Usage Page (Button),
- * Usage Minimum (01h), Usage Minimum (01h),
- * Usage Maximum (05h), -> Usage Maximum (08h),
- * Logical Minimum (0), Logical Minimum (0),
- * Logical Maximum (1), Logical Maximum (1),
- * Input (Variable), Input (Variable),
- * Report Count (1), -> Report Count (0),
- * Report Size (3), Report Size (3),
- * Input (Constant), Input (Constant),
- * Report Size (16), Report Size (16),
- * Report Count (2), Report Count (2),
- * Usage Page (Desktop), Usage Page (Desktop),
- * Usage (X), Usage (X),
- * Usage (Y), Usage (Y),
- * Logical Minimum (-32768), Logical Minimum (-32768),
- * Logical Maximum (32767), Logical Maximum (32767),
- * Input (Variable, Relative), Input (Variable, Relative),
- * End Collection, End Collection,
- */
- if (*rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) {
- hid_info(hdev, "Fixing up Elecom DEFT/HUGE Fn buttons\n");
- rdesc[13] = 8; /* Button/Variable Report Count */
- rdesc[21] = 8; /* Button/Variable Usage Maximum */
- rdesc[29] = 0; /* Button/Constant Report Count */
- }
+ mouse_button_fixup(hdev, rdesc, *rsize, 8);
break;
}
return rdesc;
@@ -77,6 +81,8 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
static const struct hid_device_id elecom_devices[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_EX_G_WIRED) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_EX_G_WIRELESS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) },
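
For reference, the three writes mouse_button_fixup() makes correspond to the offsets documented in its lead comment. A worked example for the eight-button DEFT/HUGE case, with values taken from that comment rather than from a descriptor dump:

/*
 * mouse_button_fixup(hdev, rdesc, *rsize, 8) leaves the descriptor with:
 *
 *   rdesc[13] = 8;   button bit count (Report Count)
 *   rdesc[21] = 8;   button Usage Maximum
 *   rdesc[31] = 0;   padding bit count (MOUSE_BUTTONS_MAX - 8)
 *
 * The EX-G case passes nbuttons = 6 instead, leaving two padding bits
 * so the button field stays byte-aligned.
 */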
diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c
index 0cd4f7216239..5eea6fe0d7bd 100644
--- a/drivers/hid/hid-elo.c
+++ b/drivers/hid/hid-elo.c
@@ -42,6 +42,12 @@ static int elo_input_configured(struct hid_device *hdev,
{
struct input_dev *input = hidinput->input;
+ /*
+	 * ELO devices have one Button usage in the GenDesk field, which makes
+	 * hid-input map it to BTN_LEFT; that confuses userspace, which then
+	 * considers the device to be a mouse/touchpad instead of a touchscreen.
+ */
+ clear_bit(BTN_LEFT, input->keybit);
set_bit(BTN_TOUCH, input->keybit);
set_bit(ABS_PRESSURE, input->absbit);
input_set_abs_params(input, ABS_PRESSURE, 0, 256, 0, 0);
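
A sketch of the evdev capabilities the device ends up advertising after this change, inferred from the code above rather than captured from a real panel:

/*
 * keybit: BTN_TOUCH set, BTN_LEFT cleared
 * absbit: ABS_PRESSURE, range 0..256
 *
 * Userspace heuristics that read BTN_LEFT plus absolute axes as
 * "mouse/touchpad" now fall through to the touchscreen case.
 */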
diff --git a/drivers/hid/hid-generic.c b/drivers/hid/hid-generic.c
index e288a4a06fe8..3c0a1bf433d7 100644
--- a/drivers/hid/hid-generic.c
+++ b/drivers/hid/hid-generic.c
@@ -24,8 +24,71 @@
#include <linux/hid.h>
+static struct hid_driver hid_generic;
+
+static int __unmap_hid_generic(struct device *dev, void *data)
+{
+ struct hid_driver *hdrv = data;
+ struct hid_device *hdev = to_hid_device(dev);
+
+ /* only unbind matching devices already bound to hid-generic */
+ if (hdev->driver != &hid_generic ||
+ hid_match_device(hdev, hdrv) == NULL)
+ return 0;
+
+ if (dev->parent) /* Needed for USB */
+ device_lock(dev->parent);
+ device_release_driver(dev);
+ if (dev->parent)
+ device_unlock(dev->parent);
+
+ return 0;
+}
+
+static void hid_generic_add_driver(struct hid_driver *hdrv)
+{
+ bus_for_each_dev(&hid_bus_type, NULL, hdrv, __unmap_hid_generic);
+}
+
+static void hid_generic_removed_driver(struct hid_driver *hdrv)
+{
+	int ret;
+
+	/*
+	 * A special driver was removed: re-run matching so hid-generic can
+	 * pick up any devices it left behind. driver_attach() is
+	 * __must_check, hence the otherwise unused ret; a failure here
+	 * only means some devices stay unbound, so it is ignored.
+	 */
+	ret = driver_attach(&hid_generic.driver);
+}
+
+static int __check_hid_generic(struct device_driver *drv, void *data)
+{
+ struct hid_driver *hdrv = to_hid_driver(drv);
+ struct hid_device *hdev = data;
+
+ if (hdrv == &hid_generic)
+ return 0;
+
+ return hid_match_device(hdev, hdrv) != NULL;
+}
+
+static bool hid_generic_match(struct hid_device *hdev,
+ bool ignore_special_driver)
+{
+ if (ignore_special_driver)
+ return true;
+
+ if (hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER)
+ return false;
+
+ /*
+	 * If any other driver wants the device, leave the device to that
+	 * driver.
+ */
+ if (bus_for_each_drv(&hid_bus_type, NULL, hdev, __check_hid_generic))
+ return false;
+
+ return true;
+}
+
static const struct hid_device_id hid_table[] = {
- { HID_DEVICE(HID_BUS_ANY, HID_GROUP_GENERIC, HID_ANY_ID, HID_ANY_ID) },
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, HID_ANY_ID, HID_ANY_ID) },
{ }
};
MODULE_DEVICE_TABLE(hid, hid_table);
@@ -33,6 +96,9 @@ MODULE_DEVICE_TABLE(hid, hid_table);
static struct hid_driver hid_generic = {
.name = "hid-generic",
.id_table = hid_table,
+ .match = hid_generic_match,
+ .bus_add_driver = hid_generic_add_driver,
+ .bus_removed_driver = hid_generic_removed_driver,
};
module_hid_driver(hid_generic);
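
Combined with the bus hooks added in hid-core.c, the rebinding flow when a special driver module is loaded looks roughly as follows; this is a call-chain sketch implied by the hunks, not a captured trace:

/*
 * insmod hid-foo.ko
 *   __hid_register_driver(&foo_driver)
 *     bus_for_each_drv(..., __bus_add_driver)
 *       hid_generic_add_driver(&foo_driver)        hid-generic's hook
 *         bus_for_each_dev(..., __unmap_hid_generic)
 *           device_release_driver(dev)             unbind from hid-generic
 *     driver_register(&foo_driver.driver)          foo probes the device
 *
 * Unloading reverses it: hid_unregister_driver() runs
 * __bus_removed_driver, and hid_generic_removed_driver() calls
 * driver_attach() so hid-generic can reclaim the orphaned devices.
 */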
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 5da3d6256d25..43ddcdfbd0da 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -178,7 +178,8 @@
#define USB_VENDOR_ID_ASUSTEK 0x0b05
#define USB_DEVICE_ID_ASUSTEK_LCM 0x1726
#define USB_DEVICE_ID_ASUSTEK_LCM2 0x175b
-#define USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD 0x17e0
+#define USB_DEVICE_ID_ASUSTEK_T100TA_KEYBOARD 0x17e0
+#define USB_DEVICE_ID_ASUSTEK_T100TAF_KEYBOARD 0x1807
#define USB_DEVICE_ID_ASUSTEK_T100CHI_KEYBOARD 0x8502
#define USB_DEVICE_ID_ASUSTEK_T304_KEYBOARD 0x184a
#define USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD 0x8585
@@ -370,6 +371,8 @@
#define USB_VENDOR_ID_ELECOM 0x056e
#define USB_DEVICE_ID_ELECOM_BM084 0x0061
+#define USB_DEVICE_ID_ELECOM_EX_G_WIRED 0x00fb
+#define USB_DEVICE_ID_ELECOM_EX_G_WIRELESS 0x00fc
#define USB_DEVICE_ID_ELECOM_DEFT_WIRED 0x00fe
#define USB_DEVICE_ID_ELECOM_DEFT_WIRELESS 0x00ff
#define USB_DEVICE_ID_ELECOM_HUGE_WIRED 0x010c
@@ -535,6 +538,7 @@
#define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A 0x0a4a
#define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A 0x0b4a
#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE 0x134a
+#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A 0x094a
#define USB_VENDOR_ID_HUION 0x256c
#define USB_DEVICE_ID_HUION_TABLET 0x006e
@@ -1156,6 +1160,7 @@
#define USB_VENDOR_ID_PRIMAX 0x0461
#define USB_DEVICE_ID_PRIMAX_MOUSE_4D22 0x4d22
#define USB_DEVICE_ID_PRIMAX_KEYBOARD 0x4e05
+#define USB_DEVICE_ID_PRIMAX_REZEL 0x4e72
#define USB_VENDOR_ID_RISO_KAGAKU 0x1294 /* Riso Kagaku Corp. */
diff --git a/drivers/hid/hid-jabra.c b/drivers/hid/hid-jabra.c
new file mode 100644
index 000000000000..1f52daf14426
--- /dev/null
+++ b/drivers/hid/hid-jabra.c
@@ -0,0 +1,58 @@
+/*
+ * Jabra USB HID Driver
+ *
+ * Copyright (c) 2017 Niels Skou Olsen <nolsen@jabra.com>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+#define HID_UP_VENDOR_DEFINED_MIN 0xff000000
+#define HID_UP_VENDOR_DEFINED_MAX 0xffff0000
+
+static int jabra_input_mapping(struct hid_device *hdev,
+ struct hid_input *hi,
+ struct hid_field *field,
+ struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ int is_vendor_defined =
+ ((usage->hid & HID_USAGE_PAGE) >= HID_UP_VENDOR_DEFINED_MIN &&
+ (usage->hid & HID_USAGE_PAGE) <= HID_UP_VENDOR_DEFINED_MAX);
+
+ dbg_hid("hid=0x%08x appl=0x%08x coll_idx=0x%02x usage_idx=0x%02x: %s\n",
+ usage->hid,
+ field->application,
+ usage->collection_index,
+ usage->usage_index,
+ is_vendor_defined ? "ignored" : "defaulted");
+
+	/* Ignore vendor-defined usages; map standard usages by default */
+ return is_vendor_defined ? -1 : 0;
+}
+
+static const struct hid_device_id jabra_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_JABRA, HID_ANY_ID) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, jabra_devices);
+
+static struct hid_driver jabra_driver = {
+ .name = "jabra",
+ .id_table = jabra_devices,
+ .input_mapping = jabra_input_mapping,
+};
+module_hid_driver(jabra_driver);
+
+MODULE_AUTHOR("Niels Skou Olsen <nolsen@jabra.com>");
+MODULE_DESCRIPTION("Jabra USB HID Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 65ea23be9677..3b4739bde05d 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -85,11 +85,12 @@ MODULE_LICENSE("GPL");
#define MT_IO_FLAGS_PENDING_SLOTS 2
struct mt_slot {
- __s32 x, y, cx, cy, p, w, h;
+ __s32 x, y, cx, cy, p, w, h, a;
__s32 contactid; /* the device ContactID assigned to this slot */
bool touch_state; /* is the touch valid? */
bool inrange_state; /* is the finger in proximity of the sensor? */
bool confidence_state; /* is the touch made by a finger? */
+ bool has_azimuth; /* the contact reports azimuth */
};
struct mt_class {
@@ -119,6 +120,10 @@ struct mt_device {
unsigned long mt_io_flags; /* mt flags (MT_IO_FLAGS_*) */
int cc_index; /* contact count field index in the report */
int cc_value_index; /* contact count value index in the field */
+ int scantime_index; /* scantime field index in the report */
+ int scantime_val_index; /* scantime value index in the field */
+ int prev_scantime; /* scantime reported in the previous packet */
+ int left_button_state; /* left button state */
unsigned last_slot_field; /* the last field of a slot */
unsigned mt_report_id; /* the report ID of the multitouch device */
unsigned long initial_quirks; /* initial quirks state */
@@ -582,8 +587,15 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
if (!(cls->quirks & MT_QUIRK_NO_AREA)) {
set_abs(hi->input, ABS_MT_TOUCH_MINOR, field,
cls->sn_height);
- input_set_abs_params(hi->input,
- ABS_MT_ORIENTATION, 0, 1, 0, 0);
+
+ /*
+ * Only set ABS_MT_ORIENTATION if it is not
+ * already set by the HID_DG_AZIMUTH usage.
+ */
+ if (!test_bit(ABS_MT_ORIENTATION,
+ hi->input->absbit))
+ input_set_abs_params(hi->input,
+ ABS_MT_ORIENTATION, 0, 1, 0, 0);
}
mt_store_field(usage, td, hi);
return 1;
@@ -599,6 +611,12 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
EV_MSC, MSC_TIMESTAMP);
input_set_capability(hi->input, EV_MSC, MSC_TIMESTAMP);
mt_store_field(usage, td, hi);
+ /* Ignore if indexes are out of bounds. */
+ if (field->index >= field->report->maxfield ||
+ usage->usage_index >= field->report_count)
+ return 1;
+ td->scantime_index = field->index;
+ td->scantime_val_index = usage->usage_index;
return 1;
case HID_DG_CONTACTCOUNT:
/* Ignore if indexes are out of bounds. */
@@ -608,6 +626,21 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
td->cc_index = field->index;
td->cc_value_index = usage->usage_index;
return 1;
+ case HID_DG_AZIMUTH:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_ORIENTATION);
+ /*
+		 * Azimuth has the range [0, MAX), representing a full
+		 * revolution. Set ABS_MT_ORIENTATION to a quarter of
+		 * MAX, according to the definition of ABS_MT_ORIENTATION.
+ */
+ input_set_abs_params(hi->input, ABS_MT_ORIENTATION,
+ -field->logical_maximum / 4,
+ field->logical_maximum / 4,
+ cls->sn_move ?
+ field->logical_maximum / cls->sn_move : 0, 0);
+ mt_store_field(usage, td, hi);
+ return 1;
case HID_DG_CONTACTMAX:
/* we don't set td->last_slot_field as contactcount and
* contact max are global to the report */
@@ -700,6 +733,10 @@ static void mt_complete_slot(struct mt_device *td, struct input_dev *input)
int wide = (s->w > s->h);
int major = max(s->w, s->h);
int minor = min(s->w, s->h);
+ int orientation = wide;
+
+ if (s->has_azimuth)
+ orientation = s->a;
/*
* divided by two to match visual scale of touch
@@ -716,7 +753,8 @@ static void mt_complete_slot(struct mt_device *td, struct input_dev *input)
input_event(input, EV_ABS, ABS_MT_TOOL_Y, s->cy);
input_event(input, EV_ABS, ABS_MT_DISTANCE,
!s->touch_state);
- input_event(input, EV_ABS, ABS_MT_ORIENTATION, wide);
+ input_event(input, EV_ABS, ABS_MT_ORIENTATION,
+ orientation);
input_event(input, EV_ABS, ABS_MT_PRESSURE, s->p);
input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, major);
input_event(input, EV_ABS, ABS_MT_TOUCH_MINOR, minor);
@@ -734,10 +772,16 @@ static void mt_complete_slot(struct mt_device *td, struct input_dev *input)
*/
static void mt_sync_frame(struct mt_device *td, struct input_dev *input)
{
+ __s32 cls = td->mtclass.name;
+
+ if (cls == MT_CLS_WIN_8 || cls == MT_CLS_WIN_8_DUAL)
+ input_event(input, EV_KEY, BTN_LEFT, td->left_button_state);
+
input_mt_sync_frame(input);
input_event(input, EV_MSC, MSC_TIMESTAMP, td->timestamp);
input_sync(input);
td->num_received = 0;
+ td->left_button_state = 0;
if (test_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags))
set_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags);
else
@@ -778,9 +822,11 @@ static int mt_touch_event(struct hid_device *hid, struct hid_field *field,
}
static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
- struct hid_usage *usage, __s32 value)
+ struct hid_usage *usage, __s32 value,
+ bool first_packet)
{
struct mt_device *td = hid_get_drvdata(hid);
+ __s32 cls = td->mtclass.name;
__s32 quirks = td->mtclass.quirks;
struct input_dev *input = field->hidinput->input;
@@ -832,11 +878,49 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
break;
case HID_DG_CONTACTCOUNT:
break;
+ case HID_DG_AZIMUTH:
+ /*
+		 * Azimuth is counter-clockwise and ranges over [0, MAX)
+		 * (a full revolution). Convert it to a clockwise value
+		 * in [-MAX/2, MAX/2].
+		 *
+		 * Note that ABS_MT_ORIENTATION requires us to report
+		 * within the limit of [-MAX/4, MAX/4], but the value may
+		 * go out of range, up to [-MAX/2, MAX/2], to report an
+		 * upside-down ellipse.
+ */
+ if (value > field->logical_maximum / 2)
+ value -= field->logical_maximum;
+ td->curdata.a = -value;
+ td->curdata.has_azimuth = true;
+ break;
case HID_DG_TOUCH:
/* do nothing */
break;
default:
+ /*
+		 * For Win8 PTP touchpads we should only look at
+		 * non-finger/touch events in the first packet of
+		 * a (possibly) multi-packet frame.
+ */
+ if ((cls == MT_CLS_WIN_8 || cls == MT_CLS_WIN_8_DUAL) &&
+ !first_packet)
+ return;
+
+ /*
+		 * For Win8 PTP touchpads we map both the clickpad click
+		 * and any "external" left buttons to BTN_LEFT. If a
+		 * device claims to have both, we need to report 1 for
+		 * BTN_LEFT if either is pressed, so we OR all values
+		 * together and report the result in mt_sync_frame().
+ */
+ if ((cls == MT_CLS_WIN_8 || cls == MT_CLS_WIN_8_DUAL) &&
+ usage->type == EV_KEY && usage->code == BTN_LEFT) {
+ td->left_button_state |= value;
+ return;
+ }
+
if (usage->type)
input_event(input, usage->type, usage->code,
value);
@@ -855,9 +939,11 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
static void mt_touch_report(struct hid_device *hid, struct hid_report *report)
{
struct mt_device *td = hid_get_drvdata(hid);
+ __s32 cls = td->mtclass.name;
struct hid_field *field;
+ bool first_packet;
unsigned count;
- int r, n;
+ int r, n, scantime = 0;
/* sticky fingers release in progress, abort */
if (test_and_set_bit(MT_IO_FLAGS_RUNNING, &td->mt_io_flags))
@@ -867,13 +953,31 @@ static void mt_touch_report(struct hid_device *hid, struct hid_report *report)
* Includes multi-packet support where subsequent
* packets are sent with zero contactcount.
*/
+ if (td->scantime_index >= 0) {
+ field = report->field[td->scantime_index];
+ scantime = field->value[td->scantime_val_index];
+ }
if (td->cc_index >= 0) {
struct hid_field *field = report->field[td->cc_index];
int value = field->value[td->cc_value_index];
- if (value)
+
+ /*
+		 * For Win8 PTPs the first packet (td->num_received == 0) may
+		 * have a contactcount of 0 if there is only a button event.
+		 * We double-check that this is not a continuation packet
+		 * of a possible multi-packet frame by checking that the
+		 * timestamp has changed.
+ */
+ if ((cls == MT_CLS_WIN_8 || cls == MT_CLS_WIN_8_DUAL) &&
+ td->num_received == 0 && td->prev_scantime != scantime)
+ td->num_expected = value;
+		/* A non-zero contact count always indicates a first packet */
+ else if (value)
td->num_expected = value;
}
+ td->prev_scantime = scantime;
+ first_packet = td->num_received == 0;
for (r = 0; r < report->maxfield; r++) {
field = report->field[r];
count = field->report_count;
@@ -883,7 +987,7 @@ static void mt_touch_report(struct hid_device *hid, struct hid_report *report)
for (n = 0; n < count; n++)
mt_process_mt_event(hid, field, &field->usage[n],
- field->value[n]);
+ field->value[n], first_packet);
}
if (td->num_received >= td->num_expected)
@@ -1329,6 +1433,7 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
td->maxcontact_report_id = -1;
td->inputmode_value = MT_INPUTMODE_TOUCHSCREEN;
td->cc_index = -1;
+ td->scantime_index = -1;
td->mt_report_id = -1;
hid_set_drvdata(hdev, td);
@@ -1649,14 +1754,6 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_TURBOX,
USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART) },
- /* Panasonic panels */
- { .driver_data = MT_CLS_PANASONIC,
- MT_USB_DEVICE(USB_VENDOR_ID_PANASONIC,
- USB_DEVICE_ID_PANABOARD_UBT780) },
- { .driver_data = MT_CLS_PANASONIC,
- MT_USB_DEVICE(USB_VENDOR_ID_PANASONIC,
- USB_DEVICE_ID_PANABOARD_UBT880) },
-
/* Novatek Panel */
{ .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_NOVATEK,
@@ -1667,6 +1764,14 @@ static const struct hid_device_id mt_devices[] = {
HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
USB_VENDOR_ID_NTRIG, 0x1b05) },
+ /* Panasonic panels */
+ { .driver_data = MT_CLS_PANASONIC,
+ MT_USB_DEVICE(USB_VENDOR_ID_PANASONIC,
+ USB_DEVICE_ID_PANABOARD_UBT780) },
+ { .driver_data = MT_CLS_PANASONIC,
+ MT_USB_DEVICE(USB_VENDOR_ID_PANASONIC,
+ USB_DEVICE_ID_PANABOARD_UBT880) },
+
/* PixArt optical touch screen */
{ .driver_data = MT_CLS_INRANGE_CONTACTNUMBER,
MT_USB_DEVICE(USB_VENDOR_ID_PIXART,
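
A worked example of the azimuth conversion above, assuming field->logical_maximum == 360 (the real maximum is device-specific):

/*
 * raw 0   -> 0 <= 180,  td->curdata.a = 0
 * raw 90  -> 90 <= 180, td->curdata.a = -90  (90 deg CCW = -90 deg CW)
 * raw 270 -> 270 > 180, 270 - 360 = -90,  td->curdata.a = 90
 *
 * input_set_abs_params() advertises [-90, 90] (MAX/4), and values up
 * to +/-180 may still be reported for an upside-down ellipse.
 */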
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
new file mode 100644
index 000000000000..5f6035a5ce36
--- /dev/null
+++ b/drivers/hid/hid-quirks.c
@@ -0,0 +1,1276 @@
+/*
+ * HID quirks support for Linux
+ *
+ * Copyright (c) 1999 Andreas Gal
+ * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
+ * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
+ * Copyright (c) 2006-2007 Jiri Kosina
+ * Copyright (c) 2007 Paul Walmsley
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/hid.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+
+#include "hid-ids.h"
+
+/*
+ * Alphabetically sorted by vendor then product.
+ */
+
+static const struct hid_device_id hid_quirks[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_GAMEPAD), HID_QUIRK_BADPAD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR), HID_QUIRK_BADPAD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016), HID_QUIRK_FULLSPEED_INTERVAL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD), HID_QUIRK_BADPAD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AMI, USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS1758), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS682), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS692), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD), HID_QUIRK_BADPAD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_COMBATSTICK), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FIGHTERSTICK), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_YOKE), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_PEDALS), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_THROTTLE), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB_RAPIDFIRE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70R), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K95RGB), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_M65RGB), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_2NES2SNES), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_4NES4SNES), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER), HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, HID_ANY_ID), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH_2968), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_FUTABA, USB_DEVICE_ID_LED_DISPLAY), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C007), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C01A), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MULTIPLE_1781, USB_DEVICE_ID_RAPHNET_4NES4SNES_OLD), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NATSU, USB_DEVICE_ID_NATSU_GAMEPAD), HID_QUIRK_BADPAD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NEC, USB_DEVICE_ID_NEC_USB_GAME_PAD), HID_QUIRK_BADPAD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750), HID_QUIRK_NO_INIT_REPORTS },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_NEXTWINDOW, USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK), HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1610), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1640), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PI_ENGINEERING, USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL), HID_QUIRK_HIDINPUT_FORCE },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_MOUSE_4D22), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SIGMATEL, USB_DEVICE_ID_SIGMATEL_STMP3780), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS1030_TOUCH), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS817_TOUCH), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS9200_TOUCH), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS_TS), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_1), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_2), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_HD), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS1), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD), HID_QUIRK_BADPAD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8883), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD2, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
+
+ { 0 }
+};
+
+/*
+ * A list of devices for which there is a specialized driver on HID bus.
+ *
+ * Please note that for multitouch devices (driven by hid-multitouch driver),
+ * there is a proper autodetection and autoloading in place (based on presence
+ * of HID_DG_CONTACTID), so those devices don't need to be added to this list,
+ * as we are doing the right thing in hid_scan_usage().
+ *
+ * Autodetection for (USB) HID sensor hubs exists too. If a collection of type
+ * physical is found inside a usage page of type sensor, hid-sensor-hub will be
+ * used as a driver. See hid_scan_report().
+ */
+static const struct hid_device_id hid_have_special_driver[] = {
+#if IS_ENABLED(CONFIG_HID_A4TECH)
+ { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ACCUTOUCH)
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ACRUX)
+ { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0xf705) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ALPS)
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) },
+#endif
+#if IS_ENABLED(CONFIG_HID_APPLE)
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
+#endif
+#if IS_ENABLED(CONFIG_HID_APPLEIR)
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL5) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ASUS)
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD) },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_ASUS_MD_5112) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_ASUS_MD_5110) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100CHI_KEYBOARD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_AUREAL)
+ { HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) },
+#endif
+#if IS_ENABLED(CONFIG_HID_BELKIN)
+ { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_BETOP_FF)
+ { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185PC, 0x5506) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2PC, 0x1850) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2BFM, 0x5500) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CHERRY)
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CHICONY)
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_ASUS_AK1D) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CMEDIA)
+ { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM6533) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CORSAIR)
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CP2112)
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) },
+#endif
+#if IS_ENABLED(CONFIG_HID_CYPRESS)
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_DRAGONRISE)
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ELECOM)
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_EX_G_WIRED) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_EX_G_WIRELESS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRELESS) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ELO)
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0030) },
+#endif
+#if IS_ENABLED(CONFIG_HID_EMS_FF)
+ { HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) },
+#endif
+#if IS_ENABLED(CONFIG_HID_EZKEY)
+ { HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
+#endif
+#if IS_ENABLED(CONFIG_HID_GEMBIRD)
+ { HID_USB_DEVICE(USB_VENDOR_ID_GEMBIRD, USB_DEVICE_ID_GEMBIRD_JPD_DUALFORCE2) },
+#endif
+#if IS_ENABLED(CONFIG_HID_GFRM)
+ { HID_BLUETOOTH_DEVICE(0x58, 0x2000) },
+ { HID_BLUETOOTH_DEVICE(0x471, 0x2210) },
+#endif
+#if IS_ENABLED(CONFIG_HID_GREENASIA)
+ { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) },
+#endif
+#if IS_ENABLED(CONFIG_HID_GT683R)
+ { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
+#endif
+#if IS_ENABLED(CONFIG_HID_GYRATION)
+ { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
+#endif
+#if IS_ENABLED(CONFIG_HID_HOLTEK)
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A070) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ITE)
+ { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ICADE)
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_JABRA)
+ { HID_USB_DEVICE(USB_VENDOR_ID_JABRA, HID_ANY_ID) },
+#endif
+#if IS_ENABLED(CONFIG_HID_KENSINGTON)
+ { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_KEYTOUCH)
+ { HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) },
+#endif
+#if IS_ENABLED(CONFIG_HID_KYE)
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_MANTICORE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GX_IMPERATOR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912) },
+#endif
+#if IS_ENABLED(CONFIG_HID_LCPOWER)
+ { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000) },
+#endif
+#if IS_ENABLED(CONFIG_HID_LED)
+ { HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, USB_DEVICE_ID_DELCOM_VISUAL_IND) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_LUXAFOR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) },
+#endif
+#if IS_ENABLED(CONFIG_HID_LENOVO)
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPPRODOCK) },
+#endif
+#if IS_ENABLED(CONFIG_HID_LOGITECH)
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DUAL_ACTION) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G29_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_F3D) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFP_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G27_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) },
+#endif
+#if IS_ENABLED(CONFIG_HID_LOGITECH_HIDPP)
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_T651) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL) },
+#endif
+#if IS_ENABLED(CONFIG_HID_LOGITECH_DJ)
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2) },
+#endif
+#if IS_ENABLED(CONFIG_HID_MAGICMOUSE)
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_MAYFLASH)
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2) },
+#endif
+#if IS_ENABLED(CONFIG_HID_MICROSOFT)
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_KEYBOARD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE7K) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_OFFICE_KB) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_7K) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
+#endif
+#if IS_ENABLED(CONFIG_HID_MONTEREY)
+ { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
+#endif
+#if IS_ENABLED(CONFIG_HID_MULTITOUCH)
+ { HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MELFAS_MT) },
+#endif
+#if IS_ENABLED(CONFIG_HID_WIIMOTE)
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
+#endif
+#if IS_ENABLED(CONFIG_HID_NTI)
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTI, USB_DEVICE_ID_USB_SUN) },
+#endif
+#if IS_ENABLED(CONFIG_HID_NTRIG)
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_4) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_5) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_6) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_7) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_8) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_9) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_10) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_11) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_12) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_13) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_14) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_15) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ORTEK)
+ { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PANTHERLORD)
+ { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PENMOUNT)
+ { HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_6000) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PETALYNX)
+ { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PICOLCD)
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PLANTRONICS)
+ { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PRIMAX)
+ { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
+#endif
+#if IS_ENABLED(CONFIG_HID_PRODIKEYS)
+ { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
+#endif
+#if IS_ENABLED(CONFIG_HID_RETRODE)
+ { HID_USB_DEVICE(USB_VENDOR_ID_FUTURE_TECHNOLOGY, USB_DEVICE_ID_RETRODE2) },
+#endif
+#if IS_ENABLED(CONFIG_HID_RMI)
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_REZEL) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ROCCAT)
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKUFX) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPLUS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPURE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPURE_OPTICAL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEXTD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KOVAPLUS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_LUA) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRED) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK_GLOW) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_RYOS_MK_PRO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_SAVU) },
+#endif
+#if IS_ENABLED(CONFIG_HID_SAITEK)
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_OLD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT9) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT5) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) },
+#endif
+#if IS_ENABLED(CONFIG_HID_SAMSUNG)
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_SMARTJOYPLUS)
+ { HID_USB_DEVICE(USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) },
+#endif
+#if IS_ENABLED(CONFIG_HID_SONY)
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_PS3_BDREMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_MOTION_CONTROLLER) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_MOTION_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_BDREMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) },
+#endif
+#if IS_ENABLED(CONFIG_HID_SPEEDLINK)
+ { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_STEELSERIES)
+ { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
+#endif
+#if IS_ENABLED(CONFIG_HID_SUNPLUS)
+ { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
+#endif
+#if IS_ENABLED(CONFIG_HID_THRUSTMASTER)
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb605) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) },
+#endif
+#if IS_ENABLED(CONFIG_HID_TIVO)
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) },
+#endif
+#if IS_ENABLED(CONFIG_HID_TOPSEED)
+ { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
+#endif
+#if IS_ENABLED(CONFIG_HID_TWINHAN)
+ { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_UCLOGIC)
+ { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_HUION_TABLET) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP1062) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_YIYNOVA_TABLET) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_81) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
+#endif
+#if IS_ENABLED(CONFIG_HID_UDRAW_PS3)
+ { HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) },
+#endif
+#if IS_ENABLED(CONFIG_HID_WALTOP)
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_Q_PAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_PID_0038) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) },
+#endif
+#if IS_ENABLED(CONFIG_HID_XINMO)
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ZEROPLUS)
+ { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
+#endif
+#if IS_ENABLED(CONFIG_HID_ZYDACRON)
+ { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
+#endif
+ { }
+};
+
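+/*
+ * A device matching one of the entries above is given the
+ * HID_QUIRK_HAVE_SPECIAL_DRIVER quirk by hid_gets_squirk() below. The
+ * consumer of that flag is assumed, not shown here: the core is
+ * expected to use it to keep hid-generic from claiming the device so
+ * that the dedicated driver binds instead.
+ */
+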
+/* a list of devices that shouldn't be handled by HID core at all */
+static const struct hid_device_id hid_ignore_list[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_FLAIR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_302) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ADS_TECH, USB_DEVICE_ID_ADS_TECH_RADIO_SI470X) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_01) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_10) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_20) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_21) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_22) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_23) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_24) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AIRCABLE, USB_DEVICE_ID_AIRCABLE1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ALCOR, USB_DEVICE_ID_ALCOR_USBRS232) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AXENTIA, USB_DEVICE_ID_AXENTIA_FM_RADIO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI470X) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI4713) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM109) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_HIDCOM) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_ULTRAMOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0004) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_SUPER_Q2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_GOGOPEN) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GOTOP, USB_DEVICE_ID_PENPOWER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GRETAGMACBETH, USB_DEVICE_ID_GRETAGMACBETH_HUEY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_POWERMATE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_SOUNDKNOB) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_RADIOSHARK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_90) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_100) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_101) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_103) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_104) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_105) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_106) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_107) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_108) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_200) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_201) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_202) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_203) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_204) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_205) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_206) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_207) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_300) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_301) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_302) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_303) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_304) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_305) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_306) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_307) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_308) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_309) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_400) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_401) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_402) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_403) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_404) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_405) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_500) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_501) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_502) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_503) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_504) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1000) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1001) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1002) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1003) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1004) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1005) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1006) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1007) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_IMATION, USB_DEVICE_ID_DISC_STAKKA) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_JABRA, USB_DEVICE_ID_JABRA_GN9350E) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KBGEAR, USB_DEVICE_ID_KBGEAR_JAMSTUDIO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KWORLD, USB_DEVICE_ID_KWORLD_RADIO_FM700) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_GPEN_560) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_KYE, 0x0058) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOBILECASSY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOBILECASSY2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYVOLTAGE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYCURRENT) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIC) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIB) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_XRAY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_XRAY2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_VIDEOCOM) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOTOR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_COM3LAB) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_TELEPORT) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_NETWORKANALYSER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERCONTROL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETEST) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOSTANALYSER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOSTANALYSER2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_ABSESP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_AUTODATABUS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MCT) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICK16F1454) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICK16F1454_V2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_NATIONAL_SEMICONDUCTOR, USB_DEVICE_ID_N_S_HARMONY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 20) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 30) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 100) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 108) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 118) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 200) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 300) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 400) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ONTRAK, USB_DEVICE_ID_ONTRAK_ADU100 + 500) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0001) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0002) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0003) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0004) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PETZL, USB_DEVICE_ID_PETZL_HEADLAMP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) },
+#if IS_ENABLED(CONFIG_MOUSE_SYNAPTICS_USB)
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_INT_TP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_CPAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_STICK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_WP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_COMP_TP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_WTP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DPAD) },
+#endif
+ { HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) },
+ { }
+};
+
+/**
+ * hid_mouse_ignore_list - mouse devices which should not be handled by the hid layer
+ *
+ * There are composite devices for which we want to ignore only a certain
+ * interface. This is a list of devices for which only the mouse interface will
+ * be ignored. This allows a dedicated driver to take care of the interface.
+ */
+static const struct hid_device_id hid_mouse_ignore_list[] = {
+ /* appletouch driver */
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER3_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING2_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
+ { }
+};
+
+bool hid_ignore(struct hid_device *hdev)
+{
+ if (hdev->quirks & HID_QUIRK_NO_IGNORE)
+ return false;
+ if (hdev->quirks & HID_QUIRK_IGNORE)
+ return true;
+
+ switch (hdev->vendor) {
+ case USB_VENDOR_ID_CODEMERCS:
+ /* ignore all Code Mercenaries IOWarrior devices */
+ if (hdev->product >= USB_DEVICE_ID_CODEMERCS_IOW_FIRST &&
+ hdev->product <= USB_DEVICE_ID_CODEMERCS_IOW_LAST)
+ return true;
+ break;
+ case USB_VENDOR_ID_LOGITECH:
+ if (hdev->product >= USB_DEVICE_ID_LOGITECH_HARMONY_FIRST &&
+ hdev->product <= USB_DEVICE_ID_LOGITECH_HARMONY_LAST)
+ return true;
+ /*
+ * The Keene FM transmitter USB device has the same USB ID as
+ * the Logitech AudioHub Speaker, but the HID layer should ignore it.
+ * Check whether the name is that of the Keene device.
+ * For reference: the name of the AudioHub is
+ * "HOLTEK AudioHub Speaker".
+ */
+ if (hdev->product == USB_DEVICE_ID_LOGITECH_AUDIOHUB &&
+ !strcmp(hdev->name, "HOLTEK B-LINK USB Audio "))
+ return true;
+ break;
+ case USB_VENDOR_ID_SOUNDGRAPH:
+ if (hdev->product >= USB_DEVICE_ID_SOUNDGRAPH_IMON_FIRST &&
+ hdev->product <= USB_DEVICE_ID_SOUNDGRAPH_IMON_LAST)
+ return true;
+ break;
+ case USB_VENDOR_ID_HANWANG:
+ if (hdev->product >= USB_DEVICE_ID_HANWANG_TABLET_FIRST &&
+ hdev->product <= USB_DEVICE_ID_HANWANG_TABLET_LAST)
+ return true;
+ break;
+ case USB_VENDOR_ID_JESS:
+ if (hdev->product == USB_DEVICE_ID_JESS_YUREX &&
+ hdev->type == HID_TYPE_USBNONE)
+ return true;
+ break;
+ case USB_VENDOR_ID_VELLEMAN:
+ /* These are not HID devices. They are handled by comedi. */
+ if ((hdev->product >= USB_DEVICE_ID_VELLEMAN_K8055_FIRST &&
+ hdev->product <= USB_DEVICE_ID_VELLEMAN_K8055_LAST) ||
+ (hdev->product >= USB_DEVICE_ID_VELLEMAN_K8061_FIRST &&
+ hdev->product <= USB_DEVICE_ID_VELLEMAN_K8061_LAST))
+ return true;
+ break;
+ case USB_VENDOR_ID_ATMEL_V_USB:
+ /* The Masterkit MA901 USB radio is based on the Atmel tiny85
+ * chip and has the same USB ID as many Atmel V-USB devices. The
+ * radio is handled by the radio-ma901.c driver, so the HID layer
+ * should ignore it. Check the name, bus and product, and ignore
+ * the device if it is the MA901 USB radio.
+ */
+ if (hdev->product == USB_DEVICE_ID_ATMEL_V_USB &&
+ hdev->bus == BUS_USB &&
+ strncmp(hdev->name, "www.masterkit.ru MA901", 22) == 0)
+ return true;
+ break;
+ case USB_VENDOR_ID_ELAN:
+ /*
+ * Many Elan devices have a product id of 0x0401 and are handled
+ * by the elan_i2c input driver, but the ACPI HID ELAN0800 device
+ * is not (and cannot be) handled by that driver, so ignore all
+ * 0x0401 devices except for ELAN0800.
+ */
+ if (hdev->product == 0x0401 &&
+ strncmp(hdev->name, "ELAN0800", 8) != 0)
+ return true;
+ break;
+ }
+
+ if (hdev->type == HID_TYPE_USBMOUSE &&
+ hid_match_id(hdev, hid_mouse_ignore_list))
+ return true;
+
+ return !!hid_match_id(hdev, hid_ignore_list);
+}
+EXPORT_SYMBOL_GPL(hid_ignore);
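+
+/*
+ * Sketch of the intended call site (assumed; the actual caller lives
+ * in hid-core.c, not in this file):
+ *
+ *	if (hid_ignore(hdev))
+ *		return -ENODEV;
+ *
+ * so that devices on the lists above are rejected before any HID
+ * driver gets to bind to them.
+ */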
+
+/* Dynamic HID quirks list - specified at runtime */
+struct quirks_list_struct {
+ struct hid_device_id hid_bl_item;
+ struct list_head node;
+};
+
+static LIST_HEAD(dquirks_list);
+static DEFINE_MUTEX(dquirks_lock);
+
+/* Runtime ("dynamic") quirks manipulation functions */
+
+/**
+ * hid_exists_dquirk: find any dynamic quirks for a HID device
+ * @hdev: the HID device to match
+ *
+ * Description:
+ * Scans dquirks_list for a matching dynamic quirk and returns
+ * the pointer to the relevant struct hid_device_id if found.
+ * Must be called with dquirks_lock held.
+ *
+ * Returns: NULL if no quirk found, struct hid_device_id * if found.
+ */
+static struct hid_device_id *hid_exists_dquirk(const struct hid_device *hdev)
+{
+ struct quirks_list_struct *q;
+ struct hid_device_id *bl_entry = NULL;
+
+ list_for_each_entry(q, &dquirks_list, node) {
+ if (hid_match_one_id(hdev, &q->hid_bl_item)) {
+ bl_entry = &q->hid_bl_item;
+ break;
+ }
+ }
+
+ if (bl_entry != NULL)
+ dbg_hid("Found dynamic quirk 0x%lx for HID device 0x%hx:0x%hx\n",
+ bl_entry->driver_data, bl_entry->vendor,
+ bl_entry->product);
+
+ return bl_entry;
+}
+
+
+/**
+ * hid_modify_dquirk: add/replace a HID quirk
+ * @id: the HID device to match
+ * @quirks: the unsigned long quirks value to add/replace
+ *
+ * Description:
+ * If a dynamic quirk exists in memory for this device, replace its
+ * quirks value with what was provided. Otherwise, add the quirk
+ * to the dynamic quirks list.
+ *
+ * Returns: 0 on success, a negative error code on failure.
+ */
+static int hid_modify_dquirk(const struct hid_device_id *id,
+ const unsigned long quirks)
+{
+ struct hid_device *hdev;
+ struct quirks_list_struct *q_new, *q;
+ int list_edited = 0;
+ int ret = 0;
+
+ hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
+ if (!hdev)
+ return -ENOMEM;
+
+ q_new = kmalloc(sizeof(struct quirks_list_struct), GFP_KERNEL);
+ if (!q_new) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ hdev->bus = q_new->hid_bl_item.bus = id->bus;
+ hdev->group = q_new->hid_bl_item.group = id->group;
+ hdev->vendor = q_new->hid_bl_item.vendor = id->vendor;
+ hdev->product = q_new->hid_bl_item.product = id->product;
+ q_new->hid_bl_item.driver_data = quirks;
+
+ mutex_lock(&dquirks_lock);
+
+ list_for_each_entry(q, &dquirks_list, node) {
+ if (hid_match_one_id(hdev, &q->hid_bl_item)) {
+ list_replace(&q->node, &q_new->node);
+ kfree(q);
+ list_edited = 1;
+ break;
+ }
+ }
+
+ if (!list_edited)
+ list_add_tail(&q_new->node, &dquirks_list);
+
+ mutex_unlock(&dquirks_lock);
+
+ out:
+ kfree(hdev);
+ return ret;
+}
+
+/**
+ * hid_remove_all_dquirks: remove all runtime HID quirks from memory
+ * @bus: bus to match against. Use HID_BUS_ANY if all need to be removed.
+ *
+ * Description:
+ * Free all memory associated with dynamic quirks - called before
+ * module unload.
+ *
+ */
+static void hid_remove_all_dquirks(__u16 bus)
+{
+ struct quirks_list_struct *q, *temp;
+
+ mutex_lock(&dquirks_lock);
+ list_for_each_entry_safe(q, temp, &dquirks_list, node) {
+ if (bus == HID_BUS_ANY || bus == q->hid_bl_item.bus) {
+ list_del(&q->node);
+ kfree(q);
+ }
+ }
+ mutex_unlock(&dquirks_lock);
+}
+
+/**
+ * hid_quirks_init: apply HID quirks specified at module load time
+ * @quirks_param: array of quirk strings of the form "0xVVVV:0xPPPP:0xQQQQ"
+ * @bus: the bus the quirks apply to (e.g. BUS_USB)
+ * @count: number of entries in @quirks_param
+ *
+ * Returns: 0; entries that fail to parse are warned about and skipped.
+ */
+int hid_quirks_init(char **quirks_param, __u16 bus, int count)
+{
+ struct hid_device_id id = { 0 };
+ int n = 0, m;
+ unsigned short int vendor, product;
+ u32 quirks;
+
+ id.bus = bus;
+
+ for (; n < count && quirks_param[n]; n++) {
+ m = sscanf(quirks_param[n], "0x%hx:0x%hx:0x%x",
+ &vendor, &product, &quirks);
+
+ id.vendor = (__u16)vendor;
+ id.product = (__u16)product;
+
+ if (m != 3 ||
+ hid_modify_dquirk(&id, quirks) != 0) {
+ pr_warn("Could not parse HID quirk module param %s\n",
+ quirks_param[n]);
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hid_quirks_init);
+
+/**
+ * hid_quirks_exit: release memory associated with dynamic quirks
+ * @bus: a bus to match against
+ *
+ * Description:
+ * Release all memory associated with dynamic quirks for a given bus.
+ * Called upon module unload.
+ * Use HID_BUS_ANY to remove all dynamic quirks.
+ *
+ * Returns: nothing
+ */
+void hid_quirks_exit(__u16 bus)
+{
+ hid_remove_all_dquirks(bus);
+}
+EXPORT_SYMBOL_GPL(hid_quirks_exit);
+
+/**
+ * hid_gets_squirk: return any static quirks for a HID device
+ * @hdev: the HID device to match
+ *
+ * Description:
+ * Given a HID device, return the static quirks value associated with that
+ * device, combining the ignore list, the special-driver list and the static
+ * quirk table.
+ *
+ * Returns: an unsigned long quirks value.
+ */
+static unsigned long hid_gets_squirk(const struct hid_device *hdev)
+{
+ const struct hid_device_id *bl_entry;
+ unsigned long quirks = 0;
+
+ if (hid_match_id(hdev, hid_ignore_list))
+ quirks |= HID_QUIRK_IGNORE;
+
+ if (hid_match_id(hdev, hid_have_special_driver))
+ quirks |= HID_QUIRK_HAVE_SPECIAL_DRIVER;
+
+ bl_entry = hid_match_id(hdev, hid_quirks);
+ if (bl_entry != NULL)
+ quirks |= bl_entry->driver_data;
+
+ if (quirks)
+ dbg_hid("Found squirk 0x%lx for HID device 0x%hx:0x%hx\n",
+ quirks, hdev->vendor, hdev->product);
+ return quirks;
+}
+
+/**
+ * hid_lookup_quirk: return any quirks associated with a HID device
+ * @hdev: the HID device to look for
+ *
+ * Description:
+ * Given a HID device, return any quirks associated with that device.
+ *
+ * Returns: an unsigned long quirks value.
+ */
+unsigned long hid_lookup_quirk(const struct hid_device *hdev)
+{
+ unsigned long quirks = 0;
+ const struct hid_device_id *quirk_entry = NULL;
+
+ /* NCR devices must not be queried for reports */
+ if (hdev->bus == BUS_USB &&
+ hdev->vendor == USB_VENDOR_ID_NCR &&
+ hdev->product >= USB_DEVICE_ID_NCR_FIRST &&
+ hdev->product <= USB_DEVICE_ID_NCR_LAST)
+ return HID_QUIRK_NO_INIT_REPORTS;
+
+ /* These devices must be ignored if version (bcdDevice) is too old */
+ if (hdev->bus == BUS_USB && hdev->vendor == USB_VENDOR_ID_JABRA) {
+ switch (hdev->product) {
+ case USB_DEVICE_ID_JABRA_SPEAK_410:
+ if (hdev->version < 0x0111)
+ return HID_QUIRK_IGNORE;
+ break;
+ case USB_DEVICE_ID_JABRA_SPEAK_510:
+ if (hdev->version < 0x0214)
+ return HID_QUIRK_IGNORE;
+ break;
+ }
+ }
+
+ mutex_lock(&dquirks_lock);
+ quirk_entry = hid_exists_dquirk(hdev);
+ if (quirk_entry)
+ quirks = quirk_entry->driver_data;
+ else
+ quirks = hid_gets_squirk(hdev);
+ mutex_unlock(&dquirks_lock);
+
+ return quirks;
+}
+EXPORT_SYMBOL_GPL(hid_lookup_quirk);
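
The strings consumed by hid_quirks_init() follow the "0xVID:0xPID:0xQUIRKS"
format matched by the sscanf() above. As an illustrative sketch only (the
vendor/product pair is arbitrary), a device can be given
HID_QUIRK_MULTI_INPUT (0x00000040 in include/linux/hid.h) at load time with:

    modprobe usbhid quirks=0x0079:0x0006:0x00000040

Each parsed entry lands in dquirks_list via hid_modify_dquirk(), where it
takes precedence over any static table entry for the same device in
hid_lookup_quirk().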
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 0f43c4292685..c6c05df3e8d2 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -731,6 +731,7 @@ static const struct hid_device_id rmi_id[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14),
.driver_data = RMI_DEVICE_HAS_PHYS_BUTTONS },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_REZEL) },
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_RMI, HID_ANY_ID, HID_ANY_ID) },
{ }
};
diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c
index 43617fb28b87..317c9c2c0a7c 100644
--- a/drivers/hid/hid-roccat-kovaplus.c
+++ b/drivers/hid/hid-roccat-kovaplus.c
@@ -37,6 +37,8 @@ static uint kovaplus_convert_event_cpi(uint value)
static void kovaplus_profile_activated(struct kovaplus_device *kovaplus,
uint new_profile_index)
{
+ if (new_profile_index >= ARRAY_SIZE(kovaplus->profile_settings))
+ return;
kovaplus->actual_profile = new_profile_index;
kovaplus->actual_cpi = kovaplus->profile_settings[new_profile_index].cpi_startup_level;
kovaplus->actual_x_sensitivity = kovaplus->profile_settings[new_profile_index].sensitivity_x;
diff --git a/drivers/hid/hid-roccat.c b/drivers/hid/hid-roccat.c
index fb77dec720a4..b7e86aba6f33 100644
--- a/drivers/hid/hid-roccat.c
+++ b/drivers/hid/hid-roccat.c
@@ -137,7 +137,7 @@ exit_unlock:
return retval;
}
-static unsigned int roccat_poll(struct file *file, poll_table *wait)
+static __poll_t roccat_poll(struct file *file, poll_table *wait)
{
struct roccat_reader *reader = file->private_data;
poll_wait(file, &reader->device->wait, wait);
diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c
index 0bcf041368c7..21ed6c55c40a 100644
--- a/drivers/hid/hid-sensor-custom.c
+++ b/drivers/hid/hid-sensor-custom.c
@@ -702,11 +702,11 @@ static int hid_sensor_custom_open(struct inode *inode, struct file *file)
return nonseekable_open(inode, file);
}
-static unsigned int hid_sensor_custom_poll(struct file *file,
+static __poll_t hid_sensor_custom_poll(struct file *file,
struct poll_table_struct *wait)
{
struct hid_sensor_custom *sensor_inst;
- unsigned int mask = 0;
+ __poll_t mask = 0;
sensor_inst = container_of(file->private_data,
struct hid_sensor_custom, custom_dev);
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index b9dc3ac4d4aa..ccdc5f2d01b1 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -473,6 +473,7 @@ struct motion_output_report_02 {
#define DS4_FEATURE_REPORT_0x02_SIZE 37
#define DS4_FEATURE_REPORT_0x05_SIZE 41
#define DS4_FEATURE_REPORT_0x81_SIZE 7
+#define DS4_FEATURE_REPORT_0xA3_SIZE 49
#define DS4_INPUT_REPORT_0x11_SIZE 78
#define DS4_OUTPUT_REPORT_0x05_SIZE 32
#define DS4_OUTPUT_REPORT_0x11_SIZE 78
@@ -544,6 +545,8 @@ struct sony_sc {
struct power_supply *battery;
struct power_supply_desc battery_desc;
int device_id;
+ unsigned fw_version;
+ unsigned hw_version;
u8 *output_report_dmabuf;
#ifdef CONFIG_SONY_FF
@@ -627,6 +630,29 @@ static ssize_t ds4_store_poll_interval(struct device *dev,
static DEVICE_ATTR(bt_poll_interval, 0644, ds4_show_poll_interval,
ds4_store_poll_interval);
+static ssize_t sony_show_firmware_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hid_device *hdev = to_hid_device(dev);
+ struct sony_sc *sc = hid_get_drvdata(hdev);
+
+ return snprintf(buf, PAGE_SIZE, "0x%04x\n", sc->fw_version);
+}
+
+static DEVICE_ATTR(firmware_version, 0444, sony_show_firmware_version, NULL);
+
+static ssize_t sony_show_hardware_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hid_device *hdev = to_hid_device(dev);
+ struct sony_sc *sc = hid_get_drvdata(hdev);
+
+ return snprintf(buf, PAGE_SIZE, "0x%04x\n", sc->hw_version);
+}
+
+static DEVICE_ATTR(hardware_version, 0444, sony_show_hardware_version, NULL);
static u8 *motion_fixup(struct hid_device *hdev, u8 *rdesc,
unsigned int *rsize)
@@ -1646,6 +1672,31 @@ static void dualshock4_calibration_work(struct work_struct *work)
spin_unlock_irqrestore(&sc->lock, flags);
}
+static int dualshock4_get_version_info(struct sony_sc *sc)
+{
+ u8 *buf;
+ int ret;
+
+ buf = kmalloc(DS4_FEATURE_REPORT_0xA3_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = hid_hw_raw_request(sc->hdev, 0xA3, buf,
+ DS4_FEATURE_REPORT_0xA3_SIZE,
+ HID_FEATURE_REPORT,
+ HID_REQ_GET_REPORT);
+ if (ret < 0) {
+ kfree(buf);
+ return ret;
+ }
+
+ sc->hw_version = get_unaligned_le16(&buf[35]);
+ sc->fw_version = get_unaligned_le16(&buf[41]);
+
+ kfree(buf);
+ return 0;
+}
+
static void sixaxis_set_leds_from_id(struct sony_sc *sc)
{
static const u8 sixaxis_leds[10][4] = {
@@ -2399,10 +2450,7 @@ static int sony_check_add(struct sony_sc *sc)
memcpy(sc->mac_address, &buf[1], sizeof(sc->mac_address));
snprintf(sc->hdev->uniq, sizeof(sc->hdev->uniq),
- "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
- sc->mac_address[5], sc->mac_address[4],
- sc->mac_address[3], sc->mac_address[2],
- sc->mac_address[1], sc->mac_address[0]);
+ "%pMR", sc->mac_address);
} else if ((sc->quirks & SIXAXIS_CONTROLLER_USB) ||
(sc->quirks & NAVIGATION_CONTROLLER_USB)) {
buf = kmalloc(SIXAXIS_REPORT_0xF2_SIZE, GFP_KERNEL);
@@ -2432,10 +2480,7 @@ static int sony_check_add(struct sony_sc *sc)
sc->mac_address[5-n] = buf[4+n];
snprintf(sc->hdev->uniq, sizeof(sc->hdev->uniq),
- "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
- sc->mac_address[5], sc->mac_address[4],
- sc->mac_address[3], sc->mac_address[2],
- sc->mac_address[1], sc->mac_address[0]);
+ "%pMR", sc->mac_address);
} else {
return 0;
}
@@ -2619,6 +2664,28 @@ static int sony_input_configured(struct hid_device *hdev,
goto err_stop;
}
+ ret = dualshock4_get_version_info(sc);
+ if (ret < 0) {
+ hid_err(sc->hdev, "Failed to get version data from Dualshock 4\n");
+ goto err_stop;
+ }
+
+ ret = device_create_file(&sc->hdev->dev, &dev_attr_firmware_version);
+ if (ret) {
+ /* Zero the versions so the error path does not remove attributes that were never created. */
+ sc->fw_version = 0;
+ sc->hw_version = 0;
+ hid_err(sc->hdev, "can't create sysfs firmware_version attribute err: %d\n", ret);
+ goto err_stop;
+ }
+
+ ret = device_create_file(&sc->hdev->dev, &dev_attr_hardware_version);
+ if (ret) {
+ sc->hw_version = 0;
+ hid_err(sc->hdev, "can't create sysfs hardware_version attribute err: %d\n", ret);
+ goto err_stop;
+ }
+
/*
* The Dualshock 4 touchpad supports 2 touches and has a
* resolution of 1920x942 (44.86 dots/mm).
@@ -2695,6 +2762,10 @@ err_stop:
*/
if (sc->ds4_bt_poll_interval)
device_remove_file(&sc->hdev->dev, &dev_attr_bt_poll_interval);
+ if (sc->fw_version)
+ device_remove_file(&sc->hdev->dev, &dev_attr_firmware_version);
+ if (sc->hw_version)
+ device_remove_file(&sc->hdev->dev, &dev_attr_hardware_version);
if (sc->quirks & SONY_LED_SUPPORT)
sony_leds_remove(sc);
if (sc->quirks & SONY_BATTERY_SUPPORT)
@@ -2796,6 +2867,12 @@ static void sony_remove(struct hid_device *hdev)
if (sc->quirks & DUALSHOCK4_CONTROLLER_BT)
device_remove_file(&sc->hdev->dev, &dev_attr_bt_poll_interval);
+ if (sc->fw_version)
+ device_remove_file(&sc->hdev->dev, &dev_attr_firmware_version);
+
+ if (sc->hw_version)
+ device_remove_file(&sc->hdev->dev, &dev_attr_hardware_version);
+
sony_cancel_work_sync(sc);
kfree(sc->output_report_dmabuf);
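
With the two attributes above in place, the version words extracted from
feature report 0xA3 become readable through sysfs. A hypothetical session
(the device path and values shown are placeholders, not captured output):

    $ cat /sys/bus/hid/devices/0005:054C:09CC.0001/firmware_version
    0x4050
    $ cat /sys/bus/hid/devices/0005:054C:09CC.0001/hardware_version
    0xa000

The driver simply prints sc->fw_version and sc->hw_version with the 0x%04x
format used in the show functions.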
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 5fbe0f81ab2e..be210219f982 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -249,7 +249,7 @@ out:
return ret;
}
-static unsigned int hidraw_poll(struct file *file, poll_table *wait)
+static __poll_t hidraw_poll(struct file *file, poll_table *wait)
{
struct hidraw_list *list = file->private_data;
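
The unsigned int to __poll_t conversions repeated across this series (hidraw,
uhid, hiddev, roccat, hid-sensor-custom, cmt_speech, hv_utils) are pure type
annotations: __poll_t is the sparse bitwise type for the EPOLL* mask a poll
handler returns, so the generated code is unchanged. A minimal sketch of the
resulting handler shape, with illustrative names (example_poll,
example_waitqueue and data_ready are not part of any patch here):

    static __poll_t example_poll(struct file *file, poll_table *wait)
    {
            __poll_t mask = 0;

            poll_wait(file, &example_waitqueue, wait);
            if (data_ready)                 /* driver-specific readiness test */
                    mask |= EPOLLIN | EPOLLRDNORM;
            return mask;
    }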
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index e054ee43c1e2..7230243b94d3 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -934,11 +934,6 @@ static int i2c_hid_of_probe(struct i2c_client *client,
}
pdata->hid_descriptor_address = val;
- ret = of_property_read_u32(dev->of_node, "post-power-on-delay-ms",
- &val);
- if (!ret)
- pdata->post_power_delay_ms = val;
-
return 0;
}
@@ -955,6 +950,16 @@ static inline int i2c_hid_of_probe(struct i2c_client *client,
}
#endif
+static void i2c_hid_fwnode_probe(struct i2c_client *client,
+ struct i2c_hid_platform_data *pdata)
+{
+ u32 val;
+
+ if (!device_property_read_u32(&client->dev, "post-power-on-delay-ms",
+ &val))
+ pdata->post_power_delay_ms = val;
+}
+
static int i2c_hid_probe(struct i2c_client *client,
const struct i2c_device_id *dev_id)
{
@@ -998,6 +1003,9 @@ static int i2c_hid_probe(struct i2c_client *client,
ihid->pdata = *platform_data;
}
+ /* Parse platform agnostic common properties from ACPI / device tree */
+ i2c_hid_fwnode_probe(client, &ihid->pdata);
+
ihid->pdata.supply = devm_regulator_get(&client->dev, "vdd");
if (IS_ERR(ihid->pdata.supply)) {
ret = PTR_ERR(ihid->pdata.supply);
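
Replacing the OF-only of_property_read_u32() with device_property_read_u32()
makes "post-power-on-delay-ms" visible from either firmware interface: a
device tree node (e.g. post-power-on-delay-ms = <100>;, value illustrative)
or an ACPI _DSD property, which is why the parsing could be dropped from
i2c_hid_of_probe() above.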
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
index 2aac097c3f70..97869b7410eb 100644
--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -28,6 +28,7 @@
#define SPT_Ax_DEVICE_ID 0x9D35
#define CNL_Ax_DEVICE_ID 0x9DFC
#define GLK_Ax_DEVICE_ID 0x31A2
+#define CNL_H_DEVICE_ID 0xA37C
#define REVISION_ID_CHT_A0 0x6
#define REVISION_ID_CHT_Ax_SI 0x0
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index 20d824f74f99..582e449be9fe 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -37,6 +37,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_Ax_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_Ax_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, GLK_Ax_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_H_DEVICE_ID)},
{0, }
};
MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index 6f819f144cb4..fc43850a155e 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -753,7 +753,7 @@ unlock:
return ret ? ret : count;
}
-static unsigned int uhid_char_poll(struct file *file, poll_table *wait)
+static __poll_t uhid_char_poll(struct file *file, poll_table *wait)
{
struct uhid_device *uhid = file->private_data;
diff --git a/drivers/hid/usbhid/Makefile b/drivers/hid/usbhid/Makefile
index 0ff227d0c033..b6349e30bd93 100644
--- a/drivers/hid/usbhid/Makefile
+++ b/drivers/hid/usbhid/Makefile
@@ -3,7 +3,7 @@
# Makefile for the USB input drivers
#
-usbhid-y := hid-core.o hid-quirks.o
+usbhid-y := hid-core.o
usbhid-$(CONFIG_USB_HIDDEV) += hiddev.o
usbhid-$(CONFIG_HID_PID) += hid-pidff.o
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 640dfb937c69..77c50cdfff97 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -978,8 +978,7 @@ static int usbhid_parse(struct hid_device *hid)
int num_descriptors;
size_t offset = offsetof(struct hid_descriptor, desc);
- quirks = usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor),
- le16_to_cpu(dev->descriptor.idProduct));
+ quirks = hid_lookup_quirk(hid);
if (quirks & HID_QUIRK_IGNORE)
return -ENODEV;
@@ -1328,8 +1327,8 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
hid->bus = BUS_USB;
hid->vendor = le16_to_cpu(dev->descriptor.idVendor);
hid->product = le16_to_cpu(dev->descriptor.idProduct);
+ hid->version = le16_to_cpu(dev->descriptor.bcdDevice);
hid->name[0] = 0;
- hid->quirks = usbhid_lookup_quirk(hid->vendor, hid->product);
if (intf->cur_altsetting->desc.bInterfaceProtocol ==
USB_INTERFACE_PROTOCOL_MOUSE)
hid->type = HID_TYPE_USBMOUSE;
@@ -1641,7 +1640,7 @@ static int __init hid_init(void)
{
int retval = -ENOMEM;
- retval = usbhid_quirks_init(quirks_param);
+ retval = hid_quirks_init(quirks_param, BUS_USB, MAX_USBHID_BOOT_QUIRKS);
if (retval)
goto usbhid_quirks_init_fail;
retval = usb_register(&hid_driver);
@@ -1651,7 +1650,7 @@ static int __init hid_init(void)
return 0;
usb_register_fail:
- usbhid_quirks_exit();
+ hid_quirks_exit(BUS_USB);
usbhid_quirks_init_fail:
return retval;
}
@@ -1659,7 +1658,7 @@ usbhid_quirks_init_fail:
static void __exit hid_exit(void)
{
usb_deregister(&hid_driver);
- usbhid_quirks_exit();
+ hid_quirks_exit(BUS_USB);
}
module_init(hid_init);
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
deleted file mode 100644
index 331f7f34ec14..000000000000
--- a/drivers/hid/usbhid/hid-quirks.c
+++ /dev/null
@@ -1,402 +0,0 @@
-/*
- * USB HID quirks support for Linux
- *
- * Copyright (c) 1999 Andreas Gal
- * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
- * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
- * Copyright (c) 2006-2007 Jiri Kosina
- * Copyright (c) 2007 Paul Walmsley
- */
-
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- */
-
-#include <linux/hid.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-
-#include "../hid-ids.h"
-
-/*
- * Alphabetically sorted blacklist by quirk type.
- */
-
-static const struct hid_blacklist {
- __u16 idVendor;
- __u16 idProduct;
- __u32 quirks;
-} hid_blacklist[] = {
- { USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_GAMEPAD, HID_QUIRK_BADPAD },
- { USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR, HID_QUIRK_BADPAD },
- { USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD, HID_QUIRK_BADPAD },
- { USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD },
- { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER, HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET },
- { USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_NATSU, USB_DEVICE_ID_NATSU_GAMEPAD, HID_QUIRK_BADPAD },
- { USB_VENDOR_ID_NEC, USB_DEVICE_ID_NEC_USB_GAME_PAD, HID_QUIRK_BADPAD },
- { USB_VENDOR_ID_NEXTWINDOW, USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN, HID_QUIRK_MULTI_INPUT},
- { USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD, HID_QUIRK_BADPAD },
- { USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD, HID_QUIRK_BADPAD },
-
- { USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016, HID_QUIRK_FULLSPEED_INTERVAL },
-
- { USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH_2968, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
- { USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT },
-
- { USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_AMI, USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS682, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS692, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS1758, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FIGHTERSTICK, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_COMBATSTICK, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_YOKE, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_THROTTLE, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_PEDALS, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70R, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_M65RGB, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K95RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_FUTABA, USB_DEVICE_ID_LED_DISPLAY, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C007, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C01A, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1610, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1640, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_KYE, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_MOUSE_4D22, HID_QUIRK_ALWAYS_POLL },
- { USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_SIGMATEL, USB_DEVICE_ID_SIGMATEL_STMP3780, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS9200_TOUCH, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS817_TOUCH, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS_TS, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS1030_TOUCH, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_1, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_2, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8883, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
-
- { USB_VENDOR_ID_WISEGROUP_LTD2, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
-
- { USB_VENDOR_ID_PI_ENGINEERING, USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL, HID_QUIRK_HIDINPUT_FORCE },
-
- { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS1, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_HD, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096, HID_QUIRK_NO_INIT_REPORTS },
- { USB_VENDOR_ID_MULTIPLE_1781, USB_DEVICE_ID_RAPHNET_4NES4SNES_OLD, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_2NES2SNES, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_4NES4SNES, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI, HID_QUIRK_MULTI_INPUT },
- { USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK, HID_QUIRK_MULTI_INPUT },
-
- { 0, 0 }
-};
-
-/* Dynamic HID quirks list - specified at runtime */
-struct quirks_list_struct {
- struct hid_blacklist hid_bl_item;
- struct list_head node;
-};
-
-static LIST_HEAD(dquirks_list);
-static DECLARE_RWSEM(dquirks_rwsem);
-
-/* Runtime ("dynamic") quirks manipulation functions */
-
-/**
- * usbhid_exists_dquirk: find any dynamic quirks for a USB HID device
- * @idVendor: the 16-bit USB vendor ID, in native byteorder
- * @idProduct: the 16-bit USB product ID, in native byteorder
- *
- * Description:
- * Scans dquirks_list for a matching dynamic quirk and returns
- * the pointer to the relevant struct hid_blacklist if found.
- * Must be called with a read lock held on dquirks_rwsem.
- *
- * Returns: NULL if no quirk found, struct hid_blacklist * if found.
- */
-static struct hid_blacklist *usbhid_exists_dquirk(const u16 idVendor,
- const u16 idProduct)
-{
- struct quirks_list_struct *q;
- struct hid_blacklist *bl_entry = NULL;
-
- list_for_each_entry(q, &dquirks_list, node) {
- if (q->hid_bl_item.idVendor == idVendor &&
- q->hid_bl_item.idProduct == idProduct) {
- bl_entry = &q->hid_bl_item;
- break;
- }
- }
-
- if (bl_entry != NULL)
- dbg_hid("Found dynamic quirk 0x%x for USB HID vendor 0x%hx prod 0x%hx\n",
- bl_entry->quirks, bl_entry->idVendor,
- bl_entry->idProduct);
-
- return bl_entry;
-}
-
-
-/**
- * usbhid_modify_dquirk: add/replace a HID quirk
- * @idVendor: the 16-bit USB vendor ID, in native byteorder
- * @idProduct: the 16-bit USB product ID, in native byteorder
- * @quirks: the u32 quirks value to add/replace
- *
- * Description:
- * If an dynamic quirk exists in memory for this (idVendor,
- * idProduct) pair, replace its quirks value with what was
- * provided. Otherwise, add the quirk to the dynamic quirks list.
- *
- * Returns: 0 OK, -error on failure.
- */
-static int usbhid_modify_dquirk(const u16 idVendor, const u16 idProduct,
- const u32 quirks)
-{
- struct quirks_list_struct *q_new, *q;
- int list_edited = 0;
-
- if (!idVendor) {
- dbg_hid("Cannot add a quirk with idVendor = 0\n");
- return -EINVAL;
- }
-
- q_new = kmalloc(sizeof(struct quirks_list_struct), GFP_KERNEL);
- if (!q_new)
- return -ENOMEM;
-
- q_new->hid_bl_item.idVendor = idVendor;
- q_new->hid_bl_item.idProduct = idProduct;
- q_new->hid_bl_item.quirks = quirks;
-
- down_write(&dquirks_rwsem);
-
- list_for_each_entry(q, &dquirks_list, node) {
-
- if (q->hid_bl_item.idVendor == idVendor &&
- q->hid_bl_item.idProduct == idProduct) {
-
- list_replace(&q->node, &q_new->node);
- kfree(q);
- list_edited = 1;
- break;
-
- }
-
- }
-
- if (!list_edited)
- list_add_tail(&q_new->node, &dquirks_list);
-
- up_write(&dquirks_rwsem);
-
- return 0;
-}
-
-/**
- * usbhid_remove_all_dquirks: remove all runtime HID quirks from memory
- *
- * Description:
- * Free all memory associated with dynamic quirks - called before
- * module unload.
- *
- */
-static void usbhid_remove_all_dquirks(void)
-{
- struct quirks_list_struct *q, *temp;
-
- down_write(&dquirks_rwsem);
- list_for_each_entry_safe(q, temp, &dquirks_list, node) {
- list_del(&q->node);
- kfree(q);
- }
- up_write(&dquirks_rwsem);
-
-}
-
-/**
- * usbhid_quirks_init: apply USB HID quirks specified at module load time
- */
-int usbhid_quirks_init(char **quirks_param)
-{
- u16 idVendor, idProduct;
- u32 quirks;
- int n = 0, m;
-
- for (; n < MAX_USBHID_BOOT_QUIRKS && quirks_param[n]; n++) {
-
- m = sscanf(quirks_param[n], "0x%hx:0x%hx:0x%x",
- &idVendor, &idProduct, &quirks);
-
- if (m != 3 ||
- usbhid_modify_dquirk(idVendor, idProduct, quirks) != 0) {
- pr_warn("Could not parse HID quirk module param %s\n",
- quirks_param[n]);
- }
- }
-
- return 0;
-}
-
-/**
- * usbhid_quirks_exit: release memory associated with dynamic_quirks
- *
- * Description:
- * Release all memory associated with dynamic quirks. Called upon
- * module unload.
- *
- * Returns: nothing
- */
-void usbhid_quirks_exit(void)
-{
- usbhid_remove_all_dquirks();
-}
-
-/**
- * usbhid_exists_squirk: return any static quirks for a USB HID device
- * @idVendor: the 16-bit USB vendor ID, in native byteorder
- * @idProduct: the 16-bit USB product ID, in native byteorder
- *
- * Description:
- * Given a USB vendor ID and product ID, return a pointer to
- * the hid_blacklist entry associated with that device.
- *
- * Returns: pointer if quirk found, or NULL if no quirks found.
- */
-static const struct hid_blacklist *usbhid_exists_squirk(const u16 idVendor,
- const u16 idProduct)
-{
- const struct hid_blacklist *bl_entry = NULL;
- int n = 0;
-
- for (; hid_blacklist[n].idVendor; n++)
- if (hid_blacklist[n].idVendor == idVendor &&
- (hid_blacklist[n].idProduct == (__u16) HID_ANY_ID ||
- hid_blacklist[n].idProduct == idProduct))
- bl_entry = &hid_blacklist[n];
-
- if (bl_entry != NULL)
- dbg_hid("Found squirk 0x%x for USB HID vendor 0x%hx prod 0x%hx\n",
- bl_entry->quirks, bl_entry->idVendor,
- bl_entry->idProduct);
- return bl_entry;
-}
-
-/**
- * usbhid_lookup_quirk: return any quirks associated with a USB HID device
- * @idVendor: the 16-bit USB vendor ID, in native byteorder
- * @idProduct: the 16-bit USB product ID, in native byteorder
- *
- * Description:
- * Given a USB vendor ID and product ID, return any quirks associated
- * with that device.
- *
- * Returns: a u32 quirks value.
- */
-u32 usbhid_lookup_quirk(const u16 idVendor, const u16 idProduct)
-{
- u32 quirks = 0;
- const struct hid_blacklist *bl_entry = NULL;
-
- /* NCR devices must not be queried for reports */
- if (idVendor == USB_VENDOR_ID_NCR &&
- idProduct >= USB_DEVICE_ID_NCR_FIRST &&
- idProduct <= USB_DEVICE_ID_NCR_LAST)
- return HID_QUIRK_NO_INIT_REPORTS;
-
- down_read(&dquirks_rwsem);
- bl_entry = usbhid_exists_dquirk(idVendor, idProduct);
- if (!bl_entry)
- bl_entry = usbhid_exists_squirk(idVendor, idProduct);
- if (bl_entry)
- quirks = bl_entry->quirks;
- up_read(&dquirks_rwsem);
-
- return quirks;
-}
-
-EXPORT_SYMBOL_GPL(usbhid_lookup_quirk);
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 7d749b19c27c..0ff3e7e70c8d 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -422,7 +422,7 @@ static ssize_t hiddev_read(struct file * file, char __user * buffer, size_t coun
* "poll" file op
* No kernel lock - fine
*/
-static unsigned int hiddev_poll(struct file *file, poll_table *wait)
+static __poll_t hiddev_poll(struct file *file, poll_table *wait)
{
struct hiddev_list *list = file->private_data;
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index ee71ad9b6cc1..409543160af7 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -56,6 +56,107 @@ static int wacom_set_report(struct hid_device *hdev, u8 type, u8 *buf,
return retval;
}
+static void wacom_wac_queue_insert(struct hid_device *hdev,
+ struct kfifo_rec_ptr_2 *fifo,
+ u8 *raw_data, int size)
+{
+ bool warned = false;
+
+ while (kfifo_avail(fifo) < size) {
+ if (!warned)
+ hid_warn(hdev, "%s: kfifo has filled, starting to drop events\n", __func__);
+ warned = true;
+
+ kfifo_skip(fifo);
+ }
+
+ kfifo_in(fifo, raw_data, size);
+}
+
+static void wacom_wac_queue_flush(struct hid_device *hdev,
+ struct kfifo_rec_ptr_2 *fifo)
+{
+ while (!kfifo_is_empty(fifo)) {
+ u8 buf[WACOM_PKGLEN_MAX];
+ int size;
+ int err;
+
+ size = kfifo_out(fifo, buf, sizeof(buf));
+ err = hid_report_raw_event(hdev, HID_INPUT_REPORT, buf, size, false);
+ if (err) {
+ hid_warn(hdev, "%s: unable to flush event due to error %d\n",
+ __func__, err);
+ }
+ }
+}
+
+static int wacom_wac_pen_serial_enforce(struct hid_device *hdev,
+ struct hid_report *report, u8 *raw_data, int size)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_features *features = &wacom_wac->features;
+ bool flush = false;
+ bool insert = false;
+ int i, j;
+
+ if (wacom_wac->serial[0] || !(features->quirks & WACOM_QUIRK_TOOLSERIAL))
+ return 0;
+
+ /* Queue events which have invalid tool type or serial number */
+ for (i = 0; i < report->maxfield; i++) {
+ for (j = 0; j < report->field[i]->maxusage; j++) {
+ struct hid_field *field = report->field[i];
+ struct hid_usage *usage = &field->usage[j];
+ unsigned int equivalent_usage = wacom_equivalent_usage(usage->hid);
+ unsigned int offset;
+ unsigned int size;
+ unsigned int value;
+
+ if (equivalent_usage != HID_DG_INRANGE &&
+ equivalent_usage != HID_DG_TOOLSERIALNUMBER &&
+ equivalent_usage != WACOM_HID_WD_SERIALHI &&
+ equivalent_usage != WACOM_HID_WD_TOOLTYPE)
+ continue;
+
+ offset = field->report_offset;
+ size = field->report_size;
+ value = hid_field_extract(hdev, raw_data+1, offset + j * size, size);
+
+ /* If we go out of range, we need to flush the queue ASAP */
+ if (equivalent_usage == HID_DG_INRANGE)
+ value = !value;
+
+ if (value) {
+ flush = true;
+ switch (equivalent_usage) {
+ case HID_DG_TOOLSERIALNUMBER:
+ wacom_wac->serial[0] = value;
+ break;
+
+ case WACOM_HID_WD_SERIALHI:
+ wacom_wac->serial[0] |= ((__u64)value) << 32;
+ break;
+
+ case WACOM_HID_WD_TOOLTYPE:
+ wacom_wac->id[0] = value;
+ break;
+ }
+ }
+ else {
+ insert = true;
+ }
+ }
+ }
+
+ if (flush)
+ wacom_wac_queue_flush(hdev, &wacom_wac->pen_fifo);
+ else if (insert)
+ wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo, raw_data, size);
+
+ return insert && !flush;
+}
+
static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
u8 *raw_data, int size)
{
@@ -64,6 +165,9 @@ static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
if (size > WACOM_PKGLEN_MAX)
return 1;
+ if (wacom_wac_pen_serial_enforce(hdev, report, raw_data, size))
+ return -1;
+
memcpy(wacom->wacom_wac.data, raw_data, size);
wacom_wac_irq(&wacom->wacom_wac, size);
@@ -2347,23 +2451,23 @@ static void wacom_remote_destroy_one(struct wacom *wacom, unsigned int index)
int i;
unsigned long flags;
- spin_lock_irqsave(&remote->remote_lock, flags);
- remote->remotes[index].registered = false;
- spin_unlock_irqrestore(&remote->remote_lock, flags);
+ for (i = 0; i < WACOM_MAX_REMOTES; i++) {
+ if (remote->remotes[i].serial == serial) {
- if (remote->remotes[index].battery.battery)
- devres_release_group(&wacom->hdev->dev,
- &remote->remotes[index].battery.bat_desc);
+ spin_lock_irqsave(&remote->remote_lock, flags);
+ remote->remotes[i].registered = false;
+ spin_unlock_irqrestore(&remote->remote_lock, flags);
- if (remote->remotes[index].group.name)
- devres_release_group(&wacom->hdev->dev,
- &remote->remotes[index]);
+ if (remote->remotes[i].battery.battery)
+ devres_release_group(&wacom->hdev->dev,
+ &remote->remotes[i].battery.bat_desc);
+
+ if (remote->remotes[i].group.name)
+ devres_release_group(&wacom->hdev->dev,
+ &remote->remotes[i]);
- for (i = 0; i < WACOM_MAX_REMOTES; i++) {
- if (remote->remotes[i].serial == serial) {
remote->remotes[i].serial = 0;
remote->remotes[i].group.name = NULL;
- remote->remotes[i].registered = false;
remote->remotes[i].battery.battery = NULL;
wacom->led.groups[i].select = WACOM_STATUS_UNKNOWN;
}
@@ -2580,6 +2684,10 @@ static int wacom_probe(struct hid_device *hdev,
goto fail;
}
+ error = kfifo_alloc(&wacom_wac->pen_fifo, WACOM_PKGLEN_MAX, GFP_KERNEL);
+ if (error)
+ goto fail;
+
wacom_wac->hid_data.inputmode = -1;
wacom_wac->mode_report = -1;
@@ -2643,6 +2751,8 @@ static void wacom_remove(struct hid_device *hdev)
if (wacom->wacom_wac.features.type != REMOTE)
wacom_release_resources(wacom);
+ kfifo_free(&wacom_wac->pen_fifo);
+
hid_set_drvdata(hdev, NULL);
}
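
The pen_fifo added to wacom_probe()/wacom_remove() buffers whole raw reports:
events arriving before the tool's serial number is known are queued by
wacom_wac_queue_insert(), then replayed through hid_report_raw_event() by
wacom_wac_queue_flush() once a report carries a valid serial. kfifo_rec_ptr_2
stores each report as a length-prefixed record; a sketch of the underlying
pattern (raw_data/raw_len stand in for a received report, this is not
additional driver code):

    struct kfifo_rec_ptr_2 fifo;
    u8 buf[WACOM_PKGLEN_MAX];
    int size;

    if (kfifo_alloc(&fifo, WACOM_PKGLEN_MAX, GFP_KERNEL))
            return -ENOMEM;
    kfifo_in(&fifo, raw_data, raw_len);         /* queue one record */
    size = kfifo_out(&fifo, buf, sizeof(buf));  /* dequeue one record */
    kfifo_free(&fifo);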
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 16af6886e828..90c38a0523e9 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -1924,7 +1924,6 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
struct wacom_features *features = &wacom_wac->features;
unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
int i;
- bool is_touch_on = value;
bool do_report = false;
/*
@@ -1969,16 +1968,17 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
break;
case WACOM_HID_WD_MUTE_DEVICE:
- if (wacom_wac->shared->touch_input && value) {
- wacom_wac->shared->is_touch_on = !wacom_wac->shared->is_touch_on;
- is_touch_on = wacom_wac->shared->is_touch_on;
- }
-
- /* fall through*/
case WACOM_HID_WD_TOUCHONOFF:
if (wacom_wac->shared->touch_input) {
+ bool *is_touch_on = &wacom_wac->shared->is_touch_on;
+
+ if (equivalent_usage == WACOM_HID_WD_MUTE_DEVICE && value)
+ *is_touch_on = !(*is_touch_on);
+ else if (equivalent_usage == WACOM_HID_WD_TOUCHONOFF)
+ *is_touch_on = value;
+
input_report_switch(wacom_wac->shared->touch_input,
- SW_MUTE_DEVICE, !is_touch_on);
+ SW_MUTE_DEVICE, !(*is_touch_on));
input_sync(wacom_wac->shared->touch_input);
}
break;
@@ -2085,7 +2085,29 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
wacom_map_usage(input, usage, field, EV_KEY, BTN_STYLUS2, 0);
break;
case HID_DG_TOOLSERIALNUMBER:
+ features->quirks |= WACOM_QUIRK_TOOLSERIAL;
wacom_map_usage(input, usage, field, EV_MSC, MSC_SERIAL, 0);
+
+ /* Adjust AES usages to match modern convention */
+ if (usage->hid == WACOM_HID_WT_SERIALNUMBER && field->report_size == 16) {
+ if (field->index + 2 < field->report->maxfield) {
+ struct hid_field *a = field->report->field[field->index + 1];
+ struct hid_field *b = field->report->field[field->index + 2];
+
+ if (a->maxusage > 0 && a->usage[0].hid == HID_DG_TOOLSERIALNUMBER && a->report_size == 32 &&
+ b->maxusage > 0 && b->usage[0].hid == 0xFF000000 && b->report_size == 8) {
+ features->quirks |= WACOM_QUIRK_AESPEN;
+ usage->hid = WACOM_HID_WD_TOOLTYPE;
+ field->logical_minimum = S16_MIN;
+ field->logical_maximum = S16_MAX;
+ a->logical_minimum = S32_MIN;
+ a->logical_maximum = S32_MAX;
+ b->usage[0].hid = WACOM_HID_WD_SERIALHI;
+ b->logical_minimum = 0;
+ b->logical_maximum = U8_MAX;
+ }
+ }
+ }
break;
case WACOM_HID_WD_SENSE:
features->quirks |= WACOM_QUIRK_SENSE;
@@ -2093,15 +2115,18 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
break;
case WACOM_HID_WD_SERIALHI:
wacom_map_usage(input, usage, field, EV_ABS, ABS_MISC, 0);
- set_bit(EV_KEY, input->evbit);
- input_set_capability(input, EV_KEY, BTN_TOOL_PEN);
- input_set_capability(input, EV_KEY, BTN_TOOL_RUBBER);
- input_set_capability(input, EV_KEY, BTN_TOOL_BRUSH);
- input_set_capability(input, EV_KEY, BTN_TOOL_PENCIL);
- input_set_capability(input, EV_KEY, BTN_TOOL_AIRBRUSH);
- if (!(features->device_type & WACOM_DEVICETYPE_DIRECT)) {
- input_set_capability(input, EV_KEY, BTN_TOOL_MOUSE);
- input_set_capability(input, EV_KEY, BTN_TOOL_LENS);
+
+ if (!(features->quirks & WACOM_QUIRK_AESPEN)) {
+ set_bit(EV_KEY, input->evbit);
+ input_set_capability(input, EV_KEY, BTN_TOOL_PEN);
+ input_set_capability(input, EV_KEY, BTN_TOOL_RUBBER);
+ input_set_capability(input, EV_KEY, BTN_TOOL_BRUSH);
+ input_set_capability(input, EV_KEY, BTN_TOOL_PENCIL);
+ input_set_capability(input, EV_KEY, BTN_TOOL_AIRBRUSH);
+ if (!(features->device_type & WACOM_DEVICETYPE_DIRECT)) {
+ input_set_capability(input, EV_KEY, BTN_TOOL_MOUSE);
+ input_set_capability(input, EV_KEY, BTN_TOOL_LENS);
+ }
}
break;
case WACOM_HID_WD_FINGERWHEEL:
@@ -4390,6 +4415,12 @@ static const struct wacom_features wacom_features_0x360 =
static const struct wacom_features wacom_features_0x361 =
{ "Wacom Intuos Pro L", 62200, 43200, 8191, 63,
INTUOSP2_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 10 };
+static const struct wacom_features wacom_features_0x37A =
+ { "Wacom One by Wacom S", 15200, 9500, 2047, 63,
+ BAMBOO_PEN, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x37B =
+ { "Wacom One by Wacom M", 21600, 13500, 2047, 63,
+ BAMBOO_PEN, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_HID_ANY_ID =
{ "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID };
@@ -4558,6 +4589,8 @@ const struct hid_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x343) },
{ BT_DEVICE_WACOM(0x360) },
{ BT_DEVICE_WACOM(0x361) },
+ { USB_DEVICE_WACOM(0x37A) },
+ { USB_DEVICE_WACOM(0x37B) },
{ USB_DEVICE_WACOM(0x4001) },
{ USB_DEVICE_WACOM(0x4004) },
{ USB_DEVICE_WACOM(0x5000) },
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 64d8f014602e..15d9c14fbdf7 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <linux/hid.h>
+#include <linux/kfifo.h>
/* maximum packet length for USB/BT devices */
#define WACOM_PKGLEN_MAX 361
@@ -86,7 +87,9 @@
/* device quirks */
#define WACOM_QUIRK_BBTOUCH_LOWRES 0x0001
#define WACOM_QUIRK_SENSE 0x0002
+#define WACOM_QUIRK_AESPEN 0x0004
#define WACOM_QUIRK_BATTERY 0x0008
+#define WACOM_QUIRK_TOOLSERIAL 0x0010
/* device types */
#define WACOM_DEVICETYPE_NONE 0x0000
@@ -107,6 +110,7 @@
#define WACOM_HID_WD_PEN (WACOM_HID_UP_WACOMDIGITIZER | 0x02)
#define WACOM_HID_WD_SENSE (WACOM_HID_UP_WACOMDIGITIZER | 0x36)
#define WACOM_HID_WD_DIGITIZERFNKEYS (WACOM_HID_UP_WACOMDIGITIZER | 0x39)
+#define WACOM_HID_WD_SERIALNUMBER (WACOM_HID_UP_WACOMDIGITIZER | 0x5b)
#define WACOM_HID_WD_SERIALHI (WACOM_HID_UP_WACOMDIGITIZER | 0x5c)
#define WACOM_HID_WD_TOOLTYPE (WACOM_HID_UP_WACOMDIGITIZER | 0x77)
#define WACOM_HID_WD_DISTANCE (WACOM_HID_UP_WACOMDIGITIZER | 0x0132)
@@ -150,6 +154,7 @@
#define WACOM_HID_WT_TOUCHSCREEN (WACOM_HID_UP_WACOMTOUCH | 0x04)
#define WACOM_HID_WT_TOUCHPAD (WACOM_HID_UP_WACOMTOUCH | 0x05)
#define WACOM_HID_WT_CONTACTMAX (WACOM_HID_UP_WACOMTOUCH | 0x55)
+#define WACOM_HID_WT_SERIALNUMBER (WACOM_HID_UP_WACOMTOUCH | 0x5b)
#define WACOM_HID_WT_X (WACOM_HID_UP_WACOMTOUCH | 0x130)
#define WACOM_HID_WT_Y (WACOM_HID_UP_WACOMTOUCH | 0x131)
@@ -336,6 +341,7 @@ struct wacom_wac {
struct input_dev *pen_input;
struct input_dev *touch_input;
struct input_dev *pad_input;
+ struct kfifo_rec_ptr_2 pen_fifo;
int pid;
int num_contacts_left;
u8 bt_features;
diff --git a/drivers/hsi/clients/cmt_speech.c b/drivers/hsi/clients/cmt_speech.c
index 727f968ac1cb..8fbbacb0fe21 100644
--- a/drivers/hsi/clients/cmt_speech.c
+++ b/drivers/hsi/clients/cmt_speech.c
@@ -451,11 +451,11 @@ static void cs_hsi_read_on_control_complete(struct hsi_msg *msg)
dev_dbg(&hi->cl->device, "Read on control: %08X\n", cmd);
cs_release_cmd(msg);
if (hi->flags & CS_FEAT_TSTAMP_RX_CTRL) {
- struct timespec tspec;
+ struct timespec64 tspec;
struct cs_timestamp *tstamp =
&hi->mmap_cfg->tstamp_rx_ctrl;
- ktime_get_ts(&tspec);
+ ktime_get_ts64(&tspec);
tstamp->tv_sec = (__u32) tspec.tv_sec;
tstamp->tv_nsec = (__u32) tspec.tv_nsec;
@@ -1124,10 +1124,10 @@ static int cs_char_fasync(int fd, struct file *file, int on)
return 0;
}
-static unsigned int cs_char_poll(struct file *file, poll_table *wait)
+static __poll_t cs_char_poll(struct file *file, poll_table *wait)
{
struct cs_char *csdata = file->private_data;
- unsigned int ret = 0;
+ __poll_t ret = 0;
poll_wait(file, &cs_char_data.wait, wait);
spin_lock_bh(&csdata->lock);
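
The cmt_speech change is a year-2038 cleanup: ktime_get_ts64() fills a
struct timespec64 whose tv_sec does not overflow in 2038, while the casts to
__u32 stay because struct cs_timestamp in the mmap'd interface is a fixed
32-bit ABI. The replacement call in isolation:

    struct timespec64 ts;

    ktime_get_ts64(&ts);    /* CLOCK_MONOTONIC, 64-bit tv_sec */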
diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c
index 4402a71e23f7..047959e74bb1 100644
--- a/drivers/hv/hv_utils_transport.c
+++ b/drivers/hv/hv_utils_transport.c
@@ -104,7 +104,7 @@ static ssize_t hvt_op_write(struct file *file, const char __user *buf,
return ret ? ret : count;
}
-static unsigned int hvt_op_poll(struct file *file, poll_table *wait)
+static __poll_t hvt_op_poll(struct file *file, poll_table *wait)
{
struct hvutil_transport *hvt;
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 7ad017690e3a..ef23553ff5cb 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -26,11 +26,9 @@ if HWMON
config HWMON_VID
tristate
- default n
config HWMON_DEBUG_CHIP
bool "Hardware Monitoring Chip debugging messages"
- default n
help
Say Y here if you want the I2C chip drivers to produce a bunch of
debug messages to the system log. Select this if you are having
@@ -42,7 +40,6 @@ comment "Native drivers"
config SENSORS_AB8500
tristate "AB8500 thermal monitoring"
depends on AB8500_GPADC && AB8500_BM
- default n
help
If you say yes here you get support for the thermal sensor part
of the AB8500 chip. The driver includes thermal management for
@@ -302,7 +299,6 @@ config SENSORS_APPLESMC
select NEW_LEDS
select LEDS_CLASS
select INPUT_POLLDEV
- default n
help
This driver provides support for the Apple System Management
Controller, which provides an accelerometer (Apple Sudden Motion
@@ -678,7 +674,6 @@ config SENSORS_JC42
config SENSORS_POWR1220
tristate "Lattice POWR1220 Power Monitoring"
depends on I2C
- default n
help
If you say yes here you get access to the hardware monitoring
functions of the Lattice POWR1220 isp Power Supply Monitoring,
@@ -702,7 +697,6 @@ config SENSORS_LTC2945
tristate "Linear Technology LTC2945"
depends on I2C
select REGMAP_I2C
- default n
help
If you say yes here you get support for Linear Technology LTC2945
I2C System Monitor.
@@ -727,7 +721,6 @@ config SENSORS_LTC2990
config SENSORS_LTC4151
tristate "Linear Technology LTC4151"
depends on I2C
- default n
help
If you say yes here you get support for Linear Technology LTC4151
High Voltage I2C Current and Voltage Monitor interface.
@@ -738,7 +731,6 @@ config SENSORS_LTC4151
config SENSORS_LTC4215
tristate "Linear Technology LTC4215"
depends on I2C
- default n
help
If you say yes here you get support for Linear Technology LTC4215
Hot Swap Controller I2C interface.
@@ -750,7 +742,6 @@ config SENSORS_LTC4222
tristate "Linear Technology LTC4222"
depends on I2C
select REGMAP_I2C
- default n
help
If you say yes here you get support for Linear Technology LTC4222
Dual Hot Swap Controller I2C interface.
@@ -761,7 +752,6 @@ config SENSORS_LTC4222
config SENSORS_LTC4245
tristate "Linear Technology LTC4245"
depends on I2C
- default n
help
If you say yes here you get support for Linear Technology LTC4245
Multiple Supply Hot Swap Controller I2C interface.
@@ -773,7 +763,6 @@ config SENSORS_LTC4260
tristate "Linear Technology LTC4260"
depends on I2C
select REGMAP_I2C
- default n
help
If you say yes here you get support for Linear Technology LTC4260
Positive Voltage Hot Swap Controller I2C interface.
@@ -784,7 +773,6 @@ config SENSORS_LTC4260
config SENSORS_LTC4261
tristate "Linear Technology LTC4261"
depends on I2C
- default n
help
If you say yes here you get support for Linear Technology LTC4261
Negative Voltage Hot Swap Controller I2C interface.
@@ -1276,7 +1264,6 @@ config SENSORS_NSA320
config SENSORS_PCF8591
tristate "Philips PCF8591 ADC/DAC"
depends on I2C
- default n
help
If you say yes here you get support for Philips PCF8591 4-channel
ADC, 1-channel DAC chips.
@@ -1459,7 +1446,6 @@ config SENSORS_SMSC47B397
config SENSORS_SCH56XX_COMMON
tristate
- default n
config SENSORS_SCH5627
tristate "SMSC SCH5627"
@@ -1505,7 +1491,6 @@ config SENSORS_STTS751
config SENSORS_SMM665
tristate "Summit Microelectronics SMM665"
depends on I2C
- default n
help
If you say yes here you get support for the hardware monitoring
features of the Summit Microelectronics SMM665/SMM665B Six-Channel
@@ -1725,6 +1710,16 @@ config SENSORS_VT8231
This driver can also be built as a module. If so, the module
will be called vt8231.
+config SENSORS_W83773G
+ tristate "Nuvoton W83773G"
+ depends on I2C
+ help
+ If you say yes here you get support for the Nuvoton W83773G hardware
+ monitoring chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called w83773g.
+
config SENSORS_W83781D
tristate "Winbond W83781D, W83782D, W83783S, Asus AS99127F"
depends on I2C
@@ -1782,7 +1777,6 @@ config SENSORS_W83795
config SENSORS_W83795_FANCTRL
bool "Include automatic fan control support (DANGEROUS)"
depends on SENSORS_W83795
- default n
help
If you say yes here, support for automatic fan speed control
will be included in the driver.
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 0fe489fab663..f814b4ace138 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_SENSORS_ATK0110) += asus_atk0110.o
# asb100, then w83781d go first, as they can override other drivers' addresses.
obj-$(CONFIG_SENSORS_ASB100) += asb100.o
obj-$(CONFIG_SENSORS_W83627HF) += w83627hf.o
+obj-$(CONFIG_SENSORS_W83773G) += w83773g.o
obj-$(CONFIG_SENSORS_W83792D) += w83792d.o
obj-$(CONFIG_SENSORS_W83793) += w83793.o
obj-$(CONFIG_SENSORS_W83795) += w83795.o
diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c
index 63a95e23ca81..693a3d53cab5 100644
--- a/drivers/hwmon/aspeed-pwm-tacho.c
+++ b/drivers/hwmon/aspeed-pwm-tacho.c
@@ -19,6 +19,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/reset.h>
#include <linux/sysfs.h>
#include <linux/thermal.h>
@@ -181,6 +182,7 @@ struct aspeed_cooling_device {
struct aspeed_pwm_tacho_data {
struct regmap *regmap;
+ struct reset_control *rst;
unsigned long clk_freq;
bool pwm_present[8];
bool fan_tach_present[16];
@@ -905,6 +907,13 @@ static int aspeed_create_fan(struct device *dev,
return 0;
}
+static void aspeed_pwm_tacho_remove(void *data)
+{
+ struct aspeed_pwm_tacho_data *priv = data;
+
+ reset_control_assert(priv->rst);
+}
+
static int aspeed_pwm_tacho_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -931,6 +940,19 @@ static int aspeed_pwm_tacho_probe(struct platform_device *pdev)
&aspeed_pwm_tacho_regmap_config);
if (IS_ERR(priv->regmap))
return PTR_ERR(priv->regmap);
+
+ priv->rst = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(priv->rst)) {
+ dev_err(dev,
+ "missing or invalid reset controller device tree entry");
+ return PTR_ERR(priv->rst);
+ }
+ reset_control_deassert(priv->rst);
+
+ ret = devm_add_action_or_reset(dev, aspeed_pwm_tacho_remove, priv);
+ if (ret)
+ return ret;
+
regmap_write(priv->regmap, ASPEED_PTCR_TACH_SOURCE, 0);
regmap_write(priv->regmap, ASPEED_PTCR_TACH_SOURCE_EXT, 0);
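
Registering aspeed_pwm_tacho_remove() via devm_add_action_or_reset() ties
reset_control_assert() to managed-resource teardown: if registration fails
the callback runs immediately and the error is returned, otherwise it runs
automatically on unbind, so the driver needs no explicit .remove handling for
the reset line. The general pattern, sketched with hypothetical names:

    static void undo_setup(void *data)
    {
            my_teardown(data);      /* e.g. assert a reset line */
    }

    /* in probe(), after my_setup() succeeded: */
    ret = devm_add_action_or_reset(dev, undo_setup, priv);
    if (ret)
            return ret;             /* undo_setup() has already run */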
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index c13a4fd86b3c..4bdbf77f7197 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -246,7 +246,8 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
int err;
u32 eax, edx;
int i;
- struct pci_dev *host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
+ u16 devfn = PCI_DEVFN(0, 0);
+ struct pci_dev *host_bridge = pci_get_domain_bus_and_slot(0, 0, devfn);
/*
* Explicit tjmax table entries override heuristics.
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index c7c9e95e58a8..bf3bb7e1adab 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -76,6 +76,7 @@ static uint i8k_fan_mult = I8K_FAN_MULT;
static uint i8k_pwm_mult;
static uint i8k_fan_max = I8K_FAN_HIGH;
static bool disallow_fan_type_call;
+static bool disallow_fan_support;
#define I8K_HWMON_HAVE_TEMP1 (1 << 0)
#define I8K_HWMON_HAVE_TEMP2 (1 << 1)
@@ -242,6 +243,9 @@ static int i8k_get_fan_status(int fan)
{
struct smm_regs regs = { .eax = I8K_SMM_GET_FAN, };
+ if (disallow_fan_support)
+ return -EINVAL;
+
regs.ebx = fan & 0xff;
return i8k_smm(&regs) ? : regs.eax & 0xff;
}
@@ -253,6 +257,9 @@ static int i8k_get_fan_speed(int fan)
{
struct smm_regs regs = { .eax = I8K_SMM_GET_SPEED, };
+ if (disallow_fan_support)
+ return -EINVAL;
+
regs.ebx = fan & 0xff;
return i8k_smm(&regs) ? : (regs.eax & 0xffff) * i8k_fan_mult;
}
@@ -264,7 +271,7 @@ static int _i8k_get_fan_type(int fan)
{
struct smm_regs regs = { .eax = I8K_SMM_GET_FAN_TYPE, };
- if (disallow_fan_type_call)
+ if (disallow_fan_support || disallow_fan_type_call)
return -EINVAL;
regs.ebx = fan & 0xff;
@@ -289,6 +296,9 @@ static int i8k_get_fan_nominal_speed(int fan, int speed)
{
struct smm_regs regs = { .eax = I8K_SMM_GET_NOM_SPEED, };
+ if (disallow_fan_support)
+ return -EINVAL;
+
regs.ebx = (fan & 0xff) | (speed << 8);
return i8k_smm(&regs) ? : (regs.eax & 0xffff) * i8k_fan_mult;
}
@@ -300,6 +310,9 @@ static int i8k_set_fan(int fan, int speed)
{
struct smm_regs regs = { .eax = I8K_SMM_SET_FAN, };
+ if (disallow_fan_support)
+ return -EINVAL;
+
speed = (speed < 0) ? 0 : ((speed > i8k_fan_max) ? i8k_fan_max : speed);
regs.ebx = (fan & 0xff) | (speed << 8);
@@ -772,6 +785,8 @@ static struct attribute *i8k_attrs[] = {
static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
int index)
{
+ if (disallow_fan_support && index >= 8)
+ return 0;
if (disallow_fan_type_call &&
(index == 9 || index == 12 || index == 15))
return 0;
@@ -1039,6 +1054,30 @@ static const struct dmi_system_id i8k_blacklist_fan_type_dmi_table[] __initconst
};
/*
+ * On some machines all fan related SMM functions implemented by Dell BIOS
+ * firmware freeze the kernel for about 500ms. Until Dell fixes these
+ * problems, fan support for the affected blacklisted Dell machines stays
+ * disabled.
+ * See bug: https://bugzilla.kernel.org/show_bug.cgi?id=195751
+ */
+static struct dmi_system_id i8k_blacklist_fan_support_dmi_table[] __initdata = {
+ {
+ .ident = "Dell Inspiron 7720",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Inspiron 7720"),
+ },
+ },
+ {
+ .ident = "Dell Vostro 3360",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Vostro 3360"),
+ },
+ },
+ { }
+};
+
+/*
* Probe for the presence of a supported laptop.
*/
static int __init i8k_probe(void)
@@ -1060,8 +1099,17 @@ static int __init i8k_probe(void)
i8k_get_dmi_data(DMI_BIOS_VERSION));
}
- if (dmi_check_system(i8k_blacklist_fan_type_dmi_table))
- disallow_fan_type_call = true;
+ if (dmi_check_system(i8k_blacklist_fan_support_dmi_table)) {
+ pr_warn("broken Dell BIOS detected, disallow fan support\n");
+ if (!force)
+ disallow_fan_support = true;
+ }
+
+ if (dmi_check_system(i8k_blacklist_fan_type_dmi_table)) {
+ pr_warn("broken Dell BIOS detected, disallow fan type call\n");
+ if (!force)
+ disallow_fan_type_call = true;
+ }
strlcpy(bios_version, i8k_get_dmi_data(DMI_BIOS_VERSION),
sizeof(bios_version));
diff --git a/drivers/hwmon/hih6130.c b/drivers/hwmon/hih6130.c
index 7b73d2002d3e..0ae1ee1dbf76 100644
--- a/drivers/hwmon/hih6130.c
+++ b/drivers/hwmon/hih6130.c
@@ -37,7 +37,7 @@
/**
* struct hih6130 - HIH-6130 device specific data
- * @hwmon_dev: device registered with hwmon
+ * @client: pointer to I2C client device
* @lock: mutex to protect measurement values
* @valid: only false before first measurement is taken
* @last_update: time of last update (jiffies)
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index af5123042990..32083e452cde 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -678,7 +678,7 @@ EXPORT_SYMBOL_GPL(hwmon_device_register_with_groups);
* @dev: the parent device
* @name: hwmon name attribute
* @drvdata: driver data to attach to created device
- * @info: pointer to hwmon chip information
+ * @chip: pointer to hwmon chip information
* @extra_groups: pointer to list of additional non-standard attribute groups
*
* hwmon_device_unregister() must be called when the device is no
@@ -785,11 +785,11 @@ EXPORT_SYMBOL_GPL(devm_hwmon_device_register_with_groups);
/**
* devm_hwmon_device_register_with_info - register w/ hwmon
- * @dev: the parent device
- * @name: hwmon name attribute
- * @drvdata: driver data to attach to created device
- * @info: Pointer to hwmon chip information
- * @groups - pointer to list of driver specific attribute groups
+ * @dev: the parent device
+ * @name: hwmon name attribute
+ * @drvdata: driver data to attach to created device
+ * @chip: pointer to hwmon chip information
+ * @groups: pointer to list of driver specific attribute groups
*
* Returns the pointer to the new device. The new device is automatically
* unregistered with the parent device.
diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
index f6a76679c650..5e5b32a1ec4b 100644
--- a/drivers/hwmon/iio_hwmon.c
+++ b/drivers/hwmon/iio_hwmon.c
@@ -23,7 +23,8 @@
* @channels: filled with array of channels from iio
* @num_channels: number of channels in channels (saves counting twice)
* @hwmon_dev: associated hwmon device
- * @attr_group: the group of attributes
+ * @attr_group: the group of attributes
+ * @groups: null terminated array of attribute groups
* @attrs: null terminated array of attribute pointers.
*/
struct iio_hwmon_state {
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index 62e38fa8cda2..e9e6aeabbf84 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -95,18 +95,20 @@ enum ina2xx_ids { ina219, ina226 };
struct ina2xx_config {
u16 config_default;
- int calibration_factor;
+ int calibration_value;
int registers;
int shunt_div;
int bus_voltage_shift;
int bus_voltage_lsb; /* uV */
- int power_lsb; /* uW */
+ int power_lsb_factor;
};
struct ina2xx_data {
const struct ina2xx_config *config;
long rshunt;
+ long current_lsb_uA;
+ long power_lsb_uW;
struct mutex config_lock;
struct regmap *regmap;
@@ -116,21 +118,21 @@ struct ina2xx_data {
static const struct ina2xx_config ina2xx_config[] = {
[ina219] = {
.config_default = INA219_CONFIG_DEFAULT,
- .calibration_factor = 40960000,
+ .calibration_value = 4096,
.registers = INA219_REGISTERS,
.shunt_div = 100,
.bus_voltage_shift = 3,
.bus_voltage_lsb = 4000,
- .power_lsb = 20000,
+ .power_lsb_factor = 20,
},
[ina226] = {
.config_default = INA226_CONFIG_DEFAULT,
- .calibration_factor = 5120000,
+ .calibration_value = 2048,
.registers = INA226_REGISTERS,
.shunt_div = 400,
.bus_voltage_shift = 0,
.bus_voltage_lsb = 1250,
- .power_lsb = 25000,
+ .power_lsb_factor = 25,
},
};
@@ -169,12 +171,16 @@ static u16 ina226_interval_to_reg(int interval)
return INA226_SHIFT_AVG(avg_bits);
}
+/*
+ * The calibration register is set to the best value, which eliminates
+ * truncation errors when the hardware calculates the current register.
+ * According to the datasheet (eq. 3) the best values are 2048 for the
+ * ina226 and 4096 for the ina219; they are hardcoded as calibration_value.
+ */
static int ina2xx_calibrate(struct ina2xx_data *data)
{
- u16 val = DIV_ROUND_CLOSEST(data->config->calibration_factor,
- data->rshunt);
-
- return regmap_write(data->regmap, INA2XX_CALIBRATION, val);
+ return regmap_write(data->regmap, INA2XX_CALIBRATION,
+ data->config->calibration_value);
}
/*
@@ -187,10 +193,6 @@ static int ina2xx_init(struct ina2xx_data *data)
if (ret < 0)
return ret;
- /*
- * Set current LSB to 1mA, shunt is in uOhms
- * (equation 13 in datasheet).
- */
return ina2xx_calibrate(data);
}
@@ -268,15 +270,15 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg,
val = DIV_ROUND_CLOSEST(val, 1000);
break;
case INA2XX_POWER:
- val = regval * data->config->power_lsb;
+ val = regval * data->power_lsb_uW;
break;
case INA2XX_CURRENT:
- /* signed register, LSB=1mA (selected), in mA */
- val = (s16)regval;
+ /* signed register, result in mA */
+ val = regval * data->current_lsb_uA;
+ val = DIV_ROUND_CLOSEST(val, 1000);
break;
case INA2XX_CALIBRATION:
- val = DIV_ROUND_CLOSEST(data->config->calibration_factor,
- regval);
+ val = regval;
break;
default:
/* programmer goofed */
@@ -304,9 +306,32 @@ static ssize_t ina2xx_show_value(struct device *dev,
ina2xx_get_value(data, attr->index, regval));
}
-static ssize_t ina2xx_set_shunt(struct device *dev,
- struct device_attribute *da,
- const char *buf, size_t count)
+/*
+ * To keep the calibration register value fixed, the product of
+ * current_lsb and shunt_resistor must also stay fixed, equal to
+ * shunt_voltage_lsb = 1 / shunt_div, multiplied by 10^9 to preserve
+ * the scale.
+ */
+static int ina2xx_set_shunt(struct ina2xx_data *data, long val)
+{
+ unsigned int dividend = DIV_ROUND_CLOSEST(1000000000,
+ data->config->shunt_div);
+ if (val <= 0 || val > dividend)
+ return -EINVAL;
+
+ mutex_lock(&data->config_lock);
+ data->rshunt = val;
+ data->current_lsb_uA = DIV_ROUND_CLOSEST(dividend, val);
+ data->power_lsb_uW = data->config->power_lsb_factor *
+ data->current_lsb_uA;
+ mutex_unlock(&data->config_lock);
+
+ return 0;
+}
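+
+/*
+ * Worked example (illustrative, values assumed): for the ina226,
+ * shunt_div = 400, so the shunt voltage LSB is 2.5 uV and dividend =
+ * 10^9 / 400 = 2500000. A 10 mOhm shunt (val = 10000 uOhm) then gives
+ * current_lsb_uA = 2500000 / 10000 = 250 and power_lsb_uW = 25 * 250
+ * = 6250, while the calibration register stays at the fixed 2048.
+ */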
+
+static ssize_t ina2xx_store_shunt(struct device *dev,
+ struct device_attribute *da,
+ const char *buf, size_t count)
{
unsigned long val;
int status;
@@ -316,18 +341,9 @@ static ssize_t ina2xx_set_shunt(struct device *dev,
if (status < 0)
return status;
- if (val == 0 ||
- /* Values greater than the calibration factor make no sense. */
- val > data->config->calibration_factor)
- return -EINVAL;
-
- mutex_lock(&data->config_lock);
- data->rshunt = val;
- status = ina2xx_calibrate(data);
- mutex_unlock(&data->config_lock);
+ status = ina2xx_set_shunt(data, val);
if (status < 0)
return status;
-
return count;
}
@@ -387,7 +403,7 @@ static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL,
/* shunt resistance */
static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR,
- ina2xx_show_value, ina2xx_set_shunt,
+ ina2xx_show_value, ina2xx_store_shunt,
INA2XX_CALIBRATION);
/* update interval (ina226 only) */
@@ -438,6 +454,7 @@ static int ina2xx_probe(struct i2c_client *client,
/* set the device type */
data->config = &ina2xx_config[chip];
+ mutex_init(&data->config_lock);
if (of_property_read_u32(dev->of_node, "shunt-resistor", &val) < 0) {
struct ina2xx_platform_data *pdata = dev_get_platdata(dev);
@@ -448,10 +465,7 @@ static int ina2xx_probe(struct i2c_client *client,
val = INA2XX_RSHUNT_DEFAULT;
}
- if (val <= 0 || val > data->config->calibration_factor)
- return -ENODEV;
-
- data->rshunt = val;
+ ina2xx_set_shunt(data, val);
ina2xx_regmap_config.max_register = data->config->registers;
@@ -467,8 +481,6 @@ static int ina2xx_probe(struct i2c_client *client,
return -ENODEV;
}
- mutex_init(&data->config_lock);
-
data->groups[group++] = &ina2xx_group;
if (id->driver_data == ina226)
data->groups[group++] = &ina226_group;
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 0721e175664a..06b4e1c78bd8 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -86,6 +86,7 @@ static const struct tctl_offset tctl_offset_table[] = {
{ 0x17, "AMD Ryzen 7 1800X", 20000 },
{ 0x17, "AMD Ryzen Threadripper 1950X", 27000 },
{ 0x17, "AMD Ryzen Threadripper 1920X", 27000 },
+ { 0x17, "AMD Ryzen Threadripper 1900X", 27000 },
{ 0x17, "AMD Ryzen Threadripper 1950", 10000 },
{ 0x17, "AMD Ryzen Threadripper 1920", 10000 },
{ 0x17, "AMD Ryzen Threadripper 1910", 10000 },
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 005ffb5ffa92..49f4b33a5685 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -100,7 +100,7 @@ static int lm75_read(struct device *dev, enum hwmon_sensor_types type,
switch (attr) {
case hwmon_chip_update_interval:
*val = data->sample_time;
- break;;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 08479006c7f9..6e4298e99222 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -39,6 +39,7 @@ config SENSORS_ADM1275
config SENSORS_IBM_CFFPS
tristate "IBM Common Form Factor Power Supply"
+ depends on LEDS_CLASS
help
If you say yes here you get hardware monitoring support for the IBM
Common Form Factor power supply.
diff --git a/drivers/hwmon/pmbus/ibm-cffps.c b/drivers/hwmon/pmbus/ibm-cffps.c
index cb56da6834e5..93d9a9ea112b 100644
--- a/drivers/hwmon/pmbus/ibm-cffps.c
+++ b/drivers/hwmon/pmbus/ibm-cffps.c
@@ -8,12 +8,29 @@
*/
#include <linux/bitops.h>
+#include <linux/debugfs.h>
#include <linux/device.h>
+#include <linux/fs.h>
#include <linux/i2c.h>
+#include <linux/jiffies.h>
+#include <linux/leds.h>
#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pmbus.h>
#include "pmbus.h"
+#define CFFPS_FRU_CMD 0x9A
+#define CFFPS_PN_CMD 0x9B
+#define CFFPS_SN_CMD 0x9E
+#define CFFPS_CCIN_CMD 0xBD
+#define CFFPS_FW_CMD_START 0xFA
+#define CFFPS_FW_NUM_BYTES 4
+#define CFFPS_SYS_CONFIG_CMD 0xDA
+
+#define CFFPS_INPUT_HISTORY_CMD 0xD6
+#define CFFPS_INPUT_HISTORY_SIZE 100
+
/* STATUS_MFR_SPECIFIC bits */
#define CFFPS_MFR_FAN_FAULT BIT(0)
#define CFFPS_MFR_THERMAL_FAULT BIT(1)
@@ -24,6 +41,153 @@
#define CFFPS_MFR_VAUX_FAULT BIT(6)
#define CFFPS_MFR_CURRENT_SHARE_WARNING BIT(7)
+#define CFFPS_LED_BLINK BIT(0)
+#define CFFPS_LED_ON BIT(1)
+#define CFFPS_LED_OFF BIT(2)
+#define CFFPS_BLINK_RATE_MS 250
+
+enum {
+ CFFPS_DEBUGFS_INPUT_HISTORY = 0,
+ CFFPS_DEBUGFS_FRU,
+ CFFPS_DEBUGFS_PN,
+ CFFPS_DEBUGFS_SN,
+ CFFPS_DEBUGFS_CCIN,
+ CFFPS_DEBUGFS_FW,
+ CFFPS_DEBUGFS_NUM_ENTRIES
+};
+
+struct ibm_cffps_input_history {
+ struct mutex update_lock;
+ unsigned long last_update;
+
+ u8 byte_count;
+ u8 data[CFFPS_INPUT_HISTORY_SIZE];
+};
+
+struct ibm_cffps {
+ struct i2c_client *client;
+
+ struct ibm_cffps_input_history input_history;
+
+ int debugfs_entries[CFFPS_DEBUGFS_NUM_ENTRIES];
+
+ char led_name[32];
+ u8 led_state;
+ struct led_classdev led;
+};
+
+#define to_psu(x, y) container_of((x), struct ibm_cffps, debugfs_entries[(y)])
+
+static ssize_t ibm_cffps_read_input_history(struct ibm_cffps *psu,
+ char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ int rc;
+ u8 msgbuf0[1] = { CFFPS_INPUT_HISTORY_CMD };
+ u8 msgbuf1[CFFPS_INPUT_HISTORY_SIZE + 1] = { 0 };
+ struct i2c_msg msg[2] = {
+ {
+ .addr = psu->client->addr,
+ .flags = psu->client->flags,
+ .len = 1,
+ .buf = msgbuf0,
+ }, {
+ .addr = psu->client->addr,
+ .flags = psu->client->flags | I2C_M_RD,
+ .len = CFFPS_INPUT_HISTORY_SIZE + 1,
+ .buf = msgbuf1,
+ },
+ };
+
+ if (!*ppos) {
+ mutex_lock(&psu->input_history.update_lock);
+ if (time_after(jiffies, psu->input_history.last_update + HZ)) {
+ /*
+ * Use a raw i2c transfer, since we need more bytes than
+ * the Linux SMBus interface supports per transfer (only 32).
+ */
+ rc = i2c_transfer(psu->client->adapter, msg, 2);
+ if (rc < 0) {
+ mutex_unlock(&psu->input_history.update_lock);
+ return rc;
+ }
+
+ psu->input_history.byte_count = msgbuf1[0];
+ memcpy(psu->input_history.data, &msgbuf1[1],
+ CFFPS_INPUT_HISTORY_SIZE);
+ psu->input_history.last_update = jiffies;
+ }
+
+ mutex_unlock(&psu->input_history.update_lock);
+ }
+
+ return simple_read_from_buffer(buf, count, ppos,
+ psu->input_history.data,
+ psu->input_history.byte_count);
+}
+
+static ssize_t ibm_cffps_debugfs_op(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ u8 cmd;
+ int i, rc;
+ int *idxp = file->private_data;
+ int idx = *idxp;
+ struct ibm_cffps *psu = to_psu(idxp, idx);
+ char data[I2C_SMBUS_BLOCK_MAX] = { 0 };
+
+ switch (idx) {
+ case CFFPS_DEBUGFS_INPUT_HISTORY:
+ return ibm_cffps_read_input_history(psu, buf, count, ppos);
+ case CFFPS_DEBUGFS_FRU:
+ cmd = CFFPS_FRU_CMD;
+ break;
+ case CFFPS_DEBUGFS_PN:
+ cmd = CFFPS_PN_CMD;
+ break;
+ case CFFPS_DEBUGFS_SN:
+ cmd = CFFPS_SN_CMD;
+ break;
+ case CFFPS_DEBUGFS_CCIN:
+ rc = i2c_smbus_read_word_swapped(psu->client, CFFPS_CCIN_CMD);
+ if (rc < 0)
+ return rc;
+
+ rc = snprintf(data, 5, "%04X", rc);
+ goto done;
+ case CFFPS_DEBUGFS_FW:
+ for (i = 0; i < CFFPS_FW_NUM_BYTES; ++i) {
+ rc = i2c_smbus_read_byte_data(psu->client,
+ CFFPS_FW_CMD_START + i);
+ if (rc < 0)
+ return rc;
+
+ snprintf(&data[i * 2], 3, "%02X", rc);
+ }
+
+ rc = i * 2;
+ goto done;
+ default:
+ return -EINVAL;
+ }
+
+ rc = i2c_smbus_read_block_data(psu->client, cmd, data);
+ if (rc < 0)
+ return rc;
+
+done:
+ data[rc] = '\n';
+ rc += 2;
+
+ return simple_read_from_buffer(buf, count, ppos, data, rc);
+}
+
+static const struct file_operations ibm_cffps_fops = {
+ .llseek = noop_llseek,
+ .read = ibm_cffps_debugfs_op,
+ .open = simple_open,
+};
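+
+/*
+ * A note on the to_psu() trick above (a minimal sketch, hypothetical
+ * names): each debugfs file receives a pointer to one slot of
+ * psu->debugfs_entries, the slot's value is its own index, and
+ * container_of() on that slot recovers the enclosing struct ibm_cffps:
+ *
+ *	struct owner { int idx[2]; };
+ *	int *p = &some_owner->idx[1];
+ *	struct owner *o = container_of(p, struct owner, idx[*p]);
+ */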
+
static int ibm_cffps_read_byte_data(struct i2c_client *client, int page,
int reg)
{
@@ -105,6 +269,69 @@ static int ibm_cffps_read_word_data(struct i2c_client *client, int page,
return rc;
}
+static void ibm_cffps_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ int rc;
+ struct ibm_cffps *psu = container_of(led_cdev, struct ibm_cffps, led);
+
+ if (brightness == LED_OFF) {
+ psu->led_state = CFFPS_LED_OFF;
+ } else {
+ brightness = LED_FULL;
+ if (psu->led_state != CFFPS_LED_BLINK)
+ psu->led_state = CFFPS_LED_ON;
+ }
+
+ rc = i2c_smbus_write_byte_data(psu->client, CFFPS_SYS_CONFIG_CMD,
+ psu->led_state);
+ if (rc < 0)
+ return;
+
+ led_cdev->brightness = brightness;
+}
+
+static int ibm_cffps_led_blink_set(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ int rc;
+ struct ibm_cffps *psu = container_of(led_cdev, struct ibm_cffps, led);
+
+ psu->led_state = CFFPS_LED_BLINK;
+
+ if (led_cdev->brightness == LED_OFF)
+ return 0;
+
+ rc = i2c_smbus_write_byte_data(psu->client, CFFPS_SYS_CONFIG_CMD,
+ CFFPS_LED_BLINK);
+ if (rc < 0)
+ return rc;
+
+ *delay_on = CFFPS_BLINK_RATE_MS;
+ *delay_off = CFFPS_BLINK_RATE_MS;
+
+ return 0;
+}
+
+static void ibm_cffps_create_led_class(struct ibm_cffps *psu)
+{
+ int rc;
+ struct i2c_client *client = psu->client;
+ struct device *dev = &client->dev;
+
+ snprintf(psu->led_name, sizeof(psu->led_name), "%s-%02x", client->name,
+ client->addr);
+ psu->led.name = psu->led_name;
+ psu->led.max_brightness = LED_FULL;
+ psu->led.brightness_set = ibm_cffps_led_brightness_set;
+ psu->led.blink_set = ibm_cffps_led_blink_set;
+
+ rc = devm_led_classdev_register(dev, &psu->led);
+ if (rc)
+ dev_warn(dev, "failed to register led class: %d\n", rc);
+}
+
static struct pmbus_driver_info ibm_cffps_info = {
.pages = 1,
.func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT |
@@ -116,10 +343,69 @@ static struct pmbus_driver_info ibm_cffps_info = {
.read_word_data = ibm_cffps_read_word_data,
};
+static struct pmbus_platform_data ibm_cffps_pdata = {
+ .flags = PMBUS_SKIP_STATUS_CHECK,
+};
+
static int ibm_cffps_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- return pmbus_do_probe(client, id, &ibm_cffps_info);
+ int i, rc;
+ struct dentry *debugfs;
+ struct dentry *ibm_cffps_dir;
+ struct ibm_cffps *psu;
+
+ client->dev.platform_data = &ibm_cffps_pdata;
+ rc = pmbus_do_probe(client, id, &ibm_cffps_info);
+ if (rc)
+ return rc;
+
+ /*
+ * Don't fail the probe if there isn't enough memory for leds and
+ * debugfs.
+ */
+ psu = devm_kzalloc(&client->dev, sizeof(*psu), GFP_KERNEL);
+ if (!psu)
+ return 0;
+
+ psu->client = client;
+ mutex_init(&psu->input_history.update_lock);
+ psu->input_history.last_update = jiffies - HZ;
+
+ ibm_cffps_create_led_class(psu);
+
+ /* Don't fail the probe if we can't create debugfs */
+ debugfs = pmbus_get_debugfs_dir(client);
+ if (!debugfs)
+ return 0;
+
+ ibm_cffps_dir = debugfs_create_dir(client->name, debugfs);
+ if (!ibm_cffps_dir)
+ return 0;
+
+ for (i = 0; i < CFFPS_DEBUGFS_NUM_ENTRIES; ++i)
+ psu->debugfs_entries[i] = i;
+
+ debugfs_create_file("input_history", 0444, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_INPUT_HISTORY],
+ &ibm_cffps_fops);
+ debugfs_create_file("fru", 0444, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_FRU],
+ &ibm_cffps_fops);
+ debugfs_create_file("part_number", 0444, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_PN],
+ &ibm_cffps_fops);
+ debugfs_create_file("serial_number", 0444, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_SN],
+ &ibm_cffps_fops);
+ debugfs_create_file("ccin", 0444, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_CCIN],
+ &ibm_cffps_fops);
+ debugfs_create_file("fw_version", 0444, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_FW],
+ &ibm_cffps_fops);
+
+ return 0;
}
static const struct i2c_device_id ibm_cffps_id[] = {
diff --git a/drivers/hwmon/pmbus/ir35221.c b/drivers/hwmon/pmbus/ir35221.c
index 8b906b44484b..977315b0fd90 100644
--- a/drivers/hwmon/pmbus/ir35221.c
+++ b/drivers/hwmon/pmbus/ir35221.c
@@ -25,168 +25,19 @@
#define IR35221_MFR_IOUT_VALLEY 0xcb
#define IR35221_MFR_TEMP_VALLEY 0xcc
-static long ir35221_reg2data(int data, enum pmbus_sensor_classes class)
-{
- s16 exponent;
- s32 mantissa;
- long val;
-
- /* We only modify LINEAR11 formats */
- exponent = ((s16)data) >> 11;
- mantissa = ((s16)((data & 0x7ff) << 5)) >> 5;
-
- val = mantissa * 1000L;
-
- /* scale result to micro-units for power sensors */
- if (class == PSC_POWER)
- val = val * 1000L;
-
- if (exponent >= 0)
- val <<= exponent;
- else
- val >>= -exponent;
-
- return val;
-}
-
-#define MAX_MANTISSA (1023 * 1000)
-#define MIN_MANTISSA (511 * 1000)
-
-static u16 ir35221_data2reg(long val, enum pmbus_sensor_classes class)
-{
- s16 exponent = 0, mantissa;
- bool negative = false;
-
- if (val == 0)
- return 0;
-
- if (val < 0) {
- negative = true;
- val = -val;
- }
-
- /* Power is in uW. Convert to mW before converting. */
- if (class == PSC_POWER)
- val = DIV_ROUND_CLOSEST(val, 1000L);
-
- /* Reduce large mantissa until it fits into 10 bit */
- while (val >= MAX_MANTISSA && exponent < 15) {
- exponent++;
- val >>= 1;
- }
- /* Increase small mantissa to improve precision */
- while (val < MIN_MANTISSA && exponent > -15) {
- exponent--;
- val <<= 1;
- }
-
- /* Convert mantissa from milli-units to units */
- mantissa = DIV_ROUND_CLOSEST(val, 1000);
-
- /* Ensure that resulting number is within range */
- if (mantissa > 0x3ff)
- mantissa = 0x3ff;
-
- /* restore sign */
- if (negative)
- mantissa = -mantissa;
-
- /* Convert to 5 bit exponent, 11 bit mantissa */
- return (mantissa & 0x7ff) | ((exponent << 11) & 0xf800);
-}
-
-static u16 ir35221_scale_result(s16 data, int shift,
- enum pmbus_sensor_classes class)
-{
- long val;
-
- val = ir35221_reg2data(data, class);
-
- if (shift < 0)
- val >>= -shift;
- else
- val <<= shift;
-
- return ir35221_data2reg(val, class);
-}
-
static int ir35221_read_word_data(struct i2c_client *client, int page, int reg)
{
int ret;
switch (reg) {
- case PMBUS_IOUT_OC_FAULT_LIMIT:
- case PMBUS_IOUT_OC_WARN_LIMIT:
- ret = pmbus_read_word_data(client, page, reg);
- if (ret < 0)
- break;
- ret = ir35221_scale_result(ret, 1, PSC_CURRENT_OUT);
- break;
- case PMBUS_VIN_OV_FAULT_LIMIT:
- case PMBUS_VIN_OV_WARN_LIMIT:
- case PMBUS_VIN_UV_WARN_LIMIT:
- ret = pmbus_read_word_data(client, page, reg);
- ret = ir35221_scale_result(ret, -4, PSC_VOLTAGE_IN);
- break;
- case PMBUS_IIN_OC_WARN_LIMIT:
- ret = pmbus_read_word_data(client, page, reg);
- if (ret < 0)
- break;
- ret = ir35221_scale_result(ret, -1, PSC_CURRENT_IN);
- break;
- case PMBUS_READ_VIN:
- ret = pmbus_read_word_data(client, page, PMBUS_READ_VIN);
- if (ret < 0)
- break;
- ret = ir35221_scale_result(ret, -5, PSC_VOLTAGE_IN);
- break;
- case PMBUS_READ_IIN:
- ret = pmbus_read_word_data(client, page, PMBUS_READ_IIN);
- if (ret < 0)
- break;
- if (page == 0)
- ret = ir35221_scale_result(ret, -4, PSC_CURRENT_IN);
- else
- ret = ir35221_scale_result(ret, -5, PSC_CURRENT_IN);
- break;
- case PMBUS_READ_POUT:
- ret = pmbus_read_word_data(client, page, PMBUS_READ_POUT);
- if (ret < 0)
- break;
- ret = ir35221_scale_result(ret, -1, PSC_POWER);
- break;
- case PMBUS_READ_PIN:
- ret = pmbus_read_word_data(client, page, PMBUS_READ_PIN);
- if (ret < 0)
- break;
- ret = ir35221_scale_result(ret, -1, PSC_POWER);
- break;
- case PMBUS_READ_IOUT:
- ret = pmbus_read_word_data(client, page, PMBUS_READ_IOUT);
- if (ret < 0)
- break;
- if (page == 0)
- ret = ir35221_scale_result(ret, -1, PSC_CURRENT_OUT);
- else
- ret = ir35221_scale_result(ret, -2, PSC_CURRENT_OUT);
- break;
case PMBUS_VIRT_READ_VIN_MAX:
ret = pmbus_read_word_data(client, page, IR35221_MFR_VIN_PEAK);
- if (ret < 0)
- break;
- ret = ir35221_scale_result(ret, -5, PSC_VOLTAGE_IN);
break;
case PMBUS_VIRT_READ_VOUT_MAX:
ret = pmbus_read_word_data(client, page, IR35221_MFR_VOUT_PEAK);
break;
case PMBUS_VIRT_READ_IOUT_MAX:
ret = pmbus_read_word_data(client, page, IR35221_MFR_IOUT_PEAK);
- if (ret < 0)
- break;
- if (page == 0)
- ret = ir35221_scale_result(ret, -1, PSC_CURRENT_IN);
- else
- ret = ir35221_scale_result(ret, -2, PSC_CURRENT_IN);
break;
case PMBUS_VIRT_READ_TEMP_MAX:
ret = pmbus_read_word_data(client, page, IR35221_MFR_TEMP_PEAK);
@@ -194,9 +45,6 @@ static int ir35221_read_word_data(struct i2c_client *client, int page, int reg)
case PMBUS_VIRT_READ_VIN_MIN:
ret = pmbus_read_word_data(client, page,
IR35221_MFR_VIN_VALLEY);
- if (ret < 0)
- break;
- ret = ir35221_scale_result(ret, -5, PSC_VOLTAGE_IN);
break;
case PMBUS_VIRT_READ_VOUT_MIN:
ret = pmbus_read_word_data(client, page,
@@ -205,12 +53,6 @@ static int ir35221_read_word_data(struct i2c_client *client, int page, int reg)
case PMBUS_VIRT_READ_IOUT_MIN:
ret = pmbus_read_word_data(client, page,
IR35221_MFR_IOUT_VALLEY);
- if (ret < 0)
- break;
- if (page == 0)
- ret = ir35221_scale_result(ret, -1, PSC_CURRENT_IN);
- else
- ret = ir35221_scale_result(ret, -2, PSC_CURRENT_IN);
break;
case PMBUS_VIRT_READ_TEMP_MIN:
ret = pmbus_read_word_data(client, page,
@@ -224,36 +66,6 @@ static int ir35221_read_word_data(struct i2c_client *client, int page, int reg)
return ret;
}
-static int ir35221_write_word_data(struct i2c_client *client, int page, int reg,
- u16 word)
-{
- int ret;
- u16 val;
-
- switch (reg) {
- case PMBUS_IOUT_OC_FAULT_LIMIT:
- case PMBUS_IOUT_OC_WARN_LIMIT:
- val = ir35221_scale_result(word, -1, PSC_CURRENT_OUT);
- ret = pmbus_write_word_data(client, page, reg, val);
- break;
- case PMBUS_VIN_OV_FAULT_LIMIT:
- case PMBUS_VIN_OV_WARN_LIMIT:
- case PMBUS_VIN_UV_WARN_LIMIT:
- val = ir35221_scale_result(word, 4, PSC_VOLTAGE_IN);
- ret = pmbus_write_word_data(client, page, reg, val);
- break;
- case PMBUS_IIN_OC_WARN_LIMIT:
- val = ir35221_scale_result(word, 1, PSC_CURRENT_IN);
- ret = pmbus_write_word_data(client, page, reg, val);
- break;
- default:
- ret = -ENODATA;
- break;
- }
-
- return ret;
-}
-
static int ir35221_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -292,7 +104,6 @@ static int ir35221_probe(struct i2c_client *client,
if (!info)
return -ENOMEM;
- info->write_word_data = ir35221_write_word_data;
info->read_word_data = ir35221_read_word_data;
info->pages = 2;
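
/*
 * For reference, a minimal sketch (assumed helper name) of the LINEAR11
 * decoding the pmbus core already performs, which is what makes the
 * driver-local reg2data/data2reg helpers above removable: a LINEAR11
 * word holds a 5-bit signed exponent and an 11-bit signed mantissa.
 *
 *	static long linear11_to_milliunits(u16 word)
 *	{
 *		s16 exponent = (s16)word >> 11;
 *		s32 mantissa = ((s16)((word & 0x7ff) << 5)) >> 5;
 *		long val = mantissa * 1000L;
 *
 *		return exponent >= 0 ? val << exponent : val >> -exponent;
 *	}
 *
 * Worked example: 0xEBE8 decodes to exponent = -3, mantissa = 1000,
 * so the value is 1000 * 1000 >> 3 = 125000 milliunits, i.e. 125.
 */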
diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c
index 10d17fb8f283..53db78753a0d 100644
--- a/drivers/hwmon/pmbus/lm25066.c
+++ b/drivers/hwmon/pmbus/lm25066.c
@@ -1,5 +1,5 @@
/*
- * Hardware monitoring driver for LM25056 / LM25063 / LM25066 / LM5064 / LM5066
+ * Hardware monitoring driver for LM25056 / LM25066 / LM5064 / LM5066
*
* Copyright (c) 2011 Ericsson AB.
* Copyright (c) 2013 Guenter Roeck
@@ -28,7 +28,7 @@
#include <linux/i2c.h>
#include "pmbus.h"
-enum chips { lm25056, lm25063, lm25066, lm5064, lm5066, lm5066i };
+enum chips { lm25056, lm25066, lm5064, lm5066, lm5066i };
#define LM25066_READ_VAUX 0xd0
#define LM25066_MFR_READ_IIN 0xd1
@@ -53,11 +53,6 @@ enum chips { lm25056, lm25063, lm25066, lm5064, lm5066, lm5066i };
#define LM25056_MFR_STS_VAUX_OV_WARN BIT(1)
#define LM25056_MFR_STS_VAUX_UV_WARN BIT(0)
-/* LM25063 only */
-
-#define LM25063_READ_VOUT_MAX 0xe5
-#define LM25063_READ_VOUT_MIN 0xe6
-
struct __coeff {
short m, b, R;
};
@@ -122,36 +117,6 @@ static struct __coeff lm25066_coeff[6][PSC_NUM_CLASSES + 2] = {
.m = 16,
},
},
- [lm25063] = {
- [PSC_VOLTAGE_IN] = {
- .m = 16000,
- .R = -2,
- },
- [PSC_VOLTAGE_OUT] = {
- .m = 16000,
- .R = -2,
- },
- [PSC_CURRENT_IN] = {
- .m = 10000,
- .R = -2,
- },
- [PSC_CURRENT_IN_L] = {
- .m = 10000,
- .R = -2,
- },
- [PSC_POWER] = {
- .m = 5000,
- .R = -3,
- },
- [PSC_POWER_L] = {
- .m = 5000,
- .R = -3,
- },
- [PSC_TEMPERATURE] = {
- .m = 15596,
- .R = -3,
- },
- },
[lm5064] = {
[PSC_VOLTAGE_IN] = {
.m = 4611,
@@ -272,10 +237,6 @@ static int lm25066_read_word_data(struct i2c_client *client, int page, int reg)
/* VIN: 6.14 mV VAUX: 293 uV LSB */
ret = DIV_ROUND_CLOSEST(ret * 293, 6140);
break;
- case lm25063:
- /* VIN: 6.25 mV VAUX: 200.0 uV LSB */
- ret = DIV_ROUND_CLOSEST(ret * 20, 625);
- break;
case lm25066:
/* VIN: 4.54 mV VAUX: 283.2 uV LSB */
ret = DIV_ROUND_CLOSEST(ret * 2832, 45400);
@@ -330,24 +291,6 @@ static int lm25066_read_word_data(struct i2c_client *client, int page, int reg)
return ret;
}
-static int lm25063_read_word_data(struct i2c_client *client, int page, int reg)
-{
- int ret;
-
- switch (reg) {
- case PMBUS_VIRT_READ_VOUT_MAX:
- ret = pmbus_read_word_data(client, 0, LM25063_READ_VOUT_MAX);
- break;
- case PMBUS_VIRT_READ_VOUT_MIN:
- ret = pmbus_read_word_data(client, 0, LM25063_READ_VOUT_MIN);
- break;
- default:
- ret = lm25066_read_word_data(client, page, reg);
- break;
- }
- return ret;
-}
-
static int lm25056_read_word_data(struct i2c_client *client, int page, int reg)
{
int ret;
@@ -502,11 +445,6 @@ static int lm25066_probe(struct i2c_client *client,
info->read_word_data = lm25056_read_word_data;
info->read_byte_data = lm25056_read_byte_data;
data->rlimit = 0x0fff;
- } else if (data->id == lm25063) {
- info->func[0] |= PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
- | PMBUS_HAVE_POUT;
- info->read_word_data = lm25063_read_word_data;
- data->rlimit = 0xffff;
} else {
info->func[0] |= PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
info->read_word_data = lm25066_read_word_data;
@@ -543,7 +481,6 @@ static int lm25066_probe(struct i2c_client *client,
static const struct i2c_device_id lm25066_id[] = {
{"lm25056", lm25056},
- {"lm25063", lm25063},
{"lm25066", lm25066},
{"lm5064", lm5064},
{"lm5066", lm5066},
diff --git a/drivers/hwmon/pmbus/max31785.c b/drivers/hwmon/pmbus/max31785.c
index 9313849d5160..c9dc8799b5e1 100644
--- a/drivers/hwmon/pmbus/max31785.c
+++ b/drivers/hwmon/pmbus/max31785.c
@@ -16,12 +16,231 @@
enum max31785_regs {
MFR_REVISION = 0x9b,
+ MFR_FAN_CONFIG = 0xf1,
};
+#define MAX31785 0x3030
+#define MAX31785A 0x3040
+
+#define MFR_FAN_CONFIG_DUAL_TACH BIT(12)
+
#define MAX31785_NR_PAGES 23
+#define MAX31785_NR_FAN_PAGES 6
+
+static int max31785_read_byte_data(struct i2c_client *client, int page,
+ int reg)
+{
+ if (page < MAX31785_NR_PAGES)
+ return -ENODATA;
+
+ switch (reg) {
+ case PMBUS_VOUT_MODE:
+ return -ENOTSUPP;
+ case PMBUS_FAN_CONFIG_12:
+ return pmbus_read_byte_data(client, page - MAX31785_NR_PAGES,
+ reg);
+ }
+
+ return -ENODATA;
+}
+
+static int max31785_write_byte(struct i2c_client *client, int page, u8 value)
+{
+ if (page < MAX31785_NR_PAGES)
+ return -ENODATA;
+
+ return -ENOTSUPP;
+}
+
+static int max31785_read_long_data(struct i2c_client *client, int page,
+ int reg, u32 *data)
+{
+ unsigned char cmdbuf[1];
+ unsigned char rspbuf[4];
+ int rc;
+
+ struct i2c_msg msg[2] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = sizeof(cmdbuf),
+ .buf = cmdbuf,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = sizeof(rspbuf),
+ .buf = rspbuf,
+ },
+ };
+
+ cmdbuf[0] = reg;
+
+ rc = pmbus_set_page(client, page);
+ if (rc < 0)
+ return rc;
+
+ rc = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ if (rc < 0)
+ return rc;
+
+ *data = (rspbuf[0] << (0 * 8)) | (rspbuf[1] << (1 * 8)) |
+ (rspbuf[2] << (2 * 8)) | (rspbuf[3] << (3 * 8));
+
+ return rc;
+}
+
+static int max31785_get_pwm(struct i2c_client *client, int page)
+{
+ int rv;
+
+ rv = pmbus_get_fan_rate_device(client, page, 0, percent);
+ if (rv < 0)
+ return rv;
+ else if (rv >= 0x8000)
+ return 0;
+ else if (rv >= 0x2711)
+ return 0x2710;
+
+ return rv;
+}
+
+static int max31785_get_pwm_mode(struct i2c_client *client, int page)
+{
+ int config;
+ int command;
+
+ config = pmbus_read_byte_data(client, page, PMBUS_FAN_CONFIG_12);
+ if (config < 0)
+ return config;
+
+ command = pmbus_read_word_data(client, page, PMBUS_FAN_COMMAND_1);
+ if (command < 0)
+ return command;
+
+ if (config & PB_FAN_1_RPM)
+ return (command >= 0x8000) ? 3 : 2;
+
+ if (command >= 0x8000)
+ return 3;
+ else if (command >= 0x2711)
+ return 0;
+
+ return 1;
+}
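+
+/*
+ * The return values above follow the hwmon pwmX_enable convention
+ * (mapping assumed from the code): 0 = full speed, 1 = manual PWM
+ * control, 2 = manual RPM control, 3 = automatic fan control
+ * (FAN_COMMAND >= 0x8000).
+ */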
+
+static int max31785_read_word_data(struct i2c_client *client, int page,
+ int reg)
+{
+ u32 val;
+ int rv;
+
+ switch (reg) {
+ case PMBUS_READ_FAN_SPEED_1:
+ if (page < MAX31785_NR_PAGES)
+ return -ENODATA;
+
+ rv = max31785_read_long_data(client, page - MAX31785_NR_PAGES,
+ reg, &val);
+ if (rv < 0)
+ return rv;
+
+ rv = (val >> 16) & 0xffff;
+ break;
+ case PMBUS_FAN_COMMAND_1:
+ /*
+ * PMBUS_FAN_COMMAND_x is probed to decide whether to expose
+ * the fan control registers.
+ *
+ * Don't expose fan_target attribute for virtual pages.
+ */
+ rv = (page >= MAX31785_NR_PAGES) ? -ENOTSUPP : -ENODATA;
+ break;
+ case PMBUS_VIRT_PWM_1:
+ rv = max31785_get_pwm(client, page);
+ break;
+ case PMBUS_VIRT_PWM_ENABLE_1:
+ rv = max31785_get_pwm_mode(client, page);
+ break;
+ default:
+ rv = -ENODATA;
+ break;
+ }
+
+ return rv;
+}
+
+static inline u32 max31785_scale_pwm(u32 sensor_val)
+{
+ /*
+ * The datasheet describes the accepted value range for manual PWM as
+ * [0, 0x2710], while the hwmon pwmX sysfs interface accepts values in
+ * [0, 255]. The MAX31785 uses DIRECT mode to scale the FAN_COMMAND
+ * registers and in PWM mode the coefficients are m=1, b=0, R=2. The
+ * important observation here is that 0x2710 == 10000 == 100 * 100.
+ *
+ * R=2 (== 10^2 == 100) accounts for scaling the value provided at the
+ * sysfs interface into the required hardware resolution, but it does
+ * not yet yield a value that we can write to the device (this initial
+ * scaling is handled by pmbus_data2reg()). Multiplying by 100 below
+ * translates the parameter value into the percentage units required by
+ * PMBus, and dividing by 255 maps the hwmon pwmX range onto that
+ * scale, yielding the percentage value at the appropriate resolution
+ * for the hardware.
+ */
+ return (sensor_val * 100) / 255;
+}
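+
+/*
+ * Worked example (illustrative): a sysfs write of pwm1 = 255 is first
+ * scaled by pmbus_data2reg() with R = 2 to 255 * 100 = 25500; the
+ * function above then yields (25500 * 100) / 255 = 10000 = 0x2710,
+ * the full-scale FAN_COMMAND value.
+ */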
+
+static int max31785_pwm_enable(struct i2c_client *client, int page,
+ u16 word)
+{
+ int config = 0;
+ int rate;
+
+ switch (word) {
+ case 0:
+ rate = 0x7fff;
+ break;
+ case 1:
+ rate = pmbus_get_fan_rate_cached(client, page, 0, percent);
+ if (rate < 0)
+ return rate;
+ rate = max31785_scale_pwm(rate);
+ break;
+ case 2:
+ config = PB_FAN_1_RPM;
+ rate = pmbus_get_fan_rate_cached(client, page, 0, rpm);
+ if (rate < 0)
+ return rate;
+ break;
+ case 3:
+ rate = 0xffff;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return pmbus_update_fan(client, page, 0, config, PB_FAN_1_RPM, rate);
+}
+
+static int max31785_write_word_data(struct i2c_client *client, int page,
+ int reg, u16 word)
+{
+ switch (reg) {
+ case PMBUS_VIRT_PWM_1:
+ return pmbus_update_fan(client, page, 0, 0, PB_FAN_1_RPM,
+ max31785_scale_pwm(word));
+ case PMBUS_VIRT_PWM_ENABLE_1:
+ return max31785_pwm_enable(client, page, word);
+ default:
+ break;
+ }
+
+ return -ENODATA;
+}
#define MAX31785_FAN_FUNCS \
- (PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12)
+ (PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12 | PMBUS_HAVE_PWM12)
#define MAX31785_TEMP_FUNCS \
(PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP)
@@ -29,14 +248,26 @@ enum max31785_regs {
#define MAX31785_VOUT_FUNCS \
(PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT)
static const struct pmbus_driver_info max31785_info = {
.pages = MAX31785_NR_PAGES,
+ .write_word_data = max31785_write_word_data,
+ .read_byte_data = max31785_read_byte_data,
+ .read_word_data = max31785_read_word_data,
+ .write_byte = max31785_write_byte,
+
/* RPM */
.format[PSC_FAN] = direct,
.m[PSC_FAN] = 1,
.b[PSC_FAN] = 0,
.R[PSC_FAN] = 0,
+ /* PWM */
+ .format[PSC_PWM] = direct,
+ .m[PSC_PWM] = 1,
+ .b[PSC_PWM] = 0,
+ .R[PSC_PWM] = 2,
.func[0] = MAX31785_FAN_FUNCS,
.func[1] = MAX31785_FAN_FUNCS,
.func[2] = MAX31785_FAN_FUNCS,
@@ -72,13 +303,46 @@ static const struct pmbus_driver_info max31785_info = {
.func[22] = MAX31785_VOUT_FUNCS,
};
+static int max31785_configure_dual_tach(struct i2c_client *client,
+ struct pmbus_driver_info *info)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < MAX31785_NR_FAN_PAGES; i++) {
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, i);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_word_data(client, MFR_FAN_CONFIG);
+ if (ret < 0)
+ return ret;
+
+ if (ret & MFR_FAN_CONFIG_DUAL_TACH) {
+ int virtual = MAX31785_NR_PAGES + i;
+
+ info->pages = virtual + 1;
+ info->func[virtual] |= PMBUS_HAVE_FAN12;
+ info->func[virtual] |= PMBUS_PAGE_VIRTUAL;
+ }
+ }
+
+ return 0;
+}
+
static int max31785_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
struct pmbus_driver_info *info;
+ bool dual_tach = false;
s64 ret;
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
info = devm_kzalloc(dev, sizeof(struct pmbus_driver_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
@@ -89,6 +353,25 @@ static int max31785_probe(struct i2c_client *client,
if (ret < 0)
return ret;
+ ret = i2c_smbus_read_word_data(client, MFR_REVISION);
+ if (ret < 0)
+ return ret;
+
+ if (ret == MAX31785A) {
+ dual_tach = true;
+ } else if (ret == MAX31785) {
+ if (!strcmp("max31785a", id->name))
+ dev_warn(dev, "Expected max31785a, found max31785: cannot provide secondary tachometer readings\n");
+ } else {
+ return -ENODEV;
+ }
+
+ if (dual_tach) {
+ ret = max31785_configure_dual_tach(client, info);
+ if (ret < 0)
+ return ret;
+ }
+
return pmbus_do_probe(client, id, info);
}
@@ -100,9 +383,18 @@ static const struct i2c_device_id max31785_id[] = {
MODULE_DEVICE_TABLE(i2c, max31785_id);
+static const struct of_device_id max31785_of_match[] = {
+ { .compatible = "maxim,max31785" },
+ { .compatible = "maxim,max31785a" },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, max31785_of_match);
+
static struct i2c_driver max31785_driver = {
.driver = {
.name = "max31785",
+ .of_match_table = max31785_of_match,
},
.probe = max31785_probe,
.remove = pmbus_do_remove,
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index fa613bd209e3..1d24397d36ec 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -190,6 +190,33 @@ enum pmbus_regs {
PMBUS_VIRT_VMON_UV_FAULT_LIMIT,
PMBUS_VIRT_VMON_OV_FAULT_LIMIT,
PMBUS_VIRT_STATUS_VMON,
+
+ /*
+ * RPM and PWM Fan control
+ *
+ * Drivers wanting to expose PWM control must define the behaviour of
+ * PMBUS_VIRT_PWM_[1-4] and PMBUS_VIRT_PWM_ENABLE_[1-4] in the
+ * {read,write}_word_data callback.
+ *
+ * pmbus core provides a default implementation for
+ * PMBUS_VIRT_FAN_TARGET_[1-4].
+ *
+ * TARGET, PWM and PWM_ENABLE members must be defined sequentially;
+ * pmbus core uses the difference between the provided register and
+ * its _1 counterpart to calculate the FAN/PWM ID.
+ */
+ PMBUS_VIRT_FAN_TARGET_1,
+ PMBUS_VIRT_FAN_TARGET_2,
+ PMBUS_VIRT_FAN_TARGET_3,
+ PMBUS_VIRT_FAN_TARGET_4,
+ PMBUS_VIRT_PWM_1,
+ PMBUS_VIRT_PWM_2,
+ PMBUS_VIRT_PWM_3,
+ PMBUS_VIRT_PWM_4,
+ PMBUS_VIRT_PWM_ENABLE_1,
+ PMBUS_VIRT_PWM_ENABLE_2,
+ PMBUS_VIRT_PWM_ENABLE_3,
+ PMBUS_VIRT_PWM_ENABLE_4,
};
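
/*
 * A minimal driver-side sketch (hypothetical foo_* names) of the
 * contract described above; the fan/PWM ID is the offset of the
 * register from its _1 counterpart:
 *
 *	static int foo_read_word_data(struct i2c_client *client,
 *				      int page, int reg)
 *	{
 *		if (reg >= PMBUS_VIRT_PWM_1 && reg <= PMBUS_VIRT_PWM_4)
 *			return foo_get_pwm(client, page,
 *					   reg - PMBUS_VIRT_PWM_1);
 *		return -ENODATA;	// defer to the pmbus core
 *	}
 */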
/*
@@ -223,6 +250,8 @@ enum pmbus_regs {
#define PB_FAN_1_RPM BIT(6)
#define PB_FAN_1_INSTALLED BIT(7)
+enum pmbus_fan_mode { percent = 0, rpm };
+
/*
* STATUS_BYTE, STATUS_WORD (lower)
*/
@@ -313,6 +342,7 @@ enum pmbus_sensor_classes {
PSC_POWER,
PSC_TEMPERATURE,
PSC_FAN,
+ PSC_PWM,
PSC_NUM_CLASSES /* Number of power sensor classes */
};
@@ -339,6 +369,10 @@ enum pmbus_sensor_classes {
#define PMBUS_HAVE_STATUS_FAN34 BIT(17)
#define PMBUS_HAVE_VMON BIT(18)
#define PMBUS_HAVE_STATUS_VMON BIT(19)
+#define PMBUS_HAVE_PWM12 BIT(20)
+#define PMBUS_HAVE_PWM34 BIT(21)
+
+#define PMBUS_PAGE_VIRTUAL BIT(31)
enum pmbus_data_format { linear = 0, direct, vid };
enum vrm_version { vr11 = 0, vr12, vr13 };
@@ -421,5 +455,12 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
int pmbus_do_remove(struct i2c_client *client);
const struct pmbus_driver_info *pmbus_get_driver_info(struct i2c_client
*client);
+int pmbus_get_fan_rate_device(struct i2c_client *client, int page, int id,
+ enum pmbus_fan_mode mode);
+int pmbus_get_fan_rate_cached(struct i2c_client *client, int page, int id,
+ enum pmbus_fan_mode mode);
+int pmbus_update_fan(struct i2c_client *client, int page, int id,
+ u8 config, u8 mask, u16 command);
+struct dentry *pmbus_get_debugfs_dir(struct i2c_client *client);
#endif /* PMBUS_H */
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index a139940cd991..f7c47d7994e7 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -65,6 +65,7 @@ struct pmbus_sensor {
u16 reg; /* register */
enum pmbus_sensor_classes class; /* sensor class */
bool update; /* runtime sensor update needed */
+ bool convert; /* Whether or not to apply linear/vid/direct */
int data; /* Sensor data.
Negative if there was a read error */
};
@@ -129,6 +130,27 @@ struct pmbus_debugfs_entry {
u8 reg;
};
+static const int pmbus_fan_rpm_mask[] = {
+ PB_FAN_1_RPM,
+ PB_FAN_2_RPM,
+ PB_FAN_1_RPM,
+ PB_FAN_2_RPM,
+};
+
+static const int pmbus_fan_config_registers[] = {
+ PMBUS_FAN_CONFIG_12,
+ PMBUS_FAN_CONFIG_12,
+ PMBUS_FAN_CONFIG_34,
+ PMBUS_FAN_CONFIG_34
+};
+
+static const int pmbus_fan_command_registers[] = {
+ PMBUS_FAN_COMMAND_1,
+ PMBUS_FAN_COMMAND_2,
+ PMBUS_FAN_COMMAND_3,
+ PMBUS_FAN_COMMAND_4,
+};
+
void pmbus_clear_cache(struct i2c_client *client)
{
struct pmbus_data *data = i2c_get_clientdata(client);
@@ -140,18 +162,27 @@ EXPORT_SYMBOL_GPL(pmbus_clear_cache);
int pmbus_set_page(struct i2c_client *client, int page)
{
struct pmbus_data *data = i2c_get_clientdata(client);
- int rv = 0;
- int newpage;
+ int rv;
- if (page >= 0 && page != data->currpage) {
+ if (page < 0 || page == data->currpage)
+ return 0;
+
+ if (!(data->info->func[page] & PMBUS_PAGE_VIRTUAL)) {
rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
- newpage = i2c_smbus_read_byte_data(client, PMBUS_PAGE);
- if (newpage != page)
- rv = -EIO;
- else
- data->currpage = page;
+ if (rv < 0)
+ return rv;
+
+ rv = i2c_smbus_read_byte_data(client, PMBUS_PAGE);
+ if (rv < 0)
+ return rv;
+
+ if (rv != page)
+ return -EIO;
}
- return rv;
+
+ data->currpage = page;
+
+ return 0;
}
EXPORT_SYMBOL_GPL(pmbus_set_page);
@@ -198,6 +229,28 @@ int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg,
}
EXPORT_SYMBOL_GPL(pmbus_write_word_data);
+
+static int pmbus_write_virt_reg(struct i2c_client *client, int page, int reg,
+ u16 word)
+{
+ int bit;
+ int id;
+ int rv;
+
+ switch (reg) {
+ case PMBUS_VIRT_FAN_TARGET_1 ... PMBUS_VIRT_FAN_TARGET_4:
+ id = reg - PMBUS_VIRT_FAN_TARGET_1;
+ bit = pmbus_fan_rpm_mask[id];
+ rv = pmbus_update_fan(client, page, id, bit, bit, word);
+ break;
+ default:
+ rv = -ENXIO;
+ break;
+ }
+
+ return rv;
+}
+
/*
* _pmbus_write_word_data() is similar to pmbus_write_word_data(), but checks if
* a device specific mapping function exists and calls it if necessary.
@@ -214,11 +267,38 @@ static int _pmbus_write_word_data(struct i2c_client *client, int page, int reg,
if (status != -ENODATA)
return status;
}
+
if (reg >= PMBUS_VIRT_BASE)
- return -ENXIO;
+ return pmbus_write_virt_reg(client, page, reg, word);
+
return pmbus_write_word_data(client, page, reg, word);
}
+int pmbus_update_fan(struct i2c_client *client, int page, int id,
+ u8 config, u8 mask, u16 command)
+{
+ int from;
+ int rv;
+ u8 to;
+
+ from = pmbus_read_byte_data(client, page,
+ pmbus_fan_config_registers[id]);
+ if (from < 0)
+ return from;
+
+ to = (from & ~mask) | (config & mask);
+ if (to != from) {
+ rv = pmbus_write_byte_data(client, page,
+ pmbus_fan_config_registers[id], to);
+ if (rv < 0)
+ return rv;
+ }
+
+ return _pmbus_write_word_data(client, page,
+ pmbus_fan_command_registers[id], command);
+}
+EXPORT_SYMBOL_GPL(pmbus_update_fan);
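+
+/*
+ * Usage sketch (illustrative values): switch fan 0 on page 0 into RPM
+ * mode and request 3000 RPM. The RPM bit serves as both config value
+ * and mask, so only that bit of FAN_CONFIG is rewritten before the new
+ * FAN_COMMAND is sent:
+ *
+ *	ret = pmbus_update_fan(client, 0, 0, PB_FAN_1_RPM,
+ *			       PB_FAN_1_RPM, 3000);
+ */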
+
int pmbus_read_word_data(struct i2c_client *client, int page, u8 reg)
{
int rv;
@@ -231,6 +311,24 @@ int pmbus_read_word_data(struct i2c_client *client, int page, u8 reg)
}
EXPORT_SYMBOL_GPL(pmbus_read_word_data);
+static int pmbus_read_virt_reg(struct i2c_client *client, int page, int reg)
+{
+ int rv;
+ int id;
+
+ switch (reg) {
+ case PMBUS_VIRT_FAN_TARGET_1 ... PMBUS_VIRT_FAN_TARGET_4:
+ id = reg - PMBUS_VIRT_FAN_TARGET_1;
+ rv = pmbus_get_fan_rate_device(client, page, id, rpm);
+ break;
+ default:
+ rv = -ENXIO;
+ break;
+ }
+
+ return rv;
+}
+
/*
* _pmbus_read_word_data() is similar to pmbus_read_word_data(), but checks if
* a device specific mapping function exists and calls it if necessary.
@@ -246,8 +344,10 @@ static int _pmbus_read_word_data(struct i2c_client *client, int page, int reg)
if (status != -ENODATA)
return status;
}
+
if (reg >= PMBUS_VIRT_BASE)
- return -ENXIO;
+ return pmbus_read_virt_reg(client, page, reg);
+
return pmbus_read_word_data(client, page, reg);
}
@@ -312,6 +412,68 @@ static int _pmbus_read_byte_data(struct i2c_client *client, int page, int reg)
return pmbus_read_byte_data(client, page, reg);
}
+static struct pmbus_sensor *pmbus_find_sensor(struct pmbus_data *data, int page,
+ int reg)
+{
+ struct pmbus_sensor *sensor;
+
+ for (sensor = data->sensors; sensor; sensor = sensor->next) {
+ if (sensor->page == page && sensor->reg == reg)
+ return sensor;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static int pmbus_get_fan_rate(struct i2c_client *client, int page, int id,
+ enum pmbus_fan_mode mode,
+ bool from_cache)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ bool want_rpm, have_rpm;
+ struct pmbus_sensor *s;
+ int config;
+ int reg;
+
+ want_rpm = (mode == rpm);
+
+ if (from_cache) {
+ reg = want_rpm ? PMBUS_VIRT_FAN_TARGET_1 : PMBUS_VIRT_PWM_1;
+ s = pmbus_find_sensor(data, page, reg + id);
+ if (IS_ERR(s))
+ return PTR_ERR(s);
+
+ return s->data;
+ }
+
+ config = pmbus_read_byte_data(client, page,
+ pmbus_fan_config_registers[id]);
+ if (config < 0)
+ return config;
+
+ have_rpm = !!(config & pmbus_fan_rpm_mask[id]);
+ if (want_rpm == have_rpm)
+ return pmbus_read_word_data(client, page,
+ pmbus_fan_command_registers[id]);
+
+ /* Can't sensibly map between RPM and PWM, just return zero */
+ return 0;
+}
+
+int pmbus_get_fan_rate_device(struct i2c_client *client, int page, int id,
+ enum pmbus_fan_mode mode)
+{
+ return pmbus_get_fan_rate(client, page, id, mode, false);
+}
+EXPORT_SYMBOL_GPL(pmbus_get_fan_rate_device);
+
+int pmbus_get_fan_rate_cached(struct i2c_client *client, int page, int id,
+ enum pmbus_fan_mode mode)
+{
+ return pmbus_get_fan_rate(client, page, id, mode, true);
+}
+EXPORT_SYMBOL_GPL(pmbus_get_fan_rate_cached);
+
static void pmbus_clear_fault_page(struct i2c_client *client, int page)
{
_pmbus_write_byte(client, page, PMBUS_CLEAR_FAULTS);
@@ -513,7 +675,7 @@ static long pmbus_reg2data_direct(struct pmbus_data *data,
/* X = 1/m * (Y * 10^-R - b) */
R = -R;
/* scale result to milli-units for everything but fans */
- if (sensor->class != PSC_FAN) {
+ if (!(sensor->class == PSC_FAN || sensor->class == PSC_PWM)) {
R += 3;
b *= 1000;
}
@@ -568,6 +730,9 @@ static long pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor)
{
long val;
+ if (!sensor->convert)
+ return sensor->data;
+
switch (data->info->format[sensor->class]) {
case direct:
val = pmbus_reg2data_direct(data, sensor);
@@ -672,7 +837,7 @@ static u16 pmbus_data2reg_direct(struct pmbus_data *data,
}
/* Calculate Y = (m * X + b) * 10^R */
- if (sensor->class != PSC_FAN) {
+ if (!(sensor->class == PSC_FAN || sensor->class == PSC_PWM)) {
R -= 3; /* Adjust R and b for data in milli-units */
b *= 1000;
}
@@ -703,6 +868,9 @@ static u16 pmbus_data2reg(struct pmbus_data *data,
{
u16 regval;
+ if (!sensor->convert)
+ return val;
+
switch (data->info->format[sensor->class]) {
case direct:
regval = pmbus_data2reg_direct(data, sensor, val);
@@ -915,7 +1083,8 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
const char *name, const char *type,
int seq, int page, int reg,
enum pmbus_sensor_classes class,
- bool update, bool readonly)
+ bool update, bool readonly,
+ bool convert)
{
struct pmbus_sensor *sensor;
struct device_attribute *a;
@@ -925,12 +1094,18 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
return NULL;
a = &sensor->attribute;
- snprintf(sensor->name, sizeof(sensor->name), "%s%d_%s",
- name, seq, type);
+ if (type)
+ snprintf(sensor->name, sizeof(sensor->name), "%s%d_%s",
+ name, seq, type);
+ else
+ snprintf(sensor->name, sizeof(sensor->name), "%s%d",
+ name, seq);
+
sensor->page = page;
sensor->reg = reg;
sensor->class = class;
sensor->update = update;
+ sensor->convert = convert;
pmbus_dev_attr_init(a, sensor->name,
readonly ? S_IRUGO : S_IRUGO | S_IWUSR,
pmbus_show_sensor, pmbus_set_sensor);
@@ -1029,7 +1204,7 @@ static int pmbus_add_limit_attrs(struct i2c_client *client,
curr = pmbus_add_sensor(data, name, l->attr, index,
page, l->reg, attr->class,
attr->update || l->update,
- false);
+ false, true);
if (!curr)
return -ENOMEM;
if (l->sbit && (info->func[page] & attr->sfunc)) {
@@ -1068,7 +1243,7 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client,
return ret;
}
base = pmbus_add_sensor(data, name, "input", index, page, attr->reg,
- attr->class, true, true);
+ attr->class, true, true, true);
if (!base)
return -ENOMEM;
if (attr->sfunc) {
@@ -1592,13 +1767,6 @@ static const int pmbus_fan_registers[] = {
PMBUS_READ_FAN_SPEED_4
};
-static const int pmbus_fan_config_registers[] = {
- PMBUS_FAN_CONFIG_12,
- PMBUS_FAN_CONFIG_12,
- PMBUS_FAN_CONFIG_34,
- PMBUS_FAN_CONFIG_34
-};
-
static const int pmbus_fan_status_registers[] = {
PMBUS_STATUS_FAN_12,
PMBUS_STATUS_FAN_12,
@@ -1621,6 +1789,42 @@ static const u32 pmbus_fan_status_flags[] = {
};
/* Fans */
+
+/* Precondition: FAN_CONFIG_x_y and FAN_COMMAND_x must exist for the fan ID */
+static int pmbus_add_fan_ctrl(struct i2c_client *client,
+ struct pmbus_data *data, int index, int page, int id,
+ u8 config)
+{
+ struct pmbus_sensor *sensor;
+
+ sensor = pmbus_add_sensor(data, "fan", "target", index, page,
+ PMBUS_VIRT_FAN_TARGET_1 + id, PSC_FAN,
+ false, false, true);
+
+ if (!sensor)
+ return -ENOMEM;
+
+ if (!((data->info->func[page] & PMBUS_HAVE_PWM12) ||
+ (data->info->func[page] & PMBUS_HAVE_PWM34)))
+ return 0;
+
+ sensor = pmbus_add_sensor(data, "pwm", NULL, index, page,
+ PMBUS_VIRT_PWM_1 + id, PSC_PWM,
+ false, false, true);
+
+ if (!sensor)
+ return -ENOMEM;
+
+ sensor = pmbus_add_sensor(data, "pwm", "enable", index, page,
+ PMBUS_VIRT_PWM_ENABLE_1 + id, PSC_PWM,
+ true, false, false);
+
+ if (!sensor)
+ return -ENOMEM;
+
+ return 0;
+}
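+
+/*
+ * For fan sequence number "index" this creates up to three attributes:
+ * fan<index>_target (backed by PMBUS_VIRT_FAN_TARGET_x), and, when the
+ * page advertises PWM support, pwm<index> and pwm<index>_enable backed
+ * by the corresponding virtual PWM registers.
+ */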
+
static int pmbus_add_fan_attributes(struct i2c_client *client,
struct pmbus_data *data)
{
@@ -1655,9 +1859,18 @@ static int pmbus_add_fan_attributes(struct i2c_client *client,
if (pmbus_add_sensor(data, "fan", "input", index,
page, pmbus_fan_registers[f],
- PSC_FAN, true, true) == NULL)
+ PSC_FAN, true, true, true) == NULL)
return -ENOMEM;
+ /* Fan control */
+ if (pmbus_check_word_register(client, page,
+ pmbus_fan_command_registers[f])) {
+ ret = pmbus_add_fan_ctrl(client, data, index,
+ page, f, regval);
+ if (ret < 0)
+ return ret;
+ }
+
/*
* Each fan status register covers multiple fans,
* so we have to do some magic.
@@ -2168,6 +2381,14 @@ int pmbus_do_remove(struct i2c_client *client)
}
EXPORT_SYMBOL_GPL(pmbus_do_remove);
+struct dentry *pmbus_get_debugfs_dir(struct i2c_client *client)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+
+ return data->debugfs;
+}
+EXPORT_SYMBOL_GPL(pmbus_get_debugfs_dir);
+
static int __init pmbus_core_init(void)
{
pmbus_debugfs_dir = debugfs_create_dir("pmbus", NULL);
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index 25d28343ba93..2be77752cd56 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -179,6 +179,7 @@ struct sht15_data {
* sht15_crc8() - compute crc8
* @data: sht15 specific data.
* @value: sht15 retrieved data.
+ * @len: Length of retrieved data
*
* This implements section 2 of the CRC datasheet.
*/
diff --git a/drivers/hwmon/sht21.c b/drivers/hwmon/sht21.c
index 06706d288355..190e7b39ce32 100644
--- a/drivers/hwmon/sht21.c
+++ b/drivers/hwmon/sht21.c
@@ -41,7 +41,7 @@
/**
* struct sht21 - SHT21 device specific data
- * @hwmon_dev: device registered with hwmon
+ * @client: I2C client device
* @lock: mutex to protect measurement values
* @last_update: time of last update (jiffies)
* @temperature: cached temperature measurement value
diff --git a/drivers/hwmon/sht3x.c b/drivers/hwmon/sht3x.c
index 6ea99cd6ae79..370b57dafab7 100644
--- a/drivers/hwmon/sht3x.c
+++ b/drivers/hwmon/sht3x.c
@@ -732,6 +732,13 @@ static int sht3x_probe(struct i2c_client *client,
mutex_init(&data->i2c_lock);
mutex_init(&data->data_lock);
+ /*
+ * An attempt to read the limits register too early causes a NACK
+ * response from the chip. Waiting an empirically determined 500 us
+ * delay resolves the issue.
+ */
+ usleep_range(500, 600);
+
ret = limits_update(data);
if (ret)
return ret;
diff --git a/drivers/hwmon/w83773g.c b/drivers/hwmon/w83773g.c
new file mode 100644
index 000000000000..e858093ac806
--- /dev/null
+++ b/drivers/hwmon/w83773g.c
@@ -0,0 +1,329 @@
+/*
+ * Copyright (C) 2017 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Driver for the Nuvoton W83773G SMBus temperature sensor IC.
+ * Supported models: W83773G
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+/* W83773 has 3 channels */
+#define W83773_CHANNELS 3
+
+/* The W83773 registers */
+#define W83773_CONVERSION_RATE_REG_READ 0x04
+#define W83773_CONVERSION_RATE_REG_WRITE 0x0A
+#define W83773_MANUFACTURER_ID_REG 0xFE
+#define W83773_LOCAL_TEMP 0x00
+
+static const u8 W83773_STATUS[2] = { 0x02, 0x17 };
+
+static const u8 W83773_TEMP_LSB[2] = { 0x10, 0x25 };
+static const u8 W83773_TEMP_MSB[2] = { 0x01, 0x24 };
+
+static const u8 W83773_OFFSET_LSB[2] = { 0x12, 0x16 };
+static const u8 W83773_OFFSET_MSB[2] = { 0x11, 0x15 };
+
+/* Driver-supported device IDs */
+static const struct i2c_device_id w83773_id[] = {
+ { "w83773g" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, w83773_id);
+
+static const struct of_device_id w83773_of_match[] = {
+ {
+ .compatible = "nuvoton,w83773g"
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, w83773_of_match);
+
+static inline long temp_of_local(s8 reg)
+{
+ return reg * 1000;
+}
+
+static inline long temp_of_remote(s8 hb, u8 lb)
+{
+ return (hb << 3 | lb >> 5) * 125;
+}
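+
+/*
+ * Worked example (illustrative): the remote sensors report 13-bit
+ * values, so msb = 0x19 and lsb = 0x20 decode to
+ * (0x19 << 3 | 0x20 >> 5) * 125 = 201 * 125 = 25125 millidegrees C.
+ */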
+
+static int get_local_temp(struct regmap *regmap, long *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(regmap, W83773_LOCAL_TEMP, &regval);
+ if (ret < 0)
+ return ret;
+
+ *val = temp_of_local(regval);
+ return 0;
+}
+
+static int get_remote_temp(struct regmap *regmap, int index, long *val)
+{
+ unsigned int regval_high;
+ unsigned int regval_low;
+ int ret;
+
+ ret = regmap_read(regmap, W83773_TEMP_MSB[index], &regval_high);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(regmap, W83773_TEMP_LSB[index], &regval_low);
+ if (ret < 0)
+ return ret;
+
+ *val = temp_of_remote(regval_high, regval_low);
+ return 0;
+}
+
+static int get_fault(struct regmap *regmap, int index, long *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(regmap, W83773_STATUS[index], &regval);
+ if (ret < 0)
+ return ret;
+
+ *val = (regval & 0x04) >> 2;
+ return 0;
+}
+
+static int get_offset(struct regmap *regmap, int index, long *val)
+{
+ unsigned int regval_high;
+ unsigned int regval_low;
+ int ret;
+
+ ret = regmap_read(regmap, W83773_OFFSET_MSB[index], &regval_high);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(regmap, W83773_OFFSET_LSB[index], &regval_low);
+ if (ret < 0)
+ return ret;
+
+ *val = temp_of_remote(regval_high, regval_low);
+ return 0;
+}
+
+static int set_offset(struct regmap *regmap, int index, long val)
+{
+ int ret;
+ u8 high_byte;
+ u8 low_byte;
+
+ val = clamp_val(val, -127825, 127825);
+ /* The offset value equals (high_byte << 3 | low_byte >> 5) * 125 */
+ val /= 125;
+ high_byte = val >> 3;
+ low_byte = (val & 0x07) << 5;
+
+ ret = regmap_write(regmap, W83773_OFFSET_MSB[index], high_byte);
+ if (ret < 0)
+ return ret;
+
+ return regmap_write(regmap, W83773_OFFSET_LSB[index], low_byte);
+}
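+
+/*
+ * Worked example (illustrative): an offset of 1000 millidegrees C
+ * divides down to 1000 / 125 = 8, giving high_byte = 8 >> 3 = 0x01
+ * and low_byte = (8 & 0x07) << 5 = 0x00; decoding them as
+ * (0x01 << 3 | 0x00 >> 5) * 125 recovers 1000.
+ */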
+
+static int get_update_interval(struct regmap *regmap, long *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(regmap, W83773_CONVERSION_RATE_REG_READ, &regval);
+ if (ret < 0)
+ return ret;
+
+ *val = 16000 >> regval;
+ return 0;
+}
+
+static int set_update_interval(struct regmap *regmap, long val)
+{
+ int rate;
+
+ /*
+ * For valid rates, interval can be calculated as
+ * interval = (1 << (8 - rate)) * 62.5;
+ * The rounded rate is therefore
+ * rate = 8 - __fls(interval * 8 / (62.5 * 7));
+ * Use clamp_val() to avoid overflows, and to ensure valid input
+ * for __fls.
+ */
+ val = clamp_val(val, 62, 16000) * 10;
+ rate = 8 - __fls((val * 8 / (625 * 7)));
+ return regmap_write(regmap, W83773_CONVERSION_RATE_REG_WRITE, rate);
+}
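+
+/*
+ * Worked example (illustrative): a requested interval of 1000 ms is
+ * clamped and scaled to val = 10000, so rate = 8 - __fls(10000 * 8 /
+ * 4375) = 8 - __fls(18) = 4, which programs an interval of
+ * (1 << (8 - 4)) * 62.5 = 1000 ms.
+ */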
+
+static int w83773_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct regmap *regmap = dev_get_drvdata(dev);
+
+ if (type == hwmon_chip) {
+ if (attr == hwmon_chip_update_interval)
+ return get_update_interval(regmap, val);
+ return -EOPNOTSUPP;
+ }
+
+ switch (attr) {
+ case hwmon_temp_input:
+ if (channel == 0)
+ return get_local_temp(regmap, val);
+ return get_remote_temp(regmap, channel - 1, val);
+ case hwmon_temp_fault:
+ return get_fault(regmap, channel - 1, val);
+ case hwmon_temp_offset:
+ return get_offset(regmap, channel - 1, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int w83773_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct regmap *regmap = dev_get_drvdata(dev);
+
+ if (type == hwmon_chip && attr == hwmon_chip_update_interval)
+ return set_update_interval(regmap, val);
+
+ if (type == hwmon_temp && attr == hwmon_temp_offset)
+ return set_offset(regmap, channel - 1, val);
+
+ return -EOPNOTSUPP;
+}
+
+static umode_t w83773_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_chip:
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ return 0644;
+ }
+ break;
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_fault:
+ return 0444;
+ case hwmon_temp_offset:
+ return 0644;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static const u32 w83773_chip_config[] = {
+ HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL,
+ 0
+};
+
+static const struct hwmon_channel_info w83773_chip = {
+ .type = hwmon_chip,
+ .config = w83773_chip_config,
+};
+
+static const u32 w83773_temp_config[] = {
+ HWMON_T_INPUT,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_OFFSET,
+ HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_OFFSET,
+ 0
+};
+
+static const struct hwmon_channel_info w83773_temp = {
+ .type = hwmon_temp,
+ .config = w83773_temp_config,
+};
+
+static const struct hwmon_channel_info *w83773_info[] = {
+ &w83773_chip,
+ &w83773_temp,
+ NULL
+};
+
+static const struct hwmon_ops w83773_ops = {
+ .is_visible = w83773_is_visible,
+ .read = w83773_read,
+ .write = w83773_write,
+};
+
+static const struct hwmon_chip_info w83773_chip_info = {
+ .ops = &w83773_ops,
+ .info = w83773_info,
+};
+
+static const struct regmap_config w83773_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int w83773_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct device *hwmon_dev;
+ struct regmap *regmap;
+ int ret;
+
+ regmap = devm_regmap_init_i2c(client, &w83773_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "failed to allocate register map\n");
+ return PTR_ERR(regmap);
+ }
+
+ /* Set the conversion rate to 2 Hz */
+ ret = regmap_write(regmap, W83773_CONVERSION_RATE_REG_WRITE, 0x05);
+ if (ret < 0) {
+ dev_err(dev, "error writing conversion rate register\n");
+ return ret;
+ }
+
+ i2c_set_clientdata(client, regmap);
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev,
+ client->name,
+ regmap,
+ &w83773_chip_info,
+ NULL);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static struct i2c_driver w83773_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "w83773g",
+ .of_match_table = of_match_ptr(w83773_of_match),
+ },
+ .probe = w83773_probe,
+ .id_table = w83773_id,
+};
+
+module_i2c_driver(w83773_driver);
+
+MODULE_AUTHOR("Lei YU <mine260309@gmail.com>");
+MODULE_DESCRIPTION("W83773G temperature sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
index a18794128bf8..7c375443ede6 100644
--- a/drivers/hwtracing/coresight/of_coresight.c
+++ b/drivers/hwtracing/coresight/of_coresight.c
@@ -104,26 +104,17 @@ static int of_coresight_alloc_memory(struct device *dev,
int of_coresight_get_cpu(const struct device_node *node)
{
int cpu;
- bool found;
- struct device_node *dn, *np;
+ struct device_node *dn;
dn = of_parse_phandle(node, "cpu", 0);
-
/* Affinity defaults to CPU0 */
if (!dn)
return 0;
-
- for_each_possible_cpu(cpu) {
- np = of_cpu_device_node_get(cpu);
- found = (dn == np);
- of_node_put(np);
- if (found)
- break;
- }
+ cpu = of_cpu_node_to_id(dn);
of_node_put(dn);
/* Affinity to CPU0 if no cpu nodes are found */
- return found ? cpu : 0;
+ return (cpu < 0) ? 0 : cpu;
}
EXPORT_SYMBOL_GPL(of_coresight_get_cpu);
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 21bf619a86c5..9fee4c054d3d 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -280,8 +280,6 @@ struct dw_i2c_dev {
int (*acquire_lock)(struct dw_i2c_dev *dev);
void (*release_lock)(struct dw_i2c_dev *dev);
bool pm_disabled;
- bool suspended;
- bool skip_resume;
void (*disable)(struct dw_i2c_dev *dev);
void (*disable_int)(struct dw_i2c_dev *dev);
int (*init)(struct dw_i2c_dev *dev);
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 58add69a441c..153b947702c5 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -42,6 +42,7 @@
#include <linux/reset.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/suspend.h>
#include "i2c-designware-core.h"
@@ -372,6 +373,11 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev));
adap->dev.of_node = pdev->dev.of_node;
+ dev_pm_set_driver_flags(&pdev->dev,
+ DPM_FLAG_SMART_PREPARE |
+ DPM_FLAG_SMART_SUSPEND |
+ DPM_FLAG_LEAVE_SUSPENDED);
+
/* The code below assumes runtime PM to be disabled. */
WARN_ON(pm_runtime_enabled(&pdev->dev));
@@ -435,12 +441,24 @@ MODULE_DEVICE_TABLE(of, dw_i2c_of_match);
#ifdef CONFIG_PM_SLEEP
static int dw_i2c_plat_prepare(struct device *dev)
{
- return pm_runtime_suspended(dev);
+ /*
+ * If the ACPI companion device object is present for this device, it
+ * may be accessed during suspend and resume of other devices via I2C
+ * operation regions, so tell the PM core and middle layers to avoid
+ * skipping system suspend/resume callbacks for it in that case.
+ */
+ return !has_acpi_companion(dev);
}
static void dw_i2c_plat_complete(struct device *dev)
{
- if (dev->power.direct_complete)
+ /*
+ * The device can only be in runtime suspend at this point if it has not
+ * been resumed throughout the ending system suspend/resume cycle, so if
+ * the platform firmware might have messed with it, request the runtime PM
+ * framework to resume it.
+ */
+ if (pm_runtime_suspended(dev) && pm_resume_via_firmware())
pm_request_resume(dev);
}
#else
@@ -453,16 +471,9 @@ static int dw_i2c_plat_suspend(struct device *dev)
{
struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
- if (i_dev->suspended) {
- i_dev->skip_resume = true;
- return 0;
- }
-
i_dev->disable(i_dev);
i2c_dw_plat_prepare_clk(i_dev, false);
- i_dev->suspended = true;
-
return 0;
}
@@ -470,19 +481,9 @@ static int dw_i2c_plat_resume(struct device *dev)
{
struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
- if (!i_dev->suspended)
- return 0;
-
- if (i_dev->skip_resume) {
- i_dev->skip_resume = false;
- return 0;
- }
-
i2c_dw_plat_prepare_clk(i_dev, true);
i_dev->init(i_dev);
- i_dev->suspended = false;
-
return 0;
}
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index ef86296b8b0d..39e3b345a6c8 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -629,6 +629,18 @@ config SPEAR_ADC
To compile this driver as a module, choose M here: the
module will be called spear_adc.
+config SD_ADC_MODULATOR
+ tristate "Generic sigma delta modulator"
+ depends on OF
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Select this option to enable the generic sigma delta modulator driver.
+ This driver supports generic sigma delta modulators.
+
+ This driver can also be built as a module. If so, the module
+ will be called sd_adc_modulator.
+
config STM32_ADC_CORE
tristate "STMicroelectronics STM32 adc core"
depends on ARCH_STM32 || COMPILE_TEST
@@ -656,6 +668,31 @@ config STM32_ADC
This driver can also be built as a module. If so, the module
will be called stm32-adc.
+config STM32_DFSDM_CORE
+ tristate "STMicroelectronics STM32 DFSDM core"
+ depends on (ARCH_STM32 && OF) || COMPILE_TEST
+ select REGMAP
+ select REGMAP_MMIO
+ help
+ Select this option to enable the driver for the STMicroelectronics
+ STM32 digital filter for sigma delta modulators (DFSDM).
+
+ This driver can also be built as a module. If so, the module
+ will be called stm32-dfsdm-core.
+
+config STM32_DFSDM_ADC
+ tristate "STMicroelectronics STM32 dfsdm adc"
+ depends on (ARCH_STM32 && OF) || COMPILE_TEST
+ select STM32_DFSDM_CORE
+ select REGMAP_MMIO
+ select IIO_BUFFER_HW_CONSUMER
+ help
+ Select this option to support the sigma delta modulator ADC based on
+ the STMicroelectronics STM32 digital filter for sigma delta
+ modulators (DFSDM).
+
+ This driver can also be built as a module. If so, the module
+ will be called stm32-dfsdm-adc.
+
config STX104
tristate "Apex Embedded Systems STX104 driver"
depends on PC104 && X86 && ISA_BUS_API
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 9572c1090f35..28a9423997f3 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -64,6 +64,8 @@ obj-$(CONFIG_STX104) += stx104.o
obj-$(CONFIG_SUN4I_GPADC) += sun4i-gpadc-iio.o
obj-$(CONFIG_STM32_ADC_CORE) += stm32-adc-core.o
obj-$(CONFIG_STM32_ADC) += stm32-adc.o
+obj-$(CONFIG_STM32_DFSDM_CORE) += stm32-dfsdm-core.o
+obj-$(CONFIG_STM32_DFSDM_ADC) += stm32-dfsdm-adc.o
obj-$(CONFIG_TI_ADC081C) += ti-adc081c.o
obj-$(CONFIG_TI_ADC0832) += ti-adc0832.o
obj-$(CONFIG_TI_ADC084S021) += ti-adc084s021.o
@@ -82,3 +84,4 @@ obj-$(CONFIG_VF610_ADC) += vf610_adc.o
obj-$(CONFIG_VIPERBOARD_ADC) += viperboard_adc.o
xilinx-xadc-y := xilinx-xadc-core.o xilinx-xadc-events.o
obj-$(CONFIG_XILINX_XADC) += xilinx-xadc.o
+obj-$(CONFIG_SD_ADC_MODULATOR) += sd_adc_modulator.o
diff --git a/drivers/iio/adc/sd_adc_modulator.c b/drivers/iio/adc/sd_adc_modulator.c
new file mode 100644
index 000000000000..560d8c7d9d86
--- /dev/null
+++ b/drivers/iio/adc/sd_adc_modulator.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic sigma delta modulator driver
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author: Arnaud Pouliquen <arnaud.pouliquen@st.com>.
+ */
+
+#include <linux/iio/iio.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+
+static const struct iio_info iio_sd_mod_iio_info;
+
+static const struct iio_chan_spec iio_sd_mod_ch = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 1,
+ .shift = 0,
+ },
+};
+
+static int iio_sd_mod_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct iio_dev *iio;
+
+ iio = devm_iio_device_alloc(dev, 0);
+ if (!iio)
+ return -ENOMEM;
+
+ iio->dev.parent = dev;
+ iio->dev.of_node = dev->of_node;
+ iio->name = dev_name(dev);
+ iio->info = &iio_sd_mod_iio_info;
+ iio->modes = INDIO_BUFFER_HARDWARE;
+
+ iio->num_channels = 1;
+ iio->channels = &iio_sd_mod_ch;
+
+ platform_set_drvdata(pdev, iio);
+
+ return devm_iio_device_register(&pdev->dev, iio);
+}
+
+static const struct of_device_id sd_adc_of_match[] = {
+ { .compatible = "sd-modulator" },
+ { .compatible = "ads1201" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sd_adc_of_match);
+
+static struct platform_driver iio_sd_mod_adc = {
+ .driver = {
+ .name = "iio_sd_adc_mod",
+ .of_match_table = of_match_ptr(sd_adc_of_match),
+ },
+ .probe = iio_sd_mod_probe,
+};
+
+module_platform_driver(iio_sd_mod_adc);
+
+MODULE_DESCRIPTION("Basic sigma delta modulator");
+MODULE_AUTHOR("Arnaud Pouliquen <arnaud.pouliquen@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
new file mode 100644
index 000000000000..daa026d6a94f
--- /dev/null
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -0,0 +1,1205 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file is the ADC part of the STM32 DFSDM driver
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author: Arnaud Pouliquen <arnaud.pouliquen@st.com>.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/hw-consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "stm32-dfsdm.h"
+
+#define DFSDM_DMA_BUFFER_SIZE (4 * PAGE_SIZE)
+
+/* Conversion timeout */
+#define DFSDM_TIMEOUT_US 100000
+#define DFSDM_TIMEOUT (msecs_to_jiffies(DFSDM_TIMEOUT_US / 1000))
+
+/* Oversampling attribute default */
+#define DFSDM_DEFAULT_OVERSAMPLING 100
+
+/* Oversampling max values */
+#define DFSDM_MAX_INT_OVERSAMPLING 256
+#define DFSDM_MAX_FL_OVERSAMPLING 1024
+
+/* Max sample resolutions */
+#define DFSDM_MAX_RES BIT(31)
+#define DFSDM_DATA_RES BIT(23)
+
+enum sd_converter_type {
+ DFSDM_AUDIO,
+ DFSDM_IIO,
+};
+
+struct stm32_dfsdm_dev_data {
+ int type;
+ int (*init)(struct iio_dev *indio_dev);
+ unsigned int num_channels;
+ const struct regmap_config *regmap_cfg;
+};
+
+struct stm32_dfsdm_adc {
+ struct stm32_dfsdm *dfsdm;
+ const struct stm32_dfsdm_dev_data *dev_data;
+ unsigned int fl_id;
+ unsigned int ch_id;
+
+ /* ADC specific */
+ unsigned int oversamp;
+ struct iio_hw_consumer *hwc;
+ struct completion completion;
+ u32 *buffer;
+
+ /* Audio specific */
+ unsigned int spi_freq; /* SPI bus clock frequency */
+ unsigned int sample_freq; /* Sample frequency after filter decimation */
+ int (*cb)(const void *data, size_t size, void *cb_priv);
+ void *cb_priv;
+
+ /* DMA */
+ u8 *rx_buf;
+ unsigned int bufi; /* Buffer current position */
+ unsigned int buf_sz; /* Buffer size */
+ struct dma_chan *dma_chan;
+ dma_addr_t dma_buf;
+};
+
+struct stm32_dfsdm_str2field {
+ const char *name;
+ unsigned int val;
+};
+
+/* DFSDM channel serial interface type */
+static const struct stm32_dfsdm_str2field stm32_dfsdm_chan_type[] = {
+ { "SPI_R", 0 }, /* SPI with data on rising edge */
+ { "SPI_F", 1 }, /* SPI with data on falling edge */
+ { "MANCH_R", 2 }, /* Manchester codec, rising edge = logic 0 */
+ { "MANCH_F", 3 }, /* Manchester codec, falling edge = logic 1 */
+ {},
+};
+
+/* DFSDM channel clock source */
+static const struct stm32_dfsdm_str2field stm32_dfsdm_chan_src[] = {
+ /* External SPI clock (CLKIN x) */
+ { "CLKIN", DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL },
+ /* Internal SPI clock (CLKOUT) */
+ { "CLKOUT", DFSDM_CHANNEL_SPI_CLOCK_INTERNAL },
+ /* Internal SPI clock divided by 2 (falling edge) */
+ { "CLKOUT_F", DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_FALLING },
+ /* Internal SPI clock divided by 2 (rising edge) */
+ { "CLKOUT_R", DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_RISING },
+ {},
+};
+
+static int stm32_dfsdm_str2val(const char *str,
+ const struct stm32_dfsdm_str2field *list)
+{
+ const struct stm32_dfsdm_str2field *p;
+
+ for (p = list; p && p->name; p++)
+ if (!strcmp(p->name, str))
+ return p->val;
+
+ return -EINVAL;
+}
+
+static int stm32_dfsdm_set_osrs(struct stm32_dfsdm_filter *fl,
+ unsigned int fast, unsigned int oversamp)
+{
+ unsigned int i, d, fosr, iosr;
+ u64 res;
+ s64 delta;
+ unsigned int m = 1; /* multiplication factor */
+ unsigned int p = fl->ford; /* filter order (ford) */
+
+ pr_debug("%s: Requested oversampling: %d\n", __func__, oversamp);
+ /*
+ * This function tries to compute the filter and integrator oversampling
+ * ratios, based on the oversampling ratio requested by the user.
+ *
+ * Decimation d depends on the filter order and the oversampling ratios.
+ * ford: filter order
+ * fosr: filter oversampling ratio
+ * iosr: integrator oversampling ratio
+ */
+ if (fl->ford == DFSDM_FASTSINC_ORDER) {
+ m = 2;
+ p = 2;
+ }
+
+ /*
+ * Look for filter and integrator oversampling ratios that allow
+ * reaching a 24-bit data output resolution.
+ * Return as soon as the exact resolution is reached.
+ * Otherwise the highest resolution below 32 bits is kept.
+ */
+ for (fosr = 1; fosr <= DFSDM_MAX_FL_OVERSAMPLING; fosr++) {
+ for (iosr = 1; iosr <= DFSDM_MAX_INT_OVERSAMPLING; iosr++) {
+ if (fast)
+ d = fosr * iosr;
+ else if (fl->ford == DFSDM_FASTSINC_ORDER)
+ d = fosr * (iosr + 3) + 2;
+ else
+ d = fosr * (iosr - 1 + p) + p;
+
+ if (d > oversamp)
+ break;
+ else if (d != oversamp)
+ continue;
+ /*
+ * Check resolution (limited to signed 32 bits)
+ * res <= 2^31
+ * Sincx filters:
+ * res = m * fosr^p * iosr (with m = 1, p = ford)
+ * FastSinc filter:
+ * res = m * fosr^p * iosr (with m = 2, p = 2)
+ */
+ res = fosr;
+ for (i = p - 1; i > 0; i--) {
+ res = res * (u64)fosr;
+ if (res > DFSDM_MAX_RES)
+ break;
+ }
+ if (res > DFSDM_MAX_RES)
+ continue;
+ res = res * (u64)m * (u64)iosr;
+ if (res > DFSDM_MAX_RES)
+ continue;
+
+ delta = res - DFSDM_DATA_RES;
+
+ if (res >= fl->res) {
+ fl->res = res;
+ fl->fosr = fosr;
+ fl->iosr = iosr;
+ fl->fast = fast;
+ pr_debug("%s: fosr = %d, iosr = %d\n",
+ __func__, fl->fosr, fl->iosr);
+ }
+
+ if (!delta)
+ return 0;
+ }
+ }
+
+ if (!fl->fosr)
+ return -EINVAL;
+
+ return 0;
+}
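+
+/*
+ * Worked example (illustrative only, assuming a Sinc3 filter, i.e. p = 3,
+ * m = 1, fast = 0): for oversamp = 15, the pairs (fosr, iosr) = (1, 10),
+ * (2, 4), (3, 2) and (4, 1) all give d = fosr * (iosr - 1 + p) + p = 15.
+ * The loop keeps the pair with the highest resolution res = fosr^3 * iosr,
+ * here fosr = 4, iosr = 1 with res = 64.
+ */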
+
+static int stm32_dfsdm_start_channel(struct stm32_dfsdm *dfsdm,
+ unsigned int ch_id)
+{
+ return regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(ch_id),
+ DFSDM_CHCFGR1_CHEN_MASK,
+ DFSDM_CHCFGR1_CHEN(1));
+}
+
+static void stm32_dfsdm_stop_channel(struct stm32_dfsdm *dfsdm,
+ unsigned int ch_id)
+{
+ regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(ch_id),
+ DFSDM_CHCFGR1_CHEN_MASK, DFSDM_CHCFGR1_CHEN(0));
+}
+
+static int stm32_dfsdm_chan_configure(struct stm32_dfsdm *dfsdm,
+ struct stm32_dfsdm_channel *ch)
+{
+ unsigned int id = ch->id;
+ struct regmap *regmap = dfsdm->regmap;
+ int ret;
+
+ ret = regmap_update_bits(regmap, DFSDM_CHCFGR1(id),
+ DFSDM_CHCFGR1_SITP_MASK,
+ DFSDM_CHCFGR1_SITP(ch->type));
+ if (ret < 0)
+ return ret;
+ ret = regmap_update_bits(regmap, DFSDM_CHCFGR1(id),
+ DFSDM_CHCFGR1_SPICKSEL_MASK,
+ DFSDM_CHCFGR1_SPICKSEL(ch->src));
+ if (ret < 0)
+ return ret;
+ return regmap_update_bits(regmap, DFSDM_CHCFGR1(id),
+ DFSDM_CHCFGR1_CHINSEL_MASK,
+ DFSDM_CHCFGR1_CHINSEL(ch->alt_si));
+}
+
+static int stm32_dfsdm_start_filter(struct stm32_dfsdm *dfsdm,
+ unsigned int fl_id)
+{
+ int ret;
+
+ /* Enable filter */
+ ret = regmap_update_bits(dfsdm->regmap, DFSDM_CR1(fl_id),
+ DFSDM_CR1_DFEN_MASK, DFSDM_CR1_DFEN(1));
+ if (ret < 0)
+ return ret;
+
+ /* Start conversion */
+ return regmap_update_bits(dfsdm->regmap, DFSDM_CR1(fl_id),
+ DFSDM_CR1_RSWSTART_MASK,
+ DFSDM_CR1_RSWSTART(1));
+}
+
+static void stm32_dfsdm_stop_filter(struct stm32_dfsdm *dfsdm, unsigned int fl_id)
+{
+ /* Disable conversion */
+ regmap_update_bits(dfsdm->regmap, DFSDM_CR1(fl_id),
+ DFSDM_CR1_DFEN_MASK, DFSDM_CR1_DFEN(0));
+}
+
+static int stm32_dfsdm_filter_configure(struct stm32_dfsdm *dfsdm,
+ unsigned int fl_id, unsigned int ch_id)
+{
+ struct regmap *regmap = dfsdm->regmap;
+ struct stm32_dfsdm_filter *fl = &dfsdm->fl_list[fl_id];
+ int ret;
+
+ /* Average integrator oversampling */
+ ret = regmap_update_bits(regmap, DFSDM_FCR(fl_id), DFSDM_FCR_IOSR_MASK,
+ DFSDM_FCR_IOSR(fl->iosr - 1));
+ if (ret)
+ return ret;
+
+ /* Filter order and Oversampling */
+ ret = regmap_update_bits(regmap, DFSDM_FCR(fl_id), DFSDM_FCR_FOSR_MASK,
+ DFSDM_FCR_FOSR(fl->fosr - 1));
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(regmap, DFSDM_FCR(fl_id), DFSDM_FCR_FORD_MASK,
+ DFSDM_FCR_FORD(fl->ford));
+ if (ret)
+ return ret;
+
+ /* Scan mode is not supported for the moment */
+ ret = regmap_update_bits(regmap, DFSDM_CR1(fl_id), DFSDM_CR1_RCH_MASK,
+ DFSDM_CR1_RCH(ch_id));
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(regmap, DFSDM_CR1(fl_id),
+ DFSDM_CR1_RSYNC_MASK,
+ DFSDM_CR1_RSYNC(fl->sync_mode));
+}
+
+static int stm32_dfsdm_channel_parse_of(struct stm32_dfsdm *dfsdm,
+ struct iio_dev *indio_dev,
+ struct iio_chan_spec *ch)
+{
+ struct stm32_dfsdm_channel *df_ch;
+ const char *of_str;
+ int chan_idx = ch->scan_index;
+ int ret, val;
+
+ ret = of_property_read_u32_index(indio_dev->dev.of_node,
+ "st,adc-channels", chan_idx,
+ &ch->channel);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+ " Error parsing 'st,adc-channels' for idx %d\n",
+ chan_idx);
+ return ret;
+ }
+ if (ch->channel >= dfsdm->num_chs) {
+ dev_err(&indio_dev->dev,
+ " Error bad channel number %d (max = %d)\n",
+ ch->channel, dfsdm->num_chs);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_string_index(indio_dev->dev.of_node,
+ "st,adc-channel-names", chan_idx,
+ &ch->datasheet_name);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+ " Error parsing 'st,adc-channel-names' for idx %d\n",
+ chan_idx);
+ return ret;
+ }
+
+ df_ch = &dfsdm->ch_list[ch->channel];
+ df_ch->id = ch->channel;
+
+ ret = of_property_read_string_index(indio_dev->dev.of_node,
+ "st,adc-channel-types", chan_idx,
+ &of_str);
+ if (!ret) {
+ val = stm32_dfsdm_str2val(of_str, stm32_dfsdm_chan_type);
+ if (val < 0)
+ return val;
+ } else {
+ val = 0;
+ }
+ df_ch->type = val;
+
+ ret = of_property_read_string_index(indio_dev->dev.of_node,
+ "st,adc-channel-clk-src", chan_idx,
+ &of_str);
+ if (!ret) {
+ val = stm32_dfsdm_str2val(of_str, stm32_dfsdm_chan_src);
+ if (val < 0)
+ return val;
+ } else {
+ val = 0;
+ }
+ df_ch->src = val;
+
+ ret = of_property_read_u32_index(indio_dev->dev.of_node,
+ "st,adc-alt-channel", chan_idx,
+ &df_ch->alt_si);
+ if (ret < 0)
+ df_ch->alt_si = 0;
+
+ return 0;
+}
+
+static ssize_t dfsdm_adc_audio_get_spiclk(struct iio_dev *indio_dev,
+ uintptr_t priv,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", adc->spi_freq);
+}
+
+static ssize_t dfsdm_adc_audio_set_spiclk(struct iio_dev *indio_dev,
+ uintptr_t priv,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id];
+ struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[adc->ch_id];
+ unsigned int sample_freq = adc->sample_freq;
+ unsigned int spi_freq;
+ int ret;
+
+ dev_err(&indio_dev->dev, "enter %s\n", __func__);
+ /* If DFSDM is master on SPI, SPI freq can not be updated */
+ if (ch->src != DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL)
+ return -EPERM;
+
+ ret = kstrtouint(buf, 0, &spi_freq);
+ if (ret)
+ return ret;
+
+ if (!spi_freq)
+ return -EINVAL;
+
+ if (sample_freq) {
+ if (spi_freq % sample_freq)
+ dev_warn(&indio_dev->dev,
+ "Sampling rate not accurate (%d)\n",
+ spi_freq / (spi_freq / sample_freq));
+
+ ret = stm32_dfsdm_set_osrs(fl, 0, (spi_freq / sample_freq));
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+ "No filter parameters that match!\n");
+ return ret;
+ }
+ }
+ adc->spi_freq = spi_freq;
+
+ return len;
+}
+
+static int stm32_dfsdm_start_conv(struct stm32_dfsdm_adc *adc, bool dma)
+{
+ struct regmap *regmap = adc->dfsdm->regmap;
+ int ret;
+ unsigned int dma_en = 0, cont_en = 0;
+
+ ret = stm32_dfsdm_start_channel(adc->dfsdm, adc->ch_id);
+ if (ret < 0)
+ return ret;
+
+ ret = stm32_dfsdm_filter_configure(adc->dfsdm, adc->fl_id,
+ adc->ch_id);
+ if (ret < 0)
+ goto stop_channels;
+
+ if (dma) {
+ /* Enable DMA transfer */
+ dma_en = DFSDM_CR1_RDMAEN(1);
+ /* Enable conversion triggered by SPI clock */
+ cont_en = DFSDM_CR1_RCONT(1);
+ }
+ /* Enable or disable the DMA transfer */
+ ret = regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
+ DFSDM_CR1_RDMAEN_MASK, dma_en);
+ if (ret < 0)
+ goto stop_channels;
+
+ /* Enable or disable conversion triggered by SPI clock */
+ ret = regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
+ DFSDM_CR1_RCONT_MASK, cont_en);
+ if (ret < 0)
+ goto stop_channels;
+
+ ret = stm32_dfsdm_start_filter(adc->dfsdm, adc->fl_id);
+ if (ret < 0)
+ goto stop_channels;
+
+ return 0;
+
+stop_channels:
+ regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
+ DFSDM_CR1_RDMAEN_MASK, 0);
+
+ regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
+ DFSDM_CR1_RCONT_MASK, 0);
+ stm32_dfsdm_stop_channel(adc->dfsdm, adc->ch_id);
+
+ return ret;
+}
+
+static void stm32_dfsdm_stop_conv(struct stm32_dfsdm_adc *adc)
+{
+ struct regmap *regmap = adc->dfsdm->regmap;
+
+ stm32_dfsdm_stop_filter(adc->dfsdm, adc->fl_id);
+
+ /* Clean conversion options */
+ regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
+ DFSDM_CR1_RDMAEN_MASK, 0);
+
+ regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
+ DFSDM_CR1_RCONT_MASK, 0);
+
+ stm32_dfsdm_stop_channel(adc->dfsdm, adc->ch_id);
+}
+
+static int stm32_dfsdm_set_watermark(struct iio_dev *indio_dev,
+ unsigned int val)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ unsigned int watermark = DFSDM_DMA_BUFFER_SIZE / 2;
+
+ /*
+ * DMA cyclic transfers are used, the buffer is split into two periods.
+ * There should be:
+ * - always one buffer (period) the DMA is working on
+ * - one buffer (period) the driver pushes to the ASoC side.
+ */
+ watermark = min(watermark, val * (unsigned int)(sizeof(u32)));
+ adc->buf_sz = watermark * 2;
+
+ return 0;
+}
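+
+/*
+ * Worked example (a sketch, assuming 4 KiB pages so that
+ * DFSDM_DMA_BUFFER_SIZE = 16 KiB): for val = 1024 samples, the watermark
+ * is min(8192, 1024 * 4) = 4096 bytes and buf_sz = 8192 bytes, i.e. two
+ * 4 KiB periods: one being filled by DMA while the other is pushed to ASoC.
+ */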
+
+static unsigned int stm32_dfsdm_adc_dma_residue(struct stm32_dfsdm_adc *adc)
+{
+ struct dma_tx_state state;
+ enum dma_status status;
+
+ status = dmaengine_tx_status(adc->dma_chan,
+ adc->dma_chan->cookie,
+ &state);
+ if (status == DMA_IN_PROGRESS) {
+ /* Residue is size in bytes from end of buffer */
+ unsigned int i = adc->buf_sz - state.residue;
+ unsigned int size;
+
+ /* Return available bytes */
+ if (i >= adc->bufi)
+ size = i - adc->bufi;
+ else
+ size = adc->buf_sz + i - adc->bufi;
+
+ return size;
+ }
+
+ return 0;
+}
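+
+/*
+ * Worked example (illustrative only): with buf_sz = 8192, the DMA write
+ * position is i = 8192 - state.residue. If the driver read position is
+ * bufi = 1000 and i = 3000, then 2000 bytes are available; if the DMA has
+ * wrapped so that i = 500, then 8192 + 500 - 1000 = 7692 bytes are
+ * available.
+ */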
+
+static void stm32_dfsdm_audio_dma_buffer_done(void *data)
+{
+ struct iio_dev *indio_dev = data;
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ int available = stm32_dfsdm_adc_dma_residue(adc);
+ size_t old_pos;
+
+ /*
+ * FIXME: The IIO kernel interface does not support cyclic DMA buffers
+ * and only offers an interface to push data sample by sample.
+ * For this reason the IIO buffer interface is not used and is
+ * bypassed using a private callback registered by ASoC.
+ * This should be a temporary solution until cyclic DMA engine
+ * support is added to IIO.
+ */
+
+ dev_dbg(&indio_dev->dev, "%s: pos = %d, available = %d\n", __func__,
+ adc->bufi, available);
+ old_pos = adc->bufi;
+
+ while (available >= indio_dev->scan_bytes) {
+ u32 *buffer = (u32 *)&adc->rx_buf[adc->bufi];
+
+ /* Mask out the 8 LSBs that contain the channel ID */
+ *buffer = (*buffer & 0xFFFFFF00) << 8;
+ available -= indio_dev->scan_bytes;
+ adc->bufi += indio_dev->scan_bytes;
+ if (adc->bufi >= adc->buf_sz) {
+ if (adc->cb)
+ adc->cb(&adc->rx_buf[old_pos],
+ adc->buf_sz - old_pos, adc->cb_priv);
+ adc->bufi = 0;
+ old_pos = 0;
+ }
+ }
+ if (adc->cb)
+ adc->cb(&adc->rx_buf[old_pos], adc->bufi - old_pos,
+ adc->cb_priv);
+}
+
+static int stm32_dfsdm_adc_dma_start(struct iio_dev *indio_dev)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ struct dma_async_tx_descriptor *desc;
+ dma_cookie_t cookie;
+ int ret;
+
+ if (!adc->dma_chan)
+ return -EINVAL;
+
+ dev_dbg(&indio_dev->dev, "%s size=%d watermark=%d\n", __func__,
+ adc->buf_sz, adc->buf_sz / 2);
+
+ /* Prepare a DMA cyclic transaction */
+ desc = dmaengine_prep_dma_cyclic(adc->dma_chan,
+ adc->dma_buf,
+ adc->buf_sz, adc->buf_sz / 2,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+ if (!desc)
+ return -EBUSY;
+
+ desc->callback = stm32_dfsdm_audio_dma_buffer_done;
+ desc->callback_param = indio_dev;
+
+ cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dmaengine_terminate_all(adc->dma_chan);
+ return ret;
+ }
+
+ /* Issue pending DMA requests */
+ dma_async_issue_pending(adc->dma_chan);
+
+ return 0;
+}
+
+static int stm32_dfsdm_postenable(struct iio_dev *indio_dev)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ int ret;
+
+ /* Reset adc buffer index */
+ adc->bufi = 0;
+
+ ret = stm32_dfsdm_start_dfsdm(adc->dfsdm);
+ if (ret < 0)
+ return ret;
+
+ ret = stm32_dfsdm_start_conv(adc, true);
+ if (ret) {
+ dev_err(&indio_dev->dev, "Can't start conversion\n");
+ goto stop_dfsdm;
+ }
+
+ if (adc->dma_chan) {
+ ret = stm32_dfsdm_adc_dma_start(indio_dev);
+ if (ret) {
+ dev_err(&indio_dev->dev, "Can't start DMA\n");
+ goto err_stop_conv;
+ }
+ }
+
+ return 0;
+
+err_stop_conv:
+ stm32_dfsdm_stop_conv(adc);
+stop_dfsdm:
+ stm32_dfsdm_stop_dfsdm(adc->dfsdm);
+
+ return ret;
+}
+
+static int stm32_dfsdm_predisable(struct iio_dev *indio_dev)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+
+ if (adc->dma_chan)
+ dmaengine_terminate_all(adc->dma_chan);
+
+ stm32_dfsdm_stop_conv(adc);
+
+ stm32_dfsdm_stop_dfsdm(adc->dfsdm);
+
+ return 0;
+}
+
+static const struct iio_buffer_setup_ops stm32_dfsdm_buffer_setup_ops = {
+ .postenable = &stm32_dfsdm_postenable,
+ .predisable = &stm32_dfsdm_predisable,
+};
+
+/**
+ * stm32_dfsdm_get_buff_cb() - register a callback that will be called when
+ * a DMA transfer period completes.
+ *
+ * @iio_dev: Handle to IIO device.
+ * @cb: Pointer to callback function:
+ * - data: pointer to data buffer
+ * - size: size in bytes of the data buffer
+ * - private: pointer to consumer private structure.
+ * @private: Pointer to consumer private structure.
+ */
+int stm32_dfsdm_get_buff_cb(struct iio_dev *iio_dev,
+ int (*cb)(const void *data, size_t size,
+ void *private),
+ void *private)
+{
+ struct stm32_dfsdm_adc *adc;
+
+ if (!iio_dev)
+ return -EINVAL;
+ adc = iio_priv(iio_dev);
+
+ adc->cb = cb;
+ adc->cb_priv = private;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stm32_dfsdm_get_buff_cb);
+
+/**
+ * stm32_dfsdm_release_buff_cb - unregister buffer callback
+ *
+ * @iio_dev: Handle to IIO device.
+ */
+int stm32_dfsdm_release_buff_cb(struct iio_dev *iio_dev)
+{
+ struct stm32_dfsdm_adc *adc;
+
+ if (!iio_dev)
+ return -EINVAL;
+ adc = iio_priv(iio_dev);
+
+ adc->cb = NULL;
+ adc->cb_priv = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stm32_dfsdm_release_buff_cb);
+
+static int stm32_dfsdm_single_conv(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int *res)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ long timeout;
+ int ret;
+
+ reinit_completion(&adc->completion);
+
+ adc->buffer = res;
+
+ ret = stm32_dfsdm_start_dfsdm(adc->dfsdm);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(adc->dfsdm->regmap, DFSDM_CR2(adc->fl_id),
+ DFSDM_CR2_REOCIE_MASK, DFSDM_CR2_REOCIE(1));
+ if (ret < 0)
+ goto stop_dfsdm;
+
+ ret = stm32_dfsdm_start_conv(adc, false);
+ if (ret < 0) {
+ regmap_update_bits(adc->dfsdm->regmap, DFSDM_CR2(adc->fl_id),
+ DFSDM_CR2_REOCIE_MASK, DFSDM_CR2_REOCIE(0));
+ goto stop_dfsdm;
+ }
+
+ timeout = wait_for_completion_interruptible_timeout(&adc->completion,
+ DFSDM_TIMEOUT);
+
+ /* Mask the regular end-of-conversion IRQ */
+ regmap_update_bits(adc->dfsdm->regmap, DFSDM_CR2(adc->fl_id),
+ DFSDM_CR2_REOCIE_MASK, DFSDM_CR2_REOCIE(0));
+
+ if (timeout == 0)
+ ret = -ETIMEDOUT;
+ else if (timeout < 0)
+ ret = timeout;
+ else
+ ret = IIO_VAL_INT;
+
+ stm32_dfsdm_stop_conv(adc);
+
+stop_dfsdm:
+ stm32_dfsdm_stop_dfsdm(adc->dfsdm);
+
+ return ret;
+}
+
+static int stm32_dfsdm_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id];
+ struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[adc->ch_id];
+ unsigned int spi_freq = adc->spi_freq;
+ int ret = -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ ret = stm32_dfsdm_set_osrs(fl, 0, val);
+ if (!ret)
+ adc->oversamp = val;
+
+ return ret;
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (!val)
+ return -EINVAL;
+ if (ch->src != DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL)
+ spi_freq = adc->dfsdm->spi_master_freq;
+
+ if (spi_freq % val)
+ dev_warn(&indio_dev->dev,
+ "Sampling rate not accurate (%d)\n",
+ spi_freq / (spi_freq / val));
+
+ ret = stm32_dfsdm_set_osrs(fl, 0, (spi_freq / val));
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+ "Not able to find parameter that match!\n");
+ return ret;
+ }
+ adc->sample_freq = val;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int stm32_dfsdm_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = iio_hw_consumer_enable(adc->hwc);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+ "%s: IIO enable failed (channel %d)\n",
+ __func__, chan->channel);
+ return ret;
+ }
+ ret = stm32_dfsdm_single_conv(indio_dev, chan, val);
+ iio_hw_consumer_disable(adc->hwc);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+ "%s: Conversion failed (channel %d)\n",
+ __func__, chan->channel);
+ return ret;
+ }
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *val = adc->oversamp;
+
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = adc->sample_freq;
+
+ return IIO_VAL_INT;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info stm32_dfsdm_info_audio = {
+ .hwfifo_set_watermark = stm32_dfsdm_set_watermark,
+ .read_raw = stm32_dfsdm_read_raw,
+ .write_raw = stm32_dfsdm_write_raw,
+};
+
+static const struct iio_info stm32_dfsdm_info_adc = {
+ .read_raw = stm32_dfsdm_read_raw,
+ .write_raw = stm32_dfsdm_write_raw,
+};
+
+static irqreturn_t stm32_dfsdm_irq(int irq, void *arg)
+{
+ struct stm32_dfsdm_adc *adc = arg;
+ struct iio_dev *indio_dev = iio_priv_to_dev(adc);
+ struct regmap *regmap = adc->dfsdm->regmap;
+ unsigned int status, int_en;
+
+ regmap_read(regmap, DFSDM_ISR(adc->fl_id), &status);
+ regmap_read(regmap, DFSDM_CR2(adc->fl_id), &int_en);
+
+ if (status & DFSDM_ISR_REOCF_MASK) {
+ /* Reading the data register clears the IRQ status */
+ regmap_read(regmap, DFSDM_RDATAR(adc->fl_id), adc->buffer);
+ complete(&adc->completion);
+ }
+
+ if (status & DFSDM_ISR_ROVRF_MASK) {
+ if (int_en & DFSDM_CR2_ROVRIE_MASK)
+ dev_warn(&indio_dev->dev, "Overrun detected\n");
+ regmap_update_bits(regmap, DFSDM_ICR(adc->fl_id),
+ DFSDM_ICR_CLRROVRF_MASK,
+ DFSDM_ICR_CLRROVRF_MASK);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Define external info for SPI Frequency and audio sampling rate that can be
+ * configured by ASoC driver through consumer.h API
+ */
+static const struct iio_chan_spec_ext_info dfsdm_adc_audio_ext_info[] = {
+ /* spi_clk_freq: clock frequency on the SPI/Manchester bus used by the channel */
+ {
+ .name = "spi_clk_freq",
+ .shared = IIO_SHARED_BY_TYPE,
+ .read = dfsdm_adc_audio_get_spiclk,
+ .write = dfsdm_adc_audio_set_spiclk,
+ },
+ {},
+};
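+
+/*
+ * Note (an assumption based on the IIO core naming rules, not stated in
+ * this patch): since the attribute is shared by type, it is expected to
+ * appear in sysfs as in_voltage_spi_clk_freq under the IIO device.
+ */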
+
+static void stm32_dfsdm_dma_release(struct iio_dev *indio_dev)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+
+ if (adc->dma_chan) {
+ dma_free_coherent(adc->dma_chan->device->dev,
+ DFSDM_DMA_BUFFER_SIZE,
+ adc->rx_buf, adc->dma_buf);
+ dma_release_channel(adc->dma_chan);
+ }
+}
+
+static int stm32_dfsdm_dma_request(struct iio_dev *indio_dev)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ struct dma_slave_config config = {
+ .src_addr = (dma_addr_t)adc->dfsdm->phys_base +
+ DFSDM_RDATAR(adc->fl_id),
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ };
+ int ret;
+
+ adc->dma_chan = dma_request_slave_channel(&indio_dev->dev, "rx");
+ if (!adc->dma_chan)
+ return -EINVAL;
+
+ adc->rx_buf = dma_alloc_coherent(adc->dma_chan->device->dev,
+ DFSDM_DMA_BUFFER_SIZE,
+ &adc->dma_buf, GFP_KERNEL);
+ if (!adc->rx_buf) {
+ ret = -ENOMEM;
+ goto err_release;
+ }
+
+ ret = dmaengine_slave_config(adc->dma_chan, &config);
+ if (ret)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ dma_free_coherent(adc->dma_chan->device->dev, DFSDM_DMA_BUFFER_SIZE,
+ adc->rx_buf, adc->dma_buf);
+err_release:
+ dma_release_channel(adc->dma_chan);
+
+ return ret;
+}
+
+static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev,
+ struct iio_chan_spec *ch)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ int ret;
+
+ ret = stm32_dfsdm_channel_parse_of(adc->dfsdm, indio_dev, ch);
+ if (ret < 0)
+ return ret;
+
+ ch->type = IIO_VOLTAGE;
+ ch->indexed = 1;
+
+ /*
+ * IIO_CHAN_INFO_RAW: used to perform a single (regular) conversion
+ * IIO_CHAN_INFO_OVERSAMPLING_RATIO: used to set oversampling
+ */
+ ch->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
+ ch->info_mask_shared_by_all = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO);
+
+ if (adc->dev_data->type == DFSDM_AUDIO) {
+ ch->scan_type.sign = 's';
+ ch->ext_info = dfsdm_adc_audio_ext_info;
+ } else {
+ ch->scan_type.sign = 'u';
+ }
+ ch->scan_type.realbits = 24;
+ ch->scan_type.storagebits = 32;
+ adc->ch_id = ch->channel;
+
+ return stm32_dfsdm_chan_configure(adc->dfsdm,
+ &adc->dfsdm->ch_list[ch->channel]);
+}
+
+static int stm32_dfsdm_audio_init(struct iio_dev *indio_dev)
+{
+ struct iio_chan_spec *ch;
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ struct stm32_dfsdm_channel *d_ch;
+ int ret;
+
+ indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
+ indio_dev->setup_ops = &stm32_dfsdm_buffer_setup_ops;
+
+ ch = devm_kzalloc(&indio_dev->dev, sizeof(*ch), GFP_KERNEL);
+ if (!ch)
+ return -ENOMEM;
+
+ ch->scan_index = 0;
+
+ ret = stm32_dfsdm_adc_chan_init_one(indio_dev, ch);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev, "Channels init failed\n");
+ return ret;
+ }
+ ch->info_mask_separate = BIT(IIO_CHAN_INFO_SAMP_FREQ);
+
+ d_ch = &adc->dfsdm->ch_list[adc->ch_id];
+ if (d_ch->src != DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL)
+ adc->spi_freq = adc->dfsdm->spi_master_freq;
+
+ indio_dev->num_channels = 1;
+ indio_dev->channels = ch;
+
+ return stm32_dfsdm_dma_request(indio_dev);
+}
+
+static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev)
+{
+ struct iio_chan_spec *ch;
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ int num_ch;
+ int ret, chan_idx;
+
+ adc->oversamp = DFSDM_DEFAULT_OVERSAMPLING;
+ ret = stm32_dfsdm_set_osrs(&adc->dfsdm->fl_list[adc->fl_id], 0,
+ adc->oversamp);
+ if (ret < 0)
+ return ret;
+
+ num_ch = of_property_count_u32_elems(indio_dev->dev.of_node,
+ "st,adc-channels");
+ if (num_ch < 0 || num_ch > adc->dfsdm->num_chs) {
+ dev_err(&indio_dev->dev, "Bad st,adc-channels\n");
+ return num_ch < 0 ? num_ch : -EINVAL;
+ }
+
+ /* Bind to SD modulator IIO device */
+ adc->hwc = devm_iio_hw_consumer_alloc(&indio_dev->dev);
+ if (IS_ERR(adc->hwc))
+ return -EPROBE_DEFER;
+
+ ch = devm_kcalloc(&indio_dev->dev, num_ch, sizeof(*ch),
+ GFP_KERNEL);
+ if (!ch)
+ return -ENOMEM;
+
+ for (chan_idx = 0; chan_idx < num_ch; chan_idx++) {
+ ch[chan_idx].scan_index = chan_idx;
+ ret = stm32_dfsdm_adc_chan_init_one(indio_dev, &ch[chan_idx]);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev, "Channels init failed\n");
+ return ret;
+ }
+ }
+
+ indio_dev->num_channels = num_ch;
+ indio_dev->channels = ch;
+
+ init_completion(&adc->completion);
+
+ return 0;
+}
+
+static const struct stm32_dfsdm_dev_data stm32h7_dfsdm_adc_data = {
+ .type = DFSDM_IIO,
+ .init = stm32_dfsdm_adc_init,
+};
+
+static const struct stm32_dfsdm_dev_data stm32h7_dfsdm_audio_data = {
+ .type = DFSDM_AUDIO,
+ .init = stm32_dfsdm_audio_init,
+};
+
+static const struct of_device_id stm32_dfsdm_adc_match[] = {
+ {
+ .compatible = "st,stm32-dfsdm-adc",
+ .data = &stm32h7_dfsdm_adc_data,
+ },
+ {
+ .compatible = "st,stm32-dfsdm-dmic",
+ .data = &stm32h7_dfsdm_audio_data,
+ },
+ {}
+};
+
+static int stm32_dfsdm_adc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct stm32_dfsdm_adc *adc;
+ struct device_node *np = dev->of_node;
+ const struct stm32_dfsdm_dev_data *dev_data;
+ struct iio_dev *iio;
+ char *name;
+ int ret, irq, val;
+
+ dev_data = of_device_get_match_data(dev);
+ iio = devm_iio_device_alloc(dev, sizeof(*adc));
+ if (!iio) {
+ dev_err(dev, "%s: Failed to allocate IIO\n", __func__);
+ return -ENOMEM;
+ }
+
+ adc = iio_priv(iio);
+ adc->dfsdm = dev_get_drvdata(dev->parent);
+
+ iio->dev.parent = dev;
+ iio->dev.of_node = np;
+ iio->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_SOFTWARE;
+
+ platform_set_drvdata(pdev, adc);
+
+ ret = of_property_read_u32(dev->of_node, "reg", &adc->fl_id);
+ if (ret != 0) {
+ dev_err(dev, "Missing reg property\n");
+ return -EINVAL;
+ }
+
+ name = devm_kzalloc(dev, sizeof("dfsdm-adc0"), GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+ if (dev_data->type == DFSDM_AUDIO) {
+ iio->info = &stm32_dfsdm_info_audio;
+ snprintf(name, sizeof("dfsdm-pdm0"), "dfsdm-pdm%d", adc->fl_id);
+ } else {
+ iio->info = &stm32_dfsdm_info_adc;
+ snprintf(name, sizeof("dfsdm-adc0"), "dfsdm-adc%d", adc->fl_id);
+ }
+ iio->name = name;
+
+ /*
+ * As a first step, IRQs generated for channels are not handled.
+ * So the IRQ associated with filter instance 0 is dedicated to filter 0.
+ */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, stm32_dfsdm_irq,
+ 0, pdev->name, adc);
+ if (ret < 0) {
+ dev_err(dev, "Failed to request IRQ\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(dev->of_node, "st,filter-order", &val);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set filter order\n");
+ return ret;
+ }
+
+ adc->dfsdm->fl_list[adc->fl_id].ford = val;
+
+ ret = of_property_read_u32(dev->of_node, "st,filter0-sync", &val);
+ if (!ret)
+ adc->dfsdm->fl_list[adc->fl_id].sync_mode = val;
+
+ adc->dev_data = dev_data;
+ ret = dev_data->init(iio);
+ if (ret < 0)
+ return ret;
+
+ ret = iio_device_register(iio);
+ if (ret < 0)
+ goto err_cleanup;
+
+ dev_err(dev, "of_platform_populate\n");
+ if (dev_data->type == DFSDM_AUDIO) {
+ ret = of_platform_populate(np, NULL, NULL, dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to find an audio DAI\n");
+ goto err_unregister;
+ }
+ }
+
+ return 0;
+
+err_unregister:
+ iio_device_unregister(iio);
+err_cleanup:
+ stm32_dfsdm_dma_release(iio);
+
+ return ret;
+}
+
+static int stm32_dfsdm_adc_remove(struct platform_device *pdev)
+{
+ struct stm32_dfsdm_adc *adc = platform_get_drvdata(pdev);
+ struct iio_dev *indio_dev = iio_priv_to_dev(adc);
+
+ if (adc->dev_data->type == DFSDM_AUDIO)
+ of_platform_depopulate(&pdev->dev);
+ iio_device_unregister(indio_dev);
+ stm32_dfsdm_dma_release(indio_dev);
+
+ return 0;
+}
+
+static struct platform_driver stm32_dfsdm_adc_driver = {
+ .driver = {
+ .name = "stm32-dfsdm-adc",
+ .of_match_table = stm32_dfsdm_adc_match,
+ },
+ .probe = stm32_dfsdm_adc_probe,
+ .remove = stm32_dfsdm_adc_remove,
+};
+module_platform_driver(stm32_dfsdm_adc_driver);
+
+MODULE_DESCRIPTION("STM32 sigma delta ADC");
+MODULE_AUTHOR("Arnaud Pouliquen <arnaud.pouliquen@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/stm32-dfsdm-core.c b/drivers/iio/adc/stm32-dfsdm-core.c
new file mode 100644
index 000000000000..6290332cfd3f
--- /dev/null
+++ b/drivers/iio/adc/stm32-dfsdm-core.c
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file is the core part of the STM32 DFSDM driver
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author(s): Arnaud Pouliquen <arnaud.pouliquen@st.com> for STMicroelectronics.
+ */
+
+#include <linux/clk.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "stm32-dfsdm.h"
+
+struct stm32_dfsdm_dev_data {
+ unsigned int num_filters;
+ unsigned int num_channels;
+ const struct regmap_config *regmap_cfg;
+};
+
+#define STM32H7_DFSDM_NUM_FILTERS 4
+#define STM32H7_DFSDM_NUM_CHANNELS 8
+
+static bool stm32_dfsdm_volatile_reg(struct device *dev, unsigned int reg)
+{
+ if (reg < DFSDM_FILTER_BASE_ADR)
+ return false;
+
+ /*
+ * The mask is applied on the register offset to avoid listing
+ * the registers of all filter instances.
+ */
+ switch (reg & DFSDM_FILTER_REG_MASK) {
+ case DFSDM_CR1(0) & DFSDM_FILTER_REG_MASK:
+ case DFSDM_ISR(0) & DFSDM_FILTER_REG_MASK:
+ case DFSDM_JDATAR(0) & DFSDM_FILTER_REG_MASK:
+ case DFSDM_RDATAR(0) & DFSDM_FILTER_REG_MASK:
+ return true;
+ }
+
+ return false;
+}
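+
+/*
+ * Example of the masking above (illustrative only): DFSDM_ISR(2) is
+ * 2 * 0x80 + 0x100 + 0x08 = 0x208, and 0x208 & DFSDM_FILTER_REG_MASK = 0x08,
+ * which equals DFSDM_ISR(0) & DFSDM_FILTER_REG_MASK. So the ISR of any
+ * filter instance matches the same case and is marked volatile.
+ */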
+
+static const struct regmap_config stm32h7_dfsdm_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = sizeof(u32),
+ .max_register = 0x2B8,
+ .volatile_reg = stm32_dfsdm_volatile_reg,
+ .fast_io = true,
+};
+
+static const struct stm32_dfsdm_dev_data stm32h7_dfsdm_data = {
+ .num_filters = STM32H7_DFSDM_NUM_FILTERS,
+ .num_channels = STM32H7_DFSDM_NUM_CHANNELS,
+ .regmap_cfg = &stm32h7_dfsdm_regmap_cfg,
+};
+
+struct dfsdm_priv {
+ struct platform_device *pdev; /* platform device */
+
+ struct stm32_dfsdm dfsdm; /* common data exported for all instances */
+
+ unsigned int spi_clk_out_div; /* SPI clkout divider value */
+ atomic_t n_active_ch; /* number of current active channels */
+
+ struct clk *clk; /* DFSDM clock */
+ struct clk *aclk; /* audio clock */
+};
+
+/**
+ * stm32_dfsdm_start_dfsdm - start global dfsdm interface.
+ * @dfsdm: Handle used to retrieve dfsdm context.
+ *
+ * Enable the interface if n_active_ch is not null.
+ */
+int stm32_dfsdm_start_dfsdm(struct stm32_dfsdm *dfsdm)
+{
+ struct dfsdm_priv *priv = container_of(dfsdm, struct dfsdm_priv, dfsdm);
+ struct device *dev = &priv->pdev->dev;
+ unsigned int clk_div = priv->spi_clk_out_div;
+ int ret;
+
+ if (atomic_inc_return(&priv->n_active_ch) == 1) {
+ ret = clk_prepare_enable(priv->clk);
+ if (ret < 0) {
+ dev_err(dev, "Failed to start clock\n");
+ goto error_ret;
+ }
+ if (priv->aclk) {
+ ret = clk_prepare_enable(priv->aclk);
+ if (ret < 0) {
+ dev_err(dev, "Failed to start audio clock\n");
+ goto disable_clk;
+ }
+ }
+
+ /* Output the SPI CLKOUT (if clk_div == 0 the clock is OFF) */
+ ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0),
+ DFSDM_CHCFGR1_CKOUTDIV_MASK,
+ DFSDM_CHCFGR1_CKOUTDIV(clk_div));
+ if (ret < 0)
+ goto disable_aclk;
+
+ /* Global enable of DFSDM interface */
+ ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0),
+ DFSDM_CHCFGR1_DFSDMEN_MASK,
+ DFSDM_CHCFGR1_DFSDMEN(1));
+ if (ret < 0)
+ goto disable_aclk;
+ }
+
+ dev_dbg(dev, "%s: n_active_ch %d\n", __func__,
+ atomic_read(&priv->n_active_ch));
+
+ return 0;
+
+disable_aclk:
+ clk_disable_unprepare(priv->aclk);
+disable_clk:
+ clk_disable_unprepare(priv->clk);
+
+error_ret:
+ atomic_dec(&priv->n_active_ch);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(stm32_dfsdm_start_dfsdm);
+
+/**
+ * stm32_dfsdm_stop_dfsdm - stop global DFSDM interface.
+ * @dfsdm: Handle used to retrieve dfsdm context.
+ *
+ * Disable the interface if n_active_ch is null.
+ */
+int stm32_dfsdm_stop_dfsdm(struct stm32_dfsdm *dfsdm)
+{
+ struct dfsdm_priv *priv = container_of(dfsdm, struct dfsdm_priv, dfsdm);
+ int ret;
+
+ if (atomic_dec_and_test(&priv->n_active_ch)) {
+ /* Global disable of DFSDM interface */
+ ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0),
+ DFSDM_CHCFGR1_DFSDMEN_MASK,
+ DFSDM_CHCFGR1_DFSDMEN(0));
+ if (ret < 0)
+ return ret;
+
+ /* Stop SPI CLKOUT */
+ ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0),
+ DFSDM_CHCFGR1_CKOUTDIV_MASK,
+ DFSDM_CHCFGR1_CKOUTDIV(0));
+ if (ret < 0)
+ return ret;
+
+ clk_disable_unprepare(priv->clk);
+ if (priv->aclk)
+ clk_disable_unprepare(priv->aclk);
+ }
+ dev_dbg(&priv->pdev->dev, "%s: n_active_ch %d\n", __func__,
+ atomic_read(&priv->n_active_ch));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stm32_dfsdm_stop_dfsdm);
+
+static int stm32_dfsdm_parse_of(struct platform_device *pdev,
+ struct dfsdm_priv *priv)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct resource *res;
+ unsigned long clk_freq;
+ unsigned int spi_freq, rem;
+ int ret;
+
+ if (!node)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get memory resource\n");
+ return -ENODEV;
+ }
+ priv->dfsdm.phys_base = res->start;
+ priv->dfsdm.base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->dfsdm.base))
+ return PTR_ERR(priv->dfsdm.base);
+
+ /*
+ * "dfsdm" clock is mandatory for DFSDM peripheral clocking.
+ * "dfsdm" or "audio" clocks can be used as source clock for
+ * the SPI clock out signal and internal processing, depending
+ * on use case.
+ */
+ priv->clk = devm_clk_get(&pdev->dev, "dfsdm");
+ if (IS_ERR(priv->clk)) {
+ dev_err(&pdev->dev, "No stm32_dfsdm_clk clock found\n");
+ return -EINVAL;
+ }
+
+ priv->aclk = devm_clk_get(&pdev->dev, "audio");
+ if (IS_ERR(priv->aclk))
+ priv->aclk = NULL;
+
+ if (priv->aclk)
+ clk_freq = clk_get_rate(priv->aclk);
+ else
+ clk_freq = clk_get_rate(priv->clk);
+
+ /* SPI clock out frequency */
+ ret = of_property_read_u32(pdev->dev.of_node, "spi-max-frequency",
+ &spi_freq);
+ if (ret < 0) {
+ /* No SPI master mode */
+ return 0;
+ }
+
+ priv->spi_clk_out_div = div_u64_rem(clk_freq, spi_freq, &rem) - 1;
+ priv->dfsdm.spi_master_freq = spi_freq;
+
+ if (rem) {
+ dev_warn(&pdev->dev, "SPI clock not accurate\n");
+ dev_warn(&pdev->dev, "%ld = %d * %d + %d\n",
+ clk_freq, spi_freq, priv->spi_clk_out_div + 1, rem);
+ }
+
+ return 0;
+}
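+
+/*
+ * Worked example (a sketch with assumed rates, not from a datasheet): with
+ * an "audio" clock of 49152000 Hz and spi-max-frequency = 2048000,
+ * div_u64_rem() returns 24 with rem = 0, so spi_clk_out_div = 23 and
+ * CLKOUT = 49152000 / (23 + 1) = 2048000 Hz, exactly the requested rate.
+ */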
+
+static const struct of_device_id stm32_dfsdm_of_match[] = {
+ {
+ .compatible = "st,stm32h7-dfsdm",
+ .data = &stm32h7_dfsdm_data,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, stm32_dfsdm_of_match);
+
+static int stm32_dfsdm_probe(struct platform_device *pdev)
+{
+ struct dfsdm_priv *priv;
+ const struct stm32_dfsdm_dev_data *dev_data;
+ struct stm32_dfsdm *dfsdm;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->pdev = pdev;
+
+ dev_data = of_device_get_match_data(&pdev->dev);
+
+ dfsdm = &priv->dfsdm;
+ dfsdm->fl_list = devm_kcalloc(&pdev->dev, dev_data->num_filters,
+ sizeof(*dfsdm->fl_list), GFP_KERNEL);
+ if (!dfsdm->fl_list)
+ return -ENOMEM;
+
+ dfsdm->num_fls = dev_data->num_filters;
+ dfsdm->ch_list = devm_kcalloc(&pdev->dev, dev_data->num_channels,
+ sizeof(*dfsdm->ch_list),
+ GFP_KERNEL);
+ if (!dfsdm->ch_list)
+ return -ENOMEM;
+ dfsdm->num_chs = dev_data->num_channels;
+
+ ret = stm32_dfsdm_parse_of(pdev, priv);
+ if (ret < 0)
+ return ret;
+
+ dfsdm->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "dfsdm",
+ dfsdm->base,
+ &stm32h7_dfsdm_regmap_cfg);
+ if (IS_ERR(dfsdm->regmap)) {
+ ret = PTR_ERR(dfsdm->regmap);
+ dev_err(&pdev->dev, "%s: Failed to allocate regmap: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, dfsdm);
+
+ return devm_of_platform_populate(&pdev->dev);
+}
+
+static struct platform_driver stm32_dfsdm_driver = {
+ .probe = stm32_dfsdm_probe,
+ .driver = {
+ .name = "stm32-dfsdm",
+ .of_match_table = stm32_dfsdm_of_match,
+ },
+};
+
+module_platform_driver(stm32_dfsdm_driver);
+
+MODULE_AUTHOR("Arnaud Pouliquen <arnaud.pouliquen@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 dfsdm driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/stm32-dfsdm.h b/drivers/iio/adc/stm32-dfsdm.h
new file mode 100644
index 000000000000..8708394b0725
--- /dev/null
+++ b/drivers/iio/adc/stm32-dfsdm.h
@@ -0,0 +1,310 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This file is part of STM32 DFSDM driver
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author(s): Arnaud Pouliquen <arnaud.pouliquen@st.com>.
+ */
+
+#ifndef MDF_STM32_DFSDM__H
+#define MDF_STM32_DFSDM__H
+
+#include <linux/bitfield.h>
+
+/*
+ * STM32 DFSDM - global register map
+ * ________________________________________________________
+ * | Offset | Registers block |
+ * --------------------------------------------------------
+ * | 0x000 | CHANNEL 0 + COMMON CHANNEL FIELDS |
+ * --------------------------------------------------------
+ * | 0x020 | CHANNEL 1 |
+ * --------------------------------------------------------
+ * | ... | ..... |
+ * --------------------------------------------------------
+ * | 0x0E0 | CHANNEL 7 |
+ * --------------------------------------------------------
+ * | 0x100 | FILTER 0 + COMMON FILTER FIELDs |
+ * --------------------------------------------------------
+ * | 0x200 | FILTER 1 |
+ * --------------------------------------------------------
+ * | 0x300 | FILTER 2 |
+ * --------------------------------------------------------
+ * | 0x400 | FILTER 3 |
+ * --------------------------------------------------------
+ */
+
+/*
+ * Channels register definitions
+ */
+#define DFSDM_CHCFGR1(y) ((y) * 0x20 + 0x00)
+#define DFSDM_CHCFGR2(y) ((y) * 0x20 + 0x04)
+#define DFSDM_AWSCDR(y) ((y) * 0x20 + 0x08)
+#define DFSDM_CHWDATR(y) ((y) * 0x20 + 0x0C)
+#define DFSDM_CHDATINR(y) ((y) * 0x20 + 0x10)
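+
+/*
+ * Example (matching the map above): DFSDM_CHCFGR1(0) = 0x00 and
+ * DFSDM_CHCFGR1(7) = 7 * 0x20 = 0xE0, i.e. the channel 0 and channel 7
+ * register blocks in the global register map.
+ */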
+
+/* CHCFGR1: Channel configuration register 1 */
+#define DFSDM_CHCFGR1_SITP_MASK GENMASK(1, 0)
+#define DFSDM_CHCFGR1_SITP(v) FIELD_PREP(DFSDM_CHCFGR1_SITP_MASK, v)
+#define DFSDM_CHCFGR1_SPICKSEL_MASK GENMASK(3, 2)
+#define DFSDM_CHCFGR1_SPICKSEL(v) FIELD_PREP(DFSDM_CHCFGR1_SPICKSEL_MASK, v)
+#define DFSDM_CHCFGR1_SCDEN_MASK BIT(5)
+#define DFSDM_CHCFGR1_SCDEN(v) FIELD_PREP(DFSDM_CHCFGR1_SCDEN_MASK, v)
+#define DFSDM_CHCFGR1_CKABEN_MASK BIT(6)
+#define DFSDM_CHCFGR1_CKABEN(v) FIELD_PREP(DFSDM_CHCFGR1_CKABEN_MASK, v)
+#define DFSDM_CHCFGR1_CHEN_MASK BIT(7)
+#define DFSDM_CHCFGR1_CHEN(v) FIELD_PREP(DFSDM_CHCFGR1_CHEN_MASK, v)
+#define DFSDM_CHCFGR1_CHINSEL_MASK BIT(8)
+#define DFSDM_CHCFGR1_CHINSEL(v) FIELD_PREP(DFSDM_CHCFGR1_CHINSEL_MASK, v)
+#define DFSDM_CHCFGR1_DATMPX_MASK GENMASK(13, 12)
+#define DFSDM_CHCFGR1_DATMPX(v) FIELD_PREP(DFSDM_CHCFGR1_DATMPX_MASK, v)
+#define DFSDM_CHCFGR1_DATPACK_MASK GENMASK(15, 14)
+#define DFSDM_CHCFGR1_DATPACK(v) FIELD_PREP(DFSDM_CHCFGR1_DATPACK_MASK, v)
+#define DFSDM_CHCFGR1_CKOUTDIV_MASK GENMASK(23, 16)
+#define DFSDM_CHCFGR1_CKOUTDIV(v) FIELD_PREP(DFSDM_CHCFGR1_CKOUTDIV_MASK, v)
+#define DFSDM_CHCFGR1_CKOUTSRC_MASK BIT(30)
+#define DFSDM_CHCFGR1_CKOUTSRC(v) FIELD_PREP(DFSDM_CHCFGR1_CKOUTSRC_MASK, v)
+#define DFSDM_CHCFGR1_DFSDMEN_MASK BIT(31)
+#define DFSDM_CHCFGR1_DFSDMEN(v) FIELD_PREP(DFSDM_CHCFGR1_DFSDMEN_MASK, v)
+
+/* CHCFGR2: Channel configuration register 2 */
+#define DFSDM_CHCFGR2_DTRBS_MASK GENMASK(7, 3)
+#define DFSDM_CHCFGR2_DTRBS(v) FIELD_PREP(DFSDM_CHCFGR2_DTRBS_MASK, v)
+#define DFSDM_CHCFGR2_OFFSET_MASK GENMASK(31, 8)
+#define DFSDM_CHCFGR2_OFFSET(v) FIELD_PREP(DFSDM_CHCFGR2_OFFSET_MASK, v)
+
+/* AWSCDR: Channel analog watchdog and short circuit detector */
+#define DFSDM_AWSCDR_SCDT_MASK GENMASK(7, 0)
+#define DFSDM_AWSCDR_SCDT(v) FIELD_PREP(DFSDM_AWSCDR_SCDT_MASK, v)
+#define DFSDM_AWSCDR_BKSCD_MASK GENMASK(15, 12)
+#define DFSDM_AWSCDR_BKSCD(v) FIELD_PREP(DFSDM_AWSCDR_BKSCD_MASK, v)
+#define DFSDM_AWSCDR_AWFOSR_MASK GENMASK(20, 16)
+#define DFSDM_AWSCDR_AWFOSR(v) FIELD_PREP(DFSDM_AWSCDR_AWFOSR_MASK, v)
+#define DFSDM_AWSCDR_AWFORD_MASK GENMASK(23, 22)
+#define DFSDM_AWSCDR_AWFORD(v) FIELD_PREP(DFSDM_AWSCDR_AWFORD_MASK, v)
+
+/*
+ * Filters register definitions
+ */
+#define DFSDM_FILTER_BASE_ADR 0x100
+#define DFSDM_FILTER_REG_MASK 0x7F
+#define DFSDM_FILTER_X_BASE_ADR(x) ((x) * 0x80 + DFSDM_FILTER_BASE_ADR)
+
+#define DFSDM_CR1(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x00)
+#define DFSDM_CR2(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x04)
+#define DFSDM_ISR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x08)
+#define DFSDM_ICR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x0C)
+#define DFSDM_JCHGR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x10)
+#define DFSDM_FCR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x14)
+#define DFSDM_JDATAR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x18)
+#define DFSDM_RDATAR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x1C)
+#define DFSDM_AWHTR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x20)
+#define DFSDM_AWLTR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x24)
+#define DFSDM_AWSR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x28)
+#define DFSDM_AWCFR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x2C)
+#define DFSDM_EXMAX(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x30)
+#define DFSDM_EXMIN(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x34)
+#define DFSDM_CNVTIMR(x) (DFSDM_FILTER_X_BASE_ADR(x) + 0x38)
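+
+/*
+ * Example (matching the map above): DFSDM_CR1(0) = 0x100 and
+ * DFSDM_CR1(3) = 3 * 0x80 + 0x100 = 0x280; the last filter register,
+ * DFSDM_CNVTIMR(3) = 0x280 + 0x38 = 0x2B8, matches the regmap max_register.
+ */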
+
+/* CR1 Control register 1 */
+#define DFSDM_CR1_DFEN_MASK BIT(0)
+#define DFSDM_CR1_DFEN(v) FIELD_PREP(DFSDM_CR1_DFEN_MASK, v)
+#define DFSDM_CR1_JSWSTART_MASK BIT(1)
+#define DFSDM_CR1_JSWSTART(v) FIELD_PREP(DFSDM_CR1_JSWSTART_MASK, v)
+#define DFSDM_CR1_JSYNC_MASK BIT(3)
+#define DFSDM_CR1_JSYNC(v) FIELD_PREP(DFSDM_CR1_JSYNC_MASK, v)
+#define DFSDM_CR1_JSCAN_MASK BIT(4)
+#define DFSDM_CR1_JSCAN(v) FIELD_PREP(DFSDM_CR1_JSCAN_MASK, v)
+#define DFSDM_CR1_JDMAEN_MASK BIT(5)
+#define DFSDM_CR1_JDMAEN(v) FIELD_PREP(DFSDM_CR1_JDMAEN_MASK, v)
+#define DFSDM_CR1_JEXTSEL_MASK GENMASK(12, 8)
+#define DFSDM_CR1_JEXTSEL(v) FIELD_PREP(DFSDM_CR1_JEXTSEL_MASK, v)
+#define DFSDM_CR1_JEXTEN_MASK GENMASK(14, 13)
+#define DFSDM_CR1_JEXTEN(v) FIELD_PREP(DFSDM_CR1_JEXTEN_MASK, v)
+#define DFSDM_CR1_RSWSTART_MASK BIT(17)
+#define DFSDM_CR1_RSWSTART(v) FIELD_PREP(DFSDM_CR1_RSWSTART_MASK, v)
+#define DFSDM_CR1_RCONT_MASK BIT(18)
+#define DFSDM_CR1_RCONT(v) FIELD_PREP(DFSDM_CR1_RCONT_MASK, v)
+#define DFSDM_CR1_RSYNC_MASK BIT(19)
+#define DFSDM_CR1_RSYNC(v) FIELD_PREP(DFSDM_CR1_RSYNC_MASK, v)
+#define DFSDM_CR1_RDMAEN_MASK BIT(21)
+#define DFSDM_CR1_RDMAEN(v) FIELD_PREP(DFSDM_CR1_RDMAEN_MASK, v)
+#define DFSDM_CR1_RCH_MASK GENMASK(26, 24)
+#define DFSDM_CR1_RCH(v) FIELD_PREP(DFSDM_CR1_RCH_MASK, v)
+#define DFSDM_CR1_FAST_MASK BIT(29)
+#define DFSDM_CR1_FAST(v) FIELD_PREP(DFSDM_CR1_FAST_MASK, v)
+#define DFSDM_CR1_AWFSEL_MASK BIT(30)
+#define DFSDM_CR1_AWFSEL(v) FIELD_PREP(DFSDM_CR1_AWFSEL_MASK, v)
+
+/* CR2: Control register 2 */
+#define DFSDM_CR2_IE_MASK GENMASK(6, 0)
+#define DFSDM_CR2_IE(v) FIELD_PREP(DFSDM_CR2_IE_MASK, v)
+#define DFSDM_CR2_JEOCIE_MASK BIT(0)
+#define DFSDM_CR2_JEOCIE(v) FIELD_PREP(DFSDM_CR2_JEOCIE_MASK, v)
+#define DFSDM_CR2_REOCIE_MASK BIT(1)
+#define DFSDM_CR2_REOCIE(v) FIELD_PREP(DFSDM_CR2_REOCIE_MASK, v)
+#define DFSDM_CR2_JOVRIE_MASK BIT(2)
+#define DFSDM_CR2_JOVRIE(v) FIELD_PREP(DFSDM_CR2_JOVRIE_MASK, v)
+#define DFSDM_CR2_ROVRIE_MASK BIT(3)
+#define DFSDM_CR2_ROVRIE(v) FIELD_PREP(DFSDM_CR2_ROVRIE_MASK, v)
+#define DFSDM_CR2_AWDIE_MASK BIT(4)
+#define DFSDM_CR2_AWDIE(v) FIELD_PREP(DFSDM_CR2_AWDIE_MASK, v)
+#define DFSDM_CR2_SCDIE_MASK BIT(5)
+#define DFSDM_CR2_SCDIE(v) FIELD_PREP(DFSDM_CR2_SCDIE_MASK, v)
+#define DFSDM_CR2_CKABIE_MASK BIT(6)
+#define DFSDM_CR2_CKABIE(v) FIELD_PREP(DFSDM_CR2_CKABIE_MASK, v)
+#define DFSDM_CR2_EXCH_MASK GENMASK(15, 8)
+#define DFSDM_CR2_EXCH(v) FIELD_PREP(DFSDM_CR2_EXCH_MASK, v)
+#define DFSDM_CR2_AWDCH_MASK GENMASK(23, 16)
+#define DFSDM_CR2_AWDCH(v) FIELD_PREP(DFSDM_CR2_AWDCH_MASK, v)
+
+/* ISR: Interrupt status register */
+#define DFSDM_ISR_JEOCF_MASK BIT(0)
+#define DFSDM_ISR_JEOCF(v) FIELD_PREP(DFSDM_ISR_JEOCF_MASK, v)
+#define DFSDM_ISR_REOCF_MASK BIT(1)
+#define DFSDM_ISR_REOCF(v) FIELD_PREP(DFSDM_ISR_REOCF_MASK, v)
+#define DFSDM_ISR_JOVRF_MASK BIT(2)
+#define DFSDM_ISR_JOVRF(v) FIELD_PREP(DFSDM_ISR_JOVRF_MASK, v)
+#define DFSDM_ISR_ROVRF_MASK BIT(3)
+#define DFSDM_ISR_ROVRF(v) FIELD_PREP(DFSDM_ISR_ROVRF_MASK, v)
+#define DFSDM_ISR_AWDF_MASK BIT(4)
+#define DFSDM_ISR_AWDF(v) FIELD_PREP(DFSDM_ISR_AWDF_MASK, v)
+#define DFSDM_ISR_JCIP_MASK BIT(13)
+#define DFSDM_ISR_JCIP(v) FIELD_PREP(DFSDM_ISR_JCIP_MASK, v)
+#define DFSDM_ISR_RCIP_MASK BIT(14)
+#define DFSDM_ISR_RCIP(v) FIELD_PREP(DFSDM_ISR_RCIP_MASK, v)
+#define DFSDM_ISR_CKABF_MASK GENMASK(23, 16)
+#define DFSDM_ISR_CKABF(v) FIELD_PREP(DFSDM_ISR_CKABF_MASK, v)
+#define DFSDM_ISR_SCDF_MASK GENMASK(31, 24)
+#define DFSDM_ISR_SCDF(v) FIELD_PREP(DFSDM_ISR_SCDF_MASK, v)
+
+/* ICR: Interrupt flag clear register */
+#define DFSDM_ICR_CLRJOVRF_MASK BIT(2)
+#define DFSDM_ICR_CLRJOVRF(v) FIELD_PREP(DFSDM_ICR_CLRJOVRF_MASK, v)
+#define DFSDM_ICR_CLRROVRF_MASK BIT(3)
+#define DFSDM_ICR_CLRROVRF(v) FIELD_PREP(DFSDM_ICR_CLRROVRF_MASK, v)
+#define DFSDM_ICR_CLRCKABF_MASK GENMASK(23, 16)
+#define DFSDM_ICR_CLRCKABF(v) FIELD_PREP(DFSDM_ICR_CLRCKABF_MASK, v)
+#define DFSDM_ICR_CLRCKABF_CH_MASK(y) BIT(16 + (y))
+#define DFSDM_ICR_CLRCKABF_CH(v, y) \
+ (((v) << (16 + (y))) & DFSDM_ICR_CLRCKABF_CH_MASK(y))
+#define DFSDM_ICR_CLRSCDF_MASK GENMASK(31, 24)
+#define DFSDM_ICR_CLRSCDF(v) FIELD_PREP(DFSDM_ICR_CLRSCDF_MASK, v)
+#define DFSDM_ICR_CLRSCDF_CH_MASK(y) BIT(24 + (y))
+#define DFSDM_ICR_CLRSCDF_CH(v, y) \
+ (((v) << (24 + (y))) & DFSDM_ICR_CLRSCDF_CH_MASK(y))
+
+/* FCR: Filter control register */
+#define DFSDM_FCR_IOSR_MASK GENMASK(7, 0)
+#define DFSDM_FCR_IOSR(v) FIELD_PREP(DFSDM_FCR_IOSR_MASK, v)
+#define DFSDM_FCR_FOSR_MASK GENMASK(25, 16)
+#define DFSDM_FCR_FOSR(v) FIELD_PREP(DFSDM_FCR_FOSR_MASK, v)
+#define DFSDM_FCR_FORD_MASK GENMASK(31, 29)
+#define DFSDM_FCR_FORD(v) FIELD_PREP(DFSDM_FCR_FORD_MASK, v)
+
+/* RDATAR: Filter data register for regular channel */
+#define DFSDM_DATAR_CH_MASK GENMASK(2, 0)
+#define DFSDM_DATAR_DATA_OFFSET 8
+#define DFSDM_DATAR_DATA_MASK GENMASK(31, DFSDM_DATAR_DATA_OFFSET)
+
+/* AWLTR: Filter analog watchdog low threshold register */
+#define DFSDM_AWLTR_BKAWL_MASK GENMASK(3, 0)
+#define DFSDM_AWLTR_BKAWL(v) FIELD_PREP(DFSDM_AWLTR_BKAWL_MASK, v)
+#define DFSDM_AWLTR_AWLT_MASK GENMASK(31, 8)
+#define DFSDM_AWLTR_AWLT(v) FIELD_PREP(DFSDM_AWLTR_AWLT_MASK, v)
+
+/* AWHTR: Filter analog watchdog high threshold register */
+#define DFSDM_AWHTR_BKAWH_MASK GENMASK(3, 0)
+#define DFSDM_AWHTR_BKAWH(v) FIELD_PREP(DFSDM_AWHTR_BKAWH_MASK, v)
+#define DFSDM_AWHTR_AWHT_MASK GENMASK(31, 8)
+#define DFSDM_AWHTR_AWHT(v) FIELD_PREP(DFSDM_AWHTR_AWHT_MASK, v)
+
+/* AWSR: Filter analog watchdog status register */
+#define DFSDM_AWSR_AWLTF_MASK GENMASK(7, 0)
+#define DFSDM_AWSR_AWLTF(v) FIELD_PREP(DFSDM_AWSR_AWLTF_MASK, v)
+#define DFSDM_AWSR_AWHTF_MASK GENMASK(15, 8)
+#define DFSDM_AWSR_AWHTF(v) FIELD_PREP(DFSDM_AWSR_AWHTF_MASK, v)
+
+/* AWCFR: Filter analog watchdog clear flag register */
+#define DFSDM_AWCFR_AWLTF_MASK GENMASK(7, 0)
+#define DFSDM_AWCFR_AWLTF(v) FIELD_PREP(DFSDM_AWCFR_AWLTF_MASK, v)
+#define DFSDM_AWCFR_AWHTF_MASK GENMASK(15, 8)
+#define DFSDM_AWCFR_AWHTF(v) FIELD_PREP(DFSDM_AWCFR_AWHTF_MASK, v)
+
+/* DFSDM filter order */
+enum stm32_dfsdm_sinc_order {
+ DFSDM_FASTSINC_ORDER, /* FastSinc filter type */
+ DFSDM_SINC1_ORDER, /* Sinc 1 filter type */
+ DFSDM_SINC2_ORDER, /* Sinc 2 filter type */
+ DFSDM_SINC3_ORDER, /* Sinc 3 filter type */
+ DFSDM_SINC4_ORDER, /* Sinc 4 filter type (N.A. for watchdog) */
+ DFSDM_SINC5_ORDER, /* Sinc 5 filter type (N.A. for watchdog) */
+ DFSDM_NB_SINC_ORDER,
+};
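
The sinc order chosen here sets the peak filter output, and hence the usable resolution: for a Sinc^N filter it grows roughly as fosr^N * iosr, which is why struct stm32_dfsdm_filter below stores @res in a u64. A minimal sketch of that computation, assuming the plain Sinc^N case (FastSinc scales differently):

static u64 example_sinc_resolution(unsigned int fosr, unsigned int iosr,
				   enum stm32_dfsdm_sinc_order ford)
{
	u64 res = iosr;
	unsigned int i;

	/* SINC1..SINC5 have ford = 1..5: multiply by fosr that many times */
	for (i = 0; i < ford; i++)
		res *= fosr;

	return res;
}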
+
+/**
+ * struct stm32_dfsdm_filter - structure describing an STM32 DFSDM filter
+ * @iosr: integrator oversampling
+ * @fosr: filter oversampling
+ * @ford: filter order
+ * @res: output sample resolution
+ * @sync_mode: filter synchronized with filter 0
+ * @fast: filter fast mode
+ */
+struct stm32_dfsdm_filter {
+ unsigned int iosr;
+ unsigned int fosr;
+ enum stm32_dfsdm_sinc_order ford;
+ u64 res;
+ unsigned int sync_mode;
+ unsigned int fast;
+};
+
+/**
+ * struct stm32_dfsdm_channel - structure describing an STM32 DFSDM channel
+ * @id: id of the channel
+ * @type: interface type linked to stm32_dfsdm_chan_type
+ * @src: channel source linked to stm32_dfsdm_chan_src
+ * @alt_si: alternative serial input interface
+ */
+struct stm32_dfsdm_channel {
+ unsigned int id;
+ unsigned int type;
+ unsigned int src;
+ unsigned int alt_si;
+};
+
+/**
+ * struct stm32_dfsdm - STM32 DFSDM driver common data (for all instances)
+ * @base: control registers base CPU address
+ * @phys_base: DFSDM IP register physical address
+ * @regmap: regmap for register read/write
+ * @fl_list: filter resources list
+ * @num_fls: number of filter resources available
+ * @ch_list: channel resources list
+ * @num_chs: number of channel resources available
+ * @spi_master_freq: SPI clock out frequency
+ */
+struct stm32_dfsdm {
+ void __iomem *base;
+ phys_addr_t phys_base;
+ struct regmap *regmap;
+ struct stm32_dfsdm_filter *fl_list;
+ unsigned int num_fls;
+ struct stm32_dfsdm_channel *ch_list;
+ unsigned int num_chs;
+ unsigned int spi_master_freq;
+};
+
+/* DFSDM channel serial spi clock source */
+enum stm32_dfsdm_spi_clk_src {
+ DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL,
+ DFSDM_CHANNEL_SPI_CLOCK_INTERNAL,
+ DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_FALLING,
+ DFSDM_CHANNEL_SPI_CLOCK_INTERNAL_DIV2_RISING
+};
+
+int stm32_dfsdm_start_dfsdm(struct stm32_dfsdm *dfsdm);
+int stm32_dfsdm_stop_dfsdm(struct stm32_dfsdm *dfsdm);
+
+#endif
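
For readers unfamiliar with the FIELD_PREP() pattern used throughout this header: each FOO(v) helper shifts v into the field described by FOO_MASK, so register values compose with a plain OR. A minimal usage sketch, assuming a DFSDM_CR1(x) offset macro defined earlier in this header and a live regmap:

static int example_start_regular_conv(struct stm32_dfsdm *dfsdm,
				      unsigned int fl)
{
	/* Continuous conversion of channel 0, results drained by DMA */
	u32 mask = DFSDM_CR1_RCONT_MASK | DFSDM_CR1_RCH_MASK |
		   DFSDM_CR1_RDMAEN_MASK;
	u32 val = DFSDM_CR1_RCONT(1) | DFSDM_CR1_RCH(0) |
		  DFSDM_CR1_RDMAEN(1);

	/* DFSDM_CR1(x) is assumed from earlier in this header */
	return regmap_update_bits(dfsdm->regmap, DFSDM_CR1(fl), mask, val);
}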
diff --git a/drivers/iio/buffer/Kconfig b/drivers/iio/buffer/Kconfig
index 4ffd3db7817f..338774cba19b 100644
--- a/drivers/iio/buffer/Kconfig
+++ b/drivers/iio/buffer/Kconfig
@@ -29,6 +29,16 @@ config IIO_BUFFER_DMAENGINE
Should be selected by drivers that want to use this functionality.
+config IIO_BUFFER_HW_CONSUMER
+ tristate "Industrial I/O HW buffering"
+ help
+ Provides a way of bonding an IIO device to another device to which
+ it has a direct hardware connection. In this case, buffers for data
+ transfers are handled by hardware.
+
+ Should be selected by drivers that want to use the generic HW
+ consumer interface.
+
config IIO_KFIFO_BUF
tristate "Industrial I/O buffering based on kfifo"
help
diff --git a/drivers/iio/buffer/Makefile b/drivers/iio/buffer/Makefile
index 95f9f41c58b7..1403eb2f9409 100644
--- a/drivers/iio/buffer/Makefile
+++ b/drivers/iio/buffer/Makefile
@@ -7,5 +7,6 @@
obj-$(CONFIG_IIO_BUFFER_CB) += industrialio-buffer-cb.o
obj-$(CONFIG_IIO_BUFFER_DMA) += industrialio-buffer-dma.o
obj-$(CONFIG_IIO_BUFFER_DMAENGINE) += industrialio-buffer-dmaengine.o
+obj-$(CONFIG_IIO_BUFFER_HW_CONSUMER) += industrialio-hw-consumer.o
obj-$(CONFIG_IIO_TRIGGERED_BUFFER) += industrialio-triggered-buffer.o
obj-$(CONFIG_IIO_KFIFO_BUF) += kfifo_buf.o
diff --git a/drivers/iio/buffer/industrialio-buffer-cb.c b/drivers/iio/buffer/industrialio-buffer-cb.c
index 4847534700e7..ea63c838eeae 100644
--- a/drivers/iio/buffer/industrialio-buffer-cb.c
+++ b/drivers/iio/buffer/industrialio-buffer-cb.c
@@ -104,6 +104,17 @@ error_free_cb_buff:
}
EXPORT_SYMBOL_GPL(iio_channel_get_all_cb);
+int iio_channel_cb_set_buffer_watermark(struct iio_cb_buffer *cb_buff,
+ size_t watermark)
+{
+ if (!watermark)
+ return -EINVAL;
+ cb_buff->buffer.watermark = watermark;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iio_channel_cb_set_buffer_watermark);
+
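The new export lets a callback-buffer consumer batch its callbacks instead of being called per sample. A hedged usage sketch; the callback, device pointer, and watermark value are illustrative assumptions:

static int example_cb(const void *data, void *private)
{
	return 0;	/* consume one scan worth of data */
}

	struct iio_cb_buffer *cb;
	int ret;

	cb = iio_channel_get_all_cb(dev, example_cb, NULL);
	if (IS_ERR(cb))
		return PTR_ERR(cb);

	/* Queue 16 samples before invoking example_cb; 0 is -EINVAL */
	ret = iio_channel_cb_set_buffer_watermark(cb, 16);
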
int iio_channel_start_all_cb(struct iio_cb_buffer *cb_buff)
{
return iio_update_buffers(cb_buff->indio_dev, &cb_buff->buffer,
diff --git a/drivers/iio/buffer/industrialio-hw-consumer.c b/drivers/iio/buffer/industrialio-hw-consumer.c
new file mode 100644
index 000000000000..95165697d8ae
--- /dev/null
+++ b/drivers/iio/buffer/industrialio-hw-consumer.c
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2017 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ */
+
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/consumer.h>
+#include <linux/iio/hw-consumer.h>
+#include <linux/iio/buffer_impl.h>
+
+/**
+ * struct iio_hw_consumer - IIO hw consumer block
+ * @buffers: hardware buffers list head.
+ * @channels: IIO provider channels.
+ */
+struct iio_hw_consumer {
+ struct list_head buffers;
+ struct iio_channel *channels;
+};
+
+struct hw_consumer_buffer {
+ struct list_head head;
+ struct iio_dev *indio_dev;
+ struct iio_buffer buffer;
+ long scan_mask[];
+};
+
+static struct hw_consumer_buffer *iio_buffer_to_hw_consumer_buffer(
+ struct iio_buffer *buffer)
+{
+ return container_of(buffer, struct hw_consumer_buffer, buffer);
+}
+
+static void iio_hw_buf_release(struct iio_buffer *buffer)
+{
+ struct hw_consumer_buffer *hw_buf =
+ iio_buffer_to_hw_consumer_buffer(buffer);
+ kfree(hw_buf);
+}
+
+static const struct iio_buffer_access_funcs iio_hw_buf_access = {
+ .release = &iio_hw_buf_release,
+ .modes = INDIO_BUFFER_HARDWARE,
+};
+
+static struct hw_consumer_buffer *iio_hw_consumer_get_buffer(
+ struct iio_hw_consumer *hwc, struct iio_dev *indio_dev)
+{
+ size_t mask_size = BITS_TO_LONGS(indio_dev->masklength) * sizeof(long);
+ struct hw_consumer_buffer *buf;
+
+ list_for_each_entry(buf, &hwc->buffers, head) {
+ if (buf->indio_dev == indio_dev)
+ return buf;
+ }
+
+ buf = kzalloc(sizeof(*buf) + mask_size, GFP_KERNEL);
+ if (!buf)
+ return NULL;
+
+ buf->buffer.access = &iio_hw_buf_access;
+ buf->indio_dev = indio_dev;
+ buf->buffer.scan_mask = buf->scan_mask;
+
+ iio_buffer_init(&buf->buffer);
+ list_add_tail(&buf->head, &hwc->buffers);
+
+ return buf;
+}
+
+/**
+ * iio_hw_consumer_alloc() - Allocate IIO hardware consumer
+ * @dev: Pointer to consumer device.
+ *
+ * Returns a valid iio_hw_consumer on success or an ERR_PTR() on failure.
+ */
+struct iio_hw_consumer *iio_hw_consumer_alloc(struct device *dev)
+{
+ struct hw_consumer_buffer *buf;
+ struct iio_hw_consumer *hwc;
+ struct iio_channel *chan;
+ int ret;
+
+ hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
+ if (!hwc)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&hwc->buffers);
+
+ hwc->channels = iio_channel_get_all(dev);
+ if (IS_ERR(hwc->channels)) {
+ ret = PTR_ERR(hwc->channels);
+ goto err_free_hwc;
+ }
+
+ chan = &hwc->channels[0];
+ while (chan->indio_dev) {
+ buf = iio_hw_consumer_get_buffer(hwc, chan->indio_dev);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_put_buffers;
+ }
+ set_bit(chan->channel->scan_index, buf->buffer.scan_mask);
+ chan++;
+ }
+
+ return hwc;
+
+err_put_buffers:
+ list_for_each_entry(buf, &hwc->buffers, head)
+ iio_buffer_put(&buf->buffer);
+ iio_channel_release_all(hwc->channels);
+err_free_hwc:
+ kfree(hwc);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(iio_hw_consumer_alloc);
+
+/**
+ * iio_hw_consumer_free() - Free IIO hardware consumer
+ * @hwc: hw consumer to free.
+ */
+void iio_hw_consumer_free(struct iio_hw_consumer *hwc)
+{
+ struct hw_consumer_buffer *buf, *n;
+
+ iio_channel_release_all(hwc->channels);
+ list_for_each_entry_safe(buf, n, &hwc->buffers, head)
+ iio_buffer_put(&buf->buffer);
+ kfree(hwc);
+}
+EXPORT_SYMBOL_GPL(iio_hw_consumer_free);
+
+static void devm_iio_hw_consumer_release(struct device *dev, void *res)
+{
+ iio_hw_consumer_free(*(struct iio_hw_consumer **)res);
+}
+
+static int devm_iio_hw_consumer_match(struct device *dev, void *res, void *data)
+{
+ struct iio_hw_consumer **r = res;
+
+ if (!r || !*r) {
+ WARN_ON(!r || !*r);
+ return 0;
+ }
+ return *r == data;
+}
+
+/**
+ * devm_iio_hw_consumer_alloc - Resource-managed iio_hw_consumer_alloc()
+ * @dev: Pointer to consumer device.
+ *
+ * Managed iio_hw_consumer_alloc. iio_hw_consumer allocated with this function
+ * is automatically freed on driver detach.
+ *
+ * If an iio_hw_consumer allocated with this function needs to be freed
+ * separately, devm_iio_hw_consumer_free() must be used.
+ *
+ * Returns a pointer to the allocated iio_hw_consumer on success,
+ * NULL or an ERR_PTR() on failure.
+ */
+struct iio_hw_consumer *devm_iio_hw_consumer_alloc(struct device *dev)
+{
+ struct iio_hw_consumer **ptr, *iio_hwc;
+
+ ptr = devres_alloc(devm_iio_hw_consumer_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ iio_hwc = iio_hw_consumer_alloc(dev);
+ if (IS_ERR(iio_hwc)) {
+ devres_free(ptr);
+ } else {
+ *ptr = iio_hwc;
+ devres_add(dev, ptr);
+ }
+
+ return iio_hwc;
+}
+EXPORT_SYMBOL_GPL(devm_iio_hw_consumer_alloc);
+
+/**
+ * devm_iio_hw_consumer_free - Resource-managed iio_hw_consumer_free()
+ * @dev: Pointer to consumer device.
+ * @hwc: iio_hw_consumer to free.
+ *
+ * Free iio_hw_consumer allocated with devm_iio_hw_consumer_alloc().
+ */
+void devm_iio_hw_consumer_free(struct device *dev, struct iio_hw_consumer *hwc)
+{
+ int rc;
+
+ rc = devres_release(dev, devm_iio_hw_consumer_release,
+ devm_iio_hw_consumer_match, hwc);
+ WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_iio_hw_consumer_free);
+
+/**
+ * iio_hw_consumer_enable() - Enable IIO hardware consumer
+ * @hwc: iio_hw_consumer to enable.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int iio_hw_consumer_enable(struct iio_hw_consumer *hwc)
+{
+ struct hw_consumer_buffer *buf;
+ int ret;
+
+ list_for_each_entry(buf, &hwc->buffers, head) {
+ ret = iio_update_buffers(buf->indio_dev, &buf->buffer, NULL);
+ if (ret)
+ goto err_disable_buffers;
+ }
+
+ return 0;
+
+err_disable_buffers:
+ list_for_each_entry_continue_reverse(buf, &hwc->buffers, head)
+ iio_update_buffers(buf->indio_dev, NULL, &buf->buffer);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iio_hw_consumer_enable);
+
+/**
+ * iio_hw_consumer_disable() - Disable IIO hardware consumer
+ * @hwc: iio_hw_consumer to disable.
+ */
+void iio_hw_consumer_disable(struct iio_hw_consumer *hwc)
+{
+ struct hw_consumer_buffer *buf;
+
+ list_for_each_entry(buf, &hwc->buffers, head)
+ iio_update_buffers(buf->indio_dev, NULL, &buf->buffer);
+}
+EXPORT_SYMBOL_GPL(iio_hw_consumer_disable);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Hardware consumer buffer for the IIO framework");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/iio_core.h b/drivers/iio/iio_core.h
index 4c45488e3a7f..c775fedbcaf6 100644
--- a/drivers/iio/iio_core.h
+++ b/drivers/iio/iio_core.h
@@ -43,7 +43,7 @@ ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals);
#ifdef CONFIG_IIO_BUFFER
struct poll_table_struct;
-unsigned int iio_buffer_poll(struct file *filp,
+__poll_t iio_buffer_poll(struct file *filp,
struct poll_table_struct *wait);
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
size_t n, loff_t *f_ps);
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index d2b465140a6b..0bc2fe31f211 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -169,7 +169,7 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
* Return: (POLLIN | POLLRDNORM) if data is available for reading
* or 0 for other cases
*/
-unsigned int iio_buffer_poll(struct file *filp,
+__poll_t iio_buffer_poll(struct file *filp,
struct poll_table_struct *wait)
{
struct iio_dev *indio_dev = filp->private_data;
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index 90fac8ec63c9..0bcf073e46db 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -95,12 +95,12 @@ EXPORT_SYMBOL(iio_push_event);
* Return: (POLLIN | POLLRDNORM) if data is available for reading
* or a negative error code on failure
*/
-static unsigned int iio_event_poll(struct file *filep,
+static __poll_t iio_event_poll(struct file *filep,
struct poll_table_struct *wait)
{
struct iio_dev *indio_dev = filep->private_data;
struct iio_event_interface *ev_int = indio_dev->event_interface;
- unsigned int events = 0;
+ __poll_t events = 0;
if (!indio_dev->info)
return events;
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 069defcc6d9b..ec98790e2a28 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -664,9 +664,8 @@ err_unlock:
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);
-static int iio_read_channel_attribute(struct iio_channel *chan,
- int *val, int *val2,
- enum iio_chan_info_enum attribute)
+int iio_read_channel_attribute(struct iio_channel *chan, int *val, int *val2,
+ enum iio_chan_info_enum attribute)
{
int ret;
@@ -682,6 +681,7 @@ err_unlock:
return ret;
}
+EXPORT_SYMBOL_GPL(iio_read_channel_attribute);
int iio_read_channel_offset(struct iio_channel *chan, int *val, int *val2)
{
@@ -850,7 +850,8 @@ static int iio_channel_write(struct iio_channel *chan, int val, int val2,
chan->channel, val, val2, info);
}
-int iio_write_channel_raw(struct iio_channel *chan, int val)
+int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
+ enum iio_chan_info_enum attribute)
{
int ret;
@@ -860,12 +861,18 @@ int iio_write_channel_raw(struct iio_channel *chan, int val)
goto err_unlock;
}
- ret = iio_channel_write(chan, val, 0, IIO_CHAN_INFO_RAW);
+ ret = iio_channel_write(chan, val, val2, attribute);
err_unlock:
mutex_unlock(&chan->indio_dev->info_exist_lock);
return ret;
}
+EXPORT_SYMBOL_GPL(iio_write_channel_attribute);
+
+int iio_write_channel_raw(struct iio_channel *chan, int val)
+{
+ return iio_write_channel_attribute(chan, val, 0, IIO_CHAN_INFO_RAW);
+}
EXPORT_SYMBOL_GPL(iio_write_channel_raw);
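
With iio_read_channel_attribute() and iio_write_channel_attribute() now exported, a consumer can access any iio_chan_info_enum attribute rather than just raw values. A hedged sketch; chan is assumed to come from iio_channel_get():

	int val, val2, ret;

	/* on success ret is an IIO_VAL_* format code, negative on error */
	ret = iio_read_channel_attribute(chan, &val, &val2,
					 IIO_CHAN_INFO_SCALE);
	if (ret < 0)
		return ret;

	ret = iio_write_channel_attribute(chan, 32, 0,
					  IIO_CHAN_INFO_OVERSAMPLING_RATIO);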
unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan)
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index cbf186522016..5cd700421695 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -4,6 +4,7 @@ menuconfig INFINIBAND
depends on NET
depends on INET
depends on m || IPV6 != m
+ depends on !ALPHA
select IRQ_POLL
---help---
Core support for InfiniBand (IB). Make sure to also select
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index 504b926552c6..f69833db0a32 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -12,7 +12,7 @@ ib_core-y := packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \
device.o fmr_pool.o cache.o netlink.o \
roce_gid_mgmt.o mr_pool.o addr.o sa_query.o \
multicast.o mad.o smi.o agent.o mad_rmpp.o \
- security.o nldev.o
+ security.o nldev.o restrack.o
ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
ib_core-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index f4e8185bccd3..a5b4cf030c11 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -243,8 +243,7 @@ void rdma_copy_addr(struct rdma_dev_addr *dev_addr,
EXPORT_SYMBOL(rdma_copy_addr);
int rdma_translate_ip(const struct sockaddr *addr,
- struct rdma_dev_addr *dev_addr,
- u16 *vlan_id)
+ struct rdma_dev_addr *dev_addr)
{
struct net_device *dev;
@@ -266,9 +265,6 @@ int rdma_translate_ip(const struct sockaddr *addr,
return -EADDRNOTAVAIL;
rdma_copy_addr(dev_addr, dev, NULL);
- dev_addr->bound_dev_if = dev->ifindex;
- if (vlan_id)
- *vlan_id = rdma_vlan_dev_vlan_id(dev);
dev_put(dev);
break;
#if IS_ENABLED(CONFIG_IPV6)
@@ -279,9 +275,6 @@ int rdma_translate_ip(const struct sockaddr *addr,
&((const struct sockaddr_in6 *)addr)->sin6_addr,
dev, 1)) {
rdma_copy_addr(dev_addr, dev, NULL);
- dev_addr->bound_dev_if = dev->ifindex;
- if (vlan_id)
- *vlan_id = rdma_vlan_dev_vlan_id(dev);
break;
}
}
@@ -481,7 +474,7 @@ static int addr_resolve_neigh(struct dst_entry *dst,
if (dst->dev->flags & IFF_LOOPBACK) {
int ret;
- ret = rdma_translate_ip(dst_in, addr, NULL);
+ ret = rdma_translate_ip(dst_in, addr);
if (!ret)
memcpy(addr->dst_dev_addr, addr->src_dev_addr,
MAX_ADDR_LEN);
@@ -558,7 +551,7 @@ static int addr_resolve(struct sockaddr *src_in,
}
if (ndev->flags & IFF_LOOPBACK) {
- ret = rdma_translate_ip(dst_in, addr, NULL);
+ ret = rdma_translate_ip(dst_in, addr);
/*
* Put the loopback device and get the translated
* device instead.
@@ -744,7 +737,6 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr)
EXPORT_SYMBOL(rdma_addr_cancel);
struct resolve_cb_context {
- struct rdma_dev_addr *addr;
struct completion comp;
int status;
};
@@ -752,39 +744,31 @@ struct resolve_cb_context {
static void resolve_cb(int status, struct sockaddr *src_addr,
struct rdma_dev_addr *addr, void *context)
{
- if (!status)
- memcpy(((struct resolve_cb_context *)context)->addr,
- addr, sizeof(struct rdma_dev_addr));
((struct resolve_cb_context *)context)->status = status;
complete(&((struct resolve_cb_context *)context)->comp);
}
int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
const union ib_gid *dgid,
- u8 *dmac, u16 *vlan_id, int *if_index,
+ u8 *dmac, const struct net_device *ndev,
int *hoplimit)
{
- int ret = 0;
struct rdma_dev_addr dev_addr;
struct resolve_cb_context ctx;
- struct net_device *dev;
-
union {
struct sockaddr _sockaddr;
struct sockaddr_in _sockaddr_in;
struct sockaddr_in6 _sockaddr_in6;
} sgid_addr, dgid_addr;
-
+ int ret;
rdma_gid2ip(&sgid_addr._sockaddr, sgid);
rdma_gid2ip(&dgid_addr._sockaddr, dgid);
memset(&dev_addr, 0, sizeof(dev_addr));
- if (if_index)
- dev_addr.bound_dev_if = *if_index;
+ dev_addr.bound_dev_if = ndev->ifindex;
dev_addr.net = &init_net;
- ctx.addr = &dev_addr;
init_completion(&ctx.comp);
ret = rdma_resolve_ip(&self, &sgid_addr._sockaddr, &dgid_addr._sockaddr,
&dev_addr, 1000, resolve_cb, &ctx);
@@ -798,42 +782,9 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
return ret;
memcpy(dmac, dev_addr.dst_dev_addr, ETH_ALEN);
- dev = dev_get_by_index(&init_net, dev_addr.bound_dev_if);
- if (!dev)
- return -ENODEV;
- if (if_index)
- *if_index = dev_addr.bound_dev_if;
- if (vlan_id)
- *vlan_id = rdma_vlan_dev_vlan_id(dev);
- if (hoplimit)
- *hoplimit = dev_addr.hoplimit;
- dev_put(dev);
- return ret;
-}
-EXPORT_SYMBOL(rdma_addr_find_l2_eth_by_grh);
-
-int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id)
-{
- int ret = 0;
- struct rdma_dev_addr dev_addr;
- union {
- struct sockaddr _sockaddr;
- struct sockaddr_in _sockaddr_in;
- struct sockaddr_in6 _sockaddr_in6;
- } gid_addr;
-
- rdma_gid2ip(&gid_addr._sockaddr, sgid);
-
- memset(&dev_addr, 0, sizeof(dev_addr));
- dev_addr.net = &init_net;
- ret = rdma_translate_ip(&gid_addr._sockaddr, &dev_addr, vlan_id);
- if (ret)
- return ret;
-
- memcpy(smac, dev_addr.src_dev_addr, ETH_ALEN);
- return ret;
+ *hoplimit = dev_addr.hoplimit;
+ return 0;
}
-EXPORT_SYMBOL(rdma_addr_find_smac_by_sgid);
static int netevent_callback(struct notifier_block *self, unsigned long event,
void *ctx)
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 77515638c55c..e9a409d7f4e2 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -573,27 +573,24 @@ static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
struct ib_gid_attr attr;
if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
- goto next;
+ continue;
if (memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
- goto next;
+ continue;
memcpy(&attr, &table->data_vec[i].attr, sizeof(attr));
- if (filter(gid, &attr, context))
+ if (filter(gid, &attr, context)) {
found = true;
-
-next:
- if (found)
+ if (index)
+ *index = i;
break;
+ }
}
read_unlock_irqrestore(&table->rwlock, flags);
if (!found)
return -ENOENT;
-
- if (index)
- *index = i;
return 0;
}
@@ -824,12 +821,7 @@ static int gid_table_setup_one(struct ib_device *ib_dev)
if (err)
return err;
- err = roce_rescan_device(ib_dev);
-
- if (err) {
- gid_table_cleanup_one(ib_dev);
- gid_table_release_one(ib_dev);
- }
+ rdma_roce_rescan_device(ib_dev);
return err;
}
@@ -883,7 +875,6 @@ int ib_find_gid_by_filter(struct ib_device *device,
port_num, filter,
context, index);
}
-EXPORT_SYMBOL(ib_find_gid_by_filter);
int ib_get_cached_pkey(struct ib_device *device,
u8 port_num,
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index f6b159d79977..e6749157fd86 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -452,13 +452,14 @@ static void cm_set_private_data(struct cm_id_private *cm_id_priv,
cm_id_priv->private_data_len = private_data_len;
}
-static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
- struct ib_grh *grh, struct cm_av *av)
+static int cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
+ struct ib_grh *grh, struct cm_av *av)
{
av->port = port;
av->pkey_index = wc->pkey_index;
- ib_init_ah_from_wc(port->cm_dev->ib_device, port->port_num, wc,
- grh, &av->ah_attr);
+ return ib_init_ah_attr_from_wc(port->cm_dev->ib_device,
+ port->port_num, wc,
+ grh, &av->ah_attr);
}
static int cm_init_av_by_path(struct sa_path_rec *path, struct cm_av *av,
@@ -494,8 +495,11 @@ static int cm_init_av_by_path(struct sa_path_rec *path, struct cm_av *av,
return ret;
av->port = port;
- ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path,
- &av->ah_attr);
+ ret = ib_init_ah_attr_from_path(cm_dev->ib_device, port->port_num, path,
+ &av->ah_attr);
+ if (ret)
+ return ret;
+
av->timeout = path->packet_life_time + 1;
spin_lock_irqsave(&cm.lock, flags);
@@ -1560,6 +1564,35 @@ static u16 cm_get_bth_pkey(struct cm_work *work)
return pkey;
}
+/**
+ * cm_opa_to_ib_sgid - convert an OPA SGID to an IB SGID
+ * @work: Work completion
+ * @path: Path record
+ *
+ * ULPs (such as IPoIB) do not understand OPA GIDs and will
+ * reject them as the local_gid will not match the sgid. Therefore,
+ * change the pathrec's SGID to an IB SGID.
+ */
+static void cm_opa_to_ib_sgid(struct cm_work *work,
+ struct sa_path_rec *path)
+{
+ struct ib_device *dev = work->port->cm_dev->ib_device;
+ u8 port_num = work->port->port_num;
+
+ if (rdma_cap_opa_ah(dev, port_num) &&
+ (ib_is_opa_gid(&path->sgid))) {
+ union ib_gid sgid;
+
+ if (ib_get_cached_gid(dev, port_num, 0, &sgid, NULL)) {
+ dev_warn(&dev->dev,
+ "Error updating sgid in CM request\n");
+ return;
+ }
+
+ path->sgid = sgid;
+ }
+}
+
static void cm_format_req_event(struct cm_work *work,
struct cm_id_private *cm_id_priv,
struct ib_cm_id *listen_id)
@@ -1573,10 +1606,13 @@ static void cm_format_req_event(struct cm_work *work,
param->bth_pkey = cm_get_bth_pkey(work);
param->port = cm_id_priv->av.port->port_num;
param->primary_path = &work->path[0];
- if (cm_req_has_alt_path(req_msg))
+ cm_opa_to_ib_sgid(work, param->primary_path);
+ if (cm_req_has_alt_path(req_msg)) {
param->alternate_path = &work->path[1];
- else
+ cm_opa_to_ib_sgid(work, param->alternate_path);
+ } else {
param->alternate_path = NULL;
+ }
param->remote_ca_guid = req_msg->local_ca_guid;
param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
@@ -1826,9 +1862,11 @@ static int cm_req_handler(struct cm_work *work)
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
cm_id_priv->id.remote_id = req_msg->local_comm_id;
- cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
- work->mad_recv_wc->recv_buf.grh,
- &cm_id_priv->av);
+ ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
+ work->mad_recv_wc->recv_buf.grh,
+ &cm_id_priv->av);
+ if (ret)
+ goto destroy;
cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
id.local_id);
if (IS_ERR(cm_id_priv->timewait_info)) {
@@ -1841,9 +1879,10 @@ static int cm_req_handler(struct cm_work *work)
listen_cm_id_priv = cm_match_req(work, cm_id_priv);
if (!listen_cm_id_priv) {
+ pr_debug("%s: local_id %d, no listen_cm_id_priv\n", __func__,
+ be32_to_cpu(cm_id->local_id));
ret = -EINVAL;
- kfree(cm_id_priv->timewait_info);
- goto destroy;
+ goto free_timeinfo;
}
cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
@@ -1861,56 +1900,50 @@ static int cm_req_handler(struct cm_work *work)
work->port->port_num,
grh->sgid_index,
&gid, &gid_attr);
- if (!ret) {
- if (gid_attr.ndev) {
- work->path[0].rec_type =
- sa_conv_gid_to_pathrec_type(gid_attr.gid_type);
- sa_path_set_ifindex(&work->path[0],
- gid_attr.ndev->ifindex);
- sa_path_set_ndev(&work->path[0],
- dev_net(gid_attr.ndev));
- dev_put(gid_attr.ndev);
- } else {
- cm_path_set_rec_type(work->port->cm_dev->ib_device,
- work->port->port_num,
- &work->path[0],
- &req_msg->primary_local_gid);
- }
- if (cm_req_has_alt_path(req_msg))
- work->path[1].rec_type = work->path[0].rec_type;
- cm_format_paths_from_req(req_msg, &work->path[0],
- &work->path[1]);
- if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
- sa_path_set_dmac(&work->path[0],
- cm_id_priv->av.ah_attr.roce.dmac);
- work->path[0].hop_limit = grh->hop_limit;
- ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av,
- cm_id_priv);
+ if (ret) {
+ ib_send_cm_rej(cm_id, IB_CM_REJ_UNSUPPORTED, NULL, 0, NULL, 0);
+ goto rejected;
+ }
+
+ if (gid_attr.ndev) {
+ work->path[0].rec_type =
+ sa_conv_gid_to_pathrec_type(gid_attr.gid_type);
+ sa_path_set_ifindex(&work->path[0],
+ gid_attr.ndev->ifindex);
+ sa_path_set_ndev(&work->path[0],
+ dev_net(gid_attr.ndev));
+ dev_put(gid_attr.ndev);
+ } else {
+ cm_path_set_rec_type(work->port->cm_dev->ib_device,
+ work->port->port_num,
+ &work->path[0],
+ &req_msg->primary_local_gid);
}
+ if (cm_req_has_alt_path(req_msg))
+ work->path[1].rec_type = work->path[0].rec_type;
+ cm_format_paths_from_req(req_msg, &work->path[0],
+ &work->path[1]);
+ if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
+ sa_path_set_dmac(&work->path[0],
+ cm_id_priv->av.ah_attr.roce.dmac);
+ work->path[0].hop_limit = grh->hop_limit;
+ ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av,
+ cm_id_priv);
if (ret) {
- int err = ib_get_cached_gid(work->port->cm_dev->ib_device,
- work->port->port_num, 0,
- &work->path[0].sgid,
- &gid_attr);
- if (!err && gid_attr.ndev) {
- work->path[0].rec_type =
- sa_conv_gid_to_pathrec_type(gid_attr.gid_type);
- sa_path_set_ifindex(&work->path[0],
- gid_attr.ndev->ifindex);
- sa_path_set_ndev(&work->path[0],
- dev_net(gid_attr.ndev));
- dev_put(gid_attr.ndev);
- } else {
- cm_path_set_rec_type(work->port->cm_dev->ib_device,
- work->port->port_num,
- &work->path[0],
- &req_msg->primary_local_gid);
- }
- if (cm_req_has_alt_path(req_msg))
- work->path[1].rec_type = work->path[0].rec_type;
- ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
- &work->path[0].sgid, sizeof work->path[0].sgid,
- NULL, 0);
+ int err;
+
+ err = ib_get_cached_gid(work->port->cm_dev->ib_device,
+ work->port->port_num, 0,
+ &work->path[0].sgid,
+ NULL);
+ if (err)
+ ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
+ NULL, 0, NULL, 0);
+ else
+ ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
+ &work->path[0].sgid,
+ sizeof(work->path[0].sgid),
+ NULL, 0);
goto rejected;
}
if (cm_req_has_alt_path(req_msg)) {
@@ -1919,7 +1952,7 @@ static int cm_req_handler(struct cm_work *work)
if (ret) {
ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
&work->path[0].sgid,
- sizeof work->path[0].sgid, NULL, 0);
+ sizeof(work->path[0].sgid), NULL, 0);
goto rejected;
}
}
@@ -1945,6 +1978,8 @@ static int cm_req_handler(struct cm_work *work)
rejected:
atomic_dec(&cm_id_priv->refcount);
cm_deref_id(listen_cm_id_priv);
+free_timeinfo:
+ kfree(cm_id_priv->timewait_info);
destroy:
ib_destroy_cm_id(cm_id);
return ret;
@@ -1997,6 +2032,8 @@ int ib_send_cm_rep(struct ib_cm_id *cm_id,
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id->state != IB_CM_REQ_RCVD &&
cm_id->state != IB_CM_MRA_REQ_SENT) {
+ pr_debug("%s: local_comm_id %d, cm_id->state: %d\n", __func__,
+ be32_to_cpu(cm_id_priv->id.local_id), cm_id->state);
ret = -EINVAL;
goto out;
}
@@ -2063,6 +2100,8 @@ int ib_send_cm_rtu(struct ib_cm_id *cm_id,
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id->state != IB_CM_REP_RCVD &&
cm_id->state != IB_CM_MRA_REP_SENT) {
+ pr_debug("%s: local_id %d, cm_id->state %d\n", __func__,
+ be32_to_cpu(cm_id->local_id), cm_id->state);
ret = -EINVAL;
goto error;
}
@@ -2170,6 +2209,8 @@ static int cm_rep_handler(struct cm_work *work)
cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
if (!cm_id_priv) {
cm_dup_rep_handler(work);
+ pr_debug("%s: remote_comm_id %d, no cm_id_priv\n", __func__,
+ be32_to_cpu(rep_msg->remote_comm_id));
return -EINVAL;
}
@@ -2183,6 +2224,10 @@ static int cm_rep_handler(struct cm_work *work)
default:
spin_unlock_irq(&cm_id_priv->lock);
ret = -EINVAL;
+ pr_debug("%s: cm_id_priv->id.state: %d, local_comm_id %d, remote_comm_id %d\n",
+ __func__, cm_id_priv->id.state,
+ be32_to_cpu(rep_msg->local_comm_id),
+ be32_to_cpu(rep_msg->remote_comm_id));
goto error;
}
@@ -2196,6 +2241,8 @@ static int cm_rep_handler(struct cm_work *work)
spin_unlock(&cm.lock);
spin_unlock_irq(&cm_id_priv->lock);
ret = -EINVAL;
+ pr_debug("%s: Failed to insert remote id %d\n", __func__,
+ be32_to_cpu(rep_msg->remote_comm_id));
goto error;
}
/* Check for a stale connection. */
@@ -2213,6 +2260,10 @@ static int cm_rep_handler(struct cm_work *work)
IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
NULL, 0);
ret = -EINVAL;
+ pr_debug("%s: Stale connection. local_comm_id %d, remote_comm_id %d\n",
+ __func__, be32_to_cpu(rep_msg->local_comm_id),
+ be32_to_cpu(rep_msg->remote_comm_id));
+
if (cur_cm_id_priv) {
cm_id = &cur_cm_id_priv->id;
ib_send_cm_dreq(cm_id, NULL, 0);
@@ -2359,6 +2410,8 @@ int ib_send_cm_dreq(struct ib_cm_id *cm_id,
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id->state != IB_CM_ESTABLISHED) {
+ pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
+ be32_to_cpu(cm_id->local_id), cm_id->state);
ret = -EINVAL;
goto out;
}
@@ -2428,6 +2481,8 @@ int ib_send_cm_drep(struct ib_cm_id *cm_id,
if (cm_id->state != IB_CM_DREQ_RCVD) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
kfree(data);
+ pr_debug("%s: local_id %d, cm_idcm_id->state(%d) != IB_CM_DREQ_RCVD\n",
+ __func__, be32_to_cpu(cm_id->local_id), cm_id->state);
return -EINVAL;
}
@@ -2493,6 +2548,9 @@ static int cm_dreq_handler(struct cm_work *work)
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
counter[CM_DREQ_COUNTER]);
cm_issue_drep(work->port, work->mad_recv_wc);
+ pr_debug("%s: no cm_id_priv, local_comm_id %d, remote_comm_id %d\n",
+ __func__, be32_to_cpu(dreq_msg->local_comm_id),
+ be32_to_cpu(dreq_msg->remote_comm_id));
return -EINVAL;
}
@@ -2535,6 +2593,9 @@ static int cm_dreq_handler(struct cm_work *work)
counter[CM_DREQ_COUNTER]);
goto unlock;
default:
+ pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
+ __func__, be32_to_cpu(cm_id_priv->id.local_id),
+ cm_id_priv->id.state);
goto unlock;
}
cm_id_priv->id.state = IB_CM_DREQ_RCVD;
@@ -2638,6 +2699,8 @@ int ib_send_cm_rej(struct ib_cm_id *cm_id,
cm_enter_timewait(cm_id_priv);
break;
default:
+ pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
+ be32_to_cpu(cm_id_priv->id.local_id), cm_id->state);
ret = -EINVAL;
goto out;
}
@@ -2748,6 +2811,9 @@ static int cm_rej_handler(struct cm_work *work)
/* fall through */
default:
spin_unlock_irq(&cm_id_priv->lock);
+ pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
+ __func__, be32_to_cpu(cm_id_priv->id.local_id),
+ cm_id_priv->id.state);
ret = -EINVAL;
goto out;
}
@@ -2811,6 +2877,9 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id,
}
/* fall through */
default:
+ pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
+ __func__, be32_to_cpu(cm_id_priv->id.local_id),
+ cm_id_priv->id.state);
ret = -EINVAL;
goto error1;
}
@@ -2912,6 +2981,9 @@ static int cm_mra_handler(struct cm_work *work)
counter[CM_MRA_COUNTER]);
/* fall through */
default:
+ pr_debug("%s local_id %d, cm_id_priv->id.state: %d\n",
+ __func__, be32_to_cpu(cm_id_priv->id.local_id),
+ cm_id_priv->id.state);
goto out;
}
@@ -3085,6 +3157,12 @@ static int cm_lap_handler(struct cm_work *work)
if (!cm_id_priv)
return -EINVAL;
+ ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
+ work->mad_recv_wc->recv_buf.grh,
+ &cm_id_priv->av);
+ if (ret)
+ goto deref;
+
param = &work->cm_event.param.lap_rcvd;
memset(&work->path[0], 0, sizeof(work->path[1]));
cm_path_set_rec_type(work->port->cm_dev->ib_device,
@@ -3131,9 +3209,6 @@ static int cm_lap_handler(struct cm_work *work)
cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
cm_id_priv->tid = lap_msg->hdr.tid;
- cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
- work->mad_recv_wc->recv_buf.grh,
- &cm_id_priv->av);
cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av,
cm_id_priv);
ret = atomic_inc_and_test(&cm_id_priv->work_count);
@@ -3386,6 +3461,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
struct cm_sidr_req_msg *sidr_req_msg;
struct ib_wc *wc;
+ int ret;
cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
if (IS_ERR(cm_id))
@@ -3398,9 +3474,12 @@ static int cm_sidr_req_handler(struct cm_work *work)
wc = work->mad_recv_wc->wc;
cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
cm_id_priv->av.dgid.global.interface_id = 0;
- cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
- work->mad_recv_wc->recv_buf.grh,
- &cm_id_priv->av);
+ ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
+ work->mad_recv_wc->recv_buf.grh,
+ &cm_id_priv->av);
+ if (ret)
+ goto out;
+
cm_id_priv->id.remote_id = sidr_req_msg->request_id;
cm_id_priv->tid = sidr_req_msg->hdr.tid;
atomic_inc(&cm_id_priv->work_count);
@@ -3692,6 +3771,7 @@ static void cm_work_handler(struct work_struct *_work)
ret = cm_timewait_handler(work);
break;
default:
+ pr_debug("cm_event.event: 0x%x\n", work->cm_event.event);
ret = -EINVAL;
break;
}
@@ -3727,6 +3807,8 @@ static int cm_establish(struct ib_cm_id *cm_id)
ret = -EISCONN;
break;
default:
+ pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
+ be32_to_cpu(cm_id->local_id), cm_id->state);
ret = -EINVAL;
break;
}
@@ -3924,6 +4006,9 @@ static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
ret = 0;
break;
default:
+ pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
+ __func__, be32_to_cpu(cm_id_priv->id.local_id),
+ cm_id_priv->id.state);
ret = -EINVAL;
break;
}
@@ -3971,6 +4056,9 @@ static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
ret = 0;
break;
default:
+ pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
+ __func__, be32_to_cpu(cm_id_priv->id.local_id),
+ cm_id_priv->id.state);
ret = -EINVAL;
break;
}
@@ -4030,6 +4118,9 @@ static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
ret = 0;
break;
default:
+ pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
+ __func__, be32_to_cpu(cm_id_priv->id.local_id),
+ cm_id_priv->id.state);
ret = -EINVAL;
break;
}
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 6294a7001d33..e66963ca58bd 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -601,7 +601,7 @@ static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_a
int ret;
if (addr->sa_family != AF_IB) {
- ret = rdma_translate_ip(addr, dev_addr, NULL);
+ ret = rdma_translate_ip(addr, dev_addr);
} else {
cma_translate_ib((struct sockaddr_ib *) addr, dev_addr);
ret = 0;
@@ -612,11 +612,14 @@ static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_a
static inline int cma_validate_port(struct ib_device *device, u8 port,
enum ib_gid_type gid_type,
- union ib_gid *gid, int dev_type,
- int bound_if_index)
+ union ib_gid *gid,
+ struct rdma_id_private *id_priv)
{
- int ret = -ENODEV;
+ struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
+ int bound_if_index = dev_addr->bound_dev_if;
+ int dev_type = dev_addr->dev_type;
struct net_device *ndev = NULL;
+ int ret = -ENODEV;
if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port))
return ret;
@@ -624,11 +627,13 @@ static inline int cma_validate_port(struct ib_device *device, u8 port,
if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
return ret;
- if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port))
- ndev = dev_get_by_index(&init_net, bound_if_index);
- else
+ if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) {
+ ndev = dev_get_by_index(dev_addr->net, bound_if_index);
+ if (!ndev)
+ return ret;
+ } else {
gid_type = IB_GID_TYPE_IB;
-
+ }
ret = ib_find_cached_gid_by_port(device, gid, gid_type, port,
ndev, NULL);
@@ -669,8 +674,7 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv,
rdma_protocol_ib(cma_dev->device, port) ?
IB_GID_TYPE_IB :
listen_id_priv->gid_type, gidp,
- dev_addr->dev_type,
- dev_addr->bound_dev_if);
+ id_priv);
if (!ret) {
id_priv->id.port_num = port;
goto out;
@@ -691,8 +695,7 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv,
rdma_protocol_ib(cma_dev->device, port) ?
IB_GID_TYPE_IB :
cma_dev->default_gid_type[port - 1],
- gidp, dev_addr->dev_type,
- dev_addr->bound_dev_if);
+ gidp, id_priv);
if (!ret) {
id_priv->id.port_num = port;
goto out;
@@ -2036,6 +2039,33 @@ __be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr)
}
EXPORT_SYMBOL(rdma_get_service_id);
+void rdma_read_gids(struct rdma_cm_id *cm_id, union ib_gid *sgid,
+ union ib_gid *dgid)
+{
+ struct rdma_addr *addr = &cm_id->route.addr;
+
+ if (!cm_id->device) {
+ if (sgid)
+ memset(sgid, 0, sizeof(*sgid));
+ if (dgid)
+ memset(dgid, 0, sizeof(*dgid));
+ return;
+ }
+
+ if (rdma_protocol_roce(cm_id->device, cm_id->port_num)) {
+ if (sgid)
+ rdma_ip2gid((struct sockaddr *)&addr->src_addr, sgid);
+ if (dgid)
+ rdma_ip2gid((struct sockaddr *)&addr->dst_addr, dgid);
+ } else {
+ if (sgid)
+ rdma_addr_get_sgid(&addr->dev_addr, sgid);
+ if (dgid)
+ rdma_addr_get_dgid(&addr->dev_addr, dgid);
+ }
+}
+EXPORT_SYMBOL(rdma_read_gids);
+
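A hedged usage sketch for the new helper: a ULP reads both GIDs without caring about the transport, since for RoCE they are derived from the IP addresses rather than from the device GID table:

	union ib_gid sgid, dgid;

	rdma_read_gids(cm_id, &sgid, &dgid);
	pr_debug("sgid %pI6, dgid %pI6\n", sgid.raw, dgid.raw);
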
static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
struct rdma_id_private *id_priv = iw_id->context;
@@ -2132,7 +2162,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
conn_id->state = RDMA_CM_CONNECT;
- ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr, NULL);
+ ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr);
if (ret) {
mutex_unlock(&conn_id->handler_mutex);
rdma_destroy_id(new_cm_id);
@@ -2414,6 +2444,26 @@ out:
kfree(work);
}
+static void cma_init_resolve_route_work(struct cma_work *work,
+ struct rdma_id_private *id_priv)
+{
+ work->id = id_priv;
+ INIT_WORK(&work->work, cma_work_handler);
+ work->old_state = RDMA_CM_ROUTE_QUERY;
+ work->new_state = RDMA_CM_ROUTE_RESOLVED;
+ work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
+}
+
+static void cma_init_resolve_addr_work(struct cma_work *work,
+ struct rdma_id_private *id_priv)
+{
+ work->id = id_priv;
+ INIT_WORK(&work->work, cma_work_handler);
+ work->old_state = RDMA_CM_ADDR_QUERY;
+ work->new_state = RDMA_CM_ADDR_RESOLVED;
+ work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
+}
+
static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
{
struct rdma_route *route = &id_priv->id.route;
@@ -2424,11 +2474,7 @@ static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
if (!work)
return -ENOMEM;
- work->id = id_priv;
- INIT_WORK(&work->work, cma_work_handler);
- work->old_state = RDMA_CM_ROUTE_QUERY;
- work->new_state = RDMA_CM_ROUTE_RESOLVED;
- work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
+ cma_init_resolve_route_work(work, id_priv);
route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
if (!route->path_rec) {
@@ -2449,10 +2495,63 @@ err1:
return ret;
}
-int rdma_set_ib_paths(struct rdma_cm_id *id,
- struct sa_path_rec *path_rec, int num_paths)
+static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type,
+ unsigned long supported_gids,
+ enum ib_gid_type default_gid)
+{
+ if ((network_type == RDMA_NETWORK_IPV4 ||
+ network_type == RDMA_NETWORK_IPV6) &&
+ test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids))
+ return IB_GID_TYPE_ROCE_UDP_ENCAP;
+
+ return default_gid;
+}
+
+/*
+ * cma_iboe_set_path_rec_l2_fields() is a helper which sets the path
+ * record type based on the GID type. It also sets up the other L2
+ * fields of the path record, including the destination MAC address
+ * and the netdev ifindex.
+ * It returns the netdev of the bound interface for this path record entry.
+ */
+static struct net_device *
+cma_iboe_set_path_rec_l2_fields(struct rdma_id_private *id_priv)
+{
+ struct rdma_route *route = &id_priv->id.route;
+ enum ib_gid_type gid_type = IB_GID_TYPE_ROCE;
+ struct rdma_addr *addr = &route->addr;
+ unsigned long supported_gids;
+ struct net_device *ndev;
+
+ if (!addr->dev_addr.bound_dev_if)
+ return NULL;
+
+ ndev = dev_get_by_index(addr->dev_addr.net,
+ addr->dev_addr.bound_dev_if);
+ if (!ndev)
+ return NULL;
+
+ supported_gids = roce_gid_type_mask_support(id_priv->id.device,
+ id_priv->id.port_num);
+ gid_type = cma_route_gid_type(addr->dev_addr.network,
+ supported_gids,
+ id_priv->gid_type);
+ /* Use the hint from IP Stack to select GID Type */
+ if (gid_type < ib_network_to_gid_type(addr->dev_addr.network))
+ gid_type = ib_network_to_gid_type(addr->dev_addr.network);
+ route->path_rec->rec_type = sa_conv_gid_to_pathrec_type(gid_type);
+
+ sa_path_set_ndev(route->path_rec, addr->dev_addr.net);
+ sa_path_set_ifindex(route->path_rec, ndev->ifindex);
+ sa_path_set_dmac(route->path_rec, addr->dev_addr.dst_dev_addr);
+ return ndev;
+}
+
+int rdma_set_ib_path(struct rdma_cm_id *id,
+ struct sa_path_rec *path_rec)
{
struct rdma_id_private *id_priv;
+ struct net_device *ndev;
int ret;
id_priv = container_of(id, struct rdma_id_private, id);
@@ -2460,20 +2559,33 @@ int rdma_set_ib_paths(struct rdma_cm_id *id,
RDMA_CM_ROUTE_RESOLVED))
return -EINVAL;
- id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
+ id->route.path_rec = kmemdup(path_rec, sizeof(*path_rec),
GFP_KERNEL);
if (!id->route.path_rec) {
ret = -ENOMEM;
goto err;
}
- id->route.num_paths = num_paths;
+ if (rdma_protocol_roce(id->device, id->port_num)) {
+ ndev = cma_iboe_set_path_rec_l2_fields(id_priv);
+ if (!ndev) {
+ ret = -ENODEV;
+ goto err_free;
+ }
+ dev_put(ndev);
+ }
+
+ id->route.num_paths = 1;
return 0;
+
+err_free:
+ kfree(id->route.path_rec);
+ id->route.path_rec = NULL;
err:
cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
return ret;
}
-EXPORT_SYMBOL(rdma_set_ib_paths);
+EXPORT_SYMBOL(rdma_set_ib_path);
static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
{
@@ -2483,11 +2595,7 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
if (!work)
return -ENOMEM;
- work->id = id_priv;
- INIT_WORK(&work->work, cma_work_handler);
- work->old_state = RDMA_CM_ROUTE_QUERY;
- work->new_state = RDMA_CM_ROUTE_RESOLVED;
- work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
+ cma_init_resolve_route_work(work, id_priv);
queue_work(cma_wq, &work->work);
return 0;
}
@@ -2510,26 +2618,14 @@ static int iboe_tos_to_sl(struct net_device *ndev, int tos)
return 0;
}
-static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type,
- unsigned long supported_gids,
- enum ib_gid_type default_gid)
-{
- if ((network_type == RDMA_NETWORK_IPV4 ||
- network_type == RDMA_NETWORK_IPV6) &&
- test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids))
- return IB_GID_TYPE_ROCE_UDP_ENCAP;
-
- return default_gid;
-}
-
static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
{
struct rdma_route *route = &id_priv->id.route;
struct rdma_addr *addr = &route->addr;
struct cma_work *work;
int ret;
- struct net_device *ndev = NULL;
- enum ib_gid_type gid_type = IB_GID_TYPE_IB;
+ struct net_device *ndev;
+
u8 default_roce_tos = id_priv->cma_dev->default_roce_tos[id_priv->id.port_num -
rdma_start_port(id_priv->cma_dev->device)];
u8 tos = id_priv->tos_set ? id_priv->tos : default_roce_tos;
@@ -2539,9 +2635,6 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
if (!work)
return -ENOMEM;
- work->id = id_priv;
- INIT_WORK(&work->work, cma_work_handler);
-
route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL);
if (!route->path_rec) {
ret = -ENOMEM;
@@ -2550,42 +2643,17 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
route->num_paths = 1;
- if (addr->dev_addr.bound_dev_if) {
- unsigned long supported_gids;
-
- ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
- if (!ndev) {
- ret = -ENODEV;
- goto err2;
- }
-
- supported_gids = roce_gid_type_mask_support(id_priv->id.device,
- id_priv->id.port_num);
- gid_type = cma_route_gid_type(addr->dev_addr.network,
- supported_gids,
- id_priv->gid_type);
- route->path_rec->rec_type =
- sa_conv_gid_to_pathrec_type(gid_type);
- sa_path_set_ndev(route->path_rec, &init_net);
- sa_path_set_ifindex(route->path_rec, ndev->ifindex);
- }
+ ndev = cma_iboe_set_path_rec_l2_fields(id_priv);
if (!ndev) {
ret = -ENODEV;
goto err2;
}
- sa_path_set_dmac(route->path_rec, addr->dev_addr.dst_dev_addr);
-
rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
&route->path_rec->sgid);
rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr,
&route->path_rec->dgid);
- /* Use the hint from IP Stack to select GID Type */
- if (gid_type < ib_network_to_gid_type(addr->dev_addr.network))
- gid_type = ib_network_to_gid_type(addr->dev_addr.network);
- route->path_rec->rec_type = sa_conv_gid_to_pathrec_type(gid_type);
-
if (((struct sockaddr *)&id_priv->id.route.addr.dst_addr)->sa_family != AF_IB)
/* TODO: get the hoplimit from the inet/inet6 device */
route->path_rec->hop_limit = addr->dev_addr.hoplimit;
@@ -2607,11 +2675,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
goto err2;
}
- work->old_state = RDMA_CM_ROUTE_QUERY;
- work->new_state = RDMA_CM_ROUTE_RESOLVED;
- work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
- work->event.status = 0;
-
+ cma_init_resolve_route_work(work, id_priv);
queue_work(cma_wq, &work->work);
return 0;
@@ -2791,11 +2855,7 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
- work->id = id_priv;
- INIT_WORK(&work->work, cma_work_handler);
- work->old_state = RDMA_CM_ADDR_QUERY;
- work->new_state = RDMA_CM_ADDR_RESOLVED;
- work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
+ cma_init_resolve_addr_work(work, id_priv);
queue_work(cma_wq, &work->work);
return 0;
err:
@@ -2821,11 +2881,7 @@ static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *)
&(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr));
- work->id = id_priv;
- INIT_WORK(&work->work, cma_work_handler);
- work->old_state = RDMA_CM_ADDR_QUERY;
- work->new_state = RDMA_CM_ADDR_RESOLVED;
- work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
+ cma_init_resolve_addr_work(work, id_priv);
queue_work(cma_wq, &work->work);
return 0;
err:
@@ -3404,9 +3460,10 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
event.status = ret;
break;
}
- ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
- id_priv->id.route.path_rec,
- &event.param.ud.ah_attr);
+ ib_init_ah_attr_from_path(id_priv->id.device,
+ id_priv->id.port_num,
+ id_priv->id.route.path_rec,
+ &event.param.ud.ah_attr);
event.param.ud.qp_num = rep->qpn;
event.param.ud.qkey = rep->qkey;
event.event = RDMA_CM_EVENT_ESTABLISHED;
@@ -3873,7 +3930,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
struct rdma_dev_addr *dev_addr =
&id_priv->id.route.addr.dev_addr;
struct net_device *ndev =
- dev_get_by_index(&init_net, dev_addr->bound_dev_if);
+ dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
enum ib_gid_type gid_type =
id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
rdma_start_port(id_priv->cma_dev->device)];
@@ -4010,8 +4067,10 @@ static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
} else if (addr->sa_family == AF_INET6) {
memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
} else {
- mgid->raw[0] = (gid_type == IB_GID_TYPE_IB) ? 0xff : 0;
- mgid->raw[1] = (gid_type == IB_GID_TYPE_IB) ? 0x0e : 0;
+ mgid->raw[0] =
+ (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0xff;
+ mgid->raw[1] =
+ (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0x0e;
mgid->raw[2] = 0;
mgid->raw[3] = 0;
mgid->raw[4] = 0;
@@ -4061,7 +4120,7 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
if (dev_addr->bound_dev_if)
- ndev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
+ ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
if (!ndev) {
err = -ENODEV;
goto out2;
@@ -4179,7 +4238,7 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
struct net_device *ndev = NULL;
if (dev_addr->bound_dev_if)
- ndev = dev_get_by_index(&init_net,
+ ndev = dev_get_by_index(dev_addr->net,
dev_addr->bound_dev_if);
if (ndev) {
cma_igmp_send(ndev,
@@ -4235,7 +4294,7 @@ static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
if (event != NETDEV_BONDING_FAILOVER)
return NOTIFY_DONE;
- if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING))
+ if (!netif_is_bond_master(ndev))
return NOTIFY_DONE;
mutex_lock(&lock);
@@ -4432,7 +4491,7 @@ static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
RDMA_NL_RDMA_CM_ATTR_SRC_ADDR))
goto out;
if (ibnl_put_attr(skb, nlh,
- rdma_addr_size(cma_src_addr(id_priv)),
+ rdma_addr_size(cma_dst_addr(id_priv)),
cma_dst_addr(id_priv),
RDMA_NL_RDMA_CM_ATTR_DST_ADDR))
goto out;
@@ -4444,6 +4503,7 @@ static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
id_stats->qp_type = id->qp_type;
i_id++;
+ nlmsg_end(skb, nlh);
}
cb->args[1] = 0;
diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c
index 31dfee0c8295..eee38b40be99 100644
--- a/drivers/infiniband/core/cma_configfs.c
+++ b/drivers/infiniband/core/cma_configfs.c
@@ -295,7 +295,7 @@ static struct config_group *make_cma_dev(struct config_group *group,
goto fail;
}
- strncpy(cma_dev_group->name, name, sizeof(cma_dev_group->name));
+ strlcpy(cma_dev_group->name, name, sizeof(cma_dev_group->name));
config_group_init_type_name(&cma_dev_group->ports_group, "ports",
&cma_ports_group_type);
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 66f0268f37a6..c4560d84dfae 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -40,8 +40,12 @@
#include <rdma/ib_verbs.h>
#include <rdma/opa_addr.h>
#include <rdma/ib_mad.h>
+#include <rdma/restrack.h>
#include "mad_priv.h"
+/* Total number of ports combined across all struct ib_device instances */
+#define RDMA_MAX_PORTS 1024
+
struct pkey_index_qp_list {
struct list_head pkey_index_list;
u16 pkey_index;
@@ -137,7 +141,6 @@ int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
int roce_gid_mgmt_init(void);
void roce_gid_mgmt_cleanup(void);
-int roce_rescan_device(struct ib_device *ib_dev);
unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port);
int ib_cache_setup_one(struct ib_device *device);
@@ -191,13 +194,6 @@ void ib_sa_cleanup(void);
int rdma_nl_init(void);
void rdma_nl_exit(void);
-/**
- * Check if there are any listeners to the netlink group
- * @group: the netlink group ID
- * Returns 0 on success or a negative for no listeners.
- */
-int ibnl_chk_listeners(unsigned int group);
-
int ib_nl_handle_resolve_resp(struct sk_buff *skb,
struct nlmsghdr *nlh,
struct netlink_ext_ack *extack);
@@ -213,11 +209,6 @@ int ib_get_cached_subnet_prefix(struct ib_device *device,
u64 *sn_pfx);
#ifdef CONFIG_SECURITY_INFINIBAND
-int ib_security_pkey_access(struct ib_device *dev,
- u8 port_num,
- u16 pkey_index,
- void *sec);
-
void ib_security_destroy_port_pkey_list(struct ib_device *device);
void ib_security_cache_change(struct ib_device *device,
@@ -240,14 +231,6 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent);
int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index);
#else
-static inline int ib_security_pkey_access(struct ib_device *dev,
- u8 port_num,
- u16 pkey_index,
- void *sec)
-{
- return 0;
-}
-
static inline void ib_security_destroy_port_pkey_list(struct ib_device *device)
{
}
@@ -318,4 +301,31 @@ struct ib_device *ib_device_get_by_index(u32 ifindex);
/* RDMA device netlink */
void nldev_init(void);
void nldev_exit(void);
+
+static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
+ struct ib_pd *pd,
+ struct ib_qp_init_attr *attr,
+ struct ib_udata *udata)
+{
+ struct ib_qp *qp;
+
+ qp = dev->create_qp(pd, attr, udata);
+ if (IS_ERR(qp))
+ return qp;
+
+ qp->device = dev;
+ qp->pd = pd;
+ /*
+ * We don't track XRC QPs for now, because they don't have a PD
+ * and, more importantly, they are created internally by the
+ * driver; see mlx5 create_dev_resources() as an example.
+ */
+ if (attr->qp_type < IB_QPT_XRC_INI) {
+ qp->res.type = RDMA_RESTRACK_QP;
+ rdma_restrack_add(&qp->res);
+ } else
+ qp->res.valid = false;
+
+ return qp;
+}
#endif /* _CORE_PRIV_H */
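
A hedged sketch of a core-internal caller of the helper above; device, pd and cq are assumed to exist, and the attr values are illustrative. Every non-XRC QP created this way is now accounted in the restrack DB until its destroy path removes it:

	struct ib_qp_init_attr attr = {
		.qp_type = IB_QPT_RC,
		.send_cq = cq,
		.recv_cq = cq,
		.cap = { .max_send_wr = 16, .max_recv_wr = 16,
			 .max_send_sge = 1, .max_recv_sge = 1 },
	};
	struct ib_qp *qp;

	qp = _ib_create_qp(device, pd, &attr, NULL);
	if (IS_ERR(qp))
		return PTR_ERR(qp);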
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index f2ae75fa3128..bc79ca8215d7 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -25,9 +25,10 @@
#define IB_POLL_FLAGS \
(IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS)
-static int __ib_process_cq(struct ib_cq *cq, int budget)
+static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *poll_wc)
{
int i, n, completed = 0;
+ struct ib_wc *wcs = poll_wc ? : cq->wc;
/*
* budget might be (-1) if the caller does not
@@ -35,9 +36,9 @@ static int __ib_process_cq(struct ib_cq *cq, int budget)
* minimum here.
*/
while ((n = ib_poll_cq(cq, min_t(u32, IB_POLL_BATCH,
- budget - completed), cq->wc)) > 0) {
+ budget - completed), wcs)) > 0) {
for (i = 0; i < n; i++) {
- struct ib_wc *wc = &cq->wc[i];
+ struct ib_wc *wc = &wcs[i];
if (wc->wr_cqe)
wc->wr_cqe->done(cq, wc);
@@ -60,18 +61,20 @@ static int __ib_process_cq(struct ib_cq *cq, int budget)
* @cq: CQ to process
* @budget: number of CQEs to poll for
*
- * This function is used to process all outstanding CQ entries on a
- * %IB_POLL_DIRECT CQ. It does not offload CQ processing to a different
- * context and does not ask for completion interrupts from the HCA.
+ * This function is used to process all outstanding CQ entries.
+ * It does not offload CQ processing to a different context and does
+ * not ask for completion interrupts from the HCA.
+ * Using direct processing on a CQ with a non-IB_POLL_DIRECT type may trigger
+ * concurrent processing.
*
* Note: do not pass -1 as %budget unless it is guaranteed that the number
* of completions that will be processed is small.
*/
int ib_process_cq_direct(struct ib_cq *cq, int budget)
{
- WARN_ON_ONCE(cq->poll_ctx != IB_POLL_DIRECT);
+ struct ib_wc wcs[IB_POLL_BATCH];
- return __ib_process_cq(cq, budget);
+ return __ib_process_cq(cq, budget, wcs);
}
EXPORT_SYMBOL(ib_process_cq_direct);
@@ -85,7 +88,7 @@ static int ib_poll_handler(struct irq_poll *iop, int budget)
struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
int completed;
- completed = __ib_process_cq(cq, budget);
+ completed = __ib_process_cq(cq, budget, NULL);
if (completed < budget) {
irq_poll_complete(&cq->iop);
if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
@@ -105,7 +108,7 @@ static void ib_cq_poll_work(struct work_struct *work)
struct ib_cq *cq = container_of(work, struct ib_cq, work);
int completed;
- completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE);
+ completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, NULL);
if (completed >= IB_POLL_BUDGET_WORKQUEUE ||
ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
queue_work(ib_comp_wq, &cq->work);
@@ -117,20 +120,22 @@ static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
}
/**
- * ib_alloc_cq - allocate a completion queue
+ * __ib_alloc_cq - allocate a completion queue
* @dev: device to allocate the CQ for
* @private: driver private data, accessible from cq->cq_context
* @nr_cqe: number of CQEs to allocate
* @comp_vector: HCA completion vectors for this CQ
* @poll_ctx: context to poll the CQ from.
+ * @caller: module owner name.
*
* This is the proper interface to allocate a CQ for in-kernel users. A
* CQ allocated with this interface will automatically be polled from the
* specified context. The ULP must use wr->wr_cqe instead of wr->wr_id
* to use this CQ abstraction.
*/
-struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
- int nr_cqe, int comp_vector, enum ib_poll_context poll_ctx)
+struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
+ int nr_cqe, int comp_vector,
+ enum ib_poll_context poll_ctx, const char *caller)
{
struct ib_cq_init_attr cq_attr = {
.cqe = nr_cqe,
@@ -154,6 +159,10 @@ struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
if (!cq->wc)
goto out_destroy_cq;
+ cq->res.type = RDMA_RESTRACK_CQ;
+ cq->res.kern_name = caller;
+ rdma_restrack_add(&cq->res);
+
switch (cq->poll_ctx) {
case IB_POLL_DIRECT:
cq->comp_handler = ib_cq_completion_direct;
@@ -178,11 +187,12 @@ struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
out_free_wc:
kfree(cq->wc);
+ rdma_restrack_del(&cq->res);
out_destroy_cq:
cq->device->destroy_cq(cq);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL(ib_alloc_cq);
+EXPORT_SYMBOL(__ib_alloc_cq);
/**
* ib_free_cq - free a completion queue
@@ -209,6 +219,7 @@ void ib_free_cq(struct ib_cq *cq)
}
kfree(cq->wc);
+ rdma_restrack_del(&cq->res);
ret = cq->device->destroy_cq(cq);
WARN_ON_ONCE(ret);
}
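
Because ib_process_cq_direct() now polls into an on-stack ib_wc array rather
than cq->wc, direct draining no longer races with the interrupt or workqueue
poll contexts. A hedged usage sketch (the helper name is an assumption):

/* Sketch: synchronously drain a CQ. Each completion is dispatched
 * through wc->wr_cqe->done(), so the ULP must set wr_cqe on every
 * posted work request to use this abstraction. */
static void example_drain_cq(struct ib_cq *cq)
{
	/* Bounded budget per call; loop until nothing is returned */
	while (ib_process_cq_direct(cq, IB_POLL_BATCH) > 0)
		;
}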
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 465520627e4b..e8010e73a1cf 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -263,6 +263,8 @@ struct ib_device *ib_alloc_device(size_t size)
if (!device)
return NULL;
+ rdma_restrack_init(&device->res);
+
device->dev.class = &ib_class;
device_initialize(&device->dev);
@@ -288,7 +290,7 @@ void ib_dealloc_device(struct ib_device *device)
{
WARN_ON(device->reg_state != IB_DEV_UNREGISTERED &&
device->reg_state != IB_DEV_UNINITIALIZED);
- kobject_put(&device->dev.kobj);
+ put_device(&device->dev);
}
EXPORT_SYMBOL(ib_dealloc_device);
@@ -462,7 +464,6 @@ int ib_register_device(struct ib_device *device,
struct ib_udata uhw = {.outlen = 0, .inlen = 0};
struct device *parent = device->dev.parent;
- WARN_ON_ONCE(!parent);
WARN_ON_ONCE(device->dma_device);
if (device->dev.dma_ops) {
/*
@@ -471,16 +472,25 @@ int ib_register_device(struct ib_device *device,
* into device->dev.
*/
device->dma_device = &device->dev;
- if (!device->dev.dma_mask)
- device->dev.dma_mask = parent->dma_mask;
- if (!device->dev.coherent_dma_mask)
- device->dev.coherent_dma_mask =
- parent->coherent_dma_mask;
+ if (!device->dev.dma_mask) {
+ if (parent)
+ device->dev.dma_mask = parent->dma_mask;
+ else
+ WARN_ON_ONCE(true);
+ }
+ if (!device->dev.coherent_dma_mask) {
+ if (parent)
+ device->dev.coherent_dma_mask =
+ parent->coherent_dma_mask;
+ else
+ WARN_ON_ONCE(true);
+ }
} else {
/*
* The caller did not provide custom DMA operations. Use the
* DMA mapping operations of the parent device.
*/
+ WARN_ON_ONCE(!parent);
device->dma_device = parent;
}
@@ -588,6 +598,8 @@ void ib_unregister_device(struct ib_device *device)
}
up_read(&lists_rwsem);
+ rdma_restrack_clean(&device->res);
+
ib_device_unregister_rdmacg(device);
ib_device_unregister_sysfs(device);
@@ -1033,32 +1045,22 @@ EXPORT_SYMBOL(ib_modify_port);
/**
* ib_find_gid - Returns the port number and GID table index where
- * a specified GID value occurs.
+ * a specified GID value occurs. It searches only the IB link layer.
* @device: The device to query.
* @gid: The GID value to search for.
- * @gid_type: Type of GID.
* @ndev: The ndev related to the GID to search for.
* @port_num: The port number of the device where the GID value was found.
* @index: The index into the GID table where the GID was found. This
* parameter may be NULL.
*/
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
- enum ib_gid_type gid_type, struct net_device *ndev,
- u8 *port_num, u16 *index)
+ struct net_device *ndev, u8 *port_num, u16 *index)
{
union ib_gid tmp_gid;
int ret, port, i;
for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) {
- if (rdma_cap_roce_gid_table(device, port)) {
- if (!ib_find_cached_gid_by_port(device, gid, gid_type, port,
- ndev, index)) {
- *port_num = port;
- return 0;
- }
- }
-
- if (gid_type != IB_GID_TYPE_IB)
+ if (rdma_cap_roce_gid_table(device, port))
continue;
for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) {
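
With the RoCE branch removed, ib_find_gid() serves only IB-link-layer ports;
RoCE consumers are expected to use the cached GID table API instead. A hedged
sketch of a caller under the new signature; the NULL ndev and the -ENOENT
convention are assumptions about typical usage, not part of this hunk:

/* Sketch: locate the port and table index of a known IB GID. */
static int example_lookup_gid(struct ib_device *dev, union ib_gid *gid)
{
	u8 port_num;
	u16 index;
	int ret;

	ret = ib_find_gid(dev, gid, NULL, &port_num, &index);
	if (ret)
		return ret;

	pr_info("GID found on port %u at index %u\n", port_num, index);
	return 0;
}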
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index 84d2615b5d4b..a0a9ed719031 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -388,13 +388,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
EXPORT_SYMBOL(ib_flush_fmr_pool);
/**
- * ib_fmr_pool_map_phys -
- * @pool:FMR pool to allocate FMR from
- * @page_list:List of pages to map
- * @list_len:Number of pages in @page_list
- * @io_virtual_address:I/O virtual address for new FMR
- *
- * Map an FMR from an FMR pool.
+ * ib_fmr_pool_map_phys - Map an FMR from an FMR pool.
+ * @pool_handle: FMR pool to allocate FMR from
+ * @page_list: List of pages to map
+ * @list_len: Number of pages in @page_list
+ * @io_virtual_address: I/O virtual address for new FMR
*/
struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
u64 *page_list,
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index 3c4faadb8cdd..81528f64061a 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -654,6 +654,7 @@ int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid)
}
skb_num++;
spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
+ ret = -EINVAL;
for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) {
hlist_for_each_entry(map_info, &iwpm_hash_bucket[i],
hlist_node) {
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index cb91245e9163..c50596f7f98a 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -49,7 +49,6 @@
#include "smi.h"
#include "opa_smi.h"
#include "agent.h"
-#include "core_priv.h"
static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index 1fb72c356e36..3ccaae18ad75 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -41,8 +41,6 @@
#include <linux/module.h>
#include "core_priv.h"
-#include "core_priv.h"
-
static DEFINE_MUTEX(rdma_nl_mutex);
static struct sock *nls;
static struct {
@@ -83,15 +81,13 @@ static bool is_nl_valid(unsigned int type, unsigned int op)
if (!is_nl_msg_valid(type, op))
return false;
- cb_table = rdma_nl_types[type].cb_table;
-#ifdef CONFIG_MODULES
- if (!cb_table) {
+ if (!rdma_nl_types[type].cb_table) {
mutex_unlock(&rdma_nl_mutex);
request_module("rdma-netlink-subsys-%d", type);
mutex_lock(&rdma_nl_mutex);
- cb_table = rdma_nl_types[type].cb_table;
}
-#endif
+
+ cb_table = rdma_nl_types[type].cb_table;
if (!cb_table || (!cb_table[op].dump && !cb_table[op].doit))
return false;
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 0dcd1aa6f683..fa8655e3b3ed 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -31,6 +31,8 @@
*/
#include <linux/module.h>
+#include <linux/pid.h>
+#include <linux/pid_namespace.h>
#include <net/netlink.h>
#include <rdma/rdma_netlink.h>
@@ -52,16 +54,42 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
[RDMA_NLDEV_ATTR_PORT_STATE] = { .type = NLA_U8 },
[RDMA_NLDEV_ATTR_PORT_PHYS_STATE] = { .type = NLA_U8 },
[RDMA_NLDEV_ATTR_DEV_NODE_TYPE] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_RES_SUMMARY] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME] = { .type = NLA_NUL_STRING,
+ .len = 16 },
+ [RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR] = { .type = NLA_U64 },
+ [RDMA_NLDEV_ATTR_RES_QP] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_RES_QP_ENTRY] = { .type = NLA_NESTED },
+ [RDMA_NLDEV_ATTR_RES_LQPN] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_RQPN] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_RQ_PSN] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_SQ_PSN] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_RES_TYPE] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_RES_STATE] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_RES_PID] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_RES_KERN_NAME] = { .type = NLA_NUL_STRING,
+ .len = TASK_COMM_LEN },
};
-static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
+static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device)
{
- char fw[IB_FW_VERSION_NAME_MAX];
-
if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
return -EMSGSIZE;
if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME, device->name))
return -EMSGSIZE;
+
+ return 0;
+}
+
+static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
+{
+ char fw[IB_FW_VERSION_NAME_MAX];
+
+ if (fill_nldev_handle(msg, device))
+ return -EMSGSIZE;
+
if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device)))
return -EMSGSIZE;
@@ -92,10 +120,9 @@ static int fill_port_info(struct sk_buff *msg,
struct ib_port_attr attr;
int ret;
- if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
- return -EMSGSIZE;
- if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME, device->name))
+ if (fill_nldev_handle(msg, device))
return -EMSGSIZE;
+
if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port))
return -EMSGSIZE;
@@ -126,6 +153,137 @@ static int fill_port_info(struct sk_buff *msg,
return 0;
}
+static int fill_res_info_entry(struct sk_buff *msg,
+ const char *name, u64 curr)
+{
+ struct nlattr *entry_attr;
+
+ entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
+ if (!entry_attr)
+ return -EMSGSIZE;
+
+ if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name))
+ goto err;
+ if (nla_put_u64_64bit(msg,
+ RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr, 0))
+ goto err;
+
+ nla_nest_end(msg, entry_attr);
+ return 0;
+
+err:
+ nla_nest_cancel(msg, entry_attr);
+ return -EMSGSIZE;
+}
+
+static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
+{
+ static const char * const names[RDMA_RESTRACK_MAX] = {
+ [RDMA_RESTRACK_PD] = "pd",
+ [RDMA_RESTRACK_CQ] = "cq",
+ [RDMA_RESTRACK_QP] = "qp",
+ };
+
+ struct rdma_restrack_root *res = &device->res;
+ struct nlattr *table_attr;
+ int ret, i, curr;
+
+ if (fill_nldev_handle(msg, device))
+ return -EMSGSIZE;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY);
+ if (!table_attr)
+ return -EMSGSIZE;
+
+ for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
+ if (!names[i])
+ continue;
+ curr = rdma_restrack_count(res, i, task_active_pid_ns(current));
+ ret = fill_res_info_entry(msg, names[i], curr);
+ if (ret)
+ goto err;
+ }
+
+ nla_nest_end(msg, table_attr);
+ return 0;
+
+err:
+ nla_nest_cancel(msg, table_attr);
+ return ret;
+}
+
+static int fill_res_qp_entry(struct sk_buff *msg,
+ struct ib_qp *qp, uint32_t port)
+{
+ struct rdma_restrack_entry *res = &qp->res;
+ struct ib_qp_init_attr qp_init_attr;
+ struct nlattr *entry_attr;
+ struct ib_qp_attr qp_attr;
+ int ret;
+
+ ret = ib_query_qp(qp, &qp_attr, 0, &qp_init_attr);
+ if (ret)
+ return ret;
+
+ if (port && port != qp_attr.port_num)
+ return 0;
+
+ entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY);
+ if (!entry_attr)
+ goto out;
+
+ /* In create_qp() port is not set yet */
+ if (qp_attr.port_num &&
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp_attr.port_num))
+ goto err;
+
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num))
+ goto err;
+ if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC) {
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQPN,
+ qp_attr.dest_qp_num))
+ goto err;
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQ_PSN,
+ qp_attr.rq_psn))
+ goto err;
+ }
+
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SQ_PSN, qp_attr.sq_psn))
+ goto err;
+
+ if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC ||
+ qp->qp_type == IB_QPT_XRC_INI || qp->qp_type == IB_QPT_XRC_TGT) {
+ if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE,
+ qp_attr.path_mig_state))
+ goto err;
+ }
+ if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, qp->qp_type))
+ goto err;
+ if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, qp_attr.qp_state))
+ goto err;
+
+ /*
+ * The existence of a task means that this is a user QP, and the
+ * netlink user is invited to read /proc/PID/comm to get the
+ * task name; res->kern_name is NULL for such user resources.
+ */
+ if (rdma_is_kernel_res(res)) {
+ if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME, res->kern_name))
+ goto err;
+ } else {
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID, task_pid_vnr(res->task)))
+ goto err;
+ }
+
+ nla_nest_end(msg, entry_attr);
+ return 0;
+
+err:
+ nla_nest_cancel(msg, entry_attr);
+out:
+ return -EMSGSIZE;
+}
+
static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
@@ -321,6 +479,213 @@ out:
return skb->len;
}
+static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ struct ib_device *device;
+ struct sk_buff *msg;
+ u32 index;
+ int ret;
+
+ ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
+ if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
+ return -EINVAL;
+
+ index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
+ device = ib_device_get_by_index(index);
+ if (!device)
+ return -EINVAL;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ goto err;
+
+ nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
+ 0, 0);
+
+ ret = fill_res_info(msg, device);
+ if (ret)
+ goto err_free;
+
+ nlmsg_end(msg, nlh);
+ put_device(&device->dev);
+ return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+
+err_free:
+ nlmsg_free(msg);
+err:
+ put_device(&device->dev);
+ return ret;
+}
+
+static int _nldev_res_get_dumpit(struct ib_device *device,
+ struct sk_buff *skb,
+ struct netlink_callback *cb,
+ unsigned int idx)
+{
+ int start = cb->args[0];
+ struct nlmsghdr *nlh;
+
+ if (idx < start)
+ return 0;
+
+ nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
+ 0, NLM_F_MULTI);
+
+ if (fill_res_info(skb, device)) {
+ nlmsg_cancel(skb, nlh);
+ goto out;
+ }
+
+ nlmsg_end(skb, nlh);
+
+ idx++;
+
+out:
+ cb->args[0] = idx;
+ return skb->len;
+}
+
+static int nldev_res_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ return ib_enum_all_devs(_nldev_res_get_dumpit, skb, cb);
+}
+
+static int nldev_res_get_qp_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ struct rdma_restrack_entry *res;
+ int err, ret = 0, idx = 0;
+ struct nlattr *table_attr;
+ struct ib_device *device;
+ int start = cb->args[0];
+ struct ib_qp *qp = NULL;
+ struct nlmsghdr *nlh;
+ u32 index, port = 0;
+
+ err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NULL);
+ /*
+ * Right now, we are expecting the device index to get QP information,
+ * but it is possible to extend this code to return all devices in
+ * one shot by checking the existence of RDMA_NLDEV_ATTR_DEV_INDEX.
+ * If it doesn't exist, we will iterate over all devices.
+ *
+ * But it is not needed for now.
+ */
+ if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
+ return -EINVAL;
+
+ index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
+ device = ib_device_get_by_index(index);
+ if (!device)
+ return -EINVAL;
+
+ /*
+ * If no PORT_INDEX is supplied, we will return all QPs from that device
+ */
+ if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
+ port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
+ if (!rdma_is_port_valid(device, port)) {
+ ret = -EINVAL;
+ goto err_index;
+ }
+ }
+
+ nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_QP_GET),
+ 0, NLM_F_MULTI);
+
+ if (fill_nldev_handle(skb, device)) {
+ ret = -EMSGSIZE;
+ goto err;
+ }
+
+ table_attr = nla_nest_start(skb, RDMA_NLDEV_ATTR_RES_QP);
+ if (!table_attr) {
+ ret = -EMSGSIZE;
+ goto err;
+ }
+
+ down_read(&device->res.rwsem);
+ hash_for_each_possible(device->res.hash, res, node, RDMA_RESTRACK_QP) {
+ if (idx < start)
+ goto next;
+
+ if ((rdma_is_kernel_res(res) &&
+ task_active_pid_ns(current) != &init_pid_ns) ||
+ (!rdma_is_kernel_res(res) &&
+ task_active_pid_ns(current) != task_active_pid_ns(res->task)))
+ /*
+ * 1. Kernel QPs should be visible in the init namespace only
+ * 2. Present only QPs visible in the current namespace
+ */
+ goto next;
+
+ if (!rdma_restrack_get(res))
+ /*
+ * Resource is under release now, but we are not
+ * relesing lock now, so it will be released in
+ * our next pass, once we will get ->next pointer.
+ */
+ goto next;
+
+ qp = container_of(res, struct ib_qp, res);
+
+ up_read(&device->res.rwsem);
+ ret = fill_res_qp_entry(skb, qp, port);
+ down_read(&device->res.rwsem);
+ /*
+ * Return the resource back, but it won't be released until
+ * &device->res.rwsem can be taken for write.
+ */
+ rdma_restrack_put(res);
+
+ if (ret == -EMSGSIZE)
+ /*
+ * There is a chance to optimize here.
+ * It can be done by using list_prepare_entry
+ * and list_for_each_entry_continue afterwards.
+ */
+ break;
+ if (ret)
+ goto res_err;
+next: idx++;
+ }
+ up_read(&device->res.rwsem);
+
+ nla_nest_end(skb, table_attr);
+ nlmsg_end(skb, nlh);
+ cb->args[0] = idx;
+
+ /*
+ * No more QPs to fill: cancel the message and
+ * return 0 to mark the end of the dumpit.
+ */
+ if (!qp)
+ goto err;
+
+ put_device(&device->dev);
+ return skb->len;
+
+res_err:
+ nla_nest_cancel(skb, table_attr);
+ up_read(&device->res.rwsem);
+
+err:
+ nlmsg_cancel(skb, nlh);
+
+err_index:
+ put_device(&device->dev);
+ return ret;
+}
+
static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
[RDMA_NLDEV_CMD_GET] = {
.doit = nldev_get_doit,
@@ -330,6 +695,23 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
.doit = nldev_port_get_doit,
.dump = nldev_port_get_dumpit,
},
+ [RDMA_NLDEV_CMD_RES_GET] = {
+ .doit = nldev_res_get_doit,
+ .dump = nldev_res_get_dumpit,
+ },
+ [RDMA_NLDEV_CMD_RES_QP_GET] = {
+ .dump = nldev_res_get_qp_dumpit,
+ /*
+ * .doit is not implemented yet for two reasons:
+ * 1. It is not needed yet.
+ * 2. There is a need to provide an identifier; while that is easy
+ * for QPs (device index + port index + LQPN), it is not
+ * the case for the rest of the resources (PD and CQ). Because it
+ * is better to provide a similar interface for all resources,
+ * let's wait until the other resources are implemented
+ * too.
+ */
+ },
};
void __init nldev_init(void)
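
All of the fill_res_* helpers above share one netlink idiom: open a nest,
emit attributes, close the nest, and cancel the nest on -EMSGSIZE so the
message is rolled back cleanly. A minimal sketch of that idiom with
placeholder attribute ids (EXAMPLE_ATTR_* are not real RDMA_NLDEV
attributes):

#define EXAMPLE_ATTR_NEST  1	/* placeholder nested-attribute id */
#define EXAMPLE_ATTR_VALUE 2	/* placeholder scalar-attribute id */

static int example_fill_nested(struct sk_buff *msg, u32 value)
{
	struct nlattr *nest;

	nest = nla_nest_start(msg, EXAMPLE_ATTR_NEST);
	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u32(msg, EXAMPLE_ATTR_VALUE, value)) {
		/* Roll the message back to its pre-nest state */
		nla_nest_cancel(msg, nest);
		return -EMSGSIZE;
	}

	nla_nest_end(msg, nest);
	return 0;
}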
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
new file mode 100644
index 000000000000..857637bf46da
--- /dev/null
+++ b/drivers/infiniband/core/restrack.c
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
+ */
+
+#include <rdma/ib_verbs.h>
+#include <rdma/restrack.h>
+#include <linux/mutex.h>
+#include <linux/sched/task.h>
+#include <linux/uaccess.h>
+#include <linux/pid_namespace.h>
+
+void rdma_restrack_init(struct rdma_restrack_root *res)
+{
+ init_rwsem(&res->rwsem);
+}
+
+void rdma_restrack_clean(struct rdma_restrack_root *res)
+{
+ WARN_ON_ONCE(!hash_empty(res->hash));
+}
+
+int rdma_restrack_count(struct rdma_restrack_root *res,
+ enum rdma_restrack_type type,
+ struct pid_namespace *ns)
+{
+ struct rdma_restrack_entry *e;
+ u32 cnt = 0;
+
+ down_read(&res->rwsem);
+ hash_for_each_possible(res->hash, e, node, type) {
+ if (ns == &init_pid_ns ||
+ (!rdma_is_kernel_res(e) &&
+ ns == task_active_pid_ns(e->task)))
+ cnt++;
+ }
+ up_read(&res->rwsem);
+ return cnt;
+}
+EXPORT_SYMBOL(rdma_restrack_count);
+
+static void set_kern_name(struct rdma_restrack_entry *res)
+{
+ enum rdma_restrack_type type = res->type;
+ struct ib_qp *qp;
+
+ if (type != RDMA_RESTRACK_QP)
+ /* PD and CQ types already have this name embedded in them */
+ return;
+
+ qp = container_of(res, struct ib_qp, res);
+ if (!qp->pd) {
+ WARN_ONCE(true, "XRC QPs are not supported\n");
+ /* Survive, despite the programmer's error */
+ res->kern_name = " ";
+ return;
+ }
+
+ res->kern_name = qp->pd->res.kern_name;
+}
+
+static struct ib_device *res_to_dev(struct rdma_restrack_entry *res)
+{
+ enum rdma_restrack_type type = res->type;
+ struct ib_device *dev;
+ struct ib_xrcd *xrcd;
+ struct ib_pd *pd;
+ struct ib_cq *cq;
+ struct ib_qp *qp;
+
+ switch (type) {
+ case RDMA_RESTRACK_PD:
+ pd = container_of(res, struct ib_pd, res);
+ dev = pd->device;
+ break;
+ case RDMA_RESTRACK_CQ:
+ cq = container_of(res, struct ib_cq, res);
+ dev = cq->device;
+ break;
+ case RDMA_RESTRACK_QP:
+ qp = container_of(res, struct ib_qp, res);
+ dev = qp->device;
+ break;
+ case RDMA_RESTRACK_XRCD:
+ xrcd = container_of(res, struct ib_xrcd, res);
+ dev = xrcd->device;
+ break;
+ default:
+ WARN_ONCE(true, "Wrong resource tracking type %u\n", type);
+ return NULL;
+ }
+
+ return dev;
+}
+
+void rdma_restrack_add(struct rdma_restrack_entry *res)
+{
+ struct ib_device *dev = res_to_dev(res);
+
+ if (!dev)
+ return;
+
+ if (!uaccess_kernel()) {
+ get_task_struct(current);
+ res->task = current;
+ res->kern_name = NULL;
+ } else {
+ set_kern_name(res);
+ res->task = NULL;
+ }
+
+ kref_init(&res->kref);
+ init_completion(&res->comp);
+ res->valid = true;
+
+ down_write(&dev->res.rwsem);
+ hash_add(dev->res.hash, &res->node, res->type);
+ up_write(&dev->res.rwsem);
+}
+EXPORT_SYMBOL(rdma_restrack_add);
+
+int __must_check rdma_restrack_get(struct rdma_restrack_entry *res)
+{
+ return kref_get_unless_zero(&res->kref);
+}
+EXPORT_SYMBOL(rdma_restrack_get);
+
+static void restrack_release(struct kref *kref)
+{
+ struct rdma_restrack_entry *res;
+
+ res = container_of(kref, struct rdma_restrack_entry, kref);
+ complete(&res->comp);
+}
+
+int rdma_restrack_put(struct rdma_restrack_entry *res)
+{
+ return kref_put(&res->kref, restrack_release);
+}
+EXPORT_SYMBOL(rdma_restrack_put);
+
+void rdma_restrack_del(struct rdma_restrack_entry *res)
+{
+ struct ib_device *dev;
+
+ if (!res->valid)
+ return;
+
+ dev = res_to_dev(res);
+ if (!dev)
+ return;
+
+ rdma_restrack_put(res);
+
+ wait_for_completion(&res->comp);
+
+ down_write(&dev->res.rwsem);
+ hash_del(&res->node);
+ res->valid = false;
+ if (res->task)
+ put_task_struct(res->task);
+ up_write(&dev->res.rwsem);
+}
+EXPORT_SYMBOL(rdma_restrack_del);
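
Taken together, restrack gives each entry a kref-plus-completion lifetime:
readers pin an entry with rdma_restrack_get(), and rdma_restrack_del()
blocks on the completion until the last reference is dropped. A hedged
sketch of the reader-side pattern used by the QP dump above:

/* Sketch: safely inspect one tracked entry from a dump-style walk.
 * 'res' is assumed to come from a hash iteration held under
 * root->rwsem for read, as in nldev_res_get_qp_dumpit(). */
static void example_inspect(struct rdma_restrack_root *root,
			    struct rdma_restrack_entry *res)
{
	if (!rdma_restrack_get(res))
		return;		/* entry is already being deleted */

	up_read(&root->rwsem);	/* drop the lock for sleepable work */
	/* ... e.g. fill a netlink message from the entry ... */
	down_read(&root->rwsem);

	rdma_restrack_put(res);	/* may complete a pending del() */
}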
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index 90e3889b7fbe..5a52ec77940a 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -410,15 +410,18 @@ static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev,
rtnl_unlock();
}
-/* This function will rescan all of the network devices in the system
- * and add their gids, as needed, to the relevant RoCE devices. */
-int roce_rescan_device(struct ib_device *ib_dev)
+/**
+ * rdma_roce_rescan_device - Rescan all of the network devices in the system
+ * and add their gids, as needed, to the relevant RoCE devices.
+ *
+ * @ib_dev: the RDMA device
+ */
+void rdma_roce_rescan_device(struct ib_device *ib_dev)
{
ib_enum_roce_netdev(ib_dev, pass_all_filter, NULL,
enum_all_gids_of_dev_cb, NULL);
-
- return 0;
}
+EXPORT_SYMBOL(rdma_roce_rescan_device);
static void callback_for_addr_gid_device_scan(struct ib_device *device,
u8 port,
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index ab5e1024fea9..8cf15d4a8ac4 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1227,9 +1227,9 @@ static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
return src_path_mask;
}
-int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
- struct sa_path_rec *rec,
- struct rdma_ah_attr *ah_attr)
+int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
+ struct sa_path_rec *rec,
+ struct rdma_ah_attr *ah_attr)
{
int ret;
u16 gid_index;
@@ -1341,10 +1341,11 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
return 0;
}
-EXPORT_SYMBOL(ib_init_ah_from_path);
+EXPORT_SYMBOL(ib_init_ah_attr_from_path);
static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
{
+ struct rdma_ah_attr ah_attr;
unsigned long flags;
spin_lock_irqsave(&query->port->ah_lock, flags);
@@ -1356,6 +1357,15 @@ static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
query->sm_ah = query->port->sm_ah;
spin_unlock_irqrestore(&query->port->ah_lock, flags);
+ /*
+ * Always check if sm_ah has valid dlid assigned,
+ * before querying for class port info
+ */
+ if ((rdma_query_ah(query->sm_ah->ah, &ah_attr) < 0) ||
+ !rdma_is_valid_unicast_lid(&ah_attr)) {
+ kref_put(&query->sm_ah->ref, free_sm_ah);
+ return -EAGAIN;
+ }
query->mad_buf = ib_create_send_mad(query->port->agent, 1,
query->sm_ah->pkey_index,
0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
index 59b2f96d986a..b61dda6b04fc 100644
--- a/drivers/infiniband/core/security.c
+++ b/drivers/infiniband/core/security.c
@@ -653,12 +653,11 @@ int ib_security_modify_qp(struct ib_qp *qp,
}
return ret;
}
-EXPORT_SYMBOL(ib_security_modify_qp);
-int ib_security_pkey_access(struct ib_device *dev,
- u8 port_num,
- u16 pkey_index,
- void *sec)
+static int ib_security_pkey_access(struct ib_device *dev,
+ u8 port_num,
+ u16 pkey_index,
+ void *sec)
{
u64 subnet_prefix;
u16 pkey;
@@ -678,7 +677,6 @@ int ib_security_pkey_access(struct ib_device *dev,
return security_ib_pkey_access(sec, subnet_prefix, pkey);
}
-EXPORT_SYMBOL(ib_security_pkey_access);
static int ib_mad_agent_security_change(struct notifier_block *nb,
unsigned long event,
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index e30d86fa1855..8ae1308eecc7 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -1276,7 +1276,6 @@ int ib_device_register_sysfs(struct ib_device *device,
int ret;
int i;
- WARN_ON_ONCE(!device->dev.parent);
ret = dev_set_name(class_dev, "%s", device->name);
if (ret)
return ret;
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index f2a7f62c2834..8ae636bb09e5 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -53,6 +53,8 @@
#include <rdma/ib_user_cm.h>
#include <rdma/ib_marshall.h>
+#include "core_priv.h"
+
MODULE_AUTHOR("Libor Michalek");
MODULE_DESCRIPTION("InfiniBand userspace Connection Manager access");
MODULE_LICENSE("Dual BSD/GPL");
@@ -104,10 +106,13 @@ struct ib_ucm_event {
enum {
IB_UCM_MAJOR = 231,
IB_UCM_BASE_MINOR = 224,
- IB_UCM_MAX_DEVICES = 32
+ IB_UCM_MAX_DEVICES = RDMA_MAX_PORTS,
+ IB_UCM_NUM_FIXED_MINOR = 32,
+ IB_UCM_NUM_DYNAMIC_MINOR = IB_UCM_MAX_DEVICES - IB_UCM_NUM_FIXED_MINOR,
};
#define IB_UCM_BASE_DEV MKDEV(IB_UCM_MAJOR, IB_UCM_BASE_MINOR)
+static dev_t dynamic_ucm_dev;
static void ib_ucm_add_one(struct ib_device *device);
static void ib_ucm_remove_one(struct ib_device *device, void *client_data);
@@ -1130,11 +1135,11 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
return result;
}
-static unsigned int ib_ucm_poll(struct file *filp,
+static __poll_t ib_ucm_poll(struct file *filp,
struct poll_table_struct *wait)
{
struct ib_ucm_file *file = filp->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(filp, &file->poll_wait, wait);
@@ -1199,7 +1204,6 @@ static int ib_ucm_close(struct inode *inode, struct file *filp)
return 0;
}
-static DECLARE_BITMAP(overflow_map, IB_UCM_MAX_DEVICES);
static void ib_ucm_release_dev(struct device *dev)
{
struct ib_ucm_device *ucm_dev;
@@ -1210,10 +1214,7 @@ static void ib_ucm_release_dev(struct device *dev)
static void ib_ucm_free_dev(struct ib_ucm_device *ucm_dev)
{
- if (ucm_dev->devnum < IB_UCM_MAX_DEVICES)
- clear_bit(ucm_dev->devnum, dev_map);
- else
- clear_bit(ucm_dev->devnum - IB_UCM_MAX_DEVICES, overflow_map);
+ clear_bit(ucm_dev->devnum, dev_map);
}
static const struct file_operations ucm_fops = {
@@ -1235,27 +1236,6 @@ static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
-static dev_t overflow_maj;
-static int find_overflow_devnum(void)
-{
- int ret;
-
- if (!overflow_maj) {
- ret = alloc_chrdev_region(&overflow_maj, 0, IB_UCM_MAX_DEVICES,
- "infiniband_cm");
- if (ret) {
- pr_err("ucm: couldn't register dynamic device number\n");
- return ret;
- }
- }
-
- ret = find_first_zero_bit(overflow_map, IB_UCM_MAX_DEVICES);
- if (ret >= IB_UCM_MAX_DEVICES)
- return -1;
-
- return ret;
-}
-
static void ib_ucm_add_one(struct ib_device *device)
{
int devnum;
@@ -1274,19 +1254,14 @@ static void ib_ucm_add_one(struct ib_device *device)
ucm_dev->dev.release = ib_ucm_release_dev;
devnum = find_first_zero_bit(dev_map, IB_UCM_MAX_DEVICES);
- if (devnum >= IB_UCM_MAX_DEVICES) {
- devnum = find_overflow_devnum();
- if (devnum < 0)
- goto err;
-
- ucm_dev->devnum = devnum + IB_UCM_MAX_DEVICES;
- base = devnum + overflow_maj;
- set_bit(devnum, overflow_map);
- } else {
- ucm_dev->devnum = devnum;
- base = devnum + IB_UCM_BASE_DEV;
- set_bit(devnum, dev_map);
- }
+ if (devnum >= IB_UCM_MAX_DEVICES)
+ goto err;
+ ucm_dev->devnum = devnum;
+ set_bit(devnum, dev_map);
+ if (devnum >= IB_UCM_NUM_FIXED_MINOR)
+ base = dynamic_ucm_dev + devnum - IB_UCM_NUM_FIXED_MINOR;
+ else
+ base = IB_UCM_BASE_DEV + devnum;
cdev_init(&ucm_dev->cdev, &ucm_fops);
ucm_dev->cdev.owner = THIS_MODULE;
@@ -1334,13 +1309,20 @@ static int __init ib_ucm_init(void)
{
int ret;
- ret = register_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES,
+ ret = register_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_NUM_FIXED_MINOR,
"infiniband_cm");
if (ret) {
pr_err("ucm: couldn't register device number\n");
goto error1;
}
+ ret = alloc_chrdev_region(&dynamic_ucm_dev, 0, IB_UCM_NUM_DYNAMIC_MINOR,
+ "infiniband_cm");
+ if (ret) {
+ pr_err("ucm: couldn't register dynamic device number\n");
+ goto err_alloc;
+ }
+
ret = class_create_file(&cm_class, &class_attr_abi_version.attr);
if (ret) {
pr_err("ucm: couldn't create abi_version attribute\n");
@@ -1357,7 +1339,9 @@ static int __init ib_ucm_init(void)
error3:
class_remove_file(&cm_class, &class_attr_abi_version.attr);
error2:
- unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES);
+ unregister_chrdev_region(dynamic_ucm_dev, IB_UCM_NUM_DYNAMIC_MINOR);
+err_alloc:
+ unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_NUM_FIXED_MINOR);
error1:
return ret;
}
@@ -1366,9 +1350,8 @@ static void __exit ib_ucm_cleanup(void)
{
ib_unregister_client(&ucm_client);
class_remove_file(&cm_class, &class_attr_abi_version.attr);
- unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES);
- if (overflow_maj)
- unregister_chrdev_region(overflow_maj, IB_UCM_MAX_DEVICES);
+ unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_NUM_FIXED_MINOR);
+ unregister_chrdev_region(dynamic_ucm_dev, IB_UCM_NUM_DYNAMIC_MINOR);
idr_destroy(&ctx_id_table);
}
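
The minor-number rework above (repeated for umad and uverbs below) keeps the
first 32 minors at the historic static dev_t for ABI compatibility and draws
everything above that from a dynamically allocated region, replacing the old
"overflow major" scheme. A hedged sketch of the resulting devnum-to-dev_t
mapping; the helper and constants are illustrative, not kernel code:

#define EX_NUM_FIXED_MINOR 32	/* mirrors IB_UCM_NUM_FIXED_MINOR */

static dev_t ex_fixed_base;	/* from register_chrdev_region() */
static dev_t ex_dynamic_base;	/* from alloc_chrdev_region() */

static dev_t example_devnum_to_devt(int devnum)
{
	if (devnum < EX_NUM_FIXED_MINOR)
		return ex_fixed_base + devnum;	/* legacy static range */
	return ex_dynamic_base + devnum - EX_NUM_FIXED_MINOR;
}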
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index eb85b546e223..6ba4231f2b07 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -904,13 +904,14 @@ static ssize_t ucma_query_path(struct ucma_context *ctx,
resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY |
IB_PATH_BIDIRECTIONAL;
- if (rec->rec_type == SA_PATH_REC_TYPE_IB) {
- ib_sa_pack_path(rec, &resp->path_data[i].path_rec);
- } else {
+ if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
struct sa_path_rec ib;
sa_convert_path_opa_to_ib(&ib, rec);
ib_sa_pack_path(&ib, &resp->path_data[i].path_rec);
+
+ } else {
+ ib_sa_pack_path(rec, &resp->path_data[i].path_rec);
}
}
@@ -943,8 +944,8 @@ static ssize_t ucma_query_gid(struct ucma_context *ctx,
} else {
addr->sib_family = AF_IB;
addr->sib_pkey = (__force __be16) resp.pkey;
- rdma_addr_get_sgid(&ctx->cm_id->route.addr.dev_addr,
- (union ib_gid *) &addr->sib_addr);
+ rdma_read_gids(ctx->cm_id, (union ib_gid *)&addr->sib_addr,
+ NULL);
addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
&ctx->cm_id->route.addr.src_addr);
}
@@ -956,8 +957,8 @@ static ssize_t ucma_query_gid(struct ucma_context *ctx,
} else {
addr->sib_family = AF_IB;
addr->sib_pkey = (__force __be16) resp.pkey;
- rdma_addr_get_dgid(&ctx->cm_id->route.addr.dev_addr,
- (union ib_gid *) &addr->sib_addr);
+ rdma_read_gids(ctx->cm_id, NULL,
+ (union ib_gid *)&addr->sib_addr);
addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
&ctx->cm_id->route.addr.dst_addr);
}
@@ -1231,9 +1232,9 @@ static int ucma_set_ib_path(struct ucma_context *ctx,
struct sa_path_rec opa;
sa_convert_path_ib_to_opa(&opa, &sa_path);
- ret = rdma_set_ib_paths(ctx->cm_id, &opa, 1);
+ ret = rdma_set_ib_path(ctx->cm_id, &opa);
} else {
- ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
+ ret = rdma_set_ib_path(ctx->cm_id, &sa_path);
}
if (ret)
return ret;
@@ -1630,10 +1631,10 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
return ret;
}
-static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
+static __poll_t ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
struct ucma_file *file = filp->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(filp, &file->poll_wait, wait);
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 130606c3b07c..9a4e899d94b3 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -352,7 +352,7 @@ int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
return -EINVAL;
}
- ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->nmap, dst, length,
+ ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->npages, dst, length,
offset + ib_umem_offset(umem));
if (ret < 0)
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 4b64dd02e090..78c77962422e 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -55,16 +55,21 @@
#include <rdma/ib_mad.h>
#include <rdma/ib_user_mad.h>
+#include "core_priv.h"
+
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace MAD packet access");
MODULE_LICENSE("Dual BSD/GPL");
enum {
- IB_UMAD_MAX_PORTS = 64,
+ IB_UMAD_MAX_PORTS = RDMA_MAX_PORTS,
IB_UMAD_MAX_AGENTS = 32,
IB_UMAD_MAJOR = 231,
- IB_UMAD_MINOR_BASE = 0
+ IB_UMAD_MINOR_BASE = 0,
+ IB_UMAD_NUM_FIXED_MINOR = 64,
+ IB_UMAD_NUM_DYNAMIC_MINOR = IB_UMAD_MAX_PORTS - IB_UMAD_NUM_FIXED_MINOR,
+ IB_ISSM_MINOR_BASE = IB_UMAD_NUM_FIXED_MINOR,
};
/*
@@ -127,9 +132,12 @@ struct ib_umad_packet {
static struct class *umad_class;
-static const dev_t base_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);
+static const dev_t base_umad_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);
+static const dev_t base_issm_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE) +
+ IB_UMAD_NUM_FIXED_MINOR;
+static dev_t dynamic_umad_dev;
+static dev_t dynamic_issm_dev;
-static DEFINE_SPINLOCK(port_lock);
static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS);
static void ib_umad_add_one(struct ib_device *device);
@@ -233,8 +241,7 @@ static void recv_handler(struct ib_mad_agent *agent,
* On OPA devices it is okay to lose the upper 16 bits of LID as this
* information is obtained elsewhere. Mask off the upper 16 bits.
*/
- if (agent->device->port_immutable[agent->port_num].core_cap_flags &
- RDMA_CORE_PORT_INTEL_OPA)
+ if (rdma_cap_opa_mad(agent->device, agent->port_num))
packet->mad.hdr.lid = ib_lid_be16(0xFFFF &
mad_recv_wc->wc->slid);
else
@@ -246,10 +253,14 @@ static void recv_handler(struct ib_mad_agent *agent,
if (packet->mad.hdr.grh_present) {
struct rdma_ah_attr ah_attr;
const struct ib_global_route *grh;
+ int ret;
- ib_init_ah_from_wc(agent->device, agent->port_num,
- mad_recv_wc->wc, mad_recv_wc->recv_buf.grh,
- &ah_attr);
+ ret = ib_init_ah_attr_from_wc(agent->device, agent->port_num,
+ mad_recv_wc->wc,
+ mad_recv_wc->recv_buf.grh,
+ &ah_attr);
+ if (ret)
+ goto err2;
grh = rdma_ah_read_grh(&ah_attr);
packet->mad.hdr.gid_index = grh->sgid_index;
@@ -500,7 +511,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
}
memset(&ah_attr, 0, sizeof ah_attr);
- ah_attr.type = rdma_ah_find_type(file->port->ib_dev,
+ ah_attr.type = rdma_ah_find_type(agent->device,
file->port->port_num);
rdma_ah_set_dlid(&ah_attr, be16_to_cpu(packet->mad.hdr.lid));
rdma_ah_set_sl(&ah_attr, packet->mad.hdr.sl);
@@ -617,12 +628,12 @@ err:
return ret;
}
-static unsigned int ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
+static __poll_t ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
{
struct ib_umad_file *file = filp->private_data;
/* we will always be able to post a MAD send */
- unsigned int mask = POLLOUT | POLLWRNORM;
+ __poll_t mask = POLLOUT | POLLWRNORM;
poll_wait(filp, &file->recv_wait, wait);
@@ -1139,54 +1150,26 @@ static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
static CLASS_ATTR_STRING(abi_version, S_IRUGO,
__stringify(IB_USER_MAD_ABI_VERSION));
-static dev_t overflow_maj;
-static DECLARE_BITMAP(overflow_map, IB_UMAD_MAX_PORTS);
-static int find_overflow_devnum(struct ib_device *device)
-{
- int ret;
-
- if (!overflow_maj) {
- ret = alloc_chrdev_region(&overflow_maj, 0, IB_UMAD_MAX_PORTS * 2,
- "infiniband_mad");
- if (ret) {
- dev_err(&device->dev,
- "couldn't register dynamic device number\n");
- return ret;
- }
- }
-
- ret = find_first_zero_bit(overflow_map, IB_UMAD_MAX_PORTS);
- if (ret >= IB_UMAD_MAX_PORTS)
- return -1;
-
- return ret;
-}
-
static int ib_umad_init_port(struct ib_device *device, int port_num,
struct ib_umad_device *umad_dev,
struct ib_umad_port *port)
{
int devnum;
- dev_t base;
+ dev_t base_umad;
+ dev_t base_issm;
- spin_lock(&port_lock);
devnum = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS);
- if (devnum >= IB_UMAD_MAX_PORTS) {
- spin_unlock(&port_lock);
- devnum = find_overflow_devnum(device);
- if (devnum < 0)
- return -1;
-
- spin_lock(&port_lock);
- port->dev_num = devnum + IB_UMAD_MAX_PORTS;
- base = devnum + overflow_maj;
- set_bit(devnum, overflow_map);
+ if (devnum >= IB_UMAD_MAX_PORTS)
+ return -1;
+ port->dev_num = devnum;
+ set_bit(devnum, dev_map);
+ if (devnum >= IB_UMAD_NUM_FIXED_MINOR) {
+ base_umad = dynamic_umad_dev + devnum - IB_UMAD_NUM_FIXED_MINOR;
+ base_issm = dynamic_issm_dev + devnum - IB_UMAD_NUM_FIXED_MINOR;
} else {
- port->dev_num = devnum;
- base = devnum + base_dev;
- set_bit(devnum, dev_map);
+ base_umad = devnum + base_umad_dev;
+ base_issm = devnum + base_issm_dev;
}
- spin_unlock(&port_lock);
port->ib_dev = device;
port->port_num = port_num;
@@ -1198,7 +1181,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
port->cdev.owner = THIS_MODULE;
cdev_set_parent(&port->cdev, &umad_dev->kobj);
kobject_set_name(&port->cdev.kobj, "umad%d", port->dev_num);
- if (cdev_add(&port->cdev, base, 1))
+ if (cdev_add(&port->cdev, base_umad, 1))
goto err_cdev;
port->dev = device_create(umad_class, device->dev.parent,
@@ -1212,12 +1195,11 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
if (device_create_file(port->dev, &dev_attr_port))
goto err_dev;
- base += IB_UMAD_MAX_PORTS;
cdev_init(&port->sm_cdev, &umad_sm_fops);
port->sm_cdev.owner = THIS_MODULE;
cdev_set_parent(&port->sm_cdev, &umad_dev->kobj);
kobject_set_name(&port->sm_cdev.kobj, "issm%d", port->dev_num);
- if (cdev_add(&port->sm_cdev, base, 1))
+ if (cdev_add(&port->sm_cdev, base_issm, 1))
goto err_sm_cdev;
port->sm_dev = device_create(umad_class, device->dev.parent,
@@ -1244,10 +1226,7 @@ err_dev:
err_cdev:
cdev_del(&port->cdev);
- if (port->dev_num < IB_UMAD_MAX_PORTS)
- clear_bit(devnum, dev_map);
- else
- clear_bit(devnum, overflow_map);
+ clear_bit(devnum, dev_map);
return -1;
}
@@ -1281,11 +1260,7 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
}
mutex_unlock(&port->file_mutex);
-
- if (port->dev_num < IB_UMAD_MAX_PORTS)
- clear_bit(port->dev_num, dev_map);
- else
- clear_bit(port->dev_num - IB_UMAD_MAX_PORTS, overflow_map);
+ clear_bit(port->dev_num, dev_map);
}
static void ib_umad_add_one(struct ib_device *device)
@@ -1361,13 +1336,23 @@ static int __init ib_umad_init(void)
{
int ret;
- ret = register_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2,
+ ret = register_chrdev_region(base_umad_dev,
+ IB_UMAD_NUM_FIXED_MINOR * 2,
"infiniband_mad");
if (ret) {
pr_err("couldn't register device number\n");
goto out;
}
+ ret = alloc_chrdev_region(&dynamic_umad_dev, 0,
+ IB_UMAD_NUM_DYNAMIC_MINOR * 2,
+ "infiniband_mad");
+ if (ret) {
+ pr_err("couldn't register dynamic device number\n");
+ goto out_alloc;
+ }
+ dynamic_issm_dev = dynamic_umad_dev + IB_UMAD_NUM_DYNAMIC_MINOR;
+
umad_class = class_create(THIS_MODULE, "infiniband_mad");
if (IS_ERR(umad_class)) {
ret = PTR_ERR(umad_class);
@@ -1395,7 +1380,12 @@ out_class:
class_destroy(umad_class);
out_chrdev:
- unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);
+ unregister_chrdev_region(dynamic_umad_dev,
+ IB_UMAD_NUM_DYNAMIC_MINOR * 2);
+
+out_alloc:
+ unregister_chrdev_region(base_umad_dev,
+ IB_UMAD_NUM_FIXED_MINOR * 2);
out:
return ret;
@@ -1405,9 +1395,10 @@ static void __exit ib_umad_cleanup(void)
{
ib_unregister_client(&umad_client);
class_destroy(umad_class);
- unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);
- if (overflow_maj)
- unregister_chrdev_region(overflow_maj, IB_UMAD_MAX_PORTS * 2);
+ unregister_chrdev_region(base_umad_dev,
+ IB_UMAD_NUM_FIXED_MINOR * 2);
+ unregister_chrdev_region(dynamic_umad_dev,
+ IB_UMAD_NUM_DYNAMIC_MINOR * 2);
}
module_init(ib_umad_init);
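
The poll-handler changes in this series are a mechanical conversion from
unsigned int to the sparse-annotated __poll_t return type. The shape of a
converted handler, sketched with a hypothetical private structure:

struct example_file {			/* hypothetical private data */
	wait_queue_head_t wait;
	struct list_head events;
};

static __poll_t example_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct example_file *file = filp->private_data;
	__poll_t mask = 0;		/* was: unsigned int */

	poll_wait(filp, &file->wait, wait);

	if (!list_empty(&file->events))
		mask = POLLIN | POLLRDNORM;

	return mask;
}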
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 840b24096690..256934d1f64f 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -340,6 +340,8 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
uobj->object = pd;
memset(&resp, 0, sizeof resp);
resp.pd_handle = uobj->id;
+ pd->res.type = RDMA_RESTRACK_PD;
+ rdma_restrack_add(&pd->res);
if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
ret = -EFAULT;
@@ -1033,6 +1035,8 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
goto err_cb;
uobj_alloc_commit(&obj->uobject);
+ cq->res.type = RDMA_RESTRACK_CQ;
+ rdma_restrack_add(&cq->res);
return obj;
@@ -1145,10 +1149,7 @@ int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
min(ucore->inlen, sizeof(cmd)),
ib_uverbs_ex_create_cq_cb, NULL);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- return 0;
+ return PTR_ERR_OR_ZERO(obj);
}
ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
@@ -1199,7 +1200,7 @@ static int copy_wc_to_user(struct ib_device *ib_dev, void __user *dest,
tmp.opcode = wc->opcode;
tmp.vendor_err = wc->vendor_err;
tmp.byte_len = wc->byte_len;
- tmp.ex.imm_data = (__u32 __force) wc->ex.imm_data;
+ tmp.ex.imm_data = wc->ex.imm_data;
tmp.qp_num = wc->qp->qp_num;
tmp.src_qp = wc->src_qp;
tmp.wc_flags = wc->wc_flags;
@@ -1517,7 +1518,7 @@ static int create_qp(struct ib_uverbs_file *file,
if (cmd->qp_type == IB_QPT_XRC_TGT)
qp = ib_create_qp(pd, &attr);
else
- qp = device->create_qp(pd, &attr, uhw);
+ qp = _ib_create_qp(device, pd, &attr, uhw);
if (IS_ERR(qp)) {
ret = PTR_ERR(qp);
@@ -1530,7 +1531,6 @@ static int create_qp(struct ib_uverbs_file *file,
goto err_cb;
qp->real_qp = qp;
- qp->device = device;
qp->pd = pd;
qp->send_cq = attr.send_cq;
qp->recv_cq = attr.recv_cq;
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
index 71ff2644e053..d96dc1d17be1 100644
--- a/drivers/infiniband/core/uverbs_ioctl.c
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -243,16 +243,13 @@ static long ib_uverbs_cmd_verbs(struct ib_device *ib_dev,
size_t ctx_size;
uintptr_t data[UVERBS_OPTIMIZE_USING_STACK_SZ / sizeof(uintptr_t)];
- if (hdr->reserved)
- return -EINVAL;
-
object_spec = uverbs_get_object(ib_dev, hdr->object_id);
if (!object_spec)
- return -EOPNOTSUPP;
+ return -EPROTONOSUPPORT;
method_spec = uverbs_get_method(object_spec, hdr->method_id);
if (!method_spec)
- return -EOPNOTSUPP;
+ return -EPROTONOSUPPORT;
if ((method_spec->flags & UVERBS_ACTION_FLAG_CREATE_ROOT) ^ !file->ucontext)
return -EINVAL;
@@ -305,6 +302,16 @@ static long ib_uverbs_cmd_verbs(struct ib_device *ib_dev,
err = uverbs_handle_method(buf, ctx->uattrs, hdr->num_attrs, ib_dev,
file, method_spec, ctx->uverbs_attr_bundle);
+
+ /*
+ * EPROTONOSUPPORT is ONLY to be returned if the ioctl framework can
+ * not invoke the method because the request is not supported. No
+ * other cases should return this code.
+ */
+ if (unlikely(err == -EPROTONOSUPPORT)) {
+ WARN_ON_ONCE(err == -EPROTONOSUPPORT);
+ err = -EINVAL;
+ }
out:
if (ctx != (void *)data)
kfree(ctx);
@@ -341,7 +348,7 @@ long ib_uverbs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
if (hdr.reserved) {
- err = -EOPNOTSUPP;
+ err = -EPROTONOSUPPORT;
goto out;
}
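
The intent of the -EPROTONOSUPPORT change is a stable probing contract: only
the ioctl dispatch layer may return it (unknown object or method, reserved
bits set), and anything a handler leaks is normalized to -EINVAL. A hedged
sketch of that contract; the lookup and invoke helpers are stubs standing in
for the real framework:

static bool example_lookup(int obj, int method)	/* stub */
{
	return obj >= 0 && method >= 0;
}

static long example_invoke(int obj, int method)	/* stub */
{
	return 0;
}

static long example_dispatch(int obj, int method)
{
	long err;

	if (!example_lookup(obj, method))
		return -EPROTONOSUPPORT;	/* userspace probe result */

	err = example_invoke(obj, method);
	if (unlikely(err == -EPROTONOSUPPORT)) {
		WARN_ON_ONCE(1);		/* handlers may not leak it */
		err = -EINVAL;
	}
	return err;
}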
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 381fd9c096ae..5b811bf574d6 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -62,14 +62,16 @@ MODULE_LICENSE("Dual BSD/GPL");
enum {
IB_UVERBS_MAJOR = 231,
IB_UVERBS_BASE_MINOR = 192,
- IB_UVERBS_MAX_DEVICES = 32
+ IB_UVERBS_MAX_DEVICES = RDMA_MAX_PORTS,
+ IB_UVERBS_NUM_FIXED_MINOR = 32,
+ IB_UVERBS_NUM_DYNAMIC_MINOR = IB_UVERBS_MAX_DEVICES - IB_UVERBS_NUM_FIXED_MINOR,
};
#define IB_UVERBS_BASE_DEV MKDEV(IB_UVERBS_MAJOR, IB_UVERBS_BASE_MINOR)
+static dev_t dynamic_uverbs_dev;
static struct class *uverbs_class;
-static DEFINE_SPINLOCK(map_lock);
static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
@@ -339,11 +341,11 @@ static ssize_t ib_uverbs_comp_event_read(struct file *filp, char __user *buf,
sizeof(struct ib_uverbs_comp_event_desc));
}
-static unsigned int ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
+static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
struct file *filp,
struct poll_table_struct *wait)
{
- unsigned int pollflags = 0;
+ __poll_t pollflags = 0;
poll_wait(filp, &ev_queue->poll_wait, wait);
@@ -355,13 +357,13 @@ static unsigned int ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
return pollflags;
}
-static unsigned int ib_uverbs_async_event_poll(struct file *filp,
+static __poll_t ib_uverbs_async_event_poll(struct file *filp,
struct poll_table_struct *wait)
{
return ib_uverbs_event_poll(filp->private_data, filp, wait);
}
-static unsigned int ib_uverbs_comp_event_poll(struct file *filp,
+static __poll_t ib_uverbs_comp_event_poll(struct file *filp,
struct poll_table_struct *wait)
{
struct ib_uverbs_completion_event_file *comp_ev_file =
@@ -1005,34 +1007,6 @@ static DEVICE_ATTR(abi_version, S_IRUGO, show_dev_abi_version, NULL);
static CLASS_ATTR_STRING(abi_version, S_IRUGO,
__stringify(IB_USER_VERBS_ABI_VERSION));
-static dev_t overflow_maj;
-static DECLARE_BITMAP(overflow_map, IB_UVERBS_MAX_DEVICES);
-
-/*
- * If we have more than IB_UVERBS_MAX_DEVICES, dynamically overflow by
- * requesting a new major number and doubling the number of max devices we
- * support. It's stupid, but simple.
- */
-static int find_overflow_devnum(void)
-{
- int ret;
-
- if (!overflow_maj) {
- ret = alloc_chrdev_region(&overflow_maj, 0, IB_UVERBS_MAX_DEVICES,
- "infiniband_verbs");
- if (ret) {
- pr_err("user_verbs: couldn't register dynamic device number\n");
- return ret;
- }
- }
-
- ret = find_first_zero_bit(overflow_map, IB_UVERBS_MAX_DEVICES);
- if (ret >= IB_UVERBS_MAX_DEVICES)
- return -1;
-
- return ret;
-}
-
static void ib_uverbs_add_one(struct ib_device *device)
{
int devnum;
@@ -1062,24 +1036,15 @@ static void ib_uverbs_add_one(struct ib_device *device)
INIT_LIST_HEAD(&uverbs_dev->uverbs_file_list);
INIT_LIST_HEAD(&uverbs_dev->uverbs_events_file_list);
- spin_lock(&map_lock);
devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
- if (devnum >= IB_UVERBS_MAX_DEVICES) {
- spin_unlock(&map_lock);
- devnum = find_overflow_devnum();
- if (devnum < 0)
- goto err;
-
- spin_lock(&map_lock);
- uverbs_dev->devnum = devnum + IB_UVERBS_MAX_DEVICES;
- base = devnum + overflow_maj;
- set_bit(devnum, overflow_map);
- } else {
- uverbs_dev->devnum = devnum;
- base = devnum + IB_UVERBS_BASE_DEV;
- set_bit(devnum, dev_map);
- }
- spin_unlock(&map_lock);
+ if (devnum >= IB_UVERBS_MAX_DEVICES)
+ goto err;
+ uverbs_dev->devnum = devnum;
+ set_bit(devnum, dev_map);
+ if (devnum >= IB_UVERBS_NUM_FIXED_MINOR)
+ base = dynamic_uverbs_dev + devnum - IB_UVERBS_NUM_FIXED_MINOR;
+ else
+ base = IB_UVERBS_BASE_DEV + devnum;
rcu_assign_pointer(uverbs_dev->ib_dev, device);
uverbs_dev->num_comp_vectors = device->num_comp_vectors;
@@ -1124,10 +1089,7 @@ err_class:
err_cdev:
cdev_del(&uverbs_dev->cdev);
- if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES)
- clear_bit(devnum, dev_map);
- else
- clear_bit(devnum, overflow_map);
+ clear_bit(devnum, dev_map);
err:
if (atomic_dec_and_test(&uverbs_dev->refcount))
@@ -1219,11 +1181,7 @@ static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
dev_set_drvdata(uverbs_dev->dev, NULL);
device_destroy(uverbs_class, uverbs_dev->cdev.dev);
cdev_del(&uverbs_dev->cdev);
-
- if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES)
- clear_bit(uverbs_dev->devnum, dev_map);
- else
- clear_bit(uverbs_dev->devnum - IB_UVERBS_MAX_DEVICES, overflow_map);
+ clear_bit(uverbs_dev->devnum, dev_map);
if (device->disassociate_ucontext) {
/* We disassociate HW resources and immediately return.
@@ -1265,13 +1223,22 @@ static int __init ib_uverbs_init(void)
{
int ret;
- ret = register_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES,
+ ret = register_chrdev_region(IB_UVERBS_BASE_DEV,
+ IB_UVERBS_NUM_FIXED_MINOR,
"infiniband_verbs");
if (ret) {
pr_err("user_verbs: couldn't register device number\n");
goto out;
}
+ ret = alloc_chrdev_region(&dynamic_uverbs_dev, 0,
+ IB_UVERBS_NUM_DYNAMIC_MINOR,
+ "infiniband_verbs");
+ if (ret) {
+ pr_err("couldn't register dynamic device number\n");
+ goto out_alloc;
+ }
+
uverbs_class = class_create(THIS_MODULE, "infiniband_verbs");
if (IS_ERR(uverbs_class)) {
ret = PTR_ERR(uverbs_class);
@@ -1299,7 +1266,12 @@ out_class:
class_destroy(uverbs_class);
out_chrdev:
- unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES);
+ unregister_chrdev_region(dynamic_uverbs_dev,
+ IB_UVERBS_NUM_DYNAMIC_MINOR);
+
+out_alloc:
+ unregister_chrdev_region(IB_UVERBS_BASE_DEV,
+ IB_UVERBS_NUM_FIXED_MINOR);
out:
return ret;
@@ -1309,9 +1281,10 @@ static void __exit ib_uverbs_cleanup(void)
{
ib_unregister_client(&uverbs_client);
class_destroy(uverbs_class);
- unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES);
- if (overflow_maj)
- unregister_chrdev_region(overflow_maj, IB_UVERBS_MAX_DEVICES);
+ unregister_chrdev_region(IB_UVERBS_BASE_DEV,
+ IB_UVERBS_NUM_FIXED_MINOR);
+ unregister_chrdev_region(dynamic_uverbs_dev,
+ IB_UVERBS_NUM_DYNAMIC_MINOR);
}
module_init(ib_uverbs_init);
diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c
index c3ee5d9b336d..b571176babbe 100644
--- a/drivers/infiniband/core/uverbs_std_types.c
+++ b/drivers/infiniband/core/uverbs_std_types.c
@@ -35,6 +35,7 @@
#include <rdma/ib_verbs.h>
#include <linux/bug.h>
#include <linux/file.h>
+#include <rdma/restrack.h>
#include "rdma_core.h"
#include "uverbs.h"
@@ -319,6 +320,8 @@ static int uverbs_create_cq_handler(struct ib_device *ib_dev,
obj->uobject.object = cq;
obj->uobject.user_handle = user_handle;
atomic_set(&cq->usecnt, 0);
+ cq->res.type = RDMA_RESTRACK_CQ;
+ rdma_restrack_add(&cq->res);
ret = uverbs_copy_to(attrs, CREATE_CQ_RESP_CQE, &cq->cqe);
if (ret)
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index e36d27ed4daa..16ebc6372c31 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -124,16 +124,24 @@ EXPORT_SYMBOL(ib_wc_status_msg);
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{
switch (rate) {
- case IB_RATE_2_5_GBPS: return 1;
- case IB_RATE_5_GBPS: return 2;
- case IB_RATE_10_GBPS: return 4;
- case IB_RATE_20_GBPS: return 8;
- case IB_RATE_30_GBPS: return 12;
- case IB_RATE_40_GBPS: return 16;
- case IB_RATE_60_GBPS: return 24;
- case IB_RATE_80_GBPS: return 32;
- case IB_RATE_120_GBPS: return 48;
- default: return -1;
+ case IB_RATE_2_5_GBPS: return 1;
+ case IB_RATE_5_GBPS: return 2;
+ case IB_RATE_10_GBPS: return 4;
+ case IB_RATE_20_GBPS: return 8;
+ case IB_RATE_30_GBPS: return 12;
+ case IB_RATE_40_GBPS: return 16;
+ case IB_RATE_60_GBPS: return 24;
+ case IB_RATE_80_GBPS: return 32;
+ case IB_RATE_120_GBPS: return 48;
+ case IB_RATE_14_GBPS: return 6;
+ case IB_RATE_56_GBPS: return 22;
+ case IB_RATE_112_GBPS: return 45;
+ case IB_RATE_168_GBPS: return 67;
+ case IB_RATE_25_GBPS: return 10;
+ case IB_RATE_100_GBPS: return 40;
+ case IB_RATE_200_GBPS: return 80;
+ case IB_RATE_300_GBPS: return 120;
+ default: return -1;
}
}
EXPORT_SYMBOL(ib_rate_to_mult);
@@ -141,16 +149,24 @@ EXPORT_SYMBOL(ib_rate_to_mult);
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
{
switch (mult) {
- case 1: return IB_RATE_2_5_GBPS;
- case 2: return IB_RATE_5_GBPS;
- case 4: return IB_RATE_10_GBPS;
- case 8: return IB_RATE_20_GBPS;
- case 12: return IB_RATE_30_GBPS;
- case 16: return IB_RATE_40_GBPS;
- case 24: return IB_RATE_60_GBPS;
- case 32: return IB_RATE_80_GBPS;
- case 48: return IB_RATE_120_GBPS;
- default: return IB_RATE_PORT_CURRENT;
+ case 1: return IB_RATE_2_5_GBPS;
+ case 2: return IB_RATE_5_GBPS;
+ case 4: return IB_RATE_10_GBPS;
+ case 8: return IB_RATE_20_GBPS;
+ case 12: return IB_RATE_30_GBPS;
+ case 16: return IB_RATE_40_GBPS;
+ case 24: return IB_RATE_60_GBPS;
+ case 32: return IB_RATE_80_GBPS;
+ case 48: return IB_RATE_120_GBPS;
+ case 6: return IB_RATE_14_GBPS;
+ case 22: return IB_RATE_56_GBPS;
+ case 45: return IB_RATE_112_GBPS;
+ case 67: return IB_RATE_168_GBPS;
+ case 10: return IB_RATE_25_GBPS;
+ case 40: return IB_RATE_100_GBPS;
+ case 80: return IB_RATE_200_GBPS;
+ case 120: return IB_RATE_300_GBPS;
+ default: return IB_RATE_PORT_CURRENT;
}
}
EXPORT_SYMBOL(mult_to_ib_rate);
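The multiplier in both tables is the link speed expressed in units of the 2.5 Gbps base rate, with the newly added extended speeds rounded to the nearest integer (56 / 2.5 = 22.4 -> 22, 112 / 2.5 = 44.8 -> 45). A minimal round-trip sketch, assuming only the tables above:

void example_rate_roundtrip(void)
{
	int mult = ib_rate_to_mult(IB_RATE_56_GBPS);	/* 22, i.e. ~56 / 2.5 */
	enum ib_rate rate = mult_to_ib_rate(mult);	/* back to IB_RATE_56_GBPS */

	WARN_ON(rate != IB_RATE_56_GBPS);
}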
@@ -247,6 +263,10 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
}
+ pd->res.type = RDMA_RESTRACK_PD;
+ pd->res.kern_name = caller;
+ rdma_restrack_add(&pd->res);
+
if (mr_access_flags) {
struct ib_mr *mr;
@@ -296,6 +316,7 @@ void ib_dealloc_pd(struct ib_pd *pd)
requires the caller to guarantee we can't race here. */
WARN_ON(atomic_read(&pd->usecnt));
+ rdma_restrack_del(&pd->res);
/* Making delalloc_pd a void return is a WIP, no driver should return
an error here. */
ret = pd->device->dealloc_pd(pd);
@@ -421,8 +442,7 @@ static bool find_gid_index(const union ib_gid *gid,
const struct ib_gid_attr *gid_attr,
void *context)
{
- struct find_gid_index_context *ctx =
- (struct find_gid_index_context *)context;
+ struct find_gid_index_context *ctx = context;
if (ctx->gid_type != gid_attr->gid_type)
return false;
@@ -481,8 +501,53 @@ int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
}
EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);
+/* Resolve the destination MAC address and hop limit for a unicast
+ * destination GID entry, considering the source GID entry as well.
+ * ah_attr must have a valid port_num and sgid_index.
+ */
+static int ib_resolve_unicast_gid_dmac(struct ib_device *device,
+ struct rdma_ah_attr *ah_attr)
+{
+ struct ib_gid_attr sgid_attr;
+ struct ib_global_route *grh;
+ int hop_limit = 0xff;
+ union ib_gid sgid;
+ int ret;
+
+ grh = rdma_ah_retrieve_grh(ah_attr);
+
+ ret = ib_query_gid(device,
+ rdma_ah_get_port_num(ah_attr),
+ grh->sgid_index,
+ &sgid, &sgid_attr);
+ if (ret || !sgid_attr.ndev) {
+ if (!ret)
+ ret = -ENXIO;
+ return ret;
+ }
+
+ /* If the destination is link-local and the source GID is RoCEv1,
+ * the IP stack is not used.
+ */
+ if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw) &&
+ sgid_attr.gid_type == IB_GID_TYPE_ROCE) {
+ rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw,
+ ah_attr->roce.dmac);
+ goto done;
+ }
+
+ ret = rdma_addr_find_l2_eth_by_grh(&sgid, &grh->dgid,
+ ah_attr->roce.dmac,
+ sgid_attr.ndev, &hop_limit);
+done:
+ dev_put(sgid_attr.ndev);
+
+ grh->hop_limit = hop_limit;
+ return ret;
+}
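The link-local shortcut works because a fe80::/64 GID embeds a modified EUI-64, so the neighbour's MAC can be recovered from the address itself without consulting the IP stack. A sketch of the extraction that rdma_get_ll_mac() performs (shown for illustration; the real helper lives in rdma/ib_addr.h):

static void example_ll_gid_to_mac(const struct in6_addr *addr, u8 *mac)
{
	memcpy(mac, &addr->s6_addr[8], 3);	/* OUI half of the EUI-64 */
	memcpy(mac + 3, &addr->s6_addr[13], 3);	/* device half, skipping ff:fe */
	mac[0] ^= 2;				/* flip the universal/local bit back */
}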
+
/*
- * This function creates ah from the incoming packet.
+ * This function initializes address handle attributes from the incoming packet.
* Incoming packet has dgid of the receiver node on which this code is
* getting executed and, sgid contains the GID of the sender.
*
@@ -490,13 +555,10 @@ EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);
* as sgid, and sgid is used as dgid because sgid contains the
* destination GID to respond to.
*
- * This is why when calling rdma_addr_find_l2_eth_by_grh() function, the
- * position of arguments dgid and sgid do not match the order of the
- * parameters.
*/
-int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
- const struct ib_wc *wc, const struct ib_grh *grh,
- struct rdma_ah_attr *ah_attr)
+int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
+ const struct ib_wc *wc, const struct ib_grh *grh,
+ struct rdma_ah_attr *ah_attr)
{
u32 flow_class;
u16 gid_index;
@@ -523,57 +585,33 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
if (ret)
return ret;
+ rdma_ah_set_sl(ah_attr, wc->sl);
+ rdma_ah_set_port_num(ah_attr, port_num);
+
if (rdma_protocol_roce(device, port_num)) {
- int if_index = 0;
u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ?
wc->vlan_id : 0xffff;
- struct net_device *idev;
- struct net_device *resolved_dev;
if (!(wc->wc_flags & IB_WC_GRH))
return -EPROTOTYPE;
- if (!device->get_netdev)
- return -EOPNOTSUPP;
-
- idev = device->get_netdev(device, port_num);
- if (!idev)
- return -ENODEV;
-
- ret = rdma_addr_find_l2_eth_by_grh(&dgid, &sgid,
- ah_attr->roce.dmac,
- wc->wc_flags & IB_WC_WITH_VLAN ?
- NULL : &vlan_id,
- &if_index, &hoplimit);
- if (ret) {
- dev_put(idev);
- return ret;
- }
-
- resolved_dev = dev_get_by_index(&init_net, if_index);
- rcu_read_lock();
- if (resolved_dev != idev && !rdma_is_upper_dev_rcu(idev,
- resolved_dev))
- ret = -EHOSTUNREACH;
- rcu_read_unlock();
- dev_put(idev);
- dev_put(resolved_dev);
+ ret = get_sgid_index_from_eth(device, port_num,
+ vlan_id, &dgid,
+ gid_type, &gid_index);
if (ret)
return ret;
- ret = get_sgid_index_from_eth(device, port_num, vlan_id,
- &dgid, gid_type, &gid_index);
- if (ret)
- return ret;
- }
-
- rdma_ah_set_dlid(ah_attr, wc->slid);
- rdma_ah_set_sl(ah_attr, wc->sl);
- rdma_ah_set_path_bits(ah_attr, wc->dlid_path_bits);
- rdma_ah_set_port_num(ah_attr, port_num);
+ flow_class = be32_to_cpu(grh->version_tclass_flow);
+ rdma_ah_set_grh(ah_attr, &sgid,
+ flow_class & 0xFFFFF,
+ (u8)gid_index, hoplimit,
+ (flow_class >> 20) & 0xFF);
+ return ib_resolve_unicast_gid_dmac(device, ah_attr);
+ } else {
+ rdma_ah_set_dlid(ah_attr, wc->slid);
+ rdma_ah_set_path_bits(ah_attr, wc->dlid_path_bits);
- if (wc->wc_flags & IB_WC_GRH) {
- if (!rdma_cap_eth_ah(device, port_num)) {
+ if (wc->wc_flags & IB_WC_GRH) {
if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
ret = ib_find_cached_gid_by_port(device, &dgid,
IB_GID_TYPE_IB,
@@ -584,18 +622,17 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
} else {
gid_index = 0;
}
- }
-
- flow_class = be32_to_cpu(grh->version_tclass_flow);
- rdma_ah_set_grh(ah_attr, &sgid,
- flow_class & 0xFFFFF,
- (u8)gid_index, hoplimit,
- (flow_class >> 20) & 0xFF);
+ flow_class = be32_to_cpu(grh->version_tclass_flow);
+ rdma_ah_set_grh(ah_attr, &sgid,
+ flow_class & 0xFFFFF,
+ (u8)gid_index, hoplimit,
+ (flow_class >> 20) & 0xFF);
+ }
+ return 0;
}
- return 0;
}
-EXPORT_SYMBOL(ib_init_ah_from_wc);
+EXPORT_SYMBOL(ib_init_ah_attr_from_wc);
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
const struct ib_grh *grh, u8 port_num)
@@ -603,7 +640,7 @@ struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
struct rdma_ah_attr ah_attr;
int ret;
- ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
+ ret = ib_init_ah_attr_from_wc(pd->device, port_num, wc, grh, &ah_attr);
if (ret)
return ERR_PTR(ret);
@@ -850,7 +887,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
if (qp_init_attr->cap.max_rdma_ctxs)
rdma_rw_init_qp(device, qp_init_attr);
- qp = device->create_qp(pd, qp_init_attr, NULL);
+ qp = _ib_create_qp(device, pd, qp_init_attr, NULL);
if (IS_ERR(qp))
return qp;
@@ -860,7 +897,6 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
return ERR_PTR(ret);
}
- qp->device = device;
qp->real_qp = qp;
qp->uobject = NULL;
qp->qp_type = qp_init_attr->qp_type;
@@ -890,7 +926,6 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
atomic_inc(&qp_init_attr->srq->usecnt);
}
- qp->pd = pd;
qp->send_cq = qp_init_attr->send_cq;
qp->xrcd = NULL;
@@ -1269,16 +1304,8 @@ static int ib_resolve_eth_dmac(struct ib_device *device,
if (!rdma_is_port_valid(device, rdma_ah_get_port_num(ah_attr)))
return -EINVAL;
- if (ah_attr->type != RDMA_AH_ATTR_TYPE_ROCE)
- return 0;
-
grh = rdma_ah_retrieve_grh(ah_attr);
- if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw)) {
- rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw,
- ah_attr->roce.dmac);
- return 0;
- }
if (rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
if (ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw)) {
__be32 addr = 0;
@@ -1290,40 +1317,52 @@ static int ib_resolve_eth_dmac(struct ib_device *device,
(char *)ah_attr->roce.dmac);
}
} else {
- union ib_gid sgid;
- struct ib_gid_attr sgid_attr;
- int ifindex;
- int hop_limit;
-
- ret = ib_query_gid(device,
- rdma_ah_get_port_num(ah_attr),
- grh->sgid_index,
- &sgid, &sgid_attr);
-
- if (ret || !sgid_attr.ndev) {
- if (!ret)
- ret = -ENXIO;
- goto out;
- }
-
- ifindex = sgid_attr.ndev->ifindex;
+ ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
+ }
+ return ret;
+}
- ret =
- rdma_addr_find_l2_eth_by_grh(&sgid, &grh->dgid,
- ah_attr->roce.dmac,
- NULL, &ifindex, &hop_limit);
+/**
+ * IB core internal function to modify QP attributes.
+ */
+static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ u8 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
+ int ret;
- dev_put(sgid_attr.ndev);
+ if (rdma_ib_or_roce(qp->device, port)) {
+ if (attr_mask & IB_QP_RQ_PSN && attr->rq_psn & ~0xffffff) {
+ pr_warn("%s: %s rq_psn overflow, masking to 24 bits\n",
+ __func__, qp->device->name);
+ attr->rq_psn &= 0xffffff;
+ }
- grh->hop_limit = hop_limit;
+ if (attr_mask & IB_QP_SQ_PSN && attr->sq_psn & ~0xffffff) {
+ pr_warn("%s: %s sq_psn overflow, masking to 24 bits\n",
+ __func__, qp->device->name);
+ attr->sq_psn &= 0xffffff;
+ }
}
-out:
+
+ ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
+ if (!ret && (attr_mask & IB_QP_PORT))
+ qp->port = attr->port_num;
+
return ret;
}
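PSNs are 24-bit quantities on the wire, which is why _ib_modify_qp() warns and masks rather than passing an oversized value down to the driver. A worked example of the check:

	u32 psn = 0x1234567;		/* 28 significant bits from the caller */

	if (psn & ~0xffffff)		/* true: bits 24 and up are set */
		psn &= 0xffffff;	/* 0x234567 is what reaches the wire */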
+static bool is_qp_type_connected(const struct ib_qp *qp)
+{
+ return (qp->qp_type == IB_QPT_UC ||
+ qp->qp_type == IB_QPT_RC ||
+ qp->qp_type == IB_QPT_XRC_INI ||
+ qp->qp_type == IB_QPT_XRC_TGT);
+}
+
/**
* ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
- * @qp: The QP to modify.
+ * @ib_qp: The QP to modify.
* @attr: On input, specifies the QP attributes to modify. On output,
* the current values of selected QP attributes are returned.
* @attr_mask: A bit-mask used to specify which attributes of the QP
@@ -1332,21 +1371,20 @@ out:
* are being modified.
* It returns 0 on success and returns appropriate error code on error.
*/
-int ib_modify_qp_with_udata(struct ib_qp *qp, struct ib_qp_attr *attr,
+int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
{
+ struct ib_qp *qp = ib_qp->real_qp;
int ret;
- if (attr_mask & IB_QP_AV) {
+ if (attr_mask & IB_QP_AV &&
+ attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE &&
+ is_qp_type_connected(qp)) {
ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
if (ret)
return ret;
}
- ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
- if (!ret && (attr_mask & IB_QP_PORT))
- qp->port = attr->port_num;
-
- return ret;
+ return _ib_modify_qp(qp, attr, attr_mask, udata);
}
EXPORT_SYMBOL(ib_modify_qp_with_udata);
@@ -1409,7 +1447,7 @@ int ib_modify_qp(struct ib_qp *qp,
struct ib_qp_attr *qp_attr,
int qp_attr_mask)
{
- return ib_modify_qp_with_udata(qp, qp_attr, qp_attr_mask, NULL);
+ return _ib_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);
@@ -1503,6 +1541,7 @@ int ib_destroy_qp(struct ib_qp *qp)
if (!qp->uobject)
rdma_rw_cleanup_mrs(qp);
+ rdma_restrack_del(&qp->res);
ret = qp->device->destroy_qp(qp);
if (!ret) {
if (pd)
@@ -1545,6 +1584,8 @@ struct ib_cq *ib_create_cq(struct ib_device *device,
cq->event_handler = event_handler;
cq->cq_context = cq_context;
atomic_set(&cq->usecnt, 0);
+ cq->res.type = RDMA_RESTRACK_CQ;
+ rdma_restrack_add(&cq->res);
}
return cq;
@@ -1563,6 +1604,7 @@ int ib_destroy_cq(struct ib_cq *cq)
if (atomic_read(&cq->usecnt))
return -EBUSY;
+ rdma_restrack_del(&cq->res);
return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);
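Across this patch the resource-tracking calls come in matched pairs: rdma_restrack_add() once the object is fully initialized, rdma_restrack_del() before the driver's destroy hook runs. A condensed sketch of the pattern, using the CQ case above:

	/* create path, after the driver returns a valid cq */
	cq->res.type = RDMA_RESTRACK_CQ;
	rdma_restrack_add(&cq->res);

	/* destroy path, before handing the cq back to the driver */
	rdma_restrack_del(&cq->res);
	ret = cq->device->destroy_cq(cq);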
@@ -1747,7 +1789,7 @@ int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
}
EXPORT_SYMBOL(ib_detach_mcast);
-struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
+struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller)
{
struct ib_xrcd *xrcd;
@@ -1765,7 +1807,7 @@ struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
return xrcd;
}
-EXPORT_SYMBOL(ib_alloc_xrcd);
+EXPORT_SYMBOL(__ib_alloc_xrcd);
int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
@@ -1790,11 +1832,11 @@ EXPORT_SYMBOL(ib_dealloc_xrcd);
* ib_create_wq - Creates a WQ associated with the specified protection
* domain.
* @pd: The protection domain associated with the WQ.
- * @wq_init_attr: A list of initial attributes required to create the
+ * @wq_attr: A list of initial attributes required to create the
* WQ. If WQ creation succeeds, then the attributes are updated to
* the actual capabilities of the created WQ.
*
- * wq_init_attr->max_wr and wq_init_attr->max_sge determine
+ * wq_attr->max_wr and wq_attr->max_sge determine
* the requested size of the WQ, and set to the actual values allocated
* on return.
* If ib_create_wq() succeeds, then max_wr and max_sge will always be
@@ -2156,16 +2198,16 @@ static void __ib_drain_sq(struct ib_qp *qp)
struct ib_send_wr swr = {}, *bad_swr;
int ret;
- swr.wr_cqe = &sdrain.cqe;
- sdrain.cqe.done = ib_drain_qp_done;
- init_completion(&sdrain.done);
-
ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
if (ret) {
WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
return;
}
+ swr.wr_cqe = &sdrain.cqe;
+ sdrain.cqe.done = ib_drain_qp_done;
+ init_completion(&sdrain.done);
+
ret = ib_post_send(qp, &swr, &bad_swr);
if (ret) {
WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
@@ -2190,16 +2232,16 @@ static void __ib_drain_rq(struct ib_qp *qp)
struct ib_recv_wr rwr = {}, *bad_rwr;
int ret;
- rwr.wr_cqe = &rdrain.cqe;
- rdrain.cqe.done = ib_drain_qp_done;
- init_completion(&rdrain.done);
-
ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
if (ret) {
WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
return;
}
+ rwr.wr_cqe = &rdrain.cqe;
+ rdrain.cqe.done = ib_drain_qp_done;
+ init_completion(&rdrain.done);
+
ret = ib_post_recv(qp, &rwr, &bad_rwr);
if (ret) {
WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index ecbac91b2e14..ca32057e886f 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -43,20 +43,41 @@
#define ROCE_DRV_MODULE_VERSION "1.0.0"
#define BNXT_RE_DESC "Broadcom NetXtreme-C/E RoCE Driver"
-
-#define BNXT_RE_PAGE_SIZE_4K BIT(12)
-#define BNXT_RE_PAGE_SIZE_8K BIT(13)
-#define BNXT_RE_PAGE_SIZE_64K BIT(16)
-#define BNXT_RE_PAGE_SIZE_2M BIT(21)
-#define BNXT_RE_PAGE_SIZE_8M BIT(23)
-#define BNXT_RE_PAGE_SIZE_1G BIT(30)
-
-#define BNXT_RE_MAX_MR_SIZE BIT(30)
+#define BNXT_RE_PAGE_SHIFT_4K (12)
+#define BNXT_RE_PAGE_SHIFT_8K (13)
+#define BNXT_RE_PAGE_SHIFT_64K (16)
+#define BNXT_RE_PAGE_SHIFT_2M (21)
+#define BNXT_RE_PAGE_SHIFT_8M (23)
+#define BNXT_RE_PAGE_SHIFT_1G (30)
+
+#define BNXT_RE_PAGE_SIZE_4K BIT(BNXT_RE_PAGE_SHIFT_4K)
+#define BNXT_RE_PAGE_SIZE_8K BIT(BNXT_RE_PAGE_SHIFT_8K)
+#define BNXT_RE_PAGE_SIZE_64K BIT(BNXT_RE_PAGE_SHIFT_64K)
+#define BNXT_RE_PAGE_SIZE_2M BIT(BNXT_RE_PAGE_SHIFT_2M)
+#define BNXT_RE_PAGE_SIZE_8M BIT(BNXT_RE_PAGE_SHIFT_8M)
+#define BNXT_RE_PAGE_SIZE_1G BIT(BNXT_RE_PAGE_SHIFT_1G)
+
+#define BNXT_RE_MAX_MR_SIZE_LOW BIT(BNXT_RE_PAGE_SHIFT_1G)
+#define BNXT_RE_MAX_MR_SIZE_HIGH BIT(39)
+#define BNXT_RE_MAX_MR_SIZE BNXT_RE_MAX_MR_SIZE_HIGH
#define BNXT_RE_MAX_QPC_COUNT (64 * 1024)
#define BNXT_RE_MAX_MRW_COUNT (64 * 1024)
#define BNXT_RE_MAX_SRQC_COUNT (64 * 1024)
#define BNXT_RE_MAX_CQ_COUNT (64 * 1024)
+#define BNXT_RE_MAX_MRW_COUNT_64K (64 * 1024)
+#define BNXT_RE_MAX_MRW_COUNT_256K (256 * 1024)
+
+/* Number of MRs to reserve for PF, leaving remainder for VFs */
+#define BNXT_RE_RESVD_MR_FOR_PF (32 * 1024)
+#define BNXT_RE_MAX_GID_PER_VF 128
+
+/*
+ * Percentage of resources of each type reserved for PF.
+ * Remaining resources are divided equally among VFs.
+ * [0, 100]
+ */
+#define BNXT_RE_PCT_RSVD_FOR_PF 50
#define BNXT_RE_UD_QP_HW_STALL 0x400000
@@ -100,6 +121,7 @@ struct bnxt_re_dev {
#define BNXT_RE_FLAG_RCFW_CHANNEL_EN 4
#define BNXT_RE_FLAG_QOS_WORK_REG 5
#define BNXT_RE_FLAG_TASK_IN_PROG 6
+#define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29
struct net_device *netdev;
unsigned int version, major, minor;
struct bnxt_en_dev *en_dev;
@@ -145,6 +167,9 @@ struct bnxt_re_dev {
struct bnxt_re_ah *sqp_ah;
struct bnxt_re_sqp_entries sqp_tbl[1024];
atomic_t nq_alloc_cnt;
+ u32 is_virtfn;
+ u32 num_vfs;
+ struct bnxt_qplib_roce_stats stats;
};
#define to_bnxt_re_dev(ptr, member) \
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c
index 7b28219eba46..77416bc61e6e 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.c
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c
@@ -58,16 +58,55 @@
#include "hw_counters.h"
static const char * const bnxt_re_stat_name[] = {
- [BNXT_RE_ACTIVE_QP] = "active_qps",
- [BNXT_RE_ACTIVE_SRQ] = "active_srqs",
- [BNXT_RE_ACTIVE_CQ] = "active_cqs",
- [BNXT_RE_ACTIVE_MR] = "active_mrs",
- [BNXT_RE_ACTIVE_MW] = "active_mws",
- [BNXT_RE_RX_PKTS] = "rx_pkts",
- [BNXT_RE_RX_BYTES] = "rx_bytes",
- [BNXT_RE_TX_PKTS] = "tx_pkts",
- [BNXT_RE_TX_BYTES] = "tx_bytes",
- [BNXT_RE_RECOVERABLE_ERRORS] = "recoverable_errors"
+ [BNXT_RE_ACTIVE_QP] = "active_qps",
+ [BNXT_RE_ACTIVE_SRQ] = "active_srqs",
+ [BNXT_RE_ACTIVE_CQ] = "active_cqs",
+ [BNXT_RE_ACTIVE_MR] = "active_mrs",
+ [BNXT_RE_ACTIVE_MW] = "active_mws",
+ [BNXT_RE_RX_PKTS] = "rx_pkts",
+ [BNXT_RE_RX_BYTES] = "rx_bytes",
+ [BNXT_RE_TX_PKTS] = "tx_pkts",
+ [BNXT_RE_TX_BYTES] = "tx_bytes",
+ [BNXT_RE_RECOVERABLE_ERRORS] = "recoverable_errors",
+ [BNXT_RE_TO_RETRANSMITS] = "to_retransmits",
+ [BNXT_RE_SEQ_ERR_NAKS_RCVD] = "seq_err_naks_rcvd",
+ [BNXT_RE_MAX_RETRY_EXCEEDED] = "max_retry_exceeded",
+ [BNXT_RE_RNR_NAKS_RCVD] = "rnr_naks_rcvd",
+ [BNXT_RE_MISSING_RESP] = "missing_resp",
+ [BNXT_RE_UNRECOVERABLE_ERR] = "unrecoverable_err",
+ [BNXT_RE_BAD_RESP_ERR] = "bad_resp_err",
+ [BNXT_RE_LOCAL_QP_OP_ERR] = "local_qp_op_err",
+ [BNXT_RE_LOCAL_PROTECTION_ERR] = "local_protection_err",
+ [BNXT_RE_MEM_MGMT_OP_ERR] = "mem_mgmt_op_err",
+ [BNXT_RE_REMOTE_INVALID_REQ_ERR] = "remote_invalid_req_err",
+ [BNXT_RE_REMOTE_ACCESS_ERR] = "remote_access_err",
+ [BNXT_RE_REMOTE_OP_ERR] = "remote_op_err",
+ [BNXT_RE_DUP_REQ] = "dup_req",
+ [BNXT_RE_RES_EXCEED_MAX] = "res_exceed_max",
+ [BNXT_RE_RES_LENGTH_MISMATCH] = "res_length_mismatch",
+ [BNXT_RE_RES_EXCEEDS_WQE] = "res_exceeds_wqe",
+ [BNXT_RE_RES_OPCODE_ERR] = "res_opcode_err",
+ [BNXT_RE_RES_RX_INVALID_RKEY] = "res_rx_invalid_rkey",
+ [BNXT_RE_RES_RX_DOMAIN_ERR] = "res_rx_domain_err",
+ [BNXT_RE_RES_RX_NO_PERM] = "res_rx_no_perm",
+ [BNXT_RE_RES_RX_RANGE_ERR] = "res_rx_range_err",
+ [BNXT_RE_RES_TX_INVALID_RKEY] = "res_tx_invalid_rkey",
+ [BNXT_RE_RES_TX_DOMAIN_ERR] = "res_tx_domain_err",
+ [BNXT_RE_RES_TX_NO_PERM] = "res_tx_no_perm",
+ [BNXT_RE_RES_TX_RANGE_ERR] = "res_tx_range_err",
+ [BNXT_RE_RES_IRRQ_OFLOW] = "res_irrq_oflow",
+ [BNXT_RE_RES_UNSUP_OPCODE] = "res_unsup_opcode",
+ [BNXT_RE_RES_UNALIGNED_ATOMIC] = "res_unaligned_atomic",
+ [BNXT_RE_RES_REM_INV_ERR] = "res_rem_inv_err",
+ [BNXT_RE_RES_MEM_ERROR] = "res_mem_err",
+ [BNXT_RE_RES_SRQ_ERR] = "res_srq_err",
+ [BNXT_RE_RES_CMP_ERR] = "res_cmp_err",
+ [BNXT_RE_RES_INVALID_DUP_RKEY] = "res_invalid_dup_rkey",
+ [BNXT_RE_RES_WQE_FORMAT_ERR] = "res_wqe_format_err",
+ [BNXT_RE_RES_CQ_LOAD_ERR] = "res_cq_load_err",
+ [BNXT_RE_RES_SRQ_LOAD_ERR] = "res_srq_load_err",
+ [BNXT_RE_RES_TX_PCI_ERR] = "res_tx_pci_err",
+ [BNXT_RE_RES_RX_PCI_ERR] = "res_rx_pci_err"
};
int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
@@ -76,6 +115,7 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
{
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
struct ctx_hw_stats *bnxt_re_stats = rdev->qplib_ctx.stats.dma;
+ int rc = 0;
if (!port || !stats)
return -EINVAL;
@@ -97,6 +137,91 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
stats->value[BNXT_RE_TX_BYTES] =
le64_to_cpu(bnxt_re_stats->tx_ucast_bytes);
}
+ if (test_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags)) {
+ rc = bnxt_qplib_get_roce_stats(&rdev->rcfw, &rdev->stats);
+ if (rc)
+ clear_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS,
+ &rdev->flags);
+ stats->value[BNXT_RE_TO_RETRANSMITS] =
+ rdev->stats.to_retransmits;
+ stats->value[BNXT_RE_SEQ_ERR_NAKS_RCVD] =
+ rdev->stats.seq_err_naks_rcvd;
+ stats->value[BNXT_RE_MAX_RETRY_EXCEEDED] =
+ rdev->stats.max_retry_exceeded;
+ stats->value[BNXT_RE_RNR_NAKS_RCVD] =
+ rdev->stats.rnr_naks_rcvd;
+ stats->value[BNXT_RE_MISSING_RESP] =
+ rdev->stats.missing_resp;
+ stats->value[BNXT_RE_UNRECOVERABLE_ERR] =
+ rdev->stats.unrecoverable_err;
+ stats->value[BNXT_RE_BAD_RESP_ERR] =
+ rdev->stats.bad_resp_err;
+ stats->value[BNXT_RE_LOCAL_QP_OP_ERR] =
+ rdev->stats.local_qp_op_err;
+ stats->value[BNXT_RE_LOCAL_PROTECTION_ERR] =
+ rdev->stats.local_protection_err;
+ stats->value[BNXT_RE_MEM_MGMT_OP_ERR] =
+ rdev->stats.mem_mgmt_op_err;
+ stats->value[BNXT_RE_REMOTE_INVALID_REQ_ERR] =
+ rdev->stats.remote_invalid_req_err;
+ stats->value[BNXT_RE_REMOTE_ACCESS_ERR] =
+ rdev->stats.remote_access_err;
+ stats->value[BNXT_RE_REMOTE_OP_ERR] =
+ rdev->stats.remote_op_err;
+ stats->value[BNXT_RE_DUP_REQ] =
+ rdev->stats.dup_req;
+ stats->value[BNXT_RE_RES_EXCEED_MAX] =
+ rdev->stats.res_exceed_max;
+ stats->value[BNXT_RE_RES_LENGTH_MISMATCH] =
+ rdev->stats.res_length_mismatch;
+ stats->value[BNXT_RE_RES_EXCEEDS_WQE] =
+ rdev->stats.res_exceeds_wqe;
+ stats->value[BNXT_RE_RES_OPCODE_ERR] =
+ rdev->stats.res_opcode_err;
+ stats->value[BNXT_RE_RES_RX_INVALID_RKEY] =
+ rdev->stats.res_rx_invalid_rkey;
+ stats->value[BNXT_RE_RES_RX_DOMAIN_ERR] =
+ rdev->stats.res_rx_domain_err;
+ stats->value[BNXT_RE_RES_RX_NO_PERM] =
+ rdev->stats.res_rx_no_perm;
+ stats->value[BNXT_RE_RES_RX_RANGE_ERR] =
+ rdev->stats.res_rx_range_err;
+ stats->value[BNXT_RE_RES_TX_INVALID_RKEY] =
+ rdev->stats.res_tx_invalid_rkey;
+ stats->value[BNXT_RE_RES_TX_DOMAIN_ERR] =
+ rdev->stats.res_tx_domain_err;
+ stats->value[BNXT_RE_RES_TX_NO_PERM] =
+ rdev->stats.res_tx_no_perm;
+ stats->value[BNXT_RE_RES_TX_RANGE_ERR] =
+ rdev->stats.res_tx_range_err;
+ stats->value[BNXT_RE_RES_IRRQ_OFLOW] =
+ rdev->stats.res_irrq_oflow;
+ stats->value[BNXT_RE_RES_UNSUP_OPCODE] =
+ rdev->stats.res_unsup_opcode;
+ stats->value[BNXT_RE_RES_UNALIGNED_ATOMIC] =
+ rdev->stats.res_unaligned_atomic;
+ stats->value[BNXT_RE_RES_REM_INV_ERR] =
+ rdev->stats.res_rem_inv_err;
+ stats->value[BNXT_RE_RES_MEM_ERROR] =
+ rdev->stats.res_mem_error;
+ stats->value[BNXT_RE_RES_SRQ_ERR] =
+ rdev->stats.res_srq_err;
+ stats->value[BNXT_RE_RES_CMP_ERR] =
+ rdev->stats.res_cmp_err;
+ stats->value[BNXT_RE_RES_INVALID_DUP_RKEY] =
+ rdev->stats.res_invalid_dup_rkey;
+ stats->value[BNXT_RE_RES_WQE_FORMAT_ERR] =
+ rdev->stats.res_wqe_format_err;
+ stats->value[BNXT_RE_RES_CQ_LOAD_ERR] =
+ rdev->stats.res_cq_load_err;
+ stats->value[BNXT_RE_RES_SRQ_LOAD_ERR] =
+ rdev->stats.res_srq_load_err;
+ stats->value[BNXT_RE_RES_TX_PCI_ERR] =
+ rdev->stats.res_tx_pci_err;
+ stats->value[BNXT_RE_RES_RX_PCI_ERR] =
+ rdev->stats.res_rx_pci_err;
+ }
+
return ARRAY_SIZE(bnxt_re_stat_name);
}
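The return value is the number of counters filled in, and the same names table sizes the rdma_hw_stats structure that sysfs exposes under hw_counters/. A minimal sketch of the matching allocation hook, assuming the standard rdma_alloc_hw_stats_struct() helper:

static struct rdma_hw_stats *example_alloc_hw_stats(struct ib_device *ibdev,
						    u8 port_num)
{
	return rdma_alloc_hw_stats_struct(bnxt_re_stat_name,
					  ARRAY_SIZE(bnxt_re_stat_name),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}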
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.h b/drivers/infiniband/hw/bnxt_re/hw_counters.h
index be0dc0093b58..a01a922717d5 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.h
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.h
@@ -51,6 +51,45 @@ enum bnxt_re_hw_stats {
BNXT_RE_TX_PKTS,
BNXT_RE_TX_BYTES,
BNXT_RE_RECOVERABLE_ERRORS,
+ BNXT_RE_TO_RETRANSMITS,
+ BNXT_RE_SEQ_ERR_NAKS_RCVD,
+ BNXT_RE_MAX_RETRY_EXCEEDED,
+ BNXT_RE_RNR_NAKS_RCVD,
+ BNXT_RE_MISSING_RESP,
+ BNXT_RE_UNRECOVERABLE_ERR,
+ BNXT_RE_BAD_RESP_ERR,
+ BNXT_RE_LOCAL_QP_OP_ERR,
+ BNXT_RE_LOCAL_PROTECTION_ERR,
+ BNXT_RE_MEM_MGMT_OP_ERR,
+ BNXT_RE_REMOTE_INVALID_REQ_ERR,
+ BNXT_RE_REMOTE_ACCESS_ERR,
+ BNXT_RE_REMOTE_OP_ERR,
+ BNXT_RE_DUP_REQ,
+ BNXT_RE_RES_EXCEED_MAX,
+ BNXT_RE_RES_LENGTH_MISMATCH,
+ BNXT_RE_RES_EXCEEDS_WQE,
+ BNXT_RE_RES_OPCODE_ERR,
+ BNXT_RE_RES_RX_INVALID_RKEY,
+ BNXT_RE_RES_RX_DOMAIN_ERR,
+ BNXT_RE_RES_RX_NO_PERM,
+ BNXT_RE_RES_RX_RANGE_ERR,
+ BNXT_RE_RES_TX_INVALID_RKEY,
+ BNXT_RE_RES_TX_DOMAIN_ERR,
+ BNXT_RE_RES_TX_NO_PERM,
+ BNXT_RE_RES_TX_RANGE_ERR,
+ BNXT_RE_RES_IRRQ_OFLOW,
+ BNXT_RE_RES_UNSUP_OPCODE,
+ BNXT_RE_RES_UNALIGNED_ATOMIC,
+ BNXT_RE_RES_REM_INV_ERR,
+ BNXT_RE_RES_MEM_ERROR,
+ BNXT_RE_RES_SRQ_ERR,
+ BNXT_RE_RES_CMP_ERR,
+ BNXT_RE_RES_INVALID_DUP_RKEY,
+ BNXT_RE_RES_WQE_FORMAT_ERR,
+ BNXT_RE_RES_CQ_LOAD_ERR,
+ BNXT_RE_RES_SRQ_LOAD_ERR,
+ BNXT_RE_RES_TX_PCI_ERR,
+ BNXT_RE_RES_RX_PCI_ERR,
BNXT_RE_NUM_COUNTERS
};
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 2032db7db766..9b8fa77b8831 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -141,12 +141,13 @@ int bnxt_re_query_device(struct ib_device *ibdev,
struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
memset(ib_attr, 0, sizeof(*ib_attr));
-
- ib_attr->fw_ver = (u64)(unsigned long)(dev_attr->fw_ver);
+ memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
+ min(sizeof(dev_attr->fw_ver),
+ sizeof(ib_attr->fw_ver)));
bnxt_qplib_get_guid(rdev->netdev->dev_addr,
(u8 *)&ib_attr->sys_image_guid);
ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
- ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K;
+ ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M;
ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
@@ -247,8 +248,7 @@ int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
IB_PORT_VENDOR_CLASS_SUP |
IB_PORT_IP_BASED_GIDS;
- /* Max MSG size set to 2G for now */
- port_attr->max_msg_sz = 0x80000000;
+ port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
port_attr->bad_pkey_cntr = 0;
port_attr->qkey_viol_cntr = 0;
port_attr->pkey_tbl_len = dev_attr->max_pkey;
@@ -281,6 +281,15 @@ int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
+void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
+{
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+
+ snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
+ rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
+ rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
+}
+
int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
u16 index, u16 *pkey)
{
@@ -532,7 +541,7 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
pbl_tbl = dma_addr;
rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
- BNXT_RE_FENCE_PBL_SIZE, false);
+ BNXT_RE_FENCE_PBL_SIZE, false, PAGE_SIZE);
if (rc) {
dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
goto fail;
@@ -1018,6 +1027,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
struct bnxt_re_qp *qp;
struct bnxt_re_cq *cq;
+ struct bnxt_re_srq *srq;
int rc, entries;
if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
@@ -1073,9 +1083,15 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
}
if (qp_init_attr->srq) {
- dev_err(rdev_to_dev(rdev), "SRQ not supported");
- rc = -ENOTSUPP;
- goto fail;
+ srq = container_of(qp_init_attr->srq, struct bnxt_re_srq,
+ ib_srq);
+ if (!srq) {
+ dev_err(rdev_to_dev(rdev), "SRQ not found");
+ rc = -EINVAL;
+ goto fail;
+ }
+ qp->qplib_qp.srq = &srq->qplib_srq;
+ qp->qplib_qp.rq.max_wqe = 0;
} else {
/* Allocate 1 more than what's provided so posting max doesn't
* mean empty
@@ -1280,6 +1296,237 @@ static enum ib_mtu __to_ib_mtu(u32 mtu)
}
}
+/* Shared Receive Queues */
+int bnxt_re_destroy_srq(struct ib_srq *ib_srq)
+{
+ struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
+ ib_srq);
+ struct bnxt_re_dev *rdev = srq->rdev;
+ struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
+ struct bnxt_qplib_nq *nq = NULL;
+ int rc;
+
+ if (qplib_srq->cq)
+ nq = qplib_srq->cq->nq;
+ rc = bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Destroy HW SRQ failed!");
+ return rc;
+ }
+
+ if (srq->umem && !IS_ERR(srq->umem))
+ ib_umem_release(srq->umem);
+ kfree(srq);
+ atomic_dec(&rdev->srq_count);
+ if (nq)
+ nq->budget--;
+ return 0;
+}
+
+static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
+ struct bnxt_re_pd *pd,
+ struct bnxt_re_srq *srq,
+ struct ib_udata *udata)
+{
+ struct bnxt_re_srq_req ureq;
+ struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
+ struct ib_umem *umem;
+ int bytes = 0;
+ struct ib_ucontext *context = pd->ib_pd.uobject->context;
+ struct bnxt_re_ucontext *cntx = container_of(context,
+ struct bnxt_re_ucontext,
+ ib_uctx);
+ if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
+ return -EFAULT;
+
+ bytes = (qplib_srq->max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
+ bytes = PAGE_ALIGN(bytes);
+ umem = ib_umem_get(context, ureq.srqva, bytes,
+ IB_ACCESS_LOCAL_WRITE, 1);
+ if (IS_ERR(umem))
+ return PTR_ERR(umem);
+
+ srq->umem = umem;
+ qplib_srq->nmap = umem->nmap;
+ qplib_srq->sglist = umem->sg_head.sgl;
+ qplib_srq->srq_handle = ureq.srq_handle;
+ qplib_srq->dpi = &cntx->dpi;
+
+ return 0;
+}
+
+struct ib_srq *bnxt_re_create_srq(struct ib_pd *ib_pd,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_udata *udata)
+{
+ struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ struct bnxt_re_dev *rdev = pd->rdev;
+ struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+ struct bnxt_re_srq *srq;
+ struct bnxt_qplib_nq *nq = NULL;
+ int rc, entries;
+
+ if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
+ dev_err(rdev_to_dev(rdev), "Create CQ failed - max exceeded");
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
+ rc = -ENOTSUPP;
+ goto exit;
+ }
+
+ srq = kzalloc(sizeof(*srq), GFP_KERNEL);
+ if (!srq) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+ srq->rdev = rdev;
+ srq->qplib_srq.pd = &pd->qplib_pd;
+ srq->qplib_srq.dpi = &rdev->dpi_privileged;
+ /* Allocate 1 more than what's provided so posting max doesn't
+ * mean empty
+ */
+ entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
+ if (entries > dev_attr->max_srq_wqes + 1)
+ entries = dev_attr->max_srq_wqes + 1;
+
+ srq->qplib_srq.max_wqe = entries;
+ srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
+ srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
+ srq->srq_limit = srq_init_attr->attr.srq_limit;
+ srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id;
+ nq = &rdev->nq[0];
+
+ if (udata) {
+ rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
+ if (rc)
+ goto fail;
+ }
+
+ rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Create HW SRQ failed!");
+ goto fail;
+ }
+
+ if (udata) {
+ struct bnxt_re_srq_resp resp;
+
+ resp.srqid = srq->qplib_srq.id;
+ rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "SRQ copy to udata failed!");
+ bnxt_qplib_destroy_srq(&rdev->qplib_res,
+ &srq->qplib_srq);
+ goto exit;
+ }
+ }
+ if (nq)
+ nq->budget++;
+ atomic_inc(&rdev->srq_count);
+
+ return &srq->ib_srq;
+
+fail:
+ if (udata && srq->umem && !IS_ERR(srq->umem)) {
+ ib_umem_release(srq->umem);
+ srq->umem = NULL;
+ }
+
+ kfree(srq);
+exit:
+ return ERR_PTR(rc);
+}
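A sizing example for the queue-depth logic above: with srq_init_attr->attr.max_wr = 1000, entries = roundup_pow_of_two(1001) = 1024. The extra slot keeps a completely full ring distinguishable from an empty one, so posting the full max_wr never wraps the producer index onto the consumer index.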
+
+int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
+ enum ib_srq_attr_mask srq_attr_mask,
+ struct ib_udata *udata)
+{
+ struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
+ ib_srq);
+ struct bnxt_re_dev *rdev = srq->rdev;
+ int rc;
+
+ switch (srq_attr_mask) {
+ case IB_SRQ_MAX_WR:
+ /* SRQ resize is not supported */
+ break;
+ case IB_SRQ_LIMIT:
+ /* Change the SRQ threshold */
+ if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
+ return -EINVAL;
+
+ srq->qplib_srq.threshold = srq_attr->srq_limit;
+ rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Modify HW SRQ failed!");
+ return rc;
+ }
+ /* On success, update the shadow */
+ srq->srq_limit = srq_attr->srq_limit;
+ /* No need to build and send a response back to udata */
+ break;
+ default:
+ dev_err(rdev_to_dev(rdev),
+ "Unsupported srq_attr_mask 0x%x", srq_attr_mask);
+ return -EINVAL;
+ }
+ return 0;
+}
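Consumer-side usage sketch for the IB_SRQ_LIMIT path above; the limit value is hypothetical and error handling is trimmed:

	struct ib_srq_attr attr = {
		.srq_limit = 16,	/* fire once fewer than 16 WQEs remain */
	};

	if (ib_modify_srq(srq, &attr, IB_SRQ_LIMIT))
		pr_warn("failed to arm SRQ limit\n");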
+
+int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
+{
+ struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
+ ib_srq);
+ struct bnxt_re_srq tsrq;
+ struct bnxt_re_dev *rdev = srq->rdev;
+ int rc;
+
+ /* Get live SRQ attr */
+ tsrq.qplib_srq.id = srq->qplib_srq.id;
+ rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Query HW SRQ failed!");
+ return rc;
+ }
+ srq_attr->max_wr = srq->qplib_srq.max_wqe;
+ srq_attr->max_sge = srq->qplib_srq.max_sge;
+ srq_attr->srq_limit = tsrq.qplib_srq.threshold;
+
+ return 0;
+}
+
+int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
+ ib_srq);
+ struct bnxt_qplib_swqe wqe;
+ unsigned long flags;
+ int rc = 0, payload_sz = 0;
+
+ spin_lock_irqsave(&srq->lock, flags);
+ while (wr) {
+ /* Transcribe each ib_recv_wr to qplib_swqe */
+ wqe.num_sge = wr->num_sge;
+ payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
+ wr->num_sge);
+ wqe.wr_id = wr->wr_id;
+ wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
+
+ rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe);
+ if (rc) {
+ *bad_wr = wr;
+ break;
+ }
+ wr = wr->next;
+ }
+ spin_unlock_irqrestore(&srq->lock, flags);
+
+ return rc;
+}
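Caller-side sketch of what feeds this loop: a chain of ib_recv_wr posted through the uniform verbs entry point. The buffer fields here are assumptions, standing in for a pre-mapped receive buffer:

	struct ib_sge sge = {
		.addr	= dma_addr,		/* assumed DMA-mapped buffer */
		.length	= buf_len,
		.lkey	= mr_lkey,
	};
	struct ib_recv_wr wr = {
		.wr_id		= (u64)(unsigned long)ctx,	/* echoed in the CQE */
		.sg_list	= &sge,
		.num_sge	= 1,
	}, *bad_wr;

	if (ib_post_srq_recv(srq, &wr, &bad_wr))
		pr_warn("post_srq_recv failed at wr_id %llu\n", bad_wr->wr_id);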
static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
struct bnxt_re_qp *qp1_qp,
int qp_attr_mask)
@@ -2295,10 +2542,14 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
/* Completion Queues */
int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
{
- struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
- struct bnxt_re_dev *rdev = cq->rdev;
int rc;
- struct bnxt_qplib_nq *nq = cq->qplib_cq.nq;
+ struct bnxt_re_cq *cq;
+ struct bnxt_qplib_nq *nq;
+ struct bnxt_re_dev *rdev;
+
+ cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
+ rdev = cq->rdev;
+ nq = cq->qplib_cq.nq;
rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
if (rc) {
@@ -2308,12 +2559,11 @@ int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
if (!IS_ERR_OR_NULL(cq->umem))
ib_umem_release(cq->umem);
- if (cq) {
- kfree(cq->cql);
- kfree(cq);
- }
atomic_dec(&rdev->cq_count);
nq->budget--;
+ kfree(cq->cql);
+ kfree(cq);
+
return 0;
}
@@ -3078,7 +3328,8 @@ struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
mr->qplib_mr.hwq.level = PBL_LVL_MAX;
mr->qplib_mr.total_size = -1; /* Infinite length */
- rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false);
+ rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false,
+ PAGE_SIZE);
if (rc)
goto fail_mr;
@@ -3104,10 +3355,8 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
int rc;
rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
- if (rc) {
+ if (rc)
dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
- return rc;
- }
if (mr->pages) {
rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
@@ -3170,7 +3419,7 @@ struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
if (rc)
- goto fail;
+ goto bail;
mr->ib_mr.lkey = mr->qplib_mr.lkey;
mr->ib_mr.rkey = mr->ib_mr.lkey;
@@ -3192,9 +3441,10 @@ struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
return &mr->ib_mr;
fail_mr:
- bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
-fail:
kfree(mr->pages);
+fail:
+ bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
+bail:
kfree(mr);
return ERR_PTR(rc);
}
@@ -3248,6 +3498,46 @@ int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
return rc;
}
+static int bnxt_re_page_size_ok(int page_shift)
+{
+ switch (page_shift) {
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4K:
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_8K:
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_64K:
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_2M:
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256K:
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1M:
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4M:
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
+ int page_shift)
+{
+ u64 *pbl_tbl = pbl_tbl_orig;
+ u64 paddr;
+ u64 page_mask = (1ULL << page_shift) - 1;
+ int i, pages;
+ struct scatterlist *sg;
+ int entry;
+
+ for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+ pages = sg_dma_len(sg) >> PAGE_SHIFT;
+ for (i = 0; i < pages; i++) {
+ paddr = sg_dma_address(sg) + (i << PAGE_SHIFT);
+ if (pbl_tbl == pbl_tbl_orig)
+ *pbl_tbl++ = paddr & ~page_mask;
+ else if ((paddr & page_mask) == 0)
+ *pbl_tbl++ = paddr;
+ }
+ }
+ return pbl_tbl - pbl_tbl_orig;
+}
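Worked example for the loop above with page_shift = 21 (2M pages), i.e. page_mask = 0x1fffff, scanning 4K DMA chunks of a hugetlb region starting at 0x40200000:

	/* first chunk: pbl_tbl[0] = 0x40200000 & ~0x1fffff = 0x40200000 */
	/* chunks with (paddr & 0x1fffff) != 0 are skipped               */
	/* chunk at 0x40400000: aligned -> pbl_tbl[1] = 0x40400000       */

The net effect is one PBL entry per huge page, even though the scatterlist is still walked in PAGE_SHIFT-sized steps.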
+
/* uverbs */
struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
u64 virt_addr, int mr_access_flags,
@@ -3257,10 +3547,8 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
struct bnxt_re_dev *rdev = pd->rdev;
struct bnxt_re_mr *mr;
struct ib_umem *umem;
- u64 *pbl_tbl, *pbl_tbl_orig;
- int i, umem_pgs, pages, rc;
- struct scatterlist *sg;
- int entry;
+ u64 *pbl_tbl = NULL;
+ int umem_pgs, page_shift, rc;
if (length > BNXT_RE_MAX_MR_SIZE) {
dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%ld\n",
@@ -3277,64 +3565,68 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
+ rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
+ goto free_mr;
+ }
+ /* The fixed portion of the rkey is the same as the lkey */
+ mr->ib_mr.rkey = mr->qplib_mr.rkey;
+
umem = ib_umem_get(ib_pd->uobject->context, start, length,
mr_access_flags, 0);
if (IS_ERR(umem)) {
dev_err(rdev_to_dev(rdev), "Failed to get umem");
rc = -EFAULT;
- goto free_mr;
+ goto free_mrw;
}
mr->ib_umem = umem;
- rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
- if (rc) {
- dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
- goto release_umem;
- }
- /* The fixed portion of the rkey is the same as the lkey */
- mr->ib_mr.rkey = mr->qplib_mr.rkey;
-
mr->qplib_mr.va = virt_addr;
umem_pgs = ib_umem_page_count(umem);
if (!umem_pgs) {
dev_err(rdev_to_dev(rdev), "umem is invalid!");
rc = -EINVAL;
- goto free_mrw;
+ goto free_umem;
}
mr->qplib_mr.total_size = length;
pbl_tbl = kcalloc(umem_pgs, sizeof(u64 *), GFP_KERNEL);
if (!pbl_tbl) {
- rc = -EINVAL;
- goto free_mrw;
+ rc = -ENOMEM;
+ goto free_umem;
}
- pbl_tbl_orig = pbl_tbl;
- if (umem->hugetlb) {
- dev_err(rdev_to_dev(rdev), "umem hugetlb not supported!");
+ page_shift = umem->page_shift;
+
+ if (!bnxt_re_page_size_ok(page_shift)) {
+ dev_err(rdev_to_dev(rdev), "umem page size unsupported!");
rc = -EFAULT;
goto fail;
}
- if (umem->page_shift != PAGE_SHIFT) {
- dev_err(rdev_to_dev(rdev), "umem page shift unsupported!");
- rc = -EFAULT;
+ if (!umem->hugetlb && length > BNXT_RE_MAX_MR_SIZE_LOW) {
+ dev_err(rdev_to_dev(rdev), "Requested MR Sz:%llu Max sup:%llu",
+ length, (u64)BNXT_RE_MAX_MR_SIZE_LOW);
+ rc = -EINVAL;
goto fail;
}
- /* Map umem buf ptrs to the PBL */
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
- pages = sg_dma_len(sg) >> umem->page_shift;
- for (i = 0; i < pages; i++, pbl_tbl++)
- *pbl_tbl = sg_dma_address(sg) + (i << umem->page_shift);
+ if (umem->hugetlb && length > BNXT_RE_PAGE_SIZE_2M) {
+ page_shift = BNXT_RE_PAGE_SHIFT_2M;
+ dev_warn(rdev_to_dev(rdev), "umem hugetlb set page_size %x",
+ 1 << page_shift);
}
- rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl_orig,
- umem_pgs, false);
+
+ /* Map umem buf ptrs to the PBL */
+ umem_pgs = fill_umem_pbl_tbl(umem, pbl_tbl, page_shift);
+ rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl,
+ umem_pgs, false, 1 << page_shift);
if (rc) {
dev_err(rdev_to_dev(rdev), "Failed to register user MR");
goto fail;
}
- kfree(pbl_tbl_orig);
+ kfree(pbl_tbl);
mr->ib_mr.lkey = mr->qplib_mr.lkey;
mr->ib_mr.rkey = mr->qplib_mr.lkey;
@@ -3342,11 +3634,11 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
return &mr->ib_mr;
fail:
- kfree(pbl_tbl_orig);
+ kfree(pbl_tbl);
+free_umem:
+ ib_umem_release(umem);
free_mrw:
bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
-release_umem:
- ib_umem_release(umem);
free_mr:
kfree(mr);
return ERR_PTR(rc);
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 1df11ed272ea..423ebe012f95 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -68,6 +68,15 @@ struct bnxt_re_ah {
struct bnxt_qplib_ah qplib_ah;
};
+struct bnxt_re_srq {
+ struct bnxt_re_dev *rdev;
+ u32 srq_limit;
+ struct ib_srq ib_srq;
+ struct bnxt_qplib_srq qplib_srq;
+ struct ib_umem *umem;
+ spinlock_t lock; /* protect srq */
+};
+
struct bnxt_re_qp {
struct list_head list;
struct bnxt_re_dev *rdev;
@@ -143,6 +152,7 @@ int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
struct ib_port_attr *port_attr);
int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
struct ib_port_immutable *immutable);
+void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str);
int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
u16 index, u16 *pkey);
int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
@@ -164,6 +174,16 @@ struct ib_ah *bnxt_re_create_ah(struct ib_pd *pd,
int bnxt_re_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
int bnxt_re_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
int bnxt_re_destroy_ah(struct ib_ah *ah);
+struct ib_srq *bnxt_re_create_srq(struct ib_pd *pd,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_udata *udata);
+int bnxt_re_modify_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
+ enum ib_srq_attr_mask srq_attr_mask,
+ struct ib_udata *udata);
+int bnxt_re_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
+int bnxt_re_destroy_srq(struct ib_srq *srq);
+int bnxt_re_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *recv_wr,
+ struct ib_recv_wr **bad_recv_wr);
struct ib_qp *bnxt_re_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *qp_init_attr,
struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index aafc19aa5de1..508d00a5a106 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -80,6 +80,79 @@ static DEFINE_MUTEX(bnxt_re_dev_lock);
static struct workqueue_struct *bnxt_re_wq;
static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait);
+/* SR-IOV helper functions */
+
+static void bnxt_re_get_sriov_func_type(struct bnxt_re_dev *rdev)
+{
+ struct bnxt *bp;
+
+ bp = netdev_priv(rdev->en_dev->net);
+ if (BNXT_VF(bp))
+ rdev->is_virtfn = 1;
+}
+
+/* Set the maximum number of each resource that the driver actually wants
+ * to allocate. This may be up to the maximum number the firmware has
+ * reserved for the function. The driver may choose to allocate fewer
+ * resources than the firmware maximum.
+ */
+static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
+{
+ u32 vf_qps = 0, vf_srqs = 0, vf_cqs = 0, vf_mrws = 0, vf_gids = 0;
+ u32 i;
+ u32 vf_pct;
+ u32 num_vfs;
+ struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+
+ rdev->qplib_ctx.qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT,
+ dev_attr->max_qp);
+
+ rdev->qplib_ctx.mrw_count = BNXT_RE_MAX_MRW_COUNT_256K;
+ /* Use max_mr from fw since max_mrw does not get set */
+ rdev->qplib_ctx.mrw_count = min_t(u32, rdev->qplib_ctx.mrw_count,
+ dev_attr->max_mr);
+ rdev->qplib_ctx.srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT,
+ dev_attr->max_srq);
+ rdev->qplib_ctx.cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT,
+ dev_attr->max_cq);
+
+ for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
+ rdev->qplib_ctx.tqm_count[i] =
+ rdev->dev_attr.tqm_alloc_reqs[i];
+
+ if (rdev->num_vfs) {
+ /*
+ * Reserve a set of resources for the PF. Divide the remaining
+ * resources among the VFs
+ */
+ vf_pct = 100 - BNXT_RE_PCT_RSVD_FOR_PF;
+ num_vfs = 100 * rdev->num_vfs;
+ vf_qps = (rdev->qplib_ctx.qpc_count * vf_pct) / num_vfs;
+ vf_srqs = (rdev->qplib_ctx.srqc_count * vf_pct) / num_vfs;
+ vf_cqs = (rdev->qplib_ctx.cq_count * vf_pct) / num_vfs;
+ /*
+ * The driver allows many more MRs than other resources. If the
+ * firmware does also, then reserve a fixed amount for the PF
+ * and divide the rest among VFs. VFs may use many MRs for NFS
+ * mounts, iSER, NVMe applications, etc. If the firmware
+ * severely restricts the number of MRs, then let PF have
+ * half and divide the rest among VFs, as for the other
+ * resource types.
+ */
+ if (rdev->qplib_ctx.mrw_count < BNXT_RE_MAX_MRW_COUNT_64K)
+ vf_mrws = rdev->qplib_ctx.mrw_count * vf_pct / num_vfs;
+ else
+ vf_mrws = (rdev->qplib_ctx.mrw_count -
+ BNXT_RE_RESVD_MR_FOR_PF) / rdev->num_vfs;
+ vf_gids = BNXT_RE_MAX_GID_PER_VF;
+ }
+ rdev->qplib_ctx.vf_res.max_mrw_per_vf = vf_mrws;
+ rdev->qplib_ctx.vf_res.max_gid_per_vf = vf_gids;
+ rdev->qplib_ctx.vf_res.max_qp_per_vf = vf_qps;
+ rdev->qplib_ctx.vf_res.max_srq_per_vf = vf_srqs;
+ rdev->qplib_ctx.vf_res.max_cq_per_vf = vf_cqs;
+}
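Worked example, assuming 4 VFs and the 50% PF reservation above: vf_pct = 50 and the divisor num_vfs = 100 * 4 = 400, so

	qpc_count = 65536           -> vf_qps  = 65536 * 50 / 400 = 8192 per VF
	mrw_count = 262144 (>= 64K) -> vf_mrws = (262144 - 32768) / 4 = 57344 per VF

with the PF implicitly keeping whatever the VFs are not granted.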
+
/* for handling bnxt_en callbacks later */
static void bnxt_re_stop(void *p)
{
@@ -91,6 +164,15 @@ static void bnxt_re_start(void *p)
static void bnxt_re_sriov_config(void *p, int num_vfs)
{
+ struct bnxt_re_dev *rdev = p;
+
+ if (!rdev)
+ return;
+
+ rdev->num_vfs = num_vfs;
+ bnxt_re_set_resource_limits(rdev);
+ bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
+ &rdev->qplib_ctx);
}
static void bnxt_re_shutdown(void *p)
@@ -417,7 +499,7 @@ static struct bnxt_en_dev *bnxt_re_dev_probe(struct net_device *netdev)
return ERR_PTR(-EINVAL);
if (!(en_dev->flags & BNXT_EN_FLAG_ROCE_CAP)) {
- dev_dbg(&pdev->dev,
+ dev_info(&pdev->dev,
"%s: probe error: RoCE is not supported on this device",
ROCE_DRV_MODULE_NAME);
return ERR_PTR(-ENODEV);
@@ -490,6 +572,7 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
ibdev->query_port = bnxt_re_query_port;
ibdev->get_port_immutable = bnxt_re_get_port_immutable;
+ ibdev->get_dev_fw_str = bnxt_re_query_fw_str;
ibdev->query_pkey = bnxt_re_query_pkey;
ibdev->query_gid = bnxt_re_query_gid;
ibdev->get_netdev = bnxt_re_get_netdev;
@@ -505,6 +588,12 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
ibdev->query_ah = bnxt_re_query_ah;
ibdev->destroy_ah = bnxt_re_destroy_ah;
+ ibdev->create_srq = bnxt_re_create_srq;
+ ibdev->modify_srq = bnxt_re_modify_srq;
+ ibdev->query_srq = bnxt_re_query_srq;
+ ibdev->destroy_srq = bnxt_re_destroy_srq;
+ ibdev->post_srq_recv = bnxt_re_post_srq_recv;
+
ibdev->create_qp = bnxt_re_create_qp;
ibdev->modify_qp = bnxt_re_modify_qp;
ibdev->query_qp = bnxt_re_query_qp;
@@ -541,14 +630,6 @@ static ssize_t show_rev(struct device *device, struct device_attribute *attr,
return scnprintf(buf, PAGE_SIZE, "0x%x\n", rdev->en_dev->pdev->vendor);
}
-static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
- char *buf)
-{
- struct bnxt_re_dev *rdev = to_bnxt_re_dev(device, ibdev.dev);
-
- return scnprintf(buf, PAGE_SIZE, "%s\n", rdev->dev_attr.fw_ver);
-}
-
static ssize_t show_hca(struct device *device, struct device_attribute *attr,
char *buf)
{
@@ -558,12 +639,10 @@ static ssize_t show_hca(struct device *device, struct device_attribute *attr,
}
static DEVICE_ATTR(hw_rev, 0444, show_rev, NULL);
-static DEVICE_ATTR(fw_rev, 0444, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, 0444, show_hca, NULL);
static struct device_attribute *bnxt_re_attributes[] = {
&dev_attr_hw_rev,
- &dev_attr_fw_rev,
&dev_attr_hca_type
};
@@ -616,10 +695,10 @@ static struct bnxt_re_dev *bnxt_re_dev_add(struct net_device *netdev,
return rdev;
}
-static int bnxt_re_aeq_handler(struct bnxt_qplib_rcfw *rcfw,
- struct creq_func_event *aeqe)
+static int bnxt_re_handle_unaffi_async_event(struct creq_func_event
+ *unaffi_async)
{
- switch (aeqe->event) {
+ switch (unaffi_async->event) {
case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
break;
case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
@@ -648,6 +727,93 @@ static int bnxt_re_aeq_handler(struct bnxt_qplib_rcfw *rcfw,
return 0;
}
+static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
+ struct bnxt_re_qp *qp)
+{
+ struct ib_event event;
+
+ memset(&event, 0, sizeof(event));
+ if (qp->qplib_qp.srq) {
+ event.device = &qp->rdev->ibdev;
+ event.element.qp = &qp->ib_qp;
+ event.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ }
+
+ if (event.device && qp->ib_qp.event_handler)
+ qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);
+
+ return 0;
+}
+
+static int bnxt_re_handle_affi_async_event(struct creq_qp_event *affi_async,
+ void *obj)
+{
+ int rc = 0;
+ u8 event;
+
+ if (!obj)
+ return rc; /* QP was already dead, still return success */
+
+ event = affi_async->event;
+ if (event == CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION) {
+ struct bnxt_qplib_qp *lib_qp = obj;
+ struct bnxt_re_qp *qp = container_of(lib_qp, struct bnxt_re_qp,
+ qplib_qp);
+ rc = bnxt_re_handle_qp_async_event(affi_async, qp);
+ }
+ return rc;
+}
+
+static int bnxt_re_aeq_handler(struct bnxt_qplib_rcfw *rcfw,
+ void *aeqe, void *obj)
+{
+ struct creq_qp_event *affi_async;
+ struct creq_func_event *unaffi_async;
+ u8 type;
+ int rc;
+
+ type = ((struct creq_base *)aeqe)->type;
+ if (type == CREQ_BASE_TYPE_FUNC_EVENT) {
+ unaffi_async = aeqe;
+ rc = bnxt_re_handle_unaffi_async_event(unaffi_async);
+ } else {
+ affi_async = aeqe;
+ rc = bnxt_re_handle_affi_async_event(affi_async, obj);
+ }
+
+ return rc;
+}
+
+static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq,
+ struct bnxt_qplib_srq *handle, u8 event)
+{
+ struct bnxt_re_srq *srq = container_of(handle, struct bnxt_re_srq,
+ qplib_srq);
+ struct ib_event ib_event;
+ int rc = 0;
+
+ if (!srq) {
+ dev_err(NULL, "%s: SRQ is NULL, SRQN not handled",
+ ROCE_DRV_MODULE_NAME);
+ rc = -EINVAL;
+ goto done;
+ }
+ ib_event.device = &srq->rdev->ibdev;
+ ib_event.element.srq = &srq->ib_srq;
+ if (event == NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT)
+ ib_event.event = IB_EVENT_SRQ_LIMIT_REACHED;
+ else
+ ib_event.event = IB_EVENT_SRQ_ERR;
+
+ if (srq->ib_srq.event_handler) {
+ /* Lock event_handler? */
+ (*srq->ib_srq.event_handler)(&ib_event,
+ srq->ib_srq.srq_context);
+ }
+done:
+ return rc;
+}
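Consumer-side sketch of receiving this event; a hypothetical handler installed through srq_init_attr.event_handler:

static void example_srq_event_handler(struct ib_event *event, void *srq_context)
{
	if (event->event == IB_EVENT_SRQ_LIMIT_REACHED) {
		/* replenish receive buffers, then re-arm via ib_modify_srq() */
	} else {
		pr_err("SRQ error event %d\n", event->event);
	}
}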
+
static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
struct bnxt_qplib_cq *handle)
{
@@ -690,7 +856,8 @@ static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1],
i - 1, rdev->msix_entries[i].vector,
rdev->msix_entries[i].db_offset,
- &bnxt_re_cqn_handler, NULL);
+ &bnxt_re_cqn_handler,
+ &bnxt_re_srqn_handler);
if (rc) {
dev_err(rdev_to_dev(rdev),
@@ -734,7 +901,8 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
/* Configure and allocate resources for qplib */
rdev->qplib_res.rcfw = &rdev->rcfw;
- rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr);
+ rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr,
+ rdev->is_virtfn);
if (rc)
goto fail;
@@ -1035,19 +1203,6 @@ static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait)
}
}
-static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
-{
- u32 i;
-
- rdev->qplib_ctx.qpc_count = BNXT_RE_MAX_QPC_COUNT;
- rdev->qplib_ctx.mrw_count = BNXT_RE_MAX_MRW_COUNT;
- rdev->qplib_ctx.srqc_count = BNXT_RE_MAX_SRQC_COUNT;
- rdev->qplib_ctx.cq_count = BNXT_RE_MAX_CQ_COUNT;
- for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
- rdev->qplib_ctx.tqm_count[i] =
- rdev->dev_attr.tqm_alloc_reqs[i];
-}
-
/* worker thread for polling periodic events. Now used for QoS programming*/
static void bnxt_re_worker(struct work_struct *work)
{
@@ -1070,6 +1225,9 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
}
set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
+ /* Check whether VF or PF */
+ bnxt_re_get_sriov_func_type(rdev);
+
rc = bnxt_re_request_msix(rdev);
if (rc) {
pr_err("Failed to get MSI-X vectors: %#x\n", rc);
@@ -1101,16 +1259,18 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
(rdev->en_dev->pdev, &rdev->rcfw,
rdev->msix_entries[BNXT_RE_AEQ_IDX].vector,
rdev->msix_entries[BNXT_RE_AEQ_IDX].db_offset,
- 0, &bnxt_re_aeq_handler);
+ rdev->is_virtfn, &bnxt_re_aeq_handler);
if (rc) {
pr_err("Failed to enable RCFW channel: %#x\n", rc);
goto free_ring;
}
- rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr);
+ rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr,
+ rdev->is_virtfn);
if (rc)
goto disable_rcfw;
- bnxt_re_set_resource_limits(rdev);
+ if (!rdev->is_virtfn)
+ bnxt_re_set_resource_limits(rdev);
rc = bnxt_qplib_alloc_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx, 0);
if (rc) {
@@ -1125,7 +1285,8 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
goto free_ctx;
}
- rc = bnxt_qplib_init_rcfw(&rdev->rcfw, &rdev->qplib_ctx, 0);
+ rc = bnxt_qplib_init_rcfw(&rdev->rcfw, &rdev->qplib_ctx,
+ rdev->is_virtfn);
if (rc) {
pr_err("Failed to initialize RCFW: %#x\n", rc);
goto free_sctx;
@@ -1144,13 +1305,15 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
goto fail;
}
- rc = bnxt_re_setup_qos(rdev);
- if (rc)
- pr_info("RoCE priority not yet configured\n");
+ if (!rdev->is_virtfn) {
+ rc = bnxt_re_setup_qos(rdev);
+ if (rc)
+ pr_info("RoCE priority not yet configured\n");
- INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker);
- set_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags);
- schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
+ INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker);
+ set_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags);
+ schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
+ }
/* Register ib dev */
rc = bnxt_re_register_ib(rdev);
@@ -1176,6 +1339,7 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
&rdev->active_width);
+ set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);
bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE);
bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_GID_CHANGE);
@@ -1400,7 +1564,7 @@ err_netdev:
static void __exit bnxt_re_mod_exit(void)
{
- struct bnxt_re_dev *rdev;
+ struct bnxt_re_dev *rdev, *next;
LIST_HEAD(to_be_deleted);
mutex_lock(&bnxt_re_dev_lock);
@@ -1408,8 +1572,11 @@ static void __exit bnxt_re_mod_exit(void)
if (!list_empty(&bnxt_re_dev_list))
list_splice_init(&bnxt_re_dev_list, &to_be_deleted);
mutex_unlock(&bnxt_re_dev_lock);
-
- list_for_each_entry(rdev, &to_be_deleted, list) {
+ /*
+ * Clean up the devices in reverse order so that the VF device
+ * cleanup happens before the PF cleanup
+ */
+ list_for_each_entry_safe_reverse(rdev, next, &to_be_deleted, list) {
dev_info(rdev_to_dev(rdev), "Unregistering Device");
bnxt_re_dev_stop(rdev);
bnxt_re_ib_unreg(rdev, true);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 61764f7aa79b..8b5f11ac0e42 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -52,6 +52,7 @@
static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq);
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
+static void bnxt_qplib_arm_srq(struct bnxt_qplib_srq *srq, u32 arm_type);
static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
{
@@ -278,6 +279,7 @@ static void bnxt_qplib_service_nq(unsigned long data)
struct nq_base *nqe, **nq_ptr;
struct bnxt_qplib_cq *cq;
int num_cqne_processed = 0;
+ int num_srqne_processed = 0;
u32 sw_cons, raw_cons;
u16 type;
int budget = nq->budget;
@@ -320,6 +322,26 @@ static void bnxt_qplib_service_nq(unsigned long data)
spin_unlock_bh(&cq->compl_lock);
break;
}
+ case NQ_BASE_TYPE_SRQ_EVENT:
+ {
+ struct nq_srq_event *nqsrqe =
+ (struct nq_srq_event *)nqe;
+
+ q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
+ q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
+ << 32;
+ bnxt_qplib_arm_srq((struct bnxt_qplib_srq *)q_handle,
+ DBR_DBR_TYPE_SRQ_ARMENA);
+ if (!nq->srqn_handler(nq,
+ (struct bnxt_qplib_srq *)q_handle,
+ nqsrqe->event))
+ num_srqne_processed++;
+ else
+ dev_warn(&nq->pdev->dev,
+ "QPLIB: SRQ event 0x%x not handled",
+ nqsrqe->event);
+ break;
+ }
case NQ_BASE_TYPE_DBQ_EVENT:
break;
default:
@@ -384,17 +406,19 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
int (*cqn_handler)(struct bnxt_qplib_nq *nq,
struct bnxt_qplib_cq *),
int (*srqn_handler)(struct bnxt_qplib_nq *nq,
- void *, u8 event))
+ struct bnxt_qplib_srq *,
+ u8 event))
{
resource_size_t nq_base;
int rc = -1;
nq->pdev = pdev;
nq->vector = msix_vector;
+ if (cqn_handler)
+ nq->cqn_handler = cqn_handler;
- nq->cqn_handler = cqn_handler;
-
- nq->srqn_handler = srqn_handler;
+ if (srqn_handler)
+ nq->srqn_handler = srqn_handler;
tasklet_init(&nq->worker, bnxt_qplib_service_nq, (unsigned long)nq);
@@ -410,7 +434,6 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
if (rc) {
dev_err(&nq->pdev->dev,
"Failed to request IRQ for NQ: %#x", rc);
- bnxt_qplib_disable_nq(nq);
goto fail;
}
@@ -469,6 +492,238 @@ int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq)
return 0;
}
+/* SRQ */
+static void bnxt_qplib_arm_srq(struct bnxt_qplib_srq *srq, u32 arm_type)
+{
+ struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
+ struct dbr_dbr db_msg = { 0 };
+ void __iomem *db;
+ u32 sw_prod = 0;
+
+ /* Ring DB */
+ sw_prod = (arm_type == DBR_DBR_TYPE_SRQ_ARM) ? srq->threshold :
+ HWQ_CMP(srq_hwq->prod, srq_hwq);
+ db_msg.index = cpu_to_le32((sw_prod << DBR_DBR_INDEX_SFT) &
+ DBR_DBR_INDEX_MASK);
+ db_msg.type_xid = cpu_to_le32(((srq->id << DBR_DBR_XID_SFT) &
+ DBR_DBR_XID_MASK) | arm_type);
+ db = (arm_type == DBR_DBR_TYPE_SRQ_ARMENA) ?
+ srq->dbr_base : srq->dpi->dbr;
+ wmb(); /* barrier before db ring */
+ __iowrite64_copy(db, &db_msg, sizeof(db_msg) / sizeof(u64));
+}
+
+int bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq)
+{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct cmdq_destroy_srq req;
+ struct creq_destroy_srq_resp resp;
+ u16 cmd_flags = 0;
+ int rc;
+
+ RCFW_CMD_PREP(req, DESTROY_SRQ, cmd_flags);
+
+ /* Configure the request */
+ req.srq_cid = cpu_to_le32(srq->id);
+
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp, NULL, 0);
+ if (rc)
+ return rc;
+
+ bnxt_qplib_free_hwq(res->pdev, &srq->hwq);
+ kfree(srq->swq);
+ return 0;
+}
+
+int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq)
+{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct cmdq_create_srq req;
+ struct creq_create_srq_resp resp;
+ struct bnxt_qplib_pbl *pbl;
+ u16 cmd_flags = 0;
+ int rc, idx;
+
+ srq->hwq.max_elements = srq->max_wqe;
+ rc = bnxt_qplib_alloc_init_hwq(res->pdev, &srq->hwq, srq->sglist,
+ srq->nmap, &srq->hwq.max_elements,
+ BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
+ PAGE_SIZE, HWQ_TYPE_QUEUE);
+ if (rc)
+ goto exit;
+
+ srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
+ GFP_KERNEL);
+ if (!srq->swq)
+ goto fail;
+
+ RCFW_CMD_PREP(req, CREATE_SRQ, cmd_flags);
+
+ /* Configure the request */
+ req.dpi = cpu_to_le32(srq->dpi->dpi);
+ req.srq_handle = cpu_to_le64(srq);
+
+ req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
+ pbl = &srq->hwq.pbl[PBL_LVL_0];
+ req.pg_size_lvl = cpu_to_le16((((u16)srq->hwq.level &
+ CMDQ_CREATE_SRQ_LVL_MASK) <<
+ CMDQ_CREATE_SRQ_LVL_SFT) |
+ (pbl->pg_size == ROCE_PG_SIZE_4K ?
+ CMDQ_CREATE_SRQ_PG_SIZE_PG_4K :
+ pbl->pg_size == ROCE_PG_SIZE_8K ?
+ CMDQ_CREATE_SRQ_PG_SIZE_PG_8K :
+ pbl->pg_size == ROCE_PG_SIZE_64K ?
+ CMDQ_CREATE_SRQ_PG_SIZE_PG_64K :
+ pbl->pg_size == ROCE_PG_SIZE_2M ?
+ CMDQ_CREATE_SRQ_PG_SIZE_PG_2M :
+ pbl->pg_size == ROCE_PG_SIZE_8M ?
+ CMDQ_CREATE_SRQ_PG_SIZE_PG_8M :
+ pbl->pg_size == ROCE_PG_SIZE_1G ?
+ CMDQ_CREATE_SRQ_PG_SIZE_PG_1G :
+ CMDQ_CREATE_SRQ_PG_SIZE_PG_4K));
+ req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
+ req.pd_id = cpu_to_le32(srq->pd->id);
+ req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);
+
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp, NULL, 0);
+ if (rc)
+ goto fail;
+
+ spin_lock_init(&srq->lock);
+ srq->start_idx = 0;
+ srq->last_idx = srq->hwq.max_elements - 1;
+ for (idx = 0; idx < srq->hwq.max_elements; idx++)
+ srq->swq[idx].next_idx = idx + 1;
+ srq->swq[srq->last_idx].next_idx = -1;
+
+ srq->id = le32_to_cpu(resp.xid);
+ srq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
+ if (srq->threshold)
+ bnxt_qplib_arm_srq(srq, DBR_DBR_TYPE_SRQ_ARMENA);
+ srq->arm_req = false;
+
+ return 0;
+fail:
+ bnxt_qplib_free_hwq(res->pdev, &srq->hwq);
+ kfree(srq->swq);
+exit:
+ return rc;
+}
+
+int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq)
+{
+ struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
+ u32 sw_prod, sw_cons, count = 0;
+
+ sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
+ sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
+
+ count = sw_prod > sw_cons ? sw_prod - sw_cons :
+ srq_hwq->max_elements - sw_cons + sw_prod;
+ if (count > srq->threshold) {
+ srq->arm_req = false;
+ bnxt_qplib_arm_srq(srq, DBR_DBR_TYPE_SRQ_ARM);
+ } else {
+ /* Deferred arming */
+ srq->arm_req = true;
+ }
+
+ return 0;
+}
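bnxt_qplib_modify_srq() derives how many SRQEs are outstanding from wrapped producer/consumer indices. The same arithmetic extracted into a self-contained check (note that prod == cons evaluates to max under this formula):

	#include <stdio.h>

	static unsigned int ring_count(unsigned int prod, unsigned int cons,
				       unsigned int max)
	{
		return prod > cons ? prod - cons : max - cons + prod;
	}

	int main(void)
	{
		/* no wrap: 5 - 2 = 3; wrapped: 8 - 5 + 2 = 5 */
		printf("%u %u\n", ring_count(5, 2, 8), ring_count(2, 5, 8));
		return 0;
	}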
+
+int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq)
+{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct cmdq_query_srq req;
+ struct creq_query_srq_resp resp;
+ struct bnxt_qplib_rcfw_sbuf *sbuf;
+ struct creq_query_srq_resp_sb *sb;
+ u16 cmd_flags = 0;
+ int rc = 0;
+
+ RCFW_CMD_PREP(req, QUERY_SRQ, cmd_flags);
+ req.srq_cid = cpu_to_le32(srq->id);
+
+ /* Configure the request */
+ sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
+ if (!sbuf)
+ return -ENOMEM;
+ sb = sbuf->sb;
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
+ (void *)sbuf, 0);
+ srq->threshold = le16_to_cpu(sb->srq_limit);
+ bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
+
+ return rc;
+}
+
+int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
+ struct bnxt_qplib_swqe *wqe)
+{
+ struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
+ struct rq_wqe *srqe, **srqe_ptr;
+ struct sq_sge *hw_sge;
+ u32 sw_prod, sw_cons, count = 0;
+ int i, rc = 0, next;
+
+ spin_lock(&srq_hwq->lock);
+ if (srq->start_idx == srq->last_idx) {
+ dev_err(&srq_hwq->pdev->dev, "QPLIB: FP: SRQ (0x%x) is full!",
+ srq->id);
+ rc = -EINVAL;
+ spin_unlock(&srq_hwq->lock);
+ goto done;
+ }
+ next = srq->start_idx;
+ srq->start_idx = srq->swq[next].next_idx;
+ spin_unlock(&srq_hwq->lock);
+
+ sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
+ srqe_ptr = (struct rq_wqe **)srq_hwq->pbl_ptr;
+ srqe = &srqe_ptr[RQE_PG(sw_prod)][RQE_IDX(sw_prod)];
+ memset(srqe, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
+ /* Calculate wqe_size16 and data_len */
+ for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
+ i < wqe->num_sge; i++, hw_sge++) {
+ hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
+ hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
+ hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
+ }
+ srqe->wqe_type = wqe->type;
+ srqe->flags = wqe->flags;
+ srqe->wqe_size = wqe->num_sge +
+ ((offsetof(typeof(*srqe), data) + 15) >> 4);
+ srqe->wr_id[0] = cpu_to_le32((u32)next);
+ srq->swq[next].wr_id = wqe->wr_id;
+
+ srq_hwq->prod++;
+
+ spin_lock(&srq_hwq->lock);
+ sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
+	/* Retain srq_hwq->cons for this logic;
+	 * the lock is actually only required to
+	 * read srq_hwq->cons.
+	 */
+ sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
+ count = sw_prod > sw_cons ? sw_prod - sw_cons :
+ srq_hwq->max_elements - sw_cons + sw_prod;
+ spin_unlock(&srq_hwq->lock);
+ /* Ring DB */
+ bnxt_qplib_arm_srq(srq, DBR_DBR_TYPE_SRQ);
+	if (srq->arm_req && count > srq->threshold) {
+ srq->arm_req = false;
+ bnxt_qplib_arm_srq(srq, DBR_DBR_TYPE_SRQ_ARM);
+ }
+done:
+ return rc;
+}
+
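bnxt_qplib_post_srq_recv() pops a slot off the head of the index-threaded swq[] free list, and bnxt_qplib_release_srqe() in the CQ path below pushes completed slots back at the tail; the driver treats start_idx == last_idx as full, keeping one slot in reserve. A minimal sketch of that index free list (struct and helper names are invented):

	#include <stdio.h>

	struct swq_ent {
		int next_idx;	/* -1 terminates the list */
	};

	static int freelist_get(struct swq_ent *swq, int *start_idx)
	{
		int idx = *start_idx;

		if (idx < 0)
			return -1;
		*start_idx = swq[idx].next_idx;
		return idx;
	}

	static void freelist_put(struct swq_ent *swq, int *last_idx, int idx)
	{
		swq[*last_idx].next_idx = idx;
		*last_idx = idx;
		swq[idx].next_idx = -1;
	}

	int main(void)
	{
		struct swq_ent swq[4] = { {1}, {2}, {3}, {-1} };
		int start = 0, last = 3;
		int got = freelist_get(swq, &start);

		freelist_put(swq, &last, got);
		printf("%d %d %d\n", got, start, last);	/* 0 1 0 */
		return 0;
	}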
/* QP */
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
@@ -737,6 +992,12 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
pbl->pg_size == ROCE_PG_SIZE_1G ?
CMDQ_CREATE_QP_RQ_PG_SIZE_PG_1G :
CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K);
+ } else {
+ /* SRQ */
+ if (qp->srq) {
+ qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
+ req.srq_cid = cpu_to_le32(qp->srq->id);
+ }
}
if (qp->rcq)
@@ -2068,6 +2329,16 @@ done:
return rc;
}
+static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
+{
+ spin_lock(&srq->hwq.lock);
+ srq->swq[srq->last_idx].next_idx = (int)tag;
+ srq->last_idx = (int)tag;
+ srq->swq[srq->last_idx].next_idx = -1;
+	srq->hwq.cons++; /* track SRQE consumption for the occupancy count */
+ spin_unlock(&srq->hwq.lock);
+}
+
static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
struct cq_res_rc *hwcqe,
struct bnxt_qplib_cqe **pcqe,
@@ -2075,6 +2346,7 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
{
struct bnxt_qplib_qp *qp;
struct bnxt_qplib_q *rq;
+ struct bnxt_qplib_srq *srq;
struct bnxt_qplib_cqe *cqe;
u32 wr_id_idx;
int rc = 0;
@@ -2102,27 +2374,46 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
- rq = &qp->rq;
- if (wr_id_idx > rq->hwq.max_elements) {
- dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Process RC ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x",
- wr_id_idx, rq->hwq.max_elements);
- return -EINVAL;
- }
-
- cqe->wr_id = rq->swq[wr_id_idx].wr_id;
- cqe++;
- (*budget)--;
- rq->hwq.cons++;
- *pcqe = cqe;
+ if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
+ srq = qp->srq;
+ if (!srq)
+ return -EINVAL;
+ if (wr_id_idx > srq->hwq.max_elements) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Process RC ");
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x",
+ wr_id_idx, srq->hwq.max_elements);
+ return -EINVAL;
+ }
+ cqe->wr_id = srq->swq[wr_id_idx].wr_id;
+ bnxt_qplib_release_srqe(srq, wr_id_idx);
+ cqe++;
+ (*budget)--;
+ *pcqe = cqe;
+ } else {
+ rq = &qp->rq;
+ if (wr_id_idx > rq->hwq.max_elements) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Process RC ");
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x",
+ wr_id_idx, rq->hwq.max_elements);
+ return -EINVAL;
+ }
+ cqe->wr_id = rq->swq[wr_id_idx].wr_id;
+ cqe++;
+ (*budget)--;
+ rq->hwq.cons++;
+ *pcqe = cqe;
- if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
- qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
- /* Add qp to flush list of the CQ */
- bnxt_qplib_lock_buddy_cq(qp, cq);
- __bnxt_qplib_add_flush_qp(qp);
- bnxt_qplib_unlock_buddy_cq(qp, cq);
+ if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
+ qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
+ /* Add qp to flush list of the CQ */
+ bnxt_qplib_lock_buddy_cq(qp, cq);
+ __bnxt_qplib_add_flush_qp(qp);
+ bnxt_qplib_unlock_buddy_cq(qp, cq);
+ }
}
done:
@@ -2136,6 +2427,7 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
{
struct bnxt_qplib_qp *qp;
struct bnxt_qplib_q *rq;
+ struct bnxt_qplib_srq *srq;
struct bnxt_qplib_cqe *cqe;
u32 wr_id_idx;
int rc = 0;
@@ -2166,27 +2458,48 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
hwcqe->src_qp_high_srq_or_rq_wr_id) &
CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);
- rq = &qp->rq;
- if (wr_id_idx > rq->hwq.max_elements) {
- dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Process UD ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: wr_id idx %#x exceeded RQ max %#x",
- wr_id_idx, rq->hwq.max_elements);
- return -EINVAL;
- }
+ if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
+ srq = qp->srq;
+ if (!srq)
+ return -EINVAL;
- cqe->wr_id = rq->swq[wr_id_idx].wr_id;
- cqe++;
- (*budget)--;
- rq->hwq.cons++;
- *pcqe = cqe;
+ if (wr_id_idx > srq->hwq.max_elements) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Process UD ");
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x",
+ wr_id_idx, srq->hwq.max_elements);
+ return -EINVAL;
+ }
+ cqe->wr_id = srq->swq[wr_id_idx].wr_id;
+ bnxt_qplib_release_srqe(srq, wr_id_idx);
+ cqe++;
+ (*budget)--;
+ *pcqe = cqe;
+ } else {
+ rq = &qp->rq;
+ if (wr_id_idx > rq->hwq.max_elements) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Process UD ");
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x",
+ wr_id_idx, rq->hwq.max_elements);
+ return -EINVAL;
+ }
- if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
- qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
- /* Add qp to flush list of the CQ */
- bnxt_qplib_lock_buddy_cq(qp, cq);
- __bnxt_qplib_add_flush_qp(qp);
- bnxt_qplib_unlock_buddy_cq(qp, cq);
+ cqe->wr_id = rq->swq[wr_id_idx].wr_id;
+ cqe++;
+ (*budget)--;
+ rq->hwq.cons++;
+ *pcqe = cqe;
+
+ if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
+ qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
+ /* Add qp to flush list of the CQ */
+ bnxt_qplib_lock_buddy_cq(qp, cq);
+ __bnxt_qplib_add_flush_qp(qp);
+ bnxt_qplib_unlock_buddy_cq(qp, cq);
+ }
}
done:
return rc;
@@ -2218,6 +2531,7 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
{
struct bnxt_qplib_qp *qp;
struct bnxt_qplib_q *rq;
+ struct bnxt_qplib_srq *srq;
struct bnxt_qplib_cqe *cqe;
u32 wr_id_idx;
int rc = 0;
@@ -2256,26 +2570,49 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);
- rq = &qp->rq;
- if (wr_id_idx > rq->hwq.max_elements) {
- dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Process Raw/QP1 RQ wr_id ");
- dev_err(&cq->hwq.pdev->dev, "QPLIB: ix 0x%x exceeded RQ max 0x%x",
- wr_id_idx, rq->hwq.max_elements);
- return -EINVAL;
- }
-
- cqe->wr_id = rq->swq[wr_id_idx].wr_id;
- cqe++;
- (*budget)--;
- rq->hwq.cons++;
- *pcqe = cqe;
+ if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
+ srq = qp->srq;
+ if (!srq) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: SRQ used but not defined??");
+ return -EINVAL;
+ }
+ if (wr_id_idx > srq->hwq.max_elements) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Process Raw/QP1 ");
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x",
+ wr_id_idx, srq->hwq.max_elements);
+ return -EINVAL;
+ }
+ cqe->wr_id = srq->swq[wr_id_idx].wr_id;
+ bnxt_qplib_release_srqe(srq, wr_id_idx);
+ cqe++;
+ (*budget)--;
+ *pcqe = cqe;
+ } else {
+ rq = &qp->rq;
+ if (wr_id_idx > rq->hwq.max_elements) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Process Raw/QP1 RQ wr_id ");
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: ix 0x%x exceeded RQ max 0x%x",
+ wr_id_idx, rq->hwq.max_elements);
+ return -EINVAL;
+ }
+ cqe->wr_id = rq->swq[wr_id_idx].wr_id;
+ cqe++;
+ (*budget)--;
+ rq->hwq.cons++;
+ *pcqe = cqe;
- if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
- qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
- /* Add qp to flush list of the CQ */
- bnxt_qplib_lock_buddy_cq(qp, cq);
- __bnxt_qplib_add_flush_qp(qp);
- bnxt_qplib_unlock_buddy_cq(qp, cq);
+ if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
+ qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
+ /* Add qp to flush list of the CQ */
+ bnxt_qplib_lock_buddy_cq(qp, cq);
+ __bnxt_qplib_add_flush_qp(qp);
+ bnxt_qplib_unlock_buddy_cq(qp, cq);
+ }
}
done:
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index c582d4ec8173..211b27a8f9e2 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -39,6 +39,27 @@
#ifndef __BNXT_QPLIB_FP_H__
#define __BNXT_QPLIB_FP_H__
+struct bnxt_qplib_srq {
+ struct bnxt_qplib_pd *pd;
+ struct bnxt_qplib_dpi *dpi;
+ void __iomem *dbr_base;
+ u64 srq_handle;
+ u32 id;
+ u32 max_wqe;
+ u32 max_sge;
+ u32 threshold;
+ bool arm_req;
+ struct bnxt_qplib_cq *cq;
+ struct bnxt_qplib_hwq hwq;
+ struct bnxt_qplib_swq *swq;
+ struct scatterlist *sglist;
+ int start_idx;
+ int last_idx;
+ u32 nmap;
+ u16 eventq_hw_ring_id;
+	spinlock_t			lock; /* protect the SRQE linked list */
+};
+
struct bnxt_qplib_sge {
u64 addr;
u32 lkey;
@@ -79,6 +100,7 @@ static inline u32 get_psne_idx(u32 val)
struct bnxt_qplib_swq {
u64 wr_id;
+ int next_idx;
u8 type;
u8 flags;
u32 start_psn;
@@ -404,29 +426,27 @@ struct bnxt_qplib_cq {
writel(NQ_DB_CP_FLAGS | ((raw_cons) & ((cp_bit) - 1)), db)
struct bnxt_qplib_nq {
- struct pci_dev *pdev;
-
- int vector;
- cpumask_t mask;
- int budget;
- bool requested;
- struct tasklet_struct worker;
- struct bnxt_qplib_hwq hwq;
-
- u16 bar_reg;
- u16 bar_reg_off;
- u16 ring_id;
- void __iomem *bar_reg_iomem;
-
- int (*cqn_handler)
- (struct bnxt_qplib_nq *nq,
- struct bnxt_qplib_cq *cq);
- int (*srqn_handler)
- (struct bnxt_qplib_nq *nq,
- void *srq,
- u8 event);
- struct workqueue_struct *cqn_wq;
- char name[32];
+ struct pci_dev *pdev;
+
+ int vector;
+ cpumask_t mask;
+ int budget;
+ bool requested;
+ struct tasklet_struct worker;
+ struct bnxt_qplib_hwq hwq;
+
+ u16 bar_reg;
+ u16 bar_reg_off;
+ u16 ring_id;
+ void __iomem *bar_reg_iomem;
+
+ int (*cqn_handler)(struct bnxt_qplib_nq *nq,
+ struct bnxt_qplib_cq *cq);
+ int (*srqn_handler)(struct bnxt_qplib_nq *nq,
+ struct bnxt_qplib_srq *srq,
+ u8 event);
+ struct workqueue_struct *cqn_wq;
+ char name[32];
};
struct bnxt_qplib_nq_work {
@@ -441,8 +461,18 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
int (*cqn_handler)(struct bnxt_qplib_nq *nq,
struct bnxt_qplib_cq *cq),
int (*srqn_handler)(struct bnxt_qplib_nq *nq,
- void *srq,
+ struct bnxt_qplib_srq *srq,
u8 event));
+int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq);
+int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq);
+int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq);
+int bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq);
+int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
+ struct bnxt_qplib_swqe *wqe);
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index bb5574adf195..8329ec6a7946 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -93,7 +93,8 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
opcode = req->opcode;
if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
(opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
- opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW)) {
+ opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW &&
+ opcode != CMDQ_BASE_OPCODE_QUERY_VERSION)) {
dev_err(&rcfw->pdev->dev,
"QPLIB: RCFW not initialized, reject opcode 0x%x",
opcode);
@@ -615,7 +616,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
int msix_vector,
int cp_bar_reg_off, int virt_fn,
int (*aeq_handler)(struct bnxt_qplib_rcfw *,
- struct creq_func_event *))
+ void *, void *))
{
resource_size_t res_base;
struct cmdq_init init;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 2946a7cfae82..6bee6e3636ea 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -167,7 +167,7 @@ struct bnxt_qplib_rcfw {
#define FIRMWARE_TIMED_OUT 3
wait_queue_head_t waitq;
int (*aeq_handler)(struct bnxt_qplib_rcfw *,
- struct creq_func_event *);
+ void *, void *);
u32 seq_num;
/* Bar region info */
@@ -199,9 +199,8 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
struct bnxt_qplib_rcfw *rcfw,
int msix_vector,
int cp_bar_reg_off, int virt_fn,
- int (*aeq_handler)
- (struct bnxt_qplib_rcfw *,
- struct creq_func_event *));
+ int (*aeq_handler)(struct bnxt_qplib_rcfw *,
+ void *aeqe, void *obj));
struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
struct bnxt_qplib_rcfw *rcfw,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 4e101704e801..ad37d54affcc 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -104,13 +104,12 @@ static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
if (!sghead) {
for (i = 0; i < pages; i++) {
- pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
- pbl->pg_size,
- &pbl->pg_map_arr[i],
- GFP_KERNEL);
+ pbl->pg_arr[i] = dma_zalloc_coherent(&pdev->dev,
+ pbl->pg_size,
+ &pbl->pg_map_arr[i],
+ GFP_KERNEL);
if (!pbl->pg_arr[i])
goto fail;
- memset(pbl->pg_arr[i], 0, pbl->pg_size);
pbl->pg_count++;
}
} else {
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 9543ce51a28a..c015c1861351 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -64,8 +64,28 @@ static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw)
return !!(pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
}
+static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
+ char *fw_ver)
+{
+ struct cmdq_query_version req;
+ struct creq_query_version_resp resp;
+ u16 cmd_flags = 0;
+ int rc = 0;
+
+ RCFW_CMD_PREP(req, QUERY_VERSION, cmd_flags);
+
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp, NULL, 0);
+ if (rc)
+ return;
+ fw_ver[0] = resp.fw_maj;
+ fw_ver[1] = resp.fw_minor;
+ fw_ver[2] = resp.fw_bld;
+ fw_ver[3] = resp.fw_rsvd;
+}
+
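With this change fw_ver becomes four raw bytes (major, minor, build, reserved — see the qplib_sp.h hunk below) rather than a fixed string, and the strlcpy of "20.6.28.0" in the next hunk goes away. A consumer would format it roughly as below (standalone example, values made up):

	#include <stdio.h>

	int main(void)
	{
		unsigned char fw_ver[4] = { 20, 6, 28, 0 };
		char str[32];

		snprintf(str, sizeof(str), "%u.%u.%u.%u",
			 fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3]);
		puts(str);	/* "20.6.28.0", the string previously hard-coded */
		return 0;
	}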
int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
- struct bnxt_qplib_dev_attr *attr)
+ struct bnxt_qplib_dev_attr *attr, bool vf)
{
struct cmdq_query_func req;
struct creq_query_func_resp resp;
@@ -95,7 +115,8 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
/* Extract the context from the side buffer */
attr->max_qp = le32_to_cpu(sb->max_qp);
/* max_qp value reported by FW for PF doesn't include the QP1 for PF */
- attr->max_qp += 1;
+ if (!vf)
+ attr->max_qp += 1;
attr->max_qp_rd_atom =
sb->max_qp_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_rd_atom;
@@ -133,7 +154,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
attr->l2_db_size = (sb->l2_db_space_size + 1) * PAGE_SIZE;
attr->max_sgid = le32_to_cpu(sb->max_gid);
- strlcpy(attr->fw_ver, "20.6.28.0", sizeof(attr->fw_ver));
+ bnxt_qplib_query_version(rcfw, attr->fw_ver);
for (i = 0; i < MAX_TQM_ALLOC_REQ / 4; i++) {
temp = le32_to_cpu(sb->tqm_alloc_reqs[i]);
@@ -150,6 +171,38 @@ bail:
return rc;
}
+int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_ctx *ctx)
+{
+ struct cmdq_set_func_resources req;
+ struct creq_set_func_resources_resp resp;
+ u16 cmd_flags = 0;
+ int rc = 0;
+
+ RCFW_CMD_PREP(req, SET_FUNC_RESOURCES, cmd_flags);
+
+ req.number_of_qp = cpu_to_le32(ctx->qpc_count);
+ req.number_of_mrw = cpu_to_le32(ctx->mrw_count);
+ req.number_of_srq = cpu_to_le32(ctx->srqc_count);
+ req.number_of_cq = cpu_to_le32(ctx->cq_count);
+
+ req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf);
+ req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf);
+ req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf);
+ req.max_cq_per_vf = cpu_to_le32(ctx->vf_res.max_cq_per_vf);
+ req.max_gid_per_vf = cpu_to_le32(ctx->vf_res.max_gid_per_vf);
+
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp,
+ NULL, 0);
+ if (rc) {
+ dev_err(&res->pdev->dev,
+ "QPLIB: Failed to set function resources");
+ }
+ return rc;
+}
+
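The per-VF maxima programmed here imply that the caller has already divided the PF's pool among its VFs; one plausible shape of that split, purely illustrative and not the driver's actual policy:

	/* e.g. per_vf_share(65536, 4096, 16) == 3840 QPs per VF */
	static unsigned int per_vf_share(unsigned int total,
					 unsigned int pf_reserved,
					 unsigned int num_vf)
	{
		return num_vf ? (total - pf_reserved) / num_vf : 0;
	}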
/* SGID */
int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
@@ -604,7 +657,7 @@ int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
}
int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
- u64 *pbl_tbl, int num_pbls, bool block)
+ u64 *pbl_tbl, int num_pbls, bool block, u32 buf_pg_size)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_register_mr req;
@@ -615,6 +668,9 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
u32 pg_size;
if (num_pbls) {
+ /* Allocate memory for the non-leaf pages to store buf ptrs.
+		 * Non-leaf pages always use the system PAGE_SIZE.
+ */
pg_ptrs = roundup_pow_of_two(num_pbls);
pages = pg_ptrs >> MAX_PBL_LVL_1_PGS_SHIFT;
if (!pages)
@@ -632,6 +688,7 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
bnxt_qplib_free_hwq(res->pdev, &mr->hwq);
mr->hwq.max_elements = pages;
+ /* Use system PAGE_SIZE */
rc = bnxt_qplib_alloc_init_hwq(res->pdev, &mr->hwq, NULL, 0,
&mr->hwq.max_elements,
PAGE_SIZE, 0, PAGE_SIZE,
@@ -652,18 +709,22 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
/* Configure the request */
if (mr->hwq.level == PBL_LVL_MAX) {
+ /* No PBL provided, just use system PAGE_SIZE */
level = 0;
req.pbl = 0;
pg_size = PAGE_SIZE;
} else {
level = mr->hwq.level + 1;
req.pbl = cpu_to_le64(mr->hwq.pbl[PBL_LVL_0].pg_map_arr[0]);
- pg_size = mr->hwq.pbl[PBL_LVL_0].pg_size;
}
+ pg_size = buf_pg_size ? buf_pg_size : PAGE_SIZE;
req.log2_pg_size_lvl = (level << CMDQ_REGISTER_MR_LVL_SFT) |
((ilog2(pg_size) <<
CMDQ_REGISTER_MR_LOG2_PG_SIZE_SFT) &
CMDQ_REGISTER_MR_LOG2_PG_SIZE_MASK);
+ req.log2_pbl_pg_size = cpu_to_le16(((ilog2(PAGE_SIZE) <<
+ CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_SFT) &
+ CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_MASK));
req.access = (mr->flags & 0xFFFF);
req.va = cpu_to_le64(mr->va);
req.key = cpu_to_le32(mr->lkey);
@@ -729,3 +790,73 @@ int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids)
0);
return 0;
}
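The REGISTER_MR hunk above programs page sizes as log2 values, which is why the HSI constants added to roce_hsi.h later in this patch read 0xc for 4K, 0x10 for 64K, and 0x1e for 1G. A standalone check of that encoding (the helper is written out here since ilog2() is a kernel macro):

	#include <stdio.h>

	static unsigned int ilog2_u32(unsigned long v)
	{
		unsigned int r = 0;

		while (v >>= 1)
			r++;
		return r;
	}

	int main(void)
	{
		/* expect 0xc 0x10 0x1e */
		printf("%#x %#x %#x\n", ilog2_u32(4096UL),
		       ilog2_u32(65536UL), ilog2_u32(1UL << 30));
		return 0;
	}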
+
+int bnxt_qplib_get_roce_stats(struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_roce_stats *stats)
+{
+ struct cmdq_query_roce_stats req;
+ struct creq_query_roce_stats_resp resp;
+ struct bnxt_qplib_rcfw_sbuf *sbuf;
+ struct creq_query_roce_stats_resp_sb *sb;
+ u16 cmd_flags = 0;
+ int rc = 0;
+
+ RCFW_CMD_PREP(req, QUERY_ROCE_STATS, cmd_flags);
+
+ sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
+ if (!sbuf) {
+ dev_err(&rcfw->pdev->dev,
+ "QPLIB: SP: QUERY_ROCE_STATS alloc side buffer failed");
+ return -ENOMEM;
+ }
+
+ sb = sbuf->sb;
+ req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
+ (void *)sbuf, 0);
+ if (rc)
+ goto bail;
+ /* Extract the context from the side buffer */
+ stats->to_retransmits = le64_to_cpu(sb->to_retransmits);
+ stats->seq_err_naks_rcvd = le64_to_cpu(sb->seq_err_naks_rcvd);
+ stats->max_retry_exceeded = le64_to_cpu(sb->max_retry_exceeded);
+ stats->rnr_naks_rcvd = le64_to_cpu(sb->rnr_naks_rcvd);
+ stats->missing_resp = le64_to_cpu(sb->missing_resp);
+ stats->unrecoverable_err = le64_to_cpu(sb->unrecoverable_err);
+ stats->bad_resp_err = le64_to_cpu(sb->bad_resp_err);
+ stats->local_qp_op_err = le64_to_cpu(sb->local_qp_op_err);
+ stats->local_protection_err = le64_to_cpu(sb->local_protection_err);
+ stats->mem_mgmt_op_err = le64_to_cpu(sb->mem_mgmt_op_err);
+ stats->remote_invalid_req_err = le64_to_cpu(sb->remote_invalid_req_err);
+ stats->remote_access_err = le64_to_cpu(sb->remote_access_err);
+ stats->remote_op_err = le64_to_cpu(sb->remote_op_err);
+ stats->dup_req = le64_to_cpu(sb->dup_req);
+ stats->res_exceed_max = le64_to_cpu(sb->res_exceed_max);
+ stats->res_length_mismatch = le64_to_cpu(sb->res_length_mismatch);
+ stats->res_exceeds_wqe = le64_to_cpu(sb->res_exceeds_wqe);
+ stats->res_opcode_err = le64_to_cpu(sb->res_opcode_err);
+ stats->res_rx_invalid_rkey = le64_to_cpu(sb->res_rx_invalid_rkey);
+ stats->res_rx_domain_err = le64_to_cpu(sb->res_rx_domain_err);
+ stats->res_rx_no_perm = le64_to_cpu(sb->res_rx_no_perm);
+ stats->res_rx_range_err = le64_to_cpu(sb->res_rx_range_err);
+ stats->res_tx_invalid_rkey = le64_to_cpu(sb->res_tx_invalid_rkey);
+ stats->res_tx_domain_err = le64_to_cpu(sb->res_tx_domain_err);
+ stats->res_tx_no_perm = le64_to_cpu(sb->res_tx_no_perm);
+ stats->res_tx_range_err = le64_to_cpu(sb->res_tx_range_err);
+ stats->res_irrq_oflow = le64_to_cpu(sb->res_irrq_oflow);
+ stats->res_unsup_opcode = le64_to_cpu(sb->res_unsup_opcode);
+ stats->res_unaligned_atomic = le64_to_cpu(sb->res_unaligned_atomic);
+ stats->res_rem_inv_err = le64_to_cpu(sb->res_rem_inv_err);
+ stats->res_mem_error = le64_to_cpu(sb->res_mem_error);
+ stats->res_srq_err = le64_to_cpu(sb->res_srq_err);
+ stats->res_cmp_err = le64_to_cpu(sb->res_cmp_err);
+ stats->res_invalid_dup_rkey = le64_to_cpu(sb->res_invalid_dup_rkey);
+ stats->res_wqe_format_err = le64_to_cpu(sb->res_wqe_format_err);
+ stats->res_cq_load_err = le64_to_cpu(sb->res_cq_load_err);
+ stats->res_srq_load_err = le64_to_cpu(sb->res_srq_load_err);
+ stats->res_tx_pci_err = le64_to_cpu(sb->res_tx_pci_err);
+ stats->res_rx_pci_err = le64_to_cpu(sb->res_rx_pci_err);
+bail:
+ bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
+ return rc;
+}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 11322582f5e4..9d3e8b994945 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -45,7 +45,8 @@
#define PCI_EXP_DEVCTL2_ATOMIC_REQ 0x0040
struct bnxt_qplib_dev_attr {
- char fw_ver[32];
+#define FW_VER_ARR_LEN 4
+ u8 fw_ver[FW_VER_ARR_LEN];
u16 max_sgid;
u16 max_mrw;
u32 max_qp;
@@ -127,6 +128,85 @@ struct bnxt_qplib_frpl {
#define BNXT_QPLIB_ACCESS_ZERO_BASED BIT(5)
#define BNXT_QPLIB_ACCESS_ON_DEMAND BIT(6)
+struct bnxt_qplib_roce_stats {
+ u64 to_retransmits;
+ u64 seq_err_naks_rcvd;
+ /* seq_err_naks_rcvd is 64 b */
+ u64 max_retry_exceeded;
+ /* max_retry_exceeded is 64 b */
+ u64 rnr_naks_rcvd;
+ /* rnr_naks_rcvd is 64 b */
+ u64 missing_resp;
+ u64 unrecoverable_err;
+ /* unrecoverable_err is 64 b */
+ u64 bad_resp_err;
+ /* bad_resp_err is 64 b */
+ u64 local_qp_op_err;
+ /* local_qp_op_err is 64 b */
+ u64 local_protection_err;
+ /* local_protection_err is 64 b */
+ u64 mem_mgmt_op_err;
+ /* mem_mgmt_op_err is 64 b */
+ u64 remote_invalid_req_err;
+ /* remote_invalid_req_err is 64 b */
+ u64 remote_access_err;
+ /* remote_access_err is 64 b */
+ u64 remote_op_err;
+ /* remote_op_err is 64 b */
+ u64 dup_req;
+ /* dup_req is 64 b */
+ u64 res_exceed_max;
+ /* res_exceed_max is 64 b */
+ u64 res_length_mismatch;
+ /* res_length_mismatch is 64 b */
+ u64 res_exceeds_wqe;
+ /* res_exceeds_wqe is 64 b */
+ u64 res_opcode_err;
+ /* res_opcode_err is 64 b */
+ u64 res_rx_invalid_rkey;
+ /* res_rx_invalid_rkey is 64 b */
+ u64 res_rx_domain_err;
+ /* res_rx_domain_err is 64 b */
+ u64 res_rx_no_perm;
+ /* res_rx_no_perm is 64 b */
+ u64 res_rx_range_err;
+ /* res_rx_range_err is 64 b */
+ u64 res_tx_invalid_rkey;
+ /* res_tx_invalid_rkey is 64 b */
+ u64 res_tx_domain_err;
+ /* res_tx_domain_err is 64 b */
+ u64 res_tx_no_perm;
+ /* res_tx_no_perm is 64 b */
+ u64 res_tx_range_err;
+ /* res_tx_range_err is 64 b */
+ u64 res_irrq_oflow;
+ /* res_irrq_oflow is 64 b */
+ u64 res_unsup_opcode;
+ /* res_unsup_opcode is 64 b */
+ u64 res_unaligned_atomic;
+ /* res_unaligned_atomic is 64 b */
+ u64 res_rem_inv_err;
+ /* res_rem_inv_err is 64 b */
+ u64 res_mem_error;
+ /* res_mem_error is 64 b */
+ u64 res_srq_err;
+ /* res_srq_err is 64 b */
+ u64 res_cmp_err;
+ /* res_cmp_err is 64 b */
+ u64 res_invalid_dup_rkey;
+ /* res_invalid_dup_rkey is 64 b */
+ u64 res_wqe_format_err;
+ /* res_wqe_format_err is 64 b */
+ u64 res_cq_load_err;
+ /* res_cq_load_err is 64 b */
+ u64 res_srq_load_err;
+ /* res_srq_load_err is 64 b */
+ u64 res_tx_pci_err;
+ /* res_tx_pci_err is 64 b */
+ u64 res_rx_pci_err;
+ /* res_rx_pci_err is 64 b */
+};
+
int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
struct bnxt_qplib_gid *gid);
@@ -147,7 +227,10 @@ int bnxt_qplib_add_pkey(struct bnxt_qplib_res *res,
struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey,
bool update);
int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
- struct bnxt_qplib_dev_attr *attr);
+ struct bnxt_qplib_dev_attr *attr, bool vf);
+int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_ctx *ctx);
int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah);
int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah);
int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res,
@@ -155,7 +238,7 @@ int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res,
int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
bool block);
int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
- u64 *pbl_tbl, int num_pbls, bool block);
+ u64 *pbl_tbl, int num_pbls, bool block, u32 buf_pg_size);
int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr);
int bnxt_qplib_alloc_fast_reg_mr(struct bnxt_qplib_res *res,
struct bnxt_qplib_mrw *mr, int max);
@@ -164,4 +247,6 @@ int bnxt_qplib_alloc_fast_reg_page_list(struct bnxt_qplib_res *res,
int bnxt_qplib_free_fast_reg_page_list(struct bnxt_qplib_res *res,
struct bnxt_qplib_frpl *frpl);
int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids);
+int bnxt_qplib_get_roce_stats(struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_roce_stats *stats);
#endif /* __BNXT_QPLIB_SP_H__*/
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index c3cba6063a03..2d7ea096a247 100644
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -954,6 +954,7 @@ struct cmdq_base {
#define CMDQ_BASE_OPCODE_QUERY_VERSION 0x8bUL
#define CMDQ_BASE_OPCODE_MODIFY_CC 0x8cUL
#define CMDQ_BASE_OPCODE_QUERY_CC 0x8dUL
+ #define CMDQ_BASE_OPCODE_QUERY_ROCE_STATS 0x8eUL
u8 cmd_size;
__le16 flags;
__le16 cookie;
@@ -1383,8 +1384,20 @@ struct cmdq_register_mr {
#define CMDQ_REGISTER_MR_LVL_LVL_0 0x0UL
#define CMDQ_REGISTER_MR_LVL_LVL_1 0x1UL
#define CMDQ_REGISTER_MR_LVL_LVL_2 0x2UL
+ #define CMDQ_REGISTER_MR_LVL_LAST CMDQ_REGISTER_MR_LVL_LVL_2
#define CMDQ_REGISTER_MR_LOG2_PG_SIZE_MASK 0x7cUL
#define CMDQ_REGISTER_MR_LOG2_PG_SIZE_SFT 2
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_4K (0xcUL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_8K (0xdUL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_64K (0x10UL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_256K (0x12UL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_1M (0x14UL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_2M (0x15UL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_4M (0x16UL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_1G (0x1eUL << 2)
+ #define CMDQ_REGISTER_MR_LOG2_PG_SIZE_LAST \
+ CMDQ_REGISTER_MR_LOG2_PG_SIZE_PG_1G
+ #define CMDQ_REGISTER_MR_UNUSED1 0x80UL
u8 access;
#define CMDQ_REGISTER_MR_ACCESS_LOCAL_WRITE 0x1UL
#define CMDQ_REGISTER_MR_ACCESS_REMOTE_READ 0x2UL
@@ -1392,7 +1405,21 @@ struct cmdq_register_mr {
#define CMDQ_REGISTER_MR_ACCESS_REMOTE_ATOMIC 0x8UL
#define CMDQ_REGISTER_MR_ACCESS_MW_BIND 0x10UL
#define CMDQ_REGISTER_MR_ACCESS_ZERO_BASED 0x20UL
- __le16 unused_1;
+ __le16 log2_pbl_pg_size;
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_MASK 0x1fUL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_SFT 0
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4K 0xcUL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_8K 0xdUL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_64K 0x10UL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256K 0x12UL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1M 0x14UL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_2M 0x15UL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4M 0x16UL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G 0x1eUL
+ #define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_LAST \
+ CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G
+ #define CMDQ_REGISTER_MR_UNUSED11_MASK 0xffe0UL
+ #define CMDQ_REGISTER_MR_UNUSED11_SFT 5
__le32 key;
__le64 pbl;
__le64 va;
@@ -1799,6 +1826,16 @@ struct cmdq_set_func_resources {
u8 resp_size;
u8 reserved8;
__le64 resp_addr;
+ __le32 number_of_qp;
+ __le32 number_of_mrw;
+ __le32 number_of_srq;
+ __le32 number_of_cq;
+ __le32 max_qp_per_vf;
+ __le32 max_mrw_per_vf;
+ __le32 max_srq_per_vf;
+ __le32 max_cq_per_vf;
+ __le32 max_gid_per_vf;
+ __le32 stat_ctx_id;
};
/* Read hardware resource context command (24 bytes) */
@@ -2013,6 +2050,20 @@ struct creq_modify_qp_resp {
__le16 reserved48[3];
};
+/* cmdq_query_roce_stats (size:128b/16B) */
+struct cmdq_query_roce_stats {
+ u8 opcode;
+ #define CMDQ_QUERY_ROCE_STATS_OPCODE_QUERY_ROCE_STATS 0x8eUL
+ #define CMDQ_QUERY_ROCE_STATS_OPCODE_LAST \
+ CMDQ_QUERY_ROCE_STATS_OPCODE_QUERY_ROCE_STATS
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+};
+
/* Query QP command response (16 bytes) */
struct creq_query_qp_resp {
u8 type;
@@ -2783,6 +2834,80 @@ struct creq_query_cc_resp_sb {
__le64 reserved64_1;
};
+/* creq_query_roce_stats_resp (size:128b/16B) */
+struct creq_query_roce_stats_resp {
+ u8 type;
+ #define CREQ_QUERY_ROCE_STATS_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_QUERY_ROCE_STATS_RESP_TYPE_SFT 0
+ #define CREQ_QUERY_ROCE_STATS_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_QUERY_ROCE_STATS_RESP_TYPE_LAST \
+ CREQ_QUERY_ROCE_STATS_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 size;
+ u8 v;
+ #define CREQ_QUERY_ROCE_STATS_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_QUERY_ROCE_STATS_RESP_EVENT_QUERY_ROCE_STATS 0x8eUL
+ #define CREQ_QUERY_ROCE_STATS_RESP_EVENT_LAST \
+ CREQ_QUERY_ROCE_STATS_RESP_EVENT_QUERY_ROCE_STATS
+ u8 reserved48[6];
+};
+
+/* creq_query_roce_stats_resp_sb (size:2624b/328B) */
+struct creq_query_roce_stats_resp_sb {
+ u8 opcode;
+ #define CREQ_QUERY_ROCE_STATS_RESP_SB_OPCODE_QUERY_ROCE_STATS 0x8eUL
+ #define CREQ_QUERY_ROCE_STATS_RESP_SB_OPCODE_LAST \
+ CREQ_QUERY_ROCE_STATS_RESP_SB_OPCODE_QUERY_ROCE_STATS
+ u8 status;
+ __le16 cookie;
+ __le16 flags;
+ u8 resp_size;
+ u8 rsvd;
+ __le32 num_counters;
+ __le32 rsvd1;
+ __le64 to_retransmits;
+ __le64 seq_err_naks_rcvd;
+ __le64 max_retry_exceeded;
+ __le64 rnr_naks_rcvd;
+ __le64 missing_resp;
+ __le64 unrecoverable_err;
+ __le64 bad_resp_err;
+ __le64 local_qp_op_err;
+ __le64 local_protection_err;
+ __le64 mem_mgmt_op_err;
+ __le64 remote_invalid_req_err;
+ __le64 remote_access_err;
+ __le64 remote_op_err;
+ __le64 dup_req;
+ __le64 res_exceed_max;
+ __le64 res_length_mismatch;
+ __le64 res_exceeds_wqe;
+ __le64 res_opcode_err;
+ __le64 res_rx_invalid_rkey;
+ __le64 res_rx_domain_err;
+ __le64 res_rx_no_perm;
+ __le64 res_rx_range_err;
+ __le64 res_tx_invalid_rkey;
+ __le64 res_tx_domain_err;
+ __le64 res_tx_no_perm;
+ __le64 res_tx_range_err;
+ __le64 res_irrq_oflow;
+ __le64 res_unsup_opcode;
+ __le64 res_unaligned_atomic;
+ __le64 res_rem_inv_err;
+ __le64 res_mem_error;
+ __le64 res_srq_err;
+ __le64 res_cmp_err;
+ __le64 res_invalid_dup_rkey;
+ __le64 res_wqe_format_err;
+ __le64 res_cq_load_err;
+ __le64 res_srq_load_err;
+ __le64 res_tx_pci_err;
+ __le64 res_rx_pci_err;
+};
+
/* QP error notification event (16 bytes) */
struct creq_qp_error_notification {
u8 type;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 21db3b48a617..4cf17c650c36 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -257,8 +257,8 @@ static void set_emss(struct c4iw_ep *ep, u16 opt)
if (ep->emss < 128)
ep->emss = 128;
if (ep->emss & 7)
- pr_warn("Warning: misaligned mtu idx %u mss %u emss=%u\n",
- TCPOPT_MSS_G(opt), ep->mss, ep->emss);
+ pr_debug("Warning: misaligned mtu idx %u mss %u emss=%u\n",
+ TCPOPT_MSS_G(opt), ep->mss, ep->emss);
pr_debug("mss_idx %u mss %u emss=%u\n", TCPOPT_MSS_G(opt), ep->mss,
ep->emss);
}
@@ -2733,9 +2733,8 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
if (cxgb_is_neg_adv(req->status)) {
- pr_warn("%s Negative advice on abort- tid %u status %d (%s)\n",
- __func__, ep->hwtid, req->status,
- neg_adv_str(req->status));
+ pr_debug("Negative advice on abort- tid %u status %d (%s)\n",
+ ep->hwtid, req->status, neg_adv_str(req->status));
ep->stats.abort_neg_adv++;
mutex_lock(&dev->rdev.stats.lock);
dev->rdev.stats.neg_adv++;
@@ -3567,8 +3566,8 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
case MORIBUND:
case ABORTING:
case DEAD:
- pr_info("%s ignoring disconnect ep %p state %u\n",
- __func__, ep, ep->com.state);
+ pr_debug("ignoring disconnect ep %p state %u\n",
+ ep, ep->com.state);
break;
default:
WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state);
@@ -4097,9 +4096,15 @@ static void process_work(struct work_struct *work)
dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
opcode = rpl->ot.opcode;
- ret = work_handlers[opcode](dev, skb);
- if (!ret)
+ if (opcode >= ARRAY_SIZE(work_handlers) ||
+ !work_handlers[opcode]) {
+ pr_err("No handler for opcode 0x%x.\n", opcode);
kfree_skb(skb);
+ } else {
+ ret = work_handlers[opcode](dev, skb);
+ if (!ret)
+ kfree_skb(skb);
+ }
process_timedout_eps();
}
}
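The process_work() fix above is the usual bounds-checked table dispatch; the pattern extracted, with invented names:

	#include <stddef.h>

	typedef int (*handler_fn)(void *arg);

	/* Return -1 for an unknown opcode so the caller can drop the
	 * work item instead of jumping through a NULL or wild pointer. */
	static int dispatch(const handler_fn *table, size_t n,
			    unsigned int op, void *arg)
	{
		if (op >= n || !table[op])
			return -1;
		return table[op](arg);
	}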
@@ -4201,8 +4206,8 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
}
if (cxgb_is_neg_adv(req->status)) {
- pr_warn("%s Negative advice on abort- tid %u status %d (%s)\n",
- __func__, ep->hwtid, req->status,
+ pr_debug("Negative advice on abort- tid %u status %d (%s)\n",
+ ep->hwtid, req->status,
neg_adv_str(req->status));
goto out;
}
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index af77d128d242..7a9d0de89d6a 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -66,7 +66,7 @@ MODULE_PARM_DESC(c4iw_wr_log_size_order,
static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);
-struct workqueue_struct *reg_workq;
+static struct workqueue_struct *reg_workq;
#define DB_FC_RESUME_SIZE 64
#define DB_FC_RESUME_DELAY 1
@@ -108,19 +108,19 @@ void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe)
idx = (atomic_inc_return(&wq->rdev->wr_log_idx) - 1) &
(wq->rdev->wr_log_size - 1);
le.poll_sge_ts = cxgb4_read_sge_timestamp(wq->rdev->lldi.ports[0]);
- getnstimeofday(&le.poll_host_ts);
+ le.poll_host_time = ktime_get();
le.valid = 1;
le.cqe_sge_ts = CQE_TS(cqe);
if (SQ_TYPE(cqe)) {
le.qid = wq->sq.qid;
le.opcode = CQE_OPCODE(cqe);
- le.post_host_ts = wq->sq.sw_sq[wq->sq.cidx].host_ts;
+ le.post_host_time = wq->sq.sw_sq[wq->sq.cidx].host_time;
le.post_sge_ts = wq->sq.sw_sq[wq->sq.cidx].sge_ts;
le.wr_id = CQE_WRID_SQ_IDX(cqe);
} else {
le.qid = wq->rq.qid;
le.opcode = FW_RI_RECEIVE;
- le.post_host_ts = wq->rq.sw_rq[wq->rq.cidx].host_ts;
+ le.post_host_time = wq->rq.sw_rq[wq->rq.cidx].host_time;
le.post_sge_ts = wq->rq.sw_rq[wq->rq.cidx].sge_ts;
le.wr_id = CQE_WRID_MSN(cqe);
}
@@ -130,9 +130,9 @@ void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe)
static int wr_log_show(struct seq_file *seq, void *v)
{
struct c4iw_dev *dev = seq->private;
- struct timespec prev_ts = {0, 0};
+ ktime_t prev_time;
struct wr_log_entry *lep;
- int prev_ts_set = 0;
+ int prev_time_set = 0;
int idx, end;
#define ts2ns(ts) div64_u64((ts) * dev->rdev.lldi.cclk_ps, 1000)
@@ -145,33 +145,29 @@ static int wr_log_show(struct seq_file *seq, void *v)
lep = &dev->rdev.wr_log[idx];
while (idx != end) {
if (lep->valid) {
- if (!prev_ts_set) {
- prev_ts_set = 1;
- prev_ts = lep->poll_host_ts;
+ if (!prev_time_set) {
+ prev_time_set = 1;
+ prev_time = lep->poll_host_time;
}
- seq_printf(seq, "%04u: sec %lu nsec %lu qid %u opcode "
- "%u %s 0x%x host_wr_delta sec %lu nsec %lu "
+ seq_printf(seq, "%04u: nsec %llu qid %u opcode "
+ "%u %s 0x%x host_wr_delta nsec %llu "
"post_sge_ts 0x%llx cqe_sge_ts 0x%llx "
"poll_sge_ts 0x%llx post_poll_delta_ns %llu "
"cqe_poll_delta_ns %llu\n",
idx,
- timespec_sub(lep->poll_host_ts,
- prev_ts).tv_sec,
- timespec_sub(lep->poll_host_ts,
- prev_ts).tv_nsec,
+ ktime_to_ns(ktime_sub(lep->poll_host_time,
+ prev_time)),
lep->qid, lep->opcode,
lep->opcode == FW_RI_RECEIVE ?
"msn" : "wrid",
lep->wr_id,
- timespec_sub(lep->poll_host_ts,
- lep->post_host_ts).tv_sec,
- timespec_sub(lep->poll_host_ts,
- lep->post_host_ts).tv_nsec,
+ ktime_to_ns(ktime_sub(lep->poll_host_time,
+ lep->post_host_time)),
lep->post_sge_ts, lep->cqe_sge_ts,
lep->poll_sge_ts,
ts2ns(lep->poll_sge_ts - lep->post_sge_ts),
ts2ns(lep->poll_sge_ts - lep->cqe_sge_ts));
- prev_ts = lep->poll_host_ts;
+ prev_time = lep->poll_host_time;
}
idx++;
if (idx > (dev->rdev.wr_log_size - 1))
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index a252d5c40ae3..3e9d8b277ab9 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -236,7 +236,7 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
if (atomic_dec_and_test(&chp->refcnt))
wake_up(&chp->wait);
} else {
- pr_warn("%s unknown cqid 0x%x\n", __func__, qid);
+ pr_debug("unknown cqid 0x%x\n", qid);
spin_unlock_irqrestore(&dev->lock, flag);
}
return 0;
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 65dd3726ca02..cc929002c05e 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -153,8 +153,8 @@ struct c4iw_hw_queue {
};
struct wr_log_entry {
- struct timespec post_host_ts;
- struct timespec poll_host_ts;
+ ktime_t post_host_time;
+ ktime_t poll_host_time;
u64 post_sge_ts;
u64 cqe_sge_ts;
u64 poll_sge_ts;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index d5c92fc520d6..de77b6027d69 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1042,7 +1042,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
if (c4iw_wr_log) {
swsqe->sge_ts = cxgb4_read_sge_timestamp(
qhp->rhp->rdev.lldi.ports[0]);
- getnstimeofday(&swsqe->host_ts);
+ swsqe->host_time = ktime_get();
}
init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
@@ -1117,8 +1117,8 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].sge_ts =
cxgb4_read_sge_timestamp(
qhp->rhp->rdev.lldi.ports[0]);
- getnstimeofday(
- &qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_ts);
+ qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_time =
+ ktime_get();
}
wqe->recv.opcode = FW_RI_RECV_WR;
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 79e8ee12c391..8369c7c8de83 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -277,7 +277,7 @@ struct t4_swsqe {
int signaled;
u16 idx;
int flushed;
- struct timespec host_ts;
+ ktime_t host_time;
u64 sge_ts;
};
@@ -318,7 +318,7 @@ struct t4_sq {
struct t4_swrqe {
u64 wr_id;
- struct timespec host_ts;
+ ktime_t host_time;
u64 sge_ts;
};
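The timespec -> ktime_t conversion in these cxgb4 structures works because ktime_t is a signed 64-bit nanosecond count, so each logged delta needs one subtraction instead of two timespec_sub() calls plus separate sec/nsec printing. The shape of the pattern with the real ktime helpers:

	ktime_t start = ktime_get();
	/* ... the interval being measured ... */
	ktime_t end = ktime_get();
	s64 delta_ns = ktime_to_ns(ktime_sub(end, start));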
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 4f057e8ffe50..6660f920f42e 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -6518,11 +6518,12 @@ static void _dc_start(struct hfi1_devdata *dd)
if (!dd->dc_shutdown)
return;
- /*
- * Take the 8051 out of reset, wait until 8051 is ready, and set host
- * version bit.
- */
- release_and_wait_ready_8051_firmware(dd);
+ /* Take the 8051 out of reset */
+ write_csr(dd, DC_DC8051_CFG_RST, 0ull);
+ /* Wait until 8051 is ready */
+ if (wait_fm_ready(dd, TIMEOUT_8051_START))
+ dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
+ __func__);
/* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
write_csr(dd, DCC_CFG_RESET, 0x10);
@@ -8564,23 +8565,27 @@ int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
}
/*
- * If the 8051 is in reset mode (dd->dc_shutdown == 1), this function
- * will still continue executing.
- *
* Returns:
* < 0 = Linux error, not able to get access
* > 0 = 8051 command RETURN_CODE
*/
-static int _do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
- u64 *out_data)
+static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
+ u64 *out_data)
{
u64 reg, completed;
int return_code;
unsigned long timeout;
- lockdep_assert_held(&dd->dc8051_lock);
hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
+ mutex_lock(&dd->dc8051_lock);
+
+ /* We can't send any commands to the 8051 if it's in reset */
+ if (dd->dc_shutdown) {
+ return_code = -ENODEV;
+ goto fail;
+ }
+
/*
* If an 8051 host command timed out previously, then the 8051 is
* stuck.
@@ -8681,29 +8686,6 @@ static int _do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
fail:
- return return_code;
-}
-
-/*
- * Returns:
- * < 0 = Linux error, not able to get access
- * > 0 = 8051 command RETURN_CODE
- */
-static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
- u64 *out_data)
-{
- int return_code;
-
- mutex_lock(&dd->dc8051_lock);
- /* We can't send any commands to the 8051 if it's in reset */
- if (dd->dc_shutdown) {
- return_code = -ENODEV;
- goto fail;
- }
-
- return_code = _do_8051_command(dd, type, in_data, out_data);
-
-fail:
mutex_unlock(&dd->dc8051_lock);
return return_code;
}
@@ -8713,17 +8695,16 @@ static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
}
-static int _load_8051_config(struct hfi1_devdata *dd, u8 field_id,
- u8 lane_id, u32 config_data)
+int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
+ u8 lane_id, u32 config_data)
{
u64 data;
int ret;
- lockdep_assert_held(&dd->dc8051_lock);
data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
| (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
| (u64)config_data << LOAD_DATA_DATA_SHIFT;
- ret = _do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
+ ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
if (ret != HCMD_SUCCESS) {
dd_dev_err(dd,
"load 8051 config: field id %d, lane %d, err %d\n",
@@ -8732,18 +8713,6 @@ static int _load_8051_config(struct hfi1_devdata *dd, u8 field_id,
return ret;
}
-int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
- u8 lane_id, u32 config_data)
-{
- int return_code;
-
- mutex_lock(&dd->dc8051_lock);
- return_code = _load_8051_config(dd, field_id, lane_id, config_data);
- mutex_unlock(&dd->dc8051_lock);
-
- return return_code;
-}
-
/*
* Read the 8051 firmware "registers". Use the RAM directly. Always
* set the result, even on error.
@@ -8859,14 +8828,13 @@ int write_host_interface_version(struct hfi1_devdata *dd, u8 version)
u32 frame;
u32 mask;
- lockdep_assert_held(&dd->dc8051_lock);
mask = (HOST_INTERFACE_VERSION_MASK << HOST_INTERFACE_VERSION_SHIFT);
read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame);
/* Clear, then set field */
frame &= ~mask;
frame |= ((u32)version << HOST_INTERFACE_VERSION_SHIFT);
- return _load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
- frame);
+ return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
+ frame);
}
void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
@@ -9270,6 +9238,14 @@ static int set_local_link_attributes(struct hfi1_pportdata *ppd)
if (ret != HCMD_SUCCESS)
goto set_local_link_attributes_fail;
+ ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
+ if (ret != HCMD_SUCCESS) {
+ dd_dev_err(dd,
+ "Failed to set host interface version, return 0x%x\n",
+ ret);
+ goto set_local_link_attributes_fail;
+ }
+
/*
* DC supports continuous updates.
*/
@@ -14944,9 +14920,8 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
num_vls > HFI1_MAX_VLS_SUPPORTED) {
- hfi1_early_err(&pdev->dev,
- "Invalid num_vls %u, using %u VLs\n",
- num_vls, HFI1_MAX_VLS_SUPPORTED);
+ dd_dev_err(dd, "Invalid num_vls %u, using %u VLs\n",
+ num_vls, HFI1_MAX_VLS_SUPPORTED);
num_vls = HFI1_MAX_VLS_SUPPORTED;
}
ppd->vls_supported = num_vls;
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index 133e313feca4..21fca8ec5076 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -508,6 +508,7 @@
#define DOWN_REMOTE_REASON_SHIFT 16
#define DOWN_REMOTE_REASON_MASK 0xff
+#define HOST_INTERFACE_VERSION 1
#define HOST_INTERFACE_VERSION_SHIFT 16
#define HOST_INTERFACE_VERSION_MASK 0xff
@@ -713,7 +714,6 @@ void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
u8 *ver_patch);
int write_host_interface_version(struct hfi1_devdata *dd, u8 version);
void read_guid(struct hfi1_devdata *dd);
-int release_and_wait_ready_8051_firmware(struct hfi1_devdata *dd);
int wait_fm_ready(struct hfi1_devdata *dd, u32 mstimeout);
void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
u8 neigh_reason, u8 rem_reason);
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index 4f65ac671044..067b29f35f21 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -159,22 +159,6 @@ static int hfi1_caps_get(char *buffer, const struct kernel_param *kp)
return scnprintf(buffer, PAGE_SIZE, "0x%lx", cap_mask);
}
-const char *get_unit_name(int unit)
-{
- static char iname[16];
-
- snprintf(iname, sizeof(iname), DRIVER_NAME "_%u", unit);
- return iname;
-}
-
-const char *get_card_name(struct rvt_dev_info *rdi)
-{
- struct hfi1_ibdev *ibdev = container_of(rdi, struct hfi1_ibdev, rdi);
- struct hfi1_devdata *dd = container_of(ibdev,
- struct hfi1_devdata, verbs_dev);
- return get_unit_name(dd->unit);
-}
-
struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi)
{
struct hfi1_ibdev *ibdev = container_of(rdi, struct hfi1_ibdev, rdi);
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 1df7da47f431..bd6f03cc5ee0 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -74,7 +74,7 @@
static int hfi1_file_open(struct inode *inode, struct file *fp);
static int hfi1_file_close(struct inode *inode, struct file *fp);
static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from);
-static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt);
+static __poll_t hfi1_poll(struct file *fp, struct poll_table_struct *pt);
static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma);
static u64 kvirt_to_phys(void *addr);
@@ -102,8 +102,8 @@ static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
struct hfi1_user_info *uinfo,
struct hfi1_ctxtdata **cd);
static void deallocate_ctxt(struct hfi1_ctxtdata *uctxt);
-static unsigned int poll_urgent(struct file *fp, struct poll_table_struct *pt);
-static unsigned int poll_next(struct file *fp, struct poll_table_struct *pt);
+static __poll_t poll_urgent(struct file *fp, struct poll_table_struct *pt);
+static __poll_t poll_next(struct file *fp, struct poll_table_struct *pt);
static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
unsigned long arg);
static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned long arg);
@@ -607,10 +607,10 @@ static int vma_fault(struct vm_fault *vmf)
return 0;
}
-static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt)
+static __poll_t hfi1_poll(struct file *fp, struct poll_table_struct *pt)
{
struct hfi1_ctxtdata *uctxt;
- unsigned pollflag;
+ __poll_t pollflag;
uctxt = ((struct hfi1_filedata *)fp->private_data)->uctxt;
if (!uctxt)
@@ -1425,13 +1425,13 @@ static int user_exp_rcv_invalid(struct hfi1_filedata *fd, unsigned long arg,
return ret;
}
-static unsigned int poll_urgent(struct file *fp,
+static __poll_t poll_urgent(struct file *fp,
struct poll_table_struct *pt)
{
struct hfi1_filedata *fd = fp->private_data;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_devdata *dd = uctxt->dd;
- unsigned pollflag;
+ __poll_t pollflag;
poll_wait(fp, &uctxt->wait, pt);
@@ -1448,13 +1448,13 @@ static unsigned int poll_urgent(struct file *fp,
return pollflag;
}
-static unsigned int poll_next(struct file *fp,
+static __poll_t poll_next(struct file *fp,
struct poll_table_struct *pt)
{
struct hfi1_filedata *fd = fp->private_data;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_devdata *dd = uctxt->dd;
- unsigned pollflag;
+ __poll_t pollflag;
poll_wait(fp, &uctxt->wait, pt);
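The unsigned int -> __poll_t conversions in this file let sparse verify that poll masks are built only from the EPOLL* constants; a converted handler ends up shaped like this (the wait queue and readiness test are placeholders):

	static __poll_t example_poll(struct file *fp,
				     struct poll_table_struct *pt)
	{
		__poll_t mask = 0;

		poll_wait(fp, &example_wq, pt);	/* example_wq: placeholder */
		if (example_data_ready())	/* placeholder predicate */
			mask |= EPOLLIN | EPOLLRDNORM;
		return mask;
	}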
diff --git a/drivers/infiniband/hw/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c
index 98868df78a7e..2b57ba70ddd6 100644
--- a/drivers/infiniband/hw/hfi1/firmware.c
+++ b/drivers/infiniband/hw/hfi1/firmware.c
@@ -68,7 +68,6 @@
#define ALT_FW_FABRIC_NAME "hfi1_fabric_d.fw"
#define ALT_FW_SBUS_NAME "hfi1_sbus_d.fw"
#define ALT_FW_PCIE_NAME "hfi1_pcie_d.fw"
-#define HOST_INTERFACE_VERSION 1
MODULE_FIRMWARE(DEFAULT_FW_8051_NAME_ASIC);
MODULE_FIRMWARE(DEFAULT_FW_FABRIC_NAME);
@@ -976,46 +975,6 @@ int wait_fm_ready(struct hfi1_devdata *dd, u32 mstimeout)
}
/*
- * Clear all reset bits, releasing the 8051.
- * Wait for firmware to be ready to accept host requests.
- * Then, set host version bit.
- *
- * This function executes even if the 8051 is in reset mode when
- * dd->dc_shutdown == 1.
- *
- * Expects dd->dc8051_lock to be held.
- */
-int release_and_wait_ready_8051_firmware(struct hfi1_devdata *dd)
-{
- int ret;
-
- lockdep_assert_held(&dd->dc8051_lock);
- /* clear all reset bits, releasing the 8051 */
- write_csr(dd, DC_DC8051_CFG_RST, 0ull);
-
- /*
- * Wait for firmware to be ready to accept host
- * requests.
- */
- ret = wait_fm_ready(dd, TIMEOUT_8051_START);
- if (ret) {
- dd_dev_err(dd, "8051 start timeout, current FW state 0x%x\n",
- get_firmware_state(dd));
- return ret;
- }
-
- ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
- if (ret != HCMD_SUCCESS) {
- dd_dev_err(dd,
- "Failed to set host interface version, return 0x%x\n",
- ret);
- return -EIO;
- }
-
- return 0;
-}
-
-/*
* Load the 8051 firmware.
*/
static int load_8051_firmware(struct hfi1_devdata *dd,
@@ -1080,22 +1039,31 @@ static int load_8051_firmware(struct hfi1_devdata *dd,
if (ret)
return ret;
+ /* clear all reset bits, releasing the 8051 */
+ write_csr(dd, DC_DC8051_CFG_RST, 0ull);
+
/*
- * Clear all reset bits, releasing the 8051.
* DC reset step 5. Wait for firmware to be ready to accept host
* requests.
- * Then, set host version bit.
*/
- mutex_lock(&dd->dc8051_lock);
- ret = release_and_wait_ready_8051_firmware(dd);
- mutex_unlock(&dd->dc8051_lock);
- if (ret)
- return ret;
+ ret = wait_fm_ready(dd, TIMEOUT_8051_START);
+ if (ret) { /* timed out */
+ dd_dev_err(dd, "8051 start timeout, current state 0x%x\n",
+ get_firmware_state(dd));
+ return -ETIMEDOUT;
+ }
read_misc_status(dd, &ver_major, &ver_minor, &ver_patch);
dd_dev_info(dd, "8051 firmware version %d.%d.%d\n",
(int)ver_major, (int)ver_minor, (int)ver_patch);
dd->dc8051_ver = dc8051_ver(ver_major, ver_minor, ver_patch);
+ ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
+ if (ret != HCMD_SUCCESS) {
+ dd_dev_err(dd,
+ "Failed to set host interface version, return 0x%x\n",
+ ret);
+ return -EIO;
+ }
return 0;
}
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index 8ce9118d4a7f..b42c22292597 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -1623,7 +1623,7 @@ static int ingress_pkey_table_search(struct hfi1_pportdata *ppd, u16 pkey)
* the 'error info' for this failure.
*/
static void ingress_pkey_table_fail(struct hfi1_pportdata *ppd, u16 pkey,
- u16 slid)
+ u32 slid)
{
struct hfi1_devdata *dd = ppd->dd;
@@ -1971,8 +1971,6 @@ int get_platform_config_field(struct hfi1_devdata *dd,
table_type, int table_index, int field_index,
u32 *data, u32 len);
-const char *get_unit_name(int unit);
-const char *get_card_name(struct rvt_dev_info *rdi);
struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi);
/*
@@ -2122,39 +2120,42 @@ static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
#define dd_dev_emerg(dd, fmt, ...) \
dev_emerg(&(dd)->pcidev->dev, "%s: " fmt, \
- get_unit_name((dd)->unit), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
#define dd_dev_err(dd, fmt, ...) \
dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
- get_unit_name((dd)->unit), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
#define dd_dev_err_ratelimited(dd, fmt, ...) \
dev_err_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
- get_unit_name((dd)->unit), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), \
+ ##__VA_ARGS__)
#define dd_dev_warn(dd, fmt, ...) \
dev_warn(&(dd)->pcidev->dev, "%s: " fmt, \
- get_unit_name((dd)->unit), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
#define dd_dev_warn_ratelimited(dd, fmt, ...) \
dev_warn_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
- get_unit_name((dd)->unit), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), \
+ ##__VA_ARGS__)
#define dd_dev_info(dd, fmt, ...) \
dev_info(&(dd)->pcidev->dev, "%s: " fmt, \
- get_unit_name((dd)->unit), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
#define dd_dev_info_ratelimited(dd, fmt, ...) \
dev_info_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
- get_unit_name((dd)->unit), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), \
+ ##__VA_ARGS__)
#define dd_dev_dbg(dd, fmt, ...) \
dev_dbg(&(dd)->pcidev->dev, "%s: " fmt, \
- get_unit_name((dd)->unit), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
#define hfi1_dev_porterr(dd, port, fmt, ...) \
dev_err(&(dd)->pcidev->dev, "%s: port %u: " fmt, \
- get_unit_name((dd)->unit), (port), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), (port), ##__VA_ARGS__)
/*
* this is used for formatting hw error messages...
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 8e3b3e7d829a..9b128268fb28 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -1272,6 +1272,8 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
"Could not allocate unit ID: error %d\n", -ret);
goto bail;
}
+ rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit);
+
/*
* Initialize all locks for the device. This needs to be as early as
* possible so locks are usable.
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index cf8dba34fe30..34547a48a445 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -4348,11 +4348,7 @@ static int opa_local_smp_check(struct hfi1_ibport *ibp,
*/
if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY)
return 0;
- /*
- * On OPA devices it is okay to lose the upper 16 bits of LID as this
- * information is obtained elsewhere. Mask off the upper 16 bits.
- */
- ingress_pkey_table_fail(ppd, pkey, ib_lid_cpu16(0xFFFF & in_wc->slid));
+ ingress_pkey_table_fail(ppd, pkey, in_wc->slid);
return 1;
}
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 4b01ccd895b4..5507910e8b8a 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -556,6 +556,8 @@ void qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter)
struct sdma_engine *sde;
struct send_context *send_context;
struct rvt_ack_entry *e = NULL;
+ struct rvt_srq *srq = qp->ibqp.srq ?
+ ibsrq_to_rvtsrq(qp->ibqp.srq) : NULL;
sde = qp_to_sdma_engine(qp, priv->s_sc);
wqe = rvt_get_swqe_ptr(qp, qp->s_last);
@@ -563,7 +565,7 @@ void qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter)
if (qp->s_ack_queue)
e = &qp->s_ack_queue[qp->s_tail_ack_queue];
seq_printf(s,
- "N %d %s QP %x R %u %s %u %u %u f=%x %u %u %u %u %u %u SPSN %x %x %x %x %x RPSN %x S(%u %u %u %u %u %u %u) R(%u %u %u) RQP %x LID %x SL %u MTU %u %u %u %u %u SDE %p,%u SC %p,%u SCQ %u %u PID %d OS %x %x E %x %x %x\n",
+ "N %d %s QP %x R %u %s %u %u %u f=%x %u %u %u %u %u %u SPSN %x %x %x %x %x RPSN %x S(%u %u %u %u %u %u %u) R(%u %u %u) RQP %x LID %x SL %u MTU %u %u %u %u %u SDE %p,%u SC %p,%u SCQ %u %u PID %d OS %x %x E %x %x %x RNR %d %s %d\n",
iter->n,
qp_idle(qp) ? "I" : "B",
qp->ibqp.qp_num,
@@ -610,7 +612,11 @@ void qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter)
/* ack queue information */
e ? e->opcode : 0,
e ? e->psn : 0,
- e ? e->lpsn : 0);
+ e ? e->lpsn : 0,
+ qp->r_min_rnr_timer,
+ srq ? "SRQ" : "RQ",
+ srq ? srq->rq.size : qp->r_rq.size
+ );
}
void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index af5f7936f7e5..14cc212a21c7 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -302,7 +302,6 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
@@ -346,7 +345,6 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
newreq = 0;
if (qp->s_cur == qp->s_tail) {
/* Check if send work queue is empty. */
- smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_tail == READ_ONCE(qp->s_head)) {
clear_ahg(qp);
goto bail;
@@ -843,11 +841,11 @@ static inline void hfi1_make_rc_ack_16B(struct rvt_qp *qp,
/* Convert dwords to flits */
len = (*hwords + *nwords) >> 1;
- hfi1_make_16b_hdr(hdr,
- ppd->lid | rdma_ah_get_path_bits(&qp->remote_ah_attr),
+ hfi1_make_16b_hdr(hdr, ppd->lid |
+ (rdma_ah_get_path_bits(&qp->remote_ah_attr) &
+ ((1 << ppd->lmc) - 1)),
opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr),
- 16B),
- len, pkey, becn, 0, l4, sc5);
+ 16B), len, pkey, becn, 0, l4, sc5);
bth0 = pkey | (OP(ACKNOWLEDGE) << 24);
bth0 |= extra_bytes << 20;
@@ -900,7 +898,6 @@ void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd,
}
/* Ensure s_rdma_ack_cnt changes are committed */
- smp_read_barrier_depends();
if (qp->s_rdma_ack_cnt) {
hfi1_queue_rc_ack(qp, is_fecn);
return;
@@ -1562,7 +1559,6 @@ static void rc_rcv_resp(struct hfi1_packet *packet)
trace_hfi1_ack(qp, psn);
/* Ignore invalid responses. */
- smp_read_barrier_depends(); /* see post_one_send */
if (cmp_psn(psn, READ_ONCE(qp->s_next_psn)) >= 0)
goto ack_done;
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index 2c7fc6e331ea..13b994738f41 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -362,7 +362,6 @@ static void ruc_loopback(struct rvt_qp *sqp)
sqp->s_flags |= RVT_S_BUSY;
again:
- smp_read_barrier_depends(); /* see post_one_send() */
if (sqp->s_last == READ_ONCE(sqp->s_head))
goto clr_busy;
wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 31c8f89b5fc8..61c130dbed10 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -553,7 +553,6 @@ static void sdma_hw_clean_up_task(unsigned long opaque)
static inline struct sdma_txreq *get_txhead(struct sdma_engine *sde)
{
- smp_read_barrier_depends(); /* see sdma_update_tail() */
return sde->tx_ring[sde->tx_head & sde->sdma_mask];
}
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index 991bbee04821..132b63e787d1 100644
--- a/drivers/infiniband/hw/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -79,7 +79,6 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
@@ -119,7 +118,6 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
RVT_PROCESS_NEXT_SEND_OK))
goto bail;
/* Check if send work queue is empty. */
- smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_cur == READ_ONCE(qp->s_head)) {
clear_ahg(qp);
goto bail;
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index beb5091eccca..deb184574395 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -486,7 +486,6 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- smp_read_barrier_depends(); /* see post_one_send */
if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
@@ -500,7 +499,6 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
}
/* see post_one_send() */
- smp_read_barrier_depends();
if (qp->s_cur == READ_ONCE(qp->s_head))
goto bail;
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index a38785e224cc..b8776a362a91 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1486,7 +1486,7 @@ static int query_port(struct rvt_dev_info *rdi, u8 port_num,
props->max_mtu = mtu_to_enum((!valid_ib_mtu(hfi1_max_mtu) ?
4096 : hfi1_max_mtu), IB_MTU_4096);
props->active_mtu = !valid_ib_mtu(ppd->ibmtu) ? props->max_mtu :
- mtu_to_enum(ppd->ibmtu, IB_MTU_2048);
+ mtu_to_enum(ppd->ibmtu, IB_MTU_4096);
/*
* sm_lid of 0xFFFF needs special handling so that it can
@@ -1844,7 +1844,6 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
struct hfi1_ibport *ibp = &ppd->ibport_data;
unsigned i;
int ret;
- size_t lcpysz = IB_DEVICE_NAME_MAX;
for (i = 0; i < dd->num_pports; i++)
init_ibport(ppd + i);
@@ -1872,8 +1871,6 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
*/
if (!ib_hfi1_sys_image_guid)
ib_hfi1_sys_image_guid = ibdev->node_guid;
- lcpysz = strlcpy(ibdev->name, class_name(), lcpysz);
- strlcpy(ibdev->name + lcpysz, "_%d", IB_DEVICE_NAME_MAX - lcpysz);
ibdev->owner = THIS_MODULE;
ibdev->phys_port_cnt = dd->num_pports;
ibdev->dev.parent = &dd->pcidev->dev;
@@ -1893,7 +1890,6 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
* Fill in rvt info object.
*/
dd->verbs_dev.rdi.driver_f.port_callback = hfi1_create_port_files;
- dd->verbs_dev.rdi.driver_f.get_card_name = get_card_name;
dd->verbs_dev.rdi.driver_f.get_pci_dev = get_pci_dev;
dd->verbs_dev.rdi.driver_f.check_ah = hfi1_check_ah;
dd->verbs_dev.rdi.driver_f.notify_new_ah = hfi1_notify_new_ah;
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile
index ff426a625e13..97bf2cd1cacb 100644
--- a/drivers/infiniband/hw/hns/Makefile
+++ b/drivers/infiniband/hw/hns/Makefile
@@ -5,7 +5,7 @@
ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
obj-$(CONFIG_INFINIBAND_HNS) += hns-roce.o
-hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_eq.o hns_roce_pd.o \
+hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
hns_roce_cq.o hns_roce_alloc.o
obj-$(CONFIG_INFINIBAND_HNS_HIP06) += hns-roce-hw-v1.o
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.c b/drivers/infiniband/hw/hns/hns_roce_cmd.c
index 1085cb249bc1..9ebe839d8b24 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c
@@ -103,6 +103,7 @@ void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
context->out_param = out_param;
complete(&context->done);
}
+EXPORT_SYMBOL_GPL(hns_roce_cmd_event);
/* this should be called with "use_events" */
static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.h b/drivers/infiniband/hw/hns/hns_roce_cmd.h
index b1c94223c28b..9549ae51a0dd 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.h
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.h
@@ -88,6 +88,16 @@ enum {
HNS_ROCE_CMD_DESTROY_SRQC_BT0 = 0x38,
HNS_ROCE_CMD_DESTROY_SRQC_BT1 = 0x39,
HNS_ROCE_CMD_DESTROY_SRQC_BT2 = 0x3a,
+
+ /* EQC commands */
+ HNS_ROCE_CMD_CREATE_AEQC = 0x80,
+ HNS_ROCE_CMD_MODIFY_AEQC = 0x81,
+ HNS_ROCE_CMD_QUERY_AEQC = 0x82,
+ HNS_ROCE_CMD_DESTROY_AEQC = 0x83,
+ HNS_ROCE_CMD_CREATE_CEQC = 0x90,
+ HNS_ROCE_CMD_MODIFY_CEQC = 0x91,
+ HNS_ROCE_CMD_QUERY_CEQC = 0x92,
+ HNS_ROCE_CMD_DESTROY_CEQC = 0x93,
};
enum {
diff --git a/drivers/infiniband/hw/hns/hns_roce_common.h b/drivers/infiniband/hw/hns/hns_roce_common.h
index 7ecb7a4147a8..dd67fafd0c40 100644
--- a/drivers/infiniband/hw/hns/hns_roce_common.h
+++ b/drivers/infiniband/hw/hns/hns_roce_common.h
@@ -376,6 +376,12 @@
#define ROCEE_RX_CMQ_TAIL_REG 0x07024
#define ROCEE_RX_CMQ_HEAD_REG 0x07028
+#define ROCEE_VF_MB_CFG0_REG 0x40
+#define ROCEE_VF_MB_STATUS_REG 0x58
+
+#define ROCEE_VF_EQ_DB_CFG0_REG 0x238
+#define ROCEE_VF_EQ_DB_CFG1_REG 0x23C
+
#define ROCEE_VF_SMAC_CFG0_REG 0x12000
#define ROCEE_VF_SMAC_CFG1_REG 0x12004
@@ -385,4 +391,9 @@
#define ROCEE_VF_SGID_CFG3_REG 0x1000c
#define ROCEE_VF_SGID_CFG4_REG 0x10010
+#define ROCEE_VF_ABN_INT_CFG_REG 0x13000
+#define ROCEE_VF_ABN_INT_ST_REG 0x13004
+#define ROCEE_VF_ABN_INT_EN_REG 0x13008
+#define ROCEE_VF_EVENT_INT_EN_REG 0x1300c
+
#endif /* _HNS_ROCE_COMMON_H */
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 2111b57a3489..bccc9b54c9ce 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -196,15 +196,14 @@ void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
if (ret)
dev_err(dev, "HW2SW_CQ failed (%d) for CQN %06lx\n", ret,
hr_cq->cqn);
- if (hr_dev->eq_table.eq) {
- /* Waiting interrupt process procedure carried out */
- synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
-
- /* wait for all interrupt processed */
- if (atomic_dec_and_test(&hr_cq->refcount))
- complete(&hr_cq->free);
- wait_for_completion(&hr_cq->free);
- }
+
+ /* Wait for the interrupt handling procedure to finish */
+ synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
+
+ /* wait for all interrupts to be processed */
+ if (atomic_dec_and_test(&hr_cq->refcount))
+ complete(&hr_cq->free);
+ wait_for_completion(&hr_cq->free);
spin_lock_irq(&cq_table->lock);
radix_tree_delete(&cq_table->tree, hr_cq->cqn);
@@ -460,6 +459,7 @@ void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
++cq->arm_sn;
cq->comp(cq);
}
+EXPORT_SYMBOL_GPL(hns_roce_cq_completion);
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
{
@@ -482,6 +482,7 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
if (atomic_dec_and_test(&cq->refcount))
complete(&cq->free);
}
+EXPORT_SYMBOL_GPL(hns_roce_cq_event);
int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
{
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index b154ce40cded..42c3b5a2d441 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -62,12 +62,16 @@
#define HNS_ROCE_CQE_WCMD_EMPTY_BIT 0x2
#define HNS_ROCE_MIN_CQE_CNT 16
-#define HNS_ROCE_MAX_IRQ_NUM 34
+#define HNS_ROCE_MAX_IRQ_NUM 128
-#define HNS_ROCE_COMP_VEC_NUM 32
+#define EQ_ENABLE 1
+#define EQ_DISABLE 0
-#define HNS_ROCE_AEQE_VEC_NUM 1
-#define HNS_ROCE_AEQE_OF_VEC_NUM 1
+#define HNS_ROCE_CEQ 0
+#define HNS_ROCE_AEQ 1
+
+#define HNS_ROCE_CEQ_ENTRY_SIZE 0x4
+#define HNS_ROCE_AEQ_ENTRY_SIZE 0x10
/* 4G/4K = 1M */
#define HNS_ROCE_SL_SHIFT 28
@@ -130,6 +134,7 @@ enum hns_roce_event {
HNS_ROCE_EVENT_TYPE_DB_OVERFLOW = 0x12,
HNS_ROCE_EVENT_TYPE_MB = 0x13,
HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW = 0x14,
+ HNS_ROCE_EVENT_TYPE_FLR = 0x15,
};
/* Local Work Queue Catastrophic Error,SUBTYPE 0x5 */
@@ -173,6 +178,7 @@ enum {
enum {
HNS_ROCE_CAP_FLAG_REREG_MR = BIT(0),
HNS_ROCE_CAP_FLAG_ROCE_V1_V2 = BIT(1),
+ HNS_ROCE_CAP_FLAG_RQ_INLINE = BIT(2)
};
enum hns_roce_mtt_type {
@@ -441,6 +447,21 @@ struct hns_roce_cmd_mailbox {
struct hns_roce_dev;
+struct hns_roce_rinl_sge {
+ void *addr;
+ u32 len;
+};
+
+struct hns_roce_rinl_wqe {
+ struct hns_roce_rinl_sge *sg_list;
+ u32 sge_cnt;
+};
+
+struct hns_roce_rinl_buf {
+ struct hns_roce_rinl_wqe *wqe_list;
+ u32 wqe_cnt;
+};
+
struct hns_roce_qp {
struct ib_qp ibqp;
struct hns_roce_buf hr_buf;
@@ -462,7 +483,9 @@ struct hns_roce_qp {
u8 resp_depth;
u8 state;
u32 access_flags;
+ u32 atomic_rd_en;
u32 pkey_index;
+ u32 qkey;
void (*event)(struct hns_roce_qp *,
enum hns_roce_event);
unsigned long qpn;
@@ -472,6 +495,8 @@ struct hns_roce_qp {
struct hns_roce_sge sge;
u32 next_sge;
+
+ struct hns_roce_rinl_buf rq_inl_buf;
};
struct hns_roce_sqp {
@@ -485,6 +510,45 @@ struct hns_roce_ib_iboe {
u8 phy_port[HNS_ROCE_MAX_PORTS];
};
+enum {
+ HNS_ROCE_EQ_STAT_INVALID = 0,
+ HNS_ROCE_EQ_STAT_VALID = 2,
+};
+
+struct hns_roce_ceqe {
+ u32 comp;
+};
+
+struct hns_roce_aeqe {
+ u32 asyn;
+ union {
+ struct {
+ u32 qp;
+ u32 rsv0;
+ u32 rsv1;
+ } qp_event;
+
+ struct {
+ u32 cq;
+ u32 rsv0;
+ u32 rsv1;
+ } cq_event;
+
+ struct {
+ u32 ceqe;
+ u32 rsv0;
+ u32 rsv1;
+ } ce_event;
+
+ struct {
+ __le64 out_param;
+ __le16 token;
+ u8 status;
+ u8 rsv0;
+ } __packed cmd;
+ } event;
+};
+
struct hns_roce_eq {
struct hns_roce_dev *hr_dev;
void __iomem *doorbell;
@@ -498,11 +562,31 @@ struct hns_roce_eq {
int log_page_size;
int cons_index;
struct hns_roce_buf_list *buf_list;
+ int over_ignore;
+ int coalesce;
+ int arm_st;
+ u64 eqe_ba;
+ int eqe_ba_pg_sz;
+ int eqe_buf_pg_sz;
+ int hop_num;
+ u64 *bt_l0; /* Base address table for L0 */
+ u64 **bt_l1; /* Base address table for L1 */
+ u64 **buf;
+ dma_addr_t l0_dma;
+ dma_addr_t *l1_dma;
+ dma_addr_t *buf_dma;
+ u32 l0_last_num; /* L0 last chunk num */
+ u32 l1_last_num; /* L1 last chunk num */
+ int eq_max_cnt;
+ int eq_period;
+ int shift;
+ dma_addr_t cur_eqe_ba;
+ dma_addr_t nxt_eqe_ba;
};
struct hns_roce_eq_table {
struct hns_roce_eq *eq;
- void __iomem **eqc_base;
+ void __iomem **eqc_base; /* only for hw v1 */
};
struct hns_roce_caps {
@@ -528,7 +612,7 @@ struct hns_roce_caps {
u32 min_wqes;
int reserved_cqs;
int num_aeq_vectors; /* 1 */
- int num_comp_vectors; /* 32 ceq */
+ int num_comp_vectors;
int num_other_vectors;
int num_mtpts;
u32 num_mtt_segs;
@@ -550,7 +634,7 @@ struct hns_roce_caps {
u32 pbl_buf_pg_sz;
u32 pbl_hop_num;
int aeqe_depth;
- int ceqe_depth[HNS_ROCE_COMP_VEC_NUM];
+ int ceqe_depth;
enum ib_mtu max_mtu;
u32 qpc_bt_num;
u32 srqc_bt_num;
@@ -574,6 +658,9 @@ struct hns_roce_caps {
u32 cqe_ba_pg_sz;
u32 cqe_buf_pg_sz;
u32 cqe_hop_num;
+ u32 eqe_ba_pg_sz;
+ u32 eqe_buf_pg_sz;
+ u32 eqe_hop_num;
u32 chunk_sz; /* chunk size in non multihop mode*/
u64 flags;
};
@@ -623,6 +710,8 @@ struct hns_roce_hw {
int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr);
int (*destroy_cq)(struct ib_cq *ibcq);
int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
+ int (*init_eq)(struct hns_roce_dev *hr_dev);
+ void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
};
struct hns_roce_dev {
diff --git a/drivers/infiniband/hw/hns/hns_roce_eq.c b/drivers/infiniband/hw/hns/hns_roce_eq.c
deleted file mode 100644
index d184431e2bf5..000000000000
--- a/drivers/infiniband/hw/hns/hns_roce_eq.c
+++ /dev/null
@@ -1,759 +0,0 @@
-/*
- * Copyright (c) 2016 Hisilicon Limited.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include "hns_roce_common.h"
-#include "hns_roce_device.h"
-#include "hns_roce_eq.h"
-
-static void eq_set_cons_index(struct hns_roce_eq *eq, int req_not)
-{
- roce_raw_write((eq->cons_index & CONS_INDEX_MASK) |
- (req_not << eq->log_entries), eq->doorbell);
- /* Memory barrier */
- mb();
-}
-
-static struct hns_roce_aeqe *get_aeqe(struct hns_roce_eq *eq, u32 entry)
-{
- unsigned long off = (entry & (eq->entries - 1)) *
- HNS_ROCE_AEQ_ENTRY_SIZE;
-
- return (struct hns_roce_aeqe *)((u8 *)
- (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
- off % HNS_ROCE_BA_SIZE);
-}
-
-static struct hns_roce_aeqe *next_aeqe_sw(struct hns_roce_eq *eq)
-{
- struct hns_roce_aeqe *aeqe = get_aeqe(eq, eq->cons_index);
-
- return (roce_get_bit(aeqe->asyn, HNS_ROCE_AEQE_U32_4_OWNER_S) ^
- !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
-}
-
-static void hns_roce_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe, int qpn)
-{
- struct device *dev = &hr_dev->pdev->dev;
-
- dev_warn(dev, "Local Work Queue Catastrophic Error.\n");
- switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
- case HNS_ROCE_LWQCE_QPC_ERROR:
- dev_warn(dev, "QP %d, QPC error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_MTU_ERROR:
- dev_warn(dev, "QP %d, MTU error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
- dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
- dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
- dev_warn(dev, "QP %d, WQE shift error\n", qpn);
- break;
- case HNS_ROCE_LWQCE_SL_ERROR:
- dev_warn(dev, "QP %d, SL error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_PORT_ERROR:
- dev_warn(dev, "QP %d, port error.\n", qpn);
- break;
- default:
- break;
- }
-}
-
-static void hns_roce_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe,
- int qpn)
-{
- struct device *dev = &hr_dev->pdev->dev;
-
- dev_warn(dev, "Local Access Violation Work Queue Error.\n");
- switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
- case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
- dev_warn(dev, "QP %d, R_key violation.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_LENGTH_ERROR:
- dev_warn(dev, "QP %d, length error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_VA_ERROR:
- dev_warn(dev, "QP %d, VA error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_PD_ERROR:
- dev_err(dev, "QP %d, PD error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
- dev_warn(dev, "QP %d, rw acc error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
- dev_warn(dev, "QP %d, key state error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
- dev_warn(dev, "QP %d, MR operation error.\n", qpn);
- break;
- default:
- break;
- }
-}
-
-static void hns_roce_qp_err_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe,
- int event_type)
-{
- struct device *dev = &hr_dev->pdev->dev;
- int phy_port;
- int qpn;
-
- qpn = roce_get_field(aeqe->event.qp_event.qp,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
- phy_port = roce_get_field(aeqe->event.qp_event.qp,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S);
- if (qpn <= 1)
- qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port;
-
- switch (event_type) {
- case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
- dev_warn(dev, "Invalid Req Local Work Queue Error.\n"
- "QP %d, phy_port %d.\n", qpn, phy_port);
- break;
- case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
- hns_roce_wq_catas_err_handle(hr_dev, aeqe, qpn);
- break;
- case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
- hns_roce_local_wq_access_err_handle(hr_dev, aeqe, qpn);
- break;
- default:
- break;
- }
-
- hns_roce_qp_event(hr_dev, qpn, event_type);
-}
-
-static void hns_roce_cq_err_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe,
- int event_type)
-{
- struct device *dev = &hr_dev->pdev->dev;
- u32 cqn;
-
- cqn = le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
-
- switch (event_type) {
- case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
- dev_warn(dev, "CQ 0x%x access err.\n", cqn);
- break;
- case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
- dev_warn(dev, "CQ 0x%x overflow\n", cqn);
- break;
- case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
- dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn);
- break;
- default:
- break;
- }
-
- hns_roce_cq_event(hr_dev, cqn, event_type);
-}
-
-static void hns_roce_db_overflow_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe)
-{
- struct device *dev = &hr_dev->pdev->dev;
-
- switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
- case HNS_ROCE_DB_SUBTYPE_SDB_OVF:
- dev_warn(dev, "SDB overflow.\n");
- break;
- case HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF:
- dev_warn(dev, "SDB almost overflow.\n");
- break;
- case HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP:
- dev_warn(dev, "SDB almost empty.\n");
- break;
- case HNS_ROCE_DB_SUBTYPE_ODB_OVF:
- dev_warn(dev, "ODB overflow.\n");
- break;
- case HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF:
- dev_warn(dev, "ODB almost overflow.\n");
- break;
- case HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP:
- dev_warn(dev, "SDB almost empty.\n");
- break;
- default:
- break;
- }
-}
-
-static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
-{
- struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_aeqe *aeqe;
- int aeqes_found = 0;
- int event_type;
-
- while ((aeqe = next_aeqe_sw(eq))) {
- dev_dbg(dev, "aeqe = %p, aeqe->asyn.event_type = 0x%lx\n", aeqe,
- roce_get_field(aeqe->asyn,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
- /* Memory barrier */
- rmb();
-
- event_type = roce_get_field(aeqe->asyn,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S);
- switch (event_type) {
- case HNS_ROCE_EVENT_TYPE_PATH_MIG:
- dev_warn(dev, "PATH MIG not supported\n");
- break;
- case HNS_ROCE_EVENT_TYPE_COMM_EST:
- dev_warn(dev, "COMMUNICATION established\n");
- break;
- case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
- dev_warn(dev, "SQ DRAINED not supported\n");
- break;
- case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
- dev_warn(dev, "PATH MIG failed\n");
- break;
- case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
- case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
- case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
- hns_roce_qp_err_handle(hr_dev, aeqe, event_type);
- break;
- case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
- case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
- case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
- dev_warn(dev, "SRQ not support!\n");
- break;
- case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
- case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
- case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
- hns_roce_cq_err_handle(hr_dev, aeqe, event_type);
- break;
- case HNS_ROCE_EVENT_TYPE_PORT_CHANGE:
- dev_warn(dev, "port change.\n");
- break;
- case HNS_ROCE_EVENT_TYPE_MB:
- hns_roce_cmd_event(hr_dev,
- le16_to_cpu(aeqe->event.cmd.token),
- aeqe->event.cmd.status,
- le64_to_cpu(aeqe->event.cmd.out_param
- ));
- break;
- case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
- hns_roce_db_overflow_handle(hr_dev, aeqe);
- break;
- case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
- dev_warn(dev, "CEQ 0x%lx overflow.\n",
- roce_get_field(aeqe->event.ce_event.ceqe,
- HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M,
- HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S));
- break;
- default:
- dev_warn(dev, "Unhandled event %d on EQ %d at index %u\n",
- event_type, eq->eqn, eq->cons_index);
- break;
- }
-
- eq->cons_index++;
- aeqes_found = 1;
-
- if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1) {
- dev_warn(dev, "cons_index overflow, set back to zero\n"
- );
- eq->cons_index = 0;
- }
- }
-
- eq_set_cons_index(eq, 0);
-
- return aeqes_found;
-}
-
-static struct hns_roce_ceqe *get_ceqe(struct hns_roce_eq *eq, u32 entry)
-{
- unsigned long off = (entry & (eq->entries - 1)) *
- HNS_ROCE_CEQ_ENTRY_SIZE;
-
- return (struct hns_roce_ceqe *)((u8 *)
- (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
- off % HNS_ROCE_BA_SIZE);
-}
-
-static struct hns_roce_ceqe *next_ceqe_sw(struct hns_roce_eq *eq)
-{
- struct hns_roce_ceqe *ceqe = get_ceqe(eq, eq->cons_index);
-
- return (!!(roce_get_bit(ceqe->ceqe.comp,
- HNS_ROCE_CEQE_CEQE_COMP_OWNER_S))) ^
- (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
-}
-
-static int hns_roce_ceq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
-{
- struct hns_roce_ceqe *ceqe;
- int ceqes_found = 0;
- u32 cqn;
-
- while ((ceqe = next_ceqe_sw(eq))) {
- /* Memory barrier */
- rmb();
- cqn = roce_get_field(ceqe->ceqe.comp,
- HNS_ROCE_CEQE_CEQE_COMP_CQN_M,
- HNS_ROCE_CEQE_CEQE_COMP_CQN_S);
- hns_roce_cq_completion(hr_dev, cqn);
-
- ++eq->cons_index;
- ceqes_found = 1;
-
- if (eq->cons_index > 2 * hr_dev->caps.ceqe_depth[eq->eqn] - 1) {
- dev_warn(&eq->hr_dev->pdev->dev,
- "cons_index overflow, set back to zero\n");
- eq->cons_index = 0;
- }
- }
-
- eq_set_cons_index(eq, 0);
-
- return ceqes_found;
-}
-
-static int hns_roce_aeq_ovf_int(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq)
-{
- struct device *dev = &eq->hr_dev->pdev->dev;
- int eqovf_found = 0;
- u32 caepaemask_val;
- u32 cealmovf_val;
- u32 caepaest_val;
- u32 aeshift_val;
- u32 ceshift_val;
- u32 cemask_val;
- int i = 0;
-
- /**
- * AEQ overflow ECC mult bit err CEQ overflow alarm
- * must clear interrupt, mask irq, clear irq, cancel mask operation
- */
- aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG);
-
- if (roce_get_bit(aeshift_val,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S) == 1) {
- dev_warn(dev, "AEQ overflow!\n");
-
- /* Set mask */
- caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
- roce_set_bit(caepaemask_val,
- ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
- HNS_ROCE_INT_MASK_ENABLE);
- roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
-
- /* Clear int state(INT_WC : write 1 clear) */
- caepaest_val = roce_read(hr_dev, ROCEE_CAEP_AE_ST_REG);
- roce_set_bit(caepaest_val,
- ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1);
- roce_write(hr_dev, ROCEE_CAEP_AE_ST_REG, caepaest_val);
-
- /* Clear mask */
- caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
- roce_set_bit(caepaemask_val,
- ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
- HNS_ROCE_INT_MASK_DISABLE);
- roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
- }
-
- /* CEQ almost overflow */
- for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
- ceshift_val = roce_read(hr_dev, ROCEE_CAEP_CEQC_SHIFT_0_REG +
- i * CEQ_REG_OFFSET);
-
- if (roce_get_bit(ceshift_val,
- ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S) == 1) {
- dev_warn(dev, "CEQ[%d] almost overflow!\n", i);
- eqovf_found++;
-
- /* Set mask */
- cemask_val = roce_read(hr_dev,
- ROCEE_CAEP_CE_IRQ_MASK_0_REG +
- i * CEQ_REG_OFFSET);
- roce_set_bit(cemask_val,
- ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
- HNS_ROCE_INT_MASK_ENABLE);
- roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
- i * CEQ_REG_OFFSET, cemask_val);
-
- /* Clear int state(INT_WC : write 1 clear) */
- cealmovf_val = roce_read(hr_dev,
- ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
- i * CEQ_REG_OFFSET);
- roce_set_bit(cealmovf_val,
- ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S,
- 1);
- roce_write(hr_dev, ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
- i * CEQ_REG_OFFSET, cealmovf_val);
-
- /* Clear mask */
- cemask_val = roce_read(hr_dev,
- ROCEE_CAEP_CE_IRQ_MASK_0_REG +
- i * CEQ_REG_OFFSET);
- roce_set_bit(cemask_val,
- ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
- HNS_ROCE_INT_MASK_DISABLE);
- roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
- i * CEQ_REG_OFFSET, cemask_val);
- }
- }
-
- /* ECC multi-bit error alarm */
- dev_warn(dev, "ECC UCERR ALARM: 0x%x, 0x%x, 0x%x\n",
- roce_read(hr_dev, ROCEE_ECC_UCERR_ALM0_REG),
- roce_read(hr_dev, ROCEE_ECC_UCERR_ALM1_REG),
- roce_read(hr_dev, ROCEE_ECC_UCERR_ALM2_REG));
-
- dev_warn(dev, "ECC CERR ALARM: 0x%x, 0x%x, 0x%x\n",
- roce_read(hr_dev, ROCEE_ECC_CERR_ALM0_REG),
- roce_read(hr_dev, ROCEE_ECC_CERR_ALM1_REG),
- roce_read(hr_dev, ROCEE_ECC_CERR_ALM2_REG));
-
- return eqovf_found;
-}
-
-static int hns_roce_eq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
-{
- int eqes_found = 0;
-
- if (likely(eq->type_flag == HNS_ROCE_CEQ))
- /* CEQ irq routine, CEQ is pulse irq, not clear */
- eqes_found = hns_roce_ceq_int(hr_dev, eq);
- else if (likely(eq->type_flag == HNS_ROCE_AEQ))
- /* AEQ irq routine, AEQ is pulse irq, not clear */
- eqes_found = hns_roce_aeq_int(hr_dev, eq);
- else
- /* AEQ queue overflow irq */
- eqes_found = hns_roce_aeq_ovf_int(hr_dev, eq);
-
- return eqes_found;
-}
-
-static irqreturn_t hns_roce_msi_x_interrupt(int irq, void *eq_ptr)
-{
- int int_work = 0;
- struct hns_roce_eq *eq = eq_ptr;
- struct hns_roce_dev *hr_dev = eq->hr_dev;
-
- int_work = hns_roce_eq_int(hr_dev, eq);
-
- return IRQ_RETVAL(int_work);
-}
-
-static void hns_roce_enable_eq(struct hns_roce_dev *hr_dev, int eq_num,
- int enable_flag)
-{
- void __iomem *eqc = hr_dev->eq_table.eqc_base[eq_num];
- u32 val;
-
- val = readl(eqc);
-
- if (enable_flag)
- roce_set_field(val,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
- HNS_ROCE_EQ_STAT_VALID);
- else
- roce_set_field(val,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
- HNS_ROCE_EQ_STAT_INVALID);
- writel(val, eqc);
-}
-
-static int hns_roce_create_eq(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq)
-{
- void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn];
- struct device *dev = &hr_dev->pdev->dev;
- dma_addr_t tmp_dma_addr;
- u32 eqconsindx_val = 0;
- u32 eqcuridx_val = 0;
- u32 eqshift_val = 0;
- int num_bas = 0;
- int ret;
- int i;
-
- num_bas = (PAGE_ALIGN(eq->entries * eq->eqe_size) +
- HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
-
- if ((eq->entries * eq->eqe_size) > HNS_ROCE_BA_SIZE) {
- dev_err(dev, "[error]eq buf %d gt ba size(%d) need bas=%d\n",
- (eq->entries * eq->eqe_size), HNS_ROCE_BA_SIZE,
- num_bas);
- return -EINVAL;
- }
-
- eq->buf_list = kcalloc(num_bas, sizeof(*eq->buf_list), GFP_KERNEL);
- if (!eq->buf_list)
- return -ENOMEM;
-
- for (i = 0; i < num_bas; ++i) {
- eq->buf_list[i].buf = dma_alloc_coherent(dev, HNS_ROCE_BA_SIZE,
- &tmp_dma_addr,
- GFP_KERNEL);
- if (!eq->buf_list[i].buf) {
- ret = -ENOMEM;
- goto err_out_free_pages;
- }
-
- eq->buf_list[i].map = tmp_dma_addr;
- memset(eq->buf_list[i].buf, 0, HNS_ROCE_BA_SIZE);
- }
- eq->cons_index = 0;
- roce_set_field(eqshift_val,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
- HNS_ROCE_EQ_STAT_INVALID);
- roce_set_field(eqshift_val,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S,
- eq->log_entries);
- writel(eqshift_val, eqc);
-
- /* Configure eq extended address 12~44bit */
- writel((u32)(eq->buf_list[0].map >> 12), eqc + 4);
-
- /*
- * Configure eq extended address 45~49 bit.
- * 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of
- * using 4K page, and shift more 32 because of
- * caculating the high 32 bit value evaluated to hardware.
- */
- roce_set_field(eqcuridx_val, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M,
- ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S,
- eq->buf_list[0].map >> 44);
- roce_set_field(eqcuridx_val,
- ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M,
- ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0);
- writel(eqcuridx_val, eqc + 8);
-
- /* Configure eq consumer index */
- roce_set_field(eqconsindx_val,
- ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M,
- ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0);
- writel(eqconsindx_val, eqc + 0xc);
-
- return 0;
-
-err_out_free_pages:
- for (i = i - 1; i >= 0; i--)
- dma_free_coherent(dev, HNS_ROCE_BA_SIZE, eq->buf_list[i].buf,
- eq->buf_list[i].map);
-
- kfree(eq->buf_list);
- return ret;
-}
-
-static void hns_roce_free_eq(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq)
-{
- int i = 0;
- int npages = (PAGE_ALIGN(eq->eqe_size * eq->entries) +
- HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
-
- if (!eq->buf_list)
- return;
-
- for (i = 0; i < npages; ++i)
- dma_free_coherent(&hr_dev->pdev->dev, HNS_ROCE_BA_SIZE,
- eq->buf_list[i].buf, eq->buf_list[i].map);
-
- kfree(eq->buf_list);
-}
-
-static void hns_roce_int_mask_en(struct hns_roce_dev *hr_dev)
-{
- int i = 0;
- u32 aemask_val;
- int masken = 0;
-
- /* AEQ INT */
- aemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
- roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
- masken);
- roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken);
- roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, aemask_val);
-
- /* CEQ INT */
- for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
- /* IRQ mask */
- roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
- i * CEQ_REG_OFFSET, masken);
- }
-}
-
-static void hns_roce_ce_int_default_cfg(struct hns_roce_dev *hr_dev)
-{
- /* Configure ce int interval */
- roce_write(hr_dev, ROCEE_CAEP_CE_INTERVAL_CFG_REG,
- HNS_ROCE_CEQ_DEFAULT_INTERVAL);
-
- /* Configure ce int burst num */
- roce_write(hr_dev, ROCEE_CAEP_CE_BURST_NUM_CFG_REG,
- HNS_ROCE_CEQ_DEFAULT_BURST_NUM);
-}
-
-int hns_roce_init_eq_table(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
- struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_eq *eq = NULL;
- int eq_num = 0;
- int ret = 0;
- int i = 0;
- int j = 0;
-
- eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
- eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
- if (!eq_table->eq)
- return -ENOMEM;
-
- eq_table->eqc_base = kcalloc(eq_num, sizeof(*eq_table->eqc_base),
- GFP_KERNEL);
- if (!eq_table->eqc_base) {
- ret = -ENOMEM;
- goto err_eqc_base_alloc_fail;
- }
-
- for (i = 0; i < eq_num; i++) {
- eq = &eq_table->eq[i];
- eq->hr_dev = hr_dev;
- eq->eqn = i;
- eq->irq = hr_dev->irq[i];
- eq->log_page_size = PAGE_SHIFT;
-
- if (i < hr_dev->caps.num_comp_vectors) {
- /* CEQ */
- eq_table->eqc_base[i] = hr_dev->reg_base +
- ROCEE_CAEP_CEQC_SHIFT_0_REG +
- HNS_ROCE_CEQC_REG_OFFSET * i;
- eq->type_flag = HNS_ROCE_CEQ;
- eq->doorbell = hr_dev->reg_base +
- ROCEE_CAEP_CEQC_CONS_IDX_0_REG +
- HNS_ROCE_CEQC_REG_OFFSET * i;
- eq->entries = hr_dev->caps.ceqe_depth[i];
- eq->log_entries = ilog2(eq->entries);
- eq->eqe_size = sizeof(struct hns_roce_ceqe);
- } else {
- /* AEQ */
- eq_table->eqc_base[i] = hr_dev->reg_base +
- ROCEE_CAEP_AEQC_AEQE_SHIFT_REG;
- eq->type_flag = HNS_ROCE_AEQ;
- eq->doorbell = hr_dev->reg_base +
- ROCEE_CAEP_AEQE_CONS_IDX_REG;
- eq->entries = hr_dev->caps.aeqe_depth;
- eq->log_entries = ilog2(eq->entries);
- eq->eqe_size = sizeof(struct hns_roce_aeqe);
- }
- }
-
- /* Disable irq */
- hns_roce_int_mask_en(hr_dev);
-
- /* Configure CE irq interval and burst num */
- hns_roce_ce_int_default_cfg(hr_dev);
-
- for (i = 0; i < eq_num; i++) {
- ret = hns_roce_create_eq(hr_dev, &eq_table->eq[i]);
- if (ret) {
- dev_err(dev, "eq create failed\n");
- goto err_create_eq_fail;
- }
- }
-
- for (j = 0; j < eq_num; j++) {
- ret = request_irq(eq_table->eq[j].irq, hns_roce_msi_x_interrupt,
- 0, hr_dev->irq_names[j], eq_table->eq + j);
- if (ret) {
- dev_err(dev, "request irq error!\n");
- goto err_request_irq_fail;
- }
- }
-
- for (i = 0; i < eq_num; i++)
- hns_roce_enable_eq(hr_dev, i, EQ_ENABLE);
-
- return 0;
-
-err_request_irq_fail:
- for (j = j - 1; j >= 0; j--)
- free_irq(eq_table->eq[j].irq, eq_table->eq + j);
-
-err_create_eq_fail:
- for (i = i - 1; i >= 0; i--)
- hns_roce_free_eq(hr_dev, &eq_table->eq[i]);
-
- kfree(eq_table->eqc_base);
-
-err_eqc_base_alloc_fail:
- kfree(eq_table->eq);
-
- return ret;
-}
-
-void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev)
-{
- int i;
- int eq_num;
- struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
-
- eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
- for (i = 0; i < eq_num; i++) {
- /* Disable EQ */
- hns_roce_enable_eq(hr_dev, i, EQ_DISABLE);
-
- free_irq(eq_table->eq[i].irq, eq_table->eq + i);
-
- hns_roce_free_eq(hr_dev, &eq_table->eq[i]);
- }
-
- kfree(eq_table->eqc_base);
- kfree(eq_table->eq);
-}
diff --git a/drivers/infiniband/hw/hns/hns_roce_eq.h b/drivers/infiniband/hw/hns/hns_roce_eq.h
deleted file mode 100644
index c6d212d12e03..000000000000
--- a/drivers/infiniband/hw/hns/hns_roce_eq.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Copyright (c) 2016 Hisilicon Limited.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _HNS_ROCE_EQ_H
-#define _HNS_ROCE_EQ_H
-
-#define HNS_ROCE_CEQ 1
-#define HNS_ROCE_AEQ 2
-
-#define HNS_ROCE_CEQ_ENTRY_SIZE 0x4
-#define HNS_ROCE_AEQ_ENTRY_SIZE 0x10
-#define HNS_ROCE_CEQC_REG_OFFSET 0x18
-
-#define HNS_ROCE_CEQ_DEFAULT_INTERVAL 0x10
-#define HNS_ROCE_CEQ_DEFAULT_BURST_NUM 0x10
-
-#define HNS_ROCE_INT_MASK_DISABLE 0
-#define HNS_ROCE_INT_MASK_ENABLE 1
-
-#define EQ_ENABLE 1
-#define EQ_DISABLE 0
-#define CONS_INDEX_MASK 0xffff
-
-#define CEQ_REG_OFFSET 0x18
-
-enum {
- HNS_ROCE_EQ_STAT_INVALID = 0,
- HNS_ROCE_EQ_STAT_VALID = 2,
-};
-
-struct hns_roce_aeqe {
- u32 asyn;
- union {
- struct {
- u32 qp;
- u32 rsv0;
- u32 rsv1;
- } qp_event;
-
- struct {
- u32 cq;
- u32 rsv0;
- u32 rsv1;
- } cq_event;
-
- struct {
- u32 port;
- u32 rsv0;
- u32 rsv1;
- } port_event;
-
- struct {
- u32 ceqe;
- u32 rsv0;
- u32 rsv1;
- } ce_event;
-
- struct {
- __le64 out_param;
- __le16 token;
- u8 status;
- u8 rsv0;
- } __packed cmd;
- } event;
-};
-
-#define HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S 16
-#define HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M \
- (((1UL << 8) - 1) << HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S)
-
-#define HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S 24
-#define HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M \
- (((1UL << 7) - 1) << HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)
-
-#define HNS_ROCE_AEQE_U32_4_OWNER_S 31
-
-#define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S 0
-#define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M \
- (((1UL << 24) - 1) << HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S)
-
-#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S 25
-#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M \
- (((1UL << 3) - 1) << HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S)
-
-#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S 0
-#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M \
- (((1UL << 16) - 1) << HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)
-
-#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S 0
-#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M \
- (((1UL << 5) - 1) << HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S)
-
-struct hns_roce_ceqe {
- union {
- int comp;
- } ceqe;
-};
-
-#define HNS_ROCE_CEQE_CEQE_COMP_OWNER_S 0
-
-#define HNS_ROCE_CEQE_CEQE_COMP_CQN_S 16
-#define HNS_ROCE_CEQE_CEQE_COMP_CQN_M \
- (((1UL << 16) - 1) << HNS_ROCE_CEQE_CEQE_COMP_CQN_S)
-
-#endif /* _HNS_ROCE_EQ_H */
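Both the pollers deleted above and their hw-v1 replacements below rely on
the same ring ownership test: hardware flips an entry's owner bit on each
pass through the EQ ring, and software compares that bit against the
parity of its own sweep (cons_index & entries). A standalone sketch of
that test, assuming a power-of-two ring; every identifier here is
illustrative rather than taken from the driver:

#include <linux/bits.h>
#include <linux/types.h>

struct demo_eqe {
	u32 flags;				/* bit 31 is the owner bit */
};

static struct demo_eqe *demo_next_eqe(struct demo_eqe *ring,
				      u32 entries,	/* power of two */
				      u32 cons_index)
{
	struct demo_eqe *eqe = &ring[cons_index & (entries - 1)];
	int hw_pass = !!(eqe->flags & BIT(31));	/* producer's parity */
	int sw_pass = !!(cons_index & entries);	/* consumer's parity */

	/* The entry is valid only while the two parities disagree. */
	return (hw_pass ^ sw_pass) ? eqe : NULL;
}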
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index af27168faf0f..21ca9fa7c9d1 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -33,6 +33,7 @@
#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <rdma/ib_umem.h>
@@ -774,7 +775,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
goto create_lp_qp_failed;
}
- ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
+ ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, IB_QP_DEST_QPN,
IB_QPS_INIT, IB_QPS_RTR);
if (ret) {
dev_err(dev, "modify qp failed(%d)!\n", ret);
@@ -1492,9 +1493,9 @@ static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
caps->max_sq_inline = HNS_ROCE_V1_INLINE_SIZE;
caps->num_uars = HNS_ROCE_V1_UAR_NUM;
caps->phy_num_uars = HNS_ROCE_V1_PHY_UAR_NUM;
- caps->num_aeq_vectors = HNS_ROCE_AEQE_VEC_NUM;
- caps->num_comp_vectors = HNS_ROCE_COMP_VEC_NUM;
- caps->num_other_vectors = HNS_ROCE_AEQE_OF_VEC_NUM;
+ caps->num_aeq_vectors = HNS_ROCE_V1_AEQE_VEC_NUM;
+ caps->num_comp_vectors = HNS_ROCE_V1_COMP_VEC_NUM;
+ caps->num_other_vectors = HNS_ROCE_V1_ABNORMAL_VEC_NUM;
caps->num_mtpts = HNS_ROCE_V1_MAX_MTPT_NUM;
caps->num_mtt_segs = HNS_ROCE_V1_MAX_MTT_SEGS;
caps->num_pds = HNS_ROCE_V1_MAX_PD_NUM;
@@ -1529,10 +1530,8 @@ static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
caps->num_ports + 1;
}
- for (i = 0; i < caps->num_comp_vectors; i++)
- caps->ceqe_depth[i] = HNS_ROCE_V1_NUM_COMP_EQE;
-
- caps->aeqe_depth = HNS_ROCE_V1_NUM_ASYNC_EQE;
+ caps->ceqe_depth = HNS_ROCE_V1_COMP_EQE_NUM;
+ caps->aeqe_depth = HNS_ROCE_V1_ASYNC_EQE_NUM;
caps->local_ca_ack_delay = le32_to_cpu(roce_read(hr_dev,
ROCEE_ACK_DELAY_REG));
caps->max_mtu = IB_MTU_2048;
@@ -2312,15 +2311,16 @@ static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
case HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE:
wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data = le32_to_cpu(cqe->immediate_data);
+ wc->ex.imm_data =
+ cpu_to_be32(le32_to_cpu(cqe->immediate_data));
break;
case HNS_ROCE_OPCODE_SEND_DATA_RECEIVE:
if (roce_get_bit(cqe->cqe_byte_4,
CQE_BYTE_4_IMM_INDICATOR_S)) {
wc->opcode = IB_WC_RECV;
wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data = le32_to_cpu(
- cqe->immediate_data);
+ wc->ex.imm_data = cpu_to_be32(
+ le32_to_cpu(cqe->immediate_data));
} else {
wc->opcode = IB_WC_RECV;
wc->wc_flags = 0;
@@ -3960,6 +3960,732 @@ static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq)
return ret;
}
+static void set_eq_cons_index_v1(struct hns_roce_eq *eq, int req_not)
+{
+ roce_raw_write((eq->cons_index & HNS_ROCE_V1_CONS_IDX_M) |
+ (req_not << eq->log_entries), eq->doorbell);
+}
+
+static void hns_roce_v1_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe, int qpn)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+
+ dev_warn(dev, "Local Work Queue Catastrophic Error.\n");
+ switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
+ HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
+ case HNS_ROCE_LWQCE_QPC_ERROR:
+ dev_warn(dev, "QP %d, QPC error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_MTU_ERROR:
+ dev_warn(dev, "QP %d, MTU error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
+ dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
+ dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
+ dev_warn(dev, "QP %d, WQE shift error\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_SL_ERROR:
+ dev_warn(dev, "QP %d, SL error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_PORT_ERROR:
+ dev_warn(dev, "QP %d, port error.\n", qpn);
+ break;
+ default:
+ break;
+ }
+}
+
+static void hns_roce_v1_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe,
+ int qpn)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+
+ dev_warn(dev, "Local Access Violation Work Queue Error.\n");
+ switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
+ HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
+ case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
+ dev_warn(dev, "QP %d, R_key violation.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_LENGTH_ERROR:
+ dev_warn(dev, "QP %d, length error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_VA_ERROR:
+ dev_warn(dev, "QP %d, VA error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_PD_ERROR:
+ dev_err(dev, "QP %d, PD error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
+ dev_warn(dev, "QP %d, rw acc error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
+ dev_warn(dev, "QP %d, key state error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
+ dev_warn(dev, "QP %d, MR operation error.\n", qpn);
+ break;
+ default:
+ break;
+ }
+}
+
+static void hns_roce_v1_qp_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe,
+ int event_type)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ int phy_port;
+ int qpn;
+
+ qpn = roce_get_field(aeqe->event.qp_event.qp,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
+ phy_port = roce_get_field(aeqe->event.qp_event.qp,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S);
+ if (qpn <= 1)
+ qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port;
+
+ switch (event_type) {
+ case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+ dev_warn(dev, "Invalid Req Local Work Queue Error.\n"
+ "QP %d, phy_port %d.\n", qpn, phy_port);
+ break;
+ case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+ hns_roce_v1_wq_catas_err_handle(hr_dev, aeqe, qpn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+ hns_roce_v1_local_wq_access_err_handle(hr_dev, aeqe, qpn);
+ break;
+ default:
+ break;
+ }
+
+ hns_roce_qp_event(hr_dev, qpn, event_type);
+}
+
+static void hns_roce_v1_cq_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe,
+ int event_type)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ u32 cqn;
+
+ cqn = le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
+ HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
+ HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
+
+ switch (event_type) {
+ case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
+ dev_warn(dev, "CQ 0x%x access err.\n", cqn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
+ dev_warn(dev, "CQ 0x%x overflow\n", cqn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
+ dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn);
+ break;
+ default:
+ break;
+ }
+
+ hns_roce_cq_event(hr_dev, cqn, event_type);
+}
+
+static void hns_roce_v1_db_overflow_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+
+ switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
+ HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
+ case HNS_ROCE_DB_SUBTYPE_SDB_OVF:
+ dev_warn(dev, "SDB overflow.\n");
+ break;
+ case HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF:
+ dev_warn(dev, "SDB almost overflow.\n");
+ break;
+ case HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP:
+ dev_warn(dev, "SDB almost empty.\n");
+ break;
+ case HNS_ROCE_DB_SUBTYPE_ODB_OVF:
+ dev_warn(dev, "ODB overflow.\n");
+ break;
+ case HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF:
+ dev_warn(dev, "ODB almost overflow.\n");
+ break;
+ case HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP:
+ dev_warn(dev, "ODB almost empty.\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static struct hns_roce_aeqe *get_aeqe_v1(struct hns_roce_eq *eq, u32 entry)
+{
+ unsigned long off = (entry & (eq->entries - 1)) *
+ HNS_ROCE_AEQ_ENTRY_SIZE;
+
+ return (struct hns_roce_aeqe *)((u8 *)
+ (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
+ off % HNS_ROCE_BA_SIZE);
+}
+
+static struct hns_roce_aeqe *next_aeqe_sw_v1(struct hns_roce_eq *eq)
+{
+ struct hns_roce_aeqe *aeqe = get_aeqe_v1(eq, eq->cons_index);
+
+ return (roce_get_bit(aeqe->asyn, HNS_ROCE_AEQE_U32_4_OWNER_S) ^
+ !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
+}
+
+static int hns_roce_v1_aeq_int(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_aeqe *aeqe;
+ int aeqes_found = 0;
+ int event_type;
+
+ while ((aeqe = next_aeqe_sw_v1(eq))) {
+
+ /* Make sure we read the AEQ entry after we have checked the
+ * ownership bit
+ */
+ dma_rmb();
+
+ dev_dbg(dev, "aeqe = %p, aeqe->asyn.event_type = 0x%lx\n", aeqe,
+ roce_get_field(aeqe->asyn,
+ HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
+ HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
+ event_type = roce_get_field(aeqe->asyn,
+ HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
+ HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S);
+ switch (event_type) {
+ case HNS_ROCE_EVENT_TYPE_PATH_MIG:
+ dev_warn(dev, "PATH MIG not supported\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_COMM_EST:
+ dev_warn(dev, "COMMUNICATION established\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
+ dev_warn(dev, "SQ DRAINED not supported\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
+ dev_warn(dev, "PATH MIG failed\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+ case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+ case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+ hns_roce_v1_qp_err_handle(hr_dev, aeqe, event_type);
+ break;
+ case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
+ case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
+ case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
+ dev_warn(dev, "SRQ not supported!\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
+ case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
+ case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
+ hns_roce_v1_cq_err_handle(hr_dev, aeqe, event_type);
+ break;
+ case HNS_ROCE_EVENT_TYPE_PORT_CHANGE:
+ dev_warn(dev, "port change.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_MB:
+ hns_roce_cmd_event(hr_dev,
+ le16_to_cpu(aeqe->event.cmd.token),
+ aeqe->event.cmd.status,
+ le64_to_cpu(aeqe->event.cmd.out_param
+ ));
+ break;
+ case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
+ hns_roce_v1_db_overflow_handle(hr_dev, aeqe);
+ break;
+ case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
+ dev_warn(dev, "CEQ 0x%lx overflow.\n",
+ roce_get_field(aeqe->event.ce_event.ceqe,
+ HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M,
+ HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S));
+ break;
+ default:
+ dev_warn(dev, "Unhandled event %d on EQ %d at idx %u.\n",
+ event_type, eq->eqn, eq->cons_index);
+ break;
+ }
+
+ eq->cons_index++;
+ aeqes_found = 1;
+
+ if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1) {
+ dev_warn(dev, "cons_index overflow, set back to 0.\n");
+ eq->cons_index = 0;
+ }
+ }
+
+ set_eq_cons_index_v1(eq, 0);
+
+ return aeqes_found;
+}
+
+static struct hns_roce_ceqe *get_ceqe_v1(struct hns_roce_eq *eq, u32 entry)
+{
+ unsigned long off = (entry & (eq->entries - 1)) *
+ HNS_ROCE_CEQ_ENTRY_SIZE;
+
+ return (struct hns_roce_ceqe *)((u8 *)
+ (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
+ off % HNS_ROCE_BA_SIZE);
+}
+
+static struct hns_roce_ceqe *next_ceqe_sw_v1(struct hns_roce_eq *eq)
+{
+ struct hns_roce_ceqe *ceqe = get_ceqe_v1(eq, eq->cons_index);
+
+ return (!!(roce_get_bit(ceqe->comp,
+ HNS_ROCE_CEQE_CEQE_COMP_OWNER_S))) ^
+ (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
+}
+
+static int hns_roce_v1_ceq_int(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ struct hns_roce_ceqe *ceqe;
+ int ceqes_found = 0;
+ u32 cqn;
+
+ while ((ceqe = next_ceqe_sw_v1(eq))) {
+
+ /* Make sure we read CEQ entry after we have checked the
+ * ownership bit
+ */
+ dma_rmb();
+
+ cqn = roce_get_field(ceqe->comp,
+ HNS_ROCE_CEQE_CEQE_COMP_CQN_M,
+ HNS_ROCE_CEQE_CEQE_COMP_CQN_S);
+ hns_roce_cq_completion(hr_dev, cqn);
+
+ ++eq->cons_index;
+ ceqes_found = 1;
+
+ if (eq->cons_index > 2 * hr_dev->caps.ceqe_depth - 1) {
+ dev_warn(&eq->hr_dev->pdev->dev,
+ "cons_index overflow, set back to 0.\n");
+ eq->cons_index = 0;
+ }
+ }
+
+ set_eq_cons_index_v1(eq, 0);
+
+ return ceqes_found;
+}
+
+static irqreturn_t hns_roce_v1_msix_interrupt_eq(int irq, void *eq_ptr)
+{
+ struct hns_roce_eq *eq = eq_ptr;
+ struct hns_roce_dev *hr_dev = eq->hr_dev;
+ int int_work = 0;
+
+ if (eq->type_flag == HNS_ROCE_CEQ)
+ /* CEQ irq routine; CEQ is a pulse irq and needs no clearing */
+ int_work = hns_roce_v1_ceq_int(hr_dev, eq);
+ else
+ /* AEQ irq routine; AEQ is a pulse irq and needs no clearing */
+ int_work = hns_roce_v1_aeq_int(hr_dev, eq);
+
+ return IRQ_RETVAL(int_work);
+}
+
+static irqreturn_t hns_roce_v1_msix_interrupt_abn(int irq, void *dev_id)
+{
+ struct hns_roce_dev *hr_dev = dev_id;
+ struct device *dev = &hr_dev->pdev->dev;
+ int int_work = 0;
+ u32 caepaemask_val;
+ u32 cealmovf_val;
+ u32 caepaest_val;
+ u32 aeshift_val;
+ u32 ceshift_val;
+ u32 cemask_val;
+ int i;
+
+ /*
+ * Abnormal interrupt:
+ * AEQ overflow, ECC multi-bit error and CEQ overflow must be
+ * cleared by hand: mask the irq, clear the interrupt state,
+ * then cancel the mask.
+ */
+ aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG);
+
+ /* AEQE overflow */
+ if (roce_get_bit(aeshift_val,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S) == 1) {
+ dev_warn(dev, "AEQ overflow!\n");
+
+ /* Set mask */
+ caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
+ roce_set_bit(caepaemask_val,
+ ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
+ HNS_ROCE_INT_MASK_ENABLE);
+ roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
+
+ /* Clear int state(INT_WC : write 1 clear) */
+ caepaest_val = roce_read(hr_dev, ROCEE_CAEP_AE_ST_REG);
+ roce_set_bit(caepaest_val,
+ ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1);
+ roce_write(hr_dev, ROCEE_CAEP_AE_ST_REG, caepaest_val);
+
+ /* Clear mask */
+ caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
+ roce_set_bit(caepaemask_val,
+ ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
+ HNS_ROCE_INT_MASK_DISABLE);
+ roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
+ }
+
+ /* CEQ almost overflow */
+ for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
+ ceshift_val = roce_read(hr_dev, ROCEE_CAEP_CEQC_SHIFT_0_REG +
+ i * CEQ_REG_OFFSET);
+
+ if (roce_get_bit(ceshift_val,
+ ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S) == 1) {
+ dev_warn(dev, "CEQ[%d] almost overflow!\n", i);
+ int_work++;
+
+ /* Set mask */
+ cemask_val = roce_read(hr_dev,
+ ROCEE_CAEP_CE_IRQ_MASK_0_REG +
+ i * CEQ_REG_OFFSET);
+ roce_set_bit(cemask_val,
+ ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
+ HNS_ROCE_INT_MASK_ENABLE);
+ roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
+ i * CEQ_REG_OFFSET, cemask_val);
+
+ /* Clear int state(INT_WC : write 1 clear) */
+ cealmovf_val = roce_read(hr_dev,
+ ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
+ i * CEQ_REG_OFFSET);
+ roce_set_bit(cealmovf_val,
+ ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S,
+ 1);
+ roce_write(hr_dev, ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
+ i * CEQ_REG_OFFSET, cealmovf_val);
+
+ /* Clear mask */
+ cemask_val = roce_read(hr_dev,
+ ROCEE_CAEP_CE_IRQ_MASK_0_REG +
+ i * CEQ_REG_OFFSET);
+ roce_set_bit(cemask_val,
+ ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
+ HNS_ROCE_INT_MASK_DISABLE);
+ roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
+ i * CEQ_REG_OFFSET, cemask_val);
+ }
+ }
+
+ /* ECC multi-bit error alarm */
+ dev_warn(dev, "ECC UCERR ALARM: 0x%x, 0x%x, 0x%x\n",
+ roce_read(hr_dev, ROCEE_ECC_UCERR_ALM0_REG),
+ roce_read(hr_dev, ROCEE_ECC_UCERR_ALM1_REG),
+ roce_read(hr_dev, ROCEE_ECC_UCERR_ALM2_REG));
+
+ dev_warn(dev, "ECC CERR ALARM: 0x%x, 0x%x, 0x%x\n",
+ roce_read(hr_dev, ROCEE_ECC_CERR_ALM0_REG),
+ roce_read(hr_dev, ROCEE_ECC_CERR_ALM1_REG),
+ roce_read(hr_dev, ROCEE_ECC_CERR_ALM2_REG));
+
+ return IRQ_RETVAL(int_work);
+}
+
+static void hns_roce_v1_int_mask_enable(struct hns_roce_dev *hr_dev)
+{
+ u32 aemask_val;
+ int masken = 0;
+ int i;
+
+ /* AEQ INT */
+ aemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
+ roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
+ masken);
+ roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken);
+ roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, aemask_val);
+
+ /* CEQ INT */
+ for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
+ /* IRQ mask */
+ roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
+ i * CEQ_REG_OFFSET, masken);
+ }
+}
+
+static void hns_roce_v1_free_eq(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ int npages = (PAGE_ALIGN(eq->eqe_size * eq->entries) +
+ HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
+ int i;
+
+ if (!eq->buf_list)
+ return;
+
+ for (i = 0; i < npages; ++i)
+ dma_free_coherent(&hr_dev->pdev->dev, HNS_ROCE_BA_SIZE,
+ eq->buf_list[i].buf, eq->buf_list[i].map);
+
+ kfree(eq->buf_list);
+}
+
+static void hns_roce_v1_enable_eq(struct hns_roce_dev *hr_dev, int eq_num,
+ int enable_flag)
+{
+ void __iomem *eqc = hr_dev->eq_table.eqc_base[eq_num];
+ u32 val;
+
+ val = readl(eqc);
+
+ if (enable_flag)
+ roce_set_field(val,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
+ HNS_ROCE_EQ_STAT_VALID);
+ else
+ roce_set_field(val,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
+ HNS_ROCE_EQ_STAT_INVALID);
+ writel(val, eqc);
+}
+
+static int hns_roce_v1_create_eq(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn];
+ struct device *dev = &hr_dev->pdev->dev;
+ dma_addr_t tmp_dma_addr;
+ u32 eqconsindx_val = 0;
+ u32 eqcuridx_val = 0;
+ u32 eqshift_val = 0;
+ int num_bas;
+ int ret;
+ int i;
+
+ num_bas = (PAGE_ALIGN(eq->entries * eq->eqe_size) +
+ HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
+
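+ /* The v1 EQ setup only supports a queue that fits in a single BA
+ * page; larger requests are rejected below.
+ */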
+ if ((eq->entries * eq->eqe_size) > HNS_ROCE_BA_SIZE) {
+ dev_err(dev, "[error]eq buf %d gt ba size(%d) need bas=%d\n",
+ (eq->entries * eq->eqe_size), HNS_ROCE_BA_SIZE,
+ num_bas);
+ return -EINVAL;
+ }
+
+ eq->buf_list = kcalloc(num_bas, sizeof(*eq->buf_list), GFP_KERNEL);
+ if (!eq->buf_list)
+ return -ENOMEM;
+
+ for (i = 0; i < num_bas; ++i) {
+ eq->buf_list[i].buf = dma_alloc_coherent(dev, HNS_ROCE_BA_SIZE,
+ &tmp_dma_addr,
+ GFP_KERNEL);
+ if (!eq->buf_list[i].buf) {
+ ret = -ENOMEM;
+ goto err_out_free_pages;
+ }
+
+ eq->buf_list[i].map = tmp_dma_addr;
+ memset(eq->buf_list[i].buf, 0, HNS_ROCE_BA_SIZE);
+ }
+ eq->cons_index = 0;
+ roce_set_field(eqshift_val,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
+ HNS_ROCE_EQ_STAT_INVALID);
+ roce_set_field(eqshift_val,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M,
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S,
+ eq->log_entries);
+ writel(eqshift_val, eqc);
+
+ /* Configure eq extended address bits 12~44 */
+ writel((u32)(eq->buf_list[0].map >> 12), eqc + 4);
+
+ /*
+ * Configure eq extended address bits 45~49.
+ * 44 = 32 + 12: the address is shifted right by 12 because of the
+ * 4K page size, and by another 32 to obtain the high 32-bit value
+ * written to hardware.
+ */
+ roce_set_field(eqcuridx_val, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M,
+ ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S,
+ eq->buf_list[0].map >> 44);
+ roce_set_field(eqcuridx_val,
+ ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M,
+ ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0);
+ writel(eqcuridx_val, eqc + 8);
+
+ /* Configure eq consumer index */
+ roce_set_field(eqconsindx_val,
+ ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M,
+ ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0);
+ writel(eqconsindx_val, eqc + 0xc);
+
+ return 0;
+
+err_out_free_pages:
+ for (i -= 1; i >= 0; i--)
+ dma_free_coherent(dev, HNS_ROCE_BA_SIZE, eq->buf_list[i].buf,
+ eq->buf_list[i].map);
+
+ kfree(eq->buf_list);
+ return ret;
+}
+
+static int hns_roce_v1_init_eq_table(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_eq *eq;
+ int irq_num;
+ int eq_num;
+ int ret;
+ int i, j;
+
+ eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
+ irq_num = eq_num + hr_dev->caps.num_other_vectors;
+
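+ /* The EQ array holds CEQs first, then AEQs; irq vectors beyond
+ * eq_num service the abnormal interrupt.
+ */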
+ eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
+ if (!eq_table->eq)
+ return -ENOMEM;
+
+ eq_table->eqc_base = kcalloc(eq_num, sizeof(*eq_table->eqc_base),
+ GFP_KERNEL);
+ if (!eq_table->eqc_base) {
+ ret = -ENOMEM;
+ goto err_eqc_base_alloc_fail;
+ }
+
+ for (i = 0; i < eq_num; i++) {
+ eq = &eq_table->eq[i];
+ eq->hr_dev = hr_dev;
+ eq->eqn = i;
+ eq->irq = hr_dev->irq[i];
+ eq->log_page_size = PAGE_SHIFT;
+
+ if (i < hr_dev->caps.num_comp_vectors) {
+ /* CEQ */
+ eq_table->eqc_base[i] = hr_dev->reg_base +
+ ROCEE_CAEP_CEQC_SHIFT_0_REG +
+ CEQ_REG_OFFSET * i;
+ eq->type_flag = HNS_ROCE_CEQ;
+ eq->doorbell = hr_dev->reg_base +
+ ROCEE_CAEP_CEQC_CONS_IDX_0_REG +
+ CEQ_REG_OFFSET * i;
+ eq->entries = hr_dev->caps.ceqe_depth;
+ eq->log_entries = ilog2(eq->entries);
+ eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
+ } else {
+ /* AEQ */
+ eq_table->eqc_base[i] = hr_dev->reg_base +
+ ROCEE_CAEP_AEQC_AEQE_SHIFT_REG;
+ eq->type_flag = HNS_ROCE_AEQ;
+ eq->doorbell = hr_dev->reg_base +
+ ROCEE_CAEP_AEQE_CONS_IDX_REG;
+ eq->entries = hr_dev->caps.aeqe_depth;
+ eq->log_entries = ilog2(eq->entries);
+ eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
+ }
+ }
+
+ /* Disable irq */
+ hns_roce_v1_int_mask_enable(hr_dev);
+
+ /* Configure ce int interval */
+ roce_write(hr_dev, ROCEE_CAEP_CE_INTERVAL_CFG_REG,
+ HNS_ROCE_CEQ_DEFAULT_INTERVAL);
+
+ /* Configure ce int burst num */
+ roce_write(hr_dev, ROCEE_CAEP_CE_BURST_NUM_CFG_REG,
+ HNS_ROCE_CEQ_DEFAULT_BURST_NUM);
+
+ for (i = 0; i < eq_num; i++) {
+ ret = hns_roce_v1_create_eq(hr_dev, &eq_table->eq[i]);
+ if (ret) {
+ dev_err(dev, "eq create failed\n");
+ goto err_create_eq_fail;
+ }
+ }
+
+ for (j = 0; j < irq_num; j++) {
+ if (j < eq_num)
+ ret = request_irq(hr_dev->irq[j],
+ hns_roce_v1_msix_interrupt_eq, 0,
+ hr_dev->irq_names[j],
+ &eq_table->eq[j]);
+ else
+ ret = request_irq(hr_dev->irq[j],
+ hns_roce_v1_msix_interrupt_abn, 0,
+ hr_dev->irq_names[j], hr_dev);
+
+ if (ret) {
+ dev_err(dev, "request irq error!\n");
+ goto err_request_irq_fail;
+ }
+ }
+
+ for (i = 0; i < eq_num; i++)
+ hns_roce_v1_enable_eq(hr_dev, i, EQ_ENABLE);
+
+ return 0;
+
+err_request_irq_fail:
+ for (j -= 1; j >= 0; j--)
+ free_irq(hr_dev->irq[j], &eq_table->eq[j]);
+
+err_create_eq_fail:
+ for (i -= 1; i >= 0; i--)
+ hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]);
+
+ kfree(eq_table->eqc_base);
+
+err_eqc_base_alloc_fail:
+ kfree(eq_table->eq);
+
+ return ret;
+}
+
+static void hns_roce_v1_cleanup_eq_table(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
+ int irq_num;
+ int eq_num;
+ int i;
+
+ eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
+ irq_num = eq_num + hr_dev->caps.num_other_vectors;
+ for (i = 0; i < eq_num; i++) {
+ /* Disable EQ */
+ hns_roce_v1_enable_eq(hr_dev, i, EQ_DISABLE);
+
+ free_irq(hr_dev->irq[i], &eq_table->eq[i]);
+
+ hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]);
+ }
+ for (i = eq_num; i < irq_num; i++)
+ free_irq(hr_dev->irq[i], hr_dev);
+
+ kfree(eq_table->eqc_base);
+ kfree(eq_table->eq);
+}
+
static const struct hns_roce_hw hns_roce_hw_v1 = {
.reset = hns_roce_v1_reset,
.hw_profile = hns_roce_v1_profile,
@@ -3983,6 +4709,8 @@ static const struct hns_roce_hw hns_roce_hw_v1 = {
.poll_cq = hns_roce_v1_poll_cq,
.dereg_mr = hns_roce_v1_dereg_mr,
.destroy_cq = hns_roce_v1_destroy_cq,
+ .init_eq = hns_roce_v1_init_eq_table,
+ .cleanup_eq = hns_roce_v1_cleanup_eq_table,
};
static const struct of_device_id hns_roce_of_match[] = {
@@ -4060,10 +4788,6 @@ static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
/* get the mapped register base address */
res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "memory resource not found!\n");
- return -EINVAL;
- }
hr_dev->reg_base = devm_ioremap_resource(dev, res);
if (IS_ERR(hr_dev->reg_base))
return PTR_ERR(hr_dev->reg_base);
@@ -4132,14 +4856,14 @@ static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
/* read the interrupt names from the DT or ACPI */
ret = device_property_read_string_array(dev, "interrupt-names",
hr_dev->irq_names,
- HNS_ROCE_MAX_IRQ_NUM);
+ HNS_ROCE_V1_MAX_IRQ_NUM);
if (ret < 0) {
dev_err(dev, "couldn't get interrupt names from DT or ACPI!\n");
return ret;
}
/* fetch the interrupt numbers */
- for (i = 0; i < HNS_ROCE_MAX_IRQ_NUM; i++) {
+ for (i = 0; i < HNS_ROCE_V1_MAX_IRQ_NUM; i++) {
hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i);
if (hr_dev->irq[i] <= 0) {
dev_err(dev, "platform get of irq[=%d] failed!\n", i);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
index 21a07ef0afc9..b44ddd239060 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
@@ -60,8 +60,13 @@
#define HNS_ROCE_V1_GID_NUM 16
#define HNS_ROCE_V1_RESV_QP 8
-#define HNS_ROCE_V1_NUM_COMP_EQE 0x8000
-#define HNS_ROCE_V1_NUM_ASYNC_EQE 0x400
+#define HNS_ROCE_V1_MAX_IRQ_NUM 34
+#define HNS_ROCE_V1_COMP_VEC_NUM 32
+#define HNS_ROCE_V1_AEQE_VEC_NUM 1
+#define HNS_ROCE_V1_ABNORMAL_VEC_NUM 1
+
+#define HNS_ROCE_V1_COMP_EQE_NUM 0x8000
+#define HNS_ROCE_V1_ASYNC_EQE_NUM 0x400
#define HNS_ROCE_V1_QPC_ENTRY_SIZE 256
#define HNS_ROCE_V1_IRRL_ENTRY_SIZE 8
@@ -159,6 +164,41 @@
#define SDB_INV_CNT_OFFSET 8
#define SDB_ST_CMP_VAL 8
+#define HNS_ROCE_CEQ_DEFAULT_INTERVAL 0x10
+#define HNS_ROCE_CEQ_DEFAULT_BURST_NUM 0x10
+
+#define HNS_ROCE_INT_MASK_DISABLE 0
+#define HNS_ROCE_INT_MASK_ENABLE 1
+
+#define CEQ_REG_OFFSET 0x18
+
+#define HNS_ROCE_CEQE_CEQE_COMP_OWNER_S 0
+
+#define HNS_ROCE_V1_CONS_IDX_M GENMASK(15, 0)
+
+#define HNS_ROCE_CEQE_CEQE_COMP_CQN_S 16
+#define HNS_ROCE_CEQE_CEQE_COMP_CQN_M GENMASK(31, 16)
+
+#define HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S 16
+#define HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M GENMASK(23, 16)
+
+#define HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S 24
+#define HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M GENMASK(30, 24)
+
+#define HNS_ROCE_AEQE_U32_4_OWNER_S 31
+
+#define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S 0
+#define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M GENMASK(23, 0)
+
+#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S 25
+#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M GENMASK(27, 25)
+
+#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S 0
+#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M GENMASK(15, 0)
+
+#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S 0
+#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M GENMASK(4, 0)
+
struct hns_roce_cq_context {
u32 cqc_byte_4;
u32 cq_bt_l;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 8e18445714a9..256fe110107a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -34,6 +34,7 @@
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <net/addrconf.h>
#include <rdma/ib_umem.h>
#include "hnae3.h"
@@ -51,32 +52,106 @@ static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
dseg->len = cpu_to_le32(sg->length);
}
+static int set_rwqe_data_seg(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
+ void *wqe, unsigned int *sge_ind,
+ struct ib_send_wr **bad_wr)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_v2_wqe_data_seg *dseg = wqe;
+ struct hns_roce_qp *qp = to_hr_qp(ibqp);
+ int i;
+
+ if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
+ if (rc_sq_wqe->msg_len > hr_dev->caps.max_sq_inline) {
+ *bad_wr = wr;
+ dev_err(hr_dev->dev, "inline len(1-%d)=%d, illegal",
+ hr_dev->caps.max_sq_inline, rc_sq_wqe->msg_len);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < wr->num_sge; i++) {
+ memcpy(wqe, ((void *)wr->sg_list[i].addr),
+ wr->sg_list[i].length);
+ wqe += wr->sg_list[i].length;
+ }
+
+ roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
+ 1);
+ } else {
+ if (wr->num_sge <= 2) {
+ for (i = 0; i < wr->num_sge; i++) {
+ if (likely(wr->sg_list[i].length)) {
+ set_data_seg_v2(dseg, wr->sg_list + i);
+ dseg++;
+ }
+ }
+ } else {
+ roce_set_field(rc_sq_wqe->byte_20,
+ V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
+ V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
+ (*sge_ind) & (qp->sge.sge_cnt - 1));
+
+ for (i = 0; i < 2; i++) {
+ if (likely(wr->sg_list[i].length)) {
+ set_data_seg_v2(dseg, wr->sg_list + i);
+ dseg++;
+ }
+ }
+
+ dseg = get_send_extend_sge(qp,
+ (*sge_ind) & (qp->sge.sge_cnt - 1));
+
+ for (i = 0; i < wr->num_sge - 2; i++) {
+ if (likely(wr->sg_list[i + 2].length)) {
+ set_data_seg_v2(dseg,
+ wr->sg_list + 2 + i);
+ dseg++;
+ (*sge_ind)++;
+ }
+ }
+ }
+
+ roce_set_field(rc_sq_wqe->byte_16,
+ V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
+ V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, wr->num_sge);
+ }
+
+ return 0;
+}
+
static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
+ struct hns_roce_v2_ud_send_wqe *ud_sq_wqe;
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe;
struct hns_roce_qp *qp = to_hr_qp(ibqp);
struct hns_roce_v2_wqe_data_seg *dseg;
struct device *dev = hr_dev->dev;
struct hns_roce_v2_db sq_db;
unsigned int sge_ind = 0;
- unsigned int wqe_sz = 0;
unsigned int owner_bit;
unsigned long flags;
unsigned int ind;
void *wqe = NULL;
+ bool loopback;
int ret = 0;
+ u8 *smac;
int nreq;
int i;
- if (unlikely(ibqp->qp_type != IB_QPT_RC)) {
+ if (unlikely(ibqp->qp_type != IB_QPT_RC &&
+ ibqp->qp_type != IB_QPT_GSI &&
+ ibqp->qp_type != IB_QPT_UD)) {
dev_err(dev, "Not supported QP(0x%x)type!\n", ibqp->qp_type);
*bad_wr = NULL;
return -EOPNOTSUPP;
}
- if (unlikely(qp->state != IB_QPS_RTS && qp->state != IB_QPS_SQD)) {
+ if (unlikely(qp->state == IB_QPS_RESET || qp->state == IB_QPS_INIT ||
+ qp->state == IB_QPS_RTR)) {
dev_err(dev, "Post WQE fail, QP state %d err!\n", qp->state);
*bad_wr = wr;
return -EINVAL;
@@ -106,161 +181,255 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
wr->wr_id;
owner_bit = ~(qp->sq.head >> ilog2(qp->sq.wqe_cnt)) & 0x1;
- rc_sq_wqe = wqe;
- memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
- for (i = 0; i < wr->num_sge; i++)
- rc_sq_wqe->msg_len += wr->sg_list[i].length;
- rc_sq_wqe->inv_key_immtdata = send_ieth(wr);
+ /* Build the WQE differently according to the QP type */
+ if (ibqp->qp_type == IB_QPT_GSI) {
+ ud_sq_wqe = wqe;
+ memset(ud_sq_wqe, 0, sizeof(*ud_sq_wqe));
+
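+ /* The destination MAC from the AH is split across the
+ * dmac field (bytes 0-3) and byte_48 (bytes 4-5) of the
+ * UD WQE.
+ */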
+ roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_0_M,
+ V2_UD_SEND_WQE_DMAC_0_S, ah->av.mac[0]);
+ roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_1_M,
+ V2_UD_SEND_WQE_DMAC_1_S, ah->av.mac[1]);
+ roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_2_M,
+ V2_UD_SEND_WQE_DMAC_2_S, ah->av.mac[2]);
+ roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_3_M,
+ V2_UD_SEND_WQE_DMAC_3_S, ah->av.mac[3]);
+ roce_set_field(ud_sq_wqe->byte_48,
+ V2_UD_SEND_WQE_BYTE_48_DMAC_4_M,
+ V2_UD_SEND_WQE_BYTE_48_DMAC_4_S,
+ ah->av.mac[4]);
+ roce_set_field(ud_sq_wqe->byte_48,
+ V2_UD_SEND_WQE_BYTE_48_DMAC_5_M,
+ V2_UD_SEND_WQE_BYTE_48_DMAC_5_S,
+ ah->av.mac[5]);
+
+ /* MAC loopback */
+ smac = (u8 *)hr_dev->dev_addr[qp->port];
+ loopback = ether_addr_equal_unaligned(ah->av.mac,
+ smac) ? 1 : 0;
+
+ roce_set_bit(ud_sq_wqe->byte_40,
+ V2_UD_SEND_WQE_BYTE_40_LBI_S, loopback);
+
+ roce_set_field(ud_sq_wqe->byte_4,
+ V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_UD_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_SEND);
- roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_FENCE_S,
- (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);
+ for (i = 0; i < wr->num_sge; i++)
+ ud_sq_wqe->msg_len += wr->sg_list[i].length;
- roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SE_S,
- (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
+ ud_sq_wqe->immtdata = send_ieth(wr);
- roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_CQE_S,
- (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
+ /* Set the signaled completion attribute */
+ roce_set_bit(ud_sq_wqe->byte_4,
+ V2_UD_SEND_WQE_BYTE_4_CQE_S,
+ (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
- roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OWNER_S,
- owner_bit);
+ /* Set the solicited event attribute */
+ roce_set_bit(ud_sq_wqe->byte_4,
+ V2_UD_SEND_WQE_BYTE_4_SE_S,
+ (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
- switch (wr->opcode) {
- case IB_WR_RDMA_READ:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_RDMA_READ);
- rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
- rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
- break;
- case IB_WR_RDMA_WRITE:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_RDMA_WRITE);
- rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
- rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
- break;
- case IB_WR_RDMA_WRITE_WITH_IMM:
- roce_set_field(rc_sq_wqe->byte_4,
+ roce_set_bit(ud_sq_wqe->byte_4,
+ V2_UD_SEND_WQE_BYTE_4_OWNER_S, owner_bit);
+
+ roce_set_field(ud_sq_wqe->byte_16,
+ V2_UD_SEND_WQE_BYTE_16_PD_M,
+ V2_UD_SEND_WQE_BYTE_16_PD_S,
+ to_hr_pd(ibqp->pd)->pdn);
+
+ roce_set_field(ud_sq_wqe->byte_16,
+ V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M,
+ V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S,
+ wr->num_sge);
+
+ roce_set_field(ud_sq_wqe->byte_20,
+ V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
+ V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
+ sge_ind & (qp->sge.sge_cnt - 1));
+
+ roce_set_field(ud_sq_wqe->byte_24,
+ V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
+ V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, 0);
+ /* A remote qkey with the high-order bit set means the
+ * qkey from the QP context should be used instead.
+ */
+ ud_sq_wqe->qkey =
+ cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
+ qp->qkey : ud_wr(wr)->remote_qkey);
+ roce_set_field(ud_sq_wqe->byte_32,
+ V2_UD_SEND_WQE_BYTE_32_DQPN_M,
+ V2_UD_SEND_WQE_BYTE_32_DQPN_S,
+ ud_wr(wr)->remote_qpn);
+
+ roce_set_field(ud_sq_wqe->byte_36,
+ V2_UD_SEND_WQE_BYTE_36_VLAN_M,
+ V2_UD_SEND_WQE_BYTE_36_VLAN_S,
+ ah->av.vlan);
+ roce_set_field(ud_sq_wqe->byte_36,
+ V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
+ V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S,
+ ah->av.hop_limit);
+ roce_set_field(ud_sq_wqe->byte_36,
+ V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
+ V2_UD_SEND_WQE_BYTE_36_TCLASS_S,
+ 0);
+ roce_set_field(ud_sq_wqe->byte_40,
+ V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M,
+ V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S, 0);
+ roce_set_field(ud_sq_wqe->byte_40,
+ V2_UD_SEND_WQE_BYTE_40_SL_M,
+ V2_UD_SEND_WQE_BYTE_40_SL_S,
+ ah->av.sl_tclass_flowlabel >>
+ HNS_ROCE_SL_SHIFT);
+ roce_set_field(ud_sq_wqe->byte_40,
+ V2_UD_SEND_WQE_BYTE_40_PORTN_M,
+ V2_UD_SEND_WQE_BYTE_40_PORTN_S,
+ qp->port);
+
+ roce_set_field(ud_sq_wqe->byte_48,
+ V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M,
+ V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S,
+ hns_get_gid_index(hr_dev, qp->phy_port,
+ ah->av.gid_index));
+
+ memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0],
+ GID_LEN_V2);
+
+ dseg = get_send_extend_sge(qp,
+ sge_ind & (qp->sge.sge_cnt - 1));
+ for (i = 0; i < wr->num_sge; i++) {
+ set_data_seg_v2(dseg + i, wr->sg_list + i);
+ sge_ind++;
+ }
+
+ ind++;
+ } else if (ibqp->qp_type == IB_QPT_RC) {
+ rc_sq_wqe = wqe;
+ memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
+ for (i = 0; i < wr->num_sge; i++)
+ rc_sq_wqe->msg_len += wr->sg_list[i].length;
+
+ rc_sq_wqe->inv_key_immtdata = send_ieth(wr);
+
+ roce_set_bit(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_FENCE_S,
+ (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);
+
+ roce_set_bit(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_SE_S,
+ (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
+
+ roce_set_bit(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_CQE_S,
+ (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
+
+ roce_set_bit(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OWNER_S, owner_bit);
+
+ switch (wr->opcode) {
+ case IB_WR_RDMA_READ:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_RDMA_READ);
+ rc_sq_wqe->rkey =
+ cpu_to_le32(rdma_wr(wr)->rkey);
+ rc_sq_wqe->va =
+ cpu_to_le64(rdma_wr(wr)->remote_addr);
+ break;
+ case IB_WR_RDMA_WRITE:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_RDMA_WRITE);
+ rc_sq_wqe->rkey =
+ cpu_to_le32(rdma_wr(wr)->rkey);
+ rc_sq_wqe->va =
+ cpu_to_le64(rdma_wr(wr)->remote_addr);
+ break;
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ roce_set_field(rc_sq_wqe->byte_4,
V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM);
- rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
- rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
- break;
- case IB_WR_SEND:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_SEND);
- break;
- case IB_WR_SEND_WITH_INV:
- roce_set_field(rc_sq_wqe->byte_4,
+ rc_sq_wqe->rkey =
+ cpu_to_le32(rdma_wr(wr)->rkey);
+ rc_sq_wqe->va =
+ cpu_to_le64(rdma_wr(wr)->remote_addr);
+ break;
+ case IB_WR_SEND:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_SEND);
+ break;
+ case IB_WR_SEND_WITH_INV:
+ roce_set_field(rc_sq_wqe->byte_4,
V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
HNS_ROCE_V2_WQE_OP_SEND_WITH_INV);
- break;
- case IB_WR_SEND_WITH_IMM:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM);
- break;
- case IB_WR_LOCAL_INV:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_LOCAL_INV);
- break;
- case IB_WR_ATOMIC_CMP_AND_SWP:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP);
- break;
- case IB_WR_ATOMIC_FETCH_AND_ADD:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD);
- break;
- case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
- roce_set_field(rc_sq_wqe->byte_4,
+ break;
+ case IB_WR_SEND_WITH_IMM:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM);
+ break;
+ case IB_WR_LOCAL_INV:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_LOCAL_INV);
+ break;
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP);
+ break;
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD);
+ break;
+ case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
+ roce_set_field(rc_sq_wqe->byte_4,
V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP);
- break;
- case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
- roce_set_field(rc_sq_wqe->byte_4,
+ break;
+ case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
+ roce_set_field(rc_sq_wqe->byte_4,
V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD);
- break;
- default:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_MASK);
- break;
- }
-
- wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
- dseg = wqe;
- if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
- if (rc_sq_wqe->msg_len >
- hr_dev->caps.max_sq_inline) {
- ret = -EINVAL;
- *bad_wr = wr;
- dev_err(dev, "inline len(1-%d)=%d, illegal",
- rc_sq_wqe->msg_len,
- hr_dev->caps.max_sq_inline);
- goto out;
+ break;
+ default:
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
+ HNS_ROCE_V2_WQE_OP_MASK);
+ break;
}
- for (i = 0; i < wr->num_sge; i++) {
- memcpy(wqe, ((void *)wr->sg_list[i].addr),
- wr->sg_list[i].length);
- wqe += wr->sg_list[i].length;
- wqe_sz += wr->sg_list[i].length;
- }
+ wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
+ dseg = wqe;
- roce_set_bit(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_INLINE_S, 1);
+ ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe, wqe,
+ &sge_ind, bad_wr);
+ if (ret)
+ goto out;
+ ind++;
} else {
- if (wr->num_sge <= 2) {
- for (i = 0; i < wr->num_sge; i++)
- set_data_seg_v2(dseg + i,
- wr->sg_list + i);
- } else {
- roce_set_field(rc_sq_wqe->byte_20,
- V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
- V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
- sge_ind & (qp->sge.sge_cnt - 1));
-
- for (i = 0; i < 2; i++)
- set_data_seg_v2(dseg + i,
- wr->sg_list + i);
-
- dseg = get_send_extend_sge(qp,
- sge_ind & (qp->sge.sge_cnt - 1));
-
- for (i = 0; i < wr->num_sge - 2; i++) {
- set_data_seg_v2(dseg + i,
- wr->sg_list + 2 + i);
- sge_ind++;
- }
- }
-
- roce_set_field(rc_sq_wqe->byte_16,
- V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
- V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S,
- wr->num_sge);
- wqe_sz += wr->num_sge *
- sizeof(struct hns_roce_v2_wqe_data_seg);
+ dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type);
+ spin_unlock_irqrestore(&qp->sq.lock, flags);
+ return -EOPNOTSUPP;
}
- ind++;
}
out:
@@ -299,6 +468,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct hns_roce_v2_wqe_data_seg *dseg;
+ struct hns_roce_rinl_sge *sge_list;
struct device *dev = hr_dev->dev;
struct hns_roce_v2_db rq_db;
unsigned long flags;
@@ -347,6 +517,14 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
dseg[i].addr = 0;
}
+ /* The RQ supports inline data; record the posted SGEs */
+ sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list;
+ hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt = (u32)wr->num_sge;
+ for (i = 0; i < wr->num_sge; i++) {
+ sge_list[i].addr = (void *)(u64)wr->sg_list[i].addr;
+ sge_list[i].len = wr->sg_list[i].length;
+ }
+
hr_qp->rq.wrid[ind] = wr->wr_id;
ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
@@ -908,9 +1086,9 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
caps->num_uars = HNS_ROCE_V2_UAR_NUM;
caps->phy_num_uars = HNS_ROCE_V2_PHY_UAR_NUM;
- caps->num_aeq_vectors = 1;
- caps->num_comp_vectors = 63;
- caps->num_other_vectors = 0;
+ caps->num_aeq_vectors = HNS_ROCE_V2_AEQE_VEC_NUM;
+ caps->num_comp_vectors = HNS_ROCE_V2_COMP_VEC_NUM;
+ caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM;
caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM;
caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
@@ -955,12 +1133,18 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
caps->cqe_ba_pg_sz = 0;
caps->cqe_buf_pg_sz = 0;
caps->cqe_hop_num = HNS_ROCE_CQE_HOP_NUM;
+ caps->eqe_ba_pg_sz = 0;
+ caps->eqe_buf_pg_sz = 0;
+ caps->eqe_hop_num = HNS_ROCE_EQE_HOP_NUM;
caps->chunk_sz = HNS_ROCE_V2_TABLE_CHUNK_SIZE;
caps->flags = HNS_ROCE_CAP_FLAG_REREG_MR |
- HNS_ROCE_CAP_FLAG_ROCE_V1_V2;
+ HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
+ HNS_ROCE_CAP_FLAG_RQ_INLINE;
caps->pkey_table_len[0] = 1;
caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
+ caps->ceqe_depth = HNS_ROCE_V2_COMP_EQE_NUM;
+ caps->aeqe_depth = HNS_ROCE_V2_ASYNC_EQE_NUM;
caps->local_ca_ack_delay = 0;
caps->max_mtu = IB_MTU_4096;
@@ -1382,6 +1566,8 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CQ_ST_M,
V2_CQC_BYTE_4_CQ_ST_S, V2_CQ_STATE_VALID);
+ roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_ARM_ST_M,
+ V2_CQC_BYTE_4_ARM_ST_S, REG_NXT_CEQE);
roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M,
V2_CQC_BYTE_4_SHIFT_S, ilog2((unsigned int)nent));
roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
@@ -1422,6 +1608,15 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M,
V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3)));
+
+ roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
+ V2_CQC_BYTE_56_CQ_MAX_CNT_M,
+ V2_CQC_BYTE_56_CQ_MAX_CNT_S,
+ HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM);
+ roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
+ V2_CQC_BYTE_56_CQ_PERIOD_M,
+ V2_CQC_BYTE_56_CQ_PERIOD_S,
+ HNS_ROCE_V2_CQ_DEFAULT_INTERVAL);
}
static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
@@ -1457,6 +1652,40 @@ static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
return 0;
}
+static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
+ struct hns_roce_qp **cur_qp,
+ struct ib_wc *wc)
+{
+ struct hns_roce_rinl_sge *sge_list;
+ u32 wr_num, wr_cnt, sge_num;
+ u32 sge_cnt, data_len, size;
+ void *wqe_buf;
+
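+ /* Locate the receive WQE this CQE completed, then scatter its
+ * inline payload into the SGEs the application posted.
+ */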
+ wr_num = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_WQE_INDX_M,
+ V2_CQE_BYTE_4_WQE_INDX_S) & 0xffff;
+ wr_cnt = wr_num & ((*cur_qp)->rq.wqe_cnt - 1);
+
+ sge_list = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sg_list;
+ sge_num = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
+ wqe_buf = get_recv_wqe(*cur_qp, wr_cnt);
+ data_len = wc->byte_len;
+
+ for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
+ size = min(sge_list[sge_cnt].len, data_len);
+ memcpy((void *)sge_list[sge_cnt].addr, wqe_buf, size);
+
+ data_len -= size;
+ wqe_buf += size;
+ }
+
+ if (data_len) {
+ wc->status = IB_WC_LOC_LEN_ERR;
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
struct hns_roce_qp **cur_qp, struct ib_wc *wc)
{
@@ -1469,6 +1698,7 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
u32 opcode;
u32 status;
int qpn;
+ int ret;
/* Find cqe according to consumer index */
cqe = next_cqe_sw_v2(hr_cq);
@@ -1636,7 +1866,7 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data = le32_to_cpu(cqe->rkey_immtdata);
+ wc->ex.imm_data = cqe->immtdata;
break;
case HNS_ROCE_V2_OPCODE_SEND:
wc->opcode = IB_WC_RECV;
@@ -1645,18 +1875,29 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
wc->opcode = IB_WC_RECV;
wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data = le32_to_cpu(cqe->rkey_immtdata);
+ wc->ex.imm_data = cqe->immtdata;
break;
case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
wc->opcode = IB_WC_RECV;
wc->wc_flags = IB_WC_WITH_INVALIDATE;
- wc->ex.invalidate_rkey = cqe->rkey_immtdata;
+ wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey);
break;
default:
wc->status = IB_WC_GENERAL_ERR;
break;
}
+ if ((wc->qp->qp_type == IB_QPT_RC ||
+ wc->qp->qp_type == IB_QPT_UC) &&
+ (opcode == HNS_ROCE_V2_OPCODE_SEND ||
+ opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
+ opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
+ (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S))) {
+ ret = hns_roce_handle_recv_inl_wqe(cqe, cur_qp, wc);
+ if (ret)
+ return -EAGAIN;
+ }
+
/* Update tail pointer, record wr_id */
wq = &(*cur_qp)->rq;
wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
@@ -1670,6 +1911,21 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
wc->wc_flags |= (roce_get_bit(cqe->byte_32,
V2_CQE_BYTE_32_GRH_S) ?
IB_WC_GRH : 0);
+ wc->port_num = roce_get_field(cqe->byte_32,
+ V2_CQE_BYTE_32_PORTN_M, V2_CQE_BYTE_32_PORTN_S);
+ wc->pkey_index = 0;
+ memcpy(wc->smac, cqe->smac, 4);
+ wc->smac[4] = roce_get_field(cqe->byte_28,
+ V2_CQE_BYTE_28_SMAC_4_M,
+ V2_CQE_BYTE_28_SMAC_4_S);
+ wc->smac[5] = roce_get_field(cqe->byte_28,
+ V2_CQE_BYTE_28_SMAC_5_M,
+ V2_CQE_BYTE_28_SMAC_5_S);
+ wc->vlan_id = 0xffff;
+ wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
+ wc->network_hdr_type = roce_get_field(cqe->byte_28,
+ V2_CQE_BYTE_28_PORT_TYPE_M,
+ V2_CQE_BYTE_28_PORT_TYPE_S);
}
return 0;
@@ -1859,8 +2115,39 @@ static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
return ret;
}
+static void set_access_flags(struct hns_roce_qp *hr_qp,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask,
+ const struct ib_qp_attr *attr, int attr_mask)
+{
+ u8 dest_rd_atomic;
+ u32 access_flags;
+
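+ /* Use the attribute values when the mask carries them, otherwise
+ * fall back to the values cached in the QP.
+ */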
+ dest_rd_atomic = !!(attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) ?
+ attr->max_dest_rd_atomic : hr_qp->resp_depth;
+
+ access_flags = !!(attr_mask & IB_QP_ACCESS_FLAGS) ?
+ attr->qp_access_flags : hr_qp->atomic_rd_en;
+
+ if (!dest_rd_atomic)
+ access_flags &= IB_ACCESS_REMOTE_WRITE;
+
+ roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
+ !!(access_flags & IB_ACCESS_REMOTE_READ));
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0);
+
+ roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
+ !!(access_flags & IB_ACCESS_REMOTE_WRITE));
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0);
+
+ roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
+ !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
+}
+
static void modify_qp_reset_to_init(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
+ int attr_mask,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
{
@@ -1877,9 +2164,18 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
V2_QPC_BYTE_4_TST_S, 0);
- roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
- V2_QPC_BYTE_4_SGE_SHIFT_S, hr_qp->sq.max_gs > 2 ?
- ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
+ if (ibqp->qp_type == IB_QPT_GSI)
+ roce_set_field(context->byte_4_sqpn_tst,
+ V2_QPC_BYTE_4_SGE_SHIFT_M,
+ V2_QPC_BYTE_4_SGE_SHIFT_S,
+ ilog2((unsigned int)hr_qp->sge.sge_cnt));
+ else
+ roce_set_field(context->byte_4_sqpn_tst,
+ V2_QPC_BYTE_4_SGE_SHIFT_M,
+ V2_QPC_BYTE_4_SGE_SHIFT_S,
+ hr_qp->sq.max_gs > 2 ?
+ ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
+
roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
@@ -1944,18 +2240,13 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CNP_TX_FLAG_S, 0);
roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CE_FLAG_S, 0);
- roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
- !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
- roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0);
-
- roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
- !!(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE));
- roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0);
-
- roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
- !!(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC));
- roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
+ if (attr_mask & IB_QP_QKEY) {
+ context->qkey_xrcd = attr->qkey;
+ qpc_mask->qkey_xrcd = 0;
+ hr_qp->qkey = attr->qkey;
+ }
+ roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 1);
roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0);
roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
@@ -2176,9 +2467,17 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
V2_QPC_BYTE_4_TST_S, 0);
- roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
- V2_QPC_BYTE_4_SGE_SHIFT_S, hr_qp->sq.max_gs > 2 ?
- ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
+ if (ibqp->qp_type == IB_QPT_GSI)
+ roce_set_field(context->byte_4_sqpn_tst,
+ V2_QPC_BYTE_4_SGE_SHIFT_M,
+ V2_QPC_BYTE_4_SGE_SHIFT_S,
+ ilog2((unsigned int)hr_qp->sge.sge_cnt));
+ else
+ roce_set_field(context->byte_4_sqpn_tst,
+ V2_QPC_BYTE_4_SGE_SHIFT_M,
+ V2_QPC_BYTE_4_SGE_SHIFT_S, hr_qp->sq.max_gs > 2 ?
+ ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
+
roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
@@ -2239,7 +2538,7 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
V2_QPC_BYTE_80_RX_CQN_S, 0);
roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
- V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
+ V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
V2_QPC_BYTE_252_TX_CQN_S, 0);
@@ -2255,10 +2554,10 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
}
- if (attr_mask & IB_QP_PKEY_INDEX)
- context->qkey_xrcd = attr->pkey_index;
- else
- context->qkey_xrcd = hr_qp->pkey_index;
+ if (attr_mask & IB_QP_QKEY) {
+ context->qkey_xrcd = attr->qkey;
+ qpc_mask->qkey_xrcd = 0;
+ }
roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
@@ -2354,7 +2653,8 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(context->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SGE_HOP_NUM_M,
V2_QPC_BYTE_20_SGE_HOP_NUM_S,
- hr_qp->sq.max_gs > 2 ? hr_dev->caps.mtt_hop_num : 0);
+ ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
+ hr_dev->caps.mtt_hop_num : 0);
roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
V2_QPC_BYTE_20_SGE_HOP_NUM_M,
V2_QPC_BYTE_20_SGE_HOP_NUM_S, 0);
@@ -2463,11 +2763,14 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
}
- roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
- V2_QPC_BYTE_140_RR_MAX_S,
- ilog2((unsigned int)attr->max_dest_rd_atomic));
- roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
- V2_QPC_BYTE_140_RR_MAX_S, 0);
+ if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
+ attr->max_dest_rd_atomic) {
+ roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
+ V2_QPC_BYTE_140_RR_MAX_S,
+ fls(attr->max_dest_rd_atomic - 1));
+ roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
+ V2_QPC_BYTE_140_RR_MAX_S, 0);
+ }
roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
@@ -2511,8 +2814,13 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
V2_QPC_BYTE_24_TC_S, 0);
- roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
- V2_QPC_BYTE_24_MTU_S, attr->path_mtu);
+ if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
+ roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
+ V2_QPC_BYTE_24_MTU_S, IB_MTU_4096);
+ else
+ roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
+ V2_QPC_BYTE_24_MTU_S, attr->path_mtu);
+
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
V2_QPC_BYTE_24_MTU_S, 0);
@@ -2557,12 +2865,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
V2_QPC_BYTE_168_LP_SGEN_INI_M,
V2_QPC_BYTE_168_LP_SGEN_INI_S, 0);
- roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
- V2_QPC_BYTE_208_SR_MAX_S,
- ilog2((unsigned int)attr->max_rd_atomic));
- roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
- V2_QPC_BYTE_208_SR_MAX_S, 0);
-
roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
V2_QPC_BYTE_28_SL_S, rdma_ah_get_sl(&attr->ah_attr));
roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
@@ -2625,13 +2927,14 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);
page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
- context->sq_cur_sge_blk_addr = hr_qp->sq.max_gs > 2 ?
+ context->sq_cur_sge_blk_addr =
+ ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
((u32)(mtts[hr_qp->sge.offset / page_size]
>> PAGE_ADDR_SHIFT)) : 0;
roce_set_field(context->byte_184_irrl_idx,
V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
- hr_qp->sq.max_gs > 2 ?
+ ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs > 2) ?
(mtts[hr_qp->sge.offset / page_size] >>
(32 + PAGE_ADDR_SHIFT)) : 0);
qpc_mask->sq_cur_sge_blk_addr = 0;
@@ -2766,6 +3069,14 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
+ if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
+ roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
+ V2_QPC_BYTE_208_SR_MAX_S,
+ fls(attr->max_rd_atomic - 1));
+ roce_set_field(qpc_mask->byte_208_irrl,
+ V2_QPC_BYTE_208_SR_MAX_M,
+ V2_QPC_BYTE_208_SR_MAX_S, 0);
+ }
return 0;
}
@@ -2794,7 +3105,8 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
*/
memset(qpc_mask, 0xff, sizeof(*qpc_mask));
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
- modify_qp_reset_to_init(ibqp, attr, context, qpc_mask);
+ modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
+ qpc_mask);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
modify_qp_init_to_init(ibqp, attr, attr_mask, context,
qpc_mask);
@@ -2829,6 +3141,9 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
goto out;
}
+ if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
+ set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
+
/* Every status migrate must change state */
roce_set_field(context->byte_60_qpst_mapid, V2_QPC_BYTE_60_QP_ST_M,
V2_QPC_BYTE_60_QP_ST_S, new_state);
@@ -2845,6 +3160,9 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
hr_qp->state = new_state;
+ if (attr_mask & IB_QP_ACCESS_FLAGS)
+ hr_qp->atomic_rd_en = attr->qp_access_flags;
+
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
hr_qp->resp_depth = attr->max_dest_rd_atomic;
if (attr_mask & IB_QP_PORT) {
@@ -3098,6 +3416,11 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
}
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
+ kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
+ kfree(hr_qp->rq_inl_buf.wqe_list);
+ }
+
return 0;
}
@@ -3162,6 +3485,1146 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
return ret;
}
+static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
+{
+ u32 doorbell[2];
+
+ doorbell[0] = 0;
+ doorbell[1] = 0;
+
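+ /* doorbell[0] carries the arm command (and, for CEQs, the EQ tag);
+ * doorbell[1] carries the new consumer index. The pair is written
+ * as one 64-bit doorbell.
+ */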
+ if (eq->type_flag == HNS_ROCE_AEQ) {
+ roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
+ HNS_ROCE_V2_EQ_DB_CMD_S,
+ eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
+ HNS_ROCE_EQ_DB_CMD_AEQ :
+ HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
+ } else {
+ roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_TAG_M,
+ HNS_ROCE_V2_EQ_DB_TAG_S, eq->eqn);
+
+ roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
+ HNS_ROCE_V2_EQ_DB_CMD_S,
+ eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
+ HNS_ROCE_EQ_DB_CMD_CEQ :
+ HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
+ }
+
+ roce_set_field(doorbell[1], HNS_ROCE_V2_EQ_DB_PARA_M,
+ HNS_ROCE_V2_EQ_DB_PARA_S,
+ (eq->cons_index & HNS_ROCE_V2_CONS_IDX_M));
+
+ hns_roce_write64_k(doorbell, eq->doorbell);
+}
+
+static void hns_roce_v2_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe,
+ u32 qpn)
+{
+ struct device *dev = hr_dev->dev;
+ int sub_type;
+
+ dev_warn(dev, "Local work queue catastrophic error.\n");
+ sub_type = roce_get_field(aeqe->asyn, HNS_ROCE_V2_AEQE_SUB_TYPE_M,
+ HNS_ROCE_V2_AEQE_SUB_TYPE_S);
+ switch (sub_type) {
+ case HNS_ROCE_LWQCE_QPC_ERROR:
+ dev_warn(dev, "QP %d, QPC error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_MTU_ERROR:
+ dev_warn(dev, "QP %d, MTU error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
+ dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
+ dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
+ dev_warn(dev, "QP %d, WQE shift error.\n", qpn);
+ break;
+ default:
+ dev_err(dev, "Unhandled sub_event type %d.\n", sub_type);
+ break;
+ }
+}
+
+static void hns_roce_v2_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe, u32 qpn)
+{
+ struct device *dev = hr_dev->dev;
+ int sub_type;
+
+ dev_warn(dev, "Local access violation work queue error.\n");
+ sub_type = roce_get_field(aeqe->asyn, HNS_ROCE_V2_AEQE_SUB_TYPE_M,
+ HNS_ROCE_V2_AEQE_SUB_TYPE_S);
+ switch (sub_type) {
+ case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
+ dev_warn(dev, "QP %d, R_key violation.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_LENGTH_ERROR:
+ dev_warn(dev, "QP %d, length error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_VA_ERROR:
+ dev_warn(dev, "QP %d, VA error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_PD_ERROR:
+ dev_err(dev, "QP %d, PD error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
+ dev_warn(dev, "QP %d, rw acc error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
+ dev_warn(dev, "QP %d, key state error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
+ dev_warn(dev, "QP %d, MR operation error.\n", qpn);
+ break;
+ default:
+ dev_err(dev, "Unhandled sub_event type %d.\n", sub_type);
+ break;
+ }
+}
+
+static void hns_roce_v2_qp_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe,
+ int event_type)
+{
+ struct device *dev = hr_dev->dev;
+ u32 qpn;
+
+ qpn = roce_get_field(aeqe->event.qp_event.qp,
+ HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
+ HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
+
+ switch (event_type) {
+ case HNS_ROCE_EVENT_TYPE_COMM_EST:
+ dev_warn(dev, "Communication established.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
+ dev_warn(dev, "Send queue drained.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+ hns_roce_v2_wq_catas_err_handle(hr_dev, aeqe, qpn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+ dev_warn(dev, "Invalid request local work queue error.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+ hns_roce_v2_local_wq_access_err_handle(hr_dev, aeqe, qpn);
+ break;
+ default:
+ break;
+ }
+
+ hns_roce_qp_event(hr_dev, qpn, event_type);
+}
+
+static void hns_roce_v2_cq_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe,
+ int event_type)
+{
+ struct device *dev = hr_dev->dev;
+ u32 cqn;
+
+ cqn = roce_get_field(aeqe->event.cq_event.cq,
+ HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
+ HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
+
+ switch (event_type) {
+ case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
+ dev_warn(dev, "CQ 0x%x access error.\n", cqn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
+ dev_warn(dev, "CQ 0x%x overflow.\n", cqn);
+ break;
+ default:
+ break;
+ }
+
+ hns_roce_cq_event(hr_dev, cqn, event_type);
+}
+
+static struct hns_roce_aeqe *get_aeqe_v2(struct hns_roce_eq *eq, u32 entry)
+{
+ u32 buf_chk_sz;
+ unsigned long off;
+
+ buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
+ off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
+
+ return (struct hns_roce_aeqe *)((char *)(eq->buf_list->buf) +
+ off % buf_chk_sz);
+}
+
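+ /* Multi-hop EQ buffers: with hop_num 0 all entries live directly in
+ * the l0 buffer, otherwise the entry's offset selects one of the
+ * mapped buffer pages.
+ */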
+static struct hns_roce_aeqe *mhop_get_aeqe(struct hns_roce_eq *eq, u32 entry)
+{
+ u32 buf_chk_sz;
+ unsigned long off;
+
+ buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
+
+ off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
+
+ if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
+ return (struct hns_roce_aeqe *)((u8 *)(eq->bt_l0) +
+ off % buf_chk_sz);
+ else
+ return (struct hns_roce_aeqe *)((u8 *)
+ (eq->buf[off / buf_chk_sz]) + off % buf_chk_sz);
+}
+
+static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
+{
+ struct hns_roce_aeqe *aeqe;
+
+ if (!eq->hop_num)
+ aeqe = get_aeqe_v2(eq, eq->cons_index);
+ else
+ aeqe = mhop_get_aeqe(eq, eq->cons_index);
+
+ return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^
+ !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
+}
+
+static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_aeqe *aeqe;
+ int aeqe_found = 0;
+ int event_type;
+
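+ /* Drain every new AEQE, dispatching each by event type, then write
+ * the consumer index back once.
+ */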
+ while ((aeqe = next_aeqe_sw_v2(eq))) {
+
+ /* Make sure we read AEQ entry after we have checked the
+ * ownership bit
+ */
+ dma_rmb();
+
+ event_type = roce_get_field(aeqe->asyn,
+ HNS_ROCE_V2_AEQE_EVENT_TYPE_M,
+ HNS_ROCE_V2_AEQE_EVENT_TYPE_S);
+
+ switch (event_type) {
+ case HNS_ROCE_EVENT_TYPE_PATH_MIG:
+ dev_warn(dev, "Path migrated succeeded.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
+ dev_warn(dev, "Path migration failed.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_COMM_EST:
+ case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
+ case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+ case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+ case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+ hns_roce_v2_qp_err_handle(hr_dev, aeqe, event_type);
+ break;
+ case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
+ case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
+ case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
+ dev_warn(dev, "SRQ not support.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
+ case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
+ hns_roce_v2_cq_err_handle(hr_dev, aeqe, event_type);
+ break;
+ case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
+ dev_warn(dev, "DB overflow.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_MB:
+ hns_roce_cmd_event(hr_dev,
+ le16_to_cpu(aeqe->event.cmd.token),
+ aeqe->event.cmd.status,
+ le64_to_cpu(aeqe->event.cmd.out_param));
+ break;
+ case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
+ dev_warn(dev, "CEQ overflow.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_FLR:
+ dev_warn(dev, "Function level reset.\n");
+ break;
+ default:
+ dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
+ event_type, eq->eqn, eq->cons_index);
+ break;
+ }
+
+ ++eq->cons_index;
+ aeqe_found = 1;
+
+ if (eq->cons_index > (2 * eq->entries - 1)) {
+ dev_warn(dev, "cons_index overflow, set back to 0.\n");
+ eq->cons_index = 0;
+ }
+ }
+
+ set_eq_cons_index_v2(eq);
+ return aeqe_found;
+}
+
+static struct hns_roce_ceqe *get_ceqe_v2(struct hns_roce_eq *eq, u32 entry)
+{
+ u32 buf_chk_sz;
+ unsigned long off;
+
+ buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
+ off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
+
+ return (struct hns_roce_ceqe *)((char *)(eq->buf_list->buf) +
+ off % buf_chk_sz);
+}
+
+static struct hns_roce_ceqe *mhop_get_ceqe(struct hns_roce_eq *eq, u32 entry)
+{
+ u32 buf_chk_sz;
+ unsigned long off;
+
+ buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
+
+ off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
+
+ if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
+ return (struct hns_roce_ceqe *)((u8 *)(eq->bt_l0) +
+ off % buf_chk_sz);
+ else
+ return (struct hns_roce_ceqe *)((u8 *)(eq->buf[off /
+ buf_chk_sz]) + off % buf_chk_sz);
+}
+
+static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
+{
+ struct hns_roce_ceqe *ceqe;
+
+ if (!eq->hop_num)
+ ceqe = get_ceqe_v2(eq, eq->cons_index);
+ else
+ ceqe = mhop_get_ceqe(eq, eq->cons_index);
+
+ return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S))) ^
+ (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
+}
+
+static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_ceqe *ceqe;
+ int ceqe_found = 0;
+ u32 cqn;
+
+ while ((ceqe = next_ceqe_sw_v2(eq))) {
+
+ /* Make sure we read CEQ entry after we have checked the
+ * ownership bit
+ */
+ dma_rmb();
+
+ cqn = roce_get_field(ceqe->comp,
+ HNS_ROCE_V2_CEQE_COMP_CQN_M,
+ HNS_ROCE_V2_CEQE_COMP_CQN_S);
+
+ hns_roce_cq_completion(hr_dev, cqn);
+
+ ++eq->cons_index;
+ ceqe_found = 1;
+
+ if (eq->cons_index > (2 * eq->entries - 1)) {
+ dev_warn(dev, "cons_index overflow, set back to 0.\n");
+ eq->cons_index = 0;
+ }
+ }
+
+ set_eq_cons_index_v2(eq);
+
+ return ceqe_found;
+}
+
+static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
+{
+ struct hns_roce_eq *eq = eq_ptr;
+ struct hns_roce_dev *hr_dev = eq->hr_dev;
+ int int_work = 0;
+
+ if (eq->type_flag == HNS_ROCE_CEQ)
+ /* Completion event interrupt */
+ int_work = hns_roce_v2_ceq_int(hr_dev, eq);
+ else
+ /* Asynchronous event interrupt */
+ int_work = hns_roce_v2_aeq_int(hr_dev, eq);
+
+ return IRQ_RETVAL(int_work);
+}
+
+static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
+{
+ struct hns_roce_dev *hr_dev = dev_id;
+ struct device *dev = hr_dev->dev;
+ int int_work = 0;
+ u32 int_st;
+ u32 int_en;
+
+ /* Abnormal interrupt */
+ int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
+ int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);
+
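+ /* acknowledge the event by writing its status bit back, then keep
+ * the abnormal interrupt enabled so further events are reported
+ */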
+ if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
+ dev_err(dev, "AEQ overflow!\n");
+
+ roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S, 1);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
+
+ roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
+
+ int_work = 1;
+ } else if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S)) {
+ dev_err(dev, "BUS ERR!\n");
+
+ roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S, 1);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
+
+ roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
+
+ int_work = 1;
+ } else if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S)) {
+ dev_err(dev, "OTHER ERR!\n");
+
+ roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S, 1);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
+
+ roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
+
+ int_work = 1;
+ } else
+ dev_err(dev, "There is no abnormal irq found!\n");
+
+ return IRQ_RETVAL(int_work);
+}
+
+static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
+ int eq_num, int enable_flag)
+{
+ int i;
+
+ if (enable_flag == EQ_ENABLE) {
+ for (i = 0; i < eq_num; i++)
+ roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
+ i * EQ_REG_OFFSET,
+ HNS_ROCE_V2_VF_EVENT_INT_EN_M);
+
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
+ HNS_ROCE_V2_VF_ABN_INT_EN_M);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
+ HNS_ROCE_V2_VF_ABN_INT_CFG_M);
+ } else {
+ for (i = 0; i < eq_num; i++)
+ roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
+ i * EQ_REG_OFFSET,
+ HNS_ROCE_V2_VF_EVENT_INT_EN_M & 0x0);
+
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
+ HNS_ROCE_V2_VF_ABN_INT_EN_M & 0x0);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
+ HNS_ROCE_V2_VF_ABN_INT_CFG_M & 0x0);
+ }
+}
+
+static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
+{
+ struct device *dev = hr_dev->dev;
+ int ret;
+
+ if (eqn < hr_dev->caps.num_comp_vectors)
+ ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
+ 0, HNS_ROCE_CMD_DESTROY_CEQC,
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
+ else
+ ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
+ 0, HNS_ROCE_CMD_DESTROY_AEQC,
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
+ if (ret)
+ dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn);
+}
+
+static void hns_roce_mhop_free_eq(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ struct device *dev = hr_dev->dev;
+ u64 idx;
+ u64 size;
+ u32 buf_chk_sz;
+ u32 bt_chk_sz;
+ u32 mhop_num;
+ int eqe_alloc;
+ int ba_num;
+ int i = 0;
+ int j = 0;
+
+ mhop_num = hr_dev->caps.eqe_hop_num;
+ buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
+ bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
+ ba_num = (PAGE_ALIGN(eq->entries * eq->eqe_size) + buf_chk_sz - 1) /
+ buf_chk_sz;
+
+ /* hop_num = 0 */
+ if (mhop_num == HNS_ROCE_HOP_NUM_0) {
+ dma_free_coherent(dev, (unsigned int)(eq->entries *
+ eq->eqe_size), eq->bt_l0, eq->l0_dma);
+ return;
+ }
+
+ /* hop_num = 1 or hop_num = 2 */
+ dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
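+ /* the last buffer of a level may be partially used; compute its
+ * real size from the EQEs remaining past the full chunks
+ */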
+ if (mhop_num == 1) {
+ for (i = 0; i < eq->l0_last_num; i++) {
+ if (i == eq->l0_last_num - 1) {
+ eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
+ size = (eq->entries - eqe_alloc) * eq->eqe_size;
+ dma_free_coherent(dev, size, eq->buf[i],
+ eq->buf_dma[i]);
+ break;
+ }
+ dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
+ eq->buf_dma[i]);
+ }
+ } else if (mhop_num == 2) {
+ for (i = 0; i < eq->l0_last_num; i++) {
+ dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
+ eq->l1_dma[i]);
+
+ for (j = 0; j < bt_chk_sz / 8; j++) {
+ idx = i * (bt_chk_sz / 8) + j;
+ if ((i == eq->l0_last_num - 1)
+ && j == eq->l1_last_num - 1) {
+ eqe_alloc = (buf_chk_sz / eq->eqe_size)
+ * idx;
+ size = (eq->entries - eqe_alloc)
+ * eq->eqe_size;
+ dma_free_coherent(dev, size,
+ eq->buf[idx],
+ eq->buf_dma[idx]);
+ break;
+ }
+ dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
+ eq->buf_dma[idx]);
+ }
+ }
+ }
+ kfree(eq->buf_dma);
+ kfree(eq->buf);
+ kfree(eq->l1_dma);
+ kfree(eq->bt_l1);
+ eq->buf_dma = NULL;
+ eq->buf = NULL;
+ eq->l1_dma = NULL;
+ eq->bt_l1 = NULL;
+}
+
+static void hns_roce_v2_free_eq(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ u32 buf_chk_sz;
+
+ buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
+
+ if (hr_dev->caps.eqe_hop_num) {
+ hns_roce_mhop_free_eq(hr_dev, eq);
+ return;
+ }
+
+ if (eq->buf_list)
+ dma_free_coherent(hr_dev->dev, buf_chk_sz,
+ eq->buf_list->buf, eq->buf_list->map);
+}
+
+static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq,
+ void *mb_buf)
+{
+ struct hns_roce_eq_context *eqc;
+
+ eqc = mb_buf;
+ memset(eqc, 0, sizeof(struct hns_roce_eq_context));
+
+ /* init eqc */
+ eq->doorbell = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
+ eq->hop_num = hr_dev->caps.eqe_hop_num;
+ eq->cons_index = 0;
+ eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
+ eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
+ eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
+ eq->eqe_ba_pg_sz = hr_dev->caps.eqe_ba_pg_sz;
+ eq->eqe_buf_pg_sz = hr_dev->caps.eqe_buf_pg_sz;
+ eq->shift = ilog2((unsigned int)eq->entries);
+
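+ /* with no hops the flat buffer is the EQE base address; otherwise
+ * the hardware walks the tables starting from the L0 BT page
+ */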
+ if (!eq->hop_num)
+ eq->eqe_ba = eq->buf_list->map;
+ else
+ eq->eqe_ba = eq->l0_dma;
+
+ /* set eqc state */
+ roce_set_field(eqc->byte_4,
+ HNS_ROCE_EQC_EQ_ST_M,
+ HNS_ROCE_EQC_EQ_ST_S,
+ HNS_ROCE_V2_EQ_STATE_VALID);
+
+ /* set eqe hop num */
+ roce_set_field(eqc->byte_4,
+ HNS_ROCE_EQC_HOP_NUM_M,
+ HNS_ROCE_EQC_HOP_NUM_S, eq->hop_num);
+
+ /* set eqc over_ignore */
+ roce_set_field(eqc->byte_4,
+ HNS_ROCE_EQC_OVER_IGNORE_M,
+ HNS_ROCE_EQC_OVER_IGNORE_S, eq->over_ignore);
+
+ /* set eqc coalesce */
+ roce_set_field(eqc->byte_4,
+ HNS_ROCE_EQC_COALESCE_M,
+ HNS_ROCE_EQC_COALESCE_S, eq->coalesce);
+
+ /* set eqc arm_state */
+ roce_set_field(eqc->byte_4,
+ HNS_ROCE_EQC_ARM_ST_M,
+ HNS_ROCE_EQC_ARM_ST_S, eq->arm_st);
+
+ /* set eqn */
+ roce_set_field(eqc->byte_4,
+ HNS_ROCE_EQC_EQN_M,
+ HNS_ROCE_EQC_EQN_S, eq->eqn);
+
+ /* set eqe_cnt */
+ roce_set_field(eqc->byte_4,
+ HNS_ROCE_EQC_EQE_CNT_M,
+ HNS_ROCE_EQC_EQE_CNT_S,
+ HNS_ROCE_EQ_INIT_EQE_CNT);
+
+ /* set eqe_ba_pg_sz */
+ roce_set_field(eqc->byte_8,
+ HNS_ROCE_EQC_BA_PG_SZ_M,
+ HNS_ROCE_EQC_BA_PG_SZ_S, eq->eqe_ba_pg_sz);
+
+ /* set eqe_buf_pg_sz */
+ roce_set_field(eqc->byte_8,
+ HNS_ROCE_EQC_BUF_PG_SZ_M,
+ HNS_ROCE_EQC_BUF_PG_SZ_S, eq->eqe_buf_pg_sz);
+
+ /* set eq_producer_idx */
+ roce_set_field(eqc->byte_8,
+ HNS_ROCE_EQC_PROD_INDX_M,
+ HNS_ROCE_EQC_PROD_INDX_S,
+ HNS_ROCE_EQ_INIT_PROD_IDX);
+
+ /* set eq_max_cnt */
+ roce_set_field(eqc->byte_12,
+ HNS_ROCE_EQC_MAX_CNT_M,
+ HNS_ROCE_EQC_MAX_CNT_S, eq->eq_max_cnt);
+
+ /* set eq_period */
+ roce_set_field(eqc->byte_12,
+ HNS_ROCE_EQC_PERIOD_M,
+ HNS_ROCE_EQC_PERIOD_S, eq->eq_period);
+
+ /* set eqe_report_timer */
+ roce_set_field(eqc->eqe_report_timer,
+ HNS_ROCE_EQC_REPORT_TIMER_M,
+ HNS_ROCE_EQC_REPORT_TIMER_S,
+ HNS_ROCE_EQ_INIT_REPORT_TIMER);
+
+ /* set eqe_ba [34:3] */
+ roce_set_field(eqc->eqe_ba0,
+ HNS_ROCE_EQC_EQE_BA_L_M,
+ HNS_ROCE_EQC_EQE_BA_L_S, eq->eqe_ba >> 3);
+
+ /* set eqe_ba [63:35] */
+ roce_set_field(eqc->eqe_ba1,
+ HNS_ROCE_EQC_EQE_BA_H_M,
+ HNS_ROCE_EQC_EQE_BA_H_S, eq->eqe_ba >> 35);
+
+ /* set eq shift */
+ roce_set_field(eqc->byte_28,
+ HNS_ROCE_EQC_SHIFT_M,
+ HNS_ROCE_EQC_SHIFT_S, eq->shift);
+
+ /* set eq MSI_IDX */
+ roce_set_field(eqc->byte_28,
+ HNS_ROCE_EQC_MSI_INDX_M,
+ HNS_ROCE_EQC_MSI_INDX_S,
+ HNS_ROCE_EQ_INIT_MSI_IDX);
+
+ /* set cur_eqe_ba [27:12] */
+ roce_set_field(eqc->byte_28,
+ HNS_ROCE_EQC_CUR_EQE_BA_L_M,
+ HNS_ROCE_EQC_CUR_EQE_BA_L_S, eq->cur_eqe_ba >> 12);
+
+ /* set cur_eqe_ba [59:28] */
+ roce_set_field(eqc->byte_32,
+ HNS_ROCE_EQC_CUR_EQE_BA_M_M,
+ HNS_ROCE_EQC_CUR_EQE_BA_M_S, eq->cur_eqe_ba >> 28);
+
+ /* set cur_eqe_ba [63:60] */
+ roce_set_field(eqc->byte_36,
+ HNS_ROCE_EQC_CUR_EQE_BA_H_M,
+ HNS_ROCE_EQC_CUR_EQE_BA_H_S, eq->cur_eqe_ba >> 60);
+
+ /* set eq consumer idx */
+ roce_set_field(eqc->byte_36,
+ HNS_ROCE_EQC_CONS_INDX_M,
+ HNS_ROCE_EQC_CONS_INDX_S,
+ HNS_ROCE_EQ_INIT_CONS_IDX);
+
+ /* set nxt_eqe_ba[43:12] */
+ roce_set_field(eqc->nxt_eqe_ba0,
+ HNS_ROCE_EQC_NXT_EQE_BA_L_M,
+ HNS_ROCE_EQC_NXT_EQE_BA_L_S, eq->nxt_eqe_ba >> 12);
+
+ /* set nxt_eqe_ba[63:44] */
+ roce_set_field(eqc->nxt_eqe_ba1,
+ HNS_ROCE_EQC_NXT_EQE_BA_H_M,
+ HNS_ROCE_EQC_NXT_EQE_BA_H_S, eq->nxt_eqe_ba >> 44);
+}
+
+static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq)
+{
+ struct device *dev = hr_dev->dev;
+ int eq_alloc_done = 0;
+ int eq_buf_cnt = 0;
+ int eqe_alloc;
+ u32 buf_chk_sz;
+ u32 bt_chk_sz;
+ u32 mhop_num;
+ u64 size;
+ u64 idx;
+ int ba_num;
+ int bt_num;
+ int record_i;
+ int record_j;
+ int i = 0;
+ int j = 0;
+
+ mhop_num = hr_dev->caps.eqe_hop_num;
+ buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
+ bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
+
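+ /* ba_num: buffer pages needed for all EQEs; bt_num: BT pages
+ * needed to hold those base addresses, at 8 bytes per BA entry
+ */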
+ ba_num = (PAGE_ALIGN(eq->entries * eq->eqe_size) + buf_chk_sz - 1)
+ / buf_chk_sz;
+ bt_num = (ba_num + bt_chk_sz / 8 - 1) / (bt_chk_sz / 8);
+
+ /* hop_num = 0 */
+ if (mhop_num == HNS_ROCE_HOP_NUM_0) {
+ if (eq->entries > buf_chk_sz / eq->eqe_size) {
+ dev_err(dev, "eq entries %d is larger than buf_pg_sz!",
+ eq->entries);
+ return -EINVAL;
+ }
+ eq->bt_l0 = dma_alloc_coherent(dev, eq->entries * eq->eqe_size,
+ &(eq->l0_dma), GFP_KERNEL);
+ if (!eq->bt_l0)
+ return -ENOMEM;
+
+ eq->cur_eqe_ba = eq->l0_dma;
+ eq->nxt_eqe_ba = 0;
+
+ memset(eq->bt_l0, 0, eq->entries * eq->eqe_size);
+
+ return 0;
+ }
+
+ eq->buf_dma = kcalloc(ba_num, sizeof(*eq->buf_dma), GFP_KERNEL);
+ if (!eq->buf_dma)
+ return -ENOMEM;
+ eq->buf = kcalloc(ba_num, sizeof(*eq->buf), GFP_KERNEL);
+ if (!eq->buf)
+ goto err_kcalloc_buf;
+
+ if (mhop_num == 2) {
+ eq->l1_dma = kcalloc(bt_num, sizeof(*eq->l1_dma), GFP_KERNEL);
+ if (!eq->l1_dma)
+ goto err_kcalloc_l1_dma;
+
+ eq->bt_l1 = kcalloc(bt_num, sizeof(*eq->bt_l1), GFP_KERNEL);
+ if (!eq->bt_l1)
+ goto err_kcalloc_bt_l1;
+ }
+
+ /* alloc L0 BT */
+ eq->bt_l0 = dma_alloc_coherent(dev, bt_chk_sz, &eq->l0_dma, GFP_KERNEL);
+ if (!eq->bt_l0)
+ goto err_dma_alloc_l0;
+
+ if (mhop_num == 1) {
+ if (ba_num > (bt_chk_sz / 8))
+ dev_err(dev, "ba_num %d is too large for 1 hop\n",
+ ba_num);
+
+ /* alloc buf */
+ for (i = 0; i < bt_chk_sz / 8; i++) {
+ if (eq_buf_cnt + 1 < ba_num) {
+ size = buf_chk_sz;
+ } else {
+ eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
+ size = (eq->entries - eqe_alloc) * eq->eqe_size;
+ }
+ eq->buf[i] = dma_alloc_coherent(dev, size,
+ &(eq->buf_dma[i]),
+ GFP_KERNEL);
+ if (!eq->buf[i])
+ goto err_dma_alloc_buf;
+
+ memset(eq->buf[i], 0, size);
+ *(eq->bt_l0 + i) = eq->buf_dma[i];
+
+ eq_buf_cnt++;
+ if (eq_buf_cnt >= ba_num)
+ break;
+ }
+ eq->cur_eqe_ba = eq->buf_dma[0];
+ eq->nxt_eqe_ba = eq->buf_dma[1];
+
+ } else if (mhop_num == 2) {
+ /* alloc L1 BT and buf */
+ for (i = 0; i < bt_chk_sz / 8; i++) {
+ eq->bt_l1[i] = dma_alloc_coherent(dev, bt_chk_sz,
+ &(eq->l1_dma[i]),
+ GFP_KERNEL);
+ if (!eq->bt_l1[i])
+ goto err_dma_alloc_l1;
+ *(eq->bt_l0 + i) = eq->l1_dma[i];
+
+ for (j = 0; j < bt_chk_sz / 8; j++) {
+ idx = i * bt_chk_sz / 8 + j;
+ if (eq_buf_cnt + 1 < ba_num) {
+ size = buf_chk_sz;
+ } else {
+ eqe_alloc = (buf_chk_sz / eq->eqe_size)
+ * idx;
+ size = (eq->entries - eqe_alloc)
+ * eq->eqe_size;
+ }
+ eq->buf[idx] = dma_alloc_coherent(dev, size,
+ &(eq->buf_dma[idx]),
+ GFP_KERNEL);
+ if (!eq->buf[idx])
+ goto err_dma_alloc_buf;
+
+ memset(eq->buf[idx], 0, size);
+ *(eq->bt_l1[i] + j) = eq->buf_dma[idx];
+
+ eq_buf_cnt++;
+ if (eq_buf_cnt >= ba_num) {
+ eq_alloc_done = 1;
+ break;
+ }
+ }
+
+ if (eq_alloc_done)
+ break;
+ }
+ eq->cur_eqe_ba = eq->buf_dma[0];
+ eq->nxt_eqe_ba = eq->buf_dma[1];
+ }
+
+ eq->l0_last_num = i + 1;
+ if (mhop_num == 2)
+ eq->l1_last_num = j + 1;
+
+ return 0;
+
+err_dma_alloc_l1:
+ dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
+ eq->bt_l0 = NULL;
+ eq->l0_dma = 0;
+ for (i -= 1; i >= 0; i--) {
+ dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
+ eq->l1_dma[i]);
+
+ for (j = 0; j < bt_chk_sz / 8; j++) {
+ idx = i * bt_chk_sz / 8 + j;
+ dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
+ eq->buf_dma[idx]);
+ }
+ }
+ goto err_dma_alloc_l0;
+
+err_dma_alloc_buf:
+ dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
+ eq->bt_l0 = NULL;
+ eq->l0_dma = 0;
+
+ if (mhop_num == 1) {
+ for (i -= 1; i >= 0; i--)
+ dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
+ eq->buf_dma[i]);
+ } else if (mhop_num == 2) {
+ record_i = i;
+ record_j = j;
+ for (; i >= 0; i--) {
+ dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
+ eq->l1_dma[i]);
+
+ for (j = 0; j < bt_chk_sz / 8; j++) {
+ if (i == record_i && j >= record_j)
+ break;
+
+ idx = i * bt_chk_sz / 8 + j;
+ dma_free_coherent(dev, buf_chk_sz,
+ eq->buf[idx],
+ eq->buf_dma[idx]);
+ }
+ }
+ }
+
+err_dma_alloc_l0:
+ kfree(eq->bt_l1);
+ eq->bt_l1 = NULL;
+
+err_kcalloc_bt_l1:
+ kfree(eq->l1_dma);
+ eq->l1_dma = NULL;
+
+err_kcalloc_l1_dma:
+ kfree(eq->buf);
+ eq->buf = NULL;
+
+err_kcalloc_buf:
+ kfree(eq->buf_dma);
+ eq->buf_dma = NULL;
+
+ return -ENOMEM;
+}
+
+static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
+ struct hns_roce_eq *eq,
+ unsigned int eq_cmd)
+{
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_cmd_mailbox *mailbox;
+ u32 buf_chk_sz = 0;
+ int ret;
+
+ /* Allocate mailbox memory */
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ if (!hr_dev->caps.eqe_hop_num) {
+ buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
+
+ eq->buf_list = kzalloc(sizeof(struct hns_roce_buf_list),
+ GFP_KERNEL);
+ if (!eq->buf_list) {
+ ret = -ENOMEM;
+ goto free_cmd_mbox;
+ }
+
+ eq->buf_list->buf = dma_alloc_coherent(dev, buf_chk_sz,
+ &(eq->buf_list->map),
+ GFP_KERNEL);
+ if (!eq->buf_list->buf) {
+ ret = -ENOMEM;
+ goto err_alloc_buf;
+ }
+
+ memset(eq->buf_list->buf, 0, buf_chk_sz);
+ } else {
+ ret = hns_roce_mhop_alloc_eq(hr_dev, eq);
+ if (ret)
+ goto free_cmd_mbox;
+ }
+
+ hns_roce_config_eqc(hr_dev, eq, mailbox->buf);
+
+ ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0,
+ eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
+ if (ret) {
+ dev_err(dev, "[mailbox cmd] creat eqc failed.\n");
+ goto err_cmd_mbox;
+ }
+
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+ return 0;
+
+err_cmd_mbox:
+ if (!hr_dev->caps.eqe_hop_num)
+ dma_free_coherent(dev, buf_chk_sz, eq->buf_list->buf,
+ eq->buf_list->map);
+ else {
+ hns_roce_mhop_free_eq(hr_dev, eq);
+ goto free_cmd_mbox;
+ }
+
+err_alloc_buf:
+ kfree(eq->buf_list);
+
+free_cmd_mbox:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+ return ret;
+}
+
+static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_eq *eq;
+ unsigned int eq_cmd;
+ int irq_num;
+ int eq_num;
+ int other_num;
+ int comp_num;
+ int aeq_num;
+ int i, j, k;
+ int ret;
+
+ other_num = hr_dev->caps.num_other_vectors;
+ comp_num = hr_dev->caps.num_comp_vectors;
+ aeq_num = hr_dev->caps.num_aeq_vectors;
+
+ eq_num = comp_num + aeq_num;
+ irq_num = eq_num + other_num;
+
+ eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
+ if (!eq_table->eq)
+ return -ENOMEM;
+
+ for (i = 0; i < irq_num; i++) {
+ hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
+ GFP_KERNEL);
+ if (!hr_dev->irq_names[i]) {
+ ret = -ENOMEM;
+ goto err_failed_kzalloc;
+ }
+ }
+
+ /* create eq */
+ for (j = 0; j < eq_num; j++) {
+ eq = &eq_table->eq[j];
+ eq->hr_dev = hr_dev;
+ eq->eqn = j;
+ if (j < comp_num) {
+ /* CEQ */
+ eq_cmd = HNS_ROCE_CMD_CREATE_CEQC;
+ eq->type_flag = HNS_ROCE_CEQ;
+ eq->entries = hr_dev->caps.ceqe_depth;
+ eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
+ eq->irq = hr_dev->irq[j + other_num + aeq_num];
+ eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM;
+ eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL;
+ } else {
+ /* AEQ */
+ eq_cmd = HNS_ROCE_CMD_CREATE_AEQC;
+ eq->type_flag = HNS_ROCE_AEQ;
+ eq->entries = hr_dev->caps.aeqe_depth;
+ eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
+ eq->irq = hr_dev->irq[j - comp_num + other_num];
+ eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM;
+ eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL;
+ }
+
+ ret = hns_roce_v2_create_eq(hr_dev, eq, eq_cmd);
+ if (ret) {
+ dev_err(dev, "eq create failed.\n");
+ goto err_create_eq_fail;
+ }
+ }
+
+ /* enable irq */
+ hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE);
+
+ /* irq contains: abnormal + AEQ + CEQ */
+ for (k = 0; k < irq_num; k++)
+ if (k < other_num)
+ snprintf((char *)hr_dev->irq_names[k],
+ HNS_ROCE_INT_NAME_LEN, "hns-abn-%d", k);
+ else if (k < (other_num + aeq_num))
+ snprintf((char *)hr_dev->irq_names[k],
+ HNS_ROCE_INT_NAME_LEN, "hns-aeq-%d",
+ k - other_num);
+ else
+ snprintf((char *)hr_dev->irq_names[k],
+ HNS_ROCE_INT_NAME_LEN, "hns-ceq-%d",
+ k - other_num - aeq_num);
+
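+ /* eq_table orders CEQs before AEQs while irq_names lists abnormal,
+ * AEQ then CEQ names, hence the k + aeq_num / k - comp_num skew
+ * when matching a name to each vector
+ */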
+ for (k = 0; k < irq_num; k++) {
+ if (k < other_num)
+ ret = request_irq(hr_dev->irq[k],
+ hns_roce_v2_msix_interrupt_abn,
+ 0, hr_dev->irq_names[k], hr_dev);
+
+ else if (k < (other_num + comp_num))
+ ret = request_irq(eq_table->eq[k - other_num].irq,
+ hns_roce_v2_msix_interrupt_eq,
+ 0, hr_dev->irq_names[k + aeq_num],
+ &eq_table->eq[k - other_num]);
+ else
+ ret = request_irq(eq_table->eq[k - other_num].irq,
+ hns_roce_v2_msix_interrupt_eq,
+ 0, hr_dev->irq_names[k - comp_num],
+ &eq_table->eq[k - other_num]);
+ if (ret) {
+ dev_err(dev, "Request irq error!\n");
+ goto err_request_irq_fail;
+ }
+ }
+
+ return 0;
+
+err_request_irq_fail:
+ for (k -= 1; k >= 0; k--)
+ if (k < other_num)
+ free_irq(hr_dev->irq[k], hr_dev);
+ else
+ free_irq(eq_table->eq[k - other_num].irq,
+ &eq_table->eq[k - other_num]);
+
+err_create_eq_fail:
+ for (j -= 1; j >= 0; j--)
+ hns_roce_v2_free_eq(hr_dev, &eq_table->eq[j]);
+
+err_failed_kzalloc:
+ for (i -= 1; i >= 0; i--)
+ kfree(hr_dev->irq_names[i]);
+ kfree(eq_table->eq);
+
+ return ret;
+}
+
+static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
+ int irq_num;
+ int eq_num;
+ int i;
+
+ eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
+ irq_num = eq_num + hr_dev->caps.num_other_vectors;
+
+ /* Disable irq */
+ hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
+
+ for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
+ free_irq(hr_dev->irq[i], hr_dev);
+
+ for (i = 0; i < eq_num; i++) {
+ hns_roce_v2_destroy_eqc(hr_dev, i);
+
+ free_irq(eq_table->eq[i].irq, &eq_table->eq[i]);
+
+ hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]);
+ }
+
+ for (i = 0; i < irq_num; i++)
+ kfree(hr_dev->irq_names[i]);
+
+ kfree(eq_table->eq);
+}
+
static const struct hns_roce_hw hns_roce_hw_v2 = {
.cmq_init = hns_roce_v2_cmq_init,
.cmq_exit = hns_roce_v2_cmq_exit,
@@ -3183,6 +4646,8 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
.post_recv = hns_roce_v2_post_recv,
.req_notify_cq = hns_roce_v2_req_notify_cq,
.poll_cq = hns_roce_v2_poll_cq,
+ .init_eq = hns_roce_v2_init_eq_table,
+ .cleanup_eq = hns_roce_v2_cleanup_eq_table,
};
static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
@@ -3197,6 +4662,7 @@ static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
struct hnae3_handle *handle)
{
const struct pci_device_id *id;
+ int i;
id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev);
if (!id) {
@@ -3214,8 +4680,15 @@ static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
hr_dev->iboe.phy_port[0] = 0;
+ addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid,
+ hr_dev->iboe.netdevs[0]->dev_addr);
+
+ for (i = 0; i < HNS_ROCE_V2_MAX_IRQ_NUM; i++)
+ hr_dev->irq[i] = pci_irq_vector(handle->pdev,
+ i + handle->rinfo.base_vector);
+
/* cmd issue mode: 0 is poll, 1 is event */
- hr_dev->cmd_mod = 0;
+ hr_dev->cmd_mod = 1;
hr_dev->loop_idc = 0;
return 0;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 04b7a51b8efb..960df095392a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -53,6 +53,10 @@
#define HNS_ROCE_V2_MAX_SQ_INLINE 0x20
#define HNS_ROCE_V2_UAR_NUM 256
#define HNS_ROCE_V2_PHY_UAR_NUM 1
+#define HNS_ROCE_V2_MAX_IRQ_NUM 65
+#define HNS_ROCE_V2_COMP_VEC_NUM 63
+#define HNS_ROCE_V2_AEQE_VEC_NUM 1
+#define HNS_ROCE_V2_ABNORMAL_VEC_NUM 1
#define HNS_ROCE_V2_MAX_MTPT_NUM 0x8000
#define HNS_ROCE_V2_MAX_MTT_SEGS 0x1000000
#define HNS_ROCE_V2_MAX_CQE_SEGS 0x1000000
@@ -78,6 +82,8 @@
#define HNS_ROCE_MTT_HOP_NUM 1
#define HNS_ROCE_CQE_HOP_NUM 1
#define HNS_ROCE_PBL_HOP_NUM 2
+#define HNS_ROCE_EQE_HOP_NUM 2
+
#define HNS_ROCE_V2_GID_INDEX_NUM 256
#define HNS_ROCE_V2_TABLE_CHUNK_SIZE (1 << 18)
@@ -105,6 +111,12 @@
(step_idx == 1 && hop_num == 1) || \
(step_idx == 2 && hop_num == 2))
+enum {
+ NO_ARMED = 0x0,
+ REG_NXT_CEQE = 0x2,
+ REG_NXT_SE_CEQE = 0x3
+};
+
#define V2_CQ_DB_REQ_NOT_SOL 0
#define V2_CQ_DB_REQ_NOT 1
@@ -229,6 +241,9 @@ struct hns_roce_v2_cq_context {
u32 cqe_report_timer;
u32 byte_64_se_cqe_idx;
};
+#define HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM 0x0
+#define HNS_ROCE_V2_CQ_DEFAULT_INTERVAL 0x0
+
#define V2_CQC_BYTE_4_CQ_ST_S 0
#define V2_CQC_BYTE_4_CQ_ST_M GENMASK(1, 0)
@@ -747,11 +762,14 @@ struct hns_roce_v2_qp_context {
struct hns_roce_v2_cqe {
u32 byte_4;
- u32 rkey_immtdata;
+ union {
+ __le32 rkey;
+ __be32 immtdata;
+ };
u32 byte_12;
u32 byte_16;
u32 byte_cnt;
- u32 smac;
+ u8 smac[4];
u32 byte_28;
u32 byte_32;
};
@@ -901,6 +919,90 @@ struct hns_roce_v2_cq_db {
#define V2_CQ_DB_PARAMETER_NOTIFY_S 24
+struct hns_roce_v2_ud_send_wqe {
+ u32 byte_4;
+ u32 msg_len;
+ u32 immtdata;
+ u32 byte_16;
+ u32 byte_20;
+ u32 byte_24;
+ u32 qkey;
+ u32 byte_32;
+ u32 byte_36;
+ u32 byte_40;
+ u32 dmac;
+ u32 byte_48;
+ u8 dgid[GID_LEN_V2];
+};
+#define V2_UD_SEND_WQE_BYTE_4_OPCODE_S 0
+#define V2_UD_SEND_WQE_BYTE_4_OPCODE_M GENMASK(4, 0)
+
+#define V2_UD_SEND_WQE_BYTE_4_OWNER_S 7
+
+#define V2_UD_SEND_WQE_BYTE_4_CQE_S 8
+
+#define V2_UD_SEND_WQE_BYTE_4_SE_S 11
+
+#define V2_UD_SEND_WQE_BYTE_16_PD_S 0
+#define V2_UD_SEND_WQE_BYTE_16_PD_M GENMASK(23, 0)
+
+#define V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S 24
+#define V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M GENMASK(31, 24)
+
+#define V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S 0
+#define V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M GENMASK(23, 0)
+
+#define V2_UD_SEND_WQE_BYTE_24_UDPSPN_S 16
+#define V2_UD_SEND_WQE_BYTE_24_UDPSPN_M GENMASK(31, 16)
+
+#define V2_UD_SEND_WQE_BYTE_32_DQPN_S 0
+#define V2_UD_SEND_WQE_BYTE_32_DQPN_M GENMASK(23, 0)
+
+#define V2_UD_SEND_WQE_BYTE_36_VLAN_S 0
+#define V2_UD_SEND_WQE_BYTE_36_VLAN_M GENMASK(15, 0)
+
+#define V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S 16
+#define V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M GENMASK(23, 16)
+
+#define V2_UD_SEND_WQE_BYTE_36_TCLASS_S 24
+#define V2_UD_SEND_WQE_BYTE_36_TCLASS_M GENMASK(31, 24)
+
+#define V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S 0
+#define V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M GENMASK(19, 0)
+
+#define V2_UD_SEND_WQE_BYTE_40_SL_S 20
+#define V2_UD_SEND_WQE_BYTE_40_SL_M GENMASK(23, 20)
+
+#define V2_UD_SEND_WQE_BYTE_40_PORTN_S 24
+#define V2_UD_SEND_WQE_BYTE_40_PORTN_M GENMASK(26, 24)
+
+#define V2_UD_SEND_WQE_BYTE_40_LBI_S 31
+
+#define V2_UD_SEND_WQE_DMAC_0_S 0
+#define V2_UD_SEND_WQE_DMAC_0_M GENMASK(7, 0)
+
+#define V2_UD_SEND_WQE_DMAC_1_S 8
+#define V2_UD_SEND_WQE_DMAC_1_M GENMASK(15, 8)
+
+#define V2_UD_SEND_WQE_DMAC_2_S 16
+#define V2_UD_SEND_WQE_DMAC_2_M GENMASK(23, 16)
+
+#define V2_UD_SEND_WQE_DMAC_3_S 24
+#define V2_UD_SEND_WQE_DMAC_3_M GENMASK(31, 24)
+
+#define V2_UD_SEND_WQE_BYTE_48_DMAC_4_S 0
+#define V2_UD_SEND_WQE_BYTE_48_DMAC_4_M GENMASK(7, 0)
+
+#define V2_UD_SEND_WQE_BYTE_48_DMAC_5_S 8
+#define V2_UD_SEND_WQE_BYTE_48_DMAC_5_M GENMASK(15, 8)
+
+#define V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S 16
+#define V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M GENMASK(23, 16)
+
+#define V2_UD_SEND_WQE_BYTE_48_SMAC_INDX_S 24
+#define V2_UD_SEND_WQE_BYTE_48_SMAC_INDX_M GENMASK(31, 24)
+
struct hns_roce_v2_rc_send_wqe {
u32 byte_4;
u32 msg_len;
@@ -1129,9 +1231,6 @@ struct hns_roce_cmq_desc {
u32 data[6];
};
-#define ROCEE_VF_MB_CFG0_REG 0x40
-#define ROCEE_VF_MB_STATUS_REG 0x58
-
#define HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS 10000
#define HNS_ROCE_HW_RUN_BIT_SHIFT 31
@@ -1174,4 +1273,178 @@ struct hns_roce_v2_priv {
struct hns_roce_v2_cmq cmq;
};
+struct hns_roce_eq_context {
+ u32 byte_4;
+ u32 byte_8;
+ u32 byte_12;
+ u32 eqe_report_timer;
+ u32 eqe_ba0;
+ u32 eqe_ba1;
+ u32 byte_28;
+ u32 byte_32;
+ u32 byte_36;
+ u32 nxt_eqe_ba0;
+ u32 nxt_eqe_ba1;
+ u32 rsv[5];
+};
+
+#define HNS_ROCE_AEQ_DEFAULT_BURST_NUM 0x0
+#define HNS_ROCE_AEQ_DEFAULT_INTERVAL 0x0
+#define HNS_ROCE_CEQ_DEFAULT_BURST_NUM 0x0
+#define HNS_ROCE_CEQ_DEFAULT_INTERVAL 0x0
+
+#define HNS_ROCE_V2_EQ_STATE_INVALID 0
+#define HNS_ROCE_V2_EQ_STATE_VALID 1
+#define HNS_ROCE_V2_EQ_STATE_OVERFLOW 2
+#define HNS_ROCE_V2_EQ_STATE_FAILURE 3
+
+#define HNS_ROCE_V2_EQ_OVER_IGNORE_0 0
+#define HNS_ROCE_V2_EQ_OVER_IGNORE_1 1
+
+#define HNS_ROCE_V2_EQ_COALESCE_0 0
+#define HNS_ROCE_V2_EQ_COALESCE_1 1
+
+#define HNS_ROCE_V2_EQ_FIRED 0
+#define HNS_ROCE_V2_EQ_ARMED 1
+#define HNS_ROCE_V2_EQ_ALWAYS_ARMED 3
+
+#define HNS_ROCE_EQ_INIT_EQE_CNT 0
+#define HNS_ROCE_EQ_INIT_PROD_IDX 0
+#define HNS_ROCE_EQ_INIT_REPORT_TIMER 0
+#define HNS_ROCE_EQ_INIT_MSI_IDX 0
+#define HNS_ROCE_EQ_INIT_CONS_IDX 0
+#define HNS_ROCE_EQ_INIT_NXT_EQE_BA 0
+
+#define HNS_ROCE_V2_CEQ_CEQE_OWNER_S 31
+#define HNS_ROCE_V2_AEQ_AEQE_OWNER_S 31
+
+#define HNS_ROCE_V2_COMP_EQE_NUM 0x1000
+#define HNS_ROCE_V2_ASYNC_EQE_NUM 0x1000
+
+#define HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S 0
+#define HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S 1
+#define HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S 2
+
+#define HNS_ROCE_EQ_DB_CMD_AEQ 0x0
+#define HNS_ROCE_EQ_DB_CMD_AEQ_ARMED 0x1
+#define HNS_ROCE_EQ_DB_CMD_CEQ 0x2
+#define HNS_ROCE_EQ_DB_CMD_CEQ_ARMED 0x3
+
+#define EQ_ENABLE 1
+#define EQ_DISABLE 0
+
+#define EQ_REG_OFFSET 0x4
+
+#define HNS_ROCE_INT_NAME_LEN 32
+#define HNS_ROCE_V2_EQN_M GENMASK(23, 0)
+
+#define HNS_ROCE_V2_CONS_IDX_M GENMASK(23, 0)
+
+#define HNS_ROCE_V2_VF_ABN_INT_EN_S 0
+#define HNS_ROCE_V2_VF_ABN_INT_EN_M GENMASK(0, 0)
+#define HNS_ROCE_V2_VF_ABN_INT_ST_M GENMASK(2, 0)
+#define HNS_ROCE_V2_VF_ABN_INT_CFG_M GENMASK(2, 0)
+#define HNS_ROCE_V2_VF_EVENT_INT_EN_M GENMASK(0, 0)
+
+/* WORD0 */
+#define HNS_ROCE_EQC_EQ_ST_S 0
+#define HNS_ROCE_EQC_EQ_ST_M GENMASK(1, 0)
+
+#define HNS_ROCE_EQC_HOP_NUM_S 2
+#define HNS_ROCE_EQC_HOP_NUM_M GENMASK(3, 2)
+
+#define HNS_ROCE_EQC_OVER_IGNORE_S 4
+#define HNS_ROCE_EQC_OVER_IGNORE_M GENMASK(4, 4)
+
+#define HNS_ROCE_EQC_COALESCE_S 5
+#define HNS_ROCE_EQC_COALESCE_M GENMASK(5, 5)
+
+#define HNS_ROCE_EQC_ARM_ST_S 6
+#define HNS_ROCE_EQC_ARM_ST_M GENMASK(7, 6)
+
+#define HNS_ROCE_EQC_EQN_S 8
+#define HNS_ROCE_EQC_EQN_M GENMASK(15, 8)
+
+#define HNS_ROCE_EQC_EQE_CNT_S 16
+#define HNS_ROCE_EQC_EQE_CNT_M GENMASK(31, 16)
+
+/* WORD1 */
+#define HNS_ROCE_EQC_BA_PG_SZ_S 0
+#define HNS_ROCE_EQC_BA_PG_SZ_M GENMASK(3, 0)
+
+#define HNS_ROCE_EQC_BUF_PG_SZ_S 4
+#define HNS_ROCE_EQC_BUF_PG_SZ_M GENMASK(7, 4)
+
+#define HNS_ROCE_EQC_PROD_INDX_S 8
+#define HNS_ROCE_EQC_PROD_INDX_M GENMASK(31, 8)
+
+/* WORD2 */
+#define HNS_ROCE_EQC_MAX_CNT_S 0
+#define HNS_ROCE_EQC_MAX_CNT_M GENMASK(15, 0)
+
+#define HNS_ROCE_EQC_PERIOD_S 16
+#define HNS_ROCE_EQC_PERIOD_M GENMASK(31, 16)
+
+/* WORD3 */
+#define HNS_ROCE_EQC_REPORT_TIMER_S 0
+#define HNS_ROCE_EQC_REPORT_TIMER_M GENMASK(31, 0)
+
+/* WORD4 */
+#define HNS_ROCE_EQC_EQE_BA_L_S 0
+#define HNS_ROCE_EQC_EQE_BA_L_M GENMASK(31, 0)
+
+/* WORD5 */
+#define HNS_ROCE_EQC_EQE_BA_H_S 0
+#define HNS_ROCE_EQC_EQE_BA_H_M GENMASK(28, 0)
+
+/* WORD6 */
+#define HNS_ROCE_EQC_SHIFT_S 0
+#define HNS_ROCE_EQC_SHIFT_M GENMASK(7, 0)
+
+#define HNS_ROCE_EQC_MSI_INDX_S 8
+#define HNS_ROCE_EQC_MSI_INDX_M GENMASK(15, 8)
+
+#define HNS_ROCE_EQC_CUR_EQE_BA_L_S 16
+#define HNS_ROCE_EQC_CUR_EQE_BA_L_M GENMASK(31, 16)
+
+/* WORD7 */
+#define HNS_ROCE_EQC_CUR_EQE_BA_M_S 0
+#define HNS_ROCE_EQC_CUR_EQE_BA_M_M GENMASK(31, 0)
+
+/* WORD8 */
+#define HNS_ROCE_EQC_CUR_EQE_BA_H_S 0
+#define HNS_ROCE_EQC_CUR_EQE_BA_H_M GENMASK(3, 0)
+
+#define HNS_ROCE_EQC_CONS_INDX_S 8
+#define HNS_ROCE_EQC_CONS_INDX_M GENMASK(31, 8)
+
+/* WORD9 */
+#define HNS_ROCE_EQC_NXT_EQE_BA_L_S 0
+#define HNS_ROCE_EQC_NXT_EQE_BA_L_M GENMASK(31, 0)
+
+/* WORD10 */
+#define HNS_ROCE_EQC_NXT_EQE_BA_H_S 0
+#define HNS_ROCE_EQC_NXT_EQE_BA_H_M GENMASK(19, 0)
+
+#define HNS_ROCE_V2_CEQE_COMP_CQN_S 0
+#define HNS_ROCE_V2_CEQE_COMP_CQN_M GENMASK(23, 0)
+
+#define HNS_ROCE_V2_AEQE_EVENT_TYPE_S 0
+#define HNS_ROCE_V2_AEQE_EVENT_TYPE_M GENMASK(7, 0)
+
+#define HNS_ROCE_V2_AEQE_SUB_TYPE_S 8
+#define HNS_ROCE_V2_AEQE_SUB_TYPE_M GENMASK(15, 8)
+
+#define HNS_ROCE_V2_EQ_DB_CMD_S 16
+#define HNS_ROCE_V2_EQ_DB_CMD_M GENMASK(17, 16)
+
+#define HNS_ROCE_V2_EQ_DB_TAG_S 0
+#define HNS_ROCE_V2_EQ_DB_TAG_M GENMASK(7, 0)
+
+#define HNS_ROCE_V2_EQ_DB_PARA_S 0
+#define HNS_ROCE_V2_EQ_DB_PARA_M GENMASK(23, 0)
+
+#define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S 0
+#define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M GENMASK(23, 0)
+
#endif
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index cf02ac2d3596..aa0c242ddc50 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -748,12 +748,10 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
goto error_failed_cmd_init;
}
- if (hr_dev->cmd_mod) {
- ret = hns_roce_init_eq_table(hr_dev);
- if (ret) {
- dev_err(dev, "eq init failed!\n");
- goto error_failed_eq_table;
- }
+ ret = hr_dev->hw->init_eq(hr_dev);
+ if (ret) {
+ dev_err(dev, "eq init failed!\n");
+ goto error_failed_eq_table;
}
if (hr_dev->cmd_mod) {
@@ -805,8 +803,7 @@ error_failed_init_hem:
hns_roce_cmd_use_polling(hr_dev);
error_failed_use_event:
- if (hr_dev->cmd_mod)
- hns_roce_cleanup_eq_table(hr_dev);
+ hr_dev->hw->cleanup_eq(hr_dev);
error_failed_eq_table:
hns_roce_cmd_cleanup(hr_dev);
@@ -837,8 +834,7 @@ void hns_roce_exit(struct hns_roce_dev *hr_dev)
if (hr_dev->cmd_mod)
hns_roce_cmd_use_polling(hr_dev);
- if (hr_dev->cmd_mod)
- hns_roce_cleanup_eq_table(hr_dev);
+ hr_dev->hw->cleanup_eq(hr_dev);
hns_roce_cmd_cleanup(hr_dev);
if (hr_dev->hw->cmq_exit)
hr_dev->hw->cmq_exit(hr_dev);
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 49586ec8126a..4414cea9ef56 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -65,6 +65,7 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
if (atomic_dec_and_test(&qp->refcount))
complete(&qp->free);
}
+EXPORT_SYMBOL_GPL(hns_roce_qp_event);
static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
enum hns_roce_event type)
@@ -454,6 +455,13 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
hr_qp->sge.sge_shift = 4;
}
+ /* UD SQ WQEs keep their SGEs in the extended SGE area */
+ if (hr_dev->caps.max_sq_sg > 2 && hr_qp->ibqp.qp_type == IB_QPT_GSI) {
+ hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
+ hr_qp->sq.max_gs);
+ hr_qp->sge.sge_shift = 4;
+ }
+
/* Get buf size, SQ and RQ are aligned to PAGE_SIZE */
page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
hr_qp->sq.offset = 0;
@@ -493,6 +501,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
int ret = 0;
u32 page_shift;
u32 npages;
+ int i;
mutex_init(&hr_qp->mutex);
spin_lock_init(&hr_qp->sq.lock);
@@ -500,6 +509,8 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
hr_qp->state = IB_QPS_RESET;
+ hr_qp->ibqp.qp_type = init_attr->qp_type;
+
if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
else
@@ -512,18 +523,48 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
goto err_out;
}
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
+ /* allocate recv inline buf */
+ hr_qp->rq_inl_buf.wqe_list = kcalloc(hr_qp->rq.wqe_cnt,
+ sizeof(struct hns_roce_rinl_wqe),
+ GFP_KERNEL);
+ if (!hr_qp->rq_inl_buf.wqe_list) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ hr_qp->rq_inl_buf.wqe_cnt = hr_qp->rq.wqe_cnt;
+
+ /* Allocate one flat buffer with SGE space for every RQ WQE */
+ hr_qp->rq_inl_buf.wqe_list[0].sg_list =
+ kcalloc(hr_qp->rq_inl_buf.wqe_cnt,
+ init_attr->cap.max_recv_sge *
+ sizeof(struct hns_roce_rinl_sge),
+ GFP_KERNEL);
+ if (!hr_qp->rq_inl_buf.wqe_list[0].sg_list) {
+ ret = -ENOMEM;
+ goto err_wqe_list;
+ }
+
+ /* Point each WQE's sg_list into its slice of the flat buffer */
+ for (i = 1; i < hr_qp->rq_inl_buf.wqe_cnt; i++)
+ hr_qp->rq_inl_buf.wqe_list[i].sg_list =
+ &hr_qp->rq_inl_buf.wqe_list[0].sg_list[i *
+ init_attr->cap.max_recv_sge];
+ }
+
if (ib_pd->uobject) {
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
dev_err(dev, "ib_copy_from_udata error for create qp\n");
ret = -EFAULT;
- goto err_out;
+ goto err_rq_sge_list;
}
ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp,
&ucmd);
if (ret) {
dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
- goto err_out;
+ goto err_rq_sge_list;
}
hr_qp->umem = ib_umem_get(ib_pd->uobject->context,
@@ -532,7 +573,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
if (IS_ERR(hr_qp->umem)) {
dev_err(dev, "ib_umem_get error for create qp\n");
ret = PTR_ERR(hr_qp->umem);
- goto err_out;
+ goto err_rq_sge_list;
}
hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
@@ -566,13 +607,13 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
dev_err(dev, "init_attr->create_flags error!\n");
ret = -EINVAL;
- goto err_out;
+ goto err_rq_sge_list;
}
if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
dev_err(dev, "init_attr->create_flags error!\n");
ret = -EINVAL;
- goto err_out;
+ goto err_rq_sge_list;
}
/* Set SQ size */
@@ -580,7 +621,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
hr_qp);
if (ret) {
dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
- goto err_out;
+ goto err_rq_sge_list;
}
/* QP doorbell register address */
@@ -596,7 +637,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
&hr_qp->hr_buf, page_shift)) {
dev_err(dev, "hns_roce_buf_alloc error!\n");
ret = -ENOMEM;
- goto err_out;
+ goto err_rq_sge_list;
}
hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
@@ -678,6 +719,14 @@ err_buf:
else
hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
+err_rq_sge_list:
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
+ kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
+
+err_wqe_list:
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
+ kfree(hr_qp->rq_inl_buf.wqe_list);
+
err_out:
return ret;
}
@@ -724,8 +773,13 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
hr_qp = &hr_sqp->hr_qp;
hr_qp->port = init_attr->port_num - 1;
hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
- hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS +
- hr_dev->iboe.phy_port[hr_qp->port];
+
+ /* hw v1 reserves a GSI qpn per phy port; later hw uses qpn 1 */
+ if (hr_dev->caps.max_sq_sg <= 2)
+ hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS +
+ hr_dev->iboe.phy_port[hr_qp->port];
+ else
+ hr_qp->ibqp.qp_num = 1;
ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
hr_qp->ibqp.qp_num, hr_qp);
diff --git a/drivers/infiniband/hw/i40iw/Kconfig b/drivers/infiniband/hw/i40iw/Kconfig
index f6d20ba88c03..2962979c06e9 100644
--- a/drivers/infiniband/hw/i40iw/Kconfig
+++ b/drivers/infiniband/hw/i40iw/Kconfig
@@ -5,4 +5,3 @@ config INFINIBAND_I40IW
select GENERIC_ALLOCATOR
---help---
Intel(R) Ethernet X722 iWARP Driver
- INET && I40IW && INFINIBAND && I40E
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index 4ae9131b6350..bcddd7061fc0 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -587,5 +587,8 @@ int i40iw_inet6addr_event(struct notifier_block *notifier,
int i40iw_net_event(struct notifier_block *notifier,
unsigned long event,
void *ptr);
+int i40iw_netdevice_event(struct notifier_block *notifier,
+ unsigned long event,
+ void *ptr);
#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 77870f9e1736..abf4cd897849 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -92,14 +92,9 @@ void i40iw_free_sqbuf(struct i40iw_sc_vsi *vsi, void *bufp)
static u8 i40iw_derive_hw_ird_setting(u16 cm_ird)
{
u8 encoded_ird_size;
- u8 pof2_cm_ird = 1;
-
- /* round-off to next powerof2 */
- while (pof2_cm_ird < cm_ird)
- pof2_cm_ird *= 2;
/* ird_size field is encoded in qp_ctx */
- switch (pof2_cm_ird) {
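+ /* roundup_pow_of_two() is undefined for 0, so a zero IRD is
+ * mapped straight to the default encoding
+ */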
+ switch (cm_ird ? roundup_pow_of_two(cm_ird) : 0) {
case I40IW_HW_IRD_SETTING_64:
encoded_ird_size = 3;
break;
@@ -125,13 +120,16 @@ static u8 i40iw_derive_hw_ird_setting(u16 cm_ird)
* @conn_ird: connection IRD
* @conn_ord: connection ORD
*/
-static void i40iw_record_ird_ord(struct i40iw_cm_node *cm_node, u16 conn_ird, u16 conn_ord)
+static void i40iw_record_ird_ord(struct i40iw_cm_node *cm_node, u32 conn_ird,
+ u32 conn_ord)
{
if (conn_ird > I40IW_MAX_IRD_SIZE)
conn_ird = I40IW_MAX_IRD_SIZE;
if (conn_ord > I40IW_MAX_ORD_SIZE)
conn_ord = I40IW_MAX_ORD_SIZE;
+ else if (!conn_ord && cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO)
+ conn_ord = 1;
cm_node->ird_size = conn_ird;
cm_node->ord_size = conn_ord;
@@ -2878,15 +2876,13 @@ static struct i40iw_cm_listener *i40iw_make_listen_node(
* i40iw_create_cm_node - make a connection node with params
* @cm_core: cm's core
* @iwdev: iwarp device structure
- * @private_data_len: len to provate data for mpa request
- * @private_data: pointer to private data for connection
+ * @conn_param: upper layer connection parameters
* @cm_info: quad info for connection
*/
static struct i40iw_cm_node *i40iw_create_cm_node(
struct i40iw_cm_core *cm_core,
struct i40iw_device *iwdev,
- u16 private_data_len,
- void *private_data,
+ struct iw_cm_conn_param *conn_param,
struct i40iw_cm_info *cm_info)
{
struct i40iw_cm_node *cm_node;
@@ -2894,6 +2890,9 @@ static struct i40iw_cm_node *i40iw_create_cm_node(
struct i40iw_cm_node *loopback_remotenode;
struct i40iw_cm_info loopback_cm_info;
+ u16 private_data_len = conn_param->private_data_len;
+ const void *private_data = conn_param->private_data;
+
/* create a CM connection node */
cm_node = i40iw_make_cm_node(cm_core, iwdev, cm_info, NULL);
if (!cm_node)
@@ -2902,6 +2901,8 @@ static struct i40iw_cm_node *i40iw_create_cm_node(
cm_node->tcp_cntxt.client = 1;
cm_node->tcp_cntxt.rcv_wscale = I40IW_CM_DEFAULT_RCV_WND_SCALE;
+ i40iw_record_ird_ord(cm_node, conn_param->ird, conn_param->ord);
+
if (!memcmp(cm_info->loc_addr, cm_info->rem_addr, sizeof(cm_info->loc_addr))) {
loopback_remotelistener = i40iw_find_listener(
cm_core,
@@ -2935,6 +2936,10 @@ static struct i40iw_cm_node *i40iw_create_cm_node(
private_data_len);
loopback_remotenode->pdata.size = private_data_len;
+ if (loopback_remotenode->ord_size > cm_node->ird_size)
+ loopback_remotenode->ord_size =
+ cm_node->ird_size;
+
cm_node->state = I40IW_CM_STATE_OFFLOADED;
cm_node->tcp_cntxt.rcv_nxt =
loopback_remotenode->tcp_cntxt.loc_seq_num;
@@ -3691,7 +3696,7 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
cm_node->qhash_set = false;
i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
- cm_node->accelerated = 1;
+ cm_node->accelerated = true;
status =
i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_ESTABLISHED, 0);
if (status)
@@ -3815,9 +3820,7 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
__func__, cm_id->tos, cm_info.user_pri);
cm_id->add_ref(cm_id);
cm_node = i40iw_create_cm_node(&iwdev->cm_core, iwdev,
- conn_param->private_data_len,
- (void *)conn_param->private_data,
- &cm_info);
+ conn_param, &cm_info);
if (IS_ERR(cm_node)) {
ret = PTR_ERR(cm_node);
@@ -3849,11 +3852,6 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
}
cm_node->apbvt_set = true;
- i40iw_record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord);
- if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO &&
- !cm_node->ord_size)
- cm_node->ord_size = 1;
-
iwqp->cm_node = cm_node;
cm_node->iwqp = iwqp;
iwqp->cm_id = cm_id;
@@ -4058,7 +4056,7 @@ static void i40iw_cm_event_connected(struct i40iw_cm_event *event)
cm_node->qhash_set = false;
i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
- cm_node->accelerated = 1;
+ cm_node->accelerated = true;
status = i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY,
0);
if (status)
@@ -4242,10 +4240,16 @@ set_qhash:
}
/**
- * i40iw_cm_disconnect_all - disconnect all connected qp's
+ * i40iw_cm_teardown_connections - teardown QPs
* @iwdev: device pointer
+ * @ipaddr: Pointer to IPv4 or IPv6 address
+ * @ipv4: flag indicating IPv4 when true
+ * @disconnect_all: flag indicating disconnect all QPs
+ * teardown QPs where source or destination addr matches ip addr
*/
-void i40iw_cm_disconnect_all(struct i40iw_device *iwdev)
+void i40iw_cm_teardown_connections(struct i40iw_device *iwdev, u32 *ipaddr,
+ struct i40iw_cm_info *nfo,
+ bool disconnect_all)
{
struct i40iw_cm_core *cm_core = &iwdev->cm_core;
struct list_head *list_core_temp;
@@ -4259,8 +4263,13 @@ void i40iw_cm_disconnect_all(struct i40iw_device *iwdev)
spin_lock_irqsave(&cm_core->ht_lock, flags);
list_for_each_safe(list_node, list_core_temp, &cm_core->connected_nodes) {
cm_node = container_of(list_node, struct i40iw_cm_node, list);
- atomic_inc(&cm_node->ref_count);
- list_add(&cm_node->connected_entry, &connected_list);
+ if (disconnect_all ||
+ (nfo->vlan_id == cm_node->vlan_id &&
+ (!memcmp(cm_node->loc_addr, ipaddr, nfo->ipv4 ? 4 : 16) ||
+ !memcmp(cm_node->rem_addr, ipaddr, nfo->ipv4 ? 4 : 16)))) {
+ atomic_inc(&cm_node->ref_count);
+ list_add(&cm_node->connected_entry, &connected_list);
+ }
}
spin_unlock_irqrestore(&cm_core->ht_lock, flags);
@@ -4294,6 +4303,9 @@ void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,
enum i40iw_quad_hash_manage_type op =
ifup ? I40IW_QHASH_MANAGE_TYPE_ADD : I40IW_QHASH_MANAGE_TYPE_DELETE;
+ nfo.vlan_id = vlan_id;
+ nfo.ipv4 = ipv4;
+
/* Disable or enable qhash for listeners */
spin_lock_irqsave(&cm_core->listen_list_lock, flags);
list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
@@ -4303,8 +4315,6 @@ void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,
memcpy(nfo.loc_addr, listen_node->loc_addr,
sizeof(nfo.loc_addr));
nfo.loc_port = listen_node->loc_port;
- nfo.ipv4 = listen_node->ipv4;
- nfo.vlan_id = listen_node->vlan_id;
nfo.user_pri = listen_node->user_pri;
if (!list_empty(&listen_node->child_listen_list)) {
i40iw_qhash_ctrl(iwdev,
@@ -4326,7 +4336,7 @@ void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,
}
spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
- /* disconnect any connected qp's on ifdown */
+ /* teardown connected qp's on ifdown */
if (!ifup)
- i40iw_cm_disconnect_all(iwdev);
+ i40iw_cm_teardown_connections(iwdev, ipaddr, &nfo, false);
}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.h b/drivers/infiniband/hw/i40iw/i40iw_cm.h
index 0d5840d2c4fc..cf60c451e071 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.h
@@ -276,8 +276,6 @@ struct i40iw_cm_tcp_context {
u32 mss;
u8 snd_wscale;
u8 rcv_wscale;
-
- struct timeval sent_ts;
};
enum i40iw_cm_listener_state {
@@ -337,7 +335,7 @@ struct i40iw_cm_node {
u16 mpav2_ird_ord;
struct iw_cm_id *cm_id;
struct list_head list;
- int accelerated;
+ bool accelerated;
struct i40iw_cm_listener *listener;
int apbvt_set;
int accept_pend;
@@ -455,5 +453,7 @@ int i40iw_arp_table(struct i40iw_device *iwdev,
void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,
u32 *ipaddr, bool ipv4, bool ifup);
-void i40iw_cm_disconnect_all(struct i40iw_device *iwdev);
+void i40iw_cm_teardown_connections(struct i40iw_device *iwdev, u32 *ipaddr,
+ struct i40iw_cm_info *nfo,
+ bool disconnect_all);
#endif /* I40IW_CM_H */
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index da9821a10e0d..c74fd3309b93 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -1893,8 +1893,6 @@ static enum i40iw_status_code i40iw_sc_get_next_aeqe(struct i40iw_sc_aeq *aeq,
static enum i40iw_status_code i40iw_sc_repost_aeq_entries(struct i40iw_sc_dev *dev,
u32 count)
{
- if (count > I40IW_MAX_AEQ_ALLOCATE_COUNT)
- return I40IW_ERR_INVALID_SIZE;
if (dev->is_pf)
i40iw_wr32(dev->hw, I40E_PFPE_AEQALLOC, count);
@@ -3872,7 +3870,6 @@ enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_
struct i40iw_virt_mem virt_mem;
u32 i, mem_size;
u32 qpwantedoriginal, qpwanted, mrwanted, pblewanted;
- u32 powerof2;
u64 sd_needed;
u32 loop_count = 0;
@@ -3928,8 +3925,10 @@ enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_
hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].cnt = 1;
hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt = mrwanted;
- hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt = I40IW_MAX_WQ_ENTRIES * qpwanted;
- hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt = 4 * I40IW_MAX_IRD_SIZE * qpwanted;
+ hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt =
+ roundup_pow_of_two(I40IW_MAX_WQ_ENTRIES * qpwanted);
+ hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt =
+ roundup_pow_of_two(2 * I40IW_MAX_IRD_SIZE * qpwanted);
hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].cnt =
hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size;
hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].cnt =
@@ -3945,16 +3944,10 @@ enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_
if ((loop_count > 1000) ||
((!(loop_count % 10)) &&
(qpwanted > qpwantedoriginal * 2 / 3))) {
- if (qpwanted > FPM_MULTIPLIER) {
- qpwanted -= FPM_MULTIPLIER;
- powerof2 = 1;
- while (powerof2 < qpwanted)
- powerof2 *= 2;
- powerof2 /= 2;
- qpwanted = powerof2;
- } else {
- qpwanted /= 2;
- }
+ if (qpwanted > FPM_MULTIPLIER)
+ qpwanted = roundup_pow_of_two(qpwanted -
+ FPM_MULTIPLIER);
+ qpwanted >>= 1;
}
if (mrwanted > FPM_MULTIPLIER * 10)
mrwanted -= FPM_MULTIPLIER * 10;
@@ -3962,8 +3955,6 @@ enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_
pblewanted -= FPM_MULTIPLIER * 1000;
} while (sd_needed > hmc_fpm_misc->max_sds && loop_count < 2000);
- sd_needed = i40iw_est_sd(dev, hmc_info);
-
i40iw_debug(dev, I40IW_DEBUG_HMC,
"loop_cnt=%d, sd_needed=%lld, qpcnt = %d, cqcnt=%d, mrcnt=%d, pblecnt=%d\n",
loop_count, sd_needed,
diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h
index 029083cb81d5..4b65e4140bd7 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_d.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_d.h
@@ -97,6 +97,7 @@
#define RDMA_OPCODE_MASK 0x0f
#define RDMA_READ_REQ_OPCODE 1
#define Q2_BAD_FRAME_OFFSET 72
+#define Q2_FPSN_OFFSET 64
#define CQE_MAJOR_DRV 0x8000
#define I40IW_TERM_SENT 0x01
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
index e96bdafbcbb3..61540e14e4b9 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
@@ -385,6 +385,8 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
iwcq->ibcq.event_handler(&ibevent, iwcq->ibcq.cq_context);
}
break;
+ case I40IW_AE_LLP_DOUBT_REACHABILITY:
+ break;
case I40IW_AE_PRIV_OPERATION_DENIED:
case I40IW_AE_STAG_ZERO_INVALID:
case I40IW_AE_IB_RREQ_AND_Q1_FULL:
@@ -403,7 +405,6 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
case I40IW_AE_LLP_SEGMENT_TOO_SMALL:
case I40IW_AE_LLP_SYN_RECEIVED:
case I40IW_AE_LLP_TOO_MANY_RETRIES:
- case I40IW_AE_LLP_DOUBT_REACHABILITY:
case I40IW_AE_LCE_QP_CATASTROPHIC:
case I40IW_AE_LCE_FUNCTION_CATASTROPHIC:
case I40IW_AE_LCE_CQ_CATASTROPHIC:
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index e824296713e2..b08862978de8 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -99,6 +99,10 @@ static struct notifier_block i40iw_net_notifier = {
.notifier_call = i40iw_net_event
};
+static struct notifier_block i40iw_netdevice_notifier = {
+ .notifier_call = i40iw_netdevice_event
+};
+
/**
* i40iw_find_i40e_handler - find a handler given a client info
* @ldev: pointer to a client info
@@ -483,6 +487,7 @@ static enum i40iw_status_code i40iw_create_hmc_objs(struct i40iw_device *iwdev,
for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
info.rsrc_type = iw_hmc_obj_types[i];
info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
+ info.add_sd_cnt = 0;
status = i40iw_create_hmc_obj_type(dev, &info);
if (status) {
i40iw_pr_err("create obj type %d status = %d\n",
@@ -607,7 +612,7 @@ static enum i40iw_status_code i40iw_create_cqp(struct i40iw_device *iwdev)
INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
INIT_LIST_HEAD(&cqp->cqp_pending_reqs);
/* init the waitq of the cqp_requests and add them to the list */
- for (i = 0; i < I40IW_CQP_SW_SQSIZE_2048; i++) {
+ for (i = 0; i < sqsize; i++) {
init_waitqueue_head(&cqp->cqp_requests[i].waitq);
list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs);
}
@@ -1285,7 +1290,7 @@ static void i40iw_wait_pe_ready(struct i40iw_hw *hw)
__LINE__, statuscpu2);
if ((statuscpu0 == 0x80) && (statuscpu1 == 0x80) && (statuscpu2 == 0x80))
break; /* SUCCESS */
- mdelay(1000);
+ msleep(1000);
retrycount++;
} while (retrycount < 14);
i40iw_wr32(hw, 0xb4040, 0x4C104C5);
@@ -1393,6 +1398,7 @@ static void i40iw_register_notifiers(void)
register_inetaddr_notifier(&i40iw_inetaddr_notifier);
register_inet6addr_notifier(&i40iw_inetaddr6_notifier);
register_netevent_notifier(&i40iw_net_notifier);
+ register_netdevice_notifier(&i40iw_netdevice_notifier);
}
/**
@@ -1404,6 +1410,7 @@ static void i40iw_unregister_notifiers(void)
unregister_netevent_notifier(&i40iw_net_notifier);
unregister_inetaddr_notifier(&i40iw_inetaddr_notifier);
unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
+ unregister_netdevice_notifier(&i40iw_netdevice_notifier);
}
/**
@@ -1793,7 +1800,7 @@ static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool
if (reset)
iwdev->reset = true;
- i40iw_cm_disconnect_all(iwdev);
+ i40iw_cm_teardown_connections(iwdev, NULL, NULL, true);
destroy_workqueue(iwdev->virtchnl_wq);
i40iw_deinit_device(iwdev);
}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
index 796a815b53fd..4c21197830b3 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
@@ -48,7 +48,6 @@ static void i40iw_ieq_tx_compl(struct i40iw_sc_vsi *vsi, void *sqwrid);
static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx);
static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc
*rsrc, bool initial);
-static void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp);
/**
* i40iw_puda_get_listbuf - get buffer from puda list
* @list: list to use for buffers (ILQ or IEQ)
@@ -1378,7 +1377,7 @@ static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx;
u32 rcv_wnd = hw_host_ctx[23];
/* first partial seq # in q2 */
- u32 fps = qp->q2_buf[16];
+ u32 fps = *(u32 *)(qp->q2_buf + Q2_FPSN_OFFSET);
struct list_head *rxlist = &pfpdu->rxlist;
struct list_head *plist;
@@ -1483,7 +1482,7 @@ static void i40iw_ieq_tx_compl(struct i40iw_sc_vsi *vsi, void *sqwrid)
* @ieq: ieq resource
* @qp: all pending fpdu buffers
*/
-static void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp)
+void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp)
{
struct i40iw_puda_buf *buf;
struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.h b/drivers/infiniband/hw/i40iw/i40iw_puda.h
index 660aa3edae56..53a7d58c84b5 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.h
@@ -184,4 +184,5 @@ enum i40iw_status_code i40iw_cqp_qp_create_cmd(struct i40iw_sc_dev *dev, struct
enum i40iw_status_code i40iw_cqp_cq_create_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq);
void i40iw_cqp_qp_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
void i40iw_cqp_cq_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq);
+void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp);
#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c
index 3ec5389a81a1..8afa5a67a86b 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_uk.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_uk.c
@@ -894,20 +894,6 @@ exit:
}
/**
- * i40iw_qp_roundup - return round up QP WQ depth
- * @wqdepth: WQ depth in quantas to round up
- */
-static int i40iw_qp_round_up(u32 wqdepth)
-{
- int scount = 1;
-
- for (wqdepth--; scount <= 16; scount *= 2)
- wqdepth |= wqdepth >> scount;
-
- return ++wqdepth;
-}
-
-/**
* i40iw_get_wqe_shift - get shift count for maximum wqe size
* @sge: Maximum Scatter Gather Elements wqe
* @inline_data: Maximum inline data size
@@ -934,7 +920,7 @@ void i40iw_get_wqe_shift(u32 sge, u32 inline_data, u8 *shift)
*/
enum i40iw_status_code i40iw_get_sqdepth(u32 sq_size, u8 shift, u32 *sqdepth)
{
- *sqdepth = i40iw_qp_round_up((sq_size << shift) + I40IW_SQ_RSVD);
+ *sqdepth = roundup_pow_of_two((sq_size << shift) + I40IW_SQ_RSVD);
if (*sqdepth < (I40IW_QP_SW_MIN_WQSIZE << shift))
*sqdepth = I40IW_QP_SW_MIN_WQSIZE << shift;
@@ -953,7 +939,7 @@ enum i40iw_status_code i40iw_get_sqdepth(u32 sq_size, u8 shift, u32 *sqdepth)
*/
enum i40iw_status_code i40iw_get_rqdepth(u32 rq_size, u8 shift, u32 *rqdepth)
{
- *rqdepth = i40iw_qp_round_up((rq_size << shift) + I40IW_RQ_RSVD);
+ *rqdepth = roundup_pow_of_two((rq_size << shift) + I40IW_RQ_RSVD);
if (*rqdepth < (I40IW_QP_SW_MIN_WQSIZE << shift))
*rqdepth = I40IW_QP_SW_MIN_WQSIZE << shift;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_user.h b/drivers/infiniband/hw/i40iw/i40iw_user.h
index e73efc59a0ab..b125925641e0 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_user.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_user.h
@@ -59,7 +59,6 @@ enum i40iw_device_capabilities_const {
I40IW_MAX_CEQ_ENTRIES = 131071,
I40IW_MIN_CQ_SIZE = 1,
I40IW_MAX_CQ_SIZE = 1048575,
- I40IW_MAX_AEQ_ALLOCATE_COUNT = 255,
I40IW_DB_ID_ZERO = 0,
I40IW_MAX_WQ_FRAGMENT_COUNT = 3,
I40IW_MAX_SGE_RD = 1,
@@ -72,7 +71,7 @@ enum i40iw_device_capabilities_const {
I40IW_MAX_SQ_PAYLOAD_SIZE = 2145386496,
I40IW_MAX_INLINE_DATA_SIZE = 48,
I40IW_MAX_PUSHMODE_INLINE_DATA_SIZE = 48,
- I40IW_MAX_IRD_SIZE = 63,
+ I40IW_MAX_IRD_SIZE = 64,
I40IW_MAX_ORD_SIZE = 127,
I40IW_MAX_WQ_ENTRIES = 2048,
I40IW_Q2_BUFFER_SIZE = (248 + 100),
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index 8845dba7c438..ddc1056b0b4e 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -137,7 +137,7 @@ inline u32 i40iw_rd32(struct i40iw_hw *hw, u32 reg)
}
/**
- * i40iw_inetaddr_event - system notifier for netdev events
+ * i40iw_inetaddr_event - system notifier for ipv4 addr events
* @notfier: not used
* @event: event for notifier
* @ptr: if address
@@ -200,7 +200,7 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
}
/**
- * i40iw_inet6addr_event - system notifier for ipv6 netdev events
+ * i40iw_inet6addr_event - system notifier for ipv6 addr events
* @notfier: not used
* @event: event for notifier
* @ptr: if address
@@ -252,7 +252,7 @@ int i40iw_inet6addr_event(struct notifier_block *notifier,
}
/**
- * i40iw_net_event - system notifier for net events
+ * i40iw_net_event - system notifier for netevents
* @notfier: not used
* @event: event for notifier
* @ptr: neighbor
@@ -297,6 +297,50 @@ int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void *
}
/**
+ * i40iw_netdevice_event - system notifier for netdev events
+ * @notfier: not used
+ * @event: event for notifier
+ * @ptr: netdev
+ */
+int i40iw_netdevice_event(struct notifier_block *notifier,
+ unsigned long event,
+ void *ptr)
+{
+ struct net_device *event_netdev;
+ struct net_device *netdev;
+ struct i40iw_device *iwdev;
+ struct i40iw_handler *hdl;
+
+ event_netdev = netdev_notifier_info_to_dev(ptr);
+
+ hdl = i40iw_find_netdev(event_netdev);
+ if (!hdl)
+ return NOTIFY_DONE;
+
+ iwdev = &hdl->device;
+ if (iwdev->init_state < RDMA_DEV_REGISTERED || iwdev->closing)
+ return NOTIFY_DONE;
+
+ netdev = iwdev->ldev->netdev;
+ if (netdev != event_netdev)
+ return NOTIFY_DONE;
+
+ iwdev->iw_status = 1;
+
+ switch (event) {
+ case NETDEV_DOWN:
+ iwdev->iw_status = 0;
+ /* Fall through */
+ case NETDEV_UP:
+ i40iw_port_ibevent(iwdev);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+/**
* i40iw_get_cqp_request - get cqp struct
* @cqp: device cqp ptr
* @wait: cqp to be used in wait mode
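The new i40iw_netdevice_event() above hooks into the standard netdevice notifier chain. A sketch of the registration it implies; the notifier_block name is hypothetical, since the driver's actual registration site is not part of this hunk:

static struct notifier_block i40iw_netdevice_notifier = {
	.notifier_call = i40iw_netdevice_event,
};

/* at driver init ... */
err = register_netdevice_notifier(&i40iw_netdevice_notifier);

/* ... and at teardown */
unregister_netdevice_notifier(&i40iw_netdevice_notifier);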
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 3c6f3ce88f89..70024e8e2692 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -412,6 +412,7 @@ void i40iw_free_qp_resources(struct i40iw_device *iwdev,
{
struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
+ i40iw_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
if (qp_num)
i40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num);
@@ -1637,6 +1638,7 @@ static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd,
err_code = -EOVERFLOW;
goto err;
}
+ stag &= ~I40IW_CQPSQ_STAG_KEY_MASK;
iwmr->stag = stag;
iwmr->ibmr.rkey = stag;
iwmr->ibmr.lkey = stag;
@@ -2242,14 +2244,12 @@ static int i40iw_post_send(struct ib_qp *ibqp,
info.op.inline_rdma_write.len = ib_wr->sg_list[0].length;
info.op.inline_rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
info.op.inline_rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
- info.op.inline_rdma_write.rem_addr.len = ib_wr->sg_list->length;
ret = ukqp->ops.iw_inline_rdma_write(ukqp, &info, false);
} else {
info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
info.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
info.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
- info.op.rdma_write.rem_addr.len = ib_wr->sg_list->length;
ret = ukqp->ops.iw_rdma_write(ukqp, &info, false);
}
@@ -2271,7 +2271,6 @@ static int i40iw_post_send(struct ib_qp *ibqp,
info.op_type = I40IW_OP_TYPE_RDMA_READ;
info.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey;
- info.op.rdma_read.rem_addr.len = ib_wr->sg_list->length;
info.op.rdma_read.lo_addr.tag_off = ib_wr->sg_list->addr;
info.op.rdma_read.lo_addr.stag = ib_wr->sg_list->lkey;
info.op.rdma_read.lo_addr.len = ib_wr->sg_list->length;
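The stag &= ~I40IW_CQPSQ_STAG_KEY_MASK change in i40iw_alloc_mr() strips the consumer-key byte before the STag is stored as lkey/rkey. A toy illustration, assuming the mask covers the low 8 bits (the driver's actual mask value is defined elsewhere):

#include <assert.h>
#include <stdint.h>

#define STAG_KEY_MASK 0xffU /* assumption for illustration */

int main(void)
{
	uint32_t stag = 0x00123456;

	stag &= ~STAG_KEY_MASK; /* keep the index bits, drop the key byte */
	assert(stag == 0x00123400);
	return 0;
}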
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index bf4f14a1b4fc..9a566ee3ceff 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -170,7 +170,7 @@ err_buf:
return err;
}
-#define CQ_CREATE_FLAGS_SUPPORTED IB_CQ_FLAGS_TIMESTAMP_COMPLETION
+#define CQ_CREATE_FLAGS_SUPPORTED IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
@@ -246,7 +246,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
cq->db.dma, &cq->mcq, vector, 0,
- !!(cq->create_flags & IB_CQ_FLAGS_TIMESTAMP_COMPLETION));
+ !!(cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION));
if (err)
goto err_dbmap;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 8c8a16791a3f..8d2ee9322f2e 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -589,6 +589,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
if (props->rss_caps.supported_qpts) {
resp.rss_caps.rx_hash_function =
MLX4_IB_RX_HASH_FUNC_TOEPLITZ;
+
resp.rss_caps.rx_hash_fields_mask =
MLX4_IB_RX_HASH_SRC_IPV4 |
MLX4_IB_RX_HASH_DST_IPV4 |
@@ -598,6 +599,11 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
MLX4_IB_RX_HASH_DST_PORT_TCP |
MLX4_IB_RX_HASH_SRC_PORT_UDP |
MLX4_IB_RX_HASH_DST_PORT_UDP;
+
+ if (dev->dev->caps.tunnel_offload_mode ==
+ MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+ resp.rss_caps.rx_hash_fields_mask |=
+ MLX4_IB_RX_HASH_INNER;
}
}
@@ -2995,9 +3001,8 @@ err_steer_free_bitmap:
kfree(ibdev->ib_uc_qpns_bitmap);
err_steer_qp_release:
- if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
- mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
- ibdev->steer_qpn_count);
+ mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
+ ibdev->steer_qpn_count);
err_counter:
for (i = 0; i < ibdev->num_ports; ++i)
mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);
@@ -3102,11 +3107,9 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
ibdev->iboe.nb.notifier_call = NULL;
}
- if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
- mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
- ibdev->steer_qpn_count);
- kfree(ibdev->ib_uc_qpns_bitmap);
- }
+ mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
+ ibdev->steer_qpn_count);
+ kfree(ibdev->ib_uc_qpns_bitmap);
iounmap(ibdev->uar_map);
for (p = 0; p < ibdev->num_ports; ++p)
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index caf490ab24c8..f045491f2c14 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -734,10 +734,24 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
return (-EOPNOTSUPP);
}
+ if (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_INNER) {
+ if (dev->dev->caps.tunnel_offload_mode ==
+ MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+ /*
+ * Hash according to inner headers if they exist;
+ * otherwise fall back to the outer headers.
+ */
+ rss_ctx->flags |= MLX4_RSS_BY_INNER_HEADERS_IPONLY;
+ } else {
+ pr_debug("RSS Hash for inner headers isn't supported\n");
+ return (-EOPNOTSUPP);
+ }
+ }
+
return 0;
}
-static int create_qp_rss(struct mlx4_ib_dev *dev, struct ib_pd *ibpd,
+static int create_qp_rss(struct mlx4_ib_dev *dev,
struct ib_qp_init_attr *init_attr,
struct mlx4_ib_create_qp_rss *ucmd,
struct mlx4_ib_qp *qp)
@@ -860,7 +874,7 @@ static struct ib_qp *_mlx4_ib_create_qp_rss(struct ib_pd *pd,
qp->pri.vid = 0xFFFF;
qp->alt.vid = 0xFFFF;
- err = create_qp_rss(to_mdev(pd->device), pd, init_attr, &ucmd, qp);
+ err = create_qp_rss(to_mdev(pd->device), init_attr, &ucmd, qp);
if (err) {
kfree(qp);
return ERR_PTR(err);
@@ -1836,6 +1850,8 @@ static int _mlx4_set_path(struct mlx4_ib_dev *dev,
mlx4_ib_gid_index_to_real_index(dev, port,
grh->sgid_index);
+ if (real_sgid_index < 0)
+ return real_sgid_index;
if (real_sgid_index >= dev->dev->caps.gid_table_len[port]) {
pr_err("sgid_index (%u) too large. max is %d\n",
real_sgid_index, dev->dev->caps.gid_table_len[port] - 1);
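The added real_sgid_index check follows the usual pattern for helpers that return either a valid index or a negative errno: the error case must be tested before the value is compared against a table bound. A self-contained sketch with a hypothetical lookup helper:

#include <errno.h>

/* stand-in for mlx4_ib_gid_index_to_real_index(): index >= 0 or -errno */
static int lookup_index(int key)
{
	return key >= 0 ? key : -EINVAL;
}

static int use_index(int key, int table_len)
{
	int idx = lookup_index(key);

	if (idx < 0)
		return idx;        /* propagate the errno before any range check */
	if (idx >= table_len)
		return -EINVAL;    /* out of range */
	return idx;
}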
diff --git a/drivers/infiniband/hw/mlx5/cong.c b/drivers/infiniband/hw/mlx5/cong.c
index 2d32b519bb61..985fa2637390 100644
--- a/drivers/infiniband/hw/mlx5/cong.c
+++ b/drivers/infiniband/hw/mlx5/cong.c
@@ -247,21 +247,30 @@ static void mlx5_ib_set_cc_param_mask_val(void *field, int offset,
}
}
-static int mlx5_ib_get_cc_params(struct mlx5_ib_dev *dev, int offset, u32 *var)
+static int mlx5_ib_get_cc_params(struct mlx5_ib_dev *dev, u8 port_num,
+ int offset, u32 *var)
{
int outlen = MLX5_ST_SZ_BYTES(query_cong_params_out);
void *out;
void *field;
int err;
enum mlx5_ib_cong_node_type node;
+ struct mlx5_core_dev *mdev;
+
+ /* Takes a 1-based port number */
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num + 1, NULL);
+ if (!mdev)
+ return -ENODEV;
out = kvzalloc(outlen, GFP_KERNEL);
- if (!out)
- return -ENOMEM;
+ if (!out) {
+ err = -ENOMEM;
+ goto alloc_err;
+ }
node = mlx5_ib_param_to_node(offset);
- err = mlx5_cmd_query_cong_params(dev->mdev, node, out, outlen);
+ err = mlx5_cmd_query_cong_params(mdev, node, out, outlen);
if (err)
goto free;
@@ -270,21 +279,32 @@ static int mlx5_ib_get_cc_params(struct mlx5_ib_dev *dev, int offset, u32 *var)
free:
kvfree(out);
+alloc_err:
+ mlx5_ib_put_native_port_mdev(dev, port_num + 1);
return err;
}
-static int mlx5_ib_set_cc_params(struct mlx5_ib_dev *dev, int offset, u32 var)
+static int mlx5_ib_set_cc_params(struct mlx5_ib_dev *dev, u8 port_num,
+ int offset, u32 var)
{
int inlen = MLX5_ST_SZ_BYTES(modify_cong_params_in);
void *in;
void *field;
enum mlx5_ib_cong_node_type node;
+ struct mlx5_core_dev *mdev;
u32 attr_mask = 0;
int err;
+ /* Takes a 1-based port number */
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num + 1, NULL);
+ if (!mdev)
+ return -ENODEV;
+
in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
- return -ENOMEM;
+ if (!in) {
+ err = -ENOMEM;
+ goto alloc_err;
+ }
MLX5_SET(modify_cong_params_in, in, opcode,
MLX5_CMD_OP_MODIFY_CONG_PARAMS);
@@ -299,8 +319,10 @@ static int mlx5_ib_set_cc_params(struct mlx5_ib_dev *dev, int offset, u32 var)
MLX5_SET(field_select_r_roce_rp, field, field_select_r_roce_rp,
attr_mask);
- err = mlx5_cmd_modify_cong_params(dev->mdev, in, inlen);
+ err = mlx5_cmd_modify_cong_params(mdev, in, inlen);
kvfree(in);
+alloc_err:
+ mlx5_ib_put_native_port_mdev(dev, port_num + 1);
return err;
}
@@ -324,7 +346,7 @@ static ssize_t set_param(struct file *filp, const char __user *buf,
if (kstrtou32(lbuf, 0, &var))
return -EINVAL;
- ret = mlx5_ib_set_cc_params(param->dev, offset, var);
+ ret = mlx5_ib_set_cc_params(param->dev, param->port_num, offset, var);
return ret ? ret : count;
}
@@ -340,7 +362,7 @@ static ssize_t get_param(struct file *filp, char __user *buf, size_t count,
if (*pos)
return 0;
- ret = mlx5_ib_get_cc_params(param->dev, offset, &var);
+ ret = mlx5_ib_get_cc_params(param->dev, param->port_num, offset, &var);
if (ret)
return ret;
@@ -362,44 +384,51 @@ static const struct file_operations dbg_cc_fops = {
.read = get_param,
};
-void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev)
+void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num)
{
if (!mlx5_debugfs_root ||
- !dev->dbg_cc_params ||
- !dev->dbg_cc_params->root)
+ !dev->port[port_num].dbg_cc_params ||
+ !dev->port[port_num].dbg_cc_params->root)
return;
- debugfs_remove_recursive(dev->dbg_cc_params->root);
- kfree(dev->dbg_cc_params);
- dev->dbg_cc_params = NULL;
+ debugfs_remove_recursive(dev->port[port_num].dbg_cc_params->root);
+ kfree(dev->port[port_num].dbg_cc_params);
+ dev->port[port_num].dbg_cc_params = NULL;
}
-int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev)
+int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num)
{
struct mlx5_ib_dbg_cc_params *dbg_cc_params;
+ struct mlx5_core_dev *mdev;
int i;
if (!mlx5_debugfs_root)
goto out;
- if (!MLX5_CAP_GEN(dev->mdev, cc_query_allowed) ||
- !MLX5_CAP_GEN(dev->mdev, cc_modify_allowed))
+ /* Takes a 1-based port number */
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num + 1, NULL);
+ if (!mdev)
goto out;
+ if (!MLX5_CAP_GEN(mdev, cc_query_allowed) ||
+ !MLX5_CAP_GEN(mdev, cc_modify_allowed))
+ goto put_mdev;
+
dbg_cc_params = kzalloc(sizeof(*dbg_cc_params), GFP_KERNEL);
if (!dbg_cc_params)
- goto out;
+ goto err;
- dev->dbg_cc_params = dbg_cc_params;
+ dev->port[port_num].dbg_cc_params = dbg_cc_params;
dbg_cc_params->root = debugfs_create_dir("cc_params",
- dev->mdev->priv.dbg_root);
+ mdev->priv.dbg_root);
if (!dbg_cc_params->root)
goto err;
for (i = 0; i < MLX5_IB_DBG_CC_MAX; i++) {
dbg_cc_params->params[i].offset = i;
dbg_cc_params->params[i].dev = dev;
+ dbg_cc_params->params[i].port_num = port_num;
dbg_cc_params->params[i].dentry =
debugfs_create_file(mlx5_ib_dbg_cc_name[i],
0600, dbg_cc_params->root,
@@ -408,11 +437,17 @@ int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev)
if (!dbg_cc_params->params[i].dentry)
goto err;
}
-out: return 0;
+
+put_mdev:
+ mlx5_ib_put_native_port_mdev(dev, port_num + 1);
+out:
+ return 0;
err:
mlx5_ib_warn(dev, "cong debugfs failure\n");
- mlx5_ib_cleanup_cong_debugfs(dev);
+ mlx5_ib_cleanup_cong_debugfs(dev, port_num);
+ mlx5_ib_put_native_port_mdev(dev, port_num + 1);
+
/*
* We don't want to fail driver if debugfs failed to initialize,
* so we are not forwarding error to the user.
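Both the query and modify paths above follow the same discipline around the per-port helpers added in main.c later in this patch: every successful mlx5_ib_get_native_port_mdev() call, which takes a 1-based port number, must be balanced by mlx5_ib_put_native_port_mdev() on every exit path. A minimal sketch of the pattern with the firmware command elided:

static int cc_param_op(struct mlx5_ib_dev *dev, u8 port_num)
{
	struct mlx5_core_dev *mdev;
	int err = 0;

	/* takes a 1-based port number */
	mdev = mlx5_ib_get_native_port_mdev(dev, port_num + 1, NULL);
	if (!mdev)
		return -ENODEV;

	/* ... issue the command against mdev rather than dev->mdev ... */

	mlx5_ib_put_native_port_mdev(dev, port_num + 1);
	return err;
}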
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 18705cbcdc8c..5b974fb97611 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -1010,7 +1010,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
MLX5_SET(cqc, cqc, uar_page, index);
MLX5_SET(cqc, cqc, c_eqn, eqn);
MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
- if (cq->create_flags & IB_CQ_FLAGS_IGNORE_OVERRUN)
+ if (cq->create_flags & IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN)
MLX5_SET(cqc, cqc, oi, 1);
err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 1003b0133a49..32a9e9228b13 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -197,10 +197,9 @@ static void pma_cnt_assign(struct ib_pma_portcounters *pma_cnt,
vl_15_dropped);
}
-static int process_pma_cmd(struct ib_device *ibdev, u8 port_num,
+static int process_pma_cmd(struct mlx5_core_dev *mdev, u8 port_num,
const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
- struct mlx5_ib_dev *dev = to_mdev(ibdev);
int err;
void *out_cnt;
@@ -222,7 +221,7 @@ static int process_pma_cmd(struct ib_device *ibdev, u8 port_num,
if (!out_cnt)
return IB_MAD_RESULT_FAILURE;
- err = mlx5_core_query_vport_counter(dev->mdev, 0, 0,
+ err = mlx5_core_query_vport_counter(mdev, 0, 0,
port_num, out_cnt, sz);
if (!err)
pma_cnt_ext_assign(pma_cnt_ext, out_cnt);
@@ -235,7 +234,7 @@ static int process_pma_cmd(struct ib_device *ibdev, u8 port_num,
if (!out_cnt)
return IB_MAD_RESULT_FAILURE;
- err = mlx5_core_query_ib_ppcnt(dev->mdev, port_num,
+ err = mlx5_core_query_ib_ppcnt(mdev, port_num,
out_cnt, sz);
if (!err)
pma_cnt_assign(pma_cnt, out_cnt);
@@ -255,9 +254,11 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
u16 *out_mad_pkey_index)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
- struct mlx5_core_dev *mdev = dev->mdev;
const struct ib_mad *in_mad = (const struct ib_mad *)in;
struct ib_mad *out_mad = (struct ib_mad *)out;
+ struct mlx5_core_dev *mdev;
+ u8 mdev_port_num;
+ int ret;
if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
*out_mad_size != sizeof(*out_mad)))
@@ -265,14 +266,20 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
memset(out_mad->data, 0, sizeof(out_mad->data));
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
+ if (!mdev)
+ return IB_MAD_RESULT_FAILURE;
+
if (MLX5_CAP_GEN(mdev, vport_counters) &&
in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
in_mad->mad_hdr.method == IB_MGMT_METHOD_GET) {
- return process_pma_cmd(ibdev, port_num, in_mad, out_mad);
+ ret = process_pma_cmd(mdev, mdev_port_num, in_mad, out_mad);
} else {
- return process_mad(ibdev, mad_flags, port_num, in_wc, in_grh,
+ ret = process_mad(ibdev, mad_flags, port_num, in_wc, in_grh,
in_mad, out_mad);
}
+ mlx5_ib_put_native_port_mdev(dev, port_num);
+ return ret;
}
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
@@ -519,7 +526,7 @@ int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
int ext_active_speed;
int err = -ENOMEM;
- if (port < 1 || port > MLX5_CAP_GEN(mdev, num_ports)) {
+ if (port < 1 || port > dev->num_ports) {
mlx5_ib_warn(dev, "invalid port number %d\n", port);
return -EINVAL;
}
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 262c1aa2e028..4236c8086820 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -50,16 +50,14 @@
#include <rdma/ib_cache.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
+#include <linux/mlx5/fs.h>
#include <linux/list.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
-#include <linux/mlx5/fs.h>
-#include <linux/mlx5/vport.h>
#include "mlx5_ib.h"
#include "cmd.h"
-#include <linux/mlx5/vport.h>
#define DRIVER_NAME "mlx5_ib"
#define DRIVER_VERSION "5.0-0"
@@ -72,10 +70,36 @@ static char mlx5_version[] =
DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
DRIVER_VERSION "\n";
+struct mlx5_ib_event_work {
+ struct work_struct work;
+ struct mlx5_core_dev *dev;
+ void *context;
+ enum mlx5_dev_event event;
+ unsigned long param;
+};
+
enum {
MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
};
+static struct workqueue_struct *mlx5_ib_event_wq;
+static LIST_HEAD(mlx5_ib_unaffiliated_port_list);
+static LIST_HEAD(mlx5_ib_dev_list);
+/*
+ * This mutex should be held when accessing either of the above lists
+ */
+static DEFINE_MUTEX(mlx5_ib_multiport_mutex);
+
+struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi)
+{
+ struct mlx5_ib_dev *dev;
+
+ mutex_lock(&mlx5_ib_multiport_mutex);
+ dev = mpi->ibdev;
+ mutex_unlock(&mlx5_ib_multiport_mutex);
+ return dev;
+}
+
static enum rdma_link_layer
mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
{
@@ -115,24 +139,32 @@ static int get_port_state(struct ib_device *ibdev,
static int mlx5_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
+ struct mlx5_roce *roce = container_of(this, struct mlx5_roce, nb);
struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
- struct mlx5_ib_dev *ibdev = container_of(this, struct mlx5_ib_dev,
- roce.nb);
+ u8 port_num = roce->native_port_num;
+ struct mlx5_core_dev *mdev;
+ struct mlx5_ib_dev *ibdev;
+
+ ibdev = roce->dev;
+ mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
+ if (!mdev)
+ return NOTIFY_DONE;
switch (event) {
case NETDEV_REGISTER:
case NETDEV_UNREGISTER:
- write_lock(&ibdev->roce.netdev_lock);
- if (ndev->dev.parent == &ibdev->mdev->pdev->dev)
- ibdev->roce.netdev = (event == NETDEV_UNREGISTER) ?
- NULL : ndev;
- write_unlock(&ibdev->roce.netdev_lock);
+ write_lock(&roce->netdev_lock);
+
+ if (ndev->dev.parent == &mdev->pdev->dev)
+ roce->netdev = (event == NETDEV_UNREGISTER) ?
+ NULL : ndev;
+ write_unlock(&roce->netdev_lock);
break;
case NETDEV_CHANGE:
case NETDEV_UP:
case NETDEV_DOWN: {
- struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(ibdev->mdev);
+ struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(mdev);
struct net_device *upper = NULL;
if (lag_ndev) {
@@ -140,27 +172,28 @@ static int mlx5_netdev_event(struct notifier_block *this,
dev_put(lag_ndev);
}
- if ((upper == ndev || (!upper && ndev == ibdev->roce.netdev))
+ if ((upper == ndev || (!upper && ndev == roce->netdev))
&& ibdev->ib_active) {
struct ib_event ibev = { };
enum ib_port_state port_state;
- if (get_port_state(&ibdev->ib_dev, 1, &port_state))
- return NOTIFY_DONE;
+ if (get_port_state(&ibdev->ib_dev, port_num,
+ &port_state))
+ goto done;
- if (ibdev->roce.last_port_state == port_state)
- return NOTIFY_DONE;
+ if (roce->last_port_state == port_state)
+ goto done;
- ibdev->roce.last_port_state = port_state;
+ roce->last_port_state = port_state;
ibev.device = &ibdev->ib_dev;
if (port_state == IB_PORT_DOWN)
ibev.event = IB_EVENT_PORT_ERR;
else if (port_state == IB_PORT_ACTIVE)
ibev.event = IB_EVENT_PORT_ACTIVE;
else
- return NOTIFY_DONE;
+ goto done;
- ibev.element.port_num = 1;
+ ibev.element.port_num = port_num;
ib_dispatch_event(&ibev);
}
break;
@@ -169,7 +202,8 @@ static int mlx5_netdev_event(struct notifier_block *this,
default:
break;
}
-
+done:
+ mlx5_ib_put_native_port_mdev(ibdev, port_num);
return NOTIFY_DONE;
}
@@ -178,22 +212,88 @@ static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
{
struct mlx5_ib_dev *ibdev = to_mdev(device);
struct net_device *ndev;
+ struct mlx5_core_dev *mdev;
- ndev = mlx5_lag_get_roce_netdev(ibdev->mdev);
+ mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
+ if (!mdev)
+ return NULL;
+
+ ndev = mlx5_lag_get_roce_netdev(mdev);
if (ndev)
- return ndev;
+ goto out;
/* Ensure ndev does not disappear before we invoke dev_hold()
*/
- read_lock(&ibdev->roce.netdev_lock);
- ndev = ibdev->roce.netdev;
+ read_lock(&ibdev->roce[port_num - 1].netdev_lock);
+ ndev = ibdev->roce[port_num - 1].netdev;
if (ndev)
dev_hold(ndev);
- read_unlock(&ibdev->roce.netdev_lock);
+ read_unlock(&ibdev->roce[port_num - 1].netdev_lock);
+out:
+ mlx5_ib_put_native_port_mdev(ibdev, port_num);
return ndev;
}
+struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
+ u8 ib_port_num,
+ u8 *native_port_num)
+{
+ enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
+ ib_port_num);
+ struct mlx5_core_dev *mdev = NULL;
+ struct mlx5_ib_multiport_info *mpi;
+ struct mlx5_ib_port *port;
+
+ if (native_port_num)
+ *native_port_num = 1;
+
+ if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
+ return ibdev->mdev;
+
+ port = &ibdev->port[ib_port_num - 1];
+ if (!port)
+ return NULL;
+
+ spin_lock(&port->mp.mpi_lock);
+ mpi = ibdev->port[ib_port_num - 1].mp.mpi;
+ if (mpi && !mpi->unaffiliate) {
+ mdev = mpi->mdev;
+ /* If it's the master, there is no need to refcount; it'll exist
+ * as long as the ib_dev does.
+ */
+ if (!mpi->is_master)
+ mpi->mdev_refcnt++;
+ }
+ spin_unlock(&port->mp.mpi_lock);
+
+ return mdev;
+}
+
+void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u8 port_num)
+{
+ enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
+ port_num);
+ struct mlx5_ib_multiport_info *mpi;
+ struct mlx5_ib_port *port;
+
+ if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
+ return;
+
+ port = &ibdev->port[port_num - 1];
+
+ spin_lock(&port->mp.mpi_lock);
+ mpi = ibdev->port[port_num - 1].mp.mpi;
+ if (mpi->is_master)
+ goto out;
+
+ mpi->mdev_refcnt--;
+ if (mpi->unaffiliate)
+ complete(&mpi->unref_comp);
+out:
+ spin_unlock(&port->mp.mpi_lock);
+}
+
static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
u8 *active_width)
{
@@ -256,19 +356,33 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
struct ib_port_attr *props)
{
struct mlx5_ib_dev *dev = to_mdev(device);
- struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_core_dev *mdev;
struct net_device *ndev, *upper;
enum ib_mtu ndev_ib_mtu;
+ bool put_mdev = true;
u16 qkey_viol_cntr;
u32 eth_prot_oper;
+ u8 mdev_port_num;
int err;
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
+ if (!mdev) {
+ /* This means the port isn't affiliated yet. Get the
+ * info for the master port instead.
+ */
+ put_mdev = false;
+ mdev = dev->mdev;
+ mdev_port_num = 1;
+ port_num = 1;
+ }
+
/* Possible bad flows are checked before filling out props so in case
* of an error it will still be zeroed out.
*/
- err = mlx5_query_port_eth_proto_oper(mdev, &eth_prot_oper, port_num);
+ err = mlx5_query_port_eth_proto_oper(mdev, &eth_prot_oper,
+ mdev_port_num);
if (err)
- return err;
+ goto out;
translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
&props->active_width);
@@ -284,12 +398,16 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
props->state = IB_PORT_DOWN;
props->phys_state = 3;
- mlx5_query_nic_vport_qkey_viol_cntr(dev->mdev, &qkey_viol_cntr);
+ mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr);
props->qkey_viol_cntr = qkey_viol_cntr;
+ /* If this is a stub query for an unaffiliated port, stop here */
+ if (!put_mdev)
+ goto out;
+
ndev = mlx5_ib_get_netdev(device, port_num);
if (!ndev)
- return 0;
+ goto out;
if (mlx5_lag_is_active(dev->mdev)) {
rcu_read_lock();
@@ -312,7 +430,10 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
dev_put(ndev);
props->active_mtu = min(props->max_mtu, ndev_ib_mtu);
- return 0;
+out:
+ if (put_mdev)
+ mlx5_ib_put_native_port_mdev(dev, port_num);
+ return err;
}
static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
@@ -354,7 +475,7 @@ static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
roce_l3_type, gid->raw, mac, vlan,
- vlan_id);
+ vlan_id, port_num);
}
static int mlx5_ib_add_gid(struct ib_device *device, u8 port_num,
@@ -438,11 +559,11 @@ static int mlx5_get_vport_access_method(struct ib_device *ibdev)
}
static void get_atomic_caps(struct mlx5_ib_dev *dev,
+ u8 atomic_size_qp,
struct ib_device_attr *props)
{
u8 tmp;
u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
- u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
u8 atomic_req_8B_endianness_mode =
MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianness_mode);
@@ -459,6 +580,29 @@ static void get_atomic_caps(struct mlx5_ib_dev *dev,
}
}
+static void get_atomic_caps_qp(struct mlx5_ib_dev *dev,
+ struct ib_device_attr *props)
+{
+ u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
+
+ get_atomic_caps(dev, atomic_size_qp, props);
+}
+
+static void get_atomic_caps_dc(struct mlx5_ib_dev *dev,
+ struct ib_device_attr *props)
+{
+ u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc);
+
+ get_atomic_caps(dev, atomic_size_qp, props);
+}
+
+bool mlx5_ib_dc_atomic_is_supported(struct mlx5_ib_dev *dev)
+{
+ struct ib_device_attr props = {};
+
+ get_atomic_caps_dc(dev, &props);
+ return (props.atomic_cap == IB_ATOMIC_HCA) ? true : false;
+}
static int mlx5_query_system_image_guid(struct ib_device *ibdev,
__be64 *sys_image_guid)
{
@@ -587,6 +731,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
int max_rq_sg;
int max_sq_sg;
u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
+ bool raw_support = !mlx5_core_mp_enabled(mdev);
struct mlx5_ib_query_device_resp resp = {};
size_t resp_len;
u64 max_tso;
@@ -650,7 +795,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (MLX5_CAP_GEN(mdev, block_lb_mc))
props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
- if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads)) {
+ if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && raw_support) {
if (MLX5_CAP_ETH(mdev, csum_cap)) {
/* Legacy bit to support old userspace libraries */
props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
@@ -682,7 +827,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
MLX5_RX_HASH_SRC_PORT_TCP |
MLX5_RX_HASH_DST_PORT_TCP |
MLX5_RX_HASH_SRC_PORT_UDP |
- MLX5_RX_HASH_DST_PORT_UDP;
+ MLX5_RX_HASH_DST_PORT_UDP |
+ MLX5_RX_HASH_INNER;
resp.response_length += sizeof(resp.rss_caps);
}
} else {
@@ -698,7 +844,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
}
if (MLX5_CAP_GEN(dev->mdev, rq_delay_drop) &&
- MLX5_CAP_GEN(dev->mdev, general_notification_event))
+ MLX5_CAP_GEN(dev->mdev, general_notification_event) &&
+ raw_support)
props->raw_packet_caps |= IB_RAW_PACKET_CAP_DELAY_DROP;
if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
@@ -706,7 +853,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
- MLX5_CAP_ETH(dev->mdev, scatter_fcs)) {
+ MLX5_CAP_ETH(dev->mdev, scatter_fcs) &&
+ raw_support) {
/* Legacy bit to support old userspace libraries */
props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
@@ -746,7 +894,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->max_srq_sge = max_rq_sg - 1;
props->max_fast_reg_page_list_len =
1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
- get_atomic_caps(dev, props);
+ get_atomic_caps_qp(dev, props);
props->masked_atomic_cap = IB_ATOMIC_NONE;
props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
@@ -770,7 +918,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;
if (mlx5_ib_port_link_layer(ibdev, 1) ==
- IB_LINK_LAYER_ETHERNET) {
+ IB_LINK_LAYER_ETHERNET && raw_support) {
props->rss_caps.max_rwq_indirection_tables =
1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
props->rss_caps.max_rwq_indirection_table_size =
@@ -807,7 +955,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
resp.response_length += sizeof(resp.cqe_comp_caps);
}
- if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen)) {
+ if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen) &&
+ raw_support) {
if (MLX5_CAP_QOS(mdev, packet_pacing) &&
MLX5_CAP_GEN(mdev, qos)) {
resp.packet_pacing_caps.qp_rate_limit_max =
@@ -866,7 +1015,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
}
}
- if (field_avail(typeof(resp), striding_rq_caps, uhw->outlen)) {
+ if (field_avail(typeof(resp), striding_rq_caps, uhw->outlen) &&
+ raw_support) {
resp.response_length += sizeof(resp.striding_rq_caps);
if (MLX5_CAP_GEN(mdev, striding_rq)) {
resp.striding_rq_caps.min_single_stride_log_num_of_bytes =
@@ -1097,7 +1247,22 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
}
if (!ret && props) {
- count = mlx5_core_reserved_gids_count(to_mdev(ibdev)->mdev);
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_core_dev *mdev;
+ bool put_mdev = true;
+
+ mdev = mlx5_ib_get_native_port_mdev(dev, port, NULL);
+ if (!mdev) {
+ /* If the port isn't affiliated yet, query the master.
+ * The master and slave will have the same values.
+ */
+ mdev = dev->mdev;
+ port = 1;
+ put_mdev = false;
+ }
+ count = mlx5_core_reserved_gids_count(mdev);
+ if (put_mdev)
+ mlx5_ib_put_native_port_mdev(dev, port);
props->gid_tbl_len -= count;
}
return ret;
@@ -1122,20 +1287,43 @@ static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
}
-static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
- u16 *pkey)
+static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u8 port,
+ u16 index, u16 *pkey)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
- struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_core_dev *mdev;
+ bool put_mdev = true;
+ u8 mdev_port_num;
+ int err;
+
+ mdev = mlx5_ib_get_native_port_mdev(dev, port, &mdev_port_num);
+ if (!mdev) {
+ /* The port isn't affiliated yet; get the PKey from the master
+ * port. For RoCE the PKey tables will be the same.
+ */
+ put_mdev = false;
+ mdev = dev->mdev;
+ mdev_port_num = 1;
+ }
+
+ err = mlx5_query_hca_vport_pkey(mdev, 0, mdev_port_num, 0,
+ index, pkey);
+ if (put_mdev)
+ mlx5_ib_put_native_port_mdev(dev, port);
+ return err;
+}
+
+static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+ u16 *pkey)
+{
switch (mlx5_get_vport_access_method(ibdev)) {
case MLX5_VPORT_ACCESS_METHOD_MAD:
return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);
case MLX5_VPORT_ACCESS_METHOD_HCA:
case MLX5_VPORT_ACCESS_METHOD_NIC:
- return mlx5_query_hca_vport_pkey(mdev, 0, port, 0, index,
- pkey);
+ return mlx5_query_hca_nic_pkey(ibdev, port, index, pkey);
default:
return -EINVAL;
}
@@ -1174,23 +1362,32 @@ static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u8 port_num, u32 mask,
u32 value)
{
struct mlx5_hca_vport_context ctx = {};
+ struct mlx5_core_dev *mdev;
+ u8 mdev_port_num;
int err;
- err = mlx5_query_hca_vport_context(dev->mdev, 0,
- port_num, 0, &ctx);
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
+ if (!mdev)
+ return -ENODEV;
+
+ err = mlx5_query_hca_vport_context(mdev, 0, mdev_port_num, 0, &ctx);
if (err)
- return err;
+ goto out;
if (~ctx.cap_mask1_perm & mask) {
mlx5_ib_warn(dev, "trying to change bitmask 0x%X but change supported 0x%X\n",
mask, ctx.cap_mask1_perm);
- return -EINVAL;
+ err = -EINVAL;
+ goto out;
}
ctx.cap_mask1 = value;
ctx.cap_mask1_perm = mask;
- err = mlx5_core_modify_hca_vport_context(dev->mdev, 0,
- port_num, 0, &ctx);
+ err = mlx5_core_modify_hca_vport_context(mdev, 0, mdev_port_num,
+ 0, &ctx);
+
+out:
+ mlx5_ib_put_native_port_mdev(dev, port_num);
return err;
}
@@ -1241,9 +1438,18 @@ static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps)
caps & MLX5_LIB_CAP_4K_UAR ? "y" : "n");
}
+static u16 calc_dynamic_bfregs(int uars_per_sys_page)
+{
+ /* Large page with non 4k uar support might limit the dynamic size */
+ if (uars_per_sys_page == 1 && PAGE_SIZE > 4096)
+ return MLX5_MIN_DYN_BFREGS;
+
+ return MLX5_MAX_DYN_BFREGS;
+}
+
static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
struct mlx5_ib_alloc_ucontext_req_v2 *req,
- u32 *num_sys_pages)
+ struct mlx5_bfreg_info *bfregi)
{
int uars_per_sys_page;
int bfregs_per_sys_page;
@@ -1260,16 +1466,21 @@ static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k);
bfregs_per_sys_page = uars_per_sys_page * MLX5_NON_FP_BFREGS_PER_UAR;
+ /* This holds the static allocation requested by the user */
req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page);
- *num_sys_pages = req->total_num_bfregs / bfregs_per_sys_page;
-
if (req->num_low_latency_bfregs > req->total_num_bfregs - 1)
return -EINVAL;
- mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, using %d sys pages\n",
+ bfregi->num_static_sys_pages = req->total_num_bfregs / bfregs_per_sys_page;
+ bfregi->num_dyn_bfregs = ALIGN(calc_dynamic_bfregs(uars_per_sys_page), bfregs_per_sys_page);
+ bfregi->total_num_bfregs = req->total_num_bfregs + bfregi->num_dyn_bfregs;
+ bfregi->num_sys_pages = bfregi->total_num_bfregs / bfregs_per_sys_page;
+
+ mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, total bfregs %d, using %d sys pages\n",
MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
lib_uar_4k ? "yes" : "no", ref_bfregs,
- req->total_num_bfregs, *num_sys_pages);
+ req->total_num_bfregs, bfregi->total_num_bfregs,
+ bfregi->num_sys_pages);
return 0;
}
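The accounting above splits a context's bfregs into the statically requested part and a dynamic tail, both rounded to whole system pages. A toy calculation, assuming 4 UARs per system page and 2 non-fast-path bfregs per UAR (illustrative values, not the device's real capabilities):

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))

int main(void)
{
	int uars_per_sys_page = 4;                        /* illustrative */
	int bfregs_per_sys_page = uars_per_sys_page * 2;  /* non-FP bfregs per UAR */
	int req_bfregs = ALIGN(10, bfregs_per_sys_page);  /* user asked for 10 -> 16 */
	int num_static = req_bfregs / bfregs_per_sys_page;
	int num_dyn = ALIGN(1024, bfregs_per_sys_page);   /* assumed MLX5_MAX_DYN_BFREGS */
	int total = req_bfregs + num_dyn;

	printf("static pages %d, total bfregs %d, sys pages %d\n",
	       num_static, total, total / bfregs_per_sys_page);
	/* prints: static pages 2, total bfregs 1040, sys pages 130 */
	return 0;
}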
@@ -1281,13 +1492,17 @@ static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *conte
int i;
bfregi = &context->bfregi;
- for (i = 0; i < bfregi->num_sys_pages; i++) {
+ for (i = 0; i < bfregi->num_static_sys_pages; i++) {
err = mlx5_cmd_alloc_uar(dev->mdev, &bfregi->sys_pages[i]);
if (err)
goto error;
mlx5_ib_dbg(dev, "allocated uar %d\n", bfregi->sys_pages[i]);
}
+
+ for (i = bfregi->num_static_sys_pages; i < bfregi->num_sys_pages; i++)
+ bfregi->sys_pages[i] = MLX5_IB_INVALID_UAR_INDEX;
+
return 0;
error:
@@ -1306,12 +1521,16 @@ static int deallocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *con
bfregi = &context->bfregi;
for (i = 0; i < bfregi->num_sys_pages; i++) {
- err = mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
- if (err) {
- mlx5_ib_warn(dev, "failed to free uar %d\n", i);
- return err;
+ if (i < bfregi->num_static_sys_pages ||
+ bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX) {
+ err = mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
+ if (err) {
+ mlx5_ib_warn(dev, "failed to free uar %d, err=%d\n", i, err);
+ return err;
+ }
}
}
+
return 0;
}
@@ -1362,6 +1581,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_ib_alloc_ucontext_req_v2 req = {};
struct mlx5_ib_alloc_ucontext_resp resp = {};
+ struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_ib_ucontext *context;
struct mlx5_bfreg_info *bfregi;
int ver;
@@ -1422,13 +1642,13 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
bfregi = &context->bfregi;
/* updates req->total_num_bfregs */
- err = calc_total_bfregs(dev, lib_uar_4k, &req, &bfregi->num_sys_pages);
+ err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi);
if (err)
goto out_ctx;
mutex_init(&bfregi->lock);
bfregi->lib_uar_4k = lib_uar_4k;
- bfregi->count = kcalloc(req.total_num_bfregs, sizeof(*bfregi->count),
+ bfregi->count = kcalloc(bfregi->total_num_bfregs, sizeof(*bfregi->count),
GFP_KERNEL);
if (!bfregi->count) {
err = -ENOMEM;
@@ -1470,7 +1690,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
mutex_init(&context->db_page_mutex);
resp.tot_bfregs = req.total_num_bfregs;
- resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports);
+ resp.num_ports = dev->num_ports;
if (field_avail(typeof(resp), cqe_version, udata->outlen))
resp.response_length += sizeof(resp.cqe_version);
@@ -1489,6 +1709,12 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
resp.response_length += sizeof(resp.eth_min_inline);
}
+ if (field_avail(typeof(resp), clock_info_versions, udata->outlen)) {
+ if (mdev->clock_info)
+ resp.clock_info_versions = BIT(MLX5_IB_CLOCK_INFO_V1);
+ resp.response_length += sizeof(resp.clock_info_versions);
+ }
+
/*
* We don't want to expose information from the PCI bar that is located
* after 4096 bytes, so if the arch only supports larger pages, let's
@@ -1502,8 +1728,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
resp.hca_core_clock_offset =
offsetof(struct mlx5_init_seg, internal_timer_h) % PAGE_SIZE;
}
- resp.response_length += sizeof(resp.hca_core_clock_offset) +
- sizeof(resp.reserved2);
+ resp.response_length += sizeof(resp.hca_core_clock_offset);
}
if (field_avail(typeof(resp), log_uar_size, udata->outlen))
@@ -1512,6 +1737,11 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
if (field_avail(typeof(resp), num_uars_per_page, udata->outlen))
resp.response_length += sizeof(resp.num_uars_per_page);
+ if (field_avail(typeof(resp), num_dyn_bfregs, udata->outlen)) {
+ resp.num_dyn_bfregs = bfregi->num_dyn_bfregs;
+ resp.response_length += sizeof(resp.num_dyn_bfregs);
+ }
+
err = ib_copy_to_udata(udata, &resp, resp.response_length);
if (err)
goto out_td;
@@ -1566,15 +1796,13 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
}
static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
- struct mlx5_bfreg_info *bfregi,
- int idx)
+ int uar_idx)
{
int fw_uars_per_page;
fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;
- return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) +
- bfregi->sys_pages[idx] / fw_uars_per_page;
+ return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
}
static int get_command(unsigned long offset)
@@ -1592,6 +1820,12 @@ static int get_index(unsigned long offset)
return get_arg(offset);
}
+/* The index spills into an extra byte to allow values larger than 255 */
+static int get_extended_index(unsigned long offset)
+{
+ return get_arg(offset) | ((offset >> 16) & 0xff) << 8;
+}
+
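A worked example of the extended-index encoding, assuming the driver's usual mmap page-offset layout (command in bits 8..15, low index byte in bits 0..7, high index byte in bits 16..23) and a get_arg() that masks the low 8 bits:

#include <assert.h>

static unsigned long get_arg(unsigned long offset)
{
	return offset & 0xff; /* low byte of the page offset */
}

static int get_extended_index(unsigned long offset)
{
	return get_arg(offset) | ((offset >> 16) & 0xff) << 8;
}

int main(void)
{
	/* index 300 = 0x12c: low byte 0x2c in bits 0..7, high byte 0x01
	 * carried in bits 16..23 of the page offset */
	unsigned long offset = (0x01UL << 16) | 0x2c;

	assert(get_extended_index(offset) == 300);
	return 0;
}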
static void mlx5_ib_vma_open(struct vm_area_struct *area)
{
/* vma_open is called when a new VMA is created on top of our VMA. This
@@ -1733,6 +1967,38 @@ static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
}
}
+static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
+ struct vm_area_struct *vma,
+ struct mlx5_ib_ucontext *context)
+{
+ phys_addr_t pfn;
+ int err;
+
+ if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+ return -EINVAL;
+
+ if (get_index(vma->vm_pgoff) != MLX5_IB_CLOCK_INFO_V1)
+ return -EOPNOTSUPP;
+
+ if (vma->vm_flags & VM_WRITE)
+ return -EPERM;
+
+ if (!dev->mdev->clock_info_page)
+ return -EOPNOTSUPP;
+
+ pfn = page_to_pfn(dev->mdev->clock_info_page);
+ err = remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE,
+ vma->vm_page_prot);
+ if (err)
+ return err;
+
+ mlx5_ib_dbg(dev, "mapped clock info at 0x%lx, PA 0x%llx\n",
+ vma->vm_start,
+ (unsigned long long)pfn << PAGE_SHIFT);
+
+ return mlx5_ib_set_vma_data(vma, context);
+}
+
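A hypothetical userspace counterpart to the handler above: map the clock-info page read-only through the uverbs command FD. The command and index values below are placeholders; the real ones come from the uapi header, with the page offset encoding (command << 8) | index as decoded by get_command()/get_index():

#include <stddef.h>
#include <sys/mman.h>
#include <sys/types.h>

#define MMAP_CLOCK_INFO_CMD 0x5 /* hypothetical value */
#define CLOCK_INFO_V1       0x0 /* hypothetical value */

static void *map_clock_info(int cmd_fd, long page_size)
{
	off_t off = (off_t)((MMAP_CLOCK_INFO_CMD << 8) | CLOCK_INFO_V1) * page_size;

	/* PROT_WRITE would be refused with -EPERM by the kernel handler */
	void *p = mmap(NULL, page_size, PROT_READ, MAP_SHARED, cmd_fd, off);

	return p == MAP_FAILED ? NULL : p;
}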
static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
struct vm_area_struct *vma,
struct mlx5_ib_ucontext *context)
@@ -1742,21 +2008,29 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
unsigned long idx;
phys_addr_t pfn, pa;
pgprot_t prot;
- int uars_per_page;
+ u32 bfreg_dyn_idx = 0;
+ u32 uar_index;
+ int dyn_uar = (cmd == MLX5_IB_MMAP_ALLOC_WC);
+ int max_valid_idx = dyn_uar ? bfregi->num_sys_pages :
+ bfregi->num_static_sys_pages;
if (vma->vm_end - vma->vm_start != PAGE_SIZE)
return -EINVAL;
- uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k);
- idx = get_index(vma->vm_pgoff);
- if (idx % uars_per_page ||
- idx * uars_per_page >= bfregi->num_sys_pages) {
- mlx5_ib_warn(dev, "invalid uar index %lu\n", idx);
+ if (dyn_uar)
+ idx = get_extended_index(vma->vm_pgoff) + bfregi->num_static_sys_pages;
+ else
+ idx = get_index(vma->vm_pgoff);
+
+ if (idx >= max_valid_idx) {
+ mlx5_ib_warn(dev, "invalid uar index %lu, max=%d\n",
+ idx, max_valid_idx);
return -EINVAL;
}
switch (cmd) {
case MLX5_IB_MMAP_WC_PAGE:
+ case MLX5_IB_MMAP_ALLOC_WC:
/* Some architectures don't support WC memory */
#if defined(CONFIG_X86)
if (!pat_enabled())
@@ -1776,7 +2050,40 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
return -EINVAL;
}
- pfn = uar_index2pfn(dev, bfregi, idx);
+ if (dyn_uar) {
+ int uars_per_page;
+
+ uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k);
+ bfreg_dyn_idx = idx * (uars_per_page * MLX5_NON_FP_BFREGS_PER_UAR);
+ if (bfreg_dyn_idx >= bfregi->total_num_bfregs) {
+ mlx5_ib_warn(dev, "invalid bfreg_dyn_idx %u, max=%u\n",
+ bfreg_dyn_idx, bfregi->total_num_bfregs);
+ return -EINVAL;
+ }
+
+ mutex_lock(&bfregi->lock);
+ /* Fail if the UAR is already allocated; the first bfreg index
+ * of each page holds its count.
+ */
+ if (bfregi->count[bfreg_dyn_idx]) {
+ mlx5_ib_warn(dev, "wrong offset, idx %lu is busy, bfregn=%u\n", idx, bfreg_dyn_idx);
+ mutex_unlock(&bfregi->lock);
+ return -EINVAL;
+ }
+
+ bfregi->count[bfreg_dyn_idx]++;
+ mutex_unlock(&bfregi->lock);
+
+ err = mlx5_cmd_alloc_uar(dev->mdev, &uar_index);
+ if (err) {
+ mlx5_ib_warn(dev, "UAR alloc failed\n");
+ goto free_bfreg;
+ }
+ } else {
+ uar_index = bfregi->sys_pages[idx];
+ }
+
+ pfn = uar_index2pfn(dev, uar_index);
mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
vma->vm_page_prot = prot;
@@ -1785,14 +2092,32 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
if (err) {
mlx5_ib_err(dev, "io_remap_pfn_range failed with error=%d, vm_start=0x%lx, pfn=%pa, mmap_cmd=%s\n",
err, vma->vm_start, &pfn, mmap_cmd2str(cmd));
- return -EAGAIN;
+ err = -EAGAIN;
+ goto err;
}
pa = pfn << PAGE_SHIFT;
mlx5_ib_dbg(dev, "mapped %s at 0x%lx, PA %pa\n", mmap_cmd2str(cmd),
vma->vm_start, &pa);
- return mlx5_ib_set_vma_data(vma, context);
+ err = mlx5_ib_set_vma_data(vma, context);
+ if (err)
+ goto err;
+
+ if (dyn_uar)
+ bfregi->sys_pages[idx] = uar_index;
+ return 0;
+
+err:
+ if (!dyn_uar)
+ return err;
+
+ mlx5_cmd_free_uar(dev->mdev, idx);
+
+free_bfreg:
+ mlx5_ib_free_bfreg(dev, bfregi, bfreg_dyn_idx);
+
+ return err;
}
static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
@@ -1807,6 +2132,7 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
case MLX5_IB_MMAP_WC_PAGE:
case MLX5_IB_MMAP_NC_PAGE:
case MLX5_IB_MMAP_REGULAR_PAGE:
+ case MLX5_IB_MMAP_ALLOC_WC:
return uar_mmap(dev, command, vma, context);
case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
@@ -1835,6 +2161,8 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
vma->vm_start,
(unsigned long long)pfn << PAGE_SHIFT);
break;
+ case MLX5_IB_MMAP_CLOCK_INFO:
+ return mlx5_ib_mmap_clock_info_page(dev, vma, context);
default:
return -EINVAL;
@@ -2663,7 +2991,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
return ERR_PTR(-ENOMEM);
if (domain != IB_FLOW_DOMAIN_USER ||
- flow_attr->port > MLX5_CAP_GEN(dev->mdev, num_ports) ||
+ flow_attr->port > dev->num_ports ||
(flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP))
return ERR_PTR(-EINVAL);
@@ -2928,15 +3256,24 @@ static void delay_drop_handler(struct work_struct *work)
mutex_unlock(&delay_drop->lock);
}
-static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
- enum mlx5_dev_event event, unsigned long param)
+static void mlx5_ib_handle_event(struct work_struct *_work)
{
- struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
+ struct mlx5_ib_event_work *work =
+ container_of(_work, struct mlx5_ib_event_work, work);
+ struct mlx5_ib_dev *ibdev;
struct ib_event ibev;
bool fatal = false;
u8 port = 0;
- switch (event) {
+ if (mlx5_core_is_mp_slave(work->dev)) {
+ ibdev = mlx5_ib_get_ibdev_from_mpi(work->context);
+ if (!ibdev)
+ goto out;
+ } else {
+ ibdev = work->context;
+ }
+
+ switch (work->event) {
case MLX5_DEV_EVENT_SYS_ERROR:
ibev.event = IB_EVENT_DEVICE_FATAL;
mlx5_ib_handle_internal_error(ibdev);
@@ -2946,39 +3283,39 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
case MLX5_DEV_EVENT_PORT_UP:
case MLX5_DEV_EVENT_PORT_DOWN:
case MLX5_DEV_EVENT_PORT_INITIALIZED:
- port = (u8)param;
+ port = (u8)work->param;
/* In RoCE, port up/down events are handled in
* mlx5_netdev_event().
*/
if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
IB_LINK_LAYER_ETHERNET)
- return;
+ goto out;
- ibev.event = (event == MLX5_DEV_EVENT_PORT_UP) ?
+ ibev.event = (work->event == MLX5_DEV_EVENT_PORT_UP) ?
IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
break;
case MLX5_DEV_EVENT_LID_CHANGE:
ibev.event = IB_EVENT_LID_CHANGE;
- port = (u8)param;
+ port = (u8)work->param;
break;
case MLX5_DEV_EVENT_PKEY_CHANGE:
ibev.event = IB_EVENT_PKEY_CHANGE;
- port = (u8)param;
+ port = (u8)work->param;
schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
break;
case MLX5_DEV_EVENT_GUID_CHANGE:
ibev.event = IB_EVENT_GID_CHANGE;
- port = (u8)param;
+ port = (u8)work->param;
break;
case MLX5_DEV_EVENT_CLIENT_REREG:
ibev.event = IB_EVENT_CLIENT_REREGISTER;
- port = (u8)param;
+ port = (u8)work->param;
break;
case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT:
schedule_work(&ibdev->delay_drop.delay_drop_work);
@@ -3000,9 +3337,26 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
if (fatal)
ibdev->ib_active = false;
-
out:
- return;
+ kfree(work);
+}
+
+static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
+ enum mlx5_dev_event event, unsigned long param)
+{
+ struct mlx5_ib_event_work *work;
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return;
+
+ INIT_WORK(&work->work, mlx5_ib_handle_event);
+ work->dev = dev;
+ work->param = param;
+ work->context = context;
+ work->event = event;
+
+ queue_work(mlx5_ib_event_wq, &work->work);
}
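The deferral above relies on mlx5_ib_event_wq existing before any core events can fire. A sketch of the companion module init/exit; an ordered workqueue keeps events serialized in arrival order, and the queue name string is an assumption:

static int __init mlx5_ib_init(void)
{
	mlx5_ib_event_wq = alloc_ordered_workqueue("mlx5_ib_event_wq", 0);
	if (!mlx5_ib_event_wq)
		return -ENOMEM;

	/* ... register the mlx5 interface as before ... */
	return 0;
}

static void __exit mlx5_ib_cleanup(void)
{
	/* ... unregister the interface first, so no new events are queued ... */
	destroy_workqueue(mlx5_ib_event_wq);
}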
static int set_has_smi_cap(struct mlx5_ib_dev *dev)
@@ -3011,7 +3365,7 @@ static int set_has_smi_cap(struct mlx5_ib_dev *dev)
int err;
int port;
- for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) {
+ for (port = 1; port <= dev->num_ports; port++) {
dev->mdev->port_caps[port - 1].has_smi = false;
if (MLX5_CAP_GEN(dev->mdev, port_type) ==
MLX5_CAP_PORT_TYPE_IB) {
@@ -3038,16 +3392,15 @@ static void get_ext_port_caps(struct mlx5_ib_dev *dev)
{
int port;
- for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++)
+ for (port = 1; port <= dev->num_ports; port++)
mlx5_query_ext_port_caps(dev, port);
}
-static int get_port_caps(struct mlx5_ib_dev *dev)
+static int get_port_caps(struct mlx5_ib_dev *dev, u8 port)
{
struct ib_device_attr *dprops = NULL;
struct ib_port_attr *pprops = NULL;
int err = -ENOMEM;
- int port;
struct ib_udata uhw = {.inlen = 0, .outlen = 0};
pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
@@ -3068,22 +3421,21 @@ static int get_port_caps(struct mlx5_ib_dev *dev)
goto out;
}
- for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) {
- memset(pprops, 0, sizeof(*pprops));
- err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
- if (err) {
- mlx5_ib_warn(dev, "query_port %d failed %d\n",
- port, err);
- break;
- }
- dev->mdev->port_caps[port - 1].pkey_table_len =
- dprops->max_pkeys;
- dev->mdev->port_caps[port - 1].gid_table_len =
- pprops->gid_tbl_len;
- mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
- dprops->max_pkeys, pprops->gid_tbl_len);
+ memset(pprops, 0, sizeof(*pprops));
+ err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
+ if (err) {
+ mlx5_ib_warn(dev, "query_port %d failed %d\n",
+ port, err);
+ goto out;
}
+ dev->mdev->port_caps[port - 1].pkey_table_len =
+ dprops->max_pkeys;
+ dev->mdev->port_caps[port - 1].gid_table_len =
+ pprops->gid_tbl_len;
+ mlx5_ib_dbg(dev, "port %d: pkey_table_len %d, gid_table_len %d\n",
+ port, dprops->max_pkeys, pprops->gid_tbl_len);
+
out:
kfree(pprops);
kfree(dprops);
@@ -3373,12 +3725,14 @@ static u32 get_core_cap_flags(struct ib_device *ibdev)
enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1);
u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type);
u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version);
+ bool raw_support = !mlx5_core_mp_enabled(dev->mdev);
u32 ret = 0;
if (ll == IB_LINK_LAYER_INFINIBAND)
return RDMA_CORE_PORT_IBA_IB;
- ret = RDMA_CORE_PORT_RAW_PACKET;
+ if (raw_support)
+ ret = RDMA_CORE_PORT_RAW_PACKET;
if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP))
return ret;
@@ -3468,33 +3822,33 @@ static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
}
}
-static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev)
+static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
{
int err;
- dev->roce.nb.notifier_call = mlx5_netdev_event;
- err = register_netdevice_notifier(&dev->roce.nb);
+ dev->roce[port_num].nb.notifier_call = mlx5_netdev_event;
+ err = register_netdevice_notifier(&dev->roce[port_num].nb);
if (err) {
- dev->roce.nb.notifier_call = NULL;
+ dev->roce[port_num].nb.notifier_call = NULL;
return err;
}
return 0;
}
-static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev)
+static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
{
- if (dev->roce.nb.notifier_call) {
- unregister_netdevice_notifier(&dev->roce.nb);
- dev->roce.nb.notifier_call = NULL;
+ if (dev->roce[port_num].nb.notifier_call) {
+ unregister_netdevice_notifier(&dev->roce[port_num].nb);
+ dev->roce[port_num].nb.notifier_call = NULL;
}
}
-static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
+static int mlx5_enable_eth(struct mlx5_ib_dev *dev, u8 port_num)
{
int err;
- err = mlx5_add_netdev_notifier(dev);
+ err = mlx5_add_netdev_notifier(dev, port_num);
if (err)
return err;
@@ -3515,7 +3869,7 @@ err_disable_roce:
mlx5_nic_vport_disable_roce(dev->mdev);
err_unregister_netdevice_notifier:
- mlx5_remove_netdev_notifier(dev);
+ mlx5_remove_netdev_notifier(dev, port_num);
return err;
}
@@ -3577,11 +3931,12 @@ static const struct mlx5_ib_counter extended_err_cnts[] = {
static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
{
- unsigned int i;
+ int i;
for (i = 0; i < dev->num_ports; i++) {
- mlx5_core_dealloc_q_counter(dev->mdev,
- dev->port[i].cnts.set_id);
+ if (dev->port[i].cnts.set_id)
+ mlx5_core_dealloc_q_counter(dev->mdev,
+ dev->port[i].cnts.set_id);
kfree(dev->port[i].cnts.names);
kfree(dev->port[i].cnts.offsets);
}
@@ -3623,6 +3978,7 @@ static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
err_names:
kfree(cnts->names);
+ cnts->names = NULL;
return -ENOMEM;
}
@@ -3669,37 +4025,33 @@ static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
{
+ int err = 0;
int i;
- int ret;
for (i = 0; i < dev->num_ports; i++) {
- struct mlx5_ib_port *port = &dev->port[i];
+ err = __mlx5_ib_alloc_counters(dev, &dev->port[i].cnts);
+ if (err)
+ goto err_alloc;
+
+ mlx5_ib_fill_counters(dev, dev->port[i].cnts.names,
+ dev->port[i].cnts.offsets);
- ret = mlx5_core_alloc_q_counter(dev->mdev,
- &port->cnts.set_id);
- if (ret) {
+ err = mlx5_core_alloc_q_counter(dev->mdev,
+ &dev->port[i].cnts.set_id);
+ if (err) {
mlx5_ib_warn(dev,
"couldn't allocate queue counter for port %d, err %d\n",
- i + 1, ret);
- goto dealloc_counters;
+ i + 1, err);
+ goto err_alloc;
}
-
- ret = __mlx5_ib_alloc_counters(dev, &port->cnts);
- if (ret)
- goto dealloc_counters;
-
- mlx5_ib_fill_counters(dev, port->cnts.names,
- port->cnts.offsets);
+ dev->port[i].cnts.set_id_valid = true;
}
return 0;
-dealloc_counters:
- while (--i >= 0)
- mlx5_core_dealloc_q_counter(dev->mdev,
- dev->port[i].cnts.set_id);
-
- return ret;
+err_alloc:
+ mlx5_ib_dealloc_counters(dev);
+ return err;
}
static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
@@ -3718,7 +4070,7 @@ static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
-static int mlx5_ib_query_q_counters(struct mlx5_ib_dev *dev,
+static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev,
struct mlx5_ib_port *port,
struct rdma_hw_stats *stats)
{
@@ -3731,7 +4083,7 @@ static int mlx5_ib_query_q_counters(struct mlx5_ib_dev *dev,
if (!out)
return -ENOMEM;
- ret = mlx5_core_query_q_counter(dev->mdev,
+ ret = mlx5_core_query_q_counter(mdev,
port->cnts.set_id, 0,
out, outlen);
if (ret)
@@ -3753,28 +4105,43 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_ib_port *port = &dev->port[port_num - 1];
+ struct mlx5_core_dev *mdev;
int ret, num_counters;
+ u8 mdev_port_num;
if (!stats)
return -EINVAL;
- ret = mlx5_ib_query_q_counters(dev, port, stats);
+ num_counters = port->cnts.num_q_counters + port->cnts.num_cong_counters;
+
+ /* q_counters are per IB device, query the master mdev */
+ ret = mlx5_ib_query_q_counters(dev->mdev, port, stats);
if (ret)
return ret;
- num_counters = port->cnts.num_q_counters;
if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num,
+ &mdev_port_num);
+ if (!mdev) {
+ /* If the port is not affiliated yet, it's in the down state,
+ * which has no counters yet, so they would read as zero.
+ * There is no need to read from the HCA.
+ */
+ goto done;
+ }
ret = mlx5_lag_query_cong_counters(dev->mdev,
stats->value +
port->cnts.num_q_counters,
port->cnts.num_cong_counters,
port->cnts.offsets +
port->cnts.num_q_counters);
+
+ mlx5_ib_put_native_port_mdev(dev, port_num);
if (ret)
return ret;
- num_counters += port->cnts.num_cong_counters;
}
+done:
return num_counters;
}
@@ -3936,36 +4303,250 @@ mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector)
return mlx5_get_vector_affinity(dev->mdev, comp_vector);
}
-static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
+/* The mlx5_ib_multiport_mutex should be held when calling this function */
+static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
+ struct mlx5_ib_multiport_info *mpi)
{
- struct mlx5_ib_dev *dev;
- enum rdma_link_layer ll;
- int port_type_cap;
- const char *name;
+ u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
+ struct mlx5_ib_port *port = &ibdev->port[port_num];
+ int comps;
int err;
int i;
- port_type_cap = MLX5_CAP_GEN(mdev, port_type);
- ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
+ mlx5_ib_cleanup_cong_debugfs(ibdev, port_num);
- printk_once(KERN_INFO "%s", mlx5_version);
+ spin_lock(&port->mp.mpi_lock);
+ if (!mpi->ibdev) {
+ spin_unlock(&port->mp.mpi_lock);
+ return;
+ }
+ mpi->ibdev = NULL;
- dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
- if (!dev)
- return NULL;
+ spin_unlock(&port->mp.mpi_lock);
+ mlx5_remove_netdev_notifier(ibdev, port_num);
+ spin_lock(&port->mp.mpi_lock);
- dev->mdev = mdev;
+ comps = mpi->mdev_refcnt;
+ if (comps) {
+ mpi->unaffiliate = true;
+ init_completion(&mpi->unref_comp);
+ spin_unlock(&port->mp.mpi_lock);
+
+ for (i = 0; i < comps; i++)
+ wait_for_completion(&mpi->unref_comp);
+
+ spin_lock(&port->mp.mpi_lock);
+ mpi->unaffiliate = false;
+ }
+
+ port->mp.mpi = NULL;
+
+ list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
+
+ spin_unlock(&port->mp.mpi_lock);
+
+ err = mlx5_nic_vport_unaffiliate_multiport(mpi->mdev);
+
+ mlx5_ib_dbg(ibdev, "unaffiliated port %d\n", port_num + 1);
+ /* Log the error, but still clean up the pointers and add
+ * the mpi back to the list.
+ */
+ if (err)
+ mlx5_ib_err(ibdev, "Failed to unaffiliate port %u\n",
+ port_num + 1);
+
+ ibdev->roce[port_num].last_port_state = IB_PORT_DOWN;
+}
+
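The unbind path above drains in-flight users with a completion: it snapshots mdev_refcnt under the lock, sets unaffiliate so that every subsequent mlx5_ib_put_native_port_mdev() signals unref_comp, then waits once per outstanding reference. A stripped-down sketch of that pattern with hypothetical names:

#include <linux/completion.h>
#include <linux/spinlock.h>

struct drain_obj {
	spinlock_t lock;
	int refcnt;                 /* raised by get, dropped by put */
	bool draining;
	struct completion unref_comp;
};

static void drain_users(struct drain_obj *o)
{
	int comps, i;

	spin_lock(&o->lock);
	comps = o->refcnt;          /* snapshot outstanding users */
	if (comps) {
		o->draining = true; /* puts call complete() from now on */
		init_completion(&o->unref_comp);
		spin_unlock(&o->lock);

		for (i = 0; i < comps; i++)
			wait_for_completion(&o->unref_comp);

		spin_lock(&o->lock);
		o->draining = false;
	}
	spin_unlock(&o->lock);
}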
+/* The mlx5_ib_multiport_mutex should be held when calling this function */
+static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
+ struct mlx5_ib_multiport_info *mpi)
+{
+ u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
+ int err;
+
+ spin_lock(&ibdev->port[port_num].mp.mpi_lock);
+ if (ibdev->port[port_num].mp.mpi) {
+ mlx5_ib_warn(ibdev, "port %d already affiliated.\n",
+ port_num + 1);
+ spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
+ return false;
+ }
+
+ ibdev->port[port_num].mp.mpi = mpi;
+ mpi->ibdev = ibdev;
+ spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
+
+ err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev);
+ if (err)
+ goto unbind;
+
+ err = get_port_caps(ibdev, mlx5_core_native_port_num(mpi->mdev));
+ if (err)
+ goto unbind;
+
+ err = mlx5_add_netdev_notifier(ibdev, port_num);
+ if (err) {
+ mlx5_ib_err(ibdev, "failed adding netdev notifier for port %u\n",
+ port_num + 1);
+ goto unbind;
+ }
+
+ err = mlx5_ib_init_cong_debugfs(ibdev, port_num);
+ if (err)
+ goto unbind;
+
+ return true;
+
+unbind:
+ mlx5_ib_unbind_slave_port(ibdev, mpi);
+ return false;
+}
+
+static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
+{
+ int port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+ enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
+ port_num + 1);
+ struct mlx5_ib_multiport_info *mpi;
+ int err;
+ int i;
+
+ if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
+ return 0;
+
+ err = mlx5_query_nic_vport_system_image_guid(dev->mdev,
+ &dev->sys_image_guid);
+ if (err)
+ return err;
- dev->port = kcalloc(MLX5_CAP_GEN(mdev, num_ports), sizeof(*dev->port),
+ err = mlx5_nic_vport_enable_roce(dev->mdev);
+ if (err)
+ return err;
+
+ mutex_lock(&mlx5_ib_multiport_mutex);
+ for (i = 0; i < dev->num_ports; i++) {
+ bool bound = false;
+
+ /* build a stub multiport info struct for the native port. */
+ if (i == port_num) {
+ mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
+ if (!mpi) {
+ mutex_unlock(&mlx5_ib_multiport_mutex);
+ mlx5_nic_vport_disable_roce(dev->mdev);
+ return -ENOMEM;
+ }
+
+ mpi->is_master = true;
+ mpi->mdev = dev->mdev;
+ mpi->sys_image_guid = dev->sys_image_guid;
+ dev->port[i].mp.mpi = mpi;
+ mpi->ibdev = dev;
+ mpi = NULL;
+ continue;
+ }
+
+ list_for_each_entry(mpi, &mlx5_ib_unaffiliated_port_list,
+ list) {
+ if (dev->sys_image_guid == mpi->sys_image_guid &&
+ (mlx5_core_native_port_num(mpi->mdev) - 1) == i) {
+ bound = mlx5_ib_bind_slave_port(dev, mpi);
+ }
+
+ if (bound) {
+ dev_dbg(&mpi->mdev->pdev->dev, "removing port from unaffiliated list.\n");
+ mlx5_ib_dbg(dev, "port %d bound\n", i + 1);
+ list_del(&mpi->list);
+ break;
+ }
+ }
+ if (!bound) {
+ get_port_caps(dev, i + 1);
+ mlx5_ib_dbg(dev, "no free port found for port %d\n",
+ i + 1);
+ }
+ }
+
+ list_add_tail(&dev->ib_dev_list, &mlx5_ib_dev_list);
+ mutex_unlock(&mlx5_ib_multiport_mutex);
+ return err;
+}
+
+static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
+{
+ int port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+ enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
+ port_num + 1);
+ int i;
+
+ if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
+ return;
+
+ mutex_lock(&mlx5_ib_multiport_mutex);
+ for (i = 0; i < dev->num_ports; i++) {
+ if (dev->port[i].mp.mpi) {
+ /* Destroy the native port stub */
+ if (i == port_num) {
+ kfree(dev->port[i].mp.mpi);
+ dev->port[i].mp.mpi = NULL;
+ } else {
+ mlx5_ib_dbg(dev, "unbinding port_num: %d\n", i + 1);
+ mlx5_ib_unbind_slave_port(dev, dev->port[i].mp.mpi);
+ }
+ }
+ }
+
+ mlx5_ib_dbg(dev, "removing from devlist\n");
+ list_del(&dev->ib_dev_list);
+ mutex_unlock(&mlx5_ib_multiport_mutex);
+
+ mlx5_nic_vport_disable_roce(dev->mdev);
+}
+
+static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
+{
+ mlx5_ib_cleanup_multiport_master(dev);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ cleanup_srcu_struct(&dev->mr_srcu);
+#endif
+ kfree(dev->port);
+}
+
+static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ const char *name;
+ int err;
+ int i;
+
+ dev->port = kcalloc(dev->num_ports, sizeof(*dev->port),
GFP_KERNEL);
if (!dev->port)
- goto err_dealloc;
+ return -ENOMEM;
+
+ for (i = 0; i < dev->num_ports; i++) {
+ spin_lock_init(&dev->port[i].mp.mpi_lock);
+ rwlock_init(&dev->roce[i].netdev_lock);
+ }
- rwlock_init(&dev->roce.netdev_lock);
- err = get_port_caps(dev);
+ err = mlx5_ib_init_multiport_master(dev);
if (err)
goto err_free_port;
+ if (!mlx5_core_mp_enabled(mdev)) {
+ for (i = 1; i <= dev->num_ports; i++) {
+ err = get_port_caps(dev, i);
+ if (err)
+ break;
+ }
+ } else {
+ err = get_port_caps(dev, mlx5_core_native_port_num(mdev));
+ }
+ if (err)
+ goto err_mp;
+
if (mlx5_use_mad_ifc(dev))
get_ext_port_caps(dev);
@@ -3978,12 +4559,37 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->ib_dev.owner = THIS_MODULE;
dev->ib_dev.node_type = RDMA_NODE_IB_CA;
dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
- dev->num_ports = MLX5_CAP_GEN(mdev, num_ports);
- dev->ib_dev.phys_port_cnt = dev->num_ports;
+ dev->ib_dev.phys_port_cnt = dev->num_ports;
dev->ib_dev.num_comp_vectors =
dev->mdev->priv.eq_table.num_comp_vectors;
dev->ib_dev.dev.parent = &mdev->pdev->dev;
+ mutex_init(&dev->flow_db.lock);
+ mutex_init(&dev->cap_mask_mutex);
+ INIT_LIST_HEAD(&dev->qp_list);
+ spin_lock_init(&dev->reset_flow_resource_lock);
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ err = init_srcu_struct(&dev->mr_srcu);
+ if (err)
+ goto err_free_port;
+#endif
+
+ return 0;
+err_mp:
+ mlx5_ib_cleanup_multiport_master(dev);
+
+err_free_port:
+ kfree(dev->port);
+
+ return err;
+}
+
+static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ int err;
+
dev->ib_dev.uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION;
dev->ib_dev.uverbs_cmd_mask =
(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
@@ -4022,8 +4628,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->ib_dev.query_device = mlx5_ib_query_device;
dev->ib_dev.query_port = mlx5_ib_query_port;
dev->ib_dev.get_link_layer = mlx5_ib_port_link_layer;
- if (ll == IB_LINK_LAYER_ETHERNET)
- dev->ib_dev.get_netdev = mlx5_ib_get_netdev;
dev->ib_dev.query_gid = mlx5_ib_query_gid;
dev->ib_dev.add_gid = mlx5_ib_add_gid;
dev->ib_dev.del_gid = mlx5_ib_del_gid;
@@ -4080,8 +4684,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->ib_dev.disassociate_ucontext = mlx5_ib_disassociate_ucontext;
- mlx5_ib_internal_fill_odp_caps(dev);
-
dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));
if (MLX5_CAP_GEN(mdev, imaicl)) {
@@ -4092,11 +4694,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
}
- if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
- dev->ib_dev.get_hw_stats = mlx5_ib_get_hw_stats;
- dev->ib_dev.alloc_hw_stats = mlx5_ib_alloc_hw_stats;
- }
-
if (MLX5_CAP_GEN(mdev, xrc)) {
dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
@@ -4111,8 +4708,39 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
- if (mlx5_ib_port_link_layer(&dev->ib_dev, 1) ==
- IB_LINK_LAYER_ETHERNET) {
+ err = init_node_data(dev);
+ if (err)
+ return err;
+
+ if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
+ (MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) ||
+ MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
+ mutex_init(&dev->lb_mutex);
+
+ return 0;
+}
+
+static int mlx5_ib_stage_roce_init(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ enum rdma_link_layer ll;
+ int port_type_cap;
+ u8 port_num;
+ int err;
+ int i;
+
+ port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+ port_type_cap = MLX5_CAP_GEN(mdev, port_type);
+ ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
+
+ if (ll == IB_LINK_LAYER_ETHERNET) {
+ for (i = 0; i < dev->num_ports; i++) {
+ dev->roce[i].dev = dev;
+ dev->roce[i].native_port_num = i + 1;
+ dev->roce[i].last_port_state = IB_PORT_DOWN;
+ }
+
+ dev->ib_dev.get_netdev = mlx5_ib_get_netdev;
dev->ib_dev.create_wq = mlx5_ib_create_wq;
dev->ib_dev.modify_wq = mlx5_ib_modify_wq;
dev->ib_dev.destroy_wq = mlx5_ib_destroy_wq;
@@ -4124,143 +4752,329 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
(1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
(1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
(1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
+ err = mlx5_enable_eth(dev, port_num);
+ if (err)
+ return err;
}
- err = init_node_data(dev);
- if (err)
- goto err_free_port;
- mutex_init(&dev->flow_db.lock);
- mutex_init(&dev->cap_mask_mutex);
- INIT_LIST_HEAD(&dev->qp_list);
- spin_lock_init(&dev->reset_flow_resource_lock);
+ return 0;
+}
+
+static void mlx5_ib_stage_roce_cleanup(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ enum rdma_link_layer ll;
+ int port_type_cap;
+ u8 port_num;
+
+ port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+ port_type_cap = MLX5_CAP_GEN(mdev, port_type);
+ ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
if (ll == IB_LINK_LAYER_ETHERNET) {
- err = mlx5_enable_eth(dev);
- if (err)
- goto err_free_port;
- dev->roce.last_port_state = IB_PORT_DOWN;
+ mlx5_disable_eth(dev);
+ mlx5_remove_netdev_notifier(dev, port_num);
}
+}
- err = create_dev_resources(&dev->devr);
- if (err)
- goto err_disable_eth;
+static int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev)
+{
+ return create_dev_resources(&dev->devr);
+}
- err = mlx5_ib_odp_init_one(dev);
- if (err)
- goto err_rsrc;
+static void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)
+{
+ destroy_dev_resources(&dev->devr);
+}
+static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
+{
+ mlx5_ib_internal_fill_odp_caps(dev);
+
+ return mlx5_ib_odp_init_one(dev);
+}
+
+static int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev)
+{
if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
- err = mlx5_ib_alloc_counters(dev);
- if (err)
- goto err_odp;
+ dev->ib_dev.get_hw_stats = mlx5_ib_get_hw_stats;
+ dev->ib_dev.alloc_hw_stats = mlx5_ib_alloc_hw_stats;
+
+ return mlx5_ib_alloc_counters(dev);
}
- err = mlx5_ib_init_cong_debugfs(dev);
- if (err)
- goto err_cnt;
+ return 0;
+}
+
+static void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev)
+{
+ if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
+ mlx5_ib_dealloc_counters(dev);
+}
+static int mlx5_ib_stage_cong_debugfs_init(struct mlx5_ib_dev *dev)
+{
+ return mlx5_ib_init_cong_debugfs(dev,
+ mlx5_core_native_port_num(dev->mdev) - 1);
+}
+
+static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev)
+{
+ mlx5_ib_cleanup_cong_debugfs(dev,
+ mlx5_core_native_port_num(dev->mdev) - 1);
+}
+
+static int mlx5_ib_stage_uar_init(struct mlx5_ib_dev *dev)
+{
dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
- if (IS_ERR(dev->mdev->priv.uar))
- goto err_cong;
+ if (!dev->mdev->priv.uar)
+ return -ENOMEM;
+ return 0;
+}
+
+static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev)
+{
+ mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
+}
+
+static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
+{
+ int err;
err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
if (err)
- goto err_uar_page;
+ return err;
err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
if (err)
- goto err_bfreg;
+ mlx5_free_bfreg(dev->mdev, &dev->bfreg);
- err = ib_register_device(&dev->ib_dev, NULL);
- if (err)
- goto err_fp_bfreg;
+ return err;
+}
- err = create_umr_res(dev);
- if (err)
- goto err_dev;
+static void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev)
+{
+ mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
+ mlx5_free_bfreg(dev->mdev, &dev->bfreg);
+}
+
+static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
+{
+ return ib_register_device(&dev->ib_dev, NULL);
+}
+
+static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
+{
+ ib_unregister_device(&dev->ib_dev);
+}
+static int mlx5_ib_stage_umr_res_init(struct mlx5_ib_dev *dev)
+{
+ return create_umr_res(dev);
+}
+
+static void mlx5_ib_stage_umr_res_cleanup(struct mlx5_ib_dev *dev)
+{
+ destroy_umrc_res(dev);
+}
+
+static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
+{
init_delay_drop(dev);
+ return 0;
+}
+
+static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev)
+{
+ cancel_delay_drop(dev);
+}
+
+static int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev)
+{
+ int err;
+ int i;
+
for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
err = device_create_file(&dev->ib_dev.dev,
mlx5_class_attributes[i]);
if (err)
- goto err_delay_drop;
+ return err;
}
- if ((MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
- (MLX5_CAP_GEN(mdev, disable_local_lb_uc) ||
- MLX5_CAP_GEN(mdev, disable_local_lb_mc)))
- mutex_init(&dev->lb_mutex);
+ return 0;
+}
+
+static void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
+ const struct mlx5_ib_profile *profile,
+ int stage)
+{
+ /* Walk the completed stages in reverse order, calling each cleanup hook */
+ while (stage) {
+ stage--;
+ if (profile->stage[stage].cleanup)
+ profile->stage[stage].cleanup(dev);
+ }
+
+ ib_dealloc_device((struct ib_device *)dev);
+}
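+
+/* __mlx5_ib_add() below passes the index of the first stage whose init
+ * failed, while a full removal passes MLX5_IB_STAGE_MAX, so in both
+ * cases only stages that completed their init are unwound, in reverse
+ * order.
+ */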
+
+static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev, u8 port_num);
+
+static void *__mlx5_ib_add(struct mlx5_core_dev *mdev,
+ const struct mlx5_ib_profile *profile)
+{
+ struct mlx5_ib_dev *dev;
+ int err;
+ int i;
+
+ printk_once(KERN_INFO "%s", mlx5_version);
+ dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
+ if (!dev)
+ return NULL;
+
+ dev->mdev = mdev;
+ dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
+ MLX5_CAP_GEN(mdev, num_vhca_ports));
+
+ for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
+ if (profile->stage[i].init) {
+ err = profile->stage[i].init(dev);
+ if (err)
+ goto err_out;
+ }
+ }
+
+ dev->profile = profile;
dev->ib_active = true;
return dev;
-err_delay_drop:
- cancel_delay_drop(dev);
- destroy_umrc_res(dev);
+err_out:
+ __mlx5_ib_remove(dev, profile, i);
-err_dev:
- ib_unregister_device(&dev->ib_dev);
+ return NULL;
+}
-err_fp_bfreg:
- mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
+static const struct mlx5_ib_profile pf_profile = {
+ STAGE_CREATE(MLX5_IB_STAGE_INIT,
+ mlx5_ib_stage_init_init,
+ mlx5_ib_stage_init_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_CAPS,
+ mlx5_ib_stage_caps_init,
+ NULL),
+ STAGE_CREATE(MLX5_IB_STAGE_ROCE,
+ mlx5_ib_stage_roce_init,
+ mlx5_ib_stage_roce_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
+ mlx5_ib_stage_dev_res_init,
+ mlx5_ib_stage_dev_res_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_ODP,
+ mlx5_ib_stage_odp_init,
+ NULL),
+ STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
+ mlx5_ib_stage_counters_init,
+ mlx5_ib_stage_counters_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
+ mlx5_ib_stage_cong_debugfs_init,
+ mlx5_ib_stage_cong_debugfs_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_UAR,
+ mlx5_ib_stage_uar_init,
+ mlx5_ib_stage_uar_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_BFREG,
+ mlx5_ib_stage_bfrag_init,
+ mlx5_ib_stage_bfrag_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
+ mlx5_ib_stage_ib_reg_init,
+ mlx5_ib_stage_ib_reg_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_UMR_RESOURCES,
+ mlx5_ib_stage_umr_res_init,
+ mlx5_ib_stage_umr_res_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
+ mlx5_ib_stage_delay_drop_init,
+ mlx5_ib_stage_delay_drop_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
+ mlx5_ib_stage_class_attr_init,
+ NULL),
+};
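+
+/* The profile is an ordered table: __mlx5_ib_add() runs the init hooks
+ * in enum order and __mlx5_ib_remove() runs the cleanup hooks in
+ * reverse, so a later stage may rely on everything set up by earlier
+ * ones. A NULL cleanup marks a stage with nothing to undo.
+ */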
-err_bfreg:
- mlx5_free_bfreg(dev->mdev, &dev->bfreg);
+static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev, u8 port_num)
+{
+ struct mlx5_ib_multiport_info *mpi;
+ struct mlx5_ib_dev *dev;
+ bool bound = false;
+ int err;
-err_uar_page:
- mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
+ mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
+ if (!mpi)
+ return NULL;
-err_cong:
- mlx5_ib_cleanup_cong_debugfs(dev);
-err_cnt:
- if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
- mlx5_ib_dealloc_counters(dev);
+ mpi->mdev = mdev;
-err_odp:
- mlx5_ib_odp_remove_one(dev);
+ err = mlx5_query_nic_vport_system_image_guid(mdev,
+ &mpi->sys_image_guid);
+ if (err) {
+ kfree(mpi);
+ return NULL;
+ }
-err_rsrc:
- destroy_dev_resources(&dev->devr);
+ mutex_lock(&mlx5_ib_multiport_mutex);
+ list_for_each_entry(dev, &mlx5_ib_dev_list, ib_dev_list) {
+ if (dev->sys_image_guid == mpi->sys_image_guid)
+ bound = mlx5_ib_bind_slave_port(dev, mpi);
-err_disable_eth:
- if (ll == IB_LINK_LAYER_ETHERNET) {
- mlx5_disable_eth(dev);
- mlx5_remove_netdev_notifier(dev);
+ if (bound) {
+ rdma_roce_rescan_device(&dev->ib_dev);
+ break;
+ }
}
-err_free_port:
- kfree(dev->port);
+ if (!bound) {
+ list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
+ dev_dbg(&mdev->pdev->dev, "no suitable IB device found to bind to, added to unaffiliated list.\n");
+ } else {
+ mlx5_ib_dbg(dev, "bound port %u\n", port_num + 1);
+ }
+ mutex_unlock(&mlx5_ib_multiport_mutex);
-err_dealloc:
- ib_dealloc_device((struct ib_device *)dev);
+ return mpi;
+}
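+
+/* For a slave port the mpi itself is returned as the driver context,
+ * which lets mlx5_ib_remove() below tell a slave (context is the mpi)
+ * apart from a master (context is the ibdev) via
+ * mlx5_core_is_mp_slave().
+ */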
- return NULL;
+static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
+{
+ enum rdma_link_layer ll;
+ int port_type_cap;
+
+ port_type_cap = MLX5_CAP_GEN(mdev, port_type);
+ ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
+
+ if (mlx5_core_is_mp_slave(mdev) && ll == IB_LINK_LAYER_ETHERNET) {
+ u8 port_num = mlx5_core_native_port_num(mdev) - 1;
+
+ return mlx5_ib_add_slave_port(mdev, port_num);
+ }
+
+ return __mlx5_ib_add(mdev, &pf_profile);
}
static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
{
- struct mlx5_ib_dev *dev = context;
- enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1);
+ struct mlx5_ib_multiport_info *mpi;
+ struct mlx5_ib_dev *dev;
- cancel_delay_drop(dev);
- mlx5_remove_netdev_notifier(dev);
- ib_unregister_device(&dev->ib_dev);
- mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
- mlx5_free_bfreg(dev->mdev, &dev->bfreg);
- mlx5_put_uars_page(dev->mdev, mdev->priv.uar);
- mlx5_ib_cleanup_cong_debugfs(dev);
- if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
- mlx5_ib_dealloc_counters(dev);
- destroy_umrc_res(dev);
- mlx5_ib_odp_remove_one(dev);
- destroy_dev_resources(&dev->devr);
- if (ll == IB_LINK_LAYER_ETHERNET)
- mlx5_disable_eth(dev);
- kfree(dev->port);
- ib_dealloc_device(&dev->ib_dev);
+ if (mlx5_core_is_mp_slave(mdev)) {
+ mpi = context;
+ mutex_lock(&mlx5_ib_multiport_mutex);
+ if (mpi->ibdev)
+ mlx5_ib_unbind_slave_port(mpi->ibdev, mpi);
+ list_del(&mpi->list);
+ mutex_unlock(&mlx5_ib_multiport_mutex);
+ return;
+ }
+
+ dev = context;
+ __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
}
static struct mlx5_interface mlx5_ib_interface = {
@@ -4277,6 +5091,10 @@ static int __init mlx5_ib_init(void)
{
int err;
+ mlx5_ib_event_wq = alloc_ordered_workqueue("mlx5_ib_event_wq", 0);
+ if (!mlx5_ib_event_wq)
+ return -ENOMEM;
+
mlx5_ib_odp_init();
err = mlx5_register_interface(&mlx5_ib_interface);
@@ -4287,6 +5105,7 @@ static int __init mlx5_ib_init(void)
static void __exit mlx5_ib_cleanup(void)
{
mlx5_unregister_interface(&mlx5_ib_interface);
+ destroy_workqueue(mlx5_ib_event_wq);
}
module_init(mlx5_ib_init);
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 2c5f3533bbc9..139385129973 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -70,15 +70,6 @@ enum {
MLX5_IB_MMAP_CMD_MASK = 0xff,
};
-enum mlx5_ib_mmap_cmd {
- MLX5_IB_MMAP_REGULAR_PAGE = 0,
- MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES = 1,
- MLX5_IB_MMAP_WC_PAGE = 2,
- MLX5_IB_MMAP_NC_PAGE = 3,
- /* 5 is chosen in order to be compatible with old versions of libmlx5 */
- MLX5_IB_MMAP_CORE_CLOCK = 5,
-};
-
enum {
MLX5_RES_SCAT_DATA32_CQE = 0x1,
MLX5_RES_SCAT_DATA64_CQE = 0x2,
@@ -112,6 +103,11 @@ enum {
MLX5_TM_MAX_SGE = 1,
};
+enum {
+ MLX5_IB_INVALID_UAR_INDEX = BIT(31),
+ MLX5_IB_INVALID_BFREG = BIT(31),
+};
+
struct mlx5_ib_vma_private_data {
struct list_head list;
struct vm_area_struct *vma;
@@ -200,6 +196,8 @@ struct mlx5_ib_flow_db {
* creates the actual hardware QP.
*/
#define MLX5_IB_QPT_HW_GSI IB_QPT_RESERVED2
+#define MLX5_IB_QPT_DCI IB_QPT_RESERVED3
+#define MLX5_IB_QPT_DCT IB_QPT_RESERVED4
#define MLX5_IB_WR_UMR IB_WR_RESERVED1
#define MLX5_IB_UMR_OCTOWORD 16
@@ -360,12 +358,18 @@ struct mlx5_bf {
struct mlx5_sq_bfreg *bfreg;
};
+struct mlx5_ib_dct {
+ struct mlx5_core_dct mdct;
+ u32 *in;
+};
+
struct mlx5_ib_qp {
struct ib_qp ibqp;
union {
struct mlx5_ib_qp_trans trans_qp;
struct mlx5_ib_raw_packet_qp raw_packet_qp;
struct mlx5_ib_rss_qp rss_qp;
+ struct mlx5_ib_dct dct;
};
struct mlx5_buf buf;
@@ -404,6 +408,8 @@ struct mlx5_ib_qp {
u32 rate_limit;
u32 underlay_qpn;
bool tunnel_offload_en;
+ /* storage for qp sub type when core qp type is IB_QPT_DRIVER */
+ enum ib_qp_type qp_sub_type;
};
struct mlx5_ib_cq_buf {
@@ -636,10 +642,21 @@ struct mlx5_ib_counters {
u32 num_q_counters;
u32 num_cong_counters;
u16 set_id;
+ bool set_id_valid;
+};
+
+struct mlx5_ib_multiport_info;
+
+struct mlx5_ib_multiport {
+ struct mlx5_ib_multiport_info *mpi;
+ /* To be held when accessing the multiport info */
+ spinlock_t mpi_lock;
};
struct mlx5_ib_port {
struct mlx5_ib_counters cnts;
+ struct mlx5_ib_multiport mp;
+ struct mlx5_ib_dbg_cc_params *dbg_cc_params;
};
struct mlx5_roce {
@@ -651,12 +668,15 @@ struct mlx5_roce {
struct notifier_block nb;
atomic_t next_port;
enum ib_port_state last_port_state;
+ struct mlx5_ib_dev *dev;
+ u8 native_port_num;
};
struct mlx5_ib_dbg_param {
int offset;
struct mlx5_ib_dev *dev;
struct dentry *dentry;
+ u8 port_num;
};
enum mlx5_ib_dbg_cc_types {
@@ -709,10 +729,50 @@ struct mlx5_ib_delay_drop {
struct mlx5_ib_dbg_delay_drop *dbg;
};
+enum mlx5_ib_stages {
+ MLX5_IB_STAGE_INIT,
+ MLX5_IB_STAGE_CAPS,
+ MLX5_IB_STAGE_ROCE,
+ MLX5_IB_STAGE_DEVICE_RESOURCES,
+ MLX5_IB_STAGE_ODP,
+ MLX5_IB_STAGE_COUNTERS,
+ MLX5_IB_STAGE_CONG_DEBUGFS,
+ MLX5_IB_STAGE_UAR,
+ MLX5_IB_STAGE_BFREG,
+ MLX5_IB_STAGE_IB_REG,
+ MLX5_IB_STAGE_UMR_RESOURCES,
+ MLX5_IB_STAGE_DELAY_DROP,
+ MLX5_IB_STAGE_CLASS_ATTR,
+ MLX5_IB_STAGE_MAX,
+};
+
+struct mlx5_ib_stage {
+ int (*init)(struct mlx5_ib_dev *dev);
+ void (*cleanup)(struct mlx5_ib_dev *dev);
+};
+
+#define STAGE_CREATE(_stage, _init, _cleanup) \
+ .stage[_stage] = {.init = _init, .cleanup = _cleanup}
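+
+/* For illustration, STAGE_CREATE(MLX5_IB_STAGE_UAR,
+ * mlx5_ib_stage_uar_init, mlx5_ib_stage_uar_cleanup) expands to the
+ * designated initializer
+ *
+ * .stage[MLX5_IB_STAGE_UAR] = {.init = mlx5_ib_stage_uar_init,
+ * .cleanup = mlx5_ib_stage_uar_cleanup}
+ *
+ * which is why a profile definition reads as a flat list of stages.
+ */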
+
+struct mlx5_ib_profile {
+ struct mlx5_ib_stage stage[MLX5_IB_STAGE_MAX];
+};
+
+struct mlx5_ib_multiport_info {
+ struct list_head list;
+ struct mlx5_ib_dev *ibdev;
+ struct mlx5_core_dev *mdev;
+ struct completion unref_comp;
+ u64 sys_image_guid;
+ u32 mdev_refcnt;
+ bool is_master;
+ bool unaffiliate;
+};
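+
+/* mdev_refcnt counts users of mpi->mdev handed out while the port is
+ * affiliated (presumably via mlx5_ib_get_native_port_mdev() and
+ * mlx5_ib_put_native_port_mdev(), declared later in this header);
+ * together with unaffiliate and unref_comp it forms the handshake that
+ * mlx5_ib_unbind_slave_port() waits on.
+ */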
+
struct mlx5_ib_dev {
struct ib_device ib_dev;
struct mlx5_core_dev *mdev;
- struct mlx5_roce roce;
+ struct mlx5_roce roce[MLX5_MAX_PORTS];
int num_ports;
/* serialize update of capability mask
*/
@@ -746,12 +806,14 @@ struct mlx5_ib_dev {
struct mlx5_sq_bfreg bfreg;
struct mlx5_sq_bfreg fp_bfreg;
struct mlx5_ib_delay_drop delay_drop;
- struct mlx5_ib_dbg_cc_params *dbg_cc_params;
+ const struct mlx5_ib_profile *profile;
/* protect the user_td */
struct mutex lb_mutex;
u32 user_td;
u8 umr_fence;
+ struct list_head ib_dev_list;
+ u64 sys_image_guid;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -956,13 +1018,14 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
struct ib_rwq_ind_table_init_attr *init_attr,
struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
+bool mlx5_ib_dc_atomic_is_supported(struct mlx5_ib_dev *dev);
+
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
struct mlx5_pagefault *pfault);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
-void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
@@ -977,7 +1040,6 @@ static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
}
static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
-static inline void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
@@ -1001,8 +1063,8 @@ __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port_num,
int index, enum ib_gid_type *gid_type);
-void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev);
-int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev);
+void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
+int mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u8 port_num);
/* GSI QP helper functions */
struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
@@ -1021,6 +1083,15 @@ void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);
int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);
+void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi,
+ int bfregn);
+struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi);
+struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
+ u8 ib_port_num,
+ u8 *native_port_num);
+void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
+ u8 port_num);
+
static inline void init_query_mad(struct ib_smp *mad)
{
mad->base_version = 1;
@@ -1052,8 +1123,8 @@ static inline u32 check_cq_create_flags(u32 flags)
* It returns non-zero value for unsupported CQ
* create flags, otherwise it returns zero.
*/
- return (flags & ~(IB_CQ_FLAGS_IGNORE_OVERRUN |
- IB_CQ_FLAGS_TIMESTAMP_COMPLETION));
+ return (flags & ~(IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN |
+ IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION));
}
static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
@@ -1113,10 +1184,10 @@ static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_suppor
MLX5_UARS_IN_PAGE : 1;
}
-static inline int get_num_uars(struct mlx5_ib_dev *dev,
- struct mlx5_bfreg_info *bfregi)
+static inline int get_num_static_uars(struct mlx5_ib_dev *dev,
+ struct mlx5_bfreg_info *bfregi)
{
- return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * bfregi->num_sys_pages;
+ return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * bfregi->num_static_sys_pages;
}
#endif /* MLX5_IB_H */
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index d109fe8290a7..556e015678de 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1206,6 +1206,9 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
int err;
bool use_umr = true;
+ if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
+ return ERR_PTR(-EINVAL);
+
mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
start, virt_addr, length, access_flags);
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index e2197bdda89c..f1a87a690a4c 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -1207,10 +1207,6 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
{
int ret;
- ret = init_srcu_struct(&dev->mr_srcu);
- if (ret)
- return ret;
-
if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) {
ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey);
if (ret) {
@@ -1222,11 +1218,6 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
return 0;
}
-void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *dev)
-{
- cleanup_srcu_struct(&dev->mr_srcu);
-}
-
int mlx5_ib_odp_init(void)
{
mlx5_imr_ksm_entries = BIT_ULL(get_order(TASK_SIZE) -
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index cffe5966aef9..39d24bf694a8 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -493,7 +493,7 @@ enum {
static int max_bfregs(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi)
{
- return get_num_uars(dev, bfregi) * MLX5_NON_FP_BFREGS_PER_UAR;
+ return get_num_static_uars(dev, bfregi) * MLX5_NON_FP_BFREGS_PER_UAR;
}
static int num_med_bfreg(struct mlx5_ib_dev *dev,
@@ -581,7 +581,7 @@ static int alloc_bfreg(struct mlx5_ib_dev *dev,
return bfregn;
}
-static void free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi, int bfregn)
+void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi, int bfregn)
{
mutex_lock(&bfregi->lock);
bfregi->count[bfregn]--;
@@ -613,6 +613,7 @@ static int to_mlx5_st(enum ib_qp_type type)
case IB_QPT_XRC_TGT: return MLX5_QP_ST_XRC;
case IB_QPT_SMI: return MLX5_QP_ST_QP0;
case MLX5_IB_QPT_HW_GSI: return MLX5_QP_ST_QP1;
+ case MLX5_IB_QPT_DCI: return MLX5_QP_ST_DCI;
case IB_QPT_RAW_IPV6: return MLX5_QP_ST_RAW_IPV6;
case IB_QPT_RAW_PACKET:
case IB_QPT_RAW_ETHERTYPE: return MLX5_QP_ST_RAW_ETHERTYPE;
@@ -627,7 +628,8 @@ static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq,
struct mlx5_ib_cq *recv_cq);
static int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
- struct mlx5_bfreg_info *bfregi, int bfregn)
+ struct mlx5_bfreg_info *bfregi, int bfregn,
+ bool dyn_bfreg)
{
int bfregs_per_sys_page;
int index_of_sys_page;
@@ -637,8 +639,16 @@ static int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
MLX5_NON_FP_BFREGS_PER_UAR;
index_of_sys_page = bfregn / bfregs_per_sys_page;
- offset = bfregn % bfregs_per_sys_page / MLX5_NON_FP_BFREGS_PER_UAR;
+ if (dyn_bfreg) {
+ index_of_sys_page += bfregi->num_static_sys_pages;
+ if (bfregn > bfregi->num_dyn_bfregs ||
+ bfregi->sys_pages[index_of_sys_page] == MLX5_IB_INVALID_UAR_INDEX) {
+ mlx5_ib_dbg(dev, "Invalid dynamic uar index\n");
+ return -EINVAL;
+ }
+ }
+ offset = bfregn % bfregs_per_sys_page / MLX5_NON_FP_BFREGS_PER_UAR;
return bfregi->sys_pages[index_of_sys_page] + offset;
}
@@ -764,7 +774,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct mlx5_ib_create_qp ucmd;
struct mlx5_ib_ubuffer *ubuffer = &base->ubuffer;
int page_shift = 0;
- int uar_index;
+ int uar_index = 0;
int npages;
u32 offset = 0;
int bfregn;
@@ -780,12 +790,20 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
}
context = to_mucontext(pd->uobject->context);
- /*
- * TBD: should come from the verbs when we have the API
- */
- if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
+ if (ucmd.flags & MLX5_QP_FLAG_BFREG_INDEX) {
+ uar_index = bfregn_to_uar_index(dev, &context->bfregi,
+ ucmd.bfreg_index, true);
+ if (uar_index < 0)
+ return uar_index;
+
+ bfregn = MLX5_IB_INVALID_BFREG;
+ } else if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL) {
+ /*
+ * TBD: should come from the verbs when we have the API
+ */
/* In CROSS_CHANNEL CQ and QP must use the same UAR */
bfregn = MLX5_CROSS_CHANNEL_BFREG;
+ }
else {
bfregn = alloc_bfreg(dev, &context->bfregi, MLX5_IB_LATENCY_CLASS_HIGH);
if (bfregn < 0) {
@@ -804,8 +822,10 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
}
}
- uar_index = bfregn_to_uar_index(dev, &context->bfregi, bfregn);
mlx5_ib_dbg(dev, "bfregn 0x%x, uar_index 0x%x\n", bfregn, uar_index);
+ if (bfregn != MLX5_IB_INVALID_BFREG)
+ uar_index = bfregn_to_uar_index(dev, &context->bfregi, bfregn,
+ false);
qp->rq.offset = 0;
qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
@@ -845,7 +865,10 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
MLX5_SET(qpc, qpc, page_offset, offset);
MLX5_SET(qpc, qpc, uar_page, uar_index);
- resp->bfreg_index = adjust_bfregn(dev, &context->bfregi, bfregn);
+ if (bfregn != MLX5_IB_INVALID_BFREG)
+ resp->bfreg_index = adjust_bfregn(dev, &context->bfregi, bfregn);
+ else
+ resp->bfreg_index = MLX5_IB_INVALID_BFREG;
qp->bfregn = bfregn;
err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db);
@@ -874,7 +897,8 @@ err_umem:
ib_umem_release(ubuffer->umem);
err_bfreg:
- free_bfreg(dev, &context->bfregi, bfregn);
+ if (bfregn != MLX5_IB_INVALID_BFREG)
+ mlx5_ib_free_bfreg(dev, &context->bfregi, bfregn);
return err;
}
@@ -887,7 +911,13 @@ static void destroy_qp_user(struct mlx5_ib_dev *dev, struct ib_pd *pd,
mlx5_ib_db_unmap_user(context, &qp->db);
if (base->ubuffer.umem)
ib_umem_release(base->ubuffer.umem);
- free_bfreg(dev, &context->bfregi, qp->bfregn);
+
+ /*
+ * Free only the BFREGs which are handled by the kernel.
+ * BFREGs of UARs allocated dynamically are handled by user.
+ */
+ if (qp->bfregn != MLX5_IB_INVALID_BFREG)
+ mlx5_ib_free_bfreg(dev, &context->bfregi, qp->bfregn);
}
static int create_kernel_qp(struct mlx5_ib_dev *dev,
@@ -1015,6 +1045,7 @@ static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
{
if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
+ (attr->qp_type == MLX5_IB_QPT_DCI) ||
(attr->qp_type == IB_QPT_XRC_INI))
return MLX5_SRQ_RQ;
else if (!qp->has_rq)
@@ -2086,20 +2117,108 @@ static const char *ib_qp_type_str(enum ib_qp_type type)
return "IB_QPT_RAW_PACKET";
case MLX5_IB_QPT_REG_UMR:
return "MLX5_IB_QPT_REG_UMR";
+ case IB_QPT_DRIVER:
+ return "IB_QPT_DRIVER";
case IB_QPT_MAX:
default:
return "Invalid QP type";
}
}
+static struct ib_qp *mlx5_ib_create_dct(struct ib_pd *pd,
+ struct ib_qp_init_attr *attr,
+ struct mlx5_ib_create_qp *ucmd)
+{
+ struct mlx5_ib_dev *dev;
+ struct mlx5_ib_qp *qp;
+ int err = 0;
+ u32 uidx = MLX5_IB_DEFAULT_UIDX;
+ void *dctc;
+
+ if (!attr->srq || !attr->recv_cq)
+ return ERR_PTR(-EINVAL);
+
+ dev = to_mdev(pd->device);
+
+ err = get_qp_user_index(to_mucontext(pd->uobject->context),
+ ucmd, sizeof(*ucmd), &uidx);
+ if (err)
+ return ERR_PTR(err);
+
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return ERR_PTR(-ENOMEM);
+
+ qp->dct.in = kzalloc(MLX5_ST_SZ_BYTES(create_dct_in), GFP_KERNEL);
+ if (!qp->dct.in) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
+ qp->qp_sub_type = MLX5_IB_QPT_DCT;
+ MLX5_SET(dctc, dctc, pd, to_mpd(pd)->pdn);
+ MLX5_SET(dctc, dctc, srqn_xrqn, to_msrq(attr->srq)->msrq.srqn);
+ MLX5_SET(dctc, dctc, cqn, to_mcq(attr->recv_cq)->mcq.cqn);
+ MLX5_SET64(dctc, dctc, dc_access_key, ucmd->access_key);
+ MLX5_SET(dctc, dctc, user_index, uidx);
+
+ qp->state = IB_QPS_RESET;
+
+ return &qp->ibqp;
+err_free:
+ kfree(qp);
+ return ERR_PTR(err);
+}
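+
+/* Creating a DCT is deferred: this function only fills the
+ * create_dct_in mailbox, and the firmware object is created by
+ * mlx5_core_create_dct() once the QP is moved from INIT to RTR in
+ * mlx5_ib_modify_dct() below.
+ */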
+
+static int set_mlx_qp_type(struct mlx5_ib_dev *dev,
+ struct ib_qp_init_attr *init_attr,
+ struct mlx5_ib_create_qp *ucmd,
+ struct ib_udata *udata)
+{
+ enum { MLX_QP_FLAGS = MLX5_QP_FLAG_TYPE_DCT | MLX5_QP_FLAG_TYPE_DCI };
+ int err;
+
+ if (!udata)
+ return -EINVAL;
+
+ if (udata->inlen < sizeof(*ucmd)) {
+ mlx5_ib_dbg(dev, "create_qp user command is smaller than expected\n");
+ return -EINVAL;
+ }
+ err = ib_copy_from_udata(ucmd, udata, sizeof(*ucmd));
+ if (err)
+ return err;
+
+ if ((ucmd->flags & MLX_QP_FLAGS) == MLX5_QP_FLAG_TYPE_DCI) {
+ init_attr->qp_type = MLX5_IB_QPT_DCI;
+ } else {
+ if ((ucmd->flags & MLX_QP_FLAGS) == MLX5_QP_FLAG_TYPE_DCT) {
+ init_attr->qp_type = MLX5_IB_QPT_DCT;
+ } else {
+ mlx5_ib_dbg(dev, "Invalid QP flags\n");
+ return -EINVAL;
+ }
+ }
+
+ if (!MLX5_CAP_GEN(dev->mdev, dct)) {
+ mlx5_ib_dbg(dev, "DC transport is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
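+
+/* Note that the comparison against MLX_QP_FLAGS above uses equality
+ * rather than a plain bit test, so a command that sets both the DCI
+ * and DCT flags (or neither) is rejected with -EINVAL.
+ */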
+
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *init_attr,
+ struct ib_qp_init_attr *verbs_init_attr,
struct ib_udata *udata)
{
struct mlx5_ib_dev *dev;
struct mlx5_ib_qp *qp;
u16 xrcdn = 0;
int err;
+ struct ib_qp_init_attr mlx_init_attr;
+ struct ib_qp_init_attr *init_attr = verbs_init_attr;
if (pd) {
dev = to_mdev(pd->device);
@@ -2124,6 +2243,26 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
}
+ if (init_attr->qp_type == IB_QPT_DRIVER) {
+ struct mlx5_ib_create_qp ucmd;
+
+ init_attr = &mlx_init_attr;
+ memcpy(init_attr, verbs_init_attr, sizeof(*verbs_init_attr));
+ err = set_mlx_qp_type(dev, init_attr, &ucmd, udata);
+ if (err)
+ return ERR_PTR(err);
+
+ if (init_attr->qp_type == MLX5_IB_QPT_DCI) {
+ if (init_attr->cap.max_recv_wr ||
+ init_attr->cap.max_recv_sge) {
+ mlx5_ib_dbg(dev, "DCI QP requires zero size receive queue\n");
+ return ERR_PTR(-EINVAL);
+ }
+ } else {
+ return mlx5_ib_create_dct(pd, init_attr, &ucmd);
+ }
+ }
+
switch (init_attr->qp_type) {
case IB_QPT_XRC_TGT:
case IB_QPT_XRC_INI:
@@ -2145,6 +2284,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
case IB_QPT_SMI:
case MLX5_IB_QPT_HW_GSI:
case MLX5_IB_QPT_REG_UMR:
+ case MLX5_IB_QPT_DCI:
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp)
return ERR_PTR(-ENOMEM);
@@ -2185,9 +2325,31 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
return ERR_PTR(-EINVAL);
}
+ if (verbs_init_attr->qp_type == IB_QPT_DRIVER)
+ qp->qp_sub_type = init_attr->qp_type;
+
return &qp->ibqp;
}
+static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
+{
+ struct mlx5_ib_dev *dev = to_mdev(mqp->ibqp.device);
+
+ if (mqp->state == IB_QPS_RTR) {
+ int err;
+
+ err = mlx5_core_destroy_dct(dev->mdev, &mqp->dct.mdct);
+ if (err) {
+ mlx5_ib_warn(dev, "failed to destroy DCT %d\n", err);
+ return err;
+ }
+ }
+
+ kfree(mqp->dct.in);
+ kfree(mqp);
+ return 0;
+}
+
int mlx5_ib_destroy_qp(struct ib_qp *qp)
{
struct mlx5_ib_dev *dev = to_mdev(qp->device);
@@ -2196,6 +2358,9 @@ int mlx5_ib_destroy_qp(struct ib_qp *qp)
if (unlikely(qp->qp_type == IB_QPT_GSI))
return mlx5_ib_gsi_destroy_qp(qp);
+ if (mqp->qp_sub_type == MLX5_IB_QPT_DCT)
+ return mlx5_ib_destroy_dct(mqp);
+
destroy_qp_common(dev, mqp);
kfree(mqp);
@@ -2763,7 +2928,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
if (!context)
return -ENOMEM;
- err = to_mlx5_st(ibqp->qp_type);
+ err = to_mlx5_st(ibqp->qp_type == IB_QPT_DRIVER ?
+ qp->qp_sub_type : ibqp->qp_type);
if (err < 0) {
mlx5_ib_dbg(dev, "unsupported qp type %d\n", ibqp->qp_type);
goto out;
@@ -2796,8 +2962,9 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
(ibqp->qp_type == IB_QPT_XRC_INI) ||
(ibqp->qp_type == IB_QPT_XRC_TGT)) {
if (mlx5_lag_is_active(dev->mdev)) {
+ u8 p = mlx5_core_native_port_num(dev->mdev);
tx_affinity = (unsigned int)atomic_add_return(1,
- &dev->roce.next_port) %
+ &dev->roce[p].next_port) %
MLX5_MAX_PORTS + 1;
context->flags |= cpu_to_be32(tx_affinity << 24);
}
@@ -2922,7 +3089,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
mlx5_cur = to_mlx5_state(cur_state);
mlx5_new = to_mlx5_state(new_state);
- mlx5_st = to_mlx5_st(ibqp->qp_type);
+ mlx5_st = to_mlx5_st(ibqp->qp_type == IB_QPT_DRIVER ?
+ qp->qp_sub_type : ibqp->qp_type);
if (mlx5_st < 0)
goto out;
@@ -2994,6 +3162,139 @@ out:
return err;
}
+static inline bool is_valid_mask(int mask, int req, int opt)
+{
+ if ((mask & req) != req)
+ return false;
+
+ if (mask & ~(req | opt))
+ return false;
+
+ return true;
+}
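+
+/* Worked example: with req = IB_QP_STATE and opt = IB_QP_PKEY_INDEX,
+ * a mask of IB_QP_STATE | IB_QP_PKEY_INDEX is valid, a bare
+ * IB_QP_PKEY_INDEX fails the required-bits check, and adding
+ * IB_QP_PORT fails the no-bits-outside-req-or-opt check.
+ */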
+
+/* Check that a state transition is valid for driver QP types.
+ * For now, DCI is the only driver QP type this function supports.
+ */
+static bool modify_dci_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state new_state,
+ enum ib_qp_attr_mask attr_mask)
+{
+ int req = IB_QP_STATE;
+ int opt = 0;
+
+ if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+ req |= IB_QP_PKEY_INDEX | IB_QP_PORT;
+ return is_valid_mask(attr_mask, req, opt);
+ } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
+ opt = IB_QP_PKEY_INDEX | IB_QP_PORT;
+ return is_valid_mask(attr_mask, req, opt);
+ } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
+ req |= IB_QP_PATH_MTU;
+ opt = IB_QP_PKEY_INDEX;
+ return is_valid_mask(attr_mask, req, opt);
+ } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
+ req |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
+ IB_QP_MAX_QP_RD_ATOMIC | IB_QP_SQ_PSN;
+ opt = IB_QP_MIN_RNR_TIMER;
+ return is_valid_mask(attr_mask, req, opt);
+ } else if (cur_state == IB_QPS_RTS && new_state == IB_QPS_RTS) {
+ opt = IB_QP_MIN_RNR_TIMER;
+ return is_valid_mask(attr_mask, req, opt);
+ } else if (cur_state != IB_QPS_RESET && new_state == IB_QPS_ERR) {
+ return is_valid_mask(attr_mask, req, opt);
+ }
+ return false;
+}
+
+/* mlx5_ib_modify_dct: modify a DCT QP
+ * valid transitions are:
+ * RESET to INIT: must set access_flags, pkey_index and port
+ * INIT to RTR : must set min_rnr_timer, tclass, flow_label,
+ * mtu, gid_index and hop_limit
+ * Other transitions and attributes are illegal
+ */
+static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ enum ib_qp_state cur_state, new_state;
+ int err = 0;
+ int required = IB_QP_STATE;
+ void *dctc;
+
+ if (!(attr_mask & IB_QP_STATE))
+ return -EINVAL;
+
+ cur_state = qp->state;
+ new_state = attr->qp_state;
+
+ dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
+ if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+ required |= IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;
+ if (!is_valid_mask(attr_mask, required, 0))
+ return -EINVAL;
+
+ if (attr->port_num == 0 ||
+ attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports)) {
+ mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
+ attr->port_num, dev->num_ports);
+ return -EINVAL;
+ }
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
+ MLX5_SET(dctc, dctc, rre, 1);
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
+ MLX5_SET(dctc, dctc, rwe, 1);
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) {
+ if (!mlx5_ib_dc_atomic_is_supported(dev))
+ return -EOPNOTSUPP;
+ MLX5_SET(dctc, dctc, rae, 1);
+ MLX5_SET(dctc, dctc, atomic_mode, MLX5_ATOMIC_MODE_DCT_CX);
+ }
+ MLX5_SET(dctc, dctc, pkey_index, attr->pkey_index);
+ MLX5_SET(dctc, dctc, port, attr->port_num);
+ MLX5_SET(dctc, dctc, counter_set_id, dev->port[attr->port_num - 1].cnts.set_id);
+
+ } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
+ struct mlx5_ib_modify_qp_resp resp = {};
+ u32 min_resp_len = offsetof(typeof(resp), dctn) +
+ sizeof(resp.dctn);
+
+ if (udata->outlen < min_resp_len)
+ return -EINVAL;
+ resp.response_length = min_resp_len;
+
+ required |= IB_QP_MIN_RNR_TIMER | IB_QP_AV | IB_QP_PATH_MTU;
+ if (!is_valid_mask(attr_mask, required, 0))
+ return -EINVAL;
+ MLX5_SET(dctc, dctc, min_rnr_nak, attr->min_rnr_timer);
+ MLX5_SET(dctc, dctc, tclass, attr->ah_attr.grh.traffic_class);
+ MLX5_SET(dctc, dctc, flow_label, attr->ah_attr.grh.flow_label);
+ MLX5_SET(dctc, dctc, mtu, attr->path_mtu);
+ MLX5_SET(dctc, dctc, my_addr_index, attr->ah_attr.grh.sgid_index);
+ MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit);
+
+ err = mlx5_core_create_dct(dev->mdev, &qp->dct.mdct, qp->dct.in,
+ MLX5_ST_SZ_BYTES(create_dct_in));
+ if (err)
+ return err;
+ resp.dctn = qp->dct.mdct.mqp.qpn;
+ err = ib_copy_to_udata(udata, &resp, resp.response_length);
+ if (err) {
+ mlx5_core_destroy_dct(dev->mdev, &qp->dct.mdct);
+ return err;
+ }
+ } else {
+ mlx5_ib_warn(dev, "Modify DCT: Invalid transition from %d to %d\n", cur_state, new_state);
+ return -EINVAL;
+ }
+ if (err)
+ qp->state = IB_QPS_ERR;
+ else
+ qp->state = new_state;
+ return err;
+}
+
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
{
@@ -3011,8 +3312,14 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (unlikely(ibqp->qp_type == IB_QPT_GSI))
return mlx5_ib_gsi_modify_qp(ibqp, attr, attr_mask);
- qp_type = (unlikely(ibqp->qp_type == MLX5_IB_QPT_HW_GSI)) ?
- IB_QPT_GSI : ibqp->qp_type;
+ if (ibqp->qp_type == IB_QPT_DRIVER)
+ qp_type = qp->qp_sub_type;
+ else
+ qp_type = (unlikely(ibqp->qp_type == MLX5_IB_QPT_HW_GSI)) ?
+ IB_QPT_GSI : ibqp->qp_type;
+
+ if (qp_type == MLX5_IB_QPT_DCT)
+ return mlx5_ib_modify_dct(ibqp, attr, attr_mask, udata);
mutex_lock(&qp->mutex);
@@ -3031,15 +3338,21 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
goto out;
}
} else if (qp_type != MLX5_IB_QPT_REG_UMR &&
- !ib_modify_qp_is_ok(cur_state, new_state, qp_type, attr_mask, ll)) {
+ qp_type != MLX5_IB_QPT_DCI &&
+ !ib_modify_qp_is_ok(cur_state, new_state, qp_type, attr_mask, ll)) {
mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
cur_state, new_state, ibqp->qp_type, attr_mask);
goto out;
+ } else if (qp_type == MLX5_IB_QPT_DCI &&
+ !modify_dci_qp_is_ok(cur_state, new_state, attr_mask)) {
+ mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
+ cur_state, new_state, qp_type, attr_mask);
+ goto out;
}
if ((attr_mask & IB_QP_PORT) &&
(attr->port_num == 0 ||
- attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports))) {
+ attr->port_num > dev->num_ports)) {
mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
attr->port_num, dev->num_ports);
goto out;
@@ -4358,11 +4671,10 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
struct rdma_ah_attr *ah_attr,
struct mlx5_qp_path *path)
{
- struct mlx5_core_dev *dev = ibdev->mdev;
memset(ah_attr, 0, sizeof(*ah_attr));
- if (!path->port || path->port > MLX5_CAP_GEN(dev, num_ports))
+ if (!path->port || path->port > ibdev->num_ports)
return;
ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, path->port);
@@ -4577,6 +4889,71 @@ out:
return err;
}
+static int mlx5_ib_dct_query_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *mqp,
+ struct ib_qp_attr *qp_attr, int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr)
+{
+ struct mlx5_core_dct *dct = &mqp->dct.mdct;
+ u32 *out;
+ u32 access_flags = 0;
+ int outlen = MLX5_ST_SZ_BYTES(query_dct_out);
+ void *dctc;
+ int err;
+ int supported_mask = IB_QP_STATE |
+ IB_QP_ACCESS_FLAGS |
+ IB_QP_PORT |
+ IB_QP_MIN_RNR_TIMER |
+ IB_QP_AV |
+ IB_QP_PATH_MTU |
+ IB_QP_PKEY_INDEX;
+
+ if (qp_attr_mask & ~supported_mask)
+ return -EINVAL;
+ if (mqp->state != IB_QPS_RTR)
+ return -EINVAL;
+
+ out = kzalloc(outlen, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_core_dct_query(dev->mdev, dct, out, outlen);
+ if (err)
+ goto out;
+
+ dctc = MLX5_ADDR_OF(query_dct_out, out, dct_context_entry);
+
+ if (qp_attr_mask & IB_QP_STATE)
+ qp_attr->qp_state = IB_QPS_RTR;
+
+ if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
+ if (MLX5_GET(dctc, dctc, rre))
+ access_flags |= IB_ACCESS_REMOTE_READ;
+ if (MLX5_GET(dctc, dctc, rwe))
+ access_flags |= IB_ACCESS_REMOTE_WRITE;
+ if (MLX5_GET(dctc, dctc, rae))
+ access_flags |= IB_ACCESS_REMOTE_ATOMIC;
+ qp_attr->qp_access_flags = access_flags;
+ }
+
+ if (qp_attr_mask & IB_QP_PORT)
+ qp_attr->port_num = MLX5_GET(dctc, dctc, port);
+ if (qp_attr_mask & IB_QP_MIN_RNR_TIMER)
+ qp_attr->min_rnr_timer = MLX5_GET(dctc, dctc, min_rnr_nak);
+ if (qp_attr_mask & IB_QP_AV) {
+ qp_attr->ah_attr.grh.traffic_class = MLX5_GET(dctc, dctc, tclass);
+ qp_attr->ah_attr.grh.flow_label = MLX5_GET(dctc, dctc, flow_label);
+ qp_attr->ah_attr.grh.sgid_index = MLX5_GET(dctc, dctc, my_addr_index);
+ qp_attr->ah_attr.grh.hop_limit = MLX5_GET(dctc, dctc, hop_limit);
+ }
+ if (qp_attr_mask & IB_QP_PATH_MTU)
+ qp_attr->path_mtu = MLX5_GET(dctc, dctc, mtu);
+ if (qp_attr_mask & IB_QP_PKEY_INDEX)
+ qp_attr->pkey_index = MLX5_GET(dctc, dctc, pkey_index);
+out:
+ kfree(out);
+ return err;
+}
+
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
@@ -4596,6 +4973,10 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
memset(qp_init_attr, 0, sizeof(*qp_init_attr));
memset(qp_attr, 0, sizeof(*qp_attr));
+ if (unlikely(qp->qp_sub_type == MLX5_IB_QPT_DCT))
+ return mlx5_ib_dct_query_qp(dev, qp, qp_attr,
+ qp_attr_mask, qp_init_attr);
+
mutex_lock(&qp->mutex);
if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
@@ -4685,13 +5066,10 @@ int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
int err;
err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn);
- if (err) {
+ if (err)
mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
- return err;
- }
kfree(xrcd);
-
return 0;
}
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index c6fe89d79248..2fe503e86c1d 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -472,7 +472,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
goto out;
}
- ret = get_user_pages(uaddr & PAGE_MASK, 1, FOLL_WRITE, pages, NULL);
+ ret = get_user_pages_fast(uaddr & PAGE_MASK, 1, FOLL_WRITE, pages);
if (ret < 0)
goto out;
@@ -623,13 +623,12 @@ int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
page = dev->db_tab->page + end;
alloc:
- page->db_rec = dma_alloc_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
- &page->mapping, GFP_KERNEL);
+ page->db_rec = dma_zalloc_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
+ &page->mapping, GFP_KERNEL);
if (!page->db_rec) {
ret = -ENOMEM;
goto out;
}
- memset(page->db_rec, 0, MTHCA_ICM_PAGE_SIZE);
ret = mthca_MAP_ICM_page(dev, page->mapping,
mthca_uarc_virt(dev, &dev->driver_uar, i));
diff --git a/drivers/infiniband/hw/mthca/mthca_user.h b/drivers/infiniband/hw/mthca/mthca_user.h
deleted file mode 100644
index 5fe56e810739..000000000000
--- a/drivers/infiniband/hw/mthca/mthca_user.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Copyright (c) 2005 Topspin Communications. All rights reserved.
- * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef MTHCA_USER_H
-#define MTHCA_USER_H
-
-#include <linux/types.h>
-
-/*
- * Increment this value if any changes that break userspace ABI
- * compatibility are made.
- */
-#define MTHCA_UVERBS_ABI_VERSION 1
-
-/*
- * Make sure that all structs defined in this file remain laid out so
- * that they pack the same way on 32-bit and 64-bit architectures (to
- * avoid incompatibility between 32-bit userspace and 64-bit kernels).
- * In particular do not use pointer types -- pass pointers in __u64
- * instead.
- */
-
-struct mthca_alloc_ucontext_resp {
- __u32 qp_tab_size;
- __u32 uarc_size;
-};
-
-struct mthca_alloc_pd_resp {
- __u32 pdn;
- __u32 reserved;
-};
-
-struct mthca_reg_mr {
-/*
- * Mark the memory region with a DMA attribute that causes
- * in-flight DMA to be flushed when the region is written to:
- */
-#define MTHCA_MR_DMASYNC 0x1
- __u32 mr_attrs;
- __u32 reserved;
-};
-
-struct mthca_create_cq {
- __u32 lkey;
- __u32 pdn;
- __u64 arm_db_page;
- __u64 set_db_page;
- __u32 arm_db_index;
- __u32 set_db_index;
-};
-
-struct mthca_create_cq_resp {
- __u32 cqn;
- __u32 reserved;
-};
-
-struct mthca_resize_cq {
- __u32 lkey;
- __u32 reserved;
-};
-
-struct mthca_create_srq {
- __u32 lkey;
- __u32 db_index;
- __u64 db_page;
-};
-
-struct mthca_create_srq_resp {
- __u32 srqn;
- __u32 reserved;
-};
-
-struct mthca_create_qp {
- __u32 lkey;
- __u32 reserved;
- __u64 sq_db_page;
- __u64 rq_db_page;
- __u32 sq_db_index;
- __u32 rq_db_index;
-};
-
-#endif /* MTHCA_USER_H */
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index c56ca2a74df5..6cdfbf8c5674 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1365,7 +1365,7 @@ static int mini_cm_del_listen(struct nes_cm_core *cm_core,
static inline int mini_cm_accelerated(struct nes_cm_core *cm_core,
struct nes_cm_node *cm_node)
{
- cm_node->accelerated = 1;
+ cm_node->accelerated = true;
if (cm_node->accept_pend) {
BUG_ON(!cm_node->listener);
diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h
index d827d03e3941..b9cc02b4e8d5 100644
--- a/drivers/infiniband/hw/nes/nes_cm.h
+++ b/drivers/infiniband/hw/nes/nes_cm.h
@@ -279,7 +279,6 @@ struct nes_cm_tcp_context {
u8 rcv_wscale;
struct nes_cm_tsa_context tsa_cntxt;
- struct timeval sent_ts;
};
@@ -341,7 +340,7 @@ struct nes_cm_node {
u16 mpa_frame_size;
struct iw_cm_id *cm_id;
struct list_head list;
- int accelerated;
+ bool accelerated;
struct nes_cm_listener *listener;
enum nes_cm_conn_type conn_type;
struct nes_vnic *nesvnic;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 0ba695a88b62..9904918589a4 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -380,11 +380,10 @@ static int ocrdma_alloc_q(struct ocrdma_dev *dev,
q->len = len;
q->entry_size = entry_size;
q->size = len * entry_size;
- q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size,
- &q->dma, GFP_KERNEL);
+ q->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, q->size,
+ &q->dma, GFP_KERNEL);
if (!q->va)
return -ENOMEM;
- memset(q->va, 0, q->size);
return 0;
}
@@ -1819,12 +1818,11 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
return -ENOMEM;
ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ,
OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
- cq->va = dma_alloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
+ cq->va = dma_zalloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
if (!cq->va) {
status = -ENOMEM;
goto mem_err;
}
- memset(cq->va, 0, cq->len);
page_size = cq->len / hw_pages;
cmd->cmd.pgsz_pgcnt = (page_size / OCRDMA_MIN_Q_PAGE_SIZE) <<
OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT;
@@ -2212,10 +2210,9 @@ static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
qp->sq.max_cnt = max_wqe_allocated;
len = (hw_pages * hw_page_size);
- qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
+ qp->sq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
if (!qp->sq.va)
return -EINVAL;
- memset(qp->sq.va, 0, len);
qp->sq.len = len;
qp->sq.pa = pa;
qp->sq.entry_size = dev->attr.wqe_size;
@@ -2263,10 +2260,9 @@ static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
qp->rq.max_cnt = max_rqe_allocated;
len = (hw_pages * hw_page_size);
- qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
+ qp->rq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
if (!qp->rq.va)
return -ENOMEM;
- memset(qp->rq.va, 0, len);
qp->rq.pa = pa;
qp->rq.len = len;
qp->rq.entry_size = dev->attr.rqe_size;
@@ -2320,11 +2316,10 @@ static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
if (dev->attr.ird == 0)
return 0;
- qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len,
- &pa, GFP_KERNEL);
+ qp->ird_q_va = dma_zalloc_coherent(&pdev->dev, ird_q_len, &pa,
+ GFP_KERNEL);
if (!qp->ird_q_va)
return -ENOMEM;
- memset(qp->ird_q_va, 0, ird_q_len);
ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages,
pa, ird_page_size);
for (; i < ird_q_len / dev->attr.rqe_size; i++) {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index e528d7acb7f6..24d20a4aa262 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -73,15 +73,13 @@ bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev)
mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req),
sizeof(struct ocrdma_rdma_stats_resp));
- mem->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, mem->size,
- &mem->pa, GFP_KERNEL);
+ mem->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, mem->size,
+ &mem->pa, GFP_KERNEL);
if (!mem->va) {
pr_err("%s: stats mbox allocation failed\n", __func__);
return false;
}
- memset(mem->va, 0, mem->size);
-
/* Alloc debugfs mem */
mem->debugfs_mem = kzalloc(OCRDMA_MAX_DBGFS_MEM, GFP_KERNEL);
if (!mem->debugfs_mem)
@@ -834,7 +832,7 @@ void ocrdma_add_port_stats(struct ocrdma_dev *dev)
dev->reset_stats.type = OCRDMA_RESET_STATS;
dev->reset_stats.dev = dev;
- if (!debugfs_create_file("reset_stats", S_IRUSR, dev->dir,
+ if (!debugfs_create_file("reset_stats", 0200, dev->dir,
&dev->reset_stats, &ocrdma_dbg_ops))
goto err;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 7866fd8051f6..8009bdad4e5b 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -461,7 +461,7 @@ retry:
static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
struct ocrdma_pd *pd)
{
- return (uctx->cntxt_pd == pd ? true : false);
+ return (uctx->cntxt_pd == pd);
}
static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
@@ -550,13 +550,12 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
INIT_LIST_HEAD(&ctx->mm_head);
mutex_init(&ctx->mm_list_lock);
- ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
- &ctx->ah_tbl.pa, GFP_KERNEL);
+ ctx->ah_tbl.va = dma_zalloc_coherent(&pdev->dev, map_len,
+ &ctx->ah_tbl.pa, GFP_KERNEL);
if (!ctx->ah_tbl.va) {
kfree(ctx);
return ERR_PTR(-ENOMEM);
}
- memset(ctx->ah_tbl.va, 0, map_len);
ctx->ah_tbl.len = map_len;
memset(&resp, 0, sizeof(resp));
@@ -885,13 +884,12 @@ static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
return -ENOMEM;
for (i = 0; i < mr->num_pbls; i++) {
- va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
+ va = dma_zalloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
if (!va) {
ocrdma_free_mr_pbl_tbl(dev, mr);
status = -ENOMEM;
break;
}
- memset(va, 0, dma_len);
mr->pbl_table[i].va = va;
mr->pbl_table[i].pa = pa;
}
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index b26aa88dab48..53f00dbf313f 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -604,12 +604,11 @@ static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
return ERR_PTR(-ENOMEM);
for (i = 0; i < pbl_info->num_pbls; i++) {
- va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size,
- &pa, flags);
+ va = dma_zalloc_coherent(&pdev->dev, pbl_info->pbl_size,
+ &pa, flags);
if (!va)
goto err;
- memset(va, 0, pbl_info->pbl_size);
pbl_table[i].va = va;
pbl_table[i].pa = pa;
}
@@ -3040,7 +3039,7 @@ static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
swqe->wqe_size = 2;
swqe2 = qed_chain_produce(&qp->sq.pbl);
- swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.imm_data);
+ swqe->inv_key_or_imm_data = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
wr, bad_wr);
swqe->length = cpu_to_le32(length);
@@ -3471,9 +3470,9 @@ static int qedr_poll_cq_req(struct qedr_dev *dev,
break;
case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
if (qp->state != QED_ROCE_QP_STATE_ERR)
- DP_ERR(dev,
- "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
- cq->icid, qp->icid);
+ DP_DEBUG(dev, QEDR_MSG_CQ,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
IB_WC_WR_FLUSH_ERR, 1);
break;
@@ -3591,7 +3590,7 @@ static inline int qedr_set_ok_cqe_resp_wc(struct rdma_cqe_responder *resp,
wc->byte_len = le32_to_cpu(resp->length);
if (resp->flags & QEDR_RESP_IMM) {
- wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key);
+ wc->ex.imm_data = cpu_to_be32(le32_to_cpu(resp->imm_data_or_inv_r_Key));
wc->wc_flags |= IB_WC_WITH_IMM;
if (resp->flags & QEDR_RESP_RDMA)
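A hedged sketch of the byte-order fix above, with hypothetical helper names: IB immediate data is big-endian end to end (__be32 in ib_send_wr/ib_wc), while the qedr WQE/CQE fields are little-endian, so each direction needs the two-step conversion rather than a bare cpu_to_le32()/le32_to_cpu().

#include <linux/types.h>
#include <asm/byteorder.h>

static __le32 imm_to_wire(__be32 imm)           /* post_send direction */
{
        return cpu_to_le32(be32_to_cpu(imm));
}

static __be32 imm_from_wire(__le32 wire)        /* poll_cq direction */
{
        return cpu_to_be32(le32_to_cpu(wire));
}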
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 092ed8103842..0235f76bbc72 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -1428,8 +1428,6 @@ u64 qib_sps_ints(void);
*/
dma_addr_t qib_map_page(struct pci_dev *, struct page *, unsigned long,
size_t, int);
-const char *qib_get_unit_name(int unit);
-const char *qib_get_card_name(struct rvt_dev_info *rdi);
struct pci_dev *qib_get_pci_dev(struct rvt_dev_info *rdi);
/*
@@ -1488,15 +1486,15 @@ extern struct mutex qib_mutex;
#define qib_dev_err(dd, fmt, ...) \
dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
- qib_get_unit_name((dd)->unit), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
#define qib_dev_warn(dd, fmt, ...) \
dev_warn(&(dd)->pcidev->dev, "%s: " fmt, \
- qib_get_unit_name((dd)->unit), ##__VA_ARGS__)
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
#define qib_dev_porterr(dd, port, fmt, ...) \
dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \
- qib_get_unit_name((dd)->unit), (dd)->unit, (port), \
+ rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), (dd)->unit, (port), \
##__VA_ARGS__)
#define qib_devinfo(pcidev, fmt, ...) \
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index 33d3335385e8..3117cc5f2a9a 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -81,22 +81,6 @@ MODULE_DESCRIPTION("Intel IB driver");
struct qlogic_ib_stats qib_stats;
-const char *qib_get_unit_name(int unit)
-{
- static char iname[16];
-
- snprintf(iname, sizeof(iname), "infinipath%u", unit);
- return iname;
-}
-
-const char *qib_get_card_name(struct rvt_dev_info *rdi)
-{
- struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
- struct qib_devdata *dd = container_of(ibdev,
- struct qib_devdata, verbs_dev);
- return qib_get_unit_name(dd->unit);
-}
-
struct pci_dev *qib_get_pci_dev(struct rvt_dev_info *rdi)
{
struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
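The deleted helper formatted into function-local static storage; an illustration of why that was fragile, mirroring the removed code:

static const char *get_unit_name(int unit)      /* shape of the old helper */
{
        static char iname[16];          /* one buffer shared by all callers */

        snprintf(iname, sizeof(iname), "infinipath%u", unit);
        return iname;                   /* a racing caller can overwrite it */
}

The replacement reads the per-device name that qib_init.c now sets once via rvt_set_ibdev_name(), so concurrent messages from different units can no longer clobber each other.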
diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c
index 33a2e74c8495..5838b3bf34b9 100644
--- a/drivers/infiniband/hw/qib/qib_eeprom.c
+++ b/drivers/infiniband/hw/qib/qib_eeprom.c
@@ -163,8 +163,7 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
if (bguid[6] == 0xff) {
if (bguid[5] == 0xff) {
qib_dev_err(dd,
- "Can't set %s GUID from base, wraps to OUI!\n",
- qib_get_unit_name(t));
+ "Can't set GUID from base, wraps to OUI!\n");
dd->base_guid = 0;
goto bail;
}
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 2d6a191afec0..f7593b5e2b76 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -58,7 +58,7 @@ static int qib_open(struct inode *, struct file *);
static int qib_close(struct inode *, struct file *);
static ssize_t qib_write(struct file *, const char __user *, size_t, loff_t *);
static ssize_t qib_write_iter(struct kiocb *, struct iov_iter *);
-static unsigned int qib_poll(struct file *, struct poll_table_struct *);
+static __poll_t qib_poll(struct file *, struct poll_table_struct *);
static int qib_mmapf(struct file *, struct vm_area_struct *);
/*
@@ -568,20 +568,16 @@ done:
static int qib_set_part_key(struct qib_ctxtdata *rcd, u16 key)
{
struct qib_pportdata *ppd = rcd->ppd;
- int i, any = 0, pidx = -1;
+ int i, pidx = -1;
+ bool any = false;
u16 lkey = key & 0x7FFF;
- int ret;
- if (lkey == (QIB_DEFAULT_P_KEY & 0x7FFF)) {
+ if (lkey == (QIB_DEFAULT_P_KEY & 0x7FFF))
/* nothing to do; this key always valid */
- ret = 0;
- goto bail;
- }
+ return 0;
- if (!lkey) {
- ret = -EINVAL;
- goto bail;
- }
+ if (!lkey)
+ return -EINVAL;
/*
* Set the full membership bit, because it has to be
@@ -594,18 +590,14 @@ static int qib_set_part_key(struct qib_ctxtdata *rcd, u16 key)
for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
if (!rcd->pkeys[i] && pidx == -1)
pidx = i;
- if (rcd->pkeys[i] == key) {
- ret = -EEXIST;
- goto bail;
- }
+ if (rcd->pkeys[i] == key)
+ return -EEXIST;
}
- if (pidx == -1) {
- ret = -EBUSY;
- goto bail;
- }
- for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
+ if (pidx == -1)
+ return -EBUSY;
+ for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
if (!ppd->pkeys[i]) {
- any++;
+ any = true;
continue;
}
if (ppd->pkeys[i] == key) {
@@ -613,44 +605,34 @@ static int qib_set_part_key(struct qib_ctxtdata *rcd, u16 key)
if (atomic_inc_return(pkrefs) > 1) {
rcd->pkeys[pidx] = key;
- ret = 0;
- goto bail;
- } else {
- /*
- * lost race, decrement count, catch below
- */
- atomic_dec(pkrefs);
- any++;
+ return 0;
}
+ /*
+ * lost race, decrement count, catch below
+ */
+ atomic_dec(pkrefs);
+ any = true;
}
- if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
+ if ((ppd->pkeys[i] & 0x7FFF) == lkey)
/*
* It makes no sense to have both the limited and
* full membership PKEY set at the same time since
* the unlimited one will disable the limited one.
*/
- ret = -EEXIST;
- goto bail;
- }
- }
- if (!any) {
- ret = -EBUSY;
- goto bail;
+ return -EEXIST;
}
- for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
+ if (!any)
+ return -EBUSY;
+ for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
if (!ppd->pkeys[i] &&
atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
rcd->pkeys[pidx] = key;
ppd->pkeys[i] = key;
(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
- ret = 0;
- goto bail;
+ return 0;
}
}
- ret = -EBUSY;
-
-bail:
- return ret;
+ return -EBUSY;
}
/**
@@ -1092,12 +1074,12 @@ bail:
return ret;
}
-static unsigned int qib_poll_urgent(struct qib_ctxtdata *rcd,
+static __poll_t qib_poll_urgent(struct qib_ctxtdata *rcd,
struct file *fp,
struct poll_table_struct *pt)
{
struct qib_devdata *dd = rcd->dd;
- unsigned pollflag;
+ __poll_t pollflag;
poll_wait(fp, &rcd->wait, pt);
@@ -1114,12 +1096,12 @@ static unsigned int qib_poll_urgent(struct qib_ctxtdata *rcd,
return pollflag;
}
-static unsigned int qib_poll_next(struct qib_ctxtdata *rcd,
+static __poll_t qib_poll_next(struct qib_ctxtdata *rcd,
struct file *fp,
struct poll_table_struct *pt)
{
struct qib_devdata *dd = rcd->dd;
- unsigned pollflag;
+ __poll_t pollflag;
poll_wait(fp, &rcd->wait, pt);
@@ -1135,10 +1117,10 @@ static unsigned int qib_poll_next(struct qib_ctxtdata *rcd,
return pollflag;
}
-static unsigned int qib_poll(struct file *fp, struct poll_table_struct *pt)
+static __poll_t qib_poll(struct file *fp, struct poll_table_struct *pt)
{
struct qib_ctxtdata *rcd;
- unsigned pollflag;
+ __poll_t pollflag;
rcd = ctxt_fp(fp);
if (!rcd)
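For reference, a minimal sketch of the __poll_t convention these hunks adopt (all names hypothetical): poll methods return the sparse-checkable __poll_t bitmask built from EPOLL* constants instead of a plain unsigned int, so mixing the mask with ordinary integers is flagged at build time.

#include <linux/poll.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_waitq);     /* hypothetical wait queue */

static bool demo_data_ready(void)               /* hypothetical predicate */
{
        return false;
}

static __poll_t demo_poll(struct file *fp, struct poll_table_struct *pt)
{
        __poll_t mask = 0;

        poll_wait(fp, &demo_waitq, pt);
        if (demo_data_ready())
                mask = EPOLLIN | EPOLLRDNORM;
        return mask;
}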
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 85dfbba427f6..3990f386aa32 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1119,6 +1119,8 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
"Could not allocate unit ID: error %d\n", -ret);
goto bail;
}
+ rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s%d", "qib", dd->unit);
+
dd->int_counter = alloc_percpu(u64);
if (!dd->int_counter) {
ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
deleted file mode 100644
index 8fdf79f8d4e4..000000000000
--- a/drivers/infiniband/hw/qib/qib_keys.c
+++ /dev/null
@@ -1,235 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2009 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "qib.h"
-
-/**
- * qib_alloc_lkey - allocate an lkey
- * @mr: memory region that this lkey protects
- * @dma_region: 0->normal key, 1->restricted DMA key
- *
- * Returns 0 if successful, otherwise returns -errno.
- *
- * Increments mr reference count as required.
- *
- * Sets the lkey field mr for non-dma regions.
- *
- */
-
-int qib_alloc_lkey(struct rvt_mregion *mr, int dma_region)
-{
- unsigned long flags;
- u32 r;
- u32 n;
- int ret = 0;
- struct qib_ibdev *dev = to_idev(mr->pd->device);
- struct rvt_lkey_table *rkt = &dev->lk_table;
-
- spin_lock_irqsave(&rkt->lock, flags);
-
- /* special case for dma_mr lkey == 0 */
- if (dma_region) {
- struct rvt_mregion *tmr;
-
- tmr = rcu_access_pointer(dev->dma_mr);
- if (!tmr) {
- qib_get_mr(mr);
- rcu_assign_pointer(dev->dma_mr, mr);
- mr->lkey_published = 1;
- }
- goto success;
- }
-
- /* Find the next available LKEY */
- r = rkt->next;
- n = r;
- for (;;) {
- if (rkt->table[r] == NULL)
- break;
- r = (r + 1) & (rkt->max - 1);
- if (r == n)
- goto bail;
- }
- rkt->next = (r + 1) & (rkt->max - 1);
- /*
- * Make sure lkey is never zero which is reserved to indicate an
- * unrestricted LKEY.
- */
- rkt->gen++;
- /*
- * bits are capped in qib_verbs.c to insure enough bits
- * for generation number
- */
- mr->lkey = (r << (32 - ib_rvt_lkey_table_size)) |
- ((((1 << (24 - ib_rvt_lkey_table_size)) - 1) & rkt->gen)
- << 8);
- if (mr->lkey == 0) {
- mr->lkey |= 1 << 8;
- rkt->gen++;
- }
- qib_get_mr(mr);
- rcu_assign_pointer(rkt->table[r], mr);
- mr->lkey_published = 1;
-success:
- spin_unlock_irqrestore(&rkt->lock, flags);
-out:
- return ret;
-bail:
- spin_unlock_irqrestore(&rkt->lock, flags);
- ret = -ENOMEM;
- goto out;
-}
-
-/**
- * qib_free_lkey - free an lkey
- * @mr: mr to free from tables
- */
-void qib_free_lkey(struct rvt_mregion *mr)
-{
- unsigned long flags;
- u32 lkey = mr->lkey;
- u32 r;
- struct qib_ibdev *dev = to_idev(mr->pd->device);
- struct rvt_lkey_table *rkt = &dev->lk_table;
-
- spin_lock_irqsave(&rkt->lock, flags);
- if (!mr->lkey_published)
- goto out;
- if (lkey == 0)
- RCU_INIT_POINTER(dev->dma_mr, NULL);
- else {
- r = lkey >> (32 - ib_rvt_lkey_table_size);
- RCU_INIT_POINTER(rkt->table[r], NULL);
- }
- qib_put_mr(mr);
- mr->lkey_published = 0;
-out:
- spin_unlock_irqrestore(&rkt->lock, flags);
-}
-
-/**
- * qib_rkey_ok - check the IB virtual address, length, and RKEY
- * @qp: qp for validation
- * @sge: SGE state
- * @len: length of data
- * @vaddr: virtual address to place data
- * @rkey: rkey to check
- * @acc: access flags
- *
- * Return 1 if successful, otherwise 0.
- *
- * increments the reference count upon success
- */
-int qib_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
- u32 len, u64 vaddr, u32 rkey, int acc)
-{
- struct rvt_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
- struct rvt_mregion *mr;
- unsigned n, m;
- size_t off;
-
- /* We use RKEY == zero for kernel virtual addresses */
- rcu_read_lock();
- if (rkey == 0) {
- struct rvt_pd *pd = ibpd_to_rvtpd(qp->ibqp.pd);
- struct qib_ibdev *dev = to_idev(pd->ibpd.device);
-
- if (pd->user)
- goto bail;
- mr = rcu_dereference(dev->dma_mr);
- if (!mr)
- goto bail;
- if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
- goto bail;
- rcu_read_unlock();
-
- sge->mr = mr;
- sge->vaddr = (void *) vaddr;
- sge->length = len;
- sge->sge_length = len;
- sge->m = 0;
- sge->n = 0;
- goto ok;
- }
-
- mr = rcu_dereference(
- rkt->table[(rkey >> (32 - ib_rvt_lkey_table_size))]);
- if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
- goto bail;
-
- off = vaddr - mr->iova;
- if (unlikely(vaddr < mr->iova || off + len > mr->length ||
- (mr->access_flags & acc) == 0))
- goto bail;
- if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
- goto bail;
- rcu_read_unlock();
-
- off += mr->offset;
- if (mr->page_shift) {
- /*
- page sizes are uniform power of 2 so no loop is necessary
- entries_spanned_by_off is the number of times the loop below
- would have executed.
- */
- size_t entries_spanned_by_off;
-
- entries_spanned_by_off = off >> mr->page_shift;
- off -= (entries_spanned_by_off << mr->page_shift);
- m = entries_spanned_by_off / RVT_SEGSZ;
- n = entries_spanned_by_off % RVT_SEGSZ;
- } else {
- m = 0;
- n = 0;
- while (off >= mr->map[m]->segs[n].length) {
- off -= mr->map[m]->segs[n].length;
- n++;
- if (n >= RVT_SEGSZ) {
- m++;
- n = 0;
- }
- }
- }
- sge->mr = mr;
- sge->vaddr = mr->map[m]->segs[n].vaddr + off;
- sge->length = mr->map[m]->segs[n].length - off;
- sge->sge_length = len;
- sge->m = m;
- sge->n = n;
-ok:
- return 1;
-bail:
- rcu_read_unlock();
- return 0;
-}
-
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 8f5754fb8579..cfddff45413f 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -246,7 +246,6 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
@@ -293,7 +292,6 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
newreq = 0;
if (qp->s_cur == qp->s_tail) {
/* Check if send work queue is empty. */
- smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_tail == READ_ONCE(qp->s_head))
goto bail;
/*
@@ -434,13 +432,13 @@ no_flow_control:
qp->s_state = OP(COMPARE_SWAP);
put_ib_ateth_swap(wqe->atomic_wr.swap,
&ohdr->u.atomic_eth);
- put_ib_ateth_swap(wqe->atomic_wr.compare_add,
- &ohdr->u.atomic_eth);
+ put_ib_ateth_compare(wqe->atomic_wr.compare_add,
+ &ohdr->u.atomic_eth);
} else {
qp->s_state = OP(FETCH_ADD);
put_ib_ateth_swap(wqe->atomic_wr.compare_add,
&ohdr->u.atomic_eth);
- put_ib_ateth_swap(0, &ohdr->u.atomic_eth);
+ put_ib_ateth_compare(0, &ohdr->u.atomic_eth);
}
put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr,
&ohdr->u.atomic_eth);
@@ -1340,7 +1338,6 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
goto ack_done;
/* Ignore invalid responses. */
- smp_read_barrier_depends(); /* see post_one_send */
if (qib_cmp24(psn, READ_ONCE(qp->s_next_psn)) >= 0)
goto ack_done;
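The smp_read_barrier_depends() deletions here and in the qib_ruc/uc/ud hunks below rely on READ_ONCE() itself ordering the dependent access (it absorbed the barrier around v4.15), so the explicit call before it became redundant. A sketch of the simplified idiom, with a hypothetical helper:

#include <linux/compiler.h>
#include <rdma/rdmavt_qp.h>

static bool send_queue_empty(struct rvt_qp *qp)
{
        /*
         * READ_ONCE() now implies the ordering that a preceding
         * smp_read_barrier_depends() used to provide.
         */
        return qp->s_last == READ_ONCE(qp->s_head);
}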
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index 9a37e844d4c8..4662cc7bde92 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -367,7 +367,6 @@ static void qib_ruc_loopback(struct rvt_qp *sqp)
sqp->s_flags |= RVT_S_BUSY;
again:
- smp_read_barrier_depends(); /* see post_one_send() */
if (sqp->s_last == READ_ONCE(sqp->s_head))
goto clr_busy;
wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index bddcc37ace44..70c58b88192c 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -60,7 +60,6 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
@@ -90,7 +89,6 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
RVT_PROCESS_NEXT_SEND_OK))
goto bail;
/* Check if send work queue is empty. */
- smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_cur == READ_ONCE(qp->s_head))
goto bail;
/*
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index 15962ed193ce..386c3c4da0c7 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -252,7 +252,6 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- smp_read_barrier_depends(); /* see post_one_send */
if (qp->s_last == READ_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
@@ -266,7 +265,6 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
}
/* see post_one_send() */
- smp_read_barrier_depends();
if (qp->s_cur == READ_ONCE(qp->s_head))
goto bail;
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index c55000501582..fabee760407e 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -1571,7 +1571,6 @@ int qib_register_ib_device(struct qib_devdata *dd)
if (!ib_qib_sys_image_guid)
ib_qib_sys_image_guid = ppd->guid;
- strlcpy(ibdev->name, "qib%d", IB_DEVICE_NAME_MAX);
ibdev->owner = THIS_MODULE;
ibdev->node_guid = ppd->guid;
ibdev->phys_port_cnt = dd->num_pports;
@@ -1586,7 +1585,6 @@ int qib_register_ib_device(struct qib_devdata *dd)
* Fill in rvt info object.
*/
dd->verbs_dev.rdi.driver_f.port_callback = qib_create_port_files;
- dd->verbs_dev.rdi.driver_f.get_card_name = qib_get_card_name;
dd->verbs_dev.rdi.driver_f.get_pci_dev = qib_get_pci_dev;
dd->verbs_dev.rdi.driver_f.check_ah = qib_check_ah;
dd->verbs_dev.rdi.driver_f.check_send_wqe = qib_check_send_wqe;
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
index 685ef2293cb8..4210ca14014d 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
@@ -45,7 +45,6 @@
#include "usnic_ib_verbs.h"
#include "usnic_ib_sysfs.h"
#include "usnic_log.h"
-#include "usnic_ib_sysfs.h"
static ssize_t usnic_ib_show_board(struct device *device,
struct device_attribute *attr,
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index aa2456a4f9bd..a688a5669168 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -47,7 +47,6 @@
#include "usnic_log.h"
#include "usnic_uiom.h"
#include "usnic_transport.h"
-#include "usnic_ib_verbs.h"
#define USNIC_DEFAULT_TRANSPORT USNIC_TRANSPORT_ROCE_CUSTOM
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
index 4f7bd3b6a315..44cb1cfba417 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -93,7 +93,7 @@ struct pvrdma_cq {
struct pvrdma_page_dir pdir;
u32 cq_handle;
bool is_kernel;
- atomic_t refcnt;
+ refcount_t refcnt;
struct completion free;
};
@@ -196,7 +196,7 @@ struct pvrdma_qp {
u8 state;
bool is_kernel;
struct mutex mutex; /* QP state mutex. */
- atomic_t refcnt;
+ refcount_t refcnt;
struct completion free;
};
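A sketch of the refcount_t pattern adopted above, on a hypothetical object: refcount_t keeps the inc/dec_and_test API shape of atomic_t but saturates and WARNs on overflow/underflow instead of silently wrapping, closing a class of use-after-free bugs.

#include <linux/refcount.h>
#include <linux/completion.h>

struct demo_obj {
        refcount_t refcnt;
        struct completion free;
};

static void demo_init(struct demo_obj *o)
{
        refcount_set(&o->refcnt, 1);
        init_completion(&o->free);
}

static void demo_put(struct demo_obj *o)
{
        if (refcount_dec_and_test(&o->refcnt))
                complete(&o->free);     /* last ref: let the destroyer proceed */
}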
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
index e529622cefad..faa9478c14a6 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -132,8 +132,9 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
}
cq->ibcq.cqe = entries;
+ cq->is_kernel = !context;
- if (context) {
+ if (!cq->is_kernel) {
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
ret = -EFAULT;
goto err_cq;
@@ -148,8 +149,6 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
npages = ib_umem_page_count(cq->umem);
} else {
- cq->is_kernel = true;
-
/* One extra page for shared ring state */
npages = 1 + (entries * sizeof(struct pvrdma_cqe) +
PAGE_SIZE - 1) / PAGE_SIZE;
@@ -178,7 +177,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
else
pvrdma_page_dir_insert_umem(&cq->pdir, cq->umem, 0);
- atomic_set(&cq->refcnt, 1);
+ refcount_set(&cq->refcnt, 1);
init_completion(&cq->free);
spin_lock_init(&cq->cq_lock);
@@ -202,7 +201,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq;
spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
- if (context) {
+ if (!cq->is_kernel) {
cq->uar = &(to_vucontext(context)->uar);
/* Copy udata back. */
@@ -219,7 +218,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
err_page_dir:
pvrdma_page_dir_cleanup(dev, &cq->pdir);
err_umem:
- if (context)
+ if (!cq->is_kernel)
ib_umem_release(cq->umem);
err_cq:
atomic_dec(&dev->num_cqs);
@@ -230,7 +229,7 @@ err_cq:
static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
{
- if (atomic_dec_and_test(&cq->refcnt))
+ if (refcount_dec_and_test(&cq->refcnt))
complete(&cq->free);
wait_for_completion(&cq->free);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index e92681878c93..d650a9fcde24 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -243,13 +243,13 @@ static int pvrdma_register_device(struct pvrdma_dev *dev)
mutex_init(&dev->port_mutex);
spin_lock_init(&dev->desc_lock);
- dev->cq_tbl = kcalloc(dev->dsr->caps.max_cq, sizeof(void *),
+ dev->cq_tbl = kcalloc(dev->dsr->caps.max_cq, sizeof(struct pvrdma_cq *),
GFP_KERNEL);
if (!dev->cq_tbl)
return ret;
spin_lock_init(&dev->cq_tbl_lock);
- dev->qp_tbl = kcalloc(dev->dsr->caps.max_qp, sizeof(void *),
+ dev->qp_tbl = kcalloc(dev->dsr->caps.max_qp, sizeof(struct pvrdma_qp *),
GFP_KERNEL);
if (!dev->qp_tbl)
goto err_cq_free;
@@ -333,7 +333,7 @@ static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
spin_lock_irqsave(&dev->qp_tbl_lock, flags);
qp = dev->qp_tbl[qpn % dev->dsr->caps.max_qp];
if (qp)
- atomic_inc(&qp->refcnt);
+ refcount_inc(&qp->refcnt);
spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);
if (qp && qp->ibqp.event_handler) {
@@ -346,7 +346,7 @@ static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
ibqp->event_handler(&e, ibqp->qp_context);
}
if (qp) {
- if (atomic_dec_and_test(&qp->refcnt))
+ if (refcount_dec_and_test(&qp->refcnt))
complete(&qp->free);
}
}
@@ -359,7 +359,7 @@ static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
spin_lock_irqsave(&dev->cq_tbl_lock, flags);
cq = dev->cq_tbl[cqn % dev->dsr->caps.max_cq];
if (cq)
- atomic_inc(&cq->refcnt);
+ refcount_inc(&cq->refcnt);
spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
if (cq && cq->ibcq.event_handler) {
@@ -372,7 +372,7 @@ static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
ibcq->event_handler(&e, ibcq->cq_context);
}
if (cq) {
- if (atomic_dec_and_test(&cq->refcnt))
+ if (refcount_dec_and_test(&cq->refcnt))
complete(&cq->free);
}
}
@@ -531,13 +531,13 @@ static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id)
spin_lock_irqsave(&dev->cq_tbl_lock, flags);
cq = dev->cq_tbl[cqne->info % dev->dsr->caps.max_cq];
if (cq)
- atomic_inc(&cq->refcnt);
+ refcount_inc(&cq->refcnt);
spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
if (cq && cq->ibcq.comp_handler)
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
if (cq) {
- if (atomic_dec_and_test(&cq->refcnt))
+ if (refcount_dec_and_test(&cq->refcnt))
complete(&cq->free);
}
pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
@@ -882,8 +882,8 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
dev_info(&pdev->dev, "device version %d, driver version %d\n",
dev->dsr_version, PVRDMA_VERSION);
- dev->dsr = dma_alloc_coherent(&pdev->dev, sizeof(*dev->dsr),
- &dev->dsrbase, GFP_KERNEL);
+ dev->dsr = dma_zalloc_coherent(&pdev->dev, sizeof(*dev->dsr),
+ &dev->dsrbase, GFP_KERNEL);
if (!dev->dsr) {
dev_err(&pdev->dev, "failed to allocate shared region\n");
ret = -ENOMEM;
@@ -891,7 +891,6 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
}
/* Setup the shared region */
- memset(dev->dsr, 0, sizeof(*dev->dsr));
dev->dsr->driver_version = PVRDMA_VERSION;
dev->dsr->gos_info.gos_bits = sizeof(void *) == 4 ?
PVRDMA_GOS_BITS_32 :
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
index 8519f3212e52..fa96fa4fb829 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
@@ -119,10 +119,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
union pvrdma_cmd_resp rsp;
struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
- int nchunks;
int ret;
- int entry;
- struct scatterlist *sg;
if (length == 0 || length > dev->dsr->caps.max_mr_size) {
dev_warn(&dev->pdev->dev, "invalid mem region length\n");
@@ -137,13 +134,9 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return ERR_CAST(umem);
}
- nchunks = 0;
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry)
- nchunks += sg_dma_len(sg) >> PAGE_SHIFT;
-
- if (nchunks < 0 || nchunks > PVRDMA_PAGE_DIR_MAX_PAGES) {
+ if (umem->npages < 0 || umem->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
dev_warn(&dev->pdev->dev, "overflow %d pages in mem region\n",
- nchunks);
+ umem->npages);
ret = -EINVAL;
goto err_umem;
}
@@ -158,7 +151,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mr->mmr.size = length;
mr->umem = umem;
- ret = pvrdma_page_dir_init(dev, &mr->pdir, nchunks, false);
+ ret = pvrdma_page_dir_init(dev, &mr->pdir, umem->npages, false);
if (ret) {
dev_warn(&dev->pdev->dev,
"could not allocate page directory\n");
@@ -175,7 +168,7 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
cmd->length = length;
cmd->pd_handle = to_vpd(pd)->pd_handle;
cmd->access_flags = access_flags;
- cmd->nchunks = nchunks;
+ cmd->nchunks = umem->npages;
cmd->pdir_dma = mr->pdir.dir_dma;
ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index 4059308e1454..7bf518bdbf21 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -245,12 +245,13 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock);
mutex_init(&qp->mutex);
- atomic_set(&qp->refcnt, 1);
+ refcount_set(&qp->refcnt, 1);
init_completion(&qp->free);
qp->state = IB_QPS_RESET;
+ qp->is_kernel = !(pd->uobject && udata);
- if (pd->uobject && udata) {
+ if (!qp->is_kernel) {
dev_dbg(&dev->pdev->dev,
"create queuepair from user space\n");
@@ -291,8 +292,6 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
qp->npages_recv = 0;
qp->npages = qp->npages_send + qp->npages_recv;
} else {
- qp->is_kernel = true;
-
ret = pvrdma_set_sq_size(to_vdev(pd->device),
&init_attr->cap, qp);
if (ret)
@@ -394,7 +393,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
err_pdir:
pvrdma_page_dir_cleanup(dev, &qp->pdir);
err_umem:
- if (pd->uobject && udata) {
+ if (!qp->is_kernel) {
if (qp->rumem)
ib_umem_release(qp->rumem);
if (qp->sumem)
@@ -428,7 +427,7 @@ static void pvrdma_free_qp(struct pvrdma_qp *qp)
pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);
- if (atomic_dec_and_test(&qp->refcnt))
+ if (refcount_dec_and_test(&qp->refcnt))
complete(&qp->free);
wait_for_completion(&qp->free);
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index 97d71e49c092..fb52b669bfce 100644
--- a/drivers/infiniband/sw/rdmavt/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -56,7 +56,7 @@
* rvt_cq_enter - add a new entry to the completion queue
* @cq: completion queue
* @entry: work completion entry to add
- * @sig: true if @entry is solicited
+ * @solicited: true if @entry is solicited
*
* This may be called with qp->s_lock held.
*/
@@ -101,8 +101,7 @@ void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
wc->uqueue[head].opcode = entry->opcode;
wc->uqueue[head].vendor_err = entry->vendor_err;
wc->uqueue[head].byte_len = entry->byte_len;
- wc->uqueue[head].ex.imm_data =
- (__u32 __force)entry->ex.imm_data;
+ wc->uqueue[head].ex.imm_data = entry->ex.imm_data;
wc->uqueue[head].qp_num = entry->qp->qp_num;
wc->uqueue[head].src_qp = entry->src_qp;
wc->uqueue[head].wc_flags = entry->wc_flags;
@@ -198,7 +197,7 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
return ERR_PTR(-EINVAL);
/* Allocate the completion queue structure. */
- cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+ cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, rdi->dparms.node);
if (!cq)
return ERR_PTR(-ENOMEM);
@@ -214,7 +213,9 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
else
sz += sizeof(struct ib_wc) * (entries + 1);
- wc = vmalloc_user(sz);
+ wc = udata ?
+ vmalloc_user(sz) :
+ vzalloc_node(sz, rdi->dparms.node);
if (!wc) {
ret = ERR_PTR(-ENOMEM);
goto bail_cq;
@@ -369,7 +370,9 @@ int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
else
sz += sizeof(struct ib_wc) * (cqe + 1);
- wc = vmalloc_user(sz);
+ wc = udata ?
+ vmalloc_user(sz) :
+ vzalloc_node(sz, rdi->dparms.node);
if (!wc)
return -ENOMEM;
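A sketch of the allocator split introduced above, as a hypothetical wrapper: queue memory destined for a user mapping must come from vmalloc_user(), which zeroes it and marks it safe to mmap, while kernel-only queues can now be zero-filled on the device's home NUMA node instead.

#include <linux/vmalloc.h>

static void *alloc_cq_queue(size_t sz, bool user, int node)
{
        return user ? vmalloc_user(sz) : vzalloc_node(sz, node);
}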
diff --git a/drivers/infiniband/sw/rdmavt/mcast.c b/drivers/infiniband/sw/rdmavt/mcast.c
index b3a38c5e4cad..dd11c6fcd060 100644
--- a/drivers/infiniband/sw/rdmavt/mcast.c
+++ b/drivers/infiniband/sw/rdmavt/mcast.c
@@ -272,7 +272,7 @@ bail:
/**
* rvt_attach_mcast - attach a qp to a multicast group
* @ibqp: Infiniband qp
- * @igd: multicast guid
+ * @gid: multicast guid
* @lid: multicast lid
*
* Return: 0 on success
@@ -335,7 +335,7 @@ bail_mcast:
/**
* rvt_detach_mcast - remove a qp from a multicast group
* @ibqp: Infiniband qp
- * @igd: multicast guid
+ * @gid: multicast guid
* @lid: multicast lid
*
* Return: 0 on success
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 42713511b53b..1b2e5362a3ff 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -768,7 +768,7 @@ bail:
/**
* rvt_map_phys_fmr - set up a fast memory region
- * @ibmfr: the fast memory region to set up
+ * @ibfmr: the fast memory region to set up
* @page_list: the list of pages to associate with the fast memory region
* @list_len: the number of pages to associate with the fast memory region
* @iova: the virtual address of the start of the fast memory region
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 9177df60742a..c82e6bb3d77c 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -269,7 +269,7 @@ no_qp_table:
/**
* free_all_qps - check for QPs still in use
- * @qpt: the QP table to empty
+ * @rdi: rvt device info structure
*
* There should not be any QPs still in use.
* Free memory for table.
@@ -335,9 +335,9 @@ static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
/**
* alloc_qpn - Allocate the next available qpn or zero/one for QP type
* IB_QPT_SMI/IB_QPT_GSI
- *@rdi: rvt device info structure
- *@qpt: queue pair number table pointer
- *@port_num: IB port number, 1 based, comes from core
+ * @rdi: rvt device info structure
+ * @qpt: queue pair number table pointer
+ * @port_num: IB port number, 1 based, comes from core
*
* Return: The queue pair number
*/
@@ -1650,9 +1650,9 @@ static inline int rvt_qp_valid_operation(
/**
* rvt_qp_is_avail - determine queue capacity
- * @qp - the qp
- * @rdi - the rdmavt device
- * @reserved_op - is reserved operation
+ * @qp: the qp
+ * @rdi: the rdmavt device
+ * @reserved_op: is reserved operation
*
* This assumes the s_hlock is held but the s_last
* qp variable is uncontrolled.
@@ -1684,7 +1684,6 @@ static inline int rvt_qp_is_avail(
/* non-reserved operations */
if (likely(qp->s_avail))
return 0;
- smp_read_barrier_depends(); /* see rc.c */
slast = READ_ONCE(qp->s_last);
if (qp->s_head >= slast)
avail = qp->s_size - (qp->s_head - slast);
@@ -2075,6 +2074,7 @@ void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth)
lockdep_assert_held(&qp->s_lock);
qp->s_flags |= RVT_S_WAIT_RNR;
to = rvt_aeth_to_usec(aeth);
+ trace_rvt_rnrnak_add(qp, to);
hrtimer_start(&qp->s_rnr_timer,
ns_to_ktime(1000 * to), HRTIMER_MODE_REL);
}
@@ -2104,17 +2104,14 @@ EXPORT_SYMBOL(rvt_stop_rc_timers);
* stop an rnr timer and return if the timer
* had been pending.
*/
-static int rvt_stop_rnr_timer(struct rvt_qp *qp)
+static void rvt_stop_rnr_timer(struct rvt_qp *qp)
{
- int rval = 0;
-
lockdep_assert_held(&qp->s_lock);
/* Remove QP from rnr timer */
if (qp->s_flags & RVT_S_WAIT_RNR) {
qp->s_flags &= ~RVT_S_WAIT_RNR;
- rval = hrtimer_try_to_cancel(&qp->s_rnr_timer);
+ trace_rvt_rnrnak_stop(qp, 0);
}
- return rval;
}
/**
@@ -2167,6 +2164,7 @@ enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t)
spin_lock_irqsave(&qp->s_lock, flags);
rvt_stop_rnr_timer(qp);
+ trace_rvt_rnrnak_timeout(qp, 0);
rdi->driver_f.schedule_send(qp);
spin_unlock_irqrestore(&qp->s_lock, flags);
return HRTIMER_NORESTART;
@@ -2175,8 +2173,8 @@ EXPORT_SYMBOL(rvt_rc_rnr_retry);
/**
* rvt_qp_iter_init - initial for QP iteration
- * @rdi - rvt devinfo
- * @v - u64 value
+ * @rdi: rvt devinfo
+ * @v: u64 value
*
* This returns an iterator suitable for iterating QPs
* in the system.
diff --git a/drivers/infiniband/sw/rdmavt/srq.c b/drivers/infiniband/sw/rdmavt/srq.c
index f7c48e9023de..3707952b4364 100644
--- a/drivers/infiniband/sw/rdmavt/srq.c
+++ b/drivers/infiniband/sw/rdmavt/srq.c
@@ -90,7 +90,7 @@ struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
srq_init_attr->attr.max_wr > dev->dparms.props.max_srq_wr)
return ERR_PTR(-EINVAL);
- srq = kmalloc(sizeof(*srq), GFP_KERNEL);
+ srq = kzalloc_node(sizeof(*srq), GFP_KERNEL, dev->dparms.node);
if (!srq)
return ERR_PTR(-ENOMEM);
@@ -101,7 +101,10 @@ struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
srq->rq.max_sge = srq_init_attr->attr.max_sge;
sz = sizeof(struct ib_sge) * srq->rq.max_sge +
sizeof(struct rvt_rwqe);
- srq->rq.wq = vmalloc_user(sizeof(struct rvt_rwq) + srq->rq.size * sz);
+ srq->rq.wq = udata ?
+ vmalloc_user(sizeof(struct rvt_rwq) + srq->rq.size * sz) :
+ vzalloc_node(sizeof(struct rvt_rwq) + srq->rq.size * sz,
+ dev->dparms.node);
if (!srq->rq.wq) {
ret = ERR_PTR(-ENOMEM);
goto bail_srq;
@@ -129,16 +132,12 @@ struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
ret = ERR_PTR(err);
goto bail_ip;
}
- } else {
- srq->ip = NULL;
}
/*
* ib_create_srq() will initialize srq->ibsrq.
*/
spin_lock_init(&srq->rq.lock);
- srq->rq.wq->head = 0;
- srq->rq.wq->tail = 0;
srq->limit = srq_init_attr->attr.srq_limit;
spin_lock(&dev->n_srqs_lock);
@@ -200,7 +199,10 @@ int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
sz = sizeof(struct rvt_rwqe) +
srq->rq.max_sge * sizeof(struct ib_sge);
size = attr->max_wr + 1;
- wq = vmalloc_user(sizeof(struct rvt_rwq) + size * sz);
+ wq = udata ?
+ vmalloc_user(sizeof(struct rvt_rwq) + size * sz) :
+ vzalloc_node(sizeof(struct rvt_rwq) + size * sz,
+ dev->dparms.node);
if (!wq)
return -ENOMEM;
diff --git a/drivers/infiniband/sw/rdmavt/trace.h b/drivers/infiniband/sw/rdmavt/trace.h
index bb4b1e710f22..36ddbd291ee0 100644
--- a/drivers/infiniband/sw/rdmavt/trace.h
+++ b/drivers/infiniband/sw/rdmavt/trace.h
@@ -45,8 +45,8 @@
*
*/
-#define RDI_DEV_ENTRY(rdi) __string(dev, rdi->driver_f.get_card_name(rdi))
-#define RDI_DEV_ASSIGN(rdi) __assign_str(dev, rdi->driver_f.get_card_name(rdi))
+#define RDI_DEV_ENTRY(rdi) __string(dev, rvt_get_ibdev_name(rdi))
+#define RDI_DEV_ASSIGN(rdi) __assign_str(dev, rvt_get_ibdev_name(rdi))
#include "trace_rvt.h"
#include "trace_qp.h"
diff --git a/drivers/infiniband/sw/rdmavt/trace_qp.h b/drivers/infiniband/sw/rdmavt/trace_qp.h
index 4c77a3119bda..efc9d814b032 100644
--- a/drivers/infiniband/sw/rdmavt/trace_qp.h
+++ b/drivers/infiniband/sw/rdmavt/trace_qp.h
@@ -85,6 +85,48 @@ DEFINE_EVENT(rvt_qphash_template, rvt_qpremove,
TP_PROTO(struct rvt_qp *qp, u32 bucket),
TP_ARGS(qp, bucket));
+DECLARE_EVENT_CLASS(
+ rvt_rnrnak_template,
+ TP_PROTO(struct rvt_qp *qp, u32 to),
+ TP_ARGS(qp, to),
+ TP_STRUCT__entry(
+ RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
+ __field(u32, qpn)
+ __field(void *, hrtimer)
+ __field(u32, s_flags)
+ __field(u32, to)
+ ),
+ TP_fast_assign(
+ RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device))
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->hrtimer = &qp->s_rnr_timer;
+ __entry->s_flags = qp->s_flags;
+ __entry->to = to;
+ ),
+ TP_printk(
+ "[%s] qpn 0x%x hrtimer 0x%p s_flags 0x%x timeout %u us",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->hrtimer,
+ __entry->s_flags,
+ __entry->to
+ )
+);
+
+DEFINE_EVENT(
+ rvt_rnrnak_template, rvt_rnrnak_add,
+ TP_PROTO(struct rvt_qp *qp, u32 to),
+ TP_ARGS(qp, to));
+
+DEFINE_EVENT(
+ rvt_rnrnak_template, rvt_rnrnak_timeout,
+ TP_PROTO(struct rvt_qp *qp, u32 to),
+ TP_ARGS(qp, to));
+
+DEFINE_EVENT(
+ rvt_rnrnak_template, rvt_rnrnak_stop,
+ TP_PROTO(struct rvt_qp *qp, u32 to),
+ TP_ARGS(qp, to));
#endif /* __RVT_TRACE_QP_H */
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index 64bdd442078a..a4553b2b3696 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -224,7 +224,8 @@ static int rvt_modify_port(struct ib_device *ibdev, u8 port_num,
* rvt_query_pkey - Return a pkey from the table at a given index
* @ibdev: Verbs IB dev
* @port_num: Port number, 1 based from ib core
- * @intex: Index into pkey table
+ * @index: Index into pkey table
+ * @pkey: returned pkey from the port pkey table
*
* Return: 0 on failure pkey otherwise
*/
@@ -255,7 +256,7 @@ static int rvt_query_pkey(struct ib_device *ibdev, u8 port_num, u16 index,
* rvt_query_gid - Return a gid from the table
* @ibdev: Verbs IB dev
* @port_num: Port number, 1 based from ib core
- * @index: = Index in table
+ * @guid_index: Index in table
* @gid: Gid to return
*
* Return: 0 on success
@@ -297,8 +298,8 @@ static inline struct rvt_ucontext *to_iucontext(struct ib_ucontext
/**
* rvt_alloc_ucontext - Allocate a user context
- * @ibdev: Vers IB dev
- * @data: User data allocated
+ * @ibdev: Verbs IB dev
+ * @udata: User data allocated
*/
static struct ib_ucontext *rvt_alloc_ucontext(struct ib_device *ibdev,
struct ib_udata *udata)
@@ -413,7 +414,6 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb)
* required for rdmavt to function.
*/
if ((!rdi->driver_f.port_callback) ||
- (!rdi->driver_f.get_card_name) ||
(!rdi->driver_f.get_pci_dev))
return -EINVAL;
break;
diff --git a/drivers/infiniband/sw/rdmavt/vt.h b/drivers/infiniband/sw/rdmavt/vt.h
index f363505312be..8823b2e7aac6 100644
--- a/drivers/infiniband/sw/rdmavt/vt.h
+++ b/drivers/infiniband/sw/rdmavt/vt.h
@@ -63,19 +63,19 @@
#define rvt_pr_info(rdi, fmt, ...) \
__rvt_pr_info(rdi->driver_f.get_pci_dev(rdi), \
- rdi->driver_f.get_card_name(rdi), \
+ rvt_get_ibdev_name(rdi), \
fmt, \
##__VA_ARGS__)
#define rvt_pr_warn(rdi, fmt, ...) \
__rvt_pr_warn(rdi->driver_f.get_pci_dev(rdi), \
- rdi->driver_f.get_card_name(rdi), \
+ rvt_get_ibdev_name(rdi), \
fmt, \
##__VA_ARGS__)
#define rvt_pr_err(rdi, fmt, ...) \
__rvt_pr_err(rdi->driver_f.get_pci_dev(rdi), \
- rdi->driver_f.get_card_name(rdi), \
+ rvt_get_ibdev_name(rdi), \
fmt, \
##__VA_ARGS__)
diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig
index 320bffc980d8..bad4a576d7cf 100644
--- a/drivers/infiniband/sw/rxe/Kconfig
+++ b/drivers/infiniband/sw/rxe/Kconfig
@@ -1,8 +1,8 @@
config RDMA_RXE
tristate "Software RDMA over Ethernet (RoCE) driver"
depends on INET && PCI && INFINIBAND
- depends on NET_UDP_TUNNEL
- depends on CRYPTO_CRC32
+ select NET_UDP_TUNNEL
+ select CRYPTO_CRC32
select DMA_VIRT_OPS
---help---
This driver implements the InfiniBand RDMA transport over
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 8c3d30b3092d..b7debb6f2eac 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -77,12 +77,6 @@ void rxe_release(struct kref *kref)
ib_dealloc_device(&rxe->ib_dev);
}
-void rxe_dev_put(struct rxe_dev *rxe)
-{
- kref_put(&rxe->ref_cnt, rxe_release);
-}
-EXPORT_SYMBOL_GPL(rxe_dev_put);
-
/* initialize rxe device parameters */
static int rxe_init_device_param(struct rxe_dev *rxe)
{
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
index 6447d736d5a4..7d232611303f 100644
--- a/drivers/infiniband/sw/rxe/rxe.h
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -57,6 +57,7 @@
#include "rxe_hdr.h"
#include "rxe_param.h"
#include "rxe_verbs.h"
+#include "rxe_loc.h"
#define RXE_UVERBS_ABI_VERSION (1)
@@ -95,7 +96,10 @@ void rxe_remove_all(void);
int rxe_rcv(struct sk_buff *skb);
-void rxe_dev_put(struct rxe_dev *rxe);
+static inline void rxe_dev_put(struct rxe_dev *rxe)
+{
+ kref_put(&rxe->ref_cnt, rxe_release);
+}
struct rxe_dev *net_to_rxe(struct net_device *ndev);
struct rxe_dev *get_rxe_by_name(const char *name);
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index d7472a442a2c..96c3a6c5c4b5 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -237,7 +237,6 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
void rxe_release(struct kref *kref);
-void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify);
int rxe_completer(void *arg);
int rxe_requester(void *arg);
int rxe_responder(void *arg);
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 59dee10bebcb..159246b03867 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -82,7 +82,7 @@ struct rxe_dev *get_rxe_by_name(const char *name)
}
-struct rxe_recv_sockets recv_sockets;
+static struct rxe_recv_sockets recv_sockets;
struct device *rxe_dma_device(struct rxe_dev *rxe)
{
@@ -452,31 +452,26 @@ static void rxe_skb_tx_dtor(struct sk_buff *skb)
int rxe_send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
- struct sk_buff *nskb;
struct rxe_av *av;
int err;
av = rxe_get_av(pkt);
- nskb = skb_clone(skb, GFP_ATOMIC);
- if (!nskb)
- return -ENOMEM;
-
- nskb->destructor = rxe_skb_tx_dtor;
- nskb->sk = pkt->qp->sk->sk;
+ skb->destructor = rxe_skb_tx_dtor;
+ skb->sk = pkt->qp->sk->sk;
rxe_add_ref(pkt->qp);
atomic_inc(&pkt->qp->skb_out);
if (av->network_type == RDMA_NETWORK_IPV4) {
- err = ip_local_out(dev_net(skb_dst(skb)->dev), nskb->sk, nskb);
+ err = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
} else if (av->network_type == RDMA_NETWORK_IPV6) {
- err = ip6_local_out(dev_net(skb_dst(skb)->dev), nskb->sk, nskb);
+ err = ip6_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
} else {
pr_err("Unknown layer 3 protocol: %d\n", av->network_type);
atomic_dec(&pkt->qp->skb_out);
rxe_drop_ref(pkt->qp);
- kfree_skb(nskb);
+ kfree_skb(skb);
return -EINVAL;
}
@@ -485,7 +480,6 @@ int rxe_send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb)
return -EAGAIN;
}
- kfree_skb(skb);
return 0;
}
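The rxe_send() rework above changes skb ownership: the function now consumes the skb it is handed on every path, instead of transmitting a clone and freeing the original, saving one skb_clone() per packet. A generic sketch of consuming semantics, with hypothetical names:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int demo_xmit(struct sk_buff *skb)
{
        if (!skb->dev) {
                kfree_skb(skb);         /* error path frees the skb */
                return -ENODEV;
        }
        return dev_queue_xmit(skb);     /* success path hands it to the stack */
}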
diff --git a/drivers/infiniband/sw/rxe/rxe_net.h b/drivers/infiniband/sw/rxe/rxe_net.h
index 1c06b3bfe1b6..728d8c71b36a 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.h
+++ b/drivers/infiniband/sw/rxe/rxe_net.h
@@ -43,7 +43,6 @@ struct rxe_recv_sockets {
struct socket *sk6;
};
-extern struct rxe_recv_sockets recv_sockets;
extern struct notifier_block rxe_net_notifier;
void rxe_release_udp_tunnel(struct socket *sk);
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 4469592b839d..137d6c0c49d4 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -824,9 +824,9 @@ void rxe_qp_destroy(struct rxe_qp *qp)
}
/* called when the last reference to the qp is dropped */
-void rxe_qp_cleanup(struct rxe_pool_entry *arg)
+static void rxe_qp_do_cleanup(struct work_struct *work)
{
- struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);
+ struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);
rxe_drop_all_mcast_groups(qp);
@@ -859,3 +859,11 @@ void rxe_qp_cleanup(struct rxe_pool_entry *arg)
kernel_sock_shutdown(qp->sk, SHUT_RDWR);
sock_release(qp->sk);
}
+
+/* called when the last reference to the qp is dropped */
+void rxe_qp_cleanup(struct rxe_pool_entry *arg)
+{
+ struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);
+
+ execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
+}
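A sketch of the execute_in_process_context() deferral used above, with hypothetical names: if the final reference is dropped in atomic context, cleanup that may sleep (sock_release() and friends) is queued to a workqueue; in process context it runs immediately.

#include <linux/workqueue.h>

struct demo_res {
        struct execute_work cleanup_work;
};

static void demo_cleanup(struct work_struct *work)
{
        struct demo_res *res =
                container_of(work, struct demo_res, cleanup_work.work);

        /* sleeping teardown is safe here */
        (void)res;
}

static void demo_release(struct demo_res *res)
{
        /* runs demo_cleanup() now if in process context, else queues it */
        execute_in_process_context(demo_cleanup, &res->cleanup_work);
}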
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index fb8c83e055e1..4c3f899241d4 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -336,7 +336,6 @@ static int rxe_match_dgid(struct rxe_dev *rxe, struct sk_buff *skb)
{
union ib_gid dgid;
union ib_gid *pdgid;
- u16 index;
if (skb->protocol == htons(ETH_P_IP)) {
ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
@@ -348,7 +347,7 @@ static int rxe_match_dgid(struct rxe_dev *rxe, struct sk_buff *skb)
return ib_find_cached_gid_by_port(&rxe->ib_dev, pdgid,
IB_GID_TYPE_ROCE_UDP_ENCAP,
- 1, rxe->ndev, &index);
+ 1, rxe->ndev, NULL);
}
/* rxe_rcv is called from the interface driver */
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 26a7f923045b..7bdaf71b8221 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -594,15 +594,8 @@ int rxe_requester(void *arg)
rxe_add_ref(qp);
next_wqe:
- if (unlikely(!qp->valid)) {
- rxe_drain_req_pkts(qp, true);
+ if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
goto exit;
- }
-
- if (unlikely(qp->req.state == QP_STATE_ERROR)) {
- rxe_drain_req_pkts(qp, true);
- goto exit;
- }
if (unlikely(qp->req.state == QP_STATE_RESET)) {
qp->req.wqe_index = consumer_index(qp->sq.queue);
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 4240866a5331..d37bb9b97569 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -863,8 +863,7 @@ static enum resp_states do_complete(struct rxe_qp *qp,
if (pkt->mask & RXE_IMMDT_MASK) {
uwc->wc_flags |= IB_WC_WITH_IMM;
- uwc->ex.imm_data =
- (__u32 __force)immdt_imm(pkt);
+ uwc->ex.imm_data = immdt_imm(pkt);
}
if (pkt->mask & RXE_IETH_MASK) {
@@ -1210,7 +1209,7 @@ static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
}
}
-void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
+static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
{
struct sk_buff *skb;
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index d03002b9d84d..7210a784abb4 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -814,6 +814,8 @@ static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
(queue_count(qp->sq.queue) > 1);
rxe_run_task(&qp->req.task, must_sched);
+ if (unlikely(qp->req.state == QP_STATE_ERROR))
+ rxe_run_task(&qp->comp.task, 1);
return err;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 0c2dbe45c729..1019f5e7dbdd 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -35,6 +35,7 @@
#define RXE_VERBS_H
#include <linux/interrupt.h>
+#include <linux/workqueue.h>
#include <rdma/rdma_user_rxe.h>
#include "rxe_pool.h"
#include "rxe_task.h"
@@ -281,6 +282,8 @@ struct rxe_qp {
struct timer_list rnr_nak_timer;
spinlock_t state_lock; /* guard requester and completer */
+
+ struct execute_work cleanup_work;
};
enum rxe_mem_state {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 71ea9e26666c..962fbcb57dc7 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -766,12 +766,14 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
skb_orphan(skb);
skb_dst_drop(skb);
- if (netif_queue_stopped(dev))
- if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
- IB_CQ_REPORT_MISSED_EVENTS)) {
+ if (netif_queue_stopped(dev)) {
+ rc = ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
+ IB_CQ_REPORT_MISSED_EVENTS);
+ if (unlikely(rc < 0))
ipoib_warn(priv, "IPoIB/CM:request notify on send CQ failed\n");
+ else if (rc)
napi_schedule(&priv->send_napi);
- }
+ }
rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), tx_req);
if (unlikely(rc)) {
@@ -876,7 +878,7 @@ int ipoib_cm_dev_open(struct net_device *dev)
priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev);
if (IS_ERR(priv->cm.id)) {
- printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
+ pr_warn("%s: failed to create CM ID\n", priv->ca->name);
ret = PTR_ERR(priv->cm.id);
goto err_cm;
}
@@ -884,8 +886,8 @@ int ipoib_cm_dev_open(struct net_device *dev)
ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num),
0);
if (ret) {
- printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name,
- IPOIB_CM_IETF_ID | priv->qp->qp_num);
+ pr_warn("%s: failed to listen on ID 0x%llx\n", priv->ca->name,
+ IPOIB_CM_IETF_ID | priv->qp->qp_num);
goto err_listen;
}
@@ -1562,7 +1564,7 @@ static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
if (IS_ERR(priv->cm.srq)) {
if (PTR_ERR(priv->cm.srq) != -ENOSYS)
- printk(KERN_WARNING "%s: failed to allocate SRQ, error %ld\n",
+ pr_warn("%s: failed to allocate SRQ, error %ld\n",
priv->ca->name, PTR_ERR(priv->cm.srq));
priv->cm.srq = NULL;
return;
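The return-value handling fixed in the first hunk above, restated as a standalone sketch with assumed names: with IB_CQ_REPORT_MISSED_EVENTS, ib_req_notify_cq() returns < 0 on error, 0 when the CQ is armed, and > 0 when completions may have been missed, in which case polling must be rescheduled rather than treated as a failure.

#include <rdma/ib_verbs.h>
#include <linux/netdevice.h>

static void rearm_cq(struct ib_cq *cq, struct napi_struct *napi)
{
        int rc = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
                                      IB_CQ_REPORT_MISSED_EVENTS);

        if (unlikely(rc < 0))
                pr_warn("request notify on CQ failed\n");
        else if (rc)
                napi_schedule(napi);    /* missed events: poll again */
}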
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index e6151a29c412..10384ea50bed 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -644,7 +644,7 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
if (netif_queue_stopped(dev))
if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
- IB_CQ_REPORT_MISSED_EVENTS))
+ IB_CQ_REPORT_MISSED_EVENTS) < 0)
ipoib_warn(priv, "request notify on send CQ failed\n");
rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
@@ -1085,8 +1085,7 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
netif_addr_unlock_bh(priv->dev);
- err = ib_find_gid(priv->ca, &search_gid, IB_GID_TYPE_IB,
- priv->dev, &port, &index);
+ err = ib_find_gid(priv->ca, &search_gid, priv->dev, &port, &index);
netif_addr_lock_bh(priv->dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 8880351df179..5930c7d9a8fb 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -768,13 +768,30 @@ static void path_rec_completion(int status,
if (!status) {
struct rdma_ah_attr av;
- if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av))
+ if (!ib_init_ah_attr_from_path(priv->ca, priv->port,
+ pathrec, &av))
ah = ipoib_create_ah(dev, priv->pd, &av);
}
spin_lock_irqsave(&priv->lock, flags);
if (!IS_ERR_OR_NULL(ah)) {
+ /*
+ * pathrec.dgid is used as the database key from the LLADDR,
+ * it must remain unchanged even if the SA returns a different
+ * GID to use in the AH.
+ */
+ if (memcmp(pathrec->dgid.raw, path->pathrec.dgid.raw,
+ sizeof(union ib_gid))) {
+ ipoib_dbg(
+ priv,
+ "%s got PathRec for gid %pI6 while asked for %pI6\n",
+ dev->name, pathrec->dgid.raw,
+ path->pathrec.dgid.raw);
+ memcpy(pathrec->dgid.raw, path->pathrec.dgid.raw,
+ sizeof(union ib_gid));
+ }
+
path->pathrec = *pathrec;
old_ah = path->ah;
@@ -840,6 +857,23 @@ static void path_rec_completion(int status,
}
}
+static void init_path_rec(struct ipoib_dev_priv *priv, struct ipoib_path *path,
+ void *gid)
+{
+ path->dev = priv->dev;
+
+ if (rdma_cap_opa_ah(priv->ca, priv->port))
+ path->pathrec.rec_type = SA_PATH_REC_TYPE_OPA;
+ else
+ path->pathrec.rec_type = SA_PATH_REC_TYPE_IB;
+
+ memcpy(path->pathrec.dgid.raw, gid, sizeof(union ib_gid));
+ path->pathrec.sgid = priv->local_gid;
+ path->pathrec.pkey = cpu_to_be16(priv->pkey);
+ path->pathrec.numb_path = 1;
+ path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;
+}
+
static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
@@ -852,21 +886,11 @@ static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
if (!path)
return NULL;
- path->dev = dev;
-
skb_queue_head_init(&path->queue);
INIT_LIST_HEAD(&path->neigh_list);
- if (rdma_cap_opa_ah(priv->ca, priv->port))
- path->pathrec.rec_type = SA_PATH_REC_TYPE_OPA;
- else
- path->pathrec.rec_type = SA_PATH_REC_TYPE_IB;
- memcpy(path->pathrec.dgid.raw, gid, sizeof (union ib_gid));
- path->pathrec.sgid = priv->local_gid;
- path->pathrec.pkey = cpu_to_be16(priv->pkey);
- path->pathrec.numb_path = 1;
- path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;
+ init_path_rec(priv, path, gid);
return path;
}
@@ -1005,6 +1029,10 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
spin_lock_irqsave(&priv->lock, flags);
+ /* no broadcast means that all paths are (going to be) not valid */
+ if (!priv->broadcast)
+ goto drop_and_unlock;
+
path = __path_find(dev, phdr->hwaddr + 4);
if (!path || !path->valid) {
int new_path = 0;
@@ -1014,6 +1042,10 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
new_path = 1;
}
if (path) {
+ if (!new_path)
+ /* make sure there is no changes in the existing path record */
+ init_path_rec(priv, path, phdr->hwaddr + 4);
+
if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
push_pseudo_header(skb, phdr->hwaddr);
__skb_queue_tail(&path->queue, skb);
@@ -1030,8 +1062,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
} else
__path_add(dev, path);
} else {
- ++dev->stats.tx_dropped;
- dev_kfree_skb_any(skb);
+ goto drop_and_unlock;
}
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1051,11 +1082,16 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
push_pseudo_header(skb, phdr->hwaddr);
__skb_queue_tail(&path->queue, skb);
} else {
- ++dev->stats.tx_dropped;
- dev_kfree_skb_any(skb);
+ goto drop_and_unlock;
}
spin_unlock_irqrestore(&priv->lock, flags);
+ return;
+
+drop_and_unlock:
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ spin_unlock_irqrestore(&priv->lock, flags);
}
static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -1674,8 +1710,8 @@ static int ipoib_dev_init_default(struct net_device *dev)
priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
if (!priv->tx_ring) {
- printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
- priv->ca->name, ipoib_sendq_size);
+ pr_warn("%s: failed to allocate TX ring (%d entries)\n",
+ priv->ca->name, ipoib_sendq_size);
goto out_rx_ring_cleanup;
}
@@ -2207,16 +2243,17 @@ static struct net_device *ipoib_add_port(const char *format,
int result = -ENOMEM;
priv = ipoib_intf_alloc(hca, port, format);
- if (!priv)
+ if (!priv) {
+ pr_warn("%s, %d: ipoib_intf_alloc failed\n", hca->name, port);
goto alloc_mem_failed;
+ }
SET_NETDEV_DEV(priv->dev, hca->dev.parent);
priv->dev->dev_id = port - 1;
result = ib_query_port(hca, port, &attr);
if (result) {
- printk(KERN_WARNING "%s: ib_query_port %d failed\n",
- hca->name, port);
+ pr_warn("%s: ib_query_port %d failed\n", hca->name, port);
goto device_init_failed;
}
@@ -2231,8 +2268,8 @@ static struct net_device *ipoib_add_port(const char *format,
result = ib_query_pkey(hca, port, 0, &priv->pkey);
if (result) {
- printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
- hca->name, port, result);
+ pr_warn("%s: ib_query_pkey port %d failed (ret = %d)\n",
+ hca->name, port, result);
goto device_init_failed;
}
@@ -2249,8 +2286,8 @@ static struct net_device *ipoib_add_port(const char *format,
result = ib_query_gid(hca, port, 0, &priv->local_gid, NULL);
if (result) {
- printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
- hca->name, port, result);
+ pr_warn("%s: ib_query_gid port %d failed (ret = %d)\n",
+ hca->name, port, result);
goto device_init_failed;
}
@@ -2260,8 +2297,8 @@ static struct net_device *ipoib_add_port(const char *format,
result = ipoib_dev_init(priv->dev, hca, port);
if (result) {
- printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
- hca->name, port, result);
+ pr_warn("%s: failed to initialize port %d (ret = %d)\n",
+ hca->name, port, result);
goto device_init_failed;
}
@@ -2271,8 +2308,8 @@ static struct net_device *ipoib_add_port(const char *format,
result = register_netdev(priv->dev);
if (result) {
- printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
- hca->name, port, result);
+ pr_warn("%s: couldn't register ipoib port %d; error %d\n",
+ hca->name, port, result);
goto register_failed;
}
@@ -2337,8 +2374,7 @@ static void ipoib_add_one(struct ib_device *device)
}
if (!count) {
- pr_err("Failed to init port, removing it\n");
- ipoib_remove_one(device, dev_list);
+ kfree(dev_list);
return;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index a1ed25422b72..984a88096f39 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -178,7 +178,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_rx_completion, NULL,
priv, &cq_attr);
if (IS_ERR(priv->recv_cq)) {
- printk(KERN_WARNING "%s: failed to create receive CQ\n", ca->name);
+ pr_warn("%s: failed to create receive CQ\n", ca->name);
goto out_cm_dev_cleanup;
}
@@ -187,7 +187,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
priv->send_cq = ib_create_cq(priv->ca, ipoib_ib_tx_completion, NULL,
priv, &cq_attr);
if (IS_ERR(priv->send_cq)) {
- printk(KERN_WARNING "%s: failed to create send CQ\n", ca->name);
+ pr_warn("%s: failed to create send CQ\n", ca->name);
goto out_free_recv_cq;
}
@@ -208,7 +208,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
priv->qp = ib_create_qp(priv->pd, &init_attr);
if (IS_ERR(priv->qp)) {
- printk(KERN_WARNING "%s: failed to create QP\n", ca->name);
+ pr_warn("%s: failed to create QP\n", ca->name);
goto out_free_send_cq;
}
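
The printk(KERN_WARNING ...) to pr_warn() conversions in the two files above work because pr_warn(fmt, ...) is defined as printk(KERN_WARNING pr_fmt(fmt), ...), which shortens the call sites and lets a per-file pr_fmt definition add a common prefix. A self-contained userspace mimic (the "ipoib: " prefix is illustrative, not taken from the driver):

#include <stdio.h>

#define pr_fmt(fmt) "ipoib: " fmt
#define pr_warn(fmt, ...) fprintf(stderr, "[warn] " pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	/* prints: [warn] ipoib: failed to allocate TX ring (128 entries) */
	pr_warn("failed to allocate TX ring (%d entries)\n", 128);
	return 0;
}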
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 2a07692007bd..df49c4eb67f7 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -142,8 +142,7 @@ iser_prepare_write_cmd(struct iscsi_task *task,
hdr->write_va = cpu_to_be64(mem_reg->sge.addr + unsol_sz);
}
- iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
- "VA:%#llX + unsol:%d\n",
+ iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X VA:%#llX + unsol:%d\n",
task->itt, mem_reg->rkey,
(unsigned long long)mem_reg->sge.addr, unsol_sz);
}
@@ -436,7 +435,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
{
struct iser_conn *iser_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
- struct iser_tx_desc *tx_desc = NULL;
+ struct iser_tx_desc *tx_desc;
struct iser_mem_reg *mem_reg;
unsigned long buf_offset;
unsigned long data_seg_len;
@@ -452,10 +451,8 @@ int iser_send_data_out(struct iscsi_conn *conn,
__func__,(int)itt,(int)data_seg_len,(int)buf_offset);
tx_desc = kmem_cache_zalloc(ig.desc_cache, GFP_ATOMIC);
- if (tx_desc == NULL) {
- iser_err("Failed to alloc desc for post dataout\n");
+ if (!tx_desc)
return -ENOMEM;
- }
tx_desc->type = ISCSI_TX_DATAOUT;
tx_desc->cqe.done = iser_dataout_comp;
@@ -475,8 +472,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
tx_desc->num_sge = 2;
if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
- iser_err("Offset:%ld & DSL:%ld in Data-Out "
- "inconsistent with total len:%ld, itt:%d\n",
+ iser_err("Offset:%ld & DSL:%ld in Data-Out inconsistent with total len:%ld, itt:%d\n",
buf_offset, data_seg_len,
iser_task->data[ISER_DIR_OUT].data_len, itt);
err = -EINVAL;
@@ -614,8 +610,8 @@ iser_check_remote_inv(struct iser_conn *iser_conn,
iser_conn, rkey);
if (unlikely(!iser_conn->snd_w_inv)) {
- iser_err("conn %p: unexpected remote invalidation, "
- "terminating connection\n", iser_conn);
+ iser_err("conn %p: unexpected remote invalidation, terminating connection\n",
+ iser_conn);
return -EPROTO;
}
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 1b02283ce20e..fff40b097947 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -2124,6 +2124,9 @@ isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn,
u32 rkey, offset;
int ret;
+ if (cmd->ctx_init_done)
+ goto rdma_ctx_post;
+
if (dir == DMA_FROM_DEVICE) {
addr = cmd->write_va;
rkey = cmd->write_stag;
@@ -2151,11 +2154,15 @@ isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn,
se_cmd->t_data_sg, se_cmd->t_data_nents,
offset, addr, rkey, dir);
}
+
if (ret < 0) {
isert_err("Cmd: %p failed to prepare RDMA res\n", cmd);
return ret;
}
+ cmd->ctx_init_done = true;
+
+rdma_ctx_post:
ret = rdma_rw_ctx_post(&cmd->rw, conn->qp, port_num, cqe, chain_wr);
if (ret < 0)
isert_err("Cmd: %p failed to post RDMA res\n", cmd);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index d6fd248320ae..3b296bac4f60 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -126,6 +126,7 @@ struct isert_cmd {
struct rdma_rw_ctx rw;
struct work_struct comp_work;
struct scatterlist sg;
+ bool ctx_init_done;
};
static inline struct isert_cmd *tx_desc_to_cmd(struct iser_tx_desc *desc)
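
The new ctx_init_done flag above makes isert_rdma_rw_ctx_post() idempotent with respect to context setup: initialization runs once, and a retried post jumps straight past it. A small sketch of the init-once guard (struct and function names are made up):

#include <stdbool.h>
#include <stdio.h>

struct cmd_ctx {
	bool ctx_init_done;	/* set after one-time RDMA R/W setup */
};

static int ctx_post(struct cmd_ctx *cmd)
{
	if (cmd->ctx_init_done)
		goto post;

	puts("init (must not run twice)");
	cmd->ctx_init_done = true;

post:
	puts("post");
	return 0;
}

int main(void)
{
	struct cmd_ctx cmd = { 0 };

	ctx_post(&cmd);		/* init + post */
	ctx_post(&cmd);		/* post only */
	return 0;
}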
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
index 4b615c1451e7..15711dcc6f58 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
@@ -710,7 +710,7 @@ vema_get_port(struct opa_vnic_ctrl_port *cport, u8 port_num)
/**
* opa_vnic_vema_send_trap -- This function sends a trap to the EM
- * @cport: pointer to vnic control port
+ * @adapter: pointer to vnic adapter
* @data: pointer to trap data filled by calling function
* @lid: issuers lid (encap_slid from vesw_port_info)
*
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 972d4b3c5223..b48843833d69 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -41,6 +41,7 @@
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/lockdep.h>
+#include <linux/inet.h>
#include <rdma/ib_cache.h>
#include <linux/atomic.h>
@@ -144,7 +145,9 @@ static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
const char *opname);
-static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
+static int srp_ib_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
+static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *event);
static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;
@@ -265,8 +268,8 @@ static void srp_qp_event(struct ib_event *event, void *context)
ib_event_msg(event->event), event->event);
}
-static int srp_init_qp(struct srp_target_port *target,
- struct ib_qp *qp)
+static int srp_init_ib_qp(struct srp_target_port *target,
+ struct ib_qp *qp)
{
struct ib_qp_attr *attr;
int ret;
@@ -277,7 +280,7 @@ static int srp_init_qp(struct srp_target_port *target,
ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
target->srp_host->port,
- be16_to_cpu(target->pkey),
+ be16_to_cpu(target->ib_cm.pkey),
&attr->pkey_index);
if (ret)
goto out;
@@ -298,32 +301,110 @@ out:
return ret;
}
-static int srp_new_cm_id(struct srp_rdma_ch *ch)
+static int srp_new_ib_cm_id(struct srp_rdma_ch *ch)
{
struct srp_target_port *target = ch->target;
struct ib_cm_id *new_cm_id;
new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
- srp_cm_handler, ch);
+ srp_ib_cm_handler, ch);
if (IS_ERR(new_cm_id))
return PTR_ERR(new_cm_id);
- if (ch->cm_id)
- ib_destroy_cm_id(ch->cm_id);
- ch->cm_id = new_cm_id;
+ if (ch->ib_cm.cm_id)
+ ib_destroy_cm_id(ch->ib_cm.cm_id);
+ ch->ib_cm.cm_id = new_cm_id;
if (rdma_cap_opa_ah(target->srp_host->srp_dev->dev,
target->srp_host->port))
- ch->path.rec_type = SA_PATH_REC_TYPE_OPA;
+ ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_OPA;
else
- ch->path.rec_type = SA_PATH_REC_TYPE_IB;
- ch->path.sgid = target->sgid;
- ch->path.dgid = target->orig_dgid;
- ch->path.pkey = target->pkey;
- ch->path.service_id = target->service_id;
+ ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_IB;
+ ch->ib_cm.path.sgid = target->sgid;
+ ch->ib_cm.path.dgid = target->ib_cm.orig_dgid;
+ ch->ib_cm.path.pkey = target->ib_cm.pkey;
+ ch->ib_cm.path.service_id = target->ib_cm.service_id;
return 0;
}
+static const char *inet_ntop(const void *sa, char *dst, unsigned int size)
+{
+ switch (((struct sockaddr *)sa)->sa_family) {
+ case AF_INET:
+ snprintf(dst, size, "%pI4",
+ &((struct sockaddr_in *)sa)->sin_addr);
+ break;
+ case AF_INET6:
+ snprintf(dst, size, "%pI6",
+ &((struct sockaddr_in6 *)sa)->sin6_addr);
+ break;
+ default:
+ snprintf(dst, size, "???");
+ break;
+ }
+ return dst;
+}
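
This driver-local inet_ntop() is built on the kernel's %pI4/%pI6 printk extensions, since the kernel has no libc. In userspace, the libc function of the same name does the equivalent job, taking the raw address rather than the sockaddr:

#include <arpa/inet.h>
#include <stdio.h>

int main(void)
{
	struct sockaddr_in sa = { .sin_family = AF_INET };
	char dst[INET6_ADDRSTRLEN];

	inet_pton(AF_INET, "192.0.2.1", &sa.sin_addr);
	printf("%s\n", inet_ntop(AF_INET, &sa.sin_addr, dst, sizeof(dst)));
	return 0;
}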
+
+static int srp_new_rdma_cm_id(struct srp_rdma_ch *ch)
+{
+ struct srp_target_port *target = ch->target;
+ struct rdma_cm_id *new_cm_id;
+ char src_addr[64], dst_addr[64];
+ int ret;
+
+ new_cm_id = rdma_create_id(target->net, srp_rdma_cm_handler, ch,
+ RDMA_PS_TCP, IB_QPT_RC);
+ if (IS_ERR(new_cm_id)) {
+ ret = PTR_ERR(new_cm_id);
+ new_cm_id = NULL;
+ goto out;
+ }
+
+ init_completion(&ch->done);
+ ret = rdma_resolve_addr(new_cm_id, target->rdma_cm.src_specified ?
+ (struct sockaddr *)&target->rdma_cm.src : NULL,
+ (struct sockaddr *)&target->rdma_cm.dst,
+ SRP_PATH_REC_TIMEOUT_MS);
+ if (ret) {
+ pr_err("No route available from %s to %s (%d)\n",
+ target->rdma_cm.src_specified ?
+ inet_ntop(&target->rdma_cm.src, src_addr,
+ sizeof(src_addr)) : "(any)",
+ inet_ntop(&target->rdma_cm.dst, dst_addr,
+ sizeof(dst_addr)),
+ ret);
+ goto out;
+ }
+ ret = wait_for_completion_interruptible(&ch->done);
+ if (ret < 0)
+ goto out;
+
+ ret = ch->status;
+ if (ret) {
+ pr_err("Resolving address %s failed (%d)\n",
+ inet_ntop(&target->rdma_cm.dst, dst_addr,
+ sizeof(dst_addr)),
+ ret);
+ goto out;
+ }
+
+ swap(ch->rdma_cm.cm_id, new_cm_id);
+
+out:
+ if (new_cm_id)
+ rdma_destroy_id(new_cm_id);
+
+ return ret;
+}
+
+static int srp_new_cm_id(struct srp_rdma_ch *ch)
+{
+ struct srp_target_port *target = ch->target;
+
+ return target->using_rdma_cm ? srp_new_rdma_cm_id(ch) :
+ srp_new_ib_cm_id(ch);
+}
+
static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
struct srp_device *dev = target->srp_host->srp_dev;
@@ -521,16 +602,25 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
init_attr->send_cq = send_cq;
init_attr->recv_cq = recv_cq;
- qp = ib_create_qp(dev->pd, init_attr);
- if (IS_ERR(qp)) {
- ret = PTR_ERR(qp);
+ if (target->using_rdma_cm) {
+ ret = rdma_create_qp(ch->rdma_cm.cm_id, dev->pd, init_attr);
+ qp = ch->rdma_cm.cm_id->qp;
+ } else {
+ qp = ib_create_qp(dev->pd, init_attr);
+ if (!IS_ERR(qp)) {
+ ret = srp_init_ib_qp(target, qp);
+ if (ret)
+ ib_destroy_qp(qp);
+ } else {
+ ret = PTR_ERR(qp);
+ }
+ }
+ if (ret) {
+ pr_err("QP creation failed for dev %s: %d\n",
+ dev_name(&dev->dev->dev), ret);
goto err_send_cq;
}
- ret = srp_init_qp(target, qp);
- if (ret)
- goto err_qp;
-
if (dev->use_fast_reg) {
fr_pool = srp_alloc_fr_pool(target);
if (IS_ERR(fr_pool)) {
@@ -574,7 +664,10 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
return 0;
err_qp:
- ib_destroy_qp(qp);
+ if (target->using_rdma_cm)
+ rdma_destroy_qp(ch->rdma_cm.cm_id);
+ else
+ ib_destroy_qp(qp);
err_send_cq:
ib_free_cq(send_cq);
@@ -600,9 +693,16 @@ static void srp_free_ch_ib(struct srp_target_port *target,
if (!ch->target)
return;
- if (ch->cm_id) {
- ib_destroy_cm_id(ch->cm_id);
- ch->cm_id = NULL;
+ if (target->using_rdma_cm) {
+ if (ch->rdma_cm.cm_id) {
+ rdma_destroy_id(ch->rdma_cm.cm_id);
+ ch->rdma_cm.cm_id = NULL;
+ }
+ } else {
+ if (ch->ib_cm.cm_id) {
+ ib_destroy_cm_id(ch->ib_cm.cm_id);
+ ch->ib_cm.cm_id = NULL;
+ }
}
/* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
@@ -658,16 +758,16 @@ static void srp_path_rec_completion(int status,
shost_printk(KERN_ERR, target->scsi_host,
PFX "Got failed path rec status %d\n", status);
else
- ch->path = *pathrec;
+ ch->ib_cm.path = *pathrec;
complete(&ch->done);
}
-static int srp_lookup_path(struct srp_rdma_ch *ch)
+static int srp_ib_lookup_path(struct srp_rdma_ch *ch)
{
struct srp_target_port *target = ch->target;
int ret = -ENODEV;
- ch->path.numb_path = 1;
+ ch->ib_cm.path.numb_path = 1;
init_completion(&ch->done);
@@ -678,10 +778,10 @@ static int srp_lookup_path(struct srp_rdma_ch *ch)
if (!scsi_host_get(target->scsi_host))
goto out;
- ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
+ ch->ib_cm.path_query_id = ib_sa_path_rec_get(&srp_sa_client,
target->srp_host->srp_dev->dev,
target->srp_host->port,
- &ch->path,
+ &ch->ib_cm.path,
IB_SA_PATH_REC_SERVICE_ID |
IB_SA_PATH_REC_DGID |
IB_SA_PATH_REC_SGID |
@@ -690,8 +790,8 @@ static int srp_lookup_path(struct srp_rdma_ch *ch)
SRP_PATH_REC_TIMEOUT_MS,
GFP_KERNEL,
srp_path_rec_completion,
- ch, &ch->path_query);
- ret = ch->path_query_id;
+ ch, &ch->ib_cm.path_query);
+ ret = ch->ib_cm.path_query_id;
if (ret < 0)
goto put;
@@ -702,7 +802,10 @@ static int srp_lookup_path(struct srp_rdma_ch *ch)
ret = ch->status;
if (ret < 0)
shost_printk(KERN_WARNING, target->scsi_host,
- PFX "Path record query failed\n");
+ PFX "Path record query failed: sgid %pI6, dgid %pI6, pkey %#04x, service_id %#16llx\n",
+ ch->ib_cm.path.sgid.raw, ch->ib_cm.path.dgid.raw,
+ be16_to_cpu(target->ib_cm.pkey),
+ be64_to_cpu(target->ib_cm.service_id));
put:
scsi_host_put(target->scsi_host);
@@ -711,6 +814,34 @@ out:
return ret;
}
+static int srp_rdma_lookup_path(struct srp_rdma_ch *ch)
+{
+ struct srp_target_port *target = ch->target;
+ int ret;
+
+ init_completion(&ch->done);
+
+ ret = rdma_resolve_route(ch->rdma_cm.cm_id, SRP_PATH_REC_TIMEOUT_MS);
+ if (ret)
+ return ret;
+
+ wait_for_completion_interruptible(&ch->done);
+
+ if (ch->status != 0)
+ shost_printk(KERN_WARNING, target->scsi_host,
+ PFX "Path resolution failed\n");
+
+ return ch->status;
+}
+
+static int srp_lookup_path(struct srp_rdma_ch *ch)
+{
+ struct srp_target_port *target = ch->target;
+
+ return target->using_rdma_cm ? srp_rdma_lookup_path(ch) :
+ srp_ib_lookup_path(ch);
+}
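
srp_lookup_path() and srp_new_cm_id() above both dispatch on target->using_rdma_cm, keeping the IB CM and RDMA/CM code paths separate behind one entry point. A toy version of the pattern (all names are placeholders):

#include <stdbool.h>
#include <stdio.h>

struct channel {
	bool using_rdma_cm;
};

static int ib_lookup(struct channel *ch)
{
	puts("IB: SA path record query");
	return 0;
}

static int rdma_lookup(struct channel *ch)
{
	puts("RDMA/CM: rdma_resolve_route()");
	return 0;
}

static int lookup_path(struct channel *ch)
{
	return ch->using_rdma_cm ? rdma_lookup(ch) : ib_lookup(ch);
}

int main(void)
{
	struct channel ch = { .using_rdma_cm = true };

	return lookup_path(&ch);
}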
+
static u8 srp_get_subnet_timeout(struct srp_host *host)
{
struct ib_port_attr attr;
@@ -732,48 +863,76 @@ static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
struct srp_target_port *target = ch->target;
struct {
- struct ib_cm_req_param param;
- struct srp_login_req priv;
+ struct rdma_conn_param rdma_param;
+ struct srp_login_req_rdma rdma_req;
+ struct ib_cm_req_param ib_param;
+ struct srp_login_req ib_req;
} *req = NULL;
+ char *ipi, *tpi;
int status;
- u8 subnet_timeout;
-
- subnet_timeout = srp_get_subnet_timeout(target->srp_host);
req = kzalloc(sizeof *req, GFP_KERNEL);
if (!req)
return -ENOMEM;
- req->param.primary_path = &ch->path;
- req->param.alternate_path = NULL;
- req->param.service_id = target->service_id;
- req->param.qp_num = ch->qp->qp_num;
- req->param.qp_type = ch->qp->qp_type;
- req->param.private_data = &req->priv;
- req->param.private_data_len = sizeof req->priv;
- req->param.flow_control = 1;
-
- get_random_bytes(&req->param.starting_psn, 4);
- req->param.starting_psn &= 0xffffff;
+ req->ib_param.flow_control = 1;
+ req->ib_param.retry_count = target->tl_retry_count;
/*
* Pick some arbitrary defaults here; we could make these
* module parameters if anyone cared about setting them.
*/
- req->param.responder_resources = 4;
- req->param.remote_cm_response_timeout = subnet_timeout + 2;
- req->param.local_cm_response_timeout = subnet_timeout + 2;
- req->param.retry_count = target->tl_retry_count;
- req->param.rnr_retry_count = 7;
- req->param.max_cm_retries = 15;
-
- req->priv.opcode = SRP_LOGIN_REQ;
- req->priv.tag = 0;
- req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
- req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
+ req->ib_param.responder_resources = 4;
+ req->ib_param.rnr_retry_count = 7;
+ req->ib_param.max_cm_retries = 15;
+
+ req->ib_req.opcode = SRP_LOGIN_REQ;
+ req->ib_req.tag = 0;
+ req->ib_req.req_it_iu_len = cpu_to_be32(target->max_iu_len);
+ req->ib_req.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
SRP_BUF_FORMAT_INDIRECT);
- req->priv.req_flags = (multich ? SRP_MULTICHAN_MULTI :
- SRP_MULTICHAN_SINGLE);
+ req->ib_req.req_flags = (multich ? SRP_MULTICHAN_MULTI :
+ SRP_MULTICHAN_SINGLE);
+
+ if (target->using_rdma_cm) {
+ req->rdma_param.flow_control = req->ib_param.flow_control;
+ req->rdma_param.responder_resources =
+ req->ib_param.responder_resources;
+ req->rdma_param.initiator_depth = req->ib_param.initiator_depth;
+ req->rdma_param.retry_count = req->ib_param.retry_count;
+ req->rdma_param.rnr_retry_count = req->ib_param.rnr_retry_count;
+ req->rdma_param.private_data = &req->rdma_req;
+ req->rdma_param.private_data_len = sizeof(req->rdma_req);
+
+ req->rdma_req.opcode = req->ib_req.opcode;
+ req->rdma_req.tag = req->ib_req.tag;
+ req->rdma_req.req_it_iu_len = req->ib_req.req_it_iu_len;
+ req->rdma_req.req_buf_fmt = req->ib_req.req_buf_fmt;
+ req->rdma_req.req_flags = req->ib_req.req_flags;
+
+ ipi = req->rdma_req.initiator_port_id;
+ tpi = req->rdma_req.target_port_id;
+ } else {
+ u8 subnet_timeout;
+
+ subnet_timeout = srp_get_subnet_timeout(target->srp_host);
+
+ req->ib_param.primary_path = &ch->ib_cm.path;
+ req->ib_param.alternate_path = NULL;
+ req->ib_param.service_id = target->ib_cm.service_id;
+ get_random_bytes(&req->ib_param.starting_psn, 4);
+ req->ib_param.starting_psn &= 0xffffff;
+ req->ib_param.qp_num = ch->qp->qp_num;
+ req->ib_param.qp_type = ch->qp->qp_type;
+ req->ib_param.local_cm_response_timeout = subnet_timeout + 2;
+ req->ib_param.remote_cm_response_timeout = subnet_timeout + 2;
+ req->ib_param.private_data = &req->ib_req;
+ req->ib_param.private_data_len = sizeof(req->ib_req);
+
+ ipi = req->ib_req.initiator_port_id;
+ tpi = req->ib_req.target_port_id;
+ }
+
/*
* In the published SRP specification (draft rev. 16a), the
* port identifier format is 8 bytes of ID extension followed
@@ -784,19 +943,15 @@ static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
* recognized by the I/O Class they report.
*/
if (target->io_class == SRP_REV10_IB_IO_CLASS) {
- memcpy(req->priv.initiator_port_id,
- &target->sgid.global.interface_id, 8);
- memcpy(req->priv.initiator_port_id + 8,
- &target->initiator_ext, 8);
- memcpy(req->priv.target_port_id, &target->ioc_guid, 8);
- memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
+ memcpy(ipi, &target->sgid.global.interface_id, 8);
+ memcpy(ipi + 8, &target->initiator_ext, 8);
+ memcpy(tpi, &target->ioc_guid, 8);
+ memcpy(tpi + 8, &target->id_ext, 8);
} else {
- memcpy(req->priv.initiator_port_id,
- &target->initiator_ext, 8);
- memcpy(req->priv.initiator_port_id + 8,
- &target->sgid.global.interface_id, 8);
- memcpy(req->priv.target_port_id, &target->id_ext, 8);
- memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
+ memcpy(ipi, &target->initiator_ext, 8);
+ memcpy(ipi + 8, &target->sgid.global.interface_id, 8);
+ memcpy(tpi, &target->id_ext, 8);
+ memcpy(tpi + 8, &target->ioc_guid, 8);
}
/*
@@ -809,12 +964,14 @@ static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
PFX "Topspin/Cisco initiator port ID workaround "
"activated for target GUID %016llx\n",
be64_to_cpu(target->ioc_guid));
- memset(req->priv.initiator_port_id, 0, 8);
- memcpy(req->priv.initiator_port_id + 8,
- &target->srp_host->srp_dev->dev->node_guid, 8);
+ memset(ipi, 0, 8);
+ memcpy(ipi + 8, &target->srp_host->srp_dev->dev->node_guid, 8);
}
- status = ib_send_cm_req(ch->cm_id, &req->param);
+ if (target->using_rdma_cm)
+ status = rdma_connect(ch->rdma_cm.cm_id, &req->rdma_param);
+ else
+ status = ib_send_cm_req(ch->ib_cm.cm_id, &req->ib_param);
kfree(req);
@@ -841,14 +998,23 @@ static bool srp_queue_remove_work(struct srp_target_port *target)
static void srp_disconnect_target(struct srp_target_port *target)
{
struct srp_rdma_ch *ch;
- int i;
+ int i, ret;
/* XXX should send SRP_I_LOGOUT request */
for (i = 0; i < target->ch_count; i++) {
ch = &target->ch[i];
ch->connected = false;
- if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
+ ret = 0;
+ if (target->using_rdma_cm) {
+ if (ch->rdma_cm.cm_id)
+ rdma_disconnect(ch->rdma_cm.cm_id);
+ } else {
+ if (ch->ib_cm.cm_id)
+ ret = ib_send_cm_dreq(ch->ib_cm.cm_id,
+ NULL, 0);
+ }
+ if (ret < 0) {
shost_printk(KERN_DEBUG, target->scsi_host,
PFX "Sending CM DREQ failed\n");
}
@@ -962,6 +1128,7 @@ static void srp_remove_target(struct srp_target_port *target)
scsi_remove_host(target->scsi_host);
srp_stop_rport_timers(target->rport);
srp_disconnect_target(target);
+ kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
for (i = 0; i < target->ch_count; i++) {
ch = &target->ch[i];
srp_free_ch_ib(target, ch);
@@ -2349,7 +2516,7 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
struct srp_target_port *target = ch->target;
struct ib_qp_attr *qp_attr = NULL;
int attr_mask = 0;
- int ret;
+ int ret = 0;
int i;
if (lrsp->opcode == SRP_LOGIN_RSP) {
@@ -2379,40 +2546,42 @@ static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
goto error;
}
- ret = -ENOMEM;
- qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
- if (!qp_attr)
- goto error;
-
- qp_attr->qp_state = IB_QPS_RTR;
- ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
- if (ret)
- goto error_free;
-
- ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
- if (ret)
- goto error_free;
-
for (i = 0; i < target->queue_size; i++) {
struct srp_iu *iu = ch->rx_ring[i];
ret = srp_post_recv(ch, iu);
if (ret)
- goto error_free;
+ goto error;
}
- qp_attr->qp_state = IB_QPS_RTS;
- ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
- if (ret)
- goto error_free;
+ if (!target->using_rdma_cm) {
+ ret = -ENOMEM;
+ qp_attr = kmalloc(sizeof(*qp_attr), GFP_KERNEL);
+ if (!qp_attr)
+ goto error;
- target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
+ qp_attr->qp_state = IB_QPS_RTR;
+ ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
+ if (ret)
+ goto error_free;
- ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
- if (ret)
- goto error_free;
+ ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
+ if (ret)
+ goto error_free;
- ret = ib_send_cm_rtu(cm_id, NULL, 0);
+ qp_attr->qp_state = IB_QPS_RTS;
+ ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
+ if (ret)
+ goto error_free;
+
+ target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
+
+ ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
+ if (ret)
+ goto error_free;
+
+ ret = ib_send_cm_rtu(cm_id, NULL, 0);
+ }
error_free:
kfree(qp_attr);
@@ -2421,41 +2590,43 @@ error:
ch->status = ret;
}
-static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
- struct ib_cm_event *event,
- struct srp_rdma_ch *ch)
+static void srp_ib_cm_rej_handler(struct ib_cm_id *cm_id,
+ struct ib_cm_event *event,
+ struct srp_rdma_ch *ch)
{
struct srp_target_port *target = ch->target;
struct Scsi_Host *shost = target->scsi_host;
struct ib_class_port_info *cpi;
int opcode;
+ u16 dlid;
switch (event->param.rej_rcvd.reason) {
case IB_CM_REJ_PORT_CM_REDIRECT:
cpi = event->param.rej_rcvd.ari;
- sa_path_set_dlid(&ch->path, ntohs(cpi->redirect_lid));
- ch->path.pkey = cpi->redirect_pkey;
+ dlid = be16_to_cpu(cpi->redirect_lid);
+ sa_path_set_dlid(&ch->ib_cm.path, dlid);
+ ch->ib_cm.path.pkey = cpi->redirect_pkey;
cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
- memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
+ memcpy(ch->ib_cm.path.dgid.raw, cpi->redirect_gid, 16);
- ch->status = sa_path_get_dlid(&ch->path) ?
- SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
+ ch->status = dlid ? SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
break;
case IB_CM_REJ_PORT_REDIRECT:
if (srp_target_is_topspin(target)) {
+ union ib_gid *dgid = &ch->ib_cm.path.dgid;
+
/*
* Topspin/Cisco SRP gateways incorrectly send
* reject reason code 25 when they mean 24
* (port redirect).
*/
- memcpy(ch->path.dgid.raw,
- event->param.rej_rcvd.ari, 16);
+ memcpy(dgid->raw, event->param.rej_rcvd.ari, 16);
shost_printk(KERN_DEBUG, shost,
PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
- be64_to_cpu(ch->path.dgid.global.subnet_prefix),
- be64_to_cpu(ch->path.dgid.global.interface_id));
+ be64_to_cpu(dgid->global.subnet_prefix),
+ be64_to_cpu(dgid->global.interface_id));
ch->status = SRP_PORT_REDIRECT;
} else {
@@ -2484,7 +2655,8 @@ static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
shost_printk(KERN_WARNING, shost, PFX
"SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
target->sgid.raw,
- target->orig_dgid.raw, reason);
+ target->ib_cm.orig_dgid.raw,
+ reason);
} else
shost_printk(KERN_WARNING, shost,
" REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
@@ -2504,7 +2676,7 @@ static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
}
}
-static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
+static int srp_ib_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
struct srp_rdma_ch *ch = cm_id->context;
struct srp_target_port *target = ch->target;
@@ -2527,7 +2699,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
comp = 1;
- srp_cm_rej_handler(cm_id, event, ch);
+ srp_ib_cm_rej_handler(cm_id, event, ch);
break;
case IB_CM_DREQ_RECEIVED:
@@ -2565,6 +2737,135 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
return 0;
}
+static void srp_rdma_cm_rej_handler(struct srp_rdma_ch *ch,
+ struct rdma_cm_event *event)
+{
+ struct srp_target_port *target = ch->target;
+ struct Scsi_Host *shost = target->scsi_host;
+ int opcode;
+
+ switch (event->status) {
+ case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
+ shost_printk(KERN_WARNING, shost,
+ " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
+ ch->status = -ECONNRESET;
+ break;
+
+ case IB_CM_REJ_CONSUMER_DEFINED:
+ opcode = *(u8 *) event->param.conn.private_data;
+ if (opcode == SRP_LOGIN_REJ) {
+ struct srp_login_rej *rej =
+ (struct srp_login_rej *)
+ event->param.conn.private_data;
+ u32 reason = be32_to_cpu(rej->reason);
+
+ if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
+ shost_printk(KERN_WARNING, shost,
+ PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
+ else
+ shost_printk(KERN_WARNING, shost,
+ PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
+ } else {
+ shost_printk(KERN_WARNING, shost,
+ " REJ reason: IB_CM_REJ_CONSUMER_DEFINED, opcode 0x%02x\n",
+ opcode);
+ }
+ ch->status = -ECONNRESET;
+ break;
+
+ case IB_CM_REJ_STALE_CONN:
+ shost_printk(KERN_WARNING, shost,
+ " REJ reason: stale connection\n");
+ ch->status = SRP_STALE_CONN;
+ break;
+
+ default:
+ shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
+ event->status);
+ ch->status = -ECONNRESET;
+ break;
+ }
+}
+
+static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
+ struct rdma_cm_event *event)
+{
+ struct srp_rdma_ch *ch = cm_id->context;
+ struct srp_target_port *target = ch->target;
+ int comp = 0;
+
+ switch (event->event) {
+ case RDMA_CM_EVENT_ADDR_RESOLVED:
+ ch->status = 0;
+ comp = 1;
+ break;
+
+ case RDMA_CM_EVENT_ADDR_ERROR:
+ ch->status = -ENXIO;
+ comp = 1;
+ break;
+
+ case RDMA_CM_EVENT_ROUTE_RESOLVED:
+ ch->status = 0;
+ comp = 1;
+ break;
+
+ case RDMA_CM_EVENT_ROUTE_ERROR:
+ case RDMA_CM_EVENT_UNREACHABLE:
+ ch->status = -EHOSTUNREACH;
+ comp = 1;
+ break;
+
+ case RDMA_CM_EVENT_CONNECT_ERROR:
+ shost_printk(KERN_DEBUG, target->scsi_host,
+ PFX "Sending CM REQ failed\n");
+ comp = 1;
+ ch->status = -ECONNRESET;
+ break;
+
+ case RDMA_CM_EVENT_ESTABLISHED:
+ comp = 1;
+ srp_cm_rep_handler(NULL, event->param.conn.private_data, ch);
+ break;
+
+ case RDMA_CM_EVENT_REJECTED:
+ shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
+ comp = 1;
+
+ srp_rdma_cm_rej_handler(ch, event);
+ break;
+
+ case RDMA_CM_EVENT_DISCONNECTED:
+ if (ch->connected) {
+ shost_printk(KERN_WARNING, target->scsi_host,
+ PFX "received DREQ\n");
+ rdma_disconnect(ch->rdma_cm.cm_id);
+ comp = 1;
+ ch->status = 0;
+ queue_work(system_long_wq, &target->tl_err_work);
+ }
+ break;
+
+ case RDMA_CM_EVENT_TIMEWAIT_EXIT:
+ shost_printk(KERN_ERR, target->scsi_host,
+ PFX "connection closed\n");
+
+ comp = 1;
+ ch->status = 0;
+ break;
+
+ default:
+ shost_printk(KERN_WARNING, target->scsi_host,
+ PFX "Unhandled CM event %d\n", event->event);
+ break;
+ }
+
+ if (comp)
+ complete(&ch->done);
+
+ return 0;
+}
+
/**
* srp_change_queue_depth - setting device queue depth
* @sdev: scsi device struct
@@ -2717,6 +3018,16 @@ static int srp_reset_host(struct scsi_cmnd *scmnd)
return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
}
+static int srp_target_alloc(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+ struct srp_target_port *target = host_to_target(shost);
+
+ if (target->target_can_queue)
+ starget->can_queue = target->target_can_queue;
+ return 0;
+}
+
static int srp_slave_alloc(struct scsi_device *sdev)
{
struct Scsi_Host *shost = sdev->host;
@@ -2766,7 +3077,10 @@ static ssize_t show_service_id(struct device *dev,
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
- return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
+ if (target->using_rdma_cm)
+ return -ENOENT;
+ return sprintf(buf, "0x%016llx\n",
+ be64_to_cpu(target->ib_cm.service_id));
}
static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
@@ -2774,7 +3088,9 @@ static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
- return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
+ if (target->using_rdma_cm)
+ return -ENOENT;
+ return sprintf(buf, "0x%04x\n", be16_to_cpu(target->ib_cm.pkey));
}
static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
@@ -2791,7 +3107,9 @@ static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
struct srp_target_port *target = host_to_target(class_to_shost(dev));
struct srp_rdma_ch *ch = &target->ch[0];
- return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
+ if (target->using_rdma_cm)
+ return -ENOENT;
+ return sprintf(buf, "%pI6\n", ch->ib_cm.path.dgid.raw);
}
static ssize_t show_orig_dgid(struct device *dev,
@@ -2799,7 +3117,9 @@ static ssize_t show_orig_dgid(struct device *dev,
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
- return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
+ if (target->using_rdma_cm)
+ return -ENOENT;
+ return sprintf(buf, "%pI6\n", target->ib_cm.orig_dgid.raw);
}
static ssize_t show_req_lim(struct device *dev,
@@ -2921,6 +3241,7 @@ static struct scsi_host_template srp_template = {
.module = THIS_MODULE,
.name = "InfiniBand SRP initiator",
.proc_name = DRV_NAME,
+ .target_alloc = srp_target_alloc,
.slave_alloc = srp_slave_alloc,
.slave_configure = srp_slave_configure,
.info = srp_target_info,
@@ -3044,6 +3365,9 @@ static bool srp_conn_unique(struct srp_host *host,
if (t != target &&
target->id_ext == t->id_ext &&
target->ioc_guid == t->ioc_guid &&
+ (!target->using_rdma_cm ||
+ memcmp(&target->rdma_cm.dst, &t->rdma_cm.dst,
+ sizeof(target->rdma_cm.dst)) == 0) &&
target->initiator_ext == t->initiator_ext) {
ret = false;
break;
@@ -3060,6 +3384,9 @@ out:
*
* id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
* pkey=<P_Key>,service_id=<service ID>
+ * or
+ * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,
+ * [src=<IPv4 address>,]dest=<IPv4 address>:<port number>
*
* to the add_target sysfs attribute.
*/
@@ -3080,11 +3407,20 @@ enum {
SRP_OPT_COMP_VECTOR = 1 << 12,
SRP_OPT_TL_RETRY_COUNT = 1 << 13,
SRP_OPT_QUEUE_SIZE = 1 << 14,
- SRP_OPT_ALL = (SRP_OPT_ID_EXT |
- SRP_OPT_IOC_GUID |
- SRP_OPT_DGID |
- SRP_OPT_PKEY |
- SRP_OPT_SERVICE_ID),
+ SRP_OPT_IP_SRC = 1 << 15,
+ SRP_OPT_IP_DEST = 1 << 16,
+ SRP_OPT_TARGET_CAN_QUEUE = 1 << 17,
+};
+
+static unsigned int srp_opt_mandatory[] = {
+ SRP_OPT_ID_EXT |
+ SRP_OPT_IOC_GUID |
+ SRP_OPT_DGID |
+ SRP_OPT_PKEY |
+ SRP_OPT_SERVICE_ID,
+ SRP_OPT_ID_EXT |
+ SRP_OPT_IOC_GUID |
+ SRP_OPT_IP_DEST,
};
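
Each srp_opt_mandatory[] entry is one complete, alternative set of required login options; parsing succeeds when opt_mask fully covers at least one entry, as the replacement validation loop further down shows. A standalone sketch of that check (the option values are illustrative):

#include <stdio.h>

enum {
	OPT_ID_EXT  = 1 << 0,
	OPT_DGID    = 1 << 2,
	OPT_IP_DEST = 1 << 16,
};

static const unsigned int mandatory[] = {
	OPT_ID_EXT | OPT_DGID,		/* IB CM style target spec */
	OPT_ID_EXT | OPT_IP_DEST,	/* RDMA/CM style target spec */
};

static int check(unsigned int opt_mask)
{
	for (size_t i = 0; i < sizeof(mandatory) / sizeof(mandatory[0]); i++)
		if ((opt_mask & mandatory[i]) == mandatory[i])
			return 0;
	return -1;
}

int main(void)
{
	printf("%d\n", check(OPT_ID_EXT | OPT_IP_DEST));	/* 0: ok */
	printf("%d\n", check(OPT_ID_EXT));			/* -1: incomplete */
	return 0;
}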
static const match_table_t srp_opt_tokens = {
@@ -3095,6 +3431,7 @@ static const match_table_t srp_opt_tokens = {
{ SRP_OPT_SERVICE_ID, "service_id=%s" },
{ SRP_OPT_MAX_SECT, "max_sect=%d" },
{ SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
+ { SRP_OPT_TARGET_CAN_QUEUE, "target_can_queue=%d" },
{ SRP_OPT_IO_CLASS, "io_class=%x" },
{ SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
{ SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
@@ -3103,15 +3440,33 @@ static const match_table_t srp_opt_tokens = {
{ SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
{ SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
{ SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
+ { SRP_OPT_IP_SRC, "src=%s" },
+ { SRP_OPT_IP_DEST, "dest=%s" },
{ SRP_OPT_ERR, NULL }
};
-static int srp_parse_options(const char *buf, struct srp_target_port *target)
+static int srp_parse_in(struct net *net, struct sockaddr_storage *sa,
+ const char *addr_port_str)
+{
+ char *addr = kstrdup(addr_port_str, GFP_KERNEL);
+ char *port_str = addr;
+ int ret;
+
+ if (!addr)
+ return -ENOMEM;
+ strsep(&port_str, ":");
+ ret = inet_pton_with_scope(net, AF_UNSPEC, addr, port_str, sa);
+ kfree(addr);
+ return ret;
+}
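
srp_parse_in() duplicates the string and lets strsep() split "addr:port" in place; if no colon is present the port pointer ends up NULL, which inet_pton_with_scope() accepts. The same split in portable C (the helper name is made up):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_addr_port(const char *in, char **addr, char **port)
{
	char *dup = strdup(in);

	if (!dup)
		return -1;
	*addr = dup;
	*port = dup;
	strsep(port, ":");	/* NUL-terminates addr, advances port */
	return 0;
}

int main(void)
{
	char *addr, *port;

	if (parse_addr_port("192.0.2.1:3260", &addr, &port))
		return 1;
	printf("addr=%s port=%s\n", addr, port ? port : "(none)");
	free(addr);
	return 0;
}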
+
+static int srp_parse_options(struct net *net, const char *buf,
+ struct srp_target_port *target)
{
char *options, *sep_opt;
char *p;
- char dgid[3];
substring_t args[MAX_OPT_ARGS];
+ unsigned long long ull;
int opt_mask = 0;
int token;
int ret = -EINVAL;
@@ -3136,7 +3491,13 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
ret = -ENOMEM;
goto out;
}
- target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
+ ret = kstrtoull(p, 16, &ull);
+ if (ret) {
+ pr_warn("invalid id_ext parameter '%s'\n", p);
+ kfree(p);
+ goto out;
+ }
+ target->id_ext = cpu_to_be64(ull);
kfree(p);
break;
@@ -3146,7 +3507,13 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
ret = -ENOMEM;
goto out;
}
- target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
+ ret = kstrtoull(p, 16, &ull);
+ if (ret) {
+ pr_warn("invalid ioc_guid parameter '%s'\n", p);
+ kfree(p);
+ goto out;
+ }
+ target->ioc_guid = cpu_to_be64(ull);
kfree(p);
break;
@@ -3162,16 +3529,10 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
goto out;
}
- for (i = 0; i < 16; ++i) {
- strlcpy(dgid, p + i * 2, sizeof(dgid));
- if (sscanf(dgid, "%hhx",
- &target->orig_dgid.raw[i]) < 1) {
- ret = -EINVAL;
- kfree(p);
- goto out;
- }
- }
+ ret = hex2bin(target->ib_cm.orig_dgid.raw, p, 16);
kfree(p);
+ if (ret < 0)
+ goto out;
break;
case SRP_OPT_PKEY:
@@ -3179,7 +3540,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
pr_warn("bad P_Key parameter '%s'\n", p);
goto out;
}
- target->pkey = cpu_to_be16(token);
+ target->ib_cm.pkey = cpu_to_be16(token);
break;
case SRP_OPT_SERVICE_ID:
@@ -3188,7 +3549,45 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
ret = -ENOMEM;
goto out;
}
- target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
+ ret = kstrtoull(p, 16, &ull);
+ if (ret) {
+ pr_warn("bad service_id parameter '%s'\n", p);
+ kfree(p);
+ goto out;
+ }
+ target->ib_cm.service_id = cpu_to_be64(ull);
+ kfree(p);
+ break;
+
+ case SRP_OPT_IP_SRC:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = srp_parse_in(net, &target->rdma_cm.src.ss, p);
+ if (ret < 0) {
+ pr_warn("bad source parameter '%s'\n", p);
+ kfree(p);
+ goto out;
+ }
+ target->rdma_cm.src_specified = true;
+ kfree(p);
+ break;
+
+ case SRP_OPT_IP_DEST:
+ p = match_strdup(args);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p);
+ if (ret < 0) {
+ pr_warn("bad dest parameter '%s'\n", p);
+ kfree(p);
+ goto out;
+ }
+ target->using_rdma_cm = true;
kfree(p);
break;
@@ -3221,6 +3620,15 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
target->scsi_host->cmd_per_lun = token;
break;
+ case SRP_OPT_TARGET_CAN_QUEUE:
+ if (match_int(args, &token) || token < 1) {
+ pr_warn("bad max target_can_queue parameter '%s'\n",
+ p);
+ goto out;
+ }
+ target->target_can_queue = token;
+ break;
+
case SRP_OPT_IO_CLASS:
if (match_hex(args, &token)) {
pr_warn("bad IO class parameter '%s'\n", p);
@@ -3242,7 +3650,13 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
ret = -ENOMEM;
goto out;
}
- target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
+ ret = kstrtoull(p, 16, &ull);
+ if (ret) {
+ pr_warn("bad initiator_ext value '%s'\n", p);
+ kfree(p);
+ goto out;
+ }
+ target->initiator_ext = cpu_to_be64(ull);
kfree(p);
break;
@@ -3297,14 +3711,14 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
}
}
- if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
- ret = 0;
- else
- for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
- if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
- !(srp_opt_tokens[i].token & opt_mask))
- pr_warn("target creation request is missing parameter '%s'\n",
- srp_opt_tokens[i].pattern);
+ for (i = 0; i < ARRAY_SIZE(srp_opt_mandatory); i++) {
+ if ((opt_mask & srp_opt_mandatory[i]) == srp_opt_mandatory[i]) {
+ ret = 0;
+ break;
+ }
+ }
+ if (ret)
+ pr_warn("target creation request is missing one or more parameters\n");
if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
&& (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
@@ -3345,6 +3759,7 @@ static ssize_t srp_create_target(struct device *dev,
target = host_to_target(target_host);
+ target->net = kobj_ns_grab_current(KOBJ_NS_TYPE_NET);
target->io_class = SRP_REV16A_IB_IO_CLASS;
target->scsi_host = target_host;
target->srp_host = host;
@@ -3366,18 +3781,29 @@ static ssize_t srp_create_target(struct device *dev,
if (ret < 0)
goto put;
- ret = srp_parse_options(buf, target);
+ ret = srp_parse_options(target->net, buf, target);
if (ret)
goto out;
target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
if (!srp_conn_unique(target->srp_host, target)) {
- shost_printk(KERN_INFO, target->scsi_host,
- PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
- be64_to_cpu(target->id_ext),
- be64_to_cpu(target->ioc_guid),
- be64_to_cpu(target->initiator_ext));
+ if (target->using_rdma_cm) {
+ char dst_addr[64];
+
+ shost_printk(KERN_INFO, target->scsi_host,
+ PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;dest=%s\n",
+ be64_to_cpu(target->id_ext),
+ be64_to_cpu(target->ioc_guid),
+ inet_ntop(&target->rdma_cm.dst, dst_addr,
+ sizeof(dst_addr)));
+ } else {
+ shost_printk(KERN_INFO, target->scsi_host,
+ PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
+ be64_to_cpu(target->id_ext),
+ be64_to_cpu(target->ioc_guid),
+ be64_to_cpu(target->initiator_ext));
+ }
ret = -EEXIST;
goto out;
}
@@ -3478,11 +3904,18 @@ static ssize_t srp_create_target(struct device *dev,
ret = srp_connect_ch(ch, multich);
if (ret) {
+ char dst[64];
+
+ if (target->using_rdma_cm)
+ inet_ntop(&target->rdma_cm.dst, dst,
+ sizeof(dst));
+ else
+ snprintf(dst, sizeof(dst), "%pI6",
+ target->ib_cm.orig_dgid.raw);
shost_printk(KERN_ERR, target->scsi_host,
- PFX "Connection %d/%d to %pI6 failed\n",
+ PFX "Connection %d/%d to %s failed\n",
ch_start + cpu_idx,
- target->ch_count,
- ch->target->orig_dgid.raw);
+ target->ch_count, dst);
if (node_idx == 0 && cpu_idx == 0) {
goto free_ch;
} else {
@@ -3507,13 +3940,25 @@ connected:
goto err_disconnect;
if (target->state != SRP_TARGET_REMOVED) {
- shost_printk(KERN_DEBUG, target->scsi_host, PFX
- "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
- be64_to_cpu(target->id_ext),
- be64_to_cpu(target->ioc_guid),
- be16_to_cpu(target->pkey),
- be64_to_cpu(target->service_id),
- target->sgid.raw, target->orig_dgid.raw);
+ if (target->using_rdma_cm) {
+ char dst[64];
+
+ inet_ntop(&target->rdma_cm.dst, dst, sizeof(dst));
+ shost_printk(KERN_DEBUG, target->scsi_host, PFX
+ "new target: id_ext %016llx ioc_guid %016llx sgid %pI6 dest %s\n",
+ be64_to_cpu(target->id_ext),
+ be64_to_cpu(target->ioc_guid),
+ target->sgid.raw, dst);
+ } else {
+ shost_printk(KERN_DEBUG, target->scsi_host, PFX
+ "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
+ be64_to_cpu(target->id_ext),
+ be64_to_cpu(target->ioc_guid),
+ be16_to_cpu(target->ib_cm.pkey),
+ be64_to_cpu(target->ib_cm.service_id),
+ target->sgid.raw,
+ target->ib_cm.orig_dgid.raw);
+ }
}
ret = count;
@@ -3523,8 +3968,16 @@ out:
put:
scsi_host_put(target->scsi_host);
- if (ret < 0)
+ if (ret < 0) {
+ /*
+ * If a call to srp_remove_target() has not been scheduled,
+ * drop the network namespace reference that was obtained
+ * earlier in this function.
+ */
+ if (target->state != SRP_TARGET_REMOVED)
+ kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
scsi_host_put(target->scsi_host);
+ }
return ret;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index a814f5ef16f9..a2706086b9c7 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -45,6 +45,7 @@
#include <rdma/ib_sa.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_fmr_pool.h>
+#include <rdma/rdma_cm.h>
enum {
SRP_PATH_REC_TIMEOUT_MS = 1000,
@@ -153,11 +154,18 @@ struct srp_rdma_ch {
struct completion done;
int status;
- struct sa_path_rec path;
- struct ib_sa_query *path_query;
- int path_query_id;
+ union {
+ struct ib_cm {
+ struct sa_path_rec path;
+ struct ib_sa_query *path_query;
+ int path_query_id;
+ struct ib_cm_id *cm_id;
+ } ib_cm;
+ struct rdma_cm {
+ struct rdma_cm_id *cm_id;
+ } rdma_cm;
+ };
- struct ib_cm_id *cm_id;
struct srp_iu **tx_ring;
struct srp_iu **rx_ring;
struct srp_request *req_ring;
@@ -182,6 +190,7 @@ struct srp_target_port {
/* read only in the hot path */
u32 global_rkey;
struct srp_rdma_ch *ch;
+ struct net *net;
u32 ch_count;
u32 lkey;
enum srp_target_state state;
@@ -194,7 +203,6 @@ struct srp_target_port {
union ib_gid sgid;
__be64 id_ext;
__be64 ioc_guid;
- __be64 service_id;
__be64 initiator_ext;
u16 io_class;
struct srp_host *srp_host;
@@ -203,6 +211,7 @@ struct srp_target_port {
char target_name[32];
unsigned int scsi_id;
unsigned int sg_tablesize;
+ unsigned int target_can_queue;
int mr_pool_size;
int mr_per_cmd;
int queue_size;
@@ -210,8 +219,28 @@ struct srp_target_port {
int comp_vector;
int tl_retry_count;
- union ib_gid orig_dgid;
- __be16 pkey;
+ bool using_rdma_cm;
+
+ union {
+ struct {
+ __be64 service_id;
+ union ib_gid orig_dgid;
+ __be16 pkey;
+ } ib_cm;
+ struct {
+ union {
+ struct sockaddr_in ip4;
+ struct sockaddr_in6 ip6;
+ struct sockaddr_storage ss;
+ } src;
+ union {
+ struct sockaddr_in ip4;
+ struct sockaddr_in6 ip6;
+ struct sockaddr_storage ss;
+ } dst;
+ bool src_specified;
+ } rdma_cm;
+ };
u32 rq_tmo_jiffies;
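
The new ib_cm/rdma_cm unions in srp_rdma_ch and srp_target_port are tagged unions with using_rdma_cm as the tag: only one member is live at a time, which is also why the sysfs show functions above return -ENOENT for the IB-only attributes. A compilable miniature (field types and sizes are arbitrary):

#include <stdbool.h>
#include <stdio.h>

struct target {
	bool using_rdma_cm;		/* selects the live union member */
	union {
		struct { unsigned long long service_id; } ib_cm;
		struct { char dst[64]; } rdma_cm;
	};
};

static void show(const struct target *t)
{
	if (t->using_rdma_cm)
		printf("dest %s\n", t->rdma_cm.dst);
	else
		printf("service_id 0x%016llx\n", t->ib_cm.service_id);
}

int main(void)
{
	struct target t = { .using_rdma_cm = false, .ib_cm.service_id = 0x2 };

	show(&t);
	return 0;
}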
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index bfa576aa9f03..0373b7c40902 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -41,6 +41,7 @@
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/atomic.h>
+#include <rdma/ib_cache.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
@@ -120,7 +121,9 @@ static bool srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new)
}
/**
- * srpt_event_handler() - Asynchronous IB event callback function.
+ * srpt_event_handler - asynchronous IB event callback function
+ * @handler: IB event handler registered by ib_register_event_handler().
+ * @event: Description of the event that occurred.
*
* Callback function called by the InfiniBand core when an asynchronous IB
* event occurs. This callback may occur in interrupt context. See also
@@ -132,6 +135,7 @@ static void srpt_event_handler(struct ib_event_handler *handler,
{
struct srpt_device *sdev;
struct srpt_port *sport;
+ u8 port_num;
sdev = ib_get_client_data(event->device, &srpt_client);
if (!sdev || sdev->device != event->device)
@@ -142,10 +146,15 @@ static void srpt_event_handler(struct ib_event_handler *handler,
switch (event->event) {
case IB_EVENT_PORT_ERR:
- if (event->element.port_num <= sdev->device->phys_port_cnt) {
- sport = &sdev->port[event->element.port_num - 1];
+ port_num = event->element.port_num - 1;
+ if (port_num < sdev->device->phys_port_cnt) {
+ sport = &sdev->port[port_num];
sport->lid = 0;
sport->sm_lid = 0;
+ } else {
+ WARN(true, "event %d: port_num %d out of range 1..%d\n",
+ event->event, port_num + 1,
+ sdev->device->phys_port_cnt);
}
break;
case IB_EVENT_PORT_ACTIVE:
@@ -155,25 +164,31 @@ static void srpt_event_handler(struct ib_event_handler *handler,
case IB_EVENT_CLIENT_REREGISTER:
case IB_EVENT_GID_CHANGE:
/* Refresh port data asynchronously. */
- if (event->element.port_num <= sdev->device->phys_port_cnt) {
- sport = &sdev->port[event->element.port_num - 1];
+ port_num = event->element.port_num - 1;
+ if (port_num < sdev->device->phys_port_cnt) {
+ sport = &sdev->port[port_num];
if (!sport->lid && !sport->sm_lid)
schedule_work(&sport->work);
+ } else {
+ WARN(true, "event %d: port_num %d out of range 1..%d\n",
+ event->event, port_num + 1,
+ sdev->device->phys_port_cnt);
}
break;
default:
- pr_err("received unrecognized IB event %d\n",
- event->event);
+ pr_err("received unrecognized IB event %d\n", event->event);
break;
}
}
/**
- * srpt_srq_event() - SRQ event callback function.
+ * srpt_srq_event - SRQ event callback function
+ * @event: Description of the event that occurred.
+ * @ctx: Context pointer specified at SRQ creation time.
*/
static void srpt_srq_event(struct ib_event *event, void *ctx)
{
- pr_info("SRQ event %d\n", event->event);
+ pr_debug("SRQ event %d\n", event->event);
}
static const char *get_ch_state_name(enum rdma_ch_state s)
@@ -194,16 +209,18 @@ static const char *get_ch_state_name(enum rdma_ch_state s)
}
/**
- * srpt_qp_event() - QP event callback function.
+ * srpt_qp_event - QP event callback function
+ * @event: Description of the event that occurred.
+ * @ch: SRPT RDMA channel.
*/
static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
{
- pr_debug("QP event %d on cm_id=%p sess_name=%s state=%d\n",
- event->event, ch->cm_id, ch->sess_name, ch->state);
+ pr_debug("QP event %d on ch=%p sess_name=%s state=%d\n",
+ event->event, ch, ch->sess_name, ch->state);
switch (event->event) {
case IB_EVENT_COMM_EST:
- ib_cm_notify(ch->cm_id, event->event);
+ ib_cm_notify(ch->ib_cm.cm_id, event->event);
break;
case IB_EVENT_QP_LAST_WQE_REACHED:
pr_debug("%s-%d, state %s: received Last WQE event.\n",
@@ -217,8 +234,8 @@ static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
}
/**
- * srpt_set_ioc() - Helper function for initializing an IOUnitInfo structure.
- *
+ * srpt_set_ioc - initialize an IOUnitInfo structure
+ * @c_list: controller list.
* @slot: one-based slot number.
* @value: four-bit value.
*
@@ -241,7 +258,8 @@ static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value)
}
/**
- * srpt_get_class_port_info() - Copy ClassPortInfo to a management datagram.
+ * srpt_get_class_port_info - copy ClassPortInfo to a management datagram
+ * @mad: Datagram that will be sent as response to DM_ATTR_CLASS_PORT_INFO.
*
* See also section 16.3.3.1 ClassPortInfo in the InfiniBand Architecture
* Specification.
@@ -260,7 +278,8 @@ static void srpt_get_class_port_info(struct ib_dm_mad *mad)
}
/**
- * srpt_get_iou() - Write IOUnitInfo to a management datagram.
+ * srpt_get_iou - write IOUnitInfo to a management datagram
+ * @mad: Datagram that will be sent as response to DM_ATTR_IOU_INFO.
*
* See also section 16.3.3.3 IOUnitInfo in the InfiniBand Architecture
* Specification. See also section B.7, table B.6 in the SRP r16a document.
@@ -284,7 +303,10 @@ static void srpt_get_iou(struct ib_dm_mad *mad)
}
/**
- * srpt_get_ioc() - Write IOControllerprofile to a management datagram.
+ * srpt_get_ioc - write IOControllerprofile to a management datagram
+ * @sport: HCA port through which the MAD has been received.
+ * @slot: Slot number specified in DM_ATTR_IOC_PROFILE query.
+ * @mad: Datagram that will be sent as response to DM_ATTR_IOC_PROFILE.
*
* See also section 16.3.3.4 IOControllerProfile in the InfiniBand
* Architecture Specification. See also section B.7, table B.7 in the SRP
@@ -314,7 +336,7 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
if (sdev->use_srq)
send_queue_depth = sdev->srq_size;
else
- send_queue_depth = min(SRPT_RQ_SIZE,
+ send_queue_depth = min(MAX_SRPT_RQ_SIZE,
sdev->device->attrs.max_qp_wr);
memset(iocp, 0, sizeof(*iocp));
@@ -342,7 +364,12 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
}
/**
- * srpt_get_svc_entries() - Write ServiceEntries to a management datagram.
+ * srpt_get_svc_entries - write ServiceEntries to a management datagram
+ * @ioc_guid: I/O controller GUID to use in reply.
+ * @slot: I/O controller number.
+ * @hi: End of the range of service entries to be specified in the reply.
+ * @lo: Start of the range of service entries to be specified in the reply.
+ * @mad: Datagram that will be sent as response to DM_ATTR_SVC_ENTRIES.
*
* See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
* Specification. See also section B.7, table B.8 in the SRP r16a document.
@@ -379,8 +406,8 @@ static void srpt_get_svc_entries(u64 ioc_guid,
}
/**
- * srpt_mgmt_method_get() - Process a received management datagram.
- * @sp: source port through which the MAD has been received.
+ * srpt_mgmt_method_get - process a received management datagram
+ * @sp: HCA port through which the MAD has been received.
* @rq_mad: received MAD.
* @rsp_mad: response MAD.
*/
@@ -419,7 +446,9 @@ static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
}
/**
- * srpt_mad_send_handler() - Post MAD-send callback function.
+ * srpt_mad_send_handler - MAD send completion callback
+ * @mad_agent: Return value of ib_register_mad_agent().
+ * @mad_wc: Work completion reporting that the MAD has been sent.
*/
static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
struct ib_mad_send_wc *mad_wc)
@@ -429,7 +458,10 @@ static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
}
/**
- * srpt_mad_recv_handler() - MAD reception callback function.
+ * srpt_mad_recv_handler - MAD reception callback function
+ * @mad_agent: Return value of ib_register_mad_agent().
+ * @send_buf: Not used.
+ * @mad_wc: Work completion reporting that a MAD has been received.
*/
static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
struct ib_mad_send_buf *send_buf,
@@ -493,8 +525,18 @@ err:
ib_free_recv_mad(mad_wc);
}
+static int srpt_format_guid(char *buf, unsigned int size, const __be64 *guid)
+{
+ const __be16 *g = (const __be16 *)guid;
+
+ return snprintf(buf, size, "%04x:%04x:%04x:%04x",
+ be16_to_cpu(g[0]), be16_to_cpu(g[1]),
+ be16_to_cpu(g[2]), be16_to_cpu(g[3]));
+}
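
srpt_format_guid() views the big-endian 64-bit GUID as four big-endian 16-bit groups and prints them colon-separated. A userspace equivalent built on ntohs()/snprintf():

#include <arpa/inet.h>
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

static int format_guid(char *buf, size_t size, const uint64_t *guid_be)
{
	const uint16_t *g = (const uint16_t *)guid_be;

	return snprintf(buf, size, "%04x:%04x:%04x:%04x",
			ntohs(g[0]), ntohs(g[1]), ntohs(g[2]), ntohs(g[3]));
}

int main(void)
{
	uint64_t guid = htobe64(0xfe800000deadbeefULL);
	char buf[20];

	format_guid(buf, sizeof(buf), &guid);
	printf("%s\n", buf);	/* fe80:0000:dead:beef */
	return 0;
}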
+
/**
- * srpt_refresh_port() - Configure a HCA port.
+ * srpt_refresh_port - configure a HCA port
+ * @sport: SRPT HCA port.
*
* Enable InfiniBand management datagram processing, update the cached sm_lid,
* lid and gid values, and register a callback function for processing MADs
@@ -507,7 +549,6 @@ static int srpt_refresh_port(struct srpt_port *sport)
struct ib_mad_reg_req reg_req;
struct ib_port_modify port_modify;
struct ib_port_attr port_attr;
- __be16 *guid;
int ret;
memset(&port_modify, 0, sizeof(port_modify));
@@ -531,11 +572,8 @@ static int srpt_refresh_port(struct srpt_port *sport)
goto err_query_port;
sport->port_guid_wwn.priv = sport;
- guid = (__be16 *)&sport->gid.global.interface_id;
- snprintf(sport->port_guid, sizeof(sport->port_guid),
- "%04x:%04x:%04x:%04x",
- be16_to_cpu(guid[0]), be16_to_cpu(guid[1]),
- be16_to_cpu(guid[2]), be16_to_cpu(guid[3]));
+ srpt_format_guid(sport->port_guid, sizeof(sport->port_guid),
+ &sport->gid.global.interface_id);
sport->port_gid_wwn.priv = sport;
snprintf(sport->port_gid, sizeof(sport->port_gid),
"0x%016llx%016llx",
@@ -577,7 +615,8 @@ err_mod_port:
}
/**
- * srpt_unregister_mad_agent() - Unregister MAD callback functions.
+ * srpt_unregister_mad_agent - unregister MAD callback functions
+ * @sdev: SRPT HCA pointer.
*
* Note: It is safe to call this function more than once for the same device.
*/
@@ -602,7 +641,11 @@ static void srpt_unregister_mad_agent(struct srpt_device *sdev)
}
/**
- * srpt_alloc_ioctx() - Allocate an SRPT I/O context structure.
+ * srpt_alloc_ioctx - allocate an SRPT I/O context structure
+ * @sdev: SRPT HCA pointer.
+ * @ioctx_size: I/O context size.
+ * @dma_size: Size of I/O context DMA buffer.
+ * @dir: DMA data direction.
*/
static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
int ioctx_size, int dma_size,
@@ -633,7 +676,11 @@ err:
}
/**
- * srpt_free_ioctx() - Free an SRPT I/O context structure.
+ * srpt_free_ioctx - free an SRPT I/O context structure
+ * @sdev: SRPT HCA pointer.
+ * @ioctx: I/O context pointer.
+ * @dma_size: Size of I/O context DMA buffer.
+ * @dir: DMA data direction.
*/
static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
int dma_size, enum dma_data_direction dir)
@@ -647,7 +694,7 @@ static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
}
/**
- * srpt_alloc_ioctx_ring() - Allocate a ring of SRPT I/O context structures.
+ * srpt_alloc_ioctx_ring - allocate a ring of SRPT I/O context structures
* @sdev: Device to allocate the I/O context ring for.
* @ring_size: Number of elements in the I/O context ring.
* @ioctx_size: I/O context size.
@@ -685,7 +732,12 @@ out:
}
/**
- * srpt_free_ioctx_ring() - Free the ring of SRPT I/O context structures.
+ * srpt_free_ioctx_ring - free the ring of SRPT I/O context structures
+ * @ioctx_ring: I/O context ring to be freed.
+ * @sdev: SRPT HCA pointer.
+ * @ring_size: Number of ring elements.
+ * @dma_size: Size of I/O context DMA buffer.
+ * @dir: DMA data direction.
*/
static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
struct srpt_device *sdev, int ring_size,
@@ -702,23 +754,9 @@ static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
}
/**
- * srpt_get_cmd_state() - Get the state of a SCSI command.
- */
-static enum srpt_command_state srpt_get_cmd_state(struct srpt_send_ioctx *ioctx)
-{
- enum srpt_command_state state;
- unsigned long flags;
-
- BUG_ON(!ioctx);
-
- spin_lock_irqsave(&ioctx->spinlock, flags);
- state = ioctx->state;
- spin_unlock_irqrestore(&ioctx->spinlock, flags);
- return state;
-}
-
-/**
- * srpt_set_cmd_state() - Set the state of a SCSI command.
+ * srpt_set_cmd_state - set the state of a SCSI command
+ * @ioctx: Send I/O context.
+ * @new: New I/O context state.
*
* Does not modify the state of aborted commands. Returns the previous command
* state.
@@ -727,21 +765,19 @@ static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx,
enum srpt_command_state new)
{
enum srpt_command_state previous;
- unsigned long flags;
- BUG_ON(!ioctx);
-
- spin_lock_irqsave(&ioctx->spinlock, flags);
previous = ioctx->state;
if (previous != SRPT_STATE_DONE)
ioctx->state = new;
- spin_unlock_irqrestore(&ioctx->spinlock, flags);
return previous;
}
/**
- * srpt_test_and_set_cmd_state() - Test and set the state of a command.
+ * srpt_test_and_set_cmd_state - test and set the state of a command
+ * @ioctx: Send I/O context.
+ * @old: Current I/O context state.
+ * @new: New I/O context state.
*
* Returns true if and only if the previous command state was equal to 'old'.
*/
@@ -750,22 +786,23 @@ static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
enum srpt_command_state new)
{
enum srpt_command_state previous;
- unsigned long flags;
WARN_ON(!ioctx);
WARN_ON(old == SRPT_STATE_DONE);
WARN_ON(new == SRPT_STATE_NEW);
- spin_lock_irqsave(&ioctx->spinlock, flags);
previous = ioctx->state;
if (previous == old)
ioctx->state = new;
- spin_unlock_irqrestore(&ioctx->spinlock, flags);
+
return previous == old;
}
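A similar sketch for the compare-and-set variant (start_data_transfer() is again an assumed helper): the boolean result tells the caller whether it won the race against a concurrent state change, so exactly one path proceeds.

	/* Begin the transfer only if no abort changed the state first. */
	if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEW,
					SRPT_STATE_NEED_DATA))
		start_data_transfer(ioctx);	/* assumed helper */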
/**
- * srpt_post_recv() - Post an IB receive request.
+ * srpt_post_recv - post an IB receive request
+ * @sdev: SRPT HCA pointer.
+ * @ch: SRPT RDMA channel.
+ * @ioctx: Receive I/O context pointer.
*/
static int srpt_post_recv(struct srpt_device *sdev, struct srpt_rdma_ch *ch,
struct srpt_recv_ioctx *ioctx)
@@ -791,7 +828,8 @@ static int srpt_post_recv(struct srpt_device *sdev, struct srpt_rdma_ch *ch,
}
/**
- * srpt_zerolength_write() - Perform a zero-length RDMA write.
+ * srpt_zerolength_write - perform a zero-length RDMA write
+ * @ch: SRPT RDMA channel.
*
* A quote from the InfiniBand specification: C9-88: For an HCA responder
* using Reliable Connection service, for each zero-length RDMA READ or WRITE
@@ -802,6 +840,9 @@ static int srpt_zerolength_write(struct srpt_rdma_ch *ch)
{
struct ib_send_wr wr, *bad_wr;
+ pr_debug("%s-%d: queued zerolength write\n", ch->sess_name,
+ ch->qp->qp_num);
+
memset(&wr, 0, sizeof(wr));
wr.opcode = IB_WR_RDMA_WRITE;
wr.wr_cqe = &ch->zw_cqe;
@@ -813,13 +854,17 @@ static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct srpt_rdma_ch *ch = cq->cq_context;
+ pr_debug("%s-%d wc->status %d\n", ch->sess_name, ch->qp->qp_num,
+ wc->status);
+
if (wc->status == IB_WC_SUCCESS) {
srpt_process_wait_list(ch);
} else {
if (srpt_set_ch_state(ch, CH_DISCONNECTED))
schedule_work(&ch->release_work);
else
- WARN_ONCE(1, "%s-%d\n", ch->sess_name, ch->qp->qp_num);
+ pr_debug("%s-%d: already disconnected.\n",
+ ch->sess_name, ch->qp->qp_num);
}
}
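The C9-88 guarantee quoted above is what makes this trick generally available on RC QPs: a zero-length WRITE needs no registered buffer and no valid rkey. A generic sketch of the drain-marker pattern (independent of this driver; marker_cqe and its .done callback are assumptions):

	/*
	 * Post a signaled zero-length RDMA WRITE. Send completions are
	 * delivered in order, so once this CQE fires, every work request
	 * posted before it has completed as well.
	 */
	static int post_drain_marker(struct ib_qp *qp, struct ib_cqe *marker_cqe)
	{
		struct ib_send_wr wr, *bad_wr;

		memset(&wr, 0, sizeof(wr));
		wr.opcode = IB_WR_RDMA_WRITE;	/* no sg list: zero length */
		wr.wr_cqe = marker_cqe;		/* completion -> marker_cqe->done */
		wr.send_flags = IB_SEND_SIGNALED;
		return ib_post_send(qp, &wr, &bad_wr);
	}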
@@ -928,11 +973,13 @@ static inline void *srpt_get_desc_buf(struct srp_cmd *srp_cmd)
}
/**
- * srpt_get_desc_tbl() - Parse the data descriptors of an SRP_CMD request.
+ * srpt_get_desc_tbl - parse the data descriptors of an SRP_CMD request
* @ioctx: Pointer to the I/O context associated with the request.
* @srp_cmd: Pointer to the SRP_CMD request data.
* @dir: Pointer to the variable to which the transfer direction will be
* written.
+ * @sg: [out] scatterlist allocated for the parsed SRP_CMD.
+ * @sg_cnt: [out] length of @sg.
* @data_len: Pointer to the variable to which the total data length of all
* descriptors in the SRP_CMD request will be written.
*
@@ -998,7 +1045,9 @@ static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
}
/**
- * srpt_init_ch_qp() - Initialize queue pair attributes.
+ * srpt_init_ch_qp - initialize queue pair attributes
+ * @ch: SRPT RDMA channel.
+ * @qp: Queue pair pointer.
*
 * Initializes the attributes of queue pair 'qp' by allowing local write
 * access. Also transitions 'qp' to state IB_QPS_INIT.
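Condensed, the RESET to INIT transition described here looks as follows (a sketch; qp, port and pkey_index stand in for values the driver derives, including the P_Key lookup added in the hunk below):

	struct ib_qp_attr attr = {
		.qp_state	 = IB_QPS_INIT,
		.qp_access_flags = IB_ACCESS_LOCAL_WRITE,
		.port_num	 = port,
		.pkey_index	 = pkey_index,
	};
	int ret = ib_modify_qp(qp, &attr,
			       IB_QP_STATE | IB_QP_ACCESS_FLAGS |
			       IB_QP_PORT | IB_QP_PKEY_INDEX);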
@@ -1015,7 +1064,12 @@ static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
attr->qp_state = IB_QPS_INIT;
attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
attr->port_num = ch->sport->port;
- attr->pkey_index = 0;
+
+ ret = ib_find_cached_pkey(ch->sport->sdev->device, ch->sport->port,
+ ch->pkey, &attr->pkey_index);
+ if (ret < 0)
+ pr_err("Translating pkey %#x failed (%d) - using index 0\n",
+ ch->pkey, ret);
ret = ib_modify_qp(qp, attr,
IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT |
@@ -1026,7 +1080,7 @@ static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
}
/**
- * srpt_ch_qp_rtr() - Change the state of a channel to 'ready to receive' (RTR).
+ * srpt_ch_qp_rtr - change the state of a channel to 'ready to receive' (RTR)
* @ch: channel of the queue pair.
* @qp: queue pair to change the state of.
*
@@ -1043,7 +1097,7 @@ static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
int ret;
qp_attr.qp_state = IB_QPS_RTR;
- ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
+ ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask);
if (ret)
goto out;
@@ -1056,7 +1110,7 @@ out:
}
/**
- * srpt_ch_qp_rts() - Change the state of a channel to 'ready to send' (RTS).
+ * srpt_ch_qp_rts - change the state of a channel to 'ready to send' (RTS)
* @ch: channel of the queue pair.
* @qp: queue pair to change the state of.
*
@@ -1073,7 +1127,7 @@ static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
int ret;
qp_attr.qp_state = IB_QPS_RTS;
- ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
+ ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask);
if (ret)
goto out;
@@ -1086,7 +1140,8 @@ out:
}
/**
- * srpt_ch_qp_err() - Set the channel queue pair state to 'error'.
+ * srpt_ch_qp_err - set the channel queue pair state to 'error'
+ * @ch: SRPT RDMA channel.
*/
static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
{
@@ -1097,7 +1152,8 @@ static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
}
/**
- * srpt_get_send_ioctx() - Obtain an I/O context for sending to the initiator.
+ * srpt_get_send_ioctx - obtain an I/O context for sending to the initiator
+ * @ch: SRPT RDMA channel.
*/
static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
{
@@ -1119,11 +1175,9 @@ static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
return ioctx;
BUG_ON(ioctx->ch != ch);
- spin_lock_init(&ioctx->spinlock);
ioctx->state = SRPT_STATE_NEW;
ioctx->n_rdma = 0;
ioctx->n_rw_ctx = 0;
- init_completion(&ioctx->tx_done);
ioctx->queue_status_only = false;
/*
* transport_init_se_cmd() does not initialize all fields, so do it
@@ -1136,14 +1190,12 @@ static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
}
/**
- * srpt_abort_cmd() - Abort a SCSI command.
+ * srpt_abort_cmd - abort a SCSI command
* @ioctx: I/O context associated with the SCSI command.
- * @context: Preferred execution context.
*/
static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
{
enum srpt_command_state state;
- unsigned long flags;
BUG_ON(!ioctx);
@@ -1152,7 +1204,6 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
* the ib_srpt driver, change the state to the next state.
*/
- spin_lock_irqsave(&ioctx->spinlock, flags);
state = ioctx->state;
switch (state) {
case SRPT_STATE_NEED_DATA:
@@ -1167,7 +1218,6 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
__func__, state);
break;
}
- spin_unlock_irqrestore(&ioctx->spinlock, flags);
pr_debug("Aborting cmd with state %d -> %d and tag %lld\n", state,
ioctx->state, ioctx->cmd.tag);
@@ -1206,6 +1256,10 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
}
/**
+ * srpt_rdma_read_done - RDMA read completion callback
+ * @cq: Completion queue.
+ * @wc: Work completion.
+ *
* XXX: what is now target_execute_cmd used to be asynchronous, and unmapping
* the data that has been transferred via IB RDMA had to be postponed until the
* check_stop_free() callback. None of this is necessary anymore and needs to
@@ -1233,11 +1287,11 @@ static void srpt_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
target_execute_cmd(&ioctx->cmd);
else
pr_err("%s[%d]: wrong state = %d\n", __func__,
- __LINE__, srpt_get_cmd_state(ioctx));
+ __LINE__, ioctx->state);
}
/**
- * srpt_build_cmd_rsp() - Build an SRP_RSP response.
+ * srpt_build_cmd_rsp - build an SRP_RSP response
* @ch: RDMA channel through which the request has been received.
* @ioctx: I/O context associated with the SRP_CMD request. The response will
* be built in the buffer ioctx->buf points at and hence this function will
@@ -1297,7 +1351,7 @@ static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
}
/**
- * srpt_build_tskmgmt_rsp() - Build a task management response.
+ * srpt_build_tskmgmt_rsp - build a task management response
* @ch: RDMA channel through which the request has been received.
* @ioctx: I/O context in which the SRP_RSP response will be built.
* @rsp_code: RSP_CODE that will be stored in the response.
@@ -1345,7 +1399,10 @@ static int srpt_check_stop_free(struct se_cmd *cmd)
}
/**
- * srpt_handle_cmd() - Process SRP_CMD.
+ * srpt_handle_cmd - process an SRP_CMD information unit
+ * @ch: SRPT RDMA channel.
+ * @recv_ioctx: Receive I/O context.
+ * @send_ioctx: Send I/O context.
*/
static void srpt_handle_cmd(struct srpt_rdma_ch *ch,
struct srpt_recv_ioctx *recv_ioctx,
@@ -1427,7 +1484,10 @@ static int srp_tmr_to_tcm(int fn)
}
/**
- * srpt_handle_tsk_mgmt() - Process an SRP_TSK_MGMT information unit.
+ * srpt_handle_tsk_mgmt - process an SRP_TSK_MGMT information unit
+ * @ch: SRPT RDMA channel.
+ * @recv_ioctx: Receive I/O context.
+ * @send_ioctx: Send I/O context.
*
* Returns 0 if and only if the request will be processed by the target core.
*
@@ -1449,9 +1509,9 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
srp_tsk = recv_ioctx->ioctx.buf;
cmd = &send_ioctx->cmd;
- pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld"
- " cm_id %p sess %p\n", srp_tsk->tsk_mgmt_func,
- srp_tsk->task_tag, srp_tsk->tag, ch->cm_id, ch->sess);
+ pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld ch %p sess %p\n",
+ srp_tsk->tsk_mgmt_func, srp_tsk->task_tag, srp_tsk->tag, ch,
+ ch->sess);
srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
send_ioctx->cmd.tag = srp_tsk->tag;
@@ -1470,41 +1530,42 @@ fail:
}
/**
- * srpt_handle_new_iu() - Process a newly received information unit.
+ * srpt_handle_new_iu - process a newly received information unit
* @ch: RDMA channel through which the information unit has been received.
- * @ioctx: SRPT I/O context associated with the information unit.
+ * @recv_ioctx: Receive I/O context associated with the information unit.
*/
-static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
- struct srpt_recv_ioctx *recv_ioctx,
- struct srpt_send_ioctx *send_ioctx)
+static bool
+srpt_handle_new_iu(struct srpt_rdma_ch *ch, struct srpt_recv_ioctx *recv_ioctx)
{
+ struct srpt_send_ioctx *send_ioctx = NULL;
struct srp_cmd *srp_cmd;
+ bool res = false;
+ u8 opcode;
BUG_ON(!ch);
BUG_ON(!recv_ioctx);
+ if (unlikely(ch->state == CH_CONNECTING))
+ goto push;
+
ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
recv_ioctx->ioctx.dma, srp_max_req_size,
DMA_FROM_DEVICE);
- if (unlikely(ch->state == CH_CONNECTING))
- goto out_wait;
-
- if (unlikely(ch->state != CH_LIVE))
- return;
-
srp_cmd = recv_ioctx->ioctx.buf;
- if (srp_cmd->opcode == SRP_CMD || srp_cmd->opcode == SRP_TSK_MGMT) {
- if (!send_ioctx) {
- if (!list_empty(&ch->cmd_wait_list))
- goto out_wait;
- send_ioctx = srpt_get_send_ioctx(ch);
- }
+ opcode = srp_cmd->opcode;
+ if (opcode == SRP_CMD || opcode == SRP_TSK_MGMT) {
+ send_ioctx = srpt_get_send_ioctx(ch);
if (unlikely(!send_ioctx))
- goto out_wait;
+ goto push;
}
- switch (srp_cmd->opcode) {
+ if (!list_empty(&recv_ioctx->wait_list)) {
+ WARN_ON_ONCE(!ch->processing_wait_list);
+ list_del_init(&recv_ioctx->wait_list);
+ }
+
+ switch (opcode) {
case SRP_CMD:
srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
break;
@@ -1524,16 +1585,22 @@ static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
pr_err("Received SRP_RSP\n");
break;
default:
- pr_err("received IU with unknown opcode 0x%x\n",
- srp_cmd->opcode);
+ pr_err("received IU with unknown opcode 0x%x\n", opcode);
break;
}
srpt_post_recv(ch->sport->sdev, ch, recv_ioctx);
- return;
+ res = true;
+
+out:
+ return res;
-out_wait:
- list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
+push:
+ if (list_empty(&recv_ioctx->wait_list)) {
+ WARN_ON_ONCE(ch->processing_wait_list);
+ list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
+ }
+ goto out;
}
static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc)
@@ -1548,10 +1615,10 @@ static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc)
req_lim = atomic_dec_return(&ch->req_lim);
if (unlikely(req_lim < 0))
pr_err("req_lim = %d < 0\n", req_lim);
- srpt_handle_new_iu(ch, ioctx, NULL);
+ srpt_handle_new_iu(ch, ioctx);
} else {
- pr_info("receiving failed for ioctx %p with status %d\n",
- ioctx, wc->status);
+ pr_info_ratelimited("receiving failed for ioctx %p with status %d\n",
+ ioctx, wc->status);
}
}
@@ -1562,22 +1629,28 @@ static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc)
*/
static void srpt_process_wait_list(struct srpt_rdma_ch *ch)
{
- struct srpt_send_ioctx *ioctx;
+ struct srpt_recv_ioctx *recv_ioctx, *tmp;
+
+ WARN_ON_ONCE(ch->state == CH_CONNECTING);
- while (!list_empty(&ch->cmd_wait_list) &&
- ch->state >= CH_LIVE &&
- (ioctx = srpt_get_send_ioctx(ch)) != NULL) {
- struct srpt_recv_ioctx *recv_ioctx;
+ if (list_empty(&ch->cmd_wait_list))
+ return;
- recv_ioctx = list_first_entry(&ch->cmd_wait_list,
- struct srpt_recv_ioctx,
- wait_list);
- list_del(&recv_ioctx->wait_list);
- srpt_handle_new_iu(ch, recv_ioctx, ioctx);
+ WARN_ON_ONCE(ch->processing_wait_list);
+ ch->processing_wait_list = true;
+ list_for_each_entry_safe(recv_ioctx, tmp, &ch->cmd_wait_list,
+ wait_list) {
+ if (!srpt_handle_new_iu(ch, recv_ioctx))
+ break;
}
+ ch->processing_wait_list = false;
}
/**
+ * srpt_send_done - send completion callback
+ * @cq: Completion queue.
+ * @wc: Work completion.
+ *
* Note: Although this has not yet been observed during tests, at least in
* theory it is possible that the srpt_get_send_ioctx() call invoked by
* srpt_handle_new_iu() fails. This is possible because the req_lim_delta
@@ -1619,7 +1692,8 @@ static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc)
}
/**
- * srpt_create_ch_ib() - Create receive and send completion queues.
+ * srpt_create_ch_ib - create receive and send completion queues
+ * @ch: SRPT RDMA channel.
*/
static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
{
@@ -1627,7 +1701,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
struct srpt_port *sport = ch->sport;
struct srpt_device *sdev = sport->sdev;
const struct ib_device_attr *attrs = &sdev->device->attrs;
- u32 srp_sq_size = sport->port_attrib.srp_sq_size;
+ int sq_size = sport->port_attrib.srp_sq_size;
int i, ret;
WARN_ON(ch->rq_size < 1);
@@ -1638,12 +1712,12 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
goto out;
retry:
- ch->cq = ib_alloc_cq(sdev->device, ch, ch->rq_size + srp_sq_size,
+ ch->cq = ib_alloc_cq(sdev->device, ch, ch->rq_size + sq_size,
0 /* XXX: spread CQs */, IB_POLL_WORKQUEUE);
if (IS_ERR(ch->cq)) {
ret = PTR_ERR(ch->cq);
pr_err("failed to create CQ cqe= %d ret= %d\n",
- ch->rq_size + srp_sq_size, ret);
+ ch->rq_size + sq_size, ret);
goto out;
}
@@ -1661,8 +1735,8 @@ retry:
 * both, as RDMA contexts will also post completions for the
* RDMA READ case.
*/
- qp_init->cap.max_send_wr = min(srp_sq_size / 2, attrs->max_qp_wr + 0U);
- qp_init->cap.max_rdma_ctxs = srp_sq_size / 2;
+ qp_init->cap.max_send_wr = min(sq_size / 2, attrs->max_qp_wr);
+ qp_init->cap.max_rdma_ctxs = sq_size / 2;
qp_init->cap.max_send_sge = min(attrs->max_sge, SRPT_MAX_SG_PER_WQE);
qp_init->port_num = ch->sport->port;
if (sdev->use_srq) {
@@ -1676,8 +1750,8 @@ retry:
if (IS_ERR(ch->qp)) {
ret = PTR_ERR(ch->qp);
if (ret == -ENOMEM) {
- srp_sq_size /= 2;
- if (srp_sq_size >= MIN_SRPT_SQ_SIZE) {
+ sq_size /= 2;
+ if (sq_size >= MIN_SRPT_SQ_SIZE) {
ib_destroy_cq(ch->cq);
goto retry;
}
@@ -1688,9 +1762,9 @@ retry:
atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr);
- pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
+ pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d ch= %p\n",
__func__, ch->cq->cqe, qp_init->cap.max_send_sge,
- qp_init->cap.max_send_wr, ch->cm_id);
+ qp_init->cap.max_send_wr, ch);
ret = srpt_init_ch_qp(ch, ch->qp);
if (ret)
@@ -1718,7 +1792,8 @@ static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
}
/**
- * srpt_close_ch() - Close an RDMA channel.
+ * srpt_close_ch - close an RDMA channel
+ * @ch: SRPT RDMA channel.
*
* Make sure all resources associated with the channel will be deallocated at
* an appropriate time.
@@ -1743,8 +1818,6 @@ static bool srpt_close_ch(struct srpt_rdma_ch *ch)
pr_err("%s-%d: changing queue pair into error state failed: %d\n",
ch->sess_name, ch->qp->qp_num, ret);
- pr_debug("%s-%d: queued zerolength write\n", ch->sess_name,
- ch->qp->qp_num);
ret = srpt_zerolength_write(ch);
if (ret < 0) {
pr_err("%s-%d: queuing zero-length write failed: %d\n",
@@ -1776,9 +1849,9 @@ static int srpt_disconnect_ch(struct srpt_rdma_ch *ch)
if (!srpt_set_ch_state(ch, CH_DISCONNECTING))
return -ENOTCONN;
- ret = ib_send_cm_dreq(ch->cm_id, NULL, 0);
+ ret = ib_send_cm_dreq(ch->ib_cm.cm_id, NULL, 0);
if (ret < 0)
- ret = ib_send_cm_drep(ch->cm_id, NULL, 0);
+ ret = ib_send_cm_drep(ch->ib_cm.cm_id, NULL, 0);
if (ret < 0 && srpt_close_ch(ch))
ret = 0;
@@ -1786,83 +1859,135 @@ static int srpt_disconnect_ch(struct srpt_rdma_ch *ch)
return ret;
}
-/*
- * Send DREQ and wait for DREP. Return true if and only if this function
- * changed the state of @ch.
- */
-static bool srpt_disconnect_ch_sync(struct srpt_rdma_ch *ch)
- __must_hold(&sdev->mutex)
+static bool srpt_ch_closed(struct srpt_port *sport, struct srpt_rdma_ch *ch)
{
- DECLARE_COMPLETION_ONSTACK(release_done);
- struct srpt_device *sdev = ch->sport->sdev;
- bool wait;
+ struct srpt_nexus *nexus;
+ struct srpt_rdma_ch *ch2;
+ bool res = true;
+
+ rcu_read_lock();
+ list_for_each_entry(nexus, &sport->nexus_list, entry) {
+ list_for_each_entry(ch2, &nexus->ch_list, list) {
+ if (ch2 == ch) {
+ res = false;
+ goto done;
+ }
+ }
+ }
+done:
+ rcu_read_unlock();
- lockdep_assert_held(&sdev->mutex);
+ return res;
+}
+
+/* Send DREQ and wait for DREP. */
+static void srpt_disconnect_ch_sync(struct srpt_rdma_ch *ch)
+{
+ struct srpt_port *sport = ch->sport;
pr_debug("ch %s-%d state %d\n", ch->sess_name, ch->qp->qp_num,
ch->state);
- WARN_ON(ch->release_done);
- ch->release_done = &release_done;
- wait = !list_empty(&ch->list);
+ mutex_lock(&sport->mutex);
srpt_disconnect_ch(ch);
- mutex_unlock(&sdev->mutex);
+ mutex_unlock(&sport->mutex);
- if (!wait)
- goto out;
-
- while (wait_for_completion_timeout(&release_done, 180 * HZ) == 0)
+ while (wait_event_timeout(sport->ch_releaseQ, srpt_ch_closed(sport, ch),
+ 5 * HZ) == 0)
pr_info("%s(%s-%d state %d): still waiting ...\n", __func__,
ch->sess_name, ch->qp->qp_num, ch->state);
-out:
- mutex_lock(&sdev->mutex);
- return wait;
}
-static void srpt_set_enabled(struct srpt_port *sport, bool enabled)
- __must_hold(&sdev->mutex)
+static void __srpt_close_all_ch(struct srpt_port *sport)
{
- struct srpt_device *sdev = sport->sdev;
+ struct srpt_nexus *nexus;
struct srpt_rdma_ch *ch;
- lockdep_assert_held(&sdev->mutex);
+ lockdep_assert_held(&sport->mutex);
- if (sport->enabled == enabled)
- return;
- sport->enabled = enabled;
- if (sport->enabled)
- return;
+ list_for_each_entry(nexus, &sport->nexus_list, entry) {
+ list_for_each_entry(ch, &nexus->ch_list, list) {
+ if (srpt_disconnect_ch(ch) >= 0)
+ pr_info("Closing channel %s-%d because target %s_%d has been disabled\n",
+ ch->sess_name, ch->qp->qp_num,
+ sport->sdev->device->name, sport->port);
+ srpt_close_ch(ch);
+ }
+ }
+}
-again:
- list_for_each_entry(ch, &sdev->rch_list, list) {
- if (ch->sport == sport) {
- pr_info("%s: closing channel %s-%d\n",
- sdev->device->name, ch->sess_name,
- ch->qp->qp_num);
- if (srpt_disconnect_ch_sync(ch))
- goto again;
+/*
+ * Look up (i_port_id, t_port_id) in sport->nexus_list. Create an entry if
+ * it does not yet exist.
+ */
+static struct srpt_nexus *srpt_get_nexus(struct srpt_port *sport,
+ const u8 i_port_id[16],
+ const u8 t_port_id[16])
+{
+ struct srpt_nexus *nexus = NULL, *tmp_nexus = NULL, *n;
+
+ for (;;) {
+ mutex_lock(&sport->mutex);
+ list_for_each_entry(n, &sport->nexus_list, entry) {
+ if (memcmp(n->i_port_id, i_port_id, 16) == 0 &&
+ memcmp(n->t_port_id, t_port_id, 16) == 0) {
+ nexus = n;
+ break;
+ }
}
+ if (!nexus && tmp_nexus) {
+ list_add_tail_rcu(&tmp_nexus->entry,
+ &sport->nexus_list);
+ swap(nexus, tmp_nexus);
+ }
+ mutex_unlock(&sport->mutex);
+
+ if (nexus)
+ break;
+ tmp_nexus = kzalloc(sizeof(*nexus), GFP_KERNEL);
+ if (!tmp_nexus) {
+ nexus = ERR_PTR(-ENOMEM);
+ break;
+ }
+ INIT_LIST_HEAD(&tmp_nexus->ch_list);
+ memcpy(tmp_nexus->i_port_id, i_port_id, 16);
+ memcpy(tmp_nexus->t_port_id, t_port_id, 16);
}
+ kfree(tmp_nexus);
+
+ return nexus;
+}
+
+static void srpt_set_enabled(struct srpt_port *sport, bool enabled)
+ __must_hold(&sport->mutex)
+{
+ lockdep_assert_held(&sport->mutex);
+
+ if (sport->enabled == enabled)
+ return;
+ sport->enabled = enabled;
+ if (!enabled)
+ __srpt_close_all_ch(sport);
}
static void srpt_free_ch(struct kref *kref)
{
struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref);
- kfree(ch);
+ kfree_rcu(ch, rcu);
}
static void srpt_release_channel_work(struct work_struct *w)
{
struct srpt_rdma_ch *ch;
struct srpt_device *sdev;
+ struct srpt_port *sport;
struct se_session *se_sess;
ch = container_of(w, struct srpt_rdma_ch, release_work);
- pr_debug("%s: %s-%d; release_done = %p\n", __func__, ch->sess_name,
- ch->qp->qp_num, ch->release_done);
+ pr_debug("%s-%d\n", ch->sess_name, ch->qp->qp_num);
sdev = ch->sport->sdev;
BUG_ON(!sdev);
@@ -1877,169 +2002,141 @@ static void srpt_release_channel_work(struct work_struct *w)
transport_deregister_session(se_sess);
ch->sess = NULL;
- ib_destroy_cm_id(ch->cm_id);
+ ib_destroy_cm_id(ch->ib_cm.cm_id);
srpt_destroy_ch_ib(ch);
srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
ch->sport->sdev, ch->rq_size,
- ch->rsp_size, DMA_TO_DEVICE);
+ ch->max_rsp_size, DMA_TO_DEVICE);
srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
sdev, ch->rq_size,
srp_max_req_size, DMA_FROM_DEVICE);
- mutex_lock(&sdev->mutex);
- list_del_init(&ch->list);
- if (ch->release_done)
- complete(ch->release_done);
- mutex_unlock(&sdev->mutex);
+ sport = ch->sport;
+ mutex_lock(&sport->mutex);
+ list_del_rcu(&ch->list);
+ mutex_unlock(&sport->mutex);
- wake_up(&sdev->ch_releaseQ);
+ wake_up(&sport->ch_releaseQ);
kref_put(&ch->kref, srpt_free_ch);
}
/**
- * srpt_cm_req_recv() - Process the event IB_CM_REQ_RECEIVED.
+ * srpt_cm_req_recv - process the event IB_CM_REQ_RECEIVED
+ * @cm_id: IB/CM connection identifier.
+ * @port_num: Port through which the IB/CM REQ message was received.
+ * @pkey: P_Key of the incoming connection.
+ * @req: SRP login request.
+ * @src_addr: GID of the port that submitted the login request.
*
* Ownership of the cm_id is transferred to the target session if this
 * function returns zero. Otherwise the caller remains the owner of cm_id.
*/
static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
- struct ib_cm_req_event_param *param,
- void *private_data)
+ u8 port_num, __be16 pkey,
+ const struct srp_login_req *req,
+ const char *src_addr)
{
struct srpt_device *sdev = cm_id->context;
- struct srpt_port *sport = &sdev->port[param->port - 1];
- struct srp_login_req *req;
- struct srp_login_rsp *rsp;
- struct srp_login_rej *rej;
- struct ib_cm_rep_param *rep_param;
- struct srpt_rdma_ch *ch, *tmp_ch;
- __be16 *guid;
+ struct srpt_port *sport = &sdev->port[port_num - 1];
+ struct srpt_nexus *nexus;
+ struct srp_login_rsp *rsp = NULL;
+ struct srp_login_rej *rej = NULL;
+ struct ib_cm_rep_param *rep_param = NULL;
+ struct srpt_rdma_ch *ch;
+ char i_port_id[36];
u32 it_iu_len;
- int i, ret = 0;
+ int i, ret;
WARN_ON_ONCE(irqs_disabled());
- if (WARN_ON(!sdev || !private_data))
+ if (WARN_ON(!sdev || !req))
return -EINVAL;
- req = (struct srp_login_req *)private_data;
-
it_iu_len = be32_to_cpu(req->req_it_iu_len);
- pr_info("Received SRP_LOGIN_REQ with i_port_id 0x%llx:0x%llx,"
- " t_port_id 0x%llx:0x%llx and it_iu_len %d on port %d"
- " (guid=0x%llx:0x%llx)\n",
- be64_to_cpu(*(__be64 *)&req->initiator_port_id[0]),
- be64_to_cpu(*(__be64 *)&req->initiator_port_id[8]),
- be64_to_cpu(*(__be64 *)&req->target_port_id[0]),
- be64_to_cpu(*(__be64 *)&req->target_port_id[8]),
- it_iu_len,
- param->port,
- be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[0]),
- be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[8]));
+ pr_info("Received SRP_LOGIN_REQ with i_port_id %pI6, t_port_id %pI6 and it_iu_len %d on port %d (guid=%pI6); pkey %#04x\n",
+ req->initiator_port_id, req->target_port_id, it_iu_len,
+ port_num, &sport->gid, be16_to_cpu(pkey));
+ nexus = srpt_get_nexus(sport, req->initiator_port_id,
+ req->target_port_id);
+ if (IS_ERR(nexus)) {
+ ret = PTR_ERR(nexus);
+ goto out;
+ }
+
+ ret = -ENOMEM;
rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
rej = kzalloc(sizeof(*rej), GFP_KERNEL);
rep_param = kzalloc(sizeof(*rep_param), GFP_KERNEL);
-
- if (!rsp || !rej || !rep_param) {
- ret = -ENOMEM;
+ if (!rsp || !rej || !rep_param)
goto out;
- }
+ ret = -EINVAL;
if (it_iu_len > srp_max_req_size || it_iu_len < 64) {
rej->reason = cpu_to_be32(
- SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
- ret = -EINVAL;
- pr_err("rejected SRP_LOGIN_REQ because its"
- " length (%d bytes) is out of range (%d .. %d)\n",
+ SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
+ pr_err("rejected SRP_LOGIN_REQ because its length (%d bytes) is out of range (%d .. %d)\n",
it_iu_len, 64, srp_max_req_size);
goto reject;
}
if (!sport->enabled) {
- rej->reason = cpu_to_be32(
- SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
- ret = -EINVAL;
- pr_err("rejected SRP_LOGIN_REQ because the target port"
- " has not yet been enabled\n");
+ rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ pr_info("rejected SRP_LOGIN_REQ because target port %s_%d has not yet been enabled\n",
+ sport->sdev->device->name, port_num);
goto reject;
}
- if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
- rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;
-
- mutex_lock(&sdev->mutex);
-
- list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) {
- if (!memcmp(ch->i_port_id, req->initiator_port_id, 16)
- && !memcmp(ch->t_port_id, req->target_port_id, 16)
- && param->port == ch->sport->port
- && param->listen_id == ch->sport->sdev->cm_id
- && ch->cm_id) {
- if (srpt_disconnect_ch(ch) < 0)
- continue;
- pr_info("Relogin - closed existing channel %s\n",
- ch->sess_name);
- rsp->rsp_flags =
- SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
- }
- }
-
- mutex_unlock(&sdev->mutex);
-
- } else
- rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
-
if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid)
|| *(__be64 *)(req->target_port_id + 8) !=
cpu_to_be64(srpt_service_guid)) {
rej->reason = cpu_to_be32(
- SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
- ret = -ENOMEM;
- pr_err("rejected SRP_LOGIN_REQ because it"
- " has an invalid target port identifier.\n");
+ SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
+ pr_err("rejected SRP_LOGIN_REQ because it has an invalid target port identifier.\n");
goto reject;
}
+ ret = -ENOMEM;
ch = kzalloc(sizeof(*ch), GFP_KERNEL);
if (!ch) {
- rej->reason = cpu_to_be32(
- SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
- pr_err("rejected SRP_LOGIN_REQ because no memory.\n");
- ret = -ENOMEM;
+ rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ pr_err("rejected SRP_LOGIN_REQ because out of memory.\n");
goto reject;
}
kref_init(&ch->kref);
+ ch->pkey = be16_to_cpu(pkey);
+ ch->nexus = nexus;
ch->zw_cqe.done = srpt_zerolength_write_done;
INIT_WORK(&ch->release_work, srpt_release_channel_work);
- memcpy(ch->i_port_id, req->initiator_port_id, 16);
- memcpy(ch->t_port_id, req->target_port_id, 16);
- ch->sport = &sdev->port[param->port - 1];
- ch->cm_id = cm_id;
+ ch->sport = sport;
+ ch->ib_cm.cm_id = cm_id;
cm_id->context = ch;
/*
* ch->rq_size should be at least as large as the initiator queue
 * depth, so that the initiator driver does not have to report QUEUE_FULL
* to the SCSI mid-layer.
*/
- ch->rq_size = min(SRPT_RQ_SIZE, sdev->device->attrs.max_qp_wr);
+ ch->rq_size = min(MAX_SRPT_RQ_SIZE, sdev->device->attrs.max_qp_wr);
spin_lock_init(&ch->spinlock);
ch->state = CH_CONNECTING;
INIT_LIST_HEAD(&ch->cmd_wait_list);
- ch->rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
+ ch->max_rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
ch->ioctx_ring = (struct srpt_send_ioctx **)
srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
sizeof(*ch->ioctx_ring[0]),
- ch->rsp_size, DMA_TO_DEVICE);
- if (!ch->ioctx_ring)
+ ch->max_rsp_size, DMA_TO_DEVICE);
+ if (!ch->ioctx_ring) {
+ pr_err("rejected SRP_LOGIN_REQ because creating a new QP SQ ring failed.\n");
+ rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
goto free_ch;
+ }
INIT_LIST_HEAD(&ch->free_list);
for (i = 0; i < ch->rq_size; i++) {
@@ -2058,59 +2155,88 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
goto free_ring;
}
+ for (i = 0; i < ch->rq_size; i++)
+ INIT_LIST_HEAD(&ch->ioctx_recv_ring[i]->wait_list);
}
ret = srpt_create_ch_ib(ch);
if (ret) {
- rej->reason = cpu_to_be32(
- SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
- pr_err("rejected SRP_LOGIN_REQ because creating"
- " a new RDMA channel failed.\n");
- goto free_recv_ring;
- }
-
- ret = srpt_ch_qp_rtr(ch, ch->qp);
- if (ret) {
rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
- pr_err("rejected SRP_LOGIN_REQ because enabling"
- " RTR failed (error code = %d)\n", ret);
- goto destroy_ib;
+ pr_err("rejected SRP_LOGIN_REQ because creating a new RDMA channel failed.\n");
+ goto free_recv_ring;
}
- guid = (__be16 *)&param->primary_path->dgid.global.interface_id;
- snprintf(ch->ini_guid, sizeof(ch->ini_guid), "%04x:%04x:%04x:%04x",
- be16_to_cpu(guid[0]), be16_to_cpu(guid[1]),
- be16_to_cpu(guid[2]), be16_to_cpu(guid[3]));
- snprintf(ch->sess_name, sizeof(ch->sess_name), "0x%016llx%016llx",
- be64_to_cpu(*(__be64 *)ch->i_port_id),
- be64_to_cpu(*(__be64 *)(ch->i_port_id + 8)));
+ strlcpy(ch->sess_name, src_addr, sizeof(ch->sess_name));
+ snprintf(i_port_id, sizeof(i_port_id), "0x%016llx%016llx",
+ be64_to_cpu(*(__be64 *)nexus->i_port_id),
+ be64_to_cpu(*(__be64 *)(nexus->i_port_id + 8)));
pr_debug("registering session %s\n", ch->sess_name);
if (sport->port_guid_tpg.se_tpg_wwn)
ch->sess = target_alloc_session(&sport->port_guid_tpg, 0, 0,
TARGET_PROT_NORMAL,
- ch->ini_guid, ch, NULL);
+ ch->sess_name, ch, NULL);
if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
ch->sess = target_alloc_session(&sport->port_gid_tpg, 0, 0,
- TARGET_PROT_NORMAL, ch->sess_name, ch,
+ TARGET_PROT_NORMAL, i_port_id, ch,
NULL);
/* Retry without leading "0x" */
if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
ch->sess = target_alloc_session(&sport->port_gid_tpg, 0, 0,
TARGET_PROT_NORMAL,
- ch->sess_name + 2, ch, NULL);
+ i_port_id + 2, ch, NULL);
if (IS_ERR_OR_NULL(ch->sess)) {
- pr_info("Rejected login because no ACL has been configured yet for initiator %s.\n",
- ch->sess_name);
- rej->reason = cpu_to_be32((PTR_ERR(ch->sess) == -ENOMEM) ?
+ ret = PTR_ERR(ch->sess);
+ pr_info("Rejected login for initiator %s: ret = %d.\n",
+ ch->sess_name, ret);
+ rej->reason = cpu_to_be32(ret == -ENOMEM ?
SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES :
SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
+ goto reject;
+ }
+
+ mutex_lock(&sport->mutex);
+
+ if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
+ struct srpt_rdma_ch *ch2;
+
+ rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;
+
+ list_for_each_entry(ch2, &nexus->ch_list, list) {
+ if (srpt_disconnect_ch(ch2) < 0)
+ continue;
+ pr_info("Relogin - closed existing channel %s\n",
+ ch2->sess_name);
+ rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
+ }
+ } else {
+ rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
+ }
+
+ list_add_tail_rcu(&ch->list, &nexus->ch_list);
+
+ if (!sport->enabled) {
+ rej->reason = cpu_to_be32(
+ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ pr_info("rejected SRP_LOGIN_REQ because target %s_%d is not enabled\n",
+ sdev->device->name, port_num);
+ mutex_unlock(&sport->mutex);
+ goto reject;
+ }
+
+ mutex_unlock(&sport->mutex);
+
+ ret = srpt_ch_qp_rtr(ch, ch->qp);
+ if (ret) {
+ rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ pr_err("rejected SRP_LOGIN_REQ because enabling RTR failed (error code = %d)\n",
+ ret);
goto destroy_ib;
}
- pr_debug("Establish connection sess=%p name=%s cm_id=%p\n", ch->sess,
- ch->sess_name, ch->cm_id);
+ pr_debug("Establish connection sess=%p name=%s ch=%p\n", ch->sess,
+ ch->sess_name, ch);
/* create srp_login_response */
rsp->opcode = SRP_LOGIN_RSP;
@@ -2118,8 +2244,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
rsp->max_it_iu_len = req->req_it_iu_len;
rsp->max_ti_iu_len = req->req_it_iu_len;
ch->max_ti_iu_len = it_iu_len;
- rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT
- | SRP_BUF_FORMAT_INDIRECT);
+ rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
+ SRP_BUF_FORMAT_INDIRECT);
rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
atomic_set(&ch->req_lim, ch->rq_size);
atomic_set(&ch->req_lim_delta, 0);
@@ -2135,25 +2261,31 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
rep_param->responder_resources = 4;
rep_param->initiator_depth = 4;
- ret = ib_send_cm_rep(cm_id, rep_param);
- if (ret) {
- pr_err("sending SRP_LOGIN_REQ response failed"
- " (error code = %d)\n", ret);
- goto release_channel;
- }
+ /*
+ * Hold the sport mutex while accepting a connection to avoid that
+ * srpt_disconnect_ch() is invoked concurrently with this code.
+ */
+ mutex_lock(&sport->mutex);
+ if (sport->enabled && ch->state == CH_CONNECTING)
+ ret = ib_send_cm_rep(cm_id, rep_param);
+ else
+ ret = -EINVAL;
+ mutex_unlock(&sport->mutex);
- mutex_lock(&sdev->mutex);
- list_add_tail(&ch->list, &sdev->rch_list);
- mutex_unlock(&sdev->mutex);
+ switch (ret) {
+ case 0:
+ break;
+ case -EINVAL:
+ goto reject;
+ default:
+ rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ pr_err("sending SRP_LOGIN_REQ response failed (error code = %d)\n",
+ ret);
+ goto reject;
+ }
goto out;
-release_channel:
- srpt_disconnect_ch(ch);
- transport_deregister_session_configfs(ch->sess);
- transport_deregister_session(ch->sess);
- ch->sess = NULL;
-
destroy_ib:
srpt_destroy_ch_ib(ch);
@@ -2165,15 +2297,20 @@ free_recv_ring:
free_ring:
srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
ch->sport->sdev, ch->rq_size,
- ch->rsp_size, DMA_TO_DEVICE);
+ ch->max_rsp_size, DMA_TO_DEVICE);
free_ch:
+ cm_id->context = NULL;
kfree(ch);
+ ch = NULL;
+
+ WARN_ON_ONCE(ret == 0);
reject:
+ pr_info("Rejecting login with reason %#x\n", be32_to_cpu(rej->reason));
rej->opcode = SRP_LOGIN_REJ;
rej->tag = req->tag;
- rej->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT
- | SRP_BUF_FORMAT_INDIRECT);
+ rej->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
+ SRP_BUF_FORMAT_INDIRECT);
ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
(void *)rej, sizeof(*rej));
@@ -2186,6 +2323,19 @@ out:
return ret;
}
+static int srpt_ib_cm_req_recv(struct ib_cm_id *cm_id,
+ struct ib_cm_req_event_param *param,
+ void *private_data)
+{
+ char sguid[40];
+
+ srpt_format_guid(sguid, sizeof(sguid),
+ &param->primary_path->dgid.global.interface_id);
+
+ return srpt_cm_req_recv(cm_id, param->port, param->primary_path->pkey,
+ private_data, sguid);
+}
+
static void srpt_cm_rej_recv(struct srpt_rdma_ch *ch,
enum ib_cm_rej_reason reason,
const u8 *private_data,
@@ -2206,7 +2356,8 @@ static void srpt_cm_rej_recv(struct srpt_rdma_ch *ch,
}
/**
- * srpt_cm_rtu_recv() - Process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event.
+ * srpt_cm_rtu_recv - process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event
+ * @ch: SRPT RDMA channel.
*
* An IB_CM_RTU_RECEIVED message indicates that the connection is established
* and that the recipient may begin transmitting (RTU = ready to use).
@@ -2215,21 +2366,34 @@ static void srpt_cm_rtu_recv(struct srpt_rdma_ch *ch)
{
int ret;
- if (srpt_set_ch_state(ch, CH_LIVE)) {
- ret = srpt_ch_qp_rts(ch, ch->qp);
+ ret = srpt_ch_qp_rts(ch, ch->qp);
+ if (ret < 0) {
+ pr_err("%s-%d: QP transition to RTS failed\n", ch->sess_name,
+ ch->qp->qp_num);
+ srpt_close_ch(ch);
+ return;
+ }
- if (ret == 0) {
- /* Trigger wait list processing. */
- ret = srpt_zerolength_write(ch);
- WARN_ONCE(ret < 0, "%d\n", ret);
- } else {
- srpt_close_ch(ch);
- }
+ /*
+ * Note: it is not necessary to call srpt_close_ch() if the transition
+ * to the LIVE state fails, since a failed transition means that
+ * srpt_close_ch() has already been invoked from another thread.
+ */
+ if (!srpt_set_ch_state(ch, CH_LIVE)) {
+ pr_err("%s-%d: channel transition to LIVE state failed\n",
+ ch->sess_name, ch->qp->qp_num);
+ return;
}
+
+ /* Trigger wait list processing. */
+ ret = srpt_zerolength_write(ch);
+ WARN_ONCE(ret < 0, "%d\n", ret);
}
/**
- * srpt_cm_handler() - IB connection manager callback function.
+ * srpt_cm_handler - IB connection manager callback function
+ * @cm_id: IB/CM connection identifier.
+ * @event: IB/CM event.
*
 * A non-zero return value will cause the caller to destroy the CM ID.
*
@@ -2246,8 +2410,8 @@ static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
ret = 0;
switch (event->event) {
case IB_CM_REQ_RECEIVED:
- ret = srpt_cm_req_recv(cm_id, &event->param.req_rcvd,
- event->private_data);
+ ret = srpt_ib_cm_req_recv(cm_id, &event->param.req_rcvd,
+ event->private_data);
break;
case IB_CM_REJ_RECEIVED:
srpt_cm_rej_recv(ch, event->param.rej_rcvd.reason,
@@ -2294,11 +2458,11 @@ static int srpt_write_pending_status(struct se_cmd *se_cmd)
struct srpt_send_ioctx *ioctx;
ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
- return srpt_get_cmd_state(ioctx) == SRPT_STATE_NEED_DATA;
+ return ioctx->state == SRPT_STATE_NEED_DATA;
}
/*
- * srpt_write_pending() - Start data transfer from initiator to target (write).
+ * srpt_write_pending - Start data transfer from initiator to target (write).
*/
static int srpt_write_pending(struct se_cmd *se_cmd)
{
@@ -2355,7 +2519,8 @@ static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
}
/**
- * srpt_queue_response() - Transmits the response to a SCSI command.
+ * srpt_queue_response - transmit the response to a SCSI command
+ * @cmd: SCSI target command.
*
* Callback function called by the TCM core. Must not block since it can be
* invoked on the context of the IB completion handler.
@@ -2369,13 +2534,11 @@ static void srpt_queue_response(struct se_cmd *cmd)
struct ib_send_wr send_wr, *first_wr = &send_wr, *bad_wr;
struct ib_sge sge;
enum srpt_command_state state;
- unsigned long flags;
int resp_len, ret, i;
u8 srp_tm_status;
BUG_ON(!ch);
- spin_lock_irqsave(&ioctx->spinlock, flags);
state = ioctx->state;
switch (state) {
case SRPT_STATE_NEW:
@@ -2390,7 +2553,6 @@ static void srpt_queue_response(struct se_cmd *cmd)
ch, ioctx->ioctx.index, ioctx->state);
break;
}
- spin_unlock_irqrestore(&ioctx->spinlock, flags);
if (unlikely(WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT)))
return;
@@ -2494,26 +2656,56 @@ static void srpt_refresh_port_work(struct work_struct *work)
srpt_refresh_port(sport);
}
+static bool srpt_ch_list_empty(struct srpt_port *sport)
+{
+ struct srpt_nexus *nexus;
+ bool res = true;
+
+ rcu_read_lock();
+ list_for_each_entry(nexus, &sport->nexus_list, entry)
+ if (!list_empty(&nexus->ch_list))
+ res = false;
+ rcu_read_unlock();
+
+ return res;
+}
+
/**
- * srpt_release_sdev() - Free the channel resources associated with a target.
+ * srpt_release_sport - disable login and wait for associated channels
+ * @sport: SRPT HCA port.
*/
-static int srpt_release_sdev(struct srpt_device *sdev)
+static int srpt_release_sport(struct srpt_port *sport)
{
- int i, res;
+ struct srpt_nexus *nexus, *next_n;
+ struct srpt_rdma_ch *ch;
WARN_ON_ONCE(irqs_disabled());
- BUG_ON(!sdev);
-
- mutex_lock(&sdev->mutex);
- for (i = 0; i < ARRAY_SIZE(sdev->port); i++)
- srpt_set_enabled(&sdev->port[i], false);
- mutex_unlock(&sdev->mutex);
+ mutex_lock(&sport->mutex);
+ srpt_set_enabled(sport, false);
+ mutex_unlock(&sport->mutex);
+
+ while (wait_event_timeout(sport->ch_releaseQ,
+ srpt_ch_list_empty(sport), 5 * HZ) <= 0) {
+ pr_info("%s_%d: waiting for session unregistration ...\n",
+ sport->sdev->device->name, sport->port);
+ rcu_read_lock();
+ list_for_each_entry(nexus, &sport->nexus_list, entry) {
+ list_for_each_entry(ch, &nexus->ch_list, list) {
+ pr_info("%s-%d: state %s\n",
+ ch->sess_name, ch->qp->qp_num,
+ get_ch_state_name(ch->state));
+ }
+ }
+ rcu_read_unlock();
+ }
- res = wait_event_interruptible(sdev->ch_releaseQ,
- list_empty_careful(&sdev->rch_list));
- if (res)
- pr_err("%s: interrupted.\n", __func__);
+ mutex_lock(&sport->mutex);
+ list_for_each_entry_safe(nexus, next_n, &sport->nexus_list, entry) {
+ list_del(&nexus->entry);
+ kfree_rcu(nexus, rcu);
+ }
+ mutex_unlock(&sport->mutex);
return 0;
}
@@ -2600,8 +2792,10 @@ static int srpt_alloc_srq(struct srpt_device *sdev)
sdev->use_srq = true;
sdev->srq = srq;
- for (i = 0; i < sdev->srq_size; ++i)
+ for (i = 0; i < sdev->srq_size; ++i) {
+ INIT_LIST_HEAD(&sdev->ioctx_ring[i]->wait_list);
srpt_post_recv(sdev, NULL, sdev->ioctx_ring[i]);
+ }
return 0;
}
@@ -2623,7 +2817,8 @@ static int srpt_use_srq(struct srpt_device *sdev, bool use_srq)
}
/**
- * srpt_add_one() - Infiniband device addition callback function.
+ * srpt_add_one - InfiniBand device addition callback function
+ * @device: Describes an HCA.
*/
static void srpt_add_one(struct ib_device *device)
{
@@ -2638,9 +2833,7 @@ static void srpt_add_one(struct ib_device *device)
goto err;
sdev->device = device;
- INIT_LIST_HEAD(&sdev->rch_list);
- init_waitqueue_head(&sdev->ch_releaseQ);
- mutex_init(&sdev->mutex);
+ mutex_init(&sdev->sdev_mutex);
sdev->pd = ib_alloc_pd(device, 0);
if (IS_ERR(sdev->pd))
@@ -2681,6 +2874,9 @@ static void srpt_add_one(struct ib_device *device)
for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
sport = &sdev->port[i - 1];
+ INIT_LIST_HEAD(&sport->nexus_list);
+ init_waitqueue_head(&sport->ch_releaseQ);
+ mutex_init(&sport->mutex);
sport->sdev = sdev;
sport->port = i;
sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE;
@@ -2721,7 +2917,9 @@ err:
}
/**
- * srpt_remove_one() - InfiniBand device removal callback function.
+ * srpt_remove_one - InfiniBand device removal callback function
+ * @device: Describes an HCA.
+ * @client_data: The value passed as the third argument to ib_set_client_data().
*/
static void srpt_remove_one(struct ib_device *device, void *client_data)
{
@@ -2751,7 +2949,9 @@ static void srpt_remove_one(struct ib_device *device, void *client_data)
spin_lock(&srpt_dev_lock);
list_del(&sdev->list);
spin_unlock(&srpt_dev_lock);
- srpt_release_sdev(sdev);
+
+ for (i = 0; i < sdev->device->phys_port_cnt; i++)
+ srpt_release_sport(&sdev->port[i]);
srpt_free_srq(sdev);
@@ -2827,7 +3027,8 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
}
/**
- * srpt_close_session() - Forcibly close a session.
+ * srpt_close_session - forcibly close a session
+ * @se_sess: SCSI target session.
*
* Callback function invoked by the TCM core to clean up sessions associated
* with a node ACL when the user invokes
@@ -2836,15 +3037,13 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
static void srpt_close_session(struct se_session *se_sess)
{
struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr;
- struct srpt_device *sdev = ch->sport->sdev;
- mutex_lock(&sdev->mutex);
srpt_disconnect_ch_sync(ch);
- mutex_unlock(&sdev->mutex);
}
/**
- * srpt_sess_get_index() - Return the value of scsiAttIntrPortIndex (SCSI-MIB).
+ * srpt_sess_get_index - return the value of scsiAttIntrPortIndex (SCSI-MIB)
+ * @se_sess: SCSI target session.
*
* A quote from RFC 4455 (SCSI-MIB) about this MIB object:
* This object represents an arbitrary integer used to uniquely identify a
@@ -2866,7 +3065,7 @@ static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd)
struct srpt_send_ioctx *ioctx;
ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
- return srpt_get_cmd_state(ioctx);
+ return ioctx->state;
}
static int srpt_parse_guid(u64 *guid, const char *name)
@@ -2883,7 +3082,7 @@ out:
}
/**
- * srpt_parse_i_port_id() - Parse an initiator port ID.
+ * srpt_parse_i_port_id - parse an initiator port ID
* @name: ASCII representation of a 128-bit initiator port ID.
* @i_port_id: Binary 128-bit port ID.
*/
@@ -3064,18 +3263,24 @@ static ssize_t srpt_tpg_attrib_use_srq_store(struct config_item *item,
if (val != !!val)
return -EINVAL;
- ret = mutex_lock_interruptible(&sdev->mutex);
+ ret = mutex_lock_interruptible(&sdev->sdev_mutex);
if (ret < 0)
return ret;
+ ret = mutex_lock_interruptible(&sport->mutex);
+ if (ret < 0)
+ goto unlock_sdev;
enabled = sport->enabled;
/* Log out all initiator systems before changing 'use_srq'. */
srpt_set_enabled(sport, false);
sport->port_attrib.use_srq = val;
srpt_use_srq(sdev, sport->port_attrib.use_srq);
srpt_set_enabled(sport, enabled);
- mutex_unlock(&sdev->mutex);
+ ret = count;
+ mutex_unlock(&sport->mutex);
+unlock_sdev:
+ mutex_unlock(&sdev->sdev_mutex);
- return count;
+ return ret;
}
CONFIGFS_ATTR(srpt_tpg_attrib_, srp_max_rdma_size);
@@ -3104,7 +3309,6 @@ static ssize_t srpt_tpg_enable_store(struct config_item *item,
{
struct se_portal_group *se_tpg = to_tpg(item);
struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
- struct srpt_device *sdev = sport->sdev;
unsigned long tmp;
int ret;
@@ -3119,9 +3323,9 @@ static ssize_t srpt_tpg_enable_store(struct config_item *item,
return -EINVAL;
}
- mutex_lock(&sdev->mutex);
+ mutex_lock(&sport->mutex);
srpt_set_enabled(sport, tmp);
- mutex_unlock(&sdev->mutex);
+ mutex_unlock(&sport->mutex);
return count;
}
@@ -3134,8 +3338,10 @@ static struct configfs_attribute *srpt_tpg_attrs[] = {
};
/**
- * configfs callback invoked for
- * mkdir /sys/kernel/config/target/$driver/$port/$tpg
+ * srpt_make_tpg - configfs callback invoked for mkdir /sys/kernel/config/target/$driver/$port/$tpg
+ * @wwn: Corresponds to $driver/$port.
+ * @group: Not used.
+ * @name: $tpg.
*/
static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
struct config_group *group,
@@ -3157,8 +3363,8 @@ static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
}
/**
- * configfs callback invoked for
- * rmdir /sys/kernel/config/target/$driver/$port/$tpg
+ * srpt_drop_tpg - configfs callback invoked for rmdir /sys/kernel/config/target/$driver/$port/$tpg
+ * @tpg: Target portal group to deregister.
*/
static void srpt_drop_tpg(struct se_portal_group *tpg)
{
@@ -3169,8 +3375,10 @@ static void srpt_drop_tpg(struct se_portal_group *tpg)
}
/**
- * configfs callback invoked for
- * mkdir /sys/kernel/config/target/$driver/$port
+ * srpt_make_tport - configfs callback invoked for mkdir /sys/kernel/config/target/$driver/$port
+ * @tf: Not used.
+ * @group: Not used.
+ * @name: $port.
*/
static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
struct config_group *group,
@@ -3180,8 +3388,8 @@ static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
}
/**
- * configfs callback invoked for
- * rmdir /sys/kernel/config/target/$driver/$port
+ * srpt_drop_tport - configfs callback invoked for rmdir /sys/kernel/config/target/$driver/$port
+ * @wwn: $port.
*/
static void srpt_drop_tport(struct se_wwn *wwn)
{
@@ -3239,7 +3447,7 @@ static const struct target_core_fabric_ops srpt_template = {
};
/**
- * srpt_init_module() - Kernel module initialization.
+ * srpt_init_module - kernel module initialization
*
* Note: Since ib_register_client() registers callback functions, and since at
* least one of these callback functions (srpt_add_one()) calls target core
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index 673387d365a3..4d9199fd00dc 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -54,6 +54,8 @@
*/
#define SRP_SERVICE_NAME_PREFIX "SRP.T10:"
+struct srpt_nexus;
+
enum {
/*
* SRP IOControllerProfile attributes for SRP target ports that have
@@ -114,7 +116,7 @@ enum {
MIN_SRPT_SQ_SIZE = 16,
DEF_SRPT_SQ_SIZE = 4096,
- SRPT_RQ_SIZE = 128,
+ MAX_SRPT_RQ_SIZE = 128,
MIN_SRPT_SRQ_SIZE = 4,
DEFAULT_SRPT_SRQ_SIZE = 4095,
MAX_SRPT_SRQ_SIZE = 65535,
@@ -134,7 +136,7 @@ enum {
};
/**
- * enum srpt_command_state - SCSI command state managed by SRPT.
+ * enum srpt_command_state - SCSI command state managed by SRPT
* @SRPT_STATE_NEW: New command arrived and is being processed.
* @SRPT_STATE_NEED_DATA: Processing a write or bidir command and waiting
* for data arrival.
@@ -158,7 +160,8 @@ enum srpt_command_state {
};
/**
- * struct srpt_ioctx - Shared SRPT I/O context information.
+ * struct srpt_ioctx - shared SRPT I/O context information
+ * @cqe: Completion queue element.
* @buf: Pointer to the buffer.
* @dma: DMA address of the buffer.
* @index: Index of the I/O context in its ioctx_ring array.
@@ -171,7 +174,7 @@ struct srpt_ioctx {
};
/**
- * struct srpt_recv_ioctx - SRPT receive I/O context.
+ * struct srpt_recv_ioctx - SRPT receive I/O context
* @ioctx: See above.
* @wait_list: Node for insertion in srpt_rdma_ch.cmd_wait_list.
*/
@@ -187,13 +190,20 @@ struct srpt_rw_ctx {
};
/**
- * struct srpt_send_ioctx - SRPT send I/O context.
+ * struct srpt_send_ioctx - SRPT send I/O context
* @ioctx: See above.
* @ch: Channel pointer.
- * @spinlock: Protects 'state'.
+ * @s_rw_ctx: @rw_ctxs points here if only a single rw_ctx is needed.
+ * @rw_ctxs: RDMA read/write contexts.
+ * @rdma_cqe: RDMA completion queue element.
+ * @free_list: Node in srpt_rdma_ch.free_list.
* @state: I/O context state.
* @cmd: Target core command data structure.
- * @sense_data: SCSI sense data.
+ * @n_rdma: Number of work requests needed to transfer this ioctx.
+ * @n_rw_ctx: Size of rw_ctxs array.
+ * @queue_status_only: Send a SCSI status back to the initiator but no data.
+ * @sense_data: Sense data to be sent to the initiator.
*/
struct srpt_send_ioctx {
struct srpt_ioctx ioctx;
@@ -204,10 +214,8 @@ struct srpt_send_ioctx {
struct ib_cqe rdma_cqe;
struct list_head free_list;
- spinlock_t spinlock;
enum srpt_command_state state;
struct se_cmd cmd;
- struct completion tx_done;
u8 n_rdma;
u8 n_rw_ctx;
bool queue_status_only;
@@ -215,7 +223,7 @@ struct srpt_send_ioctx {
};
/**
- * enum rdma_ch_state - SRP channel state.
+ * enum rdma_ch_state - SRP channel state
* @CH_CONNECTING: QP is in RTR state; waiting for RTU.
* @CH_LIVE: QP is in RTS state.
* @CH_DISCONNECTING: DREQ has been sent and waiting for DREP or DREQ has
@@ -233,17 +241,19 @@ enum rdma_ch_state {
};
/**
- * struct srpt_rdma_ch - RDMA channel.
- * @cm_id: IB CM ID associated with the channel.
+ * struct srpt_rdma_ch - RDMA channel
+ * @nexus: I_T nexus this channel is associated with.
* @qp: IB queue pair used for communicating over this channel.
+ * @cm_id: IB CM ID associated with the channel.
* @cq: IB completion queue for this channel.
+ * @zw_cqe: Zero-length write CQE.
+ * @rcu: RCU head.
+ * @kref: kref for this channel.
* @rq_size: IB receive queue size.
- * @rsp_size IB response message size in bytes.
+ * @max_rsp_size: Maximum size of an SRP_RSP message in bytes.
* @sq_wr_avail: number of work requests available in the send queue.
* @sport: pointer to the information of the HCA port used by this
* channel.
- * @i_port_id: 128-bit initiator port identifier copied from SRP_LOGIN_REQ.
- * @t_port_id: 128-bit target port identifier copied from SRP_LOGIN_REQ.
* @max_ti_iu_len: maximum target-to-initiator information unit length.
* @req_lim: request limit: maximum number of requests that may be sent
* by the initiator without having received a response.
@@ -251,30 +261,34 @@ enum rdma_ch_state {
* @spinlock: Protects free_list and state.
* @free_list: Head of list with free send I/O contexts.
* @state: channel state. See also enum rdma_ch_state.
+ * @processing_wait_list: Whether or not cmd_wait_list is being processed.
* @ioctx_ring: Send ring.
* @ioctx_recv_ring: Receive I/O context ring.
- * @list: Node for insertion in the srpt_device.rch_list list.
+ * @list: Node in srpt_nexus.ch_list.
* @cmd_wait_list: List of SCSI commands that arrived before the RTU event. This
* list contains struct srpt_ioctx elements and is protected
* against concurrent modification by the cm_id spinlock.
+ * @pkey: P_Key of the IB partition for this SRP channel.
* @sess: Session information associated with this SRP channel.
* @sess_name: Session name.
- * @ini_guid: Initiator port GUID.
* @release_work: Allows scheduling of srpt_release_channel().
- * @release_done: Enables waiting for srpt_release_channel() completion.
*/
struct srpt_rdma_ch {
- struct ib_cm_id *cm_id;
+ struct srpt_nexus *nexus;
struct ib_qp *qp;
+ union {
+ struct {
+ struct ib_cm_id *cm_id;
+ } ib_cm;
+ };
struct ib_cq *cq;
struct ib_cqe zw_cqe;
+ struct rcu_head rcu;
struct kref kref;
int rq_size;
- u32 rsp_size;
+ u32 max_rsp_size;
atomic_t sq_wr_avail;
struct srpt_port *sport;
- u8 i_port_id[16];
- u8 t_port_id[16];
int max_ti_iu_len;
atomic_t req_lim;
atomic_t req_lim_delta;
@@ -285,15 +299,31 @@ struct srpt_rdma_ch {
struct srpt_recv_ioctx **ioctx_recv_ring;
struct list_head list;
struct list_head cmd_wait_list;
+ uint16_t pkey;
+ bool processing_wait_list;
struct se_session *sess;
- u8 sess_name[36];
- u8 ini_guid[24];
+ u8 sess_name[24];
struct work_struct release_work;
- struct completion *release_done;
};
/**
- * struct srpt_port_attib - Attributes for SRPT port
+ * struct srpt_nexus - I_T nexus
+ * @rcu: RCU head for this data structure.
+ * @entry: srpt_port.nexus_list list node.
+ * @ch_list: struct srpt_rdma_ch list. Protected by srpt_port.mutex.
+ * @i_port_id: 128-bit initiator port identifier copied from SRP_LOGIN_REQ.
+ * @t_port_id: 128-bit target port identifier copied from SRP_LOGIN_REQ.
+ */
+struct srpt_nexus {
+ struct rcu_head rcu;
+ struct list_head entry;
+ struct list_head ch_list;
+ u8 i_port_id[16];
+ u8 t_port_id[16];
+};
+
+/**
+ * struct srpt_port_attrib - attributes for SRPT port
* @srp_max_rdma_size: Maximum size of SRP RDMA transfers for new connections.
* @srp_max_rsp_size: Maximum size of SRP response messages in bytes.
 * @srp_sq_size: Per-channel send queue (SQ) size.
@@ -307,7 +337,7 @@ struct srpt_port_attrib {
};
/**
- * struct srpt_port - Information associated by SRPT with a single IB port.
+ * struct srpt_port - information associated by SRPT with a single IB port
* @sdev: backpointer to the HCA information.
* @mad_agent: per-port management datagram processing information.
* @enabled: Whether or not this target port is enabled.
@@ -323,7 +353,10 @@ struct srpt_port_attrib {
* @port_guid_wwn: WWN associated with target port GUID.
* @port_gid_tpg: TPG associated with target port GID.
* @port_gid_wwn: WWN associated with target port GID.
- * @port_acl_list: Head of the list with all node ACLs for this port.
+ * @port_attrib: Port attributes that can be accessed through configfs.
+ * @ch_releaseQ: Enables waiting for removal from nexus_list.
+ * @mutex: Protects nexus_list.
+ * @nexus_list: Nexus list. See also srpt_nexus.entry.
*/
struct srpt_port {
struct srpt_device *sdev;
@@ -341,21 +374,22 @@ struct srpt_port {
struct se_portal_group port_gid_tpg;
struct se_wwn port_gid_wwn;
struct srpt_port_attrib port_attrib;
+ wait_queue_head_t ch_releaseQ;
+ struct mutex mutex;
+ struct list_head nexus_list;
};
/**
- * struct srpt_device - Information associated by SRPT with a single HCA.
+ * struct srpt_device - information associated by SRPT with a single HCA
* @device: Backpointer to the struct ib_device managed by the IB core.
* @pd: IB protection domain.
* @lkey: L_Key (local key) with write access to all local memory.
* @srq: Per-HCA SRQ (shared receive queue).
* @cm_id: Connection identifier.
* @srq_size: SRQ size.
+ * @sdev_mutex: Serializes use_srq changes.
* @use_srq: Whether or not to use SRQ.
* @ioctx_ring: Per-HCA SRQ.
- * @rch_list: Per-device channel list -- see also srpt_rdma_ch.list.
- * @ch_releaseQ: Enables waiting for removal from rch_list.
- * @mutex: Protects rch_list.
* @port: Information about the ports owned by this HCA.
* @event_handler: Per-HCA asynchronous IB event handler.
* @list: Node in srpt_dev_list.
@@ -367,11 +401,9 @@ struct srpt_device {
struct ib_srq *srq;
struct ib_cm_id *cm_id;
int srq_size;
+ struct mutex sdev_mutex;
bool use_srq;
struct srpt_recv_ioctx **ioctx_ring;
- struct list_head rch_list;
- wait_queue_head_t ch_releaseQ;
- struct mutex mutex;
struct srpt_port port[2];
struct ib_event_handler event_handler;
struct list_head list;
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 925571475005..0193dd4f0452 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -635,11 +635,11 @@ static ssize_t evdev_read(struct file *file, char __user *buffer,
}
/* No kernel lock - fine */
-static unsigned int evdev_poll(struct file *file, poll_table *wait)
+static __poll_t evdev_poll(struct file *file, poll_table *wait)
{
struct evdev_client *client = file->private_data;
struct evdev *evdev = client->evdev;
- unsigned int mask;
+ __poll_t mask;
poll_wait(file, &evdev->wait, wait);
diff --git a/drivers/input/input.c b/drivers/input/input.c
index e30642db50d5..0d0b2ab1bb6b 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1048,7 +1048,7 @@ static inline void input_wakeup_procfs_readers(void)
wake_up(&input_devices_poll_wait);
}
-static unsigned int input_proc_devices_poll(struct file *file, poll_table *wait)
+static __poll_t input_proc_devices_poll(struct file *file, poll_table *wait)
{
poll_wait(file, &input_devices_poll_wait, wait);
if (file->f_version != input_devices_state) {
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 7b29a8944039..fe3255572886 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -436,7 +436,7 @@ static ssize_t joydev_read(struct file *file, char __user *buf,
}
/* No kernel lock - fine */
-static unsigned int joydev_poll(struct file *file, poll_table *wait)
+static __poll_t joydev_poll(struct file *file, poll_table *wait)
{
struct joydev_client *client = file->private_data;
struct joydev *joydev = client->joydev;
diff --git a/drivers/input/misc/hp_sdc_rtc.c b/drivers/input/misc/hp_sdc_rtc.c
index 1c8c56efc995..9c3f7ec3bd3d 100644
--- a/drivers/input/misc/hp_sdc_rtc.c
+++ b/drivers/input/misc/hp_sdc_rtc.c
@@ -408,7 +408,7 @@ static ssize_t hp_sdc_rtc_read(struct file *file, char __user *buf,
return retval;
}
-static unsigned int hp_sdc_rtc_poll(struct file *file, poll_table *wait)
+static __poll_t hp_sdc_rtc_poll(struct file *file, poll_table *wait)
{
unsigned long l;
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 39ddd9a73feb..91df0df15e68 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -694,7 +694,7 @@ static ssize_t uinput_read(struct file *file, char __user *buffer,
return retval;
}
-static unsigned int uinput_poll(struct file *file, poll_table *wait)
+static __poll_t uinput_poll(struct file *file, poll_table *wait)
{
struct uinput_device *udev = file->private_data;
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index 2d7f691ec71c..731d84ae5101 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -757,11 +757,11 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
}
/* No kernel lock - fine */
-static unsigned int mousedev_poll(struct file *file, poll_table *wait)
+static __poll_t mousedev_poll(struct file *file, poll_table *wait)
{
struct mousedev_client *client = file->private_data;
struct mousedev *mousedev = client->mousedev;
- unsigned int mask;
+ __poll_t mask;
poll_wait(file, &mousedev->wait, wait);
diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
index 516f9fe77a17..fccf55a380b2 100644
--- a/drivers/input/serio/serio_raw.c
+++ b/drivers/input/serio/serio_raw.c
@@ -239,11 +239,11 @@ out:
return retval;
}
-static unsigned int serio_raw_poll(struct file *file, poll_table *wait)
+static __poll_t serio_raw_poll(struct file *file, poll_table *wait)
{
struct serio_raw_client *client = file->private_data;
struct serio_raw *serio_raw = client->serio_raw;
- unsigned int mask;
+ __poll_t mask;
poll_wait(file, &serio_raw->wait, wait);
diff --git a/drivers/input/serio/userio.c b/drivers/input/serio/userio.c
index df1fd41860ac..a63de06b08bc 100644
--- a/drivers/input/serio/userio.c
+++ b/drivers/input/serio/userio.c
@@ -248,7 +248,7 @@ out:
return error ?: count;
}
-static unsigned int userio_char_poll(struct file *file, poll_table *wait)
+static __poll_t userio_char_poll(struct file *file, poll_table *wait)
{
struct userio_device *userio = file->private_data;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 4a2de34895ec..a1373cf34326 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -4808,7 +4808,7 @@ int __init intel_iommu_init(void)
up_write(&dmar_global_lock);
pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
-#ifdef CONFIG_SWIOTLB
+#if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
swiotlb = 0;
#endif
dma_ops = &intel_dma_ops;
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index c70476b34a53..d913aec85109 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -343,4 +343,12 @@ config MESON_IRQ_GPIO
help
Support Meson SoC Family GPIO Interrupt Multiplexer
+config GOLDFISH_PIC
+ bool "Goldfish programmable interrupt controller"
+ depends on MIPS && (GOLDFISH || COMPILE_TEST)
+ select IRQ_DOMAIN
+ help
+ Say yes here to enable the Goldfish interrupt controller driver used
+ on Goldfish-based virtual platforms.
+
endmenu
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index d2df34a54d38..d27e3e3619e0 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -84,3 +84,4 @@ obj-$(CONFIG_QCOM_IRQ_COMBINER) += qcom-irq-combiner.o
obj-$(CONFIG_IRQ_UNIPHIER_AIDET) += irq-uniphier-aidet.o
obj-$(CONFIG_ARCH_SYNQUACER) += irq-sni-exiu.o
obj-$(CONFIG_MESON_IRQ_GPIO) += irq-meson-gpio.o
+obj-$(CONFIG_GOLDFISH_PIC) += irq-goldfish-pic.o
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index 667b9e14b032..dfe4a460340b 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -98,13 +98,35 @@ static struct irq_chip bcm2836_arm_irqchip_gpu = {
.irq_unmask = bcm2836_arm_irqchip_unmask_gpu_irq,
};
-static void bcm2836_arm_irqchip_register_irq(int hwirq, struct irq_chip *chip)
-{
- int irq = irq_create_mapping(intc.domain, hwirq);
+static int bcm2836_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct irq_chip *chip;
+
+ switch (hw) {
+ case LOCAL_IRQ_CNTPSIRQ:
+ case LOCAL_IRQ_CNTPNSIRQ:
+ case LOCAL_IRQ_CNTHPIRQ:
+ case LOCAL_IRQ_CNTVIRQ:
+ chip = &bcm2836_arm_irqchip_timer;
+ break;
+ case LOCAL_IRQ_GPU_FAST:
+ chip = &bcm2836_arm_irqchip_gpu;
+ break;
+ case LOCAL_IRQ_PMU_FAST:
+ chip = &bcm2836_arm_irqchip_pmu;
+ break;
+ default:
+ pr_warn_once("Unexpected hw irq: %lu\n", hw);
+ return -EINVAL;
+ }
irq_set_percpu_devid(irq);
- irq_set_chip_and_handler(irq, chip, handle_percpu_devid_irq);
+ irq_domain_set_info(d, irq, hw, chip, d->host_data,
+ handle_percpu_devid_irq, NULL, NULL);
irq_set_status_flags(irq, IRQ_NOAUTOEN);
+
+ return 0;
}
static void
@@ -165,7 +187,8 @@ static int bcm2836_cpu_dying(unsigned int cpu)
#endif
static const struct irq_domain_ops bcm2836_arm_irqchip_intc_ops = {
- .xlate = irq_domain_xlate_onecell
+ .xlate = irq_domain_xlate_onetwocell,
+ .map = bcm2836_map,
};
static void
@@ -218,19 +241,6 @@ static int __init bcm2836_arm_irqchip_l1_intc_of_init(struct device_node *node,
if (!intc.domain)
panic("%pOF: unable to create IRQ domain\n", node);
- bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTPSIRQ,
- &bcm2836_arm_irqchip_timer);
- bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTPNSIRQ,
- &bcm2836_arm_irqchip_timer);
- bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTHPIRQ,
- &bcm2836_arm_irqchip_timer);
- bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTVIRQ,
- &bcm2836_arm_irqchip_timer);
- bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_GPU_FAST,
- &bcm2836_arm_irqchip_gpu);
- bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_PMU_FAST,
- &bcm2836_arm_irqchip_pmu);
-
bcm2836_arm_irqchip_smp_init();
set_handle_irq(bcm2836_arm_irqchip_handle_irq);
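With the .map callback installed, per-hwirq setup now happens lazily: the domain core calls bcm2836_map() the first time a mapping is requested, which is what lets the bcm2836_arm_irqchip_register_irq() calls above be removed. A hedged consumer-side sketch (the wrapper function is hypothetical; LOCAL_IRQ_CNTVIRQ comes from the driver):

static int example_map_local_timer(struct irq_domain *domain)
{
	/* Triggers bcm2836_map() for this hwirq if no mapping exists yet. */
	unsigned int virq = irq_create_mapping(domain, LOCAL_IRQ_CNTVIRQ);

	if (!virq)
		return -ENOMEM;
	return virq;	/* ready for request_percpu_irq() and friends */
}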
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index b56c3e23f0af..a57c0fbbd34a 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -1070,31 +1070,6 @@ static int __init gic_validate_dist_version(void __iomem *dist_base)
return 0;
}
-static int get_cpu_number(struct device_node *dn)
-{
- const __be32 *cell;
- u64 hwid;
- int cpu;
-
- cell = of_get_property(dn, "reg", NULL);
- if (!cell)
- return -1;
-
- hwid = of_read_number(cell, of_n_addr_cells(dn));
-
- /*
- * Non affinity bits must be set to 0 in the DT
- */
- if (hwid & ~MPIDR_HWID_BITMASK)
- return -1;
-
- for_each_possible_cpu(cpu)
- if (cpu_logical_map(cpu) == hwid)
- return cpu;
-
- return -1;
-}
-
/* Create all possible partitions at boot time */
static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
{
@@ -1145,8 +1120,8 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
if (WARN_ON(!cpu_node))
continue;
- cpu = get_cpu_number(cpu_node);
- if (WARN_ON(cpu == -1))
+ cpu = of_cpu_node_to_id(cpu_node);
+ if (WARN_ON(cpu < 0))
continue;
pr_cont("%pOF[%d] ", cpu_node, cpu);
@@ -1331,6 +1306,10 @@ gic_acpi_parse_madt_gicc(struct acpi_subtable_header *header,
u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
void __iomem *redist_base;
+ /* A GICC entry with !ACPI_MADT_ENABLED set is unusable, so skip it */
+ if (!(gicc->flags & ACPI_MADT_ENABLED))
+ return 0;
+
redist_base = ioremap(gicc->gicr_base_address, size);
if (!redist_base)
return -ENOMEM;
@@ -1380,6 +1359,13 @@ static int __init gic_acpi_match_gicc(struct acpi_subtable_header *header,
if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address)
return 0;
+ /*
+ * It's perfectly valid for firmware to pass a disabled GICC entry;
+ * the driver should not treat it as an error. Skip the entry instead
+ * of failing the probe.
+ */
+ if (!(gicc->flags & ACPI_MADT_ENABLED))
+ return 0;
+
return -ENODEV;
}
diff --git a/drivers/irqchip/irq-goldfish-pic.c b/drivers/irqchip/irq-goldfish-pic.c
new file mode 100644
index 000000000000..2a92f03c73e4
--- /dev/null
+++ b/drivers/irqchip/irq-goldfish-pic.c
@@ -0,0 +1,139 @@
+/*
+ * Driver for MIPS Goldfish Programmable Interrupt Controller.
+ *
+ * Author: Miodrag Dinic <miodrag.dinic@mips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define GFPIC_NR_IRQS 32
+
+/* 8..39 Cascaded Goldfish PIC interrupts */
+#define GFPIC_IRQ_BASE 8
+
+#define GFPIC_REG_IRQ_PENDING 0x04
+#define GFPIC_REG_IRQ_DISABLE_ALL 0x08
+#define GFPIC_REG_IRQ_DISABLE 0x0c
+#define GFPIC_REG_IRQ_ENABLE 0x10
+
+struct goldfish_pic_data {
+ void __iomem *base;
+ struct irq_domain *irq_domain;
+};
+
+static void goldfish_pic_cascade(struct irq_desc *desc)
+{
+ struct goldfish_pic_data *gfpic = irq_desc_get_handler_data(desc);
+ struct irq_chip *host_chip = irq_desc_get_chip(desc);
+ u32 pending, hwirq, virq;
+
+ chained_irq_enter(host_chip, desc);
+
+ pending = readl(gfpic->base + GFPIC_REG_IRQ_PENDING);
+ while (pending) {
+ hwirq = __fls(pending);
+ virq = irq_linear_revmap(gfpic->irq_domain, hwirq);
+ generic_handle_irq(virq);
+ pending &= ~(1 << hwirq);
+ }
+
+ chained_irq_exit(host_chip, desc);
+}
+
+static const struct irq_domain_ops goldfish_irq_domain_ops = {
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int __init goldfish_pic_of_init(struct device_node *of_node,
+ struct device_node *parent)
+{
+ struct goldfish_pic_data *gfpic;
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+ unsigned int parent_irq;
+ int ret = 0;
+
+ gfpic = kzalloc(sizeof(*gfpic), GFP_KERNEL);
+ if (!gfpic) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ parent_irq = irq_of_parse_and_map(of_node, 0);
+ if (!parent_irq) {
+ pr_err("Failed to map parent IRQ!\n");
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ gfpic->base = of_iomap(of_node, 0);
+ if (!gfpic->base) {
+ pr_err("Failed to map base address!\n");
+ ret = -ENOMEM;
+ goto out_unmap_irq;
+ }
+
+ /* Mask interrupts. */
+ writel(1, gfpic->base + GFPIC_REG_IRQ_DISABLE_ALL);
+
+ gc = irq_alloc_generic_chip("GFPIC", 1, GFPIC_IRQ_BASE, gfpic->base,
+ handle_level_irq);
+ if (!gc) {
+ pr_err("Failed to allocate chip structures!\n");
+ ret = -ENOMEM;
+ goto out_iounmap;
+ }
+
+ ct = gc->chip_types;
+ ct->regs.enable = GFPIC_REG_IRQ_ENABLE;
+ ct->regs.disable = GFPIC_REG_IRQ_DISABLE;
+ ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
+ ct->chip.irq_mask = irq_gc_mask_disable_reg;
+
+ irq_setup_generic_chip(gc, IRQ_MSK(GFPIC_NR_IRQS), 0,
+ IRQ_NOPROBE | IRQ_LEVEL, 0);
+
+ gfpic->irq_domain = irq_domain_add_legacy(of_node, GFPIC_NR_IRQS,
+ GFPIC_IRQ_BASE, 0,
+ &goldfish_irq_domain_ops,
+ NULL);
+ if (!gfpic->irq_domain) {
+ pr_err("Failed to add irqdomain!\n");
+ ret = -ENOMEM;
+ goto out_destroy_generic_chip;
+ }
+
+ irq_set_chained_handler_and_data(parent_irq,
+ goldfish_pic_cascade, gfpic);
+
+ pr_info("Successfully registered.\n");
+ return 0;
+
+out_destroy_generic_chip:
+ irq_destroy_generic_chip(gc, IRQ_MSK(GFPIC_NR_IRQS),
+ IRQ_NOPROBE | IRQ_LEVEL, 0);
+out_iounmap:
+ iounmap(gfpic->base);
+out_unmap_irq:
+ irq_dispose_mapping(parent_irq);
+out_free:
+ kfree(gfpic);
+out_err:
+ pr_err("Failed to initialize! (errno = %d)\n", ret);
+ return ret;
+}
+
+IRQCHIP_DECLARE(google_gf_pic, "google,goldfish-pic", goldfish_pic_of_init);
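The cascade handler above drains GFPIC_REG_IRQ_PENDING by repeatedly taking the highest set bit. A standalone user-space sketch of that loop, with a made-up register value of 0x22 (hwirqs 1 and 5 pending):

#include <stdio.h>

/* Stand-in for the kernel's __fls(): index of the highest set bit. */
static unsigned int fls_hi(unsigned int x)
{
	unsigned int r = 0;

	while (x >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int pending = 0x22;	/* hypothetical pending-register read */

	while (pending) {
		unsigned int hwirq = fls_hi(pending);

		printf("dispatch hwirq %u\n", hwirq); /* generic_handle_irq() */
		pending &= ~(1U << hwirq);
	}
	return 0;	/* prints hwirq 5, then hwirq 1 */
}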
diff --git a/drivers/irqchip/irq-ompic.c b/drivers/irqchip/irq-ompic.c
index cf6d0c455518..e66ef4373b1e 100644
--- a/drivers/irqchip/irq-ompic.c
+++ b/drivers/irqchip/irq-ompic.c
@@ -171,9 +171,9 @@ static int __init ompic_of_init(struct device_node *node,
/* Setup the device */
ompic_base = ioremap(res.start, resource_size(&res));
- if (IS_ERR(ompic_base)) {
+ if (!ompic_base) {
pr_err("ompic: unable to map registers");
- return PTR_ERR(ompic_base);
+ return -ENOMEM;
}
irq = irq_of_parse_and_map(node, 0);
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index dde8f46bc254..e268811dc544 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -724,11 +724,11 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
return count;
}
-static unsigned int
+static __poll_t
capi_poll(struct file *file, poll_table *wait)
{
struct capidev *cdev = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
if (!cdev->ap.applid)
return POLLERR;
diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c
index 1c5dc345e7c5..34b7704042a4 100644
--- a/drivers/isdn/divert/divert_procfs.c
+++ b/drivers/isdn/divert/divert_procfs.c
@@ -119,10 +119,10 @@ isdn_divert_write(struct file *file, const char __user *buf, size_t count, loff_
/***************************************/
/* select routines for various kernels */
/***************************************/
-static unsigned int
+static __poll_t
isdn_divert_poll(struct file *file, poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &(rd_queue), wait);
/* mask = POLLOUT | POLLWRNORM; */
diff --git a/drivers/isdn/hardware/eicon/divamnt.c b/drivers/isdn/hardware/eicon/divamnt.c
index 72e58bf07577..70f16102a001 100644
--- a/drivers/isdn/hardware/eicon/divamnt.c
+++ b/drivers/isdn/hardware/eicon/divamnt.c
@@ -98,9 +98,9 @@ void diva_os_get_time(dword *sec, dword *usec)
/*
* device node operations
*/
-static unsigned int maint_poll(struct file *file, poll_table *wait)
+static __poll_t maint_poll(struct file *file, poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &msgwaitq, wait);
mask = POLLOUT | POLLWRNORM;
diff --git a/drivers/isdn/hardware/eicon/divasi.c b/drivers/isdn/hardware/eicon/divasi.c
index 0033d74a7291..da5cc5ab7e2d 100644
--- a/drivers/isdn/hardware/eicon/divasi.c
+++ b/drivers/isdn/hardware/eicon/divasi.c
@@ -74,7 +74,7 @@ static ssize_t um_idi_read(struct file *file, char __user *buf, size_t count,
loff_t *offset);
static ssize_t um_idi_write(struct file *file, const char __user *buf,
size_t count, loff_t *offset);
-static unsigned int um_idi_poll(struct file *file, poll_table *wait);
+static __poll_t um_idi_poll(struct file *file, poll_table *wait);
static int um_idi_open(struct inode *inode, struct file *file);
static int um_idi_release(struct inode *inode, struct file *file);
static int remove_entity(void *entity);
@@ -365,7 +365,7 @@ um_idi_write(struct file *file, const char __user *buf, size_t count,
return (ret);
}
-static unsigned int um_idi_poll(struct file *file, poll_table *wait)
+static __poll_t um_idi_poll(struct file *file, poll_table *wait)
{
diva_um_idi_os_context_t *p_os;
diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c
index b2023e08dcd2..fbc788e6f0db 100644
--- a/drivers/isdn/hardware/eicon/divasmain.c
+++ b/drivers/isdn/hardware/eicon/divasmain.c
@@ -650,7 +650,7 @@ static ssize_t divas_read(struct file *file, char __user *buf,
return (ret);
}
-static unsigned int divas_poll(struct file *file, poll_table *wait)
+static __poll_t divas_poll(struct file *file, poll_table *wait)
{
if (!file->private_data) {
return (POLLERR);
diff --git a/drivers/isdn/hardware/eicon/divasproc.c b/drivers/isdn/hardware/eicon/divasproc.c
index b57efd6ad916..3478f6f099eb 100644
--- a/drivers/isdn/hardware/eicon/divasproc.c
+++ b/drivers/isdn/hardware/eicon/divasproc.c
@@ -99,7 +99,7 @@ divas_write(struct file *file, const char __user *buf, size_t count, loff_t *off
return (-ENODEV);
}
-static unsigned int divas_poll(struct file *file, poll_table *wait)
+static __poll_t divas_poll(struct file *file, poll_table *wait)
{
return (POLLERR);
}
diff --git a/drivers/isdn/hysdn/hysdn_proclog.c b/drivers/isdn/hysdn/hysdn_proclog.c
index aaca0b3d662e..6abea6915f49 100644
--- a/drivers/isdn/hysdn/hysdn_proclog.c
+++ b/drivers/isdn/hysdn/hysdn_proclog.c
@@ -281,10 +281,10 @@ hysdn_log_close(struct inode *ino, struct file *filep)
/*************************************************/
/* select/poll routine to be able using select() */
/*************************************************/
-static unsigned int
+static __poll_t
hysdn_log_poll(struct file *file, poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
hysdn_card *card = PDE_DATA(file_inode(file));
struct procdata *pd = card->proclog;
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index 8b03d618185e..0521c32949d4 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -1227,10 +1227,10 @@ out:
return retval;
}
-static unsigned int
+static __poll_t
isdn_poll(struct file *file, poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
unsigned int minor = iminor(file_inode(file));
int drvidx = isdn_minor2drv(minor - ISDN_MINOR_CTRL);
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index e07aefb9151d..57884319b4b1 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -685,10 +685,10 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
return 0;
}
-unsigned int
+__poll_t
isdn_ppp_poll(struct file *file, poll_table *wait)
{
- u_int mask;
+ __poll_t mask;
struct ippp_buf_queue *bf, *bl;
u_long flags;
struct ippp_struct *is;
diff --git a/drivers/isdn/i4l/isdn_ppp.h b/drivers/isdn/i4l/isdn_ppp.h
index 4e9b8935a4eb..34b8a2ce84f3 100644
--- a/drivers/isdn/i4l/isdn_ppp.h
+++ b/drivers/isdn/i4l/isdn_ppp.h
@@ -23,7 +23,7 @@ extern int isdn_ppp_autodial_filter(struct sk_buff *, isdn_net_local *);
extern int isdn_ppp_xmit(struct sk_buff *, struct net_device *);
extern void isdn_ppp_receive(isdn_net_dev *, isdn_net_local *, struct sk_buff *);
extern int isdn_ppp_dev_ioctl(struct net_device *, struct ifreq *, int);
-extern unsigned int isdn_ppp_poll(struct file *, struct poll_table_struct *);
+extern __poll_t isdn_ppp_poll(struct file *, struct poll_table_struct *);
extern int isdn_ppp_ioctl(int, struct file *, unsigned int, unsigned long);
extern void isdn_ppp_release(int, struct file *);
extern int isdn_ppp_dial_slave(char *);
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index e3654782a3e2..21d50e4cc5e1 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -645,8 +645,10 @@ l1oip_socket_thread(void *data)
{
struct l1oip *hc = (struct l1oip *)data;
int ret = 0;
- struct msghdr msg;
struct sockaddr_in sin_rx;
+ struct kvec iov;
+ struct msghdr msg = {.msg_name = &sin_rx,
+ .msg_namelen = sizeof(sin_rx)};
unsigned char *recvbuf;
size_t recvbuf_size = 1500;
int recvlen;
@@ -661,6 +663,9 @@ l1oip_socket_thread(void *data)
goto fail;
}
+ iov.iov_base = recvbuf;
+ iov.iov_len = recvbuf_size;
+
/* make daemon */
allow_signal(SIGTERM);
@@ -697,12 +702,6 @@ l1oip_socket_thread(void *data)
goto fail;
}
- /* build receive message */
- msg.msg_name = &sin_rx;
- msg.msg_namelen = sizeof(sin_rx);
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
-
/* build send message */
hc->sendmsg.msg_name = &hc->sin_remote;
hc->sendmsg.msg_namelen = sizeof(hc->sin_remote);
@@ -719,12 +718,9 @@ l1oip_socket_thread(void *data)
printk(KERN_DEBUG "%s: socket created and open\n",
__func__);
while (!signal_pending(current)) {
- struct kvec iov = {
- .iov_base = recvbuf,
- .iov_len = recvbuf_size,
- };
- recvlen = kernel_recvmsg(socket, &msg, &iov, 1,
- recvbuf_size, 0);
+ iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1,
+ recvbuf_size);
+ recvlen = sock_recvmsg(socket, &msg, 0);
if (recvlen > 0) {
l1oip_socket_parse(hc, &sin_rx, recvbuf, recvlen);
} else {
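The receive-loop conversion above replaces kernel_recvmsg() with an explicit iov_iter: the kvec is set up once, and only the iterator is re-primed before each sock_recvmsg() call. A minimal sketch of the pattern under the same 4.15-era API (example_recv_one() is hypothetical, not the driver's code):

#include <linux/in.h>
#include <linux/net.h>
#include <linux/uio.h>

static int example_recv_one(struct socket *sock, void *buf, size_t len,
			    struct sockaddr_in *addr)
{
	struct kvec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = { .msg_name = addr,
			      .msg_namelen = sizeof(*addr) };

	/* Re-point the iterator at the buffer before every receive. */
	iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, len);
	return sock_recvmsg(sock, &msg, 0);
}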
diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c
index c50a34340f67..f4272d4e0a26 100644
--- a/drivers/isdn/mISDN/timerdev.c
+++ b/drivers/isdn/mISDN/timerdev.c
@@ -141,11 +141,11 @@ mISDN_read(struct file *filep, char __user *buf, size_t count, loff_t *off)
return ret;
}
-static unsigned int
+static __poll_t
mISDN_poll(struct file *filep, poll_table *wait)
{
struct mISDNtimerdev *dev = filep->private_data;
- unsigned int mask = POLLERR;
+ __poll_t mask = POLLERR;
if (*debug & DEBUG_TIMER)
printk(KERN_DEBUG "%s(%p, %p)\n", __func__, filep, wait);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 318a28fd58fe..3e763d2a0cb3 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -137,6 +137,13 @@ config LEDS_LM3642
converter plus 1.5A constant current driver for a high-current
white LED.
+config LEDS_LM3692X
+ tristate "LED support for LM3692x Chips"
+ depends on LEDS_CLASS && I2C && OF
+ select REGMAP_I2C
+ help
+ This option enables support for the TI LM3692x family
+ of white LED string drivers used for backlighting.
config LEDS_LOCOMO
tristate "LED Support for Locomo device"
@@ -347,7 +354,7 @@ config LEDS_LP8788
config LEDS_LP8860
tristate "LED support for the TI LP8860 4 channel LED driver"
- depends on LEDS_CLASS && I2C
+ depends on LEDS_CLASS && I2C && OF
select REGMAP_I2C
help
If you say yes here you get support for the TI LP8860 4 channel
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index a2a6b5a4f86d..987884a5b9a5 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -74,6 +74,7 @@ obj-$(CONFIG_LEDS_PM8058) += leds-pm8058.o
obj-$(CONFIG_LEDS_MLXCPLD) += leds-mlxcpld.o
obj-$(CONFIG_LEDS_NIC78BX) += leds-nic78bx.o
obj-$(CONFIG_LEDS_MT6323) += leds-mt6323.o
+obj-$(CONFIG_LEDS_LM3692X) += leds-lm3692x.o
# LED SPI Drivers
obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o
diff --git a/drivers/leds/leds-as3645a.c b/drivers/leds/leds-as3645a.c
index 9a257f969300..f883616d9e60 100644
--- a/drivers/leds/leds-as3645a.c
+++ b/drivers/leds/leds-as3645a.c
@@ -360,7 +360,8 @@ static int as3645a_set_flash_brightness(struct led_classdev_flash *fled,
{
struct as3645a *flash = fled_to_as3645a(fled);
- flash->flash_current = as3645a_current_to_reg(flash, true, brightness_ua);
+ flash->flash_current = as3645a_current_to_reg(flash, true,
+ brightness_ua);
return as3645a_set_current(flash);
}
@@ -455,8 +456,8 @@ static int as3645a_detect(struct as3645a *flash)
/* Verify the chip model and version. */
if (model != 0x01 || rfu != 0x00) {
- dev_err(dev, "AS3645A not detected "
- "(model %d rfu %d)\n", model, rfu);
+ dev_err(dev, "AS3645A not detected (model %d rfu %d)\n",
+ model, rfu);
return -ENODEV;
}
diff --git a/drivers/leds/leds-blinkm.c b/drivers/leds/leds-blinkm.c
index d03ed6b4176b..851c1920b63c 100644
--- a/drivers/leds/leds-blinkm.c
+++ b/drivers/leds/leds-blinkm.c
@@ -549,8 +549,12 @@ static int blinkm_detect(struct i2c_client *client, struct i2c_board_info *info)
/* make sure the blinkM is balanced (read/writes) */
while (count > 0) {
ret = blinkm_write(client, BLM_GET_ADDR, NULL);
+ if (ret)
+ return ret;
usleep_range(5000, 10000);
ret = blinkm_read(client, BLM_GET_ADDR, tmpargs);
+ if (ret)
+ return ret;
usleep_range(5000, 10000);
if (tmpargs[0] == 0x09)
count = 0;
diff --git a/drivers/leds/leds-lm3692x.c b/drivers/leds/leds-lm3692x.c
new file mode 100644
index 000000000000..437173d1712c
--- /dev/null
+++ b/drivers/leds/leds-lm3692x.c
@@ -0,0 +1,393 @@
+/*
+ * TI lm3692x LED Driver
+ *
+ * Copyright (C) 2017 Texas Instruments
+ *
+ * Author: Dan Murphy <dmurphy@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * Data sheet is located at
+ * http://www.ti.com/lit/ds/snvsa29/snvsa29.pdf
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <uapi/linux/uleds.h>
+
+#define LM3692X_REV 0x0
+#define LM3692X_RESET 0x1
+#define LM3692X_EN 0x10
+#define LM3692X_BRT_CTRL 0x11
+#define LM3692X_PWM_CTRL 0x12
+#define LM3692X_BOOST_CTRL 0x13
+#define LM3692X_AUTO_FREQ_HI 0x15
+#define LM3692X_AUTO_FREQ_LO 0x16
+#define LM3692X_BL_ADJ_THRESH 0x17
+#define LM3692X_BRT_LSB 0x18
+#define LM3692X_BRT_MSB 0x19
+#define LM3692X_FAULT_CTRL 0x1e
+#define LM3692X_FAULT_FLAGS 0x1f
+
+#define LM3692X_SW_RESET BIT(0)
+#define LM3692X_DEVICE_EN BIT(0)
+#define LM3692X_LED1_EN BIT(1)
+#define LM3692X_LED2_EN BIT(2)
+
+/* Brightness Control Bits */
+#define LM3692X_BL_ADJ_POL BIT(0)
+#define LM3692X_RAMP_RATE_125us 0x00
+#define LM3692X_RAMP_RATE_250us BIT(1)
+#define LM3692X_RAMP_RATE_500us BIT(2)
+#define LM3692X_RAMP_RATE_1ms (BIT(1) | BIT(2))
+#define LM3692X_RAMP_RATE_2ms BIT(3)
+#define LM3692X_RAMP_RATE_4ms (BIT(3) | BIT(1))
+#define LM3692X_RAMP_RATE_8ms (BIT(2) | BIT(3))
+#define LM3692X_RAMP_RATE_16ms (BIT(1) | BIT(2) | BIT(3))
+#define LM3692X_RAMP_EN BIT(4)
+#define LM3692X_BRHT_MODE_REG 0x00
+#define LM3692X_BRHT_MODE_PWM BIT(5)
+#define LM3692X_BRHT_MODE_MULTI_RAMP BIT(6)
+#define LM3692X_BRHT_MODE_RAMP_MULTI (BIT(5) | BIT(6))
+#define LM3692X_MAP_MODE_EXP BIT(7)
+
+/* PWM Register Bits */
+#define LM3692X_PWM_FILTER_100 BIT(0)
+#define LM3692X_PWM_FILTER_150 BIT(1)
+#define LM3692X_PWM_FILTER_200 (BIT(0) | BIT(1))
+#define LM3692X_PWM_HYSTER_1LSB BIT(2)
+#define LM3692X_PWM_HYSTER_2LSB BIT(3)
+#define LM3692X_PWM_HYSTER_3LSB (BIT(3) | BIT(2))
+#define LM3692X_PWM_HYSTER_4LSB BIT(4)
+#define LM3692X_PWM_HYSTER_5LSB (BIT(4) | BIT(2))
+#define LM3692X_PWM_HYSTER_6LSB (BIT(4) | BIT(3))
+#define LM3692X_PWM_POLARITY BIT(5)
+#define LM3692X_PWM_SAMP_4MHZ BIT(6)
+#define LM3692X_PWM_SAMP_24MHZ BIT(7)
+
+/* Boost Control Bits */
+#define LM3692X_OCP_PROT_1A BIT(0)
+#define LM3692X_OCP_PROT_1_25A BIT(1)
+#define LM3692X_OCP_PROT_1_5A (BIT(0) | BIT(1))
+#define LM3692X_OVP_21V BIT(2)
+#define LM3692X_OVP_25V BIT(3)
+#define LM3692X_OVP_29V (BIT(2) | BIT(3))
+#define LM3692X_MIN_IND_22UH BIT(4)
+#define LM3692X_BOOST_SW_1MHZ BIT(5)
+#define LM3692X_BOOST_SW_NO_SHIFT BIT(6)
+
+/* Fault Control Bits */
+#define LM3692X_FAULT_CTRL_OVP BIT(0)
+#define LM3692X_FAULT_CTRL_OCP BIT(1)
+#define LM3692X_FAULT_CTRL_TSD BIT(2)
+#define LM3692X_FAULT_CTRL_OPEN BIT(3)
+
+/* Fault Flag Bits */
+#define LM3692X_FAULT_FLAG_OVP BIT(0)
+#define LM3692X_FAULT_FLAG_OCP BIT(1)
+#define LM3692X_FAULT_FLAG_TSD BIT(2)
+#define LM3692X_FAULT_FLAG_SHRT BIT(3)
+#define LM3692X_FAULT_FLAG_OPEN BIT(4)
+
+/**
+ * struct lm3692x_led -
+ * @lock - Lock for reading/writing the device
+ * @client - Pointer to the I2C client
+ * @led_dev - LED class device pointer
+ * @regmap - Device's register map
+ * @enable_gpio - VDDIO/EN gpio to enable communication interface
+ * @regulator - LED supply regulator pointer
+ * @label - LED label
+ */
+struct lm3692x_led {
+ struct mutex lock;
+ struct i2c_client *client;
+ struct led_classdev led_dev;
+ struct regmap *regmap;
+ struct gpio_desc *enable_gpio;
+ struct regulator *regulator;
+ char label[LED_MAX_NAME_SIZE];
+};
+
+static const struct reg_default lm3692x_reg_defs[] = {
+ {LM3692X_EN, 0xf},
+ {LM3692X_BRT_CTRL, 0x61},
+ {LM3692X_PWM_CTRL, 0x73},
+ {LM3692X_BOOST_CTRL, 0x6f},
+ {LM3692X_AUTO_FREQ_HI, 0x0},
+ {LM3692X_AUTO_FREQ_LO, 0x0},
+ {LM3692X_BL_ADJ_THRESH, 0x0},
+ {LM3692X_BRT_LSB, 0x7},
+ {LM3692X_BRT_MSB, 0xff},
+ {LM3692X_FAULT_CTRL, 0x7},
+};
+
+static const struct regmap_config lm3692x_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = LM3692X_FAULT_FLAGS,
+ .reg_defaults = lm3692x_reg_defs,
+ .num_reg_defaults = ARRAY_SIZE(lm3692x_reg_defs),
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static int lm3692x_fault_check(struct lm3692x_led *led)
+{
+ int ret;
+ unsigned int read_buf;
+
+ ret = regmap_read(led->regmap, LM3692X_FAULT_FLAGS, &read_buf);
+ if (ret)
+ return ret;
+
+ if (read_buf)
+ dev_err(&led->client->dev, "Detected a fault 0x%X\n", read_buf);
+
+ /* The first read may clear the fault. Check again to see if the fault
+ * still exists and return that value.
+ */
+ regmap_read(led->regmap, LM3692X_FAULT_FLAGS, &read_buf);
+ if (read_buf)
+ dev_err(&led->client->dev, "Second read of fault flags 0x%X\n",
+ read_buf);
+
+ return read_buf;
+}
+
+static int lm3692x_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brt_val)
+{
+ struct lm3692x_led *led =
+ container_of(led_cdev, struct lm3692x_led, led_dev);
+ int ret;
+ int led_brightness_lsb = (brt_val >> 5);
+
+ mutex_lock(&led->lock);
+
+ ret = lm3692x_fault_check(led);
+ if (ret) {
+ dev_err(&led->client->dev, "Cannot read/clear faults\n");
+ goto out;
+ }
+
+ ret = regmap_write(led->regmap, LM3692X_BRT_MSB, brt_val);
+ if (ret) {
+ dev_err(&led->client->dev, "Cannot write MSB\n");
+ goto out;
+ }
+
+ ret = regmap_write(led->regmap, LM3692X_BRT_LSB, led_brightness_lsb);
+ if (ret) {
+ dev_err(&led->client->dev, "Cannot write LSB\n");
+ goto out;
+ }
+out:
+ mutex_unlock(&led->lock);
+ return ret;
+}
+
+static int lm3692x_init(struct lm3692x_led *led)
+{
+ int ret;
+
+ if (led->regulator) {
+ ret = regulator_enable(led->regulator);
+ if (ret) {
+ dev_err(&led->client->dev,
+ "Failed to enable regulator\n");
+ return ret;
+ }
+ }
+
+ if (led->enable_gpio)
+ gpiod_direction_output(led->enable_gpio, 1);
+
+ ret = lm3692x_fault_check(led);
+ if (ret) {
+ dev_err(&led->client->dev, "Cannot read/clear faults\n");
+ goto out;
+ }
+
+ ret = regmap_write(led->regmap, LM3692X_BRT_CTRL, 0x00);
+ if (ret)
+ goto out;
+
+ /*
+ * For glitch free operation, the following data should
+ * only be written while device enable bit is 0
+ * per Section 7.5.14 of the data sheet
+ */
+ ret = regmap_write(led->regmap, LM3692X_PWM_CTRL,
+ LM3692X_PWM_FILTER_100 | LM3692X_PWM_SAMP_24MHZ);
+ if (ret)
+ goto out;
+
+ ret = regmap_write(led->regmap, LM3692X_BOOST_CTRL,
+ LM3692X_BRHT_MODE_RAMP_MULTI |
+ LM3692X_BL_ADJ_POL |
+ LM3692X_RAMP_RATE_250us);
+ if (ret)
+ goto out;
+
+ ret = regmap_write(led->regmap, LM3692X_AUTO_FREQ_HI, 0x00);
+ if (ret)
+ goto out;
+
+ ret = regmap_write(led->regmap, LM3692X_AUTO_FREQ_LO, 0x00);
+ if (ret)
+ goto out;
+
+ ret = regmap_write(led->regmap, LM3692X_BL_ADJ_THRESH, 0x00);
+ if (ret)
+ goto out;
+
+ ret = regmap_write(led->regmap, LM3692X_BRT_CTRL,
+ LM3692X_BL_ADJ_POL | LM3692X_PWM_HYSTER_4LSB);
+ if (ret)
+ goto out;
+
+ return ret;
+out:
+ dev_err(&led->client->dev, "Fail writing initialization values\n");
+
+ if (led->enable_gpio)
+ gpiod_direction_output(led->enable_gpio, 0);
+
+ if (led->regulator) {
+ ret = regulator_disable(led->regulator);
+ if (ret)
+ dev_err(&led->client->dev,
+ "Failed to disable regulator\n");
+ }
+
+ return ret;
+}
+
+static int lm3692x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct lm3692x_led *led;
+ struct device_node *np = client->dev.of_node;
+ struct device_node *child_node;
+ const char *name;
+
+ led = devm_kzalloc(&client->dev, sizeof(*led), GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
+ for_each_available_child_of_node(np, child_node) {
+ led->led_dev.default_trigger = of_get_property(child_node,
+ "linux,default-trigger",
+ NULL);
+
+ ret = of_property_read_string(child_node, "label", &name);
+ if (!ret)
+ snprintf(led->label, sizeof(led->label),
+ "%s:%s", id->name, name);
+ else
+ snprintf(led->label, sizeof(led->label),
+ "%s::backlight_cluster", id->name);
+ }
+
+ led->enable_gpio = devm_gpiod_get_optional(&client->dev,
+ "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(led->enable_gpio)) {
+ ret = PTR_ERR(led->enable_gpio);
+ dev_err(&client->dev, "Failed to get enable gpio: %d\n", ret);
+ return ret;
+ }
+
+ led->regulator = devm_regulator_get(&client->dev, "vled");
+ if (IS_ERR(led->regulator))
+ led->regulator = NULL;
+
+ led->client = client;
+ led->led_dev.name = led->label;
+ led->led_dev.brightness_set_blocking = lm3692x_brightness_set;
+
+ mutex_init(&led->lock);
+
+ i2c_set_clientdata(client, led);
+
+ led->regmap = devm_regmap_init_i2c(client, &lm3692x_regmap_config);
+ if (IS_ERR(led->regmap)) {
+ ret = PTR_ERR(led->regmap);
+ dev_err(&client->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = lm3692x_init(led);
+ if (ret)
+ return ret;
+
+ ret = devm_led_classdev_register(&client->dev, &led->led_dev);
+ if (ret) {
+ dev_err(&client->dev, "led register err: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int lm3692x_remove(struct i2c_client *client)
+{
+ struct lm3692x_led *led = i2c_get_clientdata(client);
+ int ret;
+
+ if (led->enable_gpio)
+ gpiod_direction_output(led->enable_gpio, 0);
+
+ if (led->regulator) {
+ ret = regulator_disable(led->regulator);
+ if (ret)
+ dev_err(&led->client->dev,
+ "Failed to disable regulator\n");
+ }
+
+ mutex_destroy(&led->lock);
+
+ return 0;
+}
+
+static const struct i2c_device_id lm3692x_id[] = {
+ { "lm36922", 0 },
+ { "lm36923", 1 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, lm3692x_id);
+
+static const struct of_device_id of_lm3692x_leds_match[] = {
+ { .compatible = "ti,lm36922", },
+ { .compatible = "ti,lm36923", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_lm3692x_leds_match);
+
+static struct i2c_driver lm3692x_driver = {
+ .driver = {
+ .name = "lm3692x",
+ .of_match_table = of_lm3692x_leds_match,
+ },
+ .probe = lm3692x_probe,
+ .remove = lm3692x_remove,
+ .id_table = lm3692x_id,
+};
+module_i2c_driver(lm3692x_driver);
+
+MODULE_DESCRIPTION("Texas Instruments LM3692X LED driver");
+MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
+MODULE_LICENSE("GPL v2");
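One detail worth checking in lm3692x_brightness_set() above: the 8-bit LED class value is written to BRT_MSB while its top three bits (brt_val >> 5) go to BRT_LSB, which at full brightness reproduces the power-on defaults listed in lm3692x_reg_defs. A quick standalone check of that arithmetic (a sketch, not driver code):

#include <assert.h>

int main(void)
{
	unsigned int brt_val = 255;	/* LED_FULL, the class default max */

	assert(brt_val == 0xff);	/* value written to LM3692X_BRT_MSB */
	assert((brt_val >> 5) == 0x7);	/* value written to LM3692X_BRT_LSB */
	return 0;
}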
diff --git a/drivers/leds/leds-lp8860.c b/drivers/leds/leds-lp8860.c
index 3e70775a2d54..39c72a908f3b 100644
--- a/drivers/leds/leds-lp8860.c
+++ b/drivers/leds/leds-lp8860.c
@@ -22,6 +22,7 @@
#include <linux/of_gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/slab.h>
+#include <uapi/linux/uleds.h>
#define LP8860_DISP_CL1_BRT_MSB 0x00
#define LP8860_DISP_CL1_BRT_LSB 0x01
@@ -86,8 +87,6 @@
#define LP8860_CLEAR_FAULTS 0x01
-#define LP8860_DISP_LED_NAME "display_cluster"
-
/**
* struct lp8860_led -
* @lock - Lock for reading/writing the device
@@ -98,7 +97,7 @@
* @enable_gpio - VDDIO/EN gpio to enable communication interface
* @regulator - LED supply regulator pointer
* @label - LED label
-**/
+ */
struct lp8860_led {
struct mutex lock;
struct i2c_client *client;
@@ -107,7 +106,7 @@ struct lp8860_led {
struct regmap *eeprom_regmap;
struct gpio_desc *enable_gpio;
struct regulator *regulator;
- const char *label;
+ char label[LED_MAX_NAME_SIZE];
};
struct lp8860_eeprom_reg {
@@ -247,6 +246,15 @@ static int lp8860_init(struct lp8860_led *led)
unsigned int read_buf;
int ret, i, reg_count;
+ if (led->regulator) {
+ ret = regulator_enable(led->regulator);
+ if (ret) {
+ dev_err(&led->client->dev,
+ "Failed to enable regulator\n");
+ return ret;
+ }
+ }
+
if (led->enable_gpio)
gpiod_direction_output(led->enable_gpio, 1);
@@ -282,12 +290,25 @@ static int lp8860_init(struct lp8860_led *led)
ret = regmap_write(led->regmap,
LP8860_EEPROM_CNTRL,
LP8860_PROGRAM_EEPROM);
- if (ret)
+ if (ret) {
dev_err(&led->client->dev, "Failed programming EEPROM\n");
+ goto out;
+ }
+
+ return ret;
+
out:
if (ret)
if (led->enable_gpio)
gpiod_direction_output(led->enable_gpio, 0);
+
+ if (led->regulator) {
+ ret = regulator_disable(led->regulator);
+ if (ret)
+ dev_err(&led->client->dev,
+ "Failed to disable regulator\n");
+ }
+
return ret;
}
@@ -365,19 +386,25 @@ static int lp8860_probe(struct i2c_client *client,
int ret;
struct lp8860_led *led;
struct device_node *np = client->dev.of_node;
+ struct device_node *child_node;
+ const char *name;
led = devm_kzalloc(&client->dev, sizeof(*led), GFP_KERNEL);
if (!led)
return -ENOMEM;
- led->label = LP8860_DISP_LED_NAME;
-
- if (client->dev.of_node) {
- ret = of_property_read_string(np, "label", &led->label);
- if (ret) {
- dev_err(&client->dev, "Missing label in dt\n");
- return -EINVAL;
- }
+ for_each_available_child_of_node(np, child_node) {
+ led->led_dev.default_trigger = of_get_property(child_node,
+ "linux,default-trigger",
+ NULL);
+
+ ret = of_property_read_string(child_node, "label", &name);
+ if (!ret)
+ snprintf(led->label, sizeof(led->label), "%s:%s",
+ id->name, name);
+ else
+ snprintf(led->label, sizeof(led->label),
+ "%s::display_cluster", id->name);
}
led->enable_gpio = devm_gpiod_get_optional(&client->dev,
@@ -394,7 +421,6 @@ static int lp8860_probe(struct i2c_client *client,
led->client = client;
led->led_dev.name = led->label;
- led->led_dev.max_brightness = LED_FULL;
led->led_dev.brightness_set_blocking = lp8860_brightness_set;
mutex_init(&led->lock);
@@ -421,7 +447,7 @@ static int lp8860_probe(struct i2c_client *client,
if (ret)
return ret;
- ret = led_classdev_register(&client->dev, &led->led_dev);
+ ret = devm_led_classdev_register(&client->dev, &led->led_dev);
if (ret) {
dev_err(&client->dev, "led register err: %d\n", ret);
return ret;
@@ -435,8 +461,6 @@ static int lp8860_remove(struct i2c_client *client)
struct lp8860_led *led = i2c_get_clientdata(client);
int ret;
- led_classdev_unregister(&led->led_dev);
-
if (led->enable_gpio)
gpiod_direction_output(led->enable_gpio, 0);
@@ -447,6 +471,8 @@ static int lp8860_remove(struct i2c_client *client)
"Failed to disable regulator\n");
}
+ mutex_destroy(&led->lock);
+
return 0;
}
@@ -456,18 +482,16 @@ static const struct i2c_device_id lp8860_id[] = {
};
MODULE_DEVICE_TABLE(i2c, lp8860_id);
-#ifdef CONFIG_OF
static const struct of_device_id of_lp8860_leds_match[] = {
{ .compatible = "ti,lp8860", },
{},
};
MODULE_DEVICE_TABLE(of, of_lp8860_leds_match);
-#endif
static struct i2c_driver lp8860_driver = {
.driver = {
.name = "lp8860",
- .of_match_table = of_match_ptr(of_lp8860_leds_match),
+ .of_match_table = of_lp8860_leds_match,
},
.probe = lp8860_probe,
.remove = lp8860_remove,
@@ -477,4 +501,4 @@ module_i2c_driver(lp8860_driver);
MODULE_DESCRIPTION("Texas Instruments LP8860 LED driver");
MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-pm8058.c b/drivers/leds/leds-pm8058.c
index a52674327857..8988ba3b2d65 100644
--- a/drivers/leds/leds-pm8058.c
+++ b/drivers/leds/leds-pm8058.c
@@ -106,7 +106,7 @@ static int pm8058_led_probe(struct platform_device *pdev)
if (!led)
return -ENOMEM;
- led->ledtype = (u32)of_device_get_match_data(&pdev->dev);
+ led->ledtype = (u32)(unsigned long)of_device_get_match_data(&pdev->dev);
map = dev_get_regmap(pdev->dev.parent, NULL);
if (!map) {
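The cast change above is the standard pattern for unpacking an integer stashed in OF match data: of_device_get_match_data() returns a pointer-sized value, so it must be narrowed through unsigned long to avoid a pointer-truncation warning on 64-bit builds. A hedged sketch (the helper name is made up):

#include <linux/of_device.h>
#include <linux/platform_device.h>

static u32 example_match_data_to_u32(struct platform_device *pdev)
{
	/* void * -> unsigned long -> u32: warning-free narrowing on 64-bit. */
	return (u32)(unsigned long)of_device_get_match_data(&pdev->dev);
}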
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index 8d456dc6c5bf..df80c89ebe7f 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -16,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/of_platform.h>
-#include <linux/fb.h>
#include <linux/leds.h>
#include <linux/err.h>
#include <linux/pwm.h>
diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig
index bb090216b4dc..a2559b4fdfff 100644
--- a/drivers/leds/trigger/Kconfig
+++ b/drivers/leds/trigger/Kconfig
@@ -81,7 +81,7 @@ config LEDS_TRIGGER_ACTIVITY
tristate "LED activity Trigger"
depends on LEDS_TRIGGERS
help
- This allows LEDs to be controlled by a immediate CPU usage.
+ This allows LEDs to be controlled by an immediate CPU usage.
The flash frequency and duty cycle varies from faint flashes to
intense brightness depending on the instant CPU load.
If unsure, say N.
@@ -135,4 +135,11 @@ config LEDS_TRIGGER_PANIC
a different trigger.
If unsure, say Y.
+config LEDS_TRIGGER_NETDEV
+ tristate "LED Netdev Trigger"
+ depends on NET && LEDS_TRIGGERS
+ help
+ This allows LEDs to be controlled by network device activity.
+ If unsure, say Y.
+
endif # LEDS_TRIGGERS
diff --git a/drivers/leds/trigger/Makefile b/drivers/leds/trigger/Makefile
index 4a8b6cff7761..f3cfe1950538 100644
--- a/drivers/leds/trigger/Makefile
+++ b/drivers/leds/trigger/Makefile
@@ -12,3 +12,4 @@ obj-$(CONFIG_LEDS_TRIGGER_DEFAULT_ON) += ledtrig-default-on.o
obj-$(CONFIG_LEDS_TRIGGER_TRANSIENT) += ledtrig-transient.o
obj-$(CONFIG_LEDS_TRIGGER_CAMERA) += ledtrig-camera.o
obj-$(CONFIG_LEDS_TRIGGER_PANIC) += ledtrig-panic.o
+obj-$(CONFIG_LEDS_TRIGGER_NETDEV) += ledtrig-netdev.o
diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
new file mode 100644
index 000000000000..6df4781a6308
--- /dev/null
+++ b/drivers/leds/trigger/ledtrig-netdev.c
@@ -0,0 +1,496 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright 2017 Ben Whitten <ben.whitten@gmail.com>
+// Copyright 2007 Oliver Jowett <oliver@opencloud.com>
+//
+// LED Kernel Netdev Trigger
+//
+// Toggles the LED to reflect the link and traffic state of a named net device
+//
+// Derived from ledtrig-timer.c which is:
+// Copyright 2005-2006 Openedhand Ltd.
+// Author: Richard Purdie <rpurdie@openedhand.com>
+
+#include <linux/atomic.h>
+#include <linux/ctype.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include "../leds.h"
+
+/*
+ * Configurable sysfs attributes:
+ *
+ * device_name - network device name to monitor
+ * interval - duration of LED blink, in milliseconds
+ * link - LED's normal state reflects whether the link is up
+ * (has carrier) or not
+ * tx - LED blinks on transmitted data
+ * rx - LED blinks on received data
+ *
+ */
+
+struct led_netdev_data {
+ spinlock_t lock;
+
+ struct delayed_work work;
+ struct notifier_block notifier;
+
+ struct led_classdev *led_cdev;
+ struct net_device *net_dev;
+
+ char device_name[IFNAMSIZ];
+ atomic_t interval;
+ unsigned int last_activity;
+
+ unsigned long mode;
+#define NETDEV_LED_LINK 0
+#define NETDEV_LED_TX 1
+#define NETDEV_LED_RX 2
+#define NETDEV_LED_MODE_LINKUP 3
+};
+
+enum netdev_led_attr {
+ NETDEV_ATTR_LINK,
+ NETDEV_ATTR_TX,
+ NETDEV_ATTR_RX
+};
+
+static void set_baseline_state(struct led_netdev_data *trigger_data)
+{
+ int current_brightness;
+ struct led_classdev *led_cdev = trigger_data->led_cdev;
+
+ current_brightness = led_cdev->brightness;
+ if (current_brightness)
+ led_cdev->blink_brightness = current_brightness;
+ if (!led_cdev->blink_brightness)
+ led_cdev->blink_brightness = led_cdev->max_brightness;
+
+ if (!test_bit(NETDEV_LED_MODE_LINKUP, &trigger_data->mode))
+ led_set_brightness(led_cdev, LED_OFF);
+ else {
+ if (test_bit(NETDEV_LED_LINK, &trigger_data->mode))
+ led_set_brightness(led_cdev,
+ led_cdev->blink_brightness);
+ else
+ led_set_brightness(led_cdev, LED_OFF);
+
+ /* If we are looking for RX/TX, start periodically
+ * checking the stats
+ */
+ if (test_bit(NETDEV_LED_TX, &trigger_data->mode) ||
+ test_bit(NETDEV_LED_RX, &trigger_data->mode))
+ schedule_delayed_work(&trigger_data->work, 0);
+ }
+}
+
+static ssize_t device_name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+ ssize_t len;
+
+ spin_lock_bh(&trigger_data->lock);
+ len = sprintf(buf, "%s\n", trigger_data->device_name);
+ spin_unlock_bh(&trigger_data->lock);
+
+ return len;
+}
+
+static ssize_t device_name_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+
+ if (size >= IFNAMSIZ)
+ return -EINVAL;
+
+ cancel_delayed_work_sync(&trigger_data->work);
+
+ spin_lock_bh(&trigger_data->lock);
+
+ if (trigger_data->net_dev) {
+ dev_put(trigger_data->net_dev);
+ trigger_data->net_dev = NULL;
+ }
+
+ strncpy(trigger_data->device_name, buf, size);
+ if (size > 0 && trigger_data->device_name[size - 1] == '\n')
+ trigger_data->device_name[size - 1] = 0;
+
+ if (trigger_data->device_name[0] != 0)
+ trigger_data->net_dev =
+ dev_get_by_name(&init_net, trigger_data->device_name);
+
+ clear_bit(NETDEV_LED_MODE_LINKUP, &trigger_data->mode);
+ if (trigger_data->net_dev != NULL)
+ if (netif_carrier_ok(trigger_data->net_dev))
+ set_bit(NETDEV_LED_MODE_LINKUP, &trigger_data->mode);
+
+ trigger_data->last_activity = 0;
+
+ set_baseline_state(trigger_data);
+ spin_unlock_bh(&trigger_data->lock);
+
+ return size;
+}
+
+static DEVICE_ATTR_RW(device_name);
+
+static ssize_t netdev_led_attr_show(struct device *dev, char *buf,
+ enum netdev_led_attr attr)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+ int bit;
+
+ switch (attr) {
+ case NETDEV_ATTR_LINK:
+ bit = NETDEV_LED_LINK;
+ break;
+ case NETDEV_ATTR_TX:
+ bit = NETDEV_LED_TX;
+ break;
+ case NETDEV_ATTR_RX:
+ bit = NETDEV_LED_RX;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return sprintf(buf, "%u\n", test_bit(bit, &trigger_data->mode));
+}
+
+static ssize_t netdev_led_attr_store(struct device *dev, const char *buf,
+ size_t size, enum netdev_led_attr attr)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+ unsigned long state;
+ int ret;
+ int bit;
+
+ ret = kstrtoul(buf, 0, &state);
+ if (ret)
+ return ret;
+
+ switch (attr) {
+ case NETDEV_ATTR_LINK:
+ bit = NETDEV_LED_LINK;
+ break;
+ case NETDEV_ATTR_TX:
+ bit = NETDEV_LED_TX;
+ break;
+ case NETDEV_ATTR_RX:
+ bit = NETDEV_LED_RX;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cancel_delayed_work_sync(&trigger_data->work);
+
+ if (state)
+ set_bit(bit, &trigger_data->mode);
+ else
+ clear_bit(bit, &trigger_data->mode);
+
+ set_baseline_state(trigger_data);
+
+ return size;
+}
+
+static ssize_t link_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return netdev_led_attr_show(dev, buf, NETDEV_ATTR_LINK);
+}
+
+static ssize_t link_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ return netdev_led_attr_store(dev, buf, size, NETDEV_ATTR_LINK);
+}
+
+static DEVICE_ATTR_RW(link);
+
+static ssize_t tx_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return netdev_led_attr_show(dev, buf, NETDEV_ATTR_TX);
+}
+
+static ssize_t tx_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ return netdev_led_attr_store(dev, buf, size, NETDEV_ATTR_TX);
+}
+
+static DEVICE_ATTR_RW(tx);
+
+static ssize_t rx_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return netdev_led_attr_show(dev, buf, NETDEV_ATTR_RX);
+}
+
+static ssize_t rx_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ return netdev_led_attr_store(dev, buf, size, NETDEV_ATTR_RX);
+}
+
+static DEVICE_ATTR_RW(rx);
+
+static ssize_t interval_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+
+ return sprintf(buf, "%u\n",
+ jiffies_to_msecs(atomic_read(&trigger_data->interval)));
+}
+
+static ssize_t interval_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+ unsigned long value;
+ int ret;
+
+ ret = kstrtoul(buf, 0, &value);
+ if (ret)
+ return ret;
+
+ /* impose some basic bounds on the timer interval */
+ if (value >= 5 && value <= 10000) {
+ cancel_delayed_work_sync(&trigger_data->work);
+
+ atomic_set(&trigger_data->interval, msecs_to_jiffies(value));
+ set_baseline_state(trigger_data); /* resets timer */
+ }
+
+ return size;
+}
+
+static DEVICE_ATTR_RW(interval);
+
+static int netdev_trig_notify(struct notifier_block *nb,
+ unsigned long evt, void *dv)
+{
+ struct net_device *dev =
+ netdev_notifier_info_to_dev((struct netdev_notifier_info *)dv);
+ struct led_netdev_data *trigger_data =
+ container_of(nb, struct led_netdev_data, notifier);
+
+ if (evt != NETDEV_UP && evt != NETDEV_DOWN && evt != NETDEV_CHANGE
+ && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER
+ && evt != NETDEV_CHANGENAME)
+ return NOTIFY_DONE;
+
+ if (strcmp(dev->name, trigger_data->device_name))
+ return NOTIFY_DONE;
+
+ cancel_delayed_work_sync(&trigger_data->work);
+
+ spin_lock_bh(&trigger_data->lock);
+
+ clear_bit(NETDEV_LED_MODE_LINKUP, &trigger_data->mode);
+ switch (evt) {
+ case NETDEV_REGISTER:
+ if (trigger_data->net_dev)
+ dev_put(trigger_data->net_dev);
+ dev_hold(dev);
+ trigger_data->net_dev = dev;
+ break;
+ case NETDEV_CHANGENAME:
+ case NETDEV_UNREGISTER:
+ if (trigger_data->net_dev) {
+ dev_put(trigger_data->net_dev);
+ trigger_data->net_dev = NULL;
+ }
+ break;
+ case NETDEV_UP:
+ case NETDEV_CHANGE:
+ if (netif_carrier_ok(dev))
+ set_bit(NETDEV_LED_MODE_LINKUP, &trigger_data->mode);
+ break;
+ }
+
+ set_baseline_state(trigger_data);
+
+ spin_unlock_bh(&trigger_data->lock);
+
+ return NOTIFY_DONE;
+}
+
+/* here's the real work! */
+static void netdev_trig_work(struct work_struct *work)
+{
+ struct led_netdev_data *trigger_data =
+ container_of(work, struct led_netdev_data, work.work);
+ struct rtnl_link_stats64 *dev_stats;
+ unsigned int new_activity;
+ struct rtnl_link_stats64 temp;
+ unsigned long interval;
+ int invert;
+
+ /* If we don't have a device, ensure we are off */
+ if (!trigger_data->net_dev) {
+ led_set_brightness(trigger_data->led_cdev, LED_OFF);
+ return;
+ }
+
+ /* If we are not looking for RX/TX then return */
+ if (!test_bit(NETDEV_LED_TX, &trigger_data->mode) &&
+ !test_bit(NETDEV_LED_RX, &trigger_data->mode))
+ return;
+
+ dev_stats = dev_get_stats(trigger_data->net_dev, &temp);
+ new_activity =
+ (test_bit(NETDEV_LED_TX, &trigger_data->mode) ?
+ dev_stats->tx_packets : 0) +
+ (test_bit(NETDEV_LED_RX, &trigger_data->mode) ?
+ dev_stats->rx_packets : 0);
+
+ if (trigger_data->last_activity != new_activity) {
+ led_stop_software_blink(trigger_data->led_cdev);
+
+ invert = test_bit(NETDEV_LED_LINK, &trigger_data->mode);
+ interval = jiffies_to_msecs(
+ atomic_read(&trigger_data->interval));
+ /* base state is ON (link present) */
+ led_blink_set_oneshot(trigger_data->led_cdev,
+ &interval,
+ &interval,
+ invert);
+ trigger_data->last_activity = new_activity;
+ }
+
+ schedule_delayed_work(&trigger_data->work,
+ (atomic_read(&trigger_data->interval)*2));
+}
+
+static void netdev_trig_activate(struct led_classdev *led_cdev)
+{
+ struct led_netdev_data *trigger_data;
+ int rc;
+
+ trigger_data = kzalloc(sizeof(struct led_netdev_data), GFP_KERNEL);
+ if (!trigger_data)
+ return;
+
+ spin_lock_init(&trigger_data->lock);
+
+ trigger_data->notifier.notifier_call = netdev_trig_notify;
+ trigger_data->notifier.priority = 10;
+
+ INIT_DELAYED_WORK(&trigger_data->work, netdev_trig_work);
+
+ trigger_data->led_cdev = led_cdev;
+ trigger_data->net_dev = NULL;
+ trigger_data->device_name[0] = 0;
+
+ trigger_data->mode = 0;
+ atomic_set(&trigger_data->interval, msecs_to_jiffies(50));
+ trigger_data->last_activity = 0;
+
+ led_cdev->trigger_data = trigger_data;
+
+ rc = device_create_file(led_cdev->dev, &dev_attr_device_name);
+ if (rc)
+ goto err_out;
+ rc = device_create_file(led_cdev->dev, &dev_attr_link);
+ if (rc)
+ goto err_out_device_name;
+ rc = device_create_file(led_cdev->dev, &dev_attr_rx);
+ if (rc)
+ goto err_out_link;
+ rc = device_create_file(led_cdev->dev, &dev_attr_tx);
+ if (rc)
+ goto err_out_rx;
+ rc = device_create_file(led_cdev->dev, &dev_attr_interval);
+ if (rc)
+ goto err_out_tx;
+ rc = register_netdevice_notifier(&trigger_data->notifier);
+ if (rc)
+ goto err_out_interval;
+ return;
+
+err_out_interval:
+ device_remove_file(led_cdev->dev, &dev_attr_interval);
+err_out_tx:
+ device_remove_file(led_cdev->dev, &dev_attr_tx);
+err_out_rx:
+ device_remove_file(led_cdev->dev, &dev_attr_rx);
+err_out_link:
+ device_remove_file(led_cdev->dev, &dev_attr_link);
+err_out_device_name:
+ device_remove_file(led_cdev->dev, &dev_attr_device_name);
+err_out:
+ led_cdev->trigger_data = NULL;
+ kfree(trigger_data);
+}
+
+static void netdev_trig_deactivate(struct led_classdev *led_cdev)
+{
+ struct led_netdev_data *trigger_data = led_cdev->trigger_data;
+
+ if (trigger_data) {
+ unregister_netdevice_notifier(&trigger_data->notifier);
+
+ device_remove_file(led_cdev->dev, &dev_attr_device_name);
+ device_remove_file(led_cdev->dev, &dev_attr_link);
+ device_remove_file(led_cdev->dev, &dev_attr_rx);
+ device_remove_file(led_cdev->dev, &dev_attr_tx);
+ device_remove_file(led_cdev->dev, &dev_attr_interval);
+
+ cancel_delayed_work_sync(&trigger_data->work);
+
+ if (trigger_data->net_dev)
+ dev_put(trigger_data->net_dev);
+
+ kfree(trigger_data);
+ }
+}
+
+static struct led_trigger netdev_led_trigger = {
+ .name = "netdev",
+ .activate = netdev_trig_activate,
+ .deactivate = netdev_trig_deactivate,
+};
+
+static int __init netdev_trig_init(void)
+{
+ return led_trigger_register(&netdev_led_trigger);
+}
+
+static void __exit netdev_trig_exit(void)
+{
+ led_trigger_unregister(&netdev_led_trigger);
+}
+
+module_init(netdev_trig_init);
+module_exit(netdev_trig_exit);
+
+MODULE_AUTHOR("Ben Whitten <ben.whitten@gmail.com>");
+MODULE_AUTHOR("Oliver Jowett <oliver@opencloud.com>");
+MODULE_DESCRIPTION("Netdev LED trigger");
+MODULE_LICENSE("GPL v2");
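Putting the trigger to use only involves the sysfs attributes documented at the top of the file. A hypothetical user-space sketch (LED name "led0" and interface "eth0" are made up; the standard LED class trigger file is assumed):

#include <stdio.h>

static int write_attr(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	write_attr("/sys/class/leds/led0/trigger", "netdev");
	write_attr("/sys/class/leds/led0/device_name", "eth0");
	write_attr("/sys/class/leds/led0/link", "1");	/* on while link is up */
	write_attr("/sys/class/leds/led0/rx", "1");	/* blink on receive */
	write_attr("/sys/class/leds/led0/tx", "1");	/* blink on transmit */
	write_attr("/sys/class/leds/led0/interval", "100");	/* blink ms */
	return 0;
}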
diff --git a/drivers/leds/trigger/ledtrig-transient.c b/drivers/leds/trigger/ledtrig-transient.c
index 7acce64b692a..9d1769073562 100644
--- a/drivers/leds/trigger/ledtrig-transient.c
+++ b/drivers/leds/trigger/ledtrig-transient.c
@@ -1,22 +1,15 @@
-/*
- * LED Kernel Transient Trigger
- *
- * Copyright (C) 2012 Shuah Khan <shuahkhan@gmail.com>
- *
- * Based on Richard Purdie's ledtrig-timer.c and Atsushi Nemoto's
- * ledtrig-heartbeat.c
- * Design and use-case input from Jonas Bonn <jonas@southpole.se> and
- * Neil Brown <neilb@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-/*
- * Transient trigger allows one shot timer activation. Please refer to
- * Documentation/leds/ledtrig-transient.txt for details
-*/
+// SPDX-License-Identifier: GPL-2.0
+//
+// LED Kernel Transient Trigger
+//
+// Transient trigger allows one shot timer activation. Please refer to
+// Documentation/leds/ledtrig-transient.txt for details
+// Copyright (C) 2012 Shuah Khan <shuahkhan@gmail.com>
+//
+// Based on Richard Purdie's ledtrig-timer.c and Atsushi Nemoto's
+// ledtrig-heartbeat.c
+// Design and use-case input from Jonas Bonn <jonas@southpole.se> and
+// Neil Brown <neilb@suse.de>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -238,4 +231,4 @@ module_exit(transient_trig_exit);
MODULE_AUTHOR("Shuah Khan <shuahkhan@gmail.com>");
MODULE_DESCRIPTION("Transient LED trigger");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/uleds.c b/drivers/leds/uleds.c
index 5e9e8a1fdefb..5beacab05ed7 100644
--- a/drivers/leds/uleds.c
+++ b/drivers/leds/uleds.c
@@ -176,7 +176,7 @@ static ssize_t uleds_read(struct file *file, char __user *buffer, size_t count,
return retval;
}
-static unsigned int uleds_poll(struct file *file, poll_table *wait)
+static __poll_t uleds_poll(struct file *file, poll_table *wait)
{
struct uleds_device *udev = file->private_data;
diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig
index 2a953efec4e1..10c08982185a 100644
--- a/drivers/lightnvm/Kconfig
+++ b/drivers/lightnvm/Kconfig
@@ -27,13 +27,6 @@ config NVM_DEBUG
It is required to create/remove targets without IOCTLs.
-config NVM_RRPC
- tristate "Round-robin Hybrid Open-Channel SSD target"
- ---help---
- Allows an open-channel SSD to be exposed as a block device to the
- host. The target is implemented using a linear mapping table and
- cost-based garbage collection. It is optimized for 4K IO sizes.
-
config NVM_PBLK
tristate "Physical Block Device Open-Channel SSD target"
---help---
diff --git a/drivers/lightnvm/Makefile b/drivers/lightnvm/Makefile
index 2c3fd9d2c08c..97d9d7c71550 100644
--- a/drivers/lightnvm/Makefile
+++ b/drivers/lightnvm/Makefile
@@ -4,7 +4,6 @@
#
obj-$(CONFIG_NVM) := core.o
-obj-$(CONFIG_NVM_RRPC) += rrpc.o
obj-$(CONFIG_NVM_PBLK) += pblk.o
pblk-y := pblk-init.o pblk-core.o pblk-rb.o \
pblk-write.o pblk-cache.o pblk-read.o \
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 83249b43dd06..dcc9e621e651 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -45,12 +45,6 @@ struct nvm_dev_map {
int nr_chnls;
};
-struct nvm_area {
- struct list_head list;
- sector_t begin;
- sector_t end; /* end is excluded */
-};
-
static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
{
struct nvm_target *tgt;
@@ -62,6 +56,30 @@ static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
return NULL;
}
+static bool nvm_target_exists(const char *name)
+{
+ struct nvm_dev *dev;
+ struct nvm_target *tgt;
+ bool ret = false;
+
+ down_write(&nvm_lock);
+ list_for_each_entry(dev, &nvm_devices, devices) {
+ mutex_lock(&dev->mlock);
+ list_for_each_entry(tgt, &dev->targets, list) {
+ if (!strcmp(name, tgt->disk->disk_name)) {
+ ret = true;
+ mutex_unlock(&dev->mlock);
+ goto out;
+ }
+ }
+ mutex_unlock(&dev->mlock);
+ }
+
+out:
+ up_write(&nvm_lock);
+ return ret;
+}
+
static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
{
int i;
@@ -104,7 +122,7 @@ static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
if (clear) {
for (j = 0; j < ch_map->nr_luns; j++) {
int lun = j + lun_offs[j];
- int lunid = (ch * dev->geo.luns_per_chnl) + lun;
+ int lunid = (ch * dev->geo.nr_luns) + lun;
WARN_ON(!test_and_clear_bit(lunid,
dev->lun_map));
@@ -122,7 +140,8 @@ static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
}
static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
- int lun_begin, int lun_end)
+ u16 lun_begin, u16 lun_end,
+ u16 op)
{
struct nvm_tgt_dev *tgt_dev = NULL;
struct nvm_dev_map *dev_rmap = dev->rmap;
@@ -130,10 +149,10 @@ static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
struct ppa_addr *luns;
int nr_luns = lun_end - lun_begin + 1;
int luns_left = nr_luns;
- int nr_chnls = nr_luns / dev->geo.luns_per_chnl;
- int nr_chnls_mod = nr_luns % dev->geo.luns_per_chnl;
- int bch = lun_begin / dev->geo.luns_per_chnl;
- int blun = lun_begin % dev->geo.luns_per_chnl;
+ int nr_chnls = nr_luns / dev->geo.nr_luns;
+ int nr_chnls_mod = nr_luns % dev->geo.nr_luns;
+ int bch = lun_begin / dev->geo.nr_luns;
+ int blun = lun_begin % dev->geo.nr_luns;
int lunid = 0;
int lun_balanced = 1;
int prev_nr_luns;
@@ -154,15 +173,15 @@ static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
if (!luns)
goto err_luns;
- prev_nr_luns = (luns_left > dev->geo.luns_per_chnl) ?
- dev->geo.luns_per_chnl : luns_left;
+ prev_nr_luns = (luns_left > dev->geo.nr_luns) ?
+ dev->geo.nr_luns : luns_left;
for (i = 0; i < nr_chnls; i++) {
struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
int *lun_roffs = ch_rmap->lun_offs;
struct nvm_ch_map *ch_map = &dev_map->chnls[i];
int *lun_offs;
- int luns_in_chnl = (luns_left > dev->geo.luns_per_chnl) ?
- dev->geo.luns_per_chnl : luns_left;
+ int luns_in_chnl = (luns_left > dev->geo.nr_luns) ?
+ dev->geo.nr_luns : luns_left;
if (lun_balanced && prev_nr_luns != luns_in_chnl)
lun_balanced = 0;
@@ -199,8 +218,9 @@ static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));
/* Target device only owns a portion of the physical device */
tgt_dev->geo.nr_chnls = nr_chnls;
- tgt_dev->geo.nr_luns = nr_luns;
- tgt_dev->geo.luns_per_chnl = (lun_balanced) ? prev_nr_luns : -1;
+ tgt_dev->geo.all_luns = nr_luns;
+ tgt_dev->geo.nr_luns = (lun_balanced) ? prev_nr_luns : -1;
+ tgt_dev->geo.op = op;
tgt_dev->total_secs = nr_luns * tgt_dev->geo.sec_per_lun;
tgt_dev->q = dev->q;
tgt_dev->map = dev_map;
@@ -226,27 +246,79 @@ static const struct block_device_operations nvm_fops = {
.owner = THIS_MODULE,
};
-static struct nvm_tgt_type *nvm_find_target_type(const char *name, int lock)
+static struct nvm_tgt_type *__nvm_find_target_type(const char *name)
{
- struct nvm_tgt_type *tmp, *tt = NULL;
+ struct nvm_tgt_type *tt;
- if (lock)
- down_write(&nvm_tgtt_lock);
+ list_for_each_entry(tt, &nvm_tgt_types, list)
+ if (!strcmp(name, tt->name))
+ return tt;
- list_for_each_entry(tmp, &nvm_tgt_types, list)
- if (!strcmp(name, tmp->name)) {
- tt = tmp;
- break;
- }
+ return NULL;
+}
+
+static struct nvm_tgt_type *nvm_find_target_type(const char *name)
+{
+ struct nvm_tgt_type *tt;
+
+ down_write(&nvm_tgtt_lock);
+ tt = __nvm_find_target_type(name);
+ up_write(&nvm_tgtt_lock);
- if (lock)
- up_write(&nvm_tgtt_lock);
return tt;
}
+static int nvm_config_check_luns(struct nvm_geo *geo, int lun_begin,
+ int lun_end)
+{
+ if (lun_begin > lun_end || lun_end >= geo->all_luns) {
+ pr_err("nvm: lun out of bound (%u:%u > %u)\n",
+ lun_begin, lun_end, geo->all_luns - 1);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __nvm_config_simple(struct nvm_dev *dev,
+ struct nvm_ioctl_create_simple *s)
+{
+ struct nvm_geo *geo = &dev->geo;
+
+ if (s->lun_begin == -1 && s->lun_end == -1) {
+ s->lun_begin = 0;
+ s->lun_end = geo->all_luns - 1;
+ }
+
+ return nvm_config_check_luns(geo, s->lun_begin, s->lun_end);
+}
+
+static int __nvm_config_extended(struct nvm_dev *dev,
+ struct nvm_ioctl_create_extended *e)
+{
+ struct nvm_geo *geo = &dev->geo;
+
+ if (e->lun_begin == 0xFFFF && e->lun_end == 0xFFFF) {
+ e->lun_begin = 0;
+ e->lun_end = dev->geo.all_luns - 1;
+ }
+
+ /* If OP is not set, fall back to the target's default */
+ if (e->op == 0xFFFF)
+ e->op = NVM_TARGET_DEFAULT_OP;
+
+ if (e->op < NVM_TARGET_MIN_OP ||
+ e->op > NVM_TARGET_MAX_OP) {
+ pr_err("nvm: invalid over provisioning value\n");
+ return -EINVAL;
+ }
+
+ return nvm_config_check_luns(geo, e->lun_begin, e->lun_end);
+}
+
static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
- struct nvm_ioctl_create_simple *s = &create->conf.s;
+ struct nvm_ioctl_create_extended e;
struct request_queue *tqueue;
struct gendisk *tdisk;
struct nvm_tgt_type *tt;
@@ -255,22 +327,41 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
void *targetdata;
int ret;
- tt = nvm_find_target_type(create->tgttype, 1);
+ switch (create->conf.type) {
+ case NVM_CONFIG_TYPE_SIMPLE:
+ ret = __nvm_config_simple(dev, &create->conf.s);
+ if (ret)
+ return ret;
+
+ e.lun_begin = create->conf.s.lun_begin;
+ e.lun_end = create->conf.s.lun_end;
+ e.op = NVM_TARGET_DEFAULT_OP;
+ break;
+ case NVM_CONFIG_TYPE_EXTENDED:
+ ret = __nvm_config_extended(dev, &create->conf.e);
+ if (ret)
+ return ret;
+
+ e = create->conf.e;
+ break;
+ default:
+ pr_err("nvm: config type not valid\n");
+ return -EINVAL;
+ }
+
+ tt = nvm_find_target_type(create->tgttype);
if (!tt) {
pr_err("nvm: target type %s not found\n", create->tgttype);
return -EINVAL;
}
- mutex_lock(&dev->mlock);
- t = nvm_find_target(dev, create->tgtname);
- if (t) {
- pr_err("nvm: target name already exists.\n");
- mutex_unlock(&dev->mlock);
+ if (nvm_target_exists(create->tgtname)) {
+ pr_err("nvm: target name already exists (%s)\n",
+ create->tgtname);
return -EINVAL;
}
- mutex_unlock(&dev->mlock);
- ret = nvm_reserve_luns(dev, s->lun_begin, s->lun_end);
+ ret = nvm_reserve_luns(dev, e.lun_begin, e.lun_end);
if (ret)
return ret;
@@ -280,7 +371,7 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
goto err_reserve;
}
- tgt_dev = nvm_create_tgt_dev(dev, s->lun_begin, s->lun_end);
+ tgt_dev = nvm_create_tgt_dev(dev, e.lun_begin, e.lun_end, e.op);
if (!tgt_dev) {
pr_err("nvm: could not create target device\n");
ret = -ENOMEM;
@@ -350,7 +441,7 @@ err_dev:
err_t:
kfree(t);
err_reserve:
- nvm_release_luns_err(dev, s->lun_begin, s->lun_end);
+ nvm_release_luns_err(dev, e.lun_begin, e.lun_end);
return ret;
}
@@ -420,7 +511,7 @@ static int nvm_register_map(struct nvm_dev *dev)
for (i = 0; i < dev->geo.nr_chnls; i++) {
struct nvm_ch_map *ch_rmap;
int *lun_roffs;
- int luns_in_chnl = dev->geo.luns_per_chnl;
+ int luns_in_chnl = dev->geo.nr_luns;
ch_rmap = &rmap->chnls[i];
@@ -524,41 +615,12 @@ static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
nvm_ppa_dev_to_tgt(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
}
-void nvm_part_to_tgt(struct nvm_dev *dev, sector_t *entries,
- int len)
-{
- struct nvm_geo *geo = &dev->geo;
- struct nvm_dev_map *dev_rmap = dev->rmap;
- u64 i;
-
- for (i = 0; i < len; i++) {
- struct nvm_ch_map *ch_rmap;
- int *lun_roffs;
- struct ppa_addr gaddr;
- u64 pba = le64_to_cpu(entries[i]);
- u64 diff;
-
- if (!pba)
- continue;
-
- gaddr = linear_to_generic_addr(geo, pba);
- ch_rmap = &dev_rmap->chnls[gaddr.g.ch];
- lun_roffs = ch_rmap->lun_offs;
-
- diff = ((ch_rmap->ch_off * geo->luns_per_chnl) +
- (lun_roffs[gaddr.g.lun])) * geo->sec_per_lun;
-
- entries[i] -= cpu_to_le64(diff);
- }
-}
-EXPORT_SYMBOL(nvm_part_to_tgt);
-
int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
int ret = 0;
down_write(&nvm_tgtt_lock);
- if (nvm_find_target_type(tt->name, 0))
+ if (__nvm_find_target_type(tt->name))
ret = -EEXIST;
else
list_add(&tt->list, &nvm_tgt_types);
@@ -726,112 +788,6 @@ int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
}
EXPORT_SYMBOL(nvm_submit_io_sync);
-int nvm_erase_sync(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
- int nr_ppas)
-{
- struct nvm_geo *geo = &tgt_dev->geo;
- struct nvm_rq rqd;
- int ret;
-
- memset(&rqd, 0, sizeof(struct nvm_rq));
-
- rqd.opcode = NVM_OP_ERASE;
- rqd.flags = geo->plane_mode >> 1;
-
- ret = nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
- if (ret)
- return ret;
-
- ret = nvm_submit_io_sync(tgt_dev, &rqd);
- if (ret) {
- pr_err("rrpr: erase I/O submission failed: %d\n", ret);
- goto free_ppa_list;
- }
-
-free_ppa_list:
- nvm_free_rqd_ppalist(tgt_dev, &rqd);
-
- return ret;
-}
-EXPORT_SYMBOL(nvm_erase_sync);
-
-int nvm_get_l2p_tbl(struct nvm_tgt_dev *tgt_dev, u64 slba, u32 nlb,
- nvm_l2p_update_fn *update_l2p, void *priv)
-{
- struct nvm_dev *dev = tgt_dev->parent;
-
- if (!dev->ops->get_l2p_tbl)
- return 0;
-
- return dev->ops->get_l2p_tbl(dev, slba, nlb, update_l2p, priv);
-}
-EXPORT_SYMBOL(nvm_get_l2p_tbl);
-
-int nvm_get_area(struct nvm_tgt_dev *tgt_dev, sector_t *lba, sector_t len)
-{
- struct nvm_dev *dev = tgt_dev->parent;
- struct nvm_geo *geo = &dev->geo;
- struct nvm_area *area, *prev, *next;
- sector_t begin = 0;
- sector_t max_sectors = (geo->sec_size * dev->total_secs) >> 9;
-
- if (len > max_sectors)
- return -EINVAL;
-
- area = kmalloc(sizeof(struct nvm_area), GFP_KERNEL);
- if (!area)
- return -ENOMEM;
-
- prev = NULL;
-
- spin_lock(&dev->lock);
- list_for_each_entry(next, &dev->area_list, list) {
- if (begin + len > next->begin) {
- begin = next->end;
- prev = next;
- continue;
- }
- break;
- }
-
- if ((begin + len) > max_sectors) {
- spin_unlock(&dev->lock);
- kfree(area);
- return -EINVAL;
- }
-
- area->begin = *lba = begin;
- area->end = begin + len;
-
- if (prev) /* insert into sorted order */
- list_add(&area->list, &prev->list);
- else
- list_add(&area->list, &dev->area_list);
- spin_unlock(&dev->lock);
-
- return 0;
-}
-EXPORT_SYMBOL(nvm_get_area);
-
-void nvm_put_area(struct nvm_tgt_dev *tgt_dev, sector_t begin)
-{
- struct nvm_dev *dev = tgt_dev->parent;
- struct nvm_area *area;
-
- spin_lock(&dev->lock);
- list_for_each_entry(area, &dev->area_list, list) {
- if (area->begin != begin)
- continue;
-
- list_del(&area->list);
- spin_unlock(&dev->lock);
- kfree(area);
- return;
- }
- spin_unlock(&dev->lock);
-}
-EXPORT_SYMBOL(nvm_put_area);
-
void nvm_end_io(struct nvm_rq *rqd)
{
struct nvm_tgt_dev *tgt_dev = rqd->dev;
@@ -858,10 +814,10 @@ int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
struct nvm_geo *geo = &dev->geo;
int blk, offset, pl, blktype;
- if (nr_blks != geo->blks_per_lun * geo->plane_mode)
+ if (nr_blks != geo->nr_chks * geo->plane_mode)
return -EINVAL;
- for (blk = 0; blk < geo->blks_per_lun; blk++) {
+ for (blk = 0; blk < geo->nr_chks; blk++) {
offset = blk * geo->plane_mode;
blktype = blks[offset];
@@ -877,7 +833,7 @@ int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
blks[blk] = blktype;
}
- return geo->blks_per_lun;
+ return geo->nr_chks;
}
EXPORT_SYMBOL(nvm_bb_tbl_fold);
@@ -892,53 +848,6 @@ int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
}
EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);
-static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
-{
- struct nvm_geo *geo = &dev->geo;
- int i;
-
- dev->lps_per_blk = geo->pgs_per_blk;
- dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
- if (!dev->lptbl)
- return -ENOMEM;
-
- /* Just a linear array */
- for (i = 0; i < dev->lps_per_blk; i++)
- dev->lptbl[i] = i;
-
- return 0;
-}
-
-static int nvm_init_mlc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
-{
- int i, p;
- struct nvm_id_lp_mlc *mlc = &grp->lptbl.mlc;
-
- if (!mlc->num_pairs)
- return 0;
-
- dev->lps_per_blk = mlc->num_pairs;
- dev->lptbl = kcalloc(dev->lps_per_blk, sizeof(int), GFP_KERNEL);
- if (!dev->lptbl)
- return -ENOMEM;
-
- /* The lower page table encoding consists of a list of bytes, where each
- * has a lower and an upper half. The first half byte maintains the
- * increment value and every value after is an offset added to the
- * previous incrementation value
- */
- dev->lptbl[0] = mlc->pairs[0] & 0xF;
- for (i = 1; i < dev->lps_per_blk; i++) {
- p = mlc->pairs[i >> 1];
- if (i & 0x1) /* upper */
- dev->lptbl[i] = dev->lptbl[i - 1] + ((p & 0xF0) >> 4);
- else /* lower */
- dev->lptbl[i] = dev->lptbl[i - 1] + (p & 0xF);
- }
-
- return 0;
-}
-
static int nvm_core_init(struct nvm_dev *dev)
{
struct nvm_id *id = &dev->identity;
@@ -946,66 +855,44 @@ static int nvm_core_init(struct nvm_dev *dev)
struct nvm_geo *geo = &dev->geo;
int ret;
+ memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));
+
+ if (grp->mtype != 0) {
+ pr_err("nvm: memory type not supported\n");
+ return -EINVAL;
+ }
+
/* Whole device values */
geo->nr_chnls = grp->num_ch;
- geo->luns_per_chnl = grp->num_lun;
-
- /* Generic device values */
- geo->pgs_per_blk = grp->num_pg;
- geo->blks_per_lun = grp->num_blk;
- geo->nr_planes = grp->num_pln;
- geo->fpg_size = grp->fpg_sz;
- geo->pfpg_size = grp->fpg_sz * grp->num_pln;
+ geo->nr_luns = grp->num_lun;
+
+ /* Generic device geometry values */
+ geo->ws_min = grp->ws_min;
+ geo->ws_opt = grp->ws_opt;
+ geo->ws_seq = grp->ws_seq;
+ geo->ws_per_chk = grp->ws_per_chk;
+ geo->nr_chks = grp->num_chk;
geo->sec_size = grp->csecs;
geo->oob_size = grp->sos;
- geo->sec_per_pg = grp->fpg_sz / grp->csecs;
geo->mccap = grp->mccap;
- memcpy(&geo->ppaf, &id->ppaf, sizeof(struct nvm_addr_format));
-
- geo->plane_mode = NVM_PLANE_SINGLE;
geo->max_rq_size = dev->ops->max_phys_sect * geo->sec_size;
- if (grp->mpos & 0x020202)
- geo->plane_mode = NVM_PLANE_DOUBLE;
- if (grp->mpos & 0x040404)
- geo->plane_mode = NVM_PLANE_QUAD;
+ geo->sec_per_chk = grp->clba;
+ geo->sec_per_lun = geo->sec_per_chk * geo->nr_chks;
+ geo->all_luns = geo->nr_luns * geo->nr_chnls;
- if (grp->mtype != 0) {
- pr_err("nvm: memory type not supported\n");
- return -EINVAL;
- }
-
- /* calculated values */
+ /* 1.2 spec device geometry values */
+ geo->plane_mode = 1 << geo->ws_seq;
+ geo->nr_planes = geo->ws_opt / geo->ws_min;
+ geo->sec_per_pg = geo->ws_min;
geo->sec_per_pl = geo->sec_per_pg * geo->nr_planes;
- geo->sec_per_blk = geo->sec_per_pl * geo->pgs_per_blk;
- geo->sec_per_lun = geo->sec_per_blk * geo->blks_per_lun;
- geo->nr_luns = geo->luns_per_chnl * geo->nr_chnls;
- dev->total_secs = geo->nr_luns * geo->sec_per_lun;
- dev->lun_map = kcalloc(BITS_TO_LONGS(geo->nr_luns),
+ dev->total_secs = geo->all_luns * geo->sec_per_lun;
+ dev->lun_map = kcalloc(BITS_TO_LONGS(geo->all_luns),
sizeof(unsigned long), GFP_KERNEL);
if (!dev->lun_map)
return -ENOMEM;
- switch (grp->fmtype) {
- case NVM_ID_FMTYPE_SLC:
- if (nvm_init_slc_tbl(dev, grp)) {
- ret = -ENOMEM;
- goto err_fmtype;
- }
- break;
- case NVM_ID_FMTYPE_MLC:
- if (nvm_init_mlc_tbl(dev, grp)) {
- ret = -ENOMEM;
- goto err_fmtype;
- }
- break;
- default:
- pr_err("nvm: flash type not supported\n");
- ret = -EINVAL;
- goto err_fmtype;
- }
-
INIT_LIST_HEAD(&dev->area_list);
INIT_LIST_HEAD(&dev->targets);
mutex_init(&dev->mlock);
@@ -1031,7 +918,6 @@ static void nvm_free(struct nvm_dev *dev)
dev->ops->destroy_dma_pool(dev->dma_pool);
nvm_unregister_map(dev);
- kfree(dev->lptbl);
kfree(dev->lun_map);
kfree(dev);
}
@@ -1062,8 +948,8 @@ static int nvm_init(struct nvm_dev *dev)
pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n",
dev->name, geo->sec_per_pg, geo->nr_planes,
- geo->pgs_per_blk, geo->blks_per_lun,
- geo->nr_luns, geo->nr_chnls);
+ geo->ws_per_chk, geo->nr_chks,
+ geo->all_luns, geo->nr_chnls);
return 0;
err:
pr_err("nvm: failed to initialize nvm\n");
@@ -1135,7 +1021,6 @@ EXPORT_SYMBOL(nvm_unregister);
static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
struct nvm_dev *dev;
- struct nvm_ioctl_create_simple *s;
down_write(&nvm_lock);
dev = nvm_find_nvm_dev(create->dev);
@@ -1146,23 +1031,6 @@ static int __nvm_configure_create(struct nvm_ioctl_create *create)
return -EINVAL;
}
- if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
- pr_err("nvm: config type not valid\n");
- return -EINVAL;
- }
- s = &create->conf.s;
-
- if (s->lun_begin == -1 && s->lun_end == -1) {
- s->lun_begin = 0;
- s->lun_end = dev->geo.nr_luns - 1;
- }
-
- if (s->lun_begin > s->lun_end || s->lun_end >= dev->geo.nr_luns) {
- pr_err("nvm: lun out of bound (%u:%u > %u)\n",
- s->lun_begin, s->lun_end, dev->geo.nr_luns - 1);
- return -EINVAL;
- }
-
return nvm_create_tgt(dev, create);
}
@@ -1262,6 +1130,12 @@ static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
return -EFAULT;
+ if (create.conf.type == NVM_CONFIG_TYPE_EXTENDED &&
+ create.conf.e.rsv != 0) {
+ pr_err("nvm: reserved config field in use\n");
+ return -EINVAL;
+ }
+
create.dev[DISK_NAME_LEN - 1] = '\0';
create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
create.tgtname[DISK_NAME_LEN - 1] = '\0';
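Taken together, the extended config gives userspace a single struct in which 0xFFFF means "use the default" for both the LUN range and the over-provisioning (OP) value, with the reserved field required to be zero. A hypothetical userspace sketch, with the struct layout and ioctl assumed from the lightnvm uapi header and all device/target names made up:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/lightnvm.h>

int main(void)
{
	struct nvm_ioctl_create create = { 0 };
	int fd = open("/dev/lightnvm/control", O_RDWR);

	if (fd < 0)
		return 1;

	strncpy(create.dev, "nvme0n1", DISK_NAME_LEN - 1);
	strncpy(create.tgtname, "pblk0", DISK_NAME_LEN - 1);
	strncpy(create.tgttype, "pblk", NVM_TTYPE_NAME_MAX - 1);
	create.conf.type = NVM_CONFIG_TYPE_EXTENDED;
	create.conf.e.lun_begin = 0xFFFF;	/* default: first LUN */
	create.conf.e.lun_end = 0xFFFF;		/* default: last LUN */
	create.conf.e.op = 0xFFFF;		/* default over-provisioning */
	create.conf.e.rsv = 0;			/* must be zero, see ioctl check */

	return ioctl(fd, NVM_DEV_CREATE, &create) ? 1 : 0;
}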
diff --git a/drivers/lightnvm/pblk-cache.c b/drivers/lightnvm/pblk-cache.c
index 0d227ef7d1b9..000fcad38136 100644
--- a/drivers/lightnvm/pblk-cache.c
+++ b/drivers/lightnvm/pblk-cache.c
@@ -19,12 +19,16 @@
int pblk_write_to_cache(struct pblk *pblk, struct bio *bio, unsigned long flags)
{
+ struct request_queue *q = pblk->dev->q;
struct pblk_w_ctx w_ctx;
sector_t lba = pblk_get_lba(bio);
+ unsigned long start_time = jiffies;
unsigned int bpos, pos;
int nr_entries = pblk_get_secs(bio);
int i, ret;
+ generic_start_io_acct(q, WRITE, bio_sectors(bio), &pblk->disk->part0);
+
/* Update the write buffer head (mem) with the entries that we can
* write. The write in itself cannot fail, so there is no need to
* rollback from here on.
@@ -67,6 +71,7 @@ retry:
pblk_rl_inserted(&pblk->rl, nr_entries);
out:
+ generic_end_io_acct(q, WRITE, &pblk->disk->part0, start_time);
pblk_write_should_kick(pblk);
return ret;
}
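The write path is now bracketed by the generic block-layer accounting helpers against part0, so pblk traffic shows up in iostat. The pattern, reduced to a skeleton (do_the_io() is a stand-in, not a real function):

#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/jiffies.h>

static int do_the_io(struct bio *bio)
{
	return 0;	/* stand-in for the real buffered-write path */
}

static int example_write(struct request_queue *q, struct gendisk *disk,
			 struct bio *bio)
{
	unsigned long start_time = jiffies;
	int ret;

	generic_start_io_acct(q, WRITE, bio_sectors(bio), &disk->part0);
	ret = do_the_io(bio);
	generic_end_io_acct(q, WRITE, &disk->part0, start_time);

	return ret;
}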
diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
index 76516ee84e9a..0487b9340c1d 100644
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -32,8 +32,8 @@ static void pblk_line_mark_bb(struct work_struct *work)
struct pblk_line *line;
int pos;
- line = &pblk->lines[pblk_dev_ppa_to_line(*ppa)];
- pos = pblk_dev_ppa_to_pos(&dev->geo, *ppa);
+ line = &pblk->lines[pblk_ppa_to_line(*ppa)];
+ pos = pblk_ppa_to_pos(&dev->geo, *ppa);
pr_err("pblk: failed to mark bb, line:%d, pos:%d\n",
line->id, pos);
@@ -48,7 +48,7 @@ static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
{
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
- int pos = pblk_dev_ppa_to_pos(geo, *ppa);
+ int pos = pblk_ppa_to_pos(geo, *ppa);
pr_debug("pblk: erase failed: line:%d, pos:%d\n", line->id, pos);
atomic_long_inc(&pblk->erase_failed);
@@ -66,7 +66,7 @@ static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
{
struct pblk_line *line;
- line = &pblk->lines[pblk_dev_ppa_to_line(rqd->ppa_addr)];
+ line = &pblk->lines[pblk_ppa_to_line(rqd->ppa_addr)];
atomic_dec(&line->left_seblks);
if (rqd->error) {
@@ -144,7 +144,7 @@ void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
BUG_ON(pblk_ppa_empty(ppa));
#endif
- line_id = pblk_tgt_ppa_to_line(ppa);
+ line_id = pblk_ppa_to_line(ppa);
line = &pblk->lines[line_id];
paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);
@@ -650,7 +650,7 @@ next_rq:
} else {
for (i = 0; i < rqd.nr_ppas; ) {
struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
- int pos = pblk_dev_ppa_to_pos(geo, ppa);
+ int pos = pblk_ppa_to_pos(geo, ppa);
int read_type = PBLK_READ_RANDOM;
if (pblk_io_aligned(pblk, rq_ppas))
@@ -668,7 +668,7 @@ next_rq:
}
ppa = addr_to_gen_ppa(pblk, paddr, id);
- pos = pblk_dev_ppa_to_pos(geo, ppa);
+ pos = pblk_ppa_to_pos(geo, ppa);
}
if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
@@ -742,7 +742,7 @@ static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
cmd_op = NVM_OP_PWRITE;
flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
lba_list = emeta_to_lbas(pblk, line->emeta->buf);
- } else if (dir == PBLK_READ) {
+ } else if (dir == PBLK_READ_RECOV || dir == PBLK_READ) {
bio_op = REQ_OP_READ;
cmd_op = NVM_OP_PREAD;
flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
@@ -802,7 +802,7 @@ static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
if (rqd.error) {
if (dir == PBLK_WRITE)
pblk_log_write_err(pblk, &rqd);
- else
+ else if (dir == PBLK_READ)
pblk_log_read_err(pblk, &rqd);
}
@@ -816,7 +816,7 @@ int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
{
u64 bpaddr = pblk_line_smeta_start(pblk, line);
- return pblk_line_submit_smeta_io(pblk, line, bpaddr, PBLK_READ);
+ return pblk_line_submit_smeta_io(pblk, line, bpaddr, PBLK_READ_RECOV);
}
int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
@@ -854,8 +854,8 @@ static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
struct nvm_geo *geo = &dev->geo;
pr_err("pblk: could not sync erase line:%d,blk:%d\n",
- pblk_dev_ppa_to_line(ppa),
- pblk_dev_ppa_to_pos(geo, ppa));
+ pblk_ppa_to_line(ppa),
+ pblk_ppa_to_pos(geo, ppa));
rqd.error = ret;
goto out;
@@ -979,7 +979,7 @@ static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
/* Start metadata */
smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
- smeta_buf->window_wr_lun = cpu_to_le32(geo->nr_luns);
+ smeta_buf->window_wr_lun = cpu_to_le32(geo->all_luns);
/* Fill metadata among lines */
if (cur) {
@@ -1032,7 +1032,7 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
lm->sec_per_line);
bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
lm->sec_per_line);
- line->sec_in_line -= geo->sec_per_blk;
+ line->sec_in_line -= geo->sec_per_chk;
if (bit >= lm->emeta_bb)
nr_bb++;
}
@@ -1145,7 +1145,7 @@ int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
}
spin_unlock(&l_mg->free_lock);
- pblk_rl_free_lines_dec(&pblk->rl, line);
+ pblk_rl_free_lines_dec(&pblk->rl, line, true);
if (!pblk_line_init_bb(pblk, line, 0)) {
list_add(&line->list, &l_mg->free_list);
@@ -1233,7 +1233,7 @@ retry:
l_mg->data_line = retry_line;
spin_unlock(&l_mg->free_lock);
- pblk_rl_free_lines_dec(&pblk->rl, retry_line);
+ pblk_rl_free_lines_dec(&pblk->rl, line, false);
if (pblk_line_erase(pblk, retry_line))
goto retry;
@@ -1252,7 +1252,6 @@ struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
{
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_line *line;
- int is_next = 0;
spin_lock(&l_mg->free_lock);
line = pblk_line_get(pblk);
@@ -1280,7 +1279,6 @@ struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
} else {
l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
l_mg->data_next->type = PBLK_LINETYPE_DATA;
- is_next = 1;
}
spin_unlock(&l_mg->free_lock);
@@ -1290,10 +1288,6 @@ struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
return NULL;
}
- pblk_rl_free_lines_dec(&pblk->rl, line);
- if (is_next)
- pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
-
retry_setup:
if (!pblk_line_init_metadata(pblk, line, NULL)) {
line = pblk_line_retry(pblk, line);
@@ -1311,6 +1305,8 @@ retry_setup:
goto retry_setup;
}
+ pblk_rl_free_lines_dec(&pblk->rl, line, true);
+
return line;
}
@@ -1395,7 +1391,6 @@ struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_line *cur, *new = NULL;
unsigned int left_seblks;
- int is_next = 0;
cur = l_mg->data_line;
new = l_mg->data_next;
@@ -1444,6 +1439,8 @@ retry_setup:
goto retry_setup;
}
+ pblk_rl_free_lines_dec(&pblk->rl, new, true);
+
/* Allocate next line for preparation */
spin_lock(&l_mg->free_lock);
l_mg->data_next = pblk_line_get(pblk);
@@ -1457,13 +1454,9 @@ retry_setup:
} else {
l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
l_mg->data_next->type = PBLK_LINETYPE_DATA;
- is_next = 1;
}
spin_unlock(&l_mg->free_lock);
- if (is_next)
- pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
-
out:
return new;
}
@@ -1561,8 +1554,8 @@ int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
struct nvm_geo *geo = &dev->geo;
pr_err("pblk: could not async erase line:%d,blk:%d\n",
- pblk_dev_ppa_to_line(ppa),
- pblk_dev_ppa_to_pos(geo, ppa));
+ pblk_ppa_to_line(ppa),
+ pblk_ppa_to_pos(geo, ppa));
}
return err;
@@ -1746,7 +1739,7 @@ void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
struct pblk_lun *rlun;
- int nr_luns = geo->nr_luns;
+ int nr_luns = geo->all_luns;
int bit = -1;
while ((bit = find_next_bit(lun_bitmap, nr_luns, bit + 1)) < nr_luns) {
@@ -1884,7 +1877,7 @@ void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
/* If the L2P entry maps to a line, the reference is valid */
if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
- int line_id = pblk_dev_ppa_to_line(ppa);
+ int line_id = pblk_ppa_to_line(ppa);
struct pblk_line *line = &pblk->lines[line_id];
kref_get(&line->ref);
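Most of this hunk collapses the old pblk_dev_ppa_to_* and pblk_tgt_ppa_to_line variants onto single helpers. For orientation, the unified helpers are plain field extractions; a sketch of their presumed pblk.h definitions (shown here for reference, not part of this hunk):

static inline int pblk_ppa_to_line(struct ppa_addr p)
{
	return p.g.blk;		/* line id equals the block id across LUNs */
}

static inline int pblk_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
{
	return p.g.lun * geo->nr_chnls + p.g.ch;	/* position within the line */
}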
diff --git a/drivers/lightnvm/pblk-gc.c b/drivers/lightnvm/pblk-gc.c
index 9c8e114c8a54..3d899383666e 100644
--- a/drivers/lightnvm/pblk-gc.c
+++ b/drivers/lightnvm/pblk-gc.c
@@ -169,7 +169,14 @@ static void pblk_gc_line_prepare_ws(struct work_struct *work)
* the line untouched. TODO: Implement a recovery routine that scans and
* moves all sectors on the line.
*/
- lba_list = pblk_recov_get_lba_list(pblk, emeta_buf);
+
+ ret = pblk_recov_check_emeta(pblk, emeta_buf);
+ if (ret) {
+ pr_err("pblk: inconsistent emeta (line %d)\n", line->id);
+ goto fail_free_emeta;
+ }
+
+ lba_list = emeta_to_lbas(pblk, emeta_buf);
if (!lba_list) {
pr_err("pblk: could not interpret emeta (line %d)\n", line->id);
goto fail_free_emeta;
@@ -519,22 +526,12 @@ void pblk_gc_should_start(struct pblk *pblk)
}
}
-/*
- * If flush_wq == 1 then no lock should be held by the caller since
- * flush_workqueue can sleep
- */
-static void pblk_gc_stop(struct pblk *pblk, int flush_wq)
-{
- pblk->gc.gc_active = 0;
- pr_debug("pblk: gc stop\n");
-}
-
void pblk_gc_should_stop(struct pblk *pblk)
{
struct pblk_gc *gc = &pblk->gc;
if (gc->gc_active && !gc->gc_forced)
- pblk_gc_stop(pblk, 0);
+ gc->gc_active = 0;
}
void pblk_gc_should_kick(struct pblk *pblk)
@@ -660,7 +657,7 @@ void pblk_gc_exit(struct pblk *pblk)
gc->gc_enabled = 0;
del_timer_sync(&gc->gc_timer);
- pblk_gc_stop(pblk, 1);
+ gc->gc_active = 0;
if (gc->gc_ts)
kthread_stop(gc->gc_ts);
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index 695826a06b5d..93d671ca518e 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -169,8 +169,8 @@ static int pblk_set_ppaf(struct pblk *pblk)
}
ppaf.ch_len = power_len;
- power_len = get_count_order(geo->luns_per_chnl);
- if (1 << power_len != geo->luns_per_chnl) {
+ power_len = get_count_order(geo->nr_luns);
+ if (1 << power_len != geo->nr_luns) {
pr_err("pblk: supports only power-of-two LUN config.\n");
return -EINVAL;
}
@@ -254,7 +254,7 @@ static int pblk_core_init(struct pblk *pblk)
struct nvm_geo *geo = &dev->geo;
pblk->pgs_in_buffer = NVM_MEM_PAGE_WRITE * geo->sec_per_pg *
- geo->nr_planes * geo->nr_luns;
+ geo->nr_planes * geo->all_luns;
if (pblk_init_global_caches(pblk))
return -ENOMEM;
@@ -270,21 +270,22 @@ static int pblk_core_init(struct pblk *pblk)
if (!pblk->gen_ws_pool)
goto free_page_bio_pool;
- pblk->rec_pool = mempool_create_slab_pool(geo->nr_luns, pblk_rec_cache);
+ pblk->rec_pool = mempool_create_slab_pool(geo->all_luns,
+ pblk_rec_cache);
if (!pblk->rec_pool)
goto free_gen_ws_pool;
- pblk->r_rq_pool = mempool_create_slab_pool(geo->nr_luns,
+ pblk->r_rq_pool = mempool_create_slab_pool(geo->all_luns,
pblk_g_rq_cache);
if (!pblk->r_rq_pool)
goto free_rec_pool;
- pblk->e_rq_pool = mempool_create_slab_pool(geo->nr_luns,
+ pblk->e_rq_pool = mempool_create_slab_pool(geo->all_luns,
pblk_g_rq_cache);
if (!pblk->e_rq_pool)
goto free_r_rq_pool;
- pblk->w_rq_pool = mempool_create_slab_pool(geo->nr_luns,
+ pblk->w_rq_pool = mempool_create_slab_pool(geo->all_luns,
pblk_w_rq_cache);
if (!pblk->w_rq_pool)
goto free_e_rq_pool;
@@ -354,6 +355,8 @@ static void pblk_core_free(struct pblk *pblk)
mempool_destroy(pblk->e_rq_pool);
mempool_destroy(pblk->w_rq_pool);
+ pblk_rwb_free(pblk);
+
pblk_free_global_caches(pblk);
}
@@ -409,7 +412,7 @@ static int pblk_bb_discovery(struct nvm_tgt_dev *dev, struct pblk_lun *rlun)
u8 *blks;
int nr_blks, ret;
- nr_blks = geo->blks_per_lun * geo->plane_mode;
+ nr_blks = geo->nr_chks * geo->plane_mode;
blks = kmalloc(nr_blks, GFP_KERNEL);
if (!blks)
return -ENOMEM;
@@ -482,20 +485,21 @@ static int pblk_luns_init(struct pblk *pblk, struct ppa_addr *luns)
int i, ret;
/* TODO: Implement unbalanced LUN support */
- if (geo->luns_per_chnl < 0) {
+ if (geo->nr_luns < 0) {
pr_err("pblk: unbalanced LUN config.\n");
return -EINVAL;
}
- pblk->luns = kcalloc(geo->nr_luns, sizeof(struct pblk_lun), GFP_KERNEL);
+ pblk->luns = kcalloc(geo->all_luns, sizeof(struct pblk_lun),
+ GFP_KERNEL);
if (!pblk->luns)
return -ENOMEM;
- for (i = 0; i < geo->nr_luns; i++) {
+ for (i = 0; i < geo->all_luns; i++) {
/* Stripe across channels */
int ch = i % geo->nr_chnls;
int lun_raw = i / geo->nr_chnls;
- int lunid = lun_raw + ch * geo->luns_per_chnl;
+ int lunid = lun_raw + ch * geo->nr_luns;
rlun = &pblk->luns[i];
rlun->bppa = luns[lunid];
@@ -577,22 +581,37 @@ static unsigned int calc_emeta_len(struct pblk *pblk)
static void pblk_set_provision(struct pblk *pblk, long nr_free_blks)
{
struct nvm_tgt_dev *dev = pblk->dev;
+ struct pblk_line_mgmt *l_mg = &pblk->l_mg;
+ struct pblk_line_meta *lm = &pblk->lm;
struct nvm_geo *geo = &dev->geo;
sector_t provisioned;
+ int sec_meta, blk_meta;
- pblk->over_pct = 20;
+ if (geo->op == NVM_TARGET_DEFAULT_OP)
+ pblk->op = PBLK_DEFAULT_OP;
+ else
+ pblk->op = geo->op;
provisioned = nr_free_blks;
- provisioned *= (100 - pblk->over_pct);
+ provisioned *= (100 - pblk->op);
sector_div(provisioned, 100);
+ pblk->op_blks = nr_free_blks - provisioned;
+
/* Internally pblk manages all free blocks, but all calculations based
* on user capacity consider only provisioned blocks
*/
pblk->rl.total_blocks = nr_free_blks;
- pblk->rl.nr_secs = nr_free_blks * geo->sec_per_blk;
- pblk->capacity = provisioned * geo->sec_per_blk;
+ pblk->rl.nr_secs = nr_free_blks * geo->sec_per_chk;
+
+ /* Consider sectors used for metadata */
+ sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
+ blk_meta = DIV_ROUND_UP(sec_meta, geo->sec_per_chk);
+
+ pblk->capacity = (provisioned - blk_meta) * geo->sec_per_chk;
+
atomic_set(&pblk->rl.free_blocks, nr_free_blks);
+ atomic_set(&pblk->rl.free_user_blocks, nr_free_blks);
}
static int pblk_lines_alloc_metadata(struct pblk *pblk)
@@ -683,7 +702,7 @@ static int pblk_lines_init(struct pblk *pblk)
int i, ret;
pblk->min_write_pgs = geo->sec_per_pl * (geo->sec_size / PAGE_SIZE);
- max_write_ppas = pblk->min_write_pgs * geo->nr_luns;
+ max_write_ppas = pblk->min_write_pgs * geo->all_luns;
pblk->max_write_pgs = (max_write_ppas < nvm_max_phys_sects(dev)) ?
max_write_ppas : nvm_max_phys_sects(dev);
pblk_set_sec_per_write(pblk, pblk->min_write_pgs);
@@ -693,26 +712,26 @@ static int pblk_lines_init(struct pblk *pblk)
return -EINVAL;
}
- div_u64_rem(geo->sec_per_blk, pblk->min_write_pgs, &mod);
+ div_u64_rem(geo->sec_per_chk, pblk->min_write_pgs, &mod);
if (mod) {
pr_err("pblk: bad configuration of sectors/pages\n");
return -EINVAL;
}
- l_mg->nr_lines = geo->blks_per_lun;
+ l_mg->nr_lines = geo->nr_chks;
l_mg->log_line = l_mg->data_line = NULL;
l_mg->l_seq_nr = l_mg->d_seq_nr = 0;
l_mg->nr_free_lines = 0;
bitmap_zero(&l_mg->meta_bitmap, PBLK_DATA_LINES);
- lm->sec_per_line = geo->sec_per_blk * geo->nr_luns;
- lm->blk_per_line = geo->nr_luns;
- lm->blk_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
+ lm->sec_per_line = geo->sec_per_chk * geo->all_luns;
+ lm->blk_per_line = geo->all_luns;
+ lm->blk_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long);
lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long);
- lm->lun_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
+ lm->lun_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long);
lm->mid_thrs = lm->sec_per_line / 2;
lm->high_thrs = lm->sec_per_line / 4;
- lm->meta_distance = (geo->nr_luns / 2) * pblk->min_write_pgs;
+ lm->meta_distance = (geo->all_luns / 2) * pblk->min_write_pgs;
/* Calculate necessary pages for smeta. See comment over struct
* line_smeta definition
@@ -742,12 +761,12 @@ add_emeta_page:
goto add_emeta_page;
}
- lm->emeta_bb = geo->nr_luns > i ? geo->nr_luns - i : 0;
+ lm->emeta_bb = geo->all_luns > i ? geo->all_luns - i : 0;
lm->min_blk_line = 1;
- if (geo->nr_luns > 1)
+ if (geo->all_luns > 1)
lm->min_blk_line += DIV_ROUND_UP(lm->smeta_sec +
- lm->emeta_sec[0], geo->sec_per_blk);
+ lm->emeta_sec[0], geo->sec_per_chk);
if (lm->min_blk_line > lm->blk_per_line) {
pr_err("pblk: config. not supported. Min. LUN in line:%d\n",
@@ -772,7 +791,7 @@ add_emeta_page:
goto fail_free_bb_template;
}
- bb_distance = (geo->nr_luns) * geo->sec_per_pl;
+ bb_distance = (geo->all_luns) * geo->sec_per_pl;
for (i = 0; i < lm->sec_per_line; i += bb_distance)
bitmap_set(l_mg->bb_template, i, geo->sec_per_pl);
@@ -844,7 +863,7 @@ add_emeta_page:
pblk_set_provision(pblk, nr_free_blks);
/* Cleanup per-LUN bad block lists - managed within lines on run-time */
- for (i = 0; i < geo->nr_luns; i++)
+ for (i = 0; i < geo->all_luns; i++)
kfree(pblk->luns[i].bb_list);
return 0;
@@ -858,7 +877,7 @@ fail_free_bb_template:
fail_free_meta:
pblk_line_meta_free(pblk);
fail:
- for (i = 0; i < geo->nr_luns; i++)
+ for (i = 0; i < geo->all_luns; i++)
kfree(pblk->luns[i].bb_list);
return ret;
@@ -866,15 +885,19 @@ fail:
static int pblk_writer_init(struct pblk *pblk)
{
- timer_setup(&pblk->wtimer, pblk_write_timer_fn, 0);
- mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));
-
pblk->writer_ts = kthread_create(pblk_write_ts, pblk, "pblk-writer-t");
if (IS_ERR(pblk->writer_ts)) {
- pr_err("pblk: could not allocate writer kthread\n");
- return PTR_ERR(pblk->writer_ts);
+ int err = PTR_ERR(pblk->writer_ts);
+
+ if (err != -EINTR)
+ pr_err("pblk: could not allocate writer kthread (%d)\n",
+ err);
+ return err;
}
+ timer_setup(&pblk->wtimer, pblk_write_timer_fn, 0);
+ mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));
+
return 0;
}
@@ -910,7 +933,6 @@ static void pblk_tear_down(struct pblk *pblk)
pblk_pipeline_stop(pblk);
pblk_writer_stop(pblk);
pblk_rb_sync_l2p(&pblk->rwb);
- pblk_rwb_free(pblk);
pblk_rl_free(&pblk->rl);
pr_debug("pblk: consistent tear down\n");
@@ -1025,7 +1047,8 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
ret = pblk_writer_init(pblk);
if (ret) {
- pr_err("pblk: could not initialize write thread\n");
+ if (ret != -EINTR)
+ pr_err("pblk: could not initialize write thread\n");
goto fail_free_lines;
}
@@ -1041,13 +1064,14 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
blk_queue_write_cache(tqueue, true, false);
- tqueue->limits.discard_granularity = geo->pgs_per_blk * geo->pfpg_size;
+ tqueue->limits.discard_granularity = geo->sec_per_chk * geo->sec_size;
tqueue->limits.discard_alignment = 0;
blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, tqueue);
- pr_info("pblk init: luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
- geo->nr_luns, pblk->l_mg.nr_lines,
+ pr_info("pblk(%s): luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
+ tdisk->disk_name,
+ geo->all_luns, pblk->l_mg.nr_lines,
(unsigned long long)pblk->rl.nr_secs,
pblk->rwb.nr_entries);
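pblk_set_provision() now derives user-visible capacity from the configurable OP value and subtracts the blocks consumed by per-line metadata. A standalone worked example of that arithmetic, with made-up numbers:

#include <stdio.h>

int main(void)
{
	long nr_free_blks = 1000;	/* made-up device size */
	long op = 11;			/* PBLK_DEFAULT_OP, in percent */
	long sec_per_chk = 4096;	/* assumed sectors per chunk */
	long blk_meta = 40;		/* assumed smeta/emeta block cost */

	long provisioned = nr_free_blks * (100 - op) / 100;	/* 890 */
	long op_blks = nr_free_blks - provisioned;		/* 110 */
	long capacity = (provisioned - blk_meta) * sec_per_chk;

	printf("op_blks=%ld, capacity=%ld sectors\n", op_blks, capacity);
	return 0;
}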
diff --git a/drivers/lightnvm/pblk-map.c b/drivers/lightnvm/pblk-map.c
index 6f3ecde2140f..7445e6430c52 100644
--- a/drivers/lightnvm/pblk-map.c
+++ b/drivers/lightnvm/pblk-map.c
@@ -146,7 +146,7 @@ void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
return;
/* Erase blocks that are bad in this line but might not be in next */
- if (unlikely(ppa_empty(*erase_ppa)) &&
+ if (unlikely(pblk_ppa_empty(*erase_ppa)) &&
bitmap_weight(d_line->blk_bitmap, lm->blk_per_line)) {
int bit = -1;
diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c
index b8f78e401482..ec8fc314646b 100644
--- a/drivers/lightnvm/pblk-rb.c
+++ b/drivers/lightnvm/pblk-rb.c
@@ -54,7 +54,7 @@ int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
rb->seg_size = (1 << power_seg_sz);
rb->nr_entries = (1 << power_size);
rb->mem = rb->subm = rb->sync = rb->l2p_update = 0;
- rb->sync_point = EMPTY_ENTRY;
+ rb->flush_point = EMPTY_ENTRY;
spin_lock_init(&rb->w_lock);
spin_lock_init(&rb->s_lock);
@@ -112,7 +112,7 @@ int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
up_write(&pblk_rb_lock);
#ifdef CONFIG_NVM_DEBUG
- atomic_set(&rb->inflight_sync_point, 0);
+ atomic_set(&rb->inflight_flush_point, 0);
#endif
/*
@@ -226,7 +226,7 @@ static int __pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int to_update)
pblk_update_map_dev(pblk, w_ctx->lba, w_ctx->ppa,
entry->cacheline);
- line = &pblk->lines[pblk_tgt_ppa_to_line(w_ctx->ppa)];
+ line = &pblk->lines[pblk_ppa_to_line(w_ctx->ppa)];
kref_put(&line->ref, pblk_line_put);
clean_wctx(w_ctx);
rb->l2p_update = (rb->l2p_update + 1) & (rb->nr_entries - 1);
@@ -349,35 +349,35 @@ void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
smp_store_release(&entry->w_ctx.flags, flags);
}
-static int pblk_rb_sync_point_set(struct pblk_rb *rb, struct bio *bio,
+static int pblk_rb_flush_point_set(struct pblk_rb *rb, struct bio *bio,
unsigned int pos)
{
struct pblk_rb_entry *entry;
- unsigned int subm, sync_point;
+ unsigned int sync, flush_point;
- subm = READ_ONCE(rb->subm);
+ sync = READ_ONCE(rb->sync);
+
+ if (pos == sync)
+ return 0;
#ifdef CONFIG_NVM_DEBUG
- atomic_inc(&rb->inflight_sync_point);
+ atomic_inc(&rb->inflight_flush_point);
#endif
- if (pos == subm)
- return 0;
+ flush_point = (pos == 0) ? (rb->nr_entries - 1) : (pos - 1);
+ entry = &rb->entries[flush_point];
- sync_point = (pos == 0) ? (rb->nr_entries - 1) : (pos - 1);
- entry = &rb->entries[sync_point];
+ pblk_rb_sync_init(rb, NULL);
- /* Protect syncs */
- smp_store_release(&rb->sync_point, sync_point);
+ /* Protect flush points */
+ smp_store_release(&rb->flush_point, flush_point);
- if (!bio)
- return 0;
+ if (bio)
+ bio_list_add(&entry->w_ctx.bios, bio);
- spin_lock_irq(&rb->s_lock);
- bio_list_add(&entry->w_ctx.bios, bio);
- spin_unlock_irq(&rb->s_lock);
+ pblk_rb_sync_end(rb, NULL);
- return 1;
+ return bio ? 1 : 0;
}
static int __pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries,
@@ -416,7 +416,7 @@ void pblk_rb_flush(struct pblk_rb *rb)
struct pblk *pblk = container_of(rb, struct pblk, rwb);
unsigned int mem = READ_ONCE(rb->mem);
- if (pblk_rb_sync_point_set(rb, NULL, mem))
+ if (pblk_rb_flush_point_set(rb, NULL, mem))
return;
pblk_write_should_kick(pblk);
@@ -440,7 +440,7 @@ static int pblk_rb_may_write_flush(struct pblk_rb *rb, unsigned int nr_entries,
#ifdef CONFIG_NVM_DEBUG
atomic_long_inc(&pblk->nr_flush);
#endif
- if (pblk_rb_sync_point_set(&pblk->rwb, bio, mem))
+ if (pblk_rb_flush_point_set(&pblk->rwb, bio, mem))
*io_ret = NVM_IO_OK;
}
@@ -606,21 +606,6 @@ try:
return NVM_IO_ERR;
}
- if (flags & PBLK_FLUSH_ENTRY) {
- unsigned int sync_point;
-
- sync_point = READ_ONCE(rb->sync_point);
- if (sync_point == pos) {
- /* Protect syncs */
- smp_store_release(&rb->sync_point, EMPTY_ENTRY);
- }
-
- flags &= ~PBLK_FLUSH_ENTRY;
-#ifdef CONFIG_NVM_DEBUG
- atomic_dec(&rb->inflight_sync_point);
-#endif
- }
-
flags &= ~PBLK_WRITTEN_DATA;
flags |= PBLK_SUBMITTED_ENTRY;
@@ -730,15 +715,24 @@ void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags)
unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries)
{
- unsigned int sync;
- unsigned int i;
-
+ unsigned int sync, flush_point;
lockdep_assert_held(&rb->s_lock);
sync = READ_ONCE(rb->sync);
+ flush_point = READ_ONCE(rb->flush_point);
- for (i = 0; i < nr_entries; i++)
- sync = (sync + 1) & (rb->nr_entries - 1);
+ if (flush_point != EMPTY_ENTRY) {
+ unsigned int secs_to_flush;
+
+ secs_to_flush = pblk_rb_ring_count(flush_point, sync,
+ rb->nr_entries);
+ if (secs_to_flush < nr_entries) {
+ /* Protect flush points */
+ smp_store_release(&rb->flush_point, EMPTY_ENTRY);
+ }
+ }
+
+ sync = (sync + nr_entries) & (rb->nr_entries - 1);
/* Protect from counts */
smp_store_release(&rb->sync, sync);
@@ -746,22 +740,27 @@ unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries)
return sync;
}
-unsigned int pblk_rb_sync_point_count(struct pblk_rb *rb)
+/* Calculate how many sectors to submit up to the current flush point. */
+unsigned int pblk_rb_flush_point_count(struct pblk_rb *rb)
{
- unsigned int subm, sync_point;
- unsigned int count;
+ unsigned int subm, sync, flush_point;
+ unsigned int submitted, to_flush;
- /* Protect syncs */
- sync_point = smp_load_acquire(&rb->sync_point);
- if (sync_point == EMPTY_ENTRY)
+ /* Protect flush points */
+ flush_point = smp_load_acquire(&rb->flush_point);
+ if (flush_point == EMPTY_ENTRY)
return 0;
+ /* Protect syncs */
+ sync = smp_load_acquire(&rb->sync);
+
subm = READ_ONCE(rb->subm);
+ submitted = pblk_rb_ring_count(subm, sync, rb->nr_entries);
- /* The sync point itself counts as a sector to sync */
- count = pblk_rb_ring_count(sync_point, subm, rb->nr_entries) + 1;
+ /* The flush point itself counts as a sector to sync */
+ to_flush = pblk_rb_ring_count(flush_point, sync, rb->nr_entries) + 1;
- return count;
+ return (submitted < to_flush) ? (to_flush - submitted) : 0;
}
/*
@@ -801,7 +800,7 @@ int pblk_rb_tear_down_check(struct pblk_rb *rb)
if ((rb->mem == rb->subm) && (rb->subm == rb->sync) &&
(rb->sync == rb->l2p_update) &&
- (rb->sync_point == EMPTY_ENTRY)) {
+ (rb->flush_point == EMPTY_ENTRY)) {
goto out;
}
@@ -848,7 +847,7 @@ ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf)
queued_entries++;
spin_unlock_irq(&rb->s_lock);
- if (rb->sync_point != EMPTY_ENTRY)
+ if (rb->flush_point != EMPTY_ENTRY)
offset = scnprintf(buf, PAGE_SIZE,
"%u\t%u\t%u\t%u\t%u\t%u\t%u - %u/%u/%u - %d\n",
rb->nr_entries,
@@ -857,14 +856,14 @@ ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf)
rb->sync,
rb->l2p_update,
#ifdef CONFIG_NVM_DEBUG
- atomic_read(&rb->inflight_sync_point),
+ atomic_read(&rb->inflight_flush_point),
#else
0,
#endif
- rb->sync_point,
+ rb->flush_point,
pblk_rb_read_count(rb),
pblk_rb_space(rb),
- pblk_rb_sync_point_count(rb),
+ pblk_rb_flush_point_count(rb),
queued_entries);
else
offset = scnprintf(buf, PAGE_SIZE,
@@ -875,13 +874,13 @@ ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf)
rb->sync,
rb->l2p_update,
#ifdef CONFIG_NVM_DEBUG
- atomic_read(&rb->inflight_sync_point),
+ atomic_read(&rb->inflight_flush_point),
#else
0,
#endif
pblk_rb_read_count(rb),
pblk_rb_space(rb),
- pblk_rb_sync_point_count(rb),
+ pblk_rb_flush_point_count(rb),
queued_entries);
return offset;
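The flush-point count compares how much has been submitted since the last sync against how much must reach the flush point, both measured with the ring-count macro. A userspace model with made-up ring positions, assuming pblk_rb_ring_count() is the usual power-of-two ring count, ((head - tail) & (size - 1)), which is not shown in this patch:

#include <stdio.h>

static unsigned int ring_count(unsigned int head, unsigned int tail,
			       unsigned int size)
{
	return (head - tail) & (size - 1);	/* size must be a power of two */
}

int main(void)
{
	unsigned int size = 64;
	unsigned int sync = 10, subm = 14, flush_point = 13;

	unsigned int submitted = ring_count(subm, sync, size);		/* 4 */
	/* the flush point itself counts as a sector to sync, hence +1 */
	unsigned int to_flush = ring_count(flush_point, sync, size) + 1; /* 4 */

	/* everything up to the flush point is already in flight: 0 left */
	printf("%u\n", submitted < to_flush ? to_flush - submitted : 0);
	return 0;
}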
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index ca79d8fb3e60..2f761283f43e 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -141,7 +141,7 @@ static void pblk_read_put_rqd_kref(struct pblk *pblk, struct nvm_rq *rqd)
struct ppa_addr ppa = ppa_list[i];
struct pblk_line *line;
- line = &pblk->lines[pblk_dev_ppa_to_line(ppa)];
+ line = &pblk->lines[pblk_ppa_to_line(ppa)];
kref_put(&line->ref, pblk_line_put_wq);
}
}
@@ -158,8 +158,12 @@ static void pblk_end_user_read(struct bio *bio)
static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
bool put_line)
{
+ struct nvm_tgt_dev *dev = pblk->dev;
struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
struct bio *bio = rqd->bio;
+ unsigned long start_time = r_ctx->start_time;
+
+ generic_end_io_acct(dev->q, READ, &pblk->disk->part0, start_time);
if (rqd->error)
pblk_log_read_err(pblk, rqd);
@@ -193,9 +197,9 @@ static void pblk_end_io_read(struct nvm_rq *rqd)
__pblk_end_io_read(pblk, rqd, true);
}
-static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
- unsigned int bio_init_idx,
- unsigned long *read_bitmap)
+static int pblk_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
+ unsigned int bio_init_idx,
+ unsigned long *read_bitmap)
{
struct bio *new_bio, *bio = rqd->bio;
struct pblk_sec_meta *meta_list = rqd->meta_list;
@@ -270,7 +274,7 @@ static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
i = 0;
hole = find_first_zero_bit(read_bitmap, nr_secs);
do {
- int line_id = pblk_dev_ppa_to_line(rqd->ppa_list[i]);
+ int line_id = pblk_ppa_to_line(rqd->ppa_list[i]);
struct pblk_line *line = &pblk->lines[line_id];
kref_put(&line->ref, pblk_line_put);
@@ -306,6 +310,8 @@ static int pblk_fill_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
return NVM_IO_OK;
err:
+ pr_err("pblk: failed to perform partial read\n");
+
/* Free allocated pages in new bio */
pblk_bio_free_pages(pblk, bio, 0, new_bio->bi_vcnt);
__pblk_end_io_read(pblk, rqd, false);
@@ -357,6 +363,7 @@ retry:
int pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
struct nvm_tgt_dev *dev = pblk->dev;
+ struct request_queue *q = dev->q;
sector_t blba = pblk_get_lba(bio);
unsigned int nr_secs = pblk_get_secs(bio);
struct pblk_g_ctx *r_ctx;
@@ -372,6 +379,8 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
return NVM_IO_ERR;
}
+ generic_start_io_acct(q, READ, bio_sectors(bio), &pblk->disk->part0);
+
bitmap_zero(&read_bitmap, nr_secs);
rqd = pblk_alloc_rqd(pblk, PBLK_READ);
@@ -383,6 +392,7 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
rqd->end_io = pblk_end_io_read;
r_ctx = nvm_rq_to_pdu(rqd);
+ r_ctx->start_time = jiffies;
r_ctx->lba = blba;
/* Save the index for this bio's start. This is needed in case
@@ -422,7 +432,7 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
int_bio = bio_clone_fast(bio, GFP_KERNEL, pblk_bio_set);
if (!int_bio) {
pr_err("pblk: could not clone read bio\n");
- return NVM_IO_ERR;
+ goto fail_end_io;
}
rqd->bio = int_bio;
@@ -433,7 +443,7 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
pr_err("pblk: read IO submission failed\n");
if (int_bio)
bio_put(int_bio);
- return ret;
+ goto fail_end_io;
}
return NVM_IO_OK;
@@ -442,17 +452,14 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
/* The read bio request could be partially filled by the write buffer,
* but there are some holes that need to be read from the drive.
*/
- ret = pblk_fill_partial_read_bio(pblk, rqd, bio_init_idx, &read_bitmap);
- if (ret) {
- pr_err("pblk: failed to perform partial read\n");
- return ret;
- }
-
- return NVM_IO_OK;
+ return pblk_partial_read_bio(pblk, rqd, bio_init_idx, &read_bitmap);
fail_rqd_free:
pblk_free_rqd(pblk, rqd, PBLK_READ);
return ret;
+fail_end_io:
+ __pblk_end_io_read(pblk, rqd, false);
+ return ret;
}
static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c
index eadb3eb5d4dc..1d5e961bf5e0 100644
--- a/drivers/lightnvm/pblk-recovery.c
+++ b/drivers/lightnvm/pblk-recovery.c
@@ -111,18 +111,18 @@ int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
return 0;
}
-__le64 *pblk_recov_get_lba_list(struct pblk *pblk, struct line_emeta *emeta_buf)
+int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta_buf)
{
u32 crc;
crc = pblk_calc_emeta_crc(pblk, emeta_buf);
if (le32_to_cpu(emeta_buf->crc) != crc)
- return NULL;
+ return 1;
if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC)
- return NULL;
+ return 1;
- return emeta_to_lbas(pblk, emeta_buf);
+ return 0;
}
static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
@@ -137,7 +137,7 @@ static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
u64 nr_valid_lbas, nr_lbas = 0;
u64 i;
- lba_list = pblk_recov_get_lba_list(pblk, emeta_buf);
+ lba_list = emeta_to_lbas(pblk, emeta_buf);
if (!lba_list)
return 1;
@@ -149,7 +149,7 @@ static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
struct ppa_addr ppa;
int pos;
- ppa = addr_to_pblk_ppa(pblk, i, line->id);
+ ppa = addr_to_gen_ppa(pblk, i, line->id);
pos = pblk_ppa_to_pos(geo, ppa);
/* Do not update bad blocks */
@@ -188,7 +188,7 @@ static int pblk_calc_sec_in_line(struct pblk *pblk, struct pblk_line *line)
int nr_bb = bitmap_weight(line->blk_bitmap, lm->blk_per_line);
return lm->sec_per_line - lm->smeta_sec - lm->emeta_sec[0] -
- nr_bb * geo->sec_per_blk;
+ nr_bb * geo->sec_per_chk;
}
struct pblk_recov_alloc {
@@ -263,12 +263,12 @@ next_read_rq:
int pos;
ppa = addr_to_gen_ppa(pblk, r_ptr_int, line->id);
- pos = pblk_dev_ppa_to_pos(geo, ppa);
+ pos = pblk_ppa_to_pos(geo, ppa);
while (test_bit(pos, line->blk_bitmap)) {
r_ptr_int += pblk->min_write_pgs;
ppa = addr_to_gen_ppa(pblk, r_ptr_int, line->id);
- pos = pblk_dev_ppa_to_pos(geo, ppa);
+ pos = pblk_ppa_to_pos(geo, ppa);
}
for (j = 0; j < pblk->min_write_pgs; j++, i++, r_ptr_int++)
@@ -288,7 +288,7 @@ next_read_rq:
/* At this point, the read should not fail. If it does, it is a problem
* we cannot recover from here. Need FTL log.
*/
- if (rqd->error) {
+ if (rqd->error && rqd->error != NVM_RSP_WARN_HIGHECC) {
pr_err("pblk: L2P recovery failed (%d)\n", rqd->error);
return -EINTR;
}
@@ -411,12 +411,12 @@ next_pad_rq:
int pos;
w_ptr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
- ppa = addr_to_pblk_ppa(pblk, w_ptr, line->id);
+ ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
pos = pblk_ppa_to_pos(geo, ppa);
while (test_bit(pos, line->blk_bitmap)) {
w_ptr += pblk->min_write_pgs;
- ppa = addr_to_pblk_ppa(pblk, w_ptr, line->id);
+ ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
pos = pblk_ppa_to_pos(geo, ppa);
}
@@ -541,12 +541,12 @@ next_rq:
w_ptr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
- pos = pblk_dev_ppa_to_pos(geo, ppa);
+ pos = pblk_ppa_to_pos(geo, ppa);
while (test_bit(pos, line->blk_bitmap)) {
w_ptr += pblk->min_write_pgs;
ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
- pos = pblk_dev_ppa_to_pos(geo, ppa);
+ pos = pblk_ppa_to_pos(geo, ppa);
}
for (j = 0; j < pblk->min_write_pgs; j++, i++, w_ptr++)
@@ -672,12 +672,12 @@ next_rq:
paddr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
ppa = addr_to_gen_ppa(pblk, paddr, line->id);
- pos = pblk_dev_ppa_to_pos(geo, ppa);
+ pos = pblk_ppa_to_pos(geo, ppa);
while (test_bit(pos, line->blk_bitmap)) {
paddr += pblk->min_write_pgs;
ppa = addr_to_gen_ppa(pblk, paddr, line->id);
- pos = pblk_dev_ppa_to_pos(geo, ppa);
+ pos = pblk_ppa_to_pos(geo, ppa);
}
for (j = 0; j < pblk->min_write_pgs; j++, i++, paddr++)
@@ -817,7 +817,7 @@ static u64 pblk_line_emeta_start(struct pblk *pblk, struct pblk_line *line)
while (emeta_secs) {
emeta_start--;
- ppa = addr_to_pblk_ppa(pblk, emeta_start, line->id);
+ ppa = addr_to_gen_ppa(pblk, emeta_start, line->id);
pos = pblk_ppa_to_pos(geo, ppa);
if (!test_bit(pos, line->blk_bitmap))
emeta_secs--;
@@ -938,6 +938,11 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
goto next;
}
+ if (pblk_recov_check_emeta(pblk, line->emeta->buf)) {
+ pblk_recov_l2p_from_oob(pblk, line);
+ goto next;
+ }
+
if (pblk_recov_l2p_from_emeta(pblk, line))
pblk_recov_l2p_from_oob(pblk, line);
@@ -984,10 +989,8 @@ next:
}
spin_unlock(&l_mg->free_lock);
- if (is_next) {
+ if (is_next)
pblk_line_erase(pblk, l_mg->data_next);
- pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
- }
out:
if (found_lines != recovered_lines)
diff --git a/drivers/lightnvm/pblk-rl.c b/drivers/lightnvm/pblk-rl.c
index dacc71922260..0d457b162f23 100644
--- a/drivers/lightnvm/pblk-rl.c
+++ b/drivers/lightnvm/pblk-rl.c
@@ -89,17 +89,15 @@ unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl)
return atomic_read(&rl->free_blocks);
}
-/*
- * We check for (i) the number of free blocks in the current LUN and (ii) the
- * total number of free blocks in the pblk instance. This is to even out the
- * number of free blocks on each LUN when GC kicks in.
- *
- * Only the total number of free blocks is used to configure the rate limiter.
- */
-void pblk_rl_update_rates(struct pblk_rl *rl)
+unsigned long pblk_rl_nr_user_free_blks(struct pblk_rl *rl)
+{
+ return atomic_read(&rl->free_user_blocks);
+}
+
+static void __pblk_rl_update_rates(struct pblk_rl *rl,
+ unsigned long free_blocks)
{
struct pblk *pblk = container_of(rl, struct pblk, rl);
- unsigned long free_blocks = pblk_rl_nr_free_blks(rl);
int max = rl->rb_budget;
if (free_blocks >= rl->high) {
@@ -132,20 +130,37 @@ void pblk_rl_update_rates(struct pblk_rl *rl)
pblk_gc_should_stop(pblk);
}
+void pblk_rl_update_rates(struct pblk_rl *rl)
+{
+ __pblk_rl_update_rates(rl, pblk_rl_nr_user_free_blks(rl));
+}
+
void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line)
{
int blk_in_line = atomic_read(&line->blk_in_line);
+ int free_blocks;
atomic_add(blk_in_line, &rl->free_blocks);
- pblk_rl_update_rates(rl);
+ free_blocks = atomic_add_return(blk_in_line, &rl->free_user_blocks);
+
+ __pblk_rl_update_rates(rl, free_blocks);
}
-void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line)
+void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line,
+ bool used)
{
int blk_in_line = atomic_read(&line->blk_in_line);
+ int free_blocks;
atomic_sub(blk_in_line, &rl->free_blocks);
- pblk_rl_update_rates(rl);
+
+ if (used)
+ free_blocks = atomic_sub_return(blk_in_line,
+ &rl->free_user_blocks);
+ else
+ free_blocks = atomic_read(&rl->free_user_blocks);
+
+ __pblk_rl_update_rates(rl, free_blocks);
}
int pblk_rl_high_thrs(struct pblk_rl *rl)
@@ -174,16 +189,21 @@ void pblk_rl_free(struct pblk_rl *rl)
void pblk_rl_init(struct pblk_rl *rl, int budget)
{
struct pblk *pblk = container_of(rl, struct pblk, rl);
+ struct nvm_tgt_dev *dev = pblk->dev;
+ struct nvm_geo *geo = &dev->geo;
+ struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_line_meta *lm = &pblk->lm;
int min_blocks = lm->blk_per_line * PBLK_GC_RSV_LINE;
+ int sec_meta, blk_meta;
+
unsigned int rb_windows;
- rl->high = rl->total_blocks / PBLK_USER_HIGH_THRS;
- rl->high_pw = get_count_order(rl->high);
+ /* Consider sectors used for metadata */
+ sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
+ blk_meta = DIV_ROUND_UP(sec_meta, geo->sec_per_chk);
- rl->low = rl->total_blocks / PBLK_USER_LOW_THRS;
- if (rl->low < min_blocks)
- rl->low = min_blocks;
+ rl->high = pblk->op_blks - blk_meta - lm->blk_per_line;
+ rl->high_pw = get_count_order(rl->high);
rl->rsv_blocks = min_blocks;
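The rate limiter's high watermark is now anchored to the over-provisioned block budget rather than a fixed fraction of all blocks. Continuing the made-up numbers from the pblk-init example above:

#include <stdio.h>

int main(void)
{
	int op_blks = 110;	/* 11% OP on 1000 free blocks, as before */
	int blk_meta = 40;	/* assumed metadata block cost */
	int blk_per_line = 8;	/* assumed geometry */

	/* GC must kick in before the OP budget is consumed by metadata
	 * and the line currently being written.
	 */
	int high = op_blks - blk_meta - blk_per_line;

	printf("rl->high = %d blocks\n", high);	/* 62 */
	return 0;
}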
diff --git a/drivers/lightnvm/pblk-sysfs.c b/drivers/lightnvm/pblk-sysfs.c
index cd49e8875d4e..620bab853579 100644
--- a/drivers/lightnvm/pblk-sysfs.c
+++ b/drivers/lightnvm/pblk-sysfs.c
@@ -28,7 +28,7 @@ static ssize_t pblk_sysfs_luns_show(struct pblk *pblk, char *page)
ssize_t sz = 0;
int i;
- for (i = 0; i < geo->nr_luns; i++) {
+ for (i = 0; i < geo->all_luns; i++) {
int active = 1;
rlun = &pblk->luns[i];
@@ -49,11 +49,12 @@ static ssize_t pblk_sysfs_luns_show(struct pblk *pblk, char *page)
static ssize_t pblk_sysfs_rate_limiter(struct pblk *pblk, char *page)
{
- int free_blocks, total_blocks;
+ int free_blocks, free_user_blocks, total_blocks;
int rb_user_max, rb_user_cnt;
int rb_gc_max, rb_gc_cnt, rb_budget, rb_state;
- free_blocks = atomic_read(&pblk->rl.free_blocks);
+ free_blocks = pblk_rl_nr_free_blks(&pblk->rl);
+ free_user_blocks = pblk_rl_nr_user_free_blks(&pblk->rl);
rb_user_max = pblk->rl.rb_user_max;
rb_user_cnt = atomic_read(&pblk->rl.rb_user_cnt);
rb_gc_max = pblk->rl.rb_gc_max;
@@ -64,16 +65,16 @@ static ssize_t pblk_sysfs_rate_limiter(struct pblk *pblk, char *page)
total_blocks = pblk->rl.total_blocks;
return snprintf(page, PAGE_SIZE,
- "u:%u/%u,gc:%u/%u(%u/%u)(stop:<%u,full:>%u,free:%d/%d)-%d\n",
+ "u:%u/%u,gc:%u/%u(%u)(stop:<%u,full:>%u,free:%d/%d/%d)-%d\n",
rb_user_cnt,
rb_user_max,
rb_gc_cnt,
rb_gc_max,
rb_state,
rb_budget,
- pblk->rl.low,
pblk->rl.high,
free_blocks,
+ free_user_blocks,
total_blocks,
READ_ONCE(pblk->rl.rb_user_active));
}
@@ -238,7 +239,7 @@ static ssize_t pblk_sysfs_lines(struct pblk *pblk, char *page)
sz = snprintf(page, PAGE_SIZE - sz,
"line: nluns:%d, nblks:%d, nsecs:%d\n",
- geo->nr_luns, lm->blk_per_line, lm->sec_per_line);
+ geo->all_luns, lm->blk_per_line, lm->sec_per_line);
sz += snprintf(page + sz, PAGE_SIZE - sz,
"lines:d:%d,l:%d-f:%d,m:%d/%d,c:%d,b:%d,co:%d(d:%d,l:%d)t:%d\n",
@@ -287,7 +288,7 @@ static ssize_t pblk_sysfs_lines_info(struct pblk *pblk, char *page)
"blk_line:%d, sec_line:%d, sec_blk:%d\n",
lm->blk_per_line,
lm->sec_per_line,
- geo->sec_per_blk);
+ geo->sec_per_chk);
return sz;
}
diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c
index 6c1cafafef53..aae86ed60b98 100644
--- a/drivers/lightnvm/pblk-write.c
+++ b/drivers/lightnvm/pblk-write.c
@@ -21,13 +21,28 @@ static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
struct pblk_c_ctx *c_ctx)
{
struct bio *original_bio;
+ struct pblk_rb *rwb = &pblk->rwb;
unsigned long ret;
int i;
for (i = 0; i < c_ctx->nr_valid; i++) {
struct pblk_w_ctx *w_ctx;
+ int pos = c_ctx->sentry + i;
+ int flags;
+
+ w_ctx = pblk_rb_w_ctx(rwb, pos);
+ flags = READ_ONCE(w_ctx->flags);
+
+ if (flags & PBLK_FLUSH_ENTRY) {
+ flags &= ~PBLK_FLUSH_ENTRY;
+ /* Release flags on context. Protect from writes */
+ smp_store_release(&w_ctx->flags, flags);
+
+#ifdef CONFIG_NVM_DEBUG
+ atomic_dec(&rwb->inflight_flush_point);
+#endif
+ }
- w_ctx = pblk_rb_w_ctx(&pblk->rwb, c_ctx->sentry + i);
while ((original_bio = bio_list_pop(&w_ctx->bios)))
bio_endio(original_bio);
}
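
Clearing the flag with smp_store_release() guarantees that a reader using the matching acquire sees every store made to the write context before the PBLK_FLUSH_ENTRY bit disappears. A compressed illustration of the same release/acquire pairing, using C11 atomics and hypothetical names in place of the kernel primitives:

	#include <stdatomic.h>
	#include <stdio.h>

	#define FLUSH_ENTRY	(1 << 0)

	static atomic_int w_flags = FLUSH_ENTRY;
	static int payload;		/* data published by the completer */

	static void completer(void)
	{
		payload = 42;		/* plain store... */
		/* ...visible to acquire readers before the flag clears */
		atomic_store_explicit(&w_flags, 0, memory_order_release);
	}

	static void writer(void)
	{
		if (!(atomic_load_explicit(&w_flags, memory_order_acquire)
		      & FLUSH_ENTRY))
			printf("payload=%d\n", payload);	/* sees 42 */
	}

	int main(void)
	{
		completer();
		writer();
		return 0;
	}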
@@ -439,7 +454,7 @@ static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
struct pblk_line *meta_line;
int err;
- ppa_set_empty(&erase_ppa);
+ pblk_ppa_set_empty(&erase_ppa);
/* Assign lbas to ppas and populate request structure */
err = pblk_setup_w_rq(pblk, rqd, &erase_ppa);
@@ -457,7 +472,7 @@ static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
return NVM_IO_ERR;
}
- if (!ppa_empty(erase_ppa)) {
+ if (!pblk_ppa_empty(erase_ppa)) {
/* Submit erase for next data line */
if (pblk_blk_erase_async(pblk, erase_ppa)) {
struct pblk_line *e_line = pblk_line_get_erase(pblk);
@@ -508,7 +523,7 @@ static int pblk_submit_write(struct pblk *pblk)
if (!secs_avail)
return 1;
- secs_to_flush = pblk_rb_sync_point_count(&pblk->rwb);
+ secs_to_flush = pblk_rb_flush_point_count(&pblk->rwb);
if (!secs_to_flush && secs_avail < pblk->min_write_pgs)
return 1;
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index 59a64d461a5d..8c357fb6538e 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -51,17 +51,16 @@
#define NR_PHY_IN_LOG (PBLK_EXPOSED_PAGE_SIZE / PBLK_SECTOR)
-#define pblk_for_each_lun(pblk, rlun, i) \
- for ((i) = 0, rlun = &(pblk)->luns[0]; \
- (i) < (pblk)->nr_luns; (i)++, rlun = &(pblk)->luns[(i)])
-
/* Static pool sizes */
#define PBLK_GEN_WS_POOL_SIZE (2)
+#define PBLK_DEFAULT_OP (11)
+
enum {
PBLK_READ = READ,
PBLK_WRITE = WRITE,/* Write from write buffer */
PBLK_WRITE_INT, /* Internal write - no write buffer */
+ PBLK_READ_RECOV, /* Recovery read - errors allowed */
PBLK_ERASE,
};
@@ -114,6 +113,7 @@ struct pblk_c_ctx {
/* read context */
struct pblk_g_ctx {
void *private;
+ unsigned long start_time;
u64 lba;
};
@@ -170,7 +170,7 @@ struct pblk_rb {
* the last submitted entry that has
* been successfully persisted to media
*/
- unsigned int sync_point; /* Sync point - last entry that must be
+ unsigned int flush_point; /* Flush point - last entry that must be
* flushed to the media. Used with
* REQ_FLUSH and REQ_FUA
*/
@@ -193,7 +193,7 @@ struct pblk_rb {
spinlock_t s_lock; /* Sync lock */
#ifdef CONFIG_NVM_DEBUG
- atomic_t inflight_sync_point; /* Not served REQ_FLUSH | REQ_FUA */
+ atomic_t inflight_flush_point; /* Not served REQ_FLUSH | REQ_FUA */
#endif
};
@@ -256,9 +256,6 @@ struct pblk_rl {
unsigned int high; /* Upper threshold for rate limiter (free run -
* user I/O rate limiter
*/
- unsigned int low; /* Lower threshold for rate limiter (user I/O
- * rate limiter - stall)
- */
unsigned int high_pw; /* High rounded up as a power of 2 */
#define PBLK_USER_HIGH_THRS 8 /* Begin write limit at 12.5% available blks */
@@ -292,7 +289,9 @@ struct pblk_rl {
unsigned long long nr_secs;
unsigned long total_blocks;
- atomic_t free_blocks;
+
+ atomic_t free_blocks; /* Total number of free blocks (+ OP) */
+ atomic_t free_user_blocks; /* Number of user free blocks (no OP) */
};
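
free_blocks counts every free block including the over-provisioned share, while free_user_blocks excludes it; the gap is the budget GC can consume without throttling user I/O. Assuming op_blks is derived as a straight percentage of all blocks (a guess consistent with PBLK_DEFAULT_OP above; the actual derivation is not part of this hunk), a sketch:

	#include <stdio.h>

	#define PBLK_DEFAULT_OP	11	/* percent, from this patch */

	int main(void)
	{
		long total_blocks = 12800;		/* hypothetical */
		/* assumed derivation: OP as a percentage of all blocks */
		long op_blks = total_blocks * PBLK_DEFAULT_OP / 100;

		long free_blocks = total_blocks;	/* fresh device */
		long free_user_blocks = free_blocks - op_blks;

		printf("op_blks=%ld user-visible free=%ld\n",
		       op_blks, free_user_blocks);
		return 0;
	}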
#define PBLK_LINE_EMPTY (~0U)
@@ -583,7 +582,9 @@ struct pblk {
*/
sector_t capacity; /* Device capacity when bad blocks are subtracted */
- int over_pct; /* Percentage of device used for over-provisioning */
+
+ int op; /* Percentage of device used for over-provisioning */
+ int op_blks; /* Number of blocks used for over-provisioning */
/* pblk provisioning values. Used by rate limiter */
struct pblk_rl rl;
@@ -691,7 +692,7 @@ unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries);
struct pblk_rb_entry *pblk_rb_sync_scan_entry(struct pblk_rb *rb,
struct ppa_addr *ppa);
void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags);
-unsigned int pblk_rb_sync_point_count(struct pblk_rb *rb);
+unsigned int pblk_rb_flush_point_count(struct pblk_rb *rb);
unsigned int pblk_rb_read_count(struct pblk_rb *rb);
unsigned int pblk_rb_sync_count(struct pblk_rb *rb);
@@ -812,7 +813,7 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq);
void pblk_submit_rec(struct work_struct *work);
struct pblk_line *pblk_recov_l2p(struct pblk *pblk);
int pblk_recov_pad(struct pblk *pblk);
-__le64 *pblk_recov_get_lba_list(struct pblk *pblk, struct line_emeta *emeta);
+int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta);
int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
struct pblk_rec_ctx *recovery, u64 *comp_bits,
unsigned int comp);
@@ -843,6 +844,7 @@ void pblk_rl_free(struct pblk_rl *rl);
void pblk_rl_update_rates(struct pblk_rl *rl);
int pblk_rl_high_thrs(struct pblk_rl *rl);
unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl);
+unsigned long pblk_rl_nr_user_free_blks(struct pblk_rl *rl);
int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries);
void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries);
void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries);
@@ -851,7 +853,8 @@ void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries);
void pblk_rl_out(struct pblk_rl *rl, int nr_user, int nr_gc);
int pblk_rl_max_io(struct pblk_rl *rl);
void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line);
-void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line);
+void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line,
+ bool used);
int pblk_rl_is_limit(struct pblk_rl *rl);
/*
@@ -907,28 +910,47 @@ static inline int pblk_pad_distance(struct pblk *pblk)
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
- return NVM_MEM_PAGE_WRITE * geo->nr_luns * geo->sec_per_pl;
+ return NVM_MEM_PAGE_WRITE * geo->all_luns * geo->sec_per_pl;
}
-static inline int pblk_dev_ppa_to_line(struct ppa_addr p)
+static inline int pblk_ppa_to_line(struct ppa_addr p)
{
return p.g.blk;
}
-static inline int pblk_tgt_ppa_to_line(struct ppa_addr p)
+static inline int pblk_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
{
- return p.g.blk;
+ return p.g.lun * geo->nr_chnls + p.g.ch;
}
-static inline int pblk_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
+static inline struct ppa_addr addr_to_gen_ppa(struct pblk *pblk, u64 paddr,
+ u64 line_id)
{
- return p.g.lun * geo->nr_chnls + p.g.ch;
+ struct ppa_addr ppa;
+
+ ppa.ppa = 0;
+ ppa.g.blk = line_id;
+ ppa.g.pg = (paddr & pblk->ppaf.pg_mask) >> pblk->ppaf.pg_offset;
+ ppa.g.lun = (paddr & pblk->ppaf.lun_mask) >> pblk->ppaf.lun_offset;
+ ppa.g.ch = (paddr & pblk->ppaf.ch_mask) >> pblk->ppaf.ch_offset;
+ ppa.g.pl = (paddr & pblk->ppaf.pln_mask) >> pblk->ppaf.pln_offset;
+ ppa.g.sec = (paddr & pblk->ppaf.sec_mask) >> pblk->ppaf.sec_offset;
+
+ return ppa;
}
-/* A block within a line corresponds to the lun */
-static inline int pblk_dev_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
+static inline u64 pblk_dev_ppa_to_line_addr(struct pblk *pblk,
+ struct ppa_addr p)
{
- return p.g.lun * geo->nr_chnls + p.g.ch;
+ u64 paddr;
+
+ paddr = (u64)p.g.pg << pblk->ppaf.pg_offset;
+ paddr |= (u64)p.g.lun << pblk->ppaf.lun_offset;
+ paddr |= (u64)p.g.ch << pblk->ppaf.ch_offset;
+ paddr |= (u64)p.g.pl << pblk->ppaf.pln_offset;
+ paddr |= (u64)p.g.sec << pblk->ppaf.sec_offset;
+
+ return paddr;
}
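
addr_to_gen_ppa() and pblk_dev_ppa_to_line_addr() are inverses: one scatters a line-relative address into the device ppa bitfields through per-field masks and offsets, the other gathers it back. A self-contained round trip with invented field widths (real widths come from the device's ppa address format):

	#include <stdint.h>
	#include <stdio.h>

	/* hypothetical layout: sec:2 pl:1 ch:3 lun:4 pg:8 bits */
	#define SEC_OFF	0
	#define PL_OFF	2
	#define CH_OFF	3
	#define LUN_OFF	6
	#define PG_OFF	10

	static uint64_t pack(uint64_t pg, uint64_t lun, uint64_t ch,
			     uint64_t pl, uint64_t sec)
	{
		return (pg << PG_OFF) | (lun << LUN_OFF) | (ch << CH_OFF) |
		       (pl << PL_OFF) | (sec << SEC_OFF);
	}

	int main(void)
	{
		uint64_t paddr = pack(5, 3, 2, 1, 1);

		/* unpack: shift then mask, mirroring addr_to_gen_ppa() */
		printf("pg=%llu lun=%llu ch=%llu pl=%llu sec=%llu\n",
		       (unsigned long long)(paddr >> PG_OFF),
		       (unsigned long long)((paddr >> LUN_OFF) & 0xf),
		       (unsigned long long)((paddr >> CH_OFF) & 0x7),
		       (unsigned long long)((paddr >> PL_OFF) & 0x1),
		       (unsigned long long)((paddr >> SEC_OFF) & 0x3));
		return 0;
	}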
static inline struct ppa_addr pblk_ppa32_to_ppa64(struct pblk *pblk, u32 ppa32)
@@ -960,24 +982,6 @@ static inline struct ppa_addr pblk_ppa32_to_ppa64(struct pblk *pblk, u32 ppa32)
return ppa64;
}
-static inline struct ppa_addr pblk_trans_map_get(struct pblk *pblk,
- sector_t lba)
-{
- struct ppa_addr ppa;
-
- if (pblk->ppaf_bitsize < 32) {
- u32 *map = (u32 *)pblk->trans_map;
-
- ppa = pblk_ppa32_to_ppa64(pblk, map[lba]);
- } else {
- struct ppa_addr *map = (struct ppa_addr *)pblk->trans_map;
-
- ppa = map[lba];
- }
-
- return ppa;
-}
-
static inline u32 pblk_ppa64_to_ppa32(struct pblk *pblk, struct ppa_addr ppa64)
{
u32 ppa32 = 0;
@@ -999,33 +1003,36 @@ static inline u32 pblk_ppa64_to_ppa32(struct pblk *pblk, struct ppa_addr ppa64)
return ppa32;
}
-static inline void pblk_trans_map_set(struct pblk *pblk, sector_t lba,
- struct ppa_addr ppa)
+static inline struct ppa_addr pblk_trans_map_get(struct pblk *pblk,
+ sector_t lba)
{
+ struct ppa_addr ppa;
+
if (pblk->ppaf_bitsize < 32) {
u32 *map = (u32 *)pblk->trans_map;
- map[lba] = pblk_ppa64_to_ppa32(pblk, ppa);
+ ppa = pblk_ppa32_to_ppa64(pblk, map[lba]);
} else {
- u64 *map = (u64 *)pblk->trans_map;
+ struct ppa_addr *map = (struct ppa_addr *)pblk->trans_map;
- map[lba] = ppa.ppa;
+ ppa = map[lba];
}
+
+ return ppa;
}
-static inline u64 pblk_dev_ppa_to_line_addr(struct pblk *pblk,
- struct ppa_addr p)
+static inline void pblk_trans_map_set(struct pblk *pblk, sector_t lba,
+ struct ppa_addr ppa)
{
- u64 paddr;
+ if (pblk->ppaf_bitsize < 32) {
+ u32 *map = (u32 *)pblk->trans_map;
- paddr = 0;
- paddr |= (u64)p.g.pg << pblk->ppaf.pg_offset;
- paddr |= (u64)p.g.lun << pblk->ppaf.lun_offset;
- paddr |= (u64)p.g.ch << pblk->ppaf.ch_offset;
- paddr |= (u64)p.g.pl << pblk->ppaf.pln_offset;
- paddr |= (u64)p.g.sec << pblk->ppaf.sec_offset;
+ map[lba] = pblk_ppa64_to_ppa32(pblk, ppa);
+ } else {
+ u64 *map = (u64 *)pblk->trans_map;
- return paddr;
+ map[lba] = ppa.ppa;
+ }
}
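
When the ppa format fits in fewer than 32 bits, the translation map stores packed u32 entries instead of the full struct ppa_addr, halving L2P memory. A rough estimate of what that saves (hypothetical 1 TB device with 4 KB sectors):

	#include <stdio.h>

	int main(void)
	{
		long long nr_secs = (1024LL * 1024 * 1024 * 1024) / 4096;

		printf("entries:    %lld\n", nr_secs);
		printf("64-bit map: %lld MiB\n", nr_secs * 8 >> 20);	/* 2048 */
		printf("32-bit map: %lld MiB\n", nr_secs * 4 >> 20);	/* 1024 */
		return 0;
	}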
static inline int pblk_ppa_empty(struct ppa_addr ppa_addr)
@@ -1040,10 +1047,7 @@ static inline void pblk_ppa_set_empty(struct ppa_addr *ppa_addr)
static inline bool pblk_ppa_comp(struct ppa_addr lppa, struct ppa_addr rppa)
{
- if (lppa.ppa == rppa.ppa)
- return true;
-
- return false;
+ return (lppa.ppa == rppa.ppa);
}
static inline int pblk_addr_in_cache(struct ppa_addr ppa)
@@ -1066,32 +1070,6 @@ static inline struct ppa_addr pblk_cacheline_to_addr(int addr)
return p;
}
-static inline struct ppa_addr addr_to_gen_ppa(struct pblk *pblk, u64 paddr,
- u64 line_id)
-{
- struct ppa_addr ppa;
-
- ppa.ppa = 0;
- ppa.g.blk = line_id;
- ppa.g.pg = (paddr & pblk->ppaf.pg_mask) >> pblk->ppaf.pg_offset;
- ppa.g.lun = (paddr & pblk->ppaf.lun_mask) >> pblk->ppaf.lun_offset;
- ppa.g.ch = (paddr & pblk->ppaf.ch_mask) >> pblk->ppaf.ch_offset;
- ppa.g.pl = (paddr & pblk->ppaf.pln_mask) >> pblk->ppaf.pln_offset;
- ppa.g.sec = (paddr & pblk->ppaf.sec_mask) >> pblk->ppaf.sec_offset;
-
- return ppa;
-}
-
-static inline struct ppa_addr addr_to_pblk_ppa(struct pblk *pblk, u64 paddr,
- u64 line_id)
-{
- struct ppa_addr ppa;
-
- ppa = addr_to_gen_ppa(pblk, paddr, line_id);
-
- return ppa;
-}
-
static inline u32 pblk_calc_meta_header_crc(struct pblk *pblk,
struct line_header *header)
{
@@ -1212,10 +1190,10 @@ static inline int pblk_boundary_ppa_checks(struct nvm_tgt_dev *tgt_dev,
if (!ppa->c.is_cached &&
ppa->g.ch < geo->nr_chnls &&
- ppa->g.lun < geo->luns_per_chnl &&
+ ppa->g.lun < geo->nr_luns &&
ppa->g.pl < geo->nr_planes &&
- ppa->g.blk < geo->blks_per_lun &&
- ppa->g.pg < geo->pgs_per_blk &&
+ ppa->g.blk < geo->nr_chks &&
+ ppa->g.pg < geo->ws_per_chk &&
ppa->g.sec < geo->sec_per_pg)
continue;
@@ -1245,7 +1223,7 @@ static inline int pblk_check_io(struct pblk *pblk, struct nvm_rq *rqd)
for (i = 0; i < rqd->nr_ppas; i++) {
ppa = ppa_list[i];
- line = &pblk->lines[pblk_dev_ppa_to_line(ppa)];
+ line = &pblk->lines[pblk_ppa_to_line(ppa)];
spin_lock(&line->lock);
if (line->state != PBLK_LINESTATE_OPEN) {
@@ -1288,11 +1266,6 @@ static inline unsigned int pblk_get_secs(struct bio *bio)
return bio->bi_iter.bi_size / PBLK_EXPOSED_PAGE_SIZE;
}
-static inline sector_t pblk_get_sector(sector_t lba)
-{
- return lba * NR_PHY_IN_LOG;
-}
-
static inline void pblk_setup_uuid(struct pblk *pblk)
{
uuid_le uuid;
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
deleted file mode 100644
index 0993c14be860..000000000000
--- a/drivers/lightnvm/rrpc.c
+++ /dev/null
@@ -1,1625 +0,0 @@
-/*
- * Copyright (C) 2015 IT University of Copenhagen
- * Initial release: Matias Bjorling <m@bjorling.me>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
- */
-
-#include "rrpc.h"
-
-static struct kmem_cache *rrpc_gcb_cache, *rrpc_rq_cache;
-static DECLARE_RWSEM(rrpc_lock);
-
-static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd, unsigned long flags);
-
-#define rrpc_for_each_lun(rrpc, rlun, i) \
- for ((i) = 0, rlun = &(rrpc)->luns[0]; \
- (i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])
-
-static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_block *rblk = a->rblk;
- unsigned int pg_offset;
-
- lockdep_assert_held(&rrpc->rev_lock);
-
- if (a->addr == ADDR_EMPTY || !rblk)
- return;
-
- spin_lock(&rblk->lock);
-
- div_u64_rem(a->addr, dev->geo.sec_per_blk, &pg_offset);
- WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
- rblk->nr_invalid_pages++;
-
- spin_unlock(&rblk->lock);
-
- rrpc->rev_trans_map[a->addr].addr = ADDR_EMPTY;
-}
-
-static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
- unsigned int len)
-{
- sector_t i;
-
- spin_lock(&rrpc->rev_lock);
- for (i = slba; i < slba + len; i++) {
- struct rrpc_addr *gp = &rrpc->trans_map[i];
-
- rrpc_page_invalidate(rrpc, gp);
- gp->rblk = NULL;
- }
- spin_unlock(&rrpc->rev_lock);
-}
-
-static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc,
- sector_t laddr, unsigned int pages)
-{
- struct nvm_rq *rqd;
- struct rrpc_inflight_rq *inf;
-
- rqd = mempool_alloc(rrpc->rq_pool, GFP_ATOMIC);
- if (!rqd)
- return ERR_PTR(-ENOMEM);
-
- inf = rrpc_get_inflight_rq(rqd);
- if (rrpc_lock_laddr(rrpc, laddr, pages, inf)) {
- mempool_free(rqd, rrpc->rq_pool);
- return NULL;
- }
-
- return rqd;
-}
-
-static void rrpc_inflight_laddr_release(struct rrpc *rrpc, struct nvm_rq *rqd)
-{
- struct rrpc_inflight_rq *inf = rrpc_get_inflight_rq(rqd);
-
- rrpc_unlock_laddr(rrpc, inf);
-
- mempool_free(rqd, rrpc->rq_pool);
-}
-
-static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
-{
- sector_t slba = bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
- sector_t len = bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
- struct nvm_rq *rqd;
-
- while (1) {
- rqd = rrpc_inflight_laddr_acquire(rrpc, slba, len);
- if (rqd)
- break;
-
- schedule();
- }
-
- if (IS_ERR(rqd)) {
- pr_err("rrpc: unable to acquire inflight IO\n");
- bio_io_error(bio);
- return;
- }
-
- rrpc_invalidate_range(rrpc, slba, len);
- rrpc_inflight_laddr_release(rrpc, rqd);
-}
-
-static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
-
- return (rblk->next_page == dev->geo.sec_per_blk);
-}
-
-/* Calculate relative addr for the given block, considering instantiated LUNs */
-static u64 block_to_rel_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_lun *rlun = rblk->rlun;
-
- return rlun->id * dev->geo.sec_per_blk;
-}
-
-static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_tgt_dev *dev,
- struct rrpc_addr *gp)
-{
- struct rrpc_block *rblk = gp->rblk;
- struct rrpc_lun *rlun = rblk->rlun;
- u64 addr = gp->addr;
- struct ppa_addr paddr;
-
- paddr.ppa = addr;
- paddr = rrpc_linear_to_generic_addr(&dev->geo, paddr);
- paddr.g.ch = rlun->bppa.g.ch;
- paddr.g.lun = rlun->bppa.g.lun;
- paddr.g.blk = rblk->id;
-
- return paddr;
-}
-
-/* requires lun->lock taken */
-static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *new_rblk,
- struct rrpc_block **cur_rblk)
-{
- struct rrpc *rrpc = rlun->rrpc;
-
- if (*cur_rblk) {
- spin_lock(&(*cur_rblk)->lock);
- WARN_ON(!block_is_full(rrpc, *cur_rblk));
- spin_unlock(&(*cur_rblk)->lock);
- }
- *cur_rblk = new_rblk;
-}
-
-static struct rrpc_block *__rrpc_get_blk(struct rrpc *rrpc,
- struct rrpc_lun *rlun)
-{
- struct rrpc_block *rblk = NULL;
-
- if (list_empty(&rlun->free_list))
- goto out;
-
- rblk = list_first_entry(&rlun->free_list, struct rrpc_block, list);
-
- list_move_tail(&rblk->list, &rlun->used_list);
- rblk->state = NVM_BLK_ST_TGT;
- rlun->nr_free_blocks--;
-
-out:
- return rblk;
-}
-
-static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
- unsigned long flags)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_block *rblk;
- int is_gc = flags & NVM_IOTYPE_GC;
-
- spin_lock(&rlun->lock);
- if (!is_gc && rlun->nr_free_blocks < rlun->reserved_blocks) {
- pr_err("nvm: rrpc: cannot give block to non GC request\n");
- spin_unlock(&rlun->lock);
- return NULL;
- }
-
- rblk = __rrpc_get_blk(rrpc, rlun);
- if (!rblk) {
- pr_err("nvm: rrpc: cannot get new block\n");
- spin_unlock(&rlun->lock);
- return NULL;
- }
- spin_unlock(&rlun->lock);
-
- bitmap_zero(rblk->invalid_pages, dev->geo.sec_per_blk);
- rblk->next_page = 0;
- rblk->nr_invalid_pages = 0;
- atomic_set(&rblk->data_cmnt_size, 0);
-
- return rblk;
-}
-
-static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- struct rrpc_lun *rlun = rblk->rlun;
-
- spin_lock(&rlun->lock);
- if (rblk->state & NVM_BLK_ST_TGT) {
- list_move_tail(&rblk->list, &rlun->free_list);
- rlun->nr_free_blocks++;
- rblk->state = NVM_BLK_ST_FREE;
- } else if (rblk->state & NVM_BLK_ST_BAD) {
- list_move_tail(&rblk->list, &rlun->bb_list);
- rblk->state = NVM_BLK_ST_BAD;
- } else {
- WARN_ON_ONCE(1);
- pr_err("rrpc: erroneous type (ch:%d,lun:%d,blk%d-> %u)\n",
- rlun->bppa.g.ch, rlun->bppa.g.lun,
- rblk->id, rblk->state);
- list_move_tail(&rblk->list, &rlun->bb_list);
- }
- spin_unlock(&rlun->lock);
-}
-
-static void rrpc_put_blks(struct rrpc *rrpc)
-{
- struct rrpc_lun *rlun;
- int i;
-
- for (i = 0; i < rrpc->nr_luns; i++) {
- rlun = &rrpc->luns[i];
- if (rlun->cur)
- rrpc_put_blk(rrpc, rlun->cur);
- if (rlun->gc_cur)
- rrpc_put_blk(rrpc, rlun->gc_cur);
- }
-}
-
-static struct rrpc_lun *get_next_lun(struct rrpc *rrpc)
-{
- int next = atomic_inc_return(&rrpc->next_lun);
-
- return &rrpc->luns[next % rrpc->nr_luns];
-}
-
-static void rrpc_gc_kick(struct rrpc *rrpc)
-{
- struct rrpc_lun *rlun;
- unsigned int i;
-
- for (i = 0; i < rrpc->nr_luns; i++) {
- rlun = &rrpc->luns[i];
- queue_work(rrpc->krqd_wq, &rlun->ws_gc);
- }
-}
-
-/*
- * timed GC every interval.
- */
-static void rrpc_gc_timer(struct timer_list *t)
-{
- struct rrpc *rrpc = from_timer(rrpc, t, gc_timer);
-
- rrpc_gc_kick(rrpc);
- mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
-}
-
-static void rrpc_end_sync_bio(struct bio *bio)
-{
- struct completion *waiting = bio->bi_private;
-
- if (bio->bi_status)
- pr_err("nvm: gc request failed (%u).\n", bio->bi_status);
-
- complete(waiting);
-}
-
-/*
- * rrpc_move_valid_pages -- migrate live data off the block
- * @rrpc: the 'rrpc' structure
- * @block: the block from which to migrate live pages
- *
- * Description:
- * GC algorithms may call this function to migrate remaining live
- * pages off the block prior to erasing it. This function blocks
- * further execution until the operation is complete.
- */
-static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct request_queue *q = dev->q;
- struct rrpc_rev_addr *rev;
- struct nvm_rq *rqd;
- struct bio *bio;
- struct page *page;
- int slot;
- int nr_sec_per_blk = dev->geo.sec_per_blk;
- u64 phys_addr;
- DECLARE_COMPLETION_ONSTACK(wait);
-
- if (bitmap_full(rblk->invalid_pages, nr_sec_per_blk))
- return 0;
-
- bio = bio_alloc(GFP_NOIO, 1);
- if (!bio) {
- pr_err("nvm: could not alloc bio to gc\n");
- return -ENOMEM;
- }
-
- page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
-
- while ((slot = find_first_zero_bit(rblk->invalid_pages,
- nr_sec_per_blk)) < nr_sec_per_blk) {
-
- /* Lock laddr */
- phys_addr = rrpc_blk_to_ppa(rrpc, rblk) + slot;
-
-try:
- spin_lock(&rrpc->rev_lock);
- /* Get logical address from physical to logical table */
- rev = &rrpc->rev_trans_map[phys_addr];
- /* already updated by previous regular write */
- if (rev->addr == ADDR_EMPTY) {
- spin_unlock(&rrpc->rev_lock);
- continue;
- }
-
- rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
- if (IS_ERR_OR_NULL(rqd)) {
- spin_unlock(&rrpc->rev_lock);
- schedule();
- goto try;
- }
-
- spin_unlock(&rrpc->rev_lock);
-
- /* Perform read to do GC */
- bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
- bio->bi_private = &wait;
- bio->bi_end_io = rrpc_end_sync_bio;
-
- /* TODO: may fail when EXP_PG_SIZE > PAGE_SIZE */
- bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);
-
- if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
- pr_err("rrpc: gc read failed.\n");
- rrpc_inflight_laddr_release(rrpc, rqd);
- goto finished;
- }
- wait_for_completion_io(&wait);
- if (bio->bi_status) {
- rrpc_inflight_laddr_release(rrpc, rqd);
- goto finished;
- }
-
- bio_reset(bio);
- reinit_completion(&wait);
-
- bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
- bio->bi_private = &wait;
- bio->bi_end_io = rrpc_end_sync_bio;
-
- bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);
-
- /* turn the command around and write the data back to a new
- * address
- */
- if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
- pr_err("rrpc: gc write failed.\n");
- rrpc_inflight_laddr_release(rrpc, rqd);
- goto finished;
- }
- wait_for_completion_io(&wait);
-
- rrpc_inflight_laddr_release(rrpc, rqd);
- if (bio->bi_status)
- goto finished;
-
- bio_reset(bio);
- }
-
-finished:
- mempool_free(page, rrpc->page_pool);
- bio_put(bio);
-
- if (!bitmap_full(rblk->invalid_pages, nr_sec_per_blk)) {
- pr_err("nvm: failed to garbage collect block\n");
- return -EIO;
- }
-
- return 0;
-}
-
-static void rrpc_block_gc(struct work_struct *work)
-{
- struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
- ws_gc);
- struct rrpc *rrpc = gcb->rrpc;
- struct rrpc_block *rblk = gcb->rblk;
- struct rrpc_lun *rlun = rblk->rlun;
- struct ppa_addr ppa;
-
- mempool_free(gcb, rrpc->gcb_pool);
- pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' being reclaimed\n",
- rlun->bppa.g.ch, rlun->bppa.g.lun,
- rblk->id);
-
- if (rrpc_move_valid_pages(rrpc, rblk))
- goto put_back;
-
- ppa.ppa = 0;
- ppa.g.ch = rlun->bppa.g.ch;
- ppa.g.lun = rlun->bppa.g.lun;
- ppa.g.blk = rblk->id;
-
- if (nvm_erase_sync(rrpc->dev, &ppa, 1))
- goto put_back;
-
- rrpc_put_blk(rrpc, rblk);
-
- return;
-
-put_back:
- spin_lock(&rlun->lock);
- list_add_tail(&rblk->prio, &rlun->prio_list);
- spin_unlock(&rlun->lock);
-}
-
-/* the block with the highest number of invalid pages will be at the beginning
- * of the list
- */
-static struct rrpc_block *rblk_max_invalid(struct rrpc_block *ra,
- struct rrpc_block *rb)
-{
- if (ra->nr_invalid_pages == rb->nr_invalid_pages)
- return ra;
-
- return (ra->nr_invalid_pages < rb->nr_invalid_pages) ? rb : ra;
-}
-
-/* linearly find the block with the highest number of invalid pages
- * requires lun->lock
- */
-static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
-{
- struct list_head *prio_list = &rlun->prio_list;
- struct rrpc_block *rblk, *max;
-
- BUG_ON(list_empty(prio_list));
-
- max = list_first_entry(prio_list, struct rrpc_block, prio);
- list_for_each_entry(rblk, prio_list, prio)
- max = rblk_max_invalid(max, rblk);
-
- return max;
-}
-
-static void rrpc_lun_gc(struct work_struct *work)
-{
- struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
- struct rrpc *rrpc = rlun->rrpc;
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_block_gc *gcb;
- unsigned int nr_blocks_need;
-
- nr_blocks_need = dev->geo.blks_per_lun / GC_LIMIT_INVERSE;
-
- if (nr_blocks_need < rrpc->nr_luns)
- nr_blocks_need = rrpc->nr_luns;
-
- spin_lock(&rlun->lock);
- while (nr_blocks_need > rlun->nr_free_blocks &&
- !list_empty(&rlun->prio_list)) {
- struct rrpc_block *rblk = block_prio_find_max(rlun);
-
- if (!rblk->nr_invalid_pages)
- break;
-
- gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
- if (!gcb)
- break;
-
- list_del_init(&rblk->prio);
-
- WARN_ON(!block_is_full(rrpc, rblk));
-
- pr_debug("rrpc: selected block 'ch:%d,lun:%d,blk:%d' for GC\n",
- rlun->bppa.g.ch, rlun->bppa.g.lun,
- rblk->id);
-
- gcb->rrpc = rrpc;
- gcb->rblk = rblk;
- INIT_WORK(&gcb->ws_gc, rrpc_block_gc);
-
- queue_work(rrpc->kgc_wq, &gcb->ws_gc);
-
- nr_blocks_need--;
- }
- spin_unlock(&rlun->lock);
-
- /* TODO: Hint that request queue can be started again */
-}
-
-static void rrpc_gc_queue(struct work_struct *work)
-{
- struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
- ws_gc);
- struct rrpc *rrpc = gcb->rrpc;
- struct rrpc_block *rblk = gcb->rblk;
- struct rrpc_lun *rlun = rblk->rlun;
-
- spin_lock(&rlun->lock);
- list_add_tail(&rblk->prio, &rlun->prio_list);
- spin_unlock(&rlun->lock);
-
- mempool_free(gcb, rrpc->gcb_pool);
- pr_debug("nvm: block 'ch:%d,lun:%d,blk:%d' full, allow GC (sched)\n",
- rlun->bppa.g.ch, rlun->bppa.g.lun,
- rblk->id);
-}
-
-static const struct block_device_operations rrpc_fops = {
- .owner = THIS_MODULE,
-};
-
-static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
-{
- unsigned int i;
- struct rrpc_lun *rlun, *max_free;
-
- if (!is_gc)
- return get_next_lun(rrpc);
-
- /* during GC, we don't care about RR; instead, we want to make
- * sure that we maintain evenness between the block luns.
- */
- max_free = &rrpc->luns[0];
- /* prevent the GC-ing lun from devouring pages of a lun with
- * few free blocks. We don't take the lock, as we only need an
- * estimate.
- */
- rrpc_for_each_lun(rrpc, rlun, i) {
- if (rlun->nr_free_blocks > max_free->nr_free_blocks)
- max_free = rlun;
- }
-
- return max_free;
-}
-
-static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
- struct rrpc_block *rblk, u64 paddr)
-{
- struct rrpc_addr *gp;
- struct rrpc_rev_addr *rev;
-
- BUG_ON(laddr >= rrpc->nr_sects);
-
- gp = &rrpc->trans_map[laddr];
- spin_lock(&rrpc->rev_lock);
- if (gp->rblk)
- rrpc_page_invalidate(rrpc, gp);
-
- gp->addr = paddr;
- gp->rblk = rblk;
-
- rev = &rrpc->rev_trans_map[gp->addr];
- rev->addr = laddr;
- spin_unlock(&rrpc->rev_lock);
-
- return gp;
-}
-
-static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- u64 addr = ADDR_EMPTY;
-
- spin_lock(&rblk->lock);
- if (block_is_full(rrpc, rblk))
- goto out;
-
- addr = rblk->next_page;
-
- rblk->next_page++;
-out:
- spin_unlock(&rblk->lock);
- return addr;
-}
-
-/* Map logical address to a physical page. The mapping implements a round robin
- * approach and allocates a page from the next lun available.
- *
- * Returns rrpc_addr with the physical address and block. Returns NULL if no
- * blocks in the next rlun are available.
- */
-static struct ppa_addr rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
- int is_gc)
-{
- struct nvm_tgt_dev *tgt_dev = rrpc->dev;
- struct rrpc_lun *rlun;
- struct rrpc_block *rblk, **cur_rblk;
- struct rrpc_addr *p;
- struct ppa_addr ppa;
- u64 paddr;
- int gc_force = 0;
-
- ppa.ppa = ADDR_EMPTY;
- rlun = rrpc_get_lun_rr(rrpc, is_gc);
-
- if (!is_gc && rlun->nr_free_blocks < rrpc->nr_luns * 4)
- return ppa;
-
- /*
- * page allocation steps (sketched after this function):
- * 1. Try to allocate a new page from the current rblk
- * 2a. If that succeeds, proceed to map it in and return
- * 2b. If it fails, first try to allocate a new block from the media
- * manager, and then retry step 1. Retry until the normal block
- * pool is exhausted.
- * 3. If exhausted, and the garbage collector is requesting the block,
- * go to the reserved block and retry step 1.
- * If this fails as well, or the request does not come from GC,
- * report that no block could be retrieved and let the caller
- * handle further processing.
- */
-
- spin_lock(&rlun->lock);
- cur_rblk = &rlun->cur;
- rblk = rlun->cur;
-retry:
- paddr = rrpc_alloc_addr(rrpc, rblk);
-
- if (paddr != ADDR_EMPTY)
- goto done;
-
- if (!list_empty(&rlun->wblk_list)) {
-new_blk:
- rblk = list_first_entry(&rlun->wblk_list, struct rrpc_block,
- prio);
- rrpc_set_lun_cur(rlun, rblk, cur_rblk);
- list_del(&rblk->prio);
- goto retry;
- }
- spin_unlock(&rlun->lock);
-
- rblk = rrpc_get_blk(rrpc, rlun, gc_force);
- if (rblk) {
- spin_lock(&rlun->lock);
- list_add_tail(&rblk->prio, &rlun->wblk_list);
- /*
- * another thread might already have added a new block;
- * therefore, make sure that one is used instead of the
- * one just added.
- */
- goto new_blk;
- }
-
- if (unlikely(is_gc) && !gc_force) {
- /* retry from emergency gc block */
- cur_rblk = &rlun->gc_cur;
- rblk = rlun->gc_cur;
- gc_force = 1;
- spin_lock(&rlun->lock);
- goto retry;
- }
-
- pr_err("rrpc: failed to allocate new block\n");
- return ppa;
-done:
- spin_unlock(&rlun->lock);
- p = rrpc_update_map(rrpc, laddr, rblk, paddr);
- if (!p)
- return ppa;
-
- /* return global address */
- return rrpc_ppa_to_gaddr(tgt_dev, p);
-}
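
The step ladder documented above reduces to: try the current block, then a fresh block from the media manager, then, for GC only, the reserved emergency block. A simplified control-flow sketch with hypothetical helpers standing in for rrpc_alloc_addr() and rrpc_get_blk():

	#include <stdio.h>

	#define ADDR_EMPTY	(~0ULL)

	/* hypothetical allocator: only the reserved gc block has room */
	static unsigned long long try_alloc(int which)
	{
		return which == 2 ? 7 : ADDR_EMPTY;
	}

	static unsigned long long map_page(int is_gc)
	{
		unsigned long long paddr;

		if ((paddr = try_alloc(0)) != ADDR_EMPTY)	/* 1: cur blk */
			return paddr;
		if ((paddr = try_alloc(1)) != ADDR_EMPTY)	/* 2: new blk */
			return paddr;
		if (is_gc)
			return try_alloc(2);			/* 3: reserved */
		return ADDR_EMPTY;
	}

	int main(void)
	{
		printf("gc map -> %llu\n", map_page(1));
		return 0;
	}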
-
-static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- struct rrpc_block_gc *gcb;
-
- gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
- if (!gcb) {
- pr_err("rrpc: unable to queue block for gc.");
- return;
- }
-
- gcb->rrpc = rrpc;
- gcb->rblk = rblk;
-
- INIT_WORK(&gcb->ws_gc, rrpc_gc_queue);
- queue_work(rrpc->kgc_wq, &gcb->ws_gc);
-}
-
-static struct rrpc_lun *rrpc_ppa_to_lun(struct rrpc *rrpc, struct ppa_addr p)
-{
- struct rrpc_lun *rlun = NULL;
- int i;
-
- for (i = 0; i < rrpc->nr_luns; i++) {
- if (rrpc->luns[i].bppa.g.ch == p.g.ch &&
- rrpc->luns[i].bppa.g.lun == p.g.lun) {
- rlun = &rrpc->luns[i];
- break;
- }
- }
-
- return rlun;
-}
-
-static void __rrpc_mark_bad_block(struct rrpc *rrpc, struct ppa_addr ppa)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_lun *rlun;
- struct rrpc_block *rblk;
-
- rlun = rrpc_ppa_to_lun(rrpc, ppa);
- rblk = &rlun->blocks[ppa.g.blk];
- rblk->state = NVM_BLK_ST_BAD;
-
- nvm_set_tgt_bb_tbl(dev, &ppa, 1, NVM_BLK_T_GRWN_BAD);
-}
-
-static void rrpc_mark_bad_block(struct rrpc *rrpc, struct nvm_rq *rqd)
-{
- void *comp_bits = &rqd->ppa_status;
- struct ppa_addr ppa, prev_ppa;
- int nr_ppas = rqd->nr_ppas;
- int bit;
-
- if (rqd->nr_ppas == 1)
- __rrpc_mark_bad_block(rrpc, rqd->ppa_addr);
-
- ppa_set_empty(&prev_ppa);
- bit = -1;
- while ((bit = find_next_bit(comp_bits, nr_ppas, bit + 1)) < nr_ppas) {
- ppa = rqd->ppa_list[bit];
- if (ppa_cmp_blk(ppa, prev_ppa))
- continue;
-
- __rrpc_mark_bad_block(rrpc, ppa);
- }
-}
-
-static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
- sector_t laddr, uint8_t npages)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_addr *p;
- struct rrpc_block *rblk;
- int cmnt_size, i;
-
- for (i = 0; i < npages; i++) {
- p = &rrpc->trans_map[laddr + i];
- rblk = p->rblk;
-
- cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
- if (unlikely(cmnt_size == dev->geo.sec_per_blk))
- rrpc_run_gc(rrpc, rblk);
- }
-}
-
-static void rrpc_end_io(struct nvm_rq *rqd)
-{
- struct rrpc *rrpc = rqd->private;
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
- uint8_t npages = rqd->nr_ppas;
- sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;
-
- if (bio_data_dir(rqd->bio) == WRITE) {
- if (rqd->error == NVM_RSP_ERR_FAILWRITE)
- rrpc_mark_bad_block(rrpc, rqd);
-
- rrpc_end_io_write(rrpc, rrqd, laddr, npages);
- }
-
- bio_put(rqd->bio);
-
- if (rrqd->flags & NVM_IOTYPE_GC)
- return;
-
- rrpc_unlock_rq(rrpc, rqd);
-
- if (npages > 1)
- nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
-
- mempool_free(rqd, rrpc->rq_pool);
-}
-
-static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd, unsigned long flags, int npages)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
- struct rrpc_addr *gp;
- sector_t laddr = rrpc_get_laddr(bio);
- int is_gc = flags & NVM_IOTYPE_GC;
- int i;
-
- if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
- nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
- return NVM_IO_REQUEUE;
- }
-
- for (i = 0; i < npages; i++) {
- /* We assume that mapping occurs at 4KB granularity */
- BUG_ON(!(laddr + i < rrpc->nr_sects));
- gp = &rrpc->trans_map[laddr + i];
-
- if (gp->rblk) {
- rqd->ppa_list[i] = rrpc_ppa_to_gaddr(dev, gp);
- } else {
- BUG_ON(is_gc);
- rrpc_unlock_laddr(rrpc, r);
- nvm_dev_dma_free(dev->parent, rqd->ppa_list,
- rqd->dma_ppa_list);
- return NVM_IO_DONE;
- }
- }
-
- rqd->opcode = NVM_OP_HBREAD;
-
- return NVM_IO_OK;
-}
-
-static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
- unsigned long flags)
-{
- int is_gc = flags & NVM_IOTYPE_GC;
- sector_t laddr = rrpc_get_laddr(bio);
- struct rrpc_addr *gp;
-
- if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
- return NVM_IO_REQUEUE;
-
- BUG_ON(!(laddr < rrpc->nr_sects));
- gp = &rrpc->trans_map[laddr];
-
- if (gp->rblk) {
- rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp);
- } else {
- BUG_ON(is_gc);
- rrpc_unlock_rq(rrpc, rqd);
- return NVM_IO_DONE;
- }
-
- rqd->opcode = NVM_OP_HBREAD;
-
- return NVM_IO_OK;
-}
-
-static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd, unsigned long flags, int npages)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
- struct ppa_addr p;
- sector_t laddr = rrpc_get_laddr(bio);
- int is_gc = flags & NVM_IOTYPE_GC;
- int i;
-
- if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
- nvm_dev_dma_free(dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
- return NVM_IO_REQUEUE;
- }
-
- for (i = 0; i < npages; i++) {
- /* We assume that mapping occurs at 4KB granularity */
- p = rrpc_map_page(rrpc, laddr + i, is_gc);
- if (p.ppa == ADDR_EMPTY) {
- BUG_ON(is_gc);
- rrpc_unlock_laddr(rrpc, r);
- nvm_dev_dma_free(dev->parent, rqd->ppa_list,
- rqd->dma_ppa_list);
- rrpc_gc_kick(rrpc);
- return NVM_IO_REQUEUE;
- }
-
- rqd->ppa_list[i] = p;
- }
-
- rqd->opcode = NVM_OP_HBWRITE;
-
- return NVM_IO_OK;
-}
-
-static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd, unsigned long flags)
-{
- struct ppa_addr p;
- int is_gc = flags & NVM_IOTYPE_GC;
- sector_t laddr = rrpc_get_laddr(bio);
-
- if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
- return NVM_IO_REQUEUE;
-
- p = rrpc_map_page(rrpc, laddr, is_gc);
- if (p.ppa == ADDR_EMPTY) {
- BUG_ON(is_gc);
- rrpc_unlock_rq(rrpc, rqd);
- rrpc_gc_kick(rrpc);
- return NVM_IO_REQUEUE;
- }
-
- rqd->ppa_addr = p;
- rqd->opcode = NVM_OP_HBWRITE;
-
- return NVM_IO_OK;
-}
-
-static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd, unsigned long flags, uint8_t npages)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
-
- if (npages > 1) {
- rqd->ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
- &rqd->dma_ppa_list);
- if (!rqd->ppa_list) {
- pr_err("rrpc: not able to allocate ppa list\n");
- return NVM_IO_ERR;
- }
-
- if (bio_op(bio) == REQ_OP_WRITE)
- return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags,
- npages);
-
- return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages);
- }
-
- if (bio_op(bio) == REQ_OP_WRITE)
- return rrpc_write_rq(rrpc, bio, rqd, flags);
-
- return rrpc_read_rq(rrpc, bio, rqd, flags);
-}
-
-static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd, unsigned long flags)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd);
- uint8_t nr_pages = rrpc_get_pages(bio);
- int bio_size = bio_sectors(bio) << 9;
- int err;
-
- if (bio_size < dev->geo.sec_size)
- return NVM_IO_ERR;
- else if (bio_size > dev->geo.max_rq_size)
- return NVM_IO_ERR;
-
- err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages);
- if (err)
- return err;
-
- bio_get(bio);
- rqd->bio = bio;
- rqd->private = rrpc;
- rqd->nr_ppas = nr_pages;
- rqd->end_io = rrpc_end_io;
- rrq->flags = flags;
-
- err = nvm_submit_io(dev, rqd);
- if (err) {
- pr_err("rrpc: I/O submission failed: %d\n", err);
- bio_put(bio);
- if (!(flags & NVM_IOTYPE_GC)) {
- rrpc_unlock_rq(rrpc, rqd);
- if (rqd->nr_ppas > 1)
- nvm_dev_dma_free(dev->parent, rqd->ppa_list,
- rqd->dma_ppa_list);
- }
- return NVM_IO_ERR;
- }
-
- return NVM_IO_OK;
-}
-
-static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
-{
- struct rrpc *rrpc = q->queuedata;
- struct nvm_rq *rqd;
- int err;
-
- blk_queue_split(q, &bio);
-
- if (bio_op(bio) == REQ_OP_DISCARD) {
- rrpc_discard(rrpc, bio);
- return BLK_QC_T_NONE;
- }
-
- rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL);
- memset(rqd, 0, sizeof(struct nvm_rq));
-
- err = rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_NONE);
- switch (err) {
- case NVM_IO_OK:
- return BLK_QC_T_NONE;
- case NVM_IO_ERR:
- bio_io_error(bio);
- break;
- case NVM_IO_DONE:
- bio_endio(bio);
- break;
- case NVM_IO_REQUEUE:
- spin_lock(&rrpc->bio_lock);
- bio_list_add(&rrpc->requeue_bios, bio);
- spin_unlock(&rrpc->bio_lock);
- queue_work(rrpc->kgc_wq, &rrpc->ws_requeue);
- break;
- }
-
- mempool_free(rqd, rrpc->rq_pool);
- return BLK_QC_T_NONE;
-}
-
-static void rrpc_requeue(struct work_struct *work)
-{
- struct rrpc *rrpc = container_of(work, struct rrpc, ws_requeue);
- struct bio_list bios;
- struct bio *bio;
-
- bio_list_init(&bios);
-
- spin_lock(&rrpc->bio_lock);
- bio_list_merge(&bios, &rrpc->requeue_bios);
- bio_list_init(&rrpc->requeue_bios);
- spin_unlock(&rrpc->bio_lock);
-
- while ((bio = bio_list_pop(&bios)))
- rrpc_make_rq(rrpc->disk->queue, bio);
-}
-
-static void rrpc_gc_free(struct rrpc *rrpc)
-{
- if (rrpc->krqd_wq)
- destroy_workqueue(rrpc->krqd_wq);
-
- if (rrpc->kgc_wq)
- destroy_workqueue(rrpc->kgc_wq);
-}
-
-static int rrpc_gc_init(struct rrpc *rrpc)
-{
- rrpc->krqd_wq = alloc_workqueue("rrpc-lun", WQ_MEM_RECLAIM|WQ_UNBOUND,
- rrpc->nr_luns);
- if (!rrpc->krqd_wq)
- return -ENOMEM;
-
- rrpc->kgc_wq = alloc_workqueue("rrpc-bg", WQ_MEM_RECLAIM, 1);
- if (!rrpc->kgc_wq)
- return -ENOMEM;
-
- timer_setup(&rrpc->gc_timer, rrpc_gc_timer, 0);
-
- return 0;
-}
-
-static void rrpc_map_free(struct rrpc *rrpc)
-{
- vfree(rrpc->rev_trans_map);
- vfree(rrpc->trans_map);
-}
-
-static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
-{
- struct rrpc *rrpc = (struct rrpc *)private;
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_addr *addr = rrpc->trans_map + slba;
- struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
- struct rrpc_lun *rlun;
- struct rrpc_block *rblk;
- u64 i;
-
- for (i = 0; i < nlb; i++) {
- struct ppa_addr gaddr;
- u64 pba = le64_to_cpu(entries[i]);
- unsigned int mod;
-
- /* LNVM treats address-spaces as silos: LBA and PBA are
- * equally large and zero-indexed.
- */
- if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
- pr_err("nvm: L2P data entry is out of bounds!\n");
- pr_err("nvm: Maybe loaded an old target L2P\n");
- return -EINVAL;
- }
-
- /* Address zero is a special one. The first page on a disk is
- * protected, as it often holds internal device boot
- * information.
- */
- if (!pba)
- continue;
-
- div_u64_rem(pba, rrpc->nr_sects, &mod);
-
- gaddr = rrpc_recov_addr(dev, pba);
- rlun = rrpc_ppa_to_lun(rrpc, gaddr);
- if (!rlun) {
- pr_err("rrpc: l2p corruption on lba %llu\n",
- slba + i);
- return -EINVAL;
- }
-
- rblk = &rlun->blocks[gaddr.g.blk];
- if (!rblk->state) {
- /* at this point, we don't know anything about the
- * block. It's up to the FTL on top to re-establish the
- * block state. The block is assumed to be open.
- */
- list_move_tail(&rblk->list, &rlun->used_list);
- rblk->state = NVM_BLK_ST_TGT;
- rlun->nr_free_blocks--;
- }
-
- addr[i].addr = pba;
- addr[i].rblk = rblk;
- raddr[mod].addr = slba + i;
- }
-
- return 0;
-}
-
-static int rrpc_map_init(struct rrpc *rrpc)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- sector_t i;
- int ret;
-
- rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_sects);
- if (!rrpc->trans_map)
- return -ENOMEM;
-
- rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
- * rrpc->nr_sects);
- if (!rrpc->rev_trans_map)
- return -ENOMEM;
-
- for (i = 0; i < rrpc->nr_sects; i++) {
- struct rrpc_addr *p = &rrpc->trans_map[i];
- struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];
-
- p->addr = ADDR_EMPTY;
- r->addr = ADDR_EMPTY;
- }
-
- /* Bring up the mapping table from device */
- ret = nvm_get_l2p_tbl(dev, rrpc->soffset, rrpc->nr_sects,
- rrpc_l2p_update, rrpc);
- if (ret) {
- pr_err("nvm: rrpc: could not read L2P table.\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-/* Minimum pages needed within a lun */
-#define PAGE_POOL_SIZE 16
-#define ADDR_POOL_SIZE 64
-
-static int rrpc_core_init(struct rrpc *rrpc)
-{
- down_write(&rrpc_lock);
- if (!rrpc_gcb_cache) {
- rrpc_gcb_cache = kmem_cache_create("rrpc_gcb",
- sizeof(struct rrpc_block_gc), 0, 0, NULL);
- if (!rrpc_gcb_cache) {
- up_write(&rrpc_lock);
- return -ENOMEM;
- }
-
- rrpc_rq_cache = kmem_cache_create("rrpc_rq",
- sizeof(struct nvm_rq) + sizeof(struct rrpc_rq),
- 0, 0, NULL);
- if (!rrpc_rq_cache) {
- kmem_cache_destroy(rrpc_gcb_cache);
- up_write(&rrpc_lock);
- return -ENOMEM;
- }
- }
- up_write(&rrpc_lock);
-
- rrpc->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
- if (!rrpc->page_pool)
- return -ENOMEM;
-
- rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->geo.nr_luns,
- rrpc_gcb_cache);
- if (!rrpc->gcb_pool)
- return -ENOMEM;
-
- rrpc->rq_pool = mempool_create_slab_pool(64, rrpc_rq_cache);
- if (!rrpc->rq_pool)
- return -ENOMEM;
-
- spin_lock_init(&rrpc->inflights.lock);
- INIT_LIST_HEAD(&rrpc->inflights.reqs);
-
- return 0;
-}
-
-static void rrpc_core_free(struct rrpc *rrpc)
-{
- mempool_destroy(rrpc->page_pool);
- mempool_destroy(rrpc->gcb_pool);
- mempool_destroy(rrpc->rq_pool);
-}
-
-static void rrpc_luns_free(struct rrpc *rrpc)
-{
- struct rrpc_lun *rlun;
- int i;
-
- if (!rrpc->luns)
- return;
-
- for (i = 0; i < rrpc->nr_luns; i++) {
- rlun = &rrpc->luns[i];
- vfree(rlun->blocks);
- }
-
- kfree(rrpc->luns);
-}
-
-static int rrpc_bb_discovery(struct nvm_tgt_dev *dev, struct rrpc_lun *rlun)
-{
- struct nvm_geo *geo = &dev->geo;
- struct rrpc_block *rblk;
- struct ppa_addr ppa;
- u8 *blks;
- int nr_blks;
- int i;
- int ret;
-
- if (!dev->parent->ops->get_bb_tbl)
- return 0;
-
- nr_blks = geo->blks_per_lun * geo->plane_mode;
- blks = kmalloc(nr_blks, GFP_KERNEL);
- if (!blks)
- return -ENOMEM;
-
- ppa.ppa = 0;
- ppa.g.ch = rlun->bppa.g.ch;
- ppa.g.lun = rlun->bppa.g.lun;
-
- ret = nvm_get_tgt_bb_tbl(dev, ppa, blks);
- if (ret) {
- pr_err("rrpc: could not get BB table\n");
- goto out;
- }
-
- nr_blks = nvm_bb_tbl_fold(dev->parent, blks, nr_blks);
- if (nr_blks < 0) {
- ret = nr_blks;
- goto out;
- }
-
- for (i = 0; i < nr_blks; i++) {
- if (blks[i] == NVM_BLK_T_FREE)
- continue;
-
- rblk = &rlun->blocks[i];
- list_move_tail(&rblk->list, &rlun->bb_list);
- rblk->state = NVM_BLK_ST_BAD;
- rlun->nr_free_blocks--;
- }
-
-out:
- kfree(blks);
- return ret;
-}
-
-static void rrpc_set_lun_ppa(struct rrpc_lun *rlun, struct ppa_addr ppa)
-{
- rlun->bppa.ppa = 0;
- rlun->bppa.g.ch = ppa.g.ch;
- rlun->bppa.g.lun = ppa.g.lun;
-}
-
-static int rrpc_luns_init(struct rrpc *rrpc, struct ppa_addr *luns)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct nvm_geo *geo = &dev->geo;
- struct rrpc_lun *rlun;
- int i, j, ret = -EINVAL;
-
- if (geo->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
- pr_err("rrpc: number of pages per block too high.");
- return -EINVAL;
- }
-
- spin_lock_init(&rrpc->rev_lock);
-
- rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),
- GFP_KERNEL);
- if (!rrpc->luns)
- return -ENOMEM;
-
- /* 1:1 mapping */
- for (i = 0; i < rrpc->nr_luns; i++) {
- rlun = &rrpc->luns[i];
- rlun->id = i;
- rrpc_set_lun_ppa(rlun, luns[i]);
- rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
- geo->blks_per_lun);
- if (!rlun->blocks) {
- ret = -ENOMEM;
- goto err;
- }
-
- INIT_LIST_HEAD(&rlun->free_list);
- INIT_LIST_HEAD(&rlun->used_list);
- INIT_LIST_HEAD(&rlun->bb_list);
-
- for (j = 0; j < geo->blks_per_lun; j++) {
- struct rrpc_block *rblk = &rlun->blocks[j];
-
- rblk->id = j;
- rblk->rlun = rlun;
- rblk->state = NVM_BLK_T_FREE;
- INIT_LIST_HEAD(&rblk->prio);
- INIT_LIST_HEAD(&rblk->list);
- spin_lock_init(&rblk->lock);
-
- list_add_tail(&rblk->list, &rlun->free_list);
- }
-
- rlun->rrpc = rrpc;
- rlun->nr_free_blocks = geo->blks_per_lun;
- rlun->reserved_blocks = 2; /* for GC only */
-
- INIT_LIST_HEAD(&rlun->prio_list);
- INIT_LIST_HEAD(&rlun->wblk_list);
-
- INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
- spin_lock_init(&rlun->lock);
-
- if (rrpc_bb_discovery(dev, rlun))
- goto err;
-
- }
-
- return 0;
-err:
- return ret;
-}
-
-/* returns 0 on success and stores the beginning address in *begin */
-static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- sector_t size = rrpc->nr_sects * dev->geo.sec_size;
- int ret;
-
- size >>= 9;
-
- ret = nvm_get_area(dev, begin, size);
- if (!ret)
- *begin >>= (ilog2(dev->geo.sec_size) - 9);
-
- return ret;
-}
-
-static void rrpc_area_free(struct rrpc *rrpc)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- sector_t begin = rrpc->soffset << (ilog2(dev->geo.sec_size) - 9);
-
- nvm_put_area(dev, begin);
-}
-
-static void rrpc_free(struct rrpc *rrpc)
-{
- rrpc_gc_free(rrpc);
- rrpc_map_free(rrpc);
- rrpc_core_free(rrpc);
- rrpc_luns_free(rrpc);
- rrpc_area_free(rrpc);
-
- kfree(rrpc);
-}
-
-static void rrpc_exit(void *private)
-{
- struct rrpc *rrpc = private;
-
- del_timer(&rrpc->gc_timer);
-
- flush_workqueue(rrpc->krqd_wq);
- flush_workqueue(rrpc->kgc_wq);
-
- rrpc_free(rrpc);
-}
-
-static sector_t rrpc_capacity(void *private)
-{
- struct rrpc *rrpc = private;
- struct nvm_tgt_dev *dev = rrpc->dev;
- sector_t reserved, provisioned;
-
- /* cur, gc, and two emergency blocks for each lun */
- reserved = rrpc->nr_luns * dev->geo.sec_per_blk * 4;
- provisioned = rrpc->nr_sects - reserved;
-
- if (reserved > rrpc->nr_sects) {
- pr_err("rrpc: not enough space available to expose storage.\n");
- return 0;
- }
-
- sector_div(provisioned, 10);
- return provisioned * 9 * NR_PHY_IN_LOG;
-}
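
The capacity calculation holds back four blocks per LUN (cur, gc_cur, and two emergency blocks) and then exposes only nine tenths of the remaining sectors, scaled to 512-byte units by NR_PHY_IN_LOG. Worked numbers with an invented geometry:

	#include <stdio.h>

	int main(void)
	{
		long nr_luns = 8, sec_per_blk = 4096;
		long nr_sects = 8L * 1024 * 4096;	/* 8 LUNs x 1024 blks */

		long reserved = nr_luns * sec_per_blk * 4;
		long provisioned = nr_sects - reserved;
		/* NR_PHY_IN_LOG = 4096 / 512 = 8 */
		long exposed = provisioned / 10 * 9 * 8;

		printf("reserved=%ld exposed=%ld sectors\n", reserved, exposed);
		return 0;
	}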
-
-/*
- * Looks up the logical address in the reverse trans map and checks whether it
- * is still valid by comparing the physical address against the forward mapping
- * of the reverse-mapped logical address. A matching pair means the page is
- * live; otherwise the page is marked invalid.
- */
-static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- int offset;
- struct rrpc_addr *laddr;
- u64 bpaddr, paddr, pladdr;
-
- bpaddr = block_to_rel_addr(rrpc, rblk);
- for (offset = 0; offset < dev->geo.sec_per_blk; offset++) {
- paddr = bpaddr + offset;
-
- pladdr = rrpc->rev_trans_map[paddr].addr;
- if (pladdr == ADDR_EMPTY)
- continue;
-
- laddr = &rrpc->trans_map[pladdr];
-
- if (paddr == laddr->addr) {
- laddr->rblk = rblk;
- } else {
- set_bit(offset, rblk->invalid_pages);
- rblk->nr_invalid_pages++;
- }
- }
-}
-
-static int rrpc_blocks_init(struct rrpc *rrpc)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct rrpc_lun *rlun;
- struct rrpc_block *rblk;
- int lun_iter, blk_iter;
-
- for (lun_iter = 0; lun_iter < rrpc->nr_luns; lun_iter++) {
- rlun = &rrpc->luns[lun_iter];
-
- for (blk_iter = 0; blk_iter < dev->geo.blks_per_lun;
- blk_iter++) {
- rblk = &rlun->blocks[blk_iter];
- rrpc_block_map_update(rrpc, rblk);
- }
- }
-
- return 0;
-}
-
-static int rrpc_luns_configure(struct rrpc *rrpc)
-{
- struct rrpc_lun *rlun;
- struct rrpc_block *rblk;
- int i;
-
- for (i = 0; i < rrpc->nr_luns; i++) {
- rlun = &rrpc->luns[i];
-
- rblk = rrpc_get_blk(rrpc, rlun, 0);
- if (!rblk)
- goto err;
- rrpc_set_lun_cur(rlun, rblk, &rlun->cur);
-
- /* Emergency gc block */
- rblk = rrpc_get_blk(rrpc, rlun, 1);
- if (!rblk)
- goto err;
- rrpc_set_lun_cur(rlun, rblk, &rlun->gc_cur);
- }
-
- return 0;
-err:
- rrpc_put_blks(rrpc);
- return -EINVAL;
-}
-
-static struct nvm_tgt_type tt_rrpc;
-
-static void *rrpc_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
- int flags)
-{
- struct request_queue *bqueue = dev->q;
- struct request_queue *tqueue = tdisk->queue;
- struct nvm_geo *geo = &dev->geo;
- struct rrpc *rrpc;
- sector_t soffset;
- int ret;
-
- if (!(dev->identity.dom & NVM_RSP_L2P)) {
- pr_err("nvm: rrpc: device does not support l2p (%x)\n",
- dev->identity.dom);
- return ERR_PTR(-EINVAL);
- }
-
- rrpc = kzalloc(sizeof(struct rrpc), GFP_KERNEL);
- if (!rrpc)
- return ERR_PTR(-ENOMEM);
-
- rrpc->dev = dev;
- rrpc->disk = tdisk;
-
- bio_list_init(&rrpc->requeue_bios);
- spin_lock_init(&rrpc->bio_lock);
- INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);
-
- rrpc->nr_luns = geo->nr_luns;
- rrpc->nr_sects = (unsigned long long)geo->sec_per_lun * rrpc->nr_luns;
-
- /* simple round-robin strategy */
- atomic_set(&rrpc->next_lun, -1);
-
- ret = rrpc_area_init(rrpc, &soffset);
- if (ret < 0) {
- pr_err("nvm: rrpc: could not initialize area\n");
- return ERR_PTR(ret);
- }
- rrpc->soffset = soffset;
-
- ret = rrpc_luns_init(rrpc, dev->luns);
- if (ret) {
- pr_err("nvm: rrpc: could not initialize luns\n");
- goto err;
- }
-
- ret = rrpc_core_init(rrpc);
- if (ret) {
- pr_err("nvm: rrpc: could not initialize core\n");
- goto err;
- }
-
- ret = rrpc_map_init(rrpc);
- if (ret) {
- pr_err("nvm: rrpc: could not initialize maps\n");
- goto err;
- }
-
- ret = rrpc_blocks_init(rrpc);
- if (ret) {
- pr_err("nvm: rrpc: could not initialize state for blocks\n");
- goto err;
- }
-
- ret = rrpc_luns_configure(rrpc);
- if (ret) {
- pr_err("nvm: rrpc: not enough blocks available in LUNs.\n");
- goto err;
- }
-
- ret = rrpc_gc_init(rrpc);
- if (ret) {
- pr_err("nvm: rrpc: could not initialize gc\n");
- goto err;
- }
-
- /* inherit the size from the underlying device */
- blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
- blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));
-
- pr_info("nvm: rrpc initialized with %u luns and %llu pages.\n",
- rrpc->nr_luns, (unsigned long long)rrpc->nr_sects);
-
- mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
-
- return rrpc;
-err:
- rrpc_free(rrpc);
- return ERR_PTR(ret);
-}
-
-/* round robin, page-based FTL, and cost-based GC */
-static struct nvm_tgt_type tt_rrpc = {
- .name = "rrpc",
- .version = {1, 0, 0},
-
- .make_rq = rrpc_make_rq,
- .capacity = rrpc_capacity,
-
- .init = rrpc_init,
- .exit = rrpc_exit,
-};
-
-static int __init rrpc_module_init(void)
-{
- return nvm_register_tgt_type(&tt_rrpc);
-}
-
-static void rrpc_module_exit(void)
-{
- nvm_unregister_tgt_type(&tt_rrpc);
-}
-
-module_init(rrpc_module_init);
-module_exit(rrpc_module_exit);
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Block-Device Target for Open-Channel SSDs");
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
deleted file mode 100644
index fdb6ff902903..000000000000
--- a/drivers/lightnvm/rrpc.h
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- * Copyright (C) 2015 IT University of Copenhagen
- * Initial release: Matias Bjorling <m@bjorling.me>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
- */
-
-#ifndef RRPC_H_
-#define RRPC_H_
-
-#include <linux/blkdev.h>
-#include <linux/blk-mq.h>
-#include <linux/bio.h>
-#include <linux/module.h>
-#include <linux/kthread.h>
-#include <linux/vmalloc.h>
-
-#include <linux/lightnvm.h>
-
-/* Only run GC if less than 1/X blocks are free */
-#define GC_LIMIT_INVERSE 10
-#define GC_TIME_SECS 100
-
-#define RRPC_SECTOR (512)
-#define RRPC_EXPOSED_PAGE_SIZE (4096)
-
-#define NR_PHY_IN_LOG (RRPC_EXPOSED_PAGE_SIZE / RRPC_SECTOR)
-
-struct rrpc_inflight {
- struct list_head reqs;
- spinlock_t lock;
-};
-
-struct rrpc_inflight_rq {
- struct list_head list;
- sector_t l_start;
- sector_t l_end;
-};
-
-struct rrpc_rq {
- struct rrpc_inflight_rq inflight_rq;
- unsigned long flags;
-};
-
-struct rrpc_block {
- int id; /* id inside of LUN */
- struct rrpc_lun *rlun;
-
- struct list_head prio; /* LUN CG list */
- struct list_head list; /* LUN free, used, bb list */
-
-#define MAX_INVALID_PAGES_STORAGE 8
- /* Bitmap for invalid page entries */
- unsigned long invalid_pages[MAX_INVALID_PAGES_STORAGE];
- /* points to the next writable page within a block */
- unsigned int next_page;
- /* number of pages that are invalid, wrt host page size */
- unsigned int nr_invalid_pages;
-
- int state;
-
- spinlock_t lock;
- atomic_t data_cmnt_size; /* data pages committed to stable storage */
-};
-
-struct rrpc_lun {
- struct rrpc *rrpc;
-
- int id;
- struct ppa_addr bppa;
-
- struct rrpc_block *cur, *gc_cur;
- struct rrpc_block *blocks; /* Reference to block allocation */
-
- struct list_head prio_list; /* Blocks that may be GC'ed */
- struct list_head wblk_list; /* Queued blocks to be written to */
-
- /* lun block lists */
- struct list_head used_list; /* In-use blocks */
- struct list_head free_list; /* Unused blocks, i.e. released
- * and ready for use
- */
- struct list_head bb_list; /* Bad blocks. Mutually exclusive with
- * free_list and used_list
- */
- unsigned int nr_free_blocks; /* Number of unused blocks */
-
- struct work_struct ws_gc;
-
- int reserved_blocks;
-
- spinlock_t lock;
-};
-
-struct rrpc {
- struct nvm_tgt_dev *dev;
- struct gendisk *disk;
-
- sector_t soffset; /* logical sector offset */
-
- int nr_luns;
- struct rrpc_lun *luns;
-
- /* calculated values */
- unsigned long long nr_sects;
-
- /* Write strategy variables. Move these into a separate structure
- * for each strategy
- */
- atomic_t next_lun; /* Whenever a page is written, this is updated
- * to point to the next write lun
- */
-
- spinlock_t bio_lock;
- struct bio_list requeue_bios;
- struct work_struct ws_requeue;
-
- /* Simple translation map of logical addresses to physical addresses.
- * The logical addresses are known to the host system, while the physical
- * addresses are used when writing to the disk block device.
- */
- struct rrpc_addr *trans_map;
- /* also store a reverse map for garbage collection */
- struct rrpc_rev_addr *rev_trans_map;
- spinlock_t rev_lock;
-
- struct rrpc_inflight inflights;
-
- mempool_t *addr_pool;
- mempool_t *page_pool;
- mempool_t *gcb_pool;
- mempool_t *rq_pool;
-
- struct timer_list gc_timer;
- struct workqueue_struct *krqd_wq;
- struct workqueue_struct *kgc_wq;
-};
-
-struct rrpc_block_gc {
- struct rrpc *rrpc;
- struct rrpc_block *rblk;
- struct work_struct ws_gc;
-};
-
-/* Logical to physical mapping */
-struct rrpc_addr {
- u64 addr;
- struct rrpc_block *rblk;
-};
-
-/* Physical to logical mapping */
-struct rrpc_rev_addr {
- u64 addr;
-};
-
-static inline struct ppa_addr rrpc_linear_to_generic_addr(struct nvm_geo *geo,
- struct ppa_addr r)
-{
- struct ppa_addr l;
- int secs, pgs;
- sector_t ppa = r.ppa;
-
- l.ppa = 0;
-
- div_u64_rem(ppa, geo->sec_per_pg, &secs);
- l.g.sec = secs;
-
- sector_div(ppa, geo->sec_per_pg);
- div_u64_rem(ppa, geo->pgs_per_blk, &pgs);
- l.g.pg = pgs;
-
- return l;
-}
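
The linear-to-generic conversion peels fields off the flat address with successive mod/div by the geometry strides: sector first, then page. A worked example with toy strides:

	#include <stdio.h>

	int main(void)
	{
		unsigned long ppa = 1234;	/* linear sector address */
		int sec_per_pg = 4, pgs_per_blk = 64;	/* hypothetical */

		int sec = ppa % sec_per_pg;		/* 1234 % 4  = 2 */
		int pg  = (ppa / sec_per_pg) % pgs_per_blk; /* 308 % 64 = 52 */

		printf("sec=%d pg=%d\n", sec, pg);
		return 0;
	}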
-
-static inline struct ppa_addr rrpc_recov_addr(struct nvm_tgt_dev *dev, u64 pba)
-{
- return linear_to_generic_addr(&dev->geo, pba);
-}
-
-static inline u64 rrpc_blk_to_ppa(struct rrpc *rrpc, struct rrpc_block *rblk)
-{
- struct nvm_tgt_dev *dev = rrpc->dev;
- struct nvm_geo *geo = &dev->geo;
- struct rrpc_lun *rlun = rblk->rlun;
-
- return (rlun->id * geo->sec_per_lun) + (rblk->id * geo->sec_per_blk);
-}
-
-static inline sector_t rrpc_get_laddr(struct bio *bio)
-{
- return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
-}
-
-static inline unsigned int rrpc_get_pages(struct bio *bio)
-{
- return bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
-}
-
-static inline sector_t rrpc_get_sector(sector_t laddr)
-{
- return laddr * NR_PHY_IN_LOG;
-}
-
-static inline int request_intersects(struct rrpc_inflight_rq *r,
- sector_t laddr_start, sector_t laddr_end)
-{
- return (laddr_end >= r->l_start) && (laddr_start <= r->l_end);
-}
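
request_intersects() above is the standard closed-interval overlap
test: two inclusive ranges intersect iff each one starts no later than
the other ends. A standalone model with made-up range values:

#include <assert.h>

static int ranges_intersect(long a_start, long a_end,
			    long b_start, long b_end)
{
	return (b_end >= a_start) && (b_start <= a_end);
}

int main(void)
{
	assert(ranges_intersect(0, 7, 7, 9));	/* shared endpoint counts */
	assert(!ranges_intersect(0, 7, 8, 9));	/* adjacent but disjoint */
	return 0;
}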
-
-static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
- unsigned int pages, struct rrpc_inflight_rq *r)
-{
- sector_t laddr_end = laddr + pages - 1;
- struct rrpc_inflight_rq *rtmp;
-
- WARN_ON(irqs_disabled());
-
- spin_lock_irq(&rrpc->inflights.lock);
- list_for_each_entry(rtmp, &rrpc->inflights.reqs, list) {
- if (unlikely(request_intersects(rtmp, laddr, laddr_end))) {
- /* existing, overlapping request, come back later */
- spin_unlock_irq(&rrpc->inflights.lock);
- return 1;
- }
- }
-
- r->l_start = laddr;
- r->l_end = laddr_end;
-
- list_add_tail(&r->list, &rrpc->inflights.reqs);
- spin_unlock_irq(&rrpc->inflights.lock);
- return 0;
-}
-
-static inline int rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
- unsigned int pages,
- struct rrpc_inflight_rq *r)
-{
- BUG_ON((laddr + pages) > rrpc->nr_sects);
-
- return __rrpc_lock_laddr(rrpc, laddr, pages, r);
-}
-
-static inline struct rrpc_inflight_rq *rrpc_get_inflight_rq(struct nvm_rq *rqd)
-{
- struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
-
- return &rrqd->inflight_rq;
-}
-
-static inline int rrpc_lock_rq(struct rrpc *rrpc, struct bio *bio,
- struct nvm_rq *rqd)
-{
- sector_t laddr = rrpc_get_laddr(bio);
- unsigned int pages = rrpc_get_pages(bio);
- struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
-
- return rrpc_lock_laddr(rrpc, laddr, pages, r);
-}
-
-static inline void rrpc_unlock_laddr(struct rrpc *rrpc,
- struct rrpc_inflight_rq *r)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&rrpc->inflights.lock, flags);
- list_del_init(&r->list);
- spin_unlock_irqrestore(&rrpc->inflights.lock, flags);
-}
-
-static inline void rrpc_unlock_rq(struct rrpc *rrpc, struct nvm_rq *rqd)
-{
- struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
- uint8_t pages = rqd->nr_ppas;
-
- BUG_ON((r->l_start + pages) > rrpc->nr_sects);
-
- rrpc_unlock_laddr(rrpc, r);
-}
-
-#endif /* RRPC_H_ */
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 899ec1f4c833..346e6f5f77be 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -1245,10 +1245,10 @@ static ssize_t smu_read(struct file *file, char __user *buf,
return -EBADFD;
}
-static unsigned int smu_fpoll(struct file *file, poll_table *wait)
+static __poll_t smu_fpoll(struct file *file, poll_table *wait)
{
struct smu_private *pp = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
unsigned long flags;
if (pp == 0)
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index c4c2b3b85ebc..e8b29fc532e1 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -2157,11 +2157,11 @@ pmu_write(struct file *file, const char __user *buf,
return 0;
}
-static unsigned int
+static __poll_t
pmu_fpoll(struct file *filp, poll_table *wait)
{
struct pmu_private *pp = filp->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
unsigned long flags;
if (pp == 0)
diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c
index 93f3d4d61fa7..f84730d63b1f 100644
--- a/drivers/mailbox/mailbox-test.c
+++ b/drivers/mailbox/mailbox-test.c
@@ -235,7 +235,7 @@ kfree_err:
return ret;
}
-static unsigned int
+static __poll_t
mbox_test_message_poll(struct file *filp, struct poll_table_struct *wait)
{
struct mbox_test_device *tdev = filp->private_data;
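
This hunk, like the smu and via-pmu ones above, is a mechanical
conversion of a poll handler to the sparse-checked __poll_t bitmask. A
sketch of the resulting handler shape; example_dev, waitq and
data_ready() are invented stand-ins, not any of these drivers' real
fields:

static __poll_t example_poll(struct file *filp, poll_table *wait)
{
	struct example_dev *dev = filp->private_data;
	__poll_t mask = 0;

	poll_wait(filp, &dev->waitq, wait);	/* register for wakeups */
	if (data_ready(dev))
		mask |= EPOLLIN | EPOLLRDNORM;	/* readable without blocking */
	return mask;
}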
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 83b9362be09c..2c8ac3688815 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -269,6 +269,13 @@ config DM_BIO_PRISON
source "drivers/md/persistent-data/Kconfig"
+config DM_UNSTRIPED
+ tristate "Unstriped target"
+ depends on BLK_DEV_DM
+ ---help---
+ Unstripes I/O so it is issued solely on a single drive in a HW
+ RAID0 or dm-striped target.
+
config DM_CRYPT
tristate "Crypt target support"
depends on BLK_DEV_DM
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index f701bb211783..63255f3ebd97 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -43,6 +43,7 @@ obj-$(CONFIG_BCACHE) += bcache/
obj-$(CONFIG_BLK_DEV_MD) += md-mod.o
obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o
obj-$(CONFIG_BLK_DEV_DM_BUILTIN) += dm-builtin.o
+obj-$(CONFIG_DM_UNSTRIPED) += dm-unstripe.o
obj-$(CONFIG_DM_BUFIO) += dm-bufio.o
obj-$(CONFIG_DM_BIO_PRISON) += dm-bio-prison.o
obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index a0cc1bc6d884..6cc6c0f9c3a9 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -525,15 +525,21 @@ struct open_bucket {
/*
* We keep multiple buckets open for writes, and try to segregate different
- * write streams for better cache utilization: first we look for a bucket where
- * the last write to it was sequential with the current write, and failing that
- * we look for a bucket that was last used by the same task.
+ * write streams for better cache utilization: first we try to segregate
+ * flash-only volume write streams from those of cached devices, second we
+ * look for a bucket where the last write to it was sequential with the
+ * current write, and failing that we look for a bucket that was last used
+ * by the same task.
*
 * The idea is that if you've got multiple tasks pulling data into the cache at the
* same time, you'll get better cache utilization if you try to segregate their
* data and preserve locality.
*
- * For example, say you've starting Firefox at the same time you're copying a
+ * For example, dirty sectors of a flash-only volume are not reclaimable; if
+ * they are mixed into buckets with dirty sectors of a cached device, those
+ * buckets stay marked dirty and won't be reclaimed, even after the cached
+ * device's dirty data has been written back to the backing device.
+ *
+ * And say you've started Firefox at the same time you're copying a
* bunch of files. Firefox will likely end up being fairly hot and stay in the
* cache awhile, but the data you copied might not be; if you wrote all that
* data to the same buckets it'd get invalidated at the same time.
@@ -550,7 +556,10 @@ static struct open_bucket *pick_data_bucket(struct cache_set *c,
struct open_bucket *ret, *ret_task = NULL;
list_for_each_entry_reverse(ret, &c->data_buckets, list)
- if (!bkey_cmp(&ret->key, search))
+ if (UUID_FLASH_ONLY(&c->uuids[KEY_INODE(&ret->key)]) !=
+ UUID_FLASH_ONLY(&c->uuids[KEY_INODE(search)]))
+ continue;
+ else if (!bkey_cmp(&ret->key, search))
goto found;
else if (ret->last_write_point == write_point)
ret_task = ret;
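
The new continue branch makes the bucket scan skip any candidate whose
flash-only class differs from the search key's before the usual
sequential/same-task matching. A simplified model of that filter; all
names here are illustrative:

#include <stddef.h>

struct bucket { int flash_only; int key; };

static struct bucket *pick_bucket(struct bucket *v, int n,
				  int want_flash_only, int key)
{
	for (int i = n - 1; i >= 0; i--) {
		if (v[i].flash_only != want_flash_only)
			continue;		/* segregate write streams */
		if (v[i].key == key)
			return &v[i];		/* sequential with last write */
	}
	return NULL;				/* caller falls back */
}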
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 843877e017e1..5e2d4e80198e 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -320,14 +320,15 @@ struct cached_dev {
*/
atomic_t has_dirty;
- struct bch_ratelimit writeback_rate;
- struct delayed_work writeback_rate_update;
-
/*
- * Internal to the writeback code, so read_dirty() can keep track of
- * where it's at.
+ * Set to zero by things that touch the backing volume, except
+ * writeback. Incremented by writeback. Used to determine when to
+ * accelerate idle writeback.
*/
- sector_t last_read;
+ atomic_t backing_idle;
+
+ struct bch_ratelimit writeback_rate;
+ struct delayed_work writeback_rate_update;
/* Limit number of writeback bios in flight */
struct semaphore in_flight;
@@ -336,6 +337,14 @@ struct cached_dev {
struct keybuf writeback_keys;
+ /*
+ * Order the write-half of writeback operations strongly in dispatch
+ * order. (Maintain LBA order; don't allow reads completing out of
+ * order to re-order the writes...)
+ */
+ struct closure_waitlist writeback_ordering_wait;
+ atomic_t writeback_sequence_next;
+
/* For tracking sequential IO */
#define RECENT_IO_BITS 7
#define RECENT_IO (1 << RECENT_IO_BITS)
@@ -488,6 +497,7 @@ struct cache_set {
int caches_loaded;
struct bcache_device **devices;
+ unsigned devices_max_used;
struct list_head cached_devs;
uint64_t cached_dev_sectors;
struct closure caching;
@@ -852,7 +862,7 @@ static inline void wake_up_allocators(struct cache_set *c)
/* Forward declarations */
-void bch_count_io_errors(struct cache *, blk_status_t, const char *);
+void bch_count_io_errors(struct cache *, blk_status_t, int, const char *);
void bch_bbio_count_io_errors(struct cache_set *, struct bio *,
blk_status_t, const char *);
void bch_bbio_endio(struct cache_set *, struct bio *, blk_status_t,
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 81e8dc3dbe5e..bf3a48aa9a9a 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -419,7 +419,7 @@ static void do_btree_node_write(struct btree *b)
SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
bset_sector_offset(&b->keys, i));
- if (!bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
+ if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
int j;
struct bio_vec *bv;
void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
@@ -432,6 +432,7 @@ static void do_btree_node_write(struct btree *b)
continue_at(cl, btree_node_write_done, NULL);
} else {
+ /* No problem for multipage bvecs since the bio was just allocated */
b->bio->bi_vcnt = 0;
bch_bio_map(b->bio, i);
@@ -1678,7 +1679,7 @@ static void bch_btree_gc_finish(struct cache_set *c)
/* don't reclaim buckets to which writeback keys point */
rcu_read_lock();
- for (i = 0; i < c->nr_uuids; i++) {
+ for (i = 0; i < c->devices_max_used; i++) {
struct bcache_device *d = c->devices[i];
struct cached_dev *dc;
struct keybuf_key *w, *n;
@@ -1803,10 +1804,7 @@ static int bch_gc_thread(void *arg)
int bch_gc_thread_start(struct cache_set *c)
{
c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
- if (IS_ERR(c->gc_thread))
- return PTR_ERR(c->gc_thread);
-
- return 0;
+ return PTR_ERR_OR_ZERO(c->gc_thread);
}
/* Initial partial gc */
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index 1841d0359bac..7f12920c14f7 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -8,6 +8,7 @@
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
+#include <linux/sched/debug.h>
#include "closure.h"
@@ -18,10 +19,6 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
BUG_ON(flags & CLOSURE_GUARD_MASK);
BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR));
- /* Must deliver precisely one wakeup */
- if (r == 1 && (flags & CLOSURE_SLEEPING))
- wake_up_process(cl->task);
-
if (!r) {
if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
atomic_set(&cl->remaining,
@@ -100,28 +97,34 @@ bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
}
EXPORT_SYMBOL(closure_wait);
-/**
- * closure_sync - sleep until a closure has nothing left to wait on
- *
- * Sleeps until the refcount hits 1 - the thread that's running the closure owns
- * the last refcount.
- */
-void closure_sync(struct closure *cl)
+struct closure_syncer {
+ struct task_struct *task;
+ int done;
+};
+
+static void closure_sync_fn(struct closure *cl)
{
- while (1) {
- __closure_start_sleep(cl);
- closure_set_ret_ip(cl);
+ cl->s->done = 1;
+ wake_up_process(cl->s->task);
+}
- if ((atomic_read(&cl->remaining) &
- CLOSURE_REMAINING_MASK) == 1)
- break;
+void __sched __closure_sync(struct closure *cl)
+{
+ struct closure_syncer s = { .task = current };
+ cl->s = &s;
+ continue_at(cl, closure_sync_fn, NULL);
+
+ while (1) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (s.done)
+ break;
schedule();
}
- __closure_end_sleep(cl);
+ __set_current_state(TASK_RUNNING);
}
-EXPORT_SYMBOL(closure_sync);
+EXPORT_SYMBOL(__closure_sync);
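
The rewritten sync path parks the caller behind an on-stack
closure_syncer: the completion callback flips done and wakes the
recorded task, while the waiter re-checks done around every sleep so a
spurious wakeup is harmless. A rough userspace analogy of that
handshake, using a condition variable where the kernel uses
set_current_state()/schedule():

#include <pthread.h>

struct syncer {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};
#define SYNCER_INIT \
	{ PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0 }

static void syncer_complete(struct syncer *s)	/* ~closure_sync_fn() */
{
	pthread_mutex_lock(&s->lock);
	s->done = 1;
	pthread_cond_signal(&s->cond);		/* ~wake_up_process() */
	pthread_mutex_unlock(&s->lock);
}

static void syncer_wait(struct syncer *s)	/* ~__closure_sync() */
{
	pthread_mutex_lock(&s->lock);
	while (!s->done)			/* re-check after each wakeup */
		pthread_cond_wait(&s->cond, &s->lock);
	pthread_mutex_unlock(&s->lock);
}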
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
@@ -168,12 +171,10 @@ static int debug_seq_show(struct seq_file *f, void *data)
cl, (void *) cl->ip, cl->fn, cl->parent,
r & CLOSURE_REMAINING_MASK);
- seq_printf(f, "%s%s%s%s\n",
+ seq_printf(f, "%s%s\n",
test_bit(WORK_STRUCT_PENDING_BIT,
work_data_bits(&cl->work)) ? "Q" : "",
- r & CLOSURE_RUNNING ? "R" : "",
- r & CLOSURE_STACK ? "S" : "",
- r & CLOSURE_SLEEPING ? "Sl" : "");
+ r & CLOSURE_RUNNING ? "R" : "");
if (r & CLOSURE_WAITING)
seq_printf(f, " W %pF\n",
diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
index ccfbea6f9f6b..3b9dfc9962ad 100644
--- a/drivers/md/bcache/closure.h
+++ b/drivers/md/bcache/closure.h
@@ -103,6 +103,7 @@
*/
struct closure;
+struct closure_syncer;
typedef void (closure_fn) (struct closure *);
struct closure_waitlist {
@@ -115,10 +116,6 @@ enum closure_state {
* the thread that owns the closure, and cleared by the thread that's
* waking up the closure.
*
- * CLOSURE_SLEEPING: Must be set before a thread uses a closure to sleep
- * - indicates that cl->task is valid and closure_put() may wake it up.
- * Only set or cleared by the thread that owns the closure.
- *
* The rest are for debugging and don't affect behaviour:
*
* CLOSURE_RUNNING: Set when a closure is running (i.e. by
@@ -128,22 +125,16 @@ enum closure_state {
* continue_at() and closure_return() clear it for you, if you're doing
* something unusual you can use closure_set_dead() which also helps
* annotate where references are being transferred.
- *
- * CLOSURE_STACK: Sanity check - remaining should never hit 0 on a
- * closure with this flag set
*/
- CLOSURE_BITS_START = (1 << 23),
- CLOSURE_DESTRUCTOR = (1 << 23),
- CLOSURE_WAITING = (1 << 25),
- CLOSURE_SLEEPING = (1 << 27),
- CLOSURE_RUNNING = (1 << 29),
- CLOSURE_STACK = (1 << 31),
+ CLOSURE_BITS_START = (1U << 26),
+ CLOSURE_DESTRUCTOR = (1U << 26),
+ CLOSURE_WAITING = (1U << 28),
+ CLOSURE_RUNNING = (1U << 30),
};
#define CLOSURE_GUARD_MASK \
- ((CLOSURE_DESTRUCTOR|CLOSURE_WAITING|CLOSURE_SLEEPING| \
- CLOSURE_RUNNING|CLOSURE_STACK) << 1)
+ ((CLOSURE_DESTRUCTOR|CLOSURE_WAITING|CLOSURE_RUNNING) << 1)
#define CLOSURE_REMAINING_MASK (CLOSURE_BITS_START - 1)
#define CLOSURE_REMAINING_INITIALIZER (1|CLOSURE_RUNNING)
@@ -152,7 +143,7 @@ struct closure {
union {
struct {
struct workqueue_struct *wq;
- struct task_struct *task;
+ struct closure_syncer *s;
struct llist_node list;
closure_fn *fn;
};
@@ -178,7 +169,19 @@ void closure_sub(struct closure *cl, int v);
void closure_put(struct closure *cl);
void __closure_wake_up(struct closure_waitlist *list);
bool closure_wait(struct closure_waitlist *list, struct closure *cl);
-void closure_sync(struct closure *cl);
+void __closure_sync(struct closure *cl);
+
+/**
+ * closure_sync - sleep until a closure has nothing left to wait on
+ *
+ * Sleeps until the refcount hits 1 - the thread that's running the closure owns
+ * the last refcount.
+ */
+static inline void closure_sync(struct closure *cl)
+{
+ if ((atomic_read(&cl->remaining) & CLOSURE_REMAINING_MASK) != 1)
+ __closure_sync(cl);
+}
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
@@ -215,24 +218,6 @@ static inline void closure_set_waiting(struct closure *cl, unsigned long f)
#endif
}
-static inline void __closure_end_sleep(struct closure *cl)
-{
- __set_current_state(TASK_RUNNING);
-
- if (atomic_read(&cl->remaining) & CLOSURE_SLEEPING)
- atomic_sub(CLOSURE_SLEEPING, &cl->remaining);
-}
-
-static inline void __closure_start_sleep(struct closure *cl)
-{
- closure_set_ip(cl);
- cl->task = current;
- set_current_state(TASK_UNINTERRUPTIBLE);
-
- if (!(atomic_read(&cl->remaining) & CLOSURE_SLEEPING))
- atomic_add(CLOSURE_SLEEPING, &cl->remaining);
-}
-
static inline void closure_set_stopped(struct closure *cl)
{
atomic_sub(CLOSURE_RUNNING, &cl->remaining);
@@ -241,7 +226,6 @@ static inline void closure_set_stopped(struct closure *cl)
static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
struct workqueue_struct *wq)
{
- BUG_ON(object_is_on_stack(cl));
closure_set_ip(cl);
cl->fn = fn;
cl->wq = wq;
@@ -300,7 +284,7 @@ static inline void closure_init(struct closure *cl, struct closure *parent)
static inline void closure_init_stack(struct closure *cl)
{
memset(cl, 0, sizeof(struct closure));
- atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER|CLOSURE_STACK);
+ atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
}
/**
@@ -322,6 +306,8 @@ static inline void closure_wake_up(struct closure_waitlist *list)
* This is because after calling continue_at() you no longer have a ref on @cl,
* and whatever @cl owns may be freed out from under you - a running closure fn
* has a ref on its own closure which continue_at() drops.
+ *
+ * Note you are expected to immediately return after using this macro.
*/
#define continue_at(_cl, _fn, _wq) \
do { \
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index c7a02c4900da..af89408befe8 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -116,7 +116,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
return;
check->bi_opf = REQ_OP_READ;
- if (bio_alloc_pages(check, GFP_NOIO))
+ if (bch_bio_alloc_pages(check, GFP_NOIO))
goto out_put;
submit_bio_wait(check);
@@ -251,8 +251,7 @@ void bch_debug_exit(void)
int __init bch_debug_init(struct kobject *kobj)
{
- int ret = 0;
-
debug = debugfs_create_dir("bcache", NULL);
- return ret;
+
+ return IS_ERR_OR_NULL(debug);
}
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index fac97ec2d0e2..a783c5a41ff1 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -51,7 +51,10 @@ void bch_submit_bbio(struct bio *bio, struct cache_set *c,
/* IO errors */
-void bch_count_io_errors(struct cache *ca, blk_status_t error, const char *m)
+void bch_count_io_errors(struct cache *ca,
+ blk_status_t error,
+ int is_read,
+ const char *m)
{
/*
* The halflife of an error is:
@@ -94,8 +97,9 @@ void bch_count_io_errors(struct cache *ca, blk_status_t error, const char *m)
errors >>= IO_ERROR_SHIFT;
if (errors < ca->set->error_limit)
- pr_err("%s: IO error on %s, recovering",
- bdevname(ca->bdev, buf), m);
+ pr_err("%s: IO error on %s%s",
+ bdevname(ca->bdev, buf), m,
+ is_read ? ", recovering." : ".");
else
bch_cache_set_error(ca->set,
"%s: too many IO errors %s",
@@ -108,6 +112,7 @@ void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
{
struct bbio *b = container_of(bio, struct bbio, bio);
struct cache *ca = PTR_CACHE(c, &b->key, 0);
+ int is_read = (bio_data_dir(bio) == READ ? 1 : 0);
unsigned threshold = op_is_write(bio_op(bio))
? c->congested_write_threshold_us
@@ -129,7 +134,7 @@ void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
atomic_inc(&c->congested);
}
- bch_count_io_errors(ca, error, m);
+ bch_count_io_errors(ca, error, is_read, m);
}
void bch_bbio_endio(struct cache_set *c, struct bio *bio,
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index d50c1c97da68..a24c3a95b2c0 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -162,7 +162,7 @@ static void read_moving(struct cache_set *c)
bio_set_op_attrs(bio, REQ_OP_READ, 0);
bio->bi_end_io = read_moving_endio;
- if (bio_alloc_pages(bio, GFP_KERNEL))
+ if (bch_bio_alloc_pages(bio, GFP_KERNEL))
goto err;
trace_bcache_gc_copy(&w->key);
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 643c3021624f..1a46b41dac70 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -576,6 +576,7 @@ static void cache_lookup(struct closure *cl)
{
struct search *s = container_of(cl, struct search, iop.cl);
struct bio *bio = &s->bio.bio;
+ struct cached_dev *dc;
int ret;
bch_btree_op_init(&s->op, -1);
@@ -588,6 +589,27 @@ static void cache_lookup(struct closure *cl)
return;
}
+ /*
+ * We might encounter an error when searching the btree. If that
+ * happens, ret is negative, and we should not recover data from the
+ * backing device (when the cache device is dirty) because we don't
+ * know whether the bkeys covered by the read request are all clean.
+ *
+ * When that happens, s->iop.status still holds its initial value
+ * from before s->bio.bio was submitted.
+ */
+ if (ret < 0) {
+ BUG_ON(ret == -EINTR);
+ if (s->d && s->d->c &&
+ !UUID_FLASH_ONLY(&s->d->c->uuids[s->d->id])) {
+ dc = container_of(s->d, struct cached_dev, disk);
+ if (dc && atomic_read(&dc->has_dirty))
+ s->recoverable = false;
+ }
+ if (!s->iop.status)
+ s->iop.status = BLK_STS_IOERR;
+ }
+
closure_return(cl);
}
@@ -611,8 +633,8 @@ static void request_endio(struct bio *bio)
static void bio_complete(struct search *s)
{
if (s->orig_bio) {
- struct request_queue *q = s->orig_bio->bi_disk->queue;
- generic_end_io_acct(q, bio_data_dir(s->orig_bio),
+ generic_end_io_acct(s->d->disk->queue,
+ bio_data_dir(s->orig_bio),
&s->d->disk->part0, s->start_time);
trace_bcache_request_end(s->d, s->orig_bio);
@@ -841,7 +863,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
cache_bio->bi_private = &s->cl;
bch_bio_map(cache_bio, NULL);
- if (bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
+ if (bch_bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
goto out_put;
if (reada)
@@ -974,6 +996,7 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q,
struct cached_dev *dc = container_of(d, struct cached_dev, disk);
int rw = bio_data_dir(bio);
+ atomic_set(&dc->backing_idle, 0);
generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);
bio_set_dev(bio, dc->bdev);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index b4d28928dec5..133b81225ea9 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -211,7 +211,7 @@ static void write_bdev_super_endio(struct bio *bio)
static void __write_super(struct cache_sb *sb, struct bio *bio)
{
- struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page);
+ struct cache_sb *out = page_address(bio_first_page_all(bio));
unsigned i;
bio->bi_iter.bi_sector = SB_SECTOR;
@@ -274,7 +274,9 @@ static void write_super_endio(struct bio *bio)
{
struct cache *ca = bio->bi_private;
- bch_count_io_errors(ca, bio->bi_status, "writing superblock");
+ /* is_read = 0 */
+ bch_count_io_errors(ca, bio->bi_status, 0,
+ "writing superblock");
closure_put(&ca->set->sb_write);
}
@@ -721,6 +723,9 @@ static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
d->c = c;
c->devices[id] = d;
+ if (id >= c->devices_max_used)
+ c->devices_max_used = id + 1;
+
closure_get(&c->caching);
}
@@ -906,6 +911,12 @@ static void cached_dev_detach_finish(struct work_struct *w)
mutex_lock(&bch_register_lock);
+ cancel_delayed_work_sync(&dc->writeback_rate_update);
+ if (!IS_ERR_OR_NULL(dc->writeback_thread)) {
+ kthread_stop(dc->writeback_thread);
+ dc->writeback_thread = NULL;
+ }
+
memset(&dc->sb.set_uuid, 0, 16);
SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);
@@ -1166,7 +1177,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
dc->bdev->bd_holder = dc;
bio_init(&dc->sb_bio, dc->sb_bio.bi_inline_vecs, 1);
- dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
+ bio_first_bvec_all(&dc->sb_bio)->bv_page = sb_page;
get_page(sb_page);
if (cached_dev_init(dc, sb->block_size << 9))
@@ -1261,7 +1272,7 @@ static int flash_devs_run(struct cache_set *c)
struct uuid_entry *u;
for (u = c->uuids;
- u < c->uuids + c->nr_uuids && !ret;
+ u < c->uuids + c->devices_max_used && !ret;
u++)
if (UUID_FLASH_ONLY(u))
ret = flash_dev_run(c, u);
@@ -1427,7 +1438,7 @@ static void __cache_set_unregister(struct closure *cl)
mutex_lock(&bch_register_lock);
- for (i = 0; i < c->nr_uuids; i++)
+ for (i = 0; i < c->devices_max_used; i++)
if (c->devices[i]) {
if (!UUID_FLASH_ONLY(&c->uuids[i]) &&
test_bit(CACHE_SET_UNREGISTERING, &c->flags)) {
@@ -1490,7 +1501,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
c->bucket_bits = ilog2(sb->bucket_size);
c->block_bits = ilog2(sb->block_size);
c->nr_uuids = bucket_bytes(c) / sizeof(struct uuid_entry);
-
+ c->devices_max_used = 0;
c->btree_pages = bucket_pages(c);
if (c->btree_pages > BTREE_MAX_PAGES)
c->btree_pages = max_t(int, c->btree_pages / 4,
@@ -1810,7 +1821,7 @@ void bch_cache_release(struct kobject *kobj)
free_fifo(&ca->free[i]);
if (ca->sb_bio.bi_inline_vecs[0].bv_page)
- put_page(ca->sb_bio.bi_io_vec[0].bv_page);
+ put_page(bio_first_page_all(&ca->sb_bio));
if (!IS_ERR_OR_NULL(ca->bdev))
blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
@@ -1864,7 +1875,7 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
ca->bdev->bd_holder = ca;
bio_init(&ca->sb_bio, ca->sb_bio.bi_inline_vecs, 1);
- ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
+ bio_first_bvec_all(&ca->sb_bio)->bv_page = sb_page;
get_page(sb_page);
if (blk_queue_discard(bdev_get_queue(ca->bdev)))
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index e548b8b51322..a23cd6a14b74 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -249,6 +249,13 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
: 0;
}
+/*
+ * Generally it isn't good to access .bi_io_vec and .bi_vcnt directly;
+ * the preferred way is bio_add_page(). But in this case bch_bio_map()
+ * assumes that the bvec table is empty, so it is safe to access
+ * .bi_vcnt & .bi_io_vec in this way even after multipage bvecs are
+ * supported.
+ */
void bch_bio_map(struct bio *bio, void *base)
{
size_t size = bio->bi_iter.bi_size;
@@ -276,6 +283,33 @@ start: bv->bv_len = min_t(size_t, PAGE_SIZE - bv->bv_offset,
}
}
+/**
+ * bch_bio_alloc_pages - allocates a single page for each bvec in a bio
+ * @bio: bio to allocate pages for
+ * @gfp_mask: flags for allocation
+ *
+ * Allocates pages up to @bio->bi_vcnt.
+ *
+ * Returns 0 on success, -ENOMEM on failure. On failure, any allocated pages are
+ * freed.
+ */
+int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
+{
+ int i;
+ struct bio_vec *bv;
+
+ bio_for_each_segment_all(bv, bio, i) {
+ bv->bv_page = alloc_page(gfp_mask);
+ if (!bv->bv_page) {
+ while (--bv >= bio->bi_io_vec)
+ __free_page(bv->bv_page);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
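
bch_bio_alloc_pages() uses the classic allocate-with-rollback idiom:
on the first failed allocation it walks backwards over the bvecs
already filled and frees them, so the caller sees all-or-nothing. The
same shape in a generic userspace sketch:

#include <stdlib.h>

static int alloc_all(void **slots, int n)
{
	for (int i = 0; i < n; i++) {
		slots[i] = malloc(4096);
		if (!slots[i]) {
			while (i-- > 0)		/* undo partial progress */
				free(slots[i]);
			return -1;		/* -ENOMEM in the kernel */
		}
	}
	return 0;
}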
/*
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group (Any
* use permitted, subject to terms of PostgreSQL license; see.)
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index ed5e8a412eb8..4df4c5c1cab2 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -558,6 +558,7 @@ static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
}
void bch_bio_map(struct bio *bio, void *base);
+int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp_mask);
static inline sector_t bdev_sectors(struct block_device *bdev)
{
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 56a37884ca8b..51306a19ab03 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -18,17 +18,39 @@
#include <trace/events/bcache.h>
/* Rate limiting */
-
-static void __update_writeback_rate(struct cached_dev *dc)
+static uint64_t __calc_target_rate(struct cached_dev *dc)
{
struct cache_set *c = dc->disk.c;
+
+ /*
+ * This is the size of the cache, minus the amount used for
+ * flash-only devices
+ */
uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
bcache_flash_devs_sectors_dirty(c);
+
+ /*
+ * Unfortunately there is no control of global dirty data. If the
+ * user states that they want 10% dirty data in the cache, and has,
+ * e.g., 5 backing volumes of equal size, we try to ensure each
+ * backing volume uses about 2% of the cache for dirty data.
+ */
+ uint32_t bdev_share =
+ div64_u64(bdev_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT,
+ c->cached_dev_sectors);
+
uint64_t cache_dirty_target =
div_u64(cache_sectors * dc->writeback_percent, 100);
- int64_t target = div64_u64(cache_dirty_target * bdev_sectors(dc->bdev),
- c->cached_dev_sectors);
+ /* Ensure each backing dev gets at least one dirty share */
+ if (bdev_share < 1)
+ bdev_share = 1;
+
+ return (cache_dirty_target * bdev_share) >> WRITEBACK_SHARE_SHIFT;
+}
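
Plugging sample numbers into __calc_target_rate() makes the
fixed-point share arithmetic concrete. With a made-up 1,000,000-sector
cache, writeback_percent = 10, and five equally sized backing devices,
each device's target lands just under the ideal 20,000 sectors because
of integer truncation:

#include <stdint.h>
#include <stdio.h>

#define WRITEBACK_SHARE_SHIFT 14

int main(void)
{
	uint64_t cache_sectors = 1000000, writeback_percent = 10;
	uint64_t bdev_sectors = 1000000, cached_dev_sectors = 5000000;

	uint64_t cache_dirty_target = cache_sectors * writeback_percent / 100;
	uint64_t bdev_share = (bdev_sectors << WRITEBACK_SHARE_SHIFT)
			      / cached_dev_sectors;	/* 3276 of 16384 */
	if (bdev_share < 1)
		bdev_share = 1;				/* minimum share */

	/* prints 19995: just shy of 20000 due to truncation */
	printf("target = %llu\n", (unsigned long long)
	       ((cache_dirty_target * bdev_share) >> WRITEBACK_SHARE_SHIFT));
	return 0;
}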
+
+static void __update_writeback_rate(struct cached_dev *dc)
+{
/*
* PI controller:
* Figures out the amount that should be written per second.
@@ -49,6 +71,7 @@ static void __update_writeback_rate(struct cached_dev *dc)
* This acts as a slow, long-term average that is not subject to
* variations in usage like the p term.
*/
+ int64_t target = __calc_target_rate(dc);
int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
int64_t error = dirty - target;
int64_t proportional_scaled =
@@ -116,6 +139,7 @@ static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
struct dirty_io {
struct closure cl;
struct cached_dev *dc;
+ uint16_t sequence;
struct bio bio;
};
@@ -194,6 +218,27 @@ static void write_dirty(struct closure *cl)
{
struct dirty_io *io = container_of(cl, struct dirty_io, cl);
struct keybuf_key *w = io->bio.bi_private;
+ struct cached_dev *dc = io->dc;
+
+ uint16_t next_sequence;
+
+ if (atomic_read(&dc->writeback_sequence_next) != io->sequence) {
+ /* Not our turn to write; wait for a write to complete */
+ closure_wait(&dc->writeback_ordering_wait, cl);
+
+ if (atomic_read(&dc->writeback_sequence_next) == io->sequence) {
+ /*
+ * Edge case: the wakeup happened in indeterminate
+ * order relative to when we were added to the wait
+ * list.
+ */
+ closure_wake_up(&dc->writeback_ordering_wait);
+ }
+
+ continue_at(cl, write_dirty, io->dc->writeback_write_wq);
+ return;
+ }
+
+ next_sequence = io->sequence + 1;
/*
* IO errors are signalled using the dirty bit on the key.
@@ -211,6 +256,9 @@ static void write_dirty(struct closure *cl)
closure_bio_submit(&io->bio, cl);
}
+ atomic_set(&dc->writeback_sequence_next, next_sequence);
+ closure_wake_up(&dc->writeback_ordering_wait);
+
continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
}
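
The sequence check added to write_dirty() is effectively a ticket lock
over dispatch order: each dirty_io carries a ticket, only the holder of
writeback_sequence_next may issue its write, and completing a write
advances the counter and wakes the waiters. A condensed userspace model
of that handshake (blocking on a condvar where the kernel re-queues the
closure instead):

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t turn = PTHREAD_COND_INITIALIZER;
static unsigned int next_sequence;

static void ordered_write(unsigned int sequence, void (*do_write)(void))
{
	pthread_mutex_lock(&lock);
	while (next_sequence != sequence)	/* not our turn yet */
		pthread_cond_wait(&turn, &lock);
	pthread_mutex_unlock(&lock);

	do_write();				/* dispatch in LBA order */

	pthread_mutex_lock(&lock);
	next_sequence = sequence + 1;		/* pass the turn onward */
	pthread_cond_broadcast(&turn);
	pthread_mutex_unlock(&lock);
}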
@@ -219,8 +267,10 @@ static void read_dirty_endio(struct bio *bio)
struct keybuf_key *w = bio->bi_private;
struct dirty_io *io = w->private;
+ /* is_read = 1 */
bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
- bio->bi_status, "reading dirty data from cache");
+ bio->bi_status, 1,
+ "reading dirty data from cache");
dirty_endio(bio);
}
@@ -237,10 +287,15 @@ static void read_dirty_submit(struct closure *cl)
static void read_dirty(struct cached_dev *dc)
{
unsigned delay = 0;
- struct keybuf_key *w;
+ struct keybuf_key *next, *keys[MAX_WRITEBACKS_IN_PASS], *w;
+ size_t size;
+ int nk, i;
struct dirty_io *io;
struct closure cl;
+ uint16_t sequence = 0;
+ BUG_ON(!llist_empty(&dc->writeback_ordering_wait.list));
+ atomic_set(&dc->writeback_sequence_next, sequence);
closure_init_stack(&cl);
/*
@@ -248,45 +303,109 @@ static void read_dirty(struct cached_dev *dc)
* mempools.
*/
- while (!kthread_should_stop()) {
-
- w = bch_keybuf_next(&dc->writeback_keys);
- if (!w)
- break;
-
- BUG_ON(ptr_stale(dc->disk.c, &w->key, 0));
-
- if (KEY_START(&w->key) != dc->last_read ||
- jiffies_to_msecs(delay) > 50)
- while (!kthread_should_stop() && delay)
- delay = schedule_timeout_interruptible(delay);
-
- dc->last_read = KEY_OFFSET(&w->key);
-
- io = kzalloc(sizeof(struct dirty_io) + sizeof(struct bio_vec)
- * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
- GFP_KERNEL);
- if (!io)
- goto err;
-
- w->private = io;
- io->dc = dc;
-
- dirty_init(w);
- bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
- io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
- bio_set_dev(&io->bio, PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
- io->bio.bi_end_io = read_dirty_endio;
-
- if (bio_alloc_pages(&io->bio, GFP_KERNEL))
- goto err_free;
+ next = bch_keybuf_next(&dc->writeback_keys);
+
+ while (!kthread_should_stop() && next) {
+ size = 0;
+ nk = 0;
+
+ do {
+ BUG_ON(ptr_stale(dc->disk.c, &next->key, 0));
+
+ /*
+ * Don't combine too many operations, even if they
+ * are all small.
+ */
+ if (nk >= MAX_WRITEBACKS_IN_PASS)
+ break;
+
+ /*
+ * If the current operation is very large, don't
+ * further combine operations.
+ */
+ if (size >= MAX_WRITESIZE_IN_PASS)
+ break;
+
+ /*
+ * Operations are only eligible to be combined
+ * if they are contiguous.
+ *
+ * TODO: add a heuristic that allows a certain
+ * amount of non-contiguous IO per pass,
+ * so that we can benefit from backing device
+ * command queueing.
+ */
+ if ((nk != 0) && bkey_cmp(&keys[nk-1]->key,
+ &START_KEY(&next->key)))
+ break;
+
+ size += KEY_SIZE(&next->key);
+ keys[nk++] = next;
+ } while ((next = bch_keybuf_next(&dc->writeback_keys)));
+
+ /* Now we have gathered a set of 1..5 keys to write back. */
+ for (i = 0; i < nk; i++) {
+ w = keys[i];
+
+ io = kzalloc(sizeof(struct dirty_io) +
+ sizeof(struct bio_vec) *
+ DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
+ GFP_KERNEL);
+ if (!io)
+ goto err;
+
+ w->private = io;
+ io->dc = dc;
+ io->sequence = sequence++;
+
+ dirty_init(w);
+ bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
+ io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
+ bio_set_dev(&io->bio,
+ PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
+ io->bio.bi_end_io = read_dirty_endio;
+
+ if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL))
+ goto err_free;
+
+ trace_bcache_writeback(&w->key);
+
+ down(&dc->in_flight);
+
+ /* We've acquired a semaphore for the maximum
+ * simultaneous number of writebacks; from here
+ * everything happens asynchronously.
+ */
+ closure_call(&io->cl, read_dirty_submit, NULL, &cl);
+ }
- trace_bcache_writeback(&w->key);
+ delay = writeback_delay(dc, size);
- down(&dc->in_flight);
- closure_call(&io->cl, read_dirty_submit, NULL, &cl);
+ /* If the control system would wait for at least half a
+ * second, and there have been no requests hitting the backing
+ * disk for a while: use an alternate mode where we have at most
+ * one contiguous set of writebacks in flight at a time. If
+ * someone wants to do IO it will be quick, as it will only
+ * have to contend with one operation in flight, and we'll
+ * be round-tripping data to the backing disk as quickly as
+ * it can accept it.
+ */
+ if (delay >= HZ / 2) {
+ /* 3 means at least 1.5 seconds, up to 7.5 if we
+ * have slowed way down.
+ */
+ if (atomic_inc_return(&dc->backing_idle) >= 3) {
+ /* Wait for current I/Os to finish */
+ closure_sync(&cl);
+ /* And immediately launch a new set. */
+ delay = 0;
+ }
+ }
- delay = writeback_delay(dc, KEY_SIZE(&w->key));
+ while (!kthread_should_stop() && delay) {
+ schedule_timeout_interruptible(delay);
+ delay = writeback_delay(dc, 0);
+ }
}
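
The reworked loop gathers up to MAX_WRITEBACKS_IN_PASS keys per pass,
stopping early once the batch grows past MAX_WRITESIZE_IN_PASS or the
next key is not contiguous with the previous one. A stripped-down
sketch of the gathering condition, with an invented key layout:

struct key { unsigned long start, size; };

static int batch_keys(const struct key *in, int avail,
		      const struct key **out,
		      int max_keys, unsigned long max_size)
{
	unsigned long size = 0;
	int nk = 0;

	while (nk < avail && nk < max_keys && size < max_size) {
		/* only contiguous keys may be combined */
		if (nk && out[nk - 1]->start + out[nk - 1]->size
			  != in[nk].start)
			break;
		size += in[nk].size;
		out[nk] = &in[nk];
		nk++;
	}
	return nk;
}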
if (0) {
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index a9e3ffb4b03c..66f1c527fa24 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -5,6 +5,16 @@
#define CUTOFF_WRITEBACK 40
#define CUTOFF_WRITEBACK_SYNC 70
+#define MAX_WRITEBACKS_IN_PASS 5
+#define MAX_WRITESIZE_IN_PASS 5000 /* *512b */
+
+/*
+ * 14 (16384ths) is chosen here so that each backing device gets a
+ * reasonable fraction of the share, and so the math doesn't blow up
+ * until individual backing devices reach a petabyte.
+ */
+#define WRITEBACK_SHARE_SHIFT 14
+
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
uint64_t i, ret = 0;
@@ -21,7 +31,7 @@ static inline uint64_t bcache_flash_devs_sectors_dirty(struct cache_set *c)
mutex_lock(&bch_register_lock);
- for (i = 0; i < c->nr_uuids; i++) {
+ for (i = 0; i < c->devices_max_used; i++) {
struct bcache_device *d = c->devices[i];
if (!d || !UUID_FLASH_ONLY(&c->uuids[i]))
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index c546b567f3b5..414c9af54ded 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -662,7 +662,7 @@ static void submit_io(struct dm_buffer *b, int rw, bio_end_io_t *end_io)
sector = (b->block << b->c->sectors_per_block_bits) + b->c->start;
- if (rw != WRITE) {
+ if (rw != REQ_OP_WRITE) {
n_sectors = 1 << b->c->sectors_per_block_bits;
offset = 0;
} else {
@@ -740,7 +740,7 @@ static void __write_dirty_buffer(struct dm_buffer *b,
b->write_end = b->dirty_end;
if (!write_list)
- submit_io(b, WRITE, write_endio);
+ submit_io(b, REQ_OP_WRITE, write_endio);
else
list_add_tail(&b->write_list, write_list);
}
@@ -753,7 +753,7 @@ static void __flush_write_list(struct list_head *write_list)
struct dm_buffer *b =
list_entry(write_list->next, struct dm_buffer, write_list);
list_del(&b->write_list);
- submit_io(b, WRITE, write_endio);
+ submit_io(b, REQ_OP_WRITE, write_endio);
cond_resched();
}
blk_finish_plug(&plug);
@@ -1123,7 +1123,7 @@ static void *new_read(struct dm_bufio_client *c, sector_t block,
return NULL;
if (need_submit)
- submit_io(b, READ, read_endio);
+ submit_io(b, REQ_OP_READ, read_endio);
wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
@@ -1193,7 +1193,7 @@ void dm_bufio_prefetch(struct dm_bufio_client *c,
dm_bufio_unlock(c);
if (need_submit)
- submit_io(b, READ, read_endio);
+ submit_io(b, REQ_OP_READ, read_endio);
dm_bufio_release(b);
cond_resched();
@@ -1454,7 +1454,7 @@ retry:
old_block = b->block;
__unlink_buffer(b);
__link_buffer(b, new_block, b->list_mode);
- submit_io(b, WRITE, write_endio);
+ submit_io(b, REQ_OP_WRITE, write_endio);
wait_on_bit_io(&b->state, B_WRITING,
TASK_UNINTERRUPTIBLE);
__unlink_buffer(b);
@@ -1716,7 +1716,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
if (!DM_BUFIO_CACHE_NAME(c)) {
r = -ENOMEM;
mutex_unlock(&dm_bufio_clients_lock);
- goto bad_cache;
+ goto bad;
}
}
@@ -1727,7 +1727,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
if (!DM_BUFIO_CACHE(c)) {
r = -ENOMEM;
mutex_unlock(&dm_bufio_clients_lock);
- goto bad_cache;
+ goto bad;
}
}
}
@@ -1738,27 +1738,28 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
if (!b) {
r = -ENOMEM;
- goto bad_buffer;
+ goto bad;
}
__free_buffer_wake(b);
}
+ c->shrinker.count_objects = dm_bufio_shrink_count;
+ c->shrinker.scan_objects = dm_bufio_shrink_scan;
+ c->shrinker.seeks = 1;
+ c->shrinker.batch = 0;
+ r = register_shrinker(&c->shrinker);
+ if (r)
+ goto bad;
+
mutex_lock(&dm_bufio_clients_lock);
dm_bufio_client_count++;
list_add(&c->client_list, &dm_bufio_all_clients);
__cache_size_refresh();
mutex_unlock(&dm_bufio_clients_lock);
- c->shrinker.count_objects = dm_bufio_shrink_count;
- c->shrinker.scan_objects = dm_bufio_shrink_scan;
- c->shrinker.seeks = 1;
- c->shrinker.batch = 0;
- register_shrinker(&c->shrinker);
-
return c;
-bad_buffer:
-bad_cache:
+bad:
while (!list_empty(&c->reserved_buffers)) {
struct dm_buffer *b = list_entry(c->reserved_buffers.next,
struct dm_buffer, lru_list);
@@ -1767,6 +1768,7 @@ bad_cache:
}
dm_io_client_destroy(c->dm_io);
bad_dm_io:
+ mutex_destroy(&c->lock);
kfree(c);
bad_client:
return ERR_PTR(r);
@@ -1811,6 +1813,7 @@ void dm_bufio_client_destroy(struct dm_bufio_client *c)
BUG_ON(c->n_buffers[i]);
dm_io_client_destroy(c->dm_io);
+ mutex_destroy(&c->lock);
kfree(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 6a14f945783c..3222e21cbbf8 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -91,8 +91,7 @@ struct mapped_device {
/*
* io objects are allocated from here.
*/
- mempool_t *io_pool;
-
+ struct bio_set *io_bs;
struct bio_set *bs;
/*
@@ -130,8 +129,6 @@ struct mapped_device {
struct srcu_struct io_barrier;
};
-void dm_init_md_queue(struct mapped_device *md);
-void dm_init_normal_md_queue(struct mapped_device *md);
int md_in_flight(struct mapped_device *md);
void disable_write_same(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 554d60394c06..8168f737590e 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1446,7 +1446,6 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
bio_for_each_segment_all(bv, clone, i) {
BUG_ON(!bv->bv_page);
mempool_free(bv->bv_page, cc->page_pool);
- bv->bv_page = NULL;
}
}
@@ -2194,6 +2193,8 @@ static void crypt_dtr(struct dm_target *ti)
kzfree(cc->cipher_auth);
kzfree(cc->authenc_key);
+ mutex_destroy(&cc->bio_alloc_lock);
+
/* Must zero key material before freeing */
kzfree(cc);
}
@@ -2703,8 +2704,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- cc->bs = bioset_create(MIN_IOS, 0, (BIOSET_NEED_BVECS |
- BIOSET_NEED_RESCUER));
+ cc->bs = bioset_create(MIN_IOS, 0, BIOSET_NEED_BVECS);
if (!cc->bs) {
ti->error = "Cannot allocate crypt bioset";
goto bad;
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 288386bfbfb5..1783d80c9cad 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -229,6 +229,8 @@ static void delay_dtr(struct dm_target *ti)
if (dc->dev_write)
dm_put_device(ti, dc->dev_write);
+ mutex_destroy(&dc->timer_lock);
+
kfree(dc);
}
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index b82cb1ab1eaa..1b907b15f5c3 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -70,6 +70,11 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
arg_name = dm_shift_arg(as);
argc--;
+ if (!arg_name) {
+ ti->error = "Insufficient feature arguments";
+ return -EINVAL;
+ }
+
/*
* drop_writes
*/
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index b4357ed4d541..a8d914d5abbe 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -58,8 +58,7 @@ struct dm_io_client *dm_io_client_create(void)
if (!client->pool)
goto bad;
- client->bios = bioset_create(min_ios, 0, (BIOSET_NEED_BVECS |
- BIOSET_NEED_RESCUER));
+ client->bios = bioset_create(min_ios, 0, BIOSET_NEED_BVECS);
if (!client->bios)
goto bad;
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index e52676fa9832..3f6791afd3e4 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1929,10 +1929,10 @@ static int dm_release(struct inode *inode, struct file *filp)
return 0;
}
-static unsigned dm_poll(struct file *filp, poll_table *wait)
+static __poll_t dm_poll(struct file *filp, poll_table *wait)
{
struct dm_file *priv = filp->private_data;
- unsigned mask = 0;
+ __poll_t mask = 0;
poll_wait(filp, &dm_global_eventq, wait);
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index eb45cc3df31d..e6e7c686646d 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -477,8 +477,10 @@ static int run_complete_job(struct kcopyd_job *job)
* If this is the master job, the sub jobs have already
* completed so we can free everything.
*/
- if (job->master_job == job)
+ if (job->master_job == job) {
+ mutex_destroy(&job->lock);
mempool_free(job, kc->job_pool);
+ }
fn(read_err, write_err, context);
if (atomic_dec_and_test(&kc->nr_jobs))
@@ -750,6 +752,7 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
* followed by SPLIT_COUNT sub jobs.
*/
job = mempool_alloc(kc->job_pool, GFP_NOIO);
+ mutex_init(&job->lock);
/*
* set up for the read.
@@ -811,7 +814,6 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
if (job->source.count <= SUB_JOB_SIZE)
dispatch_job(job);
else {
- mutex_init(&job->lock);
job->progress = 0;
split_job(job);
}
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 189badbeddaf..3362d866793b 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -594,7 +594,7 @@ static int log_mark(struct log_writes_c *lc, char *data)
return -ENOMEM;
}
- block->data = kstrndup(data, maxsize, GFP_KERNEL);
+ block->data = kstrndup(data, maxsize - 1, GFP_KERNEL);
if (!block->data) {
DMERR("Error copying mark data");
kfree(block);
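
The off-by-one being fixed: kstrndup(s, max, gfp) copies up to max
characters and then appends a NUL, so the duplicate can occupy max + 1
bytes; passing maxsize - 1 keeps the mark data within the maxsize
budget. POSIX strndup() behaves the same way:

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char data[] = "0123456789";
	size_t maxsize = 4;

	char *dup = strndup(data, maxsize - 1);	/* at most maxsize bytes */
	printf("\"%s\" occupies %zu bytes\n", dup, strlen(dup) + 1); /* 4 */
	free(dup);
	return 0;
}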
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index f7810cc869ac..7d3e572072f5 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -64,36 +64,30 @@ struct priority_group {
/* Multipath context */
struct multipath {
- struct list_head list;
- struct dm_target *ti;
-
- const char *hw_handler_name;
- char *hw_handler_params;
+ unsigned long flags; /* Multipath state flags */
spinlock_t lock;
-
- unsigned nr_priority_groups;
- struct list_head priority_groups;
-
- wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */
+ enum dm_queue_mode queue_mode;
struct pgpath *current_pgpath;
struct priority_group *current_pg;
struct priority_group *next_pg; /* Switch to this PG if set */
- unsigned long flags; /* Multipath state flags */
+ atomic_t nr_valid_paths; /* Total number of usable paths */
+ unsigned nr_priority_groups;
+ struct list_head priority_groups;
+ const char *hw_handler_name;
+ char *hw_handler_params;
+ wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */
unsigned pg_init_retries; /* Number of times to retry pg_init */
unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */
-
- atomic_t nr_valid_paths; /* Total number of usable paths */
atomic_t pg_init_in_progress; /* Only one pg_init allowed at once */
atomic_t pg_init_count; /* Number of times pg_init called */
- enum dm_queue_mode queue_mode;
-
struct mutex work_mutex;
struct work_struct trigger_event;
+ struct dm_target *ti;
struct work_struct process_queued_bios;
struct bio_list queued_bios;
@@ -135,10 +129,10 @@ static struct pgpath *alloc_pgpath(void)
{
struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
- if (pgpath) {
- pgpath->is_active = true;
- INIT_DELAYED_WORK(&pgpath->activate_path, activate_path_work);
- }
+ if (!pgpath)
+ return NULL;
+
+ pgpath->is_active = true;
return pgpath;
}
@@ -193,13 +187,8 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
if (m) {
INIT_LIST_HEAD(&m->priority_groups);
spin_lock_init(&m->lock);
- set_bit(MPATHF_QUEUE_IO, &m->flags);
atomic_set(&m->nr_valid_paths, 0);
- atomic_set(&m->pg_init_in_progress, 0);
- atomic_set(&m->pg_init_count, 0);
- m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
INIT_WORK(&m->trigger_event, trigger_event);
- init_waitqueue_head(&m->pg_init_wait);
mutex_init(&m->work_mutex);
m->queue_mode = DM_TYPE_NONE;
@@ -221,13 +210,26 @@ static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
else
m->queue_mode = DM_TYPE_REQUEST_BASED;
- } else if (m->queue_mode == DM_TYPE_BIO_BASED) {
+
+ } else if (m->queue_mode == DM_TYPE_BIO_BASED ||
+ m->queue_mode == DM_TYPE_NVME_BIO_BASED) {
INIT_WORK(&m->process_queued_bios, process_queued_bios);
- /*
- * bio-based doesn't support any direct scsi_dh management;
- * it just discovers if a scsi_dh is attached.
- */
- set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
+
+ if (m->queue_mode == DM_TYPE_BIO_BASED) {
+ /*
+ * bio-based doesn't support any direct scsi_dh management;
+ * it just discovers if a scsi_dh is attached.
+ */
+ set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
+ }
+ }
+
+ if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) {
+ set_bit(MPATHF_QUEUE_IO, &m->flags);
+ atomic_set(&m->pg_init_in_progress, 0);
+ atomic_set(&m->pg_init_count, 0);
+ m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
+ init_waitqueue_head(&m->pg_init_wait);
}
dm_table_set_type(ti->table, m->queue_mode);
@@ -246,6 +248,7 @@ static void free_multipath(struct multipath *m)
kfree(m->hw_handler_name);
kfree(m->hw_handler_params);
+ mutex_destroy(&m->work_mutex);
kfree(m);
}
@@ -264,29 +267,23 @@ static struct dm_mpath_io *get_mpio_from_bio(struct bio *bio)
return dm_per_bio_data(bio, multipath_per_bio_data_size());
}
-static struct dm_bio_details *get_bio_details_from_bio(struct bio *bio)
+static struct dm_bio_details *get_bio_details_from_mpio(struct dm_mpath_io *mpio)
{
/* dm_bio_details is immediately after the dm_mpath_io in bio's per-bio-data */
- struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
void *bio_details = mpio + 1;
-
return bio_details;
}
-static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p,
- struct dm_bio_details **bio_details_p)
+static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p)
{
struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
- struct dm_bio_details *bio_details = get_bio_details_from_bio(bio);
+ struct dm_bio_details *bio_details = get_bio_details_from_mpio(mpio);
- memset(mpio, 0, sizeof(*mpio));
- memset(bio_details, 0, sizeof(*bio_details));
- dm_bio_record(bio_details, bio);
+ mpio->nr_bytes = bio->bi_iter.bi_size;
+ mpio->pgpath = NULL;
+ *mpio_p = mpio;
- if (mpio_p)
- *mpio_p = mpio;
- if (bio_details_p)
- *bio_details_p = bio_details;
+ dm_bio_record(bio_details, bio);
}
/*-----------------------------------------------
@@ -340,6 +337,9 @@ static void __switch_pg(struct multipath *m, struct priority_group *pg)
{
m->current_pg = pg;
+ if (m->queue_mode == DM_TYPE_NVME_BIO_BASED)
+ return;
+
/* Must we initialise the PG first, and queue I/O till it's ready? */
if (m->hw_handler_name) {
set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
@@ -385,7 +385,8 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
unsigned bypassed = 1;
if (!atomic_read(&m->nr_valid_paths)) {
- clear_bit(MPATHF_QUEUE_IO, &m->flags);
+ if (m->queue_mode != DM_TYPE_NVME_BIO_BASED)
+ clear_bit(MPATHF_QUEUE_IO, &m->flags);
goto failed;
}
@@ -516,12 +517,10 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
return DM_MAPIO_KILL;
} else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
- if (pg_init_all_paths(m))
- return DM_MAPIO_DELAY_REQUEUE;
- return DM_MAPIO_REQUEUE;
+ pg_init_all_paths(m);
+ return DM_MAPIO_DELAY_REQUEUE;
}
- memset(mpio, 0, sizeof(*mpio));
mpio->pgpath = pgpath;
mpio->nr_bytes = nr_bytes;
@@ -530,12 +529,23 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE, GFP_ATOMIC);
if (IS_ERR(clone)) {
/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
- bool queue_dying = blk_queue_dying(q);
- if (queue_dying) {
+ if (blk_queue_dying(q)) {
atomic_inc(&m->pg_init_in_progress);
activate_or_offline_path(pgpath);
+ return DM_MAPIO_DELAY_REQUEUE;
}
- return DM_MAPIO_DELAY_REQUEUE;
+
+ /*
+ * blk-mq's SCHED_RESTART can cover this requeue, so we
+ * needn't deal with it by DELAY_REQUEUE. More importantly,
+ * we have to return DM_MAPIO_REQUEUE so that blk-mq can
+ * get the queue busy feedback (via BLK_STS_RESOURCE),
+ * otherwise I/O merging can suffer.
+ */
+ if (q->mq_ops)
+ return DM_MAPIO_REQUEUE;
+ else
+ return DM_MAPIO_DELAY_REQUEUE;
}
clone->bio = clone->biotail = NULL;
clone->rq_disk = bdev->bd_disk;
@@ -557,9 +567,9 @@ static void multipath_release_clone(struct request *clone)
/*
* Map cloned bios (bio-based multipath)
*/
-static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_mpath_io *mpio)
+
+static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
{
- size_t nr_bytes = bio->bi_iter.bi_size;
struct pgpath *pgpath;
unsigned long flags;
bool queue_io;
@@ -568,7 +578,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
pgpath = READ_ONCE(m->current_pgpath);
queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
if (!pgpath || !queue_io)
- pgpath = choose_pgpath(m, nr_bytes);
+ pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
if ((pgpath && queue_io) ||
(!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
@@ -576,14 +586,62 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
spin_lock_irqsave(&m->lock, flags);
bio_list_add(&m->queued_bios, bio);
spin_unlock_irqrestore(&m->lock, flags);
+
/* PG_INIT_REQUIRED cannot be set without QUEUE_IO */
if (queue_io || test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
pg_init_all_paths(m);
else if (!queue_io)
queue_work(kmultipathd, &m->process_queued_bios);
- return DM_MAPIO_SUBMITTED;
+
+ return ERR_PTR(-EAGAIN);
+ }
+
+ return pgpath;
+}
+
+static struct pgpath *__map_bio_nvme(struct multipath *m, struct bio *bio)
+{
+ struct pgpath *pgpath;
+ unsigned long flags;
+
+ /* Do we need to select a new pgpath? */
+ /*
+ * FIXME: currently only switching path if no path (due to failure, etc)
+ * - which negates the point of using a path selector
+ */
+ pgpath = READ_ONCE(m->current_pgpath);
+ if (!pgpath)
+ pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
+
+ if (!pgpath) {
+ if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
+ /* Queue for the daemon to resubmit */
+ spin_lock_irqsave(&m->lock, flags);
+ bio_list_add(&m->queued_bios, bio);
+ spin_unlock_irqrestore(&m->lock, flags);
+ queue_work(kmultipathd, &m->process_queued_bios);
+
+ return ERR_PTR(-EAGAIN);
+ }
+ return NULL;
}
+ return pgpath;
+}
+
+static int __multipath_map_bio(struct multipath *m, struct bio *bio,
+ struct dm_mpath_io *mpio)
+{
+ struct pgpath *pgpath;
+
+ if (m->queue_mode == DM_TYPE_NVME_BIO_BASED)
+ pgpath = __map_bio_nvme(m, bio);
+ else
+ pgpath = __map_bio(m, bio);
+
+ if (IS_ERR(pgpath))
+ return DM_MAPIO_SUBMITTED;
+
if (!pgpath) {
if (must_push_back_bio(m))
return DM_MAPIO_REQUEUE;
@@ -592,7 +650,6 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
}
mpio->pgpath = pgpath;
- mpio->nr_bytes = nr_bytes;
bio->bi_status = 0;
bio_set_dev(bio, pgpath->path.dev->bdev);
@@ -601,7 +658,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
if (pgpath->pg->ps.type->start_io)
pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
&pgpath->path,
- nr_bytes);
+ mpio->nr_bytes);
return DM_MAPIO_REMAPPED;
}
@@ -610,8 +667,7 @@ static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
struct multipath *m = ti->private;
struct dm_mpath_io *mpio = NULL;
- multipath_init_per_bio_data(bio, &mpio, NULL);
-
+ multipath_init_per_bio_data(bio, &mpio);
return __multipath_map_bio(m, bio, mpio);
}
@@ -619,7 +675,8 @@ static void process_queued_io_list(struct multipath *m)
{
if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
- else if (m->queue_mode == DM_TYPE_BIO_BASED)
+ else if (m->queue_mode == DM_TYPE_BIO_BASED ||
+ m->queue_mode == DM_TYPE_NVME_BIO_BASED)
queue_work(kmultipathd, &m->process_queued_bios);
}
@@ -649,7 +706,9 @@ static void process_queued_bios(struct work_struct *work)
blk_start_plug(&plug);
while ((bio = bio_list_pop(&bios))) {
- r = __multipath_map_bio(m, bio, get_mpio_from_bio(bio));
+ struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
+ dm_bio_restore(get_bio_details_from_mpio(mpio), bio);
+ r = __multipath_map_bio(m, bio, mpio);
switch (r) {
case DM_MAPIO_KILL:
bio->bi_status = BLK_STS_IOERR;
@@ -752,34 +811,11 @@ static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
return 0;
}
-static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
- struct dm_target *ti)
+static int setup_scsi_dh(struct block_device *bdev, struct multipath *m, char **error)
{
- int r;
- struct pgpath *p;
- struct multipath *m = ti->private;
- struct request_queue *q = NULL;
+ struct request_queue *q = bdev_get_queue(bdev);
const char *attached_handler_name;
-
- /* we need at least a path arg */
- if (as->argc < 1) {
- ti->error = "no device given";
- return ERR_PTR(-EINVAL);
- }
-
- p = alloc_pgpath();
- if (!p)
- return ERR_PTR(-ENOMEM);
-
- r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
- &p->path.dev);
- if (r) {
- ti->error = "error getting device";
- goto bad;
- }
-
- if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) || m->hw_handler_name)
- q = bdev_get_queue(p->path.dev->bdev);
+ int r;
if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
retain:
@@ -811,26 +847,59 @@ retain:
char b[BDEVNAME_SIZE];
printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
- bdevname(p->path.dev->bdev, b));
+ bdevname(bdev, b));
goto retain;
}
if (r < 0) {
- ti->error = "error attaching hardware handler";
- dm_put_device(ti, p->path.dev);
- goto bad;
+ *error = "error attaching hardware handler";
+ return r;
}
if (m->hw_handler_params) {
r = scsi_dh_set_params(q, m->hw_handler_params);
if (r < 0) {
- ti->error = "unable to set hardware "
- "handler parameters";
- dm_put_device(ti, p->path.dev);
- goto bad;
+ *error = "unable to set hardware handler parameters";
+ return r;
}
}
}
+ return 0;
+}
+
+static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
+ struct dm_target *ti)
+{
+ int r;
+ struct pgpath *p;
+ struct multipath *m = ti->private;
+
+ /* we need at least a path arg */
+ if (as->argc < 1) {
+ ti->error = "no device given";
+ return ERR_PTR(-EINVAL);
+ }
+
+ p = alloc_pgpath();
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+
+ r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
+ &p->path.dev);
+ if (r) {
+ ti->error = "error getting device";
+ goto bad;
+ }
+
+ if (m->queue_mode != DM_TYPE_NVME_BIO_BASED) {
+ INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
+ r = setup_scsi_dh(p->path.dev->bdev, m, &ti->error);
+ if (r) {
+ dm_put_device(ti, p->path.dev);
+ goto bad;
+ }
+ }
+
r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
if (r) {
dm_put_device(ti, p->path.dev);
@@ -838,7 +907,6 @@ retain:
}
return p;
-
bad:
free_pgpath(p);
return ERR_PTR(r);
@@ -933,7 +1001,8 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
if (!hw_argc)
return 0;
- if (m->queue_mode == DM_TYPE_BIO_BASED) {
+ if (m->queue_mode == DM_TYPE_BIO_BASED ||
+ m->queue_mode == DM_TYPE_NVME_BIO_BASED) {
dm_consume_args(as, hw_argc);
DMERR("bio-based multipath doesn't allow hardware handler args");
return 0;
@@ -1022,6 +1091,8 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
if (!strcasecmp(queue_mode_name, "bio"))
m->queue_mode = DM_TYPE_BIO_BASED;
+ else if (!strcasecmp(queue_mode_name, "nvme"))
+ m->queue_mode = DM_TYPE_NVME_BIO_BASED;
else if (!strcasecmp(queue_mode_name, "rq"))
m->queue_mode = DM_TYPE_REQUEST_BASED;
else if (!strcasecmp(queue_mode_name, "mq"))
@@ -1122,7 +1193,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->num_discard_bios = 1;
ti->num_write_same_bios = 1;
ti->num_write_zeroes_bios = 1;
- if (m->queue_mode == DM_TYPE_BIO_BASED)
+ if (m->queue_mode == DM_TYPE_BIO_BASED || m->queue_mode == DM_TYPE_NVME_BIO_BASED)
ti->per_io_data_size = multipath_per_bio_data_size();
else
ti->per_io_data_size = sizeof(struct dm_mpath_io);
@@ -1151,16 +1222,19 @@ static void multipath_wait_for_pg_init_completion(struct multipath *m)
static void flush_multipath_work(struct multipath *m)
{
- set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
- smp_mb__after_atomic();
+ if (m->hw_handler_name) {
+ set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
+ smp_mb__after_atomic();
+
+ flush_workqueue(kmpath_handlerd);
+ multipath_wait_for_pg_init_completion(m);
+
+ clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
+ smp_mb__after_atomic();
+ }
- flush_workqueue(kmpath_handlerd);
- multipath_wait_for_pg_init_completion(m);
flush_workqueue(kmultipathd);
flush_work(&m->trigger_event);
-
- clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
- smp_mb__after_atomic();
}
static void multipath_dtr(struct dm_target *ti)
@@ -1475,21 +1549,6 @@ static void activate_path_work(struct work_struct *work)
activate_or_offline_path(pgpath);
}
-static int noretry_error(blk_status_t error)
-{
- switch (error) {
- case BLK_STS_NOTSUPP:
- case BLK_STS_NOSPC:
- case BLK_STS_TARGET:
- case BLK_STS_NEXUS:
- case BLK_STS_MEDIUM:
- return 1;
- }
-
- /* Anything else could be a path failure, so should be retried */
- return 0;
-}
-
static int multipath_end_io(struct dm_target *ti, struct request *clone,
blk_status_t error, union map_info *map_context)
{
@@ -1508,10 +1567,13 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
* request into dm core, which will remake a clone request and
* clone bios for it and resubmit it later.
*/
- if (error && !noretry_error(error)) {
+ if (error && blk_path_error(error)) {
struct multipath *m = ti->private;
- r = DM_ENDIO_REQUEUE;
+ if (error == BLK_STS_RESOURCE)
+ r = DM_ENDIO_DELAY_REQUEUE;
+ else
+ r = DM_ENDIO_REQUEUE;
if (pgpath)
fail_path(pgpath);
@@ -1536,7 +1598,7 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
}
static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
- blk_status_t *error)
+ blk_status_t *error)
{
struct multipath *m = ti->private;
struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
@@ -1544,7 +1606,7 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
unsigned long flags;
int r = DM_ENDIO_DONE;
- if (!*error || noretry_error(*error))
+ if (!*error || !blk_path_error(*error))
goto done;
if (pgpath)
@@ -1561,9 +1623,6 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
goto done;
}
- /* Queue for the daemon to resubmit */
- dm_bio_restore(get_bio_details_from_bio(clone), clone);
-
spin_lock_irqsave(&m->lock, flags);
bio_list_add(&m->queued_bios, clone);
spin_unlock_irqrestore(&m->lock, flags);
@@ -1671,6 +1730,9 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
case DM_TYPE_BIO_BASED:
DMEMIT("queue_mode bio ");
break;
+ case DM_TYPE_NVME_BIO_BASED:
+ DMEMIT("queue_mode nvme ");
+ break;
case DM_TYPE_MQ_REQUEST_BASED:
DMEMIT("queue_mode mq ");
break;
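
The noretry_error() helper removed above is superseded by the generic blk_path_error() from include/linux/blk_types.h, with the sense inverted: it returns true when a failure may be path-related and therefore worth retrying on another path. A sketch of that generic helper as merged alongside this series (note BLK_STS_PROTECTION joining the never-retry set):

    static inline bool blk_path_error(blk_status_t error)
    {
    	switch (error) {
    	case BLK_STS_NOTSUPP:
    	case BLK_STS_NOSPC:
    	case BLK_STS_TARGET:
    	case BLK_STS_NEXUS:
    	case BLK_STS_MEDIUM:
    	case BLK_STS_PROTECTION:
    		return false;
    	}
    	/* Anything else could be a path failure, so should be retried */
    	return true;
    }

Hence the inverted call sites above: "if (error && blk_path_error(error))" replaces "if (error && !noretry_error(error))", and the bio completion path bails out early on "!blk_path_error(*error)".
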
diff --git a/drivers/md/dm-queue-length.c b/drivers/md/dm-queue-length.c
index 23f178641794..969c4f1a3633 100644
--- a/drivers/md/dm-queue-length.c
+++ b/drivers/md/dm-queue-length.c
@@ -195,9 +195,6 @@ static struct dm_path *ql_select_path(struct path_selector *ps, size_t nr_bytes)
if (list_empty(&s->valid_paths))
goto out;
- /* Change preferred (first in list) path to evenly balance. */
- list_move_tail(s->valid_paths.next, &s->valid_paths);
-
list_for_each_entry(pi, &s->valid_paths, list) {
if (!best ||
(atomic_read(&pi->qlen) < atomic_read(&best->qlen)))
@@ -210,6 +207,9 @@ static struct dm_path *ql_select_path(struct path_selector *ps, size_t nr_bytes)
if (!best)
goto out;
+ /* Move most recently used to least preferred to evenly balance. */
+ list_move_tail(&best->list, &s->valid_paths);
+
ret = best->path;
out:
spin_unlock_irqrestore(&s->lock, flags);
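
Both dm-queue-length above and dm-service-time later in this series drop the up-front head rotation in favor of demoting the chosen path. Rotating valid_paths before the scan shuffled the list even when the rotated-away head was still the least-loaded path; moving only the winner to the tail lets equally loaded paths take turns without ever penalizing the genuinely best one. Condensed from the hunks above, the new selection pattern is:

    struct path_info *pi, *best = NULL;

    /* scan the whole list first ... */
    list_for_each_entry(pi, &s->valid_paths, list)
    	if (!best || atomic_read(&pi->qlen) < atomic_read(&best->qlen))
    		best = pi;

    /* ... then demote only the path actually selected, so ties rotate
     * round-robin while a loaded path keeps being skipped */
    if (best)
    	list_move_tail(&best->list, &s->valid_paths);
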
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 6319d846e0ad..7ef469e902c6 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -29,6 +29,9 @@
*/
#define MIN_RAID456_JOURNAL_SPACE (4*2048)
+/* Global list of all raid sets */
+static LIST_HEAD(raid_sets);
+
static bool devices_handle_discard_safely = false;
/*
@@ -105,8 +108,6 @@ struct raid_dev {
#define CTR_FLAG_JOURNAL_DEV (1 << __CTR_FLAG_JOURNAL_DEV)
#define CTR_FLAG_JOURNAL_MODE (1 << __CTR_FLAG_JOURNAL_MODE)
-#define RESUME_STAY_FROZEN_FLAGS (CTR_FLAG_DELTA_DISKS | CTR_FLAG_DATA_OFFSET)
-
/*
* Definitions of various constructor flags to
* be used in checks of valid / invalid flags
@@ -209,6 +210,8 @@ struct raid_dev {
#define RT_FLAG_UPDATE_SBS 3
#define RT_FLAG_RESHAPE_RS 4
#define RT_FLAG_RS_SUSPENDED 5
+#define RT_FLAG_RS_IN_SYNC 6
+#define RT_FLAG_RS_RESYNCING 7
/* Array elements of 64 bit needed for rebuild/failed disk bits */
#define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)
@@ -224,8 +227,8 @@ struct rs_layout {
struct raid_set {
struct dm_target *ti;
+ struct list_head list;
- uint32_t bitmap_loaded;
uint32_t stripe_cache_entries;
unsigned long ctr_flags;
unsigned long runtime_flags;
@@ -270,6 +273,19 @@ static void rs_config_restore(struct raid_set *rs, struct rs_layout *l)
mddev->new_chunk_sectors = l->new_chunk_sectors;
}
+/* Find any raid_set in active slot for @rs on global list */
+static struct raid_set *rs_find_active(struct raid_set *rs)
+{
+ struct raid_set *r;
+ struct mapped_device *md = dm_table_get_md(rs->ti->table);
+
+ list_for_each_entry(r, &raid_sets, list)
+ if (r != rs && dm_table_get_md(r->ti->table) == md)
+ return r;
+
+ return NULL;
+}
+
/* raid10 algorithms (i.e. formats) */
#define ALGORITHM_RAID10_DEFAULT 0
#define ALGORITHM_RAID10_NEAR 1
@@ -572,7 +588,7 @@ static const char *raid10_md_layout_to_format(int layout)
}
/* Return md raid10 algorithm for @name */
-static int raid10_name_to_format(const char *name)
+static int raid10_name_to_format(const char *name)
{
if (!strcasecmp(name, "near"))
return ALGORITHM_RAID10_NEAR;
@@ -675,15 +691,11 @@ static struct raid_type *get_raid_type_by_ll(const int level, const int layout)
return NULL;
}
-/*
- * Conditionally change bdev capacity of @rs
- * in case of a disk add/remove reshape
- */
-static void rs_set_capacity(struct raid_set *rs)
+/* Adjust rdev sectors */
+static void rs_set_rdev_sectors(struct raid_set *rs)
{
struct mddev *mddev = &rs->md;
struct md_rdev *rdev;
- struct gendisk *gendisk = dm_disk(dm_table_get_md(rs->ti->table));
/*
* raid10 sets rdev->sector to the device size, which
@@ -692,8 +704,16 @@ static void rs_set_capacity(struct raid_set *rs)
rdev_for_each(rdev, mddev)
if (!test_bit(Journal, &rdev->flags))
rdev->sectors = mddev->dev_sectors;
+}
- set_capacity(gendisk, mddev->array_sectors);
+/*
+ * Change bdev capacity of @rs in case of a disk add/remove reshape
+ */
+static void rs_set_capacity(struct raid_set *rs)
+{
+ struct gendisk *gendisk = dm_disk(dm_table_get_md(rs->ti->table));
+
+ set_capacity(gendisk, rs->md.array_sectors);
revalidate_disk(gendisk);
}
@@ -744,6 +764,7 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
mddev_init(&rs->md);
+ INIT_LIST_HEAD(&rs->list);
rs->raid_disks = raid_devs;
rs->delta_disks = 0;
@@ -761,6 +782,9 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
for (i = 0; i < raid_devs; i++)
md_rdev_init(&rs->dev[i].rdev);
+ /* Add @rs to global list. */
+ list_add(&rs->list, &raid_sets);
+
/*
* Remaining items to be initialized by further RAID params:
* rs->md.persistent
@@ -773,6 +797,7 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
return rs;
}
+/* Free all @rs allocations and remove it from global list. */
static void raid_set_free(struct raid_set *rs)
{
int i;
@@ -790,6 +815,8 @@ static void raid_set_free(struct raid_set *rs)
dm_put_device(rs->ti, rs->dev[i].data_dev);
}
+ list_del(&rs->list);
+
kfree(rs);
}
@@ -1002,7 +1029,7 @@ static int validate_raid_redundancy(struct raid_set *rs)
!rs->dev[i].rdev.sb_page)
rebuild_cnt++;
- switch (rs->raid_type->level) {
+ switch (rs->md.level) {
case 0:
break;
case 1:
@@ -1017,6 +1044,11 @@ static int validate_raid_redundancy(struct raid_set *rs)
break;
case 10:
copies = raid10_md_layout_to_copies(rs->md.new_layout);
+ if (copies < 2) {
+ DMERR("Bogus raid10 data copies < 2!");
+ return -EINVAL;
+ }
+
if (rebuild_cnt < copies)
break;
@@ -1576,6 +1608,24 @@ static sector_t __rdev_sectors(struct raid_set *rs)
return 0;
}
+/* Check that calculated dev_sectors fits all component devices. */
+static int _check_data_dev_sectors(struct raid_set *rs)
+{
+ sector_t ds = ~0;
+ struct md_rdev *rdev;
+
+ rdev_for_each(rdev, &rs->md)
+ if (!test_bit(Journal, &rdev->flags) && rdev->bdev) {
+ ds = min(ds, to_sector(i_size_read(rdev->bdev->bd_inode)));
+ if (ds < rs->md.dev_sectors) {
+ rs->ti->error = "Component device(s) too small";
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
/* Calculate the sectors per device and per array used for @rs */
static int rs_set_dev_and_array_sectors(struct raid_set *rs, bool use_mddev)
{
@@ -1625,7 +1675,7 @@ static int rs_set_dev_and_array_sectors(struct raid_set *rs, bool use_mddev)
mddev->array_sectors = array_sectors;
mddev->dev_sectors = dev_sectors;
- return 0;
+ return _check_data_dev_sectors(rs);
bad:
rs->ti->error = "Target length not divisible by number of data devices";
return -EINVAL;
@@ -1674,8 +1724,11 @@ static void do_table_event(struct work_struct *ws)
struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);
smp_rmb(); /* Make sure we access the most recent mddev properties */
- if (!rs_is_reshaping(rs))
+ if (!rs_is_reshaping(rs)) {
+ if (rs_is_raid10(rs))
+ rs_set_rdev_sectors(rs);
rs_set_capacity(rs);
+ }
dm_table_event(rs->ti->table);
}
@@ -1860,7 +1913,7 @@ static bool rs_reshape_requested(struct raid_set *rs)
if (rs_takeover_requested(rs))
return false;
- if (!mddev->level)
+ if (rs_is_raid0(rs))
return false;
change = mddev->new_layout != mddev->layout ||
@@ -1868,7 +1921,7 @@ static bool rs_reshape_requested(struct raid_set *rs)
rs->delta_disks;
/* Historical case to support raid1 reshape without delta disks */
- if (mddev->level == 1) {
+ if (rs_is_raid1(rs)) {
if (rs->delta_disks)
return !!rs->delta_disks;
@@ -1876,7 +1929,7 @@ static bool rs_reshape_requested(struct raid_set *rs)
mddev->raid_disks != rs->raid_disks;
}
- if (mddev->level == 10)
+ if (rs_is_raid10(rs))
return change &&
!__is_raid10_far(mddev->new_layout) &&
rs->delta_disks >= 0;
@@ -2340,7 +2393,7 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
DMERR("new device%s provided without 'rebuild'",
new_devs > 1 ? "s" : "");
return -EINVAL;
- } else if (rs_is_recovering(rs)) {
+ } else if (!test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) && rs_is_recovering(rs)) {
DMERR("'rebuild' specified while raid set is not in-sync (recovery_cp=%llu)",
(unsigned long long) mddev->recovery_cp);
return -EINVAL;
@@ -2640,12 +2693,19 @@ static int rs_adjust_data_offsets(struct raid_set *rs)
* Make sure we got a minimum amount of free sectors per device
*/
if (rs->data_offset &&
- to_sector(i_size_read(rdev->bdev->bd_inode)) - rdev->sectors < MIN_FREE_RESHAPE_SPACE) {
+ to_sector(i_size_read(rdev->bdev->bd_inode)) - rs->md.dev_sectors < MIN_FREE_RESHAPE_SPACE) {
rs->ti->error = data_offset ? "No space for forward reshape" :
"No space for backward reshape";
return -ENOSPC;
}
out:
+ /*
+ * Raise recovery_cp in case data_offset != 0 to
+ * avoid false recovery positives in the constructor.
+ */
+ if (rs->md.recovery_cp < rs->md.dev_sectors)
+ rs->md.recovery_cp += rs->dev[0].rdev.data_offset;
+
/* Adjust data offsets on all rdevs but on any raid4/5/6 journal device */
rdev_for_each(rdev, &rs->md) {
if (!test_bit(Journal, &rdev->flags)) {
@@ -2682,14 +2742,14 @@ static int rs_setup_takeover(struct raid_set *rs)
sector_t new_data_offset = rs->dev[0].rdev.data_offset ? 0 : rs->data_offset;
if (rt_is_raid10(rs->raid_type)) {
- if (mddev->level == 0) {
+ if (rs_is_raid0(rs)) {
/* Userspace reordered disks -> adjust raid_disk indexes */
__reorder_raid_disk_indexes(rs);
/* raid0 -> raid10_far layout */
mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_FAR,
rs->raid10_copies);
- } else if (mddev->level == 1)
+ } else if (rs_is_raid1(rs))
/* raid1 -> raid10_near layout */
mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR,
rs->raid_disks);
@@ -2777,6 +2837,23 @@ static int rs_prepare_reshape(struct raid_set *rs)
return 0;
}
+/* Get reshape sectors from data_offsets or raid set */
+static sector_t _get_reshape_sectors(struct raid_set *rs)
+{
+ struct md_rdev *rdev;
+ sector_t reshape_sectors = 0;
+
+ rdev_for_each(rdev, &rs->md)
+ if (!test_bit(Journal, &rdev->flags)) {
+ reshape_sectors = (rdev->data_offset > rdev->new_data_offset) ?
+ rdev->data_offset - rdev->new_data_offset :
+ rdev->new_data_offset - rdev->data_offset;
+ break;
+ }
+
+ return max(reshape_sectors, (sector_t) rs->data_offset);
+}
+
/*
*
* - change raid layout
@@ -2788,6 +2865,7 @@ static int rs_setup_reshape(struct raid_set *rs)
{
int r = 0;
unsigned int cur_raid_devs, d;
+ sector_t reshape_sectors = _get_reshape_sectors(rs);
struct mddev *mddev = &rs->md;
struct md_rdev *rdev;
@@ -2804,13 +2882,13 @@ static int rs_setup_reshape(struct raid_set *rs)
/*
* Adjust array size:
*
- * - in case of adding disks, array size has
+ * - in case of adding disk(s), array size has
* to grow after the disk adding reshape,
* which'll happen in the event handler;
* reshape will happen forward, so space has to
* be available at the beginning of each disk
*
- * - in case of removing disks, array size
+ * - in case of removing disk(s), array size
* has to shrink before starting the reshape,
* which'll happen here;
* reshape will happen backward, so space has to
@@ -2841,7 +2919,7 @@ static int rs_setup_reshape(struct raid_set *rs)
rdev->recovery_offset = rs_is_raid1(rs) ? 0 : MaxSector;
}
- mddev->reshape_backwards = 0; /* adding disks -> forward reshape */
+ mddev->reshape_backwards = 0; /* adding disk(s) -> forward reshape */
/* Remove disk(s) */
} else if (rs->delta_disks < 0) {
@@ -2874,6 +2952,15 @@ static int rs_setup_reshape(struct raid_set *rs)
mddev->reshape_backwards = rs->dev[0].rdev.data_offset ? 0 : 1;
}
+ /*
+ * Adjust device size for forward reshape
+ * because md_finish_reshape() reduces it.
+ */
+ if (!mddev->reshape_backwards)
+ rdev_for_each(rdev, &rs->md)
+ if (!test_bit(Journal, &rdev->flags))
+ rdev->sectors += reshape_sectors;
+
return r;
}
@@ -2890,7 +2977,7 @@ static void configure_discard_support(struct raid_set *rs)
/*
* XXX: RAID level 4,5,6 require zeroing for safety.
*/
- raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6);
+ raid456 = rs_is_raid456(rs);
for (i = 0; i < rs->raid_disks; i++) {
struct request_queue *q;
@@ -2915,7 +3002,7 @@ static void configure_discard_support(struct raid_set *rs)
* RAID1 and RAID10 personalities require bio splitting,
* RAID0/4/5/6 don't and process large discard bios properly.
*/
- ti->split_discard_bios = !!(rs->md.level == 1 || rs->md.level == 10);
+ ti->split_discard_bios = !!(rs_is_raid1(rs) || rs_is_raid10(rs));
ti->num_discard_bios = 1;
}
@@ -2935,10 +3022,10 @@ static void configure_discard_support(struct raid_set *rs)
static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
int r;
- bool resize;
+ bool resize = false;
struct raid_type *rt;
unsigned int num_raid_params, num_raid_devs;
- sector_t calculated_dev_sectors, rdev_sectors;
+ sector_t calculated_dev_sectors, rdev_sectors, reshape_sectors;
struct raid_set *rs = NULL;
const char *arg;
struct rs_layout rs_layout;
@@ -3021,7 +3108,10 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- resize = calculated_dev_sectors != rdev_sectors;
+
+ reshape_sectors = _get_reshape_sectors(rs);
+ if (calculated_dev_sectors != rdev_sectors)
+ resize = calculated_dev_sectors != (reshape_sectors ? rdev_sectors - reshape_sectors : rdev_sectors);
INIT_WORK(&rs->md.event_work, do_table_event);
ti->private = rs;
@@ -3105,19 +3195,22 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- /*
- * We can only prepare for a reshape here, because the
- * raid set needs to run to provide the respective reshape
- * check functions via its MD personality instance.
- *
- * So do the reshape check after md_run() succeeded.
- */
- r = rs_prepare_reshape(rs);
- if (r)
- return r;
+ /* Out-of-place space has to be available to allow for a reshape unless raid1! */
+ if (reshape_sectors || rs_is_raid1(rs)) {
+ /*
+ * We can only prepare for a reshape here, because the
+ * raid set needs to run to provide the respective reshape
+ * check functions via its MD personality instance.
+ *
+ * So do the reshape check after md_run() succeeded.
+ */
+ r = rs_prepare_reshape(rs);
+ if (r)
+ return r;
- /* Reshaping ain't recovery, so disable recovery */
- rs_setup_recovery(rs, MaxSector);
+ /* Reshaping ain't recovery, so disable recovery */
+ rs_setup_recovery(rs, MaxSector);
+ }
rs_set_cur(rs);
} else {
/* May not set recovery when a device rebuild is requested */
@@ -3144,13 +3237,20 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
mddev_lock_nointr(&rs->md);
r = md_run(&rs->md);
rs->md.in_sync = 0; /* Assume already marked dirty */
-
if (r) {
ti->error = "Failed to run raid array";
mddev_unlock(&rs->md);
goto bad;
}
+ r = md_start(&rs->md);
+
+ if (r) {
+ ti->error = "Failed to start raid array";
+ mddev_unlock(&rs->md);
+ goto bad_md_start;
+ }
+
rs->callbacks.congested_fn = raid_is_congested;
dm_table_add_target_callbacks(ti->table, &rs->callbacks);
@@ -3198,6 +3298,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
mddev_unlock(&rs->md);
return 0;
+bad_md_start:
bad_journal_mode_set:
bad_stripe_cache:
bad_check_reshape:
@@ -3239,25 +3340,27 @@ static int raid_map(struct dm_target *ti, struct bio *bio)
}
/* Return string describing the current sync action of @mddev */
-static const char *decipher_sync_action(struct mddev *mddev)
+static const char *decipher_sync_action(struct mddev *mddev, unsigned long recovery)
{
- if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
+ if (test_bit(MD_RECOVERY_FROZEN, &recovery))
return "frozen";
- if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
- (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
- if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
+ /* The MD sync thread can be done with I/O but still be running */
+ if (!test_bit(MD_RECOVERY_DONE, &recovery) &&
+ (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
+ (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery)))) {
+ if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
return "reshape";
- if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
- if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
+ if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
+ if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
return "resync";
- else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
+ else if (test_bit(MD_RECOVERY_CHECK, &recovery))
return "check";
return "repair";
}
- if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
+ if (test_bit(MD_RECOVERY_RECOVER, &recovery))
return "recover";
}
@@ -3274,7 +3377,7 @@ static const char *decipher_sync_action(struct mddev *mddev)
* 'A' = Alive and in-sync raid set component _or_ alive raid4/5/6 'write_through' journal device
* '-' = Non-existing device (i.e. userspace passed '- -' into the ctr)
*/
-static const char *__raid_dev_status(struct raid_set *rs, struct md_rdev *rdev, bool array_in_sync)
+static const char *__raid_dev_status(struct raid_set *rs, struct md_rdev *rdev)
{
if (!rdev->bdev)
return "-";
@@ -3282,85 +3385,108 @@ static const char *__raid_dev_status(struct raid_set *rs, struct md_rdev *rdev,
return "D";
else if (test_bit(Journal, &rdev->flags))
return (rs->journal_dev.mode == R5C_JOURNAL_MODE_WRITE_THROUGH) ? "A" : "a";
- else if (!array_in_sync || !test_bit(In_sync, &rdev->flags))
+ else if (test_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags) ||
+ (!test_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags) &&
+ !test_bit(In_sync, &rdev->flags)))
return "a";
else
return "A";
}
-/* Helper to return resync/reshape progress for @rs and @array_in_sync */
-static sector_t rs_get_progress(struct raid_set *rs,
- sector_t resync_max_sectors, bool *array_in_sync)
+/* Helper to return resync/reshape progress for @rs and to set runtime flags reflecting an in-sync or resynchronizing raid set */
+static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
+ sector_t resync_max_sectors)
{
- sector_t r, curr_resync_completed;
+ sector_t r;
struct mddev *mddev = &rs->md;
- curr_resync_completed = mddev->curr_resync_completed ?: mddev->recovery_cp;
- *array_in_sync = false;
+ clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
+ clear_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
if (rs_is_raid0(rs)) {
r = resync_max_sectors;
- *array_in_sync = true;
+ set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
} else {
- r = mddev->reshape_position;
-
- /* Reshape is relative to the array size */
- if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
- r != MaxSector) {
- if (r == MaxSector) {
- *array_in_sync = true;
- r = resync_max_sectors;
- } else {
- /* Got to reverse on backward reshape */
- if (mddev->reshape_backwards)
- r = mddev->array_sectors - r;
-
- /* Divide by # of data stripes */
- sector_div(r, mddev_data_stripes(rs));
- }
-
- /* Sync is relative to the component device size */
- } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
- r = curr_resync_completed;
+ if (test_bit(MD_RECOVERY_NEEDED, &recovery) ||
+ test_bit(MD_RECOVERY_RESHAPE, &recovery) ||
+ test_bit(MD_RECOVERY_RUNNING, &recovery))
+ r = mddev->curr_resync_completed;
else
r = mddev->recovery_cp;
- if ((r == MaxSector) ||
- (test_bit(MD_RECOVERY_DONE, &mddev->recovery) &&
- (mddev->curr_resync_completed == resync_max_sectors))) {
+ if (r >= resync_max_sectors &&
+ (!test_bit(MD_RECOVERY_REQUESTED, &recovery) ||
+ (!test_bit(MD_RECOVERY_FROZEN, &recovery) &&
+ !test_bit(MD_RECOVERY_NEEDED, &recovery) &&
+ !test_bit(MD_RECOVERY_RUNNING, &recovery)))) {
/*
* Sync complete.
*/
- *array_in_sync = true;
- r = resync_max_sectors;
- } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
+ /* In case we have finished recovering, the array is in sync. */
+ if (test_bit(MD_RECOVERY_RECOVER, &recovery))
+ set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
+
+ } else if (test_bit(MD_RECOVERY_RECOVER, &recovery)) {
+ /*
+ * In case we are recovering, the array is not in sync
+ * and health chars should show the recovering legs.
+ */
+ ;
+
+ } else if (test_bit(MD_RECOVERY_SYNC, &recovery) &&
+ !test_bit(MD_RECOVERY_REQUESTED, &recovery)) {
+ /*
+ * If "resync" is occurring, the raid set
+ * is or may be out of sync hence the health
+ * characters shall be 'a'.
+ */
+ set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
+
+ } else if (test_bit(MD_RECOVERY_RESHAPE, &recovery) &&
+ !test_bit(MD_RECOVERY_REQUESTED, &recovery)) {
+ /*
+ * If "reshape" is occurring, the raid set
+ * is or may be out of sync hence the health
+ * characters shall be 'a'.
+ */
+ set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
+
+ } else if (test_bit(MD_RECOVERY_REQUESTED, &recovery)) {
/*
* If "check" or "repair" is occurring, the raid set has
* undergone an initial sync and the health characters
* should not be 'a' anymore.
*/
- *array_in_sync = true;
+ set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
+
} else {
struct md_rdev *rdev;
/*
+ * We are idle and recovery is needed, so prevent an 'A' chars race
+ * caused by components still set to in-sync by the constructor.
+ */
+ if (test_bit(MD_RECOVERY_NEEDED, &recovery))
+ set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
+
+ /*
* The raid set may be doing an initial sync, or it may
* be rebuilding individual components. If all the
* devices are In_sync, then it is the raid set that is
* being initialized.
*/
+ set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
rdev_for_each(rdev, mddev)
if (!test_bit(Journal, &rdev->flags) &&
- !test_bit(In_sync, &rdev->flags))
- *array_in_sync = true;
-#if 0
- r = 0; /* HM FIXME: TESTME: https://bugzilla.redhat.com/show_bug.cgi?id=1210637 ? */
-#endif
+ !test_bit(In_sync, &rdev->flags)) {
+ clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
+ break;
+ }
}
}
- return r;
+ return min(r, resync_max_sectors);
}
/* Helper to return @dev name or "-" if !@dev */
@@ -3376,7 +3502,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
struct mddev *mddev = &rs->md;
struct r5conf *conf = mddev->private;
int i, max_nr_stripes = conf ? conf->max_nr_stripes : 0;
- bool array_in_sync;
+ unsigned long recovery;
unsigned int raid_param_cnt = 1; /* at least 1 for chunksize */
unsigned int sz = 0;
unsigned int rebuild_disks;
@@ -3396,17 +3522,18 @@ static void raid_status(struct dm_target *ti, status_type_t type,
/* Access most recent mddev properties for status output */
smp_rmb();
+ recovery = rs->md.recovery;
/* Get sensible max sectors even if raid set not yet started */
resync_max_sectors = test_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags) ?
mddev->resync_max_sectors : mddev->dev_sectors;
- progress = rs_get_progress(rs, resync_max_sectors, &array_in_sync);
+ progress = rs_get_progress(rs, recovery, resync_max_sectors);
resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ?
atomic64_read(&mddev->resync_mismatches) : 0;
- sync_action = decipher_sync_action(&rs->md);
+ sync_action = decipher_sync_action(&rs->md, recovery);
/* HM FIXME: do we want another state char for raid0? It shows 'D'/'A'/'-' now */
for (i = 0; i < rs->raid_disks; i++)
- DMEMIT(__raid_dev_status(rs, &rs->dev[i].rdev, array_in_sync));
+ DMEMIT(__raid_dev_status(rs, &rs->dev[i].rdev));
/*
* In-sync/Reshape ratio:
@@ -3457,7 +3584,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
* v1.10.0+:
*/
DMEMIT(" %s", test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags) ?
- __raid_dev_status(rs, &rs->journal_dev.rdev, 0) : "-");
+ __raid_dev_status(rs, &rs->journal_dev.rdev) : "-");
break;
case STATUSTYPE_TABLE:
@@ -3613,24 +3740,19 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
blk_limits_io_opt(limits, chunk_size * mddev_data_stripes(rs));
}
-static void raid_presuspend(struct dm_target *ti)
-{
- struct raid_set *rs = ti->private;
-
- md_stop_writes(&rs->md);
-}
-
static void raid_postsuspend(struct dm_target *ti)
{
struct raid_set *rs = ti->private;
if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
+ /* Writes have to be stopped before suspending to avoid deadlocks. */
+ if (!test_bit(MD_RECOVERY_FROZEN, &rs->md.recovery))
+ md_stop_writes(&rs->md);
+
mddev_lock_nointr(&rs->md);
mddev_suspend(&rs->md);
mddev_unlock(&rs->md);
}
-
- rs->md.ro = 1;
}
static void attempt_restore_of_faulty_devices(struct raid_set *rs)
@@ -3807,10 +3929,33 @@ static int raid_preresume(struct dm_target *ti)
struct raid_set *rs = ti->private;
struct mddev *mddev = &rs->md;
- /* This is a resume after a suspend of the set -> it's already started */
+ /* This is a resume after a suspend of the set -> it's already started. */
if (test_and_set_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags))
return 0;
+ if (!test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
+ struct raid_set *rs_active = rs_find_active(rs);
+
+ if (rs_active) {
+ /*
+ * In case no rebuilds have been requested
+ * and an active table slot exists, copy
+ * current resynchonization completed and
+ * reshape position pointers across from
+ * suspended raid set in the active slot.
+ *
+ * This resumes the new mapping at current
+ * offsets to continue recovery/reshape without
+ * necessarily redoing a raid set partially or
+ * causing data corruption in case of a reshape.
+ */
+ if (rs_active->md.curr_resync_completed != MaxSector)
+ mddev->curr_resync_completed = rs_active->md.curr_resync_completed;
+ if (rs_active->md.reshape_position != MaxSector)
+ mddev->reshape_position = rs_active->md.reshape_position;
+ }
+ }
+
/*
* The superblocks need to be updated on disk if the
* array is new or new devices got added (thus zeroed
@@ -3842,11 +3987,10 @@ static int raid_preresume(struct dm_target *ti)
mddev->resync_min = mddev->recovery_cp;
}
- rs_set_capacity(rs);
-
/* Check for any reshape request unless new raid set */
- if (test_and_clear_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
+ if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
/* Initiate a reshape. */
+ rs_set_rdev_sectors(rs);
mddev_lock_nointr(mddev);
r = rs_start_reshape(rs);
mddev_unlock(mddev);
@@ -3872,21 +4016,15 @@ static void raid_resume(struct dm_target *ti)
attempt_restore_of_faulty_devices(rs);
}
- mddev->ro = 0;
- mddev->in_sync = 0;
-
- /*
- * Keep the RAID set frozen if reshape/rebuild flags are set.
- * The RAID set is unfrozen once the next table load/resume,
- * which clears the reshape/rebuild flags, occurs.
- * This ensures that the constructor for the inactive table
- * retrieves an up-to-date reshape_position.
- */
- if (!(rs->ctr_flags & RESUME_STAY_FROZEN_FLAGS))
- clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-
if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
+ /* Only reduce raid set size before running a disk-removing reshape. */
+ if (mddev->delta_disks < 0)
+ rs_set_capacity(rs);
+
mddev_lock_nointr(mddev);
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ mddev->ro = 0;
+ mddev->in_sync = 0;
mddev_resume(mddev);
mddev_unlock(mddev);
}
@@ -3894,7 +4032,7 @@ static void raid_resume(struct dm_target *ti)
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 13, 0},
+ .version = {1, 13, 2},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
@@ -3903,7 +4041,6 @@ static struct target_type raid_target = {
.message = raid_message,
.iterate_devices = raid_iterate_devices,
.io_hints = raid_io_hints,
- .presuspend = raid_presuspend,
.postsuspend = raid_postsuspend,
.preresume = raid_preresume,
.resume = raid_resume,
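
One theme ties the dm-raid status rework together: mddev->recovery is sampled exactly once into a local word, and that snapshot is handed to both decipher_sync_action() and rs_get_progress(), so a single status report cannot observe the MD recovery bits changing underneath it. Condensed from raid_status() above:

    unsigned long recovery;

    smp_rmb();			/* pair with mddev updaters        */
    recovery = rs->md.recovery;	/* one snapshot, reused everywhere */

    progress    = rs_get_progress(rs, recovery, resync_max_sectors);
    sync_action = decipher_sync_action(&rs->md, recovery);

The RT_FLAG_RS_IN_SYNC / RT_FLAG_RS_RESYNCING runtime flags then carry the interpretation of that snapshot into __raid_dev_status(), replacing the old array_in_sync out-parameter.
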
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 9d32f25489c2..aeaaaef43eff 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -315,6 +315,10 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped)
/* The target wants to requeue the I/O */
dm_requeue_original_request(tio, false);
break;
+ case DM_ENDIO_DELAY_REQUEUE:
+ /* The target wants to requeue the I/O after a delay */
+ dm_requeue_original_request(tio, true);
+ break;
default:
DMWARN("unimplemented target endio return value: %d", r);
BUG();
@@ -395,7 +399,7 @@ static void end_clone_request(struct request *clone, blk_status_t error)
dm_complete_request(tio->orig, error);
}
-static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
+static blk_status_t dm_dispatch_clone_request(struct request *clone, struct request *rq)
{
blk_status_t r;
@@ -404,9 +408,10 @@ static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
clone->start_time = jiffies;
r = blk_insert_cloned_request(clone->q, clone);
- if (r)
+ if (r != BLK_STS_OK && r != BLK_STS_RESOURCE)
/* must complete clone in terms of original request */
dm_complete_request(rq, r);
+ return r;
}
static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
@@ -476,8 +481,10 @@ static int map_request(struct dm_rq_target_io *tio)
struct mapped_device *md = tio->md;
struct request *rq = tio->orig;
struct request *clone = NULL;
+ blk_status_t ret;
r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
+check_again:
switch (r) {
case DM_MAPIO_SUBMITTED:
/* The target has taken the I/O to submit by itself later */
@@ -492,7 +499,17 @@ static int map_request(struct dm_rq_target_io *tio)
/* The target has remapped the I/O so dispatch it */
trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
blk_rq_pos(rq));
- dm_dispatch_clone_request(clone, rq);
+ ret = dm_dispatch_clone_request(clone, rq);
+ if (ret == BLK_STS_RESOURCE) {
+ blk_rq_unprep_clone(clone);
+ tio->ti->type->release_clone_rq(clone);
+ tio->clone = NULL;
+ if (!rq->q->mq_ops)
+ r = DM_MAPIO_DELAY_REQUEUE;
+ else
+ r = DM_MAPIO_REQUEUE;
+ goto check_again;
+ }
break;
case DM_MAPIO_REQUEUE:
/* The target wants to requeue the I/O */
@@ -700,7 +717,6 @@ int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
/* disable dm_old_request_fn's merge heuristic by default */
md->seq_rq_merge_deadline_usecs = 0;
- dm_init_normal_md_queue(md);
blk_queue_softirq_done(md->queue, dm_softirq_done);
/* Initialize the request-based DM worker thread */
@@ -713,8 +729,6 @@ int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
return error;
}
- elv_register_queue(md->queue);
-
return 0;
}
@@ -810,17 +824,9 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
err = PTR_ERR(q);
goto out_tag_set;
}
- dm_init_md_queue(md);
-
- /* backfill 'mq' sysfs registration normally done in blk_register_queue */
- err = blk_mq_register_dev(disk_to_dev(md->disk), q);
- if (err)
- goto out_cleanup_queue;
return 0;
-out_cleanup_queue:
- blk_cleanup_queue(q);
out_tag_set:
blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
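
The BLK_STS_RESOURCE handling added to map_request() closes a requeue gap: when blk_insert_cloned_request() reports that the bottom device ran out of resources, the clone is now unwound and the original request requeued instead of completed with an error. Condensed from the hunk above, including the legacy/blk-mq split:

    ret = dm_dispatch_clone_request(clone, rq);
    if (ret == BLK_STS_RESOURCE) {
    	blk_rq_unprep_clone(clone);		/* return the bios      */
    	tio->ti->type->release_clone_rq(clone);	/* free the clone       */
    	tio->clone = NULL;
    	r = rq->q->mq_ops ? DM_MAPIO_REQUEUE		/* blk-mq requeues  */
    			  : DM_MAPIO_DELAY_REQUEUE;	/* old sq backs off */
    	goto check_again;			/* re-enter the switch  */
    }

DM_ENDIO_DELAY_REQUEUE in dm_done() is the completion-side twin, mapping to dm_requeue_original_request(tio, true).
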
diff --git a/drivers/md/dm-service-time.c b/drivers/md/dm-service-time.c
index 7b8642045c55..f006a9005593 100644
--- a/drivers/md/dm-service-time.c
+++ b/drivers/md/dm-service-time.c
@@ -282,9 +282,6 @@ static struct dm_path *st_select_path(struct path_selector *ps, size_t nr_bytes)
if (list_empty(&s->valid_paths))
goto out;
- /* Change preferred (first in list) path to evenly balance. */
- list_move_tail(s->valid_paths.next, &s->valid_paths);
-
list_for_each_entry(pi, &s->valid_paths, list)
if (!best || (st_compare_load(pi, best, nr_bytes) < 0))
best = pi;
@@ -292,6 +289,9 @@ static struct dm_path *st_select_path(struct path_selector *ps, size_t nr_bytes)
if (!best)
goto out;
+ /* Move most recently used to least preferred to evenly balance. */
+ list_move_tail(&best->list, &s->valid_paths);
+
ret = best->path;
out:
spin_unlock_irqrestore(&s->lock, flags);
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index a0613bd8ed00..216035be5661 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -47,7 +47,7 @@ struct dm_exception_table {
};
struct dm_snapshot {
- struct rw_semaphore lock;
+ struct mutex lock;
struct dm_dev *origin;
struct dm_dev *cow;
@@ -439,9 +439,9 @@ static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
continue;
- down_read(&s->lock);
+ mutex_lock(&s->lock);
active = s->active;
- up_read(&s->lock);
+ mutex_unlock(&s->lock);
if (active) {
if (snap_src)
@@ -909,7 +909,7 @@ static int remove_single_exception_chunk(struct dm_snapshot *s)
int r;
chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;
- down_write(&s->lock);
+ mutex_lock(&s->lock);
/*
* Process chunks (and associated exceptions) in reverse order
@@ -924,7 +924,7 @@ static int remove_single_exception_chunk(struct dm_snapshot *s)
b = __release_queued_bios_after_merge(s);
out:
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
if (b)
flush_bios(b);
@@ -983,9 +983,9 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
if (linear_chunks < 0) {
DMERR("Read error in exception store: "
"shutting down merge");
- down_write(&s->lock);
+ mutex_lock(&s->lock);
s->merge_failed = 1;
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
}
goto shut;
}
@@ -1026,10 +1026,10 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
previous_count = read_pending_exceptions_done_count();
}
- down_write(&s->lock);
+ mutex_lock(&s->lock);
s->first_merging_chunk = old_chunk;
s->num_merging_chunks = linear_chunks;
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
/* Wait until writes to all 'linear_chunks' drain */
for (i = 0; i < linear_chunks; i++)
@@ -1071,10 +1071,10 @@ static void merge_callback(int read_err, unsigned long write_err, void *context)
return;
shut:
- down_write(&s->lock);
+ mutex_lock(&s->lock);
s->merge_failed = 1;
b = __release_queued_bios_after_merge(s);
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
error_bios(b);
merge_shutdown(s);
@@ -1173,7 +1173,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
s->exception_start_sequence = 0;
s->exception_complete_sequence = 0;
INIT_LIST_HEAD(&s->out_of_order_list);
- init_rwsem(&s->lock);
+ mutex_init(&s->lock);
INIT_LIST_HEAD(&s->list);
spin_lock_init(&s->pe_lock);
s->state_bits = 0;
@@ -1338,9 +1338,9 @@ static void snapshot_dtr(struct dm_target *ti)
/* Check whether exception handover must be cancelled */
(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
if (snap_src && snap_dest && (s == snap_src)) {
- down_write(&snap_dest->lock);
+ mutex_lock(&snap_dest->lock);
snap_dest->valid = 0;
- up_write(&snap_dest->lock);
+ mutex_unlock(&snap_dest->lock);
DMERR("Cancelling snapshot handover.");
}
up_read(&_origins_lock);
@@ -1371,6 +1371,8 @@ static void snapshot_dtr(struct dm_target *ti)
dm_exception_store_destroy(s->store);
+ mutex_destroy(&s->lock);
+
dm_put_device(ti, s->cow);
dm_put_device(ti, s->origin);
@@ -1458,7 +1460,7 @@ static void pending_complete(void *context, int success)
if (!success) {
/* Read/write error - snapshot is unusable */
- down_write(&s->lock);
+ mutex_lock(&s->lock);
__invalidate_snapshot(s, -EIO);
error = 1;
goto out;
@@ -1466,14 +1468,14 @@ static void pending_complete(void *context, int success)
e = alloc_completed_exception(GFP_NOIO);
if (!e) {
- down_write(&s->lock);
+ mutex_lock(&s->lock);
__invalidate_snapshot(s, -ENOMEM);
error = 1;
goto out;
}
*e = pe->e;
- down_write(&s->lock);
+ mutex_lock(&s->lock);
if (!s->valid) {
free_completed_exception(e);
error = 1;
@@ -1498,7 +1500,7 @@ out:
full_bio->bi_end_io = pe->full_bio_end_io;
increment_pending_exceptions_done_count();
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
/* Submit any pending write bios */
if (error) {
@@ -1694,7 +1696,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
/* FIXME: should only take write lock if we need
* to copy an exception */
- down_write(&s->lock);
+ mutex_lock(&s->lock);
if (!s->valid || (unlikely(s->snapshot_overflowed) &&
bio_data_dir(bio) == WRITE)) {
@@ -1717,9 +1719,9 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
if (bio_data_dir(bio) == WRITE) {
pe = __lookup_pending_exception(s, chunk);
if (!pe) {
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
pe = alloc_pending_exception(s);
- down_write(&s->lock);
+ mutex_lock(&s->lock);
if (!s->valid || s->snapshot_overflowed) {
free_pending_exception(pe);
@@ -1754,7 +1756,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
bio->bi_iter.bi_size ==
(s->store->chunk_size << SECTOR_SHIFT)) {
pe->started = 1;
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
start_full_bio(pe, bio);
goto out;
}
@@ -1764,7 +1766,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
if (!pe->started) {
/* this is protected by snap->lock */
pe->started = 1;
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
start_copy(pe);
goto out;
}
@@ -1774,7 +1776,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
}
out_unlock:
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
out:
return r;
}
@@ -1810,7 +1812,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
- down_write(&s->lock);
+ mutex_lock(&s->lock);
/* Full merging snapshots are redirected to the origin */
if (!s->valid)
@@ -1841,12 +1843,12 @@ redirect_to_origin:
bio_set_dev(bio, s->origin->bdev);
if (bio_data_dir(bio) == WRITE) {
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
return do_origin(s->origin, bio);
}
out_unlock:
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
return r;
}
@@ -1878,7 +1880,7 @@ static int snapshot_preresume(struct dm_target *ti)
down_read(&_origins_lock);
(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
if (snap_src && snap_dest) {
- down_read(&snap_src->lock);
+ mutex_lock(&snap_src->lock);
if (s == snap_src) {
DMERR("Unable to resume snapshot source until "
"handover completes.");
@@ -1888,7 +1890,7 @@ static int snapshot_preresume(struct dm_target *ti)
"source is suspended.");
r = -EINVAL;
}
- up_read(&snap_src->lock);
+ mutex_unlock(&snap_src->lock);
}
up_read(&_origins_lock);
@@ -1934,11 +1936,11 @@ static void snapshot_resume(struct dm_target *ti)
(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
if (snap_src && snap_dest) {
- down_write(&snap_src->lock);
- down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
+ mutex_lock(&snap_src->lock);
+ mutex_lock_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
__handover_exceptions(snap_src, snap_dest);
- up_write(&snap_dest->lock);
- up_write(&snap_src->lock);
+ mutex_unlock(&snap_dest->lock);
+ mutex_unlock(&snap_src->lock);
}
up_read(&_origins_lock);
@@ -1953,9 +1955,9 @@ static void snapshot_resume(struct dm_target *ti)
/* Now we have correct chunk size, reregister */
reregister_snapshot(s);
- down_write(&s->lock);
+ mutex_lock(&s->lock);
s->active = 1;
- up_write(&s->lock);
+ mutex_unlock(&s->lock);
}
static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
@@ -1995,7 +1997,7 @@ static void snapshot_status(struct dm_target *ti, status_type_t type,
switch (type) {
case STATUSTYPE_INFO:
- down_write(&snap->lock);
+ mutex_lock(&snap->lock);
if (!snap->valid)
DMEMIT("Invalid");
@@ -2020,7 +2022,7 @@ static void snapshot_status(struct dm_target *ti, status_type_t type,
DMEMIT("Unknown");
}
- up_write(&snap->lock);
+ mutex_unlock(&snap->lock);
break;
@@ -2086,7 +2088,7 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
if (dm_target_is_snapshot_merge(snap->ti))
continue;
- down_write(&snap->lock);
+ mutex_lock(&snap->lock);
/* Only deal with valid and active snapshots */
if (!snap->valid || !snap->active)
@@ -2113,9 +2115,9 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
pe = __lookup_pending_exception(snap, chunk);
if (!pe) {
- up_write(&snap->lock);
+ mutex_unlock(&snap->lock);
pe = alloc_pending_exception(snap);
- down_write(&snap->lock);
+ mutex_lock(&snap->lock);
if (!snap->valid) {
free_pending_exception(pe);
@@ -2158,7 +2160,7 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
}
next_snapshot:
- up_write(&snap->lock);
+ mutex_unlock(&snap->lock);
if (pe_to_start_now) {
start_copy(pe_to_start_now);
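
The snapshot locking change is mechanical — every down_read()/down_write() pair above becomes mutex_lock()/mutex_unlock() — and the pattern suggests the motivation: nearly every acquisition was already exclusive, so reader/writer semantics bought nothing, while a mutex gains optimistic spin-on-owner and, via the mutex_destroy() added to the dtr, lifetime checking. The resulting lifecycle, sketched from the hunks above:

    /* snapshot_ctr() */
    mutex_init(&s->lock);

    /* every former reader or writer section, now simply exclusive */
    mutex_lock(&s->lock);
    active = s->active;
    mutex_unlock(&s->lock);

    /* snapshot_dtr(): a no-op in production builds, but poisons the
     * lock so a late user trips CONFIG_DEBUG_MUTEXES */
    mutex_destroy(&s->lock);

The same mutex_destroy() pairing shows up below in dm-stats, dm-thin, dm-zoned-metadata, and dm-zoned-target.
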
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 29bc51084c82..56059fb56e2d 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -228,6 +228,7 @@ void dm_stats_cleanup(struct dm_stats *stats)
dm_stat_free(&s->rcu_head);
}
free_percpu(stats->last);
+ mutex_destroy(&stats->mutex);
}
static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index aaffd0c0ee9a..5fe7ec356c33 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -866,7 +866,8 @@ EXPORT_SYMBOL(dm_consume_args);
static bool __table_type_bio_based(enum dm_queue_mode table_type)
{
return (table_type == DM_TYPE_BIO_BASED ||
- table_type == DM_TYPE_DAX_BIO_BASED);
+ table_type == DM_TYPE_DAX_BIO_BASED ||
+ table_type == DM_TYPE_NVME_BIO_BASED);
}
static bool __table_type_request_based(enum dm_queue_mode table_type)
@@ -909,13 +910,33 @@ static bool dm_table_supports_dax(struct dm_table *t)
return true;
}
+static bool dm_table_does_not_support_partial_completion(struct dm_table *t);
+
+struct verify_rq_based_data {
+ unsigned sq_count;
+ unsigned mq_count;
+};
+
+static int device_is_rq_based(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ struct request_queue *q = bdev_get_queue(dev->bdev);
+ struct verify_rq_based_data *v = data;
+
+ if (q->mq_ops)
+ v->mq_count++;
+ else
+ v->sq_count++;
+
+ return queue_is_rq_based(q);
+}
+
static int dm_table_determine_type(struct dm_table *t)
{
unsigned i;
unsigned bio_based = 0, request_based = 0, hybrid = 0;
- unsigned sq_count = 0, mq_count = 0;
+ struct verify_rq_based_data v = {.sq_count = 0, .mq_count = 0};
struct dm_target *tgt;
- struct dm_dev_internal *dd;
struct list_head *devices = dm_table_get_devices(t);
enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
@@ -923,6 +944,14 @@ static int dm_table_determine_type(struct dm_table *t)
/* target already set the table's type */
if (t->type == DM_TYPE_BIO_BASED)
return 0;
+ else if (t->type == DM_TYPE_NVME_BIO_BASED) {
+ if (!dm_table_does_not_support_partial_completion(t)) {
+ DMERR("nvme bio-based is only possible with devices"
+ " that don't support partial completion");
+ return -EINVAL;
+ }
+ /* Fallthru, also verify all devices are blk-mq */
+ }
BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
goto verify_rq_based;
}
@@ -937,8 +966,8 @@ static int dm_table_determine_type(struct dm_table *t)
bio_based = 1;
if (bio_based && request_based) {
- DMWARN("Inconsistent table: different target types"
- " can't be mixed up");
+ DMERR("Inconsistent table: different target types"
+ " can't be mixed up");
return -EINVAL;
}
}
@@ -959,8 +988,18 @@ static int dm_table_determine_type(struct dm_table *t)
/* We must use this table as bio-based */
t->type = DM_TYPE_BIO_BASED;
if (dm_table_supports_dax(t) ||
- (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED))
+ (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
t->type = DM_TYPE_DAX_BIO_BASED;
+ } else {
+ /* Check if upgrading to NVMe bio-based is valid or required */
+ tgt = dm_table_get_immutable_target(t);
+ if (tgt && !tgt->max_io_len && dm_table_does_not_support_partial_completion(t)) {
+ t->type = DM_TYPE_NVME_BIO_BASED;
+ goto verify_rq_based; /* must be stacked directly on NVMe (blk-mq) */
+ } else if (list_empty(devices) && live_md_type == DM_TYPE_NVME_BIO_BASED) {
+ t->type = DM_TYPE_NVME_BIO_BASED;
+ }
+ }
return 0;
}
@@ -980,7 +1019,8 @@ verify_rq_based:
* (e.g. request completion process for partial completion.)
*/
if (t->num_targets > 1) {
- DMWARN("Request-based dm doesn't support multiple targets yet");
+ DMERR("%s DM doesn't support multiple targets",
+ t->type == DM_TYPE_NVME_BIO_BASED ? "nvme bio-based" : "request-based");
return -EINVAL;
}
@@ -997,28 +1037,29 @@ verify_rq_based:
return 0;
}
- /* Non-request-stackable devices can't be used for request-based dm */
- list_for_each_entry(dd, devices, list) {
- struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
-
- if (!queue_is_rq_based(q)) {
- DMERR("table load rejected: including"
- " non-request-stackable devices");
- return -EINVAL;
- }
+ tgt = dm_table_get_immutable_target(t);
+ if (!tgt) {
+ DMERR("table load rejected: immutable target is required");
+ return -EINVAL;
+ } else if (tgt->max_io_len) {
+ DMERR("table load rejected: immutable target that splits IO is not supported");
+ return -EINVAL;
+ }
- if (q->mq_ops)
- mq_count++;
- else
- sq_count++;
+ /* Non-request-stackable devices can't be used for request-based dm */
+ if (!tgt->type->iterate_devices ||
+ !tgt->type->iterate_devices(tgt, device_is_rq_based, &v)) {
+ DMERR("table load rejected: including non-request-stackable devices");
+ return -EINVAL;
}
- if (sq_count && mq_count) {
+ if (v.sq_count && v.mq_count) {
DMERR("table load rejected: not all devices are blk-mq request-stackable");
return -EINVAL;
}
- t->all_blk_mq = mq_count > 0;
+ t->all_blk_mq = v.mq_count > 0;
- if (t->type == DM_TYPE_MQ_REQUEST_BASED && !t->all_blk_mq) {
+ if (!t->all_blk_mq &&
+ (t->type == DM_TYPE_MQ_REQUEST_BASED || t->type == DM_TYPE_NVME_BIO_BASED)) {
DMERR("table load rejected: all devices are not blk-mq request-stackable");
return -EINVAL;
}
@@ -1079,7 +1120,8 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
{
enum dm_queue_mode type = dm_table_get_type(t);
unsigned per_io_data_size = 0;
- struct dm_target *tgt;
+ unsigned min_pool_size = 0;
+ struct dm_target *ti;
unsigned i;
if (unlikely(type == DM_TYPE_NONE)) {
@@ -1089,11 +1131,13 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
if (__table_type_bio_based(type))
for (i = 0; i < t->num_targets; i++) {
- tgt = t->targets + i;
- per_io_data_size = max(per_io_data_size, tgt->per_io_data_size);
+ ti = t->targets + i;
+ per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
+ min_pool_size = max(min_pool_size, ti->num_flush_bios);
}
- t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported, per_io_data_size);
+ t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported,
+ per_io_data_size, min_pool_size);
if (!t->mempools)
return -ENOMEM;
@@ -1705,6 +1749,20 @@ static bool dm_table_all_devices_attribute(struct dm_table *t,
return true;
}
+static int device_no_partial_completion(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ char b[BDEVNAME_SIZE];
+
+ /* For now, NVMe devices are the only devices of this class */
+ return (strncmp(bdevname(dev->bdev, b), "nvme", 4) == 0);
+}
+
+static bool dm_table_does_not_support_partial_completion(struct dm_table *t)
+{
+ return dm_table_all_devices_attribute(t, device_no_partial_completion);
+}
+
static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
@@ -1820,6 +1878,8 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
}
blk_queue_write_cache(q, wc, fua);
+ if (dm_table_supports_dax(t))
+ queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
if (dm_table_supports_dax_write_cache(t))
dax_write_cache(t->md->dax_dev, true);
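
dm-table's request-based verification now runs through the immutable target's iterate_devices method instead of walking the table's raw device list, so only devices the target actually uses are checked, and the same callout carries the sq/mq census. The convention, condensed from the hunks above — the callback's nonzero return keeps the fold true, and arbitrary state travels through the void pointer:

    struct verify_rq_based_data v = { .sq_count = 0, .mq_count = 0 };

    if (!tgt->type->iterate_devices ||
        !tgt->type->iterate_devices(tgt, device_is_rq_based, &v))
    	return -EINVAL;		/* some device is not rq-based */

    if (v.sq_count && v.mq_count)
    	return -EINVAL;		/* mixed legacy-sq and blk-mq  */

    t->all_blk_mq = v.mq_count > 0;
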
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index f91d771fff4b..629c555890c1 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -492,6 +492,11 @@ static void pool_table_init(void)
INIT_LIST_HEAD(&dm_thin_pool_table.pools);
}
+static void pool_table_exit(void)
+{
+ mutex_destroy(&dm_thin_pool_table.mutex);
+}
+
static void __pool_table_insert(struct pool *pool)
{
BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
@@ -1717,7 +1722,7 @@ static void __remap_and_issue_shared_cell(void *context,
bio_op(bio) == REQ_OP_DISCARD)
bio_list_add(&info->defer_bios, bio);
else {
- struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));;
+ struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
h->shared_read_entry = dm_deferred_entry_inc(info->tc->pool->shared_read_ds);
inc_all_io_entry(info->tc->pool, bio);
@@ -4387,6 +4392,8 @@ static void dm_thin_exit(void)
dm_unregister_target(&pool_target);
kmem_cache_destroy(_new_mapping_cache);
+
+ pool_table_exit();
}
module_init(dm_thin_init);
diff --git a/drivers/md/dm-unstripe.c b/drivers/md/dm-unstripe.c
new file mode 100644
index 000000000000..65f838fa2e99
--- /dev/null
+++ b/drivers/md/dm-unstripe.c
@@ -0,0 +1,219 @@
+/*
+ * Copyright (C) 2017 Intel Corporation.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm.h"
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/bio.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/device-mapper.h>
+
+struct unstripe_c {
+ struct dm_dev *dev;
+ sector_t physical_start;
+
+ uint32_t stripes;
+
+ uint32_t unstripe;
+ sector_t unstripe_width;
+ sector_t unstripe_offset;
+
+ uint32_t chunk_size;
+ u8 chunk_shift;
+};
+
+#define DM_MSG_PREFIX "unstriped"
+
+static void cleanup_unstripe(struct unstripe_c *uc, struct dm_target *ti)
+{
+ if (uc->dev)
+ dm_put_device(ti, uc->dev);
+ kfree(uc);
+}
+
+/*
+ * Construct an unstriped mapping.
+ * <number of stripes> <chunk size> <stripe #> <dev_path> <offset>
+ */
+static int unstripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+ struct unstripe_c *uc;
+ sector_t tmp_len;
+ unsigned long long start;
+ char dummy;
+
+ if (argc != 5) {
+ ti->error = "Invalid number of arguments";
+ return -EINVAL;
+ }
+
+ uc = kzalloc(sizeof(*uc), GFP_KERNEL);
+ if (!uc) {
+ ti->error = "Memory allocation for unstriped context failed";
+ return -ENOMEM;
+ }
+
+ if (kstrtouint(argv[0], 10, &uc->stripes) || !uc->stripes) {
+ ti->error = "Invalid stripe count";
+ goto err;
+ }
+
+ if (kstrtouint(argv[1], 10, &uc->chunk_size) || !uc->chunk_size) {
+ ti->error = "Invalid chunk_size";
+ goto err;
+ }
+
+ /* FIXME: must support non power of 2 chunk_size, dm-stripe.c does */
+ if (!is_power_of_2(uc->chunk_size)) {
+ ti->error = "Non power of 2 chunk_size is not supported yet";
+ goto err;
+ }
+
+ if (kstrtouint(argv[2], 10, &uc->unstripe)) {
+ ti->error = "Invalid stripe number";
+ goto err;
+ }
+
+ if (uc->unstripe >= uc->stripes && uc->stripes > 1) {
+ ti->error = "Please provide a stripe number between 0 and # of stripes - 1";
+ goto err;
+ }
+
+ if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &uc->dev)) {
+ ti->error = "Couldn't get striped device";
+ goto err;
+ }
+
+ if (sscanf(argv[4], "%llu%c", &start, &dummy) != 1) {
+ ti->error = "Invalid striped device offset";
+ goto err;
+ }
+ uc->physical_start = start;
+
+ uc->unstripe_offset = uc->unstripe * uc->chunk_size;
+ uc->unstripe_width = (uc->stripes - 1) * uc->chunk_size;
+ uc->chunk_shift = fls(uc->chunk_size) - 1;
+
+ tmp_len = ti->len;
+ if (sector_div(tmp_len, uc->chunk_size)) {
+ ti->error = "Target length not divisible by chunk size";
+ goto err;
+ }
+
+ if (dm_set_target_max_io_len(ti, uc->chunk_size)) {
+ ti->error = "Failed to set max io len";
+ goto err;
+ }
+
+ ti->private = uc;
+ return 0;
+err:
+ cleanup_unstripe(uc, ti);
+ return -EINVAL;
+}
+
+static void unstripe_dtr(struct dm_target *ti)
+{
+ struct unstripe_c *uc = ti->private;
+
+ cleanup_unstripe(uc, ti);
+}
+
+static sector_t map_to_core(struct dm_target *ti, struct bio *bio)
+{
+ struct unstripe_c *uc = ti->private;
+ sector_t sector = bio->bi_iter.bi_sector;
+
+ /* Shift us up to the right "row" on the stripe */
+ sector += uc->unstripe_width * (sector >> uc->chunk_shift);
+
+ /* Account for what stripe we're operating on */
+ sector += uc->unstripe_offset;
+
+ return sector;
+}
+
+static int unstripe_map(struct dm_target *ti, struct bio *bio)
+{
+ struct unstripe_c *uc = ti->private;
+
+ bio_set_dev(bio, uc->dev->bdev);
+ bio->bi_iter.bi_sector = map_to_core(ti, bio) + uc->physical_start;
+
+ return DM_MAPIO_REMAPPED;
+}
+
+static void unstripe_status(struct dm_target *ti, status_type_t type,
+ unsigned int status_flags, char *result, unsigned int maxlen)
+{
+ struct unstripe_c *uc = ti->private;
+ unsigned int sz = 0;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ break;
+
+ case STATUSTYPE_TABLE:
+ DMEMIT("%d %llu %d %s %llu",
+ uc->stripes, (unsigned long long)uc->chunk_size, uc->unstripe,
+ uc->dev->name, (unsigned long long)uc->physical_start);
+ break;
+ }
+}
+
+static int unstripe_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ struct unstripe_c *uc = ti->private;
+
+ return fn(ti, uc->dev, uc->physical_start, ti->len, data);
+}
+
+static void unstripe_io_hints(struct dm_target *ti,
+ struct queue_limits *limits)
+{
+ struct unstripe_c *uc = ti->private;
+
+ limits->chunk_sectors = uc->chunk_size;
+}
+
+static struct target_type unstripe_target = {
+ .name = "unstriped",
+ .version = {1, 0, 0},
+ .module = THIS_MODULE,
+ .ctr = unstripe_ctr,
+ .dtr = unstripe_dtr,
+ .map = unstripe_map,
+ .status = unstripe_status,
+ .iterate_devices = unstripe_iterate_devices,
+ .io_hints = unstripe_io_hints,
+};
+
+static int __init dm_unstripe_init(void)
+{
+ int r;
+
+ r = dm_register_target(&unstripe_target);
+ if (r < 0)
+ DMERR("target registration failed");
+
+ return r;
+}
+
+static void __exit dm_unstripe_exit(void)
+{
+ dm_unregister_target(&unstripe_target);
+}
+
+module_init(dm_unstripe_init);
+module_exit(dm_unstripe_exit);
+
+MODULE_DESCRIPTION(DM_NAME " unstriped target");
+MODULE_AUTHOR("Scott Bauer <scott.bauer@intel.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 70485de37b66..969954915566 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -2333,6 +2333,9 @@ static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
/* Free the zone descriptors */
dmz_drop_zones(zmd);
+
+ mutex_destroy(&zmd->mblk_flush_lock);
+ mutex_destroy(&zmd->map_lock);
}
/*
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 6d7bda6f8190..caff02caf083 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -827,6 +827,7 @@ err_fwq:
err_cwq:
destroy_workqueue(dmz->chunk_wq);
err_bio:
+ mutex_destroy(&dmz->chunk_lock);
bioset_free(dmz->bio_set);
err_meta:
dmz_dtr_metadata(dmz->metadata);
@@ -861,6 +862,8 @@ static void dmz_dtr(struct dm_target *ti)
dmz_put_zoned_device(ti);
+ mutex_destroy(&dmz->chunk_lock);
+
kfree(dmz);
}
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index de17b7193299..d6de00f367ef 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -60,18 +60,73 @@ void dm_issue_global_event(void)
}
/*
- * One of these is allocated per bio.
+ * One of these is allocated (on-stack) per original bio.
*/
+struct clone_info {
+ struct dm_table *map;
+ struct bio *bio;
+ struct dm_io *io;
+ sector_t sector;
+ unsigned sector_count;
+};
+
+/*
+ * One of these is allocated per clone bio.
+ */
+#define DM_TIO_MAGIC 7282014
+struct dm_target_io {
+ unsigned magic;
+ struct dm_io *io;
+ struct dm_target *ti;
+ unsigned target_bio_nr;
+ unsigned *len_ptr;
+ bool inside_dm_io;
+ struct bio clone;
+};
+
+/*
+ * One of these is allocated per original bio.
+ * It contains the first clone used for that original.
+ */
+#define DM_IO_MAGIC 5191977
struct dm_io {
+ unsigned magic;
struct mapped_device *md;
blk_status_t status;
atomic_t io_count;
- struct bio *bio;
+ struct bio *orig_bio;
unsigned long start_time;
spinlock_t endio_lock;
struct dm_stats_aux stats_aux;
+ /* last member of dm_target_io is 'struct bio' */
+ struct dm_target_io tio;
};
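+
+/*
+ * Resulting front-pad layout for a bio allocated from io_bs (sketch,
+ * implied by the offsetof() arithmetic below):
+ *
+ * [ per-bio data | struct dm_io { ... struct dm_target_io tio { ... struct bio clone } } ]
+ *
+ * dm_per_bio_data() and dm_bio_from_per_bio_data() translate between the
+ * per-bio data and &tio->clone, so 'clone' must stay the last member of
+ * dm_target_io and 'tio' the last member of dm_io.
+ */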
+void *dm_per_bio_data(struct bio *bio, size_t data_size)
+{
+ struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
+ if (!tio->inside_dm_io)
+ return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
+ return (char *)bio - offsetof(struct dm_target_io, clone) - offsetof(struct dm_io, tio) - data_size;
+}
+EXPORT_SYMBOL_GPL(dm_per_bio_data);
+
+struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
+{
+ struct dm_io *io = (struct dm_io *)((char *)data + data_size);
+ if (io->magic == DM_IO_MAGIC)
+ return (struct bio *)((char *)io + offsetof(struct dm_io, tio) + offsetof(struct dm_target_io, clone));
+ BUG_ON(io->magic != DM_TIO_MAGIC);
+ return (struct bio *)((char *)io + offsetof(struct dm_target_io, clone));
+}
+EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);
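+
+/*
+ * Typical target usage (sketch; 'struct my_ctx' is hypothetical): size
+ * the area via ti->per_io_data_size, stash state in ->map() with
+ * dm_per_bio_data(bio, sizeof(struct my_ctx)), and recover the bio
+ * later with dm_bio_from_per_bio_data(ctx, sizeof(struct my_ctx)).
+ */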
+
+unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
+{
+ return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
+}
+EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
+
#define MINOR_ALLOCED ((void *)-1)
/*
@@ -93,8 +148,8 @@ static int dm_numa_node = DM_NUMA_NODE;
* For mempools pre-allocation at the table loading time.
*/
struct dm_md_mempools {
- mempool_t *io_pool;
struct bio_set *bs;
+ struct bio_set *io_bs;
};
struct table_device {
@@ -103,7 +158,6 @@ struct table_device {
struct dm_dev dm_dev;
};
-static struct kmem_cache *_io_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_cache;
@@ -170,14 +224,9 @@ static int __init local_init(void)
{
int r = -ENOMEM;
- /* allocate a slab for the dm_ios */
- _io_cache = KMEM_CACHE(dm_io, 0);
- if (!_io_cache)
- return r;
-
_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
if (!_rq_tio_cache)
- goto out_free_io_cache;
+ return r;
_rq_cache = kmem_cache_create("dm_old_clone_request", sizeof(struct request),
__alignof__(struct request), 0, NULL);
@@ -212,8 +261,6 @@ out_free_rq_cache:
kmem_cache_destroy(_rq_cache);
out_free_rq_tio_cache:
kmem_cache_destroy(_rq_tio_cache);
-out_free_io_cache:
- kmem_cache_destroy(_io_cache);
return r;
}
@@ -225,7 +272,6 @@ static void local_exit(void)
kmem_cache_destroy(_rq_cache);
kmem_cache_destroy(_rq_tio_cache);
- kmem_cache_destroy(_io_cache);
unregister_blkdev(_major, _name);
dm_uevent_exit();
@@ -486,18 +532,69 @@ out:
return r;
}
-static struct dm_io *alloc_io(struct mapped_device *md)
+static void start_io_acct(struct dm_io *io);
+
+static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
{
- return mempool_alloc(md->io_pool, GFP_NOIO);
+ struct dm_io *io;
+ struct dm_target_io *tio;
+ struct bio *clone;
+
+ clone = bio_alloc_bioset(GFP_NOIO, 0, md->io_bs);
+ if (!clone)
+ return NULL;
+
+ tio = container_of(clone, struct dm_target_io, clone);
+ tio->inside_dm_io = true;
+ tio->io = NULL;
+
+ io = container_of(tio, struct dm_io, tio);
+ io->magic = DM_IO_MAGIC;
+ io->status = 0;
+ atomic_set(&io->io_count, 1);
+ io->orig_bio = bio;
+ io->md = md;
+ spin_lock_init(&io->endio_lock);
+
+ start_io_acct(io);
+
+ return io;
}
static void free_io(struct mapped_device *md, struct dm_io *io)
{
- mempool_free(io, md->io_pool);
+ bio_put(&io->tio.clone);
+}
+
+static struct dm_target_io *alloc_tio(struct clone_info *ci, struct dm_target *ti,
+ unsigned target_bio_nr, gfp_t gfp_mask)
+{
+ struct dm_target_io *tio;
+
+ if (!ci->io->tio.io) {
+ /* the dm_target_io embedded in ci->io is available */
+ tio = &ci->io->tio;
+ } else {
+ struct bio *clone = bio_alloc_bioset(gfp_mask, 0, ci->io->md->bs);
+ if (!clone)
+ return NULL;
+
+ tio = container_of(clone, struct dm_target_io, clone);
+ tio->inside_dm_io = false;
+ }
+
+ tio->magic = DM_TIO_MAGIC;
+ tio->io = ci->io;
+ tio->ti = ti;
+ tio->target_bio_nr = target_bio_nr;
+
+ return tio;
}
static void free_tio(struct dm_target_io *tio)
{
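+ /* an embedded tio is freed together with its dm_io by free_io() */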
+ if (tio->inside_dm_io)
+ return;
bio_put(&tio->clone);
}
@@ -510,17 +607,15 @@ int md_in_flight(struct mapped_device *md)
static void start_io_acct(struct dm_io *io)
{
struct mapped_device *md = io->md;
- struct bio *bio = io->bio;
- int cpu;
+ struct bio *bio = io->orig_bio;
int rw = bio_data_dir(bio);
io->start_time = jiffies;
- cpu = part_stat_lock();
- part_round_stats(md->queue, cpu, &dm_disk(md)->part0);
- part_stat_unlock();
+ generic_start_io_acct(md->queue, rw, bio_sectors(bio), &dm_disk(md)->part0);
+
atomic_set(&dm_disk(md)->part0.in_flight[rw],
- atomic_inc_return(&md->pending[rw]));
+ atomic_inc_return(&md->pending[rw]));
if (unlikely(dm_stats_used(&md->stats)))
dm_stats_account_io(&md->stats, bio_data_dir(bio),
@@ -531,7 +626,7 @@ static void start_io_acct(struct dm_io *io)
static void end_io_acct(struct dm_io *io)
{
struct mapped_device *md = io->md;
- struct bio *bio = io->bio;
+ struct bio *bio = io->orig_bio;
unsigned long duration = jiffies - io->start_time;
int pending;
int rw = bio_data_dir(bio);
@@ -752,15 +847,6 @@ int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
return 0;
}
-/*-----------------------------------------------------------------
- * CRUD START:
- * A more elegant soln is in the works that uses the queue
- * merge fn, unfortunately there are a couple of changes to
- * the block layer that I want to make for this. So in the
- * interests of getting something for people to use I give
- * you this clearly demarcated crap.
- *---------------------------------------------------------------*/
-
static int __noflush_suspending(struct mapped_device *md)
{
return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
@@ -780,8 +866,7 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
/* Push-back supersedes any I/O errors */
if (unlikely(error)) {
spin_lock_irqsave(&io->endio_lock, flags);
- if (!(io->status == BLK_STS_DM_REQUEUE &&
- __noflush_suspending(md)))
+ if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md)))
io->status = error;
spin_unlock_irqrestore(&io->endio_lock, flags);
}
@@ -793,7 +878,8 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
*/
spin_lock_irqsave(&md->deferred_lock, flags);
if (__noflush_suspending(md))
- bio_list_add_head(&md->deferred, io->bio);
+ /* NOTE early return due to BLK_STS_DM_REQUEUE below */
+ bio_list_add_head(&md->deferred, io->orig_bio);
else
/* noflush suspend was interrupted. */
io->status = BLK_STS_IOERR;
@@ -801,7 +887,7 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
}
io_error = io->status;
- bio = io->bio;
+ bio = io->orig_bio;
end_io_acct(io);
free_io(md, io);
@@ -847,7 +933,7 @@ static void clone_endio(struct bio *bio)
struct mapped_device *md = tio->io->md;
dm_endio_fn endio = tio->ti->type->end_io;
- if (unlikely(error == BLK_STS_TARGET)) {
+ if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
if (bio_op(bio) == REQ_OP_WRITE_SAME &&
!bio->bi_disk->queue->limits.max_write_same_sectors)
disable_write_same(md);
@@ -920,7 +1006,15 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
return -EINVAL;
}
- ti->max_io_len = (uint32_t) len;
+ /*
+ * A bio-based queue does its own splitting. When multipage bvecs
+ * are switched on, the incoming bio may be too big to be handled
+ * by some targets, such as crypt.
+ *
+ * When these targets are ready for the big bio, we can remove
+ * the limit.
+ */
+ ti->max_io_len = min_t(uint32_t, len, BIO_MAX_PAGES * PAGE_SIZE);
return 0;
}
@@ -997,7 +1091,7 @@ static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
/*
* A target may call dm_accept_partial_bio only from the map routine. It is
- * allowed for all bio types except REQ_PREFLUSH.
+ * allowed for all bio types except REQ_PREFLUSH and REQ_OP_ZONE_RESET.
*
* dm_accept_partial_bio informs the dm that the target only wants to process
* additional n_sectors sectors of the bio and the rest of the data should be
@@ -1047,7 +1141,7 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
{
#ifdef CONFIG_BLK_DEV_ZONED
struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
- struct bio *report_bio = tio->io->bio;
+ struct bio *report_bio = tio->io->orig_bio;
struct blk_zone_report_hdr *hdr = NULL;
struct blk_zone *zone;
unsigned int nr_rep = 0;
@@ -1114,67 +1208,15 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
}
EXPORT_SYMBOL_GPL(dm_remap_zone_report);
-/*
- * Flush current->bio_list when the target map method blocks.
- * This fixes deadlocks in snapshot and possibly in other targets.
- */
-struct dm_offload {
- struct blk_plug plug;
- struct blk_plug_cb cb;
-};
-
-static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule)
-{
- struct dm_offload *o = container_of(cb, struct dm_offload, cb);
- struct bio_list list;
- struct bio *bio;
- int i;
-
- INIT_LIST_HEAD(&o->cb.list);
-
- if (unlikely(!current->bio_list))
- return;
-
- for (i = 0; i < 2; i++) {
- list = current->bio_list[i];
- bio_list_init(&current->bio_list[i]);
-
- while ((bio = bio_list_pop(&list))) {
- struct bio_set *bs = bio->bi_pool;
- if (unlikely(!bs) || bs == fs_bio_set ||
- !bs->rescue_workqueue) {
- bio_list_add(&current->bio_list[i], bio);
- continue;
- }
-
- spin_lock(&bs->rescue_lock);
- bio_list_add(&bs->rescue_list, bio);
- queue_work(bs->rescue_workqueue, &bs->rescue_work);
- spin_unlock(&bs->rescue_lock);
- }
- }
-}
-
-static void dm_offload_start(struct dm_offload *o)
-{
- blk_start_plug(&o->plug);
- o->cb.callback = flush_current_bio_list;
- list_add(&o->cb.list, &current->plug->cb_list);
-}
-
-static void dm_offload_end(struct dm_offload *o)
-{
- list_del(&o->cb.list);
- blk_finish_plug(&o->plug);
-}
-
-static void __map_bio(struct dm_target_io *tio)
+static blk_qc_t __map_bio(struct dm_target_io *tio)
{
int r;
sector_t sector;
- struct dm_offload o;
struct bio *clone = &tio->clone;
+ struct dm_io *io = tio->io;
+ struct mapped_device *md = io->md;
struct dm_target *ti = tio->ti;
+ blk_qc_t ret = BLK_QC_T_NONE;
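+ /*
+ * ret carries the blk_qc_t cookie from generic/direct_make_request()
+ * back up to dm_make_request() so callers can poll the underlying queue.
+ */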
clone->bi_end_io = clone_endio;
@@ -1183,44 +1225,37 @@ static void __map_bio(struct dm_target_io *tio)
* anything, the target has assumed ownership of
* this io.
*/
- atomic_inc(&tio->io->io_count);
+ atomic_inc(&io->io_count);
sector = clone->bi_iter.bi_sector;
- dm_offload_start(&o);
r = ti->type->map(ti, clone);
- dm_offload_end(&o);
-
switch (r) {
case DM_MAPIO_SUBMITTED:
break;
case DM_MAPIO_REMAPPED:
/* the bio has been remapped so dispatch it */
trace_block_bio_remap(clone->bi_disk->queue, clone,
- bio_dev(tio->io->bio), sector);
- generic_make_request(clone);
+ bio_dev(io->orig_bio), sector);
+ if (md->type == DM_TYPE_NVME_BIO_BASED)
+ ret = direct_make_request(clone);
+ else
+ ret = generic_make_request(clone);
break;
case DM_MAPIO_KILL:
- dec_pending(tio->io, BLK_STS_IOERR);
free_tio(tio);
+ dec_pending(io, BLK_STS_IOERR);
break;
case DM_MAPIO_REQUEUE:
- dec_pending(tio->io, BLK_STS_DM_REQUEUE);
free_tio(tio);
+ dec_pending(io, BLK_STS_DM_REQUEUE);
break;
default:
DMWARN("unimplemented target map return value: %d", r);
BUG();
}
-}
-struct clone_info {
- struct mapped_device *md;
- struct dm_table *map;
- struct bio *bio;
- struct dm_io *io;
- sector_t sector;
- unsigned sector_count;
-};
+ return ret;
+}
static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
{
@@ -1264,28 +1299,49 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio,
return 0;
}
-static struct dm_target_io *alloc_tio(struct clone_info *ci,
- struct dm_target *ti,
- unsigned target_bio_nr)
+static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
+ struct dm_target *ti, unsigned num_bios)
{
struct dm_target_io *tio;
- struct bio *clone;
+ int try;
- clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
- tio = container_of(clone, struct dm_target_io, clone);
+ if (!num_bios)
+ return;
- tio->io = ci->io;
- tio->ti = ti;
- tio->target_bio_nr = target_bio_nr;
+ if (num_bios == 1) {
+ tio = alloc_tio(ci, ti, 0, GFP_NOIO);
+ bio_list_add(blist, &tio->clone);
+ return;
+ }
- return tio;
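+ /*
+ * Two-pass allocation: the first pass uses GFP_NOWAIT so a transient
+ * bioset shortage fails fast; on failure all clones allocated so far
+ * are freed and the second pass retries with GFP_NOIO, this time
+ * holding table_devices_lock while it may sleep.
+ */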
+ for (try = 0; try < 2; try++) {
+ int bio_nr;
+ struct bio *bio;
+
+ if (try)
+ mutex_lock(&ci->io->md->table_devices_lock);
+ for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
+ tio = alloc_tio(ci, ti, bio_nr, try ? GFP_NOIO : GFP_NOWAIT);
+ if (!tio)
+ break;
+
+ bio_list_add(blist, &tio->clone);
+ }
+ if (try)
+ mutex_unlock(&ci->io->md->table_devices_lock);
+ if (bio_nr == num_bios)
+ return;
+
+ while ((bio = bio_list_pop(blist))) {
+ tio = container_of(bio, struct dm_target_io, clone);
+ free_tio(tio);
+ }
+ }
}
-static void __clone_and_map_simple_bio(struct clone_info *ci,
- struct dm_target *ti,
- unsigned target_bio_nr, unsigned *len)
+static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci,
+ struct dm_target_io *tio, unsigned *len)
{
- struct dm_target_io *tio = alloc_tio(ci, ti, target_bio_nr);
struct bio *clone = &tio->clone;
tio->len_ptr = len;
@@ -1294,16 +1350,22 @@ static void __clone_and_map_simple_bio(struct clone_info *ci,
if (len)
bio_setup_sector(clone, ci->sector, *len);
- __map_bio(tio);
+ return __map_bio(tio);
}
static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
unsigned num_bios, unsigned *len)
{
- unsigned target_bio_nr;
+ struct bio_list blist = BIO_EMPTY_LIST;
+ struct bio *bio;
+ struct dm_target_io *tio;
+
+ alloc_multiple_bios(&blist, ci, ti, num_bios);
- for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++)
- __clone_and_map_simple_bio(ci, ti, target_bio_nr, len);
+ while ((bio = bio_list_pop(&blist))) {
+ tio = container_of(bio, struct dm_target_io, clone);
+ (void) __clone_and_map_simple_bio(ci, tio, len);
+ }
}
static int __send_empty_flush(struct clone_info *ci)
@@ -1319,32 +1381,22 @@ static int __send_empty_flush(struct clone_info *ci)
}
static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
- sector_t sector, unsigned *len)
+ sector_t sector, unsigned *len)
{
struct bio *bio = ci->bio;
struct dm_target_io *tio;
- unsigned target_bio_nr;
- unsigned num_target_bios = 1;
- int r = 0;
+ int r;
- /*
- * Does the target want to receive duplicate copies of the bio?
- */
- if (bio_data_dir(bio) == WRITE && ti->num_write_bios)
- num_target_bios = ti->num_write_bios(ti, bio);
-
- for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
- tio = alloc_tio(ci, ti, target_bio_nr);
- tio->len_ptr = len;
- r = clone_bio(tio, bio, sector, *len);
- if (r < 0) {
- free_tio(tio);
- break;
- }
- __map_bio(tio);
+ tio = alloc_tio(ci, ti, 0, GFP_NOIO);
+ tio->len_ptr = len;
+ r = clone_bio(tio, bio, sector, *len);
+ if (r < 0) {
+ free_tio(tio);
+ return r;
}
+ (void) __map_bio(tio);
- return r;
+ return 0;
}
typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);
@@ -1371,56 +1423,50 @@ static bool is_split_required_for_discard(struct dm_target *ti)
return ti->split_discard_bios;
}
-static int __send_changing_extent_only(struct clone_info *ci,
+static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
get_num_bios_fn get_num_bios,
is_split_required_fn is_split_required)
{
- struct dm_target *ti;
unsigned len;
unsigned num_bios;
- do {
- ti = dm_table_find_target(ci->map, ci->sector);
- if (!dm_target_is_valid(ti))
- return -EIO;
-
- /*
- * Even though the device advertised support for this type of
- * request, that does not mean every target supports it, and
- * reconfiguration might also have changed that since the
- * check was performed.
- */
- num_bios = get_num_bios ? get_num_bios(ti) : 0;
- if (!num_bios)
- return -EOPNOTSUPP;
+ /*
+ * Even though the device advertised support for this type of
+ * request, that does not mean every target supports it, and
+ * reconfiguration might also have changed that since the
+ * check was performed.
+ */
+ num_bios = get_num_bios ? get_num_bios(ti) : 0;
+ if (!num_bios)
+ return -EOPNOTSUPP;
- if (is_split_required && !is_split_required(ti))
- len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
- else
- len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti));
+ if (is_split_required && !is_split_required(ti))
+ len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
+ else
+ len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti));
- __send_duplicate_bios(ci, ti, num_bios, &len);
+ __send_duplicate_bios(ci, ti, num_bios, &len);
- ci->sector += len;
- } while (ci->sector_count -= len);
+ ci->sector += len;
+ ci->sector_count -= len;
return 0;
}
-static int __send_discard(struct clone_info *ci)
+static int __send_discard(struct clone_info *ci, struct dm_target *ti)
{
- return __send_changing_extent_only(ci, get_num_discard_bios,
+ return __send_changing_extent_only(ci, ti, get_num_discard_bios,
is_split_required_for_discard);
}
-static int __send_write_same(struct clone_info *ci)
+static int __send_write_same(struct clone_info *ci, struct dm_target *ti)
{
- return __send_changing_extent_only(ci, get_num_write_same_bios, NULL);
+ return __send_changing_extent_only(ci, ti, get_num_write_same_bios, NULL);
}
-static int __send_write_zeroes(struct clone_info *ci)
+static int __send_write_zeroes(struct clone_info *ci, struct dm_target *ti)
{
- return __send_changing_extent_only(ci, get_num_write_zeroes_bios, NULL);
+ return __send_changing_extent_only(ci, ti, get_num_write_zeroes_bios, NULL);
}
/*
@@ -1433,17 +1479,17 @@ static int __split_and_process_non_flush(struct clone_info *ci)
unsigned len;
int r;
- if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
- return __send_discard(ci);
- else if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
- return __send_write_same(ci);
- else if (unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES))
- return __send_write_zeroes(ci);
-
ti = dm_table_find_target(ci->map, ci->sector);
if (!dm_target_is_valid(ti))
return -EIO;
+ if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
+ return __send_discard(ci, ti);
+ else if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
+ return __send_write_same(ci, ti);
+ else if (unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES))
+ return __send_write_zeroes(ci, ti);
+
if (bio_op(bio) == REQ_OP_ZONE_REPORT)
len = ci->sector_count;
else
@@ -1460,34 +1506,33 @@ static int __split_and_process_non_flush(struct clone_info *ci)
return 0;
}
+static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
+ struct dm_table *map, struct bio *bio)
+{
+ ci->map = map;
+ ci->io = alloc_io(md, bio);
+ ci->sector = bio->bi_iter.bi_sector;
+}
+
/*
* Entry point to split a bio into clones and submit them to the targets.
*/
-static void __split_and_process_bio(struct mapped_device *md,
- struct dm_table *map, struct bio *bio)
+static blk_qc_t __split_and_process_bio(struct mapped_device *md,
+ struct dm_table *map, struct bio *bio)
{
struct clone_info ci;
+ blk_qc_t ret = BLK_QC_T_NONE;
int error = 0;
if (unlikely(!map)) {
bio_io_error(bio);
- return;
+ return ret;
}
- ci.map = map;
- ci.md = md;
- ci.io = alloc_io(md);
- ci.io->status = 0;
- atomic_set(&ci.io->io_count, 1);
- ci.io->bio = bio;
- ci.io->md = md;
- spin_lock_init(&ci.io->endio_lock);
- ci.sector = bio->bi_iter.bi_sector;
-
- start_io_acct(ci.io);
+ init_clone_info(&ci, md, map, bio);
if (bio->bi_opf & REQ_PREFLUSH) {
- ci.bio = &ci.md->flush_bio;
+ ci.bio = &ci.io->md->flush_bio;
ci.sector_count = 0;
error = __send_empty_flush(&ci);
/* dec_pending submits any data associated with flush */
@@ -1498,32 +1543,95 @@ static void __split_and_process_bio(struct mapped_device *md,
} else {
ci.bio = bio;
ci.sector_count = bio_sectors(bio);
- while (ci.sector_count && !error)
+ while (ci.sector_count && !error) {
error = __split_and_process_non_flush(&ci);
+ if (current->bio_list && ci.sector_count && !error) {
+ /*
+ * Remainder must be passed to generic_make_request()
+ * so that it gets handled *after* bios already submitted
+ * have been completely processed.
+ * We take a clone of the original to store in
+ * ci.io->orig_bio to be used by end_io_acct() and
+ * for dec_pending to use for completion handling.
+ * As this path is not used for REQ_OP_ZONE_REPORT,
+ * the usage of io->orig_bio in dm_remap_zone_report()
+ * won't be affected by this reassignment.
+ */
+ struct bio *b = bio_clone_bioset(bio, GFP_NOIO,
+ md->queue->bio_split);
+ ci.io->orig_bio = b;
+ bio_advance(bio, (bio_sectors(bio) - ci.sector_count) << 9);
+ bio_chain(b, bio);
+ ret = generic_make_request(bio);
+ break;
+ }
+ }
}
/* drop the extra reference count */
dec_pending(ci.io, errno_to_blk_status(error));
+ return ret;
}
-/*-----------------------------------------------------------------
- * CRUD END
- *---------------------------------------------------------------*/
/*
- * The request function that just remaps the bio built up by
- * dm_merge_bvec.
+ * Optimized variant of __split_and_process_bio that leverages the
+ * fact that targets that use it do _not_ have a need to split bios.
*/
-static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t __process_bio(struct mapped_device *md,
+ struct dm_table *map, struct bio *bio)
+{
+ struct clone_info ci;
+ blk_qc_t ret = BLK_QC_T_NONE;
+ int error = 0;
+
+ if (unlikely(!map)) {
+ bio_io_error(bio);
+ return ret;
+ }
+
+ init_clone_info(&ci, md, map, bio);
+
+ if (bio->bi_opf & REQ_PREFLUSH) {
+ ci.bio = &ci.io->md->flush_bio;
+ ci.sector_count = 0;
+ error = __send_empty_flush(&ci);
+ /* dec_pending submits any data associated with flush */
+ } else {
+ struct dm_target *ti = md->immutable_target;
+ struct dm_target_io *tio;
+
+ /*
+ * Defend against IO still getting in during teardown
+ * - as was seen for a time with nvme-fcloop
+ */
+ if (unlikely(WARN_ON_ONCE(!ti || !dm_target_is_valid(ti)))) {
+ error = -EIO;
+ goto out;
+ }
+
+ tio = alloc_tio(&ci, ti, 0, GFP_NOIO);
+ ci.bio = bio;
+ ci.sector_count = bio_sectors(bio);
+ ret = __clone_and_map_simple_bio(&ci, tio, NULL);
+ }
+out:
+ /* drop the extra reference count */
+ dec_pending(ci.io, errno_to_blk_status(error));
+ return ret;
+}
+
+typedef blk_qc_t (process_bio_fn)(struct mapped_device *, struct dm_table *, struct bio *);
+
+static blk_qc_t __dm_make_request(struct request_queue *q, struct bio *bio,
+ process_bio_fn process_bio)
{
- int rw = bio_data_dir(bio);
struct mapped_device *md = q->queuedata;
+ blk_qc_t ret = BLK_QC_T_NONE;
int srcu_idx;
struct dm_table *map;
map = dm_get_live_table(md, &srcu_idx);
- generic_start_io_acct(q, rw, bio_sectors(bio), &dm_disk(md)->part0);
-
/* if we're suspended, we have to queue this io for later */
if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
dm_put_live_table(md, srcu_idx);
@@ -1532,12 +1640,27 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
queue_io(md, bio);
else
bio_io_error(bio);
- return BLK_QC_T_NONE;
+ return ret;
}
- __split_and_process_bio(md, map, bio);
+ ret = process_bio(md, map, bio);
+
dm_put_live_table(md, srcu_idx);
- return BLK_QC_T_NONE;
+ return ret;
+}
+
+/*
+ * The request function that remaps the bio to one target and
+ * splits off any remainder.
+ */
+static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
+{
+ return __dm_make_request(q, bio, __split_and_process_bio);
+}
+
+static blk_qc_t dm_make_request_nvme(struct request_queue *q, struct bio *bio)
+{
+ return __dm_make_request(q, bio, __process_bio);
}
static int dm_any_congested(void *congested_data, int bdi_bits)
@@ -1618,20 +1741,9 @@ static const struct dax_operations dm_dax_ops;
static void dm_wq_work(struct work_struct *work);
-void dm_init_md_queue(struct mapped_device *md)
-{
- /*
- * Initialize data that will only be used by a non-blk-mq DM queue
- * - must do so here (in alloc_dev callchain) before queue is used
- */
- md->queue->queuedata = md;
- md->queue->backing_dev_info->congested_data = md;
-}
-
-void dm_init_normal_md_queue(struct mapped_device *md)
+static void dm_init_normal_md_queue(struct mapped_device *md)
{
md->use_blk_mq = false;
- dm_init_md_queue(md);
/*
* Initialize aspects of queue that aren't relevant for blk-mq
@@ -1645,9 +1757,10 @@ static void cleanup_mapped_device(struct mapped_device *md)
destroy_workqueue(md->wq);
if (md->kworker_task)
kthread_stop(md->kworker_task);
- mempool_destroy(md->io_pool);
if (md->bs)
bioset_free(md->bs);
+ if (md->io_bs)
+ bioset_free(md->io_bs);
if (md->dax_dev) {
kill_dax(md->dax_dev);
@@ -1673,6 +1786,10 @@ static void cleanup_mapped_device(struct mapped_device *md)
md->bdev = NULL;
}
+ mutex_destroy(&md->suspend_lock);
+ mutex_destroy(&md->type_lock);
+ mutex_destroy(&md->table_devices_lock);
+
dm_mq_cleanup_mapped_device(md);
}
@@ -1726,10 +1843,10 @@ static struct mapped_device *alloc_dev(int minor)
md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id);
if (!md->queue)
goto bad;
+ md->queue->queuedata = md;
+ md->queue->backing_dev_info->congested_data = md;
- dm_init_md_queue(md);
-
- md->disk = alloc_disk_node(1, numa_node_id);
+ md->disk = alloc_disk_node(1, md->numa_node_id);
if (!md->disk)
goto bad;
@@ -1753,7 +1870,7 @@ static struct mapped_device *alloc_dev(int minor)
goto bad;
md->dax_dev = dax_dev;
- add_disk(md->disk);
+ add_disk_no_queue_reg(md->disk);
format_dev_t(md->name, MKDEV(_major, minor));
md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
@@ -1812,17 +1929,22 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
struct dm_md_mempools *p = dm_table_get_md_mempools(t);
- if (md->bs) {
- /* The md already has necessary mempools. */
- if (dm_table_bio_based(t)) {
- /*
- * Reload bioset because front_pad may have changed
- * because a different table was loaded.
- */
+ if (dm_table_bio_based(t)) {
+ /*
+ * The md may already have mempools that need changing.
+ * If so, reload the bioset: front_pad may have changed
+ * because a different table was loaded.
+ */
+ if (md->bs) {
bioset_free(md->bs);
- md->bs = p->bs;
- p->bs = NULL;
+ md->bs = NULL;
}
+ if (md->io_bs) {
+ bioset_free(md->io_bs);
+ md->io_bs = NULL;
+ }
+
+ } else if (md->bs) {
/*
* There's no need to reload with request-based dm
* because the size of front_pad doesn't change.
@@ -1834,13 +1956,12 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
goto out;
}
- BUG_ON(!p || md->io_pool || md->bs);
+ BUG_ON(!p || md->bs || md->io_bs);
- md->io_pool = p->io_pool;
- p->io_pool = NULL;
md->bs = p->bs;
p->bs = NULL;
-
+ md->io_bs = p->io_bs;
+ p->io_bs = NULL;
out:
/* mempool bind completed, no longer need any mempools in the table */
dm_table_free_md_mempools(t);
@@ -1886,6 +2007,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
{
struct dm_table *old_map;
struct request_queue *q = md->queue;
+ bool request_based = dm_table_request_based(t);
sector_t size;
lockdep_assert_held(&md->suspend_lock);
@@ -1909,12 +2031,15 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
* This must be done before setting the queue restrictions,
* because request-based dm may be run just after the setting.
*/
- if (dm_table_request_based(t)) {
+ if (request_based)
dm_stop_queue(q);
+
+ if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) {
/*
- * Leverage the fact that request-based DM targets are
- * immutable singletons and establish md->immutable_target
- * - used to optimize both dm_request_fn and dm_mq_queue_rq
+ * Leverage the fact that request-based DM targets and
+ * NVMe bio based targets are immutable singletons
+ * - used to optimize both dm_request_fn and dm_mq_queue_rq;
+ * and __process_bio.
*/
md->immutable_target = dm_table_get_immutable_target(t);
}
@@ -1954,13 +2079,18 @@ static struct dm_table *__unbind(struct mapped_device *md)
*/
int dm_create(int minor, struct mapped_device **result)
{
+ int r;
struct mapped_device *md;
md = alloc_dev(minor);
if (!md)
return -ENXIO;
- dm_sysfs_init(md);
+ r = dm_sysfs_init(md);
+ if (r) {
+ free_dev(md);
+ return r;
+ }
*result = md;
return 0;
@@ -2013,10 +2143,12 @@ EXPORT_SYMBOL_GPL(dm_get_queue_limits);
int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
{
int r;
+ struct queue_limits limits;
enum dm_queue_mode type = dm_get_md_type(md);
switch (type) {
case DM_TYPE_REQUEST_BASED:
+ dm_init_normal_md_queue(md);
r = dm_old_init_request_queue(md, t);
if (r) {
DMERR("Cannot initialize queue for request-based mapped device");
@@ -2034,21 +2166,24 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
case DM_TYPE_DAX_BIO_BASED:
dm_init_normal_md_queue(md);
blk_queue_make_request(md->queue, dm_make_request);
- /*
- * DM handles splitting bios as needed. Free the bio_split bioset
- * since it won't be used (saves 1 process per bio-based DM device).
- */
- bioset_free(md->queue->bio_split);
- md->queue->bio_split = NULL;
-
- if (type == DM_TYPE_DAX_BIO_BASED)
- queue_flag_set_unlocked(QUEUE_FLAG_DAX, md->queue);
+ break;
+ case DM_TYPE_NVME_BIO_BASED:
+ dm_init_normal_md_queue(md);
+ blk_queue_make_request(md->queue, dm_make_request_nvme);
break;
case DM_TYPE_NONE:
WARN_ON_ONCE(true);
break;
}
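+ /*
+ * Queue limits are established and the queue registered only now:
+ * alloc_dev() used add_disk_no_queue_reg() to defer registration
+ * until the make_request_fn above was in place.
+ */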
+ r = dm_calculate_queue_limits(t, &limits);
+ if (r) {
+ DMERR("Cannot calculate initial queue limits");
+ return r;
+ }
+ dm_table_set_restrictions(t, md->queue, &limits);
+ blk_register_queue(md->disk);
+
return 0;
}
@@ -2113,7 +2248,6 @@ EXPORT_SYMBOL_GPL(dm_device_name);
static void __dm_destroy(struct mapped_device *md, bool wait)
{
- struct request_queue *q = dm_get_md_queue(md);
struct dm_table *map;
int srcu_idx;
@@ -2124,7 +2258,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
set_bit(DMF_FREEING, &md->flags);
spin_unlock(&_minor_lock);
- blk_set_queue_dying(q);
+ blk_set_queue_dying(md->queue);
if (dm_request_based(md) && md->kworker_task)
kthread_flush_worker(&md->kworker);
@@ -2735,11 +2869,12 @@ int dm_noflush_suspending(struct dm_target *ti)
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
- unsigned integrity, unsigned per_io_data_size)
+ unsigned integrity, unsigned per_io_data_size,
+ unsigned min_pool_size)
{
struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
unsigned int pool_size = 0;
- unsigned int front_pad;
+ unsigned int front_pad, io_front_pad;
if (!pools)
return NULL;
@@ -2747,16 +2882,19 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_qu
switch (type) {
case DM_TYPE_BIO_BASED:
case DM_TYPE_DAX_BIO_BASED:
- pool_size = dm_get_reserved_bio_based_ios();
+ case DM_TYPE_NVME_BIO_BASED:
+ pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
-
- pools->io_pool = mempool_create_slab_pool(pool_size, _io_cache);
- if (!pools->io_pool)
+ io_front_pad = roundup(front_pad, __alignof__(struct dm_io)) + offsetof(struct dm_io, tio);
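+ /*
+ * io_front_pad reserves the per-bio data plus the enclosing
+ * struct dm_io ahead of each bio allocated from io_bs, matching
+ * the layout dm_per_bio_data() walks back through.
+ */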
+ pools->io_bs = bioset_create(pool_size, io_front_pad, 0);
+ if (!pools->io_bs)
+ goto out;
+ if (integrity && bioset_integrity_create(pools->io_bs, pool_size))
goto out;
break;
case DM_TYPE_REQUEST_BASED:
case DM_TYPE_MQ_REQUEST_BASED:
- pool_size = dm_get_reserved_rq_based_ios();
+ pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size);
front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
/* per_io_data_size is used for blk-mq pdu at queue allocation */
break;
@@ -2764,7 +2902,7 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_qu
BUG();
}
- pools->bs = bioset_create(pool_size, front_pad, BIOSET_NEED_RESCUER);
+ pools->bs = bioset_create(pool_size, front_pad, 0);
if (!pools->bs)
goto out;
@@ -2784,10 +2922,10 @@ void dm_free_md_mempools(struct dm_md_mempools *pools)
if (!pools)
return;
- mempool_destroy(pools->io_pool);
-
if (pools->bs)
bioset_free(pools->bs);
+ if (pools->io_bs)
+ bioset_free(pools->io_bs);
kfree(pools);
}
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 36399bb875dd..114a81b27c37 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -49,7 +49,6 @@ struct dm_md_mempools;
/*-----------------------------------------------------------------
* Internal table functions.
*---------------------------------------------------------------*/
-void dm_table_destroy(struct dm_table *t);
void dm_table_event_callback(struct dm_table *t,
void (*fn)(void *), void *context);
struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index);
@@ -206,7 +205,8 @@ void dm_kcopyd_exit(void);
* Mempool operations
*/
struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
- unsigned integrity, unsigned per_bio_data_size);
+ unsigned integrity, unsigned per_bio_data_size,
+ unsigned min_pool_size);
void dm_free_md_mempools(struct dm_md_mempools *pools);
/*
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4e4dee0ec2de..0081ace39a64 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -711,7 +711,7 @@ static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
return NULL;
}
-static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev)
+struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev)
{
struct md_rdev *rdev;
@@ -721,6 +721,7 @@ static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev)
return NULL;
}
+EXPORT_SYMBOL_GPL(md_find_rdev_rcu);
static struct md_personality *find_pers(int level, char *clevel)
{
@@ -5560,11 +5561,6 @@ int md_run(struct mddev *mddev)
if (start_readonly && mddev->ro == 0)
mddev->ro = 2; /* read-only, but switch on first write */
- /*
- * NOTE: some pers->run(), for example r5l_recovery_log(), wakes
- * up mddev->thread. It is important to initialize critical
- * resources for mddev->thread BEFORE calling pers->run().
- */
err = pers->run(mddev);
if (err)
pr_warn("md: pers->run() failed ...\n");
@@ -5678,6 +5674,9 @@ static int do_md_run(struct mddev *mddev)
if (mddev_is_clustered(mddev))
md_allow_write(mddev);
+ /* run start-up tasks that require md_thread */
+ md_start(mddev);
+
md_wakeup_thread(mddev->thread);
md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
@@ -5689,6 +5688,21 @@ out:
return err;
}
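+
+/*
+ * Run personality start-up work that needs mddev->thread, e.g. journal
+ * recovery in raid5. MD_RECOVERY_WAIT keeps md_do_sync() from starting
+ * a resync until pers->start() has completed.
+ */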
+int md_start(struct mddev *mddev)
+{
+ int ret = 0;
+
+ if (mddev->pers->start) {
+ set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+ ret = mddev->pers->start(mddev);
+ clear_bit(MD_RECOVERY_WAIT, &mddev->recovery);
+ md_wakeup_thread(mddev->sync_thread);
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(md_start);
+
static int restart_array(struct mddev *mddev)
{
struct gendisk *disk = mddev->gendisk;
@@ -6997,7 +7011,7 @@ static int set_disk_faulty(struct mddev *mddev, dev_t dev)
return -ENODEV;
rcu_read_lock();
- rdev = find_rdev_rcu(mddev, dev);
+ rdev = md_find_rdev_rcu(mddev, dev);
if (!rdev)
err = -ENODEV;
else {
@@ -7871,10 +7885,10 @@ static int md_seq_open(struct inode *inode, struct file *file)
}
static int md_unloading;
-static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
+static __poll_t mdstat_poll(struct file *filp, poll_table *wait)
{
struct seq_file *seq = filp->private_data;
- int mask;
+ __poll_t mask;
if (md_unloading)
return POLLIN|POLLRDNORM|POLLERR|POLLPRI;
@@ -8169,7 +8183,8 @@ void md_do_sync(struct md_thread *thread)
int ret;
/* just in case the thread restarts... */
- if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
+ if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
+ test_bit(MD_RECOVERY_WAIT, &mddev->recovery))
return;
if (mddev->ro) {/* never try to sync a read-only array */
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 7d6bcf0eba0c..58cd20a5e85e 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -485,6 +485,7 @@ enum recovery_flags {
MD_RECOVERY_RESHAPE, /* A reshape is happening */
MD_RECOVERY_FROZEN, /* User request to abort, and not restart, any action */
MD_RECOVERY_ERROR, /* sync-action interrupted because io-error */
+ MD_RECOVERY_WAIT, /* waiting for pers->start() to finish */
};
static inline int __must_check mddev_lock(struct mddev *mddev)
@@ -523,7 +524,13 @@ struct md_personality
struct list_head list;
struct module *owner;
bool (*make_request)(struct mddev *mddev, struct bio *bio);
+ /*
+ * Start-up work that does NOT require md_thread. Tasks that
+ * require md_thread should go into start().
+ */
int (*run)(struct mddev *mddev);
+ /* start-up work that requires md_thread */
+ int (*start)(struct mddev *mddev);
void (*free)(struct mddev *mddev, void *priv);
void (*status)(struct seq_file *seq, struct mddev *mddev);
/* error_handler must set ->faulty and clear ->in_sync
@@ -687,6 +694,7 @@ extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
extern void mddev_init(struct mddev *mddev);
extern int md_run(struct mddev *mddev);
+extern int md_start(struct mddev *mddev);
extern void md_stop(struct mddev *mddev);
extern void md_stop_writes(struct mddev *mddev);
extern int md_rdev_init(struct md_rdev *rdev);
@@ -702,6 +710,7 @@ extern void md_reload_sb(struct mddev *mddev, int raid_disk);
extern void md_update_sb(struct mddev *mddev, int force);
extern void md_kick_rdev_from_array(struct md_rdev * rdev);
struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
+struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev);
static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
{
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 6df398e3a008..b2eae332e1a2 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -815,6 +815,17 @@ static void flush_pending_writes(struct r1conf *conf)
bio = bio_list_get(&conf->pending_bio_list);
conf->pending_count = 0;
spin_unlock_irq(&conf->device_lock);
+
+ /*
+ * As this is called in a wait_event() loop (see freeze_array),
+ * current->state might be TASK_UNINTERRUPTIBLE which will
+ * cause a warning when we prepare to wait again. As it is
+ * rare that this path is taken, it is perfectly safe to force
+ * us to go around the wait_event() loop again, so the warning
+ * is a false-positive. Silence the warning by resetting
+ * thread state
+ */
+ __set_current_state(TASK_RUNNING);
blk_start_plug(&plug);
flush_bio_list(conf, bio);
blk_finish_plug(&plug);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index c131835cf008..99c9207899a7 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -900,6 +900,18 @@ static void flush_pending_writes(struct r10conf *conf)
bio = bio_list_get(&conf->pending_bio_list);
conf->pending_count = 0;
spin_unlock_irq(&conf->device_lock);
+
+ /*
+ * As this is called in a wait_event() loop (see freeze_array),
+ * current->state might be TASK_UNINTERRUPTIBLE which will
+ * cause a warning when we prepare to wait again. As it is
+ * rare that this path is taken, it is perfectly safe to force
+ * us to go around the wait_event() loop again, so the warning
+ * is a false-positive. Silence the warning by resetting
+ * thread state
+ */
+ __set_current_state(TASK_RUNNING);
+
blk_start_plug(&plug);
/* flush any pending bitmap writes to disk
* before proceeding w/ I/O */
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 39f31f07ffe9..3c65f52b68f5 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1111,9 +1111,6 @@ void r5l_write_stripe_run(struct r5l_log *log)
int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
{
- if (!log)
- return -ENODEV;
-
if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
/*
* in write through (journal only)
@@ -1592,8 +1589,6 @@ void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
void r5l_quiesce(struct r5l_log *log, int quiesce)
{
struct mddev *mddev;
- if (!log)
- return;
if (quiesce) {
/* make sure r5l_write_super_and_discard_space exits */
@@ -2448,7 +2443,6 @@ static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log,
raid5_release_stripe(sh);
}
- md_wakeup_thread(conf->mddev->thread);
/* reuse conf->wait_for_quiescent in recovery */
wait_event(conf->wait_for_quiescent,
atomic_read(&conf->active_stripes) == 0);
@@ -2491,10 +2485,10 @@ static int r5l_recovery_log(struct r5l_log *log)
ctx->seq += 10000;
if ((ctx->data_only_stripes == 0) && (ctx->data_parity_stripes == 0))
- pr_debug("md/raid:%s: starting from clean shutdown\n",
+ pr_info("md/raid:%s: starting from clean shutdown\n",
mdname(mddev));
else
- pr_debug("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n",
+ pr_info("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n",
mdname(mddev), ctx->data_only_stripes,
ctx->data_parity_stripes);
@@ -3036,6 +3030,23 @@ ioerr:
return ret;
}
+int r5l_start(struct r5l_log *log)
+{
+ int ret;
+
+ if (!log)
+ return 0;
+
+ ret = r5l_load_log(log);
+ if (ret) {
+ struct mddev *mddev = log->rdev->mddev;
+ struct r5conf *conf = mddev->private;
+
+ r5l_exit_log(conf);
+ }
+ return ret;
+}
+
void r5c_update_on_rdev_error(struct mddev *mddev, struct md_rdev *rdev)
{
struct r5conf *conf = mddev->private;
@@ -3138,13 +3149,9 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
rcu_assign_pointer(conf->log, log);
- if (r5l_load_log(log))
- goto error;
-
set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
return 0;
-error:
rcu_assign_pointer(conf->log, NULL);
md_unregister_thread(&log->reclaim_thread);
reclaim_thread:
diff --git a/drivers/md/raid5-log.h b/drivers/md/raid5-log.h
index 284578b0a349..0c76bcedfc1c 100644
--- a/drivers/md/raid5-log.h
+++ b/drivers/md/raid5-log.h
@@ -32,6 +32,7 @@ extern struct md_sysfs_entry r5c_journal_mode;
extern void r5c_update_on_rdev_error(struct mddev *mddev,
struct md_rdev *rdev);
extern bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect);
+extern int r5l_start(struct r5l_log *log);
extern struct dma_async_tx_descriptor *
ops_run_partial_parity(struct stripe_head *sh, struct raid5_percpu *percpu,
@@ -42,6 +43,7 @@ extern int ppl_write_stripe(struct r5conf *conf, struct stripe_head *sh);
extern void ppl_write_stripe_run(struct r5conf *conf);
extern void ppl_stripe_write_finished(struct stripe_head *sh);
extern int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add);
+extern void ppl_quiesce(struct r5conf *conf, int quiesce);
static inline bool raid5_has_ppl(struct r5conf *conf)
{
@@ -87,6 +89,34 @@ static inline void log_write_stripe_run(struct r5conf *conf)
ppl_write_stripe_run(conf);
}
+static inline void log_flush_stripe_to_raid(struct r5conf *conf)
+{
+ if (conf->log)
+ r5l_flush_stripe_to_raid(conf->log);
+ else if (raid5_has_ppl(conf))
+ ppl_write_stripe_run(conf);
+}
+
+static inline int log_handle_flush_request(struct r5conf *conf, struct bio *bio)
+{
+ int ret = -ENODEV;
+
+ if (conf->log)
+ ret = r5l_handle_flush_request(conf->log, bio);
+ else if (raid5_has_ppl(conf))
+ ret = 0;
+
+ return ret;
+}
+
+static inline void log_quiesce(struct r5conf *conf, int quiesce)
+{
+ if (conf->log)
+ r5l_quiesce(conf->log, quiesce);
+ else if (raid5_has_ppl(conf))
+ ppl_quiesce(conf, quiesce);
+}
+
static inline void log_exit(struct r5conf *conf)
{
if (conf->log)
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index 628c0bf7b9fd..2764c2290062 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -85,6 +85,9 @@
* (for a single member disk). New io_units are added to the end of the list
* and the first io_unit is submitted, if it is not submitted already.
* The current io_unit accepting new stripes is always at the end of the list.
+ *
+ * If a write-back cache is enabled on any of the disks in the array,
+ * its data must be flushed before the next io_unit is submitted.
*/
#define PPL_SPACE_SIZE (128 * 1024)
@@ -104,6 +107,7 @@ struct ppl_conf {
struct kmem_cache *io_kc;
mempool_t *io_pool;
struct bio_set *bs;
+ struct bio_set *flush_bs;
/* used only for recovery */
int recovered_entries;
@@ -128,6 +132,8 @@ struct ppl_log {
sector_t next_io_sector;
unsigned int entry_space;
bool use_multippl;
+ bool wb_cache_on;
+ unsigned long disk_flush_bitmap;
};
#define PPL_IO_INLINE_BVECS 32
@@ -145,6 +151,7 @@ struct ppl_io_unit {
struct list_head stripe_list; /* stripes added to the io_unit */
atomic_t pending_stripes; /* how many stripes not written to raid */
+ atomic_t pending_flushes; /* how many disk flushes are in progress */
bool submitted; /* true if write to log started */
@@ -249,6 +256,7 @@ static struct ppl_io_unit *ppl_new_iounit(struct ppl_log *log,
INIT_LIST_HEAD(&io->log_sibling);
INIT_LIST_HEAD(&io->stripe_list);
atomic_set(&io->pending_stripes, 0);
+ atomic_set(&io->pending_flushes, 0);
bio_init(&io->bio, io->biovec, PPL_IO_INLINE_BVECS);
pplhdr = page_address(io->header_page);
@@ -475,7 +483,18 @@ static void ppl_submit_iounit(struct ppl_io_unit *io)
if (log->use_multippl)
log->next_io_sector += (PPL_HEADER_SIZE + io->pp_size) >> 9;
+ WARN_ON(log->disk_flush_bitmap != 0);
+
list_for_each_entry(sh, &io->stripe_list, log_list) {
+ for (i = 0; i < sh->disks; i++) {
+ struct r5dev *dev = &sh->dev[i];
+
+ if ((ppl_conf->child_logs[i].wb_cache_on) &&
+ (test_bit(R5_Wantwrite, &dev->flags))) {
+ set_bit(i, &log->disk_flush_bitmap);
+ }
+ }
+
/* entries for full stripe writes have no partial parity */
if (test_bit(STRIPE_FULL_WRITE, &sh->state))
continue;
@@ -540,6 +559,7 @@ static void ppl_io_unit_finished(struct ppl_io_unit *io)
{
struct ppl_log *log = io->log;
struct ppl_conf *ppl_conf = log->ppl_conf;
+ struct r5conf *conf = ppl_conf->mddev->private;
unsigned long flags;
pr_debug("%s: seq: %llu\n", __func__, io->seq);
@@ -565,6 +585,112 @@ static void ppl_io_unit_finished(struct ppl_io_unit *io)
spin_unlock(&ppl_conf->no_mem_stripes_lock);
local_irq_restore(flags);
+
+ wake_up(&conf->wait_for_quiescent);
+}
+
+static void ppl_flush_endio(struct bio *bio)
+{
+ struct ppl_io_unit *io = bio->bi_private;
+ struct ppl_log *log = io->log;
+ struct ppl_conf *ppl_conf = log->ppl_conf;
+ struct r5conf *conf = ppl_conf->mddev->private;
+ char b[BDEVNAME_SIZE];
+
+ pr_debug("%s: dev: %s\n", __func__, bio_devname(bio, b));
+
+ if (bio->bi_status) {
+ struct md_rdev *rdev;
+
+ rcu_read_lock();
+ rdev = md_find_rdev_rcu(conf->mddev, bio_dev(bio));
+ if (rdev)
+ md_error(rdev->mddev, rdev);
+ rcu_read_unlock();
+ }
+
+ bio_put(bio);
+
+ if (atomic_dec_and_test(&io->pending_flushes)) {
+ ppl_io_unit_finished(io);
+ md_wakeup_thread(conf->mddev->thread);
+ }
+}
+
+static void ppl_do_flush(struct ppl_io_unit *io)
+{
+ struct ppl_log *log = io->log;
+ struct ppl_conf *ppl_conf = log->ppl_conf;
+ struct r5conf *conf = ppl_conf->mddev->private;
+ int raid_disks = conf->raid_disks;
+ int flushed_disks = 0;
+ int i;
+
+ atomic_set(&io->pending_flushes, raid_disks);
+
+ for_each_set_bit(i, &log->disk_flush_bitmap, raid_disks) {
+ struct md_rdev *rdev;
+ struct block_device *bdev = NULL;
+
+ rcu_read_lock();
+ rdev = rcu_dereference(conf->disks[i].rdev);
+ if (rdev && !test_bit(Faulty, &rdev->flags))
+ bdev = rdev->bdev;
+ rcu_read_unlock();
+
+ if (bdev) {
+ struct bio *bio;
+ char b[BDEVNAME_SIZE];
+
+ bio = bio_alloc_bioset(GFP_NOIO, 0, ppl_conf->flush_bs);
+ bio_set_dev(bio, bdev);
+ bio->bi_private = io;
+ bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+ bio->bi_end_io = ppl_flush_endio;
+
+ pr_debug("%s: dev: %s\n", __func__,
+ bio_devname(bio, b));
+
+ submit_bio(bio);
+ flushed_disks++;
+ }
+ }
+
+ log->disk_flush_bitmap = 0;
+
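+ /*
+ * pending_flushes was primed with raid_disks above; drop the
+ * references for member disks that did not get a flush bio so the
+ * io_unit can finish once the submitted flushes complete.
+ */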
+ for (i = flushed_disks; i < raid_disks; i++) {
+ if (atomic_dec_and_test(&io->pending_flushes))
+ ppl_io_unit_finished(io);
+ }
+}
+
+static inline bool ppl_no_io_unit_submitted(struct r5conf *conf,
+ struct ppl_log *log)
+{
+ struct ppl_io_unit *io;
+
+ io = list_first_entry_or_null(&log->io_list, struct ppl_io_unit,
+ log_sibling);
+
+ return !io || !io->submitted;
+}
+
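+/*
+ * On freeze, wait until no submitted io_unit remains at the head of any
+ * child log's io_list; nothing needs to be done on unfreeze.
+ */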
+void ppl_quiesce(struct r5conf *conf, int quiesce)
+{
+ struct ppl_conf *ppl_conf = conf->log_private;
+ int i;
+
+ if (quiesce) {
+ for (i = 0; i < ppl_conf->count; i++) {
+ struct ppl_log *log = &ppl_conf->child_logs[i];
+
+ spin_lock_irq(&log->io_list_lock);
+ wait_event_lock_irq(conf->wait_for_quiescent,
+ ppl_no_io_unit_submitted(conf, log),
+ log->io_list_lock);
+ spin_unlock_irq(&log->io_list_lock);
+ }
+ }
}
void ppl_stripe_write_finished(struct stripe_head *sh)
@@ -574,8 +700,12 @@ void ppl_stripe_write_finished(struct stripe_head *sh)
io = sh->ppl_io;
sh->ppl_io = NULL;
- if (io && atomic_dec_and_test(&io->pending_stripes))
- ppl_io_unit_finished(io);
+ if (io && atomic_dec_and_test(&io->pending_stripes)) {
+ if (io->log->disk_flush_bitmap)
+ ppl_do_flush(io);
+ else
+ ppl_io_unit_finished(io);
+ }
}
static void ppl_xor(int size, struct page *page1, struct page *page2)
@@ -1108,6 +1238,8 @@ static void __ppl_exit_log(struct ppl_conf *ppl_conf)
if (ppl_conf->bs)
bioset_free(ppl_conf->bs);
+ if (ppl_conf->flush_bs)
+ bioset_free(ppl_conf->flush_bs);
mempool_destroy(ppl_conf->io_pool);
kmem_cache_destroy(ppl_conf->io_kc);
@@ -1173,6 +1305,8 @@ static int ppl_validate_rdev(struct md_rdev *rdev)
static void ppl_init_child_log(struct ppl_log *log, struct md_rdev *rdev)
{
+ struct request_queue *q;
+
if ((rdev->ppl.size << 9) >= (PPL_SPACE_SIZE +
PPL_HEADER_SIZE) * 2) {
log->use_multippl = true;
@@ -1185,6 +1319,10 @@ static void ppl_init_child_log(struct ppl_log *log, struct md_rdev *rdev)
PPL_HEADER_SIZE;
}
log->next_io_sector = rdev->ppl.sector;
+
+ q = bdev_get_queue(rdev->bdev);
+ if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
+ log->wb_cache_on = true;
}
int ppl_init_log(struct r5conf *conf)
@@ -1192,8 +1330,8 @@ int ppl_init_log(struct r5conf *conf)
struct ppl_conf *ppl_conf;
struct mddev *mddev = conf->mddev;
int ret = 0;
+ int max_disks;
int i;
- bool need_cache_flush = false;
pr_debug("md/raid:%s: enabling distributed Partial Parity Log\n",
mdname(conf->mddev));
@@ -1219,6 +1357,14 @@ int ppl_init_log(struct r5conf *conf)
return -EINVAL;
}
+ max_disks = FIELD_SIZEOF(struct ppl_log, disk_flush_bitmap) *
+ BITS_PER_BYTE;
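+ /* one flush bit per member disk, e.g. 64 with a 64-bit unsigned long */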
+ if (conf->raid_disks > max_disks) {
+ pr_warn("md/raid:%s PPL doesn't support over %d disks in the array\n",
+ mdname(mddev), max_disks);
+ return -EINVAL;
+ }
+
ppl_conf = kzalloc(sizeof(struct ppl_conf), GFP_KERNEL);
if (!ppl_conf)
return -ENOMEM;
@@ -1244,6 +1390,12 @@ int ppl_init_log(struct r5conf *conf)
goto err;
}
+ ppl_conf->flush_bs = bioset_create(conf->raid_disks, 0, 0);
+ if (!ppl_conf->flush_bs) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
ppl_conf->count = conf->raid_disks;
ppl_conf->child_logs = kcalloc(ppl_conf->count, sizeof(struct ppl_log),
GFP_KERNEL);
@@ -1275,23 +1427,14 @@ int ppl_init_log(struct r5conf *conf)
log->rdev = rdev;
if (rdev) {
- struct request_queue *q;
-
ret = ppl_validate_rdev(rdev);
if (ret)
goto err;
- q = bdev_get_queue(rdev->bdev);
- if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
- need_cache_flush = true;
ppl_init_child_log(log, rdev);
}
}
- if (need_cache_flush)
- pr_warn("md/raid:%s: Volatile write-back cache should be disabled on all member drives when using PPL!\n",
- mdname(mddev));
-
/* load and possibly recover the logs from the member disks */
ret = ppl_load(ppl_conf);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 98ce4272ace9..50d01144b805 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5563,7 +5563,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
bool do_flush = false;
if (unlikely(bi->bi_opf & REQ_PREFLUSH)) {
- int ret = r5l_handle_flush_request(conf->log, bi);
+ int ret = log_handle_flush_request(conf, bi);
if (ret == 0)
return true;
@@ -6168,7 +6168,7 @@ static int handle_active_stripes(struct r5conf *conf, int group,
break;
if (i == NR_STRIPE_HASH_LOCKS) {
spin_unlock_irq(&conf->device_lock);
- r5l_flush_stripe_to_raid(conf->log);
+ log_flush_stripe_to_raid(conf);
spin_lock_irq(&conf->device_lock);
return batch_size;
}
@@ -8060,7 +8060,7 @@ static void raid5_quiesce(struct mddev *mddev, int quiesce)
wake_up(&conf->wait_for_overlap);
unlock_all_device_hash_locks_irq(conf);
}
- r5l_quiesce(conf->log, quiesce);
+ log_quiesce(conf, quiesce);
}
static void *raid45_takeover_raid0(struct mddev *mddev, int level)
@@ -8364,6 +8364,13 @@ static int raid5_change_consistency_policy(struct mddev *mddev, const char *buf)
return err;
}
+static int raid5_start(struct mddev *mddev)
+{
+ struct r5conf *conf = mddev->private;
+
+ return r5l_start(conf->log);
+}
+
static struct md_personality raid6_personality =
{
.name = "raid6",
@@ -8371,6 +8378,7 @@ static struct md_personality raid6_personality =
.owner = THIS_MODULE,
.make_request = raid5_make_request,
.run = raid5_run,
+ .start = raid5_start,
.free = raid5_free,
.status = raid5_status,
.error_handler = raid5_error,
@@ -8395,6 +8403,7 @@ static struct md_personality raid5_personality =
.owner = THIS_MODULE,
.make_request = raid5_make_request,
.run = raid5_run,
+ .start = raid5_start,
.free = raid5_free,
.status = raid5_status,
.error_handler = raid5_error,
@@ -8420,6 +8429,7 @@ static struct md_personality raid4_personality =
.owner = THIS_MODULE,
.make_request = raid5_make_request,
.run = raid5_run,
+ .start = raid5_start,
.free = raid5_free,
.status = raid5_status,
.error_handler = raid5_error,
diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/cec-api.c
index 3dba3aa34a43..9d6c496f756c 100644
--- a/drivers/media/cec/cec-api.c
+++ b/drivers/media/cec/cec-api.c
@@ -43,13 +43,13 @@ static inline struct cec_devnode *cec_devnode_data(struct file *filp)
/* CEC file operations */
-static unsigned int cec_poll(struct file *filp,
+static __poll_t cec_poll(struct file *filp,
struct poll_table_struct *poll)
{
struct cec_devnode *devnode = cec_devnode_data(filp);
struct cec_fh *fh = filp->private_data;
struct cec_adapter *adap = fh->adap;
- unsigned int res = 0;
+ __poll_t res = 0;
if (!devnode->registered)
return POLLERR | POLLHUP;
diff --git a/drivers/media/common/saa7146/saa7146_fops.c b/drivers/media/common/saa7146/saa7146_fops.c
index 8c87d6837c49..8ee3eebef4db 100644
--- a/drivers/media/common/saa7146/saa7146_fops.c
+++ b/drivers/media/common/saa7146/saa7146_fops.c
@@ -320,13 +320,13 @@ static int fops_mmap(struct file *file, struct vm_area_struct * vma)
return res;
}
-static unsigned int __fops_poll(struct file *file, struct poll_table_struct *wait)
+static __poll_t __fops_poll(struct file *file, struct poll_table_struct *wait)
{
struct video_device *vdev = video_devdata(file);
struct saa7146_fh *fh = file->private_data;
struct videobuf_buffer *buf = NULL;
struct videobuf_queue *q;
- unsigned int res = v4l2_ctrl_poll(file, wait);
+ __poll_t res = v4l2_ctrl_poll(file, wait);
DEB_EE("file:%p, poll:%p\n", file, wait);
@@ -359,10 +359,10 @@ static unsigned int __fops_poll(struct file *file, struct poll_table_struct *wai
return res;
}
-static unsigned int fops_poll(struct file *file, struct poll_table_struct *wait)
+static __poll_t fops_poll(struct file *file, struct poll_table_struct *wait)
{
struct video_device *vdev = video_devdata(file);
- unsigned int res;
+ __poll_t res;
mutex_lock(vdev->lock);
res = __fops_poll(file, wait);
diff --git a/drivers/media/common/siano/smsdvb-debugfs.c b/drivers/media/common/siano/smsdvb-debugfs.c
index 1a8677ade391..0c0878bcf251 100644
--- a/drivers/media/common/siano/smsdvb-debugfs.c
+++ b/drivers/media/common/siano/smsdvb-debugfs.c
@@ -374,7 +374,7 @@ exit:
return rc;
}
-static unsigned int smsdvb_stats_poll(struct file *file, poll_table *wait)
+static __poll_t smsdvb_stats_poll(struct file *file, poll_table *wait)
{
struct smsdvb_debugfs *debug_data = file->private_data;
int rc;
@@ -384,12 +384,9 @@ static unsigned int smsdvb_stats_poll(struct file *file, poll_table *wait)
poll_wait(file, &debug_data->stats_queue, wait);
rc = smsdvb_stats_wait_read(debug_data);
- if (rc > 0)
- rc = POLLIN | POLLRDNORM;
-
kref_put(&debug_data->refcount, smsdvb_debugfs_data_release);
- return rc;
+ return rc > 0 ? POLLIN | POLLRDNORM : 0;
}
static ssize_t smsdvb_stats_read(struct file *file, char __user *user_buf,
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index 3ddd44e1ee77..3fe0eb740a6d 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -1066,10 +1066,10 @@ static long dvb_demux_ioctl(struct file *file, unsigned int cmd,
return dvb_usercopy(file, cmd, arg, dvb_demux_do_ioctl);
}
-static unsigned int dvb_demux_poll(struct file *file, poll_table *wait)
+static __poll_t dvb_demux_poll(struct file *file, poll_table *wait)
{
struct dmxdev_filter *dmxdevfilter = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
if ((!dmxdevfilter) || dmxdevfilter->dev->exit)
return POLLERR;
@@ -1160,11 +1160,11 @@ static long dvb_dvr_ioctl(struct file *file,
return dvb_usercopy(file, cmd, arg, dvb_dvr_do_ioctl);
}
-static unsigned int dvb_dvr_poll(struct file *file, poll_table *wait)
+static __poll_t dvb_dvr_poll(struct file *file, poll_table *wait)
{
struct dvb_device *dvbdev = file->private_data;
struct dmxdev *dmxdev = dvbdev->priv;
- unsigned int mask = 0;
+ __poll_t mask = 0;
dprintk("%s\n", __func__);
diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c
index d48b61eb01f4..3f6c8bd8f869 100644
--- a/drivers/media/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb-core/dvb_ca_en50221.c
@@ -1782,11 +1782,11 @@ static int dvb_ca_en50221_io_release(struct inode *inode, struct file *file)
*
* return: Standard poll mask.
*/
-static unsigned int dvb_ca_en50221_io_poll(struct file *file, poll_table *wait)
+static __poll_t dvb_ca_en50221_io_poll(struct file *file, poll_table *wait)
{
struct dvb_device *dvbdev = file->private_data;
struct dvb_ca_private *ca = dvbdev->priv;
- unsigned int mask = 0;
+ __poll_t mask = 0;
int slot;
int result = 0;
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 2afaa8226342..48e16fd003a7 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -2470,7 +2470,7 @@ static int dvb_frontend_handle_ioctl(struct file *file,
}
-static unsigned int dvb_frontend_poll(struct file *file, struct poll_table_struct *wait)
+static __poll_t dvb_frontend_poll(struct file *file, struct poll_table_struct *wait)
{
struct dvb_device *dvbdev = file->private_data;
struct dvb_frontend *fe = dvbdev->priv;
diff --git a/drivers/media/firewire/firedtv-ci.c b/drivers/media/firewire/firedtv-ci.c
index edbb30fdd9d9..fb8a1d2ffd24 100644
--- a/drivers/media/firewire/firedtv-ci.c
+++ b/drivers/media/firewire/firedtv-ci.c
@@ -207,7 +207,7 @@ static int fdtv_ca_ioctl(struct file *file, unsigned int cmd, void *arg)
return err;
}
-static unsigned int fdtv_ca_io_poll(struct file *file, poll_table *wait)
+static __poll_t fdtv_ca_io_poll(struct file *file, poll_table *wait)
{
return POLLIN;
}
diff --git a/drivers/media/media-devnode.c b/drivers/media/media-devnode.c
index 423248f577b6..3049b1f505e5 100644
--- a/drivers/media/media-devnode.c
+++ b/drivers/media/media-devnode.c
@@ -99,7 +99,7 @@ static ssize_t media_write(struct file *filp, const char __user *buf,
return devnode->fops->write(filp, buf, sz, off);
}
-static unsigned int media_poll(struct file *filp,
+static __poll_t media_poll(struct file *filp,
struct poll_table_struct *poll)
{
struct media_devnode *devnode = media_devnode_data(filp);
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index b366a7e1d976..c988669e22ff 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -2955,13 +2955,13 @@ static ssize_t bttv_read(struct file *file, char __user *data,
return retval;
}
-static unsigned int bttv_poll(struct file *file, poll_table *wait)
+static __poll_t bttv_poll(struct file *file, poll_table *wait)
{
struct bttv_fh *fh = file->private_data;
struct bttv_buffer *buf;
enum v4l2_field field;
- unsigned int rc = 0;
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t rc = 0;
+ __poll_t req_events = poll_requested_events(wait);
if (v4l2_event_pending(&fh->fh))
rc = POLLPRI;
@@ -3329,13 +3329,13 @@ static ssize_t radio_read(struct file *file, char __user *data,
return cmd.result;
}
-static unsigned int radio_poll(struct file *file, poll_table *wait)
+static __poll_t radio_poll(struct file *file, poll_table *wait)
{
struct bttv_fh *fh = file->private_data;
struct bttv *btv = fh->btv;
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct saa6588_command cmd;
- unsigned int res = 0;
+ __poll_t res = 0;
if (v4l2_event_pending(&fh->fh))
res = POLLPRI;
diff --git a/drivers/media/pci/cx18/cx18-fileops.c b/drivers/media/pci/cx18/cx18-fileops.c
index 4f9c2395941b..2b0abd5bbf64 100644
--- a/drivers/media/pci/cx18/cx18-fileops.c
+++ b/drivers/media/pci/cx18/cx18-fileops.c
@@ -602,14 +602,14 @@ ssize_t cx18_v4l2_read(struct file *filp, char __user *buf, size_t count,
return cx18_read_pos(s, buf, count, pos, filp->f_flags & O_NONBLOCK);
}
-unsigned int cx18_v4l2_enc_poll(struct file *filp, poll_table *wait)
+__poll_t cx18_v4l2_enc_poll(struct file *filp, poll_table *wait)
{
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct cx18_open_id *id = file2id(filp);
struct cx18 *cx = id->cx;
struct cx18_stream *s = &cx->streams[id->type];
int eof = test_bit(CX18_F_S_STREAMOFF, &s->s_flags);
- unsigned res = 0;
+ __poll_t res = 0;
/* Start a capture if there is none */
if (!eof && !test_bit(CX18_F_S_STREAMING, &s->s_flags) &&
@@ -629,7 +629,7 @@ unsigned int cx18_v4l2_enc_poll(struct file *filp, poll_table *wait)
if ((s->vb_type == V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
(id->type == CX18_ENC_STREAM_TYPE_YUV)) {
- int videobuf_poll = videobuf_poll_stream(filp, &s->vbuf_q, wait);
+ __poll_t videobuf_poll = videobuf_poll_stream(filp, &s->vbuf_q, wait);
if (v4l2_event_pending(&id->fh))
res |= POLLPRI;
diff --git a/drivers/media/pci/cx18/cx18-fileops.h b/drivers/media/pci/cx18/cx18-fileops.h
index 37ef34e866cb..5b44d30efd8f 100644
--- a/drivers/media/pci/cx18/cx18-fileops.h
+++ b/drivers/media/pci/cx18/cx18-fileops.h
@@ -23,7 +23,7 @@ ssize_t cx18_v4l2_read(struct file *filp, char __user *buf, size_t count,
ssize_t cx18_v4l2_write(struct file *filp, const char __user *buf, size_t count,
loff_t *pos);
int cx18_v4l2_close(struct file *filp);
-unsigned int cx18_v4l2_enc_poll(struct file *filp, poll_table *wait);
+__poll_t cx18_v4l2_enc_poll(struct file *filp, poll_table *wait);
int cx18_start_capture(struct cx18_open_id *id);
void cx18_stop_capture(struct cx18_open_id *id, int gop_end);
void cx18_mute(struct cx18 *cx);
diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c
index f4bd4908acdd..09a25d6c2cd1 100644
--- a/drivers/media/pci/ddbridge/ddbridge-core.c
+++ b/drivers/media/pci/ddbridge/ddbridge-core.c
@@ -732,13 +732,13 @@ static ssize_t ts_read(struct file *file, __user char *buf,
return (count && (left == count)) ? -EAGAIN : (count - left);
}
-static unsigned int ts_poll(struct file *file, poll_table *wait)
+static __poll_t ts_poll(struct file *file, poll_table *wait)
{
struct dvb_device *dvbdev = file->private_data;
struct ddb_output *output = dvbdev->priv;
struct ddb_input *input = output->port->input[0];
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &input->dma->wq, wait);
poll_wait(file, &output->dma->wq, wait);
diff --git a/drivers/media/pci/ivtv/ivtv-fileops.c b/drivers/media/pci/ivtv/ivtv-fileops.c
index c9bd018e53de..4aa773507201 100644
--- a/drivers/media/pci/ivtv/ivtv-fileops.c
+++ b/drivers/media/pci/ivtv/ivtv-fileops.c
@@ -730,12 +730,12 @@ ssize_t ivtv_v4l2_write(struct file *filp, const char __user *user_buf, size_t c
return res;
}
-unsigned int ivtv_v4l2_dec_poll(struct file *filp, poll_table *wait)
+__poll_t ivtv_v4l2_dec_poll(struct file *filp, poll_table *wait)
{
struct ivtv_open_id *id = fh2id(filp->private_data);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
- int res = 0;
+ __poll_t res = 0;
/* add stream's waitq to the poll list */
IVTV_DEBUG_HI_FILE("Decoder poll\n");
@@ -764,14 +764,14 @@ unsigned int ivtv_v4l2_dec_poll(struct file *filp, poll_table *wait)
return res;
}
-unsigned int ivtv_v4l2_enc_poll(struct file *filp, poll_table *wait)
+__poll_t ivtv_v4l2_enc_poll(struct file *filp, poll_table *wait)
{
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct ivtv_open_id *id = fh2id(filp->private_data);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
int eof = test_bit(IVTV_F_S_STREAMOFF, &s->s_flags);
- unsigned res = 0;
+ __poll_t res = 0;
/* Start a capture if there is none */
if (!eof && !test_bit(IVTV_F_S_STREAMING, &s->s_flags) &&
diff --git a/drivers/media/pci/ivtv/ivtv-fileops.h b/drivers/media/pci/ivtv/ivtv-fileops.h
index 5e08800772ca..e0029b2b8d09 100644
--- a/drivers/media/pci/ivtv/ivtv-fileops.h
+++ b/drivers/media/pci/ivtv/ivtv-fileops.h
@@ -28,8 +28,8 @@ ssize_t ivtv_v4l2_read(struct file *filp, char __user *buf, size_t count,
ssize_t ivtv_v4l2_write(struct file *filp, const char __user *buf, size_t count,
loff_t * pos);
int ivtv_v4l2_close(struct file *filp);
-unsigned int ivtv_v4l2_enc_poll(struct file *filp, poll_table * wait);
-unsigned int ivtv_v4l2_dec_poll(struct file *filp, poll_table * wait);
+__poll_t ivtv_v4l2_enc_poll(struct file *filp, poll_table * wait);
+__poll_t ivtv_v4l2_dec_poll(struct file *filp, poll_table * wait);
int ivtv_start_capture(struct ivtv_open_id *id);
void ivtv_stop_capture(struct ivtv_open_id *id, int gop_end);
int ivtv_start_decoding(struct ivtv_open_id *id, int speed);
diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c
index 23999a8cef37..f74b08635082 100644
--- a/drivers/media/pci/meye/meye.c
+++ b/drivers/media/pci/meye/meye.c
@@ -1423,9 +1423,9 @@ static long vidioc_default(struct file *file, void *fh, bool valid_prio,
}
-static unsigned int meye_poll(struct file *file, poll_table *wait)
+static __poll_t meye_poll(struct file *file, poll_table *wait)
{
- unsigned int res = v4l2_ctrl_poll(file, wait);
+ __poll_t res = v4l2_ctrl_poll(file, wait);
mutex_lock(&meye.lock);
poll_wait(file, &meye.proc_list, wait);
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index 82d2a24644e4..0ceaa3473cf2 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -1227,11 +1227,11 @@ static ssize_t radio_read(struct file *file, char __user *data,
return cmd.result;
}
-static unsigned int radio_poll(struct file *file, poll_table *wait)
+static __poll_t radio_poll(struct file *file, poll_table *wait)
{
struct saa7134_dev *dev = video_drvdata(file);
struct saa6588_command cmd;
- unsigned int rc = v4l2_ctrl_poll(file, wait);
+ __poll_t rc = v4l2_ctrl_poll(file, wait);
cmd.instance = file;
cmd.event_list = wait;
diff --git a/drivers/media/pci/saa7164/saa7164-encoder.c b/drivers/media/pci/saa7164/saa7164-encoder.c
index f21c245a54f7..e7b31a5b14fd 100644
--- a/drivers/media/pci/saa7164/saa7164-encoder.c
+++ b/drivers/media/pci/saa7164/saa7164-encoder.c
@@ -909,13 +909,13 @@ err:
return ret;
}
-static unsigned int fops_poll(struct file *file, poll_table *wait)
+static __poll_t fops_poll(struct file *file, poll_table *wait)
{
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct saa7164_encoder_fh *fh =
(struct saa7164_encoder_fh *)file->private_data;
struct saa7164_port *port = fh->port;
- unsigned int mask = v4l2_ctrl_poll(file, wait);
+ __poll_t mask = v4l2_ctrl_poll(file, wait);
port->last_poll_msecs_diff = port->last_poll_msecs;
port->last_poll_msecs = jiffies_to_msecs(jiffies);
diff --git a/drivers/media/pci/saa7164/saa7164-vbi.c b/drivers/media/pci/saa7164/saa7164-vbi.c
index 9255d7d23947..6f97c8f2e00d 100644
--- a/drivers/media/pci/saa7164/saa7164-vbi.c
+++ b/drivers/media/pci/saa7164/saa7164-vbi.c
@@ -614,11 +614,11 @@ err:
return ret;
}
-static unsigned int fops_poll(struct file *file, poll_table *wait)
+static __poll_t fops_poll(struct file *file, poll_table *wait)
{
struct saa7164_vbi_fh *fh = (struct saa7164_vbi_fh *)file->private_data;
struct saa7164_port *port = fh->port;
- unsigned int mask = 0;
+ __poll_t mask = 0;
port->last_poll_msecs_diff = port->last_poll_msecs;
port->last_poll_msecs = jiffies_to_msecs(jiffies);
diff --git a/drivers/media/pci/ttpci/av7110_av.c b/drivers/media/pci/ttpci/av7110_av.c
index 2aa4ba675194..4d10e2f979d2 100644
--- a/drivers/media/pci/ttpci/av7110_av.c
+++ b/drivers/media/pci/ttpci/av7110_av.c
@@ -937,11 +937,11 @@ static int dvb_video_get_event (struct av7110 *av7110, struct video_event *event
* DVB device file operations
******************************************************************************/
-static unsigned int dvb_video_poll(struct file *file, poll_table *wait)
+static __poll_t dvb_video_poll(struct file *file, poll_table *wait)
{
struct dvb_device *dvbdev = file->private_data;
struct av7110 *av7110 = dvbdev->priv;
- unsigned int mask = 0;
+ __poll_t mask = 0;
dprintk(2, "av7110:%p, \n", av7110);
@@ -989,11 +989,11 @@ static ssize_t dvb_video_write(struct file *file, const char __user *buf,
return dvb_play(av7110, buf, count, file->f_flags & O_NONBLOCK, 1);
}
-static unsigned int dvb_audio_poll(struct file *file, poll_table *wait)
+static __poll_t dvb_audio_poll(struct file *file, poll_table *wait)
{
struct dvb_device *dvbdev = file->private_data;
struct av7110 *av7110 = dvbdev->priv;
- unsigned int mask = 0;
+ __poll_t mask = 0;
dprintk(2, "av7110:%p, \n", av7110);
diff --git a/drivers/media/pci/ttpci/av7110_ca.c b/drivers/media/pci/ttpci/av7110_ca.c
index 1fe49171d823..96ca227cf51b 100644
--- a/drivers/media/pci/ttpci/av7110_ca.c
+++ b/drivers/media/pci/ttpci/av7110_ca.c
@@ -223,13 +223,13 @@ static int dvb_ca_open(struct inode *inode, struct file *file)
return 0;
}
-static unsigned int dvb_ca_poll (struct file *file, poll_table *wait)
+static __poll_t dvb_ca_poll (struct file *file, poll_table *wait)
{
struct dvb_device *dvbdev = file->private_data;
struct av7110 *av7110 = dvbdev->priv;
struct dvb_ringbuffer *rbuf = &av7110->ci_rbuffer;
struct dvb_ringbuffer *wbuf = &av7110->ci_wbuffer;
- unsigned int mask = 0;
+ __poll_t mask = 0;
dprintk(8, "av7110:%p\n",av7110);
diff --git a/drivers/media/pci/zoran/zoran_driver.c b/drivers/media/pci/zoran/zoran_driver.c
index d07840072337..b6a6c4f171d0 100644
--- a/drivers/media/pci/zoran/zoran_driver.c
+++ b/drivers/media/pci/zoran/zoran_driver.c
@@ -2501,13 +2501,13 @@ static int zoran_s_jpegcomp(struct file *file, void *__fh,
return res;
}
-static unsigned int
+static __poll_t
zoran_poll (struct file *file,
poll_table *wait)
{
struct zoran_fh *fh = file->private_data;
struct zoran *zr = fh->zr;
- int res = v4l2_ctrl_poll(file, wait);
+ __poll_t res = v4l2_ctrl_poll(file, wait);
int frame;
unsigned long flags;
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index 7b3f6f8e3dc8..cf65b39807fe 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -730,7 +730,7 @@ static int vpfe_mmap(struct file *file, struct vm_area_struct *vma)
/*
* vpfe_poll: It is used for select/poll system call
*/
-static unsigned int vpfe_poll(struct file *file, poll_table *wait)
+static __poll_t vpfe_poll(struct file *file, poll_table *wait)
{
struct vpfe_device *vpfe_dev = video_drvdata(file);
diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
index 2a2994ef15d5..b2dc524112f7 100644
--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
+++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
@@ -707,12 +707,12 @@ static int gsc_m2m_release(struct file *file)
return 0;
}
-static unsigned int gsc_m2m_poll(struct file *file,
+static __poll_t gsc_m2m_poll(struct file *file,
struct poll_table_struct *wait)
{
struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
struct gsc_dev *gsc = ctx->gsc_dev;
- unsigned int ret;
+ __poll_t ret;
if (mutex_lock_interruptible(&gsc->lock))
return -ERESTARTSYS;
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
index dba21215dc84..de285a269390 100644
--- a/drivers/media/platform/fsl-viu.c
+++ b/drivers/media/platform/fsl-viu.c
@@ -1263,13 +1263,13 @@ static ssize_t viu_read(struct file *file, char __user *data, size_t count,
return 0;
}
-static unsigned int viu_poll(struct file *file, struct poll_table_struct *wait)
+static __poll_t viu_poll(struct file *file, struct poll_table_struct *wait)
{
struct viu_fh *fh = file->private_data;
struct videobuf_queue *q = &fh->vb_vidq;
struct viu_dev *dev = fh->dev;
- unsigned long req_events = poll_requested_events(wait);
- unsigned int res = v4l2_ctrl_poll(file, wait);
+ __poll_t req_events = poll_requested_events(wait);
+ __poll_t res = v4l2_ctrl_poll(file, wait);
if (V4L2_BUF_TYPE_VIDEO_CAPTURE != fh->type)
return POLLERR;
diff --git a/drivers/media/platform/m2m-deinterlace.c b/drivers/media/platform/m2m-deinterlace.c
index c8a12493f395..945ef1e2ccc7 100644
--- a/drivers/media/platform/m2m-deinterlace.c
+++ b/drivers/media/platform/m2m-deinterlace.c
@@ -950,11 +950,11 @@ static int deinterlace_release(struct file *file)
return 0;
}
-static unsigned int deinterlace_poll(struct file *file,
+static __poll_t deinterlace_poll(struct file *file,
struct poll_table_struct *wait)
{
struct deinterlace_ctx *ctx = file->private_data;
- int ret;
+ __poll_t ret;
deinterlace_lock(ctx);
ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
index 4a2b1afa19c4..5a8eff60e95f 100644
--- a/drivers/media/platform/mx2_emmaprp.c
+++ b/drivers/media/platform/mx2_emmaprp.c
@@ -838,12 +838,12 @@ static int emmaprp_release(struct file *file)
return 0;
}
-static unsigned int emmaprp_poll(struct file *file,
+static __poll_t emmaprp_poll(struct file *file,
struct poll_table_struct *wait)
{
struct emmaprp_dev *pcdev = video_drvdata(file);
struct emmaprp_ctx *ctx = file->private_data;
- unsigned int res;
+ __poll_t res;
mutex_lock(&pcdev->dev_mutex);
res = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index 6f1b0c799e58..abb14ee20538 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -839,7 +839,7 @@ static void omap_vout_buffer_release(struct videobuf_queue *q,
/*
* File operations
*/
-static unsigned int omap_vout_poll(struct file *file,
+static __poll_t omap_vout_poll(struct file *file,
struct poll_table_struct *wait)
{
struct omap_vout_device *vout = file->private_data;
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index 218e6d7ae93a..a751c89a3ea8 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -1383,11 +1383,11 @@ static int isp_video_release(struct file *file)
return 0;
}
-static unsigned int isp_video_poll(struct file *file, poll_table *wait)
+static __poll_t isp_video_poll(struct file *file, poll_table *wait)
{
struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
struct isp_video *video = video_drvdata(file);
- int ret;
+ __poll_t ret;
mutex_lock(&video->queue_lock);
ret = vb2_poll(&vfh->queue, file, wait);
diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c
index 25c7a7d42292..437395a61065 100644
--- a/drivers/media/platform/s3c-camif/camif-capture.c
+++ b/drivers/media/platform/s3c-camif/camif-capture.c
@@ -590,12 +590,12 @@ static int s3c_camif_close(struct file *file)
return ret;
}
-static unsigned int s3c_camif_poll(struct file *file,
+static __poll_t s3c_camif_poll(struct file *file,
struct poll_table_struct *wait)
{
struct camif_vp *vp = video_drvdata(file);
struct camif_dev *camif = vp->camif;
- int ret;
+ __poll_t ret;
mutex_lock(&camif->lock);
if (vp->owner && vp->owner != file->private_data)
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index bc68dbbcaec1..fe94bd6b705e 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -988,14 +988,14 @@ static int s5p_mfc_release(struct file *file)
}
/* Poll */
-static unsigned int s5p_mfc_poll(struct file *file,
+static __poll_t s5p_mfc_poll(struct file *file,
struct poll_table_struct *wait)
{
struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
struct s5p_mfc_dev *dev = ctx->dev;
struct vb2_queue *src_q, *dst_q;
struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
- unsigned int rc = 0;
+ __poll_t rc = 0;
unsigned long flags;
mutex_lock(&dev->mfc_mutex);
diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c
index dedc1b024f6f..976ea0bb5b6c 100644
--- a/drivers/media/platform/sh_veu.c
+++ b/drivers/media/platform/sh_veu.c
@@ -1016,7 +1016,7 @@ static int sh_veu_release(struct file *file)
return 0;
}
-static unsigned int sh_veu_poll(struct file *file,
+static __poll_t sh_veu_poll(struct file *file,
struct poll_table_struct *wait)
{
struct sh_veu_file *veu_file = file->private_data;
diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
index 36762ec954e7..9b069783e3ed 100644
--- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
+++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
@@ -1553,7 +1553,7 @@ static int sh_mobile_ceu_set_liveselection(struct soc_camera_device *icd,
return ret;
}
-static unsigned int sh_mobile_ceu_poll(struct file *file, poll_table *pt)
+static __poll_t sh_mobile_ceu_poll(struct file *file, poll_table *pt)
{
struct soc_camera_device *icd = file->private_data;
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
index 916ff68b73d4..d964c072832c 100644
--- a/drivers/media/platform/soc_camera/soc_camera.c
+++ b/drivers/media/platform/soc_camera/soc_camera.c
@@ -805,11 +805,11 @@ static int soc_camera_mmap(struct file *file, struct vm_area_struct *vma)
return err;
}
-static unsigned int soc_camera_poll(struct file *file, poll_table *pt)
+static __poll_t soc_camera_poll(struct file *file, poll_table *pt)
{
struct soc_camera_device *icd = file->private_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- unsigned res = POLLERR;
+ __poll_t res = POLLERR;
if (icd->streamer != file)
return POLLERR;
diff --git a/drivers/media/platform/via-camera.c b/drivers/media/platform/via-camera.c
index 805d4a8fc17e..f77be9302120 100644
--- a/drivers/media/platform/via-camera.c
+++ b/drivers/media/platform/via-camera.c
@@ -764,7 +764,7 @@ out_unlock:
}
-static unsigned int viacam_poll(struct file *filp, struct poll_table_struct *pt)
+static __poll_t viacam_poll(struct file *filp, struct poll_table_struct *pt)
{
struct via_camera *cam = video_drvdata(filp);
diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c
index 5f316a5e38db..c80239592088 100644
--- a/drivers/media/platform/vivid/vivid-core.c
+++ b/drivers/media/platform/vivid/vivid-core.c
@@ -416,7 +416,7 @@ static ssize_t vivid_radio_write(struct file *file, const char __user *buf,
return vivid_radio_tx_write(file, buf, size, offset);
}
-static unsigned int vivid_radio_poll(struct file *file, struct poll_table_struct *wait)
+static __poll_t vivid_radio_poll(struct file *file, struct poll_table_struct *wait)
{
struct video_device *vdev = video_devdata(file);
diff --git a/drivers/media/platform/vivid/vivid-radio-rx.c b/drivers/media/platform/vivid/vivid-radio-rx.c
index 47c36c26096b..71f3ebb7aecf 100644
--- a/drivers/media/platform/vivid/vivid-radio-rx.c
+++ b/drivers/media/platform/vivid/vivid-radio-rx.c
@@ -141,7 +141,7 @@ retry:
return i;
}
-unsigned int vivid_radio_rx_poll(struct file *file, struct poll_table_struct *wait)
+__poll_t vivid_radio_rx_poll(struct file *file, struct poll_table_struct *wait)
{
return POLLIN | POLLRDNORM | v4l2_ctrl_poll(file, wait);
}
diff --git a/drivers/media/platform/vivid/vivid-radio-rx.h b/drivers/media/platform/vivid/vivid-radio-rx.h
index 1077d8f061eb..2b33edb60942 100644
--- a/drivers/media/platform/vivid/vivid-radio-rx.h
+++ b/drivers/media/platform/vivid/vivid-radio-rx.h
@@ -21,7 +21,7 @@
#define _VIVID_RADIO_RX_H_
ssize_t vivid_radio_rx_read(struct file *, char __user *, size_t, loff_t *);
-unsigned int vivid_radio_rx_poll(struct file *file, struct poll_table_struct *wait);
+__poll_t vivid_radio_rx_poll(struct file *file, struct poll_table_struct *wait);
int vivid_radio_rx_enum_freq_bands(struct file *file, void *fh, struct v4l2_frequency_band *band);
int vivid_radio_rx_s_hw_freq_seek(struct file *file, void *fh, const struct v4l2_hw_freq_seek *a);
diff --git a/drivers/media/platform/vivid/vivid-radio-tx.c b/drivers/media/platform/vivid/vivid-radio-tx.c
index 0e8025b7b4dd..f0917f4e7d8c 100644
--- a/drivers/media/platform/vivid/vivid-radio-tx.c
+++ b/drivers/media/platform/vivid/vivid-radio-tx.c
@@ -105,7 +105,7 @@ retry:
return i;
}
-unsigned int vivid_radio_tx_poll(struct file *file, struct poll_table_struct *wait)
+__poll_t vivid_radio_tx_poll(struct file *file, struct poll_table_struct *wait)
{
return POLLOUT | POLLWRNORM | v4l2_ctrl_poll(file, wait);
}
diff --git a/drivers/media/platform/vivid/vivid-radio-tx.h b/drivers/media/platform/vivid/vivid-radio-tx.h
index 7f8ff7547119..3c3343d70cbc 100644
--- a/drivers/media/platform/vivid/vivid-radio-tx.h
+++ b/drivers/media/platform/vivid/vivid-radio-tx.h
@@ -21,7 +21,7 @@
#define _VIVID_RADIO_TX_H_
ssize_t vivid_radio_tx_write(struct file *, const char __user *, size_t, loff_t *);
-unsigned int vivid_radio_tx_poll(struct file *file, struct poll_table_struct *wait);
+__poll_t vivid_radio_tx_poll(struct file *file, struct poll_table_struct *wait);
int vidioc_g_modulator(struct file *file, void *fh, struct v4l2_modulator *a);
int vidioc_s_modulator(struct file *file, void *fh, const struct v4l2_modulator *a);
diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
index 7575e5370a49..dba611003a00 100644
--- a/drivers/media/radio/radio-cadet.c
+++ b/drivers/media/radio/radio-cadet.c
@@ -481,11 +481,11 @@ static int cadet_release(struct file *file)
return 0;
}
-static unsigned int cadet_poll(struct file *file, struct poll_table_struct *wait)
+static __poll_t cadet_poll(struct file *file, struct poll_table_struct *wait)
{
struct cadet *dev = video_drvdata(file);
- unsigned long req_events = poll_requested_events(wait);
- unsigned int res = v4l2_ctrl_poll(file, wait);
+ __poll_t req_events = poll_requested_events(wait);
+ __poll_t res = v4l2_ctrl_poll(file, wait);
poll_wait(file, &dev->read_queue, wait);
if (dev->rdsstat == 0 && (req_events & (POLLIN | POLLRDNORM))) {
diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
index 540ac887a63c..49293dd707b9 100644
--- a/drivers/media/radio/radio-si476x.c
+++ b/drivers/media/radio/radio-si476x.c
@@ -1153,12 +1153,12 @@ static ssize_t si476x_radio_fops_read(struct file *file, char __user *buf,
return rval;
}
-static unsigned int si476x_radio_fops_poll(struct file *file,
+static __poll_t si476x_radio_fops_poll(struct file *file,
struct poll_table_struct *pts)
{
struct si476x_radio *radio = video_drvdata(file);
- unsigned long req_events = poll_requested_events(pts);
- unsigned int err = v4l2_ctrl_poll(file, pts);
+ __poll_t req_events = poll_requested_events(pts);
+ __poll_t err = v4l2_ctrl_poll(file, pts);
if (req_events & (POLLIN | POLLRDNORM)) {
if (atomic_read(&radio->core->is_alive))
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index 3cbdc085c65d..f92b0f9241a9 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -1089,7 +1089,7 @@ out:
return r;
}
-static unsigned int wl1273_fm_fops_poll(struct file *file,
+static __poll_t wl1273_fm_fops_poll(struct file *file,
struct poll_table_struct *pts)
{
struct wl1273_device *radio = video_get_drvdata(video_devdata(file));
diff --git a/drivers/media/radio/si470x/radio-si470x-common.c b/drivers/media/radio/si470x/radio-si470x-common.c
index c89a7d5b8c55..68fe9e5c7a70 100644
--- a/drivers/media/radio/si470x/radio-si470x-common.c
+++ b/drivers/media/radio/si470x/radio-si470x-common.c
@@ -507,12 +507,12 @@ done:
/*
* si470x_fops_poll - poll RDS data
*/
-static unsigned int si470x_fops_poll(struct file *file,
+static __poll_t si470x_fops_poll(struct file *file,
struct poll_table_struct *pts)
{
struct si470x_device *radio = video_drvdata(file);
- unsigned long req_events = poll_requested_events(pts);
- int retval = v4l2_ctrl_poll(file, pts);
+ __poll_t req_events = poll_requested_events(pts);
+ __poll_t retval = v4l2_ctrl_poll(file, pts);
if (req_events & (POLLIN | POLLRDNORM)) {
/* switch on rds reception */
diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c
index fc5a7abc83d2..fd603c1b96bb 100644
--- a/drivers/media/radio/wl128x/fmdrv_v4l2.c
+++ b/drivers/media/radio/wl128x/fmdrv_v4l2.c
@@ -102,7 +102,7 @@ static ssize_t fm_v4l2_fops_write(struct file *file, const char __user * buf,
return sizeof(rds);
}
-static u32 fm_v4l2_fops_poll(struct file *file, struct poll_table_struct *pts)
+static __poll_t fm_v4l2_fops_poll(struct file *file, struct poll_table_struct *pts)
{
int ret;
struct fmdev *fmdev;
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index e16d1138ca48..aab53641838b 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -272,10 +272,10 @@ int lirc_dev_fop_close(struct inode *inode, struct file *file)
}
EXPORT_SYMBOL(lirc_dev_fop_close);
-unsigned int lirc_dev_fop_poll(struct file *file, poll_table *wait)
+__poll_t lirc_dev_fop_poll(struct file *file, poll_table *wait)
{
struct lirc_dev *d = file->private_data;
- unsigned int ret;
+ __poll_t ret;
if (!d->attached)
return POLLHUP | POLLERR;
diff --git a/drivers/media/usb/cpia2/cpia2.h b/drivers/media/usb/cpia2/cpia2.h
index 81f72c0b561f..ab238ac8bfc0 100644
--- a/drivers/media/usb/cpia2/cpia2.h
+++ b/drivers/media/usb/cpia2/cpia2.h
@@ -444,7 +444,7 @@ int cpia2_allocate_buffers(struct camera_data *cam);
void cpia2_free_buffers(struct camera_data *cam);
long cpia2_read(struct camera_data *cam,
char __user *buf, unsigned long count, int noblock);
-unsigned int cpia2_poll(struct camera_data *cam,
+__poll_t cpia2_poll(struct camera_data *cam,
struct file *filp, poll_table *wait);
int cpia2_remap_buffer(struct camera_data *cam, struct vm_area_struct *vma);
void cpia2_set_property_flip(struct camera_data *cam, int prop_val);
diff --git a/drivers/media/usb/cpia2/cpia2_core.c b/drivers/media/usb/cpia2/cpia2_core.c
index 0efba0da0a45..e7524920c618 100644
--- a/drivers/media/usb/cpia2/cpia2_core.c
+++ b/drivers/media/usb/cpia2/cpia2_core.c
@@ -2370,10 +2370,10 @@ long cpia2_read(struct camera_data *cam,
* cpia2_poll
*
*****************************************************************************/
-unsigned int cpia2_poll(struct camera_data *cam, struct file *filp,
+__poll_t cpia2_poll(struct camera_data *cam, struct file *filp,
poll_table *wait)
{
- unsigned int status = v4l2_ctrl_poll(filp, wait);
+ __poll_t status = v4l2_ctrl_poll(filp, wait);
if ((poll_requested_events(wait) & (POLLIN | POLLRDNORM)) &&
!cam->streaming) {
diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c
index 3dedd83f0b19..74c97565ccc6 100644
--- a/drivers/media/usb/cpia2/cpia2_v4l.c
+++ b/drivers/media/usb/cpia2/cpia2_v4l.c
@@ -169,10 +169,10 @@ static ssize_t cpia2_v4l_read(struct file *file, char __user *buf, size_t count,
* cpia2_v4l_poll
*
*****************************************************************************/
-static unsigned int cpia2_v4l_poll(struct file *filp, struct poll_table_struct *wait)
+static __poll_t cpia2_v4l_poll(struct file *filp, struct poll_table_struct *wait)
{
struct camera_data *cam = video_drvdata(filp);
- unsigned int res;
+ __poll_t res;
mutex_lock(&cam->v4l2_lock);
res = cpia2_poll(cam, filp, wait);
diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c
index d538fa407742..103e3299b77f 100644
--- a/drivers/media/usb/cx231xx/cx231xx-417.c
+++ b/drivers/media/usb/cx231xx/cx231xx-417.c
@@ -1812,13 +1812,13 @@ static ssize_t mpeg_read(struct file *file, char __user *data,
file->f_flags & O_NONBLOCK);
}
-static unsigned int mpeg_poll(struct file *file,
+static __poll_t mpeg_poll(struct file *file,
struct poll_table_struct *wait)
{
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct cx231xx_fh *fh = file->private_data;
struct cx231xx *dev = fh->dev;
- unsigned int res = 0;
+ __poll_t res = 0;
if (v4l2_event_pending(&fh->fh))
res |= POLLPRI;
diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c
index 226059fc672b..d7b2e694bbb9 100644
--- a/drivers/media/usb/cx231xx/cx231xx-video.c
+++ b/drivers/media/usb/cx231xx/cx231xx-video.c
@@ -2006,12 +2006,12 @@ cx231xx_v4l2_read(struct file *filp, char __user *buf, size_t count,
* cx231xx_v4l2_poll()
* will allocate buffers when called for the first time
*/
-static unsigned int cx231xx_v4l2_poll(struct file *filp, poll_table *wait)
+static __poll_t cx231xx_v4l2_poll(struct file *filp, poll_table *wait)
{
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct cx231xx_fh *fh = filp->private_data;
struct cx231xx *dev = fh->dev;
- unsigned res = 0;
+ __poll_t res = 0;
int rc;
rc = check_dev(dev);
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index 961343873fd0..b72d02e225fd 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -1862,11 +1862,11 @@ out:
return ret;
}
-static unsigned int dev_poll(struct file *file, poll_table *wait)
+static __poll_t dev_poll(struct file *file, poll_table *wait)
{
struct gspca_dev *gspca_dev = video_drvdata(file);
- unsigned long req_events = poll_requested_events(wait);
- int ret = 0;
+ __poll_t req_events = poll_requested_events(wait);
+ __poll_t ret = 0;
PDEBUG(D_FRAM, "poll");
diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c
index 7fb036d6a86e..d0d638c2e900 100644
--- a/drivers/media/usb/hdpvr/hdpvr-video.c
+++ b/drivers/media/usb/hdpvr/hdpvr-video.c
@@ -521,12 +521,12 @@ err:
return ret;
}
-static unsigned int hdpvr_poll(struct file *filp, poll_table *wait)
+static __poll_t hdpvr_poll(struct file *filp, poll_table *wait)
{
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct hdpvr_buffer *buf = NULL;
struct hdpvr_device *dev = video_drvdata(filp);
- unsigned int mask = v4l2_ctrl_poll(filp, wait);
+ __poll_t mask = v4l2_ctrl_poll(filp, wait);
if (!(req_events & (POLLIN | POLLRDNORM)))
return mask;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index 4320bda9352d..11cdfe356801 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -1190,9 +1190,9 @@ static ssize_t pvr2_v4l2_read(struct file *file,
}
-static unsigned int pvr2_v4l2_poll(struct file *file, poll_table *wait)
+static __poll_t pvr2_v4l2_poll(struct file *file, poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
struct pvr2_v4l2_fh *fh = file->private_data;
int ret;
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index c0bba773db25..cba0916d33e5 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -721,10 +721,10 @@ static ssize_t v4l_stk_read(struct file *fp, char __user *buf,
return ret;
}
-static unsigned int v4l_stk_poll(struct file *fp, poll_table *wait)
+static __poll_t v4l_stk_poll(struct file *fp, poll_table *wait)
{
struct stk_camera *dev = video_drvdata(fp);
- unsigned res = v4l2_ctrl_poll(fp, wait);
+ __poll_t res = v4l2_ctrl_poll(fp, wait);
poll_wait(fp, &dev->wait_frame, wait);
diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c
index 9fa25de6b5a9..317bf5a3933e 100644
--- a/drivers/media/usb/tm6000/tm6000-video.c
+++ b/drivers/media/usb/tm6000/tm6000-video.c
@@ -1423,13 +1423,13 @@ tm6000_read(struct file *file, char __user *data, size_t count, loff_t *pos)
return 0;
}
-static unsigned int
+static __poll_t
__tm6000_poll(struct file *file, struct poll_table_struct *wait)
{
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct tm6000_fh *fh = file->private_data;
struct tm6000_buffer *buf;
- int res = 0;
+ __poll_t res = 0;
if (v4l2_event_pending(&fh->fh))
res = POLLPRI;
@@ -1457,11 +1457,11 @@ __tm6000_poll(struct file *file, struct poll_table_struct *wait)
return res;
}
-static unsigned int tm6000_poll(struct file *file, struct poll_table_struct *wait)
+static __poll_t tm6000_poll(struct file *file, struct poll_table_struct *wait)
{
struct tm6000_fh *fh = file->private_data;
struct tm6000_core *dev = fh->dev;
- unsigned int res;
+ __poll_t res;
mutex_lock(&dev->lock);
res = __tm6000_poll(file, wait);
diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
index c8d78b2f3de4..692c463f14f7 100644
--- a/drivers/media/usb/uvc/uvc_queue.c
+++ b/drivers/media/usb/uvc/uvc_queue.c
@@ -340,10 +340,10 @@ unsigned long uvc_queue_get_unmapped_area(struct uvc_video_queue *queue,
}
#endif
-unsigned int uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
+__poll_t uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
poll_table *wait)
{
- unsigned int ret;
+ __poll_t ret;
mutex_lock(&queue->mutex);
ret = vb2_poll(&queue->queue, file, wait);
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index 3e7e283a44a8..381f614b2f4c 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -1284,36 +1284,30 @@ struct uvc_xu_control_mapping32 {
static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp,
const struct uvc_xu_control_mapping32 __user *up)
{
- compat_caddr_t p;
+ struct uvc_xu_control_mapping32 *p = (void *)kp;
+ compat_caddr_t info;
+ u32 count;
- if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
- __copy_from_user(kp, up, offsetof(typeof(*up), menu_info)) ||
- __get_user(kp->menu_count, &up->menu_count))
+ if (copy_from_user(p, up, sizeof(*p)))
return -EFAULT;
- memset(kp->reserved, 0, sizeof(kp->reserved));
-
- if (kp->menu_count == 0) {
- kp->menu_info = NULL;
- return 0;
- }
-
- if (__get_user(p, &up->menu_info))
- return -EFAULT;
- kp->menu_info = compat_ptr(p);
+ count = p->menu_count;
+ info = p->menu_info;
+ memset(kp->reserved, 0, sizeof(kp->reserved));
+ kp->menu_info = count ? compat_ptr(info) : NULL;
+ kp->menu_count = count;
return 0;
}
static int uvc_v4l2_put_xu_mapping(const struct uvc_xu_control_mapping *kp,
struct uvc_xu_control_mapping32 __user *up)
{
- if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
- __copy_to_user(up, kp, offsetof(typeof(*up), menu_info)) ||
- __put_user(kp->menu_count, &up->menu_count))
+ if (copy_to_user(up, kp, offsetof(typeof(*up), menu_info)) ||
+ put_user(kp->menu_count, &up->menu_count))
return -EFAULT;
- if (__clear_user(up->reserved, sizeof(up->reserved)))
+ if (clear_user(up->reserved, sizeof(up->reserved)))
return -EFAULT;
return 0;
@@ -1330,31 +1324,26 @@ struct uvc_xu_control_query32 {
static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp,
const struct uvc_xu_control_query32 __user *up)
{
- compat_caddr_t p;
+ struct uvc_xu_control_query32 v;
- if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
- __copy_from_user(kp, up, offsetof(typeof(*up), data)))
+ if (copy_from_user(&v, up, sizeof(v)))
return -EFAULT;
- if (kp->size == 0) {
- kp->data = NULL;
- return 0;
- }
-
- if (__get_user(p, &up->data))
- return -EFAULT;
- kp->data = compat_ptr(p);
-
+ *kp = (struct uvc_xu_control_query){
+ .unit = v.unit,
+ .selector = v.selector,
+ .query = v.query,
+ .size = v.size,
+ .data = v.size ? compat_ptr(v.data) : NULL
+ };
return 0;
}
static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp,
struct uvc_xu_control_query32 __user *up)
{
- if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
- __copy_to_user(up, kp, offsetof(typeof(*up), data)))
+ if (copy_to_user(up, kp, offsetof(typeof(*up), data)))
return -EFAULT;
-
return 0;
}
@@ -1423,7 +1412,7 @@ static int uvc_v4l2_mmap(struct file *file, struct vm_area_struct *vma)
return uvc_queue_mmap(&stream->queue, vma);
}
-static unsigned int uvc_v4l2_poll(struct file *file, poll_table *wait)
+static __poll_t uvc_v4l2_poll(struct file *file, poll_table *wait)
{
struct uvc_fh *handle = file->private_data;
struct uvc_streaming *stream = handle->stream;
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index 05398784d1c8..9b44a7cd5ec1 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -678,7 +678,7 @@ extern struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
struct uvc_buffer *buf);
extern int uvc_queue_mmap(struct uvc_video_queue *queue,
struct vm_area_struct *vma);
-extern unsigned int uvc_queue_poll(struct uvc_video_queue *queue,
+extern __poll_t uvc_queue_poll(struct uvc_video_queue *queue,
struct file *file, poll_table *wait);
#ifndef CONFIG_MMU
extern unsigned long uvc_queue_get_unmapped_area(struct uvc_video_queue *queue,
diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
index 1d888661fd03..8b7c19943d46 100644
--- a/drivers/media/usb/zr364xx/zr364xx.c
+++ b/drivers/media/usb/zr364xx/zr364xx.c
@@ -1287,12 +1287,12 @@ static int zr364xx_mmap(struct file *file, struct vm_area_struct *vma)
return ret;
}
-static unsigned int zr364xx_poll(struct file *file,
+static __poll_t zr364xx_poll(struct file *file,
struct poll_table_struct *wait)
{
struct zr364xx_camera *cam = video_drvdata(file);
struct videobuf_queue *q = &cam->vb_vidq;
- unsigned res = v4l2_ctrl_poll(file, wait);
+ __poll_t res = v4l2_ctrl_poll(file, wait);
_DBG("%s\n", __func__);
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index cbb2ef43945f..b07657149434 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -3457,7 +3457,7 @@ int v4l2_ctrl_subdev_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
}
EXPORT_SYMBOL(v4l2_ctrl_subdev_subscribe_event);
-unsigned int v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait)
+__poll_t v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait)
{
struct v4l2_fh *fh = file->private_data;
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index c647ba648805..8ad8c1627768 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -331,10 +331,10 @@ static ssize_t v4l2_write(struct file *filp, const char __user *buf,
return ret;
}
-static unsigned int v4l2_poll(struct file *filp, struct poll_table_struct *poll)
+static __poll_t v4l2_poll(struct file *filp, struct poll_table_struct *poll)
{
struct video_device *vdev = video_devdata(filp);
- unsigned int res = POLLERR | POLLHUP;
+ __poll_t res = POLLERR | POLLHUP;
if (!vdev->fops->poll)
return DEFAULT_POLLMASK;
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index bc580fbe18fa..186156f8952a 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -500,14 +500,14 @@ int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);
-unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct poll_table_struct *wait)
{
struct video_device *vfd = video_devdata(file);
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct vb2_queue *src_q, *dst_q;
struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
- unsigned int rc = 0;
+ __poll_t rc = 0;
unsigned long flags;
if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
@@ -794,11 +794,11 @@ int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);
-unsigned int v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
+__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{
struct v4l2_fh *fh = file->private_data;
struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
- unsigned int ret;
+ __poll_t ret;
if (m2m_ctx->q_lock)
mutex_lock(m2m_ctx->q_lock);
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index 43fefa73e0a3..28966fa8c610 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -469,7 +469,7 @@ static long subdev_compat_ioctl32(struct file *file, unsigned int cmd,
}
#endif
-static unsigned int subdev_poll(struct file *file, poll_table *wait)
+static __poll_t subdev_poll(struct file *file, poll_table *wait)
{
struct video_device *vdev = video_devdata(file);
struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c
index e87fb13b22dc..9a89d3ae170f 100644
--- a/drivers/media/v4l2-core/videobuf-core.c
+++ b/drivers/media/v4l2-core/videobuf-core.c
@@ -1118,13 +1118,13 @@ done:
}
EXPORT_SYMBOL_GPL(videobuf_read_stream);
-unsigned int videobuf_poll_stream(struct file *file,
+__poll_t videobuf_poll_stream(struct file *file,
struct videobuf_queue *q,
poll_table *wait)
{
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct videobuf_buffer *buf = NULL;
- unsigned int rc = 0;
+ __poll_t rc = 0;
videobuf_queue_lock(q);
if (q->streaming) {
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index a8589d96ef72..0d9f772d6d03 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -2018,10 +2018,10 @@ void vb2_core_queue_release(struct vb2_queue *q)
}
EXPORT_SYMBOL_GPL(vb2_core_queue_release);
-unsigned int vb2_core_poll(struct vb2_queue *q, struct file *file,
+__poll_t vb2_core_poll(struct vb2_queue *q, struct file *file,
poll_table *wait)
{
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct vb2_buffer *vb = NULL;
unsigned long flags;
diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
index 4075314a6989..a49f7eb98c2e 100644
--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
+++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
@@ -671,11 +671,11 @@ void vb2_queue_release(struct vb2_queue *q)
}
EXPORT_SYMBOL_GPL(vb2_queue_release);
-unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
+__poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
{
struct video_device *vfd = video_devdata(file);
- unsigned long req_events = poll_requested_events(wait);
- unsigned int res = 0;
+ __poll_t req_events = poll_requested_events(wait);
+ __poll_t res = 0;
if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
struct v4l2_fh *fh = file->private_data;
@@ -904,12 +904,12 @@ exit:
}
EXPORT_SYMBOL_GPL(vb2_fop_read);
-unsigned int vb2_fop_poll(struct file *file, poll_table *wait)
+__poll_t vb2_fop_poll(struct file *file, poll_table *wait)
{
struct video_device *vdev = video_devdata(file);
struct vb2_queue *q = vdev->queue;
struct mutex *lock = q->lock ? q->lock : vdev->lock;
- unsigned res;
+ __poll_t res;
void *fileio;
/*
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index a385a35c7de9..90a66b3f7ae1 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -32,7 +32,6 @@
#include <linux/pm_runtime.h>
#include <linux/platform_data/mtd-nand-omap2.h>
-#include <linux/platform_data/mtd-onenand-omap2.h>
#include <asm/mach-types.h>
@@ -1138,6 +1137,112 @@ struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *reg, int cs)
}
EXPORT_SYMBOL_GPL(gpmc_omap_get_nand_ops);
+static void gpmc_omap_onenand_calc_sync_timings(struct gpmc_timings *t,
+ struct gpmc_settings *s,
+ int freq, int latency)
+{
+ struct gpmc_device_timings dev_t;
+ const int t_cer = 15;
+ const int t_avdp = 12;
+ const int t_cez = 20; /* max of t_cez, t_oez */
+ const int t_wpl = 40;
+ const int t_wph = 30;
+ int min_gpmc_clk_period, t_ces, t_avds, t_avdh, t_ach, t_aavdh, t_rdyo;
+
+ switch (freq) {
+ case 104:
+ min_gpmc_clk_period = 9600; /* 104 MHz */
+ t_ces = 3;
+ t_avds = 4;
+ t_avdh = 2;
+ t_ach = 3;
+ t_aavdh = 6;
+ t_rdyo = 6;
+ break;
+ case 83:
+ min_gpmc_clk_period = 12000; /* 83 MHz */
+ t_ces = 5;
+ t_avds = 4;
+ t_avdh = 2;
+ t_ach = 6;
+ t_aavdh = 6;
+ t_rdyo = 9;
+ break;
+ case 66:
+ min_gpmc_clk_period = 15000; /* 66 MHz */
+ t_ces = 6;
+ t_avds = 5;
+ t_avdh = 2;
+ t_ach = 6;
+ t_aavdh = 6;
+ t_rdyo = 11;
+ break;
+ default:
+ min_gpmc_clk_period = 18500; /* 54 MHz */
+ t_ces = 7;
+ t_avds = 7;
+ t_avdh = 7;
+ t_ach = 9;
+ t_aavdh = 7;
+ t_rdyo = 15;
+ break;
+ }
+
+ /* Set synchronous read timings */
+ memset(&dev_t, 0, sizeof(dev_t));
+
+ if (!s->sync_write) {
+ dev_t.t_avdp_w = max(t_avdp, t_cer) * 1000;
+ dev_t.t_wpl = t_wpl * 1000;
+ dev_t.t_wph = t_wph * 1000;
+ dev_t.t_aavdh = t_aavdh * 1000;
+ }
+ dev_t.ce_xdelay = true;
+ dev_t.avd_xdelay = true;
+ dev_t.oe_xdelay = true;
+ dev_t.we_xdelay = true;
+ dev_t.clk = min_gpmc_clk_period;
+ dev_t.t_bacc = dev_t.clk;
+ dev_t.t_ces = t_ces * 1000;
+ dev_t.t_avds = t_avds * 1000;
+ dev_t.t_avdh = t_avdh * 1000;
+ dev_t.t_ach = t_ach * 1000;
+ dev_t.cyc_iaa = (latency + 1);
+ dev_t.t_cez_r = t_cez * 1000;
+ dev_t.t_cez_w = dev_t.t_cez_r;
+ dev_t.cyc_aavdh_oe = 1;
+ dev_t.t_rdyo = t_rdyo * 1000 + min_gpmc_clk_period;
+
+ gpmc_calc_timings(t, s, &dev_t);
+}
+
+int gpmc_omap_onenand_set_timings(struct device *dev, int cs, int freq,
+ int latency,
+ struct gpmc_onenand_info *info)
+{
+ int ret;
+ struct gpmc_timings gpmc_t;
+ struct gpmc_settings gpmc_s;
+
+ gpmc_read_settings_dt(dev->of_node, &gpmc_s);
+
+ info->sync_read = gpmc_s.sync_read;
+ info->sync_write = gpmc_s.sync_write;
+ info->burst_len = gpmc_s.burst_len;
+
+ if (!gpmc_s.sync_read && !gpmc_s.sync_write)
+ return 0;
+
+ gpmc_omap_onenand_calc_sync_timings(&gpmc_t, &gpmc_s, freq, latency);
+
+ ret = gpmc_cs_program_settings(cs, &gpmc_s);
+ if (ret < 0)
+ return ret;
+
+ return gpmc_cs_set_timings(cs, &gpmc_t, &gpmc_s);
+}
+EXPORT_SYMBOL_GPL(gpmc_omap_onenand_set_timings);
+
int gpmc_get_client_irq(unsigned irq_config)
{
if (!gpmc_irq_domain) {
@@ -1916,41 +2021,6 @@ static void __maybe_unused gpmc_read_timings_dt(struct device_node *np,
of_property_read_bool(np, "gpmc,time-para-granularity");
}
-#if IS_ENABLED(CONFIG_MTD_ONENAND)
-static int gpmc_probe_onenand_child(struct platform_device *pdev,
- struct device_node *child)
-{
- u32 val;
- struct omap_onenand_platform_data *gpmc_onenand_data;
-
- if (of_property_read_u32(child, "reg", &val) < 0) {
- dev_err(&pdev->dev, "%pOF has no 'reg' property\n",
- child);
- return -ENODEV;
- }
-
- gpmc_onenand_data = devm_kzalloc(&pdev->dev, sizeof(*gpmc_onenand_data),
- GFP_KERNEL);
- if (!gpmc_onenand_data)
- return -ENOMEM;
-
- gpmc_onenand_data->cs = val;
- gpmc_onenand_data->of_node = child;
- gpmc_onenand_data->dma_channel = -1;
-
- if (!of_property_read_u32(child, "dma-channel", &val))
- gpmc_onenand_data->dma_channel = val;
-
- return gpmc_onenand_init(gpmc_onenand_data);
-}
-#else
-static int gpmc_probe_onenand_child(struct platform_device *pdev,
- struct device_node *child)
-{
- return 0;
-}
-#endif
-
/**
* gpmc_probe_generic_child - configures the gpmc for a child device
* @pdev: pointer to gpmc platform device
@@ -2053,6 +2123,16 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
}
}
+ if (of_node_cmp(child->name, "onenand") == 0) {
+ /* Warn about older DT blobs with no compatible property */
+ if (!of_property_read_bool(child, "compatible")) {
+ dev_warn(&pdev->dev,
+ "Incompatible OneNAND node: missing compatible");
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
if (of_device_is_compatible(child, "ti,omap2-nand")) {
/* NAND specific setup */
val = 8;
@@ -2077,8 +2157,9 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
} else {
ret = of_property_read_u32(child, "bank-width",
&gpmc_s.device_width);
- if (ret < 0) {
- dev_err(&pdev->dev, "%pOF has no 'bank-width' property\n",
+ if (ret < 0 && !gpmc_s.device_width) {
+ dev_err(&pdev->dev,
+ "%pOF has no 'gpmc,device-width' property\n",
child);
goto err;
}
@@ -2188,11 +2269,7 @@ static void gpmc_probe_dt_children(struct platform_device *pdev)
if (!child->name)
continue;
- if (of_node_cmp(child->name, "onenand") == 0)
- ret = gpmc_probe_onenand_child(pdev, child);
- else
- ret = gpmc_probe_generic_child(pdev, child);
-
+ ret = gpmc_probe_generic_child(pdev, child);
if (ret) {
dev_err(&pdev->dev, "failed to probe DT child '%s': %d\n",
child->name, ret);
diff --git a/drivers/memstick/host/Kconfig b/drivers/memstick/host/Kconfig
index 7310e32b5991..aa2b0786bbe9 100644
--- a/drivers/memstick/host/Kconfig
+++ b/drivers/memstick/host/Kconfig
@@ -45,7 +45,7 @@ config MEMSTICK_R592
config MEMSTICK_REALTEK_PCI
tristate "Realtek PCI-E Memstick Card Interface Driver"
- depends on MFD_RTSX_PCI
+ depends on MISC_RTSX_PCI
help
Say Y here to include driver code to support Memstick card interface
of Realtek PCI-E card reader
@@ -55,7 +55,7 @@ config MEMSTICK_REALTEK_PCI
config MEMSTICK_REALTEK_USB
tristate "Realtek USB Memstick Card Interface Driver"
- depends on MFD_RTSX_USB
+ depends on MISC_RTSX_USB
help
Say Y here to include driver code to support Memstick card interface
of Realtek RTS5129/39 series USB card reader
diff --git a/drivers/memstick/host/rtsx_pci_ms.c b/drivers/memstick/host/rtsx_pci_ms.c
index 818fa94354ae..a44b4578ba4d 100644
--- a/drivers/memstick/host/rtsx_pci_ms.c
+++ b/drivers/memstick/host/rtsx_pci_ms.c
@@ -24,7 +24,7 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/memstick.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#include <asm/unaligned.h>
struct realtek_pci_ms {
diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c
index 2e3cf012ef48..4f64563df7de 100644
--- a/drivers/memstick/host/rtsx_usb_ms.c
+++ b/drivers/memstick/host/rtsx_usb_ms.c
@@ -25,7 +25,7 @@
#include <linux/workqueue.h>
#include <linux/memstick.h>
#include <linux/kthread.h>
-#include <linux/mfd/rtsx_usb.h>
+#include <linux/rtsx_usb.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
#include <linux/sched.h>
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 7a93400eea2a..51eb1b027963 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -958,7 +958,7 @@ mpt_put_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
{
u32 mf_dma_addr;
int req_offset;
- u16 req_idx; /* Request index */
+ u16 req_idx; /* Request index */
/* ensure values are reset properly! */
mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx; /* byte */
@@ -994,7 +994,7 @@ mpt_put_msg_frame_hi_pri(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
{
u32 mf_dma_addr;
int req_offset;
- u16 req_idx; /* Request index */
+ u16 req_idx; /* Request index */
/* ensure values are reset properly! */
mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx;
@@ -1128,11 +1128,12 @@ mpt_add_sge_64bit_1078(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
static void
mpt_add_chain(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
{
- SGEChain32_t *pChain = (SGEChain32_t *) pAddr;
- pChain->Length = cpu_to_le16(length);
- pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
- pChain->NextChainOffset = next;
- pChain->Address = cpu_to_le32(dma_addr);
+ SGEChain32_t *pChain = (SGEChain32_t *) pAddr;
+
+ pChain->Length = cpu_to_le16(length);
+ pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
+ pChain->NextChainOffset = next;
+ pChain->Address = cpu_to_le32(dma_addr);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -1147,18 +1148,18 @@ mpt_add_chain(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
static void
mpt_add_chain_64bit(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
{
- SGEChain64_t *pChain = (SGEChain64_t *) pAddr;
- u32 tmp = dma_addr & 0xFFFFFFFF;
+ SGEChain64_t *pChain = (SGEChain64_t *) pAddr;
+ u32 tmp = dma_addr & 0xFFFFFFFF;
- pChain->Length = cpu_to_le16(length);
- pChain->Flags = (MPI_SGE_FLAGS_CHAIN_ELEMENT |
- MPI_SGE_FLAGS_64_BIT_ADDRESSING);
+ pChain->Length = cpu_to_le16(length);
+ pChain->Flags = (MPI_SGE_FLAGS_CHAIN_ELEMENT |
+ MPI_SGE_FLAGS_64_BIT_ADDRESSING);
- pChain->NextChainOffset = next;
+ pChain->NextChainOffset = next;
- pChain->Address.Low = cpu_to_le32(tmp);
- tmp = (u32)(upper_32_bits(dma_addr));
- pChain->Address.High = cpu_to_le32(tmp);
+ pChain->Address.Low = cpu_to_le32(tmp);
+ tmp = (u32)(upper_32_bits(dma_addr));
+ pChain->Address.High = cpu_to_le32(tmp);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -1360,7 +1361,7 @@ mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init)
ioc->add_sge(psge, flags_length, ioc->HostPageBuffer_dma);
ioc->facts.HostPageBufferSGE = ioc_init->HostPageBufferSGE;
-return 0;
+ return 0;
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -2152,7 +2153,7 @@ mpt_suspend(struct pci_dev *pdev, pm_message_t state)
device_state);
/* put ioc into READY_STATE */
- if(SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, CAN_SLEEP)) {
+ if (SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, CAN_SLEEP)) {
printk(MYIOC_s_ERR_FMT
"pci-suspend: IOC msg unit reset failed!\n", ioc->name);
}
@@ -6348,7 +6349,7 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
u8 page_type = 0, extend_page;
unsigned long timeleft;
unsigned long flags;
- int in_isr;
+ int in_isr;
u8 issue_hard_reset = 0;
u8 retry_count = 0;
@@ -7697,7 +7698,7 @@ mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply)
break;
}
if (ds)
- strncpy(evStr, ds, EVENT_DESCR_STR_SZ);
+ strlcpy(evStr, ds, EVENT_DESCR_STR_SZ);
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
@@ -8092,15 +8093,15 @@ mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info)
static void
mpt_sas_log_info(MPT_ADAPTER *ioc, u32 log_info, u8 cb_idx)
{
-union loginfo_type {
- u32 loginfo;
- struct {
- u32 subcode:16;
- u32 code:8;
- u32 originator:4;
- u32 bus_type:4;
- }dw;
-};
+ union loginfo_type {
+ u32 loginfo;
+ struct {
+ u32 subcode:16;
+ u32 code:8;
+ u32 originator:4;
+ u32 bus_type:4;
+ } dw;
+ };
union loginfo_type sas_loginfo;
char *originator_desc = NULL;
char *code_desc = NULL;
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 7b3b41368931..8d12017b9893 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -2481,24 +2481,13 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
else
karg.host_no = -1;
- /* Reformat the fw_version into a string
- */
- karg.fw_version[0] = ioc->facts.FWVersion.Struct.Major >= 10 ?
- ((ioc->facts.FWVersion.Struct.Major / 10) + '0') : '0';
- karg.fw_version[1] = (ioc->facts.FWVersion.Struct.Major % 10 ) + '0';
- karg.fw_version[2] = '.';
- karg.fw_version[3] = ioc->facts.FWVersion.Struct.Minor >= 10 ?
- ((ioc->facts.FWVersion.Struct.Minor / 10) + '0') : '0';
- karg.fw_version[4] = (ioc->facts.FWVersion.Struct.Minor % 10 ) + '0';
- karg.fw_version[5] = '.';
- karg.fw_version[6] = ioc->facts.FWVersion.Struct.Unit >= 10 ?
- ((ioc->facts.FWVersion.Struct.Unit / 10) + '0') : '0';
- karg.fw_version[7] = (ioc->facts.FWVersion.Struct.Unit % 10 ) + '0';
- karg.fw_version[8] = '.';
- karg.fw_version[9] = ioc->facts.FWVersion.Struct.Dev >= 10 ?
- ((ioc->facts.FWVersion.Struct.Dev / 10) + '0') : '0';
- karg.fw_version[10] = (ioc->facts.FWVersion.Struct.Dev % 10 ) + '0';
- karg.fw_version[11] = '\0';
+ /* Reformat the fw_version into a string */
+ snprintf(karg.fw_version, sizeof(karg.fw_version),
+ "%.2hhu.%.2hhu.%.2hhu.%.2hhu",
+ ioc->facts.FWVersion.Struct.Major,
+ ioc->facts.FWVersion.Struct.Minor,
+ ioc->facts.FWVersion.Struct.Unit,
+ ioc->facts.FWVersion.Struct.Dev);
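The "%.2hhu" conversions zero-pad each u8 field to two digits, reproducing exactly what the dropped open-coded digit arithmetic built by hand. A minimal, self-contained sketch of the same formatting (the version values are made up for illustration):

#include <stdio.h>

int main(void)
{
	unsigned char major = 1, minor = 3, unit = 27, dev = 0; /* made-up values */
	char fw_version[12]; /* "NN.NN.NN.NN" plus terminating NUL */

	/* Same format string as the patch: each u8 zero-padded to two digits */
	snprintf(fw_version, sizeof(fw_version), "%.2hhu.%.2hhu.%.2hhu.%.2hhu",
		 major, minor, unit, dev);
	printf("%s\n", fw_version); /* prints "01.03.27.00" */
	return 0;
}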
/* Issue a config request to get the device serial number
*/
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 345f6035599e..439ee9c5f535 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1165,7 +1165,6 @@ mptsas_schedule_target_reset(void *iocp)
* issue target reset to next device in the queue
*/
- head = &hd->target_reset_list;
if (list_empty(head))
return;
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 1d20a800e967..b860eb5aa194 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -222,6 +222,16 @@ config MFD_CROS_EC_SPI
response time cannot be guaranteed, we support ignoring
'pre-amble' bytes before the response actually starts.
+config MFD_CROS_EC_CHARDEV
+ tristate "Chrome OS Embedded Controller userspace device interface"
+ depends on MFD_CROS_EC
+ select CROS_EC_CTL
+ ---help---
+ This driver adds support for talking to the ChromeOS EC from userspace.
+
+ If you have a supported Chromebook, choose Y or M here.
+ The module will be called cros_ec_dev.
+
config MFD_ASIC3
bool "Compaq ASIC3"
depends on GPIOLIB && ARM
@@ -877,7 +887,7 @@ config UCB1400_CORE
config MFD_PM8XXX
tristate "Qualcomm PM8xxx PMIC chips driver"
- depends on (ARM || HEXAGON)
+ depends on (ARM || HEXAGON || COMPILE_TEST)
select IRQ_DOMAIN
select MFD_CORE
select REGMAP
@@ -929,17 +939,6 @@ config MFD_RDC321X
southbridge which provides access to GPIOs and Watchdog using the
southbridge PCI device configuration space.
-config MFD_RTSX_PCI
- tristate "Realtek PCI-E card reader"
- depends on PCI
- select MFD_CORE
- help
- This supports for Realtek PCI-Express card reader including rts5209,
- rts5227, rts522A, rts5229, rts5249, rts524A, rts525A, rtl8411, etc.
- Realtek card reader supports access to many types of memory cards,
- such as Memory Stick, Memory Stick Pro, Secure Digital and
- MultiMediaCard.
-
config MFD_RT5033
tristate "Richtek RT5033 Power Management IC"
depends on I2C
@@ -953,16 +952,6 @@ config MFD_RT5033
sub-devices like charger, fuel gauge, flash LED, current source,
LDO and Buck.
-config MFD_RTSX_USB
- tristate "Realtek USB card reader"
- depends on USB
- select MFD_CORE
- help
- Select this option to get support for Realtek USB 2.0 card readers
- including RTS5129, RTS5139, RTS5179 and RTS5170.
- Realtek card reader supports access to many types of memory cards,
- such as Memory Stick Pro, Secure Digital and MultiMediaCard.
-
config MFD_RC5T583
bool "Ricoh RC5T583 Power Management system device"
depends on I2C=y
@@ -1859,5 +1848,13 @@ config MFD_VEXPRESS_SYSREG
System Registers are the platform configuration block
on the ARM Ltd. Versatile Express board.
+config RAVE_SP_CORE
+ tristate "RAVE SP MCU core driver"
+ depends on SERIAL_DEV_BUS
+ select CRC_CCITT
+ help
+ Select this to get support for the Supervisory Processor
+ device found on several devices in the RAVE line of hardware.
+
endmenu
endif
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index d9474ade32e6..d9d2cf0d32ef 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -17,12 +17,9 @@ cros_ec_core-$(CONFIG_ACPI) += cros_ec_acpi_gpe.o
obj-$(CONFIG_MFD_CROS_EC) += cros_ec_core.o
obj-$(CONFIG_MFD_CROS_EC_I2C) += cros_ec_i2c.o
obj-$(CONFIG_MFD_CROS_EC_SPI) += cros_ec_spi.o
+obj-$(CONFIG_MFD_CROS_EC_CHARDEV) += cros_ec_dev.o
obj-$(CONFIG_MFD_EXYNOS_LPASS) += exynos-lpass.o
-rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o
-obj-$(CONFIG_MFD_RTSX_PCI) += rtsx_pci.o
-obj-$(CONFIG_MFD_RTSX_USB) += rtsx_usb.o
-
obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o
obj-$(CONFIG_HTC_I2CPLD) += htc-i2cpld.o
@@ -230,3 +227,5 @@ obj-$(CONFIG_MFD_STM32_LPTIMER) += stm32-lptimer.o
obj-$(CONFIG_MFD_STM32_TIMERS) += stm32-timers.o
obj-$(CONFIG_MFD_MXS_LRADC) += mxs-lradc.o
obj-$(CONFIG_MFD_SC27XX_PMIC) += sprd-sc27xx-spi.o
+obj-$(CONFIG_RAVE_SP_CORE) += rave-sp.o
+
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index c1c815241e02..1afa27de7191 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -1258,6 +1258,19 @@ static struct ab8500_prcmu_ranges ab8540_debug_ranges[AB8500_NUM_BANKS] = {
},
};
+#define DEFINE_SHOW_ATTRIBUTE(__name) \
+static int __name ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __name ## _show, inode->i_private); \
+} \
+ \
+static const struct file_operations __name ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __name ## _open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
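For context, this macro pairs with a <name>_show() seq_file callback and generates the matching _open() and _fops, which is what all the conversions below rely on. A minimal sketch of the intended usage, with a hypothetical "foo" attribute:

static int foo_show(struct seq_file *s, void *p)
{
	seq_puts(s, "hello from foo\n"); /* whatever the attribute reports */
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(foo);

/* then, e.g. in probe: debugfs_create_file("foo", 0444, dir, NULL, &foo_fops); */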
static irqreturn_t ab8500_debug_handler(int irq, void *data)
{
@@ -1318,7 +1331,7 @@ static int ab8500_registers_print(struct device *dev, u32 bank,
return 0;
}
-static int ab8500_print_bank_registers(struct seq_file *s, void *p)
+static int ab8500_bank_registers_show(struct seq_file *s, void *p)
{
struct device *dev = s->private;
u32 bank = debug_bank;
@@ -1330,18 +1343,7 @@ static int ab8500_print_bank_registers(struct seq_file *s, void *p)
return ab8500_registers_print(dev, bank, s);
}
-static int ab8500_registers_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_print_bank_registers, inode->i_private);
-}
-
-static const struct file_operations ab8500_registers_fops = {
- .open = ab8500_registers_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_bank_registers);
static int ab8500_print_all_banks(struct seq_file *s, void *p)
{
@@ -1528,7 +1530,7 @@ void ab8500_debug_register_interrupt(int line)
num_interrupts[line]++;
}
-static int ab8500_interrupts_print(struct seq_file *s, void *p)
+static int ab8500_interrupts_show(struct seq_file *s, void *p)
{
int line;
@@ -1557,10 +1559,7 @@ static int ab8500_interrupts_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_interrupts_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_interrupts_print, inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8500_interrupts);
/*
* - HWREG DB8500 formatted routines
@@ -1603,7 +1602,7 @@ static int ab8500_hwreg_open(struct inode *inode, struct file *file)
#define AB8500_LAST_SIM_REG 0x8B
#define AB8505_LAST_SIM_REG 0x8C
-static int ab8500_print_modem_registers(struct seq_file *s, void *p)
+static int ab8500_modem_show(struct seq_file *s, void *p)
{
struct device *dev = s->private;
struct ab8500 *ab8500;
@@ -1620,18 +1619,15 @@ static int ab8500_print_modem_registers(struct seq_file *s, void *p)
err = abx500_get_register_interruptible(dev,
AB8500_REGU_CTRL1, AB8500_SUPPLY_CONTROL_REG, &orig_value);
- if (err < 0) {
- dev_err(dev, "ab->read fail %d\n", err);
- return err;
- }
+ if (err < 0)
+ goto report_read_failure;
+
/* Config 1 will allow APE side to read SIM registers */
err = abx500_set_register_interruptible(dev,
AB8500_REGU_CTRL1, AB8500_SUPPLY_CONTROL_REG,
AB8500_SUPPLY_CONTROL_CONFIG_1);
- if (err < 0) {
- dev_err(dev, "ab->write fail %d\n", err);
- return err;
- }
+ if (err < 0)
+ goto report_write_failure;
seq_printf(s, " bank 0x%02X:\n", bank);
@@ -1641,36 +1637,30 @@ static int ab8500_print_modem_registers(struct seq_file *s, void *p)
for (reg = AB8500_FIRST_SIM_REG; reg <= last_sim_reg; reg++) {
err = abx500_get_register_interruptible(dev,
bank, reg, &value);
- if (err < 0) {
- dev_err(dev, "ab->read fail %d\n", err);
- return err;
- }
+ if (err < 0)
+ goto report_read_failure;
+
seq_printf(s, " [0x%02X/0x%02X]: 0x%02X\n", bank, reg, value);
}
err = abx500_set_register_interruptible(dev,
AB8500_REGU_CTRL1, AB8500_SUPPLY_CONTROL_REG, orig_value);
- if (err < 0) {
- dev_err(dev, "ab->write fail %d\n", err);
- return err;
- }
+ if (err < 0)
+ goto report_write_failure;
+
return 0;
-}
-static int ab8500_modem_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_print_modem_registers,
- inode->i_private);
+report_read_failure:
+ dev_err(dev, "ab->read fail %d\n", err);
+ return err;
+
+report_write_failure:
+ dev_err(dev, "ab->write fail %d\n", err);
+ return err;
}
-static const struct file_operations ab8500_modem_fops = {
- .open = ab8500_modem_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_modem);
-static int ab8500_gpadc_bat_ctrl_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_bat_ctrl_show(struct seq_file *s, void *p)
{
int bat_ctrl_raw;
int bat_ctrl_convert;
@@ -1687,21 +1677,9 @@ static int ab8500_gpadc_bat_ctrl_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_bat_ctrl_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_bat_ctrl_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_bat_ctrl_fops = {
- .open = ab8500_gpadc_bat_ctrl_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_bat_ctrl);
-static int ab8500_gpadc_btemp_ball_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_btemp_ball_show(struct seq_file *s, void *p)
{
int btemp_ball_raw;
int btemp_ball_convert;
@@ -1718,22 +1696,9 @@ static int ab8500_gpadc_btemp_ball_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_btemp_ball_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8500_gpadc_btemp_ball_print,
- inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_btemp_ball);
-static const struct file_operations ab8500_gpadc_btemp_ball_fops = {
- .open = ab8500_gpadc_btemp_ball_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8500_gpadc_main_charger_v_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_main_charger_v_show(struct seq_file *s, void *p)
{
int main_charger_v_raw;
int main_charger_v_convert;
@@ -1750,22 +1715,9 @@ static int ab8500_gpadc_main_charger_v_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_main_charger_v_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8500_gpadc_main_charger_v_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_main_charger_v_fops = {
- .open = ab8500_gpadc_main_charger_v_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_main_charger_v);
-static int ab8500_gpadc_acc_detect1_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_acc_detect1_show(struct seq_file *s, void *p)
{
int acc_detect1_raw;
int acc_detect1_convert;
@@ -1782,22 +1734,9 @@ static int ab8500_gpadc_acc_detect1_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_acc_detect1_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8500_gpadc_acc_detect1_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_acc_detect1_fops = {
- .open = ab8500_gpadc_acc_detect1_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_acc_detect1);
-static int ab8500_gpadc_acc_detect2_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_acc_detect2_show(struct seq_file *s, void *p)
{
int acc_detect2_raw;
int acc_detect2_convert;
@@ -1814,22 +1753,9 @@ static int ab8500_gpadc_acc_detect2_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_acc_detect2_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8500_gpadc_acc_detect2_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_acc_detect2_fops = {
- .open = ab8500_gpadc_acc_detect2_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_acc_detect2);
-static int ab8500_gpadc_aux1_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_aux1_show(struct seq_file *s, void *p)
{
int aux1_raw;
int aux1_convert;
@@ -1846,20 +1772,9 @@ static int ab8500_gpadc_aux1_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_aux1_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_aux1_print, inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_aux1);
-static const struct file_operations ab8500_gpadc_aux1_fops = {
- .open = ab8500_gpadc_aux1_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8500_gpadc_aux2_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_aux2_show(struct seq_file *s, void *p)
{
int aux2_raw;
int aux2_convert;
@@ -1876,20 +1791,9 @@ static int ab8500_gpadc_aux2_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_aux2_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_aux2_print, inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_aux2_fops = {
- .open = ab8500_gpadc_aux2_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_aux2);
-static int ab8500_gpadc_main_bat_v_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_main_bat_v_show(struct seq_file *s, void *p)
{
int main_bat_v_raw;
int main_bat_v_convert;
@@ -1906,22 +1810,9 @@ static int ab8500_gpadc_main_bat_v_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_main_bat_v_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8500_gpadc_main_bat_v_print,
- inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_main_bat_v);
-static const struct file_operations ab8500_gpadc_main_bat_v_fops = {
- .open = ab8500_gpadc_main_bat_v_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8500_gpadc_vbus_v_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_vbus_v_show(struct seq_file *s, void *p)
{
int vbus_v_raw;
int vbus_v_convert;
@@ -1938,20 +1829,9 @@ static int ab8500_gpadc_vbus_v_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_vbus_v_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_vbus_v_print, inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_vbus_v);
-static const struct file_operations ab8500_gpadc_vbus_v_fops = {
- .open = ab8500_gpadc_vbus_v_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8500_gpadc_main_charger_c_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_main_charger_c_show(struct seq_file *s, void *p)
{
int main_charger_c_raw;
int main_charger_c_convert;
@@ -1968,22 +1848,9 @@ static int ab8500_gpadc_main_charger_c_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_main_charger_c_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8500_gpadc_main_charger_c_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_main_charger_c_fops = {
- .open = ab8500_gpadc_main_charger_c_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_main_charger_c);
-static int ab8500_gpadc_usb_charger_c_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_usb_charger_c_show(struct seq_file *s, void *p)
{
int usb_charger_c_raw;
int usb_charger_c_convert;
@@ -2000,22 +1867,9 @@ static int ab8500_gpadc_usb_charger_c_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_usb_charger_c_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8500_gpadc_usb_charger_c_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_usb_charger_c_fops = {
- .open = ab8500_gpadc_usb_charger_c_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_usb_charger_c);
-static int ab8500_gpadc_bk_bat_v_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_bk_bat_v_show(struct seq_file *s, void *p)
{
int bk_bat_v_raw;
int bk_bat_v_convert;
@@ -2032,21 +1886,9 @@ static int ab8500_gpadc_bk_bat_v_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_bk_bat_v_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_bk_bat_v_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_bk_bat_v_fops = {
- .open = ab8500_gpadc_bk_bat_v_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_bk_bat_v);
-static int ab8500_gpadc_die_temp_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_die_temp_show(struct seq_file *s, void *p)
{
int die_temp_raw;
int die_temp_convert;
@@ -2063,21 +1905,9 @@ static int ab8500_gpadc_die_temp_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_die_temp_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_die_temp_print,
- inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_die_temp);
-static const struct file_operations ab8500_gpadc_die_temp_fops = {
- .open = ab8500_gpadc_die_temp_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8500_gpadc_usb_id_print(struct seq_file *s, void *p)
+static int ab8500_gpadc_usb_id_show(struct seq_file *s, void *p)
{
int usb_id_raw;
int usb_id_convert;
@@ -2094,20 +1924,9 @@ static int ab8500_gpadc_usb_id_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8500_gpadc_usb_id_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8500_gpadc_usb_id_print, inode->i_private);
-}
-
-static const struct file_operations ab8500_gpadc_usb_id_fops = {
- .open = ab8500_gpadc_usb_id_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8500_gpadc_usb_id);
-static int ab8540_gpadc_xtal_temp_print(struct seq_file *s, void *p)
+static int ab8540_gpadc_xtal_temp_show(struct seq_file *s, void *p)
{
int xtal_temp_raw;
int xtal_temp_convert;
@@ -2124,21 +1943,9 @@ static int ab8540_gpadc_xtal_temp_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8540_gpadc_xtal_temp_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8540_gpadc_xtal_temp_print,
- inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_xtal_temp);
-static const struct file_operations ab8540_gpadc_xtal_temp_fops = {
- .open = ab8540_gpadc_xtal_temp_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8540_gpadc_vbat_true_meas_print(struct seq_file *s, void *p)
+static int ab8540_gpadc_vbat_true_meas_show(struct seq_file *s, void *p)
{
int vbat_true_meas_raw;
int vbat_true_meas_convert;
@@ -2156,22 +1963,9 @@ static int ab8540_gpadc_vbat_true_meas_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8540_gpadc_vbat_true_meas_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8540_gpadc_vbat_true_meas_print,
- inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_vbat_true_meas);
-static const struct file_operations ab8540_gpadc_vbat_true_meas_fops = {
- .open = ab8540_gpadc_vbat_true_meas_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8540_gpadc_bat_ctrl_and_ibat_print(struct seq_file *s, void *p)
+static int ab8540_gpadc_bat_ctrl_and_ibat_show(struct seq_file *s, void *p)
{
int bat_ctrl_raw;
int bat_ctrl_convert;
@@ -2197,22 +1991,9 @@ static int ab8540_gpadc_bat_ctrl_and_ibat_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8540_gpadc_bat_ctrl_and_ibat_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8540_gpadc_bat_ctrl_and_ibat_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8540_gpadc_bat_ctrl_and_ibat_fops = {
- .open = ab8540_gpadc_bat_ctrl_and_ibat_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_bat_ctrl_and_ibat);
-static int ab8540_gpadc_vbat_meas_and_ibat_print(struct seq_file *s, void *p)
+static int ab8540_gpadc_vbat_meas_and_ibat_show(struct seq_file *s, void *p)
{
int vbat_meas_raw;
int vbat_meas_convert;
@@ -2237,23 +2018,9 @@ static int ab8540_gpadc_vbat_meas_and_ibat_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8540_gpadc_vbat_meas_and_ibat_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8540_gpadc_vbat_meas_and_ibat_print,
- inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_vbat_meas_and_ibat);
-static const struct file_operations ab8540_gpadc_vbat_meas_and_ibat_fops = {
- .open = ab8540_gpadc_vbat_meas_and_ibat_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ab8540_gpadc_vbat_true_meas_and_ibat_print(struct seq_file *s,
- void *p)
+static int ab8540_gpadc_vbat_true_meas_and_ibat_show(struct seq_file *s, void *p)
{
int vbat_true_meas_raw;
int vbat_true_meas_convert;
@@ -2279,23 +2046,9 @@ static int ab8540_gpadc_vbat_true_meas_and_ibat_print(struct seq_file *s,
return 0;
}
-static int ab8540_gpadc_vbat_true_meas_and_ibat_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8540_gpadc_vbat_true_meas_and_ibat_print,
- inode->i_private);
-}
-
-static const struct file_operations
-ab8540_gpadc_vbat_true_meas_and_ibat_fops = {
- .open = ab8540_gpadc_vbat_true_meas_and_ibat_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_vbat_true_meas_and_ibat);
-static int ab8540_gpadc_bat_temp_and_ibat_print(struct seq_file *s, void *p)
+static int ab8540_gpadc_bat_temp_and_ibat_show(struct seq_file *s, void *p)
{
int bat_temp_raw;
int bat_temp_convert;
@@ -2320,22 +2073,9 @@ static int ab8540_gpadc_bat_temp_and_ibat_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8540_gpadc_bat_temp_and_ibat_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ab8540_gpadc_bat_temp_and_ibat_print,
- inode->i_private);
-}
-
-static const struct file_operations ab8540_gpadc_bat_temp_and_ibat_fops = {
- .open = ab8540_gpadc_bat_temp_and_ibat_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_bat_temp_and_ibat);
-static int ab8540_gpadc_otp_cal_print(struct seq_file *s, void *p)
+static int ab8540_gpadc_otp_calib_show(struct seq_file *s, void *p)
{
struct ab8500_gpadc *gpadc;
u16 vmain_l, vmain_h, btemp_l, btemp_h;
@@ -2359,18 +2099,7 @@ static int ab8540_gpadc_otp_cal_print(struct seq_file *s, void *p)
return 0;
}
-static int ab8540_gpadc_otp_cal_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ab8540_gpadc_otp_cal_print, inode->i_private);
-}
-
-static const struct file_operations ab8540_gpadc_otp_calib_fops = {
- .open = ab8540_gpadc_otp_cal_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ab8540_gpadc_otp_calib);
static int ab8500_gpadc_avg_sample_print(struct seq_file *s, void *p)
{
@@ -2903,14 +2632,6 @@ static const struct file_operations ab8500_val_fops = {
.owner = THIS_MODULE,
};
-static const struct file_operations ab8500_interrupts_fops = {
- .open = ab8500_interrupts_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
static const struct file_operations ab8500_subscribe_fops = {
.open = ab8500_subscribe_unsubscribe_open,
.write = ab8500_subscribe_write,
@@ -2997,7 +2718,7 @@ static int ab8500_debug_probe(struct platform_device *plf)
goto err;
file = debugfs_create_file("all-bank-registers", S_IRUGO, ab8500_dir,
- &plf->dev, &ab8500_registers_fops);
+ &plf->dev, &ab8500_bank_registers_fops);
if (!file)
goto err;
diff --git a/drivers/mfd/atmel-flexcom.c b/drivers/mfd/atmel-flexcom.c
index 064bde9cff5a..f684a93a3340 100644
--- a/drivers/mfd/atmel-flexcom.c
+++ b/drivers/mfd/atmel-flexcom.c
@@ -39,34 +39,43 @@
#define FLEX_MR_OPMODE(opmode) (((opmode) << FLEX_MR_OPMODE_OFFSET) & \
FLEX_MR_OPMODE_MASK)
+struct atmel_flexcom {
+ void __iomem *base;
+ u32 opmode;
+ struct clk *clk;
+};
static int atmel_flexcom_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct clk *clk;
struct resource *res;
- void __iomem *base;
- u32 opmode;
+ struct atmel_flexcom *ddata;
int err;
- err = of_property_read_u32(np, "atmel,flexcom-mode", &opmode);
+ ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ddata);
+
+ err = of_property_read_u32(np, "atmel,flexcom-mode", &ddata->opmode);
if (err)
return err;
- if (opmode < ATMEL_FLEXCOM_MODE_USART ||
- opmode > ATMEL_FLEXCOM_MODE_TWI)
+ if (ddata->opmode < ATMEL_FLEXCOM_MODE_USART ||
+ ddata->opmode > ATMEL_FLEXCOM_MODE_TWI)
return -EINVAL;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
+ ddata->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ddata->base))
+ return PTR_ERR(ddata->base);
- clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ ddata->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(ddata->clk))
+ return PTR_ERR(ddata->clk);
- err = clk_prepare_enable(clk);
+ err = clk_prepare_enable(ddata->clk);
if (err)
return err;
@@ -76,9 +85,9 @@ static int atmel_flexcom_probe(struct platform_device *pdev)
* inaccessible and are read as zero. Also the external I/O lines of the
* Flexcom are muxed to reach the selected device.
*/
- writel(FLEX_MR_OPMODE(opmode), base + FLEX_MR);
+ writel(FLEX_MR_OPMODE(ddata->opmode), ddata->base + FLEX_MR);
- clk_disable_unprepare(clk);
+ clk_disable_unprepare(ddata->clk);
return devm_of_platform_populate(&pdev->dev);
}
@@ -89,10 +98,34 @@ static const struct of_device_id atmel_flexcom_of_match[] = {
};
MODULE_DEVICE_TABLE(of, atmel_flexcom_of_match);
+#ifdef CONFIG_PM_SLEEP
+static int atmel_flexcom_resume(struct device *dev)
+{
+ struct atmel_flexcom *ddata = dev_get_drvdata(dev);
+ int err;
+ u32 val;
+
+ err = clk_prepare_enable(ddata->clk);
+ if (err)
+ return err;
+
+ val = FLEX_MR_OPMODE(ddata->opmode);
+ writel(val, ddata->base + FLEX_MR);
+
+ clk_disable_unprepare(ddata->clk);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(atmel_flexcom_pm_ops, NULL,
+ atmel_flexcom_resume);
+
static struct platform_driver atmel_flexcom_driver = {
.probe = atmel_flexcom_probe,
.driver = {
.name = "atmel_flexcom",
+ .pm = &atmel_flexcom_pm_ops,
.of_match_table = atmel_flexcom_of_match,
},
};
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index 2468b431bb22..e94c72c2faa2 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -129,6 +129,7 @@ static const struct regmap_range axp288_volatile_ranges[] = {
regmap_reg_range(AXP20X_PWR_INPUT_STATUS, AXP288_POWER_REASON),
regmap_reg_range(AXP288_BC_GLOBAL, AXP288_BC_GLOBAL),
regmap_reg_range(AXP288_BC_DET_STAT, AXP288_BC_DET_STAT),
+ regmap_reg_range(AXP20X_CHRG_BAK_CTRL, AXP20X_CHRG_BAK_CTRL),
regmap_reg_range(AXP20X_IRQ1_EN, AXP20X_IPSOUT_V_HIGH_L),
regmap_reg_range(AXP20X_TIMER_CTRL, AXP20X_TIMER_CTRL),
regmap_reg_range(AXP22X_GPIO_STATE, AXP22X_GPIO_STATE),
@@ -878,6 +879,9 @@ static struct mfd_cell axp813_cells[] = {
.resources = axp803_pek_resources,
}, {
.name = "axp20x-regulator",
+ }, {
+ .name = "axp20x-gpio",
+ .of_compatible = "x-powers,axp813-gpio",
}
};
diff --git a/drivers/mfd/cros_ec.c b/drivers/mfd/cros_ec.c
index b0ca5a4c841e..d61024141e2b 100644
--- a/drivers/mfd/cros_ec.c
+++ b/drivers/mfd/cros_ec.c
@@ -40,13 +40,13 @@ static struct cros_ec_platform pd_p = {
};
static const struct mfd_cell ec_cell = {
- .name = "cros-ec-ctl",
+ .name = "cros-ec-dev",
.platform_data = &ec_p,
.pdata_size = sizeof(ec_p),
};
static const struct mfd_cell ec_pd_cell = {
- .name = "cros-ec-ctl",
+ .name = "cros-ec-dev",
.platform_data = &pd_p,
.pdata_size = sizeof(pd_p),
};
diff --git a/drivers/platform/chrome/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index cf6c4f0846b8..e4fafdd96e5e 100644
--- a/drivers/platform/chrome/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -25,9 +25,10 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
-#include "cros_ec_debugfs.h"
#include "cros_ec_dev.h"
+#define DRV_NAME "cros-ec-dev"
+
/* Device variables */
#define CROS_MAX_DEV 128
static int ec_major;
@@ -461,7 +462,7 @@ static int ec_device_remove(struct platform_device *pdev)
}
static const struct platform_device_id cros_ec_id[] = {
- { "cros-ec-ctl", 0 },
+ { DRV_NAME, 0 },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(platform, cros_ec_id);
@@ -493,7 +494,7 @@ static const struct dev_pm_ops cros_ec_dev_pm_ops = {
static struct platform_driver cros_ec_dev_driver = {
.driver = {
- .name = "cros-ec-ctl",
+ .name = DRV_NAME,
.pm = &cros_ec_dev_pm_ops,
},
.probe = ec_device_probe,
@@ -544,6 +545,7 @@ static void __exit cros_ec_dev_exit(void)
module_init(cros_ec_dev_init);
module_exit(cros_ec_dev_exit);
+MODULE_ALIAS("platform:" DRV_NAME);
MODULE_AUTHOR("Bill Richardson <wfrichar@chromium.org>");
MODULE_DESCRIPTION("Userspace interface to the Chrome OS Embedded Controller");
MODULE_VERSION("1.0");
diff --git a/drivers/platform/chrome/cros_ec_dev.h b/drivers/mfd/cros_ec_dev.h
index 45e9453608c5..45e9453608c5 100644
--- a/drivers/platform/chrome/cros_ec_dev.h
+++ b/drivers/mfd/cros_ec_dev.h
diff --git a/drivers/mfd/cros_ec_spi.c b/drivers/mfd/cros_ec_spi.c
index 59c82cdcf48d..1b52b8557034 100644
--- a/drivers/mfd/cros_ec_spi.c
+++ b/drivers/mfd/cros_ec_spi.c
@@ -72,8 +72,7 @@
* struct cros_ec_spi - information about a SPI-connected EC
*
* @spi: SPI device we are connected to
- * @last_transfer_ns: time that we last finished a transfer, or 0 if there
- * if no record
+ * @last_transfer_ns: time that we last finished a transfer.
* @start_of_msg_delay: used to set the delay_usecs on the spi_transfer that
* is sent when we want to turn on CS at the start of a transaction.
* @end_of_msg_delay: used to set the delay_usecs on the spi_transfer that
@@ -379,18 +378,15 @@ static int cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
u8 sum;
u8 rx_byte;
int ret = 0, final_ret;
+ unsigned long delay;
len = cros_ec_prepare_tx(ec_dev, ec_msg);
dev_dbg(ec_dev->dev, "prepared, len=%d\n", len);
/* If it's too soon to do another transaction, wait */
- if (ec_spi->last_transfer_ns) {
- unsigned long delay; /* The delay completed so far */
-
- delay = ktime_get_ns() - ec_spi->last_transfer_ns;
- if (delay < EC_SPI_RECOVERY_TIME_NS)
- ndelay(EC_SPI_RECOVERY_TIME_NS - delay);
- }
+ delay = ktime_get_ns() - ec_spi->last_transfer_ns;
+ if (delay < EC_SPI_RECOVERY_TIME_NS)
+ ndelay(EC_SPI_RECOVERY_TIME_NS - delay);
rx_buf = kzalloc(len, GFP_KERNEL);
if (!rx_buf)
@@ -509,18 +505,15 @@ static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
u8 rx_byte;
int sum;
int ret = 0, final_ret;
+ unsigned long delay;
len = cros_ec_prepare_tx(ec_dev, ec_msg);
dev_dbg(ec_dev->dev, "prepared, len=%d\n", len);
/* If it's too soon to do another transaction, wait */
- if (ec_spi->last_transfer_ns) {
- unsigned long delay; /* The delay completed so far */
-
- delay = ktime_get_ns() - ec_spi->last_transfer_ns;
- if (delay < EC_SPI_RECOVERY_TIME_NS)
- ndelay(EC_SPI_RECOVERY_TIME_NS - delay);
- }
+ delay = ktime_get_ns() - ec_spi->last_transfer_ns;
+ if (delay < EC_SPI_RECOVERY_TIME_NS)
+ ndelay(EC_SPI_RECOVERY_TIME_NS - delay);
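Dropping the "if (ec_spi->last_transfer_ns)" guard is safe because a zero timestamp makes the computed delta enormous, so no busy-wait is inserted before the first transfer. A standalone sketch of the throttle pattern; RECOVERY_TIME_NS and now_ns are stand-ins for the driver's EC_SPI_RECOVERY_TIME_NS and ktime_get_ns():

#define RECOVERY_TIME_NS 200000ULL /* stand-in value, not the driver's */

static unsigned long long last_transfer_ns; /* 0 until the first transfer */

/* Returns how long the caller still has to wait before the next transfer */
static unsigned long long recovery_delay_ns(unsigned long long now_ns)
{
	unsigned long long elapsed = now_ns - last_transfer_ns;

	/* With last_transfer_ns == 0, elapsed is huge and no wait is needed,
	 * which is why the old guard could be removed. */
	return elapsed < RECOVERY_TIME_NS ? RECOVERY_TIME_NS - elapsed : 0;
}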
rx_buf = kzalloc(len, GFP_KERNEL);
if (!rx_buf)
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
index 0e0ab9bb1530..9e545eb6e8b4 100644
--- a/drivers/mfd/intel-lpss.c
+++ b/drivers/mfd/intel-lpss.c
@@ -450,6 +450,8 @@ int intel_lpss_probe(struct device *dev,
if (ret)
goto err_remove_ltr;
+ dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);
+
return 0;
err_remove_ltr:
@@ -478,7 +480,9 @@ EXPORT_SYMBOL_GPL(intel_lpss_remove);
static int resume_lpss_device(struct device *dev, void *data)
{
- pm_runtime_resume(dev);
+ if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
+ pm_runtime_resume(dev);
+
return 0;
}
diff --git a/drivers/mfd/intel_soc_pmic_core.c b/drivers/mfd/intel_soc_pmic_core.c
index 36adf9e8153e..274306d98ac1 100644
--- a/drivers/mfd/intel_soc_pmic_core.c
+++ b/drivers/mfd/intel_soc_pmic_core.c
@@ -16,7 +16,6 @@
* Author: Zhu, Lejun <lejun.zhu@linux.intel.com>
*/
-#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/mfd/core.h>
#include <linux/i2c.h>
diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
index 55d824b3a808..390b27cb2c2e 100644
--- a/drivers/mfd/kempld-core.c
+++ b/drivers/mfd/kempld-core.c
@@ -458,7 +458,7 @@ static int kempld_probe(struct platform_device *pdev)
return -EINVAL;
pld->io_base = devm_ioport_map(dev, ioport->start,
- ioport->end - ioport->start);
+ resource_size(ioport));
if (!pld->io_base)
return -ENOMEM;
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index cf1120abbf52..53dc1a43472c 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -1143,11 +1143,6 @@ static int lpc_ich_init_spi(struct pci_dev *dev)
res->end = res->start + SPIBASE_APL_SZ - 1;
pci_bus_read_config_dword(bus, spi, BCR, &bcr);
- if (!(bcr & BCR_WPD)) {
- bcr |= BCR_WPD;
- pci_bus_write_config_dword(bus, spi, BCR, bcr);
- pci_bus_read_config_dword(bus, spi, BCR, &bcr);
- }
info->writeable = !!(bcr & BCR_WPD);
}
diff --git a/drivers/mfd/max77843.c b/drivers/mfd/max77843.c
index dc5caeaaa6a1..da9612dbb222 100644
--- a/drivers/mfd/max77843.c
+++ b/drivers/mfd/max77843.c
@@ -15,7 +15,6 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/mfd/core.h>
#include <linux/mfd/max77693-common.h>
#include <linux/mfd/max77843-private.h>
diff --git a/drivers/mfd/palmas.c b/drivers/mfd/palmas.c
index 3922a93f9f92..663a2398b6b1 100644
--- a/drivers/mfd/palmas.c
+++ b/drivers/mfd/palmas.c
@@ -430,6 +430,7 @@ static void palmas_power_off(void)
{
unsigned int addr;
int ret, slave;
+ u8 powerhold_mask;
struct device_node *np = palmas_dev->dev->of_node;
if (of_property_read_bool(np, "ti,palmas-override-powerhold")) {
@@ -437,8 +438,15 @@ static void palmas_power_off(void)
PALMAS_PRIMARY_SECONDARY_PAD2);
slave = PALMAS_BASE_TO_SLAVE(PALMAS_PU_PD_OD_BASE);
+ if (of_device_is_compatible(np, "ti,tps65917"))
+ powerhold_mask =
+ TPS65917_PRIMARY_SECONDARY_PAD2_GPIO_5_MASK;
+ else
+ powerhold_mask =
+ PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_7_MASK;
+
ret = regmap_update_bits(palmas_dev->regmap[slave], addr,
- PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_7_MASK, 0);
+ powerhold_mask, 0);
if (ret)
dev_err(palmas_dev->dev,
"Unable to write PRIMARY_SECONDARY_PAD2 %d\n",
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 6155d123a84e..f952dff6765f 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -149,7 +149,7 @@ pcf50633_client_dev_register(struct pcf50633 *pcf, const char *name,
*pdev = platform_device_alloc(name, -1);
if (!*pdev) {
- dev_err(pcf->dev, "Falied to allocate %s\n", name);
+ dev_err(pcf->dev, "Failed to allocate %s\n", name);
return;
}
diff --git a/drivers/mfd/rave-sp.c b/drivers/mfd/rave-sp.c
new file mode 100644
index 000000000000..5c858e784a89
--- /dev/null
+++ b/drivers/mfd/rave-sp.c
@@ -0,0 +1,710 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Multifunction core driver for Zodiac Inflight Innovations RAVE
+ * Supervisory Processor (SP) MCU that is connected via a dedicated
+ * UART port
+ *
+ * Copyright (C) 2017 Zodiac Inflight Innovations
+ */
+
+#include <linux/atomic.h>
+#include <linux/crc-ccitt.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/mfd/rave-sp.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/sched.h>
+#include <linux/serdev.h>
+#include <asm/unaligned.h>
+
+/*
+ * The UART protocol uses the following entities:
+ * - message to MCU => ACK response
+ * - event from MCU => event ACK
+ *
+ * Frame structure:
+ * <STX> <DATA> <CHECKSUM> <ETX>
+ * Where:
+ * - STX - start-of-transmission character
+ * - ETX - end-of-transmission character
+ * - DATA - payload
+ * - CHECKSUM - checksum calculated on <DATA>
+ *
+ * If <DATA> or <CHECKSUM> contains one of the control characters,
+ * it is escaped using the <DLE> control code. The added <DLE> does
+ * not participate in checksum calculation.
+ */
+#define RAVE_SP_STX 0x02
+#define RAVE_SP_ETX 0x03
+#define RAVE_SP_DLE 0x10
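A worked example of the framing (illustrative, not taken from the ICD): a two-byte payload { 0x10, 0x41 } with the one-byte 8b2c checksum used by the legacy/RDU1 variants defined later in this file. The checksum is computed over the unescaped payload, 1 + ~(0x10 + 0x41) = 0xAF, and the payload byte 0x10 collides with DLE, so it gets escaped on the wire:

/*
 *   0x02   0x10  0x10   0x41   0xAF   0x03
 *   STX    DLE   data   data   csum   ETX
 */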
+
+#define RAVE_SP_MAX_DATA_SIZE 64
+#define RAVE_SP_CHECKSUM_SIZE 2 /* Worst case scenario on RDU2 */
+/*
+ * We don't store STX, ETX or the escape (DLE) bytes, so Rx only
+ * needs room for DATA + CSUM
+ */
+#define RAVE_SP_RX_BUFFER_SIZE \
+ (RAVE_SP_MAX_DATA_SIZE + RAVE_SP_CHECKSUM_SIZE)
+
+#define RAVE_SP_STX_ETX_SIZE 2
+/*
+ * For Tx we have to have space for everything: STX, ETX and
+ * the potentially byte-stuffed DATA + CSUM
+ */
+#define RAVE_SP_TX_BUFFER_SIZE \
+ (RAVE_SP_STX_ETX_SIZE + 2 * RAVE_SP_RX_BUFFER_SIZE)
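The worst-case arithmetic behind that size: every one of the 64 + 2 payload and checksum bytes could be a control character needing a DLE escape, doubling them, plus STX and ETX:

/* RAVE_SP_TX_BUFFER_SIZE = 2 + 2 * (64 + 2) = 134 bytes in the worst case */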
+
+#define RAVE_SP_BOOT_SOURCE_GET 0
+#define RAVE_SP_BOOT_SOURCE_SET 1
+
+#define RAVE_SP_RDU2_BOARD_TYPE_RMB 0
+#define RAVE_SP_RDU2_BOARD_TYPE_DEB 1
+
+#define RAVE_SP_BOOT_SOURCE_SD 0
+#define RAVE_SP_BOOT_SOURCE_EMMC 1
+#define RAVE_SP_BOOT_SOURCE_NOR 2
+
+/**
+ * enum rave_sp_deframer_state - Possible state for de-framer
+ *
+ * @RAVE_SP_EXPECT_SOF: Scanning input for start-of-frame marker
+ * @RAVE_SP_EXPECT_DATA: Got start of frame marker, collecting frame
+ * @RAVE_SP_EXPECT_ESCAPED_DATA: Got escape character, collecting escaped byte
+ */
+enum rave_sp_deframer_state {
+ RAVE_SP_EXPECT_SOF,
+ RAVE_SP_EXPECT_DATA,
+ RAVE_SP_EXPECT_ESCAPED_DATA,
+};
+
+/**
+ * struct rave_sp_deframer - Device protocol deframer
+ *
+ * @state: Current state of the deframer
+ * @data: Buffer used to collect deframed data
+ * @length: Number of bytes de-framed so far
+ */
+struct rave_sp_deframer {
+ enum rave_sp_deframer_state state;
+ unsigned char data[RAVE_SP_RX_BUFFER_SIZE];
+ size_t length;
+};
+
+/**
+ * struct rave_sp_reply - Reply as per RAVE device protocol
+ *
+ * @length: Expected reply length
+ * @data: Buffer to store reply payload in
+ * @code: Expected reply code
+ * @ackid: Expected reply ACK ID
+ * @received: Successful reply reception completion
+ */
+struct rave_sp_reply {
+ size_t length;
+ void *data;
+ u8 code;
+ u8 ackid;
+ struct completion received;
+};
+
+/**
+ * struct rave_sp_checksum - Variant specific checksum implementation details
+ *
+ * @length: Calculated checksum length
+ * @subroutine: Utilized checksum algorithm implementation
+ */
+struct rave_sp_checksum {
+ size_t length;
+ void (*subroutine)(const u8 *, size_t, u8 *);
+};
+
+/**
+ * struct rave_sp_variant_cmds - Variant specific command routines
+ *
+ * @translate: Generic to variant specific command mapping routine
+ *
+ */
+struct rave_sp_variant_cmds {
+ int (*translate)(enum rave_sp_command);
+};
+
+/**
+ * struct rave_sp_variant - RAVE supervisory processor core variant
+ *
+ * @checksum: Variant specific checksum implementation
+ * @cmd: Variant specific command pointer table
+ *
+ */
+struct rave_sp_variant {
+ const struct rave_sp_checksum *checksum;
+ struct rave_sp_variant_cmds cmd;
+};
+
+/**
+ * struct rave_sp - RAVE supervisory processor core
+ *
+ * @serdev: Pointer to underlying serdev
+ * @deframer: Stored state of the protocol deframer
+ * @ackid: ACK ID used in last reply sent to the device
+ * @bus_lock: Lock to serialize access to the device
+ * @reply_lock: Lock protecting @reply
+ * @reply: Pointer to memory to store reply payload
+ *
+ * @variant: Device variant specific information
+ * @event_notifier_list: Input event notification chain
+ *
+ */
+struct rave_sp {
+ struct serdev_device *serdev;
+ struct rave_sp_deframer deframer;
+ atomic_t ackid;
+ struct mutex bus_lock;
+ struct mutex reply_lock;
+ struct rave_sp_reply *reply;
+
+ const struct rave_sp_variant *variant;
+ struct blocking_notifier_head event_notifier_list;
+};
+
+static bool rave_sp_id_is_event(u8 code)
+{
+ return (code & 0xF0) == RAVE_SP_EVNT_BASE;
+}
+
+static void rave_sp_unregister_event_notifier(struct device *dev, void *res)
+{
+ struct rave_sp *sp = dev_get_drvdata(dev->parent);
+ struct notifier_block *nb = *(struct notifier_block **)res;
+ struct blocking_notifier_head *bnh = &sp->event_notifier_list;
+
+ WARN_ON(blocking_notifier_chain_unregister(bnh, nb));
+}
+
+int devm_rave_sp_register_event_notifier(struct device *dev,
+ struct notifier_block *nb)
+{
+ struct rave_sp *sp = dev_get_drvdata(dev->parent);
+ struct notifier_block **rcnb;
+ int ret;
+
+ rcnb = devres_alloc(rave_sp_unregister_event_notifier,
+ sizeof(*rcnb), GFP_KERNEL);
+ if (!rcnb)
+ return -ENOMEM;
+
+ ret = blocking_notifier_chain_register(&sp->event_notifier_list, nb);
+ if (!ret) {
+ *rcnb = nb;
+ devres_add(dev, rcnb);
+ } else {
+ devres_free(rcnb);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_rave_sp_register_event_notifier);
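A hedged sketch of how a child platform driver would subscribe to SP events; the handler body and names are hypothetical. The helper looks at dev->parent because the parent serdev device's drvdata is the struct rave_sp:

static int example_event(struct notifier_block *nb,
			 unsigned long action, void *data)
{
	/* "action" is the value packed by rave_sp_action_pack() below */
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_event,
};

/* from a child's probe():
 *	devm_rave_sp_register_event_notifier(&pdev->dev, &example_nb);
 */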
+
+static void csum_8b2c(const u8 *buf, size_t size, u8 *crc)
+{
+ *crc = *buf++;
+ size--;
+
+ while (size--)
+ *crc += *buf++;
+
+ *crc = 1 + ~(*crc);
+}
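The 8b2c name presumably reads as "8-bit two's complement": the returned byte is the two's complement of the running byte sum, so the payload plus the checksum sums to 0 modulo 256. A self-contained sanity check of the arithmetic:

#include <assert.h>
#include <stddef.h>

static unsigned char csum_8b2c_example(const unsigned char *buf, size_t size)
{
	unsigned char crc = 0;

	while (size--)		/* same arithmetic as csum_8b2c() above */
		crc += *buf++;

	return 1 + ~crc;
}

int main(void)
{
	const unsigned char payload[] = { 0x10, 0x41 };
	unsigned char crc = csum_8b2c_example(payload, sizeof(payload));

	assert(crc == 0xAF);
	assert((unsigned char)(0x10 + 0x41 + crc) == 0); /* sums to 0 mod 256 */
	return 0;
}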
+
+static void csum_ccitt(const u8 *buf, size_t size, u8 *crc)
+{
+ const u16 calculated = crc_ccitt_false(0xffff, buf, size);
+
+ /*
+ * While the rest of the wire protocol is little-endian,
+ * the CCITT-16 CRC on RDU2 devices is sent out in big-endian order.
+ */
+ put_unaligned_be16(calculated, crc);
+}
+
+static void *stuff(unsigned char *dest, const unsigned char *src, size_t n)
+{
+ while (n--) {
+ const unsigned char byte = *src++;
+
+ switch (byte) {
+ case RAVE_SP_STX:
+ case RAVE_SP_ETX:
+ case RAVE_SP_DLE:
+ *dest++ = RAVE_SP_DLE;
+ /* FALLTHROUGH */
+ default:
+ *dest++ = byte;
+ }
+ }
+
+ return dest;
+}
+
+static int rave_sp_write(struct rave_sp *sp, const u8 *data, u8 data_size)
+{
+ const size_t checksum_length = sp->variant->checksum->length;
+ unsigned char frame[RAVE_SP_TX_BUFFER_SIZE];
+ unsigned char crc[RAVE_SP_CHECKSUM_SIZE];
+ unsigned char *dest = frame;
+ size_t length;
+
+ if (WARN_ON(checksum_length > sizeof(crc)))
+ return -ENOMEM;
+
+ if (WARN_ON(data_size > sizeof(frame)))
+ return -ENOMEM;
+
+ sp->variant->checksum->subroutine(data, data_size, crc);
+
+ *dest++ = RAVE_SP_STX;
+ dest = stuff(dest, data, data_size);
+ dest = stuff(dest, crc, checksum_length);
+ *dest++ = RAVE_SP_ETX;
+
+ length = dest - frame;
+
+ print_hex_dump(KERN_DEBUG, "rave-sp tx: ", DUMP_PREFIX_NONE,
+ 16, 1, frame, length, false);
+
+ return serdev_device_write(sp->serdev, frame, length, HZ);
+}
+
+static u8 rave_sp_reply_code(u8 command)
+{
+ /*
+ * There isn't a single rule that describes command code ->
+ * ACK code transformation, but, going through various
+ * versions of ICDs, there appear to be three distinct groups
+ * that can each be described by a simple transformation.
+ */
+ switch (command) {
+ case 0xA0 ... 0xBE:
+ /*
+ * Commands implemented by firmware found in RDU1 and
+ * older devices all seem to obey the following rule
+ */
+ return command + 0x20;
+ case 0xE0 ... 0xEF:
+ /*
+ * Events emitted by all versions of the firmware use
+ * the least significant bit to form the ACK code
+ */
+ return command | 0x01;
+ default:
+ /*
+ * Commands implemented by firmware found in RDU2 are
+ * similar to "old" commands, but they use slightly
+ * different offset
+ */
+ return command + 0x40;
+ }
+}
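Plugging a few codes into the three branches makes the groups concrete:

/*
 *   0xA0 -> 0xC0   RDU1-era command, ACK = command + 0x20
 *   0xE2 -> 0xE3   event, ACK = command | 0x01
 *   0x11 -> 0x51   RDU2-style command, ACK = command + 0x40
 */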
+
+int rave_sp_exec(struct rave_sp *sp,
+ void *__data, size_t data_size,
+ void *reply_data, size_t reply_data_size)
+{
+ struct rave_sp_reply reply = {
+ .data = reply_data,
+ .length = reply_data_size,
+ .received = COMPLETION_INITIALIZER_ONSTACK(reply.received),
+ };
+ unsigned char *data = __data;
+ int command, ret = 0;
+ u8 ackid;
+
+ command = sp->variant->cmd.translate(data[0]);
+ if (command < 0)
+ return command;
+
+ ackid = atomic_inc_return(&sp->ackid);
+ reply.ackid = ackid;
+ reply.code = rave_sp_reply_code((u8)command);
+
+ mutex_lock(&sp->bus_lock);
+
+ mutex_lock(&sp->reply_lock);
+ sp->reply = &reply;
+ mutex_unlock(&sp->reply_lock);
+
+ data[0] = command;
+ data[1] = ackid;
+
+ rave_sp_write(sp, data, data_size);
+
+ if (!wait_for_completion_timeout(&reply.received, HZ)) {
+ dev_err(&sp->serdev->dev, "Command timeout\n");
+ ret = -ETIMEDOUT;
+
+ mutex_lock(&sp->reply_lock);
+ sp->reply = NULL;
+ mutex_unlock(&sp->reply_lock);
+ }
+
+ mutex_unlock(&sp->bus_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(rave_sp_exec);
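A hypothetical sketch of a caller: byte 0 holds the generic command (translated by the variant hook), byte 1 the ACK ID that rave_sp_exec() fills in itself, and the rest is command-specific payload. The boot-source request layout shown here is an assumption for illustration, not taken from an ICD:

static int example_get_boot_source(struct rave_sp *sp, u8 *source)
{
	u8 cmd[] = {
		[0] = RAVE_SP_CMD_BOOT_SOURCE,
		[1] = 0,			/* ackid, filled in by rave_sp_exec() */
		[2] = RAVE_SP_BOOT_SOURCE_GET,	/* assumed payload layout */
	};

	return rave_sp_exec(sp, cmd, sizeof(cmd), source, sizeof(*source));
}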
+
+static void rave_sp_receive_event(struct rave_sp *sp,
+ const unsigned char *data, size_t length)
+{
+ u8 cmd[] = {
+ [0] = rave_sp_reply_code(data[0]),
+ [1] = data[1],
+ };
+
+ rave_sp_write(sp, cmd, sizeof(cmd));
+
+ blocking_notifier_call_chain(&sp->event_notifier_list,
+ rave_sp_action_pack(data[0], data[2]),
+ NULL);
+}
+
+static void rave_sp_receive_reply(struct rave_sp *sp,
+ const unsigned char *data, size_t length)
+{
+ struct device *dev = &sp->serdev->dev;
+ struct rave_sp_reply *reply;
+ const size_t payload_length = length - 2;
+
+ mutex_lock(&sp->reply_lock);
+ reply = sp->reply;
+
+ if (reply) {
+ if (reply->code == data[0] && reply->ackid == data[1] &&
+ payload_length >= reply->length) {
+ /*
+ * We are relying on memcpy(dst, src, 0) to be a no-op
+ * when handling commands that have a no-payload reply
+ */
+ memcpy(reply->data, &data[2], reply->length);
+ complete(&reply->received);
+ sp->reply = NULL;
+ } else {
+ dev_err(dev, "Ignoring incorrect reply\n");
+ dev_dbg(dev, "Code: expected = 0x%08x received = 0x%08x\n",
+ reply->code, data[0]);
+ dev_dbg(dev, "ACK ID: expected = 0x%08x received = 0x%08x\n",
+ reply->ackid, data[1]);
+ dev_dbg(dev, "Length: expected = %zu received = %zu\n",
+ reply->length, payload_length);
+ }
+ }
+
+ mutex_unlock(&sp->reply_lock);
+}
+
+static void rave_sp_receive_frame(struct rave_sp *sp,
+ const unsigned char *data,
+ size_t length)
+{
+ const size_t checksum_length = sp->variant->checksum->length;
+ const size_t payload_length = length - checksum_length;
+ const u8 *crc_reported = &data[payload_length];
+ struct device *dev = &sp->serdev->dev;
+ u8 crc_calculated[checksum_length];
+
+ print_hex_dump(KERN_DEBUG, "rave-sp rx: ", DUMP_PREFIX_NONE,
+ 16, 1, data, length, false);
+
+ if (unlikely(length <= checksum_length)) {
+ dev_warn(dev, "Dropping short frame\n");
+ return;
+ }
+
+ sp->variant->checksum->subroutine(data, payload_length,
+ crc_calculated);
+
+ if (memcmp(crc_calculated, crc_reported, checksum_length)) {
+ dev_warn(dev, "Dropping bad frame\n");
+ return;
+ }
+
+ if (rave_sp_id_is_event(data[0]))
+ rave_sp_receive_event(sp, data, length);
+ else
+ rave_sp_receive_reply(sp, data, length);
+}
+
+static int rave_sp_receive_buf(struct serdev_device *serdev,
+ const unsigned char *buf, size_t size)
+{
+ struct device *dev = &serdev->dev;
+ struct rave_sp *sp = dev_get_drvdata(dev);
+ struct rave_sp_deframer *deframer = &sp->deframer;
+ const unsigned char *src = buf;
+ const unsigned char *end = buf + size;
+
+ while (src < end) {
+ const unsigned char byte = *src++;
+
+ switch (deframer->state) {
+ case RAVE_SP_EXPECT_SOF:
+ if (byte == RAVE_SP_STX)
+ deframer->state = RAVE_SP_EXPECT_DATA;
+ break;
+
+ case RAVE_SP_EXPECT_DATA:
+ /*
+ * Treat special byte values first
+ */
+ switch (byte) {
+ case RAVE_SP_ETX:
+ rave_sp_receive_frame(sp,
+ deframer->data,
+ deframer->length);
+ /*
+ * Once we have extracted a complete
+ * frame from the stream, we are done:
+ * bail out and reset the framer to
+ * its initial state, regardless of
+ * whether we have consumed all of
+ * the stream or not.
+ */
+ goto reset_framer;
+ case RAVE_SP_STX:
+ dev_warn(dev, "Bad frame: STX before ETX\n");
+ /*
+ * If we encounter a second "start of
+ * frame" marker before seeing the
+ * corresponding "end of frame", we
+ * reset the framer and ignore both
+ * the frame started by the first SOF
+ * and the frame started by the
+ * current SOF.
+ *
+ * NOTE: This means that only a frame
+ * started by a third SOF, sent after
+ * this one, will have a chance to
+ * get through.
+ */
+ goto reset_framer;
+ case RAVE_SP_DLE:
+ deframer->state = RAVE_SP_EXPECT_ESCAPED_DATA;
+ /*
+ * If we encounter an escape character
+ * we need to skip it and collect the
+ * byte that follows. We do that by
+ * forcing the next iteration of the
+ * enclosing while loop.
+ */
+ continue;
+ }
+ /*
+ * For the rest of the bytes, which are not
+ * special, we do the same thing that we do
+ * with escaped data: collect it in the
+ * deframer buffer
+ */
+
+ /* FALLTHROUGH */
+
+ case RAVE_SP_EXPECT_ESCAPED_DATA:
+ deframer->data[deframer->length++] = byte;
+
+ if (deframer->length == sizeof(deframer->data)) {
+ dev_warn(dev, "Bad frame: Too long\n");
+ /*
+ * If the amount of data we've
+ * accumulated for the current frame
+ * starts to exceed the capacity of
+ * the deframer's buffer, there's
+ * nothing else we can do but discard
+ * that data and start assembling a
+ * new frame again
+ */
+ goto reset_framer;
+ }
+
+ /*
+ * We've collected the escaped byte, so now
+ * we can go back to regular data collection
+ */
+ deframer->state = RAVE_SP_EXPECT_DATA;
+ break;
+ }
+ }
+
+ /*
+ * The only way to get out of the above loop and end up here
+ * is through consuming all of the supplied data, so here we
+ * report that we processed it all.
+ */
+ return size;
+
+reset_framer:
+ /*
+ * NOTE: A number of codepaths that will drop us here will do
+ * so before consuming all 'size' bytes of the data passed by
+ * the serdev layer. We rely on the fact that the serdev layer
+ * will re-execute this handler with the remainder of the Rx
+ * bytes once we report the actual number of bytes that we
+ * processed.
+ */
+ deframer->state = RAVE_SP_EXPECT_SOF;
+ deframer->length = 0;
+
+ return src - buf;
+}
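To illustrate the partial-consumption contract with a made-up byte stream:

/*
 * Partial-consumption example (made-up stream): one receive_buf() call
 * with { STX, 'a', 'b', CSUM, ETX, STX, 'c' } consumes five bytes, hands
 * { 'a', 'b', CSUM } to rave_sp_receive_frame() and returns 5; serdev
 * then re-invokes receive_buf() with the remaining { STX, 'c' }.
 */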
+
+static int rave_sp_rdu1_cmd_translate(enum rave_sp_command command)
+{
+ if (command >= RAVE_SP_CMD_STATUS &&
+ command <= RAVE_SP_CMD_CONTROL_EVENTS)
+ return command;
+
+ return -EINVAL;
+}
+
+static int rave_sp_rdu2_cmd_translate(enum rave_sp_command command)
+{
+ if (command >= RAVE_SP_CMD_GET_FIRMWARE_VERSION &&
+ command <= RAVE_SP_CMD_GET_GPIO_STATE)
+ return command;
+
+ if (command == RAVE_SP_CMD_REQ_COPPER_REV) {
+ /*
+ * As per RDU2 ICD 3.4.47, the CMD_GET_COPPER_REV code is
+ * different from RDU1's and is set to 0x28.
+ */
+ return 0x28;
+ }
+
+ return rave_sp_rdu1_cmd_translate(command);
+}
+
+static int rave_sp_default_cmd_translate(enum rave_sp_command command)
+{
+ /*
+ * All of the following command codes were taken from "Table :
+ * Communications Protocol Message Types" in section 3.3
+ * "MESSAGE TYPES" of Rave PIC24 ICD.
+ */
+ switch (command) {
+ case RAVE_SP_CMD_GET_FIRMWARE_VERSION:
+ return 0x11;
+ case RAVE_SP_CMD_GET_BOOTLOADER_VERSION:
+ return 0x12;
+ case RAVE_SP_CMD_BOOT_SOURCE:
+ return 0x14;
+ case RAVE_SP_CMD_SW_WDT:
+ return 0x1C;
+ case RAVE_SP_CMD_RESET:
+ return 0x1E;
+ case RAVE_SP_CMD_RESET_REASON:
+ return 0x1F;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct rave_sp_checksum rave_sp_checksum_8b2c = {
+ .length = 1,
+ .subroutine = csum_8b2c,
+};
+
+static const struct rave_sp_checksum rave_sp_checksum_ccitt = {
+ .length = 2,
+ .subroutine = csum_ccitt,
+};
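
The csum_8b2c and csum_ccitt subroutines referenced here are defined earlier in the driver and are not part of this hunk. A rough sketch of the two usual flavours, an 8-bit two's-complement sum and a bitwise CRC-16/CCITT; the seed and bit order are assumptions, not verified against the driver:

#include <stdint.h>
#include <stddef.h>

/* 8-bit two's complement: payload plus checksum sums to zero (mod 256). */
static uint8_t csum_8b2c_sketch(const uint8_t *buf, size_t size)
{
	uint8_t sum = 0;

	while (size--)
		sum += *buf++;

	return (uint8_t)(~sum + 1);
}

/* Bitwise CRC-16/CCITT, polynomial 0x1021, seed assumed to be 0xffff. */
static uint16_t csum_ccitt_sketch(const uint8_t *buf, size_t size)
{
	uint16_t crc = 0xffff;

	while (size--) {
		crc ^= (uint16_t)(*buf++) << 8;
		for (int i = 0; i < 8; i++)
			crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x1021)
					     : (uint16_t)(crc << 1);
	}

	return crc;
}

The .length fields above then simply tell the framer how many checksum bytes trail the payload: one for the 8-bit sum, two for the CRC.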
+
+static const struct rave_sp_variant rave_sp_legacy = {
+ .checksum = &rave_sp_checksum_8b2c,
+ .cmd = {
+ .translate = rave_sp_default_cmd_translate,
+ },
+};
+
+static const struct rave_sp_variant rave_sp_rdu1 = {
+ .checksum = &rave_sp_checksum_8b2c,
+ .cmd = {
+ .translate = rave_sp_rdu1_cmd_translate,
+ },
+};
+
+static const struct rave_sp_variant rave_sp_rdu2 = {
+ .checksum = &rave_sp_checksum_ccitt,
+ .cmd = {
+ .translate = rave_sp_rdu2_cmd_translate,
+ },
+};
+
+static const struct of_device_id rave_sp_dt_ids[] = {
+ { .compatible = "zii,rave-sp-niu", .data = &rave_sp_legacy },
+ { .compatible = "zii,rave-sp-mezz", .data = &rave_sp_legacy },
+ { .compatible = "zii,rave-sp-esb", .data = &rave_sp_legacy },
+ { .compatible = "zii,rave-sp-rdu1", .data = &rave_sp_rdu1 },
+ { .compatible = "zii,rave-sp-rdu2", .data = &rave_sp_rdu2 },
+ { /* sentinel */ }
+};
+
+static const struct serdev_device_ops rave_sp_serdev_device_ops = {
+ .receive_buf = rave_sp_receive_buf,
+ .write_wakeup = serdev_device_write_wakeup,
+};
+
+static int rave_sp_probe(struct serdev_device *serdev)
+{
+ struct device *dev = &serdev->dev;
+ struct rave_sp *sp;
+ u32 baud;
+ int ret;
+
+ if (of_property_read_u32(dev->of_node, "current-speed", &baud)) {
+ dev_err(dev,
+ "'current-speed' is not specified in device node\n");
+ return -EINVAL;
+ }
+
+ sp = devm_kzalloc(dev, sizeof(*sp), GFP_KERNEL);
+ if (!sp)
+ return -ENOMEM;
+
+ sp->serdev = serdev;
+ dev_set_drvdata(dev, sp);
+
+ sp->variant = of_device_get_match_data(dev);
+ if (!sp->variant)
+ return -ENODEV;
+
+ mutex_init(&sp->bus_lock);
+ mutex_init(&sp->reply_lock);
+ BLOCKING_INIT_NOTIFIER_HEAD(&sp->event_notifier_list);
+
+ serdev_device_set_client_ops(serdev, &rave_sp_serdev_device_ops);
+ ret = devm_serdev_device_open(dev, serdev);
+ if (ret)
+ return ret;
+
+ serdev_device_set_baudrate(serdev, baud);
+
+ return devm_of_platform_populate(dev);
+}
+
+MODULE_DEVICE_TABLE(of, rave_sp_dt_ids);
+
+static struct serdev_device_driver rave_sp_drv = {
+ .probe = rave_sp_probe,
+ .driver = {
+ .name = "rave-sp",
+ .of_match_table = rave_sp_dt_ids,
+ },
+};
+module_serdev_device_driver(rave_sp_drv);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrey Vostrikov <andrey.vostrikov@cogentembedded.com>");
+MODULE_AUTHOR("Nikita Yushchenko <nikita.yoush@cogentembedded.com>");
+MODULE_AUTHOR("Andrey Smirnov <andrew.smirnov@gmail.com>");
+MODULE_DESCRIPTION("RAVE SP core driver");
diff --git a/drivers/mfd/stm32-lptimer.c b/drivers/mfd/stm32-lptimer.c
index 075330a25f61..a00f99f36559 100644
--- a/drivers/mfd/stm32-lptimer.c
+++ b/drivers/mfd/stm32-lptimer.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* STM32 Low-Power Timer parent driver.
- *
* Copyright (C) STMicroelectronics 2017
- *
* Author: Fabrice Gasnier <fabrice.gasnier@st.com>
- *
* Inspired by Benjamin Gaignard's stm32-timers driver
- *
- * License terms: GNU General Public License (GPL), version 2
*/
#include <linux/mfd/stm32-lptimer.h>
diff --git a/drivers/mfd/stm32-timers.c b/drivers/mfd/stm32-timers.c
index a6675a449409..1d347e5dfa79 100644
--- a/drivers/mfd/stm32-timers.c
+++ b/drivers/mfd/stm32-timers.c
@@ -1,9 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics 2016
- *
* Author: Benjamin Gaignard <benjamin.gaignard@st.com>
- *
- * License terms: GNU General Public License (GPL), version 2
*/
#include <linux/mfd/stm32-timers.h>
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index b93fe4c4957a..7eaa40bc703f 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -13,6 +13,7 @@
*/
#include <linux/err.h>
+#include <linux/hwspinlock.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/list.h>
@@ -87,6 +88,24 @@ static struct syscon *of_syscon_register(struct device_node *np)
if (ret)
reg_io_width = 4;
+ ret = of_hwspin_lock_get_id(np, 0);
+ if (ret > 0 || (IS_ENABLED(CONFIG_HWSPINLOCK) && ret == 0)) {
+ syscon_config.use_hwlock = true;
+ syscon_config.hwlock_id = ret;
+ syscon_config.hwlock_mode = HWLOCK_IRQSTATE;
+ } else if (ret < 0) {
+ switch (ret) {
+ case -ENOENT:
+ /* Ignore missing hwlock, it's optional. */
+ break;
+ default:
+ pr_err("Failed to retrieve valid hwlock: %d\n", ret);
+ /* fall-through */
+ case -EPROBE_DEFER:
+ goto err_regmap;
+ }
+ }
+
syscon_config.reg_stride = reg_io_width;
syscon_config.val_bits = reg_io_width * 8;
syscon_config.max_register = resource_size(&res) - reg_io_width;
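
The hunk above treats the hwlock as optional: a missing phandle (-ENOENT) is ignored, -EPROBE_DEFER propagates so the probe is retried later, and any other error is reported and then deliberately falls through into the same bail-out path. (The ret == 0 case is only honoured when CONFIG_HWSPINLOCK is built in, because the stub helper returns 0 otherwise.) The same triage shape in isolation, with hypothetical names and user-space stand-ins for the error codes:

#include <stdbool.h>
#include <stdio.h>

#define ERR_NOENT	(-2)	/* stand-in for -ENOENT */
#define ERR_PROBE_DEFER	(-517)	/* stand-in for -EPROBE_DEFER */

struct lock_cfg {
	bool use_lock;
	int lock_id;
};

/* lookup() is a hypothetical stand-in for of_hwspin_lock_get_id(). */
static int setup_optional_lock(int (*lookup)(void), struct lock_cfg *cfg)
{
	int ret = lookup();

	if (ret >= 0) {			/* found: record id, enable locking */
		cfg->use_lock = true;
		cfg->lock_id = ret;
		return 0;
	}

	switch (ret) {
	case ERR_NOENT:			/* absent: it's optional, carry on */
		return 0;
	default:			/* hard error: report, then bail */
		fprintf(stderr, "failed to retrieve lock: %d\n", ret);
		/* fall through */
	case ERR_PROBE_DEFER:		/* provider not ready: retry later */
		return ret;
	}
}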
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index 0f3fab47fe48..3cd958a31f36 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -124,7 +124,7 @@ static int ti_tscadc_probe(struct platform_device *pdev)
struct ti_tscadc_dev *tscadc;
struct resource *res;
struct clk *clk;
- struct device_node *node = pdev->dev.of_node;
+ struct device_node *node;
struct mfd_cell *cell;
struct property *prop;
const __be32 *cur;
diff --git a/drivers/mfd/tmio_core.c b/drivers/mfd/tmio_core.c
index 83af78c1b0eb..ebf54cc28f7a 100644
--- a/drivers/mfd/tmio_core.c
+++ b/drivers/mfd/tmio_core.c
@@ -9,6 +9,26 @@
#include <linux/export.h>
#include <linux/mfd/tmio.h>
+#define CNF_CMD 0x04
+#define CNF_CTL_BASE 0x10
+#define CNF_INT_PIN 0x3d
+#define CNF_STOP_CLK_CTL 0x40
+#define CNF_GCLK_CTL 0x41
+#define CNF_SD_CLK_MODE 0x42
+#define CNF_PIN_STATUS 0x44
+#define CNF_PWR_CTL_1 0x48
+#define CNF_PWR_CTL_2 0x49
+#define CNF_PWR_CTL_3 0x4a
+#define CNF_CARD_DETECT_MODE 0x4c
+#define CNF_SD_SLOT 0x50
+#define CNF_EXT_GCLK_CTL_1 0xf0
+#define CNF_EXT_GCLK_CTL_2 0xf1
+#define CNF_EXT_GCLK_CTL_3 0xf9
+#define CNF_SD_LED_EN_1 0xfa
+#define CNF_SD_LED_EN_2 0xfe
+
+#define SDCREN 0x2 /* Enable access to MMC CTL regs. (flag in COMMAND_REG)*/
+
int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base)
{
/* Enable the MMC/SD Control registers */
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index f1a5c2357b14..7c0fa24f9067 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -496,6 +496,10 @@ config PCI_ENDPOINT_TEST
Enable this configuration option to enable the host side test driver
for PCI Endpoint.
+config MISC_RTSX
+ tristate
+ default MISC_RTSX_PCI || MISC_RTSX_USB
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
@@ -508,4 +512,5 @@ source "drivers/misc/mic/Kconfig"
source "drivers/misc/genwqe/Kconfig"
source "drivers/misc/echo/Kconfig"
source "drivers/misc/cxl/Kconfig"
+source "drivers/misc/cardreader/Kconfig"
endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 5ca5f64df478..8d8cc096063b 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -55,6 +55,7 @@ obj-$(CONFIG_CXL_BASE) += cxl/
obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o
obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o
obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o
+obj-$(CONFIG_MISC_RTSX) += cardreader/
lkdtm-$(CONFIG_LKDTM) += lkdtm_core.o
lkdtm-$(CONFIG_LKDTM) += lkdtm_bugs.o
diff --git a/drivers/misc/cardreader/Kconfig b/drivers/misc/cardreader/Kconfig
new file mode 100644
index 000000000000..69e815e32a8c
--- /dev/null
+++ b/drivers/misc/cardreader/Kconfig
@@ -0,0 +1,20 @@
+config MISC_RTSX_PCI
+ tristate "Realtek PCI-E card reader"
+ depends on PCI
+ select MFD_CORE
+ help
+	  This adds support for Realtek PCI-Express card readers, including
+	  rts5209, rts5227, rts522A, rts5229, rts5249, rts524A, rts525A,
+	  rtl8411 and rts5260.
+ Realtek card readers support access to many types of memory cards,
+ such as Memory Stick, Memory Stick Pro, Secure Digital and
+ MultiMediaCard.
+
+config MISC_RTSX_USB
+ tristate "Realtek USB card reader"
+ depends on USB
+ select MFD_CORE
+ help
+ Select this option to get support for Realtek USB 2.0 card readers
+ including RTS5129, RTS5139, RTS5179 and RTS5170.
+	  Realtek card readers support access to many types of memory cards,
+ such as Memory Stick Pro, Secure Digital and MultiMediaCard.
diff --git a/drivers/misc/cardreader/Makefile b/drivers/misc/cardreader/Makefile
new file mode 100644
index 000000000000..9fabfcc6fa7a
--- /dev/null
+++ b/drivers/misc/cardreader/Makefile
@@ -0,0 +1,4 @@
+rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o rts5260.o
+
+obj-$(CONFIG_MISC_RTSX_PCI) += rtsx_pci.o
+obj-$(CONFIG_MISC_RTSX_USB) += rtsx_usb.o
diff --git a/drivers/mfd/rtl8411.c b/drivers/misc/cardreader/rtl8411.c
index b3ae6592014a..434fd070d3e3 100644
--- a/drivers/mfd/rtl8411.c
+++ b/drivers/misc/cardreader/rtl8411.c
@@ -23,7 +23,7 @@
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/delay.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#include "rtsx_pcr.h"
diff --git a/drivers/mfd/rts5209.c b/drivers/misc/cardreader/rts5209.c
index b95beecf767f..ce68c48d8ec9 100644
--- a/drivers/mfd/rts5209.c
+++ b/drivers/misc/cardreader/rts5209.c
@@ -21,7 +21,7 @@
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#include "rtsx_pcr.h"
diff --git a/drivers/mfd/rts5227.c b/drivers/misc/cardreader/rts5227.c
index ff296a4bf3d2..024dcba8d6c8 100644
--- a/drivers/mfd/rts5227.c
+++ b/drivers/misc/cardreader/rts5227.c
@@ -22,7 +22,7 @@
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#include "rtsx_pcr.h"
diff --git a/drivers/mfd/rts5229.c b/drivers/misc/cardreader/rts5229.c
index 9ed9dc84eac8..9119261337cc 100644
--- a/drivers/mfd/rts5229.c
+++ b/drivers/misc/cardreader/rts5229.c
@@ -21,7 +21,7 @@
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#include "rtsx_pcr.h"
diff --git a/drivers/mfd/rts5249.c b/drivers/misc/cardreader/rts5249.c
index 7fcf37ba922c..dbe013abdb83 100644
--- a/drivers/mfd/rts5249.c
+++ b/drivers/misc/cardreader/rts5249.c
@@ -21,7 +21,7 @@
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#include "rtsx_pcr.h"
@@ -738,4 +738,3 @@ void rts525a_init_params(struct rtsx_pcr *pcr)
pcr->reg_pm_ctrl3 = RTS524A_PM_CTRL3;
pcr->ops = &rts525a_pcr_ops;
}
-
diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c
new file mode 100644
index 000000000000..07cb93abf685
--- /dev/null
+++ b/drivers/misc/cardreader/rts5260.c
@@ -0,0 +1,748 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2016-2017 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Steven FENG <steven_feng@realsil.com.cn>
+ * Rui FENG <rui_feng@realsil.com.cn>
+ * Wei WANG <wei_wang@realsil.com.cn>
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/rtsx_pci.h>
+
+#include "rts5260.h"
+#include "rtsx_pcr.h"
+
+static u8 rts5260_get_ic_version(struct rtsx_pcr *pcr)
+{
+ u8 val;
+
+ rtsx_pci_read_register(pcr, DUMMY_REG_RESET_0, &val);
+ return val & IC_VERSION_MASK;
+}
+
+static void rts5260_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
+{
+ u8 driving_3v3[6][3] = {
+ {0x94, 0x94, 0x94},
+ {0x11, 0x11, 0x18},
+ {0x55, 0x55, 0x5C},
+ {0x94, 0x94, 0x94},
+ {0x94, 0x94, 0x94},
+ {0xFF, 0xFF, 0xFF},
+ };
+ u8 driving_1v8[6][3] = {
+ {0x9A, 0x89, 0x89},
+ {0xC4, 0xC4, 0xC4},
+ {0x3C, 0x3C, 0x3C},
+ {0x9B, 0x99, 0x99},
+ {0x9A, 0x89, 0x89},
+ {0xFE, 0xFE, 0xFE},
+ };
+ u8 (*driving)[3], drive_sel;
+
+ if (voltage == OUTPUT_3V3) {
+ driving = driving_3v3;
+ drive_sel = pcr->sd30_drive_sel_3v3;
+ } else {
+ driving = driving_1v8;
+ drive_sel = pcr->sd30_drive_sel_1v8;
+ }
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CLK_DRIVE_SEL,
+ 0xFF, driving[drive_sel][0]);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CMD_DRIVE_SEL,
+ 0xFF, driving[drive_sel][1]);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DAT_DRIVE_SEL,
+ 0xFF, driving[drive_sel][2]);
+}
+
+static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr)
+{
+ u32 reg;
+
+ rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG1, &reg);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
+
+ if (!rtsx_vendor_setting_valid(reg)) {
+ pcr_dbg(pcr, "skip fetch vendor setting\n");
+ return;
+ }
+
+ pcr->aspm_en = rtsx_reg_to_aspm(reg);
+ pcr->sd30_drive_sel_1v8 = rtsx_reg_to_sd30_drive_sel_1v8(reg);
+ pcr->card_drive_sel &= 0x3F;
+ pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg);
+
+ rtsx_pci_read_config_dword(pcr, PCR_SETTING_REG2, &reg);
+ pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
+ pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
+ if (rtsx_reg_check_reverse_socket(reg))
+ pcr->flags |= PCR_REVERSE_SOCKET;
+}
+
+static void rtsx_base_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
+{
+ /* Set relink_time to 0 */
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0);
+ rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3,
+ RELINK_TIME_MASK, 0);
+
+ if (pm_state == HOST_ENTER_S3)
+ rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
+ D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
+
+ rtsx_pci_write_register(pcr, FPDCTL, ALL_POWER_DOWN, ALL_POWER_DOWN);
+}
+
+static int rtsx_base_enable_auto_blink(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, OLT_LED_CTL,
+ LED_SHINE_MASK, LED_SHINE_EN);
+}
+
+static int rtsx_base_disable_auto_blink(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, OLT_LED_CTL,
+ LED_SHINE_MASK, LED_SHINE_DISABLE);
+}
+
+static int rts5260_turn_on_led(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, RTS5260_REG_GPIO_CTL0,
+ RTS5260_REG_GPIO_MASK, RTS5260_REG_GPIO_ON);
+}
+
+static int rts5260_turn_off_led(struct rtsx_pcr *pcr)
+{
+ return rtsx_pci_write_register(pcr, RTS5260_REG_GPIO_CTL0,
+ RTS5260_REG_GPIO_MASK, RTS5260_REG_GPIO_OFF);
+}
+
+/* SD Pull Control Enable:
+ * SD_DAT[3:0] ==> pull up
+ * SD_CD ==> pull up
+ * SD_WP ==> pull up
+ * SD_CMD ==> pull up
+ * SD_CLK ==> pull down
+ */
+static const u32 rts5260_sd_pull_ctl_enable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL1, 0x66),
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9),
+ RTSX_REG_PAIR(CARD_PULL_CTL4, 0xAA),
+ 0,
+};
+
+/* SD Pull Control Disable:
+ * SD_DAT[3:0] ==> pull down
+ * SD_CD ==> pull up
+ * SD_WP ==> pull down
+ * SD_CMD ==> pull down
+ * SD_CLK ==> pull down
+ */
+static const u32 rts5260_sd_pull_ctl_disable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL1, 0x66),
+ RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5),
+ RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
+ 0,
+};
+
+/* MS Pull Control Enable:
+ * MS CD ==> pull up
+ * others ==> pull down
+ */
+static const u32 rts5260_ms_pull_ctl_enable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
+ 0,
+};
+
+/* MS Pull Control Disable:
+ * MS CD ==> pull up
+ * others ==> pull down
+ */
+static const u32 rts5260_ms_pull_ctl_disable_tbl[] = {
+ RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
+ RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
+ 0,
+};
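
These pull-control tables are 0-terminated lists of packed register/value pairs, presumably walked by the generic pull-control helpers in rtsx_pcr.c. Assuming RTSX_REG_PAIR packs the register address into the high 16 bits and the value into the low 8 bits (an assumption, not shown in this patch), a consumer would iterate them like this:

#include <stdint.h>

/* Assumed packing: address in bits 31..16, value in bits 7..0. */
#define REG_PAIR(addr, val)	(((uint32_t)(addr) << 16) | (uint8_t)(val))

/* Walk a 0-terminated table, issuing one register write per entry. */
static void apply_reg_pairs(const uint32_t *tbl,
			    void (*write_reg)(uint16_t addr, uint8_t val))
{
	for (; *tbl; tbl++)
		write_reg((uint16_t)(*tbl >> 16), (uint8_t)*tbl);
}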
+
+static int sd_set_sample_push_timing_sd30(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_write_register(pcr, SD_CFG1, SD_MODE_SELECT_MASK
+ | SD_ASYNC_FIFO_NOT_RST, SD_30_MODE | SD_ASYNC_FIFO_NOT_RST);
+ rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, CLK_LOW_FREQ);
+ rtsx_pci_write_register(pcr, CARD_CLK_SOURCE, 0xFF,
+ CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1);
+ rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
+
+ return 0;
+}
+
+static int rts5260_card_power_on(struct rtsx_pcr *pcr, int card)
+{
+ int err = 0;
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (option->ocp_en)
+ rtsx_pci_enable_ocp(pcr);
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_CONFIG2,
+ DV331812_VDD1, DV331812_VDD1);
+ err = rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
+ if (err < 0)
+ return err;
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_VCC_CFG0,
+ RTS5260_DVCC_TUNE_MASK, RTS5260_DVCC_33);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_VCC_CFG1,
+ LDO_POW_SDVDD1_MASK, LDO_POW_SDVDD1_ON);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_CONFIG2,
+ DV331812_POWERON, DV331812_POWERON);
+ err = rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
+
+ msleep(20);
+
+ if (pcr->extra_caps & EXTRA_CAPS_SD_SDR50 ||
+ pcr->extra_caps & EXTRA_CAPS_SD_SDR104)
+ sd_set_sample_push_timing_sd30(pcr);
+
+ /* Initialize SD_CFG1 register */
+ rtsx_pci_write_register(pcr, SD_CFG1, 0xFF,
+ SD_CLK_DIVIDE_128 | SD_20_MODE);
+
+ rtsx_pci_write_register(pcr, SD_SAMPLE_POINT_CTL,
+ 0xFF, SD20_RX_POS_EDGE);
+ rtsx_pci_write_register(pcr, SD_PUSH_POINT_CTL, 0xFF, 0);
+ rtsx_pci_write_register(pcr, CARD_STOP, SD_STOP | SD_CLR_ERR,
+ SD_STOP | SD_CLR_ERR);
+
+ /* Reset SD_CFG3 register */
+ rtsx_pci_write_register(pcr, SD_CFG3, SD30_CLK_END_EN, 0);
+ rtsx_pci_write_register(pcr, REG_SD_STOP_SDCLK_CFG,
+ SD30_CLK_STOP_CFG_EN | SD30_CLK_STOP_CFG1 |
+ SD30_CLK_STOP_CFG0, 0);
+
+ rtsx_pci_write_register(pcr, REG_PRE_RW_MODE, EN_INFINITE_MODE, 0);
+
+ return err;
+}
+
+static int rts5260_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
+{
+ switch (voltage) {
+ case OUTPUT_3V3:
+ rtsx_pci_write_register(pcr, LDO_CONFIG2,
+ DV331812_VDD1, DV331812_VDD1);
+ rtsx_pci_write_register(pcr, LDO_DV18_CFG,
+ DV331812_MASK, DV331812_33);
+ rtsx_pci_write_register(pcr, SD_PAD_CTL, SD_IO_USING_1V8, 0);
+ break;
+ case OUTPUT_1V8:
+ rtsx_pci_write_register(pcr, LDO_CONFIG2,
+ DV331812_VDD1, DV331812_VDD1);
+ rtsx_pci_write_register(pcr, LDO_DV18_CFG,
+ DV331812_MASK, DV331812_17);
+ rtsx_pci_write_register(pcr, SD_PAD_CTL, SD_IO_USING_1V8,
+ SD_IO_USING_1V8);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set pad drive */
+ rtsx_pci_init_cmd(pcr);
+ rts5260_fill_driving(pcr, voltage);
+ return rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
+}
+
+static void rts5260_stop_cmd(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
+ rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
+ rtsx_pci_write_register(pcr, RTS5260_DMA_RST_CTL_0,
+ RTS5260_DMA_RST | RTS5260_ADMA3_RST,
+ RTS5260_DMA_RST | RTS5260_ADMA3_RST);
+ rtsx_pci_write_register(pcr, RBCTL, RB_FLUSH, RB_FLUSH);
+}
+
+static void rts5260_card_before_power_off(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ rts5260_stop_cmd(pcr);
+ rts5260_switch_output_voltage(pcr, OUTPUT_3V3);
+
+ if (option->ocp_en)
+ rtsx_pci_disable_ocp(pcr);
+}
+
+static int rts5260_card_power_off(struct rtsx_pcr *pcr, int card)
+{
+ int err = 0;
+
+ rts5260_card_before_power_off(pcr);
+
+ rtsx_pci_init_cmd(pcr);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_VCC_CFG1,
+ LDO_POW_SDVDD1_MASK, LDO_POW_SDVDD1_OFF);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_CONFIG2,
+ DV331812_POWERON, DV331812_POWEROFF);
+ err = rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
+
+ return err;
+}
+
+static void rts5260_init_ocp(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ if (option->ocp_en) {
+ u8 mask, val;
+
+ rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
+ RTS5260_DVCC_OCP_EN |
+ RTS5260_DVCC_OCP_CL_EN,
+ RTS5260_DVCC_OCP_EN |
+ RTS5260_DVCC_OCP_CL_EN);
+ rtsx_pci_write_register(pcr, RTS5260_DVIO_CTRL,
+ RTS5260_DVIO_OCP_EN |
+ RTS5260_DVIO_OCP_CL_EN,
+ RTS5260_DVIO_OCP_EN |
+ RTS5260_DVIO_OCP_CL_EN);
+
+ rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
+ RTS5260_DVCC_OCP_THD_MASK,
+ option->sd_400mA_ocp_thd);
+
+ rtsx_pci_write_register(pcr, RTS5260_DVIO_CTRL,
+ RTS5260_DVIO_OCP_THD_MASK,
+ RTS5260_DVIO_OCP_THD_350);
+
+ rtsx_pci_write_register(pcr, RTS5260_DV331812_CFG,
+ RTS5260_DV331812_OCP_THD_MASK,
+ RTS5260_DV331812_OCP_THD_210);
+
+ mask = SD_OCP_GLITCH_MASK | SDVIO_OCP_GLITCH_MASK;
+ val = pcr->hw_param.ocp_glitch;
+ rtsx_pci_write_register(pcr, REG_OCPGLITCH, mask, val);
+
+ rtsx_pci_enable_ocp(pcr);
+ } else {
+ rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
+ RTS5260_DVCC_OCP_EN |
+ RTS5260_DVCC_OCP_CL_EN, 0);
+ rtsx_pci_write_register(pcr, RTS5260_DVIO_CTRL,
+ RTS5260_DVIO_OCP_EN |
+ RTS5260_DVIO_OCP_CL_EN, 0);
+ }
+}
+
+static void rts5260_enable_ocp(struct rtsx_pcr *pcr)
+{
+ u8 val = 0;
+
+ rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
+
+ val = SD_OCP_INT_EN | SD_DETECT_EN;
+ val |= SDVIO_OCP_INT_EN | SDVIO_DETECT_EN;
+ rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
+ rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL,
+ DV3318_DETECT_EN | DV3318_OCP_INT_EN,
+ DV3318_DETECT_EN | DV3318_OCP_INT_EN);
+}
+
+static void rts5260_disable_ocp(struct rtsx_pcr *pcr)
+{
+ u8 mask = 0;
+
+ mask = SD_OCP_INT_EN | SD_DETECT_EN;
+ mask |= SDVIO_OCP_INT_EN | SDVIO_DETECT_EN;
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+ rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL,
+ DV3318_DETECT_EN | DV3318_OCP_INT_EN, 0);
+
+ rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
+ OC_POWER_DOWN);
+}
+
+int rts5260_get_ocpstat(struct rtsx_pcr *pcr, u8 *val)
+{
+ return rtsx_pci_read_register(pcr, REG_OCPSTAT, val);
+}
+
+int rts5260_get_ocpstat2(struct rtsx_pcr *pcr, u8 *val)
+{
+ return rtsx_pci_read_register(pcr, REG_DV3318_OCPSTAT, val);
+}
+
+void rts5260_clear_ocpstat(struct rtsx_pcr *pcr)
+{
+ u8 mask = 0;
+ u8 val = 0;
+
+ mask = SD_OCP_INT_CLR | SD_OC_CLR;
+ mask |= SDVIO_OCP_INT_CLR | SDVIO_OC_CLR;
+ val = SD_OCP_INT_CLR | SD_OC_CLR;
+ val |= SDVIO_OCP_INT_CLR | SDVIO_OC_CLR;
+
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
+ rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL,
+ DV3318_OCP_INT_CLR | DV3318_OCP_CLR,
+ DV3318_OCP_INT_CLR | DV3318_OCP_CLR);
+ udelay(10);
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+ rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL,
+ DV3318_OCP_INT_CLR | DV3318_OCP_CLR, 0);
+}
+
+void rts5260_process_ocp(struct rtsx_pcr *pcr)
+{
+ if (!pcr->option.ocp_en)
+ return;
+
+ rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
+ rts5260_get_ocpstat2(pcr, &pcr->ocp_stat2);
+ if (pcr->card_exist & SD_EXIST)
+ rtsx_sd_power_off_card3v3(pcr);
+ else if (pcr->card_exist & MS_EXIST)
+ rtsx_ms_power_off_card3v3(pcr);
+
+ if (!(pcr->card_exist & MS_EXIST) && !(pcr->card_exist & SD_EXIST)) {
+ if ((pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER |
+ SDVIO_OC_NOW | SDVIO_OC_EVER)) ||
+ (pcr->ocp_stat2 & (DV3318_OCP_NOW | DV3318_OCP_EVER)))
+ rtsx_pci_clear_ocpstat(pcr);
+ pcr->ocp_stat = 0;
+ pcr->ocp_stat2 = 0;
+ }
+
+ if ((pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER |
+ SDVIO_OC_NOW | SDVIO_OC_EVER)) ||
+ (pcr->ocp_stat2 & (DV3318_OCP_NOW | DV3318_OCP_EVER))) {
+ if (pcr->card_exist & SD_EXIST)
+ rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
+ else if (pcr->card_exist & MS_EXIST)
+ rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, 0);
+ }
+}
+
+int rts5260_init_hw(struct rtsx_pcr *pcr)
+{
+ int err;
+
+ rtsx_pci_init_ocp(pcr);
+
+ rtsx_pci_init_cmd(pcr);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, L1SUB_CONFIG1,
+ AUX_CLK_ACTIVE_SEL_MASK, MAC_CKSW_DONE);
+	/* Reset L1SUB Config */
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, L1SUB_CONFIG3, 0xFF, 0x00);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PM_CLK_FORCE_CTL,
+ CLK_PM_EN, CLK_PM_EN);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWD_SUSPEND_EN, 0xFF, 0xFF);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
+ PWR_GATE_EN, PWR_GATE_EN);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, REG_VREF,
+ PWD_SUSPND_EN, PWD_SUSPND_EN);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RBCTL,
+ U_AUTO_DMA_EN_MASK, U_AUTO_DMA_DISABLE);
+
+ if (pcr->flags & PCR_REVERSE_SOCKET)
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB0, 0xB0);
+ else
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB0, 0x80);
+
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, OBFF_CFG,
+ OBFF_EN_MASK, OBFF_DISABLE);
+
+ err = rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static void rts5260_pwr_saving_setting(struct rtsx_pcr *pcr)
+{
+ int lss_l1_1, lss_l1_2;
+
+ lss_l1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN)
+ | rtsx_check_dev_flag(pcr, PM_L1_1_EN);
+ lss_l1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN)
+ | rtsx_check_dev_flag(pcr, PM_L1_2_EN);
+
+ if (lss_l1_2) {
+ pcr_dbg(pcr, "Set parameters for L1.2.");
+ rtsx_pci_write_register(pcr, PWR_GLOBAL_CTRL,
+ 0xFF, PCIE_L1_2_EN);
+ rtsx_pci_write_register(pcr, PWR_FE_CTL,
+ 0xFF, PCIE_L1_2_PD_FE_EN);
+ } else if (lss_l1_1) {
+ pcr_dbg(pcr, "Set parameters for L1.1.");
+ rtsx_pci_write_register(pcr, PWR_GLOBAL_CTRL,
+ 0xFF, PCIE_L1_1_EN);
+ rtsx_pci_write_register(pcr, PWR_FE_CTL,
+ 0xFF, PCIE_L1_1_PD_FE_EN);
+ } else {
+ pcr_dbg(pcr, "Set parameters for L1.");
+ rtsx_pci_write_register(pcr, PWR_GLOBAL_CTRL,
+ 0xFF, PCIE_L1_0_EN);
+ rtsx_pci_write_register(pcr, PWR_FE_CTL,
+ 0xFF, PCIE_L1_0_PD_FE_EN);
+ }
+
+ rtsx_pci_write_register(pcr, CFG_L1_0_PCIE_DPHY_RET_VALUE,
+ 0xFF, CFG_L1_0_RET_VALUE_DEFAULT);
+ rtsx_pci_write_register(pcr, CFG_L1_0_PCIE_MAC_RET_VALUE,
+ 0xFF, CFG_L1_0_RET_VALUE_DEFAULT);
+ rtsx_pci_write_register(pcr, CFG_L1_0_CRC_SD30_RET_VALUE,
+ 0xFF, CFG_L1_0_RET_VALUE_DEFAULT);
+ rtsx_pci_write_register(pcr, CFG_L1_0_CRC_SD40_RET_VALUE,
+ 0xFF, CFG_L1_0_RET_VALUE_DEFAULT);
+ rtsx_pci_write_register(pcr, CFG_L1_0_SYS_RET_VALUE,
+ 0xFF, CFG_L1_0_RET_VALUE_DEFAULT);
+	/* Optionally cut APHY */
+ rtsx_pci_write_register(pcr, CFG_PCIE_APHY_OFF_0,
+ 0xFF, CFG_PCIE_APHY_OFF_0_DEFAULT);
+ rtsx_pci_write_register(pcr, CFG_PCIE_APHY_OFF_1,
+ 0xFF, CFG_PCIE_APHY_OFF_1_DEFAULT);
+ rtsx_pci_write_register(pcr, CFG_PCIE_APHY_OFF_2,
+ 0xFF, CFG_PCIE_APHY_OFF_2_DEFAULT);
+ rtsx_pci_write_register(pcr, CFG_PCIE_APHY_OFF_3,
+ 0xFF, CFG_PCIE_APHY_OFF_3_DEFAULT);
+	/* CDR DEC */
+ rtsx_pci_write_register(pcr, PWC_CDR, 0xFF, PWC_CDR_DEFAULT);
+	/* PWM/PFM */
+ rtsx_pci_write_register(pcr, CFG_LP_FPWM_VALUE,
+ 0xFF, CFG_LP_FPWM_VALUE_DEFAULT);
+	/* No power-saving workaround */
+ rtsx_pci_write_register(pcr, CFG_L1_0_CRC_MISC_RET_VALUE,
+ 0xFF, CFG_L1_0_CRC_MISC_RET_VALUE_DEFAULT);
+}
+
+static void rts5260_init_from_cfg(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ u32 lval;
+
+ rtsx_pci_read_config_dword(pcr, PCR_ASPM_SETTING_5260, &lval);
+
+ if (lval & ASPM_L1_1_EN_MASK)
+ rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
+
+ if (lval & ASPM_L1_2_EN_MASK)
+ rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
+
+ if (lval & PM_L1_1_EN_MASK)
+ rtsx_set_dev_flag(pcr, PM_L1_1_EN);
+
+ if (lval & PM_L1_2_EN_MASK)
+ rtsx_set_dev_flag(pcr, PM_L1_2_EN);
+
+ rts5260_pwr_saving_setting(pcr);
+
+ if (option->ltr_en) {
+ u16 val;
+
+ pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val);
+ if (val & PCI_EXP_DEVCTL2_LTR_EN) {
+ option->ltr_enabled = true;
+ option->ltr_active = true;
+ rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
+ } else {
+ option->ltr_enabled = false;
+ }
+ }
+
+ if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+ | PM_L1_1_EN | PM_L1_2_EN))
+ option->force_clkreq_0 = false;
+ else
+ option->force_clkreq_0 = true;
+}
+
+static int rts5260_extra_init_hw(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+
+ /* Set mcu_cnt to 7 to ensure data can be sampled properly */
+ rtsx_pci_write_register(pcr, 0xFC03, 0x7F, 0x07);
+ rtsx_pci_write_register(pcr, SSC_DIV_N_0, 0xFF, 0x5D);
+
+ rts5260_init_from_cfg(pcr);
+
+	/* Force no MDIO */
+ rtsx_pci_write_register(pcr, RTS5260_AUTOLOAD_CFG4,
+ 0xFF, RTS5260_MIMO_DISABLE);
+	/* Modify SDVCC tune default parameters */
+ rtsx_pci_write_register(pcr, LDO_VCC_CFG0,
+ RTS5260_DVCC_TUNE_MASK, RTS5260_DVCC_33);
+
+ rtsx_pci_write_register(pcr, PCLK_CTL, PCLK_MODE_SEL, PCLK_MODE_SEL);
+
+ rts5260_init_hw(pcr);
+
+ /*
+	 * If u_force_clkreq_0 is enabled, the CLKREQ# pin will be driven
+	 * low, forcibly requesting the clock.
+ */
+ if (option->force_clkreq_0)
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+ else
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+
+ return 0;
+}
+
+void rts5260_set_aspm(struct rtsx_pcr *pcr, bool enable)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ u8 val = 0;
+
+ if (pcr->aspm_enabled == enable)
+ return;
+
+ if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) {
+ if (enable)
+ val = pcr->aspm_en;
+ rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
+ ASPM_MASK_NEG, val);
+ } else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) {
+ u8 mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0;
+
+ if (!enable)
+ val = FORCE_ASPM_CTL0;
+ rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
+ }
+
+ pcr->aspm_enabled = enable;
+}
+
+static void rts5260_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ u32 interrupt = rtsx_pci_readl(pcr, RTSX_BIPR);
+ int card_exist = (interrupt & SD_EXIST) | (interrupt & MS_EXIST);
+ int aspm_L1_1, aspm_L1_2;
+ u8 val = 0;
+
+ aspm_L1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN);
+ aspm_L1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN);
+
+ if (active) {
+ /* run, latency: 60us */
+ if (aspm_L1_1)
+ val = option->ltr_l1off_snooze_sspwrgate;
+ } else {
+ /* l1off, latency: 300us */
+ if (aspm_L1_2)
+ val = option->ltr_l1off_sspwrgate;
+ }
+
+ if (aspm_L1_1 || aspm_L1_2) {
+ if (rtsx_check_dev_flag(pcr,
+ LTR_L1SS_PWR_GATE_CHECK_CARD_EN)) {
+ if (card_exist)
+ val &= ~L1OFF_MBIAS2_EN_5250;
+ else
+ val |= L1OFF_MBIAS2_EN_5250;
+ }
+ }
+ rtsx_set_l1off_sub(pcr, val);
+}
+
+static const struct pcr_ops rts5260_pcr_ops = {
+ .fetch_vendor_settings = rtsx_base_fetch_vendor_settings,
+ .turn_on_led = rts5260_turn_on_led,
+ .turn_off_led = rts5260_turn_off_led,
+ .extra_init_hw = rts5260_extra_init_hw,
+ .enable_auto_blink = rtsx_base_enable_auto_blink,
+ .disable_auto_blink = rtsx_base_disable_auto_blink,
+ .card_power_on = rts5260_card_power_on,
+ .card_power_off = rts5260_card_power_off,
+ .switch_output_voltage = rts5260_switch_output_voltage,
+ .force_power_down = rtsx_base_force_power_down,
+ .stop_cmd = rts5260_stop_cmd,
+ .set_aspm = rts5260_set_aspm,
+ .set_l1off_cfg_sub_d0 = rts5260_set_l1off_cfg_sub_d0,
+ .enable_ocp = rts5260_enable_ocp,
+ .disable_ocp = rts5260_disable_ocp,
+ .init_ocp = rts5260_init_ocp,
+ .process_ocp = rts5260_process_ocp,
+ .get_ocpstat = rts5260_get_ocpstat,
+ .clear_ocpstat = rts5260_clear_ocpstat,
+};
+
+void rts5260_init_params(struct rtsx_pcr *pcr)
+{
+ struct rtsx_cr_option *option = &pcr->option;
+ struct rtsx_hw_param *hw_param = &pcr->hw_param;
+
+ pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
+ pcr->num_slots = 2;
+
+ pcr->flags = 0;
+ pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT;
+ pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
+ pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
+ pcr->aspm_en = ASPM_L1_EN;
+ pcr->tx_initial_phase = SET_CLOCK_PHASE(1, 29, 16);
+ pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
+
+ pcr->ic_version = rts5260_get_ic_version(pcr);
+ pcr->sd_pull_ctl_enable_tbl = rts5260_sd_pull_ctl_enable_tbl;
+ pcr->sd_pull_ctl_disable_tbl = rts5260_sd_pull_ctl_disable_tbl;
+ pcr->ms_pull_ctl_enable_tbl = rts5260_ms_pull_ctl_enable_tbl;
+ pcr->ms_pull_ctl_disable_tbl = rts5260_ms_pull_ctl_disable_tbl;
+
+ pcr->reg_pm_ctrl3 = RTS524A_PM_CTRL3;
+
+ pcr->ops = &rts5260_pcr_ops;
+
+ option->dev_flags = (LTR_L1SS_PWR_GATE_CHECK_CARD_EN
+ | LTR_L1SS_PWR_GATE_EN);
+ option->ltr_en = true;
+
+ /* init latency of active, idle, L1OFF to 60us, 300us, 3ms */
+ option->ltr_active_latency = LTR_ACTIVE_LATENCY_DEF;
+ option->ltr_idle_latency = LTR_IDLE_LATENCY_DEF;
+ option->ltr_l1off_latency = LTR_L1OFF_LATENCY_DEF;
+ option->dev_aspm_mode = DEV_ASPM_DYNAMIC;
+ option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF;
+ option->ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
+ option->ltr_l1off_snooze_sspwrgate =
+ LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF;
+
+ option->ocp_en = 1;
+ if (option->ocp_en)
+ hw_param->interrupt_en |= SD_OC_INT_EN;
+ hw_param->ocp_glitch = SD_OCP_GLITCH_10M | SDVIO_OCP_GLITCH_800U;
+ option->sd_400mA_ocp_thd = RTS5260_DVCC_OCP_THD_550;
+ option->sd_800mA_ocp_thd = RTS5260_DVCC_OCP_THD_970;
+}
diff --git a/drivers/misc/cardreader/rts5260.h b/drivers/misc/cardreader/rts5260.h
new file mode 100644
index 000000000000..53a1411c8868
--- /dev/null
+++ b/drivers/misc/cardreader/rts5260.h
@@ -0,0 +1,45 @@
+#ifndef __RTS5260_H__
+#define __RTS5260_H__
+
+#define RTS5260_DVCC_CTRL 0xFF73
+#define RTS5260_DVCC_OCP_EN (0x01 << 7)
+#define RTS5260_DVCC_OCP_THD_MASK (0x07 << 4)
+#define RTS5260_DVCC_POWERON (0x01 << 3)
+#define RTS5260_DVCC_OCP_CL_EN (0x01 << 2)
+
+#define RTS5260_DVIO_CTRL 0xFF75
+#define RTS5260_DVIO_OCP_EN (0x01 << 7)
+#define RTS5260_DVIO_OCP_THD_MASK (0x07 << 4)
+#define RTS5260_DVIO_POWERON (0x01 << 3)
+#define RTS5260_DVIO_OCP_CL_EN (0x01 << 2)
+
+#define RTS5260_DV331812_CFG 0xFF71
+#define RTS5260_DV331812_OCP_EN (0x01 << 7)
+#define RTS5260_DV331812_OCP_THD_MASK (0x07 << 4)
+#define RTS5260_DV331812_POWERON (0x01 << 3)
+#define RTS5260_DV331812_SEL (0x01 << 2)
+#define RTS5260_DV331812_VDD1 (0x01 << 2)
+#define RTS5260_DV331812_VDD2 (0x00 << 2)
+
+#define RTS5260_DV331812_OCP_THD_120 (0x00 << 4)
+#define RTS5260_DV331812_OCP_THD_140 (0x01 << 4)
+#define RTS5260_DV331812_OCP_THD_160 (0x02 << 4)
+#define RTS5260_DV331812_OCP_THD_180 (0x03 << 4)
+#define RTS5260_DV331812_OCP_THD_210 (0x04 << 4)
+#define RTS5260_DV331812_OCP_THD_240 (0x05 << 4)
+#define RTS5260_DV331812_OCP_THD_270 (0x06 << 4)
+#define RTS5260_DV331812_OCP_THD_300 (0x07 << 4)
+
+#define RTS5260_DVIO_OCP_THD_250 (0x00 << 4)
+#define RTS5260_DVIO_OCP_THD_300 (0x01 << 4)
+#define RTS5260_DVIO_OCP_THD_350 (0x02 << 4)
+#define RTS5260_DVIO_OCP_THD_400 (0x03 << 4)
+#define RTS5260_DVIO_OCP_THD_450 (0x04 << 4)
+#define RTS5260_DVIO_OCP_THD_500 (0x05 << 4)
+#define RTS5260_DVIO_OCP_THD_550 (0x06 << 4)
+#define RTS5260_DVIO_OCP_THD_600 (0x07 << 4)
+
+#define RTS5260_DVCC_OCP_THD_550 (0x00 << 4)
+#define RTS5260_DVCC_OCP_THD_970 (0x05 << 4)
+
+#endif
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index c3ed885c155c..fd09b0960097 100644
--- a/drivers/mfd/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -29,7 +29,7 @@
#include <linux/idr.h>
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#include <linux/mmc/card.h>
#include <asm/unaligned.h>
@@ -62,6 +62,7 @@ static const struct pci_device_id rtsx_pci_ids[] = {
{ PCI_DEVICE(0x10EC, 0x5286), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x524A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
+ { PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ 0, }
};
@@ -334,6 +335,9 @@ EXPORT_SYMBOL_GPL(rtsx_pci_read_phy_register);
void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr)
{
+ if (pcr->ops->stop_cmd)
+ return pcr->ops->stop_cmd(pcr);
+
rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
@@ -826,7 +830,7 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
return err;
/* Wait SSC clock stable */
- udelay(10);
+ udelay(SSC_CLOCK_STABLE_WAIT);
err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
if (err < 0)
return err;
@@ -963,6 +967,20 @@ static void rtsx_pci_card_detect(struct work_struct *work)
pcr->slots[RTSX_MS_CARD].p_dev);
}
+void rtsx_pci_process_ocp(struct rtsx_pcr *pcr)
+{
+ if (pcr->ops->process_ocp)
+ pcr->ops->process_ocp(pcr);
+}
+
+int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr)
+{
+ if (pcr->option.ocp_en)
+ rtsx_pci_process_ocp(pcr);
+
+ return 0;
+}
+
static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
{
struct rtsx_pcr *pcr = dev_id;
@@ -987,6 +1005,9 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
int_reg &= (pcr->bier | 0x7FFFFF);
+ if (int_reg & SD_OC_INT)
+ rtsx_pci_process_ocp_interrupt(pcr);
+
if (int_reg & SD_INT) {
if (int_reg & SD_EXIST) {
pcr->card_inserted |= SD_EXIST;
@@ -1119,6 +1140,102 @@ static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
}
#endif
+void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr)
+{
+ u8 val = SD_OCP_INT_EN | SD_DETECT_EN;
+
+ if (pcr->ops->enable_ocp)
+ pcr->ops->enable_ocp(pcr);
+ else
+ rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
+}
+
+void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr)
+{
+ u8 mask = SD_OCP_INT_EN | SD_DETECT_EN;
+
+ if (pcr->ops->disable_ocp)
+ pcr->ops->disable_ocp(pcr);
+ else
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+}
+
+void rtsx_pci_init_ocp(struct rtsx_pcr *pcr)
+{
+ if (pcr->ops->init_ocp) {
+ pcr->ops->init_ocp(pcr);
+ } else {
+ struct rtsx_cr_option *option = &(pcr->option);
+
+ if (option->ocp_en) {
+ u8 val = option->sd_400mA_ocp_thd;
+
+ rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
+ rtsx_pci_write_register(pcr, REG_OCPPARA1,
+ SD_OCP_TIME_MASK, SD_OCP_TIME_800);
+ rtsx_pci_write_register(pcr, REG_OCPPARA2,
+ SD_OCP_THD_MASK, val);
+ rtsx_pci_write_register(pcr, REG_OCPGLITCH,
+ SD_OCP_GLITCH_MASK, pcr->hw_param.ocp_glitch);
+ rtsx_pci_enable_ocp(pcr);
+ } else {
+ /* OC power down */
+ rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
+ OC_POWER_DOWN);
+ }
+ }
+}
+
+int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val)
+{
+ if (pcr->ops->get_ocpstat)
+ return pcr->ops->get_ocpstat(pcr, val);
+ else
+ return rtsx_pci_read_register(pcr, REG_OCPSTAT, val);
+}
+
+void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr)
+{
+ if (pcr->ops->clear_ocpstat) {
+ pcr->ops->clear_ocpstat(pcr);
+ } else {
+ u8 mask = SD_OCP_INT_CLR | SD_OC_CLR;
+ u8 val = SD_OCP_INT_CLR | SD_OC_CLR;
+
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
+ rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
+ }
+}
+
+int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
+ MS_CLK_EN | SD40_CLK_EN, 0);
+ rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
+
+ rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
+
+ msleep(50);
+
+ rtsx_pci_card_pull_ctl_disable(pcr, RTSX_SD_CARD);
+
+ return 0;
+}
+
+int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr)
+{
+ rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
+ MS_CLK_EN | SD40_CLK_EN, 0);
+
+ rtsx_pci_card_pull_ctl_disable(pcr, RTSX_MS_CARD);
+
+ rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, 0);
+ rtsx_pci_card_power_off(pcr, RTSX_MS_CARD);
+
+ return 0;
+}
+
static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
{
int err;
@@ -1189,6 +1306,7 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
case PID_5250:
case PID_524A:
case PID_525A:
+ case PID_5260:
rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
break;
default:
@@ -1265,6 +1383,9 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
case 0x5286:
rtl8402_init_params(pcr);
break;
+ case 0x5260:
+ rts5260_init_params(pcr);
+ break;
}
pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
diff --git a/drivers/mfd/rtsx_pcr.h b/drivers/misc/cardreader/rtsx_pcr.h
index ec784e04fe20..6ea1655db0bb 100644
--- a/drivers/mfd/rtsx_pcr.h
+++ b/drivers/misc/cardreader/rtsx_pcr.h
@@ -22,7 +22,7 @@
#ifndef __RTSX_PCR_H
#define __RTSX_PCR_H
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#define MIN_DIV_N_PCR 80
#define MAX_DIV_N_PCR 208
@@ -44,6 +44,8 @@
#define ASPM_MASK_NEG 0xFC
#define MASK_8_BIT_DEF 0xFF
+#define SSC_CLOCK_STABLE_WAIT 130
+
int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val);
int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val);
@@ -57,6 +59,7 @@ void rts5249_init_params(struct rtsx_pcr *pcr);
void rts524a_init_params(struct rtsx_pcr *pcr);
void rts525a_init_params(struct rtsx_pcr *pcr);
void rtl8411b_init_params(struct rtsx_pcr *pcr);
+void rts5260_init_params(struct rtsx_pcr *pcr);
static inline u8 map_sd_drive(int idx)
{
@@ -99,5 +102,12 @@ do { \
int rtsx_gops_pm_reset(struct rtsx_pcr *pcr);
int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency);
int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val);
+void rtsx_pci_init_ocp(struct rtsx_pcr *pcr);
+void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr);
+void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr);
+int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val);
+void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr);
+int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr);
+int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr);
#endif
diff --git a/drivers/mfd/rtsx_usb.c b/drivers/misc/cardreader/rtsx_usb.c
index 59d61b04c197..b97903ff1a72 100644
--- a/drivers/mfd/rtsx_usb.c
+++ b/drivers/misc/cardreader/rtsx_usb.c
@@ -23,7 +23,7 @@
#include <linux/usb.h>
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
-#include <linux/mfd/rtsx_usb.h>
+#include <linux/rtsx_usb.h>
static int polling_pipe = 1;
module_param(polling_pipe, int, S_IRUGO | S_IWUSR);
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index 7c11bad5cded..753b1a698fc4 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -427,7 +427,7 @@ int cxl_fd_mmap(struct file *file, struct vm_area_struct *vm)
return afu_mmap(file, vm);
}
EXPORT_SYMBOL_GPL(cxl_fd_mmap);
-unsigned int cxl_fd_poll(struct file *file, struct poll_table_struct *poll)
+__poll_t cxl_fd_poll(struct file *file, struct poll_table_struct *poll)
{
return afu_poll(file, poll);
}
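
This and the following hunks belong to the tree-wide conversion of poll handlers from unsigned int to the annotated __poll_t type, paired with the EPOLL* constants so sparse can flag mismatched event masks. The resulting handler shape, sketched with a hypothetical device and predicates:

#include <linux/poll.h>

struct demo_device {			/* hypothetical device state */
	wait_queue_head_t waitq;
};

static bool data_ready(struct demo_device *dev);	/* hypothetical */
static bool room_to_write(struct demo_device *dev);	/* hypothetical */

static __poll_t demo_poll(struct file *file, poll_table *wait)
{
	struct demo_device *dev = file->private_data;
	__poll_t mask = 0;

	/* Register the wait queue; ->poll() itself never blocks. */
	poll_wait(file, &dev->waitq, wait);

	if (data_ready(dev))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (room_to_write(dev))
		mask |= EPOLLOUT | EPOLLWRNORM;

	return mask;
}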
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index e46a4062904a..a798c2ccd67d 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -1081,7 +1081,7 @@ int afu_open(struct inode *inode, struct file *file);
int afu_release(struct inode *inode, struct file *file);
long afu_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int afu_mmap(struct file *file, struct vm_area_struct *vm);
-unsigned int afu_poll(struct file *file, struct poll_table_struct *poll);
+__poll_t afu_poll(struct file *file, struct poll_table_struct *poll);
ssize_t afu_read(struct file *file, char __user *buf, size_t count, loff_t *off);
extern const struct file_operations afu_fops;
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index 76c0b0ca9388..90341ccda9bd 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -354,10 +354,10 @@ static inline bool ctx_event_pending(struct cxl_context *ctx)
return false;
}
-unsigned int afu_poll(struct file *file, struct poll_table_struct *poll)
+__poll_t afu_poll(struct file *file, struct poll_table_struct *poll)
{
struct cxl_context *ctx = file->private_data;
- int mask = 0;
+ __poll_t mask = 0;
unsigned long flags;
diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
index 512a4897dbf6..7fd0bdc1436a 100644
--- a/drivers/misc/cxl/vphb.c
+++ b/drivers/misc/cxl/vphb.c
@@ -54,7 +54,7 @@ static bool cxl_pci_enable_device_hook(struct pci_dev *dev)
return false;
}
- set_dma_ops(&dev->dev, &dma_direct_ops);
+ set_dma_ops(&dev->dev, &dma_nommu_ops);
set_dma_offset(&dev->dev, PAGE_OFFSET);
return _cxl_pci_associate_default_context(dev, afu);
diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c
index 097e3092c158..95ce3e891b1b 100644
--- a/drivers/misc/hpilo.c
+++ b/drivers/misc/hpilo.c
@@ -514,7 +514,7 @@ static ssize_t ilo_write(struct file *fp, const char __user *buf,
return err ? -EFAULT : len;
}
-static unsigned int ilo_poll(struct file *fp, poll_table *wait)
+static __poll_t ilo_poll(struct file *fp, poll_table *wait)
{
struct ccb_data *data = fp->private_data;
struct ccb *driver_ccb = &data->driver_ccb;
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
index 8d53609861d8..e49888eab87d 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
@@ -651,7 +651,7 @@ out:
return retval;
}
-static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
+static __poll_t lis3lv02d_misc_poll(struct file *file, poll_table *wait)
{
struct lis3lv02d *lis3 = container_of(file->private_data,
struct lis3lv02d, miscdev);
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index e825f013e54e..505b710291e6 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -542,12 +542,12 @@ static long mei_compat_ioctl(struct file *file,
*
* Return: poll mask
*/
-static unsigned int mei_poll(struct file *file, poll_table *wait)
+static __poll_t mei_poll(struct file *file, poll_table *wait)
{
- unsigned long req_events = poll_requested_events(wait);
+ __poll_t req_events = poll_requested_events(wait);
struct mei_cl *cl = file->private_data;
struct mei_device *dev;
- unsigned int mask = 0;
+ __poll_t mask = 0;
bool notify_en;
if (WARN_ON(!cl || !cl->dev))
diff --git a/drivers/misc/mic/scif/scif_api.c b/drivers/misc/mic/scif/scif_api.c
index ddc9e4b08b5c..8a3e48ec37dd 100644
--- a/drivers/misc/mic/scif/scif_api.c
+++ b/drivers/misc/mic/scif/scif_api.c
@@ -1311,10 +1311,10 @@ static inline void _scif_poll_wait(struct file *f, wait_queue_head_t *wq,
spin_lock(&ep->lock);
}
-unsigned int
+__poll_t
__scif_pollfd(struct file *f, poll_table *wait, struct scif_endpt *ep)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
dev_dbg(scif_info.mdev.this_device,
"SCIFAPI pollfd: ep %p %s\n", ep, scif_ep_states[ep->state]);
@@ -1389,7 +1389,8 @@ scif_poll(struct scif_pollepd *ufds, unsigned int nfds, long timeout_msecs)
{
struct poll_wqueues table;
poll_table *pt;
- int i, mask, count = 0, timed_out = timeout_msecs == 0;
+ int i, count = 0, timed_out = timeout_msecs == 0;
+ __poll_t mask;
u64 timeout = timeout_msecs < 0 ? MAX_SCHEDULE_TIMEOUT
: msecs_to_jiffies(timeout_msecs);
diff --git a/drivers/misc/mic/scif/scif_epd.h b/drivers/misc/mic/scif/scif_epd.h
index 1771d7a9b8d0..f39b663da287 100644
--- a/drivers/misc/mic/scif/scif_epd.h
+++ b/drivers/misc/mic/scif/scif_epd.h
@@ -203,7 +203,7 @@ void scif_clientrcvd(struct scif_dev *scifdev, struct scifmsg *msg);
int __scif_connect(scif_epd_t epd, struct scif_port_id *dst, bool non_block);
int __scif_flush(scif_epd_t epd);
int scif_mmap(struct vm_area_struct *vma, scif_epd_t epd);
-unsigned int __scif_pollfd(struct file *f, poll_table *wait,
+__poll_t __scif_pollfd(struct file *f, poll_table *wait,
struct scif_endpt *ep);
int __scif_pin_pages(void *addr, size_t len, int *out_prot,
int map_flags, scif_pinned_pages_t *pages);
diff --git a/drivers/misc/mic/scif/scif_fd.c b/drivers/misc/mic/scif/scif_fd.c
index f7e826142a72..5c2a57ae4f85 100644
--- a/drivers/misc/mic/scif/scif_fd.c
+++ b/drivers/misc/mic/scif/scif_fd.c
@@ -41,7 +41,7 @@ static int scif_fdmmap(struct file *f, struct vm_area_struct *vma)
return scif_mmap(vma, priv);
}
-static unsigned int scif_fdpoll(struct file *f, poll_table *wait)
+static __poll_t scif_fdpoll(struct file *f, poll_table *wait)
{
struct scif_endpt *priv = f->private_data;
diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c
index fed992e2c258..4120ed8f0cae 100644
--- a/drivers/misc/mic/vop/vop_vringh.c
+++ b/drivers/misc/mic/vop/vop_vringh.c
@@ -1023,10 +1023,10 @@ __unlock_ret:
* in the card->host (TX) path, when userspace is unblocked by poll it
* must drain all available descriptors or it can stall.
*/
-static unsigned int vop_poll(struct file *f, poll_table *wait)
+static __poll_t vop_poll(struct file *f, poll_table *wait)
{
struct vop_vdev *vdev = f->private_data;
- int mask = 0;
+ __poll_t mask = 0;
mutex_lock(&vdev->vdev_mutex);
if (vop_vdev_inited(vdev)) {
diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c
index 30754927fd80..8fa68cf308e0 100644
--- a/drivers/misc/phantom.c
+++ b/drivers/misc/phantom.c
@@ -256,10 +256,10 @@ static int phantom_release(struct inode *inode, struct file *file)
return 0;
}
-static unsigned int phantom_poll(struct file *file, poll_table *wait)
+static __poll_t phantom_poll(struct file *file, poll_table *wait)
{
struct phantom_device *dev = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
pr_debug("phantom_poll: %d\n", atomic_read(&dev->counter));
poll_wait(file, &dev->wait, wait);
diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
index 8a16a26e9658..6640e7651533 100644
--- a/drivers/misc/vmw_vmci/vmci_host.c
+++ b/drivers/misc/vmw_vmci/vmci_host.c
@@ -166,11 +166,11 @@ static int vmci_host_close(struct inode *inode, struct file *filp)
* This is used to wake up the VMX when a VMCI call arrives, or
* to wake up select() or poll() at the next clock tick.
*/
-static unsigned int vmci_host_poll(struct file *filp, poll_table *wait)
+static __poll_t vmci_host_poll(struct file *filp, poll_table *wait)
{
struct vmci_host_dev *vmci_host_dev = filp->private_data;
struct vmci_ctx *context = vmci_host_dev->context;
- unsigned int mask = 0;
+ __poll_t mask = 0;
if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
/* Check for VMCI calls to this VM context. */
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index ccfa98af1dd3..20135a5de748 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -63,7 +63,13 @@ MODULE_ALIAS("mmc:block");
#endif
#define MODULE_PARAM_PREFIX "mmcblk."
-#define MMC_BLK_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
+/*
+ * Set a 10 second timeout for polling write request busy state. Note, mmc core
+ * is setting a 3 second timeout for SD cards, and SDHCI has long had a 10
+ * second software timer to timeout the whole request, so 10 seconds should be
+ * ample.
+ */
+#define MMC_BLK_TIMEOUT_MS (10 * 1000)
#define MMC_SANITIZE_REQ_TIMEOUT 240000
#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
@@ -112,6 +118,7 @@ struct mmc_blk_data {
#define MMC_BLK_WRITE BIT(1)
#define MMC_BLK_DISCARD BIT(2)
#define MMC_BLK_SECDISCARD BIT(3)
+#define MMC_BLK_CQE_RECOVERY BIT(4)
/*
* Only set in main mmc_blk_data associated
@@ -189,7 +196,7 @@ static void mmc_blk_put(struct mmc_blk_data *md)
md->usage--;
if (md->usage == 0) {
int devidx = mmc_get_devidx(md->disk);
- blk_cleanup_queue(md->queue.queue);
+ blk_put_queue(md->queue.queue);
ida_simple_remove(&mmc_blk_ida, devidx);
put_disk(md->disk);
kfree(md);
@@ -921,14 +928,54 @@ static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
return 0;
}
+static unsigned int mmc_blk_clock_khz(struct mmc_host *host)
+{
+ if (host->actual_clock)
+ return host->actual_clock / 1000;
+
+ /* Clock may be subject to a divisor, fudge it by a factor of 2. */
+ if (host->ios.clock)
+ return host->ios.clock / 2000;
+
+	/* How can there be no clock? */
+ WARN_ON_ONCE(1);
+ return 100; /* 100 kHz is minimum possible value */
+}
+
+static unsigned int mmc_blk_data_timeout_ms(struct mmc_host *host,
+ struct mmc_data *data)
+{
+ unsigned int ms = DIV_ROUND_UP(data->timeout_ns, 1000000);
+ unsigned int khz;
+
+ if (data->timeout_clks) {
+ khz = mmc_blk_clock_khz(host);
+ ms += DIV_ROUND_UP(data->timeout_clks, khz);
+ }
+
+ return ms;
+}
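
A worked example of the conversion above, with illustrative numbers rather than anything taken from the patch:

/*
 * Suppose a card reports timeout_ns = 100000000 (100 ms) and
 * timeout_clks = 100000, on a host whose actual_clock is 50 MHz.
 * mmc_blk_clock_khz() returns 50000, so the clock-count term adds
 * DIV_ROUND_UP(100000, 50000) = 2 ms, giving a total budget of 102 ms.
 */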
+
+static inline bool mmc_blk_in_tran_state(u32 status)
+{
+ /*
+ * Some cards mishandle the status bits, so make sure to check both the
+ * busy indication and the card state.
+ */
+ return status & R1_READY_FOR_DATA &&
+ (R1_CURRENT_STATE(status) == R1_STATE_TRAN);
+}
+
static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
- bool hw_busy_detect, struct request *req, bool *gen_err)
+ struct request *req, u32 *resp_errs)
{
unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
int err = 0;
u32 status;
do {
+ bool done = time_after(jiffies, timeout);
+
err = __mmc_send_status(card, &status, 5);
if (err) {
pr_err("%s: error %d requesting status\n",
@@ -936,25 +983,18 @@ static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
return err;
}
- if (status & R1_ERROR) {
- pr_err("%s: %s: error sending status cmd, status %#x\n",
- req->rq_disk->disk_name, __func__, status);
- *gen_err = true;
- }
-
- /* We may rely on the host hw to handle busy detection.*/
- if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) &&
- hw_busy_detect)
- break;
+ /* Accumulate any response error bits seen */
+ if (resp_errs)
+ *resp_errs |= status;
/*
* Timeout if the device never becomes ready for data and never
* leaves the program state.
*/
- if (time_after(jiffies, timeout)) {
- pr_err("%s: Card stuck in programming state! %s %s\n",
+ if (done) {
+ pr_err("%s: Card stuck in wrong state! %s %s status: %#x\n",
mmc_hostname(card->host),
- req->rq_disk->disk_name, __func__);
+ req->rq_disk->disk_name, __func__, status);
return -ETIMEDOUT;
}
@@ -963,229 +1003,11 @@ static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
* so make sure to check both the busy
* indication and the card state.
*/
- } while (!(status & R1_READY_FOR_DATA) ||
- (R1_CURRENT_STATE(status) == R1_STATE_PRG));
+ } while (!mmc_blk_in_tran_state(status));
return err;
}
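
One subtlety in the reworked loop above: the deadline is sampled into done before the status query is issued, so an iteration that begins right at the timeout still evaluates one final, fresh status before giving up with -ETIMEDOUT. The pattern in isolation, with a hypothetical try_once() predicate:

#include <stdbool.h>
#include <time.h>

/*
 * Poll try_once() until it succeeds or timeout_ms elapses. The deadline
 * is checked *before* each attempt, so the last attempt still observes
 * fresh state even when it starts at (or just past) the deadline.
 */
static int poll_with_deadline(bool (*try_once)(void), long timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		clock_gettime(CLOCK_MONOTONIC, &now);
		long elapsed_ms = (now.tv_sec - start.tv_sec) * 1000L +
				  (now.tv_nsec - start.tv_nsec) / 1000000L;
		bool done = elapsed_ms > timeout_ms;

		if (try_once())
			return 0;	/* reached the desired state */
		if (done)
			return -1;	/* timed out in the wrong state */
	}
}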
-static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
- struct request *req, bool *gen_err, u32 *stop_status)
-{
- struct mmc_host *host = card->host;
- struct mmc_command cmd = {};
- int err;
- bool use_r1b_resp = rq_data_dir(req) == WRITE;
-
- /*
- * Normally we use R1B responses for WRITE, but in cases where the host
- * has specified a max_busy_timeout we need to validate it. A failure
- * means we need to prevent the host from doing hw busy detection, which
- * is done by converting to a R1 response instead.
- */
- if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
- use_r1b_resp = false;
-
- cmd.opcode = MMC_STOP_TRANSMISSION;
- if (use_r1b_resp) {
- cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
- cmd.busy_timeout = timeout_ms;
- } else {
- cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
- }
-
- err = mmc_wait_for_cmd(host, &cmd, 5);
- if (err)
- return err;
-
- *stop_status = cmd.resp[0];
-
- /* No need to check card status in case of READ. */
- if (rq_data_dir(req) == READ)
- return 0;
-
- if (!mmc_host_is_spi(host) &&
- (*stop_status & R1_ERROR)) {
- pr_err("%s: %s: general error sending stop command, resp %#x\n",
- req->rq_disk->disk_name, __func__, *stop_status);
- *gen_err = true;
- }
-
- return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err);
-}
-
-#define ERR_NOMEDIUM 3
-#define ERR_RETRY 2
-#define ERR_ABORT 1
-#define ERR_CONTINUE 0
-
-static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
- bool status_valid, u32 status)
-{
- switch (error) {
- case -EILSEQ:
- /* response crc error, retry the r/w cmd */
- pr_err("%s: %s sending %s command, card status %#x\n",
- req->rq_disk->disk_name, "response CRC error",
- name, status);
- return ERR_RETRY;
-
- case -ETIMEDOUT:
- pr_err("%s: %s sending %s command, card status %#x\n",
- req->rq_disk->disk_name, "timed out", name, status);
-
- /* If the status cmd initially failed, retry the r/w cmd */
- if (!status_valid) {
- pr_err("%s: status not valid, retrying timeout\n",
- req->rq_disk->disk_name);
- return ERR_RETRY;
- }
-
- /*
- * If it was a r/w cmd crc error, or illegal command
- * (eg, issued in wrong state) then retry - we should
- * have corrected the state problem above.
- */
- if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
- pr_err("%s: command error, retrying timeout\n",
- req->rq_disk->disk_name);
- return ERR_RETRY;
- }
-
- /* Otherwise abort the command */
- return ERR_ABORT;
-
- default:
- /* We don't understand the error code the driver gave us */
- pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
- req->rq_disk->disk_name, error, status);
- return ERR_ABORT;
- }
-}
-
-/*
- * Initial r/w and stop cmd error recovery.
- * We don't know whether the card received the r/w cmd or not, so try to
- * restore things back to a sane state. Essentially, we do this as follows:
- * - Obtain card status. If the first attempt to obtain card status fails,
- * the status word will reflect the failed status cmd, not the failed
- * r/w cmd. If we fail to obtain card status, it suggests we can no
- * longer communicate with the card.
- * - Check the card state. If the card received the cmd but there was a
- * transient problem with the response, it might still be in a data transfer
- * mode. Try to send it a stop command. If this fails, we can't recover.
- * - If the r/w cmd failed due to a response CRC error, it was probably
- * transient, so retry the cmd.
- * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
- * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
- * illegal cmd, retry.
- * Otherwise we don't understand what happened, so abort.
- */
-static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
- struct mmc_blk_request *brq, bool *ecc_err, bool *gen_err)
-{
- bool prev_cmd_status_valid = true;
- u32 status, stop_status = 0;
- int err, retry;
-
- if (mmc_card_removed(card))
- return ERR_NOMEDIUM;
-
- /*
- * Try to get card status which indicates both the card state
- * and why there was no response. If the first attempt fails,
- * we can't be sure the returned status is for the r/w command.
- */
- for (retry = 2; retry >= 0; retry--) {
- err = __mmc_send_status(card, &status, 0);
- if (!err)
- break;
-
- /* Re-tune if needed */
- mmc_retune_recheck(card->host);
-
- prev_cmd_status_valid = false;
- pr_err("%s: error %d sending status command, %sing\n",
- req->rq_disk->disk_name, err, retry ? "retry" : "abort");
- }
-
- /* We couldn't get a response from the card. Give up. */
- if (err) {
- /* Check if the card is removed */
- if (mmc_detect_card_removed(card->host))
- return ERR_NOMEDIUM;
- return ERR_ABORT;
- }
-
- /* Flag ECC errors */
- if ((status & R1_CARD_ECC_FAILED) ||
- (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
- (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
- *ecc_err = true;
-
- /* Flag General errors */
- if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
- if ((status & R1_ERROR) ||
- (brq->stop.resp[0] & R1_ERROR)) {
- pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
- req->rq_disk->disk_name, __func__,
- brq->stop.resp[0], status);
- *gen_err = true;
- }
-
- /*
- * Check the current card state. If it is in some data transfer
- * mode, tell it to stop (and hopefully transition back to TRAN.)
- */
- if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
- R1_CURRENT_STATE(status) == R1_STATE_RCV) {
- err = send_stop(card,
- DIV_ROUND_UP(brq->data.timeout_ns, 1000000),
- req, gen_err, &stop_status);
- if (err) {
- pr_err("%s: error %d sending stop command\n",
- req->rq_disk->disk_name, err);
- /*
- * If the stop cmd also timed out, the card is probably
- * not present, so abort. Other errors are bad news too.
- */
- return ERR_ABORT;
- }
-
- if (stop_status & R1_CARD_ECC_FAILED)
- *ecc_err = true;
- }
-
- /* Check for set block count errors */
- if (brq->sbc.error)
- return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
- prev_cmd_status_valid, status);
-
- /* Check for r/w command errors */
- if (brq->cmd.error)
- return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
- prev_cmd_status_valid, status);
-
- /* Data errors */
- if (!brq->stop.error)
- return ERR_CONTINUE;
-
- /* Now for stop errors. These aren't fatal to the transfer. */
- pr_info("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
- req->rq_disk->disk_name, brq->stop.error,
- brq->cmd.resp[0], status);
-
- /*
- * Substitute in our own stop status as this will give the error
- * state which happened during the execution of the r/w command.
- */
- if (stop_status) {
- brq->stop.resp[0] = stop_status;
- brq->stop.error = 0;
- }
- return ERR_CONTINUE;
-}
-
static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
int type)
{
@@ -1281,7 +1103,7 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
break;
}
mq_rq->drv_op_result = ret;
- blk_end_request_all(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
+ blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}
static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
@@ -1324,7 +1146,7 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
else
mmc_blk_reset_success(md, type);
fail:
- blk_end_request(req, status, blk_rq_bytes(req));
+ blk_mq_end_request(req, status);
}
static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
@@ -1394,7 +1216,7 @@ out_retry:
if (!err)
mmc_blk_reset_success(md, type);
out:
- blk_end_request(req, status, blk_rq_bytes(req));
+ blk_mq_end_request(req, status);
}
static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
@@ -1404,7 +1226,7 @@ static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
int ret = 0;
ret = mmc_flush_cache(card);
- blk_end_request_all(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
+ blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}
/*
@@ -1430,15 +1252,18 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
}
}
-#define CMD_ERRORS \
- (R1_OUT_OF_RANGE | /* Command argument out of range */ \
- R1_ADDRESS_ERROR | /* Misaligned address */ \
+#define CMD_ERRORS_EXCL_OOR \
+ (R1_ADDRESS_ERROR | /* Misaligned address */ \
R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\
R1_WP_VIOLATION | /* Tried to write to protected block */ \
R1_CARD_ECC_FAILED | /* Card ECC failed */ \
R1_CC_ERROR | /* Card controller error */ \
R1_ERROR) /* General/unknown error */
+#define CMD_ERRORS \
+ (CMD_ERRORS_EXCL_OOR | \
+ R1_OUT_OF_RANGE) /* Command argument out of range */
+
static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq)
{
u32 val;
@@ -1481,116 +1306,6 @@ static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq)
}
}
-static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
- struct mmc_async_req *areq)
-{
- struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
- areq);
- struct mmc_blk_request *brq = &mq_mrq->brq;
- struct request *req = mmc_queue_req_to_req(mq_mrq);
- int need_retune = card->host->need_retune;
- bool ecc_err = false;
- bool gen_err = false;
-
- /*
- * sbc.error indicates a problem with the set block count
- * command. No data will have been transferred.
- *
- * cmd.error indicates a problem with the r/w command. No
- * data will have been transferred.
- *
- * stop.error indicates a problem with the stop command. Data
- * may have been transferred, or may still be transferring.
- */
-
- mmc_blk_eval_resp_error(brq);
-
- if (brq->sbc.error || brq->cmd.error ||
- brq->stop.error || brq->data.error) {
- switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
- case ERR_RETRY:
- return MMC_BLK_RETRY;
- case ERR_ABORT:
- return MMC_BLK_ABORT;
- case ERR_NOMEDIUM:
- return MMC_BLK_NOMEDIUM;
- case ERR_CONTINUE:
- break;
- }
- }
-
- /*
- * Check for errors relating to the execution of the
- * initial command - such as address errors. No data
- * has been transferred.
- */
- if (brq->cmd.resp[0] & CMD_ERRORS) {
- pr_err("%s: r/w command failed, status = %#x\n",
- req->rq_disk->disk_name, brq->cmd.resp[0]);
- return MMC_BLK_ABORT;
- }
-
- /*
- * Everything else is either success, or a data error of some
- * kind. If it was a write, we may have transitioned to
- * program mode, which we have to wait for it to complete.
- */
- if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
- int err;
-
- /* Check stop command response */
- if (brq->stop.resp[0] & R1_ERROR) {
- pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
- req->rq_disk->disk_name, __func__,
- brq->stop.resp[0]);
- gen_err = true;
- }
-
- err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req,
- &gen_err);
- if (err)
- return MMC_BLK_CMD_ERR;
- }
-
- /* if general error occurs, retry the write operation. */
- if (gen_err) {
- pr_warn("%s: retrying write for general error\n",
- req->rq_disk->disk_name);
- return MMC_BLK_RETRY;
- }
-
- /* Some errors (ECC) are flagged on the next command, so check stop, too */
- if (brq->data.error || brq->stop.error) {
- if (need_retune && !brq->retune_retry_done) {
- pr_debug("%s: retrying because a re-tune was needed\n",
- req->rq_disk->disk_name);
- brq->retune_retry_done = 1;
- return MMC_BLK_RETRY;
- }
- pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
- req->rq_disk->disk_name, brq->data.error ?: brq->stop.error,
- (unsigned)blk_rq_pos(req),
- (unsigned)blk_rq_sectors(req),
- brq->cmd.resp[0], brq->stop.resp[0]);
-
- if (rq_data_dir(req) == READ) {
- if (ecc_err)
- return MMC_BLK_ECC_ERR;
- return MMC_BLK_DATA_ERR;
- } else {
- return MMC_BLK_CMD_ERR;
- }
- }
-
- if (!brq->data.bytes_xfered)
- return MMC_BLK_RETRY;
-
- if (blk_rq_bytes(req) != brq->data.bytes_xfered)
- return MMC_BLK_PARTIAL;
-
- return MMC_BLK_SUCCESS;
-}
-
static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
int disable_multi, bool *do_rel_wr_p,
bool *do_data_tag_p)
@@ -1706,8 +1421,6 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
brq->data.sg_len = i;
}
- mqrq->areq.mrq = &brq->mrq;
-
if (do_rel_wr_p)
*do_rel_wr_p = do_rel_wr;
@@ -1715,6 +1428,138 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
*do_data_tag_p = do_data_tag;
}
+#define MMC_CQE_RETRIES 2
+
+static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_request *mrq = &mqrq->brq.mrq;
+ struct request_queue *q = req->q;
+ struct mmc_host *host = mq->card->host;
+ unsigned long flags;
+ bool put_card;
+ int err;
+
+ mmc_cqe_post_req(host, mrq);
+
+ if (mrq->cmd && mrq->cmd->error)
+ err = mrq->cmd->error;
+ else if (mrq->data && mrq->data->error)
+ err = mrq->data->error;
+ else
+ err = 0;
+
+ if (err) {
+ if (mqrq->retries++ < MMC_CQE_RETRIES)
+ blk_mq_requeue_request(req, true);
+ else
+ blk_mq_end_request(req, BLK_STS_IOERR);
+ } else if (mrq->data) {
+ if (blk_update_request(req, BLK_STS_OK, mrq->data->bytes_xfered))
+ blk_mq_requeue_request(req, true);
+ else
+ __blk_mq_end_request(req, BLK_STS_OK);
+ } else {
+ blk_mq_end_request(req, BLK_STS_OK);
+ }
+
+ spin_lock_irqsave(q->queue_lock, flags);
+
+ mq->in_flight[mmc_issue_type(mq, req)] -= 1;
+
+ put_card = (mmc_tot_in_flight(mq) == 0);
+
+ mmc_cqe_check_busy(mq);
+
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ if (!mq->cqe_busy)
+ blk_mq_run_hw_queues(q, true);
+
+ if (put_card)
+ mmc_put_card(mq->card, &mq->ctx);
+}
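
The completion logic above is a small decision tree: an error requeues the request until MMC_CQE_RETRIES is spent, a partial transfer requeues the remainder (blk_update_request() returns non-zero while bytes remain), and a full transfer ends the request. A minimal userspace sketch of just that logic, with local stand-ins for the block-layer calls:

#include <stdio.h>

#define CQE_RETRIES 2

enum action { END_OK, END_ERR, REQUEUE };

/* err: command/data error; xfered/len: bytes moved vs. requested */
static enum action cqe_complete(int err, unsigned int xfered,
                                unsigned int len, int *retries)
{
        if (err)
                return (*retries)++ < CQE_RETRIES ? REQUEUE : END_ERR;
        if (xfered < len)
                return REQUEUE;         /* blk_update_request() != 0 */
        return END_OK;
}

int main(void)
{
        int retries = 0;

        printf("%d\n", cqe_complete(-5, 0, 4096, &retries));   /* 2: requeue */
        printf("%d\n", cqe_complete(-5, 0, 4096, &retries));   /* 2: requeue */
        printf("%d\n", cqe_complete(-5, 0, 4096, &retries));   /* 1: error out */
        printf("%d\n", cqe_complete(0, 4096, 4096, &retries)); /* 0: done */
        return 0;
}
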
+
+void mmc_blk_cqe_recovery(struct mmc_queue *mq)
+{
+ struct mmc_card *card = mq->card;
+ struct mmc_host *host = card->host;
+ int err;
+
+ pr_debug("%s: CQE recovery start\n", mmc_hostname(host));
+
+ err = mmc_cqe_recovery(host);
+ if (err)
+ mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY);
+ else
+ mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);
+
+ pr_debug("%s: CQE recovery done\n", mmc_hostname(host));
+}
+
+static void mmc_blk_cqe_req_done(struct mmc_request *mrq)
+{
+ struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
+ brq.mrq);
+ struct request *req = mmc_queue_req_to_req(mqrq);
+ struct request_queue *q = req->q;
+ struct mmc_queue *mq = q->queuedata;
+
+ /*
+ * Block layer timeouts race with completions, which means the normal
+ * completion path cannot be used during recovery.
+ */
+ if (mq->in_recovery)
+ mmc_blk_cqe_complete_rq(mq, req);
+ else
+ blk_mq_complete_request(req);
+}
+
+static int mmc_blk_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ mrq->done = mmc_blk_cqe_req_done;
+ mrq->recovery_notifier = mmc_cqe_recovery_notifier;
+
+ return mmc_cqe_start_req(host, mrq);
+}
+
+static struct mmc_request *mmc_blk_cqe_prep_dcmd(struct mmc_queue_req *mqrq,
+ struct request *req)
+{
+ struct mmc_blk_request *brq = &mqrq->brq;
+
+ memset(brq, 0, sizeof(*brq));
+
+ brq->mrq.cmd = &brq->cmd;
+ brq->mrq.tag = req->tag;
+
+ return &brq->mrq;
+}
+
+static int mmc_blk_cqe_issue_flush(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_request *mrq = mmc_blk_cqe_prep_dcmd(mqrq, req);
+
+ mrq->cmd->opcode = MMC_SWITCH;
+ mrq->cmd->arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
+ (EXT_CSD_FLUSH_CACHE << 16) |
+ (1 << 8) |
+ EXT_CSD_CMD_SET_NORMAL;
+ mrq->cmd->flags = MMC_CMD_AC | MMC_RSP_R1B;
+
+ return mmc_blk_cqe_start_req(mq->card->host, mrq);
+}
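
The flush above goes out as an eMMC CMD6 (SWITCH) that writes 1 to the FLUSH_CACHE byte of EXT_CSD; the 32-bit argument packs access mode, byte index, value and command set into fixed bit fields. A standalone encoder for that layout — field positions follow the eMMC spec, and the macro names are local stand-ins for the kernel's:

#include <stdio.h>
#include <stdint.h>

#define SWITCH_MODE_WRITE_BYTE 0x3     /* set an EXT_CSD byte */
#define EXT_CSD_FLUSH_CACHE    32      /* EXT_CSD byte index */
#define CMD_SET_NORMAL         1       /* EXT_CSD_CMD_SET_NORMAL in the headers */

static uint32_t switch_arg(uint8_t mode, uint8_t index, uint8_t value,
                           uint8_t cmd_set)
{
        return ((uint32_t)mode << 24) | ((uint32_t)index << 16) |
               ((uint32_t)value << 8) | cmd_set;
}

int main(void)
{
        /* "write 1 to EXT_CSD[32]" -- the flush DCMD built above */
        printf("CMD6 arg = %#010x\n",
               switch_arg(SWITCH_MODE_WRITE_BYTE, EXT_CSD_FLUSH_CACHE,
                          1, CMD_SET_NORMAL));          /* 0x03200101 */
        return 0;
}
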
+
+static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+
+ mmc_blk_data_prep(mq, mqrq, 0, NULL, NULL);
+
+ return mmc_blk_cqe_start_req(mq->card->host, &mqrq->brq.mrq);
+}
+
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
struct mmc_card *card,
int disable_multi,
@@ -1779,318 +1624,637 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
brq->mrq.sbc = &brq->sbc;
}
+}
+
+#define MMC_MAX_RETRIES 5
+#define MMC_DATA_RETRIES 2
+#define MMC_NO_RETRIES (MMC_MAX_RETRIES + 1)
- mqrq->areq.err_check = mmc_blk_err_check;
+static int mmc_blk_send_stop(struct mmc_card *card, unsigned int timeout)
+{
+ struct mmc_command cmd = {
+ .opcode = MMC_STOP_TRANSMISSION,
+ .flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC,
+ /* Some hosts wait for busy anyway, so provide a busy timeout */
+ .busy_timeout = timeout,
+ };
+
+ return mmc_wait_for_cmd(card->host, &cmd, 5);
}
-static bool mmc_blk_rw_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
- struct mmc_blk_request *brq, struct request *req,
- bool old_req_pending)
+static int mmc_blk_fix_state(struct mmc_card *card, struct request *req)
{
- bool req_pending;
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_blk_request *brq = &mqrq->brq;
+ unsigned int timeout = mmc_blk_data_timeout_ms(card->host, &brq->data);
+ int err;
- /*
- * If this is an SD card and we're writing, we can first
- * mark the known good sectors as ok.
- *
- * If the card is not SD, we can still ok written sectors
- * as reported by the controller (which might be less than
- * the real number of written sectors, but never more).
- */
- if (mmc_card_sd(card)) {
- u32 blocks;
+ mmc_retune_hold_now(card->host);
+
+ mmc_blk_send_stop(card, timeout);
+
+ err = card_busy_detect(card, timeout, req, NULL);
+
+ mmc_retune_release(card->host);
+
+ return err;
+}
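
mmc_blk_fix_state() is a stop-then-poll sequence: send CMD12, then poll the card's status until it reports the transfer state again or the data timeout lapses. A self-contained sketch of that shape, with stubbed helpers standing in for the real CMD12/CMD13 exchanges:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static int polls;
static void send_stop(void)    { polls = 0; }          /* CMD12 stub */
static bool card_in_tran(void) { return ++polls > 3; } /* CMD13 stub */

static int fix_state(unsigned int timeout_ms)
{
        struct timespec start, now;

        send_stop();
        clock_gettime(CLOCK_MONOTONIC, &start);
        do {
                if (card_in_tran())
                        return 0;
                clock_gettime(CLOCK_MONOTONIC, &now);
        } while ((now.tv_sec - start.tv_sec) * 1000 +
                 (now.tv_nsec - start.tv_nsec) / 1000000 <
                 (long)timeout_ms);
        return -110;                    /* -ETIMEDOUT */
}

int main(void)
{
        printf("fix_state: %d\n", fix_state(1000));
        return 0;
}
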
+
+#define MMC_READ_SINGLE_RETRIES 2
+
+/* Single sector read during recovery */
+static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_request *mrq = &mqrq->brq.mrq;
+ struct mmc_card *card = mq->card;
+ struct mmc_host *host = card->host;
+ blk_status_t error = BLK_STS_OK;
+ int retries = 0;
+
+ do {
+ u32 status;
int err;
- err = mmc_sd_num_wr_blocks(card, &blocks);
+ mmc_blk_rw_rq_prep(mqrq, card, 1, mq);
+
+ mmc_wait_for_req(host, mrq);
+
+ err = mmc_send_status(card, &status);
if (err)
- req_pending = old_req_pending;
+ goto error_exit;
+
+ if (!mmc_host_is_spi(host) &&
+ !mmc_blk_in_tran_state(status)) {
+ err = mmc_blk_fix_state(card, req);
+ if (err)
+ goto error_exit;
+ }
+
+ if (mrq->cmd->error && retries++ < MMC_READ_SINGLE_RETRIES)
+ continue;
+
+ retries = 0;
+
+ if (mrq->cmd->error ||
+ mrq->data->error ||
+ (!mmc_host_is_spi(host) &&
+ (mrq->cmd->resp[0] & CMD_ERRORS || status & CMD_ERRORS)))
+ error = BLK_STS_IOERR;
else
- req_pending = blk_end_request(req, BLK_STS_OK, blocks << 9);
- } else {
- req_pending = blk_end_request(req, BLK_STS_OK, brq->data.bytes_xfered);
- }
- return req_pending;
+ error = BLK_STS_OK;
+
+ } while (blk_update_request(req, error, 512));
+
+ return;
+
+error_exit:
+ mrq->data->bytes_xfered = 0;
+ blk_update_request(req, BLK_STS_IOERR, 512);
+ /* Let it try the remaining request again */
+ if (mqrq->retries > MMC_MAX_RETRIES - 1)
+ mqrq->retries = MMC_MAX_RETRIES - 1;
}
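
The single-sector loop advances 512 bytes at a time: a failing sector is retried up to MMC_READ_SINGLE_RETRIES times before being errored, and blk_update_request() both records the outcome and says whether bytes remain. A userspace model of the same loop; try_read() is a hypothetical stand-in for the real transfer:

#include <stdbool.h>
#include <stdio.h>

#define READ_SINGLE_RETRIES 2

/* Hypothetical stand-in: sector 3 succeeds only on its second try */
static bool try_read(unsigned int sector, int attempt)
{
        return !(sector == 3 && attempt == 0);
}

int main(void)
{
        unsigned int bytes_left = 512 * 6;      /* six-sector request */
        unsigned int sector = 0;
        int retries = 0;

        while (bytes_left) {
                if (!try_read(sector, retries)) {
                        if (retries++ < READ_SINGLE_RETRIES)
                                continue;       /* retry same sector */
                        printf("sector %u: I/O error\n", sector);
                }
                retries = 0;
                bytes_left -= 512;      /* blk_update_request() analogue */
                sector++;
        }
        return 0;
}
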
-static void mmc_blk_rw_cmd_abort(struct mmc_queue *mq, struct mmc_card *card,
- struct request *req,
- struct mmc_queue_req *mqrq)
+static inline bool mmc_blk_oor_valid(struct mmc_blk_request *brq)
{
- if (mmc_card_removed(card))
- req->rq_flags |= RQF_QUIET;
- while (blk_end_request(req, BLK_STS_IOERR, blk_rq_cur_bytes(req)));
- mq->qcnt--;
+ return !!brq->mrq.sbc;
}
-/**
- * mmc_blk_rw_try_restart() - tries to restart the current async request
- * @mq: the queue with the card and host to restart
- * @req: a new request that want to be started after the current one
+static inline u32 mmc_blk_stop_err_bits(struct mmc_blk_request *brq)
+{
+ return mmc_blk_oor_valid(brq) ? CMD_ERRORS : CMD_ERRORS_EXCL_OOR;
+}
+
+/*
+ * Check for errors the host controller driver might not have seen, such as
+ * response mode errors or invalid card state.
+ */
+static bool mmc_blk_status_error(struct request *req, u32 status)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_blk_request *brq = &mqrq->brq;
+ struct mmc_queue *mq = req->q->queuedata;
+ u32 stop_err_bits;
+
+ if (mmc_host_is_spi(mq->card->host))
+ return false;
+
+ stop_err_bits = mmc_blk_stop_err_bits(brq);
+
+ return brq->cmd.resp[0] & CMD_ERRORS ||
+ brq->stop.resp[0] & stop_err_bits ||
+ status & stop_err_bits ||
+ (rq_data_dir(req) == WRITE && !mmc_blk_in_tran_state(status));
+}
+
+static inline bool mmc_blk_cmd_started(struct mmc_blk_request *brq)
+{
+ return !brq->sbc.error && !brq->cmd.error &&
+ !(brq->cmd.resp[0] & CMD_ERRORS);
+}
+
+/*
+ * Requests are completed by mmc_blk_mq_complete_rq(), which applies a
+ * simple policy:
+ * 1. A request that has transferred at least some data is considered
+ * successful and will be requeued if there is remaining data to
+ * transfer.
+ * 2. Otherwise the number of retries is incremented and the request
+ * will be requeued if there are remaining retries.
+ * 3. Otherwise the request will be errored out.
+ * That means mmc_blk_mq_complete_rq() is controlled by bytes_xfered and
+ * mqrq->retries. So there are only 4 possible actions here:
+ * 1. do not accept the bytes_xfered value i.e. set it to zero
+ * 2. change mqrq->retries to determine the number of retries
+ * 3. try to reset the card
+ * 4. read one sector at a time
*/
-static void mmc_blk_rw_try_restart(struct mmc_queue *mq, struct request *req,
- struct mmc_queue_req *mqrq)
+static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
{
- if (!req)
+ int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_blk_request *brq = &mqrq->brq;
+ struct mmc_blk_data *md = mq->blkdata;
+ struct mmc_card *card = mq->card;
+ u32 status;
+ u32 blocks;
+ int err;
+
+ /*
+ * Some errors the host driver might not have seen. Set the number of
+ * bytes transferred to zero in that case.
+ */
+ err = __mmc_send_status(card, &status, 0);
+ if (err || mmc_blk_status_error(req, status))
+ brq->data.bytes_xfered = 0;
+
+ mmc_retune_release(card->host);
+
+ /*
+ * Try again to get the status. This also provides an opportunity for
+ * re-tuning.
+ */
+ if (err)
+ err = __mmc_send_status(card, &status, 0);
+
+ /*
+ * If the card is gone, there is nothing more to do once the number of
+ * bytes transferred has been updated.
+ */
+ if (err && mmc_detect_card_removed(card->host))
return;
+ /* Try to get back to "tran" state */
+ if (!mmc_host_is_spi(mq->card->host) &&
+ (err || !mmc_blk_in_tran_state(status)))
+ err = mmc_blk_fix_state(mq->card, req);
+
/*
- * If the card was removed, just cancel everything and return.
+ * Special case for SD cards where the card might record the number of
+ * blocks written.
*/
- if (mmc_card_removed(mq->card)) {
- req->rq_flags |= RQF_QUIET;
- blk_end_request_all(req, BLK_STS_IOERR);
- mq->qcnt--; /* FIXME: just set to 0? */
+ if (!err && mmc_blk_cmd_started(brq) && mmc_card_sd(card) &&
+ rq_data_dir(req) == WRITE) {
+ if (mmc_sd_num_wr_blocks(card, &blocks))
+ brq->data.bytes_xfered = 0;
+ else
+ brq->data.bytes_xfered = blocks << 9;
+ }
+
+ /* Reset if the card is in a bad state */
+ if (!mmc_host_is_spi(mq->card->host) &&
+ err && mmc_blk_reset(md, card->host, type)) {
+ pr_err("%s: recovery failed!\n", req->rq_disk->disk_name);
+ mqrq->retries = MMC_NO_RETRIES;
+ return;
+ }
+
+ /*
+ * If anything was done, just return; anything remaining on the request
+ * will get requeued.
+ */
+ if (brq->data.bytes_xfered)
+ return;
+
+ /* Reset before last retry */
+ if (mqrq->retries + 1 == MMC_MAX_RETRIES)
+ mmc_blk_reset(md, card->host, type);
+
+ /* Command errors fail fast, so use all MMC_MAX_RETRIES */
+ if (brq->sbc.error || brq->cmd.error)
+ return;
+
+ /* Reduce the remaining retries for data errors */
+ if (mqrq->retries < MMC_MAX_RETRIES - MMC_DATA_RETRIES) {
+ mqrq->retries = MMC_MAX_RETRIES - MMC_DATA_RETRIES;
+ return;
+ }
+
+ /* FIXME: Missing single sector read for large sector size */
+ if (!mmc_large_sector(card) && rq_data_dir(req) == READ &&
+ brq->data.blocks > 1) {
+ /* Read one sector at a time */
+ mmc_blk_read_single(mq, req);
return;
}
- /* Else proceed and try to restart the current async request */
- mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
- mmc_start_areq(mq->card->host, &mqrq->areq, NULL);
}
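
The tail of this recovery path is pure retry accounting: mqrq->retries counts attempts already used, command errors may spend the full MMC_MAX_RETRIES budget, and a data error clamps the counter so at most MMC_DATA_RETRIES attempts remain. The arithmetic in isolation:

#include <stdio.h>

#define MAX_RETRIES  5
#define DATA_RETRIES 2

/* A data error burns the surplus budget down to DATA_RETRIES */
static int clamp_for_data_error(int retries)
{
        if (retries < MAX_RETRIES - DATA_RETRIES)
                return MAX_RETRIES - DATA_RETRIES;
        return retries;
}

int main(void)
{
        for (int used = 0; used <= MAX_RETRIES; used++)
                printf("used=%d -> %d attempt(s) left after data error\n",
                       used, MAX_RETRIES - clamp_for_data_error(used));
        return 0;
}
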
-static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
+static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq)
{
- struct mmc_blk_data *md = mq->blkdata;
- struct mmc_card *card = md->queue.card;
- struct mmc_blk_request *brq;
- int disable_multi = 0, retry = 0, type, retune_retry_done = 0;
- enum mmc_blk_status status;
- struct mmc_queue_req *mqrq_cur = NULL;
- struct mmc_queue_req *mq_rq;
- struct request *old_req;
- struct mmc_async_req *new_areq;
- struct mmc_async_req *old_areq;
- bool req_pending = true;
+ mmc_blk_eval_resp_error(brq);
+
+ return brq->sbc.error || brq->cmd.error || brq->stop.error ||
+ brq->data.error || brq->cmd.resp[0] & CMD_ERRORS;
+}
+
+static int mmc_blk_card_busy(struct mmc_card *card, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ u32 status = 0;
+ int err;
+
+ if (mmc_host_is_spi(card->host) || rq_data_dir(req) == READ)
+ return 0;
+
+ err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, req, &status);
+
+ /*
+ * Do not assume data transferred correctly if there are any error bits
+ * set.
+ */
+ if (status & mmc_blk_stop_err_bits(&mqrq->brq)) {
+ mqrq->brq.data.bytes_xfered = 0;
+ err = err ? err : -EIO;
+ }
+
+ /* Copy the exception bit so it will be seen later on */
+ if (mmc_card_mmc(card) && status & R1_EXCEPTION_EVENT)
+ mqrq->brq.cmd.resp[0] |= R1_EXCEPTION_EVENT;
+
+ return err;
+}
- if (new_req) {
- mqrq_cur = req_to_mmc_queue_req(new_req);
- mq->qcnt++;
+static inline void mmc_blk_rw_reset_success(struct mmc_queue *mq,
+ struct request *req)
+{
+ int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
+
+ mmc_blk_reset_success(mq->blkdata, type);
+}
+
+static void mmc_blk_mq_complete_rq(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ unsigned int nr_bytes = mqrq->brq.data.bytes_xfered;
+
+ if (nr_bytes) {
+ if (blk_update_request(req, BLK_STS_OK, nr_bytes))
+ blk_mq_requeue_request(req, true);
+ else
+ __blk_mq_end_request(req, BLK_STS_OK);
+ } else if (!blk_rq_bytes(req)) {
+ __blk_mq_end_request(req, BLK_STS_IOERR);
+ } else if (mqrq->retries++ < MMC_MAX_RETRIES) {
+ blk_mq_requeue_request(req, true);
+ } else {
+ if (mmc_card_removed(mq->card))
+ req->rq_flags |= RQF_QUIET;
+ blk_mq_end_request(req, BLK_STS_IOERR);
+ }
+}
+
+static bool mmc_blk_urgent_bkops_needed(struct mmc_queue *mq,
+ struct mmc_queue_req *mqrq)
+{
+ return mmc_card_mmc(mq->card) && !mmc_host_is_spi(mq->card->host) &&
+ (mqrq->brq.cmd.resp[0] & R1_EXCEPTION_EVENT ||
+ mqrq->brq.stop.resp[0] & R1_EXCEPTION_EVENT);
+}
+
+static void mmc_blk_urgent_bkops(struct mmc_queue *mq,
+ struct mmc_queue_req *mqrq)
+{
+ if (mmc_blk_urgent_bkops_needed(mq, mqrq))
+ mmc_start_bkops(mq->card, true);
+}
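
The urgent-BKOPS trigger reduces to one status bit: eMMC cards raise R1_EXCEPTION_EVENT (bit 6 of the R1 status word) when they want attention, and either the command or the stop response can carry it. A tiny sketch of the check:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define R1_EXCEPTION_EVENT (1u << 6)

static bool urgent_bkops_needed(uint32_t cmd_resp, uint32_t stop_resp)
{
        return (cmd_resp | stop_resp) & R1_EXCEPTION_EVENT;
}

int main(void)
{
        /* 0x900 = tran state + ready-for-data; 0x940 adds the event bit */
        printf("%d\n", urgent_bkops_needed(0x00000900, 0));     /* 0 */
        printf("%d\n", urgent_bkops_needed(0x00000940, 0));     /* 1 */
        return 0;
}
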
+
+void mmc_blk_mq_complete(struct request *req)
+{
+ struct mmc_queue *mq = req->q->queuedata;
+
+ if (mq->use_cqe)
+ mmc_blk_cqe_complete_rq(mq, req);
+ else
+ mmc_blk_mq_complete_rq(mq, req);
+}
+
+static void mmc_blk_mq_poll_completion(struct mmc_queue *mq,
+ struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_host *host = mq->card->host;
+
+ if (mmc_blk_rq_error(&mqrq->brq) ||
+ mmc_blk_card_busy(mq->card, req)) {
+ mmc_blk_mq_rw_recovery(mq, req);
+ } else {
+ mmc_blk_rw_reset_success(mq, req);
+ mmc_retune_release(host);
+ }
+
+ mmc_blk_urgent_bkops(mq, mqrq);
+}
+
+static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req)
+{
+ struct request_queue *q = req->q;
+ unsigned long flags;
+ bool put_card;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+
+ mq->in_flight[mmc_issue_type(mq, req)] -= 1;
+
+ put_card = (mmc_tot_in_flight(mq) == 0);
+
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ if (put_card)
+ mmc_put_card(mq->card, &mq->ctx);
+}
+
+static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_request *mrq = &mqrq->brq.mrq;
+ struct mmc_host *host = mq->card->host;
+
+ mmc_post_req(host, mrq, 0);
+
+ /*
+ * Block layer timeouts race with completions, which means the normal
+ * completion path cannot be used during recovery.
+ */
+ if (mq->in_recovery)
+ mmc_blk_mq_complete_rq(mq, req);
+ else
+ blk_mq_complete_request(req);
+
+ mmc_blk_mq_dec_in_flight(mq, req);
+}
+
+void mmc_blk_mq_recovery(struct mmc_queue *mq)
+{
+ struct request *req = mq->recovery_req;
+ struct mmc_host *host = mq->card->host;
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+
+ mq->recovery_req = NULL;
+ mq->rw_wait = false;
+
+ if (mmc_blk_rq_error(&mqrq->brq)) {
+ mmc_retune_hold_now(host);
+ mmc_blk_mq_rw_recovery(mq, req);
}
- if (!mq->qcnt)
+ mmc_blk_urgent_bkops(mq, mqrq);
+
+ mmc_blk_mq_post_req(mq, req);
+}
+
+static void mmc_blk_mq_complete_prev_req(struct mmc_queue *mq,
+ struct request **prev_req)
+{
+ if (mmc_host_done_complete(mq->card->host))
return;
- do {
- if (new_req) {
- /*
- * When 4KB native sector is enabled, only 8 blocks
- * multiple read or write is allowed
- */
- if (mmc_large_sector(card) &&
- !IS_ALIGNED(blk_rq_sectors(new_req), 8)) {
- pr_err("%s: Transfer size is not 4KB sector size aligned\n",
- new_req->rq_disk->disk_name);
- mmc_blk_rw_cmd_abort(mq, card, new_req, mqrq_cur);
- return;
- }
+ mutex_lock(&mq->complete_lock);
- mmc_blk_rw_rq_prep(mqrq_cur, card, 0, mq);
- new_areq = &mqrq_cur->areq;
- } else
- new_areq = NULL;
+ if (!mq->complete_req)
+ goto out_unlock;
- old_areq = mmc_start_areq(card->host, new_areq, &status);
- if (!old_areq) {
- /*
- * We have just put the first request into the pipeline
- * and there is nothing more to do until it is
- * complete.
- */
- return;
- }
+ mmc_blk_mq_poll_completion(mq, mq->complete_req);
+
+ if (prev_req)
+ *prev_req = mq->complete_req;
+ else
+ mmc_blk_mq_post_req(mq, mq->complete_req);
+
+ mq->complete_req = NULL;
+
+out_unlock:
+ mutex_unlock(&mq->complete_lock);
+}
+
+void mmc_blk_mq_complete_work(struct work_struct *work)
+{
+ struct mmc_queue *mq = container_of(work, struct mmc_queue,
+ complete_work);
+
+ mmc_blk_mq_complete_prev_req(mq, NULL);
+}
+
+static void mmc_blk_mq_req_done(struct mmc_request *mrq)
+{
+ struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
+ brq.mrq);
+ struct request *req = mmc_queue_req_to_req(mqrq);
+ struct request_queue *q = req->q;
+ struct mmc_queue *mq = q->queuedata;
+ struct mmc_host *host = mq->card->host;
+ unsigned long flags;
+
+ if (!mmc_host_done_complete(host)) {
+ bool waiting;
/*
- * An asynchronous request has been completed and we proceed
- * to handle the result of it.
+ * We cannot complete the request in this context, so record
+ * that there is a request to complete, and that a following
+ * request does not need to wait (although it does need to
+ * complete complete_req first).
*/
- mq_rq = container_of(old_areq, struct mmc_queue_req, areq);
- brq = &mq_rq->brq;
- old_req = mmc_queue_req_to_req(mq_rq);
- type = rq_data_dir(old_req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
-
- switch (status) {
- case MMC_BLK_SUCCESS:
- case MMC_BLK_PARTIAL:
- /*
- * A block was successfully transferred.
- */
- mmc_blk_reset_success(md, type);
+ spin_lock_irqsave(q->queue_lock, flags);
+ mq->complete_req = req;
+ mq->rw_wait = false;
+ waiting = mq->waiting;
+ spin_unlock_irqrestore(q->queue_lock, flags);
- req_pending = blk_end_request(old_req, BLK_STS_OK,
- brq->data.bytes_xfered);
- /*
- * If the blk_end_request function returns non-zero even
- * though all data has been transferred and no errors
- * were returned by the host controller, it's a bug.
- */
- if (status == MMC_BLK_SUCCESS && req_pending) {
- pr_err("%s BUG rq_tot %d d_xfer %d\n",
- __func__, blk_rq_bytes(old_req),
- brq->data.bytes_xfered);
- mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
- return;
- }
- break;
- case MMC_BLK_CMD_ERR:
- req_pending = mmc_blk_rw_cmd_err(md, card, brq, old_req, req_pending);
- if (mmc_blk_reset(md, card->host, type)) {
- if (req_pending)
- mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
- else
- mq->qcnt--;
- mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
- return;
- }
- if (!req_pending) {
- mq->qcnt--;
- mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
- return;
- }
- break;
- case MMC_BLK_RETRY:
- retune_retry_done = brq->retune_retry_done;
- if (retry++ < 5)
- break;
- /* Fall through */
- case MMC_BLK_ABORT:
- if (!mmc_blk_reset(md, card->host, type))
- break;
- mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
- mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
- return;
- case MMC_BLK_DATA_ERR: {
- int err;
-
- err = mmc_blk_reset(md, card->host, type);
- if (!err)
- break;
- if (err == -ENODEV) {
- mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
- mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
- return;
- }
- /* Fall through */
- }
- case MMC_BLK_ECC_ERR:
- if (brq->data.blocks > 1) {
- /* Redo read one sector at a time */
- pr_warn("%s: retrying using single block read\n",
- old_req->rq_disk->disk_name);
- disable_multi = 1;
- break;
- }
- /*
- * After an error, we redo I/O one sector at a
- * time, so we only reach here after trying to
- * read a single sector.
- */
- req_pending = blk_end_request(old_req, BLK_STS_IOERR,
- brq->data.blksz);
- if (!req_pending) {
- mq->qcnt--;
- mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
- return;
- }
- break;
- case MMC_BLK_NOMEDIUM:
- mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
- mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
- return;
- default:
- pr_err("%s: Unhandled return value (%d)",
- old_req->rq_disk->disk_name, status);
- mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
- mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
- return;
- }
+ /*
+ * If 'waiting' then the waiting task will complete this
+ * request, otherwise queue a work to do it. Note that
+ * complete_work may still race with the dispatch of a following
+ * request.
+ */
+ if (waiting)
+ wake_up(&mq->wait);
+ else
+ kblockd_schedule_work(&mq->complete_work);
- if (req_pending) {
- /*
- * In case of an incomplete request
- * prepare it again and resend.
- */
- mmc_blk_rw_rq_prep(mq_rq, card,
- disable_multi, mq);
- mmc_start_areq(card->host,
- &mq_rq->areq, NULL);
- mq_rq->brq.retune_retry_done = retune_retry_done;
- }
- } while (req_pending);
+ return;
+ }
+
+ /* Take the recovery path for errors or urgent background operations */
+ if (mmc_blk_rq_error(&mqrq->brq) ||
+ mmc_blk_urgent_bkops_needed(mq, mqrq)) {
+ spin_lock_irqsave(q->queue_lock, flags);
+ mq->recovery_needed = true;
+ mq->recovery_req = req;
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ wake_up(&mq->wait);
+ schedule_work(&mq->recovery_work);
+ return;
+ }
+
+ mmc_blk_rw_reset_success(mq, req);
- mq->qcnt--;
+ mq->rw_wait = false;
+ wake_up(&mq->wait);
+
+ mmc_blk_mq_post_req(mq, req);
}
-void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
+static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
+{
+ struct request_queue *q = mq->queue;
+ unsigned long flags;
+ bool done;
+
+ /*
+ * Wait while there is another request in progress, but not if recovery
+ * is needed. Also indicate whether there is a request waiting to start.
+ */
+ spin_lock_irqsave(q->queue_lock, flags);
+ if (mq->recovery_needed) {
+ *err = -EBUSY;
+ done = true;
+ } else {
+ done = !mq->rw_wait;
+ }
+ mq->waiting = !done;
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ return done;
+}
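
This condition pairs with the wake-up in mmc_blk_mq_req_done(): the issuer sets 'waiting' under the queue lock before sleeping, and the completion side clears rw_wait and wakes it only when 'waiting' was observed, so no wake-up is lost. A minimal pthread rendering of that handshake, assuming nothing beyond POSIX:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static bool rw_wait = true;             /* a request is in flight */
static bool waiting;

static void *complete_prev(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        rw_wait = false;                /* mq->rw_wait = false */
        if (waiting)                    /* wake_up(&mq->wait) */
                pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, complete_prev, NULL);

        pthread_mutex_lock(&lock);
        while (rw_wait) {               /* mmc_blk_rw_wait_cond() */
                waiting = true;
                pthread_cond_wait(&cond, &lock);
        }
        waiting = false;
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);

        puts("previous request done; issue the next one");
        return 0;
}
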
+
+static int mmc_blk_rw_wait(struct mmc_queue *mq, struct request **prev_req)
+{
+ int err = 0;
+
+ wait_event(mq->wait, mmc_blk_rw_wait_cond(mq, &err));
+
+ /* Always complete the previous request if there is one */
+ mmc_blk_mq_complete_prev_req(mq, prev_req);
+
+ return err;
+}
+
+static int mmc_blk_mq_issue_rw_rq(struct mmc_queue *mq,
+ struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_host *host = mq->card->host;
+ struct request *prev_req = NULL;
+ int err = 0;
+
+ mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
+
+ mqrq->brq.mrq.done = mmc_blk_mq_req_done;
+
+ mmc_pre_req(host, &mqrq->brq.mrq);
+
+ err = mmc_blk_rw_wait(mq, &prev_req);
+ if (err)
+ goto out_post_req;
+
+ mq->rw_wait = true;
+
+ err = mmc_start_request(host, &mqrq->brq.mrq);
+
+ if (prev_req)
+ mmc_blk_mq_post_req(mq, prev_req);
+
+ if (err)
+ mq->rw_wait = false;
+
+ /* Release re-tuning here where there is no synchronization required */
+ if (err || mmc_host_done_complete(host))
+ mmc_retune_release(host);
+
+out_post_req:
+ if (err)
+ mmc_post_req(host, &mqrq->brq.mrq, err);
+
+ return err;
+}
+
+static int mmc_blk_wait_for_idle(struct mmc_queue *mq, struct mmc_host *host)
+{
+ if (mq->use_cqe)
+ return host->cqe_ops->cqe_wait_for_idle(host);
+
+ return mmc_blk_rw_wait(mq, NULL);
+}
+
+enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
{
- int ret;
struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
-
- if (req && !mq->qcnt)
- /* claim host only for the first request */
- mmc_get_card(card, NULL);
+ struct mmc_host *host = card->host;
+ int ret;
ret = mmc_blk_part_switch(card, md->part_type);
- if (ret) {
- if (req) {
- blk_end_request_all(req, BLK_STS_IOERR);
- }
- goto out;
- }
+ if (ret)
+ return MMC_REQ_FAILED_TO_START;
- if (req) {
+ switch (mmc_issue_type(mq, req)) {
+ case MMC_ISSUE_SYNC:
+ ret = mmc_blk_wait_for_idle(mq, host);
+ if (ret)
+ return MMC_REQ_BUSY;
switch (req_op(req)) {
case REQ_OP_DRV_IN:
case REQ_OP_DRV_OUT:
- /*
- * Complete ongoing async transfer before issuing
- * ioctl()s
- */
- if (mq->qcnt)
- mmc_blk_issue_rw_rq(mq, NULL);
mmc_blk_issue_drv_op(mq, req);
break;
case REQ_OP_DISCARD:
- /*
- * Complete ongoing async transfer before issuing
- * discard.
- */
- if (mq->qcnt)
- mmc_blk_issue_rw_rq(mq, NULL);
mmc_blk_issue_discard_rq(mq, req);
break;
case REQ_OP_SECURE_ERASE:
- /*
- * Complete ongoing async transfer before issuing
- * secure erase.
- */
- if (mq->qcnt)
- mmc_blk_issue_rw_rq(mq, NULL);
mmc_blk_issue_secdiscard_rq(mq, req);
break;
case REQ_OP_FLUSH:
- /*
- * Complete ongoing async transfer before issuing
- * flush.
- */
- if (mq->qcnt)
- mmc_blk_issue_rw_rq(mq, NULL);
mmc_blk_issue_flush(mq, req);
break;
default:
- /* Normal request, just issue it */
- mmc_blk_issue_rw_rq(mq, req);
- card->host->context_info.is_waiting_last_req = false;
+ WARN_ON_ONCE(1);
+ return MMC_REQ_FAILED_TO_START;
+ }
+ return MMC_REQ_FINISHED;
+ case MMC_ISSUE_DCMD:
+ case MMC_ISSUE_ASYNC:
+ switch (req_op(req)) {
+ case REQ_OP_FLUSH:
+ ret = mmc_blk_cqe_issue_flush(mq, req);
break;
+ case REQ_OP_READ:
+ case REQ_OP_WRITE:
+ if (mq->use_cqe)
+ ret = mmc_blk_cqe_issue_rw_rq(mq, req);
+ else
+ ret = mmc_blk_mq_issue_rw_rq(mq, req);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ ret = -EINVAL;
}
- } else {
- /* No request, flushing the pipeline with NULL */
- mmc_blk_issue_rw_rq(mq, NULL);
- card->host->context_info.is_waiting_last_req = false;
+ if (!ret)
+ return MMC_REQ_STARTED;
+ return ret == -EBUSY ? MMC_REQ_BUSY : MMC_REQ_FAILED_TO_START;
+ default:
+ WARN_ON_ONCE(1);
+ return MMC_REQ_FAILED_TO_START;
}
-
-out:
- if (!mq->qcnt)
- mmc_put_card(card, NULL);
}
static inline int mmc_blk_readonly(struct mmc_card *card)
@@ -2156,6 +2320,18 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
md->queue.blkdata = md;
+ /*
+ * Keep an extra reference to the queue so that we can shut down the
+ * queue (i.e. call blk_cleanup_queue()) while there are still
+ * references to the 'md'. The corresponding blk_put_queue() is in
+ * mmc_blk_put().
+ */
+ if (!blk_get_queue(md->queue.queue)) {
+ mmc_cleanup_queue(&md->queue);
+ ret = -ENODEV;
+ goto err_putdisk;
+ }
+
md->disk->major = MMC_BLOCK_MAJOR;
md->disk->first_minor = devidx * perdev_minors;
md->disk->fops = &mmc_bdops;
@@ -2471,10 +2647,6 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
* from being accepted.
*/
card = md->queue.card;
- spin_lock_irq(md->queue.queue->queue_lock);
- queue_flag_set(QUEUE_FLAG_BYPASS, md->queue.queue);
- spin_unlock_irq(md->queue.queue->queue_lock);
- blk_set_queue_dying(md->queue.queue);
mmc_cleanup_queue(&md->queue);
if (md->disk->flags & GENHD_FL_UP) {
device_remove_file(disk_to_dev(md->disk), &md->force_ro);
@@ -2623,6 +2795,7 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
if (n != EXT_CSD_STR_LEN) {
err = -EINVAL;
+ kfree(ext_csd);
goto out_free;
}
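
The added kfree() plugs a leak: when the formatted-length check failed, the function jumped to a label that freed the output buffer but not the raw ext_csd copy. A sketch of the corrected cleanup shape in plain C, with hypothetical names (EXT_CSD_STR_LEN matching the driver's 1025):

#include <stdio.h>
#include <stdlib.h>

#define EXT_CSD_STR_LEN 1025

static int open_ext_csd(char **out, int n /* formatted length */)
{
        char *ext_csd = calloc(1, 512);              /* raw register dump */
        char *buf = calloc(1, EXT_CSD_STR_LEN + 1);  /* hex text */
        int err;

        if (!ext_csd || !buf) {
                err = -12;                           /* -ENOMEM */
                goto out_free;
        }
        if (n != EXT_CSD_STR_LEN) {                  /* the failing check */
                err = -22;                           /* -EINVAL */
                goto out_free;                       /* frees ext_csd too */
        }
        *out = buf;
        free(ext_csd);                               /* done with raw copy */
        return 0;

out_free:
        free(buf);
        free(ext_csd);
        return err;
}

int main(void)
{
        char *s = NULL;

        printf("open_ext_csd: %d\n", open_ext_csd(&s, 0)); /* -22, no leak */
        free(s);
        return 0;
}
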
diff --git a/drivers/mmc/core/block.h b/drivers/mmc/core/block.h
index 5946636101ef..31153f656f41 100644
--- a/drivers/mmc/core/block.h
+++ b/drivers/mmc/core/block.h
@@ -5,6 +5,16 @@
struct mmc_queue;
struct request;
-void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req);
+void mmc_blk_cqe_recovery(struct mmc_queue *mq);
+
+enum mmc_issued;
+
+enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req);
+void mmc_blk_mq_complete(struct request *req);
+void mmc_blk_mq_recovery(struct mmc_queue *mq);
+
+struct work_struct;
+
+void mmc_blk_mq_complete_work(struct work_struct *work);
#endif
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 7586ff2ad1f1..fc92c6c1c9a4 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -351,8 +351,6 @@ int mmc_add_card(struct mmc_card *card)
#ifdef CONFIG_DEBUG_FS
mmc_add_card_debugfs(card);
#endif
- mmc_init_context_info(card->host);
-
card->dev.of_node = mmc_of_find_child_device(card->host, 0);
device_enable_async_suspend(&card->dev);
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 1f0f44f4dd5f..c0ba6d8823b7 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -341,6 +341,8 @@ int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
int err;
+ init_completion(&mrq->cmd_completion);
+
mmc_retune_hold(host);
if (mmc_card_removed(host->card))
@@ -361,20 +363,6 @@ int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
}
EXPORT_SYMBOL(mmc_start_request);
-/*
- * mmc_wait_data_done() - done callback for data request
- * @mrq: done data request
- *
- * Wakes up mmc context, passed as a callback to host controller driver
- */
-static void mmc_wait_data_done(struct mmc_request *mrq)
-{
- struct mmc_context_info *context_info = &mrq->host->context_info;
-
- context_info->is_done_rcv = true;
- wake_up_interruptible(&context_info->wait);
-}
-
static void mmc_wait_done(struct mmc_request *mrq)
{
complete(&mrq->completion);
@@ -392,37 +380,6 @@ static inline void mmc_wait_ongoing_tfr_cmd(struct mmc_host *host)
wait_for_completion(&ongoing_mrq->cmd_completion);
}
-/*
- *__mmc_start_data_req() - starts data request
- * @host: MMC host to start the request
- * @mrq: data request to start
- *
- * Sets the done callback to be called when request is completed by the card.
- * Starts data mmc request execution
- * If an ongoing transfer is already in progress, wait for the command line
- * to become available before sending another command.
- */
-static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
-{
- int err;
-
- mmc_wait_ongoing_tfr_cmd(host);
-
- mrq->done = mmc_wait_data_done;
- mrq->host = host;
-
- init_completion(&mrq->cmd_completion);
-
- err = mmc_start_request(host, mrq);
- if (err) {
- mrq->cmd->error = err;
- mmc_complete_cmd(mrq);
- mmc_wait_data_done(mrq);
- }
-
- return err;
-}
-
static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
int err;
@@ -432,8 +389,6 @@ static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
init_completion(&mrq->completion);
mrq->done = mmc_wait_done;
- init_completion(&mrq->cmd_completion);
-
err = mmc_start_request(host, mrq);
if (err) {
mrq->cmd->error = err;
@@ -650,164 +605,11 @@ EXPORT_SYMBOL(mmc_cqe_recovery);
*/
bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq)
{
- if (host->areq)
- return host->context_info.is_done_rcv;
- else
- return completion_done(&mrq->completion);
+ return completion_done(&mrq->completion);
}
EXPORT_SYMBOL(mmc_is_req_done);
/**
- * mmc_pre_req - Prepare for a new request
- * @host: MMC host to prepare command
- * @mrq: MMC request to prepare for
- *
- * mmc_pre_req() is called prior to mmc_start_req() to let
- * host prepare for the new request. Preparation of a request may be
- * performed while another request is running on the host.
- */
-static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq)
-{
- if (host->ops->pre_req)
- host->ops->pre_req(host, mrq);
-}
-
-/**
- * mmc_post_req - Post process a completed request
- * @host: MMC host to post process command
- * @mrq: MMC request to post process for
- * @err: Error, if non zero, clean up any resources made in pre_req
- *
- * Let the host post process a completed request. Post processing of
- * a request may be performed while another request is running.
- */
-static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
- int err)
-{
- if (host->ops->post_req)
- host->ops->post_req(host, mrq, err);
-}
-
-/**
- * mmc_finalize_areq() - finalize an asynchronous request
- * @host: MMC host to finalize any ongoing request on
- *
- * Returns the status of the ongoing asynchronous request, but
- * MMC_BLK_SUCCESS if no request was going on.
- */
-static enum mmc_blk_status mmc_finalize_areq(struct mmc_host *host)
-{
- struct mmc_context_info *context_info = &host->context_info;
- enum mmc_blk_status status;
-
- if (!host->areq)
- return MMC_BLK_SUCCESS;
-
- while (1) {
- wait_event_interruptible(context_info->wait,
- (context_info->is_done_rcv ||
- context_info->is_new_req));
-
- if (context_info->is_done_rcv) {
- struct mmc_command *cmd;
-
- context_info->is_done_rcv = false;
- cmd = host->areq->mrq->cmd;
-
- if (!cmd->error || !cmd->retries ||
- mmc_card_removed(host->card)) {
- status = host->areq->err_check(host->card,
- host->areq);
- break; /* return status */
- } else {
- mmc_retune_recheck(host);
- pr_info("%s: req failed (CMD%u): %d, retrying...\n",
- mmc_hostname(host),
- cmd->opcode, cmd->error);
- cmd->retries--;
- cmd->error = 0;
- __mmc_start_request(host, host->areq->mrq);
- continue; /* wait for done/new event again */
- }
- }
-
- return MMC_BLK_NEW_REQUEST;
- }
-
- mmc_retune_release(host);
-
- /*
- * Check BKOPS urgency for each R1 response
- */
- if (host->card && mmc_card_mmc(host->card) &&
- ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
- (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
- (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) {
- mmc_start_bkops(host->card, true);
- }
-
- return status;
-}
-
-/**
- * mmc_start_areq - start an asynchronous request
- * @host: MMC host to start command
- * @areq: asynchronous request to start
- * @ret_stat: out parameter for status
- *
- * Start a new MMC custom command request for a host.
- * If there is an ongoing async request, wait for completion
- * of that request and start the new one and return.
- * Does not wait for the new request to complete.
- *
- * Returns the completed request, NULL in case of none completed.
- * Wait for an ongoing request (previously started) to complete and
- * return the completed request. If there is no ongoing request, NULL
- * is returned without waiting. NULL is not an error condition.
- */
-struct mmc_async_req *mmc_start_areq(struct mmc_host *host,
- struct mmc_async_req *areq,
- enum mmc_blk_status *ret_stat)
-{
- enum mmc_blk_status status;
- int start_err = 0;
- struct mmc_async_req *previous = host->areq;
-
- /* Prepare a new request */
- if (areq)
- mmc_pre_req(host, areq->mrq);
-
- /* Finalize previous request */
- status = mmc_finalize_areq(host);
- if (ret_stat)
- *ret_stat = status;
-
- /* The previous request is still going on... */
- if (status == MMC_BLK_NEW_REQUEST)
- return NULL;
-
- /* Fine so far, start the new request! */
- if (status == MMC_BLK_SUCCESS && areq)
- start_err = __mmc_start_data_req(host, areq->mrq);
-
- /* Postprocess the old request at this point */
- if (host->areq)
- mmc_post_req(host, host->areq->mrq, 0);
-
- /* Cancel a prepared request if it was not started. */
- if ((status != MMC_BLK_SUCCESS || start_err) && areq)
- mmc_post_req(host, areq->mrq, -EINVAL);
-
- if (status != MMC_BLK_SUCCESS)
- host->areq = NULL;
- else
- host->areq = areq;
-
- return previous;
-}
-EXPORT_SYMBOL(mmc_start_areq);
-
-/**
* mmc_wait_for_req - start a request and wait for completion
* @host: MMC host to start command
* @mrq: MMC request to start
@@ -2959,6 +2761,14 @@ static int mmc_pm_notify(struct notifier_block *notify_block,
if (!err)
break;
+ if (!mmc_card_is_removable(host)) {
+ dev_warn(mmc_dev(host),
+ "pre_suspend failed for non-removable host: "
+ "%d\n", err);
+ /* Avoid removing non-removable hosts */
+ break;
+ }
+
/* Calling bus_ops->remove() with a claimed host can deadlock */
host->bus_ops->remove(host);
mmc_claim_host(host);
@@ -2994,22 +2804,6 @@ void mmc_unregister_pm_notifier(struct mmc_host *host)
}
#endif
-/**
- * mmc_init_context_info() - init synchronization context
- * @host: mmc host
- *
- * Init struct context_info needed to implement asynchronous
- * request mechanism, used by mmc core, host driver and mmc requests
- * supplier.
- */
-void mmc_init_context_info(struct mmc_host *host)
-{
- host->context_info.is_new_req = false;
- host->context_info.is_done_rcv = false;
- host->context_info.is_waiting_last_req = false;
- init_waitqueue_head(&host->context_info.wait);
-}
-
static int __init mmc_init(void)
{
int ret;
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 71e6c6d7ceb7..d6303d69071b 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -62,12 +62,10 @@ void mmc_set_initial_state(struct mmc_host *host);
static inline void mmc_delay(unsigned int ms)
{
- if (ms < 1000 / HZ) {
- cond_resched();
- mdelay(ms);
- } else {
+ if (ms <= 20)
+ usleep_range(ms * 1000, ms * 1250);
+ else
msleep(ms);
- }
}
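
The rewritten mmc_delay() picks the sleep primitive by duration: at 20 ms and below, usleep_range() gives hrtimer precision plus scheduler slack; above that, msleep() is accurate enough and cheaper. A userspace analogue of the same cut-over, with nanosleep() standing in for both primitives:

#include <stdio.h>
#include <time.h>

static void delay_ms(unsigned int ms)
{
        struct timespec ts = {
                .tv_sec  = ms / 1000,
                .tv_nsec = (long)(ms % 1000) * 1000000L,
        };

        if (ms <= 20)
                printf("%u ms: usleep_range(%u, %u) in the kernel\n",
                       ms, ms * 1000, ms * 1250);
        else
                printf("%u ms: msleep(%u) in the kernel\n", ms, ms);

        nanosleep(&ts, NULL);   /* one primitive has to do here */
}

int main(void)
{
        delay_ms(5);
        delay_ms(100);
        return 0;
}
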
void mmc_rescan(struct work_struct *work);
@@ -91,8 +89,6 @@ void mmc_remove_host_debugfs(struct mmc_host *host);
void mmc_add_card_debugfs(struct mmc_card *card);
void mmc_remove_card_debugfs(struct mmc_card *card);
-void mmc_init_context_info(struct mmc_host *host);
-
int mmc_execute_tuning(struct mmc_card *card);
int mmc_hs200_to_hs400(struct mmc_card *card);
int mmc_hs400_to_hs200(struct mmc_card *card);
@@ -110,12 +106,6 @@ bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq);
int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq);
-struct mmc_async_req;
-
-struct mmc_async_req *mmc_start_areq(struct mmc_host *host,
- struct mmc_async_req *areq,
- enum mmc_blk_status *ret_stat);
-
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
unsigned int arg);
int mmc_can_erase(struct mmc_card *card);
@@ -152,4 +142,35 @@ int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq);
void mmc_cqe_post_req(struct mmc_host *host, struct mmc_request *mrq);
int mmc_cqe_recovery(struct mmc_host *host);
+/**
+ * mmc_pre_req - Prepare for a new request
+ * @host: MMC host to prepare command
+ * @mrq: MMC request to prepare for
+ *
+ * mmc_pre_req() is called prior to mmc_start_req() to let the
+ * host prepare for the new request. Preparation of a request may be
+ * performed while another request is running on the host.
+ */
+static inline void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ if (host->ops->pre_req)
+ host->ops->pre_req(host, mrq);
+}
+
+/**
+ * mmc_post_req - Post process a completed request
+ * @host: MMC host to post process command
+ * @mrq: MMC request to post process for
+ * @err: Error, if non zero, clean up any resources made in pre_req
+ *
+ * Let the host post process a completed request. Post processing of
+ * a request may be performed while another request is running.
+ */
+static inline void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
+ int err)
+{
+ if (host->ops->post_req)
+ host->ops->post_req(host, mrq, err);
+}
+
#endif
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index fb689a1065ed..06ec19b5bf9f 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -41,6 +41,11 @@ static inline int mmc_host_cmd23(struct mmc_host *host)
return host->caps & MMC_CAP_CMD23;
}
+static inline bool mmc_host_done_complete(struct mmc_host *host)
+{
+ return host->caps & MMC_CAP_DONE_COMPLETE;
+}
+
static inline int mmc_boot_partition_access(struct mmc_host *host)
{
return !(host->caps2 & MMC_CAP2_BOOTPART_NOACC);
@@ -74,6 +79,5 @@ static inline bool mmc_card_hs400es(struct mmc_card *card)
return card->host->ios.enhanced_strobe;
}
-
#endif
diff --git a/drivers/mmc/core/mmc_test.c b/drivers/mmc/core/mmc_test.c
index 478869805b96..ef18daeaa4cc 100644
--- a/drivers/mmc/core/mmc_test.c
+++ b/drivers/mmc/core/mmc_test.c
@@ -101,7 +101,7 @@ struct mmc_test_transfer_result {
struct list_head link;
unsigned int count;
unsigned int sectors;
- struct timespec ts;
+ struct timespec64 ts;
unsigned int rate;
unsigned int iops;
};
@@ -171,11 +171,6 @@ struct mmc_test_multiple_rw {
enum mmc_test_prep_media prepare;
};
-struct mmc_test_async_req {
- struct mmc_async_req areq;
- struct mmc_test_card *test;
-};
-
/*******************************************************************/
/* General helper functions */
/*******************************************************************/
@@ -515,14 +510,11 @@ static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
/*
* Calculate transfer rate in bytes per second.
*/
-static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
+static unsigned int mmc_test_rate(uint64_t bytes, struct timespec64 *ts)
{
uint64_t ns;
- ns = ts->tv_sec;
- ns *= 1000000000;
- ns += ts->tv_nsec;
-
+ ns = timespec64_to_ns(ts);
bytes *= 1000000000;
while (ns > UINT_MAX) {
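
The timespec64 conversion leaves the rate math unchanged: scale bytes to a per-second figure, then shift both operands right until the nanosecond divisor fits in 32 bits so the division stays a cheap 64-by-32. The same computation in portable C:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

static unsigned int rate(uint64_t bytes, uint64_t ns)
{
        bytes *= 1000000000ULL;         /* scale to per-second */
        while (ns > UINT_MAX) {         /* narrow the divisor */
                bytes >>= 1;
                ns >>= 1;
        }
        if (!ns)                        /* the kernel assumes ns != 0 */
                return 0;
        return (unsigned int)(bytes / ns);
}

int main(void)
{
        /* 1 MiB in 20 ms -> 52428800 bytes/s (~52.4 MB/s) */
        printf("%u bytes/s\n", rate(1024 * 1024, 20000000ULL));
        return 0;
}
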
@@ -542,7 +534,7 @@ static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
* Save transfer results for future usage
*/
static void mmc_test_save_transfer_result(struct mmc_test_card *test,
- unsigned int count, unsigned int sectors, struct timespec ts,
+ unsigned int count, unsigned int sectors, struct timespec64 ts,
unsigned int rate, unsigned int iops)
{
struct mmc_test_transfer_result *tr;
@@ -567,21 +559,21 @@ static void mmc_test_save_transfer_result(struct mmc_test_card *test,
* Print the transfer rate.
*/
static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
- struct timespec *ts1, struct timespec *ts2)
+ struct timespec64 *ts1, struct timespec64 *ts2)
{
unsigned int rate, iops, sectors = bytes >> 9;
- struct timespec ts;
+ struct timespec64 ts;
- ts = timespec_sub(*ts2, *ts1);
+ ts = timespec64_sub(*ts2, *ts1);
rate = mmc_test_rate(bytes, &ts);
iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */
- pr_info("%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
+ pr_info("%s: Transfer of %u sectors (%u%s KiB) took %llu.%09u "
"seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
mmc_hostname(test->card->host), sectors, sectors >> 1,
- (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
- (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
+ (sectors & 1 ? ".5" : ""), (u64)ts.tv_sec,
+ (u32)ts.tv_nsec, rate / 1000, rate / 1024,
iops / 100, iops % 100);
mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
@@ -591,24 +583,24 @@ static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
* Print the average transfer rate.
*/
static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
- unsigned int count, struct timespec *ts1,
- struct timespec *ts2)
+ unsigned int count, struct timespec64 *ts1,
+ struct timespec64 *ts2)
{
unsigned int rate, iops, sectors = bytes >> 9;
uint64_t tot = bytes * count;
- struct timespec ts;
+ struct timespec64 ts;
- ts = timespec_sub(*ts2, *ts1);
+ ts = timespec64_sub(*ts2, *ts1);
rate = mmc_test_rate(tot, &ts);
iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */
pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
- "%lu.%09lu seconds (%u kB/s, %u KiB/s, "
+ "%llu.%09u seconds (%u kB/s, %u KiB/s, "
"%u.%02u IOPS, sg_len %d)\n",
mmc_hostname(test->card->host), count, sectors, count,
sectors >> 1, (sectors & 1 ? ".5" : ""),
- (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
+ (u64)ts.tv_sec, (u32)ts.tv_nsec,
rate / 1000, rate / 1024, iops / 100, iops % 100,
test->area.sg_len);
@@ -741,30 +733,6 @@ static int mmc_test_check_result(struct mmc_test_card *test,
return ret;
}
-static enum mmc_blk_status mmc_test_check_result_async(struct mmc_card *card,
- struct mmc_async_req *areq)
-{
- struct mmc_test_async_req *test_async =
- container_of(areq, struct mmc_test_async_req, areq);
- int ret;
-
- mmc_test_wait_busy(test_async->test);
-
- /*
- * FIXME: this would earlier just casts a regular error code,
- * either of the kernel type -ERRORCODE or the local test framework
- * RESULT_* errorcode, into an enum mmc_blk_status and return as
- * result check. Instead, convert it to some reasonable type by just
- * returning either MMC_BLK_SUCCESS or MMC_BLK_CMD_ERR.
- * If possible, a reasonable error code should be returned.
- */
- ret = mmc_test_check_result(test_async->test, areq->mrq);
- if (ret)
- return MMC_BLK_CMD_ERR;
-
- return MMC_BLK_SUCCESS;
-}
-
/*
* Checks that a "short transfer" behaved as expected
*/
@@ -831,6 +799,45 @@ static struct mmc_test_req *mmc_test_req_alloc(void)
return rq;
}
+static void mmc_test_wait_done(struct mmc_request *mrq)
+{
+ complete(&mrq->completion);
+}
+
+static int mmc_test_start_areq(struct mmc_test_card *test,
+ struct mmc_request *mrq,
+ struct mmc_request *prev_mrq)
+{
+ struct mmc_host *host = test->card->host;
+ int err = 0;
+
+ if (mrq) {
+ init_completion(&mrq->completion);
+ mrq->done = mmc_test_wait_done;
+ mmc_pre_req(host, mrq);
+ }
+
+ if (prev_mrq) {
+ wait_for_completion(&prev_mrq->completion);
+ err = mmc_test_wait_busy(test);
+ if (!err)
+ err = mmc_test_check_result(test, prev_mrq);
+ }
+
+ if (!err && mrq) {
+ err = mmc_start_request(host, mrq);
+ if (err)
+ mmc_retune_release(host);
+ }
+
+ if (prev_mrq)
+ mmc_post_req(host, prev_mrq, 0);
+
+ if (err && mrq)
+ mmc_post_req(host, mrq, err);
+
+ return err;
+}
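
mmc_test_start_areq() keeps two requests in flight the way the removed core helper did: prepare the next while the previous runs, reap the previous, then start the next. A toy model of the call ordering across the rq1/rq2 ping-pong; pre_req/start/post_req are printf stand-ins for the host hooks:

#include <stdio.h>

static void pre_req(int id)  { printf("prepare  mrq%d\n", id); }
static void start(int id)    { printf("start    mrq%d\n", id); }
static void post_req(int id) { printf("finish   mrq%d\n", id); }

/* Same ordering as mmc_test_start_areq(): prepare next, reap prev,
 * start next, post-process prev. Negative id means "none". */
static void start_areq(int next, int prev)
{
        if (next >= 0)
                pre_req(next);
        if (prev >= 0)
                printf("wait for mrq%d\n", prev);
        if (next >= 0)
                start(next);
        if (prev >= 0)
                post_req(prev);
}

int main(void)
{
        int mrq = 0, prev = -1;         /* rq1 and "no previous" */

        for (int i = 0; i < 4; i++) {
                start_areq(mrq, prev);
                if (prev < 0)
                        prev = 1;       /* bring rq2 into the rotation */
                int tmp = mrq; mrq = prev; prev = tmp;  /* swap() */
        }
        start_areq(-1, prev);           /* drain the last request */
        return 0;
}
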
static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
struct scatterlist *sg, unsigned sg_len,
@@ -838,17 +845,10 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
unsigned blksz, int write, int count)
{
struct mmc_test_req *rq1, *rq2;
- struct mmc_test_async_req test_areq[2];
- struct mmc_async_req *done_areq;
- struct mmc_async_req *cur_areq = &test_areq[0].areq;
- struct mmc_async_req *other_areq = &test_areq[1].areq;
- enum mmc_blk_status status;
+ struct mmc_request *mrq, *prev_mrq;
int i;
int ret = RESULT_OK;
- test_areq[0].test = test;
- test_areq[1].test = test;
-
rq1 = mmc_test_req_alloc();
rq2 = mmc_test_req_alloc();
if (!rq1 || !rq2) {
@@ -856,33 +856,25 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
goto err;
}
- cur_areq->mrq = &rq1->mrq;
- cur_areq->err_check = mmc_test_check_result_async;
- other_areq->mrq = &rq2->mrq;
- other_areq->err_check = mmc_test_check_result_async;
+ mrq = &rq1->mrq;
+ prev_mrq = NULL;
for (i = 0; i < count; i++) {
- mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr,
- blocks, blksz, write);
- done_areq = mmc_start_areq(test->card->host, cur_areq, &status);
-
- if (status != MMC_BLK_SUCCESS || (!done_areq && i > 0)) {
- ret = RESULT_FAIL;
+ mmc_test_req_reset(container_of(mrq, struct mmc_test_req, mrq));
+ mmc_test_prepare_mrq(test, mrq, sg, sg_len, dev_addr, blocks,
+ blksz, write);
+ ret = mmc_test_start_areq(test, mrq, prev_mrq);
+ if (ret)
goto err;
- }
- if (done_areq)
- mmc_test_req_reset(container_of(done_areq->mrq,
- struct mmc_test_req, mrq));
+ if (!prev_mrq)
+ prev_mrq = &rq2->mrq;
- swap(cur_areq, other_areq);
+ swap(mrq, prev_mrq);
dev_addr += blocks;
}
- done_areq = mmc_start_areq(test->card->host, NULL, &status);
- if (status != MMC_BLK_SUCCESS)
- ret = RESULT_FAIL;
-
+ ret = mmc_test_start_areq(test, NULL, prev_mrq);
err:
kfree(rq1);
kfree(rq2);
@@ -1449,7 +1441,7 @@ static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
int max_scatter, int timed, int count,
bool nonblock, int min_sg_len)
{
- struct timespec ts1, ts2;
+ struct timespec64 ts1, ts2;
int ret = 0;
int i;
struct mmc_test_area *t = &test->area;
@@ -1475,7 +1467,7 @@ static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
return ret;
if (timed)
- getnstimeofday(&ts1);
+ ktime_get_ts64(&ts1);
if (nonblock)
ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
dev_addr, t->blocks, 512, write, count);
@@ -1489,7 +1481,7 @@ static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
return ret;
if (timed)
- getnstimeofday(&ts2);
+ ktime_get_ts64(&ts2);
if (timed)
mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);
@@ -1747,7 +1739,7 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
struct mmc_test_area *t = &test->area;
unsigned long sz;
unsigned int dev_addr;
- struct timespec ts1, ts2;
+ struct timespec64 ts1, ts2;
int ret;
if (!mmc_can_trim(test->card))
@@ -1758,19 +1750,19 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
for (sz = 512; sz < t->max_sz; sz <<= 1) {
dev_addr = t->dev_addr + (sz >> 9);
- getnstimeofday(&ts1);
+ ktime_get_ts64(&ts1);
ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
if (ret)
return ret;
- getnstimeofday(&ts2);
+ ktime_get_ts64(&ts2);
mmc_test_print_rate(test, sz, &ts1, &ts2);
}
dev_addr = t->dev_addr;
- getnstimeofday(&ts1);
+ ktime_get_ts64(&ts1);
ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
if (ret)
return ret;
- getnstimeofday(&ts2);
+ ktime_get_ts64(&ts2);
mmc_test_print_rate(test, sz, &ts1, &ts2);
return 0;
}
@@ -1779,19 +1771,19 @@ static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
{
struct mmc_test_area *t = &test->area;
unsigned int dev_addr, i, cnt;
- struct timespec ts1, ts2;
+ struct timespec64 ts1, ts2;
int ret;
cnt = t->max_sz / sz;
dev_addr = t->dev_addr;
- getnstimeofday(&ts1);
+ ktime_get_ts64(&ts1);
for (i = 0; i < cnt; i++) {
ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
if (ret)
return ret;
dev_addr += (sz >> 9);
}
- getnstimeofday(&ts2);
+ ktime_get_ts64(&ts2);
mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
return 0;
}
@@ -1818,7 +1810,7 @@ static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
{
struct mmc_test_area *t = &test->area;
unsigned int dev_addr, i, cnt;
- struct timespec ts1, ts2;
+ struct timespec64 ts1, ts2;
int ret;
ret = mmc_test_area_erase(test);
@@ -1826,14 +1818,14 @@ static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
return ret;
cnt = t->max_sz / sz;
dev_addr = t->dev_addr;
- getnstimeofday(&ts1);
+ ktime_get_ts64(&ts1);
for (i = 0; i < cnt; i++) {
ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
if (ret)
return ret;
dev_addr += (sz >> 9);
}
- getnstimeofday(&ts2);
+ ktime_get_ts64(&ts2);
mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
return 0;
}
@@ -1864,7 +1856,7 @@ static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
struct mmc_test_area *t = &test->area;
unsigned long sz;
unsigned int dev_addr, i, cnt;
- struct timespec ts1, ts2;
+ struct timespec64 ts1, ts2;
int ret;
if (!mmc_can_trim(test->card))
@@ -1882,7 +1874,7 @@ static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
return ret;
cnt = t->max_sz / sz;
dev_addr = t->dev_addr;
- getnstimeofday(&ts1);
+ ktime_get_ts64(&ts1);
for (i = 0; i < cnt; i++) {
ret = mmc_erase(test->card, dev_addr, sz >> 9,
MMC_TRIM_ARG);
@@ -1890,7 +1882,7 @@ static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
return ret;
dev_addr += (sz >> 9);
}
- getnstimeofday(&ts2);
+ ktime_get_ts64(&ts2);
mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
}
return 0;
@@ -1912,7 +1904,7 @@ static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
{
unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
unsigned int ssz;
- struct timespec ts1, ts2, ts;
+ struct timespec64 ts1, ts2, ts;
int ret;
ssz = sz >> 9;
@@ -1921,10 +1913,10 @@ static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
range1 = rnd_addr / test->card->pref_erase;
range2 = range1 / ssz;
- getnstimeofday(&ts1);
+ ktime_get_ts64(&ts1);
for (cnt = 0; cnt < UINT_MAX; cnt++) {
- getnstimeofday(&ts2);
- ts = timespec_sub(ts2, ts1);
+ ktime_get_ts64(&ts2);
+ ts = timespec64_sub(ts2, ts1);
if (ts.tv_sec >= 10)
break;
ea = mmc_test_rnd_num(range1);
@@ -1998,7 +1990,7 @@ static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
{
struct mmc_test_area *t = &test->area;
unsigned int dev_addr, i, cnt, sz, ssz;
- struct timespec ts1, ts2;
+ struct timespec64 ts1, ts2;
int ret;
sz = t->max_tfr;
@@ -2025,7 +2017,7 @@ static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
cnt = tot_sz / sz;
dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
- getnstimeofday(&ts1);
+ ktime_get_ts64(&ts1);
for (i = 0; i < cnt; i++) {
ret = mmc_test_area_io(test, sz, dev_addr, write,
max_scatter, 0);
@@ -2033,7 +2025,7 @@ static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
return ret;
dev_addr += ssz;
}
- getnstimeofday(&ts2);
+ ktime_get_ts64(&ts2);
mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
@@ -2328,10 +2320,17 @@ static int mmc_test_reset(struct mmc_test_card *test)
int err;
err = mmc_hw_reset(host);
- if (!err)
+ if (!err) {
+ /*
+ * Reset will re-enable the card's command queue, but tests
+ * expect it to be disabled.
+ */
+ if (card->ext_csd.cmdq_en)
+ mmc_cmdq_disable(card);
return RESULT_OK;
- else if (err == -EOPNOTSUPP)
+ } else if (err == -EOPNOTSUPP) {
return RESULT_UNSUP_HOST;
+ }
return RESULT_FAIL;
}
@@ -2356,11 +2355,9 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
struct mmc_test_req *rq = mmc_test_req_alloc();
struct mmc_host *host = test->card->host;
struct mmc_test_area *t = &test->area;
- struct mmc_test_async_req test_areq = { .test = test };
struct mmc_request *mrq;
unsigned long timeout;
bool expired = false;
- enum mmc_blk_status blkstat = MMC_BLK_SUCCESS;
int ret = 0, cmd_ret;
u32 status = 0;
int count = 0;
@@ -2373,9 +2370,6 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
mrq->sbc = &rq->sbc;
mrq->cap_cmd_during_tfr = true;
- test_areq.areq.mrq = mrq;
- test_areq.areq.err_check = mmc_test_check_result_async;
-
mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks,
512, write);
@@ -2388,11 +2382,9 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
/* Start ongoing data request */
if (use_areq) {
- mmc_start_areq(host, &test_areq.areq, &blkstat);
- if (blkstat != MMC_BLK_SUCCESS) {
- ret = RESULT_FAIL;
+ ret = mmc_test_start_areq(test, mrq, NULL);
+ if (ret)
goto out_free;
- }
} else {
mmc_wait_for_req(host, mrq);
}
@@ -2426,9 +2418,7 @@ static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
/* Wait for data request to complete */
if (use_areq) {
- mmc_start_areq(host, NULL, &blkstat);
- if (blkstat != MMC_BLK_SUCCESS)
- ret = RESULT_FAIL;
+ ret = mmc_test_start_areq(test, NULL, mrq);
} else {
mmc_wait_for_req_done(test->card->host, mrq);
}
@@ -3066,10 +3056,9 @@ static int mtf_test_show(struct seq_file *sf, void *data)
seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);
list_for_each_entry(tr, &gr->tr_lst, link) {
- seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n",
+ seq_printf(sf, "%u %d %llu.%09u %u %u.%02u\n",
tr->count, tr->sectors,
- (unsigned long)tr->ts.tv_sec,
- (unsigned long)tr->ts.tv_nsec,
+ (u64)tr->ts.tv_sec, (u32)tr->ts.tv_nsec,
tr->rate, tr->iops / 100, tr->iops % 100);
}
}
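
The seq_printf() change matters because timespec64.tv_sec is 64-bit even on 32-bit kernels, so the old %lu cast would truncate. A userspace analogue of the same cast-and-print idiom, using the plain POSIX timespec:

	#include <stdio.h>
	#include <time.h>

	int main(void)
	{
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		/* Cast explicitly so the format string is correct on both
		 * 32- and 64-bit targets, mirroring the seq_printf() above. */
		printf("%llu.%09u\n", (unsigned long long)ts.tv_sec,
		       (unsigned int)ts.tv_nsec);
		return 0;
	}
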
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 4f33d277b125..421fab7250ac 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -22,100 +22,147 @@
#include "block.h"
#include "core.h"
#include "card.h"
+#include "host.h"
-/*
- * Prepare a MMC request. This just filters out odd stuff.
- */
-static int mmc_prep_request(struct request_queue *q, struct request *req)
+static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
{
- struct mmc_queue *mq = q->queuedata;
+ /* Allow only 1 DCMD at a time */
+ return mq->in_flight[MMC_ISSUE_DCMD];
+}
- if (mq && mmc_card_removed(mq->card))
- return BLKPREP_KILL;
+void mmc_cqe_check_busy(struct mmc_queue *mq)
+{
+ if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq))
+ mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY;
- req->rq_flags |= RQF_DONTPREP;
+ mq->cqe_busy &= ~MMC_CQE_QUEUE_FULL;
+}
- return BLKPREP_OK;
+static inline bool mmc_cqe_can_dcmd(struct mmc_host *host)
+{
+ return host->caps2 & MMC_CAP2_CQE_DCMD;
}
-static int mmc_queue_thread(void *d)
+static enum mmc_issue_type mmc_cqe_issue_type(struct mmc_host *host,
+ struct request *req)
{
- struct mmc_queue *mq = d;
- struct request_queue *q = mq->queue;
- struct mmc_context_info *cntx = &mq->card->host->context_info;
+ switch (req_op(req)) {
+ case REQ_OP_DRV_IN:
+ case REQ_OP_DRV_OUT:
+ case REQ_OP_DISCARD:
+ case REQ_OP_SECURE_ERASE:
+ return MMC_ISSUE_SYNC;
+ case REQ_OP_FLUSH:
+ return mmc_cqe_can_dcmd(host) ? MMC_ISSUE_DCMD : MMC_ISSUE_SYNC;
+ default:
+ return MMC_ISSUE_ASYNC;
+ }
+}
- current->flags |= PF_MEMALLOC;
+enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_host *host = mq->card->host;
- down(&mq->thread_sem);
- do {
- struct request *req;
+ if (mq->use_cqe)
+ return mmc_cqe_issue_type(host, req);
- spin_lock_irq(q->queue_lock);
- set_current_state(TASK_INTERRUPTIBLE);
- req = blk_fetch_request(q);
- mq->asleep = false;
- cntx->is_waiting_last_req = false;
- cntx->is_new_req = false;
- if (!req) {
- /*
- * Dispatch queue is empty so set flags for
- * mmc_request_fn() to wake us up.
- */
- if (mq->qcnt)
- cntx->is_waiting_last_req = true;
- else
- mq->asleep = true;
- }
- spin_unlock_irq(q->queue_lock);
+ if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
+ return MMC_ISSUE_ASYNC;
- if (req || mq->qcnt) {
- set_current_state(TASK_RUNNING);
- mmc_blk_issue_rq(mq, req);
- cond_resched();
- } else {
- if (kthread_should_stop()) {
- set_current_state(TASK_RUNNING);
- break;
- }
- up(&mq->thread_sem);
- schedule();
- down(&mq->thread_sem);
- }
- } while (1);
- up(&mq->thread_sem);
+ return MMC_ISSUE_SYNC;
+}
- return 0;
+static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq)
+{
+ if (!mq->recovery_needed) {
+ mq->recovery_needed = true;
+ schedule_work(&mq->recovery_work);
+ }
}
-/*
- * Generic MMC request handler. This is called for any queue on a
- * particular host. When the host is not busy, we look for a request
- * on any queue on this host, and attempt to issue it. This may
- * not be the queue we were asked to process.
- */
-static void mmc_request_fn(struct request_queue *q)
+void mmc_cqe_recovery_notifier(struct mmc_request *mrq)
{
+ struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
+ brq.mrq);
+ struct request *req = mmc_queue_req_to_req(mqrq);
+ struct request_queue *q = req->q;
struct mmc_queue *mq = q->queuedata;
- struct request *req;
- struct mmc_context_info *cntx;
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ __mmc_cqe_recovery_notifier(mq);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
- if (!mq) {
- while ((req = blk_fetch_request(q)) != NULL) {
- req->rq_flags |= RQF_QUIET;
- __blk_end_request_all(req, BLK_STS_IOERR);
+static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
+{
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_request *mrq = &mqrq->brq.mrq;
+ struct mmc_queue *mq = req->q->queuedata;
+ struct mmc_host *host = mq->card->host;
+ enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
+ bool recovery_needed = false;
+
+ switch (issue_type) {
+ case MMC_ISSUE_ASYNC:
+ case MMC_ISSUE_DCMD:
+ if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
+ if (recovery_needed)
+ __mmc_cqe_recovery_notifier(mq);
+ return BLK_EH_RESET_TIMER;
}
- return;
+ /* No timeout */
+ return BLK_EH_HANDLED;
+ default:
+ /* Timeout is handled by mmc core */
+ return BLK_EH_RESET_TIMER;
}
+}
+
+static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
+ bool reserved)
+{
+ struct request_queue *q = req->q;
+ struct mmc_queue *mq = q->queuedata;
+ unsigned long flags;
+ int ret;
- cntx = &mq->card->host->context_info;
+ spin_lock_irqsave(q->queue_lock, flags);
- if (cntx->is_waiting_last_req) {
- cntx->is_new_req = true;
- wake_up_interruptible(&cntx->wait);
- }
+ if (mq->recovery_needed || !mq->use_cqe)
+ ret = BLK_EH_RESET_TIMER;
+ else
+ ret = mmc_cqe_timed_out(req);
- if (mq->asleep)
- wake_up_process(mq->thread);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ return ret;
+}
+
+static void mmc_mq_recovery_handler(struct work_struct *work)
+{
+ struct mmc_queue *mq = container_of(work, struct mmc_queue,
+ recovery_work);
+ struct request_queue *q = mq->queue;
+
+ mmc_get_card(mq->card, &mq->ctx);
+
+ mq->in_recovery = true;
+
+ if (mq->use_cqe)
+ mmc_blk_cqe_recovery(mq);
+ else
+ mmc_blk_mq_recovery(mq);
+
+ mq->in_recovery = false;
+
+ spin_lock_irq(q->queue_lock);
+ mq->recovery_needed = false;
+ spin_unlock_irq(q->queue_lock);
+
+ mmc_put_card(mq->card, &mq->ctx);
+
+ blk_mq_run_hw_queues(q, true);
}
static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
@@ -154,11 +201,10 @@ static void mmc_queue_setup_discard(struct request_queue *q,
* @req: the request
* @gfp: memory allocation policy
*/
-static int mmc_init_request(struct request_queue *q, struct request *req,
- gfp_t gfp)
+static int __mmc_init_request(struct mmc_queue *mq, struct request *req,
+ gfp_t gfp)
{
struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
- struct mmc_queue *mq = q->queuedata;
struct mmc_card *card = mq->card;
struct mmc_host *host = card->host;
@@ -177,6 +223,131 @@ static void mmc_exit_request(struct request_queue *q, struct request *req)
mq_rq->sg = NULL;
}
+static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req,
+ unsigned int hctx_idx, unsigned int numa_node)
+{
+ return __mmc_init_request(set->driver_data, req, GFP_KERNEL);
+}
+
+static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
+ unsigned int hctx_idx)
+{
+ struct mmc_queue *mq = set->driver_data;
+
+ mmc_exit_request(mq->queue, req);
+}
+
+/*
+ * We use BLK_MQ_F_BLOCKING and have only 1 hardware queue, which means requests
+ * will not be dispatched in parallel.
+ */
+static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct request *req = bd->rq;
+ struct request_queue *q = req->q;
+ struct mmc_queue *mq = q->queuedata;
+ struct mmc_card *card = mq->card;
+ struct mmc_host *host = card->host;
+ enum mmc_issue_type issue_type;
+ enum mmc_issued issued;
+ bool get_card, cqe_retune_ok;
+ int ret;
+
+ if (mmc_card_removed(mq->card)) {
+ req->rq_flags |= RQF_QUIET;
+ return BLK_STS_IOERR;
+ }
+
+ issue_type = mmc_issue_type(mq, req);
+
+ spin_lock_irq(q->queue_lock);
+
+ if (mq->recovery_needed) {
+ spin_unlock_irq(q->queue_lock);
+ return BLK_STS_RESOURCE;
+ }
+
+ switch (issue_type) {
+ case MMC_ISSUE_DCMD:
+ if (mmc_cqe_dcmd_busy(mq)) {
+ mq->cqe_busy |= MMC_CQE_DCMD_BUSY;
+ spin_unlock_irq(q->queue_lock);
+ return BLK_STS_RESOURCE;
+ }
+ break;
+ case MMC_ISSUE_ASYNC:
+ break;
+ default:
+ /*
+ * Timeouts are handled by mmc core, and we don't have a host
+ * API to abort requests, so we can't handle the timeout anyway.
+ * However, when the timeout happens, blk_mq_complete_request()
+ * no longer works (to stop the request disappearing under us).
+ * To avoid racing with that, set a large timeout.
+ */
+ req->timeout = 600 * HZ;
+ break;
+ }
+
+ mq->in_flight[issue_type] += 1;
+ get_card = (mmc_tot_in_flight(mq) == 1);
+ cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);
+
+ spin_unlock_irq(q->queue_lock);
+
+ if (!(req->rq_flags & RQF_DONTPREP)) {
+ req_to_mmc_queue_req(req)->retries = 0;
+ req->rq_flags |= RQF_DONTPREP;
+ }
+
+ if (get_card)
+ mmc_get_card(card, &mq->ctx);
+
+ if (mq->use_cqe) {
+ host->retune_now = host->need_retune && cqe_retune_ok &&
+ !host->hold_retune;
+ }
+
+ blk_mq_start_request(req);
+
+ issued = mmc_blk_mq_issue_rq(mq, req);
+
+ switch (issued) {
+ case MMC_REQ_BUSY:
+ ret = BLK_STS_RESOURCE;
+ break;
+ case MMC_REQ_FAILED_TO_START:
+ ret = BLK_STS_IOERR;
+ break;
+ default:
+ ret = BLK_STS_OK;
+ break;
+ }
+
+ if (issued != MMC_REQ_STARTED) {
+ bool put_card = false;
+
+ spin_lock_irq(q->queue_lock);
+ mq->in_flight[issue_type] -= 1;
+ if (mmc_tot_in_flight(mq) == 0)
+ put_card = true;
+ spin_unlock_irq(q->queue_lock);
+ if (put_card)
+ mmc_put_card(card, &mq->ctx);
+ }
+
+ return ret;
+}
+
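
The issue path above claims the host on the first in-flight request and releases it after the last one completes. A small sketch of that first-in/last-out accounting, with a mutex standing in for queue_lock and hypothetical claim()/release() helpers in place of mmc_get_card()/mmc_put_card():

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static int in_flight;

	static void claim(void)   { printf("claim host\n"); }
	static void release(void) { printf("release host\n"); }

	static void issue_start(void)
	{
		bool get;

		pthread_mutex_lock(&lock);
		get = (++in_flight == 1);   /* first in-flight request claims */
		pthread_mutex_unlock(&lock);
		if (get)
			claim();
	}

	static void issue_done(void)
	{
		bool put;

		pthread_mutex_lock(&lock);
		put = (--in_flight == 0);   /* last in-flight request releases */
		pthread_mutex_unlock(&lock);
		if (put)
			release();
	}

	int main(void)
	{
		issue_start();  /* claims */
		issue_start();  /* already claimed */
		issue_done();
		issue_done();   /* releases */
		return 0;
	}
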
+static const struct blk_mq_ops mmc_mq_ops = {
+ .queue_rq = mmc_mq_queue_rq,
+ .init_request = mmc_mq_init_request,
+ .exit_request = mmc_mq_exit_request,
+ .complete = mmc_blk_mq_complete,
+ .timeout = mmc_mq_timed_out,
+};
+
static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
{
struct mmc_host *host = card->host;
@@ -196,124 +367,139 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
blk_queue_max_segments(mq->queue, host->max_segs);
blk_queue_max_segment_size(mq->queue, host->max_seg_size);
- /* Initialize thread_sem even if it is not used */
- sema_init(&mq->thread_sem, 1);
+ INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
+ INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);
+
+ mutex_init(&mq->complete_lock);
+
+ init_waitqueue_head(&mq->wait);
}
-/**
- * mmc_init_queue - initialise a queue structure.
- * @mq: mmc queue
- * @card: mmc card to attach this queue
- * @lock: queue lock
- * @subname: partition subname
- *
- * Initialise a MMC card request queue.
- */
-int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
- spinlock_t *lock, const char *subname)
+static int mmc_mq_init_queue(struct mmc_queue *mq, int q_depth,
+ const struct blk_mq_ops *mq_ops, spinlock_t *lock)
{
- struct mmc_host *host = card->host;
- int ret = -ENOMEM;
-
- mq->card = card;
- mq->queue = blk_alloc_queue(GFP_KERNEL);
- if (!mq->queue)
- return -ENOMEM;
- mq->queue->queue_lock = lock;
- mq->queue->request_fn = mmc_request_fn;
- mq->queue->init_rq_fn = mmc_init_request;
- mq->queue->exit_rq_fn = mmc_exit_request;
- mq->queue->cmd_size = sizeof(struct mmc_queue_req);
- mq->queue->queuedata = mq;
- mq->qcnt = 0;
- ret = blk_init_allocated_queue(mq->queue);
- if (ret) {
- blk_cleanup_queue(mq->queue);
+ int ret;
+
+ memset(&mq->tag_set, 0, sizeof(mq->tag_set));
+ mq->tag_set.ops = mq_ops;
+ mq->tag_set.queue_depth = q_depth;
+ mq->tag_set.numa_node = NUMA_NO_NODE;
+ mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE |
+ BLK_MQ_F_BLOCKING;
+ mq->tag_set.nr_hw_queues = 1;
+ mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
+ mq->tag_set.driver_data = mq;
+
+ ret = blk_mq_alloc_tag_set(&mq->tag_set);
+ if (ret)
return ret;
- }
-
- blk_queue_prep_rq(mq->queue, mmc_prep_request);
-
- mmc_setup_queue(mq, card);
- mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
- host->index, subname ? subname : "");
-
- if (IS_ERR(mq->thread)) {
- ret = PTR_ERR(mq->thread);
- goto cleanup_queue;
+ mq->queue = blk_mq_init_queue(&mq->tag_set);
+ if (IS_ERR(mq->queue)) {
+ ret = PTR_ERR(mq->queue);
+ goto free_tag_set;
}
+ mq->queue->queue_lock = lock;
+ mq->queue->queuedata = mq;
+
return 0;
-cleanup_queue:
- blk_cleanup_queue(mq->queue);
+free_tag_set:
+ blk_mq_free_tag_set(&mq->tag_set);
+
return ret;
}
-void mmc_cleanup_queue(struct mmc_queue *mq)
-{
- struct request_queue *q = mq->queue;
- unsigned long flags;
+/* Set queue depth to get a reasonable value for q->nr_requests */
+#define MMC_QUEUE_DEPTH 64
- /* Make sure the queue isn't suspended, as that will deadlock */
- mmc_queue_resume(mq);
+static int mmc_mq_init(struct mmc_queue *mq, struct mmc_card *card,
+ spinlock_t *lock)
+{
+ struct mmc_host *host = card->host;
+ int q_depth;
+ int ret;
+
+ /*
+ * The queue depth for CQE must match the hardware because the request
+ * tag is used to index the hardware queue.
+ */
+ if (mq->use_cqe)
+ q_depth = min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
+ else
+ q_depth = MMC_QUEUE_DEPTH;
+
+ ret = mmc_mq_init_queue(mq, q_depth, &mmc_mq_ops, lock);
+ if (ret)
+ return ret;
- /* Then terminate our worker thread */
- kthread_stop(mq->thread);
+ blk_queue_rq_timeout(mq->queue, 60 * HZ);
- /* Empty the queue */
- spin_lock_irqsave(q->queue_lock, flags);
- q->queuedata = NULL;
- blk_start_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ mmc_setup_queue(mq, card);
- mq->card = NULL;
+ return 0;
}
-EXPORT_SYMBOL(mmc_cleanup_queue);
/**
- * mmc_queue_suspend - suspend a MMC request queue
- * @mq: MMC queue to suspend
+ * mmc_init_queue - initialise a queue structure.
+ * @mq: mmc queue
+ * @card: mmc card to attach this queue
+ * @lock: queue lock
+ * @subname: partition subname
*
- * Stop the block request queue, and wait for our thread to
- * complete any outstanding requests. This ensures that we
- * won't suspend while a request is being processed.
+ * Initialise a MMC card request queue.
*/
-void mmc_queue_suspend(struct mmc_queue *mq)
+int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
+ spinlock_t *lock, const char *subname)
{
- struct request_queue *q = mq->queue;
- unsigned long flags;
+ struct mmc_host *host = card->host;
- if (!mq->suspended) {
- mq->suspended |= true;
+ mq->card = card;
- spin_lock_irqsave(q->queue_lock, flags);
- blk_stop_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ mq->use_cqe = host->cqe_enabled;
- down(&mq->thread_sem);
- }
+ return mmc_mq_init(mq, card, lock);
+}
+
+void mmc_queue_suspend(struct mmc_queue *mq)
+{
+ blk_mq_quiesce_queue(mq->queue);
+
+ /*
+ * The host remains claimed while there are outstanding requests, so
+ * simply claiming and releasing here ensures there are none.
+ */
+ mmc_claim_host(mq->card->host);
+ mmc_release_host(mq->card->host);
}
-/**
- * mmc_queue_resume - resume a previously suspended MMC request queue
- * @mq: MMC queue to resume
- */
void mmc_queue_resume(struct mmc_queue *mq)
{
+ blk_mq_unquiesce_queue(mq->queue);
+}
+
+void mmc_cleanup_queue(struct mmc_queue *mq)
+{
struct request_queue *q = mq->queue;
- unsigned long flags;
- if (mq->suspended) {
- mq->suspended = false;
+ /*
+ * The legacy code handled the possibility of being suspended,
+ * so do that here too.
+ */
+ if (blk_queue_quiesced(q))
+ blk_mq_unquiesce_queue(q);
- up(&mq->thread_sem);
+ blk_cleanup_queue(q);
- spin_lock_irqsave(q->queue_lock, flags);
- blk_start_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
- }
+ /*
+ * A request can be completed before the next request, potentially
+ * leaving a complete_work with nothing to do. Such a work item might
+ * still be queued at this point. Flush it.
+ */
+ flush_work(&mq->complete_work);
+
+ mq->card = NULL;
}
/*
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
index 547b457c4251..17e59d50b496 100644
--- a/drivers/mmc/core/queue.h
+++ b/drivers/mmc/core/queue.h
@@ -8,6 +8,20 @@
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
+enum mmc_issued {
+ MMC_REQ_STARTED,
+ MMC_REQ_BUSY,
+ MMC_REQ_FAILED_TO_START,
+ MMC_REQ_FINISHED,
+};
+
+enum mmc_issue_type {
+ MMC_ISSUE_SYNC,
+ MMC_ISSUE_DCMD,
+ MMC_ISSUE_ASYNC,
+ MMC_ISSUE_MAX,
+};
+
static inline struct mmc_queue_req *req_to_mmc_queue_req(struct request *rq)
{
return blk_mq_rq_to_pdu(rq);
@@ -20,7 +34,6 @@ static inline struct request *mmc_queue_req_to_req(struct mmc_queue_req *mqr)
return blk_mq_rq_from_pdu(mqr);
}
-struct task_struct;
struct mmc_blk_data;
struct mmc_blk_ioc_data;
@@ -30,7 +43,6 @@ struct mmc_blk_request {
struct mmc_command cmd;
struct mmc_command stop;
struct mmc_data data;
- int retune_retry_done;
};
/**
@@ -52,28 +64,34 @@ enum mmc_drv_op {
struct mmc_queue_req {
struct mmc_blk_request brq;
struct scatterlist *sg;
- struct mmc_async_req areq;
enum mmc_drv_op drv_op;
int drv_op_result;
void *drv_op_data;
unsigned int ioc_count;
+ int retries;
};
struct mmc_queue {
struct mmc_card *card;
- struct task_struct *thread;
- struct semaphore thread_sem;
- bool suspended;
- bool asleep;
+ struct mmc_ctx ctx;
+ struct blk_mq_tag_set tag_set;
struct mmc_blk_data *blkdata;
struct request_queue *queue;
- /*
- * FIXME: this counter is not a very reliable way of keeping
- * track of how many requests that are ongoing. Switch to just
- * letting the block core keep track of requests and per-request
- * associated mmc_queue_req data.
- */
- int qcnt;
+ int in_flight[MMC_ISSUE_MAX];
+ unsigned int cqe_busy;
+#define MMC_CQE_DCMD_BUSY BIT(0)
+#define MMC_CQE_QUEUE_FULL BIT(1)
+ bool use_cqe;
+ bool recovery_needed;
+ bool in_recovery;
+ bool rw_wait;
+ bool waiting;
+ struct work_struct recovery_work;
+ wait_queue_head_t wait;
+ struct request *recovery_req;
+ struct request *complete_req;
+ struct mutex complete_lock;
+ struct work_struct complete_work;
};
extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
@@ -84,4 +102,22 @@ extern void mmc_queue_resume(struct mmc_queue *);
extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
struct mmc_queue_req *);
+void mmc_cqe_check_busy(struct mmc_queue *mq);
+void mmc_cqe_recovery_notifier(struct mmc_request *mrq);
+
+enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req);
+
+static inline int mmc_tot_in_flight(struct mmc_queue *mq)
+{
+ return mq->in_flight[MMC_ISSUE_SYNC] +
+ mq->in_flight[MMC_ISSUE_DCMD] +
+ mq->in_flight[MMC_ISSUE_ASYNC];
+}
+
+static inline int mmc_cqe_qcnt(struct mmc_queue *mq)
+{
+ return mq->in_flight[MMC_ISSUE_DCMD] +
+ mq->in_flight[MMC_ISSUE_ASYNC];
+}
+
#endif
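
A quick illustration of the two inline counters: SYNC requests count toward host claiming (mmc_tot_in_flight) but are excluded from the CQE re-tune heuristic (mmc_cqe_qcnt). A standalone sketch with example values:

	#include <stdio.h>

	enum { ISSUE_SYNC, ISSUE_DCMD, ISSUE_ASYNC, ISSUE_MAX };

	int main(void)
	{
		int in_flight[ISSUE_MAX] = { 1, 0, 2 };  /* example snapshot */
		int tot = in_flight[ISSUE_SYNC] + in_flight[ISSUE_DCMD] +
			  in_flight[ISSUE_ASYNC];
		int cqe = in_flight[ISSUE_DCMD] + in_flight[ISSUE_ASYNC];

		/* SYNC requests keep the host claimed but do not count
		 * against the CQE re-tune heuristic. */
		printf("total=%d cqe=%d\n", tot, cqe);  /* total=3 cqe=2 */
		return 0;
	}
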
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 863f1dbbfc1b..3698b0576009 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -121,20 +121,18 @@ EXPORT_SYMBOL(mmc_gpio_request_ro);
void mmc_gpiod_request_cd_irq(struct mmc_host *host)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
- int ret, irq;
+ int irq = -EINVAL;
+ int ret;
if (host->slot.cd_irq >= 0 || !ctx || !ctx->cd_gpio)
return;
- irq = gpiod_to_irq(ctx->cd_gpio);
-
/*
- * Even if gpiod_to_irq() returns a valid IRQ number, the platform might
- * still prefer to poll, e.g., because that IRQ number is already used
- * by another unit and cannot be shared.
+ * Do not use IRQ if the platform prefers to poll, e.g., because that
+ * IRQ number is already used by another unit and cannot be shared.
*/
- if (irq >= 0 && host->caps & MMC_CAP_NEEDS_POLL)
- irq = -EINVAL;
+ if (!(host->caps & MMC_CAP_NEEDS_POLL))
+ irq = gpiod_to_irq(ctx->cd_gpio);
if (irq >= 0) {
if (!ctx->cd_gpio_isr)
@@ -307,3 +305,11 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
return 0;
}
EXPORT_SYMBOL(mmc_gpiod_request_ro);
+
+bool mmc_can_gpio_ro(struct mmc_host *host)
+{
+ struct mmc_gpio *ctx = host->slot.handler_priv;
+
+ return ctx->ro_gpio ? true : false;
+}
+EXPORT_SYMBOL(mmc_can_gpio_ro);
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 567028c9219a..67bd3344dd03 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -81,6 +81,7 @@ config MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
config MMC_SDHCI_PCI
tristate "SDHCI support on PCI bus"
depends on MMC_SDHCI && PCI
+ select MMC_CQHCI
help
This selects the PCI Secure Digital Host Controller Interface.
Most controllers found today are PCI devices.
@@ -132,6 +133,7 @@ config MMC_SDHCI_OF_ARASAN
depends on MMC_SDHCI_PLTFM
depends on OF
depends on COMMON_CLK
+ select MMC_CQHCI
help
This selects the Arasan Secure Digital Host Controller Interface
(SDHCI). This hardware is found e.g. in Xilinx' Zynq SoC.
@@ -320,7 +322,7 @@ config MMC_SDHCI_BCM_KONA
config MMC_SDHCI_F_SDH30
tristate "SDHCI support for Fujitsu Semiconductor F_SDH30"
depends on MMC_SDHCI_PLTFM
- depends on OF
+ depends on OF || ACPI
help
This selects the Secure Digital Host Controller Interface (SDHCI)
Needed by some Fujitsu SoC for MMC / SD / SDIO support.
@@ -595,11 +597,8 @@ config MMC_TMIO
config MMC_SDHI
tristate "Renesas SDHI SD/SDIO controller support"
- depends on SUPERH || ARM || ARM64
depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
select MMC_TMIO_CORE
- select MMC_SDHI_SYS_DMAC if (SUPERH || ARM)
- select MMC_SDHI_INTERNAL_DMAC if ARM64
help
This provides support for the SDHI SD/SDIO controller found in
Renesas SuperH, ARM and ARM64 based SoCs
@@ -607,6 +606,7 @@ config MMC_SDHI
config MMC_SDHI_SYS_DMAC
tristate "DMA for SDHI SD/SDIO controllers using SYS-DMAC"
depends on MMC_SDHI
+ default MMC_SDHI if (SUPERH || ARM)
help
This provides DMA support for SDHI SD/SDIO controllers
using SYS-DMAC via DMA Engine. This supports the controllers
@@ -616,6 +616,7 @@ config MMC_SDHI_INTERNAL_DMAC
tristate "DMA for SDHI SD/SDIO controllers using on-chip bus mastering"
depends on ARM64 || COMPILE_TEST
depends on MMC_SDHI
+ default MMC_SDHI if ARM64
help
This provides DMA support for SDHI SD/SDIO controllers
using on-chip bus mastering. This supports the controllers
@@ -838,14 +839,14 @@ config MMC_USDHI6ROL0
config MMC_REALTEK_PCI
tristate "Realtek PCI-E SD/MMC Card Interface Driver"
- depends on MFD_RTSX_PCI
+ depends on MISC_RTSX_PCI
help
Say Y here to include driver code to support SD/MMC card interface
of Realtek PCI-E card reader
config MMC_REALTEK_USB
tristate "Realtek USB SD/MMC Card Interface Driver"
- depends on MFD_RTSX_USB
+ depends on MISC_RTSX_USB
help
Say Y here to include driver code to support SD/MMC card interface
of Realtek RTS5129/39 series card reader
@@ -857,6 +858,19 @@ config MMC_SUNXI
This selects support for the SD/MMC Host Controller on
Allwinner sunxi SoCs.
+config MMC_CQHCI
+ tristate "Command Queue Host Controller Interface support"
+ depends on HAS_DMA
+ help
+ This selects the Command Queue Host Controller Interface (CQHCI)
+ support present in host controllers of Qualcomm Technologies, Inc.,
+ amongst others.
+ This interface supports eMMC devices with command queueing.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
config MMC_TOSHIBA_PCI
tristate "Toshiba Type A SD/MMC Card Interface Driver"
depends on PCI
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index a43cf0d5a5d3..84cd1388abc3 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -11,7 +11,7 @@ obj-$(CONFIG_MMC_MXC) += mxcmmc.o
obj-$(CONFIG_MMC_MXS) += mxs-mmc.o
obj-$(CONFIG_MMC_SDHCI) += sdhci.o
obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
-sdhci-pci-y += sdhci-pci-core.o sdhci-pci-o2micro.o
+sdhci-pci-y += sdhci-pci-core.o sdhci-pci-o2micro.o sdhci-pci-arasan.o
obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += sdhci-pci-data.o
obj-$(CONFIG_MMC_SDHCI_ACPI) += sdhci-acpi.o
obj-$(CONFIG_MMC_SDHCI_PXAV3) += sdhci-pxav3.o
@@ -39,12 +39,8 @@ obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o
obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
obj-$(CONFIG_MMC_TMIO_CORE) += tmio_mmc_core.o
obj-$(CONFIG_MMC_SDHI) += renesas_sdhi_core.o
-ifeq ($(subst m,y,$(CONFIG_MMC_SDHI_SYS_DMAC)),y)
-obj-$(CONFIG_MMC_SDHI) += renesas_sdhi_sys_dmac.o
-endif
-ifeq ($(subst m,y,$(CONFIG_MMC_SDHI_INTERNAL_DMAC)),y)
-obj-$(CONFIG_MMC_SDHI) += renesas_sdhi_internal_dmac.o
-endif
+obj-$(CONFIG_MMC_SDHI_SYS_DMAC) += renesas_sdhi_sys_dmac.o
+obj-$(CONFIG_MMC_SDHI_INTERNAL_DMAC) += renesas_sdhi_internal_dmac.o
obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
@@ -92,6 +88,7 @@ obj-$(CONFIG_MMC_SDHCI_ST) += sdhci-st.o
obj-$(CONFIG_MMC_SDHCI_MICROCHIP_PIC32) += sdhci-pic32.o
obj-$(CONFIG_MMC_SDHCI_BRCMSTB) += sdhci-brcmstb.o
obj-$(CONFIG_MMC_SDHCI_OMAP) += sdhci-omap.o
+obj-$(CONFIG_MMC_CQHCI) += cqhci.o
ifeq ($(CONFIG_CB710_DEBUG),y)
CFLAGS-cb710-mmc += -DDEBUG
diff --git a/drivers/mmc/host/android-goldfish.c b/drivers/mmc/host/android-goldfish.c
index 63fe5091ca59..63d27589cd89 100644
--- a/drivers/mmc/host/android-goldfish.c
+++ b/drivers/mmc/host/android-goldfish.c
@@ -42,13 +42,11 @@
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/clk.h>
-#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/types.h>
-#include <asm/io.h>
#include <linux/uaccess.h>
#define DRIVER_NAME "goldfish_mmc"
diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c
new file mode 100644
index 000000000000..159270e947cf
--- /dev/null
+++ b/drivers/mmc/host/cqhci.c
@@ -0,0 +1,1150 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/platform_device.h>
+#include <linux/ktime.h>
+
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+
+#include "cqhci.h"
+
+#define DCMD_SLOT 31
+#define NUM_SLOTS 32
+
+struct cqhci_slot {
+ struct mmc_request *mrq;
+ unsigned int flags;
+#define CQHCI_EXTERNAL_TIMEOUT BIT(0)
+#define CQHCI_COMPLETED BIT(1)
+#define CQHCI_HOST_CRC BIT(2)
+#define CQHCI_HOST_TIMEOUT BIT(3)
+#define CQHCI_HOST_OTHER BIT(4)
+};
+
+static inline u8 *get_desc(struct cqhci_host *cq_host, u8 tag)
+{
+ return cq_host->desc_base + (tag * cq_host->slot_sz);
+}
+
+static inline u8 *get_link_desc(struct cqhci_host *cq_host, u8 tag)
+{
+ u8 *desc = get_desc(cq_host, tag);
+
+ return desc + cq_host->task_desc_len;
+}
+
+static inline dma_addr_t get_trans_desc_dma(struct cqhci_host *cq_host, u8 tag)
+{
+ return cq_host->trans_desc_dma_base +
+ (cq_host->mmc->max_segs * tag *
+ cq_host->trans_desc_len);
+}
+
+static inline u8 *get_trans_desc(struct cqhci_host *cq_host, u8 tag)
+{
+ return cq_host->trans_desc_base +
+ (cq_host->trans_desc_len * cq_host->mmc->max_segs * tag);
+}
+
+static void setup_trans_desc(struct cqhci_host *cq_host, u8 tag)
+{
+ u8 *link_temp;
+ dma_addr_t trans_temp;
+
+ link_temp = get_link_desc(cq_host, tag);
+ trans_temp = get_trans_desc_dma(cq_host, tag);
+
+ memset(link_temp, 0, cq_host->link_desc_len);
+ if (cq_host->link_desc_len > 8)
+ *(link_temp + 8) = 0;
+
+ if (tag == DCMD_SLOT && (cq_host->mmc->caps2 & MMC_CAP2_CQE_DCMD)) {
+ *link_temp = CQHCI_VALID(0) | CQHCI_ACT(0) | CQHCI_END(1);
+ return;
+ }
+
+ *link_temp = CQHCI_VALID(1) | CQHCI_ACT(0x6) | CQHCI_END(0);
+
+ if (cq_host->dma64) {
+ __le64 *data_addr = (__le64 __force *)(link_temp + 4);
+
+ data_addr[0] = cpu_to_le64(trans_temp);
+ } else {
+ __le32 *data_addr = (__le32 __force *)(link_temp + 4);
+
+ data_addr[0] = cpu_to_le32(trans_temp);
+ }
+}
+
+static void cqhci_set_irqs(struct cqhci_host *cq_host, u32 set)
+{
+ cqhci_writel(cq_host, set, CQHCI_ISTE);
+ cqhci_writel(cq_host, set, CQHCI_ISGE);
+}
+
+#define DRV_NAME "cqhci"
+
+#define CQHCI_DUMP(f, x...) \
+ pr_err("%s: " DRV_NAME ": " f, mmc_hostname(mmc), ## x)
+
+static void cqhci_dumpregs(struct cqhci_host *cq_host)
+{
+ struct mmc_host *mmc = cq_host->mmc;
+
+ CQHCI_DUMP("============ CQHCI REGISTER DUMP ===========\n");
+
+ CQHCI_DUMP("Caps: 0x%08x | Version: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_CAP),
+ cqhci_readl(cq_host, CQHCI_VER));
+ CQHCI_DUMP("Config: 0x%08x | Control: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_CFG),
+ cqhci_readl(cq_host, CQHCI_CTL));
+ CQHCI_DUMP("Int stat: 0x%08x | Int enab: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_IS),
+ cqhci_readl(cq_host, CQHCI_ISTE));
+ CQHCI_DUMP("Int sig: 0x%08x | Int Coal: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_ISGE),
+ cqhci_readl(cq_host, CQHCI_IC));
+ CQHCI_DUMP("TDL base: 0x%08x | TDL up32: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_TDLBA),
+ cqhci_readl(cq_host, CQHCI_TDLBAU));
+ CQHCI_DUMP("Doorbell: 0x%08x | TCN: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_TDBR),
+ cqhci_readl(cq_host, CQHCI_TCN));
+ CQHCI_DUMP("Dev queue: 0x%08x | Dev Pend: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_DQS),
+ cqhci_readl(cq_host, CQHCI_DPT));
+ CQHCI_DUMP("Task clr: 0x%08x | SSC1: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_TCLR),
+ cqhci_readl(cq_host, CQHCI_SSC1));
+ CQHCI_DUMP("SSC2: 0x%08x | DCMD rsp: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_SSC2),
+ cqhci_readl(cq_host, CQHCI_CRDCT));
+ CQHCI_DUMP("RED mask: 0x%08x | TERRI: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_RMEM),
+ cqhci_readl(cq_host, CQHCI_TERRI));
+ CQHCI_DUMP("Resp idx: 0x%08x | Resp arg: 0x%08x\n",
+ cqhci_readl(cq_host, CQHCI_CRI),
+ cqhci_readl(cq_host, CQHCI_CRA));
+
+ if (cq_host->ops->dumpregs)
+ cq_host->ops->dumpregs(mmc);
+ else
+ CQHCI_DUMP(": ===========================================\n");
+}
+
+/**
+ * The allocated descriptor table for task, link & transfer descriptors
+ * looks like:
+ * |----------|
+ * |task desc |  |->|----------|
+ * |----------|  |  |trans desc|
+ * |link desc-|->|  |----------|
+ * |----------|          .
+ *      .                .
+ *  no. of slots        max-segs
+ *      .           |----------|
+ * |----------|
+ * The idea here is to create the [task+trans] table and mark & point the
+ * link desc to the transfer desc table on a per-slot basis.
+ */
+static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
+{
+ int i;
+
+ /* task descriptor can be 64/128 bit irrespective of arch */
+ if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
+ cqhci_writel(cq_host, cqhci_readl(cq_host, CQHCI_CFG) |
+ CQHCI_TASK_DESC_SZ, CQHCI_CFG);
+ cq_host->task_desc_len = 16;
+ } else {
+ cq_host->task_desc_len = 8;
+ }
+
+ /*
+ * The transfer descriptor is 96 bits long instead of 128 bits, which
+ * means ADMA would expect the next valid descriptor at the 96th bit
+ * or the 128th bit.
+ */
+ if (cq_host->dma64) {
+ if (cq_host->quirks & CQHCI_QUIRK_SHORT_TXFR_DESC_SZ)
+ cq_host->trans_desc_len = 12;
+ else
+ cq_host->trans_desc_len = 16;
+ cq_host->link_desc_len = 16;
+ } else {
+ cq_host->trans_desc_len = 8;
+ cq_host->link_desc_len = 8;
+ }
+
+ /* total size of a slot: 1 task & 1 transfer (link) */
+ cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len;
+
+ cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;
+
+ cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
+ (cq_host->num_slots - 1);
+
+ pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
+ mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size,
+ cq_host->slot_sz);
+
+ /*
+ * Allocate a DMA-mapped chunk of memory for the task descriptors,
+ * another for the transfer descriptors, and then set up each slot's
+ * link descriptor to point at its slot's offset in the transfer
+ * descriptor table.
+ */
+ cq_host->desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
+ cq_host->desc_size,
+ &cq_host->desc_dma_base,
+ GFP_KERNEL);
+ cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
+ cq_host->data_size,
+ &cq_host->trans_desc_dma_base,
+ GFP_KERNEL);
+ if (!cq_host->desc_base || !cq_host->trans_desc_base)
+ return -ENOMEM;
+
+ pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
+ mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base,
+ (unsigned long long)cq_host->desc_dma_base,
+ (unsigned long long)cq_host->trans_desc_dma_base);
+
+ for (i = 0; i < cq_host->num_slots; i++)
+ setup_trans_desc(cq_host, i);
+
+ return 0;
+}
+
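
To make the sizing above concrete, here is a standalone sketch computing the table sizes for one assumed configuration (64-bit DMA, no short-descriptor quirk, 64-bit task descriptors, 32 slots, max_segs = 128; these values are illustrative, not mandated):

	#include <stdio.h>

	int main(void)
	{
		int task_len = 8, link_len = 16, trans_len = 16;
		int num_slots = 32, max_segs = 128;

		int slot_sz = task_len + link_len;       /* 24 bytes */
		int desc_size = slot_sz * num_slots;     /* 768 bytes */
		/* slot 31 is the DCMD slot and carries no transfer list,
		 * hence num_slots - 1 */
		int data_size = trans_len * max_segs * (num_slots - 1);

		printf("slot_sz=%d desc=%d data=%d\n",
		       slot_sz, desc_size, data_size);  /* 24 768 63488 */
		return 0;
	}
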
+static void __cqhci_enable(struct cqhci_host *cq_host)
+{
+ struct mmc_host *mmc = cq_host->mmc;
+ u32 cqcfg;
+
+ cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
+
+ /* Configuration must not be changed while enabled */
+ if (cqcfg & CQHCI_ENABLE) {
+ cqcfg &= ~CQHCI_ENABLE;
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+ }
+
+ cqcfg &= ~(CQHCI_DCMD | CQHCI_TASK_DESC_SZ);
+
+ if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
+ cqcfg |= CQHCI_DCMD;
+
+ if (cq_host->caps & CQHCI_TASK_DESC_SZ_128)
+ cqcfg |= CQHCI_TASK_DESC_SZ;
+
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+
+ cqhci_writel(cq_host, lower_32_bits(cq_host->desc_dma_base),
+ CQHCI_TDLBA);
+ cqhci_writel(cq_host, upper_32_bits(cq_host->desc_dma_base),
+ CQHCI_TDLBAU);
+
+ cqhci_writel(cq_host, cq_host->rca, CQHCI_SSC2);
+
+ cqhci_set_irqs(cq_host, 0);
+
+ cqcfg |= CQHCI_ENABLE;
+
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+
+ mmc->cqe_on = true;
+
+ if (cq_host->ops->enable)
+ cq_host->ops->enable(mmc);
+
+ /* Ensure all writes are done before interrupts are enabled */
+ wmb();
+
+ cqhci_set_irqs(cq_host, CQHCI_IS_MASK);
+
+ cq_host->activated = true;
+}
+
+static void __cqhci_disable(struct cqhci_host *cq_host)
+{
+ u32 cqcfg;
+
+ cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
+ cqcfg &= ~CQHCI_ENABLE;
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+
+ cq_host->mmc->cqe_on = false;
+
+ cq_host->activated = false;
+}
+
+int cqhci_suspend(struct mmc_host *mmc)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+
+ if (cq_host->enabled)
+ __cqhci_disable(cq_host);
+
+ return 0;
+}
+EXPORT_SYMBOL(cqhci_suspend);
+
+int cqhci_resume(struct mmc_host *mmc)
+{
+ /* Re-enable is done upon first request */
+ return 0;
+}
+EXPORT_SYMBOL(cqhci_resume);
+
+static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ int err;
+
+ if (cq_host->enabled)
+ return 0;
+
+ cq_host->rca = card->rca;
+
+ err = cqhci_host_alloc_tdl(cq_host);
+ if (err)
+ return err;
+
+ __cqhci_enable(cq_host);
+
+ cq_host->enabled = true;
+
+#ifdef DEBUG
+ cqhci_dumpregs(cq_host);
+#endif
+ return 0;
+}
+
+/* CQHCI is idle and should halt immediately, so set a small timeout (us) */
+#define CQHCI_OFF_TIMEOUT 100
+
+static void cqhci_off(struct mmc_host *mmc)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ ktime_t timeout;
+ bool timed_out;
+ u32 reg;
+
+ if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
+ return;
+
+ if (cq_host->ops->disable)
+ cq_host->ops->disable(mmc, false);
+
+ cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);
+
+ timeout = ktime_add_us(ktime_get(), CQHCI_OFF_TIMEOUT);
+ while (1) {
+ timed_out = ktime_compare(ktime_get(), timeout) > 0;
+ reg = cqhci_readl(cq_host, CQHCI_CTL);
+ if ((reg & CQHCI_HALT) || timed_out)
+ break;
+ }
+
+ if (timed_out)
+ pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
+ else
+ pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));
+
+ mmc->cqe_on = false;
+}
+
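
cqhci_off() busy-polls CQHCI_CTL until the HALT bit appears or the deadline passes. A userspace model of the same poll-with-deadline loop, with a stub register read standing in for cqhci_readl():

	#include <stdbool.h>
	#include <stdio.h>
	#include <time.h>

	/* Hypothetical stand-in for cqhci_readl(cq_host, CQHCI_CTL). */
	static unsigned int read_ctl(void) { return 0x1; /* HALT already set */ }
	#define HALT 0x1

	int main(void)
	{
		struct timespec now, deadline;
		bool timed_out;

		clock_gettime(CLOCK_MONOTONIC, &deadline);
		deadline.tv_nsec += 100 * 1000;          /* 100 us budget */
		if (deadline.tv_nsec >= 1000000000L) {   /* normalize */
			deadline.tv_nsec -= 1000000000L;
			deadline.tv_sec++;
		}

		for (;;) {
			clock_gettime(CLOCK_MONOTONIC, &now);
			timed_out = now.tv_sec > deadline.tv_sec ||
				    (now.tv_sec == deadline.tv_sec &&
				     now.tv_nsec > deadline.tv_nsec);
			if ((read_ctl() & HALT) || timed_out)
				break;
		}
		printf(timed_out ? "stuck on\n" : "halted\n");
		return 0;
	}
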
+static void cqhci_disable(struct mmc_host *mmc)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+
+ if (!cq_host->enabled)
+ return;
+
+ cqhci_off(mmc);
+
+ __cqhci_disable(cq_host);
+
+ dmam_free_coherent(mmc_dev(mmc), cq_host->data_size,
+ cq_host->trans_desc_base,
+ cq_host->trans_desc_dma_base);
+
+ dmam_free_coherent(mmc_dev(mmc), cq_host->desc_size,
+ cq_host->desc_base,
+ cq_host->desc_dma_base);
+
+ cq_host->trans_desc_base = NULL;
+ cq_host->desc_base = NULL;
+
+ cq_host->enabled = false;
+}
+
+static void cqhci_prep_task_desc(struct mmc_request *mrq,
+ u64 *data, bool intr)
+{
+ u32 req_flags = mrq->data->flags;
+
+ *data = CQHCI_VALID(1) |
+ CQHCI_END(1) |
+ CQHCI_INT(intr) |
+ CQHCI_ACT(0x5) |
+ CQHCI_FORCED_PROG(!!(req_flags & MMC_DATA_FORCED_PRG)) |
+ CQHCI_DATA_TAG(!!(req_flags & MMC_DATA_DAT_TAG)) |
+ CQHCI_DATA_DIR(!!(req_flags & MMC_DATA_READ)) |
+ CQHCI_PRIORITY(!!(req_flags & MMC_DATA_PRIO)) |
+ CQHCI_QBAR(!!(req_flags & MMC_DATA_QBR)) |
+ CQHCI_REL_WRITE(!!(req_flags & MMC_DATA_REL_WR)) |
+ CQHCI_BLK_COUNT(mrq->data->blocks) |
+ CQHCI_BLK_ADDR((u64)mrq->data->blk_addr);
+
+ pr_debug("%s: cqhci: tag %d task descriptor 0x016%llx\n",
+ mmc_hostname(mrq->host), mrq->tag, (unsigned long long)*data);
+}
+
+static int cqhci_dma_map(struct mmc_host *host, struct mmc_request *mrq)
+{
+ int sg_count;
+ struct mmc_data *data = mrq->data;
+
+ if (!data)
+ return -EINVAL;
+
+ sg_count = dma_map_sg(mmc_dev(host), data->sg,
+ data->sg_len,
+ (data->flags & MMC_DATA_WRITE) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ if (!sg_count) {
+ pr_err("%s: sg-len: %d\n", __func__, data->sg_len);
+ return -ENOMEM;
+ }
+
+ return sg_count;
+}
+
+static void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end,
+ bool dma64)
+{
+ __le32 *attr = (__le32 __force *)desc;
+
+ *attr = (CQHCI_VALID(1) |
+ CQHCI_END(end ? 1 : 0) |
+ CQHCI_INT(0) |
+ CQHCI_ACT(0x4) |
+ CQHCI_DAT_LENGTH(len));
+
+ if (dma64) {
+ __le64 *dataddr = (__le64 __force *)(desc + 4);
+
+ dataddr[0] = cpu_to_le64(addr);
+ } else {
+ __le32 *dataddr = (__le32 __force *)(desc + 4);
+
+ dataddr[0] = cpu_to_le32(addr);
+ }
+}
+
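
cqhci_set_tran_desc() stores the DMA address little-endian at byte offset 4 of the descriptor, as 64 or 32 bits depending on the host. A portable userspace sketch of the same layout; htole32/htole64 from glibc's endian.h stand in for cpu_to_le32/cpu_to_le64, and memcpy avoids the unaligned stores the kernel version can rely on:

	#define _DEFAULT_SOURCE
	#include <endian.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	static void set_tran_addr(uint8_t *desc, uint64_t addr, int dma64)
	{
		if (dma64) {
			uint64_t le = htole64(addr);
			memcpy(desc + 4, &le, sizeof(le));
		} else {
			uint32_t le = htole32((uint32_t)addr);
			memcpy(desc + 4, &le, sizeof(le));
		}
	}

	int main(void)
	{
		uint8_t desc[16] = { 0 };

		set_tran_addr(desc, 0x123456789abcdef0ULL, 1);
		for (int i = 0; i < 12; i++)
			printf("%02x ", desc[i]);  /* address starts at byte 4 */
		printf("\n");
		return 0;
	}
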
+static int cqhci_prep_tran_desc(struct mmc_request *mrq,
+ struct cqhci_host *cq_host, int tag)
+{
+ struct mmc_data *data = mrq->data;
+ int i, sg_count, len;
+ bool end = false;
+ bool dma64 = cq_host->dma64;
+ dma_addr_t addr;
+ u8 *desc;
+ struct scatterlist *sg;
+
+ sg_count = cqhci_dma_map(mrq->host, mrq);
+ if (sg_count < 0) {
+ pr_err("%s: %s: unable to map sg lists, %d\n",
+ mmc_hostname(mrq->host), __func__, sg_count);
+ return sg_count;
+ }
+
+ desc = get_trans_desc(cq_host, tag);
+
+ for_each_sg(data->sg, sg, sg_count, i) {
+ addr = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+
+ if ((i+1) == sg_count)
+ end = true;
+ cqhci_set_tran_desc(desc, addr, len, end, dma64);
+ desc += cq_host->trans_desc_len;
+ }
+
+ return 0;
+}
+
+static void cqhci_prep_dcmd_desc(struct mmc_host *mmc,
+ struct mmc_request *mrq)
+{
+ u64 *task_desc = NULL;
+ u64 data = 0;
+ u8 resp_type;
+ u8 *desc;
+ __le64 *dataddr;
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ u8 timing;
+
+ if (!(mrq->cmd->flags & MMC_RSP_PRESENT)) {
+ resp_type = 0x0;
+ timing = 0x1;
+ } else {
+ if (mrq->cmd->flags & MMC_RSP_R1B) {
+ resp_type = 0x3;
+ timing = 0x0;
+ } else {
+ resp_type = 0x2;
+ timing = 0x1;
+ }
+ }
+
+ task_desc = (__le64 __force *)get_desc(cq_host, cq_host->dcmd_slot);
+ memset(task_desc, 0, cq_host->task_desc_len);
+ data |= (CQHCI_VALID(1) |
+ CQHCI_END(1) |
+ CQHCI_INT(1) |
+ CQHCI_QBAR(1) |
+ CQHCI_ACT(0x5) |
+ CQHCI_CMD_INDEX(mrq->cmd->opcode) |
+ CQHCI_CMD_TIMING(timing) | CQHCI_RESP_TYPE(resp_type));
+ *task_desc |= data;
+ desc = (u8 *)task_desc;
+ pr_debug("%s: cqhci: dcmd: cmd: %d timing: %d resp: %d\n",
+ mmc_hostname(mmc), mrq->cmd->opcode, timing, resp_type);
+ dataddr = (__le64 __force *)(desc + 4);
+ dataddr[0] = cpu_to_le64((u64)mrq->cmd->arg);
+
+}
+
+static void cqhci_post_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ struct mmc_data *data = mrq->data;
+
+ if (data) {
+ dma_unmap_sg(mmc_dev(host), data->sg, data->sg_len,
+ (data->flags & MMC_DATA_READ) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ }
+}
+
+static inline int cqhci_tag(struct mmc_request *mrq)
+{
+ return mrq->cmd ? DCMD_SLOT : mrq->tag;
+}
+
+static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ int err = 0;
+ u64 data = 0;
+ u64 *task_desc = NULL;
+ int tag = cqhci_tag(mrq);
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ unsigned long flags;
+
+ if (!cq_host->enabled) {
+ pr_err("%s: cqhci: not enabled\n", mmc_hostname(mmc));
+ return -EINVAL;
+ }
+
+ /* First request after resume has to re-enable */
+ if (!cq_host->activated)
+ __cqhci_enable(cq_host);
+
+ if (!mmc->cqe_on) {
+ cqhci_writel(cq_host, 0, CQHCI_CTL);
+ mmc->cqe_on = true;
+ pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
+ if (cqhci_readl(cq_host, CQHCI_CTL) && CQHCI_HALT) {
+ pr_err("%s: cqhci: CQE failed to exit halt state\n",
+ mmc_hostname(mmc));
+ }
+ if (cq_host->ops->enable)
+ cq_host->ops->enable(mmc);
+ }
+
+ if (mrq->data) {
+ task_desc = (__le64 __force *)get_desc(cq_host, tag);
+ cqhci_prep_task_desc(mrq, &data, 1);
+ *task_desc = cpu_to_le64(data);
+ err = cqhci_prep_tran_desc(mrq, cq_host, tag);
+ if (err) {
+ pr_err("%s: cqhci: failed to setup tx desc: %d\n",
+ mmc_hostname(mmc), err);
+ return err;
+ }
+ } else {
+ cqhci_prep_dcmd_desc(mmc, mrq);
+ }
+
+ spin_lock_irqsave(&cq_host->lock, flags);
+
+ if (cq_host->recovery_halt) {
+ err = -EBUSY;
+ goto out_unlock;
+ }
+
+ cq_host->slot[tag].mrq = mrq;
+ cq_host->slot[tag].flags = 0;
+
+ cq_host->qcnt += 1;
+
+ cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
+ if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
+ pr_debug("%s: cqhci: doorbell not set for tag %d\n",
+ mmc_hostname(mmc), tag);
+out_unlock:
+ spin_unlock_irqrestore(&cq_host->lock, flags);
+
+ if (err)
+ cqhci_post_req(mmc, mrq);
+
+ return err;
+}
+
+static void cqhci_recovery_needed(struct mmc_host *mmc, struct mmc_request *mrq,
+ bool notify)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+
+ if (!cq_host->recovery_halt) {
+ cq_host->recovery_halt = true;
+ pr_debug("%s: cqhci: recovery needed\n", mmc_hostname(mmc));
+ wake_up(&cq_host->wait_queue);
+ if (notify && mrq->recovery_notifier)
+ mrq->recovery_notifier(mrq);
+ }
+}
+
+static unsigned int cqhci_error_flags(int error1, int error2)
+{
+ int error = error1 ? error1 : error2;
+
+ switch (error) {
+ case -EILSEQ:
+ return CQHCI_HOST_CRC;
+ case -ETIMEDOUT:
+ return CQHCI_HOST_TIMEOUT;
+ default:
+ return CQHCI_HOST_OTHER;
+ }
+}
+
+static void cqhci_error_irq(struct mmc_host *mmc, u32 status, int cmd_error,
+ int data_error)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ struct cqhci_slot *slot;
+ u32 terri;
+ int tag;
+
+ spin_lock(&cq_host->lock);
+
+ terri = cqhci_readl(cq_host, CQHCI_TERRI);
+
+ pr_debug("%s: cqhci: error IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
+ mmc_hostname(mmc), status, cmd_error, data_error, terri);
+
+ /* Forget about errors when recovery has already been triggered */
+ if (cq_host->recovery_halt)
+ goto out_unlock;
+
+ if (!cq_host->qcnt) {
+ WARN_ONCE(1, "%s: cqhci: error when idle. IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
+ mmc_hostname(mmc), status, cmd_error, data_error,
+ terri);
+ goto out_unlock;
+ }
+
+ if (CQHCI_TERRI_C_VALID(terri)) {
+ tag = CQHCI_TERRI_C_TASK(terri);
+ slot = &cq_host->slot[tag];
+ if (slot->mrq) {
+ slot->flags = cqhci_error_flags(cmd_error, data_error);
+ cqhci_recovery_needed(mmc, slot->mrq, true);
+ }
+ }
+
+ if (CQHCI_TERRI_D_VALID(terri)) {
+ tag = CQHCI_TERRI_D_TASK(terri);
+ slot = &cq_host->slot[tag];
+ if (slot->mrq) {
+ slot->flags = cqhci_error_flags(data_error, cmd_error);
+ cqhci_recovery_needed(mmc, slot->mrq, true);
+ }
+ }
+
+ if (!cq_host->recovery_halt) {
+ /*
+ * The only way to guarantee forward progress is to mark at
+ * least one task in error, so if none is indicated, pick one.
+ */
+ for (tag = 0; tag < NUM_SLOTS; tag++) {
+ slot = &cq_host->slot[tag];
+ if (!slot->mrq)
+ continue;
+ slot->flags = cqhci_error_flags(data_error, cmd_error);
+ cqhci_recovery_needed(mmc, slot->mrq, true);
+ break;
+ }
+ }
+
+out_unlock:
+ spin_unlock(&cq_host->lock);
+}
+
+static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ struct cqhci_slot *slot = &cq_host->slot[tag];
+ struct mmc_request *mrq = slot->mrq;
+ struct mmc_data *data;
+
+ if (!mrq) {
+ WARN_ONCE(1, "%s: cqhci: spurious TCN for tag %d\n",
+ mmc_hostname(mmc), tag);
+ return;
+ }
+
+ /* No completions allowed during recovery */
+ if (cq_host->recovery_halt) {
+ slot->flags |= CQHCI_COMPLETED;
+ return;
+ }
+
+ slot->mrq = NULL;
+
+ cq_host->qcnt -= 1;
+
+ data = mrq->data;
+ if (data) {
+ if (data->error)
+ data->bytes_xfered = 0;
+ else
+ data->bytes_xfered = data->blksz * data->blocks;
+ }
+
+ mmc_cqe_request_done(mmc, mrq);
+}
+
+irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
+ int data_error)
+{
+ u32 status;
+ unsigned long tag = 0, comp_status;
+ struct cqhci_host *cq_host = mmc->cqe_private;
+
+ status = cqhci_readl(cq_host, CQHCI_IS);
+ cqhci_writel(cq_host, status, CQHCI_IS);
+
+ pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status);
+
+ if ((status & CQHCI_IS_RED) || cmd_error || data_error)
+ cqhci_error_irq(mmc, status, cmd_error, data_error);
+
+ if (status & CQHCI_IS_TCC) {
+ /* read TCN and complete the request */
+ comp_status = cqhci_readl(cq_host, CQHCI_TCN);
+ cqhci_writel(cq_host, comp_status, CQHCI_TCN);
+ pr_debug("%s: cqhci: TCN: 0x%08lx\n",
+ mmc_hostname(mmc), comp_status);
+
+ spin_lock(&cq_host->lock);
+
+ for_each_set_bit(tag, &comp_status, cq_host->num_slots) {
+ /* complete the corresponding mrq */
+ pr_debug("%s: cqhci: completing tag %lu\n",
+ mmc_hostname(mmc), tag);
+ cqhci_finish_mrq(mmc, tag);
+ }
+
+ if (cq_host->waiting_for_idle && !cq_host->qcnt) {
+ cq_host->waiting_for_idle = false;
+ wake_up(&cq_host->wait_queue);
+ }
+
+ spin_unlock(&cq_host->lock);
+ }
+
+ if (status & CQHCI_IS_TCL)
+ wake_up(&cq_host->wait_queue);
+
+ if (status & CQHCI_IS_HAC)
+ wake_up(&cq_host->wait_queue);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL(cqhci_irq);
+
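
Completion processing walks the TCN word one set bit at a time. A standalone analogue of the for_each_set_bit() loop, using the GCC/Clang count-trailing-zeros builtin:

	#include <stdio.h>

	int main(void)
	{
		/* Example TCN snapshot: tasks 0, 3 and 31 completed. */
		unsigned long comp_status = (1UL << 0) | (1UL << 3) | (1UL << 31);

		while (comp_status) {
			int tag = __builtin_ctzl(comp_status);

			printf("completing tag %d\n", tag);
			comp_status &= comp_status - 1;  /* clear lowest set bit */
		}
		return 0;
	}
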
+static bool cqhci_is_idle(struct cqhci_host *cq_host, int *ret)
+{
+ unsigned long flags;
+ bool is_idle;
+
+ spin_lock_irqsave(&cq_host->lock, flags);
+ is_idle = !cq_host->qcnt || cq_host->recovery_halt;
+ *ret = cq_host->recovery_halt ? -EBUSY : 0;
+ cq_host->waiting_for_idle = !is_idle;
+ spin_unlock_irqrestore(&cq_host->lock, flags);
+
+ return is_idle;
+}
+
+static int cqhci_wait_for_idle(struct mmc_host *mmc)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ int ret;
+
+ wait_event(cq_host->wait_queue, cqhci_is_idle(cq_host, &ret));
+
+ return ret;
+}
+
+static bool cqhci_timeout(struct mmc_host *mmc, struct mmc_request *mrq,
+ bool *recovery_needed)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ int tag = cqhci_tag(mrq);
+ struct cqhci_slot *slot = &cq_host->slot[tag];
+ unsigned long flags;
+ bool timed_out;
+
+ spin_lock_irqsave(&cq_host->lock, flags);
+ timed_out = slot->mrq == mrq;
+ if (timed_out) {
+ slot->flags |= CQHCI_EXTERNAL_TIMEOUT;
+ cqhci_recovery_needed(mmc, mrq, false);
+ *recovery_needed = cq_host->recovery_halt;
+ }
+ spin_unlock_irqrestore(&cq_host->lock, flags);
+
+ if (timed_out) {
+ pr_err("%s: cqhci: timeout for tag %d\n",
+ mmc_hostname(mmc), tag);
+ cqhci_dumpregs(cq_host);
+ }
+
+ return timed_out;
+}
+
+static bool cqhci_tasks_cleared(struct cqhci_host *cq_host)
+{
+ return !(cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_CLEAR_ALL_TASKS);
+}
+
+static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ bool ret;
+ u32 ctl;
+
+ cqhci_set_irqs(cq_host, CQHCI_IS_TCL);
+
+ ctl = cqhci_readl(cq_host, CQHCI_CTL);
+ ctl |= CQHCI_CLEAR_ALL_TASKS;
+ cqhci_writel(cq_host, ctl, CQHCI_CTL);
+
+ wait_event_timeout(cq_host->wait_queue, cqhci_tasks_cleared(cq_host),
+ msecs_to_jiffies(timeout) + 1);
+
+ cqhci_set_irqs(cq_host, 0);
+
+ ret = cqhci_tasks_cleared(cq_host);
+
+ if (!ret)
+ pr_debug("%s: cqhci: Failed to clear tasks\n",
+ mmc_hostname(mmc));
+
+ return ret;
+}
+
+static bool cqhci_halted(struct cqhci_host *cq_host)
+{
+ return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT;
+}
+
+static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ bool ret;
+ u32 ctl;
+
+ if (cqhci_halted(cq_host))
+ return true;
+
+ cqhci_set_irqs(cq_host, CQHCI_IS_HAC);
+
+ ctl = cqhci_readl(cq_host, CQHCI_CTL);
+ ctl |= CQHCI_HALT;
+ cqhci_writel(cq_host, ctl, CQHCI_CTL);
+
+ wait_event_timeout(cq_host->wait_queue, cqhci_halted(cq_host),
+ msecs_to_jiffies(timeout) + 1);
+
+ cqhci_set_irqs(cq_host, 0);
+
+ ret = cqhci_halted(cq_host);
+
+ if (!ret)
+ pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));
+
+ return ret;
+}
+
+/*
+ * After halting we expect to be able to use the command line. We interpret the
+ * failure to halt to mean the data lines might still be in use (and the upper
+ * layers will need to send a STOP command), so we set the timeout based on a
+ * generous command timeout.
+ */
+#define CQHCI_START_HALT_TIMEOUT 5
+
+static void cqhci_recovery_start(struct mmc_host *mmc)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+
+ pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);
+
+ WARN_ON(!cq_host->recovery_halt);
+
+ cqhci_halt(mmc, CQHCI_START_HALT_TIMEOUT);
+
+ if (cq_host->ops->disable)
+ cq_host->ops->disable(mmc, true);
+
+ mmc->cqe_on = false;
+}
+
+static int cqhci_error_from_flags(unsigned int flags)
+{
+ if (!flags)
+ return 0;
+
+ /* CRC errors might indicate re-tuning so prefer to report that */
+ if (flags & CQHCI_HOST_CRC)
+ return -EILSEQ;
+
+ if (flags & (CQHCI_EXTERNAL_TIMEOUT | CQHCI_HOST_TIMEOUT))
+ return -ETIMEDOUT;
+
+ return -EIO;
+}
+
+static void cqhci_recover_mrq(struct cqhci_host *cq_host, unsigned int tag)
+{
+ struct cqhci_slot *slot = &cq_host->slot[tag];
+ struct mmc_request *mrq = slot->mrq;
+ struct mmc_data *data;
+
+ if (!mrq)
+ return;
+
+ slot->mrq = NULL;
+
+ cq_host->qcnt -= 1;
+
+ data = mrq->data;
+ if (data) {
+ data->bytes_xfered = 0;
+ data->error = cqhci_error_from_flags(slot->flags);
+ } else {
+ mrq->cmd->error = cqhci_error_from_flags(slot->flags);
+ }
+
+ mmc_cqe_request_done(cq_host->mmc, mrq);
+}
+
+static void cqhci_recover_mrqs(struct cqhci_host *cq_host)
+{
+ int i;
+
+ for (i = 0; i < cq_host->num_slots; i++)
+ cqhci_recover_mrq(cq_host, i);
+}
+
+/*
+ * By now the command and data lines should be unused so there is no reason for
+ * CQHCI to take a long time to halt, but if it doesn't halt there could be
+ * problems clearing tasks, so be generous.
+ */
+#define CQHCI_FINISH_HALT_TIMEOUT 20
+
+/* CQHCI can be expected to clear its internal state pretty quickly */
+#define CQHCI_CLEAR_TIMEOUT 20
+
+static void cqhci_recovery_finish(struct mmc_host *mmc)
+{
+ struct cqhci_host *cq_host = mmc->cqe_private;
+ unsigned long flags;
+ u32 cqcfg;
+ bool ok;
+
+ pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);
+
+ WARN_ON(!cq_host->recovery_halt);
+
+ ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
+
+ if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
+ ok = false;
+
+ /*
+ * The specification contradicts itself: it says tasks cannot be
+ * cleared if CQHCI does not halt, that if CQHCI does not halt it
+ * should be disabled and re-enabled, and yet that CQHCI must not be
+ * disabled before clearing tasks. Have a go anyway.
+ */
+ if (!ok) {
+ pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc));
+ cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
+ cqcfg &= ~CQHCI_ENABLE;
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+ cqcfg |= CQHCI_ENABLE;
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+ /* Be sure that there are no tasks */
+ ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
+ if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
+ ok = false;
+ WARN_ON(!ok);
+ }
+
+ cqhci_recover_mrqs(cq_host);
+
+ WARN_ON(cq_host->qcnt);
+
+ spin_lock_irqsave(&cq_host->lock, flags);
+ cq_host->qcnt = 0;
+ cq_host->recovery_halt = false;
+ mmc->cqe_on = false;
+ spin_unlock_irqrestore(&cq_host->lock, flags);
+
+ /* Ensure all writes are done before interrupts are re-enabled */
+ wmb();
+
+ cqhci_writel(cq_host, CQHCI_IS_HAC | CQHCI_IS_TCL, CQHCI_IS);
+
+ cqhci_set_irqs(cq_host, CQHCI_IS_MASK);
+
+ pr_debug("%s: cqhci: recovery done\n", mmc_hostname(mmc));
+}
+
+static const struct mmc_cqe_ops cqhci_cqe_ops = {
+ .cqe_enable = cqhci_enable,
+ .cqe_disable = cqhci_disable,
+ .cqe_request = cqhci_request,
+ .cqe_post_req = cqhci_post_req,
+ .cqe_off = cqhci_off,
+ .cqe_wait_for_idle = cqhci_wait_for_idle,
+ .cqe_timeout = cqhci_timeout,
+ .cqe_recovery_start = cqhci_recovery_start,
+ .cqe_recovery_finish = cqhci_recovery_finish,
+};
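
Taken together, these ops implement the recovery flow shown above (a summary of the code, not text from the original patch): .cqe_timeout marks the timed-out slot and requests a halt, .cqe_recovery_start halts the controller and calls the host's disable hook, and .cqe_recovery_finish clears all tasks, completes the outstanding requests recorded in the slot array, and re-enables interrupts.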
+
+struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev)
+{
+ struct cqhci_host *cq_host;
+ struct resource *cqhci_memres = NULL;
+
+ /* check for and set up the CMDQ interface */
+ cqhci_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "cqhci_mem");
+ if (!cqhci_memres) {
+ dev_dbg(&pdev->dev, "CMDQ not supported\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL);
+ if (!cq_host)
+ return ERR_PTR(-ENOMEM);
+ cq_host->mmio = devm_ioremap(&pdev->dev,
+ cqhci_memres->start,
+ resource_size(cqhci_memres));
+ if (!cq_host->mmio) {
+ dev_err(&pdev->dev, "failed to remap cqhci regs\n");
+ return ERR_PTR(-EBUSY);
+ }
+ dev_dbg(&pdev->dev, "CMDQ ioremap: done\n");
+
+ return cq_host;
+}
+EXPORT_SYMBOL(cqhci_pltfm_init);
+
+static unsigned int cqhci_ver_major(struct cqhci_host *cq_host)
+{
+ return CQHCI_VER_MAJOR(cqhci_readl(cq_host, CQHCI_VER));
+}
+
+static unsigned int cqhci_ver_minor(struct cqhci_host *cq_host)
+{
+ u32 ver = cqhci_readl(cq_host, CQHCI_VER);
+
+ return CQHCI_VER_MINOR1(ver) * 10 + CQHCI_VER_MINOR2(ver);
+}
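
As a worked example of the decode above (illustrative value, not taken from this patch): a CQHCI_VER read of 0x0510 gives CQHCI_VER_MAJOR = 5, CQHCI_VER_MINOR1 = 1 and CQHCI_VER_MINOR2 = 0, so cqhci_init() below would report "CQHCI version 5.10".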
+
+int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc,
+ bool dma64)
+{
+ int err;
+
+ cq_host->dma64 = dma64;
+ cq_host->mmc = mmc;
+ cq_host->mmc->cqe_private = cq_host;
+
+ cq_host->num_slots = NUM_SLOTS;
+ cq_host->dcmd_slot = DCMD_SLOT;
+
+ mmc->cqe_ops = &cqhci_cqe_ops;
+
+ mmc->cqe_qdepth = NUM_SLOTS;
+ if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
+ mmc->cqe_qdepth -= 1;
+
+ cq_host->slot = devm_kcalloc(mmc_dev(mmc), cq_host->num_slots,
+ sizeof(*cq_host->slot), GFP_KERNEL);
+ if (!cq_host->slot) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ spin_lock_init(&cq_host->lock);
+
+ init_completion(&cq_host->halt_comp);
+ init_waitqueue_head(&cq_host->wait_queue);
+
+ pr_info("%s: CQHCI version %u.%02u\n",
+ mmc_hostname(mmc), cqhci_ver_major(cq_host),
+ cqhci_ver_minor(cq_host));
+
+ return 0;
+
+out_err:
+ pr_err("%s: CQHCI version %u.%02u failed to initialize, error %d\n",
+ mmc_hostname(mmc), cqhci_ver_major(cq_host),
+ cqhci_ver_minor(cq_host), err);
+ return err;
+}
+EXPORT_SYMBOL(cqhci_init);
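
For orientation, a minimal sketch of how a host driver glues these two exported entry points together; the function name my_host_add_cqe and the choice of 32-bit DMA are illustrative assumptions (sdhci_arasan_add_host() later in this patch is a real in-tree user, calling cqhci_init() directly):

    /* Hypothetical glue for a platform host with a "cqhci_mem" resource */
    static int my_host_add_cqe(struct platform_device *pdev,
                               struct mmc_host *mmc)
    {
            struct cqhci_host *cq_host;

            cq_host = cqhci_pltfm_init(pdev);
            if (IS_ERR(cq_host))
                    return PTR_ERR(cq_host);

            /* Advertise command queueing (and DCMD) to the core */
            mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;

            /* false: 32-bit DMA, i.e. no 128-bit task descriptors */
            return cqhci_init(cq_host, mmc, false);
    }

Note that when MMC_CAP2_CQE_DCMD is set, cqhci_init() reserves one slot for direct commands, so the queue depth advertised in cqe_qdepth is one less than the number of hardware slots.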
+
+MODULE_AUTHOR("Venkat Gopalakrishnan <venkatg@codeaurora.org>");
+MODULE_DESCRIPTION("Command Queue Host Controller Interface driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/cqhci.h b/drivers/mmc/host/cqhci.h
new file mode 100644
index 000000000000..9e68286a07b4
--- /dev/null
+++ b/drivers/mmc/host/cqhci.h
@@ -0,0 +1,240 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef LINUX_MMC_CQHCI_H
+#define LINUX_MMC_CQHCI_H
+
+#include <linux/compiler.h>
+#include <linux/bitops.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <linux/wait.h>
+#include <linux/irqreturn.h>
+#include <asm/io.h>
+
+/* registers */
+/* version */
+#define CQHCI_VER 0x00
+#define CQHCI_VER_MAJOR(x) (((x) & GENMASK(11, 8)) >> 8)
+#define CQHCI_VER_MINOR1(x) (((x) & GENMASK(7, 4)) >> 4)
+#define CQHCI_VER_MINOR2(x) ((x) & GENMASK(3, 0))
+
+/* capabilities */
+#define CQHCI_CAP 0x04
+/* configuration */
+#define CQHCI_CFG 0x08
+#define CQHCI_DCMD 0x00001000
+#define CQHCI_TASK_DESC_SZ 0x00000100
+#define CQHCI_ENABLE 0x00000001
+
+/* control */
+#define CQHCI_CTL 0x0C
+#define CQHCI_CLEAR_ALL_TASKS 0x00000100
+#define CQHCI_HALT 0x00000001
+
+/* interrupt status */
+#define CQHCI_IS 0x10
+#define CQHCI_IS_HAC BIT(0)
+#define CQHCI_IS_TCC BIT(1)
+#define CQHCI_IS_RED BIT(2)
+#define CQHCI_IS_TCL BIT(3)
+
+#define CQHCI_IS_MASK (CQHCI_IS_TCC | CQHCI_IS_RED)
+
+/* interrupt status enable */
+#define CQHCI_ISTE 0x14
+
+/* interrupt signal enable */
+#define CQHCI_ISGE 0x18
+
+/* interrupt coalescing */
+#define CQHCI_IC 0x1C
+#define CQHCI_IC_ENABLE BIT(31)
+#define CQHCI_IC_RESET BIT(16)
+#define CQHCI_IC_ICCTHWEN BIT(15)
+#define CQHCI_IC_ICCTH(x) (((x) & 0x1F) << 8)
+#define CQHCI_IC_ICTOVALWEN BIT(7)
+#define CQHCI_IC_ICTOVAL(x) ((x) & 0x7F)
+
+/* task list base address */
+#define CQHCI_TDLBA 0x20
+
+/* task list base address upper */
+#define CQHCI_TDLBAU 0x24
+
+/* door-bell */
+#define CQHCI_TDBR 0x28
+
+/* task completion notification */
+#define CQHCI_TCN 0x2C
+
+/* device queue status */
+#define CQHCI_DQS 0x30
+
+/* device pending tasks */
+#define CQHCI_DPT 0x34
+
+/* task clear */
+#define CQHCI_TCLR 0x38
+
+/* send status config 1 */
+#define CQHCI_SSC1 0x40
+
+/* send status config 2 */
+#define CQHCI_SSC2 0x44
+
+/* response for dcmd */
+#define CQHCI_CRDCT 0x48
+
+/* response mode error mask */
+#define CQHCI_RMEM 0x50
+
+/* task error info */
+#define CQHCI_TERRI 0x54
+
+#define CQHCI_TERRI_C_INDEX(x) ((x) & GENMASK(5, 0))
+#define CQHCI_TERRI_C_TASK(x) (((x) & GENMASK(12, 8)) >> 8)
+#define CQHCI_TERRI_C_VALID(x) ((x) & BIT(15))
+#define CQHCI_TERRI_D_INDEX(x) (((x) & GENMASK(21, 16)) >> 16)
+#define CQHCI_TERRI_D_TASK(x) (((x) & GENMASK(28, 24)) >> 24)
+#define CQHCI_TERRI_D_VALID(x) ((x) & BIT(31))
+
+/* command response index */
+#define CQHCI_CRI 0x58
+
+/* command response argument */
+#define CQHCI_CRA 0x5C
+
+#define CQHCI_INT_ALL 0xF
+#define CQHCI_IC_DEFAULT_ICCTH 31
+#define CQHCI_IC_DEFAULT_ICTOVAL 1
+
+/* attribute fields */
+#define CQHCI_VALID(x) (((x) & 1) << 0)
+#define CQHCI_END(x) (((x) & 1) << 1)
+#define CQHCI_INT(x) (((x) & 1) << 2)
+#define CQHCI_ACT(x) (((x) & 0x7) << 3)
+
+/* data command task descriptor fields */
+#define CQHCI_FORCED_PROG(x) (((x) & 1) << 6)
+#define CQHCI_CONTEXT(x) (((x) & 0xF) << 7)
+#define CQHCI_DATA_TAG(x) (((x) & 1) << 11)
+#define CQHCI_DATA_DIR(x) (((x) & 1) << 12)
+#define CQHCI_PRIORITY(x) (((x) & 1) << 13)
+#define CQHCI_QBAR(x) (((x) & 1) << 14)
+#define CQHCI_REL_WRITE(x) (((x) & 1) << 15)
+#define CQHCI_BLK_COUNT(x) (((x) & 0xFFFF) << 16)
+#define CQHCI_BLK_ADDR(x) (((x) & 0xFFFFFFFF) << 32)
+
+/* direct command task descriptor fields */
+#define CQHCI_CMD_INDEX(x) (((x) & 0x3F) << 16)
+#define CQHCI_CMD_TIMING(x) (((x) & 1) << 22)
+#define CQHCI_RESP_TYPE(x) (((x) & 0x3) << 23)
+
+/* transfer descriptor fields */
+#define CQHCI_DAT_LENGTH(x) (((x) & 0xFFFF) << 16)
+#define CQHCI_DAT_ADDR_LO(x) (((x) & 0xFFFFFFFF) << 32)
+#define CQHCI_DAT_ADDR_HI(x) (((x) & 0xFFFFFFFF) << 0)
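
These field macros are intended to be OR-ed together into the 64-bit descriptor words that cqhci.c builds. A hedged sketch of composing a data task descriptor (the block count, address and direction are made-up example values; ACT = 0x5 matches the action code the driver uses for task descriptors):

    /* Illustrative only: an 8-block read at block address 0x1000,
     * with an interrupt requested on completion.
     */
    u64 task_desc = CQHCI_VALID(1) | CQHCI_END(1) | CQHCI_INT(1) |
                    CQHCI_ACT(0x5) |            /* task descriptor action */
                    CQHCI_DATA_DIR(1) |         /* 1 = read (device to host) */
                    CQHCI_BLK_COUNT(8) |
                    CQHCI_BLK_ADDR(0x1000ULL);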
+
+struct cqhci_host_ops;
+struct mmc_host;
+struct cqhci_slot;
+
+struct cqhci_host {
+ const struct cqhci_host_ops *ops;
+ void __iomem *mmio;
+ struct mmc_host *mmc;
+
+ spinlock_t lock;
+
+ /* relative card address of device */
+ unsigned int rca;
+
+ /* 64 bit DMA */
+ bool dma64;
+ int num_slots;
+ int qcnt;
+
+ u32 dcmd_slot;
+ u32 caps;
+#define CQHCI_TASK_DESC_SZ_128 0x1
+
+ u32 quirks;
+#define CQHCI_QUIRK_SHORT_TXFR_DESC_SZ 0x1
+
+ bool enabled;
+ bool halted;
+ bool init_done;
+ bool activated;
+ bool waiting_for_idle;
+ bool recovery_halt;
+
+ size_t desc_size;
+ size_t data_size;
+
+ u8 *desc_base;
+
+ /* total descriptor size */
+ u8 slot_sz;
+
+ /* 64/128 bit depends on CQHCI_CFG */
+ u8 task_desc_len;
+
+ /* 64 bit on 32-bit arch, 128 bit on 64-bit */
+ u8 link_desc_len;
+
+ u8 *trans_desc_base;
+ /* same length as transfer descriptor */
+ u8 trans_desc_len;
+
+ dma_addr_t desc_dma_base;
+ dma_addr_t trans_desc_dma_base;
+
+ struct completion halt_comp;
+ wait_queue_head_t wait_queue;
+ struct cqhci_slot *slot;
+};
+
+struct cqhci_host_ops {
+ void (*dumpregs)(struct mmc_host *mmc);
+ void (*write_l)(struct cqhci_host *host, u32 val, int reg);
+ u32 (*read_l)(struct cqhci_host *host, int reg);
+ void (*enable)(struct mmc_host *mmc);
+ void (*disable)(struct mmc_host *mmc, bool recovery);
+};
+
+static inline void cqhci_writel(struct cqhci_host *host, u32 val, int reg)
+{
+ if (unlikely(host->ops->write_l))
+ host->ops->write_l(host, val, reg);
+ else
+ writel_relaxed(val, host->mmio + reg);
+}
+
+static inline u32 cqhci_readl(struct cqhci_host *host, int reg)
+{
+ if (unlikely(host->ops->read_l))
+ return host->ops->read_l(host, reg);
+ else
+ return readl_relaxed(host->mmio + reg);
+}
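
These accessors let a controller driver substitute its own register access when the generic MMIO path does not fit. A hedged sketch of such an override (the controller quirk here is hypothetical):

    /* Hypothetical quirk: issue each 32-bit CQHCI write as two 16-bit ones */
    static void my_cqhci_write_l(struct cqhci_host *host, u32 val, int reg)
    {
            writew_relaxed(val & 0xffff, host->mmio + reg);
            writew_relaxed(val >> 16, host->mmio + reg + 2);
    }

    static const struct cqhci_host_ops my_cqhci_ops = {
            .write_l = my_cqhci_write_l,
    };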
+
+struct platform_device;
+
+irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
+ int data_error);
+int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc, bool dma64);
+struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev);
+int cqhci_suspend(struct mmc_host *mmc);
+int cqhci_resume(struct mmc_host *mmc);
+
+#endif
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 351330dfb954..8e363174f9d6 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -174,7 +174,7 @@ module_param(poll_loopcount, uint, S_IRUGO);
MODULE_PARM_DESC(poll_loopcount,
"Maximum polling loop count. Default = 32");
-static unsigned __initdata use_dma = 1;
+static unsigned use_dma = 1;
module_param(use_dma, uint, 0);
MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 1");
@@ -496,8 +496,7 @@ static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host,
return ret;
}
-static void __init_or_module
-davinci_release_dma_channels(struct mmc_davinci_host *host)
+static void davinci_release_dma_channels(struct mmc_davinci_host *host)
{
if (!host->use_dma)
return;
@@ -506,7 +505,7 @@ davinci_release_dma_channels(struct mmc_davinci_host *host)
dma_release_channel(host->dma_rx);
}
-static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
+static int davinci_acquire_dma_channels(struct mmc_davinci_host *host)
{
host->dma_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
if (IS_ERR(host->dma_tx)) {
@@ -1201,7 +1200,7 @@ static int mmc_davinci_parse_pdata(struct mmc_host *mmc)
return 0;
}
-static int __init davinci_mmcsd_probe(struct platform_device *pdev)
+static int davinci_mmcsd_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
struct mmc_davinci_host *host = NULL;
@@ -1254,8 +1253,9 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
pdev->id_entry = match->data;
ret = mmc_of_parse(mmc);
if (ret) {
- dev_err(&pdev->dev,
- "could not parse of data: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "could not parse of data: %d\n", ret);
goto parse_fail;
}
} else {
@@ -1414,11 +1414,12 @@ static struct platform_driver davinci_mmcsd_driver = {
.pm = davinci_mmcsd_pm_ops,
.of_match_table = davinci_mmc_dt_ids,
},
+ .probe = davinci_mmcsd_probe,
.remove = __exit_p(davinci_mmcsd_remove),
.id_table = davinci_mmc_devtype,
};
-module_platform_driver_probe(davinci_mmcsd_driver, davinci_mmcsd_probe);
+module_platform_driver(davinci_mmcsd_driver);
MODULE_AUTHOR("Texas Instruments India");
MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index e0862d3f65b3..32a6a228cd12 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -1208,7 +1208,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (!irq) {
+ if (irq <= 0) {
dev_err(&pdev->dev, "failed to get interrupt resource.\n");
ret = -EINVAL;
goto free_host;
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index e8a1bb1ae694..70b0df8b9c78 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -82,6 +82,10 @@ static unsigned int fmax = 515633;
* @qcom_fifo: enables qcom specific fifo pio read logic.
* @qcom_dml: enables qcom specific dma glue for dma transfers.
* @reversed_irq_handling: handle data irq before cmd irq.
+ * @mmcimask1: true if the variant has an MMCIMASK1 register.
+ * @start_err: bitmask identifying the STARTBITERR bit inside the MMCISTATUS
+ * register.
+ * @opendrain: bitmask identifying the OPENDRAIN bit inside the MMCIPOWER
+ * register.
*/
struct variant_data {
unsigned int clkreg;
@@ -111,6 +115,9 @@ struct variant_data {
bool qcom_fifo;
bool qcom_dml;
bool reversed_irq_handling;
+ bool mmcimask1;
+ u32 start_err;
+ u32 opendrain;
};
static struct variant_data variant_arm = {
@@ -120,6 +127,9 @@ static struct variant_data variant_arm = {
.pwrreg_powerup = MCI_PWR_UP,
.f_max = 100000000,
.reversed_irq_handling = true,
+ .mmcimask1 = true,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_ROD,
};
static struct variant_data variant_arm_extended_fifo = {
@@ -128,6 +138,9 @@ static struct variant_data variant_arm_extended_fifo = {
.datalength_bits = 16,
.pwrreg_powerup = MCI_PWR_UP,
.f_max = 100000000,
+ .mmcimask1 = true,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_ROD,
};
static struct variant_data variant_arm_extended_fifo_hwfc = {
@@ -137,6 +150,9 @@ static struct variant_data variant_arm_extended_fifo_hwfc = {
.datalength_bits = 16,
.pwrreg_powerup = MCI_PWR_UP,
.f_max = 100000000,
+ .mmcimask1 = true,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_ROD,
};
static struct variant_data variant_u300 = {
@@ -152,6 +168,9 @@ static struct variant_data variant_u300 = {
.signal_direction = true,
.pwrreg_clkgate = true,
.pwrreg_nopower = true,
+ .mmcimask1 = true,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_OD,
};
static struct variant_data variant_nomadik = {
@@ -168,6 +187,9 @@ static struct variant_data variant_nomadik = {
.signal_direction = true,
.pwrreg_clkgate = true,
.pwrreg_nopower = true,
+ .mmcimask1 = true,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_OD,
};
static struct variant_data variant_ux500 = {
@@ -190,6 +212,9 @@ static struct variant_data variant_ux500 = {
.busy_detect_flag = MCI_ST_CARDBUSY,
.busy_detect_mask = MCI_ST_BUSYENDMASK,
.pwrreg_nopower = true,
+ .mmcimask1 = true,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_OD,
};
static struct variant_data variant_ux500v2 = {
@@ -214,6 +239,26 @@ static struct variant_data variant_ux500v2 = {
.busy_detect_flag = MCI_ST_CARDBUSY,
.busy_detect_mask = MCI_ST_BUSYENDMASK,
.pwrreg_nopower = true,
+ .mmcimask1 = true,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_OD,
+};
+
+static struct variant_data variant_stm32 = {
+ .fifosize = 32 * 4,
+ .fifohalfsize = 8 * 4,
+ .clkreg = MCI_CLK_ENABLE,
+ .clkreg_enable = MCI_ST_UX500_HWFCEN,
+ .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
+ .clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
+ .datalength_bits = 24,
+ .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
+ .st_sdio = true,
+ .st_clkdiv = true,
+ .pwrreg_powerup = MCI_PWR_ON,
+ .f_max = 48000000,
+ .pwrreg_clkgate = true,
+ .pwrreg_nopower = true,
};
static struct variant_data variant_qcom = {
@@ -232,6 +277,9 @@ static struct variant_data variant_qcom = {
.explicit_mclk_control = true,
.qcom_fifo = true,
.qcom_dml = true,
+ .mmcimask1 = true,
+ .start_err = MCI_STARTBITERR,
+ .opendrain = MCI_ROD,
};
/* Busy detection for the ST Micro variant */
@@ -396,6 +444,7 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
void __iomem *base = host->base;
+ struct variant_data *variant = host->variant;
if (host->singleirq) {
unsigned int mask0 = readl(base + MMCIMASK0);
@@ -406,7 +455,10 @@ static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
writel(mask0, base + MMCIMASK0);
}
- writel(mask, base + MMCIMASK1);
+ if (variant->mmcimask1)
+ writel(mask, base + MMCIMASK1);
+
+ host->mask1_reg = mask;
}
static void mmci_stop_data(struct mmci_host *host)
@@ -921,8 +973,9 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
return;
/* First check for errors */
- if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
- MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
+ if (status & (MCI_DATACRCFAIL | MCI_DATATIMEOUT |
+ host->variant->start_err |
+ MCI_TXUNDERRUN | MCI_RXOVERRUN)) {
u32 remain, success;
/* Terminate the DMA transfer */
@@ -1286,7 +1339,7 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
status = readl(host->base + MMCISTATUS);
if (host->singleirq) {
- if (status & readl(host->base + MMCIMASK1))
+ if (status & host->mask1_reg)
mmci_pio_irq(irq, dev_id);
status &= ~MCI_IRQ1MASK;
@@ -1429,16 +1482,18 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
~MCI_ST_DATA2DIREN);
}
- if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
- if (host->hw_designer != AMBA_VENDOR_ST)
- pwr |= MCI_ROD;
- else {
- /*
- * The ST Micro variant use the ROD bit for something
- * else and only has OD (Open Drain).
- */
- pwr |= MCI_OD;
- }
+ if (variant->opendrain) {
+ if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
+ pwr |= variant->opendrain;
+ } else {
+ /*
+ * If the variant cannot configure the pads on its own, we
+ * expect pinctrl to be able to do that for us.
+ */
+ if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
+ pinctrl_select_state(host->pinctrl, host->pins_opendrain);
+ else
+ pinctrl_select_state(host->pinctrl, host->pins_default);
}
/*
@@ -1583,6 +1638,35 @@ static int mmci_probe(struct amba_device *dev,
host = mmc_priv(mmc);
host->mmc = mmc;
+ /*
+ * Some variants (STM32) don't have an opendrain bit; the pins can
+ * nevertheless be configured accordingly using pinctrl.
+ */
+ if (!variant->opendrain) {
+ host->pinctrl = devm_pinctrl_get(&dev->dev);
+ if (IS_ERR(host->pinctrl)) {
+ dev_err(&dev->dev, "failed to get pinctrl");
+ ret = PTR_ERR(host->pinctrl);
+ goto host_free;
+ }
+
+ host->pins_default = pinctrl_lookup_state(host->pinctrl,
+ PINCTRL_STATE_DEFAULT);
+ if (IS_ERR(host->pins_default)) {
+ dev_err(mmc_dev(mmc), "Can't select default pins\n");
+ ret = PTR_ERR(host->pins_default);
+ goto host_free;
+ }
+
+ host->pins_opendrain = pinctrl_lookup_state(host->pinctrl,
+ MMCI_PINCTRL_STATE_OPENDRAIN);
+ if (IS_ERR(host->pins_opendrain)) {
+ dev_err(mmc_dev(mmc), "Can't select opendrain pins\n");
+ ret = PTR_ERR(host->pins_opendrain);
+ goto host_free;
+ }
+ }
+
host->hw_designer = amba_manf(dev);
host->hw_revision = amba_rev(dev);
dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
@@ -1729,7 +1813,10 @@ static int mmci_probe(struct amba_device *dev,
spin_lock_init(&host->lock);
writel(0, host->base + MMCIMASK0);
- writel(0, host->base + MMCIMASK1);
+
+ if (variant->mmcimask1)
+ writel(0, host->base + MMCIMASK1);
+
writel(0xfff, host->base + MMCICLEAR);
/*
@@ -1809,6 +1896,7 @@ static int mmci_remove(struct amba_device *dev)
if (mmc) {
struct mmci_host *host = mmc_priv(mmc);
+ struct variant_data *variant = host->variant;
/*
* Undo pm_runtime_put() in probe. We use the _sync
@@ -1819,7 +1907,9 @@ static int mmci_remove(struct amba_device *dev)
mmc_remove_host(mmc);
writel(0, host->base + MMCIMASK0);
- writel(0, host->base + MMCIMASK1);
+
+ if (variant->mmcimask1)
+ writel(0, host->base + MMCIMASK1);
writel(0, host->base + MMCICOMMAND);
writel(0, host->base + MMCIDATACTRL);
@@ -1951,6 +2041,11 @@ static const struct amba_id mmci_ids[] = {
.mask = 0xf0ffffff,
.data = &variant_ux500v2,
},
+ {
+ .id = 0x00880180,
+ .mask = 0x00ffffff,
+ .data = &variant_stm32,
+ },
/* Qualcomm variants */
{
.id = 0x00051180,
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 4a8bef1aac8f..f91cdf7f6dae 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -192,6 +192,8 @@
#define NR_SG 128
+#define MMCI_PINCTRL_STATE_OPENDRAIN "opendrain"
+
struct clk;
struct variant_data;
struct dma_chan;
@@ -223,9 +225,13 @@ struct mmci_host {
u32 clk_reg;
u32 datactrl_reg;
u32 busy_status;
+ u32 mask1_reg;
bool vqmmc_enabled;
struct mmci_platform_data *plat;
struct variant_data *variant;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_default;
+ struct pinctrl_state *pins_opendrain;
u8 hw_designer;
u8 hw_revision:4;
diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h
index b9dfea5d8193..f13f798d8506 100644
--- a/drivers/mmc/host/renesas_sdhi.h
+++ b/drivers/mmc/host/renesas_sdhi.h
@@ -35,6 +35,28 @@ struct renesas_sdhi_of_data {
unsigned short max_segs;
};
+struct tmio_mmc_dma {
+ enum dma_slave_buswidth dma_buswidth;
+ bool (*filter)(struct dma_chan *chan, void *arg);
+ void (*enable)(struct tmio_mmc_host *host, bool enable);
+ struct completion dma_dataend;
+ struct tasklet_struct dma_complete;
+};
+
+struct renesas_sdhi {
+ struct clk *clk;
+ struct clk *clk_cd;
+ struct tmio_mmc_data mmc_data;
+ struct tmio_mmc_dma dma_priv;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_default, *pins_uhs;
+ void __iomem *scc_ctl;
+ u32 scc_tappos;
+};
+
+#define host_to_priv(host) \
+ container_of((host)->pdata, struct renesas_sdhi, mmc_data)
+
int renesas_sdhi_probe(struct platform_device *pdev,
const struct tmio_mmc_dma_ops *dma_ops);
int renesas_sdhi_remove(struct platform_device *pdev);
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 157e1d9e7725..80943fa07db6 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -47,19 +47,6 @@
#define SDHI_VER_GEN3_SD 0xcc10
#define SDHI_VER_GEN3_SDMMC 0xcd10
-#define host_to_priv(host) \
- container_of((host)->pdata, struct renesas_sdhi, mmc_data)
-
-struct renesas_sdhi {
- struct clk *clk;
- struct clk *clk_cd;
- struct tmio_mmc_data mmc_data;
- struct tmio_mmc_dma dma_priv;
- struct pinctrl *pinctrl;
- struct pinctrl_state *pins_default, *pins_uhs;
- void __iomem *scc_ctl;
-};
-
static void renesas_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width)
{
u32 val;
@@ -281,7 +268,7 @@ static unsigned int renesas_sdhi_init_tuning(struct tmio_mmc_host *host)
~SH_MOBILE_SDHI_SCC_RVSCNTL_RVSEN &
sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_RVSCNTL));
- sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DT2FF, host->scc_tappos);
+ sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DT2FF, priv->scc_tappos);
/* Read TAPNUM */
return (sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL) >>
@@ -498,7 +485,7 @@ int renesas_sdhi_probe(struct platform_device *pdev,
if (IS_ERR(priv->clk)) {
ret = PTR_ERR(priv->clk);
dev_err(&pdev->dev, "cannot get clock: %d\n", ret);
- goto eprobe;
+ return ret;
}
/*
@@ -524,11 +511,9 @@ int renesas_sdhi_probe(struct platform_device *pdev,
"state_uhs");
}
- host = tmio_mmc_host_alloc(pdev);
- if (!host) {
- ret = -ENOMEM;
- goto eprobe;
- }
+ host = tmio_mmc_host_alloc(pdev, mmc_data);
+ if (IS_ERR(host))
+ return PTR_ERR(host);
if (of_data) {
mmc_data->flags |= of_data->tmio_flags;
@@ -542,18 +527,18 @@ int renesas_sdhi_probe(struct platform_device *pdev,
host->bus_shift = of_data->bus_shift;
}
- host->dma = dma_priv;
host->write16_hook = renesas_sdhi_write16_hook;
host->clk_enable = renesas_sdhi_clk_enable;
host->clk_update = renesas_sdhi_clk_update;
host->clk_disable = renesas_sdhi_clk_disable;
host->multi_io_quirk = renesas_sdhi_multi_io_quirk;
+ host->dma_ops = dma_ops;
/* SDR speeds are only available on Gen2+ */
if (mmc_data->flags & TMIO_MMC_MIN_RCAR2) {
/* card_busy caused issues on r8a73a4 (pre-Gen2) CD-less SDHI */
- host->card_busy = renesas_sdhi_card_busy;
- host->start_signal_voltage_switch =
+ host->ops.card_busy = renesas_sdhi_card_busy;
+ host->ops.start_signal_voltage_switch =
renesas_sdhi_start_signal_voltage_switch;
}
@@ -587,10 +572,14 @@ int renesas_sdhi_probe(struct platform_device *pdev,
/* All SDHI have SDIO status bits which must be 1 */
mmc_data->flags |= TMIO_MMC_SDIO_STATUS_SETBITS;
- ret = tmio_mmc_host_probe(host, mmc_data, dma_ops);
- if (ret < 0)
+ ret = renesas_sdhi_clk_enable(host);
+ if (ret)
goto efree;
+ ret = tmio_mmc_host_probe(host);
+ if (ret < 0)
+ goto edisclk;
+
/* One Gen2 SDHI incarnation does NOT have a CBSY bit */
if (sd_ctrl_read16(host, CTL_VERSION) == SDHI_VER_GEN2_SDR50)
mmc_data->flags &= ~TMIO_MMC_HAVE_CBSY;
@@ -607,7 +596,7 @@ int renesas_sdhi_probe(struct platform_device *pdev,
for (i = 0; i < of_data->taps_num; i++) {
if (taps[i].clk_rate == 0 ||
taps[i].clk_rate == host->mmc->f_max) {
- host->scc_tappos = taps->tap;
+ priv->scc_tappos = taps->tap;
hit = true;
break;
}
@@ -651,19 +640,21 @@ int renesas_sdhi_probe(struct platform_device *pdev,
eirq:
tmio_mmc_host_remove(host);
+edisclk:
+ renesas_sdhi_clk_disable(host);
efree:
tmio_mmc_host_free(host);
-eprobe:
+
return ret;
}
EXPORT_SYMBOL_GPL(renesas_sdhi_probe);
int renesas_sdhi_remove(struct platform_device *pdev)
{
- struct mmc_host *mmc = platform_get_drvdata(pdev);
- struct tmio_mmc_host *host = mmc_priv(mmc);
+ struct tmio_mmc_host *host = platform_get_drvdata(pdev);
tmio_mmc_host_remove(host);
+ renesas_sdhi_clk_disable(host);
return 0;
}
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index 41cbe84c1d18..7c03cfead6f9 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -103,6 +103,8 @@ renesas_sdhi_internal_dmac_dm_write(struct tmio_mmc_host *host,
static void
renesas_sdhi_internal_dmac_enable_dma(struct tmio_mmc_host *host, bool enable)
{
+ struct renesas_sdhi *priv = host_to_priv(host);
+
if (!host->chan_tx || !host->chan_rx)
return;
@@ -110,8 +112,8 @@ renesas_sdhi_internal_dmac_enable_dma(struct tmio_mmc_host *host, bool enable)
renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO1,
INFO1_CLEAR);
- if (host->dma->enable)
- host->dma->enable(host, enable);
+ if (priv->dma_priv.enable)
+ priv->dma_priv.enable(host, enable);
}
static void
@@ -130,7 +132,9 @@ renesas_sdhi_internal_dmac_abort_dma(struct tmio_mmc_host *host) {
static void
renesas_sdhi_internal_dmac_dataend_dma(struct tmio_mmc_host *host) {
- tasklet_schedule(&host->dma_complete);
+ struct renesas_sdhi *priv = host_to_priv(host);
+
+ tasklet_schedule(&priv->dma_priv.dma_complete);
}
static void
@@ -220,10 +224,12 @@ static void
renesas_sdhi_internal_dmac_request_dma(struct tmio_mmc_host *host,
struct tmio_mmc_data *pdata)
{
+ struct renesas_sdhi *priv = host_to_priv(host);
+
/* Each value is set to non-zero to assume "enabling" each DMA */
host->chan_rx = host->chan_tx = (void *)0xdeadbeaf;
- tasklet_init(&host->dma_complete,
+ tasklet_init(&priv->dma_priv.dma_complete,
renesas_sdhi_internal_dmac_complete_tasklet_fn,
(unsigned long)host);
tasklet_init(&host->dma_issue,
@@ -255,6 +261,7 @@ static const struct soc_device_attribute gen3_soc_whitelist[] = {
{ .soc_id = "r8a7795", .revision = "ES1.*" },
{ .soc_id = "r8a7795", .revision = "ES2.0" },
{ .soc_id = "r8a7796", .revision = "ES1.0" },
+ { .soc_id = "r8a77995", .revision = "ES1.0" },
{ /* sentinel */ }
};
diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
index 9ab10436e4b8..82d757c480b2 100644
--- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
@@ -117,11 +117,13 @@ MODULE_DEVICE_TABLE(of, renesas_sdhi_sys_dmac_of_match);
static void renesas_sdhi_sys_dmac_enable_dma(struct tmio_mmc_host *host,
bool enable)
{
+ struct renesas_sdhi *priv = host_to_priv(host);
+
if (!host->chan_tx || !host->chan_rx)
return;
- if (host->dma->enable)
- host->dma->enable(host, enable);
+ if (priv->dma_priv.enable)
+ priv->dma_priv.enable(host, enable);
}
static void renesas_sdhi_sys_dmac_abort_dma(struct tmio_mmc_host *host)
@@ -138,12 +140,15 @@ static void renesas_sdhi_sys_dmac_abort_dma(struct tmio_mmc_host *host)
static void renesas_sdhi_sys_dmac_dataend_dma(struct tmio_mmc_host *host)
{
- complete(&host->dma_dataend);
+ struct renesas_sdhi *priv = host_to_priv(host);
+
+ complete(&priv->dma_priv.dma_dataend);
}
static void renesas_sdhi_sys_dmac_dma_callback(void *arg)
{
struct tmio_mmc_host *host = arg;
+ struct renesas_sdhi *priv = host_to_priv(host);
spin_lock_irq(&host->lock);
@@ -161,7 +166,7 @@ static void renesas_sdhi_sys_dmac_dma_callback(void *arg)
spin_unlock_irq(&host->lock);
- wait_for_completion(&host->dma_dataend);
+ wait_for_completion(&priv->dma_priv.dma_dataend);
spin_lock_irq(&host->lock);
tmio_mmc_do_data_irq(host);
@@ -171,6 +176,7 @@ out:
static void renesas_sdhi_sys_dmac_start_dma_rx(struct tmio_mmc_host *host)
{
+ struct renesas_sdhi *priv = host_to_priv(host);
struct scatterlist *sg = host->sg_ptr, *sg_tmp;
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *chan = host->chan_rx;
@@ -214,7 +220,7 @@ static void renesas_sdhi_sys_dmac_start_dma_rx(struct tmio_mmc_host *host)
DMA_CTRL_ACK);
if (desc) {
- reinit_completion(&host->dma_dataend);
+ reinit_completion(&priv->dma_priv.dma_dataend);
desc->callback = renesas_sdhi_sys_dmac_dma_callback;
desc->callback_param = host;
@@ -245,6 +251,7 @@ pio:
static void renesas_sdhi_sys_dmac_start_dma_tx(struct tmio_mmc_host *host)
{
+ struct renesas_sdhi *priv = host_to_priv(host);
struct scatterlist *sg = host->sg_ptr, *sg_tmp;
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *chan = host->chan_tx;
@@ -293,7 +300,7 @@ static void renesas_sdhi_sys_dmac_start_dma_tx(struct tmio_mmc_host *host)
DMA_CTRL_ACK);
if (desc) {
- reinit_completion(&host->dma_dataend);
+ reinit_completion(&priv->dma_priv.dma_dataend);
desc->callback = renesas_sdhi_sys_dmac_dma_callback;
desc->callback_param = host;
@@ -341,7 +348,7 @@ static void renesas_sdhi_sys_dmac_issue_tasklet_fn(unsigned long priv)
spin_lock_irq(&host->lock);
- if (host && host->data) {
+ if (host->data) {
if (host->data->flags & MMC_DATA_READ)
chan = host->chan_rx;
else
@@ -359,9 +366,11 @@ static void renesas_sdhi_sys_dmac_issue_tasklet_fn(unsigned long priv)
static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host,
struct tmio_mmc_data *pdata)
{
+ struct renesas_sdhi *priv = host_to_priv(host);
+
/* We can only either use DMA for both Tx and Rx or not use it at all */
- if (!host->dma || (!host->pdev->dev.of_node &&
- (!pdata->chan_priv_tx || !pdata->chan_priv_rx)))
+ if (!host->pdev->dev.of_node &&
+ (!pdata->chan_priv_tx || !pdata->chan_priv_rx))
return;
if (!host->chan_tx && !host->chan_rx) {
@@ -378,7 +387,7 @@ static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host,
dma_cap_set(DMA_SLAVE, mask);
host->chan_tx = dma_request_slave_channel_compat(mask,
- host->dma->filter, pdata->chan_priv_tx,
+ priv->dma_priv.filter, pdata->chan_priv_tx,
&host->pdev->dev, "tx");
dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
host->chan_tx);
@@ -389,7 +398,7 @@ static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host,
cfg.direction = DMA_MEM_TO_DEV;
cfg.dst_addr = res->start +
(CTL_SD_DATA_PORT << host->bus_shift);
- cfg.dst_addr_width = host->dma->dma_buswidth;
+ cfg.dst_addr_width = priv->dma_priv.dma_buswidth;
if (!cfg.dst_addr_width)
cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
cfg.src_addr = 0;
@@ -398,7 +407,7 @@ static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host,
goto ecfgtx;
host->chan_rx = dma_request_slave_channel_compat(mask,
- host->dma->filter, pdata->chan_priv_rx,
+ priv->dma_priv.filter, pdata->chan_priv_rx,
&host->pdev->dev, "rx");
dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
host->chan_rx);
@@ -408,7 +417,7 @@ static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host,
cfg.direction = DMA_DEV_TO_MEM;
cfg.src_addr = cfg.dst_addr + host->pdata->dma_rx_offset;
- cfg.src_addr_width = host->dma->dma_buswidth;
+ cfg.src_addr_width = priv->dma_priv.dma_buswidth;
if (!cfg.src_addr_width)
cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
cfg.dst_addr = 0;
@@ -420,7 +429,7 @@ static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host,
if (!host->bounce_buf)
goto ebouncebuf;
- init_completion(&host->dma_dataend);
+ init_completion(&priv->dma_priv.dma_dataend);
tasklet_init(&host->dma_issue,
renesas_sdhi_sys_dmac_issue_tasklet_fn,
(unsigned long)host);
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 0848dc0f882e..30bd8081307e 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -30,7 +30,7 @@
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/card.h>
-#include <linux/mfd/rtsx_pci.h>
+#include <linux/rtsx_pci.h>
#include <asm/unaligned.h>
struct realtek_pci_sdmmc {
diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c
index 76da1687ab37..78422079ecfa 100644
--- a/drivers/mmc/host/rtsx_usb_sdmmc.c
+++ b/drivers/mmc/host/rtsx_usb_sdmmc.c
@@ -31,7 +31,7 @@
#include <linux/scatterlist.h>
#include <linux/pm_runtime.h>
-#include <linux/mfd/rtsx_usb.h>
+#include <linux/rtsx_usb.h>
#include <asm/unaligned.h>
#if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 555c7f133eb8..f77493604312 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1660,7 +1660,7 @@ static int s3cmci_probe(struct platform_device *pdev)
}
host->irq = platform_get_irq(pdev, 0);
- if (host->irq == 0) {
+ if (host->irq <= 0) {
dev_err(&pdev->dev, "failed to get interrupt resource.\n");
ret = -EINVAL;
goto probe_iounmap;
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index b988997a1e80..4065da58789d 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -76,6 +76,7 @@ struct sdhci_acpi_slot {
size_t priv_size;
int (*probe_slot)(struct platform_device *, const char *, const char *);
int (*remove_slot)(struct platform_device *);
+ int (*setup_host)(struct platform_device *pdev);
};
struct sdhci_acpi_host {
@@ -96,14 +97,21 @@ static inline bool sdhci_acpi_flag(struct sdhci_acpi_host *c, unsigned int flag)
return c->slot && (c->slot->flags & flag);
}
+#define INTEL_DSM_HS_CAPS_SDR25 BIT(0)
+#define INTEL_DSM_HS_CAPS_DDR50 BIT(1)
+#define INTEL_DSM_HS_CAPS_SDR50 BIT(2)
+#define INTEL_DSM_HS_CAPS_SDR104 BIT(3)
+
enum {
INTEL_DSM_FNS = 0,
INTEL_DSM_V18_SWITCH = 3,
INTEL_DSM_V33_SWITCH = 4,
+ INTEL_DSM_HS_CAPS = 8,
};
struct intel_host {
u32 dsm_fns;
+ u32 hs_caps;
};
static const guid_t intel_dsm_guid =
@@ -152,6 +160,8 @@ static void intel_dsm_init(struct intel_host *intel_host, struct device *dev,
{
int err;
+ intel_host->hs_caps = ~0;
+
err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
if (err) {
pr_debug("%s: DSM not supported, error %d\n",
@@ -161,6 +171,8 @@ static void intel_dsm_init(struct intel_host *intel_host, struct device *dev,
pr_debug("%s: DSM function mask %#x\n",
mmc_hostname(mmc), intel_host->dsm_fns);
+
+ intel_dsm(intel_host, dev, INTEL_DSM_HS_CAPS, &intel_host->hs_caps);
}
static int intel_start_signal_voltage_switch(struct mmc_host *mmc,
@@ -398,6 +410,26 @@ static int intel_probe_slot(struct platform_device *pdev, const char *hid,
return 0;
}
+static int intel_setup_host(struct platform_device *pdev)
+{
+ struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
+ struct intel_host *intel_host = sdhci_acpi_priv(c);
+
+ if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_SDR25))
+ c->host->mmc->caps &= ~MMC_CAP_UHS_SDR25;
+
+ if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_SDR50))
+ c->host->mmc->caps &= ~MMC_CAP_UHS_SDR50;
+
+ if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_DDR50))
+ c->host->mmc->caps &= ~MMC_CAP_UHS_DDR50;
+
+ if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_SDR104))
+ c->host->mmc->caps &= ~MMC_CAP_UHS_SDR104;
+
+ return 0;
+}
+
static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
.chip = &sdhci_acpi_chip_int,
.caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
@@ -409,6 +441,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
SDHCI_QUIRK2_STOP_WITH_TC |
SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400,
.probe_slot = intel_probe_slot,
+ .setup_host = intel_setup_host,
.priv_size = sizeof(struct intel_host),
};
@@ -421,6 +454,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
.flags = SDHCI_ACPI_RUNTIME_PM,
.pm_caps = MMC_PM_KEEP_POWER,
.probe_slot = intel_probe_slot,
+ .setup_host = intel_setup_host,
.priv_size = sizeof(struct intel_host),
};
@@ -432,6 +466,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = {
SDHCI_QUIRK2_STOP_WITH_TC,
.caps = MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_AGGRESSIVE_PM,
.probe_slot = intel_probe_slot,
+ .setup_host = intel_setup_host,
.priv_size = sizeof(struct intel_host),
};
@@ -446,6 +481,83 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd = {
.caps = MMC_CAP_NONREMOVABLE,
};
+/* AMD SDHCI reset DLL register. */
+#define SDHCI_AMD_RESET_DLL_REGISTER 0x908
+
+static int amd_select_drive_strength(struct mmc_card *card,
+ unsigned int max_dtr, int host_drv,
+ int card_drv, int *drv_type)
+{
+ return MMC_SET_DRIVER_TYPE_A;
+}
+
+static void sdhci_acpi_amd_hs400_dll(struct sdhci_host *host)
+{
+ /* The AMD platform requires this DLL setting */
+ sdhci_writel(host, 0x40003210, SDHCI_AMD_RESET_DLL_REGISTER);
+ usleep_range(10, 20);
+ sdhci_writel(host, 0x40033210, SDHCI_AMD_RESET_DLL_REGISTER);
+}
+
+/*
+ * On the AMD platform the tuning bit must first be disabled in the
+ * controller to bring it from HS200 mode down to HS mode, and then
+ * enabled again later to tune up to HS400 mode.
+ */
+static void amd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ unsigned int old_timing = host->timing;
+
+ sdhci_set_ios(mmc, ios);
+ if (old_timing == MMC_TIMING_MMC_HS200 &&
+ ios->timing == MMC_TIMING_MMC_HS)
+ sdhci_writew(host, 0x9, SDHCI_HOST_CONTROL2);
+ if (old_timing != MMC_TIMING_MMC_HS400 &&
+ ios->timing == MMC_TIMING_MMC_HS400) {
+ sdhci_writew(host, 0x80, SDHCI_HOST_CONTROL2);
+ sdhci_acpi_amd_hs400_dll(host);
+ }
+}
+
+static const struct sdhci_ops sdhci_acpi_ops_amd = {
+ .set_clock = sdhci_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+static const struct sdhci_acpi_chip sdhci_acpi_chip_amd = {
+ .ops = &sdhci_acpi_ops_amd,
+};
+
+static int sdhci_acpi_emmc_amd_probe_slot(struct platform_device *pdev,
+ const char *hid, const char *uid)
+{
+ struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
+ struct sdhci_host *host = c->host;
+
+ sdhci_read_caps(host);
+ if (host->caps1 & SDHCI_SUPPORT_DDR50)
+ host->mmc->caps = MMC_CAP_1_8V_DDR;
+
+ if ((host->caps1 & SDHCI_SUPPORT_SDR104) &&
+ (host->mmc->caps & MMC_CAP_1_8V_DDR))
+ host->mmc->caps2 = MMC_CAP2_HS400_1_8V;
+
+ host->mmc_host_ops.select_drive_strength = amd_select_drive_strength;
+ host->mmc_host_ops.set_ios = amd_set_ios;
+ return 0;
+}
+
+static const struct sdhci_acpi_slot sdhci_acpi_slot_amd_emmc = {
+ .chip = &sdhci_acpi_chip_amd,
+ .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
+ .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | SDHCI_QUIRK_32BIT_DMA_SIZE |
+ SDHCI_QUIRK_32BIT_ADMA_SIZE,
+ .probe_slot = sdhci_acpi_emmc_amd_probe_slot,
+};
+
struct sdhci_acpi_uid_slot {
const char *hid;
const char *uid;
@@ -469,6 +581,7 @@ static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
{ "PNP0D40" },
{ "QCOM8051", NULL, &sdhci_acpi_slot_qcom_sd_3v },
{ "QCOM8052", NULL, &sdhci_acpi_slot_qcom_sd },
+ { "AMDI0040", NULL, &sdhci_acpi_slot_amd_emmc },
{ },
};
@@ -485,6 +598,7 @@ static const struct acpi_device_id sdhci_acpi_ids[] = {
{ "PNP0D40" },
{ "QCOM8051" },
{ "QCOM8052" },
+ { "AMDI0040" },
{ },
};
MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids);
@@ -566,6 +680,10 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
host->hw_name = "ACPI";
host->ops = &sdhci_acpi_ops_dflt;
host->irq = platform_get_irq(pdev, 0);
+ if (host->irq <= 0) {
+ err = -EINVAL;
+ goto err_free;
+ }
host->ioaddr = devm_ioremap_nocache(dev, iomem->start,
resource_size(iomem));
@@ -609,10 +727,20 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
}
}
- err = sdhci_add_host(host);
+ err = sdhci_setup_host(host);
if (err)
goto err_free;
+ if (c->slot && c->slot->setup_host) {
+ err = c->slot->setup_host(pdev);
+ if (err)
+ goto err_cleanup;
+ }
+
+ err = __sdhci_add_host(host);
+ if (err)
+ goto err_cleanup;
+
if (c->use_runtime_pm) {
pm_runtime_set_active(dev);
pm_suspend_ignore_children(dev, 1);
@@ -625,6 +753,8 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
return 0;
+err_cleanup:
+ sdhci_cleanup_host(c->host);
err_free:
sdhci_free_host(c->host);
return err;
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 8b941f814472..cd2b5f643a15 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -193,6 +193,7 @@ struct pltfm_imx_data {
struct clk *clk_ipg;
struct clk *clk_ahb;
struct clk *clk_per;
+ unsigned int actual_clock;
enum {
NO_CMD_PENDING, /* no multiblock command pending */
MULTIBLK_IN_PROCESS, /* exact multiblock cmd in process */
@@ -1403,11 +1404,15 @@ static int sdhci_esdhc_runtime_suspend(struct device *dev)
int ret;
ret = sdhci_runtime_suspend_host(host);
+ if (ret)
+ return ret;
if (host->tuning_mode != SDHCI_TUNING_MODE_3)
mmc_retune_needed(host->mmc);
if (!sdhci_sdio_irq_enabled(host)) {
+ imx_data->actual_clock = host->mmc->actual_clock;
+ esdhc_pltfm_set_clock(host, 0);
clk_disable_unprepare(imx_data->clk_per);
clk_disable_unprepare(imx_data->clk_ipg);
}
@@ -1423,31 +1428,34 @@ static int sdhci_esdhc_runtime_resume(struct device *dev)
struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
int err;
+ err = clk_prepare_enable(imx_data->clk_ahb);
+ if (err)
+ return err;
+
if (!sdhci_sdio_irq_enabled(host)) {
err = clk_prepare_enable(imx_data->clk_per);
if (err)
- return err;
+ goto disable_ahb_clk;
err = clk_prepare_enable(imx_data->clk_ipg);
if (err)
goto disable_per_clk;
+ esdhc_pltfm_set_clock(host, imx_data->actual_clock);
}
- err = clk_prepare_enable(imx_data->clk_ahb);
- if (err)
- goto disable_ipg_clk;
+
err = sdhci_runtime_resume_host(host);
if (err)
- goto disable_ahb_clk;
+ goto disable_ipg_clk;
return 0;
-disable_ahb_clk:
- clk_disable_unprepare(imx_data->clk_ahb);
disable_ipg_clk:
if (!sdhci_sdio_irq_enabled(host))
clk_disable_unprepare(imx_data->clk_ipg);
disable_per_clk:
if (!sdhci_sdio_irq_enabled(host))
clk_disable_unprepare(imx_data->clk_per);
+disable_ahb_clk:
+ clk_disable_unprepare(imx_data->clk_ahb);
return err;
}
#endif
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 0720ea717011..c33a5f7393bd 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -25,11 +25,13 @@
#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/regmap.h>
-#include "sdhci-pltfm.h"
#include <linux/of.h>
-#define SDHCI_ARASAN_VENDOR_REGISTER 0x78
+#include "cqhci.h"
+#include "sdhci-pltfm.h"
+#define SDHCI_ARASAN_VENDOR_REGISTER 0x78
+#define SDHCI_ARASAN_CQE_BASE_ADDR 0x200
#define VENDOR_ENHANCED_STROBE BIT(0)
#define PHY_CLK_TOO_SLOW_HZ 400000
@@ -90,6 +92,7 @@ struct sdhci_arasan_data {
struct phy *phy;
bool is_phy_on;
+ bool has_cqe;
struct clk_hw sdcardclk_hw;
struct clk *sdcardclk;
@@ -262,6 +265,17 @@ static int sdhci_arasan_voltage_switch(struct mmc_host *mmc,
return -EINVAL;
}
+static void sdhci_arasan_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
+{
+ if (!IS_ERR(host->mmc->supply.vmmc)) {
+ struct mmc_host *mmc = host->mmc;
+
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+ }
+ sdhci_set_power_noreg(host, mode, vdd);
+}
+
static const struct sdhci_ops sdhci_arasan_ops = {
.set_clock = sdhci_arasan_set_clock,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
@@ -269,6 +283,7 @@ static const struct sdhci_ops sdhci_arasan_ops = {
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_arasan_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
+ .set_power = sdhci_arasan_set_power,
};
static const struct sdhci_pltfm_data sdhci_arasan_pdata = {
@@ -278,6 +293,62 @@ static const struct sdhci_pltfm_data sdhci_arasan_pdata = {
SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
};
+static u32 sdhci_arasan_cqhci_irq(struct sdhci_host *host, u32 intmask)
+{
+ int cmd_error = 0;
+ int data_error = 0;
+
+ if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+ return intmask;
+
+ cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+
+ return 0;
+}
+
+static void sdhci_arasan_dumpregs(struct mmc_host *mmc)
+{
+ sdhci_dumpregs(mmc_priv(mmc));
+}
+
+static void sdhci_arasan_cqe_enable(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ u32 reg;
+
+ reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ while (reg & SDHCI_DATA_AVAILABLE) {
+ sdhci_readl(host, SDHCI_BUFFER);
+ reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ }
+
+ sdhci_cqe_enable(mmc);
+}
+
+static const struct cqhci_host_ops sdhci_arasan_cqhci_ops = {
+ .enable = sdhci_arasan_cqe_enable,
+ .disable = sdhci_cqe_disable,
+ .dumpregs = sdhci_arasan_dumpregs,
+};
+
+static const struct sdhci_ops sdhci_arasan_cqe_ops = {
+ .set_clock = sdhci_arasan_set_clock,
+ .get_max_clock = sdhci_pltfm_clk_get_max_clock,
+ .get_timeout_clock = sdhci_pltfm_clk_get_max_clock,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_arasan_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .set_power = sdhci_arasan_set_power,
+ .irq = sdhci_arasan_cqhci_irq,
+};
+
+static const struct sdhci_pltfm_data sdhci_arasan_cqe_pdata = {
+ .ops = &sdhci_arasan_cqe_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
+};
+
#ifdef CONFIG_PM_SLEEP
/**
* sdhci_arasan_suspend - Suspend method for the driver
@@ -297,6 +368,12 @@ static int sdhci_arasan_suspend(struct device *dev)
if (host->tuning_mode != SDHCI_TUNING_MODE_3)
mmc_retune_needed(host->mmc);
+ if (sdhci_arasan->has_cqe) {
+ ret = cqhci_suspend(host->mmc);
+ if (ret)
+ return ret;
+ }
+
ret = sdhci_suspend_host(host);
if (ret)
return ret;
@@ -353,7 +430,16 @@ static int sdhci_arasan_resume(struct device *dev)
sdhci_arasan->is_phy_on = true;
}
- return sdhci_resume_host(host);
+ ret = sdhci_resume_host(host);
+ if (ret) {
+ dev_err(dev, "Cannot resume host.\n");
+ return ret;
+ }
+
+ if (sdhci_arasan->has_cqe)
+ return cqhci_resume(host->mmc);
+
+ return 0;
}
#endif /* ! CONFIG_PM_SLEEP */
@@ -556,6 +642,49 @@ static void sdhci_arasan_unregister_sdclk(struct device *dev)
of_clk_del_provider(dev->of_node);
}
+static int sdhci_arasan_add_host(struct sdhci_arasan_data *sdhci_arasan)
+{
+ struct sdhci_host *host = sdhci_arasan->host;
+ struct cqhci_host *cq_host;
+ bool dma64;
+ int ret;
+
+ if (!sdhci_arasan->has_cqe)
+ return sdhci_add_host(host);
+
+ ret = sdhci_setup_host(host);
+ if (ret)
+ return ret;
+
+ cq_host = devm_kzalloc(host->mmc->parent,
+ sizeof(*cq_host), GFP_KERNEL);
+ if (!cq_host) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ cq_host->mmio = host->ioaddr + SDHCI_ARASAN_CQE_BASE_ADDR;
+ cq_host->ops = &sdhci_arasan_cqhci_ops;
+
+ dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
+ if (dma64)
+ cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
+
+ ret = cqhci_init(cq_host, host->mmc, dma64);
+ if (ret)
+ goto cleanup;
+
+ ret = __sdhci_add_host(host);
+ if (ret)
+ goto cleanup;
+
+ return 0;
+
+cleanup:
+ sdhci_cleanup_host(host);
+ return ret;
+}
+
static int sdhci_arasan_probe(struct platform_device *pdev)
{
int ret;
@@ -566,9 +695,15 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_arasan_data *sdhci_arasan;
struct device_node *np = pdev->dev.of_node;
+ const struct sdhci_pltfm_data *pdata;
+
+ if (of_device_is_compatible(pdev->dev.of_node, "arasan,sdhci-5.1"))
+ pdata = &sdhci_arasan_cqe_pdata;
+ else
+ pdata = &sdhci_arasan_pdata;
+
+ host = sdhci_pltfm_init(pdev, pdata, sizeof(*sdhci_arasan));
- host = sdhci_pltfm_init(pdev, &sdhci_arasan_pdata,
- sizeof(*sdhci_arasan));
if (IS_ERR(host))
return PTR_ERR(host);
@@ -663,9 +798,11 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
sdhci_arasan_hs400_enhanced_strobe;
host->mmc_host_ops.start_signal_voltage_switch =
sdhci_arasan_voltage_switch;
+ sdhci_arasan->has_cqe = true;
+ host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
}
- ret = sdhci_add_host(host);
+ ret = sdhci_arasan_add_host(sdhci_arasan);
if (ret)
goto err_add_host;
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 1f424374bbbb..4ffa6b173a21 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -589,10 +589,18 @@ static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
static void esdhc_reset(struct sdhci_host *host, u8 mask)
{
+ u32 val;
+
sdhci_reset(host, mask);
sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+
+ if (mask & SDHCI_RESET_ALL) {
+ val = sdhci_readl(host, ESDHC_TBCTL);
+ val &= ~ESDHC_TB_EN;
+ sdhci_writel(host, val, ESDHC_TBCTL);
+ }
}
/* The SCFG, Supplemental Configuration Unit, provides SoC specific
diff --git a/drivers/mmc/host/sdhci-pci-arasan.c b/drivers/mmc/host/sdhci-pci-arasan.c
new file mode 100644
index 000000000000..499f3205ec5c
--- /dev/null
+++ b/drivers/mmc/host/sdhci-pci-arasan.c
@@ -0,0 +1,331 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sdhci-pci-arasan.c - Driver for Arasan PCI Controller with
+ * integrated phy.
+ *
+ * Copyright (C) 2017 Arasan Chip Systems Inc.
+ *
+ * Author: Atul Garg <agarg@arasan.com>
+ */
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+#include "sdhci.h"
+#include "sdhci-pci.h"
+
+/* Extra registers for Arasan SD/SDIO/MMC Host Controller with PHY */
+#define PHY_ADDR_REG 0x300
+#define PHY_DAT_REG 0x304
+
+#define PHY_WRITE BIT(8)
+#define PHY_BUSY BIT(9)
+#define DATA_MASK 0xFF
+
+/* PHY Specific Registers */
+#define DLL_STATUS 0x00
+#define IPAD_CTRL1 0x01
+#define IPAD_CTRL2 0x02
+#define IPAD_STS 0x03
+#define IOREN_CTRL1 0x06
+#define IOREN_CTRL2 0x07
+#define IOPU_CTRL1 0x08
+#define IOPU_CTRL2 0x09
+#define ITAP_DELAY 0x0C
+#define OTAP_DELAY 0x0D
+#define STRB_SEL 0x0E
+#define CLKBUF_SEL 0x0F
+#define MODE_CTRL 0x11
+#define DLL_TRIM 0x12
+#define CMD_CTRL 0x20
+#define DATA_CTRL 0x21
+#define STRB_CTRL 0x22
+#define CLK_CTRL 0x23
+#define PHY_CTRL 0x24
+
+#define DLL_ENBL BIT(3)
+#define RTRIM_EN BIT(1)
+#define PDB_ENBL BIT(1)
+#define RETB_ENBL BIT(6)
+#define ODEN_CMD BIT(1)
+#define ODEN_DAT 0xFF
+#define REN_STRB BIT(0)
+#define REN_CMND BIT(1)
+#define REN_DATA 0xFF
+#define PU_CMD BIT(1)
+#define PU_DAT 0xFF
+#define ITAPDLY_EN BIT(0)
+#define OTAPDLY_EN BIT(0)
+#define OD_REL_CMD BIT(1)
+#define OD_REL_DAT 0xFF
+#define DLLTRM_ICP 0x8
+#define PDB_CMND BIT(0)
+#define PDB_DATA 0xFF
+#define PDB_STRB BIT(0)
+#define PDB_CLOCK BIT(0)
+#define CALDONE_MASK 0x10
+#define DLL_RDY_MASK 0x10
+#define MAX_CLK_BUF 0x7
+
+/* Mode Controls */
+#define ENHSTRB_MODE BIT(0)
+#define HS400_MODE BIT(1)
+#define LEGACY_MODE BIT(2)
+#define DDR50_MODE BIT(3)
+
+/*
+ * The controller has no dedicated bits for HS200/HS, so BIT(4) and
+ * BIT(5) are used here as software-defined mode flags.
+ */
+#define HS200_MODE BIT(4)
+#define HISPD_MODE BIT(5)
+
+#define OTAPDLY(x) (((x) << 1) | OTAPDLY_EN)
+#define ITAPDLY(x) (((x) << 1) | ITAPDLY_EN)
+#define FREQSEL(x) (((x) << 5) | DLL_ENBL)
+#define IOPAD(x, y) ((x) | ((y) << 2))
+
+/* Arasan private data */
+struct arasan_host {
+ u32 chg_clk;
+};
+
+static int arasan_phy_addr_poll(struct sdhci_host *host, u32 offset, u32 mask)
+{
+ ktime_t timeout = ktime_add_us(ktime_get(), 100);
+ bool failed;
+ u8 val = 0;
+
+ while (1) {
+ failed = ktime_after(ktime_get(), timeout);
+ val = sdhci_readw(host, PHY_ADDR_REG);
+ if (!(val & mask))
+ return 0;
+ if (failed)
+ return -EBUSY;
+ }
+}
+
+static int arasan_phy_write(struct sdhci_host *host, u8 data, u8 offset)
+{
+ sdhci_writew(host, data, PHY_DAT_REG);
+ sdhci_writew(host, (PHY_WRITE | offset), PHY_ADDR_REG);
+ return arasan_phy_addr_poll(host, PHY_ADDR_REG, PHY_BUSY);
+}
+
+static int arasan_phy_read(struct sdhci_host *host, u8 offset, u8 *data)
+{
+ int ret;
+
+ sdhci_writew(host, 0, PHY_DAT_REG);
+ sdhci_writew(host, offset, PHY_ADDR_REG);
+ ret = arasan_phy_addr_poll(host, PHY_ADDR_REG, PHY_BUSY);
+
+ /* Masking valid data bits */
+ *data = sdhci_readw(host, PHY_DAT_REG) & DATA_MASK;
+ return ret;
+}
+
+static int arasan_phy_sts_poll(struct sdhci_host *host, u32 offset, u32 mask)
+{
+ int ret;
+ ktime_t timeout = ktime_add_us(ktime_get(), 100);
+ bool failed;
+ u8 val = 0;
+
+ while (1) {
+ failed = ktime_after(ktime_get(), timeout);
+ ret = arasan_phy_read(host, offset, &val);
+ if (ret)
+ return -EBUSY;
+ else if (val & mask)
+ return 0;
+ if (failed)
+ return -EBUSY;
+ }
+}
+
+/* Initialize the Arasan PHY */
+static int arasan_phy_init(struct sdhci_host *host)
+{
+ int ret;
+ u8 val;
+
+ /* Program IOPADs and wait for calibration to be done */
+ if (arasan_phy_read(host, IPAD_CTRL1, &val) ||
+ arasan_phy_write(host, val | RETB_ENBL | PDB_ENBL, IPAD_CTRL1) ||
+ arasan_phy_read(host, IPAD_CTRL2, &val) ||
+ arasan_phy_write(host, val | RTRIM_EN, IPAD_CTRL2))
+ return -EBUSY;
+ ret = arasan_phy_sts_poll(host, IPAD_STS, CALDONE_MASK);
+ if (ret)
+ return -EBUSY;
+
+ /* Program CMD/Data lines */
+ if (arasan_phy_read(host, IOREN_CTRL1, &val) ||
+ arasan_phy_write(host, val | REN_CMND | REN_STRB, IOREN_CTRL1) ||
+ arasan_phy_read(host, IOPU_CTRL1, &val) ||
+ arasan_phy_write(host, val | PU_CMD, IOPU_CTRL1) ||
+ arasan_phy_read(host, CMD_CTRL, &val) ||
+ arasan_phy_write(host, val | PDB_CMND, CMD_CTRL) ||
+ arasan_phy_read(host, IOREN_CTRL2, &val) ||
+ arasan_phy_write(host, val | REN_DATA, IOREN_CTRL2) ||
+ arasan_phy_read(host, IOPU_CTRL2, &val) ||
+ arasan_phy_write(host, val | PU_DAT, IOPU_CTRL2) ||
+ arasan_phy_read(host, DATA_CTRL, &val) ||
+ arasan_phy_write(host, val | PDB_DATA, DATA_CTRL) ||
+ arasan_phy_read(host, STRB_CTRL, &val) ||
+ arasan_phy_write(host, val | PDB_STRB, STRB_CTRL) ||
+ arasan_phy_read(host, CLK_CTRL, &val) ||
+ arasan_phy_write(host, val | PDB_CLOCK, CLK_CTRL) ||
+ arasan_phy_read(host, CLKBUF_SEL, &val) ||
+ arasan_phy_write(host, val | MAX_CLK_BUF, CLKBUF_SEL) ||
+ arasan_phy_write(host, LEGACY_MODE, MODE_CTRL))
+ return -EBUSY;
+ return 0;
+}
+
+/* Set Arasan PHY for different modes */
+static int arasan_phy_set(struct sdhci_host *host, u8 mode, u8 otap,
+ u8 drv_type, u8 itap, u8 trim, u8 clk)
+{
+ u8 val;
+ int ret;
+
+ if (mode == HISPD_MODE || mode == HS200_MODE)
+ ret = arasan_phy_write(host, 0x0, MODE_CTRL);
+ else
+ ret = arasan_phy_write(host, mode, MODE_CTRL);
+ if (ret)
+ return ret;
+ if (mode == HS400_MODE || mode == HS200_MODE) {
+ ret = arasan_phy_read(host, IPAD_CTRL1, &val);
+ if (ret)
+ return ret;
+ ret = arasan_phy_write(host, IOPAD(val, drv_type), IPAD_CTRL1);
+ if (ret)
+ return ret;
+ }
+ if (mode == LEGACY_MODE) {
+ ret = arasan_phy_write(host, 0x0, OTAP_DELAY);
+ if (ret)
+ return ret;
+ ret = arasan_phy_write(host, 0x0, ITAP_DELAY);
+ } else {
+ ret = arasan_phy_write(host, OTAPDLY(otap), OTAP_DELAY);
+ if (ret)
+ return ret;
+ if (mode != HS200_MODE)
+ ret = arasan_phy_write(host, ITAPDLY(itap), ITAP_DELAY);
+ else
+ ret = arasan_phy_write(host, 0x0, ITAP_DELAY);
+ }
+ if (ret)
+ return ret;
+ if (mode != LEGACY_MODE) {
+ ret = arasan_phy_write(host, trim, DLL_TRIM);
+ if (ret)
+ return ret;
+ }
+ ret = arasan_phy_write(host, 0, DLL_STATUS);
+ if (ret)
+ return ret;
+ if (mode != LEGACY_MODE) {
+ ret = arasan_phy_write(host, FREQSEL(clk), DLL_STATUS);
+ if (ret)
+ return ret;
+ ret = arasan_phy_sts_poll(host, DLL_STATUS, DLL_RDY_MASK);
+ if (ret)
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static int arasan_select_phy_clock(struct sdhci_host *host)
+{
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+ struct arasan_host *arasan_host = sdhci_pci_priv(slot);
+ u8 clk;
+
+ if (arasan_host->chg_clk == host->mmc->ios.clock)
+ return 0;
+
+ arasan_host->chg_clk = host->mmc->ios.clock;
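+ /* Map the requested bus clock onto the PHY DLL frequency-select code */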
+ if (host->mmc->ios.clock == 200000000)
+ clk = 0x0;
+ else if (host->mmc->ios.clock == 100000000)
+ clk = 0x2;
+ else if (host->mmc->ios.clock == 50000000)
+ clk = 0x1;
+ else
+ clk = 0x0;
+
+ if (host->mmc_host_ops.hs400_enhanced_strobe) {
+ arasan_phy_set(host, ENHSTRB_MODE, 1, 0x0, 0x0,
+ DLLTRM_ICP, clk);
+ } else {
+ switch (host->mmc->ios.timing) {
+ case MMC_TIMING_LEGACY:
+ arasan_phy_set(host, LEGACY_MODE, 0x0, 0x0, 0x0,
+ 0x0, 0x0);
+ break;
+ case MMC_TIMING_MMC_HS:
+ case MMC_TIMING_SD_HS:
+ arasan_phy_set(host, HISPD_MODE, 0x3, 0x0, 0x2,
+ DLLTRM_ICP, clk);
+ break;
+ case MMC_TIMING_MMC_HS200:
+ case MMC_TIMING_UHS_SDR104:
+ arasan_phy_set(host, HS200_MODE, 0x2,
+ host->mmc->ios.drv_type, 0x0,
+ DLLTRM_ICP, clk);
+ break;
+ case MMC_TIMING_MMC_DDR52:
+ case MMC_TIMING_UHS_DDR50:
+ arasan_phy_set(host, DDR50_MODE, 0x1, 0x0,
+ 0x0, DLLTRM_ICP, clk);
+ break;
+ case MMC_TIMING_MMC_HS400:
+ arasan_phy_set(host, HS400_MODE, 0x1,
+ host->mmc->ios.drv_type, 0xa,
+ DLLTRM_ICP, clk);
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+static int arasan_pci_probe_slot(struct sdhci_pci_slot *slot)
+{
+ int err;
+
+ slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE | MMC_CAP_8_BIT_DATA;
+ err = arasan_phy_init(slot->host);
+ if (err)
+ return -ENODEV;
+ return 0;
+}
+
+static void arasan_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ sdhci_set_clock(host, clock);
+
+ /* Change phy settings for the new clock */
+ arasan_select_phy_clock(host);
+}
+
+static const struct sdhci_ops arasan_sdhci_pci_ops = {
+ .set_clock = arasan_sdhci_set_clock,
+ .enable_dma = sdhci_pci_enable_dma,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+};
+
+const struct sdhci_pci_fixes sdhci_arasan = {
+ .probe_slot = arasan_pci_probe_slot,
+ .ops = &arasan_sdhci_pci_ops,
+ .priv_size = sizeof(struct arasan_host),
+};
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 3e4f04fd5175..6d1a983e6227 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -30,17 +30,37 @@
#include <linux/mmc/sdhci-pci-data.h>
#include <linux/acpi.h>
+#include "cqhci.h"
+
#include "sdhci.h"
#include "sdhci-pci.h"
-static int sdhci_pci_enable_dma(struct sdhci_host *host);
static void sdhci_pci_hw_reset(struct sdhci_host *host);
#ifdef CONFIG_PM_SLEEP
-static int __sdhci_pci_suspend_host(struct sdhci_pci_chip *chip)
+static int sdhci_pci_init_wakeup(struct sdhci_pci_chip *chip)
+{
+ mmc_pm_flag_t pm_flags = 0;
+ int i;
+
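+ /* Combine pm_flags across all slots; wakeup is armed only for keep-power, SDIO-IRQ setups */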
+ for (i = 0; i < chip->num_slots; i++) {
+ struct sdhci_pci_slot *slot = chip->slots[i];
+
+ if (slot)
+ pm_flags |= slot->host->mmc->pm_flags;
+ }
+
+ return device_set_wakeup_enable(&chip->pdev->dev,
+ (pm_flags & MMC_PM_KEEP_POWER) &&
+ (pm_flags & MMC_PM_WAKE_SDIO_IRQ));
+}
+
+static int sdhci_pci_suspend_host(struct sdhci_pci_chip *chip)
{
int i, ret;
+ sdhci_pci_init_wakeup(chip);
+
for (i = 0; i < chip->num_slots; i++) {
struct sdhci_pci_slot *slot = chip->slots[i];
struct sdhci_host *host;
@@ -56,9 +76,6 @@ static int __sdhci_pci_suspend_host(struct sdhci_pci_chip *chip)
ret = sdhci_suspend_host(host);
if (ret)
goto err_pci_suspend;
-
- if (host->mmc->pm_flags & MMC_PM_WAKE_SDIO_IRQ)
- sdhci_enable_irq_wakeups(host);
}
return 0;
@@ -69,52 +86,44 @@ err_pci_suspend:
return ret;
}
-static int sdhci_pci_init_wakeup(struct sdhci_pci_chip *chip)
+int sdhci_pci_resume_host(struct sdhci_pci_chip *chip)
{
- mmc_pm_flag_t pm_flags = 0;
- int i;
+ struct sdhci_pci_slot *slot;
+ int i, ret;
for (i = 0; i < chip->num_slots; i++) {
- struct sdhci_pci_slot *slot = chip->slots[i];
+ slot = chip->slots[i];
+ if (!slot)
+ continue;
- if (slot)
- pm_flags |= slot->host->mmc->pm_flags;
+ ret = sdhci_resume_host(slot->host);
+ if (ret)
+ return ret;
}
- return device_init_wakeup(&chip->pdev->dev,
- (pm_flags & MMC_PM_KEEP_POWER) &&
- (pm_flags & MMC_PM_WAKE_SDIO_IRQ));
+ return 0;
}
-static int sdhci_pci_suspend_host(struct sdhci_pci_chip *chip)
+static int sdhci_cqhci_suspend(struct sdhci_pci_chip *chip)
{
int ret;
- ret = __sdhci_pci_suspend_host(chip);
+ ret = cqhci_suspend(chip->slots[0]->host->mmc);
if (ret)
return ret;
- sdhci_pci_init_wakeup(chip);
-
- return 0;
+ return sdhci_pci_suspend_host(chip);
}
-int sdhci_pci_resume_host(struct sdhci_pci_chip *chip)
+static int sdhci_cqhci_resume(struct sdhci_pci_chip *chip)
{
- struct sdhci_pci_slot *slot;
- int i, ret;
-
- for (i = 0; i < chip->num_slots; i++) {
- slot = chip->slots[i];
- if (!slot)
- continue;
+ int ret;
- ret = sdhci_resume_host(slot->host);
- if (ret)
- return ret;
- }
+ ret = sdhci_pci_resume_host(chip);
+ if (ret)
+ return ret;
- return 0;
+ return cqhci_resume(chip->slots[0]->host->mmc);
}
#endif
@@ -166,8 +175,48 @@ static int sdhci_pci_runtime_resume_host(struct sdhci_pci_chip *chip)
return 0;
}
+
+static int sdhci_cqhci_runtime_suspend(struct sdhci_pci_chip *chip)
+{
+ int ret;
+
+ ret = cqhci_suspend(chip->slots[0]->host->mmc);
+ if (ret)
+ return ret;
+
+ return sdhci_pci_runtime_suspend_host(chip);
+}
+
+static int sdhci_cqhci_runtime_resume(struct sdhci_pci_chip *chip)
+{
+ int ret;
+
+ ret = sdhci_pci_runtime_resume_host(chip);
+ if (ret)
+ return ret;
+
+ return cqhci_resume(chip->slots[0]->host->mmc);
+}
#endif
+static u32 sdhci_cqhci_irq(struct sdhci_host *host, u32 intmask)
+{
+ int cmd_error = 0;
+ int data_error = 0;
+
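+ /* Let the SDHCI core triage the interrupt; forward genuine CQE interrupts to CQHCI */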
+ if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
+ return intmask;
+
+ cqhci_irq(host->mmc, intmask, cmd_error, data_error);
+
+ return 0;
+}
+
+static void sdhci_pci_dumpregs(struct mmc_host *mmc)
+{
+ sdhci_dumpregs(mmc_priv(mmc));
+}
+
/*****************************************************************************\
* *
* Hardware specific quirk handling *
@@ -583,6 +632,18 @@ static const struct sdhci_ops sdhci_intel_byt_ops = {
.voltage_switch = sdhci_intel_voltage_switch,
};
+static const struct sdhci_ops sdhci_intel_glk_ops = {
+ .set_clock = sdhci_set_clock,
+ .set_power = sdhci_intel_set_power,
+ .enable_dma = sdhci_pci_enable_dma,
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .hw_reset = sdhci_pci_hw_reset,
+ .voltage_switch = sdhci_intel_voltage_switch,
+ .irq = sdhci_cqhci_irq,
+};
+
static void byt_read_dsm(struct sdhci_pci_slot *slot)
{
struct intel_host *intel_host = sdhci_pci_priv(slot);
@@ -612,12 +673,80 @@ static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot)
{
int ret = byt_emmc_probe_slot(slot);
+ slot->host->mmc->caps2 |= MMC_CAP2_CQE;
+
if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_GLK_EMMC) {
slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES,
slot->host->mmc_host_ops.hs400_enhanced_strobe =
intel_hs400_enhanced_strobe;
+ slot->host->mmc->caps2 |= MMC_CAP2_CQE_DCMD;
+ }
+
+ return ret;
+}
+
+static void glk_cqe_enable(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ u32 reg;
+
+ /*
+ * CQE gets stuck if it sees the Buffer Read Enable bit set, which can be
+ * the case after tuning, so ensure the buffer is drained.
+ */
+ reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
+ while (reg & SDHCI_DATA_AVAILABLE) {
+ sdhci_readl(host, SDHCI_BUFFER);
+ reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
}
+ sdhci_cqe_enable(mmc);
+}
+
+static const struct cqhci_host_ops glk_cqhci_ops = {
+ .enable = glk_cqe_enable,
+ .disable = sdhci_cqe_disable,
+ .dumpregs = sdhci_pci_dumpregs,
+};
+
+static int glk_emmc_add_host(struct sdhci_pci_slot *slot)
+{
+ struct device *dev = &slot->chip->pdev->dev;
+ struct sdhci_host *host = slot->host;
+ struct cqhci_host *cq_host;
+ bool dma64;
+ int ret;
+
+ ret = sdhci_setup_host(host);
+ if (ret)
+ return ret;
+
+ cq_host = devm_kzalloc(dev, sizeof(*cq_host), GFP_KERNEL);
+ if (!cq_host) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
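+ /* The CQHCI register block sits at a fixed 0x200 offset from the SDHCI registers */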
+ cq_host->mmio = host->ioaddr + 0x200;
+ cq_host->quirks |= CQHCI_QUIRK_SHORT_TXFR_DESC_SZ;
+ cq_host->ops = &glk_cqhci_ops;
+
+ dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
+ if (dma64)
+ cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
+
+ ret = cqhci_init(cq_host, host->mmc, dma64);
+ if (ret)
+ goto cleanup;
+
+ ret = __sdhci_add_host(host);
+ if (ret)
+ goto cleanup;
+
+ return 0;
+
+cleanup:
+ sdhci_cleanup_host(host);
return ret;
}
@@ -699,11 +828,20 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
static const struct sdhci_pci_fixes sdhci_intel_glk_emmc = {
.allow_runtime_pm = true,
.probe_slot = glk_emmc_probe_slot,
+ .add_host = glk_emmc_add_host,
+#ifdef CONFIG_PM_SLEEP
+ .suspend = sdhci_cqhci_suspend,
+ .resume = sdhci_cqhci_resume,
+#endif
+#ifdef CONFIG_PM
+ .runtime_suspend = sdhci_cqhci_runtime_suspend,
+ .runtime_resume = sdhci_cqhci_runtime_resume,
+#endif
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 |
SDHCI_QUIRK2_STOP_WITH_TC,
- .ops = &sdhci_intel_byt_ops,
+ .ops = &sdhci_intel_glk_ops,
.priv_size = sizeof(struct intel_host),
};
@@ -778,6 +916,8 @@ static int intel_mrfld_mmc_probe_slot(struct sdhci_pci_slot *slot)
slot->host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
break;
case INTEL_MRFLD_SDIO:
+ /* Advertise 2.0v for compatibility with the SDIO card's OCR */
+ slot->host->ocr_mask = MMC_VDD_20_21 | MMC_VDD_165_195;
slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE |
MMC_CAP_POWER_OFF_CARD;
break;
@@ -955,7 +1095,7 @@ static int jmicron_suspend(struct sdhci_pci_chip *chip)
{
int i, ret;
- ret = __sdhci_pci_suspend_host(chip);
+ ret = sdhci_pci_suspend_host(chip);
if (ret)
return ret;
@@ -965,8 +1105,6 @@ static int jmicron_suspend(struct sdhci_pci_chip *chip)
jmicron_enable_mmc(chip->slots[i]->host, 0);
}
- sdhci_pci_init_wakeup(chip);
-
return 0;
}
@@ -1306,6 +1444,7 @@ static const struct pci_device_id pci_ids[] = {
SDHCI_PCI_DEVICE(O2, SDS1, o2),
SDHCI_PCI_DEVICE(O2, SEABIRD0, o2),
SDHCI_PCI_DEVICE(O2, SEABIRD1, o2),
+ SDHCI_PCI_DEVICE(ARASAN, PHY_EMMC, arasan),
SDHCI_PCI_DEVICE_CLASS(AMD, SYSTEM_SDHCI, PCI_CLASS_MASK, amd),
/* Generic SD host controller */
{PCI_DEVICE_CLASS(SYSTEM_SDHCI, PCI_CLASS_MASK)},
@@ -1320,7 +1459,7 @@ MODULE_DEVICE_TABLE(pci, pci_ids);
* *
\*****************************************************************************/
-static int sdhci_pci_enable_dma(struct sdhci_host *host)
+int sdhci_pci_enable_dma(struct sdhci_host *host)
{
struct sdhci_pci_slot *slot;
struct pci_dev *pdev;
@@ -1543,10 +1682,13 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
}
}
- host->mmc->pm_caps = MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
+ host->mmc->pm_caps = MMC_PM_KEEP_POWER;
host->mmc->slotno = slotno;
host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;
+ if (device_can_wakeup(&pdev->dev))
+ host->mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
+
if (slot->cd_idx >= 0) {
ret = mmc_gpiod_request_cd(host->mmc, NULL, slot->cd_idx,
slot->cd_override_level, 0, NULL);
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 0056f08a29cc..5cbcdc448f98 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -55,6 +55,9 @@
#define PCI_SUBDEVICE_ID_NI_7884 0x7884
+#define PCI_VENDOR_ID_ARASAN 0x16e6
+#define PCI_DEVICE_ID_ARASAN_PHY_EMMC 0x0670
+
/*
* PCI device class and mask
*/
@@ -170,11 +173,13 @@ static inline void *sdhci_pci_priv(struct sdhci_pci_slot *slot)
#ifdef CONFIG_PM_SLEEP
int sdhci_pci_resume_host(struct sdhci_pci_chip *chip);
#endif
-
+int sdhci_pci_enable_dma(struct sdhci_host *host);
int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot);
int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip);
#ifdef CONFIG_PM_SLEEP
int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip);
#endif
+extern const struct sdhci_pci_fixes sdhci_arasan;
+
#endif /* __SDHCI_PCI_H */
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index 8c0f88428556..14511526a3a8 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -82,6 +82,10 @@ static int sdhci_probe(struct platform_device *pdev)
host->hw_name = "sdhci";
host->ops = &sdhci_pltfm_ops;
host->irq = platform_get_irq(pdev, 0);
+ if (host->irq <= 0) {
+ ret = -EINVAL;
+ goto err_host;
+ }
host->quirks = SDHCI_QUIRK_BROKEN_ADMA;
sdhci = sdhci_priv(host);
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
index 0842bbc2d7ad..4d0791f6ec23 100644
--- a/drivers/mmc/host/sdhci-xenon.c
+++ b/drivers/mmc/host/sdhci-xenon.c
@@ -230,7 +230,14 @@ static void xenon_set_power(struct sdhci_host *host, unsigned char mode,
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
}
+static void xenon_voltage_switch(struct sdhci_host *host)
+{
+ /* Wait 5ms after setting the 1.8V signal enable bit */
+ usleep_range(5000, 5500);
+}
+
static const struct sdhci_ops sdhci_xenon_ops = {
+ .voltage_switch = xenon_voltage_switch,
.set_clock = sdhci_set_clock,
.set_power = xenon_set_power,
.set_bus_width = sdhci_set_bus_width,
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index e9290a3439d5..070aff9c108f 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1434,6 +1434,13 @@ void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
if (mode != MMC_POWER_OFF) {
switch (1 << vdd) {
case MMC_VDD_165_195:
+ /*
+ * Without a regulator, SDHCI does not support 2.0v
+ * so we only get here if the driver deliberately
+ * added the 2.0v range to ocr_avail. Map it to 1.8v
+ * for the purpose of turning on the power.
+ */
+ case MMC_VDD_20_21:
pwr = SDHCI_POWER_180;
break;
case MMC_VDD_29_30:
@@ -2821,25 +2828,33 @@ static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
* sdhci_disable_irq_wakeups() since it will be set by
* sdhci_enable_card_detection() or sdhci_init().
*/
-void sdhci_enable_irq_wakeups(struct sdhci_host *host)
+static bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
{
+ u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE |
+ SDHCI_WAKE_ON_INT;
+ u32 irq_val = 0;
+ u8 wake_val = 0;
u8 val;
- u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
- | SDHCI_WAKE_ON_INT;
- u32 irq_val = SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
- SDHCI_INT_CARD_INT;
- val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
- val |= mask ;
- /* Avoid fake wake up */
- if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) {
- val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
- irq_val &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
+ if (!(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)) {
+ wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE;
+ irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE;
}
+
+ wake_val |= SDHCI_WAKE_ON_INT;
+ irq_val |= SDHCI_INT_CARD_INT;
+
+ val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
+ val &= ~mask;
+ val |= wake_val;
sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
+
sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
+
+ host->irq_wake_enabled = !enable_irq_wake(host->irq);
+
+ return host->irq_wake_enabled;
}
-EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
{
@@ -2850,6 +2865,10 @@ static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
val &= ~mask;
sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
+
+ disable_irq_wake(host->irq);
+
+ host->irq_wake_enabled = false;
}
int sdhci_suspend_host(struct sdhci_host *host)
@@ -2858,15 +2877,14 @@ int sdhci_suspend_host(struct sdhci_host *host)
mmc_retune_timer_stop(host->mmc);
- if (!device_may_wakeup(mmc_dev(host->mmc))) {
+ if (!device_may_wakeup(mmc_dev(host->mmc)) ||
+ !sdhci_enable_irq_wakeups(host)) {
host->ier = 0;
sdhci_writel(host, 0, SDHCI_INT_ENABLE);
sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
free_irq(host->irq, host);
- } else {
- sdhci_enable_irq_wakeups(host);
- enable_irq_wake(host->irq);
}
+
return 0;
}
@@ -2894,15 +2912,14 @@ int sdhci_resume_host(struct sdhci_host *host)
mmiowb();
}
- if (!device_may_wakeup(mmc_dev(host->mmc))) {
+ if (host->irq_wake_enabled) {
+ sdhci_disable_irq_wakeups(host);
+ } else {
ret = request_threaded_irq(host->irq, sdhci_irq,
sdhci_thread_irq, IRQF_SHARED,
mmc_hostname(host->mmc), host);
if (ret)
return ret;
- } else {
- sdhci_disable_irq_wakeups(host);
- disable_irq_wake(host->irq);
}
sdhci_enable_card_detection(host);
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 54bc444c317f..afab26fd70e6 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -484,6 +484,7 @@ struct sdhci_host {
bool bus_on; /* Bus power prevents runtime suspend */
bool preset_enabled; /* Preset is enabled */
bool pending_reset; /* Cmd/data reset is pending */
+ bool irq_wake_enabled; /* IRQ wakeup is enabled */
struct mmc_request *mrqs_done[SDHCI_MAX_MRQS]; /* Requests done */
struct mmc_command *cmd; /* Current command */
@@ -718,7 +719,6 @@ void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable);
#ifdef CONFIG_PM
int sdhci_suspend_host(struct sdhci_host *host);
int sdhci_resume_host(struct sdhci_host *host);
-void sdhci_enable_irq_wakeups(struct sdhci_host *host);
int sdhci_runtime_suspend_host(struct sdhci_host *host);
int sdhci_runtime_resume_host(struct sdhci_host *host);
#endif
diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c
index 04ca0d33a521..485f7591fae4 100644
--- a/drivers/mmc/host/sdhci_f_sdh30.c
+++ b/drivers/mmc/host/sdhci_f_sdh30.c
@@ -10,9 +10,11 @@
* the Free Software Foundation, version 2 of the License.
*/
+#include <linux/acpi.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/property.h>
#include <linux/clk.h>
@@ -146,7 +148,6 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, host);
- sdhci_get_of_property(pdev);
host->hw_name = "f_sdh30";
host->ops = &sdhci_f_sdh30_ops;
host->irq = irq;
@@ -158,25 +159,29 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
goto err;
}
- priv->clk_iface = devm_clk_get(&pdev->dev, "iface");
- if (IS_ERR(priv->clk_iface)) {
- ret = PTR_ERR(priv->clk_iface);
- goto err;
- }
+ if (dev_of_node(dev)) {
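+ /* OF properties and the iface/core clocks are only looked up when probing via device tree */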
+ sdhci_get_of_property(pdev);
- ret = clk_prepare_enable(priv->clk_iface);
- if (ret)
- goto err;
+ priv->clk_iface = devm_clk_get(&pdev->dev, "iface");
+ if (IS_ERR(priv->clk_iface)) {
+ ret = PTR_ERR(priv->clk_iface);
+ goto err;
+ }
- priv->clk = devm_clk_get(&pdev->dev, "core");
- if (IS_ERR(priv->clk)) {
- ret = PTR_ERR(priv->clk);
- goto err_clk;
- }
+ ret = clk_prepare_enable(priv->clk_iface);
+ if (ret)
+ goto err;
- ret = clk_prepare_enable(priv->clk);
- if (ret)
- goto err_clk;
+ priv->clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(priv->clk)) {
+ ret = PTR_ERR(priv->clk);
+ goto err_clk;
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ goto err_clk;
+ }
/* init vendor specific regs */
ctrl = sdhci_readw(host, F_SDH30_AHB_CONFIG);
@@ -226,16 +231,27 @@ static int sdhci_f_sdh30_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
static const struct of_device_id f_sdh30_dt_ids[] = {
{ .compatible = "fujitsu,mb86s70-sdhci-3.0" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, f_sdh30_dt_ids);
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id f_sdh30_acpi_ids[] = {
+ { "SCX0002" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(acpi, f_sdh30_acpi_ids);
+#endif
static struct platform_driver sdhci_f_sdh30_driver = {
.driver = {
.name = "f_sdh30",
- .of_match_table = f_sdh30_dt_ids,
+ .of_match_table = of_match_ptr(f_sdh30_dt_ids),
+ .acpi_match_table = ACPI_PTR(f_sdh30_acpi_ids),
.pm = &sdhci_pltfm_pmops,
},
.probe = sdhci_f_sdh30_probe,
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 53fb18bb7bee..7bb00c68a756 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -916,7 +916,7 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
struct mmc_request *mrq)
{
struct mmc_command *cmd = mrq->cmd;
- u32 opc = cmd->opcode;
+ u32 opc;
u32 mask = 0;
unsigned long flags;
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index cc98355dbdb9..bad612d6f879 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -3,7 +3,7 @@
* (C) Copyright 2007-2011 Reuuimlla Technology Co., Ltd.
* (C) Copyright 2007-2011 Aaron Maoye <leafy.myeh@reuuimllatech.com>
* (C) Copyright 2013-2014 O2S GmbH <www.o2s.ch>
- * (C) Copyright 2013-2014 David Lanzend�rfer <david.lanzendoerfer@o2s.ch>
+ * (C) Copyright 2013-2014 David Lanzendörfer <david.lanzendoerfer@o2s.ch>
* (C) Copyright 2013-2014 Hans de Goede <hdegoede@redhat.com>
* (C) Copyright 2017 Sootech SA
*
@@ -1255,6 +1255,11 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
goto error_assert_reset;
host->irq = platform_get_irq(pdev, 0);
+ if (host->irq <= 0) {
+ ret = -EINVAL;
+ goto error_assert_reset;
+ }
+
return devm_request_threaded_irq(&pdev->dev, host->irq, sunxi_mmc_irq,
sunxi_mmc_handle_manual_stop, 0, "sunxi-mmc", host);
@@ -1393,5 +1398,5 @@ module_platform_driver(sunxi_mmc_driver);
MODULE_DESCRIPTION("Allwinner's SD/MMC Card Controller Driver");
MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("David Lanzend�rfer <david.lanzendoerfer@o2s.ch>");
+MODULE_AUTHOR("David Lanzendörfer <david.lanzendoerfer@o2s.ch>");
MODULE_ALIAS("platform:sunxi-mmc");
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 64b7e9f18361..43a2ea5cff24 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -92,14 +92,19 @@ static int tmio_mmc_probe(struct platform_device *pdev)
pdata->flags |= TMIO_MMC_HAVE_HIGH_REG;
- host = tmio_mmc_host_alloc(pdev);
- if (!host)
+ host = tmio_mmc_host_alloc(pdev, pdata);
+ if (IS_ERR(host)) {
+ ret = PTR_ERR(host);
goto cell_disable;
+ }
/* SD control register space size is 0x200, 0x400 for bus_shift=1 */
host->bus_shift = resource_size(res) >> 10;
- ret = tmio_mmc_host_probe(host, pdata, NULL);
+ host->mmc->f_max = pdata->hclk;
+ host->mmc->f_min = pdata->hclk / 512;
+
+ ret = tmio_mmc_host_probe(host);
if (ret)
goto host_free;
@@ -128,15 +133,11 @@ out:
static int tmio_mmc_remove(struct platform_device *pdev)
{
const struct mfd_cell *cell = mfd_get_cell(pdev);
- struct mmc_host *mmc = platform_get_drvdata(pdev);
+ struct tmio_mmc_host *host = platform_get_drvdata(pdev);
- if (mmc) {
- struct tmio_mmc_host *host = mmc_priv(mmc);
-
- tmio_mmc_host_remove(host);
- if (cell->disable)
- cell->disable(pdev);
- }
+ tmio_mmc_host_remove(host);
+ if (cell->disable)
+ cell->disable(pdev);
return 0;
}
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 3e6ff8921440..e7d651352dc9 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -112,12 +112,6 @@
struct tmio_mmc_data;
struct tmio_mmc_host;
-struct tmio_mmc_dma {
- enum dma_slave_buswidth dma_buswidth;
- bool (*filter)(struct dma_chan *chan, void *arg);
- void (*enable)(struct tmio_mmc_host *host, bool enable);
-};
-
struct tmio_mmc_dma_ops {
void (*start)(struct tmio_mmc_host *host, struct mmc_data *data);
void (*enable)(struct tmio_mmc_host *host, bool enable);
@@ -134,6 +128,7 @@ struct tmio_mmc_host {
struct mmc_request *mrq;
struct mmc_data *data;
struct mmc_host *mmc;
+ struct mmc_host_ops ops;
/* Callbacks for clock / power control */
void (*set_pwr)(struct platform_device *host, int state);
@@ -144,18 +139,15 @@ struct tmio_mmc_host {
struct scatterlist *sg_orig;
unsigned int sg_len;
unsigned int sg_off;
- unsigned long bus_shift;
+ unsigned int bus_shift;
struct platform_device *pdev;
struct tmio_mmc_data *pdata;
- struct tmio_mmc_dma *dma;
/* DMA support */
bool force_pio;
struct dma_chan *chan_rx;
struct dma_chan *chan_tx;
- struct completion dma_dataend;
- struct tasklet_struct dma_complete;
struct tasklet_struct dma_issue;
struct scatterlist bounce_sg;
u8 *bounce_buf;
@@ -174,7 +166,6 @@ struct tmio_mmc_host {
struct mutex ios_lock; /* protect set_ios() context */
bool native_hotplug;
bool sdio_irq_enabled;
- u32 scc_tappos;
/* Mandatory callback */
int (*clk_enable)(struct tmio_mmc_host *host);
@@ -185,9 +176,6 @@ struct tmio_mmc_host {
void (*clk_disable)(struct tmio_mmc_host *host);
int (*multi_io_quirk)(struct mmc_card *card,
unsigned int direction, int blk_size);
- int (*card_busy)(struct mmc_host *mmc);
- int (*start_signal_voltage_switch)(struct mmc_host *mmc,
- struct mmc_ios *ios);
int (*write16_hook)(struct tmio_mmc_host *host, int addr);
void (*hw_reset)(struct tmio_mmc_host *host);
void (*prepare_tuning)(struct tmio_mmc_host *host, unsigned long tap);
@@ -207,11 +195,10 @@ struct tmio_mmc_host {
const struct tmio_mmc_dma_ops *dma_ops;
};
-struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev);
+struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev,
+ struct tmio_mmc_data *pdata);
void tmio_mmc_host_free(struct tmio_mmc_host *host);
-int tmio_mmc_host_probe(struct tmio_mmc_host *host,
- struct tmio_mmc_data *pdata,
- const struct tmio_mmc_dma_ops *dma_ops);
+int tmio_mmc_host_probe(struct tmio_mmc_host *host);
void tmio_mmc_host_remove(struct tmio_mmc_host *host);
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host);
@@ -240,26 +227,26 @@ int tmio_mmc_host_runtime_resume(struct device *dev);
static inline u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
{
- return readw(host->ctl + (addr << host->bus_shift));
+ return ioread16(host->ctl + (addr << host->bus_shift));
}
static inline void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
u16 *buf, int count)
{
- readsw(host->ctl + (addr << host->bus_shift), buf, count);
+ ioread16_rep(host->ctl + (addr << host->bus_shift), buf, count);
}
static inline u32 sd_ctrl_read16_and_16_as_32(struct tmio_mmc_host *host,
int addr)
{
- return readw(host->ctl + (addr << host->bus_shift)) |
- readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
+ return ioread16(host->ctl + (addr << host->bus_shift)) |
+ ioread16(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
}
static inline void sd_ctrl_read32_rep(struct tmio_mmc_host *host, int addr,
u32 *buf, int count)
{
- readsl(host->ctl + (addr << host->bus_shift), buf, count);
+ ioread32_rep(host->ctl + (addr << host->bus_shift), buf, count);
}
static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr,
@@ -270,26 +257,26 @@ static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr,
*/
if (host->write16_hook && host->write16_hook(host, addr))
return;
- writew(val, host->ctl + (addr << host->bus_shift));
+ iowrite16(val, host->ctl + (addr << host->bus_shift));
}
static inline void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
u16 *buf, int count)
{
- writesw(host->ctl + (addr << host->bus_shift), buf, count);
+ iowrite16_rep(host->ctl + (addr << host->bus_shift), buf, count);
}
static inline void sd_ctrl_write32_as_16_and_16(struct tmio_mmc_host *host,
int addr, u32 val)
{
- writew(val & 0xffff, host->ctl + (addr << host->bus_shift));
- writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
+ iowrite16(val & 0xffff, host->ctl + (addr << host->bus_shift));
+ iowrite16(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
}
static inline void sd_ctrl_write32_rep(struct tmio_mmc_host *host, int addr,
const u32 *buf, int count)
{
- writesl(host->ctl + (addr << host->bus_shift), buf, count);
+ iowrite32_rep(host->ctl + (addr << host->bus_shift), buf, count);
}
#endif
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 583bf3262df5..33494241245a 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -806,7 +806,7 @@ static int tmio_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
if (ret == 0)
set_bit(i, host->taps);
- mdelay(1);
+ usleep_range(1000, 1200);
}
ret = host->select_tuning(host);
@@ -926,20 +926,6 @@ static void tmio_mmc_done_work(struct work_struct *work)
tmio_mmc_finish_request(host);
}
-static int tmio_mmc_clk_enable(struct tmio_mmc_host *host)
-{
- if (!host->clk_enable)
- return -ENOTSUPP;
-
- return host->clk_enable(host);
-}
-
-static void tmio_mmc_clk_disable(struct tmio_mmc_host *host)
-{
- if (host->clk_disable)
- host->clk_disable(host);
-}
-
static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
{
struct mmc_host *mmc = host->mmc;
@@ -958,7 +944,7 @@ static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
* 100us were not enough. Is this the same 140us delay, as in
* tmio_mmc_set_ios()?
*/
- udelay(200);
+ usleep_range(200, 300);
}
/*
* It seems, VccQ should be switched on after Vcc, this is also what the
@@ -966,7 +952,7 @@ static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
*/
if (!IS_ERR(mmc->supply.vqmmc) && !ret) {
ret = regulator_enable(mmc->supply.vqmmc);
- udelay(200);
+ usleep_range(200, 300);
}
if (ret < 0)
@@ -1059,7 +1045,7 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
}
/* Let things settle. delay taken from winCE driver */
- udelay(140);
+ usleep_range(140, 200);
if (PTR_ERR(host->mrq) == -EINTR)
dev_dbg(&host->pdev->dev,
"%s.%d: IOS interrupted: clk %u, mode %u",
@@ -1076,15 +1062,9 @@ static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
struct tmio_mmc_data *pdata = host->pdata;
- int ret = mmc_gpio_get_ro(mmc);
-
- if (ret >= 0)
- return ret;
-
- ret = !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
- (sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
- return ret;
+ return !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
+ (sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
}
static int tmio_multi_io_quirk(struct mmc_card *card,
@@ -1098,7 +1078,7 @@ static int tmio_multi_io_quirk(struct mmc_card *card,
return blk_size;
}
-static struct mmc_host_ops tmio_mmc_ops = {
+static const struct mmc_host_ops tmio_mmc_ops = {
.request = tmio_mmc_request,
.set_ios = tmio_mmc_set_ios,
.get_ro = tmio_mmc_get_ro,
@@ -1145,19 +1125,45 @@ static void tmio_mmc_of_parse(struct platform_device *pdev,
pdata->flags |= TMIO_MMC_WRPROTECT_DISABLE;
}
-struct tmio_mmc_host*
-tmio_mmc_host_alloc(struct platform_device *pdev)
+struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev,
+ struct tmio_mmc_data *pdata)
{
struct tmio_mmc_host *host;
struct mmc_host *mmc;
+ struct resource *res;
+ void __iomem *ctl;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ctl = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ctl))
+ return ERR_CAST(ctl);
mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
if (!mmc)
- return NULL;
+ return ERR_PTR(-ENOMEM);
host = mmc_priv(mmc);
+ host->ctl = ctl;
host->mmc = mmc;
host->pdev = pdev;
+ host->pdata = pdata;
+ host->ops = tmio_mmc_ops;
+ mmc->ops = &host->ops;
+
+ ret = mmc_of_parse(host->mmc);
+ if (ret) {
+ host = ERR_PTR(ret);
+ goto free;
+ }
+
+ tmio_mmc_of_parse(pdev, pdata);
+
+ platform_set_drvdata(pdev, host);
+
+ return host;
+free:
+ mmc_free_host(mmc);
return host;
}
@@ -1169,32 +1175,24 @@ void tmio_mmc_host_free(struct tmio_mmc_host *host)
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_free);
-int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
- struct tmio_mmc_data *pdata,
- const struct tmio_mmc_dma_ops *dma_ops)
+int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
{
struct platform_device *pdev = _host->pdev;
+ struct tmio_mmc_data *pdata = _host->pdata;
struct mmc_host *mmc = _host->mmc;
- struct resource *res_ctl;
int ret;
u32 irq_mask = TMIO_MASK_CMD;
- tmio_mmc_of_parse(pdev, pdata);
+ /*
+ * Check the sanity of mmc->f_min to prevent tmio_mmc_set_clock() from
+ * looping forever...
+ */
+ if (mmc->f_min == 0)
+ return -EINVAL;
if (!(pdata->flags & TMIO_MMC_HAS_IDLE_WAIT))
_host->write16_hook = NULL;
- res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res_ctl)
- return -EINVAL;
-
- ret = mmc_of_parse(mmc);
- if (ret < 0)
- return ret;
-
- _host->pdata = pdata;
- platform_set_drvdata(pdev, mmc);
-
_host->set_pwr = pdata->set_pwr;
_host->set_clk_div = pdata->set_clk_div;
@@ -1202,15 +1200,11 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
if (ret < 0)
return ret;
- _host->ctl = devm_ioremap(&pdev->dev,
- res_ctl->start, resource_size(res_ctl));
- if (!_host->ctl)
- return -ENOMEM;
-
- tmio_mmc_ops.card_busy = _host->card_busy;
- tmio_mmc_ops.start_signal_voltage_switch =
- _host->start_signal_voltage_switch;
- mmc->ops = &tmio_mmc_ops;
+ if (pdata->flags & TMIO_MMC_USE_GPIO_CD) {
+ ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0);
+ if (ret)
+ return ret;
+ }
mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
mmc->caps2 |= pdata->capabilities2;
@@ -1233,7 +1227,10 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
}
mmc->max_seg_size = mmc->max_req_size;
- _host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD ||
+ if (mmc_can_gpio_ro(mmc))
+ _host->ops.get_ro = mmc_gpio_get_ro;
+
+ _host->native_hotplug = !(mmc_can_gpio_cd(mmc) ||
mmc->caps & MMC_CAP_NEEDS_POLL ||
!mmc_card_is_removable(mmc));
@@ -1246,18 +1243,6 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
if (pdata->flags & TMIO_MMC_MIN_RCAR2)
_host->native_hotplug = true;
- if (tmio_mmc_clk_enable(_host) < 0) {
- mmc->f_max = pdata->hclk;
- mmc->f_min = mmc->f_max / 512;
- }
-
- /*
- * Check the sanity of mmc->f_min to prevent tmio_mmc_set_clock() from
- * looping forever...
- */
- if (mmc->f_min == 0)
- return -EINVAL;
-
/*
* While using internal tmio hardware logic for card detection, we need
* to ensure it stays powered for it to work.
@@ -1293,7 +1278,6 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
INIT_WORK(&_host->done, tmio_mmc_done_work);
/* See if we also get DMA */
- _host->dma_ops = dma_ops;
tmio_mmc_request_dma(_host, pdata);
pm_runtime_set_active(&pdev->dev);
@@ -1307,14 +1291,6 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
dev_pm_qos_expose_latency_limit(&pdev->dev, 100);
- if (pdata->flags & TMIO_MMC_USE_GPIO_CD) {
- ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0);
- if (ret)
- goto remove_host;
-
- mmc_gpiod_request_cd_irq(mmc);
- }
-
return 0;
remove_host:
@@ -1343,16 +1319,27 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
-
- tmio_mmc_clk_disable(host);
}
EXPORT_SYMBOL_GPL(tmio_mmc_host_remove);
#ifdef CONFIG_PM
+static int tmio_mmc_clk_enable(struct tmio_mmc_host *host)
+{
+ if (!host->clk_enable)
+ return -ENOTSUPP;
+
+ return host->clk_enable(host);
+}
+
+static void tmio_mmc_clk_disable(struct tmio_mmc_host *host)
+{
+ if (host->clk_disable)
+ host->clk_disable(host);
+}
+
int tmio_mmc_host_runtime_suspend(struct device *dev)
{
- struct mmc_host *mmc = dev_get_drvdata(dev);
- struct tmio_mmc_host *host = mmc_priv(mmc);
+ struct tmio_mmc_host *host = dev_get_drvdata(dev);
tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL);
@@ -1372,8 +1359,7 @@ static bool tmio_mmc_can_retune(struct tmio_mmc_host *host)
int tmio_mmc_host_runtime_resume(struct device *dev)
{
- struct mmc_host *mmc = dev_get_drvdata(dev);
- struct tmio_mmc_host *host = mmc_priv(mmc);
+ struct tmio_mmc_host *host = dev_get_drvdata(dev);
tmio_mmc_reset(host);
tmio_mmc_clk_enable(host);
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index 0806f72102c0..a85af236b44d 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -904,9 +904,6 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
if (ooblen % DOC_LAYOUT_OOB_SIZE)
return -EINVAL;
- if (from + len > mtd->size)
- return -EINVAL;
-
ops->oobretlen = 0;
ops->retlen = 0;
ret = 0;
@@ -990,36 +987,6 @@ err_in_read:
goto out;
}
-/**
- * doc_read - Read bytes from flash
- * @mtd: the device
- * @from: the offset from first block and first page, in bytes, aligned on page
- * size
- * @len: the number of bytes to read (must be a multiple of 4)
- * @retlen: the number of bytes actually read
- * @buf: the filled in buffer
- *
- * Reads flash memory pages. This function does not read the OOB chunk, but only
- * the page data.
- *
- * Returns 0 if read successful, of -EIO, -EINVAL if an error occurred
- */
-static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
-{
- struct mtd_oob_ops ops;
- size_t ret;
-
- memset(&ops, 0, sizeof(ops));
- ops.datbuf = buf;
- ops.len = len;
- ops.mode = MTD_OPS_AUTO_OOB;
-
- ret = doc_read_oob(mtd, from, &ops);
- *retlen = ops.retlen;
- return ret;
-}
-
static int doc_reload_bbt(struct docg3 *docg3)
{
int block = DOC_LAYOUT_BLOCK_BBT;
@@ -1471,8 +1438,6 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
if (len && ooblen &&
(len / DOC_LAYOUT_PAGE_SIZE) != (ooblen / oobdelta))
return -EINVAL;
- if (ofs + len > mtd->size)
- return -EINVAL;
ops->oobretlen = 0;
ops->retlen = 0;
@@ -1513,39 +1478,6 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
return ret;
}
-/**
- * doc_write - Write a buffer to the chip
- * @mtd: the device
- * @to: the offset from first block and first page, in bytes, aligned on page
- * size
- * @len: the number of bytes to write (must be a full page size, ie. 512)
- * @retlen: the number of bytes actually written (0 or 512)
- * @buf: the buffer to get bytes from
- *
- * Writes data to the chip.
- *
- * Returns 0 if write successful, -EIO if write error
- */
-static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
-{
- struct docg3 *docg3 = mtd->priv;
- int ret;
- struct mtd_oob_ops ops;
-
- doc_dbg("doc_write(to=%lld, len=%zu)\n", to, len);
- ops.datbuf = (char *)buf;
- ops.len = len;
- ops.mode = MTD_OPS_PLACE_OOB;
- ops.oobbuf = NULL;
- ops.ooblen = 0;
- ops.ooboffs = 0;
-
- ret = doc_write_oob(mtd, to, &ops);
- *retlen = ops.retlen;
- return ret;
-}
-
static struct docg3 *sysfs_dev2docg3(struct device *dev,
struct device_attribute *attr)
{
@@ -1866,8 +1798,6 @@ static int __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
mtd->writebufsize = mtd->writesize = DOC_LAYOUT_PAGE_SIZE;
mtd->oobsize = DOC_LAYOUT_OOB_SIZE;
mtd->_erase = doc_erase;
- mtd->_read = doc_read;
- mtd->_write = doc_write;
mtd->_read_oob = doc_read_oob;
mtd->_write_oob = doc_write_oob;
mtd->_block_isbad = doc_block_isbad;
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index dbe6a1de2bb8..a4e18f6aaa33 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -307,10 +307,18 @@ static int m25p_remove(struct spi_device *spi)
{
struct m25p *flash = spi_get_drvdata(spi);
+ spi_nor_restore(&flash->spi_nor);
+
/* Clean up MTD stuff. */
return mtd_device_unregister(&flash->spi_nor.mtd);
}
+static void m25p_shutdown(struct spi_device *spi)
+{
+ struct m25p *flash = spi_get_drvdata(spi);
+
+ spi_nor_restore(&flash->spi_nor);
+}
/*
* Do NOT add to this array without reading the following:
*
@@ -386,6 +394,7 @@ static struct spi_driver m25p80_driver = {
.id_table = m25p_ids,
.probe = m25p_probe,
.remove = m25p_remove,
+ .shutdown = m25p_shutdown,
/* REVISIT: many of these chips have deep power-down modes, which
* should clearly be entered on suspend() to minimize power use.
diff --git a/drivers/mtd/devices/mchp23k256.c b/drivers/mtd/devices/mchp23k256.c
index 8956b7dcc984..75f71d166fd6 100644
--- a/drivers/mtd/devices/mchp23k256.c
+++ b/drivers/mtd/devices/mchp23k256.c
@@ -68,6 +68,7 @@ static int mchp23k256_write(struct mtd_info *mtd, loff_t to, size_t len,
struct spi_transfer transfer[2] = {};
struct spi_message message;
unsigned char command[MAX_CMD_SIZE];
+ int ret;
spi_message_init(&message);
@@ -84,12 +85,16 @@ static int mchp23k256_write(struct mtd_info *mtd, loff_t to, size_t len,
mutex_lock(&flash->lock);
- spi_sync(flash->spi, &message);
+ ret = spi_sync(flash->spi, &message);
+
+ mutex_unlock(&flash->lock);
+
+ if (ret)
+ return ret;
if (retlen && message.actual_length > sizeof(command))
*retlen += message.actual_length - sizeof(command);
- mutex_unlock(&flash->lock);
return 0;
}
@@ -100,6 +105,7 @@ static int mchp23k256_read(struct mtd_info *mtd, loff_t from, size_t len,
struct spi_transfer transfer[2] = {};
struct spi_message message;
unsigned char command[MAX_CMD_SIZE];
+ int ret;
spi_message_init(&message);
@@ -117,12 +123,16 @@ static int mchp23k256_read(struct mtd_info *mtd, loff_t from, size_t len,
mutex_lock(&flash->lock);
- spi_sync(flash->spi, &message);
+ ret = spi_sync(flash->spi, &message);
+
+ mutex_unlock(&flash->lock);
+
+ if (ret)
+ return ret;
if (retlen && message.actual_length > sizeof(command))
*retlen += message.actual_length - sizeof(command);
- mutex_unlock(&flash->lock);
return 0;
}
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 73b605577447..28553c840d32 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -503,6 +503,11 @@ int add_mtd_device(struct mtd_info *mtd)
return -EEXIST;
BUG_ON(mtd->writesize == 0);
+
+ if (WARN_ON((!mtd->erasesize || !mtd->_erase) &&
+ !(mtd->flags & MTD_NO_ERASE)))
+ return -EINVAL;
+
mutex_lock(&mtd_table_mutex);
i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
@@ -1053,7 +1058,20 @@ int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
* representing the maximum number of bitflips that were corrected on
* any one ecc region (if applicable; zero otherwise).
*/
- ret_code = mtd->_read(mtd, from, len, retlen, buf);
+ if (mtd->_read) {
+ ret_code = mtd->_read(mtd, from, len, retlen, buf);
+ } else if (mtd->_read_oob) {
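+ /* No dedicated read path: emulate it through _read_oob with a data-only request */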
+ struct mtd_oob_ops ops = {
+ .len = len,
+ .datbuf = buf,
+ };
+
+ ret_code = mtd->_read_oob(mtd, from, &ops);
+ *retlen = ops.retlen;
+ } else {
+ return -ENOTSUPP;
+ }
+
if (unlikely(ret_code < 0))
return ret_code;
if (mtd->ecc_strength == 0)
@@ -1068,11 +1086,25 @@ int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
*retlen = 0;
if (to < 0 || to >= mtd->size || len > mtd->size - to)
return -EINVAL;
- if (!mtd->_write || !(mtd->flags & MTD_WRITEABLE))
+ if ((!mtd->_write && !mtd->_write_oob) ||
+ !(mtd->flags & MTD_WRITEABLE))
return -EROFS;
if (!len)
return 0;
ledtrig_mtd_activity();
+
+ if (!mtd->_write) {
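+ /* No dedicated write path: route the data through _write_oob */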
+ struct mtd_oob_ops ops = {
+ .len = len,
+ .datbuf = (u8 *)buf,
+ };
+ int ret;
+
+ ret = mtd->_write_oob(mtd, to, &ops);
+ *retlen = ops.retlen;
+ return ret;
+ }
+
return mtd->_write(mtd, to, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_write);
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index be088bccd593..76cd21d1171b 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -105,34 +105,17 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
struct mtd_part *part = mtd_to_part(mtd);
+ struct mtd_ecc_stats stats;
int res;
- if (from >= mtd->size)
- return -EINVAL;
- if (ops->datbuf && from + ops->len > mtd->size)
- return -EINVAL;
-
- /*
- * If OOB is also requested, make sure that we do not read past the end
- * of this partition.
- */
- if (ops->oobbuf) {
- size_t len, pages;
-
- len = mtd_oobavail(mtd, ops);
- pages = mtd_div_by_ws(mtd->size, mtd);
- pages -= mtd_div_by_ws(from, mtd);
- if (ops->ooboffs + ops->ooblen > pages * len)
- return -EINVAL;
- }
-
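+ /* Snapshot the parent's ECC stats so only this read's delta is charged to the partition */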
+ stats = part->parent->ecc_stats;
res = part->parent->_read_oob(part->parent, from + part->offset, ops);
- if (unlikely(res)) {
- if (mtd_is_bitflip(res))
- mtd->ecc_stats.corrected++;
- if (mtd_is_eccerr(res))
- mtd->ecc_stats.failed++;
- }
+ if (unlikely(mtd_is_eccerr(res)))
+ mtd->ecc_stats.failed +=
+ part->parent->ecc_stats.failed - stats.failed;
+ else
+ mtd->ecc_stats.corrected +=
+ part->parent->ecc_stats.corrected - stats.corrected;
return res;
}
@@ -189,10 +172,6 @@ static int part_write_oob(struct mtd_info *mtd, loff_t to,
{
struct mtd_part *part = mtd_to_part(mtd);
- if (to >= mtd->size)
- return -EINVAL;
- if (ops->datbuf && to + ops->len > mtd->size)
- return -EINVAL;
return part->parent->_write_oob(part->parent, to + part->offset, ops);
}
@@ -435,8 +414,10 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent,
parent->dev.parent;
slave->mtd.dev.of_node = part->of_node;
- slave->mtd._read = part_read;
- slave->mtd._write = part_write;
+ if (parent->_read)
+ slave->mtd._read = part_read;
+ if (parent->_write)
+ slave->mtd._write = part_write;
if (parent->_panic_write)
slave->mtd._panic_write = part_panic_write;
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index f07492c6f4b2..7eb0e1f4f980 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -1223,8 +1223,9 @@ static int mtdswap_show(struct seq_file *s, void *data)
unsigned int max[MTDSWAP_TREE_CNT];
unsigned int i, cw = 0, cwp = 0, cwecount = 0, bb_cnt, mapped, pages;
uint64_t use_size;
- char *name[] = {"clean", "used", "low", "high", "dirty", "bitflip",
- "failing"};
+ static const char * const name[] = {
+ "clean", "used", "low", "high", "dirty", "bitflip", "failing"
+ };
mutex_lock(&d->mbd_dev->lock);
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index bb48aafed9a2..e6b8c59f2c0d 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -315,6 +315,7 @@ config MTD_NAND_ATMEL
config MTD_NAND_PXA3xx
tristate "NAND support on PXA3xx and Armada 370/XP"
+ depends on !MTD_NAND_MARVELL
depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU
help
@@ -323,6 +324,18 @@ config MTD_NAND_PXA3xx
platforms (XP, 370, 375, 38x, 39x) and 64-bit Armada
platforms (7K, 8K) (NFCv2).
+config MTD_NAND_MARVELL
+ tristate "NAND controller support on Marvell boards"
+ depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU || \
+ COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This enables the NAND flash controller driver for Marvell boards,
+ including:
+ - PXA3xx processors (NFCv1)
+ - 32-bit Armada platforms (XP, 37x, 38x, 39x) (NFCv2)
+ - 64-bit Armada platforms (7K, 8K) (NFCv2)
+
config MTD_NAND_SLC_LPC32XX
tristate "NXP LPC32xx SLC Controller"
depends on ARCH_LPC32XX
@@ -376,9 +389,7 @@ config MTD_NAND_GPMI_NAND
Enables NAND Flash support for IMX23, IMX28 or IMX6.
The GPMI controller is very powerful, with the help of BCH
module, it can do the hardware ECC. The GPMI supports several
- NAND flashs at the same time. The GPMI may conflicts with other
- block, such as SD card. So pay attention to it when you enable
- the GPMI.
+ NAND flashes at the same time.
config MTD_NAND_BRCMNAND
tristate "Broadcom STB NAND controller"
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 118a1349aad3..921634ba400c 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_MTD_NAND_OMAP2) += omap2_nand.o
obj-$(CONFIG_MTD_NAND_OMAP_BCH_BUILD) += omap_elm.o
obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
obj-$(CONFIG_MTD_NAND_PXA3xx) += pxa3xx_nand.o
+obj-$(CONFIG_MTD_NAND_MARVELL) += marvell_nand.o
obj-$(CONFIG_MTD_NAND_TMIO) += tmio_nand.o
obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o
obj-$(CONFIG_MTD_NAND_PASEMI) += pasemi_nand.o
diff --git a/drivers/mtd/nand/atmel/nand-controller.c b/drivers/mtd/nand/atmel/nand-controller.c
index 90a71a56bc23..b2f00b398490 100644
--- a/drivers/mtd/nand/atmel/nand-controller.c
+++ b/drivers/mtd/nand/atmel/nand-controller.c
@@ -841,6 +841,8 @@ static int atmel_nand_pmecc_write_pg(struct nand_chip *chip, const u8 *buf,
struct atmel_nand *nand = to_atmel_nand(chip);
int ret;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
ret = atmel_nand_pmecc_enable(chip, NAND_ECC_WRITE, raw);
if (ret)
return ret;
@@ -857,7 +859,7 @@ static int atmel_nand_pmecc_write_pg(struct nand_chip *chip, const u8 *buf,
atmel_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int atmel_nand_pmecc_write_page(struct mtd_info *mtd,
@@ -881,6 +883,8 @@ static int atmel_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf,
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
ret = atmel_nand_pmecc_enable(chip, NAND_ECC_READ, raw);
if (ret)
return ret;
@@ -1000,7 +1004,7 @@ static int atmel_hsmc_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf,
* to the non-optimized one.
*/
if (nand->activecs->rb.type != ATMEL_NAND_NATIVE_RB) {
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
+ nand_read_page_op(chip, page, 0, NULL, 0);
return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page,
raw);
@@ -1178,7 +1182,6 @@ static int atmel_hsmc_nand_ecc_init(struct atmel_nand *nand)
chip->ecc.write_page = atmel_hsmc_nand_pmecc_write_page;
chip->ecc.read_page_raw = atmel_hsmc_nand_pmecc_read_page_raw;
chip->ecc.write_page_raw = atmel_hsmc_nand_pmecc_write_page_raw;
- chip->ecc.options |= NAND_ECC_CUSTOM_PAGE_ACCESS;
return 0;
}
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 5655dca6ce43..87bbd177b3e5 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -572,6 +572,8 @@ static void bf5xx_nand_dma_write_buf(struct mtd_info *mtd,
static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required, int page)
{
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
bf5xx_nand_read_buf(mtd, buf, mtd->writesize);
bf5xx_nand_read_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -582,10 +584,10 @@ static int bf5xx_nand_write_page_raw(struct mtd_info *mtd,
struct nand_chip *chip, const uint8_t *buf, int oob_required,
int page)
{
- bf5xx_nand_write_buf(mtd, buf, mtd->writesize);
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
bf5xx_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_prog_page_end_op(chip);
}
/*
diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
index dd56a671ea42..c28fd2bc1a84 100644
--- a/drivers/mtd/nand/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/brcmnand/brcmnand.c
@@ -1071,7 +1071,7 @@ static void brcmnand_wp(struct mtd_info *mtd, int wp)
return;
brcmnand_set_wp(ctrl, wp);
- chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ nand_status_op(chip, NULL);
/* NAND_STATUS_WP 0x00 = protected, 0x80 = not protected */
ret = bcmnand_ctrl_poll_status(ctrl,
NAND_CTRL_RDY |
@@ -1453,7 +1453,7 @@ static uint8_t brcmnand_read_byte(struct mtd_info *mtd)
/* At FC_BYTES boundary, switch to next column */
if (host->last_byte > 0 && offs == 0)
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, addr, -1);
+ nand_change_read_column_op(chip, addr, NULL, 0, false);
ret = ctrl->flash_cache[offs];
break;
@@ -1681,7 +1681,7 @@ static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd,
int ret;
if (!buf) {
- buf = chip->buffers->databuf;
+ buf = chip->data_buf;
/* Invalidate page cache */
chip->pagebuf = -1;
}
@@ -1689,7 +1689,6 @@ static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd,
sas = mtd->oobsize / chip->ecc.steps;
/* read without ecc for verification */
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
ret = chip->ecc.read_page_raw(mtd, chip, buf, true, page);
if (ret)
return ret;
@@ -1793,6 +1792,8 @@ static int brcmnand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
struct brcmnand_host *host = nand_get_controller_data(chip);
u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
return brcmnand_read(mtd, chip, host->last_addr,
mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
}
@@ -1804,6 +1805,8 @@ static int brcmnand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
int ret;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
brcmnand_set_ecc_enabled(host, 0);
ret = brcmnand_read(mtd, chip, host->last_addr,
mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
@@ -1909,8 +1912,10 @@ static int brcmnand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
struct brcmnand_host *host = nand_get_controller_data(chip);
void *oob = oob_required ? chip->oob_poi : NULL;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
- return 0;
+
+ return nand_prog_page_end_op(chip);
}
static int brcmnand_write_page_raw(struct mtd_info *mtd,
@@ -1920,10 +1925,12 @@ static int brcmnand_write_page_raw(struct mtd_info *mtd,
struct brcmnand_host *host = nand_get_controller_data(chip);
void *oob = oob_required ? chip->oob_poi : NULL;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
brcmnand_set_ecc_enabled(host, 0);
brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
brcmnand_set_ecc_enabled(host, 1);
- return 0;
+
+ return nand_prog_page_end_op(chip);
}
static int brcmnand_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
@@ -2193,16 +2200,9 @@ static int brcmnand_setup_dev(struct brcmnand_host *host)
if (ctrl->nand_version >= 0x0702)
tmp |= ACC_CONTROL_RD_ERASED;
tmp &= ~ACC_CONTROL_FAST_PGM_RDIN;
- if (ctrl->features & BRCMNAND_HAS_PREFETCH) {
- /*
- * FIXME: Flash DMA + prefetch may see spurious erased-page ECC
- * errors
- */
- if (has_flash_dma(ctrl))
- tmp &= ~ACC_CONTROL_PREFETCH;
- else
- tmp |= ACC_CONTROL_PREFETCH;
- }
+ if (ctrl->features & BRCMNAND_HAS_PREFETCH)
+ tmp &= ~ACC_CONTROL_PREFETCH;
+
nand_writereg(ctrl, offs, tmp);
return 0;
@@ -2230,6 +2230,9 @@ static int brcmnand_init_cs(struct brcmnand_host *host, struct device_node *dn)
nand_set_controller_data(chip, host);
mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "brcmnand.%d",
host->cs);
+ if (!mtd->name)
+ return -ENOMEM;
+
mtd->owner = THIS_MODULE;
mtd->dev.parent = &pdev->dev;
@@ -2369,12 +2372,11 @@ static int brcmnand_resume(struct device *dev)
list_for_each_entry(host, &ctrl->host_list, node) {
struct nand_chip *chip = &host->chip;
- struct mtd_info *mtd = nand_to_mtd(chip);
brcmnand_save_restore_cs_config(host, 1);
/* Reset the chip, required by some chips after power-up */
- chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ nand_reset_op(chip);
}
return 0;
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index bc558c438a57..567ff972d5fc 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -353,23 +353,15 @@ static void cafe_nand_bug(struct mtd_info *mtd)
static int cafe_nand_write_oob(struct mtd_info *mtd,
struct nand_chip *chip, int page)
{
- int status = 0;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
- chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
+ mtd->oobsize);
}
/* Don't use -- use nand_read_oob_std for now */
static int cafe_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
/**
* cafe_nand_read_page_syndrome - [REPLACEABLE] hardware ecc syndrome based page read
@@ -391,7 +383,7 @@ static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
cafe_readl(cafe, NAND_ECC_RESULT),
cafe_readl(cafe, NAND_ECC_SYN01));
- chip->read_buf(mtd, buf, mtd->writesize);
+ nand_read_page_op(chip, page, 0, buf, mtd->writesize);
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
if (checkecc && cafe_readl(cafe, NAND_ECC_RESULT) & (1<<18)) {
@@ -549,13 +541,13 @@ static int cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
{
struct cafe_priv *cafe = nand_get_controller_data(chip);
- chip->write_buf(mtd, buf, mtd->writesize);
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
/* Set up ECC autogeneration */
cafe->ctl2 |= (1<<30);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int cafe_nand_block_bad(struct mtd_info *mtd, loff_t ofs)
@@ -613,7 +605,6 @@ static int cafe_nand_probe(struct pci_dev *pdev,
uint32_t ctrl;
int err = 0;
int old_dma;
- struct nand_buffers *nbuf;
/* Very old versions shared the same PCI ident for all three
functions on the chip. Verify the class too... */
@@ -661,7 +652,6 @@ static int cafe_nand_probe(struct pci_dev *pdev,
/* Enable the following for a flash based bad block table */
cafe->nand.bbt_options = NAND_BBT_USE_FLASH;
- cafe->nand.options = NAND_OWN_BUFFERS;
if (skipbbt) {
cafe->nand.options |= NAND_SKIP_BBTSCAN;
@@ -731,32 +721,20 @@ static int cafe_nand_probe(struct pci_dev *pdev,
if (err)
goto out_irq;
- cafe->dmabuf = dma_alloc_coherent(&cafe->pdev->dev,
- 2112 + sizeof(struct nand_buffers) +
- mtd->writesize + mtd->oobsize,
- &cafe->dmaaddr, GFP_KERNEL);
+ cafe->dmabuf = dma_alloc_coherent(&cafe->pdev->dev, 2112,
+ &cafe->dmaaddr, GFP_KERNEL);
if (!cafe->dmabuf) {
err = -ENOMEM;
goto out_irq;
}
- cafe->nand.buffers = nbuf = (void *)cafe->dmabuf + 2112;
/* Set up DMA address */
- cafe_writel(cafe, cafe->dmaaddr & 0xffffffff, NAND_DMA_ADDR0);
- if (sizeof(cafe->dmaaddr) > 4)
- /* Shift in two parts to shut the compiler up */
- cafe_writel(cafe, (cafe->dmaaddr >> 16) >> 16, NAND_DMA_ADDR1);
- else
- cafe_writel(cafe, 0, NAND_DMA_ADDR1);
+ cafe_writel(cafe, lower_32_bits(cafe->dmaaddr), NAND_DMA_ADDR0);
+ cafe_writel(cafe, upper_32_bits(cafe->dmaaddr), NAND_DMA_ADDR1);
cafe_dev_dbg(&cafe->pdev->dev, "Set DMA address to %x (virt %p)\n",
cafe_readl(cafe, NAND_DMA_ADDR0), cafe->dmabuf);
- /* this driver does not need the @ecccalc and @ecccode */
- nbuf->ecccalc = NULL;
- nbuf->ecccode = NULL;
- nbuf->databuf = (uint8_t *)(nbuf + 1);
-
/* Restore the DMA flag */
usedma = old_dma;
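For reference, lower_32_bits() and upper_32_bits() are existing <linux/kernel.h> helpers; upper_32_bits() itself performs the two-step shift that the removed open-coded version used to silence the compiler on 32-bit builds:

/* From <linux/kernel.h>, quoted for reference: */
#define lower_32_bits(n) ((u32)(n))
#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))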
@@ -801,10 +779,7 @@ static int cafe_nand_probe(struct pci_dev *pdev,
goto out;
out_free_dma:
- dma_free_coherent(&cafe->pdev->dev,
- 2112 + sizeof(struct nand_buffers) +
- mtd->writesize + mtd->oobsize,
- cafe->dmabuf, cafe->dmaaddr);
+ dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
out_irq:
/* Disable NAND IRQ in global IRQ mask register */
cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
@@ -829,10 +804,7 @@ static void cafe_nand_remove(struct pci_dev *pdev)
nand_release(mtd);
free_rs(cafe->rs);
pci_iounmap(pdev, cafe->mmio);
- dma_free_coherent(&cafe->pdev->dev,
- 2112 + sizeof(struct nand_buffers) +
- mtd->writesize + mtd->oobsize,
- cafe->dmabuf, cafe->dmaaddr);
+ dma_free_coherent(&cafe->pdev->dev, 2112, cafe->dmabuf, cafe->dmaaddr);
kfree(cafe);
}
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 5124f8ae8c04..313c7f50621b 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -330,16 +330,12 @@ static int denali_check_erased_page(struct mtd_info *mtd,
unsigned long uncor_ecc_flags,
unsigned int max_bitflips)
{
- uint8_t *ecc_code = chip->buffers->ecccode;
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ uint8_t *ecc_code = chip->oob_poi + denali->oob_skip_bytes;
int ecc_steps = chip->ecc.steps;
int ecc_size = chip->ecc.size;
int ecc_bytes = chip->ecc.bytes;
- int i, ret, stat;
-
- ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
- chip->ecc.total);
- if (ret)
- return ret;
+ int i, stat;
for (i = 0; i < ecc_steps; i++) {
if (!(uncor_ecc_flags & BIT(i)))
@@ -645,8 +641,6 @@ static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
int page, int write)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
- unsigned int start_cmd = write ? NAND_CMD_SEQIN : NAND_CMD_READ0;
- unsigned int rnd_cmd = write ? NAND_CMD_RNDIN : NAND_CMD_RNDOUT;
int writesize = mtd->writesize;
int oobsize = mtd->oobsize;
uint8_t *bufpoi = chip->oob_poi;
@@ -658,11 +652,11 @@ static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
int i, pos, len;
/* BBM at the beginning of the OOB area */
- chip->cmdfunc(mtd, start_cmd, writesize, page);
if (write)
- chip->write_buf(mtd, bufpoi, oob_skip);
+ nand_prog_page_begin_op(chip, page, writesize, bufpoi,
+ oob_skip);
else
- chip->read_buf(mtd, bufpoi, oob_skip);
+ nand_read_page_op(chip, page, writesize, bufpoi, oob_skip);
bufpoi += oob_skip;
/* OOB ECC */
@@ -675,30 +669,35 @@ static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
else if (pos + len > writesize)
len = writesize - pos;
- chip->cmdfunc(mtd, rnd_cmd, pos, -1);
if (write)
- chip->write_buf(mtd, bufpoi, len);
+ nand_change_write_column_op(chip, pos, bufpoi, len,
+ false);
else
- chip->read_buf(mtd, bufpoi, len);
+ nand_change_read_column_op(chip, pos, bufpoi, len,
+ false);
bufpoi += len;
if (len < ecc_bytes) {
len = ecc_bytes - len;
- chip->cmdfunc(mtd, rnd_cmd, writesize + oob_skip, -1);
if (write)
- chip->write_buf(mtd, bufpoi, len);
+ nand_change_write_column_op(chip, writesize +
+ oob_skip, bufpoi,
+ len, false);
else
- chip->read_buf(mtd, bufpoi, len);
+ nand_change_read_column_op(chip, writesize +
+ oob_skip, bufpoi,
+ len, false);
bufpoi += len;
}
}
/* OOB free */
len = oobsize - (bufpoi - chip->oob_poi);
- chip->cmdfunc(mtd, rnd_cmd, size - len, -1);
if (write)
- chip->write_buf(mtd, bufpoi, len);
+ nand_change_write_column_op(chip, size - len, bufpoi, len,
+ false);
else
- chip->read_buf(mtd, bufpoi, len);
+ nand_change_read_column_op(chip, size - len, bufpoi, len,
+ false);
}
static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
@@ -710,12 +709,12 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
int ecc_steps = chip->ecc.steps;
int ecc_size = chip->ecc.size;
int ecc_bytes = chip->ecc.bytes;
- void *dma_buf = denali->buf;
+ void *tmp_buf = denali->buf;
int oob_skip = denali->oob_skip_bytes;
size_t size = writesize + oobsize;
int ret, i, pos, len;
- ret = denali_data_xfer(denali, dma_buf, size, page, 1, 0);
+ ret = denali_data_xfer(denali, tmp_buf, size, page, 1, 0);
if (ret)
return ret;
@@ -730,11 +729,11 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
else if (pos + len > writesize)
len = writesize - pos;
- memcpy(buf, dma_buf + pos, len);
+ memcpy(buf, tmp_buf + pos, len);
buf += len;
if (len < ecc_size) {
len = ecc_size - len;
- memcpy(buf, dma_buf + writesize + oob_skip,
+ memcpy(buf, tmp_buf + writesize + oob_skip,
len);
buf += len;
}
@@ -745,7 +744,7 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *oob = chip->oob_poi;
/* BBM at the beginning of the OOB area */
- memcpy(oob, dma_buf + writesize, oob_skip);
+ memcpy(oob, tmp_buf + writesize, oob_skip);
oob += oob_skip;
/* OOB ECC */
@@ -758,11 +757,11 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
else if (pos + len > writesize)
len = writesize - pos;
- memcpy(oob, dma_buf + pos, len);
+ memcpy(oob, tmp_buf + pos, len);
oob += len;
if (len < ecc_bytes) {
len = ecc_bytes - len;
- memcpy(oob, dma_buf + writesize + oob_skip,
+ memcpy(oob, tmp_buf + writesize + oob_skip,
len);
oob += len;
}
@@ -770,7 +769,7 @@ static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
/* OOB free */
len = oobsize - (oob - chip->oob_poi);
- memcpy(oob, dma_buf + size - len, len);
+ memcpy(oob, tmp_buf + size - len, len);
}
return 0;
@@ -788,16 +787,12 @@ static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
- int status;
denali_reset_irq(denali);
denali_oob_xfer(mtd, chip, page, 1);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_end_op(chip);
}
static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
@@ -841,7 +836,7 @@ static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
int ecc_steps = chip->ecc.steps;
int ecc_size = chip->ecc.size;
int ecc_bytes = chip->ecc.bytes;
- void *dma_buf = denali->buf;
+ void *tmp_buf = denali->buf;
int oob_skip = denali->oob_skip_bytes;
size_t size = writesize + oobsize;
int i, pos, len;
@@ -851,7 +846,7 @@ static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
* This simplifies the logic.
*/
if (!buf || !oob_required)
- memset(dma_buf, 0xff, size);
+ memset(tmp_buf, 0xff, size);
/* Arrange the buffer for syndrome payload/ecc layout */
if (buf) {
@@ -864,11 +859,11 @@ static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
else if (pos + len > writesize)
len = writesize - pos;
- memcpy(dma_buf + pos, buf, len);
+ memcpy(tmp_buf + pos, buf, len);
buf += len;
if (len < ecc_size) {
len = ecc_size - len;
- memcpy(dma_buf + writesize + oob_skip, buf,
+ memcpy(tmp_buf + writesize + oob_skip, buf,
len);
buf += len;
}
@@ -879,7 +874,7 @@ static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *oob = chip->oob_poi;
/* BBM at the beginning of the OOB area */
- memcpy(dma_buf + writesize, oob, oob_skip);
+ memcpy(tmp_buf + writesize, oob, oob_skip);
oob += oob_skip;
/* OOB ECC */
@@ -892,11 +887,11 @@ static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
else if (pos + len > writesize)
len = writesize - pos;
- memcpy(dma_buf + pos, oob, len);
+ memcpy(tmp_buf + pos, oob, len);
oob += len;
if (len < ecc_bytes) {
len = ecc_bytes - len;
- memcpy(dma_buf + writesize + oob_skip, oob,
+ memcpy(tmp_buf + writesize + oob_skip, oob,
len);
oob += len;
}
@@ -904,10 +899,10 @@ static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
/* OOB free */
len = oobsize - (oob - chip->oob_poi);
- memcpy(dma_buf + size - len, oob, len);
+ memcpy(tmp_buf + size - len, oob, len);
}
- return denali_data_xfer(denali, dma_buf, size, page, 1, 1);
+ return denali_data_xfer(denali, tmp_buf, size, page, 1, 1);
}
static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
@@ -951,7 +946,7 @@ static int denali_erase(struct mtd_info *mtd, int page)
irq_status = denali_wait_for_irq(denali,
INTR__ERASE_COMP | INTR__ERASE_FAIL);
- return irq_status & INTR__ERASE_COMP ? 0 : NAND_STATUS_FAIL;
+ return irq_status & INTR__ERASE_COMP ? 0 : -EIO;
}
static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
@@ -1359,7 +1354,6 @@ int denali_init(struct denali_nand_info *denali)
chip->read_buf = denali_read_buf;
chip->write_buf = denali_write_buf;
}
- chip->ecc.options |= NAND_ECC_CUSTOM_PAGE_ACCESS;
chip->ecc.read_page = denali_read_page;
chip->ecc.read_page_raw = denali_read_page_raw;
chip->ecc.write_page = denali_write_page;
diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h
index 2911066dacac..9ad33d237378 100644
--- a/drivers/mtd/nand/denali.h
+++ b/drivers/mtd/nand/denali.h
@@ -329,7 +329,7 @@ struct denali_nand_info {
#define DENALI_CAP_DMA_64BIT BIT(1)
int denali_calc_ecc_bytes(int step_size, int strength);
-extern int denali_init(struct denali_nand_info *denali);
-extern void denali_remove(struct denali_nand_info *denali);
+int denali_init(struct denali_nand_info *denali);
+void denali_remove(struct denali_nand_info *denali);
#endif /* __DENALI_H__ */
diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c
index 57fb7ae31412..49cb3e1f8bd0 100644
--- a/drivers/mtd/nand/denali_pci.c
+++ b/drivers/mtd/nand/denali_pci.c
@@ -125,3 +125,7 @@ static struct pci_driver denali_pci_driver = {
.remove = denali_pci_remove,
};
module_pci_driver(denali_pci_driver);
+
+MODULE_DESCRIPTION("PCI driver for Denali NAND controller");
+MODULE_AUTHOR("Intel Corporation and its suppliers");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index 72671dc52e2e..6bc93ea66f50 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -448,7 +448,7 @@ static int doc200x_wait(struct mtd_info *mtd, struct nand_chip *this)
int status;
DoC_WaitReady(doc);
- this->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ nand_status_op(this, NULL);
DoC_WaitReady(doc);
status = (int)this->read_byte(mtd);
@@ -595,7 +595,7 @@ static void doc2001plus_select_chip(struct mtd_info *mtd, int chip)
/* Assert ChipEnable and deassert WriteProtect */
WriteDOC((DOC_FLASH_CE), docptr, Mplus_FlashSelect);
- this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ nand_reset_op(this);
doc->curchip = chip;
doc->curfloor = floor;
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
index 2436cbc71662..72f1327c4430 100644
--- a/drivers/mtd/nand/docg4.c
+++ b/drivers/mtd/nand/docg4.c
@@ -785,6 +785,8 @@ static int read_page(struct mtd_info *mtd, struct nand_chip *nand,
dev_dbg(doc->dev, "%s: page %08x\n", __func__, page);
+ nand_read_page_op(nand, page, 0, NULL, 0);
+
writew(DOC_ECCCONF0_READ_MODE |
DOC_ECCCONF0_ECC_ENABLE |
DOC_ECCCONF0_UNKNOWN |
@@ -864,7 +866,7 @@ static int docg4_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
dev_dbg(doc->dev, "%s: page %x\n", __func__, page);
- docg4_command(mtd, NAND_CMD_READ0, nand->ecc.size, page);
+ nand_read_page_op(nand, page, nand->ecc.size, NULL, 0);
writew(DOC_ECCCONF0_READ_MODE | DOCG4_OOB_SIZE, docptr + DOC_ECCCONF0);
write_nop(docptr);
@@ -900,6 +902,7 @@ static int docg4_erase_block(struct mtd_info *mtd, int page)
struct docg4_priv *doc = nand_get_controller_data(nand);
void __iomem *docptr = doc->virtadr;
uint16_t g4_page;
+ int status;
dev_dbg(doc->dev, "%s: page %04x\n", __func__, page);
@@ -939,11 +942,15 @@ static int docg4_erase_block(struct mtd_info *mtd, int page)
poll_status(doc);
write_nop(docptr);
- return nand->waitfunc(mtd, nand);
+ status = nand->waitfunc(mtd, nand);
+ if (status < 0)
+ return status;
+
+ return status & NAND_STATUS_FAIL ? -EIO : 0;
}
static int write_page(struct mtd_info *mtd, struct nand_chip *nand,
- const uint8_t *buf, bool use_ecc)
+ const uint8_t *buf, int page, bool use_ecc)
{
struct docg4_priv *doc = nand_get_controller_data(nand);
void __iomem *docptr = doc->virtadr;
@@ -951,6 +958,8 @@ static int write_page(struct mtd_info *mtd, struct nand_chip *nand,
dev_dbg(doc->dev, "%s...\n", __func__);
+ nand_prog_page_begin_op(nand, page, 0, NULL, 0);
+
writew(DOC_ECCCONF0_ECC_ENABLE |
DOC_ECCCONF0_UNKNOWN |
DOCG4_BCH_SIZE,
@@ -995,19 +1004,19 @@ static int write_page(struct mtd_info *mtd, struct nand_chip *nand,
writew(0, docptr + DOC_DATAEND);
write_nop(docptr);
- return 0;
+ return nand_prog_page_end_op(nand);
}
static int docg4_write_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
const uint8_t *buf, int oob_required, int page)
{
- return write_page(mtd, nand, buf, false);
+ return write_page(mtd, nand, buf, page, false);
}
static int docg4_write_page(struct mtd_info *mtd, struct nand_chip *nand,
const uint8_t *buf, int oob_required, int page)
{
- return write_page(mtd, nand, buf, true);
+ return write_page(mtd, nand, buf, page, true);
}
static int docg4_write_oob(struct mtd_info *mtd, struct nand_chip *nand,
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 17db2f90aa2c..8b6dcd739ecb 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -713,7 +713,7 @@ static int fsl_elbc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
struct fsl_lbc_ctrl *ctrl = priv->ctrl;
struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
- fsl_elbc_read_buf(mtd, buf, mtd->writesize);
+ nand_read_page_op(chip, page, 0, buf, mtd->writesize);
if (oob_required)
fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -729,10 +729,10 @@ static int fsl_elbc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
static int fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required, int page)
{
- fsl_elbc_write_buf(mtd, buf, mtd->writesize);
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_prog_page_end_op(chip);
}
/* ECC will be calculated automatically, and errors will be detected in
@@ -742,10 +742,10 @@ static int fsl_elbc_write_subpage(struct mtd_info *mtd, struct nand_chip *chip,
uint32_t offset, uint32_t data_len,
const uint8_t *buf, int oob_required, int page)
{
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
fsl_elbc_write_buf(mtd, buf, mtd->writesize);
fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
-
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 9e03bac7f34c..4872a7ba6503 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -688,7 +688,7 @@ static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
struct fsl_ifc_ctrl *ctrl = priv->ctrl;
struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
- fsl_ifc_read_buf(mtd, buf, mtd->writesize);
+ nand_read_page_op(chip, page, 0, buf, mtd->writesize);
if (oob_required)
fsl_ifc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -711,10 +711,10 @@ static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
static int fsl_ifc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required, int page)
{
- fsl_ifc_write_buf(mtd, buf, mtd->writesize);
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
fsl_ifc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int fsl_ifc_chip_init_tail(struct mtd_info *mtd)
@@ -916,6 +916,13 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
if (ctrl->version >= FSL_IFC_VERSION_1_1_0)
fsl_ifc_sram_init(priv);
+ /*
+	 * IFC version 2.0.0 has 16KB of internal SRAM, whereas older
+	 * versions had 8KB, so the bufnum mask needs to be updated.
+ */
+ if (ctrl->version >= FSL_IFC_VERSION_2_0_0)
+ priv->bufnum_mask = (priv->bufnum_mask * 2) + 1;
+
return 0;
}
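A worked instance of the bufnum mask update above, with an assumed starting value:

/*
 * Assumed example: bufnum_mask = 0x3 (four buffers) on IFC < 2.0.0
 * becomes (0x3 * 2) + 1 = 0x7 (eight buffers) once the internal SRAM
 * doubles from 8KB to 16KB on IFC >= 2.0.0.
 */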
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index eac15d9bf49e..f49ed46fa770 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -684,8 +684,8 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
uint8_t *p = buf;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
- uint8_t *ecc_code = chip->buffers->ecccode;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ uint8_t *ecc_code = chip->ecc.code_buf;
int off, len, group = 0;
/*
* ecc_oob is intentionally taken as uint16_t. In 16bit devices, we
@@ -697,7 +697,7 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
unsigned int max_bitflips = 0;
for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
- chip->cmdfunc(mtd, NAND_CMD_READ0, s * eccsize, page);
+ nand_read_page_op(chip, page, s * eccsize, NULL, 0);
chip->ecc.hwctl(mtd, NAND_ECC_READ);
chip->read_buf(mtd, p, eccsize);
@@ -720,8 +720,7 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
if (chip->options & NAND_BUSWIDTH_16)
len = roundup(len, 2);
- chip->cmdfunc(mtd, NAND_CMD_READOOB, off, page);
- chip->read_buf(mtd, oob + j, len);
+ nand_read_oob_op(chip, page, off, oob + j, len);
j += len;
}
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index d4d824ef64e9..61fdd733492f 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -1029,11 +1029,13 @@ static void block_mark_swapping(struct gpmi_nand_data *this,
p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
}
-static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page)
+static int gpmi_ecc_read_page_data(struct nand_chip *chip,
+ uint8_t *buf, int oob_required,
+ int page)
{
struct gpmi_nand_data *this = nand_get_controller_data(chip);
struct bch_geometry *nfc_geo = &this->bch_geometry;
+ struct mtd_info *mtd = nand_to_mtd(chip);
void *payload_virt;
dma_addr_t payload_phys;
void *auxiliary_virt;
@@ -1094,8 +1096,8 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
eccbytes = DIV_ROUND_UP(offset + eccbits, 8);
offset /= 8;
eccbytes -= offset;
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset, -1);
- chip->read_buf(mtd, eccbuf, eccbytes);
+ nand_change_read_column_op(chip, offset, eccbuf,
+ eccbytes, false);
/*
* ECC data are not byte aligned and we may have
@@ -1176,6 +1178,14 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
return max_bitflips;
}
+static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
+ return gpmi_ecc_read_page_data(chip, buf, oob_required, page);
+}
+
/* Fake a virtual small page for the subpage read */
static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
uint32_t offs, uint32_t len, uint8_t *buf, int page)
@@ -1220,12 +1230,12 @@ static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
meta = geo->metadata_size;
if (first) {
col = meta + (size + ecc_parity_size) * first;
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, col, -1);
-
meta = 0;
buf = buf + first * size;
}
+ nand_read_page_op(chip, page, col, NULL, 0);
+
/* Save the old environment */
r1_old = r1_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT0);
r2_old = r2_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT1);
@@ -1254,7 +1264,7 @@ static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
/* Read the subpage now */
this->swap_block_mark = false;
- max_bitflips = gpmi_ecc_read_page(mtd, chip, buf, 0, page);
+ max_bitflips = gpmi_ecc_read_page_data(chip, buf, 0, page);
/* Restore */
writel(r1_old, bch_regs + HW_BCH_FLASH0LAYOUT0);
@@ -1277,6 +1287,9 @@ static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
int ret;
dev_dbg(this->dev, "ecc write page.\n");
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
if (this->swap_block_mark) {
/*
* If control arrives here, we're doing block mark swapping.
@@ -1338,7 +1351,10 @@ exit_auxiliary:
payload_virt, payload_phys);
}
- return 0;
+ if (ret)
+ return ret;
+
+ return nand_prog_page_end_op(chip);
}
/*
@@ -1411,7 +1427,7 @@ static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
memset(chip->oob_poi, ~0, mtd->oobsize);
/* Read out the conventional OOB. */
- chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
+ nand_read_page_op(chip, page, mtd->writesize, NULL, 0);
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
/*
@@ -1421,7 +1437,7 @@ static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
*/
if (GPMI_IS_MX23(this)) {
/* Read the block mark into the first byte of the OOB buffer. */
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ nand_read_page_op(chip, page, 0, NULL, 0);
chip->oob_poi[0] = chip->read_byte(mtd);
}
@@ -1432,7 +1448,6 @@ static int
gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
{
struct mtd_oob_region of = { };
- int status = 0;
/* Do we have available oob area? */
mtd_ooblayout_free(mtd, 0, &of);
@@ -1442,12 +1457,8 @@ gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
if (!nand_is_slc(chip))
return -EPERM;
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize + of.offset, page);
- chip->write_buf(mtd, chip->oob_poi + of.offset, of.length);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_op(chip, page, mtd->writesize + of.offset,
+ chip->oob_poi + of.offset, of.length);
}
/*
@@ -1477,8 +1488,8 @@ static int gpmi_ecc_read_page_raw(struct mtd_info *mtd,
uint8_t *oob = chip->oob_poi;
int step;
- chip->read_buf(mtd, tmp_buf,
- mtd->writesize + mtd->oobsize);
+ nand_read_page_op(chip, page, 0, tmp_buf,
+ mtd->writesize + mtd->oobsize);
/*
* If required, swap the bad block marker and the data stored in the
@@ -1487,12 +1498,8 @@ static int gpmi_ecc_read_page_raw(struct mtd_info *mtd,
* See the layout description for a detailed explanation on why this
* is needed.
*/
- if (this->swap_block_mark) {
- u8 swap = tmp_buf[0];
-
- tmp_buf[0] = tmp_buf[mtd->writesize];
- tmp_buf[mtd->writesize] = swap;
- }
+ if (this->swap_block_mark)
+ swap(tmp_buf[0], tmp_buf[mtd->writesize]);
/*
* Copy the metadata section into the oob buffer (this section is
@@ -1615,31 +1622,22 @@ static int gpmi_ecc_write_page_raw(struct mtd_info *mtd,
* See the layout description for a detailed explanation on why this
* is needed.
*/
- if (this->swap_block_mark) {
- u8 swap = tmp_buf[0];
-
- tmp_buf[0] = tmp_buf[mtd->writesize];
- tmp_buf[mtd->writesize] = swap;
- }
+ if (this->swap_block_mark)
+ swap(tmp_buf[0], tmp_buf[mtd->writesize]);
- chip->write_buf(mtd, tmp_buf, mtd->writesize + mtd->oobsize);
-
- return 0;
+ return nand_prog_page_op(chip, page, 0, tmp_buf,
+ mtd->writesize + mtd->oobsize);
}
static int gpmi_ecc_read_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
-
return gpmi_ecc_read_page_raw(mtd, chip, NULL, 1, page);
}
static int gpmi_ecc_write_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
-
return gpmi_ecc_write_page_raw(mtd, chip, NULL, 1, page);
}
@@ -1649,7 +1647,7 @@ static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
struct gpmi_nand_data *this = nand_get_controller_data(chip);
int ret = 0;
uint8_t *block_mark;
- int column, page, status, chipnr;
+ int column, page, chipnr;
chipnr = (int)(ofs >> chip->chip_shift);
chip->select_chip(mtd, chipnr);
@@ -1663,13 +1661,7 @@ static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
/* Shift to get page */
page = (int)(ofs >> chip->page_shift);
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, column, page);
- chip->write_buf(mtd, block_mark, 1);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
- if (status & NAND_STATUS_FAIL)
- ret = -EIO;
+ ret = nand_prog_page_op(chip, page, column, block_mark, 1);
chip->select_chip(mtd, -1);
@@ -1712,7 +1704,7 @@ static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
unsigned int search_area_size_in_strides;
unsigned int stride;
unsigned int page;
- uint8_t *buffer = chip->buffers->databuf;
+ uint8_t *buffer = chip->data_buf;
int saved_chip_number;
int found_an_ncb_fingerprint = false;
@@ -1737,7 +1729,7 @@ static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
* Read the NCB fingerprint. The fingerprint is four bytes long
* and starts in the 12th byte of the page.
*/
- chip->cmdfunc(mtd, NAND_CMD_READ0, 12, page);
+ nand_read_page_op(chip, page, 12, NULL, 0);
chip->read_buf(mtd, buffer, strlen(fingerprint));
/* Look for the fingerprint. */
@@ -1771,7 +1763,7 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
unsigned int block;
unsigned int stride;
unsigned int page;
- uint8_t *buffer = chip->buffers->databuf;
+ uint8_t *buffer = chip->data_buf;
int saved_chip_number;
int status;
@@ -1797,17 +1789,10 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
dev_dbg(dev, "Erasing the search area...\n");
for (block = 0; block < search_area_size_in_blocks; block++) {
- /* Compute the page address. */
- page = block * block_size_in_pages;
-
/* Erase this block. */
dev_dbg(dev, "\tErasing block 0x%x\n", block);
- chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
- chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
-
- /* Wait for the erase to finish. */
- status = chip->waitfunc(mtd, chip);
- if (status & NAND_STATUS_FAIL)
+ status = nand_erase_op(chip, block);
+ if (status)
dev_err(dev, "[%s] Erase failed.\n", __func__);
}
@@ -1823,13 +1808,9 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
/* Write the first page of the current stride. */
dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
- chip->ecc.write_page_raw(mtd, chip, buffer, 0, page);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- /* Wait for the write to finish. */
- status = chip->waitfunc(mtd, chip);
- if (status & NAND_STATUS_FAIL)
+ status = chip->ecc.write_page_raw(mtd, chip, buffer, 0, page);
+ if (status)
dev_err(dev, "[%s] Write failed.\n", __func__);
}
@@ -1884,7 +1865,7 @@ static int mx23_boot_init(struct gpmi_nand_data *this)
/* Send the command to read the conventional block mark. */
chip->select_chip(mtd, chipnr);
- chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
+ nand_read_page_op(chip, page, mtd->writesize, NULL, 0);
block_mark = chip->read_byte(mtd);
chip->select_chip(mtd, -1);
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
index a45e4ce13d10..06c1f993912c 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
@@ -268,31 +268,31 @@ struct timing_threshold {
};
/* Common Services */
-extern int common_nfc_set_geometry(struct gpmi_nand_data *);
-extern struct dma_chan *get_dma_chan(struct gpmi_nand_data *);
-extern void prepare_data_dma(struct gpmi_nand_data *,
- enum dma_data_direction dr);
-extern int start_dma_without_bch_irq(struct gpmi_nand_data *,
- struct dma_async_tx_descriptor *);
-extern int start_dma_with_bch_irq(struct gpmi_nand_data *,
- struct dma_async_tx_descriptor *);
+int common_nfc_set_geometry(struct gpmi_nand_data *);
+struct dma_chan *get_dma_chan(struct gpmi_nand_data *);
+void prepare_data_dma(struct gpmi_nand_data *,
+ enum dma_data_direction dr);
+int start_dma_without_bch_irq(struct gpmi_nand_data *,
+ struct dma_async_tx_descriptor *);
+int start_dma_with_bch_irq(struct gpmi_nand_data *,
+ struct dma_async_tx_descriptor *);
/* GPMI-NAND helper function library */
-extern int gpmi_init(struct gpmi_nand_data *);
-extern int gpmi_extra_init(struct gpmi_nand_data *);
-extern void gpmi_clear_bch(struct gpmi_nand_data *);
-extern void gpmi_dump_info(struct gpmi_nand_data *);
-extern int bch_set_geometry(struct gpmi_nand_data *);
-extern int gpmi_is_ready(struct gpmi_nand_data *, unsigned chip);
-extern int gpmi_send_command(struct gpmi_nand_data *);
-extern void gpmi_begin(struct gpmi_nand_data *);
-extern void gpmi_end(struct gpmi_nand_data *);
-extern int gpmi_read_data(struct gpmi_nand_data *);
-extern int gpmi_send_data(struct gpmi_nand_data *);
-extern int gpmi_send_page(struct gpmi_nand_data *,
- dma_addr_t payload, dma_addr_t auxiliary);
-extern int gpmi_read_page(struct gpmi_nand_data *,
- dma_addr_t payload, dma_addr_t auxiliary);
+int gpmi_init(struct gpmi_nand_data *);
+int gpmi_extra_init(struct gpmi_nand_data *);
+void gpmi_clear_bch(struct gpmi_nand_data *);
+void gpmi_dump_info(struct gpmi_nand_data *);
+int bch_set_geometry(struct gpmi_nand_data *);
+int gpmi_is_ready(struct gpmi_nand_data *, unsigned chip);
+int gpmi_send_command(struct gpmi_nand_data *);
+void gpmi_begin(struct gpmi_nand_data *);
+void gpmi_end(struct gpmi_nand_data *);
+int gpmi_read_data(struct gpmi_nand_data *);
+int gpmi_send_data(struct gpmi_nand_data *);
+int gpmi_send_page(struct gpmi_nand_data *,
+ dma_addr_t payload, dma_addr_t auxiliary);
+int gpmi_read_page(struct gpmi_nand_data *,
+ dma_addr_t payload, dma_addr_t auxiliary);
void gpmi_copy_bits(u8 *dst, size_t dst_bit_off,
const u8 *src, size_t src_bit_off,
diff --git a/drivers/mtd/nand/hisi504_nand.c b/drivers/mtd/nand/hisi504_nand.c
index 0897261c3e17..cb862793ab6d 100644
--- a/drivers/mtd/nand/hisi504_nand.c
+++ b/drivers/mtd/nand/hisi504_nand.c
@@ -544,7 +544,7 @@ static int hisi_nand_read_page_hwecc(struct mtd_info *mtd,
int max_bitflips = 0, stat = 0, stat_max = 0, status_ecc;
int stat_1, stat_2;
- chip->read_buf(mtd, buf, mtd->writesize);
+ nand_read_page_op(chip, page, 0, buf, mtd->writesize);
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
/* errors which can not be corrected by ECC */
@@ -574,8 +574,7 @@ static int hisi_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
{
struct hinfc_host *host = nand_get_controller_data(chip);
- chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
if (host->irq_status & HINFC504_INTS_UE) {
host->irq_status = 0;
@@ -590,11 +589,11 @@ static int hisi_nand_write_page_hwecc(struct mtd_info *mtd,
struct nand_chip *chip, const uint8_t *buf, int oob_required,
int page)
{
- chip->write_buf(mtd, buf, mtd->writesize);
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
if (oob_required)
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static void hisi_nfc_host_init(struct hinfc_host *host)
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index ad827d4af3e9..613b00a9604b 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -313,6 +313,7 @@ static int jz_nand_detect_bank(struct platform_device *pdev,
uint32_t ctrl;
struct nand_chip *chip = &nand->chip;
struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 id[2];
/* Request I/O resource. */
sprintf(res_name, "bank%d", bank);
@@ -335,17 +336,16 @@ static int jz_nand_detect_bank(struct platform_device *pdev,
/* Retrieve the IDs from the first chip. */
chip->select_chip(mtd, 0);
- chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
- *nand_maf_id = chip->read_byte(mtd);
- *nand_dev_id = chip->read_byte(mtd);
+ nand_reset_op(chip);
+ nand_readid_op(chip, 0, id, sizeof(id));
+ *nand_maf_id = id[0];
+ *nand_dev_id = id[1];
} else {
/* Detect additional chip. */
chip->select_chip(mtd, chipnr);
- chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
- if (*nand_maf_id != chip->read_byte(mtd)
- || *nand_dev_id != chip->read_byte(mtd)) {
+ nand_reset_op(chip);
+ nand_readid_op(chip, 0, id, sizeof(id));
+ if (*nand_maf_id != id[0] || *nand_dev_id != id[1]) {
ret = -ENODEV;
goto notfound_id;
}
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
index 5796468db653..e357948a7505 100644
--- a/drivers/mtd/nand/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -461,7 +461,7 @@ static int lpc32xx_read_page(struct mtd_info *mtd, struct nand_chip *chip,
}
/* Writing Command and Address */
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ nand_read_page_op(chip, page, 0, NULL, 0);
/* For all sub-pages */
for (i = 0; i < host->mlcsubpages; i++) {
@@ -522,6 +522,8 @@ static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd,
memcpy(dma_buf, buf, mtd->writesize);
}
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
for (i = 0; i < host->mlcsubpages; i++) {
/* Start Encode */
writeb(0x00, MLC_ECC_ENC_REG(host->io_base));
@@ -550,7 +552,8 @@ static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd,
/* Wait for Controller Ready */
lpc32xx_waitfunc_controller(mtd, chip);
}
- return 0;
+
+ return nand_prog_page_end_op(chip);
}
static int lpc32xx_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
diff --git a/drivers/mtd/nand/lpc32xx_slc.c b/drivers/mtd/nand/lpc32xx_slc.c
index b61f28a1554d..5f7cc6da0a7f 100644
--- a/drivers/mtd/nand/lpc32xx_slc.c
+++ b/drivers/mtd/nand/lpc32xx_slc.c
@@ -399,10 +399,7 @@ static void lpc32xx_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int
static int lpc32xx_nand_read_oob_syndrome(struct mtd_info *mtd,
struct nand_chip *chip, int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
-
- return 0;
+ return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
/*
@@ -411,17 +408,8 @@ static int lpc32xx_nand_read_oob_syndrome(struct mtd_info *mtd,
static int lpc32xx_nand_write_oob_syndrome(struct mtd_info *mtd,
struct nand_chip *chip, int page)
{
- int status;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
- chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
-
- /* Send command to program the OOB data */
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
+ mtd->oobsize);
}
/*
@@ -632,7 +620,7 @@ static int lpc32xx_nand_read_page_syndrome(struct mtd_info *mtd,
uint8_t *oobecc, tmpecc[LPC32XX_ECC_SAVE_SIZE];
/* Issue read command */
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ nand_read_page_op(chip, page, 0, NULL, 0);
/* Read data and oob, calculate ECC */
status = lpc32xx_xfer(mtd, buf, chip->ecc.steps, 1);
@@ -675,7 +663,7 @@ static int lpc32xx_nand_read_page_raw_syndrome(struct mtd_info *mtd,
int page)
{
/* Issue read command */
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ nand_read_page_op(chip, page, 0, NULL, 0);
/* Raw reads can just use the FIFO interface */
chip->read_buf(mtd, buf, chip->ecc.size * chip->ecc.steps);
@@ -698,6 +686,8 @@ static int lpc32xx_nand_write_page_syndrome(struct mtd_info *mtd,
uint8_t *pb;
int error;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
/* Write data, calculate ECC on outbound data */
error = lpc32xx_xfer(mtd, (uint8_t *)buf, chip->ecc.steps, 0);
if (error)
@@ -716,7 +706,8 @@ static int lpc32xx_nand_write_page_syndrome(struct mtd_info *mtd,
/* Write ECC data to device */
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+
+ return nand_prog_page_end_op(chip);
}
/*
@@ -729,9 +720,11 @@ static int lpc32xx_nand_write_page_raw_syndrome(struct mtd_info *mtd,
int oob_required, int page)
{
/* Raw writes can just use the FIFO interface */
- chip->write_buf(mtd, buf, chip->ecc.size * chip->ecc.steps);
+ nand_prog_page_begin_op(chip, page, 0, buf,
+ chip->ecc.size * chip->ecc.steps);
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+
+ return nand_prog_page_end_op(chip);
}
static int lpc32xx_nand_dma_setup(struct lpc32xx_nand_host *host)
diff --git a/drivers/mtd/nand/marvell_nand.c b/drivers/mtd/nand/marvell_nand.c
new file mode 100644
index 000000000000..2196f2a233d6
--- /dev/null
+++ b/drivers/mtd/nand/marvell_nand.c
@@ -0,0 +1,2896 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Marvell NAND flash controller driver
+ *
+ * Copyright (C) 2017 Marvell
+ * Author: Miquel RAYNAL <miquel.raynal@free-electrons.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of_platform.h>
+#include <linux/iopoll.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <asm/unaligned.h>
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma/pxa-dma.h>
+#include <linux/platform_data/mtd-nand-pxa3xx.h>
+
+/* Data FIFO granularity, FIFO reads/writes must be a multiple of this length */
+#define FIFO_DEPTH 8
+#define FIFO_REP(x) (x / sizeof(u32))
+#define BCH_SEQ_READS (32 / FIFO_DEPTH)
+/* The NFC does not support transferring chunks larger than this at a time */
+#define MAX_CHUNK_SIZE 2112
+/* NFCv1 cannot read more than 7 bytes of ID */
+#define NFCV1_READID_LEN 7
+/* Polling is done at a pace of POLL_PERIOD us until POLL_TIMEOUT is reached */
+#define POLL_PERIOD 0
+#define POLL_TIMEOUT 100000
+/* Interrupt maximum wait period in ms */
+#define IRQ_TIMEOUT 1000
+/* Latency in clock cycles between SoC pins and NFC logic */
+#define MIN_RD_DEL_CNT 3
+/* Maximum number of contiguous address cycles */
+#define MAX_ADDRESS_CYC_NFCV1 5
+#define MAX_ADDRESS_CYC_NFCV2 7
+/* System control registers/bits to enable the NAND controller on some SoCs */
+#define GENCONF_SOC_DEVICE_MUX 0x208
+#define GENCONF_SOC_DEVICE_MUX_NFC_EN BIT(0)
+#define GENCONF_SOC_DEVICE_MUX_ECC_CLK_RST BIT(20)
+#define GENCONF_SOC_DEVICE_MUX_ECC_CORE_RST BIT(21)
+#define GENCONF_SOC_DEVICE_MUX_NFC_INT_EN BIT(25)
+#define GENCONF_CLK_GATING_CTRL 0x220
+#define GENCONF_CLK_GATING_CTRL_ND_GATE BIT(2)
+#define GENCONF_ND_CLK_CTRL 0x700
+#define GENCONF_ND_CLK_CTRL_EN BIT(0)
+
+/* NAND controller data flash control register */
+#define NDCR 0x00
+#define NDCR_ALL_INT GENMASK(11, 0)
+#define NDCR_CS1_CMDDM BIT(7)
+#define NDCR_CS0_CMDDM BIT(8)
+#define NDCR_RDYM BIT(11)
+#define NDCR_ND_ARB_EN BIT(12)
+#define NDCR_RA_START BIT(15)
+#define NDCR_RD_ID_CNT(x) (min_t(unsigned int, x, 0x7) << 16)
+#define NDCR_PAGE_SZ(x) (x >= 2048 ? BIT(24) : 0)
+#define NDCR_DWIDTH_M BIT(26)
+#define NDCR_DWIDTH_C BIT(27)
+#define NDCR_ND_RUN BIT(28)
+#define NDCR_DMA_EN BIT(29)
+#define NDCR_ECC_EN BIT(30)
+#define NDCR_SPARE_EN BIT(31)
+#define NDCR_GENERIC_FIELDS_MASK (~(NDCR_RA_START | NDCR_PAGE_SZ(2048) | \
+ NDCR_DWIDTH_M | NDCR_DWIDTH_C))
+
+/* NAND interface timing parameter 0 register */
+#define NDTR0 0x04
+#define NDTR0_TRP(x) ((min_t(unsigned int, x, 0xF) & 0x7) << 0)
+#define NDTR0_TRH(x) (min_t(unsigned int, x, 0x7) << 3)
+#define NDTR0_ETRP(x) ((min_t(unsigned int, x, 0xF) & 0x8) << 3)
+#define NDTR0_SEL_NRE_EDGE BIT(7)
+#define NDTR0_TWP(x) (min_t(unsigned int, x, 0x7) << 8)
+#define NDTR0_TWH(x) (min_t(unsigned int, x, 0x7) << 11)
+#define NDTR0_TCS(x) (min_t(unsigned int, x, 0x7) << 16)
+#define NDTR0_TCH(x) (min_t(unsigned int, x, 0x7) << 19)
+#define NDTR0_RD_CNT_DEL(x) (min_t(unsigned int, x, 0xF) << 22)
+#define NDTR0_SELCNTR BIT(26)
+#define NDTR0_TADL(x) (min_t(unsigned int, x, 0x1F) << 27)
+
+/* NAND interface timing parameter 1 register */
+#define NDTR1 0x0C
+#define NDTR1_TAR(x) (min_t(unsigned int, x, 0xF) << 0)
+#define NDTR1_TWHR(x) (min_t(unsigned int, x, 0xF) << 4)
+#define NDTR1_TRHW(x) (min_t(unsigned int, x / 16, 0x3) << 8)
+#define NDTR1_PRESCALE BIT(14)
+#define NDTR1_WAIT_MODE BIT(15)
+#define NDTR1_TR(x) (min_t(unsigned int, x, 0xFFFF) << 16)
+
+/* NAND controller status register */
+#define NDSR 0x14
+#define NDSR_WRCMDREQ BIT(0)
+#define NDSR_RDDREQ BIT(1)
+#define NDSR_WRDREQ BIT(2)
+#define NDSR_CORERR BIT(3)
+#define NDSR_UNCERR BIT(4)
+#define NDSR_CMDD(cs) BIT(8 - cs)
+#define NDSR_RDY(rb) BIT(11 + rb)
+#define NDSR_ERRCNT(x) ((x >> 16) & 0x1F)
+
+/* NAND ECC control register */
+#define NDECCCTRL 0x28
+#define NDECCCTRL_BCH_EN BIT(0)
+
+/* NAND controller data buffer register */
+#define NDDB 0x40
+
+/* NAND controller command buffer 0 register */
+#define NDCB0 0x48
+#define NDCB0_CMD1(x) ((x & 0xFF) << 0)
+#define NDCB0_CMD2(x) ((x & 0xFF) << 8)
+#define NDCB0_ADDR_CYC(x) ((x & 0x7) << 16)
+#define NDCB0_ADDR_GET_NUM_CYC(x) (((x) >> 16) & 0x7)
+#define NDCB0_DBC BIT(19)
+#define NDCB0_CMD_TYPE(x) ((x & 0x7) << 21)
+#define NDCB0_CSEL BIT(24)
+#define NDCB0_RDY_BYP BIT(27)
+#define NDCB0_LEN_OVRD BIT(28)
+#define NDCB0_CMD_XTYPE(x) ((x & 0x7) << 29)
+
+/* NAND controller command buffer 1 register */
+#define NDCB1 0x4C
+#define NDCB1_COLS(x) ((x & 0xFFFF) << 0)
+#define NDCB1_ADDRS_PAGE(x) (x << 16)
+
+/* NAND controller command buffer 2 register */
+#define NDCB2 0x50
+#define NDCB2_ADDR5_PAGE(x) (((x >> 16) & 0xFF) << 0)
+#define NDCB2_ADDR5_CYC(x) ((x & 0xFF) << 0)
+
+/* NAND controller command buffer 3 register */
+#define NDCB3 0x54
+#define NDCB3_ADDR6_CYC(x) ((x & 0xFF) << 16)
+#define NDCB3_ADDR7_CYC(x) ((x & 0xFF) << 24)
+
+/* NAND controller command buffer 0 register 'type' and 'xtype' fields */
+#define TYPE_READ 0
+#define TYPE_WRITE 1
+#define TYPE_ERASE 2
+#define TYPE_READ_ID 3
+#define TYPE_STATUS 4
+#define TYPE_RESET 5
+#define TYPE_NAKED_CMD 6
+#define TYPE_NAKED_ADDR 7
+#define TYPE_MASK 7
+#define XTYPE_MONOLITHIC_RW 0
+#define XTYPE_LAST_NAKED_RW 1
+#define XTYPE_FINAL_COMMAND 3
+#define XTYPE_READ 4
+#define XTYPE_WRITE_DISPATCH 4
+#define XTYPE_NAKED_RW 5
+#define XTYPE_COMMAND_DISPATCH 6
+#define XTYPE_MASK 7
+
+/**
+ * The Marvell ECC engine works differently from the others: in order to limit
+ * the size of the IP, hardware engineers chose a fixed strength of 16 bits per
+ * subpage. Depending on the strength needed by the NAND chip, a particular
+ * layout mixing data/spare/ecc is defined, with a possible last chunk smaller
+ * than the others.
+ *
+ * @writesize: Full page size on which the layout applies
+ * @chunk: Desired ECC chunk size on which the layout applies
+ * @strength: Desired ECC strength (per chunk size bytes) on which the
+ * layout applies
+ * @nchunks: Total number of chunks
+ * @full_chunk_cnt: Number of full-sized chunks, which is the number of
+ * repetitions of the pattern:
+ * (data_bytes + spare_bytes + ecc_bytes).
+ * @data_bytes: Number of data bytes per chunk
+ * @spare_bytes: Number of spare bytes per chunk
+ * @ecc_bytes: Number of ecc bytes per chunk
+ * @last_data_bytes: Number of data bytes in the last chunk
+ * @last_spare_bytes: Number of spare bytes in the last chunk
+ * @last_ecc_bytes: Number of ecc bytes in the last chunk
+ */
+struct marvell_hw_ecc_layout {
+ /* Constraints */
+ int writesize;
+ int chunk;
+ int strength;
+ /* Corresponding layout */
+ int nchunks;
+ int full_chunk_cnt;
+ int data_bytes;
+ int spare_bytes;
+ int ecc_bytes;
+ int last_data_bytes;
+ int last_spare_bytes;
+ int last_ecc_bytes;
+};
+
+#define MARVELL_LAYOUT(ws, dc, ds, nc, fcc, db, sb, eb, ldb, lsb, leb) \
+ { \
+ .writesize = ws, \
+ .chunk = dc, \
+ .strength = ds, \
+ .nchunks = nc, \
+ .full_chunk_cnt = fcc, \
+ .data_bytes = db, \
+ .spare_bytes = sb, \
+ .ecc_bytes = eb, \
+ .last_data_bytes = ldb, \
+ .last_spare_bytes = lsb, \
+ .last_ecc_bytes = leb, \
+ }
+
+/* Layouts explained in AN-379_Marvell_SoC_NFC_ECC */
+static const struct marvell_hw_ecc_layout marvell_nfc_layouts[] = {
+ MARVELL_LAYOUT( 512, 512, 1, 1, 1, 512, 8, 8, 0, 0, 0),
+ MARVELL_LAYOUT( 2048, 512, 1, 1, 1, 2048, 40, 24, 0, 0, 0),
+ MARVELL_LAYOUT( 2048, 512, 4, 1, 1, 2048, 32, 30, 0, 0, 0),
+ MARVELL_LAYOUT( 4096, 512, 4, 2, 2, 2048, 32, 30, 0, 0, 0),
+ MARVELL_LAYOUT( 4096, 512, 8, 5, 4, 1024, 0, 30, 0, 64, 30),
+};
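As a quick sanity sketch (not part of the driver) of how these layouts tile a page: following the kernel-doc above, each full chunk carries data_bytes of payload and the optional smaller last chunk carries last_data_bytes:

/* Sketch only: a layout's data chunks must cover the whole page */
static bool sketch_layout_covers_page(const struct marvell_hw_ecc_layout *l)
{
	return l->full_chunk_cnt * l->data_bytes + l->last_data_bytes ==
	       l->writesize;
}

Every entry of marvell_nfc_layouts[] above satisfies this identity (e.g. 4 * 1024 + 0 == 4096 for the last line).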
+
+/**
+ * The NAND Flash Controller has up to 4 CE and 2 RB pins. The CE selection
+ * is made by a field in the NDCB0 register, and by another field in the NDCB2
+ * register. The datasheet describes the logic with an error: the ADDR5 field
+ * is declared once at the beginning of NDCB2 and again at its end. Because the
+ * ADDR5 field of NDCB2 may be used by other bytes, it is more logical to use
+ * the last bits of this field instead of the first ones.
+ *
+ * @cs: Wanted CE lane.
+ * @ndcb0_csel: Value of the NDCB0 register with or without the flag
+ * selecting the wanted CE lane. This is set once when
+ * the Device Tree is probed.
+ * @rb: Ready/Busy pin for the flash chip
+ */
+struct marvell_nand_chip_sel {
+ unsigned int cs;
+ u32 ndcb0_csel;
+ unsigned int rb;
+};
+
+/**
+ * NAND chip structure: stores NAND chip device related information
+ *
+ * @chip: Base NAND chip structure
+ * @node: Used to store NAND chips into a list
+ * @layout:		NAND layout when using hardware ECC
+ * @ndcr: Controller register value for this NAND chip
+ * @ndtr0: Timing registers 0 value for this NAND chip
+ * @ndtr1: Timing registers 1 value for this NAND chip
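+ * @addr_cyc:		Number of address cycles required when accessing the
+ *			NAND chip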
+ * @selected_die: Current active CS
+ * @nsels: Number of CS lines required by the NAND chip
+ * @sels: Array of CS lines descriptions
+ */
+struct marvell_nand_chip {
+ struct nand_chip chip;
+ struct list_head node;
+ const struct marvell_hw_ecc_layout *layout;
+ u32 ndcr;
+ u32 ndtr0;
+ u32 ndtr1;
+ int addr_cyc;
+ int selected_die;
+ unsigned int nsels;
+ struct marvell_nand_chip_sel sels[0];
+};
+
+static inline struct marvell_nand_chip *to_marvell_nand(struct nand_chip *chip)
+{
+ return container_of(chip, struct marvell_nand_chip, chip);
+}
+
+static inline struct marvell_nand_chip_sel *to_nand_sel(struct marvell_nand_chip
+ *nand)
+{
+ return &nand->sels[nand->selected_die];
+}
+
+/**
+ * NAND controller capabilities for distinction between compatible strings
+ *
+ * @max_cs_nb: Number of Chip Select lines available
+ * @max_rb_nb: Number of Ready/Busy lines available
+ * @need_system_controller: Indicates if the SoC needs to have access to the
+ * system controller (i.e. to enable the NAND controller)
+ * @legacy_of_bindings: Indicates if DT parsing must be done the
+ * old-fashioned way
+ * @is_nfcv2: NFCv2 has numerous enhancements compared to NFCv1, e.g.
+ * BCH error detection and correction algorithm,
+ * NDCB3 register has been added
+ * @use_dma: Use DMA for data transfers
+ */
+struct marvell_nfc_caps {
+ unsigned int max_cs_nb;
+ unsigned int max_rb_nb;
+ bool need_system_controller;
+ bool legacy_of_bindings;
+ bool is_nfcv2;
+ bool use_dma;
+};
+
+/**
+ * NAND controller structure: stores Marvell NAND controller information
+ *
+ * @controller: Base controller structure
+ * @dev: Parent device (used to print error messages)
+ * @regs: NAND controller registers
+ * @ecc_clk: ECC block clock, two times the NAND controller clock
+ * @complete: Completion object to wait for NAND controller events
+ * @assigned_cs: Bitmask describing already assigned CS lines
+ * @chips: List containing all the NAND chips attached to
+ * this NAND controller
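+ * @selected_chip: Currently selected target chip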
+ * @caps: NAND controller capabilities for each compatible string
+ * @dma_chan: DMA channel (NFCv1 only)
+ * @dma_buf: 32-bit aligned buffer for DMA transfers (NFCv1 only)
+ */
+struct marvell_nfc {
+ struct nand_hw_control controller;
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *ecc_clk;
+ struct completion complete;
+ unsigned long assigned_cs;
+ struct list_head chips;
+ struct nand_chip *selected_chip;
+ const struct marvell_nfc_caps *caps;
+
+ /* DMA (NFCv1 only) */
+ bool use_dma;
+ struct dma_chan *dma_chan;
+ u8 *dma_buf;
+};
+
+static inline struct marvell_nfc *to_marvell_nfc(struct nand_hw_control *ctrl)
+{
+ return container_of(ctrl, struct marvell_nfc, controller);
+}
+
+/**
+ * NAND controller timings expressed in NAND Controller clock cycles
+ *
+ * @tRP: ND_nRE pulse width
+ * @tRH: ND_nRE high duration
+ * @tWP: ND_nWE pulse time
+ * @tWH: ND_nWE high duration
+ * @tCS: Enable signal setup time
+ * @tCH: Enable signal hold time
+ * @tADL: Address to write data delay
+ * @tAR: ND_ALE low to ND_nRE low delay
+ * @tWHR: ND_nWE high to ND_nRE low for status read
+ * @tRHW: ND_nRE high duration, read to write delay
+ * @tR: ND_nWE high to ND_nRE low for read
+ */
+struct marvell_nfc_timings {
+ /* NDTR0 fields */
+ unsigned int tRP;
+ unsigned int tRH;
+ unsigned int tWP;
+ unsigned int tWH;
+ unsigned int tCS;
+ unsigned int tCH;
+ unsigned int tADL;
+ /* NDTR1 fields */
+ unsigned int tAR;
+ unsigned int tWHR;
+ unsigned int tRHW;
+ unsigned int tR;
+};
+
+/**
+ * Converts a duration into a number of clock cycles.
+ *
+ * @ps: Duration in pico-seconds
+ * @period_ns: Clock period in nano-seconds
+ *
+ * Convert the duration to nanoseconds, then divide by the clock period and
+ * round up to get the number of clock cycles.
+ */
+#define TO_CYCLES(ps, period_ns) (DIV_ROUND_UP(ps / 1000, period_ns))
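A worked instance (values chosen for illustration): with a 10 ns clock period (100 MHz), a 70 ns (70000 ps) delay converts as

/* TO_CYCLES(70000, 10) = DIV_ROUND_UP(70000 / 1000, 10) = 7 cycles */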
+
+/**
+ * NAND driver structure filled during the parsing of the ->exec_op() subop
+ * subset of instructions.
+ *
+ * @ndcb: Array of values written to NDCBx registers
+ * @cle_ale_delay_ns: Optional delay after the last CMD or ADDR cycle
+ * @rdy_timeout_ms: Timeout for waits on Ready/Busy pin
+ * @rdy_delay_ns: Optional delay after waiting for the RB pin
+ * @data_delay_ns: Optional delay after the data xfer
+ * @data_instr_idx: Index of the data instruction in the subop
+ * @data_instr: Pointer to the data instruction in the subop
+ */
+struct marvell_nfc_op {
+ u32 ndcb[4];
+ unsigned int cle_ale_delay_ns;
+ unsigned int rdy_timeout_ms;
+ unsigned int rdy_delay_ns;
+ unsigned int data_delay_ns;
+ unsigned int data_instr_idx;
+ const struct nand_op_instr *data_instr;
+};
+
+/*
+ * Internal helper to conditionally apply a delay (from the above structure,
+ * most of the time).
+ */
+static void cond_delay(unsigned int ns)
+{
+ if (!ns)
+ return;
+
+ if (ns < 10000)
+ ndelay(ns);
+ else
+ udelay(DIV_ROUND_UP(ns, 1000));
+}
+
+/*
+ * The controller has many flags that could generate interrupts; most of them
+ * are disabled and polling is used instead. For the very slow signals, using
+ * interrupts may reduce the CPU load.
+ */
+static void marvell_nfc_disable_int(struct marvell_nfc *nfc, u32 int_mask)
+{
+ u32 reg;
+
+ /* Writing 1 disables the interrupt */
+ reg = readl_relaxed(nfc->regs + NDCR);
+ writel_relaxed(reg | int_mask, nfc->regs + NDCR);
+}
+
+static void marvell_nfc_enable_int(struct marvell_nfc *nfc, u32 int_mask)
+{
+ u32 reg;
+
+ /* Writing 0 enables the interrupt */
+ reg = readl_relaxed(nfc->regs + NDCR);
+ writel_relaxed(reg & ~int_mask, nfc->regs + NDCR);
+}
+
+static void marvell_nfc_clear_int(struct marvell_nfc *nfc, u32 int_mask)
+{
+ writel_relaxed(int_mask, nfc->regs + NDSR);
+}
+
+static void marvell_nfc_force_byte_access(struct nand_chip *chip,
+ bool force_8bit)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 ndcr;
+
+ /*
+	 * Callers of this function do not verify if the NAND is using a 16-bit
+	 * or an 8-bit bus for normal operations, so we need to take care of that
+ * here by leaving the configuration unchanged if the NAND does not have
+ * the NAND_BUSWIDTH_16 flag set.
+ */
+ if (!(chip->options & NAND_BUSWIDTH_16))
+ return;
+
+ ndcr = readl_relaxed(nfc->regs + NDCR);
+
+ if (force_8bit)
+ ndcr &= ~(NDCR_DWIDTH_M | NDCR_DWIDTH_C);
+ else
+ ndcr |= NDCR_DWIDTH_M | NDCR_DWIDTH_C;
+
+ writel_relaxed(ndcr, nfc->regs + NDCR);
+}
+
+static int marvell_nfc_wait_ndrun(struct nand_chip *chip)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 val;
+ int ret;
+
+ /*
+	 * The command is being processed; wait for the ND_RUN bit to be
+	 * cleared by the NFC. If it is not, we must clear it by hand.
+ */
+ ret = readl_relaxed_poll_timeout(nfc->regs + NDCR, val,
+ (val & NDCR_ND_RUN) == 0,
+ POLL_PERIOD, POLL_TIMEOUT);
+ if (ret) {
+ dev_err(nfc->dev, "Timeout on NAND controller run mode\n");
+ writel_relaxed(readl(nfc->regs + NDCR) & ~NDCR_ND_RUN,
+ nfc->regs + NDCR);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Any time a command has to be sent to the controller, the following sequence
+ * has to be followed:
+ * - call marvell_nfc_prepare_cmd()
+ * -> activate the ND_RUN bit that effectively 'starts a job'
+ * -> wait for the signal indicating the NFC is waiting for a command
+ * - send the command (cmd and address cycles)
+ * - optionally send or receive the data
+ * - call marvell_nfc_end_cmd() with the corresponding flag
+ * -> wait for the flag to be triggered or cancel the job with a timeout
+ *
+ * The following helpers are here to factorize the code a bit so that
+ * specialized functions responsible for executing the actual NAND
+ * operations do not have to replicate the same code blocks.
+ */
+static int marvell_nfc_prepare_cmd(struct nand_chip *chip)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 ndcr, val;
+ int ret;
+
+ /* Poll ND_RUN and clear NDSR before issuing any command */
+ ret = marvell_nfc_wait_ndrun(chip);
+ if (ret) {
+ dev_err(nfc->dev, "Last operation did not succeed\n");
+ return ret;
+ }
+
+ ndcr = readl_relaxed(nfc->regs + NDCR);
+ writel_relaxed(readl(nfc->regs + NDSR), nfc->regs + NDSR);
+
+ /* Assert ND_RUN bit and wait the NFC to be ready */
+ writel_relaxed(ndcr | NDCR_ND_RUN, nfc->regs + NDCR);
+ ret = readl_relaxed_poll_timeout(nfc->regs + NDSR, val,
+ val & NDSR_WRCMDREQ,
+ POLL_PERIOD, POLL_TIMEOUT);
+ if (ret) {
+		dev_err(nfc->dev, "Timeout on WRCMDREQ\n");
+ return -ETIMEDOUT;
+ }
+
+ /* Command may be written, clear WRCMDREQ status bit */
+ writel_relaxed(NDSR_WRCMDREQ, nfc->regs + NDSR);
+
+ return 0;
+}
+
+static void marvell_nfc_send_cmd(struct nand_chip *chip,
+ struct marvell_nfc_op *nfc_op)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+
+ dev_dbg(nfc->dev, "\nNDCR: 0x%08x\n"
+ "NDCB0: 0x%08x\nNDCB1: 0x%08x\nNDCB2: 0x%08x\nNDCB3: 0x%08x\n",
+ (u32)readl_relaxed(nfc->regs + NDCR), nfc_op->ndcb[0],
+ nfc_op->ndcb[1], nfc_op->ndcb[2], nfc_op->ndcb[3]);
+
+ writel_relaxed(to_nand_sel(marvell_nand)->ndcb0_csel | nfc_op->ndcb[0],
+ nfc->regs + NDCB0);
+ writel_relaxed(nfc_op->ndcb[1], nfc->regs + NDCB0);
+ writel(nfc_op->ndcb[2], nfc->regs + NDCB0);
+
+ /*
+ * Write NDCB0 four times only if LEN_OVRD is set or if ADDR6 or ADDR7
+ * fields are used (only available on NFCv2).
+ */
+ if (nfc_op->ndcb[0] & NDCB0_LEN_OVRD ||
+ NDCB0_ADDR_GET_NUM_CYC(nfc_op->ndcb[0]) >= 6) {
+ if (!WARN_ON_ONCE(!nfc->caps->is_nfcv2))
+ writel(nfc_op->ndcb[3], nfc->regs + NDCB0);
+ }
+}
+
+static int marvell_nfc_end_cmd(struct nand_chip *chip, int flag,
+ const char *label)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 val;
+ int ret;
+
+ ret = readl_relaxed_poll_timeout(nfc->regs + NDSR, val,
+ val & flag,
+ POLL_PERIOD, POLL_TIMEOUT);
+
+ if (ret) {
+ dev_err(nfc->dev, "Timeout on %s (NDSR: 0x%08x)\n",
+ label, val);
+ if (nfc->dma_chan)
+ dmaengine_terminate_all(nfc->dma_chan);
+ return ret;
+ }
+
+ /*
+	 * The DMA path uses this helper to poll the CMDD bits without having
+	 * them cleared.
+ */
+ if (nfc->use_dma && (readl_relaxed(nfc->regs + NDCR) & NDCR_DMA_EN))
+ return 0;
+
+ writel_relaxed(flag, nfc->regs + NDSR);
+
+ return 0;
+}
+
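+/* Wait for the command done (CMDD) event of the chip select in use */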
+static int marvell_nfc_wait_cmdd(struct nand_chip *chip)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ int cs_flag = NDSR_CMDD(to_nand_sel(marvell_nand)->ndcb0_csel);
+
+ return marvell_nfc_end_cmd(chip, cs_flag, "CMDD");
+}
+
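+/*
+ * Wait for the end of an operation by sleeping until the RDY interrupt
+ * (RB line) fires; falls back to IRQ_TIMEOUT when the caller passes no
+ * timeout.
+ */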
+static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ int ret;
+
+ /* Timeout is expressed in ms */
+ if (!timeout_ms)
+ timeout_ms = IRQ_TIMEOUT;
+
+ init_completion(&nfc->complete);
+
+ marvell_nfc_enable_int(nfc, NDCR_RDYM);
+ ret = wait_for_completion_timeout(&nfc->complete,
+ msecs_to_jiffies(timeout_ms));
+ marvell_nfc_disable_int(nfc, NDCR_RDYM);
+ marvell_nfc_clear_int(nfc, NDSR_RDY(0) | NDSR_RDY(1));
+ if (!ret) {
+ dev_err(nfc->dev, "Timeout waiting for RB signal\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
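+/*
+ * Apply the per-chip configuration when switching between chips/dies:
+ * timing registers (NDTR0/NDTR1), the saved NDCR fields, and a clean
+ * interrupt status.
+ */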
+static void marvell_nfc_select_chip(struct mtd_info *mtd, int die_nr)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 ndcr_generic;
+
+ if (chip == nfc->selected_chip && die_nr == marvell_nand->selected_die)
+ return;
+
+ if (die_nr < 0 || die_nr >= marvell_nand->nsels) {
+ nfc->selected_chip = NULL;
+ marvell_nand->selected_die = -1;
+ return;
+ }
+
+ /*
+ * Do not change the timing registers when using the DT property
+ * marvell,nand-keep-config; in that case ->ndtr0 and ->ndtr1 still hold
+ * the reset values sampled at probe time, so writing them back is a no-op.
+ */
+ writel_relaxed(marvell_nand->ndtr0, nfc->regs + NDTR0);
+ writel_relaxed(marvell_nand->ndtr1, nfc->regs + NDTR1);
+
+ /*
+ * Reset the NDCR register to a clean state for this particular chip,
+ * also clear ND_RUN bit.
+ */
+ ndcr_generic = readl_relaxed(nfc->regs + NDCR) &
+ NDCR_GENERIC_FIELDS_MASK & ~NDCR_ND_RUN;
+ writel_relaxed(ndcr_generic | marvell_nand->ndcr, nfc->regs + NDCR);
+
+ /* Also reset the interrupt status register */
+ marvell_nfc_clear_int(nfc, NDCR_ALL_INT);
+
+ nfc->selected_chip = chip;
+ marvell_nand->selected_die = die_nr;
+}
+
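+/*
+ * Interrupt handler: mask the events that fired and, unless the controller
+ * is merely requesting more data, wake up the thread sleeping in
+ * marvell_nfc_wait_op().
+ */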
+static irqreturn_t marvell_nfc_isr(int irq, void *dev_id)
+{
+ struct marvell_nfc *nfc = dev_id;
+ u32 st = readl_relaxed(nfc->regs + NDSR);
+ u32 ien = (~readl_relaxed(nfc->regs + NDCR)) & NDCR_ALL_INT;
+
+ /*
+ * The RDY interrupt mask is a single bit in NDCR while there are two
+ * status bits in NDSR (RDY[cs0/cs2] and RDY[cs1/cs3]).
+ */
+ if (st & NDSR_RDY(1))
+ st |= NDSR_RDY(0);
+
+ if (!(st & ien))
+ return IRQ_NONE;
+
+ marvell_nfc_disable_int(nfc, st & NDCR_ALL_INT);
+
+ if (!(st & (NDSR_RDDREQ | NDSR_WRDREQ | NDSR_WRCMDREQ)))
+ complete(&nfc->complete);
+
+ return IRQ_HANDLED;
+}
+
+/* HW ECC related functions */
+static void marvell_nfc_enable_hw_ecc(struct nand_chip *chip)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 ndcr = readl_relaxed(nfc->regs + NDCR);
+
+ if (!(ndcr & NDCR_ECC_EN)) {
+ writel_relaxed(ndcr | NDCR_ECC_EN, nfc->regs + NDCR);
+
+ /*
+ * When enabling BCH, set threshold to 0 to always know the
+ * number of corrected bitflips.
+ */
+ if (chip->ecc.algo == NAND_ECC_BCH)
+ writel_relaxed(NDECCCTRL_BCH_EN, nfc->regs + NDECCCTRL);
+ }
+}
+
+static void marvell_nfc_disable_hw_ecc(struct nand_chip *chip)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 ndcr = readl_relaxed(nfc->regs + NDCR);
+
+ if (ndcr & NDCR_ECC_EN) {
+ writel_relaxed(ndcr & ~NDCR_ECC_EN, nfc->regs + NDCR);
+ if (chip->ecc.algo == NAND_ECC_BCH)
+ writel_relaxed(0, nfc->regs + NDECCCTRL);
+ }
+}
+
+/* DMA related helpers */
+static void marvell_nfc_enable_dma(struct marvell_nfc *nfc)
+{
+ u32 reg;
+
+ reg = readl_relaxed(nfc->regs + NDCR);
+ writel_relaxed(reg | NDCR_DMA_EN, nfc->regs + NDCR);
+}
+
+static void marvell_nfc_disable_dma(struct marvell_nfc *nfc)
+{
+ u32 reg;
+
+ reg = readl_relaxed(nfc->regs + NDCR);
+ writel_relaxed(reg & ~NDCR_DMA_EN, nfc->regs + NDCR);
+}
+
+/* Read/write PIO/DMA accessors */
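+/*
+ * Transfer data to/from the controller FIFO through the pre-allocated
+ * bounce buffer. The transfer length is rounded up to a multiple of
+ * 32 bytes (and capped to MAX_CHUNK_SIZE) as required by the DMA setup.
+ */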
+static int marvell_nfc_xfer_data_dma(struct marvell_nfc *nfc,
+ enum dma_data_direction direction,
+ unsigned int len)
+{
+ unsigned int dma_len = min_t(int, ALIGN(len, 32), MAX_CHUNK_SIZE);
+ struct dma_async_tx_descriptor *tx;
+ struct scatterlist sg;
+ dma_cookie_t cookie;
+ int ret;
+
+ marvell_nfc_enable_dma(nfc);
+ /* Prepare the DMA transfer */
+ sg_init_one(&sg, nfc->dma_buf, dma_len);
+ dma_map_sg(nfc->dma_chan->device->dev, &sg, 1, direction);
+ tx = dmaengine_prep_slave_sg(nfc->dma_chan, &sg, 1,
+ direction == DMA_FROM_DEVICE ?
+ DMA_DEV_TO_MEM : DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT);
+ if (!tx) {
+ dev_err(nfc->dev, "Could not prepare DMA S/G list\n");
+ return -ENXIO;
+ }
+
+ /* Do the task and wait for it to finish */
+ cookie = dmaengine_submit(tx);
+ ret = dma_submit_error(cookie);
+ if (ret)
+ return -EIO;
+
+ dma_async_issue_pending(nfc->dma_chan);
+ ret = marvell_nfc_wait_cmdd(nfc->selected_chip);
+ dma_unmap_sg(nfc->dma_chan->device->dev, &sg, 1, direction);
+ marvell_nfc_disable_dma(nfc);
+ if (ret) {
+ dev_err(nfc->dev, "Timeout waiting for DMA (status: %d)\n",
+ dmaengine_tx_status(nfc->dma_chan, cookie, NULL));
+ dmaengine_terminate_all(nfc->dma_chan);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
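+/*
+ * The FIFO can only be drained by bursts of 8 32-bit words (32 bytes): a
+ * trailing partial chunk is still popped as a full burst into a temporary
+ * buffer, from which only the relevant bytes are copied out.
+ */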
+static int marvell_nfc_xfer_data_in_pio(struct marvell_nfc *nfc, u8 *in,
+ unsigned int len)
+{
+ unsigned int last_len = len % FIFO_DEPTH;
+ unsigned int last_full_offset = round_down(len, FIFO_DEPTH);
+ int i;
+
+ for (i = 0; i < last_full_offset; i += FIFO_DEPTH)
+ ioread32_rep(nfc->regs + NDDB, in + i, FIFO_REP(FIFO_DEPTH));
+
+ if (last_len) {
+ u8 tmp_buf[FIFO_DEPTH];
+
+ ioread32_rep(nfc->regs + NDDB, tmp_buf, FIFO_REP(FIFO_DEPTH));
+ memcpy(in + last_full_offset, tmp_buf, last_len);
+ }
+
+ return 0;
+}
+
+static int marvell_nfc_xfer_data_out_pio(struct marvell_nfc *nfc, const u8 *out,
+ unsigned int len)
+{
+ unsigned int last_len = len % FIFO_DEPTH;
+ unsigned int last_full_offset = round_down(len, FIFO_DEPTH);
+ int i;
+
+ for (i = 0; i < last_full_offset; i += FIFO_DEPTH)
+ iowrite32_rep(nfc->regs + NDDB, out + i, FIFO_REP(FIFO_DEPTH));
+
+ if (last_len) {
+ u8 tmp_buf[FIFO_DEPTH];
+
+ memcpy(tmp_buf, out + last_full_offset, last_len);
+ iowrite32_rep(nfc->regs + NDDB, tmp_buf, FIFO_REP(FIFO_DEPTH));
+ }
+
+ return 0;
+}
+
+static void marvell_nfc_check_empty_chunk(struct nand_chip *chip,
+ u8 *data, int data_len,
+ u8 *spare, int spare_len,
+ u8 *ecc, int ecc_len,
+ unsigned int *max_bitflips)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int bf;
+
+ /*
+ * Blank pages (all 0xFF) that have not been written may be recognized
+ * as bad if bitflips occur, so whenever an uncorrectable error occurs,
+ * check whether the entire page (with ECC bytes) is actually blank.
+ */
+ if (!data)
+ data_len = 0;
+ if (!spare)
+ spare_len = 0;
+ if (!ecc)
+ ecc_len = 0;
+
+ bf = nand_check_erased_ecc_chunk(data, data_len, ecc, ecc_len,
+ spare, spare_len, chip->ecc.strength);
+ if (bf < 0) {
+ mtd->ecc_stats.failed++;
+ return;
+ }
+
+ /* Update the stats and max_bitflips */
+ mtd->ecc_stats.corrected += bf;
+ *max_bitflips = max_t(unsigned int, *max_bitflips, bf);
+}
+
+/*
+ * Check if a chunk is correct or not according to the hardware ECC engine.
+ * mtd->ecc_stats.corrected is updated, as well as max_bitflips. However,
+ * mtd->ecc_stats.failed is not: the function instead returns a non-zero
+ * value indicating that the emptiness of the subpage must be checked
+ * before declaring the subpage corrupted.
+ */
+static int marvell_nfc_hw_ecc_correct(struct nand_chip *chip,
+ unsigned int *max_bitflips)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ int bf = 0;
+ u32 ndsr;
+
+ ndsr = readl_relaxed(nfc->regs + NDSR);
+
+ /* Check uncorrectable error flag */
+ if (ndsr & NDSR_UNCERR) {
+ writel_relaxed(ndsr, nfc->regs + NDSR);
+
+ /*
+ * Do not increment ->ecc_stats.failed now; instead, return a
+ * non-zero value to indicate that this chunk was apparently
+ * bad, and it should be checked to see if it is empty or not.
+ * If the chunk (with ECC bytes) is not declared empty, the
+ * calling function must increment the failure count.
+ */
+ return -EBADMSG;
+ }
+
+ /* Check correctable error flag */
+ if (ndsr & NDSR_CORERR) {
+ writel_relaxed(ndsr, nfc->regs + NDSR);
+
+ if (chip->ecc.algo == NAND_ECC_BCH)
+ bf = NDSR_ERRCNT(ndsr);
+ else
+ bf = 1;
+ }
+
+ /* Update the stats and max_bitflips */
+ mtd->ecc_stats.corrected += bf;
+ *max_bitflips = max_t(unsigned int, *max_bitflips, bf);
+
+ return 0;
+}
+
+/* Hamming read helpers */
+static int marvell_nfc_hw_ecc_hmg_do_read_page(struct nand_chip *chip,
+ u8 *data_buf, u8 *oob_buf,
+ bool raw, int page)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ struct marvell_nfc_op nfc_op = {
+ .ndcb[0] = NDCB0_CMD_TYPE(TYPE_READ) |
+ NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
+ NDCB0_DBC |
+ NDCB0_CMD1(NAND_CMD_READ0) |
+ NDCB0_CMD2(NAND_CMD_READSTART),
+ .ndcb[1] = NDCB1_ADDRS_PAGE(page),
+ .ndcb[2] = NDCB2_ADDR5_PAGE(page),
+ };
+ unsigned int oob_bytes = lt->spare_bytes + (raw ? lt->ecc_bytes : 0);
+ int ret;
+
+ /* NFCv2 needs more information about the operation being executed */
+ if (nfc->caps->is_nfcv2)
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
+ "RDDREQ while draining FIFO (data/oob)");
+ if (ret)
+ return ret;
+
+ /*
+ * Read the page then the OOB area. Unlike what is shown in current
+ * documentation, spare bytes are protected by the ECC engine, and must
+ * be at the beginning of the OOB area, otherwise running this driver on
+ * legacy systems would prevent the discovery of the BBM/BBT.
+ */
+ if (nfc->use_dma) {
+ marvell_nfc_xfer_data_dma(nfc, DMA_FROM_DEVICE,
+ lt->data_bytes + oob_bytes);
+ memcpy(data_buf, nfc->dma_buf, lt->data_bytes);
+ memcpy(oob_buf, nfc->dma_buf + lt->data_bytes, oob_bytes);
+ } else {
+ marvell_nfc_xfer_data_in_pio(nfc, data_buf, lt->data_bytes);
+ marvell_nfc_xfer_data_in_pio(nfc, oob_buf, oob_bytes);
+ }
+
+ ret = marvell_nfc_wait_cmdd(chip);
+
+ return ret;
+}
+
+static int marvell_nfc_hw_ecc_hmg_read_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ return marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi,
+ true, page);
+}
+
+static int marvell_nfc_hw_ecc_hmg_read_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ u8 *buf, int oob_required,
+ int page)
+{
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ unsigned int full_sz = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
+ int max_bitflips = 0, ret;
+ u8 *raw_buf;
+
+ marvell_nfc_enable_hw_ecc(chip);
+ marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi, false,
+ page);
+ ret = marvell_nfc_hw_ecc_correct(chip, &max_bitflips);
+ marvell_nfc_disable_hw_ecc(chip);
+
+ if (!ret)
+ return max_bitflips;
+
+ /*
+ * When ECC failures are detected, check if the full page has been
+ * written or not. Ignore the failure if it is actually empty.
+ */
+ raw_buf = kmalloc(full_sz, GFP_KERNEL);
+ if (!raw_buf)
+ return -ENOMEM;
+
+ marvell_nfc_hw_ecc_hmg_do_read_page(chip, raw_buf, raw_buf +
+ lt->data_bytes, true, page);
+ marvell_nfc_check_empty_chunk(chip, raw_buf, full_sz, NULL, 0, NULL, 0,
+ &max_bitflips);
+ kfree(raw_buf);
+
+ return max_bitflips;
+}
+
+/*
+ * The spare area in Hamming layouts is not protected by the ECC engine (even
+ * though it appears before the ECC bytes when reading), so the
+ * ->read_oob_raw() function also stands for ->read_oob().
+ */
+static int marvell_nfc_hw_ecc_hmg_read_oob_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
+{
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+
+ return marvell_nfc_hw_ecc_hmg_do_read_page(chip, chip->data_buf,
+ chip->oob_poi, true, page);
+}
+
+/* Hamming write helpers */
+static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip,
+ const u8 *data_buf,
+ const u8 *oob_buf, bool raw,
+ int page)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ struct marvell_nfc_op nfc_op = {
+ .ndcb[0] = NDCB0_CMD_TYPE(TYPE_WRITE) |
+ NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
+ NDCB0_CMD1(NAND_CMD_SEQIN) |
+ NDCB0_CMD2(NAND_CMD_PAGEPROG) |
+ NDCB0_DBC,
+ .ndcb[1] = NDCB1_ADDRS_PAGE(page),
+ .ndcb[2] = NDCB2_ADDR5_PAGE(page),
+ };
+ unsigned int oob_bytes = lt->spare_bytes + (raw ? lt->ecc_bytes : 0);
+ int ret;
+
+ /* NFCv2 needs more information about the operation being executed */
+ if (nfc->caps->is_nfcv2)
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_WRDREQ,
+ "WRDREQ while loading FIFO (data)");
+ if (ret)
+ return ret;
+
+ /* Write the page then the OOB area */
+ if (nfc->use_dma) {
+ memcpy(nfc->dma_buf, data_buf, lt->data_bytes);
+ memcpy(nfc->dma_buf + lt->data_bytes, oob_buf, oob_bytes);
+ marvell_nfc_xfer_data_dma(nfc, DMA_TO_DEVICE, lt->data_bytes +
+ lt->ecc_bytes + lt->spare_bytes);
+ } else {
+ marvell_nfc_xfer_data_out_pio(nfc, data_buf, lt->data_bytes);
+ marvell_nfc_xfer_data_out_pio(nfc, oob_buf, oob_bytes);
+ }
+
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ ret = marvell_nfc_wait_op(chip,
+ chip->data_interface.timings.sdr.tPROG_max);
+ return ret;
+}
+
+static int marvell_nfc_hw_ecc_hmg_write_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const u8 *buf,
+ int oob_required, int page)
+{
+ return marvell_nfc_hw_ecc_hmg_do_write_page(chip, buf, chip->oob_poi,
+ true, page);
+}
+
+static int marvell_nfc_hw_ecc_hmg_write_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const u8 *buf,
+ int oob_required, int page)
+{
+ int ret;
+
+ marvell_nfc_enable_hw_ecc(chip);
+ ret = marvell_nfc_hw_ecc_hmg_do_write_page(chip, buf, chip->oob_poi,
+ false, page);
+ marvell_nfc_disable_hw_ecc(chip);
+
+ return ret;
+}
+
+/*
+ * The spare area in Hamming layouts is not protected by the ECC engine (even
+ * though it appears before the ECC bytes when reading), so the
+ * ->write_oob_raw() function also stands for ->write_oob().
+ */
+static int marvell_nfc_hw_ecc_hmg_write_oob_raw(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ int page)
+{
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+
+ memset(chip->data_buf, 0xFF, mtd->writesize);
+
+ return marvell_nfc_hw_ecc_hmg_do_write_page(chip, chip->data_buf,
+ chip->oob_poi, true, page);
+}
+
+/* BCH read helpers */
+static int marvell_nfc_hw_ecc_bch_read_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ u8 *oob = chip->oob_poi;
+ int chunk_size = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
+ int ecc_offset = (lt->full_chunk_cnt * lt->spare_bytes) +
+ lt->last_spare_bytes;
+ int data_len = lt->data_bytes;
+ int spare_len = lt->spare_bytes;
+ int ecc_len = lt->ecc_bytes;
+ int chunk;
+
+ if (oob_required)
+ memset(chip->oob_poi, 0xFF, mtd->oobsize);
+
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
+ for (chunk = 0; chunk < lt->nchunks; chunk++) {
+ /* Update last chunk length */
+ if (chunk >= lt->full_chunk_cnt) {
+ data_len = lt->last_data_bytes;
+ spare_len = lt->last_spare_bytes;
+ ecc_len = lt->last_ecc_bytes;
+ }
+
+ /* Read data bytes */
+ nand_change_read_column_op(chip, chunk * chunk_size,
+ buf + (lt->data_bytes * chunk),
+ data_len, false);
+
+ /* Read spare bytes */
+ nand_read_data_op(chip, oob + (lt->spare_bytes * chunk),
+ spare_len, false);
+
+ /* Read ECC bytes */
+ nand_read_data_op(chip, oob + ecc_offset +
+ (ALIGN(lt->ecc_bytes, 32) * chunk),
+ ecc_len, false);
+ }
+
+ return 0;
+}
+
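+/*
+ * Read one BCH-protected chunk (data + spare) in PIO mode. The first chunk
+ * also carries the READ0/READSTART command and the address cycles; the last
+ * chunk of a multi-chunk page is issued as a 'last naked' read, the others
+ * as monolithic reads.
+ */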
+static void marvell_nfc_hw_ecc_bch_read_chunk(struct nand_chip *chip, int chunk,
+ u8 *data, unsigned int data_len,
+ u8 *spare, unsigned int spare_len,
+ int page)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ int i, ret;
+ struct marvell_nfc_op nfc_op = {
+ .ndcb[0] = NDCB0_CMD_TYPE(TYPE_READ) |
+ NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
+ NDCB0_LEN_OVRD,
+ .ndcb[1] = NDCB1_ADDRS_PAGE(page),
+ .ndcb[2] = NDCB2_ADDR5_PAGE(page),
+ .ndcb[3] = data_len + spare_len,
+ };
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return;
+
+ if (chunk == 0)
+ nfc_op.ndcb[0] |= NDCB0_DBC |
+ NDCB0_CMD1(NAND_CMD_READ0) |
+ NDCB0_CMD2(NAND_CMD_READSTART);
+
+ /*
+ * Trigger the naked read operation only on the last chunk.
+ * Otherwise, use monolithic read.
+ */
+ if (lt->nchunks == 1 || (chunk < lt->nchunks - 1))
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
+ else
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+
+ /*
+ * According to the datasheet, when reading from NDDB
+ * with BCH enabled, after each 32-byte read, we
+ * have to make sure that the NDSR.RDDREQ bit is set.
+ *
+ * Drain the FIFO, 8 32-bit reads at a time, and skip
+ * the polling on the last read.
+ *
+ * Length is a multiple of 32 bytes, hence a whole number of 8-word reads.
+ */
+ for (i = 0; i < data_len; i += FIFO_DEPTH * BCH_SEQ_READS) {
+ marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
+ "RDDREQ while draining FIFO (data)");
+ marvell_nfc_xfer_data_in_pio(nfc, data,
+ FIFO_DEPTH * BCH_SEQ_READS);
+ data += FIFO_DEPTH * BCH_SEQ_READS;
+ }
+
+ for (i = 0; i < spare_len; i += FIFO_DEPTH * BCH_SEQ_READS) {
+ marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
+ "RDDREQ while draining FIFO (OOB)");
+ marvell_nfc_xfer_data_in_pio(nfc, spare,
+ FIFO_DEPTH * BCH_SEQ_READS);
+ spare += FIFO_DEPTH * BCH_SEQ_READS;
+ }
+}
+
+static int marvell_nfc_hw_ecc_bch_read_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ u8 *buf, int oob_required,
+ int page)
+{
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ int data_len = lt->data_bytes, spare_len = lt->spare_bytes, ecc_len;
+ u8 *data = buf, *spare = chip->oob_poi, *ecc;
+ int max_bitflips = 0;
+ u32 failure_mask = 0;
+ int chunk, ecc_offset_in_page, ret;
+
+ /*
+ * With BCH, the OOB area is not fully used (and thus not entirely read);
+ * unexpected bytes could show up at the end of the OOB buffer if it is
+ * not explicitly erased.
+ */
+ if (oob_required)
+ memset(chip->oob_poi, 0xFF, mtd->oobsize);
+
+ marvell_nfc_enable_hw_ecc(chip);
+
+ for (chunk = 0; chunk < lt->nchunks; chunk++) {
+ /* Update length for the last chunk */
+ if (chunk >= lt->full_chunk_cnt) {
+ data_len = lt->last_data_bytes;
+ spare_len = lt->last_spare_bytes;
+ }
+
+ /* Read the chunk and detect number of bitflips */
+ marvell_nfc_hw_ecc_bch_read_chunk(chip, chunk, data, data_len,
+ spare, spare_len, page);
+ ret = marvell_nfc_hw_ecc_correct(chip, &max_bitflips);
+ if (ret)
+ failure_mask |= BIT(chunk);
+
+ data += data_len;
+ spare += spare_len;
+ }
+
+ marvell_nfc_disable_hw_ecc(chip);
+
+ if (!failure_mask)
+ return max_bitflips;
+
+ /*
+ * Please note that dumping the ECC bytes during a normal read with OOB
+ * area would add a significant overhead as ECC bytes are "consumed" by
+ * the controller in normal mode and must be re-read in raw mode. To
+ * avoid degrading performance, we prefer not to include them. The
+ * user should re-read the page in raw mode if ECC bytes are required.
+ *
+ * However, for any subpage read error reported by ->correct(), the ECC
+ * bytes must be read in raw mode and the full subpage must be checked
+ * to see if it is entirely empty or if there was an actual error.
+ */
+ for (chunk = 0; chunk < lt->nchunks; chunk++) {
+ /* No failure reported for this chunk, move to the next one */
+ if (!(failure_mask & BIT(chunk)))
+ continue;
+
+ /* Derive ECC bytes positions (in page/buffer) and length */
+ ecc = chip->oob_poi +
+ (lt->full_chunk_cnt * lt->spare_bytes) +
+ lt->last_spare_bytes +
+ (chunk * ALIGN(lt->ecc_bytes, 32));
+ ecc_offset_in_page =
+ (chunk * (lt->data_bytes + lt->spare_bytes +
+ lt->ecc_bytes)) +
+ (chunk < lt->full_chunk_cnt ?
+ lt->data_bytes + lt->spare_bytes :
+ lt->last_data_bytes + lt->last_spare_bytes);
+ ecc_len = chunk < lt->full_chunk_cnt ?
+ lt->ecc_bytes : lt->last_ecc_bytes;
+
+ /* Do the actual raw read of the ECC bytes */
+ nand_change_read_column_op(chip, ecc_offset_in_page,
+ ecc, ecc_len, false);
+
+ /* Derive data/spare bytes positions (in buffer) and length */
+ data = buf + (chunk * lt->data_bytes);
+ data_len = chunk < lt->full_chunk_cnt ?
+ lt->data_bytes : lt->last_data_bytes;
+ spare = chip->oob_poi + (chunk * (lt->spare_bytes +
+ lt->ecc_bytes));
+ spare_len = chunk < lt->full_chunk_cnt ?
+ lt->spare_bytes : lt->last_spare_bytes;
+
+ /* Check the entire chunk (data + spare + ecc) for emptiness */
+ marvell_nfc_check_empty_chunk(chip, data, data_len, spare,
+ spare_len, ecc, ecc_len,
+ &max_bitflips);
+ }
+
+ return max_bitflips;
+}
+
+static int marvell_nfc_hw_ecc_bch_read_oob_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
+{
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+
+ return chip->ecc.read_page_raw(mtd, chip, chip->data_buf, true, page);
+}
+
+static int marvell_nfc_hw_ecc_bch_read_oob(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
+{
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+
+ return chip->ecc.read_page(mtd, chip, chip->data_buf, true, page);
+}
+
+/* BCH write helpers */
+static int marvell_nfc_hw_ecc_bch_write_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const u8 *buf,
+ int oob_required, int page)
+{
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ int full_chunk_size = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
+ int data_len = lt->data_bytes;
+ int spare_len = lt->spare_bytes;
+ int ecc_len = lt->ecc_bytes;
+ int spare_offset = 0;
+ int ecc_offset = (lt->full_chunk_cnt * lt->spare_bytes) +
+ lt->last_spare_bytes;
+ int chunk;
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
+ for (chunk = 0; chunk < lt->nchunks; chunk++) {
+ if (chunk >= lt->full_chunk_cnt) {
+ data_len = lt->last_data_bytes;
+ spare_len = lt->last_spare_bytes;
+ ecc_len = lt->last_ecc_bytes;
+ }
+
+ /* Point to the column of the next chunk */
+ nand_change_write_column_op(chip, chunk * full_chunk_size,
+ NULL, 0, false);
+
+ /* Write the data */
+ nand_write_data_op(chip, buf + (chunk * lt->data_bytes),
+ data_len, false);
+
+ if (!oob_required)
+ continue;
+
+ /* Write the spare bytes */
+ if (spare_len)
+ nand_write_data_op(chip, chip->oob_poi + spare_offset,
+ spare_len, false);
+
+ /* Write the ECC bytes */
+ if (ecc_len)
+ nand_write_data_op(chip, chip->oob_poi + ecc_offset,
+ ecc_len, false);
+
+ spare_offset += spare_len;
+ ecc_offset += ALIGN(ecc_len, 32);
+ }
+
+ return nand_prog_page_end_op(chip);
+}
+
+static int
+marvell_nfc_hw_ecc_bch_write_chunk(struct nand_chip *chip, int chunk,
+ const u8 *data, unsigned int data_len,
+ const u8 *spare, unsigned int spare_len,
+ int page)
+{
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ int ret;
+ struct marvell_nfc_op nfc_op = {
+ .ndcb[0] = NDCB0_CMD_TYPE(TYPE_WRITE) | NDCB0_LEN_OVRD,
+ .ndcb[3] = data_len + spare_len,
+ };
+
+ /*
+ * The first operation dispatches the SEQIN command, issues the address
+ * cycles and asks for the first chunk of data.
+ * All operations in the middle (if any) issue a naked write and
+ * also ask for data.
+ * The last operation (if any) asks for the last chunk of data through a
+ * last naked write.
+ */
+ if (chunk == 0) {
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_WRITE_DISPATCH) |
+ NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
+ NDCB0_CMD1(NAND_CMD_SEQIN);
+ nfc_op.ndcb[1] |= NDCB1_ADDRS_PAGE(page);
+ nfc_op.ndcb[2] |= NDCB2_ADDR5_PAGE(page);
+ } else if (chunk < lt->nchunks - 1) {
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_NAKED_RW);
+ } else {
+ nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
+ }
+
+ /* Always dispatch the PAGEPROG command on the last chunk */
+ if (chunk == lt->nchunks - 1)
+ nfc_op.ndcb[0] |= NDCB0_CMD2(NAND_CMD_PAGEPROG) | NDCB0_DBC;
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_WRDREQ,
+ "WRDREQ while loading FIFO (data)");
+ if (ret)
+ return ret;
+
+ /* Transfer the contents */
+ iowrite32_rep(nfc->regs + NDDB, data, FIFO_REP(data_len));
+ iowrite32_rep(nfc->regs + NDDB, spare, FIFO_REP(spare_len));
+
+ return 0;
+}
+
+static int marvell_nfc_hw_ecc_bch_write_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const u8 *buf,
+ int oob_required, int page)
+{
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+ const u8 *data = buf;
+ const u8 *spare = chip->oob_poi;
+ int data_len = lt->data_bytes;
+ int spare_len = lt->spare_bytes;
+ int chunk, ret;
+
+ /* Spare data will be written anyway, so fill it with 0xFF to avoid garbage */
+ if (!oob_required)
+ memset(chip->oob_poi, 0xFF, mtd->oobsize);
+
+ marvell_nfc_enable_hw_ecc(chip);
+
+ for (chunk = 0; chunk < lt->nchunks; chunk++) {
+ if (chunk >= lt->full_chunk_cnt) {
+ data_len = lt->last_data_bytes;
+ spare_len = lt->last_spare_bytes;
+ }
+
+ marvell_nfc_hw_ecc_bch_write_chunk(chip, chunk, data, data_len,
+ spare, spare_len, page);
+ data += data_len;
+ spare += spare_len;
+
+ /*
+ * Waiting only for CMDD or PAGED is not enough, as the ECC bytes
+ * may only be partially written at this point. No flag is set
+ * once the operation really finishes, but the ND_RUN bit is
+ * cleared, so wait for that before stepping to the next command.
+ */
+ marvell_nfc_wait_ndrun(chip);
+ }
+
+ ret = marvell_nfc_wait_op(chip,
+ chip->data_interface.timings.sdr.tPROG_max);
+
+ marvell_nfc_disable_hw_ecc(chip);
+
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int marvell_nfc_hw_ecc_bch_write_oob_raw(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ int page)
+{
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+
+ memset(chip->data_buf, 0xFF, mtd->writesize);
+
+ return chip->ecc.write_page_raw(mtd, chip, chip->data_buf, true, page);
+}
+
+static int marvell_nfc_hw_ecc_bch_write_oob(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
+{
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+
+ memset(chip->data_buf, 0xFF, mtd->writesize);
+
+ return chip->ecc.write_page(mtd, chip, chip->data_buf, true, page);
+}
+
+/* NAND framework ->exec_op() hooks and related helpers */
+static void marvell_nfc_parse_instructions(struct nand_chip *chip,
+ const struct nand_subop *subop,
+ struct marvell_nfc_op *nfc_op)
+{
+ const struct nand_op_instr *instr = NULL;
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ bool first_cmd = true;
+ unsigned int op_id;
+ int i;
+
+ /* Reset the input structure as most of its fields will be OR'ed */
+ memset(nfc_op, 0, sizeof(struct marvell_nfc_op));
+
+ for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+ unsigned int offset, naddrs;
+ const u8 *addrs;
+ int len = nand_subop_get_data_len(subop, op_id);
+
+ instr = &subop->instrs[op_id];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ if (first_cmd)
+ nfc_op->ndcb[0] |=
+ NDCB0_CMD1(instr->ctx.cmd.opcode);
+ else
+ nfc_op->ndcb[0] |=
+ NDCB0_CMD2(instr->ctx.cmd.opcode) |
+ NDCB0_DBC;
+
+ nfc_op->cle_ale_delay_ns = instr->delay_ns;
+ first_cmd = false;
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ offset = nand_subop_get_addr_start_off(subop, op_id);
+ naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+ addrs = &instr->ctx.addr.addrs[offset];
+
+ nfc_op->ndcb[0] |= NDCB0_ADDR_CYC(naddrs);
+
+ for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
+ nfc_op->ndcb[1] |= addrs[i] << (8 * i);
+
+ if (naddrs >= 5)
+ nfc_op->ndcb[2] |= NDCB2_ADDR5_CYC(addrs[4]);
+ if (naddrs >= 6)
+ nfc_op->ndcb[3] |= NDCB3_ADDR6_CYC(addrs[5]);
+ if (naddrs == 7)
+ nfc_op->ndcb[3] |= NDCB3_ADDR7_CYC(addrs[6]);
+
+ nfc_op->cle_ale_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ nfc_op->data_instr = instr;
+ nfc_op->data_instr_idx = op_id;
+ nfc_op->ndcb[0] |= NDCB0_CMD_TYPE(TYPE_READ);
+ if (nfc->caps->is_nfcv2) {
+ nfc_op->ndcb[0] |=
+ NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
+ NDCB0_LEN_OVRD;
+ nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
+ }
+ nfc_op->data_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ nfc_op->data_instr = instr;
+ nfc_op->data_instr_idx = op_id;
+ nfc_op->ndcb[0] |= NDCB0_CMD_TYPE(TYPE_WRITE);
+ if (nfc->caps->is_nfcv2) {
+ nfc_op->ndcb[0] |=
+ NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
+ NDCB0_LEN_OVRD;
+ nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
+ }
+ nfc_op->data_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ nfc_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
+ nfc_op->rdy_delay_ns = instr->delay_ns;
+ break;
+ }
+ }
+}
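+
+/*
+ * For illustration (a sketch derived from the switch above, not from the
+ * datasheet): a classic page read subop made of CMD(READ0), five address
+ * cycles, CMD(READSTART), WAITRDY and DATA_IN would be encoded as:
+ *
+ *	ndcb[0] = NDCB0_CMD_TYPE(TYPE_READ) | NDCB0_CMD1(NAND_CMD_READ0) |
+ *		  NDCB0_CMD2(NAND_CMD_READSTART) | NDCB0_DBC |
+ *		  NDCB0_ADDR_CYC(5);
+ *	ndcb[1] = addrs[0] | addrs[1] << 8 | addrs[2] << 16 | addrs[3] << 24;
+ *	ndcb[2] = NDCB2_ADDR5_CYC(addrs[4]);
+ *
+ * with XTYPE_MONOLITHIC_RW/LEN_OVRD added to ndcb[0] and the rounded-up
+ * length stored in ndcb[3] on NFCv2.
+ */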
+
+static int marvell_nfc_xfer_data_pio(struct nand_chip *chip,
+ const struct nand_subop *subop,
+ struct marvell_nfc_op *nfc_op)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct nand_op_instr *instr = nfc_op->data_instr;
+ unsigned int op_id = nfc_op->data_instr_idx;
+ unsigned int len = nand_subop_get_data_len(subop, op_id);
+ unsigned int offset = nand_subop_get_data_start_off(subop, op_id);
+ bool reading = (instr->type == NAND_OP_DATA_IN_INSTR);
+ int ret;
+
+ if (instr->ctx.data.force_8bit)
+ marvell_nfc_force_byte_access(chip, true);
+
+ if (reading) {
+ u8 *in = instr->ctx.data.buf.in + offset;
+
+ ret = marvell_nfc_xfer_data_in_pio(nfc, in, len);
+ } else {
+ const u8 *out = instr->ctx.data.buf.out + offset;
+
+ ret = marvell_nfc_xfer_data_out_pio(nfc, out, len);
+ }
+
+ if (instr->ctx.data.force_8bit)
+ marvell_nfc_force_byte_access(chip, false);
+
+ return ret;
+}
+
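+/*
+ * Execute a whole read/write operation as a single controller 'job':
+ * command, address cycles and data transfer, with optional waits on the
+ * RB line around the data phase.
+ */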
+static int marvell_nfc_monolithic_access_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ bool reading;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+ reading = (nfc_op.data_instr->type == NAND_OP_DATA_IN_INSTR);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ | NDSR_WRDREQ,
+ "RDDREQ/WRDREQ while draining raw data");
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.cle_ale_delay_ns);
+
+ if (reading) {
+ if (nfc_op.rdy_timeout_ms) {
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+ }
+
+ cond_delay(nfc_op.rdy_delay_ns);
+ }
+
+ marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.data_delay_ns);
+
+ if (!reading) {
+ if (nfc_op.rdy_timeout_ms) {
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+ }
+
+ cond_delay(nfc_op.rdy_delay_ns);
+ }
+
+ /*
+ * NDCR ND_RUN bit should be cleared automatically at the end of each
+ * operation but experience shows that the behavior is buggy when it
+ * comes to writes (with LEN_OVRD). Clear it by hand in this case.
+ */
+ if (!reading) {
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+
+ writel_relaxed(readl(nfc->regs + NDCR) & ~NDCR_ND_RUN,
+ nfc->regs + NDCR);
+ }
+
+ return 0;
+}
+
+static int marvell_nfc_naked_access_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+
+ /*
+ * Naked accesses are different in that they need to be flagged as naked
+ * to the controller. Reset the controller register fields that encode
+ * the command type and refill them according to the ongoing operation.
+ */
+ nfc_op.ndcb[0] &= ~(NDCB0_CMD_TYPE(TYPE_MASK) |
+ NDCB0_CMD_XTYPE(XTYPE_MASK));
+ switch (subop->instrs[0].type) {
+ case NAND_OP_CMD_INSTR:
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_NAKED_CMD);
+ break;
+ case NAND_OP_ADDR_INSTR:
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_NAKED_ADDR);
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_READ) |
+ NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
+ break;
+ case NAND_OP_DATA_OUT_INSTR:
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_WRITE) |
+ NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
+ break;
+ default:
+ /* This should never happen */
+ break;
+ }
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+
+ if (!nfc_op.data_instr) {
+ ret = marvell_nfc_wait_cmdd(chip);
+ cond_delay(nfc_op.cle_ale_delay_ns);
+ return ret;
+ }
+
+ ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ | NDSR_WRDREQ,
+ "RDDREQ/WRDREQ while draining raw data");
+ if (ret)
+ return ret;
+
+ marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ /*
+ * NDCR ND_RUN bit should be cleared automatically at the end of each
+ * operation but experience shows that the behavior is buggy when it
+ * comes to writes (with LEN_OVRD). Clear it by hand in this case.
+ */
+ if (subop->instrs[0].type == NAND_OP_DATA_OUT_INSTR) {
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+
+ writel_relaxed(readl(nfc->regs + NDCR) & ~NDCR_ND_RUN,
+ nfc->regs + NDCR);
+ }
+
+ return 0;
+}
+
+static int marvell_nfc_naked_waitrdy_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ cond_delay(nfc_op.rdy_delay_ns);
+
+ return ret;
+}
+
+static int marvell_nfc_read_id_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+ nfc_op.ndcb[0] &= ~NDCB0_CMD_TYPE(TYPE_READ);
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_READ_ID);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
+ "RDDREQ while reading ID");
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.cle_ale_delay_ns);
+
+ if (nfc_op.rdy_timeout_ms) {
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+ }
+
+ cond_delay(nfc_op.rdy_delay_ns);
+
+ marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.data_delay_ns);
+
+ return 0;
+}
+
+static int marvell_nfc_read_status_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+ nfc_op.ndcb[0] &= ~NDCB0_CMD_TYPE(TYPE_READ);
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_STATUS);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
+ "RDDREQ while reading status");
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.cle_ale_delay_ns);
+
+ if (nfc_op.rdy_timeout_ms) {
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+ }
+
+ cond_delay(nfc_op.rdy_delay_ns);
+
+ marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.data_delay_ns);
+
+ return 0;
+}
+
+static int marvell_nfc_reset_cmd_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_RESET);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.cle_ale_delay_ns);
+
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.rdy_delay_ns);
+
+ return 0;
+}
+
+static int marvell_nfc_erase_cmd_type_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct marvell_nfc_op nfc_op;
+ int ret;
+
+ marvell_nfc_parse_instructions(chip, subop, &nfc_op);
+ nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_ERASE);
+
+ ret = marvell_nfc_prepare_cmd(chip);
+ if (ret)
+ return ret;
+
+ marvell_nfc_send_cmd(chip, &nfc_op);
+ ret = marvell_nfc_wait_cmdd(chip);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.cle_ale_delay_ns);
+
+ ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
+ if (ret)
+ return ret;
+
+ cond_delay(nfc_op.rdy_delay_ns);
+
+ return 0;
+}
+
+static const struct nand_op_parser marvell_nfcv2_op_parser = NAND_OP_PARSER(
+ /* Monolithic reads/writes */
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_monolithic_access_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, MAX_ADDRESS_CYC_NFCV2),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_monolithic_access_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV2),
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_CHUNK_SIZE),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
+ /* Naked commands */
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_access_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_access_exec,
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV2)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_access_exec,
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_access_exec,
+ NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_CHUNK_SIZE)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_waitrdy_exec,
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ );
+
+static const struct nand_op_parser marvell_nfcv1_op_parser = NAND_OP_PARSER(
+ /* Naked commands not supported, use a function for each pattern */
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_read_id_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV1),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_erase_cmd_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV1),
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_read_status_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_reset_cmd_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ NAND_OP_PARSER_PATTERN(
+ marvell_nfc_naked_waitrdy_exec,
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ );
+
+static int marvell_nfc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+
+ if (nfc->caps->is_nfcv2)
+ return nand_op_parser_exec_op(chip, &marvell_nfcv2_op_parser,
+ op, check_only);
+ else
+ return nand_op_parser_exec_op(chip, &marvell_nfcv1_op_parser,
+ op, check_only);
+}
+
+/*
+ * Layouts were broken in the old pxa3xx_nand driver; these are supposed to
+ * be usable.
+ */
+static int marvell_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = (lt->full_chunk_cnt * lt->ecc_bytes) +
+ lt->last_ecc_bytes;
+ oobregion->offset = mtd->oobsize - oobregion->length;
+
+ return 0;
+}
+
+static int marvell_nand_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+
+ if (section)
+ return -ERANGE;
+
+ /*
+ * The bootrom looks in bytes 0 and 5 for bad block markers with the
+ * 4KB page / 4-bit BCH combination.
+ */
+ if (mtd->writesize == SZ_4K && lt->data_bytes == SZ_2K)
+ oobregion->offset = 6;
+ else
+ oobregion->offset = 2;
+
+ oobregion->length = (lt->full_chunk_cnt * lt->spare_bytes) +
+ lt->last_spare_bytes - oobregion->offset;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops marvell_nand_ooblayout_ops = {
+ .ecc = marvell_nand_ooblayout_ecc,
+ .free = marvell_nand_ooblayout_free,
+};
+
+static int marvell_nand_hw_ecc_ctrl_init(struct mtd_info *mtd,
+ struct nand_ecc_ctrl *ecc)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ const struct marvell_hw_ecc_layout *l;
+ int i;
+
+ if (!nfc->caps->is_nfcv2 &&
+ (mtd->writesize + mtd->oobsize > MAX_CHUNK_SIZE)) {
+ dev_err(nfc->dev,
+ "NFCv1: writesize (%d) cannot be bigger than a chunk (%d)\n",
+ mtd->writesize, MAX_CHUNK_SIZE - mtd->oobsize);
+ return -ENOTSUPP;
+ }
+
+ to_marvell_nand(chip)->layout = NULL;
+ for (i = 0; i < ARRAY_SIZE(marvell_nfc_layouts); i++) {
+ l = &marvell_nfc_layouts[i];
+ if (mtd->writesize == l->writesize &&
+ ecc->size == l->chunk && ecc->strength == l->strength) {
+ to_marvell_nand(chip)->layout = l;
+ break;
+ }
+ }
+
+ if (!to_marvell_nand(chip)->layout ||
+ (!nfc->caps->is_nfcv2 && ecc->strength > 1)) {
+ dev_err(nfc->dev,
+ "ECC strength %d at page size %d is not supported\n",
+ ecc->strength, mtd->writesize);
+ return -ENOTSUPP;
+ }
+
+ mtd_set_ooblayout(mtd, &marvell_nand_ooblayout_ops);
+ ecc->steps = l->nchunks;
+ ecc->size = l->data_bytes;
+
+ if (ecc->strength == 1) {
+ chip->ecc.algo = NAND_ECC_HAMMING;
+ ecc->read_page_raw = marvell_nfc_hw_ecc_hmg_read_page_raw;
+ ecc->read_page = marvell_nfc_hw_ecc_hmg_read_page;
+ ecc->read_oob_raw = marvell_nfc_hw_ecc_hmg_read_oob_raw;
+ ecc->read_oob = ecc->read_oob_raw;
+ ecc->write_page_raw = marvell_nfc_hw_ecc_hmg_write_page_raw;
+ ecc->write_page = marvell_nfc_hw_ecc_hmg_write_page;
+ ecc->write_oob_raw = marvell_nfc_hw_ecc_hmg_write_oob_raw;
+ ecc->write_oob = ecc->write_oob_raw;
+ } else {
+ chip->ecc.algo = NAND_ECC_BCH;
+ ecc->strength = 16;
+ ecc->read_page_raw = marvell_nfc_hw_ecc_bch_read_page_raw;
+ ecc->read_page = marvell_nfc_hw_ecc_bch_read_page;
+ ecc->read_oob_raw = marvell_nfc_hw_ecc_bch_read_oob_raw;
+ ecc->read_oob = marvell_nfc_hw_ecc_bch_read_oob;
+ ecc->write_page_raw = marvell_nfc_hw_ecc_bch_write_page_raw;
+ ecc->write_page = marvell_nfc_hw_ecc_bch_write_page;
+ ecc->write_oob_raw = marvell_nfc_hw_ecc_bch_write_oob_raw;
+ ecc->write_oob = marvell_nfc_hw_ecc_bch_write_oob;
+ }
+
+ return 0;
+}
+
+static int marvell_nand_ecc_init(struct mtd_info *mtd,
+ struct nand_ecc_ctrl *ecc)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ int ret;
+
+ if (ecc->mode != NAND_ECC_NONE && (!ecc->size || !ecc->strength)) {
+ if (chip->ecc_step_ds && chip->ecc_strength_ds) {
+ ecc->size = chip->ecc_step_ds;
+ ecc->strength = chip->ecc_strength_ds;
+ } else {
+ dev_info(nfc->dev,
+ "No minimum ECC strength, using 1b/512B\n");
+ ecc->size = 512;
+ ecc->strength = 1;
+ }
+ }
+
+ switch (ecc->mode) {
+ case NAND_ECC_HW:
+ ret = marvell_nand_hw_ecc_ctrl_init(mtd, ecc);
+ if (ret)
+ return ret;
+ break;
+ case NAND_ECC_NONE:
+ case NAND_ECC_SOFT:
+ if (!nfc->caps->is_nfcv2 && mtd->writesize != SZ_512 &&
+ mtd->writesize != SZ_2K) {
+ dev_err(nfc->dev, "NFCv1 cannot write %d bytes pages\n",
+ mtd->writesize);
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
+static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 8,
+ .len = 6,
+ .veroffs = 14,
+ .maxblocks = 8, /* Last 8 blocks in each chip */
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 8,
+ .len = 6,
+ .veroffs = 14,
+ .maxblocks = 8, /* Last 8 blocks in each chip */
+ .pattern = bbt_mirror_pattern
+};
+
+static int marvell_nfc_setup_data_interface(struct mtd_info *mtd, int chipnr,
+ const struct nand_data_interface
+ *conf)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
+ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
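+	/*
+	 * As an example (assuming a 250 MHz ECC clock), period_ns =
+	 * (1000000000 / 250000000) * 2 = 8 ns, i.e. the controller core
+	 * effectively runs at 125 MHz.
+	 */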
+ unsigned int period_ns = 1000000000 / clk_get_rate(nfc->ecc_clk) * 2;
+ const struct nand_sdr_timings *sdr;
+ struct marvell_nfc_timings nfc_tmg;
+ int read_delay;
+
+ sdr = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+
+ /*
+ * SDR timings are given in pico-seconds while NFC timings must be
+ * expressed in NAND controller clock cycles; the controller clock runs
+ * at half the frequency of the ECC clock retrieved by clk_get_rate().
+ * This is not written anywhere in the datasheet but was observed
+ * with an oscilloscope.
+ *
+ * The NFC datasheet gives equations from which these calculations
+ * are derived; they tend to be slightly more restrictive than the
+ * given core timings and may improve the overall speed.
+ */
+ nfc_tmg.tRP = TO_CYCLES(DIV_ROUND_UP(sdr->tRC_min, 2), period_ns) - 1;
+ nfc_tmg.tRH = nfc_tmg.tRP;
+ nfc_tmg.tWP = TO_CYCLES(DIV_ROUND_UP(sdr->tWC_min, 2), period_ns) - 1;
+ nfc_tmg.tWH = nfc_tmg.tWP;
+ nfc_tmg.tCS = TO_CYCLES(sdr->tCS_min, period_ns);
+ nfc_tmg.tCH = TO_CYCLES(sdr->tCH_min, period_ns) - 1;
+ nfc_tmg.tADL = TO_CYCLES(sdr->tADL_min, period_ns);
+ /*
+ * Read delay is the time of propagation from SoC pins to NFC internal
+ * logic. With non-EDO timings, this is MIN_RD_DEL_CNT clock cycles. In
+ * EDO mode, an additional delay of tRH must be taken into account so
+ * the data is sampled on the falling edge instead of the rising edge.
+ */
+ read_delay = sdr->tRC_min >= 30000 ?
+ MIN_RD_DEL_CNT : MIN_RD_DEL_CNT + nfc_tmg.tRH;
+
+ nfc_tmg.tAR = TO_CYCLES(sdr->tAR_min, period_ns);
+ /*
+ * tWHR and tRHW are supposed to be read-to-write delays (and vice
+ * versa) but in some cases, e.g. when doing a column change, they must
+ * be greater than that to make sure the tCCS delay is respected.
+ */
+ nfc_tmg.tWHR = TO_CYCLES(max_t(int, sdr->tWHR_min, sdr->tCCS_min),
+ period_ns) - 2,
+ nfc_tmg.tRHW = TO_CYCLES(max_t(int, sdr->tRHW_min, sdr->tCCS_min),
+ period_ns);
+
+ /* Use WAIT_MODE (wait for RB line) instead of only relying on delays */
+ nfc_tmg.tR = TO_CYCLES(sdr->tWB_max, period_ns);
+
+ if (chipnr < 0)
+ return 0;
+
+ marvell_nand->ndtr0 =
+ NDTR0_TRP(nfc_tmg.tRP) |
+ NDTR0_TRH(nfc_tmg.tRH) |
+ NDTR0_ETRP(nfc_tmg.tRP) |
+ NDTR0_TWP(nfc_tmg.tWP) |
+ NDTR0_TWH(nfc_tmg.tWH) |
+ NDTR0_TCS(nfc_tmg.tCS) |
+ NDTR0_TCH(nfc_tmg.tCH) |
+ NDTR0_RD_CNT_DEL(read_delay) |
+ NDTR0_SELCNTR |
+ NDTR0_TADL(nfc_tmg.tADL);
+
+ marvell_nand->ndtr1 =
+ NDTR1_TAR(nfc_tmg.tAR) |
+ NDTR1_TWHR(nfc_tmg.tWHR) |
+ NDTR1_TRHW(nfc_tmg.tRHW) |
+ NDTR1_WAIT_MODE |
+ NDTR1_TR(nfc_tmg.tR);
+
+ return 0;
+}
+
+static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
+ struct device_node *np)
+{
+ struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(dev);
+ struct marvell_nand_chip *marvell_nand;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ int nsels, ret, i;
+ u32 cs, rb;
+
+ /*
+ * The legacy "num-cs" property indicates the number of CS on the only
+ * chip connected to the controller (legacy bindings does not support
+ * more than one chip). CS are only incremented one by one while the RB
+ * pin is always the #0.
+ *
+ * When not using legacy bindings, a couple of "reg" and "nand-rb"
+ * properties must be filled. For each chip, expressed as a subnode,
+ * "reg" points to the CS lines and "nand-rb" to the RB line.
+ */
+ if (pdata) {
+ nsels = 1;
+ } else if (nfc->caps->legacy_of_bindings &&
+ !of_get_property(np, "num-cs", &nsels)) {
+ dev_err(dev, "missing num-cs property\n");
+ return -EINVAL;
+ } else if (!of_get_property(np, "reg", &nsels)) {
+ dev_err(dev, "missing reg property\n");
+ return -EINVAL;
+ }
+
+ if (!pdata)
+ nsels /= sizeof(u32);
+ if (!nsels) {
+ dev_err(dev, "invalid reg property size\n");
+ return -EINVAL;
+ }
+
+ /* Alloc the nand chip structure */
+ marvell_nand = devm_kzalloc(dev, sizeof(*marvell_nand) +
+ (nsels *
+ sizeof(struct marvell_nand_chip_sel)),
+ GFP_KERNEL);
+ if (!marvell_nand) {
+ dev_err(dev, "could not allocate chip structure\n");
+ return -ENOMEM;
+ }
+
+ marvell_nand->nsels = nsels;
+ marvell_nand->selected_die = -1;
+
+ for (i = 0; i < nsels; i++) {
+ if (pdata || nfc->caps->legacy_of_bindings) {
+ /*
+ * Legacy bindings use the CS lines in natural
+ * order (0, 1, ...)
+ */
+ cs = i;
+ } else {
+ /* Retrieve CS id */
+ ret = of_property_read_u32_index(np, "reg", i, &cs);
+ if (ret) {
+ dev_err(dev, "could not retrieve reg property: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (cs >= nfc->caps->max_cs_nb) {
+ dev_err(dev, "invalid reg value: %u (max CS = %d)\n",
+ cs, nfc->caps->max_cs_nb);
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(cs, &nfc->assigned_cs)) {
+ dev_err(dev, "CS %d already assigned\n", cs);
+ return -EINVAL;
+ }
+
+ /*
+ * The cs variable represents the chip select id, which must be
+ * converted into bit fields for NDCB0 and NDCB2 to select the
+ * right chip. Unfortunately, due to a lack of information on
+ * the subject and inconsistent documentation, the user should not
+ * use CS1 and CS3 at all as asserting them is not supported in
+ * a reliable way (due to multiplexing inside the ADDR5 field).
+ */
+ marvell_nand->sels[i].cs = cs;
+ switch (cs) {
+ case 0:
+ case 2:
+ marvell_nand->sels[i].ndcb0_csel = 0;
+ break;
+ case 1:
+ case 3:
+ marvell_nand->sels[i].ndcb0_csel = NDCB0_CSEL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Retrieve RB id */
+ if (pdata || nfc->caps->legacy_of_bindings) {
+ /* Legacy bindings always use RB #0 */
+ rb = 0;
+ } else {
+ ret = of_property_read_u32_index(np, "nand-rb", i,
+ &rb);
+ if (ret) {
+ dev_err(dev,
+ "could not retrieve RB property: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (rb >= nfc->caps->max_rb_nb) {
+ dev_err(dev, "invalid reg value: %u (max RB = %d)\n",
+ rb, nfc->caps->max_rb_nb);
+ return -EINVAL;
+ }
+
+ marvell_nand->sels[i].rb = rb;
+ }
+
+ chip = &marvell_nand->chip;
+ chip->controller = &nfc->controller;
+ nand_set_flash_node(chip, np);
+
+ chip->exec_op = marvell_nfc_exec_op;
+ chip->select_chip = marvell_nfc_select_chip;
+ if (nfc->caps->is_nfcv2 &&
+ !of_property_read_bool(np, "marvell,nand-keep-config"))
+ chip->setup_data_interface = marvell_nfc_setup_data_interface;
+
+ mtd = nand_to_mtd(chip);
+ mtd->dev.parent = dev;
+
+ /*
+ * Default to HW ECC engine mode. If the nand-ecc-mode property is given
+ * in the DT node, this entry will be overwritten in nand_scan_ident().
+ */
+ chip->ecc.mode = NAND_ECC_HW;
+
+ /*
+ * Save a reference value for timing registers before
+ * ->setup_data_interface() is called.
+ */
+ marvell_nand->ndtr0 = readl_relaxed(nfc->regs + NDTR0);
+ marvell_nand->ndtr1 = readl_relaxed(nfc->regs + NDTR1);
+
+ chip->options |= NAND_BUSWIDTH_AUTO;
+ ret = nand_scan_ident(mtd, marvell_nand->nsels, NULL);
+ if (ret) {
+ dev_err(dev, "could not identify the nand chip\n");
+ return ret;
+ }
+
+ if (pdata && pdata->flash_bbt)
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
+
+ if (chip->bbt_options & NAND_BBT_USE_FLASH) {
+ /*
+ * We'll use a bad block table stored in-flash and don't
+ * allow writing the bad block marker to the flash.
+ */
+ chip->bbt_options |= NAND_BBT_NO_OOB_BBM;
+ chip->bbt_td = &bbt_main_descr;
+ chip->bbt_md = &bbt_mirror_descr;
+ }
+
+ /* Save the chip-specific fields of NDCR */
+ marvell_nand->ndcr = NDCR_PAGE_SZ(mtd->writesize);
+ if (chip->options & NAND_BUSWIDTH_16)
+ marvell_nand->ndcr |= NDCR_DWIDTH_M | NDCR_DWIDTH_C;
+
+ /*
+ * On small page NANDs, only one cycle is needed to pass the
+ * column address.
+ */
+ if (mtd->writesize <= 512) {
+ marvell_nand->addr_cyc = 1;
+ } else {
+ marvell_nand->addr_cyc = 2;
+ marvell_nand->ndcr |= NDCR_RA_START;
+ }
+
+ /*
+ * Now add the number of cycles needed to pass the row
+ * address.
+ *
+ * Addressing a chip using CS 2 or 3 should also need the third row
+ * cycle but due to inconsistencies in the documentation and lack of
+ * hardware to test this situation, this case is not supported.
+ */
+ if (chip->options & NAND_ROW_ADDR_3)
+ marvell_nand->addr_cyc += 3;
+ else
+ marvell_nand->addr_cyc += 2;
+
+ if (pdata) {
+ chip->ecc.size = pdata->ecc_step_size;
+ chip->ecc.strength = pdata->ecc_strength;
+ }
+
+ ret = marvell_nand_ecc_init(mtd, &chip->ecc);
+ if (ret) {
+ dev_err(dev, "ECC init failed: %d\n", ret);
+ return ret;
+ }
+
+ if (chip->ecc.mode == NAND_ECC_HW) {
+ /*
+ * Subpage write is not available with hardware ECC; also prohibit
+ * subpage read, as otherwise subpage accesses would still be
+ * allowed from userspace and subpage writes, if used, would lead
+ * to numerous uncorrectable ECC errors.
+ */
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+ }
+
+ if (pdata || nfc->caps->legacy_of_bindings) {
+ /*
+ * We keep the MTD name unchanged to avoid breaking platforms
+ * where the MTD cmdline parser is used and the bootloader
+ * has not been updated to use the new naming scheme.
+ */
+ mtd->name = "pxa3xx_nand-0";
+ } else if (!mtd->name) {
+ /*
+ * If the new bindings are used and the bootloader has not been
+ * updated to pass a new mtdparts parameter on the cmdline, you
+ * should define the following property in your NAND node, e.g.:
+ *
+ * label = "main-storage";
+ *
+ * This way, mtd->name will be set by the core when
+ * nand_set_flash_node() is called.
+ */
+ mtd->name = devm_kasprintf(nfc->dev, GFP_KERNEL,
+ "%s:nand.%d", dev_name(nfc->dev),
+ marvell_nand->sels[0].cs);
+ if (!mtd->name) {
+ dev_err(nfc->dev, "Failed to allocate mtd->name\n");
+ return -ENOMEM;
+ }
+ }
+
+ ret = nand_scan_tail(mtd);
+ if (ret) {
+ dev_err(dev, "nand_scan_tail failed: %d\n", ret);
+ return ret;
+ }
+
+ if (pdata)
+ /* Legacy bindings support only one chip */
+ ret = mtd_device_register(mtd, pdata->parts[0],
+ pdata->nr_parts[0]);
+ else
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ dev_err(dev, "failed to register mtd device: %d\n", ret);
+ nand_release(mtd);
+ return ret;
+ }
+
+ list_add_tail(&marvell_nand->node, &nfc->chips);
+
+ return 0;
+}
+
+static int marvell_nand_chips_init(struct device *dev, struct marvell_nfc *nfc)
+{
+ struct device_node *np = dev->of_node;
+ struct device_node *nand_np;
+ int max_cs = nfc->caps->max_cs_nb;
+ int nchips;
+ int ret;
+
+ if (!np)
+ nchips = 1;
+ else
+ nchips = of_get_child_count(np);
+
+ if (nchips > max_cs) {
+ dev_err(dev, "too many NAND chips: %d (max = %d CS)\n", nchips,
+ max_cs);
+ return -EINVAL;
+ }
+
+ /*
+ * Legacy bindings do not use child nodes to describe NAND chip
+ * properties and layout. Instead, NAND properties are mixed with the
+ * controller ones, and partitions are defined as direct subnodes of the
+ * NAND controller node.
+ */
+ if (nfc->caps->legacy_of_bindings) {
+ ret = marvell_nand_chip_init(dev, nfc, np);
+ return ret;
+ }
+
+ for_each_child_of_node(np, nand_np) {
+ ret = marvell_nand_chip_init(dev, nfc, nand_np);
+ if (ret) {
+ of_node_put(nand_np);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void marvell_nand_chips_cleanup(struct marvell_nfc *nfc)
+{
+ struct marvell_nand_chip *entry, *temp;
+
+ list_for_each_entry_safe(entry, temp, &nfc->chips, node) {
+ nand_release(nand_to_mtd(&entry->chip));
+ list_del(&entry->node);
+ }
+}
+
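+/*
+ * NFCv1 data transfers may go through an external DMA channel handled by
+ * the pxa-dma engine: request the channel, configure it against the NDDB
+ * data register and allocate the bounce buffer needed to satisfy the
+ * 32-byte length granularity.
+ */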
+static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
+{
+ struct platform_device *pdev = container_of(nfc->dev,
+ struct platform_device,
+ dev);
+ struct dma_slave_config config = {};
+ struct resource *r;
+ dma_cap_mask_t mask;
+ struct pxad_param param;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_PXA_DMA)) {
+ dev_warn(nfc->dev,
+ "DMA not enabled in configuration\n");
+ return -ENOTSUPP;
+ }
+
+ ret = dma_set_mask_and_coherent(nfc->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (!r) {
+ dev_err(nfc->dev, "No resource defined for data DMA\n");
+ return -ENXIO;
+ }
+
+ param.drcmr = r->start;
+ param.prio = PXAD_PRIO_LOWEST;
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ nfc->dma_chan =
+ dma_request_slave_channel_compat(mask, pxad_filter_fn,
+ &param, nfc->dev,
+ "data");
+ if (!nfc->dma_chan) {
+ dev_err(nfc->dev,
+ "Unable to request data DMA channel\n");
+ return -ENODEV;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r)
+ return -ENXIO;
+
+ config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ config.src_addr = r->start + NDDB;
+ config.dst_addr = r->start + NDDB;
+ config.src_maxburst = 32;
+ config.dst_maxburst = 32;
+ ret = dmaengine_slave_config(nfc->dma_chan, &config);
+ if (ret < 0) {
+ dev_err(nfc->dev, "Failed to configure DMA channel\n");
+ return ret;
+ }
+
+ /*
+ * DMA transfers must have a length that is a multiple of 32, which may
+ * be bigger than the destination buffer. Use this bounce buffer for the
+ * DMA transfer and then copy the desired amount of data to the
+ * provided buffer.
+ */
+ nfc->dma_buf = kmalloc(MAX_CHUNK_SIZE, GFP_KERNEL | GFP_DMA);
+ if (!nfc->dma_buf)
+ return -ENOMEM;
+
+ nfc->use_dma = true;
+
+ return 0;
+}
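
Since DMA lengths are rounded up to a multiple of 32 bytes, reads served through this bounce buffer must copy back only the amount the caller asked for. A minimal sketch of that read path, assuming a hypothetical marvell_nfc_xfer_dma() helper (not one of the driver's real symbols):

/* Illustrative only: DMA into the 32-byte-aligned bounce buffer,
 * then hand the caller just the requested length. */
static int marvell_nfc_bounce_read(struct marvell_nfc *nfc, void *buf,
				   unsigned int len)
{
	unsigned int dma_len = ALIGN(len, 32);	/* may exceed len */
	int ret;

	ret = marvell_nfc_xfer_dma(nfc, nfc->dma_buf, dma_len); /* hypothetical */
	if (ret)
		return ret;

	memcpy(buf, nfc->dma_buf, len);
	return 0;
}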
+
+static int marvell_nfc_init(struct marvell_nfc *nfc)
+{
+ struct device_node *np = nfc->dev->of_node;
+
+ /*
+ * Some SoCs like A7k/A8k need to manually enable the NAND
+ * controller, gated clocks and reset bits to avoid depending on the
+ * bootloader. This is done through the System Functions registers.
+ */
+ if (nfc->caps->need_system_controller) {
+ struct regmap *sysctrl_base =
+ syscon_regmap_lookup_by_phandle(np,
+ "marvell,system-controller");
+ u32 reg;
+
+ if (IS_ERR(sysctrl_base))
+ return PTR_ERR(sysctrl_base);
+
+ reg = GENCONF_SOC_DEVICE_MUX_NFC_EN |
+ GENCONF_SOC_DEVICE_MUX_ECC_CLK_RST |
+ GENCONF_SOC_DEVICE_MUX_ECC_CORE_RST |
+ GENCONF_SOC_DEVICE_MUX_NFC_INT_EN;
+ regmap_write(sysctrl_base, GENCONF_SOC_DEVICE_MUX, reg);
+
+ regmap_read(sysctrl_base, GENCONF_CLK_GATING_CTRL, &reg);
+ reg |= GENCONF_CLK_GATING_CTRL_ND_GATE;
+ regmap_write(sysctrl_base, GENCONF_CLK_GATING_CTRL, reg);
+
+ regmap_read(sysctrl_base, GENCONF_ND_CLK_CTRL, &reg);
+ reg |= GENCONF_ND_CLK_CTRL_EN;
+ regmap_write(sysctrl_base, GENCONF_ND_CLK_CTRL, reg);
+ }
+
+ /* Configure the DMA if appropriate */
+ if (!nfc->caps->is_nfcv2)
+ marvell_nfc_init_dma(nfc);
+
+ /*
+ * ECC operations and interrupts are only enabled when specifically
+ * needed. ECC shall not be activated in the early stages (it makes
+ * probe fail). The arbiter flag, even if marked as "reserved", must be
+ * set (empirical). The SPARE_EN bit must always be set or the ECC bytes
+ * will not be at the same offset in the read page, which would break
+ * the protection.
+ */
+ writel_relaxed(NDCR_ALL_INT | NDCR_ND_ARB_EN | NDCR_SPARE_EN |
+ NDCR_RD_ID_CNT(NFCV1_READID_LEN), nfc->regs + NDCR);
+ writel_relaxed(0xFFFFFFFF, nfc->regs + NDSR);
+ writel_relaxed(0, nfc->regs + NDECCCTRL);
+
+ return 0;
+}
+
+static int marvell_nfc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *r;
+ struct marvell_nfc *nfc;
+ int ret;
+ int irq;
+
+ nfc = devm_kzalloc(&pdev->dev, sizeof(struct marvell_nfc),
+ GFP_KERNEL);
+ if (!nfc)
+ return -ENOMEM;
+
+ nfc->dev = dev;
+ nand_hw_control_init(&nfc->controller);
+ INIT_LIST_HEAD(&nfc->chips);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nfc->regs = devm_ioremap_resource(dev, r);
+ if (IS_ERR(nfc->regs))
+ return PTR_ERR(nfc->regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "failed to retrieve irq\n");
+ return irq;
+ }
+
+ nfc->ecc_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(nfc->ecc_clk))
+ return PTR_ERR(nfc->ecc_clk);
+
+ ret = clk_prepare_enable(nfc->ecc_clk);
+ if (ret)
+ return ret;
+
+ marvell_nfc_disable_int(nfc, NDCR_ALL_INT);
+ marvell_nfc_clear_int(nfc, NDCR_ALL_INT);
+ ret = devm_request_irq(dev, irq, marvell_nfc_isr,
+ 0, "marvell-nfc", nfc);
+ if (ret)
+ goto unprepare_clk;
+
+ /* Get NAND controller capabilities */
+ if (pdev->id_entry)
+ nfc->caps = (void *)pdev->id_entry->driver_data;
+ else
+ nfc->caps = of_device_get_match_data(&pdev->dev);
+
+ if (!nfc->caps) {
+ dev_err(dev, "Could not retrieve NFC caps\n");
+ ret = -EINVAL;
+ goto unprepare_clk;
+ }
+
+ /* Init the controller and then probe the chips */
+ ret = marvell_nfc_init(nfc);
+ if (ret)
+ goto unprepare_clk;
+
+ platform_set_drvdata(pdev, nfc);
+
+ ret = marvell_nand_chips_init(dev, nfc);
+ if (ret)
+ goto unprepare_clk;
+
+ return 0;
+
+unprepare_clk:
+ clk_disable_unprepare(nfc->ecc_clk);
+
+ return ret;
+}
+
+static int marvell_nfc_remove(struct platform_device *pdev)
+{
+ struct marvell_nfc *nfc = platform_get_drvdata(pdev);
+
+ marvell_nand_chips_cleanup(nfc);
+
+ if (nfc->use_dma) {
+ dmaengine_terminate_all(nfc->dma_chan);
+ dma_release_channel(nfc->dma_chan);
+ }
+
+ clk_disable_unprepare(nfc->ecc_clk);
+
+ return 0;
+}
+
+static const struct marvell_nfc_caps marvell_armada_8k_nfc_caps = {
+ .max_cs_nb = 4,
+ .max_rb_nb = 2,
+ .need_system_controller = true,
+ .is_nfcv2 = true,
+};
+
+static const struct marvell_nfc_caps marvell_armada370_nfc_caps = {
+ .max_cs_nb = 4,
+ .max_rb_nb = 2,
+ .is_nfcv2 = true,
+};
+
+static const struct marvell_nfc_caps marvell_pxa3xx_nfc_caps = {
+ .max_cs_nb = 2,
+ .max_rb_nb = 1,
+ .use_dma = true,
+};
+
+static const struct marvell_nfc_caps marvell_armada_8k_nfc_legacy_caps = {
+ .max_cs_nb = 4,
+ .max_rb_nb = 2,
+ .need_system_controller = true,
+ .legacy_of_bindings = true,
+ .is_nfcv2 = true,
+};
+
+static const struct marvell_nfc_caps marvell_armada370_nfc_legacy_caps = {
+ .max_cs_nb = 4,
+ .max_rb_nb = 2,
+ .legacy_of_bindings = true,
+ .is_nfcv2 = true,
+};
+
+static const struct marvell_nfc_caps marvell_pxa3xx_nfc_legacy_caps = {
+ .max_cs_nb = 2,
+ .max_rb_nb = 1,
+ .legacy_of_bindings = true,
+ .use_dma = true,
+};
+
+static const struct platform_device_id marvell_nfc_platform_ids[] = {
+ {
+ .name = "pxa3xx-nand",
+ .driver_data = (kernel_ulong_t)&marvell_pxa3xx_nfc_legacy_caps,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(platform, marvell_nfc_platform_ids);
+
+static const struct of_device_id marvell_nfc_of_ids[] = {
+ {
+ .compatible = "marvell,armada-8k-nand-controller",
+ .data = &marvell_armada_8k_nfc_caps,
+ },
+ {
+ .compatible = "marvell,armada370-nand-controller",
+ .data = &marvell_armada370_nfc_caps,
+ },
+ {
+ .compatible = "marvell,pxa3xx-nand-controller",
+ .data = &marvell_pxa3xx_nfc_caps,
+ },
+ /* Support for old/deprecated bindings: */
+ {
+ .compatible = "marvell,armada-8k-nand",
+ .data = &marvell_armada_8k_nfc_legacy_caps,
+ },
+ {
+ .compatible = "marvell,armada370-nand",
+ .data = &marvell_armada370_nfc_legacy_caps,
+ },
+ {
+ .compatible = "marvell,pxa3xx-nand",
+ .data = &marvell_pxa3xx_nfc_legacy_caps,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, marvell_nfc_of_ids);
+
+static struct platform_driver marvell_nfc_driver = {
+ .driver = {
+ .name = "marvell-nfc",
+ .of_match_table = marvell_nfc_of_ids,
+ },
+ .id_table = marvell_nfc_platform_ids,
+ .probe = marvell_nfc_probe,
+ .remove = marvell_nfc_remove,
+};
+module_platform_driver(marvell_nfc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Marvell NAND controller driver");
diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c
index c51d214d169e..40d86a861a70 100644
--- a/drivers/mtd/nand/mtk_ecc.c
+++ b/drivers/mtd/nand/mtk_ecc.c
@@ -34,34 +34,28 @@
#define ECC_ENCCON (0x00)
#define ECC_ENCCNFG (0x04)
-#define ECC_MODE_SHIFT (5)
#define ECC_MS_SHIFT (16)
#define ECC_ENCDIADDR (0x08)
#define ECC_ENCIDLE (0x0C)
-#define ECC_ENCIRQ_EN (0x80)
-#define ECC_ENCIRQ_STA (0x84)
#define ECC_DECCON (0x100)
#define ECC_DECCNFG (0x104)
#define DEC_EMPTY_EN BIT(31)
#define DEC_CNFG_CORRECT (0x3 << 12)
#define ECC_DECIDLE (0x10C)
#define ECC_DECENUM0 (0x114)
-#define ECC_DECDONE (0x124)
-#define ECC_DECIRQ_EN (0x200)
-#define ECC_DECIRQ_STA (0x204)
#define ECC_TIMEOUT (500000)
#define ECC_IDLE_REG(op) ((op) == ECC_ENCODE ? ECC_ENCIDLE : ECC_DECIDLE)
#define ECC_CTL_REG(op) ((op) == ECC_ENCODE ? ECC_ENCCON : ECC_DECCON)
-#define ECC_IRQ_REG(op) ((op) == ECC_ENCODE ? \
- ECC_ENCIRQ_EN : ECC_DECIRQ_EN)
struct mtk_ecc_caps {
u32 err_mask;
const u8 *ecc_strength;
+ const u32 *ecc_regs;
u8 num_ecc_strength;
- u32 encode_parity_reg0;
+ u8 ecc_mode_shift;
+ u32 parity_bits;
int pg_irq_sel;
};
@@ -89,6 +83,46 @@ static const u8 ecc_strength_mt2712[] = {
40, 44, 48, 52, 56, 60, 68, 72, 80
};
+static const u8 ecc_strength_mt7622[] = {
+ 4, 6, 8, 10, 12, 14, 16
+};
+
+enum mtk_ecc_regs {
+ ECC_ENCPAR00,
+ ECC_ENCIRQ_EN,
+ ECC_ENCIRQ_STA,
+ ECC_DECDONE,
+ ECC_DECIRQ_EN,
+ ECC_DECIRQ_STA,
+};
+
+static int mt2701_ecc_regs[] = {
+ [ECC_ENCPAR00] = 0x10,
+ [ECC_ENCIRQ_EN] = 0x80,
+ [ECC_ENCIRQ_STA] = 0x84,
+ [ECC_DECDONE] = 0x124,
+ [ECC_DECIRQ_EN] = 0x200,
+ [ECC_DECIRQ_STA] = 0x204,
+};
+
+static int mt2712_ecc_regs[] = {
+ [ECC_ENCPAR00] = 0x300,
+ [ECC_ENCIRQ_EN] = 0x80,
+ [ECC_ENCIRQ_STA] = 0x84,
+ [ECC_DECDONE] = 0x124,
+ [ECC_DECIRQ_EN] = 0x200,
+ [ECC_DECIRQ_STA] = 0x204,
+};
+
+static int mt7622_ecc_regs[] = {
+ [ECC_ENCPAR00] = 0x10,
+ [ECC_ENCIRQ_EN] = 0x30,
+ [ECC_ENCIRQ_STA] = 0x34,
+ [ECC_DECDONE] = 0x11c,
+ [ECC_DECIRQ_EN] = 0x140,
+ [ECC_DECIRQ_STA] = 0x144,
+};
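
With the per-SoC offsets gathered in these tables, accesses to the variable registers go through one level of indirection instead of compile-time constants. A minimal sketch of the lookup pattern the reworked code below uses:

/* Sketch: resolve a logical register through the caps table. */
static u16 mtk_ecc_decirq_sta(struct mtk_ecc *ecc)
{
	/* ecc_regs[] maps enum mtk_ecc_regs entries to MMIO offsets */
	return readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_STA]);
}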
+
static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc,
enum mtk_ecc_operation op)
{
@@ -107,32 +141,30 @@ static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc,
static irqreturn_t mtk_ecc_irq(int irq, void *id)
{
struct mtk_ecc *ecc = id;
- enum mtk_ecc_operation op;
u32 dec, enc;
- dec = readw(ecc->regs + ECC_DECIRQ_STA) & ECC_IRQ_EN;
+ dec = readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_STA])
+ & ECC_IRQ_EN;
if (dec) {
- op = ECC_DECODE;
- dec = readw(ecc->regs + ECC_DECDONE);
+ dec = readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECDONE]);
if (dec & ecc->sectors) {
/*
* Clear decode IRQ status once again to ensure that
* there will be no extra IRQ.
*/
- readw(ecc->regs + ECC_DECIRQ_STA);
+ readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_STA]);
ecc->sectors = 0;
complete(&ecc->done);
} else {
return IRQ_HANDLED;
}
} else {
- enc = readl(ecc->regs + ECC_ENCIRQ_STA) & ECC_IRQ_EN;
- if (enc) {
- op = ECC_ENCODE;
+ enc = readl(ecc->regs + ecc->caps->ecc_regs[ECC_ENCIRQ_STA])
+ & ECC_IRQ_EN;
+ if (enc)
complete(&ecc->done);
- } else {
+ else
return IRQ_NONE;
- }
}
return IRQ_HANDLED;
@@ -160,7 +192,7 @@ static int mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
/* configure ECC encoder (in bits) */
enc_sz = config->len << 3;
- reg = ecc_bit | (config->mode << ECC_MODE_SHIFT);
+ reg = ecc_bit | (config->mode << ecc->caps->ecc_mode_shift);
reg |= (enc_sz << ECC_MS_SHIFT);
writel(reg, ecc->regs + ECC_ENCCNFG);
@@ -171,9 +203,9 @@ static int mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
} else {
/* configure ECC decoder (in bits) */
dec_sz = (config->len << 3) +
- config->strength * ECC_PARITY_BITS;
+ config->strength * ecc->caps->parity_bits;
- reg = ecc_bit | (config->mode << ECC_MODE_SHIFT);
+ reg = ecc_bit | (config->mode << ecc->caps->ecc_mode_shift);
reg |= (dec_sz << ECC_MS_SHIFT) | DEC_CNFG_CORRECT;
reg |= DEC_EMPTY_EN;
writel(reg, ecc->regs + ECC_DECCNFG);
@@ -291,7 +323,12 @@ int mtk_ecc_enable(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
*/
if (ecc->caps->pg_irq_sel && config->mode == ECC_NFI_MODE)
reg_val |= ECC_PG_IRQ_SEL;
- writew(reg_val, ecc->regs + ECC_IRQ_REG(op));
+ if (op == ECC_ENCODE)
+ writew(reg_val, ecc->regs +
+ ecc->caps->ecc_regs[ECC_ENCIRQ_EN]);
+ else
+ writew(reg_val, ecc->regs +
+ ecc->caps->ecc_regs[ECC_DECIRQ_EN]);
}
writew(ECC_OP_ENABLE, ecc->regs + ECC_CTL_REG(op));
@@ -310,13 +347,17 @@ void mtk_ecc_disable(struct mtk_ecc *ecc)
/* disable it */
mtk_ecc_wait_idle(ecc, op);
- if (op == ECC_DECODE)
+ if (op == ECC_DECODE) {
/*
* Clear decode IRQ status in case there is a timeout to wait
* decode IRQ.
*/
- readw(ecc->regs + ECC_DECIRQ_STA);
- writew(0, ecc->regs + ECC_IRQ_REG(op));
+ readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECDONE]);
+ writew(0, ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_EN]);
+ } else {
+ writew(0, ecc->regs + ecc->caps->ecc_regs[ECC_ENCIRQ_EN]);
+ }
+
writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op));
mutex_unlock(&ecc->lock);
@@ -367,11 +408,11 @@ int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
mtk_ecc_wait_idle(ecc, ECC_ENCODE);
/* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
- len = (config->strength * ECC_PARITY_BITS + 7) >> 3;
+ len = (config->strength * ecc->caps->parity_bits + 7) >> 3;
/* write the parity bytes generated by the ECC back to temp buffer */
__ioread32_copy(ecc->eccdata,
- ecc->regs + ecc->caps->encode_parity_reg0,
+ ecc->regs + ecc->caps->ecc_regs[ECC_ENCPAR00],
round_up(len, 4));
/* copy into possibly unaligned OOB region with actual length */
@@ -404,22 +445,42 @@ void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p)
}
EXPORT_SYMBOL(mtk_ecc_adjust_strength);
+unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc)
+{
+ return ecc->caps->parity_bits;
+}
+EXPORT_SYMBOL(mtk_ecc_get_parity_bits);
+
static const struct mtk_ecc_caps mtk_ecc_caps_mt2701 = {
.err_mask = 0x3f,
.ecc_strength = ecc_strength_mt2701,
+ .ecc_regs = mt2701_ecc_regs,
.num_ecc_strength = 20,
- .encode_parity_reg0 = 0x10,
+ .ecc_mode_shift = 5,
+ .parity_bits = 14,
.pg_irq_sel = 0,
};
static const struct mtk_ecc_caps mtk_ecc_caps_mt2712 = {
.err_mask = 0x7f,
.ecc_strength = ecc_strength_mt2712,
+ .ecc_regs = mt2712_ecc_regs,
.num_ecc_strength = 23,
- .encode_parity_reg0 = 0x300,
+ .ecc_mode_shift = 5,
+ .parity_bits = 14,
.pg_irq_sel = 1,
};
+static const struct mtk_ecc_caps mtk_ecc_caps_mt7622 = {
+ .err_mask = 0x3f,
+ .ecc_strength = ecc_strength_mt7622,
+ .ecc_regs = mt7622_ecc_regs,
+ .num_ecc_strength = 7,
+ .ecc_mode_shift = 4,
+ .parity_bits = 13,
+ .pg_irq_sel = 0,
+};
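
The parity_bits field sets the per-sector OOB budget: the engine emits strength * parity_bits parity bits, rounded up to whole bytes. A worked check of the arithmetic used by mtk_ecc_encode() and mtk_nfc_set_fdm() below:

/* Parity bytes per sector for a given correction strength. */
static unsigned int parity_bytes(unsigned int strength,
				 unsigned int parity_bits)
{
	return (strength * parity_bits + 7) >> 3;
}
/* MT7622 (13 bits), strength 16: (16 * 13 + 7) >> 3 = 26 bytes
 * MT2712 (14 bits), strength 16: (16 * 14 + 7) >> 3 = 28 bytes */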
+
static const struct of_device_id mtk_ecc_dt_match[] = {
{
.compatible = "mediatek,mt2701-ecc",
@@ -427,6 +488,9 @@ static const struct of_device_id mtk_ecc_dt_match[] = {
}, {
.compatible = "mediatek,mt2712-ecc",
.data = &mtk_ecc_caps_mt2712,
+ }, {
+ .compatible = "mediatek,mt7622-ecc",
+ .data = &mtk_ecc_caps_mt7622,
},
{},
};
@@ -452,7 +516,7 @@ static int mtk_ecc_probe(struct platform_device *pdev)
max_eccdata_size = ecc->caps->num_ecc_strength - 1;
max_eccdata_size = ecc->caps->ecc_strength[max_eccdata_size];
- max_eccdata_size = (max_eccdata_size * ECC_PARITY_BITS + 7) >> 3;
+ max_eccdata_size = (max_eccdata_size * ecc->caps->parity_bits + 7) >> 3;
max_eccdata_size = round_up(max_eccdata_size, 4);
ecc->eccdata = devm_kzalloc(dev, max_eccdata_size, GFP_KERNEL);
if (!ecc->eccdata)
diff --git a/drivers/mtd/nand/mtk_ecc.h b/drivers/mtd/nand/mtk_ecc.h
index d245c14f1b80..a455df080952 100644
--- a/drivers/mtd/nand/mtk_ecc.h
+++ b/drivers/mtd/nand/mtk_ecc.h
@@ -14,8 +14,6 @@
#include <linux/types.h>
-#define ECC_PARITY_BITS (14)
-
enum mtk_ecc_mode {ECC_DMA_MODE = 0, ECC_NFI_MODE = 1};
enum mtk_ecc_operation {ECC_ENCODE, ECC_DECODE};
@@ -43,6 +41,7 @@ int mtk_ecc_wait_done(struct mtk_ecc *, enum mtk_ecc_operation);
int mtk_ecc_enable(struct mtk_ecc *, struct mtk_ecc_config *);
void mtk_ecc_disable(struct mtk_ecc *);
void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p);
+unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc);
struct mtk_ecc *of_mtk_ecc_get(struct device_node *);
void mtk_ecc_release(struct mtk_ecc *);
diff --git a/drivers/mtd/nand/mtk_nand.c b/drivers/mtd/nand/mtk_nand.c
index d86a7d131cc0..6977da3a26aa 100644
--- a/drivers/mtd/nand/mtk_nand.c
+++ b/drivers/mtd/nand/mtk_nand.c
@@ -97,7 +97,6 @@
#define MTK_TIMEOUT (500000)
#define MTK_RESET_TIMEOUT (1000000)
-#define MTK_MAX_SECTOR (16)
#define MTK_NAND_MAX_NSELS (2)
#define MTK_NFC_MIN_SPARE (16)
#define ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt) \
@@ -109,6 +108,8 @@ struct mtk_nfc_caps {
u8 num_spare_size;
u8 pageformat_spare_shift;
u8 nfi_clk_div;
+ u8 max_sector;
+ u32 max_sector_size;
};
struct mtk_nfc_bad_mark_ctl {
@@ -173,6 +174,10 @@ static const u8 spare_size_mt2712[] = {
74
};
+static const u8 spare_size_mt7622[] = {
+ 16, 26, 27, 28
+};
+
static inline struct mtk_nfc_nand_chip *to_mtk_nand(struct nand_chip *nand)
{
return container_of(nand, struct mtk_nfc_nand_chip, nand);
@@ -450,7 +455,7 @@ static inline u8 mtk_nfc_read_byte(struct mtd_info *mtd)
* set to max sector to allow the HW to continue reading over
* unaligned accesses
*/
- reg = (MTK_MAX_SECTOR << CON_SEC_SHIFT) | CON_BRD;
+ reg = (nfc->caps->max_sector << CON_SEC_SHIFT) | CON_BRD;
nfi_writel(nfc, reg, NFI_CON);
/* trigger to fetch data */
@@ -481,7 +486,7 @@ static void mtk_nfc_write_byte(struct mtd_info *mtd, u8 byte)
reg = nfi_readw(nfc, NFI_CNFG) | CNFG_BYTE_RW;
nfi_writew(nfc, reg, NFI_CNFG);
- reg = MTK_MAX_SECTOR << CON_SEC_SHIFT | CON_BWR;
+ reg = nfc->caps->max_sector << CON_SEC_SHIFT | CON_BWR;
nfi_writel(nfc, reg, NFI_CON);
nfi_writew(nfc, STAR_EN, NFI_STRDATA);
@@ -761,6 +766,8 @@ static int mtk_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
u32 reg;
int ret;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
if (!raw) {
/* OOB => FDM: from register, ECC: from HW */
reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AUTO_FMT_EN;
@@ -794,7 +801,10 @@ static int mtk_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
if (!raw)
mtk_ecc_disable(nfc->ecc);
- return ret;
+ if (ret)
+ return ret;
+
+ return nand_prog_page_end_op(chip);
}
static int mtk_nfc_write_page_hwecc(struct mtd_info *mtd,
@@ -832,18 +842,7 @@ static int mtk_nfc_write_subpage_hwecc(struct mtd_info *mtd,
static int mtk_nfc_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- int ret;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
-
- ret = mtk_nfc_write_page_raw(mtd, chip, NULL, 1, page);
- if (ret < 0)
- return -EIO;
-
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- ret = chip->waitfunc(mtd, chip);
-
- return ret & NAND_STATUS_FAIL ? -EIO : 0;
+ return mtk_nfc_write_page_raw(mtd, chip, NULL, 1, page);
}
static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 sectors)
@@ -892,8 +891,7 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
len = sectors * chip->ecc.size + (raw ? sectors * spare : 0);
buf = bufpoi + start * chip->ecc.size;
- if (column != 0)
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, column, -1);
+ nand_read_page_op(chip, page, column, NULL, 0);
addr = dma_map_single(nfc->dev, buf, len, DMA_FROM_DEVICE);
rc = dma_mapping_error(nfc->dev, addr);
@@ -1016,8 +1014,6 @@ static int mtk_nfc_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
static int mtk_nfc_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
-
return mtk_nfc_read_page_raw(mtd, chip, NULL, 1, page);
}
@@ -1126,9 +1122,11 @@ static void mtk_nfc_set_fdm(struct mtk_nfc_fdm *fdm, struct mtd_info *mtd)
{
struct nand_chip *nand = mtd_to_nand(mtd);
struct mtk_nfc_nand_chip *chip = to_mtk_nand(nand);
+ struct mtk_nfc *nfc = nand_get_controller_data(nand);
u32 ecc_bytes;
- ecc_bytes = DIV_ROUND_UP(nand->ecc.strength * ECC_PARITY_BITS, 8);
+ ecc_bytes = DIV_ROUND_UP(nand->ecc.strength *
+ mtk_ecc_get_parity_bits(nfc->ecc), 8);
fdm->reg_size = chip->spare_per_sector - ecc_bytes;
if (fdm->reg_size > NFI_FDM_MAX_SIZE)
@@ -1208,7 +1206,8 @@ static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
* this controller only supports 512 and 1024 sizes
*/
if (nand->ecc.size < 1024) {
- if (mtd->writesize > 512) {
+ if (mtd->writesize > 512 &&
+ nfc->caps->max_sector_size > 512) {
nand->ecc.size = 1024;
nand->ecc.strength <<= 1;
} else {
@@ -1223,7 +1222,8 @@ static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
return ret;
/* calculate oob bytes except ecc parity data */
- free = ((nand->ecc.strength * ECC_PARITY_BITS) + 7) >> 3;
+ free = (nand->ecc.strength * mtk_ecc_get_parity_bits(nfc->ecc)
+ + 7) >> 3;
free = spare - free;
/*
@@ -1233,10 +1233,12 @@ static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
*/
if (free > NFI_FDM_MAX_SIZE) {
spare -= NFI_FDM_MAX_SIZE;
- nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS;
+ nand->ecc.strength = (spare << 3) /
+ mtk_ecc_get_parity_bits(nfc->ecc);
} else if (free < 0) {
spare -= NFI_FDM_MIN_SIZE;
- nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS;
+ nand->ecc.strength = (spare << 3) /
+ mtk_ecc_get_parity_bits(nfc->ecc);
}
}
@@ -1389,6 +1391,8 @@ static const struct mtk_nfc_caps mtk_nfc_caps_mt2701 = {
.num_spare_size = 16,
.pageformat_spare_shift = 4,
.nfi_clk_div = 1,
+ .max_sector = 16,
+ .max_sector_size = 1024,
};
static const struct mtk_nfc_caps mtk_nfc_caps_mt2712 = {
@@ -1396,6 +1400,17 @@ static const struct mtk_nfc_caps mtk_nfc_caps_mt2712 = {
.num_spare_size = 19,
.pageformat_spare_shift = 16,
.nfi_clk_div = 2,
+ .max_sector = 16,
+ .max_sector_size = 1024,
+};
+
+static const struct mtk_nfc_caps mtk_nfc_caps_mt7622 = {
+ .spare_size = spare_size_mt7622,
+ .num_spare_size = 4,
+ .pageformat_spare_shift = 4,
+ .nfi_clk_div = 1,
+ .max_sector = 8,
+ .max_sector_size = 512,
};
static const struct of_device_id mtk_nfc_id_table[] = {
@@ -1405,6 +1420,9 @@ static const struct of_device_id mtk_nfc_id_table[] = {
}, {
.compatible = "mediatek,mt2712-nfc",
.data = &mtk_nfc_caps_mt2712,
+ }, {
+ .compatible = "mediatek,mt7622-nfc",
+ .data = &mtk_nfc_caps_mt7622,
},
{}
};
@@ -1540,7 +1558,6 @@ static int mtk_nfc_resume(struct device *dev)
struct mtk_nfc *nfc = dev_get_drvdata(dev);
struct mtk_nfc_nand_chip *chip;
struct nand_chip *nand;
- struct mtd_info *mtd;
int ret;
u32 i;
@@ -1553,11 +1570,8 @@ static int mtk_nfc_resume(struct device *dev)
/* reset NAND chip if VCC was powered off */
list_for_each_entry(chip, &nfc->chips, node) {
nand = &chip->nand;
- mtd = nand_to_mtd(nand);
- for (i = 0; i < chip->nsels; i++) {
- nand->select_chip(mtd, i);
- nand->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
- }
+ for (i = 0; i < chip->nsels; i++)
+ nand_reset(nand, i);
}
return 0;
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 6135d007a068..e70ca16a5118 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -561,14 +561,19 @@ static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
static int nand_check_wp(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd_to_nand(mtd);
+ u8 status;
+ int ret;
/* Broken xD cards report WP despite being writable */
if (chip->options & NAND_BROKEN_XD)
return 0;
/* Check the WP bit */
- chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
- return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1;
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ return status & NAND_STATUS_WP ? 0 : 1;
}
/**
@@ -667,16 +672,83 @@ EXPORT_SYMBOL_GPL(nand_wait_ready);
static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
{
register struct nand_chip *chip = mtd_to_nand(mtd);
+ int ret;
timeo = jiffies + msecs_to_jiffies(timeo);
do {
- if ((chip->read_byte(mtd) & NAND_STATUS_READY))
+ u8 status;
+
+ ret = nand_read_data_op(chip, &status, sizeof(status), true);
+ if (ret)
+ return;
+
+ if (status & NAND_STATUS_READY)
break;
touch_softlockup_watchdog();
} while (time_before(jiffies, timeo));
};
/**
+ * nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1
+ * @chip: NAND chip structure
+ * @timeout_ms: Timeout in ms
+ *
+ * Poll the STATUS register using ->exec_op() until the RDY bit becomes 1.
+ * If that does not happen within the specified timeout, -ETIMEDOUT is
+ * returned.
+ *
+ * This helper is intended to be used when the controller does not have access
+ * to the NAND R/B pin.
+ *
+ * Be aware that calling this helper from an ->exec_op() implementation means
+ * ->exec_op() must be re-entrant.
+ *
+ * Return 0 if the NAND chip is ready, a negative error otherwise.
+ */
+int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
+{
+ u8 status = 0;
+ int ret;
+
+ if (!chip->exec_op)
+ return -ENOTSUPP;
+
+ ret = nand_status_op(chip, NULL);
+ if (ret)
+ return ret;
+
+ timeout_ms = jiffies + msecs_to_jiffies(timeout_ms);
+ do {
+ ret = nand_read_data_op(chip, &status, sizeof(status), true);
+ if (ret)
+ break;
+
+ if (status & NAND_STATUS_READY)
+ break;
+
+ /*
+ * Typical lowest execution time for a tR on most NANDs is 10us;
+ * use this as the polling delay before doing something smarter (i.e.
+ * deriving a delay from the timeout value, timeout_ms/ratio).
+ */
+ udelay(10);
+ } while (time_before(jiffies, timeout_ms));
+
+ /*
+ * We have to exit READ_STATUS mode in order to read real data on the
+ * bus in case the WAITRDY instruction is preceding a DATA_IN
+ * instruction.
+ */
+ nand_exit_status_op(chip);
+
+ if (ret)
+ return ret;
+
+ return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT;
+};
+EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
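
A controller driver lacking access to the R/B pin would typically call this helper when servicing a WAITRDY instruction from its ->exec_op() implementation. A minimal sketch under that assumption (the foo_ prefix is made up):

/* Sketch: WAITRDY handling without a wired R/B pin. */
static int foo_exec_waitrdy(struct nand_chip *chip,
			    const struct nand_op_instr *instr)
{
	/* Poll the STATUS register instead of sampling R/B */
	return nand_soft_waitrdy(chip, instr->ctx.waitrdy.timeout_ms);
}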
+
+/**
* nand_command - [DEFAULT] Send command to NAND device
* @mtd: MTD device structure
* @command: the command to be sent
@@ -710,7 +782,8 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
chip->cmd_ctrl(mtd, readcmd, ctrl);
ctrl &= ~NAND_CTRL_CHANGE;
}
- chip->cmd_ctrl(mtd, command, ctrl);
+ if (command != NAND_CMD_NONE)
+ chip->cmd_ctrl(mtd, command, ctrl);
/* Address cycle, when necessary */
ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
@@ -738,6 +811,7 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
*/
switch (command) {
+ case NAND_CMD_NONE:
case NAND_CMD_PAGEPROG:
case NAND_CMD_ERASE1:
case NAND_CMD_ERASE2:
@@ -802,8 +876,8 @@ static void nand_ccs_delay(struct nand_chip *chip)
* Wait tCCS_min if it is correctly defined, otherwise wait 500ns
* (which should be safe for all NANDs).
*/
- if (chip->data_interface && chip->data_interface->timings.sdr.tCCS_min)
- ndelay(chip->data_interface->timings.sdr.tCCS_min / 1000);
+ if (chip->setup_data_interface)
+ ndelay(chip->data_interface.timings.sdr.tCCS_min / 1000);
else
ndelay(500);
}
@@ -831,7 +905,9 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
}
/* Command latch cycle */
- chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+ if (command != NAND_CMD_NONE)
+ chip->cmd_ctrl(mtd, command,
+ NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
if (column != -1 || page_addr != -1) {
int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
@@ -866,6 +942,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
*/
switch (command) {
+ case NAND_CMD_NONE:
case NAND_CMD_CACHEDPROG:
case NAND_CMD_PAGEPROG:
case NAND_CMD_ERASE1:
@@ -1014,7 +1091,15 @@ static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
if (chip->dev_ready(mtd))
break;
} else {
- if (chip->read_byte(mtd) & NAND_STATUS_READY)
+ int ret;
+ u8 status;
+
+ ret = nand_read_data_op(chip, &status, sizeof(status),
+ true);
+ if (ret)
+ return;
+
+ if (status & NAND_STATUS_READY)
break;
}
mdelay(1);
@@ -1031,8 +1116,9 @@ static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
{
- int status;
unsigned long timeo = 400;
+ u8 status;
+ int ret;
/*
* Apply this short delay always to ensure that we do wait tWB in any
@@ -1040,7 +1126,9 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
*/
ndelay(100);
- chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ ret = nand_status_op(chip, NULL);
+ if (ret)
+ return ret;
if (in_interrupt() || oops_in_progress)
panic_nand_wait(mtd, chip, timeo);
@@ -1051,14 +1139,22 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
if (chip->dev_ready(mtd))
break;
} else {
- if (chip->read_byte(mtd) & NAND_STATUS_READY)
+ ret = nand_read_data_op(chip, &status,
+ sizeof(status), true);
+ if (ret)
+ return ret;
+
+ if (status & NAND_STATUS_READY)
break;
}
cond_resched();
} while (time_before(jiffies, timeo));
}
- status = (int)chip->read_byte(mtd);
+ ret = nand_read_data_op(chip, &status, sizeof(status), true);
+ if (ret)
+ return ret;
+
/* This can happen in case of timeout or buggy dev_ready */
WARN_ON(!(status & NAND_STATUS_READY));
return status;
@@ -1076,7 +1172,6 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- const struct nand_data_interface *conf;
int ret;
if (!chip->setup_data_interface)
@@ -1096,8 +1191,8 @@ static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
* timings to timing mode 0.
*/
- conf = nand_get_default_data_interface();
- ret = chip->setup_data_interface(mtd, chipnr, conf);
+ onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
+ ret = chip->setup_data_interface(mtd, chipnr, &chip->data_interface);
if (ret)
pr_err("Failed to configure data interface to SDR timing mode 0\n");
@@ -1122,7 +1217,7 @@ static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
- if (!chip->setup_data_interface || !chip->data_interface)
+ if (!chip->setup_data_interface)
return 0;
/*
@@ -1143,7 +1238,7 @@ static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
goto err;
}
- ret = chip->setup_data_interface(mtd, chipnr, chip->data_interface);
+ ret = chip->setup_data_interface(mtd, chipnr, &chip->data_interface);
err:
return ret;
}
@@ -1183,21 +1278,19 @@ static int nand_init_data_interface(struct nand_chip *chip)
modes = GENMASK(chip->onfi_timing_mode_default, 0);
}
- chip->data_interface = kzalloc(sizeof(*chip->data_interface),
- GFP_KERNEL);
- if (!chip->data_interface)
- return -ENOMEM;
for (mode = fls(modes) - 1; mode >= 0; mode--) {
- ret = onfi_init_data_interface(chip, chip->data_interface,
- NAND_SDR_IFACE, mode);
+ ret = onfi_fill_data_interface(chip, NAND_SDR_IFACE, mode);
if (ret)
continue;
- /* Pass -1 to only */
+ /*
+ * Pass NAND_DATA_IFACE_CHECK_ONLY to only check if the
+ * controller supports the requested timings.
+ */
ret = chip->setup_data_interface(mtd,
NAND_DATA_IFACE_CHECK_ONLY,
- chip->data_interface);
+ &chip->data_interface);
if (!ret) {
chip->onfi_timing_mode_default = mode;
break;
@@ -1207,21 +1300,1429 @@ static int nand_init_data_interface(struct nand_chip *chip)
return 0;
}
-static void nand_release_data_interface(struct nand_chip *chip)
+/**
+ * nand_fill_column_cycles - fill the column cycles of an address
+ * @chip: The NAND chip
+ * @addrs: Array of address cycles to fill
+ * @offset_in_page: The offset in the page
+ *
+ * Fills the first or the first two bytes of the @addrs field depending
+ * on the NAND bus width and the page size.
+ *
+ * Returns the number of cycles needed to encode the column, or a negative
+ * error code in case one of the arguments is invalid.
+ */
+static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
+ unsigned int offset_in_page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ /* Make sure the offset is less than the actual page size. */
+ if (offset_in_page > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ /*
+ * On small page NANDs, there's a dedicated command to access the OOB
+ * area, and the column address is relative to the start of the OOB
+ * area, not the start of the page. Adjust the address accordingly.
+ */
+ if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
+ offset_in_page -= mtd->writesize;
+
+ /*
+ * The offset in page is expressed in bytes; if the NAND bus is 16-bit
+ * wide, then it must be divided by 2.
+ */
+ if (chip->options & NAND_BUSWIDTH_16) {
+ if (WARN_ON(offset_in_page % 2))
+ return -EINVAL;
+
+ offset_in_page /= 2;
+ }
+
+ addrs[0] = offset_in_page;
+
+ /*
+ * Small page NANDs use 1 cycle for the columns, while large page NANDs
+ * need 2.
+ */
+ if (mtd->writesize <= 512)
+ return 1;
+
+ addrs[1] = offset_in_page >> 8;
+
+ return 2;
+}
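
A worked example of the encoding on a large page device (2048+64 bytes):

/* nand_fill_column_cycles(), offset_in_page = 2050 (2nd OOB byte):
 *   8-bit bus:  addrs[0] = 2050 & 0xff = 0x02, addrs[1] = 2050 >> 8 = 0x08
 *   16-bit bus: offset halved to 1025 -> addrs[0] = 0x01, addrs[1] = 0x04
 * Returns 2 in both cases (large page => two column cycles).
 */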
+
+static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf,
+ unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ u8 addrs[4];
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_READ0, 0),
+ NAND_OP_ADDR(3, addrs, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
+ PSEC_TO_NSEC(sdr->tRR_min)),
+ NAND_OP_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+ int ret;
+
+ /* Drop the DATA_IN instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ if (offset_in_page >= mtd->writesize)
+ instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
+ else if (offset_in_page >= 256 &&
+ !(chip->options & NAND_BUSWIDTH_16))
+ instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
+
+ ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ if (ret < 0)
+ return ret;
+
+ addrs[1] = page;
+ addrs[2] = page >> 8;
+
+ if (chip->options & NAND_ROW_ADDR_3) {
+ addrs[3] = page >> 16;
+ instrs[1].ctx.addr.naddrs++;
+ }
+
+ return nand_exec_op(chip, &op);
+}
+
+static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf,
+ unsigned int len)
+{
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ u8 addrs[5];
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_READ0, 0),
+ NAND_OP_ADDR(4, addrs, 0),
+ NAND_OP_CMD(NAND_CMD_READSTART, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
+ PSEC_TO_NSEC(sdr->tRR_min)),
+ NAND_OP_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+ int ret;
+
+ /* Drop the DATA_IN instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ if (ret < 0)
+ return ret;
+
+ addrs[2] = page;
+ addrs[3] = page >> 8;
+
+ if (chip->options & NAND_ROW_ADDR_3) {
+ addrs[4] = page >> 16;
+ instrs[1].ctx.addr.naddrs++;
+ }
+
+ return nand_exec_op(chip, &op);
+}
+
+/**
+ * nand_read_page_op - Do a READ PAGE operation
+ * @chip: The NAND chip
+ * @page: page to read
+ * @offset_in_page: offset within the page
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ *
+ * This function issues a READ PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_read_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf, unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ if (chip->exec_op) {
+ if (mtd->writesize > 512)
+ return nand_lp_exec_read_page_op(chip, page,
+ offset_in_page, buf,
+ len);
+
+ return nand_sp_exec_read_page_op(chip, page, offset_in_page,
+ buf, len);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_READ0, offset_in_page, page);
+ if (len)
+ chip->read_buf(mtd, buf, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_read_page_op);
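
Since the helper leaves the device streaming data after the command/address sequence, a raw page read reduces to one call plus an optional OOB read. A minimal sketch (the foo_ wrapper is illustrative, not a function added by this patch):

/* Sketch: raw page read built on the new helpers. The caller is
 * expected to have selected the CS line already. */
static int foo_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
			     u8 *buf, int oob_required, int page)
{
	int ret;

	ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
	if (ret)
		return ret;

	if (oob_required)
		ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
					false);
	return ret;
}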
+
+/**
+ * nand_read_param_page_op - Do a READ PARAMETER PAGE operation
+ * @chip: The NAND chip
+ * @page: parameter page to read
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ *
+ * This function issues a READ PARAMETER PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
+ unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int i;
+ u8 *p = buf;
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_PARAM, 0),
+ NAND_OP_ADDR(1, &page, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
+ PSEC_TO_NSEC(sdr->tRR_min)),
+ NAND_OP_8BIT_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ /* Drop the DATA_IN instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_PARAM, page, -1);
+ for (i = 0; i < len; i++)
+ p[i] = chip->read_byte(mtd);
+
+ return 0;
+}
+
+/**
+ * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
+ * @chip: The NAND chip
+ * @offset_in_page: offset within the page
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ * @force_8bit: force 8-bit bus access
+ *
+ * This function issues a CHANGE READ COLUMN operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_change_read_column_op(struct nand_chip *chip,
+ unsigned int offset_in_page, void *buf,
+ unsigned int len, bool force_8bit)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ /* Small page NANDs do not support column change. */
+ if (mtd->writesize <= 512)
+ return -ENOTSUPP;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ u8 addrs[2] = {};
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
+ NAND_OP_ADDR(2, addrs, 0),
+ NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
+ PSEC_TO_NSEC(sdr->tCCS_min)),
+ NAND_OP_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+ int ret;
+
+ ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ if (ret < 0)
+ return ret;
+
+ /* Drop the DATA_IN instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ instrs[3].ctx.data.force_8bit = force_8bit;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset_in_page, -1);
+ if (len)
+ chip->read_buf(mtd, buf, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_change_read_column_op);
+
+/**
+ * nand_read_oob_op - Do a READ OOB operation
+ * @chip: The NAND chip
+ * @page: page to read
+ * @offset_in_oob: offset within the OOB area
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ *
+ * This function issues a READ OOB operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_oob, void *buf, unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_oob + len > mtd->oobsize)
+ return -EINVAL;
+
+ if (chip->exec_op)
+ return nand_read_page_op(chip, page,
+ mtd->writesize + offset_in_oob,
+ buf, len);
+
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, offset_in_oob, page);
+ if (len)
+ chip->read_buf(mtd, buf, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_read_oob_op);
+
+static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len, bool prog)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ u8 addrs[5] = {};
+ struct nand_op_instr instrs[] = {
+ /*
+ * The first instruction will be dropped if we're dealing
+ * with a large page NAND and adjusted if we're dealing
+ * with a small page NAND and the page offset is > 255.
+ */
+ NAND_OP_CMD(NAND_CMD_READ0, 0),
+ NAND_OP_CMD(NAND_CMD_SEQIN, 0),
+ NAND_OP_ADDR(0, addrs, PSEC_TO_NSEC(sdr->tADL_min)),
+ NAND_OP_DATA_OUT(len, buf, 0),
+ NAND_OP_CMD(NAND_CMD_PAGEPROG, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+ int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ int ret;
+ u8 status;
+
+ if (naddrs < 0)
+ return naddrs;
+
+ addrs[naddrs++] = page;
+ addrs[naddrs++] = page >> 8;
+ if (chip->options & NAND_ROW_ADDR_3)
+ addrs[naddrs++] = page >> 16;
+
+ instrs[2].ctx.addr.naddrs = naddrs;
+
+ /* Drop the last two instructions if we're not programming the page. */
+ if (!prog) {
+ op.ninstrs -= 2;
+ /* Also drop the DATA_OUT instruction if empty. */
+ if (!len)
+ op.ninstrs--;
+ }
+
+ if (mtd->writesize <= 512) {
+ /*
+ * Small pages need some more tweaking: we have to adjust the
+ * first instruction depending on the page offset we're trying
+ * to access.
+ */
+ if (offset_in_page >= mtd->writesize)
+ instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
+ else if (offset_in_page >= 256 &&
+ !(chip->options & NAND_BUSWIDTH_16))
+ instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
+ } else {
+ /*
+ * Drop the first command if we're dealing with a large page
+ * NAND.
+ */
+ op.instrs++;
+ op.ninstrs--;
+ }
+
+ ret = nand_exec_op(chip, &op);
+ if (!prog || ret)
+ return ret;
+
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ return status;
+}
+
+/**
+ * nand_prog_page_begin_op - starts a PROG PAGE operation
+ * @chip: The NAND chip
+ * @page: page to write
+ * @offset_in_page: offset within the page
+ * @buf: buffer containing the data to write to the page
+ * @len: length of the buffer
+ *
+ * This function issues the first half of a PROG PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ if (chip->exec_op)
+ return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
+ len, false);
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, offset_in_page, page);
+
+ if (buf)
+ chip->write_buf(mtd, buf, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);
+
+/**
+ * nand_prog_page_end_op - ends a PROG PAGE operation
+ * @chip: The NAND chip
+ *
+ * This function issues the second half of a PROG PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_prog_page_end_op(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+ u8 status;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_PAGEPROG,
+ PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ ret = nand_exec_op(chip, &op);
+ if (ret)
+ return ret;
+
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+ } else {
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ ret = chip->waitfunc(mtd, chip);
+ if (ret < 0)
+ return ret;
+
+ status = ret;
+ }
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_prog_page_end_op);
+
+/**
+ * nand_prog_page_op - Do a full PROG PAGE operation
+ * @chip: The NAND chip
+ * @page: page to write
+ * @offset_in_page: offset within the page
+ * @buf: buffer containing the data to write to the page
+ * @len: length of the buffer
+ *
+ * This function issues a full PROG PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len)
{
- kfree(chip->data_interface);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int status;
+
+ if (!len || !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ if (chip->exec_op) {
+ status = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
+ len, true);
+ } else {
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, offset_in_page, page);
+ chip->write_buf(mtd, buf, len);
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ status = chip->waitfunc(mtd, chip);
+ }
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
}
+EXPORT_SYMBOL_GPL(nand_prog_page_op);
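
The begin/end split exists for drivers that must do controller work (enable an ECC engine, program FDM registers, start a DMA) between the SEQIN address cycles and the final PAGEPROG, as mtk_nand.c does earlier in this patch; everyone else can call nand_prog_page_op() directly. A hedged sketch of the split form:

/* Sketch: split programming with driver work in the middle. */
static int foo_write_page(struct nand_chip *chip, const u8 *buf,
			  unsigned int len, int page)
{
	int ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/* ... driver-specific setup would go here ... */

	ret = nand_write_data_op(chip, buf, len, false);
	if (ret)
		return ret;

	return nand_prog_page_end_op(chip);
}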
+
+/**
+ * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation
+ * @chip: The NAND chip
+ * @offset_in_page: offset within the page
+ * @buf: buffer containing the data to send to the NAND
+ * @len: length of the buffer
+ * @force_8bit: force 8-bit bus access
+ *
+ * This function issues a CHANGE WRITE COLUMN operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_change_write_column_op(struct nand_chip *chip,
+ unsigned int offset_in_page,
+ const void *buf, unsigned int len,
+ bool force_8bit)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ /* Small page NANDs do not support column change. */
+ if (mtd->writesize <= 512)
+ return -ENOTSUPP;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ u8 addrs[2];
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_RNDIN, 0),
+ NAND_OP_ADDR(2, addrs, PSEC_TO_NSEC(sdr->tCCS_min)),
+ NAND_OP_DATA_OUT(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+ int ret;
+
+ ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ if (ret < 0)
+ return ret;
+
+ instrs[2].ctx.data.force_8bit = force_8bit;
+
+ /* Drop the DATA_OUT instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset_in_page, -1);
+ if (len)
+ chip->write_buf(mtd, buf, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_change_write_column_op);
+
+/**
+ * nand_readid_op - Do a READID operation
+ * @chip: The NAND chip
+ * @addr: address cycle to pass after the READID command
+ * @buf: buffer used to store the ID
+ * @len: length of the buffer
+ *
+ * This function sends a READID command and reads back the ID returned by the
+ * NAND.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
+ unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int i;
+ u8 *id = buf;
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_READID, 0),
+ NAND_OP_ADDR(1, &addr, PSEC_TO_NSEC(sdr->tADL_min)),
+ NAND_OP_8BIT_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ /* Drop the DATA_IN instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_READID, addr, -1);
+
+ for (i = 0; i < len; i++)
+ id[i] = chip->read_byte(mtd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_readid_op);
+
+/**
+ * nand_status_op - Do a STATUS operation
+ * @chip: The NAND chip
+ * @status: out variable to store the NAND status
+ *
+ * This function sends a STATUS command and reads back the status returned by
+ * the NAND.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_status_op(struct nand_chip *chip, u8 *status)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_STATUS,
+ PSEC_TO_NSEC(sdr->tADL_min)),
+ NAND_OP_8BIT_DATA_IN(1, status, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ if (!status)
+ op.ninstrs--;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ if (status)
+ *status = chip->read_byte(mtd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_status_op);
+
+/**
+ * nand_exit_status_op - Exit a STATUS operation
+ * @chip: The NAND chip
+ *
+ * This function sends a READ0 command to cancel the effect of the STATUS
+ * command, so that the device stops returning only its status until a new
+ * read command is sent.
+ *
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_exit_status_op(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (chip->exec_op) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_READ0, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_READ0, -1, -1);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_exit_status_op);
+
+/**
+ * nand_erase_op - Do an erase operation
+ * @chip: The NAND chip
+ * @eraseblock: block to erase
+ *
+ * This function sends an ERASE command and waits for the NAND to be ready
+ * before returning.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int page = eraseblock <<
+ (chip->phys_erase_shift - chip->page_shift);
+ int ret;
+ u8 status;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ u8 addrs[3] = { page, page >> 8, page >> 16 };
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_ERASE1, 0),
+ NAND_OP_ADDR(2, addrs, 0),
+ NAND_OP_CMD(NAND_CMD_ERASE2,
+ PSEC_TO_MSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tBERS_max), 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ if (chip->options & NAND_ROW_ADDR_3)
+ instrs[1].ctx.addr.naddrs++;
+
+ ret = nand_exec_op(chip, &op);
+ if (ret)
+ return ret;
+
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+ } else {
+ chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
+
+ ret = chip->waitfunc(mtd, chip);
+ if (ret < 0)
+ return ret;
+
+ status = ret;
+ }
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_erase_op);
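
The eraseblock-to-page conversion assumes power-of-two block and page sizes, as encoded in the two shifts:

/* Worked example for the page computation in nand_erase_op():
 *   page_shift = 11 (2KiB pages), phys_erase_shift = 17 (128KiB blocks)
 *   pages per block = 1 << (17 - 11) = 64
 *   eraseblock 10 starts at page 10 << 6 = 640
 */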
+
+/**
+ * nand_set_features_op - Do a SET FEATURES operation
+ * @chip: The NAND chip
+ * @feature: feature id
+ * @data: 4 bytes of data
+ *
+ * This function sends a SET FEATURES command and waits for the NAND to be
+ * ready before returning.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int nand_set_features_op(struct nand_chip *chip, u8 feature,
+ const void *data)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const u8 *params = data;
+ int i, ret;
+ u8 status;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
+ NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tADL_min)),
+ NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
+ PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max), 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ ret = nand_exec_op(chip, &op);
+ if (ret)
+ return ret;
+
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+ } else {
+ chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, feature, -1);
+ for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+ chip->write_byte(mtd, params[i]);
+
+ ret = chip->waitfunc(mtd, chip);
+ if (ret < 0)
+ return ret;
+
+ status = ret;
+ }
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * nand_get_features_op - Do a GET FEATURES operation
+ * @chip: The NAND chip
+ * @feature: feature id
+ * @data: 4 bytes of data
+ *
+ * This function sends a GET FEATURES command and waits for the NAND to be
+ * ready before returning.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int nand_get_features_op(struct nand_chip *chip, u8 feature,
+ void *data)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 *params = data;
+ int i;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
+ NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max),
+ PSEC_TO_NSEC(sdr->tRR_min)),
+ NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
+ data, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, feature, -1);
+ for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+ params[i] = chip->read_byte(mtd);
+
+ return 0;
+}
+
+/**
+ * nand_reset_op - Do a reset operation
+ * @chip: The NAND chip
+ *
+ * This function sends a RESET command and waits for the NAND to be ready
+ * before returning.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_reset_op(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(&chip->data_interface);
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_RESET, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_reset_op);
+
+/**
+ * nand_read_data_op - Read data from the NAND
+ * @chip: The NAND chip
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ * @force_8bit: force 8-bit bus access
+ *
+ * This function does a raw data read on the bus. Usually used after launching
+ * another NAND operation like nand_read_page_op().
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
+ bool force_8bit)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (!len || !buf)
+ return -EINVAL;
+
+ if (chip->exec_op) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ instrs[0].ctx.data.force_8bit = force_8bit;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ if (force_8bit) {
+ u8 *p = buf;
+ unsigned int i;
+
+ for (i = 0; i < len; i++)
+ p[i] = chip->read_byte(mtd);
+ } else {
+ chip->read_buf(mtd, buf, len);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_read_data_op);
+
+/**
+ * nand_write_data_op - Write data to the NAND
+ * @chip: The NAND chip
+ * @buf: buffer containing the data to send on the bus
+ * @len: length of the buffer
+ * @force_8bit: force 8-bit bus access
+ *
+ * This function does a raw data write on the bus. Usually used after launching
+ * another NAND operation like nand_prog_page_begin_op().
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_write_data_op(struct nand_chip *chip, const void *buf,
+ unsigned int len, bool force_8bit)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (!len || !buf)
+ return -EINVAL;
+
+ if (chip->exec_op) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_DATA_OUT(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ instrs[0].ctx.data.force_8bit = force_8bit;
+
+ return nand_exec_op(chip, &op);
+ }
+
+ if (force_8bit) {
+ const u8 *p = buf;
+ unsigned int i;
+
+ for (i = 0; i < len; i++)
+ chip->write_byte(mtd, p[i]);
+ } else {
+ chip->write_buf(mtd, buf, len);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_write_data_op);
+
+/**
+ * struct nand_op_parser_ctx - Context used by the parser
+ * @instrs: array of all the instructions that must be addressed
+ * @ninstrs: length of the @instrs array
+ * @subop: Sub-operation to be passed to the NAND controller
+ *
+ * This structure is used by the core to split NAND operations into
+ * sub-operations that can be handled by the NAND controller.
+ */
+struct nand_op_parser_ctx {
+ const struct nand_op_instr *instrs;
+ unsigned int ninstrs;
+ struct nand_subop subop;
+};
+
+/**
+ * nand_op_parser_must_split_instr - Checks if an instruction must be split
+ * @pat: the parser pattern element that matches @instr
+ * @instr: pointer to the instruction to check
+ * @start_offset: this is an in/out parameter. If @instr has already been
+ * split, then @start_offset is the offset from which to start
+ * (either an address cycle or an offset in the data buffer).
+ *		  Conversely, if the function returns true (i.e. @instr must be
+ * split), this parameter is updated to point to the first
+ * data/address cycle that has not been taken care of.
+ *
+ * Some NAND controllers are limited and cannot send X address cycles in a
+ * single operation, or cannot read/write more than Y bytes at a time.
+ * In this case, the instruction that does not fit in a single
+ * controller-operation is split into two or more chunks.
+ *
+ * Returns true if the instruction must be split, false otherwise.
+ * The @start_offset parameter is also updated to the offset at which the next
+ * bundle of instructions must start (for address and data instructions).
+ */
+static bool
+nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat,
+ const struct nand_op_instr *instr,
+ unsigned int *start_offset)
+{
+ switch (pat->type) {
+ case NAND_OP_ADDR_INSTR:
+ if (!pat->ctx.addr.maxcycles)
+ break;
+
+ if (instr->ctx.addr.naddrs - *start_offset >
+ pat->ctx.addr.maxcycles) {
+ *start_offset += pat->ctx.addr.maxcycles;
+ return true;
+ }
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ case NAND_OP_DATA_OUT_INSTR:
+ if (!pat->ctx.data.maxlen)
+ break;
+
+ if (instr->ctx.data.len - *start_offset >
+ pat->ctx.data.maxlen) {
+ *start_offset += pat->ctx.data.maxlen;
+ return true;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return false;
+}
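+
+/*
+ * Example: for a pattern element declared with
+ * NAND_OP_PARSER_PAT_ADDR_ELEM(false, 4), a 5-cycle address instruction is
+ * split in two: the first sub-operation gets cycles 0-3 and *start_offset
+ * comes back as 4, so the next sub-operation starts at the fifth cycle.
+ */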
+
+/**
+ * nand_op_parser_match_pat - Checks if a pattern matches the instructions
+ * remaining in the parser context
+ * @pat: the pattern to test
+ * @ctx: the parser context structure to match with the pattern @pat
+ *
+ * Check if @pat matches the set or a sub-set of instructions remaining in @ctx.
+ * Returns true if this is the case, false otherwise. When true is returned,
+ * @ctx->subop is updated with the set of instructions to be passed to the
+ * controller driver.
+ */
+static bool
+nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
+ struct nand_op_parser_ctx *ctx)
+{
+ unsigned int instr_offset = ctx->subop.first_instr_start_off;
+ const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
+ const struct nand_op_instr *instr = ctx->subop.instrs;
+ unsigned int i, ninstrs;
+
+ for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
+ /*
+ * The pattern instruction does not match the operation
+ * instruction. If the instruction is marked optional in the
+ * pattern definition, we skip the pattern element and continue
+ * to the next one. If the element is mandatory, there's no
+ * match and we can return false directly.
+ */
+ if (instr->type != pat->elems[i].type) {
+ if (!pat->elems[i].optional)
+ return false;
+
+ continue;
+ }
+
+ /*
+ * Now check the pattern element constraints. If the pattern is
+ * not able to handle the whole instruction in a single step,
+ * we have to split it.
+		 * When a split is needed, instr_offset comes back updated to
+		 * point to the position where the instruction must be split
+		 * (the start of the next sub-operation chunk), and is later
+		 * stored in last_instr_end_off.
+ */
+ if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
+ &instr_offset)) {
+ ninstrs++;
+ i++;
+ break;
+ }
+
+ instr++;
+ ninstrs++;
+ instr_offset = 0;
+ }
+
+ /*
+ * This can happen if all instructions of a pattern are optional.
+	 * Still, if the pattern did not handle at least one instruction, this
+	 * is not a match, and we should try the next pattern (if any).
+ */
+ if (!ninstrs)
+ return false;
+
+ /*
+ * We had a match on the pattern head, but the pattern may be longer
+ * than the instructions we're asked to execute. We need to make sure
+	 * there are no mandatory elements in the pattern tail.
+ */
+ for (; i < pat->nelems; i++) {
+ if (!pat->elems[i].optional)
+ return false;
+ }
+
+ /*
+ * We have a match: update the subop structure accordingly and return
+ * true.
+ */
+ ctx->subop.ninstrs = ninstrs;
+ ctx->subop.last_instr_end_off = instr_offset;
+
+ return true;
+}
+
+#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
+static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
+{
+ const struct nand_op_instr *instr;
+	const char *prefix = " ";
+ unsigned int i;
+
+ pr_debug("executing subop:\n");
+
+ for (i = 0; i < ctx->ninstrs; i++) {
+ instr = &ctx->instrs[i];
+
+ if (instr == &ctx->subop.instrs[0])
+ prefix = " ->";
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ pr_debug("%sCMD [0x%02x]\n", prefix,
+ instr->ctx.cmd.opcode);
+ break;
+ case NAND_OP_ADDR_INSTR:
+ pr_debug("%sADDR [%d cyc: %*ph]\n", prefix,
+ instr->ctx.addr.naddrs,
+ instr->ctx.addr.naddrs < 64 ?
+ instr->ctx.addr.naddrs : 64,
+ instr->ctx.addr.addrs);
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ pr_debug("%sDATA_IN [%d B%s]\n", prefix,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit ?
+ ", force 8-bit" : "");
+ break;
+ case NAND_OP_DATA_OUT_INSTR:
+ pr_debug("%sDATA_OUT [%d B%s]\n", prefix,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit ?
+ ", force 8-bit" : "");
+ break;
+ case NAND_OP_WAITRDY_INSTR:
+ pr_debug("%sWAITRDY [max %d ms]\n", prefix,
+ instr->ctx.waitrdy.timeout_ms);
+ break;
+ }
+
+ if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
+ prefix = " ";
+ }
+}
+#else
+static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
+{
+ /* NOP */
+}
+#endif
+
+/**
+ * nand_op_parser_exec_op - exec_op parser
+ * @chip: the NAND chip
+ * @parser: patterns description provided by the controller driver
+ * @op: the NAND operation to address
+ * @check_only: when true, the function only checks if @op can be handled but
+ * does not execute the operation
+ *
+ * Helper function designed to ease integration of NAND controller drivers that
+ * only support a limited set of instruction sequences. The supported sequences
+ * are described in @parser, and the framework takes care of splitting @op into
+ * multiple sub-operations (if required) and passing them back to the ->exec()
+ * callback of the matching pattern if @check_only is set to false.
+ *
+ * NAND controller drivers should call this function from their own ->exec_op()
+ * implementation.
+ *
+ * Returns 0 on success, a negative error code otherwise. A failure can be
+ * caused by an unsupported operation (none of the supported patterns is able
+ * to handle the requested operation) or by an error returned by the
+ * matching pattern's ->exec() hook.
+ */
+int nand_op_parser_exec_op(struct nand_chip *chip,
+ const struct nand_op_parser *parser,
+ const struct nand_operation *op, bool check_only)
+{
+ struct nand_op_parser_ctx ctx = {
+ .subop.instrs = op->instrs,
+ .instrs = op->instrs,
+ .ninstrs = op->ninstrs,
+ };
+ unsigned int i;
+
+ while (ctx.subop.instrs < op->instrs + op->ninstrs) {
+ int ret;
+
+ for (i = 0; i < parser->npatterns; i++) {
+ const struct nand_op_parser_pattern *pattern;
+
+ pattern = &parser->patterns[i];
+ if (!nand_op_parser_match_pat(pattern, &ctx))
+ continue;
+
+ nand_op_parser_trace(&ctx);
+
+ if (check_only)
+ break;
+
+ ret = pattern->exec(chip, &ctx.subop);
+ if (ret)
+ return ret;
+
+ break;
+ }
+
+ if (i == parser->npatterns) {
+ pr_debug("->exec_op() parser: pattern not found!\n");
+ return -ENOTSUPP;
+ }
+
+ /*
+ * Update the context structure by pointing to the start of the
+ * next subop.
+ */
+ ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs;
+ if (ctx.subop.last_instr_end_off)
+ ctx.subop.instrs -= 1;
+
+ ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);
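+
+/*
+ * Example: a controller driver whose engine handles at most 5 address cycles
+ * and 512 data bytes per operation could describe its supported sequences
+ * with the NAND_OP_PARSER*() macros accompanying this API and wire up
+ * ->exec_op() as below. Illustrative sketch only; the mydrv_* names are
+ * hypothetical.
+ *
+ *	static const struct nand_op_parser mydrv_op_parser = NAND_OP_PARSER(
+ *		NAND_OP_PARSER_PATTERN(mydrv_exec_subop,
+ *			NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ *			NAND_OP_PARSER_PAT_ADDR_ELEM(true, 5),
+ *			NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ *			NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ *			NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 512)));
+ *
+ *	static int mydrv_exec_op(struct nand_chip *chip,
+ *				 const struct nand_operation *op,
+ *				 bool check_only)
+ *	{
+ *		return nand_op_parser_exec_op(chip, &mydrv_op_parser,
+ *					      op, check_only);
+ *	}
+ */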
+
+static bool nand_instr_is_data(const struct nand_op_instr *instr)
+{
+ return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
+ instr->type == NAND_OP_DATA_OUT_INSTR);
+}
+
+static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ return subop && instr_idx < subop->ninstrs;
+}
+
+static int nand_subop_get_start_off(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ if (instr_idx)
+ return 0;
+
+ return subop->first_instr_start_off;
+}
+
+/**
+ * nand_subop_get_addr_start_off - Get the start offset in an address array
+ * @subop: The entire sub-operation
+ * @instr_idx: Index of the instruction inside the sub-operation
+ *
+ * During driver development, one could be tempted to directly use the
+ * ->addr.addrs field of address instructions. This is wrong as address
+ * instructions might be split.
+ *
+ * Given an address instruction, returns the offset of the first cycle to issue.
+ */
+int nand_subop_get_addr_start_off(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ if (!nand_subop_instr_is_valid(subop, instr_idx) ||
+ subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR)
+ return -EINVAL;
+
+ return nand_subop_get_start_off(subop, instr_idx);
+}
+EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);
+
+/**
+ * nand_subop_get_num_addr_cyc - Get the remaining address cycles to assert
+ * @subop: The entire sub-operation
+ * @instr_idx: Index of the instruction inside the sub-operation
+ *
+ * During driver development, one could be tempted to directly use the
+ * ->addr.naddrs field of an address instruction. This is wrong as address
+ * instructions might be split.
+ *
+ * Given an address instruction, returns the number of address cycles to issue.
+ */
+int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ int start_off, end_off;
+
+ if (!nand_subop_instr_is_valid(subop, instr_idx) ||
+ subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR)
+ return -EINVAL;
+
+ start_off = nand_subop_get_addr_start_off(subop, instr_idx);
+
+ if (instr_idx == subop->ninstrs - 1 &&
+ subop->last_instr_end_off)
+ end_off = subop->last_instr_end_off;
+ else
+ end_off = subop->instrs[instr_idx].ctx.addr.naddrs;
+
+ return end_off - start_off;
+}
+EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);
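+
+/*
+ * Example: inside a pattern ->exec() hook, only the address cycles that
+ * belong to the current sub-operation must be issued. Illustrative sketch;
+ * mydrv_write_addr_cycle() is a hypothetical controller helper.
+ *
+ *	int start = nand_subop_get_addr_start_off(subop, instr_idx);
+ *	int ncycles = nand_subop_get_num_addr_cyc(subop, instr_idx);
+ *	const u8 *addrs = subop->instrs[instr_idx].ctx.addr.addrs;
+ *	int i;
+ *
+ *	for (i = 0; i < ncycles; i++)
+ *		mydrv_write_addr_cycle(addrs[start + i]);
+ */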
+
+/**
+ * nand_subop_get_data_start_off - Get the start offset in a data array
+ * @subop: The entire sub-operation
+ * @instr_idx: Index of the instruction inside the sub-operation
+ *
+ * During driver development, one could be tempted to directly use the
+ * ->data.buf.{in,out} field of data instructions. This is wrong as data
+ * instructions might be split.
+ *
+ * Given a data instruction, returns the offset to start from.
+ */
+int nand_subop_get_data_start_off(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ if (!nand_subop_instr_is_valid(subop, instr_idx) ||
+ !nand_instr_is_data(&subop->instrs[instr_idx]))
+ return -EINVAL;
+
+ return nand_subop_get_start_off(subop, instr_idx);
+}
+EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);
+
+/**
+ * nand_subop_get_data_len - Get the number of bytes to retrieve
+ * @subop: The entire sub-operation
+ * @instr_idx: Index of the instruction inside the sub-operation
+ *
+ * During driver development, one could be tempted to directly use the
+ * ->data.len field of a data instruction. This is wrong as data instructions
+ * might be split.
+ *
+ * Returns the length of the chunk of data to send/receive.
+ */
+int nand_subop_get_data_len(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ int start_off = 0, end_off;
+
+ if (!nand_subop_instr_is_valid(subop, instr_idx) ||
+ !nand_instr_is_data(&subop->instrs[instr_idx]))
+ return -EINVAL;
+
+ start_off = nand_subop_get_data_start_off(subop, instr_idx);
+
+ if (instr_idx == subop->ninstrs - 1 &&
+ subop->last_instr_end_off)
+ end_off = subop->last_instr_end_off;
+ else
+ end_off = subop->instrs[instr_idx].ctx.data.len;
+
+ return end_off - start_off;
+}
+EXPORT_SYMBOL_GPL(nand_subop_get_data_len);
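+
+/*
+ * Example: the same logic applies to data instructions. Illustrative sketch;
+ * mydrv_read_fifo() is a hypothetical controller helper.
+ *
+ *	int offs = nand_subop_get_data_start_off(subop, instr_idx);
+ *	int len = nand_subop_get_data_len(subop, instr_idx);
+ *	u8 *buf = subop->instrs[instr_idx].ctx.data.buf.in;
+ *
+ *	mydrv_read_fifo(buf + offs, len);
+ */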
/**
* nand_reset - Reset and initialize a NAND device
* @chip: The NAND chip
* @chipnr: Internal die id
*
- * Returns 0 for success or negative error code otherwise
+ * Save the data interface configuration, then apply SDR timings mode 0 (see
+ * nand_reset_data_interface() for details), perform the reset operation, and
+ * restore the previous timings.
+ *
+ * Returns 0 on success, a negative error code otherwise.
*/
int nand_reset(struct nand_chip *chip, int chipnr)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_data_interface saved_data_intf = chip->data_interface;
int ret;
ret = nand_reset_data_interface(chip, chipnr);
@@ -1233,10 +2734,13 @@ int nand_reset(struct nand_chip *chip, int chipnr)
* interface settings, hence this weird ->select_chip() dance.
*/
chip->select_chip(mtd, chipnr);
- chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ ret = nand_reset_op(chip);
chip->select_chip(mtd, -1);
+ if (ret)
+ return ret;
chip->select_chip(mtd, chipnr);
+ chip->data_interface = saved_data_intf;
ret = nand_setup_data_interface(chip, chipnr);
chip->select_chip(mtd, -1);
if (ret)
@@ -1390,9 +2894,19 @@ EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required, int page)
{
- chip->read_buf(mtd, buf, mtd->writesize);
- if (oob_required)
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ int ret;
+
+ ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
+ if (ret)
+ return ret;
+
+ if (oob_required) {
+ ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
+ false);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
EXPORT_SYMBOL(nand_read_page_raw);
@@ -1414,29 +2928,50 @@ static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
int eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
uint8_t *oob = chip->oob_poi;
- int steps, size;
+ int steps, size, ret;
+
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
for (steps = chip->ecc.steps; steps > 0; steps--) {
- chip->read_buf(mtd, buf, eccsize);
+ ret = nand_read_data_op(chip, buf, eccsize, false);
+ if (ret)
+ return ret;
+
buf += eccsize;
if (chip->ecc.prepad) {
- chip->read_buf(mtd, oob, chip->ecc.prepad);
+ ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
+ false);
+ if (ret)
+ return ret;
+
oob += chip->ecc.prepad;
}
- chip->read_buf(mtd, oob, eccbytes);
+ ret = nand_read_data_op(chip, oob, eccbytes, false);
+ if (ret)
+ return ret;
+
oob += eccbytes;
if (chip->ecc.postpad) {
- chip->read_buf(mtd, oob, chip->ecc.postpad);
+ ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
+ false);
+ if (ret)
+ return ret;
+
oob += chip->ecc.postpad;
}
}
size = mtd->oobsize - (oob - chip->oob_poi);
- if (size)
- chip->read_buf(mtd, oob, size);
+ if (size) {
+ ret = nand_read_data_op(chip, oob, size, false);
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -1456,8 +2991,8 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
uint8_t *p = buf;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
- uint8_t *ecc_code = chip->buffers->ecccode;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ uint8_t *ecc_code = chip->ecc.code_buf;
unsigned int max_bitflips = 0;
chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
@@ -1521,15 +3056,14 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
data_col_addr = start_step * chip->ecc.size;
/* If we read not a page aligned data */
- if (data_col_addr != 0)
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, data_col_addr, -1);
-
p = bufpoi + data_col_addr;
- chip->read_buf(mtd, p, datafrag_len);
+ ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len);
+ if (ret)
+ return ret;
/* Calculate ECC */
for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
- chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]);
+ chip->ecc.calculate(mtd, p, &chip->ecc.calc_buf[i]);
/*
* The performance is faster if we position offsets according to
@@ -1543,8 +3077,11 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
gaps = 1;
if (gaps) {
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ ret = nand_change_read_column_op(chip, mtd->writesize,
+ chip->oob_poi, mtd->oobsize,
+ false);
+ if (ret)
+ return ret;
} else {
/*
* Send the command to read the particular ECC bytes take care
@@ -1558,12 +3095,15 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
(busw - 1))
aligned_len++;
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
- mtd->writesize + aligned_pos, -1);
- chip->read_buf(mtd, &chip->oob_poi[aligned_pos], aligned_len);
+ ret = nand_change_read_column_op(chip,
+ mtd->writesize + aligned_pos,
+ &chip->oob_poi[aligned_pos],
+ aligned_len, false);
+ if (ret)
+ return ret;
}
- ret = mtd_ooblayout_get_eccbytes(mtd, chip->buffers->ecccode,
+ ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf,
chip->oob_poi, index, eccfrag_len);
if (ret)
return ret;
@@ -1572,13 +3112,13 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
int stat;
- stat = chip->ecc.correct(mtd, p,
- &chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
+ stat = chip->ecc.correct(mtd, p, &chip->ecc.code_buf[i],
+ &chip->ecc.calc_buf[i]);
if (stat == -EBADMSG &&
(chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
/* check for empty pages with bitflips */
stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
- &chip->buffers->ecccode[i],
+ &chip->ecc.code_buf[i],
chip->ecc.bytes,
NULL, 0,
chip->ecc.strength);
@@ -1611,16 +3151,27 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
uint8_t *p = buf;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
- uint8_t *ecc_code = chip->buffers->ecccode;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ uint8_t *ecc_code = chip->ecc.code_buf;
unsigned int max_bitflips = 0;
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
chip->ecc.hwctl(mtd, NAND_ECC_READ);
- chip->read_buf(mtd, p, eccsize);
+
+ ret = nand_read_data_op(chip, p, eccsize, false);
+ if (ret)
+ return ret;
+
chip->ecc.calculate(mtd, p, &ecc_calc[i]);
}
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false);
+ if (ret)
+ return ret;
ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
chip->ecc.total);
@@ -1674,14 +3225,18 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
uint8_t *p = buf;
- uint8_t *ecc_code = chip->buffers->ecccode;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_code = chip->ecc.code_buf;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
unsigned int max_bitflips = 0;
/* Read the OOB area first */
- chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
+ if (ret)
+ return ret;
+
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
chip->ecc.total);
@@ -1692,7 +3247,11 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
int stat;
chip->ecc.hwctl(mtd, NAND_ECC_READ);
- chip->read_buf(mtd, p, eccsize);
+
+ ret = nand_read_data_op(chip, p, eccsize, false);
+ if (ret)
+ return ret;
+
chip->ecc.calculate(mtd, p, &ecc_calc[i]);
stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
@@ -1729,7 +3288,7 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required, int page)
{
- int i, eccsize = chip->ecc.size;
+ int ret, i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
@@ -1737,25 +3296,44 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *oob = chip->oob_poi;
unsigned int max_bitflips = 0;
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
int stat;
chip->ecc.hwctl(mtd, NAND_ECC_READ);
- chip->read_buf(mtd, p, eccsize);
+
+ ret = nand_read_data_op(chip, p, eccsize, false);
+ if (ret)
+ return ret;
if (chip->ecc.prepad) {
- chip->read_buf(mtd, oob, chip->ecc.prepad);
+ ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
+ false);
+ if (ret)
+ return ret;
+
oob += chip->ecc.prepad;
}
chip->ecc.hwctl(mtd, NAND_ECC_READSYN);
- chip->read_buf(mtd, oob, eccbytes);
+
+ ret = nand_read_data_op(chip, oob, eccbytes, false);
+ if (ret)
+ return ret;
+
stat = chip->ecc.correct(mtd, p, oob, NULL);
oob += eccbytes;
if (chip->ecc.postpad) {
- chip->read_buf(mtd, oob, chip->ecc.postpad);
+ ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
+ false);
+ if (ret)
+ return ret;
+
oob += chip->ecc.postpad;
}
@@ -1779,8 +3357,11 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
/* Calculate remaining oob bytes */
i = mtd->oobsize - (oob - chip->oob_poi);
- if (i)
- chip->read_buf(mtd, oob, i);
+ if (i) {
+ ret = nand_read_data_op(chip, oob, i, false);
+ if (ret)
+ return ret;
+ }
return max_bitflips;
}
@@ -1894,16 +3475,13 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
/* Is the current page in the buffer? */
if (realpage != chip->pagebuf || oob) {
- bufpoi = use_bufpoi ? chip->buffers->databuf : buf;
+ bufpoi = use_bufpoi ? chip->data_buf : buf;
if (use_bufpoi && aligned)
pr_debug("%s: using read bounce buffer for buf@%p\n",
__func__, buf);
read_retry:
- if (nand_standard_page_accessors(&chip->ecc))
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
-
/*
* Now read the page into the buffer. Absent an error,
* the read methods return max bitflips per ecc step.
@@ -1938,7 +3516,7 @@ read_retry:
/* Invalidate page cache */
chip->pagebuf = -1;
}
- memcpy(buf, chip->buffers->databuf + col, bytes);
+ memcpy(buf, chip->data_buf + col, bytes);
}
if (unlikely(oob)) {
@@ -1979,7 +3557,7 @@ read_retry:
buf += bytes;
max_bitflips = max_t(unsigned int, max_bitflips, ret);
} else {
- memcpy(buf, chip->buffers->databuf + col, bytes);
+ memcpy(buf, chip->data_buf + col, bytes);
buf += bytes;
max_bitflips = max_t(unsigned int, max_bitflips,
chip->pagebuf_bitflips);
@@ -2027,33 +3605,6 @@ read_retry:
}
/**
- * nand_read - [MTD Interface] MTD compatibility function for nand_do_read_ecc
- * @mtd: MTD device structure
- * @from: offset to read from
- * @len: number of bytes to read
- * @retlen: pointer to variable to store the number of read bytes
- * @buf: the databuffer to put data
- *
- * Get hold of the chip and call nand_do_read.
- */
-static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, uint8_t *buf)
-{
- struct mtd_oob_ops ops;
- int ret;
-
- nand_get_device(mtd, FL_READING);
- memset(&ops, 0, sizeof(ops));
- ops.len = len;
- ops.datbuf = buf;
- ops.mode = MTD_OPS_PLACE_OOB;
- ret = nand_do_read_ops(mtd, from, &ops);
- *retlen = ops.retlen;
- nand_release_device(mtd);
- return ret;
-}
-
-/**
* nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
* @mtd: mtd info structure
* @chip: nand chip info structure
@@ -2061,9 +3612,7 @@ static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
*/
int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
EXPORT_SYMBOL(nand_read_oob_std);
@@ -2081,25 +3630,43 @@ int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
int eccsize = chip->ecc.size;
uint8_t *bufpoi = chip->oob_poi;
- int i, toread, sndrnd = 0, pos;
+ int i, toread, sndrnd = 0, pos, ret;
+
+ ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
+ if (ret)
+ return ret;
- chip->cmdfunc(mtd, NAND_CMD_READ0, chip->ecc.size, page);
for (i = 0; i < chip->ecc.steps; i++) {
if (sndrnd) {
+ int ret;
+
pos = eccsize + i * (eccsize + chunk);
if (mtd->writesize > 512)
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, pos, -1);
+ ret = nand_change_read_column_op(chip, pos,
+ NULL, 0,
+ false);
else
- chip->cmdfunc(mtd, NAND_CMD_READ0, pos, page);
+ ret = nand_read_page_op(chip, page, pos, NULL,
+ 0);
+
+ if (ret)
+ return ret;
} else
sndrnd = 1;
toread = min_t(int, length, chunk);
- chip->read_buf(mtd, bufpoi, toread);
+
+ ret = nand_read_data_op(chip, bufpoi, toread, false);
+ if (ret)
+ return ret;
+
bufpoi += toread;
length -= toread;
}
- if (length > 0)
- chip->read_buf(mtd, bufpoi, length);
+ if (length > 0) {
+ ret = nand_read_data_op(chip, bufpoi, length, false);
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -2113,18 +3680,8 @@ EXPORT_SYMBOL(nand_read_oob_syndrome);
*/
int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
{
- int status = 0;
- const uint8_t *buf = chip->oob_poi;
- int length = mtd->oobsize;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
- chip->write_buf(mtd, buf, length);
- /* Send command to program the OOB data */
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
+ mtd->oobsize);
}
EXPORT_SYMBOL(nand_write_oob_std);
@@ -2140,7 +3697,7 @@ int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
{
int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
int eccsize = chip->ecc.size, length = mtd->oobsize;
- int i, len, pos, status = 0, sndcmd = 0, steps = chip->ecc.steps;
+ int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
const uint8_t *bufpoi = chip->oob_poi;
/*
@@ -2154,7 +3711,10 @@ int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
} else
pos = eccsize;
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, pos, page);
+ ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
+ if (ret)
+ return ret;
+
for (i = 0; i < steps; i++) {
if (sndcmd) {
if (mtd->writesize <= 512) {
@@ -2163,28 +3723,40 @@ int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
len = eccsize;
while (len > 0) {
int num = min_t(int, len, 4);
- chip->write_buf(mtd, (uint8_t *)&fill,
- num);
+
+ ret = nand_write_data_op(chip, &fill,
+ num, false);
+ if (ret)
+ return ret;
+
len -= num;
}
} else {
pos = eccsize + i * (eccsize + chunk);
- chip->cmdfunc(mtd, NAND_CMD_RNDIN, pos, -1);
+ ret = nand_change_write_column_op(chip, pos,
+ NULL, 0,
+ false);
+ if (ret)
+ return ret;
}
} else
sndcmd = 1;
len = min_t(int, length, chunk);
- chip->write_buf(mtd, bufpoi, len);
+
+ ret = nand_write_data_op(chip, bufpoi, len, false);
+ if (ret)
+ return ret;
+
bufpoi += len;
length -= len;
}
- if (length > 0)
- chip->write_buf(mtd, bufpoi, length);
-
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- status = chip->waitfunc(mtd, chip);
+ if (length > 0) {
+ ret = nand_write_data_op(chip, bufpoi, length, false);
+ if (ret)
+ return ret;
+ }
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_end_op(chip);
}
EXPORT_SYMBOL(nand_write_oob_syndrome);
@@ -2199,6 +3771,7 @@ EXPORT_SYMBOL(nand_write_oob_syndrome);
static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
+ unsigned int max_bitflips = 0;
int page, realpage, chipnr;
struct nand_chip *chip = mtd_to_nand(mtd);
struct mtd_ecc_stats stats;
@@ -2214,21 +3787,6 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
len = mtd_oobavail(mtd, ops);
- if (unlikely(ops->ooboffs >= len)) {
- pr_debug("%s: attempt to start read outside oob\n",
- __func__);
- return -EINVAL;
- }
-
- /* Do not allow reads past end of device */
- if (unlikely(from >= mtd->size ||
- ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
- (from >> chip->page_shift)) * len)) {
- pr_debug("%s: attempt to read beyond end of device\n",
- __func__);
- return -EINVAL;
- }
-
chipnr = (int)(from >> chip->chip_shift);
chip->select_chip(mtd, chipnr);
@@ -2256,6 +3814,8 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
nand_wait_ready(mtd);
}
+ max_bitflips = max_t(unsigned int, max_bitflips, ret);
+
readlen -= len;
if (!readlen)
break;
@@ -2281,7 +3841,7 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
if (mtd->ecc_stats.failed - stats.failed)
return -EBADMSG;
- return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+ return max_bitflips;
}
/**
@@ -2299,13 +3859,6 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
ops->retlen = 0;
- /* Do not allow reads past end of device */
- if (ops->datbuf && (from + ops->len) > mtd->size) {
- pr_debug("%s: attempt to read beyond end of device\n",
- __func__);
- return -EINVAL;
- }
-
if (ops->mode != MTD_OPS_PLACE_OOB &&
ops->mode != MTD_OPS_AUTO_OOB &&
ops->mode != MTD_OPS_RAW)
@@ -2336,11 +3889,20 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required, int page)
{
- chip->write_buf(mtd, buf, mtd->writesize);
- if (oob_required)
- chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ int ret;
- return 0;
+ ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
+ if (ret)
+ return ret;
+
+ if (oob_required) {
+ ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
+ false);
+ if (ret)
+ return ret;
+ }
+
+ return nand_prog_page_end_op(chip);
}
EXPORT_SYMBOL(nand_write_page_raw);
@@ -2362,31 +3924,52 @@ static int nand_write_page_raw_syndrome(struct mtd_info *mtd,
int eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
uint8_t *oob = chip->oob_poi;
- int steps, size;
+ int steps, size, ret;
+
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
for (steps = chip->ecc.steps; steps > 0; steps--) {
- chip->write_buf(mtd, buf, eccsize);
+ ret = nand_write_data_op(chip, buf, eccsize, false);
+ if (ret)
+ return ret;
+
buf += eccsize;
if (chip->ecc.prepad) {
- chip->write_buf(mtd, oob, chip->ecc.prepad);
+ ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
+ false);
+ if (ret)
+ return ret;
+
oob += chip->ecc.prepad;
}
- chip->write_buf(mtd, oob, eccbytes);
+ ret = nand_write_data_op(chip, oob, eccbytes, false);
+ if (ret)
+ return ret;
+
oob += eccbytes;
if (chip->ecc.postpad) {
- chip->write_buf(mtd, oob, chip->ecc.postpad);
+ ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
+ false);
+ if (ret)
+ return ret;
+
oob += chip->ecc.postpad;
}
}
size = mtd->oobsize - (oob - chip->oob_poi);
- if (size)
- chip->write_buf(mtd, oob, size);
+ if (size) {
+ ret = nand_write_data_op(chip, oob, size, false);
+ if (ret)
+ return ret;
+ }
- return 0;
+ return nand_prog_page_end_op(chip);
}
/**
* nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
@@ -2403,7 +3986,7 @@ static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
int i, eccsize = chip->ecc.size, ret;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
const uint8_t *p = buf;
/* Software ECC calculation */
@@ -2433,12 +4016,20 @@ static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
int i, eccsize = chip->ecc.size, ret;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
const uint8_t *p = buf;
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
- chip->write_buf(mtd, p, eccsize);
+
+ ret = nand_write_data_op(chip, p, eccsize, false);
+ if (ret)
+ return ret;
+
chip->ecc.calculate(mtd, p, &ecc_calc[i]);
}
@@ -2447,9 +4038,11 @@ static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
if (ret)
return ret;
- chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
+ if (ret)
+ return ret;
- return 0;
+ return nand_prog_page_end_op(chip);
}
@@ -2469,7 +4062,7 @@ static int nand_write_subpage_hwecc(struct mtd_info *mtd,
int oob_required, int page)
{
uint8_t *oob_buf = chip->oob_poi;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
int ecc_size = chip->ecc.size;
int ecc_bytes = chip->ecc.bytes;
int ecc_steps = chip->ecc.steps;
@@ -2478,12 +4071,18 @@ static int nand_write_subpage_hwecc(struct mtd_info *mtd,
int oob_bytes = mtd->oobsize / ecc_steps;
int step, ret;
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
for (step = 0; step < ecc_steps; step++) {
/* configure controller for WRITE access */
chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
/* write data (untouched subpages already masked by 0xFF) */
- chip->write_buf(mtd, buf, ecc_size);
+ ret = nand_write_data_op(chip, buf, ecc_size, false);
+ if (ret)
+ return ret;
/* mask ECC of un-touched subpages by padding 0xFF */
if ((step < start_step) || (step > end_step))
@@ -2503,16 +4102,18 @@ static int nand_write_subpage_hwecc(struct mtd_info *mtd,
/* copy calculated ECC for whole page to chip->buffer->oob */
/* this include masked-value(0xFF) for unwritten subpages */
- ecc_calc = chip->buffers->ecccalc;
+ ecc_calc = chip->ecc.calc_buf;
ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
chip->ecc.total);
if (ret)
return ret;
/* write OOB buffer to NAND device */
- chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
+ if (ret)
+ return ret;
- return 0;
+ return nand_prog_page_end_op(chip);
}
@@ -2537,33 +4138,55 @@ static int nand_write_page_syndrome(struct mtd_info *mtd,
int eccsteps = chip->ecc.steps;
const uint8_t *p = buf;
uint8_t *oob = chip->oob_poi;
+ int ret;
- for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
- chip->write_buf(mtd, p, eccsize);
+
+ ret = nand_write_data_op(chip, p, eccsize, false);
+ if (ret)
+ return ret;
if (chip->ecc.prepad) {
- chip->write_buf(mtd, oob, chip->ecc.prepad);
+ ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
+ false);
+ if (ret)
+ return ret;
+
oob += chip->ecc.prepad;
}
chip->ecc.calculate(mtd, p, oob);
- chip->write_buf(mtd, oob, eccbytes);
+
+ ret = nand_write_data_op(chip, oob, eccbytes, false);
+ if (ret)
+ return ret;
+
oob += eccbytes;
if (chip->ecc.postpad) {
- chip->write_buf(mtd, oob, chip->ecc.postpad);
+ ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
+ false);
+ if (ret)
+ return ret;
+
oob += chip->ecc.postpad;
}
}
/* Calculate remaining oob bytes */
i = mtd->oobsize - (oob - chip->oob_poi);
- if (i)
- chip->write_buf(mtd, oob, i);
+ if (i) {
+ ret = nand_write_data_op(chip, oob, i, false);
+ if (ret)
+ return ret;
+ }
- return 0;
+ return nand_prog_page_end_op(chip);
}
/**
@@ -2589,9 +4212,6 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
else
subpage = 0;
- if (nand_standard_page_accessors(&chip->ecc))
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
-
if (unlikely(raw))
status = chip->ecc.write_page_raw(mtd, chip, buf,
oob_required, page);
@@ -2605,14 +4225,6 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
if (status < 0)
return status;
- if (nand_standard_page_accessors(&chip->ecc)) {
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
- if (status & NAND_STATUS_FAIL)
- return -EIO;
- }
-
return 0;
}
@@ -2737,9 +4349,9 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
if (part_pagewr)
bytes = min_t(int, bytes - column, writelen);
chip->pagebuf = -1;
- memset(chip->buffers->databuf, 0xff, mtd->writesize);
- memcpy(&chip->buffers->databuf[column], buf, bytes);
- wbuf = chip->buffers->databuf;
+ memset(chip->data_buf, 0xff, mtd->writesize);
+ memcpy(&chip->data_buf[column], buf, bytes);
+ wbuf = chip->data_buf;
}
if (unlikely(oob)) {
@@ -2822,33 +4434,6 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
}
/**
- * nand_write - [MTD Interface] NAND write with ECC
- * @mtd: MTD device structure
- * @to: offset to write to
- * @len: number of bytes to write
- * @retlen: pointer to variable to store the number of written bytes
- * @buf: the data to write
- *
- * NAND write with ECC.
- */
-static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const uint8_t *buf)
-{
- struct mtd_oob_ops ops;
- int ret;
-
- nand_get_device(mtd, FL_WRITING);
- memset(&ops, 0, sizeof(ops));
- ops.len = len;
- ops.datbuf = (uint8_t *)buf;
- ops.mode = MTD_OPS_PLACE_OOB;
- ret = nand_do_write_ops(mtd, to, &ops);
- *retlen = ops.retlen;
- nand_release_device(mtd);
- return ret;
-}
-
-/**
* nand_do_write_oob - [MTD Interface] NAND write out-of-band
* @mtd: MTD device structure
* @to: offset to write to
@@ -2874,22 +4459,6 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
return -EINVAL;
}
- if (unlikely(ops->ooboffs >= len)) {
- pr_debug("%s: attempt to start write outside oob\n",
- __func__);
- return -EINVAL;
- }
-
- /* Do not allow write past end of device */
- if (unlikely(to >= mtd->size ||
- ops->ooboffs + ops->ooblen >
- ((mtd->size >> chip->page_shift) -
- (to >> chip->page_shift)) * len)) {
- pr_debug("%s: attempt to write beyond end of device\n",
- __func__);
- return -EINVAL;
- }
-
chipnr = (int)(to >> chip->chip_shift);
/*
@@ -2945,13 +4514,6 @@ static int nand_write_oob(struct mtd_info *mtd, loff_t to,
ops->retlen = 0;
- /* Do not allow writes past end of device */
- if (ops->datbuf && (to + ops->len) > mtd->size) {
- pr_debug("%s: attempt to write beyond end of device\n",
- __func__);
- return -EINVAL;
- }
-
nand_get_device(mtd, FL_WRITING);
switch (ops->mode) {
@@ -2984,11 +4546,12 @@ out:
static int single_erase(struct mtd_info *mtd, int page)
{
struct nand_chip *chip = mtd_to_nand(mtd);
+ unsigned int eraseblock;
+
/* Send commands to erase a block */
- chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
- chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
+ eraseblock = page >> (chip->phys_erase_shift - chip->page_shift);
- return chip->waitfunc(mtd, chip);
+ return nand_erase_op(chip, eraseblock);
}
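+
+/*
+ * Example: with 64 pages per eraseblock (phys_erase_shift - page_shift == 6),
+ * page 320 maps to eraseblock 320 >> 6 == 5, which is the unit
+ * nand_erase_op() expects.
+ */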
/**
@@ -3072,7 +4635,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
status = chip->erase(mtd, page & chip->pagemask);
/* See if block erase succeeded */
- if (status & NAND_STATUS_FAIL) {
+ if (status) {
pr_debug("%s: failed erase, page 0x%08x\n",
__func__, page);
instr->state = MTD_ERASE_FAILED;
@@ -3215,22 +4778,12 @@ static int nand_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
int addr, uint8_t *subfeature_param)
{
- int status;
- int i;
-
if (!chip->onfi_version ||
!(le16_to_cpu(chip->onfi_params.opt_cmd)
& ONFI_OPT_CMD_SET_GET_FEATURES))
return -EINVAL;
- chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, addr, -1);
- for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
- chip->write_byte(mtd, subfeature_param[i]);
-
- status = chip->waitfunc(mtd, chip);
- if (status & NAND_STATUS_FAIL)
- return -EIO;
- return 0;
+ return nand_set_features_op(chip, addr, subfeature_param);
}
/**
@@ -3243,17 +4796,12 @@ static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
int addr, uint8_t *subfeature_param)
{
- int i;
-
if (!chip->onfi_version ||
!(le16_to_cpu(chip->onfi_params.opt_cmd)
& ONFI_OPT_CMD_SET_GET_FEATURES))
return -EINVAL;
- chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, addr, -1);
- for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
- *subfeature_param++ = chip->read_byte(mtd);
- return 0;
+ return nand_get_features_op(chip, addr, subfeature_param);
}
/**
@@ -3319,7 +4867,7 @@ static void nand_set_defaults(struct nand_chip *chip)
chip->chip_delay = 20;
/* check, if a user supplied command function given */
- if (chip->cmdfunc == NULL)
+ if (!chip->cmdfunc && !chip->exec_op)
chip->cmdfunc = nand_command;
/* check, if a user supplied wait function given */
@@ -3396,12 +4944,11 @@ static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
static int nand_flash_detect_ext_param_page(struct nand_chip *chip,
struct nand_onfi_params *p)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
struct onfi_ext_param_page *ep;
struct onfi_ext_section *s;
struct onfi_ext_ecc_info *ecc;
uint8_t *cursor;
- int ret = -EINVAL;
+ int ret;
int len;
int i;
@@ -3411,14 +4958,18 @@ static int nand_flash_detect_ext_param_page(struct nand_chip *chip,
return -ENOMEM;
/* Send our own NAND_CMD_PARAM. */
- chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
+ ret = nand_read_param_page_op(chip, 0, NULL, 0);
+ if (ret)
+ goto ext_out;
/* Use the Change Read Column command to skip the ONFI param pages. */
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
- sizeof(*p) * p->num_of_param_pages , -1);
+ ret = nand_change_read_column_op(chip,
+ sizeof(*p) * p->num_of_param_pages,
+ ep, len, true);
+ if (ret)
+ goto ext_out;
- /* Read out the Extended Parameter Page. */
- chip->read_buf(mtd, (uint8_t *)ep, len);
+ ret = -EINVAL;
if ((onfi_crc16(ONFI_CRC_BASE, ((uint8_t *)ep) + 2, len - 2)
!= le16_to_cpu(ep->crc))) {
pr_debug("fail in the CRC.\n");
@@ -3471,19 +5022,23 @@ static int nand_flash_detect_onfi(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_onfi_params *p = &chip->onfi_params;
- int i, j;
- int val;
+ char id[4];
+ int i, ret, val;
/* Try ONFI for unknown chip or LP */
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
- if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
- chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
+ ret = nand_readid_op(chip, 0x20, id, sizeof(id));
+ if (ret || strncmp(id, "ONFI", 4))
+ return 0;
+
+ ret = nand_read_param_page_op(chip, 0, NULL, 0);
+ if (ret)
return 0;
- chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
for (i = 0; i < 3; i++) {
- for (j = 0; j < sizeof(*p); j++)
- ((uint8_t *)p)[j] = chip->read_byte(mtd);
+ ret = nand_read_data_op(chip, p, sizeof(*p), true);
+ if (ret)
+ return 0;
+
if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
le16_to_cpu(p->crc)) {
break;
@@ -3574,20 +5129,22 @@ static int nand_flash_detect_jedec(struct nand_chip *chip)
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_jedec_params *p = &chip->jedec_params;
struct jedec_ecc_info *ecc;
- int val;
- int i, j;
+ char id[5];
+ int i, val, ret;
/* Try JEDEC for unknown chip or LP */
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x40, -1);
- if (chip->read_byte(mtd) != 'J' || chip->read_byte(mtd) != 'E' ||
- chip->read_byte(mtd) != 'D' || chip->read_byte(mtd) != 'E' ||
- chip->read_byte(mtd) != 'C')
+ ret = nand_readid_op(chip, 0x40, id, sizeof(id));
+ if (ret || strncmp(id, "JEDEC", sizeof(id)))
+ return 0;
+
+ ret = nand_read_param_page_op(chip, 0x40, NULL, 0);
+ if (ret)
return 0;
- chip->cmdfunc(mtd, NAND_CMD_PARAM, 0x40, -1);
for (i = 0; i < 3; i++) {
- for (j = 0; j < sizeof(*p); j++)
- ((uint8_t *)p)[j] = chip->read_byte(mtd);
+ ret = nand_read_data_op(chip, p, sizeof(*p), true);
+ if (ret)
+ return 0;
if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) ==
le16_to_cpu(p->crc))
@@ -3866,8 +5423,7 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
{
const struct nand_manufacturer *manufacturer;
struct mtd_info *mtd = nand_to_mtd(chip);
- int busw;
- int i;
+ int busw, ret;
u8 *id_data = chip->id.data;
u8 maf_id, dev_id;
@@ -3875,17 +5431,21 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
* Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
* after power-up.
*/
- nand_reset(chip, 0);
+ ret = nand_reset(chip, 0);
+ if (ret)
+ return ret;
/* Select the device */
chip->select_chip(mtd, 0);
/* Send the command for reading device ID */
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+ ret = nand_readid_op(chip, 0, id_data, 2);
+ if (ret)
+ return ret;
/* Read manufacturer and device IDs */
- maf_id = chip->read_byte(mtd);
- dev_id = chip->read_byte(mtd);
+ maf_id = id_data[0];
+ dev_id = id_data[1];
/*
* Try again to make sure, as some systems the bus-hold or other
@@ -3894,11 +5454,10 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
* not match, ignore the device completely.
*/
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
-
/* Read entire ID string */
- for (i = 0; i < ARRAY_SIZE(chip->id.data); i++)
- id_data[i] = chip->read_byte(mtd);
+ ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));
+ if (ret)
+ return ret;
if (id_data[0] != maf_id || id_data[1] != dev_id) {
pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
@@ -4190,6 +5749,9 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
struct nand_chip *chip = mtd_to_nand(mtd);
int ret;
+ /* Enforce the right timings for reset/detection */
+ onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
+
ret = nand_dt_init(chip);
if (ret)
return ret;
@@ -4197,15 +5759,21 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
if (!mtd->name && mtd->dev.parent)
mtd->name = dev_name(mtd->dev.parent);
- if ((!chip->cmdfunc || !chip->select_chip) && !chip->cmd_ctrl) {
+ /*
+ * ->cmdfunc() is legacy and will only be used if ->exec_op() is not
+ * populated.
+ */
+ if (!chip->exec_op) {
/*
- * Default functions assigned for chip_select() and
- * cmdfunc() both expect cmd_ctrl() to be populated,
- * so we need to check that that's the case
+ * Default functions assigned for ->cmdfunc() and
+ * ->select_chip() both expect ->cmd_ctrl() to be populated.
*/
- pr_err("chip.cmd_ctrl() callback is not provided");
- return -EINVAL;
+ if ((!chip->cmdfunc || !chip->select_chip) && !chip->cmd_ctrl) {
+ pr_err("->cmd_ctrl() should be provided\n");
+ return -EINVAL;
+ }
}
+
/* Set the default functions */
nand_set_defaults(chip);
@@ -4225,15 +5793,16 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
/* Check for a chip array */
for (i = 1; i < maxchips; i++) {
+ u8 id[2];
+
/* See comment in nand_get_flash_type for reset */
nand_reset(chip, i);
chip->select_chip(mtd, i);
/* Send the command for reading device ID */
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+ nand_readid_op(chip, 0, id, sizeof(id));
/* Read manufacturer and device IDs */
- if (nand_maf_id != chip->read_byte(mtd) ||
- nand_dev_id != chip->read_byte(mtd)) {
+ if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
chip->select_chip(mtd, -1);
break;
}
@@ -4600,26 +6169,6 @@ static bool nand_ecc_strength_good(struct mtd_info *mtd)
return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds;
}
-static bool invalid_ecc_page_accessors(struct nand_chip *chip)
-{
- struct nand_ecc_ctrl *ecc = &chip->ecc;
-
- if (nand_standard_page_accessors(ecc))
- return false;
-
- /*
- * NAND_ECC_CUSTOM_PAGE_ACCESS flag is set, make sure the NAND
- * controller driver implements all the page accessors because
- * default helpers are not suitable when the core does not
- * send the READ0/PAGEPROG commands.
- */
- return (!ecc->read_page || !ecc->write_page ||
- !ecc->read_page_raw || !ecc->write_page_raw ||
- (NAND_HAS_SUBPAGE_READ(chip) && !ecc->read_subpage) ||
- (NAND_HAS_SUBPAGE_WRITE(chip) && !ecc->write_subpage &&
- ecc->hwctl && ecc->calculate));
-}
-
/**
* nand_scan_tail - [NAND Interface] Scan for the NAND device
* @mtd: MTD device structure
@@ -4632,7 +6181,6 @@ int nand_scan_tail(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct nand_ecc_ctrl *ecc = &chip->ecc;
- struct nand_buffers *nbuf = NULL;
int ret, i;
/* New bad blocks should be marked in OOB, flash-based BBT, or both */
@@ -4641,39 +6189,9 @@ int nand_scan_tail(struct mtd_info *mtd)
return -EINVAL;
}
- if (invalid_ecc_page_accessors(chip)) {
- pr_err("Invalid ECC page accessors setup\n");
- return -EINVAL;
- }
-
- if (!(chip->options & NAND_OWN_BUFFERS)) {
- nbuf = kzalloc(sizeof(*nbuf), GFP_KERNEL);
- if (!nbuf)
- return -ENOMEM;
-
- nbuf->ecccalc = kmalloc(mtd->oobsize, GFP_KERNEL);
- if (!nbuf->ecccalc) {
- ret = -ENOMEM;
- goto err_free_nbuf;
- }
-
- nbuf->ecccode = kmalloc(mtd->oobsize, GFP_KERNEL);
- if (!nbuf->ecccode) {
- ret = -ENOMEM;
- goto err_free_nbuf;
- }
-
- nbuf->databuf = kmalloc(mtd->writesize + mtd->oobsize,
- GFP_KERNEL);
- if (!nbuf->databuf) {
- ret = -ENOMEM;
- goto err_free_nbuf;
- }
-
- chip->buffers = nbuf;
- } else if (!chip->buffers) {
+ chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
+ if (!chip->data_buf)
return -ENOMEM;
- }
/*
* FIXME: some NAND manufacturer drivers expect the first die to be
@@ -4685,10 +6203,10 @@ int nand_scan_tail(struct mtd_info *mtd)
ret = nand_manufacturer_init(chip);
chip->select_chip(mtd, -1);
if (ret)
- goto err_free_nbuf;
+ goto err_free_buf;
/* Set the internal oob buffer location, just after the page data */
- chip->oob_poi = chip->buffers->databuf + mtd->writesize;
+ chip->oob_poi = chip->data_buf + mtd->writesize;
/*
* If no default placement scheme is given, select an appropriate one.
@@ -4836,6 +6354,15 @@ int nand_scan_tail(struct mtd_info *mtd)
goto err_nand_manuf_cleanup;
}
+ if (ecc->correct || ecc->calculate) {
+ ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
+ ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
+ if (!ecc->calc_buf || !ecc->code_buf) {
+ ret = -ENOMEM;
+ goto err_nand_manuf_cleanup;
+ }
+ }
+
/* For many systems, the standard OOB write also works for raw */
if (!ecc->read_oob_raw)
ecc->read_oob_raw = ecc->read_oob;
@@ -4917,8 +6444,6 @@ int nand_scan_tail(struct mtd_info *mtd)
mtd->_erase = nand_erase;
mtd->_point = NULL;
mtd->_unpoint = NULL;
- mtd->_read = nand_read;
- mtd->_write = nand_write;
mtd->_panic_write = panic_nand_write;
mtd->_read_oob = nand_read_oob;
mtd->_write_oob = nand_write_oob;
@@ -4954,7 +6479,7 @@ int nand_scan_tail(struct mtd_info *mtd)
chip->select_chip(mtd, -1);
if (ret)
- goto err_nand_data_iface_cleanup;
+ goto err_nand_manuf_cleanup;
}
/* Check, if we should skip the bad block table scan */
@@ -4964,23 +6489,18 @@ int nand_scan_tail(struct mtd_info *mtd)
/* Build bad block table */
ret = chip->scan_bbt(mtd);
if (ret)
- goto err_nand_data_iface_cleanup;
+ goto err_nand_manuf_cleanup;
return 0;
-err_nand_data_iface_cleanup:
- nand_release_data_interface(chip);
err_nand_manuf_cleanup:
nand_manufacturer_cleanup(chip);
-err_free_nbuf:
- if (nbuf) {
- kfree(nbuf->databuf);
- kfree(nbuf->ecccode);
- kfree(nbuf->ecccalc);
- kfree(nbuf);
- }
+err_free_buf:
+ kfree(chip->data_buf);
+ kfree(ecc->code_buf);
+ kfree(ecc->calc_buf);
return ret;
}
@@ -5028,16 +6548,11 @@ void nand_cleanup(struct nand_chip *chip)
chip->ecc.algo == NAND_ECC_BCH)
nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
- nand_release_data_interface(chip);
-
/* Free bad block table memory */
kfree(chip->bbt);
- if (!(chip->options & NAND_OWN_BUFFERS) && chip->buffers) {
- kfree(chip->buffers->databuf);
- kfree(chip->buffers->ecccode);
- kfree(chip->buffers->ecccalc);
- kfree(chip->buffers);
- }
+ kfree(chip->data_buf);
+ kfree(chip->ecc.code_buf);
+ kfree(chip->ecc.calc_buf);
/* Free bad block descriptor memory */
if (chip->badblock_pattern && chip->badblock_pattern->options
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 2915b6739bf8..36092850be2c 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -898,7 +898,7 @@ static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *b
{
struct nand_chip *this = mtd_to_nand(mtd);
- return create_bbt(mtd, this->buffers->databuf, bd, -1);
+ return create_bbt(mtd, this->data_buf, bd, -1);
}
/**
diff --git a/drivers/mtd/nand/nand_hynix.c b/drivers/mtd/nand/nand_hynix.c
index 985751eda317..d542908a0ebb 100644
--- a/drivers/mtd/nand/nand_hynix.c
+++ b/drivers/mtd/nand/nand_hynix.c
@@ -67,15 +67,43 @@ struct hynix_read_retry_otp {
static bool hynix_nand_has_valid_jedecid(struct nand_chip *chip)
{
+ u8 jedecid[5] = { };
+ int ret;
+
+ ret = nand_readid_op(chip, 0x40, jedecid, sizeof(jedecid));
+ if (ret)
+ return false;
+
+ return !strncmp("JEDEC", jedecid, sizeof(jedecid));
+}
+
+static int hynix_nand_cmd_op(struct nand_chip *chip, u8 cmd)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (chip->exec_op) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(cmd, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->cmdfunc(mtd, cmd, -1, -1);
+
+ return 0;
+}
+
+static int hynix_nand_reg_write_op(struct nand_chip *chip, u8 addr, u8 val)
+{
struct mtd_info *mtd = nand_to_mtd(chip);
- u8 jedecid[6] = { };
- int i = 0;
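+	/* The register address is sent on each of the two column cycles */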
+ u16 column = ((u16)addr << 8) | addr;
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x40, -1);
- for (i = 0; i < 5; i++)
- jedecid[i] = chip->read_byte(mtd);
+ chip->cmdfunc(mtd, NAND_CMD_NONE, column, -1);
+ chip->write_byte(mtd, val);
- return !strcmp("JEDEC", jedecid);
+ return 0;
}
static int hynix_nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
@@ -83,14 +111,15 @@ static int hynix_nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
struct nand_chip *chip = mtd_to_nand(mtd);
struct hynix_nand *hynix = nand_get_manufacturer_data(chip);
const u8 *values;
- int status;
- int i;
+ int i, ret;
values = hynix->read_retry->values +
(retry_mode * hynix->read_retry->nregs);
/* Enter 'Set Hynix Parameters' mode */
- chip->cmdfunc(mtd, NAND_HYNIX_CMD_SET_PARAMS, -1, -1);
+ ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
+ if (ret)
+ return ret;
/*
* Configure the NAND in the requested read-retry mode.
@@ -102,21 +131,14 @@ static int hynix_nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
* probably tweaked at production in this case).
*/
for (i = 0; i < hynix->read_retry->nregs; i++) {
- int column = hynix->read_retry->regs[i];
-
- column |= column << 8;
- chip->cmdfunc(mtd, NAND_CMD_NONE, column, -1);
- chip->write_byte(mtd, values[i]);
+ ret = hynix_nand_reg_write_op(chip, hynix->read_retry->regs[i],
+ values[i]);
+ if (ret)
+ return ret;
}
/* Apply the new settings. */
- chip->cmdfunc(mtd, NAND_HYNIX_CMD_APPLY_PARAMS, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
- if (status & NAND_STATUS_FAIL)
- return -EIO;
-
- return 0;
+ return hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
}
/**
@@ -172,40 +194,63 @@ static int hynix_read_rr_otp(struct nand_chip *chip,
const struct hynix_read_retry_otp *info,
void *buf)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
- int i;
+ int i, ret;
- chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ ret = nand_reset_op(chip);
+ if (ret)
+ return ret;
- chip->cmdfunc(mtd, NAND_HYNIX_CMD_SET_PARAMS, -1, -1);
+ ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
+ if (ret)
+ return ret;
for (i = 0; i < info->nregs; i++) {
- int column = info->regs[i];
-
- column |= column << 8;
- chip->cmdfunc(mtd, NAND_CMD_NONE, column, -1);
- chip->write_byte(mtd, info->values[i]);
+ ret = hynix_nand_reg_write_op(chip, info->regs[i],
+ info->values[i]);
+ if (ret)
+ return ret;
}
- chip->cmdfunc(mtd, NAND_HYNIX_CMD_APPLY_PARAMS, -1, -1);
+ ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
+ if (ret)
+ return ret;
/* Sequence to enter OTP mode? */
- chip->cmdfunc(mtd, 0x17, -1, -1);
- chip->cmdfunc(mtd, 0x04, -1, -1);
- chip->cmdfunc(mtd, 0x19, -1, -1);
+ ret = hynix_nand_cmd_op(chip, 0x17);
+ if (ret)
+ return ret;
+
+ ret = hynix_nand_cmd_op(chip, 0x4);
+ if (ret)
+ return ret;
+
+ ret = hynix_nand_cmd_op(chip, 0x19);
+ if (ret)
+ return ret;
/* Now read the page */
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x0, info->page);
- chip->read_buf(mtd, buf, info->size);
+ ret = nand_read_page_op(chip, info->page, 0, buf, info->size);
+ if (ret)
+ return ret;
/* Put everything back to normal */
- chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
- chip->cmdfunc(mtd, NAND_HYNIX_CMD_SET_PARAMS, 0x38, -1);
- chip->write_byte(mtd, 0x0);
- chip->cmdfunc(mtd, NAND_HYNIX_CMD_APPLY_PARAMS, -1, -1);
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x0, -1);
+ ret = nand_reset_op(chip);
+ if (ret)
+ return ret;
- return 0;
+ ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
+ if (ret)
+ return ret;
+
+ ret = hynix_nand_reg_write_op(chip, 0x38, 0);
+ if (ret)
+ return ret;
+
+ ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
+ if (ret)
+ return ret;
+
+ return nand_read_page_op(chip, 0, 0, NULL, 0);
}
#define NAND_HYNIX_1XNM_RR_COUNT_OFFS 0
diff --git a/drivers/mtd/nand/nand_micron.c b/drivers/mtd/nand/nand_micron.c
index abf6a3c376e8..02e109ae73f1 100644
--- a/drivers/mtd/nand/nand_micron.c
+++ b/drivers/mtd/nand/nand_micron.c
@@ -117,16 +117,28 @@ micron_nand_read_page_on_die_ecc(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required,
int page)
{
- int status;
- int max_bitflips = 0;
+ u8 status;
+ int ret, max_bitflips = 0;
- micron_nand_on_die_ecc_setup(chip, true);
+ ret = micron_nand_on_die_ecc_setup(chip, true);
+ if (ret)
+ return ret;
+
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ goto out;
+
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ goto out;
+
+ ret = nand_exit_status_op(chip);
+ if (ret)
+ goto out;
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
- chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
- status = chip->read_byte(mtd);
if (status & NAND_STATUS_FAIL)
mtd->ecc_stats.failed++;
+
/*
* The internal ECC doesn't tell us the number of bitflips
* that have been corrected, but tells us if it recommends to
@@ -137,13 +149,15 @@ micron_nand_read_page_on_die_ecc(struct mtd_info *mtd, struct nand_chip *chip,
else if (status & NAND_STATUS_WRITE_RECOMMENDED)
max_bitflips = chip->ecc.strength;
- chip->cmdfunc(mtd, NAND_CMD_READ0, -1, -1);
-
- nand_read_page_raw(mtd, chip, buf, oob_required, page);
+ ret = nand_read_data_op(chip, buf, mtd->writesize, false);
+ if (!ret && oob_required)
+ ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
+ false);
+out:
micron_nand_on_die_ecc_setup(chip, false);
- return max_bitflips;
+ return ret ? ret : max_bitflips;
}
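/*
 * Sketch (assumption): nand_status_op() and nand_exit_status_op() bundle the
 * removed NAND_CMD_STATUS/NAND_CMD_READ0 dance; a minimal equivalent in
 * terms of the legacy hooks would be:
 */
static int nand_status_op_sketch(struct nand_chip *chip, u8 *status)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
	if (status)
		*status = chip->read_byte(mtd);

	return 0;
}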
static int
@@ -151,46 +165,16 @@ micron_nand_write_page_on_die_ecc(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required,
int page)
{
- int status;
-
- micron_nand_on_die_ecc_setup(chip, true);
+ int ret;
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
- nand_write_page_raw(mtd, chip, buf, oob_required, page);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- status = chip->waitfunc(mtd, chip);
+ ret = micron_nand_on_die_ecc_setup(chip, true);
+ if (ret)
+ return ret;
+ ret = nand_write_page_raw(mtd, chip, buf, oob_required, page);
micron_nand_on_die_ecc_setup(chip, false);
- return status & NAND_STATUS_FAIL ? -EIO : 0;
-}
-
-static int
-micron_nand_read_page_raw_on_die_ecc(struct mtd_info *mtd,
- struct nand_chip *chip,
- uint8_t *buf, int oob_required,
- int page)
-{
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
- nand_read_page_raw(mtd, chip, buf, oob_required, page);
-
- return 0;
-}
-
-static int
-micron_nand_write_page_raw_on_die_ecc(struct mtd_info *mtd,
- struct nand_chip *chip,
- const uint8_t *buf, int oob_required,
- int page)
-{
- int status;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
- nand_write_page_raw(mtd, chip, buf, oob_required, page);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return ret;
}
enum {
@@ -285,17 +269,14 @@ static int micron_nand_init(struct nand_chip *chip)
return -EINVAL;
}
- chip->ecc.options = NAND_ECC_CUSTOM_PAGE_ACCESS;
chip->ecc.bytes = 8;
chip->ecc.size = 512;
chip->ecc.strength = 4;
chip->ecc.algo = NAND_ECC_BCH;
chip->ecc.read_page = micron_nand_read_page_on_die_ecc;
chip->ecc.write_page = micron_nand_write_page_on_die_ecc;
- chip->ecc.read_page_raw =
- micron_nand_read_page_raw_on_die_ecc;
- chip->ecc.write_page_raw =
- micron_nand_write_page_raw_on_die_ecc;
+ chip->ecc.read_page_raw = nand_read_page_raw;
+ chip->ecc.write_page_raw = nand_write_page_raw;
mtd_set_ooblayout(mtd, &micron_nand_on_die_ooblayout_ops);
}
diff --git a/drivers/mtd/nand/nand_samsung.c b/drivers/mtd/nand/nand_samsung.c
index d348f0129ae7..ef022f62f74c 100644
--- a/drivers/mtd/nand/nand_samsung.c
+++ b/drivers/mtd/nand/nand_samsung.c
@@ -91,6 +91,25 @@ static void samsung_nand_decode_id(struct nand_chip *chip)
}
} else {
nand_decode_ext_id(chip);
+
+ if (nand_is_slc(chip)) {
+ switch (chip->id.data[1]) {
+ /* K9F4G08U0D-S[I|C]B0(T00) */
+ case 0xDC:
+ chip->ecc_step_ds = 512;
+ chip->ecc_strength_ds = 1;
+ break;
+
+ /* K9F1G08U0E 21nm chips do not support subpage write */
+ case 0xF1:
+ if (chip->id.len > 4 &&
+ (chip->id.data[4] & GENMASK(1, 0)) == 0x1)
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+ break;
+ default:
+ break;
+ }
+ }
}
}
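/*
 * Note: GENMASK(1, 0) evaluates to 0x3, so the K9F1G08U0E check above tests
 * the two least-significant bits of the 5th ID byte against 0x1.
 */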
diff --git a/drivers/mtd/nand/nand_timings.c b/drivers/mtd/nand/nand_timings.c
index 5d1533bcc5bd..9400d039ddbd 100644
--- a/drivers/mtd/nand/nand_timings.c
+++ b/drivers/mtd/nand/nand_timings.c
@@ -283,16 +283,16 @@ const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode)
EXPORT_SYMBOL(onfi_async_timing_mode_to_sdr_timings);
/**
- * onfi_init_data_interface - [NAND Interface] Initialize a data interface from
+ * onfi_fill_data_interface - [NAND Interface] Initialize a data interface from
* given ONFI mode
- * @iface: The data interface to be initialized
* @mode: The ONFI timing mode
*/
-int onfi_init_data_interface(struct nand_chip *chip,
- struct nand_data_interface *iface,
+int onfi_fill_data_interface(struct nand_chip *chip,
enum nand_data_interface_type type,
int timing_mode)
{
+ struct nand_data_interface *iface = &chip->data_interface;
+
if (type != NAND_SDR_IFACE)
return -EINVAL;
@@ -321,15 +321,4 @@ int onfi_init_data_interface(struct nand_chip *chip,
return 0;
}
-EXPORT_SYMBOL(onfi_init_data_interface);
-
-/**
- * nand_get_default_data_interface - [NAND Interface] Retrieve NAND
- * data interface for mode 0. This is used as default timing after
- * reset.
- */
-const struct nand_data_interface *nand_get_default_data_interface(void)
-{
- return &onfi_sdr_timings[0];
-}
-EXPORT_SYMBOL(nand_get_default_data_interface);
+EXPORT_SYMBOL(onfi_fill_data_interface);
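/*
 * Example (assumption): with the iface argument gone, callers fill
 * chip->data_interface in place; e.g. selecting ONFI SDR timing mode 0,
 * the default after a reset:
 *
 *	ret = onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
 *	if (ret)
 *		return ret;
 */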
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index dad438c4906a..8cdf7d3d8fa7 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1530,7 +1530,9 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required, int page)
{
int ret;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
/* Enable GPMC ecc engine */
chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
@@ -1548,7 +1550,8 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
/* Write ecc vector to OOB area */
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+
+ return nand_prog_page_end_op(chip);
}
/**
@@ -1568,7 +1571,7 @@ static int omap_write_subpage_bch(struct mtd_info *mtd,
u32 data_len, const u8 *buf,
int oob_required, int page)
{
- u8 *ecc_calc = chip->buffers->ecccalc;
+ u8 *ecc_calc = chip->ecc.calc_buf;
int ecc_size = chip->ecc.size;
int ecc_bytes = chip->ecc.bytes;
int ecc_steps = chip->ecc.steps;
@@ -1582,6 +1585,7 @@ static int omap_write_subpage_bch(struct mtd_info *mtd,
* ECC is calculated for all subpages but we choose
* only what we want.
*/
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
/* Enable GPMC ECC engine */
chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
@@ -1605,7 +1609,7 @@ static int omap_write_subpage_bch(struct mtd_info *mtd,
/* copy calculated ECC for whole page to chip->oob_poi */
/* this includes the masked value (0xFF) for unwritten subpages */
- ecc_calc = chip->buffers->ecccalc;
+ ecc_calc = chip->ecc.calc_buf;
ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
chip->ecc.total);
if (ret)
@@ -1614,7 +1618,7 @@ static int omap_write_subpage_bch(struct mtd_info *mtd,
/* write OOB buffer to NAND device */
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_prog_page_end_op(chip);
}
/**
@@ -1635,11 +1639,13 @@ static int omap_write_subpage_bch(struct mtd_info *mtd,
static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required, int page)
{
- uint8_t *ecc_calc = chip->buffers->ecccalc;
- uint8_t *ecc_code = chip->buffers->ecccode;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ uint8_t *ecc_code = chip->ecc.code_buf;
int stat, ret;
unsigned int max_bitflips = 0;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
/* Enable GPMC ecc engine */
chip->ecc.hwctl(mtd, NAND_ECC_READ);
@@ -1647,10 +1653,10 @@ static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
chip->read_buf(mtd, buf, mtd->writesize);
/* Read oob bytes */
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
- mtd->writesize + BADBLOCK_MARKER_LENGTH, -1);
- chip->read_buf(mtd, chip->oob_poi + BADBLOCK_MARKER_LENGTH,
- chip->ecc.total);
+ nand_change_read_column_op(chip,
+ mtd->writesize + BADBLOCK_MARKER_LENGTH,
+ chip->oob_poi + BADBLOCK_MARKER_LENGTH,
+ chip->ecc.total, false);
/* Calculate ecc bytes */
omap_calculate_ecc_bch_multi(mtd, buf, ecc_calc);
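/*
 * Sketch (assumption): nand_change_read_column_op() subsumes the removed
 * NAND_CMD_RNDOUT + read_buf pair; expressed with the legacy hooks it is
 * roughly:
 */
static int nand_change_read_column_op_sketch(struct nand_chip *chip,
					     unsigned int column, void *buf,
					     unsigned int len, bool force_8bit)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	chip->cmdfunc(mtd, NAND_CMD_RNDOUT, column, -1);
	if (len)
		chip->read_buf(mtd, buf, len);	/* force_8bit handling omitted */

	return 0;
}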
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 9285f60e5783..d1979c7dbe7e 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -520,15 +520,13 @@ static int pxa3xx_nand_init_timings_compat(struct pxa3xx_nand_host *host,
struct nand_chip *chip = &host->chip;
struct pxa3xx_nand_info *info = host->info_data;
const struct pxa3xx_nand_flash *f = NULL;
- struct mtd_info *mtd = nand_to_mtd(&host->chip);
int i, id, ntypes;
+ u8 idbuf[2];
ntypes = ARRAY_SIZE(builtin_flash_types);
- chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
-
- id = chip->read_byte(mtd);
- id |= chip->read_byte(mtd) << 0x8;
+ nand_readid_op(chip, 0, idbuf, sizeof(idbuf));
+ id = idbuf[0] | (idbuf[1] << 8);
for (i = 0; i < ntypes; i++) {
f = &builtin_flash_types[i];
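/*
 * Sketch (assumption): nand_readid_op(chip, 0, idbuf, sizeof(idbuf)) above
 * bundles the removed READID command and per-byte reads, roughly:
 *
 *	chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
 *	idbuf[0] = chip->read_byte(mtd);
 *	idbuf[1] = chip->read_byte(mtd);
 */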
@@ -1351,10 +1349,10 @@ static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
struct nand_chip *chip, const uint8_t *buf, int oob_required,
int page)
{
- chip->write_buf(mtd, buf, mtd->writesize);
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
@@ -1364,7 +1362,7 @@ static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
struct pxa3xx_nand_info *info = host->info_data;
- chip->read_buf(mtd, buf, mtd->writesize);
+ nand_read_page_op(chip, page, 0, buf, mtd->writesize);
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
if (info->retcode == ERR_CORERR && info->use_ecc) {
diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c
index 2656c1ac5646..563b759ffca6 100644
--- a/drivers/mtd/nand/qcom_nandc.c
+++ b/drivers/mtd/nand/qcom_nandc.c
@@ -23,6 +23,7 @@
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/dma/qcom_bam_dma.h>
+#include <linux/dma-direct.h> /* XXX: drivers shall never use this directly! */
/* NANDc reg offsets */
#define NAND_FLASH_CMD 0x00
@@ -1725,6 +1726,7 @@ static int qcom_nandc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
u8 *data_buf, *oob_buf = NULL;
int ret;
+ nand_read_page_op(chip, page, 0, NULL, 0);
data_buf = buf;
oob_buf = oob_required ? chip->oob_poi : NULL;
@@ -1750,6 +1752,7 @@ static int qcom_nandc_read_page_raw(struct mtd_info *mtd,
int i, ret;
int read_loc;
+ nand_read_page_op(chip, page, 0, NULL, 0);
data_buf = buf;
oob_buf = chip->oob_poi;
@@ -1850,6 +1853,8 @@ static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
u8 *data_buf, *oob_buf;
int i, ret;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
clear_read_regs(nandc);
clear_bam_transaction(nandc);
@@ -1902,6 +1907,9 @@ static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
free_descs(nandc);
+ if (!ret)
+ ret = nand_prog_page_end_op(chip);
+
return ret;
}
@@ -1916,6 +1924,7 @@ static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
u8 *data_buf, *oob_buf;
int i, ret;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
clear_read_regs(nandc);
clear_bam_transaction(nandc);
@@ -1970,6 +1979,9 @@ static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
free_descs(nandc);
+ if (!ret)
+ ret = nand_prog_page_end_op(chip);
+
return ret;
}
@@ -1990,7 +2002,7 @@ static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
struct nand_ecc_ctrl *ecc = &chip->ecc;
u8 *oob = chip->oob_poi;
int data_size, oob_size;
- int ret, status = 0;
+ int ret;
host->use_ecc = true;
@@ -2027,11 +2039,7 @@ static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
return -EIO;
}
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_end_op(chip);
}
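/*
 * Sketch: nand_prog_page_end_op() captures the PAGEPROG + waitfunc +
 * NAND_STATUS_FAIL idiom removed throughout this series; the removed lines
 * above are exactly its legacy-hook equivalent:
 */
static int nand_prog_page_end_op_sketch(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int status;

	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
	status = chip->waitfunc(mtd, chip);

	return status & NAND_STATUS_FAIL ? -EIO : 0;
}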
static int qcom_nandc_block_bad(struct mtd_info *mtd, loff_t ofs)
@@ -2081,7 +2089,7 @@ static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs)
struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
- int page, ret, status = 0;
+ int page, ret;
clear_read_regs(nandc);
clear_bam_transaction(nandc);
@@ -2114,11 +2122,7 @@ static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs)
return -EIO;
}
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_end_op(chip);
}
/*
@@ -2636,6 +2640,9 @@ static int qcom_nand_host_init(struct qcom_nand_controller *nandc,
nand_set_flash_node(chip, dn);
mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
+ if (!mtd->name)
+ return -ENOMEM;
+
mtd->owner = THIS_MODULE;
mtd->dev.parent = dev;
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index fc9287af4614..595635b9e9de 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -364,7 +364,7 @@ static int r852_wait(struct mtd_info *mtd, struct nand_chip *chip)
struct r852_device *dev = nand_get_controller_data(chip);
unsigned long timeout;
- int status;
+ u8 status;
timeout = jiffies + (chip->state == FL_ERASING ?
msecs_to_jiffies(400) : msecs_to_jiffies(20));
@@ -373,8 +373,7 @@ static int r852_wait(struct mtd_info *mtd, struct nand_chip *chip)
if (chip->dev_ready(mtd))
break;
- chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
- status = (int)chip->read_byte(mtd);
+ nand_status_op(chip, &status);
/* Unfortunately, no way to send detailed error status... */
if (dev->dma_error) {
@@ -522,9 +521,7 @@ exit:
static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
/*
@@ -1046,7 +1043,7 @@ static int r852_resume(struct device *device)
if (dev->card_registred) {
r852_engine_enable(dev);
dev->chip->select_chip(mtd, 0);
- dev->chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ nand_reset_op(dev->chip);
dev->chip->select_chip(mtd, -1);
}
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index 3c5008a4f5f3..c4e7755448e6 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -614,7 +614,7 @@ static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_va
static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required, int page)
{
- chip->read_buf(mtd, buf, mtd->writesize);
+ nand_read_page_op(chip, page, 0, buf, mtd->writesize);
if (oob_required)
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
return 0;
@@ -624,9 +624,9 @@ static int flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required,
int page)
{
- chip->write_buf(mtd, buf, mtd->writesize);
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
diff --git a/drivers/mtd/nand/sm_common.h b/drivers/mtd/nand/sm_common.h
index d3e028e58b0f..1581671b05ae 100644
--- a/drivers/mtd/nand/sm_common.h
+++ b/drivers/mtd/nand/sm_common.h
@@ -36,7 +36,7 @@ struct sm_oob {
#define SM_SMALL_OOB_SIZE 8
-extern int sm_register_device(struct mtd_info *mtd, int smartmedia);
+int sm_register_device(struct mtd_info *mtd, int smartmedia);
static inline int sm_sector_valid(struct sm_oob *oob)
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
index 82244be3e766..f5a55c63935c 100644
--- a/drivers/mtd/nand/sunxi_nand.c
+++ b/drivers/mtd/nand/sunxi_nand.c
@@ -958,12 +958,12 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
int ret;
if (*cur_off != data_off)
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1);
+ nand_change_read_column_op(nand, data_off, NULL, 0, false);
sunxi_nfc_randomizer_read_buf(mtd, NULL, ecc->size, false, page);
if (data_off + ecc->size != oob_off)
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
+ nand_change_read_column_op(nand, oob_off, NULL, 0, false);
ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
if (ret)
@@ -991,16 +991,15 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
* Re-read the data with the randomizer disabled to identify
* bitflips in erased pages.
*/
- if (nand->options & NAND_NEED_SCRAMBLING) {
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1);
- nand->read_buf(mtd, data, ecc->size);
- } else {
+ if (nand->options & NAND_NEED_SCRAMBLING)
+ nand_change_read_column_op(nand, data_off, data,
+ ecc->size, false);
+ else
memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE,
ecc->size);
- }
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
- nand->read_buf(mtd, oob, ecc->bytes + 4);
+ nand_change_read_column_op(nand, oob_off, oob, ecc->bytes + 4,
+ false);
ret = nand_check_erased_ecc_chunk(data, ecc->size,
oob, ecc->bytes + 4,
@@ -1011,7 +1010,8 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE, ecc->size);
if (oob_required) {
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
+ nand_change_read_column_op(nand, oob_off, NULL, 0,
+ false);
sunxi_nfc_randomizer_read_buf(mtd, oob, ecc->bytes + 4,
true, page);
@@ -1038,8 +1038,8 @@ static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd,
return;
if (!cur_off || *cur_off != offset)
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
- offset + mtd->writesize, -1);
+ nand_change_read_column_op(nand, mtd->writesize, NULL, 0,
+ false);
if (!randomize)
sunxi_nfc_read_buf(mtd, oob + offset, len);
@@ -1116,9 +1116,9 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf,
if (oob_required && !erased) {
/* TODO: use DMA to retrieve OOB */
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
- mtd->writesize + oob_off, -1);
- nand->read_buf(mtd, oob, ecc->bytes + 4);
+ nand_change_read_column_op(nand,
+ mtd->writesize + oob_off,
+ oob, ecc->bytes + 4, false);
sunxi_nfc_hw_ecc_get_prot_oob_bytes(mtd, oob, i,
!i, page);
@@ -1143,18 +1143,17 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf,
/*
* Re-read the data with the randomizer disabled to
* identify bitflips in erased pages.
+ * TODO: use DMA to read page in raw mode
*/
- if (randomized) {
- /* TODO: use DMA to read page in raw mode */
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
- data_off, -1);
- nand->read_buf(mtd, data, ecc->size);
- }
+ if (randomized)
+ nand_change_read_column_op(nand, data_off,
+ data, ecc->size,
+ false);
/* TODO: use DMA to retrieve OOB */
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
- mtd->writesize + oob_off, -1);
- nand->read_buf(mtd, oob, ecc->bytes + 4);
+ nand_change_read_column_op(nand,
+ mtd->writesize + oob_off,
+ oob, ecc->bytes + 4, false);
ret = nand_check_erased_ecc_chunk(data, ecc->size,
oob, ecc->bytes + 4,
@@ -1187,12 +1186,12 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
int ret;
if (data_off != *cur_off)
- nand->cmdfunc(mtd, NAND_CMD_RNDIN, data_off, -1);
+ nand_change_write_column_op(nand, data_off, NULL, 0, false);
sunxi_nfc_randomizer_write_buf(mtd, data, ecc->size, false, page);
if (data_off + ecc->size != oob_off)
- nand->cmdfunc(mtd, NAND_CMD_RNDIN, oob_off, -1);
+ nand_change_write_column_op(nand, oob_off, NULL, 0, false);
ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
if (ret)
@@ -1228,8 +1227,8 @@ static void sunxi_nfc_hw_ecc_write_extra_oob(struct mtd_info *mtd,
return;
if (!cur_off || *cur_off != offset)
- nand->cmdfunc(mtd, NAND_CMD_RNDIN,
- offset + mtd->writesize, -1);
+ nand_change_write_column_op(nand, offset + mtd->writesize,
+ NULL, 0, false);
sunxi_nfc_randomizer_write_buf(mtd, oob + offset, len, false, page);
@@ -1246,6 +1245,8 @@ static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd,
int ret, i, cur_off = 0;
bool raw_mode = false;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
sunxi_nfc_hw_ecc_enable(mtd);
for (i = 0; i < ecc->steps; i++) {
@@ -1279,14 +1280,14 @@ static int sunxi_nfc_hw_ecc_read_page_dma(struct mtd_info *mtd,
{
int ret;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, oob_required, page,
chip->ecc.steps);
if (ret >= 0)
return ret;
/* Fallback to PIO mode */
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 0, -1);
-
return sunxi_nfc_hw_ecc_read_page(mtd, chip, buf, oob_required, page);
}
@@ -1299,6 +1300,8 @@ static int sunxi_nfc_hw_ecc_read_subpage(struct mtd_info *mtd,
int ret, i, cur_off = 0;
unsigned int max_bitflips = 0;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
sunxi_nfc_hw_ecc_enable(mtd);
for (i = data_offs / ecc->size;
@@ -1330,13 +1333,13 @@ static int sunxi_nfc_hw_ecc_read_subpage_dma(struct mtd_info *mtd,
int nchunks = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size);
int ret;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, false, page, nchunks);
if (ret >= 0)
return ret;
/* Fallback to PIO mode */
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 0, -1);
-
return sunxi_nfc_hw_ecc_read_subpage(mtd, chip, data_offs, readlen,
buf, page);
}
@@ -1349,6 +1352,8 @@ static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
struct nand_ecc_ctrl *ecc = &chip->ecc;
int ret, i, cur_off = 0;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
sunxi_nfc_hw_ecc_enable(mtd);
for (i = 0; i < ecc->steps; i++) {
@@ -1370,7 +1375,7 @@ static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
sunxi_nfc_hw_ecc_disable(mtd);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int sunxi_nfc_hw_ecc_write_subpage(struct mtd_info *mtd,
@@ -1382,6 +1387,8 @@ static int sunxi_nfc_hw_ecc_write_subpage(struct mtd_info *mtd,
struct nand_ecc_ctrl *ecc = &chip->ecc;
int ret, i, cur_off = 0;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
sunxi_nfc_hw_ecc_enable(mtd);
for (i = data_offs / ecc->size;
@@ -1400,7 +1407,7 @@ static int sunxi_nfc_hw_ecc_write_subpage(struct mtd_info *mtd,
sunxi_nfc_hw_ecc_disable(mtd);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int sunxi_nfc_hw_ecc_write_page_dma(struct mtd_info *mtd,
@@ -1430,6 +1437,8 @@ static int sunxi_nfc_hw_ecc_write_page_dma(struct mtd_info *mtd,
sunxi_nfc_hw_ecc_set_prot_oob_bytes(mtd, oob, i, !i, page);
}
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
sunxi_nfc_hw_ecc_enable(mtd);
sunxi_nfc_randomizer_config(mtd, page, false);
sunxi_nfc_randomizer_enable(mtd);
@@ -1460,7 +1469,7 @@ static int sunxi_nfc_hw_ecc_write_page_dma(struct mtd_info *mtd,
sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
NULL, page);
- return 0;
+ return nand_prog_page_end_op(chip);
pio_fallback:
return sunxi_nfc_hw_ecc_write_page(mtd, chip, buf, oob_required, page);
@@ -1476,6 +1485,8 @@ static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd,
int ret, i, cur_off = 0;
bool raw_mode = false;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
sunxi_nfc_hw_ecc_enable(mtd);
for (i = 0; i < ecc->steps; i++) {
@@ -1512,6 +1523,8 @@ static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd,
struct nand_ecc_ctrl *ecc = &chip->ecc;
int ret, i, cur_off = 0;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
sunxi_nfc_hw_ecc_enable(mtd);
for (i = 0; i < ecc->steps; i++) {
@@ -1533,41 +1546,33 @@ static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd,
sunxi_nfc_hw_ecc_disable(mtd);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int sunxi_nfc_hw_common_ecc_read_oob(struct mtd_info *mtd,
struct nand_chip *chip,
int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
-
chip->pagebuf = -1;
- return chip->ecc.read_page(mtd, chip, chip->buffers->databuf, 1, page);
+ return chip->ecc.read_page(mtd, chip, chip->data_buf, 1, page);
}
static int sunxi_nfc_hw_common_ecc_write_oob(struct mtd_info *mtd,
struct nand_chip *chip,
int page)
{
- int ret, status;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
+ int ret;
chip->pagebuf = -1;
- memset(chip->buffers->databuf, 0xff, mtd->writesize);
- ret = chip->ecc.write_page(mtd, chip, chip->buffers->databuf, 1, page);
+ memset(chip->data_buf, 0xff, mtd->writesize);
+ ret = chip->ecc.write_page(mtd, chip, chip->data_buf, 1, page);
if (ret)
return ret;
/* Send command to program the OOB data */
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
-
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return nand_prog_page_end_op(chip);
}
static const s32 tWB_lut[] = {6, 12, 16, 20};
@@ -1853,8 +1858,14 @@ static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd,
/* Add ECC info retrieval from DT */
for (i = 0; i < ARRAY_SIZE(strengths); i++) {
- if (ecc->strength <= strengths[i])
+ if (ecc->strength <= strengths[i]) {
+ /*
+ * Update ecc->strength value with the actual strength
+ * that will be used by the ECC engine.
+ */
+ ecc->strength = strengths[i];
break;
+ }
}
if (i >= ARRAY_SIZE(strengths)) {
diff --git a/drivers/mtd/nand/tango_nand.c b/drivers/mtd/nand/tango_nand.c
index 766906f03943..c5bee00b7f5e 100644
--- a/drivers/mtd/nand/tango_nand.c
+++ b/drivers/mtd/nand/tango_nand.c
@@ -329,7 +329,7 @@ static void aux_read(struct nand_chip *chip, u8 **buf, int len, int *pos)
if (!*buf) {
/* skip over "len" bytes */
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, *pos, -1);
+ nand_change_read_column_op(chip, *pos, NULL, 0, false);
} else {
tango_read_buf(mtd, *buf, len);
*buf += len;
@@ -344,7 +344,7 @@ static void aux_write(struct nand_chip *chip, const u8 **buf, int len, int *pos)
if (!*buf) {
/* skip over "len" bytes */
- chip->cmdfunc(mtd, NAND_CMD_RNDIN, *pos, -1);
+ nand_change_write_column_op(chip, *pos, NULL, 0, false);
} else {
tango_write_buf(mtd, *buf, len);
*buf += len;
@@ -427,7 +427,7 @@ static void raw_write(struct nand_chip *chip, const u8 *buf, const u8 *oob)
static int tango_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
u8 *buf, int oob_required, int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ nand_read_page_op(chip, page, 0, NULL, 0);
raw_read(chip, buf, chip->oob_poi);
return 0;
}
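/*
 * Note (assumption): called with a NULL buffer and zero length, as here,
 * nand_read_page_op() reduces to issuing the READ0 command for the page,
 * i.e. the removed chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page).
 */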
@@ -435,23 +435,15 @@ static int tango_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
static int tango_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
const u8 *buf, int oob_required, int page)
{
- int status;
-
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
raw_write(chip, buf, chip->oob_poi);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->waitfunc(mtd, chip);
- if (status & NAND_STATUS_FAIL)
- return -EIO;
-
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int tango_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ nand_read_page_op(chip, page, 0, NULL, 0);
raw_read(chip, NULL, chip->oob_poi);
return 0;
}
@@ -459,11 +451,9 @@ static int tango_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
static int tango_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
int page)
{
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
raw_write(chip, NULL, chip->oob_poi);
- chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
- chip->waitfunc(mtd, chip);
- return 0;
+ return nand_prog_page_end_op(chip);
}
static int oob_ecc(struct mtd_info *mtd, int idx, struct mtd_oob_region *res)
@@ -590,7 +580,6 @@ static int chip_init(struct device *dev, struct device_node *np)
ecc->write_page = tango_write_page;
ecc->read_oob = tango_read_oob;
ecc->write_oob = tango_write_oob;
- ecc->options = NAND_ECC_CUSTOM_PAGE_ACCESS;
err = nand_scan_tail(mtd);
if (err)
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index 84dbf32332e1..dcaa924502de 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -192,6 +192,7 @@ tmio_nand_wait(struct mtd_info *mtd, struct nand_chip *nand_chip)
{
struct tmio_nand *tmio = mtd_to_tmio(mtd);
long timeout;
+ u8 status;
/* enable RDYREQ interrupt */
tmio_iowrite8(0x0f, tmio->fcr + FCR_ISR);
@@ -212,8 +213,8 @@ tmio_nand_wait(struct mtd_info *mtd, struct nand_chip *nand_chip)
dev_warn(&tmio->dev->dev, "timeout waiting for interrupt\n");
}
- nand_chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
- return nand_chip->read_byte(mtd);
+ nand_status_op(nand_chip, &status);
+ return status;
}
/*
diff --git a/drivers/mtd/nand/vf610_nfc.c b/drivers/mtd/nand/vf610_nfc.c
index 8037d4b48a05..80d31a58e558 100644
--- a/drivers/mtd/nand/vf610_nfc.c
+++ b/drivers/mtd/nand/vf610_nfc.c
@@ -560,7 +560,7 @@ static int vf610_nfc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
int eccsize = chip->ecc.size;
int stat;
- vf610_nfc_read_buf(mtd, buf, eccsize);
+ nand_read_page_op(chip, page, 0, buf, eccsize);
if (oob_required)
vf610_nfc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -580,7 +580,7 @@ static int vf610_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
{
struct vf610_nfc *nfc = mtd_to_nfc(mtd);
- vf610_nfc_write_buf(mtd, buf, mtd->writesize);
+ nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
if (oob_required)
vf610_nfc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -588,7 +588,7 @@ static int vf610_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
nfc->use_hw_ecc = true;
nfc->write_sz = mtd->writesize + mtd->oobsize;
- return 0;
+ return nand_prog_page_end_op(chip);
}
static const struct of_device_id vf610_nfc_dt_ids[] = {
diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig
index dcae2f6a2b11..9dc15748947b 100644
--- a/drivers/mtd/onenand/Kconfig
+++ b/drivers/mtd/onenand/Kconfig
@@ -4,8 +4,7 @@ menuconfig MTD_ONENAND
depends on HAS_IOMEM
help
This enables support for accessing all types of OneNAND flash
- devices. For further information see
- <http://www.samsung.com/Products/Semiconductor/OneNAND/index.htm>
+ devices.
if MTD_ONENAND
@@ -26,9 +25,11 @@ config MTD_ONENAND_GENERIC
config MTD_ONENAND_OMAP2
tristate "OneNAND on OMAP2/OMAP3 support"
depends on ARCH_OMAP2 || ARCH_OMAP3
+ depends on OF || COMPILE_TEST
help
- Support for a OneNAND flash device connected to an OMAP2/OMAP3 CPU
+ Support for a OneNAND flash device connected to an OMAP2/OMAP3 SoC
via the GPMC memory controller.
+ Enable dmaengine and gpiolib for better performance.
config MTD_ONENAND_SAMSUNG
tristate "OneNAND on Samsung SOC controller support"
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 24a1388d3031..87c34f607a75 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -28,19 +28,18 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
+#include <linux/of_device.h>
+#include <linux/omap-gpmc.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/slab.h>
-#include <linux/regulator/consumer.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <asm/mach/flash.h>
-#include <linux/platform_data/mtd-onenand-omap2.h>
-
-#include <linux/omap-dma.h>
#define DRIVER_NAME "omap2-onenand"
@@ -50,24 +49,17 @@ struct omap2_onenand {
struct platform_device *pdev;
int gpmc_cs;
unsigned long phys_base;
- unsigned int mem_size;
- int gpio_irq;
+ struct gpio_desc *int_gpiod;
struct mtd_info mtd;
struct onenand_chip onenand;
struct completion irq_done;
struct completion dma_done;
- int dma_channel;
- int freq;
- int (*setup)(void __iomem *base, int *freq_ptr);
- struct regulator *regulator;
- u8 flags;
+ struct dma_chan *dma_chan;
};
-static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
+static void omap2_onenand_dma_complete_func(void *completion)
{
- struct omap2_onenand *c = data;
-
- complete(&c->dma_done);
+ complete(completion);
}
static irqreturn_t omap2_onenand_interrupt(int irq, void *dev_id)
@@ -90,6 +82,65 @@ static inline void write_reg(struct omap2_onenand *c, unsigned short value,
writew(value, c->onenand.base + reg);
}
+static int omap2_onenand_set_cfg(struct omap2_onenand *c,
+ bool sr, bool sw,
+ int latency, int burst_len)
+{
+ unsigned short reg = ONENAND_SYS_CFG1_RDY | ONENAND_SYS_CFG1_INT;
+
+ reg |= latency << ONENAND_SYS_CFG1_BRL_SHIFT;
+
+ switch (burst_len) {
+ case 0: /* continuous */
+ break;
+ case 4:
+ reg |= ONENAND_SYS_CFG1_BL_4;
+ break;
+ case 8:
+ reg |= ONENAND_SYS_CFG1_BL_8;
+ break;
+ case 16:
+ reg |= ONENAND_SYS_CFG1_BL_16;
+ break;
+ case 32:
+ reg |= ONENAND_SYS_CFG1_BL_32;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (latency > 5)
+ reg |= ONENAND_SYS_CFG1_HF;
+ if (latency > 7)
+ reg |= ONENAND_SYS_CFG1_VHF;
+ if (sr)
+ reg |= ONENAND_SYS_CFG1_SYNC_READ;
+ if (sw)
+ reg |= ONENAND_SYS_CFG1_SYNC_WRITE;
+
+ write_reg(c, reg, ONENAND_REG_SYS_CFG1);
+
+ return 0;
+}
+
+static int omap2_onenand_get_freq(int ver)
+{
+ switch ((ver >> 4) & 0xf) {
+ case 0:
+ return 40;
+ case 1:
+ return 54;
+ case 2:
+ return 66;
+ case 3:
+ return 83;
+ case 4:
+ return 104;
+ }
+
+ return -EINVAL;
+}
+
static void wait_err(char *msg, int state, unsigned int ctrl, unsigned int intr)
{
printk(KERN_ERR "onenand_wait: %s! state %d ctrl 0x%04x intr 0x%04x\n",
@@ -153,28 +204,22 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state)
if (!(syscfg & ONENAND_SYS_CFG1_IOBE)) {
syscfg |= ONENAND_SYS_CFG1_IOBE;
write_reg(c, syscfg, ONENAND_REG_SYS_CFG1);
- if (c->flags & ONENAND_IN_OMAP34XX)
- /* Add a delay to let GPIO settle */
- syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
+ /* Add a delay to let GPIO settle */
+ syscfg = read_reg(c, ONENAND_REG_SYS_CFG1);
}
reinit_completion(&c->irq_done);
- if (c->gpio_irq) {
- result = gpio_get_value(c->gpio_irq);
- if (result == -1) {
- ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
- intr = read_reg(c, ONENAND_REG_INTERRUPT);
- wait_err("gpio error", state, ctrl, intr);
- return -EIO;
- }
- } else
- result = 0;
- if (result == 0) {
+ result = gpiod_get_value(c->int_gpiod);
+ if (result < 0) {
+ ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
+ intr = read_reg(c, ONENAND_REG_INTERRUPT);
+ wait_err("gpio error", state, ctrl, intr);
+ return result;
+ } else if (result == 0) {
int retry_cnt = 0;
retry:
- result = wait_for_completion_timeout(&c->irq_done,
- msecs_to_jiffies(20));
- if (result == 0) {
+ if (!wait_for_completion_io_timeout(&c->irq_done,
+ msecs_to_jiffies(20))) {
/* Timeout after 20ms */
ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
if (ctrl & ONENAND_CTRL_ONGO &&
@@ -291,9 +336,42 @@ static inline int omap2_onenand_bufferram_offset(struct mtd_info *mtd, int area)
return 0;
}
-#if defined(CONFIG_ARCH_OMAP3) || defined(MULTI_OMAP2)
+static inline int omap2_onenand_dma_transfer(struct omap2_onenand *c,
+ dma_addr_t src, dma_addr_t dst,
+ size_t count)
+{
+ struct dma_async_tx_descriptor *tx;
+ dma_cookie_t cookie;
+
+ tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count, 0);
+ if (!tx) {
+ dev_err(&c->pdev->dev, "Failed to prepare DMA memcpy\n");
+ return -EIO;
+ }
+
+ reinit_completion(&c->dma_done);
+
+ tx->callback = omap2_onenand_dma_complete_func;
+ tx->callback_param = &c->dma_done;
+
+ cookie = tx->tx_submit(tx);
+ if (dma_submit_error(cookie)) {
+ dev_err(&c->pdev->dev, "Failed to do DMA tx_submit\n");
+ return -EIO;
+ }
+
+ dma_async_issue_pending(c->dma_chan);
+
+ if (!wait_for_completion_io_timeout(&c->dma_done,
+ msecs_to_jiffies(20))) {
+ dmaengine_terminate_sync(c->dma_chan);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
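/*
 * Design note: this helper replaces the legacy omap_set_dma_*() /
 * omap_start_dma() sequences removed below with the generic dmaengine flow
 * (prep, submit, issue_pending, bounded wait), so one routine now serves
 * both the read and write bufferram paths.
 */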
-static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
+static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
unsigned char *buffer, int offset,
size_t count)
{
@@ -301,10 +379,9 @@ static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
struct onenand_chip *this = mtd->priv;
dma_addr_t dma_src, dma_dst;
int bram_offset;
- unsigned long timeout;
void *buf = (void *)buffer;
size_t xtra;
- volatile unsigned *done;
+ int ret;
bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
@@ -341,25 +418,10 @@ static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
goto out_copy;
}
- omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
- count >> 2, 1, 0, 0, 0);
- omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
- dma_src, 0, 0);
- omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
- dma_dst, 0, 0);
-
- reinit_completion(&c->dma_done);
- omap_start_dma(c->dma_channel);
-
- timeout = jiffies + msecs_to_jiffies(20);
- done = &c->dma_done.done;
- while (time_before(jiffies, timeout))
- if (*done)
- break;
-
+ ret = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);
- if (!*done) {
+ if (ret) {
dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
goto out_copy;
}
@@ -371,7 +433,7 @@ out_copy:
return 0;
}
-static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
+static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
const unsigned char *buffer,
int offset, size_t count)
{
@@ -379,9 +441,8 @@ static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
struct onenand_chip *this = mtd->priv;
dma_addr_t dma_src, dma_dst;
int bram_offset;
- unsigned long timeout;
void *buf = (void *)buffer;
- volatile unsigned *done;
+ int ret;
bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
@@ -412,25 +473,10 @@ static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
return -1;
}
- omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
- count >> 2, 1, 0, 0, 0);
- omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
- dma_src, 0, 0);
- omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
- dma_dst, 0, 0);
-
- reinit_completion(&c->dma_done);
- omap_start_dma(c->dma_channel);
-
- timeout = jiffies + msecs_to_jiffies(20);
- done = &c->dma_done.done;
- while (time_before(jiffies, timeout))
- if (*done)
- break;
-
+ ret = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);
- if (!*done) {
+ if (ret) {
dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
goto out_copy;
}
@@ -442,136 +488,6 @@ out_copy:
return 0;
}
-#else
-
-static int omap3_onenand_read_bufferram(struct mtd_info *mtd, int area,
- unsigned char *buffer, int offset,
- size_t count)
-{
- return -ENOSYS;
-}
-
-static int omap3_onenand_write_bufferram(struct mtd_info *mtd, int area,
- const unsigned char *buffer,
- int offset, size_t count)
-{
- return -ENOSYS;
-}
-
-#endif
-
-#if defined(CONFIG_ARCH_OMAP2) || defined(MULTI_OMAP2)
-
-static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
- unsigned char *buffer, int offset,
- size_t count)
-{
- struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
- struct onenand_chip *this = mtd->priv;
- dma_addr_t dma_src, dma_dst;
- int bram_offset;
-
- bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
- /* DMA is not used. Revisit PM requirements before enabling it. */
- if (1 || (c->dma_channel < 0) ||
- ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
- (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
- memcpy(buffer, (__force void *)(this->base + bram_offset),
- count);
- return 0;
- }
-
- dma_src = c->phys_base + bram_offset;
- dma_dst = dma_map_single(&c->pdev->dev, buffer, count,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
- dev_err(&c->pdev->dev,
- "Couldn't DMA map a %d byte buffer\n",
- count);
- return -1;
- }
-
- omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S32,
- count / 4, 1, 0, 0, 0);
- omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
- dma_src, 0, 0);
- omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
- dma_dst, 0, 0);
-
- reinit_completion(&c->dma_done);
- omap_start_dma(c->dma_channel);
- wait_for_completion(&c->dma_done);
-
- dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);
-
- return 0;
-}
-
-static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
- const unsigned char *buffer,
- int offset, size_t count)
-{
- struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
- struct onenand_chip *this = mtd->priv;
- dma_addr_t dma_src, dma_dst;
- int bram_offset;
-
- bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
- /* DMA is not used. Revisit PM requirements before enabling it. */
- if (1 || (c->dma_channel < 0) ||
- ((void *) buffer >= (void *) high_memory) || (bram_offset & 3) ||
- (((unsigned int) buffer) & 3) || (count < 1024) || (count & 3)) {
- memcpy((__force void *)(this->base + bram_offset), buffer,
- count);
- return 0;
- }
-
- dma_src = dma_map_single(&c->pdev->dev, (void *) buffer, count,
- DMA_TO_DEVICE);
- dma_dst = c->phys_base + bram_offset;
- if (dma_mapping_error(&c->pdev->dev, dma_src)) {
- dev_err(&c->pdev->dev,
- "Couldn't DMA map a %d byte buffer\n",
- count);
- return -1;
- }
-
- omap_set_dma_transfer_params(c->dma_channel, OMAP_DMA_DATA_TYPE_S16,
- count / 2, 1, 0, 0, 0);
- omap_set_dma_src_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
- dma_src, 0, 0);
- omap_set_dma_dest_params(c->dma_channel, 0, OMAP_DMA_AMODE_POST_INC,
- dma_dst, 0, 0);
-
- reinit_completion(&c->dma_done);
- omap_start_dma(c->dma_channel);
- wait_for_completion(&c->dma_done);
-
- dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);
-
- return 0;
-}
-
-#else
-
-static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
- unsigned char *buffer, int offset,
- size_t count)
-{
- return -ENOSYS;
-}
-
-static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
- const unsigned char *buffer,
- int offset, size_t count)
-{
- return -ENOSYS;
-}
-
-#endif
-
-static struct platform_driver omap2_onenand_driver;
-
static void omap2_onenand_shutdown(struct platform_device *pdev)
{
struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
@@ -583,168 +499,117 @@ static void omap2_onenand_shutdown(struct platform_device *pdev)
memset((__force void *)c->onenand.base, 0, ONENAND_BUFRAM_SIZE);
}
-static int omap2_onenand_enable(struct mtd_info *mtd)
-{
- int ret;
- struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
-
- ret = regulator_enable(c->regulator);
- if (ret != 0)
- dev_err(&c->pdev->dev, "can't enable regulator\n");
-
- return ret;
-}
-
-static int omap2_onenand_disable(struct mtd_info *mtd)
-{
- int ret;
- struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
-
- ret = regulator_disable(c->regulator);
- if (ret != 0)
- dev_err(&c->pdev->dev, "can't disable regulator\n");
-
- return ret;
-}
-
static int omap2_onenand_probe(struct platform_device *pdev)
{
- struct omap_onenand_platform_data *pdata;
- struct omap2_onenand *c;
- struct onenand_chip *this;
- int r;
+ u32 val;
+ dma_cap_mask_t mask;
+ int freq, latency, r;
struct resource *res;
+ struct omap2_onenand *c;
+ struct gpmc_onenand_info info;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "error getting memory resource\n");
+ return -EINVAL;
+ }
- pdata = dev_get_platdata(&pdev->dev);
- if (pdata == NULL) {
- dev_err(&pdev->dev, "platform data missing\n");
- return -ENODEV;
+ r = of_property_read_u32(np, "reg", &val);
+ if (r) {
+ dev_err(dev, "reg not found in DT\n");
+ return r;
}
- c = kzalloc(sizeof(struct omap2_onenand), GFP_KERNEL);
+ c = devm_kzalloc(dev, sizeof(struct omap2_onenand), GFP_KERNEL);
if (!c)
return -ENOMEM;
init_completion(&c->irq_done);
init_completion(&c->dma_done);
- c->flags = pdata->flags;
- c->gpmc_cs = pdata->cs;
- c->gpio_irq = pdata->gpio_irq;
- c->dma_channel = pdata->dma_channel;
- if (c->dma_channel < 0) {
- /* if -1, don't use DMA */
- c->gpio_irq = 0;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- r = -EINVAL;
- dev_err(&pdev->dev, "error getting memory resource\n");
- goto err_kfree;
- }
-
+ c->gpmc_cs = val;
c->phys_base = res->start;
- c->mem_size = resource_size(res);
-
- if (request_mem_region(c->phys_base, c->mem_size,
- pdev->dev.driver->name) == NULL) {
- dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, size: 0x%x\n",
- c->phys_base, c->mem_size);
- r = -EBUSY;
- goto err_kfree;
- }
- c->onenand.base = ioremap(c->phys_base, c->mem_size);
- if (c->onenand.base == NULL) {
- r = -ENOMEM;
- goto err_release_mem_region;
- }
- if (pdata->onenand_setup != NULL) {
- r = pdata->onenand_setup(c->onenand.base, &c->freq);
- if (r < 0) {
- dev_err(&pdev->dev, "Onenand platform setup failed: "
- "%d\n", r);
- goto err_iounmap;
- }
- c->setup = pdata->onenand_setup;
+ c->onenand.base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(c->onenand.base))
+ return PTR_ERR(c->onenand.base);
+
+ c->int_gpiod = devm_gpiod_get_optional(dev, "int", GPIOD_IN);
+ if (IS_ERR(c->int_gpiod)) {
+ r = PTR_ERR(c->int_gpiod);
+ /* Just try again if this happens */
+ if (r != -EPROBE_DEFER)
+ dev_err(dev, "error getting gpio: %d\n", r);
+ return r;
}
- if (c->gpio_irq) {
- if ((r = gpio_request(c->gpio_irq, "OneNAND irq")) < 0) {
- dev_err(&pdev->dev, "Failed to request GPIO%d for "
- "OneNAND\n", c->gpio_irq);
- goto err_iounmap;
- }
- gpio_direction_input(c->gpio_irq);
+ if (c->int_gpiod) {
+ r = devm_request_irq(dev, gpiod_to_irq(c->int_gpiod),
+ omap2_onenand_interrupt,
+ IRQF_TRIGGER_RISING, "onenand", c);
+ if (r)
+ return r;
- if ((r = request_irq(gpio_to_irq(c->gpio_irq),
- omap2_onenand_interrupt, IRQF_TRIGGER_RISING,
- pdev->dev.driver->name, c)) < 0)
- goto err_release_gpio;
+ c->onenand.wait = omap2_onenand_wait;
}
- if (c->dma_channel >= 0) {
- r = omap_request_dma(0, pdev->dev.driver->name,
- omap2_onenand_dma_cb, (void *) c,
- &c->dma_channel);
- if (r == 0) {
- omap_set_dma_write_mode(c->dma_channel,
- OMAP_DMA_WRITE_NON_POSTED);
- omap_set_dma_src_data_pack(c->dma_channel, 1);
- omap_set_dma_src_burst_mode(c->dma_channel,
- OMAP_DMA_DATA_BURST_8);
- omap_set_dma_dest_data_pack(c->dma_channel, 1);
- omap_set_dma_dest_burst_mode(c->dma_channel,
- OMAP_DMA_DATA_BURST_8);
- } else {
- dev_info(&pdev->dev,
- "failed to allocate DMA for OneNAND, "
- "using PIO instead\n");
- c->dma_channel = -1;
- }
- }
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
- dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual "
- "base %p, freq %d MHz\n", c->gpmc_cs, c->phys_base,
- c->onenand.base, c->freq);
+ c->dma_chan = dma_request_channel(mask, NULL, NULL);
+ if (c->dma_chan) {
+ c->onenand.read_bufferram = omap2_onenand_read_bufferram;
+ c->onenand.write_bufferram = omap2_onenand_write_bufferram;
+ }
c->pdev = pdev;
c->mtd.priv = &c->onenand;
+ c->mtd.dev.parent = dev;
+ mtd_set_of_node(&c->mtd, dev->of_node);
- c->mtd.dev.parent = &pdev->dev;
- mtd_set_of_node(&c->mtd, pdata->of_node);
-
- this = &c->onenand;
- if (c->dma_channel >= 0) {
- this->wait = omap2_onenand_wait;
- if (c->flags & ONENAND_IN_OMAP34XX) {
- this->read_bufferram = omap3_onenand_read_bufferram;
- this->write_bufferram = omap3_onenand_write_bufferram;
- } else {
- this->read_bufferram = omap2_onenand_read_bufferram;
- this->write_bufferram = omap2_onenand_write_bufferram;
- }
- }
+ dev_info(dev, "initializing on CS%d (0x%08lx), va %p, %s mode\n",
+ c->gpmc_cs, c->phys_base, c->onenand.base,
+ c->dma_chan ? "DMA" : "PIO");
- if (pdata->regulator_can_sleep) {
- c->regulator = regulator_get(&pdev->dev, "vonenand");
- if (IS_ERR(c->regulator)) {
- dev_err(&pdev->dev, "Failed to get regulator\n");
- r = PTR_ERR(c->regulator);
- goto err_release_dma;
+ if ((r = onenand_scan(&c->mtd, 1)) < 0)
+ goto err_release_dma;
+
+ freq = omap2_onenand_get_freq(c->onenand.version_id);
+ if (freq > 0) {
+ switch (freq) {
+ case 104:
+ latency = 7;
+ break;
+ case 83:
+ latency = 6;
+ break;
+ case 66:
+ latency = 5;
+ break;
+ case 56:
+ latency = 4;
+ break;
+ default: /* 40 MHz or lower */
+ latency = 3;
+ break;
}
- c->onenand.enable = omap2_onenand_enable;
- c->onenand.disable = omap2_onenand_disable;
- }
- if (pdata->skip_initial_unlocking)
- this->options |= ONENAND_SKIP_INITIAL_UNLOCKING;
+ r = gpmc_omap_onenand_set_timings(dev, c->gpmc_cs,
+ freq, latency, &info);
+ if (r)
+ goto err_release_onenand;
- if ((r = onenand_scan(&c->mtd, 1)) < 0)
- goto err_release_regulator;
+ r = omap2_onenand_set_cfg(c, info.sync_read, info.sync_write,
+ latency, info.burst_len);
+ if (r)
+ goto err_release_onenand;
- r = mtd_device_register(&c->mtd, pdata ? pdata->parts : NULL,
- pdata ? pdata->nr_parts : 0);
+ if (info.sync_read || info.sync_write)
+ dev_info(dev, "optimized timings for %d MHz\n", freq);
+ }
+
+ r = mtd_device_register(&c->mtd, NULL, 0);
if (r)
goto err_release_onenand;
@@ -754,22 +619,9 @@ static int omap2_onenand_probe(struct platform_device *pdev)
err_release_onenand:
onenand_release(&c->mtd);
-err_release_regulator:
- regulator_put(c->regulator);
err_release_dma:
- if (c->dma_channel != -1)
- omap_free_dma(c->dma_channel);
- if (c->gpio_irq)
- free_irq(gpio_to_irq(c->gpio_irq), c);
-err_release_gpio:
- if (c->gpio_irq)
- gpio_free(c->gpio_irq);
-err_iounmap:
- iounmap(c->onenand.base);
-err_release_mem_region:
- release_mem_region(c->phys_base, c->mem_size);
-err_kfree:
- kfree(c);
+ if (c->dma_chan)
+ dma_release_channel(c->dma_chan);
return r;
}
@@ -779,27 +631,26 @@ static int omap2_onenand_remove(struct platform_device *pdev)
struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
onenand_release(&c->mtd);
- regulator_put(c->regulator);
- if (c->dma_channel != -1)
- omap_free_dma(c->dma_channel);
+ if (c->dma_chan)
+ dma_release_channel(c->dma_chan);
omap2_onenand_shutdown(pdev);
- if (c->gpio_irq) {
- free_irq(gpio_to_irq(c->gpio_irq), c);
- gpio_free(c->gpio_irq);
- }
- iounmap(c->onenand.base);
- release_mem_region(c->phys_base, c->mem_size);
- kfree(c);
return 0;
}
+static const struct of_device_id omap2_onenand_id_table[] = {
+ { .compatible = "ti,omap2-onenand", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, omap2_onenand_id_table);
+
static struct platform_driver omap2_onenand_driver = {
.probe = omap2_onenand_probe,
.remove = omap2_onenand_remove,
.shutdown = omap2_onenand_shutdown,
.driver = {
.name = DRIVER_NAME,
+ .of_match_table = omap2_onenand_id_table,
},
};
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 1a6d0e367b89..979f4031f23c 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -1383,15 +1383,6 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
return -EINVAL;
}
- /* Do not allow reads past end of device */
- if (unlikely(from >= mtd->size ||
- column + len > ((mtd->size >> this->page_shift) -
- (from >> this->page_shift)) * oobsize)) {
- printk(KERN_ERR "%s: Attempted to read beyond end of device\n",
- __func__);
- return -EINVAL;
- }
-
stats = mtd->ecc_stats;
readcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
@@ -1448,38 +1439,6 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
}
/**
- * onenand_read - [MTD Interface] Read data from flash
- * @param mtd MTD device structure
- * @param from offset to read from
- * @param len number of bytes to read
- * @param retlen pointer to variable to store the number of read bytes
- * @param buf the databuffer to put data
- *
- * Read with ecc
-*/
-static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
-{
- struct onenand_chip *this = mtd->priv;
- struct mtd_oob_ops ops = {
- .len = len,
- .ooblen = 0,
- .datbuf = buf,
- .oobbuf = NULL,
- };
- int ret;
-
- onenand_get_device(mtd, FL_READING);
- ret = ONENAND_IS_4KB_PAGE(this) ?
- onenand_mlc_read_ops_nolock(mtd, from, &ops) :
- onenand_read_ops_nolock(mtd, from, &ops);
- onenand_release_device(mtd);
-
- *retlen = ops.retlen;
- return ret;
-}
-
-/**
* onenand_read_oob - [MTD Interface] Read main and/or out-of-band
* @param mtd: MTD device structure
* @param from: offset to read from
@@ -2056,15 +2015,6 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
return -EINVAL;
}
- /* Do not allow reads past end of device */
- if (unlikely(to >= mtd->size ||
- column + len > ((mtd->size >> this->page_shift) -
- (to >> this->page_shift)) * oobsize)) {
- printk(KERN_ERR "%s: Attempted to write past end of device\n",
- __func__);
- return -EINVAL;
- }
-
oobbuf = this->oob_buf;
oobcmd = ONENAND_IS_4KB_PAGE(this) ? ONENAND_CMD_PROG : ONENAND_CMD_PROGOOB;
@@ -2129,35 +2079,6 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
}
/**
- * onenand_write - [MTD Interface] write buffer to FLASH
- * @param mtd MTD device structure
- * @param to offset to write to
- * @param len number of bytes to write
- * @param retlen pointer to variable to store the number of written bytes
- * @param buf the data to write
- *
- * Write with ECC
- */
-static int onenand_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
-{
- struct mtd_oob_ops ops = {
- .len = len,
- .ooblen = 0,
- .datbuf = (u_char *) buf,
- .oobbuf = NULL,
- };
- int ret;
-
- onenand_get_device(mtd, FL_WRITING);
- ret = onenand_write_ops_nolock(mtd, to, &ops);
- onenand_release_device(mtd);
-
- *retlen = ops.retlen;
- return ret;
-}
-
-/**
* onenand_write_oob - [MTD Interface] NAND write data and/or out-of-band
* @param mtd: MTD device structure
* @param to: offset to write
@@ -4038,8 +3959,6 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
mtd->_erase = onenand_erase;
mtd->_point = NULL;
mtd->_unpoint = NULL;
- mtd->_read = onenand_read;
- mtd->_write = onenand_write;
mtd->_read_oob = onenand_read_oob;
mtd->_write_oob = onenand_write_oob;
mtd->_panic_write = onenand_panic_write;
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index af0ac1a7bf8f..2e9d076e445a 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -25,8 +25,6 @@
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <asm/mach/flash.h>
-
#include "samsung.h"
enum soc_type {
@@ -129,16 +127,13 @@ struct s3c_onenand {
struct platform_device *pdev;
enum soc_type type;
void __iomem *base;
- struct resource *base_res;
void __iomem *ahb_addr;
- struct resource *ahb_res;
int bootram_command;
- void __iomem *page_buf;
- void __iomem *oob_buf;
+ void *page_buf;
+ void *oob_buf;
unsigned int (*mem_addr)(int fba, int fpa, int fsa);
unsigned int (*cmd_map)(unsigned int type, unsigned int val);
void __iomem *dma_addr;
- struct resource *dma_res;
unsigned long phys_base;
struct completion complete;
};
@@ -413,8 +408,8 @@ static int s3c_onenand_command(struct mtd_info *mtd, int cmd, loff_t addr,
/*
* Emulate two BufferRAMs and access them with 4-byte pointers
*/
- m = (unsigned int *) onenand->page_buf;
- s = (unsigned int *) onenand->oob_buf;
+ m = onenand->page_buf;
+ s = onenand->oob_buf;
if (index) {
m += (this->writesize >> 2);
@@ -486,11 +481,11 @@ static unsigned char *s3c_get_bufferram(struct mtd_info *mtd, int area)
unsigned char *p;
if (area == ONENAND_DATARAM) {
- p = (unsigned char *) onenand->page_buf;
+ p = onenand->page_buf;
if (index == 1)
p += this->writesize;
} else {
- p = (unsigned char *) onenand->oob_buf;
+ p = onenand->oob_buf;
if (index == 1)
p += mtd->oobsize;
}
@@ -851,15 +846,14 @@ static int s3c_onenand_probe(struct platform_device *pdev)
/* No need to check pdata; the platform data is optional */
size = sizeof(struct mtd_info) + sizeof(struct onenand_chip);
- mtd = kzalloc(size, GFP_KERNEL);
+ mtd = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
if (!mtd)
return -ENOMEM;
- onenand = kzalloc(sizeof(struct s3c_onenand), GFP_KERNEL);
- if (!onenand) {
- err = -ENOMEM;
- goto onenand_fail;
- }
+ onenand = devm_kzalloc(&pdev->dev, sizeof(struct s3c_onenand),
+ GFP_KERNEL);
+ if (!onenand)
+ return -ENOMEM;
this = (struct onenand_chip *) &mtd[1];
mtd->priv = this;
@@ -870,26 +864,12 @@ static int s3c_onenand_probe(struct platform_device *pdev)
s3c_onenand_setup(mtd);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- dev_err(&pdev->dev, "no memory resource defined\n");
- return -ENOENT;
- goto ahb_resource_failed;
- }
+ onenand->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(onenand->base))
+ return PTR_ERR(onenand->base);
- onenand->base_res = request_mem_region(r->start, resource_size(r),
- pdev->name);
- if (!onenand->base_res) {
- dev_err(&pdev->dev, "failed to request memory resource\n");
- err = -EBUSY;
- goto resource_failed;
- }
+ onenand->phys_base = r->start;
- onenand->base = ioremap(r->start, resource_size(r));
- if (!onenand->base) {
- dev_err(&pdev->dev, "failed to map memory resource\n");
- err = -EFAULT;
- goto ioremap_failed;
- }
/* Set onenand_chip also */
this->base = onenand->base;
@@ -898,40 +878,20 @@ static int s3c_onenand_probe(struct platform_device *pdev)
if (onenand->type != TYPE_S5PC110) {
r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!r) {
- dev_err(&pdev->dev, "no buffer memory resource defined\n");
- err = -ENOENT;
- goto ahb_resource_failed;
- }
-
- onenand->ahb_res = request_mem_region(r->start, resource_size(r),
- pdev->name);
- if (!onenand->ahb_res) {
- dev_err(&pdev->dev, "failed to request buffer memory resource\n");
- err = -EBUSY;
- goto ahb_resource_failed;
- }
-
- onenand->ahb_addr = ioremap(r->start, resource_size(r));
- if (!onenand->ahb_addr) {
- dev_err(&pdev->dev, "failed to map buffer memory resource\n");
- err = -EINVAL;
- goto ahb_ioremap_failed;
- }
+ onenand->ahb_addr = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(onenand->ahb_addr))
+ return PTR_ERR(onenand->ahb_addr);
/* Allocate 4KiB BufferRAM */
- onenand->page_buf = kzalloc(SZ_4K, GFP_KERNEL);
- if (!onenand->page_buf) {
- err = -ENOMEM;
- goto page_buf_fail;
- }
+ onenand->page_buf = devm_kzalloc(&pdev->dev, SZ_4K,
+ GFP_KERNEL);
+ if (!onenand->page_buf)
+ return -ENOMEM;
/* Allocate 128 bytes of SpareRAM */
- onenand->oob_buf = kzalloc(128, GFP_KERNEL);
- if (!onenand->oob_buf) {
- err = -ENOMEM;
- goto oob_buf_fail;
- }
+ onenand->oob_buf = devm_kzalloc(&pdev->dev, 128, GFP_KERNEL);
+ if (!onenand->oob_buf)
+ return -ENOMEM;
/* S3C doesn't handle subpage write */
mtd->subpage_sft = 0;
@@ -939,28 +899,9 @@ static int s3c_onenand_probe(struct platform_device *pdev)
} else { /* S5PC110 */
r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!r) {
- dev_err(&pdev->dev, "no dma memory resource defined\n");
- err = -ENOENT;
- goto dma_resource_failed;
- }
-
- onenand->dma_res = request_mem_region(r->start, resource_size(r),
- pdev->name);
- if (!onenand->dma_res) {
- dev_err(&pdev->dev, "failed to request dma memory resource\n");
- err = -EBUSY;
- goto dma_resource_failed;
- }
-
- onenand->dma_addr = ioremap(r->start, resource_size(r));
- if (!onenand->dma_addr) {
- dev_err(&pdev->dev, "failed to map dma memory resource\n");
- err = -EINVAL;
- goto dma_ioremap_failed;
- }
-
- onenand->phys_base = onenand->base_res->start;
+ onenand->dma_addr = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(onenand->dma_addr))
+ return PTR_ERR(onenand->dma_addr);
s5pc110_dma_ops = s5pc110_dma_poll;
/* Interrupt support */
@@ -968,19 +909,20 @@ static int s3c_onenand_probe(struct platform_device *pdev)
if (r) {
init_completion(&onenand->complete);
s5pc110_dma_ops = s5pc110_dma_irq;
- err = request_irq(r->start, s5pc110_onenand_irq,
- IRQF_SHARED, "onenand", &onenand);
+ err = devm_request_irq(&pdev->dev, r->start,
+ s5pc110_onenand_irq,
+ IRQF_SHARED, "onenand",
+ &onenand);
if (err) {
dev_err(&pdev->dev, "failed to get irq\n");
- goto scan_failed;
+ return err;
}
}
}
- if (onenand_scan(mtd, 1)) {
- err = -EFAULT;
- goto scan_failed;
- }
+ err = onenand_scan(mtd, 1);
+ if (err)
+ return err;
if (onenand->type != TYPE_S5PC110) {
/* S3C doesn't handle subpage write */
@@ -994,40 +936,15 @@ static int s3c_onenand_probe(struct platform_device *pdev)
err = mtd_device_parse_register(mtd, NULL, NULL,
pdata ? pdata->parts : NULL,
pdata ? pdata->nr_parts : 0);
+ if (err) {
+ dev_err(&pdev->dev, "failed to parse partitions and register the MTD device\n");
+ onenand_release(mtd);
+ return err;
+ }
platform_set_drvdata(pdev, mtd);
return 0;
-
-scan_failed:
- if (onenand->dma_addr)
- iounmap(onenand->dma_addr);
-dma_ioremap_failed:
- if (onenand->dma_res)
- release_mem_region(onenand->dma_res->start,
- resource_size(onenand->dma_res));
- kfree(onenand->oob_buf);
-oob_buf_fail:
- kfree(onenand->page_buf);
-page_buf_fail:
- if (onenand->ahb_addr)
- iounmap(onenand->ahb_addr);
-ahb_ioremap_failed:
- if (onenand->ahb_res)
- release_mem_region(onenand->ahb_res->start,
- resource_size(onenand->ahb_res));
-dma_resource_failed:
-ahb_resource_failed:
- iounmap(onenand->base);
-ioremap_failed:
- if (onenand->base_res)
- release_mem_region(onenand->base_res->start,
- resource_size(onenand->base_res));
-resource_failed:
- kfree(onenand);
-onenand_fail:
- kfree(mtd);
- return err;
}
static int s3c_onenand_remove(struct platform_device *pdev)
@@ -1035,25 +952,7 @@ static int s3c_onenand_remove(struct platform_device *pdev)
struct mtd_info *mtd = platform_get_drvdata(pdev);
onenand_release(mtd);
- if (onenand->ahb_addr)
- iounmap(onenand->ahb_addr);
- if (onenand->ahb_res)
- release_mem_region(onenand->ahb_res->start,
- resource_size(onenand->ahb_res));
- if (onenand->dma_addr)
- iounmap(onenand->dma_addr);
- if (onenand->dma_res)
- release_mem_region(onenand->dma_res->start,
- resource_size(onenand->dma_res));
-
- iounmap(onenand->base);
- release_mem_region(onenand->base_res->start,
- resource_size(onenand->base_res));
-
- kfree(onenand->oob_buf);
- kfree(onenand->page_buf);
- kfree(onenand);
- kfree(mtd);
+
return 0;
}
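The samsung.c changes above are a straight devm conversion: each request_mem_region()/ioremap()/kzalloc() pair becomes a device-managed call, which is why the goto unwind ladder and nearly all of the remove() body disappear. A minimal probe sketch of the same pattern, using hypothetical names (demo_priv, demo_probe) rather than the driver's own:

#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/sizes.h>

struct demo_priv {
	void __iomem *base;	/* unmapped automatically on driver unbind */
	void *page_buf;		/* freed automatically on driver unbind */
};

static int demo_probe(struct platform_device *pdev)
{
	struct demo_priv *priv;
	struct resource *r;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* request_mem_region() + ioremap() + error reporting in one call */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	priv->page_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL);
	if (!priv->page_buf)
		return -ENOMEM;	/* earlier devm resources unwind for us */

	platform_set_drvdata(pdev, priv);
	return 0;
}

On any early return, and again at driver unbind, the devm core releases everything registered so far in reverse order, so no explicit cleanup code is needed.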
diff --git a/drivers/mtd/parsers/sharpslpart.c b/drivers/mtd/parsers/sharpslpart.c
index 5fe0079ea5ed..8893dc82a5c8 100644
--- a/drivers/mtd/parsers/sharpslpart.c
+++ b/drivers/mtd/parsers/sharpslpart.c
@@ -192,7 +192,7 @@ static int sharpsl_nand_init_ftl(struct mtd_info *mtd, struct sharpsl_ftl *ftl)
/* create physical-logical table */
for (block_num = 0; block_num < phymax; block_num++) {
- block_adr = block_num * mtd->erasesize;
+ block_adr = (loff_t)block_num * mtd->erasesize;
if (mtd_block_isbad(mtd, block_adr))
continue;
@@ -219,7 +219,7 @@ exit:
return ret;
}
-void sharpsl_nand_cleanup_ftl(struct sharpsl_ftl *ftl)
+static void sharpsl_nand_cleanup_ftl(struct sharpsl_ftl *ftl)
{
kfree(ftl->log2phy);
}
@@ -244,7 +244,7 @@ static int sharpsl_nand_read_laddr(struct mtd_info *mtd,
return -EINVAL;
block_num = ftl->log2phy[log_num];
- block_adr = block_num * mtd->erasesize;
+ block_adr = (loff_t)block_num * mtd->erasesize;
block_ofs = mtd_mod_by_eb((u32)from, mtd);
err = mtd_read(mtd, block_adr + block_ofs, len, &retlen, buf);
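Both sharpslpart.c hunks fix the same latent truncation: block_num and mtd->erasesize are 32-bit, so their product is evaluated in 32-bit arithmetic and wraps above 4 GiB even though the result lands in a 64-bit loff_t. Widening one operand first forces a 64-bit multiply. A standalone demonstration with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t block_num = 20000;		/* hypothetical block index */
	uint32_t erasesize = 256 * 1024;	/* 256 KiB eraseblocks */

	/* 32-bit multiply: the 5 GiB product wraps modulo 2^32 */
	uint32_t wrong = block_num * erasesize;
	/* the cast promotes the whole multiplication to 64 bits */
	int64_t right = (int64_t)block_num * erasesize;

	printf("wrong=%u right=%lld\n", wrong, (long long)right);
	return 0;
}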
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index 75a2bc447a99..4b8e9183489a 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -58,6 +58,7 @@ struct cqspi_flash_pdata {
u8 data_width;
u8 cs;
bool registered;
+ bool use_direct_mode;
};
struct cqspi_st {
@@ -68,6 +69,7 @@ struct cqspi_st {
void __iomem *iobase;
void __iomem *ahb_base;
+ resource_size_t ahb_size;
struct completion transfer_complete;
struct mutex bus_mutex;
@@ -103,6 +105,7 @@ struct cqspi_st {
/* Register map */
#define CQSPI_REG_CONFIG 0x00
#define CQSPI_REG_CONFIG_ENABLE_MASK BIT(0)
+#define CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL BIT(7)
#define CQSPI_REG_CONFIG_DECODE_MASK BIT(9)
#define CQSPI_REG_CONFIG_CHIPSELECT_LSB 10
#define CQSPI_REG_CONFIG_DMA_MASK BIT(15)
@@ -450,8 +453,7 @@ static int cqspi_command_write_addr(struct spi_nor *nor,
return cqspi_exec_flash_cmd(cqspi, reg);
}
-static int cqspi_indirect_read_setup(struct spi_nor *nor,
- const unsigned int from_addr)
+static int cqspi_read_setup(struct spi_nor *nor)
{
struct cqspi_flash_pdata *f_pdata = nor->priv;
struct cqspi_st *cqspi = f_pdata->cqspi;
@@ -459,8 +461,6 @@ static int cqspi_indirect_read_setup(struct spi_nor *nor,
unsigned int dummy_clk = 0;
unsigned int reg;
- writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
-
reg = nor->read_opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
reg |= cqspi_calc_rdreg(nor, nor->read_opcode);
@@ -493,8 +493,8 @@ static int cqspi_indirect_read_setup(struct spi_nor *nor,
return 0;
}
-static int cqspi_indirect_read_execute(struct spi_nor *nor,
- u8 *rxbuf, const unsigned n_rx)
+static int cqspi_indirect_read_execute(struct spi_nor *nor, u8 *rxbuf,
+ loff_t from_addr, const size_t n_rx)
{
struct cqspi_flash_pdata *f_pdata = nor->priv;
struct cqspi_st *cqspi = f_pdata->cqspi;
@@ -504,6 +504,7 @@ static int cqspi_indirect_read_execute(struct spi_nor *nor,
unsigned int bytes_to_read = 0;
int ret = 0;
+ writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
writel(remaining, reg_base + CQSPI_REG_INDIRECTRDBYTES);
/* Clear all interrupts. */
@@ -570,8 +571,7 @@ failrd:
return ret;
}
-static int cqspi_indirect_write_setup(struct spi_nor *nor,
- const unsigned int to_addr)
+static int cqspi_write_setup(struct spi_nor *nor)
{
unsigned int reg;
struct cqspi_flash_pdata *f_pdata = nor->priv;
@@ -584,8 +584,6 @@ static int cqspi_indirect_write_setup(struct spi_nor *nor,
reg = cqspi_calc_rdreg(nor, nor->program_opcode);
writel(reg, reg_base + CQSPI_REG_RD_INSTR);
- writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR);
-
reg = readl(reg_base + CQSPI_REG_SIZE);
reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
reg |= (nor->addr_width - 1);
@@ -593,8 +591,8 @@ static int cqspi_indirect_write_setup(struct spi_nor *nor,
return 0;
}
-static int cqspi_indirect_write_execute(struct spi_nor *nor,
- const u8 *txbuf, const unsigned n_tx)
+static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr,
+ const u8 *txbuf, const size_t n_tx)
{
const unsigned int page_size = nor->page_size;
struct cqspi_flash_pdata *f_pdata = nor->priv;
@@ -604,6 +602,7 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor,
unsigned int write_bytes;
int ret;
+ writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR);
writel(remaining, reg_base + CQSPI_REG_INDIRECTWRBYTES);
/* Clear all interrupts. */
@@ -894,17 +893,22 @@ static int cqspi_set_protocol(struct spi_nor *nor, const int read)
static ssize_t cqspi_write(struct spi_nor *nor, loff_t to,
size_t len, const u_char *buf)
{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
int ret;
ret = cqspi_set_protocol(nor, 0);
if (ret)
return ret;
- ret = cqspi_indirect_write_setup(nor, to);
+ ret = cqspi_write_setup(nor);
if (ret)
return ret;
- ret = cqspi_indirect_write_execute(nor, buf, len);
+ if (f_pdata->use_direct_mode)
+ memcpy_toio(cqspi->ahb_base + to, buf, len);
+ else
+ ret = cqspi_indirect_write_execute(nor, to, buf, len);
if (ret)
return ret;
@@ -914,17 +918,22 @@ static ssize_t cqspi_write(struct spi_nor *nor, loff_t to,
static ssize_t cqspi_read(struct spi_nor *nor, loff_t from,
size_t len, u_char *buf)
{
+ struct cqspi_flash_pdata *f_pdata = nor->priv;
+ struct cqspi_st *cqspi = f_pdata->cqspi;
int ret;
ret = cqspi_set_protocol(nor, 1);
if (ret)
return ret;
- ret = cqspi_indirect_read_setup(nor, from);
+ ret = cqspi_read_setup(nor);
if (ret)
return ret;
- ret = cqspi_indirect_read_execute(nor, buf, len);
+ if (f_pdata->use_direct_mode)
+ memcpy_fromio(buf, cqspi->ahb_base + from, len);
+ else
+ ret = cqspi_indirect_read_execute(nor, buf, from, len);
if (ret)
return ret;
@@ -1059,6 +1068,8 @@ static int cqspi_of_get_pdata(struct platform_device *pdev)
static void cqspi_controller_init(struct cqspi_st *cqspi)
{
+ u32 reg;
+
cqspi_controller_enable(cqspi, 0);
/* Configure the remap address register, no remap */
@@ -1081,6 +1092,11 @@ static void cqspi_controller_init(struct cqspi_st *cqspi)
writel(cqspi->fifo_depth * cqspi->fifo_width / 8,
cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK);
+ /* Enable Direct Access Controller */
+ reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
+ reg |= CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL;
+ writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
+
cqspi_controller_enable(cqspi, 1);
}
@@ -1156,6 +1172,12 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
goto err;
f_pdata->registered = true;
+
+ if (mtd->size <= cqspi->ahb_size) {
+ f_pdata->use_direct_mode = true;
+ dev_dbg(nor->dev, "using direct mode for %s\n",
+ mtd->name);
+ }
}
return 0;
@@ -1215,6 +1237,7 @@ static int cqspi_probe(struct platform_device *pdev)
dev_err(dev, "Cannot remap AHB address.\n");
return PTR_ERR(cqspi->ahb_base);
}
+ cqspi->ahb_size = resource_size(res_ahb);
init_completion(&cqspi->transfer_complete);
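The cadence-quadspi changes enable the controller's Direct Access Controller: when a flash chip fits entirely inside the memory-mapped AHB window (mtd->size <= cqspi->ahb_size), reads and writes skip the indirect FIFO machinery and become plain MMIO copies. A rough sketch of the resulting dispatch; demo_read() is hypothetical, while the struct fields and the indirect helper are the driver's own:

/* sketch only: setup and error handling trimmed for brevity */
static ssize_t demo_read(struct spi_nor *nor, loff_t from, size_t len,
			 u_char *buf)
{
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	int ret;

	if (f_pdata->use_direct_mode) {
		/* the whole flash is visible through the AHB window */
		memcpy_fromio(buf, cqspi->ahb_base + from, len);
		return len;
	}

	/* otherwise fall back to FIFO-driven indirect transfers */
	ret = cqspi_indirect_read_execute(nor, buf, from, len);
	return ret ? ret : len;
}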
diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c
index f17d22435bfc..2901c7bd9e30 100644
--- a/drivers/mtd/spi-nor/fsl-quadspi.c
+++ b/drivers/mtd/spi-nor/fsl-quadspi.c
@@ -801,10 +801,10 @@ static int fsl_qspi_nor_setup_last(struct fsl_qspi *q)
}
static const struct of_device_id fsl_qspi_dt_ids[] = {
- { .compatible = "fsl,vf610-qspi", .data = (void *)&vybrid_data, },
- { .compatible = "fsl,imx6sx-qspi", .data = (void *)&imx6sx_data, },
- { .compatible = "fsl,imx7d-qspi", .data = (void *)&imx7d_data, },
- { .compatible = "fsl,imx6ul-qspi", .data = (void *)&imx6ul_data, },
+ { .compatible = "fsl,vf610-qspi", .data = &vybrid_data, },
+ { .compatible = "fsl,imx6sx-qspi", .data = &imx6sx_data, },
+ { .compatible = "fsl,imx7d-qspi", .data = &imx7d_data, },
+ { .compatible = "fsl,imx6ul-qspi", .data = &imx6ul_data, },
{ .compatible = "fsl,ls1021a-qspi", .data = (void *)&ls1021a_data, },
{ /* sentinel */ }
};
diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c
index ef034d898a23..699951523179 100644
--- a/drivers/mtd/spi-nor/intel-spi.c
+++ b/drivers/mtd/spi-nor/intel-spi.c
@@ -138,7 +138,6 @@
* @erase_64k: 64k erase supported
* @opcodes: Opcodes which are supported. These are programmed by BIOS
* before it locks down the controller.
- * @preopcodes: Preopcodes which are supported.
*/
struct intel_spi {
struct device *dev;
@@ -155,7 +154,6 @@ struct intel_spi {
bool swseq_erase;
bool erase_64k;
u8 opcodes[8];
- u8 preopcodes[2];
};
static bool writeable;
@@ -400,10 +398,6 @@ static int intel_spi_init(struct intel_spi *ispi)
ispi->opcodes[i] = opmenu0 >> i * 8;
ispi->opcodes[i + 4] = opmenu1 >> i * 8;
}
-
- val = readl(ispi->sregs + PREOP_OPTYPE);
- ispi->preopcodes[0] = val;
- ispi->preopcodes[1] = val >> 8;
}
}
diff --git a/drivers/mtd/spi-nor/mtk-quadspi.c b/drivers/mtd/spi-nor/mtk-quadspi.c
index abe455ccd68b..5442993b71ff 100644
--- a/drivers/mtd/spi-nor/mtk-quadspi.c
+++ b/drivers/mtd/spi-nor/mtk-quadspi.c
@@ -110,7 +110,7 @@
#define MTK_NOR_PRG_REG(n) (MTK_NOR_PRGDATA0_REG + 4 * (n))
#define MTK_NOR_SHREG(n) (MTK_NOR_SHREG0_REG + 4 * (n))
-struct mt8173_nor {
+struct mtk_nor {
struct spi_nor nor;
struct device *dev;
void __iomem *base; /* nor flash base address */
@@ -118,48 +118,48 @@ struct mt8173_nor {
struct clk *nor_clk;
};
-static void mt8173_nor_set_read_mode(struct mt8173_nor *mt8173_nor)
+static void mtk_nor_set_read_mode(struct mtk_nor *mtk_nor)
{
- struct spi_nor *nor = &mt8173_nor->nor;
+ struct spi_nor *nor = &mtk_nor->nor;
switch (nor->read_proto) {
case SNOR_PROTO_1_1_1:
- writeb(nor->read_opcode, mt8173_nor->base +
+ writeb(nor->read_opcode, mtk_nor->base +
MTK_NOR_PRGDATA3_REG);
- writeb(MTK_NOR_FAST_READ, mt8173_nor->base +
+ writeb(MTK_NOR_FAST_READ, mtk_nor->base +
MTK_NOR_CFG1_REG);
break;
case SNOR_PROTO_1_1_2:
- writeb(nor->read_opcode, mt8173_nor->base +
+ writeb(nor->read_opcode, mtk_nor->base +
MTK_NOR_PRGDATA3_REG);
- writeb(MTK_NOR_DUAL_READ_EN, mt8173_nor->base +
+ writeb(MTK_NOR_DUAL_READ_EN, mtk_nor->base +
MTK_NOR_DUAL_REG);
break;
case SNOR_PROTO_1_1_4:
- writeb(nor->read_opcode, mt8173_nor->base +
+ writeb(nor->read_opcode, mtk_nor->base +
MTK_NOR_PRGDATA4_REG);
- writeb(MTK_NOR_QUAD_READ_EN, mt8173_nor->base +
+ writeb(MTK_NOR_QUAD_READ_EN, mtk_nor->base +
MTK_NOR_DUAL_REG);
break;
default:
- writeb(MTK_NOR_DUAL_DISABLE, mt8173_nor->base +
+ writeb(MTK_NOR_DUAL_DISABLE, mtk_nor->base +
MTK_NOR_DUAL_REG);
break;
}
}
-static int mt8173_nor_execute_cmd(struct mt8173_nor *mt8173_nor, u8 cmdval)
+static int mtk_nor_execute_cmd(struct mtk_nor *mtk_nor, u8 cmdval)
{
int reg;
u8 val = cmdval & 0x1f;
- writeb(cmdval, mt8173_nor->base + MTK_NOR_CMD_REG);
- return readl_poll_timeout(mt8173_nor->base + MTK_NOR_CMD_REG, reg,
+ writeb(cmdval, mtk_nor->base + MTK_NOR_CMD_REG);
+ return readl_poll_timeout(mtk_nor->base + MTK_NOR_CMD_REG, reg,
!(reg & val), 100, 10000);
}
-static int mt8173_nor_do_tx_rx(struct mt8173_nor *mt8173_nor, u8 op,
- u8 *tx, int txlen, u8 *rx, int rxlen)
+static int mtk_nor_do_tx_rx(struct mtk_nor *mtk_nor, u8 op,
+ u8 *tx, int txlen, u8 *rx, int rxlen)
{
int len = 1 + txlen + rxlen;
int i, ret, idx;
@@ -167,26 +167,26 @@ static int mt8173_nor_do_tx_rx(struct mt8173_nor *mt8173_nor, u8 op,
if (len > MTK_NOR_MAX_SHIFT)
return -EINVAL;
- writeb(len * 8, mt8173_nor->base + MTK_NOR_CNT_REG);
+ writeb(len * 8, mtk_nor->base + MTK_NOR_CNT_REG);
/* start at PRGDATA5, go down to PRGDATA0 */
idx = MTK_NOR_MAX_RX_TX_SHIFT - 1;
/* opcode */
- writeb(op, mt8173_nor->base + MTK_NOR_PRG_REG(idx));
+ writeb(op, mtk_nor->base + MTK_NOR_PRG_REG(idx));
idx--;
/* program TX data */
for (i = 0; i < txlen; i++, idx--)
- writeb(tx[i], mt8173_nor->base + MTK_NOR_PRG_REG(idx));
+ writeb(tx[i], mtk_nor->base + MTK_NOR_PRG_REG(idx));
/* clear out rest of TX registers */
while (idx >= 0) {
- writeb(0, mt8173_nor->base + MTK_NOR_PRG_REG(idx));
+ writeb(0, mtk_nor->base + MTK_NOR_PRG_REG(idx));
idx--;
}
- ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_PRG_CMD);
+ ret = mtk_nor_execute_cmd(mtk_nor, MTK_NOR_PRG_CMD);
if (ret)
return ret;
@@ -195,20 +195,20 @@ static int mt8173_nor_do_tx_rx(struct mt8173_nor *mt8173_nor, u8 op,
/* read out RX data */
for (i = 0; i < rxlen; i++, idx--)
- rx[i] = readb(mt8173_nor->base + MTK_NOR_SHREG(idx));
+ rx[i] = readb(mtk_nor->base + MTK_NOR_SHREG(idx));
return 0;
}
/* Do a WRSR (Write Status Register) command */
-static int mt8173_nor_wr_sr(struct mt8173_nor *mt8173_nor, u8 sr)
+static int mtk_nor_wr_sr(struct mtk_nor *mtk_nor, u8 sr)
{
- writeb(sr, mt8173_nor->base + MTK_NOR_PRGDATA5_REG);
- writeb(8, mt8173_nor->base + MTK_NOR_CNT_REG);
- return mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_WRSR_CMD);
+ writeb(sr, mtk_nor->base + MTK_NOR_PRGDATA5_REG);
+ writeb(8, mtk_nor->base + MTK_NOR_CNT_REG);
+ return mtk_nor_execute_cmd(mtk_nor, MTK_NOR_WRSR_CMD);
}
-static int mt8173_nor_write_buffer_enable(struct mt8173_nor *mt8173_nor)
+static int mtk_nor_write_buffer_enable(struct mtk_nor *mtk_nor)
{
u8 reg;
@@ -216,27 +216,27 @@ static int mt8173_nor_write_buffer_enable(struct mt8173_nor *mt8173_nor)
* 0: pre-fetch buffer used for read
* 1: pre-fetch buffer used for page program
*/
- writel(MTK_NOR_WR_BUF_ENABLE, mt8173_nor->base + MTK_NOR_CFG2_REG);
- return readb_poll_timeout(mt8173_nor->base + MTK_NOR_CFG2_REG, reg,
+ writel(MTK_NOR_WR_BUF_ENABLE, mtk_nor->base + MTK_NOR_CFG2_REG);
+ return readb_poll_timeout(mtk_nor->base + MTK_NOR_CFG2_REG, reg,
0x01 == (reg & 0x01), 100, 10000);
}
-static int mt8173_nor_write_buffer_disable(struct mt8173_nor *mt8173_nor)
+static int mtk_nor_write_buffer_disable(struct mtk_nor *mtk_nor)
{
u8 reg;
- writel(MTK_NOR_WR_BUF_DISABLE, mt8173_nor->base + MTK_NOR_CFG2_REG);
- return readb_poll_timeout(mt8173_nor->base + MTK_NOR_CFG2_REG, reg,
+ writel(MTK_NOR_WR_BUF_DISABLE, mtk_nor->base + MTK_NOR_CFG2_REG);
+ return readb_poll_timeout(mtk_nor->base + MTK_NOR_CFG2_REG, reg,
MTK_NOR_WR_BUF_DISABLE == (reg & 0x1), 100,
10000);
}
-static void mt8173_nor_set_addr_width(struct mt8173_nor *mt8173_nor)
+static void mtk_nor_set_addr_width(struct mtk_nor *mtk_nor)
{
u8 val;
- struct spi_nor *nor = &mt8173_nor->nor;
+ struct spi_nor *nor = &mtk_nor->nor;
- val = readb(mt8173_nor->base + MTK_NOR_DUAL_REG);
+ val = readb(mtk_nor->base + MTK_NOR_DUAL_REG);
switch (nor->addr_width) {
case 3:
@@ -246,115 +246,115 @@ static void mt8173_nor_set_addr_width(struct mt8173_nor *mt8173_nor)
val |= MTK_NOR_4B_ADDR_EN;
break;
default:
- dev_warn(mt8173_nor->dev, "Unexpected address width %u.\n",
+ dev_warn(mtk_nor->dev, "Unexpected address width %u.\n",
nor->addr_width);
break;
}
- writeb(val, mt8173_nor->base + MTK_NOR_DUAL_REG);
+ writeb(val, mtk_nor->base + MTK_NOR_DUAL_REG);
}
-static void mt8173_nor_set_addr(struct mt8173_nor *mt8173_nor, u32 addr)
+static void mtk_nor_set_addr(struct mtk_nor *mtk_nor, u32 addr)
{
int i;
- mt8173_nor_set_addr_width(mt8173_nor);
+ mtk_nor_set_addr_width(mtk_nor);
for (i = 0; i < 3; i++) {
- writeb(addr & 0xff, mt8173_nor->base + MTK_NOR_RADR0_REG + i * 4);
+ writeb(addr & 0xff, mtk_nor->base + MTK_NOR_RADR0_REG + i * 4);
addr >>= 8;
}
/* Last register is non-contiguous */
- writeb(addr & 0xff, mt8173_nor->base + MTK_NOR_RADR3_REG);
+ writeb(addr & 0xff, mtk_nor->base + MTK_NOR_RADR3_REG);
}
-static ssize_t mt8173_nor_read(struct spi_nor *nor, loff_t from, size_t length,
- u_char *buffer)
+static ssize_t mtk_nor_read(struct spi_nor *nor, loff_t from, size_t length,
+ u_char *buffer)
{
int i, ret;
int addr = (int)from;
u8 *buf = (u8 *)buffer;
- struct mt8173_nor *mt8173_nor = nor->priv;
+ struct mtk_nor *mtk_nor = nor->priv;
/* set mode for fast read mode, dual mode or quad mode */
- mt8173_nor_set_read_mode(mt8173_nor);
- mt8173_nor_set_addr(mt8173_nor, addr);
+ mtk_nor_set_read_mode(mtk_nor);
+ mtk_nor_set_addr(mtk_nor, addr);
for (i = 0; i < length; i++) {
- ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_PIO_READ_CMD);
+ ret = mtk_nor_execute_cmd(mtk_nor, MTK_NOR_PIO_READ_CMD);
if (ret < 0)
return ret;
- buf[i] = readb(mt8173_nor->base + MTK_NOR_RDATA_REG);
+ buf[i] = readb(mtk_nor->base + MTK_NOR_RDATA_REG);
}
return length;
}
-static int mt8173_nor_write_single_byte(struct mt8173_nor *mt8173_nor,
- int addr, int length, u8 *data)
+static int mtk_nor_write_single_byte(struct mtk_nor *mtk_nor,
+ int addr, int length, u8 *data)
{
int i, ret;
- mt8173_nor_set_addr(mt8173_nor, addr);
+ mtk_nor_set_addr(mtk_nor, addr);
for (i = 0; i < length; i++) {
- writeb(*data++, mt8173_nor->base + MTK_NOR_WDATA_REG);
- ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_PIO_WR_CMD);
+ writeb(*data++, mtk_nor->base + MTK_NOR_WDATA_REG);
+ ret = mtk_nor_execute_cmd(mtk_nor, MTK_NOR_PIO_WR_CMD);
if (ret < 0)
return ret;
}
return 0;
}
-static int mt8173_nor_write_buffer(struct mt8173_nor *mt8173_nor, int addr,
- const u8 *buf)
+static int mtk_nor_write_buffer(struct mtk_nor *mtk_nor, int addr,
+ const u8 *buf)
{
int i, bufidx, data;
- mt8173_nor_set_addr(mt8173_nor, addr);
+ mtk_nor_set_addr(mtk_nor, addr);
bufidx = 0;
for (i = 0; i < SFLASH_WRBUF_SIZE; i += 4) {
data = buf[bufidx + 3]<<24 | buf[bufidx + 2]<<16 |
buf[bufidx + 1]<<8 | buf[bufidx];
bufidx += 4;
- writel(data, mt8173_nor->base + MTK_NOR_PP_DATA_REG);
+ writel(data, mtk_nor->base + MTK_NOR_PP_DATA_REG);
}
- return mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_WR_CMD);
+ return mtk_nor_execute_cmd(mtk_nor, MTK_NOR_WR_CMD);
}
-static ssize_t mt8173_nor_write(struct spi_nor *nor, loff_t to, size_t len,
- const u_char *buf)
+static ssize_t mtk_nor_write(struct spi_nor *nor, loff_t to, size_t len,
+ const u_char *buf)
{
int ret;
- struct mt8173_nor *mt8173_nor = nor->priv;
+ struct mtk_nor *mtk_nor = nor->priv;
size_t i;
- ret = mt8173_nor_write_buffer_enable(mt8173_nor);
+ ret = mtk_nor_write_buffer_enable(mtk_nor);
if (ret < 0) {
- dev_warn(mt8173_nor->dev, "write buffer enable failed!\n");
+ dev_warn(mtk_nor->dev, "write buffer enable failed!\n");
return ret;
}
for (i = 0; i + SFLASH_WRBUF_SIZE <= len; i += SFLASH_WRBUF_SIZE) {
- ret = mt8173_nor_write_buffer(mt8173_nor, to, buf);
+ ret = mtk_nor_write_buffer(mtk_nor, to, buf);
if (ret < 0) {
- dev_err(mt8173_nor->dev, "write buffer failed!\n");
+ dev_err(mtk_nor->dev, "write buffer failed!\n");
return ret;
}
to += SFLASH_WRBUF_SIZE;
buf += SFLASH_WRBUF_SIZE;
}
- ret = mt8173_nor_write_buffer_disable(mt8173_nor);
+ ret = mtk_nor_write_buffer_disable(mtk_nor);
if (ret < 0) {
- dev_warn(mt8173_nor->dev, "write buffer disable failed!\n");
+ dev_warn(mtk_nor->dev, "write buffer disable failed!\n");
return ret;
}
if (i < len) {
- ret = mt8173_nor_write_single_byte(mt8173_nor, to,
- (int)(len - i), (u8 *)buf);
+ ret = mtk_nor_write_single_byte(mtk_nor, to,
+ (int)(len - i), (u8 *)buf);
if (ret < 0) {
- dev_err(mt8173_nor->dev, "write single byte failed!\n");
+ dev_err(mtk_nor->dev, "write single byte failed!\n");
return ret;
}
}
@@ -362,72 +362,72 @@ static ssize_t mt8173_nor_write(struct spi_nor *nor, loff_t to, size_t len,
return len;
}
-static int mt8173_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+static int mtk_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
{
int ret;
- struct mt8173_nor *mt8173_nor = nor->priv;
+ struct mtk_nor *mtk_nor = nor->priv;
switch (opcode) {
case SPINOR_OP_RDSR:
- ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_RDSR_CMD);
+ ret = mtk_nor_execute_cmd(mtk_nor, MTK_NOR_RDSR_CMD);
if (ret < 0)
return ret;
if (len == 1)
- *buf = readb(mt8173_nor->base + MTK_NOR_RDSR_REG);
+ *buf = readb(mtk_nor->base + MTK_NOR_RDSR_REG);
else
- dev_err(mt8173_nor->dev, "len should be 1 for read status!\n");
+ dev_err(mtk_nor->dev, "len should be 1 for read status!\n");
break;
default:
- ret = mt8173_nor_do_tx_rx(mt8173_nor, opcode, NULL, 0, buf, len);
+ ret = mtk_nor_do_tx_rx(mtk_nor, opcode, NULL, 0, buf, len);
break;
}
return ret;
}
-static int mt8173_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
- int len)
+static int mtk_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
+ int len)
{
int ret;
- struct mt8173_nor *mt8173_nor = nor->priv;
+ struct mtk_nor *mtk_nor = nor->priv;
switch (opcode) {
case SPINOR_OP_WRSR:
/* We only handle 1 byte */
- ret = mt8173_nor_wr_sr(mt8173_nor, *buf);
+ ret = mtk_nor_wr_sr(mtk_nor, *buf);
break;
default:
- ret = mt8173_nor_do_tx_rx(mt8173_nor, opcode, buf, len, NULL, 0);
+ ret = mtk_nor_do_tx_rx(mtk_nor, opcode, buf, len, NULL, 0);
if (ret)
- dev_warn(mt8173_nor->dev, "write reg failure!\n");
+ dev_warn(mtk_nor->dev, "write reg failure!\n");
break;
}
return ret;
}
-static void mt8173_nor_disable_clk(struct mt8173_nor *mt8173_nor)
+static void mtk_nor_disable_clk(struct mtk_nor *mtk_nor)
{
- clk_disable_unprepare(mt8173_nor->spi_clk);
- clk_disable_unprepare(mt8173_nor->nor_clk);
+ clk_disable_unprepare(mtk_nor->spi_clk);
+ clk_disable_unprepare(mtk_nor->nor_clk);
}
-static int mt8173_nor_enable_clk(struct mt8173_nor *mt8173_nor)
+static int mtk_nor_enable_clk(struct mtk_nor *mtk_nor)
{
int ret;
- ret = clk_prepare_enable(mt8173_nor->spi_clk);
+ ret = clk_prepare_enable(mtk_nor->spi_clk);
if (ret)
return ret;
- ret = clk_prepare_enable(mt8173_nor->nor_clk);
+ ret = clk_prepare_enable(mtk_nor->nor_clk);
if (ret) {
- clk_disable_unprepare(mt8173_nor->spi_clk);
+ clk_disable_unprepare(mtk_nor->spi_clk);
return ret;
}
return 0;
}
-static int mtk_nor_init(struct mt8173_nor *mt8173_nor,
+static int mtk_nor_init(struct mtk_nor *mtk_nor,
struct device_node *flash_node)
{
const struct spi_nor_hwcaps hwcaps = {
@@ -439,18 +439,18 @@ static int mtk_nor_init(struct mt8173_nor *mt8173_nor,
struct spi_nor *nor;
/* initialize controller to accept commands */
- writel(MTK_NOR_ENABLE_SF_CMD, mt8173_nor->base + MTK_NOR_WRPROT_REG);
+ writel(MTK_NOR_ENABLE_SF_CMD, mtk_nor->base + MTK_NOR_WRPROT_REG);
- nor = &mt8173_nor->nor;
- nor->dev = mt8173_nor->dev;
- nor->priv = mt8173_nor;
+ nor = &mtk_nor->nor;
+ nor->dev = mtk_nor->dev;
+ nor->priv = mtk_nor;
spi_nor_set_flash_node(nor, flash_node);
/* fill the hooks to spi nor */
- nor->read = mt8173_nor_read;
- nor->read_reg = mt8173_nor_read_reg;
- nor->write = mt8173_nor_write;
- nor->write_reg = mt8173_nor_write_reg;
+ nor->read = mtk_nor_read;
+ nor->read_reg = mtk_nor_read_reg;
+ nor->write = mtk_nor_write;
+ nor->write_reg = mtk_nor_write_reg;
nor->mtd.name = "mtk_nor";
/* initialized with NULL */
ret = spi_nor_scan(nor, NULL, &hwcaps);
@@ -465,34 +465,34 @@ static int mtk_nor_drv_probe(struct platform_device *pdev)
struct device_node *flash_np;
struct resource *res;
int ret;
- struct mt8173_nor *mt8173_nor;
+ struct mtk_nor *mtk_nor;
if (!pdev->dev.of_node) {
dev_err(&pdev->dev, "No DT found\n");
return -EINVAL;
}
- mt8173_nor = devm_kzalloc(&pdev->dev, sizeof(*mt8173_nor), GFP_KERNEL);
- if (!mt8173_nor)
+ mtk_nor = devm_kzalloc(&pdev->dev, sizeof(*mtk_nor), GFP_KERNEL);
+ if (!mtk_nor)
return -ENOMEM;
- platform_set_drvdata(pdev, mt8173_nor);
+ platform_set_drvdata(pdev, mtk_nor);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mt8173_nor->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(mt8173_nor->base))
- return PTR_ERR(mt8173_nor->base);
+ mtk_nor->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mtk_nor->base))
+ return PTR_ERR(mtk_nor->base);
- mt8173_nor->spi_clk = devm_clk_get(&pdev->dev, "spi");
- if (IS_ERR(mt8173_nor->spi_clk))
- return PTR_ERR(mt8173_nor->spi_clk);
+ mtk_nor->spi_clk = devm_clk_get(&pdev->dev, "spi");
+ if (IS_ERR(mtk_nor->spi_clk))
+ return PTR_ERR(mtk_nor->spi_clk);
- mt8173_nor->nor_clk = devm_clk_get(&pdev->dev, "sf");
- if (IS_ERR(mt8173_nor->nor_clk))
- return PTR_ERR(mt8173_nor->nor_clk);
+ mtk_nor->nor_clk = devm_clk_get(&pdev->dev, "sf");
+ if (IS_ERR(mtk_nor->nor_clk))
+ return PTR_ERR(mtk_nor->nor_clk);
- mt8173_nor->dev = &pdev->dev;
+ mtk_nor->dev = &pdev->dev;
- ret = mt8173_nor_enable_clk(mt8173_nor);
+ ret = mtk_nor_enable_clk(mtk_nor);
if (ret)
return ret;
@@ -503,20 +503,20 @@ static int mtk_nor_drv_probe(struct platform_device *pdev)
ret = -ENODEV;
goto nor_free;
}
- ret = mtk_nor_init(mt8173_nor, flash_np);
+ ret = mtk_nor_init(mtk_nor, flash_np);
nor_free:
if (ret)
- mt8173_nor_disable_clk(mt8173_nor);
+ mtk_nor_disable_clk(mtk_nor);
return ret;
}
static int mtk_nor_drv_remove(struct platform_device *pdev)
{
- struct mt8173_nor *mt8173_nor = platform_get_drvdata(pdev);
+ struct mtk_nor *mtk_nor = platform_get_drvdata(pdev);
- mt8173_nor_disable_clk(mt8173_nor);
+ mtk_nor_disable_clk(mtk_nor);
return 0;
}
@@ -524,18 +524,18 @@ static int mtk_nor_drv_remove(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int mtk_nor_suspend(struct device *dev)
{
- struct mt8173_nor *mt8173_nor = dev_get_drvdata(dev);
+ struct mtk_nor *mtk_nor = dev_get_drvdata(dev);
- mt8173_nor_disable_clk(mt8173_nor);
+ mtk_nor_disable_clk(mtk_nor);
return 0;
}
static int mtk_nor_resume(struct device *dev)
{
- struct mt8173_nor *mt8173_nor = dev_get_drvdata(dev);
+ struct mtk_nor *mtk_nor = dev_get_drvdata(dev);
- return mt8173_nor_enable_clk(mt8173_nor);
+ return mtk_nor_enable_clk(mtk_nor);
}
static const struct dev_pm_ops mtk_nor_dev_pm_ops = {
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index bc266f70a15b..d445a4d3b770 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -330,8 +330,22 @@ static inline int spi_nor_fsr_ready(struct spi_nor *nor)
int fsr = read_fsr(nor);
if (fsr < 0)
return fsr;
- else
- return fsr & FSR_READY;
+
+ if (fsr & (FSR_E_ERR | FSR_P_ERR)) {
+ if (fsr & FSR_E_ERR)
+ dev_err(nor->dev, "Erase operation failed.\n");
+ else
+ dev_err(nor->dev, "Program operation failed.\n");
+
+ if (fsr & FSR_PT_ERR)
+ dev_err(nor->dev,
+ "Attempted to modify a protected sector.\n");
+
+ nor->write_reg(nor, SPINOR_OP_CLFSR, NULL, 0);
+ return -EIO;
+ }
+
+ return fsr & FSR_READY;
}
static int spi_nor_ready(struct spi_nor *nor)
@@ -552,6 +566,27 @@ erase_err:
return ret;
}
+/* Write status register and ensure bits in mask match written values */
+static int write_sr_and_check(struct spi_nor *nor, u8 status_new, u8 mask)
+{
+ int ret;
+
+ write_enable(nor);
+ ret = write_sr(nor, status_new);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ return ret;
+
+ ret = read_sr(nor);
+ if (ret < 0)
+ return ret;
+
+ return ((ret & mask) != (status_new & mask)) ? -EIO : 0;
+}
+
static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
uint64_t *len)
{
@@ -650,7 +685,6 @@ static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
loff_t lock_len;
bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
bool use_top;
- int ret;
status_old = read_sr(nor);
if (status_old < 0)
@@ -714,11 +748,7 @@ static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
if ((status_new & mask) < (status_old & mask))
return -EINVAL;
- write_enable(nor);
- ret = write_sr(nor, status_new);
- if (ret)
- return ret;
- return spi_nor_wait_till_ready(nor);
+ return write_sr_and_check(nor, status_new, mask);
}
/*
@@ -735,7 +765,6 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
loff_t lock_len;
bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
bool use_top;
- int ret;
status_old = read_sr(nor);
if (status_old < 0)
@@ -802,11 +831,7 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
if ((status_new & mask) > (status_old & mask))
return -EINVAL;
- write_enable(nor);
- ret = write_sr(nor, status_new);
- if (ret)
- return ret;
- return spi_nor_wait_till_ready(nor);
+ return write_sr_and_check(nor, status_new, mask);
}
/*
@@ -1020,7 +1045,13 @@ static const struct flash_info spi_nor_ids[] = {
{ "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
/* ISSI */
- { "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },
+ { "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },
+ { "is25lq040b", INFO(0x9d4013, 0, 64 * 1024, 8,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25lp080d", INFO(0x9d6014, 0, 64 * 1024, 16,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25lp128", INFO(0x9d6018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ) },
/* Macronix */
{ "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1, SECT_4K) },
@@ -1065,7 +1096,7 @@ static const struct flash_info spi_nor_ids[] = {
{ "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
{ "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) },
- /* Spansion -- single (large) sector size only, at least
+ /* Spansion/Cypress -- single (large) sector size only, at least
* for the chips listed here (without boot sectors).
*/
{ "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
@@ -1094,6 +1125,8 @@ static const struct flash_info spi_nor_ids[] = {
{ "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ) },
{ "s25fl208k", INFO(0x014014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ) },
{ "s25fl064l", INFO(0x016017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "s25fl128l", INFO(0x016018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "s25fl256l", INFO(0x016019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
/* SST -- large erase sizes are "overlays", "sectors" are 4K */
{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
@@ -2713,6 +2746,16 @@ static void spi_nor_resume(struct mtd_info *mtd)
dev_err(dev, "resume() failed\n");
}
+void spi_nor_restore(struct spi_nor *nor)
+{
+ /* restore the addressing mode */
+ if ((nor->addr_width == 4) &&
+ (JEDEC_MFR(nor->info) != SNOR_MFR_SPANSION) &&
+ !(nor->info->flags & SPI_NOR_4B_OPCODES))
+ set_4byte(nor, nor->info, 0);
+}
+EXPORT_SYMBOL_GPL(spi_nor_restore);
+
int spi_nor_scan(struct spi_nor *nor, const char *name,
const struct spi_nor_hwcaps *hwcaps)
{
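Two of the spi-nor.c changes above share one idea: do not trust a status write blindly. spi_nor_fsr_ready() now decodes the flag status register's erase/program error bits instead of reporting success, and stm_lock()/stm_unlock() funnel through the new write_sr_and_check(), which reads the register back and compares the bits covered by the mask. A standalone sketch of that read-back-verify pattern; demo_write_reg()/demo_read_reg() are hypothetical stand-ins for write_sr()/read_sr():

#include <errno.h>
#include <stdint.h>

int demo_write_reg(uint8_t val);	/* returns 0 or -errno */
int demo_read_reg(void);		/* returns value or -errno */

int write_verified(uint8_t new_val, uint8_t mask)
{
	int ret;

	ret = demo_write_reg(new_val);
	if (ret)
		return ret;

	ret = demo_read_reg();
	if (ret < 0)
		return ret;

	/* a masked mismatch means the device ignored the write,
	 * e.g. because hardware write protection is asserted */
	return ((ret & mask) != (new_val & mask)) ? -EIO : 0;
}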
diff --git a/drivers/mtd/tests/nandbiterrs.c b/drivers/mtd/tests/nandbiterrs.c
index 5f03b8c885a9..cde19c99e77b 100644
--- a/drivers/mtd/tests/nandbiterrs.c
+++ b/drivers/mtd/tests/nandbiterrs.c
@@ -151,7 +151,7 @@ static int read_page(int log)
memcpy(&oldstats, &mtd->ecc_stats, sizeof(oldstats));
err = mtd_read(mtd, offset, mtd->writesize, &read, rbuffer);
- if (err == -EUCLEAN)
+ if (!err || err == -EUCLEAN)
err = mtd->ecc_stats.corrected - oldstats.corrected;
if (err < 0 || read != mtd->writesize) {
diff --git a/drivers/mtd/tests/oobtest.c b/drivers/mtd/tests/oobtest.c
index 1cb3f7758fb6..766b2c385682 100644
--- a/drivers/mtd/tests/oobtest.c
+++ b/drivers/mtd/tests/oobtest.c
@@ -193,6 +193,9 @@ static int verify_eraseblock(int ebnum)
ops.datbuf = NULL;
ops.oobbuf = readbuf;
err = mtd_read_oob(mtd, addr, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err || ops.oobretlen != use_len) {
pr_err("error: readoob failed at %#llx\n",
(long long)addr);
@@ -227,6 +230,9 @@ static int verify_eraseblock(int ebnum)
ops.datbuf = NULL;
ops.oobbuf = readbuf;
err = mtd_read_oob(mtd, addr, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err || ops.oobretlen != mtd->oobavail) {
pr_err("error: readoob failed at %#llx\n",
(long long)addr);
@@ -286,6 +292,9 @@ static int verify_eraseblock_in_one_go(int ebnum)
/* read entire block's OOB at one go */
err = mtd_read_oob(mtd, addr, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err || ops.oobretlen != len) {
pr_err("error: readoob failed at %#llx\n",
(long long)addr);
@@ -527,6 +536,9 @@ static int __init mtd_oobtest_init(void)
pr_info("attempting to start read past end of OOB\n");
pr_info("an error is expected...\n");
err = mtd_read_oob(mtd, addr0, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err) {
pr_info("error occurred as expected\n");
err = 0;
@@ -571,6 +583,9 @@ static int __init mtd_oobtest_init(void)
pr_info("attempting to read past end of device\n");
pr_info("an error is expected...\n");
err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err) {
pr_info("error occurred as expected\n");
err = 0;
@@ -615,6 +630,9 @@ static int __init mtd_oobtest_init(void)
pr_info("attempting to read past end of device\n");
pr_info("an error is expected...\n");
err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err) {
pr_info("error occurred as expected\n");
err = 0;
@@ -684,6 +702,9 @@ static int __init mtd_oobtest_init(void)
ops.datbuf = NULL;
ops.oobbuf = readbuf;
err = mtd_read_oob(mtd, addr, &ops);
+ if (mtd_is_bitflip(err))
+ err = 0;
+
if (err)
goto out;
if (memcmpshow(addr, readbuf, writebuf,
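Every oobtest.c hunk applies the same fix: mtd_read_oob() returns -EUCLEAN when ECC had to correct bitflips, which on NAND is an expected, recoverable condition, so the test must squash it before checking for real failures. mtd_is_bitflip() exists for exactly this; a minimal sketch of the convention:

#include <linux/mtd/mtd.h>

/* a sketch of the convention, not the test code itself */
static int demo_read_page(struct mtd_info *mtd, loff_t addr,
			  size_t len, u_char *buf)
{
	size_t retlen;
	int err;

	err = mtd_read(mtd, addr, len, &retlen, buf);
	if (mtd_is_bitflip(err))
		err = 0;	/* data is valid, ECC corrected it */
	if (err)
		return err;	/* uncorrectable or other I/O error */

	return retlen == len ? 0 : -EIO;
}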
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index b210fdb31c98..b1fc28f63882 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -99,6 +99,8 @@ struct ubiblock {
/* Linked list of all ubiblock instances */
static LIST_HEAD(ubiblock_devices);
+static DEFINE_IDR(ubiblock_minor_idr);
+/* Protects ubiblock_devices and ubiblock_minor_idr */
static DEFINE_MUTEX(devices_mutex);
static int ubiblock_major;
@@ -351,8 +353,6 @@ static const struct blk_mq_ops ubiblock_mq_ops = {
.init_request = ubiblock_init_request,
};
-static DEFINE_IDR(ubiblock_minor_idr);
-
int ubiblock_create(struct ubi_volume_info *vi)
{
struct ubiblock *dev;
@@ -365,14 +365,15 @@ int ubiblock_create(struct ubi_volume_info *vi)
/* Check that the volume isn't already handled */
mutex_lock(&devices_mutex);
if (find_dev_nolock(vi->ubi_num, vi->vol_id)) {
- mutex_unlock(&devices_mutex);
- return -EEXIST;
+ ret = -EEXIST;
+ goto out_unlock;
}
- mutex_unlock(&devices_mutex);
dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
+ if (!dev) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
mutex_init(&dev->dev_mutex);
@@ -437,14 +438,13 @@ int ubiblock_create(struct ubi_volume_info *vi)
goto out_free_queue;
}
- mutex_lock(&devices_mutex);
list_add_tail(&dev->list, &ubiblock_devices);
- mutex_unlock(&devices_mutex);
/* Must be the last step: anyone can call file ops from now on */
add_disk(dev->gd);
dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
dev->ubi_num, dev->vol_id, vi->name);
+ mutex_unlock(&devices_mutex);
return 0;
out_free_queue:
@@ -457,6 +457,8 @@ out_put_disk:
put_disk(dev->gd);
out_free_dev:
kfree(dev);
+out_unlock:
+ mutex_unlock(&devices_mutex);
return ret;
}
@@ -478,30 +480,36 @@ static void ubiblock_cleanup(struct ubiblock *dev)
int ubiblock_remove(struct ubi_volume_info *vi)
{
struct ubiblock *dev;
+ int ret;
mutex_lock(&devices_mutex);
dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
if (!dev) {
- mutex_unlock(&devices_mutex);
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_unlock;
}
/* Found a device, let's lock it so we can check if it's busy */
mutex_lock(&dev->dev_mutex);
if (dev->refcnt > 0) {
- mutex_unlock(&dev->dev_mutex);
- mutex_unlock(&devices_mutex);
- return -EBUSY;
+ ret = -EBUSY;
+ goto out_unlock_dev;
}
/* Remove from device list */
list_del(&dev->list);
- mutex_unlock(&devices_mutex);
-
ubiblock_cleanup(dev);
mutex_unlock(&dev->dev_mutex);
+ mutex_unlock(&devices_mutex);
+
kfree(dev);
return 0;
+
+out_unlock_dev:
+ mutex_unlock(&dev->dev_mutex);
+out_unlock:
+ mutex_unlock(&devices_mutex);
+ return ret;
}
static int ubiblock_resize(struct ubi_volume_info *vi)
@@ -630,6 +638,7 @@ static void ubiblock_remove_all(void)
struct ubiblock *next;
struct ubiblock *dev;
+ mutex_lock(&devices_mutex);
list_for_each_entry_safe(dev, next, &ubiblock_devices, list) {
/* The module is being forcefully removed */
WARN_ON(dev->desc);
@@ -638,6 +647,7 @@ static void ubiblock_remove_all(void)
ubiblock_cleanup(dev);
kfree(dev);
}
+ mutex_unlock(&devices_mutex);
}
int __init ubiblock_init(void)
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 136ce05d2328..e941395de3ae 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -535,8 +535,17 @@ static int get_bad_peb_limit(const struct ubi_device *ubi, int max_beb_per1024)
int limit, device_pebs;
uint64_t device_size;
- if (!max_beb_per1024)
- return 0;
+ if (!max_beb_per1024) {
+ /*
+ * Since max_beb_per1024 has not been set by the user in either
+ * the cmdline or Kconfig, use mtd_max_bad_blocks to set the
+ * limit if it is supported by the device.
+ */
+ limit = mtd_max_bad_blocks(ubi->mtd, 0, ubi->mtd->size);
+ if (limit < 0)
+ return 0;
+ return limit;
+ }
/*
* Here we are using size of the entire flash chip and
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 388e46be6ad9..250e30fac61b 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -384,7 +384,7 @@ static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
}
/**
- * leb_write_lock - lock logical eraseblock for writing.
+ * leb_write_trylock - try to lock logical eraseblock for writing.
* @ubi: UBI device description object
* @vol_id: volume ID
* @lnum: logical eraseblock number
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
index 4f0bd6b4422a..590d967011bb 100644
--- a/drivers/mtd/ubi/fastmap-wl.c
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -66,7 +66,7 @@ static void return_unused_pool_pebs(struct ubi_device *ubi,
}
}
-static int anchor_pebs_avalible(struct rb_root *root)
+static int anchor_pebs_available(struct rb_root *root)
{
struct rb_node *p;
struct ubi_wl_entry *e;
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 5a832bc79b1b..91705962ba73 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -214,9 +214,8 @@ static void assign_aeb_to_av(struct ubi_attach_info *ai,
struct ubi_ainf_volume *av)
{
struct ubi_ainf_peb *tmp_aeb;
- struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+ struct rb_node **p = &av->root.rb_node, *parent = NULL;
- p = &av->root.rb_node;
while (*p) {
parent = *p;
@@ -1063,7 +1062,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e) {
while (i--)
- kfree(fm->e[i]);
+ kmem_cache_free(ubi_wl_entry_slab, fm->e[i]);
ret = -ENOMEM;
goto free_hdr;
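The fastmap.c error-path fix deserves a note of its own: the fm->e[] entries were allocated from ubi_wl_entry_slab, so the cleanup loop should return them with kmem_cache_free() rather than kfree() to keep allocation and release matched against the same cache. A minimal sketch with a hypothetical cache and object type:

#include <linux/slab.h>

struct demo_obj {
	int pnum;
	int ec;
};

/* assumed to be created elsewhere with kmem_cache_create() */
static struct kmem_cache *demo_cache;

static int demo_alloc_free(void)
{
	struct demo_obj *obj;

	obj = kmem_cache_alloc(demo_cache, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	/* ... use obj ... */

	kmem_cache_free(demo_cache, obj);	/* matches kmem_cache_alloc() */
	return 0;
}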
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 85237cf661f9..3fd8d7ff7a02 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -270,6 +270,12 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
vol->last_eb_bytes = vol->usable_leb_size;
}
+ /* Make volume "available" before it becomes accessible via sysfs */
+ spin_lock(&ubi->volumes_lock);
+ ubi->volumes[vol_id] = vol;
+ ubi->vol_count += 1;
+ spin_unlock(&ubi->volumes_lock);
+
/* Register character device for the volume */
cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
vol->cdev.owner = THIS_MODULE;
@@ -298,11 +304,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
if (err)
goto out_sysfs;
- spin_lock(&ubi->volumes_lock);
- ubi->volumes[vol_id] = vol;
- ubi->vol_count += 1;
- spin_unlock(&ubi->volumes_lock);
-
ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED);
self_check_volumes(ubi);
return err;
@@ -315,6 +316,10 @@ out_sysfs:
*/
cdev_device_del(&vol->cdev, &vol->dev);
out_mapping:
+ spin_lock(&ubi->volumes_lock);
+ ubi->volumes[vol_id] = NULL;
+ ubi->vol_count -= 1;
+ spin_unlock(&ubi->volumes_lock);
ubi_eba_destroy_table(eba_tbl);
out_acc:
spin_lock(&ubi->volumes_lock);
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index b5b8cd6f481c..2052a647220e 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -692,7 +692,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
#ifdef CONFIG_MTD_UBI_FASTMAP
/* Check whether we need to produce an anchor PEB */
if (!anchor)
- anchor = !anchor_pebs_avalible(&ubi->free);
+ anchor = !anchor_pebs_available(&ubi->free);
if (anchor) {
e1 = find_anchor_wl_entry(&ubi->used);
@@ -1529,6 +1529,46 @@ static void shutdown_work(struct ubi_device *ubi)
}
/**
+ * erase_aeb - erase a PEB described by a UBI attach info PEB
+ * @ubi: UBI device description object
+ * @aeb: UBI attach info PEB
+ * @sync: If true, erase synchronously. Otherwise schedule for erasure
+ */
+static int erase_aeb(struct ubi_device *ubi, struct ubi_ainf_peb *aeb, bool sync)
+{
+ struct ubi_wl_entry *e;
+ int err;
+
+ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ e->pnum = aeb->pnum;
+ e->ec = aeb->ec;
+ ubi->lookuptbl[e->pnum] = e;
+
+ if (sync) {
+ err = sync_erase(ubi, e, false);
+ if (err)
+ goto out_free;
+
+ wl_tree_add(e, &ubi->free);
+ ubi->free_count++;
+ } else {
+ err = schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false);
+ if (err)
+ goto out_free;
+ }
+
+ return 0;
+
+out_free:
+ wl_entry_destroy(ubi, e);
+
+ return err;
+}
+
+/**
* ubi_wl_init - initialize the WL sub-system using attaching information.
* @ubi: UBI device description object
* @ai: attaching information
@@ -1566,17 +1606,9 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
cond_resched();
- e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
- if (!e)
- goto out_free;
-
- e->pnum = aeb->pnum;
- e->ec = aeb->ec;
- ubi->lookuptbl[e->pnum] = e;
- if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) {
- wl_entry_destroy(ubi, e);
+ err = erase_aeb(ubi, aeb, false);
+ if (err)
goto out_free;
- }
found_pebs++;
}
@@ -1585,8 +1617,10 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
cond_resched();
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
- if (!e)
+ if (!e) {
+ err = -ENOMEM;
goto out_free;
+ }
e->pnum = aeb->pnum;
e->ec = aeb->ec;
@@ -1605,8 +1639,10 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
cond_resched();
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
- if (!e)
+ if (!e) {
+ err = -ENOMEM;
goto out_free;
+ }
e->pnum = aeb->pnum;
e->ec = aeb->ec;
@@ -1635,6 +1671,8 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
ubi_assert(!ubi->lookuptbl[e->pnum]);
ubi->lookuptbl[e->pnum] = e;
} else {
+ bool sync = false;
+
/*
* Usually old Fastmap PEBs are scheduled for erasure
* and we don't have to care about them but if we face
@@ -1644,18 +1682,21 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
if (ubi->lookuptbl[aeb->pnum])
continue;
- e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
- if (!e)
- goto out_free;
+ /*
+ * The fastmap update code might not find a free PEB for
+ * writing the fastmap anchor to and then reuses the
+ * current fastmap anchor PEB. When this PEB gets erased
+ * and a power cut happens before it is written again we
+ * must make sure that the fastmap attach code doesn't
+ * find any outdated fastmap anchors, hence we erase the
+ * outdated fastmap anchor PEBs synchronously here.
+ */
+ if (aeb->vol_id == UBI_FM_SB_VOLUME_ID)
+ sync = true;
- e->pnum = aeb->pnum;
- e->ec = aeb->ec;
- ubi_assert(!ubi->lookuptbl[e->pnum]);
- ubi->lookuptbl[e->pnum] = e;
- if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false)) {
- wl_entry_destroy(ubi, e);
+ err = erase_aeb(ubi, aeb, sync);
+ if (err)
goto out_free;
- }
}
found_pebs++;
diff --git a/drivers/mtd/ubi/wl.h b/drivers/mtd/ubi/wl.h
index 2aaa3f7f2ba9..a9e2d669acd8 100644
--- a/drivers/mtd/ubi/wl.h
+++ b/drivers/mtd/ubi/wl.h
@@ -2,7 +2,7 @@
#ifndef UBI_WL_H
#define UBI_WL_H
#ifdef CONFIG_MTD_UBI_FASTMAP
-static int anchor_pebs_avalible(struct rb_root *root);
+static int anchor_pebs_available(struct rb_root *root);
static void update_fastmap_work_fn(struct work_struct *wrk);
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root);
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
index 9497f18eaba0..2f91ce8dc614 100644
--- a/drivers/net/ethernet/8390/mac8390.c
+++ b/drivers/net/ethernet/8390/mac8390.c
@@ -123,7 +123,8 @@ enum mac8390_access {
};
extern int mac8390_memtest(struct net_device *dev);
-static int mac8390_initdev(struct net_device *dev, struct nubus_dev *ndev,
+static int mac8390_initdev(struct net_device *dev,
+ struct nubus_rsrc *ndev,
enum mac8390_type type);
static int mac8390_open(struct net_device *dev);
@@ -169,11 +170,11 @@ static void word_memcpy_tocard(unsigned long tp, const void *fp, int count);
static void word_memcpy_fromcard(void *tp, unsigned long fp, int count);
static u32 mac8390_msg_enable;
-static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
+static enum mac8390_type __init mac8390_ident(struct nubus_rsrc *fres)
{
- switch (dev->dr_sw) {
+ switch (fres->dr_sw) {
case NUBUS_DRSW_3COM:
- switch (dev->dr_hw) {
+ switch (fres->dr_hw) {
case NUBUS_DRHW_APPLE_SONIC_NB:
case NUBUS_DRHW_APPLE_SONIC_LC:
case NUBUS_DRHW_SONNET:
@@ -184,7 +185,7 @@ static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
break;
case NUBUS_DRSW_APPLE:
- switch (dev->dr_hw) {
+ switch (fres->dr_hw) {
case NUBUS_DRHW_ASANTE_LC:
return MAC8390_NONE;
case NUBUS_DRHW_CABLETRON:
@@ -201,7 +202,7 @@ static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
case NUBUS_DRSW_TECHWORKS:
case NUBUS_DRSW_DAYNA2:
case NUBUS_DRSW_DAYNA_LC:
- if (dev->dr_hw == NUBUS_DRHW_CABLETRON)
+ if (fres->dr_hw == NUBUS_DRHW_CABLETRON)
return MAC8390_CABLETRON;
else
return MAC8390_APPLE;
@@ -212,7 +213,7 @@ static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
break;
case NUBUS_DRSW_KINETICS:
- switch (dev->dr_hw) {
+ switch (fres->dr_hw) {
case NUBUS_DRHW_INTERLAN:
return MAC8390_INTERLAN;
default:
@@ -225,8 +226,8 @@ static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
* These correspond to Dayna Sonic cards
* which use the macsonic driver
*/
- if (dev->dr_hw == NUBUS_DRHW_SMC9194 ||
- dev->dr_hw == NUBUS_DRHW_INTERLAN)
+ if (fres->dr_hw == NUBUS_DRHW_SMC9194 ||
+ fres->dr_hw == NUBUS_DRHW_INTERLAN)
return MAC8390_NONE;
else
return MAC8390_DAYNA;
@@ -289,7 +290,8 @@ static int __init mac8390_memsize(unsigned long membase)
return i * 0x1000;
}
-static bool __init mac8390_init(struct net_device *dev, struct nubus_dev *ndev,
+static bool __init mac8390_init(struct net_device *dev,
+ struct nubus_rsrc *ndev,
enum mac8390_type cardtype)
{
struct nubus_dir dir;
@@ -394,7 +396,7 @@ static bool __init mac8390_init(struct net_device *dev, struct nubus_dev *ndev,
struct net_device * __init mac8390_probe(int unit)
{
struct net_device *dev;
- struct nubus_dev *ndev = NULL;
+ struct nubus_rsrc *ndev = NULL;
int err = -ENODEV;
struct ei_device *ei_local;
@@ -414,8 +416,11 @@ struct net_device * __init mac8390_probe(int unit)
if (unit >= 0)
sprintf(dev->name, "eth%d", unit);
- while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK, NUBUS_TYPE_ETHERNET,
- ndev))) {
+ for_each_func_rsrc(ndev) {
+ if (ndev->category != NUBUS_CAT_NETWORK ||
+ ndev->type != NUBUS_TYPE_ETHERNET)
+ continue;
+
/* Have we seen it already? */
if (slots & (1 << ndev->board->slot))
continue;
@@ -489,7 +494,7 @@ static const struct net_device_ops mac8390_netdev_ops = {
};
static int __init mac8390_initdev(struct net_device *dev,
- struct nubus_dev *ndev,
+ struct nubus_rsrc *ndev,
enum mac8390_type type)
{
static u32 fwrd4_offsets[16] = {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 429467364219..9040e13ce4b7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -58,6 +58,13 @@
extern struct list_head adapter_list;
extern struct mutex uld_mutex;
+/* Suspend an Ethernet Tx queue with fewer available descriptors than this.
+ * This is the same as calc_tx_descs() for a TSO packet with
+ * nr_frags == MAX_SKB_FRAGS.
+ */
+#define ETHTXQ_STOP_THRES \
+ (1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
+
enum {
MAX_NPORTS = 4, /* max # of ports */
SERNUM_LEN = 24, /* Serial # length */
@@ -565,6 +572,7 @@ enum { /* adapter flags */
enum {
ULP_CRYPTO_LOOKASIDE = 1 << 0,
+ ULP_CRYPTO_IPSEC_INLINE = 1 << 1,
};
struct rx_sw_desc;
@@ -981,6 +989,11 @@ enum {
SCHED_CLASS_RATEMODE_ABS = 1, /* Kb/s */
};
+struct tx_sw_desc { /* SW state per Tx descriptor */
+ struct sk_buff *skb;
+ struct ulptx_sgl *sgl;
+};
+
/* Support for "sched_queue" command to allow one or more NIC TX Queues
* to be bound to a TX Scheduling Class.
*/
@@ -1739,6 +1752,16 @@ void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl);
void free_tx_desc(struct adapter *adap, struct sge_txq *q,
unsigned int n, bool unmap);
void free_txq(struct adapter *adap, struct sge_txq *q);
+void cxgb4_reclaim_completed_tx(struct adapter *adap,
+ struct sge_txq *q, bool unmap);
+int cxgb4_map_skb(struct device *dev, const struct sk_buff *skb,
+ dma_addr_t *addr);
+void cxgb4_inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
+ void *pos);
+void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
+ struct ulptx_sgl *sgl, u64 *end, unsigned int start,
+ const dma_addr_t *addr);
+void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n);
int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
u16 vlan);
#endif /* __CXGB4_H__ */
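Moving ETHTXQ_STOP_THRES (together with tx_sw_desc and the cxgb4_* Tx helpers) from sge.c into the shared header lets the new inline-IPsec transmit path reuse the same queue bookkeeping. The threshold itself is just the descriptor footprint of a worst-case TSO skb; a standalone check of the arithmetic, assuming the common MAX_SKB_FRAGS value of 17:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define MAX_SKB_FRAGS		17	/* typical value with 4 KiB pages */

/* same expression as the ETHTXQ_STOP_THRES definition above */
#define ETHTXQ_STOP_THRES \
	(1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))

int main(void)
{
	/* (3*17)/2 + (17&1) = 26 -> DIV_ROUND_UP(26, 8) = 4, plus 1: 5 */
	printf("stop threshold = %d descriptors\n", ETHTXQ_STOP_THRES);
	return 0;
}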
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 4ea76c1411dc..2822bbff73e8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2906,6 +2906,8 @@ static int chcr_show(struct seq_file *seq, void *v)
atomic_read(&adap->chcr_stats.error));
seq_printf(seq, "Fallback: %10u \n",
atomic_read(&adap->chcr_stats.fallback));
+ seq_printf(seq, "IPSec PDU: %10u\n",
+ atomic_read(&adap->chcr_stats.ipsec_cnt));
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 1e3cd8abc56d..1ca2a39ed0f8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -4294,7 +4294,7 @@ static int adap_init0(struct adapter *adap)
} else {
adap->vres.ncrypto_fc = val[0];
}
- adap->params.crypto |= ULP_CRYPTO_LOOKASIDE;
+ adap->params.crypto = ntohs(caps_cmd.cryptocaps);
adap->num_uld += 1;
}
#undef FW_PARAM_PFVF
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 71a315bc1409..6b5fea4532f3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -637,6 +637,7 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
lld->nchan = adap->params.nports;
lld->nports = adap->params.nports;
lld->wr_cred = adap->params.ofldq_wr_cred;
+ lld->crypto = adap->params.crypto;
lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 08e709ab6dd4..1d37672902da 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -297,6 +297,7 @@ struct chcr_stats_debug {
atomic_t complete;
atomic_t error;
atomic_t fallback;
+ atomic_t ipsec_cnt;
};
#define OCQ_WIN_OFFSET(pdev, vres) \
@@ -322,6 +323,7 @@ struct cxgb4_lld_info {
unsigned char wr_cred; /* WR 16-byte credits */
unsigned char adapter_type; /* type of adapter */
unsigned char fw_api_ver; /* FW API version */
+ unsigned char crypto; /* crypto support */
unsigned int fw_vers; /* FW version */
unsigned int iscsi_iolen; /* iSCSI max I/O length */
unsigned int cclk_ps; /* Core clock period in psec */
@@ -370,6 +372,7 @@ struct cxgb4_uld_info {
struct t4_lro_mgr *lro_mgr,
struct napi_struct *napi);
void (*lro_flush)(struct t4_lro_mgr *);
+ int (*tx_handler)(struct sk_buff *skb, struct net_device *dev);
};
int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
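The new tx_handler hook gives the crypto ULD first look at egress skbs (see the sge.c change below), and lld->crypto carries the firmware's cryptocaps so the ULD can distinguish lookaside from inline support. A hedged registration sketch — the ULD and handler names here are invented:

	static int example_ipsec_tx(struct sk_buff *skb, struct net_device *dev)
	{
		/* build an inline-IPsec work request for this skb */
		return NETDEV_TX_OK;
	}

	static void *example_uld_add(const struct cxgb4_lld_info *lld)
	{
		if (!(lld->crypto & ULP_CRYPTO_IPSEC_INLINE))
			return ERR_PTR(-EOPNOTSUPP);	/* FW lacks inline IPsec */
		return NULL;				/* per-adapter state here */
	}

	static const struct cxgb4_uld_info example_uld_info = {
		.add	    = example_uld_add,
		.tx_handler = example_ipsec_tx,
	};

	/* module init: cxgb4_register_uld(CXGB4_ULD_CRYPTO, &example_uld_info); */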
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index a7af71bf14fb..6e310a0da7c9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -41,6 +41,7 @@
#include <linux/jiffies.h>
#include <linux/prefetch.h>
#include <linux/export.h>
+#include <net/xfrm.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/busy_poll.h>
@@ -53,6 +54,7 @@
#include "t4_msg.h"
#include "t4fw_api.h"
#include "cxgb4_ptp.h"
+#include "cxgb4_uld.h"
/*
* Rx buffer size. We use largish buffers if possible but settle for single
@@ -110,14 +112,6 @@
#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
/*
- * Suspend an Ethernet Tx queue with fewer available descriptors than this.
- * This is the same as calc_tx_descs() for a TSO packet with
- * nr_frags == MAX_SKB_FRAGS.
- */
-#define ETHTXQ_STOP_THRES \
- (1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
-
-/*
* Suspension threshold for non-Ethernet Tx queues. We require enough room
* for a full sized WR.
*/
@@ -134,11 +128,6 @@
*/
#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
-struct tx_sw_desc { /* SW state per Tx descriptor */
- struct sk_buff *skb;
- struct ulptx_sgl *sgl;
-};
-
struct rx_sw_desc { /* SW state per Rx descriptor */
struct page *page;
dma_addr_t dma_addr;
@@ -248,8 +237,8 @@ static inline bool fl_starving(const struct adapter *adapter,
return fl->avail - fl->pend_cred <= s->fl_starve_thres;
}
-static int map_skb(struct device *dev, const struct sk_buff *skb,
- dma_addr_t *addr)
+int cxgb4_map_skb(struct device *dev, const struct sk_buff *skb,
+ dma_addr_t *addr)
{
const skb_frag_t *fp, *end;
const struct skb_shared_info *si;
@@ -277,6 +266,7 @@ unwind:
out_err:
return -ENOMEM;
}
+EXPORT_SYMBOL(cxgb4_map_skb);
#ifdef CONFIG_NEED_DMA_MAP_STATE
static void unmap_skb(struct device *dev, const struct sk_buff *skb,
@@ -411,7 +401,7 @@ static inline int reclaimable(const struct sge_txq *q)
}
/**
- * reclaim_completed_tx - reclaims completed Tx descriptors
+ * cxgb4_reclaim_completed_tx - reclaims completed Tx descriptors
* @adap: the adapter
* @q: the Tx queue to reclaim completed descriptors from
* @unmap: whether the buffers should be unmapped for DMA
@@ -420,7 +410,7 @@ static inline int reclaimable(const struct sge_txq *q)
* and frees the associated buffers if possible. Called with the Tx
* queue locked.
*/
-static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
+inline void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
bool unmap)
{
int avail = reclaimable(q);
@@ -437,6 +427,7 @@ static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
q->in_use -= avail;
}
}
+EXPORT_SYMBOL(cxgb4_reclaim_completed_tx);
static inline int get_buf_size(struct adapter *adapter,
const struct rx_sw_desc *d)
@@ -849,7 +840,7 @@ static inline unsigned int calc_tx_descs(const struct sk_buff *skb,
}
/**
- * write_sgl - populate a scatter/gather list for a packet
+ * cxgb4_write_sgl - populate a scatter/gather list for a packet
* @skb: the packet
* @q: the Tx queue we are writing into
* @sgl: starting location for writing the SGL
@@ -865,9 +856,9 @@ static inline unsigned int calc_tx_descs(const struct sk_buff *skb,
* right after the end of the SGL but does not account for any potential
* wrap around, i.e., @end > @sgl.
*/
-static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
- struct ulptx_sgl *sgl, u64 *end, unsigned int start,
- const dma_addr_t *addr)
+void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
+ struct ulptx_sgl *sgl, u64 *end, unsigned int start,
+ const dma_addr_t *addr)
{
unsigned int i, len;
struct ulptx_sge_pair *to;
@@ -919,6 +910,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
*end = 0;
}
+EXPORT_SYMBOL(cxgb4_write_sgl);
/* This function copies 64 byte coalesced work request to
* memory mapped BAR2 space. For coalesced WR SGE fetches
@@ -937,14 +929,14 @@ static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
}
/**
- * ring_tx_db - check and potentially ring a Tx queue's doorbell
+ * cxgb4_ring_tx_db - check and potentially ring a Tx queue's doorbell
* @adap: the adapter
* @q: the Tx queue
* @n: number of new descriptors to give to HW
*
* Ring the doorbell for a Tx queue.
*/
-static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
+inline void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
{
/* Make sure that all writes to the TX Descriptors are committed
* before we tell the hardware about them.
@@ -1011,9 +1003,10 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
wmb();
}
}
+EXPORT_SYMBOL(cxgb4_ring_tx_db);
/**
- * inline_tx_skb - inline a packet's data into Tx descriptors
+ * cxgb4_inline_tx_skb - inline a packet's data into Tx descriptors
* @skb: the packet
* @q: the Tx queue where the packet will be inlined
* @pos: starting position in the Tx queue where to inline the packet
@@ -1023,8 +1016,8 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
* Most of the complexity of this operation is dealing with wrap arounds
* in the middle of the packet we want to inline.
*/
-static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
- void *pos)
+void cxgb4_inline_tx_skb(const struct sk_buff *skb,
+ const struct sge_txq *q, void *pos)
{
u64 *p;
int left = (void *)q->stat - pos;
@@ -1046,6 +1039,7 @@ static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
if ((uintptr_t)p & 8)
*p = 0;
}
+EXPORT_SYMBOL(cxgb4_inline_tx_skb);
static void *inline_tx_skb_header(const struct sk_buff *skb,
const struct sge_txq *q, void *pos,
@@ -1317,6 +1311,12 @@ out_free: dev_kfree_skb_any(skb);
pi = netdev_priv(dev);
adap = pi->adapter;
+ ssi = skb_shinfo(skb);
+#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+ if (xfrm_offload(skb) && !ssi->gso_size)
+ return adap->uld[CXGB4_ULD_CRYPTO].tx_handler(skb, dev);
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+
qidx = skb_get_queue_mapping(skb);
if (ptp_enabled) {
spin_lock(&adap->ptp_lock);
@@ -1333,7 +1333,7 @@ out_free: dev_kfree_skb_any(skb);
}
skb_tx_timestamp(skb);
- reclaim_completed_tx(adap, &q->q, true);
+ cxgb4_reclaim_completed_tx(adap, &q->q, true);
cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
#ifdef CONFIG_CHELSIO_T4_FCOE
@@ -1367,7 +1367,7 @@ out_free: dev_kfree_skb_any(skb);
tnl_type = cxgb_encap_offload_supported(skb);
if (!immediate &&
- unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
+ unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
q->mapping_err++;
if (ptp_enabled)
spin_unlock(&adap->ptp_lock);
@@ -1386,7 +1386,6 @@ out_free: dev_kfree_skb_any(skb);
end = (u64 *)wr + flits;
len = immediate ? skb->len : 0;
- ssi = skb_shinfo(skb);
if (ssi->gso_size) {
struct cpl_tx_pkt_lso *lso = (void *)wr;
bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
@@ -1488,13 +1487,13 @@ out_free: dev_kfree_skb_any(skb);
cpl->ctrl1 = cpu_to_be64(cntrl);
if (immediate) {
- inline_tx_skb(skb, &q->q, cpl + 1);
+ cxgb4_inline_tx_skb(skb, &q->q, cpl + 1);
dev_consume_skb_any(skb);
} else {
int last_desc;
- write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
- addr);
+ cxgb4_write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1),
+ end, 0, addr);
skb_orphan(skb);
last_desc = q->q.pidx + ndesc - 1;
@@ -1506,7 +1505,7 @@ out_free: dev_kfree_skb_any(skb);
txq_advance(&q->q, ndesc);
- ring_tx_db(adap, &q->q, ndesc);
+ cxgb4_ring_tx_db(adap, &q->q, ndesc);
if (ptp_enabled)
spin_unlock(&adap->ptp_lock);
return NETDEV_TX_OK;
@@ -1516,9 +1515,9 @@ out_free: dev_kfree_skb_any(skb);
* reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
* @q: the SGE control Tx queue
*
- * This is a variant of reclaim_completed_tx() that is used for Tx queues
- * that send only immediate data (presently just the control queues) and
- * thus do not have any sk_buffs to release.
+ * This is a variant of cxgb4_reclaim_completed_tx() that is used
+ * for Tx queues that send only immediate data (presently just
+ * the control queues) and thus do not have any sk_buffs to release.
*/
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
{
@@ -1593,13 +1592,13 @@ static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
}
wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
- inline_tx_skb(skb, &q->q, wr);
+ cxgb4_inline_tx_skb(skb, &q->q, wr);
txq_advance(&q->q, ndesc);
if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
ctrlq_check_stop(q, wr);
- ring_tx_db(q->adap, &q->q, ndesc);
+ cxgb4_ring_tx_db(q->adap, &q->q, ndesc);
spin_unlock(&q->sendq.lock);
kfree_skb(skb);
@@ -1634,7 +1633,7 @@ static void restart_ctrlq(unsigned long data)
txq_advance(&q->q, ndesc);
spin_unlock(&q->sendq.lock);
- inline_tx_skb(skb, &q->q, wr);
+ cxgb4_inline_tx_skb(skb, &q->q, wr);
kfree_skb(skb);
if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
@@ -1647,14 +1646,15 @@ static void restart_ctrlq(unsigned long data)
}
}
if (written > 16) {
- ring_tx_db(q->adap, &q->q, written);
+ cxgb4_ring_tx_db(q->adap, &q->q, written);
written = 0;
}
spin_lock(&q->sendq.lock);
}
q->full = 0;
-ringdb: if (written)
- ring_tx_db(q->adap, &q->q, written);
+ringdb:
+ if (written)
+ cxgb4_ring_tx_db(q->adap, &q->q, written);
spin_unlock(&q->sendq.lock);
}
@@ -1797,7 +1797,7 @@ static void service_ofldq(struct sge_uld_txq *q)
*/
spin_unlock(&q->sendq.lock);
- reclaim_completed_tx(q->adap, &q->q, false);
+ cxgb4_reclaim_completed_tx(q->adap, &q->q, false);
flits = skb->priority; /* previously saved */
ndesc = flits_to_desc(flits);
@@ -1808,9 +1808,9 @@ static void service_ofldq(struct sge_uld_txq *q)
pos = (u64 *)&q->q.desc[q->q.pidx];
if (is_ofld_imm(skb))
- inline_tx_skb(skb, &q->q, pos);
- else if (map_skb(q->adap->pdev_dev, skb,
- (dma_addr_t *)skb->head)) {
+ cxgb4_inline_tx_skb(skb, &q->q, pos);
+ else if (cxgb4_map_skb(q->adap->pdev_dev, skb,
+ (dma_addr_t *)skb->head)) {
txq_stop_maperr(q);
spin_lock(&q->sendq.lock);
break;
@@ -1841,9 +1841,9 @@ static void service_ofldq(struct sge_uld_txq *q)
pos = (void *)txq->desc;
}
- write_sgl(skb, &q->q, (void *)pos,
- end, hdr_len,
- (dma_addr_t *)skb->head);
+ cxgb4_write_sgl(skb, &q->q, (void *)pos,
+ end, hdr_len,
+ (dma_addr_t *)skb->head);
#ifdef CONFIG_NEED_DMA_MAP_STATE
skb->dev = q->adap->port[0];
skb->destructor = deferred_unmap_destructor;
@@ -1857,7 +1857,7 @@ static void service_ofldq(struct sge_uld_txq *q)
txq_advance(&q->q, ndesc);
written += ndesc;
if (unlikely(written > 32)) {
- ring_tx_db(q->adap, &q->q, written);
+ cxgb4_ring_tx_db(q->adap, &q->q, written);
written = 0;
}
@@ -1872,7 +1872,7 @@ static void service_ofldq(struct sge_uld_txq *q)
kfree_skb(skb);
}
if (likely(written))
- ring_tx_db(q->adap, &q->q, written);
+ cxgb4_ring_tx_db(q->adap, &q->q, written);
/* Indicate that no thread is processing the Pending Send Queue
* currently.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index f88766d2401d..0d83b4064a78 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -513,6 +513,13 @@ struct fw_ulptx_wr {
u64 cookie;
};
+#define FW_ULPTX_WR_DATA_S 28
+#define FW_ULPTX_WR_DATA_M 0x1
+#define FW_ULPTX_WR_DATA_V(x) ((x) << FW_ULPTX_WR_DATA_S)
+#define FW_ULPTX_WR_DATA_G(x) \
+ (((x) >> FW_ULPTX_WR_DATA_S) & FW_ULPTX_WR_DATA_M)
+#define FW_ULPTX_WR_DATA_F FW_ULPTX_WR_DATA_V(1U)
+
struct fw_tp_wr {
__be32 op_to_immdlen;
__be32 flowid_len16;
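The new macros follow the driver's standard _S/_M/_V/_G/_F pattern (shift, mask, set-value, get-value, one-bit flag). Setting and testing the bit in a ULP_TX work-request header would look roughly like this (the op_to_compl field name is assumed from the surrounding header):

	struct fw_ulptx_wr *wr;		/* points into the Tx descriptor ring */

	wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR) | FW_ULPTX_WR_DATA_F);

	if (FW_ULPTX_WR_DATA_G(ntohl(wr->op_to_compl)))
		;	/* the WR carries data */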
diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c
index f910f0f386d6..977d4c2c759d 100644
--- a/drivers/net/ethernet/cirrus/mac89x0.c
+++ b/drivers/net/ethernet/cirrus/mac89x0.c
@@ -187,6 +187,7 @@ struct net_device * __init mac89x0_probe(int unit)
unsigned long ioaddr;
unsigned short sig;
int err = -ENODEV;
+ struct nubus_rsrc *fres;
if (!MACH_IS_MAC)
return ERR_PTR(-ENODEV);
@@ -207,8 +208,9 @@ struct net_device * __init mac89x0_probe(int unit)
/* We might have to parameterize this later */
slot = 0xE;
/* Get out now if there's a real NuBus card in slot E */
- if (nubus_find_slot(slot, NULL) != NULL)
- goto out;
+ for_each_func_rsrc(fres)
+ if (fres->board->slot == slot)
+ goto out;
/* The pseudo-ISA bits always live at offset 0x300 (gee,
wonder why...) */
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 769598f7b6c8..3aaf4bad6c5a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -287,6 +287,9 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
u64 in_param = 0;
int err;
+ if (!cnt)
+ return;
+
if (mlx4_is_mfunc(dev)) {
set_param_l(&in_param, base_qpn);
set_param_h(&in_param, cnt);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 4d98ce0901af..05c1157bc9f2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -417,7 +417,11 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
mlx5_cq_completion(dev, cqn);
break;
-
+ case MLX5_EVENT_TYPE_DCT_DRAINED:
+ rsn = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
+ rsn |= (MLX5_RES_DCT << MLX5_USER_INDEX_LEN);
+ mlx5_rsc_event(dev, rsn, eqe->type);
+ break;
case MLX5_EVENT_TYPE_PATH_MIG:
case MLX5_EVENT_TYPE_COMM_EST:
case MLX5_EVENT_TYPE_SQ_DRAINED:
@@ -733,6 +737,9 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
if (MLX5_CAP_GEN(dev, fpga))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR);
+ if (MLX5_CAP_GEN_MAX(dev, dct))
+ async_event_mask |= (1ull << MLX5_EVENT_TYPE_DCT_DRAINED);
+
err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
index c4392f741c5f..e6175f8ac0e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
@@ -688,7 +688,7 @@ static inline int mlx5_fpga_conn_init_qp(struct mlx5_fpga_conn *conn)
MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
MLX5_SET(qpc, qpc, primary_address_path.pkey_index, MLX5_FPGA_PKEY_INDEX);
- MLX5_SET(qpc, qpc, primary_address_path.port, MLX5_FPGA_PORT_NUM);
+ MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, MLX5_FPGA_PORT_NUM);
MLX5_SET(qpc, qpc, pd, conn->fdev->conn_res.pdn);
MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn);
MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn);
@@ -727,7 +727,7 @@ static inline int mlx5_fpga_conn_rtr_qp(struct mlx5_fpga_conn *conn)
MLX5_SET(qpc, qpc, next_rcv_psn,
MLX5_GET(fpga_qpc, conn->fpga_qpc, next_send_psn));
MLX5_SET(qpc, qpc, primary_address_path.pkey_index, MLX5_FPGA_PKEY_INDEX);
- MLX5_SET(qpc, qpc, primary_address_path.port, MLX5_FPGA_PORT_NUM);
+ MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, MLX5_FPGA_PORT_NUM);
ether_addr_copy(MLX5_ADDR_OF(qpc, qpc, primary_address_path.rmac_47_32),
MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, fpga_mac_47_32));
MLX5_SET(qpc, qpc, primary_address_path.udp_sport,
@@ -888,7 +888,8 @@ struct mlx5_fpga_conn *mlx5_fpga_conn_create(struct mlx5_fpga_device *fdev,
err = mlx5_core_roce_gid_set(fdev->mdev, conn->qp.sgid_index,
MLX5_ROCE_VERSION_2,
MLX5_ROCE_L3_TYPE_IPV6,
- remote_ip, remote_mac, true, 0);
+ remote_ip, remote_mac, true, 0,
+ MLX5_FPGA_PORT_NUM);
if (err) {
mlx5_fpga_err(fdev, "Failed to set SGID: %d\n", err);
ret = ERR_PTR(err);
@@ -954,7 +955,7 @@ err_cq:
mlx5_fpga_conn_destroy_cq(conn);
err_gid:
mlx5_core_roce_gid_set(fdev->mdev, conn->qp.sgid_index, 0, 0, NULL,
- NULL, false, 0);
+ NULL, false, 0, MLX5_FPGA_PORT_NUM);
err_rsvd_gid:
mlx5_core_reserved_gid_free(fdev->mdev, conn->qp.sgid_index);
err:
@@ -982,7 +983,7 @@ void mlx5_fpga_conn_destroy(struct mlx5_fpga_conn *conn)
mlx5_fpga_conn_destroy_cq(conn);
mlx5_core_roce_gid_set(conn->fdev->mdev, conn->qp.sgid_index, 0, 0,
- NULL, NULL, false, 0);
+ NULL, NULL, false, 0, MLX5_FPGA_PORT_NUM);
mlx5_core_reserved_gid_free(conn->fdev->mdev, conn->qp.sgid_index);
kfree(conn);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 5ef1b56b6a96..9d11e92fb541 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -195,12 +195,20 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return 0;
}
-int mlx5_cmd_init_hca(struct mlx5_core_dev *dev)
+int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id)
{
u32 out[MLX5_ST_SZ_DW(init_hca_out)] = {0};
u32 in[MLX5_ST_SZ_DW(init_hca_in)] = {0};
+ int i;
MLX5_SET(init_hca_in, in, opcode, MLX5_CMD_OP_INIT_HCA);
+
+ if (MLX5_CAP_GEN(dev, sw_owner_id)) {
+ for (i = 0; i < 4; i++)
+ MLX5_ARRAY_SET(init_hca_in, in, sw_owner_id, i,
+ sw_owner_id[i]);
+ }
+
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 1f50b77a081d..f953378bd13d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -188,7 +188,7 @@ int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp
MLX5_QP_ENHANCED_ULP_STATELESS_MODE);
addr_path = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
- MLX5_SET(ads, addr_path, port, 1);
+ MLX5_SET(ads, addr_path, vhca_port_num, 1);
MLX5_SET(ads, addr_path, grh, 1);
ret = mlx5_core_create_qp(mdev, qp, in, inlen);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 5701f125e99c..e159243e0fcf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -31,6 +31,8 @@
*/
#include <linux/clocksource.h>
+#include <linux/highmem.h>
+#include <rdma/mlx5-abi.h>
#include "en.h"
enum {
@@ -71,6 +73,28 @@ static u64 read_internal_timer(const struct cyclecounter *cc)
return mlx5_read_internal_timer(mdev) & cc->mask;
}
+static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
+ struct mlx5_clock *clock = &mdev->clock;
+ u32 sign;
+
+ if (!clock_info)
+ return;
+
+ sign = smp_load_acquire(&clock_info->sign);
+ smp_store_mb(clock_info->sign,
+ sign | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);
+
+ clock_info->cycles = clock->tc.cycle_last;
+ clock_info->mult = clock->cycles.mult;
+ clock_info->nsec = clock->tc.nsec;
+ clock_info->frac = clock->tc.frac;
+
+ smp_store_release(&clock_info->sign,
+ sign + MLX5_IB_CLOCK_INFO_KERNEL_UPDATING * 2);
+}
+
static void mlx5_pps_out(struct work_struct *work)
{
struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps,
@@ -109,6 +133,7 @@ static void mlx5_timestamp_overflow(struct work_struct *work)
write_lock_irqsave(&clock->lock, flags);
timecounter_read(&clock->tc);
+ mlx5_update_clock_info_page(clock->mdev);
write_unlock_irqrestore(&clock->lock, flags);
schedule_delayed_work(&clock->overflow_work, clock->overflow_period);
}
@@ -123,6 +148,7 @@ static int mlx5_ptp_settime(struct ptp_clock_info *ptp,
write_lock_irqsave(&clock->lock, flags);
timecounter_init(&clock->tc, &clock->cycles, ns);
+ mlx5_update_clock_info_page(clock->mdev);
write_unlock_irqrestore(&clock->lock, flags);
return 0;
@@ -152,6 +178,7 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
write_lock_irqsave(&clock->lock, flags);
timecounter_adjtime(&clock->tc, delta);
+ mlx5_update_clock_info_page(clock->mdev);
write_unlock_irqrestore(&clock->lock, flags);
return 0;
@@ -179,6 +206,7 @@ static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
timecounter_read(&clock->tc);
clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
clock->nominal_c_mult + diff;
+ mlx5_update_clock_info_page(clock->mdev);
write_unlock_irqrestore(&clock->lock, flags);
return 0;
@@ -474,6 +502,7 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
clock->cycles.shift);
clock->nominal_c_mult = clock->cycles.mult;
clock->cycles.mask = CLOCKSOURCE_MASK(41);
+ clock->mdev = mdev;
timecounter_init(&clock->tc, &clock->cycles,
ktime_to_ns(ktime_get_real()));
@@ -486,6 +515,25 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
do_div(ns, NSEC_PER_SEC / 2 / HZ);
clock->overflow_period = ns;
+ mdev->clock_info_page = alloc_page(GFP_KERNEL);
+ if (mdev->clock_info_page) {
+ mdev->clock_info = kmap(mdev->clock_info_page);
+ if (!mdev->clock_info) {
+ __free_page(mdev->clock_info_page);
+ mlx5_core_warn(mdev, "failed to map clock page\n");
+ } else {
+ mdev->clock_info->sign = 0;
+ mdev->clock_info->nsec = clock->tc.nsec;
+ mdev->clock_info->cycles = clock->tc.cycle_last;
+ mdev->clock_info->mask = clock->cycles.mask;
+ mdev->clock_info->mult = clock->nominal_c_mult;
+ mdev->clock_info->shift = clock->cycles.shift;
+ mdev->clock_info->frac = clock->tc.frac;
+ mdev->clock_info->overflow_period =
+ clock->overflow_period;
+ }
+ }
+
INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);
INIT_DELAYED_WORK(&clock->overflow_work, mlx5_timestamp_overflow);
if (clock->overflow_period)
@@ -525,5 +573,12 @@ void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
cancel_work_sync(&clock->pps_info.out_work);
cancel_delayed_work_sync(&clock->overflow_work);
+
+ if (mdev->clock_info) {
+ kunmap(mdev->clock_info_page);
+ __free_page(mdev->clock_info_page);
+ mdev->clock_info = NULL;
+ }
+
kfree(clock->ptp_info.pin_config);
}
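The sign field turns the exported page into a sequence lock: the kernel ORs in MLX5_IB_CLOCK_INFO_KERNEL_UPDATING before writing and stores sign + 2*UPDATING afterwards, so a reader sees either a stable even counter or an in-progress flag. A hedged sketch of the matching userspace read loop over the mmap'ed page (the barrier intrinsics stand in for proper acquire/release):

	/* Hypothetical reader of the exported clock-info page. */
	static u64 clock_info_read_nsec(volatile struct mlx5_ib_clock_info *ci)
	{
		u32 s1, s2;
		u64 nsec;

		do {
			s1 = ci->sign;
			__sync_synchronize();
			nsec = ci->nsec;	/* plus cycles/mult/frac math */
			__sync_synchronize();
			s2 = ci->sign;
		} while (s1 != s2 || (s1 & MLX5_IB_CLOCK_INFO_KERNEL_UPDATING));

		return nsec;
	}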
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c
index 573f59f46d41..7722a3f9bb68 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c
@@ -121,7 +121,7 @@ EXPORT_SYMBOL_GPL(mlx5_core_reserved_gids_count);
int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
u8 roce_version, u8 roce_l3_type, const u8 *gid,
- const u8 *mac, bool vlan, u16 vlan_id)
+ const u8 *mac, bool vlan, u16 vlan_id, u8 port_num)
{
#define MLX5_SET_RA(p, f, v) MLX5_SET(roce_addr_layout, p, f, v)
u32 in[MLX5_ST_SZ_DW(set_roce_address_in)] = {0};
@@ -148,6 +148,9 @@ int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
memcpy(addr_l3_addr, gid, gidsz);
}
+ if (MLX5_CAP_GEN(dev, num_vhca_ports) > 0)
+ MLX5_SET(set_roce_address_in, in, vhca_port_num, port_num);
+
MLX5_SET(set_roce_address_in, in, roce_address_index, index);
MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS);
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 0f88fd30a09a..2ef641c91c26 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -75,6 +75,8 @@ static unsigned int prof_sel = MLX5_DEFAULT_PROF;
module_param_named(prof_sel, prof_sel, uint, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
+static u32 sw_owner_id[4];
+
enum {
MLX5_ATOMIC_REQ_MODE_BE = 0x0,
MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1,
@@ -551,6 +553,15 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
cache_line_128byte,
cache_line_size() == 128 ? 1 : 0);
+ if (MLX5_CAP_GEN_MAX(dev, dct))
+ MLX5_SET(cmd_hca_cap, set_hca_cap, dct, 1);
+
+ if (MLX5_CAP_GEN_MAX(dev, num_vhca_ports))
+ MLX5_SET(cmd_hca_cap,
+ set_hca_cap,
+ num_vhca_ports,
+ MLX5_CAP_GEN_MAX(dev, num_vhca_ports));
+
err = set_caps(dev, set_ctx, set_sz,
MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
@@ -1107,7 +1118,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto reclaim_boot_pages;
}
- err = mlx5_cmd_init_hca(dev);
+ err = mlx5_cmd_init_hca(dev, sw_owner_id);
if (err) {
dev_err(&pdev->dev, "init hca failed\n");
goto err_pagealloc_stop;
@@ -1643,6 +1654,8 @@ static int __init init(void)
{
int err;
+ get_random_bytes(&sw_owner_id, sizeof(sw_owner_id));
+
mlx5_core_verify_params();
mlx5_register_debugfs();
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index b5a46c128b28..394552f36fcf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -86,7 +86,7 @@ enum {
int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
int mlx5_query_board_id(struct mlx5_core_dev *dev);
-int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
+int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id);
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev);
void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 889130edb715..02d6c5b5d502 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -98,6 +98,11 @@ static u64 sq_allowed_event_types(void)
return BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
}
+static u64 dct_allowed_event_types(void)
+{
+ return BIT(MLX5_EVENT_TYPE_DCT_DRAINED);
+}
+
static bool is_event_type_allowed(int rsc_type, int event_type)
{
switch (rsc_type) {
@@ -107,6 +112,8 @@ static bool is_event_type_allowed(int rsc_type, int event_type)
return BIT(event_type) & rq_allowed_event_types();
case MLX5_EVENT_QUEUE_TYPE_SQ:
return BIT(event_type) & sq_allowed_event_types();
+ case MLX5_EVENT_QUEUE_TYPE_DCT:
+ return BIT(event_type) & dct_allowed_event_types();
default:
WARN(1, "Event arrived for unknown resource type");
return false;
@@ -116,6 +123,7 @@ static bool is_event_type_allowed(int rsc_type, int event_type)
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
{
struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn);
+ struct mlx5_core_dct *dct;
struct mlx5_core_qp *qp;
if (!common)
@@ -134,7 +142,11 @@ void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
qp = (struct mlx5_core_qp *)common;
qp->event(qp, event_type);
break;
-
+ case MLX5_RES_DCT:
+ dct = (struct mlx5_core_dct *)common;
+ if (event_type == MLX5_EVENT_TYPE_DCT_DRAINED)
+ complete(&dct->drained);
+ break;
default:
mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
}
@@ -142,9 +154,9 @@ void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
mlx5_core_put_rsc(common);
}
-static int create_qprqsq_common(struct mlx5_core_dev *dev,
- struct mlx5_core_qp *qp,
- int rsc_type)
+static int create_resource_common(struct mlx5_core_dev *dev,
+ struct mlx5_core_qp *qp,
+ int rsc_type)
{
struct mlx5_qp_table *table = &dev->priv.qp_table;
int err;
@@ -165,8 +177,8 @@ static int create_qprqsq_common(struct mlx5_core_dev *dev,
return 0;
}
-static void destroy_qprqsq_common(struct mlx5_core_dev *dev,
- struct mlx5_core_qp *qp)
+static void destroy_resource_common(struct mlx5_core_dev *dev,
+ struct mlx5_core_qp *qp)
{
struct mlx5_qp_table *table = &dev->priv.qp_table;
unsigned long flags;
@@ -179,6 +191,40 @@ static void destroy_qprqsq_common(struct mlx5_core_dev *dev,
wait_for_completion(&qp->common.free);
}
+int mlx5_core_create_dct(struct mlx5_core_dev *dev,
+ struct mlx5_core_dct *dct,
+ u32 *in, int inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {0};
+ u32 din[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
+ u32 dout[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
+ struct mlx5_core_qp *qp = &dct->mqp;
+ int err;
+
+ init_completion(&dct->drained);
+ MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT);
+
+ err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
+ if (err) {
+ mlx5_core_warn(dev, "create DCT failed, ret %d\n", err);
+ return err;
+ }
+
+ qp->qpn = MLX5_GET(create_dct_out, out, dctn);
+ err = create_resource_common(dev, qp, MLX5_RES_DCT);
+ if (err)
+ goto err_cmd;
+
+ return 0;
+err_cmd:
+ MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
+ MLX5_SET(destroy_dct_in, din, dctn, qp->qpn);
+ mlx5_cmd_exec(dev, (void *)&din, sizeof(din),
+ (void *)&dout, sizeof(dout));
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_create_dct);
+
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp,
u32 *in, int inlen)
@@ -197,7 +243,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
qp->qpn = MLX5_GET(create_qp_out, out, qpn);
mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);
- err = create_qprqsq_common(dev, qp, MLX5_RES_QP);
+ err = create_resource_common(dev, qp, MLX5_RES_QP);
if (err)
goto err_cmd;
@@ -220,6 +266,47 @@ err_cmd:
}
EXPORT_SYMBOL_GPL(mlx5_core_create_qp);
+static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
+ struct mlx5_core_dct *dct)
+{
+ u32 out[MLX5_ST_SZ_DW(drain_dct_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(drain_dct_in)] = {0};
+ struct mlx5_core_qp *qp = &dct->mqp;
+
+ MLX5_SET(drain_dct_in, in, opcode, MLX5_CMD_OP_DRAIN_DCT);
+ MLX5_SET(drain_dct_in, in, dctn, qp->qpn);
+ return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
+ (void *)&out, sizeof(out));
+}
+
+int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
+ struct mlx5_core_dct *dct)
+{
+ u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
+ u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
+ struct mlx5_core_qp *qp = &dct->mqp;
+ int err;
+
+ err = mlx5_core_drain_dct(dev, dct);
+ if (err) {
+ if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ goto destroy;
+ } else {
+ mlx5_core_warn(dev, "failed drain DCT 0x%x with error 0x%x\n", qp->qpn, err);
+ return err;
+ }
+ }
+ wait_for_completion(&dct->drained);
+destroy:
+ destroy_resource_common(dev, &dct->mqp);
+ MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
+ MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
+ err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
+ (void *)&out, sizeof(out));
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct);
+
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp)
{
@@ -229,7 +316,7 @@ int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
mlx5_debug_qp_remove(dev, qp);
- destroy_qprqsq_common(dev, qp);
+ destroy_resource_common(dev, qp);
MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
@@ -405,6 +492,20 @@ int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_query);
+int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
+ u32 *out, int outlen)
+{
+ u32 in[MLX5_ST_SZ_DW(query_dct_in)] = {0};
+ struct mlx5_core_qp *qp = &dct->mqp;
+
+ MLX5_SET(query_dct_in, in, opcode, MLX5_CMD_OP_QUERY_DCT);
+ MLX5_SET(query_dct_in, in, dctn, qp->qpn);
+
+ return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
+ (void *)out, outlen);
+}
+EXPORT_SYMBOL_GPL(mlx5_core_dct_query);
+
int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
{
u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {0};
@@ -441,7 +542,7 @@ int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
return err;
rq->qpn = rqn;
- err = create_qprqsq_common(dev, rq, MLX5_RES_RQ);
+ err = create_resource_common(dev, rq, MLX5_RES_RQ);
if (err)
goto err_destroy_rq;
@@ -457,7 +558,7 @@ EXPORT_SYMBOL(mlx5_core_create_rq_tracked);
void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
struct mlx5_core_qp *rq)
{
- destroy_qprqsq_common(dev, rq);
+ destroy_resource_common(dev, rq);
mlx5_core_destroy_rq(dev, rq->qpn);
}
EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked);
@@ -473,7 +574,7 @@ int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
return err;
sq->qpn = sqn;
- err = create_qprqsq_common(dev, sq, MLX5_RES_SQ);
+ err = create_resource_common(dev, sq, MLX5_RES_SQ);
if (err)
goto err_destroy_sq;
@@ -489,7 +590,7 @@ EXPORT_SYMBOL(mlx5_core_create_sq_tracked);
void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
struct mlx5_core_qp *sq)
{
- destroy_qprqsq_common(dev, sq);
+ destroy_resource_common(dev, sq);
mlx5_core_destroy_sq(dev, sq->qpn);
}
EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);
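Together these give DCTs a QP-like lifecycle with one extra step: destroy first issues DRAIN_DCT and sleeps on the drained completion, which the DCT_DRAINED event path added in eq.c and mlx5_rsc_event() above completes. A hedged consumer sketch:

	struct mlx5_core_dct dct = {};
	u32 in[MLX5_ST_SZ_DW(create_dct_in)] = {0};
	int err;

	/* ... fill the DCT context inside 'in' ... */
	err = mlx5_core_create_dct(mdev, &dct, in, sizeof(in));
	if (err)
		return err;

	/* the hardware DCT number is now in dct.mqp.qpn */

	err = mlx5_core_destroy_dct(mdev, &dct);	/* drain, wait, destroy */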
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index a1296a62497d..dfe36cf6fbea 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -36,6 +36,9 @@
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
+/* Mutex to hold while enabling or disabling RoCE */
+static DEFINE_MUTEX(mlx5_roce_en_lock);
+
static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
u16 vport, u32 *out, int outlen)
{
@@ -998,17 +1001,35 @@ static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev,
int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
{
- if (atomic_inc_return(&mdev->roce.roce_en) != 1)
- return 0;
- return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED);
+ int err = 0;
+
+ mutex_lock(&mlx5_roce_en_lock);
+ if (!mdev->roce.roce_en)
+ err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED);
+
+ if (!err)
+ mdev->roce.roce_en++;
+ mutex_unlock(&mlx5_roce_en_lock);
+
+ return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);
int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
{
- if (atomic_dec_return(&mdev->roce.roce_en) != 0)
- return 0;
- return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);
+ int err = 0;
+
+ mutex_lock(&mlx5_roce_en_lock);
+ if (mdev->roce.roce_en) {
+ mdev->roce.roce_en--;
+ if (mdev->roce.roce_en == 0)
+ err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);
+
+ if (err)
+ mdev->roce.roce_en++;
+ }
+ mutex_unlock(&mlx5_roce_en_lock);
+ return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);
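The switch from an atomic counter to a counted mutex is about failure handling: the device state must change only on the 0->1 (and 1->0) transition, and that change can itself fail, which atomic_inc_return() alone cannot express. The core of the pattern, with names abbreviated:

	mutex_lock(&lock);
	if (!refcount)			/* 0 -> 1: touch the hardware */
		err = update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED);
	if (!err)
		refcount++;		/* count only accepted enables */
	mutex_unlock(&lock);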
@@ -1110,3 +1131,61 @@ ex:
return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_modify_hca_vport_context);
+
+int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
+ struct mlx5_core_dev *port_mdev)
+{
+ int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+ void *in;
+ int err;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ err = mlx5_nic_vport_enable_roce(port_mdev);
+ if (err)
+ goto free;
+
+ MLX5_SET(modify_nic_vport_context_in, in, field_select.affiliation, 1);
+ MLX5_SET(modify_nic_vport_context_in, in,
+ nic_vport_context.affiliated_vhca_id,
+ MLX5_CAP_GEN(master_mdev, vhca_id));
+ MLX5_SET(modify_nic_vport_context_in, in,
+ nic_vport_context.affiliation_criteria,
+ MLX5_CAP_GEN(port_mdev, affiliate_nic_vport_criteria));
+
+ err = mlx5_modify_nic_vport_context(port_mdev, in, inlen);
+ if (err)
+ mlx5_nic_vport_disable_roce(port_mdev);
+
+free:
+ kvfree(in);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_nic_vport_affiliate_multiport);
+
+int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev)
+{
+ int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+ void *in;
+ int err;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_nic_vport_context_in, in, field_select.affiliation, 1);
+ MLX5_SET(modify_nic_vport_context_in, in,
+ nic_vport_context.affiliated_vhca_id, 0);
+ MLX5_SET(modify_nic_vport_context_in, in,
+ nic_vport_context.affiliation_criteria, 0);
+
+ err = mlx5_modify_nic_vport_context(port_mdev, in, inlen);
+ if (!err)
+ mlx5_nic_vport_disable_roce(port_mdev);
+
+ kvfree(in);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_nic_vport_unaffiliate_multiport);
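These two entry points let a multi-port IB driver bind a slave port's NIC vport to the master function's vhca_id; note that affiliation takes a RoCE-enable reference which unaffiliation drops. Expected usage, sketched:

	err = mlx5_nic_vport_affiliate_multiport(master_mdev, port2_mdev);
	if (err)
		goto out;

	/* ... operate the affiliated port ... */

	mlx5_nic_vport_unaffiliate_multiport(port2_mdev);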
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index a42433fb6949..b922ab5cedea 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -311,7 +311,7 @@ static int mac_onboard_sonic_probe(struct net_device *dev)
{
struct sonic_local* lp = netdev_priv(dev);
int sr;
- int commslot = 0;
+ bool commslot = macintosh_config->expansion_type == MAC_EXP_PDS_COMM;
if (!MACH_IS_MAC)
return -ENODEV;
@@ -322,10 +322,7 @@ static int mac_onboard_sonic_probe(struct net_device *dev)
Ethernet (BTW, the Ethernet *is* always at the same
address, and nothing else lives there, at least if Apple's
documentation is to be believed) */
- if (macintosh_config->ident == MAC_MODEL_Q630 ||
- macintosh_config->ident == MAC_MODEL_P588 ||
- macintosh_config->ident == MAC_MODEL_P575 ||
- macintosh_config->ident == MAC_MODEL_C610) {
+ if (commslot || macintosh_config->ident == MAC_MODEL_C610) {
int card_present;
card_present = hwreg_present((void*)ONBOARD_SONIC_REGISTERS);
@@ -333,7 +330,6 @@ static int mac_onboard_sonic_probe(struct net_device *dev)
printk("none.\n");
return -ENODEV;
}
- commslot = 1;
}
printk("yes\n");
@@ -428,26 +424,26 @@ static int mac_nubus_sonic_ethernet_addr(struct net_device *dev,
return 0;
}
-static int macsonic_ident(struct nubus_dev *ndev)
+static int macsonic_ident(struct nubus_rsrc *fres)
{
- if (ndev->dr_hw == NUBUS_DRHW_ASANTE_LC &&
- ndev->dr_sw == NUBUS_DRSW_SONIC_LC)
+ if (fres->dr_hw == NUBUS_DRHW_ASANTE_LC &&
+ fres->dr_sw == NUBUS_DRSW_SONIC_LC)
return MACSONIC_DAYNALINK;
- if (ndev->dr_hw == NUBUS_DRHW_SONIC &&
- ndev->dr_sw == NUBUS_DRSW_APPLE) {
+ if (fres->dr_hw == NUBUS_DRHW_SONIC &&
+ fres->dr_sw == NUBUS_DRSW_APPLE) {
/* There has to be a better way to do this... */
- if (strstr(ndev->board->name, "DuoDock"))
+ if (strstr(fres->board->name, "DuoDock"))
return MACSONIC_DUODOCK;
else
return MACSONIC_APPLE;
}
- if (ndev->dr_hw == NUBUS_DRHW_SMC9194 &&
- ndev->dr_sw == NUBUS_DRSW_DAYNA)
+ if (fres->dr_hw == NUBUS_DRHW_SMC9194 &&
+ fres->dr_sw == NUBUS_DRSW_DAYNA)
return MACSONIC_DAYNA;
- if (ndev->dr_hw == NUBUS_DRHW_APPLE_SONIC_LC &&
- ndev->dr_sw == 0) { /* huh? */
+ if (fres->dr_hw == NUBUS_DRHW_APPLE_SONIC_LC &&
+ fres->dr_sw == 0) { /* huh? */
return MACSONIC_APPLE16;
}
return -1;
@@ -456,7 +452,7 @@ static int macsonic_ident(struct nubus_dev *ndev)
static int mac_nubus_sonic_probe(struct net_device *dev)
{
static int slots;
- struct nubus_dev* ndev = NULL;
+ struct nubus_rsrc *ndev = NULL;
struct sonic_local* lp = netdev_priv(dev);
unsigned long base_addr, prom_addr;
u16 sonic_dcr;
@@ -464,9 +460,11 @@ static int mac_nubus_sonic_probe(struct net_device *dev)
int reg_offset, dma_bitmode;
/* Find the first SONIC that hasn't been initialized already */
- while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK,
- NUBUS_TYPE_ETHERNET, ndev)) != NULL)
- {
+ for_each_func_rsrc(ndev) {
+ if (ndev->category != NUBUS_CAT_NETWORK ||
+ ndev->type != NUBUS_TYPE_ETHERNET)
+ continue;
+
/* Have we seen it already? */
if (slots & (1<<ndev->board->slot))
continue;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 3e57bf5d3d03..1673fc90027f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -97,9 +97,7 @@ static int __qed_spq_block(struct qed_hwfn *p_hwfn,
while (iter_cnt--) {
/* Validate we receive completion update */
- if (READ_ONCE(comp_done->done) == 1) {
- /* Read updated FW return value */
- smp_read_barrier_depends();
+ if (smp_load_acquire(&comp_done->done) == 1) { /* ^^^ */
if (p_fw_ret)
*p_fw_ret = comp_done->fw_return_code;
return 0;
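smp_read_barrier_depends() is being retired tree-wide; the acquire load also documents the required pairing with a release store on the completer side. The pairing, sketched (the writer half is an assumption about the completion path, which is not shown in this hunk):

	/* completer (writer): publish the data before the flag */
	comp_done->fw_return_code = fw_ret;
	smp_store_release(&comp_done->done, 1);

	/* poller (reader), as in the patch: */
	if (smp_load_acquire(&comp_done->done) == 1)
		*p_fw_ret = comp_done->fw_return_code;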
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 7900ed066d8a..e412dfdda7dd 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -2638,12 +2638,12 @@ static long ca8210_test_int_ioctl(
*
* Return: set of poll return flags
*/
-static unsigned int ca8210_test_int_poll(
+static __poll_t ca8210_test_int_poll(
struct file *filp,
struct poll_table_struct *ptable
)
{
- unsigned int return_flags = 0;
+ __poll_t return_flags = 0;
struct ca8210_priv *priv = filp->private_data;
poll_wait(filp, &priv->test.readq, ptable);
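__poll_t is a sparse-checked bitwise type, so this conversion — repeated below for ppp, tap, tun, cosa and rt2x00 — changes type annotations, not behavior. The shape of a converted handler (example names invented):

	static __poll_t example_poll(struct file *filp, poll_table *wait)
	{
		__poll_t mask = 0;		/* was: unsigned int */

		poll_wait(filp, &example_readq, wait);
		if (example_data_ready())
			mask |= POLLIN | POLLRDNORM;
		return mask;
	}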
diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c
index 1b28e6e702f5..bdc4d23627c5 100644
--- a/drivers/net/ppp/ppp_async.c
+++ b/drivers/net/ppp/ppp_async.c
@@ -334,7 +334,7 @@ ppp_asynctty_ioctl(struct tty_struct *tty, struct file *file,
}
/* No kernel lock - fine */
-static unsigned int
+static __poll_t
ppp_asynctty_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
{
return 0;
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 264d4af0bf69..ef6b2126b23a 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -531,10 +531,10 @@ static ssize_t ppp_write(struct file *file, const char __user *buf,
}
/* No kernel lock - fine */
-static unsigned int ppp_poll(struct file *file, poll_table *wait)
+static __poll_t ppp_poll(struct file *file, poll_table *wait)
{
struct ppp_file *pf = file->private_data;
- unsigned int mask;
+ __poll_t mask;
if (!pf)
return 0;
diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
index 7196f00f0991..047f6c68a441 100644
--- a/drivers/net/ppp/ppp_synctty.c
+++ b/drivers/net/ppp/ppp_synctty.c
@@ -327,7 +327,7 @@ ppp_synctty_ioctl(struct tty_struct *tty, struct file *file,
}
/* No kernel lock - fine */
-static unsigned int
+static __poll_t
ppp_sync_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
{
return 0;
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 77872699c45d..0a5ed004781c 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -569,10 +569,10 @@ static int tap_release(struct inode *inode, struct file *file)
return 0;
}
-static unsigned int tap_poll(struct file *file, poll_table *wait)
+static __poll_t tap_poll(struct file *file, poll_table *wait)
{
struct tap_queue *q = file->private_data;
- unsigned int mask = POLLERR;
+ __poll_t mask = POLLERR;
if (!q)
goto out;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a0c5cb1a1617..0dc66e4fbb2c 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1428,12 +1428,12 @@ static void tun_net_init(struct net_device *dev)
/* Character device part */
/* Poll */
-static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
+static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
{
struct tun_file *tfile = file->private_data;
struct tun_struct *tun = tun_get(tfile);
struct sock *sk;
- unsigned int mask = 0;
+ __poll_t mask = 0;
if (!tun)
return POLLERR;
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 6ea16260ec76..f6b000ddcd15 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -924,7 +924,7 @@ static int chrdev_tx_done(struct channel_data *chan, int size)
return 1;
}
-static unsigned int cosa_poll(struct file *file, poll_table *poll)
+static __poll_t cosa_poll(struct file *file, poll_table *poll)
{
pr_info("cosa_poll is here\n");
return 0;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
index f4fdad2ed319..72c55d1f8903 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
@@ -301,7 +301,7 @@ exit:
return status;
}
-static unsigned int rt2x00debug_poll_queue_dump(struct file *file,
+static __poll_t rt2x00debug_poll_queue_dump(struct file *file,
poll_table *wait)
{
struct rt2x00debug_intf *intf = file->private_data;
diff --git a/drivers/nubus/Makefile b/drivers/nubus/Makefile
index 21bda2031e7e..6d063cde39d1 100644
--- a/drivers/nubus/Makefile
+++ b/drivers/nubus/Makefile
@@ -2,6 +2,6 @@
# Makefile for the nubus specific drivers.
#
-obj-y := nubus.o
+obj-y := nubus.o bus.o
obj-$(CONFIG_PROC_FS) += proc.o
diff --git a/drivers/nubus/bus.c b/drivers/nubus/bus.c
new file mode 100644
index 000000000000..d306c348c857
--- /dev/null
+++ b/drivers/nubus/bus.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Bus implementation for the NuBus subsystem.
+//
+// Copyright (C) 2017 Finn Thain
+
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/nubus.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+
+#define to_nubus_board(d) container_of(d, struct nubus_board, dev)
+#define to_nubus_driver(d) container_of(d, struct nubus_driver, driver)
+
+static int nubus_bus_match(struct device *dev, struct device_driver *driver)
+{
+ return 1;
+}
+
+static int nubus_device_probe(struct device *dev)
+{
+ struct nubus_driver *ndrv = to_nubus_driver(dev->driver);
+ int err = -ENODEV;
+
+ if (ndrv->probe)
+ err = ndrv->probe(to_nubus_board(dev));
+ return err;
+}
+
+static int nubus_device_remove(struct device *dev)
+{
+ struct nubus_driver *ndrv = to_nubus_driver(dev->driver);
+ int err = -ENODEV;
+
+ if (dev->driver && ndrv->remove)
+ err = ndrv->remove(to_nubus_board(dev));
+ return err;
+}
+
+struct bus_type nubus_bus_type = {
+ .name = "nubus",
+ .match = nubus_bus_match,
+ .probe = nubus_device_probe,
+ .remove = nubus_device_remove,
+};
+EXPORT_SYMBOL(nubus_bus_type);
+
+int nubus_driver_register(struct nubus_driver *ndrv)
+{
+ ndrv->driver.bus = &nubus_bus_type;
+ return driver_register(&ndrv->driver);
+}
+EXPORT_SYMBOL(nubus_driver_register);
+
+void nubus_driver_unregister(struct nubus_driver *ndrv)
+{
+ driver_unregister(&ndrv->driver);
+}
+EXPORT_SYMBOL(nubus_driver_unregister);
+
+static struct device nubus_parent = {
+ .init_name = "nubus",
+};
+
+int __init nubus_bus_register(void)
+{
+ int err;
+
+ err = device_register(&nubus_parent);
+ if (err)
+ return err;
+
+ err = bus_register(&nubus_bus_type);
+ if (!err)
+ return 0;
+
+ device_unregister(&nubus_parent);
+ return err;
+}
+
+static void nubus_device_release(struct device *dev)
+{
+ struct nubus_board *board = to_nubus_board(dev);
+ struct nubus_rsrc *fres, *tmp;
+
+ list_for_each_entry_safe(fres, tmp, &nubus_func_rsrcs, list)
+ if (fres->board == board) {
+ list_del(&fres->list);
+ kfree(fres);
+ }
+ kfree(board);
+}
+
+int nubus_device_register(struct nubus_board *board)
+{
+ board->dev.parent = &nubus_parent;
+ board->dev.release = nubus_device_release;
+ board->dev.bus = &nubus_bus_type;
+ dev_set_name(&board->dev, "slot.%X", board->slot);
+ return device_register(&board->dev);
+}
+
+static int nubus_print_device_name_fn(struct device *dev, void *data)
+{
+ struct nubus_board *board = to_nubus_board(dev);
+ struct seq_file *m = data;
+
+ seq_printf(m, "Slot %X: %s\n", board->slot, board->name);
+ return 0;
+}
+
+int nubus_proc_show(struct seq_file *m, void *data)
+{
+ return bus_for_each_dev(&nubus_bus_type, NULL, m,
+ nubus_print_device_name_fn);
+}
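With NuBus boards now on a real struct bus_type, a board driver follows the usual Linux driver model; since nubus_bus_match() accepts every board, probe() must identify its hardware itself. A hedged sketch:

	static int example_probe(struct nubus_board *board)
	{
		/* check board->slot / functional resources; -ENODEV if not ours */
		return -ENODEV;
	}

	static int example_remove(struct nubus_board *board)
	{
		return 0;
	}

	static struct nubus_driver example_driver = {
		.probe	= example_probe,
		.remove	= example_remove,
		.driver	= {
			.name	= "example_nubus",
			.owner	= THIS_MODULE,
		},
	};

	module_driver(example_driver, nubus_driver_register,
		      nubus_driver_unregister);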
diff --git a/drivers/nubus/nubus.c b/drivers/nubus/nubus.c
index b793727cd4f7..4621ff98138c 100644
--- a/drivers/nubus/nubus.c
+++ b/drivers/nubus/nubus.c
@@ -15,6 +15,7 @@
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
#include <asm/setup.h>
#include <asm/page.h>
@@ -31,8 +32,7 @@
/* Globals */
-struct nubus_dev *nubus_devices;
-struct nubus_board *nubus_boards;
+LIST_HEAD(nubus_func_rsrcs);
/* Meaning of "bytelanes":
@@ -146,7 +146,7 @@ static inline void *nubus_rom_addr(int slot)
return (void *)(0xF1000000 + (slot << 24));
}
-static unsigned char *nubus_dirptr(const struct nubus_dirent *nd)
+unsigned char *nubus_dirptr(const struct nubus_dirent *nd)
{
unsigned char *p = nd->base;
@@ -161,7 +161,7 @@ static unsigned char *nubus_dirptr(const struct nubus_dirent *nd)
pointed to with offsets) out of the card ROM. */
void nubus_get_rsrc_mem(void *dest, const struct nubus_dirent *dirent,
- int len)
+ unsigned int len)
{
unsigned char *t = (unsigned char *)dest;
unsigned char *p = nubus_dirptr(dirent);
@@ -173,21 +173,49 @@ void nubus_get_rsrc_mem(void *dest, const struct nubus_dirent *dirent,
}
EXPORT_SYMBOL(nubus_get_rsrc_mem);
-void nubus_get_rsrc_str(void *dest, const struct nubus_dirent *dirent,
- int len)
+unsigned int nubus_get_rsrc_str(char *dest, const struct nubus_dirent *dirent,
+ unsigned int len)
{
- unsigned char *t = (unsigned char *)dest;
+ char *t = dest;
unsigned char *p = nubus_dirptr(dirent);
- while (len) {
- *t = nubus_get_rom(&p, 1, dirent->mask);
- if (!*t++)
+ while (len > 1) {
+ unsigned char c = nubus_get_rom(&p, 1, dirent->mask);
+
+ if (!c)
break;
+ *t++ = c;
len--;
}
+ if (len > 0)
+ *t = '\0';
+ return t - dest;
}
EXPORT_SYMBOL(nubus_get_rsrc_str);
+void nubus_seq_write_rsrc_mem(struct seq_file *m,
+ const struct nubus_dirent *dirent,
+ unsigned int len)
+{
+ unsigned long buf[32];
+ unsigned int buf_size = sizeof(buf);
+ unsigned char *p = nubus_dirptr(dirent);
+
+ /* If possible, write out full buffers */
+ while (len >= buf_size) {
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(buf); i++)
+ buf[i] = nubus_get_rom(&p, sizeof(buf[0]),
+ dirent->mask);
+ seq_write(m, buf, buf_size);
+ len -= buf_size;
+ }
+ /* If not, write out individual bytes */
+ while (len--)
+ seq_putc(m, nubus_get_rom(&p, 1, dirent->mask));
+}
+
int nubus_get_root_dir(const struct nubus_board *board,
struct nubus_dir *dir)
{
@@ -199,12 +227,11 @@ int nubus_get_root_dir(const struct nubus_board *board,
EXPORT_SYMBOL(nubus_get_root_dir);
/* This is a slyly renamed version of the above */
-int nubus_get_func_dir(const struct nubus_dev *dev,
- struct nubus_dir *dir)
+int nubus_get_func_dir(const struct nubus_rsrc *fres, struct nubus_dir *dir)
{
- dir->ptr = dir->base = dev->directory;
+ dir->ptr = dir->base = fres->directory;
dir->done = 0;
- dir->mask = dev->board->lanes;
+ dir->mask = fres->board->lanes;
return 0;
}
EXPORT_SYMBOL(nubus_get_func_dir);
@@ -277,51 +304,20 @@ EXPORT_SYMBOL(nubus_rewinddir);
/* Driver interface functions, more or less like in pci.c */
-struct nubus_dev*
-nubus_find_device(unsigned short category, unsigned short type,
- unsigned short dr_hw, unsigned short dr_sw,
- const struct nubus_dev *from)
-{
- struct nubus_dev *itor = from ? from->next : nubus_devices;
-
- while (itor) {
- if (itor->category == category && itor->type == type &&
- itor->dr_hw == dr_hw && itor->dr_sw == dr_sw)
- return itor;
- itor = itor->next;
- }
- return NULL;
-}
-EXPORT_SYMBOL(nubus_find_device);
-
-struct nubus_dev*
-nubus_find_type(unsigned short category, unsigned short type,
- const struct nubus_dev *from)
+struct nubus_rsrc *nubus_first_rsrc_or_null(void)
{
- struct nubus_dev *itor = from ? from->next : nubus_devices;
-
- while (itor) {
- if (itor->category == category && itor->type == type)
- return itor;
- itor = itor->next;
- }
- return NULL;
+ return list_first_entry_or_null(&nubus_func_rsrcs, struct nubus_rsrc,
+ list);
}
-EXPORT_SYMBOL(nubus_find_type);
+EXPORT_SYMBOL(nubus_first_rsrc_or_null);
-struct nubus_dev*
-nubus_find_slot(unsigned int slot, const struct nubus_dev *from)
+struct nubus_rsrc *nubus_next_rsrc_or_null(struct nubus_rsrc *from)
{
- struct nubus_dev *itor = from ? from->next : nubus_devices;
-
- while (itor) {
- if (itor->board->slot == slot)
- return itor;
- itor = itor->next;
- }
- return NULL;
+ if (list_is_last(&from->list, &nubus_func_rsrcs))
+ return NULL;
+ return list_next_entry(from, list);
}
-EXPORT_SYMBOL(nubus_find_slot);
+EXPORT_SYMBOL(nubus_next_rsrc_or_null);
int
nubus_find_rsrc(struct nubus_dir *dir, unsigned char rsrc_type,
@@ -339,31 +335,83 @@ EXPORT_SYMBOL(nubus_find_rsrc);
looking at, and print out lots and lots of information from the
resource blocks. */
-/* FIXME: A lot of this stuff will eventually be useful after
- initialization, for intelligently probing Ethernet and video chips,
- among other things. The rest of it should go in the /proc code.
- For now, we just use it to give verbose boot logs. */
+static int __init nubus_get_block_rsrc_dir(struct nubus_board *board,
+ struct proc_dir_entry *procdir,
+ const struct nubus_dirent *parent)
+{
+ struct nubus_dir dir;
+ struct nubus_dirent ent;
+
+ nubus_get_subdir(parent, &dir);
+ dir.procdir = nubus_proc_add_rsrc_dir(procdir, parent, board);
-static int __init nubus_show_display_resource(struct nubus_dev *dev,
- const struct nubus_dirent *ent)
+ while (nubus_readdir(&dir, &ent) != -1) {
+ u32 size;
+
+ nubus_get_rsrc_mem(&size, &ent, 4);
+ pr_debug(" block (0x%x), size %d\n", ent.type, size);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, size);
+ }
+ return 0;
+}
+
+static int __init nubus_get_display_vidmode(struct nubus_board *board,
+ struct proc_dir_entry *procdir,
+ const struct nubus_dirent *parent)
+{
+ struct nubus_dir dir;
+ struct nubus_dirent ent;
+
+ nubus_get_subdir(parent, &dir);
+ dir.procdir = nubus_proc_add_rsrc_dir(procdir, parent, board);
+
+ while (nubus_readdir(&dir, &ent) != -1) {
+ switch (ent.type) {
+ case 1: /* mVidParams */
+ case 2: /* mTable */
+ {
+ u32 size;
+
+ nubus_get_rsrc_mem(&size, &ent, 4);
+ pr_debug(" block (0x%x), size %d\n", ent.type,
+ size);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, size);
+ break;
+ }
+ default:
+ pr_debug(" unknown resource 0x%02x, data 0x%06x\n",
+ ent.type, ent.data);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, 0);
+ }
+ }
+ return 0;
+}
+
+static int __init nubus_get_display_resource(struct nubus_rsrc *fres,
+ struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent)
{
switch (ent->type) {
case NUBUS_RESID_GAMMADIR:
- pr_info(" gamma directory offset: 0x%06x\n", ent->data);
+ pr_debug(" gamma directory offset: 0x%06x\n", ent->data);
+ nubus_get_block_rsrc_dir(fres->board, procdir, ent);
break;
case 0x0080 ... 0x0085:
- pr_info(" mode %02X info offset: 0x%06x\n",
- ent->type, ent->data);
+ pr_debug(" mode 0x%02x info offset: 0x%06x\n",
+ ent->type, ent->data);
+ nubus_get_display_vidmode(fres->board, procdir, ent);
break;
default:
- pr_info(" unknown resource %02X, data 0x%06x\n",
- ent->type, ent->data);
+ pr_debug(" unknown resource 0x%02x, data 0x%06x\n",
+ ent->type, ent->data);
+ nubus_proc_add_rsrc_mem(procdir, ent, 0);
}
return 0;
}
-static int __init nubus_show_network_resource(struct nubus_dev *dev,
- const struct nubus_dirent *ent)
+static int __init nubus_get_network_resource(struct nubus_rsrc *fres,
+ struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent)
{
switch (ent->type) {
case NUBUS_RESID_MAC_ADDRESS:
@@ -371,18 +419,21 @@ static int __init nubus_show_network_resource(struct nubus_dev *dev,
char addr[6];
nubus_get_rsrc_mem(addr, ent, 6);
- pr_info(" MAC address: %pM\n", addr);
+ pr_debug(" MAC address: %pM\n", addr);
+ nubus_proc_add_rsrc_mem(procdir, ent, 6);
break;
}
default:
- pr_info(" unknown resource %02X, data 0x%06x\n",
- ent->type, ent->data);
+ pr_debug(" unknown resource 0x%02x, data 0x%06x\n",
+ ent->type, ent->data);
+ nubus_proc_add_rsrc_mem(procdir, ent, 0);
}
return 0;
}
-static int __init nubus_show_cpu_resource(struct nubus_dev *dev,
- const struct nubus_dirent *ent)
+static int __init nubus_get_cpu_resource(struct nubus_rsrc *fres,
+ struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent)
{
switch (ent->type) {
case NUBUS_RESID_MEMINFO:
@@ -390,8 +441,9 @@ static int __init nubus_show_cpu_resource(struct nubus_dev *dev,
unsigned long meminfo[2];
nubus_get_rsrc_mem(&meminfo, ent, 8);
- pr_info(" memory: [ 0x%08lx 0x%08lx ]\n",
- meminfo[0], meminfo[1]);
+ pr_debug(" memory: [ 0x%08lx 0x%08lx ]\n",
+ meminfo[0], meminfo[1]);
+ nubus_proc_add_rsrc_mem(procdir, ent, 8);
break;
}
case NUBUS_RESID_ROMINFO:
@@ -399,57 +451,60 @@ static int __init nubus_show_cpu_resource(struct nubus_dev *dev,
unsigned long rominfo[2];
nubus_get_rsrc_mem(&rominfo, ent, 8);
- pr_info(" ROM: [ 0x%08lx 0x%08lx ]\n",
- rominfo[0], rominfo[1]);
+ pr_debug(" ROM: [ 0x%08lx 0x%08lx ]\n",
+ rominfo[0], rominfo[1]);
+ nubus_proc_add_rsrc_mem(procdir, ent, 8);
break;
}
default:
- pr_info(" unknown resource %02X, data 0x%06x\n",
- ent->type, ent->data);
+ pr_debug(" unknown resource 0x%02x, data 0x%06x\n",
+ ent->type, ent->data);
+ nubus_proc_add_rsrc_mem(procdir, ent, 0);
}
return 0;
}
-static int __init nubus_show_private_resource(struct nubus_dev *dev,
- const struct nubus_dirent *ent)
+static int __init nubus_get_private_resource(struct nubus_rsrc *fres,
+ struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent)
{
- switch (dev->category) {
+ switch (fres->category) {
case NUBUS_CAT_DISPLAY:
- nubus_show_display_resource(dev, ent);
+ nubus_get_display_resource(fres, procdir, ent);
break;
case NUBUS_CAT_NETWORK:
- nubus_show_network_resource(dev, ent);
+ nubus_get_network_resource(fres, procdir, ent);
break;
case NUBUS_CAT_CPU:
- nubus_show_cpu_resource(dev, ent);
+ nubus_get_cpu_resource(fres, procdir, ent);
break;
default:
- pr_info(" unknown resource %02X, data 0x%06x\n",
- ent->type, ent->data);
+ pr_debug(" unknown resource 0x%02x, data 0x%06x\n",
+ ent->type, ent->data);
+ nubus_proc_add_rsrc_mem(procdir, ent, 0);
}
return 0;
}
-static struct nubus_dev * __init
+static struct nubus_rsrc * __init
nubus_get_functional_resource(struct nubus_board *board, int slot,
const struct nubus_dirent *parent)
{
struct nubus_dir dir;
struct nubus_dirent ent;
- struct nubus_dev *dev;
+ struct nubus_rsrc *fres;
- pr_info(" Function 0x%02x:\n", parent->type);
+ pr_debug(" Functional resource 0x%02x:\n", parent->type);
nubus_get_subdir(parent, &dir);
-
- pr_debug("%s: parent is 0x%p, dir is 0x%p\n",
- __func__, parent->base, dir.base);
+ dir.procdir = nubus_proc_add_rsrc_dir(board->procdir, parent, board);
/* Actually we should probably panic if this fails */
- if ((dev = kzalloc(sizeof(*dev), GFP_ATOMIC)) == NULL)
+ fres = kzalloc(sizeof(*fres), GFP_ATOMIC);
+ if (!fres)
return NULL;
- dev->resid = parent->type;
- dev->directory = dir.base;
- dev->board = board;
+ fres->resid = parent->type;
+ fres->directory = dir.base;
+ fres->board = board;
while (nubus_readdir(&dir, &ent) != -1) {
switch (ent.type) {
@@ -458,130 +513,96 @@ nubus_get_functional_resource(struct nubus_board *board, int slot,
unsigned short nbtdata[4];
nubus_get_rsrc_mem(nbtdata, &ent, 8);
- dev->category = nbtdata[0];
- dev->type = nbtdata[1];
- dev->dr_sw = nbtdata[2];
- dev->dr_hw = nbtdata[3];
- pr_info(" type: [cat 0x%x type 0x%x sw 0x%x hw 0x%x]\n",
- nbtdata[0], nbtdata[1], nbtdata[2], nbtdata[3]);
+ fres->category = nbtdata[0];
+ fres->type = nbtdata[1];
+ fres->dr_sw = nbtdata[2];
+ fres->dr_hw = nbtdata[3];
+ pr_debug(" type: [cat 0x%x type 0x%x sw 0x%x hw 0x%x]\n",
+ nbtdata[0], nbtdata[1], nbtdata[2], nbtdata[3]);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, 8);
break;
}
case NUBUS_RESID_NAME:
{
- nubus_get_rsrc_str(dev->name, &ent, 64);
- pr_info(" name: %s\n", dev->name);
+ char name[64];
+ unsigned int len;
+
+ len = nubus_get_rsrc_str(name, &ent, sizeof(name));
+ pr_debug(" name: %s\n", name);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, len + 1);
break;
}
case NUBUS_RESID_DRVRDIR:
{
/* MacOS driver. If we were NetBSD we might
use this :-) */
- struct nubus_dir drvr_dir;
- struct nubus_dirent drvr_ent;
-
- nubus_get_subdir(&ent, &drvr_dir);
- nubus_readdir(&drvr_dir, &drvr_ent);
- dev->driver = nubus_dirptr(&drvr_ent);
- pr_info(" driver at: 0x%p\n", dev->driver);
+ pr_debug(" driver directory offset: 0x%06x\n",
+ ent.data);
+ nubus_get_block_rsrc_dir(board, dir.procdir, &ent);
break;
}
case NUBUS_RESID_MINOR_BASEOS:
+ {
/* We will need this in order to support
multiple framebuffers. It might be handy
for Ethernet as well */
- nubus_get_rsrc_mem(&dev->iobase, &ent, 4);
- pr_info(" memory offset: 0x%08lx\n", dev->iobase);
+ u32 base_offset;
+
+ nubus_get_rsrc_mem(&base_offset, &ent, 4);
+ pr_debug(" memory offset: 0x%08x\n", base_offset);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, 4);
break;
+ }
case NUBUS_RESID_MINOR_LENGTH:
+ {
/* Ditto */
- nubus_get_rsrc_mem(&dev->iosize, &ent, 4);
- pr_info(" memory length: 0x%08lx\n", dev->iosize);
+ u32 length;
+
+ nubus_get_rsrc_mem(&length, &ent, 4);
+ pr_debug(" memory length: 0x%08x\n", length);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, 4);
break;
+ }
case NUBUS_RESID_FLAGS:
- dev->flags = ent.data;
- pr_info(" flags: 0x%06x\n", dev->flags);
+ pr_debug(" flags: 0x%06x\n", ent.data);
+ nubus_proc_add_rsrc(dir.procdir, &ent);
break;
case NUBUS_RESID_HWDEVID:
- dev->hwdevid = ent.data;
- pr_info(" hwdevid: 0x%06x\n", dev->hwdevid);
+ pr_debug(" hwdevid: 0x%06x\n", ent.data);
+ nubus_proc_add_rsrc(dir.procdir, &ent);
break;
default:
/* Local/Private resources have their own
function */
- nubus_show_private_resource(dev, &ent);
+ nubus_get_private_resource(fres, dir.procdir, &ent);
}
}
- return dev;
-}
-
-/* This is cool. */
-static int __init nubus_get_vidnames(struct nubus_board *board,
- const struct nubus_dirent *parent)
-{
- struct nubus_dir dir;
- struct nubus_dirent ent;
-
- /* FIXME: obviously we want to put this in a header file soon */
- struct vidmode {
- u32 size;
- /* Don't know what this is yet */
- u16 id;
- /* Longest one I've seen so far is 26 characters */
- char name[32];
- };
-
- pr_info(" video modes supported:\n");
- nubus_get_subdir(parent, &dir);
- pr_debug("%s: parent is 0x%p, dir is 0x%p\n",
- __func__, parent->base, dir.base);
-
- while (nubus_readdir(&dir, &ent) != -1) {
- struct vidmode mode;
- u32 size;
-
- /* First get the length */
- nubus_get_rsrc_mem(&size, &ent, 4);
-
- /* Now clobber the whole thing */
- if (size > sizeof(mode) - 1)
- size = sizeof(mode) - 1;
- memset(&mode, 0, sizeof(mode));
- nubus_get_rsrc_mem(&mode, &ent, size);
- pr_info(" %02X: (%02X) %s\n", ent.type,
- mode.id, mode.name);
- }
- return 0;
+ return fres;
}
/* This is *really* cool. */
static int __init nubus_get_icon(struct nubus_board *board,
+ struct proc_dir_entry *procdir,
const struct nubus_dirent *ent)
{
/* Should be 32x32 if my memory serves me correctly */
- unsigned char icon[128];
- int x, y;
+ u32 icon[32];
+ int i;
nubus_get_rsrc_mem(&icon, ent, 128);
- pr_info(" icon:\n");
-
- /* We should actually plot these somewhere in the framebuffer
- init. This is just to demonstrate that they do, in fact,
- exist */
- for (y = 0; y < 32; y++) {
- pr_info(" ");
- for (x = 0; x < 32; x++) {
- if (icon[y * 4 + x / 8] & (0x80 >> (x % 8)))
- pr_cont("*");
- else
- pr_cont(" ");
- }
- pr_cont("\n");
- }
+ pr_debug(" icon:\n");
+ for (i = 0; i < 8; i++)
+ pr_debug(" %08x %08x %08x %08x\n",
+ icon[i * 4 + 0], icon[i * 4 + 1],
+ icon[i * 4 + 2], icon[i * 4 + 3]);
+ nubus_proc_add_rsrc_mem(procdir, ent, 128);
+
return 0;
}
static int __init nubus_get_vendorinfo(struct nubus_board *board,
+ struct proc_dir_entry *procdir,
const struct nubus_dirent *parent)
{
struct nubus_dir dir;
@@ -589,19 +610,20 @@ static int __init nubus_get_vendorinfo(struct nubus_board *board,
static char *vendor_fields[6] = { "ID", "serial", "revision",
"part", "date", "unknown field" };
- pr_info(" vendor info:\n");
+ pr_debug(" vendor info:\n");
nubus_get_subdir(parent, &dir);
- pr_debug("%s: parent is 0x%p, dir is 0x%p\n",
- __func__, parent->base, dir.base);
+ dir.procdir = nubus_proc_add_rsrc_dir(procdir, parent, board);
while (nubus_readdir(&dir, &ent) != -1) {
char name[64];
+ unsigned int len;
/* These are all strings, we think */
- nubus_get_rsrc_str(name, &ent, 64);
- if (ent.type > 5)
+ len = nubus_get_rsrc_str(name, &ent, sizeof(name));
+ if (ent.type < 1 || ent.type > 5)
ent.type = 5;
- pr_info(" %s: %s\n", vendor_fields[ent.type - 1], name);
+ pr_debug(" %s: %s\n", vendor_fields[ent.type - 1], name);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, len + 1);
}
return 0;
}
@@ -612,9 +634,9 @@ static int __init nubus_get_board_resource(struct nubus_board *board, int slot,
struct nubus_dir dir;
struct nubus_dirent ent;
+ pr_debug(" Board resource 0x%02x:\n", parent->type);
nubus_get_subdir(parent, &dir);
- pr_debug("%s: parent is 0x%p, dir is 0x%p\n",
- __func__, parent->base, dir.base);
+ dir.procdir = nubus_proc_add_rsrc_dir(board->procdir, parent, board);
while (nubus_readdir(&dir, &ent) != -1) {
switch (ent.type) {
@@ -625,64 +647,81 @@ static int __init nubus_get_board_resource(struct nubus_board *board, int slot,
useful except insofar as it tells us that
we really are looking at a board resource. */
nubus_get_rsrc_mem(nbtdata, &ent, 8);
- pr_info(" type: [cat 0x%x type 0x%x sw 0x%x hw 0x%x]\n",
- nbtdata[0], nbtdata[1], nbtdata[2], nbtdata[3]);
+ pr_debug(" type: [cat 0x%x type 0x%x sw 0x%x hw 0x%x]\n",
+ nbtdata[0], nbtdata[1], nbtdata[2], nbtdata[3]);
if (nbtdata[0] != 1 || nbtdata[1] != 0 ||
nbtdata[2] != 0 || nbtdata[3] != 0)
- pr_err("this sResource is not a board resource!\n");
+ pr_err("Slot %X: sResource is not a board resource!\n",
+ slot);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, 8);
break;
}
case NUBUS_RESID_NAME:
- nubus_get_rsrc_str(board->name, &ent, 64);
- pr_info(" name: %s\n", board->name);
+ {
+ unsigned int len;
+
+ len = nubus_get_rsrc_str(board->name, &ent,
+ sizeof(board->name));
+ pr_debug(" name: %s\n", board->name);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, len + 1);
break;
+ }
case NUBUS_RESID_ICON:
- nubus_get_icon(board, &ent);
+ nubus_get_icon(board, dir.procdir, &ent);
break;
case NUBUS_RESID_BOARDID:
- pr_info(" board id: 0x%x\n", ent.data);
+ pr_debug(" board id: 0x%x\n", ent.data);
+ nubus_proc_add_rsrc(dir.procdir, &ent);
break;
case NUBUS_RESID_PRIMARYINIT:
- pr_info(" primary init offset: 0x%06x\n", ent.data);
+ pr_debug(" primary init offset: 0x%06x\n", ent.data);
+ nubus_proc_add_rsrc(dir.procdir, &ent);
break;
case NUBUS_RESID_VENDORINFO:
- nubus_get_vendorinfo(board, &ent);
+ nubus_get_vendorinfo(board, dir.procdir, &ent);
break;
case NUBUS_RESID_FLAGS:
- pr_info(" flags: 0x%06x\n", ent.data);
+ pr_debug(" flags: 0x%06x\n", ent.data);
+ nubus_proc_add_rsrc(dir.procdir, &ent);
break;
case NUBUS_RESID_HWDEVID:
- pr_info(" hwdevid: 0x%06x\n", ent.data);
+ pr_debug(" hwdevid: 0x%06x\n", ent.data);
+ nubus_proc_add_rsrc(dir.procdir, &ent);
break;
case NUBUS_RESID_SECONDINIT:
- pr_info(" secondary init offset: 0x%06x\n", ent.data);
+ pr_debug(" secondary init offset: 0x%06x\n",
+ ent.data);
+ nubus_proc_add_rsrc(dir.procdir, &ent);
break;
/* WTF isn't this in the functional resources? */
case NUBUS_RESID_VIDNAMES:
- nubus_get_vidnames(board, &ent);
+ pr_debug(" vidnames directory offset: 0x%06x\n",
+ ent.data);
+ nubus_get_block_rsrc_dir(board, dir.procdir, &ent);
break;
/* Same goes for this */
case NUBUS_RESID_VIDMODES:
- pr_info(" video mode parameter directory offset: 0x%06x\n",
- ent.data);
+ pr_debug(" video mode parameter directory offset: 0x%06x\n",
+ ent.data);
+ nubus_proc_add_rsrc(dir.procdir, &ent);
break;
default:
- pr_info(" unknown resource %02X, data 0x%06x\n",
- ent.type, ent.data);
+ pr_debug(" unknown resource 0x%02x, data 0x%06x\n",
+ ent.type, ent.data);
+ nubus_proc_add_rsrc_mem(dir.procdir, &ent, 0);
}
}
return 0;
}
-/* Add a board (might be many devices) to the list */
-static struct nubus_board * __init nubus_add_board(int slot, int bytelanes)
+static void __init nubus_add_board(int slot, int bytelanes)
{
struct nubus_board *board;
- struct nubus_board **boardp;
unsigned char *rp;
unsigned long dpat;
struct nubus_dir dir;
struct nubus_dirent ent;
+ int prev_resid = -1;
/* Move to the start of the format block */
rp = nubus_rom_addr(slot);
@@ -690,19 +729,19 @@ static struct nubus_board * __init nubus_add_board(int slot, int bytelanes)
/* Actually we should probably panic if this fails */
if ((board = kzalloc(sizeof(*board), GFP_ATOMIC)) == NULL)
- return NULL;
+ return;
board->fblock = rp;
/* Dump the format block for debugging purposes */
pr_debug("Slot %X, format block at 0x%p:\n", slot, rp);
+ pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes));
+ pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes));
+ pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes));
pr_debug("%02lx\n", nubus_get_rom(&rp, 1, bytelanes));
pr_debug("%02lx\n", nubus_get_rom(&rp, 1, bytelanes));
pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes));
pr_debug("%02lx\n", nubus_get_rom(&rp, 1, bytelanes));
pr_debug("%02lx\n", nubus_get_rom(&rp, 1, bytelanes));
- pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes));
- pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes));
- pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes));
rp = board->fblock;
board->slot = slot;
@@ -722,10 +761,10 @@ static struct nubus_board * __init nubus_add_board(int slot, int bytelanes)
/* Directory offset should be small and negative... */
if (!(board->doffset & 0x00FF0000))
- pr_warn("Dodgy doffset!\n");
+ pr_warn("Slot %X: Dodgy doffset!\n", slot);
dpat = nubus_get_rom(&rp, 4, bytelanes);
if (dpat != NUBUS_TEST_PATTERN)
- pr_warn("Wrong test pattern %08lx!\n", dpat);
+ pr_warn("Slot %X: Wrong test pattern %08lx!\n", slot, dpat);
/*
* I wonder how the CRC is meant to work -
@@ -742,53 +781,52 @@ static struct nubus_board * __init nubus_add_board(int slot, int bytelanes)
nubus_get_root_dir(board, &dir);
/* We're ready to rock */
- pr_info("Slot %X:\n", slot);
+ pr_debug("Slot %X resources:\n", slot);
/* Each slot should have one board resource and any number of
- functional resources. So we'll fill in some fields in the
- struct nubus_board from the board resource, then walk down
- the list of functional resources, spinning out a nubus_dev
- for each of them. */
+ * functional resources. So we'll fill in some fields in the
+ * struct nubus_board from the board resource, then walk down
+ * the list of functional resources, spinning out a nubus_rsrc
+ * for each of them.
+ */
if (nubus_readdir(&dir, &ent) == -1) {
/* We can't have this! */
- pr_err("Board resource not found!\n");
- return NULL;
- } else {
- pr_info(" Board resource:\n");
- nubus_get_board_resource(board, slot, &ent);
+ pr_err("Slot %X: Board resource not found!\n", slot);
+ kfree(board);
+ return;
}
+ if (ent.type < 1 || ent.type > 127)
+ pr_warn("Slot %X: Board resource ID is invalid!\n", slot);
+
+ board->procdir = nubus_proc_add_board(board);
+
+ nubus_get_board_resource(board, slot, &ent);
+
while (nubus_readdir(&dir, &ent) != -1) {
- struct nubus_dev *dev;
- struct nubus_dev **devp;
+ struct nubus_rsrc *fres;
- dev = nubus_get_functional_resource(board, slot, &ent);
- if (dev == NULL)
+ fres = nubus_get_functional_resource(board, slot, &ent);
+ if (fres == NULL)
continue;
- /* We zeroed this out above */
- if (board->first_dev == NULL)
- board->first_dev = dev;
+ /* Resources should appear in ascending ID order. This sanity
+ * check prevents duplicate resource IDs.
+ */
+ if (fres->resid <= prev_resid) {
+ kfree(fres);
+ continue;
+ }
+ prev_resid = fres->resid;
- /* Put it on the global NuBus device chain. Keep entries in order. */
- for (devp = &nubus_devices; *devp != NULL;
- devp = &((*devp)->next))
- /* spin */;
- *devp = dev;
- dev->next = NULL;
+ list_add_tail(&fres->list, &nubus_func_rsrcs);
}
- /* Put it on the global NuBus board chain. Keep entries in order. */
- for (boardp = &nubus_boards; *boardp != NULL;
- boardp = &((*boardp)->next))
- /* spin */;
- *boardp = board;
- board->next = NULL;
-
- return board;
+ if (nubus_device_register(board))
+ put_device(&board->dev);
}
-void __init nubus_probe_slot(int slot)
+static void __init nubus_probe_slot(int slot)
{
unsigned char dp;
unsigned char *rp;
@@ -796,11 +834,8 @@ void __init nubus_probe_slot(int slot)
rp = nubus_rom_addr(slot);
for (i = 4; i; i--) {
- int card_present;
-
rp--;
- card_present = hwreg_present(rp);
- if (!card_present)
+ if (!hwreg_present(rp))
continue;
dp = *rp;
@@ -822,10 +857,11 @@ void __init nubus_probe_slot(int slot)
}
}
-void __init nubus_scan_bus(void)
+static void __init nubus_scan_bus(void)
{
int slot;
+ pr_info("NuBus: Scanning NuBus slots.\n");
for (slot = 9; slot < 15; slot++) {
nubus_probe_slot(slot);
}
@@ -833,14 +869,16 @@ void __init nubus_scan_bus(void)
static int __init nubus_init(void)
{
+ int err;
+
if (!MACH_IS_MAC)
return 0;
- pr_info("NuBus: Scanning NuBus slots.\n");
- nubus_devices = NULL;
- nubus_boards = NULL;
- nubus_scan_bus();
nubus_proc_init();
+ err = nubus_bus_register();
+ if (err)
+ return err;
+ nubus_scan_bus();
return 0;
}
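
With struct nubus_dev gone, functional resources now live on a global list (nubus_func_rsrcs) rather than a hand-rolled singly linked chain. A minimal sketch of a consumer, assuming the for_each_func_rsrc() iterator this series adds to <linux/nubus.h>; the helper name is illustrative, not part of the patch:

/* Sketch: find the first Ethernet function on any board. */
static struct nubus_rsrc *nubus_find_ethernet(void)
{
	struct nubus_rsrc *fres;

	for_each_func_rsrc(fres)
		if (fres->category == NUBUS_CAT_NETWORK &&
		    fres->type == NUBUS_TYPE_ETHERNET)
			return fres;
	return NULL;
}
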
diff --git a/drivers/nubus/proc.c b/drivers/nubus/proc.c
index 004a122ac0ff..c2e5a7e6bd3e 100644
--- a/drivers/nubus/proc.c
+++ b/drivers/nubus/proc.c
@@ -11,39 +11,37 @@
structure in /proc analogous to the structure of the NuBus ROM
resources.
- Therefore each NuBus device is in fact a directory, which may in
- turn contain subdirectories. The "files" correspond to NuBus
- resource records. For those types of records which we know how to
- convert to formats that are meaningful to userspace (mostly just
- icons) these files will provide "cooked" data. Otherwise they will
- simply provide raw access (read-only of course) to the ROM. */
+ Therefore each board function gets a directory, which may in turn
+ contain subdirectories. Each slot resource is a file. Unrecognized
+ resources are empty files, since every resource ID requires a special
+ case (e.g. if the resource ID implies a directory or block, then its
+ value has to be interpreted as a slot ROM pointer etc.).
+ */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/nubus.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
-
#include <linux/uaccess.h>
#include <asm/byteorder.h>
+/*
+ * /proc/bus/nubus/devices stuff
+ */
+
static int
nubus_devices_proc_show(struct seq_file *m, void *v)
{
- struct nubus_dev *dev = nubus_devices;
-
- while (dev) {
- seq_printf(m, "%x\t%04x %04x %04x %04x",
- dev->board->slot,
- dev->category,
- dev->type,
- dev->dr_sw,
- dev->dr_hw);
- seq_printf(m, "\t%08lx\n", dev->board->slot_addr);
- dev = dev->next;
- }
+ struct nubus_rsrc *fres;
+
+ for_each_func_rsrc(fres)
+ seq_printf(m, "%x\t%04x %04x %04x %04x\t%08lx\n",
+ fres->board->slot, fres->category, fres->type,
+ fres->dr_sw, fres->dr_hw, fres->board->slot_addr);
return 0;
}
@@ -61,174 +59,163 @@ static const struct file_operations nubus_devices_proc_fops = {
static struct proc_dir_entry *proc_bus_nubus_dir;
-static const struct file_operations nubus_proc_subdir_fops = {
-#warning Need to set some I/O handlers here
-};
+/*
+ * /proc/bus/nubus/x/ stuff
+ */
-static void nubus_proc_subdir(struct nubus_dev* dev,
- struct proc_dir_entry* parent,
- struct nubus_dir* dir)
+struct proc_dir_entry *nubus_proc_add_board(struct nubus_board *board)
{
- struct nubus_dirent ent;
-
- /* Some of these are directories, others aren't */
- while (nubus_readdir(dir, &ent) != -1) {
- char name[8];
- struct proc_dir_entry* e;
-
- sprintf(name, "%x", ent.type);
- e = proc_create(name, S_IFREG | S_IRUGO | S_IWUSR, parent,
- &nubus_proc_subdir_fops);
- if (!e)
- return;
- }
+ char name[2];
+
+ if (!proc_bus_nubus_dir)
+ return NULL;
+ snprintf(name, sizeof(name), "%x", board->slot);
+ return proc_mkdir(name, proc_bus_nubus_dir);
}
-/* Can't do this recursively since the root directory is structured
- somewhat differently from the subdirectories */
-static void nubus_proc_populate(struct nubus_dev* dev,
- struct proc_dir_entry* parent,
- struct nubus_dir* root)
+/* The PDE private data for any directory under /proc/bus/nubus/x/
+ * is the bytelanes value for the board in slot x.
+ */
+
+struct proc_dir_entry *nubus_proc_add_rsrc_dir(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent,
+ struct nubus_board *board)
{
- struct nubus_dirent ent;
-
- /* We know these are all directories (board resource + one or
- more functional resources) */
- while (nubus_readdir(root, &ent) != -1) {
- char name[8];
- struct proc_dir_entry* e;
- struct nubus_dir dir;
-
- sprintf(name, "%x", ent.type);
- e = proc_mkdir(name, parent);
- if (!e) return;
-
- /* And descend */
- if (nubus_get_subdir(&ent, &dir) == -1) {
- /* This shouldn't happen */
- printk(KERN_ERR "NuBus root directory node %x:%x has no subdir!\n",
- dev->board->slot, ent.type);
- continue;
- } else {
- nubus_proc_subdir(dev, e, &dir);
- }
- }
+ char name[9];
+ int lanes = board->lanes;
+
+ if (!procdir)
+ return NULL;
+ snprintf(name, sizeof(name), "%x", ent->type);
+ return proc_mkdir_data(name, 0555, procdir, (void *)lanes);
}
-int nubus_proc_attach_device(struct nubus_dev *dev)
+/* The PDE private data for a file under /proc/bus/nubus/x/ is a pointer to
+ * an instance of the following structure, which gives the location and size
+ * of the resource data in the slot ROM. For slot resources which hold only a
+ * small integer, this integer value is stored directly and size is set to 0.
+ * A NULL private data pointer indicates an unrecognized resource.
+ */
+
+struct nubus_proc_pde_data {
+ unsigned char *res_ptr;
+ unsigned int res_size;
+};
+
+static struct nubus_proc_pde_data *
+nubus_proc_alloc_pde_data(unsigned char *ptr, unsigned int size)
{
- struct proc_dir_entry *e;
- struct nubus_dir root;
- char name[8];
-
- if (dev == NULL) {
- printk(KERN_ERR
- "NULL pointer in nubus_proc_attach_device, shoot the programmer!\n");
- return -1;
- }
-
- if (dev->board == NULL) {
- printk(KERN_ERR
- "NULL pointer in nubus_proc_attach_device, shoot the programmer!\n");
- printk("dev = %p, dev->board = %p\n", dev, dev->board);
- return -1;
- }
-
- /* Create a directory */
- sprintf(name, "%x", dev->board->slot);
- e = dev->procdir = proc_mkdir(name, proc_bus_nubus_dir);
- if (!e)
- return -ENOMEM;
+ struct nubus_proc_pde_data *pde_data;
- /* Now recursively populate it with files */
- nubus_get_root_dir(dev->board, &root);
- nubus_proc_populate(dev, e, &root);
+ pde_data = kmalloc(sizeof(*pde_data), GFP_KERNEL);
+ if (!pde_data)
+ return NULL;
- return 0;
+ pde_data->res_ptr = ptr;
+ pde_data->res_size = size;
+ return pde_data;
}
-EXPORT_SYMBOL(nubus_proc_attach_device);
-/*
- * /proc/nubus stuff
- */
-static int nubus_proc_show(struct seq_file *m, void *v)
+static int nubus_proc_rsrc_show(struct seq_file *m, void *v)
{
- const struct nubus_board *board = v;
+ struct inode *inode = m->private;
+ struct nubus_proc_pde_data *pde_data;
- /* Display header on line 1 */
- if (v == SEQ_START_TOKEN)
- seq_puts(m, "Nubus devices found:\n");
- else
- seq_printf(m, "Slot %X: %s\n", board->slot, board->name);
+ pde_data = PDE_DATA(inode);
+ if (!pde_data)
+ return 0;
+
+ if (pde_data->res_size > m->size)
+ return -EFBIG;
+
+ if (pde_data->res_size) {
+ int lanes = (int)proc_get_parent_data(inode);
+ struct nubus_dirent ent;
+
+ if (!lanes)
+ return 0;
+
+ ent.mask = lanes;
+ ent.base = pde_data->res_ptr;
+ ent.data = 0;
+ nubus_seq_write_rsrc_mem(m, &ent, pde_data->res_size);
+ } else {
+ unsigned int data = (unsigned int)pde_data->res_ptr;
+
+ seq_putc(m, data >> 16);
+ seq_putc(m, data >> 8);
+ seq_putc(m, data >> 0);
+ }
return 0;
}
-static void *nubus_proc_start(struct seq_file *m, loff_t *_pos)
+static int nubus_proc_rsrc_open(struct inode *inode, struct file *file)
{
- struct nubus_board *board;
- unsigned pos;
-
- if (*_pos > LONG_MAX)
- return NULL;
- pos = *_pos;
- if (pos == 0)
- return SEQ_START_TOKEN;
- for (board = nubus_boards; board; board = board->next)
- if (--pos == 0)
- break;
- return board;
+ return single_open(file, nubus_proc_rsrc_show, inode);
}
-static void *nubus_proc_next(struct seq_file *p, void *v, loff_t *_pos)
+static const struct file_operations nubus_proc_rsrc_fops = {
+ .open = nubus_proc_rsrc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent,
+ unsigned int size)
{
- /* Walk the list of NuBus boards */
- struct nubus_board *board = v;
-
- ++*_pos;
- if (v == SEQ_START_TOKEN)
- board = nubus_boards;
- else if (board)
- board = board->next;
- return board;
+ char name[9];
+ struct nubus_proc_pde_data *pde_data;
+
+ if (!procdir)
+ return;
+
+ snprintf(name, sizeof(name), "%x", ent->type);
+ if (size)
+ pde_data = nubus_proc_alloc_pde_data(nubus_dirptr(ent), size);
+ else
+ pde_data = NULL;
+ proc_create_data(name, S_IFREG | 0444, procdir,
+ &nubus_proc_rsrc_fops, pde_data);
}
-static void nubus_proc_stop(struct seq_file *p, void *v)
+void nubus_proc_add_rsrc(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent)
{
+ char name[9];
+ unsigned char *data = (unsigned char *)ent->data;
+
+ if (!procdir)
+ return;
+
+ snprintf(name, sizeof(name), "%x", ent->type);
+ proc_create_data(name, S_IFREG | 0444, procdir,
+ &nubus_proc_rsrc_fops,
+ nubus_proc_alloc_pde_data(data, 0));
}
-static const struct seq_operations nubus_proc_seqops = {
- .start = nubus_proc_start,
- .next = nubus_proc_next,
- .stop = nubus_proc_stop,
- .show = nubus_proc_show,
-};
+/*
+ * /proc/nubus stuff
+ */
static int nubus_proc_open(struct inode *inode, struct file *file)
{
- return seq_open(file, &nubus_proc_seqops);
+ return single_open(file, nubus_proc_show, NULL);
}
static const struct file_operations nubus_proc_fops = {
.open = nubus_proc_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release,
+ .release = single_release,
};
-void __init proc_bus_nubus_add_devices(void)
-{
- struct nubus_dev *dev;
-
- for(dev = nubus_devices; dev; dev = dev->next)
- nubus_proc_attach_device(dev);
-}
-
void __init nubus_proc_init(void)
{
proc_create("nubus", 0, NULL, &nubus_proc_fops);
- if (!MACH_IS_MAC)
- return;
proc_bus_nubus_dir = proc_mkdir("bus/nubus", NULL);
+ if (!proc_bus_nubus_dir)
+ return;
proc_create("devices", 0, proc_bus_nubus_dir, &nubus_devices_proc_fops);
- proc_bus_nubus_add_devices();
}
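
For a resource that holds only a small integer (res_size == 0), the show routine above writes the 24-bit ent->data value as three big-endian bytes. A hedged userspace sketch of decoding such a file; the example path and helper name are hypothetical:

#include <stdio.h>

/* Read a 0-sized-resource file, e.g. /proc/bus/nubus/9/1/24, and
 * reassemble the 24-bit value the kernel emitted byte by byte.
 */
static int read_rsrc_value(const char *path, unsigned int *val)
{
	unsigned char b[3];
	FILE *f = fopen(path, "rb");

	if (!f)
		return -1;
	if (fread(b, 1, 3, f) != 3) {
		fclose(f);
		return -1;
	}
	fclose(f);
	*val = (b[0] << 16) | (b[1] << 8) | b[2];
	return 0;
}
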
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index a25fd43650ad..441e67e3a9d7 100644
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
@@ -1,4 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -I$(src)
+
obj-$(CONFIG_NVME_CORE) += nvme-core.o
obj-$(CONFIG_BLK_DEV_NVME) += nvme.o
obj-$(CONFIG_NVME_FABRICS) += nvme-fabrics.o
@@ -6,6 +9,7 @@ obj-$(CONFIG_NVME_RDMA) += nvme-rdma.o
obj-$(CONFIG_NVME_FC) += nvme-fc.o
nvme-core-y := core.o
+nvme-core-$(CONFIG_TRACING) += trace.o
nvme-core-$(CONFIG_NVME_MULTIPATH) += multipath.o
nvme-core-$(CONFIG_NVM) += lightnvm.o
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 839650e0926a..e8104871cbbf 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -29,6 +29,9 @@
#include <linux/pm_qos.h>
#include <asm/unaligned.h>
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
#include "nvme.h"
#include "fabrics.h"
@@ -65,9 +68,26 @@ static bool streams;
module_param(streams, bool, 0644);
MODULE_PARM_DESC(streams, "turn on support for Streams write directives");
+/*
+ * nvme_wq - hosts nvme related works that are not reset or delete
+ * nvme_reset_wq - hosts nvme reset works
+ * nvme_delete_wq - hosts nvme delete works
+ *
+ * nvme_wq will host works such as scan, aen handling, fw activation,
+ * keep-alive error recovery, periodic reconnects etc. nvme_reset_wq
+ * runs reset works, which also flush works hosted on nvme_wq for
+ * serialization purposes. nvme_delete_wq hosts controller deletion
+ * works, which flush reset works for serialization.
+ */
struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL(nvme_wq);
+struct workqueue_struct *nvme_reset_wq;
+EXPORT_SYMBOL_GPL(nvme_reset_wq);
+
+struct workqueue_struct *nvme_delete_wq;
+EXPORT_SYMBOL_GPL(nvme_delete_wq);
+
static DEFINE_IDA(nvme_subsystems_ida);
static LIST_HEAD(nvme_subsystems);
static DEFINE_MUTEX(nvme_subsystems_lock);
@@ -89,13 +109,13 @@ int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
return -EBUSY;
- if (!queue_work(nvme_wq, &ctrl->reset_work))
+ if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
return -EBUSY;
return 0;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
-static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
+int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
int ret;
@@ -104,6 +124,7 @@ static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
flush_work(&ctrl->reset_work);
return ret;
}
+EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);
static void nvme_delete_ctrl_work(struct work_struct *work)
{
@@ -122,7 +143,7 @@ int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
{
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
return -EBUSY;
- if (!queue_work(nvme_wq, &ctrl->delete_work))
+ if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
return -EBUSY;
return 0;
}
@@ -157,13 +178,20 @@ static blk_status_t nvme_error_status(struct request *req)
return BLK_STS_OK;
case NVME_SC_CAP_EXCEEDED:
return BLK_STS_NOSPC;
+ case NVME_SC_LBA_RANGE:
+ return BLK_STS_TARGET;
+ case NVME_SC_BAD_ATTRIBUTES:
case NVME_SC_ONCS_NOT_SUPPORTED:
+ case NVME_SC_INVALID_OPCODE:
+ case NVME_SC_INVALID_FIELD:
+ case NVME_SC_INVALID_NS:
return BLK_STS_NOTSUPP;
case NVME_SC_WRITE_FAULT:
case NVME_SC_READ_ERROR:
case NVME_SC_UNWRITTEN_BLOCK:
case NVME_SC_ACCESS_DENIED:
case NVME_SC_READ_ONLY:
+ case NVME_SC_COMPARE_FAILED:
return BLK_STS_MEDIUM;
case NVME_SC_GUARD_CHECK:
case NVME_SC_APPTAG_CHECK:
@@ -190,8 +218,12 @@ static inline bool nvme_req_needs_retry(struct request *req)
void nvme_complete_rq(struct request *req)
{
- if (unlikely(nvme_req(req)->status && nvme_req_needs_retry(req))) {
- if (nvme_req_needs_failover(req)) {
+ blk_status_t status = nvme_error_status(req);
+
+ trace_nvme_complete_rq(req);
+
+ if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) {
+ if (nvme_req_needs_failover(req, status)) {
nvme_failover_req(req);
return;
}
@@ -202,8 +234,7 @@ void nvme_complete_rq(struct request *req)
return;
}
}
-
- blk_mq_end_request(req, nvme_error_status(req));
+ blk_mq_end_request(req, status);
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);
@@ -232,6 +263,15 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
old_state = ctrl->state;
switch (new_state) {
+ case NVME_CTRL_ADMIN_ONLY:
+ switch (old_state) {
+ case NVME_CTRL_RECONNECTING:
+ changed = true;
+ /* FALLTHRU */
+ default:
+ break;
+ }
+ break;
case NVME_CTRL_LIVE:
switch (old_state) {
case NVME_CTRL_NEW:
@@ -247,6 +287,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
switch (old_state) {
case NVME_CTRL_NEW:
case NVME_CTRL_LIVE:
+ case NVME_CTRL_ADMIN_ONLY:
changed = true;
/* FALLTHRU */
default:
@@ -266,6 +307,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
case NVME_CTRL_DELETING:
switch (old_state) {
case NVME_CTRL_LIVE:
+ case NVME_CTRL_ADMIN_ONLY:
case NVME_CTRL_RESETTING:
case NVME_CTRL_RECONNECTING:
changed = true;
@@ -591,6 +633,10 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
}
cmd->common.command_id = req->tag;
+ if (ns)
+ trace_nvme_setup_nvm_cmd(req->q->id, cmd);
+ else
+ trace_nvme_setup_admin_cmd(cmd);
return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);
@@ -1217,16 +1263,27 @@ static int nvme_open(struct block_device *bdev, fmode_t mode)
#ifdef CONFIG_NVME_MULTIPATH
/* should never be called due to GENHD_FL_HIDDEN */
if (WARN_ON_ONCE(ns->head->disk))
- return -ENXIO;
+ goto fail;
#endif
if (!kref_get_unless_zero(&ns->kref))
- return -ENXIO;
+ goto fail;
+ if (!try_module_get(ns->ctrl->ops->module))
+ goto fail_put_ns;
+
return 0;
+
+fail_put_ns:
+ nvme_put_ns(ns);
+fail:
+ return -ENXIO;
}
static void nvme_release(struct gendisk *disk, fmode_t mode)
{
- nvme_put_ns(disk->private_data);
+ struct nvme_ns *ns = disk->private_data;
+
+ module_put(ns->ctrl->ops->module);
+ nvme_put_ns(ns);
}
static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
@@ -2052,6 +2109,22 @@ static const struct attribute_group *nvme_subsys_attrs_groups[] = {
NULL,
};
+static int nvme_active_ctrls(struct nvme_subsystem *subsys)
+{
+ int count = 0;
+ struct nvme_ctrl *ctrl;
+
+ mutex_lock(&subsys->lock);
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
+ if (ctrl->state != NVME_CTRL_DELETING &&
+ ctrl->state != NVME_CTRL_DEAD)
+ count++;
+ }
+ mutex_unlock(&subsys->lock);
+
+ return count;
+}
+
static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
struct nvme_subsystem *subsys, *found;
@@ -2090,7 +2163,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
* Verify that the subsystem actually supports multiple
* controllers, else bail out.
*/
- if (!(id->cmic & (1 << 1))) {
+ if (nvme_active_ctrls(found) && !(id->cmic & (1 << 1))) {
dev_err(ctrl->device,
"ignoring ctrl due to duplicate subnqn (%s).\n",
found->subnqn);
@@ -2257,7 +2330,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
shutdown_timeout, 60);
if (ctrl->shutdown_timeout != shutdown_timeout)
- dev_warn(ctrl->device,
+ dev_info(ctrl->device,
"Shutdown timeout set to %u seconds\n",
ctrl->shutdown_timeout);
} else
@@ -2341,8 +2414,14 @@ static int nvme_dev_open(struct inode *inode, struct file *file)
struct nvme_ctrl *ctrl =
container_of(inode->i_cdev, struct nvme_ctrl, cdev);
- if (ctrl->state != NVME_CTRL_LIVE)
+ switch (ctrl->state) {
+ case NVME_CTRL_LIVE:
+ case NVME_CTRL_ADMIN_ONLY:
+ break;
+ default:
return -EWOULDBLOCK;
+ }
+
file->private_data = ctrl;
return 0;
}
@@ -2606,6 +2685,7 @@ static ssize_t nvme_sysfs_show_state(struct device *dev,
static const char *const state_name[] = {
[NVME_CTRL_NEW] = "new",
[NVME_CTRL_LIVE] = "live",
+ [NVME_CTRL_ADMIN_ONLY] = "only-admin",
[NVME_CTRL_RESETTING] = "resetting",
[NVME_CTRL_RECONNECTING]= "reconnecting",
[NVME_CTRL_DELETING] = "deleting",
@@ -3079,6 +3159,8 @@ static void nvme_scan_work(struct work_struct *work)
if (ctrl->state != NVME_CTRL_LIVE)
return;
+ WARN_ON_ONCE(!ctrl->tagset);
+
if (nvme_identify_ctrl(ctrl, &id))
return;
@@ -3099,8 +3181,7 @@ static void nvme_scan_work(struct work_struct *work)
void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
/*
- * Do not queue new scan work when a controller is reset during
- * removal.
+ * Only queue new scan work when both the admin and IO queues are alive
*/
if (ctrl->state == NVME_CTRL_LIVE)
queue_work(nvme_wq, &ctrl->scan_work);
@@ -3477,16 +3558,26 @@ EXPORT_SYMBOL_GPL(nvme_reinit_tagset);
int __init nvme_core_init(void)
{
- int result;
+ int result = -ENOMEM;
nvme_wq = alloc_workqueue("nvme-wq",
WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
if (!nvme_wq)
- return -ENOMEM;
+ goto out;
+
+ nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
+ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+ if (!nvme_reset_wq)
+ goto destroy_wq;
+
+ nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
+ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+ if (!nvme_delete_wq)
+ goto destroy_reset_wq;
result = alloc_chrdev_region(&nvme_chr_devt, 0, NVME_MINORS, "nvme");
if (result < 0)
- goto destroy_wq;
+ goto destroy_delete_wq;
nvme_class = class_create(THIS_MODULE, "nvme");
if (IS_ERR(nvme_class)) {
@@ -3505,8 +3596,13 @@ destroy_class:
class_destroy(nvme_class);
unregister_chrdev:
unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
+destroy_delete_wq:
+ destroy_workqueue(nvme_delete_wq);
+destroy_reset_wq:
+ destroy_workqueue(nvme_reset_wq);
destroy_wq:
destroy_workqueue(nvme_wq);
+out:
return result;
}
@@ -3516,6 +3612,8 @@ void nvme_core_exit(void)
class_destroy(nvme_subsys_class);
class_destroy(nvme_class);
unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
+ destroy_workqueue(nvme_delete_wq);
+ destroy_workqueue(nvme_reset_wq);
destroy_workqueue(nvme_wq);
}
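
nvme_reset_ctrl_sync() is now exported so a transport can kick a reset onto nvme_reset_wq and wait for it from its own context. A minimal sketch of a caller, assuming a hypothetical resume path (nvme_foo_resume and the drvdata layout are illustrative, not part of this patch):

/* Sketch: synchronous controller reset from a transport callback. */
static int nvme_foo_resume(struct device *dev)
{
	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

	return nvme_reset_ctrl_sync(ctrl);
}
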
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 894c2ccb3891..5dd4ceefed8f 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -493,7 +493,7 @@ EXPORT_SYMBOL_GPL(nvmf_should_reconnect);
*/
int nvmf_register_transport(struct nvmf_transport_ops *ops)
{
- if (!ops->create_ctrl)
+ if (!ops->create_ctrl || !ops->module)
return -EINVAL;
down_write(&nvmf_transports_rwsem);
@@ -739,11 +739,14 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
ret = -ENOMEM;
goto out;
}
- if (uuid_parse(p, &hostid)) {
+ ret = uuid_parse(p, &hostid);
+ if (ret) {
pr_err("Invalid hostid %s\n", p);
ret = -EINVAL;
+ kfree(p);
goto out;
}
+ kfree(p);
break;
case NVMF_OPT_DUP_CONNECT:
opts->duplicate_connect = true;
@@ -869,32 +872,41 @@ nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
goto out_unlock;
}
+ if (!try_module_get(ops->module)) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
ret = nvmf_check_required_opts(opts, ops->required_opts);
if (ret)
- goto out_unlock;
+ goto out_module_put;
ret = nvmf_check_allowed_opts(opts, NVMF_ALLOWED_OPTS |
ops->allowed_opts | ops->required_opts);
if (ret)
- goto out_unlock;
+ goto out_module_put;
ctrl = ops->create_ctrl(dev, opts);
if (IS_ERR(ctrl)) {
ret = PTR_ERR(ctrl);
- goto out_unlock;
+ goto out_module_put;
}
if (strcmp(ctrl->subsys->subnqn, opts->subsysnqn)) {
dev_warn(ctrl->device,
"controller returned incorrect NQN: \"%s\".\n",
ctrl->subsys->subnqn);
+ module_put(ops->module);
up_read(&nvmf_transports_rwsem);
nvme_delete_ctrl_sync(ctrl);
return ERR_PTR(-EINVAL);
}
+ module_put(ops->module);
up_read(&nvmf_transports_rwsem);
return ctrl;
+out_module_put:
+ module_put(ops->module);
out_unlock:
up_read(&nvmf_transports_rwsem);
out_free_opts:
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 9ba614953607..25b19f722f5b 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -108,6 +108,7 @@ struct nvmf_ctrl_options {
* fabric implementation of NVMe fabrics.
* @entry: Used by the fabrics library to add the new
* registration entry to its linked-list internal tree.
+ * @module: Transport module reference
* @name: Name of the NVMe fabric driver implementation.
* @required_opts: sysfs command-line options that must be specified
* when adding a new NVMe controller.
@@ -126,6 +127,7 @@ struct nvmf_ctrl_options {
*/
struct nvmf_transport_ops {
struct list_head entry;
+ struct module *module;
const char *name;
int required_opts;
int allowed_opts;
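
With @module now mandatory (nvmf_register_transport() rejects ops without it), a transport registration looks like the sketch below; the "loop" names are borrowed for illustration and nvme_loop_create_ctrl is assumed to exist elsewhere:

static struct nvmf_transport_ops nvme_loop_transport = {
	.name		= "loop",
	.module		= THIS_MODULE,
	.create_ctrl	= nvme_loop_create_ctrl,
};

static int __init nvme_loop_init_module(void)
{
	return nvmf_register_transport(&nvme_loop_transport);
}
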
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 794e66e4aa20..99bf51c7e513 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2921,6 +2921,9 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
nvme_fc_free_queue(&ctrl->queues[0]);
+ /* re-enable the admin_q so anything new can fast fail */
+ blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+
nvme_fc_ctlr_inactive_on_rport(ctrl);
}
@@ -2935,6 +2938,9 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
* waiting for io to terminate
*/
nvme_fc_delete_association(ctrl);
+
+ /* resume the io queues so that things will fast fail */
+ nvme_start_queues(nctrl);
}
static void
@@ -3380,6 +3386,7 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
static struct nvmf_transport_ops nvme_fc_transport = {
.name = "fc",
+ .module = THIS_MODULE,
.required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
.allowed_opts = NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
.create_ctrl = nvme_fc_create_ctrl,
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index ba3d7f3349e5..50ef71ee3d86 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -31,27 +31,10 @@
enum nvme_nvm_admin_opcode {
nvme_nvm_admin_identity = 0xe2,
- nvme_nvm_admin_get_l2p_tbl = 0xea,
nvme_nvm_admin_get_bb_tbl = 0xf2,
nvme_nvm_admin_set_bb_tbl = 0xf1,
};
-struct nvme_nvm_hb_rw {
- __u8 opcode;
- __u8 flags;
- __u16 command_id;
- __le32 nsid;
- __u64 rsvd2;
- __le64 metadata;
- __le64 prp1;
- __le64 prp2;
- __le64 spba;
- __le16 length;
- __le16 control;
- __le32 dsmgmt;
- __le64 slba;
-};
-
struct nvme_nvm_ph_rw {
__u8 opcode;
__u8 flags;
@@ -80,19 +63,6 @@ struct nvme_nvm_identity {
__u32 rsvd11[5];
};
-struct nvme_nvm_l2ptbl {
- __u8 opcode;
- __u8 flags;
- __u16 command_id;
- __le32 nsid;
- __le32 cdw2[4];
- __le64 prp1;
- __le64 prp2;
- __le64 slba;
- __le32 nlb;
- __le16 cdw14[6];
-};
-
struct nvme_nvm_getbbtbl {
__u8 opcode;
__u8 flags;
@@ -139,9 +109,7 @@ struct nvme_nvm_command {
union {
struct nvme_common_command common;
struct nvme_nvm_identity identity;
- struct nvme_nvm_hb_rw hb_rw;
struct nvme_nvm_ph_rw ph_rw;
- struct nvme_nvm_l2ptbl l2p;
struct nvme_nvm_getbbtbl get_bb;
struct nvme_nvm_setbbtbl set_bb;
struct nvme_nvm_erase_blk erase;
@@ -167,7 +135,7 @@ struct nvme_nvm_id_group {
__u8 num_lun;
__u8 num_pln;
__u8 rsvd1;
- __le16 num_blk;
+ __le16 num_chk;
__le16 num_pg;
__le16 fpg_sz;
__le16 csecs;
@@ -234,11 +202,9 @@ struct nvme_nvm_bb_tbl {
static inline void _nvme_nvm_check_size(void)
{
BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
- BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64);
BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
- BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64);
BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960);
BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 16);
@@ -249,51 +215,58 @@ static inline void _nvme_nvm_check_size(void)
static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
{
struct nvme_nvm_id_group *src;
- struct nvm_id_group *dst;
+ struct nvm_id_group *grp;
+ int sec_per_pg, sec_per_pl, pg_per_blk;
if (nvme_nvm_id->cgrps != 1)
return -EINVAL;
src = &nvme_nvm_id->groups[0];
- dst = &nvm_id->grp;
-
- dst->mtype = src->mtype;
- dst->fmtype = src->fmtype;
- dst->num_ch = src->num_ch;
- dst->num_lun = src->num_lun;
- dst->num_pln = src->num_pln;
-
- dst->num_pg = le16_to_cpu(src->num_pg);
- dst->num_blk = le16_to_cpu(src->num_blk);
- dst->fpg_sz = le16_to_cpu(src->fpg_sz);
- dst->csecs = le16_to_cpu(src->csecs);
- dst->sos = le16_to_cpu(src->sos);
-
- dst->trdt = le32_to_cpu(src->trdt);
- dst->trdm = le32_to_cpu(src->trdm);
- dst->tprt = le32_to_cpu(src->tprt);
- dst->tprm = le32_to_cpu(src->tprm);
- dst->tbet = le32_to_cpu(src->tbet);
- dst->tbem = le32_to_cpu(src->tbem);
- dst->mpos = le32_to_cpu(src->mpos);
- dst->mccap = le32_to_cpu(src->mccap);
-
- dst->cpar = le16_to_cpu(src->cpar);
-
- if (dst->fmtype == NVM_ID_FMTYPE_MLC) {
- memcpy(dst->lptbl.id, src->lptbl.id, 8);
- dst->lptbl.mlc.num_pairs =
- le16_to_cpu(src->lptbl.mlc.num_pairs);
-
- if (dst->lptbl.mlc.num_pairs > NVME_NVM_LP_MLC_PAIRS) {
- pr_err("nvm: number of MLC pairs not supported\n");
- return -EINVAL;
- }
+ grp = &nvm_id->grp;
+
+ grp->mtype = src->mtype;
+ grp->fmtype = src->fmtype;
+
+ grp->num_ch = src->num_ch;
+ grp->num_lun = src->num_lun;
+
+ grp->num_chk = le16_to_cpu(src->num_chk);
+ grp->csecs = le16_to_cpu(src->csecs);
+ grp->sos = le16_to_cpu(src->sos);
+
+ pg_per_blk = le16_to_cpu(src->num_pg);
+ sec_per_pg = le16_to_cpu(src->fpg_sz) / grp->csecs;
+ sec_per_pl = sec_per_pg * src->num_pln;
+ grp->clba = sec_per_pl * pg_per_blk;
+ grp->ws_per_chk = pg_per_blk;
- memcpy(dst->lptbl.mlc.pairs, src->lptbl.mlc.pairs,
- dst->lptbl.mlc.num_pairs);
+ grp->mpos = le32_to_cpu(src->mpos);
+ grp->cpar = le16_to_cpu(src->cpar);
+ grp->mccap = le32_to_cpu(src->mccap);
+
+ grp->ws_opt = grp->ws_min = sec_per_pg;
+ grp->ws_seq = NVM_IO_SNGL_ACCESS;
+
+ if (grp->mpos & 0x020202) {
+ grp->ws_seq = NVM_IO_DUAL_ACCESS;
+ grp->ws_opt <<= 1;
+ } else if (grp->mpos & 0x040404) {
+ grp->ws_seq = NVM_IO_QUAD_ACCESS;
+ grp->ws_opt <<= 2;
}
+ grp->trdt = le32_to_cpu(src->trdt);
+ grp->trdm = le32_to_cpu(src->trdm);
+ grp->tprt = le32_to_cpu(src->tprt);
+ grp->tprm = le32_to_cpu(src->tprm);
+ grp->tbet = le32_to_cpu(src->tbet);
+ grp->tbem = le32_to_cpu(src->tbem);
+
+ /* 1.2 compatibility */
+ grp->num_pln = src->num_pln;
+ grp->num_pg = le16_to_cpu(src->num_pg);
+ grp->fpg_sz = le16_to_cpu(src->fpg_sz);
+
return 0;
}
@@ -332,62 +305,6 @@ out:
return ret;
}
-static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
- nvm_l2p_update_fn *update_l2p, void *priv)
-{
- struct nvme_ns *ns = nvmdev->q->queuedata;
- struct nvme_nvm_command c = {};
- u32 len = queue_max_hw_sectors(ns->ctrl->admin_q) << 9;
- u32 nlb_pr_rq = len / sizeof(u64);
- u64 cmd_slba = slba;
- void *entries;
- int ret = 0;
-
- c.l2p.opcode = nvme_nvm_admin_get_l2p_tbl;
- c.l2p.nsid = cpu_to_le32(ns->head->ns_id);
- entries = kmalloc(len, GFP_KERNEL);
- if (!entries)
- return -ENOMEM;
-
- while (nlb) {
- u32 cmd_nlb = min(nlb_pr_rq, nlb);
- u64 elba = slba + cmd_nlb;
-
- c.l2p.slba = cpu_to_le64(cmd_slba);
- c.l2p.nlb = cpu_to_le32(cmd_nlb);
-
- ret = nvme_submit_sync_cmd(ns->ctrl->admin_q,
- (struct nvme_command *)&c, entries, len);
- if (ret) {
- dev_err(ns->ctrl->device,
- "L2P table transfer failed (%d)\n", ret);
- ret = -EIO;
- goto out;
- }
-
- if (unlikely(elba > nvmdev->total_secs)) {
- pr_err("nvm: L2P data from device is out of bounds!\n");
- ret = -EINVAL;
- goto out;
- }
-
- /* Transform physical address to target address space */
- nvm_part_to_tgt(nvmdev, entries, cmd_nlb);
-
- if (update_l2p(cmd_slba, cmd_nlb, entries, priv)) {
- ret = -EINTR;
- goto out;
- }
-
- cmd_slba += cmd_nlb;
- nlb -= cmd_nlb;
- }
-
-out:
- kfree(entries);
- return ret;
-}
-
static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
u8 *blks)
{
@@ -397,7 +314,7 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
struct nvme_ctrl *ctrl = ns->ctrl;
struct nvme_nvm_command c = {};
struct nvme_nvm_bb_tbl *bb_tbl;
- int nr_blks = geo->blks_per_lun * geo->plane_mode;
+ int nr_blks = geo->nr_chks * geo->plane_mode;
int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
int ret = 0;
@@ -438,7 +355,7 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
goto out;
}
- memcpy(blks, bb_tbl->blk, geo->blks_per_lun * geo->plane_mode);
+ memcpy(blks, bb_tbl->blk, geo->nr_chks * geo->plane_mode);
out:
kfree(bb_tbl);
return ret;
@@ -474,10 +391,6 @@ static inline void nvme_nvm_rqtocmd(struct nvm_rq *rqd, struct nvme_ns *ns,
c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list);
c->ph_rw.control = cpu_to_le16(rqd->flags);
c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);
-
- if (rqd->opcode == NVM_OP_HBWRITE || rqd->opcode == NVM_OP_HBREAD)
- c->hb_rw.slba = cpu_to_le64(nvme_block_nr(ns,
- rqd->bio->bi_iter.bi_sector));
}
static void nvme_nvm_end_io(struct request *rq, blk_status_t status)
@@ -597,8 +510,6 @@ static void nvme_nvm_dev_dma_free(void *pool, void *addr,
static struct nvm_dev_ops nvme_nvm_dev_ops = {
.identity = nvme_nvm_identity,
- .get_l2p_tbl = nvme_nvm_get_l2p_tbl,
-
.get_bb_tbl = nvme_nvm_get_bb_tbl,
.set_bb_tbl = nvme_nvm_set_bb_tbl,
@@ -883,7 +794,7 @@ static ssize_t nvm_dev_attr_show(struct device *dev,
} else if (strcmp(attr->name, "num_planes") == 0) {
return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pln);
} else if (strcmp(attr->name, "num_blocks") == 0) { /* u16 */
- return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_blk);
+ return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_chk);
} else if (strcmp(attr->name, "num_pages") == 0) {
return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pg);
} else if (strcmp(attr->name, "page_size") == 0) {
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 1218a9fca846..3b211d9e58b8 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -33,51 +33,11 @@ void nvme_failover_req(struct request *req)
kblockd_schedule_work(&ns->head->requeue_work);
}
-bool nvme_req_needs_failover(struct request *req)
+bool nvme_req_needs_failover(struct request *req, blk_status_t error)
{
if (!(req->cmd_flags & REQ_NVME_MPATH))
return false;
-
- switch (nvme_req(req)->status & 0x7ff) {
- /*
- * Generic command status:
- */
- case NVME_SC_INVALID_OPCODE:
- case NVME_SC_INVALID_FIELD:
- case NVME_SC_INVALID_NS:
- case NVME_SC_LBA_RANGE:
- case NVME_SC_CAP_EXCEEDED:
- case NVME_SC_RESERVATION_CONFLICT:
- return false;
-
- /*
- * I/O command set specific error. Unfortunately these values are
- * reused for fabrics commands, but those should never get here.
- */
- case NVME_SC_BAD_ATTRIBUTES:
- case NVME_SC_INVALID_PI:
- case NVME_SC_READ_ONLY:
- case NVME_SC_ONCS_NOT_SUPPORTED:
- WARN_ON_ONCE(nvme_req(req)->cmd->common.opcode ==
- nvme_fabrics_command);
- return false;
-
- /*
- * Media and Data Integrity Errors:
- */
- case NVME_SC_WRITE_FAULT:
- case NVME_SC_READ_ERROR:
- case NVME_SC_GUARD_CHECK:
- case NVME_SC_APPTAG_CHECK:
- case NVME_SC_REFTAG_CHECK:
- case NVME_SC_COMPARE_FAILED:
- case NVME_SC_ACCESS_DENIED:
- case NVME_SC_UNWRITTEN_BLOCK:
- return false;
- }
-
- /* Everything else could be a path failure, so should be retried */
- return true;
+ return blk_path_error(error);
}
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
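
blk_path_error() comes from <linux/blk_types.h>, added earlier in this series; a sketch of its logic for reference — target-side statuses decline failover, anything else is treated as a potential path failure:

static inline bool blk_path_error(blk_status_t error)
{
	switch (error) {
	case BLK_STS_NOTSUPP:
	case BLK_STS_NOSPC:
	case BLK_STS_TARGET:
	case BLK_STS_NEXUS:
	case BLK_STS_MEDIUM:
	case BLK_STS_PROTECTION:
		return false;
	}
	/* Anything else could be a path failure, so should be retried */
	return true;
}
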
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index a00eabd06427..8e4550fa08f8 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -32,6 +32,8 @@ extern unsigned int admin_timeout;
#define NVME_KATO_GRACE 10
extern struct workqueue_struct *nvme_wq;
+extern struct workqueue_struct *nvme_reset_wq;
+extern struct workqueue_struct *nvme_delete_wq;
enum {
NVME_NS_LBA = 0,
@@ -119,6 +121,7 @@ static inline struct nvme_request *nvme_req(struct request *req)
enum nvme_ctrl_state {
NVME_CTRL_NEW,
NVME_CTRL_LIVE,
+ NVME_CTRL_ADMIN_ONLY, /* Only admin queue live */
NVME_CTRL_RESETTING,
NVME_CTRL_RECONNECTING,
NVME_CTRL_DELETING,
@@ -393,6 +396,7 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_start_keep_alive(struct nvme_ctrl *ctrl);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
+int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl);
@@ -401,7 +405,7 @@ extern const struct block_device_operations nvme_ns_head_ops;
#ifdef CONFIG_NVME_MULTIPATH
void nvme_failover_req(struct request *req);
-bool nvme_req_needs_failover(struct request *req);
+bool nvme_req_needs_failover(struct request *req, blk_status_t error);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
void nvme_mpath_add_disk(struct nvme_ns_head *head);
@@ -430,7 +434,8 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
static inline void nvme_failover_req(struct request *req)
{
}
-static inline bool nvme_req_needs_failover(struct request *req)
+static inline bool nvme_req_needs_failover(struct request *req,
+ blk_status_t error)
{
return false;
}
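
Converting dev->queues from an array of pointers to a flat array of struct nvme_queue implies a single allocation at probe time. A sketch of the corresponding nvme_probe() hunk, which this excerpt does not show (the node variable and free label belong to the surrounding function):

	dev->queues = kcalloc_node(num_possible_cpus() + 1,
				   sizeof(struct nvme_queue),
				   GFP_KERNEL, node);
	if (!dev->queues)
		goto free;
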
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 4276ebfff22b..6fe7af00a1f4 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -75,7 +75,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
* Represents an NVM Express device. Each nvme_dev is a PCI function.
*/
struct nvme_dev {
- struct nvme_queue **queues;
+ struct nvme_queue *queues;
struct blk_mq_tag_set tagset;
struct blk_mq_tag_set admin_tagset;
u32 __iomem *dbs;
@@ -365,7 +365,7 @@ static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
struct nvme_dev *dev = data;
- struct nvme_queue *nvmeq = dev->queues[0];
+ struct nvme_queue *nvmeq = &dev->queues[0];
WARN_ON(hctx_idx != 0);
WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
@@ -387,7 +387,7 @@ static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
struct nvme_dev *dev = data;
- struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1];
+ struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];
if (!nvmeq->tags)
nvmeq->tags = &dev->tagset.tags[hctx_idx];
@@ -403,7 +403,7 @@ static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
struct nvme_dev *dev = set->driver_data;
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
int queue_idx = (set == &dev->tagset) ? hctx_idx + 1 : 0;
- struct nvme_queue *nvmeq = dev->queues[queue_idx];
+ struct nvme_queue *nvmeq = &dev->queues[queue_idx];
BUG_ON(!nvmeq);
iod->nvmeq = nvmeq;
@@ -1044,7 +1044,7 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
{
struct nvme_dev *dev = to_nvme_dev(ctrl);
- struct nvme_queue *nvmeq = dev->queues[0];
+ struct nvme_queue *nvmeq = &dev->queues[0];
struct nvme_command c;
memset(&c, 0, sizeof(c));
@@ -1138,9 +1138,14 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
*/
bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);
- /* If there is a reset ongoing, we shouldn't reset again. */
- if (dev->ctrl.state == NVME_CTRL_RESETTING)
+ /* If there is a reset/reinit ongoing, we shouldn't reset again. */
+ switch (dev->ctrl.state) {
+ case NVME_CTRL_RESETTING:
+ case NVME_CTRL_RECONNECTING:
return false;
+ default:
+ break;
+ }
/* We shouldn't reset unless the controller is on fatal error state
* _or_ if we lost the communication with it.
@@ -1280,7 +1285,6 @@ static void nvme_free_queue(struct nvme_queue *nvmeq)
if (nvmeq->sq_cmds)
dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
nvmeq->sq_cmds, nvmeq->sq_dma_addr);
- kfree(nvmeq);
}
static void nvme_free_queues(struct nvme_dev *dev, int lowest)
@@ -1288,10 +1292,8 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
int i;
for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) {
- struct nvme_queue *nvmeq = dev->queues[i];
dev->ctrl.queue_count--;
- dev->queues[i] = NULL;
- nvme_free_queue(nvmeq);
+ nvme_free_queue(&dev->queues[i]);
}
}
@@ -1323,12 +1325,7 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
{
- struct nvme_queue *nvmeq = dev->queues[0];
-
- if (!nvmeq)
- return;
- if (nvme_suspend_queue(nvmeq))
- return;
+ struct nvme_queue *nvmeq = &dev->queues[0];
if (shutdown)
nvme_shutdown_ctrl(&dev->ctrl);
@@ -1367,7 +1364,7 @@ static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
int qid, int depth)
{
- if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) {
+ if (qid && dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
dev->ctrl.page_size);
nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
@@ -1382,13 +1379,13 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
return 0;
}
-static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
- int depth, int node)
+static int nvme_alloc_queue(struct nvme_dev *dev, int qid,
+ int depth, int node)
{
- struct nvme_queue *nvmeq = kzalloc_node(sizeof(*nvmeq), GFP_KERNEL,
- node);
- if (!nvmeq)
- return NULL;
+ struct nvme_queue *nvmeq = &dev->queues[qid];
+
+ if (dev->ctrl.queue_count > qid)
+ return 0;
nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth),
&nvmeq->cq_dma_addr, GFP_KERNEL);
@@ -1407,17 +1404,15 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
nvmeq->q_depth = depth;
nvmeq->qid = qid;
nvmeq->cq_vector = -1;
- dev->queues[qid] = nvmeq;
dev->ctrl.queue_count++;
- return nvmeq;
+ return 0;
free_cqdma:
dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes,
nvmeq->cq_dma_addr);
free_nvmeq:
- kfree(nvmeq);
- return NULL;
+ return -ENOMEM;
}
static int queue_request_irq(struct nvme_queue *nvmeq)
@@ -1590,14 +1585,12 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
if (result < 0)
return result;
- nvmeq = dev->queues[0];
- if (!nvmeq) {
- nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH,
- dev_to_node(dev->dev));
- if (!nvmeq)
- return -ENOMEM;
- }
+ result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH,
+ dev_to_node(dev->dev));
+ if (result)
+ return result;
+ nvmeq = &dev->queues[0];
aqa = nvmeq->q_depth - 1;
aqa |= aqa << 16;
@@ -1627,7 +1620,7 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) {
/* vector == qid - 1, match nvme_create_queue */
- if (!nvme_alloc_queue(dev, i, dev->q_depth,
+ if (nvme_alloc_queue(dev, i, dev->q_depth,
pci_irq_get_node(to_pci_dev(dev->dev), i - 1))) {
ret = -ENOMEM;
break;
@@ -1636,15 +1629,15 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
max = min(dev->max_qid, dev->ctrl.queue_count - 1);
for (i = dev->online_queues; i <= max; i++) {
- ret = nvme_create_queue(dev->queues[i], i);
+ ret = nvme_create_queue(&dev->queues[i], i);
if (ret)
break;
}
/*
* Ignore failing Create SQ/CQ commands, we can continue with less
- * than the desired aount of queues, and even a controller without
- * I/O queues an still be used to issue admin commands. This might
+ * than the desired amount of queues, and even a controller without
+ * I/O queues can still be used to issue admin commands. This might
* be useful to upgrade a buggy firmware for example.
*/
return ret >= 0 ? 0 : ret;
@@ -1661,30 +1654,40 @@ static ssize_t nvme_cmb_show(struct device *dev,
}
static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL);
-static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
+static u64 nvme_cmb_size_unit(struct nvme_dev *dev)
{
- u64 szu, size, offset;
+ u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK;
+
+ return 1ULL << (12 + 4 * szu);
+}
+
+static u32 nvme_cmb_size(struct nvme_dev *dev)
+{
+ return (dev->cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK;
+}
+
+static void nvme_map_cmb(struct nvme_dev *dev)
+{
+ u64 size, offset;
resource_size_t bar_size;
struct pci_dev *pdev = to_pci_dev(dev->dev);
- void __iomem *cmb;
int bar;
dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
- if (!(NVME_CMB_SZ(dev->cmbsz)))
- return NULL;
+ if (!dev->cmbsz)
+ return;
dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
if (!use_cmb_sqes)
- return NULL;
+ return;
- szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
- size = szu * NVME_CMB_SZ(dev->cmbsz);
- offset = szu * NVME_CMB_OFST(dev->cmbloc);
+ size = nvme_cmb_size_unit(dev) * nvme_cmb_size(dev);
+ offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc);
bar = NVME_CMB_BIR(dev->cmbloc);
bar_size = pci_resource_len(pdev, bar);
if (offset > bar_size)
- return NULL;
+ return;
/*
* Controllers may support a CMB size larger than their BAR,
@@ -1694,13 +1697,16 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
if (size > bar_size - offset)
size = bar_size - offset;
- cmb = ioremap_wc(pci_resource_start(pdev, bar) + offset, size);
- if (!cmb)
- return NULL;
-
+ dev->cmb = ioremap_wc(pci_resource_start(pdev, bar) + offset, size);
+ if (!dev->cmb)
+ return;
dev->cmb_bus_addr = pci_bus_address(pdev, bar) + offset;
dev->cmb_size = size;
- return cmb;
+
+ if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
+ &dev_attr_cmb.attr, NULL))
+ dev_warn(dev->ctrl.device,
+ "failed to add sysfs attribute for CMB\n");
}
static inline void nvme_release_cmb(struct nvme_dev *dev)
@@ -1768,7 +1774,7 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
dma_addr_t descs_dma;
int i = 0;
void **bufs;
- u64 size = 0, tmp;
+ u64 size, tmp;
tmp = (preferred + chunk_size - 1);
do_div(tmp, chunk_size);
@@ -1851,7 +1857,7 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
u64 preferred = (u64)dev->ctrl.hmpre * 4096;
u64 min = (u64)dev->ctrl.hmmin * 4096;
u32 enable_bits = NVME_HOST_MEM_ENABLE;
- int ret = 0;
+ int ret;
preferred = min(preferred, max);
if (min > max) {
@@ -1892,7 +1898,7 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
static int nvme_setup_io_queues(struct nvme_dev *dev)
{
- struct nvme_queue *adminq = dev->queues[0];
+ struct nvme_queue *adminq = &dev->queues[0];
struct pci_dev *pdev = to_pci_dev(dev->dev);
int result, nr_io_queues;
unsigned long size;
@@ -1905,7 +1911,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
if (nr_io_queues == 0)
return 0;
- if (dev->cmb && NVME_CMB_SQS(dev->cmbsz)) {
+ if (dev->cmb && (dev->cmbsz & NVME_CMBSZ_SQS)) {
result = nvme_cmb_qdepth(dev, nr_io_queues,
sizeof(struct nvme_command));
if (result > 0)
@@ -2005,9 +2011,9 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
return 0;
}
-static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
+static void nvme_disable_io_queues(struct nvme_dev *dev)
{
- int pass;
+ int pass, queues = dev->online_queues - 1;
unsigned long timeout;
u8 opcode = nvme_admin_delete_sq;
@@ -2018,7 +2024,7 @@ static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
retry:
timeout = ADMIN_TIMEOUT;
for (; i > 0; i--, sent++)
- if (nvme_delete_queue(dev->queues[i], opcode))
+ if (nvme_delete_queue(&dev->queues[i], opcode))
break;
while (sent--) {
@@ -2033,13 +2039,12 @@ static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
}
/*
- * Return: error value if an error occurred setting up the queues or calling
- * Identify Device. 0 if these succeeded, even if adding some of the
- * namespaces failed. At the moment, these failures are silent. TBD which
- * failures should be reported.
+ * Return an error value only when the tagset allocation fails.
*/
static int nvme_dev_add(struct nvme_dev *dev)
{
+ int ret;
+
if (!dev->ctrl.tagset) {
dev->tagset.ops = &nvme_mq_ops;
dev->tagset.nr_hw_queues = dev->online_queues - 1;
@@ -2055,8 +2060,12 @@ static int nvme_dev_add(struct nvme_dev *dev)
dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
dev->tagset.driver_data = dev;
- if (blk_mq_alloc_tag_set(&dev->tagset))
- return 0;
+ ret = blk_mq_alloc_tag_set(&dev->tagset);
+ if (ret) {
+ dev_warn(dev->ctrl.device,
+ "IO queues tagset allocation failed %d\n", ret);
+ return ret;
+ }
dev->ctrl.tagset = &dev->tagset;
nvme_dbbuf_set(dev);
@@ -2122,22 +2131,7 @@ static int nvme_pci_enable(struct nvme_dev *dev)
"set queue depth=%u\n", dev->q_depth);
}
- /*
- * CMBs can currently only exist on >=1.2 PCIe devices. We only
- * populate sysfs if a CMB is implemented. Since nvme_dev_attrs_group
- * has no name we can pass NULL as final argument to
- * sysfs_add_file_to_group.
- */
-
- if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) {
- dev->cmb = nvme_map_cmb(dev);
- if (dev->cmb) {
- if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
- &dev_attr_cmb.attr, NULL))
- dev_warn(dev->ctrl.device,
- "failed to add sysfs attribute for CMB\n");
- }
- }
+ nvme_map_cmb(dev);
pci_enable_pcie_error_reporting(pdev);
pci_save_state(pdev);
@@ -2170,7 +2164,7 @@ static void nvme_pci_disable(struct nvme_dev *dev)
static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
{
- int i, queues;
+ int i;
bool dead = true;
struct pci_dev *pdev = to_pci_dev(dev->dev);
@@ -2205,21 +2199,13 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
}
nvme_stop_queues(&dev->ctrl);
- queues = dev->online_queues - 1;
- for (i = dev->ctrl.queue_count - 1; i > 0; i--)
- nvme_suspend_queue(dev->queues[i]);
-
- if (dead) {
- /* A device might become IO incapable very soon during
- * probe, before the admin queue is configured. Thus,
- * queue_count can be 0 here.
- */
- if (dev->ctrl.queue_count)
- nvme_suspend_queue(dev->queues[0]);
- } else {
- nvme_disable_io_queues(dev, queues);
+ if (!dead) {
+ nvme_disable_io_queues(dev);
nvme_disable_admin_queue(dev, shutdown);
}
+ for (i = dev->ctrl.queue_count - 1; i >= 0; i--)
+ nvme_suspend_queue(&dev->queues[i]);
+
nvme_pci_disable(dev);
blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
@@ -2289,6 +2275,7 @@ static void nvme_reset_work(struct work_struct *work)
container_of(work, struct nvme_dev, ctrl.reset_work);
bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
int result = -ENODEV;
+ enum nvme_ctrl_state new_state = NVME_CTRL_LIVE;
if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING))
goto out;
@@ -2300,6 +2287,16 @@ static void nvme_reset_work(struct work_struct *work)
if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
nvme_dev_disable(dev, false);
+ /*
+ * Mark the controller RECONNECTING, a state introduced by the
+ * nvme-fc/rdma transports, to flag the initialization procedure here.
+ */
+ if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RECONNECTING)) {
+ dev_warn(dev->ctrl.device,
+ "failed to mark controller RECONNECTING\n");
+ goto out;
+ }
+
result = nvme_pci_enable(dev);
if (result)
goto out;
@@ -2352,15 +2349,23 @@ static void nvme_reset_work(struct work_struct *work)
dev_warn(dev->ctrl.device, "IO queues not created\n");
nvme_kill_queues(&dev->ctrl);
nvme_remove_namespaces(&dev->ctrl);
+ new_state = NVME_CTRL_ADMIN_ONLY;
} else {
nvme_start_queues(&dev->ctrl);
nvme_wait_freeze(&dev->ctrl);
- nvme_dev_add(dev);
+ /* hit this only when the tagset allocation fails */
+ if (nvme_dev_add(dev))
+ new_state = NVME_CTRL_ADMIN_ONLY;
nvme_unfreeze(&dev->ctrl);
}
- if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
- dev_warn(dev->ctrl.device, "failed to mark controller live\n");
+ /*
+ * If only the admin queue is live, keep it around for further
+ * investigation or recovery.
+ */
+ if (!nvme_change_ctrl_state(&dev->ctrl, new_state)) {
+ dev_warn(dev->ctrl.device,
+ "failed to mark controller state %d\n", new_state);
goto out;
}
@@ -2468,8 +2473,9 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
if (!dev)
return -ENOMEM;
- dev->queues = kzalloc_node((num_possible_cpus() + 1) * sizeof(void *),
- GFP_KERNEL, node);
+
+ dev->queues = kcalloc_node(num_possible_cpus() + 1,
+ sizeof(struct nvme_queue), GFP_KERNEL, node);
if (!dev->queues)
goto free;
@@ -2496,10 +2502,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (result)
goto release_pools;
- nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING);
dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
- queue_work(nvme_wq, &dev->ctrl.reset_work);
+ nvme_reset_ctrl(&dev->ctrl);
+
return 0;
release_pools:
@@ -2523,7 +2529,7 @@ static void nvme_reset_prepare(struct pci_dev *pdev)
static void nvme_reset_done(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
- nvme_reset_ctrl(&dev->ctrl);
+ nvme_reset_ctrl_sync(&dev->ctrl);
}
static void nvme_shutdown(struct pci_dev *pdev)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 2a0bba7f50cf..2bc059f7d73c 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -66,7 +66,6 @@ struct nvme_rdma_request {
struct ib_sge sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS];
u32 num_sge;
int nents;
- bool inline_data;
struct ib_reg_wr reg_wr;
struct ib_cqe reg_cqe;
struct nvme_rdma_queue *queue;
@@ -1092,7 +1091,6 @@ static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue,
sg->length = cpu_to_le32(sg_dma_len(req->sg_table.sgl));
sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
- req->inline_data = true;
req->num_sge++;
return 0;
}
@@ -1164,7 +1162,6 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
int count, ret;
req->num_sge = 1;
- req->inline_data = false;
refcount_set(&req->ref, 2); /* send and recv completions */
c->common.flags |= NVME_CMD_SGL_METABUF;
@@ -2018,6 +2015,7 @@ out_free_ctrl:
static struct nvmf_transport_ops nvme_rdma_transport = {
.name = "rdma",
+ .module = THIS_MODULE,
.required_opts = NVMF_OPT_TRADDR,
.allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO,
@@ -2040,7 +2038,7 @@ static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data)
}
mutex_unlock(&nvme_rdma_ctrl_mutex);
- flush_workqueue(nvme_wq);
+ flush_workqueue(nvme_delete_wq);
}
static struct ib_client nvme_rdma_ib_client = {
diff --git a/drivers/nvme/host/trace.c b/drivers/nvme/host/trace.c
new file mode 100644
index 000000000000..41944bbef835
--- /dev/null
+++ b/drivers/nvme/host/trace.c
@@ -0,0 +1,130 @@
+/*
+ * NVM Express device driver tracepoints
+ * Copyright (c) 2018 Johannes Thumshirn, SUSE Linux GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <asm/unaligned.h>
+#include "trace.h"
+
+static const char *nvme_trace_create_sq(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u16 sqid = get_unaligned_le16(cdw10);
+ u16 qsize = get_unaligned_le16(cdw10 + 2);
+ u16 sq_flags = get_unaligned_le16(cdw10 + 4);
+ u16 cqid = get_unaligned_le16(cdw10 + 6);
+
+ trace_seq_printf(p, "sqid=%u, qsize=%u, sq_flags=0x%x, cqid=%u",
+ sqid, qsize, sq_flags, cqid);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
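+/*
+ * With hypothetical values, the decoder above renders something like
+ * "sqid=1, qsize=1023, sq_flags=0x1, cqid=1" into the trace buffer.
+ */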
+
+static const char *nvme_trace_create_cq(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u16 cqid = get_unaligned_le16(cdw10);
+ u16 qsize = get_unaligned_le16(cdw10 + 2);
+ u16 cq_flags = get_unaligned_le16(cdw10 + 4);
+ u16 irq_vector = get_unaligned_le16(cdw10 + 6);
+
+ trace_seq_printf(p, "cqid=%u, qsize=%u, cq_flags=0x%x, irq_vector=%u",
+ cqid, qsize, cq_flags, irq_vector);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_admin_identify(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 cns = cdw10[0];
+ u16 ctrlid = get_unaligned_le16(cdw10 + 2);
+
+ trace_seq_printf(p, "cns=%u, ctrlid=%u", cns, ctrlid);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
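+/*
+ * Note: cdw10 below points at the SQE's CDW10..CDW15 block, so slba
+ * spans CDW10/11, length and control share CDW12, dsmgmt is CDW13 and
+ * reftag is CDW14.
+ */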
+static const char *nvme_trace_read_write(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u64 slba = get_unaligned_le64(cdw10);
+ u16 length = get_unaligned_le16(cdw10 + 8);
+ u16 control = get_unaligned_le16(cdw10 + 10);
+ u32 dsmgmt = get_unaligned_le32(cdw10 + 12);
+ u32 reftag = get_unaligned_le32(cdw10 + 16);
+
+ trace_seq_printf(p,
+ "slba=%llu, len=%u, ctrl=0x%x, dsmgmt=%u, reftag=%u",
+ slba, length, control, dsmgmt, reftag);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_dsm(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "nr=%u, attributes=%u",
+ get_unaligned_le32(cdw10),
+ get_unaligned_le32(cdw10 + 4));
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_common(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "cdw10=%*ph", 24, cdw10);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+const char *nvme_trace_parse_admin_cmd(struct trace_seq *p,
+ u8 opcode, u8 *cdw10)
+{
+ switch (opcode) {
+ case nvme_admin_create_sq:
+ return nvme_trace_create_sq(p, cdw10);
+ case nvme_admin_create_cq:
+ return nvme_trace_create_cq(p, cdw10);
+ case nvme_admin_identify:
+ return nvme_trace_admin_identify(p, cdw10);
+ default:
+ return nvme_trace_common(p, cdw10);
+ }
+}
+
+const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p,
+ u8 opcode, u8 *cdw10)
+{
+ switch (opcode) {
+ case nvme_cmd_read:
+ case nvme_cmd_write:
+ case nvme_cmd_write_zeroes:
+ return nvme_trace_read_write(p, cdw10);
+ case nvme_cmd_dsm:
+ return nvme_trace_dsm(p, cdw10);
+ default:
+ return nvme_trace_common(p, cdw10);
+ }
+}
diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
new file mode 100644
index 000000000000..ea91fccd1bc0
--- /dev/null
+++ b/drivers/nvme/host/trace.h
@@ -0,0 +1,165 @@
+/*
+ * NVM Express device driver tracepoints
+ * Copyright (c) 2018 Johannes Thumshirn, SUSE Linux GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM nvme
+
+#if !defined(_TRACE_NVME_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_NVME_H
+
+#include <linux/nvme.h>
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include "nvme.h"
+
+#define nvme_admin_opcode_name(opcode) { opcode, #opcode }
+#define show_admin_opcode_name(val) \
+ __print_symbolic(val, \
+ nvme_admin_opcode_name(nvme_admin_delete_sq), \
+ nvme_admin_opcode_name(nvme_admin_create_sq), \
+ nvme_admin_opcode_name(nvme_admin_get_log_page), \
+ nvme_admin_opcode_name(nvme_admin_delete_cq), \
+ nvme_admin_opcode_name(nvme_admin_create_cq), \
+ nvme_admin_opcode_name(nvme_admin_identify), \
+ nvme_admin_opcode_name(nvme_admin_abort_cmd), \
+ nvme_admin_opcode_name(nvme_admin_set_features), \
+ nvme_admin_opcode_name(nvme_admin_get_features), \
+ nvme_admin_opcode_name(nvme_admin_async_event), \
+ nvme_admin_opcode_name(nvme_admin_ns_mgmt), \
+ nvme_admin_opcode_name(nvme_admin_activate_fw), \
+ nvme_admin_opcode_name(nvme_admin_download_fw), \
+ nvme_admin_opcode_name(nvme_admin_ns_attach), \
+ nvme_admin_opcode_name(nvme_admin_keep_alive), \
+ nvme_admin_opcode_name(nvme_admin_directive_send), \
+ nvme_admin_opcode_name(nvme_admin_directive_recv), \
+ nvme_admin_opcode_name(nvme_admin_dbbuf), \
+ nvme_admin_opcode_name(nvme_admin_format_nvm), \
+ nvme_admin_opcode_name(nvme_admin_security_send), \
+ nvme_admin_opcode_name(nvme_admin_security_recv), \
+ nvme_admin_opcode_name(nvme_admin_sanitize_nvm))
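+
+/*
+ * __print_symbolic() resolves the numeric opcode to its enum name when
+ * the trace buffer is read, e.g. opcode 0x06 prints as nvme_admin_identify.
+ */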
+
+const char *nvme_trace_parse_admin_cmd(struct trace_seq *p, u8 opcode,
+ u8 *cdw10);
+#define __parse_nvme_admin_cmd(opcode, cdw10) \
+ nvme_trace_parse_admin_cmd(p, opcode, cdw10)
+
+#define nvme_opcode_name(opcode) { opcode, #opcode }
+#define show_opcode_name(val) \
+ __print_symbolic(val, \
+ nvme_opcode_name(nvme_cmd_flush), \
+ nvme_opcode_name(nvme_cmd_write), \
+ nvme_opcode_name(nvme_cmd_read), \
+ nvme_opcode_name(nvme_cmd_write_uncor), \
+ nvme_opcode_name(nvme_cmd_compare), \
+ nvme_opcode_name(nvme_cmd_write_zeroes), \
+ nvme_opcode_name(nvme_cmd_dsm), \
+ nvme_opcode_name(nvme_cmd_resv_register), \
+ nvme_opcode_name(nvme_cmd_resv_report), \
+ nvme_opcode_name(nvme_cmd_resv_acquire), \
+ nvme_opcode_name(nvme_cmd_resv_release))
+
+const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p, u8 opcode,
+ u8 *cdw10);
+#define __parse_nvme_cmd(opcode, cdw10) \
+ nvme_trace_parse_nvm_cmd(p, opcode, cdw10)
+
+TRACE_EVENT(nvme_setup_admin_cmd,
+ TP_PROTO(struct nvme_command *cmd),
+ TP_ARGS(cmd),
+ TP_STRUCT__entry(
+ __field(u8, opcode)
+ __field(u8, flags)
+ __field(u16, cid)
+ __field(u64, metadata)
+ __array(u8, cdw10, 24)
+ ),
+ TP_fast_assign(
+ __entry->opcode = cmd->common.opcode;
+ __entry->flags = cmd->common.flags;
+ __entry->cid = cmd->common.command_id;
+ __entry->metadata = le64_to_cpu(cmd->common.metadata);
+ memcpy(__entry->cdw10, cmd->common.cdw10,
+ sizeof(__entry->cdw10));
+ ),
+ TP_printk(" cmdid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
+ __entry->cid, __entry->flags, __entry->metadata,
+ show_admin_opcode_name(__entry->opcode),
+ __parse_nvme_admin_cmd(__entry->opcode, __entry->cdw10))
+);
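+
+/*
+ * Typical usage, assuming tracefs is mounted at /sys/kernel/debug/tracing:
+ * echo 1 > /sys/kernel/debug/tracing/events/nvme/nvme_setup_admin_cmd/enable
+ * cat /sys/kernel/debug/tracing/trace_pipe
+ */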
+
+TRACE_EVENT(nvme_setup_nvm_cmd,
+ TP_PROTO(int qid, struct nvme_command *cmd),
+ TP_ARGS(qid, cmd),
+ TP_STRUCT__entry(
+ __field(int, qid)
+ __field(u8, opcode)
+ __field(u8, flags)
+ __field(u16, cid)
+ __field(u32, nsid)
+ __field(u64, metadata)
+ __array(u8, cdw10, 24)
+ ),
+ TP_fast_assign(
+ __entry->qid = qid;
+ __entry->opcode = cmd->common.opcode;
+ __entry->flags = cmd->common.flags;
+ __entry->cid = cmd->common.command_id;
+ __entry->nsid = le32_to_cpu(cmd->common.nsid);
+ __entry->metadata = le64_to_cpu(cmd->common.metadata);
+ memcpy(__entry->cdw10, cmd->common.cdw10,
+ sizeof(__entry->cdw10));
+ ),
+ TP_printk("qid=%d, nsid=%u, cmdid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
+ __entry->qid, __entry->nsid, __entry->cid,
+ __entry->flags, __entry->metadata,
+ show_opcode_name(__entry->opcode),
+ __parse_nvme_cmd(__entry->opcode, __entry->cdw10))
+);
+
+TRACE_EVENT(nvme_complete_rq,
+ TP_PROTO(struct request *req),
+ TP_ARGS(req),
+ TP_STRUCT__entry(
+ __field(int, qid)
+ __field(int, cid)
+ __field(u64, result)
+ __field(u8, retries)
+ __field(u8, flags)
+ __field(u16, status)
+ ),
+ TP_fast_assign(
+ __entry->qid = req->q->id;
+ __entry->cid = req->tag;
+ __entry->result = le64_to_cpu(nvme_req(req)->result.u64);
+ __entry->retries = nvme_req(req)->retries;
+ __entry->flags = nvme_req(req)->flags;
+ __entry->status = nvme_req(req)->status;
+ ),
+ TP_printk("cmdid=%u, qid=%d, res=%llu, retries=%u, flags=0x%x, status=%u",
+ __entry->cid, __entry->qid, __entry->result,
+ __entry->retries, __entry->flags, __entry->status)
+);
+
+#endif /* _TRACE_NVME_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index 03e4ab65fe77..5f4f8b16685f 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -29,6 +29,7 @@ config NVME_TARGET_RDMA
tristate "NVMe over Fabrics RDMA target support"
depends on INFINIBAND
depends on NVME_TARGET
+ select SGL_ALLOC
help
This enables the NVMe RDMA target support, which allows exporting NVMe
devices over RDMA.
@@ -39,6 +40,7 @@ config NVME_TARGET_FC
tristate "NVMe over Fabrics FC target driver"
depends on NVME_TARGET
depends on HAS_DMA
+ select SGL_ALLOC
help
This enables the NVMe FC target support, which allows exporting NVMe
devices over FC.
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b54748ad5f48..0bd737117a80 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -512,6 +512,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
req->sg_cnt = 0;
req->transfer_len = 0;
req->rsp->status = 0;
+ req->ns = NULL;
/* no support for fused commands yet */
if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
@@ -557,6 +558,8 @@ EXPORT_SYMBOL_GPL(nvmet_req_init);
void nvmet_req_uninit(struct nvmet_req *req)
{
percpu_ref_put(&req->sq->ref);
+ if (req->ns)
+ nvmet_put_namespace(req->ns);
}
EXPORT_SYMBOL_GPL(nvmet_req_uninit);
@@ -830,7 +833,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
/* Don't accept keep-alive timeout for discovery controllers */
if (kato) {
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
- goto out_free_sqs;
+ goto out_remove_ida;
}
/*
@@ -860,6 +863,8 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
*ctrlp = ctrl;
return 0;
+out_remove_ida:
+ ida_simple_remove(&cntlid_ida, ctrl->cntlid);
out_free_sqs:
kfree(ctrl->sqs);
out_free_cqs:
@@ -877,21 +882,22 @@ static void nvmet_ctrl_free(struct kref *ref)
struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
struct nvmet_subsys *subsys = ctrl->subsys;
- nvmet_stop_keep_alive_timer(ctrl);
-
mutex_lock(&subsys->lock);
list_del(&ctrl->subsys_entry);
mutex_unlock(&subsys->lock);
+ nvmet_stop_keep_alive_timer(ctrl);
+
flush_work(&ctrl->async_event_work);
cancel_work_sync(&ctrl->fatal_err_work);
ida_simple_remove(&cntlid_ida, ctrl->cntlid);
- nvmet_subsys_put(subsys);
kfree(ctrl->sqs);
kfree(ctrl->cqs);
kfree(ctrl);
+
+ nvmet_subsys_put(subsys);
}
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index db3bf6b8bf9e..19e9e42ae943 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -225,7 +225,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
goto out_ctrl_put;
}
- pr_info("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
+ pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
out:
kfree(d);
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 5fd86039e353..9b39a6cb1935 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1697,31 +1697,12 @@ static int
nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
{
struct scatterlist *sg;
- struct page *page;
unsigned int nent;
- u32 page_len, length;
- int i = 0;
- length = fod->req.transfer_len;
- nent = DIV_ROUND_UP(length, PAGE_SIZE);
- sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
+ sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent);
if (!sg)
goto out;
- sg_init_table(sg, nent);
-
- while (length) {
- page_len = min_t(u32, length, PAGE_SIZE);
-
- page = alloc_page(GFP_KERNEL);
- if (!page)
- goto out_free_pages;
-
- sg_set_page(&sg[i], page, page_len, 0);
- length -= page_len;
- i++;
- }
-
fod->data_sg = sg;
fod->data_sg_cnt = nent;
fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
@@ -1731,14 +1712,6 @@ nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
return 0;
-out_free_pages:
- while (i > 0) {
- i--;
- __free_page(sg_page(&sg[i]));
- }
- kfree(sg);
- fod->data_sg = NULL;
- fod->data_sg_cnt = 0;
out:
return NVME_SC_INTERNAL;
}
@@ -1746,18 +1719,13 @@ out:
static void
nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
{
- struct scatterlist *sg;
- int count;
-
if (!fod->data_sg || !fod->data_sg_cnt)
return;
fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
((fod->io_dir == NVMET_FCP_WRITE) ?
DMA_FROM_DEVICE : DMA_TO_DEVICE));
- for_each_sg(fod->data_sg, sg, fod->data_sg_cnt, count)
- __free_page(sg_page(sg));
- kfree(fod->data_sg);
+ sgl_free(fod->data_sg);
fod->data_sg = NULL;
fod->data_sg_cnt = 0;
}
@@ -2522,14 +2490,8 @@ nvmet_fc_add_port(struct nvmet_port *port)
list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
if ((tgtport->fc_target_port.node_name == traddr.nn) &&
(tgtport->fc_target_port.port_name == traddr.pn)) {
- /* a FC port can only be 1 nvmet port id */
- if (!tgtport->port) {
- tgtport->port = port;
- port->priv = tgtport;
- nvmet_fc_tgtport_get(tgtport);
- ret = 0;
- } else
- ret = -EALREADY;
+ tgtport->port = port;
+ ret = 0;
break;
}
}
@@ -2540,19 +2502,7 @@ nvmet_fc_add_port(struct nvmet_port *port)
static void
nvmet_fc_remove_port(struct nvmet_port *port)
{
- struct nvmet_fc_tgtport *tgtport = port->priv;
- unsigned long flags;
- bool matched = false;
-
- spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
- if (tgtport->port == port) {
- matched = true;
- tgtport->port = NULL;
- }
- spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
-
- if (matched)
- nvmet_fc_tgtport_put(tgtport);
+ /* nothing to do */
}
static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 6a018a0bd6ce..34712def81b1 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -204,6 +204,10 @@ struct fcloop_lport {
struct completion unreg_done;
};
+struct fcloop_lport_priv {
+ struct fcloop_lport *lport;
+};
+
struct fcloop_rport {
struct nvme_fc_remote_port *remoteport;
struct nvmet_fc_target_port *targetport;
@@ -238,21 +242,32 @@ struct fcloop_lsreq {
int status;
};
+enum {
+ INI_IO_START = 0,
+ INI_IO_ACTIVE = 1,
+ INI_IO_ABORTED = 2,
+ INI_IO_COMPLETED = 3,
+};
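+
+/*
+ * inistate transitions, taken under tfcp_req->reqlock: START -> ACTIVE
+ * once fcp_rcv_work has delivered the command; START or ACTIVE -> ABORTED
+ * when a host abort races in first; ACTIVE -> COMPLETED when the target
+ * io is done.
+ */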
+
struct fcloop_fcpreq {
struct fcloop_tport *tport;
struct nvmefc_fcp_req *fcpreq;
spinlock_t reqlock;
u16 status;
+ u32 inistate;
bool active;
bool aborted;
- struct work_struct work;
+ struct kref ref;
+ struct work_struct fcp_rcv_work;
+ struct work_struct abort_rcv_work;
+ struct work_struct tio_done_work;
struct nvmefc_tgt_fcp_req tgt_fcp_req;
};
struct fcloop_ini_fcpreq {
struct nvmefc_fcp_req *fcpreq;
struct fcloop_fcpreq *tfcp_req;
- struct work_struct iniwork;
+ spinlock_t inilock;
};
static inline struct fcloop_lsreq *
@@ -343,17 +358,122 @@ fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
return 0;
}
-/*
- * FCP IO operation done by initiator abort.
- * call back up initiator "done" flows.
- */
static void
-fcloop_tgt_fcprqst_ini_done_work(struct work_struct *work)
+fcloop_tfcp_req_free(struct kref *ref)
{
- struct fcloop_ini_fcpreq *inireq =
- container_of(work, struct fcloop_ini_fcpreq, iniwork);
+ struct fcloop_fcpreq *tfcp_req =
+ container_of(ref, struct fcloop_fcpreq, ref);
+
+ kfree(tfcp_req);
+}
+
+static void
+fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
+{
+ kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
+}
+
+static int
+fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
+{
+ return kref_get_unless_zero(&tfcp_req->ref);
+}
+
+static void
+fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
+ struct fcloop_fcpreq *tfcp_req, int status)
+{
+ struct fcloop_ini_fcpreq *inireq = NULL;
+
+ if (fcpreq) {
+ inireq = fcpreq->private;
+ spin_lock(&inireq->inilock);
+ inireq->tfcp_req = NULL;
+ spin_unlock(&inireq->inilock);
+
+ fcpreq->status = status;
+ fcpreq->done(fcpreq);
+ }
+
+ /* release original io reference on tgt struct */
+ fcloop_tfcp_req_put(tfcp_req);
+}
+
+static void
+fcloop_fcp_recv_work(struct work_struct *work)
+{
+ struct fcloop_fcpreq *tfcp_req =
+ container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
+ struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
+ int ret = 0;
+ bool aborted = false;
+
+ spin_lock(&tfcp_req->reqlock);
+ switch (tfcp_req->inistate) {
+ case INI_IO_START:
+ tfcp_req->inistate = INI_IO_ACTIVE;
+ break;
+ case INI_IO_ABORTED:
+ aborted = true;
+ break;
+ default:
+ spin_unlock(&tfcp_req->reqlock);
+ WARN_ON(1);
+ return;
+ }
+ spin_unlock(&tfcp_req->reqlock);
+
+ if (unlikely(aborted))
+ ret = -ECANCELED;
+ else
+ ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
+ &tfcp_req->tgt_fcp_req,
+ fcpreq->cmdaddr, fcpreq->cmdlen);
+ if (ret)
+ fcloop_call_host_done(fcpreq, tfcp_req, ret);
+
+ return;
+}
+
+static void
+fcloop_fcp_abort_recv_work(struct work_struct *work)
+{
+ struct fcloop_fcpreq *tfcp_req =
+ container_of(work, struct fcloop_fcpreq, abort_rcv_work);
+ struct nvmefc_fcp_req *fcpreq;
+ bool completed = false;
+
+ spin_lock(&tfcp_req->reqlock);
+ fcpreq = tfcp_req->fcpreq;
+ switch (tfcp_req->inistate) {
+ case INI_IO_ABORTED:
+ break;
+ case INI_IO_COMPLETED:
+ completed = true;
+ break;
+ default:
+ spin_unlock(&tfcp_req->reqlock);
+ WARN_ON(1);
+ return;
+ }
+ spin_unlock(&tfcp_req->reqlock);
+
+ if (unlikely(completed)) {
+ /* remove reference taken in original abort downcall */
+ fcloop_tfcp_req_put(tfcp_req);
+ return;
+ }
- inireq->fcpreq->done(inireq->fcpreq);
+ if (tfcp_req->tport->targetport)
+ nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
+ &tfcp_req->tgt_fcp_req);
+
+ spin_lock(&tfcp_req->reqlock);
+ tfcp_req->fcpreq = NULL;
+ spin_unlock(&tfcp_req->reqlock);
+
+ fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
+ /* call_host_done releases reference for abort downcall */
}
/*
@@ -364,20 +484,15 @@ static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
struct fcloop_fcpreq *tfcp_req =
- container_of(work, struct fcloop_fcpreq, work);
- struct fcloop_tport *tport = tfcp_req->tport;
+ container_of(work, struct fcloop_fcpreq, tio_done_work);
struct nvmefc_fcp_req *fcpreq;
spin_lock(&tfcp_req->reqlock);
fcpreq = tfcp_req->fcpreq;
+ tfcp_req->inistate = INI_IO_COMPLETED;
spin_unlock(&tfcp_req->reqlock);
- if (tport->remoteport && fcpreq) {
- fcpreq->status = tfcp_req->status;
- fcpreq->done(fcpreq);
- }
-
- kfree(tfcp_req);
+ fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
}
@@ -390,7 +505,6 @@ fcloop_fcp_req(struct nvme_fc_local_port *localport,
struct fcloop_rport *rport = remoteport->private;
struct fcloop_ini_fcpreq *inireq = fcpreq->private;
struct fcloop_fcpreq *tfcp_req;
- int ret = 0;
if (!rport->targetport)
return -ECONNREFUSED;
@@ -401,16 +515,20 @@ fcloop_fcp_req(struct nvme_fc_local_port *localport,
inireq->fcpreq = fcpreq;
inireq->tfcp_req = tfcp_req;
- INIT_WORK(&inireq->iniwork, fcloop_tgt_fcprqst_ini_done_work);
+ spin_lock_init(&inireq->inilock);
+
tfcp_req->fcpreq = fcpreq;
tfcp_req->tport = rport->targetport->private;
+ tfcp_req->inistate = INI_IO_START;
spin_lock_init(&tfcp_req->reqlock);
- INIT_WORK(&tfcp_req->work, fcloop_tgt_fcprqst_done_work);
+ INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
+ INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
+ INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
+ kref_init(&tfcp_req->ref);
- ret = nvmet_fc_rcv_fcp_req(rport->targetport, &tfcp_req->tgt_fcp_req,
- fcpreq->cmdaddr, fcpreq->cmdlen);
+ schedule_work(&tfcp_req->fcp_rcv_work);
- return ret;
+ return 0;
}
static void
@@ -589,7 +707,7 @@ fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
{
struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
- schedule_work(&tfcp_req->work);
+ schedule_work(&tfcp_req->tio_done_work);
}
static void
@@ -605,27 +723,47 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
void *hw_queue_handle,
struct nvmefc_fcp_req *fcpreq)
{
- struct fcloop_rport *rport = remoteport->private;
struct fcloop_ini_fcpreq *inireq = fcpreq->private;
- struct fcloop_fcpreq *tfcp_req = inireq->tfcp_req;
+ struct fcloop_fcpreq *tfcp_req;
+ bool abortio = true;
+
+ spin_lock(&inireq->inilock);
+ tfcp_req = inireq->tfcp_req;
+ if (tfcp_req)
+ fcloop_tfcp_req_get(tfcp_req);
+ spin_unlock(&inireq->inilock);
if (!tfcp_req)
/* abort has already been called */
return;
- if (rport->targetport)
- nvmet_fc_rcv_fcp_abort(rport->targetport,
- &tfcp_req->tgt_fcp_req);
-
/* break initiator/target relationship for io */
spin_lock(&tfcp_req->reqlock);
- inireq->tfcp_req = NULL;
- tfcp_req->fcpreq = NULL;
+ switch (tfcp_req->inistate) {
+ case INI_IO_START:
+ case INI_IO_ACTIVE:
+ tfcp_req->inistate = INI_IO_ABORTED;
+ break;
+ case INI_IO_COMPLETED:
+ abortio = false;
+ break;
+ default:
+ spin_unlock(&tfcp_req->reqlock);
+ WARN_ON(1);
+ return;
+ }
spin_unlock(&tfcp_req->reqlock);
- /* post the aborted io completion */
- fcpreq->status = -ECANCELED;
- schedule_work(&inireq->iniwork);
+ if (abortio)
+ /* leave the reference while the work item is scheduled */
+ WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
+ else {
+ /*
+ * the io has already had its done callback made, so there is
+ * nothing more to do; just release the reference taken above
+ */
+ fcloop_tfcp_req_put(tfcp_req);
+ }
}
static void
@@ -657,7 +795,8 @@ fcloop_nport_get(struct fcloop_nport *nport)
static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
- struct fcloop_lport *lport = localport->private;
+ struct fcloop_lport_priv *lport_priv = localport->private;
+ struct fcloop_lport *lport = lport_priv->lport;
/* release any threads waiting for the unreg to complete */
complete(&lport->unreg_done);
@@ -697,7 +836,7 @@ static struct nvme_fc_port_template fctemplate = {
.max_dif_sgl_segments = FCLOOP_SGL_SEGS,
.dma_boundary = FCLOOP_DMABOUND_4G,
/* sizes of additional private data for data structures */
- .local_priv_sz = sizeof(struct fcloop_lport),
+ .local_priv_sz = sizeof(struct fcloop_lport_priv),
.remote_priv_sz = sizeof(struct fcloop_rport),
.lsrqst_priv_sz = sizeof(struct fcloop_lsreq),
.fcprqst_priv_sz = sizeof(struct fcloop_ini_fcpreq),
@@ -714,8 +853,7 @@ static struct nvmet_fc_target_template tgttemplate = {
.max_dif_sgl_segments = FCLOOP_SGL_SEGS,
.dma_boundary = FCLOOP_DMABOUND_4G,
/* optional features */
- .target_features = NVMET_FCTGTFEAT_CMD_IN_ISR |
- NVMET_FCTGTFEAT_OPDONE_IN_ISR,
+ .target_features = 0,
/* sizes of additional private data for data structures */
.target_priv_sz = sizeof(struct fcloop_tport),
};
@@ -728,11 +866,17 @@ fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
struct fcloop_ctrl_options *opts;
struct nvme_fc_local_port *localport;
struct fcloop_lport *lport;
- int ret;
+ struct fcloop_lport_priv *lport_priv;
+ unsigned long flags;
+ int ret = -ENOMEM;
+
+ lport = kzalloc(sizeof(*lport), GFP_KERNEL);
+ if (!lport)
+ return -ENOMEM;
opts = kzalloc(sizeof(*opts), GFP_KERNEL);
if (!opts)
- return -ENOMEM;
+ goto out_free_lport;
ret = fcloop_parse_options(opts, buf);
if (ret)
@@ -752,23 +896,25 @@ fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
if (!ret) {
- unsigned long flags;
-
/* success */
- lport = localport->private;
+ lport_priv = localport->private;
+ lport_priv->lport = lport;
+
lport->localport = localport;
INIT_LIST_HEAD(&lport->lport_list);
spin_lock_irqsave(&fcloop_lock, flags);
list_add_tail(&lport->lport_list, &fcloop_lports);
spin_unlock_irqrestore(&fcloop_lock, flags);
-
- /* mark all of the input buffer consumed */
- ret = count;
}
out_free_opts:
kfree(opts);
+out_free_lport:
+ /* free only if we're going to fail */
+ if (ret)
+ kfree(lport);
+
return ret ? ret : count;
}
@@ -790,6 +936,8 @@ __wait_localport_unreg(struct fcloop_lport *lport)
wait_for_completion(&lport->unreg_done);
+ kfree(lport);
+
return ret;
}
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 1e21b286f299..7991ec3a17db 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -686,6 +686,7 @@ static struct nvmet_fabrics_ops nvme_loop_ops = {
static struct nvmf_transport_ops nvme_loop_transport = {
.name = "loop",
+ .module = THIS_MODULE,
.create_ctrl = nvme_loop_create_ctrl,
};
@@ -716,7 +717,7 @@ static void __exit nvme_loop_cleanup_module(void)
nvme_delete_ctrl(&ctrl->ctrl);
mutex_unlock(&nvme_loop_ctrl_mutex);
- flush_workqueue(nvme_wq);
+ flush_workqueue(nvme_delete_wq);
}
module_init(nvme_loop_init_module);
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 49912909c298..978e169c11bf 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -185,59 +185,6 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
}
-static void nvmet_rdma_free_sgl(struct scatterlist *sgl, unsigned int nents)
-{
- struct scatterlist *sg;
- int count;
-
- if (!sgl || !nents)
- return;
-
- for_each_sg(sgl, sg, nents, count)
- __free_page(sg_page(sg));
- kfree(sgl);
-}
-
-static int nvmet_rdma_alloc_sgl(struct scatterlist **sgl, unsigned int *nents,
- u32 length)
-{
- struct scatterlist *sg;
- struct page *page;
- unsigned int nent;
- int i = 0;
-
- nent = DIV_ROUND_UP(length, PAGE_SIZE);
- sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
- if (!sg)
- goto out;
-
- sg_init_table(sg, nent);
-
- while (length) {
- u32 page_len = min_t(u32, length, PAGE_SIZE);
-
- page = alloc_page(GFP_KERNEL);
- if (!page)
- goto out_free_pages;
-
- sg_set_page(&sg[i], page, page_len, 0);
- length -= page_len;
- i++;
- }
- *sgl = sg;
- *nents = nent;
- return 0;
-
-out_free_pages:
- while (i > 0) {
- i--;
- __free_page(sg_page(&sg[i]));
- }
- kfree(sg);
-out:
- return NVME_SC_INTERNAL;
-}
-
static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
struct nvmet_rdma_cmd *c, bool admin)
{
@@ -484,7 +431,7 @@ static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
}
if (rsp->req.sg != &rsp->cmd->inline_sg)
- nvmet_rdma_free_sgl(rsp->req.sg, rsp->req.sg_cnt);
+ sgl_free(rsp->req.sg);
if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
nvmet_rdma_process_wr_wait_list(queue);
@@ -621,16 +568,14 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
u32 len = get_unaligned_le24(sgl->length);
u32 key = get_unaligned_le32(sgl->key);
int ret;
- u16 status;
/* no data command? */
if (!len)
return 0;
- status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt,
- len);
- if (status)
- return status;
+ rsp->req.sg = sgl_alloc(len, GFP_KERNEL, &rsp->req.sg_cnt);
+ if (!rsp->req.sg)
+ return NVME_SC_INTERNAL;
ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
@@ -976,7 +921,7 @@ static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
{
- pr_info("freeing queue %d\n", queue->idx);
+ pr_debug("freeing queue %d\n", queue->idx);
nvmet_sq_destroy(&queue->nvme_sq);
@@ -1558,25 +1503,9 @@ err_ib_client:
static void __exit nvmet_rdma_exit(void)
{
- struct nvmet_rdma_queue *queue;
-
nvmet_unregister_transport(&nvmet_rdma_ops);
-
- flush_scheduled_work();
-
- mutex_lock(&nvmet_rdma_queue_mutex);
- while ((queue = list_first_entry_or_null(&nvmet_rdma_queue_list,
- struct nvmet_rdma_queue, queue_list))) {
- list_del_init(&queue->queue_list);
-
- mutex_unlock(&nvmet_rdma_queue_mutex);
- __nvmet_rdma_queue_disconnect(queue);
- mutex_lock(&nvmet_rdma_queue_mutex);
- }
- mutex_unlock(&nvmet_rdma_queue_mutex);
-
- flush_scheduled_work();
ib_unregister_client(&nvmet_rdma_ib_client);
+ WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));
ida_destroy(&nvmet_rdma_queue_ida);
}
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 26618ba8f92a..a9d6fe86585b 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -316,6 +316,32 @@ struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
EXPORT_SYMBOL(of_get_cpu_node);
/**
+ * of_cpu_node_to_id: Get the logical CPU number for a given device_node
+ *
+ * @cpu_node: Pointer to the device_node for CPU.
+ *
+ * Returns the logical CPU number of the given CPU device_node.
+ * Returns -ENODEV if the CPU is not found.
+ */
+int of_cpu_node_to_id(struct device_node *cpu_node)
+{
+ int cpu;
+ bool found = false;
+ struct device_node *np;
+
+ for_each_possible_cpu(cpu) {
+ np = of_cpu_device_node_get(cpu);
+ found = (cpu_node == np);
+ of_node_put(np);
+ if (found)
+ return cpu;
+ }
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL(of_cpu_node_to_id);
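+
+/*
+ * A typical caller, as a sketch: resolve CPU phandles from a device node
+ * (e.g. a PMU's "cpus" property) and feed each referenced CPU node to
+ * of_cpu_node_to_id() to build up a cpumask.
+ */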
+
+/**
* __of_device_is_compatible() - Check if the node matches given constraints
* @device: pointer to node
* @compat: required compatible string, NULL or "" for any match
diff --git a/drivers/of/property.c b/drivers/of/property.c
index 8ad33a44a7b8..f25d36358187 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -981,10 +981,18 @@ static int of_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
return 0;
}
+static void *
+of_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
+ const struct device *dev)
+{
+ return (void *)of_device_get_match_data(dev);
+}
+
const struct fwnode_operations of_fwnode_ops = {
.get = of_fwnode_get,
.put = of_fwnode_put,
.device_is_available = of_fwnode_device_is_available,
+ .device_get_match_data = of_fwnode_device_get_match_data,
.property_present = of_fwnode_property_present,
.property_read_int_array = of_fwnode_property_read_int_array,
.property_read_string_array = of_fwnode_property_read_string_array,
diff --git a/drivers/opp/Makefile b/drivers/opp/Makefile
index e70ceb406fe9..6ce6aefacc81 100644
--- a/drivers/opp/Makefile
+++ b/drivers/opp/Makefile
@@ -2,3 +2,4 @@ ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
obj-y += core.o cpu.o
obj-$(CONFIG_OF) += of.o
obj-$(CONFIG_DEBUG_FS) += debugfs.o
+obj-$(CONFIG_ARM_TI_CPUFREQ) += ti-opp-supply.o
diff --git a/drivers/opp/ti-opp-supply.c b/drivers/opp/ti-opp-supply.c
new file mode 100644
index 000000000000..370eff3acd8a
--- /dev/null
+++ b/drivers/opp/ti-opp-supply.c
@@ -0,0 +1,425 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016-2017 Texas Instruments Incorporated - http://www.ti.com/
+ * Nishanth Menon <nm@ti.com>
+ * Dave Gerlach <d-gerlach@ti.com>
+ *
+ * TI OPP supply driver that provides override into the regulator control
+ * for generic opp core to handle devices with ABB regulator and/or
+ * SmartReflex Class0.
+ */
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+/**
+ * struct ti_opp_supply_optimum_voltage_table - optimized voltage table
+ * @reference_uv: reference voltage (usually Nominal voltage)
+ * @optimized_uv: Optimized voltage from efuse
+ */
+struct ti_opp_supply_optimum_voltage_table {
+ unsigned int reference_uv;
+ unsigned int optimized_uv;
+};
+
+/**
+ * struct ti_opp_supply_data - OMAP specific opp supply data
+ * @vdd_table: Optimized voltage mapping table
+ * @num_vdd_table: number of entries in vdd_table
+ * @vdd_absolute_max_voltage_uv: absolute maximum voltage in uV for the supply
+ */
+struct ti_opp_supply_data {
+ struct ti_opp_supply_optimum_voltage_table *vdd_table;
+ u32 num_vdd_table;
+ u32 vdd_absolute_max_voltage_uv;
+};
+
+static struct ti_opp_supply_data opp_data;
+
+/**
+ * struct ti_opp_supply_of_data - device tree match data
+ * @flags: specific type of opp supply
+ * @efuse_voltage_mask: mask required for efuse register representing voltage
+ * @efuse_voltage_uv: Are the efuse entries in micro-volts? If not, assume
+ * milli-volts.
+ */
+struct ti_opp_supply_of_data {
+#define OPPDM_EFUSE_CLASS0_OPTIMIZED_VOLTAGE BIT(1)
+#define OPPDM_HAS_NO_ABB BIT(2)
+ const u8 flags;
+ const u32 efuse_voltage_mask;
+ const bool efuse_voltage_uv;
+};
+
+/**
+ * _store_optimized_voltages() - store optimized voltages
+ * @dev: ti opp supply device for which we need to store info
+ * @data: data specific to the device
+ *
+ * Picks up efuse based optimized voltages for VDD unique per device and
+ * stores it in internal data structure for use during transition requests.
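+ *
+ * The "ti,efuse-settings" property is parsed as <reference-uV efuse-offset>
+ * pairs, for example (hypothetical values):
+ * ti,efuse-settings = <1060000 0x0>, <1160000 0x4>;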
+ *
+ * Return: If successful, 0, else appropriate error value.
+ */
+static int _store_optimized_voltages(struct device *dev,
+ struct ti_opp_supply_data *data)
+{
+ void __iomem *base;
+ struct property *prop;
+ struct resource *res;
+ const __be32 *val;
+ int proplen, i;
+ int ret = 0;
+ struct ti_opp_supply_optimum_voltage_table *table;
+ const struct ti_opp_supply_of_data *of_data = dev_get_drvdata(dev);
+
+ /* pick up Efuse based voltages */
+ res = platform_get_resource(to_platform_device(dev), IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "Unable to get IO resource\n");
+ ret = -ENODEV;
+ goto out_map;
+ }
+
+ base = ioremap_nocache(res->start, resource_size(res));
+ if (!base) {
+ dev_err(dev, "Unable to map Efuse registers\n");
+ ret = -ENOMEM;
+ goto out_map;
+ }
+
+ /* Fetch efuse-settings. */
+ prop = of_find_property(dev->of_node, "ti,efuse-settings", NULL);
+ if (!prop) {
+ dev_err(dev, "No 'ti,efuse-settings' property found\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ proplen = prop->length / sizeof(int);
+ data->num_vdd_table = proplen / 2;
+ /* Verify for corrupted OPP entries in dt */
+ if (data->num_vdd_table * 2 * sizeof(int) != prop->length) {
+ dev_err(dev, "Invalid 'ti,efuse-settings'\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = of_property_read_u32(dev->of_node, "ti,absolute-max-voltage-uv",
+ &data->vdd_absolute_max_voltage_uv);
+ if (ret) {
+ dev_err(dev, "ti,absolute-max-voltage-uv is missing\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ table = kzalloc(sizeof(*data->vdd_table) *
+ data->num_vdd_table, GFP_KERNEL);
+ if (!table) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ data->vdd_table = table;
+
+ val = prop->value;
+ for (i = 0; i < data->num_vdd_table; i++, table++) {
+ u32 efuse_offset;
+ u32 tmp;
+
+ table->reference_uv = be32_to_cpup(val++);
+ efuse_offset = be32_to_cpup(val++);
+
+ tmp = readl(base + efuse_offset);
+ tmp &= of_data->efuse_voltage_mask;
+ tmp >>= __ffs(of_data->efuse_voltage_mask);
+
+ table->optimized_uv = of_data->efuse_voltage_uv ? tmp :
+ tmp * 1000;
+
+ dev_dbg(dev, "[%d] efuse=0x%08x volt_table=%d vset=%d\n",
+ i, efuse_offset, table->reference_uv,
+ table->optimized_uv);
+
+ /*
+ * Some older samples might not have an optimized efuse value;
+ * use the reference voltage for those and just emit a debug
+ * message.
+ */
+ if (!table->optimized_uv) {
+ dev_dbg(dev, "[%d] efuse=0x%08x volt_table=%d:vset0\n",
+ i, efuse_offset, table->reference_uv);
+ table->optimized_uv = table->reference_uv;
+ }
+ }
+out:
+ iounmap(base);
+out_map:
+ return ret;
+}
+
+/**
+ * _free_optimized_voltages() - free resources for optvoltages
+ * @dev: device for which we need to free info
+ * @data: data specific to the device
+ */
+static void _free_optimized_voltages(struct device *dev,
+ struct ti_opp_supply_data *data)
+{
+ kfree(data->vdd_table);
+ data->vdd_table = NULL;
+ data->num_vdd_table = 0;
+}
+
+/**
+ * _get_optimal_vdd_voltage() - Finds optimal voltage for the supply
+ * @dev: device for which we need to find info
+ * @data: data specific to the device
+ * @reference_uv: reference voltage (OPP voltage) for which we need the value
+ *
+ * Return: the optimized voltage if a match is found, else reference_uv;
+ * reference_uv is also returned when no optimization is needed.
+ */
+static int _get_optimal_vdd_voltage(struct device *dev,
+ struct ti_opp_supply_data *data,
+ int reference_uv)
+{
+ int i;
+ struct ti_opp_supply_optimum_voltage_table *table;
+
+ if (!data->num_vdd_table)
+ return reference_uv;
+
+ table = data->vdd_table;
+ if (!table)
+ return -EINVAL;
+
+ /* Find an exact match - this list is usually very small */
+ for (i = 0; i < data->num_vdd_table; i++, table++)
+ if (table->reference_uv == reference_uv)
+ return table->optimized_uv;
+
+ /* If things are screwed up, we'd make a mess on the console, so ratelimit */
+ dev_err_ratelimited(dev, "%s: Failed optimized voltage match for %d\n",
+ __func__, reference_uv);
+ return reference_uv;
+}
+
+static int _opp_set_voltage(struct device *dev,
+ struct dev_pm_opp_supply *supply,
+ int new_target_uv, struct regulator *reg,
+ char *reg_name)
+{
+ int ret;
+ unsigned long vdd_uv, uv_max;
+
+ if (new_target_uv)
+ vdd_uv = new_target_uv;
+ else
+ vdd_uv = supply->u_volt;
+
+ /*
+ * If an absolute max voltage is specified, use it instead, to allow
+ * for ganged voltage rails. For example, if the max for an OPP is set
+ * to 1.12V while the absolute max is 1.5V, another rail could never
+ * reach 1.25V as long as this regulator is constrained to a max of
+ * 1.12V, even though it can function at 1.25V.
+ */
+ if (opp_data.vdd_absolute_max_voltage_uv)
+ uv_max = opp_data.vdd_absolute_max_voltage_uv;
+ else
+ uv_max = supply->u_volt_max;
+
+ if (vdd_uv > uv_max ||
+ vdd_uv < supply->u_volt_min ||
+ supply->u_volt_min > uv_max) {
+ dev_warn(dev,
+ "Invalid range voltages [Min:%lu target:%lu Max:%lu]\n",
+ supply->u_volt_min, vdd_uv, uv_max);
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "%s scaling to %luuV[min %luuV max %luuV]\n", reg_name,
+ vdd_uv, supply->u_volt_min,
+ uv_max);
+
+ ret = regulator_set_voltage_triplet(reg,
+ supply->u_volt_min,
+ vdd_uv,
+ uv_max);
+ if (ret) {
+ dev_err(dev, "%s failed for %luuV[min %luuV max %luuV]\n",
+ reg_name, vdd_uv, supply->u_volt_min,
+ uv_max);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ti_opp_supply_set_opp() - do the opp supply transition
+ * @data: information on regulators and new and old opps provided by
+ * opp core to use in transition
+ *
+ * Return: If successful, 0, else appropriate error value.
+ */
+static int ti_opp_supply_set_opp(struct dev_pm_set_opp_data *data)
+{
+ struct dev_pm_opp_supply *old_supply_vdd = &data->old_opp.supplies[0];
+ struct dev_pm_opp_supply *old_supply_vbb = &data->old_opp.supplies[1];
+ struct dev_pm_opp_supply *new_supply_vdd = &data->new_opp.supplies[0];
+ struct dev_pm_opp_supply *new_supply_vbb = &data->new_opp.supplies[1];
+ struct device *dev = data->dev;
+ unsigned long old_freq = data->old_opp.rate, freq = data->new_opp.rate;
+ struct clk *clk = data->clk;
+ struct regulator *vdd_reg = data->regulators[0];
+ struct regulator *vbb_reg = data->regulators[1];
+ int vdd_uv;
+ int ret;
+
+ vdd_uv = _get_optimal_vdd_voltage(dev, &opp_data,
+ new_supply_vbb->u_volt);
+
+ /* Scaling up? Scale voltage before frequency */
+ if (freq > old_freq) {
+ ret = _opp_set_voltage(dev, new_supply_vdd, vdd_uv, vdd_reg,
+ "vdd");
+ if (ret)
+ goto restore_voltage;
+
+ ret = _opp_set_voltage(dev, new_supply_vbb, 0, vbb_reg, "vbb");
+ if (ret)
+ goto restore_voltage;
+ }
+
+ /* Change frequency */
+ dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n",
+ __func__, old_freq, freq);
+
+ ret = clk_set_rate(clk, freq);
+ if (ret) {
+ dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
+ ret);
+ goto restore_voltage;
+ }
+
+ /* Scaling down? Scale voltage after frequency */
+ if (freq < old_freq) {
+ ret = _opp_set_voltage(dev, new_supply_vbb, 0, vbb_reg, "vbb");
+ if (ret)
+ goto restore_freq;
+
+ ret = _opp_set_voltage(dev, new_supply_vdd, vdd_uv, vdd_reg,
+ "vdd");
+ if (ret)
+ goto restore_freq;
+ }
+
+ return 0;
+
+restore_freq:
+ ret = clk_set_rate(clk, old_freq);
+ if (ret)
+ dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
+ __func__, old_freq);
+restore_voltage:
+ /* This shouldn't harm even if the voltages weren't updated earlier */
+ if (old_supply_vdd->u_volt) {
+ ret = _opp_set_voltage(dev, old_supply_vbb, 0, vbb_reg, "vbb");
+ if (ret)
+ return ret;
+
+ ret = _opp_set_voltage(dev, old_supply_vdd, 0, vdd_reg,
+ "vdd");
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static const struct ti_opp_supply_of_data omap_generic_of_data = {
+};
+
+static const struct ti_opp_supply_of_data omap_omap5_of_data = {
+ .flags = OPPDM_EFUSE_CLASS0_OPTIMIZED_VOLTAGE,
+ .efuse_voltage_mask = 0xFFF,
+ .efuse_voltage_uv = false,
+};
+
+static const struct ti_opp_supply_of_data omap_omap5core_of_data = {
+ .flags = OPPDM_EFUSE_CLASS0_OPTIMIZED_VOLTAGE | OPPDM_HAS_NO_ABB,
+ .efuse_voltage_mask = 0xFFF,
+ .efuse_voltage_uv = false,
+};
+
+static const struct of_device_id ti_opp_supply_of_match[] = {
+ {.compatible = "ti,omap-opp-supply", .data = &omap_generic_of_data},
+ {.compatible = "ti,omap5-opp-supply", .data = &omap_omap5_of_data},
+ {.compatible = "ti,omap5-core-opp-supply",
+ .data = &omap_omap5core_of_data},
+ {},
+};
+MODULE_DEVICE_TABLE(of, ti_opp_supply_of_match);
+
+static int ti_opp_supply_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device *cpu_dev = get_cpu_device(0);
+ const struct of_device_id *match;
+ const struct ti_opp_supply_of_data *of_data;
+ int ret = 0;
+
+ match = of_match_device(ti_opp_supply_of_match, dev);
+ if (!match) {
+ /* We do not expect this to happen */
+ dev_err(dev, "%s: Unable to match device\n", __func__);
+ return -ENODEV;
+ }
+ if (!match->data) {
+ /* Again, unlikely... but mistakes do happen */
+ dev_err(dev, "%s: Bad data in match\n", __func__);
+ return -EINVAL;
+ }
+ of_data = match->data;
+
+ dev_set_drvdata(dev, (void *)of_data);
+
+ /* If we need optimized voltage */
+ if (of_data->flags & OPPDM_EFUSE_CLASS0_OPTIMIZED_VOLTAGE) {
+ ret = _store_optimized_voltages(dev, &opp_data);
+ if (ret)
+ return ret;
+ }
+
+ ret = PTR_ERR_OR_ZERO(dev_pm_opp_register_set_opp_helper(cpu_dev,
+ ti_opp_supply_set_opp));
+ if (ret)
+ _free_optimized_voltages(dev, &opp_data);
+
+ return ret;
+}
+
+static struct platform_driver ti_opp_supply_driver = {
+ .probe = ti_opp_supply_probe,
+ .driver = {
+ .name = "ti_opp_supply",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(ti_opp_supply_of_match),
+ },
+};
+module_platform_driver(ti_opp_supply_driver);
+
+MODULE_DESCRIPTION("Texas Instruments OMAP OPP Supply driver");
+MODULE_AUTHOR("Texas Instruments Inc.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 14fd865a5120..5958c8dda4e3 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -699,7 +699,7 @@ static void pci_pm_complete(struct device *dev)
pm_generic_complete(dev);
/* Resume device if platform firmware has put it in reset-power-on */
- if (dev->power.direct_complete && pm_resume_via_firmware()) {
+ if (pm_runtime_suspended(dev) && pm_resume_via_firmware()) {
pci_power_t pre_sleep_state = pci_dev->current_state;
pci_update_current_state(pci_dev, pci_dev->current_state);
@@ -783,8 +783,10 @@ static int pci_pm_suspend_noirq(struct device *dev)
struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- if (dev_pm_smart_suspend_and_suspended(dev))
+ if (dev_pm_smart_suspend_and_suspended(dev)) {
+ dev->power.may_skip_resume = true;
return 0;
+ }
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_suspend_late(dev, PMSG_SUSPEND);
@@ -838,6 +840,16 @@ static int pci_pm_suspend_noirq(struct device *dev)
Fixup:
pci_fixup_device(pci_fixup_suspend_late, pci_dev);
+ /*
+ * If the target system sleep state is suspend-to-idle, it is sufficient
+ * to check whether or not the device's wakeup settings are good for
+ * runtime PM. Otherwise, the pm_resume_via_firmware() check will cause
+ * pci_pm_complete() to take care of fixing up the device's state
+ * anyway, if need be.
+ */
+ dev->power.may_skip_resume = device_may_wakeup(dev) ||
+ !device_can_wakeup(dev);
+
return 0;
}
@@ -847,6 +859,9 @@ static int pci_pm_resume_noirq(struct device *dev)
struct device_driver *drv = dev->driver;
int error = 0;
+ if (dev_pm_may_skip_resume(dev))
+ return 0;
+
/*
* Devices with DPM_FLAG_SMART_SUSPEND may be left in runtime suspend
* during system suspend, so update their runtime PM status to "active"
@@ -953,7 +968,7 @@ static int pci_pm_freeze_late(struct device *dev)
if (dev_pm_smart_suspend_and_suspended(dev))
return 0;
- return pm_generic_freeze_late(dev);;
+ return pm_generic_freeze_late(dev);
}
static int pci_pm_freeze_noirq(struct device *dev)
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index ffbf4e723527..fb1c1bb87316 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -150,6 +150,9 @@ static int pcie_portdrv_probe(struct pci_dev *dev,
pci_save_state(dev);
+ dev_pm_set_driver_flags(&dev->dev, DPM_FLAG_SMART_SUSPEND |
+ DPM_FLAG_LEAVE_SUSPENDED);
+
if (pci_bridge_d3_possible(dev)) {
/*
* Keep the port resumed 100ms to make sure things like
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index 730cc897b94d..fab143a17f9c 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -510,11 +510,11 @@ out:
return -EBADMSG;
}
-static unsigned int switchtec_dev_poll(struct file *filp, poll_table *wait)
+static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait)
{
struct switchtec_user *stuser = filp->private_data;
struct switchtec_dev *stdev = stuser->stdev;
- int ret = 0;
+ __poll_t ret = 0;
poll_wait(filp, &stuser->comp.wait, wait);
poll_wait(filp, &stdev->event_wq, wait);
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index b8f44b068fc6..da5724cd89cf 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -17,6 +17,15 @@ config ARM_PMU_ACPI
depends on ARM_PMU && ACPI
def_bool y
+config ARM_DSU_PMU
+ tristate "ARM DynamIQ Shared Unit (DSU) PMU"
+ depends on ARM64
+ help
+ Provides support for the performance monitor unit in the ARM
+ DynamIQ Shared Unit (DSU). The DSU integrates one or more cores
+ with an L3 memory system and control logic. The PMU allows
+ counting various events related to the DSU.
+
config HISI_PMU
bool "HiSilicon SoC PMU"
depends on ARM64 && ACPI
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index 710a0135bd61..c2f27419bdf0 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_ARM_DSU_PMU) += arm_dsu_pmu.o
obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o
obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o
obj-$(CONFIG_HISI_PMU) += hisilicon/
diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c
new file mode 100644
index 000000000000..93c50e377507
--- /dev/null
+++ b/drivers/perf/arm_dsu_pmu.c
@@ -0,0 +1,843 @@
+/*
+ * ARM DynamIQ Shared Unit (DSU) PMU driver
+ *
+ * Copyright (C) ARM Limited, 2017.
+ *
+ * Based on ARM CCI-PMU, ARMv8 PMU-v3 drivers.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#define PMUNAME "arm_dsu"
+#define DRVNAME PMUNAME "_pmu"
+#define pr_fmt(fmt) DRVNAME ": " fmt
+
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+#include <asm/arm_dsu_pmu.h>
+#include <asm/local64.h>
+
+/* PMU event codes */
+#define DSU_PMU_EVT_CYCLES 0x11
+#define DSU_PMU_EVT_CHAIN 0x1e
+
+#define DSU_PMU_MAX_COMMON_EVENTS 0x40
+
+#define DSU_PMU_MAX_HW_CNTRS 32
+#define DSU_PMU_HW_COUNTER_MASK (DSU_PMU_MAX_HW_CNTRS - 1)
+
+#define CLUSTERPMCR_E BIT(0)
+#define CLUSTERPMCR_P BIT(1)
+#define CLUSTERPMCR_C BIT(2)
+#define CLUSTERPMCR_N_SHIFT 11
+#define CLUSTERPMCR_N_MASK 0x1f
+#define CLUSTERPMCR_IDCODE_SHIFT 16
+#define CLUSTERPMCR_IDCODE_MASK 0xff
+#define CLUSTERPMCR_IMP_SHIFT 24
+#define CLUSTERPMCR_IMP_MASK 0xff
+#define CLUSTERPMCR_RES_MASK 0x7e8
+#define CLUSTERPMCR_RES_VAL 0x40
+
+#define DSU_ACTIVE_CPU_MASK 0x0
+#define DSU_ASSOCIATED_CPU_MASK 0x1
+
+/*
+ * We use the index of the counters as they appear in the counter
+ * bit maps in the PMU registers (e.g. CLUSTERPMSELR), i.e.:
+ * counter 0 - Bit 0
+ * counter 1 - Bit 1
+ * ...
+ * Cycle counter - Bit 31
+ */
+#define DSU_PMU_IDX_CYCLE_COUNTER 31
+
+/* All event counters are 32-bit, with a 64-bit cycle counter */
+#define DSU_PMU_COUNTER_WIDTH(idx) \
+ (((idx) == DSU_PMU_IDX_CYCLE_COUNTER) ? 64 : 32)
+
+#define DSU_PMU_COUNTER_MASK(idx) \
+ GENMASK_ULL((DSU_PMU_COUNTER_WIDTH((idx)) - 1), 0)
+
+#define DSU_EXT_ATTR(_name, _func, _config) \
+ (&((struct dev_ext_attribute[]) { \
+ { \
+ .attr = __ATTR(_name, 0444, _func, NULL), \
+ .var = (void *)_config \
+ } \
+ })[0].attr.attr)
+
+#define DSU_EVENT_ATTR(_name, _config) \
+ DSU_EXT_ATTR(_name, dsu_pmu_sysfs_event_show, (unsigned long)_config)
+
+#define DSU_FORMAT_ATTR(_name, _config) \
+ DSU_EXT_ATTR(_name, dsu_pmu_sysfs_format_show, (char *)_config)
+
+#define DSU_CPUMASK_ATTR(_name, _config) \
+ DSU_EXT_ATTR(_name, dsu_pmu_cpumask_show, (unsigned long)_config)
+
+struct dsu_hw_events {
+ DECLARE_BITMAP(used_mask, DSU_PMU_MAX_HW_CNTRS);
+ struct perf_event *events[DSU_PMU_MAX_HW_CNTRS];
+};
+
+/*
+ * struct dsu_pmu - DSU PMU descriptor
+ *
+ * @pmu_lock : Protects accesses to DSU PMU register from normal vs
+ * interrupt handler contexts.
+ * @hw_events : Holds the event counter state.
+ * @associated_cpus : CPUs attached to the DSU.
+ * @active_cpu : CPU to which the PMU is bound for accesses.
+ * @cpuhp_node : Node for CPU hotplug notifier link.
+ * @num_counters : Number of event counters implemented by the PMU,
+ * excluding the cycle counter.
+ * @irq : Interrupt line for counter overflow.
+ * @cpmceid_bitmap : Bitmap for the availability of architected common
+ * events (event_code < 0x40).
+ */
+struct dsu_pmu {
+ struct pmu pmu;
+ struct device *dev;
+ raw_spinlock_t pmu_lock;
+ struct dsu_hw_events hw_events;
+ cpumask_t associated_cpus;
+ cpumask_t active_cpu;
+ struct hlist_node cpuhp_node;
+ s8 num_counters;
+ int irq;
+ DECLARE_BITMAP(cpmceid_bitmap, DSU_PMU_MAX_COMMON_EVENTS);
+};
+
+static unsigned long dsu_pmu_cpuhp_state;
+
+static inline struct dsu_pmu *to_dsu_pmu(struct pmu *pmu)
+{
+ return container_of(pmu, struct dsu_pmu, pmu);
+}
+
+static ssize_t dsu_pmu_sysfs_event_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *eattr = container_of(attr,
+ struct dev_ext_attribute, attr);
+ return snprintf(buf, PAGE_SIZE, "event=0x%lx\n",
+ (unsigned long)eattr->var);
+}
+
+static ssize_t dsu_pmu_sysfs_format_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *eattr = container_of(attr,
+ struct dev_ext_attribute, attr);
+ return snprintf(buf, PAGE_SIZE, "%s\n", (char *)eattr->var);
+}
+
+static ssize_t dsu_pmu_cpumask_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pmu *pmu = dev_get_drvdata(dev);
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(pmu);
+ struct dev_ext_attribute *eattr = container_of(attr,
+ struct dev_ext_attribute, attr);
+ unsigned long mask_id = (unsigned long)eattr->var;
+ const cpumask_t *cpumask;
+
+ switch (mask_id) {
+ case DSU_ACTIVE_CPU_MASK:
+ cpumask = &dsu_pmu->active_cpu;
+ break;
+ case DSU_ASSOCIATED_CPU_MASK:
+ cpumask = &dsu_pmu->associated_cpus;
+ break;
+ default:
+ return 0;
+ }
+ return cpumap_print_to_pagebuf(true, buf, cpumask);
+}
+
+static struct attribute *dsu_pmu_format_attrs[] = {
+ DSU_FORMAT_ATTR(event, "config:0-31"),
+ NULL,
+};
+
+static const struct attribute_group dsu_pmu_format_attr_group = {
+ .name = "format",
+ .attrs = dsu_pmu_format_attrs,
+};
+
+static struct attribute *dsu_pmu_event_attrs[] = {
+ DSU_EVENT_ATTR(cycles, 0x11),
+ DSU_EVENT_ATTR(bus_access, 0x19),
+ DSU_EVENT_ATTR(memory_error, 0x1a),
+ DSU_EVENT_ATTR(bus_cycles, 0x1d),
+ DSU_EVENT_ATTR(l3d_cache_allocate, 0x29),
+ DSU_EVENT_ATTR(l3d_cache_refill, 0x2a),
+ DSU_EVENT_ATTR(l3d_cache, 0x2b),
+ DSU_EVENT_ATTR(l3d_cache_wb, 0x2c),
+ NULL,
+};
+
+static umode_t
+dsu_pmu_event_attr_is_visible(struct kobject *kobj, struct attribute *attr,
+ int unused)
+{
+ struct pmu *pmu = dev_get_drvdata(kobj_to_dev(kobj));
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(pmu);
+ struct dev_ext_attribute *eattr = container_of(attr,
+ struct dev_ext_attribute, attr.attr);
+ unsigned long evt = (unsigned long)eattr->var;
+
+ return test_bit(evt, dsu_pmu->cpmceid_bitmap) ? attr->mode : 0;
+}
+
+static const struct attribute_group dsu_pmu_events_attr_group = {
+ .name = "events",
+ .attrs = dsu_pmu_event_attrs,
+ .is_visible = dsu_pmu_event_attr_is_visible,
+};
+
+static struct attribute *dsu_pmu_cpumask_attrs[] = {
+ DSU_CPUMASK_ATTR(cpumask, DSU_ACTIVE_CPU_MASK),
+ DSU_CPUMASK_ATTR(associated_cpus, DSU_ASSOCIATED_CPU_MASK),
+ NULL,
+};
+
+static const struct attribute_group dsu_pmu_cpumask_attr_group = {
+ .attrs = dsu_pmu_cpumask_attrs,
+};
+
+static const struct attribute_group *dsu_pmu_attr_groups[] = {
+ &dsu_pmu_cpumask_attr_group,
+ &dsu_pmu_events_attr_group,
+ &dsu_pmu_format_attr_group,
+ NULL,
+};
+
+static int dsu_pmu_get_online_cpu_any_but(struct dsu_pmu *dsu_pmu, int cpu)
+{
+ struct cpumask online_supported;
+
+ cpumask_and(&online_supported,
+ &dsu_pmu->associated_cpus, cpu_online_mask);
+ return cpumask_any_but(&online_supported, cpu);
+}
+
+static inline bool dsu_pmu_counter_valid(struct dsu_pmu *dsu_pmu, u32 idx)
+{
+ return (idx < dsu_pmu->num_counters) ||
+ (idx == DSU_PMU_IDX_CYCLE_COUNTER);
+}
+
+static inline u64 dsu_pmu_read_counter(struct perf_event *event)
+{
+ u64 val;
+ unsigned long flags;
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
+ int idx = event->hw.idx;
+
+ if (WARN_ON(!cpumask_test_cpu(smp_processor_id(),
+ &dsu_pmu->associated_cpus)))
+ return 0;
+
+ if (!dsu_pmu_counter_valid(dsu_pmu, idx)) {
+ dev_err(event->pmu->dev,
+ "Trying reading invalid counter %d\n", idx);
+ return 0;
+ }
+
+ raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags);
+ if (idx == DSU_PMU_IDX_CYCLE_COUNTER)
+ val = __dsu_pmu_read_pmccntr();
+ else
+ val = __dsu_pmu_read_counter(idx);
+ raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags);
+
+ return val;
+}
+
+static void dsu_pmu_write_counter(struct perf_event *event, u64 val)
+{
+ unsigned long flags;
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
+ int idx = event->hw.idx;
+
+ if (WARN_ON(!cpumask_test_cpu(smp_processor_id(),
+ &dsu_pmu->associated_cpus)))
+ return;
+
+ if (!dsu_pmu_counter_valid(dsu_pmu, idx)) {
+ dev_err(event->pmu->dev,
+ "writing to invalid counter %d\n", idx);
+ return;
+ }
+
+ raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags);
+ if (idx == DSU_PMU_IDX_CYCLE_COUNTER)
+ __dsu_pmu_write_pmccntr(val);
+ else
+ __dsu_pmu_write_counter(idx, val);
+ raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags);
+}
+
+static int dsu_pmu_get_event_idx(struct dsu_hw_events *hw_events,
+ struct perf_event *event)
+{
+ int idx;
+ unsigned long evtype = event->attr.config;
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
+ unsigned long *used_mask = hw_events->used_mask;
+
+ if (evtype == DSU_PMU_EVT_CYCLES) {
+ if (test_and_set_bit(DSU_PMU_IDX_CYCLE_COUNTER, used_mask))
+ return -EAGAIN;
+ return DSU_PMU_IDX_CYCLE_COUNTER;
+ }
+
+ idx = find_first_zero_bit(used_mask, dsu_pmu->num_counters);
+ if (idx >= dsu_pmu->num_counters)
+ return -EAGAIN;
+ set_bit(idx, hw_events->used_mask);
+ return idx;
+}
+
+static void dsu_pmu_enable_counter(struct dsu_pmu *dsu_pmu, int idx)
+{
+ __dsu_pmu_counter_interrupt_enable(idx);
+ __dsu_pmu_enable_counter(idx);
+}
+
+static void dsu_pmu_disable_counter(struct dsu_pmu *dsu_pmu, int idx)
+{
+ __dsu_pmu_disable_counter(idx);
+ __dsu_pmu_counter_interrupt_disable(idx);
+}
+
+static inline void dsu_pmu_set_event(struct dsu_pmu *dsu_pmu,
+ struct perf_event *event)
+{
+ int idx = event->hw.idx;
+ unsigned long flags;
+
+ if (!dsu_pmu_counter_valid(dsu_pmu, idx)) {
+ dev_err(event->pmu->dev,
+ "Trying to set invalid counter %d\n", idx);
+ return;
+ }
+
+ raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags);
+ __dsu_pmu_set_event(idx, event->hw.config_base);
+ raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags);
+}
+
+static void dsu_pmu_event_update(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ u64 delta, prev_count, new_count;
+
+ do {
+ /* We may also be called from the irq handler */
+ prev_count = local64_read(&hwc->prev_count);
+ new_count = dsu_pmu_read_counter(event);
+ } while (local64_cmpxchg(&hwc->prev_count, prev_count, new_count) !=
+ prev_count);
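+ /* Mask the delta to the counter width to handle wraparound of the 32-bit event counters. */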
+ delta = (new_count - prev_count) & DSU_PMU_COUNTER_MASK(hwc->idx);
+ local64_add(delta, &event->count);
+}
+
+static void dsu_pmu_read(struct perf_event *event)
+{
+ dsu_pmu_event_update(event);
+}
+
+static inline u32 dsu_pmu_get_reset_overflow(void)
+{
+ return __dsu_pmu_get_reset_overflow();
+}
+
+/**
+ * dsu_pmu_set_event_period: Set the period for the counter.
+ *
+ * All DSU PMU event counters, except the cycle counter, are 32-bit
+ * counters. To handle cases of extreme interrupt latency, we program
+ * each counter with half of its maximum count.
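+ * For example, a 32-bit event counter is programmed to 0x7fffffff,
+ * i.e. half of its range.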
+ */
+static void dsu_pmu_set_event_period(struct perf_event *event)
+{
+ int idx = event->hw.idx;
+ u64 val = DSU_PMU_COUNTER_MASK(idx) >> 1;
+
+ local64_set(&event->hw.prev_count, val);
+ dsu_pmu_write_counter(event, val);
+}
+
+static irqreturn_t dsu_pmu_handle_irq(int irq_num, void *dev)
+{
+ int i;
+ bool handled = false;
+ struct dsu_pmu *dsu_pmu = dev;
+ struct dsu_hw_events *hw_events = &dsu_pmu->hw_events;
+ unsigned long overflow;
+
+ overflow = dsu_pmu_get_reset_overflow();
+ if (!overflow)
+ return IRQ_NONE;
+
+ for_each_set_bit(i, &overflow, DSU_PMU_MAX_HW_CNTRS) {
+ struct perf_event *event = hw_events->events[i];
+
+ if (!event)
+ continue;
+ dsu_pmu_event_update(event);
+ dsu_pmu_set_event_period(event);
+ handled = true;
+ }
+
+ return IRQ_RETVAL(handled);
+}
+
+static void dsu_pmu_start(struct perf_event *event, int pmu_flags)
+{
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
+
+ /* We always reprogram the counter */
+ if (pmu_flags & PERF_EF_RELOAD)
+ WARN_ON(!(event->hw.state & PERF_HES_UPTODATE));
+ dsu_pmu_set_event_period(event);
+ if (event->hw.idx != DSU_PMU_IDX_CYCLE_COUNTER)
+ dsu_pmu_set_event(dsu_pmu, event);
+ event->hw.state = 0;
+ dsu_pmu_enable_counter(dsu_pmu, event->hw.idx);
+}
+
+static void dsu_pmu_stop(struct perf_event *event, int pmu_flags)
+{
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
+
+ if (event->hw.state & PERF_HES_STOPPED)
+ return;
+ dsu_pmu_disable_counter(dsu_pmu, event->hw.idx);
+ dsu_pmu_event_update(event);
+ event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+}
+
+static int dsu_pmu_add(struct perf_event *event, int flags)
+{
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
+ struct dsu_hw_events *hw_events = &dsu_pmu->hw_events;
+ struct hw_perf_event *hwc = &event->hw;
+ int idx;
+
+ if (WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(),
+ &dsu_pmu->associated_cpus)))
+ return -ENOENT;
+
+ idx = dsu_pmu_get_event_idx(hw_events, event);
+ if (idx < 0)
+ return idx;
+
+ hwc->idx = idx;
+ hw_events->events[idx] = event;
+ hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+
+ if (flags & PERF_EF_START)
+ dsu_pmu_start(event, PERF_EF_RELOAD);
+
+ perf_event_update_userpage(event);
+ return 0;
+}
+
+static void dsu_pmu_del(struct perf_event *event, int flags)
+{
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
+ struct dsu_hw_events *hw_events = &dsu_pmu->hw_events;
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ dsu_pmu_stop(event, PERF_EF_UPDATE);
+ hw_events->events[idx] = NULL;
+ clear_bit(idx, hw_events->used_mask);
+ perf_event_update_userpage(event);
+}
+
+static void dsu_pmu_enable(struct pmu *pmu)
+{
+ u32 pmcr;
+ unsigned long flags;
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(pmu);
+
+ /* If no counters are added, skip enabling the PMU */
+ if (bitmap_empty(dsu_pmu->hw_events.used_mask, DSU_PMU_MAX_HW_CNTRS))
+ return;
+
+ raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags);
+ pmcr = __dsu_pmu_read_pmcr();
+ pmcr |= CLUSTERPMCR_E;
+ __dsu_pmu_write_pmcr(pmcr);
+ raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags);
+}
+
+static void dsu_pmu_disable(struct pmu *pmu)
+{
+ u32 pmcr;
+ unsigned long flags;
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(pmu);
+
+ raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags);
+ pmcr = __dsu_pmu_read_pmcr();
+ pmcr &= ~CLUSTERPMCR_E;
+ __dsu_pmu_write_pmcr(pmcr);
+ raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags);
+}
+
+static bool dsu_pmu_validate_event(struct pmu *pmu,
+ struct dsu_hw_events *hw_events,
+ struct perf_event *event)
+{
+ if (is_software_event(event))
+ return true;
+ /* Reject groups spanning multiple HW PMUs. */
+ if (event->pmu != pmu)
+ return false;
+ return dsu_pmu_get_event_idx(hw_events, event) >= 0;
+}
+
+/*
+ * Make sure the group of events can be scheduled at once
+ * on the PMU.
+ */
+static bool dsu_pmu_validate_group(struct perf_event *event)
+{
+ struct perf_event *sibling, *leader = event->group_leader;
+ struct dsu_hw_events fake_hw;
+
+ if (event->group_leader == event)
+ return true;
+
+ memset(fake_hw.used_mask, 0, sizeof(fake_hw.used_mask));
+ if (!dsu_pmu_validate_event(event->pmu, &fake_hw, leader))
+ return false;
+ list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+ if (!dsu_pmu_validate_event(event->pmu, &fake_hw, sibling))
+ return false;
+ }
+ return dsu_pmu_validate_event(event->pmu, &fake_hw, event);
+}
+
+static int dsu_pmu_event_init(struct perf_event *event)
+{
+ struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu);
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /* We don't support sampling */
+ if (is_sampling_event(event)) {
+ dev_dbg(dsu_pmu->pmu.dev, "Can't support sampling events\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* We cannot support task bound events */
+ if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK) {
+ dev_dbg(dsu_pmu->pmu.dev, "Can't support per-task counters\n");
+ return -EINVAL;
+ }
+
+ if (has_branch_stack(event) ||
+ event->attr.exclude_user ||
+ event->attr.exclude_kernel ||
+ event->attr.exclude_hv ||
+ event->attr.exclude_idle ||
+ event->attr.exclude_host ||
+ event->attr.exclude_guest) {
+ dev_dbg(dsu_pmu->pmu.dev, "Can't support filtering\n");
+ return -EINVAL;
+ }
+
+ if (!cpumask_test_cpu(event->cpu, &dsu_pmu->associated_cpus)) {
+ dev_dbg(dsu_pmu->pmu.dev,
+ "Requested cpu is not associated with the DSU\n");
+ return -EINVAL;
+ }
+ /*
+ * Choose the current active CPU to read the events. We don't want
+ * to migrate the event contexts, irq handling, etc. to the requested
+ * CPU. As long as the requested CPU is within the same DSU, we
+ * are fine.
+ */
+ event->cpu = cpumask_first(&dsu_pmu->active_cpu);
+ if (event->cpu >= nr_cpu_ids)
+ return -EINVAL;
+ if (!dsu_pmu_validate_group(event))
+ return -EINVAL;
+
+ event->hw.config_base = event->attr.config;
+ return 0;
+}
+
+static struct dsu_pmu *dsu_pmu_alloc(struct platform_device *pdev)
+{
+ struct dsu_pmu *dsu_pmu;
+
+ dsu_pmu = devm_kzalloc(&pdev->dev, sizeof(*dsu_pmu), GFP_KERNEL);
+ if (!dsu_pmu)
+ return ERR_PTR(-ENOMEM);
+
+ raw_spin_lock_init(&dsu_pmu->pmu_lock);
+ /*
+ * Initialise the number of counters to -1, until we probe
+ * the real number on a connected CPU.
+ */
+ dsu_pmu->num_counters = -1;
+ return dsu_pmu;
+}
+
+/**
+ * dsu_pmu_dt_get_cpus: Get the list of CPUs in the cluster.
+ */
+static int dsu_pmu_dt_get_cpus(struct device_node *dev, cpumask_t *mask)
+{
+ int i = 0, n, cpu;
+ struct device_node *cpu_node;
+
+ n = of_count_phandle_with_args(dev, "cpus", NULL);
+ if (n <= 0)
+ return -ENODEV;
+ for (; i < n; i++) {
+ cpu_node = of_parse_phandle(dev, "cpus", i);
+ if (!cpu_node)
+ break;
+ cpu = of_cpu_node_to_id(cpu_node);
+ of_node_put(cpu_node);
+ /*
+ * We have to ignore the failures here and continue scanning
+ * the list to handle cases where nr_cpus could be capped
+ * in the running kernel.
+ */
+ if (cpu < 0)
+ continue;
+ cpumask_set_cpu(cpu, mask);
+ }
+ return 0;
+}
+
+/*
+ * dsu_pmu_probe_pmu: Probe the PMU details on a CPU in the cluster.
+ */
+static void dsu_pmu_probe_pmu(struct dsu_pmu *dsu_pmu)
+{
+ u64 num_counters;
+ u32 cpmceid[2];
+
+ num_counters = (__dsu_pmu_read_pmcr() >> CLUSTERPMCR_N_SHIFT) &
+ CLUSTERPMCR_N_MASK;
+ /* We can only support up to 31 independent counters */
+ if (WARN_ON(num_counters > 31))
+ num_counters = 31;
+ dsu_pmu->num_counters = num_counters;
+ if (!dsu_pmu->num_counters)
+ return;
+ cpmceid[0] = __dsu_pmu_read_pmceid(0);
+ cpmceid[1] = __dsu_pmu_read_pmceid(1);
+ bitmap_from_u32array(dsu_pmu->cpmceid_bitmap,
+ DSU_PMU_MAX_COMMON_EVENTS,
+ cpmceid,
+ ARRAY_SIZE(cpmceid));
+}
+
+static void dsu_pmu_set_active_cpu(int cpu, struct dsu_pmu *dsu_pmu)
+{
+ cpumask_set_cpu(cpu, &dsu_pmu->active_cpu);
+ if (irq_set_affinity_hint(dsu_pmu->irq, &dsu_pmu->active_cpu))
+ pr_warn("Failed to set irq affinity to %d\n", cpu);
+}
+
+/*
+ * dsu_pmu_init_pmu: Initialise the DSU PMU configurations if
+ * we haven't done it already.
+ */
+static void dsu_pmu_init_pmu(struct dsu_pmu *dsu_pmu)
+{
+ if (dsu_pmu->num_counters == -1)
+ dsu_pmu_probe_pmu(dsu_pmu);
+ /* Reset the interrupt overflow mask */
+ dsu_pmu_get_reset_overflow();
+}
+
+static int dsu_pmu_device_probe(struct platform_device *pdev)
+{
+ int irq, rc;
+ struct dsu_pmu *dsu_pmu;
+ char *name;
+ static atomic_t pmu_idx = ATOMIC_INIT(-1);
+
+ dsu_pmu = dsu_pmu_alloc(pdev);
+ if (IS_ERR(dsu_pmu))
+ return PTR_ERR(dsu_pmu);
+
+ rc = dsu_pmu_dt_get_cpus(pdev->dev.of_node, &dsu_pmu->associated_cpus);
+ if (rc) {
+ dev_warn(&pdev->dev, "Failed to parse the CPUs\n");
+ return rc;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_warn(&pdev->dev, "Failed to find IRQ\n");
+ return -EINVAL;
+ }
+
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_%d",
+ PMUNAME, atomic_inc_return(&pmu_idx));
+ if (!name)
+ return -ENOMEM;
+ rc = devm_request_irq(&pdev->dev, irq, dsu_pmu_handle_irq,
+ IRQF_NOBALANCING, name, dsu_pmu);
+ if (rc) {
+ dev_warn(&pdev->dev, "Failed to request IRQ %d\n", irq);
+ return rc;
+ }
+
+ dsu_pmu->irq = irq;
+ platform_set_drvdata(pdev, dsu_pmu);
+ rc = cpuhp_state_add_instance(dsu_pmu_cpuhp_state,
+ &dsu_pmu->cpuhp_node);
+ if (rc)
+ return rc;
+
+ dsu_pmu->pmu = (struct pmu) {
+ .task_ctx_nr = perf_invalid_context,
+ .module = THIS_MODULE,
+ .pmu_enable = dsu_pmu_enable,
+ .pmu_disable = dsu_pmu_disable,
+ .event_init = dsu_pmu_event_init,
+ .add = dsu_pmu_add,
+ .del = dsu_pmu_del,
+ .start = dsu_pmu_start,
+ .stop = dsu_pmu_stop,
+ .read = dsu_pmu_read,
+
+ .attr_groups = dsu_pmu_attr_groups,
+ };
+
+ rc = perf_pmu_register(&dsu_pmu->pmu, name, -1);
+ if (rc) {
+ cpuhp_state_remove_instance(dsu_pmu_cpuhp_state,
+ &dsu_pmu->cpuhp_node);
+ irq_set_affinity_hint(dsu_pmu->irq, NULL);
+ }
+
+ return rc;
+}
+
+static int dsu_pmu_device_remove(struct platform_device *pdev)
+{
+ struct dsu_pmu *dsu_pmu = platform_get_drvdata(pdev);
+
+ perf_pmu_unregister(&dsu_pmu->pmu);
+ cpuhp_state_remove_instance(dsu_pmu_cpuhp_state, &dsu_pmu->cpuhp_node);
+ irq_set_affinity_hint(dsu_pmu->irq, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id dsu_pmu_of_match[] = {
+ { .compatible = "arm,dsu-pmu", },
+ {},
+};
+
+static struct platform_driver dsu_pmu_driver = {
+ .driver = {
+ .name = DRVNAME,
+ .of_match_table = of_match_ptr(dsu_pmu_of_match),
+ },
+ .probe = dsu_pmu_device_probe,
+ .remove = dsu_pmu_device_remove,
+};
+
+static int dsu_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
+{
+ struct dsu_pmu *dsu_pmu = hlist_entry_safe(node, struct dsu_pmu,
+ cpuhp_node);
+
+ if (!cpumask_test_cpu(cpu, &dsu_pmu->associated_cpus))
+ return 0;
+
+ /* If the PMU is already managed, there is nothing to do */
+ if (!cpumask_empty(&dsu_pmu->active_cpu))
+ return 0;
+
+ dsu_pmu_init_pmu(dsu_pmu);
+ dsu_pmu_set_active_cpu(cpu, dsu_pmu);
+
+ return 0;
+}
+
+static int dsu_pmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
+{
+ int dst;
+ struct dsu_pmu *dsu_pmu = hlist_entry_safe(node, struct dsu_pmu,
+ cpuhp_node);
+
+ if (!cpumask_test_and_clear_cpu(cpu, &dsu_pmu->active_cpu))
+ return 0;
+
+ dst = dsu_pmu_get_online_cpu_any_but(dsu_pmu, cpu);
+ /* If there are no active CPUs in the DSU, leave IRQ disabled */
+ if (dst >= nr_cpu_ids) {
+ irq_set_affinity_hint(dsu_pmu->irq, NULL);
+ return 0;
+ }
+
+ perf_pmu_migrate_context(&dsu_pmu->pmu, cpu, dst);
+ dsu_pmu_set_active_cpu(dst, dsu_pmu);
+
+ return 0;
+}
+
+static int __init dsu_pmu_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ DRVNAME,
+ dsu_pmu_cpu_online,
+ dsu_pmu_cpu_teardown);
+ if (ret < 0)
+ return ret;
+ dsu_pmu_cpuhp_state = ret;
+ return platform_driver_register(&dsu_pmu_driver);
+}
+
+static void __exit dsu_pmu_exit(void)
+{
+ platform_driver_unregister(&dsu_pmu_driver);
+ cpuhp_remove_multi_state(dsu_pmu_cpuhp_state);
+}
+
+module_init(dsu_pmu_init);
+module_exit(dsu_pmu_exit);
+
+MODULE_DEVICE_TABLE(of, dsu_pmu_of_match);
+MODULE_DESCRIPTION("Perf driver for ARM DynamIQ Shared Unit");
+MODULE_AUTHOR("Suzuki K Poulose <suzuki.poulose@arm.com>");
+MODULE_LICENSE("GPL v2");
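The driver above registers an uncore PMU (task_ctx_nr = perf_invalid_context), so events must be opened system-wide on a CPU associated with the DSU rather than per task. As a hedged illustration only (not part of the patch), a minimal userspace sketch of counting one DSU event via perf_event_open(2) could look as follows; the instance name "arm_dsu_0", the choice of CPU 0 and the event code are assumptions for the example:

/* Illustrative sketch: count l3d_cache_refill on the first DSU instance. */
#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd, type;
	FILE *f = fopen("/sys/bus/event_source/devices/arm_dsu_0/type", "r");

	if (!f || fscanf(f, "%d", &type) != 1)
		return 1;
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;	/* dynamic PMU type read from sysfs */
	attr.config = 0x2a;	/* l3d_cache_refill, from the event list above */

	/* Uncore event: pid == -1, a CPU that is part of the DSU cluster. */
	fd = syscall(SYS_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0)
		return 1;
	sleep(1);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("l3d_cache_refill: %lld\n", count);
	close(fd);
	return 0;
}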
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
index 91b224eced18..46501cc79fd7 100644
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c
@@ -82,19 +82,10 @@ static int pmu_parse_irq_affinity(struct device_node *node, int i)
return -EINVAL;
}
- /* Now look up the logical CPU number */
- for_each_possible_cpu(cpu) {
- struct device_node *cpu_dn;
-
- cpu_dn = of_cpu_device_node_get(cpu);
- of_node_put(cpu_dn);
-
- if (dn == cpu_dn)
- break;
- }
-
- if (cpu >= nr_cpu_ids) {
+ cpu = of_cpu_node_to_id(dn);
+ if (cpu < 0) {
pr_warn("failed to find logical CPU for %s\n", dn->name);
+ cpu = nr_cpu_ids;
}
of_node_put(dn);
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index 8ce262fc2561..51b40aecb776 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -1164,6 +1164,15 @@ static int arm_spe_pmu_device_dt_probe(struct platform_device *pdev)
struct arm_spe_pmu *spe_pmu;
struct device *dev = &pdev->dev;
+ /*
+ * If kernelspace is unmapped when running at EL0, then the SPE
+ * buffer will fault and prematurely terminate the AUX session.
+ */
+ if (arm64_kernel_unmapped_at_el0()) {
+ dev_warn_once(dev, "profiling buffer inaccessible. Try passing \"kpti=off\" on the kernel command line\n");
+ return -EPERM;
+ }
+
spe_pmu = devm_kzalloc(dev, sizeof(*spe_pmu), GFP_KERNEL);
if (!spe_pmu) {
dev_err(dev, "failed to allocate spe_pmu\n");
diff --git a/drivers/phy/broadcom/phy-brcm-sata.c b/drivers/phy/broadcom/phy-brcm-sata.c
index 3f953db70288..8708ea3b4d6d 100644
--- a/drivers/phy/broadcom/phy-brcm-sata.c
+++ b/drivers/phy/broadcom/phy-brcm-sata.c
@@ -150,6 +150,9 @@ enum sata_phy_regs {
TXPMD_TX_FREQ_CTRL_CONTROL2_FMIN_MASK = 0x3ff,
TXPMD_TX_FREQ_CTRL_CONTROL3 = 0x84,
TXPMD_TX_FREQ_CTRL_CONTROL3_FMAX_MASK = 0x3ff,
+
+ RXPMD_REG_BANK = 0x1c0,
+ RXPMD_RX_FREQ_MON_CONTROL1 = 0x87,
};
enum sata_phy_ctrl_regs {
@@ -505,8 +508,36 @@ static int brcm_sata_phy_init(struct phy *phy)
return rc;
}
+static void brcm_stb_sata_calibrate(struct brcm_sata_port *port)
+{
+ void __iomem *base = brcm_sata_pcb_base(port);
+ u32 tmp = BIT(8);
+
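+ /* Read-modify-write: preserve the other bits and set only bit 8 of RX_FREQ_MON_CONTROL1. */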
+ brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_FREQ_MON_CONTROL1,
+ ~tmp, tmp);
+}
+
+static int brcm_sata_phy_calibrate(struct phy *phy)
+{
+ struct brcm_sata_port *port = phy_get_drvdata(phy);
+ int rc = -EOPNOTSUPP;
+
+ switch (port->phy_priv->version) {
+ case BRCM_SATA_PHY_STB_28NM:
+ case BRCM_SATA_PHY_STB_40NM:
+ brcm_stb_sata_calibrate(port);
+ rc = 0;
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
static const struct phy_ops phy_ops = {
.init = brcm_sata_phy_init,
+ .calibrate = brcm_sata_phy_calibrate,
.owner = THIS_MODULE,
};
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 4571cc098b76..ce126955212c 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -63,6 +63,16 @@ config PINCTRL_AS3722
open drain configuration for the GPIO pins of AS3722 devices. It also
supports the GPIO functionality through gpiolib.
+config PINCTRL_AXP209
+ tristate "X-Powers AXP209 PMIC pinctrl and GPIO Support"
+ depends on MFD_AXP20X
+ help
+ AXP PMICs provide multiple GPIOs that can be muxed for different
+ functions. This driver bundles a pinctrl driver to select the function
+ muxing and a GPIO driver to handle the GPIO when the GPIO function is
+ selected.
+ Say yes to enable pinctrl and GPIO support for the AXP209 PMIC.
+
config PINCTRL_BF54x
def_bool y if BF54x
select PINCTRL_ADI2
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index d0d4844f8022..4777f1595ce2 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_GENERIC_PINCONF) += pinconf-generic.o
obj-$(CONFIG_PINCTRL_ADI2) += pinctrl-adi2.o
obj-$(CONFIG_PINCTRL_ARTPEC6) += pinctrl-artpec6.o
obj-$(CONFIG_PINCTRL_AS3722) += pinctrl-as3722.o
+obj-$(CONFIG_PINCTRL_AXP209) += pinctrl-axp209.o
obj-$(CONFIG_PINCTRL_BF54x) += pinctrl-adi2-bf54x.o
obj-$(CONFIG_PINCTRL_BF60x) += pinctrl-adi2-bf60x.o
obj-$(CONFIG_PINCTRL_AT91) += pinctrl-at91.o
diff --git a/drivers/pinctrl/pinctrl-axp209.c b/drivers/pinctrl/pinctrl-axp209.c
new file mode 100644
index 000000000000..22d3bb0bf927
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-axp209.c
@@ -0,0 +1,476 @@
+/*
+ * AXP20x pinctrl and GPIO driver
+ *
+ * Copyright (C) 2016 Maxime Ripard <maxime.ripard@free-electrons.com>
+ * Copyright (C) 2017 Quentin Schulz <quentin.schulz@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/gpio/driver.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/axp20x.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define AXP20X_GPIO_FUNCTIONS 0x7
+#define AXP20X_GPIO_FUNCTION_OUT_LOW 0
+#define AXP20X_GPIO_FUNCTION_OUT_HIGH 1
+#define AXP20X_GPIO_FUNCTION_INPUT 2
+
+#define AXP20X_FUNC_GPIO_OUT 0
+#define AXP20X_FUNC_GPIO_IN 1
+#define AXP20X_FUNC_LDO 2
+#define AXP20X_FUNC_ADC 3
+#define AXP20X_FUNCS_NB 4
+
+#define AXP20X_MUX_GPIO_OUT 0
+#define AXP20X_MUX_GPIO_IN BIT(1)
+#define AXP20X_MUX_ADC BIT(2)
+
+#define AXP813_MUX_ADC (BIT(2) | BIT(0))
+
+struct axp20x_pctrl_desc {
+ const struct pinctrl_pin_desc *pins;
+ unsigned int npins;
+ /* Stores the pins supporting LDO function. Bit offset is pin number. */
+ u8 ldo_mask;
+ /* Stores the pins supporting ADC function. Bit offset is pin number. */
+ u8 adc_mask;
+ u8 gpio_status_offset;
+ u8 adc_mux;
+};
+
+struct axp20x_pinctrl_function {
+ const char *name;
+ unsigned int muxval;
+ const char **groups;
+ unsigned int ngroups;
+};
+
+struct axp20x_pctl {
+ struct gpio_chip chip;
+ struct regmap *regmap;
+ struct pinctrl_dev *pctl_dev;
+ struct device *dev;
+ const struct axp20x_pctrl_desc *desc;
+ struct axp20x_pinctrl_function funcs[AXP20X_FUNCS_NB];
+};
+
+static const struct pinctrl_pin_desc axp209_pins[] = {
+ PINCTRL_PIN(0, "GPIO0"),
+ PINCTRL_PIN(1, "GPIO1"),
+ PINCTRL_PIN(2, "GPIO2"),
+};
+
+static const struct pinctrl_pin_desc axp813_pins[] = {
+ PINCTRL_PIN(0, "GPIO0"),
+ PINCTRL_PIN(1, "GPIO1"),
+};
+
+static const struct axp20x_pctrl_desc axp20x_data = {
+ .pins = axp209_pins,
+ .npins = ARRAY_SIZE(axp209_pins),
+ .ldo_mask = BIT(0) | BIT(1),
+ .adc_mask = BIT(0) | BIT(1),
+ .gpio_status_offset = 4,
+ .adc_mux = AXP20X_MUX_ADC,
+};
+
+static const struct axp20x_pctrl_desc axp813_data = {
+ .pins = axp813_pins,
+ .npins = ARRAY_SIZE(axp813_pins),
+ .ldo_mask = BIT(0) | BIT(1),
+ .adc_mask = BIT(0),
+ .gpio_status_offset = 0,
+ .adc_mux = AXP813_MUX_ADC,
+};
+
+static int axp20x_gpio_get_reg(unsigned int offset)
+{
+ switch (offset) {
+ case 0:
+ return AXP20X_GPIO0_CTRL;
+ case 1:
+ return AXP20X_GPIO1_CTRL;
+ case 2:
+ return AXP20X_GPIO2_CTRL;
+ }
+
+ return -EINVAL;
+}
+
+static int axp20x_gpio_input(struct gpio_chip *chip, unsigned int offset)
+{
+ return pinctrl_gpio_direction_input(chip->base + offset);
+}
+
+static int axp20x_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct axp20x_pctl *pctl = gpiochip_get_data(chip);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(pctl->regmap, AXP20X_GPIO20_SS, &val);
+ if (ret)
+ return ret;
+
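+ /*
+ * All GPIO input states live in a single status register; the bit
+ * position is the pin number plus a per-variant offset.
+ */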
+ return !!(val & BIT(offset + pctl->desc->gpio_status_offset));
+}
+
+static int axp20x_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct axp20x_pctl *pctl = gpiochip_get_data(chip);
+ unsigned int val;
+ int reg, ret;
+
+ reg = axp20x_gpio_get_reg(offset);
+ if (reg < 0)
+ return reg;
+
+ ret = regmap_read(pctl->regmap, reg, &val);
+ if (ret)
+ return ret;
+
+ /*
+ * This shouldn't really happen if the pin is already in use;
+ * and if it's not in use yet, it doesn't matter since we're
+ * going to change the value soon anyway. Default to output.
+ */
+ if ((val & AXP20X_GPIO_FUNCTIONS) > 2)
+ return 0;
+
+ /*
+ * The GPIO directions are the three lowest values.
+ * 2 is input, 0 and 1 are output
+ */
+ return val & 2;
+}
+
+static int axp20x_gpio_output(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ chip->set(chip, offset, value);
+
+ return 0;
+}
+
+static void axp20x_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct axp20x_pctl *pctl = gpiochip_get_data(chip);
+ int reg;
+
+ reg = axp20x_gpio_get_reg(offset);
+ if (reg < 0)
+ return;
+
+ regmap_update_bits(pctl->regmap, reg,
+ AXP20X_GPIO_FUNCTIONS,
+ value ? AXP20X_GPIO_FUNCTION_OUT_HIGH :
+ AXP20X_GPIO_FUNCTION_OUT_LOW);
+}
+
+static int axp20x_pmx_set(struct pinctrl_dev *pctldev, unsigned int offset,
+ u8 config)
+{
+ struct axp20x_pctl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ int reg;
+
+ reg = axp20x_gpio_get_reg(offset);
+ if (reg < 0)
+ return reg;
+
+ return regmap_update_bits(pctl->regmap, reg, AXP20X_GPIO_FUNCTIONS,
+ config);
+}
+
+static int axp20x_pmx_func_cnt(struct pinctrl_dev *pctldev)
+{
+ struct axp20x_pctl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return ARRAY_SIZE(pctl->funcs);
+}
+
+static const char *axp20x_pmx_func_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct axp20x_pctl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctl->funcs[selector].name;
+}
+
+static int axp20x_pmx_func_groups(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const char * const **groups,
+ unsigned int *num_groups)
+{
+ struct axp20x_pctl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = pctl->funcs[selector].groups;
+ *num_groups = pctl->funcs[selector].ngroups;
+
+ return 0;
+}
+
+static int axp20x_pmx_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int function, unsigned int group)
+{
+ struct axp20x_pctl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ unsigned int mask;
+
+ /* Every pin supports GPIO_OUT and GPIO_IN functions */
+ if (function <= AXP20X_FUNC_GPIO_IN)
+ return axp20x_pmx_set(pctldev, group,
+ pctl->funcs[function].muxval);
+
+ if (function == AXP20X_FUNC_LDO)
+ mask = pctl->desc->ldo_mask;
+ else
+ mask = pctl->desc->adc_mask;
+
+ if (!(BIT(group) & mask))
+ return -EINVAL;
+
+ /*
+ * We let the regulator framework handle the LDO muxing as muxing bits
+ * are basically also regulator on/off bits. It's better not to enforce
+ * any state of the regulator when selecting the LDO mux so that we don't
+ * interfere with the regulator driver.
+ */
+ if (function == AXP20X_FUNC_LDO)
+ return 0;
+
+ return axp20x_pmx_set(pctldev, group, pctl->funcs[function].muxval);
+}
+
+static int axp20x_pmx_gpio_set_direction(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset, bool input)
+{
+ struct axp20x_pctl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ if (input)
+ return axp20x_pmx_set(pctldev, offset,
+ pctl->funcs[AXP20X_FUNC_GPIO_IN].muxval);
+
+ return axp20x_pmx_set(pctldev, offset,
+ pctl->funcs[AXP20X_FUNC_GPIO_OUT].muxval);
+}
+
+static const struct pinmux_ops axp20x_pmx_ops = {
+ .get_functions_count = axp20x_pmx_func_cnt,
+ .get_function_name = axp20x_pmx_func_name,
+ .get_function_groups = axp20x_pmx_func_groups,
+ .set_mux = axp20x_pmx_set_mux,
+ .gpio_set_direction = axp20x_pmx_gpio_set_direction,
+ .strict = true,
+};
+
+static int axp20x_groups_cnt(struct pinctrl_dev *pctldev)
+{
+ struct axp20x_pctl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctl->desc->npins;
+}
+
+static int axp20x_group_pins(struct pinctrl_dev *pctldev, unsigned int selector,
+ const unsigned int **pins, unsigned int *num_pins)
+{
+ struct axp20x_pctl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = (unsigned int *)&pctl->desc->pins[selector];
+ *num_pins = 1;
+
+ return 0;
+}
+
+static const char *axp20x_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct axp20x_pctl *pctl = pinctrl_dev_get_drvdata(pctldev);
+
+ return pctl->desc->pins[selector].name;
+}
+
+static const struct pinctrl_ops axp20x_pctrl_ops = {
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_group,
+ .dt_free_map = pinconf_generic_dt_free_map,
+ .get_groups_count = axp20x_groups_cnt,
+ .get_group_name = axp20x_group_name,
+ .get_group_pins = axp20x_group_pins,
+};
+
+static void axp20x_funcs_groups_from_mask(struct device *dev, unsigned int mask,
+ unsigned int mask_len,
+ struct axp20x_pinctrl_function *func,
+ const struct pinctrl_pin_desc *pins)
+{
+ unsigned long mask_cpy = mask;
+ const char **group;
+ unsigned int ngroups = hweight8(mask);
+ int bit;
+
+ func->ngroups = ngroups;
+ if (func->ngroups > 0) {
+ func->groups = devm_kzalloc(dev, ngroups * sizeof(const char *),
+ GFP_KERNEL);
+ group = func->groups;
+ for_each_set_bit(bit, &mask_cpy, mask_len) {
+ *group = pins[bit].name;
+ group++;
+ }
+ }
+}
+
+static void axp20x_build_funcs_groups(struct platform_device *pdev)
+{
+ struct axp20x_pctl *pctl = platform_get_drvdata(pdev);
+ int i, pin, npins = pctl->desc->npins;
+
+ pctl->funcs[AXP20X_FUNC_GPIO_OUT].name = "gpio_out";
+ pctl->funcs[AXP20X_FUNC_GPIO_OUT].muxval = AXP20X_MUX_GPIO_OUT;
+ pctl->funcs[AXP20X_FUNC_GPIO_IN].name = "gpio_in";
+ pctl->funcs[AXP20X_FUNC_GPIO_IN].muxval = AXP20X_MUX_GPIO_IN;
+ pctl->funcs[AXP20X_FUNC_LDO].name = "ldo";
+ /*
+ * Muxval for LDO is useless as we won't use it.
+ * See comment in axp20x_pmx_set_mux.
+ */
+ pctl->funcs[AXP20X_FUNC_ADC].name = "adc";
+ pctl->funcs[AXP20X_FUNC_ADC].muxval = pctl->desc->adc_mux;
+
+ /* Every pin supports GPIO_OUT and GPIO_IN functions */
+ for (i = 0; i <= AXP20X_FUNC_GPIO_IN; i++) {
+ pctl->funcs[i].ngroups = npins;
+ pctl->funcs[i].groups = devm_kzalloc(&pdev->dev,
+ npins * sizeof(char *),
+ GFP_KERNEL);
+ for (pin = 0; pin < npins; pin++)
+ pctl->funcs[i].groups[pin] = pctl->desc->pins[pin].name;
+ }
+
+ axp20x_funcs_groups_from_mask(&pdev->dev, pctl->desc->ldo_mask,
+ npins, &pctl->funcs[AXP20X_FUNC_LDO],
+ pctl->desc->pins);
+
+ axp20x_funcs_groups_from_mask(&pdev->dev, pctl->desc->adc_mask,
+ npins, &pctl->funcs[AXP20X_FUNC_ADC],
+ pctl->desc->pins);
+}
+
+static const struct of_device_id axp20x_pctl_match[] = {
+ { .compatible = "x-powers,axp209-gpio", .data = &axp20x_data, },
+ { .compatible = "x-powers,axp813-gpio", .data = &axp813_data, },
+ { }
+};
+MODULE_DEVICE_TABLE(of, axp20x_pctl_match);
+
+static int axp20x_pctl_probe(struct platform_device *pdev)
+{
+ struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
+ struct axp20x_pctl *pctl;
+ struct device *dev = &pdev->dev;
+ struct pinctrl_desc *pctrl_desc;
+ int ret;
+
+ if (!of_device_is_available(pdev->dev.of_node))
+ return -ENODEV;
+
+ if (!axp20x) {
+ dev_err(&pdev->dev, "Parent drvdata not set\n");
+ return -EINVAL;
+ }
+
+ pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL);
+ if (!pctl)
+ return -ENOMEM;
+
+ pctl->chip.base = -1;
+ pctl->chip.can_sleep = true;
+ pctl->chip.request = gpiochip_generic_request;
+ pctl->chip.free = gpiochip_generic_free;
+ pctl->chip.parent = &pdev->dev;
+ pctl->chip.label = dev_name(&pdev->dev);
+ pctl->chip.owner = THIS_MODULE;
+ pctl->chip.get = axp20x_gpio_get;
+ pctl->chip.get_direction = axp20x_gpio_get_direction;
+ pctl->chip.set = axp20x_gpio_set;
+ pctl->chip.direction_input = axp20x_gpio_input;
+ pctl->chip.direction_output = axp20x_gpio_output;
+ pctl->chip.ngpio = pctl->desc->npins;
+
+ pctl->desc = (struct axp20x_pctrl_desc *)of_device_get_match_data(dev);
+ pctl->regmap = axp20x->regmap;
+ pctl->dev = &pdev->dev;
+
+ platform_set_drvdata(pdev, pctl);
+
+ axp20x_build_funcs_groups(pdev);
+
+ pctrl_desc = devm_kzalloc(&pdev->dev, sizeof(*pctrl_desc), GFP_KERNEL);
+ if (!pctrl_desc)
+ return -ENOMEM;
+
+ pctrl_desc->name = dev_name(&pdev->dev);
+ pctrl_desc->owner = THIS_MODULE;
+ pctrl_desc->pins = pctl->desc->pins;
+ pctrl_desc->npins = pctl->desc->npins;
+ pctrl_desc->pctlops = &axp20x_pctrl_ops;
+ pctrl_desc->pmxops = &axp20x_pmx_ops;
+
+ pctl->pctl_dev = devm_pinctrl_register(&pdev->dev, pctrl_desc, pctl);
+ if (IS_ERR(pctl->pctl_dev)) {
+ dev_err(&pdev->dev, "couldn't register pinctrl driver\n");
+ return PTR_ERR(pctl->pctl_dev);
+ }
+
+ ret = devm_gpiochip_add_data(&pdev->dev, &pctl->chip, pctl);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register GPIO chip\n");
+ return ret;
+ }
+
+ ret = gpiochip_add_pin_range(&pctl->chip, dev_name(&pdev->dev),
+ pctl->desc->pins->number,
+ pctl->desc->pins->number,
+ pctl->desc->npins);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add pin range\n");
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "AXP209 pinctrl and GPIO driver loaded\n");
+
+ return 0;
+}
+
+static struct platform_driver axp20x_pctl_driver = {
+ .probe = axp20x_pctl_probe,
+ .driver = {
+ .name = "axp20x-gpio",
+ .of_match_table = axp20x_pctl_match,
+ },
+};
+
+module_platform_driver(axp20x_pctl_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_AUTHOR("Quentin Schulz <quentin.schulz@free-electrons.com>");
+MODULE_DESCRIPTION("AXP20x PMIC pinctrl and GPIO driver");
+MODULE_LICENSE("GPL");
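Once probed, the pins are reachable through the standard gpiolib interfaces. As a hedged illustration only (not part of the patch), a userspace sketch driving GPIO0 high through the GPIO character device could look as follows; the chip path /dev/gpiochip1 is an assumption, since the actual index depends on the system:

/* Illustrative sketch: request AXP209 GPIO0 as output and drive it high. */
#include <fcntl.h>
#include <linux/gpio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct gpiohandle_request req;
	struct gpiohandle_data data;
	int fd = open("/dev/gpiochip1", O_RDWR);	/* assumed chip index */

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.lineoffsets[0] = 0;				/* GPIO0 */
	req.lines = 1;
	req.flags = GPIOHANDLE_REQUEST_OUTPUT;
	strcpy(req.consumer_label, "axp209-demo");
	if (ioctl(fd, GPIO_GET_LINEHANDLE_IOCTL, &req) < 0)
		return 1;

	memset(&data, 0, sizeof(data));
	data.values[0] = 1;				/* drive the pin high */
	ioctl(req.fd, GPIOHANDLE_SET_LINE_VALUES_IOCTL, &data);

	close(req.fd);
	close(fd);
	return 0;
}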
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index 0ad6e290bbda..e728a96cabfd 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -38,14 +38,8 @@ config CHROMEOS_PSTORE
If you have a supported Chromebook, choose Y or M here.
The module will be called chromeos_pstore.
-config CROS_EC_CHARDEV
- tristate "Chrome OS Embedded Controller userspace device interface"
- depends on MFD_CROS_EC
- ---help---
- This driver adds support to talk with the ChromeOS EC from userspace.
-
- If you have a supported Chromebook, choose Y or M here.
- The module will be called cros_ec_dev.
+config CROS_EC_CTL
+ tristate
config CROS_EC_LPC
tristate "ChromeOS Embedded Controller (LPC)"
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index a077b1f0211d..ff3b369911f0 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -2,10 +2,9 @@
obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o
obj-$(CONFIG_CHROMEOS_PSTORE) += chromeos_pstore.o
-cros_ec_devs-objs := cros_ec_dev.o cros_ec_sysfs.o \
- cros_ec_lightbar.o cros_ec_vbc.o \
- cros_ec_debugfs.o
-obj-$(CONFIG_CROS_EC_CHARDEV) += cros_ec_devs.o
+cros_ec_ctl-objs := cros_ec_sysfs.o cros_ec_lightbar.o \
+ cros_ec_vbc.o cros_ec_debugfs.o
+obj-$(CONFIG_CROS_EC_CTL) += cros_ec_ctl.o
cros_ec_lpcs-objs := cros_ec_lpc.o cros_ec_lpc_reg.o
cros_ec_lpcs-$(CONFIG_CROS_EC_LPC_MEC) += cros_ec_lpc_mec.o
obj-$(CONFIG_CROS_EC_LPC) += cros_ec_lpcs.o
diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c
index 4cc66f405760..5473e602f7e0 100644
--- a/drivers/platform/chrome/cros_ec_debugfs.c
+++ b/drivers/platform/chrome/cros_ec_debugfs.c
@@ -29,9 +29,6 @@
#include <linux/slab.h>
#include <linux/wait.h>
-#include "cros_ec_dev.h"
-#include "cros_ec_debugfs.h"
-
#define LOG_SHIFT 14
#define LOG_SIZE (1 << LOG_SHIFT)
#define LOG_POLL_SEC 10
@@ -191,11 +188,11 @@ error:
return ret;
}
-static unsigned int cros_ec_console_log_poll(struct file *file,
+static __poll_t cros_ec_console_log_poll(struct file *file,
poll_table *wait)
{
struct cros_ec_debugfs *debug_info = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &debug_info->log_wq, wait);
@@ -390,6 +387,7 @@ remove_debugfs:
debugfs_remove_recursive(debug_info->dir);
return ret;
}
+EXPORT_SYMBOL(cros_ec_debugfs_init);
void cros_ec_debugfs_remove(struct cros_ec_dev *ec)
{
@@ -399,3 +397,4 @@ void cros_ec_debugfs_remove(struct cros_ec_dev *ec)
debugfs_remove_recursive(ec->debug_info->dir);
cros_ec_cleanup_console_log(ec->debug_info);
}
+EXPORT_SYMBOL(cros_ec_debugfs_remove);
diff --git a/drivers/platform/chrome/cros_ec_debugfs.h b/drivers/platform/chrome/cros_ec_debugfs.h
deleted file mode 100644
index 1ff3a50aa1b8..000000000000
--- a/drivers/platform/chrome/cros_ec_debugfs.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright 2015 Google, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _DRV_CROS_EC_DEBUGFS_H_
-#define _DRV_CROS_EC_DEBUGFS_H_
-
-#include "cros_ec_dev.h"
-
-/* debugfs stuff */
-int cros_ec_debugfs_init(struct cros_ec_dev *ec);
-void cros_ec_debugfs_remove(struct cros_ec_dev *ec);
-
-#endif /* _DRV_CROS_EC_DEBUGFS_H_ */
diff --git a/drivers/platform/chrome/cros_ec_lightbar.c b/drivers/platform/chrome/cros_ec_lightbar.c
index fd2b047a2748..6ea79d495aa2 100644
--- a/drivers/platform/chrome/cros_ec_lightbar.c
+++ b/drivers/platform/chrome/cros_ec_lightbar.c
@@ -33,8 +33,6 @@
#include <linux/uaccess.h>
#include <linux/slab.h>
-#include "cros_ec_dev.h"
-
/* Rate-limit the lightbar interface to prevent DoS. */
static unsigned long lb_interval_jiffies = 50 * HZ / 1000;
@@ -414,6 +412,7 @@ error:
return ret;
}
+EXPORT_SYMBOL(lb_manual_suspend_ctrl);
int lb_suspend(struct cros_ec_dev *ec)
{
@@ -422,6 +421,7 @@ int lb_suspend(struct cros_ec_dev *ec)
return lb_send_empty_cmd(ec, LIGHTBAR_CMD_SUSPEND);
}
+EXPORT_SYMBOL(lb_suspend);
int lb_resume(struct cros_ec_dev *ec)
{
@@ -430,6 +430,7 @@ int lb_resume(struct cros_ec_dev *ec)
return lb_send_empty_cmd(ec, LIGHTBAR_CMD_RESUME);
}
+EXPORT_SYMBOL(lb_resume);
static ssize_t sequence_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -622,3 +623,4 @@ struct attribute_group cros_ec_lightbar_attr_group = {
.attrs = __lb_cmds_attrs,
.is_visible = cros_ec_lightbar_attrs_are_visible,
};
+EXPORT_SYMBOL(cros_ec_lightbar_attr_group);
diff --git a/drivers/platform/chrome/cros_ec_sysfs.c b/drivers/platform/chrome/cros_ec_sysfs.c
index f3baf9973989..d6eebe872187 100644
--- a/drivers/platform/chrome/cros_ec_sysfs.c
+++ b/drivers/platform/chrome/cros_ec_sysfs.c
@@ -34,8 +34,6 @@
#include <linux/types.h>
#include <linux/uaccess.h>
-#include "cros_ec_dev.h"
-
/* Accessor functions */
static ssize_t show_ec_reboot(struct device *dev,
@@ -294,4 +292,7 @@ static struct attribute *__ec_attrs[] = {
struct attribute_group cros_ec_attr_group = {
.attrs = __ec_attrs,
};
+EXPORT_SYMBOL(cros_ec_attr_group);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ChromeOS EC control driver");
diff --git a/drivers/platform/chrome/cros_ec_vbc.c b/drivers/platform/chrome/cros_ec_vbc.c
index 564a0d08c8bf..6d38e6b08334 100644
--- a/drivers/platform/chrome/cros_ec_vbc.c
+++ b/drivers/platform/chrome/cros_ec_vbc.c
@@ -135,3 +135,4 @@ struct attribute_group cros_ec_vbc_attr_group = {
.bin_attrs = cros_ec_vbc_bin_attrs,
.is_bin_visible = cros_ec_vbc_is_visible,
};
+EXPORT_SYMBOL(cros_ec_vbc_attr_group);
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index 0578d34eec3f..999f1152655a 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -536,10 +536,10 @@ static ssize_t goldfish_pipe_write(struct file *filp,
/* is_write */ 1);
}
-static unsigned int goldfish_pipe_poll(struct file *filp, poll_table *wait)
+static __poll_t goldfish_pipe_poll(struct file *filp, poll_table *wait)
{
struct goldfish_pipe *pipe = filp->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
int status;
poll_wait(filp, &pipe->wake_queue, wait);
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 935121814c97..a4fabf9d75f3 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -4124,7 +4124,7 @@ static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
return ret;
}
-static unsigned int sonypi_misc_poll(struct file *file, poll_table *wait)
+static __poll_t sonypi_misc_poll(struct file *file, poll_table *wait)
{
poll_wait(file, &sonypi_compat.fifo_proc_list, wait);
if (kfifo_len(&sonypi_compat.fifo))
diff --git a/drivers/platform/x86/surfacepro3_button.c b/drivers/platform/x86/surfacepro3_button.c
index 6505c97705e1..1b491690ce07 100644
--- a/drivers/platform/x86/surfacepro3_button.c
+++ b/drivers/platform/x86/surfacepro3_button.c
@@ -119,7 +119,7 @@ static void surface_button_notify(struct acpi_device *device, u32 event)
if (key_code == KEY_RESERVED)
return;
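+	/*
+	 * While suspended, report this as a hard wakeup event so that an
+	 * in-progress system suspend is aborted.
+	 */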
if (pressed)
- pm_wakeup_event(&device->dev, 0);
+ pm_wakeup_dev_event(&device->dev, 0, button->suspended);
if (button->suspended)
return;
input_report_key(input, key_code, pressed?1:0);
@@ -185,6 +185,8 @@ static int surface_button_add(struct acpi_device *device)
error = input_register_device(input);
if (error)
goto err_free_input;
+
+ device_init_wakeup(&device->dev, true);
dev_info(&device->dev,
"%s [%s]\n", name, acpi_device_bid(device));
return 0;
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index e681140b85d8..077f334fdbae 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -581,10 +581,7 @@ static int __init pnpbios_thread_init(void)
init_completion(&unload_sem);
task = kthread_run(pnp_dock_thread, NULL, "kpnpbiosd");
- if (IS_ERR(task))
- return PTR_ERR(task);
-
- return 0;
+ return PTR_ERR_OR_ZERO(task);
}
/* Start the kernel thread later: */
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
index f054cdddfef8..803666ae3635 100644
--- a/drivers/pnp/quirks.c
+++ b/drivers/pnp/quirks.c
@@ -21,7 +21,6 @@
#include <linux/slab.h>
#include <linux/pnp.h>
#include <linux/io.h>
-#include <linux/kallsyms.h>
#include "base.h"
static void quirk_awe32_add_ports(struct pnp_dev *dev,
diff --git a/drivers/power/avs/rockchip-io-domain.c b/drivers/power/avs/rockchip-io-domain.c
index 75f63e38a8d1..ed2b109ae8fc 100644
--- a/drivers/power/avs/rockchip-io-domain.c
+++ b/drivers/power/avs/rockchip-io-domain.c
@@ -76,7 +76,7 @@ struct rockchip_iodomain_supply {
struct rockchip_iodomain {
struct device *dev;
struct regmap *grf;
- struct rockchip_iodomain_soc_data *soc_data;
+ const struct rockchip_iodomain_soc_data *soc_data;
struct rockchip_iodomain_supply supplies[MAX_SUPPLIES];
};
@@ -382,43 +382,43 @@ static const struct rockchip_iodomain_soc_data soc_data_rv1108_pmu = {
static const struct of_device_id rockchip_iodomain_match[] = {
{
.compatible = "rockchip,rk3188-io-voltage-domain",
- .data = (void *)&soc_data_rk3188
+ .data = &soc_data_rk3188
},
{
.compatible = "rockchip,rk3228-io-voltage-domain",
- .data = (void *)&soc_data_rk3228
+ .data = &soc_data_rk3228
},
{
.compatible = "rockchip,rk3288-io-voltage-domain",
- .data = (void *)&soc_data_rk3288
+ .data = &soc_data_rk3288
},
{
.compatible = "rockchip,rk3328-io-voltage-domain",
- .data = (void *)&soc_data_rk3328
+ .data = &soc_data_rk3328
},
{
.compatible = "rockchip,rk3368-io-voltage-domain",
- .data = (void *)&soc_data_rk3368
+ .data = &soc_data_rk3368
},
{
.compatible = "rockchip,rk3368-pmu-io-voltage-domain",
- .data = (void *)&soc_data_rk3368_pmu
+ .data = &soc_data_rk3368_pmu
},
{
.compatible = "rockchip,rk3399-io-voltage-domain",
- .data = (void *)&soc_data_rk3399
+ .data = &soc_data_rk3399
},
{
.compatible = "rockchip,rk3399-pmu-io-voltage-domain",
- .data = (void *)&soc_data_rk3399_pmu
+ .data = &soc_data_rk3399_pmu
},
{
.compatible = "rockchip,rv1108-io-voltage-domain",
- .data = (void *)&soc_data_rv1108
+ .data = &soc_data_rv1108
},
{
.compatible = "rockchip,rv1108-pmu-io-voltage-domain",
- .data = (void *)&soc_data_rv1108_pmu
+ .data = &soc_data_rv1108_pmu
},
{ /* sentinel */ },
};
@@ -443,7 +443,7 @@ static int rockchip_iodomain_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, iod);
match = of_match_node(rockchip_iodomain_match, np);
- iod->soc_data = (struct rockchip_iodomain_soc_data *)match->data;
+ iod->soc_data = match->data;
parent = pdev->dev.parent;
if (parent && parent->of_node) {
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index ca0de1a78e85..a102e74ab24e 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -98,15 +98,6 @@ config POWER_RESET_HISI
help
Reboot support for Hisilicon boards.
-config POWER_RESET_IMX
- bool "IMX6 power-off driver"
- depends on POWER_RESET && SOC_IMX6
- help
- This driver support power off external PMIC by PMIC_ON_REQ on i.mx6
- boards.If you want to use other pin to control external power,please
- say N here or disable in dts to make sure pm_power_off never be
- overwrote wrongly by this driver.
-
config POWER_RESET_MSM
bool "Qualcomm MSM power-off driver"
depends on ARCH_QCOM
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index aeb65edb17b7..dcc92f5f7a37 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -10,7 +10,6 @@ obj-$(CONFIG_POWER_RESET_GEMINI_POWEROFF) += gemini-poweroff.o
obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o
obj-$(CONFIG_POWER_RESET_GPIO_RESTART) += gpio-restart.o
obj-$(CONFIG_POWER_RESET_HISI) += hisi-reboot.o
-obj-$(CONFIG_POWER_RESET_IMX) += imx-snvs-poweroff.o
obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o
obj-$(CONFIG_POWER_RESET_PIIX4_POWEROFF) += piix4-poweroff.o
obj-$(CONFIG_POWER_RESET_LTC2952) += ltc2952-poweroff.o
diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c
index 31080c254124..0206cce328b3 100644
--- a/drivers/power/reset/at91-sama5d2_shdwc.c
+++ b/drivers/power/reset/at91-sama5d2_shdwc.c
@@ -68,7 +68,7 @@ struct shdwc_config {
};
struct shdwc {
- struct shdwc_config *cfg;
+ const struct shdwc_config *cfg;
void __iomem *at91_shdwc_base;
};
@@ -260,7 +260,7 @@ static int __init at91_shdwc_probe(struct platform_device *pdev)
}
match = of_match_node(at91_shdwc_of_match, pdev->dev.of_node);
- at91_shdwc->cfg = (struct shdwc_config *)(match->data);
+ at91_shdwc->cfg = match->data;
sclk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(sclk))
diff --git a/drivers/power/reset/imx-snvs-poweroff.c b/drivers/power/reset/imx-snvs-poweroff.c
deleted file mode 100644
index ad6ce5020ea7..000000000000
--- a/drivers/power/reset/imx-snvs-poweroff.c
+++ /dev/null
@@ -1,66 +0,0 @@
-/* Power off driver for i.mx6
- * Copyright (c) 2014, FREESCALE CORPORATION. All rights reserved.
- *
- * based on msm-poweroff.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-
-static void __iomem *snvs_base;
-
-static void do_imx_poweroff(void)
-{
- u32 value = readl(snvs_base);
-
- /* set TOP and DP_EN bit */
- writel(value | 0x60, snvs_base);
-}
-
-static int imx_poweroff_probe(struct platform_device *pdev)
-{
- snvs_base = of_iomap(pdev->dev.of_node, 0);
- if (!snvs_base) {
- dev_err(&pdev->dev, "failed to get memory\n");
- return -ENODEV;
- }
-
- pm_power_off = do_imx_poweroff;
- return 0;
-}
-
-static const struct of_device_id of_imx_poweroff_match[] = {
- { .compatible = "fsl,sec-v4.0-poweroff", },
- {},
-};
-MODULE_DEVICE_TABLE(of, of_imx_poweroff_match);
-
-static struct platform_driver imx_poweroff_driver = {
- .probe = imx_poweroff_probe,
- .driver = {
- .name = "imx-snvs-poweroff",
- .of_match_table = of_match_ptr(of_imx_poweroff_match),
- },
-};
-
-static int __init imx_poweroff_init(void)
-{
- return platform_driver_register(&imx_poweroff_driver);
-}
-device_initcall(imx_poweroff_init);
diff --git a/drivers/power/reset/msm-poweroff.c b/drivers/power/reset/msm-poweroff.c
index 4702efdfe466..01b8c71697cb 100644
--- a/drivers/power/reset/msm-poweroff.c
+++ b/drivers/power/reset/msm-poweroff.c
@@ -23,7 +23,7 @@
#include <linux/pm.h>
static void __iomem *msm_ps_hold;
-static int do_msm_restart(struct notifier_block *nb, unsigned long action,
+static int deassert_pshold(struct notifier_block *nb, unsigned long action,
void *data)
{
writel(0, msm_ps_hold);
@@ -33,14 +33,13 @@ static int do_msm_restart(struct notifier_block *nb, unsigned long action,
}
static struct notifier_block restart_nb = {
- .notifier_call = do_msm_restart,
+ .notifier_call = deassert_pshold,
.priority = 128,
};
static void do_msm_poweroff(void)
{
- /* TODO: Add poweroff capability */
- do_msm_restart(&restart_nb, 0, NULL);
+ deassert_pshold(&restart_nb, 0, NULL);
}
static int msm_restart_probe(struct platform_device *pdev)
diff --git a/drivers/power/reset/zx-reboot.c b/drivers/power/reset/zx-reboot.c
index 7549c7f74a3c..c03e96e6a041 100644
--- a/drivers/power/reset/zx-reboot.c
+++ b/drivers/power/reset/zx-reboot.c
@@ -82,3 +82,7 @@ static struct platform_driver zx_reboot_driver = {
},
};
module_platform_driver(zx_reboot_driver);
+
+MODULE_DESCRIPTION("ZTE SoCs reset driver");
+MODULE_AUTHOR("Jun Nie <jun.nie@linaro.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
index 4ebbcce45c48..5a76c6d343de 100644
--- a/drivers/power/supply/ab8500_charger.c
+++ b/drivers/power/supply/ab8500_charger.c
@@ -3218,11 +3218,13 @@ static int ab8500_charger_init_hw_registers(struct ab8500_charger *di)
}
/* Enable backup battery charging */
- abx500_mask_and_set_register_interruptible(di->dev,
+ ret = abx500_mask_and_set_register_interruptible(di->dev,
AB8500_RTC, AB8500_RTC_CTRL_REG,
RTC_BUP_CH_ENA, RTC_BUP_CH_ENA);
- if (ret < 0)
+ if (ret < 0) {
dev_err(di->dev, "%s mask and set failed\n", __func__);
+ goto out;
+ }
if (is_ab8540(di->parent)) {
ret = abx500_mask_and_set_register_interruptible(di->dev,
diff --git a/drivers/power/supply/axp20x_ac_power.c b/drivers/power/supply/axp20x_ac_power.c
index 38f4e87cf24d..0771f951b11f 100644
--- a/drivers/power/supply/axp20x_ac_power.c
+++ b/drivers/power/supply/axp20x_ac_power.c
@@ -159,7 +159,7 @@ static int axp20x_ac_power_probe(struct platform_device *pdev)
struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
struct power_supply_config psy_cfg = {};
struct axp20x_ac_power *power;
- struct axp_data *axp_data;
+ const struct axp_data *axp_data;
static const char * const irq_names[] = { "ACIN_PLUGIN", "ACIN_REMOVAL",
NULL };
int i, irq, ret;
@@ -176,7 +176,7 @@ static int axp20x_ac_power_probe(struct platform_device *pdev)
if (!power)
return -ENOMEM;
- axp_data = (struct axp_data *)of_device_get_match_data(&pdev->dev);
+ axp_data = of_device_get_match_data(&pdev->dev);
if (axp_data->acin_adc) {
power->acin_v = devm_iio_channel_get(&pdev->dev, "acin_v");
@@ -230,10 +230,10 @@ static int axp20x_ac_power_probe(struct platform_device *pdev)
static const struct of_device_id axp20x_ac_power_match[] = {
{
.compatible = "x-powers,axp202-ac-power-supply",
- .data = (void *)&axp20x_data,
+ .data = &axp20x_data,
}, {
.compatible = "x-powers,axp221-ac-power-supply",
- .data = (void *)&axp22x_data,
+ .data = &axp22x_data,
}, { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, axp20x_ac_power_match);
diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c
index d51ebd1da65e..9bfbde15b07d 100644
--- a/drivers/power/supply/axp288_charger.c
+++ b/drivers/power/supply/axp288_charger.c
@@ -1,6 +1,7 @@
/*
* axp288_charger.c - X-power AXP288 PMIC Charger driver
*
+ * Copyright (C) 2016-2017 Hans de Goede <hdegoede@redhat.com>
* Copyright (C) 2014 Intel Corporation
* Author: Ramakrishna Pallala <ramakrishna.pallala@intel.com>
*
@@ -98,28 +99,10 @@
#define CV_4200MV 4200 /* 4200mV */
#define CV_4350MV 4350 /* 4350mV */
-#define CC_200MA 200 /* 200mA */
-#define CC_600MA 600 /* 600mA */
-#define CC_800MA 800 /* 800mA */
-#define CC_1000MA 1000 /* 1000mA */
-#define CC_1600MA 1600 /* 1600mA */
-#define CC_2000MA 2000 /* 2000mA */
-
-#define ILIM_100MA 100 /* 100mA */
-#define ILIM_500MA 500 /* 500mA */
-#define ILIM_900MA 900 /* 900mA */
-#define ILIM_1500MA 1500 /* 1500mA */
-#define ILIM_2000MA 2000 /* 2000mA */
-#define ILIM_2500MA 2500 /* 2500mA */
-#define ILIM_3000MA 3000 /* 3000mA */
-
#define AXP288_EXTCON_DEV_NAME "axp288_extcon"
#define USB_HOST_EXTCON_HID "INT3496"
#define USB_HOST_EXTCON_NAME "INT3496:00"
-static const unsigned int cable_ids[] =
- { EXTCON_CHG_USB_SDP, EXTCON_CHG_USB_CDP, EXTCON_CHG_USB_DCP };
-
enum {
VBUS_OV_IRQ = 0,
CHARGE_DONE_IRQ,
@@ -139,7 +122,6 @@ struct axp288_chrg_info {
struct regmap_irq_chip_data *regmap_irqc;
int irq[CHRG_INTR_END];
struct power_supply *psy_usb;
- struct mutex lock;
/* OTG/Host mode */
struct {
@@ -152,18 +134,14 @@ struct axp288_chrg_info {
/* SDP/CDP/DCP USB charging cable notifications */
struct {
struct extcon_dev *edev;
- bool connected;
- enum power_supply_type chg_type;
- struct notifier_block nb[ARRAY_SIZE(cable_ids)];
+ struct notifier_block nb;
struct work_struct work;
} cable;
- int inlmt;
int cc;
int cv;
int max_cc;
int max_cv;
- int is_charger_enabled;
};
static inline int axp288_charger_set_cc(struct axp288_chrg_info *info, int cc)
@@ -220,51 +198,63 @@ static inline int axp288_charger_set_cv(struct axp288_chrg_info *info, int cv)
return ret;
}
-static inline int axp288_charger_set_vbus_inlmt(struct axp288_chrg_info *info,
- int inlmt)
+static int axp288_charger_get_vbus_inlmt(struct axp288_chrg_info *info)
{
- int ret;
unsigned int val;
- u8 reg_val;
+ int ret;
- /* Read in limit register */
ret = regmap_read(info->regmap, AXP20X_CHRG_BAK_CTRL, &val);
if (ret < 0)
- goto set_inlmt_fail;
-
- if (inlmt <= ILIM_100MA) {
- reg_val = CHRG_VBUS_ILIM_100MA;
- inlmt = ILIM_100MA;
- } else if (inlmt <= ILIM_500MA) {
- reg_val = CHRG_VBUS_ILIM_500MA;
- inlmt = ILIM_500MA;
- } else if (inlmt <= ILIM_900MA) {
- reg_val = CHRG_VBUS_ILIM_900MA;
- inlmt = ILIM_900MA;
- } else if (inlmt <= ILIM_1500MA) {
- reg_val = CHRG_VBUS_ILIM_1500MA;
- inlmt = ILIM_1500MA;
- } else if (inlmt <= ILIM_2000MA) {
- reg_val = CHRG_VBUS_ILIM_2000MA;
- inlmt = ILIM_2000MA;
- } else if (inlmt <= ILIM_2500MA) {
- reg_val = CHRG_VBUS_ILIM_2500MA;
- inlmt = ILIM_2500MA;
- } else {
- reg_val = CHRG_VBUS_ILIM_3000MA;
- inlmt = ILIM_3000MA;
+ return ret;
+
+ val >>= CHRG_VBUS_ILIM_BIT_POS;
+ switch (val) {
+ case CHRG_VBUS_ILIM_100MA:
+ return 100000;
+ case CHRG_VBUS_ILIM_500MA:
+ return 500000;
+ case CHRG_VBUS_ILIM_900MA:
+ return 900000;
+ case CHRG_VBUS_ILIM_1500MA:
+ return 1500000;
+ case CHRG_VBUS_ILIM_2000MA:
+ return 2000000;
+ case CHRG_VBUS_ILIM_2500MA:
+ return 2500000;
+ case CHRG_VBUS_ILIM_3000MA:
+ return 3000000;
+ default:
+ dev_warn(&info->pdev->dev, "Unknown ilim reg val: %d\n", val);
+ return 0;
}
+}
- reg_val = (val & ~CHRG_VBUS_ILIM_MASK)
- | (reg_val << CHRG_VBUS_ILIM_BIT_POS);
- ret = regmap_write(info->regmap, AXP20X_CHRG_BAK_CTRL, reg_val);
- if (ret >= 0)
- info->inlmt = inlmt;
+static inline int axp288_charger_set_vbus_inlmt(struct axp288_chrg_info *info,
+ int inlmt)
+{
+ int ret;
+ u8 reg_val;
+
+ if (inlmt >= 3000000)
+ reg_val = CHRG_VBUS_ILIM_3000MA << CHRG_VBUS_ILIM_BIT_POS;
+ else if (inlmt >= 2500000)
+ reg_val = CHRG_VBUS_ILIM_2500MA << CHRG_VBUS_ILIM_BIT_POS;
+ else if (inlmt >= 2000000)
+ reg_val = CHRG_VBUS_ILIM_2000MA << CHRG_VBUS_ILIM_BIT_POS;
+ else if (inlmt >= 1500000)
+ reg_val = CHRG_VBUS_ILIM_1500MA << CHRG_VBUS_ILIM_BIT_POS;
+ else if (inlmt >= 900000)
+ reg_val = CHRG_VBUS_ILIM_900MA << CHRG_VBUS_ILIM_BIT_POS;
+ else if (inlmt >= 500000)
+ reg_val = CHRG_VBUS_ILIM_500MA << CHRG_VBUS_ILIM_BIT_POS;
else
- dev_err(&info->pdev->dev, "charger BAK control %d\n", ret);
+ reg_val = CHRG_VBUS_ILIM_100MA << CHRG_VBUS_ILIM_BIT_POS;
+ ret = regmap_update_bits(info->regmap, AXP20X_CHRG_BAK_CTRL,
+ CHRG_VBUS_ILIM_MASK, reg_val);
+ if (ret < 0)
+ dev_err(&info->pdev->dev, "charger BAK control %d\n", ret);
-set_inlmt_fail:
return ret;
}
@@ -283,7 +273,6 @@ static int axp288_charger_vbus_path_select(struct axp288_chrg_info *info,
if (ret < 0)
dev_err(&info->pdev->dev, "axp288 vbus path select %d\n", ret);
-
return ret;
}
@@ -292,9 +281,6 @@ static int axp288_charger_enable_charger(struct axp288_chrg_info *info,
{
int ret;
- if ((int)enable == info->is_charger_enabled)
- return 0;
-
if (enable)
ret = regmap_update_bits(info->regmap, AXP20X_CHRG_CTRL1,
CHRG_CCCV_CHG_EN, CHRG_CCCV_CHG_EN);
@@ -303,8 +289,6 @@ static int axp288_charger_enable_charger(struct axp288_chrg_info *info,
CHRG_CCCV_CHG_EN, 0);
if (ret < 0)
dev_err(&info->pdev->dev, "axp288 enable charger %d\n", ret);
- else
- info->is_charger_enabled = enable;
return ret;
}
@@ -376,8 +360,6 @@ static int axp288_charger_usb_set_property(struct power_supply *psy,
int ret = 0;
int scaled_val;
- mutex_lock(&info->lock);
-
switch (psp) {
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
scaled_val = min(val->intval, info->max_cc);
@@ -393,11 +375,15 @@ static int axp288_charger_usb_set_property(struct power_supply *psy,
if (ret < 0)
dev_warn(&info->pdev->dev, "set charge voltage failed\n");
break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ ret = axp288_charger_set_vbus_inlmt(info, val->intval);
+ if (ret < 0)
+ dev_warn(&info->pdev->dev, "set input current limit failed\n");
+ break;
default:
ret = -EINVAL;
}
- mutex_unlock(&info->lock);
return ret;
}
@@ -406,9 +392,7 @@ static int axp288_charger_usb_get_property(struct power_supply *psy,
union power_supply_propval *val)
{
struct axp288_chrg_info *info = power_supply_get_drvdata(psy);
- int ret = 0;
-
- mutex_lock(&info->lock);
+ int ret;
switch (psp) {
case POWER_SUPPLY_PROP_PRESENT:
@@ -419,7 +403,7 @@ static int axp288_charger_usb_get_property(struct power_supply *psy,
}
ret = axp288_charger_is_present(info);
if (ret < 0)
- goto psy_get_prop_fail;
+ return ret;
val->intval = ret;
break;
case POWER_SUPPLY_PROP_ONLINE:
@@ -430,7 +414,7 @@ static int axp288_charger_usb_get_property(struct power_supply *psy,
}
ret = axp288_charger_is_online(info);
if (ret < 0)
- goto psy_get_prop_fail;
+ return ret;
val->intval = ret;
break;
case POWER_SUPPLY_PROP_HEALTH:
@@ -448,17 +432,17 @@ static int axp288_charger_usb_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
val->intval = info->max_cv * 1000;
break;
- case POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
- val->intval = info->inlmt * 1000;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ ret = axp288_charger_get_vbus_inlmt(info);
+ if (ret < 0)
+ return ret;
+ val->intval = ret;
break;
default:
- ret = -EINVAL;
- goto psy_get_prop_fail;
+ return -EINVAL;
}
-psy_get_prop_fail:
- mutex_unlock(&info->lock);
- return ret;
+ return 0;
}
static int axp288_charger_property_is_writeable(struct power_supply *psy,
@@ -469,6 +453,7 @@ static int axp288_charger_property_is_writeable(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
ret = 1;
break;
default:
@@ -487,7 +472,7 @@ static enum power_supply_property axp288_usb_props[] = {
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX,
- POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
};
static const struct power_supply_desc axp288_charger_desc = {
@@ -565,99 +550,53 @@ static void axp288_charger_extcon_evt_worker(struct work_struct *work)
container_of(work, struct axp288_chrg_info, cable.work);
int ret, current_limit;
struct extcon_dev *edev = info->cable.edev;
- bool old_connected = info->cable.connected;
- enum power_supply_type old_chg_type = info->cable.chg_type;
+ unsigned int val;
+
+ ret = regmap_read(info->regmap, AXP20X_PWR_INPUT_STATUS, &val);
+ if (ret < 0) {
+ dev_err(&info->pdev->dev, "Error reading status (%d)\n", ret);
+ return;
+ }
+
+ /* Offline? Disable charging and bail */
+ if (!(val & PS_STAT_VBUS_VALID)) {
+ dev_dbg(&info->pdev->dev, "USB charger disconnected\n");
+ axp288_charger_enable_charger(info, false);
+ power_supply_changed(info->psy_usb);
+ return;
+ }
/* Determine cable/charger type */
if (extcon_get_state(edev, EXTCON_CHG_USB_SDP) > 0) {
- dev_dbg(&info->pdev->dev, "USB SDP charger is connected");
- info->cable.connected = true;
- info->cable.chg_type = POWER_SUPPLY_TYPE_USB;
+ dev_dbg(&info->pdev->dev, "USB SDP charger is connected\n");
+ current_limit = 500000;
} else if (extcon_get_state(edev, EXTCON_CHG_USB_CDP) > 0) {
- dev_dbg(&info->pdev->dev, "USB CDP charger is connected");
- info->cable.connected = true;
- info->cable.chg_type = POWER_SUPPLY_TYPE_USB_CDP;
+ dev_dbg(&info->pdev->dev, "USB CDP charger is connected\n");
+ current_limit = 1500000;
} else if (extcon_get_state(edev, EXTCON_CHG_USB_DCP) > 0) {
- dev_dbg(&info->pdev->dev, "USB DCP charger is connected");
- info->cable.connected = true;
- info->cable.chg_type = POWER_SUPPLY_TYPE_USB_DCP;
+ dev_dbg(&info->pdev->dev, "USB DCP charger is connected\n");
+ current_limit = 2000000;
} else {
- if (old_connected)
- dev_dbg(&info->pdev->dev, "USB charger disconnected");
- info->cable.connected = false;
- info->cable.chg_type = POWER_SUPPLY_TYPE_USB;
- }
-
- /* Cable status changed */
- if (old_connected == info->cable.connected &&
- old_chg_type == info->cable.chg_type)
+ /* Charger type detection still in progress, bail. */
return;
-
- mutex_lock(&info->lock);
-
- if (info->cable.connected) {
- axp288_charger_enable_charger(info, false);
-
- switch (info->cable.chg_type) {
- case POWER_SUPPLY_TYPE_USB:
- current_limit = ILIM_500MA;
- break;
- case POWER_SUPPLY_TYPE_USB_CDP:
- current_limit = ILIM_1500MA;
- break;
- case POWER_SUPPLY_TYPE_USB_DCP:
- current_limit = ILIM_2000MA;
- break;
- default:
- /* Unknown */
- current_limit = 0;
- break;
- }
-
- /* Set vbus current limit first, then enable charger */
- ret = axp288_charger_set_vbus_inlmt(info, current_limit);
- if (ret == 0)
- axp288_charger_enable_charger(info, true);
- else
- dev_err(&info->pdev->dev,
- "error setting current limit (%d)", ret);
- } else {
- axp288_charger_enable_charger(info, false);
}
- mutex_unlock(&info->lock);
+ /* Set vbus current limit first, then enable charger */
+ ret = axp288_charger_set_vbus_inlmt(info, current_limit);
+ if (ret == 0)
+ axp288_charger_enable_charger(info, true);
+ else
+ dev_err(&info->pdev->dev,
+ "error setting current limit (%d)\n", ret);
power_supply_changed(info->psy_usb);
}
-/*
- * We need 3 copies of this, because there is no way to find out for which
- * cable id we are being called from the passed in arguments; and we must
- * have a separate nb for each extcon_register_notifier call.
- */
-static int axp288_charger_handle_cable0_evt(struct notifier_block *nb,
- unsigned long event, void *param)
+static int axp288_charger_handle_cable_evt(struct notifier_block *nb,
+ unsigned long event, void *param)
{
struct axp288_chrg_info *info =
- container_of(nb, struct axp288_chrg_info, cable.nb[0]);
- schedule_work(&info->cable.work);
- return NOTIFY_OK;
-}
-
-static int axp288_charger_handle_cable1_evt(struct notifier_block *nb,
- unsigned long event, void *param)
-{
- struct axp288_chrg_info *info =
- container_of(nb, struct axp288_chrg_info, cable.nb[1]);
- schedule_work(&info->cable.work);
- return NOTIFY_OK;
-}
-
-static int axp288_charger_handle_cable2_evt(struct notifier_block *nb,
- unsigned long event, void *param)
-{
- struct axp288_chrg_info *info =
- container_of(nb, struct axp288_chrg_info, cable.nb[2]);
+ container_of(nb, struct axp288_chrg_info, cable.nb);
schedule_work(&info->cable.work);
return NOTIFY_OK;
}
@@ -785,6 +724,14 @@ static int charger_init_hw_regs(struct axp288_chrg_info *info)
return 0;
}
+static void axp288_charger_cancel_work(void *data)
+{
+ struct axp288_chrg_info *info = data;
+
+ cancel_work_sync(&info->otg.work);
+ cancel_work_sync(&info->cable.work);
+}
+
static int axp288_charger_probe(struct platform_device *pdev)
{
int ret, i, pirq;
@@ -799,8 +746,6 @@ static int axp288_charger_probe(struct platform_device *pdev)
info->pdev = pdev;
info->regmap = axp20x->regmap;
info->regmap_irqc = axp20x->regmap_irqc;
- info->cable.chg_type = -1;
- info->is_charger_enabled = -1;
info->cable.edev = extcon_get_extcon_dev(AXP288_EXTCON_DEV_NAME);
if (info->cable.edev == NULL) {
@@ -820,7 +765,6 @@ static int axp288_charger_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, info);
- mutex_init(&info->lock);
ret = charger_init_hw_regs(info);
if (ret)
@@ -836,19 +780,19 @@ static int axp288_charger_probe(struct platform_device *pdev)
return ret;
}
+ /* Cancel our work on cleanup; register this before the notifiers */
+ ret = devm_add_action(dev, axp288_charger_cancel_work, info);
+ if (ret)
+ return ret;
+
/* Register for extcon notification */
INIT_WORK(&info->cable.work, axp288_charger_extcon_evt_worker);
- info->cable.nb[0].notifier_call = axp288_charger_handle_cable0_evt;
- info->cable.nb[1].notifier_call = axp288_charger_handle_cable1_evt;
- info->cable.nb[2].notifier_call = axp288_charger_handle_cable2_evt;
- for (i = 0; i < ARRAY_SIZE(cable_ids); i++) {
- ret = devm_extcon_register_notifier(dev, info->cable.edev,
- cable_ids[i], &info->cable.nb[i]);
- if (ret) {
- dev_err(dev, "failed to register extcon notifier for %u: %d\n",
- cable_ids[i], ret);
- return ret;
- }
+ info->cable.nb.notifier_call = axp288_charger_handle_cable_evt;
+ ret = devm_extcon_register_notifier_all(dev, info->cable.edev,
+ &info->cable.nb);
+ if (ret) {
+ dev_err(dev, "failed to register cable extcon notifier\n");
+ return ret;
}
schedule_work(&info->cable.work);
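
Aside: the "register this before the notifiers" comment above works because devm actions run in reverse (LIFO) order on unbind: the extcon notifier is unregistered first, and only then is the pending work canceled, so a late notification cannot requeue work that was already canceled. A minimal sketch of the ordering, all names hypothetical:

#include <linux/device.h>
#include <linux/extcon.h>
#include <linux/notifier.h>
#include <linux/workqueue.h>

struct sketch_info {
	struct extcon_dev *edev;
	struct notifier_block nb;
	struct work_struct work;
};

static void sketch_cancel_work(void *data)
{
	struct sketch_info *info = data;

	cancel_work_sync(&info->work);
}

static int sketch_register(struct device *dev, struct sketch_info *info)
{
	int ret;

	/* Registered first => runs last on teardown (devm is LIFO) */
	ret = devm_add_action(dev, sketch_cancel_work, info);
	if (ret)
		return ret;

	/* Unregistered before the cancel action above runs */
	return devm_extcon_register_notifier_all(dev, info->edev, &info->nb);
}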
diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
index a8dcabc32721..4cc6e038dfdd 100644
--- a/drivers/power/supply/axp288_fuel_gauge.c
+++ b/drivers/power/supply/axp288_fuel_gauge.c
@@ -1,6 +1,7 @@
/*
* axp288_fuel_gauge.c - Xpower AXP288 PMIC Fuel Gauge Driver
*
+ * Copyright (C) 2016-2017 Hans de Goede <hdegoede@redhat.com>
* Copyright (C) 2014 Intel Corporation
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -16,6 +17,7 @@
*
*/
+#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
@@ -31,6 +33,12 @@
#include <linux/seq_file.h>
#include <asm/unaligned.h>
+#define PS_STAT_VBUS_TRIGGER (1 << 0)
+#define PS_STAT_BAT_CHRG_DIR (1 << 2)
+#define PS_STAT_VBAT_ABOVE_VHOLD (1 << 3)
+#define PS_STAT_VBUS_VALID (1 << 4)
+#define PS_STAT_VBUS_PRESENT (1 << 5)
+
#define CHRG_STAT_BAT_SAFE_MODE (1 << 3)
#define CHRG_STAT_BAT_VALID (1 << 4)
#define CHRG_STAT_BAT_PRESENT (1 << 5)
@@ -100,11 +108,22 @@ enum {
WL1_IRQ,
};
+enum {
+ BAT_TEMP = 0,
+ PMIC_TEMP,
+ SYSTEM_TEMP,
+ BAT_CHRG_CURR,
+ BAT_D_CURR,
+ BAT_VOLT,
+ IIO_CHANNEL_NUM
+};
+
struct axp288_fg_info {
struct platform_device *pdev;
struct regmap *regmap;
struct regmap_irq_chip_data *regmap_irqc;
int irq[AXP288_FG_INTR_NUM];
+ struct iio_channel *iio_channel[IIO_CHANNEL_NUM];
struct power_supply *bat;
struct mutex lock;
int status;
@@ -199,33 +218,6 @@ static int fuel_gauge_read_12bit_word(struct axp288_fg_info *info, int reg)
return (buf[0] << 4) | ((buf[1] >> 4) & 0x0f);
}
-static int pmic_read_adc_val(const char *name, int *raw_val,
- struct axp288_fg_info *info)
-{
- int ret, val = 0;
- struct iio_channel *indio_chan;
-
- indio_chan = iio_channel_get(NULL, name);
- if (IS_ERR_OR_NULL(indio_chan)) {
- ret = PTR_ERR(indio_chan);
- goto exit;
- }
- ret = iio_read_channel_raw(indio_chan, &val);
- if (ret < 0) {
- dev_err(&info->pdev->dev,
- "IIO channel read error: %x, %x\n", ret, val);
- goto err_exit;
- }
-
- dev_dbg(&info->pdev->dev, "adc raw val=%x\n", val);
- *raw_val = val;
-
-err_exit:
- iio_channel_release(indio_chan);
-exit:
- return ret;
-}
-
#ifdef CONFIG_DEBUG_FS
static int fuel_gauge_debug_show(struct seq_file *s, void *data)
{
@@ -296,22 +288,22 @@ static int fuel_gauge_debug_show(struct seq_file *s, void *data)
AXP288_FG_TUNE5,
fuel_gauge_reg_readb(info, AXP288_FG_TUNE5));
- ret = pmic_read_adc_val("axp288-batt-temp", &raw_val, info);
+ ret = iio_read_channel_raw(info->iio_channel[BAT_TEMP], &raw_val);
if (ret >= 0)
seq_printf(s, "axp288-batttemp : %d\n", raw_val);
- ret = pmic_read_adc_val("axp288-pmic-temp", &raw_val, info);
+ ret = iio_read_channel_raw(info->iio_channel[PMIC_TEMP], &raw_val);
if (ret >= 0)
seq_printf(s, "axp288-pmictemp : %d\n", raw_val);
- ret = pmic_read_adc_val("axp288-system-temp", &raw_val, info);
+ ret = iio_read_channel_raw(info->iio_channel[SYSTEM_TEMP], &raw_val);
if (ret >= 0)
seq_printf(s, "axp288-systtemp : %d\n", raw_val);
- ret = pmic_read_adc_val("axp288-chrg-curr", &raw_val, info);
+ ret = iio_read_channel_raw(info->iio_channel[BAT_CHRG_CURR], &raw_val);
if (ret >= 0)
seq_printf(s, "axp288-chrgcurr : %d\n", raw_val);
- ret = pmic_read_adc_val("axp288-chrg-d-curr", &raw_val, info);
+ ret = iio_read_channel_raw(info->iio_channel[BAT_D_CURR], &raw_val);
if (ret >= 0)
seq_printf(s, "axp288-dchrgcur : %d\n", raw_val);
- ret = pmic_read_adc_val("axp288-batt-volt", &raw_val, info);
+ ret = iio_read_channel_raw(info->iio_channel[BAT_VOLT], &raw_val);
if (ret >= 0)
seq_printf(s, "axp288-battvolt : %d\n", raw_val);
@@ -351,8 +343,7 @@ static inline void fuel_gauge_remove_debugfs(struct axp288_fg_info *info)
static void fuel_gauge_get_status(struct axp288_fg_info *info)
{
- int pwr_stat, ret;
- int charge, discharge;
+ int pwr_stat, fg_res;
pwr_stat = fuel_gauge_reg_readb(info, AXP20X_PWR_INPUT_STATUS);
if (pwr_stat < 0) {
@@ -360,36 +351,32 @@ static void fuel_gauge_get_status(struct axp288_fg_info *info)
"PWR STAT read failed:%d\n", pwr_stat);
return;
}
- ret = pmic_read_adc_val("axp288-chrg-curr", &charge, info);
- if (ret < 0) {
- dev_err(&info->pdev->dev,
- "ADC charge current read failed:%d\n", ret);
- return;
- }
- ret = pmic_read_adc_val("axp288-chrg-d-curr", &discharge, info);
- if (ret < 0) {
- dev_err(&info->pdev->dev,
- "ADC discharge current read failed:%d\n", ret);
- return;
+
+ /* Report full if Vbus is valid and the reported capacity is 100% */
+ if (pwr_stat & PS_STAT_VBUS_VALID) {
+ fg_res = fuel_gauge_reg_readb(info, AXP20X_FG_RES);
+ if (fg_res < 0) {
+ dev_err(&info->pdev->dev,
+ "FG RES read failed: %d\n", fg_res);
+ return;
+ }
+ if (fg_res == (FG_REP_CAP_VALID | 100)) {
+ info->status = POWER_SUPPLY_STATUS_FULL;
+ return;
+ }
}
- if (charge > 0)
+ if (pwr_stat & PS_STAT_BAT_CHRG_DIR)
info->status = POWER_SUPPLY_STATUS_CHARGING;
- else if (discharge > 0)
+ else
info->status = POWER_SUPPLY_STATUS_DISCHARGING;
- else {
- if (pwr_stat & CHRG_STAT_BAT_PRESENT)
- info->status = POWER_SUPPLY_STATUS_FULL;
- else
- info->status = POWER_SUPPLY_STATUS_NOT_CHARGING;
- }
}
static int fuel_gauge_get_vbatt(struct axp288_fg_info *info, int *vbatt)
{
int ret = 0, raw_val;
- ret = pmic_read_adc_val("axp288-batt-volt", &raw_val, info);
+ ret = iio_read_channel_raw(info->iio_channel[BAT_VOLT], &raw_val);
if (ret < 0)
goto vbatt_read_fail;
@@ -400,24 +387,19 @@ vbatt_read_fail:
static int fuel_gauge_get_current(struct axp288_fg_info *info, int *cur)
{
- int ret, value = 0;
- int charge, discharge;
+ int ret, discharge;
- ret = pmic_read_adc_val("axp288-chrg-curr", &charge, info);
- if (ret < 0)
- goto current_read_fail;
- ret = pmic_read_adc_val("axp288-chrg-d-curr", &discharge, info);
+ /* Check the discharge current first, so that we usually need only one read. */
+ ret = iio_read_channel_raw(info->iio_channel[BAT_D_CURR], &discharge);
if (ret < 0)
- goto current_read_fail;
+ return ret;
- if (charge > 0)
- value = charge;
- else if (discharge > 0)
- value = -1 * discharge;
+ if (discharge > 0) {
+ *cur = -1 * discharge;
+ return 0;
+ }
- *cur = value;
-current_read_fail:
- return ret;
+ return iio_read_channel_raw(info->iio_channel[BAT_CHRG_CURR], cur);
}
static int fuel_gauge_get_vocv(struct axp288_fg_info *info, int *vocv)
@@ -698,12 +680,54 @@ intr_failed:
}
}
+/*
+ * Some devices have no battery (HDMI sticks) and the AXP288's battery
+ * detection reports one despite it not being there.
+ */
+static const struct dmi_system_id axp288_fuel_gauge_blacklist[] = {
+ {
+ /* Intel Cherry Trail Compute Stick, Windows version */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "STK1AW32SC"),
+ },
+ },
+ {
+ /* Intel Cherry Trail Compute Stick, version without an OS */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "STK1A32SC"),
+ },
+ },
+ {
+ /* Meegopad T08 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Default string"),
+ DMI_MATCH(DMI_BOARD_VENDOR, "To be filled by OEM."),
+ DMI_MATCH(DMI_BOARD_NAME, "T3 MRD"),
+ DMI_MATCH(DMI_BOARD_VERSION, "V1.1"),
+ },
+ },
+ {}
+};
+
static int axp288_fuel_gauge_probe(struct platform_device *pdev)
{
- int ret = 0;
+ int i, ret = 0;
struct axp288_fg_info *info;
struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
struct power_supply_config psy_cfg = {};
+ static const char * const iio_chan_name[] = {
+ [BAT_TEMP] = "axp288-batt-temp",
+ [PMIC_TEMP] = "axp288-pmic-temp",
+ [SYSTEM_TEMP] = "axp288-system-temp",
+ [BAT_CHRG_CURR] = "axp288-chrg-curr",
+ [BAT_D_CURR] = "axp288-chrg-d-curr",
+ [BAT_VOLT] = "axp288-batt-volt",
+ };
+
+ if (dmi_check_system(axp288_fuel_gauge_blacklist))
+ return -ENODEV;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
@@ -719,18 +743,39 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
mutex_init(&info->lock);
INIT_DELAYED_WORK(&info->status_monitor, fuel_gauge_status_monitor);
+ for (i = 0; i < IIO_CHANNEL_NUM; i++) {
+ /*
+ * Note: we cannot use devm_iio_channel_get() because x86 systems
+ * lack the device<->channel maps which iio_channel_get() will
+ * try to use when passed a non-NULL device pointer.
+ */
+ info->iio_channel[i] =
+ iio_channel_get(NULL, iio_chan_name[i]);
+ if (IS_ERR(info->iio_channel[i])) {
+ ret = PTR_ERR(info->iio_channel[i]);
+ dev_dbg(&pdev->dev, "error getting iiochan %s: %d\n",
+ iio_chan_name[i], ret);
+ /* Wait for axp288_adc to load */
+ if (ret == -ENODEV)
+ ret = -EPROBE_DEFER;
+
+ goto out_free_iio_chan;
+ }
+ }
+
ret = fuel_gauge_reg_readb(info, AXP288_FG_DES_CAP1_REG);
if (ret < 0)
- return ret;
+ goto out_free_iio_chan;
if (!(ret & FG_DES_CAP1_VALID)) {
dev_err(&pdev->dev, "axp288 not configured by firmware\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_free_iio_chan;
}
ret = fuel_gauge_reg_readb(info, AXP20X_CHRG_CTRL1);
if (ret < 0)
- return ret;
+ goto out_free_iio_chan;
switch ((ret & CHRG_CCCV_CV_MASK) >> CHRG_CCCV_CV_BIT_POS) {
case CHRG_CCCV_CV_4100MV:
info->max_volt = 4100;
@@ -751,7 +796,7 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
if (IS_ERR(info->bat)) {
ret = PTR_ERR(info->bat);
dev_err(&pdev->dev, "failed to register battery: %d\n", ret);
- return ret;
+ goto out_free_iio_chan;
}
fuel_gauge_create_debugfs(info);
@@ -759,6 +804,13 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev)
schedule_delayed_work(&info->status_monitor, STATUS_MON_DELAY_JIFFIES);
return 0;
+
+out_free_iio_chan:
+ for (i = 0; i < IIO_CHANNEL_NUM; i++)
+ if (!IS_ERR_OR_NULL(info->iio_channel[i]))
+ iio_channel_release(info->iio_channel[i]);
+
+ return ret;
}
static const struct platform_device_id axp288_fg_id_table[] = {
@@ -780,6 +832,9 @@ static int axp288_fuel_gauge_remove(struct platform_device *pdev)
if (info->irq[i] >= 0)
free_irq(info->irq[i], info);
+ for (i = 0; i < IIO_CHANNEL_NUM; i++)
+ iio_channel_release(info->iio_channel[i]);
+
return 0;
}
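
Aside: the probe rework above is the standard shape for IIO consumers on lookup-table-less x86: fetch channels by name with a NULL device, map -ENODEV to -EPROBE_DEFER so the probe retries once the ADC provider loads, and release whatever was acquired on any later error. Condensed into a helper with a hypothetical name:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/iio/consumer.h>

static int sketch_get_channels(struct iio_channel **chan,
			       const char * const names[], int num)
{
	int i, ret;

	for (i = 0; i < num; i++) {
		/* NULL device: use the global channel map, as above */
		chan[i] = iio_channel_get(NULL, names[i]);
		if (IS_ERR(chan[i])) {
			ret = PTR_ERR(chan[i]);
			if (ret == -ENODEV)
				ret = -EPROBE_DEFER; /* provider not bound yet */
			goto release;
		}
	}
	return 0;

release:
	while (--i >= 0)
		iio_channel_release(chan[i]);
	return ret;
}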
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index 35ff406aca48..b58df04d03b3 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -11,7 +11,6 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
-#include <linux/extcon.h>
#include <linux/of_irq.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
@@ -162,9 +161,6 @@ struct bq24190_dev_info {
struct device *dev;
struct power_supply *charger;
struct power_supply *battery;
- struct extcon_dev *extcon;
- struct notifier_block extcon_nb;
- struct delayed_work extcon_work;
struct delayed_work input_current_limit_work;
char model_name[I2C_NAME_SIZE];
bool initialized;
@@ -686,6 +682,16 @@ static int bq24190_register_reset(struct bq24190_dev_info *bdi)
int ret, limit = 100;
u8 v;
+ /*
+ * This prop. can be passed on device instantiation from platform code:
+ * struct property_entry pe[] =
+ * { PROPERTY_ENTRY_BOOL("disable-reset"), ... };
+ * struct i2c_board_info bi =
+ * { .type = "bq24190", .addr = 0x6b, .properties = pe, .irq = irq };
+ * struct i2c_adapter ad = { ... };
+ * i2c_add_adapter(&ad);
+ * i2c_new_device(&ad, &bi);
+ */
if (device_property_read_bool(bdi->dev, "disable-reset"))
return 0;
@@ -1193,8 +1199,6 @@ static int bq24190_charger_set_property(struct power_supply *psy,
static int bq24190_charger_property_is_writeable(struct power_supply *psy,
enum power_supply_property psp)
{
- int ret;
-
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
@@ -1202,13 +1206,10 @@ static int bq24190_charger_property_is_writeable(struct power_supply *psy,
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
- ret = 1;
- break;
+ return 1;
default:
- ret = 0;
+ return 0;
}
-
- return ret;
}
static void bq24190_input_current_limit_work(struct work_struct *work)
@@ -1623,75 +1624,6 @@ static irqreturn_t bq24190_irq_handler_thread(int irq, void *data)
return IRQ_HANDLED;
}
-static void bq24190_extcon_work(struct work_struct *work)
-{
- struct bq24190_dev_info *bdi =
- container_of(work, struct bq24190_dev_info, extcon_work.work);
- int error, iinlim = 0;
- u8 v;
-
- error = pm_runtime_get_sync(bdi->dev);
- if (error < 0) {
- dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", error);
- pm_runtime_put_noidle(bdi->dev);
- return;
- }
-
- if (extcon_get_state(bdi->extcon, EXTCON_CHG_USB_SDP) == 1)
- iinlim = 500000;
- else if (extcon_get_state(bdi->extcon, EXTCON_CHG_USB_CDP) == 1 ||
- extcon_get_state(bdi->extcon, EXTCON_CHG_USB_ACA) == 1)
- iinlim = 1500000;
- else if (extcon_get_state(bdi->extcon, EXTCON_CHG_USB_DCP) == 1)
- iinlim = 2000000;
-
- if (iinlim) {
- error = bq24190_set_field_val(bdi, BQ24190_REG_ISC,
- BQ24190_REG_ISC_IINLIM_MASK,
- BQ24190_REG_ISC_IINLIM_SHIFT,
- bq24190_isc_iinlim_values,
- ARRAY_SIZE(bq24190_isc_iinlim_values),
- iinlim);
- if (error < 0)
- dev_err(bdi->dev, "Can't set IINLIM: %d\n", error);
- }
-
- /* if no charger found and in USB host mode, set OTG 5V boost, else normal */
- if (!iinlim && extcon_get_state(bdi->extcon, EXTCON_USB_HOST) == 1)
- v = BQ24190_REG_POC_CHG_CONFIG_OTG;
- else
- v = BQ24190_REG_POC_CHG_CONFIG_CHARGE;
-
- error = bq24190_write_mask(bdi, BQ24190_REG_POC,
- BQ24190_REG_POC_CHG_CONFIG_MASK,
- BQ24190_REG_POC_CHG_CONFIG_SHIFT,
- v);
- if (error < 0)
- dev_err(bdi->dev, "Can't set CHG_CONFIG: %d\n", error);
-
- pm_runtime_mark_last_busy(bdi->dev);
- pm_runtime_put_autosuspend(bdi->dev);
-}
-
-static int bq24190_extcon_event(struct notifier_block *nb, unsigned long event,
- void *param)
-{
- struct bq24190_dev_info *bdi =
- container_of(nb, struct bq24190_dev_info, extcon_nb);
-
- /*
- * The Power-Good detection may take up to 220ms, sometimes
- * the external charger detection is quicker, and the bq24190 will
- * reset to iinlim based on its own charger detection (which is not
- * hooked up when using external charger detection) resulting in
- * a too low default 500mA iinlim. Delay applying the extcon value
- * for 300ms to avoid this.
- */
- queue_delayed_work(system_wq, &bdi->extcon_work, msecs_to_jiffies(300));
-
- return NOTIFY_OK;
-}
-
static int bq24190_hw_init(struct bq24190_dev_info *bdi)
{
u8 v;
@@ -1766,7 +1698,6 @@ static int bq24190_probe(struct i2c_client *client,
struct device *dev = &client->dev;
struct power_supply_config charger_cfg = {}, battery_cfg = {};
struct bq24190_dev_info *bdi;
- const char *name;
int ret;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
@@ -1796,25 +1727,6 @@ static int bq24190_probe(struct i2c_client *client,
return -EINVAL;
}
- /*
- * Devicetree platforms should get extcon via phandle (not yet supported).
- * On ACPI platforms, extcon clients may invoke us with:
- * struct property_entry pe[] =
- * { PROPERTY_ENTRY_STRING("extcon-name", client_name), ... };
- * struct i2c_board_info bi =
- * { .type = "bq24190", .addr = 0x6b, .properties = pe, .irq = irq };
- * struct i2c_adapter ad = { ... };
- * i2c_add_adapter(&ad);
- * i2c_new_device(&ad, &bi);
- */
- if (device_property_read_string(dev, "extcon-name", &name) == 0) {
- bdi->extcon = extcon_get_extcon_dev(name);
- if (!bdi->extcon)
- return -EPROBE_DEFER;
-
- dev_info(bdi->dev, "using extcon device %s\n", name);
- }
-
pm_runtime_enable(dev);
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, 600);
@@ -1882,20 +1794,6 @@ static int bq24190_probe(struct i2c_client *client,
if (ret < 0)
goto out_sysfs;
- if (bdi->extcon) {
- INIT_DELAYED_WORK(&bdi->extcon_work, bq24190_extcon_work);
- bdi->extcon_nb.notifier_call = bq24190_extcon_event;
- ret = devm_extcon_register_notifier_all(dev, bdi->extcon,
- &bdi->extcon_nb);
- if (ret) {
- dev_err(dev, "Can't register extcon\n");
- goto out_sysfs;
- }
-
- /* Sync initial cable state */
- queue_delayed_work(system_wq, &bdi->extcon_work, 0);
- }
-
enable_irq_wake(client->irq);
pm_runtime_mark_last_busy(dev);
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index 51f0961ecf3e..d99981542a46 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -323,6 +323,30 @@ static u8
[BQ27XXX_REG_AP] = INVALID_REG_ADDR,
BQ27XXX_DM_REG_ROWS,
},
+ bq27521_regs[BQ27XXX_REG_MAX] = {
+ [BQ27XXX_REG_CTRL] = 0x02,
+ [BQ27XXX_REG_TEMP] = 0x0a,
+ [BQ27XXX_REG_INT_TEMP] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_VOLT] = 0x0c,
+ [BQ27XXX_REG_AI] = 0x0e,
+ [BQ27XXX_REG_FLAGS] = 0x08,
+ [BQ27XXX_REG_TTE] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_TTF] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_TTES] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_TTECP] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_NAC] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_FCC] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_CYCT] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_AE] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_SOC] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_DCAP] = INVALID_REG_ADDR,
+ [BQ27XXX_REG_AP] = INVALID_REG_ADDR,
+ [BQ27XXX_DM_CTRL] = INVALID_REG_ADDR,
+ [BQ27XXX_DM_CLASS] = INVALID_REG_ADDR,
+ [BQ27XXX_DM_BLOCK] = INVALID_REG_ADDR,
+ [BQ27XXX_DM_DATA] = INVALID_REG_ADDR,
+ [BQ27XXX_DM_CKSUM] = INVALID_REG_ADDR,
+ },
bq27530_regs[BQ27XXX_REG_MAX] = {
[BQ27XXX_REG_CTRL] = 0x00,
[BQ27XXX_REG_TEMP] = 0x06,
@@ -557,6 +581,15 @@ static enum power_supply_property bq27520g4_props[] = {
POWER_SUPPLY_PROP_MANUFACTURER,
};
+static enum power_supply_property bq27521_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+};
+
static enum power_supply_property bq27530_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
@@ -671,6 +704,7 @@ static struct bq27xxx_dm_reg bq27500_dm_regs[] = {
#define bq27520g2_dm_regs 0
#define bq27520g3_dm_regs 0
#define bq27520g4_dm_regs 0
+#define bq27521_dm_regs 0
#define bq27530_dm_regs 0
#define bq27531_dm_regs 0
#define bq27541_dm_regs 0
@@ -717,8 +751,8 @@ static struct bq27xxx_dm_reg bq27621_dm_regs[] = {
#endif
#define BQ27XXX_O_ZERO 0x00000001
-#define BQ27XXX_O_OTDC 0x00000002
-#define BQ27XXX_O_UTOT 0x00000004
+#define BQ27XXX_O_OTDC 0x00000002 /* has OTC/OTD overtemperature flags */
+#define BQ27XXX_O_UTOT 0x00000004 /* has OT overtemperature flag */
#define BQ27XXX_O_CFGUP 0x00000008
#define BQ27XXX_O_RAM 0x00000010
@@ -751,6 +785,7 @@ static struct {
[BQ27520G2] = BQ27XXX_DATA(bq27520g2, 0 , BQ27XXX_O_OTDC),
[BQ27520G3] = BQ27XXX_DATA(bq27520g3, 0 , BQ27XXX_O_OTDC),
[BQ27520G4] = BQ27XXX_DATA(bq27520g4, 0 , BQ27XXX_O_OTDC),
+ [BQ27521] = BQ27XXX_DATA(bq27521, 0 , 0),
[BQ27530] = BQ27XXX_DATA(bq27530, 0 , BQ27XXX_O_UTOT),
[BQ27531] = BQ27XXX_DATA(bq27531, 0 , BQ27XXX_O_UTOT),
[BQ27541] = BQ27XXX_DATA(bq27541, 0 , BQ27XXX_O_OTDC),
diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c
index 0b11ed472f33..6b25e5f2337e 100644
--- a/drivers/power/supply/bq27xxx_battery_i2c.c
+++ b/drivers/power/supply/bq27xxx_battery_i2c.c
@@ -239,6 +239,7 @@ static const struct i2c_device_id bq27xxx_i2c_id_table[] = {
{ "bq27520g2", BQ27520G2 },
{ "bq27520g3", BQ27520G3 },
{ "bq27520g4", BQ27520G4 },
+ { "bq27521", BQ27521 },
{ "bq27530", BQ27530 },
{ "bq27531", BQ27531 },
{ "bq27541", BQ27541 },
@@ -269,6 +270,7 @@ static const struct of_device_id bq27xxx_battery_i2c_of_match_table[] = {
{ .compatible = "ti,bq27520g2" },
{ .compatible = "ti,bq27520g3" },
{ .compatible = "ti,bq27520g4" },
+ { .compatible = "ti,bq27521" },
{ .compatible = "ti,bq27530" },
{ .compatible = "ti,bq27531" },
{ .compatible = "ti,bq27541" },
diff --git a/drivers/power/supply/charger-manager.c b/drivers/power/supply/charger-manager.c
index 6502fa7c2106..1de4b4493824 100644
--- a/drivers/power/supply/charger-manager.c
+++ b/drivers/power/supply/charger-manager.c
@@ -578,7 +578,7 @@ static int check_charging_duration(struct charger_manager *cm)
} else if (is_ext_pwr_online(cm) && !cm->charger_enabled) {
duration = curr - cm->charging_end_time;
- if (duration > desc->charging_max_duration_ms &&
+ if (duration > desc->discharging_max_duration_ms &&
is_ext_pwr_online(cm)) {
dev_info(cm->dev, "Discharging duration exceeds %ums\n",
desc->discharging_max_duration_ms);
diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c
index ee71a2b37b12..98ba07869c3b 100644
--- a/drivers/power/supply/cpcap-battery.c
+++ b/drivers/power/supply/cpcap-battery.c
@@ -586,8 +586,8 @@ static int cpcap_battery_init_irq(struct platform_device *pdev,
int irq, error;
irq = platform_get_irq_byname(pdev, name);
- if (!irq)
- return -ENODEV;
+ if (irq < 0)
+ return irq;
error = devm_request_threaded_irq(ddata->dev, irq, NULL,
cpcap_battery_irq_thread,
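
Aside: the two-line fix above matters because platform_get_irq_byname() returns a negative errno on failure (possibly -EPROBE_DEFER), so testing `!irq` silently accepted every real error. The idiomatic shape, as a sketch with hypothetical names:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static int sketch_request_named_irq(struct platform_device *pdev,
				    const char *name, irq_handler_t thread,
				    void *data)
{
	int irq = platform_get_irq_byname(pdev, name);

	/* Negative errno on failure (may be -EPROBE_DEFER): propagate it */
	if (irq < 0)
		return irq;

	return devm_request_threaded_irq(&pdev->dev, irq, NULL, thread,
					 IRQF_ONESHOT, name, data);
}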
diff --git a/drivers/power/supply/ltc2941-battery-gauge.c b/drivers/power/supply/ltc2941-battery-gauge.c
index 08e4fd9ee607..4cfa3f0cd689 100644
--- a/drivers/power/supply/ltc2941-battery-gauge.c
+++ b/drivers/power/supply/ltc2941-battery-gauge.c
@@ -60,6 +60,7 @@ enum ltc294x_id {
#define LTC294X_REG_CONTROL_PRESCALER_SET(x) \
((x << 3) & LTC294X_REG_CONTROL_PRESCALER_MASK)
#define LTC294X_REG_CONTROL_ALCC_CONFIG_DISABLED 0
+#define LTC294X_REG_CONTROL_ADC_DISABLE(x) ((x) & ~(BIT(7) | BIT(6)))
struct ltc294x_info {
struct i2c_client *client; /* I2C Client pointer */
@@ -523,6 +524,29 @@ static int ltc294x_i2c_probe(struct i2c_client *client,
return 0;
}
+static void ltc294x_i2c_shutdown(struct i2c_client *client)
+{
+ struct ltc294x_info *info = i2c_get_clientdata(client);
+ int ret;
+ u8 value;
+ u8 control;
+
+ /* The LTC2941 does not need any special handling */
+ if (info->id == LTC2941_ID)
+ return;
+
+ /* Read control register */
+ ret = ltc294x_read_regs(info->client, LTC294X_REG_CONTROL, &value, 1);
+ if (ret < 0)
+ return;
+
+ /* Disable continuous ADC conversion as this drains the battery */
+ control = LTC294X_REG_CONTROL_ADC_DISABLE(value);
+ if (control != value)
+ ltc294x_write_regs(info->client, LTC294X_REG_CONTROL,
+ &control, 1);
+}
+
#ifdef CONFIG_PM_SLEEP
static int ltc294x_suspend(struct device *dev)
@@ -589,6 +613,7 @@ static struct i2c_driver ltc294x_driver = {
},
.probe = ltc294x_i2c_probe,
.remove = ltc294x_i2c_remove,
+ .shutdown = ltc294x_i2c_shutdown,
.id_table = ltc294x_i2c_id,
};
module_i2c_driver(ltc294x_driver);
diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c
index 5b556a13f517..35dde81b1c9b 100644
--- a/drivers/power/supply/max17042_battery.c
+++ b/drivers/power/supply/max17042_battery.c
@@ -123,6 +123,8 @@ static int max17042_get_temperature(struct max17042_chip *chip, int *temp)
static int max17042_get_status(struct max17042_chip *chip, int *status)
{
int ret, charge_full, charge_now;
+ int avg_current;
+ u32 data;
ret = power_supply_am_i_supplied(chip->battery);
if (ret < 0) {
@@ -152,10 +154,31 @@ static int max17042_get_status(struct max17042_chip *chip, int *status)
if (ret < 0)
return ret;
- if ((charge_full - charge_now) <= MAX17042_FULL_THRESHOLD)
+ if ((charge_full - charge_now) <= MAX17042_FULL_THRESHOLD) {
*status = POWER_SUPPLY_STATUS_FULL;
- else
+ return 0;
+ }
+
+ /*
+ * Even though we are supplied, we may still be discharging if the
+ * supply is e.g. only delivering 5V 0.5A. Check current if available.
+ */
+ if (!chip->pdata->enable_current_sense) {
*status = POWER_SUPPLY_STATUS_CHARGING;
+ return 0;
+ }
+
+ ret = regmap_read(chip->regmap, MAX17042_AvgCurrent, &data);
+ if (ret < 0)
+ return ret;
+
+ avg_current = sign_extend32(data, 15);
+ avg_current *= 1562500 / chip->pdata->r_sns;
+
+ if (avg_current > 0)
+ *status = POWER_SUPPLY_STATUS_CHARGING;
+ else
+ *status = POWER_SUPPLY_STATUS_DISCHARGING;
return 0;
}
@@ -863,16 +886,13 @@ static void max17042_init_worker(struct work_struct *work)
#ifdef CONFIG_OF
static struct max17042_platform_data *
-max17042_get_pdata(struct max17042_chip *chip)
+max17042_get_of_pdata(struct max17042_chip *chip)
{
struct device *dev = &chip->client->dev;
struct device_node *np = dev->of_node;
u32 prop;
struct max17042_platform_data *pdata;
- if (!np)
- return dev->platform_data;
-
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return NULL;
@@ -897,7 +917,8 @@ max17042_get_pdata(struct max17042_chip *chip)
return pdata;
}
-#else
+#endif
+
static struct max17042_reg_data max17047_default_pdata_init_regs[] = {
/*
* Some firmwares do not set FullSOCThr, Enable End-of-Charge Detection
@@ -907,15 +928,12 @@ static struct max17042_reg_data max17047_default_pdata_init_regs[] = {
};
static struct max17042_platform_data *
-max17042_get_pdata(struct max17042_chip *chip)
+max17042_get_default_pdata(struct max17042_chip *chip)
{
struct device *dev = &chip->client->dev;
struct max17042_platform_data *pdata;
int ret, misc_cfg;
- if (dev->platform_data)
- return dev->platform_data;
-
/*
* The MAX17047 gets used on x86 where we might not have pdata, assume
* the firmware will already have initialized the fuel-gauge and provide
@@ -948,7 +966,21 @@ max17042_get_pdata(struct max17042_chip *chip)
return pdata;
}
+
+static struct max17042_platform_data *
+max17042_get_pdata(struct max17042_chip *chip)
+{
+ struct device *dev = &chip->client->dev;
+
+#ifdef CONFIG_OF
+ if (dev->of_node)
+ return max17042_get_of_pdata(chip);
#endif
+ if (dev->platform_data)
+ return dev->platform_data;
+
+ return max17042_get_default_pdata(chip);
+}
static const struct regmap_config max17042_regmap_config = {
.reg_bits = 8,
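
Aside: in the new status path above, sign_extend32(data, 15) treats bit 15 as the sign of the 16-bit AvgCurrent register, and the 1562500 / r_sns factor converts register LSBs (1.5625 µV across the sense resistor, with r_sns in µΩ) into µA, so the sign of the result alone tells charging from discharging. A worked sketch with illustrative values:

#include <linux/bitops.h>	/* sign_extend32() */

/* Convert a raw 16-bit AvgCurrent reading to uA; r_sns in micro-ohms.
 * Example: data = 0xff38 (-200 LSB), r_sns = 12500 => -25000 uA. */
static int sketch_avg_current_ua(u32 data, int r_sns)
{
	int ua = sign_extend32(data, 15);

	return ua * (1562500 / r_sns);
}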
diff --git a/drivers/power/supply/sbs-manager.c b/drivers/power/supply/sbs-manager.c
index ccb4217b9638..cb6e8f66c7a2 100644
--- a/drivers/power/supply/sbs-manager.c
+++ b/drivers/power/supply/sbs-manager.c
@@ -183,7 +183,7 @@ static int sbsm_select(struct i2c_mux_core *muxc, u32 chan)
return ret;
/* chan goes from 1 ... 4 */
- reg = 1 << BIT(SBSM_SMB_BAT_OFFSET + chan);
+ reg = BIT(SBSM_SMB_BAT_OFFSET + chan);
ret = sbsm_write_word(data->client, SBSM_CMD_BATSYSSTATE, reg);
if (ret)
dev_err(dev, "Failed to select channel %i\n", chan);
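
Aside: the sbs-manager fix above undoes a double shift: BIT(n) already expands to `1UL << n`, so `1 << BIT(n)` computes `1 << (1 << n)` and picks the wrong bit (or, once the inner shift reaches 32 on a 32-bit int, undefined behavior). With the offset assumed to be 1, the arithmetic for the highest channel:

#include <linux/bitops.h>

#define SBSM_SMB_BAT_OFFSET	1	/* assumed value, for illustration */

static u16 sketch_batsysstate(u32 chan)	/* chan goes from 1 ... 4 */
{
	/* chan = 4:
	 *   BIT(SBSM_SMB_BAT_OFFSET + 4)      == 1 << 5    == 0x20 (intended)
	 *   1 << BIT(SBSM_SMB_BAT_OFFSET + 4) == 1 << 0x20 -> undefined for int
	 */
	return BIT(SBSM_SMB_BAT_OFFSET + chan);
}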
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index d1694f1def72..35636e1d8a3d 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -29,6 +29,7 @@
#include <linux/sysfs.h>
#include <linux/cpu.h>
#include <linux/powercap.h>
+#include <linux/suspend.h>
#include <asm/iosf_mbi.h>
#include <asm/processor.h>
@@ -155,6 +156,7 @@ struct rapl_power_limit {
int prim_id; /* primitive ID used to enable */
struct rapl_domain *domain;
const char *name;
+ u64 last_power_limit;
};
static const char pl1_name[] = "long_term";
@@ -1209,7 +1211,7 @@ static int rapl_package_register_powercap(struct rapl_package *rp)
struct rapl_domain *rd;
char dev_name[17]; /* max domain name = 7 + 1 + 8 for int + 1 for null*/
struct powercap_zone *power_zone = NULL;
- int nr_pl, ret;;
+ int nr_pl, ret;
/* Update the domain data of the new package */
rapl_update_domain_data(rp);
@@ -1533,6 +1535,92 @@ static int rapl_cpu_down_prep(unsigned int cpu)
static enum cpuhp_state pcap_rapl_online;
+static void power_limit_state_save(void)
+{
+ struct rapl_package *rp;
+ struct rapl_domain *rd;
+ int nr_pl, ret, i;
+
+ get_online_cpus();
+ list_for_each_entry(rp, &rapl_packages, plist) {
+ if (!rp->power_zone)
+ continue;
+ rd = power_zone_to_rapl_domain(rp->power_zone);
+ nr_pl = find_nr_power_limit(rd);
+ for (i = 0; i < nr_pl; i++) {
+ switch (rd->rpl[i].prim_id) {
+ case PL1_ENABLE:
+ ret = rapl_read_data_raw(rd,
+ POWER_LIMIT1,
+ true,
+ &rd->rpl[i].last_power_limit);
+ if (ret)
+ rd->rpl[i].last_power_limit = 0;
+ break;
+ case PL2_ENABLE:
+ ret = rapl_read_data_raw(rd,
+ POWER_LIMIT2,
+ true,
+ &rd->rpl[i].last_power_limit);
+ if (ret)
+ rd->rpl[i].last_power_limit = 0;
+ break;
+ }
+ }
+ }
+ put_online_cpus();
+}
+
+static void power_limit_state_restore(void)
+{
+ struct rapl_package *rp;
+ struct rapl_domain *rd;
+ int nr_pl, i;
+
+ get_online_cpus();
+ list_for_each_entry(rp, &rapl_packages, plist) {
+ if (!rp->power_zone)
+ continue;
+ rd = power_zone_to_rapl_domain(rp->power_zone);
+ nr_pl = find_nr_power_limit(rd);
+ for (i = 0; i < nr_pl; i++) {
+ switch (rd->rpl[i].prim_id) {
+ case PL1_ENABLE:
+ if (rd->rpl[i].last_power_limit)
+ rapl_write_data_raw(rd,
+ POWER_LIMIT1,
+ rd->rpl[i].last_power_limit);
+ break;
+ case PL2_ENABLE:
+ if (rd->rpl[i].last_power_limit)
+ rapl_write_data_raw(rd,
+ POWER_LIMIT2,
+ rd->rpl[i].last_power_limit);
+ break;
+ }
+ }
+ }
+ put_online_cpus();
+}
+
+static int rapl_pm_callback(struct notifier_block *nb,
+ unsigned long mode, void *_unused)
+{
+ switch (mode) {
+ case PM_SUSPEND_PREPARE:
+ power_limit_state_save();
+ break;
+ case PM_POST_SUSPEND:
+ power_limit_state_restore();
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block rapl_pm_notifier = {
+ .notifier_call = rapl_pm_callback,
+};
+
static int __init rapl_init(void)
{
const struct x86_cpu_id *id;
@@ -1560,8 +1648,16 @@ static int __init rapl_init(void)
/* Don't bail out if PSys is not supported */
rapl_register_psys();
+
+ ret = register_pm_notifier(&rapl_pm_notifier);
+ if (ret)
+ goto err_unreg_all;
+
return 0;
+err_unreg_all:
+ cpuhp_remove_state(pcap_rapl_online);
+
err_unreg:
rapl_unregister_powercap();
return ret;
@@ -1569,6 +1665,7 @@ err_unreg:
static void __exit rapl_exit(void)
{
+ unregister_pm_notifier(&rapl_pm_notifier);
cpuhp_remove_state(pcap_rapl_online);
rapl_unregister_powercap();
}
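
Aside: the save/restore pair above hangs off a PM notifier rather than dev_pm_ops because RAPL limits live in package MSRs, not in a struct device: PM_SUSPEND_PREPARE fires early in the suspend sequence and PM_POST_SUSPEND after resume (including after an aborted suspend), which lets the driver re-apply limits that firmware may have reset across S3. The bare mechanism, as a sketch:

#include <linux/notifier.h>
#include <linux/suspend.h>

static int sketch_pm_callback(struct notifier_block *nb,
			      unsigned long mode, void *unused)
{
	switch (mode) {
	case PM_SUSPEND_PREPARE:	/* entering suspend: save state */
		break;
	case PM_POST_SUSPEND:		/* back from suspend: restore state */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block sketch_pm_nb = {
	.notifier_call = sketch_pm_callback,
};

/* register_pm_notifier(&sketch_pm_nb) on init;
 * unregister_pm_notifier(&sketch_pm_nb) on exit. */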
diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
index 5b10b50f8686..64b2b2501a79 100644
--- a/drivers/powercap/powercap_sys.c
+++ b/drivers/powercap/powercap_sys.c
@@ -673,15 +673,13 @@ EXPORT_SYMBOL_GPL(powercap_unregister_control_type);
static int __init powercap_init(void)
{
- int result = 0;
+ int result;
result = seed_constraint_attributes();
if (result)
return result;
- result = class_register(&powercap_class);
-
- return result;
+ return class_register(&powercap_class);
}
device_initcall(powercap_init);
diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
index 6eb0db37dd88..1d42385b1aa5 100644
--- a/drivers/pps/pps.c
+++ b/drivers/pps/pps.c
@@ -49,7 +49,7 @@ static DEFINE_IDR(pps_idr);
* Char device methods
*/
-static unsigned int pps_cdev_poll(struct file *file, poll_table *wait)
+static __poll_t pps_cdev_poll(struct file *file, poll_table *wait)
{
struct pps_device *pps = file->private_data;
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 58a97d420572..a593b4cf47bf 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -280,7 +280,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
return err;
}
-unsigned int ptp_poll(struct posix_clock *pc, struct file *fp, poll_table *wait)
+__poll_t ptp_poll(struct posix_clock *pc, struct file *fp, poll_table *wait)
{
struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
index b86f1bfecd6f..c7c62b782cb9 100644
--- a/drivers/ptp/ptp_private.h
+++ b/drivers/ptp/ptp_private.h
@@ -90,7 +90,7 @@ int ptp_open(struct posix_clock *pc, fmode_t fmode);
ssize_t ptp_read(struct posix_clock *pc,
uint flags, char __user *buf, size_t cnt);
-uint ptp_poll(struct posix_clock *pc,
+__poll_t ptp_poll(struct posix_clock *pc,
struct file *fp, poll_table *wait);
/*
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index ec4bc1515f0d..6092b3a5978e 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -2319,7 +2319,7 @@ static int mport_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
return ret;
}
-static unsigned int mport_cdev_poll(struct file *filp, poll_table *wait)
+static __poll_t mport_cdev_poll(struct file *filp, poll_table *wait)
{
struct mport_cdev_priv *priv = filp->private_data;
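
Aside: the pps, ptp and rio_mport hunks above all belong to the tree-wide conversion of poll handlers from `unsigned int` to the bitwise-annotated `__poll_t`, which lets sparse flag code that mixes poll masks with plain integers. A representative handler under the new signature; the device struct and its fields are hypothetical:

#include <linux/poll.h>

struct sketch_dev {
	wait_queue_head_t waitq;
	bool data_ready;
};

static __poll_t sketch_poll(struct file *filp, poll_table *wait)
{
	struct sketch_dev *dev = filp->private_data;
	__poll_t mask = 0;

	poll_wait(filp, &dev->waitq, wait);
	if (dev->data_ready)
		mask |= EPOLLIN | EPOLLRDNORM;	/* readable without blocking */

	return mask;
}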
diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c
index ca44e6977cf2..2d9ec378a8bc 100644
--- a/drivers/ras/cec.c
+++ b/drivers/ras/cec.c
@@ -327,7 +327,7 @@ int cec_add_elem(u64 pfn)
} else {
/* We have reached max count for this page, soft-offline it. */
pr_err("Soft-offlining pfn: 0x%llx\n", pfn);
- memory_failure_queue(pfn, 0, MF_SOFT_OFFLINE);
+ memory_failure_queue(pfn, MF_SOFT_OFFLINE);
ca->pfns_poisoned++;
}
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 96cd55f9e3c5..b27417ca188a 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -744,6 +744,13 @@ config REGULATOR_S5M8767
via I2C bus. S5M8767A have 9 Bucks and 28 LDOs output and
supports DVS mode with 8bits of output voltage control.
+config REGULATOR_SC2731
+ tristate "Spreadtrum SC2731 power regulator driver"
+ depends on MFD_SC27XX_PMIC || COMPILE_TEST
+ help
+ This driver provides support for the voltage regulators on the
+ SC2731 PMIC.
+
config REGULATOR_SKY81452
tristate "Skyworks Solutions SKY81452 voltage regulator"
depends on MFD_SKY81452
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 80ffc57a9ca3..19fea09ba10a 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -95,6 +95,7 @@ obj-$(CONFIG_REGULATOR_RT5033) += rt5033-regulator.o
obj-$(CONFIG_REGULATOR_S2MPA01) += s2mpa01.o
obj-$(CONFIG_REGULATOR_S2MPS11) += s2mps11.o
obj-$(CONFIG_REGULATOR_S5M8767) += s5m8767.o
+obj-$(CONFIG_REGULATOR_SC2731) += sc2731-regulator.o
obj-$(CONFIG_REGULATOR_SKY81452) += sky81452-regulator.o
obj-$(CONFIG_REGULATOR_STM32_VREFBUF) += stm32-vrefbuf.o
obj-$(CONFIG_REGULATOR_STW481X_VMMC) += stw481x-vmmc.o
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index b64b7916507f..42681c10cbe4 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -58,8 +58,6 @@ static bool has_full_constraints;
static struct dentry *debugfs_root;
-static struct class regulator_class;
-
/*
* struct regulator_map
*
@@ -112,11 +110,6 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
const char *supply_name);
static void _regulator_put(struct regulator *regulator);
-static struct regulator_dev *dev_to_rdev(struct device *dev)
-{
- return container_of(dev, struct regulator_dev, dev);
-}
-
static const char *rdev_get_name(struct regulator_dev *rdev)
{
if (rdev->constraints && rdev->constraints->name)
@@ -236,26 +229,35 @@ static int regulator_check_voltage(struct regulator_dev *rdev,
return 0;
}
+/* return 0 if the state is valid */
+static int regulator_check_states(suspend_state_t state)
+{
+ return (state > PM_SUSPEND_MAX || state == PM_SUSPEND_TO_IDLE);
+}
+
/* Make sure we select a voltage that suits the needs of all
* regulator consumers
*/
static int regulator_check_consumers(struct regulator_dev *rdev,
- int *min_uV, int *max_uV)
+ int *min_uV, int *max_uV,
+ suspend_state_t state)
{
struct regulator *regulator;
+ struct regulator_voltage *voltage;
list_for_each_entry(regulator, &rdev->consumer_list, list) {
+ voltage = &regulator->voltage[state];
/*
* Assume consumers that didn't say anything are OK
* with anything in the constraint range.
*/
- if (!regulator->min_uV && !regulator->max_uV)
+ if (!voltage->min_uV && !voltage->max_uV)
continue;
- if (*max_uV > regulator->max_uV)
- *max_uV = regulator->max_uV;
- if (*min_uV < regulator->min_uV)
- *min_uV = regulator->min_uV;
+ if (*max_uV > voltage->max_uV)
+ *max_uV = voltage->max_uV;
+ if (*min_uV < voltage->min_uV)
+ *min_uV = voltage->min_uV;
}
if (*min_uV > *max_uV) {
@@ -324,6 +326,24 @@ static int regulator_mode_constrain(struct regulator_dev *rdev,
return -EINVAL;
}
+static inline struct regulator_state *
+regulator_get_suspend_state(struct regulator_dev *rdev, suspend_state_t state)
+{
+ if (rdev->constraints == NULL)
+ return NULL;
+
+ switch (state) {
+ case PM_SUSPEND_STANDBY:
+ return &rdev->constraints->state_standby;
+ case PM_SUSPEND_MEM:
+ return &rdev->constraints->state_mem;
+ case PM_SUSPEND_MAX:
+ return &rdev->constraints->state_disk;
+ default:
+ return NULL;
+ }
+}
+
static ssize_t regulator_uV_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -731,29 +751,32 @@ static int drms_uA_update(struct regulator_dev *rdev)
}
static int suspend_set_state(struct regulator_dev *rdev,
- struct regulator_state *rstate)
+ suspend_state_t state)
{
int ret = 0;
+ struct regulator_state *rstate;
+
+ rstate = regulator_get_suspend_state(rdev, state);
+ if (rstate == NULL)
+ return -EINVAL;
/* If we have no suspend mode configuration don't set anything;
* only warn if the driver implements set_suspend_voltage or
* set_suspend_mode callback.
*/
- if (!rstate->enabled && !rstate->disabled) {
+ if (rstate->enabled != ENABLE_IN_SUSPEND &&
+ rstate->enabled != DISABLE_IN_SUSPEND) {
if (rdev->desc->ops->set_suspend_voltage ||
rdev->desc->ops->set_suspend_mode)
rdev_warn(rdev, "No configuration\n");
return 0;
}
- if (rstate->enabled && rstate->disabled) {
- rdev_err(rdev, "invalid configuration\n");
- return -EINVAL;
- }
-
- if (rstate->enabled && rdev->desc->ops->set_suspend_enable)
+ if (rstate->enabled == ENABLE_IN_SUSPEND &&
+ rdev->desc->ops->set_suspend_enable)
ret = rdev->desc->ops->set_suspend_enable(rdev);
- else if (rstate->disabled && rdev->desc->ops->set_suspend_disable)
+ else if (rstate->enabled == DISABLE_IN_SUSPEND &&
+ rdev->desc->ops->set_suspend_disable)
ret = rdev->desc->ops->set_suspend_disable(rdev);
else /* OK if set_suspend_enable or set_suspend_disable is NULL */
ret = 0;
@@ -778,28 +801,8 @@ static int suspend_set_state(struct regulator_dev *rdev,
return ret;
}
}
- return ret;
-}
-
-/* locks held by caller */
-static int suspend_prepare(struct regulator_dev *rdev, suspend_state_t state)
-{
- if (!rdev->constraints)
- return -EINVAL;
- switch (state) {
- case PM_SUSPEND_STANDBY:
- return suspend_set_state(rdev,
- &rdev->constraints->state_standby);
- case PM_SUSPEND_MEM:
- return suspend_set_state(rdev,
- &rdev->constraints->state_mem);
- case PM_SUSPEND_MAX:
- return suspend_set_state(rdev,
- &rdev->constraints->state_disk);
- default:
- return -EINVAL;
- }
+ return ret;
}
static void print_constraints(struct regulator_dev *rdev)
@@ -1068,7 +1071,7 @@ static int set_machine_constraints(struct regulator_dev *rdev,
/* do we need to setup our suspend state */
if (rdev->constraints->initial_state) {
- ret = suspend_prepare(rdev, rdev->constraints->initial_state);
+ ret = suspend_set_state(rdev, rdev->constraints->initial_state);
if (ret < 0) {
rdev_err(rdev, "failed to set suspend state\n");
return ret;
@@ -1356,9 +1359,9 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
debugfs_create_u32("uA_load", 0444, regulator->debugfs,
&regulator->uA_load);
debugfs_create_u32("min_uV", 0444, regulator->debugfs,
- &regulator->min_uV);
+ &regulator->voltage[PM_SUSPEND_ON].min_uV);
debugfs_create_u32("max_uV", 0444, regulator->debugfs,
- &regulator->max_uV);
+ &regulator->voltage[PM_SUSPEND_ON].max_uV);
debugfs_create_file("constraint_flags", 0444,
regulator->debugfs, regulator,
&constraint_flags_fops);
@@ -1417,20 +1420,6 @@ static void regulator_supply_alias(struct device **dev, const char **supply)
}
}
-static int of_node_match(struct device *dev, const void *data)
-{
- return dev->of_node == data;
-}
-
-static struct regulator_dev *of_find_regulator_by_node(struct device_node *np)
-{
- struct device *dev;
-
- dev = class_find_device(&regulator_class, NULL, np, of_node_match);
-
- return dev ? dev_to_rdev(dev) : NULL;
-}
-
static int regulator_match(struct device *dev, const void *data)
{
struct regulator_dev *r = dev_to_rdev(dev);
@@ -2468,10 +2457,9 @@ static int _regulator_is_enabled(struct regulator_dev *rdev)
return rdev->desc->ops->is_enabled(rdev);
}
-static int _regulator_list_voltage(struct regulator *regulator,
- unsigned selector, int lock)
+static int _regulator_list_voltage(struct regulator_dev *rdev,
+ unsigned selector, int lock)
{
- struct regulator_dev *rdev = regulator->rdev;
const struct regulator_ops *ops = rdev->desc->ops;
int ret;
@@ -2487,7 +2475,8 @@ static int _regulator_list_voltage(struct regulator *regulator,
if (lock)
mutex_unlock(&rdev->mutex);
} else if (rdev->is_switch && rdev->supply) {
- ret = _regulator_list_voltage(rdev->supply, selector, lock);
+ ret = _regulator_list_voltage(rdev->supply->rdev,
+ selector, lock);
} else {
return -EINVAL;
}
@@ -2563,7 +2552,7 @@ EXPORT_SYMBOL_GPL(regulator_count_voltages);
*/
int regulator_list_voltage(struct regulator *regulator, unsigned selector)
{
- return _regulator_list_voltage(regulator, selector, 1);
+ return _regulator_list_voltage(regulator->rdev, selector, 1);
}
EXPORT_SYMBOL_GPL(regulator_list_voltage);
@@ -2605,8 +2594,8 @@ int regulator_get_hardware_vsel_register(struct regulator *regulator,
if (ops->set_voltage_sel != regulator_set_voltage_sel_regmap)
return -EOPNOTSUPP;
- *vsel_reg = rdev->desc->vsel_reg;
- *vsel_mask = rdev->desc->vsel_mask;
+ *vsel_reg = rdev->desc->vsel_reg;
+ *vsel_mask = rdev->desc->vsel_mask;
return 0;
}
@@ -2897,10 +2886,38 @@ out:
return ret;
}
+static int _regulator_do_set_suspend_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV, suspend_state_t state)
+{
+ struct regulator_state *rstate;
+ int uV, sel;
+
+ rstate = regulator_get_suspend_state(rdev, state);
+ if (rstate == NULL)
+ return -EINVAL;
+
+ if (min_uV < rstate->min_uV)
+ min_uV = rstate->min_uV;
+ if (max_uV > rstate->max_uV)
+ max_uV = rstate->max_uV;
+
+ sel = regulator_map_voltage(rdev, min_uV, max_uV);
+ if (sel < 0)
+ return sel;
+
+ uV = rdev->desc->ops->list_voltage(rdev, sel);
+ if (uV >= min_uV && uV <= max_uV)
+ rstate->uV = uV;
+
+ return 0;
+}
+
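
Note that _regulator_do_set_suspend_voltage() only records the agreed value in rstate->uV; the hardware is untouched until suspend entry, when suspend_set_state() can hand the cached value to the driver. A sketch of that deferred step, assuming the set_suspend_voltage() op's usual signature:

    /* Sketch of the deferred write performed at suspend entry. */
    if (rstate->uV > 0 && rdev->desc->ops->set_suspend_voltage)
    	ret = rdev->desc->ops->set_suspend_voltage(rdev, rstate->uV);
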
static int regulator_set_voltage_unlocked(struct regulator *regulator,
- int min_uV, int max_uV)
+ int min_uV, int max_uV,
+ suspend_state_t state)
{
struct regulator_dev *rdev = regulator->rdev;
+ struct regulator_voltage *voltage = &regulator->voltage[state];
int ret = 0;
int old_min_uV, old_max_uV;
int current_uV;
@@ -2911,7 +2928,7 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
* should be a noop (some cpufreq implementations use the same
* voltage for multiple frequencies, for example).
*/
- if (regulator->min_uV == min_uV && regulator->max_uV == max_uV)
+ if (voltage->min_uV == min_uV && voltage->max_uV == max_uV)
goto out;
/* If we're trying to set a range that overlaps the current voltage,
@@ -2921,8 +2938,8 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_VOLTAGE)) {
current_uV = _regulator_get_voltage(rdev);
if (min_uV <= current_uV && current_uV <= max_uV) {
- regulator->min_uV = min_uV;
- regulator->max_uV = max_uV;
+ voltage->min_uV = min_uV;
+ voltage->max_uV = max_uV;
goto out;
}
}
@@ -2940,12 +2957,12 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
goto out;
/* restore original values in case of error */
- old_min_uV = regulator->min_uV;
- old_max_uV = regulator->max_uV;
- regulator->min_uV = min_uV;
- regulator->max_uV = max_uV;
+ old_min_uV = voltage->min_uV;
+ old_max_uV = voltage->max_uV;
+ voltage->min_uV = min_uV;
+ voltage->max_uV = max_uV;
- ret = regulator_check_consumers(rdev, &min_uV, &max_uV);
+ ret = regulator_check_consumers(rdev, &min_uV, &max_uV, state);
if (ret < 0)
goto out2;
@@ -2963,7 +2980,7 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
goto out2;
}
- best_supply_uV = _regulator_list_voltage(regulator, selector, 0);
+ best_supply_uV = _regulator_list_voltage(rdev, selector, 0);
if (best_supply_uV < 0) {
ret = best_supply_uV;
goto out2;
@@ -2982,7 +2999,7 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
if (supply_change_uV > 0) {
ret = regulator_set_voltage_unlocked(rdev->supply,
- best_supply_uV, INT_MAX);
+ best_supply_uV, INT_MAX, state);
if (ret) {
dev_err(&rdev->dev, "Failed to increase supply voltage: %d\n",
ret);
@@ -2990,13 +3007,17 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
}
}
- ret = _regulator_do_set_voltage(rdev, min_uV, max_uV);
+ if (state == PM_SUSPEND_ON)
+ ret = _regulator_do_set_voltage(rdev, min_uV, max_uV);
+ else
+ ret = _regulator_do_set_suspend_voltage(rdev, min_uV,
+ max_uV, state);
if (ret < 0)
goto out2;
if (supply_change_uV < 0) {
ret = regulator_set_voltage_unlocked(rdev->supply,
- best_supply_uV, INT_MAX);
+ best_supply_uV, INT_MAX, state);
if (ret)
dev_warn(&rdev->dev, "Failed to decrease supply voltage: %d\n",
ret);
@@ -3007,8 +3028,8 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
out:
return ret;
out2:
- regulator->min_uV = old_min_uV;
- regulator->max_uV = old_max_uV;
+ voltage->min_uV = old_min_uV;
+ voltage->max_uV = old_max_uV;
return ret;
}
@@ -3037,7 +3058,8 @@ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
regulator_lock_supply(regulator->rdev);
- ret = regulator_set_voltage_unlocked(regulator, min_uV, max_uV);
+ ret = regulator_set_voltage_unlocked(regulator, min_uV, max_uV,
+ PM_SUSPEND_ON);
regulator_unlock_supply(regulator->rdev);
@@ -3045,6 +3067,89 @@ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
}
EXPORT_SYMBOL_GPL(regulator_set_voltage);
+static inline int regulator_suspend_toggle(struct regulator_dev *rdev,
+ suspend_state_t state, bool en)
+{
+ struct regulator_state *rstate;
+
+ rstate = regulator_get_suspend_state(rdev, state);
+ if (rstate == NULL)
+ return -EINVAL;
+
+ if (!rstate->changeable)
+ return -EPERM;
+
+	rstate->enabled = en ? ENABLE_IN_SUSPEND : DISABLE_IN_SUSPEND;
+
+ return 0;
+}
+
+int regulator_suspend_enable(struct regulator_dev *rdev,
+ suspend_state_t state)
+{
+ return regulator_suspend_toggle(rdev, state, true);
+}
+EXPORT_SYMBOL_GPL(regulator_suspend_enable);
+
+int regulator_suspend_disable(struct regulator_dev *rdev,
+ suspend_state_t state)
+{
+ struct regulator *regulator;
+ struct regulator_voltage *voltage;
+
+ /*
+	 * If any consumer wants this regulator device kept on in
+	 * suspend states, don't mark it as disabled.
+ */
+ list_for_each_entry(regulator, &rdev->consumer_list, list) {
+ voltage = &regulator->voltage[state];
+ if (voltage->min_uV || voltage->max_uV)
+ return 0;
+ }
+
+ return regulator_suspend_toggle(rdev, state, false);
+}
+EXPORT_SYMBOL_GPL(regulator_suspend_disable);
+
+static int _regulator_set_suspend_voltage(struct regulator *regulator,
+ int min_uV, int max_uV,
+ suspend_state_t state)
+{
+ struct regulator_dev *rdev = regulator->rdev;
+ struct regulator_state *rstate;
+
+ rstate = regulator_get_suspend_state(rdev, state);
+ if (rstate == NULL)
+ return -EINVAL;
+
+ if (rstate->min_uV == rstate->max_uV) {
+ rdev_err(rdev, "The suspend voltage can't be changed!\n");
+ return -EPERM;
+ }
+
+ return regulator_set_voltage_unlocked(regulator, min_uV, max_uV, state);
+}
+
+int regulator_set_suspend_voltage(struct regulator *regulator, int min_uV,
+ int max_uV, suspend_state_t state)
+{
+ int ret = 0;
+
+ /* PM_SUSPEND_ON is handled by regulator_set_voltage() */
+ if (regulator_check_states(state) || state == PM_SUSPEND_ON)
+ return -EINVAL;
+
+ regulator_lock_supply(regulator->rdev);
+
+ ret = _regulator_set_suspend_voltage(regulator, min_uV,
+ max_uV, state);
+
+ regulator_unlock_supply(regulator->rdev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regulator_set_suspend_voltage);
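
Taken together, a consumer would exercise the new interface roughly as follows. Everything here is hypothetical illustration: the supply name, voltages, and driver function are made up.

    /* Hypothetical consumer: request 0.9 V on "vdd" during suspend-to-RAM. */
    static int foo_config_suspend(struct device *dev)
    {
    	struct regulator *vdd;
    	int ret;

    	vdd = devm_regulator_get(dev, "vdd");
    	if (IS_ERR(vdd))
    		return PTR_ERR(vdd);

    	ret = regulator_set_suspend_voltage(vdd, 900000, 900000,
    					    PM_SUSPEND_MEM);
    	if (ret)
    		dev_warn(dev, "no suspend voltage control: %d\n", ret);

    	return 0;
    }
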
+
/**
* regulator_set_voltage_time - get raise/fall time
* @regulator: regulator source
@@ -3138,6 +3243,7 @@ EXPORT_SYMBOL_GPL(regulator_set_voltage_time_sel);
int regulator_sync_voltage(struct regulator *regulator)
{
struct regulator_dev *rdev = regulator->rdev;
+ struct regulator_voltage *voltage = &regulator->voltage[PM_SUSPEND_ON];
int ret, min_uV, max_uV;
mutex_lock(&rdev->mutex);
@@ -3149,20 +3255,20 @@ int regulator_sync_voltage(struct regulator *regulator)
}
/* This is only going to work if we've had a voltage configured. */
- if (!regulator->min_uV && !regulator->max_uV) {
+ if (!voltage->min_uV && !voltage->max_uV) {
ret = -EINVAL;
goto out;
}
- min_uV = regulator->min_uV;
- max_uV = regulator->max_uV;
+ min_uV = voltage->min_uV;
+ max_uV = voltage->max_uV;
/* This should be a paranoia check... */
ret = regulator_check_voltage(rdev, &min_uV, &max_uV);
if (ret < 0)
goto out;
- ret = regulator_check_consumers(rdev, &min_uV, &max_uV);
+	ret = regulator_check_consumers(rdev, &min_uV, &max_uV, PM_SUSPEND_ON);
if (ret < 0)
goto out;
@@ -3918,12 +4024,6 @@ static void regulator_dev_release(struct device *dev)
kfree(rdev);
}
-static struct class regulator_class = {
- .name = "regulator",
- .dev_release = regulator_dev_release,
- .dev_groups = regulator_dev_groups,
-};
-
static void rdev_init_debugfs(struct regulator_dev *rdev)
{
struct device *parent = rdev->dev.parent;
@@ -4174,81 +4274,86 @@ void regulator_unregister(struct regulator_dev *rdev)
}
EXPORT_SYMBOL_GPL(regulator_unregister);
-static int _regulator_suspend_prepare(struct device *dev, void *data)
+#ifdef CONFIG_SUSPEND
+static int _regulator_suspend_late(struct device *dev, void *data)
{
struct regulator_dev *rdev = dev_to_rdev(dev);
- const suspend_state_t *state = data;
+ suspend_state_t *state = data;
int ret;
mutex_lock(&rdev->mutex);
- ret = suspend_prepare(rdev, *state);
+ ret = suspend_set_state(rdev, *state);
mutex_unlock(&rdev->mutex);
return ret;
}
/**
- * regulator_suspend_prepare - prepare regulators for system wide suspend
+ * regulator_suspend_late - prepare regulators for system wide suspend
- * @state: system suspend state
 *
- * Configure each regulator with it's suspend operating parameters for state.
+ * Configure each regulator with its suspend operating parameters for the
+ * target system suspend state.
- * This will usually be called by machine suspend code prior to supending.
*/
-int regulator_suspend_prepare(suspend_state_t state)
+static int regulator_suspend_late(struct device *dev)
{
- /* ON is handled by regulator active state */
- if (state == PM_SUSPEND_ON)
- return -EINVAL;
+ suspend_state_t state = pm_suspend_target_state;
return class_for_each_device(&regulator_class, NULL, &state,
- _regulator_suspend_prepare);
+ _regulator_suspend_late);
}
-EXPORT_SYMBOL_GPL(regulator_suspend_prepare);
-
-static int _regulator_suspend_finish(struct device *dev, void *data)
+static int _regulator_resume_early(struct device *dev, void *data)
{
+ int ret = 0;
struct regulator_dev *rdev = dev_to_rdev(dev);
- int ret;
+ suspend_state_t *state = data;
+ struct regulator_state *rstate;
+
+ rstate = regulator_get_suspend_state(rdev, *state);
+ if (rstate == NULL)
+ return -EINVAL;
mutex_lock(&rdev->mutex);
- if (rdev->use_count > 0 || rdev->constraints->always_on) {
- if (!_regulator_is_enabled(rdev)) {
- ret = _regulator_do_enable(rdev);
- if (ret)
- dev_err(dev,
- "Failed to resume regulator %d\n",
- ret);
- }
- } else {
- if (!have_full_constraints())
- goto unlock;
- if (!_regulator_is_enabled(rdev))
- goto unlock;
- ret = _regulator_do_disable(rdev);
- if (ret)
- dev_err(dev, "Failed to suspend regulator %d\n", ret);
- }
-unlock:
+ if (rdev->desc->ops->resume_early &&
+ (rstate->enabled == ENABLE_IN_SUSPEND ||
+ rstate->enabled == DISABLE_IN_SUSPEND))
+ ret = rdev->desc->ops->resume_early(rdev);
+
mutex_unlock(&rdev->mutex);
- /* Keep processing regulators in spite of any errors */
- return 0;
+ return ret;
}
-/**
- * regulator_suspend_finish - resume regulators from system wide suspend
- *
- * Turn on regulators that might be turned off by regulator_suspend_prepare
- * and that should be turned on according to the regulators properties.
- */
-int regulator_suspend_finish(void)
+static int regulator_resume_early(struct device *dev)
{
- return class_for_each_device(&regulator_class, NULL, NULL,
- _regulator_suspend_finish);
+ suspend_state_t state = pm_suspend_target_state;
+
+ return class_for_each_device(&regulator_class, NULL, &state,
+ _regulator_resume_early);
}
-EXPORT_SYMBOL_GPL(regulator_suspend_finish);
+#else /* !CONFIG_SUSPEND */
+
+#define regulator_suspend_late NULL
+#define regulator_resume_early NULL
+
+#endif /* !CONFIG_SUSPEND */
+
+#ifdef CONFIG_PM
+static const struct dev_pm_ops __maybe_unused regulator_pm_ops = {
+ .suspend_late = regulator_suspend_late,
+ .resume_early = regulator_resume_early,
+};
+#endif
+
+struct class regulator_class = {
+ .name = "regulator",
+ .dev_release = regulator_dev_release,
+ .dev_groups = regulator_dev_groups,
+#ifdef CONFIG_PM
+ .pm = &regulator_pm_ops,
+#endif
+};
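
The explicit NULL defines plus #ifdef pair above are one way to wire this; a common alternative shape uses the SET_LATE_SYSTEM_SLEEP_PM_OPS() helper from linux/pm.h, which compiles the callbacks away under !CONFIG_PM_SLEEP (and additionally wires the hibernation late/early phases). An equivalent sketch, not what this patch does:

    static const struct dev_pm_ops regulator_pm_ops = {
    	SET_LATE_SYSTEM_SLEEP_PM_OPS(regulator_suspend_late,
    				     regulator_resume_early)
    };
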
/**
* regulator_has_full_constraints - the system has fully specified constraints
*
@@ -4424,8 +4529,8 @@ static void regulator_summary_show_subtree(struct seq_file *s,
switch (rdev->desc->type) {
case REGULATOR_VOLTAGE:
seq_printf(s, "%37dmV %5dmV",
- consumer->min_uV / 1000,
- consumer->max_uV / 1000);
+ consumer->voltage[PM_SUSPEND_ON].min_uV / 1000,
+ consumer->voltage[PM_SUSPEND_ON].max_uV / 1000);
break;
case REGULATOR_CURRENT:
break;
diff --git a/drivers/regulator/internal.h b/drivers/regulator/internal.h
index 66a8ea0c8386..abfd56e8c78a 100644
--- a/drivers/regulator/internal.h
+++ b/drivers/regulator/internal.h
@@ -16,10 +16,25 @@
#ifndef __REGULATOR_INTERNAL_H
#define __REGULATOR_INTERNAL_H
+#include <linux/suspend.h>
+
+#define REGULATOR_STATES_NUM (PM_SUSPEND_MAX + 1)
+
+struct regulator_voltage {
+ int min_uV;
+ int max_uV;
+};
+
/*
* struct regulator
*
* One for each consumer device.
+ * @voltage - array of voltage constraints, one slot per suspend state, i.e.:
+ * PM_SUSPEND_ON
+ * PM_SUSPEND_TO_IDLE
+ * PM_SUSPEND_STANDBY
+ * PM_SUSPEND_MEM
+ * PM_SUSPEND_MAX
*/
struct regulator {
struct device *dev;
@@ -27,14 +42,22 @@ struct regulator {
unsigned int always_on:1;
unsigned int bypass:1;
int uA_load;
- int min_uV;
- int max_uV;
+ struct regulator_voltage voltage[REGULATOR_STATES_NUM];
const char *supply_name;
struct device_attribute dev_attr;
struct regulator_dev *rdev;
struct dentry *debugfs;
};
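
For orientation, the array is indexed directly by suspend_state_t; the values below are quoted from linux/suspend.h as of this series and are the reason REGULATOR_STATES_NUM is PM_SUSPEND_MAX + 1. Illustrative fragment:

    /* suspend_state_t values (linux/suspend.h): PM_SUSPEND_ON = 0,
     * PM_SUSPEND_TO_IDLE = 1, PM_SUSPEND_STANDBY = 2, PM_SUSPEND_MEM = 3,
     * PM_SUSPEND_MAX = 4; so voltage[] has one slot per state plus "on".
     */
    struct regulator_voltage *v = &regulator->voltage[PM_SUSPEND_MEM];
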
+extern struct class regulator_class;
+
+static inline struct regulator_dev *dev_to_rdev(struct device *dev)
+{
+ return container_of(dev, struct regulator_dev, dev);
+}
+
+struct regulator_dev *of_find_regulator_by_node(struct device_node *np);
+
#ifdef CONFIG_OF
struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
const struct regulator_desc *desc,
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 14637a01ba2d..092ed6efb3ec 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -177,14 +177,30 @@ static void of_get_regulation_constraints(struct device_node *np,
if (of_property_read_bool(suspend_np,
"regulator-on-in-suspend"))
- suspend_state->enabled = true;
+ suspend_state->enabled = ENABLE_IN_SUSPEND;
else if (of_property_read_bool(suspend_np,
"regulator-off-in-suspend"))
- suspend_state->disabled = true;
+ suspend_state->enabled = DISABLE_IN_SUSPEND;
+ else
+ suspend_state->enabled = DO_NOTHING_IN_SUSPEND;
+
+ if (!of_property_read_u32(np, "regulator-suspend-min-microvolt",
+ &pval))
+ suspend_state->min_uV = pval;
+
+ if (!of_property_read_u32(np, "regulator-suspend-max-microvolt",
+ &pval))
+ suspend_state->max_uV = pval;
if (!of_property_read_u32(suspend_np,
"regulator-suspend-microvolt", &pval))
suspend_state->uV = pval;
+ else /* otherwise use min_uV as default suspend voltage */
+ suspend_state->uV = suspend_state->min_uV;
+
+ if (of_property_read_bool(suspend_np,
+ "regulator-changeable-in-suspend"))
+ suspend_state->changeable = true;
if (i == PM_SUSPEND_MEM)
constraints->initial_state = PM_SUSPEND_MEM;
@@ -376,3 +392,17 @@ struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
return init_data;
}
+
+static int of_node_match(struct device *dev, const void *data)
+{
+ return dev->of_node == data;
+}
+
+struct regulator_dev *of_find_regulator_by_node(struct device_node *np)
+{
+ struct device *dev;
+
+ dev = class_find_device(&regulator_class, NULL, np, of_node_match);
+
+ return dev ? dev_to_rdev(dev) : NULL;
+}
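
One caveat for callers: class_find_device() returns the matched device with a reference held (it calls get_device() on the match), so users of of_find_regulator_by_node() are expected to balance it. A hypothetical caller:

    struct regulator_dev *rdev = of_find_regulator_by_node(np);

    if (rdev) {
    	/* ... use rdev ... */
    	put_device(&rdev->dev);
    }
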
diff --git a/drivers/regulator/qcom_spmi-regulator.c b/drivers/regulator/qcom_spmi-regulator.c
index 0241ada47d04..63c7a0c17777 100644
--- a/drivers/regulator/qcom_spmi-regulator.c
+++ b/drivers/regulator/qcom_spmi-regulator.c
@@ -486,24 +486,6 @@ static int spmi_vreg_update_bits(struct spmi_regulator *vreg, u16 addr, u8 val,
return regmap_update_bits(vreg->regmap, vreg->base + addr, mask, val);
}
-static int spmi_regulator_common_is_enabled(struct regulator_dev *rdev)
-{
- struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
- u8 reg;
-
- spmi_vreg_read(vreg, SPMI_COMMON_REG_ENABLE, &reg, 1);
-
- return (reg & SPMI_COMMON_ENABLE_MASK) == SPMI_COMMON_ENABLE;
-}
-
-static int spmi_regulator_common_enable(struct regulator_dev *rdev)
-{
- struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
-
- return spmi_vreg_update_bits(vreg, SPMI_COMMON_REG_ENABLE,
- SPMI_COMMON_ENABLE, SPMI_COMMON_ENABLE_MASK);
-}
-
static int spmi_regulator_vs_enable(struct regulator_dev *rdev)
{
struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
@@ -513,7 +495,7 @@ static int spmi_regulator_vs_enable(struct regulator_dev *rdev)
vreg->vs_enable_time = ktime_get();
}
- return spmi_regulator_common_enable(rdev);
+ return regulator_enable_regmap(rdev);
}
static int spmi_regulator_vs_ocp(struct regulator_dev *rdev)
@@ -524,14 +506,6 @@ static int spmi_regulator_vs_ocp(struct regulator_dev *rdev)
return spmi_vreg_write(vreg, SPMI_VS_REG_OCP, &reg, 1);
}
-static int spmi_regulator_common_disable(struct regulator_dev *rdev)
-{
- struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
-
- return spmi_vreg_update_bits(vreg, SPMI_COMMON_REG_ENABLE,
- SPMI_COMMON_DISABLE, SPMI_COMMON_ENABLE_MASK);
-}
-
static int spmi_regulator_select_voltage(struct spmi_regulator *vreg,
int min_uV, int max_uV)
{
@@ -1062,9 +1036,9 @@ static irqreturn_t spmi_regulator_vs_ocp_isr(int irq, void *data)
}
static struct regulator_ops spmi_smps_ops = {
- .enable = spmi_regulator_common_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_common_set_voltage,
.set_voltage_time_sel = spmi_regulator_set_voltage_time_sel,
.get_voltage_sel = spmi_regulator_common_get_voltage,
@@ -1077,9 +1051,9 @@ static struct regulator_ops spmi_smps_ops = {
};
static struct regulator_ops spmi_ldo_ops = {
- .enable = spmi_regulator_common_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_common_set_voltage,
.get_voltage_sel = spmi_regulator_common_get_voltage,
.map_voltage = spmi_regulator_common_map_voltage,
@@ -1094,9 +1068,9 @@ static struct regulator_ops spmi_ldo_ops = {
};
static struct regulator_ops spmi_ln_ldo_ops = {
- .enable = spmi_regulator_common_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_common_set_voltage,
.get_voltage_sel = spmi_regulator_common_get_voltage,
.map_voltage = spmi_regulator_common_map_voltage,
@@ -1107,8 +1081,8 @@ static struct regulator_ops spmi_ln_ldo_ops = {
static struct regulator_ops spmi_vs_ops = {
.enable = spmi_regulator_vs_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_pull_down = spmi_regulator_common_set_pull_down,
.set_soft_start = spmi_regulator_common_set_soft_start,
.set_over_current_protection = spmi_regulator_vs_ocp,
@@ -1117,9 +1091,9 @@ static struct regulator_ops spmi_vs_ops = {
};
static struct regulator_ops spmi_boost_ops = {
- .enable = spmi_regulator_common_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_single_range_set_voltage,
.get_voltage_sel = spmi_regulator_single_range_get_voltage,
.map_voltage = spmi_regulator_single_map_voltage,
@@ -1128,9 +1102,9 @@ static struct regulator_ops spmi_boost_ops = {
};
static struct regulator_ops spmi_ftsmps_ops = {
- .enable = spmi_regulator_common_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_common_set_voltage,
.set_voltage_time_sel = spmi_regulator_set_voltage_time_sel,
.get_voltage_sel = spmi_regulator_common_get_voltage,
@@ -1143,9 +1117,9 @@ static struct regulator_ops spmi_ftsmps_ops = {
};
static struct regulator_ops spmi_ult_lo_smps_ops = {
- .enable = spmi_regulator_common_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_ult_lo_smps_set_voltage,
.set_voltage_time_sel = spmi_regulator_set_voltage_time_sel,
.get_voltage_sel = spmi_regulator_ult_lo_smps_get_voltage,
@@ -1157,9 +1131,9 @@ static struct regulator_ops spmi_ult_lo_smps_ops = {
};
static struct regulator_ops spmi_ult_ho_smps_ops = {
- .enable = spmi_regulator_common_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_single_range_set_voltage,
.set_voltage_time_sel = spmi_regulator_set_voltage_time_sel,
.get_voltage_sel = spmi_regulator_single_range_get_voltage,
@@ -1172,9 +1146,9 @@ static struct regulator_ops spmi_ult_ho_smps_ops = {
};
static struct regulator_ops spmi_ult_ldo_ops = {
- .enable = spmi_regulator_common_enable,
- .disable = spmi_regulator_common_disable,
- .is_enabled = spmi_regulator_common_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_voltage_sel = spmi_regulator_single_range_set_voltage,
.get_voltage_sel = spmi_regulator_single_range_get_voltage,
.map_voltage = spmi_regulator_single_map_voltage,
@@ -1711,6 +1685,9 @@ static int qcom_spmi_regulator_probe(struct platform_device *pdev)
vreg->desc.id = -1;
vreg->desc.owner = THIS_MODULE;
vreg->desc.type = REGULATOR_VOLTAGE;
+ vreg->desc.enable_reg = reg->base + SPMI_COMMON_REG_ENABLE;
+ vreg->desc.enable_mask = SPMI_COMMON_ENABLE_MASK;
+ vreg->desc.enable_val = SPMI_COMMON_ENABLE;
vreg->desc.name = name = reg->name;
vreg->desc.supply_name = reg->supply;
vreg->desc.of_match = reg->name;
@@ -1723,6 +1700,7 @@ static int qcom_spmi_regulator_probe(struct platform_device *pdev)
config.dev = dev;
config.driver_data = vreg;
+ config.regmap = regmap;
rdev = devm_regulator_register(dev, &vreg->desc, &config);
if (IS_ERR(rdev)) {
dev_err(dev, "failed to register %s\n", name);
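
The three desc fields plus config.regmap added in the probe hunk above are all the generic helpers need; regulator_enable_regmap() and friends reduce to masked regmap updates. A paraphrased sketch (see drivers/regulator/helpers.c for the authoritative version):

    static int enable_regmap_sketch(struct regulator_dev *rdev)
    {
    	unsigned int val;

    	if (rdev->desc->enable_is_inverted) {
    		val = rdev->desc->disable_val;
    	} else {
    		val = rdev->desc->enable_val;
    		if (!val)
    			val = rdev->desc->enable_mask; /* default: set all mask bits */
    	}

    	return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
    				  rdev->desc->enable_mask, val);
    }
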
diff --git a/drivers/regulator/sc2731-regulator.c b/drivers/regulator/sc2731-regulator.c
new file mode 100644
index 000000000000..eb2bdf060b7b
--- /dev/null
+++ b/drivers/regulator/sc2731-regulator.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Spreadtrum Communications Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+
+/*
+ * SC2731 regulator lock register
+ */
+#define SC2731_PWR_WR_PROT 0xf0c
+#define SC2731_WR_UNLOCK_VALUE 0x6e7f
+
+/*
+ * SC2731 enable register
+ */
+#define SC2731_POWER_PD_SW 0xc28
+#define SC2731_LDO_CAMA0_PD 0xcfc
+#define SC2731_LDO_CAMA1_PD 0xd04
+#define SC2731_LDO_CAMMOT_PD 0xd0c
+#define SC2731_LDO_VLDO_PD 0xd6c
+#define SC2731_LDO_EMMCCORE_PD 0xd2c
+#define SC2731_LDO_SDCORE_PD 0xd74
+#define SC2731_LDO_SDIO_PD 0xd70
+#define SC2731_LDO_WIFIPA_PD 0xd4c
+#define SC2731_LDO_USB33_PD 0xd5c
+#define SC2731_LDO_CAMD0_PD 0xd7c
+#define SC2731_LDO_CAMD1_PD 0xd84
+#define SC2731_LDO_CON_PD 0xd8c
+#define SC2731_LDO_CAMIO_PD 0xd94
+#define SC2731_LDO_SRAM_PD 0xd78
+
+/*
+ * SC2731 enable mask
+ */
+#define SC2731_DCDC_CPU0_PD_MASK BIT(4)
+#define SC2731_DCDC_CPU1_PD_MASK BIT(3)
+#define SC2731_DCDC_RF_PD_MASK BIT(11)
+#define SC2731_LDO_CAMA0_PD_MASK BIT(0)
+#define SC2731_LDO_CAMA1_PD_MASK BIT(0)
+#define SC2731_LDO_CAMMOT_PD_MASK BIT(0)
+#define SC2731_LDO_VLDO_PD_MASK BIT(0)
+#define SC2731_LDO_EMMCCORE_PD_MASK BIT(0)
+#define SC2731_LDO_SDCORE_PD_MASK BIT(0)
+#define SC2731_LDO_SDIO_PD_MASK BIT(0)
+#define SC2731_LDO_WIFIPA_PD_MASK BIT(0)
+#define SC2731_LDO_USB33_PD_MASK BIT(0)
+#define SC2731_LDO_CAMD0_PD_MASK BIT(0)
+#define SC2731_LDO_CAMD1_PD_MASK BIT(0)
+#define SC2731_LDO_CON_PD_MASK BIT(0)
+#define SC2731_LDO_CAMIO_PD_MASK BIT(0)
+#define SC2731_LDO_SRAM_PD_MASK BIT(0)
+
+/*
+ * SC2731 vsel register
+ */
+#define SC2731_DCDC_CPU0_VOL 0xc54
+#define SC2731_DCDC_CPU1_VOL 0xc64
+#define SC2731_DCDC_RF_VOL 0xcb8
+#define SC2731_LDO_CAMA0_VOL 0xd00
+#define SC2731_LDO_CAMA1_VOL 0xd08
+#define SC2731_LDO_CAMMOT_VOL 0xd10
+#define SC2731_LDO_VLDO_VOL 0xd28
+#define SC2731_LDO_EMMCCORE_VOL 0xd30
+#define SC2731_LDO_SDCORE_VOL 0xd38
+#define SC2731_LDO_SDIO_VOL 0xd40
+#define SC2731_LDO_WIFIPA_VOL 0xd50
+#define SC2731_LDO_USB33_VOL 0xd60
+#define SC2731_LDO_CAMD0_VOL 0xd80
+#define SC2731_LDO_CAMD1_VOL 0xd88
+#define SC2731_LDO_CON_VOL 0xd90
+#define SC2731_LDO_CAMIO_VOL 0xd98
+#define SC2731_LDO_SRAM_VOL 0xdb0
+
+/*
+ * SC2731 vsel register mask
+ */
+#define SC2731_DCDC_CPU0_VOL_MASK GENMASK(8, 0)
+#define SC2731_DCDC_CPU1_VOL_MASK GENMASK(8, 0)
+#define SC2731_DCDC_RF_VOL_MASK GENMASK(8, 0)
+#define SC2731_LDO_CAMA0_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_CAMA1_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_CAMMOT_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_VLDO_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_EMMCCORE_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_SDCORE_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_SDIO_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_WIFIPA_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_USB33_VOL_MASK GENMASK(7, 0)
+#define SC2731_LDO_CAMD0_VOL_MASK GENMASK(6, 0)
+#define SC2731_LDO_CAMD1_VOL_MASK GENMASK(6, 0)
+#define SC2731_LDO_CON_VOL_MASK GENMASK(6, 0)
+#define SC2731_LDO_CAMIO_VOL_MASK GENMASK(6, 0)
+#define SC2731_LDO_SRAM_VOL_MASK GENMASK(6, 0)
+
+enum sc2731_regulator_id {
+ SC2731_BUCK_CPU0,
+ SC2731_BUCK_CPU1,
+ SC2731_BUCK_RF,
+ SC2731_LDO_CAMA0,
+ SC2731_LDO_CAMA1,
+ SC2731_LDO_CAMMOT,
+ SC2731_LDO_VLDO,
+ SC2731_LDO_EMMCCORE,
+ SC2731_LDO_SDCORE,
+ SC2731_LDO_SDIO,
+ SC2731_LDO_WIFIPA,
+ SC2731_LDO_USB33,
+ SC2731_LDO_CAMD0,
+ SC2731_LDO_CAMD1,
+ SC2731_LDO_CON,
+ SC2731_LDO_CAMIO,
+ SC2731_LDO_SRAM,
+};
+
+static const struct regulator_ops sc2731_regu_linear_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+};
+
+#define SC2731_REGU_LINEAR(_id, en_reg, en_mask, vreg, vmask, \
+ vstep, vmin, vmax) { \
+ .name = #_id, \
+ .of_match = of_match_ptr(#_id), \
+ .ops = &sc2731_regu_linear_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = SC2731_##_id, \
+ .owner = THIS_MODULE, \
+ .min_uV = vmin, \
+ .n_voltages = ((vmax) - (vmin)) / (vstep) + 1, \
+ .uV_step = vstep, \
+ .enable_is_inverted = true, \
+ .enable_val = 0, \
+ .enable_reg = en_reg, \
+ .enable_mask = en_mask, \
+ .vsel_reg = vreg, \
+ .vsel_mask = vmask, \
+}
+
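
The n_voltages arithmetic can be sanity-checked against the first entry below: (1996875 - 400000) / 3125 + 1 = 512 selectors, exactly the range a 9-bit vsel mask (GENMASK(8, 0), values 0..511) encodes. The core's selector-to-voltage step is then the stock linear mapping; a sketch:

    static int sc2731_sel_to_uV_sketch(const struct regulator_desc *desc,
    				   unsigned int sel)
    {
    	if (sel >= desc->n_voltages)
    		return -EINVAL;
    	/* e.g. BUCK_CPU0: 400000 + 511 * 3125 = 1996875 uV, the declared max */
    	return desc->min_uV + sel * desc->uV_step;
    }
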
+static struct regulator_desc regulators[] = {
+ SC2731_REGU_LINEAR(BUCK_CPU0, SC2731_POWER_PD_SW,
+ SC2731_DCDC_CPU0_PD_MASK, SC2731_DCDC_CPU0_VOL,
+ SC2731_DCDC_CPU0_VOL_MASK, 3125, 400000, 1996875),
+ SC2731_REGU_LINEAR(BUCK_CPU1, SC2731_POWER_PD_SW,
+ SC2731_DCDC_CPU1_PD_MASK, SC2731_DCDC_CPU1_VOL,
+ SC2731_DCDC_CPU1_VOL_MASK, 3125, 400000, 1996875),
+ SC2731_REGU_LINEAR(BUCK_RF, SC2731_POWER_PD_SW, SC2731_DCDC_RF_PD_MASK,
+ SC2731_DCDC_RF_VOL, SC2731_DCDC_RF_VOL_MASK,
+ 3125, 600000, 2196875),
+ SC2731_REGU_LINEAR(LDO_CAMA0, SC2731_LDO_CAMA0_PD,
+ SC2731_LDO_CAMA0_PD_MASK, SC2731_LDO_CAMA0_VOL,
+ SC2731_LDO_CAMA0_VOL_MASK, 10000, 1200000, 3750000),
+ SC2731_REGU_LINEAR(LDO_CAMA1, SC2731_LDO_CAMA1_PD,
+ SC2731_LDO_CAMA1_PD_MASK, SC2731_LDO_CAMA1_VOL,
+ SC2731_LDO_CAMA1_VOL_MASK, 10000, 1200000, 3750000),
+ SC2731_REGU_LINEAR(LDO_CAMMOT, SC2731_LDO_CAMMOT_PD,
+ SC2731_LDO_CAMMOT_PD_MASK, SC2731_LDO_CAMMOT_VOL,
+ SC2731_LDO_CAMMOT_VOL_MASK, 10000, 1200000, 3750000),
+ SC2731_REGU_LINEAR(LDO_VLDO, SC2731_LDO_VLDO_PD,
+ SC2731_LDO_VLDO_PD_MASK, SC2731_LDO_VLDO_VOL,
+ SC2731_LDO_VLDO_VOL_MASK, 10000, 1200000, 3750000),
+ SC2731_REGU_LINEAR(LDO_EMMCCORE, SC2731_LDO_EMMCCORE_PD,
+ SC2731_LDO_EMMCCORE_PD_MASK, SC2731_LDO_EMMCCORE_VOL,
+ SC2731_LDO_EMMCCORE_VOL_MASK, 10000, 1200000,
+ 3750000),
+ SC2731_REGU_LINEAR(LDO_SDCORE, SC2731_LDO_SDCORE_PD,
+ SC2731_LDO_SDCORE_PD_MASK, SC2731_LDO_SDCORE_VOL,
+ SC2731_LDO_SDCORE_VOL_MASK, 10000, 1200000, 3750000),
+ SC2731_REGU_LINEAR(LDO_SDIO, SC2731_LDO_SDIO_PD,
+ SC2731_LDO_SDIO_PD_MASK, SC2731_LDO_SDIO_VOL,
+ SC2731_LDO_SDIO_VOL_MASK, 10000, 1200000, 3750000),
+ SC2731_REGU_LINEAR(LDO_WIFIPA, SC2731_LDO_WIFIPA_PD,
+ SC2731_LDO_WIFIPA_PD_MASK, SC2731_LDO_WIFIPA_VOL,
+ SC2731_LDO_WIFIPA_VOL_MASK, 10000, 1200000, 3750000),
+ SC2731_REGU_LINEAR(LDO_USB33, SC2731_LDO_USB33_PD,
+ SC2731_LDO_USB33_PD_MASK, SC2731_LDO_USB33_VOL,
+ SC2731_LDO_USB33_VOL_MASK, 10000, 1200000, 3750000),
+ SC2731_REGU_LINEAR(LDO_CAMD0, SC2731_LDO_CAMD0_PD,
+ SC2731_LDO_CAMD0_PD_MASK, SC2731_LDO_CAMD0_VOL,
+ SC2731_LDO_CAMD0_VOL_MASK, 6250, 1000000, 1793750),
+ SC2731_REGU_LINEAR(LDO_CAMD1, SC2731_LDO_CAMD1_PD,
+ SC2731_LDO_CAMD1_PD_MASK, SC2731_LDO_CAMD1_VOL,
+ SC2731_LDO_CAMD1_VOL_MASK, 6250, 1000000, 1793750),
+ SC2731_REGU_LINEAR(LDO_CON, SC2731_LDO_CON_PD,
+ SC2731_LDO_CON_PD_MASK, SC2731_LDO_CON_VOL,
+ SC2731_LDO_CON_VOL_MASK, 6250, 1000000, 1793750),
+ SC2731_REGU_LINEAR(LDO_CAMIO, SC2731_LDO_CAMIO_PD,
+ SC2731_LDO_CAMIO_PD_MASK, SC2731_LDO_CAMIO_VOL,
+ SC2731_LDO_CAMIO_VOL_MASK, 6250, 1000000, 1793750),
+ SC2731_REGU_LINEAR(LDO_SRAM, SC2731_LDO_SRAM_PD,
+ SC2731_LDO_SRAM_PD_MASK, SC2731_LDO_SRAM_VOL,
+ SC2731_LDO_SRAM_VOL_MASK, 6250, 1000000, 1793750),
+};
+
+static int sc2731_regulator_unlock(struct regmap *regmap)
+{
+ return regmap_write(regmap, SC2731_PWR_WR_PROT,
+ SC2731_WR_UNLOCK_VALUE);
+}
+
+static int sc2731_regulator_probe(struct platform_device *pdev)
+{
+ int i, ret;
+ struct regmap *regmap;
+ struct regulator_config config = { };
+ struct regulator_dev *rdev;
+
+ regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!regmap) {
+ dev_err(&pdev->dev, "failed to get regmap.\n");
+ return -ENODEV;
+ }
+
+ ret = sc2731_regulator_unlock(regmap);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to release regulator lock\n");
+ return ret;
+ }
+
+ config.dev = &pdev->dev;
+ config.regmap = regmap;
+
+ for (i = 0; i < ARRAY_SIZE(regulators); i++) {
+ rdev = devm_regulator_register(&pdev->dev, &regulators[i],
+ &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "failed to register regulator %s\n",
+ regulators[i].name);
+ return PTR_ERR(rdev);
+ }
+ }
+
+ return 0;
+}
+
+static struct platform_driver sc2731_regulator_driver = {
+ .driver = {
+ .name = "sc27xx-regulator",
+ },
+ .probe = sc2731_regulator_probe,
+};
+
+module_platform_driver(sc2731_regulator_driver);
+
+MODULE_AUTHOR("Chen Junhui <erick.chen@spreadtrum.com>");
+MODULE_DESCRIPTION("Spreadtrum SC2731 regulator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c
index bc489958fed7..1827185beacc 100644
--- a/drivers/regulator/tps65218-regulator.c
+++ b/drivers/regulator/tps65218-regulator.c
@@ -28,9 +28,6 @@
#include <linux/regulator/machine.h>
#include <linux/mfd/tps65218.h>
-enum tps65218_regulators { DCDC1, DCDC2, DCDC3, DCDC4,
- DCDC5, DCDC6, LDO1, LS3 };
-
#define TPS65218_REGULATOR(_name, _of, _id, _type, _ops, _n, _vr, _vm, _er, \
_em, _cr, _cm, _lr, _nlr, _delay, _fuv, _sr, _sm) \
{ \
@@ -329,6 +326,8 @@ static int tps65218_regulator_probe(struct platform_device *pdev)
/* Allocate memory for strobes */
tps->strobes = devm_kzalloc(&pdev->dev, sizeof(u8) *
TPS65218_NUM_REGULATOR, GFP_KERNEL);
+ if (!tps->strobes)
+ return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(regulators); i++) {
rdev = devm_regulator_register(&pdev->dev, &regulators[i],
diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
index b01774e9fac0..e540ca362d08 100644
--- a/drivers/rpmsg/qcom_smd.c
+++ b/drivers/rpmsg/qcom_smd.c
@@ -919,12 +919,12 @@ static int qcom_smd_trysend(struct rpmsg_endpoint *ept, void *data, int len)
return __qcom_smd_send(qsept->qsch, data, len, false);
}
-static unsigned int qcom_smd_poll(struct rpmsg_endpoint *ept,
+static __poll_t qcom_smd_poll(struct rpmsg_endpoint *ept,
struct file *filp, poll_table *wait)
{
struct qcom_smd_endpoint *qsept = to_smd_endpoint(ept);
struct qcom_smd_channel *channel = qsept->qsch;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(filp, &channel->fblockread_event, wait);
diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c
index e0996fce3963..e622fcda30fa 100644
--- a/drivers/rpmsg/rpmsg_char.c
+++ b/drivers/rpmsg/rpmsg_char.c
@@ -256,10 +256,10 @@ free_kbuf:
return ret < 0 ? ret : len;
}
-static unsigned int rpmsg_eptdev_poll(struct file *filp, poll_table *wait)
+static __poll_t rpmsg_eptdev_poll(struct file *filp, poll_table *wait)
{
struct rpmsg_eptdev *eptdev = filp->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
if (!eptdev->ept)
return POLLERR;
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index dffa3aab7178..5a081762afcc 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -247,7 +247,7 @@ EXPORT_SYMBOL(rpmsg_trysendto);
*
* Returns mask representing the current state of the endpoint's send buffers
*/
-unsigned int rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
+__poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
poll_table *wait)
{
if (WARN_ON(!ept))
diff --git a/drivers/rpmsg/rpmsg_internal.h b/drivers/rpmsg/rpmsg_internal.h
index 0cf9c7e2ee83..685aa70e9cbe 100644
--- a/drivers/rpmsg/rpmsg_internal.h
+++ b/drivers/rpmsg/rpmsg_internal.h
@@ -71,7 +71,7 @@ struct rpmsg_endpoint_ops {
int (*trysendto)(struct rpmsg_endpoint *ept, void *data, int len, u32 dst);
int (*trysend_offchannel)(struct rpmsg_endpoint *ept, u32 src, u32 dst,
void *data, int len);
- unsigned int (*poll)(struct rpmsg_endpoint *ept, struct file *filp,
+ __poll_t (*poll)(struct rpmsg_endpoint *ept, struct file *filp,
poll_table *wait);
};
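
__poll_t is a sparse __bitwise type, so mixing it with plain unsigned int now warns under 'make C=1'. Every conversion in this series follows the same shape; a generic sketch with a hypothetical struct foo:

    static __poll_t foo_poll(struct file *filp, poll_table *wait)
    {
    	struct foo *foo = filp->private_data;
    	__poll_t mask = 0;

    	poll_wait(filp, &foo->waitq, wait);

    	if (foo->data_ready)
    		mask |= POLLIN | POLLRDNORM;

    	return mask;
    }
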
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 215eac68ae2d..5a7b30d0773b 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -194,7 +194,7 @@ rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
return ret;
}
-static unsigned int rtc_dev_poll(struct file *file, poll_table *wait)
+static __poll_t rtc_dev_poll(struct file *file, poll_table *wait)
{
struct rtc_device *rtc = file->private_data;
unsigned long data;
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index a7917d473774..0c075d100252 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -661,9 +661,9 @@ static ssize_t dasd_eer_read(struct file *filp, char __user *buf,
return effective_count;
}
-static unsigned int dasd_eer_poll(struct file *filp, poll_table *ptable)
+static __poll_t dasd_eer_poll(struct file *filp, poll_table *ptable)
{
- unsigned int mask;
+ __poll_t mask;
unsigned long flags;
struct eerbuffer *eerb;
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index bf4ab4efed73..956f662908a6 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -429,7 +429,7 @@ out_copy:
return count;
}
-static unsigned int mon_poll(struct file *filp, struct poll_table_struct *p)
+static __poll_t mon_poll(struct file *filp, struct poll_table_struct *p)
{
struct mon_private *monpriv = filp->private_data;
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 00e7968a1d70..b42c9c479d4b 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -369,7 +369,6 @@ out:
static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
{
u32 local_time;
- struct timeval time;
TW_Event *event;
unsigned short aen;
char host[16];
@@ -392,8 +391,8 @@ static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_H
memset(event, 0, sizeof(TW_Event));
event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
- do_gettimeofday(&time);
- local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
+ /* event->time_stamp_sec overflows in y2106 */
+ local_time = (u32)(ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
event->time_stamp_sec = local_time;
event->aen_code = aen;
event->retrieved = TW_AEN_NOT_RETRIEVED;
@@ -473,11 +472,10 @@ out:
static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
{
u32 schedulertime;
- struct timeval utc;
TW_Command_Full *full_command_packet;
TW_Command *command_packet;
TW_Param_Apache *param;
- u32 local_time;
+ time64_t local_time;
/* Fill out the command packet */
full_command_packet = tw_dev->command_packet_virt[request_id];
@@ -499,9 +497,8 @@ static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
/* Convert system time in UTC to local time seconds since last
Sunday 12:00AM */
- do_gettimeofday(&utc);
- local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
- schedulertime = local_time - (3 * 86400);
+ local_time = (ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
+ div_u64_rem(local_time - (3 * 86400), 604800, &schedulertime);
-	schedulertime = cpu_to_le32(schedulertime % 604800);
+	schedulertime = cpu_to_le32(schedulertime);
memcpy(param->data, &schedulertime, sizeof(u32));
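
Worked arithmetic for the conversion above: 604800 s is one week and 3 * 86400 s is three days; the Unix epoch fell on a Thursday, so the first Sunday 00:00 after it began exactly three days in, and (t - 3 days) mod 604800 therefore counts seconds since the most recent Sunday. div_u64_rem() keeps the 64-bit division safe on 32-bit hosts; a standalone sketch:

    u32 since_sunday;

    div_u64_rem(ktime_get_real_seconds() - 3 * 86400, 604800, &since_sunday);
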
@@ -648,8 +645,7 @@ static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long
TW_Command_Full *full_command_packet;
TW_Compatibility_Info *tw_compat_info;
TW_Event *event;
- struct timeval current_time;
- u32 current_time_ms;
+ ktime_t current_time;
TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)];
int retval = TW_IOCTL_ERROR_OS_EFAULT;
void __user *argp = (void __user *)arg;
@@ -840,17 +836,17 @@ static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long
break;
case TW_IOCTL_GET_LOCK:
tw_lock = (TW_Lock *)tw_ioctl->data_buffer;
- do_gettimeofday(&current_time);
- current_time_ms = (current_time.tv_sec * 1000) + (current_time.tv_usec / 1000);
+ current_time = ktime_get();
- if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) || (current_time_ms >= tw_dev->ioctl_msec)) {
+ if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) ||
+ ktime_after(current_time, tw_dev->ioctl_time)) {
tw_dev->ioctl_sem_lock = 1;
- tw_dev->ioctl_msec = current_time_ms + tw_lock->timeout_msec;
+ tw_dev->ioctl_time = ktime_add_ms(current_time, tw_lock->timeout_msec);
tw_ioctl->driver_command.status = 0;
tw_lock->time_remaining_msec = tw_lock->timeout_msec;
} else {
tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED;
- tw_lock->time_remaining_msec = tw_dev->ioctl_msec - current_time_ms;
+ tw_lock->time_remaining_msec = ktime_ms_delta(tw_dev->ioctl_time, current_time);
}
break;
case TW_IOCTL_RELEASE_LOCK:
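
Moving the ioctl lock bookkeeping from a milliseconds-since-epoch u32 to ktime_t removes both the overflow and any sensitivity to wall-clock jumps, since ktime_get() is monotonic. The expiry logic above reduces to these helpers (sketch, assuming a ktime_t deadline armed with ktime_add_ms()):

    static bool ioctl_lock_expired(ktime_t deadline)
    {
    	return ktime_after(ktime_get(), deadline);
    }

    static s64 ioctl_lock_ms_remaining(ktime_t deadline)
    {
    	return ktime_ms_delta(deadline, ktime_get());
    }
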
diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
index b6c208cc474f..d88cd3499bd5 100644
--- a/drivers/scsi/3w-9xxx.h
+++ b/drivers/scsi/3w-9xxx.h
@@ -666,7 +666,7 @@ typedef struct TAG_TW_Device_Extension {
unsigned char event_queue_wrapped;
unsigned int error_sequence_id;
int ioctl_sem_lock;
- u32 ioctl_msec;
+ ktime_t ioctl_time;
int chrdev_request_id;
wait_queue_head_t ioctl_wqueue;
struct mutex ioctl_lock;
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index b150e131b2e7..cf9f2a09b47d 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -221,7 +221,6 @@ out:
static void twl_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
{
u32 local_time;
- struct timeval time;
TW_Event *event;
unsigned short aen;
char host[16];
@@ -240,8 +239,8 @@ static void twl_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_H
memset(event, 0, sizeof(TW_Event));
event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
- do_gettimeofday(&time);
- local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
+ /* event->time_stamp_sec overflows in y2106 */
+ local_time = (u32)(ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
event->time_stamp_sec = local_time;
event->aen_code = aen;
event->retrieved = TW_AEN_NOT_RETRIEVED;
@@ -408,11 +407,10 @@ out:
static void twl_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
{
u32 schedulertime;
- struct timeval utc;
TW_Command_Full *full_command_packet;
TW_Command *command_packet;
TW_Param_Apache *param;
- u32 local_time;
+ time64_t local_time;
/* Fill out the command packet */
full_command_packet = tw_dev->command_packet_virt[request_id];
@@ -434,10 +432,9 @@ static void twl_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
/* Convert system time in UTC to local time seconds since last
Sunday 12:00AM */
- do_gettimeofday(&utc);
- local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
- schedulertime = local_time - (3 * 86400);
- schedulertime = cpu_to_le32(schedulertime % 604800);
+ local_time = (ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
+ div_u64_rem(local_time - (3 * 86400), 604800, &schedulertime);
+ schedulertime = cpu_to_le32(schedulertime);
memcpy(param->data, &schedulertime, sizeof(u32));
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index af3e4d3f9735..e7961cbd2c55 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -42,6 +42,8 @@
#include <linux/highmem.h> /* For flush_kernel_dcache_page */
#include <linux/module.h>
+#include <asm/unaligned.h>
+
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
@@ -913,8 +915,15 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
memset(str, ' ', sizeof(*str));
if (sup_adap_info->adapter_type_text[0]) {
- char *cp = sup_adap_info->adapter_type_text;
int c;
+ char *cp;
+ char *cname = kmemdup(sup_adap_info->adapter_type_text,
+ sizeof(sup_adap_info->adapter_type_text),
+ GFP_ATOMIC);
+ if (!cname)
+ return;
+
+ cp = cname;
if ((cp[0] == 'A') && (cp[1] == 'O') && (cp[2] == 'C'))
inqstrcpy("SMC", str->vid);
else {
@@ -923,7 +932,7 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
++cp;
c = *cp;
*cp = '\0';
- inqstrcpy(sup_adap_info->adapter_type_text, str->vid);
+ inqstrcpy(cname, str->vid);
*cp = c;
while (*cp && *cp != ' ')
++cp;
@@ -931,14 +940,11 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
while (*cp == ' ')
++cp;
/* last six chars reserved for vol type */
- c = 0;
- if (strlen(cp) > sizeof(str->pid)) {
- c = cp[sizeof(str->pid)];
+ if (strlen(cp) > sizeof(str->pid))
cp[sizeof(str->pid)] = '\0';
- }
inqstrcpy (cp, str->pid);
- if (c)
- cp[sizeof(str->pid)] = c;
+
+ kfree(cname);
} else {
struct aac_driver_ident *mp = aac_get_driver_ident(dev->cardtype);
@@ -1660,87 +1666,309 @@ static int aac_adapter_hba(struct fib *fib, struct scsi_cmnd *cmd)
(void *) cmd);
}
-int aac_issue_bmic_identify(struct aac_dev *dev, u32 bus, u32 target)
+static int aac_send_safw_bmic_cmd(struct aac_dev *dev,
+ struct aac_srb_unit *srbu, void *xfer_buf, int xfer_len)
{
- struct fib *fibptr;
- struct aac_srb *srbcmd;
- struct sgmap64 *sg64;
- struct aac_ciss_identify_pd *identify_resp;
- dma_addr_t addr;
- u32 vbus, vid;
- u16 fibsize, datasize;
- int rcode = -ENOMEM;
-
+ struct fib *fibptr;
+ dma_addr_t addr;
+ int rcode;
+ int fibsize;
+ struct aac_srb *srb;
+ struct aac_srb_reply *srb_reply;
+ struct sgmap64 *sg64;
+ u32 vbus;
+ u32 vid;
+
+ if (!dev->sa_firmware)
+ return 0;
+ /* allocate FIB */
fibptr = aac_fib_alloc(dev);
if (!fibptr)
- goto out;
+ return -ENOMEM;
- fibsize = sizeof(struct aac_srb) -
- sizeof(struct sgentry) + sizeof(struct sgentry64);
- datasize = sizeof(struct aac_ciss_identify_pd);
+ aac_fib_init(fibptr);
+ fibptr->hw_fib_va->header.XferState &=
+ ~cpu_to_le32(FastResponseCapable);
- identify_resp = dma_alloc_coherent(&dev->pdev->dev, datasize, &addr,
- GFP_KERNEL);
- if (!identify_resp)
- goto fib_free_ptr;
+ fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) +
+ sizeof(struct sgentry64);
- vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_bus);
- vid = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_target);
+	/* map the caller's transfer buffer for DMA */
+ addr = dma_map_single(&dev->pdev->dev, xfer_buf, xfer_len,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&dev->pdev->dev, addr)) {
+ rcode = -ENOMEM;
+ goto fib_error;
+ }
- aac_fib_init(fibptr);
+ srb = fib_data(fibptr);
+ memcpy(srb, &srbu->srb, sizeof(struct aac_srb));
- srbcmd = (struct aac_srb *) fib_data(fibptr);
- srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
- srbcmd->channel = cpu_to_le32(vbus);
- srbcmd->id = cpu_to_le32(vid);
- srbcmd->lun = 0;
- srbcmd->flags = cpu_to_le32(SRB_DataIn);
- srbcmd->timeout = cpu_to_le32(10);
- srbcmd->retry_limit = 0;
- srbcmd->cdb_size = cpu_to_le32(12);
- srbcmd->count = cpu_to_le32(datasize);
+ vbus = (u32)le16_to_cpu(
+ dev->supplement_adapter_info.virt_device_bus);
+ vid = (u32)le16_to_cpu(
+ dev->supplement_adapter_info.virt_device_target);
- memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
- srbcmd->cdb[0] = 0x26;
- srbcmd->cdb[2] = (u8)((AAC_MAX_LUN + target) & 0x00FF);
- srbcmd->cdb[6] = CISS_IDENTIFY_PHYSICAL_DEVICE;
+ /* set the common request fields */
+ srb->channel = cpu_to_le32(vbus);
+ srb->id = cpu_to_le32(vid);
+ srb->lun = 0;
+ srb->function = cpu_to_le32(SRBF_ExecuteScsi);
+ srb->timeout = 0;
+ srb->retry_limit = 0;
+ srb->cdb_size = cpu_to_le32(16);
+ srb->count = cpu_to_le32(xfer_len);
+
+ sg64 = (struct sgmap64 *)&srb->sg;
+ sg64->count = cpu_to_le32(1);
+ sg64->sg[0].addr[1] = cpu_to_le32(upper_32_bits(addr));
+ sg64->sg[0].addr[0] = cpu_to_le32(lower_32_bits(addr));
+ sg64->sg[0].count = cpu_to_le32(xfer_len);
- sg64 = (struct sgmap64 *)&srbcmd->sg;
- sg64->count = cpu_to_le32(1);
- sg64->sg[0].addr[1] = cpu_to_le32((u32)(((addr) >> 16) >> 16));
- sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
- sg64->sg[0].count = cpu_to_le32(datasize);
+ /*
+	 * Copy the request back so the caller can dump or reuse it if needed
+ */
+ memcpy(&srbu->srb, srb, sizeof(struct aac_srb));
+
+ /* issue request to the controller */
+ rcode = aac_fib_send(ScsiPortCommand64, fibptr, fibsize, FsaNormal,
+ 1, 1, NULL, NULL);
+
+ if (rcode == -ERESTARTSYS)
+ rcode = -ERESTART;
+
+ if (unlikely(rcode < 0))
+ goto bmic_error;
+
+ srb_reply = (struct aac_srb_reply *)fib_data(fibptr);
+ memcpy(&srbu->srb_reply, srb_reply, sizeof(struct aac_srb_reply));
+
+bmic_error:
+ dma_unmap_single(&dev->pdev->dev, addr, xfer_len, DMA_BIDIRECTIONAL);
+fib_error:
+ aac_fib_complete(fibptr);
+ aac_fib_free(fibptr);
+ return rcode;
+}
+
+static void aac_set_safw_target_qd(struct aac_dev *dev, int bus, int target)
+{
+
+ struct aac_ciss_identify_pd *identify_resp;
- rcode = aac_fib_send(ScsiPortCommand64,
- fibptr, fibsize, FsaNormal, 1, 1, NULL, NULL);
+ if (dev->hba_map[bus][target].devtype != AAC_DEVTYPE_NATIVE_RAW)
+ return;
+
+ identify_resp = dev->hba_map[bus][target].safw_identify_resp;
+ if (identify_resp == NULL) {
+ dev->hba_map[bus][target].qd_limit = 32;
+ return;
+ }
if (identify_resp->current_queue_depth_limit <= 0 ||
- identify_resp->current_queue_depth_limit > 32)
+ identify_resp->current_queue_depth_limit > 255)
dev->hba_map[bus][target].qd_limit = 32;
else
dev->hba_map[bus][target].qd_limit =
identify_resp->current_queue_depth_limit;
+}
- dma_free_coherent(&dev->pdev->dev, datasize, identify_resp, addr);
+static int aac_issue_safw_bmic_identify(struct aac_dev *dev,
+ struct aac_ciss_identify_pd **identify_resp, u32 bus, u32 target)
+{
+ int rcode = -ENOMEM;
+ int datasize;
+ struct aac_srb_unit srbu;
+ struct aac_srb *srbcmd;
+ struct aac_ciss_identify_pd *identify_reply;
- aac_fib_complete(fibptr);
+ datasize = sizeof(struct aac_ciss_identify_pd);
+ identify_reply = kmalloc(datasize, GFP_KERNEL);
+ if (!identify_reply)
+ goto out;
+
+ memset(&srbu, 0, sizeof(struct aac_srb_unit));
+
+ srbcmd = &srbu.srb;
+ srbcmd->flags = cpu_to_le32(SRB_DataIn);
+ srbcmd->cdb[0] = 0x26;
+ srbcmd->cdb[2] = (u8)((AAC_MAX_LUN + target) & 0x00FF);
+ srbcmd->cdb[6] = CISS_IDENTIFY_PHYSICAL_DEVICE;
+
+ rcode = aac_send_safw_bmic_cmd(dev, &srbu, identify_reply, datasize);
+ if (unlikely(rcode < 0))
+ goto mem_free_all;
+
+ *identify_resp = identify_reply;
+
+out:
+ return rcode;
+mem_free_all:
+ kfree(identify_reply);
+ goto out;
+}
+
+static inline void aac_free_safw_ciss_luns(struct aac_dev *dev)
+{
+ kfree(dev->safw_phys_luns);
+ dev->safw_phys_luns = NULL;
+}
+
+/**
+ * aac_get_safw_ciss_luns - Process topology change
+ * @dev: aac_dev structure
+ *
+ * Execute a CISS REPORT PHYSICAL LUNS and cache the response so the
+ * results can later be folded into the current hba_map.
+ */
+static int aac_get_safw_ciss_luns(struct aac_dev *dev)
+{
+ int rcode = -ENOMEM;
+ int datasize;
+ struct aac_srb *srbcmd;
+ struct aac_srb_unit srbu;
+ struct aac_ciss_phys_luns_resp *phys_luns;
+
+ datasize = sizeof(struct aac_ciss_phys_luns_resp) +
+ (AAC_MAX_TARGETS - 1) * sizeof(struct _ciss_lun);
+ phys_luns = kmalloc(datasize, GFP_KERNEL);
+ if (phys_luns == NULL)
+ goto out;
+
+ memset(&srbu, 0, sizeof(struct aac_srb_unit));
+
+ srbcmd = &srbu.srb;
+ srbcmd->flags = cpu_to_le32(SRB_DataIn);
+ srbcmd->cdb[0] = CISS_REPORT_PHYSICAL_LUNS;
+ srbcmd->cdb[1] = 2; /* extended reporting */
+ srbcmd->cdb[8] = (u8)(datasize >> 8);
+ srbcmd->cdb[9] = (u8)(datasize);
+
+ rcode = aac_send_safw_bmic_cmd(dev, &srbu, phys_luns, datasize);
+ if (unlikely(rcode < 0))
+ goto mem_free_all;
+
+ if (phys_luns->resp_flag != 2) {
+ rcode = -ENOMSG;
+ goto mem_free_all;
+ }
+
+ dev->safw_phys_luns = phys_luns;
+
+out:
+ return rcode;
+mem_free_all:
+ kfree(phys_luns);
+ goto out;
+}
+
+static inline u32 aac_get_safw_phys_lun_count(struct aac_dev *dev)
+{
+ return get_unaligned_be32(&dev->safw_phys_luns->list_length[0])/24;
+}
+
+static inline u32 aac_get_safw_phys_bus(struct aac_dev *dev, int lun)
+{
+ return dev->safw_phys_luns->lun[lun].level2[1] & 0x3f;
+}
+
+static inline u32 aac_get_safw_phys_target(struct aac_dev *dev, int lun)
+{
+ return dev->safw_phys_luns->lun[lun].level2[0];
+}
+
+static inline u32 aac_get_safw_phys_expose_flag(struct aac_dev *dev, int lun)
+{
+ return dev->safw_phys_luns->lun[lun].bus >> 6;
+}
+
+static inline u32 aac_get_safw_phys_attribs(struct aac_dev *dev, int lun)
+{
+ return dev->safw_phys_luns->lun[lun].node_ident[9];
+}
+
+static inline u32 aac_get_safw_phys_nexus(struct aac_dev *dev, int lun)
+{
+ return *((u32 *)&dev->safw_phys_luns->lun[lun].node_ident[12]);
+}
+
+static inline u32 aac_get_safw_phys_device_type(struct aac_dev *dev, int lun)
+{
+ return dev->safw_phys_luns->lun[lun].node_ident[8];
+}
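
aac_get_safw_phys_lun_count() is the typed replacement for the open-coded big-endian assembly removed further down in this diff; the two forms compute the same value, but get_unaligned_be32() is also safe on unaligned pointers. Assuming a hypothetical u8 *p pointing at list_length[0]:

    u32 open_coded = ((u32)p[0] << 24) + (p[1] << 16) + (p[2] << 8) + p[3];
    u32 helper = get_unaligned_be32(p);	/* same value, alignment-safe */
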
+
+static inline void aac_free_safw_identify_resp(struct aac_dev *dev,
+ int bus, int target)
+{
+ kfree(dev->hba_map[bus][target].safw_identify_resp);
+ dev->hba_map[bus][target].safw_identify_resp = NULL;
+}
+
+static inline void aac_free_safw_all_identify_resp(struct aac_dev *dev,
+ int lun_count)
+{
+ int luns;
+ int i;
+ u32 bus;
+ u32 target;
+
+ luns = aac_get_safw_phys_lun_count(dev);
+
+ if (luns < lun_count)
+ lun_count = luns;
+ else if (lun_count < 0)
+ lun_count = luns;
+
+ for (i = 0; i < lun_count; i++) {
+ bus = aac_get_safw_phys_bus(dev, i);
+ target = aac_get_safw_phys_target(dev, i);
+
+ aac_free_safw_identify_resp(dev, bus, target);
+ }
+}
+
+static int aac_get_safw_attr_all_targets(struct aac_dev *dev)
+{
+ int i;
+ int rcode = 0;
+ u32 lun_count;
+ u32 bus;
+ u32 target;
+ struct aac_ciss_identify_pd *identify_resp = NULL;
+
+ lun_count = aac_get_safw_phys_lun_count(dev);
+
+ for (i = 0; i < lun_count; ++i) {
+
+ bus = aac_get_safw_phys_bus(dev, i);
+ target = aac_get_safw_phys_target(dev, i);
+
+ rcode = aac_issue_safw_bmic_identify(dev,
+ &identify_resp, bus, target);
+
+ if (unlikely(rcode < 0))
+ goto free_identify_resp;
+
+ dev->hba_map[bus][target].safw_identify_resp = identify_resp;
+ }
-fib_free_ptr:
- aac_fib_free(fibptr);
out:
return rcode;
+free_identify_resp:
+ aac_free_safw_all_identify_resp(dev, i);
+ goto out;
}
/**
- * aac_update hba_map()- update current hba map with data from FW
+ * aac_set_safw_attr_all_targets- update current hba map with data from FW
* @dev: aac_dev structure
* @phys_luns: FW information from report phys luns
+ * @rescan: Indicates scan type
*
* Update our hba map with the information gathered from the FW
*/
-void aac_update_hba_map(struct aac_dev *dev,
- struct aac_ciss_phys_luns_resp *phys_luns, int rescan)
+static void aac_set_safw_attr_all_targets(struct aac_dev *dev)
{
/* ok and extended reporting */
u32 lun_count, nexus;
@@ -1748,24 +1976,21 @@ void aac_update_hba_map(struct aac_dev *dev,
u8 expose_flag, attribs;
u8 devtype;
- lun_count = ((phys_luns->list_length[0] << 24)
- + (phys_luns->list_length[1] << 16)
- + (phys_luns->list_length[2] << 8)
- + (phys_luns->list_length[3])) / 24;
+ lun_count = aac_get_safw_phys_lun_count(dev);
+
+ dev->scan_counter++;
for (i = 0; i < lun_count; ++i) {
- bus = phys_luns->lun[i].level2[1] & 0x3f;
- target = phys_luns->lun[i].level2[0];
- expose_flag = phys_luns->lun[i].bus >> 6;
- attribs = phys_luns->lun[i].node_ident[9];
- nexus = *((u32 *) &phys_luns->lun[i].node_ident[12]);
+ bus = aac_get_safw_phys_bus(dev, i);
+ target = aac_get_safw_phys_target(dev, i);
+ expose_flag = aac_get_safw_phys_expose_flag(dev, i);
+ attribs = aac_get_safw_phys_attribs(dev, i);
+ nexus = aac_get_safw_phys_nexus(dev, i);
if (bus >= AAC_MAX_BUSES || target >= AAC_MAX_TARGETS)
continue;
- dev->hba_map[bus][target].expose = expose_flag;
-
if (expose_flag != 0) {
devtype = AAC_DEVTYPE_RAID_MEMBER;
goto update_devtype;
@@ -1778,95 +2003,45 @@ void aac_update_hba_map(struct aac_dev *dev,
} else
devtype = AAC_DEVTYPE_ARC_RAW;
- if (devtype != AAC_DEVTYPE_NATIVE_RAW)
- goto update_devtype;
+ dev->hba_map[bus][target].scan_counter = dev->scan_counter;
- if (aac_issue_bmic_identify(dev, bus, target) < 0)
- dev->hba_map[bus][target].qd_limit = 32;
+ aac_set_safw_target_qd(dev, bus, target);
update_devtype:
- if (rescan == AAC_INIT)
- dev->hba_map[bus][target].devtype = devtype;
- else
- dev->hba_map[bus][target].new_devtype = devtype;
+ dev->hba_map[bus][target].devtype = devtype;
}
}
-/**
- * aac_report_phys_luns() Process topology change
- * @dev: aac_dev structure
- * @fibptr: fib pointer
- *
- * Execute a CISS REPORT PHYS LUNS and process the results into
- * the current hba_map.
- */
-int aac_report_phys_luns(struct aac_dev *dev, struct fib *fibptr, int rescan)
+static int aac_setup_safw_targets(struct aac_dev *dev)
{
- int fibsize, datasize;
- struct aac_ciss_phys_luns_resp *phys_luns;
- struct aac_srb *srbcmd;
- struct sgmap64 *sg64;
- dma_addr_t addr;
- u32 vbus, vid;
int rcode = 0;
- /* Thor SA Firmware -> CISS_REPORT_PHYSICAL_LUNS */
- fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry)
- + sizeof(struct sgentry64);
- datasize = sizeof(struct aac_ciss_phys_luns_resp)
- + (AAC_MAX_TARGETS - 1) * sizeof(struct _ciss_lun);
-
- phys_luns = dma_alloc_coherent(&dev->pdev->dev, datasize, &addr,
- GFP_KERNEL);
- if (phys_luns == NULL) {
- rcode = -ENOMEM;
- goto err_out;
- }
-
- vbus = (u32) le16_to_cpu(
- dev->supplement_adapter_info.virt_device_bus);
- vid = (u32) le16_to_cpu(
- dev->supplement_adapter_info.virt_device_target);
+ rcode = aac_get_containers(dev);
+ if (unlikely(rcode < 0))
+ goto out;
- aac_fib_init(fibptr);
+ rcode = aac_get_safw_ciss_luns(dev);
+ if (unlikely(rcode < 0))
+ goto out;
- srbcmd = (struct aac_srb *) fib_data(fibptr);
- srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
- srbcmd->channel = cpu_to_le32(vbus);
- srbcmd->id = cpu_to_le32(vid);
- srbcmd->lun = 0;
- srbcmd->flags = cpu_to_le32(SRB_DataIn);
- srbcmd->timeout = cpu_to_le32(10);
- srbcmd->retry_limit = 0;
- srbcmd->cdb_size = cpu_to_le32(12);
- srbcmd->count = cpu_to_le32(datasize);
+ rcode = aac_get_safw_attr_all_targets(dev);
+ if (unlikely(rcode < 0))
+ goto free_ciss_luns;
- memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
- srbcmd->cdb[0] = CISS_REPORT_PHYSICAL_LUNS;
- srbcmd->cdb[1] = 2; /* extended reporting */
- srbcmd->cdb[8] = (u8)(datasize >> 8);
- srbcmd->cdb[9] = (u8)(datasize);
-
- sg64 = (struct sgmap64 *) &srbcmd->sg;
- sg64->count = cpu_to_le32(1);
- sg64->sg[0].addr[1] = cpu_to_le32(upper_32_bits(addr));
- sg64->sg[0].addr[0] = cpu_to_le32(lower_32_bits(addr));
- sg64->sg[0].count = cpu_to_le32(datasize);
-
- rcode = aac_fib_send(ScsiPortCommand64, fibptr, fibsize,
- FsaNormal, 1, 1, NULL, NULL);
-
- /* analyse data */
- if (rcode >= 0 && phys_luns->resp_flag == 2) {
- /* ok and extended reporting */
- aac_update_hba_map(dev, phys_luns, rescan);
- }
+ aac_set_safw_attr_all_targets(dev);
- dma_free_coherent(&dev->pdev->dev, datasize, phys_luns, addr);
-err_out:
+ aac_free_safw_all_identify_resp(dev, -1);
+free_ciss_luns:
+ aac_free_safw_ciss_luns(dev);
+out:
return rcode;
}
+int aac_setup_safw_adapter(struct aac_dev *dev)
+{
+ return aac_setup_safw_targets(dev);
+}
+
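[Not part of the patch] The error handling in aac_setup_safw_targets() above is the kernel's usual goto-unwind ladder: each acquisition that succeeds gets a matching release label, so a failure at any later step frees earlier resources in reverse order, and the success path falls through the same releases. A minimal standalone sketch of the pattern (illustrative names, not driver API; plain userspace C):

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-ins for aac_get_safw_ciss_luns() and
     * aac_get_safw_attr_all_targets() in the driver. */
    static void *get_luns(void)  { return malloc(16); }
    static void *get_attrs(void) { return malloc(16); }

    static int setup_targets(void)
    {
        void *luns, *attrs;
        int rcode = 0;

        luns = get_luns();
        if (!luns) {
            rcode = -1;
            goto out;               /* nothing held yet */
        }

        attrs = get_attrs();
        if (!attrs) {
            rcode = -1;
            goto free_luns;         /* unwind only what we hold */
        }

        printf("targets configured\n");

        free(attrs);                /* success path releases, too */
    free_luns:
        free(luns);
    out:
        return rcode;
    }

    int main(void)
    {
        return setup_targets() ? EXIT_FAILURE : EXIT_SUCCESS;
    }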
int aac_get_adapter_info(struct aac_dev* dev)
{
struct fib* fibptr;
@@ -1969,12 +2144,6 @@ int aac_get_adapter_info(struct aac_dev* dev)
dev->maximum_num_channels = le32_to_cpu(bus_info->BusCount);
}
- if (!dev->sync_mode && dev->sa_firmware &&
- dev->supplement_adapter_info.virt_device_bus != 0xffff) {
- /* Thor SA Firmware -> CISS_REPORT_PHYSICAL_LUNS */
- rcode = aac_report_phys_luns(dev, fibptr, AAC_INIT);
- }
-
if (!dev->in_reset) {
char buffer[16];
tmp = le32_to_cpu(dev->adapter_info.kernelrev);
@@ -2739,14 +2908,6 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
}
} else { /* check for physical non-dasd devices */
bus = aac_logical_to_phys(scmd_channel(scsicmd));
- if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS &&
- (dev->hba_map[bus][cid].expose
- == AAC_HIDE_DISK)){
- if (scsicmd->cmnd[0] == INQUIRY) {
- scsicmd->result = DID_NO_CONNECT << 16;
- goto scsi_done_ret;
- }
- }
if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS &&
dev->hba_map[bus][cid].devtype
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index d52265416da2..0095fcbd1c88 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -41,6 +41,7 @@
#include <linux/interrupt.h>
#include <linux/pci.h>
+#include <scsi/scsi_host.h>
/*------------------------------------------------------------------------------
* D E F I N E S
@@ -97,7 +98,7 @@ enum {
#define PMC_GLOBAL_INT_BIT0 0x00000001
#ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 50834
+# define AAC_DRIVER_BUILD 50877
# define AAC_DRIVER_BRANCH "-custom"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
@@ -117,9 +118,13 @@ enum {
/* Thor: 5 phys. buses: #0: empty, 1-4: 256 targets each */
#define AAC_MAX_BUSES 5
#define AAC_MAX_TARGETS 256
+#define AAC_BUS_TARGET_LOOP (AAC_MAX_BUSES * AAC_MAX_TARGETS)
#define AAC_MAX_NATIVE_SIZE 2048
#define FW_ERROR_BUFFER_SIZE 512
+#define get_bus_number(x)	((x) / AAC_MAX_TARGETS)
+#define get_target_number(x)	((x) % AAC_MAX_TARGETS)
+
/* Thor AIF events */
#define SA_AIF_HOTPLUG (1<<1)
#define SA_AIF_HARDWARE (1<<2)
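[Not part of the patch] AAC_BUS_TARGET_LOOP and the two helper macros above let a single linear index sweep the whole (bus, target) grid instead of nested loops, which is how aac_update_safw_host_devices() iterates in the commsup.c hunk below. A runnable sketch of the mapping, reusing the same constants (the sampling stride is arbitrary):

    #include <stdio.h>

    #define AAC_MAX_BUSES        5
    #define AAC_MAX_TARGETS      256
    #define AAC_BUS_TARGET_LOOP  (AAC_MAX_BUSES * AAC_MAX_TARGETS)
    #define get_bus_number(x)    ((x) / AAC_MAX_TARGETS)
    #define get_target_number(x) ((x) % AAC_MAX_TARGETS)

    int main(void)
    {
        int i;

        /* One linear loop replaces the old nested bus/target loops. */
        for (i = 0; i < AAC_BUS_TARGET_LOOP; i += 511)
            printf("index %4d -> bus %d target %3d\n",
                   i, get_bus_number(i), get_target_number(i));
        return 0;
    }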
@@ -1334,17 +1339,17 @@ struct fib {
#define AAC_DEVTYPE_RAID_MEMBER 1
#define AAC_DEVTYPE_ARC_RAW 2
#define AAC_DEVTYPE_NATIVE_RAW 3
-#define AAC_EXPOSE_DISK 0
-#define AAC_HIDE_DISK 3
+
+#define AAC_SAFW_RESCAN_DELAY (10 * HZ)
struct aac_hba_map_info {
__le32 rmw_nexus; /* nexus for native HBA devices */
u8 devtype; /* device type */
- u8 new_devtype;
u8 reset_state; /* 0 - no reset, 1..x - */
/* after xth TM LUN reset */
u16 qd_limit;
- u8 expose; /*checks if to expose or not*/
+ u32 scan_counter;
+ struct aac_ciss_identify_pd *safw_identify_resp;
};
/*
@@ -1560,6 +1565,7 @@ struct aac_dev
spinlock_t fib_lock;
struct mutex ioctl_mutex;
+ struct mutex scan_mutex;
struct aac_queue_block *queues;
/*
* The user API will use an IOCTL to register itself to receive
@@ -1605,6 +1611,7 @@ struct aac_dev
int maximum_num_channels;
struct fsa_dev_info *fsa_dev;
struct task_struct *thread;
+ struct delayed_work safw_rescan_work;
int cardtype;
/*
*This lock will protect the two 32-bit
@@ -1668,9 +1675,11 @@ struct aac_dev
u32 vector_cap; /* MSI-X vector capab.*/
int msi_enabled; /* MSI/MSI-X enabled */
atomic_t msix_counter;
+ u32 scan_counter;
struct msix_entry msixentry[AAC_MAX_MSIX];
struct aac_msix_ctx aac_msix[AAC_MAX_MSIX]; /* context */
struct aac_hba_map_info hba_map[AAC_MAX_BUSES][AAC_MAX_TARGETS];
+ struct aac_ciss_phys_luns_resp *safw_phys_luns;
u8 adapter_shutdown;
u32 handle_pci_error;
bool init_reset;
@@ -2023,6 +2032,12 @@ struct aac_srb_reply
__le32 sense_data_size;
u8 sense_data[AAC_SENSE_BUFFERSIZE]; // Can this be SCSI_SENSE_BUFFERSIZE
};
+
+struct aac_srb_unit {
+ struct aac_srb srb;
+ struct aac_srb_reply srb_reply;
+};
+
/*
* SRB Flags
*/
@@ -2627,16 +2642,41 @@ static inline int aac_adapter_check_health(struct aac_dev *dev)
return (dev)->a_ops.adapter_check_health(dev);
}
+
+int aac_scan_host(struct aac_dev *dev);
+
+static inline void aac_schedule_safw_scan_worker(struct aac_dev *dev)
+{
+ schedule_delayed_work(&dev->safw_rescan_work, AAC_SAFW_RESCAN_DELAY);
+}
+
+static inline void aac_safw_rescan_worker(struct work_struct *work)
+{
+ struct aac_dev *dev = container_of(to_delayed_work(work),
+ struct aac_dev, safw_rescan_work);
+
+ wait_event(dev->scsi_host_ptr->host_wait,
+ !scsi_host_in_recovery(dev->scsi_host_ptr));
+
+ aac_scan_host(dev);
+}
+
+static inline void aac_cancel_safw_rescan_worker(struct aac_dev *dev)
+{
+ if (dev->sa_firmware)
+ cancel_delayed_work_sync(&dev->safw_rescan_work);
+}
+
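[Not part of the patch] aac_schedule_safw_scan_worker() and aac_cancel_safw_rescan_worker() above are thin wrappers around the generic delayed-work API: the work item is initialized once at probe time (see the linit.c hunk below), armed with a relative delay, and synchronously cancelled on every teardown path so the worker can never run against a departing adapter. A self-contained sketch of the same lifecycle as a toy module (hypothetical names; 1-second delay instead of AAC_SAFW_RESCAN_DELAY):

    #include <linux/module.h>
    #include <linux/workqueue.h>

    static struct delayed_work demo_work;

    static void demo_worker(struct work_struct *work)
    {
        pr_info("demo: delayed rescan fired\n");
    }

    static int __init demo_init(void)
    {
        INIT_DELAYED_WORK(&demo_work, demo_worker);  /* as in aac_probe_one() */
        schedule_delayed_work(&demo_work, HZ);       /* arm, relative delay */
        return 0;
    }

    static void __exit demo_exit(void)
    {
        /* Like aac_cancel_safw_rescan_worker(): waits for a running
         * instance and drops a pending one before teardown. */
        cancel_delayed_work_sync(&demo_work);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");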
/* SCp.phase values */
#define AAC_OWNER_MIDLEVEL 0x101
#define AAC_OWNER_LOWLEVEL 0x102
#define AAC_OWNER_ERROR_HANDLER 0x103
#define AAC_OWNER_FIRMWARE 0x106
+void aac_safw_rescan_worker(struct work_struct *work);
int aac_acquire_irq(struct aac_dev *dev);
void aac_free_irq(struct aac_dev *dev);
-int aac_report_phys_luns(struct aac_dev *dev, struct fib *fibptr, int rescan);
-int aac_issue_bmic_identify(struct aac_dev *dev, u32 bus, u32 target);
+int aac_setup_safw_adapter(struct aac_dev *dev);
const char *aac_driverinfo(struct Scsi_Host *);
void aac_fib_vector_assign(struct aac_dev *dev);
struct fib *aac_fib_alloc(struct aac_dev *dev);
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 9ab0fa959d83..a2b3430072c7 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -1052,9 +1052,13 @@ static int aac_send_reset_adapter(struct aac_dev *dev, void __user *arg)
if (copy_from_user((void *)&reset, arg, sizeof(struct aac_reset_iop)))
return -EFAULT;
+ dev->adapter_shutdown = 1;
+
+ mutex_unlock(&dev->ioctl_mutex);
retval = aac_reset_adapter(dev, 0, reset.reset_type);
- return retval;
+ mutex_lock(&dev->ioctl_mutex);
+ return retval;
}
int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 1bc623ad3faf..0dc7b5a4fea2 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -42,6 +42,8 @@
#include <linux/completion.h>
#include <linux/mm.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
#include "aacraid.h"
@@ -284,6 +286,38 @@ static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem,
q->entries = qsize;
}
+static void aac_wait_for_io_completion(struct aac_dev *aac)
+{
+ unsigned long flagv = 0;
+ int i = 0;
+
+ for (i = 60; i; --i) {
+ struct scsi_device *dev;
+ struct scsi_cmnd *command;
+ int active = 0;
+
+ __shost_for_each_device(dev, aac->scsi_host_ptr) {
+ spin_lock_irqsave(&dev->list_lock, flagv);
+ list_for_each_entry(command, &dev->cmd_list, list) {
+ if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
+ active++;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->list_lock, flagv);
+ if (active)
+ break;
+
+ }
+		/*
+		 * We can exit if all the commands are complete
+		 */
+		if (active == 0)
+			break;
+		ssleep(1);
+ }
+}
+
/**
* aac_send_shutdown - shutdown an adapter
* @dev: Adapter to shutdown
@@ -295,12 +329,10 @@ int aac_send_shutdown(struct aac_dev * dev)
{
struct fib * fibctx;
struct aac_close *cmd;
- int status;
+ int status = 0;
- fibctx = aac_fib_alloc(dev);
- if (!fibctx)
- return -ENOMEM;
- aac_fib_init(fibctx);
+ if (aac_adapter_check_health(dev))
+ return status;
if (!dev->adapter_shutdown) {
mutex_lock(&dev->ioctl_mutex);
@@ -308,6 +340,13 @@ int aac_send_shutdown(struct aac_dev * dev)
mutex_unlock(&dev->ioctl_mutex);
}
+ aac_wait_for_io_completion(dev);
+
+ fibctx = aac_fib_alloc(dev);
+ if (!fibctx)
+ return -ENOMEM;
+ aac_fib_init(fibctx);
+
cmd = (struct aac_close *) fib_data(fibctx);
cmd->command = cpu_to_le32(VM_CloseAll);
cmd->cid = cpu_to_le32(0xfffffffe);
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 80a8cb26cdea..84858d5c8257 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -33,6 +33,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/crash_dump.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
@@ -1629,28 +1630,28 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
command->scsi_done(command);
}
/*
- * Any Device that was already marked offline needs to be cleaned up
+	 * Any device that was already marked offline needs to be marked
+	 * running
*/
__shost_for_each_device(dev, host) {
- if (!scsi_device_online(dev)) {
- sdev_printk(KERN_INFO, dev, "Removing offline device\n");
- scsi_remove_device(dev);
- scsi_device_put(dev);
- }
+ if (!scsi_device_online(dev))
+ scsi_device_set_state(dev, SDEV_RUNNING);
}
retval = 0;
out:
aac->in_reset = 0;
scsi_unblock_requests(host);
+
/*
* Issue bus rescan to catch any configuration that might have
* occurred
*/
- if (!retval) {
- dev_info(&aac->pdev->dev, "Issuing bus rescan\n");
- scsi_scan_host(host);
+ if (!retval && !is_kdump_kernel()) {
+ dev_info(&aac->pdev->dev, "Scheduling bus rescan\n");
+ aac_schedule_safw_scan_worker(aac);
}
+
if (jafo) {
spin_lock_irq(host->host_lock);
}
@@ -1681,31 +1682,6 @@ int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
*/
host = aac->scsi_host_ptr;
scsi_block_requests(host);
- if (forced < 2) for (retval = 60; retval; --retval) {
- struct scsi_device * dev;
- struct scsi_cmnd * command;
- int active = 0;
-
- __shost_for_each_device(dev, host) {
- spin_lock_irqsave(&dev->list_lock, flagv);
- list_for_each_entry(command, &dev->cmd_list, list) {
- if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
- active++;
- break;
- }
- }
- spin_unlock_irqrestore(&dev->list_lock, flagv);
- if (active)
- break;
-
- }
- /*
- * We can exit If all the commands are complete
- */
- if (active == 0)
- break;
- ssleep(1);
- }
/* Quiesce build, flush cache, write through mode */
if (forced < 2)
@@ -1874,42 +1850,124 @@ out:
return BlinkLED;
}
+static inline int is_safw_raid_volume(struct aac_dev *aac, int bus, int target)
+{
+ return bus == CONTAINER_CHANNEL && target < aac->maximum_num_containers;
+}
+
+static struct scsi_device *aac_lookup_safw_scsi_device(struct aac_dev *dev,
+ int bus,
+ int target)
+{
+ if (bus != CONTAINER_CHANNEL)
+ bus = aac_phys_to_logical(bus);
+
+ return scsi_device_lookup(dev->scsi_host_ptr, bus, target, 0);
+}
+
+static int aac_add_safw_device(struct aac_dev *dev, int bus, int target)
+{
+ if (bus != CONTAINER_CHANNEL)
+ bus = aac_phys_to_logical(bus);
+
+ return scsi_add_device(dev->scsi_host_ptr, bus, target, 0);
+}
+
+static void aac_put_safw_scsi_device(struct scsi_device *sdev)
+{
+ if (sdev)
+ scsi_device_put(sdev);
+}
-static void aac_resolve_luns(struct aac_dev *dev)
+static void aac_remove_safw_device(struct aac_dev *dev, int bus, int target)
{
- int bus, target, channel;
struct scsi_device *sdev;
- u8 devtype;
- u8 new_devtype;
- for (bus = 0; bus < AAC_MAX_BUSES; bus++) {
- for (target = 0; target < AAC_MAX_TARGETS; target++) {
+ sdev = aac_lookup_safw_scsi_device(dev, bus, target);
+ scsi_remove_device(sdev);
+ aac_put_safw_scsi_device(sdev);
+}
- if (bus == CONTAINER_CHANNEL)
- channel = CONTAINER_CHANNEL;
- else
- channel = aac_phys_to_logical(bus);
+static inline int aac_is_safw_scan_count_equal(struct aac_dev *dev,
+ int bus, int target)
+{
+ return dev->hba_map[bus][target].scan_counter == dev->scan_counter;
+}
- devtype = dev->hba_map[bus][target].devtype;
- new_devtype = dev->hba_map[bus][target].new_devtype;
+static int aac_is_safw_target_valid(struct aac_dev *dev, int bus, int target)
+{
+ if (is_safw_raid_volume(dev, bus, target))
+ return dev->fsa_dev[target].valid;
+ else
+ return aac_is_safw_scan_count_equal(dev, bus, target);
+}
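[Not part of the patch] The scan_counter test above is a generation scheme that replaces the old expose/new_devtype bookkeeping: dev->scan_counter is bumped once per rescan (the increment itself is outside the hunks shown), aac_update_hba_map() stamps every target the firmware still reports (see the earlier aachba.c hunk), and any target whose stamp lags the current generation is treated as gone. A standalone sketch of the idea (illustrative, userspace C):

    #include <stdio.h>
    #include <stdint.h>

    #define NTARGETS 4

    static uint32_t generation;       /* dev->scan_counter analogue */
    static uint32_t stamp[NTARGETS];  /* hba_map[...].scan_counter analogue */

    static void rescan(const int *present)
    {
        int t;

        generation++;                 /* one bump per rescan */
        for (t = 0; t < NTARGETS; t++)
            if (present[t])
                stamp[t] = generation;  /* firmware still reports it */
    }

    int main(void)
    {
        int first[NTARGETS]  = { 1, 1, 1, 0 };
        int second[NTARGETS] = { 1, 0, 1, 0 };
        int t;

        rescan(first);
        rescan(second);               /* target 1 vanished in between */
        for (t = 0; t < NTARGETS; t++)
            printf("target %d: %s\n", t,
                   stamp[t] == generation ? "valid" : "stale");
        return 0;
    }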
- sdev = scsi_device_lookup(dev->scsi_host_ptr, channel,
- target, 0);
+static int aac_is_safw_device_exposed(struct aac_dev *dev, int bus, int target)
+{
+ int is_exposed = 0;
+ struct scsi_device *sdev;
- if (!sdev && new_devtype)
- scsi_add_device(dev->scsi_host_ptr, channel,
- target, 0);
- else if (sdev && new_devtype != devtype)
- scsi_remove_device(sdev);
- else if (sdev && new_devtype == devtype)
- scsi_rescan_device(&sdev->sdev_gendev);
+ sdev = aac_lookup_safw_scsi_device(dev, bus, target);
+ if (sdev)
+ is_exposed = 1;
+ aac_put_safw_scsi_device(sdev);
- if (sdev)
- scsi_device_put(sdev);
+ return is_exposed;
+}
- dev->hba_map[bus][target].devtype = new_devtype;
- }
+static int aac_update_safw_host_devices(struct aac_dev *dev)
+{
+ int i;
+ int bus;
+ int target;
+ int is_exposed = 0;
+ int rcode = 0;
+
+ rcode = aac_setup_safw_adapter(dev);
+ if (unlikely(rcode < 0)) {
+ goto out;
}
+
+ for (i = 0; i < AAC_BUS_TARGET_LOOP; i++) {
+
+ bus = get_bus_number(i);
+ target = get_target_number(i);
+
+ is_exposed = aac_is_safw_device_exposed(dev, bus, target);
+
+ if (aac_is_safw_target_valid(dev, bus, target) && !is_exposed)
+ aac_add_safw_device(dev, bus, target);
+ else if (!aac_is_safw_target_valid(dev, bus, target) &&
+ is_exposed)
+ aac_remove_safw_device(dev, bus, target);
+ }
+out:
+ return rcode;
+}
+
+static int aac_scan_safw_host(struct aac_dev *dev)
+{
+ int rcode = 0;
+
+ rcode = aac_update_safw_host_devices(dev);
+ if (rcode)
+ aac_schedule_safw_scan_worker(dev);
+
+ return rcode;
+}
+
+int aac_scan_host(struct aac_dev *dev)
+{
+ int rcode = 0;
+
+ mutex_lock(&dev->scan_mutex);
+ if (dev->sa_firmware)
+ rcode = aac_scan_safw_host(dev);
+ else
+ scsi_scan_host(dev->scsi_host_ptr);
+ mutex_unlock(&dev->scan_mutex);
+
+ return rcode;
}
/**
@@ -1922,10 +1980,8 @@ static void aac_resolve_luns(struct aac_dev *dev)
*/
static void aac_handle_sa_aif(struct aac_dev *dev, struct fib *fibptr)
{
- int i, bus, target, container, rcode = 0;
+ int i;
u32 events = 0;
- struct fib *fib;
- struct scsi_device *sdev;
if (fibptr->hbacmd_size & SA_AIF_HOTPLUG)
events = SA_AIF_HOTPLUG;
@@ -1947,44 +2003,8 @@ static void aac_handle_sa_aif(struct aac_dev *dev, struct fib *fibptr)
case SA_AIF_LDEV_CHANGE:
case SA_AIF_BPCFG_CHANGE:
- fib = aac_fib_alloc(dev);
- if (!fib) {
- pr_err("aac_handle_sa_aif: out of memory\n");
- return;
- }
- for (bus = 0; bus < AAC_MAX_BUSES; bus++)
- for (target = 0; target < AAC_MAX_TARGETS; target++)
- dev->hba_map[bus][target].new_devtype = 0;
-
- rcode = aac_report_phys_luns(dev, fib, AAC_RESCAN);
-
- if (rcode != -ERESTARTSYS)
- aac_fib_free(fib);
-
- aac_resolve_luns(dev);
-
- if (events == SA_AIF_LDEV_CHANGE ||
- events == SA_AIF_BPCFG_CHANGE) {
- aac_get_containers(dev);
- for (container = 0; container <
- dev->maximum_num_containers; ++container) {
- sdev = scsi_device_lookup(dev->scsi_host_ptr,
- CONTAINER_CHANNEL,
- container, 0);
- if (dev->fsa_dev[container].valid && !sdev) {
- scsi_add_device(dev->scsi_host_ptr,
- CONTAINER_CHANNEL,
- container, 0);
- } else if (!dev->fsa_dev[container].valid &&
- sdev) {
- scsi_remove_device(sdev);
- scsi_device_put(sdev);
- } else if (sdev) {
- scsi_rescan_device(&sdev->sdev_gendev);
- scsi_device_put(sdev);
- }
- }
- }
+ aac_scan_host(dev);
+
break;
case SA_AIF_BPSTAT_CHANGE:
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index d55332de08f9..b3b931ab77eb 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -683,6 +683,9 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
u32 bus, cid;
int ret = FAILED;
+ if (aac_adapter_check_health(aac))
+ return ret;
+
bus = aac_logical_to_phys(scmd_channel(cmd));
cid = scmd_id(cmd);
if (aac->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) {
@@ -690,7 +693,6 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
struct aac_hba_tm_req *tmf;
int status;
u64 address;
- __le32 managed_request_id;
pr_err("%s: Host adapter abort request (%d,%d,%d,%d)\n",
AAC_DRIVERNAME,
@@ -703,8 +705,6 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
(fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) &&
(fib->callback_data == cmd)) {
found = 1;
- managed_request_id = ((struct aac_hba_cmd_req *)
- fib->hw_fib_va)->request_id;
break;
}
}
@@ -1375,18 +1375,15 @@ static ssize_t aac_store_reset_adapter(struct device *device,
const char *buf, size_t count)
{
int retval = -EACCES;
- int bled = 0;
- struct aac_dev *aac;
-
if (!capable(CAP_SYS_ADMIN))
return retval;
- aac = (struct aac_dev *)class_to_shost(device)->hostdata;
- bled = buf[0] == '!' ? 1:0;
- retval = aac_reset_adapter(aac, bled, IOP_HWSOFT_RESET);
+ retval = aac_reset_adapter(shost_priv(class_to_shost(device)),
+ buf[0] == '!', IOP_HWSOFT_RESET);
if (retval >= 0)
retval = count;
+
return retval;
}
@@ -1689,6 +1686,9 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
spin_lock_init(&aac->fib_lock);
mutex_init(&aac->ioctl_mutex);
+ mutex_init(&aac->scan_mutex);
+
+ INIT_DELAYED_WORK(&aac->safw_rescan_work, aac_safw_rescan_worker);
/*
* Map in the registers from the adapter.
*/
@@ -1792,7 +1792,8 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
error = scsi_add_host(shost, &pdev->dev);
if (error)
goto out_deinit;
- scsi_scan_host(shost);
+
+ aac_scan_host(aac);
pci_enable_pcie_error_reporting(pdev);
pci_save_state(pdev);
@@ -1877,6 +1878,7 @@ static int aac_suspend(struct pci_dev *pdev, pm_message_t state)
struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
scsi_block_requests(shost);
+ aac_cancel_safw_rescan_worker(aac);
aac_send_shutdown(aac);
aac_release_resources(aac);
@@ -1935,6 +1937,7 @@ static void aac_remove_one(struct pci_dev *pdev)
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
+ aac_cancel_safw_rescan_worker(aac);
scsi_remove_host(shost);
__aac_shutdown(aac);
@@ -1992,6 +1995,7 @@ static pci_ers_result_t aac_pci_error_detected(struct pci_dev *pdev,
aac->handle_pci_error = 1;
scsi_block_requests(aac->scsi_host_ptr);
+ aac_cancel_safw_rescan_worker(aac);
aac_flush_ios(aac);
aac_release_resources(aac);
@@ -2076,7 +2080,7 @@ static void aac_pci_resume(struct pci_dev *pdev)
if (sdev->sdev_state == SDEV_OFFLINE)
sdev->sdev_state = SDEV_RUNNING;
scsi_unblock_requests(aac->scsi_host_ptr);
- scsi_scan_host(aac->scsi_host_ptr);
+ aac_scan_host(aac);
pci_save_state(pdev);
dev_err(&pdev->dev, "aacraid: PCI error - resume\n");
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index 553922fed524..882f40353b96 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -329,6 +329,22 @@ int aac_sa_init(struct aac_dev *dev)
instance = dev->id;
name = dev->name;
+ /*
+ * Fill in the function dispatch table.
+ */
+
+ dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter;
+ dev->a_ops.adapter_disable_int = aac_sa_disable_interrupt;
+ dev->a_ops.adapter_enable_int = aac_sa_enable_interrupt;
+ dev->a_ops.adapter_notify = aac_sa_notify_adapter;
+ dev->a_ops.adapter_sync_cmd = sa_sync_cmd;
+ dev->a_ops.adapter_check_health = aac_sa_check_health;
+ dev->a_ops.adapter_restart = aac_sa_restart_adapter;
+ dev->a_ops.adapter_start = aac_sa_start_adapter;
+ dev->a_ops.adapter_intr = aac_sa_intr;
+ dev->a_ops.adapter_deliver = aac_rx_deliver_producer;
+ dev->a_ops.adapter_ioremap = aac_sa_ioremap;
+
if (aac_sa_ioremap(dev, dev->base_size)) {
printk(KERN_WARNING "%s: unable to map adapter.\n", name);
goto error_iounmap;
@@ -363,22 +379,6 @@ int aac_sa_init(struct aac_dev *dev)
}
/*
- * Fill in the function dispatch table.
- */
-
- dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter;
- dev->a_ops.adapter_disable_int = aac_sa_disable_interrupt;
- dev->a_ops.adapter_enable_int = aac_sa_enable_interrupt;
- dev->a_ops.adapter_notify = aac_sa_notify_adapter;
- dev->a_ops.adapter_sync_cmd = sa_sync_cmd;
- dev->a_ops.adapter_check_health = aac_sa_check_health;
- dev->a_ops.adapter_restart = aac_sa_restart_adapter;
- dev->a_ops.adapter_start = aac_sa_start_adapter;
- dev->a_ops.adapter_intr = aac_sa_intr;
- dev->a_ops.adapter_deliver = aac_rx_deliver_producer;
- dev->a_ops.adapter_ioremap = aac_sa_ioremap;
-
- /*
* First clear out all interrupts. Then enable the one's that
* we can handle.
*/
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index a254b32eba39..f375f3557c18 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -45,52 +45,57 @@
#include <linux/interrupt.h>
struct device_attribute;
 /* The limit of outstanding SCSI commands that the firmware can handle */
-#ifdef CONFIG_XEN
- #define ARCMSR_MAX_FREECCB_NUM 160
-#define ARCMSR_MAX_OUTSTANDING_CMD 155
-#else
- #define ARCMSR_MAX_FREECCB_NUM 320
-#define ARCMSR_MAX_OUTSTANDING_CMD 255
-#endif
-#define ARCMSR_DRIVER_VERSION "v1.30.00.22-20151126"
-#define ARCMSR_SCSI_INITIATOR_ID 255
-#define ARCMSR_MAX_XFER_SECTORS 512
-#define ARCMSR_MAX_XFER_SECTORS_B 4096
-#define ARCMSR_MAX_XFER_SECTORS_C 304
-#define ARCMSR_MAX_TARGETID 17
-#define ARCMSR_MAX_TARGETLUN 8
-#define ARCMSR_MAX_CMD_PERLUN ARCMSR_MAX_OUTSTANDING_CMD
-#define ARCMSR_MAX_QBUFFER 4096
-#define ARCMSR_DEFAULT_SG_ENTRIES 38
-#define ARCMSR_MAX_HBB_POSTQUEUE 264
+#define ARCMSR_MAX_FREECCB_NUM 1024
+#define ARCMSR_MAX_OUTSTANDING_CMD 1024
+#define ARCMSR_DEFAULT_OUTSTANDING_CMD 128
+#define ARCMSR_MIN_OUTSTANDING_CMD 32
+#define ARCMSR_DRIVER_VERSION "v1.40.00.04-20171130"
+#define ARCMSR_SCSI_INITIATOR_ID 255
+#define ARCMSR_MAX_XFER_SECTORS 512
+#define ARCMSR_MAX_XFER_SECTORS_B 4096
+#define ARCMSR_MAX_XFER_SECTORS_C 304
+#define ARCMSR_MAX_TARGETID 17
+#define ARCMSR_MAX_TARGETLUN 8
+#define ARCMSR_MAX_CMD_PERLUN 128
+#define ARCMSR_DEFAULT_CMD_PERLUN 32
+#define ARCMSR_MIN_CMD_PERLUN 1
+#define ARCMSR_MAX_QBUFFER 4096
+#define ARCMSR_DEFAULT_SG_ENTRIES 38
+#define ARCMSR_MAX_HBB_POSTQUEUE 264
#define ARCMSR_MAX_ARC1214_POSTQUEUE 256
#define ARCMSR_MAX_ARC1214_DONEQUEUE 257
-#define ARCMSR_MAX_XFER_LEN 0x26000 /* 152K */
-#define ARCMSR_CDB_SG_PAGE_LENGTH 256
+#define ARCMSR_MAX_HBE_DONEQUEUE 512
+#define ARCMSR_MAX_XFER_LEN 0x26000 /* 152K */
+#define ARCMSR_CDB_SG_PAGE_LENGTH 256
#define ARCMST_NUM_MSIX_VECTORS 4
#ifndef PCI_DEVICE_ID_ARECA_1880
-#define PCI_DEVICE_ID_ARECA_1880 0x1880
- #endif
+#define PCI_DEVICE_ID_ARECA_1880 0x1880
+#endif
#ifndef PCI_DEVICE_ID_ARECA_1214
- #define PCI_DEVICE_ID_ARECA_1214 0x1214
+#define PCI_DEVICE_ID_ARECA_1214 0x1214
#endif
#ifndef PCI_DEVICE_ID_ARECA_1203
- #define PCI_DEVICE_ID_ARECA_1203 0x1203
+#define PCI_DEVICE_ID_ARECA_1203 0x1203
#endif
+#ifndef PCI_DEVICE_ID_ARECA_1884
+#define PCI_DEVICE_ID_ARECA_1884 0x1884
+#endif
+#define ARCMSR_HOURS			(1000 * 60 * 60 * 4) /* 4 hours, in ms */
+#define ARCMSR_MINUTES			(1000 * 60 * 60) /* 60 minutes, in ms */
/*
**********************************************************************************
**
**********************************************************************************
*/
-#define ARC_SUCCESS 0
-#define ARC_FAILURE 1
+#define ARC_SUCCESS 0
+#define ARC_FAILURE 1
/*
*******************************************************************************
** split 64bits dma addressing
*******************************************************************************
*/
-#define dma_addr_hi32(addr) (uint32_t) ((addr>>16)>>16)
-#define dma_addr_lo32(addr) (uint32_t) (addr & 0xffffffff)
+#define dma_addr_hi32(addr) (uint32_t) ((addr>>16)>>16)
+#define dma_addr_lo32(addr) (uint32_t) (addr & 0xffffffff)
/*
*******************************************************************************
** MESSAGE CONTROL CODE
@@ -130,7 +135,7 @@ struct CMD_MESSAGE_FIELD
#define FUNCTION_SAY_HELLO 0x0807
#define FUNCTION_SAY_GOODBYE 0x0808
#define FUNCTION_FLUSH_ADAPTER_CACHE 0x0809
-#define FUNCTION_GET_FIRMWARE_STATUS 0x080A
+#define FUNCTION_GET_FIRMWARE_STATUS 0x080A
#define FUNCTION_HARDWARE_RESET 0x080B
/* ARECA IO CONTROL CODE*/
#define ARCMSR_MESSAGE_READ_RQBUFFER \
@@ -161,18 +166,18 @@ struct CMD_MESSAGE_FIELD
** structure for holding DMA address data
*************************************************************
*/
-#define IS_DMA64 (sizeof(dma_addr_t) == 8)
-#define IS_SG64_ADDR 0x01000000 /* bit24 */
+#define IS_DMA64 (sizeof(dma_addr_t) == 8)
+#define IS_SG64_ADDR 0x01000000 /* bit24 */
struct SG32ENTRY
{
- __le32 length;
- __le32 address;
+ __le32 length;
+ __le32 address;
}__attribute__ ((packed));
struct SG64ENTRY
{
- __le32 length;
- __le32 address;
- __le32 addresshigh;
+ __le32 length;
+ __le32 address;
+ __le32 addresshigh;
}__attribute__ ((packed));
/*
********************************************************************
@@ -191,50 +196,50 @@ struct QBUFFER
*/
struct FIRMWARE_INFO
{
- uint32_t signature; /*0, 00-03*/
- uint32_t request_len; /*1, 04-07*/
- uint32_t numbers_queue; /*2, 08-11*/
- uint32_t sdram_size; /*3, 12-15*/
- uint32_t ide_channels; /*4, 16-19*/
- char vendor[40]; /*5, 20-59*/
- char model[8]; /*15, 60-67*/
- char firmware_ver[16]; /*17, 68-83*/
- char device_map[16]; /*21, 84-99*/
- uint32_t cfgVersion; /*25,100-103 Added for checking of new firmware capability*/
- uint8_t cfgSerial[16]; /*26,104-119*/
- uint32_t cfgPicStatus; /*30,120-123*/
+ uint32_t signature; /*0, 00-03*/
+ uint32_t request_len; /*1, 04-07*/
+ uint32_t numbers_queue; /*2, 08-11*/
+ uint32_t sdram_size; /*3, 12-15*/
+ uint32_t ide_channels; /*4, 16-19*/
+ char vendor[40]; /*5, 20-59*/
+ char model[8]; /*15, 60-67*/
+ char firmware_ver[16]; /*17, 68-83*/
+ char device_map[16]; /*21, 84-99*/
+ uint32_t cfgVersion; /*25,100-103 Added for checking of new firmware capability*/
+ uint8_t cfgSerial[16]; /*26,104-119*/
+ uint32_t cfgPicStatus; /*30,120-123*/
};
/* signature of set and get firmware config */
-#define ARCMSR_SIGNATURE_GET_CONFIG 0x87974060
-#define ARCMSR_SIGNATURE_SET_CONFIG 0x87974063
+#define ARCMSR_SIGNATURE_GET_CONFIG 0x87974060
+#define ARCMSR_SIGNATURE_SET_CONFIG 0x87974063
/* message code of inbound message register */
-#define ARCMSR_INBOUND_MESG0_NOP 0x00000000
-#define ARCMSR_INBOUND_MESG0_GET_CONFIG 0x00000001
-#define ARCMSR_INBOUND_MESG0_SET_CONFIG 0x00000002
-#define ARCMSR_INBOUND_MESG0_ABORT_CMD 0x00000003
-#define ARCMSR_INBOUND_MESG0_STOP_BGRB 0x00000004
-#define ARCMSR_INBOUND_MESG0_FLUSH_CACHE 0x00000005
-#define ARCMSR_INBOUND_MESG0_START_BGRB 0x00000006
-#define ARCMSR_INBOUND_MESG0_CHK331PENDING 0x00000007
-#define ARCMSR_INBOUND_MESG0_SYNC_TIMER 0x00000008
+#define ARCMSR_INBOUND_MESG0_NOP 0x00000000
+#define ARCMSR_INBOUND_MESG0_GET_CONFIG 0x00000001
+#define ARCMSR_INBOUND_MESG0_SET_CONFIG 0x00000002
+#define ARCMSR_INBOUND_MESG0_ABORT_CMD 0x00000003
+#define ARCMSR_INBOUND_MESG0_STOP_BGRB 0x00000004
+#define ARCMSR_INBOUND_MESG0_FLUSH_CACHE 0x00000005
+#define ARCMSR_INBOUND_MESG0_START_BGRB 0x00000006
+#define ARCMSR_INBOUND_MESG0_CHK331PENDING 0x00000007
+#define ARCMSR_INBOUND_MESG0_SYNC_TIMER 0x00000008
/* doorbell interrupt generator */
-#define ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK 0x00000001
-#define ARCMSR_INBOUND_DRIVER_DATA_READ_OK 0x00000002
-#define ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK 0x00000001
-#define ARCMSR_OUTBOUND_IOP331_DATA_READ_OK 0x00000002
+#define ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK 0x00000001
+#define ARCMSR_INBOUND_DRIVER_DATA_READ_OK 0x00000002
+#define ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK 0x00000001
+#define ARCMSR_OUTBOUND_IOP331_DATA_READ_OK 0x00000002
/* ccb areca cdb flag */
-#define ARCMSR_CCBPOST_FLAG_SGL_BSIZE 0x80000000
-#define ARCMSR_CCBPOST_FLAG_IAM_BIOS 0x40000000
-#define ARCMSR_CCBREPLY_FLAG_IAM_BIOS 0x40000000
-#define ARCMSR_CCBREPLY_FLAG_ERROR_MODE0 0x10000000
-#define ARCMSR_CCBREPLY_FLAG_ERROR_MODE1 0x00000001
+#define ARCMSR_CCBPOST_FLAG_SGL_BSIZE 0x80000000
+#define ARCMSR_CCBPOST_FLAG_IAM_BIOS 0x40000000
+#define ARCMSR_CCBREPLY_FLAG_IAM_BIOS 0x40000000
+#define ARCMSR_CCBREPLY_FLAG_ERROR_MODE0 0x10000000
+#define ARCMSR_CCBREPLY_FLAG_ERROR_MODE1 0x00000001
/* outbound firmware ok */
-#define ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK 0x80000000
+#define ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK 0x80000000
/* ARC-1680 Bus Reset*/
-#define ARCMSR_ARC1680_BUS_RESET 0x00000003
+#define ARCMSR_ARC1680_BUS_RESET 0x00000003
/* ARC-1880 Bus Reset*/
-#define ARCMSR_ARC1880_RESET_ADAPTER 0x00000024
-#define ARCMSR_ARC1880_DiagWrite_ENABLE 0x00000080
+#define ARCMSR_ARC1880_RESET_ADAPTER 0x00000024
+#define ARCMSR_ARC1880_DiagWrite_ENABLE 0x00000080
/*
************************************************************************
@@ -277,9 +282,10 @@ struct FIRMWARE_INFO
#define ARCMSR_MESSAGE_FLUSH_CACHE 0x00050008
/* (ARCMSR_INBOUND_MESG0_START_BGRB<<16)|ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED) */
#define ARCMSR_MESSAGE_START_BGRB 0x00060008
+#define ARCMSR_MESSAGE_SYNC_TIMER 0x00080008
#define ARCMSR_MESSAGE_START_DRIVER_MODE 0x000E0008
#define ARCMSR_MESSAGE_SET_POST_WINDOW 0x000F0008
-#define ARCMSR_MESSAGE_ACTIVE_EOI_MODE 0x00100008
+#define ARCMSR_MESSAGE_ACTIVE_EOI_MODE 0x00100008
/* ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK */
#define ARCMSR_MESSAGE_FIRMWARE_OK 0x80000000
/* ioctl transfer */
@@ -288,7 +294,7 @@ struct FIRMWARE_INFO
#define ARCMSR_DRV2IOP_DATA_READ_OK 0x00000002
#define ARCMSR_DRV2IOP_CDB_POSTED 0x00000004
#define ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED 0x00000008
-#define ARCMSR_DRV2IOP_END_OF_INTERRUPT 0x00000010
+#define ARCMSR_DRV2IOP_END_OF_INTERRUPT 0x00000010
/* data tunnel buffer between user space program and its firmware */
/* user space data to iop 128bytes */
@@ -313,12 +319,12 @@ struct FIRMWARE_INFO
#define ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK 0x00000008 /* When clear, the Outbound Post List FIFO Not Empty interrupt routes to the host.*/
#define ARCMSR_HBCMU_ALL_INTMASKENABLE 0x0000000D /* disable all ISR */
/* Host Interrupt Status */
-#define ARCMSR_HBCMU_UTILITY_A_ISR 0x00000001
+#define ARCMSR_HBCMU_UTILITY_A_ISR 0x00000001
/*
** Set when the Utility_A Interrupt bit is set in the Outbound Doorbell Register.
** It clears by writing a 1 to the Utility_A bit in the Outbound Doorbell Clear Register or through automatic clearing (if enabled).
*/
-#define ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR 0x00000004
+#define ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR 0x00000004
/*
** Set if Outbound Doorbell register bits 30:1 have a non-zero
** value. This bit clears only when Outbound Doorbell bits
@@ -331,7 +337,7 @@ struct FIRMWARE_INFO
** Register (FIFO) is not empty. It clears when the Outbound
** Post List FIFO is empty.
*/
-#define ARCMSR_HBCMU_SAS_ALL_INT 0x00000010
+#define ARCMSR_HBCMU_SAS_ALL_INT 0x00000010
/*
** This bit indicates a SAS interrupt from a source external to
** the PCIe core. This bit is not maskable.
@@ -340,17 +346,17 @@ struct FIRMWARE_INFO
#define ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK 0x00000002
#define ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK 0x00000004
/*inbound message 0 ready*/
-#define ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE 0x00000008
+#define ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE 0x00000008
/*more than 12 request completed in a time*/
#define ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING 0x00000010
#define ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK 0x00000002
/*outbound DATA WRITE isr door bell clear*/
-#define ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_DOORBELL_CLEAR 0x00000002
+#define ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_DOORBELL_CLEAR 0x00000002
#define ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK 0x00000004
/*outbound DATA READ isr door bell clear*/
-#define ARCMSR_HBCMU_IOP2DRV_DATA_READ_DOORBELL_CLEAR 0x00000004
+#define ARCMSR_HBCMU_IOP2DRV_DATA_READ_DOORBELL_CLEAR 0x00000004
/*outbound message 0 ready*/
-#define ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE 0x00000008
+#define ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE 0x00000008
/*outbound message cmd isr door bell clear*/
#define ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR 0x00000008
/*ARCMSR_HBAMU_MESSAGE_FIRMWARE_OK*/
@@ -407,18 +413,43 @@ struct FIRMWARE_INFO
#define ARCMSR_ARC1214_OUTBOUND_LIST_INTERRUPT_CLEAR 0x00000001
/*
*******************************************************************************
+** SPEC. for Areca Type E adapter
+*******************************************************************************
+*/
+#define ARCMSR_SIGNATURE_1884 0x188417D3
+
+#define ARCMSR_HBEMU_DRV2IOP_DATA_WRITE_OK 0x00000002
+#define ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK 0x00000004
+#define ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE 0x00000008
+
+#define ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK 0x00000002
+#define ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK 0x00000004
+#define ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE 0x00000008
+
+#define ARCMSR_HBEMU_MESSAGE_FIRMWARE_OK 0x80000000
+
+#define ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR 0x00000001
+#define ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR 0x00000008
+#define ARCMSR_HBEMU_ALL_INTMASKENABLE 0x00000009
+
+/* ARC-1884 doorbell sync */
+#define ARCMSR_HBEMU_DOORBELL_SYNC 0x100
+#define ARCMSR_ARC188X_RESET_ADAPTER 0x00000004
+#define ARCMSR_ARC1884_DiagWrite_ENABLE 0x00000080
+/*
+*******************************************************************************
** ARECA SCSI COMMAND DESCRIPTOR BLOCK size 0x1F8 (504)
*******************************************************************************
*/
struct ARCMSR_CDB
{
- uint8_t Bus;
- uint8_t TargetID;
- uint8_t LUN;
- uint8_t Function;
- uint8_t CdbLength;
- uint8_t sgcount;
- uint8_t Flags;
+ uint8_t Bus;
+ uint8_t TargetID;
+ uint8_t LUN;
+ uint8_t Function;
+ uint8_t CdbLength;
+ uint8_t sgcount;
+ uint8_t Flags;
#define ARCMSR_CDB_FLAG_SGL_BSIZE 0x01
#define ARCMSR_CDB_FLAG_BIOS 0x02
#define ARCMSR_CDB_FLAG_WRITE 0x04
@@ -426,21 +457,21 @@ struct ARCMSR_CDB
#define ARCMSR_CDB_FLAG_HEADQ 0x08
#define ARCMSR_CDB_FLAG_ORDEREDQ 0x10
- uint8_t msgPages;
- uint32_t msgContext;
- uint32_t DataLength;
- uint8_t Cdb[16];
- uint8_t DeviceStatus;
+ uint8_t msgPages;
+ uint32_t msgContext;
+ uint32_t DataLength;
+ uint8_t Cdb[16];
+ uint8_t DeviceStatus;
#define ARCMSR_DEV_CHECK_CONDITION 0x02
#define ARCMSR_DEV_SELECT_TIMEOUT 0xF0
#define ARCMSR_DEV_ABORTED 0xF1
#define ARCMSR_DEV_INIT_FAIL 0xF2
- uint8_t SenseData[15];
+ uint8_t SenseData[15];
union
{
- struct SG32ENTRY sg32entry[1];
- struct SG64ENTRY sg64entry[1];
+ struct SG32ENTRY sg32entry[1];
+ struct SG64ENTRY sg64entry[1];
} u;
};
/*
@@ -480,13 +511,13 @@ struct MessageUnit_B
uint32_t done_qbuffer[ARCMSR_MAX_HBB_POSTQUEUE];
uint32_t postq_index;
uint32_t doneq_index;
- uint32_t __iomem *drv2iop_doorbell;
- uint32_t __iomem *drv2iop_doorbell_mask;
- uint32_t __iomem *iop2drv_doorbell;
- uint32_t __iomem *iop2drv_doorbell_mask;
- uint32_t __iomem *message_rwbuffer;
- uint32_t __iomem *message_wbuffer;
- uint32_t __iomem *message_rbuffer;
+ uint32_t __iomem *drv2iop_doorbell;
+ uint32_t __iomem *drv2iop_doorbell_mask;
+ uint32_t __iomem *iop2drv_doorbell;
+ uint32_t __iomem *iop2drv_doorbell_mask;
+ uint32_t __iomem *message_rwbuffer;
+ uint32_t __iomem *message_wbuffer;
+ uint32_t __iomem *message_rbuffer;
};
/*
*********************************************************************
@@ -506,7 +537,7 @@ struct MessageUnit_C{
uint32_t diagnostic_rw_data; /*0024 0027*/
uint32_t diagnostic_rw_address_low; /*0028 002B*/
uint32_t diagnostic_rw_address_high; /*002C 002F*/
- uint32_t host_int_status; /*0030 0033*/
+ uint32_t host_int_status; /*0030 0033*/
uint32_t host_int_mask; /*0034 0037*/
uint32_t dcr_data; /*0038 003B*/
uint32_t dcr_address; /*003C 003F*/
@@ -518,12 +549,12 @@ struct MessageUnit_C{
uint32_t iop_int_mask; /*0054 0057*/
uint32_t iop_inbound_queue_port; /*0058 005B*/
uint32_t iop_outbound_queue_port; /*005C 005F*/
- uint32_t inbound_free_list_index; /*0060 0063*/
- uint32_t inbound_post_list_index; /*0064 0067*/
- uint32_t outbound_free_list_index; /*0068 006B*/
- uint32_t outbound_post_list_index; /*006C 006F*/
+ uint32_t inbound_free_list_index; /*0060 0063*/
+ uint32_t inbound_post_list_index; /*0064 0067*/
+ uint32_t outbound_free_list_index; /*0068 006B*/
+ uint32_t outbound_post_list_index; /*006C 006F*/
uint32_t inbound_doorbell_clear; /*0070 0073*/
- uint32_t i2o_message_unit_control; /*0074 0077*/
+ uint32_t i2o_message_unit_control; /*0074 0077*/
uint32_t last_used_message_source_address_low; /*0078 007B*/
uint32_t last_used_message_source_address_high; /*007C 007F*/
uint32_t pull_mode_data_byte_count[4]; /*0080 008F*/
@@ -531,7 +562,7 @@ struct MessageUnit_C{
uint32_t done_queue_not_empty_int_counter_timer; /*0094 0097*/
uint32_t utility_A_int_counter_timer; /*0098 009B*/
uint32_t outbound_doorbell; /*009C 009F*/
- uint32_t outbound_doorbell_clear; /*00A0 00A3*/
+ uint32_t outbound_doorbell_clear; /*00A0 00A3*/
uint32_t message_source_address_index; /*00A4 00A7*/
uint32_t message_done_queue_index; /*00A8 00AB*/
uint32_t reserved0; /*00AC 00AF*/
@@ -553,10 +584,10 @@ struct MessageUnit_C{
uint32_t last_used_message_dest_address_high; /*00EC 00EF*/
uint32_t message_done_queue_base_address_low; /*00F0 00F3*/
uint32_t message_done_queue_base_address_high; /*00F4 00F7*/
- uint32_t host_diagnostic; /*00F8 00FB*/
+ uint32_t host_diagnostic; /*00F8 00FB*/
uint32_t write_sequence; /*00FC 00FF*/
uint32_t reserved1[34]; /*0100 0187*/
- uint32_t reserved2[1950]; /*0188 1FFF*/
+ uint32_t reserved2[1950]; /*0188 1FFF*/
uint32_t message_wbuffer[32]; /*2000 207F*/
uint32_t reserved3[32]; /*2080 20FF*/
uint32_t message_rbuffer[32]; /*2100 217F*/
@@ -614,115 +645,208 @@ struct MessageUnit_D {
u32 __iomem *msgcode_rwbuffer; /* 0x2200 */
};
/*
+*********************************************************************
+** Messaging Unit (MU) of Type E processor(LSI)
+*********************************************************************
+*/
+struct MessageUnit_E{
+ uint32_t iobound_doorbell; /*0000 0003*/
+ uint32_t write_sequence_3xxx; /*0004 0007*/
+ uint32_t host_diagnostic_3xxx; /*0008 000B*/
+ uint32_t posted_outbound_doorbell; /*000C 000F*/
+ uint32_t master_error_attribute; /*0010 0013*/
+ uint32_t master_error_address_low; /*0014 0017*/
+ uint32_t master_error_address_high; /*0018 001B*/
+ uint32_t hcb_size; /*001C 001F*/
+ uint32_t inbound_doorbell; /*0020 0023*/
+ uint32_t diagnostic_rw_data; /*0024 0027*/
+ uint32_t diagnostic_rw_address_low; /*0028 002B*/
+ uint32_t diagnostic_rw_address_high; /*002C 002F*/
+ uint32_t host_int_status; /*0030 0033*/
+ uint32_t host_int_mask; /*0034 0037*/
+ uint32_t dcr_data; /*0038 003B*/
+ uint32_t dcr_address; /*003C 003F*/
+ uint32_t inbound_queueport; /*0040 0043*/
+ uint32_t outbound_queueport; /*0044 0047*/
+ uint32_t hcb_pci_address_low; /*0048 004B*/
+ uint32_t hcb_pci_address_high; /*004C 004F*/
+ uint32_t iop_int_status; /*0050 0053*/
+ uint32_t iop_int_mask; /*0054 0057*/
+ uint32_t iop_inbound_queue_port; /*0058 005B*/
+ uint32_t iop_outbound_queue_port; /*005C 005F*/
+ uint32_t inbound_free_list_index; /*0060 0063*/
+ uint32_t inbound_post_list_index; /*0064 0067*/
+ uint32_t reply_post_producer_index; /*0068 006B*/
+ uint32_t reply_post_consumer_index; /*006C 006F*/
+ uint32_t inbound_doorbell_clear; /*0070 0073*/
+ uint32_t i2o_message_unit_control; /*0074 0077*/
+ uint32_t last_used_message_source_address_low; /*0078 007B*/
+ uint32_t last_used_message_source_address_high; /*007C 007F*/
+ uint32_t pull_mode_data_byte_count[4]; /*0080 008F*/
+ uint32_t message_dest_address_index; /*0090 0093*/
+ uint32_t done_queue_not_empty_int_counter_timer; /*0094 0097*/
+ uint32_t utility_A_int_counter_timer; /*0098 009B*/
+ uint32_t outbound_doorbell; /*009C 009F*/
+ uint32_t outbound_doorbell_clear; /*00A0 00A3*/
+ uint32_t message_source_address_index; /*00A4 00A7*/
+ uint32_t message_done_queue_index; /*00A8 00AB*/
+ uint32_t reserved0; /*00AC 00AF*/
+ uint32_t inbound_msgaddr0; /*00B0 00B3*/
+ uint32_t inbound_msgaddr1; /*00B4 00B7*/
+ uint32_t outbound_msgaddr0; /*00B8 00BB*/
+ uint32_t outbound_msgaddr1; /*00BC 00BF*/
+ uint32_t inbound_queueport_low; /*00C0 00C3*/
+ uint32_t inbound_queueport_high; /*00C4 00C7*/
+ uint32_t outbound_queueport_low; /*00C8 00CB*/
+ uint32_t outbound_queueport_high; /*00CC 00CF*/
+ uint32_t iop_inbound_queue_port_low; /*00D0 00D3*/
+ uint32_t iop_inbound_queue_port_high; /*00D4 00D7*/
+ uint32_t iop_outbound_queue_port_low; /*00D8 00DB*/
+ uint32_t iop_outbound_queue_port_high; /*00DC 00DF*/
+ uint32_t message_dest_queue_port_low; /*00E0 00E3*/
+ uint32_t message_dest_queue_port_high; /*00E4 00E7*/
+ uint32_t last_used_message_dest_address_low; /*00E8 00EB*/
+ uint32_t last_used_message_dest_address_high; /*00EC 00EF*/
+ uint32_t message_done_queue_base_address_low; /*00F0 00F3*/
+ uint32_t message_done_queue_base_address_high; /*00F4 00F7*/
+ uint32_t host_diagnostic; /*00F8 00FB*/
+ uint32_t write_sequence; /*00FC 00FF*/
+ uint32_t reserved1[34]; /*0100 0187*/
+ uint32_t reserved2[1950]; /*0188 1FFF*/
+ uint32_t message_wbuffer[32]; /*2000 207F*/
+ uint32_t reserved3[32]; /*2080 20FF*/
+ uint32_t message_rbuffer[32]; /*2100 217F*/
+ uint32_t reserved4[32]; /*2180 21FF*/
+ uint32_t msgcode_rwbuffer[256]; /*2200 23FF*/
+};
+
+typedef struct deliver_completeQ {
+ uint16_t cmdFlag;
+ uint16_t cmdSMID;
+ uint16_t cmdLMID; // reserved (0)
+ uint16_t cmdFlag2; // reserved (0)
+} DeliverQ, CompletionQ, *pDeliver_Q, *pCompletion_Q;
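[Not part of the patch] struct deliver_completeQ is one entry of the Type E reply ring: the adapter DMAs completions into host memory and names the finished request by cmdSMID, which indexes back into pccb_pool (note the smid field added to struct CommandControlBlock below). Draining such a ring is a producer/consumer index walk with wraparound; a standalone sketch of that walk (userspace simulation, names and sizes hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    #define RING_ENTRIES 8            /* completionQ_entry analogue */

    struct entry { uint16_t cmdFlag, cmdSMID, cmdLMID, cmdFlag2; };

    static struct entry ring[RING_ENTRIES];

    /* Consume entries in [consumer, producer); return new consumer index. */
    static uint32_t drain(uint32_t consumer, uint32_t producer)
    {
        while (consumer != producer) {
            printf("complete SMID %u\n", ring[consumer].cmdSMID);
            if (++consumer >= RING_ENTRIES)
                consumer = 0;         /* wrap, like doneq_index */
        }
        return consumer;
    }

    int main(void)
    {
        uint32_t consumer = 0;

        ring[0].cmdSMID = 7;          /* pretend the adapter posted two replies */
        ring[1].cmdSMID = 3;
        consumer = drain(consumer, 2);
        return consumer == 2 ? 0 : 1;
    }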
+/*
*******************************************************************************
** Adapter Control Block
*******************************************************************************
*/
struct AdapterControlBlock
{
- uint32_t adapter_type; /* adapter A,B..... */
- #define ACB_ADAPTER_TYPE_A 0x00000001 /* hba I IOP */
- #define ACB_ADAPTER_TYPE_B 0x00000002 /* hbb M IOP */
- #define ACB_ADAPTER_TYPE_C 0x00000004 /* hbc P IOP */
- #define ACB_ADAPTER_TYPE_D 0x00000008 /* hbd A IOP */
- u32 roundup_ccbsize;
- struct pci_dev * pdev;
- struct Scsi_Host * host;
- unsigned long vir2phy_offset;
+ uint32_t adapter_type; /* adapter A,B..... */
+#define ACB_ADAPTER_TYPE_A 0x00000000 /* hba I IOP */
+#define ACB_ADAPTER_TYPE_B 0x00000001 /* hbb M IOP */
+#define ACB_ADAPTER_TYPE_C 0x00000002 /* hbc L IOP */
+#define ACB_ADAPTER_TYPE_D 0x00000003 /* hbd M IOP */
+#define ACB_ADAPTER_TYPE_E 0x00000004 /* hba L IOP */
+ u32 roundup_ccbsize;
+ struct pci_dev * pdev;
+ struct Scsi_Host * host;
+ unsigned long vir2phy_offset;
/* Offset is used in making arc cdb physical to virtual calculations */
- uint32_t outbound_int_enable;
- uint32_t cdb_phyaddr_hi32;
- uint32_t reg_mu_acc_handle0;
- spinlock_t eh_lock;
- spinlock_t ccblist_lock;
- spinlock_t postq_lock;
- spinlock_t doneq_lock;
- spinlock_t rqbuffer_lock;
- spinlock_t wqbuffer_lock;
+ uint32_t outbound_int_enable;
+ uint32_t cdb_phyaddr_hi32;
+ uint32_t reg_mu_acc_handle0;
+ spinlock_t eh_lock;
+ spinlock_t ccblist_lock;
+ spinlock_t postq_lock;
+ spinlock_t doneq_lock;
+ spinlock_t rqbuffer_lock;
+ spinlock_t wqbuffer_lock;
union {
struct MessageUnit_A __iomem *pmuA;
struct MessageUnit_B *pmuB;
struct MessageUnit_C __iomem *pmuC;
struct MessageUnit_D *pmuD;
+ struct MessageUnit_E __iomem *pmuE;
};
/* message unit ATU inbound base address0 */
- void __iomem *mem_base0;
- void __iomem *mem_base1;
- uint32_t acb_flags;
+ void __iomem *mem_base0;
+ void __iomem *mem_base1;
+ uint32_t acb_flags;
u16 dev_id;
- uint8_t adapter_index;
- #define ACB_F_SCSISTOPADAPTER 0x0001
- #define ACB_F_MSG_STOP_BGRB 0x0002
- /* stop RAID background rebuild */
- #define ACB_F_MSG_START_BGRB 0x0004
- /* stop RAID background rebuild */
- #define ACB_F_IOPDATA_OVERFLOW 0x0008
- /* iop message data rqbuffer overflow */
- #define ACB_F_MESSAGE_WQBUFFER_CLEARED 0x0010
- /* message clear wqbuffer */
- #define ACB_F_MESSAGE_RQBUFFER_CLEARED 0x0020
- /* message clear rqbuffer */
- #define ACB_F_MESSAGE_WQBUFFER_READED 0x0040
- #define ACB_F_BUS_RESET 0x0080
- #define ACB_F_BUS_HANG_ON 0x0800/* need hardware reset bus */
+ uint8_t adapter_index;
+#define ACB_F_SCSISTOPADAPTER 0x0001
+#define ACB_F_MSG_STOP_BGRB 0x0002
+/* stop RAID background rebuild */
+#define ACB_F_MSG_START_BGRB 0x0004
+/* stop RAID background rebuild */
+#define ACB_F_IOPDATA_OVERFLOW 0x0008
+/* iop message data rqbuffer overflow */
+#define ACB_F_MESSAGE_WQBUFFER_CLEARED 0x0010
+/* message clear wqbuffer */
+#define ACB_F_MESSAGE_RQBUFFER_CLEARED 0x0020
+/* message clear rqbuffer */
+#define ACB_F_MESSAGE_WQBUFFER_READED 0x0040
+#define ACB_F_BUS_RESET 0x0080
+#define ACB_F_BUS_HANG_ON 0x0800/* need hardware reset bus */
- #define ACB_F_IOP_INITED 0x0100
- /* iop init */
- #define ACB_F_ABORT 0x0200
- #define ACB_F_FIRMWARE_TRAP 0x0400
- struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM];
+#define ACB_F_IOP_INITED 0x0100
+/* iop init */
+#define ACB_F_ABORT 0x0200
+#define ACB_F_FIRMWARE_TRAP 0x0400
+#define ACB_F_MSG_GET_CONFIG 0x1000
+ struct CommandControlBlock * pccb_pool[ARCMSR_MAX_FREECCB_NUM];
/* used for memory free */
- struct list_head ccb_free_list;
+ struct list_head ccb_free_list;
/* head of free ccb list */
- atomic_t ccboutstandingcount;
+ atomic_t ccboutstandingcount;
/*The present outstanding command number that in the IOP that
waiting for being handled by FW*/
- void * dma_coherent;
+ void * dma_coherent;
/* dma_coherent used for memory free */
- dma_addr_t dma_coherent_handle;
+ dma_addr_t dma_coherent_handle;
/* dma_coherent_handle used for memory free */
- dma_addr_t dma_coherent_handle2;
- void *dma_coherent2;
- unsigned int uncache_size;
- uint8_t rqbuffer[ARCMSR_MAX_QBUFFER];
+ dma_addr_t dma_coherent_handle2;
+ void *dma_coherent2;
+ unsigned int uncache_size;
+ uint8_t rqbuffer[ARCMSR_MAX_QBUFFER];
/* data collection buffer for read from 80331 */
- int32_t rqbuf_getIndex;
+ int32_t rqbuf_getIndex;
/* first of read buffer */
- int32_t rqbuf_putIndex;
+ int32_t rqbuf_putIndex;
/* last of read buffer */
- uint8_t wqbuffer[ARCMSR_MAX_QBUFFER];
+ uint8_t wqbuffer[ARCMSR_MAX_QBUFFER];
/* data collection buffer for write to 80331 */
- int32_t wqbuf_getIndex;
+ int32_t wqbuf_getIndex;
/* first of write buffer */
- int32_t wqbuf_putIndex;
+ int32_t wqbuf_putIndex;
/* last of write buffer */
- uint8_t devstate[ARCMSR_MAX_TARGETID][ARCMSR_MAX_TARGETLUN];
+ uint8_t devstate[ARCMSR_MAX_TARGETID][ARCMSR_MAX_TARGETLUN];
/* id0 ..... id15, lun0...lun7 */
-#define ARECA_RAID_GONE 0x55
-#define ARECA_RAID_GOOD 0xaa
- uint32_t num_resets;
- uint32_t num_aborts;
- uint32_t signature;
- uint32_t firm_request_len;
- uint32_t firm_numbers_queue;
- uint32_t firm_sdram_size;
- uint32_t firm_hd_channels;
- uint32_t firm_cfg_version;
+#define ARECA_RAID_GONE 0x55
+#define ARECA_RAID_GOOD 0xaa
+ uint32_t num_resets;
+ uint32_t num_aborts;
+ uint32_t signature;
+ uint32_t firm_request_len;
+ uint32_t firm_numbers_queue;
+ uint32_t firm_sdram_size;
+ uint32_t firm_hd_channels;
+ uint32_t firm_cfg_version;
char firm_model[12];
char firm_version[20];
char device_map[20]; /*21,84-99*/
- struct work_struct arcmsr_do_message_isr_bh;
- struct timer_list eternal_timer;
+ struct work_struct arcmsr_do_message_isr_bh;
+ struct timer_list eternal_timer;
unsigned short fw_flag;
- #define FW_NORMAL 0x0000
- #define FW_BOG 0x0001
- #define FW_DEADLOCK 0x0010
- atomic_t rq_map_token;
- atomic_t ante_token_value;
- uint32_t maxOutstanding;
- int vector_count;
+#define FW_NORMAL 0x0000
+#define FW_BOG 0x0001
+#define FW_DEADLOCK 0x0010
+ atomic_t rq_map_token;
+ atomic_t ante_token_value;
+ uint32_t maxOutstanding;
+ int vector_count;
+ uint32_t maxFreeCCB;
+ struct timer_list refresh_timer;
+ uint32_t doneq_index;
+ uint32_t ccbsize;
+ uint32_t in_doorbell;
+ uint32_t out_doorbell;
+ uint32_t completionQ_entry;
+ pCompletion_Q pCompletionQ;
};/* HW_DEVICE_EXTENSION */
/*
*******************************************************************************
@@ -732,29 +856,30 @@ struct AdapterControlBlock
*/
struct CommandControlBlock{
/*x32:sizeof struct_CCB=(32+60)byte, x64:sizeof struct_CCB=(64+60)byte*/
- struct list_head list; /*x32: 8byte, x64: 16byte*/
- struct scsi_cmnd *pcmd; /*8 bytes pointer of linux scsi command */
- struct AdapterControlBlock *acb; /*x32: 4byte, x64: 8byte*/
- uint32_t cdb_phyaddr; /*x32: 4byte, x64: 4byte*/
- uint32_t arc_cdb_size; /*x32:4byte,x64:4byte*/
- uint16_t ccb_flags; /*x32: 2byte, x64: 2byte*/
- #define CCB_FLAG_READ 0x0000
- #define CCB_FLAG_WRITE 0x0001
- #define CCB_FLAG_ERROR 0x0002
- #define CCB_FLAG_FLUSHCACHE 0x0004
- #define CCB_FLAG_MASTER_ABORTED 0x0008
- uint16_t startdone; /*x32:2byte,x32:2byte*/
- #define ARCMSR_CCB_DONE 0x0000
- #define ARCMSR_CCB_START 0x55AA
- #define ARCMSR_CCB_ABORTED 0xAA55
- #define ARCMSR_CCB_ILLEGAL 0xFFFF
- #if BITS_PER_LONG == 64
+ struct list_head list; /*x32: 8byte, x64: 16byte*/
+ struct scsi_cmnd *pcmd; /*8 bytes pointer of linux scsi command */
+ struct AdapterControlBlock *acb; /*x32: 4byte, x64: 8byte*/
+ uint32_t cdb_phyaddr; /*x32: 4byte, x64: 4byte*/
+ uint32_t arc_cdb_size; /*x32:4byte,x64:4byte*/
+ uint16_t ccb_flags; /*x32: 2byte, x64: 2byte*/
+#define CCB_FLAG_READ 0x0000
+#define CCB_FLAG_WRITE 0x0001
+#define CCB_FLAG_ERROR 0x0002
+#define CCB_FLAG_FLUSHCACHE 0x0004
+#define CCB_FLAG_MASTER_ABORTED 0x0008
+ uint16_t startdone; /*x32:2byte,x32:2byte*/
+#define ARCMSR_CCB_DONE 0x0000
+#define ARCMSR_CCB_START 0x55AA
+#define ARCMSR_CCB_ABORTED 0xAA55
+#define ARCMSR_CCB_ILLEGAL 0xFFFF
+ uint32_t smid;
+#if BITS_PER_LONG == 64
/* ======================512+64 bytes======================== */
- uint32_t reserved[5]; /*24 byte*/
- #else
+ uint32_t reserved[4]; /*16 byte*/
+#else
/* ======================512+32 bytes======================== */
- uint32_t reserved; /*8 byte*/
- #endif
+ // uint32_t reserved; /*4 byte*/
+#endif
/* ======================================================= */
struct ARCMSR_CDB arcmsr_cdb;
};
@@ -788,13 +913,13 @@ struct SENSE_DATA
** Outbound Interrupt Status Register - OISR
*******************************************************************************
*/
-#define ARCMSR_MU_OUTBOUND_INTERRUPT_STATUS_REG 0x30
-#define ARCMSR_MU_OUTBOUND_PCI_INT 0x10
-#define ARCMSR_MU_OUTBOUND_POSTQUEUE_INT 0x08
-#define ARCMSR_MU_OUTBOUND_DOORBELL_INT 0x04
-#define ARCMSR_MU_OUTBOUND_MESSAGE1_INT 0x02
-#define ARCMSR_MU_OUTBOUND_MESSAGE0_INT 0x01
-#define ARCMSR_MU_OUTBOUND_HANDLE_INT \
+#define ARCMSR_MU_OUTBOUND_INTERRUPT_STATUS_REG 0x30
+#define ARCMSR_MU_OUTBOUND_PCI_INT 0x10
+#define ARCMSR_MU_OUTBOUND_POSTQUEUE_INT 0x08
+#define ARCMSR_MU_OUTBOUND_DOORBELL_INT 0x04
+#define ARCMSR_MU_OUTBOUND_MESSAGE1_INT 0x02
+#define ARCMSR_MU_OUTBOUND_MESSAGE0_INT 0x01
+#define ARCMSR_MU_OUTBOUND_HANDLE_INT \
(ARCMSR_MU_OUTBOUND_MESSAGE0_INT \
|ARCMSR_MU_OUTBOUND_MESSAGE1_INT \
|ARCMSR_MU_OUTBOUND_DOORBELL_INT \
@@ -805,13 +930,13 @@ struct SENSE_DATA
** Outbound Interrupt Mask Register - OIMR
*******************************************************************************
*/
-#define ARCMSR_MU_OUTBOUND_INTERRUPT_MASK_REG 0x34
-#define ARCMSR_MU_OUTBOUND_PCI_INTMASKENABLE 0x10
-#define ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE 0x08
-#define ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE 0x04
-#define ARCMSR_MU_OUTBOUND_MESSAGE1_INTMASKENABLE 0x02
-#define ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE 0x01
-#define ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE 0x1F
+#define ARCMSR_MU_OUTBOUND_INTERRUPT_MASK_REG 0x34
+#define ARCMSR_MU_OUTBOUND_PCI_INTMASKENABLE 0x10
+#define ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE 0x08
+#define ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE 0x04
+#define ARCMSR_MU_OUTBOUND_MESSAGE1_INTMASKENABLE 0x02
+#define ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE 0x01
+#define ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE 0x1F
extern void arcmsr_write_ioctldata2iop(struct AdapterControlBlock *);
extern uint32_t arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *,
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 21f6421536a0..75e828bd30e3 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -75,6 +75,26 @@ MODULE_DESCRIPTION("Areca ARC11xx/12xx/16xx/188x SAS/SATA RAID Controller Driver
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(ARCMSR_DRIVER_VERSION);
+static int msix_enable = 1;
+module_param(msix_enable, int, S_IRUGO);
+MODULE_PARM_DESC(msix_enable, "Enable MSI-X interrupt(0 ~ 1), msix_enable=1(enable), =0(disable)");
+
+static int msi_enable = 1;
+module_param(msi_enable, int, S_IRUGO);
+MODULE_PARM_DESC(msi_enable, "Enable MSI interrupt(0 ~ 1), msi_enable=1(enable), =0(disable)");
+
+static int host_can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD;
+module_param(host_can_queue, int, S_IRUGO);
+MODULE_PARM_DESC(host_can_queue, " adapter queue depth(32 ~ 1024), default is 128");
+
+static int cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN;
+module_param(cmd_per_lun, int, S_IRUGO);
+MODULE_PARM_DESC(cmd_per_lun, " device queue depth(1 ~ 128), default is 32");
+
+static int set_date_time = 0;
+module_param(set_date_time, int, S_IRUGO);
+MODULE_PARM_DESC(set_date_time, " send date, time to iop(0 ~ 1), set_date_time=1(enable), default(=0) is disable");
+
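[Not part of the patch] The new module parameters turn the old compile-time queue-depth choices into load-time knobs, e.g. `modprobe arcmsr host_can_queue=256 cmd_per_lun=16`. Since users can pass anything, the probe path has to fold out-of-range values back into the ranges advertised above; a standalone sketch of plausible clamping against the arcmsr.h limits (the driver's exact policy may differ):

    #include <stdio.h>

    #define ARCMSR_MIN_OUTSTANDING_CMD     32
    #define ARCMSR_DEFAULT_OUTSTANDING_CMD 128
    #define ARCMSR_MAX_OUTSTANDING_CMD     1024

    static int clamp_can_queue(int requested)
    {
        if (requested < ARCMSR_MIN_OUTSTANDING_CMD)
            return ARCMSR_DEFAULT_OUTSTANDING_CMD;  /* fall back to default */
        if (requested > ARCMSR_MAX_OUTSTANDING_CMD)
            return ARCMSR_MAX_OUTSTANDING_CMD;
        return requested;
    }

    int main(void)
    {
        printf("%d %d %d\n", clamp_can_queue(0),
               clamp_can_queue(256), clamp_can_queue(4096));
        return 0;
    }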
#define ARCMSR_SLEEPTIME 10
#define ARCMSR_RETRYCOUNT 12
@@ -102,19 +122,19 @@ static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb);
static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb);
static void arcmsr_request_device_map(struct timer_list *t);
-static void arcmsr_hbaA_request_device_map(struct AdapterControlBlock *acb);
-static void arcmsr_hbaB_request_device_map(struct AdapterControlBlock *acb);
-static void arcmsr_hbaC_request_device_map(struct AdapterControlBlock *acb);
static void arcmsr_message_isr_bh_fn(struct work_struct *work);
static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb);
static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *pACB);
static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb);
+static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb);
+static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb);
static void arcmsr_hardware_reset(struct AdapterControlBlock *acb);
static const char *arcmsr_info(struct Scsi_Host *);
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
static void arcmsr_free_irq(struct pci_dev *, struct AdapterControlBlock *);
static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb);
+static void arcmsr_set_iop_datetime(struct timer_list *);
static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth)
{
if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
@@ -127,15 +147,15 @@ static struct scsi_host_template arcmsr_scsi_host_template = {
.name = "Areca SAS/SATA RAID driver",
.info = arcmsr_info,
.queuecommand = arcmsr_queue_command,
- .eh_abort_handler = arcmsr_abort,
+ .eh_abort_handler = arcmsr_abort,
.eh_bus_reset_handler = arcmsr_bus_reset,
.bios_param = arcmsr_bios_param,
.change_queue_depth = arcmsr_adjust_disk_queue_depth,
- .can_queue = ARCMSR_MAX_OUTSTANDING_CMD,
- .this_id = ARCMSR_SCSI_INITIATOR_ID,
- .sg_tablesize = ARCMSR_DEFAULT_SG_ENTRIES,
- .max_sectors = ARCMSR_MAX_XFER_SECTORS_C,
- .cmd_per_lun = ARCMSR_MAX_CMD_PERLUN,
+ .can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD,
+ .this_id = ARCMSR_SCSI_INITIATOR_ID,
+ .sg_tablesize = ARCMSR_DEFAULT_SG_ENTRIES,
+ .max_sectors = ARCMSR_MAX_XFER_SECTORS_C,
+ .cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = arcmsr_host_attrs,
.no_write_same = 1,
@@ -184,13 +204,15 @@ static struct pci_device_id arcmsr_device_id_table[] = {
.driver_data = ACB_ADAPTER_TYPE_A},
{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880),
.driver_data = ACB_ADAPTER_TYPE_C},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1884),
+ .driver_data = ACB_ADAPTER_TYPE_E},
{0, 0}, /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
static struct pci_driver arcmsr_pci_driver = {
.name = "arcmsr",
- .id_table = arcmsr_device_id_table,
+ .id_table = arcmsr_device_id_table,
.probe = arcmsr_probe,
.remove = arcmsr_remove,
.suspend = arcmsr_suspend,
@@ -206,7 +228,8 @@ static void arcmsr_free_mu(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_B:
- case ACB_ADAPTER_TYPE_D: {
+ case ACB_ADAPTER_TYPE_D:
+ case ACB_ADAPTER_TYPE_E: {
dma_free_coherent(&acb->pdev->dev, acb->roundup_ccbsize,
acb->dma_coherent2, acb->dma_coherent_handle2);
break;
@@ -271,6 +294,20 @@ static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
acb->mem_base0 = mem_base0;
break;
}
+ case ACB_ADAPTER_TYPE_E: {
+ acb->pmuE = ioremap(pci_resource_start(pdev, 1),
+ pci_resource_len(pdev, 1));
+ if (!acb->pmuE) {
+			pr_notice("arcmsr%d: memory mapping region failed\n",
+ acb->host->host_no);
+ return false;
+ }
+ writel(0, &acb->pmuE->host_int_status); /*clear interrupt*/
+ writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell); /* synchronize doorbell to 0 */
+ acb->in_doorbell = 0;
+ acb->out_doorbell = 0;
+ break;
+ }
}
return true;
}
@@ -295,6 +332,9 @@ static void arcmsr_unmap_pciregion(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_D:
iounmap(acb->mem_base0);
break;
+ case ACB_ADAPTER_TYPE_E:
+ iounmap(acb->pmuE);
+ break;
}
}
@@ -408,6 +448,24 @@ static bool arcmsr_hbaD_wait_msgint_ready(struct AdapterControlBlock *pACB)
return false;
}
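+/*
+ * Type E message handshake: the driver toggles
+ * ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE in its cached out_doorbell and the
+ * IOP acknowledges by toggling ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE, so a
+ * completed message shows up as a bit flip relative to the cached
+ * in_doorbell value.  Polls every 10 ms, giving up after 20 seconds.
+ */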
+static bool arcmsr_hbaE_wait_msgint_ready(struct AdapterControlBlock *pACB)
+{
+ int i;
+ uint32_t read_doorbell;
+ struct MessageUnit_E __iomem *phbcmu = pACB->pmuE;
+
+ for (i = 0; i < 2000; i++) {
+ read_doorbell = readl(&phbcmu->iobound_doorbell);
+ if ((read_doorbell ^ pACB->in_doorbell) & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) {
+ writel(0, &phbcmu->host_int_status); /*clear interrupt*/
+ pACB->in_doorbell = read_doorbell;
+ return true;
+ }
+ msleep(10);
+ } /* max 20 seconds */
+ return false;
+}
+
static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb)
{
struct MessageUnit_A __iomem *reg = acb->pmuA;
@@ -475,6 +533,24 @@ static void arcmsr_hbaD_flush_cache(struct AdapterControlBlock *pACB)
} while (retry_count != 0);
}
+static void arcmsr_hbaE_flush_cache(struct AdapterControlBlock *pACB)
+{
+ int retry_count = 30;
+ struct MessageUnit_E __iomem *reg = pACB->pmuE;
+
+ writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
+ pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(pACB->out_doorbell, &reg->iobound_doorbell);
+ do {
+ if (arcmsr_hbaE_wait_msgint_ready(pACB))
+ break;
+ retry_count--;
+ pr_notice("arcmsr%d: wait 'flush adapter "
+ "cache' timeout, retry count down = %d\n",
+ pACB->host->host_no, retry_count);
+ } while (retry_count != 0);
+}
+
static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
@@ -495,6 +571,9 @@ static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_D:
arcmsr_hbaD_flush_cache(acb);
break;
+ case ACB_ADAPTER_TYPE_E:
+ arcmsr_hbaE_flush_cache(acb);
+ break;
}
}
@@ -577,6 +656,23 @@ static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
reg->msgcode_rwbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RWBUFFER);
}
break;
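+	/*
+	 * Type E replaces the register-based reply queue with a DMA-coherent
+	 * completion ring in host memory: ARCMSR_MAX_HBE_DONEQUEUE entries of
+	 * struct deliver_completeQ plus 128 bytes of padding, rounded up to a
+	 * 32-byte multiple.
+	 */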
+ case ACB_ADAPTER_TYPE_E: {
+ uint32_t completeQ_size;
+ completeQ_size = sizeof(struct deliver_completeQ) * ARCMSR_MAX_HBE_DONEQUEUE + 128;
+ acb->roundup_ccbsize = roundup(completeQ_size, 32);
+ dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize,
+ &dma_coherent_handle, GFP_KERNEL);
+		if (!dma_coherent) {
+ pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
+ return false;
+ }
+ acb->dma_coherent_handle2 = dma_coherent_handle;
+ acb->dma_coherent2 = dma_coherent;
+ acb->pCompletionQ = dma_coherent;
+ acb->completionQ_entry = acb->roundup_ccbsize / sizeof(struct deliver_completeQ);
+ acb->doneq_index = 0;
+ }
+ break;
default:
break;
}
@@ -610,7 +706,7 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
acb->host->max_sectors = max_xfer_len/512;
acb->host->sg_tablesize = max_sg_entrys;
roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32);
- acb->uncache_size = roundup_ccbsize * ARCMSR_MAX_FREECCB_NUM;
+ acb->uncache_size = roundup_ccbsize * acb->maxFreeCCB;
dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL);
if(!dma_coherent){
printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error\n", acb->host->host_no);
@@ -619,9 +715,10 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
acb->dma_coherent = dma_coherent;
acb->dma_coherent_handle = dma_coherent_handle;
memset(dma_coherent, 0, acb->uncache_size);
+ acb->ccbsize = roundup_ccbsize;
ccb_tmp = dma_coherent;
acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle;
- for(i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++){
+ for(i = 0; i < acb->maxFreeCCB; i++){
cdb_phyaddr = dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb);
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A:
@@ -630,11 +727,13 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
break;
case ACB_ADAPTER_TYPE_C:
case ACB_ADAPTER_TYPE_D:
+ case ACB_ADAPTER_TYPE_E:
ccb_tmp->cdb_phyaddr = cdb_phyaddr;
break;
}
acb->pccb_pool[i] = ccb_tmp;
ccb_tmp->acb = acb;
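+		/*
+		 * Type E controllers address CCBs by a 16-bit SMID kept in
+		 * the upper half of this word; completions return the SMID
+		 * and the driver uses it as an index into pccb_pool[].
+		 */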
+ ccb_tmp->smid = (u32)i << 16;
INIT_LIST_HEAD(&ccb_tmp->list);
list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
ccb_tmp = (struct CommandControlBlock *)((unsigned long)ccb_tmp + roundup_ccbsize);
@@ -654,6 +753,7 @@ static void arcmsr_message_isr_bh_fn(struct work_struct *work)
struct scsi_device *psdev;
char diff, temp;
+ acb->acb_flags &= ~ACB_F_MSG_GET_CONFIG;
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
struct MessageUnit_A __iomem *reg = acb->pmuA;
@@ -683,6 +783,13 @@ static void arcmsr_message_isr_bh_fn(struct work_struct *work)
devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
break;
}
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+
+ signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
+ devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
+ break;
+ }
}
atomic_inc(&acb->rq_map_token);
if (readl(signature) != ARCMSR_SIGNATURE_GET_CONFIG)
@@ -723,17 +830,26 @@ arcmsr_request_irq(struct pci_dev *pdev, struct AdapterControlBlock *acb)
unsigned long flags;
int nvec, i;
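+	/*
+	 * Interrupt setup order: MSI-X unless msix_enable is 0, then MSI if
+	 * msi_enable is 1, finally legacy INTx.  msix_enable and msi_enable
+	 * are driver options introduced elsewhere in this patch.
+	 */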
+ if (msix_enable == 0)
+ goto msi_int0;
nvec = pci_alloc_irq_vectors(pdev, 1, ARCMST_NUM_MSIX_VECTORS,
PCI_IRQ_MSIX);
if (nvec > 0) {
pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no);
flags = 0;
} else {
- nvec = pci_alloc_irq_vectors(pdev, 1, 1,
- PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+msi_int0:
+ if (msi_enable == 1) {
+ nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (nvec == 1) {
+ dev_info(&pdev->dev, "msi enabled\n");
+ goto msi_int1;
+ }
+ }
+ nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
if (nvec < 1)
return FAILED;
-
+msi_int1:
flags = IRQF_SHARED;
}
@@ -755,6 +871,24 @@ out_free_irq:
return FAILED;
}
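+/*
+ * Helpers consolidating the timer setup previously open-coded in both
+ * arcmsr_probe() and arcmsr_resume(): a 6-second device-map poll timer and,
+ * when the set_date_time option (introduced by this patch) is enabled, a
+ * timer that pushes the host date/time to the IOP, see
+ * arcmsr_set_iop_datetime().
+ */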
+static void arcmsr_init_get_devmap_timer(struct AdapterControlBlock *pacb)
+{
+ INIT_WORK(&pacb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
+ atomic_set(&pacb->rq_map_token, 16);
+ atomic_set(&pacb->ante_token_value, 16);
+ pacb->fw_flag = FW_NORMAL;
+ timer_setup(&pacb->eternal_timer, arcmsr_request_device_map, 0);
+ pacb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
+ add_timer(&pacb->eternal_timer);
+}
+
+static void arcmsr_init_set_datetime_timer(struct AdapterControlBlock *pacb)
+{
+ timer_setup(&pacb->refresh_timer, arcmsr_set_iop_datetime, 0);
+ pacb->refresh_timer.expires = jiffies + msecs_to_jiffies(60 * 1000);
+ add_timer(&pacb->refresh_timer);
+}
+
static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct Scsi_Host *host;
@@ -789,8 +923,12 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
host->max_lun = ARCMSR_MAX_TARGETLUN;
host->max_id = ARCMSR_MAX_TARGETID; /*16:8*/
host->max_cmd_len = 16; /*this is issue of 64bit LBA ,over 2T byte*/
- host->can_queue = ARCMSR_MAX_OUTSTANDING_CMD;
- host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN;
+ if ((host_can_queue < ARCMSR_MIN_OUTSTANDING_CMD) || (host_can_queue > ARCMSR_MAX_OUTSTANDING_CMD))
+ host_can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD;
+ host->can_queue = host_can_queue; /* max simultaneous cmds */
+ if ((cmd_per_lun < ARCMSR_MIN_CMD_PERLUN) || (cmd_per_lun > ARCMSR_MAX_CMD_PERLUN))
+ cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN;
+ host->cmd_per_lun = cmd_per_lun;
host->this_id = ARCMSR_SCSI_INITIATOR_ID;
host->unique_id = (bus << 8) | dev_fun;
pci_set_drvdata(pdev, host);
@@ -833,18 +971,16 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (arcmsr_request_irq(pdev, acb) == FAILED)
goto scsi_host_remove;
arcmsr_iop_init(acb);
- INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- timer_setup(&acb->eternal_timer, arcmsr_request_device_map, 0);
- acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
- add_timer(&acb->eternal_timer);
+ arcmsr_init_get_devmap_timer(acb);
+ if (set_date_time)
+ arcmsr_init_set_datetime_timer(acb);
if(arcmsr_alloc_sysfs_attr(acb))
goto out_free_sysfs;
scsi_scan_host(host);
return 0;
out_free_sysfs:
+ if (set_date_time)
+ del_timer_sync(&acb->refresh_timer);
del_timer_sync(&acb->eternal_timer);
flush_work(&acb->arcmsr_do_message_isr_bh);
arcmsr_stop_adapter_bgrb(acb);
@@ -887,6 +1023,8 @@ static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state)
intmask_org = arcmsr_disable_outbound_ints(acb);
arcmsr_free_irq(pdev, acb);
del_timer_sync(&acb->eternal_timer);
+ if (set_date_time)
+ del_timer_sync(&acb->refresh_timer);
flush_work(&acb->arcmsr_do_message_isr_bh);
arcmsr_stop_adapter_bgrb(acb);
arcmsr_flush_adapter_cache(acb);
@@ -924,13 +1062,9 @@ static int arcmsr_resume(struct pci_dev *pdev)
if (arcmsr_request_irq(pdev, acb) == FAILED)
goto controller_stop;
arcmsr_iop_init(acb);
- INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- timer_setup(&acb->eternal_timer, arcmsr_request_device_map, 0);
- acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
- add_timer(&acb->eternal_timer);
+ arcmsr_init_get_devmap_timer(acb);
+ if (set_date_time)
+ arcmsr_init_set_datetime_timer(acb);
return 0;
controller_stop:
arcmsr_stop_adapter_bgrb(acb);
@@ -998,6 +1132,21 @@ static uint8_t arcmsr_hbaD_abort_allcmd(struct AdapterControlBlock *pACB)
return true;
}
+static uint8_t arcmsr_hbaE_abort_allcmd(struct AdapterControlBlock *pACB)
+{
+ struct MessageUnit_E __iomem *reg = pACB->pmuE;
+
+ writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
+ pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(pACB->out_doorbell, &reg->iobound_doorbell);
+ if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
+ pr_notice("arcmsr%d: wait 'abort all outstanding "
+ "command' timeout\n", pACB->host->host_no);
+ return false;
+ }
+ return true;
+}
+
static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
{
uint8_t rtnval = 0;
@@ -1020,6 +1169,9 @@ static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_D:
rtnval = arcmsr_hbaD_abort_allcmd(acb);
break;
+ case ACB_ADAPTER_TYPE_E:
+ rtnval = arcmsr_hbaE_abort_allcmd(acb);
+ break;
}
return rtnval;
}
@@ -1050,7 +1202,7 @@ static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
struct scsi_cmnd *pcmd = ccb->pcmd;
struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
- pcmd->result = DID_OK << 16;
+ pcmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
if (sensebuffer) {
int sense_data_length =
sizeof(struct SENSE_DATA) < SCSI_SENSE_BUFFERSIZE
@@ -1059,6 +1211,7 @@ static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
sensebuffer->Valid = 1;
+ pcmd->result |= (DRIVER_SENSE << 24);
}
}
@@ -1092,6 +1245,13 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
writel(ARCMSR_ARC1214_ALL_INT_DISABLE, reg->pcief0_int_enable);
}
break;
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ orig_mask = readl(&reg->host_int_mask);
+ writel(orig_mask | ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR, &reg->host_int_mask);
+ readl(&reg->host_int_mask); /* Dummy readl to force pci flush */
+ }
+ break;
}
return orig_mask;
}
@@ -1196,7 +1356,7 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
/*clear and abort all outbound posted Q*/
writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
while(((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF)
- && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
+ && (i++ < acb->maxOutstanding)) {
pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/
pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
@@ -1226,7 +1386,7 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
break;
case ACB_ADAPTER_TYPE_C: {
struct MessageUnit_C __iomem *reg = acb->pmuC;
- while ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
+ while ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < acb->maxOutstanding)) {
/*need to do*/
flag_ccb = readl(&reg->outbound_queueport_low);
ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
@@ -1280,6 +1440,9 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
pmu->doneq_index = 0x40FF;
}
break;
+ case ACB_ADAPTER_TYPE_E:
+ arcmsr_hbaE_postqueue_isr(acb);
+ break;
}
}
@@ -1293,13 +1456,15 @@ static void arcmsr_remove(struct pci_dev *pdev)
scsi_remove_host(host);
flush_work(&acb->arcmsr_do_message_isr_bh);
del_timer_sync(&acb->eternal_timer);
+ if (set_date_time)
+ del_timer_sync(&acb->refresh_timer);
arcmsr_disable_outbound_ints(acb);
arcmsr_stop_adapter_bgrb(acb);
arcmsr_flush_adapter_cache(acb);
acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
acb->acb_flags &= ~ACB_F_IOP_INITED;
- for (poll_count = 0; poll_count < ARCMSR_MAX_OUTSTANDING_CMD; poll_count++){
+ for (poll_count = 0; poll_count < acb->maxOutstanding; poll_count++){
if (!atomic_read(&acb->ccboutstandingcount))
break;
arcmsr_interrupt(acb);/* FIXME: need spinlock */
@@ -1311,7 +1476,7 @@ static void arcmsr_remove(struct pci_dev *pdev)
arcmsr_abort_allcmd(acb);
arcmsr_done4abort_postqueue(acb);
- for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
+ for (i = 0; i < acb->maxFreeCCB; i++) {
struct CommandControlBlock *ccb = acb->pccb_pool[i];
if (ccb->startdone == ARCMSR_CCB_START) {
ccb->startdone = ARCMSR_CCB_ABORTED;
@@ -1335,6 +1500,8 @@ static void arcmsr_shutdown(struct pci_dev *pdev)
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *)host->hostdata;
del_timer_sync(&acb->eternal_timer);
+ if (set_date_time)
+ del_timer_sync(&acb->refresh_timer);
arcmsr_disable_outbound_ints(acb);
arcmsr_free_irq(pdev, acb);
flush_work(&acb->arcmsr_do_message_isr_bh);
@@ -1396,6 +1563,13 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
writel(intmask_org | mask, reg->pcief0_int_enable);
break;
}
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+
+ mask = ~(ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR);
+ writel(intmask_org & mask, &reg->host_int_mask);
+ break;
+ }
}
}
@@ -1527,6 +1701,16 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr
spin_unlock_irqrestore(&acb->postq_lock, flags);
break;
}
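+	/*
+	 * Type E posts a CCB as a single "stamp" word: the CCB's SMID in the
+	 * upper 16 bits and the ARCMSR_CDB size (minus one, in 64-byte units,
+	 * capped at 0x300 bytes) in the low bits.
+	 */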
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *pmu = acb->pmuE;
+ u32 ccb_post_stamp, arc_cdb_size;
+
+ arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size;
+ ccb_post_stamp = (ccb->smid | ((arc_cdb_size - 1) >> 6));
+ writel(0, &pmu->inbound_queueport_high);
+ writel(ccb_post_stamp, &pmu->inbound_queueport_low);
+ break;
+ }
}
}
@@ -1580,6 +1764,20 @@ static void arcmsr_hbaD_stop_bgrb(struct AdapterControlBlock *pACB)
"timeout\n", pACB->host->host_no);
}
+static void arcmsr_hbaE_stop_bgrb(struct AdapterControlBlock *pACB)
+{
+ struct MessageUnit_E __iomem *reg = pACB->pmuE;
+
+ pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
+ writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
+ pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(pACB->out_doorbell, &reg->iobound_doorbell);
+ if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
+		pr_notice("arcmsr%d: wait 'stop adapter background rebuild' "
+			"timeout\n", pACB->host->host_no);
+ }
+}
+
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
@@ -1599,6 +1797,9 @@ static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_D:
arcmsr_hbaD_stop_bgrb(acb);
break;
+ case ACB_ADAPTER_TYPE_E:
+ arcmsr_hbaE_stop_bgrb(acb);
+ break;
}
}
@@ -1633,6 +1834,12 @@ static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
reg->inbound_doorbell);
}
break;
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
+ writel(acb->out_doorbell, &reg->iobound_doorbell);
+ }
+ break;
}
}
@@ -1673,6 +1880,12 @@ static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
reg->inbound_doorbell);
}
break;
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_WRITE_OK;
+ writel(acb->out_doorbell, &reg->iobound_doorbell);
+ }
+ break;
}
}
@@ -1702,6 +1915,11 @@ struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
}
break;
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ qbuffer = (struct QBUFFER __iomem *)&reg->message_rbuffer;
+ }
+ break;
}
return qbuffer;
}
@@ -1732,6 +1950,11 @@ static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBloc
pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
}
break;
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ pqbuffer = (struct QBUFFER __iomem *)&reg->message_wbuffer;
+ }
+ break;
}
return pqbuffer;
}
@@ -1785,7 +2008,7 @@ arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb,
uint8_t __iomem *iop_data;
uint32_t iop_len;
- if (acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D))
+ if (acb->adapter_type > ACB_ADAPTER_TYPE_B)
return arcmsr_Read_iop_rqbuffer_in_DWORD(acb, prbuffer);
iop_data = (uint8_t __iomem *)prbuffer->data;
iop_len = readl(&prbuffer->data_len);
@@ -1871,7 +2094,7 @@ arcmsr_write_ioctldata2iop(struct AdapterControlBlock *acb)
uint8_t __iomem *iop_data;
int32_t allxfer_len = 0;
- if (acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D)) {
+ if (acb->adapter_type > ACB_ADAPTER_TYPE_B) {
arcmsr_write_ioctldata2iop_in_DWORD(acb);
return;
}
@@ -1968,6 +2191,33 @@ static void arcmsr_hbaD_doorbell_isr(struct AdapterControlBlock *pACB)
| ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE));
}
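+/*
+ * Type E has no write-one-to-clear doorbell; events are signalled by bit
+ * flips in iobound_doorbell.  XOR-ing the freshly read value against the
+ * cached in_doorbell yields the bits that changed since the last service,
+ * and the loop re-reads until no further flips are observed.
+ */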
+static void arcmsr_hbaE_doorbell_isr(struct AdapterControlBlock *pACB)
+{
+ uint32_t outbound_doorbell, in_doorbell, tmp;
+ struct MessageUnit_E __iomem *reg = pACB->pmuE;
+
+ in_doorbell = readl(&reg->iobound_doorbell);
+ outbound_doorbell = in_doorbell ^ pACB->in_doorbell;
+ do {
+ writel(0, &reg->host_int_status); /* clear interrupt */
+ if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK) {
+ arcmsr_iop2drv_data_wrote_handle(pACB);
+ }
+ if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK) {
+ arcmsr_iop2drv_data_read_handle(pACB);
+ }
+ if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) {
+ arcmsr_hbaE_message_isr(pACB);
+ }
+ tmp = in_doorbell;
+ in_doorbell = readl(&reg->iobound_doorbell);
+ outbound_doorbell = tmp ^ in_doorbell;
+ } while (outbound_doorbell & (ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK
+ | ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK
+ | ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE));
+ pACB->in_doorbell = in_doorbell;
+}
+
static void arcmsr_hbaA_postqueue_isr(struct AdapterControlBlock *acb)
{
uint32_t flag_ccb;
@@ -2077,6 +2327,33 @@ static void arcmsr_hbaD_postqueue_isr(struct AdapterControlBlock *acb)
spin_unlock_irqrestore(&acb->doneq_lock, flags);
}
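+/*
+ * Drain the Type E completion ring: consume entries from doneq_index up to
+ * the hardware's reply_post_producer_index, look each CCB up by its SMID,
+ * then publish the new consumer index back to the controller.
+ */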
+static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb)
+{
+ uint32_t doneq_index;
+ uint16_t cmdSMID;
+ int error;
+ struct MessageUnit_E __iomem *pmu;
+ struct CommandControlBlock *ccb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&acb->doneq_lock, flags);
+ doneq_index = acb->doneq_index;
+ pmu = acb->pmuE;
+ while ((readl(&pmu->reply_post_producer_index) & 0xFFFF) != doneq_index) {
+ cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
+ ccb = acb->pccb_pool[cmdSMID];
+ error = (acb->pCompletionQ[doneq_index].cmdFlag
+ & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
+ arcmsr_drain_donequeue(acb, ccb, error);
+ doneq_index++;
+ if (doneq_index >= acb->completionQ_entry)
+ doneq_index = 0;
+ }
+ acb->doneq_index = doneq_index;
+ writel(doneq_index, &pmu->reply_post_consumer_index);
+ spin_unlock_irqrestore(&acb->doneq_lock, flags);
+}
+
/*
**********************************************************************************
** Handle a message interrupt
@@ -2090,7 +2367,8 @@ static void arcmsr_hbaA_message_isr(struct AdapterControlBlock *acb)
struct MessageUnit_A __iomem *reg = acb->pmuA;
/*clear interrupt and message state*/
writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, &reg->outbound_intstatus);
- schedule_work(&acb->arcmsr_do_message_isr_bh);
+ if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
+ schedule_work(&acb->arcmsr_do_message_isr_bh);
}
static void arcmsr_hbaB_message_isr(struct AdapterControlBlock *acb)
{
@@ -2098,7 +2376,8 @@ static void arcmsr_hbaB_message_isr(struct AdapterControlBlock *acb)
/*clear interrupt and message state*/
writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
- schedule_work(&acb->arcmsr_do_message_isr_bh);
+ if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
+ schedule_work(&acb->arcmsr_do_message_isr_bh);
}
/*
**********************************************************************************
@@ -2114,7 +2393,8 @@ static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *acb)
struct MessageUnit_C __iomem *reg = acb->pmuC;
/*clear interrupt and message state*/
writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &reg->outbound_doorbell_clear);
- schedule_work(&acb->arcmsr_do_message_isr_bh);
+ if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
+ schedule_work(&acb->arcmsr_do_message_isr_bh);
}
static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb)
@@ -2123,7 +2403,17 @@ static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb)
writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE, reg->outbound_doorbell);
readl(reg->outbound_doorbell);
- schedule_work(&acb->arcmsr_do_message_isr_bh);
+ if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
+ schedule_work(&acb->arcmsr_do_message_isr_bh);
+}
+
+static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+
+ writel(0, &reg->host_int_status);
+ if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
+ schedule_work(&acb->arcmsr_do_message_isr_bh);
}
static int arcmsr_hbaA_handle_isr(struct AdapterControlBlock *acb)
@@ -2229,6 +2519,31 @@ static irqreturn_t arcmsr_hbaD_handle_isr(struct AdapterControlBlock *pACB)
return IRQ_HANDLED;
}
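+/*
+ * Top-level Type E interrupt handler: host_int_status is masked down to the
+ * doorbell and post-queue sources, each pending source is serviced, and the
+ * status is re-read until both sources are quiet.
+ */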
+static irqreturn_t arcmsr_hbaE_handle_isr(struct AdapterControlBlock *pACB)
+{
+ uint32_t host_interrupt_status;
+ struct MessageUnit_E __iomem *pmu = pACB->pmuE;
+
+ host_interrupt_status = readl(&pmu->host_int_status) &
+ (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
+ ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR);
+ if (!host_interrupt_status)
+ return IRQ_NONE;
+ do {
+ /* MU ioctl transfer doorbell interrupts*/
+ if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR) {
+ arcmsr_hbaE_doorbell_isr(pACB);
+ }
+ /* MU post queue interrupts*/
+ if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR) {
+ arcmsr_hbaE_postqueue_isr(pACB);
+ }
+ host_interrupt_status = readl(&pmu->host_int_status);
+ } while (host_interrupt_status & (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
+ ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR));
+ return IRQ_HANDLED;
+}
+
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
@@ -2242,6 +2557,8 @@ static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
return arcmsr_hbaC_handle_isr(acb);
case ACB_ADAPTER_TYPE_D:
return arcmsr_hbaD_handle_isr(acb);
+ case ACB_ADAPTER_TYPE_E:
+ return arcmsr_hbaE_handle_isr(acb);
default:
return IRQ_NONE;
}
@@ -2636,74 +2953,66 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
static DEF_SCSI_QCMD(arcmsr_queue_command)
-static bool arcmsr_hbaA_get_config(struct AdapterControlBlock *acb)
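+/*
+ * Common "get config" post-processing shared by all adapter types: the
+ * firmware model (2 dwords), version (4 dwords) and device map (4 dwords)
+ * are copied out of the message rwbuffer with 32-bit reads, and the
+ * remaining firmware parameters are cached in the ACB.
+ */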
+static void arcmsr_get_adapter_config(struct AdapterControlBlock *pACB, uint32_t *rwbuffer)
{
- struct MessageUnit_A __iomem *reg = acb->pmuA;
- char *acb_firm_model = acb->firm_model;
- char *acb_firm_version = acb->firm_version;
- char *acb_device_map = acb->device_map;
- char __iomem *iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]);
- char __iomem *iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]);
- char __iomem *iop_device_map = (char __iomem *)(&reg->message_rwbuffer[21]);
int count;
- writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
- if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
- printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
- miscellaneous data' timeout \n", acb->host->host_no);
- return false;
- }
- count = 8;
- while (count){
- *acb_firm_model = readb(iop_firm_model);
+ uint32_t *acb_firm_model = (uint32_t *)pACB->firm_model;
+ uint32_t *acb_firm_version = (uint32_t *)pACB->firm_version;
+ uint32_t *acb_device_map = (uint32_t *)pACB->device_map;
+ uint32_t *firm_model = &rwbuffer[15];
+ uint32_t *firm_version = &rwbuffer[17];
+ uint32_t *device_map = &rwbuffer[21];
+
+ count = 2;
+ while (count) {
+ *acb_firm_model = readl(firm_model);
acb_firm_model++;
- iop_firm_model++;
+ firm_model++;
count--;
}
-
- count = 16;
- while (count){
- *acb_firm_version = readb(iop_firm_version);
+ count = 4;
+ while (count) {
+ *acb_firm_version = readl(firm_version);
acb_firm_version++;
- iop_firm_version++;
+ firm_version++;
count--;
}
-
- count=16;
- while(count){
- *acb_device_map = readb(iop_device_map);
+ count = 4;
+ while (count) {
+ *acb_device_map = readl(device_map);
acb_device_map++;
- iop_device_map++;
+ device_map++;
count--;
}
+ pACB->signature = readl(&rwbuffer[0]);
+ pACB->firm_request_len = readl(&rwbuffer[1]);
+ pACB->firm_numbers_queue = readl(&rwbuffer[2]);
+ pACB->firm_sdram_size = readl(&rwbuffer[3]);
+ pACB->firm_hd_channels = readl(&rwbuffer[4]);
+ pACB->firm_cfg_version = readl(&rwbuffer[25]);
pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
- acb->host->host_no,
- acb->firm_model,
- acb->firm_version);
- acb->signature = readl(&reg->message_rwbuffer[0]);
- acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
- acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
- acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
- acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
- acb->firm_cfg_version = readl(&reg->message_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
+ pACB->host->host_no,
+ pACB->firm_model,
+ pACB->firm_version);
+}
+
+static bool arcmsr_hbaA_get_config(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+
+ arcmsr_wait_firmware_ready(acb);
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
+ if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
+		pr_notice("arcmsr%d: wait 'get adapter firmware "
+			"miscellaneous data' timeout\n", acb->host->host_no);
+ return false;
+ }
+ arcmsr_get_adapter_config(acb, reg->message_rwbuffer);
return true;
}
static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb)
{
struct MessageUnit_B *reg = acb->pmuB;
- char *acb_firm_model = acb->firm_model;
- char *acb_firm_version = acb->firm_version;
- char *acb_device_map = acb->device_map;
- char __iomem *iop_firm_model;
- /*firm_model,15,60-67*/
- char __iomem *iop_firm_version;
- /*firm_version,17,68-83*/
- char __iomem *iop_device_map;
- /*firm_version,21,84-99*/
- int count;
-
- iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]); /*firm_model,15,60-67*/
- iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]); /*firm_version,17,68-83*/
- iop_device_map = (char __iomem *)(&reg->message_rwbuffer[21]); /*firm_version,21,84-99*/
arcmsr_wait_firmware_ready(acb);
writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
@@ -2717,127 +3026,43 @@ static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb)
miscellaneous data' timeout \n", acb->host->host_no);
return false;
}
- count = 8;
- while (count){
- *acb_firm_model = readb(iop_firm_model);
- acb_firm_model++;
- iop_firm_model++;
- count--;
- }
- count = 16;
- while (count){
- *acb_firm_version = readb(iop_firm_version);
- acb_firm_version++;
- iop_firm_version++;
- count--;
- }
-
- count = 16;
- while(count){
- *acb_device_map = readb(iop_device_map);
- acb_device_map++;
- iop_device_map++;
- count--;
- }
-
- pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
- acb->host->host_no,
- acb->firm_model,
- acb->firm_version);
-
- acb->signature = readl(&reg->message_rwbuffer[0]);
- /*firm_signature,1,00-03*/
- acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
- /*firm_request_len,1,04-07*/
- acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
- /*firm_numbers_queue,2,08-11*/
- acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
- /*firm_sdram_size,3,12-15*/
- acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
- /*firm_ide_channels,4,16-19*/
- acb->firm_cfg_version = readl(&reg->message_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
- /*firm_ide_channels,4,16-19*/
+ arcmsr_get_adapter_config(acb, reg->message_rwbuffer);
return true;
}
static bool arcmsr_hbaC_get_config(struct AdapterControlBlock *pACB)
{
- uint32_t intmask_org, Index, firmware_state = 0;
+ uint32_t intmask_org;
struct MessageUnit_C __iomem *reg = pACB->pmuC;
- char *acb_firm_model = pACB->firm_model;
- char *acb_firm_version = pACB->firm_version;
- char __iomem *iop_firm_model = (char __iomem *)(&reg->msgcode_rwbuffer[15]); /*firm_model,15,60-67*/
- char __iomem *iop_firm_version = (char __iomem *)(&reg->msgcode_rwbuffer[17]); /*firm_version,17,68-83*/
- int count;
+
/* disable all outbound interrupt */
intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
writel(intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask);
/* wait firmware ready */
- do {
- firmware_state = readl(&reg->outbound_msgaddr1);
- } while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
+ arcmsr_wait_firmware_ready(pACB);
/* post "get config" instruction */
writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
/* wait message ready */
- for (Index = 0; Index < 2000; Index++) {
- if (readl(&reg->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
- writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &reg->outbound_doorbell_clear);/*clear interrupt*/
- break;
- }
- udelay(10);
- } /*max 1 seconds*/
- if (Index >= 2000) {
+ if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
miscellaneous data' timeout \n", pACB->host->host_no);
return false;
}
- count = 8;
- while (count) {
- *acb_firm_model = readb(iop_firm_model);
- acb_firm_model++;
- iop_firm_model++;
- count--;
- }
- count = 16;
- while (count) {
- *acb_firm_version = readb(iop_firm_version);
- acb_firm_version++;
- iop_firm_version++;
- count--;
- }
- pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
- pACB->host->host_no,
- pACB->firm_model,
- pACB->firm_version);
- pACB->firm_request_len = readl(&reg->msgcode_rwbuffer[1]); /*firm_request_len,1,04-07*/
- pACB->firm_numbers_queue = readl(&reg->msgcode_rwbuffer[2]); /*firm_numbers_queue,2,08-11*/
- pACB->firm_sdram_size = readl(&reg->msgcode_rwbuffer[3]); /*firm_sdram_size,3,12-15*/
- pACB->firm_hd_channels = readl(&reg->msgcode_rwbuffer[4]); /*firm_ide_channels,4,16-19*/
- pACB->firm_cfg_version = readl(&reg->msgcode_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
- /*all interrupt service will be enable at arcmsr_iop_init*/
+ arcmsr_get_adapter_config(pACB, reg->msgcode_rwbuffer);
return true;
}
static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb)
{
- char *acb_firm_model = acb->firm_model;
- char *acb_firm_version = acb->firm_version;
- char *acb_device_map = acb->device_map;
- char __iomem *iop_firm_model;
- char __iomem *iop_firm_version;
- char __iomem *iop_device_map;
- u32 count;
struct MessageUnit_D *reg = acb->pmuD;
- iop_firm_model = (char __iomem *)(&reg->msgcode_rwbuffer[15]);
- iop_firm_version = (char __iomem *)(&reg->msgcode_rwbuffer[17]);
- iop_device_map = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
if (readl(acb->pmuD->outbound_doorbell) &
ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) {
writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE,
acb->pmuD->outbound_doorbell);/*clear interrupt*/
}
+ arcmsr_wait_firmware_ready(acb);
/* post "get config" instruction */
writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0);
/* wait message ready */
@@ -2846,42 +3071,33 @@ static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb)
"miscellaneous data timeout\n", acb->host->host_no);
return false;
}
- count = 8;
- while (count) {
- *acb_firm_model = readb(iop_firm_model);
- acb_firm_model++;
- iop_firm_model++;
- count--;
- }
- count = 16;
- while (count) {
- *acb_firm_version = readb(iop_firm_version);
- acb_firm_version++;
- iop_firm_version++;
- count--;
- }
- count = 16;
- while (count) {
- *acb_device_map = readb(iop_device_map);
- acb_device_map++;
- iop_device_map++;
- count--;
+ arcmsr_get_adapter_config(acb, reg->msgcode_rwbuffer);
+ return true;
+}
+
+static bool arcmsr_hbaE_get_config(struct AdapterControlBlock *pACB)
+{
+ struct MessageUnit_E __iomem *reg = pACB->pmuE;
+ uint32_t intmask_org;
+
+ /* disable all outbound interrupt */
+ intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
+ writel(intmask_org | ARCMSR_HBEMU_ALL_INTMASKENABLE, &reg->host_int_mask);
+ /* wait firmware ready */
+ arcmsr_wait_firmware_ready(pACB);
+ mdelay(20);
+ /* post "get config" instruction */
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
+
+ pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(pACB->out_doorbell, &reg->iobound_doorbell);
+ /* wait message ready */
+ if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
+ pr_notice("arcmsr%d: wait get adapter firmware "
+ "miscellaneous data timeout\n", pACB->host->host_no);
+ return false;
}
- acb->signature = readl(&reg->msgcode_rwbuffer[0]);
- /*firm_signature,1,00-03*/
- acb->firm_request_len = readl(&reg->msgcode_rwbuffer[1]);
- /*firm_request_len,1,04-07*/
- acb->firm_numbers_queue = readl(&reg->msgcode_rwbuffer[2]);
- /*firm_numbers_queue,2,08-11*/
- acb->firm_sdram_size = readl(&reg->msgcode_rwbuffer[3]);
- /*firm_sdram_size,3,12-15*/
- acb->firm_hd_channels = readl(&reg->msgcode_rwbuffer[4]);
- /*firm_hd_channels,4,16-19*/
- acb->firm_cfg_version = readl(&reg->msgcode_rwbuffer[25]);
- pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
- acb->host->host_no,
- acb->firm_model,
- acb->firm_version);
+ arcmsr_get_adapter_config(pACB, reg->msgcode_rwbuffer);
return true;
}
@@ -2902,14 +3118,20 @@ static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_D:
rtn = arcmsr_hbaD_get_config(acb);
break;
+ case ACB_ADAPTER_TYPE_E:
+ rtn = arcmsr_hbaE_get_config(acb);
+ break;
default:
break;
}
- if (acb->firm_numbers_queue > ARCMSR_MAX_OUTSTANDING_CMD)
- acb->maxOutstanding = ARCMSR_MAX_OUTSTANDING_CMD;
+ acb->maxOutstanding = acb->firm_numbers_queue - 1;
+ if (acb->host->can_queue >= acb->firm_numbers_queue)
+ acb->host->can_queue = acb->maxOutstanding;
else
- acb->maxOutstanding = acb->firm_numbers_queue - 1;
- acb->host->can_queue = acb->maxOutstanding;
+ acb->maxOutstanding = acb->host->can_queue;
+ acb->maxFreeCCB = acb->host->can_queue;
+ if (acb->maxFreeCCB < ARCMSR_MAX_FREECCB_NUM)
+ acb->maxFreeCCB += 64;
return rtn;
}
@@ -3166,6 +3388,75 @@ polling_hbaD_ccb_retry:
return rtn;
}
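+/*
+ * Poll the Type E completion ring for poll_ccb, completing every CCB found
+ * along the way.  Gives up after roughly 40 x 25 ms of finding the ring
+ * empty without sighting poll_ccb.
+ */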
+static int arcmsr_hbaE_polling_ccbdone(struct AdapterControlBlock *acb,
+ struct CommandControlBlock *poll_ccb)
+{
+ bool error;
+ uint32_t poll_ccb_done = 0, poll_count = 0, doneq_index;
+ uint16_t cmdSMID;
+ unsigned long flags;
+ int rtn;
+ struct CommandControlBlock *pCCB;
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+
+polling_hbaE_ccb_retry:
+ poll_count++;
+ while (1) {
+ spin_lock_irqsave(&acb->doneq_lock, flags);
+ doneq_index = acb->doneq_index;
+ if ((readl(&reg->reply_post_producer_index) & 0xFFFF) ==
+ doneq_index) {
+ spin_unlock_irqrestore(&acb->doneq_lock, flags);
+ if (poll_ccb_done) {
+ rtn = SUCCESS;
+ break;
+ } else {
+ msleep(25);
+ if (poll_count > 40) {
+ rtn = FAILED;
+ break;
+ }
+				goto polling_hbaE_ccb_retry;
+ }
+ }
+		cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
+		/* sample the completion flag before the index is advanced */
+		error = (acb->pCompletionQ[doneq_index].cmdFlag &
+			ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
+		doneq_index++;
+ if (doneq_index >= acb->completionQ_entry)
+ doneq_index = 0;
+ acb->doneq_index = doneq_index;
+ spin_unlock_irqrestore(&acb->doneq_lock, flags);
+ pCCB = acb->pccb_pool[cmdSMID];
+ poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
+ /* check if command done with no error*/
+ if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
+ if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
+ pr_notice("arcmsr%d: scsi id = %d "
+ "lun = %d ccb = '0x%p' poll command "
+				"aborted successfully\n"
+ , acb->host->host_no
+ , pCCB->pcmd->device->id
+ , (u32)pCCB->pcmd->device->lun
+ , pCCB);
+ pCCB->pcmd->result = DID_ABORT << 16;
+ arcmsr_ccb_complete(pCCB);
+ continue;
+ }
+ pr_notice("arcmsr%d: polling an illegal "
+ "ccb command done ccb = '0x%p' "
+ "ccboutstandingcount = %d\n"
+ , acb->host->host_no
+ , pCCB
+ , atomic_read(&acb->ccboutstandingcount));
+ continue;
+ }
+		arcmsr_report_ccb_state(acb, pCCB, error);
+ }
+ writel(doneq_index, &reg->reply_post_consumer_index);
+ return rtn;
+}
+
static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
struct CommandControlBlock *poll_ccb)
{
@@ -3188,10 +3479,95 @@ static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
case ACB_ADAPTER_TYPE_D:
rtn = arcmsr_hbaD_polling_ccbdone(acb, poll_ccb);
break;
+ case ACB_ADAPTER_TYPE_E:
+ rtn = arcmsr_hbaE_polling_ccbdone(acb, poll_ccb);
+ break;
}
return rtn;
}
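+/*
+ * Timer callback that pushes the host's local date and time to the IOP as
+ * two message dwords (signature 0x55AA, year counted from 2000), using each
+ * adapter family's SYNC_TIMER message.  It re-arms itself at an
+ * ARCMSR_HOURS or ARCMSR_MINUTES interval depending on whether a timezone
+ * offset is set; both constants are defined elsewhere in this patch.
+ */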
+static void arcmsr_set_iop_datetime(struct timer_list *t)
+{
+ struct AdapterControlBlock *pacb = from_timer(pacb, t, refresh_timer);
+ unsigned int next_time;
+ struct tm tm;
+
+ union {
+ struct {
+ uint16_t signature;
+ uint8_t year;
+ uint8_t month;
+ uint8_t date;
+ uint8_t hour;
+ uint8_t minute;
+ uint8_t second;
+ } a;
+ struct {
+ uint32_t msg_time[2];
+ } b;
+ } datetime;
+
+ time64_to_tm(ktime_get_real_seconds(), -sys_tz.tz_minuteswest * 60, &tm);
+
+ datetime.a.signature = 0x55AA;
+ datetime.a.year = tm.tm_year - 100; /* base 2000 instead of 1900 */
+ datetime.a.month = tm.tm_mon;
+ datetime.a.date = tm.tm_mday;
+ datetime.a.hour = tm.tm_hour;
+ datetime.a.minute = tm.tm_min;
+ datetime.a.second = tm.tm_sec;
+
+ switch (pacb->adapter_type) {
+ case ACB_ADAPTER_TYPE_A: {
+ struct MessageUnit_A __iomem *reg = pacb->pmuA;
+ writel(datetime.b.msg_time[0], &reg->message_rwbuffer[0]);
+ writel(datetime.b.msg_time[1], &reg->message_rwbuffer[1]);
+ writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0);
+ break;
+ }
+ case ACB_ADAPTER_TYPE_B: {
+ uint32_t __iomem *rwbuffer;
+ struct MessageUnit_B *reg = pacb->pmuB;
+ rwbuffer = reg->message_rwbuffer;
+ writel(datetime.b.msg_time[0], rwbuffer++);
+ writel(datetime.b.msg_time[1], rwbuffer++);
+ writel(ARCMSR_MESSAGE_SYNC_TIMER, reg->drv2iop_doorbell);
+ break;
+ }
+ case ACB_ADAPTER_TYPE_C: {
+ struct MessageUnit_C __iomem *reg = pacb->pmuC;
+ writel(datetime.b.msg_time[0], &reg->msgcode_rwbuffer[0]);
+ writel(datetime.b.msg_time[1], &reg->msgcode_rwbuffer[1]);
+ writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0);
+ writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
+ break;
+ }
+ case ACB_ADAPTER_TYPE_D: {
+ uint32_t __iomem *rwbuffer;
+ struct MessageUnit_D *reg = pacb->pmuD;
+ rwbuffer = reg->msgcode_rwbuffer;
+ writel(datetime.b.msg_time[0], rwbuffer++);
+ writel(datetime.b.msg_time[1], rwbuffer++);
+ writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, reg->inbound_msgaddr0);
+ break;
+ }
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = pacb->pmuE;
+ writel(datetime.b.msg_time[0], &reg->msgcode_rwbuffer[0]);
+ writel(datetime.b.msg_time[1], &reg->msgcode_rwbuffer[1]);
+ writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0);
+ pacb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(pacb->out_doorbell, &reg->iobound_doorbell);
+ break;
+ }
+ }
+ if (sys_tz.tz_minuteswest)
+ next_time = ARCMSR_HOURS;
+ else
+ next_time = ARCMSR_MINUTES;
+ mod_timer(&pacb->refresh_timer, jiffies + msecs_to_jiffies(next_time));
+}
+
static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
{
uint32_t cdb_phyaddr, cdb_phyaddr_hi32;
@@ -3208,6 +3584,10 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_D:
dma_coherent_handle = acb->dma_coherent_handle2;
break;
+ case ACB_ADAPTER_TYPE_E:
+ dma_coherent_handle = acb->dma_coherent_handle +
+ offsetof(struct CommandControlBlock, arcmsr_cdb);
+ break;
default:
dma_coherent_handle = acb->dma_coherent_handle;
break;
@@ -3316,6 +3696,29 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
}
}
break;
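+	/*
+	 * Type E SET_CONFIG layout: rwbuffer[2..4] carry the CCB pool base
+	 * (low/high) and per-CCB stride, rwbuffer[5..7] the completion ring
+	 * base (low/high) and size, preceded by the SET_CONFIG and 1884
+	 * signatures in rwbuffer[0..1].
+	 */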
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ writel(ARCMSR_SIGNATURE_SET_CONFIG, &reg->msgcode_rwbuffer[0]);
+ writel(ARCMSR_SIGNATURE_1884, &reg->msgcode_rwbuffer[1]);
+ writel(cdb_phyaddr, &reg->msgcode_rwbuffer[2]);
+ writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[3]);
+ writel(acb->ccbsize, &reg->msgcode_rwbuffer[4]);
+ dma_coherent_handle = acb->dma_coherent_handle2;
+ cdb_phyaddr = (uint32_t)(dma_coherent_handle & 0xffffffff);
+ cdb_phyaddr_hi32 = (uint32_t)((dma_coherent_handle >> 16) >> 16);
+ writel(cdb_phyaddr, &reg->msgcode_rwbuffer[5]);
+ writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[6]);
+ writel(acb->roundup_ccbsize, &reg->msgcode_rwbuffer[7]);
+ writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
+ acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(acb->out_doorbell, &reg->iobound_doorbell);
+ if (!arcmsr_hbaE_wait_msgint_ready(acb)) {
+			pr_notice("arcmsr%d: 'set command Q window' timeout\n",
+ acb->host->host_no);
+ return 1;
+ }
+ }
+ break;
}
return 0;
}
@@ -3356,83 +3759,22 @@ static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
ARCMSR_ARC1214_MESSAGE_FIRMWARE_OK) == 0);
}
break;
- }
-}
-
-static void arcmsr_hbaA_request_device_map(struct AdapterControlBlock *acb)
-{
- struct MessageUnit_A __iomem *reg = acb->pmuA;
- if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- return;
- } else {
- acb->fw_flag = FW_NORMAL;
- if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)){
- atomic_set(&acb->rq_map_token, 16);
- }
- atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
- if (atomic_dec_and_test(&acb->rq_map_token)) {
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- return;
- }
- writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- }
- return;
-}
-
-static void arcmsr_hbaB_request_device_map(struct AdapterControlBlock *acb)
-{
- struct MessageUnit_B *reg = acb->pmuB;
- if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- return;
- } else {
- acb->fw_flag = FW_NORMAL;
- if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) {
- atomic_set(&acb->rq_map_token, 16);
- }
- atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
- if (atomic_dec_and_test(&acb->rq_map_token)) {
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- return;
- }
- writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- }
- return;
-}
-
-static void arcmsr_hbaC_request_device_map(struct AdapterControlBlock *acb)
-{
- struct MessageUnit_C __iomem *reg = acb->pmuC;
- if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0) || ((acb->acb_flags & ACB_F_ABORT) != 0)) {
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- return;
- } else {
- acb->fw_flag = FW_NORMAL;
- if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) {
- atomic_set(&acb->rq_map_token, 16);
- }
- atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
- if (atomic_dec_and_test(&acb->rq_map_token)) {
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- return;
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ do {
+ firmware_state = readl(&reg->outbound_msgaddr1);
+ } while ((firmware_state & ARCMSR_HBEMU_MESSAGE_FIRMWARE_OK) == 0);
}
- writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
- writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
+ break;
}
- return;
}
-static void arcmsr_hbaD_request_device_map(struct AdapterControlBlock *acb)
+static void arcmsr_request_device_map(struct timer_list *t)
{
- struct MessageUnit_D *reg = acb->pmuD;
-
+ struct AdapterControlBlock *acb = from_timer(acb, t, eternal_timer);
if (unlikely(atomic_read(&acb->rq_map_token) == 0) ||
- ((acb->acb_flags & ACB_F_BUS_RESET) != 0) ||
- ((acb->acb_flags & ACB_F_ABORT) != 0)) {
+ (acb->acb_flags & ACB_F_BUS_RESET) ||
+ (acb->acb_flags & ACB_F_ABORT)) {
mod_timer(&acb->eternal_timer,
jiffies + msecs_to_jiffies(6 * HZ));
} else {
@@ -3448,32 +3790,40 @@ static void arcmsr_hbaD_request_device_map(struct AdapterControlBlock *acb)
msecs_to_jiffies(6 * HZ));
return;
}
- writel(ARCMSR_INBOUND_MESG0_GET_CONFIG,
- reg->inbound_msgaddr0);
- mod_timer(&acb->eternal_timer, jiffies +
- msecs_to_jiffies(6 * HZ));
- }
-}
-
-static void arcmsr_request_device_map(struct timer_list *t)
-{
- struct AdapterControlBlock *acb = from_timer(acb, t, eternal_timer);
- switch (acb->adapter_type) {
+ switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
- arcmsr_hbaA_request_device_map(acb);
- }
- break;
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
+ break;
+ }
case ACB_ADAPTER_TYPE_B: {
- arcmsr_hbaB_request_device_map(acb);
- }
- break;
+ struct MessageUnit_B *reg = acb->pmuB;
+ writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
+ break;
+ }
case ACB_ADAPTER_TYPE_C: {
- arcmsr_hbaC_request_device_map(acb);
+ struct MessageUnit_C __iomem *reg = acb->pmuC;
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
+ writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
+ break;
+ }
+ case ACB_ADAPTER_TYPE_D: {
+ struct MessageUnit_D *reg = acb->pmuD;
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0);
+ break;
+ }
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
+ acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(acb->out_doorbell, &reg->iobound_doorbell);
+ break;
+ }
+ default:
+ return;
}
- break;
- case ACB_ADAPTER_TYPE_D:
- arcmsr_hbaD_request_device_map(acb);
- break;
+ acb->acb_flags |= ACB_F_MSG_GET_CONFIG;
+ mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
}
}
@@ -3524,6 +3874,20 @@ static void arcmsr_hbaD_start_bgrb(struct AdapterControlBlock *pACB)
}
}
+static void arcmsr_hbaE_start_bgrb(struct AdapterControlBlock *pACB)
+{
+ struct MessageUnit_E __iomem *pmu = pACB->pmuE;
+
+ pACB->acb_flags |= ACB_F_MSG_START_BGRB;
+ writel(ARCMSR_INBOUND_MESG0_START_BGRB, &pmu->inbound_msgaddr0);
+ pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
+ writel(pACB->out_doorbell, &pmu->iobound_doorbell);
+ if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
+ pr_notice("arcmsr%d: wait 'start adapter "
+			"background rebuild' timeout\n", pACB->host->host_no);
+ }
+}
+
static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
@@ -3539,6 +3903,9 @@ static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_D:
arcmsr_hbaD_start_bgrb(acb);
break;
+ case ACB_ADAPTER_TYPE_E:
+ arcmsr_hbaE_start_bgrb(acb);
+ break;
}
}
@@ -3558,10 +3925,19 @@ static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_B: {
struct MessageUnit_B *reg = acb->pmuB;
- /*clear interrupt and message state*/
- writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
+ uint32_t outbound_doorbell, i;
+ writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
/* let IOP know data has been read */
+		for (i = 0; i < 200; i++) {
+			msleep(20);
+			outbound_doorbell = readl(reg->iop2drv_doorbell);
+			if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
+ writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
+ writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
+ } else
+ break;
+ }
}
break;
case ACB_ADAPTER_TYPE_C: {
@@ -3607,6 +3983,27 @@ static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
}
}
break;
+ case ACB_ADAPTER_TYPE_E: {
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ uint32_t i, tmp;
+
+ acb->in_doorbell = readl(&reg->iobound_doorbell);
+ writel(0, &reg->host_int_status); /*clear interrupt*/
+ acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
+ writel(acb->out_doorbell, &reg->iobound_doorbell);
+		for (i = 0; i < 200; i++) {
+			msleep(20);
+			tmp = acb->in_doorbell;
+			acb->in_doorbell = readl(&reg->iobound_doorbell);
+			if ((tmp ^ acb->in_doorbell) & ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK) {
+ writel(0, &reg->host_int_status); /*clear interrupt*/
+ acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
+ writel(acb->out_doorbell, &reg->iobound_doorbell);
+ } else
+ break;
+ }
+ }
+ break;
}
}
@@ -3658,6 +4055,19 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
writel(0xD, &pmuC->write_sequence);
} while (((readl(&pmuC->host_diagnostic) & ARCMSR_ARC1880_DiagWrite_ENABLE) == 0) && (count < 5));
writel(ARCMSR_ARC1880_RESET_ADAPTER, &pmuC->host_diagnostic);
+ } else if (acb->dev_id == 0x1884) {
+ struct MessageUnit_E __iomem *pmuE = acb->pmuE;
+ do {
+ count++;
+ writel(0x4, &pmuE->write_sequence_3xxx);
+ writel(0xB, &pmuE->write_sequence_3xxx);
+ writel(0x2, &pmuE->write_sequence_3xxx);
+ writel(0x7, &pmuE->write_sequence_3xxx);
+ writel(0xD, &pmuE->write_sequence_3xxx);
+ mdelay(10);
+ } while (((readl(&pmuE->host_diagnostic_3xxx) &
+ ARCMSR_ARC1884_DiagWrite_ENABLE) == 0) && (count < 5));
+ writel(ARCMSR_ARC188X_RESET_ADAPTER, &pmuE->host_diagnostic_3xxx);
} else if ((acb->dev_id == 0x1214)) {
writel(0x20, pmuD->reset_request);
} else {
@@ -3671,6 +4081,45 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
msleep(1000);
return;
}
+
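+/*
+ * Per-family check used by the unified bus-reset handler below: returns
+ * true while the controller still reports a reset or firmware-not-ready
+ * condition, so the handler keeps sleeping and retrying.
+ */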
+static bool arcmsr_reset_in_progress(struct AdapterControlBlock *acb)
+{
+ bool rtn = true;
+
+ switch(acb->adapter_type) {
+ case ACB_ADAPTER_TYPE_A:{
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+ rtn = ((readl(&reg->outbound_msgaddr1) &
+ ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) ? true : false;
+ }
+ break;
+ case ACB_ADAPTER_TYPE_B:{
+ struct MessageUnit_B *reg = acb->pmuB;
+ rtn = ((readl(reg->iop2drv_doorbell) &
+ ARCMSR_MESSAGE_FIRMWARE_OK) == 0) ? true : false;
+ }
+ break;
+ case ACB_ADAPTER_TYPE_C:{
+ struct MessageUnit_C __iomem *reg = acb->pmuC;
+ rtn = (readl(&reg->host_diagnostic) & 0x04) ? true : false;
+ }
+ break;
+ case ACB_ADAPTER_TYPE_D:{
+ struct MessageUnit_D *reg = acb->pmuD;
+ rtn = ((readl(reg->sample_at_reset) & 0x80) == 0) ?
+ true : false;
+ }
+ break;
+ case ACB_ADAPTER_TYPE_E:{
+ struct MessageUnit_E __iomem *reg = acb->pmuE;
+ rtn = (readl(&reg->host_diagnostic_3xxx) &
+ ARCMSR_ARC188X_RESET_ADAPTER) ? true : false;
+ }
+ break;
+ }
+ return rtn;
+}
+
static void arcmsr_iop_init(struct AdapterControlBlock *acb)
{
uint32_t intmask_org;
@@ -3703,7 +4152,7 @@ static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
rtnval = arcmsr_abort_allcmd(acb);
/* clear all outbound posted Q */
arcmsr_done4abort_postqueue(acb);
- for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
+ for (i = 0; i < acb->maxFreeCCB; i++) {
ccb = acb->pccb_pool[i];
if (ccb->startdone == ARCMSR_CCB_START) {
scsi_dma_unmap(ccb->pcmd);
@@ -3725,197 +4174,55 @@ static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
{
struct AdapterControlBlock *acb;
- uint32_t intmask_org, outbound_doorbell;
int retry_count = 0;
int rtn = FAILED;
acb = (struct AdapterControlBlock *) cmd->device->host->hostdata;
- printk(KERN_ERR "arcmsr: executing bus reset eh.....num_resets = %d, num_aborts = %d \n", acb->num_resets, acb->num_aborts);
+	pr_notice("arcmsr: executing bus reset eh..... num_resets = %d,"
+		" num_aborts = %d\n", acb->num_resets, acb->num_aborts);
acb->num_resets++;
- switch(acb->adapter_type){
- case ACB_ADAPTER_TYPE_A:{
- if (acb->acb_flags & ACB_F_BUS_RESET){
- long timeout;
- printk(KERN_ERR "arcmsr: there is an bus reset eh proceeding.......\n");
- timeout = wait_event_timeout(wait_q, (acb->acb_flags & ACB_F_BUS_RESET) == 0, 220*HZ);
- if (timeout) {
- return SUCCESS;
- }
- }
- acb->acb_flags |= ACB_F_BUS_RESET;
- if (!arcmsr_iop_reset(acb)) {
- struct MessageUnit_A __iomem *reg;
- reg = acb->pmuA;
- arcmsr_hardware_reset(acb);
- acb->acb_flags &= ~ACB_F_IOP_INITED;
-sleep_again:
- ssleep(ARCMSR_SLEEPTIME);
- if ((readl(&reg->outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) {
- printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d\n", acb->host->host_no, retry_count);
- if (retry_count > ARCMSR_RETRYCOUNT) {
- acb->fw_flag = FW_DEADLOCK;
- printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!!\n", acb->host->host_no);
- return FAILED;
- }
- retry_count++;
- goto sleep_again;
- }
- acb->acb_flags |= ACB_F_IOP_INITED;
- /* disable all outbound interrupt */
- intmask_org = arcmsr_disable_outbound_ints(acb);
- arcmsr_get_firmware_spec(acb);
- arcmsr_start_adapter_bgrb(acb);
- /* clear Qbuffer if door bell ringed */
- outbound_doorbell = readl(&reg->outbound_doorbell);
- writel(outbound_doorbell, &reg->outbound_doorbell); /*clear interrupt */
- writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
- /* enable outbound Post Queue,outbound doorbell Interrupt */
- arcmsr_enable_outbound_ints(acb, intmask_org);
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- acb->acb_flags &= ~ACB_F_BUS_RESET;
- rtn = SUCCESS;
- printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
- } else {
- acb->acb_flags &= ~ACB_F_BUS_RESET;
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
- rtn = SUCCESS;
- }
- break;
- }
- case ACB_ADAPTER_TYPE_B:{
- acb->acb_flags |= ACB_F_BUS_RESET;
- if (!arcmsr_iop_reset(acb)) {
- acb->acb_flags &= ~ACB_F_BUS_RESET;
- rtn = FAILED;
- } else {
- acb->acb_flags &= ~ACB_F_BUS_RESET;
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- rtn = SUCCESS;
- }
- break;
- }
- case ACB_ADAPTER_TYPE_C:{
- if (acb->acb_flags & ACB_F_BUS_RESET) {
- long timeout;
- printk(KERN_ERR "arcmsr: there is an bus reset eh proceeding.......\n");
- timeout = wait_event_timeout(wait_q, (acb->acb_flags & ACB_F_BUS_RESET) == 0, 220*HZ);
- if (timeout) {
- return SUCCESS;
- }
- }
- acb->acb_flags |= ACB_F_BUS_RESET;
- if (!arcmsr_iop_reset(acb)) {
- struct MessageUnit_C __iomem *reg;
- reg = acb->pmuC;
- arcmsr_hardware_reset(acb);
- acb->acb_flags &= ~ACB_F_IOP_INITED;
-sleep:
- ssleep(ARCMSR_SLEEPTIME);
- if ((readl(&reg->host_diagnostic) & 0x04) != 0) {
- printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d\n", acb->host->host_no, retry_count);
- if (retry_count > ARCMSR_RETRYCOUNT) {
- acb->fw_flag = FW_DEADLOCK;
- printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!!\n", acb->host->host_no);
- return FAILED;
- }
- retry_count++;
- goto sleep;
- }
- acb->acb_flags |= ACB_F_IOP_INITED;
- /* disable all outbound interrupt */
- intmask_org = arcmsr_disable_outbound_ints(acb);
- arcmsr_get_firmware_spec(acb);
- arcmsr_start_adapter_bgrb(acb);
- /* clear Qbuffer if door bell ringed */
- arcmsr_clear_doorbell_queue_buffer(acb);
- /* enable outbound Post Queue,outbound doorbell Interrupt */
- arcmsr_enable_outbound_ints(acb, intmask_org);
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
- acb->acb_flags &= ~ACB_F_BUS_RESET;
- rtn = SUCCESS;
- printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
- } else {
- acb->acb_flags &= ~ACB_F_BUS_RESET;
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
- rtn = SUCCESS;
- }
- break;
- }
- case ACB_ADAPTER_TYPE_D: {
- if (acb->acb_flags & ACB_F_BUS_RESET) {
- long timeout;
- pr_notice("arcmsr: there is an bus reset"
- " eh proceeding.......\n");
- timeout = wait_event_timeout(wait_q, (acb->acb_flags
- & ACB_F_BUS_RESET) == 0, 220 * HZ);
- if (timeout)
- return SUCCESS;
- }
- acb->acb_flags |= ACB_F_BUS_RESET;
- if (!arcmsr_iop_reset(acb)) {
- struct MessageUnit_D *reg;
- reg = acb->pmuD;
- arcmsr_hardware_reset(acb);
- acb->acb_flags &= ~ACB_F_IOP_INITED;
- nap:
- ssleep(ARCMSR_SLEEPTIME);
- if ((readl(reg->sample_at_reset) & 0x80) != 0) {
- pr_err("arcmsr%d: waiting for "
- "hw bus reset return, retry=%d\n",
- acb->host->host_no, retry_count);
- if (retry_count > ARCMSR_RETRYCOUNT) {
- acb->fw_flag = FW_DEADLOCK;
- pr_err("arcmsr%d: waiting for hw bus"
- " reset return, "
- "RETRY TERMINATED!!\n",
- acb->host->host_no);
- return FAILED;
- }
- retry_count++;
- goto nap;
- }
- acb->acb_flags |= ACB_F_IOP_INITED;
- /* disable all outbound interrupt */
- intmask_org = arcmsr_disable_outbound_ints(acb);
- arcmsr_get_firmware_spec(acb);
- arcmsr_start_adapter_bgrb(acb);
- arcmsr_clear_doorbell_queue_buffer(acb);
- arcmsr_enable_outbound_ints(acb, intmask_org);
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- mod_timer(&acb->eternal_timer,
- jiffies + msecs_to_jiffies(6 * HZ));
- acb->acb_flags &= ~ACB_F_BUS_RESET;
- rtn = SUCCESS;
- pr_err("arcmsr: scsi bus reset "
- "eh returns with success\n");
- } else {
- acb->acb_flags &= ~ACB_F_BUS_RESET;
- atomic_set(&acb->rq_map_token, 16);
- atomic_set(&acb->ante_token_value, 16);
- acb->fw_flag = FW_NORMAL;
- mod_timer(&acb->eternal_timer,
- jiffies + msecs_to_jiffies(6 * HZ));
- rtn = SUCCESS;
+ if (acb->acb_flags & ACB_F_BUS_RESET) {
+ long timeout;
+ pr_notice("arcmsr: there is a bus reset eh proceeding...\n");
+ timeout = wait_event_timeout(wait_q, (acb->acb_flags
+ & ACB_F_BUS_RESET) == 0, 220 * HZ);
+ if (timeout)
+ return SUCCESS;
+ }
+ acb->acb_flags |= ACB_F_BUS_RESET;
+ if (!arcmsr_iop_reset(acb)) {
+ arcmsr_hardware_reset(acb);
+ acb->acb_flags &= ~ACB_F_IOP_INITED;
+wait_reset_done:
+ ssleep(ARCMSR_SLEEPTIME);
+ if (arcmsr_reset_in_progress(acb)) {
+ if (retry_count > ARCMSR_RETRYCOUNT) {
+ acb->fw_flag = FW_DEADLOCK;
+ pr_notice("arcmsr%d: waiting for hw bus reset"
+ " return, RETRY TERMINATED!!\n",
+ acb->host->host_no);
+ return FAILED;
}
- break;
+ retry_count++;
+ goto wait_reset_done;
}
+ arcmsr_iop_init(acb);
+ atomic_set(&acb->rq_map_token, 16);
+ atomic_set(&acb->ante_token_value, 16);
+ acb->fw_flag = FW_NORMAL;
+ mod_timer(&acb->eternal_timer, jiffies +
+ msecs_to_jiffies(6 * HZ));
+ acb->acb_flags &= ~ACB_F_BUS_RESET;
+ rtn = SUCCESS;
+ pr_notice("arcmsr: scsi bus reset eh returns with success\n");
+ } else {
+ acb->acb_flags &= ~ACB_F_BUS_RESET;
+ atomic_set(&acb->rq_map_token, 16);
+ atomic_set(&acb->ante_token_value, 16);
+ acb->fw_flag = FW_NORMAL;
+ mod_timer(&acb->eternal_timer, jiffies +
+ msecs_to_jiffies(6 * HZ));
+ rtn = SUCCESS;
}
return rtn;
}
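
The consolidated error handler above replaces four nearly identical per-adapter bus-reset paths with one, hiding the adapter-specific "is the reset still in progress" register test behind a single predicate and folding the post-reset bring-up into arcmsr_iop_init(). A minimal sketch of that predicate, assuming the register tests seen in the removed branches (the dispatch shape and the adapter_type field are illustrative, not verified against the driver):

static bool reset_in_progress_sketch(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;

		/* firmware-OK bit still clear: reset not finished */
		return !(readl(&reg->outbound_msgaddr1) &
			 ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK);
	}
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;

		/* diagnostic reset bit still set */
		return readl(&reg->host_diagnostic) & 0x04;
	}
	default:
		return false;
	}
}
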
@@ -3953,7 +4260,7 @@ static int arcmsr_abort(struct scsi_cmnd *cmd)
}
intmask_org = arcmsr_disable_outbound_ints(acb);
- for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
+ for (i = 0; i < acb->maxFreeCCB; i++) {
struct CommandControlBlock *ccb = acb->pccb_pool[i];
if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) {
ccb->startdone = ARCMSR_CCB_ABORTED;
@@ -3999,6 +4306,7 @@ static const char *arcmsr_info(struct Scsi_Host *host)
case PCI_DEVICE_ID_ARECA_1680:
case PCI_DEVICE_ID_ARECA_1681:
case PCI_DEVICE_ID_ARECA_1880:
+ case PCI_DEVICE_ID_ARECA_1884:
type = "SAS/SATA";
break;
default:
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index f4775ca70bab..27bda2b05de6 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2011,7 +2011,7 @@ static void fas216_rq_sns_done(FAS216_Info *info, struct scsi_cmnd *SCpnt,
* have valid data in the sense buffer that could
* confuse the higher levels.
*/
- memset(SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer));
+ memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
//printk("scsi%d.%c: sense buffer: ", info->host->host_no, '0' + SCpnt->device->id);
//{ int i; for (i = 0; i < 32; i++) printk("%02x ", SCpnt->sense_buffer[i]); printk("\n"); }
/*
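
The fas216 change above fixes a classic sizeof-on-a-pointer bug: scsi_cmnd's sense_buffer is a pointer to a heap buffer, so sizeof(SCpnt->sense_buffer) evaluates to the pointer size (4 or 8 bytes), clearing only the first few bytes of the sense area. A self-contained illustration of the bug class, assuming the usual 96-byte SCSI_SENSE_BUFFERSIZE:

#include <string.h>

#define SCSI_SENSE_BUFFERSIZE 96

struct cmd {
	unsigned char *sense_buffer;	/* points at a 96-byte buffer */
};

static void clear_sense(struct cmd *c)
{
	/* WRONG: sizeof(c->sense_buffer) is sizeof(unsigned char *) */
	/* memset(c->sense_buffer, 0, sizeof(c->sense_buffer)); */

	/* RIGHT: pass the buffer's real length */
	memset(c->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
}
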
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 3e1caec82554..10a63be92544 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -1957,7 +1957,7 @@ bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC},
};
- *npciids = sizeof(__pciids) / sizeof(__pciids[0]);
+ *npciids = ARRAY_SIZE(__pciids);
*pciids = __pciids;
}
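
ARRAY_SIZE() is the kernel idiom for element counts; besides being shorter than the open-coded division, the real macro in include/linux/kernel.h adds a compile-time check that its argument is an array rather than a pointer, catching exactly the mistake the fas216 fix above illustrates. Its core, simplified:

#define ARRAY_SIZE_SKETCH(arr)	(sizeof(arr) / sizeof((arr)[0]))

static const int ids[] = { 10, 20, 30 };
static const int nids = ARRAY_SIZE_SKETCH(ids);	/* 3 */
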
diff --git a/drivers/scsi/bfa/bfa_cs.h b/drivers/scsi/bfa/bfa_cs.h
index df6760ca0911..9685efc59b16 100644
--- a/drivers/scsi/bfa/bfa_cs.h
+++ b/drivers/scsi/bfa/bfa_cs.h
@@ -35,10 +35,10 @@
#define BFA_TRC_TS(_trcm) \
({ \
- struct timeval tv; \
+ struct timespec64 ts; \
\
- do_gettimeofday(&tv); \
- (tv.tv_sec*1000000+tv.tv_usec); \
+ ktime_get_ts64(&ts); \
+ (ts.tv_sec*1000000+ts.tv_nsec / 1000); \
})
#ifndef BFA_TRC_TS
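
BFA_TRC_TS still yields a microsecond timestamp, but now from ktime_get_ts64() and a struct timespec64, whose 64-bit tv_sec does not overflow in 2038 the way the old struct timeval's seconds field did on 32-bit architectures. The same conversion as a plain helper (a sketch for kernel context; USEC_PER_SEC and NSEC_PER_USEC are the standard linux/time64.h constants):

static u64 trc_timestamp_us(void)
{
	struct timespec64 ts;

	ktime_get_ts64(&ts);	/* monotonic, y2038-safe */
	return (u64)ts.tv_sec * USEC_PER_SEC + ts.tv_nsec / NSEC_PER_USEC;
}
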
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index e81707f938cb..3d0c96a5c873 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -1455,7 +1455,8 @@ struct bfa_aen_entry_s {
enum bfa_aen_category aen_category;
u32 aen_type;
union bfa_aen_data_u aen_data;
- struct timeval aen_tv;
+ u64 aen_tv_sec;
+ u64 aen_tv_usec;
u32 seq_num;
u32 bfad_num;
};
diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c
index b8dadc9cc993..d3b00a475aeb 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.c
+++ b/drivers/scsi/bfa/bfa_fcbuild.c
@@ -1250,8 +1250,8 @@ fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
memset(rspnid, 0, sizeof(struct fcgs_rspnid_req_s));
rspnid->dap = s_id;
- rspnid->spn_len = (u8) strlen((char *)name);
- strncpy((char *)rspnid->spn, (char *)name, rspnid->spn_len);
+ strlcpy(rspnid->spn, name, sizeof(rspnid->spn));
+ rspnid->spn_len = (u8) strlen(rspnid->spn);
return sizeof(struct fcgs_rspnid_req_s) + sizeof(struct ct_hdr_s);
}
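
The reordering in fc_rspnid_build() and fc_rsnn_nn_build() is deliberate: strlcpy() bounds the copy by the destination size and always NUL-terminates, possibly truncating, so the length field must be measured from what was actually stored, not from the source. The old strncpy(dst, src, strlen(src)) guaranteed neither. The resulting copy-then-measure pattern, sketched for kernel context with an illustrative field size:

struct name_field {
	char spn[64];		/* illustrative size */
	u8 spn_len;
};

static void set_name(struct name_field *f, const char *name)
{
	strlcpy(f->spn, name, sizeof(f->spn));	/* bounded, terminated */
	f->spn_len = (u8)strlen(f->spn);	/* measure what was stored */
}
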
@@ -1271,8 +1271,8 @@ fc_rsnn_nn_build(struct fchs_s *fchs, void *pyld, u32 s_id,
memset(rsnn_nn, 0, sizeof(struct fcgs_rsnn_nn_req_s));
rsnn_nn->node_name = node_name;
- rsnn_nn->snn_len = (u8) strlen((char *)name);
- strncpy((char *)rsnn_nn->snn, (char *)name, rsnn_nn->snn_len);
+ strlcpy(rsnn_nn->snn, name, sizeof(rsnn_nn->snn));
+ rsnn_nn->snn_len = (u8) strlen(rsnn_nn->snn);
return sizeof(struct fcgs_rsnn_nn_req_s) + sizeof(struct ct_hdr_s);
}
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index 5f53b3276234..2c85f5b1f9c1 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -468,7 +468,7 @@ bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
}
bfa_status_t
-bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time)
+bfa_fcpim_profile_on(struct bfa_s *bfa, time64_t time)
{
struct bfa_itnim_s *itnim;
struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
@@ -1478,6 +1478,7 @@ bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
return BFA_STATUS_IOPROFILE_OFF;
itnim->ioprofile.index = BFA_IOBUCKET_MAX;
+ /* unsigned 32-bit time_t overflow here in y2106 */
itnim->ioprofile.io_profile_start_time =
bfa_io_profile_start_time(itnim->bfa);
itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
index e93921dec347..ec8f863540ae 100644
--- a/drivers/scsi/bfa/bfa_fcpim.h
+++ b/drivers/scsi/bfa/bfa_fcpim.h
@@ -136,7 +136,7 @@ struct bfa_fcpim_s {
struct bfa_fcpim_del_itn_stats_s del_itn_stats;
bfa_boolean_t ioredirect;
bfa_boolean_t io_profile;
- u32 io_profile_start_time;
+ time64_t io_profile_start_time;
bfa_fcpim_profile_t profile_comp;
bfa_fcpim_profile_t profile_start;
};
@@ -310,7 +310,7 @@ bfa_status_t bfa_fcpim_port_iostats(struct bfa_s *bfa,
struct bfa_itnim_iostats_s *stats, u8 lp_tag);
void bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *fcpim_stats,
struct bfa_itnim_iostats_s *itnim_stats);
-bfa_status_t bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time);
+bfa_status_t bfa_fcpim_profile_on(struct bfa_s *bfa, time64_t time);
bfa_status_t bfa_fcpim_profile_off(struct bfa_s *bfa);
#define bfa_fcpim_ioredirect_enabled(__bfa) \
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index 4aa61e20e82d..932feb0ed4da 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -769,23 +769,23 @@ bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);
/* Model name/number */
- strncpy((char *)&port_cfg->sym_name, model,
- BFA_FCS_PORT_SYMBNAME_MODEL_SZ);
- strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ strlcpy(port_cfg->sym_name.symname, model,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+ BFA_SYMNAME_MAXLEN);
/* Driver Version */
- strncat((char *)&port_cfg->sym_name, (char *)driver_info->version,
- BFA_FCS_PORT_SYMBNAME_VERSION_SZ);
- strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ strlcat(port_cfg->sym_name.symname, driver_info->version,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+ BFA_SYMNAME_MAXLEN);
/* Host machine name */
- strncat((char *)&port_cfg->sym_name,
- (char *)driver_info->host_machine_name,
- BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ);
- strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ strlcat(port_cfg->sym_name.symname,
+ driver_info->host_machine_name,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+ BFA_SYMNAME_MAXLEN);
/*
* Host OS Info :
@@ -793,24 +793,24 @@ bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
* OS name string and instead copy the entire OS info string (64 bytes).
*/
if (driver_info->host_os_patch[0] == '\0') {
- strncat((char *)&port_cfg->sym_name,
- (char *)driver_info->host_os_name,
- BFA_FCS_OS_STR_LEN);
- strncat((char *)&port_cfg->sym_name,
+ strlcat(port_cfg->sym_name.symname,
+ driver_info->host_os_name,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ BFA_SYMNAME_MAXLEN);
} else {
- strncat((char *)&port_cfg->sym_name,
- (char *)driver_info->host_os_name,
- BFA_FCS_PORT_SYMBNAME_OSINFO_SZ);
- strncat((char *)&port_cfg->sym_name,
+ strlcat(port_cfg->sym_name.symname,
+ driver_info->host_os_name,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ BFA_SYMNAME_MAXLEN);
/* Append host OS Patch Info */
- strncat((char *)&port_cfg->sym_name,
- (char *)driver_info->host_os_patch,
- BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ);
+ strlcat(port_cfg->sym_name.symname,
+ driver_info->host_os_patch,
+ BFA_SYMNAME_MAXLEN);
}
/* null terminate */
@@ -830,26 +830,26 @@ bfa_fcs_fabric_nsymb_init(struct bfa_fcs_fabric_s *fabric)
bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);
/* Model name/number */
- strncpy((char *)&port_cfg->node_sym_name, model,
- BFA_FCS_PORT_SYMBNAME_MODEL_SZ);
- strncat((char *)&port_cfg->node_sym_name,
+ strlcpy(port_cfg->node_sym_name.symname, model,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->node_sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ BFA_SYMNAME_MAXLEN);
/* Driver Version */
- strncat((char *)&port_cfg->node_sym_name, (char *)driver_info->version,
- BFA_FCS_PORT_SYMBNAME_VERSION_SZ);
- strncat((char *)&port_cfg->node_sym_name,
+ strlcat(port_cfg->node_sym_name.symname, (char *)driver_info->version,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->node_sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ BFA_SYMNAME_MAXLEN);
/* Host machine name */
- strncat((char *)&port_cfg->node_sym_name,
- (char *)driver_info->host_machine_name,
- BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ);
- strncat((char *)&port_cfg->node_sym_name,
+ strlcat(port_cfg->node_sym_name.symname,
+ driver_info->host_machine_name,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->node_sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ BFA_SYMNAME_MAXLEN);
/* null terminate */
port_cfg->node_sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0;
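
Note the size argument in every call above: strlcat() takes the total size of the destination buffer, not the space remaining and not the length of the string being appended, so each call simply passes BFA_SYMNAME_MAXLEN. The replaced strncat(dst, sep, sizeof(SEPARATOR)) used strncat's append-count contract instead, which neither bounds the destination nor guarantees termination. The idiomatic bounded build-up (a sketch; model and version stand in for the strings being joined, and the explicit terminator at the end of the function becomes a harmless extra safeguard):

char symname[BFA_SYMNAME_MAXLEN];

strlcpy(symname, model, sizeof(symname));
strlcat(symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR, sizeof(symname));
strlcat(symname, version, sizeof(symname));	/* safe even when full */
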
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 638c0a2857f7..b4f2c1d8742e 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -2642,10 +2642,10 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
bfa_ioc_get_adapter_fw_ver(&port->fcs->bfa->ioc,
hba_attr->fw_version);
- strncpy(hba_attr->driver_version, (char *)driver_info->version,
+ strlcpy(hba_attr->driver_version, (char *)driver_info->version,
sizeof(hba_attr->driver_version));
- strncpy(hba_attr->os_name, driver_info->host_os_name,
+ strlcpy(hba_attr->os_name, driver_info->host_os_name,
sizeof(hba_attr->os_name));
/*
@@ -2653,23 +2653,23 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
* to the os name along with a separator
*/
if (driver_info->host_os_patch[0] != '\0') {
- strncat(hba_attr->os_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
- strncat(hba_attr->os_name, driver_info->host_os_patch,
- sizeof(driver_info->host_os_patch));
+ strlcat(hba_attr->os_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+ sizeof(hba_attr->os_name));
+ strlcat(hba_attr->os_name, driver_info->host_os_patch,
+ sizeof(hba_attr->os_name));
}
/* Retrieve the max frame size from the port attr */
bfa_fcs_fdmi_get_portattr(fdmi, &fcs_port_attr);
hba_attr->max_ct_pyld = fcs_port_attr.max_frm_size;
- strncpy(hba_attr->node_sym_name.symname,
+ strlcpy(hba_attr->node_sym_name.symname,
port->port_cfg.node_sym_name.symname, BFA_SYMNAME_MAXLEN);
strcpy(hba_attr->vendor_info, "QLogic");
hba_attr->num_ports =
cpu_to_be32(bfa_ioc_get_nports(&port->fcs->bfa->ioc));
hba_attr->fabric_name = port->fabric->lps->pr_nwwn;
- strncpy(hba_attr->bios_ver, hba_attr->option_rom_ver, BFA_VERSION_LEN);
+ strlcpy(hba_attr->bios_ver, hba_attr->option_rom_ver, BFA_VERSION_LEN);
}
@@ -2736,20 +2736,20 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
/*
* OS device Name
*/
- strncpy(port_attr->os_device_name, (char *)driver_info->os_device_name,
+ strlcpy(port_attr->os_device_name, driver_info->os_device_name,
sizeof(port_attr->os_device_name));
/*
* Host name
*/
- strncpy(port_attr->host_name, (char *)driver_info->host_machine_name,
+ strlcpy(port_attr->host_name, driver_info->host_machine_name,
sizeof(port_attr->host_name));
port_attr->node_name = bfa_fcs_lport_get_nwwn(port);
port_attr->port_name = bfa_fcs_lport_get_pwwn(port);
- strncpy(port_attr->port_sym_name.symname,
- (char *)&bfa_fcs_lport_get_psym_name(port), BFA_SYMNAME_MAXLEN);
+ strlcpy(port_attr->port_sym_name.symname,
+ bfa_fcs_lport_get_psym_name(port).symname, BFA_SYMNAME_MAXLEN);
bfa_fcs_lport_get_attr(port, &lport_attr);
port_attr->port_type = cpu_to_be32(lport_attr.port_type);
port_attr->scos = pport_attr.cos_supported;
@@ -3229,7 +3229,7 @@ bfa_fcs_lport_ms_gmal_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
rsp_str[gmal_entry->len-1] = 0;
/* copy IP Address to fabric */
- strncpy(bfa_fcs_lport_get_fabric_ipaddr(port),
+ strlcpy(bfa_fcs_lport_get_fabric_ipaddr(port),
gmal_entry->ip_addr,
BFA_FCS_FABRIC_IPADDR_SZ);
break;
@@ -4667,21 +4667,13 @@ bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
* to that of the base port.
*/
- strncpy((char *)psymbl,
- (char *) &
- (bfa_fcs_lport_get_psym_name
+ strlcpy(symbl,
+ (char *)&(bfa_fcs_lport_get_psym_name
(bfa_fcs_get_base_port(port->fcs))),
- strlen((char *) &
- bfa_fcs_lport_get_psym_name(bfa_fcs_get_base_port
- (port->fcs))));
-
- /* Ensure we have a null terminating string. */
- ((char *)psymbl)[strlen((char *) &
- bfa_fcs_lport_get_psym_name(bfa_fcs_get_base_port
- (port->fcs)))] = 0;
- strncat((char *)psymbl,
- (char *) &(bfa_fcs_lport_get_psym_name(port)),
- strlen((char *) &bfa_fcs_lport_get_psym_name(port)));
+ sizeof(symbl));
+
+ strlcat(symbl, (char *)&(bfa_fcs_lport_get_psym_name(port)),
+ sizeof(symbl));
} else {
psymbl = (u8 *) &(bfa_fcs_lport_get_psym_name(port));
}
@@ -5173,7 +5165,6 @@ bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, struct bfa_fcxp_s *fcxp_alloced)
struct fchs_s fchs;
struct bfa_fcxp_s *fcxp;
u8 symbl[256];
- u8 *psymbl = &symbl[0];
int len;
/* Avoid sending RSPN in the following states. */
@@ -5203,22 +5194,17 @@ bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, struct bfa_fcxp_s *fcxp_alloced)
* For Vports, we append the vport's port symbolic name
* to that of the base port.
*/
- strncpy((char *)psymbl, (char *)&(bfa_fcs_lport_get_psym_name
+ strlcpy(symbl, (char *)&(bfa_fcs_lport_get_psym_name
(bfa_fcs_get_base_port(port->fcs))),
- strlen((char *)&bfa_fcs_lport_get_psym_name(
- bfa_fcs_get_base_port(port->fcs))));
-
- /* Ensure we have a null terminating string. */
- ((char *)psymbl)[strlen((char *)&bfa_fcs_lport_get_psym_name(
- bfa_fcs_get_base_port(port->fcs)))] = 0;
+ sizeof(symbl));
- strncat((char *)psymbl,
+ strlcat(symbl,
(char *)&(bfa_fcs_lport_get_psym_name(port)),
- strlen((char *)&bfa_fcs_lport_get_psym_name(port)));
+ sizeof(symbl));
}
len = fc_rspnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
- bfa_fcs_lport_get_fcid(port), 0, psymbl);
+ bfa_fcs_lport_get_fcid(port), 0, symbl);
bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 256f4afaccf9..16d3aeb0e572 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -1809,13 +1809,12 @@ static void
bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
{
struct bfi_ioc_ctrl_req_s enable_req;
- struct timeval tv;
bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
bfa_ioc_portid(ioc));
enable_req.clscode = cpu_to_be16(ioc->clscode);
- do_gettimeofday(&tv);
- enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
+ /* unsigned 32-bit time_t overflow in y2106 */
+ enable_req.tv_sec = be32_to_cpu(ktime_get_real_seconds());
bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}
@@ -1826,6 +1825,9 @@ bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
bfa_ioc_portid(ioc));
+ disable_req.clscode = cpu_to_be16(ioc->clscode);
+ /* unsigned 32-bit time_t overflow in y2106 */
+ disable_req.tv_sec = be32_to_cpu(ktime_get_real_seconds());
bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}
@@ -2803,7 +2805,7 @@ void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
{
memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
- strncpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
+ strlcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}
void
diff --git a/drivers/scsi/bfa/bfa_port.c b/drivers/scsi/bfa/bfa_port.c
index da1721e0d167..079bc77f4102 100644
--- a/drivers/scsi/bfa/bfa_port.c
+++ b/drivers/scsi/bfa/bfa_port.c
@@ -96,14 +96,11 @@ bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status)
port->stats_busy = BFA_FALSE;
if (status == BFA_STATUS_OK) {
- struct timeval tv;
-
memcpy(port->stats, port->stats_dma.kva,
sizeof(union bfa_port_stats_u));
bfa_port_stats_swap(port, port->stats);
- do_gettimeofday(&tv);
- port->stats->fc.secs_reset = tv.tv_sec - port->stats_reset_time;
+ port->stats->fc.secs_reset = ktime_get_seconds() - port->stats_reset_time;
}
if (port->stats_cbfn) {
@@ -124,16 +121,13 @@ bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status)
static void
bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status)
{
- struct timeval tv;
-
port->stats_status = status;
port->stats_busy = BFA_FALSE;
/*
* re-initialize time stamp for stats reset
*/
- do_gettimeofday(&tv);
- port->stats_reset_time = tv.tv_sec;
+ port->stats_reset_time = ktime_get_seconds();
if (port->stats_cbfn) {
port->stats_cbfn(port->stats_cbarg, status);
@@ -471,8 +465,6 @@ void
bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
void *dev, struct bfa_trc_mod_s *trcmod)
{
- struct timeval tv;
-
WARN_ON(!port);
port->dev = dev;
@@ -494,8 +486,7 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
/*
* initialize time stamp for stats reset
*/
- do_gettimeofday(&tv);
- port->stats_reset_time = tv.tv_sec;
+ port->stats_reset_time = ktime_get_seconds();
bfa_trc(port, 0);
}
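
secs_reset is an elapsed-time delta, so the port-stats code now records and subtracts ktime_get_seconds(), the monotonic clock, which cannot jump when the wall clock is stepped by settimeofday() or NTP; absolute, user-visible timestamps elsewhere in bfa use ktime_get_real_seconds() instead. The pattern in isolation (a sketch):

static time64_t stats_reset_time;

static void stats_clear(void)
{
	stats_reset_time = ktime_get_seconds();	/* monotonic */
}

static time64_t stats_age(void)
{
	return ktime_get_seconds() - stats_reset_time;	/* never negative */
}
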
diff --git a/drivers/scsi/bfa/bfa_port.h b/drivers/scsi/bfa/bfa_port.h
index 26dc1bf14c85..0c3b200243ca 100644
--- a/drivers/scsi/bfa/bfa_port.h
+++ b/drivers/scsi/bfa/bfa_port.h
@@ -36,7 +36,7 @@ struct bfa_port_s {
bfa_port_stats_cbfn_t stats_cbfn;
void *stats_cbarg;
bfa_status_t stats_status;
- u32 stats_reset_time;
+ time64_t stats_reset_time;
union bfa_port_stats_u *stats;
struct bfa_dma_s stats_dma;
bfa_boolean_t endis_pending;
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index e640223bab3c..6fc34fb20f00 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -288,18 +288,6 @@ plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
return 0;
}
-static u64
-bfa_get_log_time(void)
-{
- u64 system_time = 0;
- struct timeval tv;
- do_gettimeofday(&tv);
-
- /* We are interested in seconds only. */
- system_time = tv.tv_sec;
- return system_time;
-}
-
static void
bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
{
@@ -320,7 +308,7 @@ bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));
- pl_recp->tv = bfa_get_log_time();
+ pl_recp->tv = ktime_get_real_seconds();
BFA_PL_LOG_REC_INCR(plog->tail);
if (plog->head == plog->tail)
@@ -350,8 +338,8 @@ bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
lp.eid = event;
lp.log_type = BFA_PL_LOG_TYPE_STRING;
lp.misc = misc;
- strncpy(lp.log_entry.string_log, log_str,
- BFA_PL_STRING_LOG_SZ - 1);
+ strlcpy(lp.log_entry.string_log, log_str,
+ BFA_PL_STRING_LOG_SZ);
lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
bfa_plog_add(plog, &lp);
}
@@ -3047,7 +3035,6 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
struct bfa_fcport_ln_s *ln = &fcport->ln;
- struct timeval tv;
fcport->bfa = bfa;
ln->fcport = fcport;
@@ -3060,8 +3047,7 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
/*
* initialize time stamp for stats reset
*/
- do_gettimeofday(&tv);
- fcport->stats_reset_time = tv.tv_sec;
+ fcport->stats_reset_time = ktime_get_seconds();
fcport->stats_dma_ready = BFA_FALSE;
/*
@@ -3295,9 +3281,7 @@ __bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
union bfa_fcport_stats_u *ret;
if (complete) {
- struct timeval tv;
- if (fcport->stats_status == BFA_STATUS_OK)
- do_gettimeofday(&tv);
+ time64_t time = ktime_get_seconds();
list_for_each_safe(qe, qen, &fcport->stats_pending_q) {
bfa_q_deq(&fcport->stats_pending_q, &qe);
@@ -3312,7 +3296,7 @@ __bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
bfa_fcport_fcoe_stats_swap(&ret->fcoe,
&fcport->stats->fcoe);
ret->fcoe.secs_reset =
- tv.tv_sec - fcport->stats_reset_time;
+ time - fcport->stats_reset_time;
}
}
bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
@@ -3373,13 +3357,10 @@ __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
struct list_head *qe, *qen;
if (complete) {
- struct timeval tv;
-
/*
* re-initialize time stamp for stats reset
*/
- do_gettimeofday(&tv);
- fcport->stats_reset_time = tv.tv_sec;
+ fcport->stats_reset_time = ktime_get_seconds();
list_for_each_safe(qe, qen, &fcport->statsclr_pending_q) {
bfa_q_deq(&fcport->statsclr_pending_q, &qe);
cb = (struct bfa_cb_pending_q_s *)qe;
@@ -6148,13 +6129,13 @@ bfa_fcdiag_lb_is_running(struct bfa_s *bfa)
/*
* D-port
*/
-#define bfa_dport_result_start(__dport, __mode) do { \
- (__dport)->result.start_time = bfa_get_log_time(); \
- (__dport)->result.status = DPORT_TEST_ST_INPRG; \
- (__dport)->result.mode = (__mode); \
- (__dport)->result.rp_pwwn = (__dport)->rp_pwwn; \
- (__dport)->result.rp_nwwn = (__dport)->rp_nwwn; \
- (__dport)->result.lpcnt = (__dport)->lpcnt; \
+#define bfa_dport_result_start(__dport, __mode) do { \
+ (__dport)->result.start_time = ktime_get_real_seconds(); \
+ (__dport)->result.status = DPORT_TEST_ST_INPRG; \
+ (__dport)->result.mode = (__mode); \
+ (__dport)->result.rp_pwwn = (__dport)->rp_pwwn; \
+ (__dport)->result.rp_nwwn = (__dport)->rp_nwwn; \
+ (__dport)->result.lpcnt = (__dport)->lpcnt; \
} while (0)
static bfa_boolean_t bfa_dport_send_req(struct bfa_dport_s *dport,
@@ -6588,7 +6569,7 @@ bfa_dport_scn(struct bfa_dport_s *dport, struct bfi_diag_dport_scn_s *msg)
switch (dport->i2hmsg.scn.state) {
case BFI_DPORT_SCN_TESTCOMP:
- dport->result.end_time = bfa_get_log_time();
+ dport->result.end_time = ktime_get_real_seconds();
bfa_trc(dport->bfa, dport->result.end_time);
dport->result.status = msg->info.testcomp.status;
@@ -6635,7 +6616,7 @@ bfa_dport_scn(struct bfa_dport_s *dport, struct bfi_diag_dport_scn_s *msg)
case BFI_DPORT_SCN_SUBTESTSTART:
subtesttype = msg->info.teststart.type;
dport->result.subtest[subtesttype].start_time =
- bfa_get_log_time();
+ ktime_get_real_seconds();
dport->result.subtest[subtesttype].status =
DPORT_TEST_ST_INPRG;
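
By contrast with the stats deltas above, the plog records and D-port test results carry wall-clock timestamps meant for the user, so the removed bfa_get_log_time() wrapper gives way to direct ktime_get_real_seconds() calls: the same value, a y2038-safe return type, and one less layer. The rule of thumb, sketched:

time64_t t0 = ktime_get_seconds();		/* elapsed time: monotonic */
time64_t logged_at = ktime_get_real_seconds();	/* user-visible events: wall clock */
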
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
index ea2278bc78a8..7e8fb6231d49 100644
--- a/drivers/scsi/bfa/bfa_svc.h
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -505,7 +505,7 @@ struct bfa_fcport_s {
struct list_head stats_pending_q;
struct list_head statsclr_pending_q;
bfa_boolean_t stats_qfull;
- u32 stats_reset_time; /* stats reset time stamp */
+ time64_t stats_reset_time; /* stats reset time stamp */
bfa_boolean_t diag_busy; /* diag busy status */
bfa_boolean_t beacon; /* port beacon status */
bfa_boolean_t link_e2e_beacon; /* link beacon status */
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index cf0466686804..bd7e6a6fc1f1 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -610,13 +610,12 @@ bfad_hal_mem_alloc(struct bfad_s *bfad)
/* Iterate through the KVA meminfo queue */
list_for_each(km_qe, &kva_info->qe) {
kva_elem = (struct bfa_mem_kva_s *) km_qe;
- kva_elem->kva = vmalloc(kva_elem->mem_len);
+ kva_elem->kva = vzalloc(kva_elem->mem_len);
if (kva_elem->kva == NULL) {
bfad_hal_mem_release(bfad);
rc = BFA_STATUS_ENOMEM;
goto ext;
}
- memset(kva_elem->kva, 0, kva_elem->mem_len);
}
/* Iterate through the DMA meminfo queue */
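
vzalloc(n) is exactly vmalloc(n) plus zeroing, so the explicit memset() after a successful allocation is dropped here and again in the bfad_debugfs conversions below. Sketch:

static void *kva_alloc(size_t mem_len)
{
	return vzalloc(mem_len);	/* zeroed on success, NULL on failure */
}
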
@@ -981,20 +980,20 @@ bfad_start_ops(struct bfad_s *bfad) {
/* Fill the driver_info info to fcs*/
memset(&driver_info, 0, sizeof(driver_info));
- strncpy(driver_info.version, BFAD_DRIVER_VERSION,
- sizeof(driver_info.version) - 1);
+ strlcpy(driver_info.version, BFAD_DRIVER_VERSION,
+ sizeof(driver_info.version));
if (host_name)
- strncpy(driver_info.host_machine_name, host_name,
- sizeof(driver_info.host_machine_name) - 1);
+ strlcpy(driver_info.host_machine_name, host_name,
+ sizeof(driver_info.host_machine_name));
if (os_name)
- strncpy(driver_info.host_os_name, os_name,
- sizeof(driver_info.host_os_name) - 1);
+ strlcpy(driver_info.host_os_name, os_name,
+ sizeof(driver_info.host_os_name));
if (os_patch)
- strncpy(driver_info.host_os_patch, os_patch,
- sizeof(driver_info.host_os_patch) - 1);
+ strlcpy(driver_info.host_os_patch, os_patch,
+ sizeof(driver_info.host_os_patch));
- strncpy(driver_info.os_device_name, bfad->pci_name,
- sizeof(driver_info.os_device_name) - 1);
+ strlcpy(driver_info.os_device_name, bfad->pci_name,
+ sizeof(driver_info.os_device_name));
/* FCS driver info init */
spin_lock_irqsave(&bfad->bfad_lock, flags);
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 13db3b7bc873..d4d276c757ea 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -487,7 +487,6 @@ bfad_im_vport_delete(struct fc_vport *fc_vport)
struct bfad_im_port_s *im_port =
(struct bfad_im_port_s *) vport->drv_port.im_port;
struct bfad_s *bfad = im_port->bfad;
- struct bfad_port_s *port;
struct bfa_fcs_vport_s *fcs_vport;
struct Scsi_Host *vshost;
wwn_t pwwn;
@@ -502,8 +501,6 @@ bfad_im_vport_delete(struct fc_vport *fc_vport)
return 0;
}
- port = im_port->port;
-
vshost = vport->drv_port.im_port->shost;
u64_to_wwn(fc_host_port_name(vshost), (u8 *)&pwwn);
@@ -843,7 +840,7 @@ bfad_im_symbolic_name_show(struct device *dev, struct device_attribute *attr,
char symname[BFA_SYMNAME_MAXLEN];
bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
- strncpy(symname, port_attr.port_cfg.sym_name.symname,
+ strlcpy(symname, port_attr.port_cfg.sym_name.symname,
BFA_SYMNAME_MAXLEN);
return snprintf(buf, PAGE_SIZE, "%s\n", symname);
}
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index b2fa195adc7a..3976e787ba64 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -127,7 +127,7 @@ bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
/* fill in driver attr info */
strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME);
- strncpy(iocmd->ioc_attr.driver_attr.driver_ver,
+ strlcpy(iocmd->ioc_attr.driver_attr.driver_ver,
BFAD_DRIVER_VERSION, BFA_VERSION_LEN);
strcpy(iocmd->ioc_attr.driver_attr.fw_ver,
iocmd->ioc_attr.adapter_attr.fw_ver);
@@ -315,9 +315,9 @@ bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
iocmd->attr.port_type = port_attr.port_type;
iocmd->attr.loopback = port_attr.loopback;
iocmd->attr.authfail = port_attr.authfail;
- strncpy(iocmd->attr.port_symname.symname,
+ strlcpy(iocmd->attr.port_symname.symname,
port_attr.port_cfg.sym_name.symname,
- sizeof(port_attr.port_cfg.sym_name.symname));
+ sizeof(iocmd->attr.port_symname.symname));
iocmd->status = BFA_STATUS_OK;
return 0;
@@ -2094,13 +2094,11 @@ bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
struct bfa_bsg_fcpim_profile_s *iocmd =
(struct bfa_bsg_fcpim_profile_s *)cmd;
- struct timeval tv;
unsigned long flags;
- do_gettimeofday(&tv);
spin_lock_irqsave(&bfad->bfad_lock, flags);
if (v_cmd == IOCMD_FCPIM_PROFILE_ON)
- iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, tv.tv_sec);
+ iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, ktime_get_real_seconds());
else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF)
iocmd->status = bfa_fcpim_profile_off(&bfad->bfa);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index 05f523971348..349cfe7d055e 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -81,7 +81,7 @@ bfad_debugfs_open_fwtrc(struct inode *inode, struct file *file)
fw_debug->buffer_len = sizeof(struct bfa_trc_mod_s);
- fw_debug->debug_buffer = vmalloc(fw_debug->buffer_len);
+ fw_debug->debug_buffer = vzalloc(fw_debug->buffer_len);
if (!fw_debug->debug_buffer) {
kfree(fw_debug);
printk(KERN_INFO "bfad[%d]: Failed to allocate fwtrc buffer\n",
@@ -89,8 +89,6 @@ bfad_debugfs_open_fwtrc(struct inode *inode, struct file *file)
return -ENOMEM;
}
- memset(fw_debug->debug_buffer, 0, fw_debug->buffer_len);
-
spin_lock_irqsave(&bfad->bfad_lock, flags);
rc = bfa_ioc_debug_fwtrc(&bfad->bfa.ioc,
fw_debug->debug_buffer,
@@ -125,7 +123,7 @@ bfad_debugfs_open_fwsave(struct inode *inode, struct file *file)
fw_debug->buffer_len = sizeof(struct bfa_trc_mod_s);
- fw_debug->debug_buffer = vmalloc(fw_debug->buffer_len);
+ fw_debug->debug_buffer = vzalloc(fw_debug->buffer_len);
if (!fw_debug->debug_buffer) {
kfree(fw_debug);
printk(KERN_INFO "bfad[%d]: Failed to allocate fwsave buffer\n",
@@ -133,8 +131,6 @@ bfad_debugfs_open_fwsave(struct inode *inode, struct file *file)
return -ENOMEM;
}
- memset(fw_debug->debug_buffer, 0, fw_debug->buffer_len);
-
spin_lock_irqsave(&bfad->bfad_lock, flags);
rc = bfa_ioc_debug_fwsave(&bfad->bfa.ioc,
fw_debug->debug_buffer,
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 06ce4ba2b7bc..af66275570c3 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -141,16 +141,28 @@ struct bfad_im_s {
} while (0)
/* post fc_host vendor event */
-#define bfad_im_post_vendor_event(_entry, _drv, _cnt, _cat, _evt) do { \
- do_gettimeofday(&(_entry)->aen_tv); \
- (_entry)->bfad_num = (_drv)->inst_no; \
- (_entry)->seq_num = (_cnt); \
- (_entry)->aen_category = (_cat); \
- (_entry)->aen_type = (_evt); \
- if ((_drv)->bfad_flags & BFAD_FC4_PROBE_DONE) \
- queue_work((_drv)->im->drv_workq, \
- &(_drv)->im->aen_im_notify_work); \
-} while (0)
+static inline void bfad_im_post_vendor_event(struct bfa_aen_entry_s *entry,
+ struct bfad_s *drv, int cnt,
+ enum bfa_aen_category cat,
+ enum bfa_ioc_aen_event evt)
+{
+ struct timespec64 ts;
+
+ ktime_get_real_ts64(&ts);
+ /*
+ * 'unsigned long aen_tv_sec' overflows in y2106 on 32-bit
+ * architectures, or in 2038 if user space interprets it
+ * as 'signed'.
+ */
+ entry->aen_tv_sec = ts.tv_sec;
+ entry->aen_tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+ entry->bfad_num = drv->inst_no;
+ entry->seq_num = cnt;
+ entry->aen_category = cat;
+ entry->aen_type = evt;
+ if (drv->bfad_flags & BFAD_FC4_PROBE_DONE)
+ queue_work(drv->im->drv_workq, &drv->im->aen_im_notify_work);
+}
struct Scsi_Host *bfad_scsi_host_alloc(struct bfad_im_port_s *im_port,
struct bfad_s *);
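
Turning bfad_im_post_vendor_event from a function-like macro into a static inline gives every argument a checked type, provides a home for the struct timespec64 local that the timestamp split needs, and leaves one place to document the 2038/2106 overflow of the exported aen_tv_sec field (which bfa_defs_svc.h now stores as plain u64 sec/usec instead of a kernel-internal struct timeval). The general trade-off, sketched:

struct evt {
	int num;
};

/* macro: no type checking, arguments may be evaluated more than once */
#define POST_EVT(e, n)	do { (e)->num = (n); } while (0)

/* preferred: type-checked, debuggable, can hold locals */
static inline void post_evt(struct evt *e, int n)
{
	e->num = n;
}
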
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index e6b9de7d41ac..65de1d0578a1 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -1552,7 +1552,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
rc = bnx2fc_shost_config(lport, parent);
if (rc) {
- printk(KERN_ERR PFX "Couldnt configure shost for %s\n",
+ printk(KERN_ERR PFX "Couldn't configure shost for %s\n",
interface->netdev->name);
goto lp_config_err;
}
@@ -1560,7 +1560,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
/* Initialize the libfc library */
rc = bnx2fc_libfc_config(lport);
if (rc) {
- printk(KERN_ERR PFX "Couldnt configure libfc\n");
+ printk(KERN_ERR PFX "Couldn't configure libfc\n");
goto shost_err;
}
fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 26de61d65a4d..e8ae4d671d23 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -1857,16 +1857,15 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
* entries. Hence the limit with one page is 8192 task context
* entries.
*/
- hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
- PAGE_SIZE,
- &hba->task_ctx_bd_dma,
- GFP_KERNEL);
+ hba->task_ctx_bd_tbl = dma_zalloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE,
+ &hba->task_ctx_bd_dma,
+ GFP_KERNEL);
if (!hba->task_ctx_bd_tbl) {
printk(KERN_ERR PFX "unable to allocate task context BDT\n");
rc = -1;
goto out;
}
- memset(hba->task_ctx_bd_tbl, 0, PAGE_SIZE);
/*
* Allocate task_ctx which is an array of pointers pointing to
@@ -1895,16 +1894,15 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
for (i = 0; i < task_ctx_arr_sz; i++) {
- hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
- PAGE_SIZE,
- &hba->task_ctx_dma[i],
- GFP_KERNEL);
+ hba->task_ctx[i] = dma_zalloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE,
+ &hba->task_ctx_dma[i],
+ GFP_KERNEL);
if (!hba->task_ctx[i]) {
printk(KERN_ERR PFX "unable to alloc task context\n");
rc = -1;
goto out3;
}
- memset(hba->task_ctx[i], 0, PAGE_SIZE);
addr = (u64)hba->task_ctx_dma[i];
task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32);
task_ctx_bdt->lo = cpu_to_le32((u32)addr);
@@ -2033,28 +2031,23 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
}
for (i = 0; i < segment_count; ++i) {
- hba->hash_tbl_segments[i] =
- dma_alloc_coherent(&hba->pcidev->dev,
- BNX2FC_HASH_TBL_CHUNK_SIZE,
- &dma_segment_array[i],
- GFP_KERNEL);
+ hba->hash_tbl_segments[i] = dma_zalloc_coherent(&hba->pcidev->dev,
+ BNX2FC_HASH_TBL_CHUNK_SIZE,
+ &dma_segment_array[i],
+ GFP_KERNEL);
if (!hba->hash_tbl_segments[i]) {
printk(KERN_ERR PFX "hash segment alloc failed\n");
goto cleanup_dma;
}
- memset(hba->hash_tbl_segments[i], 0,
- BNX2FC_HASH_TBL_CHUNK_SIZE);
}
- hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev,
- PAGE_SIZE,
- &hba->hash_tbl_pbl_dma,
- GFP_KERNEL);
+ hba->hash_tbl_pbl = dma_zalloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ &hba->hash_tbl_pbl_dma,
+ GFP_KERNEL);
if (!hba->hash_tbl_pbl) {
printk(KERN_ERR PFX "hash table pbl alloc failed\n");
goto cleanup_dma;
}
- memset(hba->hash_tbl_pbl, 0, PAGE_SIZE);
pbl = hba->hash_tbl_pbl;
for (i = 0; i < segment_count; ++i) {
@@ -2111,27 +2104,26 @@ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
return -ENOMEM;
mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
- hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
- &hba->t2_hash_tbl_ptr_dma,
- GFP_KERNEL);
+ hba->t2_hash_tbl_ptr = dma_zalloc_coherent(&hba->pcidev->dev,
+ mem_size,
+ &hba->t2_hash_tbl_ptr_dma,
+ GFP_KERNEL);
if (!hba->t2_hash_tbl_ptr) {
printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
bnx2fc_free_fw_resc(hba);
return -ENOMEM;
}
- memset(hba->t2_hash_tbl_ptr, 0x00, mem_size);
mem_size = BNX2FC_NUM_MAX_SESS *
sizeof(struct fcoe_t2_hash_table_entry);
- hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
- &hba->t2_hash_tbl_dma,
- GFP_KERNEL);
+ hba->t2_hash_tbl = dma_zalloc_coherent(&hba->pcidev->dev, mem_size,
+ &hba->t2_hash_tbl_dma,
+ GFP_KERNEL);
if (!hba->t2_hash_tbl) {
printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
bnx2fc_free_fw_resc(hba);
return -ENOMEM;
}
- memset(hba->t2_hash_tbl, 0x00, mem_size);
for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
addr = (unsigned long) hba->t2_hash_tbl_dma +
((i+1) * sizeof(struct fcoe_t2_hash_table_entry));
@@ -2148,16 +2140,14 @@ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
return -ENOMEM;
}
- hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev,
- PAGE_SIZE,
- &hba->stats_buf_dma,
- GFP_KERNEL);
+ hba->stats_buffer = dma_zalloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ &hba->stats_buf_dma,
+ GFP_KERNEL);
if (!hba->stats_buffer) {
printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
bnx2fc_free_fw_resc(hba);
return -ENOMEM;
}
- memset(hba->stats_buffer, 0x00, PAGE_SIZE);
return 0;
}
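
All of the dma_alloc_coherent() + memset() pairs in this file collapse into dma_zalloc_coherent(), the DMA-coherent analogue of kzalloc()/vzalloc() in kernels of this vintage (later kernels zero dma_alloc_coherent() memory unconditionally and drop the zalloc variant). Sketch:

static void *alloc_dma_table(struct pci_dev *pdev, dma_addr_t *dma)
{
	/* zeroed on success; pair with dma_free_coherent() */
	return dma_zalloc_coherent(&pdev->dev, PAGE_SIZE, dma, GFP_KERNEL);
}
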
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index a8ae1a019eea..e3d1c7c440c8 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -672,56 +672,52 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
- &tgt->sq_dma, GFP_KERNEL);
+ tgt->sq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
+ &tgt->sq_dma, GFP_KERNEL);
if (!tgt->sq) {
printk(KERN_ERR PFX "unable to allocate SQ memory %d\n",
tgt->sq_mem_size);
goto mem_alloc_failure;
}
- memset(tgt->sq, 0, tgt->sq_mem_size);
/* Allocate and map CQ */
tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE;
tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
- &tgt->cq_dma, GFP_KERNEL);
+ tgt->cq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
+ &tgt->cq_dma, GFP_KERNEL);
if (!tgt->cq) {
printk(KERN_ERR PFX "unable to allocate CQ memory %d\n",
tgt->cq_mem_size);
goto mem_alloc_failure;
}
- memset(tgt->cq, 0, tgt->cq_mem_size);
/* Allocate and map RQ and RQ PBL */
tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE;
tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
- &tgt->rq_dma, GFP_KERNEL);
+ tgt->rq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
+ &tgt->rq_dma, GFP_KERNEL);
if (!tgt->rq) {
printk(KERN_ERR PFX "unable to allocate RQ memory %d\n",
tgt->rq_mem_size);
goto mem_alloc_failure;
}
- memset(tgt->rq, 0, tgt->rq_mem_size);
tgt->rq_pbl_size = (tgt->rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
- &tgt->rq_pbl_dma, GFP_KERNEL);
+ tgt->rq_pbl = dma_zalloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
+ &tgt->rq_pbl_dma, GFP_KERNEL);
if (!tgt->rq_pbl) {
printk(KERN_ERR PFX "unable to allocate RQ PBL %d\n",
tgt->rq_pbl_size);
goto mem_alloc_failure;
}
- memset(tgt->rq_pbl, 0, tgt->rq_pbl_size);
num_pages = tgt->rq_mem_size / CNIC_PAGE_SIZE;
page = tgt->rq_dma;
pbl = (u32 *)tgt->rq_pbl;
@@ -739,44 +735,43 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
- &tgt->xferq_dma, GFP_KERNEL);
+ tgt->xferq = dma_zalloc_coherent(&hba->pcidev->dev,
+ tgt->xferq_mem_size, &tgt->xferq_dma,
+ GFP_KERNEL);
if (!tgt->xferq) {
printk(KERN_ERR PFX "unable to allocate XFERQ %d\n",
tgt->xferq_mem_size);
goto mem_alloc_failure;
}
- memset(tgt->xferq, 0, tgt->xferq_mem_size);
/* Allocate and map CONFQ & CONFQ PBL */
tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE;
tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
- &tgt->confq_dma, GFP_KERNEL);
+ tgt->confq = dma_zalloc_coherent(&hba->pcidev->dev,
+ tgt->confq_mem_size, &tgt->confq_dma,
+ GFP_KERNEL);
if (!tgt->confq) {
printk(KERN_ERR PFX "unable to allocate CONFQ %d\n",
tgt->confq_mem_size);
goto mem_alloc_failure;
}
- memset(tgt->confq, 0, tgt->confq_mem_size);
tgt->confq_pbl_size =
(tgt->confq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
tgt->confq_pbl_size =
(tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
- tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev,
- tgt->confq_pbl_size,
- &tgt->confq_pbl_dma, GFP_KERNEL);
+ tgt->confq_pbl = dma_zalloc_coherent(&hba->pcidev->dev,
+ tgt->confq_pbl_size,
+ &tgt->confq_pbl_dma, GFP_KERNEL);
if (!tgt->confq_pbl) {
printk(KERN_ERR PFX "unable to allocate CONFQ PBL %d\n",
tgt->confq_pbl_size);
goto mem_alloc_failure;
}
- memset(tgt->confq_pbl, 0, tgt->confq_pbl_size);
num_pages = tgt->confq_mem_size / CNIC_PAGE_SIZE;
page = tgt->confq_dma;
pbl = (u32 *)tgt->confq_pbl;
@@ -792,15 +787,14 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
/* Allocate and map ConnDB */
tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db);
- tgt->conn_db = dma_alloc_coherent(&hba->pcidev->dev,
- tgt->conn_db_mem_size,
- &tgt->conn_db_dma, GFP_KERNEL);
+ tgt->conn_db = dma_zalloc_coherent(&hba->pcidev->dev,
+ tgt->conn_db_mem_size,
+ &tgt->conn_db_dma, GFP_KERNEL);
if (!tgt->conn_db) {
printk(KERN_ERR PFX "unable to allocate conn_db %d\n",
tgt->conn_db_mem_size);
goto mem_alloc_failure;
}
- memset(tgt->conn_db, 0, tgt->conn_db_mem_size);
/* Allocate and map LCQ */
@@ -808,15 +802,14 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
- tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
- &tgt->lcq_dma, GFP_KERNEL);
+ tgt->lcq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
+ &tgt->lcq_dma, GFP_KERNEL);
if (!tgt->lcq) {
printk(KERN_ERR PFX "unable to allocate lcq %d\n",
tgt->lcq_mem_size);
goto mem_alloc_failure;
}
- memset(tgt->lcq, 0, tgt->lcq_mem_size);
tgt->conn_db->rq_prod = 0x8000;
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index e0640e0f259f..8f03a869ac98 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -547,12 +547,9 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL;
memcpy(nopout_wqe->lun, &nopout_hdr->lun, 8);
- if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
- u32 tmp = nopout_wqe->lun[0];
- /* 57710 requires LUN field to be swapped */
- nopout_wqe->lun[0] = nopout_wqe->lun[1];
- nopout_wqe->lun[1] = tmp;
- }
+ /* 57710 requires LUN field to be swapped */
+ if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
+ swap(nopout_wqe->lun[0], nopout_wqe->lun[1]);
nopout_wqe->itt = ((u16)task->itt |
(ISCSI_TASK_TYPE_MPATH <<
@@ -1073,15 +1070,14 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
/* Allocate memory area for actual SQ element */
ep->qp.sq_virt =
- dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
- &ep->qp.sq_phys, GFP_KERNEL);
+ dma_zalloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
+ &ep->qp.sq_phys, GFP_KERNEL);
if (!ep->qp.sq_virt) {
printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n",
ep->qp.sq_mem_size);
goto mem_alloc_err;
}
- memset(ep->qp.sq_virt, 0x00, ep->qp.sq_mem_size);
ep->qp.sq_first_qe = ep->qp.sq_virt;
ep->qp.sq_prod_qe = ep->qp.sq_first_qe;
ep->qp.sq_cons_qe = ep->qp.sq_first_qe;
@@ -1110,14 +1106,13 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
/* Allocate memory area for actual CQ element */
ep->qp.cq_virt =
- dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
- &ep->qp.cq_phys, GFP_KERNEL);
+ dma_zalloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
+ &ep->qp.cq_phys, GFP_KERNEL);
if (!ep->qp.cq_virt) {
printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n",
ep->qp.cq_mem_size);
goto mem_alloc_err;
}
- memset(ep->qp.cq_virt, 0x00, ep->qp.cq_mem_size);
ep->qp.cq_first_qe = ep->qp.cq_virt;
ep->qp.cq_prod_qe = ep->qp.cq_first_qe;
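
The bnx2i LUN hunk above also replaces an open-coded three-statement exchange through a temporary with the kernel's swap() macro. Its core is a one-liner built on GNU C's typeof, roughly:

#define SWAP_SKETCH(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

static void swap_demo(void)
{
	u32 lun0 = 1, lun1 = 2;

	SWAP_SKETCH(lun0, lun1);	/* lun0 == 2, lun1 == 1 */
}
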
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index cb1711a5d7a3..ed2dae657964 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -1258,7 +1258,7 @@ module_init(csio_init);
module_exit(csio_exit);
MODULE_AUTHOR(CSIO_DRV_AUTHOR);
MODULE_DESCRIPTION(CSIO_DRV_DESC);
-MODULE_LICENSE(CSIO_DRV_LICENSE);
+MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEVICE_TABLE(pci, csio_pci_tbl);
MODULE_VERSION(CSIO_DRV_VERSION);
MODULE_FIRMWARE(FW_FNAME_T5);
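
MODULE_LICENSE() now receives the string literal directly rather than through CSIO_DRV_LICENSE (the define is removed from csio_init.h just below). The license lands in the module's .modinfo section either way, but tools that scan sources for the literal MODULE_LICENSE("...") form can only find it when it is spelled out:

MODULE_LICENSE("Dual BSD/GPL");		/* visible to source scanners */
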
diff --git a/drivers/scsi/csiostor/csio_init.h b/drivers/scsi/csiostor/csio_init.h
index 96b31e5af91e..20244254325a 100644
--- a/drivers/scsi/csiostor/csio_init.h
+++ b/drivers/scsi/csiostor/csio_init.h
@@ -48,7 +48,6 @@
#include "csio_hw.h"
#define CSIO_DRV_AUTHOR "Chelsio Communications"
-#define CSIO_DRV_LICENSE "Dual BSD/GPL"
#define CSIO_DRV_DESC "Chelsio FCoE driver"
#define CSIO_DRV_VERSION "1.0.0-ko"
diff --git a/drivers/scsi/csiostor/csio_mb.c b/drivers/scsi/csiostor/csio_mb.c
index 931b1d8f9f3e..5f4e0a787bd1 100644
--- a/drivers/scsi/csiostor/csio_mb.c
+++ b/drivers/scsi/csiostor/csio_mb.c
@@ -1216,7 +1216,7 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
/* Queue mbox cmd, if another mbox cmd is active */
if (mbp->mb_cbfn == NULL) {
rv = -EBUSY;
- csio_dbg(hw, "Couldnt own Mailbox %x op:0x%x\n",
+ csio_dbg(hw, "Couldn't own Mailbox %x op:0x%x\n",
hw->pfn, *((uint8_t *)mbp->mb));
goto error_out;
@@ -1244,14 +1244,14 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
rv = owner ? -EBUSY : -ETIMEDOUT;
csio_dbg(hw,
- "Couldnt own Mailbox %x op:0x%x "
+ "Couldn't own Mailbox %x op:0x%x "
"owner:%x\n",
hw->pfn, *((uint8_t *)mbp->mb), owner);
goto error_out;
} else {
if (mbm->mcurrent == NULL) {
csio_err(hw,
- "Couldnt own Mailbox %x "
+ "Couldn't own Mailbox %x "
"op:0x%x owner:%x\n",
hw->pfn, *((uint8_t *)mbp->mb),
owner);
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index ce1336414e0a..3f3af5e74a07 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -1914,7 +1914,7 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
if (task->sc) {
task->hdr = (struct iscsi_hdr *)tdata->skb->data;
} else {
- task->hdr = kzalloc(SKB_TX_ISCSI_PDU_HEADER_MAX, GFP_KERNEL);
+ task->hdr = kzalloc(SKB_TX_ISCSI_PDU_HEADER_MAX, GFP_ATOMIC);
if (!task->hdr) {
__kfree_skb(tdata->skb);
tdata->skb = NULL;
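
cxgbi_conn_alloc_pdu() can run in contexts that must not sleep, and GFP_KERNEL allocations may block in reclaim, so the PDU-header allocation switches to GFP_ATOMIC: it either succeeds immediately (possibly dipping into reserves) or fails, and the caller already handles failure by freeing the skb. Sketch:

static struct iscsi_hdr *alloc_pdu_hdr(size_t len)
{
	/* may be called with locks held: must not sleep */
	return kzalloc(len, GFP_ATOMIC);
}
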
diff --git a/drivers/scsi/cxlflash/Makefile b/drivers/scsi/cxlflash/Makefile
index 9e39866d473b..7ec3f6b55dde 100644
--- a/drivers/scsi/cxlflash/Makefile
+++ b/drivers/scsi/cxlflash/Makefile
@@ -1,2 +1,2 @@
obj-$(CONFIG_CXLFLASH) += cxlflash.o
-cxlflash-y += main.o superpipe.o lunmgt.o vlun.o
+cxlflash-y += main.o superpipe.o lunmgt.o vlun.o cxl_hw.o
diff --git a/drivers/scsi/cxlflash/backend.h b/drivers/scsi/cxlflash/backend.h
new file mode 100644
index 000000000000..339e42b03c49
--- /dev/null
+++ b/drivers/scsi/cxlflash/backend.h
@@ -0,0 +1,41 @@
+/*
+ * CXL Flash Device Driver
+ *
+ * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
+ * Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) 2018 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+extern const struct cxlflash_backend_ops cxlflash_cxl_ops;
+
+struct cxlflash_backend_ops {
+ struct module *module;
+ void __iomem * (*psa_map)(void *);
+ void (*psa_unmap)(void __iomem *);
+ int (*process_element)(void *);
+ int (*map_afu_irq)(void *, int, irq_handler_t, void *, char *);
+ void (*unmap_afu_irq)(void *, int, void *);
+ int (*start_context)(void *);
+ int (*stop_context)(void *);
+ int (*afu_reset)(void *);
+ void (*set_master)(void *);
+ void * (*get_context)(struct pci_dev *, void *);
+ void * (*dev_context_init)(struct pci_dev *, void *);
+ int (*release_context)(void *);
+ void (*perst_reloads_same_image)(void *, bool);
+ ssize_t (*read_adapter_vpd)(struct pci_dev *, void *, size_t);
+ int (*allocate_afu_irqs)(void *, int);
+ void (*free_afu_irqs)(void *);
+ void * (*create_afu)(struct pci_dev *);
+ struct file * (*get_fd)(void *, struct file_operations *, int *);
+ void * (*fops_get_context)(struct file *);
+ int (*start_work)(void *, u64);
+ int (*fd_mmap)(struct file *, struct vm_area_struct *);
+ int (*fd_release)(struct inode *, struct file *);
+};
diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
index 6d95e8e147e0..102fd26ca886 100644
--- a/drivers/scsi/cxlflash/common.h
+++ b/drivers/scsi/cxlflash/common.h
@@ -25,6 +25,8 @@
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
+#include "backend.h"
+
extern const struct file_operations cxlflash_cxl_fops;
#define MAX_CONTEXT CXLFLASH_MAX_CONTEXT /* num contexts per afu */
@@ -114,6 +116,7 @@ enum cxlflash_hwq_mode {
struct cxlflash_cfg {
struct afu *afu;
+ const struct cxlflash_backend_ops *ops;
struct pci_dev *dev;
struct pci_device_id *dev_id;
struct Scsi_Host *host;
@@ -129,7 +132,7 @@ struct cxlflash_cfg {
int lr_port;
atomic_t scan_host_needed;
- struct cxl_afu *cxl_afu;
+ void *afu_cookie;
atomic_t recovery_threads;
struct mutex ctx_recovery_mutex;
@@ -203,8 +206,7 @@ struct hwq {
* fields after this point
*/
struct afu *afu;
- struct cxl_context *ctx;
- struct cxl_ioctl_start_work work;
+ void *ctx_cookie;
struct sisl_host_map __iomem *host_map; /* MC host map */
struct sisl_ctrl_map __iomem *ctrl_map; /* MC control map */
ctx_hndl_t ctx_hndl; /* master's context handle */
diff --git a/drivers/scsi/cxlflash/cxl_hw.c b/drivers/scsi/cxlflash/cxl_hw.c
new file mode 100644
index 000000000000..db1cadad5c5d
--- /dev/null
+++ b/drivers/scsi/cxlflash/cxl_hw.c
@@ -0,0 +1,168 @@
+/*
+ * CXL Flash Device Driver
+ *
+ * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
+ * Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) 2018 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <misc/cxl.h>
+
+#include "backend.h"
+
+/*
+ * The following routines map the cxlflash backend operations to existing CXL
+ * kernel API function and are largely simple shims that provide an abstraction
+ * for converting generic context and AFU cookies into cxl_context or cxl_afu
+ * pointers.
+ */
+
+static void __iomem *cxlflash_psa_map(void *ctx_cookie)
+{
+ return cxl_psa_map(ctx_cookie);
+}
+
+static void cxlflash_psa_unmap(void __iomem *addr)
+{
+ cxl_psa_unmap(addr);
+}
+
+static int cxlflash_process_element(void *ctx_cookie)
+{
+ return cxl_process_element(ctx_cookie);
+}
+
+static int cxlflash_map_afu_irq(void *ctx_cookie, int num,
+ irq_handler_t handler, void *cookie, char *name)
+{
+ return cxl_map_afu_irq(ctx_cookie, num, handler, cookie, name);
+}
+
+static void cxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie)
+{
+ cxl_unmap_afu_irq(ctx_cookie, num, cookie);
+}
+
+static int cxlflash_start_context(void *ctx_cookie)
+{
+ return cxl_start_context(ctx_cookie, 0, NULL);
+}
+
+static int cxlflash_stop_context(void *ctx_cookie)
+{
+ return cxl_stop_context(ctx_cookie);
+}
+
+static int cxlflash_afu_reset(void *ctx_cookie)
+{
+ return cxl_afu_reset(ctx_cookie);
+}
+
+static void cxlflash_set_master(void *ctx_cookie)
+{
+ cxl_set_master(ctx_cookie);
+}
+
+static void *cxlflash_get_context(struct pci_dev *dev, void *afu_cookie)
+{
+ return cxl_get_context(dev);
+}
+
+static void *cxlflash_dev_context_init(struct pci_dev *dev, void *afu_cookie)
+{
+ return cxl_dev_context_init(dev);
+}
+
+static int cxlflash_release_context(void *ctx_cookie)
+{
+ return cxl_release_context(ctx_cookie);
+}
+
+static void cxlflash_perst_reloads_same_image(void *afu_cookie, bool image)
+{
+ cxl_perst_reloads_same_image(afu_cookie, image);
+}
+
+static ssize_t cxlflash_read_adapter_vpd(struct pci_dev *dev,
+ void *buf, size_t count)
+{
+ return cxl_read_adapter_vpd(dev, buf, count);
+}
+
+static int cxlflash_allocate_afu_irqs(void *ctx_cookie, int num)
+{
+ return cxl_allocate_afu_irqs(ctx_cookie, num);
+}
+
+static void cxlflash_free_afu_irqs(void *ctx_cookie)
+{
+ cxl_free_afu_irqs(ctx_cookie);
+}
+
+static void *cxlflash_create_afu(struct pci_dev *dev)
+{
+ return cxl_pci_to_afu(dev);
+}
+
+static struct file *cxlflash_get_fd(void *ctx_cookie,
+ struct file_operations *fops, int *fd)
+{
+ return cxl_get_fd(ctx_cookie, fops, fd);
+}
+
+static void *cxlflash_fops_get_context(struct file *file)
+{
+ return cxl_fops_get_context(file);
+}
+
+static int cxlflash_start_work(void *ctx_cookie, u64 irqs)
+{
+ struct cxl_ioctl_start_work work = { 0 };
+
+ work.num_interrupts = irqs;
+ work.flags = CXL_START_WORK_NUM_IRQS;
+
+ return cxl_start_work(ctx_cookie, &work);
+}
+
+static int cxlflash_fd_mmap(struct file *file, struct vm_area_struct *vm)
+{
+ return cxl_fd_mmap(file, vm);
+}
+
+static int cxlflash_fd_release(struct inode *inode, struct file *file)
+{
+ return cxl_fd_release(inode, file);
+}
+
+const struct cxlflash_backend_ops cxlflash_cxl_ops = {
+ .module = THIS_MODULE,
+ .psa_map = cxlflash_psa_map,
+ .psa_unmap = cxlflash_psa_unmap,
+ .process_element = cxlflash_process_element,
+ .map_afu_irq = cxlflash_map_afu_irq,
+ .unmap_afu_irq = cxlflash_unmap_afu_irq,
+ .start_context = cxlflash_start_context,
+ .stop_context = cxlflash_stop_context,
+ .afu_reset = cxlflash_afu_reset,
+ .set_master = cxlflash_set_master,
+ .get_context = cxlflash_get_context,
+ .dev_context_init = cxlflash_dev_context_init,
+ .release_context = cxlflash_release_context,
+ .perst_reloads_same_image = cxlflash_perst_reloads_same_image,
+ .read_adapter_vpd = cxlflash_read_adapter_vpd,
+ .allocate_afu_irqs = cxlflash_allocate_afu_irqs,
+ .free_afu_irqs = cxlflash_free_afu_irqs,
+ .create_afu = cxlflash_create_afu,
+ .get_fd = cxlflash_get_fd,
+ .fops_get_context = cxlflash_fops_get_context,
+ .start_work = cxlflash_start_work,
+ .fd_mmap = cxlflash_fd_mmap,
+ .fd_release = cxlflash_fd_release,
+};
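These one-line wrappers exist so the rest of the driver can dispatch through a cookie-based vtable instead of naming cxl_* services directly. A minimal calling sketch, assuming the cfg->ops and hwq->ctx_cookie fields introduced by this patch (the function name is illustrative, not from the patch):

	static void __iomem *example_map_psa(struct cxlflash_cfg *cfg,
					     struct hwq *hwq)
	{
		/* Same call the old code made as cxl_psa_map(hwq->ctx). */
		return cfg->ops->psa_map(hwq->ctx_cookie);
	}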
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 38b3a9c84fd1..d8fe7ab870b8 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -620,6 +620,7 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
cmd->parent = afu;
cmd->hwq_index = hwq_index;
+ cmd->sa.ioasc = 0;
cmd->rcb.ctx_id = hwq->ctx_hndl;
cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
@@ -710,7 +711,7 @@ static void stop_afu(struct cxlflash_cfg *cfg)
}
if (likely(afu->afu_map)) {
- cxl_psa_unmap((void __iomem *)afu->afu_map);
+ cfg->ops->psa_unmap(afu->afu_map);
afu->afu_map = NULL;
}
}
@@ -738,7 +739,7 @@ static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
hwq = get_hwq(afu, index);
- if (!hwq->ctx) {
+ if (!hwq->ctx_cookie) {
dev_err(dev, "%s: returning with NULL MC\n", __func__);
return;
}
@@ -747,13 +748,13 @@ static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
case UNMAP_THREE:
/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
if (index == PRIMARY_HWQ)
- cxl_unmap_afu_irq(hwq->ctx, 3, hwq);
+ cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq);
case UNMAP_TWO:
- cxl_unmap_afu_irq(hwq->ctx, 2, hwq);
+ cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq);
case UNMAP_ONE:
- cxl_unmap_afu_irq(hwq->ctx, 1, hwq);
+ cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq);
case FREE_IRQ:
- cxl_free_afu_irqs(hwq->ctx);
+ cfg->ops->free_afu_irqs(hwq->ctx_cookie);
/* fall through */
case UNDO_NOOP:
/* No action required */
@@ -782,15 +783,15 @@ static void term_mc(struct cxlflash_cfg *cfg, u32 index)
hwq = get_hwq(afu, index);
- if (!hwq->ctx) {
+ if (!hwq->ctx_cookie) {
dev_err(dev, "%s: returning with NULL MC\n", __func__);
return;
}
- WARN_ON(cxl_stop_context(hwq->ctx));
+ WARN_ON(cfg->ops->stop_context(hwq->ctx_cookie));
if (index != PRIMARY_HWQ)
- WARN_ON(cxl_release_context(hwq->ctx));
- hwq->ctx = NULL;
+ WARN_ON(cfg->ops->release_context(hwq->ctx_cookie));
+ hwq->ctx_cookie = NULL;
spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
flush_pending_cmds(hwq);
@@ -1598,27 +1599,6 @@ out:
}
/**
- * start_context() - starts the master context
- * @cfg: Internal structure associated with the host.
- * @index: Index of the hardware queue.
- *
- * Return: A success or failure value from CXL services.
- */
-static int start_context(struct cxlflash_cfg *cfg, u32 index)
-{
- struct device *dev = &cfg->dev->dev;
- struct hwq *hwq = get_hwq(cfg->afu, index);
- int rc = 0;
-
- rc = cxl_start_context(hwq->ctx,
- hwq->work.work_element_descriptor,
- NULL);
-
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
-}
-
-/**
* read_vpd() - obtains the WWPNs from VPD
* @cfg: Internal structure associated with the host.
* @wwpn: Array of size MAX_FC_PORTS to pass back WWPNs
@@ -1640,7 +1620,7 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
const char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };
/* Get the VPD data from the device */
- vpd_size = cxl_read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
+ vpd_size = cfg->ops->read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
if (unlikely(vpd_size <= 0)) {
dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
__func__, vpd_size);
@@ -1732,6 +1712,7 @@ static void init_pcr(struct cxlflash_cfg *cfg)
struct afu *afu = cfg->afu;
struct sisl_ctrl_map __iomem *ctrl_map;
struct hwq *hwq;
+ void *cookie;
int i;
for (i = 0; i < MAX_CONTEXT; i++) {
@@ -1746,8 +1727,9 @@ static void init_pcr(struct cxlflash_cfg *cfg)
/* Copy frequently used fields into hwq */
for (i = 0; i < afu->num_hwqs; i++) {
hwq = get_hwq(afu, i);
+ cookie = hwq->ctx_cookie;
- hwq->ctx_hndl = (u16) cxl_process_element(hwq->ctx);
+ hwq->ctx_hndl = (u16) cfg->ops->process_element(cookie);
hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host;
hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl;
@@ -1925,13 +1907,13 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
struct hwq *hwq)
{
struct device *dev = &cfg->dev->dev;
- struct cxl_context *ctx = hwq->ctx;
+ void *ctx = hwq->ctx_cookie;
int rc = 0;
enum undo_level level = UNDO_NOOP;
bool is_primary_hwq = (hwq->index == PRIMARY_HWQ);
int num_irqs = is_primary_hwq ? 3 : 2;
- rc = cxl_allocate_afu_irqs(ctx, num_irqs);
+ rc = cfg->ops->allocate_afu_irqs(ctx, num_irqs);
if (unlikely(rc)) {
dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
__func__, rc);
@@ -1939,16 +1921,16 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
goto out;
}
- rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
- "SISL_MSI_SYNC_ERROR");
+ rc = cfg->ops->map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
+ "SISL_MSI_SYNC_ERROR");
if (unlikely(rc <= 0)) {
dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
level = FREE_IRQ;
goto out;
}
- rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
- "SISL_MSI_RRQ_UPDATED");
+ rc = cfg->ops->map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
+ "SISL_MSI_RRQ_UPDATED");
if (unlikely(rc <= 0)) {
dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
level = UNMAP_ONE;
@@ -1959,8 +1941,8 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
if (!is_primary_hwq)
goto out;
- rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
- "SISL_MSI_ASYNC_ERROR");
+ rc = cfg->ops->map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
+ "SISL_MSI_ASYNC_ERROR");
if (unlikely(rc <= 0)) {
dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
level = UNMAP_TWO;
@@ -1979,7 +1961,7 @@ out:
*/
static int init_mc(struct cxlflash_cfg *cfg, u32 index)
{
- struct cxl_context *ctx;
+ void *ctx;
struct device *dev = &cfg->dev->dev;
struct hwq *hwq = get_hwq(cfg->afu, index);
int rc = 0;
@@ -1990,23 +1972,23 @@ static int init_mc(struct cxlflash_cfg *cfg, u32 index)
INIT_LIST_HEAD(&hwq->pending_cmds);
if (index == PRIMARY_HWQ)
- ctx = cxl_get_context(cfg->dev);
+ ctx = cfg->ops->get_context(cfg->dev, cfg->afu_cookie);
else
- ctx = cxl_dev_context_init(cfg->dev);
- if (unlikely(!ctx)) {
+ ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
+ if (IS_ERR_OR_NULL(ctx)) {
rc = -ENOMEM;
goto err1;
}
- WARN_ON(hwq->ctx);
- hwq->ctx = ctx;
+ WARN_ON(hwq->ctx_cookie);
+ hwq->ctx_cookie = ctx;
/* Set it up as a master with the CXL */
- cxl_set_master(ctx);
+ cfg->ops->set_master(ctx);
/* Reset AFU when initializing primary context */
if (index == PRIMARY_HWQ) {
- rc = cxl_afu_reset(ctx);
+ rc = cfg->ops->afu_reset(ctx);
if (unlikely(rc)) {
dev_err(dev, "%s: AFU reset failed rc=%d\n",
__func__, rc);
@@ -2020,11 +2002,8 @@ static int init_mc(struct cxlflash_cfg *cfg, u32 index)
goto err2;
}
- /* This performs the equivalent of the CXL_IOCTL_START_WORK.
- * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
- * element (pe) that is embedded in the context (ctx)
- */
- rc = start_context(cfg, index);
+ /* Finally, activate the context by starting it */
+ rc = cfg->ops->start_context(hwq->ctx_cookie);
if (unlikely(rc)) {
dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
level = UNMAP_THREE;
@@ -2037,9 +2016,9 @@ out:
err2:
term_intr(cfg, level, index);
if (index != PRIMARY_HWQ)
- cxl_release_context(ctx);
+ cfg->ops->release_context(ctx);
err1:
- hwq->ctx = NULL;
+ hwq->ctx_cookie = NULL;
goto out;
}
@@ -2094,7 +2073,7 @@ static int init_afu(struct cxlflash_cfg *cfg)
struct hwq *hwq;
int i;
- cxl_perst_reloads_same_image(cfg->cxl_afu, true);
+ cfg->ops->perst_reloads_same_image(cfg->afu_cookie, true);
afu->num_hwqs = afu->desired_hwqs;
for (i = 0; i < afu->num_hwqs; i++) {
@@ -2108,9 +2087,9 @@ static int init_afu(struct cxlflash_cfg *cfg)
/* Map the entire MMIO space of the AFU using the first context */
hwq = get_hwq(afu, PRIMARY_HWQ);
- afu->afu_map = cxl_psa_map(hwq->ctx);
+ afu->afu_map = cfg->ops->psa_map(hwq->ctx_cookie);
if (!afu->afu_map) {
- dev_err(dev, "%s: cxl_psa_map failed\n", __func__);
+ dev_err(dev, "%s: psa_map failed\n", __func__);
rc = -ENOMEM;
goto err1;
}
@@ -3670,6 +3649,7 @@ static int cxlflash_probe(struct pci_dev *pdev,
cfg->init_state = INIT_STATE_NONE;
cfg->dev = pdev;
+ cfg->ops = &cxlflash_cxl_ops;
cfg->cxl_fops = cxlflash_cxl_fops;
/*
@@ -3701,7 +3681,7 @@ static int cxlflash_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, cfg);
- cfg->cxl_afu = cxl_pci_to_afu(pdev);
+ cfg->afu_cookie = cfg->ops->create_afu(pdev);
rc = init_pci(cfg);
if (rc) {
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index 170fff5aeff6..2fe79df5c73c 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -810,20 +810,22 @@ err:
* init_context() - initializes a previously allocated context
* @ctxi: Previously allocated context
* @cfg: Internal structure associated with the host.
- * @ctx: Previously obtained CXL context reference.
+ * @ctx: Previously obtained context cookie.
* @ctxid: Previously obtained process element associated with CXL context.
* @file: Previously obtained file associated with CXL context.
* @perms: User-specified permissions.
+ * @irqs: User-specified number of interrupts.
*/
static void init_context(struct ctx_info *ctxi, struct cxlflash_cfg *cfg,
- struct cxl_context *ctx, int ctxid, struct file *file,
- u32 perms)
+ void *ctx, int ctxid, struct file *file, u32 perms,
+ u64 irqs)
{
struct afu *afu = cfg->afu;
ctxi->rht_perms = perms;
ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
+ ctxi->irqs = irqs;
ctxi->pid = task_tgid_nr(current); /* tgid = pid */
ctxi->ctx = ctx;
ctxi->cfg = cfg;
@@ -976,9 +978,9 @@ static int cxlflash_disk_detach(struct scsi_device *sdev,
*/
static int cxlflash_cxl_release(struct inode *inode, struct file *file)
{
- struct cxl_context *ctx = cxl_fops_get_context(file);
struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
cxl_fops);
+ void *ctx = cfg->ops->fops_get_context(file);
struct device *dev = &cfg->dev->dev;
struct ctx_info *ctxi = NULL;
struct dk_cxlflash_detach detach = { { 0 }, 0 };
@@ -986,7 +988,7 @@ static int cxlflash_cxl_release(struct inode *inode, struct file *file)
enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
int ctxid;
- ctxid = cxl_process_element(ctx);
+ ctxid = cfg->ops->process_element(ctx);
if (unlikely(ctxid < 0)) {
dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
__func__, ctx, ctxid);
@@ -1014,7 +1016,7 @@ static int cxlflash_cxl_release(struct inode *inode, struct file *file)
list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
_cxlflash_disk_detach(lun_access->sdev, ctxi, &detach);
out_release:
- cxl_fd_release(inode, file);
+ cfg->ops->fd_release(inode, file);
out:
dev_dbg(dev, "%s: returning\n", __func__);
return 0;
@@ -1089,9 +1091,9 @@ static int cxlflash_mmap_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct file *file = vma->vm_file;
- struct cxl_context *ctx = cxl_fops_get_context(file);
struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
cxl_fops);
+ void *ctx = cfg->ops->fops_get_context(file);
struct device *dev = &cfg->dev->dev;
struct ctx_info *ctxi = NULL;
struct page *err_page = NULL;
@@ -1099,7 +1101,7 @@ static int cxlflash_mmap_fault(struct vm_fault *vmf)
int rc = 0;
int ctxid;
- ctxid = cxl_process_element(ctx);
+ ctxid = cfg->ops->process_element(ctx);
if (unlikely(ctxid < 0)) {
dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
__func__, ctx, ctxid);
@@ -1162,16 +1164,16 @@ static const struct vm_operations_struct cxlflash_mmap_vmops = {
*/
static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct cxl_context *ctx = cxl_fops_get_context(file);
struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
cxl_fops);
+ void *ctx = cfg->ops->fops_get_context(file);
struct device *dev = &cfg->dev->dev;
struct ctx_info *ctxi = NULL;
enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
int ctxid;
int rc = 0;
- ctxid = cxl_process_element(ctx);
+ ctxid = cfg->ops->process_element(ctx);
if (unlikely(ctxid < 0)) {
dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
__func__, ctx, ctxid);
@@ -1188,7 +1190,7 @@ static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
dev_dbg(dev, "%s: mmap for context %d\n", __func__, ctxid);
- rc = cxl_fd_mmap(file, vma);
+ rc = cfg->ops->fd_mmap(file, vma);
if (likely(!rc)) {
/* Insert ourself in the mmap fault handler path */
ctxi->cxl_mmap_vmops = vma->vm_ops;
@@ -1307,23 +1309,23 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
struct afu *afu = cfg->afu;
struct llun_info *lli = sdev->hostdata;
struct glun_info *gli = lli->parent;
- struct cxl_ioctl_start_work *work;
struct ctx_info *ctxi = NULL;
struct lun_access *lun_access = NULL;
int rc = 0;
u32 perms;
int ctxid = -1;
+ u64 irqs = attach->num_interrupts;
u64 flags = 0UL;
u64 rctxid = 0UL;
struct file *file = NULL;
- struct cxl_context *ctx = NULL;
+ void *ctx = NULL;
int fd = -1;
- if (attach->num_interrupts > 4) {
+ if (irqs > 4) {
dev_dbg(dev, "%s: Cannot support this many interrupts %llu\n",
- __func__, attach->num_interrupts);
+ __func__, irqs);
rc = -EINVAL;
goto out;
}
@@ -1394,7 +1396,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
goto err;
}
- ctx = cxl_dev_context_init(cfg->dev);
+ ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
if (IS_ERR_OR_NULL(ctx)) {
dev_err(dev, "%s: Could not initialize context %p\n",
__func__, ctx);
@@ -1402,25 +1404,21 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
goto err;
}
- work = &ctxi->work;
- work->num_interrupts = attach->num_interrupts;
- work->flags = CXL_START_WORK_NUM_IRQS;
-
- rc = cxl_start_work(ctx, work);
+ rc = cfg->ops->start_work(ctx, irqs);
if (unlikely(rc)) {
dev_dbg(dev, "%s: Could not start context rc=%d\n",
__func__, rc);
goto err;
}
- ctxid = cxl_process_element(ctx);
+ ctxid = cfg->ops->process_element(ctx);
if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
rc = -EPERM;
goto err;
}
- file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
+ file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd);
if (unlikely(fd < 0)) {
rc = -ENODEV;
dev_err(dev, "%s: Could not get file descriptor\n", __func__);
@@ -1431,7 +1429,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
perms = SISL_RHT_PERM(attach->hdr.flags + 1);
/* Context mutex is locked upon return */
- init_context(ctxi, cfg, ctx, ctxid, file, perms);
+ init_context(ctxi, cfg, ctx, ctxid, file, perms, irqs);
rc = afu_attach(cfg, ctxi);
if (unlikely(rc)) {
@@ -1479,8 +1477,8 @@ out:
err:
/* Cleanup CXL context; okay to 'stop' even if it was not started */
if (!IS_ERR_OR_NULL(ctx)) {
- cxl_stop_context(ctx);
- cxl_release_context(ctx);
+ cfg->ops->stop_context(ctx);
+ cfg->ops->release_context(ctx);
ctx = NULL;
}
@@ -1529,10 +1527,10 @@ static int recover_context(struct cxlflash_cfg *cfg,
int fd = -1;
int ctxid = -1;
struct file *file;
- struct cxl_context *ctx;
+ void *ctx;
struct afu *afu = cfg->afu;
- ctx = cxl_dev_context_init(cfg->dev);
+ ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
if (IS_ERR_OR_NULL(ctx)) {
dev_err(dev, "%s: Could not initialize context %p\n",
__func__, ctx);
@@ -1540,21 +1538,21 @@ static int recover_context(struct cxlflash_cfg *cfg,
goto out;
}
- rc = cxl_start_work(ctx, &ctxi->work);
+ rc = cfg->ops->start_work(ctx, ctxi->irqs);
if (unlikely(rc)) {
dev_dbg(dev, "%s: Could not start context rc=%d\n",
__func__, rc);
goto err1;
}
- ctxid = cxl_process_element(ctx);
+ ctxid = cfg->ops->process_element(ctx);
if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
rc = -EPERM;
goto err2;
}
- file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
+ file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd);
if (unlikely(fd < 0)) {
rc = -ENODEV;
dev_err(dev, "%s: Could not get file descriptor\n", __func__);
@@ -1601,9 +1599,9 @@ err3:
fput(file);
put_unused_fd(fd);
err2:
- cxl_stop_context(ctx);
+ cfg->ops->stop_context(ctx);
err1:
- cxl_release_context(ctx);
+ cfg->ops->release_context(ctx);
goto out;
}
diff --git a/drivers/scsi/cxlflash/superpipe.h b/drivers/scsi/cxlflash/superpipe.h
index 0b5976829913..35c3cbf83fb5 100644
--- a/drivers/scsi/cxlflash/superpipe.h
+++ b/drivers/scsi/cxlflash/superpipe.h
@@ -96,15 +96,15 @@ struct ctx_info {
struct llun_info **rht_lun; /* Mapping of RHT entries to LUNs */
u8 *rht_needs_ws; /* User-desired write-same function per RHTE */
- struct cxl_ioctl_start_work work;
u64 ctxid;
+ u64 irqs; /* Number of interrupts requested for context */
pid_t pid;
bool initialized;
bool unavail;
bool err_recovery_active;
struct mutex mutex; /* Context protection */
struct kref kref;
- struct cxl_context *ctx;
+ void *ctx;
struct cxlflash_cfg *cfg;
struct list_head luns; /* LUNs attached to this context */
const struct vm_operations_struct *cxl_mmap_vmops;
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index fd22dc6ab5d9..022e421c2185 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -40,6 +40,7 @@
#define TPGS_SUPPORT_LBA_DEPENDENT 0x10
#define TPGS_SUPPORT_OFFLINE 0x40
#define TPGS_SUPPORT_TRANSITION 0x80
+#define TPGS_SUPPORT_ALL 0xdf
#define RTPG_FMT_MASK 0x70
#define RTPG_FMT_EXT_HDR 0x10
@@ -81,6 +82,7 @@ struct alua_port_group {
int tpgs;
int state;
int pref;
+ int valid_states;
unsigned flags; /* used for optimizing STPG */
unsigned char transition_tmo;
unsigned long expiry;
@@ -243,6 +245,7 @@ static struct alua_port_group *alua_alloc_pg(struct scsi_device *sdev,
pg->group_id = group_id;
pg->tpgs = tpgs;
pg->state = SCSI_ACCESS_STATE_OPTIMAL;
+ pg->valid_states = TPGS_SUPPORT_ALL;
if (optimize_stpg)
pg->flags |= ALUA_OPTIMIZE_STPG;
kref_init(&pg->kref);
@@ -516,7 +519,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
{
struct scsi_sense_hdr sense_hdr;
struct alua_port_group *tmp_pg;
- int len, k, off, valid_states = 0, bufflen = ALUA_RTPG_SIZE;
+ int len, k, off, bufflen = ALUA_RTPG_SIZE;
unsigned char *desc, *buff;
unsigned err, retval;
unsigned int tpg_desc_tbl_off;
@@ -541,6 +544,22 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
retval = submit_rtpg(sdev, buff, bufflen, &sense_hdr, pg->flags);
if (retval) {
+ /*
+ * Some (broken) implementations have a habit of returning
+ * an error during events such as a firmware update.
+ * If the target only supports the active/optimized state,
+ * there is no other state to switch to, so the error is
+ * not actionable; ignore it to avoid spurious failures
+ * during path failover.
+ */
+ if ((pg->valid_states & ~TPGS_SUPPORT_OPTIMIZED) == 0) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s: ignoring rtpg result %d\n",
+ ALUA_DH_NAME, retval);
+ kfree(buff);
+ return SCSI_DH_OK;
+ }
if (!scsi_sense_valid(&sense_hdr)) {
sdev_printk(KERN_INFO, sdev,
"%s: rtpg failed, result %d\n",
@@ -652,7 +671,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
rcu_read_unlock();
}
if (tmp_pg == pg)
- valid_states = desc[1];
+ tmp_pg->valid_states = desc[1];
spin_unlock_irqrestore(&tmp_pg->lock, flags);
}
kref_put(&tmp_pg->kref, release_port_group);
@@ -665,13 +684,13 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
"%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n",
ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state),
pg->pref ? "preferred" : "non-preferred",
- valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
- valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
- valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l',
- valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u',
- valid_states&TPGS_SUPPORT_STANDBY?'S':'s',
- valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n',
- valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a');
+ pg->valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
+ pg->valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
+ pg->valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l',
+ pg->valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u',
+ pg->valid_states&TPGS_SUPPORT_STANDBY?'S':'s',
+ pg->valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n',
+ pg->valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a');
switch (pg->state) {
case SCSI_ACCESS_STATE_TRANSITIONING:
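Caching valid_states in the port group is what makes the new error path above possible: the mask persists across RTPG calls, so a later failure can be tested against what the target last reported. A small helper equivalent to the check in alua_rtpg() (the helper name is hypothetical):

	static bool alua_pg_only_optimized(const struct alua_port_group *pg)
	{
		/*
		 * True when no state besides active/optimized is supported;
		 * an RTPG error is then not actionable, as there is no
		 * other state to fail over to.
		 */
		return (pg->valid_states & ~TPGS_SUPPORT_OPTIMIZED) == 0;
	}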
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
index 5e3d909cfc53..6d3e1cb4fea6 100644
--- a/drivers/scsi/fnic/fnic_debugfs.c
+++ b/drivers/scsi/fnic/fnic_debugfs.c
@@ -108,24 +108,6 @@ void fnic_debugfs_terminate(void)
}
/*
- * fnic_trace_ctrl_open - Open the trace_enable file for fnic_trace
- * Or Open fc_trace_enable file for fc_trace
- * @inode: The inode pointer.
- * @file: The file pointer to attach the trace enable/disable flag.
- *
- * Description:
- * This routine opens a debugsfs file trace_enable or fc_trace_enable.
- *
- * Returns:
- * This function returns zero if successful.
- */
-static int fnic_trace_ctrl_open(struct inode *inode, struct file *filp)
-{
- filp->private_data = inode->i_private;
- return 0;
-}
-
-/*
* fnic_trace_ctrl_read -
* Read trace_enable ,fc_trace_enable
* or fc_trace_clear debugfs file
@@ -220,7 +202,7 @@ static ssize_t fnic_trace_ctrl_write(struct file *filp,
static const struct file_operations fnic_trace_ctrl_fops = {
.owner = THIS_MODULE,
- .open = fnic_trace_ctrl_open,
+ .open = simple_open,
.read = fnic_trace_ctrl_read,
.write = fnic_trace_ctrl_write,
};
@@ -632,7 +614,7 @@ static ssize_t fnic_reset_stats_write(struct file *file,
sizeof(struct io_path_stats) - sizeof(u64));
memset(fw_stats_p+1, 0,
sizeof(struct fw_stats) - sizeof(u64));
- getnstimeofday(&stats->stats_timestamps.last_reset_time);
+ ktime_get_real_ts64(&stats->stats_timestamps.last_reset_time);
}
(*ppos)++;
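The dropped open handler duplicated the generic simple_open() helper, which is roughly the following (from fs/libfs.c):

	int simple_open(struct inode *inode, struct file *file)
	{
		if (inode->i_private)
			file->private_data = inode->i_private;
		return 0;
	}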
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 999fc7547560..c7bf316d8e83 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -442,15 +442,13 @@ static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
shost_printk(KERN_INFO, fnic->lport->host,
"process_vlan_resp: FIP VLAN %d\n", vid);
- vlan = kmalloc(sizeof(*vlan),
- GFP_ATOMIC);
+ vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
if (!vlan) {
/* retry from timer */
spin_unlock_irqrestore(&fnic->vlans_lock,
flags);
goto out;
}
- memset(vlan, 0, sizeof(struct fcoe_vlan));
vlan->vid = vid & 0x0fff;
vlan->state = FIP_VLAN_AVAIL;
list_add_tail(&vlan->list, &fnic->vlans);
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 242e2ee494a1..8cbd3c9f0b4c 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -906,7 +906,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
"icmnd_cmpl abts pending "
- "hdr status = %s tag = 0x%x sc = 0x%p"
+ "hdr status = %s tag = 0x%x sc = 0x%p "
"scsi_status = %x residual = %d\n",
fnic_fcpio_status_to_str(hdr_status),
id, sc,
diff --git a/drivers/scsi/fnic/fnic_stats.h b/drivers/scsi/fnic/fnic_stats.h
index e007feedbf72..9daa6ada6fa0 100644
--- a/drivers/scsi/fnic/fnic_stats.h
+++ b/drivers/scsi/fnic/fnic_stats.h
@@ -18,8 +18,8 @@
#define _FNIC_STATS_H_
struct stats_timestamps {
- struct timespec last_reset_time;
- struct timespec last_read_time;
+ struct timespec64 last_reset_time;
+ struct timespec64 last_read_time;
};
struct io_path_stats {
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index 4826f596cb31..abddde11982b 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -111,7 +111,7 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
int len = 0;
unsigned long flags;
char str[KSYM_SYMBOL_LEN];
- struct timespec val;
+ struct timespec64 val;
fnic_trace_data_t *tbp;
spin_lock_irqsave(&fnic_trace_lock, flags);
@@ -129,10 +129,10 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
/* Convert function pointer to function name */
if (sizeof(unsigned long) < 8) {
sprint_symbol(str, tbp->fnaddr.low);
- jiffies_to_timespec(tbp->timestamp.low, &val);
+ jiffies_to_timespec64(tbp->timestamp.low, &val);
} else {
sprint_symbol(str, tbp->fnaddr.val);
- jiffies_to_timespec(tbp->timestamp.val, &val);
+ jiffies_to_timespec64(tbp->timestamp.val, &val);
}
/*
* Dump trace buffer entry to memory file
@@ -140,8 +140,8 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
*/
len += snprintf(fnic_dbgfs_prt->buffer + len,
(trace_max_pages * PAGE_SIZE * 3) - len,
- "%16lu.%16lu %-50s %8x %8x %16llx %16llx "
- "%16llx %16llx %16llx\n", val.tv_sec,
+ "%16llu.%09lu %-50s %8x %8x %16llx %16llx "
+ "%16llx %16llx %16llx\n", (u64)val.tv_sec,
val.tv_nsec, str, tbp->host_no, tbp->tag,
tbp->data[0], tbp->data[1], tbp->data[2],
tbp->data[3], tbp->data[4]);
@@ -171,10 +171,10 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
/* Convert function pointer to function name */
if (sizeof(unsigned long) < 8) {
sprint_symbol(str, tbp->fnaddr.low);
- jiffies_to_timespec(tbp->timestamp.low, &val);
+ jiffies_to_timespec64(tbp->timestamp.low, &val);
} else {
sprint_symbol(str, tbp->fnaddr.val);
- jiffies_to_timespec(tbp->timestamp.val, &val);
+ jiffies_to_timespec64(tbp->timestamp.val, &val);
}
/*
* Dump trace buffer entry to memory file
@@ -182,8 +182,8 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
*/
len += snprintf(fnic_dbgfs_prt->buffer + len,
(trace_max_pages * PAGE_SIZE * 3) - len,
- "%16lu.%16lu %-50s %8x %8x %16llx %16llx "
- "%16llx %16llx %16llx\n", val.tv_sec,
+ "%16llu.%09lu %-50s %8x %8x %16llx %16llx "
+ "%16llx %16llx %16llx\n", (u64)val.tv_sec,
val.tv_nsec, str, tbp->host_no, tbp->tag,
tbp->data[0], tbp->data[1], tbp->data[2],
tbp->data[3], tbp->data[4]);
@@ -217,29 +217,29 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
{
int len = 0;
int buf_size = debug->buf_size;
- struct timespec val1, val2;
+ struct timespec64 val1, val2;
- getnstimeofday(&val1);
+ ktime_get_real_ts64(&val1);
len = snprintf(debug->debug_buffer + len, buf_size - len,
"------------------------------------------\n"
"\t\tTime\n"
"------------------------------------------\n");
len += snprintf(debug->debug_buffer + len, buf_size - len,
- "Current time : [%ld:%ld]\n"
- "Last stats reset time: [%ld:%ld]\n"
- "Last stats read time: [%ld:%ld]\n"
- "delta since last reset: [%ld:%ld]\n"
- "delta since last read: [%ld:%ld]\n",
- val1.tv_sec, val1.tv_nsec,
- stats->stats_timestamps.last_reset_time.tv_sec,
+ "Current time : [%lld:%ld]\n"
+ "Last stats reset time: [%lld:%09ld]\n"
+ "Last stats read time: [%lld:%ld]\n"
+ "delta since last reset: [%lld:%ld]\n"
+ "delta since last read: [%lld:%ld]\n",
+ (s64)val1.tv_sec, val1.tv_nsec,
+ (s64)stats->stats_timestamps.last_reset_time.tv_sec,
stats->stats_timestamps.last_reset_time.tv_nsec,
- stats->stats_timestamps.last_read_time.tv_sec,
+ (s64)stats->stats_timestamps.last_read_time.tv_sec,
stats->stats_timestamps.last_read_time.tv_nsec,
- timespec_sub(val1, stats->stats_timestamps.last_reset_time).tv_sec,
- timespec_sub(val1, stats->stats_timestamps.last_reset_time).tv_nsec,
- timespec_sub(val1, stats->stats_timestamps.last_read_time).tv_sec,
- timespec_sub(val1, stats->stats_timestamps.last_read_time).tv_nsec);
+ (s64)timespec64_sub(val1, stats->stats_timestamps.last_reset_time).tv_sec,
+ timespec64_sub(val1, stats->stats_timestamps.last_reset_time).tv_nsec,
+ (s64)timespec64_sub(val1, stats->stats_timestamps.last_read_time).tv_sec,
+ timespec64_sub(val1, stats->stats_timestamps.last_read_time).tv_nsec);
stats->stats_timestamps.last_read_time = val1;
@@ -403,12 +403,12 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
"\t\tOther Important Statistics\n"
"------------------------------------------\n");
- jiffies_to_timespec(stats->misc_stats.last_isr_time, &val1);
- jiffies_to_timespec(stats->misc_stats.last_ack_time, &val2);
+ jiffies_to_timespec64(stats->misc_stats.last_isr_time, &val1);
+ jiffies_to_timespec64(stats->misc_stats.last_ack_time, &val2);
len += snprintf(debug->debug_buffer + len, buf_size - len,
- "Last ISR time: %llu (%8lu.%8lu)\n"
- "Last ACK time: %llu (%8lu.%8lu)\n"
+ "Last ISR time: %llu (%8llu.%09lu)\n"
+ "Last ACK time: %llu (%8llu.%09lu)\n"
"Number of ISRs: %lld\n"
"Maximum CQ Entries: %lld\n"
"Number of ACK index out of range: %lld\n"
@@ -425,9 +425,9 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
"Number of rport not ready: %lld\n"
"Number of receive frame errors: %lld\n",
(u64)stats->misc_stats.last_isr_time,
- val1.tv_sec, val1.tv_nsec,
+ (s64)val1.tv_sec, val1.tv_nsec,
(u64)stats->misc_stats.last_ack_time,
- val2.tv_sec, val2.tv_nsec,
+ (s64)val2.tv_sec, val2.tv_nsec,
(u64)atomic64_read(&stats->misc_stats.isr_count),
(u64)atomic64_read(&stats->misc_stats.max_cq_entries),
(u64)atomic64_read(&stats->misc_stats.ack_index_out_of_range),
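The timespec64 conversion keeps these timestamps correct past January 2038: tv_sec becomes 64-bit on all architectures while tv_nsec remains a long, which is why the printk formats above gain %lld plus an explicit cast, and a zero-padded %09ld for nanoseconds. A minimal sketch of the pattern:

	struct timespec64 now;

	ktime_get_real_ts64(&now);
	/* tv_sec is 64-bit everywhere; tv_nsec remains a long. */
	pr_info("now: %lld.%09ld\n", (s64)now.tv_sec, now.tv_nsec);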
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 83357b0367d8..e7fd2877c19c 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -99,12 +99,43 @@ struct hisi_sas_hw_error {
const struct hisi_sas_hw_error *sub;
};
+struct hisi_sas_rst {
+ struct hisi_hba *hisi_hba;
+ struct completion *completion;
+ struct work_struct work;
+ bool done;
+};
+
+#define HISI_SAS_RST_WORK_INIT(r, c) \
+ { .hisi_hba = hisi_hba, \
+ .completion = &c, \
+ .work = __WORK_INITIALIZER(r.work, \
+ hisi_sas_sync_rst_work_handler), \
+ .done = false, \
+ }
+
+#define HISI_SAS_DECLARE_RST_WORK_ON_STACK(r) \
+ DECLARE_COMPLETION_ONSTACK(c); \
+ DECLARE_WORK(w, hisi_sas_sync_rst_work_handler); \
+ struct hisi_sas_rst r = HISI_SAS_RST_WORK_INIT(r, c)
+
+enum hisi_sas_bit_err_type {
+ HISI_SAS_ERR_SINGLE_BIT_ECC = 0x0,
+ HISI_SAS_ERR_MULTI_BIT_ECC = 0x1,
+};
+
+enum hisi_sas_phy_event {
+ HISI_PHYE_PHY_UP = 0U,
+ HISI_PHYE_LINK_RESET,
+ HISI_PHYES_NUM,
+};
+
struct hisi_sas_phy {
+ struct work_struct works[HISI_PHYES_NUM];
struct hisi_hba *hisi_hba;
struct hisi_sas_port *port;
struct asd_sas_phy sas_phy;
struct sas_identify identify;
- struct work_struct phyup_ws;
u64 port_id; /* from hw */
u64 dev_sas_addr;
u64 frame_rcvd_size;
@@ -205,13 +236,16 @@ struct hisi_sas_hw {
void (*phy_set_linkrate)(struct hisi_hba *hisi_hba, int phy_no,
struct sas_phy_linkrates *linkrates);
enum sas_linkrate (*phy_get_max_linkrate)(void);
- void (*free_device)(struct hisi_hba *hisi_hba,
+ void (*clear_itct)(struct hisi_hba *hisi_hba,
struct hisi_sas_device *dev);
+ void (*free_device)(struct hisi_sas_device *sas_dev);
int (*get_wideport_bitmap)(struct hisi_hba *hisi_hba, int port_id);
void (*dereg_device)(struct hisi_hba *hisi_hba,
struct domain_device *device);
int (*soft_reset)(struct hisi_hba *hisi_hba);
u32 (*get_phys_state)(struct hisi_hba *hisi_hba);
+ int (*write_gpio)(struct hisi_hba *hisi_hba, u8 reg_type,
+ u8 reg_index, u8 reg_count, u8 *write_data);
int max_command_entries;
int complete_hdr_size;
};
@@ -225,6 +259,7 @@ struct hisi_hba {
struct device *dev;
void __iomem *regs;
+ void __iomem *sgpio_regs;
struct regmap *ctrl;
u32 ctrl_reset_reg;
u32 ctrl_reset_sts_reg;
@@ -409,7 +444,8 @@ extern void hisi_sas_stop_phys(struct hisi_hba *hisi_hba);
extern void hisi_sas_init_add(struct hisi_hba *hisi_hba);
extern int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost);
extern void hisi_sas_free(struct hisi_hba *hisi_hba);
-extern u8 hisi_sas_get_ata_protocol(u8 cmd, int direction);
+extern u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis,
+ int direction);
extern struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port);
extern void hisi_sas_sata_done(struct sas_task *task,
struct hisi_sas_slot *slot);
@@ -425,5 +461,9 @@ extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot);
extern void hisi_sas_init_mem(struct hisi_hba *hisi_hba);
extern void hisi_sas_rst_work_handler(struct work_struct *work);
+extern void hisi_sas_sync_rst_work_handler(struct work_struct *work);
extern void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba);
+extern bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
+ enum hisi_sas_phy_event event);
+extern void hisi_sas_release_tasks(struct hisi_hba *hisi_hba);
#endif
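HISI_SAS_DECLARE_RST_WORK_ON_STACK() bundles an on-stack completion with a hisi_sas_rst work item so a caller can run the controller reset synchronously on the driver workqueue; hisi_sas_sync_rst_work_handler() sets ->done on success and signals the completion. A usage sketch, mirroring the hisi_sas_clear_nexus_ha() hunk later in this patch:

	HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);

	queue_work(hisi_hba->wq, &r.work);
	wait_for_completion(r.completion);
	/* ->done is only set if the reset work handler succeeded. */
	return r.done ? TMF_RESP_FUNC_COMPLETE : TMF_RESP_FUNC_FAILED;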
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 5f503cb09508..2d4dbed03ee3 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -22,10 +22,12 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
struct domain_device *device,
int abort_flag, int tag);
static int hisi_sas_softreset_ata_disk(struct domain_device *device);
+static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
+ void *funcdata);
-u8 hisi_sas_get_ata_protocol(u8 cmd, int direction)
+u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
- switch (cmd) {
+ switch (fis->command) {
case ATA_CMD_FPDMA_WRITE:
case ATA_CMD_FPDMA_READ:
case ATA_CMD_FPDMA_RECV:
@@ -77,10 +79,26 @@ u8 hisi_sas_get_ata_protocol(u8 cmd, int direction)
case ATA_CMD_ZAC_MGMT_OUT:
return HISI_SAS_SATA_PROTOCOL_NONDATA;
default:
+ {
+ if (fis->command == ATA_CMD_SET_MAX) {
+ switch (fis->features) {
+ case ATA_SET_MAX_PASSWD:
+ case ATA_SET_MAX_LOCK:
+ return HISI_SAS_SATA_PROTOCOL_PIO;
+
+ case ATA_SET_MAX_PASSWD_DMA:
+ case ATA_SET_MAX_UNLOCK_DMA:
+ return HISI_SAS_SATA_PROTOCOL_DMA;
+
+ default:
+ return HISI_SAS_SATA_PROTOCOL_NONDATA;
+ }
+ }
if (direction == DMA_NONE)
return HISI_SAS_SATA_PROTOCOL_NONDATA;
return HISI_SAS_SATA_PROTOCOL_PIO;
}
+ }
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);
@@ -192,7 +210,8 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
if (!sas_protocol_ata(task->task_proto))
if (slot->n_elem)
- dma_unmap_sg(dev, task->scatter, slot->n_elem,
+ dma_unmap_sg(dev, task->scatter,
+ task->num_scatter,
task->data_dir);
if (sas_dev)
@@ -431,7 +450,8 @@ err_out:
dev_err(dev, "task prep: failed[%d]!\n", rc);
if (!sas_protocol_ata(task->task_proto))
if (n_elem)
- dma_unmap_sg(dev, task->scatter, n_elem,
+ dma_unmap_sg(dev, task->scatter,
+ task->num_scatter,
task->data_dir);
prep_out:
return rc;
@@ -578,6 +598,9 @@ static int hisi_sas_dev_found(struct domain_device *device)
}
}
+ dev_info(dev, "dev[%d:%x] found\n",
+ sas_dev->device_id, sas_dev->dev_type);
+
return 0;
}
@@ -617,7 +640,7 @@ static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
static void hisi_sas_phyup_work(struct work_struct *work)
{
struct hisi_sas_phy *phy =
- container_of(work, struct hisi_sas_phy, phyup_ws);
+ container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
struct hisi_hba *hisi_hba = phy->hisi_hba;
struct asd_sas_phy *sas_phy = &phy->sas_phy;
int phy_no = sas_phy->id;
@@ -626,10 +649,37 @@ static void hisi_sas_phyup_work(struct work_struct *work)
hisi_sas_bytes_dmaed(hisi_hba, phy_no);
}
+static void hisi_sas_linkreset_work(struct work_struct *work)
+{
+ struct hisi_sas_phy *phy =
+ container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+
+ hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
+}
+
+static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
+ [HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
+ [HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
+};
+
+bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
+ enum hisi_sas_phy_event event)
+{
+ struct hisi_hba *hisi_hba = phy->hisi_hba;
+
+ if (WARN_ON(event >= HISI_PHYES_NUM))
+ return false;
+
+ return queue_work(hisi_hba->wq, &phy->works[event]);
+}
+EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);
+
static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ int i;
phy->hisi_hba = hisi_hba;
phy->port = NULL;
@@ -647,7 +697,8 @@ static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
sas_phy->lldd_phy = phy;
- INIT_WORK(&phy->phyup_ws, hisi_sas_phyup_work);
+ for (i = 0; i < HISI_PHYES_NUM; i++)
+ INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);
}
static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
@@ -702,7 +753,7 @@ static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
hisi_sas_do_release_task(hisi_hba, slot->task, slot);
}
-static void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
+void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
{
struct hisi_sas_device *sas_dev;
struct domain_device *device;
@@ -719,6 +770,7 @@ static void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
hisi_sas_release_task(hisi_hba, device);
}
}
+EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);
static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
struct domain_device *device)
@@ -733,17 +785,21 @@ static void hisi_sas_dev_gone(struct domain_device *device)
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct device *dev = hisi_hba->dev;
- dev_info(dev, "found dev[%d:%x] is gone\n",
+ dev_info(dev, "dev[%d:%x] is gone\n",
sas_dev->device_id, sas_dev->dev_type);
- hisi_sas_internal_task_abort(hisi_hba, device,
+ if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
+ hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_DEV, 0);
- hisi_sas_dereg_device(hisi_hba, device);
+ hisi_sas_dereg_device(hisi_hba, device);
- hisi_hba->hw->free_device(hisi_hba, sas_dev);
- device->lldd_dev = NULL;
- memset(sas_dev, 0, sizeof(*sas_dev));
+ hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
+ device->lldd_dev = NULL;
+ }
+
+ if (hisi_hba->hw->free_device)
+ hisi_hba->hw->free_device(sas_dev);
sas_dev->dev_type = SAS_PHY_UNUSED;
}
@@ -859,12 +915,13 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
struct hisi_sas_slot *slot = task->lldd_task;
- dev_err(dev, "abort tmf: TMF task timeout\n");
+ dev_err(dev, "abort tmf: TMF task timeout and not done\n");
if (slot)
slot->task = NULL;
goto ex_err;
- }
+ } else
+ dev_err(dev, "abort tmf: TMF task timeout\n");
}
if (task->task_status.resp == SAS_TASK_COMPLETE &&
@@ -985,27 +1042,42 @@ static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
sizeof(ssp_task), tmf);
}
-static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba,
- struct asd_sas_port *sas_port, enum sas_linkrate linkrate)
+static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
{
- struct hisi_sas_device *sas_dev;
- struct domain_device *device;
+ u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
int i;
for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
- sas_dev = &hisi_hba->devices[i];
- device = sas_dev->sas_device;
+ struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
+ struct domain_device *device = sas_dev->sas_device;
+ struct asd_sas_port *sas_port;
+ struct hisi_sas_port *port;
+ struct hisi_sas_phy *phy = NULL;
+ struct asd_sas_phy *sas_phy;
+
if ((sas_dev->dev_type == SAS_PHY_UNUSED)
- || !device || (device->port != sas_port))
+ || !device || !device->port)
continue;
- hisi_hba->hw->free_device(hisi_hba, sas_dev);
+ sas_port = device->port;
+ port = to_hisi_sas_port(sas_port);
+
+ list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
+ if (state & BIT(sas_phy->id)) {
+ phy = sas_phy->lldd_phy;
+ break;
+ }
+
+ if (phy) {
+ port->id = phy->port_id;
- /* Update linkrate of directly attached device. */
- if (!device->parent)
- device->linkrate = linkrate;
+ /* Update linkrate of directly attached device. */
+ if (!device->parent)
+ device->linkrate = phy->sas_phy.linkrate;
- hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
+ hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
+ } else
+ port->id = 0xff;
}
}
@@ -1020,21 +1092,17 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
struct asd_sas_port *sas_port = sas_phy->port;
- struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
bool do_port_check = !!(_sas_port != sas_port);
if (!sas_phy->phy->enabled)
continue;
/* Report PHY state change to libsas */
- if (state & (1 << phy_no)) {
- if (do_port_check && sas_port) {
+ if (state & BIT(phy_no)) {
+ if (do_port_check && sas_port && sas_port->port_dev) {
struct domain_device *dev = sas_port->port_dev;
_sas_port = sas_port;
- port->id = phy->port_id;
- hisi_sas_refresh_port_id(hisi_hba,
- sas_port, sas_phy->linkrate);
if (DEV_IS_EXPANDER(dev->dev_type))
sas_ha->notify_port_event(sas_phy,
@@ -1045,8 +1113,6 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
hisi_sas_phy_down(hisi_hba, phy_no, 0);
}
-
- drain_workqueue(hisi_hba->shost->work_q);
}
static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
@@ -1063,7 +1129,7 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
return -1;
- dev_dbg(dev, "controller resetting...\n");
+ dev_info(dev, "controller resetting...\n");
old_state = hisi_hba->hw->get_phys_state(hisi_hba);
scsi_block_requests(shost);
@@ -1072,6 +1138,7 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
if (rc) {
dev_warn(dev, "controller reset failed (%d)\n", rc);
clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+ scsi_unblock_requests(shost);
goto out;
}
spin_lock_irqsave(&hisi_hba->lock, flags);
@@ -1083,15 +1150,14 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
/* Init and wait for PHYs to come up and all libsas event finished. */
hisi_hba->hw->phys_init(hisi_hba);
msleep(1000);
- drain_workqueue(hisi_hba->wq);
- drain_workqueue(shost->work_q);
+ hisi_sas_refresh_port_id(hisi_hba);
+ scsi_unblock_requests(shost);
state = hisi_hba->hw->get_phys_state(hisi_hba);
hisi_sas_rescan_topology(hisi_hba, old_state, state);
- dev_dbg(dev, "controller reset complete\n");
+ dev_info(dev, "controller reset complete\n");
out:
- scsi_unblock_requests(shost);
clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
return rc;
@@ -1134,6 +1200,11 @@ static int hisi_sas_abort_task(struct sas_task *task)
rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_CMD, tag);
+ if (rc2 < 0) {
+ dev_err(dev, "abort task: internal abort (%d)\n", rc2);
+ return TMF_RESP_FUNC_FAILED;
+ }
+
/*
* If the TMF finds that the IO is not in the device and also
* the internal abort does not succeed, then it is safe to
@@ -1151,8 +1222,12 @@ static int hisi_sas_abort_task(struct sas_task *task)
} else if (task->task_proto & SAS_PROTOCOL_SATA ||
task->task_proto & SAS_PROTOCOL_STP) {
if (task->dev->dev_type == SAS_SATA_DEV) {
- hisi_sas_internal_task_abort(hisi_hba, device,
- HISI_SAS_INT_ABT_DEV, 0);
+ rc = hisi_sas_internal_task_abort(hisi_hba, device,
+ HISI_SAS_INT_ABT_DEV, 0);
+ if (rc < 0) {
+ dev_err(dev, "abort task: internal abort failed\n");
+ goto out;
+ }
hisi_sas_dereg_device(hisi_hba, device);
rc = hisi_sas_softreset_ata_disk(device);
}
@@ -1163,7 +1238,8 @@ static int hisi_sas_abort_task(struct sas_task *task)
rc = hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_CMD, tag);
- if (rc == TMF_RESP_FUNC_FAILED && task->lldd_task) {
+ if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
+ task->lldd_task) {
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_do_release_task(hisi_hba, task, slot);
spin_unlock_irqrestore(&hisi_hba->lock, flags);
@@ -1178,12 +1254,29 @@ out:
static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
{
+ struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
+ struct device *dev = hisi_hba->dev;
struct hisi_sas_tmf_task tmf_task;
int rc = TMF_RESP_FUNC_FAILED;
+ unsigned long flags;
+
+ rc = hisi_sas_internal_task_abort(hisi_hba, device,
+ HISI_SAS_INT_ABT_DEV, 0);
+ if (rc < 0) {
+ dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
+ return TMF_RESP_FUNC_FAILED;
+ }
+ hisi_sas_dereg_device(hisi_hba, device);
tmf_task.tmf = TMF_ABORT_TASK_SET;
rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
+ if (rc == TMF_RESP_FUNC_COMPLETE) {
+ spin_lock_irqsave(&hisi_hba->lock, flags);
+ hisi_sas_release_task(hisi_hba, device);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ }
+
return rc;
}
@@ -1213,20 +1306,25 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
- unsigned long flags;
+ struct device *dev = hisi_hba->dev;
int rc = TMF_RESP_FUNC_FAILED;
+ unsigned long flags;
if (sas_dev->dev_status != HISI_SAS_DEV_EH)
return TMF_RESP_FUNC_FAILED;
sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
- hisi_sas_internal_task_abort(hisi_hba, device,
+ rc = hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_DEV, 0);
+ if (rc < 0) {
+ dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
+ return TMF_RESP_FUNC_FAILED;
+ }
hisi_sas_dereg_device(hisi_hba, device);
rc = hisi_sas_debug_I_T_nexus_reset(device);
- if (rc == TMF_RESP_FUNC_COMPLETE) {
+ if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) {
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_release_task(hisi_hba, device);
spin_unlock_irqrestore(&hisi_hba->lock, flags);
@@ -1249,8 +1347,10 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
/* Clear internal IO and then hardreset */
rc = hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_DEV, 0);
- if (rc == TMF_RESP_FUNC_FAILED)
+ if (rc < 0) {
+ dev_err(dev, "lu_reset: internal abort failed\n");
goto out;
+ }
hisi_sas_dereg_device(hisi_hba, device);
phy = sas_get_local_phy(device);
@@ -1266,6 +1366,14 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
} else {
struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };
+ rc = hisi_sas_internal_task_abort(hisi_hba, device,
+ HISI_SAS_INT_ABT_DEV, 0);
+ if (rc < 0) {
+ dev_err(dev, "lu_reset: internal abort failed\n");
+ goto out;
+ }
+ hisi_sas_dereg_device(hisi_hba, device);
+
rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
if (rc == TMF_RESP_FUNC_COMPLETE) {
spin_lock_irqsave(&hisi_hba->lock, flags);
@@ -1283,8 +1391,14 @@ out:
static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
+ HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
- return hisi_sas_controller_reset(hisi_hba);
+ queue_work(hisi_hba->wq, &r.work);
+ wait_for_completion(r.completion);
+ if (r.done)
+ return TMF_RESP_FUNC_COMPLETE;
+
+ return TMF_RESP_FUNC_FAILED;
}
static int hisi_sas_query_task(struct sas_task *task)
@@ -1441,8 +1555,14 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
struct device *dev = hisi_hba->dev;
int res;
+ /*
+ * If prep_abort is not implemented, this hardware either does not
+ * support or does not need an internal abort. Return
+ * TMF_RESP_FUNC_FAILED so the remaining steps proceed as if the
+ * internal abort had been executed and its CQ entry returned.
+ */
if (!hisi_hba->hw->prep_abort)
- return -EOPNOTSUPP;
+ return TMF_RESP_FUNC_FAILED;
task = sas_alloc_slow_task(GFP_KERNEL);
if (!task)
@@ -1473,9 +1593,11 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
if (slot)
slot->task = NULL;
- dev_err(dev, "internal task abort: timeout.\n");
+ dev_err(dev, "internal task abort: timeout and not done.\n");
+ res = -EIO;
goto exit;
- }
+ } else
+ dev_err(dev, "internal task abort: timeout.\n");
}
if (task->task_status.resp == SAS_TASK_COMPLETE &&
@@ -1507,6 +1629,22 @@ static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
hisi_sas_port_notify_formed(sas_phy);
}
+static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy)
+{
+}
+
+static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
+ u8 reg_index, u8 reg_count, u8 *write_data)
+{
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
+
+ if (!hisi_hba->hw->write_gpio)
+ return -EOPNOTSUPP;
+
+ return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
+ reg_index, reg_count, write_data);
+}
+
static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
{
phy->phy_attached = 0;
@@ -1561,6 +1699,11 @@ EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);
struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);
+static struct device_attribute *host_attrs[] = {
+ &dev_attr_phy_event_threshold,
+ NULL,
+};
+
static struct scsi_host_template _hisi_sas_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
@@ -1580,6 +1723,7 @@ static struct scsi_host_template _hisi_sas_sht = {
.eh_target_reset_handler = sas_eh_target_reset_handler,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
+ .shost_attrs = host_attrs,
};
struct scsi_host_template *hisi_sas_sht = &_hisi_sas_sht;
EXPORT_SYMBOL_GPL(hisi_sas_sht);
@@ -1597,6 +1741,8 @@ static struct sas_domain_function_template hisi_sas_transport_ops = {
.lldd_query_task = hisi_sas_query_task,
.lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
.lldd_port_formed = hisi_sas_port_formed,
+ .lldd_port_deformed = hisi_sas_port_deformed,
+ .lldd_write_gpio = hisi_sas_write_gpio,
};
void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
@@ -1657,6 +1803,7 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
cq->hisi_hba = hisi_hba;
/* Delivery queue structure */
+ spin_lock_init(&dq->lock);
dq->id = i;
dq->hisi_hba = hisi_hba;
@@ -1803,6 +1950,17 @@ void hisi_sas_rst_work_handler(struct work_struct *work)
}
EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);
+void hisi_sas_sync_rst_work_handler(struct work_struct *work)
+{
+ struct hisi_sas_rst *rst =
+ container_of(work, struct hisi_sas_rst, work);
+
+ if (!hisi_sas_controller_reset(rst->hisi_hba))
+ rst->done = true;
+ complete(rst->completion);
+}
+EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);
+
int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
struct device *dev = hisi_hba->dev;
@@ -1909,6 +2067,13 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
if (IS_ERR(hisi_hba->regs))
goto err_out;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res) {
+ hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(hisi_hba->sgpio_regs))
+ goto err_out;
+ }
+
if (hisi_sas_alloc(hisi_hba, shost)) {
hisi_sas_free(hisi_hba);
goto err_out;
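The per-phy works[] array plus hisi_sas_notify_phy_event() gives interrupt handlers a bounds-checked way to defer phy work; the v2 hardware code below uses it for both PHY-up and identify-timeout link resets. A sketch of the call from IRQ context (the condition name is illustrative):

	/* From hard-IRQ context: defer the link reset to the workqueue. */
	if (identify_timed_out)
		hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);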
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index dc6eca8d6afd..679e76f58a0a 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -544,7 +544,7 @@ static void setup_itct_v1_hw(struct hisi_hba *hisi_hba,
(0xff00ULL << ITCT_HDR_REJ_OPEN_TL_OFF));
}
-static void free_device_v1_hw(struct hisi_hba *hisi_hba,
+static void clear_itct_v1_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_device *sas_dev)
{
u64 dev_id = sas_dev->device_id;
@@ -1482,7 +1482,7 @@ static irqreturn_t int_phyup_v1_hw(int irq_no, void *p)
else if (phy->identify.device_type != SAS_PHY_UNUSED)
phy->identify.target_port_protocols =
SAS_PROTOCOL_SMP;
- queue_work(hisi_hba->wq, &phy->phyup_ws);
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
end:
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2,
@@ -1850,7 +1850,7 @@ static const struct hisi_sas_hw hisi_sas_v1_hw = {
.hw_init = hisi_sas_v1_init,
.setup_itct = setup_itct_v1_hw,
.sl_notify = sl_notify_v1_hw,
- .free_device = free_device_v1_hw,
+ .clear_itct = clear_itct_v1_hw,
.prep_smp = prep_smp_v1_hw,
.prep_ssp = prep_ssp_v1_hw,
.get_free_slot = get_free_slot_v1_hw,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 5d3467fd728d..4ccb61e2ae5c 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -240,7 +240,12 @@
#define CHL_INT1_DMAC_TX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_TX_ECC_ERR_OFF)
#define CHL_INT1_DMAC_RX_ECC_ERR_OFF 17
#define CHL_INT1_DMAC_RX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_RX_ECC_ERR_OFF)
+#define CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF 19
+#define CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF 20
+#define CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF 21
+#define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22
#define CHL_INT2 (PORT_BASE + 0x1bc)
+#define CHL_INT2_SL_IDAF_TOUT_CONF_OFF 0
#define CHL_INT0_MSK (PORT_BASE + 0x1c0)
#define CHL_INT1_MSK (PORT_BASE + 0x1c4)
#define CHL_INT2_MSK (PORT_BASE + 0x1c8)
@@ -952,7 +957,7 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
(0x1ULL << ITCT_HDR_RTOLT_OFF));
}
-static void free_device_v2_hw(struct hisi_hba *hisi_hba,
+static void clear_itct_v2_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_device *sas_dev)
{
DECLARE_COMPLETION_ONSTACK(completion);
@@ -963,10 +968,6 @@ static void free_device_v2_hw(struct hisi_hba *hisi_hba,
sas_dev->completion = &completion;
- /* SoC bug workaround */
- if (dev_is_sata(sas_dev->sas_device))
- clear_bit(sas_dev->sata_idx, hisi_hba->sata_dev_bitmap);
-
/* clear the itct interrupt state */
if (ENT_INT_SRC3_ITC_INT_MSK & reg_val)
hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
@@ -981,6 +982,15 @@ static void free_device_v2_hw(struct hisi_hba *hisi_hba,
}
}
+static void free_device_v2_hw(struct hisi_sas_device *sas_dev)
+{
+ struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
+
+ /* SoC bug workaround */
+ if (dev_is_sata(sas_dev->sas_device))
+ clear_bit(sas_dev->sata_idx, hisi_hba->sata_dev_bitmap);
+}
+
static int reset_hw_v2_hw(struct hisi_hba *hisi_hba)
{
int i, reset_val;
@@ -1177,8 +1187,8 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xfff87fff);
hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
- hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
- hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xff857fff);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbfe);
hisi_sas_phy_write32(hisi_hba, i, SL_CFG, 0x13f801fc);
hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
@@ -2356,6 +2366,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
ts->resp = SAS_TASK_COMPLETE;
if (unlikely(aborted)) {
+ dev_dbg(dev, "slot_complete: task(%p) aborted\n", task);
ts->stat = SAS_ABORTED_TASK;
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_task_free(hisi_hba, task, slot);
@@ -2400,6 +2411,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
(!(complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))) {
u32 err_phase = (complete_hdr->dw0 & CMPLT_HDR_ERR_PHASE_MSK)
>> CMPLT_HDR_ERR_PHASE_OFF;
+ u32 *error_info = hisi_sas_status_buf_addr_mem(slot);
/* Analyse error happens on which phase TX or RX */
if (ERR_ON_TX_PHASE(err_phase))
@@ -2407,6 +2419,16 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
else if (ERR_ON_RX_PHASE(err_phase))
slot_err_v2_hw(hisi_hba, task, slot, 2);
+ if (ts->stat != SAS_DATA_UNDERRUN)
+ dev_info(dev, "erroneous completion iptt=%d task=%p "
+ "CQ hdr: 0x%x 0x%x 0x%x 0x%x "
+ "Error info: 0x%x 0x%x 0x%x 0x%x\n",
+ slot->idx, task,
+ complete_hdr->dw0, complete_hdr->dw1,
+ complete_hdr->act, complete_hdr->dw3,
+ error_info[0], error_info[1],
+ error_info[2], error_info[3]);
+
if (unlikely(slot->abort))
return ts->stat;
goto out;
@@ -2456,7 +2478,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
}
if (!slot->port->port_attached) {
- dev_err(dev, "slot complete: port %d has removed\n",
+ dev_warn(dev, "slot complete: port %d has removed\n",
slot->port->sas_port.id);
ts->stat = SAS_PHY_DOWN;
}
@@ -2517,7 +2539,7 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
dw1 |= 1 << CMD_HDR_RESET_OFF;
dw1 |= (hisi_sas_get_ata_protocol(
- task->ata_task.fis.command, task->data_dir))
+ &task->ata_task.fis, task->data_dir))
<< CMD_HDR_FRAME_TYPE_OFF;
dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
hdr->dw1 = cpu_to_le32(dw1);
@@ -2687,7 +2709,7 @@ static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
if (!timer_pending(&hisi_hba->timer))
set_link_timer_quirk(hisi_hba);
}
- queue_work(hisi_hba->wq, &phy->phyup_ws);
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
end:
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
@@ -2713,10 +2735,12 @@ static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
u32 phy_state, sl_ctrl, txid_auto;
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct hisi_sas_port *port = phy->port;
+ struct device *dev = hisi_hba->dev;
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);
phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
+ dev_info(dev, "phydown: phy%d phy_state=0x%x\n", phy_no, phy_state);
hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0);
sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
@@ -2813,6 +2837,33 @@ static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
}
+static const struct hisi_sas_hw_error port_ecc_axi_error[] = {
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_TX_ECC_ERR_OFF),
+ .msg = "dmac_tx_ecc_bad_err",
+ },
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_RX_ECC_ERR_OFF),
+ .msg = "dmac_rx_ecc_bad_err",
+ },
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF),
+ .msg = "dma_tx_axi_wr_err",
+ },
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF),
+ .msg = "dma_tx_axi_rd_err",
+ },
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF),
+ .msg = "dma_rx_axi_wr_err",
+ },
+ {
+ .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF),
+ .msg = "dma_rx_axi_rd_err",
+ },
+};
+
static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
{
struct hisi_hba *hisi_hba = p;
@@ -2829,40 +2880,55 @@ static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
HGC_INVLD_DQE_INFO_FB_CH3_OFF) & 0x1ff;
while (irq_msk) {
- if (irq_msk & (1 << phy_no)) {
- u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no,
- CHL_INT0);
- u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no,
- CHL_INT1);
- u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no,
- CHL_INT2);
-
- if (irq_value1) {
- if (irq_value1 & (CHL_INT1_DMAC_RX_ECC_ERR_MSK |
- CHL_INT1_DMAC_TX_ECC_ERR_MSK))
- panic("%s: DMAC RX/TX ecc bad error!\
- (0x%x)",
- dev_name(dev), irq_value1);
-
- hisi_sas_phy_write32(hisi_hba, phy_no,
- CHL_INT1, irq_value1);
- }
+ u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no,
+ CHL_INT0);
+ u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no,
+ CHL_INT1);
+ u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no,
+ CHL_INT2);
+
+ if ((irq_msk & (1 << phy_no)) && irq_value1) {
+ int i;
- if (irq_value2)
- hisi_sas_phy_write32(hisi_hba, phy_no,
- CHL_INT2, irq_value2);
+ for (i = 0; i < ARRAY_SIZE(port_ecc_axi_error); i++) {
+ const struct hisi_sas_hw_error *error =
+ &port_ecc_axi_error[i];
+
+ if (!(irq_value1 & error->irq_msk))
+ continue;
+
+ dev_warn(dev, "%s error (phy%d 0x%x) found!\n",
+ error->msg, phy_no, irq_value1);
+ queue_work(hisi_hba->wq, &hisi_hba->rst_work);
+ }
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ CHL_INT1, irq_value1);
+ }
- if (irq_value0) {
- if (irq_value0 & CHL_INT0_SL_RX_BCST_ACK_MSK)
- phy_bcast_v2_hw(phy_no, hisi_hba);
+ if ((irq_msk & (1 << phy_no)) && irq_value2) {
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
- hisi_sas_phy_write32(hisi_hba, phy_no,
- CHL_INT0, irq_value0
- & (~CHL_INT0_HOTPLUG_TOUT_MSK)
- & (~CHL_INT0_SL_PHY_ENABLE_MSK)
- & (~CHL_INT0_NOT_RDY_MSK));
+ if (irq_value2 & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) {
+ dev_warn(dev, "phy%d identify timeout\n",
+ phy_no);
+ hisi_sas_notify_phy_event(phy,
+ HISI_PHYE_LINK_RESET);
}
+
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ CHL_INT2, irq_value2);
+ }
+
+ if ((irq_msk & (1 << phy_no)) && irq_value0) {
+ if (irq_value0 & CHL_INT0_SL_RX_BCST_ACK_MSK)
+ phy_bcast_v2_hw(phy_no, hisi_hba);
+
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ CHL_INT0, irq_value0
+ & (~CHL_INT0_HOTPLUG_TOUT_MSK)
+ & (~CHL_INT0_SL_PHY_ENABLE_MSK)
+ & (~CHL_INT0_NOT_RDY_MSK));
}
irq_msk &= ~(1 << phy_no);
phy_no++;
@@ -2906,7 +2972,7 @@ static void multi_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba,
val = hisi_sas_read32(hisi_hba, ecc_error->reg);
val &= ecc_error->msk;
val >>= ecc_error->shift;
- dev_warn(dev, ecc_error->msg, irq_value, val);
+ dev_err(dev, ecc_error->msg, irq_value, val);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
}
@@ -3015,12 +3081,12 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
for (; sub->msk || sub->msg; sub++) {
if (!(err_value & sub->msk))
continue;
- dev_warn(dev, "%s (0x%x) found!\n",
+ dev_err(dev, "%s (0x%x) found!\n",
sub->msg, irq_value);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
} else {
- dev_warn(dev, "%s (0x%x) found!\n",
+ dev_err(dev, "%s (0x%x) found!\n",
axi_error->msg, irq_value);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
@@ -3206,7 +3272,7 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
phy->identify.device_type = SAS_SATA_DEV;
phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
- queue_work(hisi_hba->wq, &phy->phyup_ws);
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
end:
hisi_sas_write32(hisi_hba, ENT_INT_SRC1 + offset, ent_tmp);
@@ -3392,7 +3458,7 @@ static int soft_reset_v2_hw(struct hisi_hba *hisi_hba)
udelay(10);
if (cnt++ > 10) {
- dev_info(dev, "wait axi bus state to idle timeout!\n");
+ dev_err(dev, "wait axi bus state to idle timeout!\n");
return -1;
}
}
@@ -3408,6 +3474,44 @@ static int soft_reset_v2_hw(struct hisi_hba *hisi_hba)
return 0;
}
+static int write_gpio_v2_hw(struct hisi_hba *hisi_hba, u8 reg_type,
+ u8 reg_index, u8 reg_count, u8 *write_data)
+{
+ struct device *dev = hisi_hba->dev;
+ int phy_no, count;
+
+ if (!hisi_hba->sgpio_regs)
+ return -EOPNOTSUPP;
+
+ switch (reg_type) {
+ case SAS_GPIO_REG_TX:
+ count = reg_count * 4;
+ count = min(count, hisi_hba->n_phy);
+
+ for (phy_no = 0; phy_no < count; phy_no++) {
+ /*
+ * GPIO_TX[n] register has the highest numbered drive
+ * of the four in the first byte and the lowest
+ * numbered drive in the fourth byte.
+ * See SFF-8485 Rev. 0.7 Table 24.
+ */
+ void __iomem *reg_addr = hisi_hba->sgpio_regs +
+ reg_index * 4 + phy_no;
+ int data_idx = phy_no + 3 - (phy_no % 4) * 2;
+
+ writeb(write_data[data_idx], reg_addr);
+ }
+
+ break;
+ default:
+ dev_err(dev, "write gpio: unsupported or bad reg type %d\n",
+ reg_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
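The data_idx expression above implements the SFF-8485 byte order described in the comment: within each 32-bit GPIO_TX word the four drive bytes are stored highest-drive-first. A minimal stand-alone sketch of the mapping (hypothetical helper name, not part of the driver):

	/* byte index within write_data[] for a given phy, per SFF-8485 */
	static int sgpio_tx_data_idx(int phy_no)
	{
		return phy_no + 3 - (phy_no % 4) * 2;
	}

	/* phy 0..3 -> write_data[3], [2], [1], [0];
	 * phy 4..7 -> write_data[7], [6], [5], [4]; and so on.
	 */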
static const struct hisi_sas_hw hisi_sas_v2_hw = {
.hw_init = hisi_sas_v2_init,
.setup_itct = setup_itct_v2_hw,
@@ -3415,6 +3519,7 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
.alloc_dev = alloc_dev_quirk_v2_hw,
.sl_notify = sl_notify_v2_hw,
.get_wideport_bitmap = get_wideport_bitmap_v2_hw,
+ .clear_itct = clear_itct_v2_hw,
.free_device = free_device_v2_hw,
.prep_smp = prep_smp_v2_hw,
.prep_ssp = prep_ssp_v2_hw,
@@ -3434,6 +3539,7 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
.complete_hdr_size = sizeof(struct hisi_sas_complete_v2_hdr),
.soft_reset = soft_reset_v2_hw,
.get_phys_state = get_phys_state_v2_hw,
+ .write_gpio = write_gpio_v2_hw,
};
static int hisi_sas_v2_probe(struct platform_device *pdev)
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 19b1f2ffec17..a1f18689729a 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -140,6 +140,7 @@
#define RX_IDAF_DWORD0 (PORT_BASE + 0xc4)
#define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc)
#define STP_LINK_TIMER (PORT_BASE + 0x120)
+#define STP_LINK_TIMEOUT_STATE (PORT_BASE + 0x124)
#define CON_CFG_DRIVER (PORT_BASE + 0x130)
#define SAS_SSP_CON_TIMER_CFG (PORT_BASE + 0x134)
#define SAS_SMP_CON_TIMER_CFG (PORT_BASE + 0x138)
@@ -165,6 +166,8 @@
#define CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF 21
#define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22
#define CHL_INT2 (PORT_BASE + 0x1bc)
+#define CHL_INT2_SL_IDAF_TOUT_CONF_OFF 0
+#define CHL_INT2_STP_LINK_TIMEOUT_OFF 31
#define CHL_INT0_MSK (PORT_BASE + 0x1c0)
#define CHL_INT1_MSK (PORT_BASE + 0x1c4)
#define CHL_INT2_MSK (PORT_BASE + 0x1c8)
@@ -204,6 +207,13 @@
#define AM_ROB_ECC_MULBIT_ERR_ADDR_OFF 8
#define AM_ROB_ECC_MULBIT_ERR_ADDR_MSK (0xff << AM_ROB_ECC_MULBIT_ERR_ADDR_OFF)
+/* RAS registers need init */
+#define RAS_BASE (0x6000)
+#define SAS_RAS_INTR0 (RAS_BASE)
+#define SAS_RAS_INTR1 (RAS_BASE + 0x04)
+#define SAS_RAS_INTR0_MASK (RAS_BASE + 0x08)
+#define SAS_RAS_INTR1_MASK (RAS_BASE + 0x0c)
+
/* HW dma structures */
/* Delivery queue header */
/* dw0 */
@@ -422,7 +432,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xff87ffff);
- hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffbfe);
hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0);
@@ -496,6 +506,10 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_HI,
upper_32_bits(hisi_hba->initial_fis_dma));
+
+ /* RAS registers init */
+ hisi_sas_write32(hisi_hba, SAS_RAS_INTR0_MASK, 0x0);
+ hisi_sas_write32(hisi_hba, SAS_RAS_INTR1_MASK, 0x0);
}
static void config_phy_opt_mode_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
@@ -588,7 +602,7 @@ static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
(0x1ULL << ITCT_HDR_RTOLT_OFF));
}
-static void free_device_v3_hw(struct hisi_hba *hisi_hba,
+static void clear_itct_v3_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_device *sas_dev)
{
DECLARE_COMPLETION_ONSTACK(completion);
@@ -1033,7 +1047,7 @@ static int prep_ata_v3_hw(struct hisi_hba *hisi_hba,
dw1 |= 1 << CMD_HDR_RESET_OFF;
dw1 |= (hisi_sas_get_ata_protocol(
- task->ata_task.fis.command, task->data_dir))
+ &task->ata_task.fis, task->data_dir))
<< CMD_HDR_FRAME_TYPE_OFF;
dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
@@ -1138,7 +1152,7 @@ static int phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
struct dev_to_host_fis *fis;
u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};
- dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate);
+ dev_info(dev, "phyup: phy%d link_rate=%d(sata)\n", phy_no, link_rate);
initial_fis = &hisi_hba->initial_fis[phy_no];
fis = &initial_fis->fis;
sas_phy->oob_mode = SATA_OOB_MODE;
@@ -1181,7 +1195,7 @@ static int phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
phy->port_id = port_id;
phy->phy_attached = 1;
- queue_work(hisi_hba->wq, &phy->phyup_ws);
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
end:
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
@@ -1322,7 +1336,7 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
if (!(irq_value1 & error->irq_msk))
continue;
- dev_warn(dev, "%s error (phy%d 0x%x) found!\n",
+ dev_err(dev, "%s error (phy%d 0x%x) found!\n",
error->msg, phy_no, irq_value1);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
@@ -1331,9 +1345,31 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
CHL_INT1, irq_value1);
}
- if (irq_msk & (8 << (phy_no * 4)) && irq_value2)
+ if (irq_msk & (8 << (phy_no * 4)) && irq_value2) {
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+
+ if (irq_value2 & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) {
+ dev_warn(dev, "phy%d identify timeout\n",
+ phy_no);
+ hisi_sas_notify_phy_event(phy,
+ HISI_PHYE_LINK_RESET);
+
+ }
+
+ if (irq_value2 & BIT(CHL_INT2_STP_LINK_TIMEOUT_OFF)) {
+ u32 reg_value = hisi_sas_phy_read32(hisi_hba,
+ phy_no, STP_LINK_TIMEOUT_STATE);
+
+ dev_warn(dev, "phy%d stp link timeout (0x%x)\n",
+ phy_no, reg_value);
+ if (reg_value & BIT(4))
+ hisi_sas_notify_phy_event(phy,
+ HISI_PHYE_LINK_RESET);
+ }
+
hisi_sas_phy_write32(hisi_hba, phy_no,
CHL_INT2, irq_value2);
+ }
if (irq_msk & (2 << (phy_no * 4)) && irq_value0) {
@@ -1432,12 +1468,12 @@ static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p)
if (!(err_value & sub->msk))
continue;
- dev_warn(dev, "%s error (0x%x) found!\n",
+ dev_err(dev, "%s error (0x%x) found!\n",
sub->msg, irq_value);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
} else {
- dev_warn(dev, "%s error (0x%x) found!\n",
+ dev_err(dev, "%s error (0x%x) found!\n",
error->msg, irq_value);
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
}
@@ -1542,6 +1578,7 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
memset(ts, 0, sizeof(*ts));
ts->resp = SAS_TASK_COMPLETE;
if (unlikely(aborted)) {
+ dev_dbg(dev, "slot complete: task(%p) aborted\n", task);
ts->stat = SAS_ABORTED_TASK;
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_task_free(hisi_hba, task, slot);
@@ -1583,7 +1620,18 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
/* check for erroneous completion */
if ((complete_hdr->dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) {
+ u32 *error_info = hisi_sas_status_buf_addr_mem(slot);
+
slot_err_v3_hw(hisi_hba, task, slot);
+ if (ts->stat != SAS_DATA_UNDERRUN)
+ dev_info(dev, "erroneous completion iptt=%d task=%p "
+ "CQ hdr: 0x%x 0x%x 0x%x 0x%x "
+ "Error info: 0x%x 0x%x 0x%x 0x%x\n",
+ slot->idx, task,
+ complete_hdr->dw0, complete_hdr->dw1,
+ complete_hdr->act, complete_hdr->dw3,
+ error_info[0], error_info[1],
+ error_info[2], error_info[3]);
if (unlikely(slot->abort))
return ts->stat;
goto out;
@@ -1628,7 +1676,7 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
}
if (!slot->port->port_attached) {
- dev_err(dev, "slot complete: port %d has removed\n",
+ dev_warn(dev, "slot complete: port %d has removed\n",
slot->port->sas_port.id);
ts->stat = SAS_PHY_DOWN;
}
@@ -1653,9 +1701,8 @@ static void cq_tasklet_v3_hw(unsigned long val)
struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val;
struct hisi_hba *hisi_hba = cq->hisi_hba;
struct hisi_sas_slot *slot;
- struct hisi_sas_itct *itct;
struct hisi_sas_complete_v3_hdr *complete_queue;
- u32 rd_point = cq->rd_point, wr_point, dev_id;
+ u32 rd_point = cq->rd_point, wr_point;
int queue = cq->id;
struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
@@ -1671,38 +1718,11 @@ static void cq_tasklet_v3_hw(unsigned long val)
complete_hdr = &complete_queue[rd_point];
- /* Check for NCQ completion */
- if (complete_hdr->act) {
- u32 act_tmp = complete_hdr->act;
- int ncq_tag_count = ffs(act_tmp);
-
- dev_id = (complete_hdr->dw1 & CMPLT_HDR_DEV_ID_MSK) >>
- CMPLT_HDR_DEV_ID_OFF;
- itct = &hisi_hba->itct[dev_id];
-
- /* The NCQ tags are held in the itct header */
- while (ncq_tag_count) {
- __le64 *ncq_tag = &itct->qw4_15[0];
-
- ncq_tag_count -= 1;
- iptt = (ncq_tag[ncq_tag_count / 5]
- >> (ncq_tag_count % 5) * 12) & 0xfff;
-
- slot = &hisi_hba->slot_info[iptt];
- slot->cmplt_queue_slot = rd_point;
- slot->cmplt_queue = queue;
- slot_complete_v3_hw(hisi_hba, slot);
-
- act_tmp &= ~(1 << ncq_tag_count);
- ncq_tag_count = ffs(act_tmp);
- }
- } else {
- iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK;
- slot = &hisi_hba->slot_info[iptt];
- slot->cmplt_queue_slot = rd_point;
- slot->cmplt_queue = queue;
- slot_complete_v3_hw(hisi_hba, slot);
- }
+ iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK;
+ slot = &hisi_hba->slot_info[iptt];
+ slot->cmplt_queue_slot = rd_point;
+ slot->cmplt_queue = queue;
+ slot_complete_v3_hw(hisi_hba, slot);
if (++rd_point >= HISI_SAS_QUEUE_SLOTS)
rd_point = 0;
@@ -1951,7 +1971,7 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = {
.max_command_entries = HISI_SAS_COMMAND_ENTRIES_V3_HW,
.get_wideport_bitmap = get_wideport_bitmap_v3_hw,
.complete_hdr_size = sizeof(struct hisi_sas_complete_v3_hdr),
- .free_device = free_device_v3_hw,
+ .clear_itct = clear_itct_v3_hw,
.sl_notify = sl_notify_v3_hw,
.prep_ssp = prep_ssp_v3_hw,
.prep_smp = prep_smp_v3_hw,
@@ -2157,21 +2177,243 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
scsi_host_put(shost);
}
+static const struct hisi_sas_hw_error sas_ras_intr0_nfe[] = {
+ { .irq_msk = BIT(19), .msg = "HILINK_INT" },
+ { .irq_msk = BIT(20), .msg = "HILINK_PLL0_OUT_OF_LOCK" },
+ { .irq_msk = BIT(21), .msg = "HILINK_PLL1_OUT_OF_LOCK" },
+ { .irq_msk = BIT(22), .msg = "HILINK_LOSS_OF_REFCLK0" },
+ { .irq_msk = BIT(23), .msg = "HILINK_LOSS_OF_REFCLK1" },
+ { .irq_msk = BIT(24), .msg = "DMAC0_TX_POISON" },
+ { .irq_msk = BIT(25), .msg = "DMAC1_TX_POISON" },
+ { .irq_msk = BIT(26), .msg = "DMAC2_TX_POISON" },
+ { .irq_msk = BIT(27), .msg = "DMAC3_TX_POISON" },
+ { .irq_msk = BIT(28), .msg = "DMAC4_TX_POISON" },
+ { .irq_msk = BIT(29), .msg = "DMAC5_TX_POISON" },
+ { .irq_msk = BIT(30), .msg = "DMAC6_TX_POISON" },
+ { .irq_msk = BIT(31), .msg = "DMAC7_TX_POISON" },
+};
+
+static const struct hisi_sas_hw_error sas_ras_intr1_nfe[] = {
+ { .irq_msk = BIT(0), .msg = "RXM_CFG_MEM3_ECC2B_INTR" },
+ { .irq_msk = BIT(1), .msg = "RXM_CFG_MEM2_ECC2B_INTR" },
+ { .irq_msk = BIT(2), .msg = "RXM_CFG_MEM1_ECC2B_INTR" },
+ { .irq_msk = BIT(3), .msg = "RXM_CFG_MEM0_ECC2B_INTR" },
+ { .irq_msk = BIT(4), .msg = "HGC_CQE_ECC2B_INTR" },
+ { .irq_msk = BIT(5), .msg = "LM_CFG_IOSTL_ECC2B_INTR" },
+ { .irq_msk = BIT(6), .msg = "LM_CFG_ITCTL_ECC2B_INTR" },
+ { .irq_msk = BIT(7), .msg = "HGC_ITCT_ECC2B_INTR" },
+ { .irq_msk = BIT(8), .msg = "HGC_IOST_ECC2B_INTR" },
+ { .irq_msk = BIT(9), .msg = "HGC_DQE_ECC2B_INTR" },
+ { .irq_msk = BIT(10), .msg = "DMAC0_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(11), .msg = "DMAC1_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(12), .msg = "DMAC2_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(13), .msg = "DMAC3_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(14), .msg = "DMAC4_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(15), .msg = "DMAC5_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(16), .msg = "DMAC6_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(17), .msg = "DMAC7_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(18), .msg = "OOO_RAM_ECC2B_INTR" },
+ { .irq_msk = BIT(20), .msg = "HGC_DQE_POISON_INTR" },
+ { .irq_msk = BIT(21), .msg = "HGC_IOST_POISON_INTR" },
+ { .irq_msk = BIT(22), .msg = "HGC_ITCT_POISON_INTR" },
+ { .irq_msk = BIT(23), .msg = "HGC_ITCT_NCQ_POISON_INTR" },
+ { .irq_msk = BIT(24), .msg = "DMAC0_RX_POISON" },
+ { .irq_msk = BIT(25), .msg = "DMAC1_RX_POISON" },
+ { .irq_msk = BIT(26), .msg = "DMAC2_RX_POISON" },
+ { .irq_msk = BIT(27), .msg = "DMAC3_RX_POISON" },
+ { .irq_msk = BIT(28), .msg = "DMAC4_RX_POISON" },
+ { .irq_msk = BIT(29), .msg = "DMAC5_RX_POISON" },
+ { .irq_msk = BIT(30), .msg = "DMAC6_RX_POISON" },
+ { .irq_msk = BIT(31), .msg = "DMAC7_RX_POISON" },
+};
+
+static bool process_non_fatal_error_v3_hw(struct hisi_hba *hisi_hba)
+{
+ struct device *dev = hisi_hba->dev;
+ const struct hisi_sas_hw_error *ras_error;
+ bool need_reset = false;
+ u32 irq_value;
+ int i;
+
+ irq_value = hisi_sas_read32(hisi_hba, SAS_RAS_INTR0);
+ for (i = 0; i < ARRAY_SIZE(sas_ras_intr0_nfe); i++) {
+ ras_error = &sas_ras_intr0_nfe[i];
+ if (ras_error->irq_msk & irq_value) {
+ dev_warn(dev, "SAS_RAS_INTR0: %s(irq_value=0x%x) found.\n",
+ ras_error->msg, irq_value);
+ need_reset = true;
+ }
+ }
+ hisi_sas_write32(hisi_hba, SAS_RAS_INTR0, irq_value);
+
+ irq_value = hisi_sas_read32(hisi_hba, SAS_RAS_INTR1);
+ for (i = 0; i < ARRAY_SIZE(sas_ras_intr1_nfe); i++) {
+ ras_error = &sas_ras_intr1_nfe[i];
+ if (ras_error->irq_msk & irq_value) {
+ dev_warn(dev, "SAS_RAS_INTR1: %s(irq_value=0x%x) found.\n",
+ ras_error->msg, irq_value);
+ need_reset = true;
+ }
+ }
+ hisi_sas_write32(hisi_hba, SAS_RAS_INTR1, irq_value);
+
+ return need_reset;
+}
+
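process_non_fatal_error_v3_hw() above follows the same table-driven decode used throughout these drivers: read the interrupt register, match each mask against a message table, then write the raw value back to clear the sources. A minimal generic sketch of the pattern (hypothetical names, assuming the usual read/match/write-back contract):

	struct hw_error_desc { u32 irq_msk; const char *msg; };

	static bool scan_error_table(u32 irq_value,
				     const struct hw_error_desc *tbl, int nr)
	{
		bool hit = false;
		int i;

		for (i = 0; i < nr; i++)
			if (irq_value & tbl[i].irq_msk)
				hit = true;	/* log tbl[i].msg here */

		/* caller clears the sources by writing irq_value back */
		return hit;
	}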
+static pci_ers_result_t hisi_sas_error_detected_v3_hw(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
+ struct device *dev = hisi_hba->dev;
+
+ dev_info(dev, "PCI error: detected callback, state(%d)!!\n", state);
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (process_non_fatal_error_v3_hw(hisi_hba))
+ return PCI_ERS_RESULT_NEED_RESET;
+
+ return PCI_ERS_RESULT_CAN_RECOVER;
+}
+
+static pci_ers_result_t hisi_sas_mmio_enabled_v3_hw(struct pci_dev *pdev)
+{
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static pci_ers_result_t hisi_sas_slot_reset_v3_hw(struct pci_dev *pdev)
+{
+ struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
+ struct device *dev = hisi_hba->dev;
+ HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
+
+ dev_info(dev, "PCI error: slot reset callback!!\n");
+ queue_work(hisi_hba->wq, &r.work);
+ wait_for_completion(r.completion);
+ if (r.done)
+ return PCI_ERS_RESULT_RECOVERED;
+
+ return PCI_ERS_RESULT_DISCONNECT;
+}
+
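For context, the three callbacks above plug into the PCI core's AER recovery sequence, and the return codes steer which callback runs next. A condensed sketch of the documented flow (per Documentation/PCI/pci-error-recovery.txt):

	/*
	 * error_detected() -> PCI_ERS_RESULT_CAN_RECOVER:
	 *         core calls mmio_enabled(), then resume() if recovered
	 * error_detected() -> PCI_ERS_RESULT_NEED_RESET:
	 *         core resets the slot, then calls slot_reset()
	 * error_detected() -> PCI_ERS_RESULT_DISCONNECT:
	 *         device is considered lost; no further callbacks
	 */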
enum {
/* instances of the controller */
hip08,
};
+static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
+ struct device *dev = hisi_hba->dev;
+ struct Scsi_Host *shost = hisi_hba->shost;
+ u32 device_state, status;
+ int rc;
+ u32 reg_val;
+ unsigned long flags;
+
+ if (!pdev->pm_cap) {
+ dev_err(dev, "PCI PM not supported\n");
+ return -ENODEV;
+ }
+
+ set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+ scsi_block_requests(shost);
+ set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+ flush_workqueue(hisi_hba->wq);
+ /* disable DQ/PHY/bus */
+ interrupt_disable_v3_hw(hisi_hba);
+ hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
+ hisi_sas_kill_tasklets(hisi_hba);
+
+ hisi_sas_stop_phys(hisi_hba);
+
+ reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE +
+ AM_CTRL_GLOBAL);
+ reg_val |= 0x1;
+ hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE +
+ AM_CTRL_GLOBAL, reg_val);
+
+ /* wait until bus idle */
+ rc = readl_poll_timeout(hisi_hba->regs + AXI_MASTER_CFG_BASE +
+ AM_CURR_TRANS_RETURN, status, status == 0x3, 10, 100);
+ if (rc) {
+ dev_err(dev, "axi bus is not idle, rc = %d\n", rc);
+ clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+ clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+ scsi_unblock_requests(shost);
+ return rc;
+ }
+
+ hisi_sas_init_mem(hisi_hba);
+
+ device_state = pci_choose_state(pdev, state);
+ dev_warn(dev, "entering operating state [D%d]\n",
+ device_state);
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, device_state);
+
+ spin_lock_irqsave(&hisi_hba->lock, flags);
+ hisi_sas_release_tasks(hisi_hba);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
+
+ sas_suspend_ha(sha);
+ return 0;
+}
+
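The bus-idle wait above uses readl_poll_timeout() from <linux/iopoll.h>, which re-reads a register into the given variable until the condition holds or the timeout expires (here: ~10us between reads, give up after 100us), returning 0 on success and -ETIMEDOUT otherwise. A minimal sketch with hypothetical register and bit names:

	#include <linux/iopoll.h>

	static int wait_until_idle(void __iomem *base)
	{
		u32 val;

		/* poll offset 0x100 until bit 0 clears; 10us poll interval,
		 * 1000us total budget; returns 0 or -ETIMEDOUT
		 */
		return readl_poll_timeout(base + 0x100, val,
					  !(val & BIT(0)), 10, 1000);
	}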
+static int hisi_sas_v3_resume(struct pci_dev *pdev)
+{
+ struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
+ struct Scsi_Host *shost = hisi_hba->shost;
+ struct device *dev = hisi_hba->dev;
+ unsigned int rc;
+ u32 device_state = pdev->current_state;
+
+ dev_warn(dev, "resuming from operating state [D%d]\n",
+ device_state);
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_wake(pdev, PCI_D0, 0);
+ pci_restore_state(pdev);
+ rc = pci_enable_device(pdev);
+ if (rc)
+ dev_err(dev, "enable device failed during resume (%d)\n", rc);
+
+ pci_set_master(pdev);
+ scsi_unblock_requests(shost);
+ clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+
+ sas_prep_resume_ha(sha);
+ init_reg_v3_hw(hisi_hba);
+ hisi_hba->hw->phys_init(hisi_hba);
+ sas_resume_ha(sha);
+ clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
+
+ return 0;
+}
+
static const struct pci_device_id sas_v3_pci_table[] = {
{ PCI_VDEVICE(HUAWEI, 0xa230), hip08 },
{}
};
+static const struct pci_error_handlers hisi_sas_err_handler = {
+ .error_detected = hisi_sas_error_detected_v3_hw,
+ .mmio_enabled = hisi_sas_mmio_enabled_v3_hw,
+ .slot_reset = hisi_sas_slot_reset_v3_hw,
+};
+
static struct pci_driver sas_v3_pci_driver = {
.name = DRV_NAME,
.id_table = sas_v3_pci_table,
.probe = hisi_sas_v3_probe,
.remove = hisi_sas_v3_remove,
+ .suspend = hisi_sas_v3_suspend,
+ .resume = hisi_sas_v3_resume,
+ .err_handler = &hisi_sas_err_handler,
};
module_pci_driver(sas_v3_pci_driver);
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index fe3a0da3ec97..57bf43e34863 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -318,6 +318,9 @@ static void scsi_host_dev_release(struct device *dev)
scsi_proc_hostdir_rm(shost->hostt);
+ /* Wait for functions invoked through call_rcu(&shost->rcu, ...) */
+ rcu_barrier();
+
if (shost->tmf_work_q)
destroy_workqueue(shost->tmf_work_q);
if (shost->ehandler)
@@ -325,6 +328,8 @@ static void scsi_host_dev_release(struct device *dev)
if (shost->work_q)
destroy_workqueue(shost->work_q);
+ destroy_rcu_head(&shost->rcu);
+
if (shost->shost_state == SHOST_CREATED) {
/*
* Free the shost_dev device name here if scsi_host_alloc()
@@ -399,6 +404,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
INIT_LIST_HEAD(&shost->starved_list);
init_waitqueue_head(&shost->host_wait);
mutex_init(&shost->scan_mutex);
+ init_rcu_head(&shost->rcu);
index = ida_simple_get(&host_index_ida, 0, 0, GFP_KERNEL);
if (index < 0)
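The hunks above form one lifecycle: init_rcu_head() at allocation, call_rcu(&shost->rcu, ...) users while the host lives, rcu_barrier() in release to guarantee every queued callback has finished, and destroy_rcu_head() just before the memory is reused. A minimal sketch of that pattern on a hypothetical object:

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct obj {
		struct rcu_head rcu;
		/* ... payload read under rcu_read_lock() ... */
	};

	static void obj_free_cb(struct rcu_head *head)
	{
		kfree(container_of(head, struct obj, rcu));
	}

	static void obj_retire(struct obj *o)
	{
		call_rcu(&o->rcu, obj_free_cb);	/* free after a grace period */
	}

	static void teardown(void)
	{
		rcu_barrier();	/* wait for all queued obj_free_cb() calls */
	}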
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 287e5eb0723f..87b260e403ec 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -3518,7 +3518,7 @@ out:
if (rc != IO_OK)
hpsa_show_dev_msg(KERN_INFO, h, encl_dev,
- "Error, could not get enclosure information\n");
+ "Error, could not get enclosure information");
}
static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h,
@@ -4619,21 +4619,13 @@ sglist_finished:
return 0;
}
-#define BUFLEN 128
static inline void warn_zero_length_transfer(struct ctlr_info *h,
u8 *cdb, int cdb_len,
const char *func)
{
- char buf[BUFLEN];
- int outlen;
- int i;
-
- outlen = scnprintf(buf, BUFLEN,
- "%s: Blocking zero-length request: CDB:", func);
- for (i = 0; i < cdb_len; i++)
- outlen += scnprintf(buf+outlen, BUFLEN - outlen,
- "%02hhx", cdb[i]);
- dev_warn(&h->pdev->dev, "%s\n", buf);
+ dev_warn(&h->pdev->dev,
+ "%s: Blocking zero-length request: CDB:%*phN\n",
+ func, cdb_len, cdb);
}
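The replacement relies on the %*phN printk extension, which hex-dumps a small buffer (capped at 64 bytes) with no separators, taking the length from the argument list; %*ph does the same with spaces between bytes. That removes the stack buffer and the manual scnprintf() loop. Usage sketch:

	static void dump_cdb(struct device *dev, const u8 *cdb, int cdb_len)
	{
		/* e.g. a 6-byte CDB prints as "CDB:120000002400" */
		dev_warn(dev, "CDB:%*phN\n", cdb_len, cdb);
	}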
#define IO_ACCEL_INELIGIBLE 1
@@ -8223,8 +8215,6 @@ static void hpsa_set_ioaccel_status(struct ctlr_info *h)
if (!device)
continue;
- if (!device->scsi3addr)
- continue;
if (!hpsa_vpd_page_supported(h, device->scsi3addr,
HPSA_VPD_LV_IOACCEL_STATUS))
continue;
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 0d2f7eb3acb6..b1b1d3a3b173 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -181,7 +181,7 @@ static void ibmvfc_trc_start(struct ibmvfc_event *evt)
break;
default:
break;
- };
+ }
}
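The series of one-character hunks that follows removes the same wart everywhere: a semicolon after the closing brace of a switch is a null statement, which is legal C but flagged by checkpatch. A minimal illustration:

	static void demo(int x)
	{
		switch (x) {
		default:
			break;
		};	/* the trailing ';' is an empty statement; harmless,
			 * but checkpatch warns, hence this cleanup */
	}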
/**
@@ -220,7 +220,7 @@ static void ibmvfc_trc_end(struct ibmvfc_event *evt)
default:
break;
- };
+ }
}
#else
@@ -464,7 +464,7 @@ static int ibmvfc_set_host_state(struct ibmvfc_host *vhost,
default:
vhost->state = state;
break;
- };
+ }
return rc;
}
@@ -500,7 +500,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
break;
default:
break;
- };
+ }
break;
case IBMVFC_HOST_ACTION_TGT_INIT:
if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
@@ -515,7 +515,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
default:
vhost->action = action;
break;
- };
+ }
break;
case IBMVFC_HOST_ACTION_LOGO:
case IBMVFC_HOST_ACTION_QUERY_TGTS:
@@ -526,7 +526,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
default:
vhost->action = action;
break;
- };
+ }
}
/**
@@ -1601,7 +1601,7 @@ static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
case IBMVFC_ACTIVE:
result = 0;
break;
- };
+ }
return result;
}
@@ -1856,7 +1856,7 @@ static int ibmvfc_bsg_request(struct bsg_job *job)
break;
default:
return -ENOTSUPP;
- };
+ }
if (port_id == -1)
return -EINVAL;
@@ -2661,7 +2661,7 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
vhost->delay_init = 1;
__ibmvfc_reset_host(vhost);
break;
- };
+ }
break;
case IBMVFC_AE_LINK_UP:
@@ -2715,7 +2715,7 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
default:
dev_err(vhost->dev, "Unknown async event received: %lld\n", crq->event);
break;
- };
+ }
}
/**
@@ -3351,7 +3351,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
rsp->status, rsp->error, status);
break;
- };
+ }
kref_put(&tgt->kref, ibmvfc_release_tgt);
ibmvfc_free_event(evt);
@@ -3451,7 +3451,7 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), rsp->fc_type,
ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), rsp->fc_explain, status);
break;
- };
+ }
kref_put(&tgt->kref, ibmvfc_release_tgt);
ibmvfc_free_event(evt);
@@ -3522,7 +3522,7 @@ static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
default:
tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status);
break;
- };
+ }
if (vhost->action == IBMVFC_HOST_ACTION_TGT_INIT)
ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
@@ -3626,7 +3626,7 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
ibmvfc_get_fc_type(fc_reason), fc_reason,
ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
break;
- };
+ }
kref_put(&tgt->kref, ibmvfc_release_tgt);
ibmvfc_free_event(evt);
@@ -3838,7 +3838,7 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
rsp->fc_type, ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)),
rsp->fc_explain, status);
break;
- };
+ }
kref_put(&tgt->kref, ibmvfc_release_tgt);
ibmvfc_free_event(evt);
@@ -4236,7 +4236,7 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
case IBMVFC_HOST_ACTION_REENABLE:
default:
break;
- };
+ }
return 1;
}
@@ -4464,7 +4464,7 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
break;
default:
break;
- };
+ }
spin_unlock_irqrestore(vhost->host->host_lock, flags);
}
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 2799a6b08f73..c3a76af9f5fa 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -122,7 +122,7 @@ static bool connection_broken(struct scsi_info *vscsi)
cpu_to_be64(buffer[MSG_HI]),
cpu_to_be64(buffer[MSG_LOW]));
- pr_debug("connection_broken: rc %ld\n", h_return_code);
+ dev_dbg(&vscsi->dev, "Connection_broken: rc %ld\n", h_return_code);
if (h_return_code == H_CLOSED)
rc = true;
@@ -210,7 +210,7 @@ static long ibmvscsis_unregister_command_q(struct scsi_info *vscsi)
}
} while (qrc != H_SUCCESS && rc == ADAPT_SUCCESS);
- pr_debug("Freeing CRQ: phyp rc %ld, rc %ld\n", qrc, rc);
+ dev_dbg(&vscsi->dev, "Freeing CRQ: phyp rc %ld, rc %ld\n", qrc, rc);
return rc;
}
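The long sweep through this file converts pr_debug()/pr_warn()/pr_err() to their dev_*() counterparts, so each message is prefixed with the driver and device name and can be selected per device through dynamic debug. Side by side (mirroring the hunks above; `flags` stands in for the real state word):

	pr_debug("reset_queue: flags 0x%x\n", flags);		   /* anonymous */
	dev_dbg(&vscsi->dev, "reset_queue: flags 0x%x\n", flags); /* prefixed */

With CONFIG_DYNAMIC_DEBUG, the dev_dbg() calls can then be enabled with, for example, echo 'file ibmvscsi_tgt.c +p' > /sys/kernel/debug/dynamic_debug/control.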
@@ -291,9 +291,9 @@ static long ibmvscsis_free_command_q(struct scsi_info *vscsi)
ibmvscsis_delete_client_info(vscsi, false);
}
- pr_debug("free_command_q: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
- vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
- vscsi->phyp_acr_state);
+ dev_dbg(&vscsi->dev, "free_command_q: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
+ vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
+ vscsi->phyp_acr_state);
}
return rc;
}
@@ -428,8 +428,8 @@ static void ibmvscsis_disconnect(struct work_struct *work)
vscsi->flags |= DISCONNECT_SCHEDULED;
vscsi->flags &= ~SCHEDULE_DISCONNECT;
- pr_debug("disconnect: flags 0x%x, state 0x%hx\n", vscsi->flags,
- vscsi->state);
+ dev_dbg(&vscsi->dev, "disconnect: flags 0x%x, state 0x%hx\n",
+ vscsi->flags, vscsi->state);
/*
* check which state we are in and see if we
@@ -540,13 +540,14 @@ static void ibmvscsis_disconnect(struct work_struct *work)
}
if (wait_idle) {
- pr_debug("disconnect start wait, active %d, sched %d\n",
- (int)list_empty(&vscsi->active_q),
- (int)list_empty(&vscsi->schedule_q));
+ dev_dbg(&vscsi->dev, "disconnect start wait, active %d, sched %d\n",
+ (int)list_empty(&vscsi->active_q),
+ (int)list_empty(&vscsi->schedule_q));
if (!list_empty(&vscsi->active_q) ||
!list_empty(&vscsi->schedule_q)) {
vscsi->flags |= WAIT_FOR_IDLE;
- pr_debug("disconnect flags 0x%x\n", vscsi->flags);
+ dev_dbg(&vscsi->dev, "disconnect flags 0x%x\n",
+ vscsi->flags);
/*
* This routine cannot be called with the interrupt
* lock held.
@@ -555,7 +556,7 @@ static void ibmvscsis_disconnect(struct work_struct *work)
wait_for_completion(&vscsi->wait_idle);
spin_lock_bh(&vscsi->intr_lock);
}
- pr_debug("disconnect stop wait\n");
+ dev_dbg(&vscsi->dev, "disconnect stop wait\n");
ibmvscsis_adapter_idle(vscsi);
}
@@ -597,8 +598,8 @@ static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
vscsi->flags |= flag_bits;
- pr_debug("post_disconnect: new_state 0x%x, flag_bits 0x%x, vscsi->flags 0x%x, state %hx\n",
- new_state, flag_bits, vscsi->flags, vscsi->state);
+ dev_dbg(&vscsi->dev, "post_disconnect: new_state 0x%x, flag_bits 0x%x, vscsi->flags 0x%x, state %hx\n",
+ new_state, flag_bits, vscsi->flags, vscsi->state);
if (!(vscsi->flags & (DISCONNECT_SCHEDULED | SCHEDULE_DISCONNECT))) {
vscsi->flags |= SCHEDULE_DISCONNECT;
@@ -648,8 +649,8 @@ static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
}
}
- pr_debug("Leaving post_disconnect: flags 0x%x, new_state 0x%x\n",
- vscsi->flags, vscsi->new_state);
+ dev_dbg(&vscsi->dev, "Leaving post_disconnect: flags 0x%x, new_state 0x%x\n",
+ vscsi->flags, vscsi->new_state);
}
/**
@@ -724,7 +725,8 @@ static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi)
break;
case H_CLOSED:
- pr_warn("init_msg: failed to send, rc %ld\n", rc);
+ dev_warn(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
+ rc);
rc = 0;
break;
}
@@ -768,7 +770,7 @@ static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
{
long rc = ADAPT_SUCCESS;
- pr_debug("init_msg: state 0x%hx\n", vscsi->state);
+ dev_dbg(&vscsi->dev, "init_msg: state 0x%hx\n", vscsi->state);
rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
(u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
@@ -776,10 +778,10 @@ static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
if (rc == H_SUCCESS) {
vscsi->client_data.partition_number =
be64_to_cpu(*(u64 *)vscsi->map_buf);
- pr_debug("init_msg, part num %d\n",
- vscsi->client_data.partition_number);
+ dev_dbg(&vscsi->dev, "init_msg, part num %d\n",
+ vscsi->client_data.partition_number);
} else {
- pr_debug("init_msg h_vioctl rc %ld\n", rc);
+ dev_dbg(&vscsi->dev, "init_msg h_vioctl rc %ld\n", rc);
rc = ADAPT_SUCCESS;
}
@@ -813,7 +815,8 @@ static long ibmvscsis_establish_new_q(struct scsi_info *vscsi)
if (rc == H_SUCCESS)
vscsi->flags |= PREP_FOR_SUSPEND_ENABLED;
else if (rc != H_NOT_FOUND)
- pr_err("Error from Enable Prepare for Suspend: %ld\n", rc);
+ dev_err(&vscsi->dev, "Error from Enable Prepare for Suspend: %ld\n",
+ rc);
vscsi->flags &= PRESERVE_FLAG_FIELDS;
vscsi->rsp_q_timer.timer_pops = 0;
@@ -822,8 +825,8 @@ static long ibmvscsis_establish_new_q(struct scsi_info *vscsi)
rc = vio_enable_interrupts(vscsi->dma_dev);
if (rc) {
- pr_warn("establish_new_q: failed to enable interrupts, rc %ld\n",
- rc);
+ dev_warn(&vscsi->dev, "establish_new_q: failed to enable interrupts, rc %ld\n",
+ rc);
return rc;
}
@@ -883,7 +886,7 @@ static void ibmvscsis_reset_queue(struct scsi_info *vscsi)
int bytes;
long rc = ADAPT_SUCCESS;
- pr_debug("reset_queue: flags 0x%x\n", vscsi->flags);
+ dev_dbg(&vscsi->dev, "reset_queue: flags 0x%x\n", vscsi->flags);
/* don't reset, the client did it for us */
if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) {
@@ -906,7 +909,8 @@ static void ibmvscsis_reset_queue(struct scsi_info *vscsi)
}
if (rc != ADAPT_SUCCESS) {
- pr_debug("reset_queue: reg_crq rc %ld\n", rc);
+ dev_dbg(&vscsi->dev, "reset_queue: reg_crq rc %ld\n",
+ rc);
vscsi->state = ERR_DISCONNECTED;
vscsi->flags |= RESPONSE_Q_DOWN;
@@ -985,14 +989,15 @@ static long ibmvscsis_ready_for_suspend(struct scsi_info *vscsi, bool idle)
/* See if there is a Resume event in the queue */
crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
- pr_debug("ready_suspend: flags 0x%x, state 0x%hx crq_valid:%x\n",
- vscsi->flags, vscsi->state, (int)crq->valid);
+ dev_dbg(&vscsi->dev, "ready_suspend: flags 0x%x, state 0x%hx crq_valid:%x\n",
+ vscsi->flags, vscsi->state, (int)crq->valid);
if (!(vscsi->flags & PREP_FOR_SUSPEND_ABORTED) && !(crq->valid)) {
rc = h_vioctl(vscsi->dds.unit_id, H_READY_FOR_SUSPEND, 0, 0, 0,
0, 0);
if (rc) {
- pr_err("Ready for Suspend Vioctl failed: %ld\n", rc);
+ dev_err(&vscsi->dev, "Ready for Suspend Vioctl failed: %ld\n",
+ rc);
rc = 0;
}
} else if (((vscsi->flags & PREP_FOR_SUSPEND_OVERWRITE) &&
@@ -1012,7 +1017,7 @@ static long ibmvscsis_ready_for_suspend(struct scsi_info *vscsi, bool idle)
if ((crq->valid) && ((crq->valid != VALID_TRANS_EVENT) ||
(crq->format != RESUME_FROM_SUSP)))
- pr_err("Invalid element in CRQ after Prepare for Suspend");
+ dev_err(&vscsi->dev, "Invalid element in CRQ after Prepare for Suspend");
}
vscsi->flags &= ~(PREP_FOR_SUSPEND_PENDING | PREP_FOR_SUSPEND_ABORTED);
@@ -1036,8 +1041,8 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
{
long rc = ADAPT_SUCCESS;
- pr_debug("trans_event: format %d, flags 0x%x, state 0x%hx\n",
- (int)crq->format, vscsi->flags, vscsi->state);
+ dev_dbg(&vscsi->dev, "trans_event: format %d, flags 0x%x, state 0x%hx\n",
+ (int)crq->format, vscsi->flags, vscsi->state);
switch (crq->format) {
case MIGRATED:
@@ -1073,14 +1078,14 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
!list_empty(&vscsi->schedule_q) ||
!list_empty(&vscsi->waiting_rsp) ||
!list_empty(&vscsi->active_q)) {
- pr_debug("debit %d, sched %d, wait %d, active %d\n",
- vscsi->debit,
- (int)list_empty(&vscsi->schedule_q),
- (int)list_empty(&vscsi->waiting_rsp),
- (int)list_empty(&vscsi->active_q));
- pr_warn("connection lost with outstanding work\n");
+ dev_dbg(&vscsi->dev, "debit %d, sched %d, wait %d, active %d\n",
+ vscsi->debit,
+ (int)list_empty(&vscsi->schedule_q),
+ (int)list_empty(&vscsi->waiting_rsp),
+ (int)list_empty(&vscsi->active_q));
+ dev_warn(&vscsi->dev, "connection lost with outstanding work\n");
} else {
- pr_debug("trans_event: SRP Processing, but no outstanding work\n");
+ dev_dbg(&vscsi->dev, "trans_event: SRP Processing, but no outstanding work\n");
}
ibmvscsis_post_disconnect(vscsi, WAIT_IDLE,
@@ -1097,8 +1102,8 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
break;
case PREPARE_FOR_SUSPEND:
- pr_debug("Prep for Suspend, crq status = 0x%x\n",
- (int)crq->status);
+ dev_dbg(&vscsi->dev, "Prep for Suspend, crq status = 0x%x\n",
+ (int)crq->status);
switch (vscsi->state) {
case ERR_DISCONNECTED:
case WAIT_CONNECTION:
@@ -1119,15 +1124,15 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
case ERR_DISCONNECT:
case ERR_DISCONNECT_RECONNECT:
case WAIT_IDLE:
- pr_err("Invalid state for Prepare for Suspend Trans Event: 0x%x\n",
- vscsi->state);
+ dev_err(&vscsi->dev, "Invalid state for Prepare for Suspend Trans Event: 0x%x\n",
+ vscsi->state);
break;
}
break;
case RESUME_FROM_SUSP:
- pr_debug("Resume from Suspend, crq status = 0x%x\n",
- (int)crq->status);
+ dev_dbg(&vscsi->dev, "Resume from Suspend, crq status = 0x%x\n",
+ (int)crq->status);
if (vscsi->flags & PREP_FOR_SUSPEND_PENDING) {
vscsi->flags |= PREP_FOR_SUSPEND_ABORTED;
} else {
@@ -1152,8 +1157,8 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi,
rc = vscsi->flags & SCHEDULE_DISCONNECT;
- pr_debug("Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n",
- vscsi->flags, vscsi->state, rc);
+ dev_dbg(&vscsi->dev, "Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n",
+ vscsi->flags, vscsi->state, rc);
return rc;
}
@@ -1175,8 +1180,8 @@ static void ibmvscsis_poll_cmd_q(struct scsi_info *vscsi)
bool ack = true;
volatile u8 valid;
- pr_debug("poll_cmd_q: flags 0x%x, state 0x%hx, q index %ud\n",
- vscsi->flags, vscsi->state, vscsi->cmd_q.index);
+ dev_dbg(&vscsi->dev, "poll_cmd_q: flags 0x%x, state 0x%hx, q index %ud\n",
+ vscsi->flags, vscsi->state, vscsi->cmd_q.index);
rc = vscsi->flags & SCHEDULE_DISCONNECT;
crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
@@ -1204,7 +1209,7 @@ poll_work:
* if a transport event has occurred, leave
* everything but transport events on the queue
*/
- pr_debug("poll_cmd_q, ignoring\n");
+ dev_dbg(&vscsi->dev, "poll_cmd_q, ignoring\n");
/*
* need to decrement the queue index so we can
@@ -1233,7 +1238,7 @@ poll_work:
if (ack) {
vio_enable_interrupts(vscsi->dma_dev);
ack = false;
- pr_debug("poll_cmd_q, reenabling interrupts\n");
+ dev_dbg(&vscsi->dev, "poll_cmd_q, reenabling interrupts\n");
}
valid = crq->valid;
dma_rmb();
@@ -1241,7 +1246,7 @@ poll_work:
goto poll_work;
}
- pr_debug("Leaving poll_cmd_q: rc %ld\n", rc);
+ dev_dbg(&vscsi->dev, "Leaving poll_cmd_q: rc %ld\n", rc);
}
/**
@@ -1258,9 +1263,9 @@ static void ibmvscsis_free_cmd_qs(struct scsi_info *vscsi)
{
struct ibmvscsis_cmd *cmd, *nxt;
- pr_debug("free_cmd_qs: waiting_rsp empty %d, timer starter %d\n",
- (int)list_empty(&vscsi->waiting_rsp),
- vscsi->rsp_q_timer.started);
+ dev_dbg(&vscsi->dev, "free_cmd_qs: waiting_rsp empty %d, timer starter %d\n",
+ (int)list_empty(&vscsi->waiting_rsp),
+ vscsi->rsp_q_timer.started);
list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) {
list_del(&cmd->list);
@@ -1317,8 +1322,8 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
int free_qs = false;
long rc = 0;
- pr_debug("adapter_idle: flags 0x%x, state 0x%hx\n", vscsi->flags,
- vscsi->state);
+ dev_dbg(&vscsi->dev, "adapter_idle: flags 0x%x, state 0x%hx\n",
+ vscsi->flags, vscsi->state);
/* Only need to free qs if we're disconnecting from client */
if (vscsi->state != WAIT_CONNECTION || vscsi->flags & TRANS_EVENT)
@@ -1336,7 +1341,8 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
break;
case ERR_DISCONNECT_RECONNECT:
ibmvscsis_reset_queue(vscsi);
- pr_debug("adapter_idle, disc_rec: flags 0x%x\n", vscsi->flags);
+ dev_dbg(&vscsi->dev, "adapter_idle, disc_rec: flags 0x%x\n",
+ vscsi->flags);
break;
case ERR_DISCONNECT:
@@ -1347,8 +1353,8 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
vscsi->state = ERR_DISCONNECTED;
else
vscsi->state = WAIT_ENABLED;
- pr_debug("adapter_idle, disc: flags 0x%x, state 0x%hx\n",
- vscsi->flags, vscsi->state);
+ dev_dbg(&vscsi->dev, "adapter_idle, disc: flags 0x%x, state 0x%hx\n",
+ vscsi->flags, vscsi->state);
break;
case WAIT_IDLE:
@@ -1370,15 +1376,15 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
vscsi->flags &= ~DISCONNECT_SCHEDULED;
}
- pr_debug("adapter_idle, wait: flags 0x%x, state 0x%hx\n",
- vscsi->flags, vscsi->state);
+ dev_dbg(&vscsi->dev, "adapter_idle, wait: flags 0x%x, state 0x%hx\n",
+ vscsi->flags, vscsi->state);
ibmvscsis_poll_cmd_q(vscsi);
break;
case ERR_DISCONNECTED:
vscsi->flags &= ~DISCONNECT_SCHEDULED;
- pr_debug("adapter_idle, disconnected: flags 0x%x, state 0x%hx\n",
- vscsi->flags, vscsi->state);
+ dev_dbg(&vscsi->dev, "adapter_idle, disconnected: flags 0x%x, state 0x%hx\n",
+ vscsi->flags, vscsi->state);
break;
default:
@@ -1419,13 +1425,13 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
vscsi->phyp_acr_state = 0;
vscsi->phyp_acr_flags = 0;
- pr_debug("adapter_idle: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
- vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
- vscsi->phyp_acr_state);
+ dev_dbg(&vscsi->dev, "adapter_idle: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
+ vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
+ vscsi->phyp_acr_state);
}
- pr_debug("Leaving adapter_idle: flags 0x%x, state 0x%hx, new_state 0x%x\n",
- vscsi->flags, vscsi->state, vscsi->new_state);
+ dev_dbg(&vscsi->dev, "Leaving adapter_idle: flags 0x%x, state 0x%hx, new_state 0x%x\n",
+ vscsi->flags, vscsi->state, vscsi->new_state);
}
/**
@@ -1464,8 +1470,8 @@ static long ibmvscsis_copy_crq_packet(struct scsi_info *vscsi,
cmd->init_time = mftb();
iue->remote_token = crq->IU_data_ptr;
iue->iu_len = len;
- pr_debug("copy_crq: ioba 0x%llx, init_time 0x%llx\n",
- be64_to_cpu(crq->IU_data_ptr), cmd->init_time);
+ dev_dbg(&vscsi->dev, "copy_crq: ioba 0x%llx, init_time 0x%llx\n",
+ be64_to_cpu(crq->IU_data_ptr), cmd->init_time);
break;
case H_PERMISSION:
if (connection_broken(vscsi))
@@ -1536,10 +1542,10 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
if (connection_broken(vscsi))
flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
}
- pr_warn("adapter_info: h_copy_rdma from client failed, rc %ld\n",
- rc);
- pr_debug("adapter_info: ioba 0x%llx, flags 0x%x, flag_bits 0x%x\n",
- be64_to_cpu(mad->buffer), vscsi->flags, flag_bits);
+ dev_warn(&vscsi->dev, "adapter_info: h_copy_rdma from client failed, rc %ld\n",
+ rc);
+ dev_dbg(&vscsi->dev, "adapter_info: ioba 0x%llx, flags 0x%x, flag_bits 0x%x\n",
+ be64_to_cpu(mad->buffer), vscsi->flags, flag_bits);
ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
flag_bits);
goto free_dma;
@@ -1595,7 +1601,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
free_dma:
dma_free_coherent(&vscsi->dma_dev->dev, sizeof(*info), info, token);
- pr_debug("Leaving adapter_info, rc %ld\n", rc);
+ dev_dbg(&vscsi->dev, "Leaving adapter_info, rc %ld\n", rc);
return rc;
}
@@ -1629,7 +1635,7 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
*/
min_len = offsetof(struct capabilities, migration);
if ((olen < min_len) || (olen > PAGE_SIZE)) {
- pr_warn("cap_mad: invalid len %d\n", olen);
+ dev_warn(&vscsi->dev, "cap_mad: invalid len %d\n", olen);
mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
return 0;
}
@@ -1654,9 +1660,9 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
common = (struct mad_capability_common *)&cap->migration;
while ((len > 0) && (status == VIOSRP_MAD_SUCCESS) && !rc) {
- pr_debug("cap_mad: len left %hd, cap type %d, cap len %hd\n",
- len, be32_to_cpu(common->cap_type),
- be16_to_cpu(common->length));
+ dev_dbg(&vscsi->dev, "cap_mad: len left %hd, cap type %d, cap len %hd\n",
+ len, be32_to_cpu(common->cap_type),
+ be16_to_cpu(common->length));
cap_len = be16_to_cpu(common->length);
if (cap_len > len) {
@@ -1673,7 +1679,7 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
switch (common->cap_type) {
default:
- pr_debug("cap_mad: unsupported capability\n");
+ dev_dbg(&vscsi->dev, "cap_mad: unsupported capability\n");
common->server_support = 0;
flag = cpu_to_be32((u32)CAP_LIST_SUPPORTED);
cap->flags &= ~flag;
@@ -1693,8 +1699,8 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
be64_to_cpu(mad->buffer));
if (rc != H_SUCCESS) {
- pr_debug("cap_mad: failed to copy to client, rc %ld\n",
- rc);
+ dev_dbg(&vscsi->dev, "cap_mad: failed to copy to client, rc %ld\n",
+ rc);
if (rc == H_PERMISSION) {
if (connection_broken(vscsi))
@@ -1702,8 +1708,8 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
CLIENT_FAILED);
}
- pr_warn("cap_mad: error copying data to client, rc %ld\n",
- rc);
+ dev_warn(&vscsi->dev, "cap_mad: error copying data to client, rc %ld\n",
+ rc);
ibmvscsis_post_disconnect(vscsi,
ERR_DISCONNECT_RECONNECT,
flag_bits);
@@ -1712,8 +1718,8 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
dma_free_coherent(&vscsi->dma_dev->dev, olen, cap, token);
- pr_debug("Leaving cap_mad, rc %ld, client_cap 0x%x\n",
- rc, vscsi->client_cap);
+ dev_dbg(&vscsi->dev, "Leaving cap_mad, rc %ld, client_cap 0x%x\n",
+ rc, vscsi->client_cap);
return rc;
}
@@ -1749,7 +1755,7 @@ static long ibmvscsis_process_mad(struct scsi_info *vscsi, struct iu_entry *iue)
vscsi->fast_fail = true;
mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
} else {
- pr_warn("fast fail mad sent after login\n");
+ dev_warn(&vscsi->dev, "fast fail mad sent after login\n");
mad->status = cpu_to_be16(VIOSRP_MAD_FAILED);
}
break;
@@ -1809,9 +1815,9 @@ static void srp_snd_msg_failed(struct scsi_info *vscsi, long rc)
*/
if ((vscsi->rsp_q_timer.timer_pops < MAX_TIMER_POPS) ||
(vscsi->state == SRP_PROCESSING)) {
- pr_debug("snd_msg_failed: response queue full, flags 0x%x, timer started %d, pops %d\n",
- vscsi->flags, (int)vscsi->rsp_q_timer.started,
- vscsi->rsp_q_timer.timer_pops);
+ dev_dbg(&vscsi->dev, "snd_msg_failed: response queue full, flags 0x%x, timer started %d, pops %d\n",
+ vscsi->flags, (int)vscsi->rsp_q_timer.started,
+ vscsi->rsp_q_timer.timer_pops);
/*
* Check if the timer is running; if it
@@ -1947,8 +1953,9 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
be64_to_cpu(msg_hi),
be64_to_cpu(cmd->rsp.tag));
- pr_debug("send_messages: cmd %p, tag 0x%llx, rc %ld\n",
- cmd, be64_to_cpu(cmd->rsp.tag), rc);
+ dev_dbg(&vscsi->dev, "send_messages: cmd %p, tag 0x%llx, rc %ld\n",
+ cmd, be64_to_cpu(cmd->rsp.tag),
+ rc);
/* if all ok free up the command
* element resources
@@ -2003,7 +2010,8 @@ static void ibmvscsis_send_mad_resp(struct scsi_info *vscsi,
list_add_tail(&cmd->list, &vscsi->waiting_rsp);
ibmvscsis_send_messages(vscsi);
} else {
- pr_debug("Error sending mad response, rc %ld\n", rc);
+ dev_dbg(&vscsi->dev, "Error sending mad response, rc %ld\n",
+ rc);
if (rc == H_PERMISSION) {
if (connection_broken(vscsi))
flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
@@ -2039,8 +2047,8 @@ static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
* expecting a response.
*/
case WAIT_CONNECTION:
- pr_debug("mad: in Wait Connection state, ignoring MAD, flags %d\n",
- vscsi->flags);
+ dev_dbg(&vscsi->dev, "mad: in Wait Connection state, ignoring MAD, flags %d\n",
+ vscsi->flags);
return ADAPT_SUCCESS;
case SRP_PROCESSING:
@@ -2075,12 +2083,12 @@ static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
if (!rc) {
mad = (struct mad_common *)&vio_iu(iue)->mad;
- pr_debug("mad: type %d\n", be32_to_cpu(mad->type));
+ dev_dbg(&vscsi->dev, "mad: type %d\n", be32_to_cpu(mad->type));
rc = ibmvscsis_process_mad(vscsi, iue);
- pr_debug("mad: status %hd, rc %ld\n", be16_to_cpu(mad->status),
- rc);
+ dev_dbg(&vscsi->dev, "mad: status %hd, rc %ld\n",
+ be16_to_cpu(mad->status), rc);
if (!rc)
ibmvscsis_send_mad_resp(vscsi, cmd, crq);
@@ -2088,7 +2096,7 @@ static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
ibmvscsis_free_cmd_resources(vscsi, cmd);
}
- pr_debug("Leaving mad, rc %ld\n", rc);
+ dev_dbg(&vscsi->dev, "Leaving mad, rc %ld\n", rc);
return rc;
}
@@ -2211,16 +2219,17 @@ static int ibmvscsis_make_nexus(struct ibmvscsis_tport *tport)
{
char *name = tport->tport_name;
struct ibmvscsis_nexus *nexus;
+ struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);
int rc;
if (tport->ibmv_nexus) {
- pr_debug("tport->ibmv_nexus already exists\n");
+ dev_dbg(&vscsi->dev, "tport->ibmv_nexus already exists\n");
return 0;
}
nexus = kzalloc(sizeof(*nexus), GFP_KERNEL);
if (!nexus) {
- pr_err("Unable to allocate struct ibmvscsis_nexus\n");
+ dev_err(&vscsi->dev, "Unable to allocate struct ibmvscsis_nexus\n");
return -ENOMEM;
}
@@ -2316,7 +2325,7 @@ static long ibmvscsis_srp_login(struct scsi_info *vscsi,
cmd->rsp.format = VIOSRP_SRP_FORMAT;
cmd->rsp.tag = req->tag;
- pr_debug("srp_login: reason 0x%x\n", reason);
+ dev_dbg(&vscsi->dev, "srp_login: reason 0x%x\n", reason);
if (reason)
rc = ibmvscsis_srp_login_rej(vscsi, cmd, reason);
@@ -2333,7 +2342,7 @@ static long ibmvscsis_srp_login(struct scsi_info *vscsi,
ibmvscsis_free_cmd_resources(vscsi, cmd);
}
- pr_debug("Leaving srp_login, rc %ld\n", rc);
+ dev_dbg(&vscsi->dev, "Leaving srp_login, rc %ld\n", rc);
return rc;
}
@@ -2415,8 +2424,8 @@ static void ibmvscsis_srp_cmd(struct scsi_info *vscsi, struct viosrp_crq *crq)
case SRP_TSK_MGMT:
tsk = &vio_iu(iue)->srp.tsk_mgmt;
- pr_debug("tsk_mgmt tag: %llu (0x%llx)\n", tsk->tag,
- tsk->tag);
+ dev_dbg(&vscsi->dev, "tsk_mgmt tag: %llu (0x%llx)\n",
+ tsk->tag, tsk->tag);
cmd->rsp.tag = tsk->tag;
vscsi->debit += 1;
cmd->type = TASK_MANAGEMENT;
@@ -2425,8 +2434,8 @@ static void ibmvscsis_srp_cmd(struct scsi_info *vscsi, struct viosrp_crq *crq)
break;
case SRP_CMD:
- pr_debug("srp_cmd tag: %llu (0x%llx)\n", srp->tag,
- srp->tag);
+ dev_dbg(&vscsi->dev, "srp_cmd tag: %llu (0x%llx)\n",
+ srp->tag, srp->tag);
cmd->rsp.tag = srp->tag;
vscsi->debit += 1;
cmd->type = SCSI_CDB;
@@ -2603,7 +2612,7 @@ static int read_dma_window(struct scsi_info *vscsi)
"ibm,my-dma-window",
NULL);
if (!dma_window) {
- pr_err("Couldn't find ibm,my-dma-window property\n");
+ dev_err(&vscsi->dev, "Couldn't find ibm,my-dma-window property\n");
return -1;
}
@@ -2613,7 +2622,7 @@ static int read_dma_window(struct scsi_info *vscsi)
prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells",
NULL);
if (!prop) {
- pr_warn("Couldn't find ibm,#dma-address-cells property\n");
+ dev_warn(&vscsi->dev, "Couldn't find ibm,#dma-address-cells property\n");
dma_window++;
} else {
dma_window += be32_to_cpu(*prop);
@@ -2622,7 +2631,7 @@ static int read_dma_window(struct scsi_info *vscsi)
prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells",
NULL);
if (!prop) {
- pr_warn("Couldn't find ibm,#dma-size-cells property\n");
+ dev_warn(&vscsi->dev, "Couldn't find ibm,#dma-size-cells property\n");
dma_window++;
} else {
dma_window += be32_to_cpu(*prop);
@@ -2808,8 +2817,8 @@ static void ibmvscsis_parse_task(struct scsi_info *vscsi,
srp_tsk->lun.scsi_lun[0] &= 0x3f;
- pr_debug("calling submit_tmr, func %d\n",
- srp_tsk->tsk_mgmt_func);
+ dev_dbg(&vscsi->dev, "calling submit_tmr, func %d\n",
+ srp_tsk->tsk_mgmt_func);
rc = target_submit_tmr(&cmd->se_cmd, nexus->se_sess, NULL,
scsilun_to_int(&srp_tsk->lun), srp_tsk,
tcm_type, GFP_KERNEL, tag_to_abort, 0);
@@ -3113,8 +3122,8 @@ static long srp_build_response(struct scsi_info *vscsi,
if (cmd->type == SCSI_CDB) {
rsp->status = ibmvscsis_fast_fail(vscsi, cmd);
if (rsp->status) {
- pr_debug("build_resp: cmd %p, scsi status %d\n", cmd,
- (int)rsp->status);
+ dev_dbg(&vscsi->dev, "build_resp: cmd %p, scsi status %d\n",
+ cmd, (int)rsp->status);
ibmvscsis_determine_resid(se_cmd, rsp);
if (se_cmd->scsi_sense_length && se_cmd->sense_buffer) {
rsp->sense_data_len =
@@ -3127,7 +3136,8 @@ static long srp_build_response(struct scsi_info *vscsi,
rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
UCSOLNT_RESP_SHIFT;
} else if (cmd->flags & CMD_FAST_FAIL) {
- pr_debug("build_resp: cmd %p, fast fail\n", cmd);
+ dev_dbg(&vscsi->dev, "build_resp: cmd %p, fast fail\n",
+ cmd);
rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
UCSOLNT_RESP_SHIFT;
} else {
@@ -3340,7 +3350,7 @@ static void ibmvscsis_handle_crq(unsigned long data)
spin_lock_bh(&vscsi->intr_lock);
- pr_debug("got interrupt\n");
+ dev_dbg(&vscsi->dev, "got interrupt\n");
/*
* if we are in a path where we are waiting for all pending commands
@@ -3350,8 +3360,8 @@ static void ibmvscsis_handle_crq(unsigned long data)
if (TARGET_STOP(vscsi)) {
vio_enable_interrupts(vscsi->dma_dev);
- pr_debug("handle_crq, don't process: flags 0x%x, state 0x%hx\n",
- vscsi->flags, vscsi->state);
+ dev_dbg(&vscsi->dev, "handle_crq, don't process: flags 0x%x, state 0x%hx\n",
+ vscsi->flags, vscsi->state);
spin_unlock_bh(&vscsi->intr_lock);
return;
}
@@ -3414,20 +3424,20 @@ cmd_work:
if (ack) {
vio_enable_interrupts(vscsi->dma_dev);
ack = false;
- pr_debug("handle_crq, reenabling interrupts\n");
+ dev_dbg(&vscsi->dev, "handle_crq, reenabling interrupts\n");
}
valid = crq->valid;
dma_rmb();
if (valid)
goto cmd_work;
} else {
- pr_debug("handle_crq, error: flags 0x%x, state 0x%hx, crq index 0x%x\n",
- vscsi->flags, vscsi->state, vscsi->cmd_q.index);
+ dev_dbg(&vscsi->dev, "handle_crq, error: flags 0x%x, state 0x%hx, crq index 0x%x\n",
+ vscsi->flags, vscsi->state, vscsi->cmd_q.index);
}
- pr_debug("Leaving handle_crq: schedule_q empty %d, flags 0x%x, state 0x%hx\n",
- (int)list_empty(&vscsi->schedule_q), vscsi->flags,
- vscsi->state);
+ dev_dbg(&vscsi->dev, "Leaving handle_crq: schedule_q empty %d, flags 0x%x, state 0x%hx\n",
+ (int)list_empty(&vscsi->schedule_q), vscsi->flags,
+ vscsi->state);
spin_unlock_bh(&vscsi->intr_lock);
}
@@ -3443,7 +3453,7 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
vscsi = kzalloc(sizeof(*vscsi), GFP_KERNEL);
if (!vscsi) {
rc = -ENOMEM;
- pr_err("probe: allocation of adapter failed\n");
+ dev_err(&vdev->dev, "probe: allocation of adapter failed\n");
return rc;
}
@@ -3456,14 +3466,14 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
snprintf(vscsi->tport.tport_name, IBMVSCSIS_NAMELEN, "%s",
dev_name(&vdev->dev));
- pr_debug("probe tport_name: %s\n", vscsi->tport.tport_name);
+ dev_dbg(&vscsi->dev, "probe tport_name: %s\n", vscsi->tport.tport_name);
rc = read_dma_window(vscsi);
if (rc)
goto free_adapter;
- pr_debug("Probe: liobn 0x%x, riobn 0x%x\n",
- vscsi->dds.window[LOCAL].liobn,
- vscsi->dds.window[REMOTE].liobn);
+ dev_dbg(&vscsi->dev, "Probe: liobn 0x%x, riobn 0x%x\n",
+ vscsi->dds.window[LOCAL].liobn,
+ vscsi->dds.window[REMOTE].liobn);
strcpy(vscsi->eye, "VSCSI ");
strncat(vscsi->eye, vdev->name, MAX_EYE);
@@ -3541,8 +3551,8 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
* client can connect" and the client isn't activated yet.
* We'll make the call again when he sends an init msg.
*/
- pr_debug("probe hrc %ld, client partition num %d\n",
- hrc, vscsi->client_data.partition_number);
+ dev_dbg(&vscsi->dev, "probe hrc %ld, client partition num %d\n",
+ hrc, vscsi->client_data.partition_number);
tasklet_init(&vscsi->work_task, ibmvscsis_handle_crq,
(unsigned long)vscsi);
@@ -3602,7 +3612,7 @@ static int ibmvscsis_remove(struct vio_dev *vdev)
{
struct scsi_info *vscsi = dev_get_drvdata(&vdev->dev);
- pr_debug("remove (%s)\n", dev_name(&vscsi->dma_dev->dev));
+ dev_dbg(&vscsi->dev, "remove (%s)\n", dev_name(&vscsi->dma_dev->dev));
spin_lock_bh(&vscsi->intr_lock);
ibmvscsis_post_disconnect(vscsi, UNCONFIGURING, 0);
@@ -3766,14 +3776,16 @@ static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
* attempt an srp_transfer_data.
*/
if ((vscsi->flags & (CLIENT_FAILED | RESPONSE_Q_DOWN))) {
- pr_err("write_pending failed since: %d\n", vscsi->flags);
+ dev_err(&vscsi->dev, "write_pending failed since: %d\n",
+ vscsi->flags);
return -EIO;
+
}
rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma,
1, 1);
if (rc) {
- pr_err("srp_transfer_data() failed: %d\n", rc);
+ dev_err(&vscsi->dev, "srp_transfer_data() failed: %d\n", rc);
return -EIO;
}
/*
@@ -3811,7 +3823,7 @@ static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd)
rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, 1,
1);
if (rc) {
- pr_err("srp_transfer_data failed: %d\n", rc);
+ dev_err(&vscsi->dev, "srp_transfer_data failed: %d\n", rc);
sd = se_cmd->sense_buffer;
se_cmd->scsi_sense_length = 18;
memset(se_cmd->sense_buffer, 0, se_cmd->scsi_sense_length);
@@ -3834,7 +3846,7 @@ static int ibmvscsis_queue_status(struct se_cmd *se_cmd)
struct scsi_info *vscsi = cmd->adapter;
uint len;
- pr_debug("queue_status %p\n", se_cmd);
+ dev_dbg(&vscsi->dev, "queue_status %p\n", se_cmd);
srp_build_response(vscsi, cmd, &len);
cmd->rsp.format = SRP_FORMAT;
@@ -3854,8 +3866,8 @@ static void ibmvscsis_queue_tm_rsp(struct se_cmd *se_cmd)
u64 tag_to_abort = be64_to_cpu(srp_tsk->task_tag);
uint len;
- pr_debug("queue_tm_rsp %p, status %d\n",
- se_cmd, (int)se_cmd->se_tmr_req->response);
+ dev_dbg(&vscsi->dev, "queue_tm_rsp %p, status %d\n",
+ se_cmd, (int)se_cmd->se_tmr_req->response);
if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK &&
cmd->se_cmd.se_tmr_req->response == TMR_TASK_DOES_NOT_EXIST) {
@@ -3877,8 +3889,12 @@ static void ibmvscsis_queue_tm_rsp(struct se_cmd *se_cmd)
static void ibmvscsis_aborted_task(struct se_cmd *se_cmd)
{
- pr_debug("ibmvscsis_aborted_task %p task_tag: %llu\n",
- se_cmd, se_cmd->tag);
+ struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
+ se_cmd);
+ struct scsi_info *vscsi = cmd->adapter;
+
+ dev_dbg(&vscsi->dev, "ibmvscsis_aborted_task %p task_tag: %llu\n",
+ se_cmd, se_cmd->tag);
}
static struct se_wwn *ibmvscsis_make_tport(struct target_fabric_configfs *tf,
@@ -3886,12 +3902,14 @@ static struct se_wwn *ibmvscsis_make_tport(struct target_fabric_configfs *tf,
const char *name)
{
struct ibmvscsis_tport *tport;
+ struct scsi_info *vscsi;
tport = ibmvscsis_lookup_port(name);
if (tport) {
+ vscsi = container_of(tport, struct scsi_info, tport);
tport->tport_proto_id = SCSI_PROTOCOL_SRP;
- pr_debug("make_tport(%s), pointer:%p, tport_id:%x\n",
- name, tport, tport->tport_proto_id);
+ dev_dbg(&vscsi->dev, "make_tport(%s), pointer:%p, tport_id:%x\n",
+ name, tport, tport->tport_proto_id);
return &tport->tport_wwn;
}
@@ -3903,9 +3921,10 @@ static void ibmvscsis_drop_tport(struct se_wwn *wwn)
struct ibmvscsis_tport *tport = container_of(wwn,
struct ibmvscsis_tport,
tport_wwn);
+ struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);
- pr_debug("drop_tport(%s)\n",
- config_item_name(&tport->tport_wwn.wwn_group.cg_item));
+ dev_dbg(&vscsi->dev, "drop_tport(%s)\n",
+ config_item_name(&tport->tport_wwn.wwn_group.cg_item));
}
static struct se_portal_group *ibmvscsis_make_tpg(struct se_wwn *wwn,
@@ -3990,12 +4009,12 @@ static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
rc = kstrtoul(page, 0, &tmp);
if (rc < 0) {
- pr_err("Unable to extract srpt_tpg_store_enable\n");
+ dev_err(&vscsi->dev, "Unable to extract srpt_tpg_store_enable\n");
return -EINVAL;
}
if ((tmp != 0) && (tmp != 1)) {
- pr_err("Illegal value for srpt_tpg_store_enable\n");
+ dev_err(&vscsi->dev, "Illegal value for srpt_tpg_store_enable\n");
return -EINVAL;
}
@@ -4004,8 +4023,8 @@ static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
tport->enabled = true;
lrc = ibmvscsis_enable_change_state(vscsi);
if (lrc)
- pr_err("enable_change_state failed, rc %ld state %d\n",
- lrc, vscsi->state);
+ dev_err(&vscsi->dev, "enable_change_state failed, rc %ld state %d\n",
+ lrc, vscsi->state);
spin_unlock_bh(&vscsi->intr_lock);
} else {
spin_lock_bh(&vscsi->intr_lock);
@@ -4015,7 +4034,8 @@ static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
spin_unlock_bh(&vscsi->intr_lock);
}
- pr_debug("tpg_enable_store, tmp %ld, state %d\n", tmp, vscsi->state);
+ dev_dbg(&vscsi->dev, "tpg_enable_store, tmp %ld, state %d\n", tmp,
+ vscsi->state);
return count;
}
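The ibmvscsis hunks above are a mechanical conversion from pr_debug()/pr_err() to dev_dbg()/dev_err(), so every message is prefixed with the adapter's device name and can be toggled per device through dynamic debug. A minimal sketch of the pattern, assuming a driver that embeds a struct device the way this adapter does (the struct and field names here are illustrative):

	#include <linux/device.h>

	struct my_adapter {
		struct device dev;	/* embedded device, as in struct scsi_info */
		u32 flags;
	};

	static void my_handle_crq(struct my_adapter *ad)
	{
		/* dev_dbg() prints "driver device: ..." and honors per-device
		 * dynamic-debug control, unlike the global pr_debug().
		 */
		dev_dbg(&ad->dev, "handle_crq, flags 0x%x\n", ad->flags);
	}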
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index cc0187965eee..e07dd990e585 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -9653,8 +9653,8 @@ static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
if (i == 0) {
entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
ioa_cfg->hrrq[i].min_cmd_id = 0;
- ioa_cfg->hrrq[i].max_cmd_id =
- (entries_each_hrrq - 1);
+ ioa_cfg->hrrq[i].max_cmd_id =
+ (entries_each_hrrq - 1);
} else {
entries_each_hrrq =
IPR_NUM_BASE_CMD_BLKS/
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 4d934d6c3e13..6198559abbd8 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -307,6 +307,7 @@ static int iscsi_sw_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
/**
* iscsi_sw_tcp_xmit - TCP transmit
+ * @conn: iscsi connection
**/
static int iscsi_sw_tcp_xmit(struct iscsi_conn *conn)
{
@@ -357,6 +358,7 @@ error:
/**
* iscsi_tcp_xmit_qlen - return the number of bytes queued for xmit
+ * @conn: iscsi connection
*/
static inline int iscsi_sw_tcp_xmit_qlen(struct iscsi_conn *conn)
{
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 9c50d2d9f27c..15a2fef51e38 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1696,6 +1696,15 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
*/
switch (session->state) {
case ISCSI_STATE_FAILED:
+ /*
+ * cmds should fail during shutdown, if the session
+ * state is bad, allowing completion to happen
+ */
+ if (unlikely(system_state != SYSTEM_RUNNING)) {
+ reason = FAILURE_SESSION_FAILED;
+ sc->result = DID_NO_CONNECT << 16;
+ break;
+ }
case ISCSI_STATE_IN_RECOVERY:
reason = FAILURE_SESSION_IN_RECOVERY;
sc->result = DID_IMM_RETRY << 16;
@@ -1979,6 +1988,19 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
if (session->state != ISCSI_STATE_LOGGED_IN) {
/*
+ * During shutdown, if session is prematurely disconnected,
+ * recovery won't happen and there will be hung cmds. Not
+ * handling cmds would trigger EH, also bad in this case.
+ * Instead, handle cmd, allow completion to happen and let
+ * upper layer to deal with the result.
+ */
+ if (unlikely(system_state != SYSTEM_RUNNING)) {
+ sc->result = DID_NO_CONNECT << 16;
+ ISCSI_DBG_EH(session, "sc on shutdown, handled\n");
+ rc = BLK_EH_HANDLED;
+ goto done;
+ }
+ /*
* We are probably in the middle of iscsi recovery so let
* that complete and handle the error.
*/
@@ -2082,7 +2104,7 @@ done:
task->last_timeout = jiffies;
spin_unlock(&session->frwd_lock);
ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
- "timer reset" : "nh");
+ "timer reset" : "shutdown or nh");
return rc;
}
EXPORT_SYMBOL_GPL(iscsi_eh_cmd_timed_out);
@@ -2722,8 +2744,10 @@ static void iscsi_host_dec_session_cnt(struct Scsi_Host *shost)
* @iscsit: iscsi transport template
* @shost: scsi host
* @cmds_max: session can queue
+ * @dd_size: private driver data size, added to session allocation size
* @cmd_task_size: LLD task private data size
* @initial_cmdsn: initial CmdSN
+ * @id: target ID to add to this session
*
* This can be used by software iscsi_transports that allocate
* a session per scsi host.
@@ -2951,7 +2975,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_setup);
/**
* iscsi_conn_teardown - teardown iscsi connection
- * cls_conn: iscsi class connection
+ * @cls_conn: iscsi class connection
*
* TODO: we may need to make this into a two step process
* like scsi-mls remove + put host
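Both libiscsi additions key off the global system_state so that commands fail fast with DID_NO_CONNECT once a shutdown or reboot has begun, rather than entering a session recovery that can never complete (note the ISCSI_STATE_FAILED case still falls through to the recovery handling while the system is running). The check in isolation, as a hedged sketch:

	#include <linux/kernel.h>	/* system_state, SYSTEM_RUNNING */
	#include <scsi/scsi.h>		/* DID_NO_CONNECT */
	#include <scsi/scsi_cmnd.h>

	/* Sketch: DID_NO_CONNECT lets the midlayer complete the command
	 * instead of hanging it on a session that will not recover.
	 */
	static bool fail_cmd_on_shutdown(struct scsi_cmnd *sc)
	{
		if (unlikely(system_state != SYSTEM_RUNNING)) {
			sc->result = DID_NO_CONNECT << 16;
			return true;
		}
		return false;
	}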
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 63a1d69ff515..369ef8f23b24 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -798,6 +798,8 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
/**
* iscsi_tcp_hdr_recv_done - process PDU header
+ * @tcp_conn: iSCSI TCP connection
+ * @segment: the buffer segment being processed
*
* This is the callback invoked when the PDU header has
* been received. If the header is followed by additional
@@ -876,9 +878,10 @@ EXPORT_SYMBOL_GPL(iscsi_tcp_recv_segment_is_hdr);
* @conn: iscsi connection
* @skb: network buffer with header and/or data segment
* @offset: offset in skb
- * @offload: bool indicating if transfer was offloaded
+ * @offloaded: bool indicating if transfer was offloaded
+ * @status: iscsi TCP status result
*
- * Will return status of transfer in status. And will return
+ * Will return status of transfer in @status. And will return
* number of bytes copied.
*/
int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
@@ -955,9 +958,7 @@ EXPORT_SYMBOL_GPL(iscsi_tcp_recv_skb);
/**
* iscsi_tcp_task_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
- * @conn: iscsi connection
* @task: scsi command task
- * @sc: scsi command
*/
int iscsi_tcp_task_init(struct iscsi_task *task)
{
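The comment edits across iscsi_tcp.c, libiscsi.c and libiscsi_tcp.c above add the @parameter lines that scripts/kernel-doc expects, and fix a stray "cls_conn:" that was missing its "@". For reference, the expected shape of a kernel-doc block, sketched on a hypothetical function:

	/**
	 * my_xmit_segment - transmit one buffer segment
	 * @conn: iscsi connection
	 * @seg: segment to send
	 *
	 * Every parameter gets an "@name:" line in declaration order, and
	 * the opening line is "name - short description"; references to a
	 * parameter in the body use @name, as in the iscsi_tcp_recv_skb()
	 * fix above.
	 */
	static int my_xmit_segment(struct iscsi_conn *conn,
				   struct my_segment *seg);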
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 70be4425ae0b..2b3637b40dde 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -730,7 +730,6 @@ int sas_discover_sata(struct domain_device *dev)
if (res)
return res;
- sas_discover_event(dev->port, DISCE_PROBE);
return 0;
}
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 60de66252fa2..e4fd078e4175 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -212,13 +212,9 @@ void sas_notify_lldd_dev_gone(struct domain_device *dev)
}
}
-static void sas_probe_devices(struct work_struct *work)
+static void sas_probe_devices(struct asd_sas_port *port)
{
struct domain_device *dev, *n;
- struct sas_discovery_event *ev = to_sas_discovery_event(work);
- struct asd_sas_port *port = ev->port;
-
- clear_bit(DISCE_PROBE, &port->disc.pending);
/* devices must be domain members before link recovery and probe */
list_for_each_entry(dev, &port->disco_list, disco_list_node) {
@@ -294,7 +290,6 @@ int sas_discover_end_dev(struct domain_device *dev)
res = sas_notify_lldd_dev_found(dev);
if (res)
return res;
- sas_discover_event(dev->port, DISCE_PROBE);
return 0;
}
@@ -353,13 +348,9 @@ static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_d
sas_put_device(dev);
}
-static void sas_destruct_devices(struct work_struct *work)
+void sas_destruct_devices(struct asd_sas_port *port)
{
struct domain_device *dev, *n;
- struct sas_discovery_event *ev = to_sas_discovery_event(work);
- struct asd_sas_port *port = ev->port;
-
- clear_bit(DISCE_DESTRUCT, &port->disc.pending);
list_for_each_entry_safe(dev, n, &port->destroy_list, disco_list_node) {
list_del_init(&dev->disco_list_node);
@@ -370,6 +361,16 @@ static void sas_destruct_devices(struct work_struct *work)
}
}
+static void sas_destruct_ports(struct asd_sas_port *port)
+{
+ struct sas_port *sas_port, *p;
+
+ list_for_each_entry_safe(sas_port, p, &port->sas_port_del_list, del_list) {
+ list_del_init(&sas_port->del_list);
+ sas_port_delete(sas_port);
+ }
+}
+
void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev)
{
if (!test_bit(SAS_DEV_DESTROY, &dev->state) &&
@@ -384,7 +385,6 @@ void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev)
if (!test_and_set_bit(SAS_DEV_DESTROY, &dev->state)) {
sas_rphy_unlink(dev->rphy);
list_move_tail(&dev->disco_list_node, &port->destroy_list);
- sas_discover_event(dev->port, DISCE_DESTRUCT);
}
}
@@ -490,6 +490,8 @@ static void sas_discover_domain(struct work_struct *work)
port->port_dev = NULL;
}
+ sas_probe_devices(port);
+
SAS_DPRINTK("DONE DISCOVERY on port %d, pid:%d, result:%d\n", port->id,
task_pid_nr(current), error);
}
@@ -523,6 +525,10 @@ static void sas_revalidate_domain(struct work_struct *work)
port->id, task_pid_nr(current), res);
out:
mutex_unlock(&ha->disco_mutex);
+
+ sas_destruct_devices(port);
+ sas_destruct_ports(port);
+ sas_probe_devices(port);
}
/* ---------- Events ---------- */
@@ -534,7 +540,7 @@ static void sas_chain_work(struct sas_ha_struct *ha, struct sas_work *sw)
* workqueue, or known to be submitted from a context that is
* not racing against draining
*/
- scsi_queue_work(ha->core.shost, &sw->work);
+ queue_work(ha->disco_q, &sw->work);
}
static void sas_chain_event(int event, unsigned long *pending,
@@ -578,10 +584,8 @@ void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port)
static const work_func_t sas_event_fns[DISC_NUM_EVENTS] = {
[DISCE_DISCOVER_DOMAIN] = sas_discover_domain,
[DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain,
- [DISCE_PROBE] = sas_probe_devices,
[DISCE_SUSPEND] = sas_suspend_devices,
[DISCE_RESUME] = sas_resume_devices,
- [DISCE_DESTRUCT] = sas_destruct_devices,
};
disc->pending = 0;
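With DISCE_PROBE and DISCE_DESTRUCT gone, probe and destruct no longer bounce through their own discovery events: sas_discover_domain() and sas_revalidate_domain() call them directly, which keeps device probe/teardown ordered with respect to the discovery that triggered it. The new sas_destruct_ports() also shows the standard deferred-deletion walk; the _safe iterator is required because each entry is unlinked inside the loop. Generic shape of that idiom, with placeholder types:

	#include <linux/list.h>
	#include <linux/slab.h>

	struct pending_del {
		struct list_head node;
	};

	static void drain_del_list(struct list_head *head)
	{
		struct pending_del *p, *n;

		/* The _safe variant caches the next pointer, so the current
		 * entry may be unlinked and freed without breaking the walk.
		 */
		list_for_each_entry_safe(p, n, head, node) {
			list_del_init(&p->node);
			kfree(p);
		}
	}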
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index 0bb9eefc08c8..ae923eb6de95 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -29,7 +29,8 @@
int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
{
- int rc = 0;
+ /* it's added to the defer_q when draining so return success */
+ int rc = 1;
if (!test_bit(SAS_HA_REGISTERED, &ha->state))
return 0;
@@ -39,24 +40,20 @@ int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
if (list_empty(&sw->drain_node))
list_add_tail(&sw->drain_node, &ha->defer_q);
} else
- rc = scsi_queue_work(ha->core.shost, &sw->work);
+ rc = queue_work(ha->event_q, &sw->work);
return rc;
}
-static int sas_queue_event(int event, unsigned long *pending,
- struct sas_work *work,
+static int sas_queue_event(int event, struct sas_work *work,
struct sas_ha_struct *ha)
{
- int rc = 0;
+ unsigned long flags;
+ int rc;
- if (!test_and_set_bit(event, pending)) {
- unsigned long flags;
-
- spin_lock_irqsave(&ha->lock, flags);
- rc = sas_queue_work(ha, work);
- spin_unlock_irqrestore(&ha->lock, flags);
- }
+ spin_lock_irqsave(&ha->lock, flags);
+ rc = sas_queue_work(ha, work);
+ spin_unlock_irqrestore(&ha->lock, flags);
return rc;
}
@@ -64,21 +61,25 @@ static int sas_queue_event(int event, unsigned long *pending,
void __sas_drain_work(struct sas_ha_struct *ha)
{
- struct workqueue_struct *wq = ha->core.shost->work_q;
struct sas_work *sw, *_sw;
+ int ret;
set_bit(SAS_HA_DRAINING, &ha->state);
/* flush submitters */
spin_lock_irq(&ha->lock);
spin_unlock_irq(&ha->lock);
- drain_workqueue(wq);
+ drain_workqueue(ha->event_q);
+ drain_workqueue(ha->disco_q);
spin_lock_irq(&ha->lock);
clear_bit(SAS_HA_DRAINING, &ha->state);
list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) {
list_del_init(&sw->drain_node);
- sas_queue_work(ha, sw);
+ ret = sas_queue_work(ha, sw);
+ if (ret != 1)
+ sas_free_event(to_asd_sas_event(&sw->work));
+
}
spin_unlock_irq(&ha->lock);
}
@@ -115,33 +116,78 @@ void sas_enable_revalidation(struct sas_ha_struct *ha)
struct asd_sas_port *port = ha->sas_port[i];
const int ev = DISCE_REVALIDATE_DOMAIN;
struct sas_discovery *d = &port->disc;
+ struct asd_sas_phy *sas_phy;
if (!test_and_clear_bit(ev, &d->pending))
continue;
- sas_queue_event(ev, &d->pending, &d->disc_work[ev].work, ha);
+ if (list_empty(&port->phy_list))
+ continue;
+
+ sas_phy = container_of(port->phy_list.next, struct asd_sas_phy,
+ port_phy_el);
+ ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
}
mutex_unlock(&ha->disco_mutex);
}
+
+static void sas_port_event_worker(struct work_struct *work)
+{
+ struct asd_sas_event *ev = to_asd_sas_event(work);
+
+ sas_port_event_fns[ev->event](work);
+ sas_free_event(ev);
+}
+
+static void sas_phy_event_worker(struct work_struct *work)
+{
+ struct asd_sas_event *ev = to_asd_sas_event(work);
+
+ sas_phy_event_fns[ev->event](work);
+ sas_free_event(ev);
+}
+
static int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event)
{
+ struct asd_sas_event *ev;
struct sas_ha_struct *ha = phy->ha;
+ int ret;
BUG_ON(event >= PORT_NUM_EVENTS);
- return sas_queue_event(event, &phy->port_events_pending,
- &phy->port_events[event].work, ha);
+ ev = sas_alloc_event(phy);
+ if (!ev)
+ return -ENOMEM;
+
+ INIT_SAS_EVENT(ev, sas_port_event_worker, phy, event);
+
+ ret = sas_queue_event(event, &ev->work, ha);
+ if (ret != 1)
+ sas_free_event(ev);
+
+ return ret;
}
int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
{
+ struct asd_sas_event *ev;
struct sas_ha_struct *ha = phy->ha;
+ int ret;
BUG_ON(event >= PHY_NUM_EVENTS);
- return sas_queue_event(event, &phy->phy_events_pending,
- &phy->phy_events[event].work, ha);
+ ev = sas_alloc_event(phy);
+ if (!ev)
+ return -ENOMEM;
+
+ INIT_SAS_EVENT(ev, sas_phy_event_worker, phy, event);
+
+ ret = sas_queue_event(event, &ev->work, ha);
+ if (ret != 1)
+ sas_free_event(ev);
+
+ return ret;
}
int sas_init_events(struct sas_ha_struct *sas_ha)
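This rewrite is the core of the series: instead of one statically embedded work item per event type (deduplicated through the now-deleted *_events_pending bitmaps), each notification allocates its own event, and the worker frees it after dispatch. Repeated events of the same type are no longer lost; the cost is an allocation per event, bounded by the event_thres logic in sas_alloc_event(). A hedged sketch of that lifecycle using plain workqueue calls and placeholder names:

	#include <linux/workqueue.h>
	#include <linux/slab.h>

	struct my_event {
		struct work_struct work;
		int type;
	};

	static void my_event_worker(struct work_struct *work)
	{
		struct my_event *ev = container_of(work, struct my_event, work);

		/* ... dispatch on ev->type ... */
		kfree(ev);	/* the worker owns and frees the event */
	}

	static int my_notify_event(struct workqueue_struct *wq, int type)
	{
		/* GFP_ATOMIC since notifiers may run in interrupt context,
		 * mirroring the in_interrupt() test in sas_alloc_event().
		 */
		struct my_event *ev = kzalloc(sizeof(*ev), GFP_ATOMIC);

		if (!ev)
			return -ENOMEM;
		ev->type = type;
		INIT_WORK(&ev->work, my_event_worker);
		queue_work(wq, &ev->work);
		return 0;
	}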
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 3183d63de4da..6a4f8198b78e 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -293,6 +293,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
phy->phy->minimum_linkrate = dr->pmin_linkrate;
phy->phy->maximum_linkrate = dr->pmax_linkrate;
phy->phy->negotiated_linkrate = phy->linkrate;
+ phy->phy->enabled = (phy->linkrate != SAS_PHY_DISABLED);
skip:
if (new_phy)
@@ -686,7 +687,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
res = smp_execute_task(dev, req, RPEL_REQ_SIZE,
resp, RPEL_RESP_SIZE);
- if (!res)
+ if (res)
goto out;
phy->invalid_dword_count = scsi_to_u32(&resp[12]);
@@ -695,6 +696,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
phy->phy_reset_problem_count = scsi_to_u32(&resp[24]);
out:
+ kfree(req);
kfree(resp);
return res;
@@ -1914,7 +1916,8 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent,
sas_port_delete_phy(phy->port, phy->phy);
sas_device_set_phy(found, phy->port);
if (phy->port->num_phys == 0)
- sas_port_delete(phy->port);
+ list_add_tail(&phy->port->del_list,
+ &parent->port->sas_port_del_list);
phy->port = NULL;
}
}
@@ -2122,7 +2125,7 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev)
struct domain_device *dev = NULL;
res = sas_find_bcast_dev(port_dev, &dev);
- while (res == 0 && dev) {
+ if (res == 0 && dev) {
struct expander_device *ex = &dev->ex_dev;
int i = 0, phy_id;
@@ -2134,9 +2137,6 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev)
res = sas_rediscover(dev, phy_id);
i = phy_id + 1;
} while (i < ex->num_phys);
-
- dev = NULL;
- res = sas_find_bcast_dev(port_dev, &dev);
}
return res;
}
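Two small fixes ride along in sas_smp_get_phy_events(): the result test was inverted (the response was parsed only when smp_execute_task() failed) and the request buffer leaked on every call. The corrected shape, sketched with generic allocation (the RPEL_* sizes and smp_execute_task() are the driver's own; kfree(NULL) is a no-op, so the shared exit label is safe):

	static int get_phy_events_sketch(struct domain_device *dev,
					 struct sas_phy *phy)
	{
		u8 *req = kzalloc(RPEL_REQ_SIZE, GFP_KERNEL);
		u8 *resp = kzalloc(RPEL_RESP_SIZE, GFP_KERNEL);
		int res = -ENOMEM;

		if (!req || !resp)
			goto out;

		res = smp_execute_task(dev, req, RPEL_REQ_SIZE,
				       resp, RPEL_RESP_SIZE);
		if (res)	/* non-zero means failure: skip parsing */
			goto out;

		phy->invalid_dword_count = scsi_to_u32(&resp[12]);
		/* ... remaining counters ... */
	out:
		kfree(req);	/* freed on every path; this was the leak */
		kfree(resp);
		return res;
	}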
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 64fa6f53cb8b..c81a63b5dc71 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -39,6 +39,7 @@
#include "../scsi_sas_internal.h"
static struct kmem_cache *sas_task_cache;
+static struct kmem_cache *sas_event_cache;
struct sas_task *sas_alloc_task(gfp_t flags)
{
@@ -109,6 +110,7 @@ void sas_hash_addr(u8 *hashed, const u8 *sas_addr)
int sas_register_ha(struct sas_ha_struct *sas_ha)
{
+ char name[64];
int error = 0;
mutex_init(&sas_ha->disco_mutex);
@@ -122,6 +124,8 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
INIT_LIST_HEAD(&sas_ha->defer_q);
INIT_LIST_HEAD(&sas_ha->eh_dev_q);
+ sas_ha->event_thres = SAS_PHY_SHUTDOWN_THRES;
+
error = sas_register_phys(sas_ha);
if (error) {
printk(KERN_NOTICE "couldn't register sas phys:%d\n", error);
@@ -140,10 +144,24 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
goto Undo_ports;
}
+ error = -ENOMEM;
+ snprintf(name, sizeof(name), "%s_event_q", dev_name(sas_ha->dev));
+ sas_ha->event_q = create_singlethread_workqueue(name);
+ if (!sas_ha->event_q)
+ goto Undo_ports;
+
+ snprintf(name, sizeof(name), "%s_disco_q", dev_name(sas_ha->dev));
+ sas_ha->disco_q = create_singlethread_workqueue(name);
+ if (!sas_ha->disco_q)
+ goto Undo_event_q;
+
INIT_LIST_HEAD(&sas_ha->eh_done_q);
INIT_LIST_HEAD(&sas_ha->eh_ata_q);
return 0;
+
+Undo_event_q:
+ destroy_workqueue(sas_ha->event_q);
Undo_ports:
sas_unregister_ports(sas_ha);
Undo_phys:
@@ -174,6 +192,9 @@ int sas_unregister_ha(struct sas_ha_struct *sas_ha)
__sas_drain_work(sas_ha);
mutex_unlock(&sas_ha->drain_mutex);
+ destroy_workqueue(sas_ha->disco_q);
+ destroy_workqueue(sas_ha->event_q);
+
return 0;
}
@@ -364,8 +385,6 @@ void sas_prep_resume_ha(struct sas_ha_struct *ha)
struct asd_sas_phy *phy = ha->sas_phy[i];
memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
- phy->port_events_pending = 0;
- phy->phy_events_pending = 0;
phy->frame_rcvd_size = 0;
}
}
@@ -537,6 +556,37 @@ static struct sas_function_template sft = {
.smp_handler = sas_smp_handler,
};
+static inline ssize_t phy_event_threshold_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", sha->event_thres);
+}
+
+static inline ssize_t phy_event_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+
+ sha->event_thres = simple_strtol(buf, NULL, 10);
+
+ /* threshold cannot be set too small */
+ if (sha->event_thres < 32)
+ sha->event_thres = 32;
+
+ return count;
+}
+
+DEVICE_ATTR(phy_event_threshold,
+ S_IRUGO|S_IWUSR,
+ phy_event_threshold_show,
+ phy_event_threshold_store);
+EXPORT_SYMBOL_GPL(dev_attr_phy_event_threshold);
+
struct scsi_transport_template *
sas_domain_attach_transport(struct sas_domain_function_template *dft)
{
@@ -555,20 +605,71 @@ sas_domain_attach_transport(struct sas_domain_function_template *dft)
}
EXPORT_SYMBOL_GPL(sas_domain_attach_transport);
+
+struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy)
+{
+ struct asd_sas_event *event;
+ gfp_t flags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
+ struct sas_ha_struct *sas_ha = phy->ha;
+ struct sas_internal *i =
+ to_sas_internal(sas_ha->core.shost->transportt);
+
+ event = kmem_cache_zalloc(sas_event_cache, flags);
+ if (!event)
+ return NULL;
+
+ atomic_inc(&phy->event_nr);
+
+ if (atomic_read(&phy->event_nr) > phy->ha->event_thres) {
+ if (i->dft->lldd_control_phy) {
+ if (cmpxchg(&phy->in_shutdown, 0, 1) == 0) {
+ sas_printk("The phy%02d bursting events, shut it down.\n",
+ phy->id);
+ sas_notify_phy_event(phy, PHYE_SHUTDOWN);
+ }
+ } else {
+ /* Do not support PHY control, stop allocating events */
+ WARN_ONCE(1, "PHY control not supported.\n");
+ kmem_cache_free(sas_event_cache, event);
+ atomic_dec(&phy->event_nr);
+ event = NULL;
+ }
+ }
+
+ return event;
+}
+
+void sas_free_event(struct asd_sas_event *event)
+{
+ struct asd_sas_phy *phy = event->phy;
+
+ kmem_cache_free(sas_event_cache, event);
+ atomic_dec(&phy->event_nr);
+}
+
/* ---------- SAS Class register/unregister ---------- */
static int __init sas_class_init(void)
{
sas_task_cache = KMEM_CACHE(sas_task, SLAB_HWCACHE_ALIGN);
if (!sas_task_cache)
- return -ENOMEM;
+ goto out;
+
+ sas_event_cache = KMEM_CACHE(asd_sas_event, SLAB_HWCACHE_ALIGN);
+ if (!sas_event_cache)
+ goto free_task_kmem;
return 0;
+free_task_kmem:
+ kmem_cache_destroy(sas_task_cache);
+out:
+ return -ENOMEM;
}
static void __exit sas_class_exit(void)
{
kmem_cache_destroy(sas_task_cache);
+ kmem_cache_destroy(sas_event_cache);
}
MODULE_AUTHOR("Luben Tuikov <luben_tuikov@adaptec.com>");
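sas_class_init() now has two caches to create, so its error handling becomes the usual goto ladder: each label undoes exactly the allocations that succeeded before the failure, in reverse order. The shape, sketched with placeholder structs:

	#include <linux/slab.h>

	struct item_a { int x; };
	struct item_b { int y; };

	static struct kmem_cache *a_cache;
	static struct kmem_cache *b_cache;

	static int __init my_init(void)
	{
		a_cache = KMEM_CACHE(item_a, SLAB_HWCACHE_ALIGN);
		if (!a_cache)
			goto out;

		b_cache = KMEM_CACHE(item_b, SLAB_HWCACHE_ALIGN);
		if (!b_cache)
			goto free_a;

		return 0;

	free_a:
		kmem_cache_destroy(a_cache);	/* undo only what succeeded */
	out:
		return -ENOMEM;
	}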
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index c07e08136491..50e12d662ffe 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -61,6 +61,9 @@ int sas_show_oob_mode(enum sas_oob_mode oob_mode, char *buf);
int sas_register_phys(struct sas_ha_struct *sas_ha);
void sas_unregister_phys(struct sas_ha_struct *sas_ha);
+struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy);
+void sas_free_event(struct asd_sas_event *event);
+
int sas_register_ports(struct sas_ha_struct *sas_ha);
void sas_unregister_ports(struct sas_ha_struct *sas_ha);
@@ -98,6 +101,10 @@ int sas_try_ata_reset(struct asd_sas_phy *phy);
void sas_hae_reset(struct work_struct *work);
void sas_free_device(struct kref *kref);
+void sas_destruct_devices(struct asd_sas_port *port);
+
+extern const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS];
+extern const work_func_t sas_port_event_fns[PORT_NUM_EVENTS];
#ifdef CONFIG_SCSI_SAS_HOST_SMP
extern void sas_smp_host_handler(struct bsg_job *job, struct Scsi_Host *shost);
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index cdee446c29e1..bf3e1b979ca6 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -35,7 +35,7 @@ static void sas_phye_loss_of_signal(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
- clear_bit(PHYE_LOSS_OF_SIGNAL, &phy->phy_events_pending);
+ phy->in_shutdown = 0;
phy->error = 0;
sas_deform_port(phy, 1);
}
@@ -45,7 +45,7 @@ static void sas_phye_oob_done(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
- clear_bit(PHYE_OOB_DONE, &phy->phy_events_pending);
+ phy->in_shutdown = 0;
phy->error = 0;
}
@@ -58,8 +58,6 @@ static void sas_phye_oob_error(struct work_struct *work)
struct sas_internal *i =
to_sas_internal(sas_ha->core.shost->transportt);
- clear_bit(PHYE_OOB_ERROR, &phy->phy_events_pending);
-
sas_deform_port(phy, 1);
if (!port && phy->enabled && i->dft->lldd_control_phy) {
@@ -88,8 +86,6 @@ static void sas_phye_spinup_hold(struct work_struct *work)
struct sas_internal *i =
to_sas_internal(sas_ha->core.shost->transportt);
- clear_bit(PHYE_SPINUP_HOLD, &phy->phy_events_pending);
-
phy->error = 0;
i->dft->lldd_control_phy(phy, PHY_FUNC_RELEASE_SPINUP_HOLD, NULL);
}
@@ -99,8 +95,6 @@ static void sas_phye_resume_timeout(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
- clear_bit(PHYE_RESUME_TIMEOUT, &phy->phy_events_pending);
-
/* phew, lldd got the phy back in the nick of time */
if (!phy->suspended) {
dev_info(&phy->phy->dev, "resume timeout cancelled\n");
@@ -113,45 +107,41 @@ static void sas_phye_resume_timeout(struct work_struct *work)
}
+static void sas_phye_shutdown(struct work_struct *work)
+{
+ struct asd_sas_event *ev = to_asd_sas_event(work);
+ struct asd_sas_phy *phy = ev->phy;
+ struct sas_ha_struct *sas_ha = phy->ha;
+ struct sas_internal *i =
+ to_sas_internal(sas_ha->core.shost->transportt);
+
+ if (phy->enabled) {
+ int ret;
+
+ phy->error = 0;
+ phy->enabled = 0;
+ ret = i->dft->lldd_control_phy(phy, PHY_FUNC_DISABLE, NULL);
+ if (ret)
+ sas_printk("lldd disable phy%02d returned %d\n",
+ phy->id, ret);
+ } else
+ sas_printk("phy%02d is not enabled, cannot shutdown\n",
+ phy->id);
+}
+
/* ---------- Phy class registration ---------- */
int sas_register_phys(struct sas_ha_struct *sas_ha)
{
int i;
- static const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS] = {
- [PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal,
- [PHYE_OOB_DONE] = sas_phye_oob_done,
- [PHYE_OOB_ERROR] = sas_phye_oob_error,
- [PHYE_SPINUP_HOLD] = sas_phye_spinup_hold,
- [PHYE_RESUME_TIMEOUT] = sas_phye_resume_timeout,
-
- };
-
- static const work_func_t sas_port_event_fns[PORT_NUM_EVENTS] = {
- [PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed,
- [PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd,
- [PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err,
- [PORTE_TIMER_EVENT] = sas_porte_timer_event,
- [PORTE_HARD_RESET] = sas_porte_hard_reset,
- };
-
/* Now register the phys. */
for (i = 0; i < sas_ha->num_phys; i++) {
- int k;
struct asd_sas_phy *phy = sas_ha->sas_phy[i];
phy->error = 0;
+ atomic_set(&phy->event_nr, 0);
INIT_LIST_HEAD(&phy->port_phy_el);
- for (k = 0; k < PORT_NUM_EVENTS; k++) {
- INIT_SAS_WORK(&phy->port_events[k].work, sas_port_event_fns[k]);
- phy->port_events[k].phy = phy;
- }
-
- for (k = 0; k < PHY_NUM_EVENTS; k++) {
- INIT_SAS_WORK(&phy->phy_events[k].work, sas_phy_event_fns[k]);
- phy->phy_events[k].phy = phy;
- }
phy->port = NULL;
phy->ha = sas_ha;
@@ -179,3 +169,12 @@ int sas_register_phys(struct sas_ha_struct *sas_ha)
return 0;
}
+
+const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS] = {
+ [PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal,
+ [PHYE_OOB_DONE] = sas_phye_oob_done,
+ [PHYE_OOB_ERROR] = sas_phye_oob_error,
+ [PHYE_SPINUP_HOLD] = sas_phye_spinup_hold,
+ [PHYE_RESUME_TIMEOUT] = sas_phye_resume_timeout,
+ [PHYE_SHUTDOWN] = sas_phye_shutdown,
+};
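The phy handler table moves from a function-local static inside sas_register_phys() to file scope so the shared workers in sas_event.c can index it by event number. Table-driven dispatch like this, with designated initializers, keeps handlers and enum in sync even if event numbers are reordered. A minimal sketch:

	#include <linux/workqueue.h>

	enum my_event_type { EV_A, EV_B, EV_NUM };

	static void handle_a(struct work_struct *w) { /* ... */ }
	static void handle_b(struct work_struct *w) { /* ... */ }

	static const work_func_t my_event_fns[EV_NUM] = {
		[EV_A] = handle_a,
		[EV_B] = handle_b,
	};

	static void my_dispatch(struct work_struct *w, enum my_event_type t)
	{
		my_event_fns[t](w);
	}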
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index d3c5297c6c89..f07e55d3aa73 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -66,6 +66,7 @@ static void sas_resume_port(struct asd_sas_phy *phy)
rc = sas_notify_lldd_dev_found(dev);
if (rc) {
sas_unregister_dev(port, dev);
+ sas_destruct_devices(port);
continue;
}
@@ -192,6 +193,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
si->dft->lldd_port_formed(phy);
sas_discover_event(phy->port, DISCE_DISCOVER_DOMAIN);
+ flush_workqueue(sas_ha->disco_q);
}
/**
@@ -219,6 +221,7 @@ void sas_deform_port(struct asd_sas_phy *phy, int gone)
if (port->num_phys == 1) {
sas_unregister_domain_devices(port, gone);
+ sas_destruct_devices(port);
sas_port_delete(port->port);
port->port = NULL;
} else {
@@ -261,8 +264,6 @@ void sas_porte_bytes_dmaed(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
- clear_bit(PORTE_BYTES_DMAED, &phy->port_events_pending);
-
sas_form_port(phy);
}
@@ -273,14 +274,15 @@ void sas_porte_broadcast_rcvd(struct work_struct *work)
unsigned long flags;
u32 prim;
- clear_bit(PORTE_BROADCAST_RCVD, &phy->port_events_pending);
-
spin_lock_irqsave(&phy->sas_prim_lock, flags);
prim = phy->sas_prim;
spin_unlock_irqrestore(&phy->sas_prim_lock, flags);
SAS_DPRINTK("broadcast received: %d\n", prim);
sas_discover_event(phy->port, DISCE_REVALIDATE_DOMAIN);
+
+ if (phy->port)
+ flush_workqueue(phy->port->ha->disco_q);
}
void sas_porte_link_reset_err(struct work_struct *work)
@@ -288,8 +290,6 @@ void sas_porte_link_reset_err(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
- clear_bit(PORTE_LINK_RESET_ERR, &phy->port_events_pending);
-
sas_deform_port(phy, 1);
}
@@ -298,8 +298,6 @@ void sas_porte_timer_event(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
- clear_bit(PORTE_TIMER_EVENT, &phy->port_events_pending);
-
sas_deform_port(phy, 1);
}
@@ -308,8 +306,6 @@ void sas_porte_hard_reset(struct work_struct *work)
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
- clear_bit(PORTE_HARD_RESET, &phy->port_events_pending);
-
sas_deform_port(phy, 1);
}
@@ -323,6 +319,7 @@ static void sas_init_port(struct asd_sas_port *port,
INIT_LIST_HEAD(&port->dev_list);
INIT_LIST_HEAD(&port->disco_list);
INIT_LIST_HEAD(&port->destroy_list);
+ INIT_LIST_HEAD(&port->sas_port_del_list);
spin_lock_init(&port->phy_list_lock);
INIT_LIST_HEAD(&port->phy_list);
port->ha = sas_ha;
@@ -353,3 +350,11 @@ void sas_unregister_ports(struct sas_ha_struct *sas_ha)
sas_deform_port(sas_ha->sas_phy[i], 0);
}
+
+const work_func_t sas_port_event_fns[PORT_NUM_EVENTS] = {
+ [PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed,
+ [PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd,
+ [PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err,
+ [PORTE_TIMER_EVENT] = sas_porte_timer_event,
+ [PORTE_HARD_RESET] = sas_porte_hard_reset,
+};
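sas_form_port() and sas_porte_broadcast_rcvd() now flush the dedicated disco_q immediately after queueing discovery, so port formation is effectively synchronous from the event handler's point of view. In isolation (a sketch; the caller must not itself be running on the flushed workqueue, or flush_workqueue() deadlocks — here the port events run on event_q, not disco_q):

	static void form_port_sync(struct workqueue_struct *disco_q,
				   struct work_struct *disc_work)
	{
		queue_work(disco_q, disc_work);
		/* Blocks until every item queued on disco_q has finished,
		 * so discovery is complete when this returns.
		 */
		flush_workqueue(disco_q);
	}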
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index c9406852c3e9..6de9681ace82 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -27,6 +27,7 @@
#include <linux/firmware.h>
#include <linux/export.h>
#include <linux/ctype.h>
+#include <linux/kernel.h>
#include "sas_internal.h"
@@ -959,21 +960,6 @@ void sas_target_destroy(struct scsi_target *starget)
sas_put_device(found_dev);
}
-static void sas_parse_addr(u8 *sas_addr, const char *p)
-{
- int i;
- for (i = 0; i < SAS_ADDR_SIZE; i++) {
- u8 h, l;
- if (!*p)
- break;
- h = isdigit(*p) ? *p-'0' : toupper(*p)-'A'+10;
- p++;
- l = isdigit(*p) ? *p-'0' : toupper(*p)-'A'+10;
- p++;
- sas_addr[i] = (h<<4) | l;
- }
-}
-
#define SAS_STRING_ADDR_SIZE 16
int sas_request_addr(struct Scsi_Host *shost, u8 *addr)
@@ -990,7 +976,9 @@ int sas_request_addr(struct Scsi_Host *shost, u8 *addr)
goto out;
}
- sas_parse_addr(addr, fw->data);
+ res = hex2bin(addr, fw->data, strnlen(fw->data, SAS_ADDR_SIZE * 2) / 2);
+ if (res)
+ goto out;
out:
release_firmware(fw);
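The open-coded nibble parser sas_parse_addr() is replaced by hex2bin() from lib/hexdump.c, which additionally rejects malformed input instead of silently producing garbage. Its contract, sketched (count is the number of output bytes, i.e. half the hex digits; the return value is 0 on success and negative on a non-hex character):

	#include <linux/kernel.h>	/* hex2bin() */

	/* Convert "50010399..." into {0x50, 0x01, 0x03, 0x99, ...}. */
	static int parse_sas_addr(u8 *addr, const char *hex, size_t nbytes)
	{
		return hex2bin(addr, hex, nbytes);
	}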
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 231302273257..61fb46da05d4 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -55,9 +55,10 @@ struct lpfc_sli2_slim;
#define LPFC_MAX_SG_SLI4_SEG_CNT_DIF 128 /* sg element count per scsi cmnd */
#define LPFC_MAX_SG_SEG_CNT_DIF 512 /* sg element count per scsi cmnd */
#define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */
+#define LPFC_MIN_SG_SEG_CNT 32 /* sg element count per scsi cmnd */
#define LPFC_MAX_SGL_SEG_CNT 512 /* SGL element count per scsi cmnd */
#define LPFC_MAX_BPL_SEG_CNT 4096 /* BPL element count per scsi cmnd */
-#define LPFC_MAX_NVME_SEG_CNT 128 /* max SGL element cnt per NVME cmnd */
+#define LPFC_MAX_NVME_SEG_CNT 256 /* max SGL element cnt per NVME cmnd */
#define LPFC_MAX_SGE_SIZE 0x80000000 /* Maximum data allowed in a SGE */
#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
@@ -705,7 +706,6 @@ struct lpfc_hba {
* capability
*/
#define HBA_NVME_IOQ_FLUSH 0x80000 /* NVME IO queues flushed. */
-#define NVME_XRI_ABORT_EVENT 0x100000
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
struct lpfc_dmabuf slim2p;
@@ -945,6 +945,8 @@ struct lpfc_hba {
struct list_head lpfc_nvme_buf_list_get;
struct list_head lpfc_nvme_buf_list_put;
uint32_t total_nvme_bufs;
+ uint32_t get_nvme_bufs;
+ uint32_t put_nvme_bufs;
struct list_head lpfc_iocb_list;
uint32_t total_iocbq_bufs;
struct list_head active_rrq_list;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 82f6e219ee34..d188fb565a32 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -148,6 +148,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
struct lpfc_hba *phba = vport->phba;
struct lpfc_nvmet_tgtport *tgtp;
struct nvme_fc_local_port *localport;
+ struct lpfc_nvme_lport *lport;
struct lpfc_nodelist *ndlp;
struct nvme_fc_remote_port *nrport;
uint64_t data1, data2, data3, tot;
@@ -198,10 +199,15 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
}
len += snprintf(buf+len, PAGE_SIZE-len,
- "LS: Xmt %08x Drop %08x Cmpl %08x Err %08x\n",
+ "LS: Xmt %08x Drop %08x Cmpl %08x\n",
atomic_read(&tgtp->xmt_ls_rsp),
atomic_read(&tgtp->xmt_ls_drop),
- atomic_read(&tgtp->xmt_ls_rsp_cmpl),
+ atomic_read(&tgtp->xmt_ls_rsp_cmpl));
+
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "LS: RSP Abort %08x xb %08x Err %08x\n",
+ atomic_read(&tgtp->xmt_ls_rsp_aborted),
+ atomic_read(&tgtp->xmt_ls_rsp_xb_set),
atomic_read(&tgtp->xmt_ls_rsp_error));
len += snprintf(buf+len, PAGE_SIZE-len,
@@ -236,6 +242,12 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
atomic_read(&tgtp->xmt_fcp_rsp_drop));
len += snprintf(buf+len, PAGE_SIZE-len,
+ "FCP Rsp Abort: %08x xb %08x xricqe %08x\n",
+ atomic_read(&tgtp->xmt_fcp_rsp_aborted),
+ atomic_read(&tgtp->xmt_fcp_rsp_xb_set),
+ atomic_read(&tgtp->xmt_fcp_xri_abort_cqe));
+
+ len += snprintf(buf + len, PAGE_SIZE - len,
"ABORT: Xmt %08x Cmpl %08x\n",
atomic_read(&tgtp->xmt_fcp_abort),
atomic_read(&tgtp->xmt_fcp_abort_cmpl));
@@ -271,6 +283,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
wwn_to_u64(vport->fc_portname.u.wwn));
return len;
}
+ lport = (struct lpfc_nvme_lport *)localport->private;
len = snprintf(buf, PAGE_SIZE, "NVME Initiator Enabled\n");
spin_lock_irq(shost->host_lock);
@@ -347,9 +360,16 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
len += snprintf(buf + len, PAGE_SIZE - len, "\nNVME Statistics\n");
len += snprintf(buf+len, PAGE_SIZE-len,
- "LS: Xmt %016x Cmpl %016x\n",
+ "LS: Xmt %010x Cmpl %010x Abort %08x\n",
atomic_read(&phba->fc4NvmeLsRequests),
- atomic_read(&phba->fc4NvmeLsCmpls));
+ atomic_read(&phba->fc4NvmeLsCmpls),
+ atomic_read(&lport->xmt_ls_abort));
+
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "LS XMIT: Err %08x CMPL: xb %08x Err %08x\n",
+ atomic_read(&lport->xmt_ls_err),
+ atomic_read(&lport->cmpl_ls_xb),
+ atomic_read(&lport->cmpl_ls_err));
tot = atomic_read(&phba->fc4NvmeIoCmpls);
data1 = atomic_read(&phba->fc4NvmeInputRequests);
@@ -360,8 +380,22 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
data1, data2, data3);
len += snprintf(buf+len, PAGE_SIZE-len,
- " Cmpl %016llx Outstanding %016llx\n",
- tot, (data1 + data2 + data3) - tot);
+ " noxri %08x nondlp %08x qdepth %08x "
+ "wqerr %08x\n",
+ atomic_read(&lport->xmt_fcp_noxri),
+ atomic_read(&lport->xmt_fcp_bad_ndlp),
+ atomic_read(&lport->xmt_fcp_qdepth),
+ atomic_read(&lport->xmt_fcp_wqerr));
+
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ " Cmpl %016llx Outstanding %016llx Abort %08x\n",
+ tot, ((data1 + data2 + data3) - tot),
+ atomic_read(&lport->xmt_fcp_abort));
+
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "FCP CMPL: xb %08x Err %08x\n",
+ atomic_read(&lport->cmpl_fcp_xb),
+ atomic_read(&lport->cmpl_fcp_err));
return len;
}
@@ -3366,12 +3400,13 @@ LPFC_ATTR_R(suppress_rsp, 1, 0, 1,
/*
* lpfc_nvmet_mrq: Specify number of RQ pairs for processing NVMET cmds
+ * lpfc_nvmet_mrq = 0 driver will calculate optimal number of RQ pairs
* lpfc_nvmet_mrq = 1 use a single RQ pair
* lpfc_nvmet_mrq >= 2 use specified RQ pairs for MRQ
*
*/
LPFC_ATTR_R(nvmet_mrq,
- 1, 1, 16,
+ LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_MAX,
"Specify number of RQ pairs for processing NVMET cmds");
/*
@@ -5139,7 +5174,7 @@ LPFC_ATTR(delay_discovery, 0, 0, 1,
* this parameter will be limited to 128 if BlockGuard is enabled under SLI4
* and will be limited to 512 if BlockGuard is enabled under SLI3.
*/
-LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT,
+LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_MIN_SG_SEG_CNT,
LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");
/*
@@ -6362,6 +6397,9 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
phba->cfg_nvmet_fb_size = LPFC_NVMET_FB_SZ_MAX;
}
+ if (!phba->cfg_nvmet_mrq)
+ phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+
/* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */
if (phba->cfg_nvmet_mrq > phba->cfg_nvme_io_channel) {
phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
@@ -6369,10 +6407,13 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
"6018 Adjust lpfc_nvmet_mrq to %d\n",
phba->cfg_nvmet_mrq);
}
+ if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
+ phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
+
} else {
/* Not NVME Target mode. Turn off Target parameters. */
phba->nvmet_support = 0;
- phba->cfg_nvmet_mrq = 0;
+ phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
phba->cfg_nvmet_fb_size = 0;
}
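All the statistics blocks added to lpfc_nvme_info_show() follow the sysfs accumulation idiom: each call appends at buf+len with the remaining space PAGE_SIZE-len, so the page buffer cannot be overrun. A condensed sketch with made-up counters (scnprintf() is used here because it returns the number of bytes actually stored, which keeps len honest even on truncation; the driver itself uses snprintf()):

	static ssize_t my_info_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		int len = 0;

		len += scnprintf(buf + len, PAGE_SIZE - len,
				 "LS: Xmt %08x\n", 0x10u);
		len += scnprintf(buf + len, PAGE_SIZE - len,
				 "FCP: Cmpl %08x\n", 0x20u);
		return len;
	}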
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 4e858b38529a..559f9aa0ed08 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -254,6 +254,8 @@ void lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba,
struct lpfc_nvmet_ctxbuf *ctxp);
int lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
struct fc_frame_header *fc_hdr);
+void lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba);
+void lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba);
void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
uint16_t);
int lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index f77673ab4a84..9d20d2c208c7 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -471,6 +471,7 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
"Parse GID_FTrsp: did:x%x flg:x%x x%x",
Did, ndlp->nlp_flag, vport->fc_flag);
+ ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
/* By default, the driver expects to support FCP FC4 */
if (fc4_type == FC_TYPE_FCP)
ndlp->nlp_fc4_type |= NLP_FC4_FCP;
@@ -685,6 +686,25 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_els_flush_rscn(vport);
goto out;
}
+
+ spin_lock_irq(shost->host_lock);
+ if (vport->fc_flag & FC_RSCN_DEFERRED) {
+ vport->fc_flag &= ~FC_RSCN_DEFERRED;
+ spin_unlock_irq(shost->host_lock);
+
+ /*
+ * Skip processing the NS response
+ * Re-issue the NS cmd
+ */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "0151 Process Deferred RSCN Data: x%x x%x\n",
+ vport->fc_flag, vport->fc_rscn_id_cnt);
+ lpfc_els_handle_rscn(vport);
+
+ goto out;
+ }
+ spin_unlock_irq(shost->host_lock);
+
if (irsp->ulpStatus) {
/* Check for retry */
if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
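The block added to lpfc_cmpl_ct_cmd_gid_ft() is a test-and-clear of FC_RSCN_DEFERRED under the host lock: if an RSCN arrived while the GID_FT was outstanding, the stale name-server response is discarded and the deferred RSCN is processed instead. The locking shape in isolation, as a sketch:

	static bool take_deferred_rscn(struct Scsi_Host *shost,
				       struct lpfc_vport *vport)
	{
		bool deferred;

		spin_lock_irq(shost->host_lock);
		deferred = !!(vport->fc_flag & FC_RSCN_DEFERRED);
		vport->fc_flag &= ~FC_RSCN_DEFERRED;	/* clear while held */
		spin_unlock_irq(shost->host_lock);

		return deferred;	/* caller re-runs RSCN handling if true */
	}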
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 2bf5ad3b1512..17ea3bb04266 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -750,6 +750,8 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
struct lpfc_hba *phba = vport->phba;
struct lpfc_nvmet_tgtport *tgtp;
struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
+ struct nvme_fc_local_port *localport;
+ struct lpfc_nvme_lport *lport;
uint64_t tot, data1, data2, data3;
int len = 0;
int cnt;
@@ -775,10 +777,15 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
}
len += snprintf(buf + len, size - len,
- "LS: Xmt %08x Drop %08x Cmpl %08x Err %08x\n",
+ "LS: Xmt %08x Drop %08x Cmpl %08x\n",
atomic_read(&tgtp->xmt_ls_rsp),
atomic_read(&tgtp->xmt_ls_drop),
- atomic_read(&tgtp->xmt_ls_rsp_cmpl),
+ atomic_read(&tgtp->xmt_ls_rsp_cmpl));
+
+ len += snprintf(buf + len, size - len,
+ "LS: RSP Abort %08x xb %08x Err %08x\n",
+ atomic_read(&tgtp->xmt_ls_rsp_aborted),
+ atomic_read(&tgtp->xmt_ls_rsp_xb_set),
atomic_read(&tgtp->xmt_ls_rsp_error));
len += snprintf(buf + len, size - len,
@@ -812,6 +819,12 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
atomic_read(&tgtp->xmt_fcp_rsp_drop));
len += snprintf(buf + len, size - len,
+ "FCP Rsp Abort: %08x xb %08x xricqe %08x\n",
+ atomic_read(&tgtp->xmt_fcp_rsp_aborted),
+ atomic_read(&tgtp->xmt_fcp_rsp_xb_set),
+ atomic_read(&tgtp->xmt_fcp_xri_abort_cqe));
+
+ len += snprintf(buf + len, size - len,
"ABORT: Xmt %08x Cmpl %08x\n",
atomic_read(&tgtp->xmt_fcp_abort),
atomic_read(&tgtp->xmt_fcp_abort_cmpl));
@@ -885,8 +898,38 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
data1, data2, data3);
len += snprintf(buf + len, size - len,
- " Cmpl %016llx Outstanding %016llx\n",
+ " Cmpl %016llx Outstanding %016llx\n",
tot, (data1 + data2 + data3) - tot);
+
+ localport = vport->localport;
+ if (!localport)
+ return len;
+ lport = (struct lpfc_nvme_lport *)localport->private;
+ if (!lport)
+ return len;
+
+ len += snprintf(buf + len, size - len,
+ "LS Xmt Err: Abrt %08x Err %08x "
+ "Cmpl Err: xb %08x Err %08x\n",
+ atomic_read(&lport->xmt_ls_abort),
+ atomic_read(&lport->xmt_ls_err),
+ atomic_read(&lport->cmpl_ls_xb),
+ atomic_read(&lport->cmpl_ls_err));
+
+ len += snprintf(buf + len, size - len,
+ "FCP Xmt Err: noxri %06x nondlp %06x "
+ "qdepth %06x wqerr %06x Abrt %06x\n",
+ atomic_read(&lport->xmt_fcp_noxri),
+ atomic_read(&lport->xmt_fcp_bad_ndlp),
+ atomic_read(&lport->xmt_fcp_qdepth),
+ atomic_read(&lport->xmt_fcp_wqerr),
+ atomic_read(&lport->xmt_fcp_abort));
+
+ len += snprintf(buf + len, size - len,
+ "FCP Cmpl Err: xb %08x Err %08x\n",
+ atomic_read(&lport->cmpl_fcp_xb),
+ atomic_read(&lport->cmpl_fcp_err));
+
}
return len;
@@ -3213,7 +3256,7 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer,
return 1;
}
- if (eqidx < phba->cfg_nvmet_mrq) {
+ if ((eqidx < phba->cfg_nvmet_mrq) && phba->nvmet_support) {
/* NVMET CQset */
qp = phba->sli4_hba.nvmet_cqset[eqidx];
*len = __lpfc_idiag_print_cq(qp, "NVMET CQset", pbuffer, *len);
@@ -3246,7 +3289,7 @@ __lpfc_idiag_print_eq(struct lpfc_queue *qp, char *eqtype,
len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
"\n%s EQ info: EQ-STAT[max:x%x noE:x%x "
- "bs:x%x proc:x%llx eqd %d]\n",
+ "cqe_proc:x%x eqe_proc:x%llx eqd %d]\n",
eqtype, qp->q_cnt_1, qp->q_cnt_2, qp->q_cnt_3,
(unsigned long long)qp->q_cnt_4, qp->q_mode);
len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
@@ -3366,6 +3409,12 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
if (len >= max_cnt)
goto too_big;
+ qp = phba->sli4_hba.hdr_rq;
+ len = __lpfc_idiag_print_rqpair(qp, phba->sli4_hba.dat_rq,
+ "ELS RQpair", pbuffer, len);
+ if (len >= max_cnt)
+ goto too_big;
+
/* Slow-path NVME LS response CQ */
qp = phba->sli4_hba.nvmels_cq;
len = __lpfc_idiag_print_cq(qp, "NVME LS",
@@ -3383,12 +3432,6 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
if (len >= max_cnt)
goto too_big;
- qp = phba->sli4_hba.hdr_rq;
- len = __lpfc_idiag_print_rqpair(qp, phba->sli4_hba.dat_rq,
- "RQpair", pbuffer, len);
- if (len >= max_cnt)
- goto too_big;
-
goto out;
}
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index f9a566eaef04..5a7547f9d8d8 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -134,6 +134,8 @@ struct lpfc_nodelist {
struct lpfc_scsicmd_bkt *lat_data; /* Latency data */
uint32_t fc4_prli_sent;
uint32_t upcall_flags;
+#define NLP_WAIT_FOR_UNREG 0x1
+
uint32_t nvme_fb_size; /* NVME target's supported byte cnt */
#define NVME_FB_BIT_SHIFT 9 /* PRLI Rsp first burst in 512B units. */
};
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 39d5b146202e..234c7c015982 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -858,6 +858,9 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
vport->fc_flag |= FC_PT2PT;
spin_unlock_irq(shost->host_lock);
+ /* If we are pt2pt with another NPort, force NPIV off! */
+ phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
+
/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
lpfc_unregister_fcf_prep(phba);
@@ -916,28 +919,29 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
- } else
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ goto fail;
+
+ lpfc_config_link(phba, mbox);
+
+ mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
+ mbox->vport = vport;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ goto fail;
+ }
+ } else {
/* This side will wait for the PLOGI, decrement ndlp reference
* count indicating that ndlp can be released when other
* references to it are done.
*/
lpfc_nlp_put(ndlp);
- /* If we are pt2pt with another NPort, force NPIV off! */
- phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
-
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mbox)
- goto fail;
-
- lpfc_config_link(phba, mbox);
-
- mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
- mbox->vport = vport;
- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED) {
- mempool_free(mbox, phba->mbox_mem_pool);
- goto fail;
+ /* Start discovery - this should just do CLEAR_LA */
+ lpfc_disc_start(vport);
}
return 0;
@@ -1030,30 +1034,31 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
stop_rr_fcf_flogi:
/* FLOGI failure */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
- "2858 FLOGI failure Status:x%x/x%x TMO:x%x "
- "Data x%x x%x\n",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpTimeout, phba->hba_flag,
- phba->fcf.fcf_flag);
+ if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
+ ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
+ IOERR_LOOP_OPEN_FAILURE)))
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "2858 FLOGI failure Status:x%x/x%x "
+ "TMO:x%x Data x%x x%x\n",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ irsp->ulpTimeout, phba->hba_flag,
+ phba->fcf.fcf_flag);
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb))
goto out;
- /* FLOGI failure */
- lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
- "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
- irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpTimeout);
-
-
/* If this is not a loop open failure, bail out */
if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
IOERR_LOOP_OPEN_FAILURE)))
goto flogifail;
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+ "0150 FLOGI failure Status:x%x/x%x TMO:x%x\n",
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ irsp->ulpTimeout);
+
/* FLOGI failed, so there is no fabric */
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
@@ -1670,6 +1675,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
/* Two ndlps cannot have the same did on the nodelist */
ndlp->nlp_DID = keepDID;
+ lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
if (phba->sli_rev == LPFC_SLI_REV4 &&
active_rrqs_xri_bitmap)
memcpy(ndlp->active_rrqs_xri_bitmap,
@@ -2088,6 +2094,10 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_PRLI_SND;
+
+ /* Driver supports multiple FC4 types. Counters matter. */
+ vport->fc_prli_sent--;
+ ndlp->fc4_prli_sent--;
spin_unlock_irq(shost->host_lock);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
@@ -2095,9 +2105,6 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp->ulpStatus, irsp->un.ulpWord[4],
ndlp->nlp_DID);
- /* Ddriver supports multiple FC4 types. Counters matter. */
- vport->fc_prli_sent--;
-
/* PRLI completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0103 PRLI completes to NPort x%06x "
@@ -2111,7 +2118,6 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (irsp->ulpStatus) {
/* Check for retry */
- ndlp->fc4_prli_sent--;
if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
/* ELS command is being retried */
goto out;
@@ -2190,6 +2196,15 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
local_nlp_type = ndlp->nlp_fc4_type;
+ /* This routine will issue 1 or 2 PRLIs, so zero all the ndlp
+ * fields here before any of them can complete.
+ */
+ ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
+ ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
+ ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ ndlp->nlp_flag &= ~NLP_FIRSTBURST;
+ ndlp->nvme_fb_size = 0;
+
send_next_prli:
if (local_nlp_type & NLP_FC4_FCP) {
/* Payload is 4 + 16 = 20 x14 bytes. */
@@ -2298,6 +2313,13 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_PRLI_SND;
+
+ /* The vport counters are used for lpfc_scan_finished, but
+ * the ndlp is used to track outstanding PRLIs for different
+ * FC4 types.
+ */
+ vport->fc_prli_sent++;
+ ndlp->fc4_prli_sent++;
spin_unlock_irq(shost->host_lock);
if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
IOCB_ERROR) {
@@ -2308,12 +2330,6 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return 1;
}
- /* The vport counters are used for lpfc_scan_finished, but
- * the ndlp is used to track outstanding PRLIs for different
- * FC4 types.
- */
- vport->fc_prli_sent++;
- ndlp->fc4_prli_sent++;
/* The driver supports 2 FC4 types. Make sure
* a PRLI is issued for all types before exiting.
@@ -2951,8 +2967,8 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
/* This will cause the callback-function lpfc_cmpl_els_cmd to
* trigger the release of node.
*/
-
- lpfc_nlp_put(ndlp);
+ if (!(vport->fc_flag & FC_PT2PT))
+ lpfc_nlp_put(ndlp);
return 0;
}
@@ -6172,9 +6188,6 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
/* send RECOVERY event for ALL nodes that match RSCN payload */
lpfc_rscn_recovery_check(vport);
- spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~FC_RSCN_DEFERRED;
- spin_unlock_irq(shost->host_lock);
return 0;
}
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -6849,7 +6862,7 @@ lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
return 1;
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
- *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+ *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
pcmd += sizeof(uint32_t); /* Skip past command */
/* use the command's xri in the response */
@@ -8060,13 +8073,6 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
rjt_exp = LSEXP_NOTHING_MORE;
break;
}
-
- /* NVMET accepts NVME PRLI only. Reject FCP PRLI */
- if (cmd == ELS_CMD_PRLI && phba->nvmet_support) {
- rjt_err = LSRJT_CMD_UNSUPPORTED;
- rjt_exp = LSEXP_REQ_UNSUPPORTED;
- break;
- }
lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
break;
case ELS_CMD_LIRR:
@@ -8149,9 +8155,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_nlp_put(ndlp);
break;
case ELS_CMD_REC:
- /* receive this due to exchange closed */
- rjt_err = LSRJT_UNABLE_TPC;
- rjt_exp = LSEXP_INVALID_OX_RX;
+ /* receive this due to exchange closed */
+ rjt_err = LSRJT_UNABLE_TPC;
+ rjt_exp = LSEXP_INVALID_OX_RX;
break;
default:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
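The PRLI hunks move the fc_prli_sent/fc4_prli_sent increments to before the iocb is issued and the decrements into the completion handler, so a completion that fires immediately can never drive a counter negative. The general rule, sketched with hypothetical names:

	static atomic_t prli_sent = ATOMIC_INIT(0);

	static int issue_tracked(struct my_dev *d)
	{
		atomic_inc(&prli_sent);		/* account before issuing */
		if (my_issue_async(d)) {
			atomic_dec(&prli_sent);	/* completion never fires */
			return -EIO;
		}
		return 0;	/* completion handler does the decrement */
	}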
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 2bafde2b7cfe..b159a5c4e388 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -640,8 +640,6 @@ lpfc_work_done(struct lpfc_hba *phba)
lpfc_handle_rrq_active(phba);
if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
lpfc_sli4_fcp_xri_abort_event_proc(phba);
- if (phba->hba_flag & NVME_XRI_ABORT_EVENT)
- lpfc_sli4_nvme_xri_abort_event_proc(phba);
if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
lpfc_sli4_els_xri_abort_event_proc(phba);
if (phba->hba_flag & ASYNC_EVENT)
@@ -4178,12 +4176,14 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
vport->phba->nport_event_cnt++;
- if (vport->phba->nvmet_support == 0)
- /* Start devloss */
- lpfc_nvme_unregister_port(vport, ndlp);
- else
+ if (vport->phba->nvmet_support == 0) {
+ /* Start devloss if target. */
+ if (ndlp->nlp_type & NLP_NVME_TARGET)
+ lpfc_nvme_unregister_port(vport, ndlp);
+ } else {
/* NVMET has no upcall. */
lpfc_nlp_put(ndlp);
+ }
}
}
@@ -4207,11 +4207,13 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_fc4_type & NLP_FC4_NVME) {
if (vport->phba->nvmet_support == 0) {
/* Register this rport with the transport.
- * Initiators take the NDLP ref count in
- * the register.
+ * Only NVME Target Rports are registered with
+ * the transport.
*/
- vport->phba->nport_event_cnt++;
- lpfc_nvme_register_port(vport, ndlp);
+ if (ndlp->nlp_type & NLP_NVME_TARGET) {
+ vport->phba->nport_event_cnt++;
+ lpfc_nvme_register_port(vport, ndlp);
+ }
} else {
/* Just take an NDLP ref count since the
* target does not register rports.
@@ -5838,9 +5840,12 @@ __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
if (filter(ndlp, param)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"3185 FIND node filter %p DID "
- "Data: x%p x%x x%x\n",
+ "ndlp %p did x%x flg x%x st x%x "
+ "xri x%x type x%x rpi x%x\n",
filter, ndlp, ndlp->nlp_DID,
- ndlp->nlp_flag);
+ ndlp->nlp_flag, ndlp->nlp_state,
+ ndlp->nlp_xri, ndlp->nlp_type,
+ ndlp->nlp_rpi);
return ndlp;
}
}
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 2b145966c73f..73c2f6971d2b 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1122,6 +1122,7 @@ struct cq_context {
#define LPFC_CQ_CNT_256 0x0
#define LPFC_CQ_CNT_512 0x1
#define LPFC_CQ_CNT_1024 0x2
+#define LPFC_CQ_CNT_WORD7 0x3
uint32_t word1;
#define lpfc_cq_eq_id_SHIFT 22 /* Version 0 Only */
#define lpfc_cq_eq_id_MASK 0x000000FF
@@ -1129,7 +1130,7 @@ struct cq_context {
#define lpfc_cq_eq_id_2_SHIFT 0 /* Version 2 Only */
#define lpfc_cq_eq_id_2_MASK 0x0000FFFF
#define lpfc_cq_eq_id_2_WORD word1
- uint32_t reserved0;
+ uint32_t lpfc_cq_context_count; /* Version 2 Only */
uint32_t reserved1;
};
@@ -1193,6 +1194,9 @@ struct lpfc_mbx_cq_create_set {
#define lpfc_mbx_cq_create_set_arm_SHIFT 31
#define lpfc_mbx_cq_create_set_arm_MASK 0x00000001
#define lpfc_mbx_cq_create_set_arm_WORD word2
+#define lpfc_mbx_cq_create_set_cq_cnt_SHIFT 16
+#define lpfc_mbx_cq_create_set_cq_cnt_MASK 0x00007FFF
+#define lpfc_mbx_cq_create_set_cq_cnt_WORD word2
#define lpfc_mbx_cq_create_set_num_cq_SHIFT 0
#define lpfc_mbx_cq_create_set_num_cq_MASK 0x0000FFFF
#define lpfc_mbx_cq_create_set_num_cq_WORD word2
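The new cq_cnt field follows the lpfc_hw4.h convention of describing each register field with a SHIFT/MASK/WORD macro triple that the driver's bf_set()/bf_get() helpers paste together at compile time. A minimal standalone sketch of that pattern (the struct, field name, and values are illustrative, not the real mailbox layout):

#include <stdint.h>
#include <stdio.h>

struct example {
	uint32_t word2;
};

#define cq_cnt_SHIFT	16
#define cq_cnt_MASK	0x00007FFF
#define cq_cnt_WORD	word2

/* Mirrors the bf_set()/bf_get() helpers in lpfc_hw4.h */
#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
	 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

int main(void)
{
	struct example ex = { 0 };

	bf_set(cq_cnt, &ex, 0x3);	/* e.g. LPFC_CQ_CNT_WORD7 */
	printf("word2=0x%08x cq_cnt=%u\n", ex.word2, bf_get(cq_cnt, &ex));
	return 0;
}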
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 2b7ea7e53e12..f539c554588c 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1034,6 +1034,7 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
LIST_HEAD(nvmet_aborts);
unsigned long iflag = 0;
struct lpfc_sglq *sglq_entry = NULL;
+ int cnt;
lpfc_sli_hbqbuf_free_all(phba);
@@ -1090,11 +1091,14 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ cnt = 0;
list_for_each_entry_safe(psb, psb_next, &nvme_aborts, list) {
psb->pCmd = NULL;
psb->status = IOSTAT_SUCCESS;
+ cnt++;
}
spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
+ phba->put_nvme_bufs += cnt;
list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put);
spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
@@ -3339,6 +3343,7 @@ lpfc_nvme_free(struct lpfc_hba *phba)
list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
&phba->lpfc_nvme_buf_list_put, list) {
list_del(&lpfc_ncmd->list);
+ phba->put_nvme_bufs--;
dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
lpfc_ncmd->dma_handle);
kfree(lpfc_ncmd);
@@ -3350,6 +3355,7 @@ lpfc_nvme_free(struct lpfc_hba *phba)
list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
&phba->lpfc_nvme_buf_list_get, list) {
list_del(&lpfc_ncmd->list);
+ phba->get_nvme_bufs--;
dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
lpfc_ncmd->dma_handle);
kfree(lpfc_ncmd);
@@ -3754,9 +3760,11 @@ lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
uint16_t i, lxri, els_xri_cnt;
uint16_t nvme_xri_cnt, nvme_xri_max;
LIST_HEAD(nvme_sgl_list);
- int rc;
+ int rc, cnt;
phba->total_nvme_bufs = 0;
+ phba->get_nvme_bufs = 0;
+ phba->put_nvme_bufs = 0;
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
return 0;
@@ -3780,6 +3788,9 @@ lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
spin_lock(&phba->nvme_buf_list_put_lock);
list_splice_init(&phba->lpfc_nvme_buf_list_get, &nvme_sgl_list);
list_splice(&phba->lpfc_nvme_buf_list_put, &nvme_sgl_list);
+ cnt = phba->get_nvme_bufs + phba->put_nvme_bufs;
+ phba->get_nvme_bufs = 0;
+ phba->put_nvme_bufs = 0;
spin_unlock(&phba->nvme_buf_list_put_lock);
spin_unlock_irq(&phba->nvme_buf_list_get_lock);
@@ -3824,6 +3835,7 @@ lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
spin_lock_irq(&phba->nvme_buf_list_get_lock);
spin_lock(&phba->nvme_buf_list_put_lock);
list_splice_init(&nvme_sgl_list, &phba->lpfc_nvme_buf_list_get);
+ phba->get_nvme_bufs = cnt;
INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
spin_unlock(&phba->nvme_buf_list_put_lock);
spin_unlock_irq(&phba->nvme_buf_list_get_lock);
@@ -5609,8 +5621,10 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
/* Initialize the NVME buffer list used by driver for NVME IO */
spin_lock_init(&phba->nvme_buf_list_get_lock);
INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_get);
+ phba->get_nvme_bufs = 0;
spin_lock_init(&phba->nvme_buf_list_put_lock);
INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
+ phba->put_nvme_bufs = 0;
}
/* Initialize the fabric iocb list */
@@ -5806,6 +5820,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
struct lpfc_mqe *mqe;
int longs;
int fof_vectors = 0;
+ int extra;
uint64_t wwn;
phba->sli4_hba.num_online_cpu = num_online_cpus();
@@ -5860,13 +5875,21 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
*/
/*
+ * 1 for cmd, 1 for rsp, NVME adds an extra one
+ * for boundary conditions in its max_sgl_segment template.
+ */
+ extra = 2;
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ extra++;
+
+ /*
* It doesn't matter what family our adapter is in, we are
* limited to 2 Pages, 512 SGEs, for our SGL.
* There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
*/
max_buf_size = (2 * SLI4_PAGE_SIZE);
- if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2)
- phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;
+ if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - extra)
+ phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - extra;
/*
* Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
@@ -5899,14 +5922,14 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
*/
phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
sizeof(struct fcp_rsp) +
- ((phba->cfg_sg_seg_cnt + 2) *
+ ((phba->cfg_sg_seg_cnt + extra) *
sizeof(struct sli4_sge));
/* Total SGEs for scsi_sg_list */
- phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
+ phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
/*
- * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only
+ * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
* need to post 1 page for the SGL.
*/
}
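The sizing logic above reduces to a small computation: clamp the configured segment count so that the segments plus the reserved SGEs fit in the 512-SGE SGL, then size the DMA buffer for command, response, and one SGE per segment. A sketch of the arithmetic, with hypothetical structure sizes standing in for fcp_cmnd/fcp_rsp/sli4_sge:

#include <stdio.h>

#define LPFC_MAX_SGL_SEG_CNT 512	/* "512 SGEs" per the comment above */

int main(void)
{
	int nvme_enabled = 1;
	int extra = 2 + (nvme_enabled ? 1 : 0);	/* cmd + rsp (+ NVME pad) */
	int cfg_sg_seg_cnt = 514;		/* example module parameter */
	size_t sge = 16, cmnd = 32, rsp = 96;	/* hypothetical sizes */

	if (cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - extra)
		cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - extra;

	printf("total segs %d, sg_dma_buf_size %zu bytes\n",
	       cfg_sg_seg_cnt + extra,
	       cmnd + rsp + (cfg_sg_seg_cnt + extra) * sge);
	return 0;
}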
@@ -5947,9 +5970,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
-
- /* Fast-path XRI aborted CQ Event work queue list */
- INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
}
/* This abort list used by worker thread */
@@ -7936,8 +7956,12 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
phba->cfg_fcp_io_channel = io_channel;
if (phba->cfg_nvme_io_channel > io_channel)
phba->cfg_nvme_io_channel = io_channel;
- if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
- phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+ if (phba->nvmet_support) {
+ if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
+ phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+ }
+ if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
+ phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2574 IO channels: irqs %d fcp %d nvme %d MRQ: %d\n",
@@ -7958,10 +7982,10 @@ static int
lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
{
struct lpfc_queue *qdesc;
- int cnt;
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
- phba->sli4_hba.cq_ecount);
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
+ LPFC_CQE_EXP_COUNT);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0508 Failed allocate fast-path NVME CQ (%d)\n",
@@ -7970,8 +7994,8 @@ lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
}
phba->sli4_hba.nvme_cq[wqidx] = qdesc;
- cnt = LPFC_NVME_WQSIZE;
- qdesc = lpfc_sli4_queue_alloc(phba, LPFC_WQE128_SIZE, cnt);
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
+ LPFC_WQE128_SIZE, LPFC_WQE_EXP_COUNT);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0509 Failed allocate fast-path NVME WQ (%d)\n",
@@ -7987,11 +8011,18 @@ static int
lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
{
struct lpfc_queue *qdesc;
- uint32_t wqesize;
/* Create Fast Path FCP CQs */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
- phba->sli4_hba.cq_ecount);
+ if (phba->fcp_embed_io)
+ /* Increase the CQ size when WQEs contain an embedded cdb */
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
+ LPFC_CQE_EXP_COUNT);
+
+ else
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
+ phba->sli4_hba.cq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx);
@@ -8000,9 +8031,15 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
phba->sli4_hba.fcp_cq[wqidx] = qdesc;
/* Create Fast Path FCP WQs */
- wqesize = (phba->fcp_embed_io) ?
- LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
- qdesc = lpfc_sli4_queue_alloc(phba, wqesize, phba->sli4_hba.wq_ecount);
+ if (phba->fcp_embed_io)
+ /* Increase the WQ size when WQEs contain an embedded cdb */
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
+ LPFC_WQE128_SIZE,
+ LPFC_WQE_EXP_COUNT);
+ else
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.wq_esize,
+ phba->sli4_hba.wq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0503 Failed allocate fast-path FCP WQ (%d)\n",
@@ -8173,7 +8210,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
/* Create HBA Event Queues (EQs) */
for (idx = 0; idx < io_channel; idx++) {
/* Create EQs */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.eq_esize,
phba->sli4_hba.eq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8196,8 +8234,9 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
if (phba->nvmet_support) {
for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
qdesc = lpfc_sli4_queue_alloc(phba,
- phba->sli4_hba.cq_esize,
- phba->sli4_hba.cq_ecount);
+ LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
+ phba->sli4_hba.cq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3142 Failed allocate NVME "
@@ -8213,7 +8252,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
*/
/* Create slow-path Mailbox Command Complete Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8223,7 +8263,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.mbx_cq = qdesc;
/* Create slow-path ELS Complete Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8239,7 +8280,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
/* Create Mailbox Command Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.mq_esize,
phba->sli4_hba.mq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8253,7 +8295,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
*/
/* Create slow-path ELS Work Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.wq_esize,
phba->sli4_hba.wq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8265,7 +8308,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
/* Create NVME LS Complete Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8275,7 +8319,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.nvmels_cq = qdesc;
/* Create NVME LS Work Queue */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.wq_esize,
phba->sli4_hba.wq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8291,7 +8336,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
*/
/* Create Receive Queue for header */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.rq_esize,
phba->sli4_hba.rq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8301,7 +8347,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.hdr_rq = qdesc;
/* Create Receive Queue for data */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.rq_esize,
phba->sli4_hba.rq_ecount);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8314,6 +8361,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
/* Create NVMET Receive Queue for header */
qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_DEFAULT_PAGE_SIZE,
phba->sli4_hba.rq_esize,
LPFC_NVMET_RQE_DEF_COUNT);
if (!qdesc) {
@@ -8339,6 +8387,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
/* Create NVMET Receive Queue for data */
qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_DEFAULT_PAGE_SIZE,
phba->sli4_hba.rq_esize,
LPFC_NVMET_RQE_DEF_COUNT);
if (!qdesc) {
@@ -8437,13 +8486,15 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
/* Release NVME CQ mapping array */
lpfc_sli4_release_queue_map(&phba->sli4_hba.nvme_cq_map);
- lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
- phba->cfg_nvmet_mrq);
+ if (phba->nvmet_support) {
+ lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
+ phba->cfg_nvmet_mrq);
- lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
- phba->cfg_nvmet_mrq);
- lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
- phba->cfg_nvmet_mrq);
+ lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
+ phba->cfg_nvmet_mrq);
+ lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
+ phba->cfg_nvmet_mrq);
+ }
/* Release mailbox command work queue */
__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
@@ -8514,6 +8565,7 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
qidx, (uint32_t)rc);
return rc;
}
+ cq->chann = qidx;
if (qtype != LPFC_MBOX) {
/* Setup nvme_cq_map for fast lookup */
@@ -8533,6 +8585,7 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
/* no need to tear down cq - caller will do so */
return rc;
}
+ wq->chann = qidx;
/* Bind this CQ/WQ to the NVME ring */
pring = wq->pring;
@@ -8773,6 +8826,8 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
"rc = 0x%x\n", (uint32_t)rc);
goto out_destroy;
}
+ phba->sli4_hba.nvmet_cqset[0]->chann = 0;
+
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"6090 NVMET CQ setup: cq-id=%d, "
"parent eq-id=%d\n",
@@ -8994,19 +9049,22 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
lpfc_cq_destroy(phba, phba->sli4_hba.nvme_cq[qidx]);
- /* Unset NVMET MRQ queue */
- if (phba->sli4_hba.nvmet_mrq_hdr) {
- for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
- lpfc_rq_destroy(phba,
+ if (phba->nvmet_support) {
+ /* Unset NVMET MRQ queue */
+ if (phba->sli4_hba.nvmet_mrq_hdr) {
+ for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
+ lpfc_rq_destroy(
+ phba,
phba->sli4_hba.nvmet_mrq_hdr[qidx],
phba->sli4_hba.nvmet_mrq_data[qidx]);
- }
+ }
- /* Unset NVMET CQ Set complete queue */
- if (phba->sli4_hba.nvmet_cqset) {
- for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
- lpfc_cq_destroy(phba,
- phba->sli4_hba.nvmet_cqset[qidx]);
+ /* Unset NVMET CQ Set complete queue */
+ if (phba->sli4_hba.nvmet_cqset) {
+ for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
+ lpfc_cq_destroy(
+ phba, phba->sli4_hba.nvmet_cqset[qidx]);
+ }
}
/* Unset FCP response complete queue */
@@ -9175,11 +9233,6 @@ lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
/* Pending ELS XRI abort events */
list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
&cqelist);
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- /* Pending NVME XRI abort events */
- list_splice_init(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
- &cqelist);
- }
/* Pending async events */
list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
&cqelist);
@@ -9421,44 +9474,62 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
lpfc_sli4_bar0_register_memmap(phba, if_type);
}
- if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
- (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
- /*
- * Map SLI4 if type 0 HBA Control Register base to a kernel
- * virtual address and setup the registers.
- */
- phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
- bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
- phba->sli4_hba.ctrl_regs_memmap_p =
- ioremap(phba->pci_bar1_map, bar1map_len);
- if (!phba->sli4_hba.ctrl_regs_memmap_p) {
- dev_printk(KERN_ERR, &pdev->dev,
- "ioremap failed for SLI4 HBA control registers.\n");
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
+ if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
+ /*
+ * Map SLI4 if type 0 HBA Control Register base to a
+ * kernel virtual address and setup the registers.
+ */
+ phba->pci_bar1_map = pci_resource_start(pdev,
+ PCI_64BIT_BAR2);
+ bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
+ phba->sli4_hba.ctrl_regs_memmap_p =
+ ioremap(phba->pci_bar1_map,
+ bar1map_len);
+ if (!phba->sli4_hba.ctrl_regs_memmap_p) {
+ dev_err(&pdev->dev,
+ "ioremap failed for SLI4 HBA "
+ "control registers.\n");
+ error = -ENOMEM;
+ goto out_iounmap_conf;
+ }
+ phba->pci_bar2_memmap_p =
+ phba->sli4_hba.ctrl_regs_memmap_p;
+ lpfc_sli4_bar1_register_memmap(phba);
+ } else {
+ error = -ENOMEM;
goto out_iounmap_conf;
}
- phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p;
- lpfc_sli4_bar1_register_memmap(phba);
}
- if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
- (pci_resource_start(pdev, PCI_64BIT_BAR4))) {
- /*
- * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
- * virtual address and setup the registers.
- */
- phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
- bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
- phba->sli4_hba.drbl_regs_memmap_p =
- ioremap(phba->pci_bar2_map, bar2map_len);
- if (!phba->sli4_hba.drbl_regs_memmap_p) {
- dev_printk(KERN_ERR, &pdev->dev,
- "ioremap failed for SLI4 HBA doorbell registers.\n");
- goto out_iounmap_ctrl;
- }
- phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
- error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
- if (error)
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
+ if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
+ /*
+ * Map SLI4 if type 0 HBA Doorbell Register base to
+ * a kernel virtual address and setup the registers.
+ */
+ phba->pci_bar2_map = pci_resource_start(pdev,
+ PCI_64BIT_BAR4);
+ bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
+ phba->sli4_hba.drbl_regs_memmap_p =
+ ioremap(phba->pci_bar2_map,
+ bar2map_len);
+ if (!phba->sli4_hba.drbl_regs_memmap_p) {
+ dev_err(&pdev->dev,
+ "ioremap failed for SLI4 HBA"
+ " doorbell registers.\n");
+ error = -ENOMEM;
+ goto out_iounmap_ctrl;
+ }
+ phba->pci_bar4_memmap_p =
+ phba->sli4_hba.drbl_regs_memmap_p;
+ error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
+ if (error)
+ goto out_iounmap_all;
+ } else {
+ error = -ENOMEM;
goto out_iounmap_all;
+ }
}
return 0;
@@ -10093,6 +10164,16 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
int fcp_xri_cmpl = 1;
int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
+ /* Driver just aborted IOs during the hba_unset process. Pause
+ * here to give the HBA time to complete the IO and get entries
+ * into the abts lists.
+ */
+ msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);
+
+ /* Wait for NVME pending IO to flush back to transport. */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ lpfc_nvme_wait_for_io_drain(phba);
+
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
fcp_xri_cmpl =
list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
@@ -10369,7 +10450,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
!phba->nvme_support) {
phba->nvme_support = 0;
phba->nvmet_support = 0;
- phba->cfg_nvmet_mrq = 0;
+ phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
phba->cfg_nvme_io_channel = 0;
phba->io_channel_irqs = phba->cfg_fcp_io_channel;
lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
@@ -11616,6 +11697,10 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
/* Flush all driver's outstanding SCSI I/Os as we are to reset */
lpfc_sli_flush_fcp_rings(phba);
+ /* Flush the outstanding NVME IOs if fc4 type enabled. */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ lpfc_sli_flush_nvme_rings(phba);
+
/* stop all timers */
lpfc_stop_hba_timers(phba);
@@ -11647,6 +11732,10 @@ lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
/* Clean up all driver's outstanding SCSI I/Os */
lpfc_sli_flush_fcp_rings(phba);
+
+ /* Flush the outstanding NVME IOs if fc4 type enabled. */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ lpfc_sli_flush_nvme_rings(phba);
}
/**
@@ -12138,10 +12227,10 @@ int
lpfc_fof_queue_create(struct lpfc_hba *phba)
{
struct lpfc_queue *qdesc;
- uint32_t wqesize;
/* Create FOF EQ */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.eq_esize,
phba->sli4_hba.eq_ecount);
if (!qdesc)
goto out_error;
@@ -12151,7 +12240,15 @@ lpfc_fof_queue_create(struct lpfc_hba *phba)
if (phba->cfg_fof) {
/* Create OAS CQ */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ if (phba->fcp_embed_io)
+ qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_EXPANDED_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
+ LPFC_CQE_EXP_COUNT);
+ else
+ qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.cq_esize,
phba->sli4_hba.cq_ecount);
if (!qdesc)
goto out_error;
@@ -12159,11 +12256,16 @@ lpfc_fof_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.oas_cq = qdesc;
/* Create OAS WQ */
- wqesize = (phba->fcp_embed_io) ?
- LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
- qdesc = lpfc_sli4_queue_alloc(phba, wqesize,
- phba->sli4_hba.wq_ecount);
-
+ if (phba->fcp_embed_io)
+ qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_EXPANDED_PAGE_SIZE,
+ LPFC_WQE128_SIZE,
+ LPFC_WQE_EXP_COUNT);
+ else
+ qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_DEFAULT_PAGE_SIZE,
+ phba->sli4_hba.wq_esize,
+ phba->sli4_hba.wq_ecount);
if (!qdesc)
goto out_error;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index b6957d944b9a..d841aa42f607 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -390,6 +390,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
break;
}
+ ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
+ ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
+ ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ ndlp->nlp_flag &= ~NLP_FIRSTBURST;
+
/* Check for Nport to NPort pt2pt protocol */
if ((vport->fc_flag & FC_PT2PT) &&
!(vport->fc_flag & FC_PT2PT_PLOGI)) {
@@ -727,6 +732,41 @@ out:
return 0;
}
+static uint32_t
+lpfc_rcv_prli_support_check(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ struct lpfc_iocbq *cmdiocb)
+{
+ struct ls_rjt stat;
+ uint32_t *payload;
+ uint32_t cmd;
+
+ payload = ((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
+ cmd = *payload;
+ if (vport->phba->nvmet_support) {
+ /* Must be a NVME PRLI */
+ if (cmd == ELS_CMD_PRLI)
+ goto out;
+ } else {
+ /* Initiator mode. */
+ if (!vport->nvmei_support && (cmd == ELS_CMD_NVMEPRLI))
+ goto out;
+ }
+ return 1;
+out:
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME_DISC,
+ "6115 Rcv PRLI (%x) check failed: ndlp rpi %d "
+ "state x%x flags x%x\n",
+ cmd, ndlp->nlp_rpi, ndlp->nlp_state,
+ ndlp->nlp_flag);
+ memset(&stat, 0, sizeof(struct ls_rjt));
+ stat.un.b.lsRjtRsnCode = LSRJT_CMD_UNSUPPORTED;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_REQ_UNSUPPORTED;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
+ ndlp, NULL);
+ return 0;
+}
+
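The support check reduces to a small decision table: target mode accepts only an NVME PRLI, while initiator mode rejects an NVME PRLI only when NVME initiator support is off. A standalone sketch of that table (command codes are illustrative constants, not the real ELS values):

#include <stdio.h>

enum { CMD_PRLI = 1, CMD_NVMEPRLI = 2 };

static int prli_supported(int nvmet_support, int nvmei_support, int cmd)
{
	if (nvmet_support)
		return cmd == CMD_NVMEPRLI;	/* target: NVME PRLI only */
	if (!nvmei_support && cmd == CMD_NVMEPRLI)
		return 0;			/* initiator without NVME */
	return 1;
}

int main(void)
{
	printf("%d %d %d\n",
	       prli_supported(1, 0, CMD_PRLI),		/* 0: rejected */
	       prli_supported(0, 0, CMD_NVMEPRLI),	/* 0: rejected */
	       prli_supported(0, 1, CMD_NVMEPRLI));	/* 1: accepted */
	return 0;
}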
static void
lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *cmdiocb)
@@ -742,9 +782,6 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lp = (uint32_t *) pcmd->virt;
npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));
- ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
- ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
- ndlp->nlp_flag &= ~NLP_FIRSTBURST;
if ((npr->prliType == PRLI_FCP_TYPE) ||
(npr->prliType == PRLI_NVME_TYPE)) {
if (npr->initiatorFunc) {
@@ -769,8 +806,12 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* type. Target mode does not issue gft_id so doesn't get
* the fc4 type set until now.
*/
- if ((phba->nvmet_support) && (npr->prliType == PRLI_NVME_TYPE))
+ if (phba->nvmet_support && (npr->prliType == PRLI_NVME_TYPE)) {
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+ }
+ if (npr->prliType == PRLI_FCP_TYPE)
+ ndlp->nlp_fc4_type |= NLP_FC4_FCP;
}
if (rport) {
/* We need to update the rport role values */
@@ -1373,7 +1414,8 @@ lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
- lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
+ if (lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
+ lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
return ndlp->nlp_state;
}
@@ -1544,6 +1586,9 @@ lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
struct ls_rjt stat;
+ if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb)) {
+ return ndlp->nlp_state;
+ }
if (vport->phba->nvmet_support) {
/* NVME Target mode. Handle and respond to the PRLI and
* transition to UNMAPPED provided the RPI has completed
@@ -1552,28 +1597,22 @@ lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
lpfc_rcv_prli(vport, ndlp, cmdiocb);
lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
} else {
/* RPI registration has not completed. Reject the PRLI
* to prevent an illegal state transition when the
* rpi registration does complete.
*/
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME_DISC,
- "6115 NVMET ndlp rpi %d state "
- "unknown, state x%x flags x%08x\n",
- ndlp->nlp_rpi, ndlp->nlp_state,
- ndlp->nlp_flag);
memset(&stat, 0, sizeof(struct ls_rjt));
- stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
- stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
+ stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
ndlp, NULL);
+ return ndlp->nlp_state;
}
} else {
/* Initiator mode. */
lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
}
-
return ndlp->nlp_state;
}
@@ -1819,6 +1858,8 @@ lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+ if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
+ return ndlp->nlp_state;
lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
return ndlp->nlp_state;
}
@@ -1922,13 +1963,6 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return ndlp->nlp_state;
}
- /* Check out PRLI rsp */
- ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
- ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
-
- /* NVME or FCP first burst must be negotiated for each PRLI. */
- ndlp->nlp_flag &= ~NLP_FIRSTBURST;
- ndlp->nvme_fb_size = 0;
if (npr && (npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
(npr->prliType == PRLI_FCP_TYPE)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
@@ -1945,8 +1979,6 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (npr->Retry)
ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
- /* PRLI completed. Decrement count. */
- ndlp->fc4_prli_sent--;
} else if (nvpr &&
(bf_get_be32(prli_acc_rsp_code, nvpr) ==
PRLI_REQ_EXECUTED) &&
@@ -1991,8 +2023,6 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
be32_to_cpu(nvpr->word5),
ndlp->nlp_flag, ndlp->nlp_fcp_info,
ndlp->nlp_type);
- /* PRLI completed. Decrement count. */
- ndlp->fc4_prli_sent--;
}
if (!(ndlp->nlp_type & NLP_FCP_TARGET) &&
(vport->port_type == LPFC_NPIV_PORT) &&
@@ -2016,7 +2046,8 @@ out_err:
ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET))
lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
- else
+ else if (ndlp->nlp_type &
+ (NLP_FCP_INITIATOR | NLP_NVME_INITIATOR))
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
} else
lpfc_printf_vlog(vport,
@@ -2241,6 +2272,9 @@ lpfc_rcv_prli_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+ if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
+ return ndlp->nlp_state;
+
lpfc_rcv_prli(vport, ndlp, cmdiocb);
lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
return ndlp->nlp_state;
@@ -2310,6 +2344,8 @@ lpfc_rcv_prli_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+ if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
+ return ndlp->nlp_state;
lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
return ndlp->nlp_state;
}
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 517ae570e507..81e3a4f10c3c 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -57,11 +57,13 @@
/* NVME initiator-based functions */
static struct lpfc_nvme_buf *
-lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp);
+lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+ int expedite);
static void
lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_nvme_buf *);
+static struct nvme_fc_port_template lpfc_nvme_template;
/**
* lpfc_nvme_create_queue -
@@ -88,6 +90,9 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_qhandle *qhandle;
char *str;
+ if (!pnvme_lport->private)
+ return -ENOMEM;
+
lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
vport = lport->vport;
qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
@@ -140,6 +145,9 @@ lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_lport *lport;
struct lpfc_vport *vport;
+ if (!pnvme_lport->private)
+ return;
+
lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
vport = lport->vport;
@@ -154,6 +162,10 @@ lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
{
struct lpfc_nvme_lport *lport = localport->private;
+ lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME,
+ "6173 localport %p delete complete\n",
+ lport);
+
/* release any threads waiting for the unreg to complete */
complete(&lport->lport_unreg_done);
}
@@ -189,16 +201,19 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
* calling state machine to remove the node.
*/
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
- "6146 remoteport delete complete %p\n",
+ "6146 remoteport delete of remoteport %p\n",
remoteport);
+ spin_lock_irq(&vport->phba->hbalock);
ndlp->nrport = NULL;
+ spin_unlock_irq(&vport->phba->hbalock);
+
+ /* Remove original register reference. The host transport
+ * won't reference this rport/remoteport any further.
+ */
lpfc_nlp_put(ndlp);
rport_err:
- /* This call has to execute as long as the rport is valid.
- * Release any threads waiting for the unreg to complete.
- */
- complete(&rport->rport_unreg_done);
+ return;
}
static void
@@ -206,6 +221,7 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
struct lpfc_wcqe_complete *wcqe)
{
struct lpfc_vport *vport = cmdwqe->vport;
+ struct lpfc_nvme_lport *lport;
uint32_t status;
struct nvmefc_ls_req *pnvme_lsreq;
struct lpfc_dmabuf *buf_ptr;
@@ -215,6 +231,13 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
+ if (status) {
+ lport = (struct lpfc_nvme_lport *)vport->localport->private;
+ if (bf_get(lpfc_wcqe_c_xb, wcqe))
+ atomic_inc(&lport->cmpl_ls_xb);
+ atomic_inc(&lport->cmpl_ls_err);
+ }
+
ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6047 nvme cmpl Enter "
@@ -419,6 +442,6 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
if (vport->load_flag & FC_UNLOADING)
return -ENODEV;
ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
@@ -490,6 +516,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
pnvme_lsreq, lpfc_nvme_cmpl_gen_req,
ndlp, 2, 30, 0);
if (ret != WQE_SUCCESS) {
+ atomic_inc(&lport->xmt_ls_err);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6052 EXIT. issue ls wqe failed lport %p, "
"rport %p lsreq%p Status %x DID %x\n",
@@ -534,6 +561,9 @@ lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
vport = lport->vport;
phba = vport->phba;
+ if (vport->load_flag & FC_UNLOADING)
+ return;
+
ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
if (!ndlp) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
@@ -571,6 +601,7 @@ lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
/* Abort the targeted IOs and remove them from the abort list. */
list_for_each_entry_safe(wqe, next_wqe, &abort_list, dlist) {
+ atomic_inc(&lport->xmt_ls_abort);
spin_lock_irq(&phba->hbalock);
list_del_init(&wqe->dlist);
lpfc_sli_issue_abort_iotag(phba, pring, wqe);
@@ -774,8 +805,9 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
struct lpfc_nvme_rport *rport;
struct lpfc_nodelist *ndlp;
struct lpfc_nvme_fcpreq_priv *freqpriv;
+ struct lpfc_nvme_lport *lport;
unsigned long flags;
- uint32_t code;
+ uint32_t code, status;
uint16_t cid, sqhd, data;
uint32_t *ptr;
@@ -790,10 +822,17 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
nCmd = lpfc_ncmd->nvmeCmd;
rport = lpfc_ncmd->nrport;
+ status = bf_get(lpfc_wcqe_c_status, wcqe);
+ if (status) {
+ lport = (struct lpfc_nvme_lport *)vport->localport->private;
+ if (bf_get(lpfc_wcqe_c_xb, wcqe))
+ atomic_inc(&lport->cmpl_fcp_xb);
+ atomic_inc(&lport->cmpl_fcp_err);
+ }
lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
lpfc_ncmd->cur_iocbq.sli4_xritag,
- bf_get(lpfc_wcqe_c_status, wcqe), wcqe->parameter);
+ status, wcqe->parameter);
/*
* Catch race where our node has transitioned, but the
* transport is still transitioning.
@@ -851,8 +890,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN;
nCmd->transferred_length = nCmd->payload_length;
} else {
- lpfc_ncmd->status = (bf_get(lpfc_wcqe_c_status, wcqe) &
- LPFC_IOCB_STATUS_MASK);
+ lpfc_ncmd->status = (status & LPFC_IOCB_STATUS_MASK);
lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK);
/* For NVME, the only failure path that results in an
@@ -946,10 +984,13 @@ out_err:
freqpriv->nvme_buf = NULL;
/* NVME targets need completion held off until the abort exchange
- * completes.
+ * completes unless the NVME Rport is getting unregistered.
*/
- if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY))
+
+ if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
nCmd->done(nCmd);
+ lpfc_ncmd->nvmeCmd = NULL;
+ }
spin_lock_irqsave(&phba->hbalock, flags);
lpfc_ncmd->nrport = NULL;
@@ -1149,7 +1190,7 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
first_data_sgl = sgl;
lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
- if (lpfc_ncmd->seg_cnt > phba->cfg_nvme_seg_cnt + 1) {
+ if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6058 Too many sg segments from "
"NVME Transport. Max %d, "
@@ -1239,6 +1280,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
struct nvmefc_fcp_req *pnvme_fcreq)
{
int ret = 0;
+ int expedite = 0;
struct lpfc_nvme_lport *lport;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
@@ -1246,13 +1288,30 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_buf *lpfc_ncmd;
struct lpfc_nvme_rport *rport;
struct lpfc_nvme_qhandle *lpfc_queue_info;
- struct lpfc_nvme_fcpreq_priv *freqpriv = pnvme_fcreq->private;
+ struct lpfc_nvme_fcpreq_priv *freqpriv;
+ struct nvme_common_command *sqe;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint64_t start = 0;
#endif
+	/* Validate pointers. LLDD fault handling with the
+	 * transport has timing races.
+	 */
lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
+ if (unlikely(!lport)) {
+ ret = -EINVAL;
+ goto out_fail;
+ }
+
vport = lport->vport;
+
+ if (unlikely(!hw_queue_handle)) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
+				 "6117 Fail IO, NULL hw_queue_handle\n");
+ ret = -EINVAL;
+ goto out_fail;
+ }
+
phba = vport->phba;
if (vport->load_flag & FC_UNLOADING) {
@@ -1260,16 +1319,17 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
goto out_fail;
}
- /* Validate pointers. */
- if (!pnvme_lport || !pnvme_rport || !freqpriv) {
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR | LOG_NODE,
- "6117 No Send:IO submit ptrs NULL, lport %p, "
- "rport %p fcreq_priv %p\n",
- pnvme_lport, pnvme_rport, freqpriv);
+ if (vport->load_flag & FC_UNLOADING) {
ret = -ENODEV;
goto out_fail;
}
+ freqpriv = pnvme_fcreq->private;
+ if (unlikely(!freqpriv)) {
+ ret = -EINVAL;
+ goto out_fail;
+ }
+
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (phba->ktime_on)
start = ktime_get_ns();
@@ -1293,6 +1353,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
"6066 Missing node for DID %x\n",
pnvme_rport->port_id);
+ atomic_inc(&lport->xmt_fcp_bad_ndlp);
ret = -ENODEV;
goto out_fail;
}
@@ -1306,21 +1367,36 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
"IO. State x%x, Type x%x\n",
rport, pnvme_rport->port_id,
ndlp->nlp_state, ndlp->nlp_type);
+ atomic_inc(&lport->xmt_fcp_bad_ndlp);
ret = -ENODEV;
goto out_fail;
}
+ /* Currently only NVME Keep alive commands should be expedited
+ * if the driver runs out of a resource. These should only be
+	 * issued on the admin queue, qidx 0.
+ */
+ if (!lpfc_queue_info->qidx && !pnvme_fcreq->sg_cnt) {
+ sqe = &((struct nvme_fc_cmd_iu *)
+ pnvme_fcreq->cmdaddr)->sqe.common;
+ if (sqe->opcode == nvme_admin_keep_alive)
+ expedite = 1;
+ }
+
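The expedite test works because the NVMe SQE travels inside the FC command IU, so the opcode is visible before any WQE is built. A sketch of the idea, with a simplified stand-in for struct nvme_fc_cmd_iu (keep-alive admin opcode 0x18 per the NVMe spec):

#include <stdint.h>
#include <stdio.h>

#define NVME_ADMIN_KEEP_ALIVE 0x18

struct cmd_iu {			/* illustrative, not the real IU layout */
	uint8_t opcode;
	uint8_t flags;
	uint16_t command_id;
};

static int expedite_io(const void *cmdaddr, int qidx, int sg_cnt)
{
	const struct cmd_iu *sqe = cmdaddr;

	/* admin queue (qidx 0) and no data transfer */
	return !qidx && !sg_cnt && sqe->opcode == NVME_ADMIN_KEEP_ALIVE;
}

int main(void)
{
	struct cmd_iu ka = { .opcode = NVME_ADMIN_KEEP_ALIVE };

	printf("%d\n", expedite_io(&ka, 0, 0));	/* 1: expedited */
	return 0;
}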
/* The node is shared with FCP IO, make sure the IO pending count does
* not exceed the programmed depth.
*/
- if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
+ if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
+ !expedite) {
+ atomic_inc(&lport->xmt_fcp_qdepth);
ret = -EBUSY;
goto out_fail;
}
- lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp);
+ lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, expedite);
if (lpfc_ncmd == NULL) {
+ atomic_inc(&lport->xmt_fcp_noxri);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6065 driver's buffer pool is empty, "
"IO failed\n");
@@ -1373,6 +1449,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq);
if (ret) {
+ atomic_inc(&lport->xmt_fcp_wqerr);
atomic_dec(&ndlp->cmd_pending);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6113 FCP could not issue WQE err %x "
@@ -1473,19 +1550,36 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_lport *lport;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
- struct lpfc_nvme_rport *rport;
struct lpfc_nvme_buf *lpfc_nbuf;
struct lpfc_iocbq *abts_buf;
struct lpfc_iocbq *nvmereq_wqe;
- struct lpfc_nvme_fcpreq_priv *freqpriv = pnvme_fcreq->private;
+ struct lpfc_nvme_fcpreq_priv *freqpriv;
union lpfc_wqe *abts_wqe;
unsigned long flags;
int ret_val;
+	/* Validate pointers. LLDD fault handling with the
+	 * transport has timing races.
+	 */
lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
- rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
+ if (unlikely(!lport))
+ return;
+
vport = lport->vport;
+
+ if (unlikely(!hw_queue_handle)) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
+ "6129 Fail Abort, HW Queue Handle NULL.\n");
+ return;
+ }
+
phba = vport->phba;
+ freqpriv = pnvme_fcreq->private;
+
+ if (unlikely(!freqpriv))
+ return;
+ if (vport->load_flag & FC_UNLOADING)
+ return;
/* Announce entry to new IO submit field. */
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
@@ -1552,6 +1646,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
return;
}
+ atomic_inc(&lport->xmt_fcp_abort);
lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n",
nvmereq_wqe->sli4_xritag,
nvmereq_wqe->hba_wqidx, pnvme_rport->port_id);
@@ -1931,6 +2026,8 @@ lpfc_repost_nvme_sgl_list(struct lpfc_hba *phba)
spin_lock(&phba->nvme_buf_list_put_lock);
list_splice_init(&phba->lpfc_nvme_buf_list_get, &post_nblist);
list_splice(&phba->lpfc_nvme_buf_list_put, &post_nblist);
+ phba->get_nvme_bufs = 0;
+ phba->put_nvme_bufs = 0;
spin_unlock(&phba->nvme_buf_list_put_lock);
spin_unlock_irq(&phba->nvme_buf_list_get_lock);
@@ -2067,6 +2164,20 @@ lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
return num_posted;
}
+static inline struct lpfc_nvme_buf *
+lpfc_nvme_buf(struct lpfc_hba *phba)
+{
+ struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
+
+ list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+ &phba->lpfc_nvme_buf_list_get, list) {
+ list_del_init(&lpfc_ncmd->list);
+ phba->get_nvme_bufs--;
+ return lpfc_ncmd;
+ }
+ return NULL;
+}
+
/**
* lpfc_get_nvme_buf - Get a nvme buffer from lpfc_nvme_buf_list of the HBA
* @phba: The HBA for which this call is being executed.
@@ -2079,35 +2190,27 @@ lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
* Pointer to lpfc_nvme_buf - Success
**/
static struct lpfc_nvme_buf *
-lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+ int expedite)
{
- struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
+ struct lpfc_nvme_buf *lpfc_ncmd = NULL;
unsigned long iflag = 0;
- int found = 0;
spin_lock_irqsave(&phba->nvme_buf_list_get_lock, iflag);
- list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
- &phba->lpfc_nvme_buf_list_get, list) {
- list_del_init(&lpfc_ncmd->list);
- found = 1;
- break;
- }
- if (!found) {
+ if (phba->get_nvme_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
+ lpfc_ncmd = lpfc_nvme_buf(phba);
+ if (!lpfc_ncmd) {
spin_lock(&phba->nvme_buf_list_put_lock);
list_splice(&phba->lpfc_nvme_buf_list_put,
&phba->lpfc_nvme_buf_list_get);
+ phba->get_nvme_bufs += phba->put_nvme_bufs;
INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
+ phba->put_nvme_bufs = 0;
spin_unlock(&phba->nvme_buf_list_put_lock);
- list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
- &phba->lpfc_nvme_buf_list_get, list) {
- list_del_init(&lpfc_ncmd->list);
- found = 1;
- break;
- }
+ if (phba->get_nvme_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
+ lpfc_ncmd = lpfc_nvme_buf(phba);
}
spin_unlock_irqrestore(&phba->nvme_buf_list_get_lock, iflag);
- if (!found)
- return NULL;
return lpfc_ncmd;
}
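The get/put counters added throughout this patch shadow a classic two-list pool: consumers drain a "get" list and splice in the completion-side "put" list only when it runs dry, and ordinary IO may not draw the pool below a small reserve that expedited IO can still use. A single-threaded sketch with the locks and kernel list types elided (RESERVE stands in for LPFC_NVME_EXPEDITE_XRICNT):

#include <stdio.h>

#define RESERVE 8

struct buf { struct buf *next; };

static struct buf *get_list, *put_list;
static int get_cnt, put_cnt;

static struct buf *take_from_get(void)
{
	struct buf *b = get_list;

	if (b) {
		get_list = b->next;
		get_cnt--;
	}
	return b;
}

static struct buf *pool_get(int expedite)
{
	struct buf *b = NULL;

	/* normal IO may not dip into the reserve; expedited IO may */
	if (get_cnt > RESERVE || expedite)
		b = take_from_get();
	if (!b) {
		/* "get" ran dry: splice the completion ("put") list over */
		while (put_list) {
			struct buf *t = put_list;

			put_list = t->next;
			t->next = get_list;
			get_list = t;
		}
		get_cnt += put_cnt;
		put_cnt = 0;
		if (get_cnt > RESERVE || expedite)
			b = take_from_get();
	}
	return b;
}

static void pool_put(struct buf *b)
{
	b->next = put_list;
	put_list = b;
	put_cnt++;
}

int main(void)
{
	struct buf bufs[10];
	int i;

	for (i = 0; i < 10; i++)
		pool_put(&bufs[i]);
	printf("got %p, cnt %d/%d\n",
	       (void *)pool_get(0), get_cnt, put_cnt);
	return 0;
}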
@@ -2145,6 +2248,7 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
lpfc_ncmd->cur_iocbq.iocb_flag = LPFC_IO_NVME;
spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
list_add_tail(&lpfc_ncmd->list, &phba->lpfc_nvme_buf_list_put);
+ phba->put_nvme_bufs++;
spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
}
}
@@ -2221,6 +2325,18 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
lport->vport = vport;
vport->nvmei_support = 1;
+ atomic_set(&lport->xmt_fcp_noxri, 0);
+ atomic_set(&lport->xmt_fcp_bad_ndlp, 0);
+ atomic_set(&lport->xmt_fcp_qdepth, 0);
+ atomic_set(&lport->xmt_fcp_wqerr, 0);
+ atomic_set(&lport->xmt_fcp_abort, 0);
+ atomic_set(&lport->xmt_ls_abort, 0);
+ atomic_set(&lport->xmt_ls_err, 0);
+ atomic_set(&lport->cmpl_fcp_xb, 0);
+ atomic_set(&lport->cmpl_fcp_err, 0);
+ atomic_set(&lport->cmpl_ls_xb, 0);
+ atomic_set(&lport->cmpl_ls_err, 0);
+
/* Don't post more new bufs if repost already recovered
* the nvme sgls.
*/
@@ -2234,6 +2350,47 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
return ret;
}
+/* lpfc_nvme_lport_unreg_wait - Wait for the host to complete an lport unreg.
+ *
+ * The driver has to wait for the host nvme transport to callback
+ * indicating the localport has successfully unregistered all
+ * resources. Since this is an uninterruptible wait, loop every ten
+ * seconds and print a message indicating no progress.
+ *
+ * An uninterruptible wait is used because of the risk of transport-to-
+ * driver state mismatch.
+ */
+void
+lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
+ struct lpfc_nvme_lport *lport)
+{
+#if (IS_ENABLED(CONFIG_NVME_FC))
+ u32 wait_tmo;
+ int ret;
+
+	/* The host transport has to clean up and confirm completion,
+	 * which requires an indefinite wait. If a 10 second wait expires,
+	 * print a message and renew the wait; expiry is unexpected.
+	 */
+ wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
+ while (true) {
+ ret = wait_for_completion_timeout(&lport->lport_unreg_done,
+ wait_tmo);
+ if (unlikely(!ret)) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
+ "6176 Lport %p Localport %p wait "
+ "timed out. Renewing.\n",
+ lport, vport->localport);
+ continue;
+ }
+ break;
+ }
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
+ "6177 Lport %p Localport %p Complete Success\n",
+ lport, vport->localport);
+#endif
+}
+
/**
* lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport.
* @pnvme: pointer to lpfc nvme data structure.
@@ -2268,7 +2425,11 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
*/
init_completion(&lport->lport_unreg_done);
ret = nvme_fc_unregister_localport(localport);
- wait_for_completion_timeout(&lport->lport_unreg_done, 5);
+
+ /* Wait for completion. This either blocks
+	 * indefinitely or succeeds.
+ */
+ lpfc_nvme_lport_unreg_wait(vport, lport);
/* Regardless of the unregister upcall response, clear
* nvmei_support. All rports are unregistered and the
@@ -2365,6 +2526,9 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
+ if (!ndlp->nrport)
+ lpfc_nlp_get(ndlp);
+
ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
if (!ret) {
/* If the ndlp already has an nrport, this is just
@@ -2373,23 +2537,33 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
*/
rport = remote_port->private;
if (ndlp->nrport) {
- lpfc_printf_vlog(ndlp->vport, KERN_INFO,
- LOG_NVME_DISC,
- "6014 Rebinding lport to "
- "rport wwpn 0x%llx, "
- "Data: x%x x%x x%x x%06x\n",
- remote_port->port_name,
- remote_port->port_id,
- remote_port->port_role,
- ndlp->nlp_type,
- ndlp->nlp_DID);
+ if (ndlp->nrport == remote_port->private) {
+ /* Same remoteport. Just reuse. */
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO,
+ LOG_NVME_DISC,
+ "6014 Rebinding lport to "
+ "remoteport %p wwpn 0x%llx, "
+ "Data: x%x x%x %p x%x x%06x\n",
+ remote_port,
+ remote_port->port_name,
+ remote_port->port_id,
+ remote_port->port_role,
+ ndlp,
+ ndlp->nlp_type,
+ ndlp->nlp_DID);
+ return 0;
+ }
prev_ndlp = rport->ndlp;
- /* Sever the ndlp<->rport connection before dropping
- * the ndlp ref from register.
+ /* Sever the ndlp<->rport association
+ * before dropping the ndlp ref from
+ * register.
*/
+ spin_lock_irq(&vport->phba->hbalock);
ndlp->nrport = NULL;
+ spin_unlock_irq(&vport->phba->hbalock);
rport->ndlp = NULL;
+ rport->remoteport = NULL;
if (prev_ndlp)
lpfc_nlp_put(ndlp);
}
@@ -2397,19 +2571,20 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
/* Clean bind the rport to the ndlp. */
rport->remoteport = remote_port;
rport->lport = lport;
- rport->ndlp = lpfc_nlp_get(ndlp);
- if (!rport->ndlp)
- return -1;
+ rport->ndlp = ndlp;
+ spin_lock_irq(&vport->phba->hbalock);
ndlp->nrport = rport;
+ spin_unlock_irq(&vport->phba->hbalock);
lpfc_printf_vlog(vport, KERN_INFO,
LOG_NVME_DISC | LOG_NODE,
"6022 Binding new rport to "
- "lport %p Rport WWNN 0x%llx, "
+ "lport %p Remoteport %p WWNN 0x%llx, "
"Rport WWPN 0x%llx DID "
- "x%06x Role x%x\n",
- lport,
+ "x%06x Role x%x, ndlp %p\n",
+ lport, remote_port,
rpinfo.node_name, rpinfo.port_name,
- rpinfo.port_id, rpinfo.port_role);
+ rpinfo.port_id, rpinfo.port_role,
+ ndlp);
} else {
lpfc_printf_vlog(vport, KERN_ERR,
LOG_NVME_DISC | LOG_NODE,
@@ -2473,20 +2648,20 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
/* Sanity check ndlp type. Only call for NVME ports. Don't
* clear any rport state until the transport calls back.
*/
- if (ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_INITIATOR)) {
- init_completion(&rport->rport_unreg_done);
+ if (ndlp->nlp_type & NLP_NVME_TARGET) {
/* No concern about the role change on the nvme remoteport.
* The transport will update it.
*/
+ ndlp->upcall_flags |= NLP_WAIT_FOR_UNREG;
ret = nvme_fc_unregister_remoteport(remoteport);
if (ret != 0) {
+ lpfc_nlp_put(ndlp);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
"6167 NVME unregister failed %d "
"port_state x%x\n",
ret, remoteport->port_state);
}
-
}
return;
@@ -2545,8 +2720,11 @@ lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
* before the abort exchange command fully completes.
* Once completed, it is available via the put list.
*/
- nvme_cmd = lpfc_ncmd->nvmeCmd;
- nvme_cmd->done(nvme_cmd);
+ if (lpfc_ncmd->nvmeCmd) {
+ nvme_cmd = lpfc_ncmd->nvmeCmd;
+ nvme_cmd->done(nvme_cmd);
+ lpfc_ncmd->nvmeCmd = NULL;
+ }
lpfc_release_nvme_buf(phba, lpfc_ncmd);
return;
}
@@ -2558,3 +2736,45 @@ lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
"6312 XRI Aborted xri x%x not found\n", xri);
}
+
+/**
+ * lpfc_nvme_wait_for_io_drain - Wait for all NVME wqes to complete
+ * @phba: Pointer to HBA context object.
+ *
+ * This function polls each NVME ring's txcmplq until all outstanding
+ * WQEs have completed and been removed. It does not issue abort wqes
+ * for the IO commands in the txcmplq; they will just be returned with
+ * IOERR_SLI_DOWN. It is invoked by EEH when the device's PCI slot has
+ * been permanently disabled.
+ **/
+void
+lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
+{
+ struct lpfc_sli_ring *pring;
+ u32 i, wait_cnt = 0;
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ return;
+
+ /* Cycle through all NVME rings and make sure all outstanding
+ * WQEs have been removed from the txcmplqs.
+ */
+ for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
+ pring = phba->sli4_hba.nvme_wq[i]->pring;
+
+ /* Retrieve everything on the txcmplq */
+ while (!list_empty(&pring->txcmplq)) {
+ msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
+ wait_cnt++;
+
+			/* The sleep is 10 ms. Every ten seconds,
+ * dump a message. Something is wrong.
+ */
+ if ((wait_cnt % 1000) == 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6178 NVME IO not empty, "
+ "cnt %d\n", wait_cnt);
+ }
+ }
+ }
+}
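The drain loop is a poll-and-log pattern: sleep in 10 ms steps and warn every ~1000 passes instead of blocking silently. A userspace sketch under the assumption of a hypothetical txcmplq_empty() predicate:

#include <stdio.h>
#include <unistd.h>

static int outstanding = 3;	/* stand-in for the txcmplq depth */

static int txcmplq_empty(void)	/* hypothetical predicate */
{
	return --outstanding < 0;
}

static void wait_for_drain(void)
{
	unsigned int wait_cnt = 0;

	while (!txcmplq_empty()) {
		usleep(10 * 1000);		/* 10 ms per pass */
		if ((++wait_cnt % 1000) == 0)	/* ~every 10 seconds */
			fprintf(stderr, "IO not empty, cnt %u\n", wait_cnt);
	}
}

int main(void)
{
	wait_for_drain();
	return 0;
}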
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
index d192bb268f99..e79f8f75758c 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.h
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -22,10 +22,12 @@
********************************************************************/
#define LPFC_NVME_DEFAULT_SEGS (64 + 1) /* 256K IOs */
-#define LPFC_NVME_WQSIZE 256
#define LPFC_NVME_ERSP_LEN 0x20
+#define LPFC_NVME_WAIT_TMO 10
+#define LPFC_NVME_EXPEDITE_XRICNT 8
+
struct lpfc_nvme_qhandle {
uint32_t index; /* WQ index to use */
uint32_t qidx; /* queue index passed to create */
@@ -36,7 +38,18 @@ struct lpfc_nvme_qhandle {
struct lpfc_nvme_lport {
struct lpfc_vport *vport;
struct completion lport_unreg_done;
- /* Add sttats counters here */
+ /* Add stats counters here */
+ atomic_t xmt_fcp_noxri;
+ atomic_t xmt_fcp_bad_ndlp;
+ atomic_t xmt_fcp_qdepth;
+ atomic_t xmt_fcp_wqerr;
+ atomic_t xmt_fcp_abort;
+ atomic_t xmt_ls_abort;
+ atomic_t xmt_ls_err;
+ atomic_t cmpl_fcp_xb;
+ atomic_t cmpl_fcp_err;
+ atomic_t cmpl_ls_xb;
+ atomic_t cmpl_ls_err;
};
struct lpfc_nvme_rport {
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 84cf1b9079f7..8dbf5c9d51aa 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -38,6 +38,7 @@
#include <../drivers/nvme/host/nvme.h>
#include <linux/nvme-fc-driver.h>
+#include <linux/nvme-fc.h>
#include "lpfc_version.h"
#include "lpfc_hw4.h"
@@ -126,10 +127,17 @@ lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
- if (status)
- atomic_inc(&tgtp->xmt_ls_rsp_error);
- else
- atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
+ if (tgtp) {
+ if (status) {
+ atomic_inc(&tgtp->xmt_ls_rsp_error);
+ if (status == IOERR_ABORT_REQUESTED)
+ atomic_inc(&tgtp->xmt_ls_rsp_aborted);
+ if (bf_get(lpfc_wcqe_c_xb, wcqe))
+ atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
+ } else {
+ atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
+ }
+ }
out:
rsp = &ctxp->ctx.ls_req;
@@ -218,6 +226,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
ctxp->entry_cnt = 1;
ctxp->flag = 0;
ctxp->ctxbuf = ctx_buf;
+ ctxp->rqb_buffer = (void *)nvmebuf;
spin_lock_init(&ctxp->ctxlock);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -253,6 +262,17 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
return;
}
+ /* Processing of FCP command is deferred */
+ if (rc == -EOVERFLOW) {
+ lpfc_nvmeio_data(phba,
+ "NVMET RCV BUSY: xri x%x sz %d "
+ "from %06x\n",
+ oxid, size, sid);
+ /* defer repost rcv buffer till .defer_rcv callback */
+ ctxp->flag &= ~LPFC_NVMET_DEFER_RCV_REPOST;
+ atomic_inc(&tgtp->rcv_fcp_cmd_out);
+ return;
+ }
atomic_inc(&tgtp->rcv_fcp_cmd_drop);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
@@ -519,8 +539,11 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
if (status) {
rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
rsp->transferred_length = 0;
- if (tgtp)
+ if (tgtp) {
atomic_inc(&tgtp->xmt_fcp_rsp_error);
+ if (status == IOERR_ABORT_REQUESTED)
+ atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
+ }
logerr = LOG_NVME_IOERR;
@@ -528,6 +551,8 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
ctxp->flag |= LPFC_NVMET_XBUSY;
logerr |= LOG_NVME_ABTS;
+ if (tgtp)
+ atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);
} else {
ctxp->flag &= ~LPFC_NVMET_XBUSY;
@@ -635,6 +660,6 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
if (phba->pport->load_flag & FC_UNLOADING)
return -ENODEV;
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
"6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);
@@ -721,6 +749,11 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
goto aerr;
}
+ if (phba->pport->load_flag & FC_UNLOADING) {
+ rc = -ENODEV;
+ goto aerr;
+ }
+
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (ctxp->ts_cmd_nvme) {
if (rsp->op == NVMET_FCOP_RSP)
@@ -823,6 +856,6 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
if (phba->pport->load_flag & FC_UNLOADING)
return;
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
ctxp->oxid, ctxp->flag, ctxp->state);
@@ -910,7 +946,11 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
tgtp = phba->targetport->private;
atomic_inc(&tgtp->rcv_fcp_cmd_defer);
- lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
+ if (ctxp->flag & LPFC_NVMET_DEFER_RCV_REPOST)
+ lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
+ else
+ nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
+ ctxp->flag &= ~LPFC_NVMET_DEFER_RCV_REPOST;
}
static struct nvmet_fc_target_template lpfc_tgttemplate = {
@@ -1216,6 +1256,8 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
atomic_set(&tgtp->xmt_ls_rsp, 0);
atomic_set(&tgtp->xmt_ls_drop, 0);
atomic_set(&tgtp->xmt_ls_rsp_error, 0);
+ atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
+ atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
@@ -1228,7 +1270,10 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
atomic_set(&tgtp->xmt_fcp_release, 0);
atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
+ atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
+ atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
+ atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
atomic_set(&tgtp->xmt_fcp_abort, 0);
atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
atomic_set(&tgtp->xmt_abort_unsol, 0);
@@ -1270,6 +1315,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
+ struct lpfc_nvmet_tgtport *tgtp;
struct lpfc_nodelist *ndlp;
unsigned long iflag = 0;
int rrq_empty = 0;
@@ -1280,6 +1326,12 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
return;
+
+ if (phba->targetport) {
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
+ }
+
spin_lock_irqsave(&phba->hbalock, iflag);
spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
list_for_each_entry_safe(ctxp, next_ctxp,
@@ -1682,6 +1734,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
ctxp->entry_cnt = 1;
ctxp->flag = 0;
ctxp->ctxbuf = ctx_buf;
+ ctxp->rqb_buffer = (void *)nvmebuf;
spin_lock_init(&ctxp->ctxlock);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -1715,6 +1768,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
/* Process FCP command */
if (rc == 0) {
+ ctxp->rqb_buffer = NULL;
atomic_inc(&tgtp->rcv_fcp_cmd_out);
lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
return;
@@ -1726,10 +1780,11 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
"NVMET RCV BUSY: xri x%x sz %d from %06x\n",
oxid, size, sid);
/* defer reposting rcv buffer till .defer_rcv callback */
- ctxp->rqb_buffer = nvmebuf;
+ ctxp->flag |= LPFC_NVMET_DEFER_RCV_REPOST;
atomic_inc(&tgtp->rcv_fcp_cmd_out);
return;
}
+ ctxp->rqb_buffer = nvmebuf;
atomic_inc(&tgtp->rcv_fcp_cmd_drop);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
@@ -1992,7 +2047,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
return NULL;
}
- if (rsp->sg_cnt > phba->cfg_nvme_seg_cnt) {
+ if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6109 NVMET prep FCP wqe: seg cnt err: "
"NPORT x%x oxid x%x ste %d cnt %d\n",
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index 25a65b0bb7f3..5b32c9e4d4ef 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -25,6 +25,10 @@
#define LPFC_NVMET_RQE_DEF_COUNT 512
#define LPFC_NVMET_SUCCESS_LEN 12
+#define LPFC_NVMET_MRQ_OFF 0xffff
+#define LPFC_NVMET_MRQ_AUTO 0
+#define LPFC_NVMET_MRQ_MAX 16
+
/* Used for NVME Target */
struct lpfc_nvmet_tgtport {
struct lpfc_hba *phba;
@@ -43,6 +47,8 @@ struct lpfc_nvmet_tgtport {
/* Stats counters - lpfc_nvmet_xmt_ls_rsp_cmp */
atomic_t xmt_ls_rsp_error;
+ atomic_t xmt_ls_rsp_aborted;
+ atomic_t xmt_ls_rsp_xb_set;
atomic_t xmt_ls_rsp_cmpl;
/* Stats counters - lpfc_nvmet_unsol_fcp_buffer */
@@ -60,12 +66,15 @@ struct lpfc_nvmet_tgtport {
atomic_t xmt_fcp_rsp;
/* Stats counters - lpfc_nvmet_xmt_fcp_op_cmp */
+ atomic_t xmt_fcp_rsp_xb_set;
atomic_t xmt_fcp_rsp_cmpl;
atomic_t xmt_fcp_rsp_error;
+ atomic_t xmt_fcp_rsp_aborted;
atomic_t xmt_fcp_rsp_drop;
/* Stats counters - lpfc_nvmet_xmt_fcp_abort */
+ atomic_t xmt_fcp_xri_abort_cqe;
atomic_t xmt_fcp_abort;
atomic_t xmt_fcp_abort_cmpl;
atomic_t xmt_abort_sol;
@@ -122,6 +131,7 @@ struct lpfc_nvmet_rcv_ctx {
#define LPFC_NVMET_XBUSY 0x4 /* XB bit set on IO cmpl */
#define LPFC_NVMET_CTX_RLS 0x8 /* ctx free requested */
#define LPFC_NVMET_ABTS_RCV 0x10 /* ABTS received on exchange */
+#define LPFC_NVMET_DEFER_RCV_REPOST 0x20 /* repost to RQ on defer rcv */
struct rqb_dmabuf *rqb_buffer;
struct lpfc_nvmet_ctxbuf *ctxbuf;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index aecd2399005d..5f5528a12308 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -475,28 +475,30 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
struct lpfc_rqe *temp_hrqe;
struct lpfc_rqe *temp_drqe;
struct lpfc_register doorbell;
- int put_index;
+ int hq_put_index;
+ int dq_put_index;
/* sanity check on queue memory */
if (unlikely(!hq) || unlikely(!dq))
return -ENOMEM;
- put_index = hq->host_index;
- temp_hrqe = hq->qe[put_index].rqe;
- temp_drqe = dq->qe[dq->host_index].rqe;
+ hq_put_index = hq->host_index;
+ dq_put_index = dq->host_index;
+ temp_hrqe = hq->qe[hq_put_index].rqe;
+ temp_drqe = dq->qe[dq_put_index].rqe;
if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
return -EINVAL;
- if (put_index != dq->host_index)
+ if (hq_put_index != dq_put_index)
return -EINVAL;
/* If the host has not yet processed the next entry then we are done */
- if (((put_index + 1) % hq->entry_count) == hq->hba_index)
+ if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
return -EBUSY;
lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
/* Update the host index to point to the next slot */
- hq->host_index = ((put_index + 1) % hq->entry_count);
- dq->host_index = ((dq->host_index + 1) % dq->entry_count);
+ hq->host_index = ((hq_put_index + 1) % hq->entry_count);
+ dq->host_index = ((dq_put_index + 1) % dq->entry_count);
hq->RQ_buf_posted++;
/* Ring The Header Receive Queue Doorbell */
@@ -517,7 +519,7 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
}
writel(doorbell.word0, hq->db_regaddr);
}
- return put_index;
+ return hq_put_index;
}
/**
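The put_index split above also makes the invariant explicit: the header and data rings advance in lockstep, and the pair is full when the next put would land on the slot the HBA has yet to consume. Below is a self-contained sketch of that ring-pair discipline with stand-in types; it is not the SLI4 queue layout.

#include <errno.h>
#include <stdio.h>

#define RQ_ENTRIES 8

struct rq {
        unsigned int host_index;        /* next slot the host will fill */
        unsigned int hba_index;         /* last slot the HBA consumed */
        int entries[RQ_ENTRIES];
};

/* Post one entry to a header/data ring pair kept in lockstep. */
static int rq_pair_put(struct rq *hq, struct rq *dq, int hdr, int dat)
{
        unsigned int hq_put_index = hq->host_index;
        unsigned int dq_put_index = dq->host_index;

        if (hq_put_index != dq_put_index)       /* rings drifted apart */
                return -EINVAL;
        /* Full when advancing would land on the HBA's next slot. */
        if (((hq_put_index + 1) % RQ_ENTRIES) == hq->hba_index)
                return -EBUSY;

        hq->entries[hq_put_index] = hdr;
        dq->entries[dq_put_index] = dat;
        hq->host_index = (hq_put_index + 1) % RQ_ENTRIES;
        dq->host_index = (dq_put_index + 1) % RQ_ENTRIES;
        return (int)hq_put_index;       /* slot that was just filled */
}

int main(void)
{
        struct rq hq = { 0 }, dq = { 0 };

        printf("put -> slot %d\n", rq_pair_put(&hq, &dq, 10, 20));
        printf("put -> slot %d\n", rq_pair_put(&hq, &dq, 11, 21));
        return 0;
}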
@@ -12318,41 +12320,6 @@ void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
}
/**
- * lpfc_sli4_nvme_xri_abort_event_proc - Process nvme xri abort event
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked by the worker thread to process all the pending
- * SLI4 NVME abort XRI events.
- **/
-void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba)
-{
- struct lpfc_cq_event *cq_event;
-
- /* First, declare the fcp xri abort event has been handled */
- spin_lock_irq(&phba->hbalock);
- phba->hba_flag &= ~NVME_XRI_ABORT_EVENT;
- spin_unlock_irq(&phba->hbalock);
- /* Now, handle all the fcp xri abort events */
- while (!list_empty(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue)) {
- /* Get the first event from the head of the event queue */
- spin_lock_irq(&phba->hbalock);
- list_remove_head(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
- cq_event, struct lpfc_cq_event, list);
- spin_unlock_irq(&phba->hbalock);
- /* Notify aborted XRI for NVME work queue */
- if (phba->nvmet_support) {
- lpfc_sli4_nvmet_xri_aborted(phba,
- &cq_event->cqe.wcqe_axri);
- } else {
- lpfc_sli4_nvme_xri_aborted(phba,
- &cq_event->cqe.wcqe_axri);
- }
- /* Free the event processed back to the free pool */
- lpfc_sli4_cq_event_release(phba, cq_event);
- }
-}
-
-/**
* lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
* @phba: pointer to lpfc hba data structure.
*
@@ -12548,6 +12515,24 @@ lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
return irspiocbq;
}
+inline struct lpfc_cq_event *
+lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
+{
+ struct lpfc_cq_event *cq_event;
+
+ /* Allocate a new internal CQ_EVENT entry */
+ cq_event = lpfc_sli4_cq_event_alloc(phba);
+ if (!cq_event) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "0602 Failed to alloc CQ_EVENT entry\n");
+ return NULL;
+ }
+
+ /* Move the CQE into the event */
+ memcpy(&cq_event->cqe, entry, size);
+ return cq_event;
+}
+
/**
* lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
* @phba: Pointer to HBA context object.
@@ -12569,16 +12554,9 @@ lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
"word2:x%x, word3:x%x\n", mcqe->word0,
mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
- /* Allocate a new internal CQ_EVENT entry */
- cq_event = lpfc_sli4_cq_event_alloc(phba);
- if (!cq_event) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0394 Failed to allocate CQ_EVENT entry\n");
+ cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
+ if (!cq_event)
return false;
- }
-
- /* Move the CQE into an asynchronous event entry */
- memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
spin_lock_irqsave(&phba->hbalock, iflags);
list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
/* Set the async event flag */
@@ -12824,18 +12802,12 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
struct lpfc_cq_event *cq_event;
unsigned long iflags;
- /* Allocate a new internal CQ_EVENT entry */
- cq_event = lpfc_sli4_cq_event_alloc(phba);
- if (!cq_event) {
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0602 Failed to allocate CQ_EVENT entry\n");
- return false;
- }
-
- /* Move the CQE into the proper xri abort event list */
- memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
switch (cq->subtype) {
case LPFC_FCP:
+ cq_event = lpfc_cq_event_setup(
+ phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
+ if (!cq_event)
+ return false;
spin_lock_irqsave(&phba->hbalock, iflags);
list_add_tail(&cq_event->list,
&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
@@ -12844,7 +12816,12 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
spin_unlock_irqrestore(&phba->hbalock, iflags);
workposted = true;
break;
+ case LPFC_NVME_LS: /* NVME LS uses ELS resources */
case LPFC_ELS:
+ cq_event = lpfc_cq_event_setup(
+ phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
+ if (!cq_event)
+ return false;
spin_lock_irqsave(&phba->hbalock, iflags);
list_add_tail(&cq_event->list,
&phba->sli4_hba.sp_els_xri_aborted_work_queue);
@@ -12854,13 +12831,13 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
workposted = true;
break;
case LPFC_NVME:
- spin_lock_irqsave(&phba->hbalock, iflags);
- list_add_tail(&cq_event->list,
- &phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
- /* Set the nvme xri abort event flag */
- phba->hba_flag |= NVME_XRI_ABORT_EVENT;
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- workposted = true;
+ /* Notify aborted XRI for NVME work queue */
+ if (phba->nvmet_support)
+ lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
+ else
+ lpfc_sli4_nvme_xri_aborted(phba, wcqe);
+
+ workposted = false;
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -12868,7 +12845,6 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
"%08x %08x %08x %08x\n",
cq->subtype, wcqe->word0, wcqe->parameter,
wcqe->word2, wcqe->word3);
- lpfc_sli4_cq_event_release(phba, cq_event);
workposted = false;
break;
}
@@ -12913,8 +12889,8 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2537 Receive Frame Truncated!!\n");
case FC_STATUS_RQ_SUCCESS:
- lpfc_sli4_rq_release(hrq, drq);
spin_lock_irqsave(&phba->hbalock, iflags);
+ lpfc_sli4_rq_release(hrq, drq);
dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
if (!dma_buf) {
hrq->RQ_no_buf_found++;
@@ -13316,8 +13292,8 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
"6126 Receive Frame Truncated!!\n");
/* Drop thru */
case FC_STATUS_RQ_SUCCESS:
- lpfc_sli4_rq_release(hrq, drq);
spin_lock_irqsave(&phba->hbalock, iflags);
+ lpfc_sli4_rq_release(hrq, drq);
dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
if (!dma_buf) {
hrq->RQ_no_buf_found++;
@@ -13919,7 +13895,7 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
while (!list_empty(&queue->page_list)) {
list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
list);
- dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
+ dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
dmabuf->virt, dmabuf->phys);
kfree(dmabuf);
}
@@ -13938,6 +13914,7 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
/**
* lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
* @phba: The HBA that this queue is being created on.
+ * @page_size: The size of a queue page
* @entry_size: The size of each queue entry for this queue.
* @entry_count: The number of entries that this queue will handle.
*
@@ -13946,8 +13923,8 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
* queue on the HBA.
**/
struct lpfc_queue *
-lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
- uint32_t entry_count)
+lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
+ uint32_t entry_size, uint32_t entry_count)
{
struct lpfc_queue *queue;
struct lpfc_dmabuf *dmabuf;
@@ -13956,7 +13933,7 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
if (!phba->sli4_hba.pc_sli4_params.supported)
- hw_page_size = SLI4_PAGE_SIZE;
+ hw_page_size = page_size;
queue = kzalloc(sizeof(struct lpfc_queue) +
(sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
@@ -13973,6 +13950,15 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
INIT_LIST_HEAD(&queue->wq_list);
INIT_LIST_HEAD(&queue->page_list);
INIT_LIST_HEAD(&queue->child_list);
+
+ /* Set queue parameters now. If the system cannot provide memory
+ * resources, the free routine needs to know what was allocated.
+ */
+ queue->entry_size = entry_size;
+ queue->entry_count = entry_count;
+ queue->page_size = hw_page_size;
+ queue->phba = phba;
+
for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (!dmabuf)
@@ -13994,9 +13980,6 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
queue->qe[total_qe_count].address = dma_pointer;
}
}
- queue->entry_size = entry_size;
- queue->entry_count = entry_count;
- queue->phba = phba;
INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
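The comment in the hunk gives the rationale: record the queue geometry before any page allocation, so a mid-loop failure can be unwound by the ordinary free routine. A userspace sketch of the same alloc/free contract, with illustrative names only:

#include <stdlib.h>

struct queue {
        size_t page_size;       /* recorded before any allocation */
        size_t page_count;
        void **pages;
};

/* The free path relies only on what queue_alloc recorded up front. */
static void queue_free(struct queue *q)
{
        size_t i;

        if (!q)
                return;
        for (i = 0; q->pages && i < q->page_count; i++)
                free(q->pages[i]);      /* free(NULL) is a no-op */
        free(q->pages);
        free(q);
}

static struct queue *queue_alloc(size_t page_size, size_t page_count)
{
        struct queue *q = calloc(1, sizeof(*q));
        size_t i;

        if (!q)
                return NULL;
        /* Set parameters first: a partial failure below still frees right. */
        q->page_size = page_size;
        q->page_count = page_count;
        q->pages = calloc(page_count, sizeof(*q->pages));
        if (!q->pages)
                goto fail;
        for (i = 0; i < page_count; i++) {
                q->pages[i] = malloc(page_size);
                if (!q->pages[i])
                        goto fail;
        }
        return q;
fail:
        queue_free(q);
        return NULL;
}

int main(void)
{
        struct queue *q = queue_alloc(4096, 4);

        queue_free(q);
        return 0;
}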
@@ -14299,7 +14282,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
if (!cq || !eq)
return -ENODEV;
if (!phba->sli4_hba.pc_sli4_params.supported)
- hw_page_size = SLI4_PAGE_SIZE;
+ hw_page_size = cq->page_size;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
@@ -14318,8 +14301,8 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
bf_set(lpfc_mbox_hdr_version, &shdr->request,
phba->sli4_hba.pc_sli4_params.cqv);
if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
- /* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */
- bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1);
+ bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
+ (cq->page_size / SLI4_PAGE_SIZE));
bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
eq->queue_id);
} else {
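Instead of the hard-coded 1, the page-size field is now derived from cq->page_size; the arithmetic in the hunk implies the field counts 4 KiB SLI pages. A tiny check of that encoding, under that assumption:

#include <stdio.h>

#define SLI4_PAGE_SIZE 4096

/* Assumed encoding: the mailbox field counts 4 KiB SLI pages. */
static unsigned int cq_page_size_field(unsigned int page_size)
{
        return page_size / SLI4_PAGE_SIZE;
}

int main(void)
{
        printf("%u\n", cq_page_size_field(4096));       /* 1: default page  */
        printf("%u\n", cq_page_size_field(16384));      /* 4: expanded page */
        return 0;
}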
@@ -14327,6 +14310,18 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
eq->queue_id);
}
switch (cq->entry_count) {
+ case 2048:
+ case 4096:
+ if (phba->sli4_hba.pc_sli4_params.cqv ==
+ LPFC_Q_CREATE_VERSION_2) {
+ cq_create->u.request.context.lpfc_cq_context_count =
+ cq->entry_count;
+ bf_set(lpfc_cq_context_count,
+ &cq_create->u.request.context,
+ LPFC_CQ_CNT_WORD7);
+ break;
+ }
+ /* Fall Thru */
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0361 Unsupported CQ count: "
@@ -14352,7 +14347,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
break;
}
list_for_each_entry(dmabuf, &cq->page_list, list) {
- memset(dmabuf->virt, 0, hw_page_size);
+ memset(dmabuf->virt, 0, cq->page_size);
cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
putPaddrLow(dmabuf->phys);
cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
@@ -14433,8 +14428,6 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
numcq = phba->cfg_nvmet_mrq;
if (!cqp || !eqp || !numcq)
return -ENODEV;
- if (!phba->sli4_hba.pc_sli4_params.supported)
- hw_page_size = SLI4_PAGE_SIZE;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
@@ -14465,6 +14458,8 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
status = -ENOMEM;
goto out;
}
+ if (!phba->sli4_hba.pc_sli4_params.supported)
+ hw_page_size = cq->page_size;
switch (idx) {
case 0:
@@ -14482,6 +14477,19 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
bf_set(lpfc_mbx_cq_create_set_num_cq,
&cq_set->u.request, numcq);
switch (cq->entry_count) {
+ case 2048:
+ case 4096:
+ if (phba->sli4_hba.pc_sli4_params.cqv ==
+ LPFC_Q_CREATE_VERSION_2) {
+ bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
+ &cq_set->u.request,
+ cq->entry_count);
+ bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
+ &cq_set->u.request,
+ LPFC_CQ_CNT_WORD7);
+ break;
+ }
+ /* Fall Thru */
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3118 Bad CQ count. (%d)\n",
@@ -14578,6 +14586,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
cq->host_index = 0;
cq->hba_index = 0;
cq->entry_repost = LPFC_CQ_REPOST;
+ cq->chann = idx;
rc = 0;
list_for_each_entry(dmabuf, &cq->page_list, list) {
@@ -14872,12 +14881,13 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
void __iomem *bar_memmap_p;
uint32_t db_offset;
uint16_t pci_barset;
+ uint8_t wq_create_version;
/* sanity check on queue memory */
if (!wq || !cq)
return -ENODEV;
if (!phba->sli4_hba.pc_sli4_params.supported)
- hw_page_size = SLI4_PAGE_SIZE;
+ hw_page_size = wq->page_size;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
@@ -14898,7 +14908,12 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
bf_set(lpfc_mbox_hdr_version, &shdr->request,
phba->sli4_hba.pc_sli4_params.wqv);
- switch (phba->sli4_hba.pc_sli4_params.wqv) {
+ if (phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT)
+ wq_create_version = LPFC_Q_CREATE_VERSION_1;
+ else
+ wq_create_version = LPFC_Q_CREATE_VERSION_0;
+
+ switch (wq_create_version) {
case LPFC_Q_CREATE_VERSION_0:
switch (wq->entry_size) {
default:
@@ -14956,7 +14971,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
}
bf_set(lpfc_mbx_wq_create_page_size,
&wq_create->u.request_1,
- LPFC_WQ_PAGE_SIZE_4096);
+ (wq->page_size / SLI4_PAGE_SIZE));
page = wq_create->u.request_1.page;
break;
default:
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 13b8f4d4da34..81fb58e59e60 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -161,7 +161,6 @@ struct lpfc_queue {
#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32 /* For WQs */
uint32_t queue_id; /* Queue ID assigned by the hardware */
uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */
- uint32_t page_count; /* Number of pages allocated for this queue */
uint32_t host_index; /* The host's index for putting or getting */
uint32_t hba_index; /* The last known hba index for get or put */
@@ -169,6 +168,11 @@ struct lpfc_queue {
struct lpfc_rqb *rqbp; /* ptr to RQ buffers */
uint32_t q_mode;
+ uint16_t page_count; /* Number of pages allocated for this queue */
+ uint16_t page_size; /* size of page allocated for this queue */
+#define LPFC_EXPANDED_PAGE_SIZE 16384
+#define LPFC_DEFAULT_PAGE_SIZE 4096
+ uint16_t chann; /* IO channel this queue is associated with */
uint16_t db_format;
#define LPFC_DB_RING_FORMAT 0x01
#define LPFC_DB_LIST_FORMAT 0x02
@@ -366,9 +370,9 @@ struct lpfc_bmbx {
#define LPFC_EQE_DEF_COUNT 1024
#define LPFC_CQE_DEF_COUNT 1024
+#define LPFC_CQE_EXP_COUNT 4096
#define LPFC_WQE_DEF_COUNT 256
-#define LPFC_WQE128_DEF_COUNT 128
-#define LPFC_WQE128_MAX_COUNT 256
+#define LPFC_WQE_EXP_COUNT 1024
#define LPFC_MQE_DEF_COUNT 16
#define LPFC_RQE_DEF_COUNT 512
@@ -668,7 +672,6 @@ struct lpfc_sli4_hba {
struct list_head sp_asynce_work_queue;
struct list_head sp_fcp_xri_aborted_work_queue;
struct list_head sp_els_xri_aborted_work_queue;
- struct list_head sp_nvme_xri_aborted_work_queue;
struct list_head sp_unsol_work_queue;
struct lpfc_sli4_link link_state;
struct lpfc_sli4_lnk_info lnk_info;
@@ -769,7 +772,7 @@ int lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *, struct lpfcMboxq *,
void lpfc_sli4_hba_reset(struct lpfc_hba *);
struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
- uint32_t);
+ uint32_t, uint32_t);
void lpfc_sli4_queue_free(struct lpfc_queue *);
int lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t);
int lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
@@ -820,7 +823,6 @@ void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
-void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba);
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
struct sli4_wcqe_xri_aborted *);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index e0181371af09..c232bf0e8998 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "11.4.0.4"
+#define LPFC_DRIVER_VERSION "11.4.0.6"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index f5a36ccb8606..ba6503f37756 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -35,8 +35,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "07.703.05.00-rc1"
-#define MEGASAS_RELDATE "October 5, 2017"
+#define MEGASAS_VERSION "07.704.04.00-rc1"
+#define MEGASAS_RELDATE "December 7, 2017"
/*
* Device IDs
@@ -197,6 +197,7 @@ enum MFI_CMD_OP {
MFI_CMD_ABORT = 0x6,
MFI_CMD_SMP = 0x7,
MFI_CMD_STP = 0x8,
+ MFI_CMD_NVME = 0x9,
MFI_CMD_OP_COUNT,
MFI_CMD_INVALID = 0xff
};
@@ -230,7 +231,7 @@ enum MFI_CMD_OP {
/*
* Global functions
*/
-extern u8 MR_ValidateMapInfo(struct megasas_instance *instance);
+extern u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id);
/*
@@ -1352,7 +1353,13 @@ struct megasas_ctrl_info {
struct {
#if defined(__BIG_ENDIAN_BITFIELD)
- u16 reserved:8;
+ u16 reserved:2;
+ u16 support_nvme_passthru:1;
+ u16 support_pl_debug_info:1;
+ u16 support_flash_comp_info:1;
+ u16 support_host_info:1;
+ u16 support_dual_fw_update:1;
+ u16 support_ssc_rev3:1;
u16 fw_swaps_bbu_vpd_info:1;
u16 support_pd_map_target_id:1;
u16 support_ses_ctrl_in_multipathcfg:1;
@@ -1377,7 +1384,19 @@ struct megasas_ctrl_info {
* provide the data in little endian order
*/
u16 fw_swaps_bbu_vpd_info:1;
- u16 reserved:8;
+ u16 support_ssc_rev3:1;
+ /* FW supports CacheCade 3.0, only one SSCD creation allowed */
+ u16 support_dual_fw_update:1;
+ /* FW supports dual firmware update feature */
+ u16 support_host_info:1;
+ /* FW supports MR_DCMD_CTRL_HOST_INFO_SET/GET */
+ u16 support_flash_comp_info:1;
+ /* FW supports MR_DCMD_CTRL_FLASH_COMP_INFO_GET */
+ u16 support_pl_debug_info:1;
+ /* FW supports retrieval of PL debug information through apps */
+ u16 support_nvme_passthru:1;
+ /* FW supports NVMe passthru commands */
+ u16 reserved:2;
#endif
} adapter_operations4;
u8 pad[0x800 - 0x7FE]; /* 0x7FE pad to 2K for expansion */
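The mirrored field order between the __BIG_ENDIAN_BITFIELD and little-endian branches keeps each feature flag at a fixed bit position in the wire-format word. A compilable illustration of why the mirroring is needed (__BIG_ENDIAN_BITFIELD comes from kernel headers, so a plain userspace build takes the little-endian branch):

#include <stdio.h>
#include <stdint.h>

/* The field order must be mirrored per endianness so that the same
 * bit of the wire-format word names the same feature everywhere. */
union features {
        struct {
#if defined(__BIG_ENDIAN_BITFIELD)
                uint16_t reserved:14;
                uint16_t feat_b:1;
                uint16_t feat_a:1;
#else
                uint16_t feat_a:1;      /* bit 0 on the wire */
                uint16_t feat_b:1;      /* bit 1 on the wire */
                uint16_t reserved:14;
#endif
        } bits;
        uint16_t raw;
};

int main(void)
{
        union features f = { .raw = 0x0002 };   /* only bit 1 set */

        printf("feat_a=%u feat_b=%u\n", f.bits.feat_a, f.bits.feat_b);
        return 0;
}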
@@ -1630,7 +1649,8 @@ union megasas_sgl_frame {
typedef union _MFI_CAPABILITIES {
struct {
#if defined(__BIG_ENDIAN_BITFIELD)
- u32 reserved:18;
+ u32 reserved:17;
+ u32 support_nvme_passthru:1;
u32 support_64bit_mode:1;
u32 support_pd_map_target_id:1;
u32 support_qd_throttling:1;
@@ -1660,7 +1680,8 @@ typedef union _MFI_CAPABILITIES {
u32 support_qd_throttling:1;
u32 support_pd_map_target_id:1;
u32 support_64bit_mode:1;
- u32 reserved:18;
+ u32 support_nvme_passthru:1;
+ u32 reserved:17;
#endif
} mfi_capabilities;
__le32 reg;
@@ -2188,7 +2209,6 @@ struct megasas_instance {
struct megasas_evt_detail *evt_detail;
dma_addr_t evt_detail_h;
struct megasas_cmd *aen_cmd;
- struct mutex hba_mutex;
struct semaphore ioctl_sem;
struct Scsi_Host *host;
@@ -2269,6 +2289,7 @@ struct megasas_instance {
u32 nvme_page_size;
u8 adapter_type;
bool consistent_mask_64bit;
+ bool support_nvme_passthru;
};
struct MR_LD_VF_MAP {
u32 size;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index cc54bdb5c712..2791141bd035 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -181,6 +181,7 @@ static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
static u32 support_poll_for_event;
u32 megasas_dbg_lvl;
static u32 support_device_change;
+static bool support_nvme_encapsulation;
/* define lock for aen poll */
spinlock_t poll_aen_lock;
@@ -1952,7 +1953,7 @@ static int megasas_slave_configure(struct scsi_device *sdev)
}
}
- mutex_lock(&instance->hba_mutex);
+ mutex_lock(&instance->reset_mutex);
/* Send DCMD to Firmware and cache the information */
if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev))
megasas_get_pd_info(instance, sdev);
@@ -1966,7 +1967,7 @@ static int megasas_slave_configure(struct scsi_device *sdev)
is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
megasas_set_static_target_properties(sdev, is_target_prop);
- mutex_unlock(&instance->hba_mutex);
+ mutex_unlock(&instance->reset_mutex);
/* This sdev property may change post OCR */
megasas_set_dynamic_target_properties(sdev);
@@ -3122,6 +3123,16 @@ megasas_ldio_outstanding_show(struct device *cdev, struct device_attribute *attr
return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding));
}
+static ssize_t
+megasas_fw_cmds_outstanding_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->fw_outstanding));
+}
+
static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR,
megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store);
static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO,
@@ -3132,6 +3143,8 @@ static DEVICE_ATTR(page_size, S_IRUGO,
megasas_page_size_show, NULL);
static DEVICE_ATTR(ldio_outstanding, S_IRUGO,
megasas_ldio_outstanding_show, NULL);
+static DEVICE_ATTR(fw_cmds_outstanding, S_IRUGO,
+ megasas_fw_cmds_outstanding_show, NULL);
struct device_attribute *megaraid_host_attrs[] = {
&dev_attr_fw_crash_buffer_size,
@@ -3139,6 +3152,7 @@ struct device_attribute *megaraid_host_attrs[] = {
&dev_attr_fw_crash_state,
&dev_attr_page_size,
&dev_attr_ldio_outstanding,
+ &dev_attr_fw_cmds_outstanding,
NULL,
};
@@ -3321,6 +3335,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
case MFI_CMD_SMP:
case MFI_CMD_STP:
+ case MFI_CMD_NVME:
megasas_complete_int_cmd(instance, cmd);
break;
@@ -3331,10 +3346,10 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
&& (cmd->frame->dcmd.mbox.b[1] == 1)) {
fusion->fast_path_io = 0;
spin_lock_irqsave(instance->host->host_lock, flags);
+ status = cmd->frame->hdr.cmd_status;
instance->map_update_cmd = NULL;
- if (cmd->frame->hdr.cmd_status != 0) {
- if (cmd->frame->hdr.cmd_status !=
- MFI_STAT_NOT_FOUND)
+ if (status != MFI_STAT_OK) {
+ if (status != MFI_STAT_NOT_FOUND)
dev_warn(&instance->pdev->dev, "map sync failed, status = 0x%x\n",
cmd->frame->hdr.cmd_status);
else {
@@ -3344,8 +3359,8 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
flags);
break;
}
- } else
- instance->map_id++;
+ }
+
megasas_return_cmd(instance, cmd);
/*
@@ -3353,10 +3368,14 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
* Validate Map will set proper value.
* Meanwhile all IOs will go as LD IO.
*/
- if (MR_ValidateMapInfo(instance))
+ if (status == MFI_STAT_OK &&
+ (MR_ValidateMapInfo(instance, (instance->map_id + 1)))) {
+ instance->map_id++;
fusion->fast_path_io = 1;
- else
+ } else {
fusion->fast_path_io = 0;
+ }
+
megasas_sync_map_info(instance);
spin_unlock_irqrestore(instance->host->host_lock,
flags);
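The reordering above validates the incoming map in the inactive slot (map_id + 1) and only advances map_id on success, so a corrupt map can never become the current one. A sketch of that double-buffered commit with stand-in types:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct raid_map {
        int ld_count;   /* stand-in for the real map contents */
};

static struct raid_map maps[2]; /* map_id & 1 selects the active copy */
static uint64_t map_id;

static bool validate_map(uint64_t id)
{
        return maps[id & 1].ld_count >= 0;      /* stand-in check */
}

/* Validate the *inactive* slot first; only then commit map_id. */
static void on_map_sync_complete(bool fw_status_ok)
{
        if (fw_status_ok && validate_map(map_id + 1)) {
                map_id++;
                printf("fast path on, map %llu\n",
                       (unsigned long long)map_id);
        } else {
                printf("fast path off, keeping map %llu\n",
                       (unsigned long long)map_id);
        }
}

int main(void)
{
        maps[1].ld_count = 4;
        on_map_sync_complete(true);     /* commits map 1 */
        maps[0].ld_count = -1;
        on_map_sync_complete(true);     /* invalid map: stays on map 1 */
        return 0;
}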
@@ -4677,10 +4696,12 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
sizeof(struct megasas_ctrl_info));
if ((instance->adapter_type != MFI_SERIES) &&
- !instance->mask_interrupts)
+ !instance->mask_interrupts) {
ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
- else
+ } else {
ret = megasas_issue_polled(instance, cmd);
+ cmd->flags |= DRV_DCMD_SKIP_REFIRE;
+ }
switch (ret) {
case DCMD_SUCCESS:
@@ -4702,6 +4723,8 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
ci->adapterOperations3.useSeqNumJbodFP;
instance->support_morethan256jbod =
ci->adapter_operations4.support_pd_map_target_id;
+ instance->support_nvme_passthru =
+ ci->adapter_operations4.support_nvme_passthru;
/*Check whether controller is iMR or MR */
instance->is_imr = (ci->memory_size ? 0 : 1);
@@ -4718,6 +4741,8 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
instance->secure_jbod_support ? "Yes" : "No");
+ dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n",
+ instance->support_nvme_passthru ? "Yes" : "No");
break;
case DCMD_TIMEOUT:
@@ -5387,7 +5412,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
}
for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
fusion->stream_detect_by_ld[i] =
- kmalloc(sizeof(struct LD_STREAM_DETECT),
+ kzalloc(sizeof(struct LD_STREAM_DETECT),
GFP_KERNEL);
if (!fusion->stream_detect_by_ld[i]) {
dev_err(&instance->pdev->dev,
@@ -5432,7 +5457,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
ctrl_info->adapterOperations2.supportUnevenSpans;
if (instance->UnevenSpanSupport) {
struct fusion_context *fusion = instance->ctrl_context;
- if (MR_ValidateMapInfo(instance))
+ if (MR_ValidateMapInfo(instance, instance->map_id))
fusion->fast_path_io = 1;
else
fusion->fast_path_io = 0;
@@ -5581,6 +5606,7 @@ megasas_get_seq_num(struct megasas_instance *instance,
struct megasas_dcmd_frame *dcmd;
struct megasas_evt_log_info *el_info;
dma_addr_t el_info_h = 0;
+ int ret;
cmd = megasas_get_cmd(instance);
@@ -5613,26 +5639,29 @@ megasas_get_seq_num(struct megasas_instance *instance,
megasas_set_dma_settings(instance, dcmd, el_info_h,
sizeof(struct megasas_evt_log_info));
- if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) ==
- DCMD_SUCCESS) {
- /*
- * Copy the data back into callers buffer
- */
- eli->newest_seq_num = el_info->newest_seq_num;
- eli->oldest_seq_num = el_info->oldest_seq_num;
- eli->clear_seq_num = el_info->clear_seq_num;
- eli->shutdown_seq_num = el_info->shutdown_seq_num;
- eli->boot_seq_num = el_info->boot_seq_num;
- } else
- dev_err(&instance->pdev->dev, "DCMD failed "
- "from %s\n", __func__);
+ ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
+ if (ret != DCMD_SUCCESS) {
+ dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+ __func__, __LINE__);
+ goto dcmd_failed;
+ }
+ /*
+ * Copy the data back into callers buffer
+ */
+ eli->newest_seq_num = el_info->newest_seq_num;
+ eli->oldest_seq_num = el_info->oldest_seq_num;
+ eli->clear_seq_num = el_info->clear_seq_num;
+ eli->shutdown_seq_num = el_info->shutdown_seq_num;
+ eli->boot_seq_num = el_info->boot_seq_num;
+
+dcmd_failed:
pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
el_info, el_info_h);
megasas_return_cmd(instance, cmd);
- return 0;
+ return ret;
}
/**
@@ -6346,7 +6375,6 @@ static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
spin_lock_init(&instance->stream_lock);
spin_lock_init(&instance->completion_lock);
- mutex_init(&instance->hba_mutex);
mutex_init(&instance->reset_mutex);
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
@@ -6704,6 +6732,7 @@ megasas_resume(struct pci_dev *pdev)
*/
atomic_set(&instance->fw_outstanding, 0);
+ atomic_set(&instance->ldio_outstanding, 0);
/* Now re-enable MSI-X */
if (instance->msix_vectors) {
@@ -6822,7 +6851,6 @@ static void megasas_detach_one(struct pci_dev *pdev)
u32 pd_seq_map_sz;
instance = pci_get_drvdata(pdev);
- instance->unload = 1;
host = instance->host;
fusion = instance->ctrl_context;
@@ -6833,6 +6861,7 @@ static void megasas_detach_one(struct pci_dev *pdev)
if (instance->fw_crash_state != UNAVAILABLE)
megasas_free_host_crash_buffer(instance);
scsi_remove_host(instance->host);
+ instance->unload = 1;
if (megasas_wait_for_adapter_operational(instance))
goto skip_firing_dcmds;
@@ -7004,9 +7033,9 @@ static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
/**
* megasas_mgmt_poll - char node "poll" entry point
* */
-static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait)
+static __poll_t megasas_mgmt_poll(struct file *file, poll_table *wait)
{
- unsigned int mask;
+ __poll_t mask;
unsigned long flags;
poll_wait(file, &megasas_poll_wait, wait);
@@ -7087,7 +7116,9 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
return -EINVAL;
}
- if (ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) {
+ if ((ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) ||
+ ((ioc->frame.hdr.cmd == MFI_CMD_NVME) &&
+ !instance->support_nvme_passthru)) {
dev_err(&instance->pdev->dev,
"Received invalid ioctl command 0x%x\n",
ioc->frame.hdr.cmd);
@@ -7301,9 +7332,6 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
struct megasas_iocpacket *ioc;
struct megasas_instance *instance;
int error;
- int i;
- unsigned long flags;
- u32 wait_time = MEGASAS_RESET_WAIT_TIME;
ioc = memdup_user(user_ioc, sizeof(*ioc));
if (IS_ERR(ioc))
@@ -7315,10 +7343,6 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
goto out_kfree_ioc;
}
- /* Adjust ioctl wait time for VF mode */
- if (instance->requestorId)
- wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
-
/* Block ioctls in VF mode */
if (instance->requestorId && !allow_vf_ioctls) {
error = -ENODEV;
@@ -7341,32 +7365,10 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
goto out_kfree_ioc;
}
- for (i = 0; i < wait_time; i++) {
-
- spin_lock_irqsave(&instance->hba_lock, flags);
- if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
- spin_unlock_irqrestore(&instance->hba_lock, flags);
- break;
- }
- spin_unlock_irqrestore(&instance->hba_lock, flags);
-
- if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
- dev_notice(&instance->pdev->dev, "waiting"
- "for controller reset to finish\n");
- }
-
- msleep(1000);
- }
-
- spin_lock_irqsave(&instance->hba_lock, flags);
- if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
- spin_unlock_irqrestore(&instance->hba_lock, flags);
-
- dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n");
+ if (megasas_wait_for_adapter_operational(instance)) {
error = -ENODEV;
goto out_up;
}
- spin_unlock_irqrestore(&instance->hba_lock, flags);
error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
out_up:
@@ -7382,9 +7384,6 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
struct megasas_instance *instance;
struct megasas_aen aen;
int error;
- int i;
- unsigned long flags;
- u32 wait_time = MEGASAS_RESET_WAIT_TIME;
if (file->private_data != file) {
printk(KERN_DEBUG "megasas: fasync_helper was not "
@@ -7408,32 +7407,8 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
return -ENODEV;
}
- for (i = 0; i < wait_time; i++) {
-
- spin_lock_irqsave(&instance->hba_lock, flags);
- if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
- spin_unlock_irqrestore(&instance->hba_lock,
- flags);
- break;
- }
-
- spin_unlock_irqrestore(&instance->hba_lock, flags);
-
- if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
- dev_notice(&instance->pdev->dev, "waiting for"
- "controller reset to finish\n");
- }
-
- msleep(1000);
- }
-
- spin_lock_irqsave(&instance->hba_lock, flags);
- if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
- spin_unlock_irqrestore(&instance->hba_lock, flags);
- dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n");
+ if (megasas_wait_for_adapter_operational(instance))
return -ENODEV;
- }
- spin_unlock_irqrestore(&instance->hba_lock, flags);
mutex_lock(&instance->reset_mutex);
error = megasas_register_aen(instance, aen.seq_num,
@@ -7613,6 +7588,14 @@ static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf,
}
static DRIVER_ATTR_RW(dbg_lvl);
+static ssize_t
+support_nvme_encapsulation_show(struct device_driver *dd, char *buf)
+{
+ return sprintf(buf, "%u\n", support_nvme_encapsulation);
+}
+
+static DRIVER_ATTR_RO(support_nvme_encapsulation);
+
static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
{
sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n");
@@ -7801,6 +7784,7 @@ static int __init megasas_init(void)
support_poll_for_event = 2;
support_device_change = 1;
+ support_nvme_encapsulation = true;
memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
@@ -7850,8 +7834,17 @@ static int __init megasas_init(void)
if (rval)
goto err_dcf_support_device_change;
+ rval = driver_create_file(&megasas_pci_driver.driver,
+ &driver_attr_support_nvme_encapsulation);
+ if (rval)
+ goto err_dcf_support_nvme_encapsulation;
+
return rval;
+err_dcf_support_nvme_encapsulation:
+ driver_remove_file(&megasas_pci_driver.driver,
+ &driver_attr_support_device_change);
+
err_dcf_support_device_change:
driver_remove_file(&megasas_pci_driver.driver,
&driver_attr_dbg_lvl);
@@ -7884,6 +7877,8 @@ static void __exit megasas_exit(void)
driver_remove_file(&megasas_pci_driver.driver,
&driver_attr_release_date);
driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
+ driver_remove_file(&megasas_pci_driver.driver,
+ &driver_attr_support_nvme_encapsulation);
pci_unregister_driver(&megasas_pci_driver);
unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index bfad9bfc313f..59ecbb3b53b5 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -168,7 +168,7 @@ static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
/*
* This function will Populate Driver Map using firmware raid map
*/
-void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
+static int MR_PopulateDrvRaidMap(struct megasas_instance *instance, u64 map_id)
{
struct fusion_context *fusion = instance->ctrl_context;
struct MR_FW_RAID_MAP_ALL *fw_map_old = NULL;
@@ -181,7 +181,7 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
struct MR_DRV_RAID_MAP_ALL *drv_map =
- fusion->ld_drv_map[(instance->map_id & 1)];
+ fusion->ld_drv_map[(map_id & 1)];
struct MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;
void *raid_map_data = NULL;
@@ -190,7 +190,7 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
0xff, (sizeof(u16) * MAX_LOGICAL_DRIVES_DYN));
if (instance->max_raid_mapsize) {
- fw_map_dyn = fusion->ld_map[(instance->map_id & 1)];
+ fw_map_dyn = fusion->ld_map[(map_id & 1)];
desc_table =
(struct MR_RAID_MAP_DESC_TABLE *)((void *)fw_map_dyn + le32_to_cpu(fw_map_dyn->desc_table_offset));
if (desc_table != fw_map_dyn->raid_map_desc_table)
@@ -255,11 +255,11 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
} else if (instance->supportmax256vd) {
fw_map_ext =
- (struct MR_FW_RAID_MAP_EXT *)fusion->ld_map[(instance->map_id & 1)];
+ (struct MR_FW_RAID_MAP_EXT *)fusion->ld_map[(map_id & 1)];
ld_count = (u16)le16_to_cpu(fw_map_ext->ldCount);
if (ld_count > MAX_LOGICAL_DRIVES_EXT) {
dev_dbg(&instance->pdev->dev, "megaraid_sas: LD count exposed in RAID map is not valid\n");
- return;
+ return 1;
}
pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
@@ -282,9 +282,15 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
cpu_to_le32(sizeof(struct MR_FW_RAID_MAP_EXT));
} else {
fw_map_old = (struct MR_FW_RAID_MAP_ALL *)
- fusion->ld_map[(instance->map_id & 1)];
+ fusion->ld_map[(map_id & 1)];
pFwRaidMap = &fw_map_old->raidMap;
ld_count = (u16)le32_to_cpu(pFwRaidMap->ldCount);
+ if (ld_count > MAX_LOGICAL_DRIVES) {
+ dev_dbg(&instance->pdev->dev,
+ "LD count exposed in RAID map in not valid\n");
+ return 1;
+ }
+
pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;
@@ -300,12 +306,14 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
sizeof(struct MR_DEV_HANDLE_INFO) *
MAX_RAIDMAP_PHYSICAL_DEVICES);
}
+
+ return 0;
}
/*
* This function will validate Map info data provided by FW
*/
-u8 MR_ValidateMapInfo(struct megasas_instance *instance)
+u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id)
{
struct fusion_context *fusion;
struct MR_DRV_RAID_MAP_ALL *drv_map;
@@ -317,11 +325,11 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
u16 ld;
u32 expected_size;
-
- MR_PopulateDrvRaidMap(instance);
+ if (MR_PopulateDrvRaidMap(instance, map_id))
+ return 0;
fusion = instance->ctrl_context;
- drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
+ drv_map = fusion->ld_drv_map[(map_id & 1)];
pDrvRaidMap = &drv_map->raidMap;
lbInfo = fusion->load_balance_info;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 65dc4fea6352..073ced07e662 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -983,7 +983,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
MFI_CAPABILITIES *drv_ops;
u32 scratch_pad_2;
unsigned long flags;
- struct timeval tv;
+ ktime_t time;
bool cur_fw_64bit_dma_capable;
fusion = instance->ctrl_context;
@@ -1042,13 +1042,12 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
IOCInitMessage->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT;
- do_gettimeofday(&tv);
+ time = ktime_get_real();
/* Convert to milliseconds as per FW requirement */
- IOCInitMessage->TimeStamp = cpu_to_le64((tv.tv_sec * 1000) +
- (tv.tv_usec / 1000));
+ IOCInitMessage->TimeStamp = cpu_to_le64(ktime_to_ms(time));
init_frame = (struct megasas_init_frame *)cmd->frame;
- memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
+ memset(init_frame, 0, IOC_INIT_FRAME_SIZE);
frame_hdr = &cmd->frame->hdr;
frame_hdr->cmd_status = 0xFF;
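The timestamp conversion drops struct timeval and do_gettimeofday() for a single 64-bit ktime_t read converted via ktime_to_ms(), avoiding the separate seconds/microseconds arithmetic (and timeval's y2038 limits). A userspace analogue of the same conversion using clock_gettime():

#include <stdio.h>
#include <stdint.h>
#include <time.h>

/* Userspace analogue of ktime_get_real() + ktime_to_ms(): one 64-bit
 * read of wall-clock time, converted to milliseconds without the
 * seconds/microseconds split that struct timeval imposed. */
static uint64_t real_time_ms(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_REALTIME, &ts);
        return (uint64_t)ts.tv_sec * 1000 + (uint64_t)ts.tv_nsec / 1000000;
}

int main(void)
{
        printf("%llu ms since the epoch\n",
               (unsigned long long)real_time_ms());
        return 0;
}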
@@ -1080,6 +1079,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
drv_ops->mfi_capabilities.support_qd_throttling = 1;
drv_ops->mfi_capabilities.support_pd_map_target_id = 1;
+ drv_ops->mfi_capabilities.support_nvme_passthru = 1;
if (instance->consistent_mask_64bit)
drv_ops->mfi_capabilities.support_64bit_mode = 1;
@@ -1320,7 +1320,7 @@ megasas_get_map_info(struct megasas_instance *instance)
fusion->fast_path_io = 0;
if (!megasas_get_ld_map_info(instance)) {
- if (MR_ValidateMapInfo(instance)) {
+ if (MR_ValidateMapInfo(instance, instance->map_id)) {
fusion->fast_path_io = 1;
return 0;
}
@@ -1603,7 +1603,7 @@ static int megasas_alloc_ioc_init_frame(struct megasas_instance *instance)
fusion = instance->ctrl_context;
- cmd = kmalloc(sizeof(struct megasas_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(struct megasas_cmd), GFP_KERNEL);
if (!cmd) {
dev_err(&instance->pdev->dev, "Failed from func: %s line: %d\n",
@@ -2664,16 +2664,6 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
praid_context = &io_request->RaidContext;
if (instance->adapter_type == VENTURA_SERIES) {
- spin_lock_irqsave(&instance->stream_lock, spinlock_flags);
- megasas_stream_detect(instance, cmd, &io_info);
- spin_unlock_irqrestore(&instance->stream_lock, spinlock_flags);
- /* In ventura if stream detected for a read and it is read ahead
- * capable make this IO as LDIO
- */
- if (is_stream_detected(&io_request->RaidContext.raid_context_g35) &&
- io_info.isRead && io_info.ra_capable)
- fp_possible = false;
-
/* FP for Optimal raid level 1.
* All large RAID-1 writes (> 32 KiB, both WT and WB modes)
* are built by the driver as LD I/Os.
@@ -2699,6 +2689,20 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
}
}
+ if (!fp_possible ||
+ (io_info.isRead && io_info.ra_capable)) {
+ spin_lock_irqsave(&instance->stream_lock,
+ spinlock_flags);
+ megasas_stream_detect(instance, cmd, &io_info);
+ spin_unlock_irqrestore(&instance->stream_lock,
+ spinlock_flags);
+ /* In ventura if stream detected for a read and it is
+ * read ahead capable make this IO as LDIO
+ */
+ if (is_stream_detected(&io_request->RaidContext.raid_context_g35))
+ fp_possible = false;
+ }
+
/* If raid is NULL, set CPU affinity to default CPU0 */
if (raid)
megasas_set_raidflag_cpu_affinity(praid_context,
@@ -3953,6 +3957,8 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
u16 smid;
bool refire_cmd = 0;
+ u8 result;
+ u32 opcode = 0;
fusion = instance->ctrl_context;
@@ -3963,29 +3969,53 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
cmd_fusion = fusion->cmd_list[j];
cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
smid = le16_to_cpu(cmd_mfi->context.smid);
+ result = REFIRE_CMD;
if (!smid)
continue;
- /* Do not refire shutdown command */
- if (le32_to_cpu(cmd_mfi->frame->dcmd.opcode) ==
- MR_DCMD_CTRL_SHUTDOWN) {
- cmd_mfi->frame->dcmd.cmd_status = MFI_STAT_OK;
- megasas_complete_cmd(instance, cmd_mfi, DID_OK);
- continue;
+ req_desc = megasas_get_request_descriptor(instance, smid - 1);
+
+ switch (cmd_mfi->frame->hdr.cmd) {
+ case MFI_CMD_DCMD:
+ opcode = le32_to_cpu(cmd_mfi->frame->dcmd.opcode);
+ /* Do not refire shutdown command */
+ if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
+ cmd_mfi->frame->dcmd.cmd_status = MFI_STAT_OK;
+ result = COMPLETE_CMD;
+ break;
+ }
+
+ refire_cmd = ((opcode != MR_DCMD_LD_MAP_GET_INFO)) &&
+ (opcode != MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
+ !(cmd_mfi->flags & DRV_DCMD_SKIP_REFIRE);
+
+ if (!refire_cmd)
+ result = RETURN_CMD;
+
+ break;
+ case MFI_CMD_NVME:
+ if (!instance->support_nvme_passthru) {
+ cmd_mfi->frame->hdr.cmd_status = MFI_STAT_INVALID_CMD;
+ result = COMPLETE_CMD;
+ }
+
+ break;
+ default:
+ break;
}
- req_desc = megasas_get_request_descriptor
- (instance, smid - 1);
- refire_cmd = req_desc && ((cmd_mfi->frame->dcmd.opcode !=
- cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)) &&
- (cmd_mfi->frame->dcmd.opcode !=
- cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO)))
- && !(cmd_mfi->flags & DRV_DCMD_SKIP_REFIRE);
- if (refire_cmd)
+ switch (result) {
+ case REFIRE_CMD:
megasas_fire_cmd_fusion(instance, req_desc);
- else
+ break;
+ case RETURN_CMD:
megasas_return_cmd(instance, cmd_mfi);
+ break;
+ case COMPLETE_CMD:
+ megasas_complete_cmd(instance, cmd_mfi, DID_OK);
+ break;
+ }
}
}
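The refire rewrite separates classification from action: one switch derives a three-way result per command type, a second switch executes it, so adding a command type only extends the classifier. The shape of that pattern, reduced to a runnable sketch:

#include <stdio.h>

enum cmd_action { ACT_REFIRE, ACT_RETURN, ACT_COMPLETE };

struct cmd {
        int is_shutdown;
        int skip_refire;
};

/* Classify once; every command type funnels into three outcomes. */
static enum cmd_action classify(const struct cmd *c)
{
        if (c->is_shutdown)
                return ACT_COMPLETE;
        if (c->skip_refire)
                return ACT_RETURN;
        return ACT_REFIRE;
}

static void dispatch(const struct cmd *c)
{
        switch (classify(c)) {
        case ACT_REFIRE:
                printf("refire\n");
                break;
        case ACT_RETURN:
                printf("return\n");
                break;
        case ACT_COMPLETE:
                printf("complete\n");
                break;
        }
}

int main(void)
{
        struct cmd shutdown = { .is_shutdown = 1 };
        struct cmd normal = { 0 };

        dispatch(&shutdown);    /* complete */
        dispatch(&normal);      /* refire */
        return 0;
}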
@@ -4625,8 +4655,6 @@ transition_to_ready:
continue;
}
- megasas_refire_mgmt_cmd(instance);
-
if (megasas_get_ctrl_info(instance)) {
dev_info(&instance->pdev->dev,
"Failed from %s %d\n",
@@ -4635,6 +4663,9 @@ transition_to_ready:
retval = FAILED;
goto out;
}
+
+ megasas_refire_mgmt_cmd(instance);
+
/* Reset load balance info */
if (fusion->load_balance_info)
memset(fusion->load_balance_info, 0,
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index 1814d79cb98d..8e5ebee6517f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -1344,6 +1344,12 @@ union desc_value {
} u;
};
+enum CMD_RET_VALUES {
+ REFIRE_CMD = 1,
+ COMPLETE_CMD = 2,
+ RETURN_CMD = 3,
+};
+
void megasas_free_cmds_fusion(struct megasas_instance *instance);
int megasas_ioc_init_fusion(struct megasas_instance *instance);
u8 megasas_get_map_info(struct megasas_instance *instance);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 8027de465d47..13d6e4ec3022 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -888,6 +888,22 @@ _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
return 1;
}
+static struct scsiio_tracker *
+_get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ struct scsi_cmnd *cmd;
+
+ if (WARN_ON(!smid) ||
+ WARN_ON(smid >= ioc->hi_priority_smid))
+ return NULL;
+
+ cmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
+ if (cmd)
+ return scsi_cmd_priv(cmd);
+
+ return NULL;
+}
+
/**
* _base_get_cb_idx - obtain the callback index
* @ioc: per adapter object
@@ -899,19 +915,25 @@ static u8
_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
int i;
- u8 cb_idx;
+ u16 ctl_smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
+ u8 cb_idx = 0xFF;
if (smid < ioc->hi_priority_smid) {
- i = smid - 1;
- cb_idx = ioc->scsi_lookup[i].cb_idx;
+ struct scsiio_tracker *st;
+
+ if (smid < ctl_smid) {
+ st = _get_st_from_smid(ioc, smid);
+ if (st)
+ cb_idx = st->cb_idx;
+ } else if (smid == ctl_smid)
+ cb_idx = ioc->ctl_cb_idx;
} else if (smid < ioc->internal_smid) {
i = smid - ioc->hi_priority_smid;
cb_idx = ioc->hpr_lookup[i].cb_idx;
} else if (smid <= ioc->hba_queue_depth) {
i = smid - ioc->internal_smid;
cb_idx = ioc->internal_lookup[i].cb_idx;
- } else
- cb_idx = 0xFF;
+ }
return cb_idx;
}
@@ -1287,14 +1309,16 @@ _base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
/**
* _base_get_chain_buffer_tracker - obtain chain tracker
* @ioc: per adapter object
- * @smid: smid associated to an IO request
+ * @scmd: SCSI commands of the IO request
*
* Returns chain tracker(from ioc->free_chain_list)
*/
static struct chain_tracker *
-_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc,
+ struct scsi_cmnd *scmd)
{
struct chain_tracker *chain_req;
+ struct scsiio_tracker *st = scsi_cmd_priv(scmd);
unsigned long flags;
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
@@ -1307,8 +1331,7 @@ _base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc, u16 smid)
chain_req = list_entry(ioc->free_chain_list.next,
struct chain_tracker, tracker_list);
list_del_init(&chain_req->tracker_list);
- list_add_tail(&chain_req->tracker_list,
- &ioc->scsi_lookup[smid - 1].chain_list);
+ list_add_tail(&chain_req->tracker_list, &st->chain_list);
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
return chain_req;
}
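_base_get_chain_buffer_tracker() now reaches the tracker through scsi_cmd_priv(), i.e. per-command private storage co-allocated with the command by the midlayer, rather than a driver-global smid-indexed table. A stand-alone sketch of that co-allocation idea (not the midlayer's actual layout):

#include <stdlib.h>

/* The command and its driver-private tracker live in one block, so
 * finding the tracker is pointer math, not a global table lookup. */
struct cmnd {
        int id;
        /* driver-private area follows the command */
};

struct tracker {
        int cb_idx;
};

static struct cmnd *alloc_cmnd(void)
{
        return calloc(1, sizeof(struct cmnd) + sizeof(struct tracker));
}

static struct tracker *cmd_priv(struct cmnd *c)
{
        return (struct tracker *)(c + 1);
}

int main(void)
{
        struct cmnd *c = alloc_cmnd();

        if (!c)
                return 1;
        cmd_priv(c)->cb_idx = 7;        /* no smid-indexed array needed */
        free(c);
        return 0;
}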
@@ -1923,7 +1946,7 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
/* initializing the chain flags and pointers */
chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
- chain_req = _base_get_chain_buffer_tracker(ioc, smid);
+ chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
if (!chain_req)
return -1;
chain = chain_req->chain_buffer;
@@ -1963,7 +1986,7 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
sges_in_segment--;
}
- chain_req = _base_get_chain_buffer_tracker(ioc, smid);
+ chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
if (!chain_req)
return -1;
chain = chain_req->chain_buffer;
@@ -2066,7 +2089,7 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
}
/* initializing the pointers */
- chain_req = _base_get_chain_buffer_tracker(ioc, smid);
+ chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
if (!chain_req)
return -1;
chain = chain_req->chain_buffer;
@@ -2097,7 +2120,7 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
sges_in_segment--;
}
- chain_req = _base_get_chain_buffer_tracker(ioc, smid);
+ chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
if (!chain_req)
return -1;
chain = chain_req->chain_buffer;
@@ -2742,7 +2765,7 @@ mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
void *
mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
- return (void *)(ioc->scsi_lookup[smid - 1].pcie_sg_list.pcie_sgl);
+ return (void *)(ioc->pcie_sg_lookup[smid - 1].pcie_sgl);
}
/**
@@ -2755,7 +2778,7 @@ mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
dma_addr_t
mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
- return ioc->scsi_lookup[smid - 1].pcie_sg_list.pcie_sgl_dma;
+ return ioc->pcie_sg_lookup[smid - 1].pcie_sgl_dma;
}
/**
@@ -2822,26 +2845,15 @@ u16
mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
struct scsi_cmnd *scmd)
{
- unsigned long flags;
- struct scsiio_tracker *request;
+ struct scsiio_tracker *request = scsi_cmd_priv(scmd);
+ unsigned int tag = scmd->request->tag;
u16 smid;
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- if (list_empty(&ioc->free_list)) {
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- pr_err(MPT3SAS_FMT "%s: smid not available\n",
- ioc->name, __func__);
- return 0;
- }
-
- request = list_entry(ioc->free_list.next,
- struct scsiio_tracker, tracker_list);
- request->scmd = scmd;
+ smid = tag + 1;
request->cb_idx = cb_idx;
- smid = request->smid;
request->msix_io = _base_get_msix_index(ioc);
- list_del(&request->tracker_list);
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ request->smid = smid;
+ INIT_LIST_HEAD(&request->chain_list);
return smid;
}
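With blk-mq, every in-flight request already carries a unique zero-based tag, so the driver's lock-protected free list disappears and the SMID becomes tag + 1, keeping SMID 0 reserved. Assuming dense zero-based tags, the mapping is trivially invertible:

#include <assert.h>

/* SMID 0 is reserved, so smid = tag + 1 yields a collision-free,
 * 1-based message id with no free list and no lock. */
static unsigned int tag_to_smid(unsigned int tag)
{
        return tag + 1;
}

static unsigned int smid_to_tag(unsigned int smid)
{
        assert(smid != 0);      /* 0 never names a request */
        return smid - 1;
}

int main(void)
{
        assert(tag_to_smid(0) == 1);
        assert(smid_to_tag(tag_to_smid(41)) == 41);
        return 0;
}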
@@ -2874,6 +2886,35 @@ mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
return smid;
}
+static void
+_base_recovery_check(struct MPT3SAS_ADAPTER *ioc)
+{
+ /*
+ * See _wait_for_commands_to_complete() call with regard to this code.
+ */
+ if (ioc->shost_recovery && ioc->pending_io_count) {
+ ioc->pending_io_count = atomic_read(&ioc->shost->host_busy);
+ if (ioc->pending_io_count == 0)
+ wake_up(&ioc->reset_wq);
+ }
+}
+
+void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
+ struct scsiio_tracker *st)
+{
+ if (WARN_ON(st->smid == 0))
+ return;
+ st->cb_idx = 0xFF;
+ st->direct_io = 0;
+ if (!list_empty(&st->chain_list)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ list_splice_init(&st->chain_list, &ioc->free_chain_list);
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ }
+}
+
/**
* mpt3sas_base_free_smid - put smid back on free_list
* @ioc: per adapter object
@@ -2886,37 +2927,22 @@ mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
unsigned long flags;
int i;
- struct chain_tracker *chain_req, *next;
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
if (smid < ioc->hi_priority_smid) {
- /* scsiio queue */
- i = smid - 1;
- if (!list_empty(&ioc->scsi_lookup[i].chain_list)) {
- list_for_each_entry_safe(chain_req, next,
- &ioc->scsi_lookup[i].chain_list, tracker_list) {
- list_del_init(&chain_req->tracker_list);
- list_add(&chain_req->tracker_list,
- &ioc->free_chain_list);
- }
- }
- ioc->scsi_lookup[i].cb_idx = 0xFF;
- ioc->scsi_lookup[i].scmd = NULL;
- ioc->scsi_lookup[i].direct_io = 0;
- list_add(&ioc->scsi_lookup[i].tracker_list, &ioc->free_list);
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ struct scsiio_tracker *st;
- /*
- * See _wait_for_commands_to_complete() call with regards
- * to this code.
- */
- if (ioc->shost_recovery && ioc->pending_io_count) {
- if (ioc->pending_io_count == 1)
- wake_up(&ioc->reset_wq);
- ioc->pending_io_count--;
+ st = _get_st_from_smid(ioc, smid);
+ if (!st) {
+ _base_recovery_check(ioc);
+ return;
}
+ mpt3sas_base_clear_st(ioc, st);
+ _base_recovery_check(ioc);
return;
- } else if (smid < ioc->internal_smid) {
+ }
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ if (smid < ioc->internal_smid) {
/* hi-priority */
i = smid - ioc->hi_priority_smid;
ioc->hpr_lookup[i].cb_idx = 0xFF;
@@ -3789,13 +3815,12 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
if (ioc->pcie_sgl_dma_pool) {
for (i = 0; i < ioc->scsiio_depth; i++) {
- if (ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl)
- pci_pool_free(ioc->pcie_sgl_dma_pool,
- ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl,
- ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl_dma);
+ dma_pool_free(ioc->pcie_sgl_dma_pool,
+ ioc->pcie_sg_lookup[i].pcie_sgl,
+ ioc->pcie_sg_lookup[i].pcie_sgl_dma);
}
if (ioc->pcie_sgl_dma_pool)
- pci_pool_destroy(ioc->pcie_sgl_dma_pool);
+ dma_pool_destroy(ioc->pcie_sgl_dma_pool);
}
if (ioc->config_page) {
@@ -3806,10 +3831,6 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->config_page, ioc->config_page_dma);
}
- if (ioc->scsi_lookup) {
- free_pages((ulong)ioc->scsi_lookup, ioc->scsi_lookup_pages);
- ioc->scsi_lookup = NULL;
- }
kfree(ioc->hpr_lookup);
kfree(ioc->internal_lookup);
if (ioc->chain_lookup) {
@@ -4110,16 +4131,6 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->name, (unsigned long long) ioc->request_dma));
total_sz += sz;
- sz = ioc->scsiio_depth * sizeof(struct scsiio_tracker);
- ioc->scsi_lookup_pages = get_order(sz);
- ioc->scsi_lookup = (struct scsiio_tracker *)__get_free_pages(
- GFP_KERNEL, ioc->scsi_lookup_pages);
- if (!ioc->scsi_lookup) {
- pr_err(MPT3SAS_FMT "scsi_lookup: get_free_pages failed, sz(%d)\n",
- ioc->name, (int)sz);
- goto out;
- }
-
dinitprintk(ioc, pr_info(MPT3SAS_FMT "scsiio(0x%p): depth(%d)\n",
ioc->name, ioc->request, ioc->scsiio_depth));
@@ -4202,23 +4213,29 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE);
nvme_blocks_needed++;
+ sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth;
+ ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL);
+ if (!ioc->pcie_sg_lookup) {
+ pr_info(MPT3SAS_FMT
+ "PCIe SGL lookup: kzalloc failed\n", ioc->name);
+ goto out;
+ }
sz = nvme_blocks_needed * ioc->page_size;
ioc->pcie_sgl_dma_pool =
- pci_pool_create("PCIe SGL pool", ioc->pdev, sz, 16, 0);
+ dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0);
if (!ioc->pcie_sgl_dma_pool) {
pr_info(MPT3SAS_FMT
- "PCIe SGL pool: pci_pool_create failed\n",
+ "PCIe SGL pool: dma_pool_create failed\n",
ioc->name);
goto out;
}
for (i = 0; i < ioc->scsiio_depth; i++) {
- ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl =
- pci_pool_alloc(ioc->pcie_sgl_dma_pool,
- GFP_KERNEL,
- &ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl_dma);
- if (!ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl) {
+ ioc->pcie_sg_lookup[i].pcie_sgl = dma_pool_alloc(
+ ioc->pcie_sgl_dma_pool, GFP_KERNEL,
+ &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
+ if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
pr_info(MPT3SAS_FMT
- "PCIe SGL pool: pci_pool_alloc failed\n",
+ "PCIe SGL pool: dma_pool_alloc failed\n",
ioc->name);
goto out;
}
@@ -5766,19 +5783,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
kfree(delayed_event_ack);
}
- /* initialize the scsi lookup free list */
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- INIT_LIST_HEAD(&ioc->free_list);
- smid = 1;
- for (i = 0; i < ioc->scsiio_depth; i++, smid++) {
- INIT_LIST_HEAD(&ioc->scsi_lookup[i].chain_list);
- ioc->scsi_lookup[i].cb_idx = 0xFF;
- ioc->scsi_lookup[i].smid = smid;
- ioc->scsi_lookup[i].scmd = NULL;
- ioc->scsi_lookup[i].direct_io = 0;
- list_add_tail(&ioc->scsi_lookup[i].tracker_list,
- &ioc->free_list);
- }
/* hi-priority queue */
INIT_LIST_HEAD(&ioc->hpr_free_list);
@@ -6292,15 +6297,13 @@ _base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
* _wait_for_commands_to_complete - reset controller
* @ioc: Pointer to MPT_ADAPTER structure
*
- * This function waiting(3s) for all pending commands to complete
+ * This function waits 10s for all pending commands to complete
* prior to putting controller in reset.
*/
static void
_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
{
u32 ioc_state;
- unsigned long flags;
- u16 i;
ioc->pending_io_count = 0;
@@ -6309,11 +6312,7 @@ _wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
return;
/* pending command count */
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- for (i = 0; i < ioc->scsiio_depth; i++)
- if (ioc->scsi_lookup[i].cb_idx != 0xFF)
- ioc->pending_io_count++;
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ ioc->pending_io_count = atomic_read(&ioc->shost->host_busy);
if (!ioc->pending_io_count)
return;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 60f42ca3954f..789bc421424b 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -772,20 +772,17 @@ struct chain_tracker {
/**
* struct scsiio_tracker - scsi mf request tracker
* @smid: system message id
- * @scmd: scsi request pointer
* @cb_idx: callback index
* @direct_io: To indicate whether I/O is direct (WARPDRIVE)
- * @tracker_list: list of free request (ioc->free_list)
+ * @chain_list: list of associated firmware chain trackers
* @msix_io: IO's msix
*/
struct scsiio_tracker {
u16 smid;
- struct scsi_cmnd *scmd;
u8 cb_idx;
u8 direct_io;
struct pcie_sg_list pcie_sg_list;
struct list_head chain_list;
- struct list_head tracker_list;
u16 msix_io;
};
@@ -1248,10 +1245,8 @@ struct MPT3SAS_ADAPTER {
u8 *request;
dma_addr_t request_dma;
u32 request_dma_sz;
- struct scsiio_tracker *scsi_lookup;
- ulong scsi_lookup_pages;
+ struct pcie_sg_list *pcie_sg_lookup;
spinlock_t scsi_lookup_lock;
- struct list_head free_list;
int pending_io_count;
wait_queue_head_t reset_wq;
@@ -1270,6 +1265,7 @@ struct MPT3SAS_ADAPTER {
u16 chains_needed_per_io;
u32 chain_depth;
u16 chain_segment_sz;
+ u16 chains_per_prp_buffer;
/* hi-priority queue */
u16 hi_priority_smid;
@@ -1401,7 +1397,9 @@ void mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc);
/* hi-priority queue */
u16 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx);
u16 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
- struct scsi_cmnd *scmd);
+ struct scsi_cmnd *scmd);
+void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
+ struct scsiio_tracker *st);
u16 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx);
void mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid);
@@ -1437,16 +1435,16 @@ int mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc);
/* scsih shared API */
+struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc,
+ u16 smid);
u8 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
u32 reply);
void mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase);
int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
- uint channel, uint id, uint lun, u8 type, u16 smid_task,
- ulong timeout);
+ u64 lun, u8 type, u16 smid_task, u16 msix_task, ulong timeout);
int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
- uint channel, uint id, uint lun, u8 type, u16 smid_task,
- ulong timeout);
+ u64 lun, u8 type, u16 smid_task, u16 msix_task, ulong timeout);
void mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
@@ -1613,14 +1611,9 @@ void mpt3sas_trigger_mpi(struct MPT3SAS_ADAPTER *ioc, u16 ioc_status,
u8 mpt3sas_get_num_volumes(struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc,
struct _raid_device *raid_device);
-u8
-mpt3sas_scsi_direct_io_get(struct MPT3SAS_ADAPTER *ioc, u16 smid);
-void
-mpt3sas_scsi_direct_io_set(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 direct_io);
void
mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
- struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request,
- u16 smid);
+ struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request);
/* NCQ Prio Handling Check */
bool scsih_ncq_prio_supp(struct scsi_device *sdev);
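
The reworked task-management prototypes drop the OS-level channel/id pair
(the firmware addresses devices by handle alone), widen the LUN to u64 to
match the midlayer, and make the caller pass msix_task explicitly. A call
now looks as the abort path later in this patch issues it:

	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
	    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
	    st->smid, st->msix_io, 30);
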
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index b4c374b08e5e..9cddc3074cd1 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -534,7 +534,7 @@ _ctl_fasync(int fd, struct file *filep, int mode)
* @wait -
*
*/
-static unsigned int
+static __poll_t
_ctl_poll(struct file *filep, poll_table *wait)
{
struct MPT3SAS_ADAPTER *ioc;
@@ -567,11 +567,10 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
Mpi2SCSITaskManagementRequest_t *tm_request)
{
u8 found = 0;
- u16 i;
+ u16 smid;
u16 handle;
struct scsi_cmnd *scmd;
struct MPT3SAS_DEVICE *priv_data;
- unsigned long flags;
Mpi2SCSITaskManagementReply_t *tm_reply;
u32 sz;
u32 lun;
@@ -587,11 +586,11 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN);
handle = le16_to_cpu(tm_request->DevHandle);
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- for (i = ioc->scsiio_depth; i && !found; i--) {
- scmd = ioc->scsi_lookup[i - 1].scmd;
- if (scmd == NULL || scmd->device == NULL ||
- scmd->device->hostdata == NULL)
+ for (smid = ioc->scsiio_depth; smid && !found; smid--) {
+ struct scsiio_tracker *st;
+
+ scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
+ if (!scmd)
continue;
if (lun != scmd->device->lun)
continue;
@@ -600,10 +599,10 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
continue;
if (priv_data->sas_target->handle != handle)
continue;
- tm_request->TaskMID = cpu_to_le16(ioc->scsi_lookup[i - 1].smid);
+ st = scsi_cmd_priv(scmd);
+ tm_request->TaskMID = cpu_to_le16(st->smid);
found = 1;
}
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
if (!found) {
dctlprintk(ioc, pr_info(MPT3SAS_FMT
@@ -724,14 +723,8 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
goto out;
}
} else {
-
- smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->ctl_cb_idx, NULL);
- if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
- ret = -EAGAIN;
- goto out;
- }
+ /* Use first reserved smid for passthrough ioctls */
+ smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
}
ret = 0;
@@ -1081,8 +1074,8 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
le16_to_cpu(mpi_request->FunctionDependent1));
mpt3sas_halt_firmware(ioc);
mpt3sas_scsih_issue_locked_tm(ioc,
- le16_to_cpu(mpi_request->FunctionDependent1), 0, 0,
- 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30);
+ le16_to_cpu(mpi_request->FunctionDependent1), 0,
+ MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0, 30);
} else
mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
}
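
With no scmd to pin a block-layer tag, passthrough ioctls now borrow the
first of the INTERNAL_SCSIIO_CMDS_COUNT smids reserved above what the
midlayer can dispatch. As a worked example with hypothetical values, a
scsiio_depth of 1000 and INTERNAL_SCSIIO_CMDS_COUNT of 3 would reserve
smids 998..1000 and hand the ioctl path smid 998.
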
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index b258f210120a..74fca184dba9 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -1445,156 +1445,31 @@ _scsih_is_nvme_device(u32 device_info)
}
/**
- * _scsih_scsi_lookup_get - returns scmd entry
- * @ioc: per adapter object
- * @smid: system request message index
- *
- * Returns the smid stored scmd pointer.
- */
-static struct scsi_cmnd *
-_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
-{
- return ioc->scsi_lookup[smid - 1].scmd;
-}
-
-/**
- * __scsih_scsi_lookup_get_clear - returns scmd entry without
- * holding any lock.
+ * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
* @ioc: per adapter object
* @smid: system request message index
*
* Returns the scmd pointer stored against @smid, or NULL when the
* entry is not in use.
*/
-static inline struct scsi_cmnd *
-__scsih_scsi_lookup_get_clear(struct MPT3SAS_ADAPTER *ioc,
- u16 smid)
+struct scsi_cmnd *
+mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
struct scsi_cmnd *scmd = NULL;
+ struct scsiio_tracker *st;
- swap(scmd, ioc->scsi_lookup[smid - 1].scmd);
-
- return scmd;
-}
-
-/**
- * _scsih_scsi_lookup_get_clear - returns scmd entry
- * @ioc: per adapter object
- * @smid: system request message index
- *
- * Returns the smid stored scmd pointer.
- * Then will derefrence the stored scmd pointer.
- */
-static inline struct scsi_cmnd *
-_scsih_scsi_lookup_get_clear(struct MPT3SAS_ADAPTER *ioc, u16 smid)
-{
- unsigned long flags;
- struct scsi_cmnd *scmd;
-
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- scmd = __scsih_scsi_lookup_get_clear(ioc, smid);
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
-
- return scmd;
-}
+ if (smid > 0 &&
+ smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
+ u32 unique_tag = smid - 1;
-/**
- * _scsih_scsi_lookup_find_by_scmd - scmd lookup
- * @ioc: per adapter object
- * @smid: system request message index
- * @scmd: pointer to scsi command object
- * Context: This function will acquire ioc->scsi_lookup_lock.
- *
- * This will search for a scmd pointer in the scsi_lookup array,
- * returning the revelent smid. A returned value of zero means invalid.
- */
-static u16
-_scsih_scsi_lookup_find_by_scmd(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd
- *scmd)
-{
- u16 smid;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- smid = 0;
- for (i = 0; i < ioc->scsiio_depth; i++) {
- if (ioc->scsi_lookup[i].scmd == scmd) {
- smid = ioc->scsi_lookup[i].smid;
- goto out;
- }
- }
- out:
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- return smid;
-}
-
-/**
- * _scsih_scsi_lookup_find_by_target - search for matching channel:id
- * @ioc: per adapter object
- * @id: target id
- * @channel: channel
- * Context: This function will acquire ioc->scsi_lookup_lock.
- *
- * This will search for a matching channel:id in the scsi_lookup array,
- * returning 1 if found.
- */
-static u8
-_scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
- int channel)
-{
- u8 found;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- found = 0;
- for (i = 0 ; i < ioc->scsiio_depth; i++) {
- if (ioc->scsi_lookup[i].scmd &&
- (ioc->scsi_lookup[i].scmd->device->id == id &&
- ioc->scsi_lookup[i].scmd->device->channel == channel)) {
- found = 1;
- goto out;
- }
- }
- out:
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- return found;
-}
-
-/**
- * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
- * @ioc: per adapter object
- * @id: target id
- * @lun: lun number
- * @channel: channel
- * Context: This function will acquire ioc->scsi_lookup_lock.
- *
- * This will search for a matching channel:id:lun in the scsi_lookup array,
- * returning 1 if found.
- */
-static u8
-_scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
- unsigned int lun, int channel)
-{
- u8 found;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- found = 0;
- for (i = 0 ; i < ioc->scsiio_depth; i++) {
- if (ioc->scsi_lookup[i].scmd &&
- (ioc->scsi_lookup[i].scmd->device->id == id &&
- ioc->scsi_lookup[i].scmd->device->channel == channel &&
- ioc->scsi_lookup[i].scmd->device->lun == lun)) {
- found = 1;
- goto out;
+ scmd = scsi_host_find_tag(ioc->shost, unique_tag);
+ if (scmd) {
+ st = scsi_cmd_priv(scmd);
+ if (st->cb_idx == 0xFF)
+ scmd = NULL;
}
}
- out:
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- return found;
+ return scmd;
}
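
The exported lookup resolves a smid through scsi_host_find_tag() using the
smid - 1 == block-layer tag convention, then rejects trackers whose cb_idx
is 0xFF, i.e. tags the block layer knows about but the driver has not
dispatched. The typical caller shape, as used throughout this patch:

	scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
	if (scmd) {
		struct scsiio_tracker *st = scsi_cmd_priv(scmd);
		/* st->smid, st->msix_io, st->direct_io are valid here */
	}
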
/**
@@ -2727,32 +2602,30 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
/**
* mpt3sas_scsih_issue_tm - main routine for sending tm requests
* @ioc: per adapter struct
- * @device_handle: device handle
- * @channel: the channel assigned by the OS
- * @id: the id assigned by the OS
+ * @handle: device handle
* @lun: lun number
* @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
* @smid_task: smid assigned to the task
+ * @msix_task: MSIX table index supplied by the OS
* @timeout: timeout in seconds
* Context: user
*
* A generic API for sending task management requests to firmware.
*
* The callback index is set inside `ioc->tm_cb_idx`.
+ * The caller is responsible for checking for outstanding commands.
*
* Return SUCCESS or FAILED.
*/
int
-mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
- uint id, uint lun, u8 type, u16 smid_task, ulong timeout)
+mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ u64 lun, u8 type, u16 smid_task, u16 msix_task, ulong timeout)
{
Mpi2SCSITaskManagementRequest_t *mpi_request;
Mpi2SCSITaskManagementReply_t *mpi_reply;
u16 smid = 0;
u32 ioc_state;
- struct scsiio_tracker *scsi_lookup = NULL;
int rc;
- u16 msix_task = 0;
lockdep_assert_held(&ioc->tm_cmds.mutex);
@@ -2791,9 +2664,6 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
return FAILED;
}
- if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
- scsi_lookup = &ioc->scsi_lookup[smid_task - 1];
-
dtmprintk(ioc, pr_info(MPT3SAS_FMT
"sending tm: handle(0x%04x), task_type(0x%02x), smid(%d)\n",
ioc->name, handle, type, smid_task));
@@ -2809,11 +2679,6 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
mpt3sas_scsih_set_tm_flag(ioc, handle);
init_completion(&ioc->tm_cmds.done);
- if ((type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) &&
- (scsi_lookup->msix_io < ioc->reply_queue_count))
- msix_task = scsi_lookup->msix_io;
- else
- msix_task = 0;
ioc->put_smid_hi_priority(ioc, smid, msix_task);
wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -2847,35 +2712,7 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
sizeof(Mpi2SCSITaskManagementRequest_t)/4);
}
}
-
- switch (type) {
- case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
- rc = SUCCESS;
- if (scsi_lookup->scmd == NULL)
- break;
- rc = FAILED;
- break;
-
- case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
- if (_scsih_scsi_lookup_find_by_target(ioc, id, channel))
- rc = FAILED;
- else
- rc = SUCCESS;
- break;
- case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
- case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
- if (_scsih_scsi_lookup_find_by_lun(ioc, id, lun, channel))
- rc = FAILED;
- else
- rc = SUCCESS;
- break;
- case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
- rc = SUCCESS;
- break;
- default:
- rc = FAILED;
- break;
- }
+ rc = SUCCESS;
out:
mpt3sas_scsih_clear_tm_flag(ioc, handle);
@@ -2884,13 +2721,13 @@ out:
}
int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
- uint channel, uint id, uint lun, u8 type, u16 smid_task, ulong timeout)
+ u64 lun, u8 type, u16 smid_task, u16 msix_task, ulong timeout)
{
int ret;
mutex_lock(&ioc->tm_cmds.mutex);
- ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
- smid_task, timeout);
+ ret = mpt3sas_scsih_issue_tm(ioc, handle, lun, type, smid_task,
+ msix_task, timeout);
mutex_unlock(&ioc->tm_cmds.mutex);
return ret;
@@ -2989,7 +2826,7 @@ scsih_abort(struct scsi_cmnd *scmd)
{
struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
struct MPT3SAS_DEVICE *sas_device_priv_data;
- u16 smid;
+ struct scsiio_tracker *st = scsi_cmd_priv(scmd);
u16 handle;
int r;
@@ -3007,9 +2844,8 @@ scsih_abort(struct scsi_cmnd *scmd)
goto out;
}
- /* search for the command */
- smid = _scsih_scsi_lookup_find_by_scmd(ioc, scmd);
- if (!smid) {
+ /* check for completed command */
+ if (st == NULL || st->cb_idx == 0xFF) {
scmd->result = DID_RESET << 16;
r = SUCCESS;
goto out;
@@ -3027,10 +2863,12 @@ scsih_abort(struct scsi_cmnd *scmd)
mpt3sas_halt_firmware(ioc);
handle = sas_device_priv_data->sas_target->handle;
- r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
- scmd->device->id, scmd->device->lun,
- MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30);
-
+ r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
+ st->smid, st->msix_io, 30);
+ /* Command must be cleared after abort */
+ if (r == SUCCESS && st->cb_idx != 0xFF)
+ r = FAILED;
out:
sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
@@ -3086,10 +2924,11 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
goto out;
}
- r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
- scmd->device->id, scmd->device->lun,
- MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30);
-
+ r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0, 30);
+ /* Check for busy commands after reset */
+ if (r == SUCCESS && atomic_read(&scmd->device->device_busy))
+ r = FAILED;
out:
sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
@@ -3148,10 +2987,11 @@ scsih_target_reset(struct scsi_cmnd *scmd)
goto out;
}
- r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
- scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
- 30);
-
+ r = mpt3sas_scsih_issue_locked_tm(ioc, handle, 0,
+ MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0, 30);
+ /* Check for busy commands after reset */
+ if (r == SUCCESS && atomic_read(&starget->target_busy))
+ r = FAILED;
out:
starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
@@ -4600,16 +4440,18 @@ static void
_scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
{
struct scsi_cmnd *scmd;
+ struct scsiio_tracker *st;
u16 smid;
- u16 count = 0;
+ int count = 0;
for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
- scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
+ scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
if (!scmd)
continue;
count++;
_scsih_set_satl_pending(scmd, false);
- mpt3sas_base_free_smid(ioc, smid);
+ st = scsi_cmd_priv(scmd);
+ mpt3sas_base_clear_st(ioc, st);
scsi_dma_unmap(scmd);
if (ioc->pci_error_recovery)
scmd->result = DID_NO_CONNECT << 16;
@@ -4758,19 +4600,6 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
return 0;
}
- /*
- * Bug work around for firmware SATL handling. The loop
- * is based on atomic operations and ensures consistency
- * since we're lockless at this point
- */
- do {
- if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
- scmd->result = SAM_STAT_BUSY;
- scmd->scsi_done(scmd);
- return 0;
- }
- } while (_scsih_set_satl_pending(scmd, true));
-
sas_target_priv_data = sas_device_priv_data->sas_target;
/* invalid device handle */
@@ -4796,6 +4625,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
sas_device_priv_data->block)
return SCSI_MLQUEUE_DEVICE_BUSY;
+ /*
+ * Bug work around for firmware SATL handling. The loop
+ * is based on atomic operations and ensures consistency
+ * since we're lockless at this point
+ */
+ do {
+ if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
+ scmd->result = SAM_STAT_BUSY;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+ } while (_scsih_set_satl_pending(scmd, true));
+
if (scmd->sc_data_direction == DMA_FROM_DEVICE)
mpi_control = MPI2_SCSIIO_CONTROL_READ;
else if (scmd->sc_data_direction == DMA_TO_DEVICE)
@@ -4823,6 +4665,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
if (!smid) {
pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
ioc->name, __func__);
+ _scsih_set_satl_pending(scmd, false);
goto out;
}
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
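
Moving the SATL workaround below the sanity checks means the pending bit is
only set once a command will really be issued, and the two new
_scsih_set_satl_pending(scmd, false) calls clear it on the error paths that
follow. A sketch of the gate's presumed shape, serializing ATA passthrough
per device with an atomic bit instead of a lock (the helper body is assumed,
it is not shown in this patch):

static inline bool
_scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
{
	struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;

	if (pending)
		/* true means the bit was already set, so the caller loops */
		return test_and_set_bit(0, &priv->ata_command_pending);
	clear_bit(0, &priv->ata_command_pending);
	return false;
}
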
@@ -4854,6 +4697,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
pcie_device = sas_target_priv_data->pcie_dev;
if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
mpt3sas_base_free_smid(ioc, smid);
+ _scsih_set_satl_pending(scmd, false);
goto out;
}
} else
@@ -4862,7 +4706,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
raid_device = sas_target_priv_data->raid_device;
if (raid_device && raid_device->direct_io_enabled)
mpt3sas_setup_direct_io(ioc, scmd,
- raid_device, mpi_request, smid);
+ raid_device, mpi_request);
if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
@@ -5330,6 +5174,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
Mpi25SCSIIORequest_t *mpi_request;
Mpi2SCSIIOReply_t *mpi_reply;
struct scsi_cmnd *scmd;
+ struct scsiio_tracker *st;
u16 ioc_status;
u32 xfer_cnt;
u8 scsi_state;
@@ -5337,16 +5182,10 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
u32 log_info;
struct MPT3SAS_DEVICE *sas_device_priv_data;
u32 response_code = 0;
- unsigned long flags;
mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
- if (ioc->broadcast_aen_busy || ioc->pci_error_recovery ||
- ioc->got_task_abort_from_ioctl)
- scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
- else
- scmd = __scsih_scsi_lookup_get_clear(ioc, smid);
-
+ scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
if (scmd == NULL)
return 1;
@@ -5371,13 +5210,11 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
* WARPDRIVE: If direct_io is set then it is directIO,
* the failed direct I/O should be redirected to volume
*/
- if (mpt3sas_scsi_direct_io_get(ioc, smid) &&
+ st = scsi_cmd_priv(scmd);
+ if (st->direct_io &&
((ioc_status & MPI2_IOCSTATUS_MASK)
!= MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
- spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- ioc->scsi_lookup[smid - 1].scmd = scmd;
- spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- mpt3sas_scsi_direct_io_set(ioc, smid, 0);
+ st->direct_io = 0;
memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
mpi_request->DevHandle =
cpu_to_le16(sas_device_priv_data->sas_target->handle);
@@ -5555,9 +5392,9 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
out:
scsi_dma_unmap(scmd);
-
+ mpt3sas_base_free_smid(ioc, smid);
scmd->scsi_done(scmd);
- return 1;
+ return 0;
}
/**
@@ -7211,7 +7048,7 @@ _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
* Context: user.
*
*/
-static int
+static void
_scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
struct fw_event_work *fw_event)
{
@@ -7221,7 +7058,6 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
u8 link_rate, prev_link_rate;
unsigned long flags;
int rc;
- int requeue_event;
Mpi26EventDataPCIeTopologyChangeList_t *event_data =
(Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
struct _pcie_device *pcie_device;
@@ -7231,12 +7067,12 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
if (ioc->shost_recovery || ioc->remove_host ||
ioc->pci_error_recovery)
- return 0;
+ return;
if (fw_event->ignore) {
dewtprintk(ioc, pr_info(MPT3SAS_FMT "ignoring switch event\n",
ioc->name));
- return 0;
+ return;
}
/* handle siblings events */
@@ -7244,10 +7080,10 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
if (fw_event->ignore) {
dewtprintk(ioc, pr_info(MPT3SAS_FMT
"ignoring switch event\n", ioc->name));
- return 0;
+ return;
}
if (ioc->remove_host || ioc->pci_error_recovery)
- return 0;
+ return;
reason_code = event_data->PortEntry[i].PortStatus;
handle =
le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
@@ -7316,7 +7152,6 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
break;
}
}
- return requeue_event;
}
/**
@@ -7502,6 +7337,7 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
{
struct scsi_cmnd *scmd;
struct scsi_device *sdev;
+ struct scsiio_tracker *st;
u16 smid, handle;
u32 lun;
struct MPT3SAS_DEVICE *sas_device_priv_data;
@@ -7543,9 +7379,10 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
if (ioc->shost_recovery)
goto out;
- scmd = _scsih_scsi_lookup_get(ioc, smid);
+ scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
if (!scmd)
continue;
+ st = scsi_cmd_priv(scmd);
sdev = scmd->device;
sas_device_priv_data = sdev->hostdata;
if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
@@ -7567,8 +7404,9 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
goto out;
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
- MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30);
+ r = mpt3sas_scsih_issue_tm(ioc, handle, lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
+ st->msix_io, 30);
if (r == FAILED) {
sdev_printk(KERN_WARNING, sdev,
"mpt3sas_scsih_issue_tm: FAILED when sending "
@@ -7607,10 +7445,10 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
if (ioc->shost_recovery)
goto out_no_lock;
- r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
- sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid,
- 30);
- if (r == FAILED) {
+ r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, st->smid,
+ st->msix_io, 30);
+ if (r == FAILED || st->cb_idx != 0xFF) {
sdev_printk(KERN_WARNING, sdev,
"mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
"scmd(%p)\n", scmd);
@@ -10416,6 +10254,7 @@ static struct scsi_host_template mpt2sas_driver_template = {
.shost_attrs = mpt3sas_host_attrs,
.sdev_attrs = mpt3sas_dev_attrs,
.track_queue_depth = 1,
+ .cmd_size = sizeof(struct scsiio_tracker),
};
/* raid transport support for SAS 2.0 HBA devices */
@@ -10454,6 +10293,7 @@ static struct scsi_host_template mpt3sas_driver_template = {
.shost_attrs = mpt3sas_host_attrs,
.sdev_attrs = mpt3sas_dev_attrs,
.track_queue_depth = 1,
+ .cmd_size = sizeof(struct scsiio_tracker),
};
/* raid transport support for SAS 3.0 HBA devices */
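
Setting .cmd_size is what enables the rest of this series: the midlayer
allocates that many extra bytes behind every scsi_cmnd and hands them back
via scsi_cmd_priv(), so the driver no longer needs its own scsi_lookup
array. The pattern for any LLD, sketched with placeholder names
(example_tracker, example_queuecommand):

struct example_tracker {
	u16 smid;
};

static struct scsi_host_template example_template = {
	/* ...usual ops... */
	.cmd_size = sizeof(struct example_tracker),
};

static int example_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
{
	struct example_tracker *st = scsi_cmd_priv(cmd);

	st->smid = cmd->request->tag + 1;	/* tags 0-based, smids 1-based */
	return 0;
}
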
diff --git a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
index ced7d9f6274c..6bfcee4757e0 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
@@ -261,33 +261,6 @@ out_error:
}
/**
- * mpt3sas_scsi_direct_io_get - returns direct io flag
- * @ioc: per adapter object
- * @smid: system request message index
- *
- * Returns the smid stored scmd pointer.
- */
-inline u8
-mpt3sas_scsi_direct_io_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
-{
- return ioc->scsi_lookup[smid - 1].direct_io;
-}
-
-/**
- * mpt3sas_scsi_direct_io_set - sets direct io flag
- * @ioc: per adapter object
- * @smid: system request message index
- * @direct_io: Zero or non-zero value to set in the direct_io flag
- *
- * Returns Nothing.
- */
-inline void
-mpt3sas_scsi_direct_io_set(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 direct_io)
-{
- ioc->scsi_lookup[smid - 1].direct_io = direct_io;
-}
-
-/**
* mpt3sas_setup_direct_io - setup MPI request for WARPDRIVE Direct I/O
* @ioc: per adapter object
* @scmd: pointer to scsi command object
@@ -299,12 +272,12 @@ mpt3sas_scsi_direct_io_set(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 direct_io)
*/
void
mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
- struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request,
- u16 smid)
+ struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request)
{
sector_t v_lba, p_lba, stripe_off, column, io_size;
u32 stripe_sz, stripe_exp;
u8 num_pds, cmd = scmd->cmnd[0];
+ struct scsiio_tracker *st = scsi_cmd_priv(scmd);
if (cmd != READ_10 && cmd != WRITE_10 &&
cmd != READ_16 && cmd != WRITE_16)
@@ -340,5 +313,5 @@ mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
else
put_unaligned_be64(p_lba, &mpi_request->CDB.CDB32[2]);
- mpt3sas_scsi_direct_io_set(ioc, smid, 1);
+ st->direct_io = 1;
}
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index e58be98430b0..201c8de1853d 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -5216,7 +5216,7 @@ static unsigned short pmcraid_get_minor(void)
{
int minor;
- minor = find_first_zero_bit(pmcraid_minor, sizeof(pmcraid_minor));
+ minor = find_first_zero_bit(pmcraid_minor, PMCRAID_MAX_ADAPTERS);
__set_bit(minor, pmcraid_minor);
return minor;
}
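
This one is a straight bug fix: find_first_zero_bit() takes its size in
bits, while sizeof() yields bytes, so the old call scanned only one eighth
of the minor-number bitmap. In miniature, assuming pmcraid_minor is a
DECLARE_BITMAP(pmcraid_minor, PMCRAID_MAX_ADAPTERS):

DECLARE_BITMAP(map, 256);			/* sizeof(map) == 32 bytes */
int n;

n = find_first_zero_bit(map, 256);		/* correct: size in bits */
n = find_first_zero_bit(map, sizeof(map));	/* wrong: scans 32 bits */
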
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 7be5823ab036..ee86a0c62dbf 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -724,6 +724,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
return 0;
}
cmd->SCp.phase++;
+ /* fall through */
case 3: /* Phase 3 - Ready to accept a command */
w_ctr(ppb, 0x0c);
@@ -733,6 +734,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
if (!ppa_send_command(cmd))
return 0;
cmd->SCp.phase++;
+ /* fall through */
case 4: /* Phase 4 - Setup scatter/gather buffers */
if (scsi_bufflen(cmd)) {
@@ -746,6 +748,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
}
cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
cmd->SCp.phase++;
+ /* fall through */
case 5: /* Phase 5 - Data transfer stage */
w_ctr(ppb, 0x0c);
@@ -758,6 +761,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
if (retv == 0)
return 1;
cmd->SCp.phase++;
+ /* fall through */
case 6: /* Phase 6 - Read status/message */
cmd->result = DID_OK << 16;
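
The ppa state machine deliberately falls from one phase into the next; the
comments document that and silence GCC's -Wimplicit-fallthrough, which
accepts such annotations. The convention in miniature, with hypothetical
phase helpers:

switch (phase) {
case 1:
	setup();
	/* fall through */
case 2:
	transfer();
	break;
}
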
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 40800dd17d2f..ccd9a08ea030 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -3126,6 +3126,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
qedf->cmd_mgr = qedf_cmd_mgr_alloc(qedf);
if (!qedf->cmd_mgr) {
QEDF_ERR(&(qedf->dbg_ctx), "Failed to allocate cmd mgr.\n");
+ rc = -ENOMEM;
goto err5;
}
@@ -3149,6 +3150,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
create_workqueue(host_buf);
if (!qedf->ll2_recv_wq) {
QEDF_ERR(&(qedf->dbg_ctx), "Failed to LL2 workqueue.\n");
+ rc = -ENOMEM;
goto err7;
}
@@ -3192,6 +3194,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
if (!qedf->timer_work_queue) {
QEDF_ERR(&(qedf->dbg_ctx), "Failed to start timer "
"workqueue.\n");
+ rc = -ENOMEM;
goto err7;
}
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index 092e8f9a6020..667d7697ba01 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -198,7 +198,7 @@ static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
cqe_tmp_response = &cqe->cqe_common.iscsi_hdr.tmf_response;
qedi_cmd = task->dd_data;
- qedi_cmd->tmf_resp_buf = kzalloc(sizeof(*resp_hdr_ptr), GFP_KERNEL);
+ qedi_cmd->tmf_resp_buf = kzalloc(sizeof(*resp_hdr_ptr), GFP_ATOMIC);
if (!qedi_cmd->tmf_resp_buf) {
QEDI_ERR(&qedi->dbg_ctx,
"Failed to allocate resp buf, cid=0x%x\n",
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index a0002232a83f..029e2e69b29f 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -997,7 +997,9 @@ static bool qedi_process_completions(struct qedi_fastpath *fp)
ret = qedi_queue_cqe(qedi, cqe, fp->sb_id, p);
if (ret)
- continue;
+ QEDI_WARN(&qedi->dbg_ctx,
+ "Dropping CQE 0x%x for cid=0x%x.\n",
+ que->cq_cons_idx, cqe->cqe_common.conn_id);
que->cq_cons_idx++;
if (que->cq_cons_idx == QEDI_CQ_SIZE)
@@ -1269,16 +1271,14 @@ static int qedi_alloc_bdq(struct qedi_ctx *qedi)
}
/* Allocate list of PBL pages */
- qedi->bdq_pbl_list = dma_alloc_coherent(&qedi->pdev->dev,
- PAGE_SIZE,
- &qedi->bdq_pbl_list_dma,
- GFP_KERNEL);
+ qedi->bdq_pbl_list = dma_zalloc_coherent(&qedi->pdev->dev, PAGE_SIZE,
+ &qedi->bdq_pbl_list_dma,
+ GFP_KERNEL);
if (!qedi->bdq_pbl_list) {
QEDI_ERR(&qedi->dbg_ctx,
"Could not allocate list of PBL pages.\n");
return -ENOMEM;
}
- memset(qedi->bdq_pbl_list, 0, PAGE_SIZE);
/*
* Now populate PBL list with pages that contain pointers to the
@@ -1368,11 +1368,10 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
(qedi->global_queues[i]->cq_pbl_size +
(QEDI_PAGE_SIZE - 1));
- qedi->global_queues[i]->cq =
- dma_alloc_coherent(&qedi->pdev->dev,
- qedi->global_queues[i]->cq_mem_size,
- &qedi->global_queues[i]->cq_dma,
- GFP_KERNEL);
+ qedi->global_queues[i]->cq = dma_zalloc_coherent(&qedi->pdev->dev,
+ qedi->global_queues[i]->cq_mem_size,
+ &qedi->global_queues[i]->cq_dma,
+ GFP_KERNEL);
if (!qedi->global_queues[i]->cq) {
QEDI_WARN(&qedi->dbg_ctx,
@@ -1380,14 +1379,10 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
status = -ENOMEM;
goto mem_alloc_failure;
}
- memset(qedi->global_queues[i]->cq, 0,
- qedi->global_queues[i]->cq_mem_size);
-
- qedi->global_queues[i]->cq_pbl =
- dma_alloc_coherent(&qedi->pdev->dev,
- qedi->global_queues[i]->cq_pbl_size,
- &qedi->global_queues[i]->cq_pbl_dma,
- GFP_KERNEL);
+ qedi->global_queues[i]->cq_pbl = dma_zalloc_coherent(&qedi->pdev->dev,
+ qedi->global_queues[i]->cq_pbl_size,
+ &qedi->global_queues[i]->cq_pbl_dma,
+ GFP_KERNEL);
if (!qedi->global_queues[i]->cq_pbl) {
QEDI_WARN(&qedi->dbg_ctx,
@@ -1395,8 +1390,6 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
status = -ENOMEM;
goto mem_alloc_failure;
}
- memset(qedi->global_queues[i]->cq_pbl, 0,
- qedi->global_queues[i]->cq_pbl_size);
/* Create PBL */
num_pages = qedi->global_queues[i]->cq_mem_size /
@@ -1457,25 +1450,22 @@ int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep)
ep->sq_pbl_size = (ep->sq_mem_size / QEDI_PAGE_SIZE) * sizeof(void *);
ep->sq_pbl_size = ep->sq_pbl_size + QEDI_PAGE_SIZE;
- ep->sq = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_mem_size,
- &ep->sq_dma, GFP_KERNEL);
+ ep->sq = dma_zalloc_coherent(&qedi->pdev->dev, ep->sq_mem_size,
+ &ep->sq_dma, GFP_KERNEL);
if (!ep->sq) {
QEDI_WARN(&qedi->dbg_ctx,
"Could not allocate send queue.\n");
rval = -ENOMEM;
goto out;
}
- memset(ep->sq, 0, ep->sq_mem_size);
-
- ep->sq_pbl = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size,
- &ep->sq_pbl_dma, GFP_KERNEL);
+ ep->sq_pbl = dma_zalloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size,
+ &ep->sq_pbl_dma, GFP_KERNEL);
if (!ep->sq_pbl) {
QEDI_WARN(&qedi->dbg_ctx,
"Could not allocate send queue PBL.\n");
rval = -ENOMEM;
goto out_free_sq;
}
- memset(ep->sq_pbl, 0, ep->sq_pbl_size);
/* Create PBL */
num_pages = ep->sq_mem_size / QEDI_PAGE_SIZE;
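
Each of these hunks folds a dma_alloc_coherent() + memset() pair into
dma_zalloc_coherent(), which takes the same arguments and returns memory
already zeroed; the qla2xxx hunks below apply the same treatment, plus the
vmalloc() + memset() to vzalloc() analogue. The transformation in
miniature:

	/* before */
	p = dma_alloc_coherent(dev, sz, &h, GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	memset(p, 0, sz);

	/* after */
	p = dma_zalloc_coherent(dev, sz, &h, GFP_KERNEL);
	if (!p)
		return -ENOMEM;
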
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 9ce28c4f9812..89a4999fa631 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1843,14 +1843,13 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
if (qla2x00_reset_active(vha))
goto done;
- stats = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(*stats), &stats_dma, GFP_KERNEL);
+ stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats),
+ &stats_dma, GFP_KERNEL);
if (!stats) {
ql_log(ql_log_warn, vha, 0x707d,
"Failed to allocate memory for stats.\n");
goto done;
}
- memset(stats, 0, sizeof(*stats));
rval = QLA_FUNCTION_FAILED;
if (IS_FWI2_CAPABLE(ha)) {
@@ -2170,6 +2169,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
vha->gnl.ldma);
+ vfree(vha->scan.l);
+
if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS)
ql_log(ql_log_warn, vha, 0x7087,
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index e3ac7078d2aa..e2d5d3ca0f57 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1435,7 +1435,7 @@ qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
ha->optrom_state = QLA_SREADING;
}
- ha->optrom_buffer = vmalloc(ha->optrom_region_size);
+ ha->optrom_buffer = vzalloc(ha->optrom_region_size);
if (!ha->optrom_buffer) {
ql_log(ql_log_warn, vha, 0x7059,
"Read: Unable to allocate memory for optrom retrieval "
@@ -1445,7 +1445,6 @@ qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
return -ENOMEM;
}
- memset(ha->optrom_buffer, 0, ha->optrom_region_size);
return 0;
}
@@ -2314,16 +2313,14 @@ qla2x00_get_priv_stats(struct bsg_job *bsg_job)
if (!IS_FWI2_CAPABLE(ha))
return -EPERM;
- stats = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(*stats), &stats_dma, GFP_KERNEL);
+ stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats),
+ &stats_dma, GFP_KERNEL);
if (!stats) {
ql_log(ql_log_warn, vha, 0x70e2,
"Failed to allocate memory for stats.\n");
return -ENOMEM;
}
- memset(stats, 0, sizeof(*stats));
-
rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);
if (rval == QLA_SUCCESS) {
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 01a9b8971e88..be7d6824581a 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -246,8 +246,8 @@
* There is no correspondence between an N-PORT id and an AL_PA. Therefore the
* valid range of an N-PORT id is 0 through 0x7ef.
*/
-#define NPH_LAST_HANDLE 0x7ef
-#define NPH_MGMT_SERVER 0x7fa /* FFFFFA */
+#define NPH_LAST_HANDLE 0x7ee
+#define NPH_MGMT_SERVER 0x7ef /* FFFFEF */
#define NPH_SNS 0x7fc /* FFFFFC */
#define NPH_FABRIC_CONTROLLER 0x7fd /* FFFFFD */
#define NPH_F_PORT 0x7fe /* FFFFFE */
@@ -288,6 +288,8 @@ struct name_list_extended {
#define ATIO_ENTRY_CNT_24XX 4096 /* Number of ATIO entries. */
#define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/
#define FW_DEF_EXCHANGES_CNT 2048
+#define FW_MAX_EXCHANGES_CNT (32 * 1024)
+#define REDUCE_EXCHANGES_CNT (8 * 1024)
struct req_que;
struct qla_tgt_sess;
@@ -315,6 +317,29 @@ struct srb_cmd {
/* To identify if a srb is of T10-CRC type. @sp => srb_t pointer */
#define IS_PROT_IO(sp) (sp->flags & SRB_CRC_CTX_DSD_VALID)
+/*
+ * 24 bit port ID type definition.
+ */
+typedef union {
+ uint32_t b24 : 24;
+
+ struct {
+#ifdef __BIG_ENDIAN
+ uint8_t domain;
+ uint8_t area;
+ uint8_t al_pa;
+#elif defined(__LITTLE_ENDIAN)
+ uint8_t al_pa;
+ uint8_t area;
+ uint8_t domain;
+#else
+#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!"
+#endif
+ uint8_t rsvd_1;
+ } b;
+} port_id_t;
+#define INVALID_PORT_ID 0xFFFFFF
+
struct els_logo_payload {
uint8_t opcode;
uint8_t rsvd[3];
@@ -338,6 +363,7 @@ struct ct_arg {
u32 rsp_size;
void *req;
void *rsp;
+ port_id_t id;
};
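
port_id_t moves up in the header so struct ct_arg can embed one. The union
overlays a 24-bit value on per-byte fields, with the byte order flipped per
endianness so that b24 always reads as domain << 16 | area << 8 | al_pa.
In miniature:

	port_id_t id;

	id.b.rsvd_1 = 0;
	id.b.domain = 0x01;
	id.b.area = 0x02;
	id.b.al_pa = 0xef;
	/* id.b24 == 0x0102ef on both layouts */
	if (id.b24 == INVALID_PORT_ID)
		return;
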
/*
@@ -416,6 +442,7 @@ struct srb_iocb {
struct {
uint32_t cmd_hndl;
__le16 comp_status;
+ __le16 req_que_no;
struct completion comp;
} abt;
struct ct_arg ctarg;
@@ -448,6 +475,10 @@ struct srb_iocb {
uint32_t timeout_sec;
struct list_head entry;
} nvme;
+ struct {
+ u16 cmd;
+ u16 vp_index;
+ } ctrlvp;
} u;
struct timer_list timer;
@@ -476,6 +507,8 @@ struct srb_iocb {
#define SRB_NVME_CMD 19
#define SRB_NVME_LS 20
#define SRB_PRLI_CMD 21
+#define SRB_CTRL_VP 22
+#define SRB_PRLO_CMD 23
enum {
TYPE_SRB,
@@ -499,8 +532,12 @@ typedef struct srb {
const char *name;
int iocbs;
struct qla_qpair *qpair;
+ struct list_head elem;
u32 gen1; /* scratch */
u32 gen2; /* scratch */
+ int rc;
+ int retry_count;
+ struct completion comp;
union {
struct srb_iocb iocb_cmd;
struct bsg_job *bsg_job;
@@ -2164,28 +2201,6 @@ struct imm_ntfy_from_isp {
#define REQUEST_ENTRY_SIZE (sizeof(request_t))
-/*
- * 24 bit port ID type definition.
- */
-typedef union {
- uint32_t b24 : 24;
-
- struct {
-#ifdef __BIG_ENDIAN
- uint8_t domain;
- uint8_t area;
- uint8_t al_pa;
-#elif defined(__LITTLE_ENDIAN)
- uint8_t al_pa;
- uint8_t area;
- uint8_t domain;
-#else
-#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!"
-#endif
- uint8_t rsvd_1;
- } b;
-} port_id_t;
-#define INVALID_PORT_ID 0xFFFFFF
/*
* Switch info gathering structure.
@@ -2257,14 +2272,17 @@ struct ct_sns_desc {
enum discovery_state {
DSC_DELETED,
+ DSC_GNN_ID,
DSC_GID_PN,
DSC_GNL,
DSC_LOGIN_PEND,
DSC_LOGIN_FAILED,
DSC_GPDB,
+ DSC_GFPN_ID,
DSC_GPSC,
DSC_UPD_FCPORT,
DSC_LOGIN_COMPLETE,
+ DSC_ADISC,
DSC_DELETE_PEND,
};
@@ -2290,7 +2308,9 @@ enum fcport_mgt_event {
FCME_GPDB_DONE,
FCME_GPNID_DONE,
FCME_GFFID_DONE,
- FCME_DELETE_DONE,
+ FCME_ADISC_DONE,
+ FCME_GNNID_DONE,
+ FCME_GFPNID_DONE,
};
enum rscn_addr_format {
@@ -2315,6 +2335,7 @@ typedef struct fc_port {
unsigned int conf_compl_supported:1;
unsigned int deleted:2;
+ unsigned int free_pending:1;
unsigned int local:1;
unsigned int logout_on_delete:1;
unsigned int logo_ack_needed:1;
@@ -2323,6 +2344,7 @@ typedef struct fc_port {
unsigned int login_pause:1;
unsigned int login_succ:1;
unsigned int query:1;
+ unsigned int id_changed:1;
struct work_struct nvme_del_work;
struct completion nvme_del_done;
@@ -2434,6 +2456,7 @@ static const char * const port_state_str[] = {
#define FCF_FCP2_DEVICE BIT_2
#define FCF_ASYNC_SENT BIT_3
#define FCF_CONF_COMP_SUPPORTED BIT_4
+#define FCF_ASYNC_ACTIVE BIT_5
/* No loop ID flag. */
#define FC_NO_LOOP_ID 0x1000
@@ -2470,6 +2493,11 @@ static const char * const port_state_str[] = {
#define GA_NXT_REQ_SIZE (16 + 4)
#define GA_NXT_RSP_SIZE (16 + 620)
+#define GPN_FT_CMD 0x172
+#define GPN_FT_REQ_SIZE (16 + 4)
+#define GNN_FT_CMD 0x173
+#define GNN_FT_REQ_SIZE (16 + 4)
+
#define GID_PT_CMD 0x1A1
#define GID_PT_REQ_SIZE (16 + 4)
@@ -2725,6 +2753,13 @@ struct ct_sns_req {
} port_id;
struct {
+ uint8_t reserved;
+ uint8_t domain;
+ uint8_t area;
+ uint8_t port_type;
+ } gpn_ft;
+
+ struct {
uint8_t port_type;
uint8_t domain;
uint8_t area;
@@ -2837,6 +2872,27 @@ struct ct_sns_gid_pt_data {
uint8_t port_id[3];
};
+/* It's the same for both GPN_FT and GNN_FT */
+struct ct_sns_gpnft_rsp {
+ struct {
+ struct ct_cmd_hdr header;
+ uint16_t response;
+ uint16_t residual;
+ uint8_t fragment_id;
+ uint8_t reason_code;
+ uint8_t explanation_code;
+ uint8_t vendor_unique;
+ };
+ /* Assume the largest number of targets for the union */
+ struct ct_sns_gpn_ft_data {
+ u8 control_byte;
+ u8 port_id[3];
+ u32 reserved;
+ u8 port_name[8];
+ } entries[1];
+};
+
+/* CT command response */
struct ct_sns_rsp {
struct ct_rsp_hdr header;
@@ -2912,6 +2968,33 @@ struct ct_sns_pkt {
} p;
};
+struct ct_sns_gpnft_pkt {
+ union {
+ struct ct_sns_req req;
+ struct ct_sns_gpnft_rsp rsp;
+ } p;
+};
+
+enum scan_flags_t {
+ SF_SCANNING = BIT_0,
+ SF_QUEUED = BIT_1,
+};
+
+struct fab_scan_rp {
+ port_id_t id;
+ u8 port_name[8];
+ u8 node_name[8];
+};
+
+struct fab_scan {
+ struct fab_scan_rp *l;
+ u32 size;
+ u16 scan_retry;
+#define MAX_SCAN_RETRIES 5
+ enum scan_flags_t scan_flags;
+ struct delayed_work scan_work;
+};
+
/*
* SNS command structures -- for 2200 compatibility.
*/
@@ -3117,7 +3200,7 @@ enum qla_work_type {
QLA_EVT_AENFX,
QLA_EVT_GIDPN,
QLA_EVT_GPNID,
- QLA_EVT_GPNID_DONE,
+ QLA_EVT_UNMAP,
QLA_EVT_NEW_SESS,
QLA_EVT_GPDB,
QLA_EVT_PRLI,
@@ -3125,6 +3208,15 @@ enum qla_work_type {
QLA_EVT_UPD_FCPORT,
QLA_EVT_GNL,
QLA_EVT_NACK,
+ QLA_EVT_RELOGIN,
+ QLA_EVT_ASYNC_PRLO,
+ QLA_EVT_ASYNC_PRLO_DONE,
+ QLA_EVT_GPNFT,
+ QLA_EVT_GPNFT_DONE,
+ QLA_EVT_GNNFT_DONE,
+ QLA_EVT_GNNID,
+ QLA_EVT_GFPNID,
+ QLA_EVT_SP_RETRY,
};
@@ -3166,7 +3258,9 @@ struct qla_work_evt {
struct {
port_id_t id;
u8 port_name[8];
+ u8 node_name[8];
void *pla;
+ u8 fc4_type;
} new_sess;
struct { /*Get PDB, Get Speed, update fcport, gnl, gidpn */
fc_port_t *fcport;
@@ -3177,6 +3271,9 @@ struct qla_work_evt {
u8 iocb[IOCB_SIZE];
int type;
} nack;
+ struct {
+ u8 fc4_type;
+ } gpnft;
} u;
};
@@ -3433,10 +3530,6 @@ struct qlt_hw_data {
#define LEAK_EXCHG_THRESH_HOLD_PERCENT 75 /* 75 percent */
-#define QLA_EARLY_LINKUP(_ha) \
- ((_ha->flags.n2n_ae || _ha->flags.lip_ae) && \
- _ha->flags.fw_started && !_ha->flags.fw_init_done)
-
/*
* Qlogic host adapter specific data structure.
*/
@@ -3494,8 +3587,10 @@ struct qla_hw_data {
uint32_t detected_lr_sfp:1;
uint32_t using_lr_setting:1;
+ uint32_t rida_fmt2:1;
} flags;
+ uint16_t max_exchg;
uint16_t long_range_distance; /* 32G & above */
#define LR_DISTANCE_5K 1
#define LR_DISTANCE_10K 0
@@ -3713,6 +3808,8 @@ struct qla_hw_data {
(IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
#define IS_EXLOGIN_OFFLD_CAPABLE(ha) \
(IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
+#define USE_ASYNC_SCAN(ha) (IS_QLA25XX(ha) || IS_QLA81XX(ha) ||\
+ IS_QLA83XX(ha) || IS_QLA27XX(ha))
/* HBA serial number */
uint8_t serial0;
@@ -3795,7 +3892,7 @@ struct qla_hw_data {
int exchoffld_size;
int exchoffld_count;
- void *swl;
+ void *swl;
/* These are used by mailbox operations. */
uint16_t mailbox_out[MAILBOX_REGISTER_COUNT];
@@ -4107,6 +4204,7 @@ typedef struct scsi_qla_host {
#define LOOP_READY 5
#define LOOP_DEAD 6
+ unsigned long relogin_jif;
unsigned long dpc_flags;
#define RESET_MARKER_NEEDED 0 /* Send marker to ISP. */
#define RESET_ACTIVE 1
@@ -4139,6 +4237,7 @@ typedef struct scsi_qla_host {
#define SET_ZIO_THRESHOLD_NEEDED 28
#define DETECT_SFP_CHANGE 29
#define N2N_LOGIN_NEEDED 30
+#define IOCB_WORK_ACTIVE 31
unsigned long pci_flags;
#define PFLG_DISCONNECTED 0 /* PCI device removed */
@@ -4252,6 +4351,8 @@ typedef struct scsi_qla_host {
uint8_t n2n_node_name[WWN_SIZE];
uint8_t n2n_port_name[WWN_SIZE];
uint16_t n2n_id;
+ struct list_head gpnid_list;
+ struct fab_scan scan;
} scsi_qla_host_t;
struct qla27xx_image_status {
@@ -4511,6 +4612,16 @@ struct sff_8247_a0 {
#define USER_CTRL_IRQ(_ha) (ql2xuctrlirq && QLA_TGT_MODE_ENABLED() && \
(IS_QLA27XX(_ha) || IS_QLA83XX(_ha)))
+#define SAVE_TOPO(_ha) { \
+ if (_ha->current_topology) \
+ _ha->prev_topology = _ha->current_topology; \
+}
+
+#define N2N_TOPO(ha) \
+ ((ha->prev_topology == ISP_CFG_N && !ha->current_topology) || \
+ ha->current_topology == ISP_CFG_N || \
+ !ha->current_topology)
+
#include "qla_target.h"
#include "qla_gbl.h"
#include "qla_dbg.h"
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index d231e7156134..0b190082aa8d 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -127,21 +127,32 @@ static int
qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
{
struct scsi_qla_host *vha = s->private;
- struct qla_hw_data *ha = vha->hw;
+ uint16_t mb[MAX_IOCB_MB_REG];
+ int rc;
+
+ rc = qla24xx_res_count_wait(vha, mb, SIZEOF_IOCB_MB_REG);
+ if (rc != QLA_SUCCESS) {
+ seq_printf(s, "Mailbox Command failed %d, mb %#x", rc, mb[0]);
+ } else {
+ seq_puts(s, "FW Resource count\n\n");
+ seq_printf(s, "Original TGT exchg count[%d]\n", mb[1]);
+ seq_printf(s, "current TGT exchg count[%d]\n", mb[2]);
+ seq_printf(s, "original Initiator Exchange count[%d]\n", mb[3]);
+ seq_printf(s, "Current Initiator Exchange count[%d]\n", mb[6]);
+ seq_printf(s, "Original IOCB count[%d]\n", mb[7]);
+ seq_printf(s, "Current IOCB count[%d]\n", mb[10]);
+ seq_printf(s, "MAX VP count[%d]\n", mb[11]);
+ seq_printf(s, "MAX FCF count[%d]\n", mb[12]);
+ seq_printf(s, "Current free pageable XCB buffer cnt[%d]\n",
+ mb[20]);
+ seq_printf(s, "Original Initiator fast XCB buffer cnt[%d]\n",
+ mb[21]);
+ seq_printf(s, "Current free Initiator fast XCB buffer cnt[%d]\n",
+ mb[22]);
+ seq_printf(s, "Original Target fast XCB buffer cnt[%d]\n",
+ mb[23]);
- seq_puts(s, "FW Resource count\n\n");
- seq_printf(s, "Original TGT exchg count[%d]\n",
- ha->orig_fw_tgt_xcb_count);
- seq_printf(s, "current TGT exchg count[%d]\n",
- ha->cur_fw_tgt_xcb_count);
- seq_printf(s, "original Initiator Exchange count[%d]\n",
- ha->orig_fw_xcb_count);
- seq_printf(s, "Current Initiator Exchange count[%d]\n",
- ha->cur_fw_xcb_count);
- seq_printf(s, "Original IOCB count[%d]\n", ha->orig_fw_iocb_count);
- seq_printf(s, "Current IOCB count[%d]\n", ha->cur_fw_iocb_count);
- seq_printf(s, "MAX VP count[%d]\n", ha->max_npiv_vports);
- seq_printf(s, "MAX FCF count[%d]\n", ha->fw_max_fcf_count);
+ }
return 0;
}
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index d5cef0727e72..5d8688e5bc7c 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1392,7 +1392,7 @@ struct vp_rpt_id_entry_24xx {
uint8_t port_name[8];
uint8_t node_name[8];
- uint32_t remote_nport_id;
+ uint8_t remote_nport_id[4];
uint32_t reserved_5;
} f2;
} u;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index fa115c7433e5..e9295398050c 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -66,6 +66,7 @@ extern void qla84xx_put_chip(struct scsi_qla_host *);
extern int qla2x00_async_login(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_async_logout(struct scsi_qla_host *, fc_port_t *);
+extern int qla2x00_async_prlo(struct scsi_qla_host *, fc_port_t *);
extern int qla2x00_async_adisc(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_async_tm_cmd(fc_port_t *, uint32_t, uint32_t, uint32_t);
@@ -104,11 +105,18 @@ int qla24xx_async_gpdb(struct scsi_qla_host *, fc_port_t *, u8);
int qla24xx_async_prli(struct scsi_qla_host *, fc_port_t *);
int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *,
struct imm_ntfy_from_isp *, int);
-int qla24xx_post_newsess_work(struct scsi_qla_host *, port_id_t *, u8 *,
- void *);
+int qla24xx_post_newsess_work(struct scsi_qla_host *, port_id_t *, u8 *, u8*,
+ void *, u8);
int qla24xx_fcport_handle_login(struct scsi_qla_host *, fc_port_t *);
int qla24xx_detect_sfp(scsi_qla_host_t *vha);
int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8);
+void qla2x00_async_prlo_done(struct scsi_qla_host *, fc_port_t *,
+ uint16_t *);
+extern int qla2x00_post_async_prlo_work(struct scsi_qla_host *, fc_port_t *,
+ uint16_t *);
+extern int qla2x00_post_async_prlo_done_work(struct scsi_qla_host *,
+ fc_port_t *, uint16_t *);
+
/*
* Global Data in qla_os.c source file.
*/
@@ -148,6 +156,7 @@ extern int ql2xuctrlirq;
extern int ql2xnvmeenable;
extern int ql2xautodetectsfp;
extern int ql2xenablemsix;
+extern int qla2xuseresexchforels;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -203,6 +212,7 @@ void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
int qla24xx_async_abort_cmd(srb_t *);
+int qla24xx_post_relogin_work(struct scsi_qla_host *vha);
/*
* Global Functions in qla_mid.c source file.
@@ -494,6 +504,7 @@ int qla24xx_get_port_login_templ(scsi_qla_host_t *, dma_addr_t,
extern int qla27xx_get_zio_threshold(scsi_qla_host_t *, uint16_t *);
extern int qla27xx_set_zio_threshold(scsi_qla_host_t *, uint16_t);
+int qla24xx_res_count_wait(struct scsi_qla_host *, uint16_t *, int);
/*
* Global Function Prototypes in qla_isr.c source file.
@@ -639,14 +650,26 @@ extern void qla2x00_free_fcport(fc_port_t *);
extern int qla24xx_post_gpnid_work(struct scsi_qla_host *, port_id_t *);
extern int qla24xx_async_gpnid(scsi_qla_host_t *, port_id_t *);
-void qla24xx_async_gpnid_done(scsi_qla_host_t *, srb_t*);
void qla24xx_handle_gpnid_event(scsi_qla_host_t *, struct event_arg *);
int qla24xx_post_gpsc_work(struct scsi_qla_host *, fc_port_t *);
int qla24xx_async_gpsc(scsi_qla_host_t *, fc_port_t *);
+void qla24xx_handle_gpsc_event(scsi_qla_host_t *, struct event_arg *);
int qla2x00_mgmt_svr_login(scsi_qla_host_t *);
void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea);
int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport);
+int qla24xx_async_gpnft(scsi_qla_host_t *, u8);
+void qla24xx_async_gpnft_done(scsi_qla_host_t *, srb_t *);
+void qla24xx_async_gnnft_done(scsi_qla_host_t *, srb_t *);
+int qla24xx_async_gnnid(scsi_qla_host_t *, fc_port_t *);
+void qla24xx_handle_gnnid_event(scsi_qla_host_t *, struct event_arg *);
+int qla24xx_post_gnnid_work(struct scsi_qla_host *, fc_port_t *);
+int qla24xx_post_gfpnid_work(struct scsi_qla_host *, fc_port_t *);
+int qla24xx_async_gfpnid(scsi_qla_host_t *, fc_port_t *);
+void qla24xx_handle_gfpnid_event(scsi_qla_host_t *, struct event_arg *);
+void qla24xx_sp_unmap(scsi_qla_host_t *, srb_t *);
+void qla_scan_work_fn(struct work_struct *);
+
/*
* Global Function Prototypes in qla_attr.c source file.
*/
@@ -864,8 +887,7 @@ void qla24xx_do_nack_work(struct scsi_qla_host *, struct qla_work_evt *);
void qlt_plogi_ack_link(struct scsi_qla_host *, struct qlt_plogi_ack_t *,
struct fc_port *, enum qlt_plogi_link_t);
void qlt_plogi_ack_unref(struct scsi_qla_host *, struct qlt_plogi_ack_t *);
-extern void qlt_schedule_sess_for_deletion(struct fc_port *, bool);
-extern void qlt_schedule_sess_for_deletion_lock(struct fc_port *);
+extern void qlt_schedule_sess_for_deletion(struct fc_port *);
extern struct fc_port *qlt_find_sess_invalidate_other(scsi_qla_host_t *,
uint64_t wwn, port_id_t port_id, uint16_t loop_id, struct fc_port **);
void qla24xx_delete_sess_fn(struct work_struct *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index bc3db6abc9a0..5bf9a59432f6 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -14,6 +14,10 @@ static int qla2x00_sns_gpn_id(scsi_qla_host_t *, sw_info_t *);
static int qla2x00_sns_gnn_id(scsi_qla_host_t *, sw_info_t *);
static int qla2x00_sns_rft_id(scsi_qla_host_t *);
static int qla2x00_sns_rnn_id(scsi_qla_host_t *);
+static int qla_async_rftid(scsi_qla_host_t *, port_id_t *);
+static int qla_async_rffid(scsi_qla_host_t *, port_id_t *, u8, u8);
+static int qla_async_rnnid(scsi_qla_host_t *, port_id_t *, u8*);
+static int qla_async_rsnn_nn(scsi_qla_host_t *);
/**
* qla2x00_prep_ms_iocb() - Prepare common MS/CT IOCB fields for SNS CT query.
@@ -175,6 +179,9 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
}
break;
+ case CS_TIMEOUT:
+ rval = QLA_FUNCTION_TIMEOUT;
+ /* fall through */
default:
ql_dbg(ql_dbg_disc, vha, 0x2033,
"%s failed, completion status (%x) on port_id: "
@@ -508,6 +515,72 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
return (rval);
}
+static void qla2x00_async_sns_sp_done(void *s, int rc)
+{
+ struct srb *sp = s;
+ struct scsi_qla_host *vha = sp->vha;
+ struct ct_sns_pkt *ct_sns;
+ struct qla_work_evt *e;
+
+ sp->rc = rc;
+ if (rc == QLA_SUCCESS) {
+ ql_dbg(ql_dbg_disc, vha, 0x204f,
+ "Async done-%s exiting normally.\n",
+ sp->name);
+ } else if (rc == QLA_FUNCTION_TIMEOUT) {
+ ql_dbg(ql_dbg_disc, vha, 0x204f,
+ "Async done-%s timeout\n", sp->name);
+ } else {
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
+ memset(ct_sns, 0, sizeof(*ct_sns));
+ sp->retry_count++;
+ if (sp->retry_count > 3)
+ goto err;
+
+ ql_dbg(ql_dbg_disc, vha, 0x204f,
+ "Async done-%s fail rc %x. Retry count %d\n",
+ sp->name, rc, sp->retry_count);
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_SP_RETRY);
+ if (!e)
+ goto err2;
+
+ del_timer(&sp->u.iocb_cmd.timer);
+ e->u.iosb.sp = sp;
+ qla2x00_post_work(vha, e);
+ return;
+ }
+
+err:
+ e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
+err2:
+ if (!e) {
+ /* Freeing here may trigger a kernel warning, but skipping it would leak memory. */
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
+
+ if (sp->u.iocb_cmd.u.ctarg.rsp) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+
+ sp->free(sp);
+
+ return;
+ }
+
+ e->u.iosb.sp = sp;
+ qla2x00_post_work(vha, e);
+}
+
/**
* qla2x00_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
* @ha: HA context
@@ -517,57 +590,87 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
int
qla2x00_rft_id(scsi_qla_host_t *vha)
{
- int rval;
struct qla_hw_data *ha = vha->hw;
- ms_iocb_entry_t *ms_pkt;
- struct ct_sns_req *ct_req;
- struct ct_sns_rsp *ct_rsp;
- struct ct_arg arg;
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return qla2x00_sns_rft_id(vha);
- arg.iocb = ha->ms_iocb;
- arg.req_dma = ha->ct_sns_dma;
- arg.rsp_dma = ha->ct_sns_dma;
- arg.req_size = RFT_ID_REQ_SIZE;
- arg.rsp_size = RFT_ID_RSP_SIZE;
- arg.nport_handle = NPH_SNS;
+ return qla_async_rftid(vha, &vha->d_id);
+}
- /* Issue RFT_ID */
- /* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
+static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
+{
+ int rval = QLA_MEMORY_ALLOC_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+ struct ct_sns_pkt *ct_sns;
+
+ if (!vha->flags.online)
+ goto done;
+
+ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "rft_id";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.req) {
+ ql_log(ql_log_warn, vha, 0xd041,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+
+ sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+ ql_log(ql_log_warn, vha, 0xd042,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
+ memset(ct_sns, 0, sizeof(*ct_sns));
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
/* Prepare CT request */
- ct_req = qla2x00_prep_ct_req(ha->ct_sns, RFT_ID_CMD,
- RFT_ID_RSP_SIZE);
- ct_rsp = &ha->ct_sns->p.rsp;
+ ct_req = qla2x00_prep_ct_req(ct_sns, RFT_ID_CMD, RFT_ID_RSP_SIZE);
/* Prepare CT arguments -- port_id, FC-4 types */
ct_req->req.rft_id.port_id[0] = vha->d_id.b.domain;
ct_req->req.rft_id.port_id[1] = vha->d_id.b.area;
ct_req->req.rft_id.port_id[2] = vha->d_id.b.al_pa;
-
ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */
if (vha->flags.nvme_enabled)
ct_req->req.rft_id.fc4_types[6] = 1; /* NVMe type 28h */
- /* Execute MS IOCB */
- rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
- sizeof(ms_iocb_entry_t));
+
+ sp->u.iocb_cmd.u.ctarg.req_size = RFT_ID_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = RFT_ID_RSP_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_sns_sp_done;
+
+ rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- /*EMPTY*/
ql_dbg(ql_dbg_disc, vha, 0x2043,
"RFT_ID issue IOCB failed (%d).\n", rval);
- } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFT_ID") !=
- QLA_SUCCESS) {
- rval = QLA_FUNCTION_FAILED;
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x2044,
- "RFT_ID exiting normally.\n");
+ goto done_free_sp;
}
-
- return (rval);
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s - hdl=%x portid %06x.\n",
+ sp->name, sp->handle, d_id->b24);
+ return rval;
+done_free_sp:
+ sp->free(sp);
+done:
+ return rval;
}
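Each of the registration helpers in this patch (RFT_ID, RFF_ID, RNN_ID, RSNN_NN) repeats the same request/response buffer setup verbatim; a hypothetical consolidation of the shared steps could read as follows (qla_ct_alloc_bufs is illustrative, not part of the patch):

/* Hypothetical helper: allocate the paired CT request/response DMA buffers. */
static int qla_ct_alloc_bufs(scsi_qla_host_t *vha, srb_t *sp)
{
	struct srb_iocb *ci = &sp->u.iocb_cmd;

	ci->u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
	    sizeof(struct ct_sns_pkt), &ci->u.ctarg.req_dma, GFP_KERNEL);
	ci->u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
	    sizeof(struct ct_sns_pkt), &ci->u.ctarg.rsp_dma, GFP_KERNEL);
	if (!ci->u.ctarg.req || !ci->u.ctarg.rsp)
		return QLA_MEMORY_ALLOC_FAILED;	/* caller frees whatever was allocated */

	memset(ci->u.ctarg.rsp, 0, sizeof(struct ct_sns_pkt));
	return QLA_SUCCESS;
}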
/**
@@ -579,12 +682,7 @@ qla2x00_rft_id(scsi_qla_host_t *vha)
int
qla2x00_rff_id(scsi_qla_host_t *vha, u8 type)
{
- int rval;
struct qla_hw_data *ha = vha->hw;
- ms_iocb_entry_t *ms_pkt;
- struct ct_sns_req *ct_req;
- struct ct_sns_rsp *ct_rsp;
- struct ct_arg arg;
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
ql_dbg(ql_dbg_disc, vha, 0x2046,
@@ -592,47 +690,81 @@ qla2x00_rff_id(scsi_qla_host_t *vha, u8 type)
return (QLA_SUCCESS);
}
- arg.iocb = ha->ms_iocb;
- arg.req_dma = ha->ct_sns_dma;
- arg.rsp_dma = ha->ct_sns_dma;
- arg.req_size = RFF_ID_REQ_SIZE;
- arg.rsp_size = RFF_ID_RSP_SIZE;
- arg.nport_handle = NPH_SNS;
+ return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha),
+ FC4_TYPE_FCP_SCSI);
+}
- /* Issue RFF_ID */
- /* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
+static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
+ u8 fc4feature, u8 fc4type)
+{
+ int rval = QLA_MEMORY_ALLOC_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+ struct ct_sns_pkt *ct_sns;
- /* Prepare CT request */
- ct_req = qla2x00_prep_ct_req(ha->ct_sns, RFF_ID_CMD,
- RFF_ID_RSP_SIZE);
- ct_rsp = &ha->ct_sns->p.rsp;
+ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+ if (!sp)
+ goto done;
- /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */
- ct_req->req.rff_id.port_id[0] = vha->d_id.b.domain;
- ct_req->req.rff_id.port_id[1] = vha->d_id.b.area;
- ct_req->req.rff_id.port_id[2] = vha->d_id.b.al_pa;
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "rff_id";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
- qlt_rff_id(vha, ct_req);
+ sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.req) {
+ ql_log(ql_log_warn, vha, 0xd041,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+
+ sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+ ql_log(ql_log_warn, vha, 0xd042,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
+ memset(ct_sns, 0, sizeof(*ct_sns));
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
- ct_req->req.rff_id.fc4_type = type; /* SCSI - FCP */
+ /* Prepare CT request */
+ ct_req = qla2x00_prep_ct_req(ct_sns, RFF_ID_CMD, RFF_ID_RSP_SIZE);
- /* Execute MS IOCB */
- rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
- sizeof(ms_iocb_entry_t));
+ /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */
+ ct_req->req.rff_id.port_id[0] = d_id->b.domain;
+ ct_req->req.rff_id.port_id[1] = d_id->b.area;
+ ct_req->req.rff_id.port_id[2] = d_id->b.al_pa;
+ ct_req->req.rff_id.fc4_feature = fc4feature;
+ ct_req->req.rff_id.fc4_type = fc4type; /* SCSI - FCP */
+
+ sp->u.iocb_cmd.u.ctarg.req_size = RFF_ID_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = RFF_ID_RSP_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_sns_sp_done;
+
+ rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- /*EMPTY*/
ql_dbg(ql_dbg_disc, vha, 0x2047,
"RFF_ID issue IOCB failed (%d).\n", rval);
- } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFF_ID") !=
- QLA_SUCCESS) {
- rval = QLA_FUNCTION_FAILED;
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x2048,
- "RFF_ID exiting normally.\n");
+ goto done_free_sp;
}
- return (rval);
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s - hdl=%x portid %06x feature %x type %x.\n",
+ sp->name, sp->handle, d_id->b24, fc4feature, fc4type);
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+done:
+ return rval;
}
/**
@@ -644,54 +776,85 @@ qla2x00_rff_id(scsi_qla_host_t *vha, u8 type)
int
qla2x00_rnn_id(scsi_qla_host_t *vha)
{
- int rval;
struct qla_hw_data *ha = vha->hw;
- ms_iocb_entry_t *ms_pkt;
- struct ct_sns_req *ct_req;
- struct ct_sns_rsp *ct_rsp;
- struct ct_arg arg;
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return qla2x00_sns_rnn_id(vha);
- arg.iocb = ha->ms_iocb;
- arg.req_dma = ha->ct_sns_dma;
- arg.rsp_dma = ha->ct_sns_dma;
- arg.req_size = RNN_ID_REQ_SIZE;
- arg.rsp_size = RNN_ID_RSP_SIZE;
- arg.nport_handle = NPH_SNS;
+ return qla_async_rnnid(vha, &vha->d_id, vha->node_name);
+}
- /* Issue RNN_ID */
- /* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
+static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
+ u8 *node_name)
+{
+ int rval = QLA_MEMORY_ALLOC_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+ struct ct_sns_pkt *ct_sns;
+
+ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "rnid";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.req) {
+ ql_log(ql_log_warn, vha, 0xd041,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+
+ sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+ ql_log(ql_log_warn, vha, 0xd042,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
+ memset(ct_sns, 0, sizeof(*ct_sns));
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
/* Prepare CT request */
- ct_req = qla2x00_prep_ct_req(ha->ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE);
- ct_rsp = &ha->ct_sns->p.rsp;
+ ct_req = qla2x00_prep_ct_req(ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE);
/* Prepare CT arguments -- port_id, node_name */
ct_req->req.rnn_id.port_id[0] = vha->d_id.b.domain;
ct_req->req.rnn_id.port_id[1] = vha->d_id.b.area;
ct_req->req.rnn_id.port_id[2] = vha->d_id.b.al_pa;
-
memcpy(ct_req->req.rnn_id.node_name, vha->node_name, WWN_SIZE);
- /* Execute MS IOCB */
- rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
- sizeof(ms_iocb_entry_t));
+ sp->u.iocb_cmd.u.ctarg.req_size = RNN_ID_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = RNN_ID_RSP_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_sns_sp_done;
+
+ rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- /*EMPTY*/
ql_dbg(ql_dbg_disc, vha, 0x204d,
"RNN_ID issue IOCB failed (%d).\n", rval);
- } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RNN_ID") !=
- QLA_SUCCESS) {
- rval = QLA_FUNCTION_FAILED;
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x204e,
- "RNN_ID exiting normally.\n");
+ goto done_free_sp;
}
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s - hdl=%x portid %06x\n",
+ sp->name, sp->handle, d_id->b24);
- return (rval);
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+done:
+ return rval;
}
void
@@ -718,12 +881,7 @@ qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn, size_t size)
int
qla2x00_rsnn_nn(scsi_qla_host_t *vha)
{
- int rval;
struct qla_hw_data *ha = vha->hw;
- ms_iocb_entry_t *ms_pkt;
- struct ct_sns_req *ct_req;
- struct ct_sns_rsp *ct_rsp;
- struct ct_arg arg;
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
ql_dbg(ql_dbg_disc, vha, 0x2050,
@@ -731,22 +889,49 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha)
return (QLA_SUCCESS);
}
- arg.iocb = ha->ms_iocb;
- arg.req_dma = ha->ct_sns_dma;
- arg.rsp_dma = ha->ct_sns_dma;
- arg.req_size = 0;
- arg.rsp_size = RSNN_NN_RSP_SIZE;
- arg.nport_handle = NPH_SNS;
+ return qla_async_rsnn_nn(vha);
+}
- /* Issue RSNN_NN */
- /* Prepare common MS IOCB */
- /* Request size adjusted after CT preparation */
- ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
+static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
+{
+ int rval = QLA_MEMORY_ALLOC_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+ struct ct_sns_pkt *ct_sns;
+
+ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "rsnn_nn";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.req) {
+ ql_log(ql_log_warn, vha, 0xd041,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+
+ sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+ ql_log(ql_log_warn, vha, 0xd042,
+ "%s: Failed to allocate ct_sns request.\n",
+ __func__);
+ goto done_free_sp;
+ }
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
+ memset(ct_sns, 0, sizeof(*ct_sns));
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
/* Prepare CT request */
- ct_req = qla2x00_prep_ct_req(ha->ct_sns, RSNN_NN_CMD,
- RSNN_NN_RSP_SIZE);
- ct_rsp = &ha->ct_sns->p.rsp;
+ ct_req = qla2x00_prep_ct_req(ct_sns, RSNN_NN_CMD, RSNN_NN_RSP_SIZE);
/* Prepare CT arguments -- node_name, symbolic node_name, size */
memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE);
@@ -754,32 +939,33 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha)
/* Prepare the Symbolic Node Name */
qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name,
sizeof(ct_req->req.rsnn_nn.sym_node_name));
-
- /* Calculate SNN length */
ct_req->req.rsnn_nn.name_len =
(uint8_t)strlen(ct_req->req.rsnn_nn.sym_node_name);
- /* Update MS IOCB request */
- ms_pkt->req_bytecount =
- cpu_to_le32(24 + 1 + ct_req->req.rsnn_nn.name_len);
- ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
- /* Execute MS IOCB */
- rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
- sizeof(ms_iocb_entry_t));
+ sp->u.iocb_cmd.u.ctarg.req_size = 24 + 1 + ct_req->req.rsnn_nn.name_len;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = RSNN_NN_RSP_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_sns_sp_done;
+
+ rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- /*EMPTY*/
- ql_dbg(ql_dbg_disc, vha, 0x2051,
- "RSNN_NN issue IOCB failed (%d).\n", rval);
- } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RSNN_NN") !=
- QLA_SUCCESS) {
- rval = QLA_FUNCTION_FAILED;
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x2052,
- "RSNN_NN exiting normally.\n");
+ ql_dbg(ql_dbg_disc, vha, 0x2043,
+ "RFT_ID issue IOCB failed (%d).\n", rval);
+ goto done_free_sp;
}
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s - hdl=%x.\n",
+ sp->name, sp->handle);
- return (rval);
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+done:
+ return rval;
}
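Unlike the fixed-size requests above, the RSNN_NN request length is computed: the 16-byte CT_IU preamble plus the 8-byte node name account for the 24, the one-byte name-length field for the +1, and the symbolic name itself contributes name_len. For a hypothetical 33-character symbolic node name:

/* Hypothetical 33-character symbolic node name: */
sp->u.iocb_cmd.u.ctarg.req_size = 24 + 1 + 33;	/* = 58 bytes on the wire */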
/**
@@ -2790,15 +2976,20 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
fc_port_t *fcport = ea->fcport;
ql_dbg(ql_dbg_disc, vha, 0x201d,
- "%s %8phC login state %d\n",
- __func__, fcport->port_name, fcport->fw_login_state);
+ "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
+ fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);
+
+ if (fcport->disc_state == DSC_DELETE_PEND)
+ return;
if (ea->sp->gen2 != fcport->login_gen) {
/* PLOGI/PRLI/LOGO came in while cmd was out.*/
ql_dbg(ql_dbg_disc, vha, 0x201e,
- "%s %8phC generation changed rscn %d|%d login %d|%d \n",
+ "%s %8phC generation changed rscn %d|%d n",
__func__, fcport->port_name, fcport->last_rscn_gen,
- fcport->rscn_gen, fcport->last_login_gen, fcport->login_gen);
+ fcport->rscn_gen);
return;
}
@@ -2811,7 +3002,21 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
/* cable plugged into the same place */
switch (vha->host->active_mode) {
case MODE_TARGET:
- /* NOOP. let the other guy login to us.*/
+ if (fcport->fw_login_state ==
+ DSC_LS_PRLI_COMP) {
+ u16 data[2];
+ /*
+ * Late RSCN was delivered.
+ * Remote port is already logged in.
+ */
+ ql_dbg(ql_dbg_disc, vha, 0x201f,
+ "%s %d %8phC post adisc\n",
+ __func__, __LINE__,
+ fcport->port_name);
+ data[0] = data[1] = 0;
+ qla2x00_post_async_adisc_work(
+ vha, fcport, data);
+ }
break;
case MODE_INITIATOR:
case MODE_DUAL:
@@ -2820,24 +3025,29 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
"%s %d %8phC post %s\n", __func__,
__LINE__, fcport->port_name,
(atomic_read(&fcport->state) ==
- FCS_ONLINE) ? "gpdb" : "gnl");
+ FCS_ONLINE) ? "adisc" : "gnl");
if (atomic_read(&fcport->state) ==
- FCS_ONLINE)
- qla24xx_post_gpdb_work(vha,
- fcport, PDO_FORCE_ADISC);
- else
+ FCS_ONLINE) {
+ u16 data[2];
+
+ data[0] = data[1] = 0;
+ qla2x00_post_async_adisc_work(
+ vha, fcport, data);
+ } else {
qla24xx_post_gnl_work(vha,
fcport);
+ }
break;
}
} else { /* fcport->d_id.b24 != ea->id.b24 */
fcport->d_id.b24 = ea->id.b24;
- if (fcport->deleted == QLA_SESS_DELETED) {
+ fcport->id_changed = 1;
+ if (fcport->deleted != QLA_SESS_DELETED) {
ql_dbg(ql_dbg_disc, vha, 0x2021,
"%s %d %8phC post del sess\n",
__func__, __LINE__, fcport->port_name);
- qlt_schedule_sess_for_deletion_lock(fcport);
+ qlt_schedule_sess_for_deletion(fcport);
}
}
} else { /* ea->sp->gen1 != fcport->rscn_gen */
@@ -2854,7 +3064,7 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
ql_dbg(ql_dbg_disc, vha, 0x2042,
"%s %d %8phC post del sess\n", __func__,
__LINE__, fcport->port_name);
- qlt_schedule_sess_for_deletion_lock(fcport);
+ qlt_schedule_sess_for_deletion(fcport);
} else {
ql_dbg(ql_dbg_disc, vha, 0x2045,
"%s %d %8phC login\n", __func__, __LINE__,
@@ -2878,7 +3088,7 @@ static void qla2x00_async_gidpn_sp_done(void *s, int res)
u8 *id = fcport->ct_desc.ct_sns->p.rsp.rsp.gid_pn.port_id;
struct event_arg ea;
- fcport->flags &= ~FCF_ASYNC_SENT;
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
memset(&ea, 0, sizeof(ea));
ea.fcport = fcport;
@@ -2889,9 +3099,22 @@ static void qla2x00_async_gidpn_sp_done(void *s, int res)
ea.rc = res;
ea.event = FCME_GIDPN_DONE;
- ql_dbg(ql_dbg_disc, vha, 0x204f,
- "Async done-%s res %x, WWPN %8phC ID %3phC \n",
- sp->name, res, fcport->port_name, id);
+ if (res == QLA_FUNCTION_TIMEOUT) {
+ ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
+ "Async done-%s WWPN %8phC timed out.\n",
+ sp->name, fcport->port_name);
+ qla24xx_post_gidpn_work(sp->vha, fcport);
+ sp->free(sp);
+ return;
+ } else if (res) {
+ ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
+ "Async done-%s fail res %x, WWPN %8phC\n",
+ sp->name, res, fcport->port_name);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x204f,
+ "Async done-%s good WWPN %8phC ID %3phC\n",
+ sp->name, fcport->port_name, id);
+ }
qla2x00_fcport_event_handler(vha, &ea);
@@ -2904,16 +3127,16 @@ int qla24xx_async_gidpn(scsi_qla_host_t *vha, fc_port_t *fcport)
struct ct_sns_req *ct_req;
srb_t *sp;
- if (!vha->flags.online)
- goto done;
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ return rval;
- fcport->flags |= FCF_ASYNC_SENT;
fcport->disc_state = DSC_GID_PN;
fcport->scan_state = QLA_FCPORT_SCAN;
sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
if (!sp)
goto done;
+ fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_CT_PTHRU_CMD;
sp->name = "gidpn";
sp->gen1 = fcport->rscn_gen;
@@ -2954,8 +3177,8 @@ int qla24xx_async_gidpn(scsi_qla_host_t *vha, fc_port_t *fcport)
done_free_sp:
sp->free(sp);
-done:
fcport->flags &= ~FCF_ASYNC_SENT;
+done:
return rval;
}
@@ -2974,6 +3197,7 @@ int qla24xx_post_gidpn_work(struct scsi_qla_host *vha, fc_port_t *fcport)
return QLA_FUNCTION_FAILED;
e->u.fcport.fcport = fcport;
+ fcport->flags |= FCF_ASYNC_ACTIVE;
return qla2x00_post_work(vha, e);
}
@@ -2986,9 +3210,39 @@ int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport)
return QLA_FUNCTION_FAILED;
e->u.fcport.fcport = fcport;
+ fcport->flags |= FCF_ASYNC_ACTIVE;
return qla2x00_post_work(vha, e);
}
+void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+ struct fc_port *fcport = ea->fcport;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20d8,
+ "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
+ fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);
+
+ if (fcport->disc_state == DSC_DELETE_PEND)
+ return;
+
+ if (ea->sp->gen2 != fcport->login_gen) {
+ /* target side must have changed it. */
+ ql_dbg(ql_dbg_disc, vha, 0x20d3,
+ "%s %8phC generation changed\n",
+ __func__, fcport->port_name);
+ return;
+ } else if (ea->sp->gen1 != fcport->rscn_gen) {
+ ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_gidpn_work(vha, fcport);
+ return;
+ }
+
+ qla24xx_post_upd_fcport_work(vha, ea->fcport);
+}
+
static void qla24xx_async_gpsc_sp_done(void *s, int res)
{
struct srb *sp = s;
@@ -3004,7 +3258,7 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res)
"Async done-%s res %x, WWPN %8phC \n",
sp->name, res, fcport->port_name);
- fcport->flags &= ~FCF_ASYNC_SENT;
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
if (res == (DID_ERROR << 16)) {
/* entry status error */
@@ -3055,6 +3309,7 @@ done:
ea.event = FCME_GPSC_DONE;
ea.rc = res;
ea.fcport = fcport;
+ ea.sp = sp;
qla2x00_fcport_event_handler(vha, &ea);
sp->free(sp);
@@ -3066,14 +3321,14 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
struct ct_sns_req *ct_req;
srb_t *sp;
- if (!vha->flags.online)
- goto done;
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ return rval;
- fcport->flags |= FCF_ASYNC_SENT;
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
+ fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_CT_PTHRU_CMD;
sp->name = "gpsc";
sp->gen1 = fcport->rscn_gen;
@@ -3113,8 +3368,8 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
done_free_sp:
sp->free(sp);
-done:
fcport->flags &= ~FCF_ASYNC_SENT;
+done:
return rval;
}
@@ -3133,7 +3388,7 @@ int qla24xx_post_gpnid_work(struct scsi_qla_host *vha, port_id_t *id)
return qla2x00_post_work(vha, e);
}
-void qla24xx_async_gpnid_done(scsi_qla_host_t *vha, srb_t *sp)
+void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp)
{
if (sp->u.iocb_cmd.u.ctarg.req) {
dma_free_coherent(&vha->hw->pdev->dev,
@@ -3155,43 +3410,137 @@ void qla24xx_async_gpnid_done(scsi_qla_host_t *vha, srb_t *sp)
void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
- fc_port_t *fcport;
- unsigned long flags;
+ fc_port_t *fcport, *conflict, *t;
+ u16 data[2];
- spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
- fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1);
- spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d port_id: %06x\n",
+ __func__, __LINE__, ea->id.b24);
- if (fcport) {
- /* cable moved. just plugged in */
- fcport->rscn_gen++;
- fcport->d_id = ea->id;
- fcport->scan_state = QLA_FCPORT_FOUND;
- fcport->flags |= FCF_FABRIC_DEVICE;
-
- switch (fcport->disc_state) {
- case DSC_DELETED:
- ql_dbg(ql_dbg_disc, vha, 0x210d,
- "%s %d %8phC login\n", __func__, __LINE__,
- fcport->port_name);
- qla24xx_fcport_handle_login(vha, fcport);
- break;
- case DSC_DELETE_PEND:
- break;
- default:
- ql_dbg(ql_dbg_disc, vha, 0x2064,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__, fcport->port_name);
- qlt_schedule_sess_for_deletion_lock(fcport);
- break;
+ if (ea->rc) {
+ /* cable is disconnected */
+ list_for_each_entry_safe(fcport, t, &vha->vp_fcports, list) {
+ if (fcport->d_id.b24 == ea->id.b24) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC DS %d\n",
+ __func__, __LINE__,
+ fcport->port_name,
+ fcport->disc_state);
+ fcport->scan_state = QLA_FCPORT_SCAN;
+ switch (fcport->disc_state) {
+ case DSC_DELETED:
+ case DSC_DELETE_PEND:
+ break;
+ default:
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__,
+ fcport->port_name);
+ qlt_schedule_sess_for_deletion(fcport);
+ break;
+ }
+ }
}
} else {
- /* create new fcport */
- ql_dbg(ql_dbg_disc, vha, 0x2065,
- "%s %d %8phC post new sess\n",
- __func__, __LINE__, ea->port_name);
+ /* cable is connected */
+ fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1);
+ if (fcport) {
+ list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
+ list) {
+ if ((conflict->d_id.b24 == ea->id.b24) &&
+ (fcport != conflict)) {
+ /* Two fcports have conflicting Nport IDs, or
+ * an existing fcport's Nport ID conflicts
+ * with the new fcport's.
+ */
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC DS %d\n",
+ __func__, __LINE__,
+ conflict->port_name,
+ conflict->disc_state);
+ conflict->scan_state = QLA_FCPORT_SCAN;
+ switch (conflict->disc_state) {
+ case DSC_DELETED:
+ case DSC_DELETE_PEND:
+ break;
+ default:
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__,
+ conflict->port_name);
+ qlt_schedule_sess_for_deletion
+ (conflict);
+ break;
+ }
+ }
+ }
+
+ fcport->rscn_gen++;
+ fcport->scan_state = QLA_FCPORT_FOUND;
+ fcport->flags |= FCF_FABRIC_DEVICE;
+ switch (fcport->disc_state) {
+ case DSC_LOGIN_COMPLETE:
+ /* recheck session is still intact. */
+ ql_dbg(ql_dbg_disc, vha, 0x210d,
+ "%s %d %8phC revalidate session with ADISC\n",
+ __func__, __LINE__, fcport->port_name);
+ data[0] = data[1] = 0;
+ qla2x00_post_async_adisc_work(vha, fcport,
+ data);
+ break;
+ case DSC_DELETED:
+ ql_dbg(ql_dbg_disc, vha, 0x210d,
+ "%s %d %8phC login\n", __func__, __LINE__,
+ fcport->port_name);
+ fcport->d_id = ea->id;
+ qla24xx_fcport_handle_login(vha, fcport);
+ break;
+ case DSC_DELETE_PEND:
+ fcport->d_id = ea->id;
+ break;
+ default:
+ fcport->d_id = ea->id;
+ break;
+ }
+ } else {
+ list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
+ list) {
+ if (conflict->d_id.b24 == ea->id.b24) {
+ /* Two fcports have conflicting Nport IDs, or
+ * an existing fcport's Nport ID conflicts
+ * with the new fcport's.
+ */
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC DS %d\n",
+ __func__, __LINE__,
+ conflict->port_name,
+ conflict->disc_state);
+
+ conflict->scan_state = QLA_FCPORT_SCAN;
+ switch (conflict->disc_state) {
+ case DSC_DELETED:
+ case DSC_DELETE_PEND:
+ break;
+ default:
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__,
+ conflict->port_name);
+ qlt_schedule_sess_for_deletion
+ (conflict);
+ break;
+ }
+ }
+ }
- qla24xx_post_newsess_work(vha, &ea->id, ea->port_name, NULL);
+ /* create new fcport */
+ ql_dbg(ql_dbg_disc, vha, 0x2065,
+ "%s %d %8phC post new sess\n",
+ __func__, __LINE__, ea->port_name);
+ qla24xx_post_newsess_work(vha, &ea->id,
+ ea->port_name, NULL, NULL, FC4_TYPE_UNKNOWN);
+ }
}
}
@@ -3205,11 +3554,18 @@ static void qla2x00_async_gpnid_sp_done(void *s, int res)
(struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
struct event_arg ea;
struct qla_work_evt *e;
+ unsigned long flags;
- ql_dbg(ql_dbg_disc, vha, 0x2066,
- "Async done-%s res %x ID %3phC. %8phC\n",
- sp->name, res, ct_req->req.port_id.port_id,
- ct_rsp->rsp.gpn_id.port_name);
+ if (res)
+ ql_dbg(ql_dbg_disc, vha, 0x2066,
+ "Async done-%s fail res %x rscn gen %d ID %3phC. %8phC\n",
+ sp->name, res, sp->gen1, ct_req->req.port_id.port_id,
+ ct_rsp->rsp.gpn_id.port_name);
+ else
+ ql_dbg(ql_dbg_disc, vha, 0x2066,
+ "Async done-%s good rscn gen %d ID %3phC. %8phC\n",
+ sp->name, sp->gen1, ct_req->req.port_id.port_id,
+ ct_rsp->rsp.gpn_id.port_name);
memset(&ea, 0, sizeof(ea));
memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
@@ -3220,9 +3576,26 @@ static void qla2x00_async_gpnid_sp_done(void *s, int res)
ea.rc = res;
ea.event = FCME_GPNID_DONE;
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ list_del(&sp->elem);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+ if (res) {
+ if (res == QLA_FUNCTION_TIMEOUT) {
+ qla24xx_post_gpnid_work(sp->vha, &ea.id);
+ sp->free(sp);
+ return;
+ }
+ } else if (sp->gen1) {
+ /* There was another RSCN for this Nport ID */
+ qla24xx_post_gpnid_work(sp->vha, &ea.id);
+ sp->free(sp);
+ return;
+ }
+
qla2x00_fcport_event_handler(vha, &ea);
- e = qla2x00_alloc_work(vha, QLA_EVT_GPNID_DONE);
+ e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
if (!e) {
+ /* Free here to avoid a memory leak; ignore the resulting kernel warning. */
if (sp->u.iocb_cmd.u.ctarg.req) {
@@ -3253,8 +3626,9 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
{
int rval = QLA_FUNCTION_FAILED;
struct ct_sns_req *ct_req;
- srb_t *sp;
+ srb_t *sp, *tsp;
struct ct_sns_pkt *ct_sns;
+ unsigned long flags;
if (!vha->flags.online)
goto done;
@@ -3265,8 +3639,22 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
sp->type = SRB_CT_PTHRU_CMD;
sp->name = "gpnid";
+ sp->u.iocb_cmd.u.ctarg.id = *id;
+ sp->gen1 = 0;
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ list_for_each_entry(tsp, &vha->gpnid_list, elem) {
+ if (tsp->u.iocb_cmd.u.ctarg.id.b24 == id->b24) {
+ tsp->gen1++;
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ sp->free(sp);
+ goto done;
+ }
+ }
+ list_add_tail(&sp->elem, &vha->gpnid_list);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
GFP_KERNEL);
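The gpnid_list walk above coalesces duplicate in-flight GPN_ID queries: the newcomer is freed, and bumping the survivor's gen1 tells qla2x00_async_gpnid_sp_done() that another RSCN arrived, so it re-queries exactly once. The pattern in isolation (helper name hypothetical; in the driver this runs under tgt.sess_lock):

/* Hypothetical helper: coalesce a duplicate GPN_ID for the same port ID. */
static bool gpnid_coalesce(scsi_qla_host_t *vha, srb_t *sp, port_id_t *id)
{
	srb_t *tsp;

	list_for_each_entry(tsp, &vha->gpnid_list, elem) {
		if (tsp->u.iocb_cmd.u.ctarg.id.b24 == id->b24) {
			tsp->gen1++;	/* completion will re-query once */
			return true;	/* caller frees the duplicate sp */
		}
	}
	list_add_tail(&sp->elem, &vha->gpnid_list);
	return false;
}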
@@ -3393,7 +3781,7 @@ int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport)
struct ct_sns_req *ct_req;
srb_t *sp;
- if (!vha->flags.online)
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
return rval;
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
@@ -3441,3 +3829,720 @@ done_free_sp:
fcport->flags &= ~FCF_ASYNC_SENT;
return rval;
}
+
+/* GPN_FT + GNN_FT*/
+static int qla2x00_is_a_vp(scsi_qla_host_t *vha, u64 wwn)
+{
+ struct qla_hw_data *ha = vha->hw;
+ scsi_qla_host_t *vp;
+ unsigned long flags;
+ u64 twwn;
+ int rc = 0;
+
+ if (!ha->num_vhosts)
+ return 0;
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ twwn = wwn_to_u64(vp->port_name);
+ if (wwn == twwn) {
+ rc = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ return rc;
+}
+
+void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
+{
+ fc_port_t *fcport;
+ u32 i, rc;
+ bool found;
+ u8 fc4type = sp->gen2;
+ struct fab_scan_rp *rp;
+ unsigned long flags;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s enter\n", __func__);
+
+ if (sp->gen1 != vha->hw->base_qpair->chip_reset) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s scan stop due to chip reset %x/%x\n",
+ sp->name, sp->gen1, vha->hw->base_qpair->chip_reset);
+ goto out;
+ }
+
+ rc = sp->rc;
+ if (rc) {
+ vha->scan.scan_retry++;
+ if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Fabric scan failed on all retries.\n");
+ }
+ goto out;
+ }
+ vha->scan.scan_retry = 0;
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list)
+ fcport->scan_state = QLA_FCPORT_SCAN;
+
+ for (i = 0; i < vha->hw->max_fibre_devices; i++) {
+ u64 wwn;
+
+ rp = &vha->scan.l[i];
+ found = false;
+
+ wwn = wwn_to_u64(rp->port_name);
+ if (wwn == 0)
+ continue;
+
+ if (!memcmp(rp->port_name, vha->port_name, WWN_SIZE))
+ continue;
+
+ /* Bypass reserved domain fields. */
+ if ((rp->id.b.domain & 0xf0) == 0xf0)
+ continue;
+
+ /* Bypass virtual ports of the same host. */
+ if (qla2x00_is_a_vp(vha, wwn))
+ continue;
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE))
+ continue;
+ fcport->scan_state = QLA_FCPORT_FOUND;
+ fcport->d_id.b24 = rp->id.b24;
+ found = true;
+ /*
+ * Device was not a fabric device before; mark it as one now.
+ */
+ if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
+ qla2x00_clear_loop_id(fcport);
+ fcport->flags |= FCF_FABRIC_DEVICE;
+ }
+ break;
+ }
+
+ if (!found) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post new sess\n",
+ __func__, __LINE__, rp->port_name);
+ qla24xx_post_newsess_work(vha, &rp->id, rp->port_name,
+ rp->node_name, NULL, fc4type);
+ }
+ }
+
+ /*
+ * Log out all previously known fabric devices now marked lost, except FCP2 devices.
+ */
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
+ continue;
+
+ if (fcport->scan_state != QLA_FCPORT_FOUND) {
+ if ((qla_dual_mode_enabled(vha) ||
+ qla_ini_mode_enabled(vha)) &&
+ atomic_read(&fcport->state) == FCS_ONLINE) {
+ qla2x00_mark_device_lost(vha, fcport,
+ ql2xplogiabsentdevice, 0);
+
+ if (fcport->loop_id != FC_NO_LOOP_ID &&
+ (fcport->flags & FCF_FCP2_DEVICE) == 0) {
+ ql_dbg(ql_dbg_disc, vha, 0x20f0,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__,
+ fcport->port_name);
+
+ qlt_schedule_sess_for_deletion(fcport);
+ continue;
+ }
+ }
+ } else
+ qla24xx_fcport_handle_login(vha, fcport);
+ }
+
+out:
+ qla24xx_sp_unmap(vha, sp);
+ spin_lock_irqsave(&vha->work_lock, flags);
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+}
+
+static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
+{
+ struct srb *sp = s;
+ struct scsi_qla_host *vha = sp->vha;
+ struct qla_work_evt *e;
+ struct ct_sns_req *ct_req =
+ (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
+ struct ct_sns_gpnft_rsp *ct_rsp =
+ (struct ct_sns_gpnft_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
+ struct ct_sns_gpn_ft_data *d;
+ struct fab_scan_rp *rp;
+ int i, j, k;
+ u16 cmd = be16_to_cpu(ct_req->command);
+
+ /* the gen2 field holds the fc4type */
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async done-%s res %x FC4Type %x\n",
+ sp->name, res, sp->gen2);
+
+ if (res) {
+ unsigned long flags;
+
+ sp->free(sp);
+ spin_lock_irqsave(&vha->work_lock, flags);
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ vha->scan.scan_retry++;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
+ if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ } else {
+ ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
+ "Async done-%s rescan failed on all retries\n",
+ sp->name);
+ }
+ return;
+ }
+
+ if (!res) {
+ port_id_t id;
+ u64 wwn;
+
+ j = 0;
+ for (i = 0; i < vha->hw->max_fibre_devices; i++) {
+ d = &ct_rsp->entries[i];
+
+ id.b.rsvd_1 = 0;
+ id.b.domain = d->port_id[0];
+ id.b.area = d->port_id[1];
+ id.b.al_pa = d->port_id[2];
+ wwn = wwn_to_u64(d->port_name);
+
+ if (id.b24 == 0 || wwn == 0)
+ continue;
+
+ if (cmd == GPN_FT_CMD) {
+ rp = &vha->scan.l[j];
+ rp->id = id;
+ memcpy(rp->port_name, d->port_name, 8);
+ j++;
+ } else { /* GNN_FT_CMD */
+ for (k = 0; k < vha->hw->max_fibre_devices;
+ k++) {
+ rp = &vha->scan.l[k];
+ if (id.b24 == rp->id.b24) {
+ memcpy(rp->node_name,
+ d->port_name, 8);
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ if (cmd == GPN_FT_CMD)
+ e = qla2x00_alloc_work(vha, QLA_EVT_GPNFT_DONE);
+ else
+ e = qla2x00_alloc_work(vha, QLA_EVT_GNNFT_DONE);
+ if (!e) {
+ /* Free here to avoid a memory leak; ignore the resulting kernel warning. */
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
+ if (sp->u.iocb_cmd.u.ctarg.rsp) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async done-%s unable to alloc work element\n",
+ sp->name);
+ sp->free(sp);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ return;
+ }
+
+ sp->rc = res;
+ e->u.iosb.sp = sp;
+
+ qla2x00_post_work(vha, e);
+}
+
+/*
+ * Get WWNN list for fc4_type
+ *
+ * The SRB from the preceding GPN_FT is re-used here to avoid
+ * freeing and re-allocating the DMA buffers.
+ */
+static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
+ u8 fc4_type)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ struct ct_sns_req *ct_req;
+ struct ct_sns_pkt *ct_sns;
+
+ if (!vha->flags.online) {
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ goto done_free_sp;
+ }
+
+ if (!sp->u.iocb_cmd.u.ctarg.req || !sp->u.iocb_cmd.u.ctarg.rsp) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "%s: req %p rsp %p are not setup\n",
+ __func__, sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.rsp);
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ WARN_ON(1);
+ goto done_free_sp;
+ }
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "gnnft";
+ sp->gen1 = vha->hw->base_qpair->chip_reset;
+ sp->gen2 = fc4_type;
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
+ memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
+
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
+ /* CT_IU preamble */
+ ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD,
+ sp->u.iocb_cmd.u.ctarg.rsp_size);
+
+ /* GNN_FT req (the request layout is shared with GPN_FT) */
+ ct_req->req.gpn_ft.port_type = fc4_type;
+
+ sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_gpnft_gnnft_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s hdl=%x FC4Type %x.\n", sp->name,
+ sp->handle, ct_req->req.gpn_ft.port_type);
+ return rval;
+
+done_free_sp:
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
+ if (sp->u.iocb_cmd.u.ctarg.rsp) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+
+ sp->free(sp);
+
+ return rval;
+} /* GNNFT */
+
+void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp)
+{
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s enter\n", __func__);
+ del_timer(&sp->u.iocb_cmd.timer);
+ qla24xx_async_gnnft(vha, sp, sp->gen2);
+}
+
+/* Get WWPN list for certain fc4_type */
+int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+ struct ct_sns_pkt *ct_sns;
+ u32 rspsz;
+ unsigned long flags;
+
+ if (!vha->flags.online)
+ return rval;
+
+ spin_lock_irqsave(&vha->work_lock, flags);
+ if (vha->scan.scan_flags & SF_SCANNING) {
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+ ql_dbg(ql_dbg_disc, vha, 0xffff, "scan active\n");
+ return rval;
+ }
+ vha->scan.scan_flags |= SF_SCANNING;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
+ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+ if (!sp) {
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ return rval;
+ }
+
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "gpnft";
+ sp->gen1 = vha->hw->base_qpair->chip_reset;
+ sp->gen2 = fc4_type;
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ sp->u.iocb_cmd.u.ctarg.req = dma_zalloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.req) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Failed to allocate ct_sns request.\n");
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ goto done_free_sp;
+ }
+
+ rspsz = sizeof(struct ct_sns_gpnft_rsp) +
+ ((vha->hw->max_fibre_devices - 1) *
+ sizeof(struct ct_sns_gpn_ft_data));
+
+ sp->u.iocb_cmd.u.ctarg.rsp = dma_zalloc_coherent(&vha->hw->pdev->dev,
+ rspsz, &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Failed to allocate ct_sns request.\n");
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ goto done_free_sp;
+ }
+
+ memset(vha->scan.l, 0, vha->scan.size);
+
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
+ /* CT_IU preamble */
+ ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz);
+
+ /* GPN_FT req */
+ ct_req->req.gpn_ft.port_type = fc4_type;
+
+ sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = rspsz;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_gpnft_gnnft_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ goto done_free_sp;
+ }
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s hdl=%x FC4Type %x.\n", sp->name,
+ sp->handle, ct_req->req.gpn_ft.port_type);
+ return rval;
+
+done_free_sp:
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
+ if (sp->u.iocb_cmd.u.ctarg.rsp) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+
+ sp->free(sp);
+
+ return rval;
+}
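GPN_FT and GNN_FT run as one pipeline over a single SRB: pass one collects port IDs and WWPNs into vha->scan.l, pass two re-arms the same buffers to merge in the WWNNs, and only then is the fcport list reconciled. In call order (all names from this patch):

/* Fabric scan pipeline (sketch of the call order):
 *   qla24xx_async_gpnft()                  - pass 1, WWPN per port ID
 *   -> qla2x00_async_gpnft_gnnft_sp_done() - posts QLA_EVT_GPNFT_DONE
 *   -> qla24xx_async_gpnft_done()          - DPC: re-arms the SRB
 *   -> qla24xx_async_gnnft()               - pass 2, WWNN per port ID
 *   -> qla2x00_async_gpnft_gnnft_sp_done() - posts QLA_EVT_GNNFT_DONE
 *   -> qla24xx_async_gnnft_done()          - DPC: reconciles vp_fcports
 */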
+
+void qla_scan_work_fn(struct work_struct *work)
+{
+ struct fab_scan *s = container_of(to_delayed_work(work),
+ struct fab_scan, scan_work);
+ struct scsi_qla_host *vha = container_of(s, struct scsi_qla_host,
+ scan);
+ unsigned long flags;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s: schedule loop resync\n", __func__);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ spin_lock_irqsave(&vha->work_lock, flags);
+ vha->scan.scan_flags &= ~SF_QUEUED;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+}
+
+/* GNN_ID */
+void qla24xx_handle_gnnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+ qla24xx_post_gnl_work(vha, ea->fcport);
+}
+
+static void qla2x00_async_gnnid_sp_done(void *s, int res)
+{
+ struct srb *sp = s;
+ struct scsi_qla_host *vha = sp->vha;
+ fc_port_t *fcport = sp->fcport;
+ u8 *node_name = fcport->ct_desc.ct_sns->p.rsp.rsp.gnn_id.node_name;
+ struct event_arg ea;
+ u64 wwnn;
+
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ wwnn = wwn_to_u64(node_name);
+ if (wwnn)
+ memcpy(fcport->node_name, node_name, WWN_SIZE);
+
+ memset(&ea, 0, sizeof(ea));
+ ea.fcport = fcport;
+ ea.sp = sp;
+ ea.rc = res;
+ ea.event = FCME_GNNID_DONE;
+
+ ql_dbg(ql_dbg_disc, vha, 0x204f,
+ "Async done-%s res %x, WWPN %8phC %8phC\n",
+ sp->name, res, fcport->port_name, fcport->node_name);
+
+ qla2x00_fcport_event_handler(vha, &ea);
+
+ sp->free(sp);
+}
+
+int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ return rval;
+
+ fcport->disc_state = DSC_GNN_ID;
+ sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
+ if (!sp)
+ goto done;
+
+ fcport->flags |= FCF_ASYNC_SENT;
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "gnnid";
+ sp->gen1 = fcport->rscn_gen;
+ sp->gen2 = fcport->login_gen;
+
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ /* CT_IU preamble */
+ ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GNN_ID_CMD,
+ GNN_ID_RSP_SIZE);
+
+ /* GNN_ID req */
+ ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain;
+ ct_req->req.port_id.port_id[1] = fcport->d_id.b.area;
+ ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
+
+ /* req & rsp use the same buffer */
+ sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
+ sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
+ sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
+ sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
+ sp->u.iocb_cmd.u.ctarg.req_size = GNN_ID_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = GNN_ID_RSP_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_gnnid_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
+ sp->name, fcport->port_name,
+ sp->handle, fcport->loop_id, fcport->d_id.b24);
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+ fcport->flags &= ~FCF_ASYNC_SENT;
+done:
+ return rval;
+}
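In contrast to the fabric-wide queries, the per-port GNN_ID (and GFPN_ID below) allocates nothing: both ctarg pointers are aimed at the fcport's preallocated ct_desc buffer, so the response simply overwrites the request. A condensed sketch of that wiring (helper name hypothetical; fields as used above):

/* Hypothetical helper: single-buffer CT wiring for per-port queries. */
static void qla_ct_use_fcport_buf(srb_t *sp, fc_port_t *fcport,
	u32 req_size, u32 rsp_size)
{
	struct srb_iocb *ci = &sp->u.iocb_cmd;

	ci->u.ctarg.req = fcport->ct_desc.ct_sns;	/* same buffer... */
	ci->u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
	ci->u.ctarg.rsp = fcport->ct_desc.ct_sns;	/* ...both ways */
	ci->u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
	ci->u.ctarg.req_size = req_size;
	ci->u.ctarg.rsp_size = rsp_size;
	ci->u.ctarg.nport_handle = NPH_SNS;
}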
+
+int qla24xx_post_gnnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ struct qla_work_evt *e;
+ int ls;
+
+ ls = atomic_read(&vha->loop_state);
+ if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
+ test_bit(UNLOADING, &vha->dpc_flags))
+ return 0;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_GNNID);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.fcport.fcport = fcport;
+ return qla2x00_post_work(vha, e);
+}
+
+/* GPFN_ID */
+void qla24xx_handle_gfpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+ fc_port_t *fcport = ea->fcport;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d fcpcnt %d\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
+ fcport->rscn_gen, ea->sp->gen1, vha->fcport_count);
+
+ if (fcport->disc_state == DSC_DELETE_PEND)
+ return;
+
+ if (ea->sp->gen2 != fcport->login_gen) {
+ /* target side must have changed it. */
+ ql_dbg(ql_dbg_disc, vha, 0x20d3,
+ "%s %8phC generation changed\n",
+ __func__, fcport->port_name);
+ return;
+ } else if (ea->sp->gen1 != fcport->rscn_gen) {
+ ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_gidpn_work(vha, fcport);
+ return;
+ }
+
+ qla24xx_post_gpsc_work(vha, fcport);
+}
+
+static void qla2x00_async_gfpnid_sp_done(void *s, int res)
+{
+ struct srb *sp = s;
+ struct scsi_qla_host *vha = sp->vha;
+ fc_port_t *fcport = sp->fcport;
+ u8 *fpn = fcport->ct_desc.ct_sns->p.rsp.rsp.gfpn_id.port_name;
+ struct event_arg ea;
+ u64 wwn;
+
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ wwn = wwn_to_u64(fpn);
+ if (wwn)
+ memcpy(fcport->fabric_port_name, fpn, WWN_SIZE);
+
+ memset(&ea, 0, sizeof(ea));
+ ea.fcport = fcport;
+ ea.sp = sp;
+ ea.rc = res;
+ ea.event = FCME_GFPNID_DONE;
+
+ ql_dbg(ql_dbg_disc, vha, 0x204f,
+ "Async done-%s res %x, WWPN %8phC %8phC\n",
+ sp->name, res, fcport->port_name, fcport->fabric_port_name);
+
+ qla2x00_fcport_event_handler(vha, &ea);
+
+ sp->free(sp);
+}
+
+int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ return rval;
+
+ fcport->disc_state = DSC_GFPN_ID;
+ sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
+ if (!sp)
+ goto done;
+
+ fcport->flags |= FCF_ASYNC_SENT;
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "gfpnid";
+ sp->gen1 = fcport->rscn_gen;
+ sp->gen2 = fcport->login_gen;
+
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ /* CT_IU preamble */
+ ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFPN_ID_CMD,
+ GFPN_ID_RSP_SIZE);
+
+ /* GFPN_ID req */
+ ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain;
+ ct_req->req.port_id.port_id[1] = fcport->d_id.b.area;
+ ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
+
+ /* req & rsp use the same buffer */
+ sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
+ sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
+ sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
+ sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
+ sp->u.iocb_cmd.u.ctarg.req_size = GFPN_ID_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = GFPN_ID_RSP_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_gfpnid_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
+ sp->name, fcport->port_name,
+ sp->handle, fcport->loop_id, fcport->d_id.b24);
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+ fcport->flags &= ~FCF_ASYNC_SENT;
+done:
+ return rval;
+}
+
+int qla24xx_post_gfpnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ struct qla_work_evt *e;
+ int ls;
+
+ ls = atomic_read(&vha->loop_state);
+ if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
+ test_bit(UNLOADING, &vha->dpc_flags))
+ return 0;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_GFPNID);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.fcport.fcport = fcport;
+ return qla2x00_post_work(vha, e);
+}
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 1bafa043f9f1..aececf664654 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -41,6 +41,7 @@ static void qla24xx_handle_plogi_done_event(struct scsi_qla_host *,
struct event_arg *);
static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
struct event_arg *);
+static void __qla24xx_handle_gpdb_event(scsi_qla_host_t *, struct event_arg *);
/* SRB Extensions ---------------------------------------------------------- */
@@ -58,7 +59,8 @@ qla2x00_sp_timeout(struct timer_list *t)
req->outstanding_cmds[sp->handle] = NULL;
iocb = &sp->u.iocb_cmd;
iocb->timeout(sp);
- sp->free(sp);
+ if (sp->type != SRB_ELS_DCMD)
+ sp->free(sp);
spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
}
@@ -102,14 +104,21 @@ qla2x00_async_iocb_timeout(void *data)
struct srb_iocb *lio = &sp->u.iocb_cmd;
struct event_arg ea;
- ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
- "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
- sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
+ if (fcport) {
+ ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
+ "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
+ sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
- fcport->flags &= ~FCF_ASYNC_SENT;
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+ } else {
+ pr_info("Async-%s timeout - hdl=%x.\n",
+ sp->name, sp->handle);
+ }
switch (sp->type) {
case SRB_LOGIN_CMD:
+ if (!fcport)
+ break;
/* Retry as needed. */
lio->u.logio.data[0] = MBS_COMMAND_ERROR;
lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
@@ -123,6 +132,8 @@ qla2x00_async_iocb_timeout(void *data)
qla24xx_handle_plogi_done_event(fcport->vha, &ea);
break;
case SRB_LOGOUT_CMD:
+ if (!fcport)
+ break;
qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT);
break;
case SRB_CT_PTHRU_CMD:
@@ -130,6 +141,7 @@ qla2x00_async_iocb_timeout(void *data)
case SRB_NACK_PLOGI:
case SRB_NACK_PRLI:
case SRB_NACK_LOGO:
+ case SRB_CTRL_VP:
sp->done(sp, QLA_FUNCTION_TIMEOUT);
break;
}
@@ -146,7 +158,8 @@ qla2x00_async_login_sp_done(void *ptr, int res)
ql_dbg(ql_dbg_disc, vha, 0x20dd,
"%s %8phC res %d \n", __func__, sp->fcport->port_name, res);
- sp->fcport->flags &= ~FCF_ASYNC_SENT;
+ sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+
if (!test_bit(UNLOADING, &vha->dpc_flags)) {
memset(&ea, 0, sizeof(ea));
ea.event = FCME_PLOGI_DONE;
@@ -173,11 +186,6 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
if (!vha->flags.online)
goto done;
- if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
- (fcport->fw_login_state == DSC_LS_PLOGI_COMP) ||
- (fcport->fw_login_state == DSC_LS_PRLI_PEND))
- goto done;
-
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
@@ -185,8 +193,11 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
fcport->flags |= FCF_ASYNC_SENT;
fcport->logout_completed = 0;
+ fcport->disc_state = DSC_LOGIN_PEND;
sp->type = SRB_LOGIN_CMD;
sp->name = "login";
+ sp->gen1 = fcport->rscn_gen;
+ sp->gen2 = fcport->login_gen;
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
lio = &sp->u.iocb_cmd;
@@ -201,7 +212,6 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
lio->u.logio.flags |= SRB_LOGIN_RETRIED;
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- fcport->flags &= ~FCF_ASYNC_SENT;
fcport->flags |= FCF_LOGIN_NEEDED;
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
goto done_free_sp;
@@ -216,8 +226,8 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
done_free_sp:
sp->free(sp);
-done:
fcport->flags &= ~FCF_ASYNC_SENT;
+done:
return rval;
}
@@ -227,7 +237,7 @@ qla2x00_async_logout_sp_done(void *ptr, int res)
srb_t *sp = ptr;
struct srb_iocb *lio = &sp->u.iocb_cmd;
- sp->fcport->flags &= ~FCF_ASYNC_SENT;
+ sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
if (!test_bit(UNLOADING, &sp->vha->dpc_flags))
qla2x00_post_async_logout_done_work(sp->vha, sp->fcport,
lio->u.logio.data);
@@ -239,9 +249,11 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
{
srb_t *sp;
struct srb_iocb *lio;
- int rval;
+ int rval = QLA_FUNCTION_FAILED;
+
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ return rval;
- rval = QLA_FUNCTION_FAILED;
fcport->flags |= FCF_ASYNC_SENT;
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
@@ -272,16 +284,126 @@ done:
return rval;
}
+void
+qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport,
+ uint16_t *data)
+{
+ /* Don't re-login in target mode */
+ if (!fcport->tgt_session)
+ qla2x00_mark_device_lost(vha, fcport, 1, 0);
+ qlt_logo_completion_handler(fcport, data[0]);
+}
+
+static void
+qla2x00_async_prlo_sp_done(void *s, int res)
+{
+ srb_t *sp = (srb_t *)s;
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
+ struct scsi_qla_host *vha = sp->vha;
+
+ if (!test_bit(UNLOADING, &vha->dpc_flags))
+ qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport,
+ lio->u.logio.data);
+ sp->free(sp);
+}
+
+int
+qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ srb_t *sp;
+ struct srb_iocb *lio;
+ int rval;
+
+ rval = QLA_FUNCTION_FAILED;
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_PRLO_CMD;
+ sp->name = "prlo";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ lio = &sp->u.iocb_cmd;
+ lio->timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_prlo_sp_done;
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ ql_dbg(ql_dbg_disc, vha, 0x2070,
+ "Async-prlo - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
+ sp->handle, fcport->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+done:
+ return rval;
+}
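PRLO follows the same two-stage completion as the other logio commands: the SRB done callback only posts a work item, and the session teardown runs later on the DPC thread. In call order (all names from this patch):

/* PRLO flow (sketch of the call order; teardown is DPC-side):
 *   qla2x00_async_prlo()                   - issues SRB_PRLO_CMD
 *   -> qla2x00_async_prlo_sp_done()        - IRQ-side, posts work
 *   -> qla2x00_post_async_prlo_done_work() - queues the work event
 *   -> qla2x00_async_prlo_done()           - DPC: marks device lost
 */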
+
+static
+void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+ struct fc_port *fcport = ea->fcport;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20d2,
+ "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
+ fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);
+
+ if (ea->data[0] != MBS_COMMAND_COMPLETE) {
+ ql_dbg(ql_dbg_disc, vha, 0x2066,
+ "%s %8phC: adisc fail: post delete\n",
+ __func__, ea->fcport->port_name);
+ qlt_schedule_sess_for_deletion(ea->fcport);
+ return;
+ }
+
+ if (ea->fcport->disc_state == DSC_DELETE_PEND)
+ return;
+
+ if (ea->sp->gen2 != ea->fcport->login_gen) {
+ /* target side must have changed it. */
+ ql_dbg(ql_dbg_disc, vha, 0x20d3,
+ "%s %8phC generation changed\n",
+ __func__, ea->fcport->port_name);
+ return;
+ } else if (ea->sp->gen1 != ea->fcport->rscn_gen) {
+ ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
+ __func__, __LINE__, ea->fcport->port_name);
+ qla24xx_post_gidpn_work(vha, ea->fcport);
+ return;
+ }
+
+ __qla24xx_handle_gpdb_event(vha, ea);
+}
+
static void
qla2x00_async_adisc_sp_done(void *ptr, int res)
{
srb_t *sp = ptr;
struct scsi_qla_host *vha = sp->vha;
+ struct event_arg ea;
struct srb_iocb *lio = &sp->u.iocb_cmd;
- if (!test_bit(UNLOADING, &vha->dpc_flags))
- qla2x00_post_async_adisc_done_work(sp->vha, sp->fcport,
- lio->u.logio.data);
+ ql_dbg(ql_dbg_disc, vha, 0x2066,
+ "Async done-%s res %x %8phC\n",
+ sp->name, res, sp->fcport->port_name);
+
+ memset(&ea, 0, sizeof(ea));
+ ea.event = FCME_ADISC_DONE;
+ ea.rc = res;
+ ea.data[0] = lio->u.logio.data[0];
+ ea.data[1] = lio->u.logio.data[1];
+ ea.iop[0] = lio->u.logio.iop[0];
+ ea.iop[1] = lio->u.logio.iop[1];
+ ea.fcport = sp->fcport;
+ ea.sp = sp;
+
+ qla2x00_fcport_event_handler(vha, &ea);
+
sp->free(sp);
}
@@ -313,15 +435,15 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
goto done_free_sp;
ql_dbg(ql_dbg_disc, vha, 0x206f,
- "Async-adisc - hdl=%x loopid=%x portid=%02x%02x%02x.\n",
- sp->handle, fcport->loop_id, fcport->d_id.b.domain,
- fcport->d_id.b.area, fcport->d_id.b.al_pa);
+ "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
+ sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name);
return rval;
done_free_sp:
sp->free(sp);
done:
fcport->flags &= ~FCF_ASYNC_SENT;
+ qla2x00_post_async_adisc_work(vha, fcport, data);
return rval;
}
@@ -333,9 +455,19 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
u16 i, n, found = 0, loop_id;
port_id_t id;
u64 wwn;
- u8 opt = 0, current_login_state;
+ u16 data[2];
+ u8 current_login_state;
fcport = ea->fcport;
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state, ea->rc,
+ fcport->login_gen, fcport->last_login_gen,
+ fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id);
+
+ if (fcport->disc_state == DSC_DELETE_PEND)
+ return;
if (ea->rc) { /* rval */
if (fcport->login_retry == 0) {
@@ -356,9 +488,8 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
return;
} else if (fcport->last_login_gen != fcport->login_gen) {
ql_dbg(ql_dbg_disc, vha, 0x20e0,
- "%s %8phC login gen changed login %d|%d\n",
- __func__, fcport->port_name,
- fcport->last_login_gen, fcport->login_gen);
+ "%s %8phC login gen changed\n",
+ __func__, fcport->port_name);
return;
}
@@ -400,7 +531,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
ql_dbg(ql_dbg_disc, vha, 0x20e3,
"%s %d %8phC post del sess\n",
__func__, __LINE__, fcport->port_name);
- qlt_schedule_sess_for_deletion(fcport, 1);
+ qlt_schedule_sess_for_deletion(fcport);
return;
}
@@ -430,8 +561,14 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
ql_dbg(ql_dbg_disc, vha, 0x20e4,
"%s %d %8phC post gpdb\n",
__func__, __LINE__, fcport->port_name);
- opt = PDO_FORCE_ADISC;
- qla24xx_post_gpdb_work(vha, fcport, opt);
+
+ if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
+ fcport->port_type = FCT_INITIATOR;
+ else
+ fcport->port_type = FCT_TARGET;
+
+ data[0] = data[1] = 0;
+ qla2x00_post_async_adisc_work(vha, fcport, data);
break;
case DSC_LS_PORT_UNAVAIL:
default:
@@ -449,36 +586,29 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
if (!found) {
/* fw has no record of this port */
- if (fcport->loop_id == FC_NO_LOOP_ID) {
- qla2x00_find_new_loop_id(vha, fcport);
- fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
- } else {
- for (i = 0; i < n; i++) {
- e = &vha->gnl.l[i];
- id.b.domain = e->port_id[0];
- id.b.area = e->port_id[1];
- id.b.al_pa = e->port_id[2];
- id.b.rsvd_1 = 0;
- loop_id = le16_to_cpu(e->nport_handle);
-
- if (fcport->d_id.b24 == id.b24) {
- conflict_fcport =
- qla2x00_find_fcport_by_wwpn(vha,
- e->port_name, 0);
-
- ql_dbg(ql_dbg_disc, vha, 0x20e6,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__,
- conflict_fcport->port_name);
- qlt_schedule_sess_for_deletion
- (conflict_fcport, 1);
- }
-
- if (fcport->loop_id == loop_id) {
- /* FW already picked this loop id for another fcport */
- qla2x00_find_new_loop_id(vha, fcport);
- }
+ for (i = 0; i < n; i++) {
+ e = &vha->gnl.l[i];
+ id.b.domain = e->port_id[0];
+ id.b.area = e->port_id[1];
+ id.b.al_pa = e->port_id[2];
+ id.b.rsvd_1 = 0;
+ loop_id = le16_to_cpu(e->nport_handle);
+
+ if (fcport->d_id.b24 == id.b24) {
+ conflict_fcport =
+ qla2x00_find_fcport_by_wwpn(vha,
+ e->port_name, 0);
+ ql_dbg(ql_dbg_disc, vha, 0x20e6,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__,
+ conflict_fcport->port_name);
+ qlt_schedule_sess_for_deletion
+ (conflict_fcport);
}
+
+ /* FW already picked this loop id for another fcport */
+ if (fcport->loop_id == loop_id)
+ fcport->loop_id = FC_NO_LOOP_ID;
}
qla24xx_fcport_handle_login(vha, fcport);
}
@@ -496,6 +626,7 @@ qla24xx_async_gnl_sp_done(void *s, int res)
struct get_name_list_extended *e;
u64 wwn;
struct list_head h;
+ bool found = false;
ql_dbg(ql_dbg_disc, vha, 0x20e7,
"Async done-%s res %x mb[1]=%x mb[2]=%x \n",
@@ -539,12 +670,44 @@ qla24xx_async_gnl_sp_done(void *s, int res)
list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
list_del_init(&fcport->gnl_entry);
- fcport->flags &= ~FCF_ASYNC_SENT;
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
ea.fcport = fcport;
qla2x00_fcport_event_handler(vha, &ea);
}
+ /* create new fcport if fw has knowledge of new sessions */
+ for (i = 0; i < n; i++) {
+ port_id_t id;
+ u64 wwnn;
+
+ e = &vha->gnl.l[i];
+ wwn = wwn_to_u64(e->port_name);
+
+ found = false;
+ list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
+ if (!memcmp((u8 *)&wwn, fcport->port_name,
+ WWN_SIZE)) {
+ found = true;
+ break;
+ }
+ }
+
+ id.b.domain = e->port_id[2];
+ id.b.area = e->port_id[1];
+ id.b.al_pa = e->port_id[0];
+ id.b.rsvd_1 = 0;
+
+ if (!found && wwn && !IS_SW_RESV_ADDR(id)) {
+ ql_dbg(ql_dbg_disc, vha, 0x2065,
+ "%s %d %8phC %06x post new sess\n",
+ __func__, __LINE__, (u8 *)&wwn, id.b24);
+ wwnn = wwn_to_u64(e->node_name);
+ qla24xx_post_newsess_work(vha, &id, (u8 *)&wwn,
+ (u8 *)&wwnn, NULL, FC4_TYPE_UNKNOWN);
+ }
+ }
+
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
sp->free(sp);
@@ -558,14 +721,13 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
unsigned long flags;
u16 *mb;
- if (!vha->flags.online)
- goto done;
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ return rval;
ql_dbg(ql_dbg_disc, vha, 0x20d9,
"Async-gnlist WWPN %8phC \n", fcport->port_name);
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
- fcport->flags |= FCF_ASYNC_SENT;
fcport->disc_state = DSC_GNL;
fcport->last_rscn_gen = fcport->rscn_gen;
fcport->last_login_gen = fcport->login_gen;
@@ -573,8 +735,7 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
if (vha->gnl.sent) {
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
- rval = QLA_SUCCESS;
- goto done;
+ return QLA_SUCCESS;
}
vha->gnl.sent = 1;
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
@@ -582,6 +743,8 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
+
+ fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_MB_IOCB;
sp->name = "gnlist";
sp->gen1 = fcport->rscn_gen;
@@ -616,8 +779,8 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
done_free_sp:
sp->free(sp);
-done:
fcport->flags &= ~FCF_ASYNC_SENT;
+done:
return rval;
}
@@ -630,6 +793,7 @@ int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
return QLA_FUNCTION_FAILED;
e->u.fcport.fcport = fcport;
+ fcport->flags |= FCF_ASYNC_ACTIVE;
return qla2x00_post_work(vha, e);
}
@@ -639,31 +803,18 @@ void qla24xx_async_gpdb_sp_done(void *s, int res)
struct srb *sp = s;
struct scsi_qla_host *vha = sp->vha;
struct qla_hw_data *ha = vha->hw;
- struct port_database_24xx *pd;
fc_port_t *fcport = sp->fcport;
u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
- int rval = QLA_SUCCESS;
struct event_arg ea;
ql_dbg(ql_dbg_disc, vha, 0x20db,
"Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
sp->name, res, fcport->port_name, mb[1], mb[2]);
- fcport->flags &= ~FCF_ASYNC_SENT;
-
- if (res) {
- rval = res;
- goto gpd_error_out;
- }
-
- pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;
-
- rval = __qla24xx_parse_gpdb(vha, fcport, pd);
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
-gpd_error_out:
memset(&ea, 0, sizeof(ea));
ea.event = FCME_GPDB_DONE;
- ea.rc = rval;
ea.fcport = fcport;
ea.sp = sp;
@@ -754,7 +905,6 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- fcport->flags &= ~FCF_ASYNC_SENT;
fcport->flags |= FCF_LOGIN_NEEDED;
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
goto done_free_sp;
@@ -783,6 +933,7 @@ int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
e->u.fcport.fcport = fcport;
e->u.fcport.opt = opt;
+ fcport->flags |= FCF_ASYNC_ACTIVE;
return qla2x00_post_work(vha, e);
}
@@ -796,16 +947,16 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
struct port_database_24xx *pd;
struct qla_hw_data *ha = vha->hw;
- if (!vha->flags.online)
- goto done;
+ if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
+ return rval;
- fcport->flags |= FCF_ASYNC_SENT;
fcport->disc_state = DSC_GPDB;
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
+ fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_MB_IOCB;
sp->name = "gpdb";
sp->gen1 = fcport->rscn_gen;
@@ -851,47 +1002,17 @@ done_free_sp:
dma_pool_free(ha->s_dma_pool, pd, pd_dma);
sp->free(sp);
-done:
fcport->flags &= ~FCF_ASYNC_SENT;
+done:
qla24xx_post_gpdb_work(vha, fcport, opt);
return rval;
}
static
-void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
+void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
- int rval = ea->rc;
- fc_port_t *fcport = ea->fcport;
unsigned long flags;
- fcport->flags &= ~FCF_ASYNC_SENT;
-
- ql_dbg(ql_dbg_disc, vha, 0x20d2,
- "%s %8phC DS %d LS %d rval %d\n", __func__, fcport->port_name,
- fcport->disc_state, fcport->fw_login_state, rval);
-
- if (ea->sp->gen2 != fcport->login_gen) {
- /* target side must have changed it. */
- ql_dbg(ql_dbg_disc, vha, 0x20d3,
- "%s %8phC generation changed rscn %d|%d login %d|%d \n",
- __func__, fcport->port_name, fcport->last_rscn_gen,
- fcport->rscn_gen, fcport->last_login_gen,
- fcport->login_gen);
- return;
- } else if (ea->sp->gen1 != fcport->rscn_gen) {
- ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
- __func__, __LINE__, fcport->port_name);
- qla24xx_post_gidpn_work(vha, fcport);
- return;
- }
-
- if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
- __func__, __LINE__, fcport->port_name);
- qlt_schedule_sess_for_deletion_lock(fcport);
- return;
- }
-
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
ea->fcport->login_gen++;
ea->fcport->deleted = 0;
@@ -905,47 +1026,157 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
!vha->hw->flags.gpsc_supported) {
ql_dbg(ql_dbg_disc, vha, 0x20d6,
"%s %d %8phC post upd_fcport fcp_cnt %d\n",
- __func__, __LINE__, fcport->port_name,
+ __func__, __LINE__, ea->fcport->port_name,
vha->fcport_count);
- qla24xx_post_upd_fcport_work(vha, fcport);
+ qla24xx_post_upd_fcport_work(vha, ea->fcport);
} else {
- ql_dbg(ql_dbg_disc, vha, 0x20d7,
- "%s %d %8phC post gpsc fcp_cnt %d\n",
- __func__, __LINE__, fcport->port_name,
- vha->fcport_count);
-
- qla24xx_post_gpsc_work(vha, fcport);
+ if (ea->fcport->id_changed) {
+ ea->fcport->id_changed = 0;
+ ql_dbg(ql_dbg_disc, vha, 0x20d7,
+ "%s %d %8phC post gfpnid fcp_cnt %d\n",
+ __func__, __LINE__, ea->fcport->port_name,
+ vha->fcport_count);
+ qla24xx_post_gfpnid_work(vha, ea->fcport);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x20d7,
+ "%s %d %8phC post gpsc fcp_cnt %d\n",
+ __func__, __LINE__, ea->fcport->port_name,
+ vha->fcport_count);
+ qla24xx_post_gpsc_work(vha, ea->fcport);
+ }
}
+ } else if (ea->fcport->login_succ) {
+ /*
+ * We have an existing session. A late RSCN delivery
+ * must have triggered the session to be re-validate.
+ * Session is still valid.
+ */
+ ql_dbg(ql_dbg_disc, vha, 0x20d6,
+ "%s %d %8phC session revalidate success\n",
+ __func__, __LINE__, ea->fcport->port_name);
+ ea->fcport->disc_state = DSC_LOGIN_COMPLETE;
}
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+}
+
+static
+void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+ fc_port_t *fcport = ea->fcport;
+ struct port_database_24xx *pd;
+ struct srb *sp = ea->sp;
+
+ pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;
+
+ fcport->flags &= ~FCF_ASYNC_SENT;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20d2,
+ "%s %8phC DS %d LS %d rc %d\n", __func__, fcport->port_name,
+ fcport->disc_state, pd->current_login_state, ea->rc);
+
+ if (fcport->disc_state == DSC_DELETE_PEND)
+ return;
+
+ switch (pd->current_login_state) {
+ case PDS_PRLI_COMPLETE:
+ __qla24xx_parse_gpdb(vha, fcport, pd);
+ break;
+ case PDS_PLOGI_PENDING:
+ case PDS_PLOGI_COMPLETE:
+ case PDS_PRLI_PENDING:
+ case PDS_PRLI2_PENDING:
+ ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC relogin needed\n",
+ __func__, __LINE__, fcport->port_name);
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ return;
+ case PDS_LOGO_PENDING:
+ case PDS_PORT_UNAVAILABLE:
+ default:
+ ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
+ __func__, __LINE__, fcport->port_name);
+ qlt_schedule_sess_for_deletion(fcport);
+ return;
+ }
+ __qla24xx_handle_gpdb_event(vha, ea);
} /* gpdb event */
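+
+/*
+ * Dispatch summary for qla24xx_handle_gpdb_event() above (derived from the
+ * switch, not new behavior):
+ *	PDS_PRLI_COMPLETE                   - parse port database, finish login
+ *	PDS_PLOGI_xxx and PDS_PRLIx_PENDING - set RELOGIN_NEEDED, retry later
+ *	PDS_LOGO_PENDING, PORT_UNAVAILABLE  - schedule session for deletion
+ */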
-int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
+static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport)
{
- if (fcport->login_retry == 0)
- return 0;
+ u8 login = 0;
+ int rc;
- if (fcport->scan_state != QLA_FCPORT_FOUND)
- return 0;
+ if (qla_tgt_mode_enabled(vha))
+ return;
+
+ if (qla_dual_mode_enabled(vha)) {
+ if (N2N_TOPO(vha->hw)) {
+ u64 mywwn, wwn;
+
+ mywwn = wwn_to_u64(vha->port_name);
+ wwn = wwn_to_u64(fcport->port_name);
+ if (mywwn > wwn)
+ login = 1;
+ else if ((fcport->fw_login_state == DSC_LS_PLOGI_COMP)
+ && time_after_eq(jiffies,
+ fcport->plogi_nack_done_deadline))
+ login = 1;
+ } else {
+ login = 1;
+ }
+ } else {
+ /* initiator mode */
+ login = 1;
+ }
+
+ if (login) {
+ if (fcport->loop_id == FC_NO_LOOP_ID) {
+ fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
+ rc = qla2x00_find_new_loop_id(vha, fcport);
+ if (rc) {
+ ql_dbg(ql_dbg_disc, vha, 0x20e6,
+ "%s %d %8phC post del sess - out of loopid\n",
+ __func__, __LINE__, fcport->port_name);
+ fcport->scan_state = 0;
+ qlt_schedule_sess_for_deletion(fcport);
+ return;
+ }
+ }
+ ql_dbg(ql_dbg_disc, vha, 0x20bf,
+ "%s %d %8phC post login\n",
+ __func__, __LINE__, fcport->port_name);
+ qla2x00_post_async_login_work(vha, fcport, NULL);
+ }
+}
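+
+/*
+ * On an N2N link only one side may initiate PLOGI; the WWPN comparison in
+ * qla_chk_n2n_b4_login() is the tiebreak. Illustrative (made-up) values:
+ * with local WWPN 21:00:00:24:ff:00:00:02 and remote 21:00:00:24:ff:00:00:01,
+ * the numerically larger local WWPN wins and this side posts the login; the
+ * smaller side waits, unless the remote already completed PLOGI and the
+ * plogi_nack_done_deadline has passed.
+ */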
+
+int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ u16 data[2];
+ u64 wwn;
ql_dbg(ql_dbg_disc, vha, 0x20d8,
- "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d|%d retry %d lid %d\n",
+ "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d retry %d lid %d scan %d\n",
__func__, fcport->port_name, fcport->disc_state,
fcport->fw_login_state, fcport->login_pause, fcport->flags,
fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
- fcport->last_login_gen, fcport->login_gen, fcport->login_retry,
- fcport->loop_id);
+ fcport->login_gen, fcport->login_retry,
+ fcport->loop_id, fcport->scan_state);
- fcport->login_retry--;
+ if (fcport->login_retry == 0)
+ return 0;
+
+ if (fcport->scan_state != QLA_FCPORT_FOUND)
+ return 0;
if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
(fcport->fw_login_state == DSC_LS_PRLI_PEND))
return 0;
if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
- if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
+ if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
return 0;
+ }
}
/* for pure Target Mode. Login will not be initiated */
@@ -957,19 +1188,23 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
return 0;
}
+ fcport->login_retry--;
+
switch (fcport->disc_state) {
case DSC_DELETED:
- if (fcport->loop_id == FC_NO_LOOP_ID) {
+ wwn = wwn_to_u64(fcport->node_name);
+ if (wwn == 0) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post GNNID\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_gnnid_work(vha, fcport);
+ } else if (fcport->loop_id == FC_NO_LOOP_ID) {
ql_dbg(ql_dbg_disc, vha, 0x20bd,
"%s %d %8phC post gnl\n",
__func__, __LINE__, fcport->port_name);
- qla24xx_async_gnl(vha, fcport);
+ qla24xx_post_gnl_work(vha, fcport);
} else {
- ql_dbg(ql_dbg_disc, vha, 0x20bf,
- "%s %d %8phC post login\n",
- __func__, __LINE__, fcport->port_name);
- fcport->disc_state = DSC_LOGIN_PEND;
- qla2x00_post_async_login_work(vha, fcport, NULL);
+ qla_chk_n2n_b4_login(vha, fcport);
}
break;
@@ -981,40 +1216,26 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
break;
}
- if (fcport->flags & FCF_FCP2_DEVICE) {
- u8 opt = PDO_FORCE_ADISC;
-
- ql_dbg(ql_dbg_disc, vha, 0x20c9,
- "%s %d %8phC post gpdb\n",
- __func__, __LINE__, fcport->port_name);
-
- fcport->disc_state = DSC_GPDB;
- qla24xx_post_gpdb_work(vha, fcport, opt);
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x20cf,
- "%s %d %8phC post login\n",
- __func__, __LINE__, fcport->port_name);
- fcport->disc_state = DSC_LOGIN_PEND;
- qla2x00_post_async_login_work(vha, fcport, NULL);
- }
-
+ qla_chk_n2n_b4_login(vha, fcport);
break;
case DSC_LOGIN_FAILED:
ql_dbg(ql_dbg_disc, vha, 0x20d0,
"%s %d %8phC post gidpn\n",
__func__, __LINE__, fcport->port_name);
-
- qla24xx_post_gidpn_work(vha, fcport);
+ if (N2N_TOPO(vha->hw))
+ qla_chk_n2n_b4_login(vha, fcport);
+ else
+ qla24xx_post_gidpn_work(vha, fcport);
break;
case DSC_LOGIN_COMPLETE:
/* recheck login state */
ql_dbg(ql_dbg_disc, vha, 0x20d1,
- "%s %d %8phC post gpdb\n",
+ "%s %d %8phC post adisc\n",
__func__, __LINE__, fcport->port_name);
-
- qla24xx_post_gpdb_work(vha, fcport, PDO_FORCE_ADISC);
+ data[0] = data[1] = 0;
+ qla2x00_post_async_adisc_work(vha, fcport, data);
break;
default:
@@ -1040,16 +1261,15 @@ void qla24xx_handle_rscn_event(fc_port_t *fcport, struct event_arg *ea)
switch (fcport->disc_state) {
case DSC_DELETED:
case DSC_LOGIN_COMPLETE:
- qla24xx_post_gidpn_work(fcport->vha, fcport);
+ qla24xx_post_gpnid_work(fcport->vha, &ea->id);
break;
-
default:
break;
}
}
int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
- u8 *port_name, void *pla)
+ u8 *port_name, u8 *node_name, void *pla, u8 fc4_type)
{
struct qla_work_evt *e;
e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
@@ -1058,47 +1278,20 @@ int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
e->u.new_sess.id = *id;
e->u.new_sess.pla = pla;
+ e->u.new_sess.fc4_type = fc4_type;
memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE);
+ if (node_name)
+ memcpy(e->u.new_sess.node_name, node_name, WWN_SIZE);
return qla2x00_post_work(vha, e);
}
static
-int qla24xx_handle_delete_done_event(scsi_qla_host_t *vha,
- struct event_arg *ea)
-{
- fc_port_t *fcport = ea->fcport;
-
- if (test_bit(UNLOADING, &vha->dpc_flags))
- return 0;
-
- switch (vha->host->active_mode) {
- case MODE_INITIATOR:
- case MODE_DUAL:
- if (fcport->scan_state == QLA_FCPORT_FOUND)
- qla24xx_fcport_handle_login(vha, fcport);
- break;
-
- case MODE_TARGET:
- default:
- /* no-op */
- break;
- }
-
- return 0;
-}
-
-static
void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
struct event_arg *ea)
{
fc_port_t *fcport = ea->fcport;
- if (fcport->scan_state != QLA_FCPORT_FOUND) {
- fcport->login_retry++;
- return;
- }
-
ql_dbg(ql_dbg_disc, vha, 0x2102,
"%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
__func__, fcport->port_name, fcport->disc_state,
@@ -1113,8 +1306,10 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
return;
if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
- if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
+ if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
return;
+ }
}
if (fcport->flags & FCF_ASYNC_SENT) {
@@ -1132,7 +1327,7 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n",
__func__, __LINE__, fcport->port_name);
- qla24xx_async_gidpn(vha, fcport);
+ qla24xx_post_gidpn_work(vha, fcport);
return;
}
@@ -1141,16 +1336,16 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
{
- fc_port_t *fcport, *f, *tf;
+ fc_port_t *f, *tf;
uint32_t id = 0, mask, rid;
- int rc;
+ unsigned long flags;
switch (ea->event) {
- case FCME_RELOGIN:
case FCME_RSCN:
case FCME_GIDPN_DONE:
case FCME_GPSC_DONE:
case FCME_GPNID_DONE:
+ case FCME_GNNID_DONE:
if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
return;
@@ -1171,20 +1366,15 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
return;
switch (ea->id.b.rsvd_1) {
case RSCN_PORT_ADDR:
- fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
- if (!fcport) {
- /* cable moved */
- rc = qla24xx_post_gpnid_work(vha, &ea->id);
- if (rc) {
- ql_log(ql_log_warn, vha, 0xd044,
- "RSCN GPNID work failed %02x%02x%02x\n",
- ea->id.b.domain, ea->id.b.area,
- ea->id.b.al_pa);
- }
- } else {
- ea->fcport = fcport;
- qla24xx_handle_rscn_event(fcport, ea);
+ spin_lock_irqsave(&vha->work_lock, flags);
+ if (vha->scan.scan_flags == 0) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s: schedule\n", __func__);
+ vha->scan.scan_flags |= SF_QUEUED;
+ schedule_delayed_work(&vha->scan.scan_work, 5);
}
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
break;
case RSCN_AREA_ADDR:
case RSCN_DOM_ADDR:
@@ -1227,7 +1417,7 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
qla24xx_handle_gnl_done_event(vha, ea);
break;
case FCME_GPSC_DONE:
- qla24xx_post_upd_fcport_work(vha, ea->fcport);
+ qla24xx_handle_gpsc_event(vha, ea);
break;
case FCME_PLOGI_DONE: /* Initiator side sent LLIOCB */
qla24xx_handle_plogi_done_event(vha, ea);
@@ -1244,8 +1434,14 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
case FCME_GFFID_DONE:
qla24xx_handle_gffid_event(vha, ea);
break;
- case FCME_DELETE_DONE:
- qla24xx_handle_delete_done_event(vha, ea);
+ case FCME_ADISC_DONE:
+ qla24xx_handle_adisc_event(vha, ea);
+ break;
+ case FCME_GNNID_DONE:
+ qla24xx_handle_gnnid_event(vha, ea);
+ break;
+ case FCME_GFPNID_DONE:
+ qla24xx_handle_gfpnid_event(vha, ea);
break;
default:
BUG_ON(1);
@@ -1327,6 +1523,7 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
done_free_sp:
sp->free(sp);
+ sp->fcport->flags &= ~FCF_ASYNC_SENT;
done:
return rval;
}
@@ -1368,6 +1565,13 @@ qla24xx_async_abort_cmd(srb_t *cmd_sp)
sp->name = "abort";
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
+
+ if (vha->flags.qpairs_available && cmd_sp->qpair)
+ abt_iocb->u.abt.req_que_no =
+ cpu_to_le16(cmd_sp->qpair->req->id);
+ else
+ abt_iocb->u.abt.req_que_no = cpu_to_le16(vha->req->id);
+
sp->done = qla24xx_abort_sp_done;
abt_iocb->timeout = qla24xx_abort_iocb_timeout;
init_completion(&abt_iocb->u.abt.comp);
@@ -1402,6 +1606,9 @@ qla24xx_async_abort_command(srb_t *sp)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = vha->req;
+ if (vha->flags.qpairs_available && sp->qpair)
+ req = sp->qpair->req;
+
spin_lock_irqsave(&ha->hardware_lock, flags);
for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
if (req->outstanding_cmds[handle] == sp)
@@ -1452,6 +1659,42 @@ static void
qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
port_id_t cid; /* conflict Nport id */
+ u16 lid;
+ struct fc_port *conflict_fcport;
+ unsigned long flags;
+ struct fc_port *fcport = ea->fcport;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d data %x|%x iop %x|%x\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
+ ea->sp->gen1, fcport->rscn_gen,
+ ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]);
+
+ if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
+ (fcport->fw_login_state == DSC_LS_PRLI_PEND)) {
+ ql_dbg(ql_dbg_disc, vha, 0x20ea,
+ "%s %d %8phC Remote is trying to login\n",
+ __func__, __LINE__, fcport->port_name);
+ return;
+ }
+
+ if (fcport->disc_state == DSC_DELETE_PEND)
+ return;
+
+ if (ea->sp->gen2 != fcport->login_gen) {
+ /* target side must have changed it. */
+ ql_dbg(ql_dbg_disc, vha, 0x20d3,
+ "%s %8phC generation changed\n",
+ __func__, fcport->port_name);
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ return;
+ } else if (ea->sp->gen1 != fcport->rscn_gen) {
+ ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_gidpn_work(vha, fcport);
+ return;
+ }
switch (ea->data[0]) {
case MBS_COMMAND_COMPLETE:
@@ -1467,11 +1710,19 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
qla24xx_post_prli_work(vha, ea->fcport);
} else {
ql_dbg(ql_dbg_disc, vha, 0x20ea,
- "%s %d %8phC post gpdb\n",
- __func__, __LINE__, ea->fcport->port_name);
+ "%s %d %8phC LoopID 0x%x in use with %06x. post gnl\n",
+ __func__, __LINE__, ea->fcport->port_name,
+ ea->fcport->loop_id, ea->fcport->d_id.b24);
+
+ set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ ea->fcport->loop_id = FC_NO_LOOP_ID;
ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
ea->fcport->logout_on_delete = 1;
ea->fcport->send_els_logo = 0;
+ ea->fcport->fw_login_state = DSC_LS_PRLI_COMP;
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
qla24xx_post_gpdb_work(vha, ea->fcport, 0);
}
break;
@@ -1513,8 +1764,38 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area,
ea->fcport->d_id.b.al_pa);
- qla2x00_clear_loop_id(ea->fcport);
- qla24xx_post_gidpn_work(vha, ea->fcport);
+ lid = ea->iop[1] & 0xffff;
+ qlt_find_sess_invalidate_other(vha,
+ wwn_to_u64(ea->fcport->port_name),
+ ea->fcport->d_id, lid, &conflict_fcport);
+
+ if (conflict_fcport) {
+ /*
+ * Another fcport shares the same loop_id/nport id.
+ * The conflicting fcport needs to finish cleanup before this
+ * fcport can proceed to login.
+ */
+ conflict_fcport->conflict = ea->fcport;
+ ea->fcport->login_pause = 1;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20ed,
+ "%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n",
+ __func__, __LINE__, ea->fcport->port_name,
+ ea->fcport->d_id.b24, lid);
+ qla2x00_clear_loop_id(ea->fcport);
+ qla24xx_post_gidpn_work(vha, ea->fcport);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x20ed,
+ "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n",
+ __func__, __LINE__, ea->fcport->port_name,
+ ea->fcport->d_id.b24, lid);
+
+ qla2x00_clear_loop_id(ea->fcport);
+ set_bit(lid, vha->hw->loop_id_map);
+ ea->fcport->loop_id = lid;
+ ea->fcport->keep_nport_handle = 0;
+ qlt_schedule_sess_for_deletion(ea->fcport);
+ }
break;
}
return;
@@ -2540,70 +2821,27 @@ qla24xx_chip_diag(scsi_qla_host_t *vha)
return rval;
}
-void
-qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
+static void
+qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
{
int rval;
- uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
- eft_size, fce_size, mq_size;
dma_addr_t tc_dma;
void *tc;
struct qla_hw_data *ha = vha->hw;
- struct req_que *req = ha->req_q_map[0];
- struct rsp_que *rsp = ha->rsp_q_map[0];
- if (ha->fw_dump) {
+ if (ha->eft) {
ql_dbg(ql_dbg_init, vha, 0x00bd,
- "Firmware dump already allocated.\n");
+ "%s: Offload Mem is already allocated.\n",
+ __func__);
return;
}
- ha->fw_dumped = 0;
- ha->fw_dump_cap_flags = 0;
- dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
- req_q_size = rsp_q_size = 0;
-
- if (IS_QLA27XX(ha))
- goto try_fce;
-
- if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
- fixed_size = sizeof(struct qla2100_fw_dump);
- } else if (IS_QLA23XX(ha)) {
- fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
- mem_size = (ha->fw_memory_size - 0x11000 + 1) *
- sizeof(uint16_t);
- } else if (IS_FWI2_CAPABLE(ha)) {
- if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
- fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
- else if (IS_QLA81XX(ha))
- fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
- else if (IS_QLA25XX(ha))
- fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
- else
- fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
-
- mem_size = (ha->fw_memory_size - 0x100000 + 1) *
- sizeof(uint32_t);
- if (ha->mqenable) {
- if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
- mq_size = sizeof(struct qla2xxx_mq_chain);
- /*
- * Allocate maximum buffer size for all queues.
- * Resizing must be done at end-of-dump processing.
- */
- mq_size += ha->max_req_queues *
- (req->length * sizeof(request_t));
- mq_size += ha->max_rsp_queues *
- (rsp->length * sizeof(response_t));
- }
- if (ha->tgt.atio_ring)
- mq_size += ha->tgt.atio_q_length * sizeof(request_t);
+ if (IS_FWI2_CAPABLE(ha)) {
/* Allocate memory for Fibre Channel Event Buffer. */
if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
!IS_QLA27XX(ha))
goto try_eft;
-try_fce:
if (ha->fce)
dma_free_coherent(&ha->pdev->dev,
FCE_SIZE, ha->fce, ha->fce_dma);
@@ -2631,7 +2869,6 @@ try_fce:
ql_dbg(ql_dbg_init, vha, 0x00c0,
"Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024);
- fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
ha->flags.fce_enabled = 1;
ha->fce_dma = tc_dma;
ha->fce = tc;
@@ -2648,7 +2885,7 @@ try_eft:
ql_log(ql_log_warn, vha, 0x00c1,
"Unable to allocate (%d KB) for EFT.\n",
EFT_SIZE / 1024);
- goto cont_alloc;
+ goto eft_err;
}
rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
@@ -2657,17 +2894,76 @@ try_eft:
"Unable to initialize EFT (%d).\n", rval);
dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
tc_dma);
- goto cont_alloc;
+ goto eft_err;
}
ql_dbg(ql_dbg_init, vha, 0x00c3,
"Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
- eft_size = EFT_SIZE;
ha->eft_dma = tc_dma;
ha->eft = tc;
}
-cont_alloc:
+eft_err:
+ return;
+}
+
+void
+qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
+{
+ uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
+ eft_size, fce_size, mq_size;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+ struct rsp_que *rsp = ha->rsp_q_map[0];
+ struct qla2xxx_fw_dump *fw_dump;
+
+ dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
+ req_q_size = rsp_q_size = 0;
+
+ if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
+ fixed_size = sizeof(struct qla2100_fw_dump);
+ } else if (IS_QLA23XX(ha)) {
+ fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
+ mem_size = (ha->fw_memory_size - 0x11000 + 1) *
+ sizeof(uint16_t);
+ } else if (IS_FWI2_CAPABLE(ha)) {
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+ fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
+ else if (IS_QLA81XX(ha))
+ fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
+ else if (IS_QLA25XX(ha))
+ fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
+ else
+ fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
+
+ mem_size = (ha->fw_memory_size - 0x100000 + 1) *
+ sizeof(uint32_t);
+ if (ha->mqenable) {
+ if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
+ mq_size = sizeof(struct qla2xxx_mq_chain);
+ /*
+ * Allocate maximum buffer size for all queues.
+ * Resizing must be done at end-of-dump processing.
+ */
+ mq_size += ha->max_req_queues *
+ (req->length * sizeof(request_t));
+ mq_size += ha->max_rsp_queues *
+ (rsp->length * sizeof(response_t));
+ }
+ if (ha->tgt.atio_ring)
+ mq_size += ha->tgt.atio_q_length * sizeof(request_t);
+ /* Allocate memory for Fibre Channel Event Buffer. */
+ if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+ !IS_QLA27XX(ha))
+ goto try_eft;
+
+ fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
+try_eft:
+ ql_dbg(ql_dbg_init, vha, 0x00c3,
+ "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
+ eft_size = EFT_SIZE;
+ }
+
if (IS_QLA27XX(ha)) {
if (!ha->fw_dump_template) {
ql_log(ql_log_warn, vha, 0x00ba,
@@ -2695,51 +2991,44 @@ cont_alloc:
ha->exlogin_size;
allocate:
- ha->fw_dump = vmalloc(dump_size);
- if (!ha->fw_dump) {
- ql_log(ql_log_warn, vha, 0x00c4,
- "Unable to allocate (%d KB) for firmware dump.\n",
- dump_size / 1024);
-
- if (ha->fce) {
- dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
- ha->fce_dma);
- ha->fce = NULL;
- ha->fce_dma = 0;
- }
-
- if (ha->eft) {
- dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft,
- ha->eft_dma);
- ha->eft = NULL;
- ha->eft_dma = 0;
+ if (!ha->fw_dump_len || dump_size != ha->fw_dump_len) {
+ fw_dump = vmalloc(dump_size);
+ if (!fw_dump) {
+ ql_log(ql_log_warn, vha, 0x00c4,
+ "Unable to allocate (%d KB) for firmware dump.\n",
+ dump_size / 1024);
+ } else {
+ if (ha->fw_dump)
+ vfree(ha->fw_dump);
+ ha->fw_dump = fw_dump;
+
+ ha->fw_dump_len = dump_size;
+ ql_dbg(ql_dbg_init, vha, 0x00c5,
+ "Allocated (%d KB) for firmware dump.\n",
+ dump_size / 1024);
+
+ if (IS_QLA27XX(ha))
+ return;
+
+ ha->fw_dump->signature[0] = 'Q';
+ ha->fw_dump->signature[1] = 'L';
+ ha->fw_dump->signature[2] = 'G';
+ ha->fw_dump->signature[3] = 'C';
+ ha->fw_dump->version = htonl(1);
+
+ ha->fw_dump->fixed_size = htonl(fixed_size);
+ ha->fw_dump->mem_size = htonl(mem_size);
+ ha->fw_dump->req_q_size = htonl(req_q_size);
+ ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
+
+ ha->fw_dump->eft_size = htonl(eft_size);
+ ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
+ ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));
+
+ ha->fw_dump->header_size =
+ htonl(offsetof(struct qla2xxx_fw_dump, isp));
}
- return;
}
- ha->fw_dump_len = dump_size;
- ql_dbg(ql_dbg_init, vha, 0x00c5,
- "Allocated (%d KB) for firmware dump.\n", dump_size / 1024);
-
- if (IS_QLA27XX(ha))
- return;
-
- ha->fw_dump->signature[0] = 'Q';
- ha->fw_dump->signature[1] = 'L';
- ha->fw_dump->signature[2] = 'G';
- ha->fw_dump->signature[3] = 'C';
- ha->fw_dump->version = htonl(1);
-
- ha->fw_dump->fixed_size = htonl(fixed_size);
- ha->fw_dump->mem_size = htonl(mem_size);
- ha->fw_dump->req_q_size = htonl(req_q_size);
- ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
-
- ha->fw_dump->eft_size = htonl(eft_size);
- ha->fw_dump->eft_addr_l = htonl(LSD(ha->eft_dma));
- ha->fw_dump->eft_addr_h = htonl(MSD(ha->eft_dma));
-
- ha->fw_dump->header_size =
- htonl(offsetof(struct qla2xxx_fw_dump, isp));
}
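+
+/*
+ * Reallocation rule in the rewritten qla2x00_alloc_fw_dump() (a reading of
+ * the code above): the vmalloc()ed dump buffer is kept across ISP
+ * re-initialization whenever the newly computed dump_size matches
+ * ha->fw_dump_len; only a size change triggers the vfree()/vmalloc() pair.
+ */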
static int
@@ -3065,9 +3354,12 @@ enable_82xx_npiv:
if (rval != QLA_SUCCESS)
goto failed;
- if (!fw_major_version && ql2xallocfwdump
- && !(IS_P3P_TYPE(ha)))
+ if (!fw_major_version && !(IS_P3P_TYPE(ha)))
+ qla2x00_alloc_offload_mem(vha);
+
+ if (ql2xallocfwdump && !(IS_P3P_TYPE(ha)))
qla2x00_alloc_fw_dump(vha);
+
} else {
goto failed;
}
@@ -3278,6 +3570,12 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
ha->fw_options[2] |= BIT_4;
else
ha->fw_options[2] &= ~BIT_4;
+
+ /* Reserve 1/2 of emergency exchanges for ELS. */
+ if (qla2xuseresexchforels)
+ ha->fw_options[2] |= BIT_8;
+ else
+ ha->fw_options[2] &= ~BIT_8;
}
ql_dbg(ql_dbg_init, vha, 0x00e8,
@@ -3671,6 +3969,7 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
port_id_t id;
+ unsigned long flags;
/* Get host addresses. */
rval = qla2x00_get_adapter_id(vha,
@@ -3752,7 +4051,9 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
id.b.area = area;
id.b.al_pa = al_pa;
id.b.rsvd_1 = 0;
+ spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_update_host_map(vha, id);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (!vha->flags.init_done)
ql_log(ql_log_info, vha, 0x2010,
@@ -4293,6 +4594,21 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
} else if (ha->current_topology == ISP_CFG_N) {
clear_bit(RSCN_UPDATE, &flags);
+ if (ha->flags.rida_fmt2) {
+ /* With Rida Format 2, the login is already triggered.
+ * We know who is on the other side of the wire.
+ * No need to do a login to find out, or to drop into
+ * qla2x00_configure_local_loop().
+ */
+ clear_bit(LOCAL_LOOP_UPDATE, &flags);
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ } else {
+ if (qla_tgt_mode_enabled(vha)) {
+ /* allow the other side to start the login */
+ clear_bit(LOCAL_LOOP_UPDATE, &flags);
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ }
+ }
} else if (ha->current_topology == ISP_CFG_NL) {
clear_bit(RSCN_UPDATE, &flags);
set_bit(LOCAL_LOOP_UPDATE, &flags);
@@ -4521,6 +4837,10 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
(uint8_t *)ha->gid_list,
entries * sizeof(struct gid_list_info));
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ fcport->scan_state = QLA_FCPORT_SCAN;
+ }
+
/* Allocate temporary fcport for any new fcports discovered. */
new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (new_fcport == NULL) {
@@ -4531,22 +4851,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
}
new_fcport->flags &= ~FCF_FABRIC_DEVICE;
- /*
- * Mark local devices that were present with FCF_DEVICE_LOST for now.
- */
- list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (atomic_read(&fcport->state) == FCS_ONLINE &&
- fcport->port_type != FCT_BROADCAST &&
- (fcport->flags & FCF_FABRIC_DEVICE) == 0) {
-
- ql_dbg(ql_dbg_disc, vha, 0x2096,
- "Marking port lost loop_id=0x%04x.\n",
- fcport->loop_id);
-
- qla2x00_mark_device_lost(vha, fcport, 0, 0);
- }
- }
-
/* Initiate N2N login. */
if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
rval = qla24xx_n2n_handle_login(vha, new_fcport);
@@ -4589,6 +4893,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
new_fcport->d_id.b.area = area;
new_fcport->d_id.b.al_pa = al_pa;
new_fcport->loop_id = loop_id;
+ new_fcport->scan_state = QLA_FCPORT_FOUND;
rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
if (rval2 != QLA_SUCCESS) {
@@ -4620,13 +4925,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
fcport->d_id.b24 = new_fcport->d_id.b24;
memcpy(fcport->node_name, new_fcport->node_name,
WWN_SIZE);
-
- if (!fcport->login_succ) {
- vha->fcport_count++;
- fcport->login_succ = 1;
- fcport->disc_state = DSC_LOGIN_COMPLETE;
- }
-
+ fcport->scan_state = QLA_FCPORT_FOUND;
found++;
break;
}
@@ -4637,11 +4936,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
/* Allocate a new replacement fcport. */
fcport = new_fcport;
- if (!fcport->login_succ) {
- vha->fcport_count++;
- fcport->login_succ = 1;
- fcport->disc_state = DSC_LOGIN_COMPLETE;
- }
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
@@ -4662,11 +4956,38 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
/* Base iIDMA settings on HBA port speed. */
fcport->fp_speed = ha->link_data_rate;
- qla2x00_update_fcport(vha, fcport);
-
found_devs++;
}
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ break;
+
+ if (fcport->scan_state == QLA_FCPORT_SCAN) {
+ if ((qla_dual_mode_enabled(vha) ||
+ qla_ini_mode_enabled(vha)) &&
+ atomic_read(&fcport->state) == FCS_ONLINE) {
+ qla2x00_mark_device_lost(vha, fcport,
+ ql2xplogiabsentdevice, 0);
+ if (fcport->loop_id != FC_NO_LOOP_ID &&
+ (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
+ fcport->port_type != FCT_INITIATOR &&
+ fcport->port_type != FCT_BROADCAST) {
+ ql_dbg(ql_dbg_disc, vha, 0x20f0,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__,
+ fcport->port_name);
+
+ qlt_schedule_sess_for_deletion(fcport);
+ continue;
+ }
+ }
+ }
+
+ if (fcport->scan_state == QLA_FCPORT_FOUND)
+ qla24xx_fcport_handle_login(vha, fcport);
+ }
+
cleanup_allocation:
kfree(new_fcport);
@@ -4920,9 +5241,6 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
}
}
- list_for_each_entry(fcport, &vha->vp_fcports, list) {
- fcport->scan_state = QLA_FCPORT_SCAN;
- }
/* Mark the time right before querying FW for connected ports.
* This process is long, asynchronous and by the time it's done,
@@ -4932,7 +5250,17 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
* will be newer than discovery_gen. */
qlt_do_generation_tick(vha, &discovery_gen);
- rval = qla2x00_find_all_fabric_devs(vha);
+ if (USE_ASYNC_SCAN(ha)) {
+ rval = qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI);
+ if (rval)
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ } else {
+ list_for_each_entry(fcport, &vha->vp_fcports, list)
+ fcport->scan_state = QLA_FCPORT_SCAN;
+
+ rval = qla2x00_find_all_fabric_devs(vha);
+ }
if (rval != QLA_SUCCESS)
break;
} while (0);
@@ -5237,9 +5565,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
"%s %d %8phC post del sess\n",
__func__, __LINE__,
fcport->port_name);
-
- qlt_schedule_sess_for_deletion_lock
- (fcport);
+ qlt_schedule_sess_for_deletion(fcport);
continue;
}
}
@@ -5974,6 +6300,8 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
if (!(IS_P3P_TYPE(ha)))
ha->isp_ops->reset_chip(vha);
+ SAVE_TOPO(ha);
+ ha->flags.rida_fmt2 = 0;
ha->flags.n2n_ae = 0;
ha->flags.lip_ae = 0;
ha->current_topology = 0;
@@ -8173,9 +8501,6 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
int ret = QLA_FUNCTION_FAILED;
struct qla_hw_data *ha = qpair->hw;
- if (!vha->flags.qpairs_req_created && !vha->flags.qpairs_rsp_created)
- goto fail;
-
qpair->delete_in_progress = 1;
while (atomic_read(&qpair->ref_count))
msleep(500);
@@ -8183,6 +8508,7 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
ret = qla25xx_delete_req_que(vha, qpair->req);
if (ret != QLA_SUCCESS)
goto fail;
+
ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
if (ret != QLA_SUCCESS)
goto fail;
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 17d2c20f1f75..4d32426393c7 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -273,6 +273,7 @@ qla2x00_init_timer(srb_t *sp, unsigned long tmo)
sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
add_timer(&sp->u.iocb_cmd.timer);
sp->free = qla2x00_sp_free;
+ init_completion(&sp->comp);
if (IS_QLAFX00(sp->vha->hw) && (sp->type == SRB_FXIOCB_DCMD))
init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
if (sp->type == SRB_ELS_DCMD)
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index d810a447cb4a..1b62e943ec49 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2158,7 +2158,9 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
skip_cmd_array:
/* Check for room on request queue. */
if (req->cnt < req_cnt + 2) {
- if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
+ if (qpair->use_shadow_reg)
+ cnt = *req->out_ptr;
+ else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
else if (IS_P3P_TYPE(ha))
cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
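+
+/*
+ * Shadow-register note (a reading of the new branch above): when the qpair
+ * uses shadow registers, firmware maintains the request-queue out-pointer
+ * in host memory, so the driver reads *req->out_ptr directly instead of
+ * issuing a slow MMIO read of the chip's req_q_out register.
+ */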
@@ -2392,26 +2394,13 @@ qla2x00_els_dcmd_iocb_timeout(void *data)
srb_t *sp = data;
fc_port_t *fcport = sp->fcport;
struct scsi_qla_host *vha = sp->vha;
- struct qla_hw_data *ha = vha->hw;
struct srb_iocb *lio = &sp->u.iocb_cmd;
- unsigned long flags = 0;
ql_dbg(ql_dbg_io, vha, 0x3069,
"%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
- /* Abort the exchange */
- spin_lock_irqsave(&ha->hardware_lock, flags);
- if (ha->isp_ops->abort_command(sp)) {
- ql_dbg(ql_dbg_io, vha, 0x3070,
- "mbx abort_command failed.\n");
- } else {
- ql_dbg(ql_dbg_io, vha, 0x3071,
- "mbx abort_command success.\n");
- }
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
complete(&lio->u.els_logo.comp);
}
@@ -2631,7 +2620,7 @@ qla2x00_els_dcmd2_sp_done(void *ptr, int res)
struct scsi_qla_host *vha = sp->vha;
ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3072,
- "%s ELS hdl=%x, portid=%06x done %8pC\n",
+ "%s ELS hdl=%x, portid=%06x done %8phC\n",
sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
complete(&lio->u.els_plogi.comp);
@@ -3286,7 +3275,9 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
abt_iocb->entry_type = ABORT_IOCB_TYPE;
abt_iocb->entry_count = 1;
- abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
+ abt_iocb->handle =
+ cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
+ aio->u.abt.cmd_hndl));
abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
abt_iocb->handle_to_abort =
cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl));
@@ -3294,7 +3285,7 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
abt_iocb->vp_index = vha->vp_idx;
- abt_iocb->req_que_no = cpu_to_le16(req->id);
+ abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
/* Send the command to the firmware */
wmb();
}
@@ -3381,6 +3372,40 @@ qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
return rval;
}
+static void
+qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
+{
+ int map, pos;
+
+ vce->entry_type = VP_CTRL_IOCB_TYPE;
+ vce->handle = sp->handle;
+ vce->entry_count = 1;
+ vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
+ vce->vp_count = cpu_to_le16(1);
+
+ /*
+ * The index map in firmware starts at 1, so decrement the index.
+ * This is safe because index 0 is never used.
+ */
+ map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
+ pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
+ vce->vp_idx_map[map] |= 1 << pos;
+}
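+
+/*
+ * Worked example of the vp_idx_map math above (illustrative numbers):
+ * vp_index = 1 -> map = 0, pos = 0 -> vp_idx_map[0] |= 0x01;
+ * vp_index = 9 -> map = (9 - 1) / 8 = 1, pos = (9 - 1) & 7 = 0,
+ * so vp_idx_map[1] |= 0x01.
+ */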
+
+static void
+qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
+{
+ logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
+ logio->control_flags =
+ cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);
+
+ logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ logio->port_id[0] = sp->fcport->d_id.b.al_pa;
+ logio->port_id[1] = sp->fcport->d_id.b.area;
+ logio->port_id[2] = sp->fcport->d_id.b.domain;
+ logio->vp_index = sp->fcport->vha->vp_idx;
+}
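+
+/*
+ * Note the byte order used by both IOCB builders above: the 24-bit port id
+ * is laid out little-endian, with al_pa in port_id[0], area in port_id[1]
+ * and domain in port_id[2].
+ */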
+
int
qla2x00_start_sp(srb_t *sp)
{
@@ -3459,6 +3484,12 @@ qla2x00_start_sp(srb_t *sp)
case SRB_NACK_LOGO:
qla2x00_send_notify_ack_iocb(sp, pkt);
break;
+ case SRB_CTRL_VP:
+ qla25xx_ctrlvp_iocb(sp, pkt);
+ break;
+ case SRB_PRLO_CMD:
+ qla24xx_prlo_iocb(sp, pkt);
+ break;
default:
break;
}
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 2fd79129bb2a..14109d86c3f6 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -809,6 +809,7 @@ skip_rio:
break;
case MBA_LOOP_DOWN: /* Loop Down Event */
+ SAVE_TOPO(ha);
ha->flags.n2n_ae = 0;
ha->flags.lip_ae = 0;
ha->current_topology = 0;
@@ -922,7 +923,6 @@ skip_rio:
set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
- ha->flags.gpsc_supported = 1;
vha->flags.management_server_logged_in = 0;
break;
@@ -1009,7 +1009,7 @@ skip_rio:
if (qla_ini_mode_enabled(vha)) {
qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
fcport->logout_on_delete = 0;
- qlt_schedule_sess_for_deletion_lock(fcport);
+ qlt_schedule_sess_for_deletion(fcport);
}
break;
@@ -1059,8 +1059,7 @@ global_port_update:
* Mark all devices as missing so we will login again.
*/
atomic_set(&vha->loop_state, LOOP_UP);
-
- qla2x00_mark_all_devices_lost(vha, 1);
+ vha->scan.scan_retry = 0;
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
@@ -1202,6 +1201,7 @@ global_port_update:
qla2xxx_wake_dpc(vha);
}
}
+ /* fall through */
case MBA_IDC_COMPLETE:
if (ha->notify_lb_portup_comp && !vha->vp_idx)
complete(&ha->lb_portup_comp);
@@ -1574,7 +1574,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
/* borrowing sts_entry_24xx.comp_status.
same location as ct_entry_24xx.comp_status
*/
- res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
+ res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt,
(struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
sp->name);
sp->done(sp, res);
@@ -1769,7 +1769,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
}
- /* drop through */
+ /* fall through */
default:
data[0] = MBS_COMMAND_ERROR;
break;
@@ -1936,6 +1936,37 @@ qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
sp->done(sp, ret);
}
+static void qla_ctrlvp_completed(scsi_qla_host_t *vha, struct req_que *req,
+ struct vp_ctrl_entry_24xx *vce)
+{
+ const char func[] = "CTRLVP-IOCB";
+ srb_t *sp;
+ int rval = QLA_SUCCESS;
+
+ sp = qla2x00_get_sp_from_handle(vha, func, req, vce);
+ if (!sp)
+ return;
+
+ if (vce->entry_status != 0) {
+ ql_dbg(ql_dbg_vport, vha, 0x10c4,
+ "%s: Failed to complete IOCB -- error status (%x)\n",
+ sp->name, vce->entry_status);
+ rval = QLA_FUNCTION_FAILED;
+ } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
+ ql_dbg(ql_dbg_vport, vha, 0x10c5,
+ "%s: Failed to complete IOCB -- completion status (%x) vpidx %x\n",
+ sp->name, le16_to_cpu(vce->comp_status),
+ le16_to_cpu(vce->vp_idx_failed));
+ rval = QLA_FUNCTION_FAILED;
+ } else {
+ ql_dbg(ql_dbg_vport, vha, 0x10c6,
+ "Done %s.\n", __func__);
+ }
+
+ sp->rc = rval;
+ sp->done(sp, rval);
+}
+
/**
* qla2x00_process_response_queue() - Process response queue entries.
* @ha: SCSI driver HA context
@@ -2369,7 +2400,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
int res = 0;
uint16_t state_flags = 0;
uint16_t retry_delay = 0;
- uint8_t no_logout = 0;
sts = (sts_entry_t *) pkt;
sts24 = (struct sts_entry_24xx *) pkt;
@@ -2640,7 +2670,6 @@ check_scsi_status:
break;
case CS_PORT_LOGGED_OUT:
- no_logout = 1;
case CS_PORT_CONFIG_CHG:
case CS_PORT_BUSY:
case CS_INCOMPLETE:
@@ -2671,11 +2700,8 @@ check_scsi_status:
port_state_str[atomic_read(&fcport->state)],
comp_status);
- if (no_logout)
- fcport->logout_on_delete = 0;
-
qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
- qlt_schedule_sess_for_deletion_lock(fcport);
+ qlt_schedule_sess_for_deletion(fcport);
}
break;
@@ -2972,9 +2998,9 @@ process_err:
(response_t *)pkt);
break;
} else {
- /* drop through */
qlt_24xx_process_atio_queue(vha, 1);
}
+ /* fall through */
case ABTS_RESP_24XX:
case CTIO_TYPE7:
case CTIO_CRC2:
@@ -3005,6 +3031,10 @@ process_err:
qla24xx_mbx_iocb_entry(vha, rsp->req,
(struct mbx_24xx_entry *)pkt);
break;
+ case VP_CTRL_IOCB_TYPE:
+ qla_ctrlvp_completed(vha, rsp->req,
+ (struct vp_ctrl_entry_24xx *)pkt);
+ break;
default:
/* Type Not Supported. */
ql_dbg(ql_dbg_async, vha, 0x5042,
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index cb717d47339f..7397aeddd96c 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -17,6 +17,7 @@ static struct mb_cmd_name {
{MBC_GET_PORT_DATABASE, "GPDB"},
{MBC_GET_ID_LIST, "GIDList"},
{MBC_GET_LINK_PRIV_STATS, "Stats"},
+ {MBC_GET_RESOURCE_COUNTS, "ResCnt"},
};
static const char *mb_to_str(uint16_t cmd)
@@ -3731,6 +3732,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
unsigned long flags;
int found;
port_id_t id;
+ struct fc_port *fcport;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
"Entered %s.\n", __func__);
@@ -3753,7 +3755,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
"Primary port id %02x%02x%02x.\n",
rptid_entry->port_id[2], rptid_entry->port_id[1],
rptid_entry->port_id[0]);
-
+ ha->current_topology = ISP_CFG_NL;
qlt_update_host_map(vha, id);
} else if (rptid_entry->format == 1) {
@@ -3797,6 +3799,8 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
return;
}
+ ha->flags.gpsc_supported = 1;
+ ha->current_topology = ISP_CFG_F;
/* buffer to buffer credit flag */
vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0;
@@ -3862,6 +3866,8 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
rptid_entry->u.f2.port_name);
/* N2N. direct connect */
+ ha->current_topology = ISP_CFG_N;
+ ha->flags.rida_fmt2 = 1;
vha->d_id.b.domain = rptid_entry->port_id[2];
vha->d_id.b.area = rptid_entry->port_id[1];
vha->d_id.b.al_pa = rptid_entry->port_id[0];
@@ -3869,6 +3875,40 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
spin_lock_irqsave(&ha->vport_slock, flags);
qlt_update_vp_map(vha, SET_AL_PA);
spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ fcport->scan_state = QLA_FCPORT_SCAN;
+ }
+
+ fcport = qla2x00_find_fcport_by_wwpn(vha,
+ rptid_entry->u.f2.port_name, 1);
+
+ if (fcport) {
+ fcport->plogi_nack_done_deadline = jiffies + HZ;
+ fcport->scan_state = QLA_FCPORT_FOUND;
+ switch (fcport->disc_state) {
+ case DSC_DELETED:
+ ql_dbg(ql_dbg_disc, vha, 0x210d,
+ "%s %d %8phC login\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_fcport_handle_login(vha, fcport);
+ break;
+ case DSC_DELETE_PEND:
+ break;
+ default:
+ qlt_schedule_sess_for_deletion(fcport);
+ break;
+ }
+ } else {
+ id.b.al_pa = rptid_entry->u.f2.remote_nport_id[0];
+ id.b.area = rptid_entry->u.f2.remote_nport_id[1];
+ id.b.domain = rptid_entry->u.f2.remote_nport_id[2];
+ qla24xx_post_newsess_work(vha, &id,
+ rptid_entry->u.f2.port_name,
+ rptid_entry->u.f2.node_name,
+ NULL,
+ FC4_TYPE_UNKNOWN);
+ }
}
}
@@ -3945,83 +3985,6 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
}
/*
- * qla24xx_control_vp
- * Enable a virtual port for given host
- *
- * Input:
- * ha = adapter block pointer.
- * vhba = virtual adapter (unused)
- * index = index number for enabled VP
- *
- * Returns:
- * qla2xxx local function return status code.
- *
- * Context:
- * Kernel context.
- */
-int
-qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
-{
- int rval;
- int map, pos;
- struct vp_ctrl_entry_24xx *vce;
- dma_addr_t vce_dma;
- struct qla_hw_data *ha = vha->hw;
- int vp_index = vha->vp_idx;
- struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
-
- ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c1,
- "Entered %s enabling index %d.\n", __func__, vp_index);
-
- if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
- return QLA_PARAMETER_ERROR;
-
- vce = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma);
- if (!vce) {
- ql_log(ql_log_warn, vha, 0x10c2,
- "Failed to allocate VP control IOCB.\n");
- return QLA_MEMORY_ALLOC_FAILED;
- }
-
- vce->entry_type = VP_CTRL_IOCB_TYPE;
- vce->entry_count = 1;
- vce->command = cpu_to_le16(cmd);
- vce->vp_count = cpu_to_le16(1);
-
- /* index map in firmware starts with 1; decrement index
- * this is ok as we never use index 0
- */
- map = (vp_index - 1) / 8;
- pos = (vp_index - 1) & 7;
- mutex_lock(&ha->vport_lock);
- vce->vp_idx_map[map] |= 1 << pos;
- mutex_unlock(&ha->vport_lock);
-
- rval = qla2x00_issue_iocb(base_vha, vce, vce_dma, 0);
- if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_mbx, vha, 0x10c3,
- "Failed to issue VP control IOCB (%x).\n", rval);
- } else if (vce->entry_status != 0) {
- ql_dbg(ql_dbg_mbx, vha, 0x10c4,
- "Failed to complete IOCB -- error status (%x).\n",
- vce->entry_status);
- rval = QLA_FUNCTION_FAILED;
- } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
- ql_dbg(ql_dbg_mbx, vha, 0x10c5,
- "Failed to complete IOCB -- completion status (%x).\n",
- le16_to_cpu(vce->comp_status));
- rval = QLA_FUNCTION_FAILED;
- } else {
- ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c6,
- "Done %s.\n", __func__);
- }
-
- dma_pool_free(ha->s_dma_pool, vce, vce_dma);
-
- return rval;
-}
-
-/*
* qla2x00_send_change_request
* Receive or disable RSCN request from fabric controller
*
@@ -6160,8 +6123,7 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
}
/* Check for logged in state. */
- if (current_login_state != PDS_PRLI_COMPLETE &&
- last_login_state != PDS_PRLI_COMPLETE) {
+ if (current_login_state != PDS_PRLI_COMPLETE) {
ql_dbg(ql_dbg_mbx, vha, 0x119a,
"Unable to verify login-state (%x/%x) for loop_id %x.\n",
current_login_state, last_login_state, fcport->loop_id);
@@ -6350,3 +6312,32 @@ qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
return rval;
}
+
+int qla24xx_res_count_wait(struct scsi_qla_host *vha,
+ uint16_t *out_mb, int out_mb_sz)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ mbx_cmd_t mc;
+
+ if (!vha->hw->flags.fw_started)
+ goto done;
+
+ memset(&mc, 0, sizeof(mc));
+ mc.mb[0] = MBC_GET_RESOURCE_COUNTS;
+
+ rval = qla24xx_send_mb_cmd(vha, &mc);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0xffff,
+ "%s: fail\n", __func__);
+ } else {
+ if (out_mb_sz <= SIZEOF_IOCB_MB_REG)
+ memcpy(out_mb, mc.mb, out_mb_sz);
+ else
+ memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG);
+
+ ql_dbg(ql_dbg_mbx, vha, 0xffff,
+ "%s: done\n", __func__);
+ }
+done:
+ return rval;
+}
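+
+/*
+ * Hypothetical caller sketch (the buffer name and the meaning of the
+ * individual mailbox words are assumptions, not taken from this patch):
+ *
+ *	u16 mb[SIZEOF_IOCB_MB_REG / sizeof(u16)];
+ *
+ *	if (qla24xx_res_count_wait(vha, mb, sizeof(mb)) == QLA_SUCCESS)
+ *		ql_dbg(ql_dbg_init, vha, 0xffff,
+ *		    "resource counts mb1=%x mb2=%x\n", mb[1], mb[2]);
+ */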
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index bd9f14bf7ac2..e965b16f21e3 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -50,10 +50,11 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
spin_lock_irqsave(&ha->vport_slock, flags);
list_add_tail(&vha->list, &ha->vp_list);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_update_vp_map(vha, SET_VP_IDX);
-
- spin_unlock_irqrestore(&ha->vport_slock, flags);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
mutex_unlock(&ha->vport_lock);
return vp_id;
@@ -158,9 +159,9 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
/* Remove port id from vp target map */
- spin_lock_irqsave(&vha->hw->vport_slock, flags);
+ spin_lock_irqsave(&vha->hw->hardware_lock, flags);
qlt_update_vp_map(vha, RESET_AL_PA);
- spin_unlock_irqrestore(&vha->hw->vport_slock, flags);
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
qla2x00_mark_vp_devices_dead(vha);
atomic_set(&vha->vp_state, VP_FAILED);
@@ -264,13 +265,20 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
case MBA_LIP_RESET:
case MBA_POINT_TO_POINT:
case MBA_CHG_IN_CONNECTION:
- case MBA_PORT_UPDATE:
- case MBA_RSCN_UPDATE:
ql_dbg(ql_dbg_async, vha, 0x5024,
"Async_event for VP[%d], mb=0x%x vha=%p.\n",
i, *mb, vha);
qla2x00_async_event(vha, rsp, mb);
break;
+ case MBA_PORT_UPDATE:
+ case MBA_RSCN_UPDATE:
+ if ((mb[3] & 0xff) == vha->vp_idx) {
+ ql_dbg(ql_dbg_async, vha, 0x5024,
+ "Async_event for VP[%d], mb=0x%x vha=%p\n",
+ i, *mb, vha);
+ qla2x00_async_event(vha, rsp, mb);
+ }
+ break;
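+ /*
+ * mb[3] bits 7:0 carry the destination VP index for PORT_UPDATE and
+ * RSCN events (per the check above), so each vha now consumes only
+ * the events addressed to it instead of every VP seeing every event.
+ */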
}
spin_lock_irqsave(&ha->vport_slock, flags);
@@ -319,8 +327,6 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
"Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);
- qla2x00_do_work(vha);
-
/* Check if Fw is ready to configure VP first */
if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) {
if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
@@ -343,15 +349,19 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
"FCPort update end.\n");
}
- if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) &&
- !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
- atomic_read(&vha->loop_state) != LOOP_DOWN) {
+ if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) &&
+ !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
+ atomic_read(&vha->loop_state) != LOOP_DOWN) {
- ql_dbg(ql_dbg_dpc, vha, 0x4018,
- "Relogin needed scheduled.\n");
- qla2x00_relogin(vha);
- ql_dbg(ql_dbg_dpc, vha, 0x4019,
- "Relogin needed end.\n");
+ if (!vha->relogin_jif ||
+ time_after_eq(jiffies, vha->relogin_jif)) {
+ vha->relogin_jif = jiffies + HZ;
+ clear_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+
+ ql_dbg(ql_dbg_dpc, vha, 0x4018,
+ "Relogin needed scheduled.\n");
+ qla24xx_post_relogin_work(vha);
+ }
}
if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
@@ -475,7 +485,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
"Couldn't allocate vp_id.\n");
goto create_vhost_failed;
}
- vha->mgmt_svr_loop_id = 10 + vha->vp_idx;
+ vha->mgmt_svr_loop_id = NPH_MGMT_SERVER;
vha->dpc_flags = 0L;
@@ -569,14 +579,16 @@ qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
- int ret = -1;
+ int ret = QLA_SUCCESS;
- if (req) {
+ if (req && vha->flags.qpairs_req_created) {
req->options |= BIT_0;
ret = qla25xx_init_req_que(vha, req);
- }
- if (ret == QLA_SUCCESS)
+ if (ret != QLA_SUCCESS)
+ return QLA_FUNCTION_FAILED;
+
qla25xx_free_req_que(vha, req);
+ }
return ret;
}
@@ -584,14 +596,16 @@ qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
- int ret = -1;
+ int ret = QLA_SUCCESS;
- if (rsp) {
+ if (rsp && vha->flags.qpairs_rsp_created) {
rsp->options |= BIT_0;
ret = qla25xx_init_rsp_que(vha, rsp);
- }
- if (ret == QLA_SUCCESS)
+ if (ret != QLA_SUCCESS)
+ return QLA_FUNCTION_FAILED;
+
qla25xx_free_rsp_que(vha, rsp);
+ }
return ret;
}
@@ -884,3 +898,79 @@ que_failed:
failed:
return 0;
}
+
+static void qla_ctrlvp_sp_done(void *s, int res)
+{
+ struct srb *sp = s;
+
+ complete(&sp->comp);
+ /* don't free sp here. Let the caller do the free */
+}
+
+/**
+ * qla24xx_control_vp() - Enable a virtual port for a given host
+ * @vha: adapter block pointer
+ * @cmd: command type to be sent to enable the virtual port
+ *
+ * Return: qla2xxx local function return status code.
+ */
+int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
+{
+ int rval = QLA_MEMORY_ALLOC_FAILED;
+ struct qla_hw_data *ha = vha->hw;
+ int vp_index = vha->vp_idx;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ srb_t *sp;
+
+ ql_dbg(ql_dbg_vport, vha, 0x10c1,
+ "Entered %s cmd %x index %d.\n", __func__, cmd, vp_index);
+
+ if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
+ return QLA_PARAMETER_ERROR;
+
+ sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_CTRL_VP;
+ sp->name = "ctrl_vp";
+ sp->done = qla_ctrlvp_sp_done;
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->u.iocb_cmd.u.ctrlvp.cmd = cmd;
+ sp->u.iocb_cmd.u.ctrlvp.vp_index = vp_index;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_async, vha, 0xffff,
+ "%s: %s Failed submission. %x.\n",
+ __func__, sp->name, rval);
+ goto done_free_sp;
+ }
+
+ ql_dbg(ql_dbg_vport, vha, 0x113f, "%s hndl %x submitted\n",
+ sp->name, sp->handle);
+
+ wait_for_completion(&sp->comp);
+ rval = sp->rc;
+ switch (rval) {
+ case QLA_FUNCTION_TIMEOUT:
+ ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Timeout. %x.\n",
+ __func__, sp->name, rval);
+ break;
+ case QLA_SUCCESS:
+ ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s done.\n",
+ __func__, sp->name);
+ goto done_free_sp;
+ default:
+ ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Failed. %x.\n",
+ __func__, sp->name, rval);
+ goto done_free_sp;
+ }
+done:
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+ return rval;
+}
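[The new qla24xx_control_vp() above is a submit-and-wait wrapper: the SRB's done callback only signals a completion, the submitter sleeps on it, reads the result back out of the SRB, and owns the free. A condensed, self-contained sketch of that pattern, with demo_* names standing in for the driver's srb machinery:]

#include <linux/completion.h>

struct demo_req {
	struct completion comp;
	int rc;
};

static void demo_done(struct demo_req *req, int res)
{
	req->rc = res;
	complete(&req->comp);	/* wake the waiter; waiter owns freeing */
}

static int demo_submit(struct demo_req *req)
{
	init_completion(&req->comp);
	/* hand req to the hardware here; demo_done() runs on completion */
	return 0;
}

static int demo_wait(struct demo_req *req)
{
	if (demo_submit(req))
		return -EIO;			/* submission failed */
	wait_for_completion(&req->comp);	/* sleep until demo_done() */
	return req->rc;				/* result set by callback */
}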
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index 0aa9c38bf347..525ac35a757b 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -11,8 +11,6 @@
#include "qla_def.h"
#include "qla_gbl.h"
-#include <linux/delay.h>
-
#define TIMEOUT_100_MS 100
static const uint32_t qla8044_reg_tbl[] = {
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 46f2d0cf7c0d..12ee6e02d146 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -277,6 +277,12 @@ MODULE_PARM_DESC(ql2xenablemsix,
" 1 -- enable MSI-X interrupt mechanism.\n"
" 2 -- enable MSI interrupt mechanism.\n");
+int qla2xuseresexchforels;
+module_param(qla2xuseresexchforels, int, 0444);
+MODULE_PARM_DESC(qla2xuseresexchforels,
+ "Reserve 1/2 of emergency exchanges for ELS.\n"
+ " 0 (default): disabled");
+
/*
* SCSI host template entry points
*/
@@ -294,7 +300,6 @@ static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
static void qla2x00_clear_drv_active(struct qla_hw_data *);
static void qla2x00_free_device(scsi_qla_host_t *);
-static void qla83xx_disable_laser(scsi_qla_host_t *vha);
static int qla2xxx_map_queues(struct Scsi_Host *shost);
static void qla2x00_destroy_deferred_work(struct qla_hw_data *);
@@ -1705,93 +1710,103 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
return QLA_SUCCESS;
}
-void
-qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
+static void
+__qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
{
- int que, cnt, status;
+ int cnt, status;
unsigned long flags;
srb_t *sp;
+ scsi_qla_host_t *vha = qp->vha;
struct qla_hw_data *ha = vha->hw;
struct req_que *req;
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
struct qla_tgt_cmd *cmd;
uint8_t trace = 0;
- spin_lock_irqsave(&ha->hardware_lock, flags);
- for (que = 0; que < ha->max_req_queues; que++) {
- req = ha->req_q_map[que];
- if (!req)
- continue;
- if (!req->outstanding_cmds)
- continue;
- for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
- sp = req->outstanding_cmds[cnt];
- if (sp) {
- req->outstanding_cmds[cnt] = NULL;
- if (sp->cmd_type == TYPE_SRB) {
- if (sp->type == SRB_NVME_CMD ||
- sp->type == SRB_NVME_LS) {
- sp_get(sp);
- spin_unlock_irqrestore(
- &ha->hardware_lock, flags);
- qla_nvme_abort(ha, sp);
- spin_lock_irqsave(
- &ha->hardware_lock, flags);
- } else if (GET_CMD_SP(sp) &&
- !ha->flags.eeh_busy &&
- (!test_bit(ABORT_ISP_ACTIVE,
- &vha->dpc_flags)) &&
- (sp->type == SRB_SCSI_CMD)) {
- /*
- * Don't abort commands in
- * adapter during EEH
- * recovery as it's not
- * accessible/responding.
- *
- * Get a reference to the sp
- * and drop the lock. The
- * reference ensures this
- * sp->done() call and not the
- * call in qla2xxx_eh_abort()
- * ends the SCSI command (with
- * result 'res').
- */
- sp_get(sp);
- spin_unlock_irqrestore(
- &ha->hardware_lock, flags);
- status = qla2xxx_eh_abort(
- GET_CMD_SP(sp));
- spin_lock_irqsave(
- &ha->hardware_lock, flags);
- /*
- * Get rid of extra reference
- * if immediate exit from
- * ql2xxx_eh_abort
- */
- if (status == FAILED &&
- (qla2x00_isp_reg_stat(ha)))
- atomic_dec(
- &sp->ref_count);
- }
- sp->done(sp, res);
- } else {
- if (!vha->hw->tgt.tgt_ops || !tgt ||
- qla_ini_mode_enabled(vha)) {
- if (!trace)
- ql_dbg(ql_dbg_tgt_mgt,
- vha, 0xf003,
- "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
- vha->dpc_flags);
- continue;
- }
- cmd = (struct qla_tgt_cmd *)sp;
- qlt_abort_cmd_on_host_reset(cmd->vha,
- cmd);
+ spin_lock_irqsave(qp->qp_lock_ptr, flags);
+ req = qp->req;
+ for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
+ sp = req->outstanding_cmds[cnt];
+ if (sp) {
+ req->outstanding_cmds[cnt] = NULL;
+ if (sp->cmd_type == TYPE_SRB) {
+ if (sp->type == SRB_NVME_CMD ||
+ sp->type == SRB_NVME_LS) {
+ sp_get(sp);
+ spin_unlock_irqrestore(qp->qp_lock_ptr,
+ flags);
+ qla_nvme_abort(ha, sp);
+ spin_lock_irqsave(qp->qp_lock_ptr,
+ flags);
+ } else if (GET_CMD_SP(sp) &&
+ !ha->flags.eeh_busy &&
+ (!test_bit(ABORT_ISP_ACTIVE,
+ &vha->dpc_flags)) &&
+ (sp->type == SRB_SCSI_CMD)) {
+ /*
+ * Don't abort commands in
+ * adapter during EEH
+ * recovery as it's not
+ * accessible/responding.
+ *
+ * Get a reference to the sp
+ * and drop the lock. The
+ * reference ensures this
+ * sp->done() call and not the
+ * call in qla2xxx_eh_abort()
+ * ends the SCSI command (with
+ * result 'res').
+ */
+ sp_get(sp);
+ spin_unlock_irqrestore(qp->qp_lock_ptr,
+ flags);
+ status = qla2xxx_eh_abort(
+ GET_CMD_SP(sp));
+ spin_lock_irqsave(qp->qp_lock_ptr,
+ flags);
+ /*
+ * Get rid of extra reference
+ * if immediate exit from
+ * ql2xxx_eh_abort
+ */
+ if (status == FAILED &&
+ (qla2x00_isp_reg_stat(ha)))
+ atomic_dec(
+ &sp->ref_count);
}
+ sp->done(sp, res);
+ } else {
+ if (!vha->hw->tgt.tgt_ops || !tgt ||
+ qla_ini_mode_enabled(vha)) {
+ if (!trace)
+ ql_dbg(ql_dbg_tgt_mgt,
+ vha, 0xf003,
+ "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
+ vha->dpc_flags);
+ continue;
+ }
+ cmd = (struct qla_tgt_cmd *)sp;
+ qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
}
}
}
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
+}
+
+void
+qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
+{
+ int que;
+ struct qla_hw_data *ha = vha->hw;
+
+ __qla2x00_abort_all_cmds(ha->base_qpair, res);
+
+ for (que = 0; que < ha->max_qpairs; que++) {
+ if (!ha->queue_pair_map[que])
+ continue;
+
+ __qla2x00_abort_all_cmds(ha->queue_pair_map[que], res);
+ }
}
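[qla2x00_abort_all_cmds() is now a thin fan-out over the per-queue-pair helper: the base queue pair first, then every populated slot in queue_pair_map. The same shape in isolation, with illustrative types (the real driver also threads the res argument through):]

struct demo_qpair { int id; };

struct demo_hw {
	struct demo_qpair *base_qpair;
	struct demo_qpair *map[32];
	int max_qpairs;
};

static void demo_for_each_qpair(struct demo_hw *hw,
				void (*fn)(struct demo_qpair *))
{
	int i;

	fn(hw->base_qpair);		/* default queue pair always exists */
	for (i = 0; i < hw->max_qpairs; i++) {
		if (!hw->map[i])
			continue;	/* slot never allocated */
		fn(hw->map[i]);
	}
}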
static int
@@ -2689,14 +2704,22 @@ static void qla2x00_iocb_work_fn(struct work_struct *work)
{
struct scsi_qla_host *vha = container_of(work,
struct scsi_qla_host, iocb_work);
- int cnt = 0;
+ struct qla_hw_data *ha = vha->hw;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ int i = 20;
+ unsigned long flags;
- while (!list_empty(&vha->work_list)) {
+ if (test_bit(UNLOADING, &base_vha->dpc_flags))
+ return;
+
+ while (!list_empty(&vha->work_list) && i > 0) {
qla2x00_do_work(vha);
- cnt++;
- if (cnt > 10)
- break;
+ i--;
}
+
+ spin_lock_irqsave(&vha->work_lock, flags);
+ clear_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags);
+ spin_unlock_irqrestore(&vha->work_lock, flags);
}
/*
@@ -2790,6 +2813,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->init_cb_size = sizeof(init_cb_t);
ha->link_data_rate = PORT_SPEED_UNKNOWN;
ha->optrom_size = OPTROM_SIZE_2300;
+ ha->max_exchg = FW_MAX_EXCHANGES_CNT;
/* Assign ISP specific operations. */
if (IS_QLA2100(ha)) {
@@ -3011,9 +3035,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
base_vha = qla2x00_create_host(sht, ha);
if (!base_vha) {
ret = -ENOMEM;
- qla2x00_mem_free(ha);
- qla2x00_free_req_que(ha, req);
- qla2x00_free_rsp_que(ha, rsp);
goto probe_hw_failed;
}
@@ -3023,7 +3044,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host = base_vha->host;
base_vha->req = req;
if (IS_QLA2XXX_MIDTYPE(ha))
- base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
+ base_vha->mgmt_svr_loop_id = NPH_MGMT_SERVER;
else
base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
base_vha->vp_idx;
@@ -3074,7 +3095,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
/* Set up the irqs */
ret = qla2x00_request_irqs(ha, rsp);
if (ret)
- goto probe_init_failed;
+ goto probe_hw_failed;
/* Alloc arrays of request and response ring ptrs */
if (!qla2x00_alloc_queues(ha, req, rsp)) {
@@ -3193,10 +3214,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->can_queue, base_vha->req,
base_vha->mgmt_svr_loop_id, host->sg_tablesize);
+ ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0);
+
if (ha->mqenable) {
bool mq = false;
bool startit = false;
- ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 0);
if (QLA_TGT_MODE_ENABLED()) {
mq = true;
@@ -3390,6 +3412,9 @@ probe_failed:
scsi_host_put(base_vha->host);
probe_hw_failed:
+ qla2x00_mem_free(ha);
+ qla2x00_free_req_que(ha, req);
+ qla2x00_free_rsp_que(ha, rsp);
qla2x00_clear_drv_active(ha);
iospace_config_failed:
@@ -3448,8 +3473,13 @@ qla2x00_shutdown(struct pci_dev *pdev)
if (ha->eft)
qla2x00_disable_eft_trace(vha);
- /* Stop currently executing firmware. */
- qla2x00_try_to_stop_firmware(vha);
+ if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
+ if (ha->flags.fw_started)
+ qla2x00_abort_isp_cleanup(vha);
+ } else {
+ /* Stop currently executing firmware. */
+ qla2x00_try_to_stop_firmware(vha);
+ }
/* Turn adapter off line */
vha->flags.online = 0;
@@ -3609,6 +3639,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
dma_free_coherent(&ha->pdev->dev,
base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);
+ vfree(base_vha->scan.l);
+
if (IS_QLAFX00(ha))
qlafx00_driver_shutdown(base_vha, 20);
@@ -3628,10 +3660,6 @@ qla2x00_remove_one(struct pci_dev *pdev)
qla84xx_put_chip(base_vha);
- /* Laser should be disabled only for ISP2031 */
- if (IS_QLA2031(ha))
- qla83xx_disable_laser(base_vha);
-
/* Disable timer */
if (base_vha->timer_active)
qla2x00_stop_timer(base_vha);
@@ -3692,8 +3720,16 @@ qla2x00_free_device(scsi_qla_host_t *vha)
if (ha->eft)
qla2x00_disable_eft_trace(vha);
- /* Stop currently executing firmware. */
- qla2x00_try_to_stop_firmware(vha);
+ if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
+ if (ha->flags.fw_started)
+ qla2x00_abort_isp_cleanup(vha);
+ } else {
+ if (ha->flags.fw_started) {
+ /* Stop currently executing firmware. */
+ qla2x00_try_to_stop_firmware(vha);
+ ha->flags.fw_started = 0;
+ }
+ }
vha->flags.online = 0;
@@ -3833,7 +3869,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
list_for_each_entry(fcport, &vha->vp_fcports, list) {
fcport->scan_state = 0;
- qlt_schedule_sess_for_deletion_lock(fcport);
+ qlt_schedule_sess_for_deletion(fcport);
if (vha->vp_idx != 0 && vha->vp_idx != fcport->vha->vp_idx)
continue;
@@ -4221,6 +4257,9 @@ qla2x00_number_of_exch(scsi_qla_host_t *vha, u32 *ret_cnt, u16 max_cnt)
u32 temp;
*ret_cnt = FW_DEF_EXCHANGES_CNT;
+ if (max_cnt > vha->hw->max_exchg)
+ max_cnt = vha->hw->max_exchg;
+
if (qla_ini_mode_enabled(vha)) {
if (ql2xiniexchg > max_cnt)
ql2xiniexchg = max_cnt;
@@ -4250,8 +4289,8 @@ int
qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
{
int rval;
- u16 size, max_cnt;
- u32 temp;
+ u16 size, max_cnt;
+ u32 actual_cnt, totsz;
struct qla_hw_data *ha = vha->hw;
if (!ha->flags.exchoffld_enabled)
@@ -4268,16 +4307,19 @@ qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
return rval;
}
- qla2x00_number_of_exch(vha, &temp, max_cnt);
- temp *= size;
+ qla2x00_number_of_exch(vha, &actual_cnt, max_cnt);
+ ql_log(ql_log_info, vha, 0xd014,
+ "Actual exchange offload count: %d.\n", actual_cnt);
+
+ totsz = actual_cnt * size;
- if (temp != ha->exchoffld_size) {
+ if (totsz != ha->exchoffld_size) {
qla2x00_free_exchoffld_buffer(ha);
- ha->exchoffld_size = temp;
+ ha->exchoffld_size = totsz;
ql_log(ql_log_info, vha, 0xd016,
- "Exchange offload: max_count=%d, buffers=0x%x, total=%d.\n",
- max_cnt, size, temp);
+ "Exchange offload: max_count=%d, actual count=%d entry sz=0x%x, total sz=0x%x\n",
+ max_cnt, actual_cnt, size, totsz);
ql_log(ql_log_info, vha, 0xd017,
"Exchange Buffers requested size = 0x%x\n",
@@ -4288,7 +4330,21 @@ qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
ha->exchoffld_size, &ha->exchoffld_buf_dma, GFP_KERNEL);
if (!ha->exchoffld_buf) {
ql_log_pci(ql_log_fatal, ha->pdev, 0xd013,
- "Failed to allocate memory for exchoffld_buf_dma.\n");
+ "Failed to allocate memory for Exchange Offload.\n");
+
+ if (ha->max_exchg >
+ (FW_DEF_EXCHANGES_CNT + REDUCE_EXCHANGES_CNT)) {
+ ha->max_exchg -= REDUCE_EXCHANGES_CNT;
+ } else if (ha->max_exchg >
+ (FW_DEF_EXCHANGES_CNT + 512)) {
+ ha->max_exchg -= 512;
+ } else {
+ ha->flags.exchoffld_enabled = 0;
+ ql_log_pci(ql_log_fatal, ha->pdev, 0xd013,
+ "Disabling Exchange offload due to lack of memory\n");
+ }
+ ha->exchoffld_size = 0;
+
return -ENOMEM;
}
}
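[On DMA allocation failure the exchange-offload path above now backs off instead of failing permanently: max_exchg is shrunk by REDUCE_EXCHANGES_CNT while there is room, then by 512, and the feature is disabled only once the floor is reached. The same back-off shape restated with illustrative constants:]

#include <linux/types.h>
#include <linux/errno.h>

#define DEMO_FLOOR	2048	/* stands in for FW_DEF_EXCHANGES_CNT */
#define DEMO_BIG_STEP	1024	/* stands in for REDUCE_EXCHANGES_CNT */
#define DEMO_SMALL_STEP	512

static int demo_backoff(u32 *count, bool *enabled)
{
	if (*count > DEMO_FLOOR + DEMO_BIG_STEP)
		*count -= DEMO_BIG_STEP;
	else if (*count > DEMO_FLOOR + DEMO_SMALL_STEP)
		*count -= DEMO_SMALL_STEP;
	else
		*enabled = false;	/* floor reached: disable feature */
	return -ENOMEM;			/* caller retries with new *count */
}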
@@ -4514,6 +4570,8 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
INIT_LIST_HEAD(&vha->qp_list);
INIT_LIST_HEAD(&vha->gnl.fcports);
INIT_LIST_HEAD(&vha->nvme_rport_list);
+ INIT_LIST_HEAD(&vha->gpnid_list);
+ INIT_WORK(&vha->iocb_work, qla2x00_iocb_work_fn);
spin_lock_init(&vha->work_lock);
spin_lock_init(&vha->cmd_list_lock);
@@ -4531,6 +4589,19 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
return NULL;
}
+ /* todo: what about ext login? */
+ vha->scan.size = ha->max_fibre_devices * sizeof(struct fab_scan_rp);
+ vha->scan.l = vmalloc(vha->scan.size);
+ if (!vha->scan.l) {
+ ql_log(ql_log_fatal, vha, 0xd04a,
+ "Alloc failed for scan database.\n");
+ dma_free_coherent(&ha->pdev->dev, vha->gnl.size,
+ vha->gnl.l, vha->gnl.ldma);
+ scsi_remove_host(vha->host);
+ return NULL;
+ }
+ INIT_DELAYED_WORK(&vha->scan.scan_work, qla_scan_work_fn);
+
sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
ql_dbg(ql_dbg_init, vha, 0x0041,
"Allocated the host=%p hw=%p vha=%p dev_name=%s",
@@ -4566,15 +4637,18 @@ int
qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
{
unsigned long flags;
+ bool q = false;
spin_lock_irqsave(&vha->work_lock, flags);
list_add_tail(&e->list, &vha->work_list);
+
+ if (!test_and_set_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags))
+ q = true;
+
spin_unlock_irqrestore(&vha->work_lock, flags);
- if (QLA_EARLY_LINKUP(vha->hw))
- schedule_work(&vha->iocb_work);
- else
- qla2xxx_wake_dpc(vha);
+ if (q)
+ queue_work(vha->hw->wq, &vha->iocb_work);
return QLA_SUCCESS;
}
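[qla2x00_post_work() above now arms the iocb work item only when it is the first caller to set IOCB_WORK_ACTIVE under work_lock; qla2x00_iocb_work_fn() clears the bit once it has drained, and the timer path a few hunks down uses the identical arm sequence. A stripped-down sketch of that producer/worker handshake, demo_* names being illustrative:]

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#define DEMO_ACTIVE	0

static unsigned long demo_flags;
static DEFINE_SPINLOCK(demo_lock);
static struct work_struct demo_work;

static void demo_post(struct workqueue_struct *wq)
{
	unsigned long flags;
	bool q = false;

	spin_lock_irqsave(&demo_lock, flags);
	if (!test_and_set_bit(DEMO_ACTIVE, &demo_flags))
		q = true;		/* first poster arms the worker */
	spin_unlock_irqrestore(&demo_lock, flags);

	if (q)
		queue_work(wq, &demo_work);
}

static void demo_worker(struct work_struct *work)
{
	unsigned long flags;

	/* ... drain a bounded number of queued events here ... */
	spin_lock_irqsave(&demo_lock, flags);
	clear_bit(DEMO_ACTIVE, &demo_flags);	/* allow re-arming */
	spin_unlock_irqrestore(&demo_lock, flags);
}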
@@ -4623,6 +4697,7 @@ int qla2x00_post_async_##name##_work( \
e->u.logio.data[0] = data[0]; \
e->u.logio.data[1] = data[1]; \
} \
+ fcport->flags |= FCF_ASYNC_ACTIVE; \
return qla2x00_post_work(vha, e); \
}
@@ -4631,6 +4706,8 @@ qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC);
qla2x00_post_async_work(adisc_done, QLA_EVT_ASYNC_ADISC_DONE);
+qla2x00_post_async_work(prlo, QLA_EVT_ASYNC_PRLO);
+qla2x00_post_async_work(prlo_done, QLA_EVT_ASYNC_PRLO_DONE);
int
qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code)
@@ -4699,6 +4776,11 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
struct qlt_plogi_ack_t *pla =
(struct qlt_plogi_ack_t *)e->u.new_sess.pla;
uint8_t free_fcport = 0;
+ u64 wwn;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC enter\n",
+ __func__, __LINE__, e->u.new_sess.port_name);
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
fcport = qla2x00_find_fcport_by_wwpn(vha, e->u.new_sess.port_name, 1);
@@ -4706,6 +4788,9 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
fcport->d_id = e->u.new_sess.id;
if (pla) {
fcport->fw_login_state = DSC_LS_PLOGI_PEND;
+ memcpy(fcport->node_name,
+ pla->iocb.u.isp24.u.plogi.node_name,
+ WWN_SIZE);
qlt_plogi_ack_link(vha, pla, fcport, QLT_PLOGI_LINK_SAME_WWN);
/* we took an extra ref_count to prevent PLOGI ACK when
* fcport/sess has not been created.
@@ -4717,9 +4802,10 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (fcport) {
fcport->d_id = e->u.new_sess.id;
- fcport->scan_state = QLA_FCPORT_FOUND;
fcport->flags |= FCF_FABRIC_DEVICE;
fcport->fw_login_state = DSC_LS_PLOGI_PEND;
+ if (e->u.new_sess.fc4_type == FC4_TYPE_FCP_SCSI)
+ fcport->fc4_type = FC4_TYPE_FCP_SCSI;
memcpy(fcport->port_name, e->u.new_sess.port_name,
WWN_SIZE);
@@ -4734,7 +4820,7 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
}
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
- /* search again to make sure one else got ahead */
+ /* search again to make sure no one else got ahead */
tfcp = qla2x00_find_fcport_by_wwpn(vha,
e->u.new_sess.port_name, 1);
if (tfcp) {
@@ -4748,20 +4834,82 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
} else {
list_add_tail(&fcport->list, &vha->vp_fcports);
- if (pla) {
- qlt_plogi_ack_link(vha, pla, fcport,
- QLT_PLOGI_LINK_SAME_WWN);
- pla->ref_count--;
- }
+ }
+ if (pla) {
+ qlt_plogi_ack_link(vha, pla, fcport,
+ QLT_PLOGI_LINK_SAME_WWN);
+ pla->ref_count--;
}
}
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
if (fcport) {
- if (pla)
+ if (N2N_TOPO(vha->hw))
+ fcport->flags &= ~FCF_FABRIC_DEVICE;
+
+ fcport->id_changed = 1;
+ fcport->scan_state = QLA_FCPORT_FOUND;
+ memcpy(fcport->node_name, e->u.new_sess.node_name, WWN_SIZE);
+
+ if (pla) {
+ if (pla->iocb.u.isp24.status_subcode == ELS_PRLI) {
+ u16 wd3_lo;
+
+ fcport->fw_login_state = DSC_LS_PRLI_PEND;
+ fcport->local = 0;
+ fcport->loop_id =
+ le16_to_cpu(
+ pla->iocb.u.isp24.nport_handle);
+ fcport->fw_login_state = DSC_LS_PRLI_PEND;
+ wd3_lo =
+ le16_to_cpu(
+ pla->iocb.u.isp24.u.prli.wd3_lo);
+
+ if (wd3_lo & BIT_7)
+ fcport->conf_compl_supported = 1;
+
+ if ((wd3_lo & BIT_4) == 0)
+ fcport->port_type = FCT_INITIATOR;
+ else
+ fcport->port_type = FCT_TARGET;
+ }
qlt_plogi_ack_unref(vha, pla);
- else
- qla24xx_async_gffid(vha, fcport);
+ } else {
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ tfcp = qla2x00_find_fcport_by_nportid(vha,
+ &e->u.new_sess.id, 1);
+ if (tfcp && (tfcp != fcport)) {
+ /*
+ * We have a conflicting fcport with the same NportID.
+ */
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC found conflict b4 add. DS %d LS %d\n",
+ __func__, tfcp->port_name, tfcp->disc_state,
+ tfcp->fw_login_state);
+
+ switch (tfcp->disc_state) {
+ case DSC_DELETED:
+ break;
+ case DSC_DELETE_PEND:
+ fcport->login_pause = 1;
+ tfcp->conflict = fcport;
+ break;
+ default:
+ fcport->login_pause = 1;
+ tfcp->conflict = fcport;
+ qlt_schedule_sess_for_deletion(tfcp);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+ wwn = wwn_to_u64(fcport->node_name);
+
+ if (!wwn)
+ qla24xx_async_gnnid(vha, fcport);
+ else
+ qla24xx_async_gnl(vha, fcport);
+ }
}
if (free_fcport) {
@@ -4771,6 +4919,20 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
}
}
+static void qla_sp_retry(struct scsi_qla_host *vha, struct qla_work_evt *e)
+{
+ struct srb *sp = e->u.iosb.sp;
+ int rval;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_disc, vha, 0x2043,
+ "%s: %s: Re-issue IOCB failed (%d).\n",
+ __func__, sp->name, rval);
+ qla24xx_sp_unmap(vha, sp);
+ }
+}
+
void
qla2x00_do_work(struct scsi_qla_host *vha)
{
@@ -4824,8 +4986,11 @@ qla2x00_do_work(struct scsi_qla_host *vha)
case QLA_EVT_GPNID:
qla24xx_async_gpnid(vha, &e->u.gpnid.id);
break;
- case QLA_EVT_GPNID_DONE:
- qla24xx_async_gpnid_done(vha, e->u.iosb.sp);
+ case QLA_EVT_UNMAP:
+ qla24xx_sp_unmap(vha, e->u.iosb.sp);
+ break;
+ case QLA_EVT_RELOGIN:
+ qla2x00_relogin(vha);
break;
case QLA_EVT_NEW_SESS:
qla24xx_create_new_sess(vha, e);
@@ -4849,6 +5014,30 @@ qla2x00_do_work(struct scsi_qla_host *vha)
case QLA_EVT_NACK:
qla24xx_do_nack_work(vha, e);
break;
+ case QLA_EVT_ASYNC_PRLO:
+ qla2x00_async_prlo(vha, e->u.logio.fcport);
+ break;
+ case QLA_EVT_ASYNC_PRLO_DONE:
+ qla2x00_async_prlo_done(vha, e->u.logio.fcport,
+ e->u.logio.data);
+ break;
+ case QLA_EVT_GPNFT:
+ qla24xx_async_gpnft(vha, e->u.gpnft.fc4_type);
+ break;
+ case QLA_EVT_GPNFT_DONE:
+ qla24xx_async_gpnft_done(vha, e->u.iosb.sp);
+ break;
+ case QLA_EVT_GNNFT_DONE:
+ qla24xx_async_gnnft_done(vha, e->u.iosb.sp);
+ break;
+ case QLA_EVT_GNNID:
+ qla24xx_async_gnnid(vha, e->u.fcport.fcport);
+ break;
+ case QLA_EVT_GFPNID:
+ qla24xx_async_gfpnid(vha, e->u.fcport.fcport);
+ break;
+ case QLA_EVT_SP_RETRY:
+ qla_sp_retry(vha, e);
}
if (e->flags & QLA_EVT_FLAG_FREE)
kfree(e);
@@ -4858,6 +5047,20 @@ qla2x00_do_work(struct scsi_qla_host *vha)
}
}
+int qla24xx_post_relogin_work(struct scsi_qla_host *vha)
+{
+ struct qla_work_evt *e;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_RELOGIN);
+
+ if (!e) {
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ return QLA_FUNCTION_FAILED;
+ }
+
+ return qla2x00_post_work(vha, e);
+}
+
/* Relogins all the fcports of a vport
* Context: dpc thread
*/
@@ -4868,14 +5071,14 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
struct event_arg ea;
list_for_each_entry(fcport, &vha->vp_fcports, list) {
- /*
- * If the port is not ONLINE then try to login
- * to it if we haven't run out of retries.
- */
+ /*
+ * If the port is not ONLINE then try to login
+ * to it if we haven't run out of retries.
+ */
if (atomic_read(&fcport->state) != FCS_ONLINE &&
- fcport->login_retry && !(fcport->flags & FCF_ASYNC_SENT)) {
- fcport->login_retry--;
- if (fcport->flags & FCF_FABRIC_DEVICE) {
+ fcport->login_retry &&
+ !(fcport->flags & (FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE))) {
+ if (vha->hw->current_topology != ISP_CFG_NL) {
ql_dbg(ql_dbg_disc, fcport->vha, 0x2108,
"%s %8phC DS %d LS %d\n", __func__,
fcport->port_name, fcport->disc_state,
@@ -4884,7 +5087,8 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
ea.event = FCME_RELOGIN;
ea.fcport = fcport;
qla2x00_fcport_event_handler(vha, &ea);
- } else {
+ } else if (vha->hw->current_topology == ISP_CFG_NL) {
+ fcport->login_retry--;
status = qla2x00_local_device_login(vha,
fcport);
if (status == QLA_SUCCESS) {
@@ -4912,6 +5116,9 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
break;
}
+
+ ql_dbg(ql_dbg_disc, vha, 0x400e,
+ "Relogin end.\n");
}
/* Schedule work on any of the dpc-workqueues */
@@ -5687,8 +5894,6 @@ qla2x00_do_dpc(void *data)
if (test_bit(UNLOADING, &base_vha->dpc_flags))
break;
- qla2x00_do_work(base_vha);
-
if (IS_P3P_TYPE(ha)) {
if (IS_QLA8044(ha)) {
if (test_and_clear_bit(ISP_UNRECOVERABLE,
@@ -5867,16 +6072,19 @@ qla2x00_do_dpc(void *data)
}
/* Retry each device up to login retry count */
- if ((test_and_clear_bit(RELOGIN_NEEDED,
- &base_vha->dpc_flags)) &&
+ if (test_bit(RELOGIN_NEEDED, &base_vha->dpc_flags) &&
!test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
atomic_read(&base_vha->loop_state) != LOOP_DOWN) {
- ql_dbg(ql_dbg_dpc, base_vha, 0x400d,
- "Relogin scheduled.\n");
- qla2x00_relogin(base_vha);
- ql_dbg(ql_dbg_dpc, base_vha, 0x400e,
- "Relogin end.\n");
+ if (!base_vha->relogin_jif ||
+ time_after_eq(jiffies, base_vha->relogin_jif)) {
+ base_vha->relogin_jif = jiffies + HZ;
+ clear_bit(RELOGIN_NEEDED, &base_vha->dpc_flags);
+
+ ql_dbg(ql_dbg_disc, base_vha, 0x400d,
+ "Relogin scheduled.\n");
+ qla24xx_post_relogin_work(base_vha);
+ }
}
loop_resync_check:
if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
@@ -6135,8 +6343,17 @@ qla2x00_timer(struct timer_list *t)
}
/* Process any deferred work. */
- if (!list_empty(&vha->work_list))
- start_dpc++;
+ if (!list_empty(&vha->work_list)) {
+ unsigned long flags;
+ bool q = false;
+
+ spin_lock_irqsave(&vha->work_lock, flags);
+ if (!test_and_set_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags))
+ q = true;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+ if (q)
+ queue_work(vha->hw->wq, &vha->iocb_work);
+ }
/*
* FC-NVME
@@ -6580,37 +6797,16 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
ha->flags.eeh_busy = 0;
}
-static void
-qla83xx_disable_laser(scsi_qla_host_t *vha)
-{
- uint32_t reg, data, fn;
- struct qla_hw_data *ha = vha->hw;
- struct device_reg_24xx __iomem *isp_reg = &ha->iobase->isp24;
-
- /* pci func #/port # */
- ql_dbg(ql_dbg_init, vha, 0x004b,
- "Disabling Laser for hba: %p\n", vha);
-
- fn = (RD_REG_DWORD(&isp_reg->ctrl_status) &
- (BIT_15|BIT_14|BIT_13|BIT_12));
-
- fn = (fn >> 12);
-
- if (fn & 1)
- reg = PORT_1_2031;
- else
- reg = PORT_0_2031;
-
- data = LASER_OFF_2031;
-
- qla83xx_wr_reg(vha, reg, data);
-}
-
static int qla2xxx_map_queues(struct Scsi_Host *shost)
{
+ int rc;
scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
- return blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev);
+ if (USER_CTRL_IRQ(vha->hw))
+ rc = blk_mq_map_queues(&shost->tag_set);
+ else
+ rc = blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev);
+ return rc;
}
static const struct pci_error_handlers qla2xxx_err_handler = {
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index b4336e0cd85f..d2db86ea06b2 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -2461,6 +2461,7 @@ qla2x00_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
sec_mask = 0x1e000;
break;
}
+ /* fall through */
default:
/* Default to 16 kb sector size. */
rest_addr = 0x3fff;
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 18069edd4773..fc89af8fe256 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -75,7 +75,8 @@ MODULE_PARM_DESC(ql2xuctrlirq,
int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
-static int temp_sam_status = SAM_STAT_BUSY;
+static int qla_sam_status = SAM_STAT_BUSY;
+static int tc_sam_status = SAM_STAT_TASK_SET_FULL; /* target core */
/*
* From scsi/fc/fc_fcp.h
@@ -208,7 +209,7 @@ struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
host = btree_lookup32(&vha->hw->tgt.host_map, key);
if (!host)
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
+ ql_dbg(ql_dbg_tgt_mgt + ql_dbg_verbose, vha, 0xf005,
"Unable to find host %06x\n", key);
return host;
@@ -309,17 +310,17 @@ static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
if (host != NULL) {
- ql_dbg(ql_dbg_async, vha, 0x502f,
+ ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x502f,
"Requeuing unknown ATIO_TYPE7 %p\n", u);
qlt_24xx_atio_pkt(host, &u->atio, ha_locked);
} else if (tgt->tgt_stop) {
- ql_dbg(ql_dbg_async, vha, 0x503a,
+ ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503a,
"Freeing unknown %s %p, because tgt is being stopped\n",
"ATIO_TYPE7", u);
qlt_send_term_exchange(vha->hw->base_qpair, NULL,
&u->atio, ha_locked, 0);
} else {
- ql_dbg(ql_dbg_async, vha, 0x503d,
+ ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503d,
"Reschedule u %p, vha %p, host %p\n", u, vha, host);
if (!queued) {
queued = 1;
@@ -450,6 +451,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt, vha, 0xe073,
"qla_target(%d):%s: CRC2 Response pkt\n",
vha->vp_idx, __func__);
+ /* fall through */
case CTIO_TYPE7:
{
struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
@@ -606,7 +608,7 @@ void qla2x00_async_nack_sp_done(void *s, int res)
__func__, __LINE__,
sp->fcport->port_name,
vha->fcport_count);
-
+ sp->fcport->disc_state = DSC_UPD_FCPORT;
qla24xx_post_upd_fcport_work(vha, sp->fcport);
} else {
ql_dbg(ql_dbg_disc, vha, 0x20f5,
@@ -665,7 +667,7 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);
sp->u.iocb_cmd.u.nack.ntfy = ntfy;
-
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
sp->done = qla2x00_async_nack_sp_done;
rval = qla2x00_start_sp(sp);
@@ -861,7 +863,10 @@ void qlt_plogi_ack_unref(struct scsi_qla_host *vha,
fcport->loop_id = loop_id;
fcport->d_id = port_id;
- qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI);
+ if (iocb->u.isp24.status_subcode == ELS_PLOGI)
+ qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI);
+ else
+ qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PRLI);
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] == pla)
@@ -890,6 +895,17 @@ qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla,
iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
pla->ref_count, pla, link);
+ if (link == QLT_PLOGI_LINK_CONFLICT) {
+ switch (sess->disc_state) {
+ case DSC_DELETED:
+ case DSC_DELETE_PEND:
+ pla->ref_count--;
+ return;
+ default:
+ break;
+ }
+ }
+
if (sess->plogi_link[link])
qlt_plogi_ack_unref(vha, sess->plogi_link[link]);
@@ -954,8 +970,9 @@ static void qlt_free_session_done(struct work_struct *work)
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
bool logout_started = false;
- struct event_arg ea;
scsi_qla_host_t *base_vha;
+ struct qlt_plogi_ack_t *own =
+ sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
"%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
@@ -971,19 +988,35 @@ static void qlt_free_session_done(struct work_struct *work)
logo.id = sess->d_id;
logo.cmd_count = 0;
+ sess->send_els_logo = 0;
qlt_send_first_logo(vha, &logo);
}
- if (sess->logout_on_delete) {
+ if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) {
int rc;
- rc = qla2x00_post_async_logout_work(vha, sess, NULL);
- if (rc != QLA_SUCCESS)
- ql_log(ql_log_warn, vha, 0xf085,
- "Schedule logo failed sess %p rc %d\n",
- sess, rc);
- else
- logout_started = true;
+ if (!own ||
+ (own &&
+ (own->iocb.u.isp24.status_subcode == ELS_PLOGI))) {
+ rc = qla2x00_post_async_logout_work(vha, sess,
+ NULL);
+ if (rc != QLA_SUCCESS)
+ ql_log(ql_log_warn, vha, 0xf085,
+ "Schedule logo failed sess %p rc %d\n",
+ sess, rc);
+ else
+ logout_started = true;
+ } else if (own && (own->iocb.u.isp24.status_subcode ==
+ ELS_PRLI) && ha->flags.rida_fmt2) {
+ rc = qla2x00_post_async_prlo_work(vha, sess,
+ NULL);
+ if (rc != QLA_SUCCESS)
+ ql_log(ql_log_warn, vha, 0xf085,
+ "Schedule PRLO failed sess %p rc %d\n",
+ sess, rc);
+ else
+ logout_started = true;
+ }
}
}
@@ -1007,7 +1040,7 @@ static void qlt_free_session_done(struct work_struct *work)
}
ql_dbg(ql_dbg_disc, vha, 0xf087,
- "%s: sess %p logout completed\n",__func__, sess);
+ "%s: sess %p logout completed\n", __func__, sess);
}
if (sess->logo_ack_needed) {
@@ -1033,8 +1066,7 @@ static void qlt_free_session_done(struct work_struct *work)
sess->login_succ = 0;
}
- if (sess->chip_reset != ha->base_qpair->chip_reset)
- qla2x00_clear_loop_id(sess);
+ qla2x00_clear_loop_id(sess);
if (sess->conflict) {
sess->conflict->login_pause = 0;
@@ -1044,8 +1076,6 @@ static void qlt_free_session_done(struct work_struct *work)
}
{
- struct qlt_plogi_ack_t *own =
- sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
struct qlt_plogi_ack_t *con =
sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
struct imm_ntfy_from_isp *iocb;
@@ -1076,6 +1106,7 @@ static void qlt_free_session_done(struct work_struct *work)
sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
}
}
+
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
@@ -1089,14 +1120,24 @@ static void qlt_free_session_done(struct work_struct *work)
wake_up_all(&vha->fcport_waitQ);
base_vha = pci_get_drvdata(ha->pdev);
+
+ sess->free_pending = 0;
+
if (test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags))
return;
- if (!tgt || !tgt->tgt_stop) {
- memset(&ea, 0, sizeof(ea));
- ea.event = FCME_DELETE_DONE;
- ea.fcport = sess;
- qla2x00_fcport_event_handler(vha, &ea);
+ if ((!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) {
+ switch (vha->host->active_mode) {
+ case MODE_INITIATOR:
+ case MODE_DUAL:
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ break;
+ case MODE_TARGET:
+ default:
+ /* no-op */
+ break;
+ }
}
}
@@ -1104,11 +1145,20 @@ static void qlt_free_session_done(struct work_struct *work)
void qlt_unreg_sess(struct fc_port *sess)
{
struct scsi_qla_host *vha = sess->vha;
+ unsigned long flags;
ql_dbg(ql_dbg_disc, sess->vha, 0x210a,
"%s sess %p for deletion %8phC\n",
__func__, sess, sess->port_name);
+ spin_lock_irqsave(&sess->vha->work_lock, flags);
+ if (sess->free_pending) {
+ spin_unlock_irqrestore(&sess->vha->work_lock, flags);
+ return;
+ }
+ sess->free_pending = 1;
+ spin_unlock_irqrestore(&sess->vha->work_lock, flags);
+
if (sess->se_sess)
vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
@@ -1175,10 +1225,10 @@ static void qla24xx_chk_fcp_state(struct fc_port *sess)
}
/* ha->tgt.sess_lock supposed to be held on entry */
-void qlt_schedule_sess_for_deletion(struct fc_port *sess,
- bool immediate)
+void qlt_schedule_sess_for_deletion(struct fc_port *sess)
{
struct qla_tgt *tgt = sess->tgt;
+ unsigned long flags;
if (sess->disc_state == DSC_DELETE_PEND)
return;
@@ -1194,27 +1244,28 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess,
return;
}
- sess->disc_state = DSC_DELETE_PEND;
-
if (sess->deleted == QLA_SESS_DELETED)
sess->logout_on_delete = 0;
+ spin_lock_irqsave(&sess->vha->work_lock, flags);
+ if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+ spin_unlock_irqrestore(&sess->vha->work_lock, flags);
+ return;
+ }
sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
+ spin_unlock_irqrestore(&sess->vha->work_lock, flags);
+
+ sess->disc_state = DSC_DELETE_PEND;
+
qla24xx_chk_fcp_state(sess);
ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
"Scheduling sess %p for deletion\n", sess);
- schedule_work(&sess->del_work);
-}
-
-void qlt_schedule_sess_for_deletion_lock(struct fc_port *sess)
-{
- unsigned long flags;
- struct qla_hw_data *ha = sess->vha->hw;
- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- qlt_schedule_sess_for_deletion(sess, 1);
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ /* use cancel to push work element through before re-queue */
+ cancel_work_sync(&sess->del_work);
+ INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn);
+ queue_work(sess->vha->hw->wq, &sess->del_work);
}
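[qlt_schedule_sess_for_deletion() above folds in the old _lock variant and adds two guards: a work_lock-protected check so a session already in QLA_SESS_DELETION_IN_PROGRESS is not queued twice, and a cancel_work_sync()/INIT_WORK()/queue_work() sequence so a still-pending del_work can be re-queued safely. The re-queue idiom on its own, names illustrative:]

#include <linux/workqueue.h>

static void demo_requeue(struct work_struct *w, work_func_t fn,
			 struct workqueue_struct *wq)
{
	cancel_work_sync(w);	/* flush any in-flight execution */
	INIT_WORK(w, fn);	/* re-arm with the handler */
	queue_work(wq, w);	/* schedule the fresh instance */
}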
/* ha->tgt.sess_lock supposed to be held on entry */
@@ -1225,7 +1276,7 @@ static void qlt_clear_tgt_db(struct qla_tgt *tgt)
list_for_each_entry(sess, &vha->vp_fcports, list) {
if (sess->se_sess)
- qlt_schedule_sess_for_deletion(sess, 1);
+ qlt_schedule_sess_for_deletion(sess);
}
/* At this point tgt could be already dead */
@@ -1400,7 +1451,7 @@ qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);
sess->local = 1;
- qlt_schedule_sess_for_deletion(sess, false);
+ qlt_schedule_sess_for_deletion(sess);
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
}
@@ -1560,8 +1611,11 @@ static void qlt_release(struct qla_tgt *tgt)
btree_destroy64(&tgt->lun_qpair_map);
- if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->remove_target)
- ha->tgt.tgt_ops->remove_target(vha);
+ if (vha->vp_idx)
+ if (ha->tgt.tgt_ops &&
+ ha->tgt.tgt_ops->remove_target &&
+ vha->vha_tgt.target_lport_ptr)
+ ha->tgt.tgt_ops->remove_target(vha);
vha->vha_tgt.qla_tgt = NULL;
@@ -1976,15 +2030,10 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
"qla_target(%d): task abort for non-existant session\n",
vha->vp_idx);
- rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
- QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
-
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
- if (rc != 0) {
- qlt_24xx_send_abts_resp(ha->base_qpair, abts,
- FCP_TMF_REJECTED, false);
- }
+ qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
+ false);
return;
}
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
@@ -2174,7 +2223,7 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
"TM response logo %phC status %#x state %#x",
mcmd->sess->port_name, mcmd->fc_tm_rsp,
mcmd->flags);
- qlt_schedule_sess_for_deletion_lock(mcmd->sess);
+ qlt_schedule_sess_for_deletion(mcmd->sess);
} else {
qlt_send_notify_ack(vha->hw->base_qpair,
&mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
@@ -3708,7 +3757,7 @@ static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio,
term = 1;
if (term)
- qlt_term_ctio_exchange(qpair, ctio, cmd, status);
+ qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1, 0);
return term;
}
@@ -3869,7 +3918,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
"%s %d %8phC post del sess\n",
__func__, __LINE__, cmd->sess->port_name);
- qlt_schedule_sess_for_deletion_lock(cmd->sess);
+ qlt_schedule_sess_for_deletion(cmd->sess);
}
break;
}
@@ -4204,76 +4253,6 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
return cmd;
}
-static void qlt_create_sess_from_atio(struct work_struct *work)
-{
- struct qla_tgt_sess_op *op = container_of(work,
- struct qla_tgt_sess_op, work);
- scsi_qla_host_t *vha = op->vha;
- struct qla_hw_data *ha = vha->hw;
- struct fc_port *sess;
- struct qla_tgt_cmd *cmd;
- unsigned long flags;
- uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;
-
- spin_lock_irqsave(&vha->cmd_list_lock, flags);
- list_del(&op->cmd_list);
- spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
-
- if (op->aborted) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf083,
- "sess_op with tag %u is aborted\n",
- op->atio.u.isp24.exchange_addr);
- goto out_term;
- }
-
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
- "qla_target(%d): Unable to find wwn login"
- " (s_id %x:%x:%x), trying to create it manually\n",
- vha->vp_idx, s_id[0], s_id[1], s_id[2]);
-
- if (op->atio.u.raw.entry_count > 1) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
- "Dropping multy entry atio %p\n", &op->atio);
- goto out_term;
- }
-
- sess = qlt_make_local_sess(vha, s_id);
- /* sess has an extra creation ref. */
-
- if (!sess)
- goto out_term;
- /*
- * Now obtain a pre-allocated session tag using the original op->atio
- * packet header, and dispatch into __qlt_do_work() using the existing
- * process context.
- */
- cmd = qlt_get_tag(vha, sess, &op->atio);
- if (!cmd) {
- struct qla_qpair *qpair = ha->base_qpair;
-
- spin_lock_irqsave(qpair->qp_lock_ptr, flags);
- qlt_send_busy(qpair, &op->atio, SAM_STAT_BUSY);
- spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
-
- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- ha->tgt.tgt_ops->put_sess(sess);
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
- kfree(op);
- return;
- }
-
- /*
- * __qlt_do_work() will call qlt_put_sess() to release
- * the extra reference taken above by qlt_make_local_sess()
- */
- __qlt_do_work(cmd);
- kfree(op);
- return;
-out_term:
- qlt_send_term_exchange(vha->hw->base_qpair, NULL, &op->atio, 0, 0);
- kfree(op);
-}
-
/* ha->hardware_lock supposed to be held on entry */
static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
struct atio_from_isp *atio)
@@ -4283,31 +4262,23 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
struct fc_port *sess;
struct qla_tgt_cmd *cmd;
unsigned long flags;
+ port_id_t id;
if (unlikely(tgt->tgt_stop)) {
ql_dbg(ql_dbg_io, vha, 0x3061,
"New command while device %p is shutting down\n", tgt);
- return -EFAULT;
+ return -ENODEV;
}
- sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
- if (unlikely(!sess)) {
- struct qla_tgt_sess_op *op = kzalloc(sizeof(struct qla_tgt_sess_op),
- GFP_ATOMIC);
- if (!op)
- return -ENOMEM;
-
- memcpy(&op->atio, atio, sizeof(*atio));
- op->vha = vha;
-
- spin_lock_irqsave(&vha->cmd_list_lock, flags);
- list_add_tail(&op->cmd_list, &vha->qla_sess_op_cmd_list);
- spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
+ id.b.al_pa = atio->u.isp24.fcp_hdr.s_id[2];
+ id.b.area = atio->u.isp24.fcp_hdr.s_id[1];
+ id.b.domain = atio->u.isp24.fcp_hdr.s_id[0];
+ if (IS_SW_RESV_ADDR(id))
+ return -EBUSY;
- INIT_WORK(&op->work, qlt_create_sess_from_atio);
- queue_work(qla_tgt_wq, &op->work);
- return 0;
- }
+ sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
+ if (unlikely(!sess))
+ return -EFAULT;
/* Another WWN used to have our s_id. Our PLOGI scheduled its
* session deletion, but it's still in sess_del_work wq */
@@ -4336,7 +4307,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
- return -ENOMEM;
+ return -EBUSY;
}
cmd->cmd_in_wq = 1;
@@ -4417,14 +4388,11 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
{
struct atio_from_isp *a = (struct atio_from_isp *)iocb;
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt *tgt;
struct fc_port *sess;
u64 unpacked_lun;
int fn;
unsigned long flags;
- tgt = vha->vha_tgt.qla_tgt;
-
fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
@@ -4435,15 +4403,7 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
unpacked_lun =
scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
- if (!sess) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
- "qla_target(%d): task mgmt fn 0x%x for "
- "non-existant session\n", vha->vp_idx, fn);
- return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb,
- sizeof(struct atio_from_isp));
- }
-
- if (sess->deleted)
+ if (sess == NULL || sess->deleted)
return -EFAULT;
return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
@@ -4574,7 +4534,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
* might have cleared it when requested this session
* deletion, so don't touch it
*/
- qlt_schedule_sess_for_deletion(other_sess, true);
+ qlt_schedule_sess_for_deletion(other_sess);
} else {
/*
* Another wwn used to have our s_id/loop_id
@@ -4584,11 +4544,10 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
"Invalidating sess %p loop_id %d wwn %llx.\n",
other_sess, other_sess->loop_id, other_wwn);
-
other_sess->keep_nport_handle = 1;
- *conflict_sess = other_sess;
- qlt_schedule_sess_for_deletion(other_sess,
- true);
+ if (other_sess->disc_state != DSC_DELETED)
+ *conflict_sess = other_sess;
+ qlt_schedule_sess_for_deletion(other_sess);
}
continue;
}
@@ -4602,7 +4561,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
/* Same loop_id but different s_id
* Ok to kill and logout */
- qlt_schedule_sess_for_deletion(other_sess, true);
+ qlt_schedule_sess_for_deletion(other_sess);
}
}
@@ -4652,6 +4611,138 @@ static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
return count;
}
+static int qlt_handle_login(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *iocb)
+{
+ struct fc_port *sess = NULL, *conflict_sess = NULL;
+ uint64_t wwn;
+ port_id_t port_id;
+ uint16_t loop_id, wd3_lo;
+ int res = 0;
+ struct qlt_plogi_ack_t *pla;
+ unsigned long flags;
+
+ wwn = wwn_to_u64(iocb->u.isp24.port_name);
+
+ port_id.b.domain = iocb->u.isp24.port_id[2];
+ port_id.b.area = iocb->u.isp24.port_id[1];
+ port_id.b.al_pa = iocb->u.isp24.port_id[0];
+ port_id.b.rsvd_1 = 0;
+
+ loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
+
+ /* Mark all stale commands sitting in qla_tgt_wq for deletion */
+ abort_cmds_for_s_id(vha, &port_id);
+
+ if (wwn) {
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ sess = qlt_find_sess_invalidate_other(vha, wwn,
+ port_id, loop_id, &conflict_sess);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ }
+
+ if (IS_SW_RESV_ADDR(port_id)) {
+ res = 1;
+ goto out;
+ }
+
+ pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
+ if (!pla) {
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ goto out;
+ }
+
+ if (conflict_sess) {
+ conflict_sess->login_gen++;
+ qlt_plogi_ack_link(vha, pla, conflict_sess,
+ QLT_PLOGI_LINK_CONFLICT);
+ }
+
+ if (!sess) {
+ pla->ref_count++;
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post new sess\n",
+ __func__, __LINE__, iocb->u.isp24.port_name);
+ if (iocb->u.isp24.status_subcode == ELS_PLOGI)
+ qla24xx_post_newsess_work(vha, &port_id,
+ iocb->u.isp24.port_name,
+ iocb->u.isp24.u.plogi.node_name,
+ pla, FC4_TYPE_UNKNOWN);
+ else
+ qla24xx_post_newsess_work(vha, &port_id,
+ iocb->u.isp24.port_name, NULL,
+ pla, FC4_TYPE_UNKNOWN);
+
+ goto out;
+ }
+
+ qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
+ sess->d_id = port_id;
+ sess->login_gen++;
+
+ if (iocb->u.isp24.status_subcode == ELS_PRLI) {
+ sess->fw_login_state = DSC_LS_PRLI_PEND;
+ sess->local = 0;
+ sess->loop_id = loop_id;
+ sess->d_id = port_id;
+ sess->fw_login_state = DSC_LS_PRLI_PEND;
+ wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
+
+ if (wd3_lo & BIT_7)
+ sess->conf_compl_supported = 1;
+
+ if ((wd3_lo & BIT_4) == 0)
+ sess->port_type = FCT_INITIATOR;
+ else
+ sess->port_type = FCT_TARGET;
+
+ } else
+ sess->fw_login_state = DSC_LS_PLOGI_PEND;
+
+
+ ql_dbg(ql_dbg_disc, vha, 0x20f9,
+ "%s %d %8phC DS %d\n",
+ __func__, __LINE__, sess->port_name, sess->disc_state);
+
+ switch (sess->disc_state) {
+ case DSC_DELETED:
+ qlt_plogi_ack_unref(vha, pla);
+ break;
+
+ default:
+ /*
+ * Under normal circumstances we want to release nport handle
+ * during LOGO process to avoid nport handle leaks inside FW.
+ * The exception is when LOGO is done while another PLOGI with
+ * the same nport handle is waiting as might be the case here.
+ * Note: there is always a possibility of a race where session
+ * deletion has already started for other reasons (e.g. ACL
+ * removal) and now PLOGI arrives:
+ * 1. if PLOGI arrived in FW after nport handle has been freed,
+ * FW must have assigned this PLOGI a new/same handle and we
+ * can proceed ACK'ing it as usual when session deletion
+ * completes.
+ * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
+ * bit reached it, the handle has now been released. We'll
+ * get an error when we ACK this PLOGI. Nothing will be sent
+ * back to initiator. Initiator should eventually retry
+ * PLOGI and situation will correct itself.
+ */
+ sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
+ (sess->d_id.b24 == port_id.b24));
+
+ ql_dbg(ql_dbg_disc, vha, 0x20f9,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__, sess->port_name);
+
+
+ qlt_schedule_sess_for_deletion(sess);
+ break;
+ }
+out:
+ return res;
+}
+
/*
* ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
*/
@@ -4666,7 +4757,6 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
uint16_t loop_id;
uint16_t wd3_lo;
int res = 0;
- struct qlt_plogi_ack_t *pla;
unsigned long flags;
wwn = wwn_to_u64(iocb->u.isp24.port_name);
@@ -4690,88 +4780,32 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
*/
switch (iocb->u.isp24.status_subcode) {
case ELS_PLOGI:
+ res = qlt_handle_login(vha, iocb);
+ break;
- /* Mark all stale commands in qla_tgt_wq for deletion */
- abort_cmds_for_s_id(vha, &port_id);
-
- if (wwn) {
- spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
- sess = qlt_find_sess_invalidate_other(vha, wwn,
- port_id, loop_id, &conflict_sess);
- spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
- }
-
- if (IS_SW_RESV_ADDR(port_id)) {
- res = 1;
- break;
- }
-
- pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
- if (!pla) {
- qlt_send_term_imm_notif(vha, iocb, 1);
- break;
- }
-
- res = 0;
+ case ELS_PRLI:
+ if (N2N_TOPO(ha)) {
+ sess = qla2x00_find_fcport_by_wwpn(vha,
+ iocb->u.isp24.port_name, 1);
- if (conflict_sess) {
- conflict_sess->login_gen++;
- qlt_plogi_ack_link(vha, pla, conflict_sess,
- QLT_PLOGI_LINK_CONFLICT);
- }
+ if (sess && sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC Term PRLI due to PLOGI ACK not completed\n",
+ __func__, __LINE__,
+ iocb->u.isp24.port_name);
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ break;
+ }
- if (!sess) {
- pla->ref_count++;
- qla24xx_post_newsess_work(vha, &port_id,
- iocb->u.isp24.port_name, pla);
- res = 0;
+ res = qlt_handle_login(vha, iocb);
break;
}
- qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
- sess->fw_login_state = DSC_LS_PLOGI_PEND;
- sess->d_id = port_id;
- sess->login_gen++;
-
- switch (sess->disc_state) {
- case DSC_DELETED:
- qlt_plogi_ack_unref(vha, pla);
- break;
-
- default:
- /*
- * Under normal circumstances we want to release nport handle
- * during LOGO process to avoid nport handle leaks inside FW.
- * The exception is when LOGO is done while another PLOGI with
- * the same nport handle is waiting as might be the case here.
- * Note: there is always a possibily of a race where session
- * deletion has already started for other reasons (e.g. ACL
- * removal) and now PLOGI arrives:
- * 1. if PLOGI arrived in FW after nport handle has been freed,
- * FW must have assigned this PLOGI a new/same handle and we
- * can proceed ACK'ing it as usual when session deletion
- * completes.
- * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
- * bit reached it, the handle has now been released. We'll
- * get an error when we ACK this PLOGI. Nothing will be sent
- * back to initiator. Initiator should eventually retry
- * PLOGI and situation will correct itself.
- */
- sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
- (sess->d_id.b24 == port_id.b24));
-
- ql_dbg(ql_dbg_disc, vha, 0x20f9,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__, sess->port_name);
-
-
- qlt_schedule_sess_for_deletion_lock(sess);
+ if (IS_SW_RESV_ADDR(port_id)) {
+ res = 1;
break;
}
- break;
-
- case ELS_PRLI:
wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
if (wwn) {
@@ -4782,17 +4816,51 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
}
if (conflict_sess) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
- "PRLI with conflicting sess %p port %8phC\n",
- conflict_sess, conflict_sess->port_name);
- qlt_send_term_imm_notif(vha, iocb, 1);
- res = 0;
- break;
+ switch (conflict_sess->disc_state) {
+ case DSC_DELETED:
+ case DSC_DELETE_PEND:
+ break;
+ default:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
+ "PRLI with conflicting sess %p port %8phC\n",
+ conflict_sess, conflict_sess->port_name);
+ conflict_sess->fw_login_state =
+ DSC_LS_PORT_UNAVAIL;
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ res = 0;
+ break;
+ }
}
if (sess != NULL) {
- if (sess->fw_login_state != DSC_LS_PLOGI_PEND &&
- sess->fw_login_state != DSC_LS_PLOGI_COMP) {
+ bool delete = false;
+ spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
+ switch (sess->fw_login_state) {
+ case DSC_LS_PLOGI_PEND:
+ case DSC_LS_PLOGI_COMP:
+ case DSC_LS_PRLI_COMP:
+ break;
+ default:
+ delete = true;
+ break;
+ }
+
+ switch (sess->disc_state) {
+ case DSC_LOGIN_PEND:
+ case DSC_GPDB:
+ case DSC_GPSC:
+ case DSC_UPD_FCPORT:
+ case DSC_LOGIN_COMPLETE:
+ case DSC_ADISC:
+ delete = false;
+ break;
+ default:
+ break;
+ }
+
+ if (delete) {
+ spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
+ flags);
/*
* Impatient initiator sent PRLI before last
* PLOGI could finish. Will force him to re-try,
@@ -4803,6 +4871,8 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
sess);
qlt_send_term_imm_notif(vha, iocb, 1);
res = 0;
+ spin_lock_irqsave(&tgt->ha->tgt.sess_lock,
+ flags);
break;
}
@@ -4826,6 +4896,8 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
sess->port_type = FCT_INITIATOR;
else
sess->port_type = FCT_TARGET;
+
+ spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
}
res = 1; /* send notify ack */
@@ -4863,7 +4935,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
res = 1;
break;
}
- /* drop through */
+ /* fall through */
case ELS_LOGO:
case ELS_PRLO:
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
@@ -4892,7 +4964,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
} else {
/* cmd did not go to upper layer. */
if (sess) {
- qlt_schedule_sess_for_deletion_lock(sess);
+ qlt_schedule_sess_for_deletion(sess);
res = 0;
}
/* else logo will be ack */
@@ -4930,6 +5002,10 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
break;
}
+ ql_dbg(ql_dbg_disc, vha, 0xf026,
+ "qla_target(%d): Exit ELS opcode: 0x%02x res %d\n",
+ vha->vp_idx, iocb->u.isp24.status_subcode, res);
+
return res;
}
@@ -5320,7 +5396,6 @@ qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair,
struct atio_from_isp *atio, uint8_t ha_locked)
{
struct qla_hw_data *ha = vha->hw;
- uint16_t status;
unsigned long flags;
if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
@@ -5328,8 +5403,7 @@ qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair,
if (!ha_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
- status = temp_sam_status;
- qlt_send_busy(qpair, atio, status);
+ qlt_send_busy(qpair, atio, qla_sam_status);
if (!ha_locked)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -5344,7 +5418,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
struct qla_hw_data *ha = vha->hw;
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
int rc;
- unsigned long flags;
+ unsigned long flags = 0;
if (unlikely(tgt == NULL)) {
ql_dbg(ql_dbg_tgt, vha, 0x3064,
@@ -5368,8 +5442,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
"sending QUEUE_FULL\n", vha->vp_idx);
if (!ha_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
- qlt_send_busy(ha->base_qpair, atio,
- SAM_STAT_TASK_SET_FULL);
+ qlt_send_busy(ha->base_qpair, atio, qla_sam_status);
if (!ha_locked)
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
@@ -5388,42 +5461,37 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
rc = qlt_handle_task_mgmt(vha, atio);
}
if (unlikely(rc != 0)) {
- if (rc == -ESRCH) {
- if (!ha_locked)
- spin_lock_irqsave(&ha->hardware_lock,
- flags);
-
-#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
- qlt_send_busy(ha->base_qpair, atio,
- SAM_STAT_BUSY);
-#else
+ if (!ha_locked)
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ switch (rc) {
+ case -ENODEV:
+ ql_dbg(ql_dbg_tgt, vha, 0xe05f,
+ "qla_target: Unable to send command to target\n");
+ break;
+ case -EBADF:
+ ql_dbg(ql_dbg_tgt, vha, 0xe05f,
+ "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
qlt_send_term_exchange(ha->base_qpair, NULL,
atio, 1, 0);
-#endif
- if (!ha_locked)
- spin_unlock_irqrestore(
- &ha->hardware_lock, flags);
- } else {
- if (tgt->tgt_stop) {
- ql_dbg(ql_dbg_tgt, vha, 0xe059,
- "qla_target: Unable to send "
- "command to target for req, "
- "ignoring.\n");
- } else {
- ql_dbg(ql_dbg_tgt, vha, 0xe05a,
- "qla_target(%d): Unable to send "
- "command to target, sending BUSY "
- "status.\n", vha->vp_idx);
- if (!ha_locked)
- spin_lock_irqsave(
- &ha->hardware_lock, flags);
- qlt_send_busy(ha->base_qpair,
- atio, SAM_STAT_BUSY);
- if (!ha_locked)
- spin_unlock_irqrestore(
- &ha->hardware_lock, flags);
- }
+ break;
+ case -EBUSY:
+ ql_dbg(ql_dbg_tgt, vha, 0xe060,
+ "qla_target(%d): Unable to send command to target, sending BUSY status\n",
+ vha->vp_idx);
+ qlt_send_busy(ha->base_qpair, atio,
+ tc_sam_status);
+ break;
+ default:
+ ql_dbg(ql_dbg_tgt, vha, 0xe060,
+ "qla_target(%d): Unable to send command to target, sending BUSY status\n",
+ vha->vp_idx);
+ qlt_send_busy(ha->base_qpair, atio,
+ qla_sam_status);
+ break;
}
+ if (!ha_locked)
+ spin_unlock_irqrestore(&ha->hardware_lock,
+ flags);
}
break;
@@ -5506,27 +5574,31 @@ static void qlt_response_pkt(struct scsi_qla_host *vha,
rc = qlt_handle_cmd_for_atio(vha, atio);
if (unlikely(rc != 0)) {
- if (rc == -ESRCH) {
-#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
- qlt_send_busy(rsp->qpair, atio, 0);
-#else
- qlt_send_term_exchange(rsp->qpair, NULL, atio, 1, 0);
-#endif
- } else {
- if (tgt->tgt_stop) {
- ql_dbg(ql_dbg_tgt, vha, 0xe05f,
- "qla_target: Unable to send "
- "command to target, sending TERM "
- "EXCHANGE for rsp\n");
- qlt_send_term_exchange(rsp->qpair, NULL,
- atio, 1, 0);
- } else {
- ql_dbg(ql_dbg_tgt, vha, 0xe060,
- "qla_target(%d): Unable to send "
- "command to target, sending BUSY "
- "status\n", vha->vp_idx);
- qlt_send_busy(rsp->qpair, atio, 0);
- }
+ switch (rc) {
+ case -ENODEV:
+ ql_dbg(ql_dbg_tgt, vha, 0xe05f,
+ "qla_target: Unable to send command to target\n");
+ break;
+ case -EBADF:
+ ql_dbg(ql_dbg_tgt, vha, 0xe05f,
+ "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
+ qlt_send_term_exchange(rsp->qpair, NULL,
+ atio, 1, 0);
+ break;
+ case -EBUSY:
+ ql_dbg(ql_dbg_tgt, vha, 0xe060,
+ "qla_target(%d): Unable to send command to target, sending BUSY status\n",
+ vha->vp_idx);
+ qlt_send_busy(rsp->qpair, atio,
+ tc_sam_status);
+ break;
+ default:
+ ql_dbg(ql_dbg_tgt, vha, 0xe060,
+ "qla_target(%d): Unable to send command to target, sending BUSY status\n",
+ vha->vp_idx);
+ qlt_send_busy(rsp->qpair, atio,
+ qla_sam_status);
+ break;
}
}
}
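Both converted paths above now funnel every failure from qlt_handle_cmd_for_atio()/qlt_handle_task_mgmt() through the same rc-to-action table. A minimal sketch of that shared dispatch, pulled out as a hypothetical helper (qlt_dispatch_cmd_error is not a name in the patch; tc_sam_status and qla_sam_status are the module-level status codes the patch introduces):

	static void qlt_dispatch_cmd_error(struct scsi_qla_host *vha,
					   struct qla_qpair *qpair,
					   struct atio_from_isp *atio, int rc)
	{
		switch (rc) {
		case -ENODEV:	/* session being torn down: log and drop */
			break;
		case -EBADF:	/* stale exchange: terminate it explicitly */
			qlt_send_term_exchange(qpair, NULL, atio, 1, 0);
			break;
		case -EBUSY:	/* transient shortage: initiator should retry */
			qlt_send_busy(qpair, atio, tc_sam_status);
			break;
		default:	/* anything unclassified also maps to BUSY */
			qlt_send_busy(qpair, atio, qla_sam_status);
			break;
		}
	}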
@@ -5755,7 +5827,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
unsigned long flags;
u8 newfcport = 0;
- fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
+ fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (!fcport) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
"qla_target(%d): Allocation of tmp FC port failed",
@@ -5784,6 +5856,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
tfcp->port_type = fcport->port_type;
tfcp->supported_classes = fcport->supported_classes;
tfcp->flags |= fcport->flags;
+ tfcp->scan_state = QLA_FCPORT_FOUND;
del = fcport;
fcport = tfcp;
@@ -6445,18 +6518,21 @@ qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
qlt_add_target(ha, vha);
}
-void
-qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
+u8
+qlt_rff_id(struct scsi_qla_host *vha)
{
+ u8 fc4_feature = 0;
/*
* FC-4 Feature bit 0 indicates target functionality to the name server.
*/
if (qla_tgt_mode_enabled(vha)) {
- ct_req->req.rff_id.fc4_feature = BIT_0;
+ fc4_feature = BIT_0;
} else if (qla_ini_mode_enabled(vha)) {
- ct_req->req.rff_id.fc4_feature = BIT_1;
+ fc4_feature = BIT_1;
} else if (qla_dual_mode_enabled(vha))
- ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
+ fc4_feature = BIT_0 | BIT_1;
+
+ return fc4_feature;
}
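With qlt_rff_id() now returning the FC-4 feature byte instead of writing into the CT request, the name-server code owns the store; a plausible call site (a sketch only, the actual caller lives in the GS code):

	/* when building the RFF_ID CT request: */
	ct_req->req.rff_id.fc4_feature = qlt_rff_id(vha);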
/*
@@ -6546,7 +6622,9 @@ void
qlt_24xx_config_rings(struct scsi_qla_host *vha)
{
struct qla_hw_data *ha = vha->hw;
- struct init_cb_24xx *icb;
+ struct qla_msix_entry *msix = &ha->msix_entries[2];
+ struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;
+
if (!QLA_TGT_MODE_ENABLED())
return;
@@ -6554,19 +6632,28 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha)
WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));
- icb = (struct init_cb_24xx *)ha->init_cb;
-
- if ((ql2xenablemsix != 0) && IS_ATIO_MSIX_CAPABLE(ha)) {
- struct qla_msix_entry *msix = &ha->msix_entries[2];
-
- icb->msix_atio = cpu_to_le16(msix->entry);
- ql_dbg(ql_dbg_init, vha, 0xf072,
- "Registering ICB vector 0x%x for atio que.\n",
- msix->entry);
- } else if (ql2xenablemsix == 0) {
- icb->firmware_options_2 |= cpu_to_le32(BIT_26);
- ql_dbg(ql_dbg_init, vha, 0xf07f,
- "Registering INTx vector for ATIO.\n");
+ if (ha->flags.msix_enabled) {
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ if (IS_QLA2071(ha)) {
+ /* 4 ports Baker: Enable Interrupt Handshake */
+ icb->msix_atio = 0;
+ icb->firmware_options_2 |= BIT_26;
+ } else {
+ icb->msix_atio = cpu_to_le16(msix->entry);
+ icb->firmware_options_2 &= ~BIT_26;
+ }
+ ql_dbg(ql_dbg_init, vha, 0xf072,
+ "Registering ICB vector 0x%x for atio que.\n",
+ msix->entry);
+ }
+ } else {
+ /* INTx|MSI */
+ if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ icb->msix_atio = 0;
+ icb->firmware_options_2 |= BIT_26;
+ ql_dbg(ql_dbg_init, vha, 0xf072,
+ "%s: Use INTx for ATIOQ.\n", __func__);
+ }
}
}
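The rewritten qlt_24xx_config_rings() only touches ISP83xx/ISP27xx parts; for those, the ATIO-queue interrupt routing above reduces to this decision table (an interpretation of the code, not patch text):

	/*
	 *   msix_enabled && !IS_QLA2071 : msix_atio = vector 2,
	 *                                 BIT_26 cleared (pure MSI-X)
	 *   msix_enabled &&  IS_QLA2071 : msix_atio = 0, BIT_26 set
	 *                                 (the 4-port part uses the
	 *                                 interrupt-handshake mode)
	 *   INTx or MSI                 : msix_atio = 0, BIT_26 set
	 */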
@@ -6574,6 +6661,7 @@ void
qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
{
struct qla_hw_data *ha = vha->hw;
+ u32 tmp;
if (!QLA_TGT_MODE_ENABLED())
return;
@@ -6625,6 +6713,14 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
/* Enable target PRLI control */
nv->firmware_options_2 |= cpu_to_le32(BIT_14);
+
+ if (IS_QLA25XX(ha)) {
+ /* Change Loop-prefer to Pt-Pt */
+ tmp = ~(BIT_4|BIT_5|BIT_6);
+ nv->firmware_options_2 &= cpu_to_le32(tmp);
+ tmp = P2P << 4;
+ nv->firmware_options_2 |= cpu_to_le32(tmp);
+ }
} else {
if (ha->tgt.saved_set) {
nv->exchange_count = ha->tgt.saved_exchange_count;
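The same connection-option update appears again in the 81xx stage1 hunk below. Spelled out on a host-endian copy (a sketch; the patch masks the little-endian value directly, which is equivalent, and this assumes bits 6:4 of firmware_options_2 carry the connection option, with P2P being the driver's point-to-point value):

	u32 opts = le32_to_cpu(nv->firmware_options_2);

	opts &= ~(BIT_4 | BIT_5 | BIT_6);	/* zero the option field */
	opts |= P2P << 4;			/* select point-to-point */
	nv->firmware_options_2 = cpu_to_le32(opts);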
@@ -6679,6 +6775,7 @@ void
qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
{
struct qla_hw_data *ha = vha->hw;
+ u32 tmp;
if (!QLA_TGT_MODE_ENABLED())
return;
@@ -6729,6 +6826,12 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
nv->host_p &= cpu_to_le32(~BIT_10);
/* Enable target PRLI control */
nv->firmware_options_2 |= cpu_to_le32(BIT_14);
+
+ /* Change Loop-prefer to Pt-Pt */
+ tmp = ~(BIT_4|BIT_5|BIT_6);
+ nv->firmware_options_2 &= cpu_to_le32(tmp);
+ tmp = P2P << 4;
+ nv->firmware_options_2 |= cpu_to_le32(tmp);
} else {
if (ha->tgt.saved_set) {
nv->exchange_count = ha->tgt.saved_exchange_count;
@@ -6991,20 +7094,14 @@ qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id)
{
- unsigned long flags;
- struct qla_hw_data *ha = vha->hw;
if (!vha->d_id.b24) {
- spin_lock_irqsave(&ha->vport_slock, flags);
vha->d_id = id;
qlt_update_vp_map(vha, SET_AL_PA);
- spin_unlock_irqrestore(&ha->vport_slock, flags);
} else if (vha->d_id.b24 != id.b24) {
- spin_lock_irqsave(&ha->vport_slock, flags);
qlt_update_vp_map(vha, RESET_AL_PA);
vha->d_id = id;
qlt_update_vp_map(vha, SET_AL_PA);
- spin_unlock_irqrestore(&ha->vport_slock, flags);
}
}
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index aba58d3848a6..bb67b5a284a8 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -993,7 +993,7 @@ struct qla_tgt_prm {
/* Check for Switch reserved address */
#define IS_SW_RESV_ADDR(_s_id) \
- ((_s_id.b.domain == 0xff) && (_s_id.b.area == 0xfc))
+ ((_s_id.b.domain == 0xff) && ((_s_id.b.area & 0xf0) == 0xf0))
#define QLA_TGT_XMIT_DATA 1
#define QLA_TGT_XMIT_STATUS 2
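The widened mask makes IS_SW_RESV_ADDR() cover the whole reserved fabric block (domain 0xff, area 0xf0-0xff) rather than only the 0xFFFCxx domain-controller range; a few illustrative port IDs (values chosen for illustration):

	/*
	 *   0xFFFC01  domain 0xff, area 0xfc -> reserved (old and new macro)
	 *   0xFFFFFC  domain 0xff, area 0xff -> reserved (new macro only)
	 *   0xFF0001  domain 0xff, area 0x00 -> not reserved
	 */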
@@ -1072,7 +1072,7 @@ extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
extern void qlt_async_event(uint16_t, struct scsi_qla_host *, uint16_t *);
extern void qlt_enable_vha(struct scsi_qla_host *);
extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *);
-extern void qlt_rff_id(struct scsi_qla_host *, struct ct_sns_req *);
+extern u8 qlt_rff_id(struct scsi_qla_host *);
extern void qlt_init_atio_q_entries(struct scsi_qla_host *);
extern void qlt_24xx_process_atio_queue(struct scsi_qla_host *, uint8_t);
extern void qlt_24xx_config_rings(struct scsi_qla_host *);
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 733e8dcccf5c..731ca0d8520a 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -526,7 +526,8 @@ qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
{
ql_dbg(ql_dbg_misc, vha, 0xd20c,
"%s: gethb(%x) [%lx]\n", __func__, ent->t268.buf_type, *len);
- if (ent->t268.buf_type == T268_BUF_TYPE_EXTD_TRACE) {
+ switch (ent->t268.buf_type) {
+ case T268_BUF_TYPE_EXTD_TRACE:
if (vha->hw->eft) {
if (buf) {
ent->t268.buf_size = EFT_SIZE;
@@ -538,10 +539,43 @@ qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
"%s: missing eft\n", __func__);
qla27xx_skip_entry(ent, buf);
}
- } else {
- ql_dbg(ql_dbg_misc, vha, 0xd02b,
+ break;
+ case T268_BUF_TYPE_EXCH_BUFOFF:
+ if (vha->hw->exchoffld_buf) {
+ if (buf) {
+ ent->t268.buf_size = vha->hw->exchoffld_size;
+ ent->t268.start_addr =
+ vha->hw->exchoffld_buf_dma;
+ }
+ qla27xx_insertbuf(vha->hw->exchoffld_buf,
+ vha->hw->exchoffld_size, buf, len);
+ } else {
+ ql_dbg(ql_dbg_misc, vha, 0xd028,
+ "%s: missing exch offld\n", __func__);
+ qla27xx_skip_entry(ent, buf);
+ }
+ break;
+ case T268_BUF_TYPE_EXTD_LOGIN:
+ if (vha->hw->exlogin_buf) {
+ if (buf) {
+ ent->t268.buf_size = vha->hw->exlogin_size;
+ ent->t268.start_addr =
+ vha->hw->exlogin_buf_dma;
+ }
+ qla27xx_insertbuf(vha->hw->exlogin_buf,
+ vha->hw->exlogin_size, buf, len);
+ } else {
+ ql_dbg(ql_dbg_misc, vha, 0xd028,
+ "%s: missing ext login\n", __func__);
+ qla27xx_skip_entry(ent, buf);
+ }
+ break;
+
+ default:
+ ql_dbg(ql_dbg_async, vha, 0xd02b,
"%s: unknown buffer %x\n", __func__, ent->t268.buf_type);
qla27xx_skip_entry(ent, buf);
+ break;
}
return false;
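Each of the three handled T268 buffer types above follows one shape; a generic sketch (hw_buf, hw_buf_size and hw_buf_dma are placeholders for the eft/exchoffld/exlogin triples):

	if (hw_buf) {
		if (buf) {		/* NULL buf means sizing-only pass */
			ent->t268.buf_size = hw_buf_size;
			ent->t268.start_addr = hw_buf_dma;
		}
		qla27xx_insertbuf(hw_buf, hw_buf_size, buf, len);
	} else {
		qla27xx_skip_entry(ent, buf);	/* buffer never allocated */
	}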
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index b6ec02b96d3d..549bef9afddd 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.00.00.02-k"
+#define QLA2XXX_VERSION "10.00.00.05-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 0
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 3f82ea1b72dc..aadfeaac3898 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -1635,16 +1635,13 @@ static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
return rc;
}
- lport->lport_loopid_map = vmalloc(sizeof(struct tcm_qla2xxx_fc_loopid) *
- 65536);
+ lport->lport_loopid_map = vzalloc(sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
if (!lport->lport_loopid_map) {
pr_err("Unable to allocate lport->lport_loopid_map of %zu bytes\n",
sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
btree_destroy32(&lport->lport_fcport_map);
return -ENOMEM;
}
- memset(lport->lport_loopid_map, 0, sizeof(struct tcm_qla2xxx_fc_loopid)
- * 65536);
pr_debug("qla2xxx: Allocated lport_loopid_map of %zu bytes\n",
sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
return 0;
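This hunk and the qla4xxx hunks that follow are all the same mechanical conversion; a generic before/after sketch (p, q, dev, n and dma are placeholders):

	/* before: allocate, then zero by hand */
	p = vmalloc(n);
	memset(p, 0, n);
	q = dma_alloc_coherent(dev, n, &dma, GFP_KERNEL);
	memset(q, 0, n);

	/* after: the zeroing variants fold both steps into one call */
	p = vzalloc(n);
	q = dma_zalloc_coherent(dev, n, &dma, GFP_KERNEL);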
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 5d6d158bbfd6..52b1a0bc93c9 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -153,15 +153,14 @@ int qla4xxx_get_sys_info(struct scsi_qla_host *ha)
dma_addr_t sys_info_dma;
int status = QLA_ERROR;
- sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
- &sys_info_dma, GFP_KERNEL);
+ sys_info = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
+ &sys_info_dma, GFP_KERNEL);
if (sys_info == NULL) {
DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
ha->host_no, __func__));
goto exit_get_sys_info_no_free;
}
- memset(sys_info, 0, sizeof(*sys_info));
/* Get flash sys info */
if (qla4xxx_get_flash(ha, sys_info_dma, FLASH_OFFSET_SYS_INFO,
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 1da04f323d38..bda2e64ee5ca 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -625,15 +625,14 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
uint32_t mbox_sts[MBOX_REG_COUNT];
int status = QLA_ERROR;
- init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(struct addr_ctrl_blk),
- &init_fw_cb_dma, GFP_KERNEL);
+ init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev,
+ sizeof(struct addr_ctrl_blk),
+ &init_fw_cb_dma, GFP_KERNEL);
if (init_fw_cb == NULL) {
DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n",
ha->host_no, __func__));
goto exit_init_fw_cb_no_free;
}
- memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
/* Get Initialize Firmware Control Block. */
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
@@ -710,9 +709,9 @@ int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
- init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(struct addr_ctrl_blk),
- &init_fw_cb_dma, GFP_KERNEL);
+ init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev,
+ sizeof(struct addr_ctrl_blk),
+ &init_fw_cb_dma, GFP_KERNEL);
if (init_fw_cb == NULL) {
printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no,
__func__);
@@ -720,7 +719,6 @@ int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
}
/* Get Initialize Firmware Control Block. */
- memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) !=
QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s: Failed to get init_fw_ctrl_blk\n",
@@ -1342,16 +1340,15 @@ int qla4xxx_about_firmware(struct scsi_qla_host *ha)
uint32_t mbox_sts[MBOX_REG_COUNT];
int status = QLA_ERROR;
- about_fw = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(struct about_fw_info),
- &about_fw_dma, GFP_KERNEL);
+ about_fw = dma_zalloc_coherent(&ha->pdev->dev,
+ sizeof(struct about_fw_info),
+ &about_fw_dma, GFP_KERNEL);
if (!about_fw) {
DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to alloc memory "
"for about_fw\n", __func__));
return status;
}
- memset(about_fw, 0, sizeof(struct about_fw_info));
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index e91abb327745..968bd85610f8 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -4050,15 +4050,14 @@ int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha)
dma_addr_t sys_info_dma;
int status = QLA_ERROR;
- sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
- &sys_info_dma, GFP_KERNEL);
+ sys_info = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
+ &sys_info_dma, GFP_KERNEL);
if (sys_info == NULL) {
DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
ha->host_no, __func__));
return status;
}
- memset(sys_info, 0, sizeof(*sys_info));
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 2b8a8ce2a431..82e889bbe0ed 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -2689,16 +2689,15 @@ qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
uint32_t rem = len;
struct nlattr *attr;
- init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(struct addr_ctrl_blk),
- &init_fw_cb_dma, GFP_KERNEL);
+ init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev,
+ sizeof(struct addr_ctrl_blk),
+ &init_fw_cb_dma, GFP_KERNEL);
if (!init_fw_cb) {
ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
__func__);
return -ENOMEM;
}
- memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
memset(&mbox_cmd, 0, sizeof(mbox_cmd));
memset(&mbox_sts, 0, sizeof(mbox_sts));
@@ -4196,15 +4195,14 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
sizeof(struct shadow_regs) +
MEM_ALIGN_VALUE +
(PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
- ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
- &ha->queues_dma, GFP_KERNEL);
+ ha->queues = dma_zalloc_coherent(&ha->pdev->dev, ha->queues_len,
+ &ha->queues_dma, GFP_KERNEL);
if (ha->queues == NULL) {
ql4_printk(KERN_WARNING, ha,
"Memory Allocation failed - queues.\n");
goto mem_alloc_error_exit;
}
- memset(ha->queues, 0, ha->queues_len);
/*
* As per RISC alignment requirements -- the bus-address must be a
diff --git a/drivers/scsi/scsi_common.c b/drivers/scsi/scsi_common.c
index 40bc616cf8ab..90349498f686 100644
--- a/drivers/scsi/scsi_common.c
+++ b/drivers/scsi/scsi_common.c
@@ -12,7 +12,7 @@
/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
* You may not alter any existing entry (although adding new ones is
- * encouraged once assigned by ANSI/INCITS T10
+ * encouraged once assigned by ANSI/INCITS T10).
*/
static const char *const scsi_device_types[] = {
"Direct-Access ",
@@ -39,7 +39,7 @@ static const char *const scsi_device_types[] = {
};
/**
- * scsi_device_type - Return 17 char string indicating device type.
+ * scsi_device_type - Return 17-char string indicating device type.
* @type: type number to look up
*/
const char *scsi_device_type(unsigned type)
@@ -59,7 +59,7 @@ EXPORT_SYMBOL(scsi_device_type);
* @scsilun: struct scsi_lun to be converted.
*
* Description:
- * Convert @scsilun from a struct scsi_lun to a four byte host byte-ordered
+ * Convert @scsilun from a struct scsi_lun to a four-byte host byte-ordered
* integer, and return the result. The caller must check for
* truncation before using this function.
*
@@ -98,7 +98,7 @@ EXPORT_SYMBOL(scsilun_to_int);
* back into the lun value.
*
* Notes:
- * Given an integer : 0x0b03d204, this function returns a
+ * Given an integer : 0x0b03d204, this function returns a
* struct scsi_lun of: d2 04 0b 03 00 00 00 00
*
*/
@@ -221,7 +221,7 @@ EXPORT_SYMBOL(scsi_sense_desc_find);
/**
* scsi_build_sense_buffer - build sense data in a buffer
- * @desc: Sense format (non zero == descriptor format,
+ * @desc: Sense format (non-zero == descriptor format,
* 0 == fixed format)
* @buf: Where to build sense data
* @key: Sense key
@@ -255,7 +255,7 @@ EXPORT_SYMBOL(scsi_build_sense_buffer);
* @info: 64-bit information value to be set
*
* Return value:
- * 0 on success or EINVAL for invalid sense buffer length
+ * 0 on success or -EINVAL for invalid sense buffer length
**/
int scsi_set_sense_information(u8 *buf, int buf_len, u64 info)
{
@@ -305,7 +305,7 @@ EXPORT_SYMBOL(scsi_set_sense_information);
* @cd: command/data bit
*
* Return value:
- * 0 on success or EINVAL for invalid sense buffer length
+ * 0 on success or -EINVAL for invalid sense buffer length
*/
int scsi_set_sense_field_pointer(u8 *buf, int buf_len, u16 fp, u8 bp, bool cd)
{
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index e4f037f0f38b..a5986dae9020 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -6,7 +6,7 @@
* anything out of the ordinary is seen.
* ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
*
- * Copyright (C) 2001 - 2016 Douglas Gilbert
+ * Copyright (C) 2001 - 2017 Douglas Gilbert
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -61,8 +61,8 @@
#include "scsi_logging.h"
/* make sure inq_product_rev string corresponds to this version */
-#define SDEBUG_VERSION "1.86"
-static const char *sdebug_version_date = "20160430";
+#define SDEBUG_VERSION "0187" /* format to fit INQUIRY revision field */
+static const char *sdebug_version_date = "20171202";
#define MY_NAME "scsi_debug"
@@ -93,6 +93,7 @@ static const char *sdebug_version_date = "20160430";
#define MISCOMPARE_VERIFY_ASC 0x1d
#define MICROCODE_CHANGED_ASCQ 0x1 /* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
+#define WRITE_ERROR_ASC 0xc
/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3
@@ -105,6 +106,7 @@ static const char *sdebug_version_date = "20160430";
* (id 0) containing 1 logical unit (lun 0). That is 1 device.
*/
#define DEF_ATO 1
+#define DEF_CDB_LEN 10
#define DEF_JDELAY 1 /* if > 0 unit is a jiffy */
#define DEF_DEV_SIZE_MB 8
#define DEF_DIF 0
@@ -161,12 +163,14 @@ static const char *sdebug_version_date = "20160430";
#define SDEBUG_OPT_N_WCE 0x1000
#define SDEBUG_OPT_RESET_NOISE 0x2000
#define SDEBUG_OPT_NO_CDB_NOISE 0x4000
+#define SDEBUG_OPT_HOST_BUSY 0x8000
#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
SDEBUG_OPT_RESET_NOISE)
#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
SDEBUG_OPT_TRANSPORT_ERR | \
SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
- SDEBUG_OPT_SHORT_TRANSFER)
+ SDEBUG_OPT_SHORT_TRANSFER | \
+ SDEBUG_OPT_HOST_BUSY)
/* When "every_nth" > 0 then modulo "every_nth" commands:
* - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
* - a RECOVERED_ERROR is simulated on successful read and write
@@ -232,7 +236,7 @@ static const char *sdebug_version_date = "20160430";
#define F_M_ACCESS 0x800 /* media access */
#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
-#define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW)
+#define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
#define FF_SA (F_SA_HIGH | F_SA_LOW)
#define SDEBUG_MAX_PARTS 4
@@ -263,12 +267,18 @@ struct sdebug_host_info {
#define to_sdebug_host(d) \
container_of(d, struct sdebug_host_info, dev)
+enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
+ SDEB_DEFER_WQ = 2};
+
struct sdebug_defer {
struct hrtimer hrt;
struct execute_work ew;
int sqa_idx; /* index of sdebug_queue array */
int qc_idx; /* index of sdebug_queued_cmd array within sqa_idx */
int issuing_cpu;
+ bool init_hrt;
+ bool init_wq;
+ enum sdeb_defer_type defer_t;
};
struct sdebug_queued_cmd {
@@ -282,6 +292,7 @@ struct sdebug_queued_cmd {
unsigned int inj_dif:1;
unsigned int inj_dix:1;
unsigned int inj_short:1;
+ unsigned int inj_host_busy:1;
};
struct sdebug_queue {
@@ -304,8 +315,8 @@ struct opcode_info_t {
u32 flags; /* OR-ed set of SDEB_F_* */
int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
const struct opcode_info_t *arrp; /* num_attached elements or NULL */
- u8 len_mask[16]; /* len=len_mask[0], then mask for cdb[1]... */
- /* ignore cdb bytes after position 15 */
+ u8 len_mask[16]; /* len_mask[0]-->cdb_len, then mask for cdb */
+ /* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
@@ -322,12 +333,12 @@ enum sdeb_opcode_index {
SDEB_I_READ = 9, /* 6, 10, 12, 16 */
SDEB_I_WRITE = 10, /* 6, 10, 12, 16 */
SDEB_I_START_STOP = 11,
- SDEB_I_SERV_ACT_IN = 12, /* 12, 16 */
- SDEB_I_SERV_ACT_OUT = 13, /* 12, 16 */
+ SDEB_I_SERV_ACT_IN_16 = 12, /* add ...SERV_ACT_IN_12 if needed */
+ SDEB_I_SERV_ACT_OUT_16 = 13, /* add ...SERV_ACT_OUT_12 if needed */
SDEB_I_MAINT_IN = 14,
SDEB_I_MAINT_OUT = 15,
SDEB_I_VERIFY = 16, /* 10 only */
- SDEB_I_VARIABLE_LEN = 17,
+ SDEB_I_VARIABLE_LEN = 17, /* READ(32), WRITE(32), WR_SCAT(32) */
SDEB_I_RESERVE = 18, /* 6, 10 */
SDEB_I_RELEASE = 19, /* 6, 10 */
SDEB_I_ALLOW_REMOVAL = 20, /* PREVENT ALLOW MEDIUM REMOVAL */
@@ -340,7 +351,7 @@ enum sdeb_opcode_index {
SDEB_I_WRITE_SAME = 27, /* 10, 16 */
SDEB_I_SYNC_CACHE = 28, /* 10 only */
SDEB_I_COMP_WRITE = 29,
- SDEB_I_LAST_ELEMENT = 30, /* keep this last */
+ SDEB_I_LAST_ELEMENT = 30, /* keep this last (previous + 1) */
};
@@ -372,12 +383,12 @@ static const unsigned char opcode_ind_arr[256] = {
0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
0, 0, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN, SDEB_I_SERV_ACT_OUT,
+ 0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
SDEB_I_MAINT_OUT, 0, 0, 0,
- SDEB_I_READ, SDEB_I_SERV_ACT_OUT, SDEB_I_WRITE, SDEB_I_SERV_ACT_IN,
- 0, 0, 0, 0,
+ SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
+ 0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
@@ -396,6 +407,7 @@ static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
@@ -409,72 +421,81 @@ static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
-static const struct opcode_info_t msense_iarr[1] = {
+/*
+ * The following are overflow arrays for cdbs that "hit" the same index in
+ * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
+ * should be placed in opcode_info_arr[], the others should be placed here.
+ */
+static const struct opcode_info_t msense_iarr[] = {
{0, 0x1a, 0, F_D_IN, NULL, NULL,
{6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
-static const struct opcode_info_t mselect_iarr[1] = {
+static const struct opcode_info_t mselect_iarr[] = {
{0, 0x15, 0, F_D_OUT, NULL, NULL,
{6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
-static const struct opcode_info_t read_iarr[3] = {
- {0, 0x28, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(10) */
- {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
+static const struct opcode_info_t read_iarr[] = {
+ {0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
+ {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
0, 0, 0, 0} },
- {0, 0x8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL, /* READ(6) */
+ {0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
{6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0xa8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(12) */
- {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
+ {0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
+ {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
0xc7, 0, 0, 0, 0} },
};
-static const struct opcode_info_t write_iarr[3] = {
- {0, 0x2a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 10 */
- {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
- 0, 0, 0, 0} },
- {0, 0xa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 6 */
- {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0xaa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL, /* 12 */
- {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
- 0xc7, 0, 0, 0, 0} },
+static const struct opcode_info_t write_iarr[] = {
+ {0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(10) */
+ NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
+ 0, 0, 0, 0, 0, 0} },
+ {0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(6) */
+ NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0} },
+ {0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(12) */
+ NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xbf, 0xc7, 0, 0, 0, 0} },
};
-static const struct opcode_info_t sa_in_iarr[1] = {
+static const struct opcode_info_t sa_in_16_iarr[] = {
{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
{16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0, 0xc7} },
+ 0xff, 0xff, 0xff, 0, 0xc7} }, /* GET LBA STATUS(16) */
};
-static const struct opcode_info_t vl_iarr[1] = { /* VARIABLE LENGTH */
- {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_DIRECT_IO, resp_write_dt0,
- NULL, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0xb, 0xfa,
+static const struct opcode_info_t vl_iarr[] = { /* VARIABLE LENGTH */
+ {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
+ NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
0, 0xff, 0xff, 0xff, 0xff} }, /* WRITE(32) */
+ {0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
+ NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
+ 0, 0xff, 0xff, 0x0, 0x0} }, /* WRITE SCATTERED(32) */
};
-static const struct opcode_info_t maint_in_iarr[2] = {
+static const struct opcode_info_t maint_in_iarr[] = { /* MAINT IN */
{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
{12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
- 0xc7, 0, 0, 0, 0} },
+ 0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
{12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
- 0, 0} },
+ 0, 0} }, /* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
};
-static const struct opcode_info_t write_same_iarr[1] = {
- {0, 0x93, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_16, NULL,
+static const struct opcode_info_t write_same_iarr[] = {
+ {0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
{16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0x1f, 0xc7} },
+ 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* WRITE SAME(16) */
};
-static const struct opcode_info_t reserve_iarr[1] = {
- {0, 0x16, 0, F_D_OUT, NULL, NULL, /* RESERVE(6) */
+static const struct opcode_info_t reserve_iarr[] = {
+ {0, 0x16, 0, F_D_OUT, NULL, NULL, /* RESERVE(6) */
{6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
-static const struct opcode_info_t release_iarr[1] = {
- {0, 0x17, 0, F_D_OUT, NULL, NULL, /* RELEASE(6) */
+static const struct opcode_info_t release_iarr[] = {
+ {0, 0x17, 0, F_D_OUT, NULL, NULL, /* RELEASE(6) */
{6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
@@ -484,57 +505,67 @@ static const struct opcode_info_t release_iarr[1] = {
* REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
/* 0 */
- {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,
+ {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* unknown opcodes */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL,
+ {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
{6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
{0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
{12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
- 0, 0} },
+ 0, 0} }, /* REPORT LUNS */
{0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
{6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
{0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
{6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {1, 0x5a, 0, F_D_IN, resp_mode_sense, msense_iarr,
- {10, 0xf8, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
- 0} },
- {1, 0x55, 0, F_D_OUT, resp_mode_select, mselect_iarr,
- {10, 0xf1, 0, 0, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
- {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,
+/* 5 */
+ {ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN, /* MODE SENSE(10) */
+ resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
+ 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
+ {ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT, /* MODE SELECT(10) */
+ resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
+ 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
+ {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL, /* LOG SENSE */
{10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
0, 0, 0} },
- {0, 0x25, 0, F_D_IN, resp_readcap, NULL,
+ {0, 0x25, 0, F_D_IN, resp_readcap, NULL, /* READ CAPACITY(10) */
{10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
0, 0} },
- {3, 0x88, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, read_iarr,
- {16, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0x9f, 0xc7} }, /* READ(16) */
+ {ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
+ resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
- {3, 0x8a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, write_iarr,
- {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0x9f, 0xc7} }, /* WRITE(16) */
+ {ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
+ resp_write_dt0, write_iarr, /* WRITE(16) */
+ {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} }, /* WRITE(16) */
{0, 0x1b, 0, 0, resp_start_stop, NULL, /* START STOP UNIT */
{6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {1, 0x9e, 0x10, F_SA_LOW | F_D_IN, resp_readcap16, sa_in_iarr,
- {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0x1, 0xc7} }, /* READ CAPACITY(16) */
- {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* SA OUT */
- {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {2, 0xa3, 0xa, F_SA_LOW | F_D_IN, resp_report_tgtpgs, maint_in_iarr,
- {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0,
- 0} },
+ {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
+ resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
+ {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
+ {0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
+ NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xc7} }, /* SA_OUT(16), WRITE SCAT(16) */
+ {ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
+ resp_report_tgtpgs, /* MAINT IN, REPORT TARGET PORT GROUPS */
+ maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
+ 0xff, 0, 0xc7, 0, 0, 0, 0} },
+/* 15 */
{0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0x2f, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, NULL, NULL, /* VERIFY(10) */
+ {0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, NULL, NULL, /* VERIFY(10) */
{10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7,
0, 0, 0, 0, 0, 0} },
- {1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0,
- vl_iarr, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0,
- 0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */
- {1, 0x56, 0, F_D_OUT, NULL, reserve_iarr, /* RESERVE(10) */
+ {ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
+ resp_read_dt0, vl_iarr, /* VARIABLE LENGTH, READ(32) */
+ {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
+ 0xff, 0xff} },
+ {ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
+ NULL, reserve_iarr, /* RESERVE(10) <no response function> */
{10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
0} },
- {1, 0x57, 0, F_D_OUT, NULL, release_iarr, /* RELEASE(10) */
+ {ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
+ NULL, release_iarr, /* RELEASE(10) <no response function> */
{10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
0} },
/* 20 */
@@ -546,23 +577,25 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
{0, 0x1d, F_D_OUT, 0, NULL, NULL, /* SEND DIAGNOSTIC */
{6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
- {0, 0x42, 0, F_D_OUT | FF_DIRECT_IO, resp_unmap, NULL, /* UNMAP */
- {10, 0x1, 0, 0, 0, 0, 0x1f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
- {0, 0x53, 0, F_D_IN | F_D_OUT | FF_DIRECT_IO, resp_xdwriteread_10,
- NULL, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7,
- 0, 0, 0, 0, 0, 0} },
+ {0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
+ {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
+/* 25 */
+ {0, 0x53, 0, F_D_IN | F_D_OUT | FF_MEDIA_IO, resp_xdwriteread_10,
+ NULL, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
+ 0, 0, 0, 0, 0, 0} }, /* XDWRITEREAD(10) */
{0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
{10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
0, 0, 0, 0} }, /* WRITE_BUFFER */
- {1, 0x41, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_10,
- write_same_iarr, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff,
- 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
- {0, 0x35, 0, F_DELAY_OVERR | FF_DIRECT_IO, NULL, NULL, /* SYNC_CACHE */
- {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
+ {ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
+ resp_write_same_10, write_same_iarr, /* WRITE SAME(10) */
+ {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
+ 0, 0, 0, 0, 0} },
+ {0, 0x35, 0, F_DELAY_OVERR | FF_MEDIA_IO, NULL, NULL, /* SYNC_CACHE */
+ {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
0, 0, 0, 0} },
- {0, 0x89, 0, F_D_OUT | FF_DIRECT_IO, resp_comp_write, NULL,
+ {0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
{16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
- 0, 0xff, 0x1f, 0xc7} }, /* COMPARE AND WRITE */
+ 0, 0xff, 0x3f, 0xc7} }, /* COMPARE AND WRITE */
/* 30 */
{0xff, 0, 0, 0, NULL, NULL, /* terminating element */
@@ -571,6 +604,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
static int sdebug_add_host = DEF_NUM_HOST;
static int sdebug_ato = DEF_ATO;
+static int sdebug_cdb_len = DEF_CDB_LEN;
static int sdebug_jdelay = DEF_JDELAY; /* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
static int sdebug_dif = DEF_DIF;
@@ -797,6 +831,61 @@ static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
/* return -ENOTTY; // correct return but upsets fdisk */
}
+static void config_cdb_len(struct scsi_device *sdev)
+{
+ switch (sdebug_cdb_len) {
+ case 6: /* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
+ sdev->use_10_for_rw = false;
+ sdev->use_16_for_rw = false;
+ sdev->use_10_for_ms = false;
+ break;
+ case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
+ sdev->use_10_for_rw = true;
+ sdev->use_16_for_rw = false;
+ sdev->use_10_for_ms = false;
+ break;
+ case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
+ sdev->use_10_for_rw = true;
+ sdev->use_16_for_rw = false;
+ sdev->use_10_for_ms = true;
+ break;
+ case 16:
+ sdev->use_10_for_rw = false;
+ sdev->use_16_for_rw = true;
+ sdev->use_10_for_ms = true;
+ break;
+ case 32: /* No knobs to suggest this so same as 16 for now */
+ sdev->use_10_for_rw = false;
+ sdev->use_16_for_rw = true;
+ sdev->use_10_for_ms = true;
+ break;
+ default:
+ pr_warn("unexpected cdb_len=%d, force to 10\n",
+ sdebug_cdb_len);
+ sdev->use_10_for_rw = true;
+ sdev->use_16_for_rw = false;
+ sdev->use_10_for_ms = false;
+ sdebug_cdb_len = 10;
+ break;
+ }
+}
+
+static void all_config_cdb_len(void)
+{
+ struct sdebug_host_info *sdbg_host;
+ struct Scsi_Host *shost;
+ struct scsi_device *sdev;
+
+ spin_lock(&sdebug_host_list_lock);
+ list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
+ shost = sdbg_host->shost;
+ shost_for_each_device(sdev, shost) {
+ config_cdb_len(sdev);
+ }
+ }
+ spin_unlock(&sdebug_host_list_lock);
+}
+
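The cdb_len driver attribute (its store routine is truncated at the end of this section) presumably ties these two helpers together; a sketch of what that store path plausibly does, under that assumption:

	static ssize_t cdb_len_store_sketch(struct device_driver *ddp,
					    const char *buf, size_t count)
	{
		int n;

		if (kstrtoint(buf, 0, &n) || n < 0)
			return -EINVAL;
		sdebug_cdb_len = n;	/* config_cdb_len() clamps odd values */
		all_config_cdb_len();	/* push new hints to every sdev */
		return count;
	}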
static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
struct sdebug_host_info *sdhp;
@@ -955,7 +1044,7 @@ static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
static char sdebug_inq_vendor_id[9] = "Linux ";
static char sdebug_inq_product_id[17] = "scsi_debug ";
-static char sdebug_inq_product_rev[5] = "0186"; /* version less '.' */
+static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
@@ -1411,6 +1500,8 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
memcpy(&arr[8], sdebug_inq_vendor_id, 8);
memcpy(&arr[16], sdebug_inq_product_id, 16);
memcpy(&arr[32], sdebug_inq_product_rev, 4);
+ /* Use Vendor Specific area to place driver date in ASCII hex */
+ memcpy(&arr[36], sdebug_version_date, 8);
/* version descriptors (2 bytes each) follow */
put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */
put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
@@ -1900,7 +1991,7 @@ static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
{ /* Control mode page for mode_sense */
unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
- 0, 0, 0, 0};
+ 0, 0, 0, 0};
unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
0, 0, 0x2, 0x4b};
@@ -2077,13 +2168,13 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
len = resp_disconnect_pg(ap, pcontrol, target);
offset += len;
break;
- case 0x3: /* Format device page, direct access */
+ case 0x3: /* Format device page, direct access */
if (is_disk) {
len = resp_format_pg(ap, pcontrol, target);
offset += len;
} else
bad_pcode = true;
- break;
+ break;
case 0x8: /* Caching page, direct access */
if (is_disk) {
len = resp_caching_pg(ap, pcontrol, target);
@@ -2099,7 +2190,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
if ((subpcode > 0x2) && (subpcode < 0xff)) {
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
return check_condition_result;
- }
+ }
len = 0;
if ((0x0 == subpcode) || (0xff == subpcode))
len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
@@ -2136,7 +2227,7 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
} else {
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
return check_condition_result;
- }
+ }
break;
default:
bad_pcode = true;
@@ -2172,8 +2263,8 @@ static int resp_mode_select(struct scsi_cmnd *scp,
mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
return check_condition_result;
}
- res = fetch_to_dev_buffer(scp, arr, param_len);
- if (-1 == res)
+ res = fetch_to_dev_buffer(scp, arr, param_len);
+ if (-1 == res)
return DID_ERROR << 16;
else if (sdebug_verbose && (res < param_len))
sdev_printk(KERN_INFO, scp->device,
@@ -2239,8 +2330,8 @@ static int resp_temp_l_pg(unsigned char * arr)
0x0, 0x1, 0x3, 0x2, 0x0, 65,
};
- memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
- return sizeof(temp_l_pg);
+ memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
+ return sizeof(temp_l_pg);
}
static int resp_ie_l_pg(unsigned char * arr)
@@ -2248,18 +2339,18 @@ static int resp_ie_l_pg(unsigned char * arr)
unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
};
- memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
+ memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
if (iec_m_pg[2] & 0x4) { /* TEST bit set */
arr[4] = THRESHOLD_EXCEEDED;
arr[5] = 0xff;
}
- return sizeof(ie_l_pg);
+ return sizeof(ie_l_pg);
}
#define SDEBUG_MAX_LSENSE_SZ 512
-static int resp_log_sense(struct scsi_cmnd * scp,
- struct sdebug_dev_info * devip)
+static int resp_log_sense(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
{
int ppc, sp, pcode, subpcode, alloc_len, len, n;
unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
@@ -2353,8 +2444,8 @@ static int check_device_access_params(struct scsi_cmnd *scp,
}
/* Returns number of bytes copied or -1 if error. */
-static int do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num,
- bool do_write)
+static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
+ u32 num, bool do_write)
{
int ret;
u64 block, rest = 0;
@@ -2380,14 +2471,15 @@ static int do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num,
ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
fake_storep + (block * sdebug_sector_size),
- (num - rest) * sdebug_sector_size, 0, do_write);
+ (num - rest) * sdebug_sector_size, sg_skip, do_write);
if (ret != (num - rest) * sdebug_sector_size)
return ret;
if (rest) {
ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
fake_storep, rest * sdebug_sector_size,
- (num - rest) * sdebug_sector_size, do_write);
+ sg_skip + ((num - rest) * sdebug_sector_size),
+ do_write);
}
return ret;
@@ -2648,7 +2740,7 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
}
}
- ret = do_device_access(scp, lba, num, false);
+ ret = do_device_access(scp, 0, lba, num, false);
read_unlock_irqrestore(&atomic_rw, iflags);
if (unlikely(ret == -1))
return DID_ERROR << 16;
@@ -2936,7 +3028,7 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
}
}
- ret = do_device_access(scp, lba, num, true);
+ ret = do_device_access(scp, 0, lba, num, true);
if (unlikely(scsi_debug_lbp()))
map_region(lba, num);
write_unlock_irqrestore(&atomic_rw, iflags);
@@ -2970,6 +3062,173 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
return 0;
}
+/*
+ * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
+ * No READ GATHERED yet (requires bidi or long cdb holding gather list).
+ */
+static int resp_write_scat(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ u8 *cmd = scp->cmnd;
+ u8 *lrdp = NULL;
+ u8 *up;
+ u8 wrprotect;
+ u16 lbdof, num_lrd, k;
+ u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
+ u32 lb_size = sdebug_sector_size;
+ u32 ei_lba;
+ u64 lba;
+ unsigned long iflags;
+ int ret, res;
+ bool is_16;
+ static const u32 lrd_size = 32; /* + parameter list header size */
+
+ if (cmd[0] == VARIABLE_LENGTH_CMD) {
+ is_16 = false;
+ wrprotect = (cmd[10] >> 5) & 0x7;
+ lbdof = get_unaligned_be16(cmd + 12);
+ num_lrd = get_unaligned_be16(cmd + 16);
+ bt_len = get_unaligned_be32(cmd + 28);
+ } else { /* that leaves WRITE SCATTERED(16) */
+ is_16 = true;
+ wrprotect = (cmd[2] >> 5) & 0x7;
+ lbdof = get_unaligned_be16(cmd + 4);
+ num_lrd = get_unaligned_be16(cmd + 8);
+ bt_len = get_unaligned_be32(cmd + 10);
+ if (unlikely(have_dif_prot)) {
+ if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
+ wrprotect) {
+ mk_sense_invalid_opcode(scp);
+ return illegal_condition_result;
+ }
+ if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
+ sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
+ wrprotect == 0)
+ sdev_printk(KERN_ERR, scp->device,
+ "Unprotected WR to DIF device\n");
+ }
+ }
+ if ((num_lrd == 0) || (bt_len == 0))
+ return 0; /* T10 says these do-nothings are not errors */
+ if (lbdof == 0) {
+ if (sdebug_verbose)
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: %s: LB Data Offset field bad\n",
+ my_name, __func__);
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ return illegal_condition_result;
+ }
+ lbdof_blen = lbdof * lb_size;
+ if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
+ if (sdebug_verbose)
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: %s: LBA range descriptors don't fit\n",
+ my_name, __func__);
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ return illegal_condition_result;
+ }
+ lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
+ if (lrdp == NULL)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ if (sdebug_verbose)
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
+ my_name, __func__, lbdof_blen);
+ res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
+ if (res == -1) {
+ ret = DID_ERROR << 16;
+ goto err_out;
+ }
+
+ write_lock_irqsave(&atomic_rw, iflags);
+ sg_off = lbdof_blen;
+ /* Spec says Buffer xfer Length field in number of LBs in dout */
+ cum_lb = 0;
+ for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
+ lba = get_unaligned_be64(up + 0);
+ num = get_unaligned_be32(up + 8);
+ if (sdebug_verbose)
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: %s: k=%d LBA=0x%llx num=%u sg_off=%u\n",
+ my_name, __func__, k, lba, num, sg_off);
+ if (num == 0)
+ continue;
+ ret = check_device_access_params(scp, lba, num);
+ if (ret)
+ goto err_out_unlock;
+ num_by = num * lb_size;
+ ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
+
+ if ((cum_lb + num) > bt_len) {
+ if (sdebug_verbose)
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: %s: sum of blocks > data provided\n",
+ my_name, __func__);
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
+ 0);
+ ret = illegal_condition_result;
+ goto err_out_unlock;
+ }
+
+ /* DIX + T10 DIF */
+ if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
+ int prot_ret = prot_verify_write(scp, lba, num,
+ ei_lba);
+
+ if (prot_ret) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
+ prot_ret);
+ ret = illegal_condition_result;
+ goto err_out_unlock;
+ }
+ }
+
+ ret = do_device_access(scp, sg_off, lba, num, true);
+ if (unlikely(scsi_debug_lbp()))
+ map_region(lba, num);
+ if (unlikely(-1 == ret)) {
+ ret = DID_ERROR << 16;
+ goto err_out_unlock;
+ } else if (unlikely(sdebug_verbose && (ret < num_by)))
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
+ my_name, num_by, ret);
+
+ if (unlikely(sdebug_any_injecting_opt)) {
+ struct sdebug_queued_cmd *sqcp =
+ (struct sdebug_queued_cmd *)scp->host_scribble;
+
+ if (sqcp) {
+ if (sqcp->inj_recovered) {
+ mk_sense_buffer(scp, RECOVERED_ERROR,
+ THRESHOLD_EXCEEDED, 0);
+ ret = illegal_condition_result;
+ goto err_out_unlock;
+ } else if (sqcp->inj_dif) {
+ /* Logical block guard check failed */
+ mk_sense_buffer(scp, ABORTED_COMMAND,
+ 0x10, 1);
+ ret = illegal_condition_result;
+ goto err_out_unlock;
+ } else if (sqcp->inj_dix) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
+ 0x10, 1);
+ ret = illegal_condition_result;
+ goto err_out_unlock;
+ }
+ }
+ }
+ sg_off += num_by;
+ cum_lb += num;
+ }
+ ret = 0;
+err_out_unlock:
+ write_unlock_irqrestore(&atomic_rw, iflags);
+err_out:
+ kfree(lrdp);
+ return ret;
+}
+
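resp_write_scat() above parses the WRITE SCATTERED data-out buffer directly with get_unaligned_be*(); laid out as a struct for reference (sdeb_lrd and its field names are descriptive, not from a header -- the first descriptor sits at offset lrd_size, after the 32-byte parameter header, and the data blocks start at lbdof * sector_size):

	struct sdeb_lrd {		/* one 32-byte LBA range descriptor */
		__be64 lba;		/* bytes 0..7 */
		__be32 num_blocks;	/* bytes 8..11 */
		__be32 ei_lba;		/* bytes 12..15, DIF types 1/2 only */
		u8     reserved[16];	/* bytes 16..31 */
	};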
static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
u32 ei_lba, bool unmap, bool ndob)
{
@@ -3177,7 +3436,7 @@ static int resp_comp_write(struct scsi_cmnd *scp,
* from data-in into arr. Safe (atomic) since write_lock held. */
fake_storep_hold = fake_storep;
fake_storep = arr;
- ret = do_device_access(scp, 0, dnum, true);
+ ret = do_device_access(scp, 0, 0, dnum, true);
fake_storep = fake_storep_hold;
if (ret == -1) {
retval = DID_ERROR << 16;
@@ -3495,6 +3754,7 @@ static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
struct scsi_cmnd *scp;
struct sdebug_dev_info *devip;
+ sd_dp->defer_t = SDEB_DEFER_NONE;
qc_idx = sd_dp->qc_idx;
sqp = sdebug_q_arr + sd_dp->sqa_idx;
if (sdebug_statistics) {
@@ -3603,12 +3863,12 @@ static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
if (!sdbg_host) {
pr_err("Host info NULL\n");
return NULL;
- }
+ }
list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
if ((devip->used) && (devip->channel == sdev->channel) &&
- (devip->target == sdev->id) &&
- (devip->lun == sdev->lun))
- return devip;
+ (devip->target == sdev->id) &&
+ (devip->lun == sdev->lun))
+ return devip;
else {
if ((!devip->used) && (!open_devip))
open_devip = devip;
@@ -3660,6 +3920,7 @@ static int scsi_debug_slave_configure(struct scsi_device *sdp)
blk_queue_max_segment_size(sdp->request_queue, -1U);
if (sdebug_no_uld)
sdp->no_uld_attach = 1;
+ config_cdb_len(sdp);
return 0;
}
@@ -3678,13 +3939,14 @@ static void scsi_debug_slave_destroy(struct scsi_device *sdp)
}
}
-static void stop_qc_helper(struct sdebug_defer *sd_dp)
+static void stop_qc_helper(struct sdebug_defer *sd_dp,
+ enum sdeb_defer_type defer_t)
{
if (!sd_dp)
return;
- if ((sdebug_jdelay > 0) || (sdebug_ndelay > 0))
+ if (defer_t == SDEB_DEFER_HRT)
hrtimer_cancel(&sd_dp->hrt);
- else if (sdebug_jdelay < 0)
+ else if (defer_t == SDEB_DEFER_WQ)
cancel_work_sync(&sd_dp->ew.work);
}
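Recording the deferral type per command, rather than re-deriving it from the delay knobs, closes a window where the knobs change while a command is in flight; an illustrative sequence (inferred from the code, not stated in the patch):

	/*
	 *   1. delay=5          command queued, hrtimer armed (SDEB_DEFER_HRT)
	 *   2. delay set to -1  sdebug_jdelay now negative
	 *   3. abort arrives    old code consulted sdebug_jdelay and would
	 *                       cancel_work_sync() a timer-deferred command
	 *
	 * With defer_t captured at queue time, stop_qc_helper() always
	 * cancels the mechanism that was actually armed.
	 */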
@@ -3694,6 +3956,7 @@ static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
{
unsigned long iflags;
int j, k, qmax, r_qmax;
+ enum sdeb_defer_type l_defer_t;
struct sdebug_queue *sqp;
struct sdebug_queued_cmd *sqcp;
struct sdebug_dev_info *devip;
@@ -3717,8 +3980,13 @@ static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
atomic_dec(&devip->num_in_q);
sqcp->a_cmnd = NULL;
sd_dp = sqcp->sd_dp;
+ if (sd_dp) {
+ l_defer_t = sd_dp->defer_t;
+ sd_dp->defer_t = SDEB_DEFER_NONE;
+ } else
+ l_defer_t = SDEB_DEFER_NONE;
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- stop_qc_helper(sd_dp);
+ stop_qc_helper(sd_dp, l_defer_t);
clear_bit(k, sqp->in_use_bm);
return true;
}
@@ -3733,6 +4001,7 @@ static void stop_all_queued(void)
{
unsigned long iflags;
int j, k;
+ enum sdeb_defer_type l_defer_t;
struct sdebug_queue *sqp;
struct sdebug_queued_cmd *sqcp;
struct sdebug_dev_info *devip;
@@ -3751,8 +4020,13 @@ static void stop_all_queued(void)
atomic_dec(&devip->num_in_q);
sqcp->a_cmnd = NULL;
sd_dp = sqcp->sd_dp;
+ if (sd_dp) {
+ l_defer_t = sd_dp->defer_t;
+ sd_dp->defer_t = SDEB_DEFER_NONE;
+ } else
+ l_defer_t = SDEB_DEFER_NONE;
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- stop_qc_helper(sd_dp);
+ stop_qc_helper(sd_dp, l_defer_t);
clear_bit(k, sqp->in_use_bm);
spin_lock_irqsave(&sqp->qc_lock, iflags);
}
@@ -3848,8 +4122,8 @@ static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
{
struct sdebug_host_info *sdbg_host;
struct sdebug_dev_info *devip;
- struct scsi_device * sdp;
- struct Scsi_Host * hp;
+ struct scsi_device *sdp;
+ struct Scsi_Host *hp;
int k = 0;
++num_bus_resets;
@@ -3863,7 +4137,7 @@ static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
if (sdbg_host) {
list_for_each_entry(devip,
- &sdbg_host->dev_info_list,
+ &sdbg_host->dev_info_list,
dev_list) {
set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
++k;
@@ -3886,15 +4160,15 @@ static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
++num_host_resets;
if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
- spin_lock(&sdebug_host_list_lock);
- list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
+ spin_lock(&sdebug_host_list_lock);
+ list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
list_for_each_entry(devip, &sdbg_host->dev_info_list,
dev_list) {
set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
++k;
}
- }
- spin_unlock(&sdebug_host_list_lock);
+ }
+ spin_unlock(&sdebug_host_list_lock);
stop_all_queued();
if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
sdev_printk(KERN_INFO, SCpnt->device,
@@ -3921,7 +4195,7 @@ static void __init sdebug_build_parts(unsigned char *ramp,
sectors_per_part = (num_sectors - sdebug_sectors_per)
/ sdebug_num_parts;
heads_by_sects = sdebug_heads * sdebug_sectors_per;
- starts[0] = sdebug_sectors_per;
+ starts[0] = sdebug_sectors_per;
for (k = 1; k < sdebug_num_parts; ++k)
starts[k] = ((k * sectors_per_part) / heads_by_sects)
* heads_by_sects;
@@ -3995,6 +4269,7 @@ static void setup_inject(struct sdebug_queue *sqp,
sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
+ sqcp->inj_host_busy = !!(SDEBUG_OPT_HOST_BUSY & sdebug_opts);
}
/* Complete the processing of the thread that queued a SCSI command to this
@@ -4003,7 +4278,7 @@ static void setup_inject(struct sdebug_queue *sqp,
* SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
*/
static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
- int scsi_result, int delta_jiff)
+ int scsi_result, int delta_jiff, int ndelay)
{
unsigned long iflags;
int k, num_in_q, qdepth, inject;
@@ -4081,20 +4356,20 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
spin_unlock_irqrestore(&sqp->qc_lock, iflags);
if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
setup_inject(sqp, sqcp);
- if (delta_jiff > 0 || sdebug_ndelay > 0) {
+ if (sd_dp == NULL) {
+ sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
+ if (sd_dp == NULL)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+ if (delta_jiff > 0 || ndelay > 0) {
ktime_t kt;
if (delta_jiff > 0) {
- struct timespec ts;
-
- jiffies_to_timespec(delta_jiff, &ts);
- kt = ktime_set(ts.tv_sec, ts.tv_nsec);
+ kt = ns_to_ktime((u64)delta_jiff * (NSEC_PER_SEC / HZ));
} else
- kt = sdebug_ndelay;
- if (NULL == sd_dp) {
- sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
- if (NULL == sd_dp)
- return SCSI_MLQUEUE_HOST_BUSY;
+ kt = ndelay;
+ if (!sd_dp->init_hrt) {
+ sd_dp->init_hrt = true;
sqcp->sd_dp = sd_dp;
hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
HRTIMER_MODE_REL_PINNED);
@@ -4104,12 +4379,11 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
}
if (sdebug_statistics)
sd_dp->issuing_cpu = raw_smp_processor_id();
+ sd_dp->defer_t = SDEB_DEFER_HRT;
hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
} else { /* jdelay < 0, use work queue */
- if (NULL == sd_dp) {
- sd_dp = kzalloc(sizeof(*sqcp->sd_dp), GFP_ATOMIC);
- if (NULL == sd_dp)
- return SCSI_MLQUEUE_HOST_BUSY;
+ if (!sd_dp->init_wq) {
+ sd_dp->init_wq = true;
sqcp->sd_dp = sd_dp;
sd_dp->sqa_idx = sqp - sdebug_q_arr;
sd_dp->qc_idx = k;
@@ -4117,6 +4391,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
}
if (sdebug_statistics)
sd_dp->issuing_cpu = raw_smp_processor_id();
+ sd_dp->defer_t = SDEB_DEFER_WQ;
schedule_work(&sd_dp->ew.work);
}
if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
@@ -4141,6 +4416,7 @@ respond_in_thread: /* call back to mid-layer using invocation thread */
*/
module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, sdebug_ato, int, S_IRUGO);
+module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
@@ -4198,6 +4474,7 @@ MODULE_VERSION(SDEBUG_VERSION);
MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
+MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
@@ -4210,7 +4487,8 @@ MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
-MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\"0186\")");
+MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
+ SDEBUG_VERSION "\")");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
@@ -4360,9 +4638,6 @@ static ssize_t delay_store(struct device_driver *ddp, const char *buf,
}
}
if (res > 0) {
- /* make sure sdebug_defer instances get
- * re-allocated for new delay variant */
- free_all_queued();
sdebug_jdelay = jdelay;
sdebug_ndelay = 0;
}
@@ -4403,9 +4678,6 @@ static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
}
}
if (res > 0) {
- /* make sure sdebug_defer instances get
- * re-allocated for new delay variant */
- free_all_queued();
sdebug_ndelay = ndelay;
sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
: DEF_JDELAY;
@@ -4426,15 +4698,15 @@ static ssize_t opts_show(struct device_driver *ddp, char *buf)
static ssize_t opts_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int opts;
+ int opts;
char work[20];
- if (1 == sscanf(buf, "%10s", work)) {
- if (0 == strncasecmp(work,"0x", 2)) {
- if (1 == sscanf(&work[2], "%x", &opts))
+ if (sscanf(buf, "%10s", work) == 1) {
+ if (strncasecmp(work, "0x", 2) == 0) {
+ if (kstrtoint(work + 2, 16, &opts) == 0)
goto opts_done;
} else {
- if (1 == sscanf(work, "%d", &opts))
+ if (kstrtoint(work, 10, &opts) == 0)
goto opts_done;
}
}
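
Because kstrtoint() with base 0 auto-detects a "0x" prefix, the two branches above could in principle collapse into one call; a sketch of that alternative, which is not what the patch does:

	/* Hypothetical simplification: base 0 covers both the hex and
	 * decimal spellings accepted above.
	 */
	if (kstrtoint(work, 0, &opts) == 0)
		goto opts_done;

The patch keeps the historical two-branch parse and only swaps the unchecked sscanf() conversions for the checked kstrto*() helpers.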
@@ -4455,7 +4727,7 @@ static ssize_t ptype_show(struct device_driver *ddp, char *buf)
static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n;
+ int n;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
sdebug_ptype = n;
@@ -4472,7 +4744,7 @@ static ssize_t dsense_show(struct device_driver *ddp, char *buf)
static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n;
+ int n;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
sdebug_dsense = n;
@@ -4489,7 +4761,7 @@ static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n;
+ int n;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
n = (n > 0);
@@ -4522,7 +4794,7 @@ static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n;
+ int n;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
sdebug_no_lun_0 = n;
@@ -4539,7 +4811,7 @@ static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n;
+ int n;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
sdebug_num_tgts = n;
@@ -4569,7 +4841,7 @@ static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int nth;
+ int nth;
if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
sdebug_every_nth = nth;
@@ -4591,7 +4863,7 @@ static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n;
+ int n;
bool changed;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
@@ -4678,7 +4950,7 @@ static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n;
+ int n;
bool changed;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
@@ -4884,6 +5156,24 @@ static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
}
static DRIVER_ATTR_RO(uuid_ctl);
+static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
+}
+static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
+ size_t count)
+{
+ int ret, n;
+
+ ret = kstrtoint(buf, 0, &n);
+ if (ret)
+ return ret;
+ sdebug_cdb_len = n;
+ all_config_cdb_len();
+ return count;
+}
+static DRIVER_ATTR_RW(cdb_len);
+
/* Note: The following array creates attribute files in the
/sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
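
As with its neighbours in this array, the new attribute surfaces as an ordinary file once the module is loaded; hypothetical shell usage, using the directory named in the comment above:

	echo 16 > /sys/bus/pseudo/drivers/scsi_debug/cdb_len

cdb_len_store() parses the value with kstrtoint() and then calls all_config_cdb_len(), which, going by its name, propagates the new length to the already-attached devices.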
@@ -4923,6 +5213,7 @@ static struct attribute *sdebug_drv_attrs[] = {
&driver_attr_ndelay.attr,
&driver_attr_strict.attr,
&driver_attr_uuid_ctl.attr,
+ &driver_attr_cdb_len.attr,
NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);
@@ -5113,12 +5404,12 @@ static int __init scsi_debug_init(void)
host_to_add = sdebug_add_host;
sdebug_add_host = 0;
- for (k = 0; k < host_to_add; k++) {
- if (sdebug_add_adapter()) {
+ for (k = 0; k < host_to_add; k++) {
+ if (sdebug_add_adapter()) {
pr_err("sdebug_add_adapter failed k=%d\n", k);
- break;
- }
- }
+ break;
+ }
+ }
if (sdebug_verbose)
pr_info("built %d host(s)\n", sdebug_add_host);
@@ -5161,53 +5452,53 @@ module_exit(scsi_debug_exit);
static void sdebug_release_adapter(struct device * dev)
{
- struct sdebug_host_info *sdbg_host;
+ struct sdebug_host_info *sdbg_host;
sdbg_host = to_sdebug_host(dev);
- kfree(sdbg_host);
+ kfree(sdbg_host);
}
static int sdebug_add_adapter(void)
{
int k, devs_per_host;
- int error = 0;
- struct sdebug_host_info *sdbg_host;
+ int error = 0;
+ struct sdebug_host_info *sdbg_host;
struct sdebug_dev_info *sdbg_devinfo, *tmp;
- sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
- if (NULL == sdbg_host) {
+ sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
+ if (sdbg_host == NULL) {
pr_err("out of memory at line %d\n", __LINE__);
- return -ENOMEM;
- }
+ return -ENOMEM;
+ }
- INIT_LIST_HEAD(&sdbg_host->dev_info_list);
+ INIT_LIST_HEAD(&sdbg_host->dev_info_list);
devs_per_host = sdebug_num_tgts * sdebug_max_luns;
- for (k = 0; k < devs_per_host; k++) {
+ for (k = 0; k < devs_per_host; k++) {
sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
if (!sdbg_devinfo) {
pr_err("out of memory at line %d\n", __LINE__);
- error = -ENOMEM;
+ error = -ENOMEM;
goto clean;
- }
- }
+ }
+ }
- spin_lock(&sdebug_host_list_lock);
- list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
- spin_unlock(&sdebug_host_list_lock);
+ spin_lock(&sdebug_host_list_lock);
+ list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
+ spin_unlock(&sdebug_host_list_lock);
- sdbg_host->dev.bus = &pseudo_lld_bus;
- sdbg_host->dev.parent = pseudo_primary;
- sdbg_host->dev.release = &sdebug_release_adapter;
+ sdbg_host->dev.bus = &pseudo_lld_bus;
+ sdbg_host->dev.parent = pseudo_primary;
+ sdbg_host->dev.release = &sdebug_release_adapter;
dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host);
- error = device_register(&sdbg_host->dev);
+ error = device_register(&sdbg_host->dev);
- if (error)
+ if (error)
goto clean;
++sdebug_add_host;
- return error;
+ return error;
clean:
list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
@@ -5217,20 +5508,20 @@ clean:
}
kfree(sdbg_host);
- return error;
+ return error;
}
static void sdebug_remove_adapter(void)
{
- struct sdebug_host_info * sdbg_host = NULL;
+ struct sdebug_host_info *sdbg_host = NULL;
- spin_lock(&sdebug_host_list_lock);
- if (!list_empty(&sdebug_host_list)) {
- sdbg_host = list_entry(sdebug_host_list.prev,
- struct sdebug_host_info, host_list);
+ spin_lock(&sdebug_host_list_lock);
+ if (!list_empty(&sdebug_host_list)) {
+ sdbg_host = list_entry(sdebug_host_list.prev,
+ struct sdebug_host_info, host_list);
list_del(&sdbg_host->host_list);
}
- spin_unlock(&sdebug_host_list_lock);
+ spin_unlock(&sdebug_host_list_lock);
if (!sdbg_host)
return;
@@ -5281,6 +5572,12 @@ static bool fake_timeout(struct scsi_cmnd *scp)
return false;
}
+static bool fake_host_busy(struct scsi_cmnd *scp)
+{
+ return (sdebug_opts & SDEBUG_OPT_HOST_BUSY) &&
+ (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
+}
+
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
struct scsi_cmnd *scp)
{
@@ -5323,6 +5620,8 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name,
b);
}
+ if (fake_host_busy(scp))
+ return SCSI_MLQUEUE_HOST_BUSY;
has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
goto err_out;
@@ -5420,12 +5719,15 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
errsts = r_pfp(scp, devip);
fini:
- return schedule_resp(scp, devip, errsts,
- ((F_DELAY_OVERR & flags) ? 0 : sdebug_jdelay));
+ if (F_DELAY_OVERR & flags)
+ return schedule_resp(scp, devip, errsts, 0, 0);
+ else
+ return schedule_resp(scp, devip, errsts, sdebug_jdelay,
+ sdebug_ndelay);
check_cond:
- return schedule_resp(scp, devip, check_condition_result, 0);
+ return schedule_resp(scp, devip, check_condition_result, 0, 0);
err_out:
- return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, 0);
+ return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, 0, 0);
}
static struct scsi_host_template sdebug_driver_template = {
@@ -5484,7 +5786,7 @@ static int sdebug_driver_probe(struct device * dev)
if (sdebug_mq_active)
hpnt->nr_hw_queues = submit_queues;
- sdbg_host->shost = hpnt;
+ sdbg_host->shost = hpnt;
*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
hpnt->max_id = sdebug_num_tgts + 1;
@@ -5542,12 +5844,12 @@ static int sdebug_driver_probe(struct device * dev)
sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
if (sdebug_every_nth) /* need stats counters for every_nth */
sdebug_statistics = true;
- error = scsi_add_host(hpnt, &sdbg_host->dev);
- if (error) {
+ error = scsi_add_host(hpnt, &sdbg_host->dev);
+ if (error) {
pr_err("scsi_add_host failed\n");
- error = -ENODEV;
+ error = -ENODEV;
scsi_host_put(hpnt);
- } else
+ } else
scsi_scan_host(hpnt);
return error;
@@ -5555,7 +5857,7 @@ static int sdebug_driver_probe(struct device * dev)
static int sdebug_driver_remove(struct device * dev)
{
- struct sdebug_host_info *sdbg_host;
+ struct sdebug_host_info *sdbg_host;
struct sdebug_dev_info *sdbg_devinfo, *tmp;
sdbg_host = to_sdebug_host(dev);
@@ -5565,16 +5867,16 @@ static int sdebug_driver_remove(struct device * dev)
return -ENODEV;
}
- scsi_remove_host(sdbg_host->shost);
+ scsi_remove_host(sdbg_host->shost);
list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
dev_list) {
- list_del(&sdbg_devinfo->dev_list);
- kfree(sdbg_devinfo);
- }
+ list_del(&sdbg_devinfo->dev_list);
+ kfree(sdbg_devinfo);
+ }
- scsi_host_put(sdbg_host->shost);
- return 0;
+ scsi_host_put(sdbg_host->shost);
+ return 0;
}
static int pseudo_lld_bus_match(struct device *dev,
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index dfb8da83fa50..f3b117246d47 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -108,8 +108,8 @@ static struct {
* seagate controller, which causes SCSI code to reset bus.
*/
{"HP", "C1750A", "3226", BLIST_NOLUN}, /* scanjet iic */
- {"HP", "C1790A", "", BLIST_NOLUN}, /* scanjet iip */
- {"HP", "C2500A", "", BLIST_NOLUN}, /* scanjet iicx */
+ {"HP", "C1790A", NULL, BLIST_NOLUN}, /* scanjet iip */
+ {"HP", "C2500A", NULL, BLIST_NOLUN}, /* scanjet iicx */
{"MEDIAVIS", "CDR-H93MV", "1.31", BLIST_NOLUN}, /* locks up */
{"MICROTEK", "ScanMaker II", "5.61", BLIST_NOLUN}, /* responds to all lun */
{"MITSUMI", "CD-R CR-2201CS", "6119", BLIST_NOLUN}, /* locks up */
@@ -119,7 +119,7 @@ static struct {
{"QUANTUM", "FIREBALL ST4.3S", "0F0C", BLIST_NOLUN}, /* locks up */
{"RELISYS", "Scorpio", NULL, BLIST_NOLUN}, /* responds to all lun */
{"SANKYO", "CP525", "6.64", BLIST_NOLUN}, /* causes failed REQ SENSE, extra reset */
- {"TEXEL", "CD-ROM", "1.06", BLIST_NOLUN},
+ {"TEXEL", "CD-ROM", "1.06", BLIST_NOLUN | BLIST_BORKEN},
{"transtec", "T5008", "0001", BLIST_NOREPORTLUN },
{"YAMAHA", "CDR100", "1.00", BLIST_NOLUN}, /* locks up */
{"YAMAHA", "CDR102", "1.00", BLIST_NOLUN}, /* locks up */
@@ -158,8 +158,8 @@ static struct {
{"DELL", "PSEUDO DEVICE .", NULL, BLIST_SPARSELUN}, /* Dell PV 530F */
{"DELL", "PV530F", NULL, BLIST_SPARSELUN},
{"DELL", "PERCRAID", NULL, BLIST_FORCELUN},
- {"DGC", "RAID", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, storage on LUN 0 */
- {"DGC", "DISK", NULL, BLIST_SPARSELUN}, /* Dell PV 650F, no storage on LUN 0 */
+ {"DGC", "RAID", NULL, BLIST_SPARSELUN}, /* EMC CLARiiON, storage on LUN 0 */
+ {"DGC", "DISK", NULL, BLIST_SPARSELUN}, /* EMC CLARiiON, no storage on LUN 0 */
{"EMC", "Invista", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_REPORTLUN2},
{"EMULEX", "MD21/S2 ESDI", NULL, BLIST_SINGLELUN},
@@ -181,15 +181,14 @@ static struct {
{"HITACHI", "6586-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"HITACHI", "6588-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"HP", "A6189A", NULL, BLIST_SPARSELUN | BLIST_LARGELUN}, /* HP VA7400 */
- {"HP", "OPEN-", "*", BLIST_REPORTLUN2}, /* HP XP Arrays */
+ {"HP", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES}, /* HP XP Arrays */
{"HP", "NetRAID-4M", NULL, BLIST_FORCELUN},
{"HP", "HSV100", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD},
{"HP", "C1557A", NULL, BLIST_FORCELUN},
{"HP", "C3323-300", "4269", BLIST_NOTQ},
{"HP", "C5713A", NULL, BLIST_NOREPORTLUN},
- {"HP", "DF400", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
- {"HP", "DF500", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
- {"HP", "DF600", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
+ {"HP", "DF400", "*", BLIST_REPORTLUN2},
+ {"HP", "DF500", "*", BLIST_REPORTLUN2},
{"HP", "OP-C-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"HP", "3380-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
{"HP", "3390-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
@@ -255,7 +254,6 @@ static struct {
{"ST650211", "CF", NULL, BLIST_RETRY_HWERROR},
{"SUN", "T300", "*", BLIST_SPARSELUN},
{"SUN", "T4", "*", BLIST_SPARSELUN},
- {"TEXEL", "CD-ROM", "1.06", BLIST_BORKEN},
{"Tornado-", "F4", "*", BLIST_NOREPORTLUN},
{"TOSHIBA", "CDROM", NULL, BLIST_ISROM},
{"TOSHIBA", "CD-ROM", NULL, BLIST_ISROM},
@@ -353,7 +351,8 @@ static int scsi_dev_info_list_add(int compatible, char *vendor, char *model,
* Returns: 0 OK, -error on failure.
**/
int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model,
- char *strflags, blist_flags_t flags, int key)
+ char *strflags, blist_flags_t flags,
+ enum scsi_devinfo_key key)
{
struct scsi_dev_info_list *devinfo;
struct scsi_dev_info_list_table *devinfo_table =
@@ -402,7 +401,7 @@ EXPORT_SYMBOL(scsi_dev_info_list_add_keyed);
* Returns: pointer to matching entry, or ERR_PTR on failure.
**/
static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
- const char *model, int key)
+ const char *model, enum scsi_devinfo_key key)
{
struct scsi_dev_info_list *devinfo;
struct scsi_dev_info_list_table *devinfo_table =
@@ -485,7 +484,8 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
*
* Returns: 0 OK, -error on failure.
**/
-int scsi_dev_info_list_del_keyed(char *vendor, char *model, int key)
+int scsi_dev_info_list_del_keyed(char *vendor, char *model,
+ enum scsi_devinfo_key key)
{
struct scsi_dev_info_list *found;
@@ -587,20 +587,15 @@ blist_flags_t scsi_get_device_flags(struct scsi_device *sdev,
blist_flags_t scsi_get_device_flags_keyed(struct scsi_device *sdev,
const unsigned char *vendor,
const unsigned char *model,
- int key)
+ enum scsi_devinfo_key key)
{
struct scsi_dev_info_list *devinfo;
- int err;
devinfo = scsi_dev_info_list_find(vendor, model, key);
if (!IS_ERR(devinfo))
return devinfo->flags;
- err = PTR_ERR(devinfo);
- if (err != -ENOENT)
- return err;
-
- /* nothing found, return nothing */
+ /* key or device not found: return nothing */
if (key != SCSI_DEVINFO_GLOBAL)
return 0;
@@ -774,7 +769,7 @@ void scsi_exit_devinfo(void)
* Adds the requested list, returns zero on success, -EEXIST if the
* key is already registered to a list, or other error on failure.
*/
-int scsi_dev_info_add_list(int key, const char *name)
+int scsi_dev_info_add_list(enum scsi_devinfo_key key, const char *name)
{
struct scsi_dev_info_list_table *devinfo_table =
scsi_devinfo_lookup_by_key(key);
@@ -806,7 +801,7 @@ EXPORT_SYMBOL(scsi_dev_info_add_list);
* frees the list itself. Returns 0 on success or -EINVAL if the key
* can't be found.
*/
-int scsi_dev_info_remove_list(int key)
+int scsi_dev_info_remove_list(enum scsi_devinfo_key key)
{
struct list_head *lh, *lh_next;
struct scsi_dev_info_list_table *devinfo_table =
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index 2b785d09d5bd..b88b5dbbc444 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -56,10 +56,13 @@ static const struct scsi_dh_blist scsi_dh_blist[] = {
{"IBM", "1815", "rdac", },
{"IBM", "1818", "rdac", },
{"IBM", "3526", "rdac", },
+ {"IBM", "3542", "rdac", },
+ {"IBM", "3552", "rdac", },
{"SGI", "TP9", "rdac", },
{"SGI", "IS", "rdac", },
- {"STK", "OPENstorage D280", "rdac", },
+ {"STK", "OPENstorage", "rdac", },
{"STK", "FLEXLINE 380", "rdac", },
+ {"STK", "BladeCtlr", "rdac", },
{"SUN", "CSM", "rdac", },
{"SUN", "LCSM100", "rdac", },
{"SUN", "STK6580_6780", "rdac", },
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 62b56de38ae8..d042915ce895 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -61,9 +61,10 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd);
static int scsi_try_to_abort_cmd(struct scsi_host_template *,
struct scsi_cmnd *);
-/* called with shost->host_lock held */
void scsi_eh_wakeup(struct Scsi_Host *shost)
{
+ lockdep_assert_held(shost->host_lock);
+
if (atomic_read(&shost->host_busy) == shost->host_failed) {
trace_scsi_eh_wakeup(shost);
wake_up_process(shost->ehandler);
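
The locking contract that used to live in a comment becomes a checkable assertion here. The pattern, sketched for illustration:

	/* With lockdep enabled this warns if a caller forgets the host
	 * lock; in non-debug builds it compiles away to nothing.
	 */
	lockdep_assert_held(shost->host_lock);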
@@ -220,6 +221,17 @@ static void scsi_eh_reset(struct scsi_cmnd *scmd)
}
}
+static void scsi_eh_inc_host_failed(struct rcu_head *head)
+{
+ struct Scsi_Host *shost = container_of(head, typeof(*shost), rcu);
+ unsigned long flags;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ shost->host_failed++;
+ scsi_eh_wakeup(shost);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
/**
* scsi_eh_scmd_add - add scsi cmd to error handling.
* @scmd: scmd to run eh on.
@@ -242,9 +254,12 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd)
scsi_eh_reset(scmd);
list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
- shost->host_failed++;
- scsi_eh_wakeup(shost);
spin_unlock_irqrestore(shost->host_lock, flags);
+ /*
+ * Ensure that all tasks observe the host state change before the
+ * host_failed change.
+ */
+ call_rcu(&shost->rcu, scsi_eh_inc_host_failed);
}
/**
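
A condensed view of the synchronization this hunk introduces, assuming struct Scsi_Host has gained an rcu_head member named rcu (which the call_rcu() above relies on); illustration only, not patch text:

	/*
	 * Writer (this function):           Reader (scsi_dec_host_busy()):
	 *   queue scmd under host_lock        rcu_read_lock();
	 *   call_rcu(&shost->rcu, ...);       atomic_dec(&shost->host_busy);
	 *     -> host_failed++ and            wake eh if failed/scheduled;
	 *        scsi_eh_wakeup() run only    rcu_read_unlock();
	 *        after a grace period
	 *
	 * The RCU callback cannot run until every reader that might have
	 * decremented host_busy against the old host_failed value has left
	 * its read-side critical section, so the wakeup cannot be lost.
	 */

The matching reader side appears in the scsi_lib.c hunks below.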
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index d9ca1dfab154..976c936029cb 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -164,7 +164,7 @@ static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
* for a requeue after completion, which should only occur in this
* file.
*/
-static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
+static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
{
struct scsi_device *device = cmd->device;
struct request_queue *q = device->request_queue;
@@ -220,7 +220,7 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
*/
void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
- __scsi_queue_insert(cmd, reason, 1);
+ __scsi_queue_insert(cmd, reason, true);
}
@@ -318,22 +318,39 @@ static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
cmd->cmd_len = scsi_command_size(cmd->cmnd);
}
-void scsi_device_unbusy(struct scsi_device *sdev)
+/*
+ * Decrement the host_busy counter and wake up the error handler if necessary.
+ * To avoid missing a wakeup when shost->host_busy == shost->host_failed,
+ * scsi_eh_scmd_add() uses call_rcu() in combination with an RCU read lock
+ * in this function: either this function finishes in its entirety before
+ * scsi_eh_scmd_add() increments the host_failed counter, or it observes
+ * the shost state change that scsi_eh_scmd_add() has already made. Either
+ * way the error handler wakeup cannot be lost.
+ */
+static void scsi_dec_host_busy(struct Scsi_Host *shost)
{
- struct Scsi_Host *shost = sdev->host;
- struct scsi_target *starget = scsi_target(sdev);
unsigned long flags;
+ rcu_read_lock();
atomic_dec(&shost->host_busy);
- if (starget->can_queue > 0)
- atomic_dec(&starget->target_busy);
-
- if (unlikely(scsi_host_in_recovery(shost) &&
- (shost->host_failed || shost->host_eh_scheduled))) {
+ if (unlikely(scsi_host_in_recovery(shost))) {
spin_lock_irqsave(shost->host_lock, flags);
- scsi_eh_wakeup(shost);
+ if (shost->host_failed || shost->host_eh_scheduled)
+ scsi_eh_wakeup(shost);
spin_unlock_irqrestore(shost->host_lock, flags);
}
+ rcu_read_unlock();
+}
+
+void scsi_device_unbusy(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost = sdev->host;
+ struct scsi_target *starget = scsi_target(sdev);
+
+ scsi_dec_host_busy(shost);
+
+ if (starget->can_queue > 0)
+ atomic_dec(&starget->target_busy);
atomic_dec(&sdev->device_busy);
}
@@ -998,11 +1015,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
break;
case ACTION_RETRY:
/* Retry the same command immediately */
- __scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
+ __scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, false);
break;
case ACTION_DELAYED_RETRY:
/* Retry the same command after a delay */
- __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
+ __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, false);
break;
}
}
@@ -1128,7 +1145,7 @@ EXPORT_SYMBOL(scsi_init_io);
* Called from inside blk_get_request() for pass-through requests and from
* inside scsi_init_command() for filesystem requests.
*/
-void scsi_initialize_rq(struct request *rq)
+static void scsi_initialize_rq(struct request *rq)
{
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
@@ -1136,7 +1153,6 @@ void scsi_initialize_rq(struct request *rq)
cmd->jiffies_at_alloc = jiffies;
cmd->retries = 0;
}
-EXPORT_SYMBOL(scsi_initialize_rq);
/* Add a command to the list used by the aacraid and dpt_i2o drivers */
void scsi_add_cmd_to_list(struct scsi_cmnd *cmd)
@@ -1532,7 +1548,7 @@ starved:
list_add_tail(&sdev->starved_entry, &shost->starved_list);
spin_unlock_irq(shost->host_lock);
out_dec:
- atomic_dec(&shost->host_busy);
+ scsi_dec_host_busy(shost);
return 0;
}
@@ -2020,7 +2036,7 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
out_dec_host_busy:
- atomic_dec(&shost->host_busy);
+ scsi_dec_host_busy(shost);
out_dec_target_busy:
if (scsi_target(sdev)->can_queue > 0)
atomic_dec(&scsi_target(sdev)->target_busy);
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index a5946cd64caa..99f1db5e467e 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -45,7 +45,7 @@ static inline void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
/* scsi_devinfo.c */
/* list of keys for the lists */
-enum {
+enum scsi_devinfo_key {
SCSI_DEVINFO_GLOBAL = 0,
SCSI_DEVINFO_SPI,
};
@@ -56,13 +56,15 @@ extern blist_flags_t scsi_get_device_flags(struct scsi_device *sdev,
extern blist_flags_t scsi_get_device_flags_keyed(struct scsi_device *sdev,
const unsigned char *vendor,
const unsigned char *model,
- int key);
+ enum scsi_devinfo_key key);
extern int scsi_dev_info_list_add_keyed(int compatible, char *vendor,
char *model, char *strflags,
- blist_flags_t flags, int key);
-extern int scsi_dev_info_list_del_keyed(char *vendor, char *model, int key);
-extern int scsi_dev_info_add_list(int key, const char *name);
-extern int scsi_dev_info_remove_list(int key);
+ blist_flags_t flags,
+ enum scsi_devinfo_key key);
+extern int scsi_dev_info_list_del_keyed(char *vendor, char *model,
+ enum scsi_devinfo_key key);
+extern int scsi_dev_info_add_list(enum scsi_devinfo_key key, const char *name);
+extern int scsi_dev_info_remove_list(enum scsi_devinfo_key key);
extern int __init scsi_init_devinfo(void);
extern void scsi_exit_devinfo(void);
@@ -184,7 +186,6 @@ void scsi_dh_release_device(struct scsi_device *sdev);
static inline void scsi_dh_add_device(struct scsi_device *sdev) { }
static inline void scsi_dh_release_device(struct scsi_device *sdev) { }
#endif
-static inline void scsi_dh_remove_device(struct scsi_device *sdev) { }
/*
* internal scsi timeout functions: for use by mid-layer and transport
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 26ce17178401..91b90f672d23 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1278,7 +1278,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
if (error) {
sdev_printk(KERN_INFO, sdev,
"failed to add device: %d\n", error);
- scsi_dh_remove_device(sdev);
return error;
}
@@ -1287,7 +1286,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
if (error) {
sdev_printk(KERN_INFO, sdev,
"failed to add class device: %d\n", error);
- scsi_dh_remove_device(sdev);
device_del(&sdev->sdev_gendev);
return error;
}
@@ -1354,7 +1352,6 @@ void __scsi_remove_device(struct scsi_device *sdev)
bsg_unregister_queue(sdev->request_queue);
device_unregister(&sdev->sdev_dev);
transport_remove_device(dev);
- scsi_dh_remove_device(sdev);
device_del(dev);
} else
put_device(&sdev->sdev_dev);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 4664024bd5d3..be3be0f9cb2d 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -267,8 +267,8 @@ static const struct {
{ FC_PORTSPEED_50GBIT, "50 Gbit" },
{ FC_PORTSPEED_100GBIT, "100 Gbit" },
{ FC_PORTSPEED_25GBIT, "25 Gbit" },
- { FC_PORTSPEED_64BIT, "64 Gbit" },
- { FC_PORTSPEED_128BIT, "128 Gbit" },
+ { FC_PORTSPEED_64GBIT, "64 Gbit" },
+ { FC_PORTSPEED_128GBIT, "128 Gbit" },
{ FC_PORTSPEED_NOT_NEGOTIATED, "Not Negotiated" },
};
fc_bitfield_name_search(port_speed, fc_port_speed_names)
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 10ebb213ddb3..871ea582029e 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -26,6 +26,7 @@
#include <linux/mutex.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
+#include <linux/suspend.h>
#include <scsi/scsi.h>
#include "scsi_priv.h"
#include <scsi/scsi_device.h>
@@ -1009,11 +1010,20 @@ spi_dv_device(struct scsi_device *sdev)
u8 *buffer;
const int len = SPI_MAX_ECHO_BUFFER_SIZE*2;
+ /*
+ * Because this function and the power management code both call
+ * scsi_device_quiesce(), it is not safe to perform domain validation
+ * while suspend or resume is in progress. Hence the
+ * lock/unlock_system_sleep() calls.
+ */
+ lock_system_sleep();
+
if (unlikely(spi_dv_in_progress(starget)))
- return;
+ goto unlock;
if (unlikely(scsi_device_get(sdev)))
- return;
+ goto unlock;
+
spi_dv_in_progress(starget) = 1;
buffer = kzalloc(len, GFP_KERNEL);
@@ -1049,6 +1059,8 @@ spi_dv_device(struct scsi_device *sdev)
out_put:
spi_dv_in_progress(starget) = 0;
scsi_device_put(sdev);
+unlock:
+ unlock_system_sleep();
}
EXPORT_SYMBOL(spi_dv_device);
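
The new comment explains why the guard is needed; the control-flow shape it imposes, sketched with a hypothetical condition standing in for the two real early-exit checks:

	/* Illustration: once lock_system_sleep() is taken, every early
	 * return has to funnel through the unlock label.
	 */
	lock_system_sleep();
	if (already_running_or_unavailable)	/* hypothetical condition */
		goto unlock;
	/* ... domain validation ... */
unlock:
	unlock_system_sleep();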
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index a028ab3322a9..ce756d575aff 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2172,7 +2172,7 @@ sd_spinup_disk(struct scsi_disk *sdkp)
}
/* Wait 1 second for next try */
msleep(1000);
- printk(".");
+ printk(KERN_CONT ".");
/*
* Wait for USB flash devices with slow firmware.
@@ -2202,9 +2202,9 @@ sd_spinup_disk(struct scsi_disk *sdkp)
if (spintime) {
if (scsi_status_is_good(the_result))
- printk("ready\n");
+ printk(KERN_CONT "ready\n");
else
- printk("not responding...\n");
+ printk(KERN_CONT "not responding...\n");
}
}
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 11826c5c2dd4..62f04c0511cf 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -615,13 +615,16 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
}
static void ses_match_to_enclosure(struct enclosure_device *edev,
- struct scsi_device *sdev)
+ struct scsi_device *sdev,
+ int refresh)
{
+ struct scsi_device *edev_sdev = to_scsi_device(edev->edev.parent);
struct efd efd = {
.addr = 0,
};
- ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0);
+ if (refresh)
+ ses_enclosure_data_process(edev, edev_sdev, 0);
if (scsi_is_sas_rphy(sdev->sdev_target->dev.parent))
efd.addr = sas_get_address(sdev);
@@ -652,7 +655,7 @@ static int ses_intf_add(struct device *cdev,
struct enclosure_device *prev = NULL;
while ((edev = enclosure_find(&sdev->host->shost_gendev, prev)) != NULL) {
- ses_match_to_enclosure(edev, sdev);
+ ses_match_to_enclosure(edev, sdev, 1);
prev = edev;
}
return -ENODEV;
@@ -768,7 +771,7 @@ page2_not_supported:
shost_for_each_device(tmp_sdev, sdev->host) {
if (tmp_sdev->lun != 0 || scsi_device_enclosure(tmp_sdev))
continue;
- ses_match_to_enclosure(edev, tmp_sdev);
+ ses_match_to_enclosure(edev, tmp_sdev, 0);
}
return 0;
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index f098877eed4a..0c434453aab3 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1140,10 +1140,10 @@ static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned lon
}
#endif
-static unsigned int
+static __poll_t
sg_poll(struct file *filp, poll_table * wait)
{
- unsigned int res = 0;
+ __poll_t res = 0;
Sg_device *sdp;
Sg_fd *sfp;
Sg_request *srp;
@@ -1174,7 +1174,7 @@ sg_poll(struct file *filp, poll_table * wait)
} else if (count < SG_MAX_QUEUE)
res |= POLLOUT | POLLWRNORM;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
- "sg_poll: res=0x%x\n", (int) res));
+ "sg_poll: res=0x%x\n", (__force u32) res));
return res;
}
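
This follows the tree-wide __poll_t annotation work; the reason for the cast at the print site, sketched for illustration:

	/* __poll_t is a sparse __bitwise type, so mixing it with plain
	 * integers triggers sparse warnings; a __force cast opts out
	 * explicitly where a plain integer really is wanted.
	 */
	__poll_t res = POLLIN | POLLRDNORM;
	pr_debug("poll res=0x%x\n", (__force u32)res);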
diff --git a/drivers/scsi/smartpqi/Makefile b/drivers/scsi/smartpqi/Makefile
index 0f42a225a664..e6b779930230 100644
--- a/drivers/scsi/smartpqi/Makefile
+++ b/drivers/scsi/smartpqi/Makefile
@@ -1,3 +1,3 @@
ccflags-y += -I.
-obj-m += smartpqi.o
+obj-$(CONFIG_SCSI_SMARTPQI) += smartpqi.o
smartpqi-objs := smartpqi_init.o smartpqi_sis.o smartpqi_sas_transport.o
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index b141d7641a2e..6c399480783d 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4712,7 +4712,7 @@ static ssize_t read_byte_cnt_show(struct device *dev,
static DEVICE_ATTR_RO(read_byte_cnt);
/**
- * read_us_show - return read us - overall time spent waiting on reads in ns.
+ * read_ns_show - return read ns - overall time spent waiting on reads in ns.
* @dev: struct device
* @attr: attribute structure
* @buf: buffer to return formatted data in
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 3b3d1d050cac..40fc7a590e81 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1834,8 +1834,10 @@ static int storvsc_probe(struct hv_device *device,
fc_host_node_name(host) = stor_device->node_name;
fc_host_port_name(host) = stor_device->port_name;
stor_device->rport = fc_remote_port_add(host, 0, &ids);
- if (!stor_device->rport)
+ if (!stor_device->rport) {
+ ret = -ENOMEM;
goto err_out4;
+ }
}
#endif
return 0;
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 277752b0fc6f..1a1b5d9fe514 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -157,6 +157,8 @@ enum {
#define UTP_TRANSFER_REQ_LIST_READY 0x2
#define UTP_TASK_REQ_LIST_READY 0x4
#define UIC_COMMAND_READY 0x8
+#define HOST_ERROR_INDICATOR 0x10
+#define DEVICE_ERROR_INDICATOR 0x20
#define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK UFS_MASK(0x7, 8)
#define UFSHCD_STATUS_READY (UTP_TRANSFER_REQ_LIST_READY |\
@@ -185,6 +187,10 @@ enum {
/* UECDL - Host UIC Error Code Data Link Layer 3Ch */
#define UIC_DATA_LINK_LAYER_ERROR 0x80000000
#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0x7FFF
+#define UIC_DATA_LINK_LAYER_ERROR_TCX_REP_TIMER_EXP 0x2
+#define UIC_DATA_LINK_LAYER_ERROR_AFCX_REQ_TIMER_EXP 0x4
+#define UIC_DATA_LINK_LAYER_ERROR_FCX_PRO_TIMER_EXP 0x8
+#define UIC_DATA_LINK_LAYER_ERROR_RX_BUF_OF 0x20
#define UIC_DATA_LINK_LAYER_ERROR_PA_INIT 0x2000
#define UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED 0x0001
#define UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT 0x0002
@@ -192,10 +198,20 @@ enum {
/* UECN - Host UIC Error Code Network Layer 40h */
#define UIC_NETWORK_LAYER_ERROR 0x80000000
#define UIC_NETWORK_LAYER_ERROR_CODE_MASK 0x7
+#define UIC_NETWORK_UNSUPPORTED_HEADER_TYPE 0x1
+#define UIC_NETWORK_BAD_DEVICEID_ENC 0x2
+#define UIC_NETWORK_LHDR_TRAP_PACKET_DROPPING 0x4
/* UECT - Host UIC Error Code Transport Layer 44h */
#define UIC_TRANSPORT_LAYER_ERROR 0x80000000
#define UIC_TRANSPORT_LAYER_ERROR_CODE_MASK 0x7F
+#define UIC_TRANSPORT_UNSUPPORTED_HEADER_TYPE 0x1
+#define UIC_TRANSPORT_UNKNOWN_CPORTID 0x2
+#define UIC_TRANSPORT_NO_CONNECTION_RX 0x4
+#define UIC_TRANSPORT_CONTROLLED_SEGMENT_DROPPING 0x8
+#define UIC_TRANSPORT_BAD_TC 0x10
+#define UIC_TRANSPORT_E2E_CREDIT_OVERFOW 0x20
+#define UIC_TRANSPORT_SAFETY_VALUE_DROPPING 0x40
/* UECDME - Host UIC Error Code DME 48h */
#define UIC_DME_ERROR 0x80000000
diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c
index 2a9da2e0ea6b..2ba2b7b47f41 100644
--- a/drivers/scsi/wd719x.c
+++ b/drivers/scsi/wd719x.c
@@ -803,7 +803,9 @@ static enum wd719x_card_type wd719x_detect_type(struct wd719x *wd)
static int wd719x_board_found(struct Scsi_Host *sh)
{
struct wd719x *wd = shost_priv(sh);
- char *card_types[] = { "Unknown card", "WD7193", "WD7197", "WD7296" };
+ static const char * const card_types[] = {
+ "Unknown card", "WD7193", "WD7197", "WD7296"
+ };
int ret;
INIT_LIST_HEAD(&wd->active_scbs);
diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
index d65345312527..7dcb14d303eb 100644
--- a/drivers/spi/spi-armada-3700.c
+++ b/drivers/spi/spi-armada-3700.c
@@ -27,6 +27,8 @@
#define DRIVER_NAME "armada_3700_spi"
+#define A3700_SPI_MAX_SPEED_HZ 100000000
+#define A3700_SPI_MAX_PRESCALE 30
#define A3700_SPI_TIMEOUT 10
/* SPI Register Offset */
@@ -184,12 +186,15 @@ static int a3700_spi_pin_mode_set(struct a3700_spi *a3700_spi,
return 0;
}
-static void a3700_spi_fifo_mode_set(struct a3700_spi *a3700_spi)
+static void a3700_spi_fifo_mode_set(struct a3700_spi *a3700_spi, bool enable)
{
u32 val;
val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
- val |= A3700_SPI_FIFO_MODE;
+ if (enable)
+ val |= A3700_SPI_FIFO_MODE;
+ else
+ val &= ~A3700_SPI_FIFO_MODE;
spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
}
@@ -297,7 +302,7 @@ static int a3700_spi_init(struct a3700_spi *a3700_spi)
a3700_spi_deactivate_cs(a3700_spi, i);
/* Enable FIFO mode */
- a3700_spi_fifo_mode_set(a3700_spi);
+ a3700_spi_fifo_mode_set(a3700_spi, true);
/* Set SPI mode */
a3700_spi_mode_set(a3700_spi, master->mode_bits);
@@ -416,15 +421,20 @@ static void a3700_spi_transfer_setup(struct spi_device *spi,
struct spi_transfer *xfer)
{
struct a3700_spi *a3700_spi;
- unsigned int byte_len;
a3700_spi = spi_master_get_devdata(spi->master);
a3700_spi_clock_set(a3700_spi, xfer->speed_hz);
- byte_len = xfer->bits_per_word >> 3;
+	/* Use 4-byte transfers. Each transfer method has its own way of
+	 * dealing with the trailing bytes of non-4-byte-aligned transfers.
+ */
+ a3700_spi_bytelen_set(a3700_spi, 4);
- a3700_spi_fifo_thres_set(a3700_spi, byte_len);
+ /* Initialize the working buffers */
+ a3700_spi->tx_buf = xfer->tx_buf;
+ a3700_spi->rx_buf = xfer->rx_buf;
+ a3700_spi->buf_len = xfer->len;
}
static void a3700_spi_set_cs(struct spi_device *spi, bool enable)
@@ -491,7 +501,7 @@ static int a3700_spi_fifo_write(struct a3700_spi *a3700_spi)
u32 val;
while (!a3700_is_wfifo_full(a3700_spi) && a3700_spi->buf_len) {
- val = cpu_to_le32(*(u32 *)a3700_spi->tx_buf);
+ val = *(u32 *)a3700_spi->tx_buf;
spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, val);
a3700_spi->buf_len -= 4;
a3700_spi->tx_buf += 4;
@@ -514,9 +524,8 @@ static int a3700_spi_fifo_read(struct a3700_spi *a3700_spi)
while (!a3700_is_rfifo_empty(a3700_spi) && a3700_spi->buf_len) {
val = spireg_read(a3700_spi, A3700_SPI_DATA_IN_REG);
if (a3700_spi->buf_len >= 4) {
- u32 data = le32_to_cpu(val);
- memcpy(a3700_spi->rx_buf, &data, 4);
+ memcpy(a3700_spi->rx_buf, &val, 4);
a3700_spi->buf_len -= 4;
a3700_spi->rx_buf += 4;
@@ -579,27 +588,26 @@ static int a3700_spi_prepare_message(struct spi_master *master,
if (ret)
return ret;
- a3700_spi_bytelen_set(a3700_spi, 4);
-
a3700_spi_mode_set(a3700_spi, spi->mode);
return 0;
}
-static int a3700_spi_transfer_one(struct spi_master *master,
+static int a3700_spi_transfer_one_fifo(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct a3700_spi *a3700_spi = spi_master_get_devdata(master);
int ret = 0, timeout = A3700_SPI_TIMEOUT;
- unsigned int nbits = 0;
+ unsigned int nbits = 0, byte_len;
u32 val;
- a3700_spi_transfer_setup(spi, xfer);
+ /* Make sure we use FIFO mode */
+ a3700_spi_fifo_mode_set(a3700_spi, true);
- a3700_spi->tx_buf = xfer->tx_buf;
- a3700_spi->rx_buf = xfer->rx_buf;
- a3700_spi->buf_len = xfer->len;
+ /* Configure FIFO thresholds */
+ byte_len = xfer->bits_per_word >> 3;
+ a3700_spi_fifo_thres_set(a3700_spi, byte_len);
if (xfer->tx_buf)
nbits = xfer->tx_nbits;
@@ -615,6 +623,11 @@ static int a3700_spi_transfer_one(struct spi_master *master,
a3700_spi_header_set(a3700_spi);
if (xfer->rx_buf) {
+	/* Clear WFIFO, since its last 2 bytes are shifted out during
+ * a read operation
+ */
+ spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, 0);
+
/* Set read data length */
spireg_write(a3700_spi, A3700_SPI_IF_DIN_CNT_REG,
a3700_spi->buf_len);
@@ -729,6 +742,63 @@ out:
return ret;
}
+static int a3700_spi_transfer_one_full_duplex(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct a3700_spi *a3700_spi = spi_master_get_devdata(master);
+ u32 val;
+
+ /* Disable FIFO mode */
+ a3700_spi_fifo_mode_set(a3700_spi, false);
+
+ while (a3700_spi->buf_len) {
+
+		/* When fewer than 4 bytes remain, switch to 1-byte mode;
+		 * this is reset after each transfer.
+ */
+ if (a3700_spi->buf_len < 4)
+ a3700_spi_bytelen_set(a3700_spi, 1);
+
+ if (a3700_spi->byte_len == 1)
+ val = *a3700_spi->tx_buf;
+ else
+ val = *(u32 *)a3700_spi->tx_buf;
+
+ spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, val);
+
+ /* Wait for all the data to be shifted in / out */
+ while (!(spireg_read(a3700_spi, A3700_SPI_IF_CTRL_REG) &
+ A3700_SPI_XFER_DONE))
+ cpu_relax();
+
+ val = spireg_read(a3700_spi, A3700_SPI_DATA_IN_REG);
+
+ memcpy(a3700_spi->rx_buf, &val, a3700_spi->byte_len);
+
+ a3700_spi->buf_len -= a3700_spi->byte_len;
+ a3700_spi->tx_buf += a3700_spi->byte_len;
+ a3700_spi->rx_buf += a3700_spi->byte_len;
+
+ }
+
+ spi_finalize_current_transfer(master);
+
+ return 0;
+}
+
+static int a3700_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ a3700_spi_transfer_setup(spi, xfer);
+
+ if (xfer->tx_buf && xfer->rx_buf)
+ return a3700_spi_transfer_one_full_duplex(master, spi, xfer);
+
+ return a3700_spi_transfer_one_fifo(master, spi, xfer);
+}
+
static int a3700_spi_unprepare_message(struct spi_master *master,
struct spi_message *message)
{
@@ -778,7 +848,6 @@ static int a3700_spi_probe(struct platform_device *pdev)
master->transfer_one = a3700_spi_transfer_one;
master->unprepare_message = a3700_spi_unprepare_message;
master->set_cs = a3700_spi_set_cs;
- master->flags = SPI_MASTER_HALF_DUPLEX;
master->mode_bits |= (SPI_RX_DUAL | SPI_TX_DUAL |
SPI_RX_QUAD | SPI_TX_QUAD);
@@ -818,6 +887,11 @@ static int a3700_spi_probe(struct platform_device *pdev)
goto error;
}
+ master->max_speed_hz = min_t(unsigned long, A3700_SPI_MAX_SPEED_HZ,
+ clk_get_rate(spi->clk));
+ master->min_speed_hz = DIV_ROUND_UP(clk_get_rate(spi->clk),
+ A3700_SPI_MAX_PRESCALE);
+
ret = a3700_spi_init(spi);
if (ret)
goto error_clk;
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 669470971023..4a11fc0d4136 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -291,6 +291,10 @@ struct atmel_spi {
struct spi_transfer *current_transfer;
int current_remaining_bytes;
int done_status;
+ dma_addr_t dma_addr_rx_bbuf;
+ dma_addr_t dma_addr_tx_bbuf;
+ void *addr_rx_bbuf;
+ void *addr_tx_bbuf;
struct completion xfer_completion;
@@ -436,6 +440,11 @@ static void atmel_spi_unlock(struct atmel_spi *as) __releases(&as->lock)
spin_unlock_irqrestore(&as->lock, as->flags);
}
+static inline bool atmel_spi_is_vmalloc_xfer(struct spi_transfer *xfer)
+{
+ return is_vmalloc_addr(xfer->tx_buf) || is_vmalloc_addr(xfer->rx_buf);
+}
+
static inline bool atmel_spi_use_dma(struct atmel_spi *as,
struct spi_transfer *xfer)
{
@@ -448,7 +457,12 @@ static bool atmel_spi_can_dma(struct spi_master *master,
{
struct atmel_spi *as = spi_master_get_devdata(master);
- return atmel_spi_use_dma(as, xfer);
+ if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5))
+ return atmel_spi_use_dma(as, xfer) &&
+ !atmel_spi_is_vmalloc_xfer(xfer);
+ else
+ return atmel_spi_use_dma(as, xfer);
+
}
static int atmel_spi_dma_slave_config(struct atmel_spi *as,
@@ -594,6 +608,11 @@ static void dma_callback(void *data)
struct spi_master *master = data;
struct atmel_spi *as = spi_master_get_devdata(master);
+ if (is_vmalloc_addr(as->current_transfer->rx_buf) &&
+ IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
+ memcpy(as->current_transfer->rx_buf, as->addr_rx_bbuf,
+ as->current_transfer->len);
+ }
complete(&as->xfer_completion);
}
@@ -744,17 +763,41 @@ static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
goto err_exit;
/* Send both scatterlists */
- rxdesc = dmaengine_prep_slave_sg(rxchan,
- xfer->rx_sg.sgl, xfer->rx_sg.nents,
- DMA_FROM_DEVICE,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (atmel_spi_is_vmalloc_xfer(xfer) &&
+ IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
+ rxdesc = dmaengine_prep_slave_single(rxchan,
+ as->dma_addr_rx_bbuf,
+ xfer->len,
+ DMA_FROM_DEVICE,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ } else {
+ rxdesc = dmaengine_prep_slave_sg(rxchan,
+ xfer->rx_sg.sgl,
+ xfer->rx_sg.nents,
+ DMA_FROM_DEVICE,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ }
if (!rxdesc)
goto err_dma;
- txdesc = dmaengine_prep_slave_sg(txchan,
- xfer->tx_sg.sgl, xfer->tx_sg.nents,
- DMA_TO_DEVICE,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (atmel_spi_is_vmalloc_xfer(xfer) &&
+ IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
+ memcpy(as->addr_tx_bbuf, xfer->tx_buf, xfer->len);
+ txdesc = dmaengine_prep_slave_single(txchan,
+ as->dma_addr_tx_bbuf,
+ xfer->len, DMA_TO_DEVICE,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ } else {
+ txdesc = dmaengine_prep_slave_sg(txchan,
+ xfer->tx_sg.sgl,
+ xfer->tx_sg.nents,
+ DMA_TO_DEVICE,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ }
if (!txdesc)
goto err_dma;
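
The branching above reduces to one decision; a sketch, with hypothetical helper names standing in for the dmaengine_prep_slave_single()/_sg() calls:

	/* On SAM9 (VIVT data cache) kernels, vmalloc'ed buffers cannot be
	 * mapped safely with dma_map_sg() because of cache aliasing, so
	 * they are staged through the coherent bounce buffers instead.
	 */
	if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5) && atmel_spi_is_vmalloc_xfer(xfer))
		prep_single_from_bounce_buffer();	/* hypothetical */
	else
		prep_sg_lists_directly();		/* hypothetical */

This is what lets the next hunk drop the atmel_get_caps() override that had disabled DMA on SAM9 outright.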
@@ -1426,27 +1469,7 @@ static void atmel_get_caps(struct atmel_spi *as)
as->caps.is_spi2 = version > 0x121;
as->caps.has_wdrbt = version >= 0x210;
-#ifdef CONFIG_SOC_SAM_V4_V5
- /*
- * Atmel SoCs based on ARM9 (SAM9x) cores should not use spi_map_buf()
- * since this later function tries to map buffers with dma_map_sg()
- * even if they have not been allocated inside DMA-safe areas.
- * On SoCs based on Cortex A5 (SAMA5Dx), it works anyway because for
- * those ARM cores, the data cache follows the PIPT model.
- * Also the L2 cache controller of SAMA5D2 uses the PIPT model too.
- * In case of PIPT caches, there cannot be cache aliases.
- * However on ARM9 cores, the data cache follows the VIVT model, hence
- * the cache aliases issue can occur when buffers are allocated from
- * DMA-unsafe areas, by vmalloc() for instance, where cache coherency is
- * not taken into account or at least not handled completely (cache
- * lines of aliases are not invalidated).
- * This is not a theorical issue: it was reproduced when trying to mount
- * a UBI file-system on a at91sam9g35ek board.
- */
- as->caps.has_dma_support = false;
-#else
as->caps.has_dma_support = version >= 0x212;
-#endif
as->caps.has_pdc_support = version < 0x212;
}
@@ -1592,6 +1615,30 @@ static int atmel_spi_probe(struct platform_device *pdev)
as->use_pdc = true;
}
+ if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
+ as->addr_rx_bbuf = dma_alloc_coherent(&pdev->dev,
+ SPI_MAX_DMA_XFER,
+ &as->dma_addr_rx_bbuf,
+ GFP_KERNEL | GFP_DMA);
+ if (!as->addr_rx_bbuf) {
+ as->use_dma = false;
+ } else {
+ as->addr_tx_bbuf = dma_alloc_coherent(&pdev->dev,
+ SPI_MAX_DMA_XFER,
+ &as->dma_addr_tx_bbuf,
+ GFP_KERNEL | GFP_DMA);
+ if (!as->addr_tx_bbuf) {
+ as->use_dma = false;
+ dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
+ as->addr_rx_bbuf,
+ as->dma_addr_rx_bbuf);
+ }
+ }
+ if (!as->use_dma)
+			dev_info(master->dev.parent,
+				 "cannot allocate dma coherent memory\n");
+ }
+
if (as->caps.has_dma_support && !as->use_dma)
dev_info(&pdev->dev, "Atmel SPI Controller using PIO only\n");
@@ -1664,6 +1711,14 @@ static int atmel_spi_remove(struct platform_device *pdev)
if (as->use_dma) {
atmel_spi_stop_dma(master);
atmel_spi_release_dma(master);
+ if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
+ dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
+ as->addr_tx_bbuf,
+ as->dma_addr_tx_bbuf);
+ dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
+ as->addr_rx_bbuf,
+ as->dma_addr_rx_bbuf);
+ }
}
spin_lock_irq(&as->lock);
diff --git a/drivers/spi/spi-bcm53xx.c b/drivers/spi/spi-bcm53xx.c
index 6e409eabe1c9..d02ceb7a29d1 100644
--- a/drivers/spi/spi-bcm53xx.c
+++ b/drivers/spi/spi-bcm53xx.c
@@ -27,8 +27,6 @@ struct bcm53xxspi {
struct bcma_device *core;
struct spi_master *master;
void __iomem *mmio_base;
-
- size_t read_offset;
bool bspi; /* Boot SPI mode with memory mapping */
};
@@ -172,8 +170,6 @@ static void bcm53xxspi_buf_write(struct bcm53xxspi *b53spi, u8 *w_buf,
if (!cont)
bcm53xxspi_write(b53spi, B53SPI_MSPI_WRITE_LOCK, 0);
-
- b53spi->read_offset = len;
}
static void bcm53xxspi_buf_read(struct bcm53xxspi *b53spi, u8 *r_buf,
@@ -182,10 +178,10 @@ static void bcm53xxspi_buf_read(struct bcm53xxspi *b53spi, u8 *r_buf,
u32 tmp;
int i;
- for (i = 0; i < b53spi->read_offset + len; i++) {
+ for (i = 0; i < len; i++) {
tmp = B53SPI_CDRAM_CONT | B53SPI_CDRAM_PCS_DISABLE_ALL |
B53SPI_CDRAM_PCS_DSCK;
- if (!cont && i == b53spi->read_offset + len - 1)
+ if (!cont && i == len - 1)
tmp &= ~B53SPI_CDRAM_CONT;
tmp &= ~0x1;
/* Command Register File */
@@ -194,8 +190,7 @@ static void bcm53xxspi_buf_read(struct bcm53xxspi *b53spi, u8 *r_buf,
/* Set queue pointers */
bcm53xxspi_write(b53spi, B53SPI_MSPI_NEWQP, 0);
- bcm53xxspi_write(b53spi, B53SPI_MSPI_ENDQP,
- b53spi->read_offset + len - 1);
+ bcm53xxspi_write(b53spi, B53SPI_MSPI_ENDQP, len - 1);
if (cont)
bcm53xxspi_write(b53spi, B53SPI_MSPI_WRITE_LOCK, 1);
@@ -214,13 +209,11 @@ static void bcm53xxspi_buf_read(struct bcm53xxspi *b53spi, u8 *r_buf,
bcm53xxspi_write(b53spi, B53SPI_MSPI_WRITE_LOCK, 0);
for (i = 0; i < len; ++i) {
- int offset = b53spi->read_offset + i;
+ u16 reg = B53SPI_MSPI_RXRAM + 4 * (1 + i * 2);
/* Data stored in the transmit register file LSB */
- r_buf[i] = (u8)bcm53xxspi_read(b53spi, B53SPI_MSPI_RXRAM + 4 * (1 + offset * 2));
+ r_buf[i] = (u8)bcm53xxspi_read(b53spi, reg);
}
-
- b53spi->read_offset = 0;
}
static int bcm53xxspi_transfer_one(struct spi_master *master,
@@ -238,7 +231,8 @@ static int bcm53xxspi_transfer_one(struct spi_master *master,
left = t->len;
while (left) {
size_t to_write = min_t(size_t, 16, left);
- bool cont = left - to_write > 0;
+ bool cont = !spi_transfer_is_last(master, t) ||
+ left - to_write > 0;
bcm53xxspi_buf_write(b53spi, buf, to_write, cont);
left -= to_write;
@@ -250,9 +244,9 @@ static int bcm53xxspi_transfer_one(struct spi_master *master,
buf = (u8 *)t->rx_buf;
left = t->len;
while (left) {
- size_t to_read = min_t(size_t, 16 - b53spi->read_offset,
- left);
- bool cont = left - to_read > 0;
+ size_t to_read = min_t(size_t, 16, left);
+ bool cont = !spi_transfer_is_last(master, t) ||
+ left - to_read > 0;
bcm53xxspi_buf_read(b53spi, buf, to_read, cont);
left -= to_read;
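
With the stale read_offset state gone, the chip-select continuation rule is purely local to the loop; restated for illustration:

	/* Keep CS asserted ("cont") unless this is both the message's last
	 * transfer and the final 16-byte chunk of its buffer.
	 */
	bool cont = !spi_transfer_is_last(master, t) || left - to_read > 0;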
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 6ddb6ef1fda4..60d59b003aa4 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -945,6 +945,8 @@ static int davinci_spi_probe(struct platform_device *pdev)
goto free_master;
}
+ init_completion(&dspi->done);
+
ret = platform_get_irq(pdev, 0);
if (ret == 0)
ret = -EINVAL;
@@ -1021,8 +1023,6 @@ static int davinci_spi_probe(struct platform_device *pdev)
dspi->get_rx = davinci_spi_rx_buf_u8;
dspi->get_tx = davinci_spi_tx_buf_u8;
- init_completion(&dspi->done);
-
/* Reset In/OUT SPI module */
iowrite32(0, dspi->base + SPIGCR0);
udelay(100);
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index b217c22ff72f..211cc7d75bf8 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -30,13 +30,11 @@
/* Slave spi_dev related */
struct chip_data {
- u8 cs; /* chip select pin */
u8 tmode; /* TR/TO/RO/EEPROM */
u8 type; /* SPI/SSP/MicroWire */
u8 poll_mode; /* 1 means use poll mode */
- u8 enable_dma;
u16 clk_div; /* baud rate divider */
u32 speed_hz; /* baud rate */
void (*cs_control)(u32 command);
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index f652f70cb8db..0630962ce442 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -903,10 +903,9 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id)
}
static const struct of_device_id fsl_dspi_dt_ids[] = {
- { .compatible = "fsl,vf610-dspi", .data = (void *)&vf610_data, },
- { .compatible = "fsl,ls1021a-v1.0-dspi",
- .data = (void *)&ls1021a_v1_data, },
- { .compatible = "fsl,ls2085a-dspi", .data = (void *)&ls2085a_data, },
+ { .compatible = "fsl,vf610-dspi", .data = &vf610_data, },
+ { .compatible = "fsl,ls1021a-v1.0-dspi", .data = &ls1021a_v1_data, },
+ { .compatible = "fsl,ls2085a-dspi", .data = &ls2085a_data, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids);
@@ -980,7 +979,7 @@ static int dspi_probe(struct platform_device *pdev)
master->dev.of_node = pdev->dev.of_node;
master->cleanup = dspi_cleanup;
- master->mode_bits = SPI_CPOL | SPI_CPHA;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
master->bits_per_word_mask = SPI_BPW_MASK(4) | SPI_BPW_MASK(8) |
SPI_BPW_MASK(16);
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 79ddefe4180d..6f57592a7f95 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -1622,6 +1622,11 @@ static int spi_imx_probe(struct platform_device *pdev)
spi_imx->devtype_data->intctrl(spi_imx, 0);
master->dev.of_node = pdev->dev.of_node;
+ ret = spi_bitbang_start(&spi_imx->bitbang);
+ if (ret) {
+ dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
+ goto out_clk_put;
+ }
/* Request GPIO CS lines, if any */
if (!spi_imx->slave_mode && master->cs_gpios) {
@@ -1640,12 +1645,6 @@ static int spi_imx_probe(struct platform_device *pdev)
}
}
- ret = spi_bitbang_start(&spi_imx->bitbang);
- if (ret) {
- dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
- goto out_clk_put;
- }
-
dev_info(&pdev->dev, "probed\n");
clk_disable(spi_imx->clk_ipg);
@@ -1668,12 +1667,23 @@ static int spi_imx_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
+ int ret;
spi_bitbang_stop(&spi_imx->bitbang);
+ ret = clk_enable(spi_imx->clk_per);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(spi_imx->clk_ipg);
+ if (ret) {
+ clk_disable(spi_imx->clk_per);
+ return ret;
+ }
+
writel(0, spi_imx->base + MXC_CSPICTRL);
- clk_unprepare(spi_imx->clk_ipg);
- clk_unprepare(spi_imx->clk_per);
+ clk_disable_unprepare(spi_imx->clk_ipg);
+ clk_disable_unprepare(spi_imx->clk_per);
spi_imx_sdma_exit(spi_imx);
spi_master_put(master);
diff --git a/drivers/spi/spi-jcore.c b/drivers/spi/spi-jcore.c
index cebfea5faa4b..dafed6280df3 100644
--- a/drivers/spi/spi-jcore.c
+++ b/drivers/spi/spi-jcore.c
@@ -198,8 +198,10 @@ static int jcore_spi_probe(struct platform_device *pdev)
/* Register our spi controller */
err = devm_spi_register_master(&pdev->dev, master);
- if (err)
+ if (err) {
+ clk_disable(clk);
goto exit;
+ }
return 0;
diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
index 7f8429635502..5c82910e3480 100644
--- a/drivers/spi/spi-meson-spicc.c
+++ b/drivers/spi/spi-meson-spicc.c
@@ -599,6 +599,7 @@ static int meson_spicc_remove(struct platform_device *pdev)
static const struct of_device_id meson_spicc_of_match[] = {
{ .compatible = "amlogic,meson-gx-spicc", },
+ { .compatible = "amlogic,meson-axg-spicc", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, meson_spicc_of_match);
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index 8974bb340b3a..deca63e82ff6 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -94,6 +94,7 @@ struct orion_spi {
struct spi_master *master;
void __iomem *base;
struct clk *clk;
+ struct clk *axi_clk;
const struct orion_spi_dev *devdata;
struct orion_direct_acc direct_access[ORION_NUM_CHIPSELECTS];
@@ -634,6 +635,16 @@ static int orion_spi_probe(struct platform_device *pdev)
if (status)
goto out;
+ /* The following clock is only used by some SoCs */
+ spi->axi_clk = devm_clk_get(&pdev->dev, "axi");
+ if (IS_ERR(spi->axi_clk) &&
+ PTR_ERR(spi->axi_clk) == -EPROBE_DEFER) {
+ status = -EPROBE_DEFER;
+ goto out_rel_clk;
+ }
+ if (!IS_ERR(spi->axi_clk))
+ clk_prepare_enable(spi->axi_clk);
+
tclk_hz = clk_get_rate(spi->clk);
/*
@@ -658,7 +669,7 @@ static int orion_spi_probe(struct platform_device *pdev)
spi->base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(spi->base)) {
status = PTR_ERR(spi->base);
- goto out_rel_clk;
+ goto out_rel_axi_clk;
}
/* Scan all SPI devices of this controller for direct mapped devices */
@@ -696,7 +707,7 @@ static int orion_spi_probe(struct platform_device *pdev)
PAGE_SIZE);
if (!spi->direct_access[cs].vaddr) {
status = -ENOMEM;
- goto out_rel_clk;
+ goto out_rel_axi_clk;
}
spi->direct_access[cs].size = PAGE_SIZE;
@@ -724,6 +735,8 @@ static int orion_spi_probe(struct platform_device *pdev)
out_rel_pm:
pm_runtime_disable(&pdev->dev);
+out_rel_axi_clk:
+ clk_disable_unprepare(spi->axi_clk);
out_rel_clk:
clk_disable_unprepare(spi->clk);
out:
@@ -738,6 +751,7 @@ static int orion_spi_remove(struct platform_device *pdev)
struct orion_spi *spi = spi_master_get_devdata(master);
pm_runtime_get_sync(&pdev->dev);
+ clk_disable_unprepare(spi->axi_clk);
clk_disable_unprepare(spi->clk);
spi_unregister_master(master);
@@ -754,6 +768,7 @@ static int orion_spi_runtime_suspend(struct device *dev)
struct spi_master *master = dev_get_drvdata(dev);
struct orion_spi *spi = spi_master_get_devdata(master);
+ clk_disable_unprepare(spi->axi_clk);
clk_disable_unprepare(spi->clk);
return 0;
}
@@ -763,6 +778,8 @@ static int orion_spi_runtime_resume(struct device *dev)
struct spi_master *master = dev_get_drvdata(dev);
struct orion_spi *spi = spi_master_get_devdata(master);
+ if (!IS_ERR(spi->axi_clk))
+ clk_prepare_enable(spi->axi_clk);
return clk_prepare_enable(spi->clk);
}
#endif
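
The "axi" clock is optional, so only probe deferral is treated as fatal; the shape of the pattern, for illustration:

	/* Sketch: any error other than -EPROBE_DEFER is read as "this SoC
	 * has no such clock", and every later use is guarded by IS_ERR().
	 */
	spi->axi_clk = devm_clk_get(&pdev->dev, "axi");
	if (IS_ERR(spi->axi_clk) && PTR_ERR(spi->axi_clk) == -EPROBE_DEFER)
		return -EPROBE_DEFER;
	if (!IS_ERR(spi->axi_clk))
		clk_prepare_enable(spi->axi_clk);

Later kernels added devm_clk_get_optional() for exactly this case, but it was not available when this patch was written.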
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 4cb515a3104c..b0822d1dba29 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1237,7 +1237,7 @@ static int setup_cs(struct spi_device *spi, struct chip_data *chip,
* different chip_info, release previously requested GPIO
*/
if (chip->gpiod_cs) {
- gpio_free(desc_to_gpio(chip->gpiod_cs));
+ gpiod_put(chip->gpiod_cs);
chip->gpiod_cs = NULL;
}
@@ -1417,7 +1417,7 @@ static void cleanup(struct spi_device *spi)
if (drv_data->ssp_type != CE4100_SSP && !drv_data->cs_gpiods &&
chip->gpiod_cs)
- gpio_free(desc_to_gpio(chip->gpiod_cs));
+ gpiod_put(chip->gpiod_cs);
kfree(chip);
}
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index de7df20f8712..baa3a9fa2638 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -1,17 +1,7 @@
-/*
- * Copyright (C) 2009 Samsung Electronics Ltd.
- * Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (c) 2009 Samsung Electronics Co., Ltd.
+// Jaswinder Singh <jassi.brar@samsung.com>
#include <linux/init.h>
#include <linux/module.h>
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index fcd261f98b9f..c5dcfb434a49 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -19,6 +19,7 @@
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -55,9 +56,14 @@ struct sh_msiof_spi_priv {
void *rx_dma_page;
dma_addr_t tx_dma_addr;
dma_addr_t rx_dma_addr;
+ unsigned short unused_ss;
+ bool native_cs_inited;
+ bool native_cs_high;
bool slave_aborted;
};
+#define MAX_SS 3 /* Maximum number of native chip selects */
+
#define TMDR1 0x00 /* Transmit Mode Register 1 */
#define TMDR2 0x04 /* Transmit Mode Register 2 */
#define TMDR3 0x08 /* Transmit Mode Register 3 */
@@ -91,6 +97,8 @@ struct sh_msiof_spi_priv {
#define MDR1_XXSTP 0x00000001 /* Transmission/Reception Stop on FIFO */
/* TMDR1 */
#define TMDR1_PCON 0x40000000 /* Transfer Signal Connection */
+#define TMDR1_SYNCCH_MASK 0xc000000 /* Synchronization Signal Channel Select */
+#define TMDR1_SYNCCH_SHIFT 26 /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */
/* TMDR2 and RMDR2 */
#define MDR2_BITLEN1(i) (((i) - 1) << 24) /* Data Size (8-32 bits) */
@@ -324,7 +332,7 @@ static u32 sh_msiof_spi_get_dtdl_and_syncdl(struct sh_msiof_spi_priv *p)
return val;
}
-static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
+static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss,
u32 cpol, u32 cpha,
u32 tx_hi_z, u32 lsb_first, u32 cs_high)
{
@@ -342,10 +350,13 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
tmp |= !cs_high << MDR1_SYNCAC_SHIFT;
tmp |= lsb_first << MDR1_BITLSB_SHIFT;
tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p);
- if (spi_controller_is_slave(p->master))
+ if (spi_controller_is_slave(p->master)) {
sh_msiof_write(p, TMDR1, tmp | TMDR1_PCON);
- else
- sh_msiof_write(p, TMDR1, tmp | MDR1_TRMD | TMDR1_PCON);
+ } else {
+ sh_msiof_write(p, TMDR1,
+ tmp | MDR1_TRMD | TMDR1_PCON |
+ (ss < MAX_SS ? ss : 0) << TMDR1_SYNCCH_SHIFT);
+ }
if (p->master->flags & SPI_MASTER_MUST_TX) {
/* These bits are reserved if RX needs TX */
tmp &= ~0x0000ffff;
@@ -528,8 +539,7 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
{
struct device_node *np = spi->master->dev.of_node;
struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master);
-
- pm_runtime_get_sync(&p->pdev->dev);
+ u32 clr, set, tmp;
if (!np) {
/*
@@ -539,19 +549,31 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
spi->cs_gpio = (uintptr_t)spi->controller_data;
}
- /* Configure pins before deasserting CS */
- sh_msiof_spi_set_pin_regs(p, !!(spi->mode & SPI_CPOL),
- !!(spi->mode & SPI_CPHA),
- !!(spi->mode & SPI_3WIRE),
- !!(spi->mode & SPI_LSB_FIRST),
- !!(spi->mode & SPI_CS_HIGH));
+ if (gpio_is_valid(spi->cs_gpio)) {
+ gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
+ return 0;
+ }
- if (spi->cs_gpio >= 0)
- gpio_set_value(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
+ if (spi_controller_is_slave(p->master))
+ return 0;
+ if (p->native_cs_inited &&
+ (p->native_cs_high == !!(spi->mode & SPI_CS_HIGH)))
+ return 0;
+ /* Configure native chip select mode/polarity early */
+ clr = MDR1_SYNCMD_MASK;
+ set = MDR1_TRMD | TMDR1_PCON | MDR1_SYNCMD_SPI;
+ if (spi->mode & SPI_CS_HIGH)
+ clr |= BIT(MDR1_SYNCAC_SHIFT);
+ else
+ set |= BIT(MDR1_SYNCAC_SHIFT);
+ pm_runtime_get_sync(&p->pdev->dev);
+ tmp = sh_msiof_read(p, TMDR1) & ~clr;
+ sh_msiof_write(p, TMDR1, tmp | set);
pm_runtime_put(&p->pdev->dev);
-
+ p->native_cs_high = spi->mode & SPI_CS_HIGH;
+ p->native_cs_inited = true;
return 0;
}
@@ -560,13 +582,20 @@ static int sh_msiof_prepare_message(struct spi_master *master,
{
struct sh_msiof_spi_priv *p = spi_master_get_devdata(master);
const struct spi_device *spi = msg->spi;
+ u32 ss, cs_high;
/* Configure pins before asserting CS */
- sh_msiof_spi_set_pin_regs(p, !!(spi->mode & SPI_CPOL),
+ if (gpio_is_valid(spi->cs_gpio)) {
+ ss = p->unused_ss;
+ cs_high = p->native_cs_high;
+ } else {
+ ss = spi->chip_select;
+ cs_high = !!(spi->mode & SPI_CS_HIGH);
+ }
+ sh_msiof_spi_set_pin_regs(p, ss, !!(spi->mode & SPI_CPOL),
!!(spi->mode & SPI_CPHA),
!!(spi->mode & SPI_3WIRE),
- !!(spi->mode & SPI_LSB_FIRST),
- !!(spi->mode & SPI_CS_HIGH));
+ !!(spi->mode & SPI_LSB_FIRST), cs_high);
return 0;
}
@@ -784,11 +813,21 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
goto stop_dma;
}
- /* wait for tx fifo to be emptied / rx fifo to be filled */
+ /* wait for tx/rx DMA completion */
ret = sh_msiof_wait_for_completion(p);
if (ret)
goto stop_reset;
+ if (!rx) {
+ reinit_completion(&p->done);
+ sh_msiof_write(p, IER, IER_TEOFE);
+
+ /* wait for tx fifo to be emptied */
+ ret = sh_msiof_wait_for_completion(p);
+ if (ret)
+ goto stop_reset;
+ }
+
/* clear status bits */
sh_msiof_reset_str(p);
@@ -912,9 +951,8 @@ static int sh_msiof_transfer_one(struct spi_master *master,
ret = sh_msiof_dma_once(p, tx_buf, rx_buf, l);
if (ret == -EAGAIN) {
- pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
- dev_driver_string(&p->pdev->dev),
- dev_name(&p->pdev->dev));
+ dev_warn_once(&p->pdev->dev,
+ "DMA not available, falling back to PIO\n");
break;
}
if (ret)
@@ -1071,6 +1109,45 @@ static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
}
#endif
+static int sh_msiof_get_cs_gpios(struct sh_msiof_spi_priv *p)
+{
+ struct device *dev = &p->pdev->dev;
+ unsigned int used_ss_mask = 0;
+ unsigned int cs_gpios = 0;
+ unsigned int num_cs, i;
+ int ret;
+
+ ret = gpiod_count(dev, "cs");
+ if (ret <= 0)
+ return 0;
+
+ num_cs = max_t(unsigned int, ret, p->master->num_chipselect);
+ for (i = 0; i < num_cs; i++) {
+ struct gpio_desc *gpiod;
+
+ gpiod = devm_gpiod_get_index(dev, "cs", i, GPIOD_ASIS);
+ if (!IS_ERR(gpiod)) {
+ cs_gpios++;
+ continue;
+ }
+
+ if (PTR_ERR(gpiod) != -ENOENT)
+ return PTR_ERR(gpiod);
+
+ if (i >= MAX_SS) {
+ dev_err(dev, "Invalid native chip select %d\n", i);
+ return -EINVAL;
+ }
+ used_ss_mask |= BIT(i);
+ }
+ p->unused_ss = ffz(used_ss_mask);
+ if (cs_gpios && p->unused_ss >= MAX_SS) {
+ dev_err(dev, "No unused native chip select available\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
static struct dma_chan *sh_msiof_request_dma_chan(struct device *dev,
enum dma_transfer_direction dir, unsigned int id, dma_addr_t port_addr)
{
@@ -1284,13 +1361,18 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
if (p->info->rx_fifo_override)
p->rx_fifo_size = p->info->rx_fifo_override;
+ /* Setup GPIO chip selects */
+ master->num_chipselect = p->info->num_chipselect;
+ ret = sh_msiof_get_cs_gpios(p);
+ if (ret)
+ goto err1;
+
/* init master code */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE;
master->flags = chipdata->master_flags;
master->bus_num = pdev->id;
master->dev.of_node = pdev->dev.of_node;
- master->num_chipselect = p->info->num_chipselect;
master->setup = sh_msiof_spi_setup;
master->prepare_message = sh_msiof_prepare_message;
master->slave_abort = sh_msiof_slave_abort;
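
sh_msiof_get_cs_gpios() above encodes a small reusable idiom: native chip selects that have been taken over by GPIOs are collected into a bitmask, and ffz() (find first zero bit) picks a native line that is still free to drive as the dummy sync channel. A standalone sketch of that selection step (constant name hypothetical, three native lines as in the driver):

    #include <linux/bitops.h>
    #include <linux/errno.h>

    #define EX_MAX_SS 3     /* assumption: three native chip selects */

    /* Sketch: pick the first native chip select not claimed by a GPIO. */
    static int example_pick_unused_ss(unsigned int used_ss_mask)
    {
            unsigned int unused = ffz(used_ss_mask);

            return unused < EX_MAX_SS ? (int)unused : -EINVAL;
    }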
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index bbb1a275f718..f009d76f96b1 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -1072,7 +1072,7 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
struct sirfsoc_spi *sspi;
struct spi_master *master;
struct resource *mem_res;
- struct sirf_spi_comp_data *spi_comp_data;
+ const struct sirf_spi_comp_data *spi_comp_data;
int irq;
int ret;
const struct of_device_id *match;
@@ -1092,7 +1092,7 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
sspi = spi_master_get_devdata(master);
sspi->fifo_full_offset = ilog2(sspi->fifo_size);
- spi_comp_data = (struct sirf_spi_comp_data *)match->data;
+ spi_comp_data = match->data;
sspi->regs = spi_comp_data->regs;
sspi->type = spi_comp_data->type;
sspi->fifo_level_chk_mask = (sspi->fifo_size / 4) - 1;
diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
index fb38234249a8..8533f4edd00a 100644
--- a/drivers/spi/spi-sun6i.c
+++ b/drivers/spi/spi-sun6i.c
@@ -541,7 +541,7 @@ err_free_master:
static int sun6i_spi_remove(struct platform_device *pdev)
{
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_force_suspend(&pdev->dev);
return 0;
}
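
The one-line sun6i change is more than cosmetic: pm_runtime_force_suspend() not only disables runtime PM, it also invokes the driver's runtime-suspend callback if the device is still active, so clocks enabled there are actually released on removal. A hedged sketch of the resulting remove path:

    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>

    /* Sketch: let the runtime-suspend callback do the teardown on remove. */
    static int example_remove(struct platform_device *pdev)
    {
            /* runs ->runtime_suspend() if the device is not suspended yet */
            pm_runtime_force_suspend(&pdev->dev);
            return 0;
    }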
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index e0b9fe1d0e37..63fedc49ae9c 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -381,6 +381,7 @@ static int xilinx_spi_find_buffer_size(struct xilinx_spi *xspi)
}
static const struct of_device_id xilinx_spi_of_match[] = {
+ { .compatible = "xlnx,axi-quad-spi-1.00.a", },
{ .compatible = "xlnx,xps-spi-2.00.a", },
{ .compatible = "xlnx,xps-spi-2.00.b", },
{}
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index e19e395b0e44..491b54d986eb 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -2276,9 +2276,9 @@ done:
return retval;
}
-static unsigned int comedi_poll(struct file *file, poll_table *wait)
+static __poll_t comedi_poll(struct file *file, poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
struct comedi_file *cfp = file->private_data;
struct comedi_device *dev = cfp->dev;
struct comedi_subdevice *s, *s_read;
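
The comedi hunk is the first of many identical conversions in this series: poll handlers now return __poll_t, a sparse-annotated bitwise type, so mixing poll masks with plain integers is flagged at static-analysis time rather than silently accepted. A self-contained sketch of a conforming handler (the device structure is hypothetical):

    #include <linux/fs.h>
    #include <linux/poll.h>
    #include <linux/wait.h>

    struct example_dev {
            wait_queue_head_t waitq;
            bool data_ready;
    };

    /* Sketch: __poll_t used consistently for the mask and return value. */
    static __poll_t example_poll(struct file *file, poll_table *wait)
    {
            struct example_dev *dev = file->private_data;
            __poll_t mask = 0;

            poll_wait(file, &dev->waitq, wait);
            if (dev->data_ready)
                    mask |= POLLIN | POLLRDNORM;
            return mask;
    }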
diff --git a/drivers/staging/comedi/drivers/serial2002.c b/drivers/staging/comedi/drivers/serial2002.c
index cc18e25103ca..a557be8a5076 100644
--- a/drivers/staging/comedi/drivers/serial2002.c
+++ b/drivers/staging/comedi/drivers/serial2002.c
@@ -119,7 +119,7 @@ static void serial2002_tty_read_poll_wait(struct file *f, int timeout)
poll_initwait(&table);
while (1) {
long elapsed;
- int mask;
+ __poll_t mask;
mask = f->f_op->poll(f, &table.pt);
if (mask & (POLLRDNORM | POLLRDBAND | POLLIN |
diff --git a/drivers/staging/irda/net/af_irda.c b/drivers/staging/irda/net/af_irda.c
index b82a47b9ef0b..f1d128b2dae9 100644
--- a/drivers/staging/irda/net/af_irda.c
+++ b/drivers/staging/irda/net/af_irda.c
@@ -1737,12 +1737,12 @@ static int irda_shutdown(struct socket *sock, int how)
/*
* Function irda_poll (file, sock, wait)
*/
-static unsigned int irda_poll(struct file * file, struct socket *sock,
+static __poll_t irda_poll(struct file * file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
struct irda_sock *self = irda_sk(sk);
- unsigned int mask;
+ __poll_t mask;
poll_wait(file, sk_sleep(sk), wait);
mask = 0;
diff --git a/drivers/staging/irda/net/irnet/irnet_ppp.c b/drivers/staging/irda/net/irnet/irnet_ppp.c
index 7025dcb853d0..75bf9e34311d 100644
--- a/drivers/staging/irda/net/irnet/irnet_ppp.c
+++ b/drivers/staging/irda/net/irnet/irnet_ppp.c
@@ -419,12 +419,12 @@ irnet_ctrl_read(irnet_socket * ap,
* Poll : called when someone does a select on /dev/irnet.
* Just check if there are new events...
*/
-static inline unsigned int
+static inline __poll_t
irnet_ctrl_poll(irnet_socket * ap,
struct file * file,
poll_table * wait)
{
- unsigned int mask;
+ __poll_t mask;
DENTER(CTRL_TRACE, "(ap=0x%p)\n", ap);
@@ -608,12 +608,12 @@ dev_irnet_read(struct file * file,
/*
* Poll : called when someone does a select on /dev/irnet
*/
-static unsigned int
+static __poll_t
dev_irnet_poll(struct file * file,
poll_table * wait)
{
irnet_socket * ap = file->private_data;
- unsigned int mask;
+ __poll_t mask;
DENTER(FS_TRACE, "(file=0x%p, ap=0x%p)\n",
file, ap);
diff --git a/drivers/staging/irda/net/irnet/irnet_ppp.h b/drivers/staging/irda/net/irnet/irnet_ppp.h
index 32061442cc8e..e6d5aa2a8aac 100644
--- a/drivers/staging/irda/net/irnet/irnet_ppp.h
+++ b/drivers/staging/irda/net/irnet/irnet_ppp.h
@@ -70,7 +70,7 @@ static ssize_t
char __user *,
size_t,
loff_t *);
-static unsigned int
+static __poll_t
dev_irnet_poll(struct file *,
poll_table *);
static long
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c
index 2e5d311d2438..db81ed527452 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto-adler.c
@@ -120,6 +120,7 @@ static struct shash_alg alg = {
.cra_name = "adler32",
.cra_driver_name = "adler32-zlib",
.cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_ctxsize = sizeof(u32),
.cra_module = THIS_MODULE,
diff --git a/drivers/staging/lustre/lnet/lnet/lib-socket.c b/drivers/staging/lustre/lnet/lnet/lib-socket.c
index 7d49d4865298..ed46aaca0ba3 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-socket.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-socket.c
@@ -314,19 +314,20 @@ lnet_sock_read(struct socket *sock, void *buffer, int nob, int timeout)
long jiffies_left = timeout * msecs_to_jiffies(MSEC_PER_SEC);
unsigned long then;
struct timeval tv;
+ struct kvec iov = {
+ .iov_base = buffer,
+ .iov_len = nob
+ };
+ struct msghdr msg = {
+ .msg_flags = 0
+ };
LASSERT(nob > 0);
LASSERT(jiffies_left > 0);
- for (;;) {
- struct kvec iov = {
- .iov_base = buffer,
- .iov_len = nob
- };
- struct msghdr msg = {
- .msg_flags = 0
- };
+ iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, nob);
+ for (;;) {
/* Set receive timeout to remaining time */
jiffies_to_timeval(jiffies_left, &tv);
rc = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO,
@@ -338,7 +339,7 @@ lnet_sock_read(struct socket *sock, void *buffer, int nob, int timeout)
}
then = jiffies;
- rc = kernel_recvmsg(sock, &msg, &iov, 1, nob, 0);
+ rc = sock_recvmsg(sock, &msg, 0);
jiffies_left -= jiffies - then;
if (rc < 0)
@@ -347,10 +348,7 @@ lnet_sock_read(struct socket *sock, void *buffer, int nob, int timeout)
if (!rc)
return -ECONNRESET;
- buffer = ((char *)buffer) + rc;
- nob -= rc;
-
- if (!nob)
+ if (!msg_data_left(&msg))
return 0;
if (jiffies_left <= 0)
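
The lib-socket rewrite removes the hand-rolled buffer/nob arithmetic: once the kvec is attached to msg.msg_iter, sock_recvmsg() advances the iterator itself and msg_data_left() reports how much is still outstanding. A sketch of the same loop without the timeout handling (era-appropriate iov_iter_kvec() signature, as in the hunk above):

    #include <linux/errno.h>
    #include <linux/net.h>
    #include <linux/uio.h>

    /* Sketch: read exactly 'len' bytes, letting the iterator track progress. */
    static int example_sock_read(struct socket *sock, void *buf, size_t len)
    {
            struct kvec iov = { .iov_base = buf, .iov_len = len };
            struct msghdr msg = { .msg_flags = 0 };
            int rc;

            iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, len);
            while (msg_data_left(&msg)) {
                    rc = sock_recvmsg(sock, &msg, 0);
                    if (rc < 0)
                            return rc;
                    if (!rc)
                            return -ECONNRESET;     /* peer closed mid-read */
            }
            return 0;
    }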
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 5b2e47c246f3..6f59045be0f9 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -369,8 +369,6 @@ static int ll_readdir(struct file *filp, struct dir_context *ctx)
}
ctx->pos = pos;
ll_finish_md_op_data(op_data);
- filp->f_version = inode->i_version;
-
out:
if (!rc)
ll_stats_ops_tally(sbi, LPROC_LL_READDIR, 1);
@@ -1678,7 +1676,6 @@ static loff_t ll_dir_seek(struct file *file, loff_t offset, int origin)
else
fd->lfd_pos = offset;
file->f_pos = offset;
- file->f_version = 0;
}
ret = offset;
}
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index b133fd00c08c..0d62fcf016dc 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -1296,15 +1296,7 @@ static inline void d_lustre_invalidate(struct dentry *dentry, int nested)
spin_lock_nested(&dentry->d_lock,
nested ? DENTRY_D_LOCK_NESTED : DENTRY_D_LOCK_NORMAL);
ll_d2d(dentry)->lld_invalid = 1;
- /*
- * We should be careful about dentries created by d_obtain_alias().
- * These dentries are not put in the dentry tree, instead they are
- * linked to sb->s_anon through dentry->d_hash.
- * shrink_dcache_for_umount() shrinks the tree and sb->s_anon list.
- * If we unhashed such a dentry, unmount would not be able to find
- * it and busy inodes would be reported.
- */
- if (d_count(dentry) == 0 && !(dentry->d_flags & DCACHE_DISCONNECTED))
+ if (d_count(dentry) == 0)
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c
index dd7596d8763d..6657ebbe068a 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c
@@ -1255,7 +1255,7 @@ static int atomisp_file_mmap(struct file *file, struct vm_area_struct *vma)
return videobuf_mmap_mapper(&pipe->outq, vma);
}
-static unsigned int atomisp_poll(struct file *file,
+static __poll_t atomisp_poll(struct file *file,
struct poll_table_struct *pt)
{
struct video_device *vdev = video_devdata(file);
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
index 5d3b0e5a1283..4ffff6f8b809 100644
--- a/drivers/staging/media/bcm2048/radio-bcm2048.c
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
@@ -2174,11 +2174,11 @@ static int bcm2048_fops_release(struct file *file)
return 0;
}
-static unsigned int bcm2048_fops_poll(struct file *file,
+static __poll_t bcm2048_fops_poll(struct file *file,
struct poll_table_struct *pts)
{
struct bcm2048_device *bdev = video_drvdata(file);
- int retval = 0;
+ __poll_t retval = 0;
poll_wait(file, &bdev->read_queue, pts);
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index 155e8c758e4b..588743a6fd8a 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -573,7 +573,7 @@ static int vpfe_mmap(struct file *file, struct vm_area_struct *vma)
/*
* vpfe_poll() - It is used for select/poll system call
*/
-static unsigned int vpfe_poll(struct file *file, poll_table *wait)
+static __poll_t vpfe_poll(struct file *file, poll_table *wait)
{
struct vpfe_video_device *video = video_drvdata(file);
struct vpfe_device *vpfe_dev = video->vpfe_dev;
diff --git a/drivers/staging/media/lirc/lirc_zilog.c b/drivers/staging/media/lirc/lirc_zilog.c
index 6bd0717bf76e..a003603af725 100644
--- a/drivers/staging/media/lirc/lirc_zilog.c
+++ b/drivers/staging/media/lirc/lirc_zilog.c
@@ -1197,12 +1197,12 @@ static ssize_t write(struct file *filep, const char __user *buf, size_t n,
}
/* copied from lirc_dev */
-static unsigned int poll(struct file *filep, poll_table *wait)
+static __poll_t poll(struct file *filep, poll_table *wait)
{
struct IR *ir = lirc_get_pdata(filep);
struct IR_rx *rx;
struct lirc_buffer *rbuf = ir->l->buf;
- unsigned int ret;
+ __poll_t ret;
dev_dbg(ir->dev, "%s called\n", __func__);
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index 9e2f0421a01e..a3a83424a926 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -1185,7 +1185,7 @@ static int iss_video_release(struct file *file)
return 0;
}
-static unsigned int iss_video_poll(struct file *file, poll_table *wait)
+static __poll_t iss_video_poll(struct file *file, poll_table *wait)
{
struct iss_video_fh *vfh = to_iss_video_fh(file->private_data);
diff --git a/drivers/staging/most/aim-cdev/cdev.c b/drivers/staging/most/aim-cdev/cdev.c
index 1e5cbc893496..69f530972273 100644
--- a/drivers/staging/most/aim-cdev/cdev.c
+++ b/drivers/staging/most/aim-cdev/cdev.c
@@ -287,10 +287,10 @@ aim_read(struct file *filp, char __user *buf, size_t count, loff_t *offset)
return copied;
}
-static unsigned int aim_poll(struct file *filp, poll_table *wait)
+static __poll_t aim_poll(struct file *filp, poll_table *wait)
{
struct aim_channel *c = filp->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(filp, &c->wq, wait);
diff --git a/drivers/staging/most/aim-v4l2/video.c b/drivers/staging/most/aim-v4l2/video.c
index e0748416aee5..7783bc2dd9f5 100644
--- a/drivers/staging/most/aim-v4l2/video.c
+++ b/drivers/staging/most/aim-v4l2/video.c
@@ -209,11 +209,11 @@ static ssize_t aim_vdev_read(struct file *filp, char __user *buf,
return ret;
}
-static unsigned int aim_vdev_poll(struct file *filp, poll_table *wait)
+static __poll_t aim_vdev_poll(struct file *filp, poll_table *wait)
{
struct aim_fh *fh = filp->private_data;
struct most_video_dev *mdev = fh->mdev;
- unsigned int mask = 0;
+ __poll_t mask = 0;
/* only wait if no data is available */
if (!data_ready(mdev))
diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.c b/drivers/staging/mt29f_spinand/mt29f_spinand.c
index 87595c594b12..264ad362d858 100644
--- a/drivers/staging/mt29f_spinand/mt29f_spinand.c
+++ b/drivers/staging/mt29f_spinand/mt29f_spinand.c
@@ -637,8 +637,7 @@ static int spinand_write_page_hwecc(struct mtd_info *mtd,
int eccsteps = chip->ecc.steps;
enable_hw_ecc = 1;
- chip->write_buf(mtd, p, eccsize * eccsteps);
- return 0;
+ return nand_prog_page_op(chip, page, 0, p, eccsize * eccsteps);
}
static int spinand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
@@ -653,7 +652,7 @@ static int spinand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
enable_read_hw_ecc = 1;
- chip->read_buf(mtd, p, eccsize * eccsteps);
+ nand_read_page_op(chip, page, 0, p, eccsize * eccsteps);
if (oob_required)
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
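
Both spinand hunks replace raw chip->write_buf()/read_buf() calls with the generic page helpers, which issue the complete command/address/data sequence for the operation instead of assuming an earlier command cycle. A hedged sketch of the write side (signatures as of this kernel generation):

    #include <linux/mtd/rawnand.h>

    /* Sketch: nand_prog_page_op() wraps the full PAGEPROG sequence. */
    static int example_write_page(struct nand_chip *chip, unsigned int page,
                                  const void *buf, unsigned int len)
    {
            return nand_prog_page_op(chip, page, 0, buf, len);
    }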
diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
index d99daf69e501..585c6aa124cd 100644
--- a/drivers/staging/speakup/speakup_soft.c
+++ b/drivers/staging/speakup/speakup_soft.c
@@ -326,10 +326,10 @@ static ssize_t softsynth_write(struct file *fp, const char __user *buf,
return count;
}
-static unsigned int softsynth_poll(struct file *fp, struct poll_table_struct *wait)
+static __poll_t softsynth_poll(struct file *fp, struct poll_table_struct *wait)
{
unsigned long flags;
- int ret = 0;
+ __poll_t ret = 0;
poll_wait(fp, &speakup_event, wait);
diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
index a3d4610fbdbe..4c8c6fa0a79f 100644
--- a/drivers/staging/vme/devices/vme_user.c
+++ b/drivers/staging/vme/devices/vme_user.c
@@ -134,7 +134,7 @@ static ssize_t resource_to_user(int minor, char __user *buf, size_t count,
if (copied < 0)
return (int)copied;
- if (__copy_to_user(buf, image[minor].kern_buf, (unsigned long)copied))
+ if (copy_to_user(buf, image[minor].kern_buf, (unsigned long)copied))
return -EFAULT;
return copied;
@@ -146,7 +146,7 @@ static ssize_t resource_from_user(unsigned int minor, const char __user *buf,
if (count > image[minor].size_buf)
count = image[minor].size_buf;
- if (__copy_from_user(image[minor].kern_buf, buf, (unsigned long)count))
+ if (copy_from_user(image[minor].kern_buf, buf, (unsigned long)count))
return -EFAULT;
return vme_master_write(image[minor].resource, image[minor].kern_buf,
@@ -159,7 +159,7 @@ static ssize_t buffer_to_user(unsigned int minor, char __user *buf,
void *image_ptr;
image_ptr = image[minor].kern_buf + *ppos;
- if (__copy_to_user(buf, image_ptr, (unsigned long)count))
+ if (copy_to_user(buf, image_ptr, (unsigned long)count))
return -EFAULT;
return count;
@@ -171,7 +171,7 @@ static ssize_t buffer_from_user(unsigned int minor, const char __user *buf,
void *image_ptr;
image_ptr = image[minor].kern_buf + *ppos;
- if (__copy_from_user(image_ptr, buf, (unsigned long)count))
+ if (copy_from_user(image_ptr, buf, (unsigned long)count))
return -EFAULT;
return count;
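
The vme_user conversions follow from what the underscore prefix means: __copy_to_user()/__copy_from_user() assume the caller has already validated the user pointer with access_ok(), while the plain variants perform that range check themselves. Since these handlers never called access_ok(), the plain forms are the correct ones. A sketch of the safe default:

    #include <linux/errno.h>
    #include <linux/uaccess.h>

    /* Sketch: copy_to_user() validates the user pointer itself. */
    static ssize_t example_to_user(char __user *buf, const void *kbuf,
                                   size_t count)
    {
            if (copy_to_user(buf, kbuf, count))
                    return -EFAULT;
            return count;
    }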
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
index e2bc99980f75..4c44d7bed01a 100644
--- a/drivers/target/Kconfig
+++ b/drivers/target/Kconfig
@@ -5,6 +5,7 @@ menuconfig TARGET_CORE
select CONFIGFS_FS
select CRC_T10DIF
select BLK_SCSI_REQUEST # only for scsi_command_size_tbl..
+ select SGL_ALLOC
default n
help
Say Y or M here to enable the TCM Storage Engine and ConfigFS enabled
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 58caacd54a3b..c03a78ee26cd 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -2300,13 +2300,7 @@ queue_full:
void target_free_sgl(struct scatterlist *sgl, int nents)
{
- struct scatterlist *sg;
- int count;
-
- for_each_sg(sgl, sg, nents, count)
- __free_page(sg_page(sg));
-
- kfree(sgl);
+ sgl_free_n_order(sgl, nents, 0);
}
EXPORT_SYMBOL(target_free_sgl);
@@ -2414,42 +2408,10 @@ int
target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
bool zero_page, bool chainable)
{
- struct scatterlist *sg;
- struct page *page;
- gfp_t zero_flag = (zero_page) ? __GFP_ZERO : 0;
- unsigned int nalloc, nent;
- int i = 0;
-
- nalloc = nent = DIV_ROUND_UP(length, PAGE_SIZE);
- if (chainable)
- nalloc++;
- sg = kmalloc_array(nalloc, sizeof(struct scatterlist), GFP_KERNEL);
- if (!sg)
- return -ENOMEM;
+ gfp_t gfp = GFP_KERNEL | (zero_page ? __GFP_ZERO : 0);
- sg_init_table(sg, nalloc);
-
- while (length) {
- u32 page_len = min_t(u32, length, PAGE_SIZE);
- page = alloc_page(GFP_KERNEL | zero_flag);
- if (!page)
- goto out;
-
- sg_set_page(&sg[i], page, page_len, 0);
- length -= page_len;
- i++;
- }
- *sgl = sg;
- *nents = nent;
- return 0;
-
-out:
- while (i > 0) {
- i--;
- __free_page(sg_page(&sg[i]));
- }
- kfree(sg);
- return -ENOMEM;
+ *sgl = sgl_alloc_order(length, 0, chainable, gfp, nents);
+ return *sgl ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(target_alloc_sgl);
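
With the Kconfig select above in place, the target core can delegate scatterlist construction to the generic allocator: sgl_alloc_order() allocates order-0 pages until 'length' is covered (optionally leaving room for chaining) and sgl_free_n_order() is its exact inverse. A compact sketch of the pairing:

    #include <linux/gfp.h>
    #include <linux/scatterlist.h>

    /* Sketch: order-0 allocation/free via the SGL_ALLOC helpers. */
    static struct scatterlist *example_alloc_sgl(u32 length, unsigned int *nents,
                                                 bool zero, bool chainable)
    {
            gfp_t gfp = GFP_KERNEL | (zero ? __GFP_ZERO : 0);

            return sgl_alloc_order(length, 0, chainable, gfp, nents);
    }

    static void example_free_sgl(struct scatterlist *sgl, int nents)
    {
            sgl_free_n_order(sgl, nents, 0);
    }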
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index dc63aba092e4..dfd23245f778 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -88,7 +88,6 @@ struct time_in_idle {
* @policy: cpufreq policy.
* @node: list_head to link all cpufreq_cooling_device together.
* @idle_time: idle time stats
- * @plat_get_static_power: callback to calculate the static power
*
* This structure is required for keeping information of each registered
* cpufreq_cooling_device.
@@ -104,7 +103,6 @@ struct cpufreq_cooling_device {
struct cpufreq_policy *policy;
struct list_head node;
struct time_in_idle *idle_time;
- get_static_t plat_get_static_power;
};
static DEFINE_IDA(cpufreq_ida);
@@ -319,60 +317,6 @@ static u32 get_load(struct cpufreq_cooling_device *cpufreq_cdev, int cpu,
}
/**
- * get_static_power() - calculate the static power consumed by the cpus
- * @cpufreq_cdev: struct &cpufreq_cooling_device for this cpu cdev
- * @tz: thermal zone device in which we're operating
- * @freq: frequency in KHz
- * @power: pointer in which to store the calculated static power
- *
- * Calculate the static power consumed by the cpus described by
- * @cpu_actor running at frequency @freq. This function relies on a
- * platform specific function that should have been provided when the
- * actor was registered. If it wasn't, the static power is assumed to
- * be negligible. The calculated static power is stored in @power.
- *
- * Return: 0 on success, -E* on failure.
- */
-static int get_static_power(struct cpufreq_cooling_device *cpufreq_cdev,
- struct thermal_zone_device *tz, unsigned long freq,
- u32 *power)
-{
- struct dev_pm_opp *opp;
- unsigned long voltage;
- struct cpufreq_policy *policy = cpufreq_cdev->policy;
- struct cpumask *cpumask = policy->related_cpus;
- unsigned long freq_hz = freq * 1000;
- struct device *dev;
-
- if (!cpufreq_cdev->plat_get_static_power) {
- *power = 0;
- return 0;
- }
-
- dev = get_cpu_device(policy->cpu);
- WARN_ON(!dev);
-
- opp = dev_pm_opp_find_freq_exact(dev, freq_hz, true);
- if (IS_ERR(opp)) {
- dev_warn_ratelimited(dev, "Failed to find OPP for frequency %lu: %ld\n",
- freq_hz, PTR_ERR(opp));
- return -EINVAL;
- }
-
- voltage = dev_pm_opp_get_voltage(opp);
- dev_pm_opp_put(opp);
-
- if (voltage == 0) {
- dev_err_ratelimited(dev, "Failed to get voltage for frequency %lu\n",
- freq_hz);
- return -EINVAL;
- }
-
- return cpufreq_cdev->plat_get_static_power(cpumask, tz->passive_delay,
- voltage, power);
-}
-
-/**
* get_dynamic_power() - calculate the dynamic power
* @cpufreq_cdev: &cpufreq_cooling_device for this cdev
* @freq: current frequency
@@ -491,8 +435,8 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
u32 *power)
{
unsigned long freq;
- int i = 0, cpu, ret;
- u32 static_power, dynamic_power, total_load = 0;
+ int i = 0, cpu;
+ u32 total_load = 0;
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
struct cpufreq_policy *policy = cpufreq_cdev->policy;
u32 *load_cpu = NULL;
@@ -522,22 +466,15 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
cpufreq_cdev->last_load = total_load;
- dynamic_power = get_dynamic_power(cpufreq_cdev, freq);
- ret = get_static_power(cpufreq_cdev, tz, freq, &static_power);
- if (ret) {
- kfree(load_cpu);
- return ret;
- }
+ *power = get_dynamic_power(cpufreq_cdev, freq);
if (load_cpu) {
trace_thermal_power_cpu_get_power(policy->related_cpus, freq,
- load_cpu, i, dynamic_power,
- static_power);
+ load_cpu, i, *power);
kfree(load_cpu);
}
- *power = static_power + dynamic_power;
return 0;
}
@@ -561,8 +498,6 @@ static int cpufreq_state2power(struct thermal_cooling_device *cdev,
unsigned long state, u32 *power)
{
unsigned int freq, num_cpus;
- u32 static_power, dynamic_power;
- int ret;
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
/* Request state should be less than max_level */
@@ -572,13 +507,9 @@ static int cpufreq_state2power(struct thermal_cooling_device *cdev,
num_cpus = cpumask_weight(cpufreq_cdev->policy->cpus);
freq = cpufreq_cdev->freq_table[state].frequency;
- dynamic_power = cpu_freq_to_power(cpufreq_cdev, freq) * num_cpus;
- ret = get_static_power(cpufreq_cdev, tz, freq, &static_power);
- if (ret)
- return ret;
+ *power = cpu_freq_to_power(cpufreq_cdev, freq) * num_cpus;
- *power = static_power + dynamic_power;
- return ret;
+ return 0;
}
/**
@@ -606,21 +537,14 @@ static int cpufreq_power2state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
unsigned int cur_freq, target_freq;
- int ret;
- s32 dyn_power;
- u32 last_load, normalised_power, static_power;
+ u32 last_load, normalised_power;
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
struct cpufreq_policy *policy = cpufreq_cdev->policy;
cur_freq = cpufreq_quick_get(policy->cpu);
- ret = get_static_power(cpufreq_cdev, tz, cur_freq, &static_power);
- if (ret)
- return ret;
-
- dyn_power = power - static_power;
- dyn_power = dyn_power > 0 ? dyn_power : 0;
+ power = power > 0 ? power : 0;
last_load = cpufreq_cdev->last_load ?: 1;
- normalised_power = (dyn_power * 100) / last_load;
+ normalised_power = (power * 100) / last_load;
target_freq = cpu_power_to_freq(cpufreq_cdev, normalised_power);
*state = get_level(cpufreq_cdev, target_freq);
@@ -671,8 +595,6 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table,
* @policy: cpufreq policy
* Normally this should be the same as cpufreq policy->related_cpus.
* @capacitance: dynamic power coefficient for these cpus
- * @plat_static_func: function to calculate the static power consumed by these
- * cpus (optional)
*
* This interface function registers the cpufreq cooling device with the name
* "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
@@ -684,8 +606,7 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table,
*/
static struct thermal_cooling_device *
__cpufreq_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy, u32 capacitance,
- get_static_t plat_static_func)
+ struct cpufreq_policy *policy, u32 capacitance)
{
struct thermal_cooling_device *cdev;
struct cpufreq_cooling_device *cpufreq_cdev;
@@ -755,8 +676,6 @@ __cpufreq_cooling_register(struct device_node *np,
}
if (capacitance) {
- cpufreq_cdev->plat_get_static_power = plat_static_func;
-
ret = update_freq_table(cpufreq_cdev, capacitance);
if (ret) {
cdev = ERR_PTR(ret);
@@ -813,13 +732,12 @@ free_cdev:
struct thermal_cooling_device *
cpufreq_cooling_register(struct cpufreq_policy *policy)
{
- return __cpufreq_cooling_register(NULL, policy, 0, NULL);
+ return __cpufreq_cooling_register(NULL, policy, 0);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
/**
* of_cpufreq_cooling_register - function to create cpufreq cooling device.
- * @np: a valid struct device_node to the cooling device device tree node
* @policy: cpufreq policy
*
* This interface function registers the cpufreq cooling device with the name
@@ -827,86 +745,45 @@ EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
* cooling devices. Using this API, the cpufreq cooling device will be
* linked to the device tree node provided.
*
- * Return: a valid struct thermal_cooling_device pointer on success,
- * on failure, it returns a corresponding ERR_PTR().
- */
-struct thermal_cooling_device *
-of_cpufreq_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy)
-{
- if (!np)
- return ERR_PTR(-EINVAL);
-
- return __cpufreq_cooling_register(np, policy, 0, NULL);
-}
-EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);
-
-/**
- * cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions
- * @policy: cpufreq policy
- * @capacitance: dynamic power coefficient for these cpus
- * @plat_static_func: function to calculate the static power consumed by these
- * cpus (optional)
- *
- * This interface function registers the cpufreq cooling device with
- * the name "thermal-cpufreq-%x". This api can support multiple
- * instances of cpufreq cooling devices. Using this function, the
- * cooling device will implement the power extensions by using a
- * simple cpu power model. The cpus must have registered their OPPs
- * using the OPP library.
- *
- * An optional @plat_static_func may be provided to calculate the
- * static power consumed by these cpus. If the platform's static
- * power consumption is unknown or negligible, make it NULL.
- *
- * Return: a valid struct thermal_cooling_device pointer on success,
- * on failure, it returns a corresponding ERR_PTR().
- */
-struct thermal_cooling_device *
-cpufreq_power_cooling_register(struct cpufreq_policy *policy, u32 capacitance,
- get_static_t plat_static_func)
-{
- return __cpufreq_cooling_register(NULL, policy, capacitance,
- plat_static_func);
-}
-EXPORT_SYMBOL(cpufreq_power_cooling_register);
-
-/**
- * of_cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions
- * @np: a valid struct device_node to the cooling device device tree node
- * @policy: cpufreq policy
- * @capacitance: dynamic power coefficient for these cpus
- * @plat_static_func: function to calculate the static power consumed by these
- * cpus (optional)
- *
- * This interface function registers the cpufreq cooling device with
- * the name "thermal-cpufreq-%x". This api can support multiple
- * instances of cpufreq cooling devices. Using this API, the cpufreq
- * cooling device will be linked to the device tree node provided.
* Using this function, the cooling device will implement the power
* extensions by using a simple cpu power model. The cpus must have
* registered their OPPs using the OPP library.
*
- * An optional @plat_static_func may be provided to calculate the
- * static power consumed by these cpus. If the platform's static
- * power consumption is unknown or negligible, make it NULL.
+ * If the relevant property is present in the policy CPU's device tree
+ * node, the static power consumed by the CPU is also taken into account.
*
* Return: a valid struct thermal_cooling_device pointer on success,
- * on failure, it returns a corresponding ERR_PTR().
+ * and NULL on failure.
*/
struct thermal_cooling_device *
-of_cpufreq_power_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy,
- u32 capacitance,
- get_static_t plat_static_func)
+of_cpufreq_cooling_register(struct cpufreq_policy *policy)
{
- if (!np)
- return ERR_PTR(-EINVAL);
+ struct device_node *np = of_get_cpu_node(policy->cpu, NULL);
+ struct thermal_cooling_device *cdev = NULL;
+ u32 capacitance = 0;
+
+ if (!np) {
+ pr_err("cpu_cooling: OF node not available for cpu%d\n",
+ policy->cpu);
+ return NULL;
+ }
+
+ if (of_find_property(np, "#cooling-cells", NULL)) {
+ of_property_read_u32(np, "dynamic-power-coefficient",
+ &capacitance);
- return __cpufreq_cooling_register(np, policy, capacitance,
- plat_static_func);
+ cdev = __cpufreq_cooling_register(np, policy, capacitance);
+ if (IS_ERR(cdev)) {
+ pr_err("cpu_cooling: cpu%d is not running as cooling device: %ld\n",
+ policy->cpu, PTR_ERR(cdev));
+ cdev = NULL;
+ }
+ }
+
+ of_node_put(np);
+ return cdev;
}
-EXPORT_SYMBOL(of_cpufreq_power_cooling_register);
+EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);
/**
* cpufreq_cooling_unregister - function to remove cpufreq cooling device.
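
The reworked of_cpufreq_cooling_register() above now derives everything from the policy itself: it fetches the CPU's device tree node, requires #cooling-cells before registering, and treats dynamic-power-coefficient as optional, since the capacitance simply stays 0 when the property is absent. That last part works because of_property_read_u32() leaves its output untouched on failure; a sketch of the idiom:

    #include <linux/of.h>

    /* Sketch: optional u32 DT property with a pre-initialized default. */
    static u32 example_read_coefficient(struct device_node *np)
    {
            u32 capacitance = 0;    /* default when the property is absent */

            of_property_read_u32(np, "dynamic-power-coefficient",
                                 &capacitance);
            return capacitance;
    }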
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 5131bdc9e765..0edf4fcfea23 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -2457,10 +2457,10 @@ static ssize_t gsmld_write(struct tty_struct *tty, struct file *file,
* Called without the kernel lock held - fine
*/
-static unsigned int gsmld_poll(struct tty_struct *tty, struct file *file,
+static __poll_t gsmld_poll(struct tty_struct *tty, struct file *file,
poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
struct gsm_mux *gsm = tty->disc_data;
poll_wait(file, &tty->read_wait, wait);
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index eea7b6cb3cc4..929434ebee50 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -180,7 +180,7 @@ static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
const unsigned char *buf, size_t nr);
static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg);
-static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
+static __poll_t n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
poll_table *wait);
static int n_hdlc_tty_open(struct tty_struct *tty);
static void n_hdlc_tty_close(struct tty_struct *tty);
@@ -796,11 +796,11 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
* to caller.
* Returns a bit mask containing info on which ops will not block.
*/
-static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
+static __poll_t n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
poll_table *wait)
{
struct n_hdlc *n_hdlc = tty2n_hdlc (tty);
- unsigned int mask = 0;
+ __poll_t mask = 0;
if (debuglevel >= DEBUG_LEVEL_INFO)
printk("%s(%d)n_hdlc_tty_poll() called\n",__FILE__,__LINE__);
diff --git a/drivers/tty/n_r3964.c b/drivers/tty/n_r3964.c
index 30bb0900cd2f..e81d3db8ad63 100644
--- a/drivers/tty/n_r3964.c
+++ b/drivers/tty/n_r3964.c
@@ -135,7 +135,7 @@ static ssize_t r3964_write(struct tty_struct *tty, struct file *file,
static int r3964_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg);
static void r3964_set_termios(struct tty_struct *tty, struct ktermios *old);
-static unsigned int r3964_poll(struct tty_struct *tty, struct file *file,
+static __poll_t r3964_poll(struct tty_struct *tty, struct file *file,
struct poll_table_struct *wait);
static void r3964_receive_buf(struct tty_struct *tty, const unsigned char *cp,
char *fp, int count);
@@ -1216,14 +1216,14 @@ static void r3964_set_termios(struct tty_struct *tty, struct ktermios *old)
}
/* Called without the kernel lock held - fine */
-static unsigned int r3964_poll(struct tty_struct *tty, struct file *file,
+static __poll_t r3964_poll(struct tty_struct *tty, struct file *file,
struct poll_table_struct *wait)
{
struct r3964_info *pInfo = tty->disc_data;
struct r3964_client_info *pClient;
struct r3964_message *pMsg = NULL;
unsigned long flags;
- int result = POLLOUT;
+ __poll_t result = POLLOUT;
TRACE_L("POLL");
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 539b49adb6af..478a9b40fd03 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -2368,10 +2368,10 @@ break_out:
* Called without the kernel lock held - fine
*/
-static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file,
+static __poll_t n_tty_poll(struct tty_struct *tty, struct file *file,
poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &tty->read_wait, wait);
poll_wait(file, &tty->write_wait, wait);
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index 3c573a8caa80..5a135ed46159 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -132,6 +132,33 @@ void serdev_device_close(struct serdev_device *serdev)
}
EXPORT_SYMBOL_GPL(serdev_device_close);
+static void devm_serdev_device_release(struct device *dev, void *dr)
+{
+ serdev_device_close(*(struct serdev_device **)dr);
+}
+
+int devm_serdev_device_open(struct device *dev, struct serdev_device *serdev)
+{
+ struct serdev_device **dr;
+ int ret;
+
+ dr = devres_alloc(devm_serdev_device_release, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+
+ ret = serdev_device_open(serdev);
+ if (ret) {
+ devres_free(dr);
+ return ret;
+ }
+
+ *dr = serdev;
+ devres_add(dev, dr);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_serdev_device_open);
+
void serdev_device_write_wakeup(struct serdev_device *serdev)
{
complete(&serdev->write_comp);
@@ -280,8 +307,8 @@ static int serdev_drv_probe(struct device *dev)
static int serdev_drv_remove(struct device *dev)
{
const struct serdev_device_driver *sdrv = to_serdev_device_driver(dev->driver);
-
- sdrv->remove(to_serdev_device(dev));
+ if (sdrv->remove)
+ sdrv->remove(to_serdev_device(dev));
return 0;
}
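
devm_serdev_device_open() above is the canonical devres shape: allocate the devres node first, attempt the operation, and only register the release callback once the open has succeeded, so teardown stays correct on both the success and failure paths. Usage from a serdev driver's probe would look roughly like this (driver names hypothetical):

    #include <linux/serdev.h>

    /* Sketch: the matching close now runs automatically on unbind. */
    static int example_probe(struct serdev_device *serdev)
    {
            int ret = devm_serdev_device_open(&serdev->dev, serdev);

            if (ret)
                    return ret;
            /* no explicit serdev_device_close() needed anywhere below */
            return 0;
    }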
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index dc60aeea87d8..00d14d6a76bb 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -144,7 +144,7 @@ static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
ssize_t redirected_tty_write(struct file *, const char __user *,
size_t, loff_t *);
-static unsigned int tty_poll(struct file *, poll_table *);
+static __poll_t tty_poll(struct file *, poll_table *);
static int tty_open(struct inode *, struct file *);
long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
#ifdef CONFIG_COMPAT
@@ -443,7 +443,7 @@ static ssize_t hung_up_tty_write(struct file *file, const char __user *buf,
}
/* No kernel lock held - none needed ;) */
-static unsigned int hung_up_tty_poll(struct file *filp, poll_table *wait)
+static __poll_t hung_up_tty_poll(struct file *filp, poll_table *wait)
{
return POLLIN | POLLOUT | POLLERR | POLLHUP | POLLRDNORM | POLLWRNORM;
}
@@ -2055,11 +2055,11 @@ retry_open:
* may be re-entered freely by other callers.
*/
-static unsigned int tty_poll(struct file *filp, poll_table *wait)
+static __poll_t tty_poll(struct file *filp, poll_table *wait)
{
struct tty_struct *tty = file_tty(filp);
struct tty_ldisc *ld;
- int ret = 0;
+ __poll_t ret = 0;
if (tty_paranoia_check(tty, file_inode(filp), "tty_poll"))
return 0;
diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
index 85b6634f518a..3e64ccd0040f 100644
--- a/drivers/tty/vt/vc_screen.c
+++ b/drivers/tty/vt/vc_screen.c
@@ -559,11 +559,11 @@ unlock_out:
return ret;
}
-static unsigned int
+static __poll_t
vcs_poll(struct file *file, poll_table *wait)
{
struct vcs_poll_data *poll = vcs_poll_data_get(file);
- int ret = DEFAULT_POLLMASK|POLLERR|POLLPRI;
+ __poll_t ret = DEFAULT_POLLMASK|POLLERR|POLLPRI;
if (poll) {
poll_wait(file, &poll->waitq, wait);
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index ff04b7f8549f..85bc1aaea4a4 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -496,7 +496,7 @@ static int uio_release(struct inode *inode, struct file *filep)
return ret;
}
-static unsigned int uio_poll(struct file *filep, poll_table *wait)
+static __poll_t uio_poll(struct file *filep, poll_table *wait)
{
struct uio_listener *listener = filep->private_data;
struct uio_device *idev = listener->dev;
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 6c181a625daf..9627ea6ec3ae 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -595,11 +595,11 @@ static int wdm_flush(struct file *file, fl_owner_t id)
return usb_translate_errors(desc->werr);
}
-static unsigned int wdm_poll(struct file *file, struct poll_table_struct *wait)
+static __poll_t wdm_poll(struct file *file, struct poll_table_struct *wait)
{
struct wdm_device *desc = file->private_data;
unsigned long flags;
- unsigned int mask = 0;
+ __poll_t mask = 0;
spin_lock_irqsave(&desc->iuspin, flags);
if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index c454885ef4a0..f45e8877771a 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -469,9 +469,9 @@ static int usblp_release(struct inode *inode, struct file *file)
}
/* No kernel lock - fine */
-static unsigned int usblp_poll(struct file *file, struct poll_table_struct *wait)
+static __poll_t usblp_poll(struct file *file, struct poll_table_struct *wait)
{
- int ret;
+ __poll_t ret;
unsigned long flags;
struct usblp *usblp = file->private_data;
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 0b8b0f3bdd2f..7ea67a55be10 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -1257,10 +1257,10 @@ static int usbtmc_fasync(int fd, struct file *file, int on)
return fasync_helper(fd, file, on, &data->fasync);
}
-static unsigned int usbtmc_poll(struct file *file, poll_table *wait)
+static __poll_t usbtmc_poll(struct file *file, poll_table *wait)
{
struct usbtmc_device_data *data = file->private_data;
- unsigned int mask;
+ __poll_t mask;
mutex_lock(&data->io_mutex);
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index c2cf62b7043a..e2cec448779e 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -622,7 +622,7 @@ static ssize_t usb_device_read(struct file *file, char __user *buf,
}
/* Kernel lock for "lastev" protection */
-static unsigned int usb_device_poll(struct file *file,
+static __poll_t usb_device_poll(struct file *file,
struct poll_table_struct *wait)
{
unsigned int event_count;
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index a3fad4ec9870..5e72bf36aca4 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -595,7 +595,7 @@ static void async_completed(struct urb *urb)
as->status = urb->status;
signr = as->signr;
if (signr) {
- memset(&sinfo, 0, sizeof(sinfo));
+ clear_siginfo(&sinfo);
sinfo.si_signo = as->signr;
sinfo.si_errno = as->status;
sinfo.si_code = SI_ASYNCIO;
@@ -2572,11 +2572,11 @@ static long usbdev_compat_ioctl(struct file *file, unsigned int cmd,
#endif
/* No kernel lock - fine */
-static unsigned int usbdev_poll(struct file *file,
+static __poll_t usbdev_poll(struct file *file,
struct poll_table_struct *wait)
{
struct usb_dev_state *ps = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &ps->wait, wait);
if (file->f_mode & FMODE_WRITE && !list_empty(&ps->async_completed))
@@ -2613,7 +2613,7 @@ static void usbdev_remove(struct usb_device *udev)
wake_up_all(&ps->wait);
list_del_init(&ps->list);
if (ps->discsignr) {
- memset(&sinfo, 0, sizeof(sinfo));
+ clear_siginfo(&sinfo);
sinfo.si_signo = ps->discsignr;
sinfo.si_errno = EPIPE;
sinfo.si_code = SI_ASYNCIO;
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index b6cf5ab5a0a1..b540935891af 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -638,10 +638,10 @@ static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
return ret;
}
-static unsigned int ffs_ep0_poll(struct file *file, poll_table *wait)
+static __poll_t ffs_ep0_poll(struct file *file, poll_table *wait)
{
struct ffs_data *ffs = file->private_data;
- unsigned int mask = POLLWRNORM;
+ __poll_t mask = POLLWRNORM;
int ret;
poll_wait(file, &ffs->ev.waitq, wait);
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index daae35318a3a..a73efb1c47d0 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -413,10 +413,10 @@ release_write_pending_unlocked:
return status;
}
-static unsigned int f_hidg_poll(struct file *file, poll_table *wait)
+static __poll_t f_hidg_poll(struct file *file, poll_table *wait)
{
struct f_hidg *hidg = file->private_data;
- unsigned int ret = 0;
+ __poll_t ret = 0;
poll_wait(file, &hidg->read_queue, wait);
poll_wait(file, &hidg->write_queue, wait);
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index c5bce8e22983..5780fba620ab 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -73,9 +73,7 @@ struct f_ncm {
struct sk_buff *skb_tx_ndp;
u16 ndp_dgram_count;
bool timer_force_tx;
- struct tasklet_struct tx_tasklet;
struct hrtimer task_timer;
-
bool timer_stopping;
};
@@ -1104,7 +1102,7 @@ static struct sk_buff *ncm_wrap_ntb(struct gether *port,
/* Delay the timer. */
hrtimer_start(&ncm->task_timer, TX_TIMEOUT_NSECS,
- HRTIMER_MODE_REL);
+ HRTIMER_MODE_REL_SOFT);
/* Add the datagram position entries */
ntb_ndp = skb_put_zero(ncm->skb_tx_ndp, dgram_idx_len);
@@ -1148,17 +1146,15 @@ err:
}
/*
- * This transmits the NTB if there are frames waiting.
+ * The transmit should only be run if no skb data has been sent
+ * for a certain duration.
*/
-static void ncm_tx_tasklet(unsigned long data)
+static enum hrtimer_restart ncm_tx_timeout(struct hrtimer *data)
{
- struct f_ncm *ncm = (void *)data;
-
- if (ncm->timer_stopping)
- return;
+ struct f_ncm *ncm = container_of(data, struct f_ncm, task_timer);
/* Only send if data is available. */
- if (ncm->skb_tx_data) {
+ if (!ncm->timer_stopping && ncm->skb_tx_data) {
ncm->timer_force_tx = true;
/* XXX This allowance of a NULL skb argument to ndo_start_xmit
@@ -1171,16 +1167,6 @@ static void ncm_tx_tasklet(unsigned long data)
ncm->timer_force_tx = false;
}
-}
-
-/*
- * The transmit should only be run if no skb data has been sent
- * for a certain duration.
- */
-static enum hrtimer_restart ncm_tx_timeout(struct hrtimer *data)
-{
- struct f_ncm *ncm = container_of(data, struct f_ncm, task_timer);
- tasklet_schedule(&ncm->tx_tasklet);
return HRTIMER_NORESTART;
}
@@ -1513,8 +1499,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
ncm->port.open = ncm_open;
ncm->port.close = ncm_close;
- tasklet_init(&ncm->tx_tasklet, ncm_tx_tasklet, (unsigned long) ncm);
- hrtimer_init(&ncm->task_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_init(&ncm->task_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
ncm->task_timer.function = ncm_tx_timeout;
DBG(cdev, "CDC Network: %s speed IN/%s OUT/%s NOTIFY/%s\n",
@@ -1623,7 +1608,6 @@ static void ncm_unbind(struct usb_configuration *c, struct usb_function *f)
DBG(c->cdev, "ncm unbind\n");
hrtimer_cancel(&ncm->task_timer);
- tasklet_kill(&ncm->tx_tasklet);
ncm_string_defs[0].id = 0;
usb_free_all_descriptors(f);
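
The f_ncm rework collapses a two-stage hrtimer-plus-tasklet construction into a single timer: with HRTIMER_MODE_REL_SOFT the callback already runs in softirq context, so the work the tasklet used to defer can live directly in the timer function. A reduced sketch of the pattern (state structure hypothetical):

    #include <linux/hrtimer.h>
    #include <linux/ktime.h>

    struct ex_state {
            struct hrtimer timer;
            bool pending;
    };

    /* Runs in softirq context because of HRTIMER_MODE_REL_SOFT. */
    static enum hrtimer_restart ex_timeout(struct hrtimer *t)
    {
            struct ex_state *st = container_of(t, struct ex_state, timer);

            if (st->pending)
                    st->pending = false;    /* deferred work goes here */
            return HRTIMER_NORESTART;
    }

    static void ex_init(struct ex_state *st)
    {
            hrtimer_init(&st->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
            st->timer.function = ex_timeout;
    }

    static void ex_arm(struct ex_state *st, u64 delay_ns)
    {
            hrtimer_start(&st->timer, ns_to_ktime(delay_ns),
                          HRTIMER_MODE_REL_SOFT);
    }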
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index dd607b99eb1d..453578c4af69 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -680,12 +680,12 @@ printer_fsync(struct file *fd, loff_t start, loff_t end, int datasync)
return 0;
}
-static unsigned int
+static __poll_t
printer_poll(struct file *fd, poll_table *wait)
{
struct printer_dev *dev = fd->private_data;
unsigned long flags;
- int status = 0;
+ __poll_t status = 0;
mutex_lock(&dev->lock_printer_io);
spin_lock_irqsave(&dev->lock, flags);
diff --git a/drivers/usb/gadget/function/uvc_queue.c b/drivers/usb/gadget/function/uvc_queue.c
index 278d50ff1eea..9e33d5206d54 100644
--- a/drivers/usb/gadget/function/uvc_queue.c
+++ b/drivers/usb/gadget/function/uvc_queue.c
@@ -193,7 +193,7 @@ int uvcg_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf,
* This function implements video queue polling and is intended to be used by
* the device poll handler.
*/
-unsigned int uvcg_queue_poll(struct uvc_video_queue *queue, struct file *file,
+__poll_t uvcg_queue_poll(struct uvc_video_queue *queue, struct file *file,
poll_table *wait)
{
return vb2_poll(&queue->queue, file, wait);
diff --git a/drivers/usb/gadget/function/uvc_queue.h b/drivers/usb/gadget/function/uvc_queue.h
index 51ee94e5cf2b..f9f65b5c1062 100644
--- a/drivers/usb/gadget/function/uvc_queue.h
+++ b/drivers/usb/gadget/function/uvc_queue.h
@@ -72,7 +72,7 @@ int uvcg_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf);
int uvcg_dequeue_buffer(struct uvc_video_queue *queue,
struct v4l2_buffer *buf, int nonblocking);
-unsigned int uvcg_queue_poll(struct uvc_video_queue *queue,
+__poll_t uvcg_queue_poll(struct uvc_video_queue *queue,
struct file *file, poll_table *wait);
int uvcg_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma);
diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c
index f3069db6f08e..9a9019625496 100644
--- a/drivers/usb/gadget/function/uvc_v4l2.c
+++ b/drivers/usb/gadget/function/uvc_v4l2.c
@@ -329,7 +329,7 @@ uvc_v4l2_mmap(struct file *file, struct vm_area_struct *vma)
return uvcg_queue_mmap(&uvc->video.queue, vma);
}
-static unsigned int
+static __poll_t
uvc_v4l2_poll(struct file *file, poll_table *wait)
{
struct video_device *vdev = video_devdata(file);
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 9343ec436485..05691254d473 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1209,11 +1209,11 @@ dev_release (struct inode *inode, struct file *fd)
return 0;
}
-static unsigned int
+static __poll_t
ep0_poll (struct file *fd, poll_table *wait)
{
struct dev_data *dev = fd->private_data;
- int mask = 0;
+ __poll_t mask = 0;
if (dev->state <= STATE_DEV_OPENED)
return DEFAULT_POLLMASK;
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index ad3109490c0f..1fa00b35f4ad 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -677,10 +677,10 @@ static int iowarrior_release(struct inode *inode, struct file *file)
return retval;
}
-static unsigned iowarrior_poll(struct file *file, poll_table * wait)
+static __poll_t iowarrior_poll(struct file *file, poll_table * wait)
{
struct iowarrior *dev = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
if (!dev->present)
return POLLERR | POLLHUP;
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index 5c1a3b852453..074398c1e410 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -409,10 +409,10 @@ exit:
/**
* ld_usb_poll
*/
-static unsigned int ld_usb_poll(struct file *file, poll_table *wait)
+static __poll_t ld_usb_poll(struct file *file, poll_table *wait)
{
struct ld_usb *dev;
- unsigned int mask = 0;
+ __poll_t mask = 0;
dev = file->private_data;
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index c5be6e9e24a5..941c45028828 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -224,7 +224,7 @@ static ssize_t tower_write (struct file *file, const char __user *buffer, size_t
static inline void tower_delete (struct lego_usb_tower *dev);
static int tower_open (struct inode *inode, struct file *file);
static int tower_release (struct inode *inode, struct file *file);
-static unsigned int tower_poll (struct file *file, poll_table *wait);
+static __poll_t tower_poll (struct file *file, poll_table *wait);
static loff_t tower_llseek (struct file *file, loff_t off, int whence);
static void tower_abort_transfers (struct lego_usb_tower *dev);
@@ -509,10 +509,10 @@ static void tower_check_for_read_packet (struct lego_usb_tower *dev)
/**
* tower_poll
*/
-static unsigned int tower_poll (struct file *file, poll_table *wait)
+static __poll_t tower_poll (struct file *file, poll_table *wait)
{
struct lego_usb_tower *dev;
- unsigned int mask = 0;
+ __poll_t mask = 0;
dev = file->private_data;
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index f932f40302df..cc5b296bff3f 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -1191,11 +1191,11 @@ static long mon_bin_compat_ioctl(struct file *file,
}
#endif /* CONFIG_COMPAT */
-static unsigned int
+static __poll_t
mon_bin_poll(struct file *file, struct poll_table_struct *wait)
{
struct mon_reader_bin *rp = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
unsigned long flags;
if (file->f_mode & FMODE_READ)
diff --git a/drivers/vfio/virqfd.c b/drivers/vfio/virqfd.c
index 4797217e5e72..8cc4b48ff127 100644
--- a/drivers/vfio/virqfd.c
+++ b/drivers/vfio/virqfd.c
@@ -46,7 +46,7 @@ static void virqfd_deactivate(struct virqfd *virqfd)
static int virqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
struct virqfd *virqfd = container_of(wait, struct virqfd, wait);
- unsigned long flags = (unsigned long)key;
+ __poll_t flags = key_to_poll(key);
if (flags & POLLIN) {
/* An event has been signaled, call function */
@@ -113,7 +113,7 @@ int vfio_virqfd_enable(void *opaque,
struct eventfd_ctx *ctx;
struct virqfd *virqfd;
int ret = 0;
- unsigned int events;
+ __poll_t events;
virqfd = kzalloc(sizeof(*virqfd), GFP_KERNEL);
if (!virqfd)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 80323948c0dd..9c3f8160ef24 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1373,7 +1373,7 @@ static ssize_t vhost_net_chr_write_iter(struct kiocb *iocb,
return vhost_chr_write_iter(dev, from);
}
-static unsigned int vhost_net_chr_poll(struct file *file, poll_table *wait)
+static __poll_t vhost_net_chr_poll(struct file *file, poll_table *wait)
{
struct vhost_net *n = file->private_data;
struct vhost_dev *dev = &n->dev;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 5727b186b3ca..8d4374606756 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -170,7 +170,7 @@ static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
{
struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
- if (!((unsigned long)key & poll->mask))
+ if (!(key_to_poll(key) & poll->mask))
return 0;
vhost_poll_queue(poll);
@@ -187,7 +187,7 @@ EXPORT_SYMBOL_GPL(vhost_work_init);
/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
- unsigned long mask, struct vhost_dev *dev)
+ __poll_t mask, struct vhost_dev *dev)
{
init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
init_poll_funcptr(&poll->table, vhost_poll_func);
@@ -203,7 +203,7 @@ EXPORT_SYMBOL_GPL(vhost_poll_init);
* keep a reference to a file until after vhost_poll_stop is called. */
int vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
- unsigned long mask;
+ __poll_t mask;
int ret = 0;
if (poll->wqh)
@@ -211,7 +211,7 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file)
mask = file->f_op->poll(file, &poll->table);
if (mask)
- vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
+ vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
if (mask & POLLERR) {
if (poll->wqh)
remove_wait_queue(poll->wqh, &poll->wait);
@@ -1061,10 +1061,10 @@ done:
}
EXPORT_SYMBOL(vhost_chr_write_iter);
-unsigned int vhost_chr_poll(struct file *file, struct vhost_dev *dev,
+__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &dev->wait, wait);
@@ -1881,12 +1881,7 @@ static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
return -1U;
/* Check they're not leading us off end of descriptors. */
- next = vhost16_to_cpu(vq, desc->next);
- /* Make sure compiler knows to grab that: we don't want it changing! */
- /* We will use the result as an index in an array, so most
- * architectures only need a compiler barrier here. */
- read_barrier_depends();
-
+ next = vhost16_to_cpu(vq, READ_ONCE(desc->next));
return next;
}
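The next_desc() change is a semantic cleanup from the same window: READ_ONCE() both stops the compiler from re-fetching desc->next and carries the address-dependency ordering that the explicit read_barrier_depends() used to document. The idiom, sketched against a hypothetical ring structure:

static int demo_next_id(struct demo_ring *ring, struct demo_desc *desc)
{
        /* One ordered load; the value cannot change under us afterwards. */
        unsigned int next = READ_ONCE(desc->next);

        if (next >= ring->size)         /* validate before indexing */
                return -EINVAL;

        return ring->entries[next].id;  /* dependency-ordered access */
}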
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 79c6e7a60a5e..7876a3d7d1b3 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -34,7 +34,7 @@ struct vhost_poll {
wait_queue_head_t *wqh;
wait_queue_entry_t wait;
struct vhost_work work;
- unsigned long mask;
+ __poll_t mask;
struct vhost_dev *dev;
};
@@ -43,7 +43,7 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
bool vhost_has_work(struct vhost_dev *dev);
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
- unsigned long mask, struct vhost_dev *dev);
+ __poll_t mask, struct vhost_dev *dev);
int vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_flush(struct vhost_poll *poll);
@@ -217,7 +217,7 @@ void vhost_enqueue_msg(struct vhost_dev *dev,
struct vhost_msg_node *node);
struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
struct list_head *head);
-unsigned int vhost_chr_poll(struct file *file, struct vhost_dev *dev,
+__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
poll_table *wait);
ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
int noblock);
diff --git a/drivers/video/backlight/apple_bl.c b/drivers/video/backlight/apple_bl.c
index d84329676689..6a34ab936726 100644
--- a/drivers/video/backlight/apple_bl.c
+++ b/drivers/video/backlight/apple_bl.c
@@ -143,7 +143,7 @@ static int apple_bl_add(struct acpi_device *dev)
struct pci_dev *host;
int intensity;
- host = pci_get_bus_and_slot(0, 0);
+ host = pci_get_domain_bus_and_slot(0, 0, 0);
if (!host) {
pr_err("unable to find PCI host\n");
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index d7c239ea3d09..f5574060f9c8 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -177,7 +177,7 @@ static int corgi_ssp_lcdtg_send(struct corgi_lcd *lcd, int adrs, uint8_t data)
struct spi_message msg;
struct spi_transfer xfer = {
.len = 1,
- .cs_change = 1,
+ .cs_change = 0,
.tx_buf = lcd->buf,
};
diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c
index eab1f842f9c0..e4bd63e9db6b 100644
--- a/drivers/video/backlight/tdo24m.c
+++ b/drivers/video/backlight/tdo24m.c
@@ -369,7 +369,7 @@ static int tdo24m_probe(struct spi_device *spi)
spi_message_init(m);
- x->cs_change = 1;
+ x->cs_change = 0;
x->tx_buf = &lcd->buf[0];
spi_message_add_tail(x, m);
diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c
index 6a41ea92737a..4dc5ee8debeb 100644
--- a/drivers/video/backlight/tosa_lcd.c
+++ b/drivers/video/backlight/tosa_lcd.c
@@ -49,7 +49,7 @@ static int tosa_tg_send(struct spi_device *spi, int adrs, uint8_t data)
struct spi_message msg;
struct spi_transfer xfer = {
.len = 1,
- .cs_change = 1,
+ .cs_change = 0,
.tx_buf = buf,
};
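The three backlight fixes above all flip cs_change from 1 to 0. For the last transfer in a message, cs_change = 1 means "leave chip select asserted after the message completes", which is almost never what a single-transfer command wants. A sketch of such a message with the corrected flag (a real driver must pass a DMA-safe buffer, not a stack local):

static int demo_send_cmd(struct spi_device *spi, u8 *buf)
{
        struct spi_message msg;
        struct spi_transfer xfer = {
                .len = 1,
                .cs_change = 0,         /* deassert CS when the message ends */
                .tx_buf = buf,
        };

        spi_message_init(&msg);
        spi_message_add_tail(&xfer, &msg);

        return spi_sync(spi, &msg);
}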
diff --git a/drivers/video/fbdev/macfb.c b/drivers/video/fbdev/macfb.c
index cda7587cbc86..e707e617bf1c 100644
--- a/drivers/video/fbdev/macfb.c
+++ b/drivers/video/fbdev/macfb.c
@@ -556,7 +556,7 @@ static void __init iounmap_macfb(void)
static int __init macfb_init(void)
{
int video_cmap_len, video_is_nubus = 0;
- struct nubus_dev* ndev = NULL;
+ struct nubus_rsrc *ndev = NULL;
char *option = NULL;
int err;
@@ -670,15 +670,17 @@ static int __init macfb_init(void)
* code is really broken :-)
*/
- while ((ndev = nubus_find_type(NUBUS_CAT_DISPLAY,
- NUBUS_TYPE_VIDEO, ndev)))
- {
+ for_each_func_rsrc(ndev) {
unsigned long base = ndev->board->slot_addr;
if (mac_bi_data.videoaddr < base ||
mac_bi_data.videoaddr - base > 0xFFFFFF)
continue;
+ if (ndev->category != NUBUS_CAT_DISPLAY ||
+ ndev->type != NUBUS_TYPE_VIDEO)
+ continue;
+
video_is_nubus = 1;
slot_addr = (unsigned char *)base;

diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
index d70ad6d38879..b0597bef4555 100644
--- a/drivers/virt/fsl_hypervisor.c
+++ b/drivers/virt/fsl_hypervisor.c
@@ -565,11 +565,11 @@ static irqreturn_t fsl_hv_state_change_isr(int irq, void *data)
/*
* Returns a bitmask indicating whether a read will block
*/
-static unsigned int fsl_hv_poll(struct file *filp, struct poll_table_struct *p)
+static __poll_t fsl_hv_poll(struct file *filp, struct poll_table_struct *p)
{
struct doorbell_queue *dbq = filp->private_data;
unsigned long flags;
- unsigned int mask;
+ __poll_t mask;
spin_lock_irqsave(&dbq->lock, flags);
diff --git a/drivers/w1/masters/w1-gpio.c b/drivers/w1/masters/w1-gpio.c
index a90728ceec5a..55e11bf8ebaf 100644
--- a/drivers/w1/masters/w1-gpio.c
+++ b/drivers/w1/masters/w1-gpio.c
@@ -13,9 +13,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/w1-gpio.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/delay.h>
@@ -30,11 +29,17 @@ static u8 w1_gpio_set_pullup(void *data, int delay)
pdata->pullup_duration = delay;
} else {
if (pdata->pullup_duration) {
- gpio_direction_output(pdata->pin, 1);
-
+ /*
+ * This will OVERRIDE open drain emulation and force-pull
+ * the line high for some time.
+ */
+ gpiod_set_raw_value(pdata->gpiod, 1);
msleep(pdata->pullup_duration);
-
- gpio_direction_input(pdata->pin);
+ /*
+ * This will simply set the line as input since we are doing
+ * open drain emulation in the GPIO library.
+ */
+ gpiod_set_value(pdata->gpiod, 1);
}
pdata->pullup_duration = 0;
}
@@ -42,28 +47,18 @@ static u8 w1_gpio_set_pullup(void *data, int delay)
return 0;
}
-static void w1_gpio_write_bit_dir(void *data, u8 bit)
-{
- struct w1_gpio_platform_data *pdata = data;
-
- if (bit)
- gpio_direction_input(pdata->pin);
- else
- gpio_direction_output(pdata->pin, 0);
-}
-
-static void w1_gpio_write_bit_val(void *data, u8 bit)
+static void w1_gpio_write_bit(void *data, u8 bit)
{
struct w1_gpio_platform_data *pdata = data;
- gpio_set_value(pdata->pin, bit);
+ gpiod_set_value(pdata->gpiod, bit);
}
static u8 w1_gpio_read_bit(void *data)
{
struct w1_gpio_platform_data *pdata = data;
- return gpio_get_value(pdata->pin) ? 1 : 0;
+ return gpiod_get_value(pdata->gpiod) ? 1 : 0;
}
#if defined(CONFIG_OF)
@@ -74,107 +69,85 @@ static const struct of_device_id w1_gpio_dt_ids[] = {
MODULE_DEVICE_TABLE(of, w1_gpio_dt_ids);
#endif
-static int w1_gpio_probe_dt(struct platform_device *pdev)
-{
- struct w1_gpio_platform_data *pdata = dev_get_platdata(&pdev->dev);
- struct device_node *np = pdev->dev.of_node;
- int gpio;
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
-
- if (of_get_property(np, "linux,open-drain", NULL))
- pdata->is_open_drain = 1;
-
- gpio = of_get_gpio(np, 0);
- if (gpio < 0) {
- if (gpio != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "Failed to parse gpio property for data pin (%d)\n",
- gpio);
-
- return gpio;
- }
- pdata->pin = gpio;
-
- gpio = of_get_gpio(np, 1);
- if (gpio == -EPROBE_DEFER)
- return gpio;
- /* ignore other errors as the pullup gpio is optional */
- pdata->ext_pullup_enable_pin = gpio;
-
- pdev->dev.platform_data = pdata;
-
- return 0;
-}
-
static int w1_gpio_probe(struct platform_device *pdev)
{
struct w1_bus_master *master;
struct w1_gpio_platform_data *pdata;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ /* Enforce open drain mode by default */
+ enum gpiod_flags gflags = GPIOD_OUT_LOW_OPEN_DRAIN;
int err;
if (of_have_populated_dt()) {
- err = w1_gpio_probe_dt(pdev);
- if (err < 0)
- return err;
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ /*
+		 * This parameter means that something other than the gpiolib has
+		 * already set the line into open drain mode, so we should just
+		 * drive it high/low like we are in full control of the line and
+ * open drain will happen transparently.
+ */
+ if (of_get_property(np, "linux,open-drain", NULL))
+ gflags = GPIOD_OUT_LOW;
+
+ pdev->dev.platform_data = pdata;
}
-
- pdata = dev_get_platdata(&pdev->dev);
+ pdata = dev_get_platdata(dev);
if (!pdata) {
- dev_err(&pdev->dev, "No configuration data\n");
+ dev_err(dev, "No configuration data\n");
return -ENXIO;
}
- master = devm_kzalloc(&pdev->dev, sizeof(struct w1_bus_master),
+ master = devm_kzalloc(dev, sizeof(struct w1_bus_master),
GFP_KERNEL);
if (!master) {
- dev_err(&pdev->dev, "Out of memory\n");
+ dev_err(dev, "Out of memory\n");
return -ENOMEM;
}
- err = devm_gpio_request(&pdev->dev, pdata->pin, "w1");
- if (err) {
- dev_err(&pdev->dev, "gpio_request (pin) failed\n");
- return err;
+ pdata->gpiod = devm_gpiod_get_index(dev, NULL, 0, gflags);
+ if (IS_ERR(pdata->gpiod)) {
+ dev_err(dev, "gpio_request (pin) failed\n");
+ return PTR_ERR(pdata->gpiod);
}
- if (gpio_is_valid(pdata->ext_pullup_enable_pin)) {
- err = devm_gpio_request_one(&pdev->dev,
- pdata->ext_pullup_enable_pin, GPIOF_INIT_LOW,
- "w1 pullup");
- if (err < 0) {
- dev_err(&pdev->dev, "gpio_request_one "
- "(ext_pullup_enable_pin) failed\n");
- return err;
- }
+ pdata->pullup_gpiod =
+ devm_gpiod_get_index_optional(dev, NULL, 1, GPIOD_OUT_LOW);
+ if (IS_ERR(pdata->pullup_gpiod)) {
+ dev_err(dev, "gpio_request_one "
+ "(ext_pullup_enable_pin) failed\n");
+ return PTR_ERR(pdata->pullup_gpiod);
}
master->data = pdata;
master->read_bit = w1_gpio_read_bit;
-
- if (pdata->is_open_drain) {
- gpio_direction_output(pdata->pin, 1);
- master->write_bit = w1_gpio_write_bit_val;
- } else {
- gpio_direction_input(pdata->pin);
- master->write_bit = w1_gpio_write_bit_dir;
+ gpiod_direction_output(pdata->gpiod, 1);
+ master->write_bit = w1_gpio_write_bit;
+
+ /*
+ * If we are using open drain emulation from the GPIO library,
+ * we need to use this pullup function that hammers the line
+ * high using a raw accessor to provide pull-up for the w1
+ * line.
+ */
+ if (gflags == GPIOD_OUT_LOW_OPEN_DRAIN)
master->set_pullup = w1_gpio_set_pullup;
- }
err = w1_add_master_device(master);
if (err) {
- dev_err(&pdev->dev, "w1_add_master device failed\n");
+ dev_err(dev, "w1_add_master device failed\n");
return err;
}
if (pdata->enable_external_pullup)
pdata->enable_external_pullup(1);
- if (gpio_is_valid(pdata->ext_pullup_enable_pin))
- gpio_set_value(pdata->ext_pullup_enable_pin, 1);
+ if (pdata->pullup_gpiod)
+ gpiod_set_value(pdata->pullup_gpiod, 1);
platform_set_drvdata(pdev, master);
@@ -189,8 +162,8 @@ static int w1_gpio_remove(struct platform_device *pdev)
if (pdata->enable_external_pullup)
pdata->enable_external_pullup(0);
- if (gpio_is_valid(pdata->ext_pullup_enable_pin))
- gpio_set_value(pdata->ext_pullup_enable_pin, 0);
+ if (pdata->pullup_gpiod)
+ gpiod_set_value(pdata->pullup_gpiod, 0);
w1_remove_master_device(master);
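Taken together, the w1-gpio hunks are a textbook conversion from the integer GPIO API to GPIO descriptors, with gpiolib emulating open drain when the pin controller cannot provide it in hardware. The acquisition half of that pattern, in sketch form for a hypothetical probe:

static int demo_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct gpio_desc *data, *pullup;

        /* Mandatory data line, low, with open-drain emulation enforced. */
        data = devm_gpiod_get_index(dev, NULL, 0, GPIOD_OUT_LOW_OPEN_DRAIN);
        if (IS_ERR(data))
                return PTR_ERR(data);

        /* Optional external pull-up enable line; NULL when absent. */
        pullup = devm_gpiod_get_index_optional(dev, NULL, 1, GPIOD_OUT_LOW);
        if (IS_ERR(pullup))
                return PTR_ERR(pullup);

        gpiod_set_value(data, 1);       /* logical value, emulation applies */
        return 0;
}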
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index ca200d1f310a..5bf613d3b7d6 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -223,6 +223,13 @@ config ZIIRAVE_WATCHDOG
To compile this driver as a module, choose M here: the
module will be called ziirave_wdt.
+config RAVE_SP_WATCHDOG
+ tristate "RAVE SP Watchdog timer"
+ depends on RAVE_SP_CORE
+ select WATCHDOG_CORE
+ help
+	  Support for the watchdog on the RAVE SP device.
+
# ALPHA Architecture
# ARM Architecture
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 715a21078e0c..135c5e81f25e 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -224,3 +224,4 @@ obj-$(CONFIG_MAX77620_WATCHDOG) += max77620_wdt.o
obj-$(CONFIG_ZIIRAVE_WATCHDOG) += ziirave_wdt.o
obj-$(CONFIG_SOFT_WATCHDOG) += softdog.o
obj-$(CONFIG_MENF21BMC_WATCHDOG) += menf21bmc_wdt.o
+obj-$(CONFIG_RAVE_SP_WATCHDOG) += rave-sp-wdt.o
diff --git a/drivers/watchdog/rave-sp-wdt.c b/drivers/watchdog/rave-sp-wdt.c
new file mode 100644
index 000000000000..35db173252f9
--- /dev/null
+++ b/drivers/watchdog/rave-sp-wdt.c
@@ -0,0 +1,337 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Driver for the watchdog aspect of the Zodiac Inflight Innovations
+ * RAVE Supervisory Processor (SP) MCU
+ *
+ * Copyright (C) 2017 Zodiac Inflight Innovation
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/mfd/rave-sp.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/watchdog.h>
+
+enum {
+ RAVE_SP_RESET_BYTE = 1,
+ RAVE_SP_RESET_REASON_NORMAL = 0,
+ RAVE_SP_RESET_DELAY_MS = 500,
+};
+
+/**
+ * struct rave_sp_wdt_variant - RAVE SP watchdog variant
+ *
+ * @max_timeout: Largest possible watchdog timeout setting
+ * @min_timeout: Smallest possible watchdog timeout setting
+ *
+ * @configure: Function to send configuration command
+ * @restart: Function to send "restart" command
+ */
+struct rave_sp_wdt_variant {
+ unsigned int max_timeout;
+ unsigned int min_timeout;
+
+ int (*configure)(struct watchdog_device *, bool);
+ int (*restart)(struct watchdog_device *);
+};
+
+/**
+ * struct rave_sp_wdt - RAVE SP watchdog
+ *
+ * @wdd: Underlying watchdog device
+ * @sp: Pointer to parent RAVE SP device
+ * @variant: Device specific variant information
+ * @reboot_notifier: Reboot notifier implementing machine reset
+ */
+struct rave_sp_wdt {
+ struct watchdog_device wdd;
+ struct rave_sp *sp;
+ const struct rave_sp_wdt_variant *variant;
+ struct notifier_block reboot_notifier;
+};
+
+static struct rave_sp_wdt *to_rave_sp_wdt(struct watchdog_device *wdd)
+{
+ return container_of(wdd, struct rave_sp_wdt, wdd);
+}
+
+static int rave_sp_wdt_exec(struct watchdog_device *wdd, void *data,
+ size_t data_size)
+{
+ return rave_sp_exec(to_rave_sp_wdt(wdd)->sp,
+ data, data_size, NULL, 0);
+}
+
+static int rave_sp_wdt_legacy_configure(struct watchdog_device *wdd, bool on)
+{
+ u8 cmd[] = {
+ [0] = RAVE_SP_CMD_SW_WDT,
+ [1] = 0,
+ [2] = 0,
+ [3] = on,
+ [4] = on ? wdd->timeout : 0,
+ };
+
+ return rave_sp_wdt_exec(wdd, cmd, sizeof(cmd));
+}
+
+static int rave_sp_wdt_rdu_configure(struct watchdog_device *wdd, bool on)
+{
+ u8 cmd[] = {
+ [0] = RAVE_SP_CMD_SW_WDT,
+ [1] = 0,
+ [2] = on,
+ [3] = (u8)wdd->timeout,
+ [4] = (u8)(wdd->timeout >> 8),
+ };
+
+ return rave_sp_wdt_exec(wdd, cmd, sizeof(cmd));
+}
+
+/**
+ * rave_sp_wdt_configure - Configure watchdog device
+ *
+ * @wdd: Device to configure
+ * @on: Desired state of the watchdog timer (ON/OFF)
+ *
+ * This function configures two aspects of the watchdog timer:
+ *
+ * - Whether it is ON or OFF
+ * - Its timeout duration
+ *
+ * with the first aspect specified via the function argument and the
+ * second via the value of 'wdd->timeout'.
+ */
+static int rave_sp_wdt_configure(struct watchdog_device *wdd, bool on)
+{
+ return to_rave_sp_wdt(wdd)->variant->configure(wdd, on);
+}
+
+static int rave_sp_wdt_legacy_restart(struct watchdog_device *wdd)
+{
+ u8 cmd[] = {
+ [0] = RAVE_SP_CMD_RESET,
+ [1] = 0,
+ [2] = RAVE_SP_RESET_BYTE
+ };
+
+ return rave_sp_wdt_exec(wdd, cmd, sizeof(cmd));
+}
+
+static int rave_sp_wdt_rdu_restart(struct watchdog_device *wdd)
+{
+ u8 cmd[] = {
+ [0] = RAVE_SP_CMD_RESET,
+ [1] = 0,
+ [2] = RAVE_SP_RESET_BYTE,
+ [3] = RAVE_SP_RESET_REASON_NORMAL
+ };
+
+ return rave_sp_wdt_exec(wdd, cmd, sizeof(cmd));
+}
+
+static int rave_sp_wdt_reboot_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ /*
+ * Restart handler is called in atomic context which means we
+	 * can't communicate with the SP via UART. Luckily for us the SP will
+ * wait 500ms before actually resetting us, so we ask it to do
+ * so here and let the rest of the system go on wrapping
+ * things up.
+ */
+ if (action == SYS_DOWN || action == SYS_HALT) {
+ struct rave_sp_wdt *sp_wd =
+ container_of(nb, struct rave_sp_wdt, reboot_notifier);
+
+ const int ret = sp_wd->variant->restart(&sp_wd->wdd);
+
+ if (ret < 0)
+ dev_err(sp_wd->wdd.parent,
+ "Failed to issue restart command (%d)", ret);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int rave_sp_wdt_restart(struct watchdog_device *wdd,
+ unsigned long action, void *data)
+{
+ /*
+ * The actual work was done by reboot notifier above. SP
+ * firmware waits 500 ms before issuing reset, so let's hang
+	 * here for twice that delay and hopefully we'd never reach
+ * the return statement.
+ */
+ mdelay(2 * RAVE_SP_RESET_DELAY_MS);
+
+ return -EIO;
+}
+
+static int rave_sp_wdt_start(struct watchdog_device *wdd)
+{
+ int ret;
+
+ ret = rave_sp_wdt_configure(wdd, true);
+ if (!ret)
+ set_bit(WDOG_HW_RUNNING, &wdd->status);
+
+ return ret;
+}
+
+static int rave_sp_wdt_stop(struct watchdog_device *wdd)
+{
+ return rave_sp_wdt_configure(wdd, false);
+}
+
+static int rave_sp_wdt_set_timeout(struct watchdog_device *wdd,
+ unsigned int timeout)
+{
+ wdd->timeout = timeout;
+
+ return rave_sp_wdt_configure(wdd, watchdog_active(wdd));
+}
+
+static int rave_sp_wdt_ping(struct watchdog_device *wdd)
+{
+ u8 cmd[] = {
+ [0] = RAVE_SP_CMD_PET_WDT,
+ [1] = 0,
+ };
+
+ return rave_sp_wdt_exec(wdd, cmd, sizeof(cmd));
+}
+
+static const struct watchdog_info rave_sp_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
+ .identity = "RAVE SP Watchdog",
+};
+
+static const struct watchdog_ops rave_sp_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = rave_sp_wdt_start,
+ .stop = rave_sp_wdt_stop,
+ .ping = rave_sp_wdt_ping,
+ .set_timeout = rave_sp_wdt_set_timeout,
+ .restart = rave_sp_wdt_restart,
+};
+
+static const struct rave_sp_wdt_variant rave_sp_wdt_legacy = {
+ .max_timeout = 255,
+ .min_timeout = 1,
+ .configure = rave_sp_wdt_legacy_configure,
+ .restart = rave_sp_wdt_legacy_restart,
+};
+
+static const struct rave_sp_wdt_variant rave_sp_wdt_rdu = {
+ .max_timeout = 180,
+ .min_timeout = 60,
+ .configure = rave_sp_wdt_rdu_configure,
+ .restart = rave_sp_wdt_rdu_restart,
+};
+
+static const struct of_device_id rave_sp_wdt_of_match[] = {
+ {
+ .compatible = "zii,rave-sp-watchdog-legacy",
+ .data = &rave_sp_wdt_legacy,
+ },
+ {
+ .compatible = "zii,rave-sp-watchdog",
+ .data = &rave_sp_wdt_rdu,
+ },
+ { /* sentinel */ }
+};
+
+static int rave_sp_wdt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct watchdog_device *wdd;
+ struct rave_sp_wdt *sp_wd;
+ struct nvmem_cell *cell;
+ __le16 timeout = 0;
+ int ret;
+
+ sp_wd = devm_kzalloc(dev, sizeof(*sp_wd), GFP_KERNEL);
+ if (!sp_wd)
+ return -ENOMEM;
+
+ sp_wd->variant = of_device_get_match_data(dev);
+ sp_wd->sp = dev_get_drvdata(dev->parent);
+
+ wdd = &sp_wd->wdd;
+ wdd->parent = dev;
+ wdd->info = &rave_sp_wdt_info;
+ wdd->ops = &rave_sp_wdt_ops;
+ wdd->min_timeout = sp_wd->variant->min_timeout;
+ wdd->max_timeout = sp_wd->variant->max_timeout;
+ wdd->status = WATCHDOG_NOWAYOUT_INIT_STATUS;
+ wdd->timeout = 60;
+
+ cell = nvmem_cell_get(dev, "wdt-timeout");
+ if (!IS_ERR(cell)) {
+ size_t len;
+ void *value = nvmem_cell_read(cell, &len);
+
+ if (!IS_ERR(value)) {
+ memcpy(&timeout, value, min(len, sizeof(timeout)));
+ kfree(value);
+ }
+ nvmem_cell_put(cell);
+ }
+ watchdog_init_timeout(wdd, le16_to_cpu(timeout), dev);
+ watchdog_set_restart_priority(wdd, 255);
+ watchdog_stop_on_unregister(wdd);
+
+ sp_wd->reboot_notifier.notifier_call = rave_sp_wdt_reboot_notifier;
+ ret = devm_register_reboot_notifier(dev, &sp_wd->reboot_notifier);
+ if (ret) {
+ dev_err(dev, "Failed to register reboot notifier\n");
+ return ret;
+ }
+
+ /*
+	 * We don't know if the watchdog is running now. To be sure, let's
+	 * start it and depend on the watchdog core to ping it.
+ */
+ wdd->max_hw_heartbeat_ms = wdd->max_timeout * 1000;
+ ret = rave_sp_wdt_start(wdd);
+ if (ret) {
+ dev_err(dev, "Watchdog didn't start\n");
+ return ret;
+ }
+
+ ret = devm_watchdog_register_device(dev, wdd);
+ if (ret) {
+ dev_err(dev, "Failed to register watchdog device\n");
+ rave_sp_wdt_stop(wdd);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct platform_driver rave_sp_wdt_driver = {
+ .probe = rave_sp_wdt_probe,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = rave_sp_wdt_of_match,
+ },
+};
+
+module_platform_driver(rave_sp_wdt_driver);
+
+MODULE_DEVICE_TABLE(of, rave_sp_wdt_of_match);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrey Vostrikov <andrey.vostrikov@cogentembedded.com>");
+MODULE_AUTHOR("Nikita Yushchenko <nikita.yoush@cogentembedded.com>");
+MODULE_AUTHOR("Andrey Smirnov <andrew.smirnov@gmail.com>");
+MODULE_DESCRIPTION("RAVE SP Watchdog driver");
+MODULE_ALIAS("platform:rave-sp-watchdog");
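For context, once a driver like rave-sp-wdt registers with the watchdog core, userspace pets it through the standard character device. A minimal keep-alive loop (a sketch; the device path and interval are illustrative):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
        int fd = open("/dev/watchdog", O_WRONLY);

        if (fd < 0)
                return 1;

        for (;;) {
                ioctl(fd, WDIOC_KEEPALIVE, 0);  /* pet the watchdog */
                sleep(10);                      /* stay inside the timeout */
        }
}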
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 9729a64ea1a9..72c0416a01cc 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -621,9 +621,9 @@ static long evtchn_ioctl(struct file *file,
return rc;
}
-static unsigned int evtchn_poll(struct file *file, poll_table *wait)
+static __poll_t evtchn_poll(struct file *file, poll_table *wait)
{
- unsigned int mask = POLLOUT | POLLWRNORM;
+ __poll_t mask = POLLOUT | POLLWRNORM;
struct per_user_data *u = file->private_data;
poll_wait(file, &u->evtchn_wait, wait);
diff --git a/drivers/xen/mcelog.c b/drivers/xen/mcelog.c
index 6cc1c15bcd84..9ade533d9e40 100644
--- a/drivers/xen/mcelog.c
+++ b/drivers/xen/mcelog.c
@@ -139,7 +139,7 @@ out:
return err ? err : buf - ubuf;
}
-static unsigned int xen_mce_chrdev_poll(struct file *file, poll_table *wait)
+static __poll_t xen_mce_chrdev_poll(struct file *file, poll_table *wait)
{
poll_wait(file, &xen_mce_chrdev_wait, wait);
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index 4c789e61554b..78804e71f9a6 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -878,7 +878,7 @@ received:
return ret;
}
-static unsigned int pvcalls_front_poll_passive(struct file *file,
+static __poll_t pvcalls_front_poll_passive(struct file *file,
struct pvcalls_bedata *bedata,
struct sock_mapping *map,
poll_table *wait)
@@ -935,12 +935,12 @@ static unsigned int pvcalls_front_poll_passive(struct file *file,
return 0;
}
-static unsigned int pvcalls_front_poll_active(struct file *file,
+static __poll_t pvcalls_front_poll_active(struct file *file,
struct pvcalls_bedata *bedata,
struct sock_mapping *map,
poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
int32_t in_error, out_error;
struct pvcalls_data_intf *intf = map->active.ring;
@@ -958,12 +958,12 @@ static unsigned int pvcalls_front_poll_active(struct file *file,
return mask;
}
-unsigned int pvcalls_front_poll(struct file *file, struct socket *sock,
+__poll_t pvcalls_front_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct pvcalls_bedata *bedata;
struct sock_mapping *map;
- int ret;
+ __poll_t ret;
pvcalls_enter();
if (!pvcalls_front_dev) {
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 82fc54f8eb77..5bb72d3f8337 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -36,7 +36,7 @@
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
#include <linux/bootmem.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index f3b089b7c0b6..e17ec3fce590 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -645,7 +645,7 @@ static int xenbus_file_release(struct inode *inode, struct file *filp)
return 0;
}
-static unsigned int xenbus_file_poll(struct file *file, poll_table *wait)
+static __poll_t xenbus_file_poll(struct file *file, poll_table *wait)
{
struct xenbus_file_priv *u = file->private_data;
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
index 0f0e6925e97d..14a6c1b90c9f 100644
--- a/fs/affs/amigaffs.c
+++ b/fs/affs/amigaffs.c
@@ -10,6 +10,7 @@
*/
#include <linux/math64.h>
+#include <linux/iversion.h>
#include "affs.h"
/*
@@ -60,7 +61,7 @@ affs_insert_hash(struct inode *dir, struct buffer_head *bh)
affs_brelse(dir_bh);
dir->i_mtime = dir->i_ctime = current_time(dir);
- dir->i_version++;
+ inode_inc_iversion(dir);
mark_inode_dirty(dir);
return 0;
@@ -114,7 +115,7 @@ affs_remove_hash(struct inode *dir, struct buffer_head *rem_bh)
affs_brelse(bh);
dir->i_mtime = dir->i_ctime = current_time(dir);
- dir->i_version++;
+ inode_inc_iversion(dir);
mark_inode_dirty(dir);
return retval;
diff --git a/fs/affs/dir.c b/fs/affs/dir.c
index a105e77df2c1..d180b46453cf 100644
--- a/fs/affs/dir.c
+++ b/fs/affs/dir.c
@@ -14,6 +14,7 @@
*
*/
+#include <linux/iversion.h>
#include "affs.h"
static int affs_readdir(struct file *, struct dir_context *);
@@ -80,7 +81,7 @@ affs_readdir(struct file *file, struct dir_context *ctx)
* we can jump directly to where we left off.
*/
ino = (u32)(long)file->private_data;
- if (ino && file->f_version == inode->i_version) {
+ if (ino && inode_cmp_iversion(inode, file->f_version) == 0) {
pr_debug("readdir() left off=%d\n", ino);
goto inside;
}
@@ -130,7 +131,7 @@ inside:
} while (ino);
}
done:
- file->f_version = inode->i_version;
+ file->f_version = inode_query_iversion(inode);
file->private_data = (void *)(long)ino;
affs_brelse(fh_bh);
diff --git a/fs/affs/super.c b/fs/affs/super.c
index 1117e36134cc..e602619aed9d 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -21,6 +21,7 @@
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/seq_file.h>
+#include <linux/iversion.h>
#include "affs.h"
static int affs_statfs(struct dentry *dentry, struct kstatfs *buf);
@@ -102,7 +103,7 @@ static struct inode *affs_alloc_inode(struct super_block *sb)
if (!i)
return NULL;
- i->vfs_inode.i_version = 1;
+ inode_set_iversion(&i->vfs_inode, 1);
i->i_lc = NULL;
i->i_ext_bh = NULL;
i->i_pa_cnt = 0;
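These affs hunks belong to the tree-wide i_version rework: the raw counter is replaced by the <linux/iversion.h> accessors so the core can lazily skip on-disk version bumps when nobody has queried the value. The readdir-side pattern, reduced to a sketch around hypothetical helpers:

static void demo_dir_changed(struct inode *dir)
{
        /* Bump the change counter on every directory modification. */
        inode_inc_iversion(dir);
}

static bool demo_dir_stale(struct inode *dir, struct file *file)
{
        /* Nonzero compare: the snapshot from the previous call is stale. */
        return inode_cmp_iversion(dir, file->f_version) != 0;
}

After a successful walk the snapshot is refreshed with file->f_version = inode_query_iversion(dir);, exactly as the affs_readdir() hunk does.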
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index b90ef39ae914..88ec38c2d83c 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/circ_buf.h>
+#include <linux/iversion.h>
#include "internal.h"
#include "afs_fs.h"
@@ -124,7 +125,7 @@ static void xdr_decode_AFSFetchStatus(const __be32 **_bp,
vnode->vfs_inode.i_ctime.tv_sec = status->mtime_client;
vnode->vfs_inode.i_mtime = vnode->vfs_inode.i_ctime;
vnode->vfs_inode.i_atime = vnode->vfs_inode.i_ctime;
- vnode->vfs_inode.i_version = data_version;
+ inode_set_iversion_raw(&vnode->vfs_inode, data_version);
}
expected_version = status->data_version;
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 1e81864ef0b2..c7f17c44c7ce 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -21,6 +21,7 @@
#include <linux/sched.h>
#include <linux/mount.h>
#include <linux/namei.h>
+#include <linux/iversion.h>
#include "internal.h"
static const struct inode_operations afs_symlink_inode_operations = {
@@ -89,7 +90,7 @@ static int afs_inode_map_status(struct afs_vnode *vnode, struct key *key)
inode->i_atime = inode->i_mtime = inode->i_ctime;
inode->i_blocks = 0;
inode->i_generation = vnode->fid.unique;
- inode->i_version = vnode->status.data_version;
+ inode_set_iversion_raw(inode, vnode->status.data_version);
inode->i_mapping->a_ops = &afs_fs_aops;
read_sequnlock_excl(&vnode->cb_lock);
@@ -218,7 +219,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
inode->i_ctime.tv_nsec = 0;
inode->i_atime = inode->i_mtime = inode->i_ctime;
inode->i_blocks = 0;
- inode->i_version = 0;
+ inode_set_iversion_raw(inode, 0);
inode->i_generation = 0;
set_bit(AFS_VNODE_PSEUDODIR, &vnode->flags);
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index 961a12dc6dc8..a0c57c37fa21 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -442,8 +442,8 @@ int autofs4_wait(struct autofs_sb_info *sbi,
memcpy(&wq->name, &qstr, sizeof(struct qstr));
wq->dev = autofs4_get_dev(sbi);
wq->ino = autofs4_get_ino(sbi);
- wq->uid = current_cred()->uid;
- wq->gid = current_cred()->gid;
+ wq->uid = current_uid();
+ wq->gid = current_gid();
wq->pid = pid;
wq->tgid = tgid;
wq->status = -EINTR; /* Status return if interrupted */
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
index 6fe881d5cb38..0c4373628eb4 100644
--- a/fs/btrfs/Makefile
+++ b/fs/btrfs/Makefile
@@ -19,4 +19,4 @@ btrfs-$(CONFIG_BTRFS_FS_REF_VERIFY) += ref-verify.o
btrfs-$(CONFIG_BTRFS_FS_RUN_SANITY_TESTS) += tests/free-space-tests.o \
tests/extent-buffer-tests.o tests/btrfs-tests.o \
tests/extent-io-tests.o tests/inode-tests.o tests/qgroup-tests.o \
- tests/free-space-tree-tests.o
+ tests/free-space-tree-tests.o tests/extent-map-tests.o
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 7d0dc100a09a..e4054e533f6d 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -216,7 +216,8 @@ static int prelim_ref_compare(struct prelim_ref *ref1,
return 0;
}
-void update_share_count(struct share_check *sc, int oldcount, int newcount)
+static void update_share_count(struct share_check *sc, int oldcount,
+ int newcount)
{
if ((!sc) || (oldcount == 0 && newcount < 1))
return;
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 5982c8a71f02..07d049c0c20f 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -33,7 +33,6 @@
#include <linux/bit_spinlock.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
-#include <linux/sort.h>
#include <linux/log2.h>
#include "ctree.h"
#include "disk-io.h"
@@ -45,6 +44,21 @@
#include "extent_io.h"
#include "extent_map.h"
+static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
+
+const char* btrfs_compress_type2str(enum btrfs_compression_type type)
+{
+ switch (type) {
+ case BTRFS_COMPRESS_ZLIB:
+ case BTRFS_COMPRESS_LZO:
+ case BTRFS_COMPRESS_ZSTD:
+ case BTRFS_COMPRESS_NONE:
+ return btrfs_compress_types[type];
+ }
+
+ return NULL;
+}
+
static int btrfs_decompress_bio(struct compressed_bio *cb);
static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
@@ -348,8 +362,6 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
page->mapping = NULL;
if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
PAGE_SIZE) {
- bio_get(bio);
-
/*
* inc the count before we submit the bio so
* we know the end IO handler won't happen before
@@ -372,8 +384,6 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
bio_endio(bio);
}
- bio_put(bio);
-
bio = btrfs_bio_alloc(bdev, first_byte);
bio->bi_opf = REQ_OP_WRITE | write_flags;
bio->bi_private = cb;
@@ -389,7 +399,6 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
first_byte += PAGE_SIZE;
cond_resched();
}
- bio_get(bio);
ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
BUG_ON(ret); /* -ENOMEM */
@@ -405,13 +414,12 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
bio_endio(bio);
}
- bio_put(bio);
return 0;
}
static u64 bio_end_offset(struct bio *bio)
{
- struct bio_vec *last = &bio->bi_io_vec[bio->bi_vcnt - 1];
+ struct bio_vec *last = bio_last_bvec_all(bio);
return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
}
@@ -563,7 +571,7 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
/* we need the actual starting offset of this extent in the file */
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree,
- page_offset(bio->bi_io_vec->bv_page),
+ page_offset(bio_first_page_all(bio)),
PAGE_SIZE);
read_unlock(&em_tree->lock);
if (!em)
@@ -638,8 +646,6 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
page->mapping = NULL;
if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
PAGE_SIZE) {
- bio_get(comp_bio);
-
ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
BTRFS_WQ_ENDIO_DATA);
BUG_ON(ret); /* -ENOMEM */
@@ -666,8 +672,6 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
bio_endio(comp_bio);
}
- bio_put(comp_bio);
-
comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
comp_bio->bi_private = cb;
@@ -677,7 +681,6 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
}
cur_disk_byte += PAGE_SIZE;
}
- bio_get(comp_bio);
ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
BUG_ON(ret); /* -ENOMEM */
@@ -693,7 +696,6 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
bio_endio(comp_bio);
}
- bio_put(comp_bio);
return 0;
fail2:
@@ -752,6 +754,8 @@ struct heuristic_ws {
u32 sample_size;
/* Buckets store counters for each byte value */
struct bucket_item *bucket;
+ /* Sorting buffer */
+ struct bucket_item *bucket_b;
struct list_head list;
};
@@ -763,6 +767,7 @@ static void free_heuristic_ws(struct list_head *ws)
kvfree(workspace->sample);
kfree(workspace->bucket);
+ kfree(workspace->bucket_b);
kfree(workspace);
}
@@ -782,6 +787,10 @@ static struct list_head *alloc_heuristic_ws(void)
if (!ws->bucket)
goto fail;
+ ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
+ if (!ws->bucket_b)
+ goto fail;
+
INIT_LIST_HEAD(&ws->list);
return &ws->list;
fail:
@@ -1278,13 +1287,103 @@ static u32 shannon_entropy(struct heuristic_ws *ws)
return entropy_sum * 100 / entropy_max;
}
-/* Compare buckets by size, ascending */
-static int bucket_comp_rev(const void *lv, const void *rv)
+#define RADIX_BASE 4U
+#define COUNTERS_SIZE (1U << RADIX_BASE)
+
+static u8 get4bits(u64 num, int shift)
+{
+ u8 low4bits;
+
+ num >>= shift;
+ /* Reverse order */
+ low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
+ return low4bits;
+}
+
+/*
+ * Use 4 bits as radix base
+ * Use 16 u32 counters for calculating new position in buf array
+ *
+ * @array - array that will be sorted
+ * @array_buf - buffer array to store sorting results
+ * must be equal in size to @array
+ * @num - array size
+ */
+static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
+ int num)
{
- const struct bucket_item *l = (const struct bucket_item *)lv;
- const struct bucket_item *r = (const struct bucket_item *)rv;
+ u64 max_num;
+ u64 buf_num;
+ u32 counters[COUNTERS_SIZE];
+ u32 new_addr;
+ u32 addr;
+ int bitlen;
+ int shift;
+ int i;
- return r->count - l->count;
+ /*
+	 * Try to avoid useless loop iterations for small numbers stored in big
+	 * counters. Example: 48 33 4 ... in a 64bit array
+ */
+ max_num = array[0].count;
+ for (i = 1; i < num; i++) {
+ buf_num = array[i].count;
+ if (buf_num > max_num)
+ max_num = buf_num;
+ }
+
+ buf_num = ilog2(max_num);
+ bitlen = ALIGN(buf_num, RADIX_BASE * 2);
+
+ shift = 0;
+ while (shift < bitlen) {
+ memset(counters, 0, sizeof(counters));
+
+ for (i = 0; i < num; i++) {
+ buf_num = array[i].count;
+ addr = get4bits(buf_num, shift);
+ counters[addr]++;
+ }
+
+ for (i = 1; i < COUNTERS_SIZE; i++)
+ counters[i] += counters[i - 1];
+
+ for (i = num - 1; i >= 0; i--) {
+ buf_num = array[i].count;
+ addr = get4bits(buf_num, shift);
+ counters[addr]--;
+ new_addr = counters[addr];
+ array_buf[new_addr] = array[i];
+ }
+
+ shift += RADIX_BASE;
+
+ /*
+		 * Normal radix sort expects to move data from a temporary
+		 * array to the main one. But that requires some CPU time.
+		 * Avoid that by doing another sort iteration into the
+		 * original array instead of a memcpy()
+ */
+ memset(counters, 0, sizeof(counters));
+
+ for (i = 0; i < num; i ++) {
+ buf_num = array_buf[i].count;
+ addr = get4bits(buf_num, shift);
+ counters[addr]++;
+ }
+
+ for (i = 1; i < COUNTERS_SIZE; i++)
+ counters[i] += counters[i - 1];
+
+ for (i = num - 1; i >= 0; i--) {
+ buf_num = array_buf[i].count;
+ addr = get4bits(buf_num, shift);
+ counters[addr]--;
+ new_addr = counters[addr];
+ array[new_addr] = array_buf[i];
+ }
+
+ shift += RADIX_BASE;
+ }
}
/*
@@ -1314,7 +1413,7 @@ static int byte_core_set_size(struct heuristic_ws *ws)
struct bucket_item *bucket = ws->bucket;
/* Sort in reverse order */
- sort(bucket, BUCKET_SIZE, sizeof(*bucket), &bucket_comp_rev, NULL);
+ radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);
for (i = 0; i < BYTE_CORE_SET_LOW; i++)
coreset_sum += bucket[i].count;
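The heuristic's bucket sort is hot enough that the generic sort() (heapsort) was replaced by the 4-bit LSD radix sort added above, ping-ponging between bucket and the new bucket_b buffer. A standalone userspace sketch of the same scheme, sorting plain u32 keys in descending order by inverting each digit (bucket_item reduced to a bare key for clarity):

#include <stdint.h>
#include <string.h>

#define RADIX_BASE	4
#define COUNTERS	(1 << RADIX_BASE)

/* Inverted 4-bit digit: ascending counting sort then yields descending order. */
static int digit(uint32_t v, int shift)
{
        return (COUNTERS - 1) - ((v >> shift) % COUNTERS);
}

static void radix_pass(const uint32_t *in, uint32_t *out, int n, int shift)
{
        uint32_t counters[COUNTERS] = { 0 };
        int i;

        for (i = 0; i < n; i++)                 /* count digit occurrences */
                counters[digit(in[i], shift)]++;
        for (i = 1; i < COUNTERS; i++)          /* prefix sums = end offsets */
                counters[i] += counters[i - 1];
        for (i = n - 1; i >= 0; i--)            /* stable placement */
                out[--counters[digit(in[i], shift)]] = in[i];
}

static void radix_sort_desc(uint32_t *a, uint32_t *tmp, int n)
{
        int shift;

        /* Two 4-bit passes per step ping-pong between the buffers. */
        for (shift = 0; shift < 32; shift += 2 * RADIX_BASE) {
                radix_pass(a, tmp, n, shift);
                radix_pass(tmp, a, n, shift + RADIX_BASE);
        }
}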
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index 0868cc554f14..677fa4aa0bd7 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -75,7 +75,7 @@ struct compressed_bio {
u32 sums;
};
-void btrfs_init_compress(void);
+void __init btrfs_init_compress(void);
void btrfs_exit_compress(void);
int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
@@ -137,6 +137,8 @@ extern const struct btrfs_compress_op btrfs_zlib_compress;
extern const struct btrfs_compress_op btrfs_lzo_compress;
extern const struct btrfs_compress_op btrfs_zstd_compress;
+const char* btrfs_compress_type2str(enum btrfs_compression_type type);
+
int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end);
#endif
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 1e74cf826532..b88a79e69ddf 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1807,8 +1807,8 @@ static noinline int generic_bin_search(struct extent_buffer *eb,
* simple bin_search frontend that does the right thing for
* leaves vs nodes
*/
-static int bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
- int level, int *slot)
+int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
+ int level, int *slot)
{
if (level == 0)
return generic_bin_search(eb,
@@ -1824,12 +1824,6 @@ static int bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
slot);
}
-int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
- int level, int *slot)
-{
- return bin_search(eb, key, level, slot);
-}
-
static void root_add_used(struct btrfs_root *root, u32 size)
{
spin_lock(&root->accounting_lock);
@@ -2614,7 +2608,7 @@ static int key_search(struct extent_buffer *b, const struct btrfs_key *key,
int level, int *prev_cmp, int *slot)
{
if (*prev_cmp != 0) {
- *prev_cmp = bin_search(b, key, level, slot);
+ *prev_cmp = btrfs_bin_search(b, key, level, slot);
return *prev_cmp;
}
@@ -2660,17 +2654,29 @@ int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
}
/*
- * look for key in the tree. path is filled in with nodes along the way
- * if key is found, we return zero and you can find the item in the leaf
- * level of the path (level 0)
+ * btrfs_search_slot - look for a key in a tree and perform necessary
+ * modifications to preserve tree invariants.
+ *
+ * @trans: Handle of transaction, used when modifying the tree
+ * @p: Holds all btree nodes along the search path
+ * @root: The root node of the tree
+ * @key: The key we are looking for
+ * @ins_len: Indicates purpose of search, for inserts it is 1, for
+ * deletions it's -1. 0 for plain searches
+ * @cow: boolean should CoW operations be performed. Must always be 1
+ * when modifying the tree.
+ *
+ * If @ins_len > 0, nodes and leaves will be split as we walk down the tree.
+ * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible)
*
- * If the key isn't found, the path points to the slot where it should
- * be inserted, and 1 is returned. If there are other errors during the
- * search a negative error number is returned.
+ * If @key is found, 0 is returned and you can find the item in the leaf level
+ * of the path (level 0)
*
- * if ins_len > 0, nodes and leaves will be split as we walk down the
- * tree. if ins_len < 0, nodes will be merged as we walk down the tree (if
- * possible)
+ * If @key isn't found, 1 is returned and the leaf level of the path (level 0)
+ * points to the slot where it should be inserted
+ *
+ * If an error is encountered while searching the tree a negative error number
+ * is returned
*/
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
const struct btrfs_key *key, struct btrfs_path *p,
@@ -2774,6 +2780,8 @@ again:
* contention with the cow code
*/
if (cow) {
+ bool last_level = (level == (BTRFS_MAX_LEVEL - 1));
+
/*
* if we don't really need to cow this block
* then we don't want to set the path blocking,
@@ -2798,9 +2806,13 @@ again:
}
btrfs_set_path_blocking(p);
- err = btrfs_cow_block(trans, root, b,
- p->nodes[level + 1],
- p->slots[level + 1], &b);
+ if (last_level)
+ err = btrfs_cow_block(trans, root, b, NULL, 0,
+ &b);
+ else
+ err = btrfs_cow_block(trans, root, b,
+ p->nodes[level + 1],
+ p->slots[level + 1], &b);
if (err) {
ret = err;
goto done;
@@ -5175,7 +5187,7 @@ again:
while (1) {
nritems = btrfs_header_nritems(cur);
level = btrfs_header_level(cur);
- sret = bin_search(cur, min_key, level, &slot);
+ sret = btrfs_bin_search(cur, min_key, level, &slot);
/* at the lowest level, we're done, setup the path and exit */
if (level == path->lowest_level) {
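The rewritten btrfs_search_slot() comment pins down the return contract: 0 when the key is found at level 0, 1 when it is absent (with the path pointing at the insertion slot), negative on error. The same convention on a flat sorted array, as a sketch:

/* Sketch of the 0/1/negative search convention documented above. */
static int demo_search(const int *arr, int n, int key, int *slot)
{
        int lo = 0, hi = n;

        while (lo < hi) {
                int mid = lo + (hi - lo) / 2;

                if (arr[mid] < key)
                        lo = mid + 1;
                else
                        hi = mid;
        }

        *slot = lo;             /* match slot, or insertion point */
        if (lo < n && arr[lo] == key)
                return 0;       /* found */
        return 1;               /* not found; *slot is where it belongs */
}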
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 13c260b525a1..1a462ab85c49 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -679,7 +679,6 @@ enum btrfs_orphan_cleanup_state {
/* used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash {
struct list_head hash_list;
- wait_queue_head_t wait;
spinlock_t lock;
};
@@ -3060,15 +3059,10 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
struct btrfs_path *path, u64 dir,
const char *name, u16 name_len,
int mod);
-int verify_dir_item(struct btrfs_fs_info *fs_info,
- struct extent_buffer *leaf, int slot,
- struct btrfs_dir_item *dir_item);
struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
const char *name,
int name_len);
-bool btrfs_is_name_len_valid(struct extent_buffer *leaf, int slot,
- unsigned long start, u16 name_len);
/* orphan.c */
int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans,
@@ -3197,7 +3191,7 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc);
struct inode *btrfs_alloc_inode(struct super_block *sb);
void btrfs_destroy_inode(struct inode *inode);
int btrfs_drop_inode(struct inode *inode);
-int btrfs_init_cachep(void);
+int __init btrfs_init_cachep(void);
void btrfs_destroy_cachep(void);
long btrfs_ioctl_trans_end(struct file *file);
struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
@@ -3248,7 +3242,7 @@ ssize_t btrfs_dedupe_file_range(struct file *src_file, u64 loff, u64 olen,
struct file *dst_file, u64 dst_loff);
/* file.c */
-int btrfs_auto_defrag_init(void);
+int __init btrfs_auto_defrag_init(void);
void btrfs_auto_defrag_exit(void);
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode);
@@ -3283,7 +3277,7 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
/* sysfs.c */
-int btrfs_init_sysfs(void);
+int __init btrfs_init_sysfs(void);
void btrfs_exit_sysfs(void);
int btrfs_sysfs_add_mounted(struct btrfs_fs_info *fs_info);
void btrfs_sysfs_remove_mounted(struct btrfs_fs_info *fs_info);
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index a6226cd6063c..0530f6f2e4ba 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -18,6 +18,7 @@
*/
#include <linux/slab.h>
+#include <linux/iversion.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
@@ -1302,40 +1303,42 @@ static void btrfs_async_run_delayed_root(struct btrfs_work *work)
if (!path)
goto out;
-again:
- if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND / 2)
- goto free_path;
+ do {
+ if (atomic_read(&delayed_root->items) <
+ BTRFS_DELAYED_BACKGROUND / 2)
+ break;
- delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
- if (!delayed_node)
- goto free_path;
+ delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
+ if (!delayed_node)
+ break;
- path->leave_spinning = 1;
- root = delayed_node->root;
+ path->leave_spinning = 1;
+ root = delayed_node->root;
- trans = btrfs_join_transaction(root);
- if (IS_ERR(trans))
- goto release_path;
+ trans = btrfs_join_transaction(root);
+ if (IS_ERR(trans)) {
+ btrfs_release_path(path);
+ btrfs_release_prepared_delayed_node(delayed_node);
+ total_done++;
+ continue;
+ }
- block_rsv = trans->block_rsv;
- trans->block_rsv = &root->fs_info->delayed_block_rsv;
+ block_rsv = trans->block_rsv;
+ trans->block_rsv = &root->fs_info->delayed_block_rsv;
- __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
+ __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
- trans->block_rsv = block_rsv;
- btrfs_end_transaction(trans);
- btrfs_btree_balance_dirty_nodelay(root->fs_info);
+ trans->block_rsv = block_rsv;
+ btrfs_end_transaction(trans);
+ btrfs_btree_balance_dirty_nodelay(root->fs_info);
-release_path:
- btrfs_release_path(path);
- total_done++;
+ btrfs_release_path(path);
+ btrfs_release_prepared_delayed_node(delayed_node);
+ total_done++;
- btrfs_release_prepared_delayed_node(delayed_node);
- if ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK) ||
- total_done < async_work->nr)
- goto again;
+ } while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
+ || total_done < async_work->nr);
-free_path:
btrfs_free_path(path);
out:
wake_up(&delayed_root->wait);
@@ -1348,10 +1351,6 @@ static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
{
struct btrfs_async_delayed_work *async_work;
- if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND ||
- btrfs_workqueue_normal_congested(fs_info->delayed_workers))
- return 0;
-
async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
if (!async_work)
return -ENOMEM;
@@ -1387,7 +1386,8 @@ void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
{
struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
- if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
+ if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
+ btrfs_workqueue_normal_congested(fs_info->delayed_workers))
return;
if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
@@ -1713,7 +1713,8 @@ static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
btrfs_set_stack_inode_generation(inode_item,
BTRFS_I(inode)->generation);
- btrfs_set_stack_inode_sequence(inode_item, inode->i_version);
+ btrfs_set_stack_inode_sequence(inode_item,
+ inode_peek_iversion(inode));
btrfs_set_stack_inode_transid(inode_item, trans->transid);
btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
@@ -1767,7 +1768,8 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev)
BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
- inode->i_version = btrfs_stack_inode_sequence(inode_item);
+ inode_set_iversion_queried(inode,
+ btrfs_stack_inode_sequence(inode_item));
inode->i_rdev = 0;
*rdev = btrfs_stack_inode_rdev(inode_item);
BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
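The delayed-inode hunk above folds a goto-based retry loop into a single do/while so that path release and node cleanup run on every iteration rather than being stitched together through labels. Schematically, with done(), setup(), work(), cleanup() and more_pending() as hypothetical stand-ins:

static bool done(void);
static int setup(void);
static void work(void);
static void cleanup(void);
static bool more_pending(void);

static void demo_after(void)
{
        do {
                if (done())
                        break;
                if (setup() < 0) {      /* e.g. btrfs_join_transaction() fails */
                        cleanup();      /* release and retry, no label needed */
                        continue;
                }
                work();
                cleanup();              /* runs on every iteration */
        } while (more_pending());
}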
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 83be8f9fd906..a1a40cf382e3 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -937,7 +937,7 @@ void btrfs_delayed_ref_exit(void)
kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}
-int btrfs_delayed_ref_init(void)
+int __init btrfs_delayed_ref_init(void)
{
btrfs_delayed_ref_head_cachep = kmem_cache_create(
"btrfs_delayed_ref_head",
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index a43af432f859..c4f625e5a691 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -203,7 +203,7 @@ extern struct kmem_cache *btrfs_delayed_tree_ref_cachep;
extern struct kmem_cache *btrfs_delayed_data_ref_cachep;
extern struct kmem_cache *btrfs_delayed_extent_op_cachep;
-int btrfs_delayed_ref_init(void);
+int __init btrfs_delayed_ref_init(void);
void btrfs_delayed_ref_exit(void);
static inline struct btrfs_delayed_extent_op *
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 7c655f9a7a50..7efbc4d1128b 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -172,7 +172,8 @@ no_valid_dev_replace_entry_found:
dev_replace->tgtdev->commit_bytes_used =
dev_replace->srcdev->commit_bytes_used;
}
- dev_replace->tgtdev->is_tgtdev_for_dev_replace = 1;
+ set_bit(BTRFS_DEV_STATE_REPLACE_TGT,
+ &dev_replace->tgtdev->dev_state);
btrfs_init_dev_replace_tgtdev_for_resume(fs_info,
dev_replace->tgtdev);
}
@@ -304,6 +305,14 @@ void btrfs_after_dev_replace_commit(struct btrfs_fs_info *fs_info)
dev_replace->cursor_left_last_write_of_item;
}
+static char* btrfs_dev_name(struct btrfs_device *device)
+{
+ if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
+ return "<missing disk>";
+ else
+ return rcu_str_deref(device->name);
+}
+
int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
const char *tgtdev_name, u64 srcdevid, const char *srcdev_name,
int read_src)
@@ -363,8 +372,7 @@ int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
btrfs_info_in_rcu(fs_info,
"dev_replace from %s (devid %llu) to %s started",
- src_device->missing ? "<missing disk>" :
- rcu_str_deref(src_device->name),
+ btrfs_dev_name(src_device),
src_device->devid,
rcu_str_deref(tgt_device->name));
@@ -538,8 +546,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
} else {
btrfs_err_in_rcu(fs_info,
"btrfs_scrub_dev(%s, %llu, %s) failed %d",
- src_device->missing ? "<missing disk>" :
- rcu_str_deref(src_device->name),
+ btrfs_dev_name(src_device),
src_device->devid,
rcu_str_deref(tgt_device->name), scrub_ret);
btrfs_dev_replace_unlock(dev_replace, 1);
@@ -557,11 +564,10 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
btrfs_info_in_rcu(fs_info,
"dev_replace from %s (devid %llu) to %s finished",
- src_device->missing ? "<missing disk>" :
- rcu_str_deref(src_device->name),
+ btrfs_dev_name(src_device),
src_device->devid,
rcu_str_deref(tgt_device->name));
- tgt_device->is_tgtdev_for_dev_replace = 0;
+ clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &tgt_device->dev_state);
tgt_device->devid = src_device->devid;
src_device->devid = BTRFS_DEV_REPLACE_DEVID;
memcpy(uuid_tmp, tgt_device->uuid, sizeof(uuid_tmp));
@@ -814,12 +820,10 @@ static int btrfs_dev_replace_kthread(void *data)
progress = btrfs_dev_replace_progress(fs_info);
progress = div_u64(progress, 10);
btrfs_info_in_rcu(fs_info,
- "continuing dev_replace from %s (devid %llu) to %s @%u%%",
- dev_replace->srcdev->missing ? "<missing disk>"
- : rcu_str_deref(dev_replace->srcdev->name),
+ "continuing dev_replace from %s (devid %llu) to target %s @%u%%",
+ btrfs_dev_name(dev_replace->srcdev),
dev_replace->srcdev->devid,
- dev_replace->tgtdev ? rcu_str_deref(dev_replace->tgtdev->name)
- : "<missing target disk>",
+ btrfs_dev_name(dev_replace->tgtdev),
(unsigned int)progress);
btrfs_dev_replace_continue_on_mount(fs_info);
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index 41cb9196eaa8..cbe421605cd5 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -403,8 +403,6 @@ struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_fs_info *fs_info,
btrfs_dir_data_len(leaf, dir_item);
name_ptr = (unsigned long)(dir_item + 1);
- if (verify_dir_item(fs_info, leaf, path->slots[0], dir_item))
- return NULL;
if (btrfs_dir_name_len(leaf, dir_item) == name_len &&
memcmp_extent_buffer(leaf, name, name_ptr, name_len) == 0)
return dir_item;
@@ -450,109 +448,3 @@ int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
}
return ret;
}
-
-int verify_dir_item(struct btrfs_fs_info *fs_info,
- struct extent_buffer *leaf,
- int slot,
- struct btrfs_dir_item *dir_item)
-{
- u16 namelen = BTRFS_NAME_LEN;
- int ret;
- u8 type = btrfs_dir_type(leaf, dir_item);
-
- if (type >= BTRFS_FT_MAX) {
- btrfs_crit(fs_info, "invalid dir item type: %d", (int)type);
- return 1;
- }
-
- if (type == BTRFS_FT_XATTR)
- namelen = XATTR_NAME_MAX;
-
- if (btrfs_dir_name_len(leaf, dir_item) > namelen) {
- btrfs_crit(fs_info, "invalid dir item name len: %u",
- (unsigned)btrfs_dir_name_len(leaf, dir_item));
- return 1;
- }
-
- namelen = btrfs_dir_name_len(leaf, dir_item);
- ret = btrfs_is_name_len_valid(leaf, slot,
- (unsigned long)(dir_item + 1), namelen);
- if (!ret)
- return 1;
-
- /* BTRFS_MAX_XATTR_SIZE is the same for all dir items */
- if ((btrfs_dir_data_len(leaf, dir_item) +
- btrfs_dir_name_len(leaf, dir_item)) >
- BTRFS_MAX_XATTR_SIZE(fs_info)) {
- btrfs_crit(fs_info, "invalid dir item name + data len: %u + %u",
- (unsigned)btrfs_dir_name_len(leaf, dir_item),
- (unsigned)btrfs_dir_data_len(leaf, dir_item));
- return 1;
- }
-
- return 0;
-}
-
-bool btrfs_is_name_len_valid(struct extent_buffer *leaf, int slot,
- unsigned long start, u16 name_len)
-{
- struct btrfs_fs_info *fs_info = leaf->fs_info;
- struct btrfs_key key;
- u32 read_start;
- u32 read_end;
- u32 item_start;
- u32 item_end;
- u32 size;
- bool ret = true;
-
- ASSERT(start > BTRFS_LEAF_DATA_OFFSET);
-
- read_start = start - BTRFS_LEAF_DATA_OFFSET;
- read_end = read_start + name_len;
- item_start = btrfs_item_offset_nr(leaf, slot);
- item_end = btrfs_item_end_nr(leaf, slot);
-
- btrfs_item_key_to_cpu(leaf, &key, slot);
-
- switch (key.type) {
- case BTRFS_DIR_ITEM_KEY:
- case BTRFS_XATTR_ITEM_KEY:
- case BTRFS_DIR_INDEX_KEY:
- size = sizeof(struct btrfs_dir_item);
- break;
- case BTRFS_INODE_REF_KEY:
- size = sizeof(struct btrfs_inode_ref);
- break;
- case BTRFS_INODE_EXTREF_KEY:
- size = sizeof(struct btrfs_inode_extref);
- break;
- case BTRFS_ROOT_REF_KEY:
- case BTRFS_ROOT_BACKREF_KEY:
- size = sizeof(struct btrfs_root_ref);
- break;
- default:
- ret = false;
- goto out;
- }
-
- if (read_start < item_start) {
- ret = false;
- goto out;
- }
- if (read_end > item_end) {
- ret = false;
- goto out;
- }
-
- /* there shall be item(s) before name */
- if (read_start - item_start < size) {
- ret = false;
- goto out;
- }
-
-out:
- if (!ret)
- btrfs_crit(fs_info, "invalid dir item name len: %u",
- (unsigned int)name_len);
- return ret;
-}
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 83e2349e1362..21f34ad0d411 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -62,7 +62,8 @@
BTRFS_HEADER_FLAG_RELOC |\
BTRFS_SUPER_FLAG_ERROR |\
BTRFS_SUPER_FLAG_SEEDING |\
- BTRFS_SUPER_FLAG_METADUMP)
+ BTRFS_SUPER_FLAG_METADUMP |\
+ BTRFS_SUPER_FLAG_METADUMP_V2)
static const struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
@@ -221,7 +222,7 @@ void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
* extents on the btree inode are pretty simple, there's one extent
* that covers the entire device
*/
-static struct extent_map *btree_get_extent(struct btrfs_inode *inode,
+struct extent_map *btree_get_extent(struct btrfs_inode *inode,
struct page *page, size_t pg_offset, u64 start, u64 len,
int create)
{
@@ -286,7 +287,7 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info,
int verify)
{
u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
- char *result = NULL;
+ char result[BTRFS_CSUM_SIZE];
unsigned long len;
unsigned long cur_len;
unsigned long offset = BTRFS_CSUM_SIZE;
@@ -295,7 +296,6 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info,
unsigned long map_len;
int err;
u32 crc = ~(u32)0;
- unsigned long inline_result;
len = buf->len - offset;
while (len > 0) {
@@ -309,13 +309,7 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info,
len -= cur_len;
offset += cur_len;
}
- if (csum_size > sizeof(inline_result)) {
- result = kzalloc(csum_size, GFP_NOFS);
- if (!result)
- return -ENOMEM;
- } else {
- result = (char *)&inline_result;
- }
+ memset(result, 0, BTRFS_CSUM_SIZE);
btrfs_csum_final(crc, result);
@@ -330,15 +324,12 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info,
"%s checksum verify failed on %llu wanted %X found %X level %d",
fs_info->sb->s_id, buf->start,
val, found, btrfs_header_level(buf));
- if (result != (char *)&inline_result)
- kfree(result);
return -EUCLEAN;
}
} else {
write_extent_buffer(buf, result, 0, csum_size);
}
- if (result != (char *)&inline_result)
- kfree(result);
+
return 0;
}
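
A note on the csum_tree_block() hunk above: BTRFS_CSUM_SIZE (32 bytes) bounds every supported checksum, so the kzalloc() fallback and its cleanup on the error paths can be dropped in favour of a fixed stack buffer. A minimal standalone sketch of the before/after pattern (function names here are illustrative, not the real ones):

#include <linux/slab.h>
#include <linux/string.h>

#define DEMO_CSUM_SIZE 32       /* mirrors BTRFS_CSUM_SIZE */

/* before: heap fallback whenever csum_size might exceed local scratch */
static int demo_csum_old(size_t csum_size)
{
        unsigned long inline_result;
        char *result;

        if (csum_size > sizeof(inline_result)) {
                result = kzalloc(csum_size, GFP_NOFS);  /* can fail */
                if (!result)
                        return -ENOMEM;
        } else {
                result = (char *)&inline_result;
        }
        /* ... use result ..., then conditionally kfree(result) */
        return 0;
}

/* after: csum_size <= DEMO_CSUM_SIZE always holds, so no allocation,
 * no -ENOMEM path, and nothing to free on the error paths */
static int demo_csum_new(void)
{
        char result[DEMO_CSUM_SIZE];

        memset(result, 0, DEMO_CSUM_SIZE);
        /* ... use result ... */
        return 0;
}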
@@ -392,7 +383,7 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
clear_extent_buffer_uptodate(eb);
out:
unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
- &cached_state, GFP_NOFS);
+ &cached_state);
if (need_lock)
btrfs_tree_read_unlock_blocking(eb);
return ret;
@@ -456,7 +447,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_fs_info *fs_info,
io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
while (1) {
ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE,
- btree_get_extent, mirror_num);
+ mirror_num);
if (!ret) {
if (!verify_parent_transid(io_tree, eb,
parent_transid, 0))
@@ -1013,7 +1004,7 @@ void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr)
if (IS_ERR(buf))
return;
read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
- buf, WAIT_NONE, btree_get_extent, 0);
+ buf, WAIT_NONE, 0);
free_extent_buffer(buf);
}
@@ -1032,7 +1023,7 @@ int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);
ret = read_extent_buffer_pages(io_tree, buf, WAIT_PAGE_LOCK,
- btree_get_extent, mirror_num);
+ mirror_num);
if (ret) {
free_extent_buffer(buf);
return ret;
@@ -1244,7 +1235,7 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *root;
struct btrfs_key key;
int ret = 0;
- uuid_le uuid;
+ uuid_le uuid = NULL_UUID_LE;
root = btrfs_alloc_root(fs_info, GFP_KERNEL);
if (!root)
@@ -1285,7 +1276,8 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
btrfs_set_root_used(&root->root_item, leaf->len);
btrfs_set_root_last_snapshot(&root->root_item, 0);
btrfs_set_root_dirid(&root->root_item, 0);
- uuid_le_gen(&uuid);
+ if (is_fstree(objectid))
+ uuid_le_gen(&uuid);
memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
root->root_item.drop_level = 0;
@@ -2876,7 +2868,7 @@ retry_root_backup:
goto fail_sysfs;
}
- if (!sb_rdonly(sb) && !btrfs_check_rw_degradable(fs_info)) {
+ if (!sb_rdonly(sb) && !btrfs_check_rw_degradable(fs_info, NULL)) {
btrfs_warn(fs_info,
"writeable mount is not allowed due to too many missing devices");
goto fail_sysfs;
@@ -3359,7 +3351,7 @@ static void write_dev_flush(struct btrfs_device *device)
bio->bi_private = &device->flush_wait;
btrfsic_submit_bio(bio);
- device->flush_bio_sent = 1;
+ set_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
}
/*
@@ -3369,10 +3361,10 @@ static blk_status_t wait_dev_flush(struct btrfs_device *device)
{
struct bio *bio = device->flush_bio;
- if (!device->flush_bio_sent)
+ if (!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state))
return BLK_STS_OK;
- device->flush_bio_sent = 0;
+ clear_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
wait_for_completion_io(&device->flush_wait);
return bio->bi_status;
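
The flush_bio_sent hunks are part of a wider conversion in this series: per-device bool fields (missing, writeable, in_fs_metadata, flush_bio_sent, ...) become numbered flags in a single dev_state word manipulated through the atomic bitops API. A hedged sketch of the idiom; the flag numbers below are illustrative, the real definitions live in fs/btrfs/volumes.h:

#include <linux/bitops.h>

#define DEMO_DEV_STATE_WRITEABLE        0
#define DEMO_DEV_STATE_IN_FS_METADATA   1
#define DEMO_DEV_STATE_MISSING          2
#define DEMO_DEV_STATE_FLUSH_SENT       4

struct demo_device {
        unsigned long dev_state;        /* one word holds all the flags */
};

static void demo(struct demo_device *dev)
{
        set_bit(DEMO_DEV_STATE_FLUSH_SENT, &dev->dev_state);    /* atomic */
        if (test_bit(DEMO_DEV_STATE_FLUSH_SENT, &dev->dev_state))
                clear_bit(DEMO_DEV_STATE_FLUSH_SENT, &dev->dev_state);
}

Unlike independent bools, set_bit()/clear_bit()/test_bit() are atomic on the shared word, so concurrent updates to different flags of the same device need no extra locking.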
@@ -3380,7 +3372,7 @@ static blk_status_t wait_dev_flush(struct btrfs_device *device)
static int check_barrier_error(struct btrfs_fs_info *fs_info)
{
- if (!btrfs_check_rw_degradable(fs_info))
+ if (!btrfs_check_rw_degradable(fs_info, NULL))
return -EIO;
return 0;
}
@@ -3396,14 +3388,16 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
int errors_wait = 0;
blk_status_t ret;
+ lockdep_assert_held(&info->fs_devices->device_list_mutex);
/* send down all the barriers */
head = &info->fs_devices->devices;
- list_for_each_entry_rcu(dev, head, dev_list) {
- if (dev->missing)
+ list_for_each_entry(dev, head, dev_list) {
+ if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
continue;
if (!dev->bdev)
continue;
- if (!dev->in_fs_metadata || !dev->writeable)
+ if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
+ !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
continue;
write_dev_flush(dev);
@@ -3411,14 +3405,15 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
}
/* wait for all the barriers */
- list_for_each_entry_rcu(dev, head, dev_list) {
- if (dev->missing)
+ list_for_each_entry(dev, head, dev_list) {
+ if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
continue;
if (!dev->bdev) {
errors_wait++;
continue;
}
- if (!dev->in_fs_metadata || !dev->writeable)
+ if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
+ !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
continue;
ret = wait_dev_flush(dev);
@@ -3510,12 +3505,13 @@ int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
}
}
- list_for_each_entry_rcu(dev, head, dev_list) {
+ list_for_each_entry(dev, head, dev_list) {
if (!dev->bdev) {
total_errors++;
continue;
}
- if (!dev->in_fs_metadata || !dev->writeable)
+ if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
+ !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
continue;
btrfs_set_stack_device_generation(dev_item, 0);
@@ -3551,10 +3547,11 @@ int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
}
total_errors = 0;
- list_for_each_entry_rcu(dev, head, dev_list) {
+ list_for_each_entry(dev, head, dev_list) {
if (!dev->bdev)
continue;
- if (!dev->in_fs_metadata || !dev->writeable)
+ if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
+ !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
continue;
ret = wait_dev_supers(dev, max_mirrors);
@@ -3912,9 +3909,11 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info)
btrfs_err(fs_info, "no valid FS found");
ret = -EINVAL;
}
- if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP)
- btrfs_warn(fs_info, "unrecognized super flag: %llu",
+ if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP) {
+ btrfs_err(fs_info, "unrecognized or unsupported super flag: %llu",
btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
+ ret = -EINVAL;
+ }
if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
btrfs_err(fs_info, "tree_root level too big: %d >= %d",
btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index 7f7c35d6347a..301151a50ac1 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -149,6 +149,9 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
u64 objectid);
int btree_lock_page_hook(struct page *page, void *data,
void (*flush_fn)(void *));
+struct extent_map *btree_get_extent(struct btrfs_inode *inode,
+ struct page *page, size_t pg_offset, u64 start, u64 len,
+ int create);
int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags);
int __init btrfs_end_io_wq_init(void);
void btrfs_end_io_wq_exit(void);
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index 3aeb5770f896..ddaccad469f8 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -283,11 +283,6 @@ static int btrfs_get_name(struct dentry *parent, char *name,
name_len = btrfs_inode_ref_name_len(leaf, iref);
}
- ret = btrfs_is_name_len_valid(leaf, path->slots[0], name_ptr, name_len);
- if (!ret) {
- btrfs_free_path(path);
- return -EIO;
- }
read_extent_buffer(leaf, name, name_ptr, name_len);
btrfs_free_path(path);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 2f4328511ac8..05751a677da4 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2145,7 +2145,10 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
for (i = 0; i < bbio->num_stripes; i++, stripe++) {
u64 bytes;
- if (!stripe->dev->can_discard)
+ struct request_queue *req_q;
+
+ req_q = bdev_get_queue(stripe->dev->bdev);
+ if (!blk_queue_discard(req_q))
continue;
ret = btrfs_issue_discard(stripe->dev->bdev,
@@ -2894,7 +2897,7 @@ int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
struct btrfs_block_rsv *global_rsv;
u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
- u64 num_dirty_bgs = trans->transaction->num_dirty_bgs;
+ unsigned int num_dirty_bgs = trans->transaction->num_dirty_bgs;
u64 num_bytes, num_dirty_bgs_bytes;
int ret = 0;
@@ -4945,12 +4948,12 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info,
bytes = 0;
else
bytes -= delayed_rsv->size;
+ spin_unlock(&delayed_rsv->lock);
+
if (percpu_counter_compare(&space_info->total_bytes_pinned,
bytes) < 0) {
- spin_unlock(&delayed_rsv->lock);
return -ENOSPC;
}
- spin_unlock(&delayed_rsv->lock);
commit:
trans = btrfs_join_transaction(fs_info->extent_root);
@@ -5738,8 +5741,8 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
* or return if we already have enough space. This will also handle the reserve
* tracepoint for the reserved amount.
*/
-int btrfs_inode_rsv_refill(struct btrfs_inode *inode,
- enum btrfs_reserve_flush_enum flush)
+static int btrfs_inode_rsv_refill(struct btrfs_inode *inode,
+ enum btrfs_reserve_flush_enum flush)
{
struct btrfs_root *root = inode->root;
struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
@@ -5770,7 +5773,7 @@ int btrfs_inode_rsv_refill(struct btrfs_inode *inode,
* This is the same as btrfs_block_rsv_release, except that it handles the
* tracepoint for the reservation.
*/
-void btrfs_inode_rsv_release(struct btrfs_inode *inode)
+static void btrfs_inode_rsv_release(struct btrfs_inode *inode)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
@@ -9690,7 +9693,7 @@ int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr)
* space to fit our block group in.
*/
if (device->total_bytes > device->bytes_used + min_free &&
- !device->is_tgtdev_for_dev_replace) {
+ !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
ret = find_free_dev_extent(trans, device, min_free,
&dev_offset, NULL);
if (!ret)
@@ -10875,7 +10878,7 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
*trimmed = 0;
/* Not writeable = nothing to do. */
- if (!device->writeable)
+ if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
return 0;
/* No free space = nothing to do. */
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 012d63870b99..dfeb74a0be77 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -21,6 +21,7 @@
#include "locking.h"
#include "rcu-string.h"
#include "backref.h"
+#include "disk-io.h"
static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;
@@ -109,8 +110,6 @@ struct tree_entry {
struct extent_page_data {
struct bio *bio;
struct extent_io_tree *tree;
- get_extent_t *get_extent;
-
/* tells writepage not to lock the state bits for this range
* it still does the unlocking
*/
@@ -139,7 +138,8 @@ static void add_extent_changeset(struct extent_state *state, unsigned bits,
BUG_ON(ret < 0);
}
-static noinline void flush_write_bio(void *data);
+static void flush_write_bio(struct extent_page_data *epd);
+
static inline struct btrfs_fs_info *
tree_fs_info(struct extent_io_tree *tree)
{
@@ -581,7 +581,7 @@ static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
*
* This takes the tree lock, and returns 0 on success and < 0 on error.
*/
-static int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
unsigned bits, int wake, int delete,
struct extent_state **cached_state,
gfp_t mask, struct extent_changeset *changeset)
@@ -1295,10 +1295,10 @@ int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
unsigned bits, int wake, int delete,
- struct extent_state **cached, gfp_t mask)
+ struct extent_state **cached)
{
return __clear_extent_bit(tree, start, end, bits, wake, delete,
- cached, mask, NULL);
+ cached, GFP_NOFS, NULL);
}
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
@@ -1348,7 +1348,7 @@ int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
if (err == -EEXIST) {
if (failed_start > start)
clear_extent_bit(tree, start, failed_start - 1,
- EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
+ EXTENT_LOCKED, 1, 0, NULL);
return 0;
}
return 1;
@@ -1648,7 +1648,7 @@ again:
EXTENT_DELALLOC, 1, cached_state);
if (!ret) {
unlock_extent_cached(tree, delalloc_start, delalloc_end,
- &cached_state, GFP_NOFS);
+ &cached_state);
__unlock_for_delalloc(inode, locked_page,
delalloc_start, delalloc_end);
cond_resched();
@@ -1744,7 +1744,7 @@ void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
unsigned long page_ops)
{
clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, clear_bits, 1, 0,
- NULL, GFP_NOFS);
+ NULL);
__process_pages_contig(inode->i_mapping, locked_page,
start >> PAGE_SHIFT, end >> PAGE_SHIFT,
@@ -2027,7 +2027,8 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
bio->bi_iter.bi_sector = sector;
dev = bbio->stripes[bbio->mirror_num - 1].dev;
btrfs_put_bbio(bbio);
- if (!dev || !dev->bdev || !dev->writeable) {
+ if (!dev || !dev->bdev ||
+ !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
btrfs_bio_counter_dec(fs_info);
bio_put(bio);
return -EIO;
@@ -2257,7 +2258,7 @@ int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
return 0;
}
-bool btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
+bool btrfs_check_repairable(struct inode *inode, unsigned failed_bio_pages,
struct io_failure_record *failrec, int failed_mirror)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
@@ -2281,7 +2282,7 @@ bool btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
* a) deliver good data to the caller
* b) correct the bad sectors on disk
*/
- if (failed_bio->bi_vcnt > 1) {
+ if (failed_bio_pages > 1) {
/*
* to fulfill b), we need to know the exact failing sectors, as
* we don't want to rewrite any more than the failed ones. thus,
@@ -2374,6 +2375,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
int read_mode = 0;
blk_status_t status;
int ret;
+ unsigned failed_bio_pages = bio_pages_all(failed_bio);
BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
@@ -2381,13 +2383,13 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
if (ret)
return ret;
- if (!btrfs_check_repairable(inode, failed_bio, failrec,
+ if (!btrfs_check_repairable(inode, failed_bio_pages, failrec,
failed_mirror)) {
free_io_failure(failure_tree, tree, failrec);
return -EIO;
}
- if (failed_bio->bi_vcnt > 1)
+ if (failed_bio_pages > 1)
read_mode |= REQ_FAILFAST_DEV;
phy_offset >>= inode->i_sb->s_blocksize_bits;
@@ -2492,7 +2494,7 @@ endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len,
if (uptodate && tree->track_uptodate)
set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC);
- unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
+ unlock_extent_cached_atomic(tree, start, end, &cached);
}
/*
@@ -2724,7 +2726,7 @@ static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
unsigned long bio_flags)
{
blk_status_t ret = 0;
- struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+ struct bio_vec *bvec = bio_last_bvec_all(bio);
struct page *page = bvec->bv_page;
struct extent_io_tree *tree = bio->bi_private;
u64 start;
@@ -2732,7 +2734,6 @@ static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
start = page_offset(page) + bvec->bv_offset;
bio->bi_private = NULL;
- bio_get(bio);
if (tree->ops)
ret = tree->ops->submit_bio_hook(tree->private_data, bio,
@@ -2740,7 +2741,6 @@ static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
else
btrfsic_submit_bio(bio);
- bio_put(bio);
return blk_status_to_errno(ret);
}
@@ -2942,8 +2942,7 @@ static int __do_readpage(struct extent_io_tree *tree,
set_extent_uptodate(tree, cur, cur + iosize - 1,
&cached, GFP_NOFS);
unlock_extent_cached(tree, cur,
- cur + iosize - 1,
- &cached, GFP_NOFS);
+ cur + iosize - 1, &cached);
break;
}
em = __get_extent_map(inode, page, pg_offset, cur,
@@ -3036,8 +3035,7 @@ static int __do_readpage(struct extent_io_tree *tree,
set_extent_uptodate(tree, cur, cur + iosize - 1,
&cached, GFP_NOFS);
unlock_extent_cached(tree, cur,
- cur + iosize - 1,
- &cached, GFP_NOFS);
+ cur + iosize - 1, &cached);
cur = cur + iosize;
pg_offset += iosize;
continue;
@@ -3092,9 +3090,8 @@ out:
static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
struct page *pages[], int nr_pages,
u64 start, u64 end,
- get_extent_t *get_extent,
struct extent_map **em_cached,
- struct bio **bio, int mirror_num,
+ struct bio **bio,
unsigned long *bio_flags,
u64 *prev_em_start)
{
@@ -3115,18 +3112,17 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
}
for (index = 0; index < nr_pages; index++) {
- __do_readpage(tree, pages[index], get_extent, em_cached, bio,
- mirror_num, bio_flags, 0, prev_em_start);
+ __do_readpage(tree, pages[index], btrfs_get_extent, em_cached,
+ bio, 0, bio_flags, 0, prev_em_start);
put_page(pages[index]);
}
}
static void __extent_readpages(struct extent_io_tree *tree,
struct page *pages[],
- int nr_pages, get_extent_t *get_extent,
+ int nr_pages,
struct extent_map **em_cached,
- struct bio **bio, int mirror_num,
- unsigned long *bio_flags,
+ struct bio **bio, unsigned long *bio_flags,
u64 *prev_em_start)
{
u64 start = 0;
@@ -3146,8 +3142,8 @@ static void __extent_readpages(struct extent_io_tree *tree,
} else {
__do_contiguous_readpages(tree, &pages[first_index],
index - first_index, start,
- end, get_extent, em_cached,
- bio, mirror_num, bio_flags,
+ end, em_cached,
+ bio, bio_flags,
prev_em_start);
start = page_start;
end = start + PAGE_SIZE - 1;
@@ -3158,9 +3154,8 @@ static void __extent_readpages(struct extent_io_tree *tree,
if (end)
__do_contiguous_readpages(tree, &pages[first_index],
index - first_index, start,
- end, get_extent, em_cached, bio,
- mirror_num, bio_flags,
- prev_em_start);
+ end, em_cached, bio,
+ bio_flags, prev_em_start);
}
static int __extent_read_full_page(struct extent_io_tree *tree,
@@ -3375,7 +3370,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
page_end, NULL, 1);
break;
}
- em = epd->get_extent(BTRFS_I(inode), page, pg_offset, cur,
+ em = btrfs_get_extent(BTRFS_I(inode), page, pg_offset, cur,
end - cur + 1, 1);
if (IS_ERR_OR_NULL(em)) {
SetPageError(page);
@@ -3458,10 +3453,9 @@ done:
* and the end_io handler clears the writeback ranges
*/
static int __extent_writepage(struct page *page, struct writeback_control *wbc,
- void *data)
+ struct extent_page_data *epd)
{
struct inode *inode = page->mapping->host;
- struct extent_page_data *epd = data;
u64 start = page_offset(page);
u64 page_end = start + PAGE_SIZE - 1;
int ret;
@@ -3895,8 +3889,7 @@ retry:
* write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
* @mapping: address space structure to write
* @wbc: subtract the number of written pages from *@wbc->nr_to_write
- * @writepage: function called for each page
- * @data: data passed to writepage function
+ * @epd: extent_page_data, passed through to __extent_writepage

*
* If a page is already under I/O, write_cache_pages() skips it, even
* if it's dirty. This is desirable behaviour for memory-cleaning writeback,
@@ -3908,8 +3901,7 @@ retry:
*/
static int extent_write_cache_pages(struct address_space *mapping,
struct writeback_control *wbc,
- writepage_t writepage, void *data,
- void (*flush_fn)(void *))
+ struct extent_page_data *epd)
{
struct inode *inode = mapping->host;
int ret = 0;
@@ -3973,7 +3965,7 @@ retry:
* mapping
*/
if (!trylock_page(page)) {
- flush_fn(data);
+ flush_write_bio(epd);
lock_page(page);
}
@@ -3984,7 +3976,7 @@ retry:
if (wbc->sync_mode != WB_SYNC_NONE) {
if (PageWriteback(page))
- flush_fn(data);
+ flush_write_bio(epd);
wait_on_page_writeback(page);
}
@@ -3994,7 +3986,7 @@ retry:
continue;
}
- ret = (*writepage)(page, wbc, data);
+ ret = __extent_writepage(page, wbc, epd);
if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
unlock_page(page);
@@ -4042,7 +4034,7 @@ retry:
return ret;
}
-static void flush_epd_write_bio(struct extent_page_data *epd)
+static void flush_write_bio(struct extent_page_data *epd)
{
if (epd->bio) {
int ret;
@@ -4053,37 +4045,28 @@ static void flush_epd_write_bio(struct extent_page_data *epd)
}
}
-static noinline void flush_write_bio(void *data)
-{
- struct extent_page_data *epd = data;
- flush_epd_write_bio(epd);
-}
-
-int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
- get_extent_t *get_extent,
- struct writeback_control *wbc)
+int extent_write_full_page(struct page *page, struct writeback_control *wbc)
{
int ret;
struct extent_page_data epd = {
.bio = NULL,
- .tree = tree,
- .get_extent = get_extent,
+ .tree = &BTRFS_I(page->mapping->host)->io_tree,
.extent_locked = 0,
.sync_io = wbc->sync_mode == WB_SYNC_ALL,
};
ret = __extent_writepage(page, wbc, &epd);
- flush_epd_write_bio(&epd);
+ flush_write_bio(&epd);
return ret;
}
-int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
- u64 start, u64 end, get_extent_t *get_extent,
+int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
int mode)
{
int ret = 0;
struct address_space *mapping = inode->i_mapping;
+ struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
struct page *page;
unsigned long nr_pages = (end - start + PAGE_SIZE) >>
PAGE_SHIFT;
@@ -4091,7 +4074,6 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
struct extent_page_data epd = {
.bio = NULL,
.tree = tree,
- .get_extent = get_extent,
.extent_locked = 1,
.sync_io = mode == WB_SYNC_ALL,
};
@@ -4117,34 +4099,30 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
start += PAGE_SIZE;
}
- flush_epd_write_bio(&epd);
+ flush_write_bio(&epd);
return ret;
}
int extent_writepages(struct extent_io_tree *tree,
struct address_space *mapping,
- get_extent_t *get_extent,
struct writeback_control *wbc)
{
int ret = 0;
struct extent_page_data epd = {
.bio = NULL,
.tree = tree,
- .get_extent = get_extent,
.extent_locked = 0,
.sync_io = wbc->sync_mode == WB_SYNC_ALL,
};
- ret = extent_write_cache_pages(mapping, wbc, __extent_writepage, &epd,
- flush_write_bio);
- flush_epd_write_bio(&epd);
+ ret = extent_write_cache_pages(mapping, wbc, &epd);
+ flush_write_bio(&epd);
return ret;
}
int extent_readpages(struct extent_io_tree *tree,
struct address_space *mapping,
- struct list_head *pages, unsigned nr_pages,
- get_extent_t get_extent)
+ struct list_head *pages, unsigned nr_pages)
{
struct bio *bio = NULL;
unsigned page_idx;
@@ -4170,13 +4148,13 @@ int extent_readpages(struct extent_io_tree *tree,
pagepool[nr++] = page;
if (nr < ARRAY_SIZE(pagepool))
continue;
- __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
- &bio, 0, &bio_flags, &prev_em_start);
+ __extent_readpages(tree, pagepool, nr, &em_cached, &bio,
+ &bio_flags, &prev_em_start);
nr = 0;
}
if (nr)
- __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
- &bio, 0, &bio_flags, &prev_em_start);
+ __extent_readpages(tree, pagepool, nr, &em_cached, &bio,
+ &bio_flags, &prev_em_start);
if (em_cached)
free_extent_map(em_cached);
@@ -4209,7 +4187,7 @@ int extent_invalidatepage(struct extent_io_tree *tree,
clear_extent_bit(tree, start, end,
EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
EXTENT_DO_ACCOUNTING,
- 1, 1, &cached_state, GFP_NOFS);
+ 1, 1, &cached_state);
return 0;
}
@@ -4234,9 +4212,9 @@ static int try_release_extent_state(struct extent_map_tree *map,
* at this point we can safely clear everything except the
* locked bit and the nodatasum bit
*/
- ret = clear_extent_bit(tree, start, end,
+ ret = __clear_extent_bit(tree, start, end,
~(EXTENT_LOCKED | EXTENT_NODATASUM),
- 0, 0, NULL, mask);
+ 0, 0, NULL, mask, NULL);
/* if clear_extent_bit failed for enomem reasons,
* we can't allow the release to continue.
@@ -4302,9 +4280,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
* This maps until we find something past 'last'
*/
static struct extent_map *get_extent_skip_holes(struct inode *inode,
- u64 offset,
- u64 last,
- get_extent_t *get_extent)
+ u64 offset, u64 last)
{
u64 sectorsize = btrfs_inode_sectorsize(inode);
struct extent_map *em;
@@ -4318,15 +4294,14 @@ static struct extent_map *get_extent_skip_holes(struct inode *inode,
if (len == 0)
break;
len = ALIGN(len, sectorsize);
- em = get_extent(BTRFS_I(inode), NULL, 0, offset, len, 0);
+ em = btrfs_get_extent_fiemap(BTRFS_I(inode), NULL, 0, offset,
+ len, 0);
if (IS_ERR_OR_NULL(em))
return em;
/* if this isn't a hole return it */
- if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
- em->block_start != EXTENT_MAP_HOLE) {
+ if (em->block_start != EXTENT_MAP_HOLE)
return em;
- }
/* this is a hole, advance to the next extent */
offset = extent_map_end(em);
@@ -4451,7 +4426,7 @@ static int emit_last_fiemap_cache(struct btrfs_fs_info *fs_info,
}
int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
- __u64 start, __u64 len, get_extent_t *get_extent)
+ __u64 start, __u64 len)
{
int ret = 0;
u64 off = start;
@@ -4533,8 +4508,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1,
&cached_state);
- em = get_extent_skip_holes(inode, start, last_for_get_extent,
- get_extent);
+ em = get_extent_skip_holes(inode, start, last_for_get_extent);
if (!em)
goto out;
if (IS_ERR(em)) {
@@ -4622,8 +4596,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
}
/* now scan forward to see if this is really the last extent. */
- em = get_extent_skip_holes(inode, off, last_for_get_extent,
- get_extent);
+ em = get_extent_skip_holes(inode, off, last_for_get_extent);
if (IS_ERR(em)) {
ret = PTR_ERR(em);
goto out;
@@ -4647,7 +4620,7 @@ out_free:
out:
btrfs_free_path(path);
unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1,
- &cached_state, GFP_NOFS);
+ &cached_state);
return ret;
}
@@ -5263,8 +5236,7 @@ int extent_buffer_uptodate(struct extent_buffer *eb)
}
int read_extent_buffer_pages(struct extent_io_tree *tree,
- struct extent_buffer *eb, int wait,
- get_extent_t *get_extent, int mirror_num)
+ struct extent_buffer *eb, int wait, int mirror_num)
{
unsigned long i;
struct page *page;
@@ -5324,7 +5296,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
ClearPageError(page);
err = __extent_read_full_page(tree, page,
- get_extent, &bio,
+ btree_get_extent, &bio,
mirror_num, &bio_flags,
REQ_META);
if (err) {
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 93dcae0c3183..a7a850abd600 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -300,19 +300,29 @@ int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
unsigned bits, struct extent_changeset *changeset);
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
unsigned bits, int wake, int delete,
- struct extent_state **cached, gfp_t mask);
+ struct extent_state **cached);
+int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+ unsigned bits, int wake, int delete,
+ struct extent_state **cached, gfp_t mask,
+ struct extent_changeset *changeset);
static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
- return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
- GFP_NOFS);
+ return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL);
}
static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
- u64 end, struct extent_state **cached, gfp_t mask)
+ u64 end, struct extent_state **cached)
+{
+ return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
+ GFP_NOFS, NULL);
+}
+
+static inline int unlock_extent_cached_atomic(struct extent_io_tree *tree,
+ u64 start, u64 end, struct extent_state **cached)
{
- return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
- mask);
+ return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
+ GFP_ATOMIC, NULL);
}
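
The header hunk above shows the API-narrowing pattern used throughout this series: callers used to pass a gfp_t at every call site; now thin static inline wrappers pin the mask (GFP_NOFS for normal filesystem context, GFP_ATOMIC for the endio path) and only __clear_extent_bit keeps the full parameter list. A generic sketch of the same technique, with made-up names:

#include <linux/gfp.h>
#include <linux/types.h>

/* full-featured worker keeps every knob (hypothetical signature) */
int __demo_clear(u64 start, u64 end, gfp_t mask);

/* wrappers bake in the only masks that are valid in each context */
static inline int demo_clear(u64 start, u64 end)
{
        return __demo_clear(start, end, GFP_NOFS);      /* fs context */
}

static inline int demo_clear_atomic(u64 start, u64 end)
{
        return __demo_clear(start, end, GFP_ATOMIC);    /* bio endio */
}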
static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
@@ -323,8 +333,7 @@ static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
if (bits & EXTENT_LOCKED)
wake = 1;
- return clear_extent_bit(tree, start, end, bits, wake, 0, NULL,
- GFP_NOFS);
+ return clear_extent_bit(tree, start, end, bits, wake, 0, NULL);
}
int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
@@ -340,10 +349,10 @@ static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
}
static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
- u64 end, struct extent_state **cached_state, gfp_t mask)
+ u64 end, struct extent_state **cached_state)
{
- return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
- cached_state, mask);
+ return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
+ cached_state, GFP_NOFS, NULL);
}
static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
@@ -358,7 +367,7 @@ static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
{
return clear_extent_bit(tree, start, end,
EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS);
+ EXTENT_DO_ACCOUNTING, 0, 0, NULL);
}
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
@@ -401,24 +410,19 @@ int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
struct extent_state **cached_state);
int extent_invalidatepage(struct extent_io_tree *tree,
struct page *page, unsigned long offset);
-int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
- get_extent_t *get_extent,
- struct writeback_control *wbc);
-int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
- u64 start, u64 end, get_extent_t *get_extent,
+int extent_write_full_page(struct page *page, struct writeback_control *wbc);
+int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
int mode);
int extent_writepages(struct extent_io_tree *tree,
struct address_space *mapping,
- get_extent_t *get_extent,
struct writeback_control *wbc);
int btree_write_cache_pages(struct address_space *mapping,
struct writeback_control *wbc);
int extent_readpages(struct extent_io_tree *tree,
struct address_space *mapping,
- struct list_head *pages, unsigned nr_pages,
- get_extent_t get_extent);
+ struct list_head *pages, unsigned nr_pages);
int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
- __u64 start, __u64 len, get_extent_t *get_extent);
+ __u64 start, __u64 len);
void set_page_extent_mapped(struct page *page);
struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
@@ -437,7 +441,7 @@ void free_extent_buffer_stale(struct extent_buffer *eb);
#define WAIT_PAGE_LOCK 2
int read_extent_buffer_pages(struct extent_io_tree *tree,
struct extent_buffer *eb, int wait,
- get_extent_t *get_extent, int mirror_num);
+ int mirror_num);
void wait_on_extent_buffer_writeback(struct extent_buffer *eb);
static inline unsigned long num_extent_pages(u64 start, u64 len)
@@ -540,7 +544,7 @@ void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start,
u64 end);
int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
struct io_failure_record **failrec_ret);
-bool btrfs_check_repairable(struct inode *inode, struct bio *failed_bio,
+bool btrfs_check_repairable(struct inode *inode, unsigned failed_bio_pages,
struct io_failure_record *failrec, int fail_mirror);
struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
struct io_failure_record *failrec,
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 2e348fb0b280..d3bd02105d1c 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -454,3 +454,135 @@ void replace_extent_mapping(struct extent_map_tree *tree,
setup_extent_mapping(tree, new, modified);
}
+
+static struct extent_map *next_extent_map(struct extent_map *em)
+{
+ struct rb_node *next;
+
+ next = rb_next(&em->rb_node);
+ if (!next)
+ return NULL;
+ return container_of(next, struct extent_map, rb_node);
+}
+
+static struct extent_map *prev_extent_map(struct extent_map *em)
+{
+ struct rb_node *prev;
+
+ prev = rb_prev(&em->rb_node);
+ if (!prev)
+ return NULL;
+ return container_of(prev, struct extent_map, rb_node);
+}
+
+/* helper for btrfs_get_extent. Given an existing extent in the tree
+ * (the nearest extent to map_start) and an extent that you want to
+ * insert, deal with overlap and insert the best-fitting new extent
+ * into the tree.
+ */
+static noinline int merge_extent_mapping(struct extent_map_tree *em_tree,
+ struct extent_map *existing,
+ struct extent_map *em,
+ u64 map_start)
+{
+ struct extent_map *prev;
+ struct extent_map *next;
+ u64 start;
+ u64 end;
+ u64 start_diff;
+
+ BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
+
+ if (existing->start > map_start) {
+ next = existing;
+ prev = prev_extent_map(next);
+ } else {
+ prev = existing;
+ next = next_extent_map(prev);
+ }
+
+ start = prev ? extent_map_end(prev) : em->start;
+ start = max_t(u64, start, em->start);
+ end = next ? next->start : extent_map_end(em);
+ end = min_t(u64, end, extent_map_end(em));
+ start_diff = start - em->start;
+ em->start = start;
+ em->len = end - start;
+ if (em->block_start < EXTENT_MAP_LAST_BYTE &&
+ !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
+ em->block_start += start_diff;
+ em->block_len = em->len;
+ }
+ return add_extent_mapping(em_tree, em, 0);
+}
+
+/**
+ * btrfs_add_extent_mapping - add extent mapping into em_tree
+ * @em_tree - the extent tree into which we want to insert the extent mapping
+ * @em_in - extent we are inserting
+ * @start - start of the logical range btrfs_get_extent() is requesting
+ * @len - length of the logical range btrfs_get_extent() is requesting
+ *
+ * Note that @em_in's range may be different from [start, start+len),
+ * but they must overlap.
+ *
+ * Insert @em_in into @em_tree. In case there is an overlapping range, handle
+ * the -EEXIST by either:
+ * a) Returning the existing extent in @em_in if @start is within the
+ * existing em.
+ * b) Merge the existing extent with @em_in passed in.
+ *
+ * Return 0 on success, otherwise -EEXIST.
+ *
+ */
+int btrfs_add_extent_mapping(struct extent_map_tree *em_tree,
+ struct extent_map **em_in, u64 start, u64 len)
+{
+ int ret;
+ struct extent_map *em = *em_in;
+
+ ret = add_extent_mapping(em_tree, em, 0);
+ /* it is possible that someone inserted the extent into the tree
+ * while we had the lock dropped. It is also possible that
+ * an overlapping map exists in the tree
+ */
+ if (ret == -EEXIST) {
+ struct extent_map *existing;
+
+ ret = 0;
+
+ existing = search_extent_mapping(em_tree, start, len);
+ /*
+ * existing will always be non-NULL, since there must be
+ * an extent causing the -EEXIST.
+ */
+ if (start >= existing->start &&
+ start < extent_map_end(existing)) {
+ free_extent_map(em);
+ *em_in = existing;
+ ret = 0;
+ } else {
+ u64 orig_start = em->start;
+ u64 orig_len = em->len;
+
+ /*
+ * The existing extent map is the one nearest to
+ * the [start, start + len) range which overlaps
+ */
+ ret = merge_extent_mapping(em_tree, existing,
+ em, start);
+ if (ret) {
+ free_extent_map(em);
+ *em_in = NULL;
+ WARN_ONCE(ret,
+"unexpected error %d: merge existing(start %llu len %llu) with em(start %llu len %llu)\n",
+ ret, existing->start, existing->len,
+ orig_start, orig_len);
+ }
+ free_extent_map(existing);
+ }
+ }
+
+ ASSERT(ret == 0 || ret == -EEXIST);
+ return ret;
+}
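
A hedged sketch of how a caller is expected to use the new helper above (a fragment; em_tree, start and len come from the surrounding context, and use_mapping() is hypothetical). The key point is that on return *em_in may have been swapped for the extent map already in the tree, so the caller must continue with, and eventually free, whatever *em_in points at rather than its original pointer:

struct extent_map *em = alloc_extent_map();
int ret;

/* ... fill in em->start, em->len, em->block_start ... */

write_lock(&em_tree->lock);
ret = btrfs_add_extent_mapping(em_tree, &em, start, len);
write_unlock(&em_tree->lock);

/* ret is 0 or -EEXIST; on success, em may now point at a pre-existing
 * map covering start, and the map we allocated was freed for us */
if (ret == 0 && em)
        use_mapping(em);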
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index 64365bbc9b16..b29f77bc0732 100644
--- a/fs/btrfs/extent_map.h
+++ b/fs/btrfs/extent_map.h
@@ -13,7 +13,6 @@
/* bits for the flags field */
#define EXTENT_FLAG_PINNED 0 /* this entry not yet on disk, don't free it */
#define EXTENT_FLAG_COMPRESSED 1
-#define EXTENT_FLAG_VACANCY 2 /* no file extent item found */
#define EXTENT_FLAG_PREALLOC 3 /* pre-allocated extent */
#define EXTENT_FLAG_LOGGING 4 /* Logging this extent */
#define EXTENT_FLAG_FILLING 5 /* Filling in a preallocated extent */
@@ -92,4 +91,6 @@ int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len, u64 gen
void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em);
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
u64 start, u64 len);
+int btrfs_add_extent_mapping(struct extent_map_tree *em_tree,
+ struct extent_map **em_in, u64 start, u64 len);
#endif
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index eb1bac7c8553..41ab9073d1d4 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -31,6 +31,7 @@
#include <linux/slab.h>
#include <linux/btrfs.h>
#include <linux/uio.h>
+#include <linux/iversion.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
@@ -1504,7 +1505,7 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
ordered->file_offset + ordered->len > start_pos &&
ordered->file_offset <= last_pos) {
unlock_extent_cached(&inode->io_tree, start_pos,
- last_pos, cached_state, GFP_NOFS);
+ last_pos, cached_state);
for (i = 0; i < num_pages; i++) {
unlock_page(pages[i]);
put_page(pages[i]);
@@ -1519,7 +1520,7 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
clear_extent_bit(&inode->io_tree, start_pos, last_pos,
EXTENT_DIRTY | EXTENT_DELALLOC |
EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
- 0, 0, cached_state, GFP_NOFS);
+ 0, 0, cached_state);
*lockstart = start_pos;
*lockend = last_pos;
ret = 1;
@@ -1755,11 +1756,10 @@ again:
if (copied > 0)
ret = btrfs_dirty_pages(inode, pages, dirty_pages,
- pos, copied, NULL);
+ pos, copied, &cached_state);
if (extents_locked)
unlock_extent_cached(&BTRFS_I(inode)->io_tree,
- lockstart, lockend, &cached_state,
- GFP_NOFS);
+ lockstart, lockend, &cached_state);
btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
if (ret) {
btrfs_drop_pages(pages, num_pages);
@@ -2019,10 +2019,19 @@ int btrfs_release_file(struct inode *inode, struct file *filp)
static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
{
int ret;
+ struct blk_plug plug;
+ /*
+ * This is only called in fsync, which would do synchronous writes, so
+ * a plug can merge adjacent IOs as much as possible. Esp. in case of
+ * multiple disks using raid profile, a large IO can be split to
+ * several segments of stripe length (currently 64K).
+ */
+ blk_start_plug(&plug);
atomic_inc(&BTRFS_I(inode)->sync_writers);
ret = btrfs_fdatawrite_range(inode, start, end);
atomic_dec(&BTRFS_I(inode)->sync_writers);
+ blk_finish_plug(&plug);
return ret;
}
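
blk_start_plug()/blk_finish_plug() bracket a batch of submissions so the block layer can merge contiguous bios per-task before dispatching them; the comment above explains why that pays off for fsync on striped raid profiles. The general shape of the idiom (submit_many_bios() is a hypothetical stand-in for the batched writeback):

#include <linux/blkdev.h>

static int demo_write_range_plugged(struct inode *inode, loff_t start,
                                    loff_t end)
{
        struct blk_plug plug;
        int ret;

        blk_start_plug(&plug);  /* queue bios per-task, don't dispatch yet */
        ret = submit_many_bios(inode, start, end);
        blk_finish_plug(&plug); /* flush the merged batch to the devices */
        return ret;
}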
@@ -2450,6 +2459,46 @@ static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len)
return ret;
}
+static int btrfs_punch_hole_lock_range(struct inode *inode,
+ const u64 lockstart,
+ const u64 lockend,
+ struct extent_state **cached_state)
+{
+ while (1) {
+ struct btrfs_ordered_extent *ordered;
+ int ret;
+
+ truncate_pagecache_range(inode, lockstart, lockend);
+
+ lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+ cached_state);
+ ordered = btrfs_lookup_first_ordered_extent(inode, lockend);
+
+ /*
+ * We need to make sure we have no ordered extents in this range
+ * and nobody raced in and read a page in this range; if we did,
+ * we need to try again.
+ */
+ if ((!ordered ||
+ (ordered->file_offset + ordered->len <= lockstart ||
+ ordered->file_offset > lockend)) &&
+ !btrfs_page_exists_in_range(inode, lockstart, lockend)) {
+ if (ordered)
+ btrfs_put_ordered_extent(ordered);
+ break;
+ }
+ if (ordered)
+ btrfs_put_ordered_extent(ordered);
+ unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
+ lockend, cached_state);
+ ret = btrfs_wait_ordered_range(inode, lockstart,
+ lockend - lockstart + 1);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
@@ -2566,38 +2615,11 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
goto out_only_mutex;
}
- while (1) {
- struct btrfs_ordered_extent *ordered;
-
- truncate_pagecache_range(inode, lockstart, lockend);
-
- lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- &cached_state);
- ordered = btrfs_lookup_first_ordered_extent(inode, lockend);
-
- /*
- * We need to make sure we have no ordered extents in this range
- * and nobody raced in and read a page in this range, if we did
- * we need to try again.
- */
- if ((!ordered ||
- (ordered->file_offset + ordered->len <= lockstart ||
- ordered->file_offset > lockend)) &&
- !btrfs_page_exists_in_range(inode, lockstart, lockend)) {
- if (ordered)
- btrfs_put_ordered_extent(ordered);
- break;
- }
- if (ordered)
- btrfs_put_ordered_extent(ordered);
- unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
- lockend, &cached_state, GFP_NOFS);
- ret = btrfs_wait_ordered_range(inode, lockstart,
- lockend - lockstart + 1);
- if (ret) {
- inode_unlock(inode);
- return ret;
- }
+ ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
+ &cached_state);
+ if (ret) {
+ inode_unlock(inode);
+ goto out_only_mutex;
}
path = btrfs_alloc_path();
@@ -2742,7 +2764,7 @@ out_free:
btrfs_free_block_rsv(fs_info, rsv);
out:
unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- &cached_state, GFP_NOFS);
+ &cached_state);
out_only_mutex:
if (!updated_inode && truncated_block && !ret && !err) {
/*
@@ -2806,6 +2828,234 @@ insert:
return 0;
}
+static int btrfs_fallocate_update_isize(struct inode *inode,
+ const u64 end,
+ const int mode)
+{
+ struct btrfs_trans_handle *trans;
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ int ret;
+ int ret2;
+
+ if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode))
+ return 0;
+
+ trans = btrfs_start_transaction(root, 1);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+
+ inode->i_ctime = current_time(inode);
+ i_size_write(inode, end);
+ btrfs_ordered_update_i_size(inode, end, NULL);
+ ret = btrfs_update_inode(trans, root, inode);
+ ret2 = btrfs_end_transaction(trans);
+
+ return ret ? ret : ret2;
+}
+
+enum {
+ RANGE_BOUNDARY_WRITTEN_EXTENT = 0,
+ RANGE_BOUNDARY_PREALLOC_EXTENT = 1,
+ RANGE_BOUNDARY_HOLE = 2,
+};
+
+static int btrfs_zero_range_check_range_boundary(struct inode *inode,
+ u64 offset)
+{
+ const u64 sectorsize = btrfs_inode_sectorsize(inode);
+ struct extent_map *em;
+ int ret;
+
+ offset = round_down(offset, sectorsize);
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0);
+ if (IS_ERR(em))
+ return PTR_ERR(em);
+
+ if (em->block_start == EXTENT_MAP_HOLE)
+ ret = RANGE_BOUNDARY_HOLE;
+ else if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
+ ret = RANGE_BOUNDARY_PREALLOC_EXTENT;
+ else
+ ret = RANGE_BOUNDARY_WRITTEN_EXTENT;
+
+ free_extent_map(em);
+ return ret;
+}
+
+static int btrfs_zero_range(struct inode *inode,
+ loff_t offset,
+ loff_t len,
+ const int mode)
+{
+ struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
+ struct extent_map *em;
+ struct extent_changeset *data_reserved = NULL;
+ int ret;
+ u64 alloc_hint = 0;
+ const u64 sectorsize = btrfs_inode_sectorsize(inode);
+ u64 alloc_start = round_down(offset, sectorsize);
+ u64 alloc_end = round_up(offset + len, sectorsize);
+ u64 bytes_to_reserve = 0;
+ bool space_reserved = false;
+
+ inode_dio_wait(inode);
+
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0,
+ alloc_start, alloc_end - alloc_start, 0);
+ if (IS_ERR(em)) {
+ ret = PTR_ERR(em);
+ goto out;
+ }
+
+ /*
+ * Avoid hole punching and extent allocation for some cases. More cases
+ * could be considered, but these are unlikely to be common and we keep things
+ * as simple as possible for now. Also, intentionally, if the target
+ * range contains one or more prealloc extents together with regular
+ * extents and holes, we drop all the existing extents and allocate a
+ * new prealloc extent, so that we get a larger contiguous disk extent.
+ */
+ if (em->start <= alloc_start &&
+ test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
+ const u64 em_end = em->start + em->len;
+
+ if (em_end >= offset + len) {
+ /*
+ * The whole range is already a prealloc extent,
+ * do nothing except updating the inode's i_size if
+ * needed.
+ */
+ free_extent_map(em);
+ ret = btrfs_fallocate_update_isize(inode, offset + len,
+ mode);
+ goto out;
+ }
+ /*
+ * Part of the range is already a prealloc extent, so operate
+ * only on the remaining part of the range.
+ */
+ alloc_start = em_end;
+ ASSERT(IS_ALIGNED(alloc_start, sectorsize));
+ len = offset + len - alloc_start;
+ offset = alloc_start;
+ alloc_hint = em->block_start + em->len;
+ }
+ free_extent_map(em);
+
+ if (BTRFS_BYTES_TO_BLKS(fs_info, offset) ==
+ BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1)) {
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0,
+ alloc_start, sectorsize, 0);
+ if (IS_ERR(em)) {
+ ret = PTR_ERR(em);
+ goto out;
+ }
+
+ if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
+ free_extent_map(em);
+ ret = btrfs_fallocate_update_isize(inode, offset + len,
+ mode);
+ goto out;
+ }
+ if (len < sectorsize && em->block_start != EXTENT_MAP_HOLE) {
+ free_extent_map(em);
+ ret = btrfs_truncate_block(inode, offset, len, 0);
+ if (!ret)
+ ret = btrfs_fallocate_update_isize(inode,
+ offset + len,
+ mode);
+ return ret;
+ }
+ free_extent_map(em);
+ alloc_start = round_down(offset, sectorsize);
+ alloc_end = alloc_start + sectorsize;
+ goto reserve_space;
+ }
+
+ alloc_start = round_up(offset, sectorsize);
+ alloc_end = round_down(offset + len, sectorsize);
+
+ /*
+ * For unaligned ranges, check the pages at the boundaries, they might
+ * map to an extent, in which case we need to partially zero them, or
+ * they might map to a hole, in which case we need our allocation range
+ * to cover them.
+ */
+ if (!IS_ALIGNED(offset, sectorsize)) {
+ ret = btrfs_zero_range_check_range_boundary(inode, offset);
+ if (ret < 0)
+ goto out;
+ if (ret == RANGE_BOUNDARY_HOLE) {
+ alloc_start = round_down(offset, sectorsize);
+ ret = 0;
+ } else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
+ ret = btrfs_truncate_block(inode, offset, 0, 0);
+ if (ret)
+ goto out;
+ } else {
+ ret = 0;
+ }
+ }
+
+ if (!IS_ALIGNED(offset + len, sectorsize)) {
+ ret = btrfs_zero_range_check_range_boundary(inode,
+ offset + len);
+ if (ret < 0)
+ goto out;
+ if (ret == RANGE_BOUNDARY_HOLE) {
+ alloc_end = round_up(offset + len, sectorsize);
+ ret = 0;
+ } else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
+ ret = btrfs_truncate_block(inode, offset + len, 0, 1);
+ if (ret)
+ goto out;
+ } else {
+ ret = 0;
+ }
+ }
+
+reserve_space:
+ if (alloc_start < alloc_end) {
+ struct extent_state *cached_state = NULL;
+ const u64 lockstart = alloc_start;
+ const u64 lockend = alloc_end - 1;
+
+ bytes_to_reserve = alloc_end - alloc_start;
+ ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
+ bytes_to_reserve);
+ if (ret < 0)
+ goto out;
+ space_reserved = true;
+ ret = btrfs_qgroup_reserve_data(inode, &data_reserved,
+ alloc_start, bytes_to_reserve);
+ if (ret)
+ goto out;
+ ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
+ &cached_state);
+ if (ret)
+ goto out;
+ ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
+ alloc_end - alloc_start,
+ i_blocksize(inode),
+ offset + len, &alloc_hint);
+ unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
+ lockend, &cached_state);
+ /* btrfs_prealloc_file_range releases reserved space on error */
+ if (ret) {
+ space_reserved = false;
+ goto out;
+ }
+ }
+ ret = btrfs_fallocate_update_isize(inode, offset + len, mode);
+ out:
+ if (ret && space_reserved)
+ btrfs_free_reserved_data_space(inode, data_reserved,
+ alloc_start, bytes_to_reserve);
+ extent_changeset_free(data_reserved);
+
+ return ret;
+}
+
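
With btrfs_zero_range() wired up below, btrfs gains FALLOC_FL_ZERO_RANGE support. A minimal userspace exercise of the flag (the path is illustrative and error handling is trimmed):

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>
#include <linux/falloc.h>

int main(void)
{
        int fd = open("testfile", O_RDWR | O_CREAT, 0644);

        /* zero 1 MiB starting at offset 4096 without punching a hole;
         * the blocks stay allocated as unwritten/prealloc extents */
        fallocate(fd, FALLOC_FL_ZERO_RANGE, 4096, 1 << 20);
        close(fd);
        return 0;
}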
static long btrfs_fallocate(struct file *file, int mode,
loff_t offset, loff_t len)
{
@@ -2831,7 +3081,8 @@ static long btrfs_fallocate(struct file *file, int mode,
cur_offset = alloc_start;
/* Make sure we aren't being given some crap mode */
- if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
+ if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
+ FALLOC_FL_ZERO_RANGE))
return -EOPNOTSUPP;
if (mode & FALLOC_FL_PUNCH_HOLE)
@@ -2842,10 +3093,12 @@ static long btrfs_fallocate(struct file *file, int mode,
*
* For qgroup space, it will be checked later.
*/
- ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
- alloc_end - alloc_start);
- if (ret < 0)
- return ret;
+ if (!(mode & FALLOC_FL_ZERO_RANGE)) {
+ ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
+ alloc_end - alloc_start);
+ if (ret < 0)
+ return ret;
+ }
inode_lock(inode);
@@ -2887,6 +3140,12 @@ static long btrfs_fallocate(struct file *file, int mode,
if (ret)
goto out;
+ if (mode & FALLOC_FL_ZERO_RANGE) {
+ ret = btrfs_zero_range(inode, offset, len, mode);
+ inode_unlock(inode);
+ return ret;
+ }
+
locked_end = alloc_end - 1;
while (1) {
struct btrfs_ordered_extent *ordered;
@@ -2896,15 +3155,15 @@ static long btrfs_fallocate(struct file *file, int mode,
*/
lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
locked_end, &cached_state);
- ordered = btrfs_lookup_first_ordered_extent(inode,
- alloc_end - 1);
+ ordered = btrfs_lookup_first_ordered_extent(inode, locked_end);
+
if (ordered &&
ordered->file_offset + ordered->len > alloc_start &&
ordered->file_offset < alloc_end) {
btrfs_put_ordered_extent(ordered);
unlock_extent_cached(&BTRFS_I(inode)->io_tree,
alloc_start, locked_end,
- &cached_state, GFP_KERNEL);
+ &cached_state);
/*
* we can't wait on the range with the transaction
* running or with the extent lock held
@@ -2922,7 +3181,7 @@ static long btrfs_fallocate(struct file *file, int mode,
/* First, check if we exceed the qgroup limit */
INIT_LIST_HEAD(&reserve_list);
- while (1) {
+ while (cur_offset < alloc_end) {
em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
alloc_end - cur_offset, 0);
if (IS_ERR(em)) {
@@ -2958,8 +3217,6 @@ static long btrfs_fallocate(struct file *file, int mode,
}
free_extent_map(em);
cur_offset = last_byte;
- if (cur_offset >= alloc_end)
- break;
}
/*
@@ -2982,37 +3239,18 @@ static long btrfs_fallocate(struct file *file, int mode,
if (ret < 0)
goto out_unlock;
- if (actual_end > inode->i_size &&
- !(mode & FALLOC_FL_KEEP_SIZE)) {
- struct btrfs_trans_handle *trans;
- struct btrfs_root *root = BTRFS_I(inode)->root;
-
- /*
- * We didn't need to allocate any more space, but we
- * still extended the size of the file so we need to
- * update i_size and the inode item.
- */
- trans = btrfs_start_transaction(root, 1);
- if (IS_ERR(trans)) {
- ret = PTR_ERR(trans);
- } else {
- inode->i_ctime = current_time(inode);
- i_size_write(inode, actual_end);
- btrfs_ordered_update_i_size(inode, actual_end, NULL);
- ret = btrfs_update_inode(trans, root, inode);
- if (ret)
- btrfs_end_transaction(trans);
- else
- ret = btrfs_end_transaction(trans);
- }
- }
+ /*
+ * We didn't need to allocate any more space, but we still extended the
+ * size of the file so we need to update i_size and the inode item.
+ */
+ ret = btrfs_fallocate_update_isize(inode, actual_end, mode);
out_unlock:
unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
- &cached_state, GFP_KERNEL);
+ &cached_state);
out:
inode_unlock(inode);
/* Let go of our reservation. */
- if (ret != 0)
+ if (ret != 0 && !(mode & FALLOC_FL_ZERO_RANGE))
btrfs_free_reserved_data_space(inode, data_reserved,
alloc_start, alloc_end - cur_offset);
extent_changeset_free(data_reserved);
@@ -3081,7 +3319,7 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
*offset = min_t(loff_t, start, inode->i_size);
}
unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- &cached_state, GFP_NOFS);
+ &cached_state);
return ret;
}
@@ -3145,7 +3383,7 @@ void btrfs_auto_defrag_exit(void)
kmem_cache_destroy(btrfs_inode_defrag_cachep);
}
-int btrfs_auto_defrag_init(void)
+int __init btrfs_auto_defrag_init(void)
{
btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
sizeof(struct inode_defrag), 0,
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 586bb06472bb..a9f22ac50d6a 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -995,8 +995,7 @@ update_cache_item(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
if (ret < 0) {
clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
- EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
- GFP_NOFS);
+ EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL);
goto fail;
}
leaf = path->nodes[0];
@@ -1010,7 +1009,7 @@ update_cache_item(struct btrfs_trans_handle *trans,
clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
inode->i_size - 1,
EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0,
- NULL, GFP_NOFS);
+ NULL);
btrfs_release_path(path);
goto fail;
}
@@ -1107,8 +1106,7 @@ static int flush_dirty_cache(struct inode *inode)
ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
if (ret)
clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
- EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
- GFP_NOFS);
+ EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL);
return ret;
}
@@ -1129,8 +1127,7 @@ cleanup_write_cache_enospc(struct inode *inode,
{
io_ctl_drop_pages(io_ctl);
unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
- i_size_read(inode) - 1, cached_state,
- GFP_NOFS);
+ i_size_read(inode) - 1, cached_state);
}
static int __btrfs_wait_cache_io(struct btrfs_root *root,
@@ -1324,7 +1321,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
io_ctl_drop_pages(io_ctl);
unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
- i_size_read(inode) - 1, &cached_state, GFP_NOFS);
+ i_size_read(inode) - 1, &cached_state);
/*
* at this point the pages are under IO and we're happy,
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index e1a7f3cb5be9..53ca025655fc 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -43,6 +43,7 @@
#include <linux/posix_acl_xattr.h>
#include <linux/uio.h>
#include <linux/magic.h>
+#include <linux/iversion.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
@@ -536,9 +537,14 @@ again:
*
* If the compression fails for any reason, we set the pages
* dirty again later on.
+ *
+ * Note that the remaining part is redirtied: the start pointer
+ * has moved, while the end is the original one.
*/
- extent_range_clear_dirty_for_io(inode, start, end);
- redirty = 1;
+ if (!redirty) {
+ extent_range_clear_dirty_for_io(inode, start, end);
+ redirty = 1;
+ }
/* Compression level is applied here and only here */
ret = btrfs_compress_pages(
@@ -765,11 +771,10 @@ retry:
* all those pages down to the drive.
*/
if (!page_started && !ret)
- extent_write_locked_range(io_tree,
- inode, async_extent->start,
+ extent_write_locked_range(inode,
+ async_extent->start,
async_extent->start +
async_extent->ram_size - 1,
- btrfs_get_extent,
WB_SYNC_ALL);
else if (ret)
unlock_page(async_cow->locked_page);
@@ -1203,7 +1208,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
u64 cur_end;
clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
- 1, 0, NULL, GFP_NOFS);
+ 1, 0, NULL);
while (start < end) {
async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
BUG_ON(!async_cow); /* -ENOMEM */
@@ -1951,7 +1956,21 @@ static blk_status_t __btrfs_submit_bio_done(void *private_data, struct bio *bio,
/*
* extent_io.c submission hook. This does the right thing for csum calculation
- * on write, or reading the csums from the tree before a read
+ * on write, or reading the csums from the tree before a read.
+ *
+ * Rules about async/sync submit,
+ * a) read: sync submit
+ *
+ * b) write without checksum: sync submit
+ *
+ * c) write with checksum:
+ * c-1) if bio is issued by fsync: sync submit
+ * (sync_writers != 0)
+ *
+ * c-2) if root is reloc root: sync submit
+ * (only in case of buffered IO)
+ *
+ * c-3) otherwise: async submit
*/
static blk_status_t btrfs_submit_bio_hook(void *private_data, struct bio *bio,
int mirror_num, unsigned long bio_flags,
@@ -2023,10 +2042,10 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
struct btrfs_ordered_sum *sum;
list_for_each_entry(sum, list, list) {
- trans->adding_csums = 1;
+ trans->adding_csums = true;
btrfs_csum_file_blocks(trans,
BTRFS_I(inode)->root->fs_info->csum_root, sum);
- trans->adding_csums = 0;
+ trans->adding_csums = false;
}
return 0;
}
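
The a)/b)/c) submission rules documented a few hunks up reduce to a small decision tree. A hedged paraphrase in code form (simplified predicate names, not the real control flow of btrfs_submit_bio_hook):

/* sketch: choose sync vs async checksum submission for a bio */
static bool demo_submit_sync(bool is_read, bool needs_csum,
                             int sync_writers, bool is_reloc_root)
{
        if (is_read)
                return true;    /* a) reads: sync */
        if (!needs_csum)
                return true;    /* b) write without checksum: sync */
        if (sync_writers)
                return true;    /* c-1) issued by fsync: sync */
        if (is_reloc_root)
                return true;    /* c-2) reloc root, buffered IO: sync */
        return false;           /* c-3) otherwise: async */
}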
@@ -2082,7 +2101,7 @@ again:
PAGE_SIZE);
if (ordered) {
unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
- page_end, &cached_state, GFP_NOFS);
+ page_end, &cached_state);
unlock_page(page);
btrfs_start_ordered_extent(inode, ordered, 1);
btrfs_put_ordered_extent(ordered);
@@ -2098,14 +2117,21 @@ again:
goto out;
}
- btrfs_set_extent_delalloc(inode, page_start, page_end, 0, &cached_state,
- 0);
+ ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
+ &cached_state, 0);
+ if (ret) {
+ mapping_set_error(page->mapping, ret);
+ end_extent_writepage(page, ret, page_start, page_end);
+ ClearPageChecked(page);
+ goto out;
+ }
+
ClearPageChecked(page);
set_page_dirty(page);
btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
out:
unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
- &cached_state, GFP_NOFS);
+ &cached_state);
out_page:
unlock_page(page);
put_page(page);
@@ -2697,7 +2723,7 @@ out_free_path:
btrfs_end_transaction(trans);
out_unlock:
unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
- &cached, GFP_NOFS);
+ &cached);
iput(inode);
return ret;
}
@@ -2986,7 +3012,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
clear_extent_bit(io_tree, ordered_extent->file_offset,
ordered_extent->file_offset + ordered_extent->len - 1,
- EXTENT_DEFRAG, 0, 0, &cached_state, GFP_NOFS);
+ EXTENT_DEFRAG, 0, 0, &cached_state);
}
if (nolock)
@@ -3056,7 +3082,7 @@ out:
ordered_extent->len - 1,
clear_bits,
(clear_bits & EXTENT_LOCKED) ? 1 : 0,
- 0, &cached_state, GFP_NOFS);
+ 0, &cached_state);
}
if (trans)
@@ -3070,7 +3096,7 @@ out:
else
start = ordered_extent->file_offset;
end = ordered_extent->file_offset + ordered_extent->len - 1;
- clear_extent_uptodate(io_tree, start, end, NULL, GFP_NOFS);
+ clear_extent_uptodate(io_tree, start, end, NULL);
/* Drop the cache for the part of the extent we didn't write. */
btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0);
@@ -3777,7 +3803,8 @@ static int btrfs_read_locked_inode(struct inode *inode)
BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
- inode->i_version = btrfs_inode_sequence(leaf, inode_item);
+ inode_set_iversion_queried(inode,
+ btrfs_inode_sequence(leaf, inode_item));
inode->i_generation = BTRFS_I(inode)->generation;
inode->i_rdev = 0;
rdev = btrfs_inode_rdev(leaf, inode_item);
@@ -3945,7 +3972,8 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
&token);
btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation,
&token);
- btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
+ btrfs_set_token_inode_sequence(leaf, item, inode_peek_iversion(inode),
+ &token);
btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
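
The linux/iversion.h helpers adopted by this hunk and by btrfs_update_time() below split raw access from change tracking. A compact sketch of the intended usage (kernel context assumed; the wrapper names are illustrative):

    #include <linux/fs.h>
    #include <linux/iversion.h>

    /* Serialize the counter to disk without flagging it as queried. */
    static u64 sequence_for_disk(struct inode *inode)
    {
            return inode_peek_iversion(inode);
    }

    /*
     * Bump i_version only when someone queried it since the last bump
     * (or when @force is set); returns true when the counter actually
     * changed, i.e. when the inode needs to be dirtied.
     */
    static bool version_bump(struct inode *inode, bool force)
    {
            return inode_maybe_inc_iversion(inode, force);
    }

This is what lets btrfs_update_time() below skip btrfs_dirty_inode() entirely when only S_VERSION was requested and nobody has looked at i_version.
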
@@ -4744,8 +4772,8 @@ int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
u64 block_start;
u64 block_end;
- if ((offset & (blocksize - 1)) == 0 &&
- (!len || ((len & (blocksize - 1)) == 0)))
+ if (IS_ALIGNED(offset, blocksize) &&
+ (!len || IS_ALIGNED(len, blocksize)))
goto out;
block_start = round_down(from, blocksize);
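
IS_ALIGNED() comes from include/linux/kernel.h and is exactly the mask test it replaces:

    #define IS_ALIGNED(x, a)    (((x) & ((typeof(x))(a) - 1)) == 0)

so the rewrite is behaviour-preserving whenever blocksize is a power of two, which btrfs block sizes are; the gain is purely readability.
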
@@ -4787,7 +4815,7 @@ again:
ordered = btrfs_lookup_ordered_extent(inode, block_start);
if (ordered) {
unlock_extent_cached(io_tree, block_start, block_end,
- &cached_state, GFP_NOFS);
+ &cached_state);
unlock_page(page);
put_page(page);
btrfs_start_ordered_extent(inode, ordered, 1);
@@ -4798,13 +4826,13 @@ again:
clear_extent_bit(&BTRFS_I(inode)->io_tree, block_start, block_end,
EXTENT_DIRTY | EXTENT_DELALLOC |
EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
- 0, 0, &cached_state, GFP_NOFS);
+ 0, 0, &cached_state);
ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
&cached_state, 0);
if (ret) {
unlock_extent_cached(io_tree, block_start, block_end,
- &cached_state, GFP_NOFS);
+ &cached_state);
goto out_unlock;
}
@@ -4823,8 +4851,7 @@ again:
}
ClearPageChecked(page);
set_page_dirty(page);
- unlock_extent_cached(io_tree, block_start, block_end, &cached_state,
- GFP_NOFS);
+ unlock_extent_cached(io_tree, block_start, block_end, &cached_state);
out_unlock:
if (ret)
@@ -4925,7 +4952,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
if (!ordered)
break;
unlock_extent_cached(io_tree, hole_start, block_end - 1,
- &cached_state, GFP_NOFS);
+ &cached_state);
btrfs_start_ordered_extent(inode, ordered, 1);
btrfs_put_ordered_extent(ordered);
}
@@ -4990,8 +5017,7 @@ next:
break;
}
free_extent_map(em);
- unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
- GFP_NOFS);
+ unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state);
return err;
}
@@ -5234,8 +5260,7 @@ static void evict_inode_truncate_pages(struct inode *inode)
clear_extent_bit(io_tree, start, end,
EXTENT_LOCKED | EXTENT_DIRTY |
EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
- EXTENT_DEFRAG, 1, 1,
- &cached_state, GFP_NOFS);
+ EXTENT_DEFRAG, 1, 1, &cached_state);
cond_resched();
spin_lock(&io_tree->lock);
@@ -5894,7 +5919,6 @@ static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx)
static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
{
struct inode *inode = file_inode(file);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_file_private *private = file->private_data;
struct btrfs_dir_item *di;
@@ -5962,9 +5986,6 @@ again:
if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
goto next;
di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
- if (verify_dir_item(fs_info, leaf, slot, di))
- goto next;
-
name_len = btrfs_dir_name_len(leaf, di);
if ((total_len + sizeof(struct dir_entry) + name_len) >=
PAGE_SIZE) {
@@ -6104,19 +6125,20 @@ static int btrfs_update_time(struct inode *inode, struct timespec *now,
int flags)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
+ bool dirty = flags & ~S_VERSION;
if (btrfs_root_readonly(root))
return -EROFS;
if (flags & S_VERSION)
- inode_inc_iversion(inode);
+ dirty |= inode_maybe_inc_iversion(inode, dirty);
if (flags & S_CTIME)
inode->i_ctime = *now;
if (flags & S_MTIME)
inode->i_mtime = *now;
if (flags & S_ATIME)
inode->i_atime = *now;
- return btrfs_dirty_inode(inode);
+ return dirty ? btrfs_dirty_inode(inode) : 0;
}
/*
@@ -6297,7 +6319,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
}
/*
* index_cnt is ignored for everything but a dir,
- * btrfs_get_inode_index_count has an explanation for the magic
+ * btrfs_set_inode_index_count has an explanation for the magic
* number
*/
BTRFS_I(inode)->index_cnt = 2;
@@ -6560,7 +6582,6 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
out_unlock:
btrfs_end_transaction(trans);
- btrfs_balance_delayed_items(fs_info);
btrfs_btree_balance_dirty(fs_info);
if (drop_inode) {
inode_dec_link_count(inode);
@@ -6641,7 +6662,6 @@ out_unlock:
inode_dec_link_count(inode);
iput(inode);
}
- btrfs_balance_delayed_items(fs_info);
btrfs_btree_balance_dirty(fs_info);
return err;
@@ -6716,7 +6736,6 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent);
}
- btrfs_balance_delayed_items(fs_info);
fail:
if (trans)
btrfs_end_transaction(trans);
@@ -6794,7 +6813,6 @@ out_fail:
inode_dec_link_count(inode);
iput(inode);
}
- btrfs_balance_delayed_items(fs_info);
btrfs_btree_balance_dirty(fs_info);
return err;
@@ -6803,68 +6821,6 @@ out_fail_inode:
goto out_fail;
}
-/* Find next extent map of a given extent map, caller needs to ensure locks */
-static struct extent_map *next_extent_map(struct extent_map *em)
-{
- struct rb_node *next;
-
- next = rb_next(&em->rb_node);
- if (!next)
- return NULL;
- return container_of(next, struct extent_map, rb_node);
-}
-
-static struct extent_map *prev_extent_map(struct extent_map *em)
-{
- struct rb_node *prev;
-
- prev = rb_prev(&em->rb_node);
- if (!prev)
- return NULL;
- return container_of(prev, struct extent_map, rb_node);
-}
-
-/* helper for btfs_get_extent. Given an existing extent in the tree,
- * the existing extent is the nearest extent to map_start,
- * and an extent that you want to insert, deal with overlap and insert
- * the best fitted new extent into the tree.
- */
-static int merge_extent_mapping(struct extent_map_tree *em_tree,
- struct extent_map *existing,
- struct extent_map *em,
- u64 map_start)
-{
- struct extent_map *prev;
- struct extent_map *next;
- u64 start;
- u64 end;
- u64 start_diff;
-
- BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
-
- if (existing->start > map_start) {
- next = existing;
- prev = prev_extent_map(next);
- } else {
- prev = existing;
- next = next_extent_map(prev);
- }
-
- start = prev ? extent_map_end(prev) : em->start;
- start = max_t(u64, start, em->start);
- end = next ? next->start : extent_map_end(em);
- end = min_t(u64, end, extent_map_end(em));
- start_diff = start - em->start;
- em->start = start;
- em->len = end - start;
- if (em->block_start < EXTENT_MAP_LAST_BYTE &&
- !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
- em->block_start += start_diff;
- em->block_len -= start_diff;
- }
- return add_extent_mapping(em_tree, em, 0);
-}
-
static noinline int uncompress_inline(struct btrfs_path *path,
struct page *page,
size_t pg_offset, u64 extent_offset,
@@ -6939,10 +6895,8 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
struct extent_map *em = NULL;
struct extent_map_tree *em_tree = &inode->extent_tree;
struct extent_io_tree *io_tree = &inode->io_tree;
- struct btrfs_trans_handle *trans = NULL;
const bool new_inline = !page || create;
-again:
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, start, len);
if (em)
@@ -6981,8 +6935,7 @@ again:
path->reada = READA_FORWARD;
}
- ret = btrfs_lookup_file_extent(trans, root, path,
- objectid, start, trans != NULL);
+ ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0);
if (ret < 0) {
err = ret;
goto out;
@@ -7083,7 +7036,7 @@ next:
em->orig_block_len = em->len;
em->orig_start = em->start;
ptr = btrfs_file_extent_inline_start(item) + extent_offset;
- if (create == 0 && !PageUptodate(page)) {
+ if (!PageUptodate(page)) {
if (btrfs_file_extent_compression(leaf, item) !=
BTRFS_COMPRESS_NONE) {
ret = uncompress_inline(path, page, pg_offset,
@@ -7104,25 +7057,6 @@ next:
kunmap(page);
}
flush_dcache_page(page);
- } else if (create && PageUptodate(page)) {
- BUG();
- if (!trans) {
- kunmap(page);
- free_extent_map(em);
- em = NULL;
-
- btrfs_release_path(path);
- trans = btrfs_join_transaction(root);
-
- if (IS_ERR(trans))
- return ERR_CAST(trans);
- goto again;
- }
- map = kmap(page);
- write_extent_buffer(leaf, map + pg_offset, ptr,
- copy_size);
- kunmap(page);
- btrfs_mark_buffer_dirty(leaf);
}
set_extent_uptodate(io_tree, em->start,
extent_map_end(em) - 1, NULL, GFP_NOFS);
@@ -7134,7 +7068,6 @@ not_found:
em->len = len;
not_found_em:
em->block_start = EXTENT_MAP_HOLE;
- set_bit(EXTENT_FLAG_VACANCY, &em->flags);
insert:
btrfs_release_path(path);
if (em->start > start || extent_map_end(em) <= start) {
@@ -7147,62 +7080,13 @@ insert:
err = 0;
write_lock(&em_tree->lock);
- ret = add_extent_mapping(em_tree, em, 0);
- /* it is possible that someone inserted the extent into the tree
- * while we had the lock dropped. It is also possible that
- * an overlapping map exists in the tree
- */
- if (ret == -EEXIST) {
- struct extent_map *existing;
-
- ret = 0;
-
- existing = search_extent_mapping(em_tree, start, len);
- /*
- * existing will always be non-NULL, since there must be
- * extent causing the -EEXIST.
- */
- if (existing->start == em->start &&
- extent_map_end(existing) >= extent_map_end(em) &&
- em->block_start == existing->block_start) {
- /*
- * The existing extent map already encompasses the
- * entire extent map we tried to add.
- */
- free_extent_map(em);
- em = existing;
- err = 0;
-
- } else if (start >= extent_map_end(existing) ||
- start <= existing->start) {
- /*
- * The existing extent map is the one nearest to
- * the [start, start + len) range which overlaps
- */
- err = merge_extent_mapping(em_tree, existing,
- em, start);
- free_extent_map(existing);
- if (err) {
- free_extent_map(em);
- em = NULL;
- }
- } else {
- free_extent_map(em);
- em = existing;
- err = 0;
- }
- }
+ err = btrfs_add_extent_mapping(em_tree, &em, start, len);
write_unlock(&em_tree->lock);
out:
trace_btrfs_get_extent(root, inode, em);
btrfs_free_path(path);
- if (trans) {
- ret = btrfs_end_transaction(trans);
- if (!err)
- err = ret;
- }
if (err) {
free_extent_map(em);
return ERR_PTR(err);
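
The clamping arithmetic of the removed merge_extent_mapping(), reduced to plain integers (an illustrative sketch; btrfs_add_extent_mapping() is expected to perform the equivalent merge internally):

    #include <stdint.h>

    /*
     * Clamp a new mapping [*em_start, *em_end) between the end of its
     * previous neighbour and the start of its next one, as the removed
     * helper did before inserting into the tree.
     */
    static void clamp_mapping(uint64_t *em_start, uint64_t *em_end,
                              uint64_t prev_end, uint64_t next_start)
    {
            if (prev_end > *em_start)
                    *em_start = prev_end;
            if (next_start < *em_end)
                    *em_end = next_start;
    }

For example, inserting [0, 16K) when a neighbour already covers [0, 8K) clamps the new range to [8K, 16K).
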
@@ -7324,7 +7208,7 @@ struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode,
em->block_start = EXTENT_MAP_DELALLOC;
em->block_len = found;
}
- } else if (hole_em) {
+ } else {
return hole_em;
}
out:
@@ -7641,7 +7525,7 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
break;
unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- cached_state, GFP_NOFS);
+ cached_state);
if (ordered) {
/*
@@ -7926,7 +7810,7 @@ unlock:
if (lockstart < lockend) {
clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
lockend, unlock_bits, 1, 0,
- &cached_state, GFP_NOFS);
+ &cached_state);
} else {
free_extent_state(cached_state);
}
@@ -7937,7 +7821,7 @@ unlock:
unlock_err:
clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- unlock_bits, 1, 0, &cached_state, GFP_NOFS);
+ unlock_bits, 1, 0, &cached_state);
err:
if (dio_data)
current->journal_info = dio_data;
@@ -7953,15 +7837,12 @@ static inline blk_status_t submit_dio_repair_bio(struct inode *inode,
BUG_ON(bio_op(bio) == REQ_OP_WRITE);
- bio_get(bio);
-
ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DIO_REPAIR);
if (ret)
- goto err;
+ return ret;
ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
-err:
- bio_put(bio);
+
return ret;
}
@@ -8015,6 +7896,7 @@ static blk_status_t dio_read_error(struct inode *inode, struct bio *failed_bio,
int segs;
int ret;
blk_status_t status;
+ struct bio_vec bvec;
BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
@@ -8030,8 +7912,9 @@ static blk_status_t dio_read_error(struct inode *inode, struct bio *failed_bio,
}
segs = bio_segments(failed_bio);
+ bio_get_first_bvec(failed_bio, &bvec);
if (segs > 1 ||
- (failed_bio->bi_io_vec->bv_len > btrfs_inode_sectorsize(inode)))
+ (bvec.bv_len > btrfs_inode_sectorsize(inode)))
read_mode |= REQ_FAILFAST_DEV;
isector = start - btrfs_io_bio(failed_bio)->logical;
@@ -8074,7 +7957,7 @@ static void btrfs_retry_endio_nocsum(struct bio *bio)
ASSERT(bio->bi_vcnt == 1);
io_tree = &BTRFS_I(inode)->io_tree;
failure_tree = &BTRFS_I(inode)->io_failure_tree;
- ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(inode));
+ ASSERT(bio_first_bvec_all(bio)->bv_len == btrfs_inode_sectorsize(inode));
done->uptodate = 1;
ASSERT(!bio_flagged(bio, BIO_CLONED));
@@ -8164,7 +8047,7 @@ static void btrfs_retry_endio(struct bio *bio)
uptodate = 1;
ASSERT(bio->bi_vcnt == 1);
- ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(done->inode));
+ ASSERT(bio_first_bvec_all(bio)->bv_len == btrfs_inode_sectorsize(done->inode));
io_tree = &BTRFS_I(inode)->io_tree;
failure_tree = &BTRFS_I(inode)->io_failure_tree;
@@ -8460,11 +8343,10 @@ __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, u64 file_offset,
bool write = bio_op(bio) == REQ_OP_WRITE;
blk_status_t ret;
+ /* Check btrfs_submit_bio_hook() for rules about async submit. */
if (async_submit)
async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers);
- bio_get(bio);
-
if (!write) {
ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
if (ret)
@@ -8497,7 +8379,6 @@ __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, u64 file_offset,
map:
ret = btrfs_map_bio(fs_info, bio, 0, 0);
err:
- bio_put(bio);
return ret;
}
@@ -8854,7 +8735,7 @@ static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
if (ret)
return ret;
- return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
+ return extent_fiemap(inode, fieinfo, start, len);
}
int btrfs_readpage(struct file *file, struct page *page)
@@ -8866,7 +8747,6 @@ int btrfs_readpage(struct file *file, struct page *page)
static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
{
- struct extent_io_tree *tree;
struct inode *inode = page->mapping->host;
int ret;
@@ -8885,8 +8765,7 @@ static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
redirty_page_for_writepage(wbc, page);
return AOP_WRITEPAGE_ACTIVATE;
}
- tree = &BTRFS_I(page->mapping->host)->io_tree;
- ret = extent_write_full_page(tree, page, btrfs_get_extent, wbc);
+ ret = extent_write_full_page(page, wbc);
btrfs_add_delayed_iput(inode);
return ret;
}
@@ -8897,7 +8776,7 @@ static int btrfs_writepages(struct address_space *mapping,
struct extent_io_tree *tree;
tree = &BTRFS_I(mapping->host)->io_tree;
- return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
+ return extent_writepages(tree, mapping, wbc);
}
static int
@@ -8906,8 +8785,7 @@ btrfs_readpages(struct file *file, struct address_space *mapping,
{
struct extent_io_tree *tree;
tree = &BTRFS_I(mapping->host)->io_tree;
- return extent_readpages(tree, mapping, pages, nr_pages,
- btrfs_get_extent);
+ return extent_readpages(tree, mapping, pages, nr_pages);
}
static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
{
@@ -8978,8 +8856,7 @@ again:
EXTENT_DIRTY | EXTENT_DELALLOC |
EXTENT_DELALLOC_NEW |
EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
- EXTENT_DEFRAG, 1, 0, &cached_state,
- GFP_NOFS);
+ EXTENT_DEFRAG, 1, 0, &cached_state);
/*
* whoever cleared the private bit is responsible
* for the finish_ordered_io
@@ -9036,7 +8913,7 @@ again:
EXTENT_LOCKED | EXTENT_DIRTY |
EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 1, 1,
- &cached_state, GFP_NOFS);
+ &cached_state);
__btrfs_releasepage(page, GFP_NOFS);
}
@@ -9137,7 +9014,7 @@ again:
PAGE_SIZE);
if (ordered) {
unlock_extent_cached(io_tree, page_start, page_end,
- &cached_state, GFP_NOFS);
+ &cached_state);
unlock_page(page);
btrfs_start_ordered_extent(inode, ordered, 1);
btrfs_put_ordered_extent(ordered);
@@ -9164,13 +9041,13 @@ again:
clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
EXTENT_DIRTY | EXTENT_DELALLOC |
EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
- 0, 0, &cached_state, GFP_NOFS);
+ 0, 0, &cached_state);
ret = btrfs_set_extent_delalloc(inode, page_start, end, 0,
&cached_state, 0);
if (ret) {
unlock_extent_cached(io_tree, page_start, page_end,
- &cached_state, GFP_NOFS);
+ &cached_state);
ret = VM_FAULT_SIGBUS;
goto out_unlock;
}
@@ -9196,7 +9073,7 @@ again:
BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
- unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);
+ unlock_extent_cached(io_tree, page_start, page_end, &cached_state);
out_unlock:
if (!ret) {
@@ -9421,7 +9298,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
struct btrfs_inode *ei;
struct inode *inode;
- ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
+ ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_KERNEL);
if (!ei)
return NULL;
@@ -9573,7 +9450,7 @@ void btrfs_destroy_cachep(void)
kmem_cache_destroy(btrfs_free_space_cachep);
}
-int btrfs_init_cachep(void)
+int __init btrfs_init_cachep(void)
{
btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
sizeof(struct btrfs_inode), 0,
@@ -10688,7 +10565,6 @@ out:
btrfs_end_transaction(trans);
if (ret)
iput(inode);
- btrfs_balance_delayed_items(fs_info);
btrfs_btree_balance_dirty(fs_info);
return ret;
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 2ef8acaac688..111ee282b777 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -43,6 +43,7 @@
#include <linux/uuid.h>
#include <linux/btrfs.h>
#include <linux/uaccess.h>
+#include <linux/iversion.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
@@ -307,12 +308,10 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
ip->flags |= BTRFS_INODE_COMPRESS;
ip->flags &= ~BTRFS_INODE_NOCOMPRESS;
- if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
- comp = "lzo";
- else if (fs_info->compress_type == BTRFS_COMPRESS_ZLIB)
- comp = "zlib";
- else
- comp = "zstd";
+ comp = btrfs_compress_type2str(fs_info->compress_type);
+ if (!comp || comp[0] == 0)
+ comp = btrfs_compress_type2str(BTRFS_COMPRESS_ZLIB);
+
ret = btrfs_set_prop(inode, "btrfs.compression",
comp, strlen(comp), 0);
if (ret)
@@ -979,7 +978,7 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start)
/* get the big lock and read metadata off disk */
lock_extent_bits(io_tree, start, end, &cached);
em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0);
- unlock_extent_cached(io_tree, start, end, &cached, GFP_NOFS);
+ unlock_extent_cached(io_tree, start, end, &cached);
if (IS_ERR(em))
return NULL;
@@ -1130,7 +1129,7 @@ again:
ordered = btrfs_lookup_ordered_extent(inode,
page_start);
unlock_extent_cached(tree, page_start, page_end,
- &cached_state, GFP_NOFS);
+ &cached_state);
if (!ordered)
break;
@@ -1190,7 +1189,7 @@ again:
clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start,
page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0,
- &cached_state, GFP_NOFS);
+ &cached_state);
if (i_done != page_cnt) {
spin_lock(&BTRFS_I(inode)->lock);
@@ -1206,8 +1205,7 @@ again:
&cached_state);
unlock_extent_cached(&BTRFS_I(inode)->io_tree,
- page_start, page_end - 1, &cached_state,
- GFP_NOFS);
+ page_start, page_end - 1, &cached_state);
for (i = 0; i < i_done; i++) {
clear_page_dirty_for_io(pages[i]);
@@ -1503,7 +1501,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
goto out_free;
}
- if (!device->writeable) {
+ if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
btrfs_info(fs_info,
"resizer unable to apply on readonly device %llu",
devid);
@@ -1528,7 +1526,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
}
}
- if (device->is_tgtdev_for_dev_replace) {
+ if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
ret = -EPERM;
goto out_free;
}
@@ -2675,14 +2673,12 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
goto out;
}
- mutex_lock(&fs_info->volume_mutex);
if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID) {
ret = btrfs_rm_device(fs_info, NULL, vol_args->devid);
} else {
vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
ret = btrfs_rm_device(fs_info, vol_args->name, 0);
}
- mutex_unlock(&fs_info->volume_mutex);
clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
if (!ret) {
@@ -2726,9 +2722,7 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
}
vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
- mutex_lock(&fs_info->volume_mutex);
ret = btrfs_rm_device(fs_info, vol_args->name, 0);
- mutex_unlock(&fs_info->volume_mutex);
if (!ret)
btrfs_info(fs_info, "disk deleted %s", vol_args->name);
@@ -2753,16 +2747,16 @@ static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info,
if (!fi_args)
return -ENOMEM;
- mutex_lock(&fs_devices->device_list_mutex);
+ rcu_read_lock();
fi_args->num_devices = fs_devices->num_devices;
- memcpy(&fi_args->fsid, fs_info->fsid, sizeof(fi_args->fsid));
- list_for_each_entry(device, &fs_devices->devices, dev_list) {
+ list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
if (device->devid > fi_args->max_id)
fi_args->max_id = device->devid;
}
- mutex_unlock(&fs_devices->device_list_mutex);
+ rcu_read_unlock();
+ memcpy(&fi_args->fsid, fs_info->fsid, sizeof(fi_args->fsid));
fi_args->nodesize = fs_info->nodesize;
fi_args->sectorsize = fs_info->sectorsize;
fi_args->clone_alignment = fs_info->sectorsize;
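
Replacing device_list_mutex with RCU works because the device list is RCU-managed; a reader only needs a read-side critical section. The general pattern (standard rculist API; the helper name is illustrative):

    #include <linux/rculist.h>

    /*
     * Safe against concurrent list updates as long as writers use
     * list_add_rcu()/list_del_rcu() and free devices via RCU.
     */
    static u64 max_devid_rcu(struct list_head *devices)
    {
            struct btrfs_device *device;
            u64 max_id = 0;

            rcu_read_lock();
            list_for_each_entry_rcu(device, devices, dev_list)
                    if (device->devid > max_id)
                            max_id = device->devid;
            rcu_read_unlock();

            return max_id;
    }

The trade-off is that the snapshot may be momentarily stale, which is acceptable for an informational ioctl.
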
@@ -2779,7 +2773,6 @@ static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info,
{
struct btrfs_ioctl_dev_info_args *di_args;
struct btrfs_device *dev;
- struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
int ret = 0;
char *s_uuid = NULL;
@@ -2790,7 +2783,7 @@ static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info,
if (!btrfs_is_empty_uuid(di_args->uuid))
s_uuid = di_args->uuid;
- mutex_lock(&fs_devices->device_list_mutex);
+ rcu_read_lock();
dev = btrfs_find_device(fs_info, di_args->devid, s_uuid, NULL);
if (!dev) {
@@ -2805,17 +2798,15 @@ static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info,
if (dev->name) {
struct rcu_string *name;
- rcu_read_lock();
name = rcu_dereference(dev->name);
- strncpy(di_args->path, name->str, sizeof(di_args->path));
- rcu_read_unlock();
+ strncpy(di_args->path, name->str, sizeof(di_args->path) - 1);
di_args->path[sizeof(di_args->path) - 1] = 0;
} else {
di_args->path[0] = '\0';
}
out:
- mutex_unlock(&fs_devices->device_list_mutex);
+ rcu_read_unlock();
if (ret == 0 && copy_to_user(arg, di_args, sizeof(*di_args)))
ret = -EFAULT;
diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c
index f6a05f836629..b30a056963ab 100644
--- a/fs/btrfs/props.c
+++ b/fs/btrfs/props.c
@@ -164,7 +164,6 @@ static int iterate_object_props(struct btrfs_root *root,
size_t),
void *ctx)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
char *name_buf = NULL;
char *value_buf = NULL;
@@ -215,12 +214,6 @@ static int iterate_object_props(struct btrfs_root *root,
name_ptr = (unsigned long)(di + 1);
data_ptr = name_ptr + name_len;
- if (verify_dir_item(fs_info, leaf,
- path->slots[0], di)) {
- ret = -EIO;
- goto out;
- }
-
if (name_len <= XATTR_BTRFS_PREFIX_LEN ||
memcmp_extent_buffer(leaf, XATTR_BTRFS_PREFIX,
name_ptr,
@@ -430,11 +423,11 @@ static const char *prop_compression_extract(struct inode *inode)
{
switch (BTRFS_I(inode)->prop_compress) {
case BTRFS_COMPRESS_ZLIB:
- return "zlib";
case BTRFS_COMPRESS_LZO:
- return "lzo";
case BTRFS_COMPRESS_ZSTD:
- return "zstd";
+ return btrfs_compress_type2str(BTRFS_I(inode)->prop_compress);
+ default:
+ break;
}
return NULL;
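
btrfs_compress_type2str() is introduced elsewhere in this series; judging from its call sites here (an empty string is possible, NULL for unknown types), a plausible reconstruction, not the authoritative definition, would be a table lookup:

    #include <linux/kernel.h>

    static const char *const compress_types[] = {
            "", "zlib", "lzo", "zstd",      /* indexed by BTRFS_COMPRESS_* */
    };

    const char *btrfs_compress_type2str(unsigned int type)
    {
            if (type >= ARRAY_SIZE(compress_types))
                    return NULL;
            return compress_types[type];
    }
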
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 168fd03ca3ac..9e61dd624f7b 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -2883,8 +2883,7 @@ cleanup:
ULIST_ITER_INIT(&uiter);
while ((unode = ulist_next(&reserved->range_changed, &uiter)))
clear_extent_bit(&BTRFS_I(inode)->io_tree, unode->val,
- unode->aux, EXTENT_QGROUP_RESERVED, 0, 0, NULL,
- GFP_NOFS);
+ unode->aux, EXTENT_QGROUP_RESERVED, 0, 0, NULL);
extent_changeset_release(reserved);
return ret;
}
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index a7f79254ecca..dec0907dfb8a 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -231,7 +231,6 @@ int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
cur = h + i;
INIT_LIST_HEAD(&cur->hash_list);
spin_lock_init(&cur->lock);
- init_waitqueue_head(&cur->wait);
}
x = cmpxchg(&info->stripe_hash_table, NULL, table);
@@ -595,14 +594,31 @@ static int rbio_can_merge(struct btrfs_raid_bio *last,
* bio list here, anyone else that wants to
* change this stripe needs to do their own rmw.
*/
- if (last->operation == BTRFS_RBIO_PARITY_SCRUB ||
- cur->operation == BTRFS_RBIO_PARITY_SCRUB)
+ if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
return 0;
- if (last->operation == BTRFS_RBIO_REBUILD_MISSING ||
- cur->operation == BTRFS_RBIO_REBUILD_MISSING)
+ if (last->operation == BTRFS_RBIO_REBUILD_MISSING)
return 0;
+ if (last->operation == BTRFS_RBIO_READ_REBUILD) {
+ int fa = last->faila;
+ int fb = last->failb;
+ int cur_fa = cur->faila;
+ int cur_fb = cur->failb;
+
+ if (last->faila >= last->failb) {
+ fa = last->failb;
+ fb = last->faila;
+ }
+
+ if (cur->faila >= cur->failb) {
+ cur_fa = cur->failb;
+ cur_fb = cur->faila;
+ }
+
+ if (fa != cur_fa || fb != cur_fb)
+ return 0;
+ }
return 1;
}
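
rbio_can_merge() now treats (faila, failb) as an unordered pair. The normalization, factored into a standalone helper (sketch only):

    /* Order each pair as (lo, hi) so assignment order cannot matter. */
    static int same_fail_pair(int a1, int b1, int a2, int b2)
    {
            int lo1 = a1 < b1 ? a1 : b1, hi1 = a1 < b1 ? b1 : a1;
            int lo2 = a2 < b2 ? a2 : b2, hi2 = a2 < b2 ? b2 : a2;

            return lo1 == lo2 && hi1 == hi2;
    }

Two read-rebuild rbios may only merge when they rebuild exactly the same failed stripes, regardless of which slot each failure landed in.
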
@@ -670,7 +686,6 @@ static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
struct btrfs_raid_bio *cur;
struct btrfs_raid_bio *pending;
unsigned long flags;
- DEFINE_WAIT(wait);
struct btrfs_raid_bio *freeit = NULL;
struct btrfs_raid_bio *cache_drop = NULL;
int ret = 0;
@@ -816,15 +831,6 @@ static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
}
goto done_nolock;
- /*
- * The barrier for this waitqueue_active is not needed,
- * we're protected by h->lock and can't miss a wakeup.
- */
- } else if (waitqueue_active(&h->wait)) {
- spin_unlock(&rbio->bio_list_lock);
- spin_unlock_irqrestore(&h->lock, flags);
- wake_up(&h->wait);
- goto done_nolock;
}
}
done:
@@ -858,10 +864,17 @@ static void __free_raid_bio(struct btrfs_raid_bio *rbio)
kfree(rbio);
}
-static void free_raid_bio(struct btrfs_raid_bio *rbio)
+static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
{
- unlock_stripe(rbio);
- __free_raid_bio(rbio);
+ struct bio *next;
+
+ while (cur) {
+ next = cur->bi_next;
+ cur->bi_next = NULL;
+ cur->bi_status = err;
+ bio_endio(cur);
+ cur = next;
+ }
}
/*
@@ -871,20 +884,26 @@ static void free_raid_bio(struct btrfs_raid_bio *rbio)
static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
{
struct bio *cur = bio_list_get(&rbio->bio_list);
- struct bio *next;
+ struct bio *extra;
if (rbio->generic_bio_cnt)
btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);
- free_raid_bio(rbio);
+ /*
+ * At this moment, rbio->bio_list is empty. However, since rbio does not
+ * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
+ * hash list, rbio may be merged with others so that rbio->bio_list
+ * becomes non-empty.
+ * Once unlock_stripe() is done, rbio->bio_list will not be updated any
+ * more and we can call bio_endio() on all queued bios.
+ */
+ unlock_stripe(rbio);
+ extra = bio_list_get(&rbio->bio_list);
+ __free_raid_bio(rbio);
- while (cur) {
- next = cur->bi_next;
- cur->bi_next = NULL;
- cur->bi_status = err;
- bio_endio(cur);
- cur = next;
- }
+ rbio_endio_bio_list(cur, err);
+ if (extra)
+ rbio_endio_bio_list(extra, err);
}
/*
@@ -1435,14 +1454,13 @@ static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
*/
static void set_bio_pages_uptodate(struct bio *bio)
{
- struct bio_vec bvec;
- struct bvec_iter iter;
+ struct bio_vec *bvec;
+ int i;
- if (bio_flagged(bio, BIO_CLONED))
- bio->bi_iter = btrfs_io_bio(bio)->iter;
+ ASSERT(!bio_flagged(bio, BIO_CLONED));
- bio_for_each_segment(bvec, bio, iter)
- SetPageUptodate(bvec.bv_page);
+ bio_for_each_segment_all(bvec, bio, i)
+ SetPageUptodate(bvec->bv_page);
}
/*
@@ -1969,7 +1987,22 @@ cleanup:
cleanup_io:
if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
- if (err == BLK_STS_OK)
+ /*
+ * - In case of two failures, where rbio->failb != -1:
+ *
+ * Do not cache this rbio since the above read reconstruction
+ * (raid6_datap_recov() or raid6_2data_recov()) may have
+ * changed the content of some stripes so that it no longer matches
+ * the on-disk content; otherwise, a later write/recover
+ * may steal stripe_pages from this rbio and end up with
+ * corruptions or rebuild failures.
+ *
+ * - In case of single failure, where rbio->failb == -1:
+ *
+ * Cache this rbio iff the above read reconstruction is
+ * executed without problems.
+ */
+ if (err == BLK_STS_OK && rbio->failb < 0)
cache_rbio_pages(rbio);
else
clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
@@ -2170,11 +2203,21 @@ int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
}
/*
- * reconstruct from the q stripe if they are
- * asking for mirror 3
+ * Loop retry:
+ * for 'mirror_num == 2', reconstruct from all other stripes.
+ * for 'mirror_num > 2', select a stripe to fail on every retry.
*/
- if (mirror_num == 3)
- rbio->failb = rbio->real_stripes - 2;
+ if (mirror_num > 2) {
+ /*
+ * 'mirror_num == 3' is to fail the p stripe and
+ * reconstruct from the q stripe. 'mirror_num > 3' is to
+ * fail a data stripe and reconstruct from the p+q stripes.
+ */
+ rbio->failb = rbio->real_stripes - (mirror_num - 1);
+ ASSERT(rbio->failb > 0);
+ if (rbio->failb <= rbio->faila)
+ rbio->failb--;
+ }
ret = lock_stripe_add(rbio);
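
A worked example of the failb mapping above, assuming a RAID6 layout with four data stripes plus P and Q (real_stripes = 6, stripes indexed 0..5, P at index 4, Q at index 5): mirror_num == 3 yields failb = 6 - (3 - 1) = 4, i.e. fail P and rebuild from Q, matching the old special case; mirror_num == 4 yields failb = 3, the last data stripe, and if faila already equals 3 the final decrement moves failb to 2, guaranteeing two distinct failed stripes.
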
diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
index 34878699d363..171f3cce30e6 100644
--- a/fs/btrfs/ref-verify.c
+++ b/fs/btrfs/ref-verify.c
@@ -606,8 +606,7 @@ static int walk_down_tree(struct btrfs_root *root, struct btrfs_path *path,
}
/* Walk up to the next node that needs to be processed */
-static int walk_up_tree(struct btrfs_root *root, struct btrfs_path *path,
- int *level)
+static int walk_up_tree(struct btrfs_path *path, int *level)
{
int l;
@@ -984,7 +983,6 @@ void btrfs_free_ref_tree_range(struct btrfs_fs_info *fs_info, u64 start,
int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
{
struct btrfs_path *path;
- struct btrfs_root *root;
struct extent_buffer *eb;
u64 bytenr = 0, num_bytes = 0;
int ret, level;
@@ -1014,7 +1012,7 @@ int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
&bytenr, &num_bytes);
if (ret)
break;
- ret = walk_up_tree(root, path, &level);
+ ret = walk_up_tree(path, &level);
if (ret < 0)
break;
if (ret > 0) {
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 3338407ef0f0..aab0194efe46 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -387,13 +387,6 @@ again:
WARN_ON(btrfs_root_ref_dirid(leaf, ref) != dirid);
WARN_ON(btrfs_root_ref_name_len(leaf, ref) != name_len);
ptr = (unsigned long)(ref + 1);
- ret = btrfs_is_name_len_valid(leaf, path->slots[0], ptr,
- name_len);
- if (!ret) {
- err = -EIO;
- goto out;
- }
-
WARN_ON(memcmp_extent_buffer(leaf, name, ptr, name_len));
*sequence = btrfs_root_ref_sequence(leaf, ref);
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index b2f871d80982..ec56f33feea9 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -301,6 +301,11 @@ static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_put_ctx(struct scrub_ctx *sctx);
+static inline int scrub_is_page_on_raid56(struct scrub_page *page)
+{
+ return page->recover &&
+ (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
+}
static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
{
@@ -1323,15 +1328,34 @@ nodatasum_case:
* could happen otherwise that a correct page would be
* overwritten by a bad one).
*/
- for (mirror_index = 0;
- mirror_index < BTRFS_MAX_MIRRORS &&
- sblocks_for_recheck[mirror_index].page_count > 0;
- mirror_index++) {
+ for (mirror_index = 0; ; mirror_index++) {
struct scrub_block *sblock_other;
if (mirror_index == failed_mirror_index)
continue;
- sblock_other = sblocks_for_recheck + mirror_index;
+
+ /* raid56's mirror can be more than BTRFS_MAX_MIRRORS */
+ if (!scrub_is_page_on_raid56(sblock_bad->pagev[0])) {
+ if (mirror_index >= BTRFS_MAX_MIRRORS)
+ break;
+ if (!sblocks_for_recheck[mirror_index].page_count)
+ break;
+
+ sblock_other = sblocks_for_recheck + mirror_index;
+ } else {
+ struct scrub_recover *r = sblock_bad->pagev[0]->recover;
+ int max_allowed = r->bbio->num_stripes -
+ r->bbio->num_tgtdevs;
+
+ if (mirror_index >= max_allowed)
+ break;
+ if (!sblocks_for_recheck[1].page_count)
+ break;
+
+ ASSERT(failed_mirror_index == 0);
+ sblock_other = sblocks_for_recheck + 1;
+ sblock_other->pagev[0]->mirror_num = 1 + mirror_index;
+ }
/* build and submit the bios, check checksums */
scrub_recheck_block(fs_info, sblock_other, 0);
@@ -1666,49 +1690,32 @@ leave_nomem:
return 0;
}
-struct scrub_bio_ret {
- struct completion event;
- blk_status_t status;
-};
-
static void scrub_bio_wait_endio(struct bio *bio)
{
- struct scrub_bio_ret *ret = bio->bi_private;
-
- ret->status = bio->bi_status;
- complete(&ret->event);
-}
-
-static inline int scrub_is_page_on_raid56(struct scrub_page *page)
-{
- return page->recover &&
- (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
+ complete(bio->bi_private);
}
static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
struct bio *bio,
struct scrub_page *page)
{
- struct scrub_bio_ret done;
+ DECLARE_COMPLETION_ONSTACK(done);
int ret;
+ int mirror_num;
- init_completion(&done.event);
- done.status = 0;
bio->bi_iter.bi_sector = page->logical >> 9;
bio->bi_private = &done;
bio->bi_end_io = scrub_bio_wait_endio;
+ mirror_num = page->sblock->pagev[0]->mirror_num;
ret = raid56_parity_recover(fs_info, bio, page->recover->bbio,
page->recover->map_length,
- page->mirror_num, 0);
+ mirror_num, 0);
if (ret)
return ret;
- wait_for_completion_io(&done.event);
- if (done.status)
- return -EIO;
-
- return 0;
+ wait_for_completion_io(&done);
+ return blk_status_to_errno(bio->bi_status);
}
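
The switch to DECLARE_COMPLETION_ONSTACK() drops the custom wait struct because the status already lives in bio->bi_status. The submit-and-wait idiom in general form (a sketch of the pattern, not btrfs code; the block layer's own submit_bio_wait() is built the same way):

    #include <linux/bio.h>
    #include <linux/completion.h>

    static void sync_bio_endio(struct bio *bio)
    {
            complete(bio->bi_private);
    }

    /* Submit @bio and sleep until its endio fires; caller owns the bio. */
    static int submit_bio_and_wait(struct bio *bio)
    {
            DECLARE_COMPLETION_ONSTACK(done);

            bio->bi_private = &done;
            bio->bi_end_io = sync_bio_endio;
            submit_bio(bio);
            wait_for_completion_io(&done);

            return blk_status_to_errno(bio->bi_status);
    }
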
/*
@@ -2535,7 +2542,7 @@ leave_nomem:
}
WARN_ON(sblock->page_count == 0);
- if (dev->missing) {
+ if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
/*
* This case should only be hit for RAID 5/6 device replace. See
* the comment in scrub_missing_raid56_pages() for details.
@@ -2870,7 +2877,7 @@ static int scrub_extent_for_parity(struct scrub_parity *sparity,
u8 csum[BTRFS_CSUM_SIZE];
u32 blocksize;
- if (dev->missing) {
+ if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) {
scrub_parity_mark_sectors_error(sparity, logical, len);
return 0;
}
@@ -4112,12 +4119,14 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
mutex_lock(&fs_info->fs_devices->device_list_mutex);
dev = btrfs_find_device(fs_info, devid, NULL, NULL);
- if (!dev || (dev->missing && !is_dev_replace)) {
+ if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
+ !is_dev_replace)) {
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
return -ENODEV;
}
- if (!is_dev_replace && !readonly && !dev->writeable) {
+ if (!is_dev_replace && !readonly &&
+ !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
rcu_read_lock();
name = rcu_dereference(dev->name);
@@ -4128,14 +4137,15 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
}
mutex_lock(&fs_info->scrub_lock);
- if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
+ if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
+ test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
mutex_unlock(&fs_info->scrub_lock);
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
return -EIO;
}
btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
- if (dev->scrub_device ||
+ if (dev->scrub_ctx ||
(!is_dev_replace &&
btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
@@ -4160,7 +4170,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
return PTR_ERR(sctx);
}
sctx->readonly = readonly;
- dev->scrub_device = sctx;
+ dev->scrub_ctx = sctx;
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
/*
@@ -4195,7 +4205,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
memcpy(progress, &sctx->stat, sizeof(*progress));
mutex_lock(&fs_info->scrub_lock);
- dev->scrub_device = NULL;
+ dev->scrub_ctx = NULL;
scrub_workers_put(fs_info);
mutex_unlock(&fs_info->scrub_lock);
@@ -4252,16 +4262,16 @@ int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
struct scrub_ctx *sctx;
mutex_lock(&fs_info->scrub_lock);
- sctx = dev->scrub_device;
+ sctx = dev->scrub_ctx;
if (!sctx) {
mutex_unlock(&fs_info->scrub_lock);
return -ENOTCONN;
}
atomic_inc(&sctx->cancel_req);
- while (dev->scrub_device) {
+ while (dev->scrub_ctx) {
mutex_unlock(&fs_info->scrub_lock);
wait_event(fs_info->scrub_pause_wait,
- dev->scrub_device == NULL);
+ dev->scrub_ctx == NULL);
mutex_lock(&fs_info->scrub_lock);
}
mutex_unlock(&fs_info->scrub_lock);
@@ -4278,7 +4288,7 @@ int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
mutex_lock(&fs_info->fs_devices->device_list_mutex);
dev = btrfs_find_device(fs_info, devid, NULL, NULL);
if (dev)
- sctx = dev->scrub_device;
+ sctx = dev->scrub_ctx;
if (sctx)
memcpy(progress, &sctx->stat, sizeof(*progress));
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
@@ -4478,8 +4488,7 @@ static int check_extent_to_block(struct btrfs_inode *inode, u64 start, u64 len,
free_extent_map(em);
out_unlock:
- unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
- GFP_NOFS);
+ unlock_extent_cached(io_tree, lockstart, lockend, &cached_state);
return ret;
}
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 20d3300bd268..f306c608dc28 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -1059,12 +1059,6 @@ static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
}
}
- ret = btrfs_is_name_len_valid(eb, path->slots[0],
- (unsigned long)(di + 1), name_len + data_len);
- if (!ret) {
- ret = -EIO;
- goto out;
- }
if (name_len + data_len > buf_len) {
buf_len = name_len + data_len;
if (is_vmalloc_addr(buf)) {
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 3a4dce153645..6e71a2a78363 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -61,12 +61,21 @@
#include "tests/btrfs-tests.h"
#include "qgroup.h"
-#include "backref.h"
#define CREATE_TRACE_POINTS
#include <trace/events/btrfs.h>
static const struct super_operations btrfs_super_ops;
+
+/*
+ * Types for mounting the default subvolume and a subvolume explicitly
+ * requested by subvol=/path. That way the callchain is straightforward and we
+ * don't have to play tricks with the mount options and recursive calls to
+ * btrfs_mount.
+ *
+ * The new btrfs_root_fs_type also serves as a tag for the bdev_holder.
+ */
static struct file_system_type btrfs_fs_type;
+static struct file_system_type btrfs_root_fs_type;
static int btrfs_remount(struct super_block *sb, int *flags, char *data);
@@ -98,30 +107,6 @@ const char *btrfs_decode_error(int errno)
return errstr;
}
-/* btrfs handle error by forcing the filesystem readonly */
-static void btrfs_handle_error(struct btrfs_fs_info *fs_info)
-{
- struct super_block *sb = fs_info->sb;
-
- if (sb_rdonly(sb))
- return;
-
- if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
- sb->s_flags |= SB_RDONLY;
- btrfs_info(fs_info, "forced readonly");
- /*
- * Note that a running device replace operation is not
- * canceled here although there is no way to update
- * the progress. It would add the risk of a deadlock,
- * therefore the canceling is omitted. The only penalty
- * is that some I/O remains active until the procedure
- * completes. The next time when the filesystem is
- * mounted writeable again, the device replace
- * operation continues.
- */
- }
-}
-
/*
* __btrfs_handle_fs_error decodes expected errors from the caller and
* invokes the appropriate error response.
@@ -168,8 +153,23 @@ void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function
set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
/* Don't go through full error handling during mount */
- if (sb->s_flags & SB_BORN)
- btrfs_handle_error(fs_info);
+ if (!(sb->s_flags & SB_BORN))
+ return;
+
+ if (sb_rdonly(sb))
+ return;
+
+ /* btrfs handle error by forcing the filesystem readonly */
+ sb->s_flags |= SB_RDONLY;
+ btrfs_info(fs_info, "forced readonly");
+ /*
+ * Note that a running device replace operation is not canceled here
+ * although there is no way to update the progress. It would add the
+ * risk of a deadlock, therefore the canceling is omitted. The only
+ * penalty is that some I/O remains active until the procedure
+ * completes. The next time when the filesystem is mounted writeable
+ * again, the device replace operation continues.
+ */
}
#ifdef CONFIG_PRINTK
@@ -405,7 +405,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
unsigned long new_flags)
{
substring_t args[MAX_OPT_ARGS];
- char *p, *num, *orig = NULL;
+ char *p, *num;
u64 cache_gen;
int intarg;
int ret = 0;
@@ -428,16 +428,6 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
if (!options)
goto check;
- /*
- * strsep changes the string, duplicate it because parse_options
- * gets called twice
- */
- options = kstrdup(options, GFP_KERNEL);
- if (!options)
- return -ENOMEM;
-
- orig = options;
-
while ((p = strsep(&options, ",")) != NULL) {
int token;
if (!*p)
@@ -454,7 +444,8 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
case Opt_subvolrootid:
case Opt_device:
/*
- * These are parsed by btrfs_parse_early_options
+ * These are parsed by btrfs_parse_subvol_options
+ * and btrfs_parse_early_options
* and can be happily ignored here.
*/
break;
@@ -877,7 +868,6 @@ out:
btrfs_info(info, "disk space caching is enabled");
if (!ret && btrfs_test_opt(info, FREE_SPACE_TREE))
btrfs_info(info, "using free space tree");
- kfree(orig);
return ret;
}
@@ -888,11 +878,60 @@ out:
* only when we need to allocate a new super block.
*/
static int btrfs_parse_early_options(const char *options, fmode_t flags,
- void *holder, char **subvol_name, u64 *subvol_objectid,
- struct btrfs_fs_devices **fs_devices)
+ void *holder, struct btrfs_fs_devices **fs_devices)
{
substring_t args[MAX_OPT_ARGS];
char *device_name, *opts, *orig, *p;
+ int error = 0;
+
+ if (!options)
+ return 0;
+
+ /*
+ * strsep changes the string, duplicate it because btrfs_parse_options
+ * gets called later
+ */
+ opts = kstrdup(options, GFP_KERNEL);
+ if (!opts)
+ return -ENOMEM;
+ orig = opts;
+
+ while ((p = strsep(&opts, ",")) != NULL) {
+ int token;
+
+ if (!*p)
+ continue;
+
+ token = match_token(p, tokens, args);
+ if (token == Opt_device) {
+ device_name = match_strdup(&args[0]);
+ if (!device_name) {
+ error = -ENOMEM;
+ goto out;
+ }
+ error = btrfs_scan_one_device(device_name,
+ flags, holder, fs_devices);
+ kfree(device_name);
+ if (error)
+ goto out;
+ }
+ }
+
+out:
+ kfree(orig);
+ return error;
+}
+
+/*
+ * Parse mount options that are related to subvolume id
+ *
+ * The value is later passed to mount_subvol()
+ */
+static int btrfs_parse_subvol_options(const char *options, fmode_t flags,
+ char **subvol_name, u64 *subvol_objectid)
+{
+ substring_t args[MAX_OPT_ARGS];
+ char *opts, *orig, *p;
char *num = NULL;
int error = 0;
@@ -900,8 +939,8 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
return 0;
/*
- * strsep changes the string, duplicate it because parse_options
- * gets called twice
+ * strsep changes the string, duplicate it because
+ * btrfs_parse_early_options gets called later
*/
opts = kstrdup(options, GFP_KERNEL);
if (!opts)
@@ -940,18 +979,6 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
case Opt_subvolrootid:
pr_warn("BTRFS: 'subvolrootid' mount option is deprecated and has no effect\n");
break;
- case Opt_device:
- device_name = match_strdup(&args[0]);
- if (!device_name) {
- error = -ENOMEM;
- goto out;
- }
- error = btrfs_scan_one_device(device_name,
- flags, holder, fs_devices);
- kfree(device_name);
- if (error)
- goto out;
- break;
default:
break;
}
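
Both parsers kstrdup() the option string before tokenizing because strsep() is destructive. A userspace illustration (glibc strsep() has the same semantics as the kernel's):

    #define _DEFAULT_SOURCE
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
            char *opts = strdup("subvol=/a,compress=zstd,device=/dev/sdb");
            char *walk = opts, *p;

            while ((p = strsep(&walk, ",")) != NULL)
                    if (*p)
                            printf("token: %s\n", p);

            /* The commas are now NUL bytes: opts reads "subvol=/a". */
            printf("mutated buffer: %s\n", opts);
            free(opts);
            return 0;
    }

Parsing the caller's buffer directly would leave the later pass (btrfs_parse_options()) with only the first token.
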
@@ -1243,7 +1270,7 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
{
struct btrfs_fs_info *info = btrfs_sb(dentry->d_sb);
- char *compress_type;
+ const char *compress_type;
if (btrfs_test_opt(info, DEGRADED))
seq_puts(seq, ",degraded");
@@ -1259,12 +1286,7 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
num_online_cpus() + 2, 8))
seq_printf(seq, ",thread_pool=%d", info->thread_pool_size);
if (btrfs_test_opt(info, COMPRESS)) {
- if (info->compress_type == BTRFS_COMPRESS_ZLIB)
- compress_type = "zlib";
- else if (info->compress_type == BTRFS_COMPRESS_LZO)
- compress_type = "lzo";
- else
- compress_type = "zstd";
+ compress_type = btrfs_compress_type2str(info->compress_type);
if (btrfs_test_opt(info, FORCE_COMPRESS))
seq_printf(seq, ",compress-force=%s", compress_type);
else
@@ -1365,86 +1387,12 @@ static inline int is_subvolume_inode(struct inode *inode)
return 0;
}
-/*
- * This will add subvolid=0 to the argument string while removing any subvol=
- * and subvolid= arguments to make sure we get the top-level root for path
- * walking to the subvol we want.
- */
-static char *setup_root_args(char *args)
-{
- char *buf, *dst, *sep;
-
- if (!args)
- return kstrdup("subvolid=0", GFP_KERNEL);
-
- /* The worst case is that we add ",subvolid=0" to the end. */
- buf = dst = kmalloc(strlen(args) + strlen(",subvolid=0") + 1,
- GFP_KERNEL);
- if (!buf)
- return NULL;
-
- while (1) {
- sep = strchrnul(args, ',');
- if (!strstarts(args, "subvol=") &&
- !strstarts(args, "subvolid=")) {
- memcpy(dst, args, sep - args);
- dst += sep - args;
- *dst++ = ',';
- }
- if (*sep)
- args = sep + 1;
- else
- break;
- }
- strcpy(dst, "subvolid=0");
-
- return buf;
-}
-
static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid,
- int flags, const char *device_name,
- char *data)
+ const char *device_name, struct vfsmount *mnt)
{
struct dentry *root;
- struct vfsmount *mnt = NULL;
- char *newargs;
int ret;
- newargs = setup_root_args(data);
- if (!newargs) {
- root = ERR_PTR(-ENOMEM);
- goto out;
- }
-
- mnt = vfs_kern_mount(&btrfs_fs_type, flags, device_name, newargs);
- if (PTR_ERR_OR_ZERO(mnt) == -EBUSY) {
- if (flags & SB_RDONLY) {
- mnt = vfs_kern_mount(&btrfs_fs_type, flags & ~SB_RDONLY,
- device_name, newargs);
- } else {
- mnt = vfs_kern_mount(&btrfs_fs_type, flags | SB_RDONLY,
- device_name, newargs);
- if (IS_ERR(mnt)) {
- root = ERR_CAST(mnt);
- mnt = NULL;
- goto out;
- }
-
- down_write(&mnt->mnt_sb->s_umount);
- ret = btrfs_remount(mnt->mnt_sb, &flags, NULL);
- up_write(&mnt->mnt_sb->s_umount);
- if (ret < 0) {
- root = ERR_PTR(ret);
- goto out;
- }
- }
- }
- if (IS_ERR(mnt)) {
- root = ERR_CAST(mnt);
- mnt = NULL;
- goto out;
- }
-
if (!subvol_name) {
if (!subvol_objectid) {
ret = get_default_subvol_objectid(btrfs_sb(mnt->mnt_sb),
@@ -1500,7 +1448,6 @@ static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid,
out:
mntput(mnt);
- kfree(newargs);
kfree(subvol_name);
return root;
}
@@ -1558,11 +1505,11 @@ static int setup_security_options(struct btrfs_fs_info *fs_info,
/*
* Find a superblock for the given device / mount point.
*
- * Note: This is based on get_sb_bdev from fs/super.c with a few additions
- * for multiple device setup. Make sure to keep it in sync.
+ * Note: This is based on mount_bdev from fs/super.c with a few additions
+ * for multiple device setup. Make sure to keep it in sync.
*/
-static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
- const char *device_name, void *data)
+static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
+ int flags, const char *device_name, void *data)
{
struct block_device *bdev = NULL;
struct super_block *s;
@@ -1570,27 +1517,17 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
struct btrfs_fs_info *fs_info = NULL;
struct security_mnt_opts new_sec_opts;
fmode_t mode = FMODE_READ;
- char *subvol_name = NULL;
- u64 subvol_objectid = 0;
int error = 0;
if (!(flags & SB_RDONLY))
mode |= FMODE_WRITE;
error = btrfs_parse_early_options(data, mode, fs_type,
- &subvol_name, &subvol_objectid,
&fs_devices);
if (error) {
- kfree(subvol_name);
return ERR_PTR(error);
}
- if (subvol_name || subvol_objectid != BTRFS_FS_TREE_OBJECTID) {
- /* mount_subvol() will free subvol_name. */
- return mount_subvol(subvol_name, subvol_objectid, flags,
- device_name, data);
- }
-
security_init_mnt_opts(&new_sec_opts);
if (data) {
error = parse_security_options(data, &new_sec_opts);
@@ -1674,6 +1611,84 @@ error_sec_opts:
return ERR_PTR(error);
}
+/*
+ * Mount function which is called by VFS layer.
+ *
+ * In order to allow mounting a subvolume directly, btrfs uses mount_subtree()
+ * which needs the vfsmount of the device's root (/). This means the
+ * device's root has to be mounted internally in any case.
+ *
+ * Operation flow:
+ * 1. Parse subvol id related options for later use in mount_subvol().
+ *
+ * 2. Mount device's root (/) by calling vfs_kern_mount().
+ *
+ * NOTE: vfs_kern_mount() is used by VFS to call btrfs_mount() in the
+ * first place. In order to avoid calling btrfs_mount() again, we use
+ * a different file_system_type that is not registered with VFS by
+ * register_filesystem() (btrfs_root_fs_type). As a result,
+ * btrfs_mount_root() is called. The return value will be used by
+ * mount_subtree() in mount_subvol().
+ *
+ * 3. Call mount_subvol() to get the dentry of the subvolume. Since there
+ * is "btrfs subvolume set-default", mount_subvol() is always called.
+ */
+static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
+ const char *device_name, void *data)
+{
+ struct vfsmount *mnt_root;
+ struct dentry *root;
+ fmode_t mode = FMODE_READ;
+ char *subvol_name = NULL;
+ u64 subvol_objectid = 0;
+ int error = 0;
+
+ if (!(flags & SB_RDONLY))
+ mode |= FMODE_WRITE;
+
+ error = btrfs_parse_subvol_options(data, mode,
+ &subvol_name, &subvol_objectid);
+ if (error) {
+ kfree(subvol_name);
+ return ERR_PTR(error);
+ }
+
+ /* mount device's root (/) */
+ mnt_root = vfs_kern_mount(&btrfs_root_fs_type, flags, device_name, data);
+ if (PTR_ERR_OR_ZERO(mnt_root) == -EBUSY) {
+ if (flags & SB_RDONLY) {
+ mnt_root = vfs_kern_mount(&btrfs_root_fs_type,
+ flags & ~SB_RDONLY, device_name, data);
+ } else {
+ mnt_root = vfs_kern_mount(&btrfs_root_fs_type,
+ flags | SB_RDONLY, device_name, data);
+ if (IS_ERR(mnt_root)) {
+ root = ERR_CAST(mnt_root);
+ goto out;
+ }
+
+ down_write(&mnt_root->mnt_sb->s_umount);
+ error = btrfs_remount(mnt_root->mnt_sb, &flags, NULL);
+ up_write(&mnt_root->mnt_sb->s_umount);
+ if (error < 0) {
+ root = ERR_PTR(error);
+ mntput(mnt_root);
+ goto out;
+ }
+ }
+ }
+ if (IS_ERR(mnt_root)) {
+ root = ERR_CAST(mnt_root);
+ goto out;
+ }
+
+ /* mount_subvol() will free subvol_name and mnt_root */
+ root = mount_subvol(subvol_name, subvol_objectid, device_name, mnt_root);
+
+out:
+ return root;
+}
+
static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
int new_pool_size, int old_pool_size)
{
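
The unregistered-type trick above is worth isolating: only registered file_system_types are reachable from mount(2), but vfs_kern_mount() takes the type by pointer, so an internal twin with the same name can drive the recursive mount. A hypothetical minimal shape (example_mount_root stands in for a real .mount implementation):

    #include <linux/fs.h>
    #include <linux/mount.h>

    static struct dentry *example_mount_root(struct file_system_type *,
                                             int, const char *, void *);

    /* Never passed to register_filesystem(); reachable only by pointer. */
    static struct file_system_type example_root_fs_type = {
            .owner   = THIS_MODULE,
            .name    = "example",
            .mount   = example_mount_root,
            .kill_sb = kill_block_super,
    };

    static struct dentry *example_mount(struct file_system_type *fs_type,
                                        int flags, const char *dev, void *data)
    {
            /* Recurses into example_mount_root(), never back into here. */
            struct vfsmount *mnt = vfs_kern_mount(&example_root_fs_type,
                                                  flags, dev, data);

            if (IS_ERR(mnt))
                    return ERR_CAST(mnt);

            /* Consumes @mnt and returns the dentry to attach. */
            return mount_subtree(mnt, "/");
    }
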
@@ -1820,7 +1835,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
goto restore;
}
- if (!btrfs_check_rw_degradable(fs_info)) {
+ if (!btrfs_check_rw_degradable(fs_info, NULL)) {
btrfs_warn(fs_info,
"too many missing devices, writeable remount is not allowed");
ret = -EACCES;
@@ -1972,8 +1987,10 @@ static int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
rcu_read_lock();
list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
- if (!device->in_fs_metadata || !device->bdev ||
- device->is_tgtdev_for_dev_replace)
+ if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
+ &device->dev_state) ||
+ !device->bdev ||
+ test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
continue;
if (i >= nr_devices)
@@ -2174,6 +2191,15 @@ static struct file_system_type btrfs_fs_type = {
.kill_sb = btrfs_kill_super,
.fs_flags = FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA,
};
+
+static struct file_system_type btrfs_root_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "btrfs",
+ .mount = btrfs_mount_root,
+ .kill_sb = btrfs_kill_super,
+ .fs_flags = FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA,
+};
+
MODULE_ALIAS_FS("btrfs");
static int btrfs_control_open(struct inode *inode, struct file *file)
@@ -2207,11 +2233,11 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
switch (cmd) {
case BTRFS_IOC_SCAN_DEV:
ret = btrfs_scan_one_device(vol->name, FMODE_READ,
- &btrfs_fs_type, &fs_devices);
+ &btrfs_root_fs_type, &fs_devices);
break;
case BTRFS_IOC_DEVICES_READY:
ret = btrfs_scan_one_device(vol->name, FMODE_READ,
- &btrfs_fs_type, &fs_devices);
+ &btrfs_root_fs_type, &fs_devices);
if (ret)
break;
ret = !(fs_devices->num_devices == fs_devices->total_devices);
@@ -2269,7 +2295,7 @@ static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
while (cur_devices) {
head = &cur_devices->devices;
list_for_each_entry(dev, head, dev_list) {
- if (dev->missing)
+ if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
continue;
if (!dev->name)
continue;
@@ -2324,7 +2350,7 @@ static struct miscdevice btrfs_misc = {
MODULE_ALIAS_MISCDEV(BTRFS_MINOR);
MODULE_ALIAS("devname:btrfs-control");
-static int btrfs_interface_init(void)
+static int __init btrfs_interface_init(void)
{
return misc_register(&btrfs_misc);
}
@@ -2334,7 +2360,7 @@ static void btrfs_interface_exit(void)
misc_deregister(&btrfs_misc);
}
-static void btrfs_print_mod_info(void)
+static void __init btrfs_print_mod_info(void)
{
pr_info("Btrfs loaded, crc32c=%s"
#ifdef CONFIG_BTRFS_DEBUG
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index a28bba801264..a8bafed931f4 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -897,7 +897,7 @@ static int btrfs_init_debugfs(void)
return 0;
}
-int btrfs_init_sysfs(void)
+int __init btrfs_init_sysfs(void)
{
int ret;
diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c
index d3f25376a0f8..9786d8cd0aa6 100644
--- a/fs/btrfs/tests/btrfs-tests.c
+++ b/fs/btrfs/tests/btrfs-tests.c
@@ -277,6 +277,9 @@ int btrfs_run_sanity_tests(void)
goto out;
}
}
+ ret = btrfs_test_extent_map();
+ if (ret)
+ goto out;
out:
btrfs_destroy_test_fs();
return ret;
diff --git a/fs/btrfs/tests/btrfs-tests.h b/fs/btrfs/tests/btrfs-tests.h
index 266f1e3d1784..bc0615bac3cc 100644
--- a/fs/btrfs/tests/btrfs-tests.h
+++ b/fs/btrfs/tests/btrfs-tests.h
@@ -33,6 +33,7 @@ int btrfs_test_extent_io(u32 sectorsize, u32 nodesize);
int btrfs_test_inodes(u32 sectorsize, u32 nodesize);
int btrfs_test_qgroups(u32 sectorsize, u32 nodesize);
int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize);
+int btrfs_test_extent_map(void);
struct inode *btrfs_new_test_inode(void);
struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize);
void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info);
diff --git a/fs/btrfs/tests/extent-map-tests.c b/fs/btrfs/tests/extent-map-tests.c
new file mode 100644
index 000000000000..70c993f01670
--- /dev/null
+++ b/fs/btrfs/tests/extent-map-tests.c
@@ -0,0 +1,366 @@
+/*
+ * Copyright (C) 2017 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/types.h>
+#include "btrfs-tests.h"
+#include "../ctree.h"
+
+static void free_extent_map_tree(struct extent_map_tree *em_tree)
+{
+ struct extent_map *em;
+ struct rb_node *node;
+
+ while (!RB_EMPTY_ROOT(&em_tree->map)) {
+ node = rb_first(&em_tree->map);
+ em = rb_entry(node, struct extent_map, rb_node);
+ remove_extent_mapping(em_tree, em);
+
+#ifdef CONFIG_BTRFS_DEBUG
+ if (refcount_read(&em->refs) != 1) {
+ test_msg(
+"em leak: em (start 0x%llx len 0x%llx block_start 0x%llx block_len 0x%llx) refs %d\n",
+ em->start, em->len, em->block_start,
+ em->block_len, refcount_read(&em->refs));
+
+ refcount_set(&em->refs, 1);
+ }
+#endif
+ free_extent_map(em);
+ }
+}
+
+/*
+ * Test scenario:
+ *
+ * Suppose that no extent map has been loaded into memory yet. There is a file
+ * extent [0, 16K) followed by another file extent [16K, 20K); two dio reads
+ * enter btrfs_get_extent() concurrently, t1 reading [8K, 16K) and t2 reading
+ * [0, 8K).
+ *
+ * t1 t2
+ * btrfs_get_extent() btrfs_get_extent()
+ * -> lookup_extent_mapping() ->lookup_extent_mapping()
+ * -> add_extent_mapping(0, 16K)
+ * -> return em
+ * ->add_extent_mapping(0, 16K)
+ * -> #handle -EEXIST
+ */
+static void test_case_1(struct extent_map_tree *em_tree)
+{
+ struct extent_map *em;
+ u64 start = 0;
+ u64 len = SZ_8K;
+ int ret;
+
+ em = alloc_extent_map();
+ if (!em)
+ /* Skip the test on error. */
+ return;
+
+ /* Add [0, 16K) */
+ em->start = 0;
+ em->len = SZ_16K;
+ em->block_start = 0;
+ em->block_len = SZ_16K;
+ ret = add_extent_mapping(em_tree, em, 0);
+ ASSERT(ret == 0);
+ free_extent_map(em);
+
+ /* Add [16K, 20K) following [0, 16K) */
+ em = alloc_extent_map();
+ if (!em)
+ goto out;
+
+ em->start = SZ_16K;
+ em->len = SZ_4K;
+ em->block_start = SZ_32K; /* avoid merging */
+ em->block_len = SZ_4K;
+ ret = add_extent_mapping(em_tree, em, 0);
+ ASSERT(ret == 0);
+ free_extent_map(em);
+
+ em = alloc_extent_map();
+ if (!em)
+ goto out;
+
+ /* Add [0, 8K), should return [0, 16K) instead. */
+ em->start = start;
+ em->len = len;
+ em->block_start = start;
+ em->block_len = len;
+ ret = btrfs_add_extent_mapping(em_tree, &em, em->start, em->len);
+ if (ret)
+ test_msg("case1 [%llu %llu]: ret %d\n", start, start + len, ret);
+ if (em &&
+ (em->start != 0 || extent_map_end(em) != SZ_16K ||
+ em->block_start != 0 || em->block_len != SZ_16K))
+ test_msg(
+"case1 [%llu %llu]: ret %d return a wrong em (start %llu len %llu block_start %llu block_len %llu\n",
+ start, start + len, ret, em->start, em->len,
+ em->block_start, em->block_len);
+ free_extent_map(em);
+out:
+ /* free memory */
+ free_extent_map_tree(em_tree);
+}
+
+/*
+ * Test scenario:
+ *
+ * Reading an inline extent ends up with EEXIST, i.e. read an inline
+ * extent, discard the page cache, and read it again.
+ */
+static void test_case_2(struct extent_map_tree *em_tree)
+{
+ struct extent_map *em;
+ int ret;
+
+ em = alloc_extent_map();
+ if (!em)
+ /* Skip the test on error. */
+ return;
+
+ /* Add [0, 1K) */
+ em->start = 0;
+ em->len = SZ_1K;
+ em->block_start = EXTENT_MAP_INLINE;
+ em->block_len = (u64)-1;
+ ret = add_extent_mapping(em_tree, em, 0);
+ ASSERT(ret == 0);
+ free_extent_map(em);
+
+	/* Add [4K, 8K) following [0, 1K) */
+ em = alloc_extent_map();
+ if (!em)
+ goto out;
+
+ em->start = SZ_4K;
+ em->len = SZ_4K;
+ em->block_start = SZ_4K;
+ em->block_len = SZ_4K;
+ ret = add_extent_mapping(em_tree, em, 0);
+ ASSERT(ret == 0);
+ free_extent_map(em);
+
+ em = alloc_extent_map();
+ if (!em)
+ goto out;
+
+ /* Add [0, 1K) */
+ em->start = 0;
+ em->len = SZ_1K;
+ em->block_start = EXTENT_MAP_INLINE;
+ em->block_len = (u64)-1;
+ ret = btrfs_add_extent_mapping(em_tree, &em, em->start, em->len);
+ if (ret)
+ test_msg("case2 [0 1K]: ret %d\n", ret);
+ if (em &&
+ (em->start != 0 || extent_map_end(em) != SZ_1K ||
+ em->block_start != EXTENT_MAP_INLINE || em->block_len != (u64)-1))
+ test_msg(
+"case2 [0 1K]: ret %d return a wrong em (start %llu len %llu block_start %llu block_len %llu\n",
+ ret, em->start, em->len, em->block_start,
+ em->block_len);
+ free_extent_map(em);
+out:
+ /* free memory */
+ free_extent_map_tree(em_tree);
+}
+
+static void __test_case_3(struct extent_map_tree *em_tree, u64 start)
+{
+ struct extent_map *em;
+ u64 len = SZ_4K;
+ int ret;
+
+ em = alloc_extent_map();
+ if (!em)
+ /* Skip this test on error. */
+ return;
+
+ /* Add [4K, 8K) */
+ em->start = SZ_4K;
+ em->len = SZ_4K;
+ em->block_start = SZ_4K;
+ em->block_len = SZ_4K;
+ ret = add_extent_mapping(em_tree, em, 0);
+ ASSERT(ret == 0);
+ free_extent_map(em);
+
+ em = alloc_extent_map();
+ if (!em)
+ goto out;
+
+ /* Add [0, 16K) */
+ em->start = 0;
+ em->len = SZ_16K;
+ em->block_start = 0;
+ em->block_len = SZ_16K;
+ ret = btrfs_add_extent_mapping(em_tree, &em, start, len);
+ if (ret)
+ test_msg("case3 [0x%llx 0x%llx): ret %d\n",
+ start, start + len, ret);
+ /*
+ * Since bytes within em are contiguous, em->block_start is identical to
+ * em->start.
+ */
+ if (em &&
+ (start < em->start || start + len > extent_map_end(em) ||
+ em->start != em->block_start || em->len != em->block_len))
+ test_msg(
+"case3 [0x%llx 0x%llx): ret %d em (start 0x%llx len 0x%llx block_start 0x%llx block_len 0x%llx)\n",
+ start, start + len, ret, em->start, em->len,
+ em->block_start, em->block_len);
+ free_extent_map(em);
+out:
+ /* free memory */
+ free_extent_map_tree(em_tree);
+}
+
+/*
+ * Test scenario:
+ *
+ * Suppose that no extent map has been loaded into memory yet.
+ * There is a file extent [0, 16K), two jobs are running concurrently
+ * against it, t1 is buffered writing to [4K, 8K) and t2 is doing dio
+ * read from [0, 4K) or [8K, 12K) or [12K, 16K).
+ *
+ * t1 goes ahead of t2 and adds em [4K, 8K) into tree.
+ *
+ * t1 t2
+ * cow_file_range() btrfs_get_extent()
+ * -> lookup_extent_mapping()
+ * -> add_extent_mapping()
+ * -> add_extent_mapping()
+ */
+static void test_case_3(struct extent_map_tree *em_tree)
+{
+ __test_case_3(em_tree, 0);
+ __test_case_3(em_tree, SZ_8K);
+ __test_case_3(em_tree, (12 * 1024ULL));
+}
+
+static void __test_case_4(struct extent_map_tree *em_tree, u64 start)
+{
+ struct extent_map *em;
+ u64 len = SZ_4K;
+ int ret;
+
+ em = alloc_extent_map();
+ if (!em)
+ /* Skip this test on error. */
+ return;
+
+ /* Add [0K, 8K) */
+ em->start = 0;
+ em->len = SZ_8K;
+ em->block_start = 0;
+ em->block_len = SZ_8K;
+ ret = add_extent_mapping(em_tree, em, 0);
+ ASSERT(ret == 0);
+ free_extent_map(em);
+
+ em = alloc_extent_map();
+ if (!em)
+ goto out;
+
+	/* Add [8K, 32K) */
+ em->start = SZ_8K;
+ em->len = 24 * 1024ULL;
+ em->block_start = SZ_16K; /* avoid merging */
+ em->block_len = 24 * 1024ULL;
+ ret = add_extent_mapping(em_tree, em, 0);
+ ASSERT(ret == 0);
+ free_extent_map(em);
+
+ em = alloc_extent_map();
+ if (!em)
+ goto out;
+ /* Add [0K, 32K) */
+ em->start = 0;
+ em->len = SZ_32K;
+ em->block_start = 0;
+ em->block_len = SZ_32K;
+ ret = btrfs_add_extent_mapping(em_tree, &em, start, len);
+ if (ret)
+ test_msg("case4 [0x%llx 0x%llx): ret %d\n",
+			 start, start + len, ret);
+ if (em &&
+ (start < em->start || start + len > extent_map_end(em)))
+ test_msg(
+"case4 [0x%llx 0x%llx): ret %d, added wrong em (start 0x%llx len 0x%llx block_start 0x%llx block_len 0x%llx)\n",
+		start, start + len, ret, em->start, em->len, em->block_start,
+ em->block_len);
+ free_extent_map(em);
+out:
+ /* free memory */
+ free_extent_map_tree(em_tree);
+}
+
+/*
+ * Test scenario:
+ *
+ * Suppose that no extent map has been loaded into memory yet.
+ * There is a file extent [0, 32K), two jobs are running concurrently
+ * against it, t1 is doing dio write to [8K, 32K) and t2 is doing dio
+ * read from [0, 4K) or [4K, 8K).
+ *
+ * t1 goes ahead of t2 and splits em [0, 32K) to em [0K, 8K) and [8K 32K).
+ *
+ * t1 t2
+ * btrfs_get_blocks_direct() btrfs_get_blocks_direct()
+ * -> btrfs_get_extent() -> btrfs_get_extent()
+ * -> lookup_extent_mapping()
+ * -> add_extent_mapping() -> lookup_extent_mapping()
+ * # load [0, 32K)
+ * -> btrfs_new_extent_direct()
+ * -> btrfs_drop_extent_cache()
+ * # split [0, 32K)
+ * -> add_extent_mapping()
+ * # add [8K, 32K)
+ * -> add_extent_mapping()
+ * # handle -EEXIST when adding
+ * # [0, 32K)
+ */
+static void test_case_4(struct extent_map_tree *em_tree)
+{
+ __test_case_4(em_tree, 0);
+ __test_case_4(em_tree, SZ_4K);
+}
+
+int btrfs_test_extent_map(void)
+{
+ struct extent_map_tree *em_tree;
+
+ test_msg("Running extent_map tests\n");
+
+ em_tree = kzalloc(sizeof(*em_tree), GFP_KERNEL);
+ if (!em_tree)
+ /* Skip the test on error. */
+ return 0;
+
+ extent_map_tree_init(em_tree);
+
+ test_case_1(em_tree);
+ test_case_2(em_tree);
+ test_case_3(em_tree);
+ test_case_4(em_tree);
+
+ kfree(em_tree);
+ return 0;
+}
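The four test cases above share one contract: btrfs_add_extent_mapping() must resolve an -EEXIST collision by handing back an extent map that already covers the requested range. A rough user-space model of that contract — struct em and add_or_lookup() are illustrative stand-ins, not kernel code:

#include <stdio.h>

struct em { unsigned long long start, len; };

/* Illustrative stand-in: on overlap, hand back the existing mapping
 * (the -EEXIST case); otherwise the caller's mapping is "inserted". */
static struct em *add_or_lookup(struct em *tree, int n, struct em *req)
{
	int i;

	for (i = 0; i < n; i++) {
		if (req->start < tree[i].start + tree[i].len &&
		    tree[i].start < req->start + req->len)
			return &tree[i];
	}
	return req;
}

int main(void)
{
	struct em tree[] = { { 0, 16384 }, { 16384, 4096 } };	/* [0,16K), [16K,20K) */
	struct em want = { 0, 8192 };				/* t2 asks for [0,8K) */
	struct em *got = add_or_lookup(tree, 2, &want);

	/* Case 1 expects the existing [0, 16K) em, not the requested [0, 8K). */
	printf("got [%llu, %llu)\n", got->start, got->start + got->len);
	return 0;
}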
diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c
index 30affb60da51..13420cd19ef0 100644
--- a/fs/btrfs/tests/inode-tests.c
+++ b/fs/btrfs/tests/inode-tests.c
@@ -288,10 +288,6 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
test_msg("Expected a hole, got %llu\n", em->block_start);
goto out;
}
- if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
- test_msg("Vacancy flag wasn't set properly\n");
- goto out;
- }
free_extent_map(em);
btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0);
@@ -1001,8 +997,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
BTRFS_MAX_EXTENT_SIZE >> 1,
(BTRFS_MAX_EXTENT_SIZE >> 1) + sectorsize - 1,
EXTENT_DELALLOC | EXTENT_DIRTY |
- EXTENT_UPTODATE, 0, 0,
- NULL, GFP_KERNEL);
+ EXTENT_UPTODATE, 0, 0, NULL);
if (ret) {
test_msg("clear_extent_bit returned %d\n", ret);
goto out;
@@ -1070,8 +1065,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
BTRFS_MAX_EXTENT_SIZE + sectorsize,
BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1,
EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_UPTODATE, 0, 0,
- NULL, GFP_KERNEL);
+ EXTENT_UPTODATE, 0, 0, NULL);
if (ret) {
test_msg("clear_extent_bit returned %d\n", ret);
goto out;
@@ -1104,8 +1098,7 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
/* Empty */
ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_UPTODATE, 0, 0,
- NULL, GFP_KERNEL);
+ EXTENT_UPTODATE, 0, 0, NULL);
if (ret) {
test_msg("clear_extent_bit returned %d\n", ret);
goto out;
@@ -1121,8 +1114,7 @@ out:
if (ret)
clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_UPTODATE, 0, 0,
- NULL, GFP_KERNEL);
+ EXTENT_UPTODATE, 0, 0, NULL);
iput(inode);
btrfs_free_dummy_root(root);
btrfs_free_dummy_fs_info(fs_info);
@@ -1134,7 +1126,6 @@ int btrfs_test_inodes(u32 sectorsize, u32 nodesize)
int ret;
set_bit(EXTENT_FLAG_COMPRESSED, &compressed_only);
- set_bit(EXTENT_FLAG_VACANCY, &vacancy_only);
set_bit(EXTENT_FLAG_PREALLOC, &prealloc_only);
test_msg("Running btrfs_get_extent tests\n");
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 5a8c2649af2f..04f07144b45c 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -495,8 +495,8 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
if (current->journal_info) {
WARN_ON(type & TRANS_EXTWRITERS);
h = current->journal_info;
- h->use_count++;
- WARN_ON(h->use_count > 2);
+ refcount_inc(&h->use_count);
+ WARN_ON(refcount_read(&h->use_count) > 2);
h->orig_rsv = h->block_rsv;
h->block_rsv = NULL;
goto got_it;
@@ -567,7 +567,7 @@ again:
h->transid = cur_trans->transid;
h->transaction = cur_trans;
h->root = root;
- h->use_count = 1;
+ refcount_set(&h->use_count, 1);
h->fs_info = root->fs_info;
h->type = type;
@@ -837,8 +837,8 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
int err = 0;
int must_run_delayed_refs = 0;
- if (trans->use_count > 1) {
- trans->use_count--;
+ if (refcount_read(&trans->use_count) > 1) {
+ refcount_dec(&trans->use_count);
trans->block_rsv = trans->orig_rsv;
return 0;
}
@@ -1016,8 +1016,7 @@ static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info,
* it's safe to do it (through clear_btree_io_tree()).
*/
err = clear_extent_bit(dirty_pages, start, end,
- EXTENT_NEED_WAIT,
- 0, 0, &cached_state, GFP_NOFS);
+ EXTENT_NEED_WAIT, 0, 0, &cached_state);
if (err == -ENOMEM)
err = 0;
if (!err)
@@ -1869,7 +1868,7 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans,
struct btrfs_transaction *cur_trans = trans->transaction;
DEFINE_WAIT(wait);
- WARN_ON(trans->use_count > 1);
+ WARN_ON(refcount_read(&trans->use_count) > 1);
btrfs_abort_transaction(trans, err);
@@ -2266,16 +2265,13 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
}
ret = write_all_supers(fs_info, 0);
- if (ret) {
- mutex_unlock(&fs_info->tree_log_mutex);
- goto scrub_continue;
- }
-
/*
* the super is written, we can safely allow the tree-loggers
* to go about their business
*/
mutex_unlock(&fs_info->tree_log_mutex);
+ if (ret)
+ goto scrub_continue;
btrfs_finish_extent_commit(trans, fs_info);
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index c55e44560103..6beee072b1bd 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -58,6 +58,7 @@ struct btrfs_transaction {
/* Be protected by fs_info->trans_lock when we want to change it. */
enum btrfs_trans_state state;
+ int aborted;
struct list_head list;
struct extent_io_tree dirty_pages;
unsigned long start_time;
@@ -70,7 +71,6 @@ struct btrfs_transaction {
struct list_head dirty_bgs;
struct list_head io_bgs;
struct list_head dropped_roots;
- u64 num_dirty_bgs;
/*
* we need to make sure block group deletion doesn't race with
@@ -79,11 +79,11 @@ struct btrfs_transaction {
*/
struct mutex cache_write_mutex;
spinlock_t dirty_bgs_lock;
+ unsigned int num_dirty_bgs;
/* Protected by spin lock fs_info->unused_bgs_lock. */
struct list_head deleted_bgs;
spinlock_t dropped_roots_lock;
struct btrfs_delayed_ref_root delayed_refs;
- int aborted;
struct btrfs_fs_info *fs_info;
};
@@ -111,20 +111,19 @@ struct btrfs_trans_handle {
u64 transid;
u64 bytes_reserved;
u64 chunk_bytes_reserved;
- unsigned long use_count;
- unsigned long blocks_reserved;
unsigned long delayed_ref_updates;
struct btrfs_transaction *transaction;
struct btrfs_block_rsv *block_rsv;
struct btrfs_block_rsv *orig_rsv;
+ refcount_t use_count;
+ unsigned int type;
short aborted;
- short adding_csums;
+ bool adding_csums;
bool allocating_chunk;
bool can_flush_pending_bgs;
bool reloc_reserved;
bool sync;
bool dirty;
- unsigned int type;
struct btrfs_root *root;
struct btrfs_fs_info *fs_info;
struct list_head new_bgs;
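The use_count hunks above replace an open-coded counter with refcount_t, which saturates instead of wrapping on overflow. A minimal sketch of the lifecycle those hunks implement — struct handle is a hypothetical stand-in for btrfs_trans_handle, using only <linux/refcount.h> primitives that appear in the diff:

#include <linux/refcount.h>
#include <linux/types.h>

struct handle { refcount_t use_count; };	/* stand-in for btrfs_trans_handle */

static void handle_first_use(struct handle *h)
{
	refcount_set(&h->use_count, 1);		/* start_transaction() */
}

static void handle_nested_use(struct handle *h)
{
	refcount_inc(&h->use_count);		/* re-entry via current->journal_info */
}

/* Returns true when the caller holds the last reference and may tear down. */
static bool handle_end_use(struct handle *h)
{
	if (refcount_read(&h->use_count) > 1) {
		refcount_dec(&h->use_count);	/* __btrfs_end_transaction() path */
		return false;
	}
	return true;
}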
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index ce4ed6ec8f39..c3c8d48f6618 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -30,6 +30,7 @@
#include "tree-checker.h"
#include "disk-io.h"
#include "compression.h"
+#include "hash.h"
/*
* Error message should follow the following format:
@@ -223,6 +224,142 @@ static int check_csum_item(struct btrfs_root *root, struct extent_buffer *leaf,
}
/*
+ * Customized reporting for dir_item; the only important new info is
+ * key->objectid, which represents the inode number
+ */
+__printf(4, 5)
+static void dir_item_err(const struct btrfs_root *root,
+ const struct extent_buffer *eb, int slot,
+ const char *fmt, ...)
+{
+ struct btrfs_key key;
+ struct va_format vaf;
+ va_list args;
+
+ btrfs_item_key_to_cpu(eb, &key, slot);
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ btrfs_crit(root->fs_info,
+ "corrupt %s: root=%llu block=%llu slot=%d ino=%llu, %pV",
+ btrfs_header_level(eb) == 0 ? "leaf" : "node", root->objectid,
+ btrfs_header_bytenr(eb), slot, key.objectid, &vaf);
+ va_end(args);
+}
+
+static int check_dir_item(struct btrfs_root *root,
+ struct extent_buffer *leaf,
+ struct btrfs_key *key, int slot)
+{
+ struct btrfs_dir_item *di;
+ u32 item_size = btrfs_item_size_nr(leaf, slot);
+ u32 cur = 0;
+
+ di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
+ while (cur < item_size) {
+ u32 name_len;
+ u32 data_len;
+ u32 max_name_len;
+ u32 total_size;
+ u32 name_hash;
+ u8 dir_type;
+
+ /* header itself should not cross item boundary */
+ if (cur + sizeof(*di) > item_size) {
+ dir_item_err(root, leaf, slot,
+ "dir item header crosses item boundary, have %zu boundary %u",
+ cur + sizeof(*di), item_size);
+ return -EUCLEAN;
+ }
+
+ /* dir type check */
+ dir_type = btrfs_dir_type(leaf, di);
+ if (dir_type >= BTRFS_FT_MAX) {
+ dir_item_err(root, leaf, slot,
+ "invalid dir item type, have %u expect [0, %u)",
+ dir_type, BTRFS_FT_MAX);
+ return -EUCLEAN;
+ }
+
+ if (key->type == BTRFS_XATTR_ITEM_KEY &&
+ dir_type != BTRFS_FT_XATTR) {
+ dir_item_err(root, leaf, slot,
+ "invalid dir item type for XATTR key, have %u expect %u",
+ dir_type, BTRFS_FT_XATTR);
+ return -EUCLEAN;
+ }
+ if (dir_type == BTRFS_FT_XATTR &&
+ key->type != BTRFS_XATTR_ITEM_KEY) {
+ dir_item_err(root, leaf, slot,
+ "xattr dir type found for non-XATTR key");
+ return -EUCLEAN;
+ }
+ if (dir_type == BTRFS_FT_XATTR)
+ max_name_len = XATTR_NAME_MAX;
+ else
+ max_name_len = BTRFS_NAME_LEN;
+
+ /* Name/data length check */
+ name_len = btrfs_dir_name_len(leaf, di);
+ data_len = btrfs_dir_data_len(leaf, di);
+ if (name_len > max_name_len) {
+ dir_item_err(root, leaf, slot,
+ "dir item name len too long, have %u max %u",
+ name_len, max_name_len);
+ return -EUCLEAN;
+ }
+ if (name_len + data_len > BTRFS_MAX_XATTR_SIZE(root->fs_info)) {
+ dir_item_err(root, leaf, slot,
+ "dir item name and data len too long, have %u max %u",
+ name_len + data_len,
+ BTRFS_MAX_XATTR_SIZE(root->fs_info));
+ return -EUCLEAN;
+ }
+
+ if (data_len && dir_type != BTRFS_FT_XATTR) {
+ dir_item_err(root, leaf, slot,
+ "dir item with invalid data len, have %u expect 0",
+ data_len);
+ return -EUCLEAN;
+ }
+
+ total_size = sizeof(*di) + name_len + data_len;
+
+ /* header and name/data should not cross item boundary */
+ if (cur + total_size > item_size) {
+ dir_item_err(root, leaf, slot,
+ "dir item data crosses item boundary, have %u boundary %u",
+ cur + total_size, item_size);
+ return -EUCLEAN;
+ }
+
+ /*
+		 * Special check for XATTR/DIR_ITEM, as key->offset is the name
+		 * hash, which should match the name
+ */
+ if (key->type == BTRFS_DIR_ITEM_KEY ||
+ key->type == BTRFS_XATTR_ITEM_KEY) {
+ char namebuf[max(BTRFS_NAME_LEN, XATTR_NAME_MAX)];
+
+ read_extent_buffer(leaf, namebuf,
+ (unsigned long)(di + 1), name_len);
+ name_hash = btrfs_name_hash(namebuf, name_len);
+ if (key->offset != name_hash) {
+ dir_item_err(root, leaf, slot,
+ "name hash mismatch with key, have 0x%016x expect 0x%016llx",
+ name_hash, key->offset);
+ return -EUCLEAN;
+ }
+ }
+ cur += total_size;
+ di = (struct btrfs_dir_item *)((void *)di + total_size);
+ }
+ return 0;
+}
+
+/*
* Common point to switch the item-specific validation.
*/
static int check_leaf_item(struct btrfs_root *root,
@@ -238,6 +375,11 @@ static int check_leaf_item(struct btrfs_root *root,
case BTRFS_EXTENT_CSUM_KEY:
ret = check_csum_item(root, leaf, key, slot);
break;
+ case BTRFS_DIR_ITEM_KEY:
+ case BTRFS_DIR_INDEX_KEY:
+ case BTRFS_XATTR_ITEM_KEY:
+ ret = check_dir_item(root, leaf, key, slot);
+ break;
}
return ret;
}
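check_dir_item() above walks a packed sequence of (header, name, data) records within a single leaf item and rejects anything crossing the item boundary. A self-contained model of that walk, with plain integers standing in for the btrfs accessors (illustrative only, not the patch's code):

#include <stdint.h>
#include <string.h>

struct dir_hdr { uint16_t name_len, data_len; };	/* stand-in header */

/* Mirrors the check_dir_item() loop: the header must fit, then the
 * header plus name plus data must fit, before the cursor advances. */
static int walk_dir_items(const uint8_t *item, uint32_t item_size)
{
	uint32_t cur = 0;

	while (cur < item_size) {
		struct dir_hdr hdr;
		uint32_t total;

		if (cur + sizeof(hdr) > item_size)
			return -1;			/* header crosses boundary */
		memcpy(&hdr, item + cur, sizeof(hdr));
		total = sizeof(hdr) + hdr.name_len + hdr.data_len;
		if (cur + total > item_size)
			return -1;			/* payload crosses boundary */
		cur += total;
	}
	return 0;
}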
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 7bf9b31561db..afadaadab18e 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -20,6 +20,7 @@
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/list_sort.h>
+#include <linux/iversion.h>
#include "tree-log.h"
#include "disk-io.h"
#include "locking.h"
@@ -1173,19 +1174,15 @@ next:
return 0;
}
-static int extref_get_fields(struct extent_buffer *eb, int slot,
- unsigned long ref_ptr, u32 *namelen, char **name,
- u64 *index, u64 *parent_objectid)
+static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
+ u32 *namelen, char **name, u64 *index,
+ u64 *parent_objectid)
{
struct btrfs_inode_extref *extref;
extref = (struct btrfs_inode_extref *)ref_ptr;
*namelen = btrfs_inode_extref_name_len(eb, extref);
- if (!btrfs_is_name_len_valid(eb, slot, (unsigned long)&extref->name,
- *namelen))
- return -EIO;
-
*name = kmalloc(*namelen, GFP_NOFS);
if (*name == NULL)
return -ENOMEM;
@@ -1200,19 +1197,14 @@ static int extref_get_fields(struct extent_buffer *eb, int slot,
return 0;
}
-static int ref_get_fields(struct extent_buffer *eb, int slot,
- unsigned long ref_ptr, u32 *namelen, char **name,
- u64 *index)
+static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
+ u32 *namelen, char **name, u64 *index)
{
struct btrfs_inode_ref *ref;
ref = (struct btrfs_inode_ref *)ref_ptr;
*namelen = btrfs_inode_ref_name_len(eb, ref);
- if (!btrfs_is_name_len_valid(eb, slot, (unsigned long)(ref + 1),
- *namelen))
- return -EIO;
-
*name = kmalloc(*namelen, GFP_NOFS);
if (*name == NULL)
return -ENOMEM;
@@ -1287,8 +1279,8 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
while (ref_ptr < ref_end) {
if (log_ref_ver) {
- ret = extref_get_fields(eb, slot, ref_ptr, &namelen,
- &name, &ref_index, &parent_objectid);
+ ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
+ &ref_index, &parent_objectid);
/*
* parent object can change from one array
* item to another.
@@ -1300,8 +1292,8 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
goto out;
}
} else {
- ret = ref_get_fields(eb, slot, ref_ptr, &namelen,
- &name, &ref_index);
+ ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
+ &ref_index);
}
if (ret)
goto out;
@@ -1835,7 +1827,6 @@ static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
struct extent_buffer *eb, int slot,
struct btrfs_key *key)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
int ret = 0;
u32 item_size = btrfs_item_size_nr(eb, slot);
struct btrfs_dir_item *di;
@@ -1848,8 +1839,6 @@ static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
ptr_end = ptr + item_size;
while (ptr < ptr_end) {
di = (struct btrfs_dir_item *)ptr;
- if (verify_dir_item(fs_info, eb, slot, di))
- return -EIO;
name_len = btrfs_dir_name_len(eb, di);
ret = replay_one_name(trans, root, path, eb, di, key);
if (ret < 0)
@@ -2024,11 +2013,6 @@ again:
ptr_end = ptr + item_size;
while (ptr < ptr_end) {
di = (struct btrfs_dir_item *)ptr;
- if (verify_dir_item(fs_info, eb, slot, di)) {
- ret = -EIO;
- goto out;
- }
-
name_len = btrfs_dir_name_len(eb, di);
name = kmalloc(name_len, GFP_NOFS);
if (!name) {
@@ -2109,7 +2093,6 @@ static int replay_xattr_deletes(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
const u64 ino)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key search_key;
struct btrfs_path *log_path;
int i;
@@ -2151,11 +2134,6 @@ process_leaf:
u32 this_len = sizeof(*di) + name_len + data_len;
char *name;
- ret = verify_dir_item(fs_info, path->nodes[0], i, di);
- if (ret) {
- ret = -EIO;
- goto out;
- }
name = kmalloc(name_len, GFP_NOFS);
if (!name) {
ret = -ENOMEM;
@@ -3609,7 +3587,8 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
&token);
- btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
+ btrfs_set_token_inode_sequence(leaf, item,
+ inode_peek_iversion(inode), &token);
btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
@@ -4572,12 +4551,6 @@ static int btrfs_check_ref_name_override(struct extent_buffer *eb,
this_len = sizeof(*extref) + this_name_len;
}
- ret = btrfs_is_name_len_valid(eb, slot, name_ptr,
- this_name_len);
- if (!ret) {
- ret = -EIO;
- goto out;
- }
if (this_name_len > name_len) {
char *new_name;
@@ -5432,11 +5405,10 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
struct dentry *parent,
const loff_t start,
const loff_t end,
- int exists_only,
+ int inode_only,
struct btrfs_log_ctx *ctx)
{
struct btrfs_fs_info *fs_info = root->fs_info;
- int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
struct super_block *sb;
struct dentry *old_parent = NULL;
int ret = 0;
@@ -5602,7 +5574,7 @@ int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
int ret;
ret = btrfs_log_inode_parent(trans, root, BTRFS_I(d_inode(dentry)),
- parent, start, end, 0, ctx);
+ parent, start, end, LOG_INODE_ALL, ctx);
dput(parent);
return ret;
@@ -5865,6 +5837,6 @@ int btrfs_log_new_name(struct btrfs_trans_handle *trans,
return 0;
return btrfs_log_inode_parent(trans, root, inode, parent, 0,
- LLONG_MAX, 1, NULL);
+ LLONG_MAX, LOG_INODE_EXISTS, NULL);
}
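The volumes.c hunks below replace the per-device booleans (writeable, missing, in_fs_metadata, is_tgtdev_for_dev_replace) with BTRFS_DEV_STATE_* bits manipulated through the standard atomic bitops. A minimal sketch of the pattern — the bit numbers here are illustrative, not the values from the patch:

#include <linux/bitops.h>
#include <linux/types.h>

#define DEV_STATE_WRITEABLE	0	/* illustrative bit numbers only */
#define DEV_STATE_MISSING	1

struct dev { unsigned long dev_state; };

static bool dev_usable(struct dev *d)
{
	/* Replaces the old "device->writeable && !device->missing". */
	return test_bit(DEV_STATE_WRITEABLE, &d->dev_state) &&
	       !test_bit(DEV_STATE_MISSING, &d->dev_state);
}

static void dev_set_writeable(struct dev *d, bool rw)
{
	if (rw)
		set_bit(DEV_STATE_WRITEABLE, &d->dev_state);
	else
		clear_bit(DEV_STATE_WRITEABLE, &d->dev_state);
}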
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index a25684287501..b5036bd69e6a 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -145,6 +145,71 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
struct btrfs_bio **bbio_ret,
int mirror_num, int need_raid_map);
+/*
+ * Device locking
+ * ==============
+ *
+ * There are several mutexes that protect manipulation of devices and low-level
+ * structures like chunks but not block groups, extents or files
+ *
+ * uuid_mutex (global lock)
+ * ------------------------
+ * protects the fs_uuids list that tracks all per-fs fs_devices, populated
+ * either by SCAN_DEV ioctl registration or by mount, implicitly (the first
+ * device) or as requested by the device= mount option
+ *
+ * the mutex can be very coarse and can cover long-running operations
+ *
+ * protects: updates to fs_devices counters like missing devices, rw devices,
+ * seeding, structure cloning, opening/closing devices at mount/umount time
+ *
+ * global::fs_devs - add, remove, updates to the global list
+ *
+ * does not protect: manipulation of the fs_devices::devices list!
+ *
+ * btrfs_device::name - renames (write side), read is RCU
+ *
+ * fs_devices::device_list_mutex (per-fs, with RCU)
+ * ------------------------------------------------
+ * protects updates to fs_devices::devices, i.e. adding and deleting
+ *
+ * simple list traversal with read-only actions can be done with RCU protection
+ *
+ * may be used to exclude some operations from running concurrently without any
+ * modifications to the list (see write_all_supers)
+ *
+ * volume_mutex
+ * ------------
+ * coarse lock owned by a mounted filesystem; used to exclude some operations
+ * that cannot run in parallel and affect the higher-level properties of the
+ * filesystem, like device add/delete/resize/replace or balance
+ *
+ * balance_mutex
+ * -------------
+ * protects balance structures (status, state) and context accessed from
+ * several places (internally, ioctl)
+ *
+ * chunk_mutex
+ * -----------
+ * protects chunks, adding or removing during allocation, trim or when a new
+ * device is added/removed
+ *
+ * cleaner_mutex
+ * -------------
+ * a big lock that is held by the cleaner thread and prevents running subvolume
+ * cleaning together with relocation or delayed iputs
+ *
+ *
+ * Lock nesting
+ * ============
+ *
+ * uuid_mutex
+ * volume_mutex
+ * device_list_mutex
+ * chunk_mutex
+ * balance_mutex
+ */
+
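+/*
+ * Illustrative sketch, not part of this patch: a caller honouring the
+ * nesting documented above takes the locks outermost-first; the mutex
+ * pointers stand in for the corresponding fs_info/fs_devices members.
+ */
+static inline void lock_in_documented_order(struct mutex *uuid,
+					    struct mutex *volume,
+					    struct mutex *dev_list)
+{
+	mutex_lock(uuid);	/* global, outermost */
+	mutex_lock(volume);	/* per-fs coarse lock */
+	mutex_lock(dev_list);	/* fs_devices::device_list_mutex */
+}
+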
DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head *btrfs_get_fs_uuids(void)
@@ -180,6 +245,13 @@ static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
return fs_devs;
}
+static void free_device(struct btrfs_device *device)
+{
+ rcu_string_free(device->name);
+ bio_put(device->flush_bio);
+ kfree(device);
+}
+
static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
struct btrfs_device *device;
@@ -188,9 +260,7 @@ static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
device = list_entry(fs_devices->devices.next,
struct btrfs_device, dev_list);
list_del(&device->dev_list);
- rcu_string_free(device->name);
- bio_put(device->flush_bio);
- kfree(device);
+ free_device(device);
}
kfree(fs_devices);
}
@@ -220,6 +290,11 @@ void btrfs_cleanup_fs_uuids(void)
}
}
+/*
+ * Returns a pointer to a new btrfs_device on success; ERR_PTR() on error.
+ * Returned struct is not linked onto any lists and must be destroyed using
+ * free_device.
+ */
static struct btrfs_device *__alloc_device(void)
{
struct btrfs_device *dev;
@@ -244,7 +319,6 @@ static struct btrfs_device *__alloc_device(void)
spin_lock_init(&dev->io_lock);
- spin_lock_init(&dev->reada_lock);
atomic_set(&dev->reada_in_flight, 0);
atomic_set(&dev->dev_stats_ccnt, 0);
btrfs_device_data_ordered_init(dev);
@@ -530,45 +604,42 @@ static void pending_bios_fn(struct btrfs_work *work)
run_scheduled_bios(device);
}
-
-static void btrfs_free_stale_device(struct btrfs_device *cur_dev)
+/*
+ * Search for and remove all stale devices (devices which are not mounted).
+ * When both inputs are NULL, it will search for and release all stale devices.
+ * path: Optional. When provided, it will release all unmounted devices
+ * matching this path only.
+ * skip_dev: Optional. Will skip this device when searching for stale
+ * devices.
+ */
+static void btrfs_free_stale_devices(const char *path,
+ struct btrfs_device *skip_dev)
{
- struct btrfs_fs_devices *fs_devs;
- struct btrfs_device *dev;
-
- if (!cur_dev->name)
- return;
+ struct btrfs_fs_devices *fs_devs, *tmp_fs_devs;
+ struct btrfs_device *dev, *tmp_dev;
- list_for_each_entry(fs_devs, &fs_uuids, list) {
- int del = 1;
+ list_for_each_entry_safe(fs_devs, tmp_fs_devs, &fs_uuids, list) {
if (fs_devs->opened)
continue;
- if (fs_devs->seeding)
- continue;
- list_for_each_entry(dev, &fs_devs->devices, dev_list) {
+ list_for_each_entry_safe(dev, tmp_dev,
+ &fs_devs->devices, dev_list) {
+ int not_found = 0;
- if (dev == cur_dev)
+ if (skip_dev && skip_dev == dev)
continue;
- if (!dev->name)
+ if (path && !dev->name)
continue;
- /*
- * Todo: This won't be enough. What if the same device
- * comes back (with new uuid and) with its mapper path?
- * But for now, this does help as mostly an admin will
- * either use mapper or non mapper path throughout.
- */
rcu_read_lock();
- del = strcmp(rcu_str_deref(dev->name),
- rcu_str_deref(cur_dev->name));
+ if (path)
+ not_found = strcmp(rcu_str_deref(dev->name),
+ path);
rcu_read_unlock();
- if (!del)
- break;
- }
+ if (not_found)
+ continue;
- if (!del) {
/* delete the stale device */
if (fs_devs->num_devices == 1) {
btrfs_sysfs_remove_fsid(fs_devs);
@@ -577,38 +648,99 @@ static void btrfs_free_stale_device(struct btrfs_device *cur_dev)
} else {
fs_devs->num_devices--;
list_del(&dev->dev_list);
- rcu_string_free(dev->name);
- bio_put(dev->flush_bio);
- kfree(dev);
+ free_device(dev);
}
- break;
}
}
}
+static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
+ struct btrfs_device *device, fmode_t flags,
+ void *holder)
+{
+ struct request_queue *q;
+ struct block_device *bdev;
+ struct buffer_head *bh;
+ struct btrfs_super_block *disk_super;
+ u64 devid;
+ int ret;
+
+ if (device->bdev)
+ return -EINVAL;
+ if (!device->name)
+ return -EINVAL;
+
+ ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
+ &bdev, &bh);
+ if (ret)
+ return ret;
+
+ disk_super = (struct btrfs_super_block *)bh->b_data;
+ devid = btrfs_stack_device_id(&disk_super->dev_item);
+ if (devid != device->devid)
+ goto error_brelse;
+
+ if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
+ goto error_brelse;
+
+ device->generation = btrfs_super_generation(disk_super);
+
+ if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
+ clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
+ fs_devices->seeding = 1;
+ } else {
+ if (bdev_read_only(bdev))
+ clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
+ else
+ set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
+ }
+
+ q = bdev_get_queue(bdev);
+ if (!blk_queue_nonrot(q))
+ fs_devices->rotating = 1;
+
+ device->bdev = bdev;
+ clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
+ device->mode = flags;
+
+ fs_devices->open_devices++;
+ if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
+ device->devid != BTRFS_DEV_REPLACE_DEVID) {
+ fs_devices->rw_devices++;
+ list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
+ }
+ brelse(bh);
+
+ return 0;
+
+error_brelse:
+ brelse(bh);
+ blkdev_put(bdev, flags);
+
+ return -EINVAL;
+}
+
/*
* Add new device to list of registered devices
*
* Returns:
- * 1 - first time device is seen
- * 0 - device already known
- * < 0 - error
+ * the device pointer that was just added or updated on success
+ * an error pointer on failure
*/
-static noinline int device_list_add(const char *path,
- struct btrfs_super_block *disk_super,
- u64 devid, struct btrfs_fs_devices **fs_devices_ret)
+static noinline struct btrfs_device *device_list_add(const char *path,
+ struct btrfs_super_block *disk_super)
{
struct btrfs_device *device;
struct btrfs_fs_devices *fs_devices;
struct rcu_string *name;
- int ret = 0;
u64 found_transid = btrfs_super_generation(disk_super);
+ u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
fs_devices = find_fsid(disk_super->fsid);
if (!fs_devices) {
fs_devices = alloc_fs_devices(disk_super->fsid);
if (IS_ERR(fs_devices))
- return PTR_ERR(fs_devices);
+ return ERR_CAST(fs_devices);
list_add(&fs_devices->list, &fs_uuids);
@@ -620,20 +752,19 @@ static noinline int device_list_add(const char *path,
if (!device) {
if (fs_devices->opened)
- return -EBUSY;
+ return ERR_PTR(-EBUSY);
device = btrfs_alloc_device(NULL, &devid,
disk_super->dev_item.uuid);
if (IS_ERR(device)) {
/* we can safely leave the fs_devices entry around */
- return PTR_ERR(device);
+ return device;
}
name = rcu_string_strdup(path, GFP_NOFS);
if (!name) {
- bio_put(device->flush_bio);
- kfree(device);
- return -ENOMEM;
+ free_device(device);
+ return ERR_PTR(-ENOMEM);
}
rcu_assign_pointer(device->name, name);
@@ -642,8 +773,16 @@ static noinline int device_list_add(const char *path,
fs_devices->num_devices++;
mutex_unlock(&fs_devices->device_list_mutex);
- ret = 1;
device->fs_devices = fs_devices;
+ btrfs_free_stale_devices(path, device);
+
+ if (disk_super->label[0])
+ pr_info("BTRFS: device label %s devid %llu transid %llu %s\n",
+ disk_super->label, devid, found_transid, path);
+ else
+ pr_info("BTRFS: device fsid %pU devid %llu transid %llu %s\n",
+ disk_super->fsid, devid, found_transid, path);
+
} else if (!device->name || strcmp(device->name->str, path)) {
/*
* When FS is already mounted.
@@ -679,17 +818,17 @@ static noinline int device_list_add(const char *path,
* with larger generation number or the last-in if
* generation are equal.
*/
- return -EEXIST;
+ return ERR_PTR(-EEXIST);
}
name = rcu_string_strdup(path, GFP_NOFS);
if (!name)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
rcu_string_free(device->name);
rcu_assign_pointer(device->name, name);
- if (device->missing) {
+ if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
fs_devices->missing_devices--;
- device->missing = 0;
+ clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
}
}
@@ -702,16 +841,9 @@ static noinline int device_list_add(const char *path,
if (!fs_devices->opened)
device->generation = found_transid;
- /*
- * if there is new btrfs on an already registered device,
- * then remove the stale device entry.
- */
- if (ret > 0)
- btrfs_free_stale_device(device);
-
- *fs_devices_ret = fs_devices;
+ fs_devices->total_devices = btrfs_super_num_devices(disk_super);
- return ret;
+ return device;
}
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
@@ -744,8 +876,7 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
name = rcu_string_strdup(orig_dev->name->str,
GFP_KERNEL);
if (!name) {
- bio_put(device->flush_bio);
- kfree(device);
+ free_device(device);
goto error;
}
rcu_assign_pointer(device->name, name);
@@ -772,10 +903,12 @@ void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices, int step)
again:
/* This is the initialized path, it is safe to release the devices. */
list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
- if (device->in_fs_metadata) {
- if (!device->is_tgtdev_for_dev_replace &&
- (!latest_dev ||
- device->generation > latest_dev->generation)) {
+ if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
+ &device->dev_state)) {
+ if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
+ &device->dev_state) &&
+ (!latest_dev ||
+ device->generation > latest_dev->generation)) {
latest_dev = device;
}
continue;
@@ -792,7 +925,8 @@ again:
* not, which means whether this device is
* used or whether it should be removed.
*/
- if (step == 0 || device->is_tgtdev_for_dev_replace) {
+ if (step == 0 || test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
+ &device->dev_state)) {
continue;
}
}
@@ -801,17 +935,16 @@ again:
device->bdev = NULL;
fs_devices->open_devices--;
}
- if (device->writeable) {
+ if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
list_del_init(&device->dev_alloc_list);
- device->writeable = 0;
- if (!device->is_tgtdev_for_dev_replace)
+ clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
+ if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
+ &device->dev_state))
fs_devices->rw_devices--;
}
list_del_init(&device->dev_list);
fs_devices->num_devices--;
- rcu_string_free(device->name);
- bio_put(device->flush_bio);
- kfree(device);
+ free_device(device);
}
if (fs_devices->seed) {
@@ -824,35 +957,25 @@ again:
mutex_unlock(&uuid_mutex);
}
-static void __free_device(struct work_struct *work)
-{
- struct btrfs_device *device;
-
- device = container_of(work, struct btrfs_device, rcu_work);
- rcu_string_free(device->name);
- bio_put(device->flush_bio);
- kfree(device);
-}
-
-static void free_device(struct rcu_head *head)
+static void free_device_rcu(struct rcu_head *head)
{
struct btrfs_device *device;
device = container_of(head, struct btrfs_device, rcu);
-
- INIT_WORK(&device->rcu_work, __free_device);
- schedule_work(&device->rcu_work);
+ free_device(device);
}
static void btrfs_close_bdev(struct btrfs_device *device)
{
- if (device->bdev && device->writeable) {
+ if (!device->bdev)
+ return;
+
+ if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
sync_blockdev(device->bdev);
invalidate_bdev(device->bdev);
}
- if (device->bdev)
- blkdev_put(device->bdev, device->mode);
+ blkdev_put(device->bdev, device->mode);
}
static void btrfs_prepare_close_one_device(struct btrfs_device *device)
@@ -864,13 +987,13 @@ static void btrfs_prepare_close_one_device(struct btrfs_device *device)
if (device->bdev)
fs_devices->open_devices--;
- if (device->writeable &&
+ if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
device->devid != BTRFS_DEV_REPLACE_DEVID) {
list_del_init(&device->dev_alloc_list);
fs_devices->rw_devices--;
}
- if (device->missing)
+ if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
fs_devices->missing_devices--;
new_device = btrfs_alloc_device(NULL, &device->devid,
@@ -916,7 +1039,7 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
struct btrfs_device, dev_list);
list_del(&device->dev_list);
btrfs_close_bdev(device);
- call_rcu(&device->rcu, free_device);
+ call_rcu(&device->rcu, free_device_rcu);
}
WARN_ON(fs_devices->open_devices);
@@ -946,93 +1069,32 @@ int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
__btrfs_close_devices(fs_devices);
free_fs_devices(fs_devices);
}
- /*
- * Wait for rcu kworkers under __btrfs_close_devices
- * to finish all blkdev_puts so device is really
- * free when umount is done.
- */
- rcu_barrier();
return ret;
}
static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
fmode_t flags, void *holder)
{
- struct request_queue *q;
- struct block_device *bdev;
struct list_head *head = &fs_devices->devices;
struct btrfs_device *device;
struct btrfs_device *latest_dev = NULL;
- struct buffer_head *bh;
- struct btrfs_super_block *disk_super;
- u64 devid;
- int seeding = 1;
int ret = 0;
flags |= FMODE_EXCL;
list_for_each_entry(device, head, dev_list) {
- if (device->bdev)
- continue;
- if (!device->name)
- continue;
-
/* Just open everything we can; ignore failures here */
- if (btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
- &bdev, &bh))
+ if (btrfs_open_one_device(fs_devices, device, flags, holder))
continue;
- disk_super = (struct btrfs_super_block *)bh->b_data;
- devid = btrfs_stack_device_id(&disk_super->dev_item);
- if (devid != device->devid)
- goto error_brelse;
-
- if (memcmp(device->uuid, disk_super->dev_item.uuid,
- BTRFS_UUID_SIZE))
- goto error_brelse;
-
- device->generation = btrfs_super_generation(disk_super);
if (!latest_dev ||
device->generation > latest_dev->generation)
latest_dev = device;
-
- if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
- device->writeable = 0;
- } else {
- device->writeable = !bdev_read_only(bdev);
- seeding = 0;
- }
-
- q = bdev_get_queue(bdev);
- if (blk_queue_discard(q))
- device->can_discard = 1;
- if (!blk_queue_nonrot(q))
- fs_devices->rotating = 1;
-
- device->bdev = bdev;
- device->in_fs_metadata = 0;
- device->mode = flags;
-
- fs_devices->open_devices++;
- if (device->writeable &&
- device->devid != BTRFS_DEV_REPLACE_DEVID) {
- fs_devices->rw_devices++;
- list_add(&device->dev_alloc_list,
- &fs_devices->alloc_list);
- }
- brelse(bh);
- continue;
-
-error_brelse:
- brelse(bh);
- blkdev_put(bdev, flags);
- continue;
}
if (fs_devices->open_devices == 0) {
ret = -EINVAL;
goto out;
}
- fs_devices->seeding = seeding;
fs_devices->opened = 1;
fs_devices->latest_bdev = latest_dev->bdev;
fs_devices->total_rw_bytes = 0;
@@ -1116,12 +1178,10 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
struct btrfs_fs_devices **fs_devices_ret)
{
struct btrfs_super_block *disk_super;
+ struct btrfs_device *device;
struct block_device *bdev;
struct page *page;
- int ret = -EINVAL;
- u64 devid;
- u64 transid;
- u64 total_devices;
+ int ret = 0;
u64 bytenr;
/*
@@ -1140,26 +1200,16 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
goto error;
}
- if (btrfs_read_disk_super(bdev, bytenr, &page, &disk_super))
+ if (btrfs_read_disk_super(bdev, bytenr, &page, &disk_super)) {
+ ret = -EINVAL;
goto error_bdev_put;
-
- devid = btrfs_stack_device_id(&disk_super->dev_item);
- transid = btrfs_super_generation(disk_super);
- total_devices = btrfs_super_num_devices(disk_super);
-
- ret = device_list_add(path, disk_super, devid, fs_devices_ret);
- if (ret > 0) {
- if (disk_super->label[0]) {
- pr_info("BTRFS: device label %s ", disk_super->label);
- } else {
- pr_info("BTRFS: device fsid %pU ", disk_super->fsid);
- }
-
- pr_cont("devid %llu transid %llu %s\n", devid, transid, path);
- ret = 0;
}
- if (!ret && fs_devices_ret)
- (*fs_devices_ret)->total_devices = total_devices;
+
+ device = device_list_add(path, disk_super);
+ if (IS_ERR(device))
+ ret = PTR_ERR(device);
+ else
+ *fs_devices_ret = device->fs_devices;
btrfs_release_disk_super(page);
@@ -1185,7 +1235,8 @@ int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
*length = 0;
- if (start >= device->total_bytes || device->is_tgtdev_for_dev_replace)
+ if (start >= device->total_bytes ||
+ test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
return 0;
path = btrfs_alloc_path();
@@ -1363,7 +1414,8 @@ int find_free_dev_extent_start(struct btrfs_transaction *transaction,
max_hole_size = 0;
again:
- if (search_start >= search_end || device->is_tgtdev_for_dev_replace) {
+ if (search_start >= search_end ||
+ test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
ret = -ENOSPC;
goto out;
}
@@ -1570,8 +1622,8 @@ static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
struct extent_buffer *leaf;
struct btrfs_key key;
- WARN_ON(!device->in_fs_metadata);
- WARN_ON(device->is_tgtdev_for_dev_replace);
+ WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state));
+ WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
@@ -1661,7 +1713,7 @@ error:
* the device information is stored in the chunk root
* the btrfs_device struct should be fully filled in
*/
-static int btrfs_add_device(struct btrfs_trans_handle *trans,
+static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
struct btrfs_device *device)
{
@@ -1817,7 +1869,8 @@ static struct btrfs_device * btrfs_find_next_active_device(
list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
if (next_device != device &&
- !next_device->missing && next_device->bdev)
+ !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
+ && next_device->bdev)
return next_device;
}
@@ -1858,6 +1911,7 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
u64 num_devices;
int ret = 0;
+ mutex_lock(&fs_info->volume_mutex);
mutex_lock(&uuid_mutex);
num_devices = fs_info->fs_devices->num_devices;
@@ -1877,17 +1931,18 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
if (ret)
goto out;
- if (device->is_tgtdev_for_dev_replace) {
+ if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
ret = BTRFS_ERROR_DEV_TGT_REPLACE;
goto out;
}
- if (device->writeable && fs_info->fs_devices->rw_devices == 1) {
+ if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
+ fs_info->fs_devices->rw_devices == 1) {
ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
goto out;
}
- if (device->writeable) {
+ if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
mutex_lock(&fs_info->chunk_mutex);
list_del_init(&device->dev_alloc_list);
device->fs_devices->rw_devices--;
@@ -1909,7 +1964,7 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
if (ret)
goto error_undo;
- device->in_fs_metadata = 0;
+ clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
btrfs_scrub_cancel_dev(fs_info, device);
/*
@@ -1929,7 +1984,7 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
device->fs_devices->num_devices--;
device->fs_devices->total_devices--;
- if (device->missing)
+ if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
device->fs_devices->missing_devices--;
btrfs_assign_next_active_device(fs_info, device, NULL);
@@ -1949,11 +2004,11 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
* the devices list. All that's left is to zero out the old
* supers and free the device.
*/
- if (device->writeable)
+ if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
btrfs_scratch_superblocks(device->bdev, device->name->str);
btrfs_close_bdev(device);
- call_rcu(&device->rcu, free_device);
+ call_rcu(&device->rcu, free_device_rcu);
if (cur_devices->open_devices == 0) {
struct btrfs_fs_devices *fs_devices;
@@ -1972,10 +2027,11 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
out:
mutex_unlock(&uuid_mutex);
+ mutex_unlock(&fs_info->volume_mutex);
return ret;
error_undo:
- if (device->writeable) {
+ if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
mutex_lock(&fs_info->chunk_mutex);
list_add(&device->dev_alloc_list,
&fs_info->fs_devices->alloc_list);
@@ -2003,10 +2059,10 @@ void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info,
list_del_rcu(&srcdev->dev_list);
list_del(&srcdev->dev_alloc_list);
fs_devices->num_devices--;
- if (srcdev->missing)
+ if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
fs_devices->missing_devices--;
- if (srcdev->writeable)
+ if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
fs_devices->rw_devices--;
if (srcdev->bdev)
@@ -2018,13 +2074,13 @@ void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
{
struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
- if (srcdev->writeable) {
+ if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state)) {
/* zero out the old super if it is writable */
btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str);
}
btrfs_close_bdev(srcdev);
- call_rcu(&srcdev->rcu, free_device);
+ call_rcu(&srcdev->rcu, free_device_rcu);
/* if this is no devs we rather delete the fs_devices */
if (!fs_devices->num_devices) {
@@ -2083,7 +2139,7 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str);
btrfs_close_bdev(tgtdev);
- call_rcu(&tgtdev->rcu, free_device);
+ call_rcu(&tgtdev->rcu, free_device_rcu);
}
static int btrfs_find_device_by_path(struct btrfs_fs_info *fs_info,
@@ -2128,7 +2184,8 @@ int btrfs_find_device_missing_or_by_path(struct btrfs_fs_info *fs_info,
* is held by the caller.
*/
list_for_each_entry(tmp, devices, dev_list) {
- if (tmp->in_fs_metadata && !tmp->bdev) {
+ if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
+ &tmp->dev_state) && !tmp->bdev) {
*device = tmp;
break;
}
@@ -2357,26 +2414,19 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
name = rcu_string_strdup(device_path, GFP_KERNEL);
if (!name) {
- bio_put(device->flush_bio);
- kfree(device);
ret = -ENOMEM;
- goto error;
+ goto error_free_device;
}
rcu_assign_pointer(device->name, name);
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
- rcu_string_free(device->name);
- bio_put(device->flush_bio);
- kfree(device);
ret = PTR_ERR(trans);
- goto error;
+ goto error_free_device;
}
q = bdev_get_queue(bdev);
- if (blk_queue_discard(q))
- device->can_discard = 1;
- device->writeable = 1;
+ set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
device->generation = trans->transid;
device->io_width = fs_info->sectorsize;
device->io_align = fs_info->sectorsize;
@@ -2387,8 +2437,8 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
device->commit_total_bytes = device->total_bytes;
device->fs_info = fs_info;
device->bdev = bdev;
- device->in_fs_metadata = 1;
- device->is_tgtdev_for_dev_replace = 0;
+ set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
+ clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
device->mode = FMODE_EXCL;
device->dev_stats_valid = 1;
set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
@@ -2449,7 +2499,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
}
}
- ret = btrfs_add_device(trans, fs_info, device);
+ ret = btrfs_add_dev_item(trans, fs_info, device);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto error_sysfs;
@@ -2510,9 +2560,8 @@ error_trans:
sb->s_flags |= SB_RDONLY;
if (trans)
btrfs_end_transaction(trans);
- rcu_string_free(device->name);
- bio_put(device->flush_bio);
- kfree(device);
+error_free_device:
+ free_device(device);
error:
blkdev_put(bdev, FMODE_EXCL);
if (seeding_dev && !unlocked) {
@@ -2527,7 +2576,6 @@ int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
struct btrfs_device *srcdev,
struct btrfs_device **device_out)
{
- struct request_queue *q;
struct btrfs_device *device;
struct block_device *bdev;
struct list_head *devices;
@@ -2578,18 +2626,14 @@ int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
name = rcu_string_strdup(device_path, GFP_KERNEL);
if (!name) {
- bio_put(device->flush_bio);
- kfree(device);
+ free_device(device);
ret = -ENOMEM;
goto error;
}
rcu_assign_pointer(device->name, name);
- q = bdev_get_queue(bdev);
- if (blk_queue_discard(q))
- device->can_discard = 1;
mutex_lock(&fs_info->fs_devices->device_list_mutex);
- device->writeable = 1;
+ set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
device->generation = 0;
device->io_width = fs_info->sectorsize;
device->io_align = fs_info->sectorsize;
@@ -2602,8 +2646,8 @@ int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
device->commit_bytes_used = device->bytes_used;
device->fs_info = fs_info;
device->bdev = bdev;
- device->in_fs_metadata = 1;
- device->is_tgtdev_for_dev_replace = 1;
+ set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
+ set_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
device->mode = FMODE_EXCL;
device->dev_stats_valid = 1;
set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
@@ -2631,7 +2675,7 @@ void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
tgtdev->io_align = sectorsize;
tgtdev->sector_size = sectorsize;
tgtdev->fs_info = fs_info;
- tgtdev->in_fs_metadata = 1;
+ set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &tgtdev->dev_state);
}
static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
@@ -2689,7 +2733,7 @@ int btrfs_grow_device(struct btrfs_trans_handle *trans,
u64 old_total;
u64 diff;
- if (!device->writeable)
+ if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
return -EACCES;
new_size = round_down(new_size, fs_info->sectorsize);
@@ -2699,7 +2743,7 @@ int btrfs_grow_device(struct btrfs_trans_handle *trans,
diff = round_down(new_size - device->total_bytes, fs_info->sectorsize);
if (new_size <= device->total_bytes ||
- device->is_tgtdev_for_dev_replace) {
+ test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
mutex_unlock(&fs_info->chunk_mutex);
return -EINVAL;
}
@@ -3043,6 +3087,48 @@ error:
return ret;
}
+/*
+ * return 1 : allocated a data chunk successfully,
+ * return <0: error while allocating a data chunk,
+ * return 0 : no need to allocate a data chunk.
+ */
+static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
+ u64 chunk_offset)
+{
+ struct btrfs_block_group_cache *cache;
+ u64 bytes_used;
+ u64 chunk_type;
+
+ cache = btrfs_lookup_block_group(fs_info, chunk_offset);
+ ASSERT(cache);
+ chunk_type = cache->flags;
+ btrfs_put_block_group(cache);
+
+ if (chunk_type & BTRFS_BLOCK_GROUP_DATA) {
+ spin_lock(&fs_info->data_sinfo->lock);
+ bytes_used = fs_info->data_sinfo->bytes_used;
+ spin_unlock(&fs_info->data_sinfo->lock);
+
+ if (!bytes_used) {
+ struct btrfs_trans_handle *trans;
+ int ret;
+
+ trans = btrfs_join_transaction(fs_info->tree_root);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+
+ ret = btrfs_force_chunk_alloc(trans, fs_info,
+ BTRFS_BLOCK_GROUP_DATA);
+ btrfs_end_transaction(trans);
+ if (ret < 0)
+ return ret;
+
+ return 1;
+ }
+ }
+ return 0;
+}
+
static int insert_balance_item(struct btrfs_fs_info *fs_info,
struct btrfs_balance_control *bctl)
{
@@ -3501,7 +3587,6 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info)
u32 count_meta = 0;
u32 count_sys = 0;
int chunk_reserved = 0;
- u64 bytes_used = 0;
/* step one make some room on all the devices */
devices = &fs_info->fs_devices->devices;
@@ -3509,10 +3594,10 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info)
old_size = btrfs_device_get_total_bytes(device);
size_to_free = div_factor(old_size, 1);
size_to_free = min_t(u64, size_to_free, SZ_1M);
- if (!device->writeable ||
+ if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) ||
btrfs_device_get_total_bytes(device) -
btrfs_device_get_bytes_used(device) > size_to_free ||
- device->is_tgtdev_for_dev_replace)
+ test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
continue;
ret = btrfs_shrink_device(device, old_size - size_to_free);
@@ -3660,28 +3745,21 @@ again:
goto loop;
}
- ASSERT(fs_info->data_sinfo);
- spin_lock(&fs_info->data_sinfo->lock);
- bytes_used = fs_info->data_sinfo->bytes_used;
- spin_unlock(&fs_info->data_sinfo->lock);
-
- if ((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
- !chunk_reserved && !bytes_used) {
- trans = btrfs_start_transaction(chunk_root, 0);
- if (IS_ERR(trans)) {
- mutex_unlock(&fs_info->delete_unused_bgs_mutex);
- ret = PTR_ERR(trans);
- goto error;
- }
-
- ret = btrfs_force_chunk_alloc(trans, fs_info,
- BTRFS_BLOCK_GROUP_DATA);
- btrfs_end_transaction(trans);
+ if (!chunk_reserved) {
+ /*
+ * We may be relocating the only data chunk we have,
+			 * which could potentially end up losing the data's
+			 * raid profile, so let's allocate an empty one in
+ * advance.
+ */
+ ret = btrfs_may_alloc_data_chunk(fs_info,
+ found_key.offset);
if (ret < 0) {
mutex_unlock(&fs_info->delete_unused_bgs_mutex);
goto error;
+ } else if (ret == 1) {
+ chunk_reserved = 1;
}
- chunk_reserved = 1;
}
ret = btrfs_relocate_chunk(fs_info, found_key.offset);
@@ -4380,7 +4458,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
new_size = round_down(new_size, fs_info->sectorsize);
diff = round_down(old_size - new_size, fs_info->sectorsize);
- if (device->is_tgtdev_for_dev_replace)
+ if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
return -EINVAL;
path = btrfs_alloc_path();
@@ -4392,7 +4470,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
mutex_lock(&fs_info->chunk_mutex);
btrfs_device_set_total_bytes(device, new_size);
- if (device->writeable) {
+ if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
device->fs_devices->total_rw_bytes -= diff;
atomic64_sub(diff, &fs_info->free_chunk_space);
}
@@ -4444,6 +4522,18 @@ again:
chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
btrfs_release_path(path);
+ /*
+ * We may be relocating the only data chunk we have,
+ * which could potentially end up losing the data's
+ * raid profile, so let's allocate an empty one in
+ * advance.
+ */
+ ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
+ if (ret < 0) {
+ mutex_unlock(&fs_info->delete_unused_bgs_mutex);
+ goto done;
+ }
+
ret = btrfs_relocate_chunk(fs_info, chunk_offset);
mutex_unlock(&fs_info->delete_unused_bgs_mutex);
if (ret && ret != -ENOSPC)
@@ -4517,7 +4607,7 @@ done:
if (ret) {
mutex_lock(&fs_info->chunk_mutex);
btrfs_device_set_total_bytes(device, old_size);
- if (device->writeable)
+ if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
device->fs_devices->total_rw_bytes += diff;
atomic64_add(diff, &fs_info->free_chunk_space);
mutex_unlock(&fs_info->chunk_mutex);
@@ -4677,14 +4767,15 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
u64 max_avail;
u64 dev_offset;
- if (!device->writeable) {
+ if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
WARN(1, KERN_ERR
"BTRFS: read-only device in alloc_list\n");
continue;
}
- if (!device->in_fs_metadata ||
- device->is_tgtdev_for_dev_replace)
+ if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
+ &device->dev_state) ||
+ test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
continue;
if (device->total_bytes > device->bytes_used)
@@ -5032,12 +5123,13 @@ int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
map = em->map_lookup;
for (i = 0; i < map->num_stripes; i++) {
- if (map->stripes[i].dev->missing) {
+ if (test_bit(BTRFS_DEV_STATE_MISSING,
+ &map->stripes[i].dev->dev_state)) {
miss_ndevs++;
continue;
}
-
- if (!map->stripes[i].dev->writeable) {
+ if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
+ &map->stripes[i].dev->dev_state)) {
readonly = 1;
goto end;
}
@@ -5103,7 +5195,14 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
ret = 2;
else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
- ret = 3;
+ /*
+ * There could be two corrupted data stripes, so we need
+ * to retry in a loop in order to rebuild the correct data.
+ *
+ * Fail a stripe at a time on every retry except the
+ * stripe under reconstruction.
+ */
+ ret = map->num_stripes;
else
ret = 1;
free_extent_map(em);
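Returning map->num_stripes for RAID6 matters because read-repair callers iterate mirror numbers up to btrfs_num_copies(); with more candidate copies, each retry can fail a different stripe until reconstruction succeeds. A hedged sketch of that retry pattern, where read_one_copy() is a hypothetical stand-in for the real read path:

	int mirror;
	int num_copies = btrfs_num_copies(fs_info, logical, len);

	for (mirror = 1; mirror <= num_copies; mirror++) {
		/* each attempt excludes a different stripe, so a later
		 * iteration can rebuild data the earlier ones could not */
		if (read_one_copy(fs_info, logical, len, mirror) == 0)
			break;	/* read (or rebuilt) good data */
	}
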
@@ -6003,15 +6102,14 @@ static void btrfs_end_bio(struct bio *bio)
dev = bbio->stripes[stripe_index].dev;
if (dev->bdev) {
if (bio_op(bio) == REQ_OP_WRITE)
- btrfs_dev_stat_inc(dev,
+ btrfs_dev_stat_inc_and_print(dev,
BTRFS_DEV_STAT_WRITE_ERRS);
else
- btrfs_dev_stat_inc(dev,
+ btrfs_dev_stat_inc_and_print(dev,
BTRFS_DEV_STAT_READ_ERRS);
if (bio->bi_opf & REQ_PREFLUSH)
- btrfs_dev_stat_inc(dev,
+ btrfs_dev_stat_inc_and_print(dev,
BTRFS_DEV_STAT_FLUSH_ERRS);
- btrfs_dev_stat_print_on_error(dev);
}
}
}
@@ -6061,16 +6159,15 @@ static noinline void btrfs_schedule_bio(struct btrfs_device *device,
int should_queue = 1;
struct btrfs_pending_bios *pending_bios;
- if (device->missing || !device->bdev) {
+ if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state) ||
+ !device->bdev) {
bio_io_error(bio);
return;
}
/* don't bother with additional async steps for reads, right now */
if (bio_op(bio) == REQ_OP_READ) {
- bio_get(bio);
btrfsic_submit_bio(bio);
- bio_put(bio);
return;
}
@@ -6207,7 +6304,8 @@ blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
dev = bbio->stripes[dev_nr].dev;
if (!dev || !dev->bdev ||
- (bio_op(first_bio) == REQ_OP_WRITE && !dev->writeable)) {
+ (bio_op(first_bio) == REQ_OP_WRITE &&
+ !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
bbio_error(bbio, first_bio, logical);
continue;
}
@@ -6256,7 +6354,7 @@ static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
device->fs_devices = fs_devices;
fs_devices->num_devices++;
- device->missing = 1;
+ set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
fs_devices->missing_devices++;
return device;
@@ -6272,8 +6370,8 @@ static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
* is generated.
*
* Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
- * on error. Returned struct is not linked onto any lists and can be
- * destroyed with kfree() right away.
+ * on error. Returned struct is not linked onto any lists and must be
+ * destroyed with free_device().
*/
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
const u64 *devid,
@@ -6296,8 +6394,7 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
ret = find_next_devid(fs_info, &tmp);
if (ret) {
- bio_put(dev->flush_bio);
- kfree(dev);
+ free_device(dev);
return ERR_PTR(ret);
}
}
@@ -6476,7 +6573,9 @@ static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
}
btrfs_report_missing_device(fs_info, devid, uuid, false);
}
- map->stripes[i].dev->in_fs_metadata = 1;
+ set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
+ &(map->stripes[i].dev->dev_state));
+
}
write_lock(&map_tree->map_tree.lock);
@@ -6505,7 +6604,7 @@ static void fill_device_from_item(struct extent_buffer *leaf,
device->io_width = btrfs_device_io_width(leaf, dev_item);
device->sector_size = btrfs_device_sector_size(leaf, dev_item);
WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
- device->is_tgtdev_for_dev_replace = 0;
+ clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
ptr = btrfs_device_uuid(dev_item);
read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
@@ -6617,7 +6716,8 @@ static int read_one_dev(struct btrfs_fs_info *fs_info,
dev_uuid, false);
}
- if(!device->bdev && !device->missing) {
+ if (!device->bdev &&
+ !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
/*
* this happens when a device that was properly set up
* in the device info lists suddenly goes bad.
* device->bdev is NULL, and so we have to set
* the BTRFS_DEV_STATE_MISSING bit here
*/
device->fs_devices->missing_devices++;
- device->missing = 1;
+ set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
}
/* Move the device to its own fs_devices */
if (device->fs_devices != fs_devices) {
- ASSERT(device->missing);
+ ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
+ &device->dev_state));
list_move(&device->dev_list, &fs_devices->devices);
device->fs_devices->num_devices--;
@@ -6644,15 +6745,16 @@ static int read_one_dev(struct btrfs_fs_info *fs_info,
}
if (device->fs_devices != fs_info->fs_devices) {
- BUG_ON(device->writeable);
+ BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
if (device->generation !=
btrfs_device_generation(leaf, dev_item))
return -EINVAL;
}
fill_device_from_item(leaf, dev_item, device);
- device->in_fs_metadata = 1;
- if (device->writeable && !device->is_tgtdev_for_dev_replace) {
+ set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
+ if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
+ !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
device->fs_devices->total_rw_bytes += device->total_bytes;
atomic64_add(device->total_bytes - device->bytes_used,
&fs_info->free_chunk_space);
@@ -6784,10 +6886,13 @@ out_short_read:
/*
* Check if all chunks in the fs are OK for read-write degraded mount
*
+ * If @failing_dev is specified, it is accounted as missing.
+ *
* Return true if all chunks meet the minimal RW mount requirements.
* Return false if any chunk doesn't meet the minimal RW mount requirements.
*/
-bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info)
+bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
+ struct btrfs_device *failing_dev)
{
struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
struct extent_map *em;
@@ -6815,12 +6920,16 @@ bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info)
for (i = 0; i < map->num_stripes; i++) {
struct btrfs_device *dev = map->stripes[i].dev;
- if (!dev || !dev->bdev || dev->missing ||
+ if (!dev || !dev->bdev ||
+ test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
dev->last_flush_error)
missing++;
+ else if (failing_dev && failing_dev == dev)
+ missing++;
}
if (missing > max_tolerated) {
- btrfs_warn(fs_info,
+ if (!failing_dev)
+ btrfs_warn(fs_info,
"chunk %llu missing %d devices, max tolerance is %d for writeable mount",
em->start, missing, max_tolerated);
free_extent_map(em);
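The new @failing_dev argument lets a caller ask whether the filesystem would still satisfy the minimal RW requirements if a given device dropped out, before actually failing it. A minimal sketch, assuming dev is the device about to be marked failed (the error-handling call is illustrative):

	if (!btrfs_check_rw_degradable(fs_info, dev)) {
		/*
		 * Losing this device would push some chunk past its
		 * tolerance; abort writes rather than corrupt data.
		 */
		btrfs_handle_fs_error(fs_info, -EIO,
				"cannot tolerate losing device %llu",
				dev->devid);
	}
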
@@ -7091,10 +7200,24 @@ int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
mutex_lock(&fs_devices->device_list_mutex);
list_for_each_entry(device, &fs_devices->devices, dev_list) {
- if (!device->dev_stats_valid || !btrfs_dev_stats_dirty(device))
+ stats_cnt = atomic_read(&device->dev_stats_ccnt);
+ if (!device->dev_stats_valid || stats_cnt == 0)
continue;
- stats_cnt = atomic_read(&device->dev_stats_ccnt);
+
+ /*
+ * There is a LOAD-LOAD control dependency between the value of
+ * dev_stats_ccnt and updating the on-disk values which requires
+ * reading the in-memory counters. Such control dependencies
+ * require explicit read memory barriers.
+ *
+ * This memory barrier pairs with smp_mb__before_atomic in
+ * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
+ * barrier implied by atomic_xchg in
+ * btrfs_dev_stats_read_and_reset.
+ */
+ smp_rmb();
+
ret = update_dev_stat_item(trans, fs_info, device);
if (!ret)
atomic_sub(stats_cnt, &device->dev_stats_ccnt);
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index ff15208344a7..28c28eeadff3 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -47,6 +47,12 @@ struct btrfs_pending_bios {
#define btrfs_device_data_ordered_init(device) do { } while (0)
#endif
+#define BTRFS_DEV_STATE_WRITEABLE (0)
+#define BTRFS_DEV_STATE_IN_FS_METADATA (1)
+#define BTRFS_DEV_STATE_MISSING (2)
+#define BTRFS_DEV_STATE_REPLACE_TGT (3)
+#define BTRFS_DEV_STATE_FLUSH_SENT (4)
+
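These bit numbers replace the separate int flags removed from struct btrfs_device below; every access becomes an atomic bitop on the single dev_state word. The conversion pattern, as applied throughout this patch:

	/* old: device->writeable = 1;  device->missing = 0; */
	set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);

	/* old: if (device->is_tgtdev_for_dev_replace) ... */
	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
		return -EINVAL;
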
struct btrfs_device {
struct list_head dev_list;
struct list_head dev_alloc_list;
@@ -69,11 +75,7 @@ struct btrfs_device {
/* the mode sent to blkdev_get */
fmode_t mode;
- int writeable;
- int in_fs_metadata;
- int missing;
- int can_discard;
- int is_tgtdev_for_dev_replace;
+ unsigned long dev_state;
blk_status_t last_flush_error;
int flush_bio_sent;
@@ -129,14 +131,12 @@ struct btrfs_device {
struct completion flush_wait;
/* per-device scrub information */
- struct scrub_ctx *scrub_device;
+ struct scrub_ctx *scrub_ctx;
struct btrfs_work work;
struct rcu_head rcu;
- struct work_struct rcu_work;
/* readahead state */
- spinlock_t reada_lock;
atomic_t reada_in_flight;
u64 reada_next;
struct reada_zone *reada_curr_zone;
@@ -489,15 +489,16 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 chunk_offset);
-static inline int btrfs_dev_stats_dirty(struct btrfs_device *dev)
-{
- return atomic_read(&dev->dev_stats_ccnt);
-}
-
static inline void btrfs_dev_stat_inc(struct btrfs_device *dev,
int index)
{
atomic_inc(dev->dev_stat_values + index);
+ /*
+ * This memory barrier orders stores updating statistics before stores
+ * updating dev_stats_ccnt.
+ *
+ * It pairs with smp_rmb() in btrfs_run_dev_stats().
+ */
smp_mb__before_atomic();
atomic_inc(&dev->dev_stats_ccnt);
}
@@ -514,7 +515,13 @@ static inline int btrfs_dev_stat_read_and_reset(struct btrfs_device *dev,
int ret;
ret = atomic_xchg(dev->dev_stat_values + index, 0);
- smp_mb__before_atomic();
+ /*
+ * atomic_xchg implies a full memory barrier as per atomic_t.txt:
+ * - RMW operations that have a return value are fully ordered;
+ *
+ * This implicit memory barrier is paired with the smp_rmb in
+ * btrfs_run_dev_stats().
+ */
atomic_inc(&dev->dev_stats_ccnt);
return ret;
}
@@ -523,6 +530,12 @@ static inline void btrfs_dev_stat_set(struct btrfs_device *dev,
int index, unsigned long val)
{
atomic_set(dev->dev_stat_values + index, val);
+ /*
+ * This memory barrier orders stores updating statistics before stores
+ * updating dev_stats_ccnt.
+ *
+ * It pairs with smp_rmb() in btrfs_run_dev_stats().
+ */
smp_mb__before_atomic();
atomic_inc(&dev->dev_stats_ccnt);
}
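Taken together, the stat helpers above and btrfs_run_dev_stats() form a publish/consume pairing: stores to the stat values must be visible before the ccnt increment that advertises them, and a reader that observes the ccnt must also observe the stats. A condensed sketch of the ordering contract (the pattern, not the literal functions):

	/* producer side (btrfs_dev_stat_inc / btrfs_dev_stat_set) */
	atomic_inc(dev->dev_stat_values + index);	/* A: update stat   */
	smp_mb__before_atomic();			/* order A before B */
	atomic_inc(&dev->dev_stats_ccnt);		/* B: publish       */

	/* consumer side (btrfs_run_dev_stats) */
	stats_cnt = atomic_read(&device->dev_stats_ccnt); /* B': observe   */
	smp_rmb();					/* order B' before A' */
	/* A': read dev_stat_values and write them to disk */
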
@@ -540,7 +553,7 @@ void btrfs_update_commit_device_bytes_used(struct btrfs_fs_info *fs_info,
struct list_head *btrfs_get_fs_uuids(void);
void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info);
void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info);
-
-bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info);
+bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
+ struct btrfs_device *failing_dev);
#endif
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index 2c7e53f9ff1b..de7d072c78ef 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -23,6 +23,7 @@
#include <linux/xattr.h>
#include <linux/security.h>
#include <linux/posix_acl_xattr.h>
+#include <linux/iversion.h>
#include "ctree.h"
#include "btrfs_inode.h"
#include "transaction.h"
@@ -267,7 +268,6 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
struct btrfs_key key;
struct inode *inode = d_inode(dentry);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_path *path;
int ret = 0;
@@ -336,11 +336,6 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
u32 this_len = sizeof(*di) + name_len + data_len;
unsigned long name_ptr = (unsigned long)(di + 1);
- if (verify_dir_item(fs_info, leaf, slot, di)) {
- ret = -EIO;
- goto err;
- }
-
total_size += name_len + 1;
/*
* We are just looking for how big our buffer needs to
diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c
index 17f2dd8fddb8..01a4eab602a3 100644
--- a/fs/btrfs/zstd.c
+++ b/fs/btrfs/zstd.c
@@ -43,6 +43,8 @@ struct workspace {
size_t size;
char *buf;
struct list_head list;
+ ZSTD_inBuffer in_buf;
+ ZSTD_outBuffer out_buf;
};
static void zstd_free_workspace(struct list_head *ws)
@@ -94,8 +96,6 @@ static int zstd_compress_pages(struct list_head *ws,
int nr_pages = 0;
struct page *in_page = NULL; /* The current page to read */
struct page *out_page = NULL; /* The current page to write to */
- ZSTD_inBuffer in_buf = { NULL, 0, 0 };
- ZSTD_outBuffer out_buf = { NULL, 0, 0 };
unsigned long tot_in = 0;
unsigned long tot_out = 0;
unsigned long len = *total_out;
@@ -118,9 +118,9 @@ static int zstd_compress_pages(struct list_head *ws,
/* map in the first page of input data */
in_page = find_get_page(mapping, start >> PAGE_SHIFT);
- in_buf.src = kmap(in_page);
- in_buf.pos = 0;
- in_buf.size = min_t(size_t, len, PAGE_SIZE);
+ workspace->in_buf.src = kmap(in_page);
+ workspace->in_buf.pos = 0;
+ workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);
/* Allocate and map in the output buffer */
@@ -130,14 +130,15 @@ static int zstd_compress_pages(struct list_head *ws,
goto out;
}
pages[nr_pages++] = out_page;
- out_buf.dst = kmap(out_page);
- out_buf.pos = 0;
- out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
+ workspace->out_buf.dst = kmap(out_page);
+ workspace->out_buf.pos = 0;
+ workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
while (1) {
size_t ret2;
- ret2 = ZSTD_compressStream(stream, &out_buf, &in_buf);
+ ret2 = ZSTD_compressStream(stream, &workspace->out_buf,
+ &workspace->in_buf);
if (ZSTD_isError(ret2)) {
pr_debug("BTRFS: ZSTD_compressStream returned %d\n",
ZSTD_getErrorCode(ret2));
@@ -146,22 +147,22 @@ static int zstd_compress_pages(struct list_head *ws,
}
/* Check to see if we are making it bigger */
- if (tot_in + in_buf.pos > 8192 &&
- tot_in + in_buf.pos <
- tot_out + out_buf.pos) {
+ if (tot_in + workspace->in_buf.pos > 8192 &&
+ tot_in + workspace->in_buf.pos <
+ tot_out + workspace->out_buf.pos) {
ret = -E2BIG;
goto out;
}
/* We've reached the end of our output range */
- if (out_buf.pos >= max_out) {
- tot_out += out_buf.pos;
+ if (workspace->out_buf.pos >= max_out) {
+ tot_out += workspace->out_buf.pos;
ret = -E2BIG;
goto out;
}
/* Check if we need more output space */
- if (out_buf.pos == out_buf.size) {
+ if (workspace->out_buf.pos == workspace->out_buf.size) {
tot_out += PAGE_SIZE;
max_out -= PAGE_SIZE;
kunmap(out_page);
@@ -176,19 +177,20 @@ static int zstd_compress_pages(struct list_head *ws,
goto out;
}
pages[nr_pages++] = out_page;
- out_buf.dst = kmap(out_page);
- out_buf.pos = 0;
- out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
+ workspace->out_buf.dst = kmap(out_page);
+ workspace->out_buf.pos = 0;
+ workspace->out_buf.size = min_t(size_t, max_out,
+ PAGE_SIZE);
}
/* We've reached the end of the input */
- if (in_buf.pos >= len) {
- tot_in += in_buf.pos;
+ if (workspace->in_buf.pos >= len) {
+ tot_in += workspace->in_buf.pos;
break;
}
/* Check if we need more input */
- if (in_buf.pos == in_buf.size) {
+ if (workspace->in_buf.pos == workspace->in_buf.size) {
tot_in += PAGE_SIZE;
kunmap(in_page);
put_page(in_page);
@@ -196,15 +198,15 @@ static int zstd_compress_pages(struct list_head *ws,
start += PAGE_SIZE;
len -= PAGE_SIZE;
in_page = find_get_page(mapping, start >> PAGE_SHIFT);
- in_buf.src = kmap(in_page);
- in_buf.pos = 0;
- in_buf.size = min_t(size_t, len, PAGE_SIZE);
+ workspace->in_buf.src = kmap(in_page);
+ workspace->in_buf.pos = 0;
+ workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);
}
}
while (1) {
size_t ret2;
- ret2 = ZSTD_endStream(stream, &out_buf);
+ ret2 = ZSTD_endStream(stream, &workspace->out_buf);
if (ZSTD_isError(ret2)) {
pr_debug("BTRFS: ZSTD_endStream returned %d\n",
ZSTD_getErrorCode(ret2));
@@ -212,11 +214,11 @@ static int zstd_compress_pages(struct list_head *ws,
goto out;
}
if (ret2 == 0) {
- tot_out += out_buf.pos;
+ tot_out += workspace->out_buf.pos;
break;
}
- if (out_buf.pos >= max_out) {
- tot_out += out_buf.pos;
+ if (workspace->out_buf.pos >= max_out) {
+ tot_out += workspace->out_buf.pos;
ret = -E2BIG;
goto out;
}
@@ -235,9 +237,9 @@ static int zstd_compress_pages(struct list_head *ws,
goto out;
}
pages[nr_pages++] = out_page;
- out_buf.dst = kmap(out_page);
- out_buf.pos = 0;
- out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
+ workspace->out_buf.dst = kmap(out_page);
+ workspace->out_buf.pos = 0;
+ workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
}
if (tot_out >= tot_in) {
@@ -273,8 +275,6 @@ static int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
unsigned long buf_start;
unsigned long total_out = 0;
- ZSTD_inBuffer in_buf = { NULL, 0, 0 };
- ZSTD_outBuffer out_buf = { NULL, 0, 0 };
stream = ZSTD_initDStream(
ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
@@ -284,18 +284,19 @@ static int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
goto done;
}
- in_buf.src = kmap(pages_in[page_in_index]);
- in_buf.pos = 0;
- in_buf.size = min_t(size_t, srclen, PAGE_SIZE);
+ workspace->in_buf.src = kmap(pages_in[page_in_index]);
+ workspace->in_buf.pos = 0;
+ workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE);
- out_buf.dst = workspace->buf;
- out_buf.pos = 0;
- out_buf.size = PAGE_SIZE;
+ workspace->out_buf.dst = workspace->buf;
+ workspace->out_buf.pos = 0;
+ workspace->out_buf.size = PAGE_SIZE;
while (1) {
size_t ret2;
- ret2 = ZSTD_decompressStream(stream, &out_buf, &in_buf);
+ ret2 = ZSTD_decompressStream(stream, &workspace->out_buf,
+ &workspace->in_buf);
if (ZSTD_isError(ret2)) {
pr_debug("BTRFS: ZSTD_decompressStream returned %d\n",
ZSTD_getErrorCode(ret2));
@@ -303,38 +304,38 @@ static int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
goto done;
}
buf_start = total_out;
- total_out += out_buf.pos;
- out_buf.pos = 0;
+ total_out += workspace->out_buf.pos;
+ workspace->out_buf.pos = 0;
- ret = btrfs_decompress_buf2page(out_buf.dst, buf_start,
- total_out, disk_start, orig_bio);
+ ret = btrfs_decompress_buf2page(workspace->out_buf.dst,
+ buf_start, total_out, disk_start, orig_bio);
if (ret == 0)
break;
- if (in_buf.pos >= srclen)
+ if (workspace->in_buf.pos >= srclen)
break;
/* Check if we've hit the end of a frame */
if (ret2 == 0)
break;
- if (in_buf.pos == in_buf.size) {
+ if (workspace->in_buf.pos == workspace->in_buf.size) {
kunmap(pages_in[page_in_index++]);
if (page_in_index >= total_pages_in) {
- in_buf.src = NULL;
+ workspace->in_buf.src = NULL;
ret = -EIO;
goto done;
}
srclen -= PAGE_SIZE;
- in_buf.src = kmap(pages_in[page_in_index]);
- in_buf.pos = 0;
- in_buf.size = min_t(size_t, srclen, PAGE_SIZE);
+ workspace->in_buf.src = kmap(pages_in[page_in_index]);
+ workspace->in_buf.pos = 0;
+ workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE);
}
}
ret = 0;
zero_fill_bio(orig_bio);
done:
- if (in_buf.src)
+ if (workspace->in_buf.src)
kunmap(pages_in[page_in_index]);
return ret;
}
@@ -348,8 +349,6 @@ static int zstd_decompress(struct list_head *ws, unsigned char *data_in,
ZSTD_DStream *stream;
int ret = 0;
size_t ret2;
- ZSTD_inBuffer in_buf = { NULL, 0, 0 };
- ZSTD_outBuffer out_buf = { NULL, 0, 0 };
unsigned long total_out = 0;
unsigned long pg_offset = 0;
char *kaddr;
@@ -364,16 +363,17 @@ static int zstd_decompress(struct list_head *ws, unsigned char *data_in,
destlen = min_t(size_t, destlen, PAGE_SIZE);
- in_buf.src = data_in;
- in_buf.pos = 0;
- in_buf.size = srclen;
+ workspace->in_buf.src = data_in;
+ workspace->in_buf.pos = 0;
+ workspace->in_buf.size = srclen;
- out_buf.dst = workspace->buf;
- out_buf.pos = 0;
- out_buf.size = PAGE_SIZE;
+ workspace->out_buf.dst = workspace->buf;
+ workspace->out_buf.pos = 0;
+ workspace->out_buf.size = PAGE_SIZE;
ret2 = 1;
- while (pg_offset < destlen && in_buf.pos < in_buf.size) {
+ while (pg_offset < destlen &&
+ workspace->in_buf.pos < workspace->in_buf.size) {
unsigned long buf_start;
unsigned long buf_offset;
unsigned long bytes;
@@ -384,7 +384,8 @@ static int zstd_decompress(struct list_head *ws, unsigned char *data_in,
ret = -EIO;
goto finish;
}
- ret2 = ZSTD_decompressStream(stream, &out_buf, &in_buf);
+ ret2 = ZSTD_decompressStream(stream, &workspace->out_buf,
+ &workspace->in_buf);
if (ZSTD_isError(ret2)) {
pr_debug("BTRFS: ZSTD_decompressStream returned %d\n",
ZSTD_getErrorCode(ret2));
@@ -393,8 +394,8 @@ static int zstd_decompress(struct list_head *ws, unsigned char *data_in,
}
buf_start = total_out;
- total_out += out_buf.pos;
- out_buf.pos = 0;
+ total_out += workspace->out_buf.pos;
+ workspace->out_buf.pos = 0;
if (total_out <= start_byte)
continue;
@@ -405,10 +406,11 @@ static int zstd_decompress(struct list_head *ws, unsigned char *data_in,
buf_offset = 0;
bytes = min_t(unsigned long, destlen - pg_offset,
- out_buf.size - buf_offset);
+ workspace->out_buf.size - buf_offset);
kaddr = kmap_atomic(dest_page);
- memcpy(kaddr + pg_offset, out_buf.dst + buf_offset, bytes);
+ memcpy(kaddr + pg_offset, workspace->out_buf.dst + buf_offset,
+ bytes);
kunmap_atomic(kaddr);
pg_offset += bytes;
diff --git a/fs/buffer.c b/fs/buffer.c
index 0736a6a2e2f0..9a73924db22f 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -53,13 +53,6 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
-void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
-{
- bh->b_end_io = handler;
- bh->b_private = private;
-}
-EXPORT_SYMBOL(init_buffer);
-
inline void touch_buffer(struct buffer_head *bh)
{
trace_block_touch_buffer(bh);
@@ -922,7 +915,8 @@ init_page_buffers(struct page *page, struct block_device *bdev,
do {
if (!buffer_mapped(bh)) {
- init_buffer(bh, NULL, NULL);
+ bh->b_end_io = NULL;
+ bh->b_private = NULL;
bh->b_bdev = bdev;
bh->b_blocknr = block;
if (uptodate)
@@ -3014,7 +3008,7 @@ static void end_bio_bh_io_sync(struct bio *bio)
void guard_bio_eod(int op, struct bio *bio)
{
sector_t maxsector;
- struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
+ struct bio_vec *bvec = bio_last_bvec_all(bio);
unsigned truncated_bytes;
struct hd_struct *part;
diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
index 1ee54ffd3a24..7edbd0679952 100644
--- a/fs/cachefiles/daemon.c
+++ b/fs/cachefiles/daemon.c
@@ -31,7 +31,7 @@ static ssize_t cachefiles_daemon_read(struct file *, char __user *, size_t,
loff_t *);
static ssize_t cachefiles_daemon_write(struct file *, const char __user *,
size_t, loff_t *);
-static unsigned int cachefiles_daemon_poll(struct file *,
+static __poll_t cachefiles_daemon_poll(struct file *,
struct poll_table_struct *);
static int cachefiles_daemon_frun(struct cachefiles_cache *, char *);
static int cachefiles_daemon_fcull(struct cachefiles_cache *, char *);
@@ -291,11 +291,11 @@ found_command:
* poll for culling state
* - use POLLOUT to indicate culling state
*/
-static unsigned int cachefiles_daemon_poll(struct file *file,
+static __poll_t cachefiles_daemon_poll(struct file *file,
struct poll_table_struct *poll)
{
struct cachefiles_cache *cache = file->private_data;
- unsigned int mask;
+ __poll_t mask;
poll_wait(file, &cache->daemon_pollwq, poll);
mask = 0;
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index d5b2e12b5d02..c71971c01c63 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -196,6 +196,14 @@ config CIFS_SMB311
This dialect includes improved security negotiation features.
If unsure, say N
+config CIFS_SMB_DIRECT
+ bool "SMB Direct support (Experimental)"
+ depends on CIFS=m && INFINIBAND || CIFS=y && INFINIBAND=y
+ help
+ Enables experimental SMB Direct support for SMB 3.0, 3.02 and 3.1.1.
+ SMB Direct allows transferring SMB packets over RDMA. If unsure,
+ say N.
+
config CIFS_FSCACHE
bool "Provide CIFS client caching support"
depends on CIFS=m && FSCACHE || CIFS=y && FSCACHE=y
diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile
index 7134f182720b..7e4a1e2f0696 100644
--- a/fs/cifs/Makefile
+++ b/fs/cifs/Makefile
@@ -19,3 +19,5 @@ cifs-$(CONFIG_CIFS_UPCALL) += cifs_spnego.o
cifs-$(CONFIG_CIFS_DFS_UPCALL) += dns_resolve.o cifs_dfs_ref.o
cifs-$(CONFIG_CIFS_FSCACHE) += fscache.o cache.o
+
+cifs-$(CONFIG_CIFS_SMB_DIRECT) += smbdirect.o
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index cbb9534b89b4..c7a863219fa3 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -30,6 +30,9 @@
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifsfs.h"
+#ifdef CONFIG_CIFS_SMB_DIRECT
+#include "smbdirect.h"
+#endif
void
cifs_dump_mem(char *label, void *data, int length)
@@ -107,6 +110,32 @@ void cifs_dump_mids(struct TCP_Server_Info *server)
}
#ifdef CONFIG_PROC_FS
+static void cifs_debug_tcon(struct seq_file *m, struct cifs_tcon *tcon)
+{
+ __u32 dev_type = le32_to_cpu(tcon->fsDevInfo.DeviceType);
+
+ seq_printf(m, "%s Mounts: %d ", tcon->treeName, tcon->tc_count);
+ if (tcon->nativeFileSystem)
+ seq_printf(m, "Type: %s ", tcon->nativeFileSystem);
+ seq_printf(m, "DevInfo: 0x%x Attributes: 0x%x\n\tPathComponentMax: %d Status: %d",
+ le32_to_cpu(tcon->fsDevInfo.DeviceCharacteristics),
+ le32_to_cpu(tcon->fsAttrInfo.Attributes),
+ le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength),
+ tcon->tidStatus);
+ if (dev_type == FILE_DEVICE_DISK)
+ seq_puts(m, " type: DISK ");
+ else if (dev_type == FILE_DEVICE_CD_ROM)
+ seq_puts(m, " type: CDROM ");
+ else
+ seq_printf(m, " type: %d ", dev_type);
+ if (tcon->ses->server->ops->dump_share_caps)
+ tcon->ses->server->ops->dump_share_caps(m, tcon);
+
+ if (tcon->need_reconnect)
+ seq_puts(m, "\tDISCONNECTED ");
+ seq_putc(m, '\n');
+}
+
static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
{
struct list_head *tmp1, *tmp2, *tmp3;
@@ -115,7 +144,6 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
struct cifs_ses *ses;
struct cifs_tcon *tcon;
int i, j;
- __u32 dev_type;
seq_puts(m,
"Display Internal CIFS Data Structures for Debugging\n"
@@ -152,6 +180,72 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
list_for_each(tmp1, &cifs_tcp_ses_list) {
server = list_entry(tmp1, struct TCP_Server_Info,
tcp_ses_list);
+
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ if (!server->rdma)
+ goto skip_rdma;
+
+ seq_printf(m, "\nSMBDirect (in hex) protocol version: %x "
+ "transport status: %x",
+ server->smbd_conn->protocol,
+ server->smbd_conn->transport_status);
+ seq_printf(m, "\nConn receive_credit_max: %x "
+ "send_credit_target: %x max_send_size: %x",
+ server->smbd_conn->receive_credit_max,
+ server->smbd_conn->send_credit_target,
+ server->smbd_conn->max_send_size);
+ seq_printf(m, "\nConn max_fragmented_recv_size: %x "
+ "max_fragmented_send_size: %x max_receive_size:%x",
+ server->smbd_conn->max_fragmented_recv_size,
+ server->smbd_conn->max_fragmented_send_size,
+ server->smbd_conn->max_receive_size);
+ seq_printf(m, "\nConn keep_alive_interval: %x "
+ "max_readwrite_size: %x rdma_readwrite_threshold: %x",
+ server->smbd_conn->keep_alive_interval,
+ server->smbd_conn->max_readwrite_size,
+ server->smbd_conn->rdma_readwrite_threshold);
+ seq_printf(m, "\nDebug count_get_receive_buffer: %x "
+ "count_put_receive_buffer: %x count_send_empty: %x",
+ server->smbd_conn->count_get_receive_buffer,
+ server->smbd_conn->count_put_receive_buffer,
+ server->smbd_conn->count_send_empty);
+ seq_printf(m, "\nRead Queue count_reassembly_queue: %x "
+ "count_enqueue_reassembly_queue: %x "
+ "count_dequeue_reassembly_queue: %x "
+ "fragment_reassembly_remaining: %x "
+ "reassembly_data_length: %x "
+ "reassembly_queue_length: %x",
+ server->smbd_conn->count_reassembly_queue,
+ server->smbd_conn->count_enqueue_reassembly_queue,
+ server->smbd_conn->count_dequeue_reassembly_queue,
+ server->smbd_conn->fragment_reassembly_remaining,
+ server->smbd_conn->reassembly_data_length,
+ server->smbd_conn->reassembly_queue_length);
+ seq_printf(m, "\nCurrent Credits send_credits: %x "
+ "receive_credits: %x receive_credit_target: %x",
+ atomic_read(&server->smbd_conn->send_credits),
+ atomic_read(&server->smbd_conn->receive_credits),
+ server->smbd_conn->receive_credit_target);
+ seq_printf(m, "\nPending send_pending: %x send_payload_pending:"
+ " %x smbd_send_pending: %x smbd_recv_pending: %x",
+ atomic_read(&server->smbd_conn->send_pending),
+ atomic_read(&server->smbd_conn->send_payload_pending),
+ server->smbd_conn->smbd_send_pending,
+ server->smbd_conn->smbd_recv_pending);
+ seq_printf(m, "\nReceive buffers count_receive_queue: %x "
+ "count_empty_packet_queue: %x",
+ server->smbd_conn->count_receive_queue,
+ server->smbd_conn->count_empty_packet_queue);
+ seq_printf(m, "\nMR responder_resources: %x "
+ "max_frmr_depth: %x mr_type: %x",
+ server->smbd_conn->responder_resources,
+ server->smbd_conn->max_frmr_depth,
+ server->smbd_conn->mr_type);
+ seq_printf(m, "\nMR mr_ready_count: %x mr_used_count: %x",
+ atomic_read(&server->smbd_conn->mr_ready_count),
+ atomic_read(&server->smbd_conn->mr_used_count));
+skip_rdma:
+#endif
seq_printf(m, "\nNumber of credits: %d", server->credits);
i++;
list_for_each(tmp2, &server->smb_ses_list) {
@@ -176,6 +270,8 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
ses->ses_count, ses->serverOS, ses->serverNOS,
ses->capabilities, ses->status);
}
+ if (server->rdma)
+ seq_printf(m, "RDMA\n\t");
seq_printf(m, "TCP status: %d\n\tLocal Users To "
"Server: %d SecMode: 0x%x Req On Wire: %d",
server->tcpStatus, server->srv_count,
@@ -189,35 +285,19 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
seq_puts(m, "\n\tShares:");
j = 0;
+
+ seq_printf(m, "\n\t%d) IPC: ", j);
+ if (ses->tcon_ipc)
+ cifs_debug_tcon(m, ses->tcon_ipc);
+ else
+ seq_puts(m, "none\n");
+
list_for_each(tmp3, &ses->tcon_list) {
tcon = list_entry(tmp3, struct cifs_tcon,
tcon_list);
++j;
- dev_type = le32_to_cpu(tcon->fsDevInfo.DeviceType);
- seq_printf(m, "\n\t%d) %s Mounts: %d ", j,
- tcon->treeName, tcon->tc_count);
- if (tcon->nativeFileSystem) {
- seq_printf(m, "Type: %s ",
- tcon->nativeFileSystem);
- }
- seq_printf(m, "DevInfo: 0x%x Attributes: 0x%x"
- "\n\tPathComponentMax: %d Status: %d",
- le32_to_cpu(tcon->fsDevInfo.DeviceCharacteristics),
- le32_to_cpu(tcon->fsAttrInfo.Attributes),
- le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength),
- tcon->tidStatus);
- if (dev_type == FILE_DEVICE_DISK)
- seq_puts(m, " type: DISK ");
- else if (dev_type == FILE_DEVICE_CD_ROM)
- seq_puts(m, " type: CDROM ");
- else
- seq_printf(m, " type: %d ", dev_type);
- if (server->ops->dump_share_caps)
- server->ops->dump_share_caps(m, tcon);
-
- if (tcon->need_reconnect)
- seq_puts(m, "\tDISCONNECTED ");
- seq_putc(m, '\n');
+ seq_printf(m, "\n\t%d) ", j);
+ cifs_debug_tcon(m, tcon);
}
seq_puts(m, "\n\tMIDs:\n");
@@ -374,6 +454,45 @@ static const struct file_operations cifs_stats_proc_fops = {
};
#endif /* STATS */
+#ifdef CONFIG_CIFS_SMB_DIRECT
+#define PROC_FILE_DEFINE(name) \
+static ssize_t name##_write(struct file *file, const char __user *buffer, \
+ size_t count, loff_t *ppos) \
+{ \
+ int rc; \
+ rc = kstrtoint_from_user(buffer, count, 10, & name); \
+ if (rc) \
+ return rc; \
+ return count; \
+} \
+static int name##_proc_show(struct seq_file *m, void *v) \
+{ \
+ seq_printf(m, "%d\n", name ); \
+ return 0; \
+} \
+static int name##_open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, name##_proc_show, NULL); \
+} \
+\
+static const struct file_operations cifs_##name##_proc_fops = { \
+ .open = name##_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+ .write = name##_write, \
+}
+
+PROC_FILE_DEFINE(rdma_readwrite_threshold);
+PROC_FILE_DEFINE(smbd_max_frmr_depth);
+PROC_FILE_DEFINE(smbd_keep_alive_interval);
+PROC_FILE_DEFINE(smbd_max_receive_size);
+PROC_FILE_DEFINE(smbd_max_fragmented_recv_size);
+PROC_FILE_DEFINE(smbd_max_send_size);
+PROC_FILE_DEFINE(smbd_send_credit_target);
+PROC_FILE_DEFINE(smbd_receive_credit_max);
+#endif
+
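For reference, a manual expansion of one PROC_FILE_DEFINE() invocation, here for rdma_readwrite_threshold (lightly reformatted; the file_operations part is omitted since it matches the macro body verbatim):

static ssize_t rdma_readwrite_threshold_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *ppos)
{
	int rc = kstrtoint_from_user(buffer, count, 10,
				     &rdma_readwrite_threshold);

	return rc ? rc : count;
}

static int rdma_readwrite_threshold_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", rdma_readwrite_threshold);
	return 0;
}
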
static struct proc_dir_entry *proc_fs_cifs;
static const struct file_operations cifsFYI_proc_fops;
static const struct file_operations cifs_lookup_cache_proc_fops;
@@ -401,6 +520,24 @@ cifs_proc_init(void)
&cifs_security_flags_proc_fops);
proc_create("LookupCacheEnabled", 0, proc_fs_cifs,
&cifs_lookup_cache_proc_fops);
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ proc_create("rdma_readwrite_threshold", 0, proc_fs_cifs,
+ &cifs_rdma_readwrite_threshold_proc_fops);
+ proc_create("smbd_max_frmr_depth", 0, proc_fs_cifs,
+ &cifs_smbd_max_frmr_depth_proc_fops);
+ proc_create("smbd_keep_alive_interval", 0, proc_fs_cifs,
+ &cifs_smbd_keep_alive_interval_proc_fops);
+ proc_create("smbd_max_receive_size", 0, proc_fs_cifs,
+ &cifs_smbd_max_receive_size_proc_fops);
+ proc_create("smbd_max_fragmented_recv_size", 0, proc_fs_cifs,
+ &cifs_smbd_max_fragmented_recv_size_proc_fops);
+ proc_create("smbd_max_send_size", 0, proc_fs_cifs,
+ &cifs_smbd_max_send_size_proc_fops);
+ proc_create("smbd_send_credit_target", 0, proc_fs_cifs,
+ &cifs_smbd_send_credit_target_proc_fops);
+ proc_create("smbd_receive_credit_max", 0, proc_fs_cifs,
+ &cifs_smbd_receive_credit_max_proc_fops);
+#endif
}
void
@@ -418,6 +555,16 @@ cifs_proc_clean(void)
remove_proc_entry("SecurityFlags", proc_fs_cifs);
remove_proc_entry("LinuxExtensionsEnabled", proc_fs_cifs);
remove_proc_entry("LookupCacheEnabled", proc_fs_cifs);
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ remove_proc_entry("rdma_readwrite_threshold", proc_fs_cifs);
+ remove_proc_entry("smbd_max_frmr_depth", proc_fs_cifs);
+ remove_proc_entry("smbd_keep_alive_interval", proc_fs_cifs);
+ remove_proc_entry("smbd_max_receive_size", proc_fs_cifs);
+ remove_proc_entry("smbd_max_fragmented_recv_size", proc_fs_cifs);
+ remove_proc_entry("smbd_max_send_size", proc_fs_cifs);
+ remove_proc_entry("smbd_send_credit_target", proc_fs_cifs);
+ remove_proc_entry("smbd_receive_credit_max", proc_fs_cifs);
+#endif
remove_proc_entry("fs/cifs", NULL);
}
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index b98436f5c7c7..13a8a77322c9 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -1125,7 +1125,7 @@ out:
return rc;
}
-/* Translate the CIFS ACL (simlar to NTFS ACL) for a file into mode bits */
+/* Translate the CIFS ACL (similar to NTFS ACL) for a file into mode bits */
int
cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
struct inode *inode, const char *path,
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 68abbb0db608..f2b0a7f124da 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -325,9 +325,8 @@ int calc_lanman_hash(const char *password, const char *cryptkey, bool encrypt,
{
int i;
int rc;
- char password_with_pad[CIFS_ENCPWD_SIZE];
+ char password_with_pad[CIFS_ENCPWD_SIZE] = {0};
- memset(password_with_pad, 0, CIFS_ENCPWD_SIZE);
if (password)
strncpy(password_with_pad, password, CIFS_ENCPWD_SIZE);
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 31b7565b1617..a7be591d8e18 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -327,6 +327,8 @@ cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
default:
seq_puts(s, "(unknown)");
}
+ if (server->rdma)
+ seq_puts(s, ",rdma");
}
static void
@@ -1068,6 +1070,7 @@ const struct file_operations cifs_file_ops = {
.flush = cifs_flush,
.mmap = cifs_file_mmap,
.splice_read = generic_file_splice_read,
+ .splice_write = iter_file_splice_write,
.llseek = cifs_llseek,
.unlocked_ioctl = cifs_ioctl,
.copy_file_range = cifs_copy_file_range,
@@ -1086,6 +1089,7 @@ const struct file_operations cifs_file_strict_ops = {
.flush = cifs_flush,
.mmap = cifs_file_strict_mmap,
.splice_read = generic_file_splice_read,
+ .splice_write = iter_file_splice_write,
.llseek = cifs_llseek,
.unlocked_ioctl = cifs_ioctl,
.copy_file_range = cifs_copy_file_range,
@@ -1105,6 +1109,7 @@ const struct file_operations cifs_file_direct_ops = {
.flush = cifs_flush,
.mmap = cifs_file_mmap,
.splice_read = generic_file_splice_read,
+ .splice_write = iter_file_splice_write,
.unlocked_ioctl = cifs_ioctl,
.copy_file_range = cifs_copy_file_range,
.clone_file_range = cifs_clone_file_range,
@@ -1122,6 +1127,7 @@ const struct file_operations cifs_file_nobrl_ops = {
.flush = cifs_flush,
.mmap = cifs_file_mmap,
.splice_read = generic_file_splice_read,
+ .splice_write = iter_file_splice_write,
.llseek = cifs_llseek,
.unlocked_ioctl = cifs_ioctl,
.copy_file_range = cifs_copy_file_range,
@@ -1139,6 +1145,7 @@ const struct file_operations cifs_file_strict_nobrl_ops = {
.flush = cifs_flush,
.mmap = cifs_file_strict_mmap,
.splice_read = generic_file_splice_read,
+ .splice_write = iter_file_splice_write,
.llseek = cifs_llseek,
.unlocked_ioctl = cifs_ioctl,
.copy_file_range = cifs_copy_file_range,
@@ -1157,6 +1164,7 @@ const struct file_operations cifs_file_direct_nobrl_ops = {
.flush = cifs_flush,
.mmap = cifs_file_mmap,
.splice_read = generic_file_splice_read,
+ .splice_write = iter_file_splice_write,
.unlocked_ioctl = cifs_ioctl,
.copy_file_range = cifs_copy_file_range,
.clone_file_range = cifs_clone_file_range,
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 5a10e566f0e6..013ba2aed8d9 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -149,5 +149,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
-#define CIFS_VERSION "2.10"
+#define CIFS_VERSION "2.11"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index b16583594d1a..48f7c197cd2d 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -64,8 +64,8 @@
#define RFC1001_NAME_LEN 15
#define RFC1001_NAME_LEN_WITH_NULL (RFC1001_NAME_LEN + 1)
-/* currently length of NIP6_FMT */
-#define SERVER_NAME_LENGTH 40
+/* maximum length of ip addr as a string (including ipv6 and sctp) */
+#define SERVER_NAME_LENGTH 80
#define SERVER_NAME_LEN_WITH_NULL (SERVER_NAME_LENGTH + 1)
/* echo interval in seconds */
@@ -230,8 +230,14 @@ struct smb_version_operations {
__u64 (*get_next_mid)(struct TCP_Server_Info *);
/* data offset from read response message */
unsigned int (*read_data_offset)(char *);
- /* data length from read response message */
- unsigned int (*read_data_length)(char *);
+ /*
+ * Data length from read response message
+ * When in_remaining is true, the returned data length is in
+ * message field DataRemaining for out-of-band data read (e.g. through
+ * Memory Registration RDMA write in SMBD).
+ * Otherwise, the returned data length is in message field DataLength.
+ */
+ unsigned int (*read_data_length)(char *, bool in_remaining);
/* map smb to linux error */
int (*map_error)(char *, bool);
/* find mid corresponding to the response message */
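A hedged sketch of how an SMB2-level implementation can honor in_remaining; the field names follow the SMB2 READ response layout, but treat this as illustrative rather than the exact in-tree op:

static unsigned int
example_read_data_length(char *buf, bool in_remaining)
{
	struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;

	/*
	 * With RDMA the payload was written out of band, so only
	 * DataRemaining describes how much data was transferred.
	 */
	if (in_remaining)
		return le32_to_cpu(rsp->DataRemaining);

	return le32_to_cpu(rsp->DataLength);
}
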
@@ -532,6 +538,7 @@ struct smb_vol {
bool nopersistent:1;
bool resilient:1; /* noresilient not required since not forced for CA */
bool domainauto:1;
+ bool rdma:1;
unsigned int rsize;
unsigned int wsize;
bool sockopt_tcp_nodelay:1;
@@ -648,6 +655,10 @@ struct TCP_Server_Info {
bool sec_kerberos; /* supports plain Kerberos */
bool sec_mskerberos; /* supports legacy MS Kerberos */
bool large_buf; /* is current buffer large? */
+ /* use SMBD connection instead of socket */
+ bool rdma;
+ /* point to the SMBD connection if RDMA is used instead of socket */
+ struct smbd_connection *smbd_conn;
struct delayed_work echo; /* echo ping workqueue job */
char *smallbuf; /* pointer to current "small" buffer */
char *bigbuf; /* pointer to current "big" buffer */
@@ -822,12 +833,12 @@ static inline void cifs_set_net_ns(struct TCP_Server_Info *srv, struct net *net)
struct cifs_ses {
struct list_head smb_ses_list;
struct list_head tcon_list;
+ struct cifs_tcon *tcon_ipc;
struct mutex session_mutex;
struct TCP_Server_Info *server; /* pointer to server info */
int ses_count; /* reference counter */
enum statusEnum status;
unsigned overrideSecFlg; /* if non-zero override global sec flags */
- __u32 ipc_tid; /* special tid for connection to IPC share */
char *serverOS; /* name of operating system underlying server */
char *serverNOS; /* name of network operating system of server */
char *serverDomain; /* security realm of server */
@@ -835,8 +846,7 @@ struct cifs_ses {
kuid_t linux_uid; /* overriding owner of files on the mount */
kuid_t cred_uid; /* owner of credentials */
unsigned int capabilities;
- char serverName[SERVER_NAME_LEN_WITH_NULL * 2]; /* BB make bigger for
- TCP names - will ipv6 and sctp addresses fit? */
+ char serverName[SERVER_NAME_LEN_WITH_NULL];
char *user_name; /* must not be null except during init of sess
and after mount option parsing we fill it */
char *domainName;
@@ -931,7 +941,9 @@ struct cifs_tcon {
FILE_SYSTEM_DEVICE_INFO fsDevInfo;
FILE_SYSTEM_ATTRIBUTE_INFO fsAttrInfo; /* ok if fs name truncated */
FILE_SYSTEM_UNIX_INFO fsUnixInfo;
- bool ipc:1; /* set if connection to IPC$ eg for RPC/PIPES */
+ bool ipc:1; /* set if connection to IPC$ share (always also pipe) */
+ bool pipe:1; /* set if connection to pipe share */
+ bool print:1; /* set if connection to printer share */
bool retry:1;
bool nocase:1;
bool seal:1; /* transport encryption for this mounted share */
@@ -944,7 +956,6 @@ struct cifs_tcon {
bool need_reopen_files:1; /* need to reopen tcon file handles */
bool use_resilient:1; /* use resilient instead of durable handles */
bool use_persistent:1; /* use persistent instead of durable handles */
- bool print:1; /* set if connection to printer share */
__le32 capabilities;
__u32 share_flags;
__u32 maximal_access;
@@ -1147,6 +1158,9 @@ struct cifs_readdata {
struct cifs_readdata *rdata,
struct iov_iter *iter);
struct kvec iov[2];
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ struct smbd_mr *mr;
+#endif
unsigned int pagesz;
unsigned int tailsz;
unsigned int credits;
@@ -1169,6 +1183,9 @@ struct cifs_writedata {
pid_t pid;
unsigned int bytes;
int result;
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ struct smbd_mr *mr;
+#endif
unsigned int pagesz;
unsigned int tailsz;
unsigned int credits;
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 4143c9dec463..93d565186698 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -106,6 +106,10 @@ extern int SendReceive2(const unsigned int /* xid */ , struct cifs_ses *,
struct kvec *, int /* nvec to send */,
int * /* type of buf returned */, const int flags,
struct kvec * /* resp vec */);
+extern int smb2_send_recv(const unsigned int xid, struct cifs_ses *pses,
+ struct kvec *pkvec, int nvec_to_send,
+ int *pbuftype, const int flags,
+ struct kvec *presp);
extern int SendReceiveBlockingLock(const unsigned int xid,
struct cifs_tcon *ptcon,
struct smb_hdr *in_buf ,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 35dc5bf01ee2..4e0922d24eb2 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -43,6 +43,7 @@
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "fscache.h"
+#include "smbdirect.h"
#ifdef CONFIG_CIFS_POSIX
static struct {
@@ -1454,6 +1455,7 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
struct cifs_readdata *rdata = mid->callback_data;
char *buf = server->smallbuf;
unsigned int buflen = get_rfc1002_length(buf) + 4;
+ bool use_rdma_mr = false;
cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%u\n",
__func__, mid->mid, rdata->offset, rdata->bytes);
@@ -1542,8 +1544,11 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
rdata->iov[0].iov_base, server->total_read);
/* how much data is in the response? */
- data_len = server->ops->read_data_length(buf);
- if (data_offset + data_len > buflen) {
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ use_rdma_mr = rdata->mr;
+#endif
+ data_len = server->ops->read_data_length(buf, use_rdma_mr);
+ if (!use_rdma_mr && (data_offset + data_len > buflen)) {
/* data_len is corrupt -- discard frame */
rdata->result = -EIO;
return cifs_readv_discard(server, mid);
@@ -1923,6 +1928,12 @@ cifs_writedata_release(struct kref *refcount)
{
struct cifs_writedata *wdata = container_of(refcount,
struct cifs_writedata, refcount);
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ if (wdata->mr) {
+ smbd_deregister_mr(wdata->mr);
+ wdata->mr = NULL;
+ }
+#endif
if (wdata->cfile)
cifsFileInfo_put(wdata->cfile);
@@ -4822,10 +4833,11 @@ CIFSGetDFSRefer(const unsigned int xid, struct cifs_ses *ses,
*target_nodes = NULL;
cifs_dbg(FYI, "In GetDFSRefer the path %s\n", search_name);
- if (ses == NULL)
+ if (ses == NULL || ses->tcon_ipc == NULL)
return -ENODEV;
+
getDFSRetry:
- rc = smb_init(SMB_COM_TRANSACTION2, 15, NULL, (void **) &pSMB,
+ rc = smb_init(SMB_COM_TRANSACTION2, 15, ses->tcon_ipc, (void **) &pSMB,
(void **) &pSMBr);
if (rc)
return rc;
@@ -4833,7 +4845,7 @@ getDFSRetry:
/* server pointer checked in called function,
but should never be null here anyway */
pSMB->hdr.Mid = get_next_mid(ses->server);
- pSMB->hdr.Tid = ses->ipc_tid;
+ pSMB->hdr.Tid = ses->tcon_ipc->tid;
pSMB->hdr.Uid = ses->Suid;
if (ses->capabilities & CAP_STATUS32)
pSMB->hdr.Flags2 |= SMBFLG2_ERR_STATUS;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 0bfc2280436d..a726f524fb84 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -44,7 +44,6 @@
#include <net/ipv6.h>
#include <linux/parser.h>
#include <linux/bvec.h>
-
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
@@ -56,6 +55,7 @@
#include "rfc1002pdu.h"
#include "fscache.h"
#include "smb2proto.h"
+#include "smbdirect.h"
#define CIFS_PORT 445
#define RFC1001_PORT 139
@@ -92,7 +92,7 @@ enum {
Opt_multiuser, Opt_sloppy, Opt_nosharesock,
Opt_persistent, Opt_nopersistent,
Opt_resilient, Opt_noresilient,
- Opt_domainauto,
+ Opt_domainauto, Opt_rdma,
/* Mount options which take numeric value */
Opt_backupuid, Opt_backupgid, Opt_uid,
@@ -183,6 +183,7 @@ static const match_table_t cifs_mount_option_tokens = {
{ Opt_resilient, "resilienthandles"},
{ Opt_noresilient, "noresilienthandles"},
{ Opt_domainauto, "domainauto"},
+ { Opt_rdma, "rdma"},
{ Opt_backupuid, "backupuid=%s" },
{ Opt_backupgid, "backupgid=%s" },
@@ -353,11 +354,12 @@ cifs_reconnect(struct TCP_Server_Info *server)
list_for_each(tmp, &server->smb_ses_list) {
ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
ses->need_reconnect = true;
- ses->ipc_tid = 0;
list_for_each(tmp2, &ses->tcon_list) {
tcon = list_entry(tmp2, struct cifs_tcon, tcon_list);
tcon->need_reconnect = true;
}
+ if (ses->tcon_ipc)
+ ses->tcon_ipc->need_reconnect = true;
}
spin_unlock(&cifs_tcp_ses_lock);
@@ -405,7 +407,10 @@ cifs_reconnect(struct TCP_Server_Info *server)
/* we should try only the port we connected to before */
mutex_lock(&server->srv_mutex);
- rc = generic_ip_connect(server);
+ if (cifs_rdma_enabled(server))
+ rc = smbd_reconnect(server);
+ else
+ rc = generic_ip_connect(server);
if (rc) {
cifs_dbg(FYI, "reconnect error %d\n", rc);
mutex_unlock(&server->srv_mutex);
@@ -538,8 +543,10 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
if (server_unresponsive(server))
return -ECONNABORTED;
-
- length = sock_recvmsg(server->ssocket, smb_msg, 0);
+ if (cifs_rdma_enabled(server) && server->smbd_conn)
+ length = smbd_recv(server->smbd_conn, smb_msg);
+ else
+ length = sock_recvmsg(server->ssocket, smb_msg, 0);
if (server->tcpStatus == CifsExiting)
return -ESHUTDOWN;
@@ -700,7 +707,10 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
wake_up_all(&server->request_q);
/* give those requests time to exit */
msleep(125);
-
+ if (cifs_rdma_enabled(server) && server->smbd_conn) {
+ smbd_destroy(server->smbd_conn);
+ server->smbd_conn = NULL;
+ }
if (server->ssocket) {
sock_release(server->ssocket);
server->ssocket = NULL;
@@ -1550,6 +1560,9 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
case Opt_domainauto:
vol->domainauto = true;
break;
+ case Opt_rdma:
+ vol->rdma = true;
+ break;
/* Numeric Values */
case Opt_backupuid:
@@ -1707,7 +1720,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
tmp_end++;
if (!(tmp_end < end && tmp_end[1] == delim)) {
/* No it is not. Set the password to NULL */
- kfree(vol->password);
+ kzfree(vol->password);
vol->password = NULL;
break;
}
@@ -1745,7 +1758,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
options = end;
}
- kfree(vol->password);
+ kzfree(vol->password);
/* Now build new password string */
temp_len = strlen(value);
vol->password = kzalloc(temp_len+1, GFP_KERNEL);
@@ -1951,6 +1964,19 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
goto cifs_parse_mount_err;
}
+ if (vol->rdma && vol->vals->protocol_id < SMB30_PROT_ID) {
+ cifs_dbg(VFS, "SMB Direct requires Version >=3.0\n");
+ goto cifs_parse_mount_err;
+ }
+
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ if (vol->rdma && vol->sign) {
+ cifs_dbg(VFS, "Currently SMB direct doesn't support signing."
+ " This is being fixed\n");
+ goto cifs_parse_mount_err;
+ }
+#endif
+
#ifndef CONFIG_KEYS
/* Muliuser mounts require CONFIG_KEYS support */
if (vol->multiuser) {
@@ -2162,6 +2188,9 @@ static int match_server(struct TCP_Server_Info *server, struct smb_vol *vol)
if (server->echo_interval != vol->echo_interval * HZ)
return 0;
+ if (server->rdma != vol->rdma)
+ return 0;
+
return 1;
}
@@ -2260,6 +2289,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
tcp_ses->noblocksnd = volume_info->noblocksnd;
tcp_ses->noautotune = volume_info->noautotune;
tcp_ses->tcp_nodelay = volume_info->sockopt_tcp_nodelay;
+ tcp_ses->rdma = volume_info->rdma;
tcp_ses->in_flight = 0;
tcp_ses->credits = 1;
init_waitqueue_head(&tcp_ses->response_q);
@@ -2297,13 +2327,29 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
tcp_ses->echo_interval = volume_info->echo_interval * HZ;
else
tcp_ses->echo_interval = SMB_ECHO_INTERVAL_DEFAULT * HZ;
-
+ if (tcp_ses->rdma) {
+#ifndef CONFIG_CIFS_SMB_DIRECT
+ cifs_dbg(VFS, "CONFIG_CIFS_SMB_DIRECT is not enabled\n");
+ rc = -ENOENT;
+ goto out_err_crypto_release;
+#endif
+ tcp_ses->smbd_conn = smbd_get_connection(
+ tcp_ses, (struct sockaddr *)&volume_info->dstaddr);
+ if (tcp_ses->smbd_conn) {
+ cifs_dbg(VFS, "RDMA transport established\n");
+ rc = 0;
+ goto smbd_connected;
+ } else {
+ rc = -ENOENT;
+ goto out_err_crypto_release;
+ }
+ }
rc = ip_connect(tcp_ses);
if (rc < 0) {
cifs_dbg(VFS, "Error connecting to socket. Aborting operation.\n");
goto out_err_crypto_release;
}
-
+smbd_connected:
/*
* since we're in a cifs function already, we know that
* this will succeed. No need for try_module_get().
@@ -2381,6 +2427,93 @@ static int match_session(struct cifs_ses *ses, struct smb_vol *vol)
return 1;
}
+/**
+ * cifs_setup_ipc - helper to set up the IPC tcon for the session
+ *
+ * A new IPC connection is made and stored in the session
+ * tcon_ipc. The IPC tcon has the same lifetime as the session.
+ */
+static int
+cifs_setup_ipc(struct cifs_ses *ses, struct smb_vol *volume_info)
+{
+ int rc = 0, xid;
+ struct cifs_tcon *tcon;
+ struct nls_table *nls_codepage;
+ char unc[SERVER_NAME_LENGTH + sizeof("//x/IPC$")] = {0};
+ bool seal = false;
+
+ /*
+ * If the mount request that resulted in the creation of the
+ * session requires encryption, force IPC to be encrypted too.
+ */
+ if (volume_info->seal) {
+ if (ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)
+ seal = true;
+ else {
+ cifs_dbg(VFS,
+ "IPC: server doesn't support encryption\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ tcon = tconInfoAlloc();
+ if (tcon == NULL)
+ return -ENOMEM;
+
+ snprintf(unc, sizeof(unc), "\\\\%s\\IPC$", ses->serverName);
+
+ /* cannot fail */
+ nls_codepage = load_nls_default();
+
+ xid = get_xid();
+ tcon->ses = ses;
+ tcon->ipc = true;
+ tcon->seal = seal;
+ rc = ses->server->ops->tree_connect(xid, ses, unc, tcon, nls_codepage);
+ free_xid(xid);
+
+ if (rc) {
+ cifs_dbg(VFS, "failed to connect to IPC (rc=%d)\n", rc);
+ tconInfoFree(tcon);
+ goto out;
+ }
+
+ cifs_dbg(FYI, "IPC tcon rc = %d ipc tid = %d\n", rc, tcon->tid);
+
+ ses->tcon_ipc = tcon;
+out:
+ unload_nls(nls_codepage);
+ return rc;
+}
+
+/**
+ * cifs_free_ipc - helper to release the session IPC tcon
+ *
+ * Needs to be called every time a session is destroyed.
+ */
+static int
+cifs_free_ipc(struct cifs_ses *ses)
+{
+ int rc = 0, xid;
+ struct cifs_tcon *tcon = ses->tcon_ipc;
+
+ if (tcon == NULL)
+ return 0;
+
+ if (ses->server->ops->tree_disconnect) {
+ xid = get_xid();
+ rc = ses->server->ops->tree_disconnect(xid, tcon);
+ free_xid(xid);
+ }
+
+ if (rc)
+ cifs_dbg(FYI, "failed to disconnect IPC tcon (rc=%d)\n", rc);
+
+ tconInfoFree(tcon);
+ ses->tcon_ipc = NULL;
+ return rc;
+}
+
static struct cifs_ses *
cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol)
{
@@ -2421,6 +2554,8 @@ cifs_put_smb_ses(struct cifs_ses *ses)
ses->status = CifsExiting;
spin_unlock(&cifs_tcp_ses_lock);
+ cifs_free_ipc(ses);
+
if (ses->status == CifsExiting && server->ops->logoff) {
xid = get_xid();
rc = server->ops->logoff(xid, ses);
@@ -2569,6 +2704,13 @@ cifs_set_cifscreds(struct smb_vol *vol __attribute__((unused)),
}
#endif /* CONFIG_KEYS */
+/**
+ * cifs_get_smb_ses - get a session matching @volume_info data from @server
+ *
+ * This function assumes it is being called from cifs_mount() where we
+ * already got a server reference (server refcount +1). See
+ * cifs_get_tcon() for refcount explanations.
+ */
static struct cifs_ses *
cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
{
@@ -2665,6 +2807,9 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
spin_unlock(&cifs_tcp_ses_lock);
free_xid(xid);
+
+ cifs_setup_ipc(ses, volume_info);
+
return ses;
get_ses_fail:
@@ -2709,8 +2854,16 @@ void
cifs_put_tcon(struct cifs_tcon *tcon)
{
unsigned int xid;
- struct cifs_ses *ses = tcon->ses;
+ struct cifs_ses *ses;
+
+ /*
+ * IPC tcons share the lifetime of their session and are
+ * destroyed in the session put function.
+ */
+ if (tcon == NULL || tcon->ipc)
+ return;
+ ses = tcon->ses;
cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count);
spin_lock(&cifs_tcp_ses_lock);
if (--tcon->tc_count > 0) {
@@ -2731,6 +2884,26 @@ cifs_put_tcon(struct cifs_tcon *tcon)
cifs_put_smb_ses(ses);
}
+/**
+ * cifs_get_tcon - get a tcon matching @volume_info data from @ses
+ *
+ * - tcon refcount is the number of mount points using the tcon.
+ * - ses refcount is the number of tcon using the session.
+ *
+ * 1. This function assumes it is being called from cifs_mount() where
+ * we already got a session reference (ses refcount +1).
+ *
+ * 2. Since we're in the context of adding a mount point, the end
+ * result should be either:
+ *
+ * a) a new tcon already allocated with refcount=1 (1 mount point) and
+ * its session refcount incremented (1 new tcon). This +1 was
+ * already done in (1).
+ *
+ * b) an existing tcon with refcount+1 (add a mount point to it) and
+ * identical ses refcount (no new tcon). Because of (1) we need to
+ * decrement the ses refcount.
+ */
static struct cifs_tcon *
cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
{
@@ -2739,8 +2912,11 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
tcon = cifs_find_tcon(ses, volume_info);
if (tcon) {
+ /*
+ * tcon already has its refcount incremented, but we need to
+ * decrement the extra ses reference taken by the caller (case b)
+ */
cifs_dbg(FYI, "Found match on UNC path\n");
- /* existing tcon already has a reference */
cifs_put_smb_ses(ses);
return tcon;
}
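A worked trace of the two cases described in the comment above, with illustrative counts (the ses_count/tc_count field names follow cifs convention):

	/*
	 * mount #1 of //srv/share:
	 *   cifs_get_smb_ses()   ses->ses_count = 1
	 *   cifs_get_tcon()      new tcon, tc_count = 1         (case a)
	 *
	 * mount #2 of the same //srv/share:
	 *   cifs_get_smb_ses()   ses->ses_count = 2
	 *   cifs_find_tcon()     match found, tc_count = 2
	 *   cifs_put_smb_ses()   ses->ses_count back to 1       (case b)
	 */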
@@ -2986,39 +3162,17 @@ get_dfs_path(const unsigned int xid, struct cifs_ses *ses, const char *old_path,
const struct nls_table *nls_codepage, unsigned int *num_referrals,
struct dfs_info3_param **referrals, int remap)
{
- char *temp_unc;
int rc = 0;
- if (!ses->server->ops->tree_connect || !ses->server->ops->get_dfs_refer)
+ if (!ses->server->ops->get_dfs_refer)
return -ENOSYS;
*num_referrals = 0;
*referrals = NULL;
- if (ses->ipc_tid == 0) {
- temp_unc = kmalloc(2 /* for slashes */ +
- strnlen(ses->serverName, SERVER_NAME_LEN_WITH_NULL * 2)
- + 1 + 4 /* slash IPC$ */ + 2, GFP_KERNEL);
- if (temp_unc == NULL)
- return -ENOMEM;
- temp_unc[0] = '\\';
- temp_unc[1] = '\\';
- strcpy(temp_unc + 2, ses->serverName);
- strcpy(temp_unc + 2 + strlen(ses->serverName), "\\IPC$");
- rc = ses->server->ops->tree_connect(xid, ses, temp_unc, NULL,
- nls_codepage);
- cifs_dbg(FYI, "Tcon rc = %d ipc_tid = %d\n", rc, ses->ipc_tid);
- kfree(temp_unc);
- }
- if (rc == 0)
- rc = ses->server->ops->get_dfs_refer(xid, ses, old_path,
- referrals, num_referrals,
- nls_codepage, remap);
- /*
- * BB - map targetUNCs to dfs_info3 structures, here or in
- * ses->server->ops->get_dfs_refer.
- */
-
+ rc = ses->server->ops->get_dfs_refer(xid, ses, old_path,
+ referrals, num_referrals,
+ nls_codepage, remap);
return rc;
}
@@ -3783,7 +3937,7 @@ try_mount_again:
tcon->unix_ext = 0; /* server does not support them */
/* do not care if a following call succeed - informational */
- if (!tcon->ipc && server->ops->qfs_tcon)
+ if (!tcon->pipe && server->ops->qfs_tcon)
server->ops->qfs_tcon(xid, tcon);
cifs_sb->wsize = server->ops->negotiate_wsize(tcon, volume_info);
@@ -3913,8 +4067,7 @@ out:
}
/*
- * Issue a TREE_CONNECT request. Note that for IPC$ shares, that the tcon
- * pointer may be NULL.
+ * Issue a TREE_CONNECT request.
*/
int
CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
@@ -3950,7 +4103,7 @@ CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
pSMB->AndXCommand = 0xFF;
pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO);
bcc_ptr = &pSMB->Password[0];
- if (!tcon || (ses->server->sec_mode & SECMODE_USER)) {
+ if (tcon->pipe || (ses->server->sec_mode & SECMODE_USER)) {
pSMB->PasswordLength = cpu_to_le16(1); /* minimum */
*bcc_ptr = 0; /* password is null byte */
bcc_ptr++; /* skip password */
@@ -4022,7 +4175,7 @@ CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
0);
/* above now done in SendReceive */
- if ((rc == 0) && (tcon != NULL)) {
+ if (rc == 0) {
bool is_unicode;
tcon->tidStatus = CifsGood;
@@ -4042,7 +4195,8 @@ CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
if ((bcc_ptr[0] == 'I') && (bcc_ptr[1] == 'P') &&
(bcc_ptr[2] == 'C')) {
cifs_dbg(FYI, "IPC connection\n");
- tcon->ipc = 1;
+ tcon->ipc = true;
+ tcon->pipe = true;
}
} else if (length == 2) {
if ((bcc_ptr[0] == 'A') && (bcc_ptr[1] == ':')) {
@@ -4069,9 +4223,6 @@ CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
else
tcon->Flags = 0;
cifs_dbg(FYI, "Tcon flags: 0x%x\n", tcon->Flags);
- } else if ((rc == 0) && tcon == NULL) {
- /* all we need to save for IPC$ connection */
- ses->ipc_tid = smb_buffer_response->Tid;
}
cifs_buf_release(smb_buffer);
@@ -4235,7 +4386,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
reset_cifs_unix_caps(0, tcon, NULL, vol_info);
out:
kfree(vol_info->username);
- kfree(vol_info->password);
+ kzfree(vol_info->password);
kfree(vol_info);
return tcon;
@@ -4387,7 +4538,7 @@ cifs_prune_tlinks(struct work_struct *work)
struct cifs_sb_info *cifs_sb = container_of(work, struct cifs_sb_info,
prune_tlinks.work);
struct rb_root *root = &cifs_sb->tlink_tree;
- struct rb_node *node = rb_first(root);
+ struct rb_node *node;
struct rb_node *tmp;
struct tcon_link *tlink;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index df9f682708c6..7cee97b93a61 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -42,7 +42,7 @@
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"
-
+#include "smbdirect.h"
static inline int cifs_convert_flags(unsigned int flags)
{
@@ -2902,7 +2902,12 @@ cifs_readdata_release(struct kref *refcount)
{
struct cifs_readdata *rdata = container_of(refcount,
struct cifs_readdata, refcount);
-
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ if (rdata->mr) {
+ smbd_deregister_mr(rdata->mr);
+ rdata->mr = NULL;
+ }
+#endif
if (rdata->cfile)
cifsFileInfo_put(rdata->cfile);
@@ -3031,6 +3036,10 @@ uncached_fill_pages(struct TCP_Server_Info *server,
}
if (iter)
result = copy_page_from_iter(page, 0, n, iter);
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ else if (rdata->mr)
+ result = n;
+#endif
else
result = cifs_read_page_from_socket(server, page, n);
if (result < 0)
@@ -3471,20 +3480,18 @@ static const struct vm_operations_struct cifs_file_vm_ops = {
int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
{
- int rc, xid;
+ int xid, rc = 0;
struct inode *inode = file_inode(file);
xid = get_xid();
- if (!CIFS_CACHE_READ(CIFS_I(inode))) {
+ if (!CIFS_CACHE_READ(CIFS_I(inode)))
rc = cifs_zap_mapping(inode);
- if (rc)
- return rc;
- }
-
- rc = generic_file_mmap(file, vma);
- if (rc == 0)
+ if (!rc)
+ rc = generic_file_mmap(file, vma);
+ if (!rc)
vma->vm_ops = &cifs_file_vm_ops;
+
free_xid(xid);
return rc;
}
@@ -3494,16 +3501,16 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
int rc, xid;
xid = get_xid();
+
rc = cifs_revalidate_file(file);
- if (rc) {
+ if (rc)
cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
rc);
- free_xid(xid);
- return rc;
- }
- rc = generic_file_mmap(file, vma);
- if (rc == 0)
+ if (!rc)
+ rc = generic_file_mmap(file, vma);
+ if (!rc)
vma->vm_ops = &cifs_file_vm_ops;
+
free_xid(xid);
return rc;
}
@@ -3600,6 +3607,10 @@ readpages_fill_pages(struct TCP_Server_Info *server,
if (iter)
result = copy_page_from_iter(page, 0, n, iter);
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ else if (rdata->mr)
+ result = n;
+#endif
else
result = cifs_read_page_from_socket(server, page, n);
if (result < 0)
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index ecb99079363a..8f9a8cc7cc62 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1049,7 +1049,7 @@ iget_no_retry:
tcon->resource_id = CIFS_I(inode)->uniqueid;
#endif
- if (rc && tcon->ipc) {
+ if (rc && tcon->pipe) {
cifs_dbg(FYI, "ipc connection - fake read inode\n");
spin_lock(&inode->i_lock);
inode->i_mode |= S_IFDIR;
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index eea93ac15ef0..a0dbced4a45c 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -98,14 +98,11 @@ sesInfoFree(struct cifs_ses *buf_to_free)
kfree(buf_to_free->serverOS);
kfree(buf_to_free->serverDomain);
kfree(buf_to_free->serverNOS);
- if (buf_to_free->password) {
- memset(buf_to_free->password, 0, strlen(buf_to_free->password));
- kfree(buf_to_free->password);
- }
+ kzfree(buf_to_free->password);
kfree(buf_to_free->user_name);
kfree(buf_to_free->domainName);
- kfree(buf_to_free->auth_key.response);
- kfree(buf_to_free);
+ kzfree(buf_to_free->auth_key.response);
+ kzfree(buf_to_free);
}
struct cifs_tcon *
@@ -136,10 +133,7 @@ tconInfoFree(struct cifs_tcon *buf_to_free)
}
atomic_dec(&tconInfoAllocCount);
kfree(buf_to_free->nativeFileSystem);
- if (buf_to_free->password) {
- memset(buf_to_free->password, 0, strlen(buf_to_free->password));
- kfree(buf_to_free->password);
- }
+ kzfree(buf_to_free->password);
kfree(buf_to_free);
}
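The kzfree() conversions above also drop the explicit NULL checks, which is safe because kzfree() tolerates NULL; and unlike the open-coded memset(..., strlen(...)) it replaces, it zeroes the whole allocation. A sketch of kzfree() as found in mm/util.c around this kernel version (reproduced for reference; not part of this patch):

void kzfree(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	if (unlikely(ZERO_OR_NULL_PTR(mem)))
		return;
	ks = ksize(mem);	/* full allocation size, not strlen() */
	memset(mem, 0, ks);
	kfree(mem);
}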
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index a723df3e0197..3d495e440c87 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -87,9 +87,11 @@ cifs_read_data_offset(char *buf)
}
static unsigned int
-cifs_read_data_length(char *buf)
+cifs_read_data_length(char *buf, bool in_remaining)
{
READ_RSP *rsp = (READ_RSP *)buf;
+ /* It is a bug to read remaining data for SMB1 packets */
+ WARN_ON(in_remaining);
return (le16_to_cpu(rsp->DataLengthHigh) << 16) +
le16_to_cpu(rsp->DataLength);
}
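The SMB1 read length is split across two little-endian fields; as a worked example with illustrative values, DataLengthHigh = 0x0001 and DataLength = 0x2345 give (0x0001 << 16) + 0x2345 = 0x12345 = 74565 bytes. The new in_remaining flag can never legitimately be true here, since SMB1 has no SMB Direct/RDMA read path in this series, hence the WARN_ON().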
diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
index b4b1f0305f29..12af5dba742b 100644
--- a/fs/cifs/smb2file.c
+++ b/fs/cifs/smb2file.c
@@ -74,7 +74,7 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
nr_ioctl_req.Reserved = 0;
rc = SMB2_ioctl(xid, oparms->tcon, fid->persistent_fid,
fid->volatile_fid, FSCTL_LMR_REQUEST_RESILIENCY,
- true /* is_fsctl */, false /* use_ipc */,
+ true /* is_fsctl */,
(char *)&nr_ioctl_req, sizeof(nr_ioctl_req),
NULL, NULL /* no return info */);
if (rc == -EOPNOTSUPP) {
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 7b08a1446a7f..76d03abaa38c 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -578,7 +578,7 @@ smb2_is_valid_lease_break(char *buffer)
bool
smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
{
- struct smb2_oplock_break *rsp = (struct smb2_oplock_break *)buffer;
+ struct smb2_oplock_break_rsp *rsp = (struct smb2_oplock_break_rsp *)buffer;
struct list_head *tmp, *tmp1, *tmp2;
struct cifs_ses *ses;
struct cifs_tcon *tcon;
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index ed88ab8a4774..eb68e2fcc500 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -32,6 +32,7 @@
#include "smb2status.h"
#include "smb2glob.h"
#include "cifs_ioctl.h"
+#include "smbdirect.h"
static int
change_conf(struct TCP_Server_Info *server)
@@ -250,7 +251,11 @@ smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
/* start with specified wsize, or default */
wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE;
wsize = min_t(unsigned int, wsize, server->max_write);
-
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ if (server->rdma)
+ wsize = min_t(unsigned int,
+ wsize, server->smbd_conn->max_readwrite_size);
+#endif
if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
@@ -266,6 +271,11 @@ smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
/* start with specified rsize, or default */
rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE;
rsize = min_t(unsigned int, rsize, server->max_read);
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ if (server->rdma)
+ rsize = min_t(unsigned int,
+ rsize, server->smbd_conn->max_readwrite_size);
+#endif
if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
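A worked example for the clamping above, with illustrative values: a mount requesting wsize = 4 MiB against a server advertising max_write = 8 MiB, over an SMB Direct connection whose max_readwrite_size is 1 MiB, ends up with wsize = min(4M, 8M, 1M) = 1 MiB; the SMB2_MAX_BUFFER_SIZE clamp then only applies if the server lacks SMB2_GLOBAL_CAP_LARGE_MTU.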
@@ -283,7 +293,6 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
- false /* use_ipc */,
NULL /* no data input */, 0 /* no data input */,
(char **)&out_buf, &ret_data_len);
if (rc != 0)
@@ -782,7 +791,6 @@ SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon,
rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */,
- false /* use_ipc */,
NULL, 0 /* no input */,
(char **)&res_key, &ret_data_len);
@@ -848,8 +856,7 @@ smb2_copychunk_range(const unsigned int xid,
/* Request server copy to target from src identified by key */
rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
- true /* is_fsctl */, false /* use_ipc */,
- (char *)pcchunk,
+ true /* is_fsctl */, (char *)pcchunk,
sizeof(struct copychunk_ioctl), (char **)&retbuf,
&ret_data_len);
if (rc == 0) {
@@ -947,9 +954,13 @@ smb2_read_data_offset(char *buf)
}
static unsigned int
-smb2_read_data_length(char *buf)
+smb2_read_data_length(char *buf, bool in_remaining)
{
struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
+
+ if (in_remaining)
+ return le32_to_cpu(rsp->DataRemaining);
+
return le32_to_cpu(rsp->DataLength);
}
@@ -1006,7 +1017,7 @@ static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon,
rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid, FSCTL_SET_SPARSE,
- true /* is_fctl */, false /* use_ipc */,
+ true /* is_fctl */,
&setsparse, 1, NULL, NULL);
if (rc) {
tcon->broken_sparse_sup = true;
@@ -1077,7 +1088,7 @@ smb2_duplicate_extents(const unsigned int xid,
rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
trgtfile->fid.volatile_fid,
FSCTL_DUPLICATE_EXTENTS_TO_FILE,
- true /* is_fsctl */, false /* use_ipc */,
+ true /* is_fsctl */,
(char *)&dup_ext_buf,
sizeof(struct duplicate_extents_to_file),
NULL,
@@ -1112,7 +1123,7 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid,
FSCTL_SET_INTEGRITY_INFORMATION,
- true /* is_fsctl */, false /* use_ipc */,
+ true /* is_fsctl */,
(char *)&integr_info,
sizeof(struct fsctl_set_integrity_information_req),
NULL,
@@ -1132,7 +1143,7 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid,
FSCTL_SRV_ENUMERATE_SNAPSHOTS,
- true /* is_fsctl */, false /* use_ipc */,
+ true /* is_fsctl */,
NULL, 0 /* no input data */,
(char **)&retbuf,
&ret_data_len);
@@ -1351,16 +1362,20 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
cifs_dbg(FYI, "smb2_get_dfs_refer path <%s>\n", search_name);
/*
- * Use any tcon from the current session. Here, the first one.
+ * Try to use the IPC tcon, otherwise fall back to any tcon in the session
*/
- spin_lock(&cifs_tcp_ses_lock);
- tcon = list_first_entry_or_null(&ses->tcon_list, struct cifs_tcon,
- tcon_list);
- if (tcon)
- tcon->tc_count++;
- spin_unlock(&cifs_tcp_ses_lock);
+ tcon = ses->tcon_ipc;
+ if (tcon == NULL) {
+ spin_lock(&cifs_tcp_ses_lock);
+ tcon = list_first_entry_or_null(&ses->tcon_list,
+ struct cifs_tcon,
+ tcon_list);
+ if (tcon)
+ tcon->tc_count++;
+ spin_unlock(&cifs_tcp_ses_lock);
+ }
- if (!tcon) {
+ if (tcon == NULL) {
cifs_dbg(VFS, "session %p has no tcon available for a dfs referral request\n",
ses);
rc = -ENOTCONN;
@@ -1389,20 +1404,11 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
memcpy(dfs_req->RequestFileName, utf16_path, utf16_path_len);
do {
- /* try first with IPC */
rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
FSCTL_DFS_GET_REFERRALS,
- true /* is_fsctl */, true /* use_ipc */,
+ true /* is_fsctl */,
(char *)dfs_req, dfs_req_size,
(char **)&dfs_rsp, &dfs_rsp_size);
- if (rc == -ENOTCONN) {
- /* try with normal tcon */
- rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
- FSCTL_DFS_GET_REFERRALS,
- true /* is_fsctl */, false /*use_ipc*/,
- (char *)dfs_req, dfs_req_size,
- (char **)&dfs_rsp, &dfs_rsp_size);
- }
} while (rc == -EAGAIN);
if (rc) {
@@ -1421,7 +1427,8 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
}
out:
- if (tcon) {
+ if (tcon && !tcon->ipc) {
+ /* ipc tcons are not refcounted */
spin_lock(&cifs_tcp_ses_lock);
tcon->tc_count--;
spin_unlock(&cifs_tcp_ses_lock);
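Condensed, the tcon selection and release pattern used above is: borrow the session's IPC tcon without touching tc_count (it lives and dies with the session), or pin a regular tcon from tcon_list and unpin it afterwards. A sketch with error paths omitted:

	struct cifs_tcon *tcon = ses->tcon_ipc;	/* borrowed, not refcounted */

	if (!tcon) {
		spin_lock(&cifs_tcp_ses_lock);
		tcon = list_first_entry_or_null(&ses->tcon_list,
						struct cifs_tcon, tcon_list);
		if (tcon)
			tcon->tc_count++;	/* pinned */
		spin_unlock(&cifs_tcp_ses_lock);
	}

	/* ... issue FSCTL_DFS_GET_REFERRALS on tcon ... */

	if (tcon && !tcon->ipc) {		/* unpin only counted refs */
		spin_lock(&cifs_tcp_ses_lock);
		tcon->tc_count--;
		spin_unlock(&cifs_tcp_ses_lock);
	}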
@@ -1713,8 +1720,7 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
- true /* is_fctl */, false /* use_ipc */,
- (char *)&fsctl_buf,
+ true /* is_fctl */, (char *)&fsctl_buf,
sizeof(struct file_zero_data_information), NULL, NULL);
free_xid(xid);
return rc;
@@ -1748,8 +1754,7 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
- true /* is_fctl */, false /* use_ipc */,
- (char *)&fsctl_buf,
+ true /* is_fctl */, (char *)&fsctl_buf,
sizeof(struct file_zero_data_information), NULL, NULL);
free_xid(xid);
return rc;
@@ -2411,6 +2416,7 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
struct iov_iter iter;
struct kvec iov;
int length;
+ bool use_rdma_mr = false;
if (shdr->Command != SMB2_READ) {
cifs_dbg(VFS, "only big read responses are supported\n");
@@ -2437,7 +2443,10 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
}
data_offset = server->ops->read_data_offset(buf) + 4;
- data_len = server->ops->read_data_length(buf);
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ use_rdma_mr = rdata->mr;
+#endif
+ data_len = server->ops->read_data_length(buf, use_rdma_mr);
if (data_offset < server->vals->read_rsp_size) {
/*
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 01346b8b6edb..63778ac22fd9 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -48,6 +48,7 @@
#include "smb2glob.h"
#include "cifspdu.h"
#include "cifs_spnego.h"
+#include "smbdirect.h"
/*
* The following table defines the expected "StructureSize" of SMB2 requests
@@ -319,54 +320,16 @@ fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon, void *buf,
*total_len = parmsize + sizeof(struct smb2_sync_hdr);
}
-/* init request without RFC1001 length at the beginning */
-static int
-smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
- void **request_buf, unsigned int *total_len)
-{
- int rc;
- struct smb2_sync_hdr *shdr;
-
- rc = smb2_reconnect(smb2_command, tcon);
- if (rc)
- return rc;
-
- /* BB eventually switch this to SMB2 specific small buf size */
- *request_buf = cifs_small_buf_get();
- if (*request_buf == NULL) {
- /* BB should we add a retry in here if not a writepage? */
- return -ENOMEM;
- }
-
- shdr = (struct smb2_sync_hdr *)(*request_buf);
-
- fill_small_buf(smb2_command, tcon, shdr, total_len);
-
- if (tcon != NULL) {
-#ifdef CONFIG_CIFS_STATS2
- uint16_t com_code = le16_to_cpu(smb2_command);
-
- cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
-#endif
- cifs_stats_inc(&tcon->num_smbs_sent);
- }
-
- return rc;
-}
-
/*
* Allocate and return pointer to an SMB request hdr, and set basic
* SMB information in the SMB header. If the return code is zero, this
- * function must have filled in request_buf pointer. The returned buffer
- * has RFC1001 length at the beginning.
+ * function must have filled in request_buf pointer.
*/
static int
-small_smb2_init(__le16 smb2_command, struct cifs_tcon *tcon,
- void **request_buf)
+smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
+ void **request_buf, unsigned int *total_len)
{
int rc;
- unsigned int total_len;
- struct smb2_pdu *pdu;
rc = smb2_reconnect(smb2_command, tcon);
if (rc)
@@ -379,12 +342,9 @@ small_smb2_init(__le16 smb2_command, struct cifs_tcon *tcon,
return -ENOMEM;
}
- pdu = (struct smb2_pdu *)(*request_buf);
-
- fill_small_buf(smb2_command, tcon, get_sync_hdr(pdu), &total_len);
-
- /* Note this is only network field converted to big endian */
- pdu->hdr.smb2_buf_length = cpu_to_be32(total_len);
+ fill_small_buf(smb2_command, tcon,
+ (struct smb2_sync_hdr *)(*request_buf),
+ total_len);
if (tcon != NULL) {
#ifdef CONFIG_CIFS_STATS2
@@ -398,8 +358,8 @@ small_smb2_init(__le16 smb2_command, struct cifs_tcon *tcon,
}
#ifdef CONFIG_CIFS_SMB311
-/* offset is sizeof smb2_negotiate_req - 4 but rounded up to 8 bytes */
-#define OFFSET_OF_NEG_CONTEXT 0x68 /* sizeof(struct smb2_negotiate_req) - 4 */
+/* offset is sizeof smb2_negotiate_req but rounded up to 8 bytes */
+#define OFFSET_OF_NEG_CONTEXT 0x68 /* sizeof(struct smb2_negotiate_req) */
#define SMB2_PREAUTH_INTEGRITY_CAPABILITIES cpu_to_le16(1)
@@ -427,23 +387,25 @@ build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt)
}
static void
-assemble_neg_contexts(struct smb2_negotiate_req *req)
+assemble_neg_contexts(struct smb2_negotiate_req *req,
+ unsigned int *total_len)
{
-
- /* +4 is to account for the RFC1001 len field */
- char *pneg_ctxt = (char *)req + OFFSET_OF_NEG_CONTEXT + 4;
+ char *pneg_ctxt = (char *)req + OFFSET_OF_NEG_CONTEXT;
build_preauth_ctxt((struct smb2_preauth_neg_context *)pneg_ctxt);
/* Add 2 to size to round to 8 byte boundary */
+
pneg_ctxt += 2 + sizeof(struct smb2_preauth_neg_context);
build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt);
req->NegotiateContextOffset = cpu_to_le32(OFFSET_OF_NEG_CONTEXT);
req->NegotiateContextCount = cpu_to_le16(2);
- inc_rfc1001_len(req, 4 + sizeof(struct smb2_preauth_neg_context)
- + sizeof(struct smb2_encryption_neg_context)); /* calculate hash */
+
+ *total_len += 4 + sizeof(struct smb2_preauth_neg_context)
+ + sizeof(struct smb2_encryption_neg_context);
}
#else
-static void assemble_neg_contexts(struct smb2_negotiate_req *req)
+static void assemble_neg_contexts(struct smb2_negotiate_req *req,
+ unsigned int *total_len)
{
return;
}
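The hardcoded "+ 2" in assemble_neg_contexts() is the 8-byte alignment padding after the preauth context. Expressed generically with the kernel's round_up() helper (a hedged equivalent, assuming the context size leaves a remainder of 6 modulo 8):

	pneg_ctxt += round_up(sizeof(struct smb2_preauth_neg_context), 8);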
@@ -477,6 +439,7 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
int blob_offset, blob_length;
char *security_blob;
int flags = CIFS_NEG_OP;
+ unsigned int total_len;
cifs_dbg(FYI, "Negotiate protocol\n");
@@ -485,30 +448,30 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
return -EIO;
}
- rc = small_smb2_init(SMB2_NEGOTIATE, NULL, (void **) &req);
+ rc = smb2_plain_req_init(SMB2_NEGOTIATE, NULL, (void **) &req, &total_len);
if (rc)
return rc;
- req->hdr.sync_hdr.SessionId = 0;
+ req->sync_hdr.SessionId = 0;
if (strcmp(ses->server->vals->version_string,
SMB3ANY_VERSION_STRING) == 0) {
req->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
req->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
req->DialectCount = cpu_to_le16(2);
- inc_rfc1001_len(req, 4);
+ total_len += 4;
} else if (strcmp(ses->server->vals->version_string,
SMBDEFAULT_VERSION_STRING) == 0) {
req->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
req->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
req->Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
req->DialectCount = cpu_to_le16(3);
- inc_rfc1001_len(req, 6);
+ total_len += 6;
} else {
/* otherwise send specific dialect */
req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id);
req->DialectCount = cpu_to_le16(1);
- inc_rfc1001_len(req, 2);
+ total_len += 2;
}
/* only one of SMB2 signing flags may be set in SMB2 request */
@@ -528,13 +491,12 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
memcpy(req->ClientGUID, server->client_guid,
SMB2_CLIENT_GUID_SIZE);
if (ses->server->vals->protocol_id == SMB311_PROT_ID)
- assemble_neg_contexts(req);
+ assemble_neg_contexts(req, &total_len);
}
iov[0].iov_base = (char *)req;
- /* 4 for rfc1002 length field */
- iov[0].iov_len = get_rfc1002_length(req) + 4;
+ iov[0].iov_len = total_len;
- rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+ rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_negotiate_rsp *)rsp_iov.iov_base;
/*
@@ -654,6 +616,11 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
cifs_dbg(FYI, "validate negotiate\n");
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ if (tcon->ses->server->rdma)
+ return 0;
+#endif
+
/*
* validation ioctl must be signed, so no point sending this if we
* can not sign it (ie are not known user). Even if signing is not
@@ -713,7 +680,6 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */,
- false /* use_ipc */,
(char *)&vneg_inbuf, sizeof(struct validate_negotiate_info_req),
(char **)&pneg_rsp, &rsplen);
@@ -733,8 +699,7 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
}
/* check validate negotiate info response matches what we got earlier */
- if (pneg_rsp->Dialect !=
- cpu_to_le16(tcon->ses->server->vals->protocol_id))
+ if (pneg_rsp->Dialect != cpu_to_le16(tcon->ses->server->dialect))
goto vneg_out;
if (pneg_rsp->SecurityMode != cpu_to_le16(tcon->ses->server->sec_mode))
@@ -806,20 +771,22 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
struct cifs_ses *ses = sess_data->ses;
struct smb2_sess_setup_req *req;
struct TCP_Server_Info *server = ses->server;
+ unsigned int total_len;
- rc = small_smb2_init(SMB2_SESSION_SETUP, NULL, (void **) &req);
+ rc = smb2_plain_req_init(SMB2_SESSION_SETUP, NULL, (void **) &req,
+ &total_len);
if (rc)
return rc;
/* First session, not a reauthenticate */
- req->hdr.sync_hdr.SessionId = 0;
+ req->sync_hdr.SessionId = 0;
/* if reconnect, we need to send previous sess id, otherwise it is 0 */
req->PreviousSessionId = sess_data->previous_session;
req->Flags = 0; /* MBZ */
/* to enable echos and oplocks */
- req->hdr.sync_hdr.CreditRequest = cpu_to_le16(3);
+ req->sync_hdr.CreditRequest = cpu_to_le16(3);
/* only one of SMB2 signing flags may be set in SMB2 request */
if (server->sign)
@@ -833,8 +800,8 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
req->Channel = 0; /* MBZ */
sess_data->iov[0].iov_base = (char *)req;
- /* 4 for rfc1002 length field and 1 for pad */
- sess_data->iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
+ /* 1 for pad */
+ sess_data->iov[0].iov_len = total_len - 1;
/*
* This variable will be used to clear the buffer
* allocated above in case of any error in the calling function.
@@ -860,18 +827,15 @@ SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
/* Testing shows that buffer offset must be at location of Buffer[0] */
req->SecurityBufferOffset =
- cpu_to_le16(sizeof(struct smb2_sess_setup_req) -
- 1 /* pad */ - 4 /* rfc1001 len */);
+ cpu_to_le16(sizeof(struct smb2_sess_setup_req) - 1 /* pad */);
req->SecurityBufferLength = cpu_to_le16(sess_data->iov[1].iov_len);
- inc_rfc1001_len(req, sess_data->iov[1].iov_len - 1 /* pad */);
-
/* BB add code to build os and lm fields */
- rc = SendReceive2(sess_data->xid, sess_data->ses,
- sess_data->iov, 2,
- &sess_data->buf0_type,
- CIFS_LOG_ERROR | CIFS_NEG_OP, &rsp_iov);
+ rc = smb2_send_recv(sess_data->xid, sess_data->ses,
+ sess_data->iov, 2,
+ &sess_data->buf0_type,
+ CIFS_LOG_ERROR | CIFS_NEG_OP, &rsp_iov);
cifs_small_buf_release(sess_data->iov[0].iov_base);
memcpy(&sess_data->iov[0], &rsp_iov, sizeof(struct kvec));
@@ -1092,7 +1056,7 @@ SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data)
goto out;
req = (struct smb2_sess_setup_req *) sess_data->iov[0].iov_base;
- req->hdr.sync_hdr.SessionId = ses->Suid;
+ req->sync_hdr.SessionId = ses->Suid;
rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length, ses,
sess_data->nls_cp);
@@ -1202,6 +1166,10 @@ SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
int rc = 0;
struct TCP_Server_Info *server;
int flags = 0;
+ unsigned int total_len;
+ struct kvec iov[1];
+ struct kvec rsp_iov;
+ int resp_buf_type;
cifs_dbg(FYI, "disconnect session %p\n", ses);
@@ -1214,19 +1182,24 @@ SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
if (ses->need_reconnect)
goto smb2_session_already_dead;
- rc = small_smb2_init(SMB2_LOGOFF, NULL, (void **) &req);
+ rc = smb2_plain_req_init(SMB2_LOGOFF, NULL, (void **) &req, &total_len);
if (rc)
return rc;
/* since no tcon, smb2_init can not do this, so do here */
- req->hdr.sync_hdr.SessionId = ses->Suid;
+ req->sync_hdr.SessionId = ses->Suid;
if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
flags |= CIFS_TRANSFORM_REQ;
else if (server->sign)
- req->hdr.sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
+ req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
+
+ flags |= CIFS_NO_RESP;
+
+ iov[0].iov_base = (char *)req;
+ iov[0].iov_len = total_len;
- rc = SendReceiveNoRsp(xid, ses, (char *) req, flags);
+ rc = smb2_send_recv(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
cifs_small_buf_release(req);
/*
* No tcon so can't do
@@ -1265,6 +1238,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
int unc_path_len;
__le16 *unc_path = NULL;
int flags = 0;
+ unsigned int total_len;
cifs_dbg(FYI, "TCON\n");
@@ -1283,40 +1257,30 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
}
/* SMB2 TREE_CONNECT request must be called with TreeId == 0 */
- if (tcon)
- tcon->tid = 0;
+ tcon->tid = 0;
- rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req);
+ rc = smb2_plain_req_init(SMB2_TREE_CONNECT, tcon, (void **) &req,
+ &total_len);
if (rc) {
kfree(unc_path);
return rc;
}
- if (tcon == NULL) {
- if ((ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA))
- flags |= CIFS_TRANSFORM_REQ;
-
- /* since no tcon, smb2_init can not do this, so do here */
- req->hdr.sync_hdr.SessionId = ses->Suid;
- if (ses->server->sign)
- req->hdr.sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
- } else if (encryption_required(tcon))
+ if (encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
iov[0].iov_base = (char *)req;
- /* 4 for rfc1002 length field and 1 for pad */
- iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
+ /* 1 for pad */
+ iov[0].iov_len = total_len - 1;
/* Testing shows that buffer offset must be at location of Buffer[0] */
req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req)
- - 1 /* pad */ - 4 /* do not count rfc1001 len field */);
+ - 1 /* pad */);
req->PathLength = cpu_to_le16(unc_path_len - 2);
iov[1].iov_base = unc_path;
iov[1].iov_len = unc_path_len;
- inc_rfc1001_len(req, unc_path_len - 1 /* pad */);
-
- rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, flags, &rsp_iov);
+ rc = smb2_send_recv(xid, ses, iov, 2, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base;
@@ -1328,21 +1292,16 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
goto tcon_error_exit;
}
- if (tcon == NULL) {
- ses->ipc_tid = rsp->hdr.sync_hdr.TreeId;
- goto tcon_exit;
- }
-
switch (rsp->ShareType) {
case SMB2_SHARE_TYPE_DISK:
cifs_dbg(FYI, "connection to disk share\n");
break;
case SMB2_SHARE_TYPE_PIPE:
- tcon->ipc = true;
+ tcon->pipe = true;
cifs_dbg(FYI, "connection to pipe share\n");
break;
case SMB2_SHARE_TYPE_PRINT:
- tcon->ipc = true;
+ tcon->print = true;
cifs_dbg(FYI, "connection to printer\n");
break;
default:
@@ -1389,6 +1348,10 @@ SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
int rc = 0;
struct cifs_ses *ses = tcon->ses;
int flags = 0;
+ unsigned int total_len;
+ struct kvec iov[1];
+ struct kvec rsp_iov;
+ int resp_buf_type;
cifs_dbg(FYI, "Tree Disconnect\n");
@@ -1398,14 +1361,20 @@ SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
if ((tcon->need_reconnect) || (tcon->ses->need_reconnect))
return 0;
- rc = small_smb2_init(SMB2_TREE_DISCONNECT, tcon, (void **) &req);
+ rc = smb2_plain_req_init(SMB2_TREE_DISCONNECT, tcon, (void **) &req,
+ &total_len);
if (rc)
return rc;
if (encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
- rc = SendReceiveNoRsp(xid, ses, (char *)req, flags);
+ flags |= CIFS_NO_RESP;
+
+ iov[0].iov_base = (char *)req;
+ iov[0].iov_len = total_len;
+
+ rc = smb2_send_recv(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
cifs_small_buf_release(req);
if (rc)
cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE);
@@ -1505,11 +1474,10 @@ add_lease_context(struct TCP_Server_Info *server, struct kvec *iov,
req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_LEASE;
if (!req->CreateContextsOffset)
req->CreateContextsOffset = cpu_to_le32(
- sizeof(struct smb2_create_req) - 4 +
+ sizeof(struct smb2_create_req) +
iov[num - 1].iov_len);
le32_add_cpu(&req->CreateContextsLength,
server->vals->create_lease_size);
- inc_rfc1001_len(&req->hdr, server->vals->create_lease_size);
*num_iovec = num + 1;
return 0;
}
@@ -1589,10 +1557,9 @@ add_durable_v2_context(struct kvec *iov, unsigned int *num_iovec,
iov[num].iov_len = sizeof(struct create_durable_v2);
if (!req->CreateContextsOffset)
req->CreateContextsOffset =
- cpu_to_le32(sizeof(struct smb2_create_req) - 4 +
+ cpu_to_le32(sizeof(struct smb2_create_req) +
iov[1].iov_len);
le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable_v2));
- inc_rfc1001_len(&req->hdr, sizeof(struct create_durable_v2));
*num_iovec = num + 1;
return 0;
}
@@ -1613,12 +1580,10 @@ add_durable_reconnect_v2_context(struct kvec *iov, unsigned int *num_iovec,
iov[num].iov_len = sizeof(struct create_durable_handle_reconnect_v2);
if (!req->CreateContextsOffset)
req->CreateContextsOffset =
- cpu_to_le32(sizeof(struct smb2_create_req) - 4 +
+ cpu_to_le32(sizeof(struct smb2_create_req) +
iov[1].iov_len);
le32_add_cpu(&req->CreateContextsLength,
sizeof(struct create_durable_handle_reconnect_v2));
- inc_rfc1001_len(&req->hdr,
- sizeof(struct create_durable_handle_reconnect_v2));
*num_iovec = num + 1;
return 0;
}
@@ -1649,10 +1614,9 @@ add_durable_context(struct kvec *iov, unsigned int *num_iovec,
iov[num].iov_len = sizeof(struct create_durable);
if (!req->CreateContextsOffset)
req->CreateContextsOffset =
- cpu_to_le32(sizeof(struct smb2_create_req) - 4 +
+ cpu_to_le32(sizeof(struct smb2_create_req) +
iov[1].iov_len);
le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable));
- inc_rfc1001_len(&req->hdr, sizeof(struct create_durable));
*num_iovec = num + 1;
return 0;
}
@@ -1723,6 +1687,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
__u32 file_attributes = 0;
char *dhc_buf = NULL, *lc_buf = NULL;
int flags = 0;
+ unsigned int total_len;
cifs_dbg(FYI, "create/open\n");
@@ -1731,7 +1696,8 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
else
return -EIO;
- rc = small_smb2_init(SMB2_CREATE, tcon, (void **) &req);
+ rc = smb2_plain_req_init(SMB2_CREATE, tcon, (void **) &req, &total_len);
+
if (rc)
return rc;
@@ -1752,12 +1718,10 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
req->CreateOptions = cpu_to_le32(oparms->create_options & CREATE_OPTIONS_MASK);
iov[0].iov_base = (char *)req;
- /* 4 for rfc1002 length field */
- iov[0].iov_len = get_rfc1002_length(req) + 4;
/* -1 since last byte is buf[0] which is sent below (path) */
- iov[0].iov_len--;
+ iov[0].iov_len = total_len - 1;
- req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req) - 4);
+ req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));
/* [MS-SMB2] 2.2.13 NameOffset:
* If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of
@@ -1770,7 +1734,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
if (tcon->share_flags & SHI1005_FLAGS_DFS) {
int name_len;
- req->hdr.sync_hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
+ req->sync_hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
&name_len,
tcon->treeName, path);
@@ -1797,8 +1761,6 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
iov[1].iov_len = uni_path_len;
iov[1].iov_base = path;
- /* -1 since last byte is buf[0] which was counted in smb2_buf_len */
- inc_rfc1001_len(req, uni_path_len - 1);
if (!server->oplocks)
*oplock = SMB2_OPLOCK_LEVEL_NONE;
@@ -1836,7 +1798,8 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
dhc_buf = iov[n_iov-1].iov_base;
}
- rc = SendReceive2(xid, ses, iov, n_iov, &resp_buftype, flags, &rsp_iov);
+ rc = smb2_send_recv(xid, ses, iov, n_iov, &resp_buftype, flags,
+ &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
@@ -1877,7 +1840,7 @@ creat_exit:
*/
int
SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
- u64 volatile_fid, u32 opcode, bool is_fsctl, bool use_ipc,
+ u64 volatile_fid, u32 opcode, bool is_fsctl,
char *in_data, u32 indatalen,
char **out_data, u32 *plen /* returned data len */)
{
@@ -1891,6 +1854,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
int n_iov;
int rc = 0;
int flags = 0;
+ unsigned int total_len;
cifs_dbg(FYI, "SMB2 IOCTL\n");
@@ -1909,20 +1873,10 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
if (!ses || !(ses->server))
return -EIO;
- rc = small_smb2_init(SMB2_IOCTL, tcon, (void **) &req);
+ rc = smb2_plain_req_init(SMB2_IOCTL, tcon, (void **) &req, &total_len);
if (rc)
return rc;
- if (use_ipc) {
- if (ses->ipc_tid == 0) {
- cifs_small_buf_release(req);
- return -ENOTCONN;
- }
-
- cifs_dbg(FYI, "replacing tid 0x%x with IPC tid 0x%x\n",
- req->hdr.sync_hdr.TreeId, ses->ipc_tid);
- req->hdr.sync_hdr.TreeId = ses->ipc_tid;
- }
if (encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
@@ -1934,7 +1888,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
req->InputCount = cpu_to_le32(indatalen);
/* do not set InputOffset if no input data */
req->InputOffset =
- cpu_to_le32(offsetof(struct smb2_ioctl_req, Buffer) - 4);
+ cpu_to_le32(offsetof(struct smb2_ioctl_req, Buffer));
iov[1].iov_base = in_data;
iov[1].iov_len = indatalen;
n_iov = 2;
@@ -1969,21 +1923,20 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
* but if input data passed to ioctl, we do not
* want to double count this, so we do not send
* the dummy one byte of data in iovec[0] if sending
- * input data (in iovec[1]). We also must add 4 bytes
- * in first iovec to allow for rfc1002 length field.
+ * input data (in iovec[1]).
*/
if (indatalen) {
- iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
- inc_rfc1001_len(req, indatalen - 1);
+ iov[0].iov_len = total_len - 1;
} else
- iov[0].iov_len = get_rfc1002_length(req) + 4;
+ iov[0].iov_len = total_len;
/* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */
if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO)
- req->hdr.sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
+ req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
- rc = SendReceive2(xid, ses, iov, n_iov, &resp_buftype, flags, &rsp_iov);
+ rc = smb2_send_recv(xid, ses, iov, n_iov, &resp_buftype, flags,
+ &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_ioctl_rsp *)rsp_iov.iov_base;
@@ -2052,7 +2005,6 @@ SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
FSCTL_SET_COMPRESSION, true /* is_fsctl */,
- false /* use_ipc */,
(char *)&fsctl_input /* data input */,
2 /* in data len */, &ret_data /* out data */, NULL);
@@ -2073,13 +2025,14 @@ SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
int resp_buftype;
int rc = 0;
int flags = 0;
+ unsigned int total_len;
cifs_dbg(FYI, "Close\n");
if (!ses || !(ses->server))
return -EIO;
- rc = small_smb2_init(SMB2_CLOSE, tcon, (void **) &req);
+ rc = smb2_plain_req_init(SMB2_CLOSE, tcon, (void **) &req, &total_len);
if (rc)
return rc;
@@ -2090,10 +2043,9 @@ SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
req->VolatileFileId = volatile_fid;
iov[0].iov_base = (char *)req;
- /* 4 for rfc1002 length field */
- iov[0].iov_len = get_rfc1002_length(req) + 4;
+ iov[0].iov_len = total_len;
- rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+ rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_close_rsp *)rsp_iov.iov_base;
@@ -2180,13 +2132,15 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
int resp_buftype;
struct cifs_ses *ses = tcon->ses;
int flags = 0;
+ unsigned int total_len;
cifs_dbg(FYI, "Query Info\n");
if (!ses || !(ses->server))
return -EIO;
- rc = small_smb2_init(SMB2_QUERY_INFO, tcon, (void **) &req);
+ rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, (void **) &req,
+ &total_len);
if (rc)
return rc;
@@ -2203,15 +2157,14 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
* We do not use the input buffer (do not send extra byte)
*/
req->InputBufferOffset = 0;
- inc_rfc1001_len(req, -1);
req->OutputBufferLength = cpu_to_le32(output_len);
iov[0].iov_base = (char *)req;
- /* 4 for rfc1002 length field */
- iov[0].iov_len = get_rfc1002_length(req) + 4;
+ /* 1 for Buffer */
+ iov[0].iov_len = total_len - 1;
- rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+ rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
@@ -2338,6 +2291,10 @@ void smb2_reconnect_server(struct work_struct *work)
tcon_exist = true;
}
}
+ if (ses->tcon_ipc && ses->tcon_ipc->need_reconnect) {
+ list_add_tail(&ses->tcon_ipc->rlist, &tmp_list);
+ tcon_exist = true;
+ }
}
/*
* Get the reference to server struct to be sure that the last call of
@@ -2376,6 +2333,8 @@ SMB2_echo(struct TCP_Server_Info *server)
struct kvec iov[2];
struct smb_rqst rqst = { .rq_iov = iov,
.rq_nvec = 2 };
+ unsigned int total_len;
+ __be32 rfc1002_marker;
cifs_dbg(FYI, "In echo request\n");
@@ -2385,17 +2344,17 @@ SMB2_echo(struct TCP_Server_Info *server)
return rc;
}
- rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
+ rc = smb2_plain_req_init(SMB2_ECHO, NULL, (void **)&req, &total_len);
if (rc)
return rc;
- req->hdr.sync_hdr.CreditRequest = cpu_to_le16(1);
+ req->sync_hdr.CreditRequest = cpu_to_le16(1);
- /* 4 for rfc1002 length field */
iov[0].iov_len = 4;
- iov[0].iov_base = (char *)req;
- iov[1].iov_len = get_rfc1002_length(req);
- iov[1].iov_base = (char *)req + 4;
+ rfc1002_marker = cpu_to_be32(total_len);
+ iov[0].iov_base = &rfc1002_marker;
+ iov[1].iov_len = total_len;
+ iov[1].iov_base = (char *)req;
rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, NULL,
server, CIFS_ECHO_OP);
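The echo conversion above illustrates the new framing rule for async requests: the request buffer now holds a plain SMB2 PDU, and the 4-byte RFC 1002 length marker travels in its own iovec. The general shape, as a sketch:

	struct kvec iov[2];
	__be32 rfc1002_marker = cpu_to_be32(total_len);

	iov[0].iov_base = &rfc1002_marker;	/* big-endian length, 4 bytes */
	iov[0].iov_len  = 4;
	iov[1].iov_base = (char *)req;		/* PDU with no embedded marker */
	iov[1].iov_len  = total_len;

For synchronous requests the marker is presumably added below smb2_send_recv(), which is why those callers pass only the plain PDU.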
@@ -2417,13 +2376,14 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
int resp_buftype;
int rc = 0;
int flags = 0;
+ unsigned int total_len;
cifs_dbg(FYI, "Flush\n");
if (!ses || !(ses->server))
return -EIO;
- rc = small_smb2_init(SMB2_FLUSH, tcon, (void **) &req);
+ rc = smb2_plain_req_init(SMB2_FLUSH, tcon, (void **) &req, &total_len);
if (rc)
return rc;
@@ -2434,10 +2394,9 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
req->VolatileFileId = volatile_fid;
iov[0].iov_base = (char *)req;
- /* 4 for rfc1002 length field */
- iov[0].iov_len = get_rfc1002_length(req) + 4;
+ iov[0].iov_len = total_len;
- rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+ rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
if (rc != 0)
@@ -2453,18 +2412,21 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
*/
static int
smb2_new_read_req(void **buf, unsigned int *total_len,
- struct cifs_io_parms *io_parms, unsigned int remaining_bytes,
- int request_type)
+ struct cifs_io_parms *io_parms, struct cifs_readdata *rdata,
+ unsigned int remaining_bytes, int request_type)
{
int rc = -EACCES;
struct smb2_read_plain_req *req = NULL;
struct smb2_sync_hdr *shdr;
+ struct TCP_Server_Info *server;
rc = smb2_plain_req_init(SMB2_READ, io_parms->tcon, (void **) &req,
total_len);
if (rc)
return rc;
- if (io_parms->tcon->ses->server == NULL)
+
+ server = io_parms->tcon->ses->server;
+ if (server == NULL)
return -ECONNABORTED;
shdr = &req->sync_hdr;
@@ -2478,7 +2440,40 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
req->MinimumCount = 0;
req->Length = cpu_to_le32(io_parms->length);
req->Offset = cpu_to_le64(io_parms->offset);
-
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ /*
+ * If we want to do an RDMA write, fill in and append a
+ * smbd_buffer_descriptor_v1 to the end of the read request
+ */
+ if (server->rdma && rdata &&
+ rdata->bytes >= server->smbd_conn->rdma_readwrite_threshold) {
+
+ struct smbd_buffer_descriptor_v1 *v1;
+ bool need_invalidate =
+ io_parms->tcon->ses->server->dialect == SMB30_PROT_ID;
+
+ rdata->mr = smbd_register_mr(
+ server->smbd_conn, rdata->pages,
+ rdata->nr_pages, rdata->tailsz,
+ true, need_invalidate);
+ if (!rdata->mr)
+ return -ENOBUFS;
+
+ req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE;
+ if (need_invalidate)
+ req->Channel = SMB2_CHANNEL_RDMA_V1;
+ req->ReadChannelInfoOffset =
+ cpu_to_le16(offsetof(struct smb2_read_plain_req, Buffer));
+ req->ReadChannelInfoLength =
+ cpu_to_le16(sizeof(struct smbd_buffer_descriptor_v1));
+ v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0];
+ v1->offset = cpu_to_le64(rdata->mr->mr->iova);
+ v1->token = cpu_to_le32(rdata->mr->mr->rkey);
+ v1->length = cpu_to_le32(rdata->mr->mr->length);
+
+ *total_len += sizeof(*v1) - 1;
+ }
+#endif
if (request_type & CHAINED_REQUEST) {
if (!(request_type & END_OF_CHAIN)) {
/* next 8-byte aligned request */
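For reference, the descriptor appended above is the SMB Direct buffer descriptor from MS-SMBD; its definition in smbdirect.h in this series is, as a sketch reconstructed from the fields used here (offset/token/length):

struct smbd_buffer_descriptor_v1 {
	__le64 offset;	/* registered address (mr->iova)    */
	__le32 token;	/* remote access token (mr->rkey)   */
	__le32 length;	/* length of the registered region  */
} __packed;

The "*total_len += sizeof(*v1) - 1" adjustment presumably discounts the one-byte Buffer[] placeholder already counted in the request's structure size.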
@@ -2557,7 +2552,17 @@ smb2_readv_callback(struct mid_q_entry *mid)
if (rdata->result != -ENODATA)
rdata->result = -EIO;
}
-
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ /*
+ * If this rdata has a memory region registered, the MR can be
+ * freed now. MRs are limited in number and reused for future
+ * I/Os, so free them as soon as the I/O finishes to avoid deadlock.
+ */
+ if (rdata->mr) {
+ smbd_deregister_mr(rdata->mr);
+ rdata->mr = NULL;
+ }
+#endif
if (rdata->result)
cifs_stats_fail_inc(tcon, SMB2_READ_HE);
@@ -2592,7 +2597,8 @@ smb2_async_readv(struct cifs_readdata *rdata)
server = io_parms.tcon->ses->server;
- rc = smb2_new_read_req((void **) &buf, &total_len, &io_parms, 0, 0);
+ rc = smb2_new_read_req(
+ (void **) &buf, &total_len, &io_parms, rdata, 0, 0);
if (rc) {
if (rc == -EAGAIN && rdata->credits) {
/* credits was reset by reconnect */
@@ -2650,31 +2656,24 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
struct smb2_read_plain_req *req = NULL;
struct smb2_read_rsp *rsp = NULL;
struct smb2_sync_hdr *shdr;
- struct kvec iov[2];
+ struct kvec iov[1];
struct kvec rsp_iov;
unsigned int total_len;
- __be32 req_len;
- struct smb_rqst rqst = { .rq_iov = iov,
- .rq_nvec = 2 };
int flags = CIFS_LOG_ERROR;
struct cifs_ses *ses = io_parms->tcon->ses;
*nbytes = 0;
- rc = smb2_new_read_req((void **)&req, &total_len, io_parms, 0, 0);
+ rc = smb2_new_read_req((void **)&req, &total_len, io_parms, NULL, 0, 0);
if (rc)
return rc;
if (encryption_required(io_parms->tcon))
flags |= CIFS_TRANSFORM_REQ;
- req_len = cpu_to_be32(total_len);
-
- iov[0].iov_base = &req_len;
- iov[0].iov_len = sizeof(__be32);
- iov[1].iov_base = req;
- iov[1].iov_len = total_len;
+ iov[0].iov_base = (char *)req;
+ iov[0].iov_len = total_len;
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
+ rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_read_rsp *)rsp_iov.iov_base;
@@ -2755,7 +2754,19 @@ smb2_writev_callback(struct mid_q_entry *mid)
wdata->result = -EIO;
break;
}
-
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ /*
+ * If this wdata has a memory region registered, the MR can be
+ * freed now. The number of MRs available is limited, so it is
+ * important to recover a used MR as soon as its I/O finishes;
+ * holding it longer can lead to I/O deadlock on retry for lack
+ * of an MR to send the request.
+ */
+ if (wdata->mr) {
+ smbd_deregister_mr(wdata->mr);
+ wdata->mr = NULL;
+ }
+#endif
if (wdata->result)
cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
@@ -2776,8 +2787,10 @@ smb2_async_writev(struct cifs_writedata *wdata,
struct TCP_Server_Info *server = tcon->ses->server;
struct kvec iov[2];
struct smb_rqst rqst = { };
+ unsigned int total_len;
+ __be32 rfc1002_marker;
- rc = small_smb2_init(SMB2_WRITE, tcon, (void **) &req);
+ rc = smb2_plain_req_init(SMB2_WRITE, tcon, (void **) &req, &total_len);
if (rc) {
if (rc == -EAGAIN && wdata->credits) {
/* credits was reset by reconnect */
@@ -2793,7 +2806,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
if (encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
- shdr = get_sync_hdr(req);
+ shdr = (struct smb2_sync_hdr *)req;
shdr->ProcessId = cpu_to_le32(wdata->cfile->pid);
req->PersistentFileId = wdata->cfile->fid.persistent_fid;
@@ -2802,16 +2815,51 @@ smb2_async_writev(struct cifs_writedata *wdata,
req->WriteChannelInfoLength = 0;
req->Channel = 0;
req->Offset = cpu_to_le64(wdata->offset);
- /* 4 for rfc1002 length field */
req->DataOffset = cpu_to_le16(
- offsetof(struct smb2_write_req, Buffer) - 4);
+ offsetof(struct smb2_write_req, Buffer));
req->RemainingBytes = 0;
-
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ /*
+ * If we want the server to do an RDMA read, fill in and append a
+ * smbd_buffer_descriptor_v1 to the end of the write request
+ */
+ if (server->rdma && wdata->bytes >=
+ server->smbd_conn->rdma_readwrite_threshold) {
+
+ struct smbd_buffer_descriptor_v1 *v1;
+ bool need_invalidate = server->dialect == SMB30_PROT_ID;
+
+ wdata->mr = smbd_register_mr(
+ server->smbd_conn, wdata->pages,
+ wdata->nr_pages, wdata->tailsz,
+ false, need_invalidate);
+ if (!wdata->mr) {
+ rc = -ENOBUFS;
+ goto async_writev_out;
+ }
+ req->Length = 0;
+ req->DataOffset = 0;
+ req->RemainingBytes =
+ cpu_to_le32((wdata->nr_pages-1)*PAGE_SIZE + wdata->tailsz);
+ req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE;
+ if (need_invalidate)
+ req->Channel = SMB2_CHANNEL_RDMA_V1;
+ req->WriteChannelInfoOffset =
+ cpu_to_le16(offsetof(struct smb2_write_req, Buffer));
+ req->WriteChannelInfoLength =
+ cpu_to_le16(sizeof(struct smbd_buffer_descriptor_v1));
+ v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0];
+ v1->offset = cpu_to_le64(wdata->mr->mr->iova);
+ v1->token = cpu_to_le32(wdata->mr->mr->rkey);
+ v1->length = cpu_to_le32(wdata->mr->mr->length);
+ }
+#endif
/* 4 for rfc1002 length field and 1 for Buffer */
iov[0].iov_len = 4;
- iov[0].iov_base = req;
- iov[1].iov_len = get_rfc1002_length(req) - 1;
- iov[1].iov_base = (char *)req + 4;
+ rfc1002_marker = cpu_to_be32(total_len - 1 + wdata->bytes);
+ iov[0].iov_base = &rfc1002_marker;
+ iov[1].iov_len = total_len - 1;
+ iov[1].iov_base = (char *)req;
rqst.rq_iov = iov;
rqst.rq_nvec = 2;
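A worked example for the RemainingBytes computation in the hunk above, with illustrative values: nr_pages = 17, tailsz = 100 and PAGE_SIZE = 4096 give (17 - 1) * 4096 + 100 = 65636 bytes, i.e. 16 full pages plus a 100-byte tail, which is exactly the payload the server will fetch by RDMA read.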
@@ -2819,13 +2867,22 @@ smb2_async_writev(struct cifs_writedata *wdata,
rqst.rq_npages = wdata->nr_pages;
rqst.rq_pagesz = wdata->pagesz;
rqst.rq_tailsz = wdata->tailsz;
-
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ if (wdata->mr) {
+ iov[1].iov_len += sizeof(struct smbd_buffer_descriptor_v1);
+ rqst.rq_npages = 0;
+ }
+#endif
cifs_dbg(FYI, "async write at %llu %u bytes\n",
wdata->offset, wdata->bytes);
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ /* For RDMA read, I/O size is in RemainingBytes not in Length */
+ if (!wdata->mr)
+ req->Length = cpu_to_le32(wdata->bytes);
+#else
req->Length = cpu_to_le32(wdata->bytes);
-
- inc_rfc1001_len(&req->hdr, wdata->bytes - 1 /* Buffer */);
+#endif
if (wdata->credits) {
shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes,
@@ -2869,13 +2926,15 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
int resp_buftype;
struct kvec rsp_iov;
int flags = 0;
+ unsigned int total_len;
*nbytes = 0;
if (n_vec < 1)
return rc;
- rc = small_smb2_init(SMB2_WRITE, io_parms->tcon, (void **) &req);
+ rc = smb2_plain_req_init(SMB2_WRITE, io_parms->tcon, (void **) &req,
+ &total_len);
if (rc)
return rc;
@@ -2885,7 +2944,7 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
if (encryption_required(io_parms->tcon))
flags |= CIFS_TRANSFORM_REQ;
- req->hdr.sync_hdr.ProcessId = cpu_to_le32(io_parms->pid);
+ req->sync_hdr.ProcessId = cpu_to_le32(io_parms->pid);
req->PersistentFileId = io_parms->persistent_fid;
req->VolatileFileId = io_parms->volatile_fid;
@@ -2894,20 +2953,16 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
req->Channel = 0;
req->Length = cpu_to_le32(io_parms->length);
req->Offset = cpu_to_le64(io_parms->offset);
- /* 4 for rfc1002 length field */
req->DataOffset = cpu_to_le16(
- offsetof(struct smb2_write_req, Buffer) - 4);
+ offsetof(struct smb2_write_req, Buffer));
req->RemainingBytes = 0;
iov[0].iov_base = (char *)req;
- /* 4 for rfc1002 length field and 1 for Buffer */
- iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
+ /* 1 for Buffer */
+ iov[0].iov_len = total_len - 1;
- /* length of entire message including data to be written */
- inc_rfc1001_len(req, io_parms->length - 1 /* Buffer */);
-
- rc = SendReceive2(xid, io_parms->tcon->ses, iov, n_vec + 1,
- &resp_buftype, flags, &rsp_iov);
+ rc = smb2_send_recv(xid, io_parms->tcon->ses, iov, n_vec + 1,
+ &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_write_rsp *)rsp_iov.iov_base;
@@ -2984,13 +3039,15 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
unsigned int output_size = CIFSMaxBufSize;
size_t info_buf_size;
int flags = 0;
+ unsigned int total_len;
if (ses && (ses->server))
server = ses->server;
else
return -EIO;
- rc = small_smb2_init(SMB2_QUERY_DIRECTORY, tcon, (void **) &req);
+ rc = smb2_plain_req_init(SMB2_QUERY_DIRECTORY, tcon, (void **) &req,
+ &total_len);
if (rc)
return rc;
@@ -3022,7 +3079,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
memcpy(bufptr, &asteriks, len);
req->FileNameOffset =
- cpu_to_le16(sizeof(struct smb2_query_directory_req) - 1 - 4);
+ cpu_to_le16(sizeof(struct smb2_query_directory_req) - 1);
req->FileNameLength = cpu_to_le16(len);
/*
* BB could be 30 bytes or so longer if we used SMB2 specific
@@ -3033,15 +3090,13 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
req->OutputBufferLength = cpu_to_le32(output_size);
iov[0].iov_base = (char *)req;
- /* 4 for RFC1001 length and 1 for Buffer */
- iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
+ /* 1 for Buffer */
+ iov[0].iov_len = total_len - 1;
iov[1].iov_base = (char *)(req->Buffer);
iov[1].iov_len = len;
- inc_rfc1001_len(req, len - 1 /* Buffer */);
-
- rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, flags, &rsp_iov);
+ rc = smb2_send_recv(xid, ses, iov, 2, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base;
@@ -3110,6 +3165,7 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
unsigned int i;
struct cifs_ses *ses = tcon->ses;
int flags = 0;
+ unsigned int total_len;
if (!ses || !(ses->server))
return -EIO;
@@ -3121,7 +3177,7 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
if (!iov)
return -ENOMEM;
- rc = small_smb2_init(SMB2_SET_INFO, tcon, (void **) &req);
+ rc = smb2_plain_req_init(SMB2_SET_INFO, tcon, (void **) &req, &total_len);
if (rc) {
kfree(iov);
return rc;
@@ -3130,7 +3186,7 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
if (encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
- req->hdr.sync_hdr.ProcessId = cpu_to_le32(pid);
+ req->sync_hdr.ProcessId = cpu_to_le32(pid);
req->InfoType = info_type;
req->FileInfoClass = info_class;
@@ -3138,27 +3194,25 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
req->VolatileFileId = volatile_fid;
req->AdditionalInformation = cpu_to_le32(additional_info);
- /* 4 for RFC1001 length and 1 for Buffer */
req->BufferOffset =
- cpu_to_le16(sizeof(struct smb2_set_info_req) - 1 - 4);
+ cpu_to_le16(sizeof(struct smb2_set_info_req) - 1);
req->BufferLength = cpu_to_le32(*size);
- inc_rfc1001_len(req, *size - 1 /* Buffer */);
-
memcpy(req->Buffer, *data, *size);
+ total_len += *size;
iov[0].iov_base = (char *)req;
- /* 4 for RFC1001 length */
- iov[0].iov_len = get_rfc1002_length(req) + 4;
+ /* 1 for Buffer */
+ iov[0].iov_len = total_len - 1;
for (i = 1; i < num; i++) {
- inc_rfc1001_len(req, size[i]);
le32_add_cpu(&req->BufferLength, size[i]);
iov[i].iov_base = (char *)data[i];
iov[i].iov_len = size[i];
}
- rc = SendReceive2(xid, ses, iov, num, &resp_buftype, flags, &rsp_iov);
+ rc = smb2_send_recv(xid, ses, iov, num, &resp_buftype, flags,
+ &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base;
@@ -3310,11 +3364,17 @@ SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
__u8 oplock_level)
{
int rc;
- struct smb2_oplock_break *req = NULL;
+ struct smb2_oplock_break_req *req = NULL;
+ struct cifs_ses *ses = tcon->ses;
int flags = CIFS_OBREAK_OP;
+ unsigned int total_len;
+ struct kvec iov[1];
+ struct kvec rsp_iov;
+ int resp_buf_type;
cifs_dbg(FYI, "SMB2_oplock_break\n");
- rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req);
+ rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req,
+ &total_len);
if (rc)
return rc;
@@ -3324,9 +3384,14 @@ SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
req->VolatileFid = volatile_fid;
req->PersistentFid = persistent_fid;
req->OplockLevel = oplock_level;
- req->hdr.sync_hdr.CreditRequest = cpu_to_le16(1);
+ req->sync_hdr.CreditRequest = cpu_to_le16(1);
- rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, flags);
+ flags |= CIFS_NO_RESP;
+
+ iov[0].iov_base = (char *)req;
+ iov[0].iov_len = total_len;
+
+ rc = smb2_send_recv(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
cifs_small_buf_release(req);
if (rc) {
@@ -3355,13 +3420,15 @@ build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon, int level,
{
int rc;
struct smb2_query_info_req *req;
+ unsigned int total_len;
cifs_dbg(FYI, "Query FSInfo level %d\n", level);
if ((tcon->ses == NULL) || (tcon->ses->server == NULL))
return -EIO;
- rc = small_smb2_init(SMB2_QUERY_INFO, tcon, (void **) &req);
+ rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, (void **) &req,
+ &total_len);
if (rc)
return rc;
@@ -3369,15 +3436,14 @@ build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon, int level,
req->FileInfoClass = level;
req->PersistentFileId = persistent_fid;
req->VolatileFileId = volatile_fid;
- /* 4 for rfc1002 length field and 1 for pad */
+ /* 1 for pad */
req->InputBufferOffset =
- cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4);
+ cpu_to_le16(sizeof(struct smb2_query_info_req) - 1);
req->OutputBufferLength = cpu_to_le32(
outbuf_len + sizeof(struct smb2_query_info_rsp) - 1 - 4);
iov->iov_base = (char *)req;
- /* 4 for rfc1002 length field */
- iov->iov_len = get_rfc1002_length(req) + 4;
+ iov->iov_len = total_len;
return 0;
}
@@ -3403,7 +3469,7 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
if (encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
- rc = SendReceive2(xid, ses, &iov, 1, &resp_buftype, flags, &rsp_iov);
+ rc = smb2_send_recv(xid, ses, &iov, 1, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(iov.iov_base);
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
@@ -3459,7 +3525,7 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
if (encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
- rc = SendReceive2(xid, ses, &iov, 1, &resp_buftype, flags, &rsp_iov);
+ rc = smb2_send_recv(xid, ses, &iov, 1, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(iov.iov_base);
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
@@ -3505,34 +3571,33 @@ smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
int resp_buf_type;
unsigned int count;
int flags = CIFS_NO_RESP;
+ unsigned int total_len;
cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock);
- rc = small_smb2_init(SMB2_LOCK, tcon, (void **) &req);
+ rc = smb2_plain_req_init(SMB2_LOCK, tcon, (void **) &req, &total_len);
if (rc)
return rc;
if (encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
- req->hdr.sync_hdr.ProcessId = cpu_to_le32(pid);
+ req->sync_hdr.ProcessId = cpu_to_le32(pid);
req->LockCount = cpu_to_le16(num_lock);
req->PersistentFileId = persist_fid;
req->VolatileFileId = volatile_fid;
count = num_lock * sizeof(struct smb2_lock_element);
- inc_rfc1001_len(req, count - sizeof(struct smb2_lock_element));
iov[0].iov_base = (char *)req;
- /* 4 for rfc1002 length field and count for all locks */
- iov[0].iov_len = get_rfc1002_length(req) + 4 - count;
+ iov[0].iov_len = total_len - sizeof(struct smb2_lock_element);
iov[1].iov_base = (char *)buf;
iov[1].iov_len = count;
cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);
- rc = SendReceive2(xid, tcon->ses, iov, 2, &resp_buf_type, flags,
- &rsp_iov);
+ rc = smb2_send_recv(xid, tcon->ses, iov, 2, &resp_buf_type, flags,
+ &rsp_iov);
cifs_small_buf_release(req);
if (rc) {
cifs_dbg(FYI, "Send error in smb2_lockv = %d\n", rc);
@@ -3565,24 +3630,35 @@ SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
{
int rc;
struct smb2_lease_ack *req = NULL;
+ struct cifs_ses *ses = tcon->ses;
int flags = CIFS_OBREAK_OP;
+ unsigned int total_len;
+ struct kvec iov[1];
+ struct kvec rsp_iov;
+ int resp_buf_type;
cifs_dbg(FYI, "SMB2_lease_break\n");
- rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req);
+ rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req,
+ &total_len);
if (rc)
return rc;
if (encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
- req->hdr.sync_hdr.CreditRequest = cpu_to_le16(1);
+ req->sync_hdr.CreditRequest = cpu_to_le16(1);
req->StructureSize = cpu_to_le16(36);
- inc_rfc1001_len(req, 12);
+ total_len += 12;
memcpy(req->LeaseKey, lease_key, 16);
req->LeaseState = lease_state;
- rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, flags);
+ flags |= CIFS_NO_RESP;
+
+ iov[0].iov_base = (char *)req;
+ iov[0].iov_len = total_len;
+
+ rc = smb2_send_recv(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
cifs_small_buf_release(req);
if (rc) {
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index c2ec934be968..6eb9f9691ed4 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -195,7 +195,7 @@ struct smb2_symlink_err_rsp {
#define SMB2_CLIENT_GUID_SIZE 16
struct smb2_negotiate_req {
- struct smb2_hdr hdr;
+ struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 36 */
__le16 DialectCount;
__le16 SecurityMode;
@@ -282,7 +282,7 @@ struct smb2_negotiate_rsp {
#define SMB2_SESSION_REQ_FLAG_ENCRYPT_DATA 0x04
struct smb2_sess_setup_req {
- struct smb2_hdr hdr;
+ struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 25 */
__u8 Flags;
__u8 SecurityMode;
@@ -308,7 +308,7 @@ struct smb2_sess_setup_rsp {
} __packed;
struct smb2_logoff_req {
- struct smb2_hdr hdr;
+ struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 4 */
__le16 Reserved;
} __packed;
@@ -323,7 +323,7 @@ struct smb2_logoff_rsp {
#define SMB2_SHAREFLAG_CLUSTER_RECONNECT 0x0001
struct smb2_tree_connect_req {
- struct smb2_hdr hdr;
+ struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 9 */
__le16 Reserved; /* Flags in SMB3.1.1 */
__le16 PathOffset;
@@ -375,7 +375,7 @@ struct smb2_tree_connect_rsp {
#define SMB2_SHARE_CAP_ASYMMETRIC cpu_to_le32(0x00000080) /* 3.02 */
struct smb2_tree_disconnect_req {
- struct smb2_hdr hdr;
+ struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 4 */
__le16 Reserved;
} __packed;
@@ -496,7 +496,7 @@ struct smb2_tree_disconnect_rsp {
#define SVHDX_OPEN_DEVICE_CONTEXT 0x83CE6F1AD851E0986E34401CC9BCFCE9
struct smb2_create_req {
- struct smb2_hdr hdr;
+ struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 57 */
__u8 SecurityFlags;
__u8 RequestedOplockLevel;
@@ -753,7 +753,7 @@ struct duplicate_extents_to_file {
} __packed;
struct smb2_ioctl_req {
- struct smb2_hdr hdr;
+ struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 57 */
__u16 Reserved;
__le32 CtlCode;
@@ -789,7 +789,7 @@ struct smb2_ioctl_rsp {
/* Currently defined values for close flags */
#define SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB cpu_to_le16(0x0001)
struct smb2_close_req {
- struct smb2_hdr hdr;
+ struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 24 */
__le16 Flags;
__le32 Reserved;
@@ -812,7 +812,7 @@ struct smb2_close_rsp {
} __packed;
struct smb2_flush_req {
- struct smb2_hdr hdr;
+ struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 24 */
__le16 Reserved1;
__le32 Reserved2;
@@ -830,9 +830,9 @@ struct smb2_flush_rsp {
#define SMB2_READFLAG_READ_UNBUFFERED 0x01
/* Channel field for read and write: exactly one of following flags can be set*/
-#define SMB2_CHANNEL_NONE 0x00000000
-#define SMB2_CHANNEL_RDMA_V1 0x00000001 /* SMB3 or later */
-#define SMB2_CHANNEL_RDMA_V1_INVALIDATE 0x00000002 /* SMB3.02 or later */
+#define SMB2_CHANNEL_NONE cpu_to_le32(0x00000000)
+#define SMB2_CHANNEL_RDMA_V1 cpu_to_le32(0x00000001) /* SMB3 or later */
+#define SMB2_CHANNEL_RDMA_V1_INVALIDATE cpu_to_le32(0x00000002) /* >= SMB3.02 */
/* SMB2 read request without RFC1001 length at the beginning */
struct smb2_read_plain_req {
@@ -847,8 +847,8 @@ struct smb2_read_plain_req {
__le32 MinimumCount;
__le32 Channel; /* MBZ except for SMB3 or later */
__le32 RemainingBytes;
- __le16 ReadChannelInfoOffset; /* Reserved MBZ */
- __le16 ReadChannelInfoLength; /* Reserved MBZ */
+ __le16 ReadChannelInfoOffset;
+ __le16 ReadChannelInfoLength;
__u8 Buffer[1];
} __packed;
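Defining the channel constants as little-endian values (rather than host-order values converted at each use) lets them be assigned to and compared with the __le32 Channel field with no per-site byte swap. A small sketch; uses_rdma_channel() is a hypothetical helper:

static bool uses_rdma_channel(const struct smb2_read_plain_req *req)
{
	/* both operands are __le32, so no cpu_to_le32() at the call site */
	return req->Channel == SMB2_CHANNEL_RDMA_V1 ||
	       req->Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE;
}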
@@ -868,7 +868,7 @@ struct smb2_read_rsp {
#define SMB2_WRITEFLAG_WRITE_UNBUFFERED 0x00000002 /* SMB3.02 or later */
struct smb2_write_req {
- struct smb2_hdr hdr;
+ struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 49 */
__le16 DataOffset; /* offset from start of SMB2 header to write data */
__le32 Length;
@@ -877,8 +877,8 @@ struct smb2_write_req {
__u64 VolatileFileId; /* opaque endianness */
__le32 Channel; /* Reserved MBZ */
__le32 RemainingBytes;
- __le16 WriteChannelInfoOffset; /* Reserved MBZ */
- __le16 WriteChannelInfoLength; /* Reserved MBZ */
+ __le16 WriteChannelInfoOffset;
+ __le16 WriteChannelInfoLength;
__le32 Flags;
__u8 Buffer[1];
} __packed;
@@ -907,7 +907,7 @@ struct smb2_lock_element {
} __packed;
struct smb2_lock_req {
- struct smb2_hdr hdr;
+ struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 48 */
__le16 LockCount;
__le32 Reserved;
@@ -924,7 +924,7 @@ struct smb2_lock_rsp {
} __packed;
struct smb2_echo_req {
- struct smb2_hdr hdr;
+ struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 4 */
__u16 Reserved;
} __packed;
@@ -942,7 +942,7 @@ struct smb2_echo_rsp {
#define SMB2_REOPEN 0x10
struct smb2_query_directory_req {
- struct smb2_hdr hdr;
+ struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 33 */
__u8 FileInformationClass;
__u8 Flags;
@@ -989,7 +989,7 @@ struct smb2_query_directory_rsp {
#define SL_INDEX_SPECIFIED 0x00000004
struct smb2_query_info_req {
- struct smb2_hdr hdr;
+ struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 41 */
__u8 InfoType;
__u8 FileInfoClass;
@@ -1013,7 +1013,7 @@ struct smb2_query_info_rsp {
} __packed;
struct smb2_set_info_req {
- struct smb2_hdr hdr;
+ struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 33 */
__u8 InfoType;
__u8 FileInfoClass;
@@ -1031,7 +1031,19 @@ struct smb2_set_info_rsp {
__le16 StructureSize; /* Must be 2 */
} __packed;
-struct smb2_oplock_break {
+/* oplock break without an rfc1002 header */
+struct smb2_oplock_break_req {
+ struct smb2_sync_hdr sync_hdr;
+ __le16 StructureSize; /* Must be 24 */
+ __u8 OplockLevel;
+ __u8 Reserved;
+ __le32 Reserved2;
+ __u64 PersistentFid;
+ __u64 VolatileFid;
+} __packed;
+
+/* oplock break with an rfc1002 header */
+struct smb2_oplock_break_rsp {
struct smb2_hdr hdr;
__le16 StructureSize; /* Must be 24 */
__u8 OplockLevel;
@@ -1057,7 +1069,7 @@ struct smb2_lease_break {
} __packed;
struct smb2_lease_ack {
- struct smb2_hdr hdr;
+ struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 36 */
__le16 Reserved;
__le32 Flags;
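The header-side pattern is consistent: requests that are now framed by the transport start directly with struct smb2_sync_hdr, while messages that still carry the 4-byte RFC1001 length (such as smb2_oplock_break_rsp above) keep struct smb2_hdr. The relationship implied by the comments here is roughly the following; this is a sketch inferred from context, not the literal definition:

struct smb2_hdr {
	__be32 smb2_buf_length;		/* RFC1001 length, big endian */
	struct smb2_sync_hdr sync_hdr;	/* the on-the-wire SMB2 header */
} __packed;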
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index e9ab5227e7a8..05287b01f596 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -125,8 +125,7 @@ extern int SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms,
struct smb2_err_rsp **err_buf);
extern int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, u32 opcode,
- bool is_fsctl, bool use_ipc,
- char *in_data, u32 indatalen,
+ bool is_fsctl, char *in_data, u32 indatalen,
char **out_data, u32 *plen /* returned data len */);
extern int SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_file_id, u64 volatile_file_id);
diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
new file mode 100644
index 000000000000..5130492847eb
--- /dev/null
+++ b/fs/cifs/smbdirect.c
@@ -0,0 +1,2610 @@
+/*
+ * Copyright (C) 2017, Microsoft Corporation.
+ *
+ * Author(s): Long Li <longli@microsoft.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/highmem.h>
+#include "smbdirect.h"
+#include "cifs_debug.h"
+
+static struct smbd_response *get_empty_queue_buffer(
+ struct smbd_connection *info);
+static struct smbd_response *get_receive_buffer(
+ struct smbd_connection *info);
+static void put_receive_buffer(
+ struct smbd_connection *info,
+ struct smbd_response *response);
+static int allocate_receive_buffers(struct smbd_connection *info, int num_buf);
+static void destroy_receive_buffers(struct smbd_connection *info);
+
+static void put_empty_packet(
+ struct smbd_connection *info, struct smbd_response *response);
+static void enqueue_reassembly(
+ struct smbd_connection *info,
+ struct smbd_response *response, int data_length);
+static struct smbd_response *_get_first_reassembly(
+ struct smbd_connection *info);
+
+static int smbd_post_recv(
+ struct smbd_connection *info,
+ struct smbd_response *response);
+
+static int smbd_post_send_empty(struct smbd_connection *info);
+static int smbd_post_send_data(
+ struct smbd_connection *info,
+ struct kvec *iov, int n_vec, int remaining_data_length);
+static int smbd_post_send_page(struct smbd_connection *info,
+ struct page *page, unsigned long offset,
+ size_t size, int remaining_data_length);
+
+static void destroy_mr_list(struct smbd_connection *info);
+static int allocate_mr_list(struct smbd_connection *info);
+
+/* SMBD version number */
+#define SMBD_V1 0x0100
+
+/* Port numbers for SMBD transport */
+#define SMB_PORT 445
+#define SMBD_PORT 5445
+
+/* Address lookup and resolve timeout in ms */
+#define RDMA_RESOLVE_TIMEOUT 5000
+
+/* SMBD negotiation timeout in seconds */
+#define SMBD_NEGOTIATE_TIMEOUT 120
+
+/* SMBD minimum receive size and fragmented size as defined in [MS-SMBD] */
+#define SMBD_MIN_RECEIVE_SIZE 128
+#define SMBD_MIN_FRAGMENTED_SIZE 131072
+
+/*
+ * Default maximum number of outstanding RDMA read/write operations on this
+ * connection. This value may be decreased during QP creation if the
+ * hardware limit is lower
+ */
+#define SMBD_CM_RESPONDER_RESOURCES 32
+
+/* Maximum number of retries on data transfer operations */
+#define SMBD_CM_RETRY 6
+/* No need to retry on Receiver Not Ready since SMBD manages credits */
+#define SMBD_CM_RNR_RETRY 0
+
+/*
+ * User configurable initial values per SMBD transport connection
+ * as defined in [MS-SMBD] 3.1.1.1
+ * These may change after SMBD negotiation
+ */
+/* The local peer's maximum number of credits to grant to the peer */
+int smbd_receive_credit_max = 255;
+
+/* The number of send credits we ask the remote peer to grant us */
+int smbd_send_credit_target = 255;
+
+/* The maximum single-message size that can be sent to the remote peer */
+int smbd_max_send_size = 1364;
+
+/* The maximum fragmented upper-layer payload receive size supported */
+int smbd_max_fragmented_recv_size = 1024 * 1024;
+
+/* The maximum single-message size which can be received */
+int smbd_max_receive_size = 8192;
+
+/* The timeout to initiate send of a keepalive message on idle */
+int smbd_keep_alive_interval = 120;
+
+/*
+ * User configurable initial values for RDMA transport
+ * The actual values used may be lower and are limited by hardware capabilities
+ */
+/* Default maximum number of SGEs in a RDMA write/read */
+int smbd_max_frmr_depth = 2048;
+
+/* If the payload is smaller than this many bytes, use RDMA send/recv, not read/write */
+int rdma_readwrite_threshold = 4096;
+
+/* Transport logging functions
+ * Logging is defined as classes, which can be OR'ed together to select what
+ * is logged via the module parameter smbd_logging_class,
+ * e.g. cifs.smbd_logging_class=0xa0 will log all log_rdma_recv() and
+ * log_rdma_event()
+ */
+#define LOG_OUTGOING 0x1
+#define LOG_INCOMING 0x2
+#define LOG_READ 0x4
+#define LOG_WRITE 0x8
+#define LOG_RDMA_SEND 0x10
+#define LOG_RDMA_RECV 0x20
+#define LOG_KEEP_ALIVE 0x40
+#define LOG_RDMA_EVENT 0x80
+#define LOG_RDMA_MR 0x100
+static unsigned int smbd_logging_class;
+module_param(smbd_logging_class, uint, 0644);
+MODULE_PARM_DESC(smbd_logging_class,
+ "Logging class for SMBD transport 0x0 to 0x100");
+
+#define ERR 0x0
+#define INFO 0x1
+static unsigned int smbd_logging_level = ERR;
+module_param(smbd_logging_level, uint, 0644);
+MODULE_PARM_DESC(smbd_logging_level,
+ "Logging level for SMBD transport, 0 (default): error, 1: info");
+
+#define log_rdma(level, class, fmt, args...) \
+do { \
+ if (level <= smbd_logging_level || class & smbd_logging_class) \
+ cifs_dbg(VFS, "%s:%d " fmt, __func__, __LINE__, ##args);\
+} while (0)
+
+#define log_outgoing(level, fmt, args...) \
+ log_rdma(level, LOG_OUTGOING, fmt, ##args)
+#define log_incoming(level, fmt, args...) \
+ log_rdma(level, LOG_INCOMING, fmt, ##args)
+#define log_read(level, fmt, args...) log_rdma(level, LOG_READ, fmt, ##args)
+#define log_write(level, fmt, args...) log_rdma(level, LOG_WRITE, fmt, ##args)
+#define log_rdma_send(level, fmt, args...) \
+ log_rdma(level, LOG_RDMA_SEND, fmt, ##args)
+#define log_rdma_recv(level, fmt, args...) \
+ log_rdma(level, LOG_RDMA_RECV, fmt, ##args)
+#define log_keep_alive(level, fmt, args...) \
+ log_rdma(level, LOG_KEEP_ALIVE, fmt, ##args)
+#define log_rdma_event(level, fmt, args...) \
+ log_rdma(level, LOG_RDMA_EVENT, fmt, ##args)
+#define log_rdma_mr(level, fmt, args...) \
+ log_rdma(level, LOG_RDMA_MR, fmt, ##args)
+
+/*
+ * Destroy the transport and related RDMA and memory resources
+ * Need to go through all the pending counters and make sure no one is using
+ * the transport while it is destroyed
+ */
+static void smbd_destroy_rdma_work(struct work_struct *work)
+{
+ struct smbd_response *response;
+ struct smbd_connection *info =
+ container_of(work, struct smbd_connection, destroy_work);
+ unsigned long flags;
+
+ log_rdma_event(INFO, "destroying qp\n");
+ ib_drain_qp(info->id->qp);
+ rdma_destroy_qp(info->id);
+
+ /* Unblock all I/O waiting on the send queue */
+ wake_up_interruptible_all(&info->wait_send_queue);
+
+ log_rdma_event(INFO, "cancelling idle timer\n");
+ cancel_delayed_work_sync(&info->idle_timer_work);
+ log_rdma_event(INFO, "cancelling send immediate work\n");
+ cancel_delayed_work_sync(&info->send_immediate_work);
+
+ log_rdma_event(INFO, "wait for all send to finish\n");
+ wait_event(info->wait_smbd_send_pending,
+ info->smbd_send_pending == 0);
+
+ log_rdma_event(INFO, "wait for all recv to finish\n");
+ wake_up_interruptible(&info->wait_reassembly_queue);
+ wait_event(info->wait_smbd_recv_pending,
+ info->smbd_recv_pending == 0);
+
+ log_rdma_event(INFO, "wait for all send posted to IB to finish\n");
+ wait_event(info->wait_send_pending,
+ atomic_read(&info->send_pending) == 0);
+ wait_event(info->wait_send_payload_pending,
+ atomic_read(&info->send_payload_pending) == 0);
+
+ log_rdma_event(INFO, "freeing mr list\n");
+ wake_up_interruptible_all(&info->wait_mr);
+ wait_event(info->wait_for_mr_cleanup,
+ atomic_read(&info->mr_used_count) == 0);
+ destroy_mr_list(info);
+
+ /* It's not possible for upper layer to get to reassembly */
+ log_rdma_event(INFO, "drain the reassembly queue\n");
+ do {
+ spin_lock_irqsave(&info->reassembly_queue_lock, flags);
+ response = _get_first_reassembly(info);
+ if (response) {
+ list_del(&response->list);
+ spin_unlock_irqrestore(
+ &info->reassembly_queue_lock, flags);
+ put_receive_buffer(info, response);
+ }
+ } while (response);
+ spin_unlock_irqrestore(&info->reassembly_queue_lock, flags);
+ info->reassembly_data_length = 0;
+
+ log_rdma_event(INFO, "free receive buffers\n");
+ wait_event(info->wait_receive_queues,
+ info->count_receive_queue + info->count_empty_packet_queue
+ == info->receive_credit_max);
+ destroy_receive_buffers(info);
+
+ ib_free_cq(info->send_cq);
+ ib_free_cq(info->recv_cq);
+ ib_dealloc_pd(info->pd);
+ rdma_destroy_id(info->id);
+
+ /* free mempools */
+ mempool_destroy(info->request_mempool);
+ kmem_cache_destroy(info->request_cache);
+
+ mempool_destroy(info->response_mempool);
+ kmem_cache_destroy(info->response_cache);
+
+ info->transport_status = SMBD_DESTROYED;
+ wake_up_all(&info->wait_destroy);
+}
+
+static int smbd_process_disconnected(struct smbd_connection *info)
+{
+ schedule_work(&info->destroy_work);
+ return 0;
+}
+
+static void smbd_disconnect_rdma_work(struct work_struct *work)
+{
+ struct smbd_connection *info =
+ container_of(work, struct smbd_connection, disconnect_work);
+
+ if (info->transport_status == SMBD_CONNECTED) {
+ info->transport_status = SMBD_DISCONNECTING;
+ rdma_disconnect(info->id);
+ }
+}
+
+static void smbd_disconnect_rdma_connection(struct smbd_connection *info)
+{
+ queue_work(info->workqueue, &info->disconnect_work);
+}
+
+/* Upcall from RDMA CM */
+static int smbd_conn_upcall(
+ struct rdma_cm_id *id, struct rdma_cm_event *event)
+{
+ struct smbd_connection *info = id->context;
+
+ log_rdma_event(INFO, "event=%d status=%d\n",
+ event->event, event->status);
+
+ switch (event->event) {
+ case RDMA_CM_EVENT_ADDR_RESOLVED:
+ case RDMA_CM_EVENT_ROUTE_RESOLVED:
+ info->ri_rc = 0;
+ complete(&info->ri_done);
+ break;
+
+ case RDMA_CM_EVENT_ADDR_ERROR:
+ info->ri_rc = -EHOSTUNREACH;
+ complete(&info->ri_done);
+ break;
+
+ case RDMA_CM_EVENT_ROUTE_ERROR:
+ info->ri_rc = -ENETUNREACH;
+ complete(&info->ri_done);
+ break;
+
+ case RDMA_CM_EVENT_ESTABLISHED:
+ log_rdma_event(INFO, "connected event=%d\n", event->event);
+ info->transport_status = SMBD_CONNECTED;
+ wake_up_interruptible(&info->conn_wait);
+ break;
+
+ case RDMA_CM_EVENT_CONNECT_ERROR:
+ case RDMA_CM_EVENT_UNREACHABLE:
+ case RDMA_CM_EVENT_REJECTED:
+ log_rdma_event(INFO, "connecting failed event=%d\n", event->event);
+ info->transport_status = SMBD_DISCONNECTED;
+ wake_up_interruptible(&info->conn_wait);
+ break;
+
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ case RDMA_CM_EVENT_DISCONNECTED:
+ /* This happens when we fail the negotiation */
+ if (info->transport_status == SMBD_NEGOTIATE_FAILED) {
+ info->transport_status = SMBD_DISCONNECTED;
+ wake_up(&info->conn_wait);
+ break;
+ }
+
+ info->transport_status = SMBD_DISCONNECTED;
+ smbd_process_disconnected(info);
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/* Upcall from RDMA QP */
+static void
+smbd_qp_async_error_upcall(struct ib_event *event, void *context)
+{
+ struct smbd_connection *info = context;
+
+ log_rdma_event(ERR, "%s on device %s info %p\n",
+ ib_event_msg(event->event), event->device->name, info);
+
+ switch (event->event) {
+ case IB_EVENT_CQ_ERR:
+ case IB_EVENT_QP_FATAL:
+ smbd_disconnect_rdma_connection(info);
+
+ default:
+ break;
+ }
+}
+
+static inline void *smbd_request_payload(struct smbd_request *request)
+{
+ return (void *)request->packet;
+}
+
+static inline void *smbd_response_payload(struct smbd_response *response)
+{
+ return (void *)response->packet;
+}
+
+/* Called when a RDMA send is done */
+static void send_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ int i;
+ struct smbd_request *request =
+ container_of(wc->wr_cqe, struct smbd_request, cqe);
+
+ log_rdma_send(INFO, "smbd_request %p completed wc->status=%d\n",
+ request, wc->status);
+
+ if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) {
+ log_rdma_send(ERR, "wc->status=%d wc->opcode=%d\n",
+ wc->status, wc->opcode);
+ smbd_disconnect_rdma_connection(request->info);
+ }
+
+ for (i = 0; i < request->num_sge; i++)
+ ib_dma_unmap_single(request->info->id->device,
+ request->sge[i].addr,
+ request->sge[i].length,
+ DMA_TO_DEVICE);
+
+ if (request->has_payload) {
+ if (atomic_dec_and_test(&request->info->send_payload_pending))
+ wake_up(&request->info->wait_send_payload_pending);
+ } else {
+ if (atomic_dec_and_test(&request->info->send_pending))
+ wake_up(&request->info->wait_send_pending);
+ }
+
+ mempool_free(request, request->info->request_mempool);
+}
+
+static void dump_smbd_negotiate_resp(struct smbd_negotiate_resp *resp)
+{
+ log_rdma_event(INFO, "resp message min_version %u max_version %u "
+ "negotiated_version %u credits_requested %u "
+ "credits_granted %u status %u max_readwrite_size %u "
+ "preferred_send_size %u max_receive_size %u "
+ "max_fragmented_size %u\n",
+ resp->min_version, resp->max_version, resp->negotiated_version,
+ resp->credits_requested, resp->credits_granted, resp->status,
+ resp->max_readwrite_size, resp->preferred_send_size,
+ resp->max_receive_size, resp->max_fragmented_size);
+}
+
+/*
+ * Process a negotiation response message, according to [MS-SMBD] 3.1.5.7
+ * response, packet_length: the negotiation response message
+ * return value: true if negotiation is a success, false if failed
+ */
+static bool process_negotiation_response(
+ struct smbd_response *response, int packet_length)
+{
+ struct smbd_connection *info = response->info;
+ struct smbd_negotiate_resp *packet = smbd_response_payload(response);
+
+ if (packet_length < sizeof(struct smbd_negotiate_resp)) {
+ log_rdma_event(ERR,
+ "error: packet_length=%d\n", packet_length);
+ return false;
+ }
+
+ if (le16_to_cpu(packet->negotiated_version) != SMBD_V1) {
+ log_rdma_event(ERR, "error: negotiated_version=%x\n",
+ le16_to_cpu(packet->negotiated_version));
+ return false;
+ }
+ info->protocol = le16_to_cpu(packet->negotiated_version);
+
+ if (packet->credits_requested == 0) {
+ log_rdma_event(ERR, "error: credits_requested==0\n");
+ return false;
+ }
+ info->receive_credit_target = le16_to_cpu(packet->credits_requested);
+
+ if (packet->credits_granted == 0) {
+ log_rdma_event(ERR, "error: credits_granted==0\n");
+ return false;
+ }
+ atomic_set(&info->send_credits, le16_to_cpu(packet->credits_granted));
+
+ atomic_set(&info->receive_credits, 0);
+
+ if (le32_to_cpu(packet->preferred_send_size) > info->max_receive_size) {
+ log_rdma_event(ERR, "error: preferred_send_size=%d\n",
+ le32_to_cpu(packet->preferred_send_size));
+ return false;
+ }
+ info->max_receive_size = le32_to_cpu(packet->preferred_send_size);
+
+ if (le32_to_cpu(packet->max_receive_size) < SMBD_MIN_RECEIVE_SIZE) {
+ log_rdma_event(ERR, "error: max_receive_size=%d\n",
+ le32_to_cpu(packet->max_receive_size));
+ return false;
+ }
+ info->max_send_size = min_t(int, info->max_send_size,
+ le32_to_cpu(packet->max_receive_size));
+
+ if (le32_to_cpu(packet->max_fragmented_size) <
+ SMBD_MIN_FRAGMENTED_SIZE) {
+ log_rdma_event(ERR, "error: max_fragmented_size=%d\n",
+ le32_to_cpu(packet->max_fragmented_size));
+ return false;
+ }
+ info->max_fragmented_send_size =
+ le32_to_cpu(packet->max_fragmented_size);
+ info->rdma_readwrite_threshold =
+ rdma_readwrite_threshold > info->max_fragmented_send_size ?
+ info->max_fragmented_send_size :
+ rdma_readwrite_threshold;
+
+
+ info->max_readwrite_size = min_t(u32,
+ le32_to_cpu(packet->max_readwrite_size),
+ info->max_frmr_depth * PAGE_SIZE);
+ info->max_frmr_depth = info->max_readwrite_size / PAGE_SIZE;
+
+ return true;
+}
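To make the final clamp concrete: with the default smbd_max_frmr_depth of 2048 and a 4 KiB PAGE_SIZE (an architecture-dependent assumption), the local ceiling is 2048 * 4096 bytes = 8 MiB. A peer advertising a larger max_readwrite_size is clamped to 8 MiB, and max_frmr_depth is then recomputed as 8 MiB / 4 KiB = 2048 pages.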
+
+/*
+ * Check and schedule sending an immediate packet
+ * This is used to extend credits to the remote peer to keep the transport busy
+ */
+static void check_and_send_immediate(struct smbd_connection *info)
+{
+ if (info->transport_status != SMBD_CONNECTED)
+ return;
+
+ info->send_immediate = true;
+
+ /*
+ * Promptly send a packet if our peer is running low on receive
+ * credits
+ */
+ if (atomic_read(&info->receive_credits) <
+ info->receive_credit_target - 1)
+ queue_delayed_work(
+ info->workqueue, &info->send_immediate_work, 0);
+}
+
+static void smbd_post_send_credits(struct work_struct *work)
+{
+ int ret = 0;
+ int use_receive_queue = 1;
+ int rc;
+ struct smbd_response *response;
+ struct smbd_connection *info =
+ container_of(work, struct smbd_connection,
+ post_send_credits_work);
+
+ if (info->transport_status != SMBD_CONNECTED) {
+ wake_up(&info->wait_receive_queues);
+ return;
+ }
+
+ if (info->receive_credit_target >
+ atomic_read(&info->receive_credits)) {
+ while (true) {
+ if (use_receive_queue)
+ response = get_receive_buffer(info);
+ else
+ response = get_empty_queue_buffer(info);
+ if (!response) {
+ /* now switch to empty packet queue */
+ if (use_receive_queue) {
+ use_receive_queue = 0;
+ continue;
+ } else
+ break;
+ }
+
+ response->type = SMBD_TRANSFER_DATA;
+ response->first_segment = false;
+ rc = smbd_post_recv(info, response);
+ if (rc) {
+ log_rdma_recv(ERR,
+ "post_recv failed rc=%d\n", rc);
+ put_receive_buffer(info, response);
+ break;
+ }
+
+ ret++;
+ }
+ }
+
+ spin_lock(&info->lock_new_credits_offered);
+ info->new_credits_offered += ret;
+ spin_unlock(&info->lock_new_credits_offered);
+
+ atomic_add(ret, &info->receive_credits);
+
+ /* Check if we can post new receive and grant credits to peer */
+ check_and_send_immediate(info);
+}
+
+static void smbd_recv_done_work(struct work_struct *work)
+{
+ struct smbd_connection *info =
+ container_of(work, struct smbd_connection, recv_done_work);
+
+ /*
+ * We may have new send credits granted from the remote peer.
+ * If any sender is blocked on lack of credits, unblock it
+ */
+ if (atomic_read(&info->send_credits))
+ wake_up_interruptible(&info->wait_send_queue);
+
+ /*
+ * Check if we need to send something to remote peer to
+ * grant more credits or respond to KEEP_ALIVE packet
+ */
+ check_and_send_immediate(info);
+}
+
+/* Called from softirq, when recv is done */
+static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct smbd_data_transfer *data_transfer;
+ struct smbd_response *response =
+ container_of(wc->wr_cqe, struct smbd_response, cqe);
+ struct smbd_connection *info = response->info;
+ int data_length = 0;
+
+ log_rdma_recv(INFO, "response=%p type=%d wc status=%d wc opcode %d "
+ "byte_len=%d pkey_index=%x\n",
+ response, response->type, wc->status, wc->opcode,
+ wc->byte_len, wc->pkey_index);
+
+ if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
+ log_rdma_recv(INFO, "wc->status=%d opcode=%d\n",
+ wc->status, wc->opcode);
+ smbd_disconnect_rdma_connection(info);
+ goto error;
+ }
+
+ ib_dma_sync_single_for_cpu(
+ wc->qp->device,
+ response->sge.addr,
+ response->sge.length,
+ DMA_FROM_DEVICE);
+
+ switch (response->type) {
+ /* SMBD negotiation response */
+ case SMBD_NEGOTIATE_RESP:
+ dump_smbd_negotiate_resp(smbd_response_payload(response));
+ info->full_packet_received = true;
+ info->negotiate_done =
+ process_negotiation_response(response, wc->byte_len);
+ complete(&info->negotiate_completion);
+ break;
+
+ /* SMBD data transfer packet */
+ case SMBD_TRANSFER_DATA:
+ data_transfer = smbd_response_payload(response);
+ data_length = le32_to_cpu(data_transfer->data_length);
+
+ /*
+ * If this is a packet with a data payload, place the data in
+ * the reassembly queue and wake up the reading thread
+ */
+ if (data_length) {
+ if (info->full_packet_received)
+ response->first_segment = true;
+
+ if (le32_to_cpu(data_transfer->remaining_data_length))
+ info->full_packet_received = false;
+ else
+ info->full_packet_received = true;
+
+ enqueue_reassembly(
+ info,
+ response,
+ data_length);
+ } else
+ put_empty_packet(info, response);
+
+ if (data_length)
+ wake_up_interruptible(&info->wait_reassembly_queue);
+
+ atomic_dec(&info->receive_credits);
+ info->receive_credit_target =
+ le16_to_cpu(data_transfer->credits_requested);
+ atomic_add(le16_to_cpu(data_transfer->credits_granted),
+ &info->send_credits);
+
+ log_incoming(INFO, "data flags %d data_offset %d "
+ "data_length %d remaining_data_length %d\n",
+ le16_to_cpu(data_transfer->flags),
+ le32_to_cpu(data_transfer->data_offset),
+ le32_to_cpu(data_transfer->data_length),
+ le32_to_cpu(data_transfer->remaining_data_length));
+
+ /* Send a KEEP_ALIVE response right away if requested */
+ info->keep_alive_requested = KEEP_ALIVE_NONE;
+ if (le16_to_cpu(data_transfer->flags) &
+ SMB_DIRECT_RESPONSE_REQUESTED) {
+ info->keep_alive_requested = KEEP_ALIVE_PENDING;
+ }
+
+ queue_work(info->workqueue, &info->recv_done_work);
+ return;
+
+ default:
+ log_rdma_recv(ERR,
+ "unexpected response type=%d\n", response->type);
+ }
+
+error:
+ put_receive_buffer(info, response);
+}
+
+static struct rdma_cm_id *smbd_create_id(
+ struct smbd_connection *info,
+ struct sockaddr *dstaddr, int port)
+{
+ struct rdma_cm_id *id;
+ int rc;
+ __be16 *sport;
+
+ id = rdma_create_id(&init_net, smbd_conn_upcall, info,
+ RDMA_PS_TCP, IB_QPT_RC);
+ if (IS_ERR(id)) {
+ rc = PTR_ERR(id);
+ log_rdma_event(ERR, "rdma_create_id() failed %i\n", rc);
+ return id;
+ }
+
+ if (dstaddr->sa_family == AF_INET6)
+ sport = &((struct sockaddr_in6 *)dstaddr)->sin6_port;
+ else
+ sport = &((struct sockaddr_in *)dstaddr)->sin_port;
+
+ *sport = htons(port);
+
+ init_completion(&info->ri_done);
+ info->ri_rc = -ETIMEDOUT;
+
+ rc = rdma_resolve_addr(id, NULL, (struct sockaddr *)dstaddr,
+ RDMA_RESOLVE_TIMEOUT);
+ if (rc) {
+ log_rdma_event(ERR, "rdma_resolve_addr() failed %i\n", rc);
+ goto out;
+ }
+ wait_for_completion_interruptible_timeout(
+ &info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
+ rc = info->ri_rc;
+ if (rc) {
+ log_rdma_event(ERR, "rdma_resolve_addr() completed %i\n", rc);
+ goto out;
+ }
+
+ info->ri_rc = -ETIMEDOUT;
+ rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
+ if (rc) {
+ log_rdma_event(ERR, "rdma_resolve_route() failed %i\n", rc);
+ goto out;
+ }
+ wait_for_completion_interruptible_timeout(
+ &info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
+ rc = info->ri_rc;
+ if (rc) {
+ log_rdma_event(ERR, "rdma_resolve_route() completed %i\n", rc);
+ goto out;
+ }
+
+ return id;
+
+out:
+ rdma_destroy_id(id);
+ return ERR_PTR(rc);
+}
+
+/*
+ * Test if FRWR (Fast Registration Work Requests) is supported on the device
+ * This implementation requires FRWR for RDMA read/write
+ * return value: true if it is supported
+ */
+static bool frwr_is_supported(struct ib_device_attr *attrs)
+{
+ if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
+ return false;
+ if (attrs->max_fast_reg_page_list_len == 0)
+ return false;
+ return true;
+}
+
+static int smbd_ia_open(
+ struct smbd_connection *info,
+ struct sockaddr *dstaddr, int port)
+{
+ int rc;
+
+ info->id = smbd_create_id(info, dstaddr, port);
+ if (IS_ERR(info->id)) {
+ rc = PTR_ERR(info->id);
+ goto out1;
+ }
+
+ if (!frwr_is_supported(&info->id->device->attrs)) {
+ log_rdma_event(ERR,
+ "Fast Registration Work Requests "
+ "(FRWR) is not supported\n");
+ log_rdma_event(ERR,
+ "Device capability flags = %llx "
+ "max_fast_reg_page_list_len = %u\n",
+ info->id->device->attrs.device_cap_flags,
+ info->id->device->attrs.max_fast_reg_page_list_len);
+ rc = -EPROTONOSUPPORT;
+ goto out2;
+ }
+ info->max_frmr_depth = min_t(int,
+ smbd_max_frmr_depth,
+ info->id->device->attrs.max_fast_reg_page_list_len);
+ info->mr_type = IB_MR_TYPE_MEM_REG;
+ if (info->id->device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
+ info->mr_type = IB_MR_TYPE_SG_GAPS;
+
+ info->pd = ib_alloc_pd(info->id->device, 0);
+ if (IS_ERR(info->pd)) {
+ rc = PTR_ERR(info->pd);
+ log_rdma_event(ERR, "ib_alloc_pd() returned %d\n", rc);
+ goto out2;
+ }
+
+ return 0;
+
+out2:
+ rdma_destroy_id(info->id);
+ info->id = NULL;
+
+out1:
+ return rc;
+}
+
+/*
+ * Send a negotiation request message to the peer
+ * The negotiation procedure is in [MS-SMBD] 3.1.5.2 and 3.1.5.3
+ * After negotiation, the transport is connected and ready for
+ * carrying upper layer SMB payload
+ */
+static int smbd_post_send_negotiate_req(struct smbd_connection *info)
+{
+ struct ib_send_wr send_wr, *send_wr_fail;
+ int rc = -ENOMEM;
+ struct smbd_request *request;
+ struct smbd_negotiate_req *packet;
+
+ request = mempool_alloc(info->request_mempool, GFP_KERNEL);
+ if (!request)
+ return rc;
+
+ request->info = info;
+
+ packet = smbd_request_payload(request);
+ packet->min_version = cpu_to_le16(SMBD_V1);
+ packet->max_version = cpu_to_le16(SMBD_V1);
+ packet->reserved = 0;
+ packet->credits_requested = cpu_to_le16(info->send_credit_target);
+ packet->preferred_send_size = cpu_to_le32(info->max_send_size);
+ packet->max_receive_size = cpu_to_le32(info->max_receive_size);
+ packet->max_fragmented_size =
+ cpu_to_le32(info->max_fragmented_recv_size);
+
+ request->num_sge = 1;
+ request->sge[0].addr = ib_dma_map_single(
+ info->id->device, (void *)packet,
+ sizeof(*packet), DMA_TO_DEVICE);
+ if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
+ rc = -EIO;
+ goto dma_mapping_failed;
+ }
+
+ request->sge[0].length = sizeof(*packet);
+ request->sge[0].lkey = info->pd->local_dma_lkey;
+
+ ib_dma_sync_single_for_device(
+ info->id->device, request->sge[0].addr,
+ request->sge[0].length, DMA_TO_DEVICE);
+
+ request->cqe.done = send_done;
+
+ send_wr.next = NULL;
+ send_wr.wr_cqe = &request->cqe;
+ send_wr.sg_list = request->sge;
+ send_wr.num_sge = request->num_sge;
+ send_wr.opcode = IB_WR_SEND;
+ send_wr.send_flags = IB_SEND_SIGNALED;
+
+ log_rdma_send(INFO, "sge addr=%llx length=%x lkey=%x\n",
+ request->sge[0].addr,
+ request->sge[0].length, request->sge[0].lkey);
+
+ request->has_payload = false;
+ atomic_inc(&info->send_pending);
+ rc = ib_post_send(info->id->qp, &send_wr, &send_wr_fail);
+ if (!rc)
+ return 0;
+
+ /* if we reach here, post send failed */
+ log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
+ atomic_dec(&info->send_pending);
+ ib_dma_unmap_single(info->id->device, request->sge[0].addr,
+ request->sge[0].length, DMA_TO_DEVICE);
+
+dma_mapping_failed:
+ mempool_free(request, info->request_mempool);
+ return rc;
+}
+
+/*
+ * Extend the credits to remote peer
+ * This implements [MS-SMBD] 3.1.5.9
+ * The idea is that we should extend credits to the remote peer as quickly
+ * as allowed, to maintain data flow. We allocate as many receive buffers
+ * as possible, and extend the receive credits to the remote peer
+ * return value: the number of new credits being granted.
+ */
+static int manage_credits_prior_sending(struct smbd_connection *info)
+{
+ int new_credits;
+
+ spin_lock(&info->lock_new_credits_offered);
+ new_credits = info->new_credits_offered;
+ info->new_credits_offered = 0;
+ spin_unlock(&info->lock_new_credits_offered);
+
+ return new_credits;
+}
+
+/*
+ * Check if we need to send a KEEP_ALIVE message
+ * The idle connection timer triggers a KEEP_ALIVE message when it expires.
+ * SMB_DIRECT_RESPONSE_REQUESTED is set in the message flags to have the peer
+ * send back a response.
+ * return value:
+ * 1 if SMB_DIRECT_RESPONSE_REQUESTED needs to be set
+ * 0: otherwise
+ */
+static int manage_keep_alive_before_sending(struct smbd_connection *info)
+{
+ if (info->keep_alive_requested == KEEP_ALIVE_PENDING) {
+ info->keep_alive_requested = KEEP_ALIVE_SENT;
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Build and prepare the SMBD packet header
+ * This function waits for available send credits and builds an SMBD packet
+ * header. The caller may then optionally append a payload to the packet
+ * after the header
+ * input values
+ * size: the size of the payload
+ * remaining_data_length: remaining data to send if this is part of a
+ * fragmented packet
+ * output values
+ * request_out: the request allocated from this function
+ * return values: 0 on success, otherwise actual error code returned
+ */
+static int smbd_create_header(struct smbd_connection *info,
+ int size, int remaining_data_length,
+ struct smbd_request **request_out)
+{
+ struct smbd_request *request;
+ struct smbd_data_transfer *packet;
+ int header_length;
+ int rc;
+
+ /* Wait for send credits. A SMBD packet needs one credit */
+ rc = wait_event_interruptible(info->wait_send_queue,
+ atomic_read(&info->send_credits) > 0 ||
+ info->transport_status != SMBD_CONNECTED);
+ if (rc)
+ return rc;
+
+ if (info->transport_status != SMBD_CONNECTED) {
+ log_outgoing(ERR, "disconnected not sending\n");
+ return -ENOENT;
+ }
+ atomic_dec(&info->send_credits);
+
+ request = mempool_alloc(info->request_mempool, GFP_KERNEL);
+ if (!request) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ request->info = info;
+
+ /* Fill in the packet header */
+ packet = smbd_request_payload(request);
+ packet->credits_requested = cpu_to_le16(info->send_credit_target);
+ packet->credits_granted =
+ cpu_to_le16(manage_credits_prior_sending(info));
+ info->send_immediate = false;
+
+ packet->flags = 0;
+ if (manage_keep_alive_before_sending(info))
+ packet->flags |= cpu_to_le16(SMB_DIRECT_RESPONSE_REQUESTED);
+
+ packet->reserved = 0;
+ if (!size)
+ packet->data_offset = 0;
+ else
+ packet->data_offset = cpu_to_le32(24);
+ packet->data_length = cpu_to_le32(size);
+ packet->remaining_data_length = cpu_to_le32(remaining_data_length);
+ packet->padding = 0;
+
+ log_outgoing(INFO, "credits_requested=%d credits_granted=%d "
+ "data_offset=%d data_length=%d remaining_data_length=%d\n",
+ le16_to_cpu(packet->credits_requested),
+ le16_to_cpu(packet->credits_granted),
+ le32_to_cpu(packet->data_offset),
+ le32_to_cpu(packet->data_length),
+ le32_to_cpu(packet->remaining_data_length));
+
+ /* Map the packet to DMA */
+ header_length = sizeof(struct smbd_data_transfer);
+ /* If this is a packet without payload, don't send padding */
+ if (!size)
+ header_length = offsetof(struct smbd_data_transfer, padding);
+
+ request->num_sge = 1;
+ request->sge[0].addr = ib_dma_map_single(info->id->device,
+ (void *)packet,
+ header_length,
+ DMA_BIDIRECTIONAL);
+ if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
+ mempool_free(request, info->request_mempool);
+ rc = -EIO;
+ goto err;
+ }
+
+ request->sge[0].length = header_length;
+ request->sge[0].lkey = info->pd->local_dma_lkey;
+
+ *request_out = request;
+ return 0;
+
+err:
+ atomic_inc(&info->send_credits);
+ return rc;
+}
+
+static void smbd_destroy_header(struct smbd_connection *info,
+ struct smbd_request *request)
+{
+
+ ib_dma_unmap_single(info->id->device,
+ request->sge[0].addr,
+ request->sge[0].length,
+ DMA_TO_DEVICE);
+ mempool_free(request, info->request_mempool);
+ atomic_inc(&info->send_credits);
+}
+
+/* Post the send request */
+static int smbd_post_send(struct smbd_connection *info,
+ struct smbd_request *request, bool has_payload)
+{
+ struct ib_send_wr send_wr, *send_wr_fail;
+ int rc, i;
+
+ for (i = 0; i < request->num_sge; i++) {
+ log_rdma_send(INFO,
+ "rdma_request sge[%d] addr=%llu legnth=%u\n",
+ i, request->sge[0].addr, request->sge[0].length);
+ ib_dma_sync_single_for_device(
+ info->id->device,
+ request->sge[i].addr,
+ request->sge[i].length,
+ DMA_TO_DEVICE);
+ }
+
+ request->cqe.done = send_done;
+
+ send_wr.next = NULL;
+ send_wr.wr_cqe = &request->cqe;
+ send_wr.sg_list = request->sge;
+ send_wr.num_sge = request->num_sge;
+ send_wr.opcode = IB_WR_SEND;
+ send_wr.send_flags = IB_SEND_SIGNALED;
+
+ if (has_payload) {
+ request->has_payload = true;
+ atomic_inc(&info->send_payload_pending);
+ } else {
+ request->has_payload = false;
+ atomic_inc(&info->send_pending);
+ }
+
+ rc = ib_post_send(info->id->qp, &send_wr, &send_wr_fail);
+ if (rc) {
+ log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
+ if (has_payload) {
+ if (atomic_dec_and_test(&info->send_payload_pending))
+ wake_up(&info->wait_send_payload_pending);
+ } else {
+ if (atomic_dec_and_test(&info->send_pending))
+ wake_up(&info->wait_send_pending);
+ }
+ } else
+ /* Reset timer for idle connection after packet is sent */
+ mod_delayed_work(info->workqueue, &info->idle_timer_work,
+ info->keep_alive_interval*HZ);
+
+ return rc;
+}
+
+static int smbd_post_send_sgl(struct smbd_connection *info,
+ struct scatterlist *sgl, int data_length, int remaining_data_length)
+{
+ int num_sgs;
+ int i, rc;
+ struct smbd_request *request;
+ struct scatterlist *sg;
+
+ rc = smbd_create_header(
+ info, data_length, remaining_data_length, &request);
+ if (rc)
+ return rc;
+
+ num_sgs = sgl ? sg_nents(sgl) : 0;
+ for_each_sg(sgl, sg, num_sgs, i) {
+ request->sge[i+1].addr =
+ ib_dma_map_page(info->id->device, sg_page(sg),
+ sg->offset, sg->length, DMA_BIDIRECTIONAL);
+ if (ib_dma_mapping_error(
+ info->id->device, request->sge[i+1].addr)) {
+ rc = -EIO;
+ request->sge[i+1].addr = 0;
+ goto dma_mapping_failure;
+ }
+ request->sge[i+1].length = sg->length;
+ request->sge[i+1].lkey = info->pd->local_dma_lkey;
+ request->num_sge++;
+ }
+
+ rc = smbd_post_send(info, request, data_length);
+ if (!rc)
+ return 0;
+
+dma_mapping_failure:
+ for (i = 1; i < request->num_sge; i++)
+ if (request->sge[i].addr)
+ ib_dma_unmap_single(info->id->device,
+ request->sge[i].addr,
+ request->sge[i].length,
+ DMA_TO_DEVICE);
+ smbd_destroy_header(info, request);
+ return rc;
+}
+
+/*
+ * Send a page
+ * page: the page to send
+ * offset: offset in the page to send
+ * size: length in the page to send
+ * remaining_data_length: remaining data to send in this payload
+ */
+static int smbd_post_send_page(struct smbd_connection *info, struct page *page,
+ unsigned long offset, size_t size, int remaining_data_length)
+{
+ struct scatterlist sgl;
+
+ sg_init_table(&sgl, 1);
+ sg_set_page(&sgl, page, size, offset);
+
+ return smbd_post_send_sgl(info, &sgl, size, remaining_data_length);
+}
+
+/*
+ * Send an empty message
+ * An empty message is used to extend credits to the peer for keep-alive
+ * while there is no upper layer payload to send at the time
+ */
+static int smbd_post_send_empty(struct smbd_connection *info)
+{
+ info->count_send_empty++;
+ return smbd_post_send_sgl(info, NULL, 0, 0);
+}
+
+/*
+ * Send a data buffer
+ * iov: the iov array describing the data buffers
+ * n_vec: number of iov array
+ * remaining_data_length: remaining data to send following this packet
+ * in segmented SMBD packet
+ */
+static int smbd_post_send_data(
+ struct smbd_connection *info, struct kvec *iov, int n_vec,
+ int remaining_data_length)
+{
+ int i;
+ u32 data_length = 0;
+ struct scatterlist sgl[SMBDIRECT_MAX_SGE];
+
+ if (n_vec > SMBDIRECT_MAX_SGE) {
+ cifs_dbg(VFS, "Can't fit data to SGL, n_vec=%d\n", n_vec);
+ return -ENOMEM;
+ }
+
+ sg_init_table(sgl, n_vec);
+ for (i = 0; i < n_vec; i++) {
+ data_length += iov[i].iov_len;
+ sg_set_buf(&sgl[i], iov[i].iov_base, iov[i].iov_len);
+ }
+
+ return smbd_post_send_sgl(info, sgl, data_length, remaining_data_length);
+}
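A sketch of how a caller could use remaining_data_length to send a payload as a chain of fragmented SMBD packets; send_fragmented_sketch() is hypothetical, and for simplicity it sends one packet per kvec rather than packing up to max_send_size bytes per packet as a real sender would:

/* hypothetical caller: each packet reports how much data still follows */
static int send_fragmented_sketch(struct smbd_connection *info,
				  struct kvec *vecs, int n_vec, int total_bytes)
{
	int i, rc, remaining = total_bytes;

	for (i = 0; i < n_vec; i++) {
		remaining -= vecs[i].iov_len;
		rc = smbd_post_send_data(info, &vecs[i], 1, remaining);
		if (rc)
			return rc;
	}
	return 0;	/* the last packet carried remaining_data_length == 0 */
}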
+
+/*
+ * Post a receive request to the transport
+ * The remote peer can only send data when a receive request is posted
+ * The interaction is controlled by the send/receive credit system
+ */
+static int smbd_post_recv(
+ struct smbd_connection *info, struct smbd_response *response)
+{
+ struct ib_recv_wr recv_wr, *recv_wr_fail = NULL;
+ int rc = -EIO;
+
+ response->sge.addr = ib_dma_map_single(
+ info->id->device, response->packet,
+ info->max_receive_size, DMA_FROM_DEVICE);
+ if (ib_dma_mapping_error(info->id->device, response->sge.addr))
+ return rc;
+
+ response->sge.length = info->max_receive_size;
+ response->sge.lkey = info->pd->local_dma_lkey;
+
+ response->cqe.done = recv_done;
+
+ recv_wr.wr_cqe = &response->cqe;
+ recv_wr.next = NULL;
+ recv_wr.sg_list = &response->sge;
+ recv_wr.num_sge = 1;
+
+ rc = ib_post_recv(info->id->qp, &recv_wr, &recv_wr_fail);
+ if (rc) {
+ ib_dma_unmap_single(info->id->device, response->sge.addr,
+ response->sge.length, DMA_FROM_DEVICE);
+
+ log_rdma_recv(ERR, "ib_post_recv failed rc=%d\n", rc);
+ }
+
+ return rc;
+}
+
+/* Perform SMBD negotiate according to [MS-SMBD] 3.1.5.2 */
+static int smbd_negotiate(struct smbd_connection *info)
+{
+ int rc;
+ struct smbd_response *response = get_receive_buffer(info);
+
+ response->type = SMBD_NEGOTIATE_RESP;
+ rc = smbd_post_recv(info, response);
+ log_rdma_event(INFO,
+ "smbd_post_recv rc=%d iov.addr=%llx iov.length=%x "
+ "iov.lkey=%x\n",
+ rc, response->sge.addr,
+ response->sge.length, response->sge.lkey);
+ if (rc)
+ return rc;
+
+ init_completion(&info->negotiate_completion);
+ info->negotiate_done = false;
+ rc = smbd_post_send_negotiate_req(info);
+ if (rc)
+ return rc;
+
+ rc = wait_for_completion_interruptible_timeout(
+ &info->negotiate_completion, SMBD_NEGOTIATE_TIMEOUT * HZ);
+ log_rdma_event(INFO, "wait_for_completion_timeout rc=%d\n", rc);
+
+ if (info->negotiate_done)
+ return 0;
+
+ if (rc == 0)
+ rc = -ETIMEDOUT;
+ else if (rc == -ERESTARTSYS)
+ rc = -EINTR;
+ else
+ rc = -ENOTCONN;
+
+ return rc;
+}
+
+static void put_empty_packet(
+ struct smbd_connection *info, struct smbd_response *response)
+{
+ spin_lock(&info->empty_packet_queue_lock);
+ list_add_tail(&response->list, &info->empty_packet_queue);
+ info->count_empty_packet_queue++;
+ spin_unlock(&info->empty_packet_queue_lock);
+
+ queue_work(info->workqueue, &info->post_send_credits_work);
+}
+
+/*
+ * Implement Connection.FragmentReassemblyBuffer defined in [MS-SMBD] 3.1.1.1
+ * This is a queue for reassembling upper layer payloads and presenting them
+ * to the upper layer. All incoming payloads go to the reassembly queue,
+ * regardless of whether reassembly is required. The upper layer code reads
+ * from the queue for all incoming payloads.
+ * Put a received packet to the reassembly queue
+ * response: the packet received
+ * data_length: the size of payload in this packet
+ */
+static void enqueue_reassembly(
+ struct smbd_connection *info,
+ struct smbd_response *response,
+ int data_length)
+{
+ spin_lock(&info->reassembly_queue_lock);
+ list_add_tail(&response->list, &info->reassembly_queue);
+ info->reassembly_queue_length++;
+ /*
+ * Make sure reassembly_data_length is updated after list and
+ * reassembly_queue_length are updated. On the dequeue side
+ * reassembly_data_length is checked without a lock to determine
+ * if reassembly_queue_length and the list are up to date
+ */
+ virt_wmb();
+ info->reassembly_data_length += data_length;
+ spin_unlock(&info->reassembly_queue_lock);
+ info->count_reassembly_queue++;
+ info->count_enqueue_reassembly_queue++;
+}
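The virt_wmb() above pairs with a read barrier on the consumer side. A minimal sketch of that side, assuming a reader in the transport's receive path (reassembly_has_data() is hypothetical; the real consumer is not part of this hunk):

static bool reassembly_has_data(struct smbd_connection *info)
{
	/* lock-free check of the producer-updated length */
	if (info->reassembly_data_length == 0)
		return false;
	/*
	 * Pairs with virt_wmb() in enqueue_reassembly(): once a non-zero
	 * length is observed, the preceding list_add_tail() and the
	 * reassembly_queue_length update are visible as well.
	 */
	virt_rmb();
	return !list_empty(&info->reassembly_queue);
}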
+
+/*
+ * Get the first entry at the front of reassembly queue
+ * Caller is responsible for locking
+ * return value: the first entry if any, NULL if queue is empty
+ */
+static struct smbd_response *_get_first_reassembly(struct smbd_connection *info)
+{
+ struct smbd_response *ret = NULL;
+
+ if (!list_empty(&info->reassembly_queue)) {
+ ret = list_first_entry(
+ &info->reassembly_queue,
+ struct smbd_response, list);
+ }
+ return ret;
+}
+
+static struct smbd_response *get_empty_queue_buffer(
+ struct smbd_connection *info)
+{
+ struct smbd_response *ret = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->empty_packet_queue_lock, flags);
+ if (!list_empty(&info->empty_packet_queue)) {
+ ret = list_first_entry(
+ &info->empty_packet_queue,
+ struct smbd_response, list);
+ list_del(&ret->list);
+ info->count_empty_packet_queue--;
+ }
+ spin_unlock_irqrestore(&info->empty_packet_queue_lock, flags);
+
+ return ret;
+}
+
+/*
+ * Get a receive buffer
+ * For each remote send, we need to post a receive. The receive buffers are
+ * preallocated.
+ * return value: the receive buffer, NULL if none is available
+ */
+static struct smbd_response *get_receive_buffer(struct smbd_connection *info)
+{
+ struct smbd_response *ret = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->receive_queue_lock, flags);
+ if (!list_empty(&info->receive_queue)) {
+ ret = list_first_entry(
+ &info->receive_queue,
+ struct smbd_response, list);
+ list_del(&ret->list);
+ info->count_receive_queue--;
+ info->count_get_receive_buffer++;
+ }
+ spin_unlock_irqrestore(&info->receive_queue_lock, flags);
+
+ return ret;
+}
+
+/*
+ * Return a receive buffer
+ * When a receive buffer is returned, we can post a new receive and extend
+ * more receive credits to the remote peer. This is done immediately after
+ * the buffer is returned.
+ */
+static void put_receive_buffer(
+ struct smbd_connection *info, struct smbd_response *response)
+{
+ unsigned long flags;
+
+ ib_dma_unmap_single(info->id->device, response->sge.addr,
+ response->sge.length, DMA_FROM_DEVICE);
+
+ spin_lock_irqsave(&info->receive_queue_lock, flags);
+ list_add_tail(&response->list, &info->receive_queue);
+ info->count_receive_queue++;
+ info->count_put_receive_buffer++;
+ spin_unlock_irqrestore(&info->receive_queue_lock, flags);
+
+ queue_work(info->workqueue, &info->post_send_credits_work);
+}
+
+/* Preallocate all receive buffer on transport establishment */
+static int allocate_receive_buffers(struct smbd_connection *info, int num_buf)
+{
+ int i;
+ struct smbd_response *response;
+
+ INIT_LIST_HEAD(&info->reassembly_queue);
+ spin_lock_init(&info->reassembly_queue_lock);
+ info->reassembly_data_length = 0;
+ info->reassembly_queue_length = 0;
+
+ INIT_LIST_HEAD(&info->receive_queue);
+ spin_lock_init(&info->receive_queue_lock);
+ info->count_receive_queue = 0;
+
+ INIT_LIST_HEAD(&info->empty_packet_queue);
+ spin_lock_init(&info->empty_packet_queue_lock);
+ info->count_empty_packet_queue = 0;
+
+ init_waitqueue_head(&info->wait_receive_queues);
+
+ for (i = 0; i < num_buf; i++) {
+ response = mempool_alloc(info->response_mempool, GFP_KERNEL);
+ if (!response)
+ goto allocate_failed;
+
+ response->info = info;
+ list_add_tail(&response->list, &info->receive_queue);
+ info->count_receive_queue++;
+ }
+
+ return 0;
+
+allocate_failed:
+ while (!list_empty(&info->receive_queue)) {
+ response = list_first_entry(
+ &info->receive_queue,
+ struct smbd_response, list);
+ list_del(&response->list);
+ info->count_receive_queue--;
+
+ mempool_free(response, info->response_mempool);
+ }
+ return -ENOMEM;
+}
+
+static void destroy_receive_buffers(struct smbd_connection *info)
+{
+ struct smbd_response *response;
+
+ while ((response = get_receive_buffer(info)))
+ mempool_free(response, info->response_mempool);
+
+ while ((response = get_empty_queue_buffer(info)))
+ mempool_free(response, info->response_mempool);
+}
+
+/*
+ * Check for and send an immediate or keep-alive packet
+ * The conditions for sending these packets are defined in [MS-SMBD] 3.1.1.1
+ * Connection.KeepaliveRequested and Connection.SendImmediate
+ * The idea is to extend credits to the server as soon as they become available
+ */
+static void send_immediate_work(struct work_struct *work)
+{
+ struct smbd_connection *info = container_of(
+ work, struct smbd_connection,
+ send_immediate_work.work);
+
+ if (info->keep_alive_requested == KEEP_ALIVE_PENDING ||
+ info->send_immediate) {
+ log_keep_alive(INFO, "send an empty message\n");
+ smbd_post_send_empty(info);
+ }
+}
+
+/* Implement idle connection timer [MS-SMBD] 3.1.6.2 */
+static void idle_connection_timer(struct work_struct *work)
+{
+ struct smbd_connection *info = container_of(
+ work, struct smbd_connection,
+ idle_timer_work.work);
+
+ if (info->keep_alive_requested != KEEP_ALIVE_NONE) {
+ log_keep_alive(ERR,
+ "error status info->keep_alive_requested=%d\n",
+ info->keep_alive_requested);
+ smbd_disconnect_rdma_connection(info);
+ return;
+ }
+
+ log_keep_alive(INFO, "about to send an empty idle message\n");
+ smbd_post_send_empty(info);
+
+ /* Setup the next idle timeout work */
+ queue_delayed_work(info->workqueue, &info->idle_timer_work,
+ info->keep_alive_interval*HZ);
+}
+
+/* Destroy this SMBD connection, called from upper layer */
+void smbd_destroy(struct smbd_connection *info)
+{
+ log_rdma_event(INFO, "destroying rdma session\n");
+
+ /* Kick off the disconnection process */
+ smbd_disconnect_rdma_connection(info);
+
+ log_rdma_event(INFO, "wait for transport being destroyed\n");
+ wait_event(info->wait_destroy,
+ info->transport_status == SMBD_DESTROYED);
+
+ destroy_workqueue(info->workqueue);
+ kfree(info);
+}
+
+/*
+ * Reconnect this SMBD connection, called from upper layer
+ * return value: 0 on success, or actual error code
+ */
+int smbd_reconnect(struct TCP_Server_Info *server)
+{
+ log_rdma_event(INFO, "reconnecting rdma session\n");
+
+ if (!server->smbd_conn) {
+ log_rdma_event(ERR, "rdma session already destroyed\n");
+ return -EINVAL;
+ }
+
+ /*
+ * This is possible if the transport is disconnected and we haven't
+ * received the notification from RDMA, but the upper layer has
+ * detected a timeout
+ */
+ if (server->smbd_conn->transport_status == SMBD_CONNECTED) {
+ log_rdma_event(INFO, "disconnecting transport\n");
+ smbd_disconnect_rdma_connection(server->smbd_conn);
+ }
+
+ /* wait until the transport is destroyed */
+ wait_event(server->smbd_conn->wait_destroy,
+ server->smbd_conn->transport_status == SMBD_DESTROYED);
+
+ destroy_workqueue(server->smbd_conn->workqueue);
+ kfree(server->smbd_conn);
+
+ log_rdma_event(INFO, "creating rdma session\n");
+ server->smbd_conn = smbd_get_connection(
+ server, (struct sockaddr *) &server->dstaddr);
+
+ return server->smbd_conn ? 0 : -ENOENT;
+}
+
+static void destroy_caches_and_workqueue(struct smbd_connection *info)
+{
+ destroy_receive_buffers(info);
+ destroy_workqueue(info->workqueue);
+ mempool_destroy(info->response_mempool);
+ kmem_cache_destroy(info->response_cache);
+ mempool_destroy(info->request_mempool);
+ kmem_cache_destroy(info->request_cache);
+}
+
+#define MAX_NAME_LEN 80
+static int allocate_caches_and_workqueue(struct smbd_connection *info)
+{
+ char name[MAX_NAME_LEN];
+ int rc;
+
+ snprintf(name, MAX_NAME_LEN, "smbd_request_%p", info);
+ info->request_cache =
+ kmem_cache_create(
+ name,
+ sizeof(struct smbd_request) +
+ sizeof(struct smbd_data_transfer),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!info->request_cache)
+ return -ENOMEM;
+
+ info->request_mempool =
+ mempool_create(info->send_credit_target, mempool_alloc_slab,
+ mempool_free_slab, info->request_cache);
+ if (!info->request_mempool)
+ goto out1;
+
+ snprintf(name, MAX_NAME_LEN, "smbd_response_%p", info);
+ info->response_cache =
+ kmem_cache_create(
+ name,
+ sizeof(struct smbd_response) +
+ info->max_receive_size,
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!info->response_cache)
+ goto out2;
+
+ info->response_mempool =
+ mempool_create(info->receive_credit_max, mempool_alloc_slab,
+ mempool_free_slab, info->response_cache);
+ if (!info->response_mempool)
+ goto out3;
+
+ snprintf(name, MAX_NAME_LEN, "smbd_%p", info);
+ info->workqueue = create_workqueue(name);
+ if (!info->workqueue)
+ goto out4;
+
+ rc = allocate_receive_buffers(info, info->receive_credit_max);
+ if (rc) {
+ log_rdma_event(ERR, "failed to allocate receive buffers\n");
+ goto out5;
+ }
+
+ return 0;
+
+out5:
+ destroy_workqueue(info->workqueue);
+out4:
+ mempool_destroy(info->response_mempool);
+out3:
+ kmem_cache_destroy(info->response_cache);
+out2:
+ mempool_destroy(info->request_mempool);
+out1:
+ kmem_cache_destroy(info->request_cache);
+ return -ENOMEM;
+}
+
+/* Create a SMBD connection, called by upper layer */
+static struct smbd_connection *_smbd_get_connection(
+ struct TCP_Server_Info *server, struct sockaddr *dstaddr, int port)
+{
+ int rc;
+ struct smbd_connection *info;
+ struct rdma_conn_param conn_param;
+ struct ib_qp_init_attr qp_attr;
+ struct sockaddr_in *addr_in = (struct sockaddr_in *) dstaddr;
+ struct ib_port_immutable port_immutable;
+ u32 ird_ord_hdr[2];
+
+ info = kzalloc(sizeof(struct smbd_connection), GFP_KERNEL);
+ if (!info)
+ return NULL;
+
+ info->transport_status = SMBD_CONNECTING;
+ rc = smbd_ia_open(info, dstaddr, port);
+ if (rc) {
+ log_rdma_event(INFO, "smbd_ia_open rc=%d\n", rc);
+ goto create_id_failed;
+ }
+
+ if (smbd_send_credit_target > info->id->device->attrs.max_cqe ||
+ smbd_send_credit_target > info->id->device->attrs.max_qp_wr) {
+ log_rdma_event(ERR,
+ "consider lowering send_credit_target = %d. "
+ "Possible CQE overrun, device "
+ "reporting max_cpe %d max_qp_wr %d\n",
+ smbd_send_credit_target,
+ info->id->device->attrs.max_cqe,
+ info->id->device->attrs.max_qp_wr);
+ goto config_failed;
+ }
+
+ if (smbd_receive_credit_max > info->id->device->attrs.max_cqe ||
+ smbd_receive_credit_max > info->id->device->attrs.max_qp_wr) {
+ log_rdma_event(ERR,
+ "consider lowering receive_credit_max = %d. "
+ "Possible CQE overrun, device "
+ "reporting max_cpe %d max_qp_wr %d\n",
+ smbd_receive_credit_max,
+ info->id->device->attrs.max_cqe,
+ info->id->device->attrs.max_qp_wr);
+ goto config_failed;
+ }
+
+ info->receive_credit_max = smbd_receive_credit_max;
+ info->send_credit_target = smbd_send_credit_target;
+ info->max_send_size = smbd_max_send_size;
+ info->max_fragmented_recv_size = smbd_max_fragmented_recv_size;
+ info->max_receive_size = smbd_max_receive_size;
+ info->keep_alive_interval = smbd_keep_alive_interval;
+
+ if (info->id->device->attrs.max_sge < SMBDIRECT_MAX_SGE) {
+ log_rdma_event(ERR, "warning: device max_sge = %d too small\n",
+ info->id->device->attrs.max_sge);
+ log_rdma_event(ERR, "Queue Pair creation may fail\n");
+ }
+
+ info->send_cq = NULL;
+ info->recv_cq = NULL;
+ info->send_cq = ib_alloc_cq(info->id->device, info,
+ info->send_credit_target, 0, IB_POLL_SOFTIRQ);
+ if (IS_ERR(info->send_cq)) {
+ info->send_cq = NULL;
+ goto alloc_cq_failed;
+ }
+
+ info->recv_cq = ib_alloc_cq(info->id->device, info,
+ info->receive_credit_max, 0, IB_POLL_SOFTIRQ);
+ if (IS_ERR(info->recv_cq)) {
+ info->recv_cq = NULL;
+ goto alloc_cq_failed;
+ }
+
+ memset(&qp_attr, 0, sizeof(qp_attr));
+ qp_attr.event_handler = smbd_qp_async_error_upcall;
+ qp_attr.qp_context = info;
+ qp_attr.cap.max_send_wr = info->send_credit_target;
+ qp_attr.cap.max_recv_wr = info->receive_credit_max;
+ qp_attr.cap.max_send_sge = SMBDIRECT_MAX_SGE;
+ qp_attr.cap.max_recv_sge = SMBDIRECT_MAX_SGE;
+ qp_attr.cap.max_inline_data = 0;
+ qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
+ qp_attr.qp_type = IB_QPT_RC;
+ qp_attr.send_cq = info->send_cq;
+ qp_attr.recv_cq = info->recv_cq;
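+ /* port_num is only meaningful for special QP types; ~0 appears to mark it unused */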
+ qp_attr.port_num = ~0;
+
+ rc = rdma_create_qp(info->id, info->pd, &qp_attr);
+ if (rc) {
+ log_rdma_event(ERR, "rdma_create_qp failed %i\n", rc);
+ goto create_qp_failed;
+ }
+
+ memset(&conn_param, 0, sizeof(conn_param));
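+ /*
+ * initiator_depth is 0: presumably because this side never originates
+ * RDMA reads; the server performs the RDMA read/write
+ */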
+ conn_param.initiator_depth = 0;
+
+ conn_param.responder_resources =
+ info->id->device->attrs.max_qp_rd_atom
+ < SMBD_CM_RESPONDER_RESOURCES ?
+ info->id->device->attrs.max_qp_rd_atom :
+ SMBD_CM_RESPONDER_RESOURCES;
+ info->responder_resources = conn_param.responder_resources;
+ log_rdma_mr(INFO, "responder_resources=%d\n",
+ info->responder_resources);
+
+ /* Need to send IRD/ORD in private data for iWARP */
+ info->id->device->get_port_immutable(
+ info->id->device, info->id->port_num, &port_immutable);
+ if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) {
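+ /*
+ * ird_ord_hdr[0] carries the IRD (responder resources);
+ * ird_ord_hdr[1] apparently carries the ORD, fixed at 1 here
+ */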
+ ird_ord_hdr[0] = info->responder_resources;
+ ird_ord_hdr[1] = 1;
+ conn_param.private_data = ird_ord_hdr;
+ conn_param.private_data_len = sizeof(ird_ord_hdr);
+ } else {
+ conn_param.private_data = NULL;
+ conn_param.private_data_len = 0;
+ }
+
+ conn_param.retry_count = SMBD_CM_RETRY;
+ conn_param.rnr_retry_count = SMBD_CM_RNR_RETRY;
+ conn_param.flow_control = 0;
+ init_waitqueue_head(&info->wait_destroy);
+
+ log_rdma_event(INFO, "connecting to IP %pI4 port %d\n",
+ &addr_in->sin_addr, port);
+
+ init_waitqueue_head(&info->conn_wait);
+ rc = rdma_connect(info->id, &conn_param);
+ if (rc) {
+ log_rdma_event(ERR, "rdma_connect() failed with %i\n", rc);
+ goto rdma_connect_failed;
+ }
+
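+ /*
+ * transport_status is updated and conn_wait woken by the RDMA CM
+ * event upcall (not shown here)
+ */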
+ wait_event_interruptible(
+ info->conn_wait, info->transport_status != SMBD_CONNECTING);
+
+ if (info->transport_status != SMBD_CONNECTED) {
+ log_rdma_event(ERR, "rdma_connect failed port=%d\n", port);
+ goto rdma_connect_failed;
+ }
+
+ log_rdma_event(INFO, "rdma_connect connected\n");
+
+ rc = allocate_caches_and_workqueue(info);
+ if (rc) {
+ log_rdma_event(ERR, "cache allocation failed\n");
+ goto allocate_cache_failed;
+ }
+
+ init_waitqueue_head(&info->wait_send_queue);
+ init_waitqueue_head(&info->wait_reassembly_queue);
+
+ INIT_DELAYED_WORK(&info->idle_timer_work, idle_connection_timer);
+ INIT_DELAYED_WORK(&info->send_immediate_work, send_immediate_work);
+ queue_delayed_work(info->workqueue, &info->idle_timer_work,
+ info->keep_alive_interval*HZ);
+
+ init_waitqueue_head(&info->wait_smbd_send_pending);
+ info->smbd_send_pending = 0;
+
+ init_waitqueue_head(&info->wait_smbd_recv_pending);
+ info->smbd_recv_pending = 0;
+
+ init_waitqueue_head(&info->wait_send_pending);
+ atomic_set(&info->send_pending, 0);
+
+ init_waitqueue_head(&info->wait_send_payload_pending);
+ atomic_set(&info->send_payload_pending, 0);
+
+ INIT_WORK(&info->disconnect_work, smbd_disconnect_rdma_work);
+ INIT_WORK(&info->destroy_work, smbd_destroy_rdma_work);
+ INIT_WORK(&info->recv_done_work, smbd_recv_done_work);
+ INIT_WORK(&info->post_send_credits_work, smbd_post_send_credits);
+ info->new_credits_offered = 0;
+ spin_lock_init(&info->lock_new_credits_offered);
+
+ rc = smbd_negotiate(info);
+ if (rc) {
+ log_rdma_event(ERR, "smbd_negotiate rc=%d\n", rc);
+ goto negotiation_failed;
+ }
+
+ rc = allocate_mr_list(info);
+ if (rc) {
+ log_rdma_mr(ERR, "memory registration allocation failed\n");
+ goto allocate_mr_failed;
+ }
+
+ return info;
+
+allocate_mr_failed:
+ /* At this point, we need a full transport shutdown */
+ smbd_destroy(info);
+ return NULL;
+
+negotiation_failed:
+ cancel_delayed_work_sync(&info->idle_timer_work);
+ destroy_caches_and_workqueue(info);
+ info->transport_status = SMBD_NEGOTIATE_FAILED;
+ init_waitqueue_head(&info->conn_wait);
+ rdma_disconnect(info->id);
+ wait_event(info->conn_wait,
+ info->transport_status == SMBD_DISCONNECTED);
+
+allocate_cache_failed:
+rdma_connect_failed:
+ rdma_destroy_qp(info->id);
+
+create_qp_failed:
+alloc_cq_failed:
+ if (info->send_cq)
+ ib_free_cq(info->send_cq);
+ if (info->recv_cq)
+ ib_free_cq(info->recv_cq);
+
+config_failed:
+ ib_dealloc_pd(info->pd);
+ rdma_destroy_id(info->id);
+
+create_id_failed:
+ kfree(info);
+ return NULL;
+}
+
+struct smbd_connection *smbd_get_connection(
+ struct TCP_Server_Info *server, struct sockaddr *dstaddr)
+{
+ struct smbd_connection *ret;
+ int port = SMBD_PORT;
+
+try_again:
+ ret = _smbd_get_connection(server, dstaddr, port);
+
+ /* Try SMB_PORT if SMBD_PORT doesn't work */
+ if (!ret && port == SMBD_PORT) {
+ port = SMB_PORT;
+ goto try_again;
+ }
+ return ret;
+}
+
+/*
+ * Receive data from receive reassembly queue
+ * All the incoming data packets are placed in reassembly queue
+ * buf: the buffer to read data into
+ * size: the length of data to read
+ * return value: actual data read
+ * Note: this implementation copies the data from the reassembly queue to
+ * receive buffers used by the upper layer. This is not the optimal code path.
+ * A better way to do it is to not have the upper layer allocate its receive
+ * buffers but rather borrow the buffer from the reassembly queue, and return
+ * it after the data is consumed. But this will require more changes to upper
+ * layer code, and also needs to consider packet boundaries while they are
+ * still being reassembled.
+ */
+static int smbd_recv_buf(struct smbd_connection *info, char *buf,
+ unsigned int size)
+{
+ struct smbd_response *response;
+ struct smbd_data_transfer *data_transfer;
+ int to_copy, to_read, data_read, offset;
+ u32 data_length, remaining_data_length, data_offset;
+ int rc;
+
+again:
+ if (info->transport_status != SMBD_CONNECTED) {
+ log_read(ERR, "disconnected\n");
+ return -ENODEV;
+ }
+
+ /*
+ * No need to hold the reassembly queue lock all the time as we are
+ * the only one reading from the front of the queue. The transport
+ * may add more entries to the back of the queue at the same time
+ */
+ log_read(INFO, "size=%d info->reassembly_data_length=%d\n", size,
+ info->reassembly_data_length);
+ if (info->reassembly_data_length >= size) {
+ int queue_length;
+ int queue_removed = 0;
+
+ /*
+ * Need to make sure reassembly_data_length is read before
+ * reading reassembly_queue_length and calling
+ * _get_first_reassembly. This call is lock free
+ * as we never read at the end of the queue, which is being
+ * updated in SOFTIRQ context as more data is received
+ */
+ virt_rmb();
+ queue_length = info->reassembly_queue_length;
+ data_read = 0;
+ to_read = size;
+ offset = info->first_entry_offset;
+ while (data_read < size) {
+ response = _get_first_reassembly(info);
+ data_transfer = smbd_response_payload(response);
+ data_length = le32_to_cpu(data_transfer->data_length);
+ remaining_data_length =
+ le32_to_cpu(
+ data_transfer->remaining_data_length);
+ data_offset = le32_to_cpu(data_transfer->data_offset);
+
+ /*
+ * The upper layer expects RFC1002 length at the
+ * beginning of the payload. Return it to indicate
+ * the total length of the packet. This minimizes the
+ * change to the upper layer packet processing logic, and
+ * will eventually be removed when an intermediate
+ * transport layer is added
+ */
+ if (response->first_segment && size == 4) {
+ unsigned int rfc1002_len =
+ data_length + remaining_data_length;
+ *((__be32 *)buf) = cpu_to_be32(rfc1002_len);
+ data_read = 4;
+ response->first_segment = false;
+ log_read(INFO, "returning rfc1002 length %d\n",
+ rfc1002_len);
+ goto read_rfc1002_done;
+ }
+
+ to_copy = min_t(int, data_length - offset, to_read);
+ memcpy(
+ buf + data_read,
+ (char *)data_transfer + data_offset + offset,
+ to_copy);
+
+ /* move on to the next buffer? */
+ if (to_copy == data_length - offset) {
+ queue_length--;
+ /*
+ * No need to lock if we are not at the
+ * end of the queue
+ */
+ if (!queue_length)
+ spin_lock_irq(
+ &info->reassembly_queue_lock);
+ list_del(&response->list);
+ queue_removed++;
+ if (!queue_length)
+ spin_unlock_irq(
+ &info->reassembly_queue_lock);
+
+ info->count_reassembly_queue--;
+ info->count_dequeue_reassembly_queue++;
+ put_receive_buffer(info, response);
+ offset = 0;
+ log_read(INFO, "put_receive_buffer offset=0\n");
+ } else
+ offset += to_copy;
+
+ to_read -= to_copy;
+ data_read += to_copy;
+
+ log_read(INFO, "_get_first_reassembly memcpy %d bytes "
+ "data_transfer_length-offset=%d after that "
+ "to_read=%d data_read=%d offset=%d\n",
+ to_copy, data_length - offset,
+ to_read, data_read, offset);
+ }
+
+ spin_lock_irq(&info->reassembly_queue_lock);
+ info->reassembly_data_length -= data_read;
+ info->reassembly_queue_length -= queue_removed;
+ spin_unlock_irq(&info->reassembly_queue_lock);
+
+ info->first_entry_offset = offset;
+ log_read(INFO, "returning to thread data_read=%d "
+ "reassembly_data_length=%d first_entry_offset=%d\n",
+ data_read, info->reassembly_data_length,
+ info->first_entry_offset);
+read_rfc1002_done:
+ return data_read;
+ }
+
+ log_read(INFO, "wait_event on more data\n");
+ rc = wait_event_interruptible(
+ info->wait_reassembly_queue,
+ info->reassembly_data_length >= size ||
+ info->transport_status != SMBD_CONNECTED);
+ /* Don't return any data if interrupted */
+ if (rc)
+ return -ENODEV;
+
+ goto again;
+}
+
+/*
+ * Receive a page from receive reassembly queue
+ * page: the page to read data into
+ * to_read: the length of data to read
+ * return value: actual data read
+ */
+static int smbd_recv_page(struct smbd_connection *info,
+ struct page *page, unsigned int to_read)
+{
+ int ret;
+ char *to_address;
+
+ /* make sure we have the page ready for read */
+ ret = wait_event_interruptible(
+ info->wait_reassembly_queue,
+ info->reassembly_data_length >= to_read ||
+ info->transport_status != SMBD_CONNECTED);
+ if (ret)
+ return 0;
+
+ /* now we can read from reassembly queue and not sleep */
+ to_address = kmap_atomic(page);
+
+ log_read(INFO, "reading from page=%p address=%p to_read=%d\n",
+ page, to_address, to_read);
+
+ ret = smbd_recv_buf(info, to_address, to_read);
+ kunmap_atomic(to_address);
+
+ return ret;
+}
+
+/*
+ * Receive data from transport
+ * msg: a msghdr pointing to the buffer, can be ITER_KVEC or ITER_BVEC
+ * return: total bytes read, or 0. SMB Direct will not do partial read.
+ */
+int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
+{
+ char *buf;
+ struct page *page;
+ unsigned int to_read;
+ int rc;
+
+ info->smbd_recv_pending++;
+
+ switch (msg->msg_iter.type) {
+ case READ | ITER_KVEC:
+ buf = msg->msg_iter.kvec->iov_base;
+ to_read = msg->msg_iter.kvec->iov_len;
+ rc = smbd_recv_buf(info, buf, to_read);
+ break;
+
+ case READ | ITER_BVEC:
+ page = msg->msg_iter.bvec->bv_page;
+ to_read = msg->msg_iter.bvec->bv_len;
+ rc = smbd_recv_page(info, page, to_read);
+ break;
+
+ default:
+ /* It's a bug in the upper layer to get here */
+ cifs_dbg(VFS, "CIFS: invalid msg type %d\n",
+ msg->msg_iter.type);
+ rc = -EIO;
+ }
+
+ info->smbd_recv_pending--;
+ wake_up(&info->wait_smbd_recv_pending);
+
+ /* SMBDirect will read it all or nothing */
+ if (rc > 0)
+ msg->msg_iter.count = 0;
+ return rc;
+}
+
+/*
+ * Send data to transport
+ * Each rqst is transported as a SMBDirect payload
+ * rqst: the data to write
+ * return value: 0 on successful write, otherwise error code
+ */
+int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
+{
+ struct kvec vec;
+ int nvecs;
+ int size;
+ int buflen = 0, remaining_data_length;
+ int start, i, j;
+ int max_iov_size =
+ info->max_send_size - sizeof(struct smbd_data_transfer);
+ struct kvec iov[SMBDIRECT_MAX_SGE];
+ int rc;
+
+ info->smbd_send_pending++;
+ if (info->transport_status != SMBD_CONNECTED) {
+ rc = -ENODEV;
+ goto done;
+ }
+
+ /*
+ * This usually means a configuration error.
+ * We use RDMA read/write for packet size > rdma_readwrite_threshold;
+ * as long as it's properly configured, we should never get into this
+ * situation
+ */
+ if (rqst->rq_nvec + rqst->rq_npages > SMBDIRECT_MAX_SGE) {
+ log_write(ERR, "maximum send segment %x exceeding %x\n",
+ rqst->rq_nvec + rqst->rq_npages, SMBDIRECT_MAX_SGE);
+ rc = -EINVAL;
+ goto done;
+ }
+
+ /*
+ * Remove the RFC1002 length defined in MS-SMB2 section 2.1.
+ * It is used only for the TCP transport.
+ * In the future we may want to add a transport layer under the protocol
+ * layer so this will only be issued to the TCP transport
+ */
+ iov[0].iov_base = (char *)rqst->rq_iov[0].iov_base + 4;
+ iov[0].iov_len = rqst->rq_iov[0].iov_len - 4;
+ buflen += iov[0].iov_len;
+
+ /* total up iov array first */
+ for (i = 1; i < rqst->rq_nvec; i++) {
+ iov[i].iov_base = rqst->rq_iov[i].iov_base;
+ iov[i].iov_len = rqst->rq_iov[i].iov_len;
+ buflen += iov[i].iov_len;
+ }
+
+ /* add in the page array if there is one */
+ if (rqst->rq_npages) {
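+ /* every page except the last contributes rq_pagesz bytes; the last holds rq_tailsz */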
+ buflen += rqst->rq_pagesz * (rqst->rq_npages - 1);
+ buflen += rqst->rq_tailsz;
+ }
+
+ if (buflen + sizeof(struct smbd_data_transfer) >
+ info->max_fragmented_send_size) {
+ log_write(ERR, "payload size %d > max size %d\n",
+ buflen, info->max_fragmented_send_size);
+ rc = -EINVAL;
+ goto done;
+ }
+
+ remaining_data_length = buflen;
+
+ log_write(INFO, "rqst->rq_nvec=%d rqst->rq_npages=%d rq_pagesz=%d "
+ "rq_tailsz=%d buflen=%d\n",
+ rqst->rq_nvec, rqst->rq_npages, rqst->rq_pagesz,
+ rqst->rq_tailsz, buflen);
+
+ start = i = iov[0].iov_len ? 0 : 1;
+ buflen = 0;
+ while (true) {
+ buflen += iov[i].iov_len;
+ if (buflen > max_iov_size) {
+ if (i > start) {
+ remaining_data_length -=
+ (buflen-iov[i].iov_len);
+ log_write(INFO, "sending iov[] from start=%d "
+ "i=%d nvecs=%d "
+ "remaining_data_length=%d\n",
+ start, i, i-start,
+ remaining_data_length);
+ rc = smbd_post_send_data(
+ info, &iov[start], i-start,
+ remaining_data_length);
+ if (rc)
+ goto done;
+ } else {
+ /* iov[start] is too big, break it */
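+ /* round up to the number of max_iov_size-sized chunks needed */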
+ nvecs = (buflen+max_iov_size-1)/max_iov_size;
+ log_write(INFO, "iov[%d] iov_base=%p buflen=%d"
+ " break to %d vectors\n",
+ start, iov[start].iov_base,
+ buflen, nvecs);
+ for (j = 0; j < nvecs; j++) {
+ vec.iov_base =
+ (char *)iov[start].iov_base +
+ j*max_iov_size;
+ vec.iov_len = max_iov_size;
+ if (j == nvecs-1)
+ vec.iov_len =
+ buflen -
+ max_iov_size*(nvecs-1);
+ remaining_data_length -= vec.iov_len;
+ log_write(INFO,
+ "sending vec j=%d iov_base=%p"
+ " iov_len=%zu "
+ "remaining_data_length=%d\n",
+ j, vec.iov_base, vec.iov_len,
+ remaining_data_length);
+ rc = smbd_post_send_data(
+ info, &vec, 1,
+ remaining_data_length);
+ if (rc)
+ goto done;
+ }
+ i++;
+ }
+ start = i;
+ buflen = 0;
+ } else {
+ i++;
+ if (i == rqst->rq_nvec) {
+ /* send out all remaining vecs */
+ remaining_data_length -= buflen;
+ log_write(INFO,
+ "sending iov[] from start=%d i=%d "
+ "nvecs=%d remaining_data_length=%d\n",
+ start, i, i-start,
+ remaining_data_length);
+ rc = smbd_post_send_data(info, &iov[start],
+ i-start, remaining_data_length);
+ if (rc)
+ goto done;
+ break;
+ }
+ }
+ log_write(INFO, "looping i=%d buflen=%d\n", i, buflen);
+ }
+
+ /* now sending pages if there are any */
+ for (i = 0; i < rqst->rq_npages; i++) {
+ buflen = (i == rqst->rq_npages-1) ?
+ rqst->rq_tailsz : rqst->rq_pagesz;
+ nvecs = (buflen + max_iov_size - 1) / max_iov_size;
+ log_write(INFO, "sending pages buflen=%d nvecs=%d\n",
+ buflen, nvecs);
+ for (j = 0; j < nvecs; j++) {
+ size = max_iov_size;
+ if (j == nvecs-1)
+ size = buflen - j*max_iov_size;
+ remaining_data_length -= size;
+ log_write(INFO, "sending pages i=%d offset=%d size=%d"
+ " remaining_data_length=%d\n",
+ i, j*max_iov_size, size, remaining_data_length);
+ rc = smbd_post_send_page(
+ info, rqst->rq_pages[i], j*max_iov_size,
+ size, remaining_data_length);
+ if (rc)
+ goto done;
+ }
+ }
+
+done:
+ /*
+ * As an optimization, we don't wait for individual I/O to finish
+ * before sending the next one.
+ * Send them all and wait for the pending send count to get to 0;
+ * that means all the I/Os have gone out and we are good to return
+ */
+
+ wait_event(info->wait_send_payload_pending,
+ atomic_read(&info->send_payload_pending) == 0);
+
+ info->smbd_send_pending--;
+ wake_up(&info->wait_smbd_send_pending);
+
+ return rc;
+}
+
+static void register_mr_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct smbd_mr *mr;
+ struct ib_cqe *cqe;
+
+ if (wc->status) {
+ log_rdma_mr(ERR, "status=%d\n", wc->status);
+ cqe = wc->wr_cqe;
+ mr = container_of(cqe, struct smbd_mr, cqe);
+ smbd_disconnect_rdma_connection(mr->conn);
+ }
+}
+
+/*
+ * The work queue function that recovers MRs
+ * We need to call ib_dereg_mr() and ib_alloc_mr() before this MR can be used
+ * again. Both calls are slow, so finish them in a workqueue so they do not
+ * block the I/O path.
+ * There is one workqueue that recovers MRs; there is no need to lock as the
+ * I/O requests calling smbd_register_mr() never update the links in
+ * mr_list.
+ */
+static void smbd_mr_recovery_work(struct work_struct *work)
+{
+ struct smbd_connection *info =
+ container_of(work, struct smbd_connection, mr_recovery_work);
+ struct smbd_mr *smbdirect_mr;
+ int rc;
+
+ list_for_each_entry(smbdirect_mr, &info->mr_list, list) {
+ if (smbdirect_mr->state == MR_INVALIDATED ||
+ smbdirect_mr->state == MR_ERROR) {
+
+ if (smbdirect_mr->state == MR_INVALIDATED) {
+ ib_dma_unmap_sg(
+ info->id->device, smbdirect_mr->sgl,
+ smbdirect_mr->sgl_count,
+ smbdirect_mr->dir);
+ smbdirect_mr->state = MR_READY;
+ } else if (smbdirect_mr->state == MR_ERROR) {
+
+ /* recover this MR entry */
+ rc = ib_dereg_mr(smbdirect_mr->mr);
+ if (rc) {
+ log_rdma_mr(ERR,
+ "ib_dereg_mr faield rc=%x\n",
+ rc);
+ smbd_disconnect_rdma_connection(info);
+ }
+
+ smbdirect_mr->mr = ib_alloc_mr(
+ info->pd, info->mr_type,
+ info->max_frmr_depth);
+ if (IS_ERR(smbdirect_mr->mr)) {
+ log_rdma_mr(ERR,
+ "ib_alloc_mr failed mr_type=%x "
+ "max_frmr_depth=%x\n",
+ info->mr_type,
+ info->max_frmr_depth);
+ smbd_disconnect_rdma_connection(info);
+ }
+
+ smbdirect_mr->state = MR_READY;
+ }
+ /* smbdirect_mr->state is updated by this function
+ * and is read and updated by I/O issuing CPUs trying
+ * to get an MR. The call to atomic_inc_return
+ * implies a full memory barrier and guarantees this
+ * value is updated before waking up any calls to
+ * get_mr() from the I/O issuing CPUs
+ */
+ if (atomic_inc_return(&info->mr_ready_count) == 1)
+ wake_up_interruptible(&info->wait_mr);
+ }
+ }
+}
+
+static void destroy_mr_list(struct smbd_connection *info)
+{
+ struct smbd_mr *mr, *tmp;
+
+ cancel_work_sync(&info->mr_recovery_work);
+ list_for_each_entry_safe(mr, tmp, &info->mr_list, list) {
+ if (mr->state == MR_INVALIDATED)
+ ib_dma_unmap_sg(info->id->device, mr->sgl,
+ mr->sgl_count, mr->dir);
+ ib_dereg_mr(mr->mr);
+ kfree(mr->sgl);
+ kfree(mr);
+ }
+}
+
+/*
+ * Allocate MRs used for RDMA read/write
+ * The number of MRs will not exceed hardware capability in responder_resources
+ * All MRs are kept in mr_list. The MR can be recovered after it's used
+ * Recovery is done in smbd_mr_recovery_work. The content of list entry changes
+ * as MRs are used and recovered for I/O, but the list links will not change
+ */
+static int allocate_mr_list(struct smbd_connection *info)
+{
+ int i;
+ struct smbd_mr *smbdirect_mr, *tmp;
+
+ INIT_LIST_HEAD(&info->mr_list);
+ init_waitqueue_head(&info->wait_mr);
+ spin_lock_init(&info->mr_list_lock);
+ atomic_set(&info->mr_ready_count, 0);
+ atomic_set(&info->mr_used_count, 0);
+ init_waitqueue_head(&info->wait_for_mr_cleanup);
+ /* Allocate more MRs (2x) than hardware responder_resources */
+ for (i = 0; i < info->responder_resources * 2; i++) {
+ smbdirect_mr = kzalloc(sizeof(*smbdirect_mr), GFP_KERNEL);
+ if (!smbdirect_mr)
+ goto out;
+ smbdirect_mr->mr = ib_alloc_mr(info->pd, info->mr_type,
+ info->max_frmr_depth);
+ if (IS_ERR(smbdirect_mr->mr)) {
+ log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x "
+ "max_frmr_depth=%x\n",
+ info->mr_type, info->max_frmr_depth);
+ goto out;
+ }
+ smbdirect_mr->sgl = kcalloc(
+ info->max_frmr_depth,
+ sizeof(struct scatterlist),
+ GFP_KERNEL);
+ if (!smbdirect_mr->sgl) {
+ log_rdma_mr(ERR, "failed to allocate sgl\n");
+ ib_dereg_mr(smbdirect_mr->mr);
+ goto out;
+ }
+ smbdirect_mr->state = MR_READY;
+ smbdirect_mr->conn = info;
+
+ list_add_tail(&smbdirect_mr->list, &info->mr_list);
+ atomic_inc(&info->mr_ready_count);
+ }
+ INIT_WORK(&info->mr_recovery_work, smbd_mr_recovery_work);
+ return 0;
+
+out:
+ kfree(smbdirect_mr);
+
+ list_for_each_entry_safe(smbdirect_mr, tmp, &info->mr_list, list) {
+ ib_dereg_mr(smbdirect_mr->mr);
+ kfree(smbdirect_mr->sgl);
+ kfree(smbdirect_mr);
+ }
+ return -ENOMEM;
+}
+
+/*
+ * Get an MR from mr_list. This function waits until there is at least one
+ * MR available in the list. It may access the list while
+ * smbd_mr_recovery_work is recovering the MR list. This doesn't need a lock
+ * as they never modify the same places. However, there may be several CPUs
+ * issuing I/O and trying to get an MR at the same time; mr_list_lock is used
+ * to serialize them.
+ */
+static struct smbd_mr *get_mr(struct smbd_connection *info)
+{
+ struct smbd_mr *ret;
+ int rc;
+again:
+ rc = wait_event_interruptible(info->wait_mr,
+ atomic_read(&info->mr_ready_count) ||
+ info->transport_status != SMBD_CONNECTED);
+ if (rc) {
+ log_rdma_mr(ERR, "wait_event_interruptible rc=%x\n", rc);
+ return NULL;
+ }
+
+ if (info->transport_status != SMBD_CONNECTED) {
+ log_rdma_mr(ERR, "info->transport_status=%x\n",
+ info->transport_status);
+ return NULL;
+ }
+
+ spin_lock(&info->mr_list_lock);
+ list_for_each_entry(ret, &info->mr_list, list) {
+ if (ret->state == MR_READY) {
+ ret->state = MR_REGISTERED;
+ spin_unlock(&info->mr_list_lock);
+ atomic_dec(&info->mr_ready_count);
+ atomic_inc(&info->mr_used_count);
+ return ret;
+ }
+ }
+
+ spin_unlock(&info->mr_list_lock);
+ /*
+ * It is possible that we could fail to get an MR because other processes
+ * may try to acquire one at the same time. If this is the case, retry.
+ */
+ goto again;
+}
+
+/*
+ * Register memory for RDMA read/write
+ * pages[]: the list of pages to register memory with
+ * num_pages: the number of pages to register
+ * tailsz: if non-zero, the bytes to register in the last page
+ * writing: true if this is a RDMA write (SMB read), false for RDMA read
+ * need_invalidate: true if this MR needs to be locally invalidated after I/O
+ * return value: the MR registered, NULL if failed.
+ */
+struct smbd_mr *smbd_register_mr(
+ struct smbd_connection *info, struct page *pages[], int num_pages,
+ int tailsz, bool writing, bool need_invalidate)
+{
+ struct smbd_mr *smbdirect_mr;
+ int rc, i;
+ enum dma_data_direction dir;
+ struct ib_reg_wr *reg_wr;
+ struct ib_send_wr *bad_wr;
+
+ if (num_pages > info->max_frmr_depth) {
+ log_rdma_mr(ERR, "num_pages=%d max_frmr_depth=%d\n",
+ num_pages, info->max_frmr_depth);
+ return NULL;
+ }
+
+ smbdirect_mr = get_mr(info);
+ if (!smbdirect_mr) {
+ log_rdma_mr(ERR, "get_mr returning NULL\n");
+ return NULL;
+ }
+ smbdirect_mr->need_invalidate = need_invalidate;
+ smbdirect_mr->sgl_count = num_pages;
+ sg_init_table(smbdirect_mr->sgl, num_pages);
+
+ for (i = 0; i < num_pages - 1; i++)
+ sg_set_page(&smbdirect_mr->sgl[i], pages[i], PAGE_SIZE, 0);
+
+ sg_set_page(&smbdirect_mr->sgl[i], pages[i],
+ tailsz ? tailsz : PAGE_SIZE, 0);
+
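+ /*
+ * writing (an SMB read) means the peer RDMA-writes into this buffer,
+ * hence DMA_FROM_DEVICE; otherwise the peer RDMA-reads from it
+ */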
+ dir = writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ smbdirect_mr->dir = dir;
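+ /* ib_dma_map_sg returns the number of mapped entries; 0 indicates failure */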
+ rc = ib_dma_map_sg(info->id->device, smbdirect_mr->sgl, num_pages, dir);
+ if (!rc) {
+ log_rdma_mr(INFO, "ib_dma_map_sg num_pages=%x dir=%x rc=%x\n",
+ num_pages, dir, rc);
+ goto dma_map_error;
+ }
+
+ rc = ib_map_mr_sg(smbdirect_mr->mr, smbdirect_mr->sgl, num_pages,
+ NULL, PAGE_SIZE);
+ if (rc != num_pages) {
+ log_rdma_mr(INFO,
+ "ib_map_mr_sg failed rc = %x num_pages = %x\n",
+ rc, num_pages);
+ goto map_mr_error;
+ }
+
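+ /*
+ * Bump the key portion of the rkey so stale references to a previous
+ * registration of this MR cannot match the new registration
+ */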
+ ib_update_fast_reg_key(smbdirect_mr->mr,
+ ib_inc_rkey(smbdirect_mr->mr->rkey));
+ reg_wr = &smbdirect_mr->wr;
+ reg_wr->wr.opcode = IB_WR_REG_MR;
+ smbdirect_mr->cqe.done = register_mr_done;
+ reg_wr->wr.wr_cqe = &smbdirect_mr->cqe;
+ reg_wr->wr.num_sge = 0;
+ reg_wr->wr.send_flags = IB_SEND_SIGNALED;
+ reg_wr->mr = smbdirect_mr->mr;
+ reg_wr->key = smbdirect_mr->mr->rkey;
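+ /* LOCAL_WRITE must accompany REMOTE_WRITE under the verbs access rules */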
+ reg_wr->access = writing ?
+ IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
+ IB_ACCESS_REMOTE_READ;
+
+ /*
+ * There is no need to wait for completion of the ib_post_send
+ * on IB_WR_REG_MR. Hardware enforces a barrier and order of execution
+ * on the next ib_post_send when we actually send I/O to the remote peer
+ */
+ rc = ib_post_send(info->id->qp, &reg_wr->wr, &bad_wr);
+ if (!rc)
+ return smbdirect_mr;
+
+ log_rdma_mr(ERR, "ib_post_send failed rc=%x reg_wr->key=%x\n",
+ rc, reg_wr->key);
+
+ /* If anything failed, attempt to recover this MR by setting it to MR_ERROR */
+map_mr_error:
+ ib_dma_unmap_sg(info->id->device, smbdirect_mr->sgl,
+ smbdirect_mr->sgl_count, smbdirect_mr->dir);
+
+dma_map_error:
+ smbdirect_mr->state = MR_ERROR;
+ if (atomic_dec_and_test(&info->mr_used_count))
+ wake_up(&info->wait_for_mr_cleanup);
+
+ return NULL;
+}
+
+static void local_inv_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct smbd_mr *smbdirect_mr;
+ struct ib_cqe *cqe;
+
+ cqe = wc->wr_cqe;
+ smbdirect_mr = container_of(cqe, struct smbd_mr, cqe);
+ smbdirect_mr->state = MR_INVALIDATED;
+ if (wc->status != IB_WC_SUCCESS) {
+ log_rdma_mr(ERR, "invalidate failed status=%x\n", wc->status);
+ smbdirect_mr->state = MR_ERROR;
+ }
+ complete(&smbdirect_mr->invalidate_done);
+}
+
+/*
+ * Deregister an MR after I/O is done
+ * This function may wait if remote invalidation is not used
+ * and we have to locally invalidate the buffer to prevent data from being
+ * modified by the remote peer after the upper layer consumes it
+ */
+int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
+{
+ struct ib_send_wr *wr, *bad_wr;
+ struct smbd_connection *info = smbdirect_mr->conn;
+ int rc = 0;
+
+ if (smbdirect_mr->need_invalidate) {
+ /* Need to finish local invalidation before returning */
+ wr = &smbdirect_mr->inv_wr;
+ wr->opcode = IB_WR_LOCAL_INV;
+ smbdirect_mr->cqe.done = local_inv_done;
+ wr->wr_cqe = &smbdirect_mr->cqe;
+ wr->num_sge = 0;
+ wr->ex.invalidate_rkey = smbdirect_mr->mr->rkey;
+ wr->send_flags = IB_SEND_SIGNALED;
+
+ init_completion(&smbdirect_mr->invalidate_done);
+ rc = ib_post_send(info->id->qp, wr, &bad_wr);
+ if (rc) {
+ log_rdma_mr(ERR, "ib_post_send failed rc=%x\n", rc);
+ smbd_disconnect_rdma_connection(info);
+ goto done;
+ }
+ wait_for_completion(&smbdirect_mr->invalidate_done);
+ smbdirect_mr->need_invalidate = false;
+ } else
+ /*
+ * For remote invalidation, just set it to MR_INVALIDATED
+ * and defer to mr_recovery_work to recover the MR for next use
+ */
+ smbdirect_mr->state = MR_INVALIDATED;
+
+ /*
+ * Schedule the work to do MR recovery for future I/Os
+ * MR recovery is slow and we don't want it to block the current I/O
+ */
+ queue_work(info->workqueue, &info->mr_recovery_work);
+
+done:
+ if (atomic_dec_and_test(&info->mr_used_count))
+ wake_up(&info->wait_for_mr_cleanup);
+
+ return rc;
+}
diff --git a/fs/cifs/smbdirect.h b/fs/cifs/smbdirect.h
new file mode 100644
index 000000000000..f9038daea194
--- /dev/null
+++ b/fs/cifs/smbdirect.h
@@ -0,0 +1,338 @@
+/*
+ * Copyright (C) 2017, Microsoft Corporation.
+ *
+ * Author(s): Long Li <longli@microsoft.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ */
+#ifndef _SMBDIRECT_H
+#define _SMBDIRECT_H
+
+#ifdef CONFIG_CIFS_SMB_DIRECT
+#define cifs_rdma_enabled(server) ((server)->rdma)
+
+#include "cifsglob.h"
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_cm.h>
+#include <linux/mempool.h>
+
+extern int rdma_readwrite_threshold;
+extern int smbd_max_frmr_depth;
+extern int smbd_keep_alive_interval;
+extern int smbd_max_receive_size;
+extern int smbd_max_fragmented_recv_size;
+extern int smbd_max_send_size;
+extern int smbd_send_credit_target;
+extern int smbd_receive_credit_max;
+
+enum keep_alive_status {
+ KEEP_ALIVE_NONE,
+ KEEP_ALIVE_PENDING,
+ KEEP_ALIVE_SENT,
+};
+
+enum smbd_connection_status {
+ SMBD_CREATED,
+ SMBD_CONNECTING,
+ SMBD_CONNECTED,
+ SMBD_NEGOTIATE_FAILED,
+ SMBD_DISCONNECTING,
+ SMBD_DISCONNECTED,
+ SMBD_DESTROYED
+};
+
+/*
+ * The context for the SMBDirect transport
+ * Everything related to the transport is here. It has several logical parts
+ * 1. RDMA related structures
+ * 2. SMBDirect connection parameters
+ * 3. Memory registrations
+ * 4. Receive and reassembly queues for data receive path
+ * 5. mempools for allocating packets
+ */
+struct smbd_connection {
+ enum smbd_connection_status transport_status;
+
+ /* RDMA related */
+ struct rdma_cm_id *id;
+ struct ib_qp_init_attr qp_attr;
+ struct ib_pd *pd;
+ struct ib_cq *send_cq, *recv_cq;
+ struct ib_device_attr dev_attr;
+ int ri_rc;
+ struct completion ri_done;
+ wait_queue_head_t conn_wait;
+ wait_queue_head_t wait_destroy;
+
+ struct completion negotiate_completion;
+ bool negotiate_done;
+
+ struct work_struct destroy_work;
+ struct work_struct disconnect_work;
+ struct work_struct recv_done_work;
+ struct work_struct post_send_credits_work;
+
+ spinlock_t lock_new_credits_offered;
+ int new_credits_offered;
+
+ /* Connection parameters defined in [MS-SMBD] 3.1.1.1 */
+ int receive_credit_max;
+ int send_credit_target;
+ int max_send_size;
+ int max_fragmented_recv_size;
+ int max_fragmented_send_size;
+ int max_receive_size;
+ int keep_alive_interval;
+ int max_readwrite_size;
+ enum keep_alive_status keep_alive_requested;
+ int protocol;
+ atomic_t send_credits;
+ atomic_t receive_credits;
+ int receive_credit_target;
+ int fragment_reassembly_remaining;
+
+ /* Memory registrations */
+ /* Maximum number of RDMA read/write outstanding on this connection */
+ int responder_resources;
+ /* Maximum number of SGEs in a RDMA write/read */
+ int max_frmr_depth;
+ /*
+ * If payload is less than or equal to the threshold,
+ * use RDMA send/recv to send upper layer I/O.
+ * If payload is more than the threshold,
+ * use RDMA read/write through memory registration for I/O.
+ */
+ int rdma_readwrite_threshold;
+ enum ib_mr_type mr_type;
+ struct list_head mr_list;
+ spinlock_t mr_list_lock;
+ /* The number of available MRs ready for memory registration */
+ atomic_t mr_ready_count;
+ atomic_t mr_used_count;
+ wait_queue_head_t wait_mr;
+ struct work_struct mr_recovery_work;
+ /* Used by transport to wait until all MRs are returned */
+ wait_queue_head_t wait_for_mr_cleanup;
+
+ /* Activity accounting */
+ /* Pending requests issued from the upper layer */
+ int smbd_send_pending;
+ wait_queue_head_t wait_smbd_send_pending;
+
+ int smbd_recv_pending;
+ wait_queue_head_t wait_smbd_recv_pending;
+
+ atomic_t send_pending;
+ wait_queue_head_t wait_send_pending;
+ atomic_t send_payload_pending;
+ wait_queue_head_t wait_send_payload_pending;
+
+ /* Receive queue */
+ struct list_head receive_queue;
+ int count_receive_queue;
+ spinlock_t receive_queue_lock;
+
+ struct list_head empty_packet_queue;
+ int count_empty_packet_queue;
+ spinlock_t empty_packet_queue_lock;
+
+ wait_queue_head_t wait_receive_queues;
+
+ /* Reassembly queue */
+ struct list_head reassembly_queue;
+ spinlock_t reassembly_queue_lock;
+ wait_queue_head_t wait_reassembly_queue;
+
+ /* total data length of reassembly queue */
+ int reassembly_data_length;
+ int reassembly_queue_length;
+ /* the offset to first buffer in reassembly queue */
+ int first_entry_offset;
+
+ bool send_immediate;
+
+ wait_queue_head_t wait_send_queue;
+
+ /*
+ * Indicate if we have received a full packet on the connection
+ * This is used to identify the first SMBD packet of a assembled
+ * payload (SMB packet) in reassembly queue so we can return a
+ * RFC1002 length to upper layer to indicate the length of the SMB
+ * packet received
+ */
+ bool full_packet_received;
+
+ struct workqueue_struct *workqueue;
+ struct delayed_work idle_timer_work;
+ struct delayed_work send_immediate_work;
+
+ /* Memory pool for preallocating buffers */
+ /* request pool for RDMA send */
+ struct kmem_cache *request_cache;
+ mempool_t *request_mempool;
+
+ /* response pool for RDMA receive */
+ struct kmem_cache *response_cache;
+ mempool_t *response_mempool;
+
+ /* for debug purposes */
+ unsigned int count_get_receive_buffer;
+ unsigned int count_put_receive_buffer;
+ unsigned int count_reassembly_queue;
+ unsigned int count_enqueue_reassembly_queue;
+ unsigned int count_dequeue_reassembly_queue;
+ unsigned int count_send_empty;
+};
+
+enum smbd_message_type {
+ SMBD_NEGOTIATE_RESP,
+ SMBD_TRANSFER_DATA,
+};
+
+#define SMB_DIRECT_RESPONSE_REQUESTED 0x0001
+
+/* SMBD negotiation request packet [MS-SMBD] 2.2.1 */
+struct smbd_negotiate_req {
+ __le16 min_version;
+ __le16 max_version;
+ __le16 reserved;
+ __le16 credits_requested;
+ __le32 preferred_send_size;
+ __le32 max_receive_size;
+ __le32 max_fragmented_size;
+} __packed;
+
+/* SMBD negotiation response packet [MS-SMBD] 2.2.2 */
+struct smbd_negotiate_resp {
+ __le16 min_version;
+ __le16 max_version;
+ __le16 negotiated_version;
+ __le16 reserved;
+ __le16 credits_requested;
+ __le16 credits_granted;
+ __le32 status;
+ __le32 max_readwrite_size;
+ __le32 preferred_send_size;
+ __le32 max_receive_size;
+ __le32 max_fragmented_size;
+} __packed;
+
+/* SMBD data transfer packet with payload [MS-SMBD] 2.2.3 */
+struct smbd_data_transfer {
+ __le16 credits_requested;
+ __le16 credits_granted;
+ __le16 flags;
+ __le16 reserved;
+ __le32 remaining_data_length;
+ __le32 data_offset;
+ __le32 data_length;
+ __le32 padding;
+ __u8 buffer[];
+} __packed;
+
+/* The packet fields for a registered RDMA buffer */
+struct smbd_buffer_descriptor_v1 {
+ __le64 offset;
+ __le32 token;
+ __le32 length;
+} __packed;
+
+/* Default maximum number of SGEs in a RDMA send/recv */
+#define SMBDIRECT_MAX_SGE 16
+/* The context for a SMBD request */
+struct smbd_request {
+ struct smbd_connection *info;
+ struct ib_cqe cqe;
+
+ /* true if this request carries upper layer payload */
+ bool has_payload;
+
+ /* the SGE entries for this packet */
+ struct ib_sge sge[SMBDIRECT_MAX_SGE];
+ int num_sge;
+
+ /* SMBD packet header follows this structure */
+ u8 packet[];
+};
+
+/* The context for a SMBD response */
+struct smbd_response {
+ struct smbd_connection *info;
+ struct ib_cqe cqe;
+ struct ib_sge sge;
+
+ enum smbd_message_type type;
+
+ /* Link to receive queue or reassembly queue */
+ struct list_head list;
+
+ /* Indicate if this is the 1st packet of a payload */
+ bool first_segment;
+
+ /* SMBD packet header and payload follows this structure */
+ u8 packet[];
+};
+
+/* Create a SMBDirect session */
+struct smbd_connection *smbd_get_connection(
+ struct TCP_Server_Info *server, struct sockaddr *dstaddr);
+
+/* Reconnect SMBDirect session */
+int smbd_reconnect(struct TCP_Server_Info *server);
+/* Destroy SMBDirect session */
+void smbd_destroy(struct smbd_connection *info);
+
+/* Interface for carrying upper layer I/O through send/recv */
+int smbd_recv(struct smbd_connection *info, struct msghdr *msg);
+int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst);
+
+enum mr_state {
+ MR_READY,
+ MR_REGISTERED,
+ MR_INVALIDATED,
+ MR_ERROR
+};
+
+struct smbd_mr {
+ struct smbd_connection *conn;
+ struct list_head list;
+ enum mr_state state;
+ struct ib_mr *mr;
+ struct scatterlist *sgl;
+ int sgl_count;
+ enum dma_data_direction dir;
+ union {
+ struct ib_reg_wr wr;
+ struct ib_send_wr inv_wr;
+ };
+ struct ib_cqe cqe;
+ bool need_invalidate;
+ struct completion invalidate_done;
+};
+
+/* Interfaces to register and deregister MR for RDMA read/write */
+struct smbd_mr *smbd_register_mr(
+ struct smbd_connection *info, struct page *pages[], int num_pages,
+ int tailsz, bool writing, bool need_invalidate);
+int smbd_deregister_mr(struct smbd_mr *mr);
+
+#else
+#define cifs_rdma_enabled(server) 0
+struct smbd_connection {};
+static inline void *smbd_get_connection(
+ struct TCP_Server_Info *server, struct sockaddr *dstaddr) {return NULL;}
+static inline int smbd_reconnect(struct TCP_Server_Info *server) {return -1; }
+static inline void smbd_destroy(struct smbd_connection *info) {}
+static inline int smbd_recv(struct smbd_connection *info, struct msghdr *msg) {return -1; }
+static inline int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst) {return -1; }
+#endif
+
+#endif
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 7efbab013957..9779b3292d8e 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -37,6 +37,10 @@
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
+#include "smbdirect.h"
+
+/* Max number of iovectors we can use off the stack when sending requests. */
+#define CIFS_MAX_IOV_SIZE 8
void
cifs_wake_up_task(struct mid_q_entry *mid)
@@ -229,7 +233,10 @@ __smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
struct socket *ssocket = server->ssocket;
struct msghdr smb_msg;
int val = 1;
-
+ if (cifs_rdma_enabled(server) && server->smbd_conn) {
+ rc = smbd_send(server->smbd_conn, rqst);
+ goto smbd_done;
+ }
if (ssocket == NULL)
return -ENOTSOCK;
@@ -298,7 +305,7 @@ uncork:
*/
server->tcpStatus = CifsNeedReconnect;
}
-
+smbd_done:
if (rc < 0 && rc != -EINTR)
cifs_dbg(VFS, "Error %d sending data on socket to server\n",
rc);
@@ -803,12 +810,16 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
const int flags, struct kvec *resp_iov)
{
struct smb_rqst rqst;
- struct kvec *new_iov;
+ struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
int rc;
- new_iov = kmalloc(sizeof(struct kvec) * (n_vec + 1), GFP_KERNEL);
- if (!new_iov)
- return -ENOMEM;
+ if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
+ new_iov = kmalloc(sizeof(struct kvec) * (n_vec + 1),
+ GFP_KERNEL);
+ if (!new_iov)
+ return -ENOMEM;
+ } else
+ new_iov = s_iov;
/* 1st iov is a RFC1001 length followed by the rest of the packet */
memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
@@ -823,7 +834,51 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
rqst.rq_nvec = n_vec + 1;
rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
- kfree(new_iov);
+ if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
+ kfree(new_iov);
+ return rc;
+}
+
+/* Like SendReceive2 but iov[0] does not contain an rfc1002 header */
+int
+smb2_send_recv(const unsigned int xid, struct cifs_ses *ses,
+ struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
+ const int flags, struct kvec *resp_iov)
+{
+ struct smb_rqst rqst;
+ struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
+ int rc;
+ int i;
+ __u32 count;
+ __be32 rfc1002_marker;
+
+ if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
+ new_iov = kmalloc(sizeof(struct kvec) * (n_vec + 1),
+ GFP_KERNEL);
+ if (!new_iov)
+ return -ENOMEM;
+ } else
+ new_iov = s_iov;
+
+ /* 1st iov is an RFC1002 Session Message length */
+ memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
+
+ count = 0;
+ for (i = 1; i < n_vec + 1; i++)
+ count += new_iov[i].iov_len;
+
+ rfc1002_marker = cpu_to_be32(count);
+
+ new_iov[0].iov_base = &rfc1002_marker;
+ new_iov[0].iov_len = 4;
+
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = new_iov;
+ rqst.rq_nvec = n_vec + 1;
+
+ rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
+ if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
+ kfree(new_iov);
return rc;
}
diff --git a/fs/coda/psdev.c b/fs/coda/psdev.c
index f40e3953e7fe..49d3c6fda89a 100644
--- a/fs/coda/psdev.c
+++ b/fs/coda/psdev.c
@@ -61,10 +61,10 @@ static struct class *coda_psdev_class;
* Device operations
*/
-static unsigned int coda_psdev_poll(struct file *file, poll_table * wait)
+static __poll_t coda_psdev_poll(struct file *file, poll_table * wait)
{
struct venus_comm *vcp = (struct venus_comm *) file->private_data;
- unsigned int mask = POLLOUT | POLLWRNORM;
+ __poll_t mask = POLLOUT | POLLWRNORM;
poll_wait(file, &vcp->vc_waitq, wait);
mutex_lock(&vcp->vc_mutex);
diff --git a/fs/dcache.c b/fs/dcache.c
index 5c7df1df81ff..c6d996ee2d61 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -32,7 +32,6 @@
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/fs_struct.h>
-#include <linux/hardirq.h>
#include <linux/bit_spinlock.h>
#include <linux/rculist_bl.h>
#include <linux/prefetch.h>
@@ -49,8 +48,8 @@
* - i_dentry, d_u.d_alias, d_inode of aliases
* dcache_hash_bucket lock protects:
* - the dcache hash table
- * s_anon bl list spinlock protects:
- * - the s_anon list (see __d_drop)
+ * s_roots bl list spinlock protects:
+ * - the s_roots list (see __d_drop)
* dentry->d_sb->s_dentry_lru_lock protects:
* - the dcache lru lists and counters
* d_lock protects:
@@ -68,7 +67,7 @@
* dentry->d_lock
* dentry->d_sb->s_dentry_lru_lock
* dcache_hash_bucket lock
- * s_anon lock
+ * s_roots lock
*
* If there is an ancestor relationship:
* dentry->d_parent->...->d_parent->d_lock
@@ -104,14 +103,13 @@ EXPORT_SYMBOL(slash_name);
* information, yet avoid using a prime hash-size or similar.
*/
-static unsigned int d_hash_mask __read_mostly;
static unsigned int d_hash_shift __read_mostly;
static struct hlist_bl_head *dentry_hashtable __read_mostly;
static inline struct hlist_bl_head *d_hash(unsigned int hash)
{
- return dentry_hashtable + (hash >> (32 - d_hash_shift));
+ return dentry_hashtable + (hash >> d_hash_shift);
}
#define IN_LOOKUP_SHIFT 10
@@ -477,10 +475,10 @@ void __d_drop(struct dentry *dentry)
/*
* Hashed dentries are normally on the dentry hashtable,
* with the exception of those newly allocated by
- * d_obtain_alias, which are always IS_ROOT:
+ * d_obtain_root, which are always IS_ROOT:
*/
if (unlikely(IS_ROOT(dentry)))
- b = &dentry->d_sb->s_anon;
+ b = &dentry->d_sb->s_roots;
else
b = d_hash(dentry->d_name.hash);
@@ -1500,8 +1498,8 @@ void shrink_dcache_for_umount(struct super_block *sb)
sb->s_root = NULL;
do_one_tree(dentry);
- while (!hlist_bl_empty(&sb->s_anon)) {
- dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash));
+ while (!hlist_bl_empty(&sb->s_roots)) {
+ dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_roots), struct dentry, d_hash));
do_one_tree(dentry);
}
}
@@ -1636,8 +1634,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
dname[name->len] = 0;
/* Make sure we always see the terminating NUL character */
- smp_wmb();
- dentry->d_name.name = dname;
+ smp_store_release(&dentry->d_name.name, dname); /* ^^^ */
dentry->d_lockref.count = 1;
dentry->d_flags = 0;
@@ -1965,9 +1962,11 @@ static struct dentry *__d_obtain_alias(struct inode *inode, int disconnected)
spin_lock(&tmp->d_lock);
__d_set_inode_and_type(tmp, inode, add_flags);
hlist_add_head(&tmp->d_u.d_alias, &inode->i_dentry);
- hlist_bl_lock(&tmp->d_sb->s_anon);
- hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
- hlist_bl_unlock(&tmp->d_sb->s_anon);
+ if (!disconnected) {
+ hlist_bl_lock(&tmp->d_sb->s_roots);
+ hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_roots);
+ hlist_bl_unlock(&tmp->d_sb->s_roots);
+ }
spin_unlock(&tmp->d_lock);
spin_unlock(&inode->i_lock);
@@ -3047,17 +3046,14 @@ static int prepend(char **buffer, int *buflen, const char *str, int namelen)
* retry it again when a d_move() does happen. So any garbage in the buffer
* due to mismatched pointer and length will be discarded.
*
- * Data dependency barrier is needed to make sure that we see that terminating
- * NUL. Alpha strikes again, film at 11...
+ * Load acquire is needed to make sure that we see that terminating NUL.
*/
static int prepend_name(char **buffer, int *buflen, const struct qstr *name)
{
- const char *dname = READ_ONCE(name->name);
+ const char *dname = smp_load_acquire(&name->name); /* ^^^ */
u32 dlen = READ_ONCE(name->len);
char *p;
- smp_read_barrier_depends();
-
*buflen -= dlen + 1;
if (*buflen < 0)
return -ENAMETOOLONG;
@@ -3589,9 +3585,10 @@ static void __init dcache_init_early(void)
13,
HASH_EARLY | HASH_ZERO,
&d_hash_shift,
- &d_hash_mask,
+ NULL,
0,
0);
+ d_hash_shift = 32 - d_hash_shift;
}
static void __init dcache_init(void)
@@ -3615,9 +3612,10 @@ static void __init dcache_init(void)
13,
HASH_ZERO,
&d_hash_shift,
- &d_hash_mask,
+ NULL,
0,
0);
+ d_hash_shift = 32 - d_hash_shift;
}
/* SLAB cache for __getname() consumers */
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index cd12e6576b48..6fdbf21be318 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -206,11 +206,11 @@ FULL_PROXY_FUNC(unlocked_ioctl, long, filp,
PROTO(struct file *filp, unsigned int cmd, unsigned long arg),
ARGS(filp, cmd, arg));
-static unsigned int full_proxy_poll(struct file *filp,
+static __poll_t full_proxy_poll(struct file *filp,
struct poll_table_struct *wait)
{
struct dentry *dentry = F_DENTRY(filp);
- unsigned int r = 0;
+ __poll_t r = 0;
const struct file_operations *real_fops;
if (debugfs_file_get(dentry))
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index 7eae33ffa3fc..e31d6ed3ec32 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -168,11 +168,11 @@ struct vfsmount *devpts_mntget(struct file *filp, struct pts_fs_info *fsi)
dput(path.dentry);
if (err) {
mntput(path.mnt);
- path.mnt = ERR_PTR(err);
+ return ERR_PTR(err);
}
if (DEVPTS_SB(path.mnt->mnt_sb) != fsi) {
mntput(path.mnt);
- path.mnt = ERR_PTR(-ENODEV);
+ return ERR_PTR(-ENODEV);
}
return path.mnt;
}
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 3aafb3343a65..a0ca9e48e993 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -219,6 +219,27 @@ static inline struct page *dio_get_page(struct dio *dio,
return dio->pages[sdio->head];
}
+/*
+ * Warn about a page cache invalidation failure during a direct io write.
+ */
+void dio_warn_stale_pagecache(struct file *filp)
+{
+ static DEFINE_RATELIMIT_STATE(_rs, 86400 * HZ, DEFAULT_RATELIMIT_BURST);
+ char pathname[128];
+ struct inode *inode = file_inode(filp);
+ char *path;
+
+ errseq_set(&inode->i_mapping->wb_err, -EIO);
+ if (__ratelimit(&_rs)) {
+ path = file_path(filp, pathname, sizeof(pathname));
+ if (IS_ERR(path))
+ path = "(unknown)";
+ pr_crit("Page cache invalidation failure on direct I/O. Possible data corruption due to collision with buffered I/O!\n");
+ pr_crit("File: %s PID: %d Comm: %.20s\n", path, current->pid,
+ current->comm);
+ }
+}
+
/**
* dio_complete() - called when all DIO BIO I/O has been completed
* @offset: the byte offset in the file of the completed operation
@@ -290,7 +311,8 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
err = invalidate_inode_pages2_range(dio->inode->i_mapping,
offset >> PAGE_SHIFT,
(offset + ret - 1) >> PAGE_SHIFT);
- WARN_ON_ONCE(err);
+ if (err)
+ dio_warn_stale_pagecache(dio->iocb->ki_filp);
}
if (!(dio->flags & DIO_SKIP_DIO_COUNT))
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 05707850f93a..cff79ea0c01d 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -675,9 +675,9 @@ static int receive_from_sock(struct connection *con)
nvec = 2;
}
len = iov[0].iov_len + iov[1].iov_len;
+ iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, iov, nvec, len);
- r = ret = kernel_recvmsg(con->sock, &msg, iov, nvec, len,
- MSG_DONTWAIT | MSG_NOSIGNAL);
+ r = ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT | MSG_NOSIGNAL);
if (ret <= 0)
goto out_close;
else if (ret == len)
diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
index e631b1689228..a4c63e9e6385 100644
--- a/fs/dlm/plock.c
+++ b/fs/dlm/plock.c
@@ -463,9 +463,9 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
return count;
}
-static unsigned int dev_poll(struct file *file, poll_table *wait)
+static __poll_t dev_poll(struct file *file, poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, &send_wq, wait);
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index d18e7a539f11..662432af8ce8 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -887,7 +887,7 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
return rv;
}
-static unsigned int device_poll(struct file *file, poll_table *wait)
+static __poll_t device_poll(struct file *file, poll_table *wait)
{
struct dlm_user_proc *proc = file->private_data;
diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
index f09cacaf8c80..7423e792a092 100644
--- a/fs/ecryptfs/miscdev.c
+++ b/fs/ecryptfs/miscdev.c
@@ -38,11 +38,11 @@ static atomic_t ecryptfs_num_miscdev_opens;
*
* Returns the poll mask
*/
-static unsigned int
+static __poll_t
ecryptfs_miscdev_poll(struct file *file, poll_table *pt)
{
struct ecryptfs_daemon *daemon = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
mutex_lock(&daemon->mux);
if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) {
diff --git a/fs/eventfd.c b/fs/eventfd.c
index 2fb4eadaa118..04fd824142a1 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -80,24 +80,11 @@ static void eventfd_free(struct kref *kref)
}
/**
- * eventfd_ctx_get - Acquires a reference to the internal eventfd context.
- * @ctx: [in] Pointer to the eventfd context.
- *
- * Returns: In case of success, returns a pointer to the eventfd context.
- */
-struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx)
-{
- kref_get(&ctx->kref);
- return ctx;
-}
-EXPORT_SYMBOL_GPL(eventfd_ctx_get);
-
-/**
* eventfd_ctx_put - Releases a reference to the internal eventfd context.
* @ctx: [in] Pointer to eventfd context.
*
* The eventfd context reference must have been previously acquired either
- * with eventfd_ctx_get() or eventfd_ctx_fdget().
+ * with eventfd_ctx_fdget() or eventfd_ctx_fileget().
*/
void eventfd_ctx_put(struct eventfd_ctx *ctx)
{
@@ -114,10 +101,10 @@ static int eventfd_release(struct inode *inode, struct file *file)
return 0;
}
-static unsigned int eventfd_poll(struct file *file, poll_table *wait)
+static __poll_t eventfd_poll(struct file *file, poll_table *wait)
{
struct eventfd_ctx *ctx = file->private_data;
- unsigned int events = 0;
+ __poll_t events = 0;
u64 count;
poll_wait(file, &ctx->wqh, wait);
@@ -207,36 +194,27 @@ int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *w
}
EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue);
-/**
- * eventfd_ctx_read - Reads the eventfd counter or wait if it is zero.
- * @ctx: [in] Pointer to eventfd context.
- * @no_wait: [in] Different from zero if the operation should not block.
- * @cnt: [out] Pointer to the 64-bit counter value.
- *
- * Returns %0 if successful, or the following error codes:
- *
- * - -EAGAIN : The operation would have blocked but @no_wait was non-zero.
- * - -ERESTARTSYS : A signal interrupted the wait operation.
- *
- * If @no_wait is zero, the function might sleep until the eventfd internal
- * counter becomes greater than zero.
- */
-ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt)
+static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
{
+ struct eventfd_ctx *ctx = file->private_data;
ssize_t res;
+ __u64 ucnt = 0;
DECLARE_WAITQUEUE(wait, current);
+ if (count < sizeof(ucnt))
+ return -EINVAL;
+
spin_lock_irq(&ctx->wqh.lock);
- *cnt = 0;
res = -EAGAIN;
if (ctx->count > 0)
- res = 0;
- else if (!no_wait) {
+ res = sizeof(ucnt);
+ else if (!(file->f_flags & O_NONBLOCK)) {
__add_wait_queue(&ctx->wqh, &wait);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
if (ctx->count > 0) {
- res = 0;
+ res = sizeof(ucnt);
break;
}
if (signal_pending(current)) {
@@ -250,31 +228,17 @@ ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt)
__remove_wait_queue(&ctx->wqh, &wait);
__set_current_state(TASK_RUNNING);
}
- if (likely(res == 0)) {
- eventfd_ctx_do_read(ctx, cnt);
+ if (likely(res > 0)) {
+ eventfd_ctx_do_read(ctx, &ucnt);
if (waitqueue_active(&ctx->wqh))
wake_up_locked_poll(&ctx->wqh, POLLOUT);
}
spin_unlock_irq(&ctx->wqh.lock);
- return res;
-}
-EXPORT_SYMBOL_GPL(eventfd_ctx_read);
-
-static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
- loff_t *ppos)
-{
- struct eventfd_ctx *ctx = file->private_data;
- ssize_t res;
- __u64 cnt;
-
- if (count < sizeof(cnt))
- return -EINVAL;
- res = eventfd_ctx_read(ctx, file->f_flags & O_NONBLOCK, &cnt);
- if (res < 0)
- return res;
+ if (res > 0 && put_user(ucnt, (__u64 __user *)buf))
+ return -EFAULT;
- return put_user(cnt, (__u64 __user *) buf) ? -EFAULT : sizeof(cnt);
+ return res;
}
static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count,
@@ -405,79 +369,44 @@ EXPORT_SYMBOL_GPL(eventfd_ctx_fdget);
*/
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file)
{
+ struct eventfd_ctx *ctx;
+
if (file->f_op != &eventfd_fops)
return ERR_PTR(-EINVAL);
- return eventfd_ctx_get(file->private_data);
+ ctx = file->private_data;
+ kref_get(&ctx->kref);
+ return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fileget);
-/**
- * eventfd_file_create - Creates an eventfd file pointer.
- * @count: Initial eventfd counter value.
- * @flags: Flags for the eventfd file.
- *
- * This function creates an eventfd file pointer, w/out installing it into
- * the fd table. This is useful when the eventfd file is used during the
- * initialization of data structures that require extra setup after the eventfd
- * creation. So the eventfd creation is split into the file pointer creation
- * phase, and the file descriptor installation phase.
- * In this way races with userspace closing the newly installed file descriptor
- * can be avoided.
- * Returns an eventfd file pointer, or a proper error pointer.
- */
-struct file *eventfd_file_create(unsigned int count, int flags)
+SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
{
- struct file *file;
struct eventfd_ctx *ctx;
+ int fd;
/* Check the EFD_* constants for consistency. */
BUILD_BUG_ON(EFD_CLOEXEC != O_CLOEXEC);
BUILD_BUG_ON(EFD_NONBLOCK != O_NONBLOCK);
if (flags & ~EFD_FLAGS_SET)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
kref_init(&ctx->kref);
init_waitqueue_head(&ctx->wqh);
ctx->count = count;
ctx->flags = flags;
- file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx,
- O_RDWR | (flags & EFD_SHARED_FCNTL_FLAGS));
- if (IS_ERR(file))
+ fd = anon_inode_getfd("[eventfd]", &eventfd_fops, ctx,
+ O_RDWR | (flags & EFD_SHARED_FCNTL_FLAGS));
+ if (fd < 0)
eventfd_free_ctx(ctx);
- return file;
-}
-
-SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
-{
- int fd, error;
- struct file *file;
-
- error = get_unused_fd_flags(flags & EFD_SHARED_FCNTL_FLAGS);
- if (error < 0)
- return error;
- fd = error;
-
- file = eventfd_file_create(count, flags);
- if (IS_ERR(file)) {
- error = PTR_ERR(file);
- goto err_put_unused_fd;
- }
- fd_install(fd, file);
-
return fd;
-
-err_put_unused_fd:
- put_unused_fd(fd);
-
- return error;
}
SYSCALL_DEFINE1(eventfd, unsigned int, count)
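
The fold above keeps the user-visible contract of eventfd(2) intact: reads
shorter than 8 bytes fail with EINVAL, a non-zero counter is drained in a
single read, and O_NONBLOCK turns the blocking wait into EAGAIN. A minimal
userspace sketch of those semantics (error handling trimmed):

    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/eventfd.h>

    int main(void)
    {
            uint64_t cnt;
            int fd = eventfd(3, EFD_NONBLOCK);      /* counter starts at 3 */

            if (fd < 0)
                    return 1;
            if (read(fd, &cnt, sizeof(cnt)) == sizeof(cnt))
                    printf("drained: %llu\n", (unsigned long long)cnt);
            /* counter is now 0, so the next read fails with EAGAIN */
            if (read(fd, &cnt, sizeof(cnt)) < 0)
                    perror("read");
            close(fd);
            return 0;
    }
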
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index afd548ebc328..42e35a6977c9 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -874,7 +874,8 @@ static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
* the ep->mtx so we need to start from depth=1, such that mutex_lock_nested()
* is correctly annotated.
*/
-static unsigned int ep_item_poll(struct epitem *epi, poll_table *pt, int depth)
+static unsigned int ep_item_poll(const struct epitem *epi, poll_table *pt,
+ int depth)
{
struct eventpoll *ep;
bool locked;
@@ -920,7 +921,7 @@ static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
return 0;
}
-static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
+static __poll_t ep_eventpoll_poll(struct file *file, poll_table *wait)
{
struct eventpoll *ep = file->private_data;
int depth = 0;
@@ -1117,6 +1118,7 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
unsigned long flags;
struct epitem *epi = ep_item_from_wait(wait);
struct eventpoll *ep = epi->ep;
+ __poll_t pollflags = key_to_poll(key);
int ewake = 0;
spin_lock_irqsave(&ep->lock, flags);
@@ -1138,7 +1140,7 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
* callback. We need to be able to handle both cases here, hence the
* test for "key" != NULL before the event match test.
*/
- if (key && !((unsigned long) key & epi->event.events))
+ if (pollflags && !(pollflags & epi->event.events))
goto out_unlock;
/*
@@ -1175,8 +1177,8 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
*/
if (waitqueue_active(&ep->wq)) {
if ((epi->event.events & EPOLLEXCLUSIVE) &&
- !((unsigned long)key & POLLFREE)) {
- switch ((unsigned long)key & EPOLLINOUT_BITS) {
+ !(pollflags & POLLFREE)) {
+ switch (pollflags & EPOLLINOUT_BITS) {
case POLLIN:
if (epi->event.events & POLLIN)
ewake = 1;
@@ -1205,7 +1207,7 @@ out_unlock:
if (!(epi->event.events & EPOLLEXCLUSIVE))
ewake = 1;
- if ((unsigned long)key & POLLFREE) {
+ if (pollflags & POLLFREE) {
/*
* If we race with ep_remove_wait_queue() it can miss
* ->whead = NULL and do another remove_wait_queue() after
@@ -1409,7 +1411,7 @@ static noinline void ep_destroy_wakeup_source(struct epitem *epi)
/*
* Must be called with "mtx" held.
*/
-static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
+static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
struct file *tfile, int fd, int full_check)
{
int error, revents, pwake = 0;
@@ -1486,7 +1488,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
ep_set_busy_poll_napi_id(epi);
/* If the file is already "ready" we drop it inside the ready list */
- if ((revents & event->events) && !ep_is_linked(&epi->rdllink)) {
+ if (revents && !ep_is_linked(&epi->rdllink)) {
list_add_tail(&epi->rdllink, &ep->rdllist);
ep_pm_stay_awake(epi);
@@ -1540,10 +1542,10 @@ error_create_wakeup_source:
* Modify the interest event mask by dropping an event if the new mask
* has a match in the current file status. Must be called with "mtx" held.
*/
-static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_event *event)
+static int ep_modify(struct eventpoll *ep, struct epitem *epi,
+ const struct epoll_event *event)
{
int pwake = 0;
- unsigned int revents;
poll_table pt;
init_poll_funcptr(&pt, NULL);
@@ -1585,14 +1587,10 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_even
/*
* Get current event bits. We can safely use the file* here because
* its usage count has been increased by the caller of this function.
- */
- revents = ep_item_poll(epi, &pt, 1);
-
- /*
* If the item is "hot" and it is not registered inside the ready
* list, push it inside.
*/
- if (revents & event->events) {
+ if (ep_item_poll(epi, &pt, 1)) {
spin_lock_irq(&ep->lock);
if (!ep_is_linked(&epi->rdllink)) {
list_add_tail(&epi->rdllink, &ep->rdllist);
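
The __poll_t conversion above replaces the open-coded (unsigned long)key
casts with key_to_poll(). A hedged sketch of the same decoding in a generic
wait-queue callback; my_wake_callback and its event mask are assumptions for
illustration, not eventpoll code:

    #include <linux/wait.h>
    #include <linux/poll.h>

    static int my_wake_callback(wait_queue_entry_t *wait, unsigned mode,
                                int sync, void *key)
    {
            __poll_t events = key_to_poll(key);

            /* a NULL key means "wake unconditionally"; otherwise filter */
            if (events && !(events & (EPOLLIN | EPOLLOUT)))
                    return 0;
            /* ... wake the interested waiter ... */
            return 1;
    }
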
diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c
index 98233a97b7b8..c5a53fcc43ea 100644
--- a/fs/exofs/dir.c
+++ b/fs/exofs/dir.c
@@ -31,6 +31,7 @@
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include <linux/iversion.h>
#include "exofs.h"
static inline unsigned exofs_chunk_size(struct inode *inode)
@@ -60,7 +61,7 @@ static int exofs_commit_chunk(struct page *page, loff_t pos, unsigned len)
struct inode *dir = mapping->host;
int err = 0;
- dir->i_version++;
+ inode_inc_iversion(dir);
if (!PageUptodate(page))
SetPageUptodate(page);
@@ -241,7 +242,7 @@ exofs_readdir(struct file *file, struct dir_context *ctx)
unsigned long n = pos >> PAGE_SHIFT;
unsigned long npages = dir_pages(inode);
unsigned chunk_mask = ~(exofs_chunk_size(inode)-1);
- int need_revalidate = (file->f_version != inode->i_version);
+ bool need_revalidate = inode_cmp_iversion(inode, file->f_version);
if (pos > inode->i_size - EXOFS_DIR_REC_LEN(1))
return 0;
@@ -264,8 +265,8 @@ exofs_readdir(struct file *file, struct dir_context *ctx)
chunk_mask);
ctx->pos = (n<<PAGE_SHIFT) + offset;
}
- file->f_version = inode->i_version;
- need_revalidate = 0;
+ file->f_version = inode_query_iversion(inode);
+ need_revalidate = false;
}
de = (struct exofs_dir_entry *)(kaddr + offset);
limit = kaddr + exofs_last_byte(inode, n) -
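
The readdir conversions in exofs, ext2 and ext4 below all reduce to the same
idiom from <linux/iversion.h>: rescan when the sampled f_version no longer
matches the inode, then latch the freshly queried value. A minimal sketch of
that idiom, assuming an arbitrary directory inode:

    #include <linux/fs.h>
    #include <linux/iversion.h>

    static bool dir_needs_rescan(struct file *file, struct inode *dir)
    {
            /* non-zero when the directory changed since f_version was set */
            if (!inode_cmp_iversion(dir, file->f_version))
                    return false;
            /* inode_query_iversion() marks the value as queried, so any
             * later change is guaranteed to bump it */
            file->f_version = inode_query_iversion(dir);
            return true;
    }
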
diff --git a/fs/exofs/super.c b/fs/exofs/super.c
index 819624cfc8da..7e244093c0e5 100644
--- a/fs/exofs/super.c
+++ b/fs/exofs/super.c
@@ -38,6 +38,7 @@
#include <linux/module.h>
#include <linux/exportfs.h>
#include <linux/slab.h>
+#include <linux/iversion.h>
#include "exofs.h"
@@ -159,7 +160,7 @@ static struct inode *exofs_alloc_inode(struct super_block *sb)
if (!oi)
return NULL;
- oi->vfs_inode.i_version = 1;
+ inode_set_iversion(&oi->vfs_inode, 1);
return &oi->vfs_inode;
}
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 987647986f47..4111085a129f 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -26,6 +26,7 @@
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
+#include <linux/iversion.h>
typedef struct ext2_dir_entry_2 ext2_dirent;
@@ -92,7 +93,7 @@ static int ext2_commit_chunk(struct page *page, loff_t pos, unsigned len)
struct inode *dir = mapping->host;
int err = 0;
- dir->i_version++;
+ inode_inc_iversion(dir);
block_write_end(NULL, mapping, pos, len, len, page, NULL);
if (pos+len > dir->i_size) {
@@ -293,7 +294,7 @@ ext2_readdir(struct file *file, struct dir_context *ctx)
unsigned long npages = dir_pages(inode);
unsigned chunk_mask = ~(ext2_chunk_size(inode)-1);
unsigned char *types = NULL;
- int need_revalidate = file->f_version != inode->i_version;
+ bool need_revalidate = inode_cmp_iversion(inode, file->f_version);
if (pos > inode->i_size - EXT2_DIR_REC_LEN(1))
return 0;
@@ -319,8 +320,8 @@ ext2_readdir(struct file *file, struct dir_context *ctx)
offset = ext2_validate_entry(kaddr, offset, chunk_mask);
ctx->pos = (n<<PAGE_SHIFT) + offset;
}
- file->f_version = inode->i_version;
- need_revalidate = 0;
+ file->f_version = inode_query_iversion(inode);
+ need_revalidate = false;
}
de = (ext2_dirent *)(kaddr+offset);
limit = kaddr + ext2_last_byte(inode, n) - EXT2_DIR_REC_LEN(1);
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 7646818ab266..554c98b8a93a 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -33,6 +33,7 @@
#include <linux/quotaops.h>
#include <linux/uaccess.h>
#include <linux/dax.h>
+#include <linux/iversion.h>
#include "ext2.h"
#include "xattr.h"
#include "acl.h"
@@ -184,7 +185,7 @@ static struct inode *ext2_alloc_inode(struct super_block *sb)
if (!ei)
return NULL;
ei->i_block_alloc_info = NULL;
- ei->vfs_inode.i_version = 1;
+ inode_set_iversion(&ei->vfs_inode, 1);
#ifdef CONFIG_QUOTA
memset(&ei->i_dquot, 0, sizeof(ei->i_dquot));
#endif
@@ -1569,7 +1570,7 @@ out:
return err;
if (inode->i_size < off+len-towrite)
i_size_write(inode, off+len-towrite);
- inode->i_version++;
+ inode_inc_iversion(inode);
inode->i_mtime = inode->i_ctime = current_time(inode);
mark_inode_dirty(inode);
return len - towrite;
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index d5babc9f222b..afda0a0499ce 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -25,6 +25,7 @@
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
+#include <linux/iversion.h>
#include "ext4.h"
#include "xattr.h"
@@ -208,7 +209,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
* readdir(2), then we might be pointing to an invalid
* dirent right now. Scan from the start of the block
* to make sure. */
- if (file->f_version != inode->i_version) {
+ if (inode_cmp_iversion(inode, file->f_version)) {
for (i = 0; i < sb->s_blocksize && i < offset; ) {
de = (struct ext4_dir_entry_2 *)
(bh->b_data + i);
@@ -227,7 +228,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
offset = i;
ctx->pos = (ctx->pos & ~(sb->s_blocksize - 1))
| offset;
- file->f_version = inode->i_version;
+ file->f_version = inode_query_iversion(inode);
}
while (ctx->pos < inode->i_size
@@ -568,10 +569,10 @@ static int ext4_dx_readdir(struct file *file, struct dir_context *ctx)
* cached entries.
*/
if ((!info->curr_node) ||
- (file->f_version != inode->i_version)) {
+ inode_cmp_iversion(inode, file->f_version)) {
info->curr_node = NULL;
free_rb_tree_fname(&info->root);
- file->f_version = inode->i_version;
+ file->f_version = inode_query_iversion(inode);
ret = ext4_htree_fill_tree(file, info->curr_hash,
info->curr_minor_hash,
&info->next_hash);
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 1367553c43bb..a8b987b71173 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -14,6 +14,7 @@
#include <linux/iomap.h>
#include <linux/fiemap.h>
+#include <linux/iversion.h>
#include "ext4_jbd2.h"
#include "ext4.h"
@@ -1042,7 +1043,7 @@ static int ext4_add_dirent_to_inline(handle_t *handle,
*/
dir->i_mtime = dir->i_ctime = current_time(dir);
ext4_update_dx_flag(dir);
- dir->i_version++;
+ inode_inc_iversion(dir);
return 1;
}
@@ -1494,7 +1495,7 @@ int ext4_read_inline_dir(struct file *file,
* dirent right now. Scan from the start of the inline
* dir to make sure.
*/
- if (file->f_version != inode->i_version) {
+ if (inode_cmp_iversion(inode, file->f_version)) {
for (i = 0; i < extra_size && i < offset;) {
/*
* "." is with offset 0 and
@@ -1526,7 +1527,7 @@ int ext4_read_inline_dir(struct file *file,
}
offset = i;
ctx->pos = offset;
- file->f_version = inode->i_version;
+ file->f_version = inode_query_iversion(inode);
}
while (ctx->pos < extra_size) {
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 534a9130f625..0eff5b761c6e 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -39,6 +39,7 @@
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/iomap.h>
+#include <linux/iversion.h>
#include "ext4_jbd2.h"
#include "xattr.h"
@@ -4882,12 +4883,14 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
- inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
+ u64 ivers = le32_to_cpu(raw_inode->i_disk_version);
+
if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
- inode->i_version |=
+ ivers |=
(__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
}
+ inode_set_iversion_queried(inode, ivers);
}
ret = 0;
@@ -5173,11 +5176,13 @@ static int ext4_do_update_inode(handle_t *handle,
}
if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
- raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
+ u64 ivers = inode_peek_iversion(inode);
+
+ raw_inode->i_disk_version = cpu_to_le32(ivers);
if (ei->i_extra_isize) {
if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
raw_inode->i_version_hi =
- cpu_to_le32(inode->i_version >> 32);
+ cpu_to_le32(ivers >> 32);
raw_inode->i_extra_isize =
cpu_to_le16(ei->i_extra_isize);
}
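
On disk, ext4 still stores the 64-bit version as two le32 fields, so the
load/store pairs above are a plain 32/32 split; sketched here with
hypothetical lo/hi locals standing in for the raw_inode fields:

    __le32 lo, hi;  /* stand-ins for i_disk_version / i_version_hi */
    u64 ivers;

    ivers = le32_to_cpu(lo) | ((u64)le32_to_cpu(hi) << 32);   /* load */

    lo = cpu_to_le32(ivers & 0xffffffff);                     /* store */
    hi = cpu_to_le32(ivers >> 32);
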
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 1eec25014f62..7e99ad02f1ba 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -19,6 +19,7 @@
#include <linux/uuid.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
+#include <linux/iversion.h>
#include "ext4_jbd2.h"
#include "ext4.h"
#include <linux/fsmap.h>
@@ -144,7 +145,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
i_gid_write(inode_bl, 0);
inode_bl->i_flags = 0;
ei_bl->i_flags = 0;
- inode_bl->i_version = 1;
+ inode_set_iversion(inode_bl, 1);
i_size_write(inode_bl, 0);
inode_bl->i_mode = S_IFREG;
if (ext4_has_feature_extents(sb)) {
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index e750d68fbcb5..6660686e505a 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -34,6 +34,7 @@
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/bio.h>
+#include <linux/iversion.h>
#include "ext4.h"
#include "ext4_jbd2.h"
@@ -2959,7 +2960,7 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
"empty directory '%.*s' has too many links (%u)",
dentry->d_name.len, dentry->d_name.name,
inode->i_nlink);
- inode->i_version++;
+ inode_inc_iversion(inode);
clear_nlink(inode);
/* There's no need to set i_disksize: the fact that i_nlink is
* zero will ensure that the right thing happens during any
@@ -3365,7 +3366,7 @@ static int ext4_setent(handle_t *handle, struct ext4_renament *ent,
ent->de->inode = cpu_to_le32(ino);
if (ext4_has_feature_filetype(ent->dir->i_sb))
ent->de->file_type = file_type;
- ent->dir->i_version++;
+ inode_inc_iversion(ent->dir);
ent->dir->i_ctime = ent->dir->i_mtime =
current_time(ent->dir);
ext4_mark_inode_dirty(handle, ent->dir);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 7c46693a14d7..5de959fb0244 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -40,6 +40,7 @@
#include <linux/dax.h>
#include <linux/cleancache.h>
#include <linux/uaccess.h>
+#include <linux/iversion.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
@@ -967,7 +968,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
if (!ei)
return NULL;
- ei->vfs_inode.i_version = 1;
+ inode_set_iversion(&ei->vfs_inode, 1);
spin_lock_init(&ei->i_raw_lock);
INIT_LIST_HEAD(&ei->i_prealloc_list);
spin_lock_init(&ei->i_prealloc_lock);
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 218a7ba57819..63656dbafdc4 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -56,6 +56,7 @@
#include <linux/slab.h>
#include <linux/mbcache.h>
#include <linux/quotaops.h>
+#include <linux/iversion.h>
#include "ext4_jbd2.h"
#include "ext4.h"
#include "xattr.h"
@@ -294,13 +295,13 @@ ext4_xattr_inode_hash(struct ext4_sb_info *sbi, const void *buffer, size_t size)
static u64 ext4_xattr_inode_get_ref(struct inode *ea_inode)
{
return ((u64)ea_inode->i_ctime.tv_sec << 32) |
- ((u32)ea_inode->i_version);
+ (u32) inode_peek_iversion_raw(ea_inode);
}
static void ext4_xattr_inode_set_ref(struct inode *ea_inode, u64 ref_count)
{
ea_inode->i_ctime.tv_sec = (u32)(ref_count >> 32);
- ea_inode->i_version = (u32)ref_count;
+ inode_set_iversion_raw(ea_inode, ref_count & 0xffffffff);
}
static u32 ext4_xattr_inode_get_hash(struct inode *ea_inode)
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
index 2bb7c9fc5144..111824199a88 100644
--- a/fs/f2fs/acl.c
+++ b/fs/f2fs/acl.c
@@ -270,7 +270,7 @@ static struct posix_acl *f2fs_acl_clone(const struct posix_acl *acl,
sizeof(struct posix_acl_entry);
clone = kmemdup(acl, size, flags);
if (clone)
- atomic_set(&clone->a_refcount, 1);
+ refcount_set(&clone->a_refcount, 1);
}
return clone;
}
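
The a_refcount change is part of the wider atomic_t to refcount_t
conversion: refcount_t warns and saturates on overflow or underflow where
atomic_t silently wraps. A generic sketch of the pattern (struct obj is an
assumption, not f2fs code):

    #include <linux/refcount.h>
    #include <linux/slab.h>

    struct obj {
            refcount_t ref;
    };

    static void obj_get(struct obj *o)
    {
            refcount_inc(&o->ref); /* WARNs and saturates on overflow */
    }

    static void obj_put(struct obj *o)
    {
            if (refcount_dec_and_test(&o->ref))
                    kfree(o);
    }
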
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 4aa69bc1c70a..512dca8abc7d 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -237,12 +237,15 @@ static int __f2fs_write_meta_page(struct page *page,
trace_f2fs_writepage(page, META);
+ if (unlikely(f2fs_cp_error(sbi))) {
+ dec_page_count(sbi, F2FS_DIRTY_META);
+ unlock_page(page);
+ return 0;
+ }
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto redirty_out;
if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0))
goto redirty_out;
- if (unlikely(f2fs_cp_error(sbi)))
- goto redirty_out;
write_meta_page(sbi, page, io_type);
dec_page_count(sbi, F2FS_DIRTY_META);
@@ -793,7 +796,7 @@ int get_valid_checkpoint(struct f2fs_sb_info *sbi)
block_t cp_blk_no;
int i;
- sbi->ckpt = kzalloc(cp_blks * blk_size, GFP_KERNEL);
+ sbi->ckpt = f2fs_kzalloc(sbi, cp_blks * blk_size, GFP_KERNEL);
if (!sbi->ckpt)
return -ENOMEM;
/*
@@ -1154,6 +1157,7 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
/* set this flag to activate crc|cp_ver for recovery */
__set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG);
+ __clear_ckpt_flags(ckpt, CP_NOCRC_RECOVERY_FLAG);
spin_unlock_irqrestore(&sbi->cp_lock, flags);
}
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 516fa0d3ff9c..7578ed1a85e0 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -56,7 +56,7 @@ static void f2fs_read_end_io(struct bio *bio)
int i;
#ifdef CONFIG_F2FS_FAULT_INJECTION
- if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO)) {
+ if (time_to_inject(F2FS_P_SB(bio_first_page_all(bio)), FAULT_IO)) {
f2fs_show_injection_info(FAULT_IO);
bio->bi_status = BLK_STS_IOERR;
}
@@ -111,8 +111,13 @@ static void f2fs_write_end_io(struct bio *bio)
if (unlikely(bio->bi_status)) {
mapping_set_error(page->mapping, -EIO);
- f2fs_stop_checkpoint(sbi, true);
+ if (type == F2FS_WB_CP_DATA)
+ f2fs_stop_checkpoint(sbi, true);
}
+
+ f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
+ page->index != nid_of_node(page));
+
dec_page_count(sbi, type);
clear_cold_data(page);
end_page_writeback(page);
@@ -169,6 +174,7 @@ static bool __same_bdev(struct f2fs_sb_info *sbi,
* Low-level block read/write IO operations.
*/
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
+ struct writeback_control *wbc,
int npages, bool is_read)
{
struct bio *bio;
@@ -178,6 +184,8 @@ static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
f2fs_target_device(sbi, blk_addr, bio);
bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
bio->bi_private = is_read ? NULL : sbi;
+ if (wbc)
+ wbc_init_bio(wbc, bio);
return bio;
}
@@ -373,7 +381,8 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
f2fs_trace_ios(fio, 0);
/* Allocate a new bio */
- bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->op));
+ bio = __bio_alloc(fio->sbi, fio->new_blkaddr, fio->io_wbc,
+ 1, is_read_io(fio->op));
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
bio_put(bio);
@@ -435,7 +444,7 @@ alloc_new:
dec_page_count(sbi, WB_DATA_TYPE(bio_page));
goto out_fail;
}
- io->bio = __bio_alloc(sbi, fio->new_blkaddr,
+ io->bio = __bio_alloc(sbi, fio->new_blkaddr, fio->io_wbc,
BIO_MAX_PAGES, false);
io->fio = *fio;
}
@@ -445,6 +454,9 @@ alloc_new:
goto alloc_new;
}
+ if (fio->io_wbc)
+ wbc_account_io(fio->io_wbc, bio_page, PAGE_SIZE);
+
io->last_block_in_bio = fio->new_blkaddr;
f2fs_trace_ios(fio, 0);
@@ -783,7 +795,7 @@ got_it:
return page;
}
-static int __allocate_data_block(struct dnode_of_data *dn)
+static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
struct f2fs_summary sum;
@@ -808,7 +820,7 @@ alloc:
set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
- &sum, CURSEG_WARM_DATA, NULL, false);
+ &sum, seg_type, NULL, false);
set_data_blkaddr(dn);
/* update i_size */
@@ -831,10 +843,12 @@ int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
{
struct inode *inode = file_inode(iocb->ki_filp);
struct f2fs_map_blocks map;
+ int flag;
int err = 0;
+ bool direct_io = iocb->ki_flags & IOCB_DIRECT;
/* convert inline data for Direct I/O */
- if (iocb->ki_flags & IOCB_DIRECT) {
+ if (direct_io) {
err = f2fs_convert_inline_inode(inode);
if (err)
return err;
@@ -851,19 +865,33 @@ int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
map.m_len = 0;
map.m_next_pgofs = NULL;
+ map.m_next_extent = NULL;
+ map.m_seg_type = NO_CHECK_TYPE;
- if (iocb->ki_flags & IOCB_DIRECT)
- return f2fs_map_blocks(inode, &map, 1,
- __force_buffered_io(inode, WRITE) ?
- F2FS_GET_BLOCK_PRE_AIO :
- F2FS_GET_BLOCK_PRE_DIO);
+ if (direct_io) {
+ map.m_seg_type = rw_hint_to_seg_type(iocb->ki_hint);
+ flag = __force_buffered_io(inode, WRITE) ?
+ F2FS_GET_BLOCK_PRE_AIO :
+ F2FS_GET_BLOCK_PRE_DIO;
+ goto map_blocks;
+ }
if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
err = f2fs_convert_inline_inode(inode);
if (err)
return err;
}
- if (!f2fs_has_inline_data(inode))
- return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
+ if (f2fs_has_inline_data(inode))
+ return err;
+
+ flag = F2FS_GET_BLOCK_PRE_AIO;
+
+map_blocks:
+ err = f2fs_map_blocks(inode, &map, 1, flag);
+ if (map.m_len > 0 && err == -ENOSPC) {
+ if (!direct_io)
+ set_inode_flag(inode, FI_NO_PREALLOC);
+ err = 0;
+ }
return err;
}
@@ -904,6 +932,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
blkcnt_t prealloc;
struct extent_info ei = {0,0,0};
block_t blkaddr;
+ unsigned int start_pgofs;
if (!maxblocks)
return 0;
@@ -919,6 +948,8 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
map->m_pblk = ei.blk + pgofs - ei.fofs;
map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
map->m_flags = F2FS_MAP_MAPPED;
+ if (map->m_next_extent)
+ *map->m_next_extent = pgofs + map->m_len;
goto out;
}
@@ -937,10 +968,14 @@ next_dnode:
if (map->m_next_pgofs)
*map->m_next_pgofs =
get_next_page_offset(&dn, pgofs);
+ if (map->m_next_extent)
+ *map->m_next_extent =
+ get_next_page_offset(&dn, pgofs);
}
goto unlock_out;
}
+ start_pgofs = pgofs;
prealloc = 0;
last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
@@ -960,7 +995,8 @@ next_block:
last_ofs_in_node = dn.ofs_in_node;
}
} else {
- err = __allocate_data_block(&dn);
+ err = __allocate_data_block(&dn,
+ map->m_seg_type);
if (!err)
set_inode_flag(inode, FI_APPEND_WRITE);
}
@@ -973,14 +1009,20 @@ next_block:
map->m_pblk = 0;
goto sync_out;
}
+ if (flag == F2FS_GET_BLOCK_PRECACHE)
+ goto sync_out;
if (flag == F2FS_GET_BLOCK_FIEMAP &&
blkaddr == NULL_ADDR) {
if (map->m_next_pgofs)
*map->m_next_pgofs = pgofs + 1;
+ goto sync_out;
}
- if (flag != F2FS_GET_BLOCK_FIEMAP ||
- blkaddr != NEW_ADDR)
+ if (flag != F2FS_GET_BLOCK_FIEMAP) {
+ /* for defragment case */
+ if (map->m_next_pgofs)
+ *map->m_next_pgofs = pgofs + 1;
goto sync_out;
+ }
}
}
@@ -1031,6 +1073,16 @@ skip:
else if (dn.ofs_in_node < end_offset)
goto next_block;
+ if (flag == F2FS_GET_BLOCK_PRECACHE) {
+ if (map->m_flags & F2FS_MAP_MAPPED) {
+ unsigned int ofs = start_pgofs - map->m_lblk;
+
+ f2fs_update_extent_cache_range(&dn,
+ start_pgofs, map->m_pblk + ofs,
+ map->m_len - ofs);
+ }
+ }
+
f2fs_put_dnode(&dn);
if (create) {
@@ -1040,6 +1092,17 @@ skip:
goto next_dnode;
sync_out:
+ if (flag == F2FS_GET_BLOCK_PRECACHE) {
+ if (map->m_flags & F2FS_MAP_MAPPED) {
+ unsigned int ofs = start_pgofs - map->m_lblk;
+
+ f2fs_update_extent_cache_range(&dn,
+ start_pgofs, map->m_pblk + ofs,
+ map->m_len - ofs);
+ }
+ if (map->m_next_extent)
+ *map->m_next_extent = pgofs + 1;
+ }
f2fs_put_dnode(&dn);
unlock_out:
if (create) {
@@ -1053,7 +1116,7 @@ out:
static int __get_data_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh, int create, int flag,
- pgoff_t *next_pgofs)
+ pgoff_t *next_pgofs, int seg_type)
{
struct f2fs_map_blocks map;
int err;
@@ -1061,6 +1124,8 @@ static int __get_data_block(struct inode *inode, sector_t iblock,
map.m_lblk = iblock;
map.m_len = bh->b_size >> inode->i_blkbits;
map.m_next_pgofs = next_pgofs;
+ map.m_next_extent = NULL;
+ map.m_seg_type = seg_type;
err = f2fs_map_blocks(inode, &map, create, flag);
if (!err) {
@@ -1076,14 +1141,17 @@ static int get_data_block(struct inode *inode, sector_t iblock,
pgoff_t *next_pgofs)
{
return __get_data_block(inode, iblock, bh_result, create,
- flag, next_pgofs);
+ flag, next_pgofs,
+ NO_CHECK_TYPE);
}
static int get_data_block_dio(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
return __get_data_block(inode, iblock, bh_result, create,
- F2FS_GET_BLOCK_DEFAULT, NULL);
+ F2FS_GET_BLOCK_DEFAULT, NULL,
+ rw_hint_to_seg_type(
+ inode->i_write_hint));
}
static int get_data_block_bmap(struct inode *inode, sector_t iblock,
@@ -1094,7 +1162,8 @@ static int get_data_block_bmap(struct inode *inode, sector_t iblock,
return -EFBIG;
return __get_data_block(inode, iblock, bh_result, create,
- F2FS_GET_BLOCK_BMAP, NULL);
+ F2FS_GET_BLOCK_BMAP, NULL,
+ NO_CHECK_TYPE);
}
static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
@@ -1107,6 +1176,68 @@ static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
return (blk << inode->i_blkbits);
}
+static int f2fs_xattr_fiemap(struct inode *inode,
+ struct fiemap_extent_info *fieinfo)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct page *page;
+ struct node_info ni;
+ __u64 phys = 0, len;
+ __u32 flags;
+ nid_t xnid = F2FS_I(inode)->i_xattr_nid;
+ int err = 0;
+
+ if (f2fs_has_inline_xattr(inode)) {
+ int offset;
+
+ page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
+ inode->i_ino, false);
+ if (!page)
+ return -ENOMEM;
+
+ get_node_info(sbi, inode->i_ino, &ni);
+
+ phys = (__u64)blk_to_logical(inode, ni.blk_addr);
+ offset = offsetof(struct f2fs_inode, i_addr) +
+ sizeof(__le32) * (DEF_ADDRS_PER_INODE -
+ get_inline_xattr_addrs(inode));
+
+ phys += offset;
+ len = inline_xattr_size(inode);
+
+ f2fs_put_page(page, 1);
+
+ flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;
+
+ if (!xnid)
+ flags |= FIEMAP_EXTENT_LAST;
+
+ err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
+ if (err || err == 1)
+ return err;
+ }
+
+ if (xnid) {
+ page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
+ if (!page)
+ return -ENOMEM;
+
+ get_node_info(sbi, xnid, &ni);
+
+ phys = (__u64)blk_to_logical(inode, ni.blk_addr);
+ len = inode->i_sb->s_blocksize;
+
+ f2fs_put_page(page, 1);
+
+ flags = FIEMAP_EXTENT_LAST;
+ }
+
+ if (phys)
+ err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
+
+ return (err < 0 ? err : 0);
+}
+
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len)
{
@@ -1117,18 +1248,29 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u32 flags = 0;
int ret = 0;
- ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
+ if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
+ ret = f2fs_precache_extents(inode);
+ if (ret)
+ return ret;
+ }
+
+ ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC | FIEMAP_FLAG_XATTR);
if (ret)
return ret;
+ inode_lock(inode);
+
+ if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
+ ret = f2fs_xattr_fiemap(inode, fieinfo);
+ goto out;
+ }
+
if (f2fs_has_inline_data(inode)) {
ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
if (ret != -EAGAIN)
- return ret;
+ goto out;
}
- inode_lock(inode);
-
if (logical_to_blk(inode, len) == 0)
len = blk_to_logical(inode, 1);
@@ -1198,7 +1340,6 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
unsigned nr_pages)
{
struct bio *bio = NULL;
- unsigned page_idx;
sector_t last_block_in_bio = 0;
struct inode *inode = mapping->host;
const unsigned blkbits = inode->i_blkbits;
@@ -1214,9 +1355,10 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
map.m_len = 0;
map.m_flags = 0;
map.m_next_pgofs = NULL;
+ map.m_next_extent = NULL;
+ map.m_seg_type = NO_CHECK_TYPE;
- for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {
-
+ for (; nr_pages; nr_pages--) {
if (pages) {
page = list_last_entry(pages, struct page, lru);
@@ -1376,18 +1518,79 @@ retry_encrypt:
return PTR_ERR(fio->encrypted_page);
}
+static inline bool check_inplace_update_policy(struct inode *inode,
+ struct f2fs_io_info *fio)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ unsigned int policy = SM_I(sbi)->ipu_policy;
+
+ if (policy & (0x1 << F2FS_IPU_FORCE))
+ return true;
+ if (policy & (0x1 << F2FS_IPU_SSR) && need_SSR(sbi))
+ return true;
+ if (policy & (0x1 << F2FS_IPU_UTIL) &&
+ utilization(sbi) > SM_I(sbi)->min_ipu_util)
+ return true;
+ if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && need_SSR(sbi) &&
+ utilization(sbi) > SM_I(sbi)->min_ipu_util)
+ return true;
+
+ /*
+ * IPU for rewriting async pages
+ */
+ if (policy & (0x1 << F2FS_IPU_ASYNC) &&
+ fio && fio->op == REQ_OP_WRITE &&
+ !(fio->op_flags & REQ_SYNC) &&
+ !f2fs_encrypted_inode(inode))
+ return true;
+
+ /* this is only set during fdatasync */
+ if (policy & (0x1 << F2FS_IPU_FSYNC) &&
+ is_inode_flag_set(inode, FI_NEED_IPU))
+ return true;
+
+ return false;
+}
+
+bool should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
+{
+ if (f2fs_is_pinned_file(inode))
+ return true;
+
+ /* if this is a cold file, we should overwrite to avoid fragmentation */
+ if (file_is_cold(inode))
+ return true;
+
+ return check_inplace_update_policy(inode, fio);
+}
+
+bool should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+
+ if (test_opt(sbi, LFS))
+ return true;
+ if (S_ISDIR(inode->i_mode))
+ return true;
+ if (f2fs_is_atomic_file(inode))
+ return true;
+ if (fio) {
+ if (is_cold_data(fio->page))
+ return true;
+ if (IS_ATOMIC_WRITTEN_PAGE(fio->page))
+ return true;
+ }
+ return false;
+}
+
static inline bool need_inplace_update(struct f2fs_io_info *fio)
{
struct inode *inode = fio->page->mapping->host;
- if (S_ISDIR(inode->i_mode) || f2fs_is_atomic_file(inode))
- return false;
- if (is_cold_data(fio->page))
- return false;
- if (IS_ATOMIC_WRITTEN_PAGE(fio->page))
+ if (should_update_outplace(inode, fio))
return false;
- return need_inplace_update_policy(inode, fio);
+ return should_update_inplace(inode, fio);
}
static inline bool valid_ipu_blkaddr(struct f2fs_io_info *fio)
@@ -1508,10 +1711,17 @@ static int __write_data_page(struct page *page, bool *submitted,
.submitted = false,
.need_lock = LOCK_RETRY,
.io_type = io_type,
+ .io_wbc = wbc,
};
trace_f2fs_writepage(page, DATA);
+ /* we should bypass data pages to let the kworker jobs proceed */
+ if (unlikely(f2fs_cp_error(sbi))) {
+ mapping_set_error(page->mapping, -EIO);
+ goto out;
+ }
+
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto redirty_out;
@@ -1536,12 +1746,6 @@ write:
available_free_memory(sbi, BASE_CHECK))))
goto redirty_out;
- /* we should bypass data pages to let the kworker jobs proceed */
- if (unlikely(f2fs_cp_error(sbi))) {
- mapping_set_error(page->mapping, -EIO);
- goto out;
- }
-
/* Dentry blocks are controlled by checkpoint */
if (S_ISDIR(inode->i_mode)) {
fio.need_lock = LOCK_DONE;
@@ -1571,10 +1775,14 @@ write:
}
}
- down_write(&F2FS_I(inode)->i_sem);
- if (F2FS_I(inode)->last_disk_size < psize)
- F2FS_I(inode)->last_disk_size = psize;
- up_write(&F2FS_I(inode)->i_sem);
+ if (err) {
+ file_set_keep_isize(inode);
+ } else {
+ down_write(&F2FS_I(inode)->i_sem);
+ if (F2FS_I(inode)->last_disk_size < psize)
+ F2FS_I(inode)->last_disk_size = psize;
+ up_write(&F2FS_I(inode)->i_sem);
+ }
done:
if (err && err != -ENOENT)
@@ -1933,7 +2141,7 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct page *page = NULL;
pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
- bool need_balance = false;
+ bool need_balance = false, drop_atomic = false;
block_t blkaddr = NULL_ADDR;
int err = 0;
@@ -1942,6 +2150,7 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
if (f2fs_is_atomic_file(inode) &&
!available_free_memory(sbi, INMEM_PAGES)) {
err = -ENOMEM;
+ drop_atomic = true;
goto fail;
}
@@ -2022,7 +2231,7 @@ repeat:
fail:
f2fs_put_page(page, 1);
f2fs_write_failed(mapping, pos + len);
- if (f2fs_is_atomic_file(inode))
+ if (drop_atomic)
drop_inmem_pages_all(sbi);
return err;
}
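
The new f2fs_xattr_fiemap() path above becomes reachable from userspace
through the standard FS_IOC_FIEMAP ioctl once FIEMAP_FLAG_XATTR passes
fiemap_check_flags(). A hedged userspace sketch (error handling trimmed):

    #include <stdio.h>
    #include <stdlib.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>
    #include <linux/fiemap.h>

    int main(int argc, char **argv)
    {
            struct fiemap *fm = calloc(1, sizeof(*fm) +
                                          sizeof(struct fiemap_extent));
            int fd;

            if (argc < 2 || !fm)
                    return 1;
            fd = open(argv[1], O_RDONLY);
            if (fd < 0)
                    return 1;
            fm->fm_length = ~0ULL;                  /* whole file */
            fm->fm_flags = FIEMAP_FLAG_XATTR;       /* map xattr blocks */
            fm->fm_extent_count = 1;
            if (!ioctl(fd, FS_IOC_FIEMAP, fm) && fm->fm_mapped_extents)
                    printf("xattr extent: phys %llu len %llu\n",
                           (unsigned long long)fm->fm_extents[0].fe_physical,
                           (unsigned long long)fm->fm_extents[0].fe_length);
            close(fd);
            return 0;
    }
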
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index ecada8425268..a66107b5cfff 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -49,14 +49,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->ndirty_imeta = get_pages(sbi, F2FS_DIRTY_IMETA);
si->ndirty_dirs = sbi->ndirty_inode[DIR_INODE];
si->ndirty_files = sbi->ndirty_inode[FILE_INODE];
-
- si->nquota_files = 0;
- if (f2fs_sb_has_quota_ino(sbi->sb)) {
- for (i = 0; i < MAXQUOTAS; i++) {
- if (f2fs_qf_ino(sbi->sb, i))
- si->nquota_files++;
- }
- }
+ si->nquota_files = sbi->nquota_files;
si->ndirty_all = sbi->ndirty_inode[DIRTY_META];
si->inmem_pages = get_pages(sbi, F2FS_INMEM_PAGES);
si->aw_cnt = atomic_read(&sbi->aw_cnt);
@@ -186,7 +179,6 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
si->base_mem += sizeof(struct f2fs_sb_info) + sbi->sb->s_blocksize;
si->base_mem += 2 * sizeof(struct f2fs_inode_info);
si->base_mem += sizeof(*sbi->ckpt);
- si->base_mem += sizeof(struct percpu_counter) * NR_COUNT_TYPE;
/* build sm */
si->base_mem += sizeof(struct f2fs_sm_info);
@@ -447,7 +439,7 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi)
struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
struct f2fs_stat_info *si;
- si = kzalloc(sizeof(struct f2fs_stat_info), GFP_KERNEL);
+ si = f2fs_kzalloc(sbi, sizeof(struct f2fs_stat_info), GFP_KERNEL);
if (!si)
return -ENOMEM;
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 2d98d877c09d..f00b5ed8c011 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -713,6 +713,8 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
f2fs_update_time(F2FS_I_SB(dir), REQ_TIME);
+ add_ino_entry(F2FS_I_SB(dir), dir->i_ino, TRANS_DIR_INO);
+
if (f2fs_has_inline_dentry(dir))
return f2fs_delete_inline_entry(dentry, page, dir, inode);
@@ -798,6 +800,7 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
unsigned int bit_pos;
struct f2fs_dir_entry *de = NULL;
struct fscrypt_str de_name = FSTR_INIT(NULL, 0);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(d->inode);
bit_pos = ((unsigned long)ctx->pos % d->max);
@@ -836,6 +839,9 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
le32_to_cpu(de->ino), d_type))
return 1;
+ if (sbi->readdir_ra == 1)
+ ra_node_page(sbi, le32_to_cpu(de->ino));
+
bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
ctx->pos = start_pos + bit_pos;
}
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 6abf26c31d01..6300ac5bcbe4 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -19,6 +19,7 @@
#include <linux/magic.h>
#include <linux/kobject.h>
#include <linux/sched.h>
+#include <linux/cred.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
@@ -43,6 +44,7 @@
#ifdef CONFIG_F2FS_FAULT_INJECTION
enum {
FAULT_KMALLOC,
+ FAULT_KVMALLOC,
FAULT_PAGE_ALLOC,
FAULT_PAGE_GET,
FAULT_ALLOC_BIO,
@@ -94,6 +96,7 @@ extern char *fault_name[FAULT_MAX];
#define F2FS_MOUNT_PRJQUOTA 0x00200000
#define F2FS_MOUNT_QUOTA 0x00400000
#define F2FS_MOUNT_INLINE_XATTR_SIZE 0x00800000
+#define F2FS_MOUNT_RESERVE_ROOT 0x01000000
#define clear_opt(sbi, option) ((sbi)->mount_opt.opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option) ((sbi)->mount_opt.opt |= F2FS_MOUNT_##option)
@@ -121,6 +124,7 @@ struct f2fs_mount_info {
#define F2FS_FEATURE_INODE_CHKSUM 0x0020
#define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR 0x0040
#define F2FS_FEATURE_QUOTA_INO 0x0080
+#define F2FS_FEATURE_INODE_CRTIME 0x0100
#define F2FS_HAS_FEATURE(sb, mask) \
((F2FS_SB(sb)->raw_super->feature & cpu_to_le32(mask)) != 0)
@@ -130,6 +134,12 @@ struct f2fs_mount_info {
(F2FS_SB(sb)->raw_super->feature &= ~cpu_to_le32(mask))
/*
+ * Default values for user and/or group using reserved blocks
+ */
+#define F2FS_DEF_RESUID 0
+#define F2FS_DEF_RESGID 0
+
+/*
* For checkpoint manager
*/
enum {
@@ -179,6 +189,7 @@ enum {
ORPHAN_INO, /* for orphan ino list */
APPEND_INO, /* for append ino list */
UPDATE_INO, /* for update ino list */
+ TRANS_DIR_INO, /* for transaction dir ino list */
FLUSH_INO, /* for multiple device flushing */
MAX_INO_ENTRY, /* max. list */
};
@@ -264,7 +275,6 @@ struct discard_cmd_control {
struct task_struct *f2fs_issue_discard; /* discard thread */
struct list_head entry_list; /* 4KB discard entry list */
struct list_head pend_list[MAX_PLIST_NUM];/* store pending entries */
- unsigned char pend_list_tag[MAX_PLIST_NUM];/* tag for pending entries */
struct list_head wait_list; /* store on-flushing entries */
struct list_head fstrim_list; /* in-flight discard from fstrim */
wait_queue_head_t discard_wait_queue; /* waiting queue for wake-up */
@@ -347,6 +357,9 @@ static inline bool __has_cursum_space(struct f2fs_journal *journal,
#define F2FS_IOC_GARBAGE_COLLECT_RANGE _IOW(F2FS_IOCTL_MAGIC, 11, \
struct f2fs_gc_range)
#define F2FS_IOC_GET_FEATURES _IOR(F2FS_IOCTL_MAGIC, 12, __u32)
+#define F2FS_IOC_SET_PIN_FILE _IOW(F2FS_IOCTL_MAGIC, 13, __u32)
+#define F2FS_IOC_GET_PIN_FILE _IOR(F2FS_IOCTL_MAGIC, 14, __u32)
+#define F2FS_IOC_PRECACHE_EXTENTS _IO(F2FS_IOCTL_MAGIC, 15)
#define F2FS_IOC_SET_ENCRYPTION_POLICY FS_IOC_SET_ENCRYPTION_POLICY
#define F2FS_IOC_GET_ENCRYPTION_POLICY FS_IOC_GET_ENCRYPTION_POLICY
@@ -402,10 +415,9 @@ struct f2fs_flush_device {
#define DEF_MIN_INLINE_SIZE 1
static inline int get_extra_isize(struct inode *inode);
static inline int get_inline_xattr_addrs(struct inode *inode);
-#define F2FS_INLINE_XATTR_ADDRS(inode) get_inline_xattr_addrs(inode)
#define MAX_INLINE_DATA(inode) (sizeof(__le32) * \
(CUR_ADDRS_PER_INODE(inode) - \
- F2FS_INLINE_XATTR_ADDRS(inode) - \
+ get_inline_xattr_addrs(inode) - \
DEF_INLINE_RESERVED_SIZE))
/* for inline dir */
@@ -542,6 +554,8 @@ struct f2fs_map_blocks {
unsigned int m_len;
unsigned int m_flags;
pgoff_t *m_next_pgofs; /* point next possible non-hole pgofs */
+ pgoff_t *m_next_extent; /* point to next possible extent */
+ int m_seg_type;
};
/* for flag in get_data_block */
@@ -551,6 +565,7 @@ enum {
F2FS_GET_BLOCK_BMAP,
F2FS_GET_BLOCK_PRE_DIO,
F2FS_GET_BLOCK_PRE_AIO,
+ F2FS_GET_BLOCK_PRECACHE,
};
/*
@@ -583,7 +598,10 @@ struct f2fs_inode_info {
unsigned long i_flags; /* keep an inode flags for ioctl */
unsigned char i_advise; /* use to give file attribute hints */
unsigned char i_dir_level; /* use for dentry level for large dir */
- unsigned int i_current_depth; /* use only in directory structure */
+ union {
+ unsigned int i_current_depth; /* only for directory depth */
+ unsigned short i_gc_failures; /* only for regular file */
+ };
unsigned int i_pino; /* parent inode number */
umode_t i_acl_mode; /* keep file acl mode temporarily */
@@ -618,6 +636,7 @@ struct f2fs_inode_info {
int i_extra_isize; /* size of extra space located in i_addr */
kprojid_t i_projid; /* id for project quota */
int i_inline_xattr_size; /* inline xattr size */
+ struct timespec i_crtime; /* inode creation time */
};
static inline void get_extent_info(struct extent_info *ext,
@@ -922,6 +941,7 @@ enum cp_reason_type {
CP_NODE_NEED_CP,
CP_FASTBOOT_MODE,
CP_SPEC_LOG_NUM,
+ CP_RECOVER_DIR,
};
enum iostat_type {
@@ -957,6 +977,7 @@ struct f2fs_io_info {
int need_lock; /* indicate we need to lock cp_rwsem */
bool in_list; /* indicate fio is in io_list */
enum iostat_type io_type; /* io type */
+ struct writeback_control *io_wbc; /* writeback control */
};
#define is_read_io(rw) ((rw) == READ)
@@ -1093,6 +1114,7 @@ struct f2fs_sb_info {
int dir_level; /* directory level */
int inline_xattr_size; /* inline xattr size */
unsigned int trigger_ssr_threshold; /* threshold to trigger ssr */
+ int readdir_ra; /* readahead inode in readdir */
block_t user_block_count; /* # of user blocks */
block_t total_valid_block_count; /* # of valid blocks */
@@ -1100,6 +1122,11 @@ struct f2fs_sb_info {
block_t last_valid_block_count; /* for recovery */
block_t reserved_blocks; /* configurable reserved blocks */
block_t current_reserved_blocks; /* current reserved blocks */
+ block_t root_reserved_blocks; /* root reserved blocks */
+ kuid_t s_resuid; /* reserved blocks for uid */
+ kgid_t s_resgid; /* reserved blocks for gid */
+
+ unsigned int nquota_files; /* # of quota sysfile */
u32 s_next_generation; /* for NFS support */
@@ -1124,6 +1151,9 @@ struct f2fs_sb_info {
/* threshold for converting bg victims for fg */
u64 fggc_threshold;
+ /* threshold for gc trials on pinned files */
+ u64 gc_pin_file_threshold;
+
/* maximum # of trials to find a victim segment for SSR and GC */
unsigned int max_victim_search;
@@ -1250,33 +1280,7 @@ static inline bool is_idle(struct f2fs_sb_info *sbi)
/*
* Inline functions
*/
-static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
- unsigned int length)
-{
- SHASH_DESC_ON_STACK(shash, sbi->s_chksum_driver);
- u32 *ctx = (u32 *)shash_desc_ctx(shash);
- u32 retval;
- int err;
-
- shash->tfm = sbi->s_chksum_driver;
- shash->flags = 0;
- *ctx = F2FS_SUPER_MAGIC;
-
- err = crypto_shash_update(shash, address, length);
- BUG_ON(err);
-
- retval = *ctx;
- barrier_data(ctx);
- return retval;
-}
-
-static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
- void *buf, size_t buf_size)
-{
- return f2fs_crc32(sbi, buf, buf_size) == blk_crc;
-}
-
-static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc,
+static inline u32 __f2fs_crc32(struct f2fs_sb_info *sbi, u32 crc,
const void *address, unsigned int length)
{
struct {
@@ -1297,6 +1301,24 @@ static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc,
return *(u32 *)desc.ctx;
}
+static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
+ unsigned int length)
+{
+ return __f2fs_crc32(sbi, F2FS_SUPER_MAGIC, address, length);
+}
+
+static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
+ void *buf, size_t buf_size)
+{
+ return f2fs_crc32(sbi, buf, buf_size) == blk_crc;
+}
+
+static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc,
+ const void *address, unsigned int length)
+{
+ return __f2fs_crc32(sbi, crc, address, length);
+}
+
static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
{
return container_of(inode, struct f2fs_inode_info, vfs_inode);
@@ -1555,6 +1577,25 @@ static inline bool f2fs_has_xattr_block(unsigned int ofs)
return ofs == XATTR_NODE_OFFSET;
}
+static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi,
+ struct inode *inode)
+{
+ if (!inode)
+ return true;
+ if (!test_opt(sbi, RESERVE_ROOT))
+ return false;
+ if (IS_NOQUOTA(inode))
+ return true;
+ if (capable(CAP_SYS_RESOURCE))
+ return true;
+ if (uid_eq(sbi->s_resuid, current_fsuid()))
+ return true;
+ if (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) &&
+ in_group_p(sbi->s_resgid))
+ return true;
+ return false;
+}
+
static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool);
static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
struct inode *inode, blkcnt_t *count)
@@ -1584,11 +1625,17 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
sbi->total_valid_block_count += (block_t)(*count);
avail_user_block_count = sbi->user_block_count -
sbi->current_reserved_blocks;
+
+ if (!__allow_reserved_blocks(sbi, inode))
+ avail_user_block_count -= sbi->root_reserved_blocks;
+
if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
diff = sbi->total_valid_block_count - avail_user_block_count;
+ if (diff > *count)
+ diff = *count;
*count -= diff;
release = diff;
- sbi->total_valid_block_count = avail_user_block_count;
+ sbi->total_valid_block_count -= diff;
if (!*count) {
spin_unlock(&sbi->stat_lock);
percpu_counter_sub(&sbi->alloc_valid_block_count, diff);
@@ -1597,7 +1644,7 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
}
spin_unlock(&sbi->stat_lock);
- if (release)
+ if (unlikely(release))
dquot_release_reservation_block(inode, release);
f2fs_i_blocks_write(inode, *count, true, true);
return 0;
@@ -1777,9 +1824,13 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
spin_lock(&sbi->stat_lock);
- valid_block_count = sbi->total_valid_block_count + 1;
- if (unlikely(valid_block_count + sbi->current_reserved_blocks >
- sbi->user_block_count)) {
+ valid_block_count = sbi->total_valid_block_count +
+ sbi->current_reserved_blocks + 1;
+
+ if (!__allow_reserved_blocks(sbi, inode))
+ valid_block_count += sbi->root_reserved_blocks;
+
+ if (unlikely(valid_block_count > sbi->user_block_count)) {
spin_unlock(&sbi->stat_lock);
goto enospc;
}
@@ -1992,11 +2043,11 @@ static inline block_t datablock_addr(struct inode *inode,
raw_node = F2FS_NODE(node_page);
/* from GC path only */
- if (!inode) {
- if (is_inode)
+ if (is_inode) {
+ if (!inode)
base = offset_in_addr(&raw_node->i);
- } else if (f2fs_has_extra_attr(inode) && is_inode) {
- base = get_extra_isize(inode);
+ else if (f2fs_has_extra_attr(inode))
+ base = get_extra_isize(inode);
}
addr_array = blkaddr_in_node(raw_node);
@@ -2107,6 +2158,7 @@ enum {
FI_HOT_DATA, /* indicate file is hot */
FI_EXTRA_ATTR, /* indicate file has extra attribute */
FI_PROJ_INHERIT, /* indicate file inherits projectid */
+ FI_PIN_FILE, /* indicate file should not be GCed */
};
static inline void __mark_inode_dirty_flag(struct inode *inode,
@@ -2116,10 +2168,12 @@ static inline void __mark_inode_dirty_flag(struct inode *inode,
case FI_INLINE_XATTR:
case FI_INLINE_DATA:
case FI_INLINE_DENTRY:
+ case FI_NEW_INODE:
if (set)
return;
case FI_DATA_EXIST:
case FI_INLINE_DOTS:
+ case FI_PIN_FILE:
f2fs_mark_inode_dirty_sync(inode, true);
}
}
@@ -2200,6 +2254,13 @@ static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth)
f2fs_mark_inode_dirty_sync(inode, true);
}
+static inline void f2fs_i_gc_failures_write(struct inode *inode,
+ unsigned int count)
+{
+ F2FS_I(inode)->i_gc_failures = count;
+ f2fs_mark_inode_dirty_sync(inode, true);
+}
+
static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid)
{
F2FS_I(inode)->i_xattr_nid = xnid;
@@ -2228,6 +2289,8 @@ static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri)
set_bit(FI_INLINE_DOTS, &fi->flags);
if (ri->i_inline & F2FS_EXTRA_ATTR)
set_bit(FI_EXTRA_ATTR, &fi->flags);
+ if (ri->i_inline & F2FS_PIN_FILE)
+ set_bit(FI_PIN_FILE, &fi->flags);
}
static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
@@ -2246,6 +2309,8 @@ static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
ri->i_inline |= F2FS_INLINE_DOTS;
if (is_inode_flag_set(inode, FI_EXTRA_ATTR))
ri->i_inline |= F2FS_EXTRA_ATTR;
+ if (is_inode_flag_set(inode, FI_PIN_FILE))
+ ri->i_inline |= F2FS_PIN_FILE;
}
static inline int f2fs_has_extra_attr(struct inode *inode)
@@ -2260,7 +2325,7 @@ static inline int f2fs_has_inline_xattr(struct inode *inode)
static inline unsigned int addrs_per_inode(struct inode *inode)
{
- return CUR_ADDRS_PER_INODE(inode) - F2FS_INLINE_XATTR_ADDRS(inode);
+ return CUR_ADDRS_PER_INODE(inode) - get_inline_xattr_addrs(inode);
}
static inline void *inline_xattr_addr(struct inode *inode, struct page *page)
@@ -2268,7 +2333,7 @@ static inline void *inline_xattr_addr(struct inode *inode, struct page *page)
struct f2fs_inode *ri = F2FS_INODE(page);
return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE -
- F2FS_INLINE_XATTR_ADDRS(inode)]);
+ get_inline_xattr_addrs(inode)]);
}
static inline int inline_xattr_size(struct inode *inode)
@@ -2291,6 +2356,11 @@ static inline int f2fs_has_inline_dots(struct inode *inode)
return is_inode_flag_set(inode, FI_INLINE_DOTS);
}
+static inline bool f2fs_is_pinned_file(struct inode *inode)
+{
+ return is_inode_flag_set(inode, FI_PIN_FILE);
+}
+
static inline bool f2fs_is_atomic_file(struct inode *inode)
{
return is_inode_flag_set(inode, FI_ATOMIC_FILE);
@@ -2418,12 +2488,35 @@ static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
return kmalloc(size, flags);
}
+static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi,
+ size_t size, gfp_t flags)
+{
+ return f2fs_kmalloc(sbi, size, flags | __GFP_ZERO);
+}
+
+static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi,
+ size_t size, gfp_t flags)
+{
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (time_to_inject(sbi, FAULT_KVMALLOC)) {
+ f2fs_show_injection_info(FAULT_KVMALLOC);
+ return NULL;
+ }
+#endif
+ return kvmalloc(size, flags);
+}
+
+static inline void *f2fs_kvzalloc(struct f2fs_sb_info *sbi,
+ size_t size, gfp_t flags)
+{
+ return f2fs_kvmalloc(sbi, size, flags | __GFP_ZERO);
+}
+
static inline int get_extra_isize(struct inode *inode)
{
return F2FS_I(inode)->i_extra_isize / sizeof(__le32);
}
-static inline int f2fs_sb_has_flexible_inline_xattr(struct super_block *sb);
static inline int get_inline_xattr_addrs(struct inode *inode)
{
return F2FS_I(inode)->i_inline_xattr_size;
@@ -2479,9 +2572,11 @@ int f2fs_getattr(const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int flags);
int f2fs_setattr(struct dentry *dentry, struct iattr *attr);
int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end);
-int truncate_data_blocks_range(struct dnode_of_data *dn, int count);
+void truncate_data_blocks_range(struct dnode_of_data *dn, int count);
+int f2fs_precache_extents(struct inode *inode);
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+int f2fs_pin_file_control(struct inode *inode, bool inc);
/*
* inode.c
@@ -2492,8 +2587,8 @@ void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page);
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino);
struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino);
int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink);
-int update_inode(struct inode *inode, struct page *node_page);
-int update_inode_page(struct inode *inode);
+void update_inode(struct inode *inode, struct page *node_page);
+void update_inode_page(struct inode *inode);
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc);
void f2fs_evict_inode(struct inode *inode);
void handle_failed_inode(struct inode *inode);
@@ -2604,10 +2699,9 @@ void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink);
void recover_inline_xattr(struct inode *inode, struct page *page);
-int recover_xattr_data(struct inode *inode, struct page *page,
- block_t blkaddr);
+int recover_xattr_data(struct inode *inode, struct page *page);
int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
-int restore_node_summary(struct f2fs_sb_info *sbi,
+void restore_node_summary(struct f2fs_sb_info *sbi,
unsigned int segno, struct f2fs_summary_block *sum);
void flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
int build_node_manager(struct f2fs_sb_info *sbi);
@@ -2634,6 +2728,7 @@ void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
void init_discard_policy(struct discard_policy *dpolicy, int discard_type,
unsigned int granularity);
+void drop_discard_cmd(struct f2fs_sb_info *sbi);
void stop_discard_thread(struct f2fs_sb_info *sbi);
bool f2fs_wait_discard_bios(struct f2fs_sb_info *sbi);
void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc);
@@ -2672,6 +2767,7 @@ int build_segment_manager(struct f2fs_sb_info *sbi);
void destroy_segment_manager(struct f2fs_sb_info *sbi);
int __init create_segment_manager_caches(void);
void destroy_segment_manager_caches(void);
+int rw_hint_to_seg_type(enum rw_hint hint);
/*
* checkpoint.c
@@ -2741,6 +2837,8 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
int create, int flag);
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len);
+bool should_update_inplace(struct inode *inode, struct f2fs_io_info *fio);
+bool should_update_outplace(struct inode *inode, struct f2fs_io_info *fio);
void f2fs_set_page_dirty_nobuffers(struct page *page);
int __f2fs_write_data_pages(struct address_space *mapping,
struct writeback_control *wbc,
@@ -3109,6 +3207,11 @@ static inline int f2fs_sb_has_quota_ino(struct super_block *sb)
return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_QUOTA_INO);
}
+static inline int f2fs_sb_has_inode_crtime(struct super_block *sb)
+{
+ return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_INODE_CRTIME);
+}
+
#ifdef CONFIG_BLK_DEV_ZONED
static inline int get_blkz_type(struct f2fs_sb_info *sbi,
struct block_device *bdev, block_t blkaddr)
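
The pin-file ioctls declared in f2fs.h above are plain __u32 get/set calls;
a userspace sketch, assuming F2FS_IOCTL_MAGIC remains 0xf5 as defined in
f2fs.h:

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/types.h>

    #define F2FS_IOCTL_MAGIC        0xf5
    #define F2FS_IOC_SET_PIN_FILE   _IOW(F2FS_IOCTL_MAGIC, 13, __u32)
    #define F2FS_IOC_GET_PIN_FILE   _IOR(F2FS_IOCTL_MAGIC, 14, __u32)

    int main(int argc, char **argv)
    {
            __u32 pin = 1;
            int fd;

            if (argc < 2)
                    return 1;
            fd = open(argv[1], O_RDWR);
            if (fd < 0)
                    return 1;
            /* pinned files are exempt from GC block moves (FI_PIN_FILE) */
            if (ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin))
                    perror("F2FS_IOC_SET_PIN_FILE");
            close(fd);
            return 0;
    }
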
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 7874bbd7311d..672a542e5464 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -165,6 +165,9 @@ static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
cp_reason = CP_FASTBOOT_MODE;
else if (sbi->active_logs == 2)
cp_reason = CP_SPEC_LOG_NUM;
+ else if (need_dentry_mark(sbi, inode->i_ino) &&
+ exist_written_data(sbi, F2FS_I(inode)->i_pino, TRANS_DIR_INO))
+ cp_reason = CP_RECOVER_DIR;
return cp_reason;
}
@@ -472,26 +475,14 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
static int f2fs_file_open(struct inode *inode, struct file *filp)
{
- struct dentry *dir;
+ int err = fscrypt_file_open(inode, filp);
- if (f2fs_encrypted_inode(inode)) {
- int ret = fscrypt_get_encryption_info(inode);
- if (ret)
- return -EACCES;
- if (!fscrypt_has_encryption_key(inode))
- return -ENOKEY;
- }
- dir = dget_parent(file_dentry(filp));
- if (f2fs_encrypted_inode(d_inode(dir)) &&
- !fscrypt_has_permitted_context(d_inode(dir), inode)) {
- dput(dir);
- return -EPERM;
- }
- dput(dir);
+ if (err)
+ return err;
return dquot_file_open(inode, filp);
}
-int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
+void truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
struct f2fs_node *raw_node;
@@ -534,7 +525,6 @@ int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
f2fs_update_time(sbi, REQ_TIME);
trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
dn->ofs_in_node, nr_free);
- return nr_free;
}
void truncate_data_blocks(struct dnode_of_data *dn)
@@ -682,8 +672,17 @@ int f2fs_getattr(const struct path *path, struct kstat *stat,
{
struct inode *inode = d_inode(path->dentry);
struct f2fs_inode_info *fi = F2FS_I(inode);
+ struct f2fs_inode *ri;
unsigned int flags;
+ if (f2fs_has_extra_attr(inode) &&
+ f2fs_sb_has_inode_crtime(inode->i_sb) &&
+ F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
+ stat->result_mask |= STATX_BTIME;
+ stat->btime.tv_sec = fi->i_crtime.tv_sec;
+ stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
+ }
+
flags = fi->i_flags & (FS_FL_USER_VISIBLE | FS_PROJINHERIT_FL);
if (flags & FS_APPEND_FL)
stat->attributes |= STATX_ATTR_APPEND;
@@ -755,6 +754,10 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
if (err)
return err;
+ err = fscrypt_prepare_setattr(dentry, attr);
+ if (err)
+ return err;
+
if (is_quota_modification(inode, attr)) {
err = dquot_initialize(inode);
if (err)
@@ -770,14 +773,6 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
}
if (attr->ia_valid & ATTR_SIZE) {
- if (f2fs_encrypted_inode(inode)) {
- err = fscrypt_get_encryption_info(inode);
- if (err)
- return err;
- if (!fscrypt_has_encryption_key(inode))
- return -ENOKEY;
- }
-
if (attr->ia_size <= i_size_read(inode)) {
down_write(&F2FS_I(inode)->i_mmap_sem);
truncate_setsize(inode, attr->ia_size);
@@ -1114,11 +1109,13 @@ static int __exchange_data_block(struct inode *src_inode,
while (len) {
olen = min((pgoff_t)4 * ADDRS_PER_BLOCK, len);
- src_blkaddr = kvzalloc(sizeof(block_t) * olen, GFP_KERNEL);
+ src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
+ sizeof(block_t) * olen, GFP_KERNEL);
if (!src_blkaddr)
return -ENOMEM;
- do_replace = kvzalloc(sizeof(int) * olen, GFP_KERNEL);
+ do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
+ sizeof(int) * olen, GFP_KERNEL);
if (!do_replace) {
kvfree(src_blkaddr);
return -ENOMEM;
@@ -1186,14 +1183,14 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
pg_start = offset >> PAGE_SHIFT;
pg_end = (offset + len) >> PAGE_SHIFT;
+ /* avoid gc operation during block exchange */
+ down_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
+
down_write(&F2FS_I(inode)->i_mmap_sem);
/* write out all dirty pages from offset */
ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
if (ret)
- goto out;
-
- /* avoid gc operation during block exchange */
- down_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
+ goto out_unlock;
truncate_pagecache(inode, offset);
@@ -1212,9 +1209,8 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
if (!ret)
f2fs_i_size_write(inode, new_size);
out_unlock:
- up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
-out:
up_write(&F2FS_I(inode)->i_mmap_sem);
+ up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
return ret;
}
@@ -1385,6 +1381,9 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
f2fs_balance_fs(sbi, true);
+ /* avoid gc operation during block exchange */
+ down_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
+
down_write(&F2FS_I(inode)->i_mmap_sem);
ret = truncate_blocks(inode, i_size_read(inode), true);
if (ret)
@@ -1395,9 +1394,6 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
if (ret)
goto out;
- /* avoid gc operation during block exchange */
- down_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
-
truncate_pagecache(inode, offset);
pg_start = offset >> PAGE_SHIFT;
@@ -1425,10 +1421,9 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
if (!ret)
f2fs_i_size_write(inode, new_size);
-
- up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
out:
up_write(&F2FS_I(inode)->i_mmap_sem);
+ up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
return ret;
}
@@ -1436,7 +1431,8 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
loff_t len, int mode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct f2fs_map_blocks map = { .m_next_pgofs = NULL };
+ struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
+ .m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE };
pgoff_t pg_end;
loff_t new_size = i_size_read(inode);
loff_t off_end;
@@ -1852,14 +1848,20 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
switch (in) {
case F2FS_GOING_DOWN_FULLSYNC:
sb = freeze_bdev(sb->s_bdev);
- if (sb && !IS_ERR(sb)) {
+ if (IS_ERR(sb)) {
+ ret = PTR_ERR(sb);
+ goto out;
+ }
+ if (sb) {
f2fs_stop_checkpoint(sbi, false);
thaw_bdev(sb->s_bdev, sb);
}
break;
case F2FS_GOING_DOWN_METASYNC:
/* do checkpoint only */
- f2fs_sync_fs(sb, 1);
+ ret = f2fs_sync_fs(sb, 1);
+ if (ret)
+ goto out;
f2fs_stop_checkpoint(sbi, false);
break;
case F2FS_GOING_DOWN_NOSYNC:
@@ -1873,6 +1875,13 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
ret = -EINVAL;
goto out;
}
+
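+	/* stop background GC/discard work; the filesystem is going down */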
+ stop_gc_thread(sbi);
+ stop_discard_thread(sbi);
+
+ drop_discard_cmd(sbi);
+ clear_opt(sbi, DISCARD);
+
f2fs_update_time(sbi, REQ_TIME);
out:
mnt_drop_write_file(filp);
@@ -2084,9 +2093,10 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
struct f2fs_defragment *range)
{
struct inode *inode = file_inode(filp);
- struct f2fs_map_blocks map = { .m_next_pgofs = NULL };
+ struct f2fs_map_blocks map = { .m_next_extent = NULL,
+ .m_seg_type = NO_CHECK_TYPE };
struct extent_info ei = {0,0,0};
- pgoff_t pg_start, pg_end;
+ pgoff_t pg_start, pg_end, next_pgofs;
unsigned int blk_per_seg = sbi->blocks_per_seg;
unsigned int total = 0, sec_num;
block_t blk_end = 0;
@@ -2094,7 +2104,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
int err;
/* if in-place-update policy is enabled, don't waste time here */
- if (need_inplace_update_policy(inode, NULL))
+ if (should_update_inplace(inode, NULL))
return -EINVAL;
pg_start = range->start >> PAGE_SHIFT;
@@ -2120,6 +2130,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
}
map.m_lblk = pg_start;
+ map.m_next_pgofs = &next_pgofs;
/*
* lookup mapping info in dnode page cache, skip defragmenting if all
@@ -2133,14 +2144,16 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
goto out;
if (!(map.m_flags & F2FS_MAP_FLAGS)) {
- map.m_lblk++;
+ map.m_lblk = next_pgofs;
continue;
}
- if (blk_end && blk_end != map.m_pblk) {
+ if (blk_end && blk_end != map.m_pblk)
fragmented = true;
- break;
- }
+
+ /* record the total count of blocks that we're going to move */
+ total += map.m_len;
+
blk_end = map.m_pblk + map.m_len;
map.m_lblk += map.m_len;
@@ -2149,10 +2162,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
if (!fragmented)
goto out;
- map.m_lblk = pg_start;
- map.m_len = pg_end - pg_start;
-
- sec_num = (map.m_len + BLKS_PER_SEC(sbi) - 1) / BLKS_PER_SEC(sbi);
+ sec_num = (total + BLKS_PER_SEC(sbi) - 1) / BLKS_PER_SEC(sbi);
/*
* make sure there are enough free sections for LFS allocation, this can
@@ -2164,6 +2174,10 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
goto out;
}
+ map.m_lblk = pg_start;
+ map.m_len = pg_end - pg_start;
+ total = 0;
+
while (map.m_lblk < pg_end) {
pgoff_t idx;
int cnt = 0;
@@ -2175,7 +2189,7 @@ do_map:
goto clear_out;
if (!(map.m_flags & F2FS_MAP_FLAGS)) {
- map.m_lblk++;
+ map.m_lblk = next_pgofs;
continue;
}
@@ -2681,6 +2695,125 @@ static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
return 0;
}
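+/*
+ * Bump (when @inc) and check the inode's GC failure count; once it crosses
+ * gc_pin_file_threshold, unpin the file and return -EAGAIN so that GC may
+ * migrate its blocks again.
+ */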
+int f2fs_pin_file_control(struct inode *inode, bool inc)
+{
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+
+ /* Use i_gc_failures of a normal file as a risk signal. */
+ if (inc)
+ f2fs_i_gc_failures_write(inode, fi->i_gc_failures + 1);
+
+ if (fi->i_gc_failures > sbi->gc_pin_file_threshold) {
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: Enable GC = ino %lx after %x GC trials\n",
+ __func__, inode->i_ino, fi->i_gc_failures);
+ clear_inode_flag(inode, FI_PIN_FILE);
+ return -EAGAIN;
+ }
+ return 0;
+}
+
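+/*
+ * F2FS_IOC_SET_PIN_FILE: pin or unpin a regular file so that GC leaves its
+ * blocks in place. Pinning is refused while the inode must be updated
+ * out-of-place; on success the current GC failure count is returned.
+ */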
+static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
+{
+ struct inode *inode = file_inode(filp);
+ __u32 pin;
+ int ret = 0;
+
+ if (!inode_owner_or_capable(inode))
+ return -EACCES;
+
+ if (get_user(pin, (__u32 __user *)arg))
+ return -EFAULT;
+
+ if (!S_ISREG(inode->i_mode))
+ return -EINVAL;
+
+ if (f2fs_readonly(F2FS_I_SB(inode)->sb))
+ return -EROFS;
+
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
+ inode_lock(inode);
+
+ if (should_update_outplace(inode, NULL)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!pin) {
+ clear_inode_flag(inode, FI_PIN_FILE);
+ F2FS_I(inode)->i_gc_failures = 1;
+ goto done;
+ }
+
+ if (f2fs_pin_file_control(inode, false)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+ ret = f2fs_convert_inline_inode(inode);
+ if (ret)
+ goto out;
+
+ set_inode_flag(inode, FI_PIN_FILE);
+ ret = F2FS_I(inode)->i_gc_failures;
+done:
+ f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
+out:
+ inode_unlock(inode);
+ mnt_drop_write_file(filp);
+ return ret;
+}
+
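+/* F2FS_IOC_GET_PIN_FILE: report the GC failure count of a pinned file, or 0. */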
+static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
+{
+ struct inode *inode = file_inode(filp);
+ __u32 pin = 0;
+
+ if (is_inode_flag_set(inode, FI_PIN_FILE))
+ pin = F2FS_I(inode)->i_gc_failures;
+ return put_user(pin, (u32 __user *)arg);
+}
+
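+/*
+ * Walk the whole file and load its block mappings into the extent cache,
+ * one extent at a time, so that later lookups can be served from the cache.
+ */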
+int f2fs_precache_extents(struct inode *inode)
+{
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ struct f2fs_map_blocks map;
+ pgoff_t m_next_extent;
+ loff_t end;
+ int err;
+
+ if (is_inode_flag_set(inode, FI_NO_EXTENT))
+ return -EOPNOTSUPP;
+
+ map.m_lblk = 0;
+ map.m_next_pgofs = NULL;
+ map.m_next_extent = &m_next_extent;
+ map.m_seg_type = NO_CHECK_TYPE;
+ end = F2FS_I_SB(inode)->max_file_blocks;
+
+ while (map.m_lblk < end) {
+ map.m_len = end - map.m_lblk;
+
+ down_write(&fi->dio_rwsem[WRITE]);
+ err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
+ up_write(&fi->dio_rwsem[WRITE]);
+ if (err)
+ return err;
+
+ map.m_lblk = m_next_extent;
+ }
+
+ return err;
+}
+
+static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
+{
+ return f2fs_precache_extents(file_inode(filp));
+}
+
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
@@ -2731,6 +2864,12 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return f2fs_ioc_fsgetxattr(filp, arg);
case F2FS_IOC_FSSETXATTR:
return f2fs_ioc_fssetxattr(filp, arg);
+ case F2FS_IOC_GET_PIN_FILE:
+ return f2fs_ioc_get_pin_file(filp, arg);
+ case F2FS_IOC_SET_PIN_FILE:
+ return f2fs_ioc_set_pin_file(filp, arg);
+ case F2FS_IOC_PRECACHE_EXTENTS:
+ return f2fs_ioc_precache_extents(filp, arg);
default:
return -ENOTTY;
}
@@ -2806,6 +2945,9 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case F2FS_IOC_GET_FEATURES:
case F2FS_IOC_FSGETXATTR:
case F2FS_IOC_FSSETXATTR:
+ case F2FS_IOC_GET_PIN_FILE:
+ case F2FS_IOC_SET_PIN_FILE:
+ case F2FS_IOC_PRECACHE_EXTENTS:
break;
default:
return -ENOIOCTLCMD;
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index d844dcb80570..aa720cc44509 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -624,6 +624,11 @@ static void move_data_block(struct inode *inode, block_t bidx,
if (f2fs_is_atomic_file(inode))
goto out;
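+	/* don't migrate blocks of a pinned file; count the GC trial instead */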
+ if (f2fs_is_pinned_file(inode)) {
+ f2fs_pin_file_control(inode, true);
+ goto out;
+ }
+
set_new_dnode(&dn, inode, NULL, NULL, 0);
err = get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
if (err)
@@ -686,7 +691,12 @@ static void move_data_block(struct inode *inode, block_t bidx,
fio.op = REQ_OP_WRITE;
fio.op_flags = REQ_SYNC;
fio.new_blkaddr = newaddr;
- f2fs_submit_page_write(&fio);
+ err = f2fs_submit_page_write(&fio);
+ if (err) {
+ if (PageWriteback(fio.encrypted_page))
+ end_page_writeback(fio.encrypted_page);
+ goto put_page_out;
+ }
f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE);
@@ -720,6 +730,11 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type,
if (f2fs_is_atomic_file(inode))
goto out;
+ if (f2fs_is_pinned_file(inode)) {
+ if (gc_type == FG_GC)
+ f2fs_pin_file_control(inode, true);
+ goto out;
+ }
if (gc_type == BG_GC) {
if (PageWriteback(page))
@@ -1091,6 +1106,7 @@ void build_gc_manager(struct f2fs_sb_info *sbi)
sbi->fggc_threshold = div64_u64((main_count - ovp_count) *
BLKS_PER_SEC(sbi), (main_count - resv_count));
+ sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;
/* allocate the warm/cold data area from the slower device */
if (sbi->s_ndevs && sbi->segs_per_sec == 1)
diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h
index 9325191fab2d..b0045d4c8d1e 100644
--- a/fs/f2fs/gc.h
+++ b/fs/f2fs/gc.h
@@ -20,6 +20,8 @@
#define LIMIT_INVALID_BLOCK 40 /* percentage over total user space */
#define LIMIT_FREE_BLOCK 40 /* percentage over invalid + free space */
+#define DEF_GC_FAILED_PINNED_FILES 2048
+
/* Search max. number of dirty segments to select a victim segment */
#define DEF_MAX_VICTIM_SEARCH 4096 /* covers 8GB */
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index b4c4f2b25304..89c838bfb067 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -22,6 +22,9 @@
void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
{
+ if (is_inode_flag_set(inode, FI_NEW_INODE))
+ return;
+
if (f2fs_inode_dirtied(inode, sync))
return;
@@ -275,6 +278,12 @@ static int do_read_inode(struct inode *inode)
i_projid = F2FS_DEF_PROJID;
fi->i_projid = make_kprojid(&init_user_ns, i_projid);
+ if (f2fs_has_extra_attr(inode) && f2fs_sb_has_inode_crtime(sbi->sb) &&
+ F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
+ fi->i_crtime.tv_sec = le64_to_cpu(ri->i_crtime);
+ fi->i_crtime.tv_nsec = le32_to_cpu(ri->i_crtime_nsec);
+ }
+
f2fs_put_page(node_page, 1);
stat_inc_inline_xattr(inode);
@@ -360,14 +369,15 @@ retry:
return inode;
}
-int update_inode(struct inode *inode, struct page *node_page)
+void update_inode(struct inode *inode, struct page *node_page)
{
struct f2fs_inode *ri;
struct extent_tree *et = F2FS_I(inode)->extent_tree;
- f2fs_inode_synced(inode);
-
f2fs_wait_on_page_writeback(node_page, NODE, true);
+ set_page_dirty(node_page);
+
+ f2fs_inode_synced(inode);
ri = F2FS_INODE(node_page);
@@ -417,6 +427,15 @@ int update_inode(struct inode *inode, struct page *node_page)
F2FS_I(inode)->i_projid);
ri->i_projid = cpu_to_le32(i_projid);
}
+
+ if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)->sb) &&
+ F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
+ i_crtime)) {
+ ri->i_crtime =
+ cpu_to_le64(F2FS_I(inode)->i_crtime.tv_sec);
+ ri->i_crtime_nsec =
+ cpu_to_le32(F2FS_I(inode)->i_crtime.tv_nsec);
+ }
}
__set_inode_rdev(inode, ri);
@@ -426,14 +445,12 @@ int update_inode(struct inode *inode, struct page *node_page)
if (inode->i_nlink == 0)
clear_inline_node(node_page);
- return set_page_dirty(node_page);
}
-int update_inode_page(struct inode *inode)
+void update_inode_page(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct page *node_page;
- int ret = 0;
retry:
node_page = get_node_page(sbi, inode->i_ino);
if (IS_ERR(node_page)) {
@@ -444,11 +461,10 @@ retry:
} else if (err != -ENOENT) {
f2fs_stop_checkpoint(sbi, false);
}
- return 0;
+ return;
}
- ret = update_inode(inode, node_page);
+ update_inode(inode, node_page);
f2fs_put_page(node_page, 1);
- return ret;
}
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 28bdf8828e73..c4c94c7e9f4f 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -50,7 +50,8 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
inode->i_ino = ino;
inode->i_blocks = 0;
- inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
+ inode->i_mtime = inode->i_atime = inode->i_ctime =
+ F2FS_I(inode)->i_crtime = current_time(inode);
inode->i_generation = sbi->s_next_generation++;
err = insert_inode_locked(inode);
@@ -74,12 +75,12 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
if (err)
goto fail_drop;
+ set_inode_flag(inode, FI_NEW_INODE);
+
/* If the directory is encrypted, then we should encrypt the inode. */
if (f2fs_encrypted_inode(dir) && f2fs_may_encrypt(inode))
f2fs_set_encrypted_inode(inode);
- set_inode_flag(inode, FI_NEW_INODE);
-
if (f2fs_sb_has_extra_attr(sbi->sb)) {
set_inode_flag(inode, FI_EXTRA_ATTR);
F2FS_I(inode)->i_extra_isize = F2FS_TOTAL_EXTRA_ATTR_SIZE;
@@ -240,9 +241,9 @@ static int f2fs_link(struct dentry *old_dentry, struct inode *dir,
if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
- if (f2fs_encrypted_inode(dir) &&
- !fscrypt_has_permitted_context(dir, inode))
- return -EPERM;
+ err = fscrypt_prepare_link(old_dentry, dir, dentry);
+ if (err)
+ return err;
if (is_inode_flag_set(dir, FI_PROJ_INHERIT) &&
(!projid_eq(F2FS_I(dir)->i_projid,
@@ -357,20 +358,9 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
trace_f2fs_lookup_start(dir, dentry, flags);
- if (f2fs_encrypted_inode(dir)) {
- err = fscrypt_get_encryption_info(dir);
-
- /*
- * DCACHE_ENCRYPTED_WITH_KEY is set if the dentry is
- * created while the directory was encrypted and we
- * don't have access to the key.
- */
- if (fscrypt_has_encryption_key(dir))
- fscrypt_set_encrypted_dentry(dentry);
- fscrypt_set_d_op(dentry);
- if (err && err != -ENOKEY)
- goto out;
- }
+ err = fscrypt_prepare_lookup(dir, dentry, flags);
+ if (err)
+ goto out;
if (dentry->d_name.len > F2FS_NAME_LEN) {
err = -ENAMETOOLONG;
@@ -544,7 +534,7 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
struct qstr istr = QSTR_INIT(symname, len);
struct fscrypt_str ostr;
- sd = kzalloc(disk_link.len, GFP_NOFS);
+ sd = f2fs_kzalloc(sbi, disk_link.len, GFP_NOFS);
if (!sd) {
err = -ENOMEM;
goto err_out;
@@ -800,18 +790,6 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
- if ((f2fs_encrypted_inode(old_dir) &&
- !fscrypt_has_encryption_key(old_dir)) ||
- (f2fs_encrypted_inode(new_dir) &&
- !fscrypt_has_encryption_key(new_dir)))
- return -ENOKEY;
-
- if ((old_dir != new_dir) && f2fs_encrypted_inode(new_dir) &&
- !fscrypt_has_permitted_context(new_dir, old_inode)) {
- err = -EPERM;
- goto out;
- }
-
if (is_inode_flag_set(new_dir, FI_PROJ_INHERIT) &&
(!projid_eq(F2FS_I(new_dir)->i_projid,
F2FS_I(old_dentry->d_inode)->i_projid)))
@@ -958,6 +936,7 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
}
f2fs_i_links_write(old_dir, false);
}
+ add_ino_entry(sbi, new_dir->i_ino, TRANS_DIR_INO);
f2fs_unlock_op(sbi);
@@ -1002,18 +981,6 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
- if ((f2fs_encrypted_inode(old_dir) &&
- !fscrypt_has_encryption_key(old_dir)) ||
- (f2fs_encrypted_inode(new_dir) &&
- !fscrypt_has_encryption_key(new_dir)))
- return -ENOKEY;
-
- if ((f2fs_encrypted_inode(old_dir) || f2fs_encrypted_inode(new_dir)) &&
- (old_dir != new_dir) &&
- (!fscrypt_has_permitted_context(new_dir, old_inode) ||
- !fscrypt_has_permitted_context(old_dir, new_inode)))
- return -EPERM;
-
if ((is_inode_flag_set(new_dir, FI_PROJ_INHERIT) &&
!projid_eq(F2FS_I(new_dir)->i_projid,
F2FS_I(old_dentry->d_inode)->i_projid)) ||
@@ -1124,6 +1091,9 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
}
f2fs_mark_inode_dirty_sync(new_dir, false);
+ add_ino_entry(sbi, old_dir->i_ino, TRANS_DIR_INO);
+ add_ino_entry(sbi, new_dir->i_ino, TRANS_DIR_INO);
+
f2fs_unlock_op(sbi);
if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
@@ -1153,9 +1123,16 @@ static int f2fs_rename2(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
+ int err;
+
if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
return -EINVAL;
+ err = fscrypt_prepare_rename(old_dir, old_dentry, new_dir, new_dentry,
+ flags);
+ if (err)
+ return err;
+
if (flags & RENAME_EXCHANGE) {
return f2fs_cross_rename(old_dir, old_dentry,
new_dir, new_dentry);
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index d3322752426f..177c438e4a56 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -143,11 +143,9 @@ static struct nat_entry *__alloc_nat_entry(nid_t nid, bool no_fail)
struct nat_entry *new;
if (no_fail)
- new = f2fs_kmem_cache_alloc(nat_entry_slab,
- GFP_NOFS | __GFP_ZERO);
+ new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_F2FS_ZERO);
else
- new = kmem_cache_alloc(nat_entry_slab,
- GFP_NOFS | __GFP_ZERO);
+ new = kmem_cache_alloc(nat_entry_slab, GFP_F2FS_ZERO);
if (new) {
nat_set_nid(new, nid);
nat_reset_flag(new);
@@ -702,7 +700,6 @@ static void truncate_node(struct dnode_of_data *dn)
struct node_info ni;
get_node_info(sbi, dn->nid, &ni);
- f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);
/* Deallocate node address */
invalidate_blocks(sbi, ni.blk_addr);
@@ -1336,14 +1333,19 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
.encrypted_page = NULL,
.submitted = false,
.io_type = io_type,
+ .io_wbc = wbc,
};
trace_f2fs_writepage(page, NODE);
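+	/* if the checkpoint has failed, drop this dirty node page quietly */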
+ if (unlikely(f2fs_cp_error(sbi))) {
+ dec_page_count(sbi, F2FS_DIRTY_NODES);
+ unlock_page(page);
+ return 0;
+ }
+
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto redirty_out;
- if (unlikely(f2fs_cp_error(sbi)))
- goto redirty_out;
/* get old block addr of this node page */
nid = nid_of_node(page);
@@ -1580,12 +1582,6 @@ next_step:
struct page *page = pvec.pages[i];
bool submitted = false;
- if (unlikely(f2fs_cp_error(sbi))) {
- pagevec_release(&pvec);
- ret = -EIO;
- goto out;
- }
-
/*
* flushing sequence with step:
* 0. indirect nodes
@@ -1655,9 +1651,12 @@ continue_unlock:
step++;
goto next_step;
}
-out:
+
if (nwritten)
f2fs_submit_merged_write(sbi, NODE);
+
+ if (unlikely(f2fs_cp_error(sbi)))
+ return -EIO;
return ret;
}
@@ -1812,8 +1811,33 @@ static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i,
}
}
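+/*
+ * Keep the free nid bitmap and counter of a NAT block in sync; the caller
+ * must hold nid_list_lock.
+ */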
+static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
+ bool set, bool build)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+ unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
+ unsigned int nid_ofs = nid - START_NID(nid);
+
+ if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
+ return;
+
+ if (set) {
+ if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
+ return;
+ __set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
+ nm_i->free_nid_count[nat_ofs]++;
+ } else {
+ if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
+ return;
+ __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
+ if (!build)
+ nm_i->free_nid_count[nat_ofs]--;
+ }
+}
+
/* return whether the nid is recognized as free */
-static bool add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
+static bool add_free_nid(struct f2fs_sb_info *sbi,
+ nid_t nid, bool build, bool update)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i, *e;
@@ -1829,8 +1853,7 @@ static bool add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
i->nid = nid;
i->state = FREE_NID;
- if (radix_tree_preload(GFP_NOFS))
- goto err;
+ radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
spin_lock(&nm_i->nid_list_lock);
@@ -1871,9 +1894,14 @@ static bool add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
ret = true;
err = __insert_free_nid(sbi, i, FREE_NID);
err_out:
+ if (update) {
+ update_free_nid_bitmap(sbi, nid, ret, build);
+ if (!build)
+ nm_i->available_nids++;
+ }
spin_unlock(&nm_i->nid_list_lock);
radix_tree_preload_end();
-err:
+
if (err)
kmem_cache_free(free_nid_slab, i);
return ret;
@@ -1897,30 +1925,6 @@ static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
kmem_cache_free(free_nid_slab, i);
}
-static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
- bool set, bool build)
-{
- struct f2fs_nm_info *nm_i = NM_I(sbi);
- unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
- unsigned int nid_ofs = nid - START_NID(nid);
-
- if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
- return;
-
- if (set) {
- if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
- return;
- __set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
- nm_i->free_nid_count[nat_ofs]++;
- } else {
- if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
- return;
- __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
- if (!build)
- nm_i->free_nid_count[nat_ofs]--;
- }
-}
-
static void scan_nat_page(struct f2fs_sb_info *sbi,
struct page *nat_page, nid_t start_nid)
{
@@ -1930,26 +1934,23 @@ static void scan_nat_page(struct f2fs_sb_info *sbi,
unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
int i;
- if (test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
- return;
-
__set_bit_le(nat_ofs, nm_i->nat_block_bitmap);
i = start_nid % NAT_ENTRY_PER_BLOCK;
for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
- bool freed = false;
-
if (unlikely(start_nid >= nm_i->max_nid))
break;
blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
- if (blk_addr == NULL_ADDR)
- freed = add_free_nid(sbi, start_nid, true);
- spin_lock(&NM_I(sbi)->nid_list_lock);
- update_free_nid_bitmap(sbi, start_nid, freed, true);
- spin_unlock(&NM_I(sbi)->nid_list_lock);
+ if (blk_addr == NULL_ADDR) {
+ add_free_nid(sbi, start_nid, true, true);
+ } else {
+ spin_lock(&NM_I(sbi)->nid_list_lock);
+ update_free_nid_bitmap(sbi, start_nid, false, true);
+ spin_unlock(&NM_I(sbi)->nid_list_lock);
+ }
}
}
@@ -1967,7 +1968,7 @@ static void scan_curseg_cache(struct f2fs_sb_info *sbi)
addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
nid = le32_to_cpu(nid_in_journal(journal, i));
if (addr == NULL_ADDR)
- add_free_nid(sbi, nid, true);
+ add_free_nid(sbi, nid, true, false);
else
remove_free_nid(sbi, nid);
}
@@ -1994,7 +1995,7 @@ static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
break;
nid = i * NAT_ENTRY_PER_BLOCK + idx;
- add_free_nid(sbi, nid, true);
+ add_free_nid(sbi, nid, true, false);
if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS)
goto out;
@@ -2037,10 +2038,13 @@ static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
down_read(&nm_i->nat_tree_lock);
while (1) {
- struct page *page = get_current_nat_page(sbi, nid);
+ if (!test_bit_le(NAT_BLOCK_OFFSET(nid),
+ nm_i->nat_block_bitmap)) {
+ struct page *page = get_current_nat_page(sbi, nid);
- scan_nat_page(sbi, page, nid);
- f2fs_put_page(page, 1);
+ scan_nat_page(sbi, page, nid);
+ f2fs_put_page(page, 1);
+ }
nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
if (unlikely(nid >= nm_i->max_nid))
@@ -2203,7 +2207,9 @@ void recover_inline_xattr(struct inode *inode, struct page *page)
f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage));
ri = F2FS_INODE(page);
- if (!(ri->i_inline & F2FS_INLINE_XATTR)) {
+ if (ri->i_inline & F2FS_INLINE_XATTR) {
+ set_inode_flag(inode, FI_INLINE_XATTR);
+ } else {
clear_inode_flag(inode, FI_INLINE_XATTR);
goto update_inode;
}
@@ -2219,7 +2225,7 @@ update_inode:
f2fs_put_page(ipage, 1);
}
-int recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr)
+int recover_xattr_data(struct inode *inode, struct page *page)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
@@ -2233,7 +2239,6 @@ int recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr)
/* 1: invalidate the previous xattr nid */
get_node_info(sbi, prev_xnid, &ni);
- f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);
invalidate_blocks(sbi, ni.blk_addr);
dec_valid_node_count(sbi, inode, false);
set_node_addr(sbi, &ni, NULL_ADDR, false);
@@ -2322,7 +2327,7 @@ retry:
return 0;
}
-int restore_node_summary(struct f2fs_sb_info *sbi,
+void restore_node_summary(struct f2fs_sb_info *sbi,
unsigned int segno, struct f2fs_summary_block *sum)
{
struct f2fs_node *rn;
@@ -2355,7 +2360,6 @@ int restore_node_summary(struct f2fs_sb_info *sbi,
invalidate_mapping_pages(META_MAPPING(sbi), addr,
addr + nrpages);
}
- return 0;
}
static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
@@ -2497,11 +2501,7 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
nat_reset_flag(ne);
__clear_nat_cache_dirty(NM_I(sbi), set, ne);
if (nat_get_blkaddr(ne) == NULL_ADDR) {
- add_free_nid(sbi, nid, false);
- spin_lock(&NM_I(sbi)->nid_list_lock);
- NM_I(sbi)->available_nids++;
- update_free_nid_bitmap(sbi, nid, true, false);
- spin_unlock(&NM_I(sbi)->nid_list_lock);
+ add_free_nid(sbi, nid, false, true);
} else {
spin_lock(&NM_I(sbi)->nid_list_lock);
update_free_nid_bitmap(sbi, nid, false, false);
@@ -2582,8 +2582,8 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
nm_i->nat_bits_blocks = F2FS_BYTES_TO_BLK((nat_bits_bytes << 1) + 8 +
F2FS_BLKSIZE - 1);
- nm_i->nat_bits = kzalloc(nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS,
- GFP_KERNEL);
+ nm_i->nat_bits = f2fs_kzalloc(sbi,
+ nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL);
if (!nm_i->nat_bits)
return -ENOMEM;
@@ -2661,7 +2661,7 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
/* unused nids: 0, node, meta, (and root counted as valid node) */
nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
- F2FS_RESERVED_NODE_NUM;
+ sbi->nquota_files - F2FS_RESERVED_NODE_NUM;
nm_i->nid_cnt[FREE_NID] = 0;
nm_i->nid_cnt[PREALLOC_NID] = 0;
nm_i->nat_cnt = 0;
@@ -2708,17 +2708,17 @@ static int init_free_nid_cache(struct f2fs_sb_info *sbi)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
- nm_i->free_nid_bitmap = kvzalloc(nm_i->nat_blocks *
+ nm_i->free_nid_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks *
NAT_ENTRY_BITMAP_SIZE, GFP_KERNEL);
if (!nm_i->free_nid_bitmap)
return -ENOMEM;
- nm_i->nat_block_bitmap = kvzalloc(nm_i->nat_blocks / 8,
+ nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8,
GFP_KERNEL);
if (!nm_i->nat_block_bitmap)
return -ENOMEM;
- nm_i->free_nid_count = kvzalloc(nm_i->nat_blocks *
+ nm_i->free_nid_count = f2fs_kvzalloc(sbi, nm_i->nat_blocks *
sizeof(unsigned short), GFP_KERNEL);
if (!nm_i->free_nid_count)
return -ENOMEM;
@@ -2729,7 +2729,8 @@ int build_node_manager(struct f2fs_sb_info *sbi)
{
int err;
- sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
+ sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info),
+ GFP_KERNEL);
if (!sbi->nm_info)
return -ENOMEM;
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index 0ee3e5ff49a3..081ef0d672bf 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -305,6 +305,10 @@ static inline bool is_recoverable_dnode(struct page *page)
struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
__u64 cp_ver = cur_cp_version(ckpt);
+ /* Ignore the crc part if fsck.f2fs set the no-crc recovery flag. */
+ if (__is_set_ckpt_flags(ckpt, CP_NOCRC_RECOVERY_FLAG))
+ return (cp_ver << 32) == (cpver_of_node(page) << 32);
+
if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG))
cp_ver |= (cur_cp_crc(ckpt) << 32);
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index b3a14b0429f2..337f3363f48f 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -195,6 +195,20 @@ out:
return err;
}
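+/*
+ * Sync the in-memory inline flags (pin-file, data-exist, inline-dots) with
+ * the flags recorded in the on-disk inode being recovered.
+ */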
+static void recover_inline_flags(struct inode *inode, struct f2fs_inode *ri)
+{
+ if (ri->i_inline & F2FS_PIN_FILE)
+ set_inode_flag(inode, FI_PIN_FILE);
+ else
+ clear_inode_flag(inode, FI_PIN_FILE);
+ if (ri->i_inline & F2FS_DATA_EXIST)
+ set_inode_flag(inode, FI_DATA_EXIST);
+ else
+ clear_inode_flag(inode, FI_DATA_EXIST);
+ if (!(ri->i_inline & F2FS_INLINE_DOTS))
+ clear_inode_flag(inode, FI_INLINE_DOTS);
+}
+
static void recover_inode(struct inode *inode, struct page *page)
{
struct f2fs_inode *raw = F2FS_INODE(page);
@@ -211,13 +225,16 @@ static void recover_inode(struct inode *inode, struct page *page)
F2FS_I(inode)->i_advise = raw->i_advise;
+ recover_inline_flags(inode, raw);
+
if (file_enc_name(inode))
name = "<encrypted>";
else
name = F2FS_INODE(page)->i_name;
- f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s",
- ino_of_node(page), name);
+ f2fs_msg(inode->i_sb, KERN_NOTICE,
+ "recover_inode: ino = %x, name = %s, inline = %x",
+ ino_of_node(page), name, raw->i_inline);
}
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
@@ -404,7 +421,7 @@ truncate_out:
}
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
- struct page *page, block_t blkaddr)
+ struct page *page)
{
struct dnode_of_data dn;
struct node_info ni;
@@ -415,7 +432,7 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
if (IS_INODE(page)) {
recover_inline_xattr(inode, page);
} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
- err = recover_xattr_data(inode, page, blkaddr);
+ err = recover_xattr_data(inode, page);
if (!err)
recovered++;
goto out;
@@ -568,7 +585,7 @@ static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
break;
}
}
- err = do_recover_data(sbi, entry->inode, page, blkaddr);
+ err = do_recover_data(sbi, entry->inode, page);
if (err) {
f2fs_put_page(page, 1);
break;
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index c117e0913f2a..b16a8e6625aa 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -248,7 +248,11 @@ retry:
goto next;
}
get_node_info(sbi, dn.nid, &ni);
- f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
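+			/* restore a preallocated (NEW_ADDR) block instead of replacing it */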
+ if (cur->old_addr == NEW_ADDR) {
+ invalidate_blocks(sbi, dn.data_blkaddr);
+ f2fs_update_data_blkaddr(&dn, NEW_ADDR);
+ } else
+ f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
cur->old_addr, ni.version, true, true);
f2fs_put_dnode(&dn);
}
@@ -657,7 +661,7 @@ int create_flush_cmd_control(struct f2fs_sb_info *sbi)
goto init_thread;
}
- fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL);
+ fcc = f2fs_kzalloc(sbi, sizeof(struct flush_cmd_control), GFP_KERNEL);
if (!fcc)
return -ENOMEM;
atomic_set(&fcc->issued_flush, 0);
@@ -884,7 +888,7 @@ static void f2fs_submit_discard_endio(struct bio *bio)
bio_put(bio);
}
-void __check_sit_bitmap(struct f2fs_sb_info *sbi,
+static void __check_sit_bitmap(struct f2fs_sb_info *sbi,
block_t start, block_t end)
{
#ifdef CONFIG_F2FS_CHECK_FS
@@ -1204,6 +1208,8 @@ static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
pend_list = &dcc->pend_list[i];
mutex_lock(&dcc->cmd_lock);
+ if (list_empty(pend_list))
+ goto next;
f2fs_bug_on(sbi, !__check_rb_tree_consistence(sbi, &dcc->root));
blk_start_plug(&plug);
list_for_each_entry_safe(dc, tmp, pend_list, list) {
@@ -1222,6 +1228,7 @@ skip:
break;
}
blk_finish_plug(&plug);
+next:
mutex_unlock(&dcc->cmd_lock);
if (iter >= dpolicy->max_requests)
@@ -1256,6 +1263,11 @@ static bool __drop_discard_cmd(struct f2fs_sb_info *sbi)
return dropped;
}
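+/* Drop all queued discard commands, e.g. when the filesystem is shut down. */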
+void drop_discard_cmd(struct f2fs_sb_info *sbi)
+{
+ __drop_discard_cmd(sbi);
+}
+
static unsigned int __wait_one_discard_bio(struct f2fs_sb_info *sbi,
struct discard_cmd *dc)
{
@@ -1324,7 +1336,7 @@ static void __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
}
/* This should be covered by global mutex, &sit_i->sentry_lock */
-void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
+static void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
{
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
struct discard_cmd *dc;
@@ -1394,6 +1406,8 @@ static int issue_discard_thread(void *data)
msecs_to_jiffies(wait_ms));
if (try_to_freeze())
continue;
+ if (f2fs_readonly(sbi->sb))
+ continue;
if (kthread_should_stop())
return 0;
@@ -1703,25 +1717,20 @@ void init_discard_policy(struct discard_policy *dpolicy,
dpolicy->sync = true;
dpolicy->granularity = granularity;
+ dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
+ dpolicy->io_aware_gran = MAX_PLIST_NUM;
+
if (discard_type == DPOLICY_BG) {
dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
- dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
- dpolicy->io_aware_gran = MAX_PLIST_NUM;
dpolicy->io_aware = true;
} else if (discard_type == DPOLICY_FORCE) {
dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
- dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
- dpolicy->io_aware_gran = MAX_PLIST_NUM;
dpolicy->io_aware = true;
} else if (discard_type == DPOLICY_FSTRIM) {
- dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
- dpolicy->io_aware_gran = MAX_PLIST_NUM;
dpolicy->io_aware = false;
} else if (discard_type == DPOLICY_UMOUNT) {
- dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
- dpolicy->io_aware_gran = MAX_PLIST_NUM;
dpolicy->io_aware = false;
}
}
@@ -1737,7 +1746,7 @@ static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
goto init_thread;
}
- dcc = kzalloc(sizeof(struct discard_cmd_control), GFP_KERNEL);
+ dcc = f2fs_kzalloc(sbi, sizeof(struct discard_cmd_control), GFP_KERNEL);
if (!dcc)
return -ENOMEM;
@@ -2739,6 +2748,7 @@ void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
}
}
+ f2fs_bug_on(sbi, !IS_DATASEG(type));
curseg = CURSEG_I(sbi, type);
mutex_lock(&curseg->curseg_mutex);
@@ -2823,7 +2833,7 @@ void f2fs_wait_on_block_writeback(struct f2fs_sb_info *sbi, block_t blkaddr)
}
}
-static int read_compacted_summaries(struct f2fs_sb_info *sbi)
+static void read_compacted_summaries(struct f2fs_sb_info *sbi)
{
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
struct curseg_info *seg_i;
@@ -2880,7 +2890,6 @@ static int read_compacted_summaries(struct f2fs_sb_info *sbi)
}
}
f2fs_put_page(page, 1);
- return 0;
}
static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
@@ -2926,13 +2935,7 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
ns->ofs_in_node = 0;
}
} else {
- int err;
-
- err = restore_node_summary(sbi, segno, sum);
- if (err) {
- f2fs_put_page(new, 1);
- return err;
- }
+ restore_node_summary(sbi, segno, sum);
}
}
@@ -2971,8 +2974,7 @@ static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
META_CP, true);
/* restore for compacted data summary */
- if (read_compacted_summaries(sbi))
- return -EINVAL;
+ read_compacted_summaries(sbi);
type = CURSEG_HOT_NODE;
}
@@ -3108,28 +3110,19 @@ static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
unsigned int start)
{
struct sit_info *sit_i = SIT_I(sbi);
- struct page *src_page, *dst_page;
+ struct page *page;
pgoff_t src_off, dst_off;
- void *src_addr, *dst_addr;
src_off = current_sit_addr(sbi, start);
dst_off = next_sit_addr(sbi, src_off);
- /* get current sit block page without lock */
- src_page = get_meta_page(sbi, src_off);
- dst_page = grab_meta_page(sbi, dst_off);
- f2fs_bug_on(sbi, PageDirty(src_page));
-
- src_addr = page_address(src_page);
- dst_addr = page_address(dst_page);
- memcpy(dst_addr, src_addr, PAGE_SIZE);
-
- set_page_dirty(dst_page);
- f2fs_put_page(src_page, 1);
+ page = grab_meta_page(sbi, dst_off);
+ seg_info_to_sit_page(sbi, page, start);
+ set_page_dirty(page);
set_to_next_sit(sit_i, start);
- return dst_page;
+ return page;
}
static struct sit_entry_set *grab_sit_entry_set(void)
@@ -3338,52 +3331,54 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
unsigned int bitmap_size;
/* allocate memory for SIT information */
- sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL);
+ sit_i = f2fs_kzalloc(sbi, sizeof(struct sit_info), GFP_KERNEL);
if (!sit_i)
return -ENOMEM;
SM_I(sbi)->sit_info = sit_i;
- sit_i->sentries = kvzalloc(MAIN_SEGS(sbi) *
+ sit_i->sentries = f2fs_kvzalloc(sbi, MAIN_SEGS(sbi) *
sizeof(struct seg_entry), GFP_KERNEL);
if (!sit_i->sentries)
return -ENOMEM;
bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
- sit_i->dirty_sentries_bitmap = kvzalloc(bitmap_size, GFP_KERNEL);
+ sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(sbi, bitmap_size,
+ GFP_KERNEL);
if (!sit_i->dirty_sentries_bitmap)
return -ENOMEM;
for (start = 0; start < MAIN_SEGS(sbi); start++) {
sit_i->sentries[start].cur_valid_map
- = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
+ = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
sit_i->sentries[start].ckpt_valid_map
- = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
+ = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
if (!sit_i->sentries[start].cur_valid_map ||
!sit_i->sentries[start].ckpt_valid_map)
return -ENOMEM;
#ifdef CONFIG_F2FS_CHECK_FS
sit_i->sentries[start].cur_valid_map_mir
- = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
+ = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
if (!sit_i->sentries[start].cur_valid_map_mir)
return -ENOMEM;
#endif
if (f2fs_discard_en(sbi)) {
sit_i->sentries[start].discard_map
- = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
+ = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE,
+ GFP_KERNEL);
if (!sit_i->sentries[start].discard_map)
return -ENOMEM;
}
}
- sit_i->tmp_map = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
+ sit_i->tmp_map = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
if (!sit_i->tmp_map)
return -ENOMEM;
if (sbi->segs_per_sec > 1) {
- sit_i->sec_entries = kvzalloc(MAIN_SECS(sbi) *
+ sit_i->sec_entries = f2fs_kvzalloc(sbi, MAIN_SECS(sbi) *
sizeof(struct sec_entry), GFP_KERNEL);
if (!sit_i->sec_entries)
return -ENOMEM;
@@ -3427,19 +3422,19 @@ static int build_free_segmap(struct f2fs_sb_info *sbi)
unsigned int bitmap_size, sec_bitmap_size;
/* allocate memory for free segmap information */
- free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL);
+ free_i = f2fs_kzalloc(sbi, sizeof(struct free_segmap_info), GFP_KERNEL);
if (!free_i)
return -ENOMEM;
SM_I(sbi)->free_info = free_i;
bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
- free_i->free_segmap = kvmalloc(bitmap_size, GFP_KERNEL);
+ free_i->free_segmap = f2fs_kvmalloc(sbi, bitmap_size, GFP_KERNEL);
if (!free_i->free_segmap)
return -ENOMEM;
sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
- free_i->free_secmap = kvmalloc(sec_bitmap_size, GFP_KERNEL);
+ free_i->free_secmap = f2fs_kvmalloc(sbi, sec_bitmap_size, GFP_KERNEL);
if (!free_i->free_secmap)
return -ENOMEM;
@@ -3460,7 +3455,7 @@ static int build_curseg(struct f2fs_sb_info *sbi)
struct curseg_info *array;
int i;
- array = kcalloc(NR_CURSEG_TYPE, sizeof(*array), GFP_KERNEL);
+ array = f2fs_kzalloc(sbi, sizeof(*array) * NR_CURSEG_TYPE, GFP_KERNEL);
if (!array)
return -ENOMEM;
@@ -3468,12 +3463,12 @@ static int build_curseg(struct f2fs_sb_info *sbi)
for (i = 0; i < NR_CURSEG_TYPE; i++) {
mutex_init(&array[i].curseg_mutex);
- array[i].sum_blk = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ array[i].sum_blk = f2fs_kzalloc(sbi, PAGE_SIZE, GFP_KERNEL);
if (!array[i].sum_blk)
return -ENOMEM;
init_rwsem(&array[i].journal_rwsem);
- array[i].journal = kzalloc(sizeof(struct f2fs_journal),
- GFP_KERNEL);
+ array[i].journal = f2fs_kzalloc(sbi,
+ sizeof(struct f2fs_journal), GFP_KERNEL);
if (!array[i].journal)
return -ENOMEM;
array[i].segno = NULL_SEGNO;
@@ -3482,7 +3477,7 @@ static int build_curseg(struct f2fs_sb_info *sbi)
return restore_curseg_summaries(sbi);
}
-static void build_sit_entries(struct f2fs_sb_info *sbi)
+static int build_sit_entries(struct f2fs_sb_info *sbi)
{
struct sit_info *sit_i = SIT_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
@@ -3492,6 +3487,7 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
int sit_blk_cnt = SIT_BLK_CNT(sbi);
unsigned int i, start, end;
unsigned int readed, start_blk = 0;
+ int err = 0;
do {
readed = ra_meta_pages(sbi, start_blk, BIO_MAX_PAGES,
@@ -3510,7 +3506,9 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
f2fs_put_page(page, 1);
- check_block_count(sbi, start, &sit);
+ err = check_block_count(sbi, start, &sit);
+ if (err)
+ return err;
seg_info_from_raw_sit(se, &sit);
/* build discard map only one time */
@@ -3545,7 +3543,9 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
old_valid_blocks = se->valid_blocks;
- check_block_count(sbi, start, &sit);
+ err = check_block_count(sbi, start, &sit);
+ if (err)
+ break;
seg_info_from_raw_sit(se, &sit);
if (f2fs_discard_en(sbi)) {
@@ -3565,6 +3565,7 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
se->valid_blocks - old_valid_blocks;
}
up_read(&curseg->journal_rwsem);
+ return err;
}
static void init_free_segmap(struct f2fs_sb_info *sbi)
@@ -3619,7 +3620,7 @@ static int init_victim_secmap(struct f2fs_sb_info *sbi)
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
- dirty_i->victim_secmap = kvzalloc(bitmap_size, GFP_KERNEL);
+ dirty_i->victim_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
if (!dirty_i->victim_secmap)
return -ENOMEM;
return 0;
@@ -3631,7 +3632,8 @@ static int build_dirty_segmap(struct f2fs_sb_info *sbi)
unsigned int bitmap_size, i;
/* allocate memory for dirty segments list information */
- dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL);
+ dirty_i = f2fs_kzalloc(sbi, sizeof(struct dirty_seglist_info),
+ GFP_KERNEL);
if (!dirty_i)
return -ENOMEM;
@@ -3641,7 +3643,8 @@ static int build_dirty_segmap(struct f2fs_sb_info *sbi)
bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
for (i = 0; i < NR_DIRTY_TYPE; i++) {
- dirty_i->dirty_segmap[i] = kvzalloc(bitmap_size, GFP_KERNEL);
+ dirty_i->dirty_segmap[i] = f2fs_kvzalloc(sbi, bitmap_size,
+ GFP_KERNEL);
if (!dirty_i->dirty_segmap[i])
return -ENOMEM;
}
@@ -3685,7 +3688,7 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
struct f2fs_sm_info *sm_info;
int err;
- sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL);
+ sm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_sm_info), GFP_KERNEL);
if (!sm_info)
return -ENOMEM;
@@ -3737,7 +3740,9 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
return err;
/* reinit free segmap based on SIT */
- build_sit_entries(sbi);
+ err = build_sit_entries(sbi);
+ if (err)
+ return err;
init_free_segmap(sbi);
err = build_dirty_segmap(sbi);
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index d1d394cdf61d..f11c4bc82c78 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -348,16 +348,41 @@ static inline void seg_info_from_raw_sit(struct seg_entry *se,
se->mtime = le64_to_cpu(rs->mtime);
}
-static inline void seg_info_to_raw_sit(struct seg_entry *se,
+static inline void __seg_info_to_raw_sit(struct seg_entry *se,
struct f2fs_sit_entry *rs)
{
unsigned short raw_vblocks = (se->type << SIT_VBLOCKS_SHIFT) |
se->valid_blocks;
rs->vblocks = cpu_to_le16(raw_vblocks);
memcpy(rs->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
+ rs->mtime = cpu_to_le64(se->mtime);
+}
+
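+/* Rebuild a whole SIT block page from the in-memory segment entries. */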
+static inline void seg_info_to_sit_page(struct f2fs_sb_info *sbi,
+ struct page *page, unsigned int start)
+{
+ struct f2fs_sit_block *raw_sit;
+ struct seg_entry *se;
+ struct f2fs_sit_entry *rs;
+ unsigned int end = min(start + SIT_ENTRY_PER_BLOCK,
+ (unsigned long)MAIN_SEGS(sbi));
+ int i;
+
+ raw_sit = (struct f2fs_sit_block *)page_address(page);
+ for (i = 0; i < end - start; i++) {
+ rs = &raw_sit->entries[i];
+ se = get_seg_entry(sbi, start + i);
+ __seg_info_to_raw_sit(se, rs);
+ }
+}
+
+static inline void seg_info_to_raw_sit(struct seg_entry *se,
+ struct f2fs_sit_entry *rs)
+{
+ __seg_info_to_raw_sit(se, rs);
+
memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
se->ckpt_valid_blocks = se->valid_blocks;
- rs->mtime = cpu_to_le64(se->mtime);
}
static inline unsigned int find_next_inuse(struct free_segmap_info *free_i,
@@ -580,47 +605,6 @@ enum {
F2FS_IPU_ASYNC,
};
-static inline bool need_inplace_update_policy(struct inode *inode,
- struct f2fs_io_info *fio)
-{
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- unsigned int policy = SM_I(sbi)->ipu_policy;
-
- if (test_opt(sbi, LFS))
- return false;
-
- /* if this is cold file, we should overwrite to avoid fragmentation */
- if (file_is_cold(inode))
- return true;
-
- if (policy & (0x1 << F2FS_IPU_FORCE))
- return true;
- if (policy & (0x1 << F2FS_IPU_SSR) && need_SSR(sbi))
- return true;
- if (policy & (0x1 << F2FS_IPU_UTIL) &&
- utilization(sbi) > SM_I(sbi)->min_ipu_util)
- return true;
- if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && need_SSR(sbi) &&
- utilization(sbi) > SM_I(sbi)->min_ipu_util)
- return true;
-
- /*
- * IPU for rewrite async pages
- */
- if (policy & (0x1 << F2FS_IPU_ASYNC) &&
- fio && fio->op == REQ_OP_WRITE &&
- !(fio->op_flags & REQ_SYNC) &&
- !f2fs_encrypted_inode(inode))
- return true;
-
- /* this is only set during fdatasync */
- if (policy & (0x1 << F2FS_IPU_FSYNC) &&
- is_inode_flag_set(inode, FI_NEED_IPU))
- return true;
-
- return false;
-}
-
static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi,
int type)
{
@@ -655,7 +639,7 @@ static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
/*
* Summary block is always treated as an invalid block
*/
-static inline void check_block_count(struct f2fs_sb_info *sbi,
+static inline int check_block_count(struct f2fs_sb_info *sbi,
int segno, struct f2fs_sit_entry *raw_sit)
{
#ifdef CONFIG_F2FS_CHECK_FS
@@ -677,11 +661,25 @@ static inline void check_block_count(struct f2fs_sb_info *sbi,
cur_pos = next_pos;
is_valid = !is_valid;
} while (cur_pos < sbi->blocks_per_seg);
- BUG_ON(GET_SIT_VBLOCKS(raw_sit) != valid_blocks);
+
+ if (unlikely(GET_SIT_VBLOCKS(raw_sit) != valid_blocks)) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Mismatch valid blocks %d vs. %d",
+ GET_SIT_VBLOCKS(raw_sit), valid_blocks);
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ return -EINVAL;
+ }
#endif
/* check segment usage, and check boundary of a given segment number */
- f2fs_bug_on(sbi, GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg
- || segno > TOTAL_SEGS(sbi) - 1);
+ if (unlikely(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg
+ || segno > TOTAL_SEGS(sbi) - 1)) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Wrong valid blocks %d or segno %u",
+ GET_SIT_VBLOCKS(raw_sit), segno);
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ return -EINVAL;
+ }
+ return 0;
}
static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 708155d9c2e4..8173ae688814 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -43,6 +43,7 @@ static struct kmem_cache *f2fs_inode_cachep;
char *fault_name[FAULT_MAX] = {
[FAULT_KMALLOC] = "kmalloc",
+ [FAULT_KVMALLOC] = "kvmalloc",
[FAULT_PAGE_ALLOC] = "page alloc",
[FAULT_PAGE_GET] = "page get",
[FAULT_ALLOC_BIO] = "alloc bio",
@@ -106,6 +107,9 @@ enum {
Opt_noextent_cache,
Opt_noinline_data,
Opt_data_flush,
+ Opt_reserve_root,
+ Opt_resgid,
+ Opt_resuid,
Opt_mode,
Opt_io_size_bits,
Opt_fault_injection,
@@ -156,6 +160,9 @@ static match_table_t f2fs_tokens = {
{Opt_noextent_cache, "noextent_cache"},
{Opt_noinline_data, "noinline_data"},
{Opt_data_flush, "data_flush"},
+ {Opt_reserve_root, "reserve_root=%u"},
+ {Opt_resgid, "resgid=%u"},
+ {Opt_resuid, "resuid=%u"},
{Opt_mode, "mode=%s"},
{Opt_io_size_bits, "io_bits=%u"},
{Opt_fault_injection, "fault_injection=%u"},
@@ -190,6 +197,28 @@ void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
va_end(args);
}
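+/*
+ * Cap reserve_root at 0.2% of user blocks, and warn when resuid/resgid are
+ * given without reserve_root.
+ */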
+static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
+{
+ block_t limit = (sbi->user_block_count << 1) / 1000;
+
+ /* limit is 0.2% of user blocks */
+ if (test_opt(sbi, RESERVE_ROOT) && sbi->root_reserved_blocks > limit) {
+ sbi->root_reserved_blocks = limit;
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "Reduce reserved blocks for root = %u",
+ sbi->root_reserved_blocks);
+ }
+ if (!test_opt(sbi, RESERVE_ROOT) &&
+ (!uid_eq(sbi->s_resuid,
+ make_kuid(&init_user_ns, F2FS_DEF_RESUID)) ||
+ !gid_eq(sbi->s_resgid,
+ make_kgid(&init_user_ns, F2FS_DEF_RESGID))))
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root",
+ from_kuid_munged(&init_user_ns, sbi->s_resuid),
+ from_kgid_munged(&init_user_ns, sbi->s_resgid));
+}
+
static void init_once(void *foo)
{
struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;
@@ -320,6 +349,8 @@ static int parse_options(struct super_block *sb, char *options)
substring_t args[MAX_OPT_ARGS];
char *p, *name;
int arg = 0;
+ kuid_t uid;
+ kgid_t gid;
#ifdef CONFIG_QUOTA
int ret;
#endif
@@ -487,6 +518,40 @@ static int parse_options(struct super_block *sb, char *options)
case Opt_data_flush:
set_opt(sbi, DATA_FLUSH);
break;
+ case Opt_reserve_root:
+ if (args->from && match_int(args, &arg))
+ return -EINVAL;
+ if (test_opt(sbi, RESERVE_ROOT)) {
+ f2fs_msg(sb, KERN_INFO,
+ "Preserve previous reserve_root=%u",
+ sbi->root_reserved_blocks);
+ } else {
+ sbi->root_reserved_blocks = arg;
+ set_opt(sbi, RESERVE_ROOT);
+ }
+ break;
+ case Opt_resuid:
+ if (args->from && match_int(args, &arg))
+ return -EINVAL;
+ uid = make_kuid(current_user_ns(), arg);
+ if (!uid_valid(uid)) {
+ f2fs_msg(sb, KERN_ERR,
+ "Invalid uid value %d", arg);
+ return -EINVAL;
+ }
+ sbi->s_resuid = uid;
+ break;
+ case Opt_resgid:
+ if (args->from && match_int(args, &arg))
+ return -EINVAL;
+ gid = make_kgid(current_user_ns(), arg);
+ if (!gid_valid(gid)) {
+ f2fs_msg(sb, KERN_ERR,
+ "Invalid gid value %d", arg);
+ return -EINVAL;
+ }
+ sbi->s_resgid = gid;
+ break;
case Opt_mode:
name = match_strdup(&args[0]);
@@ -993,22 +1058,25 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
struct super_block *sb = dentry->d_sb;
struct f2fs_sb_info *sbi = F2FS_SB(sb);
u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
- block_t total_count, user_block_count, start_count, ovp_count;
+ block_t total_count, user_block_count, start_count;
u64 avail_node_count;
total_count = le64_to_cpu(sbi->raw_super->block_count);
user_block_count = sbi->user_block_count;
start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
- ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
buf->f_type = F2FS_SUPER_MAGIC;
buf->f_bsize = sbi->blocksize;
buf->f_blocks = total_count - start_count;
- buf->f_bfree = user_block_count - valid_user_blocks(sbi) + ovp_count;
- buf->f_bavail = user_block_count - valid_user_blocks(sbi) -
+ buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
sbi->current_reserved_blocks;
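+	/* f_bavail excludes the blocks reserved for root */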
+ if (buf->f_bfree > sbi->root_reserved_blocks)
+ buf->f_bavail = buf->f_bfree - sbi->root_reserved_blocks;
+ else
+ buf->f_bavail = 0;
- avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
+ avail_node_count = sbi->total_node_count - sbi->nquota_files -
+ F2FS_RESERVED_NODE_NUM;
if (avail_node_count > user_block_count) {
buf->f_files = user_block_count;
@@ -1134,6 +1202,11 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
else if (test_opt(sbi, LFS))
seq_puts(seq, "lfs");
seq_printf(seq, ",active_logs=%u", sbi->active_logs);
+ if (test_opt(sbi, RESERVE_ROOT))
+ seq_printf(seq, ",reserve_root=%u,resuid=%u,resgid=%u",
+ sbi->root_reserved_blocks,
+ from_kuid_munged(&init_user_ns, sbi->s_resuid),
+ from_kgid_munged(&init_user_ns, sbi->s_resgid));
if (F2FS_IO_SIZE_BITS(sbi))
seq_printf(seq, ",io_size=%uKB", F2FS_IO_SIZE_KB(sbi));
#ifdef CONFIG_F2FS_FAULT_INJECTION
@@ -1263,7 +1336,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
err = dquot_suspend(sb, -1);
if (err < 0)
goto restore_opts;
- } else {
+ } else if (f2fs_readonly(sb) && !(*flags & MS_RDONLY)) {
/* dquot_resume needs RW */
sb->s_flags &= ~SB_RDONLY;
if (sb_any_quota_suspended(sb)) {
@@ -1332,6 +1405,7 @@ skip:
sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
+ limit_reserve_root(sbi);
return 0;
restore_gc:
if (need_restart_gc) {
@@ -1656,7 +1730,7 @@ void f2fs_quota_off_umount(struct super_block *sb)
f2fs_quota_off(sb, type);
}
-int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
+static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
{
*projid = F2FS_I(inode)->i_projid;
return 0;
@@ -2148,14 +2222,15 @@ static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
FDEV(devi).nr_blkz++;
- FDEV(devi).blkz_type = kmalloc(FDEV(devi).nr_blkz, GFP_KERNEL);
+ FDEV(devi).blkz_type = f2fs_kmalloc(sbi, FDEV(devi).nr_blkz,
+ GFP_KERNEL);
if (!FDEV(devi).blkz_type)
return -ENOMEM;
#define F2FS_REPORT_NR_ZONES 4096
- zones = kcalloc(F2FS_REPORT_NR_ZONES, sizeof(struct blk_zone),
- GFP_KERNEL);
+ zones = f2fs_kzalloc(sbi, sizeof(struct blk_zone) *
+ F2FS_REPORT_NR_ZONES, GFP_KERNEL);
if (!zones)
return -ENOMEM;
@@ -2295,8 +2370,8 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
* Initialize multiple devices information, or single
* zoned block device information.
*/
- sbi->devs = kcalloc(max_devices, sizeof(struct f2fs_dev_info),
- GFP_KERNEL);
+ sbi->devs = f2fs_kzalloc(sbi, sizeof(struct f2fs_dev_info) *
+ max_devices, GFP_KERNEL);
if (!sbi->devs)
return -ENOMEM;
@@ -2419,6 +2494,9 @@ try_onemore:
sb->s_fs_info = sbi;
sbi->raw_super = raw_super;
+ sbi->s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
+ sbi->s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
+
/* precompute checksum seed for metadata */
if (f2fs_sb_has_inode_chksum(sb))
sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
@@ -2462,6 +2540,13 @@ try_onemore:
else
sb->s_qcop = &f2fs_quotactl_ops;
sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
+
+ if (f2fs_sb_has_quota_ino(sbi->sb)) {
+ for (i = 0; i < MAXQUOTAS; i++) {
+ if (f2fs_qf_ino(sbi->sb, i))
+ sbi->nquota_files++;
+ }
+ }
#endif
sb->s_op = &f2fs_sops;
@@ -2475,6 +2560,7 @@ try_onemore:
sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
+ sb->s_iflags |= SB_I_CGROUPWB;
/* init f2fs-specific super block info */
sbi->valid_super_block = valid_super_block;
@@ -2495,8 +2581,9 @@ try_onemore:
int n = (i == META) ? 1: NR_TEMP_TYPE;
int j;
- sbi->write_io[i] = kmalloc(n * sizeof(struct f2fs_bio_info),
- GFP_KERNEL);
+ sbi->write_io[i] = f2fs_kmalloc(sbi,
+ n * sizeof(struct f2fs_bio_info),
+ GFP_KERNEL);
if (!sbi->write_io[i]) {
err = -ENOMEM;
goto free_options;
@@ -2517,14 +2604,14 @@ try_onemore:
err = init_percpu_info(sbi);
if (err)
- goto free_options;
+ goto free_bio_info;
if (F2FS_IO_SIZE(sbi) > 1) {
sbi->write_io_dummy =
mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
if (!sbi->write_io_dummy) {
err = -ENOMEM;
- goto free_options;
+ goto free_percpu;
}
}
@@ -2559,6 +2646,7 @@ try_onemore:
sbi->last_valid_block_count = sbi->total_valid_block_count;
sbi->reserved_blocks = 0;
sbi->current_reserved_blocks = 0;
+ limit_reserve_root(sbi);
for (i = 0; i < NR_INODE_TYPE; i++) {
INIT_LIST_HEAD(&sbi->inode_list[i]);
@@ -2604,18 +2692,16 @@ try_onemore:
goto free_nm;
}
- f2fs_join_shrinker(sbi);
-
err = f2fs_build_stats(sbi);
if (err)
- goto free_nm;
+ goto free_node_inode;
/* read root inode and dentry */
root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
if (IS_ERR(root)) {
f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
err = PTR_ERR(root);
- goto free_node_inode;
+ goto free_stats;
}
if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
iput(root);
@@ -2711,6 +2797,8 @@ skip_recovery:
sbi->valid_super_block ? 1 : 2, err);
}
+ f2fs_join_shrinker(sbi);
+
f2fs_msg(sbi->sb, KERN_NOTICE, "Mounted with checkpoint version = %llx",
cur_cp_version(F2FS_CKPT(sbi)));
f2fs_update_time(sbi, CP_TIME);
@@ -2737,14 +2825,12 @@ free_sysfs:
free_root_inode:
dput(sb->s_root);
sb->s_root = NULL;
+free_stats:
+ f2fs_destroy_stats(sbi);
free_node_inode:
- truncate_inode_pages_final(NODE_MAPPING(sbi));
- mutex_lock(&sbi->umount_mutex);
release_ino_entry(sbi, true);
- f2fs_leave_shrinker(sbi);
+ truncate_inode_pages_final(NODE_MAPPING(sbi));
iput(sbi->node_inode);
- mutex_unlock(&sbi->umount_mutex);
- f2fs_destroy_stats(sbi);
free_nm:
destroy_node_manager(sbi);
free_sm:
@@ -2757,10 +2843,12 @@ free_meta_inode:
iput(sbi->meta_inode);
free_io_dummy:
mempool_destroy(sbi->write_io_dummy);
-free_options:
+free_percpu:
+ destroy_percpu_info(sbi);
+free_bio_info:
for (i = 0; i < NR_PAGE_TYPE; i++)
kfree(sbi->write_io[i]);
- destroy_percpu_info(sbi);
+free_options:
#ifdef CONFIG_QUOTA
for (i = 0; i < MAXQUOTAS; i++)
kfree(sbi->s_qf_names[i]);
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index 9835348b6e5d..d978c7b6ea04 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -113,6 +113,9 @@ static ssize_t features_show(struct f2fs_attr *a,
if (f2fs_sb_has_quota_ino(sb))
len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
len ? ", " : "", "quota_ino");
+ if (f2fs_sb_has_inode_crtime(sb))
+ len += snprintf(buf + len, PAGE_SIZE - len, "%s%s",
+ len ? ", " : "", "inode_crtime");
len += snprintf(buf + len, PAGE_SIZE - len, "\n");
return len;
}
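
features_show() above accumulates a comma-separated feature list with repeated snprintf() calls into one page-sized buffer, using len both as the write offset and as the "is this the first entry" test. A userspace sketch of the same accumulation pattern (the feature names and BUF_SIZE are illustrative):

#include <stdio.h>

#define BUF_SIZE 4096

int main(void)
{
	const char *features[] = { "quota_ino", "inode_crtime" };
	char buf[BUF_SIZE];
	size_t len = 0;
	size_t i;

	for (i = 0; i < sizeof(features) / sizeof(features[0]); i++)
		len += snprintf(buf + len, BUF_SIZE - len, "%s%s",
				len ? ", " : "", features[i]);
	len += snprintf(buf + len, BUF_SIZE - len, "\n");
	fputs(buf, stdout);	/* prints "quota_ino, inode_crtime" */
	return 0;
}
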
@@ -162,7 +165,8 @@ static ssize_t f2fs_sbi_store(struct f2fs_attr *a,
#endif
if (a->struct_type == RESERVED_BLOCKS) {
spin_lock(&sbi->stat_lock);
- if (t > (unsigned long)sbi->user_block_count) {
+ if (t > (unsigned long)(sbi->user_block_count -
+ sbi->root_reserved_blocks)) {
spin_unlock(&sbi->stat_lock);
return -EINVAL;
}
@@ -231,6 +235,7 @@ enum feat_id {
FEAT_INODE_CHECKSUM,
FEAT_FLEXIBLE_INLINE_XATTR,
FEAT_QUOTA_INO,
+ FEAT_INODE_CRTIME,
};
static ssize_t f2fs_feature_show(struct f2fs_attr *a,
@@ -245,6 +250,7 @@ static ssize_t f2fs_feature_show(struct f2fs_attr *a,
case FEAT_INODE_CHECKSUM:
case FEAT_FLEXIBLE_INLINE_XATTR:
case FEAT_QUOTA_INO:
+ case FEAT_INODE_CRTIME:
return snprintf(buf, PAGE_SIZE, "supported\n");
}
return 0;
@@ -299,6 +305,8 @@ F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, dir_level, dir_level);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, cp_interval, interval_time[CP_TIME]);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, idle_interval, interval_time[REQ_TIME]);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, iostat_enable, iostat_enable);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, readdir_ra, readdir_ra);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_pin_file_thresh, gc_pin_file_threshold);
#ifdef CONFIG_F2FS_FAULT_INJECTION
F2FS_RW_ATTR(FAULT_INFO_RATE, f2fs_fault_info, inject_rate, inject_rate);
F2FS_RW_ATTR(FAULT_INFO_TYPE, f2fs_fault_info, inject_type, inject_type);
@@ -320,6 +328,7 @@ F2FS_FEATURE_RO_ATTR(project_quota, FEAT_PROJECT_QUOTA);
F2FS_FEATURE_RO_ATTR(inode_checksum, FEAT_INODE_CHECKSUM);
F2FS_FEATURE_RO_ATTR(flexible_inline_xattr, FEAT_FLEXIBLE_INLINE_XATTR);
F2FS_FEATURE_RO_ATTR(quota_ino, FEAT_QUOTA_INO);
+F2FS_FEATURE_RO_ATTR(inode_crtime, FEAT_INODE_CRTIME);
#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
static struct attribute *f2fs_attrs[] = {
@@ -346,6 +355,8 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(cp_interval),
ATTR_LIST(idle_interval),
ATTR_LIST(iostat_enable),
+ ATTR_LIST(readdir_ra),
+ ATTR_LIST(gc_pin_file_thresh),
#ifdef CONFIG_F2FS_FAULT_INJECTION
ATTR_LIST(inject_rate),
ATTR_LIST(inject_type),
@@ -371,6 +382,7 @@ static struct attribute *f2fs_feat_attrs[] = {
ATTR_LIST(inode_checksum),
ATTR_LIST(flexible_inline_xattr),
ATTR_LIST(quota_ino),
+ ATTR_LIST(inode_crtime),
NULL,
};
diff --git a/fs/f2fs/trace.c b/fs/f2fs/trace.c
index bccbbf2616d2..a1fcd00bbb2b 100644
--- a/fs/f2fs/trace.c
+++ b/fs/f2fs/trace.c
@@ -17,7 +17,7 @@
#include "trace.h"
static RADIX_TREE(pids, GFP_ATOMIC);
-static spinlock_t pids_lock;
+static struct mutex pids_lock;
static struct last_io_info last_io;
static inline void __print_last_io(void)
@@ -64,7 +64,7 @@ void f2fs_trace_pid(struct page *page)
if (radix_tree_preload(GFP_NOFS))
return;
- spin_lock(&pids_lock);
+ mutex_lock(&pids_lock);
p = radix_tree_lookup(&pids, pid);
if (p == current)
goto out;
@@ -77,7 +77,7 @@ void f2fs_trace_pid(struct page *page)
MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
pid, current->comm);
out:
- spin_unlock(&pids_lock);
+ mutex_unlock(&pids_lock);
radix_tree_preload_end();
}
@@ -122,7 +122,7 @@ void f2fs_trace_ios(struct f2fs_io_info *fio, int flush)
void f2fs_build_trace_ios(void)
{
- spin_lock_init(&pids_lock);
+ mutex_init(&pids_lock);
}
#define PIDVEC_SIZE 128
@@ -150,7 +150,7 @@ void f2fs_destroy_trace_ios(void)
pid_t next_pid = 0;
unsigned int found;
- spin_lock(&pids_lock);
+ mutex_lock(&pids_lock);
while ((found = gang_lookup_pids(pid, next_pid, PIDVEC_SIZE))) {
unsigned idx;
@@ -158,5 +158,5 @@ void f2fs_destroy_trace_ios(void)
for (idx = 0; idx < found; idx++)
radix_tree_delete(&pids, pid[idx]);
}
- spin_unlock(&pids_lock);
+ mutex_unlock(&pids_lock);
}
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index ec8961ef8cac..ae2dfa709f5d 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -298,8 +298,8 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
if (!size && !inline_size)
return -ENODATA;
- txattr_addr = kzalloc(inline_size + size + XATTR_PADDING_SIZE,
- GFP_F2FS_ZERO);
+ txattr_addr = f2fs_kzalloc(F2FS_I_SB(inode),
+ inline_size + size + XATTR_PADDING_SIZE, GFP_NOFS);
if (!txattr_addr)
return -ENOMEM;
@@ -351,8 +351,8 @@ static int read_all_xattrs(struct inode *inode, struct page *ipage,
void *txattr_addr;
int err;
- txattr_addr = kzalloc(inline_size + size + XATTR_PADDING_SIZE,
- GFP_F2FS_ZERO);
+ txattr_addr = f2fs_kzalloc(F2FS_I_SB(inode),
+ inline_size + size + XATTR_PADDING_SIZE, GFP_NOFS);
if (!txattr_addr)
return -ENOMEM;
@@ -433,6 +433,7 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
if (F2FS_I(inode)->i_xattr_nid) {
xpage = get_node_page(sbi, F2FS_I(inode)->i_xattr_nid);
if (IS_ERR(xpage)) {
+ err = PTR_ERR(xpage);
alloc_nid_failed(sbi, new_nid);
goto in_page_out;
}
@@ -443,6 +444,7 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
set_new_dnode(&dn, inode, NULL, NULL, new_nid);
xpage = new_node_page(&dn, XATTR_NODE_OFFSET);
if (IS_ERR(xpage)) {
+ err = PTR_ERR(xpage);
alloc_nid_failed(sbi, new_nid);
goto in_page_out;
}
@@ -598,7 +600,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
goto exit;
}
- if (f2fs_xattr_value_same(here, value, size))
+ if (value && f2fs_xattr_value_same(here, value, size))
goto exit;
} else if ((flags & XATTR_REPLACE)) {
error = -ENODATA;
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index b833ffeee1e1..8e100c3bf72c 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
+#include <linux/iversion.h>
#include "fat.h"
/*
@@ -1055,7 +1056,7 @@ int fat_remove_entries(struct inode *dir, struct fat_slot_info *sinfo)
brelse(bh);
if (err)
return err;
- dir->i_version++;
+ inode_inc_iversion(dir);
if (nr_slots) {
/*
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 20a0a89eaca5..ffbbf0520d9e 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -20,6 +20,7 @@
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <asm/unaligned.h>
+#include <linux/iversion.h>
#include "fat.h"
#ifndef CONFIG_FAT_DEFAULT_IOCHARSET
@@ -507,7 +508,7 @@ int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de)
MSDOS_I(inode)->i_pos = 0;
inode->i_uid = sbi->options.fs_uid;
inode->i_gid = sbi->options.fs_gid;
- inode->i_version++;
+ inode_inc_iversion(inode);
inode->i_generation = get_seconds();
if ((de->attr & ATTR_DIR) && !IS_FREE(de->name)) {
@@ -590,7 +591,7 @@ struct inode *fat_build_inode(struct super_block *sb,
goto out;
}
inode->i_ino = iunique(sb, MSDOS_ROOT_INO);
- inode->i_version = 1;
+ inode_set_iversion(inode, 1);
err = fat_fill_inode(inode, de);
if (err) {
iput(inode);
@@ -1377,7 +1378,7 @@ static int fat_read_root(struct inode *inode)
MSDOS_I(inode)->i_pos = MSDOS_ROOT_INO;
inode->i_uid = sbi->options.fs_uid;
inode->i_gid = sbi->options.fs_gid;
- inode->i_version++;
+ inode_inc_iversion(inode);
inode->i_generation = 0;
inode->i_mode = fat_make_mode(sbi, ATTR_DIR, S_IRWXUGO);
inode->i_op = sbi->dir_ops;
@@ -1828,7 +1829,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
if (!root_inode)
goto out_fail;
root_inode->i_ino = MSDOS_ROOT_INO;
- root_inode->i_version = 1;
+ inode_set_iversion(root_inode, 1);
error = fat_read_root(root_inode);
if (error < 0) {
iput(root_inode);
diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c
index d24d2758a363..582ca731a6c9 100644
--- a/fs/fat/namei_msdos.c
+++ b/fs/fat/namei_msdos.c
@@ -7,6 +7,7 @@
*/
#include <linux/module.h>
+#include <linux/iversion.h>
#include "fat.h"
/* Characters that are undesirable in an MS-DOS file name */
@@ -480,7 +481,7 @@ static int do_msdos_rename(struct inode *old_dir, unsigned char *old_name,
} else
mark_inode_dirty(old_inode);
- old_dir->i_version++;
+ inode_inc_iversion(old_dir);
old_dir->i_ctime = old_dir->i_mtime = current_time(old_dir);
if (IS_DIRSYNC(old_dir))
(void)fat_sync_inode(old_dir);
@@ -508,7 +509,7 @@ static int do_msdos_rename(struct inode *old_dir, unsigned char *old_name,
goto out;
new_i_pos = sinfo.i_pos;
}
- new_dir->i_version++;
+ inode_inc_iversion(new_dir);
fat_detach(old_inode);
fat_attach(old_inode, new_i_pos);
@@ -540,7 +541,7 @@ static int do_msdos_rename(struct inode *old_dir, unsigned char *old_name,
old_sinfo.bh = NULL;
if (err)
goto error_dotdot;
- old_dir->i_version++;
+ inode_inc_iversion(old_dir);
old_dir->i_ctime = old_dir->i_mtime = ts;
if (IS_DIRSYNC(old_dir))
(void)fat_sync_inode(old_dir);
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index 02c066663a3a..cefea792cde8 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -20,7 +20,7 @@
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/kernel.h>
-
+#include <linux/iversion.h>
#include "fat.h"
static inline unsigned long vfat_d_version(struct dentry *dentry)
@@ -46,7 +46,7 @@ static int vfat_revalidate_shortname(struct dentry *dentry)
{
int ret = 1;
spin_lock(&dentry->d_lock);
- if (vfat_d_version(dentry) != d_inode(dentry->d_parent)->i_version)
+ if (inode_cmp_iversion(d_inode(dentry->d_parent), vfat_d_version(dentry)))
ret = 0;
spin_unlock(&dentry->d_lock);
return ret;
@@ -759,7 +759,7 @@ static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry,
out:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
if (!inode)
- vfat_d_version_set(dentry, dir->i_version);
+ vfat_d_version_set(dentry, inode_query_iversion(dir));
return d_splice_alias(inode, dentry);
error:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
@@ -781,7 +781,7 @@ static int vfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
err = vfat_add_entry(dir, &dentry->d_name, 0, 0, &ts, &sinfo);
if (err)
goto out;
- dir->i_version++;
+ inode_inc_iversion(dir);
inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos);
brelse(sinfo.bh);
@@ -789,7 +789,7 @@ static int vfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
err = PTR_ERR(inode);
goto out;
}
- inode->i_version++;
+ inode_inc_iversion(inode);
inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
/* timestamp is already written, so mark_inode_dirty() is unneeded. */
@@ -823,7 +823,7 @@ static int vfat_rmdir(struct inode *dir, struct dentry *dentry)
clear_nlink(inode);
inode->i_mtime = inode->i_atime = current_time(inode);
fat_detach(inode);
- vfat_d_version_set(dentry, dir->i_version);
+ vfat_d_version_set(dentry, inode_query_iversion(dir));
out:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
@@ -849,7 +849,7 @@ static int vfat_unlink(struct inode *dir, struct dentry *dentry)
clear_nlink(inode);
inode->i_mtime = inode->i_atime = current_time(inode);
fat_detach(inode);
- vfat_d_version_set(dentry, dir->i_version);
+ vfat_d_version_set(dentry, inode_query_iversion(dir));
out:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
@@ -875,7 +875,7 @@ static int vfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
err = vfat_add_entry(dir, &dentry->d_name, 1, cluster, &ts, &sinfo);
if (err)
goto out_free;
- dir->i_version++;
+ inode_inc_iversion(dir);
inc_nlink(dir);
inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos);
@@ -885,7 +885,7 @@ static int vfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 /* the directory was completed, just return an error */

goto out;
}
- inode->i_version++;
+ inode_inc_iversion(inode);
set_nlink(inode, 2);
inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
/* timestamp is already written, so mark_inode_dirty() is unneeded. */
@@ -951,7 +951,7 @@ static int vfat_rename(struct inode *old_dir, struct dentry *old_dentry,
goto out;
new_i_pos = sinfo.i_pos;
}
- new_dir->i_version++;
+ inode_inc_iversion(new_dir);
fat_detach(old_inode);
fat_attach(old_inode, new_i_pos);
@@ -979,7 +979,7 @@ static int vfat_rename(struct inode *old_dir, struct dentry *old_dentry,
old_sinfo.bh = NULL;
if (err)
goto error_dotdot;
- old_dir->i_version++;
+ inode_inc_iversion(old_dir);
old_dir->i_ctime = old_dir->i_mtime = ts;
if (IS_DIRSYNC(old_dir))
(void)fat_sync_inode(old_dir);
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 0522e283a4f4..c7b9e0948107 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -690,7 +690,7 @@ COMPAT_SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd,
/* Table to convert sigio signal codes into poll band bitmaps */
-static const long band_table[NSIGPOLL] = {
+static const __poll_t band_table[NSIGPOLL] = {
POLLIN | POLLRDNORM, /* POLL_IN */
POLLOUT | POLLWRNORM | POLLWRBAND, /* POLL_OUT */
POLLIN | POLLRDNORM | POLLMSG, /* POLL_MSG */
@@ -737,6 +737,7 @@ static void send_sigio_to_task(struct task_struct *p,
delivered even if we can't queue. Failure to
queue in this case _should_ be reported; we fall
back to SIGIO in that case. --sct */
+ clear_siginfo(&si);
si.si_signo = signum;
si.si_errno = 0;
si.si_code = reason;
@@ -758,7 +759,7 @@ static void send_sigio_to_task(struct task_struct *p,
if (reason - POLL_IN >= NSIGPOLL)
si.si_band = ~0L;
else
- si.si_band = band_table[reason - POLL_IN];
+ si.si_band = mangle_poll(band_table[reason - POLL_IN]);
si.si_fd = fd;
if (!do_send_sig_info(signum, &si, p, group))
break;
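
band_table above is indexed by the sigio si_code (POLL_IN through POLL_HUP) after subtracting POLL_IN, with out-of-range codes mapping to "all bands"; the new __poll_t type and mangle_poll() mark the point where kernel poll bits get converted to the userspace encoding. A standalone sketch of the table lookup, with the si_code constants redefined locally for a self-contained build (the MY_ names are not kernel identifiers):

#define _GNU_SOURCE	/* for POLLMSG, POLLRDNORM, ... in glibc */
#include <poll.h>
#include <stdio.h>

enum { MY_POLL_IN = 1, MY_POLL_OUT, MY_POLL_MSG, MY_POLL_ERR,
       MY_POLL_PRI, MY_POLL_HUP, MY_NSIGPOLL = 6 };

static const long band_table[MY_NSIGPOLL] = {
	POLLIN  | POLLRDNORM,			/* POLL_IN  */
	POLLOUT | POLLWRNORM | POLLWRBAND,	/* POLL_OUT */
	POLLIN  | POLLRDNORM | POLLMSG,		/* POLL_MSG */
	POLLERR,				/* POLL_ERR */
	POLLPRI | POLLRDBAND,			/* POLL_PRI */
	POLLHUP | POLLERR,			/* POLL_HUP */
};

static long band_for(int reason)
{
	if (reason < MY_POLL_IN || reason - MY_POLL_IN >= MY_NSIGPOLL)
		return ~0L;	/* unknown si_code: report all bands */
	return band_table[reason - MY_POLL_IN];
}

int main(void)
{
	printf("POLL_OUT -> %#lx\n", band_for(MY_POLL_OUT));
	printf("bogus    -> %#lx\n", band_for(42));
	return 0;
}
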
diff --git a/fs/file.c b/fs/file.c
index 3b080834b870..42f0db4bd0fb 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -11,18 +11,13 @@
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
-#include <linux/mmzone.h>
-#include <linux/time.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
-#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
-#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
-#include <linux/workqueue.h>
unsigned int sysctl_nr_open __read_mostly = 1024*1024;
unsigned int sysctl_nr_open_min = BITS_PER_LONG;
@@ -391,7 +386,7 @@ static struct fdtable *close_files(struct files_struct * files)
struct file * file = xchg(&fdt->fd[i], NULL);
if (file) {
filp_close(file, files);
- cond_resched_rcu_qs();
+ cond_resched();
}
}
i++;
diff --git a/fs/file_table.c b/fs/file_table.c
index 2dc9f38bd195..7ec0b3e5f05d 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -23,7 +23,6 @@
#include <linux/sysctl.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
-#include <linux/hardirq.h>
#include <linux/task_work.h>
#include <linux/ima.h>
#include <linux/swap.h>
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index cea4836385b7..d4d04fee568a 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -126,7 +126,7 @@ static void wb_io_lists_depopulated(struct bdi_writeback *wb)
* inode_io_list_move_locked - move an inode onto a bdi_writeback IO list
* @inode: inode to be moved
* @wb: target bdi_writeback
- * @head: one of @wb->b_{dirty|io|more_io}
+ * @head: one of @wb->b_{dirty|io|more_io|dirty_time}
*
* Move @inode->i_io_list to @list of @wb and set %WB_has_dirty_io.
* Returns %true if @inode is the first occupant of the !dirty_time IO
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 17f0d05bfd4c..aa089a6925d0 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -2004,9 +2004,9 @@ out:
return ret;
}
-static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
+static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
{
- unsigned mask = POLLOUT | POLLWRNORM;
+ __poll_t mask = POLLOUT | POLLWRNORM;
struct fuse_iqueue *fiq;
struct fuse_dev *fud = fuse_get_dev(file);
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index cb7dff5c45d7..e85e974dd211 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -2751,7 +2751,7 @@ static void fuse_register_polled_file(struct fuse_conn *fc,
spin_unlock(&fc->lock);
}
-unsigned fuse_file_poll(struct file *file, poll_table *wait)
+__poll_t fuse_file_poll(struct file *file, poll_table *wait)
{
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = ff->fc;
@@ -2764,7 +2764,7 @@ unsigned fuse_file_poll(struct file *file, poll_table *wait)
return DEFAULT_POLLMASK;
poll_wait(file, &ff->poll_wait, wait);
- inarg.events = (__u32)poll_requested_events(wait);
+ inarg.events = mangle_poll(poll_requested_events(wait));
/*
* Ask for notification iff there's someone waiting for it.
@@ -2786,7 +2786,7 @@ unsigned fuse_file_poll(struct file *file, poll_table *wait)
err = fuse_simple_request(fc, &args);
if (!err)
- return outarg.revents;
+ return demangle_poll(outarg.revents);
if (err == -ENOSYS) {
fc->no_poll = 1;
return DEFAULT_POLLMASK;
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index d5773ca67ad2..c4c093bbf456 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -951,7 +951,7 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
unsigned int flags);
long fuse_ioctl_common(struct file *file, unsigned int cmd,
unsigned long arg, unsigned int flags);
-unsigned fuse_file_poll(struct file *file, poll_table *wait);
+__poll_t fuse_file_poll(struct file *file, poll_table *wait);
int fuse_dev_release(struct inode *inode, struct file *file);
bool fuse_write_update_size(struct inode *inode, loff_t pos);
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig
index 43c827a7cce5..c0225d4b5435 100644
--- a/fs/gfs2/Kconfig
+++ b/fs/gfs2/Kconfig
@@ -3,6 +3,8 @@ config GFS2_FS
depends on (64BIT || LBDAF)
select FS_POSIX_ACL
select CRC32
+ select CRYPTO
+ select CRYPTO_CRC32C
select QUOTACTL
select FS_IOMAP
help
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 1daf15a1f00c..2f725b4a386b 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -39,18 +39,21 @@
static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
- unsigned int from, unsigned int to)
+ unsigned int from, unsigned int len)
{
struct buffer_head *head = page_buffers(page);
unsigned int bsize = head->b_size;
struct buffer_head *bh;
+ unsigned int to = from + len;
unsigned int start, end;
for (bh = head, start = 0; bh != head || !start;
bh = bh->b_this_page, start = end) {
end = start + bsize;
- if (end <= from || start >= to)
+ if (end <= from)
continue;
+ if (start >= to)
+ break;
if (gfs2_is_jdata(ip))
set_buffer_uptodate(bh);
gfs2_trans_add_data(ip->i_gl, bh);
@@ -189,7 +192,7 @@ static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *w
create_empty_buffers(page, inode->i_sb->s_blocksize,
BIT(BH_Dirty)|BIT(BH_Uptodate));
}
- gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
+ gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize);
}
return gfs2_write_full_page(page, gfs2_get_block_noalloc, wbc);
}
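
gfs2_page_add_databufs() above now takes a length instead of an inclusive end offset, which also fixes the off-by-one s_blocksize-1 call in __gfs2_jdata_writepage(): the loop adds exactly the buffer heads intersecting [from, from + len), skipping blocks that end before the range and stopping at the first block past it. A standalone model of the overlap walk (PAGE_SZ and the printf stand in for the circular buffer-head list and the journaling calls):

#include <stdio.h>

#define PAGE_SZ 4096u

static void add_databufs(unsigned int bsize, unsigned int from,
			 unsigned int len)
{
	unsigned int to = from + len;
	unsigned int start, end;

	for (start = 0; start < PAGE_SZ; start = end) {
		end = start + bsize;
		if (end <= from)	/* block entirely before the range */
			continue;
		if (start >= to)	/* past the range: stop early */
			break;
		printf("journal block [%u, %u)\n", start, end);
	}
}

int main(void)
{
	/* touches blocks [0, 1024) and [1024, 2048) only */
	add_databufs(1024, 512, 1024);
	return 0;
}
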
@@ -255,7 +258,6 @@ static int gfs2_writepages(struct address_space *mapping,
* @wbc: The writeback control
* @pvec: The vector of pages
* @nr_pages: The number of pages to write
- * @end: End position
* @done_index: Page index
*
* Returns: non-zero if loop should terminate, zero otherwise
@@ -264,7 +266,7 @@ static int gfs2_writepages(struct address_space *mapping,
static int gfs2_write_jdata_pagevec(struct address_space *mapping,
struct writeback_control *wbc,
struct pagevec *pvec,
- int nr_pages, pgoff_t end,
+ int nr_pages,
pgoff_t *done_index)
{
struct inode *inode = mapping->host;
@@ -402,7 +404,7 @@ retry:
if (nr_pages == 0)
break;
- ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, end, &done_index);
+ ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index);
if (ret)
done = 1;
if (ret > 0)
@@ -446,7 +448,8 @@ static int gfs2_jdata_writepages(struct address_space *mapping,
ret = gfs2_write_cache_jdata(mapping, wbc);
if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
- gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH);
+ gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
+ GFS2_LFC_JDATA_WPAGES);
ret = gfs2_write_cache_jdata(mapping, wbc);
}
return ret;
@@ -483,8 +486,8 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
return error;
kaddr = kmap_atomic(page);
- if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
- dsize = (dibh->b_size - sizeof(struct gfs2_dinode));
+ if (dsize > gfs2_max_stuffed_size(ip))
+ dsize = gfs2_max_stuffed_size(ip);
memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
kunmap_atomic(kaddr);
@@ -501,10 +504,9 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
* @file: The file to read a page for
* @page: The page to read
*
- * This is the core of gfs2's readpage. Its used by the internal file
- * reading code as in that case we already hold the glock. Also its
+ * This is the core of gfs2's readpage. It's used by the internal file
+ * reading code as in that case we already hold the glock. Also it's
* called by gfs2_readpage() once the required lock has been granted.
- *
*/
static int __gfs2_readpage(void *file, struct page *page)
@@ -725,7 +727,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
if (gfs2_is_stuffed(ip)) {
error = 0;
- if (pos + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
+ if (pos + len > gfs2_max_stuffed_size(ip)) {
error = gfs2_unstuff_dinode(ip, page);
if (error == 0)
goto prepare_write;
@@ -832,7 +834,8 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
void *kaddr;
unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);
- BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode)));
+ BUG_ON(pos + len > gfs2_max_stuffed_size(ip));
+
kaddr = kmap_atomic(page);
memcpy(buf + pos, kaddr + pos, copied);
flush_dcache_page(page);
@@ -890,8 +893,6 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
struct buffer_head *dibh;
- unsigned int from = pos & (PAGE_SIZE - 1);
- unsigned int to = from + len;
int ret;
struct gfs2_trans *tr = current->journal_info;
BUG_ON(!tr);
@@ -909,7 +910,7 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page);
if (!gfs2_is_writeback(ip))
- gfs2_page_add_databufs(ip, page, from, to);
+ gfs2_page_add_databufs(ip, page, pos & ~PAGE_MASK, len);
ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
if (tr->tr_num_buf_new)
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index d5f0d96169c5..86863792f36a 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -69,8 +69,8 @@ static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
void *kaddr = kmap(page);
u64 dsize = i_size_read(inode);
- if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
- dsize = dibh->b_size - sizeof(struct gfs2_dinode);
+ if (dsize > gfs2_max_stuffed_size(ip))
+ dsize = gfs2_max_stuffed_size(ip);
memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
@@ -279,14 +279,13 @@ static inline __be64 *metapointer(unsigned int height, const struct metapath *mp
return p + mp->mp_list[height];
}
-static void gfs2_metapath_ra(struct gfs2_glock *gl,
- const struct buffer_head *bh, const __be64 *pos)
+static void gfs2_metapath_ra(struct gfs2_glock *gl, __be64 *start, __be64 *end)
{
- struct buffer_head *rabh;
- const __be64 *endp = (const __be64 *)(bh->b_data + bh->b_size);
const __be64 *t;
- for (t = pos; t < endp; t++) {
+ for (t = start; t < end; t++) {
+ struct buffer_head *rabh;
+
if (!*t)
continue;
@@ -305,21 +304,22 @@ static void gfs2_metapath_ra(struct gfs2_glock *gl,
}
}
-/**
- * lookup_mp_height - helper function for lookup_metapath
- * @ip: the inode
- * @mp: the metapath
- * @h: the height which needs looking up
- */
-static int lookup_mp_height(struct gfs2_inode *ip, struct metapath *mp, int h)
+static int __fillup_metapath(struct gfs2_inode *ip, struct metapath *mp,
+ unsigned int x, unsigned int h)
{
- __be64 *ptr = metapointer(h, mp);
- u64 dblock = be64_to_cpu(*ptr);
-
- if (!dblock)
- return h + 1;
+ for (; x < h; x++) {
+ __be64 *ptr = metapointer(x, mp);
+ u64 dblock = be64_to_cpu(*ptr);
+ int ret;
- return gfs2_meta_indirect_buffer(ip, h + 1, dblock, &mp->mp_bh[h + 1]);
+ if (!dblock)
+ break;
+ ret = gfs2_meta_indirect_buffer(ip, x + 1, dblock, &mp->mp_bh[x + 1]);
+ if (ret)
+ return ret;
+ }
+ mp->mp_aheight = x + 1;
+ return 0;
}
/**
@@ -336,25 +336,12 @@ static int lookup_mp_height(struct gfs2_inode *ip, struct metapath *mp, int h)
* at which it found the unallocated block. Blocks which are found are
* added to the mp->mp_bh[] list.
*
- * Returns: error or height of metadata tree
+ * Returns: error
*/
static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp)
{
- unsigned int end_of_metadata = ip->i_height - 1;
- unsigned int x;
- int ret;
-
- for (x = 0; x < end_of_metadata; x++) {
- ret = lookup_mp_height(ip, mp, x);
- if (ret)
- goto out;
- }
-
- ret = ip->i_height;
-out:
- mp->mp_aheight = ret;
- return ret;
+ return __fillup_metapath(ip, mp, 0, ip->i_height - 1);
}
/**
@@ -365,25 +352,25 @@ out:
*
* Similar to lookup_metapath, but does lookups for a range of heights
*
- * Returns: error or height of metadata tree
+ * Returns: error or the number of buffers filled
*/
static int fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, int h)
{
- unsigned int start_h = h - 1;
+ unsigned int x = 0;
int ret;
if (h) {
/* find the first buffer we need to look up. */
- while (start_h > 0 && mp->mp_bh[start_h] == NULL)
- start_h--;
- for (; start_h < h; start_h++) {
- ret = lookup_mp_height(ip, mp, start_h);
- if (ret)
- return ret;
+ for (x = h - 1; x > 0; x--) {
+ if (mp->mp_bh[x])
+ break;
}
}
- return ip->i_height;
+ ret = __fillup_metapath(ip, mp, x, h);
+ if (ret)
+ return ret;
+ return mp->mp_aheight - x - 1;
}
static inline void release_metapath(struct metapath *mp)
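
__fillup_metapath() above centralizes the walk that both lookup_metapath() and fillup_metapath() need: read indirect blocks from height x toward h, stop quietly at the first null pointer, and record in mp_aheight how deep the walk got. A simplified model with the buffer I/O replaced by an array of block numbers (all names and values invented):

#include <stdio.h>

#define MAX_HEIGHT 11

struct metapath_model {
	unsigned long long dblock[MAX_HEIGHT];	/* 0 = unallocated */
	unsigned int aheight;			/* heights with buffers */
};

static int fillup(struct metapath_model *mp, unsigned int x, unsigned int h)
{
	for (; x < h; x++) {
		if (!mp->dblock[x])
			break;	/* hole: stop, but not an error */
		/* here the real code reads the indirect buffer for x + 1 */
	}
	mp->aheight = x + 1;
	return 0;
}

int main(void)
{
	struct metapath_model mp = { .dblock = { 17, 23, 0 } };

	fillup(&mp, 0, 3);	/* like lookup_metapath() with i_height 4 */
	printf("aheight = %u\n", mp.aheight);	/* 3: stopped at the hole */
	return 0;
}

fillup_metapath()'s new return value, mp_aheight - x - 1, is then simply the number of buffers this walk filled in, which the caller uses to issue read-ahead on exactly those levels.
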
@@ -474,13 +461,6 @@ enum alloc_state {
/* ALLOC_UNSTUFF = 3, TBD and rather complicated */
};
-static inline unsigned int hptrs(struct gfs2_sbd *sdp, const unsigned int hgt)
-{
- if (hgt)
- return sdp->sd_inptrs;
- return sdp->sd_diptrs;
-}
-
/**
* gfs2_bmap_alloc - Build a metadata tree of the requested height
* @inode: The GFS2 inode
@@ -788,7 +768,7 @@ int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
goto do_alloc;
ret = lookup_metapath(ip, &mp);
- if (ret < 0)
+ if (ret)
goto out_release;
if (mp.mp_aheight != ip->i_height)
@@ -913,17 +893,18 @@ int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsi
}
/**
- * gfs2_block_truncate_page - Deal with zeroing out data for truncate
+ * gfs2_block_zero_range - Deal with zeroing out data
*
* This is partly borrowed from ext3.
*/
-static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from)
+static int gfs2_block_zero_range(struct inode *inode, loff_t from,
+ unsigned int length)
{
- struct inode *inode = mapping->host;
+ struct address_space *mapping = inode->i_mapping;
struct gfs2_inode *ip = GFS2_I(inode);
unsigned long index = from >> PAGE_SHIFT;
unsigned offset = from & (PAGE_SIZE-1);
- unsigned blocksize, iblock, length, pos;
+ unsigned blocksize, iblock, pos;
struct buffer_head *bh;
struct page *page;
int err;
@@ -933,7 +914,6 @@ static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from)
return 0;
blocksize = inode->i_sb->s_blocksize;
- length = blocksize - (offset & (blocksize - 1));
iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
if (!page_has_buffers(page))
@@ -1003,11 +983,24 @@ static int gfs2_journaled_truncate(struct inode *inode, u64 oldsize, u64 newsize
int error;
while (oldsize != newsize) {
+ struct gfs2_trans *tr;
+ unsigned int offs;
+
chunk = oldsize - newsize;
if (chunk > max_chunk)
chunk = max_chunk;
+
+ offs = oldsize & ~PAGE_MASK;
+ if (offs && chunk > PAGE_SIZE)
+ chunk = offs + ((chunk - offs) & PAGE_MASK);
+
truncate_pagecache(inode, oldsize - chunk);
oldsize -= chunk;
+
+ tr = current->journal_info;
+ if (!test_bit(TR_TOUCHED, &tr->tr_flags))
+ continue;
+
gfs2_trans_end(sdp);
error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
if (error)
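
The new chunk clamping above keeps journaled truncation page-aligned: the first pass may end at an arbitrary offset, but offs folds the partial page into the first chunk so every later truncate_pagecache() call lands on a page boundary. A standalone run of the arithmetic for a 4KiB page (the sizes are chosen only for illustration):

#include <stdio.h>

#define PAGE_SIZE 4096ULL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long long oldsize = 10000, newsize = 0, max_chunk = 8192;

	while (oldsize != newsize) {
		unsigned long long chunk = oldsize - newsize;
		unsigned long long offs;

		if (chunk > max_chunk)
			chunk = max_chunk;
		offs = oldsize & ~PAGE_MASK;
		if (offs && chunk > PAGE_SIZE)
			chunk = offs + ((chunk - offs) & PAGE_MASK);
		oldsize -= chunk;
		/* prints 4096, then 0: later points are page aligned */
		printf("truncated to %llu\n", oldsize);
	}
	return 0;
}
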
@@ -1017,13 +1010,13 @@ static int gfs2_journaled_truncate(struct inode *inode, u64 oldsize, u64 newsize
return 0;
}
-static int trunc_start(struct inode *inode, u64 oldsize, u64 newsize)
+static int trunc_start(struct inode *inode, u64 newsize)
{
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
- struct address_space *mapping = inode->i_mapping;
- struct buffer_head *dibh;
+ struct buffer_head *dibh = NULL;
int journaled = gfs2_is_jdata(ip);
+ u64 oldsize = inode->i_size;
int error;
if (journaled)
@@ -1042,10 +1035,13 @@ static int trunc_start(struct inode *inode, u64 oldsize, u64 newsize)
if (gfs2_is_stuffed(ip)) {
gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize);
} else {
- if (newsize & (u64)(sdp->sd_sb.sb_bsize - 1)) {
- error = gfs2_block_truncate_page(mapping, newsize);
+ unsigned int blocksize = i_blocksize(inode);
+ unsigned int offs = newsize & (blocksize - 1);
+ if (offs) {
+ error = gfs2_block_zero_range(inode, newsize,
+ blocksize - offs);
if (error)
- goto out_brelse;
+ goto out;
}
ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;
}
@@ -1059,15 +1055,10 @@ static int trunc_start(struct inode *inode, u64 oldsize, u64 newsize)
else
truncate_pagecache(inode, newsize);
- if (error) {
- brelse(dibh);
- return error;
- }
-
-out_brelse:
- brelse(dibh);
out:
- gfs2_trans_end(sdp);
+ brelse(dibh);
+ if (current->journal_info)
+ gfs2_trans_end(sdp);
return error;
}
@@ -1075,10 +1066,11 @@ out:
* sweep_bh_for_rgrps - find an rgrp in a meta buffer and free blocks therein
* @ip: inode
* @rg_gh: holder of resource group glock
- * @mp: current metapath fully populated with buffers
+ * @bh: buffer head to sweep
+ * @start: starting point in bh
+ * @end: end point in bh
+ * @meta: true if bh points to metadata (rather than data)
* @btotal: place to keep count of total blocks freed
- * @hgt: height we're processing
- * @first: true if this is the first call to this function for this height
*
* We sweep a metadata buffer (provided by the metapath) for blocks we need to
* free, and free them all. However, we do it one rgrp at a time. If this
@@ -1093,47 +1085,46 @@ out:
* *btotal has the total number of blocks freed
*/
static int sweep_bh_for_rgrps(struct gfs2_inode *ip, struct gfs2_holder *rd_gh,
- const struct metapath *mp, u32 *btotal, int hgt,
- bool preserve1)
+ struct buffer_head *bh, __be64 *start, __be64 *end,
+ bool meta, u32 *btotal)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_rgrpd *rgd;
struct gfs2_trans *tr;
- struct buffer_head *bh = mp->mp_bh[hgt];
- __be64 *top, *bottom, *p;
+ __be64 *p;
int blks_outside_rgrp;
u64 bn, bstart, isize_blks;
s64 blen; /* needs to be s64 or gfs2_add_inode_blocks breaks */
- int meta = ((hgt != ip->i_height - 1) ? 1 : 0);
int ret = 0;
bool buf_in_tr = false; /* buffer was added to transaction */
- if (gfs2_metatype_check(sdp, bh,
- (hgt ? GFS2_METATYPE_IN : GFS2_METATYPE_DI)))
- return -EIO;
-
more_rgrps:
+ rgd = NULL;
+ if (gfs2_holder_initialized(rd_gh)) {
+ rgd = gfs2_glock2rgrp(rd_gh->gh_gl);
+ gfs2_assert_withdraw(sdp,
+ gfs2_glock_is_locked_by_me(rd_gh->gh_gl));
+ }
blks_outside_rgrp = 0;
bstart = 0;
blen = 0;
- top = metapointer(hgt, mp); /* first ptr from metapath */
- /* If we're keeping some data at the truncation point, we've got to
- preserve the metadata tree by adding 1 to the starting metapath. */
- if (preserve1)
- top++;
- bottom = (__be64 *)(bh->b_data + bh->b_size);
-
- for (p = top; p < bottom; p++) {
+ for (p = start; p < end; p++) {
if (!*p)
continue;
bn = be64_to_cpu(*p);
- if (gfs2_holder_initialized(rd_gh)) {
- rgd = gfs2_glock2rgrp(rd_gh->gh_gl);
- gfs2_assert_withdraw(sdp,
- gfs2_glock_is_locked_by_me(rd_gh->gh_gl));
+
+ if (rgd) {
+ if (!rgrp_contains_block(rgd, bn)) {
+ blks_outside_rgrp++;
+ continue;
+ }
} else {
- rgd = gfs2_blk2rgrpd(sdp, bn, false);
+ rgd = gfs2_blk2rgrpd(sdp, bn, true);
+ if (unlikely(!rgd)) {
+ ret = -EIO;
+ goto out;
+ }
ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
0, rd_gh);
if (ret)
@@ -1145,11 +1136,6 @@ more_rgrps:
gfs2_rs_deltree(&ip->i_res);
}
- if (!rgrp_contains_block(rgd, bn)) {
- blks_outside_rgrp++;
- continue;
- }
-
/* The size of our transactions will be unknown until we
actually process all the metadata blocks that relate to
the rgrp. So we estimate. We know it can't be more than
@@ -1168,7 +1154,7 @@ more_rgrps:
jblocks_rqsted += isize_blks;
revokes = jblocks_rqsted;
if (meta)
- revokes += hptrs(sdp, hgt);
+ revokes += end - start;
else if (ip->i_depth)
revokes += sdp->sd_inptrs;
ret = gfs2_trans_begin(sdp, jblocks_rqsted, revokes);
@@ -1226,7 +1212,11 @@ out_unlock:
outside the rgrp we just processed,
do it all over again. */
if (current->journal_info) {
- struct buffer_head *dibh = mp->mp_bh[0];
+ struct buffer_head *dibh;
+
+ ret = gfs2_meta_inode_buffer(ip, &dibh);
+ if (ret)
+ goto out;
/* Every transaction boundary, we rewrite the dinode
to keep its di_blocks current in case of failure. */
@@ -1234,6 +1224,7 @@ out_unlock:
current_time(&ip->i_inode);
gfs2_trans_add_meta(ip->i_gl, dibh);
gfs2_dinode_out(ip, dibh->b_data);
+ brelse(dibh);
up_write(&ip->i_rw_mutex);
gfs2_trans_end(sdp);
}
@@ -1245,38 +1236,48 @@ out:
return ret;
}
+static bool mp_eq_to_hgt(struct metapath *mp, __u16 *list, unsigned int h)
+{
+ if (memcmp(mp->mp_list, list, h * sizeof(mp->mp_list[0])))
+ return false;
+ return true;
+}
+
/**
* find_nonnull_ptr - find a non-null pointer given a metapath and height
- * assumes the metapath is valid (with buffers) out to height h
* @mp: starting metapath
* @h: desired height to search
*
+ * Assumes the metapath is valid (with buffers) out to height h.
* Returns: true if a non-null pointer was found in the metapath buffer
* false if all remaining pointers are NULL in the buffer
*/
static bool find_nonnull_ptr(struct gfs2_sbd *sdp, struct metapath *mp,
- unsigned int h)
+ unsigned int h,
+ __u16 *end_list, unsigned int end_aligned)
{
- __be64 *ptr;
- unsigned int ptrs = hptrs(sdp, h) - 1;
+ struct buffer_head *bh = mp->mp_bh[h];
+ __be64 *first, *ptr, *end;
+
+ first = metaptr1(h, mp);
+ ptr = first + mp->mp_list[h];
+ end = (__be64 *)(bh->b_data + bh->b_size);
+ if (end_list && mp_eq_to_hgt(mp, end_list, h)) {
+ bool keep_end = h < end_aligned;
+ end = first + end_list[h] + keep_end;
+ }
- while (true) {
- ptr = metapointer(h, mp);
+ while (ptr < end) {
if (*ptr) { /* if we have a non-null pointer */
- /* Now zero the metapath after the current height. */
+ mp->mp_list[h] = ptr - first;
h++;
if (h < GFS2_MAX_META_HEIGHT)
- memset(&mp->mp_list[h], 0,
- (GFS2_MAX_META_HEIGHT - h) *
- sizeof(mp->mp_list[0]));
+ mp->mp_list[h] = 0;
return true;
}
-
- if (mp->mp_list[h] < ptrs)
- mp->mp_list[h]++;
- else
- return false; /* no more pointers in this buffer */
+ ptr++;
}
+ return false;
}
enum dealloc_states {
@@ -1286,49 +1287,126 @@ enum dealloc_states {
DEALLOC_DONE = 3, /* process complete */
};
-static bool mp_eq_to_hgt(struct metapath *mp, __u16 *nbof, unsigned int h)
+static inline void
+metapointer_range(struct metapath *mp, int height,
+ __u16 *start_list, unsigned int start_aligned,
+ __u16 *end_list, unsigned int end_aligned,
+ __be64 **start, __be64 **end)
{
- if (memcmp(mp->mp_list, nbof, h * sizeof(mp->mp_list[0])))
- return false;
- return true;
+ struct buffer_head *bh = mp->mp_bh[height];
+ __be64 *first;
+
+ first = metaptr1(height, mp);
+ *start = first;
+ if (mp_eq_to_hgt(mp, start_list, height)) {
+ bool keep_start = height < start_aligned;
+ *start = first + start_list[height] + keep_start;
+ }
+ *end = (__be64 *)(bh->b_data + bh->b_size);
+ if (end_list && mp_eq_to_hgt(mp, end_list, height)) {
+ bool keep_end = height < end_aligned;
+ *end = first + end_list[height] + keep_end;
+ }
+}
+
+static inline bool walk_done(struct gfs2_sbd *sdp,
+ struct metapath *mp, int height,
+ __u16 *end_list, unsigned int end_aligned)
+{
+ __u16 end;
+
+ if (end_list) {
+ bool keep_end = height < end_aligned;
+ if (!mp_eq_to_hgt(mp, end_list, height))
+ return false;
+ end = end_list[height] + keep_end;
+ } else
+ end = (height > 0) ? sdp->sd_inptrs : sdp->sd_diptrs;
+ return mp->mp_list[height] >= end;
}
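
metapointer_range() and walk_done() above bound the deallocation walk: on the path that still matches end_list (checked with mp_eq_to_hgt()), the end position is clamped to the hole's end index, and keep_end preserves one extra pointer at heights below end_aligned. A small model of walk_done() with invented indices and pointer counts:

#include <stdbool.h>
#include <stdio.h>

/* pos[] is the walk position (mp_list), end_list[] the hole's end */
static bool walk_done(const unsigned short *pos,
		      const unsigned short *end_list,
		      unsigned int end_aligned, unsigned int height,
		      unsigned short nptrs)
{
	unsigned short end;

	if (end_list) {
		bool keep_end = height < end_aligned;
		unsigned int h;

		for (h = 0; h < height; h++)	/* mp_eq_to_hgt() */
			if (pos[h] != end_list[h])
				return false;	/* not on the end path */
		end = end_list[height] + keep_end;
	} else {
		end = nptrs;	/* plain truncate: sweep the whole buffer */
	}
	return pos[height] >= end;
}

int main(void)
{
	unsigned short pos[3]      = { 2, 5, 0 };
	unsigned short end_list[3] = { 2, 5, 9 };

	/* end_aligned = 1, so keep_end is false at height 2 */
	printf("%d\n", walk_done(pos, end_list, 1, 2, 509));	/* 0: 0 < 9 */
	pos[2] = 9;
	printf("%d\n", walk_done(pos, end_list, 1, 2, 509));	/* 1: done */
	return 0;
}
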
/**
- * trunc_dealloc - truncate a file down to a desired size
+ * punch_hole - deallocate blocks in a file
* @ip: inode to truncate
- * @newsize: The desired size of the file
+ * @offset: the start of the hole
+ * @length: the size of the hole (or 0 for truncate)
+ *
+ * Punch a hole into a file or truncate a file at a given position. This
+ * function operates in whole blocks (@offset and @length are rounded
+ * accordingly); partially filled blocks must be cleared otherwise.
*
- * This function truncates a file to newsize. It works from the
- * bottom up, and from the right to the left. In other words, it strips off
- * the highest layer (data) before stripping any of the metadata. Doing it
- * this way is best in case the operation is interrupted by power failure, etc.
- * The dinode is rewritten in every transaction to guarantee integrity.
+ * This function works from the bottom up, and from the right to the left. In
+ * other words, it strips off the highest layer (data) before stripping any of
+ * the metadata. Doing it this way is best in case the operation is interrupted
+ * by power failure, etc. The dinode is rewritten in every transaction to
+ * guarantee integrity.
*/
-static int trunc_dealloc(struct gfs2_inode *ip, u64 newsize)
+static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- struct metapath mp;
+ struct metapath mp = {};
struct buffer_head *dibh, *bh;
struct gfs2_holder rd_gh;
- u64 lblock;
- __u16 nbof[GFS2_MAX_META_HEIGHT]; /* new beginning of truncation */
+ unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
+ u64 lblock = (offset + (1 << bsize_shift) - 1) >> bsize_shift;
+ __u16 start_list[GFS2_MAX_META_HEIGHT];
+ __u16 __end_list[GFS2_MAX_META_HEIGHT], *end_list = NULL;
+ unsigned int start_aligned, uninitialized_var(end_aligned);
unsigned int strip_h = ip->i_height - 1;
u32 btotal = 0;
int ret, state;
int mp_h; /* metapath buffers are read in to this height */
- sector_t last_ra = 0;
u64 prev_bnr = 0;
- bool preserve1; /* need to preserve the first meta pointer? */
+ __be64 *start, *end;
- if (!newsize)
- lblock = 0;
- else
- lblock = (newsize - 1) >> sdp->sd_sb.sb_bsize_shift;
+ /*
+ * The start position of the hole is defined by lblock, start_list, and
+ * start_aligned. The end position of the hole is defined by lend,
+ * end_list, and end_aligned.
+ *
+ * start_aligned and end_aligned define down to which height the start
+ * and end positions are aligned to the metadata tree (i.e., the
+ * position is a multiple of the metadata granularity at the height
+ * above). This determines at which heights additional meta pointers
+	 * need to be preserved for the remaining data.
+ */
+
+ if (length) {
+ u64 maxsize = sdp->sd_heightsize[ip->i_height];
+ u64 end_offset = offset + length;
+ u64 lend;
+
+ /*
+ * Clip the end at the maximum file size for the given height:
+ * that's how far the metadata goes; files bigger than that
+ * will have additional layers of indirection.
+ */
+ if (end_offset > maxsize)
+ end_offset = maxsize;
+ lend = end_offset >> bsize_shift;
+
+ if (lblock >= lend)
+ return 0;
+
+ find_metapath(sdp, lend, &mp, ip->i_height);
+ end_list = __end_list;
+ memcpy(end_list, mp.mp_list, sizeof(mp.mp_list));
+
+ for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) {
+ if (end_list[mp_h])
+ break;
+ }
+ end_aligned = mp_h;
+ }
- memset(&mp, 0, sizeof(mp));
find_metapath(sdp, lblock, &mp, ip->i_height);
+ memcpy(start_list, mp.mp_list, sizeof(start_list));
- memcpy(&nbof, &mp.mp_list, sizeof(nbof));
+ for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) {
+ if (start_list[mp_h])
+ break;
+ }
+ start_aligned = mp_h;
ret = gfs2_meta_inode_buffer(ip, &dibh);
if (ret)
@@ -1336,7 +1414,17 @@ static int trunc_dealloc(struct gfs2_inode *ip, u64 newsize)
mp.mp_bh[0] = dibh;
ret = lookup_metapath(ip, &mp);
- if (ret == ip->i_height)
+ if (ret)
+ goto out_metapath;
+
+ /* issue read-ahead on metadata */
+ for (mp_h = 0; mp_h < mp.mp_aheight - 1; mp_h++) {
+ metapointer_range(&mp, mp_h, start_list, start_aligned,
+ end_list, end_aligned, &start, &end);
+ gfs2_metapath_ra(ip->i_gl, start, end);
+ }
+
+ if (mp.mp_aheight == ip->i_height)
state = DEALLOC_MP_FULL; /* We have a complete metapath */
else
state = DEALLOC_FILL_MP; /* deal with partial metapath */
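
The setup above derives start_list/end_list from find_metapath() and then computes how deep each boundary is aligned to the metadata tree: scanning from the deepest height, the first non-zero index marks the height where extra pointers must be preserved. A standalone model of that computation; the division loop mimics find_metapath(), and the pointer counts are what a 4KiB-block filesystem would use (483 dinode pointers, 509 per indirect block), hard-coded here rather than read from a superblock:

#include <stdio.h>

#define HEIGHT 4

int main(void)
{
	unsigned long long lblock = 2ULL * 509 * 509; /* on a deep boundary */
	unsigned int ptrs[HEIGHT] = { 483, 509, 509, 509 };
	unsigned int list[HEIGHT];
	int h, aligned;

	/* find_metapath(): peel indices off from the deepest height up */
	for (h = HEIGHT - 1; h >= 0; h--) {
		list[h] = lblock % ptrs[h];
		lblock /= ptrs[h];
	}

	/* deepest non-zero index = alignment height (0 if fully aligned) */
	for (h = HEIGHT - 1; h > 0; h--)
		if (list[h])
			break;
	aligned = h;

	printf("list = {%u, %u, %u, %u}, start_aligned = %d\n",
	       list[0], list[1], list[2], list[3], aligned);
	/* prints {0, 2, 0, 0} and 1: heights 2 and 3 sit on boundaries */
	return 0;
}
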
@@ -1357,20 +1445,6 @@ static int trunc_dealloc(struct gfs2_inode *ip, u64 newsize)
/* Truncate a full metapath at the given strip height.
* Note that strip_h == mp_h in order to be in this state. */
case DEALLOC_MP_FULL:
- if (mp_h > 0) { /* issue read-ahead on metadata */
- __be64 *top;
-
- bh = mp.mp_bh[mp_h - 1];
- if (bh->b_blocknr != last_ra) {
- last_ra = bh->b_blocknr;
- top = metaptr1(mp_h - 1, &mp);
- gfs2_metapath_ra(ip->i_gl, bh, top);
- }
- }
- /* If we're truncating to a non-zero size and the mp is
- at the beginning of file for the strip height, we
- need to preserve the first metadata pointer. */
- preserve1 = (newsize && mp_eq_to_hgt(&mp, nbof, mp_h));
bh = mp.mp_bh[mp_h];
gfs2_assert_withdraw(sdp, bh);
if (gfs2_assert_withdraw(sdp,
@@ -1382,8 +1456,28 @@ static int trunc_dealloc(struct gfs2_inode *ip, u64 newsize)
prev_bnr, ip->i_height, strip_h, mp_h);
}
prev_bnr = bh->b_blocknr;
- ret = sweep_bh_for_rgrps(ip, &rd_gh, &mp, &btotal,
- mp_h, preserve1);
+
+ if (gfs2_metatype_check(sdp, bh,
+ (mp_h ? GFS2_METATYPE_IN :
+ GFS2_METATYPE_DI))) {
+ ret = -EIO;
+ goto out;
+ }
+
+ /*
+ * Below, passing end_aligned as 0 gives us the
+ * metapointer range excluding the end point: the end
+ * point is the first metapath we must not deallocate!
+ */
+
+ metapointer_range(&mp, mp_h, start_list, start_aligned,
+ end_list, 0 /* end_aligned */,
+ &start, &end);
+ ret = sweep_bh_for_rgrps(ip, &rd_gh, mp.mp_bh[mp_h],
+ start, end,
+ mp_h != ip->i_height - 1,
+ &btotal);
+
/* If we hit an error or just swept dinode buffer,
just exit. */
if (ret || !mp_h) {
@@ -1407,20 +1501,20 @@ static int trunc_dealloc(struct gfs2_inode *ip, u64 newsize)
stripping the previous level of metadata. */
if (mp_h == 0) {
strip_h--;
- memcpy(&mp.mp_list, &nbof, sizeof(nbof));
+ memcpy(mp.mp_list, start_list, sizeof(start_list));
mp_h = strip_h;
state = DEALLOC_FILL_MP;
break;
}
mp.mp_list[mp_h] = 0;
mp_h--; /* search one metadata height down */
- if (mp.mp_list[mp_h] >= hptrs(sdp, mp_h) - 1)
- break; /* loop around in the same state */
mp.mp_list[mp_h]++;
+ if (walk_done(sdp, &mp, mp_h, end_list, end_aligned))
+ break;
/* Here we've found a part of the metapath that is not
* allocated. We need to search at that height for the
* next non-null pointer. */
- if (find_nonnull_ptr(sdp, &mp, mp_h)) {
+ if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned)) {
state = DEALLOC_FILL_MP;
mp_h++;
}
@@ -1435,18 +1529,29 @@ static int trunc_dealloc(struct gfs2_inode *ip, u64 newsize)
if (ret < 0)
goto out;
+ /* issue read-ahead on metadata */
+ if (mp.mp_aheight > 1) {
+ for (; ret > 1; ret--) {
+ metapointer_range(&mp, mp.mp_aheight - ret,
+ start_list, start_aligned,
+ end_list, end_aligned,
+ &start, &end);
+ gfs2_metapath_ra(ip->i_gl, start, end);
+ }
+ }
+
/* If buffers found for the entire strip height */
- if ((ret == ip->i_height) && (mp_h == strip_h)) {
+ if (mp.mp_aheight - 1 == strip_h) {
state = DEALLOC_MP_FULL;
break;
}
- if (ret < ip->i_height) /* We have a partial height */
- mp_h = ret - 1;
+ if (mp.mp_aheight < ip->i_height) /* We have a partial height */
+ mp_h = mp.mp_aheight - 1;
/* If we find a non-null block pointer, crawl a bit
higher up in the metapath and try again, otherwise
we need to look lower for a new starting point. */
- if (find_nonnull_ptr(sdp, &mp, mp_h))
+ if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned))
mp_h++;
else
state = DEALLOC_MP_LOWER;
@@ -1524,7 +1629,6 @@ out:
/**
* do_shrink - make a file smaller
* @inode: the inode
- * @oldsize: the current inode size
* @newsize: the size to make the file
*
* Called with an exclusive lock on @inode. The @size must
@@ -1533,18 +1637,18 @@ out:
* Returns: errno
*/
-static int do_shrink(struct inode *inode, u64 oldsize, u64 newsize)
+static int do_shrink(struct inode *inode, u64 newsize)
{
struct gfs2_inode *ip = GFS2_I(inode);
int error;
- error = trunc_start(inode, oldsize, newsize);
+ error = trunc_start(inode, newsize);
if (error < 0)
return error;
if (gfs2_is_stuffed(ip))
return 0;
- error = trunc_dealloc(ip, newsize);
+ error = punch_hole(ip, newsize, 0);
if (error == 0)
error = trunc_end(ip);
@@ -1553,10 +1657,9 @@ static int do_shrink(struct inode *inode, u64 oldsize, u64 newsize)
void gfs2_trim_blocks(struct inode *inode)
{
- u64 size = inode->i_size;
int ret;
- ret = do_shrink(inode, size, size);
+ ret = do_shrink(inode, inode->i_size);
WARN_ON(ret != 0);
}
@@ -1589,8 +1692,7 @@ static int do_grow(struct inode *inode, u64 size)
int error;
int unstuff = 0;
- if (gfs2_is_stuffed(ip) &&
- (size > (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)))) {
+ if (gfs2_is_stuffed(ip) && size > gfs2_max_stuffed_size(ip)) {
error = gfs2_quota_lock_check(ip, &ap);
if (error)
return error;
@@ -1650,7 +1752,6 @@ int gfs2_setattr_size(struct inode *inode, u64 newsize)
{
struct gfs2_inode *ip = GFS2_I(inode);
int ret;
- u64 oldsize;
BUG_ON(!S_ISREG(inode->i_mode));
@@ -1664,13 +1765,12 @@ int gfs2_setattr_size(struct inode *inode, u64 newsize)
if (ret)
goto out;
- oldsize = inode->i_size;
- if (newsize >= oldsize) {
+ if (newsize >= inode->i_size) {
ret = do_grow(inode, newsize);
goto out;
}
- ret = do_shrink(inode, oldsize, newsize);
+ ret = do_shrink(inode, newsize);
out:
gfs2_rsqa_delete(ip, NULL);
return ret;
@@ -1679,7 +1779,7 @@ out:
int gfs2_truncatei_resume(struct gfs2_inode *ip)
{
int error;
- error = trunc_dealloc(ip, i_size_read(&ip->i_inode));
+ error = punch_hole(ip, i_size_read(&ip->i_inode), 0);
if (!error)
error = trunc_end(ip);
return error;
@@ -1687,7 +1787,7 @@ int gfs2_truncatei_resume(struct gfs2_inode *ip)
int gfs2_file_dealloc(struct gfs2_inode *ip)
{
- return trunc_dealloc(ip, 0);
+ return punch_hole(ip, 0, 0);
}
/**
@@ -1827,8 +1927,7 @@ int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
return 0;
if (gfs2_is_stuffed(ip)) {
- if (offset + len >
- sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode))
+ if (offset + len > gfs2_max_stuffed_size(ip))
return 1;
return 0;
}
@@ -1855,3 +1954,123 @@ int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
return 0;
}
+static int stuffed_zero_range(struct inode *inode, loff_t offset, loff_t length)
+{
+ struct gfs2_inode *ip = GFS2_I(inode);
+ struct buffer_head *dibh;
+ int error;
+
+ if (offset >= inode->i_size)
+ return 0;
+ if (offset + length > inode->i_size)
+ length = inode->i_size - offset;
+
+ error = gfs2_meta_inode_buffer(ip, &dibh);
+ if (error)
+ return error;
+ gfs2_trans_add_meta(ip->i_gl, dibh);
+ memset(dibh->b_data + sizeof(struct gfs2_dinode) + offset, 0,
+ length);
+ brelse(dibh);
+ return 0;
+}
+
+static int gfs2_journaled_truncate_range(struct inode *inode, loff_t offset,
+ loff_t length)
+{
+ struct gfs2_sbd *sdp = GFS2_SB(inode);
+ loff_t max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
+ int error;
+
+ while (length) {
+ struct gfs2_trans *tr;
+ loff_t chunk;
+ unsigned int offs;
+
+ chunk = length;
+ if (chunk > max_chunk)
+ chunk = max_chunk;
+
+ offs = offset & ~PAGE_MASK;
+ if (offs && chunk > PAGE_SIZE)
+ chunk = offs + ((chunk - offs) & PAGE_MASK);
+
+ truncate_pagecache_range(inode, offset, chunk);
+ offset += chunk;
+ length -= chunk;
+
+ tr = current->journal_info;
+ if (!test_bit(TR_TOUCHED, &tr->tr_flags))
+ continue;
+
+ gfs2_trans_end(sdp);
+ error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
+ if (error)
+ return error;
+ }
+ return 0;
+}
+
+int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length)
+{
+ struct inode *inode = file_inode(file);
+ struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_sbd *sdp = GFS2_SB(inode);
+ int error;
+
+ if (gfs2_is_jdata(ip))
+ error = gfs2_trans_begin(sdp, RES_DINODE + 2 * RES_JDATA,
+ GFS2_JTRUNC_REVOKES);
+ else
+ error = gfs2_trans_begin(sdp, RES_DINODE, 0);
+ if (error)
+ return error;
+
+ if (gfs2_is_stuffed(ip)) {
+ error = stuffed_zero_range(inode, offset, length);
+ if (error)
+ goto out;
+ } else {
+ unsigned int start_off, end_off, blocksize;
+
+ blocksize = i_blocksize(inode);
+ start_off = offset & (blocksize - 1);
+ end_off = (offset + length) & (blocksize - 1);
+ if (start_off) {
+ unsigned int len = length;
+ if (length > blocksize - start_off)
+ len = blocksize - start_off;
+ error = gfs2_block_zero_range(inode, offset, len);
+ if (error)
+ goto out;
+ if (start_off + length < blocksize)
+ end_off = 0;
+ }
+ if (end_off) {
+ error = gfs2_block_zero_range(inode,
+ offset + length - end_off, end_off);
+ if (error)
+ goto out;
+ }
+ }
+
+ if (gfs2_is_jdata(ip)) {
+ BUG_ON(!current->journal_info);
+ gfs2_journaled_truncate_range(inode, offset, length);
+ } else
+ truncate_pagecache_range(inode, offset, offset + length - 1);
+
+ file_update_time(file);
+ mark_inode_dirty(inode);
+
+ if (current->journal_info)
+ gfs2_trans_end(sdp);
+
+ if (!gfs2_is_stuffed(ip))
+ error = punch_hole(ip, offset, length);
+
+out:
+ if (current->journal_info)
+ gfs2_trans_end(sdp);
+ return error;
+}
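
__gfs2_punch_hole() above zeroes only the partial blocks at the hole's edges and leaves whole blocks to punch_hole(); note the start_off branch, which clips the first zero range to the end of its block and cancels end_off when the hole never leaves its first block. A standalone trace of those boundary computations:

#include <stdio.h>

int main(void)
{
	unsigned long long offset = 5000, length = 8000;
	unsigned int blocksize = 4096;
	unsigned int start_off = offset & (blocksize - 1);
	unsigned int end_off = (offset + length) & (blocksize - 1);

	if (start_off) {
		unsigned long long len = length;

		if (length > blocksize - start_off)
			len = blocksize - start_off;
		printf("zero [%llu, %llu)\n", offset, offset + len);
		if (start_off + length < blocksize)
			end_off = 0;	/* hole stays in its first block */
	}
	if (end_off)
		printf("zero [%llu, %llu)\n",
		       offset + length - end_off, offset + length);
	/* prints [5000, 8192) and [12288, 13000); punch_hole() then
	 * deallocates the whole block [8192, 12288) in between */
	return 0;
}
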
diff --git a/fs/gfs2/bmap.h b/fs/gfs2/bmap.h
index 443cc182cf18..c3402fe00653 100644
--- a/fs/gfs2/bmap.h
+++ b/fs/gfs2/bmap.h
@@ -61,5 +61,6 @@ extern int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
unsigned int len);
extern int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd);
extern void gfs2_free_journal_extents(struct gfs2_jdesc *jd);
+extern int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length);
#endif /* __BMAP_DOT_H__ */
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 06a0d1947c77..7c21aea0266b 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -170,8 +170,7 @@ static int gfs2_dir_write_data(struct gfs2_inode *ip, const char *buf,
if (!size)
return 0;
- if (gfs2_is_stuffed(ip) &&
- offset + size <= sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode))
+ if (gfs2_is_stuffed(ip) && offset + size <= gfs2_max_stuffed_size(ip))
return gfs2_dir_write_stuffed(ip, buf, (unsigned int)offset,
size);
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 58705ef8643a..4f88e201b3f0 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -246,7 +246,9 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
}
if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
if (new_flags & GFS2_DIF_JDATA)
- gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH);
+ gfs2_log_flush(sdp, ip->i_gl,
+ GFS2_LOG_HEAD_FLUSH_NORMAL |
+ GFS2_LFC_SET_FLAGS);
error = filemap_fdatawrite(inode->i_mapping);
if (error)
goto out;
@@ -924,7 +926,7 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t le
struct gfs2_holder gh;
int ret;
- if (mode & ~FALLOC_FL_KEEP_SIZE)
+ if (mode & ~(FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE))
return -EOPNOTSUPP;
/* fallocate is needed by gfs2_grow to reserve space in the rindex */
if (gfs2_is_jdata(ip) && inode != sdp->sd_rindex)
@@ -948,13 +950,18 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t le
if (ret)
goto out_unlock;
- ret = gfs2_rsqa_alloc(ip);
- if (ret)
- goto out_putw;
+ if (mode & FALLOC_FL_PUNCH_HOLE) {
+ ret = __gfs2_punch_hole(file, offset, len);
+ } else {
+ ret = gfs2_rsqa_alloc(ip);
+ if (ret)
+ goto out_putw;
- ret = __gfs2_fallocate(file, mode, offset, len);
- if (ret)
- gfs2_rs_deltree(&ip->i_res);
+ ret = __gfs2_fallocate(file, mode, offset, len);
+
+ if (ret)
+ gfs2_rs_deltree(&ip->i_res);
+ }
out_putw:
put_write_access(inode);
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index cdd1c5f06f45..d8782a7a1e7d 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -107,7 +107,8 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
__gfs2_ail_flush(gl, 0, tr.tr_revokes);
gfs2_trans_end(sdp);
- gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
+ gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
+ GFS2_LFC_AIL_EMPTY_GL);
}
void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
@@ -128,7 +129,8 @@ void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
return;
__gfs2_ail_flush(gl, fsync, max_revokes);
gfs2_trans_end(sdp);
- gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
+ gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
+ GFS2_LFC_AIL_FLUSH);
}
/**
@@ -157,7 +159,8 @@ static void rgrp_go_sync(struct gfs2_glock *gl)
return;
GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
- gfs2_log_flush(sdp, gl, NORMAL_FLUSH);
+ gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
+ GFS2_LFC_RGRP_GO_SYNC);
filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
mapping_set_error(mapping, error);
@@ -252,7 +255,8 @@ static void inode_go_sync(struct gfs2_glock *gl)
GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
- gfs2_log_flush(gl->gl_name.ln_sbd, gl, NORMAL_FLUSH);
+ gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
+ GFS2_LFC_INODE_GO_SYNC);
filemap_fdatawrite(metamapping);
if (isreg) {
struct address_space *mapping = ip->i_inode.i_mapping;
@@ -303,7 +307,9 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
}
if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
- gfs2_log_flush(gl->gl_name.ln_sbd, NULL, NORMAL_FLUSH);
+ gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
+ GFS2_LOG_HEAD_FLUSH_NORMAL |
+ GFS2_LFC_INODE_GO_INVAL);
gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
}
if (ip && S_ISREG(ip->i_inode.i_mode))
@@ -495,7 +501,8 @@ static void freeze_go_sync(struct gfs2_glock *gl)
gfs2_assert_withdraw(sdp, 0);
}
queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
- gfs2_log_flush(sdp, NULL, FREEZE_FLUSH);
+ gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
+ GFS2_LFC_FREEZE_GO_SYNC);
}
}
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 6e18e9793ec4..e0557b8a590a 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -44,7 +44,6 @@ struct gfs2_log_header_host {
u32 lh_flags; /* GFS2_LOG_HEAD_... */
u32 lh_tail; /* Block number of log tail */
u32 lh_blkno;
- u32 lh_hash;
};
/*
@@ -861,5 +860,10 @@ static inline void gfs2_sbstats_inc(const struct gfs2_glock *gl, int which)
extern struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl);
+static inline unsigned gfs2_max_stuffed_size(const struct gfs2_inode *ip)
+{
+ return GFS2_SB(&ip->i_inode)->sd_sb.sb_bsize - sizeof(struct gfs2_dinode);
+}
+
#endif /* __INCORE_DOT_H__ */
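
gfs2_max_stuffed_size() above replaces the sb_bsize - sizeof(struct gfs2_dinode) expression repeated throughout this series. As a worked instance, assuming the usual 4096-byte block and the 232-byte on-disk dinode header:

#include <stdio.h>

int main(void)
{
	unsigned int sb_bsize = 4096;	/* sd_sb.sb_bsize, 4K filesystem */
	unsigned int dinode = 232;	/* assumed sizeof(struct gfs2_dinode) */

	printf("max stuffed size = %u bytes\n", sb_bsize - dinode); /* 3864 */
	return 0;
}
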
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 4e971b1c7f92..59e0560180ec 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -1152,12 +1152,11 @@ static int gfs2_unlink(struct inode *dir, struct dentry *dentry)
error = gfs2_trans_begin(sdp, 2*RES_DINODE + 3*RES_LEAF + RES_RG_BIT, 0);
if (error)
- goto out_end_trans;
+ goto out_gunlock;
error = gfs2_unlink_inode(dip, dentry);
-
-out_end_trans:
gfs2_trans_end(sdp);
+
out_gunlock:
gfs2_glock_dq(ghs + 2);
out_rgrp:
@@ -1184,11 +1183,10 @@ out_inodes:
static int gfs2_symlink(struct inode *dir, struct dentry *dentry,
const char *symname)
{
- struct gfs2_sbd *sdp = GFS2_SB(dir);
unsigned int size;
size = strlen(symname);
- if (size > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode) - 1)
+ if (size >= gfs2_max_stuffed_size(GFS2_I(dir)))
return -ENAMETOOLONG;
return gfs2_create_inode(dir, dentry, NULL, S_IFLNK | S_IRWXUGO, 0, symname, size, 0, NULL);
@@ -1205,8 +1203,7 @@ static int gfs2_symlink(struct inode *dir, struct dentry *dentry,
static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
- struct gfs2_sbd *sdp = GFS2_SB(dir);
- unsigned dsize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode);
+ unsigned dsize = gfs2_max_stuffed_size(GFS2_I(dir));
return gfs2_create_inode(dir, dentry, NULL, S_IFDIR | mode, 0, NULL, dsize, 0, NULL);
}
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 65f33a0ac190..006c6164f759 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -1091,7 +1091,7 @@ static void gdlm_recover_slot(void *arg, struct dlm_slot *slot)
spin_lock(&ls->ls_recover_spin);
if (ls->ls_recover_size < jid + 1) {
- fs_err(sdp, "recover_slot jid %d gen %u short size %d",
+ fs_err(sdp, "recover_slot jid %d gen %u short size %d\n",
jid, ls->ls_recover_block, ls->ls_recover_size);
spin_unlock(&ls->ls_recover_spin);
return;
@@ -1153,7 +1153,7 @@ static void gdlm_recovery_result(struct gfs2_sbd *sdp, unsigned int jid,
return;
}
if (ls->ls_recover_size < jid + 1) {
- fs_err(sdp, "recovery_result jid %d short size %d",
+ fs_err(sdp, "recovery_result jid %d short size %d\n",
jid, ls->ls_recover_size);
spin_unlock(&ls->ls_recover_spin);
return;
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index f72c44231406..cf6b46247df4 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -14,6 +14,7 @@
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
+#include <linux/crc32c.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
@@ -538,9 +539,12 @@ static void gfs2_ordered_write(struct gfs2_sbd *sdp)
list_sort(NULL, &sdp->sd_log_le_ordered, &ip_cmp);
while (!list_empty(&sdp->sd_log_le_ordered)) {
ip = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_inode, i_ordered);
- list_move(&ip->i_ordered, &written);
- if (ip->i_inode.i_mapping->nrpages == 0)
+ if (ip->i_inode.i_mapping->nrpages == 0) {
+ test_and_clear_bit(GIF_ORDERED, &ip->i_flags);
+ list_del(&ip->i_ordered);
continue;
+ }
+ list_move(&ip->i_ordered, &written);
spin_unlock(&sdp->sd_ordered_lock);
filemap_fdatawrite(ip->i_inode.i_mapping);
spin_lock(&sdp->sd_ordered_lock);
@@ -648,49 +652,102 @@ out_of_blocks:
}
/**
- * log_write_header - Get and initialize a journal header buffer
+ * gfs2_write_log_header - Write a journal log header buffer at sd_log_flush_head
* @sdp: The GFS2 superblock
+ * @jd: journal descriptor of the journal to which we are writing
+ * @seq: sequence number
+ * @tail: tail of the log
+ * @flags: log header flags GFS2_LOG_HEAD_*
+ * @op_flags: flags to pass to the bio
*/
-static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
+void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
+ u64 seq, u32 tail, u32 flags, int op_flags)
{
struct gfs2_log_header *lh;
- unsigned int tail;
- u32 hash;
- int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC;
+ u32 hash, crc;
struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
- enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
+ struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
+ struct timespec64 tv;
+ struct super_block *sb = sdp->sd_vfs;
+ u64 addr;
+
lh = page_address(page);
clear_page(lh);
- gfs2_assert_withdraw(sdp, (state != SFS_FROZEN));
-
- tail = current_tail(sdp);
-
lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
lh->lh_header.__pad0 = cpu_to_be64(0);
lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
- lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++);
+ lh->lh_sequence = cpu_to_be64(seq);
lh->lh_flags = cpu_to_be32(flags);
lh->lh_tail = cpu_to_be32(tail);
lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head);
- hash = gfs2_disk_hash(page_address(page), sizeof(struct gfs2_log_header));
+ hash = ~crc32(~0, lh, LH_V1_SIZE);
lh->lh_hash = cpu_to_be32(hash);
+ tv = current_kernel_time64();
+ lh->lh_nsec = cpu_to_be32(tv.tv_nsec);
+ lh->lh_sec = cpu_to_be64(tv.tv_sec);
+ addr = gfs2_log_bmap(sdp);
+ lh->lh_addr = cpu_to_be64(addr);
+ lh->lh_jinode = cpu_to_be64(GFS2_I(jd->jd_inode)->i_no_addr);
+
+ /* We may only write local statfs, quota, etc., when writing to our
+ own journal. The values are left 0 when recovering a journal
+ different from our own. */
+ if (!(flags & GFS2_LOG_HEAD_RECOVERY)) {
+ lh->lh_statfs_addr =
+ cpu_to_be64(GFS2_I(sdp->sd_sc_inode)->i_no_addr);
+ lh->lh_quota_addr =
+ cpu_to_be64(GFS2_I(sdp->sd_qc_inode)->i_no_addr);
+
+ spin_lock(&sdp->sd_statfs_spin);
+ lh->lh_local_total = cpu_to_be64(l_sc->sc_total);
+ lh->lh_local_free = cpu_to_be64(l_sc->sc_free);
+ lh->lh_local_dinodes = cpu_to_be64(l_sc->sc_dinodes);
+ spin_unlock(&sdp->sd_statfs_spin);
+ }
+
+ BUILD_BUG_ON(offsetof(struct gfs2_log_header, lh_crc) != LH_V1_SIZE);
+
+ crc = crc32c(~0, (void *)lh + LH_V1_SIZE + 4,
+ sb->s_blocksize - LH_V1_SIZE - 4);
+ lh->lh_crc = cpu_to_be32(crc);
+
+ gfs2_log_write(sdp, page, sb->s_blocksize, 0, addr);
+ gfs2_log_flush_bio(sdp, REQ_OP_WRITE, op_flags);
+ log_flush_wait(sdp);
+}
+
+/**
+ * log_write_header - Get and initialize a journal header buffer
+ * @sdp: The GFS2 superblock
+ * @flags: The log header flags, including log header origin
+ */
+
+static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
+{
+ unsigned int tail;
+ int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC;
+ enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
+
+ gfs2_assert_withdraw(sdp, (state != SFS_FROZEN));
+ tail = current_tail(sdp);
+
if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
gfs2_ordered_wait(sdp);
log_flush_wait(sdp);
op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
}
-
sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
- gfs2_log_write_page(sdp, page);
- gfs2_log_flush_bio(sdp, REQ_OP_WRITE, op_flags);
- log_flush_wait(sdp);
+ gfs2_write_log_header(sdp, sdp->sd_jdesc, sdp->sd_log_sequence++, tail,
+ flags, op_flags);
if (sdp->sd_log_tail != tail)
log_pull_tail(sdp, tail);
@@ -700,11 +757,11 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
* gfs2_log_flush - flush incore transaction(s)
* @sdp: the filesystem
* @gl: The glock structure to flush. If NULL, flush the whole incore log
+ * @flags: The log header flags: GFS2_LOG_HEAD_FLUSH_* and debug flags
*
*/
-void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
- enum gfs2_flush_type type)
+void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
{
struct gfs2_trans *tr;
enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
@@ -716,9 +773,9 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
up_write(&sdp->sd_log_flush_lock);
return;
}
- trace_gfs2_log_flush(sdp, 1);
+ trace_gfs2_log_flush(sdp, 1, flags);
- if (type == SHUTDOWN_FLUSH)
+ if (flags & GFS2_LOG_HEAD_FLUSH_SHUTDOWN)
clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
sdp->sd_log_flush_head = sdp->sd_log_head;
@@ -743,11 +800,11 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
if (sdp->sd_log_head != sdp->sd_log_flush_head) {
log_flush_wait(sdp);
- log_write_header(sdp, 0);
+ log_write_header(sdp, flags);
} else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle){
atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
trace_gfs2_log_blocks(sdp, -1);
- log_write_header(sdp, 0);
+ log_write_header(sdp, flags);
}
lops_after_commit(sdp, tr);
@@ -764,7 +821,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
spin_unlock(&sdp->sd_ail_lock);
gfs2_log_unlock(sdp);
- if (type != NORMAL_FLUSH) {
+ if (!(flags & GFS2_LOG_HEAD_FLUSH_NORMAL)) {
if (!sdp->sd_log_idle) {
for (;;) {
gfs2_ail1_start(sdp);
@@ -774,16 +831,17 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
}
atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
trace_gfs2_log_blocks(sdp, -1);
- log_write_header(sdp, 0);
+ log_write_header(sdp, flags);
sdp->sd_log_head = sdp->sd_log_flush_head;
}
- if (type == SHUTDOWN_FLUSH || type == FREEZE_FLUSH)
+ if (flags & (GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
+ GFS2_LOG_HEAD_FLUSH_FREEZE))
gfs2_log_shutdown(sdp);
- if (type == FREEZE_FLUSH)
+ if (flags & GFS2_LOG_HEAD_FLUSH_FREEZE)
atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
}
- trace_gfs2_log_flush(sdp, 0);
+ trace_gfs2_log_flush(sdp, 0, flags);
up_write(&sdp->sd_log_flush_lock);
kfree(tr);
@@ -879,7 +937,7 @@ void gfs2_log_shutdown(struct gfs2_sbd *sdp)
sdp->sd_log_flush_head = sdp->sd_log_head;
- log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT);
+ log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT | GFS2_LFC_SHUTDOWN);
gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));
@@ -935,7 +993,8 @@ int gfs2_logd(void *data)
did_flush = false;
if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
gfs2_ail1_empty(sdp);
- gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
+ gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
+ GFS2_LFC_LOGD_JFLUSH_REQD);
did_flush = true;
}
@@ -943,7 +1002,8 @@ int gfs2_logd(void *data)
gfs2_ail1_start(sdp);
gfs2_ail1_wait(sdp);
gfs2_ail1_empty(sdp);
- gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
+ gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
+ GFS2_LFC_LOGD_AIL_FLUSH_REQD);
did_flush = true;
}
diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h
index 9499a6049212..93b52ac1ca1f 100644
--- a/fs/gfs2/log.h
+++ b/fs/gfs2/log.h
@@ -65,14 +65,10 @@ extern unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
extern void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks);
extern int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks);
-enum gfs2_flush_type {
- NORMAL_FLUSH = 0,
- SYNC_FLUSH,
- SHUTDOWN_FLUSH,
- FREEZE_FLUSH
-};
+extern void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
+ u64 seq, u32 tail, u32 flags, int op_flags);
extern void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
- enum gfs2_flush_type type);
+ u32 flags);
extern void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *trans);
extern void gfs2_remove_from_ail(struct gfs2_bufdata *bd);
extern void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc);
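
The enum gfs2_flush_type is gone: gfs2_log_flush() now takes a u32 in which the GFS2_LOG_HEAD_FLUSH_* bits select the flush behaviour and the GFS2_LFC_* bits record which code path asked for it, and the combined mask is written into the on-disk log header and emitted by the tracepoint. Note that "normal" is an explicit bit rather than zero, which is why gfs2_log_flush() can test !(flags & GFS2_LOG_HEAD_FLUSH_NORMAL) where it used to compare against NORMAL_FLUSH. A minimal sketch of the pattern; the bit values below are made up, the real constants live in include/uapi/linux/gfs2_ondisk.h:

    #include <stdio.h>

    /* Hypothetical values standing in for GFS2_LOG_HEAD_FLUSH_* and
     * GFS2_LFC_*; see include/uapi/linux/gfs2_ondisk.h for the real ones. */
    #define FLUSH_NORMAL    0x0002u
    #define FLUSH_SHUTDOWN  0x0008u
    #define LFC_SYNC_FS     0x0100u  /* cause code: sync_fs() asked */

    static void log_flush(unsigned flags)
    {
        if (flags & FLUSH_SHUTDOWN)
            puts("journal being shut down");
        if (!(flags & FLUSH_NORMAL))
            puts("non-normal flush: drain the AIL as well");

        /* The whole mask, cause bits included, lands in the log header
         * and the tracepoint, so every flush records its origin. */
        printf("lh_flags = 0x%04x\n", flags);
    }

    int main(void)
    {
        log_flush(FLUSH_NORMAL | LFC_SYNC_FS);
        return 0;
    }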
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index c8ff7b7954f0..4d6567990baf 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -18,6 +18,7 @@
#include <linux/fs.h>
#include <linux/list_sort.h>
+#include "dir.h"
#include "gfs2.h"
#include "incore.h"
#include "inode.h"
@@ -138,7 +139,7 @@ static void gfs2_log_incr_head(struct gfs2_sbd *sdp)
sdp->sd_log_flush_head = 0;
}
-static u64 gfs2_log_bmap(struct gfs2_sbd *sdp)
+u64 gfs2_log_bmap(struct gfs2_sbd *sdp)
{
unsigned int lbn = sdp->sd_log_flush_head;
struct gfs2_journal_extent *je;
@@ -161,7 +162,7 @@ static u64 gfs2_log_bmap(struct gfs2_sbd *sdp)
* @bvec: The bio_vec
* @error: The i/o status
*
- * This finds the relavent buffers and unlocks then and sets the
+ * This finds the relevant buffers and unlocks them and sets the
* error flag according to the status of the i/o request. This is
* used when the log is writing data which has an in-place version
* that is pinned in the pagecache.
@@ -306,23 +307,22 @@ static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno)
return gfs2_log_alloc_bio(sdp, blkno);
}
-
/**
* gfs2_log_write - write to log
* @sdp: the filesystem
* @page: the page to write
* @size: the size of the data to write
* @offset: the offset within the page
+ * @blkno: block number of the log entry
*
* Try and add the page segment to the current bio. If that fails,
* submit the current bio to the device and create a new one, and
* then add the page segment to that.
*/
-static void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
- unsigned size, unsigned offset)
+void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
+ unsigned size, unsigned offset, u64 blkno)
{
- u64 blkno = gfs2_log_bmap(sdp);
struct bio *bio;
int ret;
@@ -348,7 +348,8 @@ static void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
- gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh));
+ gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh),
+ gfs2_log_bmap(sdp));
}
/**
@@ -365,7 +366,8 @@ static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
{
struct super_block *sb = sdp->sd_vfs;
- gfs2_log_write(sdp, page, sb->s_blocksize, 0);
+ gfs2_log_write(sdp, page, sb->s_blocksize, 0,
+ gfs2_log_bmap(sdp));
}
static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
diff --git a/fs/gfs2/lops.h b/fs/gfs2/lops.h
index e529f536c117..e4949394f054 100644
--- a/fs/gfs2/lops.h
+++ b/fs/gfs2/lops.h
@@ -26,6 +26,9 @@ extern const struct gfs2_log_operations gfs2_revoke_lops;
extern const struct gfs2_log_operations gfs2_databuf_lops;
extern const struct gfs2_log_operations *gfs2_log_ops[];
+extern u64 gfs2_log_bmap(struct gfs2_sbd *sdp);
+extern void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
+ unsigned size, unsigned offset, u64 blkno);
extern void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page);
extern void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int op, int op_flags);
extern void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh);
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 0a89e6f7a314..2d55e2c3333c 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -93,7 +93,7 @@ static int __init init_gfs2_fs(void)
error = gfs2_glock_init();
if (error)
- goto fail;
+ goto fail_glock;
error = -ENOMEM;
gfs2_glock_cachep = kmem_cache_create("gfs2_glock",
@@ -101,7 +101,7 @@ static int __init init_gfs2_fs(void)
0, 0,
gfs2_init_glock_once);
if (!gfs2_glock_cachep)
- goto fail;
+ goto fail_cachep1;
gfs2_glock_aspace_cachep = kmem_cache_create("gfs2_glock(aspace)",
sizeof(struct gfs2_glock) +
@@ -109,7 +109,7 @@ static int __init init_gfs2_fs(void)
0, 0, gfs2_init_gl_aspace_once);
if (!gfs2_glock_aspace_cachep)
- goto fail;
+ goto fail_cachep2;
gfs2_inode_cachep = kmem_cache_create("gfs2_inode",
sizeof(struct gfs2_inode),
@@ -118,107 +118,105 @@ static int __init init_gfs2_fs(void)
SLAB_ACCOUNT,
gfs2_init_inode_once);
if (!gfs2_inode_cachep)
- goto fail;
+ goto fail_cachep3;
gfs2_bufdata_cachep = kmem_cache_create("gfs2_bufdata",
sizeof(struct gfs2_bufdata),
0, 0, NULL);
if (!gfs2_bufdata_cachep)
- goto fail;
+ goto fail_cachep4;
gfs2_rgrpd_cachep = kmem_cache_create("gfs2_rgrpd",
sizeof(struct gfs2_rgrpd),
0, 0, NULL);
if (!gfs2_rgrpd_cachep)
- goto fail;
+ goto fail_cachep5;
gfs2_quotad_cachep = kmem_cache_create("gfs2_quotad",
sizeof(struct gfs2_quota_data),
0, 0, NULL);
if (!gfs2_quotad_cachep)
- goto fail;
+ goto fail_cachep6;
gfs2_qadata_cachep = kmem_cache_create("gfs2_qadata",
sizeof(struct gfs2_qadata),
0, 0, NULL);
if (!gfs2_qadata_cachep)
- goto fail;
+ goto fail_cachep7;
error = register_shrinker(&gfs2_qd_shrinker);
if (error)
- goto fail;
+ goto fail_shrinker;
error = register_filesystem(&gfs2_fs_type);
if (error)
- goto fail;
+ goto fail_fs1;
error = register_filesystem(&gfs2meta_fs_type);
if (error)
- goto fail_unregister;
+ goto fail_fs2;
error = -ENOMEM;
gfs_recovery_wq = alloc_workqueue("gfs_recovery",
WQ_MEM_RECLAIM | WQ_FREEZABLE, 0);
if (!gfs_recovery_wq)
- goto fail_wq;
+ goto fail_wq1;
gfs2_control_wq = alloc_workqueue("gfs2_control",
WQ_UNBOUND | WQ_FREEZABLE, 0);
if (!gfs2_control_wq)
- goto fail_recovery;
+ goto fail_wq2;
gfs2_freeze_wq = alloc_workqueue("freeze_workqueue", 0, 0);
if (!gfs2_freeze_wq)
- goto fail_control;
+ goto fail_wq3;
gfs2_page_pool = mempool_create_page_pool(64, 0);
if (!gfs2_page_pool)
- goto fail_freeze;
+ goto fail_mempool;
- gfs2_register_debugfs();
+ error = gfs2_register_debugfs();
+ if (error)
+ goto fail_debugfs;
pr_info("GFS2 installed\n");
return 0;
-fail_freeze:
+fail_debugfs:
+ mempool_destroy(gfs2_page_pool);
+fail_mempool:
destroy_workqueue(gfs2_freeze_wq);
-fail_control:
+fail_wq3:
destroy_workqueue(gfs2_control_wq);
-fail_recovery:
+fail_wq2:
destroy_workqueue(gfs_recovery_wq);
-fail_wq:
+fail_wq1:
unregister_filesystem(&gfs2meta_fs_type);
-fail_unregister:
+fail_fs2:
unregister_filesystem(&gfs2_fs_type);
-fail:
- list_lru_destroy(&gfs2_qd_lru);
-fail_lru:
+fail_fs1:
unregister_shrinker(&gfs2_qd_shrinker);
+fail_shrinker:
+ kmem_cache_destroy(gfs2_qadata_cachep);
+fail_cachep7:
+ kmem_cache_destroy(gfs2_quotad_cachep);
+fail_cachep6:
+ kmem_cache_destroy(gfs2_rgrpd_cachep);
+fail_cachep5:
+ kmem_cache_destroy(gfs2_bufdata_cachep);
+fail_cachep4:
+ kmem_cache_destroy(gfs2_inode_cachep);
+fail_cachep3:
+ kmem_cache_destroy(gfs2_glock_aspace_cachep);
+fail_cachep2:
+ kmem_cache_destroy(gfs2_glock_cachep);
+fail_cachep1:
gfs2_glock_exit();
-
- if (gfs2_qadata_cachep)
- kmem_cache_destroy(gfs2_qadata_cachep);
-
- if (gfs2_quotad_cachep)
- kmem_cache_destroy(gfs2_quotad_cachep);
-
- if (gfs2_rgrpd_cachep)
- kmem_cache_destroy(gfs2_rgrpd_cachep);
-
- if (gfs2_bufdata_cachep)
- kmem_cache_destroy(gfs2_bufdata_cachep);
-
- if (gfs2_inode_cachep)
- kmem_cache_destroy(gfs2_inode_cachep);
-
- if (gfs2_glock_aspace_cachep)
- kmem_cache_destroy(gfs2_glock_aspace_cachep);
-
- if (gfs2_glock_cachep)
- kmem_cache_destroy(gfs2_glock_cachep);
-
+fail_glock:
+ list_lru_destroy(&gfs2_qd_lru);
+fail_lru:
gfs2_sys_uninit();
return error;
}
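
The init_gfs2_fs() rework replaces one catch-all fail: label plus per-cache NULL checks with a strict reverse-order unwind ladder: a failure at step N jumps to the label that tears down steps N-1 back to 1, so exactly the resources acquired so far are released. A minimal runnable sketch of the shape, names illustrative:

    #include <stdio.h>
    #include <stdlib.h>

    static void *a, *b, *c;

    static int init_three(void)
    {
        if (!(a = malloc(16)))
            goto fail_a;
        if (!(b = malloc(16)))
            goto fail_b;
        if (!(c = malloc(16)))
            goto fail_c;
        return 0;

    fail_c:
        free(b);   /* undo step 2 */
    fail_b:
        free(a);   /* undo step 1 */
    fail_a:
        return -1; /* nothing further to undo */
    }

    int main(void)
    {
        printf("init_three: %d\n", init_three());
        return 0;
    }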
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index ad55eb86a250..e6a0a8a89ea7 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -1382,7 +1382,7 @@ static void gfs2_kill_sb(struct super_block *sb)
return;
}
- gfs2_log_flush(sdp, NULL, SYNC_FLUSH);
+ gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SYNC | GFS2_LFC_KILL_SB);
dput(sdp->sd_root_dir);
dput(sdp->sd_master_dir);
sdp->sd_root_dir = NULL;
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index e700fb162664..7a98abd340ee 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -955,7 +955,8 @@ out:
gfs2_glock_dq_uninit(&ghs[qx]);
inode_unlock(&ip->i_inode);
kfree(ghs);
- gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl, NORMAL_FLUSH);
+ gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl,
+ GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_DO_SYNC);
return error;
}
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index 9395a3db1a60..b6b258998bcd 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -14,12 +14,14 @@
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
+#include <linux/crc32c.h>
#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
+#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
@@ -117,22 +119,6 @@ void gfs2_revoke_clean(struct gfs2_jdesc *jd)
}
}
-static int gfs2_log_header_in(struct gfs2_log_header_host *lh, const void *buf)
-{
- const struct gfs2_log_header *str = buf;
-
- if (str->lh_header.mh_magic != cpu_to_be32(GFS2_MAGIC) ||
- str->lh_header.mh_type != cpu_to_be32(GFS2_METATYPE_LH))
- return 1;
-
- lh->lh_sequence = be64_to_cpu(str->lh_sequence);
- lh->lh_flags = be32_to_cpu(str->lh_flags);
- lh->lh_tail = be32_to_cpu(str->lh_tail);
- lh->lh_blkno = be32_to_cpu(str->lh_blkno);
- lh->lh_hash = be32_to_cpu(str->lh_hash);
- return 0;
-}
-
/**
* get_log_header - read the log header for a given segment
* @jd: the journal
@@ -150,29 +136,37 @@ static int gfs2_log_header_in(struct gfs2_log_header_host *lh, const void *buf)
static int get_log_header(struct gfs2_jdesc *jd, unsigned int blk,
struct gfs2_log_header_host *head)
{
+ struct gfs2_log_header *lh;
struct buffer_head *bh;
- struct gfs2_log_header_host uninitialized_var(lh);
- const u32 nothing = 0;
- u32 hash;
+ u32 hash, crc;
int error;
error = gfs2_replay_read_block(jd, blk, &bh);
if (error)
return error;
+ lh = (void *)bh->b_data;
- hash = crc32_le((u32)~0, bh->b_data, sizeof(struct gfs2_log_header) -
- sizeof(u32));
- hash = crc32_le(hash, (unsigned char const *)&nothing, sizeof(nothing));
- hash ^= (u32)~0;
- error = gfs2_log_header_in(&lh, bh->b_data);
- brelse(bh);
+ hash = crc32(~0, lh, LH_V1_SIZE - 4);
+ hash = ~crc32_le_shift(hash, 4); /* assume lh_hash is zero */
- if (error || lh.lh_blkno != blk || lh.lh_hash != hash)
- return 1;
+ crc = crc32c(~0, (void *)lh + LH_V1_SIZE + 4,
+ bh->b_size - LH_V1_SIZE - 4);
- *head = lh;
+ error = lh->lh_header.mh_magic != cpu_to_be32(GFS2_MAGIC) ||
+ lh->lh_header.mh_type != cpu_to_be32(GFS2_METATYPE_LH) ||
+ be32_to_cpu(lh->lh_blkno) != blk ||
+ be32_to_cpu(lh->lh_hash) != hash ||
+ (lh->lh_crc != 0 && be32_to_cpu(lh->lh_crc) != crc);
- return 0;
+ brelse(bh);
+
+ if (!error) {
+ head->lh_sequence = be64_to_cpu(lh->lh_sequence);
+ head->lh_flags = be32_to_cpu(lh->lh_flags);
+ head->lh_tail = be32_to_cpu(lh->lh_tail);
+ head->lh_blkno = be32_to_cpu(lh->lh_blkno);
+ }
+ return error;
}
/**
@@ -370,62 +364,22 @@ static int foreach_descriptor(struct gfs2_jdesc *jd, unsigned int start,
/**
* clean_journal - mark a dirty journal as being clean
- * @sdp: the filesystem
* @jd: the journal
- * @gl: the journal's glock
* @head: the head journal to start from
*
* Returns: errno
*/
-static int clean_journal(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head)
+static void clean_journal(struct gfs2_jdesc *jd,
+ struct gfs2_log_header_host *head)
{
- struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
- unsigned int lblock;
- struct gfs2_log_header *lh;
- u32 hash;
- struct buffer_head *bh;
- int error;
- struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
-
- lblock = head->lh_blkno;
- gfs2_replay_incr_blk(jd, &lblock);
- bh_map.b_size = 1 << ip->i_inode.i_blkbits;
- error = gfs2_block_map(&ip->i_inode, lblock, &bh_map, 0);
- if (error)
- return error;
- if (!bh_map.b_blocknr) {
- gfs2_consist_inode(ip);
- return -EIO;
- }
-
- bh = sb_getblk(sdp->sd_vfs, bh_map.b_blocknr);
- lock_buffer(bh);
- memset(bh->b_data, 0, bh->b_size);
- set_buffer_uptodate(bh);
- clear_buffer_dirty(bh);
- unlock_buffer(bh);
-
- lh = (struct gfs2_log_header *)bh->b_data;
- memset(lh, 0, sizeof(struct gfs2_log_header));
- lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
- lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
- lh->lh_header.__pad0 = cpu_to_be64(0);
- lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
- lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
- lh->lh_sequence = cpu_to_be64(head->lh_sequence + 1);
- lh->lh_flags = cpu_to_be32(GFS2_LOG_HEAD_UNMOUNT);
- lh->lh_blkno = cpu_to_be32(lblock);
- hash = gfs2_disk_hash((const char *)lh, sizeof(struct gfs2_log_header));
- lh->lh_hash = cpu_to_be32(hash);
-
- set_buffer_dirty(bh);
- if (sync_dirty_buffer(bh))
- gfs2_io_error_bh(sdp, bh);
- brelse(bh);
- return error;
+ sdp->sd_log_flush_head = head->lh_blkno;
+ gfs2_replay_incr_blk(jd, &sdp->sd_log_flush_head);
+ gfs2_write_log_header(sdp, jd, head->lh_sequence + 1, 0,
+ GFS2_LOG_HEAD_UNMOUNT | GFS2_LOG_HEAD_RECOVERY,
+ REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC);
}
@@ -552,9 +506,7 @@ void gfs2_recover_func(struct work_struct *work)
goto fail_gunlock_thaw;
}
- error = clean_journal(jd, &head);
- if (error)
- goto fail_gunlock_thaw;
+ clean_journal(jd, &head);
gfs2_glock_dq_uninit(&thaw_gh);
t = DIV_ROUND_UP(jiffies - t, HZ);
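
With get_log_header() rewritten, both halves of the header checksumming are visible: the write side (gfs2_write_log_header() in log.c) computes the v1 hash while lh_hash is still zero from clear_page(), and the read side reproduces that value with crc32_le_shift(), which extends a CRC as if zero bytes had been appended. A condensed, hypothetical helper restating the checks, assuming the kernel's crc32()/crc32c() helpers and the lh_crc placement asserted by the BUILD_BUG_ON in log.c:

    #include <linux/crc32.h>
    #include <linux/crc32c.h>
    #include <linux/gfs2_ondisk.h>

    /* Sketch only; bsize is the filesystem block size. */
    static bool lh_checksums_ok(const struct gfs2_log_header *lh,
                                unsigned int bsize)
    {
        u32 hash, crc;

        /* v1 hash: CRC32 of the header with lh_hash treated as zero.
         * crc32_le_shift(h, 4) appends four implicit zero bytes, which
         * matches the writer computing the CRC before filling lh_hash. */
        hash = crc32(~0, lh, LH_V1_SIZE - 4);
        hash = ~crc32_le_shift(hash, 4);
        if (be32_to_cpu(lh->lh_hash) != hash)
            return false;

        /* v2 CRC32c: everything after lh_crc, to the end of the block.
         * lh_crc == 0 means an older writer, so it is only enforced
         * when the field is non-zero. */
        crc = crc32c(~0, (const void *)lh + LH_V1_SIZE + 4,
                     bsize - LH_V1_SIZE - 4);
        return lh->lh_crc == 0 || be32_to_cpu(lh->lh_crc) == crc;
    }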
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 95b2a57ded33..8b683917a27e 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -34,6 +34,7 @@
#include "log.h"
#include "inode.h"
#include "trace_gfs2.h"
+#include "dir.h"
#define BFITNOENT ((u32)~0)
#define NO_BLOCK ((u64)~0)
@@ -489,6 +490,13 @@ void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
* @blk: The data block number
* @exact: True if this needs to be an exact match
*
+ * The @exact argument should be set to true by most callers. The exception
+ * is when we need to match blocks which are not represented in the rgrp
+ * bitmap but still belong to the rgrp (i.e. padding blocks inserted for
+ * alignment purposes). Put differently: with @exact true, only valid
+ * data/metadata blocks match; with @exact false, any block within the
+ * extent of the rgrp matches.
+ *
* Returns: The resource group, or NULL if not found
*/
@@ -1040,17 +1048,30 @@ static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf)
rgd->rd_free = be32_to_cpu(str->rg_free);
rgd->rd_dinodes = be32_to_cpu(str->rg_dinodes);
rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration);
+ /* rd_data0, rd_data and rd_bitbytes already set from rindex */
}
static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
{
+ struct gfs2_rgrpd *next = gfs2_rgrpd_get_next(rgd);
struct gfs2_rgrp *str = buf;
+ u32 crc;
str->rg_flags = cpu_to_be32(rgd->rd_flags & ~GFS2_RDF_MASK);
str->rg_free = cpu_to_be32(rgd->rd_free);
str->rg_dinodes = cpu_to_be32(rgd->rd_dinodes);
- str->__pad = cpu_to_be32(0);
+ if (next == NULL)
+ str->rg_skip = 0;
+ else if (next->rd_addr > rgd->rd_addr)
+ str->rg_skip = cpu_to_be32(next->rd_addr - rgd->rd_addr);
str->rg_igeneration = cpu_to_be64(rgd->rd_igeneration);
+ str->rg_data0 = cpu_to_be64(rgd->rd_data0);
+ str->rg_data = cpu_to_be32(rgd->rd_data);
+ str->rg_bitbytes = cpu_to_be32(rgd->rd_bitbytes);
+ str->rg_crc = 0;
+ crc = gfs2_disk_hash(buf, sizeof(struct gfs2_rgrp));
+ str->rg_crc = cpu_to_be32(crc);
+
memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
}
@@ -1318,7 +1339,7 @@ start_new_extent:
fail:
if (sdp->sd_args.ar_discard)
- fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem", rv);
+ fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem\n", rv);
sdp->sd_args.ar_discard = 0;
return -EIO;
}
@@ -2072,7 +2093,8 @@ next_rgrp:
}
/* Flushing the log may release space */
if (loops == 2)
- gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
+ gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
+ GFS2_LFC_INPLACE_RESERVE);
}
return -ENOSPC;
@@ -2453,12 +2475,12 @@ void gfs2_unlink_di(struct inode *inode)
update_rgrp_lvb_unlinked(rgd, 1);
}
-static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
+void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
{
struct gfs2_sbd *sdp = rgd->rd_sbd;
struct gfs2_rgrpd *tmp_rgd;
- tmp_rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_FREE);
+ tmp_rgd = rgblk_free(sdp, ip->i_no_addr, 1, GFS2_BLKST_FREE);
if (!tmp_rgd)
return;
gfs2_assert_withdraw(sdp, rgd == tmp_rgd);
@@ -2474,12 +2496,6 @@ static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
update_rgrp_lvb_unlinked(rgd, -1);
gfs2_statfs_change(sdp, 0, +1, -1);
-}
-
-
-void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
-{
- gfs2_free_uninit_di(rgd, ip->i_no_addr);
trace_gfs2_block_alloc(ip, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
gfs2_meta_wipe(ip, ip->i_no_addr, 1);
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index d81d46e19726..620be0521866 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -757,7 +757,9 @@ static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
bool flush_all = (wbc->sync_mode == WB_SYNC_ALL || gfs2_is_jdata(ip));
if (flush_all)
- gfs2_log_flush(GFS2_SB(inode), ip->i_gl, NORMAL_FLUSH);
+ gfs2_log_flush(GFS2_SB(inode), ip->i_gl,
+ GFS2_LOG_HEAD_FLUSH_NORMAL |
+ GFS2_LFC_WRITE_INODE);
if (bdi->wb.dirty_exceeded)
gfs2_ail1_flush(sdp, wbc);
else
@@ -766,6 +768,12 @@ static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
ret = filemap_fdatawait(metamapping);
if (ret)
mark_inode_dirty_sync(inode);
+ else {
+ spin_lock(&inode->i_lock);
+ if (!(inode->i_flags & I_DIRTY))
+ gfs2_ordered_del_inode(ip);
+ spin_unlock(&inode->i_lock);
+ }
return ret;
}
@@ -853,7 +861,8 @@ static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
gfs2_quota_sync(sdp->sd_vfs, 0);
gfs2_statfs_sync(sdp->sd_vfs, 0);
- gfs2_log_flush(sdp, NULL, SHUTDOWN_FLUSH);
+ gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
+ GFS2_LFC_MAKE_FS_RO);
wait_event(sdp->sd_reserving_log_wait, atomic_read(&sdp->sd_reserving_log) == 0);
gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) == sdp->sd_jdesc->jd_blocks);
@@ -946,7 +955,8 @@ static int gfs2_sync_fs(struct super_block *sb, int wait)
gfs2_quota_sync(sb, -1);
if (wait)
- gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
+ gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
+ GFS2_LFC_SYNC_FS);
return sdp->sd_log_error;
}
@@ -1650,7 +1660,8 @@ alloc_failed:
goto out_unlock;
out_truncate:
- gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH);
+ gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
+ GFS2_LFC_EVICT_INODE);
metamapping = gfs2_glock2aspace(ip->i_gl);
if (test_bit(GLF_DIRTY, &ip->i_gl->gl_flags)) {
filemap_fdatawrite(metamapping);
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 9eb9d0a1abd9..c191fa58a1df 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -112,7 +112,7 @@ static ssize_t freeze_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
}
if (error) {
- fs_warn(sdp, "freeze %d error %d", n, error);
+ fs_warn(sdp, "freeze %d error %d\n", n, error);
return error;
}
@@ -679,7 +679,7 @@ fail_tune:
sysfs_remove_group(&sdp->sd_kobj, &tune_group);
fail_reg:
free_percpu(sdp->sd_lkstats);
- fs_err(sdp, "error %d adding sysfs files", error);
+ fs_err(sdp, "error %d adding sysfs files\n", error);
if (sysfs_frees_sdp)
kobject_put(&sdp->sd_kobj);
else
diff --git a/fs/gfs2/trace_gfs2.h b/fs/gfs2/trace_gfs2.h
index f67a709589d3..b9318b49ff8f 100644
--- a/fs/gfs2/trace_gfs2.h
+++ b/fs/gfs2/trace_gfs2.h
@@ -353,26 +353,29 @@ TRACE_EVENT(gfs2_pin,
/* Flushing the log */
TRACE_EVENT(gfs2_log_flush,
- TP_PROTO(const struct gfs2_sbd *sdp, int start),
+ TP_PROTO(const struct gfs2_sbd *sdp, int start, u32 flags),
- TP_ARGS(sdp, start),
+ TP_ARGS(sdp, start, flags),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( int, start )
__field( u64, log_seq )
+ __field( u32, flags )
),
TP_fast_assign(
__entry->dev = sdp->sd_vfs->s_dev;
__entry->start = start;
__entry->log_seq = sdp->sd_log_sequence;
+ __entry->flags = flags;
),
- TP_printk("%u,%u log flush %s %llu",
+ TP_printk("%u,%u log flush %s %llu %llx",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->start ? "start" : "end",
- (unsigned long long)__entry->log_seq)
+ (unsigned long long)__entry->log_seq,
+ (unsigned long long)__entry->flags)
);
/* Reserving/releasing blocks in the log */
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
index ca8b72d0a831..c75cacaa349b 100644
--- a/fs/gfs2/trans.c
+++ b/fs/gfs2/trans.c
@@ -92,7 +92,6 @@ void gfs2_trans_end(struct gfs2_sbd *sdp)
s64 nbuf;
int alloced = test_bit(TR_ALLOCED, &tr->tr_flags);
- BUG_ON(!tr);
current->journal_info = NULL;
if (!test_bit(TR_TOUCHED, &tr->tr_flags)) {
@@ -118,7 +117,8 @@ void gfs2_trans_end(struct gfs2_sbd *sdp)
up_read(&sdp->sd_log_flush_lock);
if (sdp->sd_vfs->s_flags & SB_SYNCHRONOUS)
- gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
+ gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
+ GFS2_LFC_TRANS_END);
if (alloced)
sb_end_intwrite(sdp->sd_vfs);
}
diff --git a/fs/inode.c b/fs/inode.c
index 03102d6ef044..e2ca0f4b5151 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -18,6 +18,7 @@
#include <linux/buffer_head.h> /* for inode_has_buffers */
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
+#include <linux/iversion.h>
#include <trace/events/writeback.h>
#include "internal.h"
@@ -1634,17 +1635,21 @@ static int relatime_need_update(const struct path *path, struct inode *inode,
int generic_update_time(struct inode *inode, struct timespec *time, int flags)
{
int iflags = I_DIRTY_TIME;
+ bool dirty = false;
if (flags & S_ATIME)
inode->i_atime = *time;
if (flags & S_VERSION)
- inode_inc_iversion(inode);
+ dirty = inode_maybe_inc_iversion(inode, false);
if (flags & S_CTIME)
inode->i_ctime = *time;
if (flags & S_MTIME)
inode->i_mtime = *time;
+ if ((flags & (S_ATIME | S_CTIME | S_MTIME)) &&
+ !(inode->i_sb->s_flags & SB_LAZYTIME))
+ dirty = true;
- if (!(inode->i_sb->s_flags & SB_LAZYTIME) || (flags & S_VERSION))
+ if (dirty)
iflags |= I_DIRTY_SYNC;
__mark_inode_dirty(inode, iflags);
return 0;
@@ -1863,7 +1868,7 @@ int file_update_time(struct file *file)
if (!timespec_equal(&inode->i_ctime, &now))
sync_it |= S_CTIME;
- if (IS_I_VERSION(inode))
+ if (IS_I_VERSION(inode) && inode_iversion_need_inc(inode))
sync_it |= S_VERSION;
if (!sync_it)
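
generic_update_time() now dirties the inode for S_VERSION only when inode_maybe_inc_iversion() reports that the counter actually moved, and file_update_time() skips S_VERSION entirely unless inode_iversion_need_inc() says a reader has sampled the value since the last bump, so write-only workloads stop paying for increments nobody will observe. A sketch of the two sides of that protocol, using the helpers from linux/iversion.h:

    #include <linux/fs.h>
    #include <linux/iversion.h>

    /* Writer: bump lazily.  With force == false the counter is only
     * incremented if it has been queried since the last increment; the
     * return value says whether the inode now needs dirtying. */
    static void note_data_change(struct inode *inode)
    {
        if (inode_maybe_inc_iversion(inode, false))
            mark_inode_dirty_sync(inode);
    }

    /* Reader: the querying accessor flags the value as observed, so the
     * very next change is guaranteed to increment it. */
    static u64 sample_change_attr(struct inode *inode)
    {
        return inode_query_iversion(inode);
    }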
diff --git a/fs/iomap.c b/fs/iomap.c
index 47d29ccffaef..afd163586aa0 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -65,6 +65,8 @@ iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
return ret;
if (WARN_ON(iomap.offset > pos))
return -EIO;
+ if (WARN_ON(iomap.length == 0))
+ return -EIO;
/*
* Cut down the length to the one actually provided by the filesystem,
@@ -753,7 +755,8 @@ static ssize_t iomap_dio_complete(struct iomap_dio *dio)
err = invalidate_inode_pages2_range(inode->i_mapping,
offset >> PAGE_SHIFT,
(offset + dio->size - 1) >> PAGE_SHIFT);
- WARN_ON_ONCE(err);
+ if (err)
+ dio_warn_stale_pagecache(iocb->ki_filp);
}
inode_dio_end(file_inode(iocb->ki_filp));
@@ -1018,9 +1021,16 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
if (ret)
goto out_free_dio;
+ /*
+ * Try to invalidate cache pages for the range we're direct
+ * writing. If this invalidation fails, tough, the write will
+ * still work, but racing two incompatible write paths is a
+ * pretty crazy thing to do, so we don't support it 100%.
+ */
ret = invalidate_inode_pages2_range(mapping,
start >> PAGE_SHIFT, end >> PAGE_SHIFT);
- WARN_ON_ONCE(ret);
+ if (ret)
+ dio_warn_stale_pagecache(iocb->ki_filp);
ret = 0;
if (iov_iter_rw(iter) == WRITE && !is_sync_kiocb(iocb) &&
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index d8c274d39ddb..eab04eca95a3 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -362,7 +362,6 @@ error_io:
ret = -EIO;
error:
mutex_unlock(&f->sem);
- jffs2_do_clear_inode(c, f);
iget_failed(inode);
return ERR_PTR(ret);
}
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index 9698e51656b1..c53d9cc5ae7a 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -832,7 +832,7 @@ void kernfs_drain_open_files(struct kernfs_node *kn)
* to see if it supports poll (Neither 'poll' nor 'select' return
* an appropriate error code). When in doubt, set a suitable timeout value.
*/
-static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
+static __poll_t kernfs_fop_poll(struct file *filp, poll_table *wait)
{
struct kernfs_open_file *of = kernfs_of(filp);
struct kernfs_node *kn = kernfs_dentry_node(filp->f_path.dentry);
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 066ac313ae5c..a2c0dfc6fdc0 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -48,13 +48,13 @@ void nlmclnt_next_cookie(struct nlm_cookie *c)
static struct nlm_lockowner *nlm_get_lockowner(struct nlm_lockowner *lockowner)
{
- atomic_inc(&lockowner->count);
+ refcount_inc(&lockowner->count);
return lockowner;
}
static void nlm_put_lockowner(struct nlm_lockowner *lockowner)
{
- if (!atomic_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
+ if (!refcount_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
return;
list_del(&lockowner->list);
spin_unlock(&lockowner->host->h_lock);
@@ -105,7 +105,7 @@ static struct nlm_lockowner *nlm_find_lockowner(struct nlm_host *host, fl_owner_
res = __nlm_find_lockowner(host, owner);
if (res == NULL && new != NULL) {
res = new;
- atomic_set(&new->count, 1);
+ refcount_set(&new->count, 1);
new->owner = owner;
new->pid = __nlm_alloc_pid(host);
new->host = nlm_get_host(host);
@@ -204,7 +204,7 @@ struct nlm_rqst *nlm_alloc_call(struct nlm_host *host)
for(;;) {
call = kzalloc(sizeof(*call), GFP_KERNEL);
if (call != NULL) {
- atomic_set(&call->a_count, 1);
+ refcount_set(&call->a_count, 1);
locks_init_lock(&call->a_args.lock.fl);
locks_init_lock(&call->a_res.lock.fl);
call->a_host = nlm_get_host(host);
@@ -222,7 +222,7 @@ void nlmclnt_release_call(struct nlm_rqst *call)
{
const struct nlmclnt_operations *nlmclnt_ops = call->a_host->h_nlmclnt_ops;
- if (!atomic_dec_and_test(&call->a_count))
+ if (!refcount_dec_and_test(&call->a_count))
return;
if (nlmclnt_ops && nlmclnt_ops->nlmclnt_release_call)
nlmclnt_ops->nlmclnt_release_call(call->a_callback_data);
@@ -678,7 +678,7 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
goto out;
}
- atomic_inc(&req->a_count);
+ refcount_inc(&req->a_count);
status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
if (status < 0)
@@ -769,7 +769,7 @@ static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl
nlmclnt_setlockargs(req, fl);
req->a_args.block = block;
- atomic_inc(&req->a_count);
+ refcount_inc(&req->a_count);
status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
NLMPROC_CANCEL, &nlmclnt_cancel_ops);
if (status == 0 && req->a_res.status == nlm_lck_denied)
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 826a89184f90..d35cd6be0675 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -114,7 +114,7 @@ static struct nlm_host *nlm_alloc_host(struct nlm_lookup_host_info *ni,
unsigned long now = jiffies;
if (nsm != NULL)
- atomic_inc(&nsm->sm_count);
+ refcount_inc(&nsm->sm_count);
else {
host = NULL;
nsm = nsm_get_handle(ni->net, ni->sap, ni->salen,
@@ -151,7 +151,7 @@ static struct nlm_host *nlm_alloc_host(struct nlm_lookup_host_info *ni,
host->h_state = 0;
host->h_nsmstate = 0;
host->h_pidcount = 0;
- atomic_set(&host->h_count, 1);
+ refcount_set(&host->h_count, 1);
mutex_init(&host->h_mutex);
host->h_nextrebind = now + NLM_HOST_REBIND;
host->h_expires = now + NLM_HOST_EXPIRE;
@@ -290,7 +290,7 @@ void nlmclnt_release_host(struct nlm_host *host)
WARN_ON_ONCE(host->h_server);
- if (atomic_dec_and_test(&host->h_count)) {
+ if (refcount_dec_and_test(&host->h_count)) {
WARN_ON_ONCE(!list_empty(&host->h_lockowners));
WARN_ON_ONCE(!list_empty(&host->h_granted));
WARN_ON_ONCE(!list_empty(&host->h_reclaim));
@@ -388,6 +388,8 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
ln->nrhosts++;
nrhosts++;
+ refcount_inc(&host->h_count);
+
dprintk("lockd: %s created host %s (%s)\n",
__func__, host->h_name, host->h_addrbuf);
@@ -410,7 +412,7 @@ void nlmsvc_release_host(struct nlm_host *host)
dprintk("lockd: release server host %s\n", host->h_name);
WARN_ON_ONCE(!host->h_server);
- atomic_dec(&host->h_count);
+ refcount_dec(&host->h_count);
}
/*
@@ -504,7 +506,7 @@ struct nlm_host * nlm_get_host(struct nlm_host *host)
{
if (host) {
dprintk("lockd: get host %s\n", host->h_name);
- atomic_inc(&host->h_count);
+ refcount_inc(&host->h_count);
host->h_expires = jiffies + NLM_HOST_EXPIRE;
}
return host;
@@ -593,7 +595,7 @@ static void nlm_complain_hosts(struct net *net)
if (net && host->net != net)
continue;
dprintk(" %s (cnt %d use %d exp %ld net %x)\n",
- host->h_name, atomic_read(&host->h_count),
+ host->h_name, refcount_read(&host->h_count),
host->h_inuse, host->h_expires, host->net->ns.inum);
}
}
@@ -662,16 +664,16 @@ nlm_gc_hosts(struct net *net)
for_each_host_safe(host, next, chain, nlm_server_hosts) {
if (net && host->net != net)
continue;
- if (atomic_read(&host->h_count) || host->h_inuse
- || time_before(jiffies, host->h_expires)) {
+ if (host->h_inuse || time_before(jiffies, host->h_expires)) {
dprintk("nlm_gc_hosts skipping %s "
"(cnt %d use %d exp %ld net %x)\n",
- host->h_name, atomic_read(&host->h_count),
+ host->h_name, refcount_read(&host->h_count),
host->h_inuse, host->h_expires,
host->net->ns.inum);
continue;
}
- nlm_destroy_host_locked(host);
+ if (refcount_dec_if_one(&host->h_count))
+ nlm_destroy_host_locked(host);
}
if (net) {
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index 96cfb2967ac7..654594ef4f94 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -191,7 +191,7 @@ void nsm_unmonitor(const struct nlm_host *host)
struct nsm_res res;
int status;
- if (atomic_read(&nsm->sm_count) == 1
+ if (refcount_read(&nsm->sm_count) == 1
&& nsm->sm_monitored && !nsm->sm_sticky) {
dprintk("lockd: nsm_unmonitor(%s)\n", nsm->sm_name);
@@ -279,7 +279,7 @@ static struct nsm_handle *nsm_create_handle(const struct sockaddr *sap,
if (unlikely(new == NULL))
return NULL;
- atomic_set(&new->sm_count, 1);
+ refcount_set(&new->sm_count, 1);
new->sm_name = (char *)(new + 1);
memcpy(nsm_addr(new), sap, salen);
new->sm_addrlen = salen;
@@ -337,13 +337,13 @@ retry:
cached = nsm_lookup_addr(&ln->nsm_handles, sap);
if (cached != NULL) {
- atomic_inc(&cached->sm_count);
+ refcount_inc(&cached->sm_count);
spin_unlock(&nsm_lock);
kfree(new);
dprintk("lockd: found nsm_handle for %s (%s), "
"cnt %d\n", cached->sm_name,
cached->sm_addrbuf,
- atomic_read(&cached->sm_count));
+ refcount_read(&cached->sm_count));
return cached;
}
@@ -388,12 +388,12 @@ struct nsm_handle *nsm_reboot_lookup(const struct net *net,
return cached;
}
- atomic_inc(&cached->sm_count);
+ refcount_inc(&cached->sm_count);
spin_unlock(&nsm_lock);
dprintk("lockd: host %s (%s) rebooted, cnt %d\n",
cached->sm_name, cached->sm_addrbuf,
- atomic_read(&cached->sm_count));
+ refcount_read(&cached->sm_count));
return cached;
}
@@ -404,7 +404,7 @@ struct nsm_handle *nsm_reboot_lookup(const struct net *net,
*/
void nsm_release(struct nsm_handle *nsm)
{
- if (atomic_dec_and_lock(&nsm->sm_count, &nsm_lock)) {
+ if (refcount_dec_and_lock(&nsm->sm_count, &nsm_lock)) {
list_del(&nsm->sm_link);
spin_unlock(&nsm_lock);
dprintk("lockd: destroyed nsm_handle for %s (%s)\n",
diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
index 0d670c5c378f..ea77c66d3cc3 100644
--- a/fs/lockd/svcproc.c
+++ b/fs/lockd/svcproc.c
@@ -295,7 +295,7 @@ static void nlmsvc_callback_exit(struct rpc_task *task, void *data)
void nlmsvc_release_call(struct nlm_rqst *call)
{
- if (!atomic_dec_and_test(&call->a_count))
+ if (!refcount_dec_and_test(&call->a_count))
return;
nlmsvc_release_host(call->a_host);
kfree(call);
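
The lockd changes above are a mechanical atomic_t to refcount_t conversion. The refcount API keeps the call shapes but saturates and WARNs on increment-from-zero, overflow, and underflow, and refcount_dec_if_one(), used by nlm_gc_hosts(), turns "destroy only if I hold the last reference" into a single atomic step instead of the old racy atomic_read() test. An illustrative object using the same calls:

    #include <linux/refcount.h>
    #include <linux/slab.h>

    struct thing {
        refcount_t count;
    };

    static struct thing *thing_new(void)
    {
        struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);

        if (t)
            refcount_set(&t->count, 1); /* must start non-zero */
        return t;
    }

    static struct thing *thing_get(struct thing *t)
    {
        refcount_inc(&t->count);        /* WARNs if it was 0: use-after-free */
        return t;
    }

    static void thing_put(struct thing *t)
    {
        if (refcount_dec_and_test(&t->count)) /* true only on the final put */
            kfree(t);
    }

    static void thing_gc(struct thing *t)
    {
        /* Drop and destroy only when ours is the last reference. */
        if (refcount_dec_if_one(&t->count))
            kfree(t);
    }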
diff --git a/fs/namei.c b/fs/namei.c
index 9cc91fb7f156..921ae32dbc80 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -391,50 +391,6 @@ static inline int do_inode_permission(struct inode *inode, int mask)
}
/**
- * __inode_permission - Check for access rights to a given inode
- * @inode: Inode to check permission on
- * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
- *
- * Check for read/write/execute permissions on an inode.
- *
- * When checking for MAY_APPEND, MAY_WRITE must also be set in @mask.
- *
- * This does not check for a read-only file system. You probably want
- * inode_permission().
- */
-int __inode_permission(struct inode *inode, int mask)
-{
- int retval;
-
- if (unlikely(mask & MAY_WRITE)) {
- /*
- * Nobody gets write access to an immutable file.
- */
- if (IS_IMMUTABLE(inode))
- return -EPERM;
-
- /*
- * Updating mtime will likely cause i_uid and i_gid to be
- * written back improperly if their true value is unknown
- * to the vfs.
- */
- if (HAS_UNMAPPED_ID(inode))
- return -EACCES;
- }
-
- retval = do_inode_permission(inode, mask);
- if (retval)
- return retval;
-
- retval = devcgroup_inode_permission(inode, mask);
- if (retval)
- return retval;
-
- return security_inode_permission(inode, mask);
-}
-EXPORT_SYMBOL(__inode_permission);
-
-/**
* sb_permission - Check superblock-level permissions
* @sb: Superblock of inode to check permission on
* @inode: Inode to check permission on
@@ -472,7 +428,32 @@ int inode_permission(struct inode *inode, int mask)
retval = sb_permission(inode->i_sb, inode, mask);
if (retval)
return retval;
- return __inode_permission(inode, mask);
+
+ if (unlikely(mask & MAY_WRITE)) {
+ /*
+ * Nobody gets write access to an immutable file.
+ */
+ if (IS_IMMUTABLE(inode))
+ return -EPERM;
+
+ /*
+ * Updating mtime will likely cause i_uid and i_gid to be
+ * written back improperly if their true value is unknown
+ * to the vfs.
+ */
+ if (HAS_UNMAPPED_ID(inode))
+ return -EACCES;
+ }
+
+ retval = do_inode_permission(inode, mask);
+ if (retval)
+ return retval;
+
+ retval = devcgroup_inode_permission(inode, mask);
+ if (retval)
+ return retval;
+
+ return security_inode_permission(inode, mask);
}
EXPORT_SYMBOL(inode_permission);
@@ -1133,9 +1114,6 @@ static int follow_automount(struct path *path, struct nameidata *nd,
path->dentry->d_inode)
return -EISDIR;
- if (path->dentry->d_sb->s_user_ns != &init_user_ns)
- return -EACCES;
-
nd->total_link_count++;
if (nd->total_link_count >= 40)
return -ELOOP;
@@ -2898,6 +2876,27 @@ int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
}
EXPORT_SYMBOL(vfs_create);
+int vfs_mkobj(struct dentry *dentry, umode_t mode,
+ int (*f)(struct dentry *, umode_t, void *),
+ void *arg)
+{
+ struct inode *dir = dentry->d_parent->d_inode;
+ int error = may_create(dir, dentry);
+ if (error)
+ return error;
+
+ mode &= S_IALLUGO;
+ mode |= S_IFREG;
+ error = security_inode_create(dir, dentry, mode);
+ if (error)
+ return error;
+ error = f(dentry, mode, arg);
+ if (!error)
+ fsnotify_create(dir, dentry);
+ return error;
+}
+EXPORT_SYMBOL(vfs_mkobj);
+
bool may_open_dev(const struct path *path)
{
return !(path->mnt->mnt_flags & MNT_NODEV) &&
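
The new vfs_mkobj() lets a filesystem create a special file at an already-looked-up negative dentry through a caller-supplied instantiation callback, with may_create(), the security hook, and the fsnotify event handled by the VFS. A hedged sketch of a caller; the names and inode setup are hypothetical:

    #include <linux/fs.h>

    /* Instantiation callback: wire up an inode for 'dentry' here and
     * return 0; 'arg' carries caller-private data through vfs_mkobj(). */
    static int my_instantiate(struct dentry *dentry, umode_t mode, void *arg)
    {
        return 0; /* inode creation elided in this sketch */
    }

    static int my_create(struct dentry *dentry, void *data)
    {
        /* vfs_mkobj() masks mode with S_IALLUGO and ORs in S_IFREG, so
         * only the permission bits passed here matter. */
        return vfs_mkobj(dentry, 0600, my_instantiate, data);
    }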
diff --git a/fs/ncpfs/sock.c b/fs/ncpfs/sock.c
index efb176b1751a..4c13174d85b7 100644
--- a/fs/ncpfs/sock.c
+++ b/fs/ncpfs/sock.c
@@ -39,7 +39,8 @@ static int _recv(struct socket *sock, void *buf, int size, unsigned flags)
{
struct msghdr msg = {NULL, };
struct kvec iov = {buf, size};
- return kernel_recvmsg(sock, &msg, &iov, 1, size, flags);
+ iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, size);
+ return sock_recvmsg(sock, &msg, flags);
}
static int _send(struct socket *sock, const void *buff, int len)
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index 995d707537da..7cb5c38c19e4 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -137,6 +137,11 @@ bl_alloc_init_bio(int npg, struct block_device *bdev, sector_t disk_sector,
return bio;
}
+static bool offset_in_map(u64 offset, struct pnfs_block_dev_map *map)
+{
+ return offset >= map->start && offset < map->start + map->len;
+}
+
static struct bio *
do_add_page_to_bio(struct bio *bio, int npg, int rw, sector_t isect,
struct page *page, struct pnfs_block_dev_map *map,
@@ -156,8 +161,8 @@ do_add_page_to_bio(struct bio *bio, int npg, int rw, sector_t isect,
/* translate to physical disk offset */
disk_addr = (u64)isect << SECTOR_SHIFT;
- if (disk_addr < map->start || disk_addr >= map->start + map->len) {
- if (!dev->map(dev, disk_addr, map))
+ if (!offset_in_map(disk_addr, map)) {
+ if (!dev->map(dev, disk_addr, map) || !offset_in_map(disk_addr, map))
return ERR_PTR(-EIO);
bio = bl_submit_bio(bio);
}
@@ -184,6 +189,29 @@ retry:
return bio;
}
+static void bl_mark_devices_unavailable(struct nfs_pgio_header *header, bool rw)
+{
+ struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg);
+ size_t bytes_left = header->args.count;
+ sector_t isect, extent_length = 0;
+ struct pnfs_block_extent be;
+
+ isect = header->args.offset >> SECTOR_SHIFT;
+ bytes_left += header->args.offset - (isect << SECTOR_SHIFT);
+
+ while (bytes_left > 0) {
+ if (!ext_tree_lookup(bl, isect, &be, rw))
+ return;
+ extent_length = be.be_length - (isect - be.be_f_offset);
+ nfs4_mark_deviceid_unavailable(be.be_device);
+ isect += extent_length;
+ if (bytes_left > extent_length << SECTOR_SHIFT)
+ bytes_left -= extent_length << SECTOR_SHIFT;
+ else
+ bytes_left = 0;
+ }
+}
+
static void bl_end_io_read(struct bio *bio)
{
struct parallel_io *par = bio->bi_private;
@@ -194,6 +222,7 @@ static void bl_end_io_read(struct bio *bio)
if (!header->pnfs_error)
header->pnfs_error = -EIO;
pnfs_set_lo_fail(header->lseg);
+ bl_mark_devices_unavailable(header, false);
}
bio_put(bio);
@@ -323,6 +352,7 @@ static void bl_end_io_write(struct bio *bio)
if (!header->pnfs_error)
header->pnfs_error = -EIO;
pnfs_set_lo_fail(header->lseg);
+ bl_mark_devices_unavailable(header, true);
}
bio_put(bio);
put_parallel(par);
@@ -552,6 +582,31 @@ static int decode_sector_number(__be32 **rp, sector_t *sp)
return 0;
}
+static struct nfs4_deviceid_node *
+bl_find_get_deviceid(struct nfs_server *server,
+ const struct nfs4_deviceid *id, struct rpc_cred *cred,
+ gfp_t gfp_mask)
+{
+ struct nfs4_deviceid_node *node;
+ unsigned long start, end;
+
+retry:
+ node = nfs4_find_get_deviceid(server, id, cred, gfp_mask);
+ if (!node)
+ return ERR_PTR(-ENODEV);
+
+ if (test_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags) == 0)
+ return node;
+
+ end = jiffies;
+ start = end - PNFS_DEVICE_RETRY_TIMEOUT;
+ if (!time_in_range(node->timestamp_unavailable, start, end)) {
+ nfs4_delete_deviceid(node->ld, node->nfs_client, id);
+ goto retry;
+ }
+ return ERR_PTR(-ENODEV);
+}
+
static int
bl_alloc_extent(struct xdr_stream *xdr, struct pnfs_layout_hdr *lo,
struct layout_verification *lv, struct list_head *extents,
@@ -573,16 +628,18 @@ bl_alloc_extent(struct xdr_stream *xdr, struct pnfs_layout_hdr *lo,
memcpy(&id, p, NFS4_DEVICEID4_SIZE);
p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);
- error = -EIO;
- be->be_device = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode), &id,
+ be->be_device = bl_find_get_deviceid(NFS_SERVER(lo->plh_inode), &id,
lo->plh_lc_cred, gfp_mask);
- if (!be->be_device)
+ if (IS_ERR(be->be_device)) {
+ error = PTR_ERR(be->be_device);
goto out_free_be;
+ }
/*
* The next three values are read in as bytes, but stored in the
* extent structure in 512-byte granularity.
*/
+ error = -EIO;
if (decode_sector_number(&p, &be->be_f_offset) < 0)
goto out_put_deviceid;
if (decode_sector_number(&p, &be->be_length) < 0)
@@ -692,11 +749,16 @@ out_free_scratch:
__free_page(scratch);
out:
dprintk("%s returns %d\n", __func__, status);
- if (status) {
+ switch (status) {
+ case -ENODEV:
+ /* Our extent block devices are unavailable */
+ set_bit(NFS_LSEG_UNAVAILABLE, &lseg->pls_flags);
+ case 0:
+ return lseg;
+ default:
kfree(lseg);
return ERR_PTR(status);
}
- return lseg;
}
static void
@@ -798,6 +860,13 @@ bl_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
}
pnfs_generic_pg_init_read(pgio, req);
+
+ if (pgio->pg_lseg &&
+ test_bit(NFS_LSEG_UNAVAILABLE, &pgio->pg_lseg->pls_flags)) {
+ pnfs_error_mark_layout_for_return(pgio->pg_inode, pgio->pg_lseg);
+ pnfs_set_lo_fail(pgio->pg_lseg);
+ nfs_pageio_reset_read_mds(pgio);
+ }
}
/*
@@ -853,6 +922,14 @@ bl_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
wb_size = nfs_dreq_bytes_left(pgio->pg_dreq);
pnfs_generic_pg_init_write(pgio, req, wb_size);
+
+ if (pgio->pg_lseg &&
+ test_bit(NFS_LSEG_UNAVAILABLE, &pgio->pg_lseg->pls_flags)) {
+
+ pnfs_error_mark_layout_for_return(pgio->pg_inode, pgio->pg_lseg);
+ pnfs_set_lo_fail(pgio->pg_lseg);
+ nfs_pageio_reset_write_mds(pgio);
+ }
}
/*
@@ -887,6 +964,7 @@ static struct pnfs_layoutdriver_type blocklayout_type = {
.name = "LAYOUT_BLOCK_VOLUME",
.owner = THIS_MODULE,
.flags = PNFS_LAYOUTRET_ON_SETATTR |
+ PNFS_LAYOUTRET_ON_ERROR |
PNFS_READ_WHOLE_PAGE,
.read_pagelist = bl_read_pagelist,
.write_pagelist = bl_write_pagelist,
@@ -910,6 +988,7 @@ static struct pnfs_layoutdriver_type scsilayout_type = {
.name = "LAYOUT_SCSI",
.owner = THIS_MODULE,
.flags = PNFS_LAYOUTRET_ON_SETATTR |
+ PNFS_LAYOUTRET_ON_ERROR |
PNFS_READ_WHOLE_PAGE,
.read_pagelist = bl_read_pagelist,
.write_pagelist = bl_write_pagelist,
@@ -967,6 +1046,7 @@ static void __exit nfs4blocklayout_exit(void)
}
MODULE_ALIAS("nfs-layouttype4-3");
+MODULE_ALIAS("nfs-layouttype4-5");
module_init(nfs4blocklayout_init);
module_exit(nfs4blocklayout_exit);
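
The transient-device handling above caches a failure for a bounded window: a deviceid marked NFS_DEVICEID_UNAVAILABLE is trusted only while its timestamp is within the retry timeout, after which bl_find_get_deviceid() deletes the node and fetches it afresh. The wraparound-safe window test is the reusable part; a sketch with an illustrative timeout:

    #include <linux/jiffies.h>
    #include <linux/types.h>

    #define DEVICE_RETRY_TIMEOUT (120 * HZ) /* illustrative value */

    static bool failure_still_fresh(unsigned long stamped)
    {
        unsigned long end = jiffies;
        unsigned long start = end - DEVICE_RETRY_TIMEOUT;

        /* time_in_range() handles jiffies wraparound, unlike a raw
         * "stamped >= start" comparison. */
        return time_in_range(stamped, start, end);
    }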
diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h
index efc007f00742..716bc75e9ed2 100644
--- a/fs/nfs/blocklayout/blocklayout.h
+++ b/fs/nfs/blocklayout/blocklayout.h
@@ -92,10 +92,9 @@ struct pnfs_block_volume {
};
struct pnfs_block_dev_map {
- sector_t start;
- sector_t len;
-
- sector_t disk_offset;
+ u64 start;
+ u64 len;
+ u64 disk_offset;
struct block_device *bdev;
};
diff --git a/fs/nfs/blocklayout/dev.c b/fs/nfs/blocklayout/dev.c
index 95f74bd2c067..a7efd83779d2 100644
--- a/fs/nfs/blocklayout/dev.c
+++ b/fs/nfs/blocklayout/dev.c
@@ -533,14 +533,11 @@ bl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
goto out_free_volumes;
ret = bl_parse_deviceid(server, top, volumes, nr_volumes - 1, gfp_mask);
- if (ret) {
- bl_free_device(top);
- kfree(top);
- goto out_free_volumes;
- }
node = &top->node;
nfs4_init_deviceid_node(node, server, &pdev->dev_id);
+ if (ret)
+ nfs4_mark_deviceid_unavailable(node);
out_free_volumes:
kfree(volumes);
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index ade44ca0c66c..d8b47624fee2 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -12,6 +12,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/iversion.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
@@ -347,7 +348,7 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
nfs4_stateid_copy(&delegation->stateid, &res->delegation);
delegation->type = res->delegation_type;
delegation->pagemod_limit = res->pagemod_limit;
- delegation->change_attr = inode->i_version;
+ delegation->change_attr = inode_peek_iversion_raw(inode);
delegation->cred = get_rpccred(cred);
delegation->inode = inode;
delegation->flags = 1<<NFS_DELEGATION_REFERENCED;
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index d2972d537469..8c10b0562e75 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -775,10 +775,8 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
spin_lock(&dreq->lock);
- if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
- dreq->flags = 0;
+ if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
dreq->error = hdr->error;
- }
if (dreq->error == 0) {
nfs_direct_good_bytes(dreq, hdr);
if (nfs_write_need_commit(hdr)) {
diff --git a/fs/nfs/export.c b/fs/nfs/export.c
index 83fd09fc8f77..ab5de3246c5c 100644
--- a/fs/nfs/export.c
+++ b/fs/nfs/export.c
@@ -48,10 +48,6 @@ nfs_encode_fh(struct inode *inode, __u32 *p, int *max_len, struct inode *parent)
*max_len = len;
return FILEID_INVALID;
}
- if (IS_AUTOMOUNT(inode)) {
- *max_len = FILEID_INVALID;
- goto out;
- }
p[FILEID_HIGH_OFF] = NFS_FILEID(inode) >> 32;
p[FILEID_LOW_OFF] = NFS_FILEID(inode);
@@ -59,7 +55,6 @@ nfs_encode_fh(struct inode *inode, __u32 *p, int *max_len, struct inode *parent)
p[len - 1] = 0; /* Padding */
nfs_copy_fh(clnt_fh, server_fh);
*max_len = len;
-out:
dprintk("%s: result fh fileid %llu mode %u size %d\n",
__func__, NFS_FILEID(inode), inode->i_mode, *max_len);
return *max_len;
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index 4e54d8b5413a..d175724ff566 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -895,9 +895,7 @@ fl_pnfs_update_layout(struct inode *ino,
lseg = pnfs_update_layout(ino, ctx, pos, count, iomode, strict_iomode,
gfp_flags);
- if (!lseg)
- lseg = ERR_PTR(-ENOMEM);
- if (IS_ERR(lseg))
+ if (IS_ERR_OR_NULL(lseg))
goto out;
lo = NFS_I(ino)->layout;
diff --git a/fs/nfs/fscache-index.c b/fs/nfs/fscache-index.c
index 3025fe8584a0..0ee4b93d36ea 100644
--- a/fs/nfs/fscache-index.c
+++ b/fs/nfs/fscache-index.c
@@ -16,6 +16,7 @@
#include <linux/nfs_fs.h>
#include <linux/nfs_fs_sb.h>
#include <linux/in6.h>
+#include <linux/iversion.h>
#include "internal.h"
#include "fscache.h"
@@ -211,7 +212,7 @@ static uint16_t nfs_fscache_inode_get_aux(const void *cookie_netfs_data,
auxdata.ctime = nfsi->vfs_inode.i_ctime;
if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
- auxdata.change_attr = nfsi->vfs_inode.i_version;
+ auxdata.change_attr = inode_peek_iversion_raw(&nfsi->vfs_inode);
if (bufmax > sizeof(auxdata))
bufmax = sizeof(auxdata);
@@ -243,7 +244,7 @@ enum fscache_checkaux nfs_fscache_inode_check_aux(void *cookie_netfs_data,
auxdata.ctime = nfsi->vfs_inode.i_ctime;
if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
- auxdata.change_attr = nfsi->vfs_inode.i_version;
+ auxdata.change_attr = inode_peek_iversion_raw(&nfsi->vfs_inode);
if (memcmp(data, &auxdata, datalen) != 0)
return FSCACHE_CHECKAUX_OBSOLETE;
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index b992d2382ffa..ceeaf0fb6657 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -38,8 +38,8 @@
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/freezer.h>
-
#include <linux/uaccess.h>
+#include <linux/iversion.h>
#include "nfs4_fs.h"
#include "callback.h"
@@ -483,7 +483,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
memset(&inode->i_atime, 0, sizeof(inode->i_atime));
memset(&inode->i_mtime, 0, sizeof(inode->i_mtime));
memset(&inode->i_ctime, 0, sizeof(inode->i_ctime));
- inode->i_version = 0;
+ inode_set_iversion_raw(inode, 0);
inode->i_size = 0;
clear_nlink(inode);
inode->i_uid = make_kuid(&init_user_ns, -2);
@@ -508,7 +508,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
else if (nfs_server_capable(inode, NFS_CAP_CTIME))
nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
if (fattr->valid & NFS_ATTR_FATTR_CHANGE)
- inode->i_version = fattr->change_attr;
+ inode_set_iversion_raw(inode, fattr->change_attr);
else
nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR
| NFS_INO_REVAL_PAGECACHE);
@@ -735,12 +735,20 @@ int nfs_getattr(const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int query_flags)
{
struct inode *inode = d_inode(path->dentry);
- int need_atime = NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATIME;
+ struct nfs_server *server = NFS_SERVER(inode);
+ unsigned long cache_validity;
int err = 0;
+ bool force_sync = query_flags & AT_STATX_FORCE_SYNC;
+ bool do_update = false;
trace_nfs_getattr_enter(inode);
+
+ if ((query_flags & AT_STATX_DONT_SYNC) && !force_sync)
+ goto out_no_update;
+
/* Flush out writes to the server in order to update c/mtime. */
- if (S_ISREG(inode->i_mode)) {
+ if ((request_mask & (STATX_CTIME|STATX_MTIME)) &&
+ S_ISREG(inode->i_mode)) {
err = filemap_write_and_wait(inode->i_mapping);
if (err)
goto out;
@@ -757,24 +765,42 @@ int nfs_getattr(const struct path *path, struct kstat *stat,
*/
if ((path->mnt->mnt_flags & MNT_NOATIME) ||
((path->mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
- need_atime = 0;
-
- if (need_atime || nfs_need_revalidate_inode(inode)) {
- struct nfs_server *server = NFS_SERVER(inode);
-
+ request_mask &= ~STATX_ATIME;
+
+ /* Is the user requesting attributes that might need revalidation? */
+ if (!(request_mask & (STATX_MODE|STATX_NLINK|STATX_ATIME|STATX_CTIME|
+ STATX_MTIME|STATX_UID|STATX_GID|
+ STATX_SIZE|STATX_BLOCKS)))
+ goto out_no_revalidate;
+
+ /* Check whether the cached attributes are stale */
+ do_update |= force_sync || nfs_attribute_cache_expired(inode);
+ cache_validity = READ_ONCE(NFS_I(inode)->cache_validity);
+ do_update |= cache_validity &
+ (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_LABEL);
+ if (request_mask & STATX_ATIME)
+ do_update |= cache_validity & NFS_INO_INVALID_ATIME;
+ if (request_mask & (STATX_CTIME|STATX_MTIME))
+ do_update |= cache_validity & NFS_INO_REVAL_PAGECACHE;
+ if (do_update) {
+ /* Update the attribute cache */
if (!(server->flags & NFS_MOUNT_NOAC))
nfs_readdirplus_parent_cache_miss(path->dentry);
else
nfs_readdirplus_parent_cache_hit(path->dentry);
err = __nfs_revalidate_inode(server, inode);
+ if (err)
+ goto out;
} else
nfs_readdirplus_parent_cache_hit(path->dentry);
- if (!err) {
- generic_fillattr(inode, stat);
- stat->ino = nfs_compat_user_ino64(NFS_FILEID(inode));
- if (S_ISDIR(inode->i_mode))
- stat->blksize = NFS_SERVER(inode)->dtsize;
- }
+out_no_revalidate:
+ /* Only return attributes that were revalidated. */
+ stat->result_mask &= request_mask;
+out_no_update:
+ generic_fillattr(inode, stat);
+ stat->ino = nfs_compat_user_ino64(NFS_FILEID(inode));
+ if (S_ISDIR(inode->i_mode))
+ stat->blksize = NFS_SERVER(inode)->dtsize;
out:
trace_nfs_getattr_exit(inode, err);
return err;
@@ -1144,7 +1170,6 @@ static int nfs_invalidate_mapping(struct inode *inode, struct address_space *map
if (mapping->nrpages != 0) {
if (S_ISREG(inode->i_mode)) {
- unmap_mapping_range(mapping, 0, 0, 0);
ret = nfs_sync_mapping(mapping);
if (ret < 0)
return ret;
@@ -1289,8 +1314,8 @@ static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr
if ((fattr->valid & NFS_ATTR_FATTR_PRECHANGE)
&& (fattr->valid & NFS_ATTR_FATTR_CHANGE)
- && inode->i_version == fattr->pre_change_attr) {
- inode->i_version = fattr->change_attr;
+ && !inode_cmp_iversion_raw(inode, fattr->pre_change_attr)) {
+ inode_set_iversion_raw(inode, fattr->change_attr);
if (S_ISDIR(inode->i_mode))
nfs_set_cache_invalid(inode, NFS_INO_INVALID_DATA);
ret |= NFS_INO_INVALID_ATTR;
@@ -1348,7 +1373,7 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
if (!nfs_file_has_buffered_writers(nfsi)) {
/* Verify a few of the more important attributes */
- if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 && inode->i_version != fattr->change_attr)
+ if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 && inode_cmp_iversion_raw(inode, fattr->change_attr))
invalid |= NFS_INO_INVALID_ATTR | NFS_INO_REVAL_PAGECACHE;
if ((fattr->valid & NFS_ATTR_FATTR_MTIME) && !timespec_equal(&inode->i_mtime, &fattr->mtime))
@@ -1642,7 +1667,7 @@ int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fa
}
if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 &&
(fattr->valid & NFS_ATTR_FATTR_PRECHANGE) == 0) {
- fattr->pre_change_attr = inode->i_version;
+ fattr->pre_change_attr = inode_peek_iversion_raw(inode);
fattr->valid |= NFS_ATTR_FATTR_PRECHANGE;
}
if ((fattr->valid & NFS_ATTR_FATTR_CTIME) != 0 &&
@@ -1778,7 +1803,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
/* More cache consistency checks */
if (fattr->valid & NFS_ATTR_FATTR_CHANGE) {
- if (inode->i_version != fattr->change_attr) {
+ if (inode_cmp_iversion_raw(inode, fattr->change_attr)) {
dprintk("NFS: change_attr change on server for file %s/%ld\n",
inode->i_sb->s_id, inode->i_ino);
/* Could it be a race with writeback? */
@@ -1790,7 +1815,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
if (S_ISDIR(inode->i_mode))
nfs_force_lookup_revalidate(inode);
}
- inode->i_version = fattr->change_attr;
+ inode_set_iversion_raw(inode, fattr->change_attr);
}
} else {
nfsi->cache_validity |= save_cache_validity;
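[Editor's note: the nfs_getattr() rework above trades server round-trips for statx precision — revalidation happens only when the AT_STATX_* flags and the request mask call for it. A condensed, stand-alone sketch of the decision flow; the helper name and cache_expired parameter are stand-ins, not NFS internals:]

#include <linux/fcntl.h>	/* AT_STATX_DONT_SYNC, AT_STATX_FORCE_SYNC */
#include <linux/stat.h>		/* STATX_* mask bits */

/* Returns nonzero when a round-trip to the server is warranted. */
static int statx_needs_revalidate(unsigned int query_flags,
				  u32 request_mask, bool cache_expired)
{
	if ((query_flags & AT_STATX_DONT_SYNC) &&
	    !(query_flags & AT_STATX_FORCE_SYNC))
		return 0;	/* caller settles for cached values */
	if (!(request_mask & (STATX_MODE | STATX_NLINK | STATX_ATIME |
			      STATX_CTIME | STATX_MTIME | STATX_UID |
			      STATX_GID | STATX_SIZE | STATX_BLOCKS)))
		return 0;	/* nothing requested needs revalidating */
	return (query_flags & AT_STATX_FORCE_SYNC) || cache_expired;
}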
diff --git a/fs/nfs/io.c b/fs/nfs/io.c
index 20fef85d2bb1..9034b4926909 100644
--- a/fs/nfs/io.c
+++ b/fs/nfs/io.c
@@ -99,7 +99,7 @@ static void nfs_block_buffered(struct nfs_inode *nfsi, struct inode *inode)
{
if (!test_bit(NFS_INO_ODIRECT, &nfsi->flags)) {
set_bit(NFS_INO_ODIRECT, &nfsi->flags);
- nfs_wb_all(inode);
+ nfs_sync_mapping(inode->i_mapping);
}
}
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 65a7e5da508c..04612c24d394 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -861,6 +861,7 @@ static int nfs4_set_client(struct nfs_server *server,
set_bit(NFS_CS_MIGRATION, &cl_init.init_flags);
if (test_bit(NFS_MIG_TSM_POSSIBLE, &server->mig_status))
set_bit(NFS_CS_TSM_POSSIBLE, &cl_init.init_flags);
+ server->port = rpc_get_port(addr);
/* Allocate or find a client reference we can use */
clp = nfs_get_client(&cl_init);
@@ -1123,19 +1124,36 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
/* Initialise the client representation from the parent server */
nfs_server_copy_userdata(server, parent_server);
- /* Get a client representation.
- * Note: NFSv4 always uses TCP, */
+ /* Get a client representation */
+#ifdef CONFIG_SUNRPC_XPRT_RDMA
+ rpc_set_port(data->addr, NFS_RDMA_PORT);
error = nfs4_set_client(server, data->hostname,
data->addr,
data->addrlen,
parent_client->cl_ipaddr,
- rpc_protocol(parent_server->client),
+ XPRT_TRANSPORT_RDMA,
+ parent_server->client->cl_timeout,
+ parent_client->cl_mvops->minor_version,
+ parent_client->cl_net);
+ if (!error)
+ goto init_server;
+#endif /* CONFIG_SUNRPC_XPRT_RDMA */
+
+ rpc_set_port(data->addr, NFS_PORT);
+ error = nfs4_set_client(server, data->hostname,
+ data->addr,
+ data->addrlen,
+ parent_client->cl_ipaddr,
+ XPRT_TRANSPORT_TCP,
parent_server->client->cl_timeout,
parent_client->cl_mvops->minor_version,
parent_client->cl_net);
if (error < 0)
goto error;
+#ifdef CONFIG_SUNRPC_XPRT_RDMA
+init_server:
+#endif
error = nfs_init_server_rpcclient(server, parent_server->client->cl_timeout, data->authflavor);
if (error < 0)
goto error;
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 626d1382002e..6b3b372b59b9 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -8,7 +8,6 @@
#include <linux/file.h>
#include <linux/falloc.h>
#include <linux/nfs_fs.h>
-#include <uapi/linux/btrfs.h> /* BTRFS_IOC_CLONE/BTRFS_IOC_CLONE_RANGE */
#include "delegation.h"
#include "internal.h"
#include "iostat.h"
diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c
index 30426c1a1bbd..22dc30a679a0 100644
--- a/fs/nfs/nfs4idmap.c
+++ b/fs/nfs/nfs4idmap.c
@@ -568,9 +568,13 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
struct idmap_msg *im;
struct idmap *idmap = (struct idmap *)aux;
struct key *key = cons->key;
- int ret = -ENOMEM;
+ int ret = -ENOKEY;
+
+ if (!aux)
+ goto out1;
/* msg and im are freed in idmap_pipe_destroy_msg */
+ ret = -ENOMEM;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
goto out1;
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
index 8c3f327d858d..24f06dcc2b08 100644
--- a/fs/nfs/nfs4namespace.c
+++ b/fs/nfs/nfs4namespace.c
@@ -270,8 +270,6 @@ static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
if (mountdata->addrlen == 0)
continue;
- rpc_set_port(mountdata->addr, NFS_PORT);
-
memcpy(page2, buf->data, buf->len);
page2[buf->len] = '\0';
mountdata->hostname = page2;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 56fa5a16e097..47f3c273245e 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -54,6 +54,7 @@
#include <linux/xattr.h>
#include <linux/utsname.h>
#include <linux/freezer.h>
+#include <linux/iversion.h>
#include "nfs4_fs.h"
#include "delegation.h"
@@ -1045,16 +1046,16 @@ static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo,
spin_lock(&dir->i_lock);
nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
- if (cinfo->atomic && cinfo->before == dir->i_version) {
+ if (cinfo->atomic && cinfo->before == inode_peek_iversion_raw(dir)) {
nfsi->cache_validity &= ~NFS_INO_REVAL_PAGECACHE;
nfsi->attrtimeo_timestamp = jiffies;
} else {
nfs_force_lookup_revalidate(dir);
- if (cinfo->before != dir->i_version)
+ if (cinfo->before != inode_peek_iversion_raw(dir))
nfsi->cache_validity |= NFS_INO_INVALID_ACCESS |
NFS_INO_INVALID_ACL;
}
- dir->i_version = cinfo->after;
+ inode_set_iversion_raw(dir, cinfo->after);
nfsi->read_cache_jiffies = timestamp;
nfsi->attr_gencount = nfs_inc_attr_generation_counter();
nfs_fscache_invalidate(dir);
@@ -2019,7 +2020,7 @@ static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *sta
return ret;
}
-static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, int err)
+static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, struct file_lock *fl, int err)
{
switch (err) {
default:
@@ -2066,7 +2067,11 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
return -EAGAIN;
case -ENOMEM:
case -NFS4ERR_DENIED:
- /* kill_proc(fl->fl_pid, SIGLOST, 1); */
+ if (fl) {
+ struct nfs4_lock_state *lsp = fl->fl_u.nfs4_fl.owner;
+ if (lsp)
+ set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
+ }
return 0;
}
return err;
@@ -2102,7 +2107,7 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
err = nfs4_open_recover_helper(opendata, FMODE_READ);
}
nfs4_opendata_put(opendata);
- return nfs4_handle_delegation_recall_error(server, state, stateid, err);
+ return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err);
}
static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
@@ -2454,7 +2459,8 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
data->file_created = true;
else if (o_res->cinfo.before != o_res->cinfo.after)
data->file_created = true;
- if (data->file_created || dir->i_version != o_res->cinfo.after)
+ if (data->file_created ||
+ inode_peek_iversion_raw(dir) != o_res->cinfo.after)
update_changeattr(dir, &o_res->cinfo,
o_res->f_attr->time_start);
}
@@ -3148,6 +3154,11 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
struct nfs4_state *state = calldata->state;
struct nfs_server *server = NFS_SERVER(calldata->inode);
nfs4_stateid *res_stateid = NULL;
+ struct nfs4_exception exception = {
+ .state = state,
+ .inode = calldata->inode,
+ .stateid = &calldata->arg.stateid,
+ };
dprintk("%s: begin!\n", __func__);
if (!nfs4_sequence_done(task, &calldata->res.seq_res))
@@ -3213,7 +3224,9 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
case -NFS4ERR_BAD_STATEID:
break;
default:
- if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN)
+ task->tk_status = nfs4_async_handle_exception(task,
+ server, task->tk_status, &exception);
+ if (exception.retry)
goto out_restart;
}
nfs_clear_open_stateid(state, &calldata->arg.stateid,
@@ -5757,6 +5770,10 @@ struct nfs4_delegreturndata {
static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
{
struct nfs4_delegreturndata *data = calldata;
+ struct nfs4_exception exception = {
+ .inode = data->inode,
+ .stateid = &data->stateid,
+ };
if (!nfs4_sequence_done(task, &data->res.seq_res))
return;
@@ -5818,10 +5835,11 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
}
/* Fallthrough */
default:
- if (nfs4_async_handle_error(task, data->res.server,
- NULL, NULL) == -EAGAIN) {
+ task->tk_status = nfs4_async_handle_exception(task,
+ data->res.server, task->tk_status,
+ &exception);
+ if (exception.retry)
goto out_restart;
- }
}
data->rpc_status = task->tk_status;
return;
@@ -6059,6 +6077,10 @@ static void nfs4_locku_release_calldata(void *data)
static void nfs4_locku_done(struct rpc_task *task, void *data)
{
struct nfs4_unlockdata *calldata = data;
+ struct nfs4_exception exception = {
+ .inode = calldata->lsp->ls_state->inode,
+ .stateid = &calldata->arg.stateid,
+ };
if (!nfs4_sequence_done(task, &calldata->res.seq_res))
return;
@@ -6082,8 +6104,10 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
rpc_restart_call_prepare(task);
break;
default:
- if (nfs4_async_handle_error(task, calldata->server,
- NULL, NULL) == -EAGAIN)
+ task->tk_status = nfs4_async_handle_exception(task,
+ calldata->server, task->tk_status,
+ &exception);
+ if (exception.retry)
rpc_restart_call_prepare(task);
}
nfs_release_seqid(calldata->arg.seqid);
@@ -6739,7 +6763,7 @@ int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state,
if (err != 0)
return err;
err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
- return nfs4_handle_delegation_recall_error(server, state, stateid, err);
+ return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err);
}
struct nfs_release_lockowner_data {
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index e4f4a09ed9f4..91a4d4eeb235 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1482,6 +1482,7 @@ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_
struct inode *inode = state->inode;
struct nfs_inode *nfsi = NFS_I(inode);
struct file_lock *fl;
+ struct nfs4_lock_state *lsp;
int status = 0;
struct file_lock_context *flctx = inode->i_flctx;
struct list_head *list;
@@ -1522,7 +1523,9 @@ restart:
case -NFS4ERR_DENIED:
case -NFS4ERR_RECLAIM_BAD:
case -NFS4ERR_RECLAIM_CONFLICT:
- /* kill_proc(fl->fl_pid, SIGLOST, 1); */
+ lsp = fl->fl_u.nfs4_fl.owner;
+ if (lsp)
+ set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
status = 0;
}
spin_lock(&flctx->flc_lock);
diff --git a/fs/nfs/nfs4sysctl.c b/fs/nfs/nfs4sysctl.c
index 0d91d84e5822..c394e4447100 100644
--- a/fs/nfs/nfs4sysctl.c
+++ b/fs/nfs/nfs4sysctl.c
@@ -32,7 +32,7 @@ static struct ctl_table nfs4_cb_sysctls[] = {
.data = &nfs_idmap_cache_timeout,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
+ .proc_handler = proc_dointvec,
},
{ }
};
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 77c6729e57f0..65c9c4175145 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -7678,6 +7678,22 @@ nfs4_stat_to_errno(int stat)
.p_name = #proc, \
}
+#if defined(CONFIG_NFS_V4_1)
+#define PROC41(proc, argtype, restype) \
+ PROC(proc, argtype, restype)
+#else
+#define PROC41(proc, argtype, restype) \
+ STUB(proc)
+#endif
+
+#if defined(CONFIG_NFS_V4_2)
+#define PROC42(proc, argtype, restype) \
+ PROC(proc, argtype, restype)
+#else
+#define PROC42(proc, argtype, restype) \
+ STUB(proc)
+#endif
+
const struct rpc_procinfo nfs4_procedures[] = {
PROC(READ, enc_read, dec_read),
PROC(WRITE, enc_write, dec_write),
@@ -7698,7 +7714,6 @@ const struct rpc_procinfo nfs4_procedures[] = {
PROC(ACCESS, enc_access, dec_access),
PROC(GETATTR, enc_getattr, dec_getattr),
PROC(LOOKUP, enc_lookup, dec_lookup),
- PROC(LOOKUPP, enc_lookupp, dec_lookupp),
PROC(LOOKUP_ROOT, enc_lookup_root, dec_lookup_root),
PROC(REMOVE, enc_remove, dec_remove),
PROC(RENAME, enc_rename, dec_rename),
@@ -7717,33 +7732,30 @@ const struct rpc_procinfo nfs4_procedures[] = {
PROC(RELEASE_LOCKOWNER, enc_release_lockowner, dec_release_lockowner),
PROC(SECINFO, enc_secinfo, dec_secinfo),
PROC(FSID_PRESENT, enc_fsid_present, dec_fsid_present),
-#if defined(CONFIG_NFS_V4_1)
- PROC(EXCHANGE_ID, enc_exchange_id, dec_exchange_id),
- PROC(CREATE_SESSION, enc_create_session, dec_create_session),
- PROC(DESTROY_SESSION, enc_destroy_session, dec_destroy_session),
- PROC(SEQUENCE, enc_sequence, dec_sequence),
- PROC(GET_LEASE_TIME, enc_get_lease_time, dec_get_lease_time),
- PROC(RECLAIM_COMPLETE, enc_reclaim_complete, dec_reclaim_complete),
- PROC(GETDEVICEINFO, enc_getdeviceinfo, dec_getdeviceinfo),
- PROC(LAYOUTGET, enc_layoutget, dec_layoutget),
- PROC(LAYOUTCOMMIT, enc_layoutcommit, dec_layoutcommit),
- PROC(LAYOUTRETURN, enc_layoutreturn, dec_layoutreturn),
- PROC(SECINFO_NO_NAME, enc_secinfo_no_name, dec_secinfo_no_name),
- PROC(TEST_STATEID, enc_test_stateid, dec_test_stateid),
- PROC(FREE_STATEID, enc_free_stateid, dec_free_stateid),
+ PROC41(EXCHANGE_ID, enc_exchange_id, dec_exchange_id),
+ PROC41(CREATE_SESSION, enc_create_session, dec_create_session),
+ PROC41(DESTROY_SESSION, enc_destroy_session, dec_destroy_session),
+ PROC41(SEQUENCE, enc_sequence, dec_sequence),
+ PROC41(GET_LEASE_TIME, enc_get_lease_time, dec_get_lease_time),
+ PROC41(RECLAIM_COMPLETE,enc_reclaim_complete, dec_reclaim_complete),
+ PROC41(GETDEVICEINFO, enc_getdeviceinfo, dec_getdeviceinfo),
+ PROC41(LAYOUTGET, enc_layoutget, dec_layoutget),
+ PROC41(LAYOUTCOMMIT, enc_layoutcommit, dec_layoutcommit),
+ PROC41(LAYOUTRETURN, enc_layoutreturn, dec_layoutreturn),
+ PROC41(SECINFO_NO_NAME, enc_secinfo_no_name, dec_secinfo_no_name),
+ PROC41(TEST_STATEID, enc_test_stateid, dec_test_stateid),
+ PROC41(FREE_STATEID, enc_free_stateid, dec_free_stateid),
STUB(GETDEVICELIST),
- PROC(BIND_CONN_TO_SESSION,
+ PROC41(BIND_CONN_TO_SESSION,
enc_bind_conn_to_session, dec_bind_conn_to_session),
- PROC(DESTROY_CLIENTID, enc_destroy_clientid, dec_destroy_clientid),
-#endif /* CONFIG_NFS_V4_1 */
-#ifdef CONFIG_NFS_V4_2
- PROC(SEEK, enc_seek, dec_seek),
- PROC(ALLOCATE, enc_allocate, dec_allocate),
- PROC(DEALLOCATE, enc_deallocate, dec_deallocate),
- PROC(LAYOUTSTATS, enc_layoutstats, dec_layoutstats),
- PROC(CLONE, enc_clone, dec_clone),
- PROC(COPY, enc_copy, dec_copy),
-#endif /* CONFIG_NFS_V4_2 */
+ PROC41(DESTROY_CLIENTID,enc_destroy_clientid, dec_destroy_clientid),
+ PROC42(SEEK, enc_seek, dec_seek),
+ PROC42(ALLOCATE, enc_allocate, dec_allocate),
+ PROC42(DEALLOCATE, enc_deallocate, dec_deallocate),
+ PROC42(LAYOUTSTATS, enc_layoutstats, dec_layoutstats),
+ PROC42(CLONE, enc_clone, dec_clone),
+ PROC42(COPY, enc_copy, dec_copy),
+ PROC(LOOKUPP, enc_lookupp, dec_lookupp),
};
static unsigned int nfs_version4_counts[ARRAY_SIZE(nfs4_procedures)];
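[Editor's note: the PROC41()/PROC42() wrappers above keep nfs4_procedures[] as one unconditional table, compiling unsupported minor-version entries down to stubs instead of #ifdef-ing out table rows. A toy userspace model of the same preprocessor dispatch; all names here are invented:]

#include <stdio.h>

#define PROC(name)	{ #name, 1 }
#define STUB(name)	{ #name, 0 }

#if defined(CONFIG_NFS_V4_1)		/* set by the build system */
#define PROC41(name)	PROC(name)
#else
#define PROC41(name)	STUB(name)
#endif

struct entry { const char *name; int implemented; };

static const struct entry table[] = {
	PROC(READ),
	PROC41(SEQUENCE),	/* becomes a stub unless v4.1 is built */
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		printf("%s: %d\n", table[i].name, table[i].implemented);
	return 0;
}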
diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h
index 093290c42d7c..bd60f8d1e181 100644
--- a/fs/nfs/nfstrace.h
+++ b/fs/nfs/nfstrace.h
@@ -9,6 +9,7 @@
#define _TRACE_NFS_H
#include <linux/tracepoint.h>
+#include <linux/iversion.h>
#define nfs_show_file_type(ftype) \
__print_symbolic(ftype, \
@@ -61,7 +62,7 @@ DECLARE_EVENT_CLASS(nfs_inode_event,
__entry->dev = inode->i_sb->s_dev;
__entry->fileid = nfsi->fileid;
__entry->fhandle = nfs_fhandle_hash(&nfsi->fh);
- __entry->version = inode->i_version;
+ __entry->version = inode_peek_iversion_raw(inode);
),
TP_printk(
@@ -100,7 +101,7 @@ DECLARE_EVENT_CLASS(nfs_inode_event_done,
__entry->fileid = nfsi->fileid;
__entry->fhandle = nfs_fhandle_hash(&nfsi->fh);
__entry->type = nfs_umode_to_dtype(inode->i_mode);
- __entry->version = inode->i_version;
+ __entry->version = inode_peek_iversion_raw(inode);
__entry->size = i_size_read(inode);
__entry->nfsi_flags = nfsi->flags;
__entry->cache_validity = nfsi->cache_validity;
@@ -796,15 +797,15 @@ TRACE_EVENT(nfs_readpage_done,
)
);
-/*
- * XXX: I tried using NFS_UNSTABLE and friends in this table, but they
- * all evaluate to 0 for some reason, even if I include linux/nfs.h.
- */
+TRACE_DEFINE_ENUM(NFS_UNSTABLE);
+TRACE_DEFINE_ENUM(NFS_DATA_SYNC);
+TRACE_DEFINE_ENUM(NFS_FILE_SYNC);
+
#define nfs_show_stable(stable) \
__print_symbolic(stable, \
- { 0, " (UNSTABLE)" }, \
- { 1, " (DATA_SYNC)" }, \
- { 2, " (FILE_SYNC)" })
+ { NFS_UNSTABLE, "UNSTABLE" }, \
+ { NFS_DATA_SYNC, "DATA_SYNC" }, \
+ { NFS_FILE_SYNC, "FILE_SYNC" })
TRACE_EVENT(nfs_initiate_write,
TP_PROTO(
@@ -837,12 +838,12 @@ TRACE_EVENT(nfs_initiate_write,
TP_printk(
"fileid=%02x:%02x:%llu fhandle=0x%08x "
- "offset=%lld count=%lu stable=%d%s",
+ "offset=%lld count=%lu stable=%s",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid,
__entry->fhandle,
__entry->offset, __entry->count,
- __entry->stable, nfs_show_stable(__entry->stable)
+ nfs_show_stable(__entry->stable)
)
);
@@ -881,13 +882,13 @@ TRACE_EVENT(nfs_writeback_done,
TP_printk(
"fileid=%02x:%02x:%llu fhandle=0x%08x "
- "offset=%lld status=%d stable=%d%s "
+ "offset=%lld status=%d stable=%s "
"verifier 0x%016llx",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid,
__entry->fhandle,
__entry->offset, __entry->status,
- __entry->stable, nfs_show_stable(__entry->stable),
+ nfs_show_stable(__entry->stable),
__entry->verifier
)
);
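[Editor's note: the removed XXX comment had the symptom right — trace format strings are exported to userspace as plain text, so a bare enum inside __print_symbolic() cannot be resolved outside the kernel. TRACE_DEFINE_ENUM() records the numeric value so the tracing core can substitute it. A sketch of the pairing, assuming the generic tracepoint macros:]

/* Sketch: making enum values visible to userspace trace parsers. */
TRACE_DEFINE_ENUM(NFS_UNSTABLE);	/* exports the value to the format */

#define show_stable(stable)			\
	__print_symbolic(stable,		\
		{ NFS_UNSTABLE, "UNSTABLE" })	/* now resolvable */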
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index d0543e19098a..18a7626ac638 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -537,7 +537,7 @@ EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
* @cinfo: Commit information for the call (writes only)
*/
static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
- unsigned int count, unsigned int offset,
+ unsigned int count,
int how, struct nfs_commit_info *cinfo)
{
struct nfs_page *req = hdr->req;
@@ -546,10 +546,10 @@ static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
* NB: take care not to mess about with hdr->commit et al. */
hdr->args.fh = NFS_FH(hdr->inode);
- hdr->args.offset = req_offset(req) + offset;
+ hdr->args.offset = req_offset(req);
/* pnfs_set_layoutcommit needs this */
hdr->mds_offset = hdr->args.offset;
- hdr->args.pgbase = req->wb_pgbase + offset;
+ hdr->args.pgbase = req->wb_pgbase;
hdr->args.pages = hdr->page_array.pagevec;
hdr->args.count = count;
hdr->args.context = get_nfs_open_context(req->wb_context);
@@ -789,7 +789,7 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
desc->pg_ioflags &= ~FLUSH_COND_STABLE;
/* Set up the argument struct */
- nfs_pgio_rpcsetup(hdr, mirror->pg_count, 0, desc->pg_ioflags, &cinfo);
+ nfs_pgio_rpcsetup(hdr, mirror->pg_count, desc->pg_ioflags, &cinfo);
desc->pg_rpc_callops = &nfs_pgio_common_ops;
return 0;
}
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index d602fe9e1ac8..c13e826614b5 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -655,7 +655,7 @@ pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
return 0;
list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
if (pnfs_match_lseg_recall(lseg, recall_range, seq)) {
- dprintk("%s: freeing lseg %p iomode %d seq %u"
+ dprintk("%s: freeing lseg %p iomode %d seq %u "
"offset %llu length %llu\n", __func__,
lseg, lseg->pls_range.iomode, lseg->pls_seq,
lseg->pls_range.offset, lseg->pls_range.length);
@@ -2255,7 +2255,7 @@ pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
nfs_pageio_reset_write_mds(desc);
mirror->pg_recoalesce = 1;
}
- hdr->release(hdr);
+ hdr->completion_ops->completion(hdr);
}
static enum pnfs_try_status
@@ -2378,7 +2378,7 @@ pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
nfs_pageio_reset_read_mds(desc);
mirror->pg_recoalesce = 1;
}
- hdr->release(hdr);
+ hdr->completion_ops->completion(hdr);
}
/*
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 8d507c361d98..daf6cbf5c15f 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -40,6 +40,7 @@ enum {
NFS_LSEG_ROC, /* roc bit received from server */
NFS_LSEG_LAYOUTCOMMIT, /* layoutcommit bit set for layoutcommit */
NFS_LSEG_LAYOUTRETURN, /* layoutreturn bit set for layoutreturn */
+ NFS_LSEG_UNAVAILABLE, /* unavailable bit set for temporary problem */
};
/* Individual ip address */
@@ -86,6 +87,7 @@ enum pnfs_try_status {
*/
#define NFS4_DEF_DS_TIMEO 600 /* in tenths of a second */
#define NFS4_DEF_DS_RETRANS 5
+#define PNFS_DEVICE_RETRY_TIMEOUT (120*HZ)
/* error codes for internal use */
#define NFS4ERR_RESET_TO_MDS 12001
@@ -524,8 +526,10 @@ static inline int pnfs_return_layout(struct inode *ino)
struct nfs_inode *nfsi = NFS_I(ino);
struct nfs_server *nfss = NFS_SERVER(ino);
- if (pnfs_enabled_sb(nfss) && nfsi->layout)
+ if (pnfs_enabled_sb(nfss) && nfsi->layout) {
+ set_bit(NFS_LAYOUT_RETURN_REQUESTED, &nfsi->layout->plh_flags);
return _pnfs_return_layout(ino);
+ }
return 0;
}
diff --git a/fs/nfs/pnfs_dev.c b/fs/nfs/pnfs_dev.c
index 2961fcd7a2df..e8a07b3f9aaa 100644
--- a/fs/nfs/pnfs_dev.c
+++ b/fs/nfs/pnfs_dev.c
@@ -43,7 +43,6 @@
#define NFS4_DEVICE_ID_HASH_SIZE (1 << NFS4_DEVICE_ID_HASH_BITS)
#define NFS4_DEVICE_ID_HASH_MASK (NFS4_DEVICE_ID_HASH_SIZE - 1)
-#define PNFS_DEVICE_RETRY_TIMEOUT (120*HZ)
static struct hlist_head nfs4_deviceid_cache[NFS4_DEVICE_ID_HASH_SIZE];
static DEFINE_SPINLOCK(nfs4_deviceid_lock);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 4a379d7918f2..7428a669d7a7 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -23,6 +23,7 @@
#include <linux/export.h>
#include <linux/freezer.h>
#include <linux/wait.h>
+#include <linux/iversion.h>
#include <linux/uaccess.h>
@@ -753,11 +754,8 @@ static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
*/
spin_lock(&mapping->private_lock);
if (!nfs_have_writebacks(inode) &&
- NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE)) {
- spin_lock(&inode->i_lock);
- inode->i_version++;
- spin_unlock(&inode->i_lock);
- }
+ NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
+ inode_inc_iversion_raw(inode);
if (likely(!PageSwapCache(req->wb_page))) {
set_bit(PG_MAPPED, &req->wb_flags);
SetPagePrivate(req->wb_page);
@@ -1837,6 +1835,8 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags);
next:
nfs_unlock_and_release_request(req);
+ /* Latency breaker */
+ cond_resched();
}
nfss = NFS_SERVER(data->inode);
if (atomic_long_read(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h
index 43f31cf49bae..b8444189223b 100644
--- a/fs/nfsd/nfsfh.h
+++ b/fs/nfsd/nfsfh.h
@@ -11,6 +11,7 @@
#include <linux/crc32.h>
#include <linux/sunrpc/svc.h>
#include <uapi/linux/nfsd/nfsfh.h>
+#include <linux/iversion.h>
static inline __u32 ino_t_to_u32(ino_t ino)
{
@@ -259,7 +260,7 @@ static inline u64 nfsd4_change_attribute(struct inode *inode)
chattr = inode->i_ctime.tv_sec;
chattr <<= 30;
chattr += inode->i_ctime.tv_nsec;
- chattr += inode->i_version;
+ chattr += inode_query_iversion(inode);
return chattr;
}
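[Editor's note: nfsd folds ctime and i_version into a single 64-bit change attribute. It uses inode_query_iversion() rather than a raw peek: querying marks the counter as observed, so the next modification is guaranteed to increment it. A sketch mirroring the helper above, assuming <linux/iversion.h>:]

static u64 change_attribute(struct inode *inode)
{
	u64 chattr = inode->i_ctime.tv_sec;

	chattr <<= 30;
	chattr += inode->i_ctime.tv_nsec;
	/* query, not peek: flags i_version as seen by a reader */
	chattr += inode_query_iversion(inode);
	return chattr;
}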
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index d0d4bc4c4b70..ef08d64c84b8 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -239,10 +239,10 @@ out_close_fd:
}
/* fanotify userspace file descriptor functions */
-static unsigned int fanotify_poll(struct file *file, poll_table *wait)
+static __poll_t fanotify_poll(struct file *file, poll_table *wait)
{
struct fsnotify_group *group = file->private_data;
- int ret = 0;
+ __poll_t ret = 0;
poll_wait(file, &group->notification_waitq, wait);
spin_lock(&group->notification_lock);
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index d3c20e0bb046..5c29bf16814f 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -107,10 +107,10 @@ static inline u32 inotify_mask_to_arg(__u32 mask)
}
/* inotify userspace file descriptor functions */
-static unsigned int inotify_poll(struct file *file, poll_table *wait)
+static __poll_t inotify_poll(struct file *file, poll_table *wait)
{
struct fsnotify_group *group = file->private_data;
- int ret = 0;
+ __poll_t ret = 0;
poll_wait(file, &group->notification_waitq, wait);
spin_lock(&group->notification_lock);
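[Editor's note: the fanotify/inotify changes are part of a tree-wide conversion of ->poll() to return __poll_t, a sparse-checked bitwise type that keeps poll bits from silently mixing with ordinary integers. A representative method in the post-conversion style; the ctx structure and ready flag are invented:]

#include <linux/poll.h>		/* pulls in fs.h and wait.h */

struct example_ctx {			/* hypothetical */
	wait_queue_head_t wqh;
	bool ready;
};

static __poll_t example_poll(struct file *file, poll_table *wait)
{
	struct example_ctx *ctx = file->private_data;
	__poll_t events = 0;		/* __poll_t, not int/unsigned */

	poll_wait(file, &ctx->wqh, wait);
	if (ctx->ready)
		events |= POLLIN | POLLRDNORM;
	return events;
}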
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 7c410f879412..1c1ee489284b 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -560,13 +560,6 @@ static int ntfs_read_locked_inode(struct inode *vi)
ntfs_debug("Entering for i_ino 0x%lx.", vi->i_ino);
/* Setup the generic vfs inode parts now. */
-
- /*
- * This is for checking whether an inode has changed w.r.t. a file so
- * that the file can be updated if necessary (compare with f_version).
- */
- vi->i_version = 1;
-
vi->i_uid = vol->uid;
vi->i_gid = vol->gid;
vi->i_mode = 0;
@@ -1240,7 +1233,6 @@ static int ntfs_read_locked_attr_inode(struct inode *base_vi, struct inode *vi)
base_ni = NTFS_I(base_vi);
/* Just mirror the values from the base inode. */
- vi->i_version = base_vi->i_version;
vi->i_uid = base_vi->i_uid;
vi->i_gid = base_vi->i_gid;
set_nlink(vi, base_vi->i_nlink);
@@ -1507,7 +1499,6 @@ static int ntfs_read_locked_index_inode(struct inode *base_vi, struct inode *vi)
ni = NTFS_I(vi);
base_ni = NTFS_I(base_vi);
/* Just mirror the values from the base inode. */
- vi->i_version = base_vi->i_version;
vi->i_uid = base_vi->i_uid;
vi->i_gid = base_vi->i_gid;
set_nlink(vi, base_vi->i_nlink);
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
index ee8392aee9f6..2831f495a674 100644
--- a/fs/ntfs/mft.c
+++ b/fs/ntfs/mft.c
@@ -2641,12 +2641,6 @@ mft_rec_already_initialized:
goto undo_mftbmp_alloc;
}
vi->i_ino = bit;
- /*
- * This is for checking whether an inode has changed w.r.t. a
- * file so that the file can be updated if necessary (compare
- * with f_version).
- */
- vi->i_version = 1;
/* The owner and group come from the ntfs volume. */
vi->i_uid = vol->uid;
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index bebe59feca58..eac5140aac47 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -918,7 +918,8 @@ static int o2net_recv_tcp_msg(struct socket *sock, void *data, size_t len)
{
struct kvec vec = { .iov_len = len, .iov_base = data, };
struct msghdr msg = { .msg_flags = MSG_DONTWAIT, };
- return kernel_recvmsg(sock, &msg, &vec, 1, len, msg.msg_flags);
+ iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1, len);
+ return sock_recvmsg(sock, &msg, MSG_DONTWAIT);
}
static int o2net_send_tcp_msg(struct socket *sock, struct kvec *vec,
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index febe6312ceff..32f9c72dff17 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -42,6 +42,7 @@
#include <linux/highmem.h>
#include <linux/quotaops.h>
#include <linux/sort.h>
+#include <linux/iversion.h>
#include <cluster/masklog.h>
@@ -1174,7 +1175,7 @@ static int __ocfs2_delete_entry(handle_t *handle, struct inode *dir,
le16_add_cpu(&pde->rec_len,
le16_to_cpu(de->rec_len));
de->inode = 0;
- dir->i_version++;
+ inode_inc_iversion(dir);
ocfs2_journal_dirty(handle, bh);
goto bail;
}
@@ -1729,7 +1730,7 @@ int __ocfs2_add_entry(handle_t *handle,
if (ocfs2_dir_indexed(dir))
ocfs2_recalc_free_list(dir, handle, lookup);
- dir->i_version++;
+ inode_inc_iversion(dir);
ocfs2_journal_dirty(handle, insert_bh);
retval = 0;
goto bail;
@@ -1775,7 +1776,7 @@ static int ocfs2_dir_foreach_blk_id(struct inode *inode,
* readdir(2), then we might be pointing to an invalid
* dirent right now. Scan from the start of the block
* to make sure. */
- if (*f_version != inode->i_version) {
+ if (inode_cmp_iversion(inode, *f_version)) {
for (i = 0; i < i_size_read(inode) && i < offset; ) {
de = (struct ocfs2_dir_entry *)
(data->id_data + i);
@@ -1791,7 +1792,7 @@ static int ocfs2_dir_foreach_blk_id(struct inode *inode,
i += le16_to_cpu(de->rec_len);
}
ctx->pos = offset = i;
- *f_version = inode->i_version;
+ *f_version = inode_query_iversion(inode);
}
de = (struct ocfs2_dir_entry *) (data->id_data + ctx->pos);
@@ -1869,7 +1870,7 @@ static int ocfs2_dir_foreach_blk_el(struct inode *inode,
* readdir(2), then we might be pointing to an invalid
* dirent right now. Scan from the start of the block
* to make sure. */
- if (*f_version != inode->i_version) {
+ if (inode_cmp_iversion(inode, *f_version)) {
for (i = 0; i < sb->s_blocksize && i < offset; ) {
de = (struct ocfs2_dir_entry *) (bh->b_data + i);
/* It's too expensive to do a full
@@ -1886,7 +1887,7 @@ static int ocfs2_dir_foreach_blk_el(struct inode *inode,
offset = i;
ctx->pos = (ctx->pos & ~(sb->s_blocksize - 1))
| offset;
- *f_version = inode->i_version;
+ *f_version = inode_query_iversion(inode);
}
while (ctx->pos < i_size_read(inode)
@@ -1940,7 +1941,7 @@ static int ocfs2_dir_foreach_blk(struct inode *inode, u64 *f_version,
*/
int ocfs2_dir_foreach(struct inode *inode, struct dir_context *ctx)
{
- u64 version = inode->i_version;
+ u64 version = inode_query_iversion(inode);
ocfs2_dir_foreach_blk(inode, &version, ctx, true);
return 0;
}
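[Editor's note: ocfs2, like ufs later in this patch, stashes the directory's iversion in file->f_version across readdir(2) calls and rescans when they diverge, since a concurrent modification can leave ctx->pos pointing at a stale dirent. The handshake, condensed; the rescan helper is invented:]

/* Sketch: f_version vs. i_version readdir revalidation. */
static void readdir_revalidate(struct inode *dir, struct file *file)
{
	/* nonzero iff the directory changed since f_version was taken */
	if (inode_cmp_iversion(dir, file->f_version)) {
		rescan_from_block_start(file);	/* hypothetical */
		file->f_version = inode_query_iversion(dir);
	}
}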
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index 9c7c18c0e129..385fcefa8bc5 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -220,9 +220,9 @@ static int dlmfs_file_setattr(struct dentry *dentry, struct iattr *attr)
return 0;
}
-static unsigned int dlmfs_file_poll(struct file *file, poll_table *wait)
+static __poll_t dlmfs_file_poll(struct file *file, poll_table *wait)
{
- int event = 0;
+ __poll_t event = 0;
struct inode *inode = file_inode(file);
struct dlmfs_inode_private *ip = DLMFS_I(inode);
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 1a1e0078ab38..d51b80edd972 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -28,6 +28,7 @@
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
+#include <linux/iversion.h>
#include <asm/byteorder.h>
@@ -302,7 +303,7 @@ void ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
OCFS2_I(inode)->ip_attr = le32_to_cpu(fe->i_attr);
OCFS2_I(inode)->ip_dyn_features = le16_to_cpu(fe->i_dyn_features);
- inode->i_version = 1;
+ inode_set_iversion(inode, 1);
inode->i_generation = le32_to_cpu(fe->i_generation);
inode->i_rdev = huge_decode_dev(le64_to_cpu(fe->id1.dev1.i_rdev));
inode->i_mode = le16_to_cpu(fe->i_mode);
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 3b0a10d9b36f..c801eddc4bf3 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -41,6 +41,7 @@
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/quotaops.h>
+#include <linux/iversion.h>
#include <cluster/masklog.h>
@@ -1520,7 +1521,7 @@ static int ocfs2_rename(struct inode *old_dir,
mlog_errno(status);
goto bail;
}
- new_dir->i_version++;
+ inode_inc_iversion(new_dir);
if (S_ISDIR(new_inode->i_mode))
ocfs2_set_links_count(newfe, 0);
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index b39d14cbfa34..7a922190a8c7 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -12,6 +12,7 @@
#include <linux/writeback.h>
#include <linux/workqueue.h>
#include <linux/llist.h>
+#include <linux/iversion.h>
#include <cluster/masklog.h>
@@ -289,7 +290,7 @@ out:
mlog_errno(err);
return err;
}
- gqinode->i_version++;
+ inode_inc_iversion(gqinode);
ocfs2_mark_inode_dirty(handle, gqinode, oinfo->dqi_gqi_bh);
return len;
}
diff --git a/fs/orangefs/devorangefs-req.c b/fs/orangefs/devorangefs-req.c
index c584ad8d023c..f073cd9e6687 100644
--- a/fs/orangefs/devorangefs-req.c
+++ b/fs/orangefs/devorangefs-req.c
@@ -815,15 +815,15 @@ void orangefs_dev_cleanup(void)
ORANGEFS_REQDEVICE_NAME);
}
-static unsigned int orangefs_devreq_poll(struct file *file,
+static __poll_t orangefs_devreq_poll(struct file *file,
struct poll_table_struct *poll_table)
{
- int poll_revent_mask = 0;
+ __poll_t poll_revent_mask = 0;
poll_wait(file, &orangefs_request_list_waitq, poll_table);
if (!list_empty(&orangefs_request_list))
- poll_revent_mask |= POLL_IN;
+ poll_revent_mask |= POLLIN;
return poll_revent_mask;
}
diff --git a/fs/pipe.c b/fs/pipe.c
index 6d98566201ef..a449ca0ec0c6 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -515,10 +515,10 @@ static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
/* No kernel lock held - fine */
-static unsigned int
+static __poll_t
pipe_poll(struct file *filp, poll_table *wait)
{
- unsigned int mask;
+ __poll_t mask;
struct pipe_inode_info *pipe = filp->private_data;
int nrbufs;
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index eebf5f6cf6d5..2fd0fde16fe1 100644
--- a/fs/posix_acl.c
+++ b/fs/posix_acl.c
@@ -43,7 +43,7 @@ struct posix_acl *get_cached_acl(struct inode *inode, int type)
rcu_read_lock();
acl = rcu_dereference(*p);
if (!acl || is_uncached_acl(acl) ||
- atomic_inc_not_zero(&acl->a_refcount))
+ refcount_inc_not_zero(&acl->a_refcount))
break;
rcu_read_unlock();
cpu_relax();
@@ -164,7 +164,7 @@ EXPORT_SYMBOL(get_acl);
void
posix_acl_init(struct posix_acl *acl, int count)
{
- atomic_set(&acl->a_refcount, 1);
+ refcount_set(&acl->a_refcount, 1);
acl->a_count = count;
}
EXPORT_SYMBOL(posix_acl_init);
@@ -197,7 +197,7 @@ posix_acl_clone(const struct posix_acl *acl, gfp_t flags)
sizeof(struct posix_acl_entry);
clone = kmemdup(acl, size, flags);
if (clone)
- atomic_set(&clone->a_refcount, 1);
+ refcount_set(&clone->a_refcount, 1);
}
return clone;
}
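[Editor's note: the posix_acl switch from atomic_t to refcount_t matters most in the RCU lookup above — refcount_inc_not_zero() saturates rather than wraps and refuses to resurrect an object whose count already reached zero. The idiom, with a stand-in object type:]

#include <linux/rcupdate.h>
#include <linux/refcount.h>

struct obj { refcount_t ref; };		/* stand-in */

static struct obj *obj_get_rcu(struct obj __rcu **slot)
{
	struct obj *o;

	rcu_read_lock();
	o = rcu_dereference(*slot);
	/* returns false once a concurrent put has dropped the last
	 * reference, so a dying object is never revived */
	if (o && !refcount_inc_not_zero(&o->ref))
		o = NULL;
	rcu_read_unlock();
	return o;
}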
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index dd0f82622427..8dacaabb9f37 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -234,11 +234,11 @@ static ssize_t proc_reg_write(struct file *file, const char __user *buf, size_t
return rv;
}
-static unsigned int proc_reg_poll(struct file *file, struct poll_table_struct *pts)
+static __poll_t proc_reg_poll(struct file *file, struct poll_table_struct *pts)
{
struct proc_dir_entry *pde = PDE(file_inode(file));
- unsigned int rv = DEFAULT_POLLMASK;
- unsigned int (*poll)(struct file *, struct poll_table_struct *);
+ __poll_t rv = DEFAULT_POLLMASK;
+ __poll_t (*poll)(struct file *, struct poll_table_struct *);
if (use_pde(pde)) {
poll = pde->proc_fops->poll;
if (poll)
diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c
index e0f8774acd65..f0bfb45c3f9f 100644
--- a/fs/proc/kmsg.c
+++ b/fs/proc/kmsg.c
@@ -40,7 +40,7 @@ static ssize_t kmsg_read(struct file *file, char __user *buf,
return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_PROC);
}
-static unsigned int kmsg_poll(struct file *file, poll_table *wait)
+static __poll_t kmsg_poll(struct file *file, poll_table *wait)
{
poll_wait(file, &log_wait, wait);
if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_PROC))
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index c5cbbdff3c3d..63325377621a 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -630,12 +630,12 @@ static int proc_sys_open(struct inode *inode, struct file *filp)
return 0;
}
-static unsigned int proc_sys_poll(struct file *filp, poll_table *wait)
+static __poll_t proc_sys_poll(struct file *filp, poll_table *wait)
{
struct inode *inode = file_inode(filp);
struct ctl_table_header *head = grab_header(inode);
struct ctl_table *table = PROC_I(inode)->sysctl_entry;
- unsigned int ret = DEFAULT_POLLMASK;
+ __poll_t ret = DEFAULT_POLLMASK;
unsigned long event;
/* sysctl was unregistered */
diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c
index b786840facd9..c8528d587e09 100644
--- a/fs/proc_namespace.c
+++ b/fs/proc_namespace.c
@@ -18,12 +18,12 @@
#include "pnode.h"
#include "internal.h"
-static unsigned mounts_poll(struct file *file, poll_table *wait)
+static __poll_t mounts_poll(struct file *file, poll_table *wait)
{
struct seq_file *m = file->private_data;
struct proc_mounts *p = m->private;
struct mnt_namespace *ns = p->ns;
- unsigned res = POLLIN | POLLRDNORM;
+ __poll_t res = POLLIN | POLLRDNORM;
int event;
poll_wait(file, &p->ns->poll, wait);
diff --git a/fs/select.c b/fs/select.c
index 6de493bb42a4..ec14171dd78a 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -212,7 +212,7 @@ static int pollwake(wait_queue_entry_t *wait, unsigned mode, int sync, void *key
struct poll_table_entry *entry;
entry = container_of(wait, struct poll_table_entry, wait);
- if (key && !((unsigned long)key & entry->key))
+ if (key && !(key_to_poll(key) & entry->key))
return 0;
return __pollwake(wait, mode, sync, key);
}
@@ -438,7 +438,7 @@ get_max:
static inline void wait_key_set(poll_table *wait, unsigned long in,
unsigned long out, unsigned long bit,
- unsigned int ll_flag)
+ __poll_t ll_flag)
{
wait->_key = POLLEX_SET | ll_flag;
if (in & bit)
@@ -454,7 +454,7 @@ static int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
poll_table *wait;
int retval, i, timed_out = 0;
u64 slack = 0;
- unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
+ __poll_t busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
unsigned long busy_start = 0;
rcu_read_lock();
@@ -484,8 +484,9 @@ static int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;
for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
- unsigned long in, out, ex, all_bits, bit = 1, mask, j;
+ unsigned long in, out, ex, all_bits, bit = 1, j;
unsigned long res_in = 0, res_out = 0, res_ex = 0;
+ __poll_t mask;
in = *inp++; out = *outp++; ex = *exp++;
all_bits = in | out | ex;
@@ -802,11 +803,11 @@ struct poll_list {
* pwait poll_table will be used by the fd-provided poll handler for waiting,
* if pwait->_qproc is non-NULL.
*/
-static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait,
+static inline __poll_t do_pollfd(struct pollfd *pollfd, poll_table *pwait,
bool *can_busy_poll,
- unsigned int busy_flag)
+ __poll_t busy_flag)
{
- unsigned int mask;
+ __poll_t mask;
int fd;
mask = 0;
@@ -815,20 +816,24 @@ static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait,
struct fd f = fdget(fd);
mask = POLLNVAL;
if (f.file) {
+ /* userland u16 ->events contains POLL... bitmap */
+ __poll_t filter = demangle_poll(pollfd->events) |
+ POLLERR | POLLHUP;
mask = DEFAULT_POLLMASK;
if (f.file->f_op->poll) {
- pwait->_key = pollfd->events|POLLERR|POLLHUP;
+ pwait->_key = filter;
pwait->_key |= busy_flag;
mask = f.file->f_op->poll(f.file, pwait);
if (mask & busy_flag)
*can_busy_poll = true;
}
/* Mask out unneeded events. */
- mask &= pollfd->events | POLLERR | POLLHUP;
+ mask &= filter;
fdput(f);
}
}
- pollfd->revents = mask;
+ /* ... and so does ->revents */
+ pollfd->revents = mangle_poll(mask);
return mask;
}
@@ -840,7 +845,7 @@ static int do_poll(struct poll_list *list, struct poll_wqueues *wait,
ktime_t expire, *to = NULL;
int timed_out = 0, count = 0;
u64 slack = 0;
- unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
+ __poll_t busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
unsigned long busy_start = 0;
/* Optimise the no-wait case */
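[Editor's note: do_pollfd() now translates at the uapi boundary — demangle_poll() turns the userland u16 event bitmap into a kernel __poll_t and mangle_poll() converts back, because a few architectures historically assigned different numeric values to some POLL* bits. The shape of that boundary, with NULL and error checks elided:]

/* Sketch of the translation in do_pollfd() (checks elided). */
static __poll_t poll_one(struct pollfd *pfd, struct file *file,
			 poll_table *pwait)
{
	/* userland u16 bits -> kernel __poll_t */
	__poll_t filter = demangle_poll(pfd->events) | POLLERR | POLLHUP;
	__poll_t mask;

	pwait->_key = filter;
	mask = file->f_op->poll(file, pwait) & filter;
	/* kernel __poll_t -> userland u16 bits */
	pfd->revents = mangle_poll(mask);
	return mask;
}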
diff --git a/fs/signalfd.c b/fs/signalfd.c
index 5f1ff8756595..31e923bec99a 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -58,10 +58,10 @@ static int signalfd_release(struct inode *inode, struct file *file)
return 0;
}
-static unsigned int signalfd_poll(struct file *file, poll_table *wait)
+static __poll_t signalfd_poll(struct file *file, poll_table *wait)
{
struct signalfd_ctx *ctx = file->private_data;
- unsigned int events = 0;
+ __poll_t events = 0;
poll_wait(file, &current->sighand->signalfd_wqh, wait);
diff --git a/fs/super.c b/fs/super.c
index 06bd25d90ba5..672538ca9831 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -225,7 +225,7 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags,
if (s->s_user_ns != &init_user_ns)
s->s_iflags |= SB_I_NODEV;
INIT_HLIST_NODE(&s->s_instances);
- INIT_HLIST_BL_HEAD(&s->s_anon);
+ INIT_HLIST_BL_HEAD(&s->s_roots);
mutex_init(&s->s_sync_lock);
INIT_LIST_HEAD(&s->s_inodes);
spin_lock_init(&s->s_inode_list_lock);
diff --git a/fs/timerfd.c b/fs/timerfd.c
index 040612ec9598..0510717f3a53 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -227,10 +227,10 @@ static int timerfd_release(struct inode *inode, struct file *file)
return 0;
}
-static unsigned int timerfd_poll(struct file *file, poll_table *wait)
+static __poll_t timerfd_poll(struct file *file, poll_table *wait)
{
struct timerfd_ctx *ctx = file->private_data;
- unsigned int events = 0;
+ __poll_t events = 0;
unsigned long flags;
poll_wait(file, &ctx->wqh, wait);
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 417fe0b29f23..a2ea4856e67b 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -220,20 +220,9 @@ static struct dentry *ubifs_lookup(struct inode *dir, struct dentry *dentry,
dbg_gen("'%pd' in dir ino %lu", dentry, dir->i_ino);
- if (ubifs_crypt_is_encrypted(dir)) {
- err = fscrypt_get_encryption_info(dir);
-
- /*
- * DCACHE_ENCRYPTED_WITH_KEY is set if the dentry is
- * created while the directory was encrypted and we
- * have access to the key.
- */
- if (fscrypt_has_encryption_key(dir))
- fscrypt_set_encrypted_dentry(dentry);
- fscrypt_set_d_op(dentry);
- if (err && err != -ENOKEY)
- return ERR_PTR(err);
- }
+ err = fscrypt_prepare_lookup(dir, dentry, flags);
+ if (err)
+ return ERR_PTR(err);
err = fscrypt_setup_filename(dir, &dentry->d_name, 1, &nm);
if (err)
@@ -743,9 +732,9 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
ubifs_assert(inode_is_locked(dir));
ubifs_assert(inode_is_locked(inode));
- if (ubifs_crypt_is_encrypted(dir) &&
- !fscrypt_has_permitted_context(dir, inode))
- return -EPERM;
+ err = fscrypt_prepare_link(old_dentry, dir, dentry);
+ if (err)
+ return err;
err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
if (err)
@@ -1353,12 +1342,6 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
if (unlink)
ubifs_assert(inode_is_locked(new_inode));
- if (old_dir != new_dir) {
- if (ubifs_crypt_is_encrypted(new_dir) &&
- !fscrypt_has_permitted_context(new_dir, old_inode))
- return -EPERM;
- }
-
if (unlink && is_dir) {
err = ubifs_check_dir_empty(new_inode);
if (err)
@@ -1573,13 +1556,6 @@ static int ubifs_xrename(struct inode *old_dir, struct dentry *old_dentry,
ubifs_assert(fst_inode && snd_inode);
- if ((ubifs_crypt_is_encrypted(old_dir) ||
- ubifs_crypt_is_encrypted(new_dir)) &&
- (old_dir != new_dir) &&
- (!fscrypt_has_permitted_context(new_dir, fst_inode) ||
- !fscrypt_has_permitted_context(old_dir, snd_inode)))
- return -EPERM;
-
err = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &fst_nm);
if (err)
return err;
@@ -1624,12 +1600,19 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
+ int err;
+
if (flags & ~(RENAME_NOREPLACE | RENAME_WHITEOUT | RENAME_EXCHANGE))
return -EINVAL;
ubifs_assert(inode_is_locked(old_dir));
ubifs_assert(inode_is_locked(new_dir));
+ err = fscrypt_prepare_rename(old_dir, old_dentry, new_dir, new_dentry,
+ flags);
+ if (err)
+ return err;
+
if (flags & RENAME_EXCHANGE)
return ubifs_xrename(old_dir, old_dentry, new_dir, new_dentry);
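[Editor's note: the ubifs hunks swap open-coded encryption-context checks for the common fscrypt_prepare_lookup/link/rename/setattr helpers, so the policy lives once in fs/crypto instead of per filesystem. The calling convention, sketched against the 4.16-era <linux/fscrypt.h>; the fs-specific tail is invented:]

static int example_link(struct dentry *old_dentry, struct inode *dir,
			struct dentry *dentry)
{
	int err;

	/* centralised check; fails if the link would cross
	 * encryption contexts or required keys are missing */
	err = fscrypt_prepare_link(old_dentry, dir, dentry);
	if (err)
		return err;
	return do_fs_link(old_dentry, dir, dentry);	/* hypothetical */
}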
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index dfe85069586e..9fe194a4fa9b 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1284,13 +1284,9 @@ int ubifs_setattr(struct dentry *dentry, struct iattr *attr)
if (err)
return err;
- if (ubifs_crypt_is_encrypted(inode) && (attr->ia_valid & ATTR_SIZE)) {
- err = fscrypt_get_encryption_info(inode);
- if (err)
- return err;
- if (!fscrypt_has_encryption_key(inode))
- return -ENOKEY;
- }
+ err = fscrypt_prepare_setattr(dentry, attr);
+ if (err)
+ return err;
if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size < inode->i_size)
/* Truncation to a smaller size */
@@ -1629,35 +1625,6 @@ static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma)
return 0;
}
-static int ubifs_file_open(struct inode *inode, struct file *filp)
-{
- int ret;
- struct dentry *dir;
- struct ubifs_info *c = inode->i_sb->s_fs_info;
-
- if (ubifs_crypt_is_encrypted(inode)) {
- ret = fscrypt_get_encryption_info(inode);
- if (ret)
- return -EACCES;
- if (!fscrypt_has_encryption_key(inode))
- return -ENOKEY;
- }
-
- dir = dget_parent(file_dentry(filp));
- if (ubifs_crypt_is_encrypted(d_inode(dir)) &&
- !fscrypt_has_permitted_context(d_inode(dir), inode)) {
- ubifs_err(c, "Inconsistent encryption contexts: %lu/%lu",
- (unsigned long) d_inode(dir)->i_ino,
- (unsigned long) inode->i_ino);
- dput(dir);
- ubifs_ro_mode(c, -EPERM);
- return -EPERM;
- }
- dput(dir);
-
- return 0;
-}
-
static const char *ubifs_get_link(struct dentry *dentry,
struct inode *inode,
struct delayed_call *done)
@@ -1746,7 +1713,7 @@ const struct file_operations ubifs_file_operations = {
.unlocked_ioctl = ubifs_ioctl,
.splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
- .open = ubifs_file_open,
+ .open = fscrypt_file_open,
#ifdef CONFIG_COMPAT
.compat_ioctl = ubifs_compat_ioctl,
#endif
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
index 0a213dcba2a1..ba3d0e0f8615 100644
--- a/fs/ubifs/tnc.c
+++ b/fs/ubifs/tnc.c
@@ -1890,35 +1890,28 @@ static int search_dh_cookie(struct ubifs_info *c, const union ubifs_key *key,
union ubifs_key *dkey;
for (;;) {
- if (!err) {
- err = tnc_next(c, &znode, n);
- if (err)
- goto out;
- }
-
zbr = &znode->zbranch[*n];
dkey = &zbr->key;
if (key_inum(c, dkey) != key_inum(c, key) ||
key_type(c, dkey) != key_type(c, key)) {
- err = -ENOENT;
- goto out;
+ return -ENOENT;
}
err = tnc_read_hashed_node(c, zbr, dent);
if (err)
- goto out;
+ return err;
if (key_hash(c, key) == key_hash(c, dkey) &&
le32_to_cpu(dent->cookie) == cookie) {
*zn = znode;
- goto out;
+ return 0;
}
- }
-
-out:
- return err;
+ err = tnc_next(c, &znode, n);
+ if (err)
+ return err;
+ }
}
static int do_lookup_dh(struct ubifs_info *c, const union ubifs_key *key,
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index 5ddc89d564fd..759f1a209dbb 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -381,8 +381,6 @@ ssize_t ubifs_xattr_get(struct inode *host, const char *name, void *buf,
if (buf) {
/* If @buf is %NULL we are supposed to return the length */
if (ui->data_len > size) {
- ubifs_err(c, "buffer size %zd, xattr len %d",
- size, ui->data_len);
err = -ERANGE;
goto out_iput;
}
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c
index 2edc1755b7c5..50dfce000864 100644
--- a/fs/ufs/dir.c
+++ b/fs/ufs/dir.c
@@ -20,6 +20,7 @@
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/swap.h>
+#include <linux/iversion.h>
#include "ufs_fs.h"
#include "ufs.h"
@@ -47,7 +48,7 @@ static int ufs_commit_chunk(struct page *page, loff_t pos, unsigned len)
struct inode *dir = mapping->host;
int err = 0;
- dir->i_version++;
+ inode_inc_iversion(dir);
block_write_end(NULL, mapping, pos, len, len, page, NULL);
if (pos+len > dir->i_size) {
i_size_write(dir, pos+len);
@@ -428,7 +429,7 @@ ufs_readdir(struct file *file, struct dir_context *ctx)
unsigned long n = pos >> PAGE_SHIFT;
unsigned long npages = dir_pages(inode);
unsigned chunk_mask = ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1);
- int need_revalidate = file->f_version != inode->i_version;
+ bool need_revalidate = inode_cmp_iversion(inode, file->f_version);
unsigned flags = UFS_SB(sb)->s_flags;
UFSD("BEGIN\n");
@@ -455,8 +456,8 @@ ufs_readdir(struct file *file, struct dir_context *ctx)
offset = ufs_validate_entry(sb, kaddr, offset, chunk_mask);
ctx->pos = (n<<PAGE_SHIFT) + offset;
}
- file->f_version = inode->i_version;
- need_revalidate = 0;
+ file->f_version = inode_query_iversion(inode);
+ need_revalidate = false;
}
de = (struct ufs_dir_entry *)(kaddr+offset);
limit = kaddr + ufs_last_byte(inode, n) - UFS_DIR_REC_LEN(1);
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index afb601c0dda0..c843ec858cf7 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -36,6 +36,7 @@
#include <linux/mm.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
+#include <linux/iversion.h>
#include "ufs_fs.h"
#include "ufs.h"
@@ -693,7 +694,7 @@ struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
if (err)
goto bad_inode;
- inode->i_version++;
+ inode_inc_iversion(inode);
ufsi->i_lastfrag =
(inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
ufsi->i_dir_start_lookup = 0;
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 4d497e9c6883..b6ba80e05bff 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -88,6 +88,7 @@
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
+#include <linux/iversion.h>
#include "ufs_fs.h"
#include "ufs.h"
@@ -1440,7 +1441,7 @@ static struct inode *ufs_alloc_inode(struct super_block *sb)
if (!ei)
return NULL;
- ei->vfs_inode.i_version = 1;
+ inode_set_iversion(&ei->vfs_inode, 1);
seqlock_init(&ei->meta_lock);
mutex_init(&ei->truncate_mutex);
return &ei->vfs_inode;
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 41a75f9f23fd..743eaa646898 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -937,10 +937,10 @@ static inline struct userfaultfd_wait_queue *find_userfault_evt(
return find_userfault_in(&ctx->event_wqh);
}
-static unsigned int userfaultfd_poll(struct file *file, poll_table *wait)
+static __poll_t userfaultfd_poll(struct file *file, poll_table *wait)
{
struct userfaultfd_ctx *ctx = file->private_data;
- unsigned int ret;
+ __poll_t ret;
poll_wait(file, &ctx->fd_wqh, wait);
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index 83ed7715f856..c02781a4c091 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -167,7 +167,7 @@ xfs_alloc_lookup_ge(
* Lookup the first record less than or equal to [bno, len]
* in the btree given by cur.
*/
-static int /* error */
+int /* error */
xfs_alloc_lookup_le(
struct xfs_btree_cur *cur, /* btree cursor */
xfs_agblock_t bno, /* starting block of extent */
@@ -520,7 +520,7 @@ xfs_alloc_fixup_trees(
return 0;
}
-static bool
+static xfs_failaddr_t
xfs_agfl_verify(
struct xfs_buf *bp)
{
@@ -528,10 +528,19 @@ xfs_agfl_verify(
struct xfs_agfl *agfl = XFS_BUF_TO_AGFL(bp);
int i;
+ /*
+ * There is no verification of non-crc AGFLs because mkfs does not
+ * initialise the AGFL to zero or NULL. Hence the only valid part of the
+ * AGFL is what the AGF says is active. We can't get to the AGF, so we
+ * can't verify just those entries are valid.
+ */
+ if (!xfs_sb_version_hascrc(&mp->m_sb))
+ return NULL;
+
if (!uuid_equal(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid))
- return false;
+ return __this_address;
if (be32_to_cpu(agfl->agfl_magicnum) != XFS_AGFL_MAGIC)
- return false;
+ return __this_address;
/*
* during growfs operations, the perag is not fully initialised,
* so we can't use it for any useful checking. growfs ensures we can't
@@ -539,16 +548,17 @@ xfs_agfl_verify(
* so we can detect and avoid this problem.
*/
if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno)
- return false;
+ return __this_address;
for (i = 0; i < XFS_AGFL_SIZE(mp); i++) {
if (be32_to_cpu(agfl->agfl_bno[i]) != NULLAGBLOCK &&
be32_to_cpu(agfl->agfl_bno[i]) >= mp->m_sb.sb_agblocks)
- return false;
+ return __this_address;
}
- return xfs_log_check_lsn(mp,
- be64_to_cpu(XFS_BUF_TO_AGFL(bp)->agfl_lsn));
+ if (!xfs_log_check_lsn(mp, be64_to_cpu(XFS_BUF_TO_AGFL(bp)->agfl_lsn)))
+ return __this_address;
+ return NULL;
}
static void
@@ -556,6 +566,7 @@ xfs_agfl_read_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
+ xfs_failaddr_t fa;
/*
* There is no verification of non-crc AGFLs because mkfs does not
@@ -567,28 +578,29 @@ xfs_agfl_read_verify(
return;
if (!xfs_buf_verify_cksum(bp, XFS_AGFL_CRC_OFF))
- xfs_buf_ioerror(bp, -EFSBADCRC);
- else if (!xfs_agfl_verify(bp))
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
-
- if (bp->b_error)
- xfs_verifier_error(bp);
+ xfs_verifier_error(bp, -EFSBADCRC, __this_address);
+ else {
+ fa = xfs_agfl_verify(bp);
+ if (fa)
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
+ }
}
static void
xfs_agfl_write_verify(
struct xfs_buf *bp)
{
- struct xfs_mount *mp = bp->b_target->bt_mount;
- struct xfs_buf_log_item *bip = bp->b_fspriv;
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
+ xfs_failaddr_t fa;
/* no verification of non-crc AGFLs */
if (!xfs_sb_version_hascrc(&mp->m_sb))
return;
- if (!xfs_agfl_verify(bp)) {
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- xfs_verifier_error(bp);
+ fa = xfs_agfl_verify(bp);
+ if (fa) {
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
return;
}
@@ -602,6 +614,7 @@ const struct xfs_buf_ops xfs_agfl_buf_ops = {
.name = "xfs_agfl",
.verify_read = xfs_agfl_read_verify,
.verify_write = xfs_agfl_write_verify,
+ .verify_struct = xfs_agfl_verify,
};
/*
@@ -2397,19 +2410,19 @@ xfs_alloc_put_freelist(
return 0;
}
-static bool
+static xfs_failaddr_t
xfs_agf_verify(
- struct xfs_mount *mp,
- struct xfs_buf *bp)
- {
- struct xfs_agf *agf = XFS_BUF_TO_AGF(bp);
+ struct xfs_buf *bp)
+{
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(bp);
if (xfs_sb_version_hascrc(&mp->m_sb)) {
if (!uuid_equal(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid))
- return false;
+ return __this_address;
if (!xfs_log_check_lsn(mp,
be64_to_cpu(XFS_BUF_TO_AGF(bp)->agf_lsn)))
- return false;
+ return __this_address;
}
if (!(agf->agf_magicnum == cpu_to_be32(XFS_AGF_MAGIC) &&
@@ -2418,18 +2431,18 @@ xfs_agf_verify(
be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) &&
be32_to_cpu(agf->agf_fllast) < XFS_AGFL_SIZE(mp) &&
be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp)))
- return false;
+ return __this_address;
if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 ||
be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) < 1 ||
be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) > XFS_BTREE_MAXLEVELS ||
be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) > XFS_BTREE_MAXLEVELS)
- return false;
+ return __this_address;
if (xfs_sb_version_hasrmapbt(&mp->m_sb) &&
(be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) < 1 ||
be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) > XFS_BTREE_MAXLEVELS))
- return false;
+ return __this_address;
/*
* during growfs operations, the perag is not fully initialised,
@@ -2438,18 +2451,18 @@ xfs_agf_verify(
* so we can detect and avoid this problem.
*/
if (bp->b_pag && be32_to_cpu(agf->agf_seqno) != bp->b_pag->pag_agno)
- return false;
+ return __this_address;
if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
be32_to_cpu(agf->agf_btreeblks) > be32_to_cpu(agf->agf_length))
- return false;
+ return __this_address;
if (xfs_sb_version_hasreflink(&mp->m_sb) &&
(be32_to_cpu(agf->agf_refcount_level) < 1 ||
be32_to_cpu(agf->agf_refcount_level) > XFS_BTREE_MAXLEVELS))
- return false;
+ return __this_address;
- return true;;
+ return NULL;
}
@@ -2458,28 +2471,29 @@ xfs_agf_read_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
+ xfs_failaddr_t fa;
if (xfs_sb_version_hascrc(&mp->m_sb) &&
!xfs_buf_verify_cksum(bp, XFS_AGF_CRC_OFF))
- xfs_buf_ioerror(bp, -EFSBADCRC);
- else if (XFS_TEST_ERROR(!xfs_agf_verify(mp, bp), mp,
- XFS_ERRTAG_ALLOC_READ_AGF))
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
-
- if (bp->b_error)
- xfs_verifier_error(bp);
+ xfs_verifier_error(bp, -EFSBADCRC, __this_address);
+ else {
+ fa = xfs_agf_verify(bp);
+ if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_ALLOC_READ_AGF))
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
+ }
}
static void
xfs_agf_write_verify(
struct xfs_buf *bp)
{
- struct xfs_mount *mp = bp->b_target->bt_mount;
- struct xfs_buf_log_item *bip = bp->b_fspriv;
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
+ xfs_failaddr_t fa;
- if (!xfs_agf_verify(mp, bp)) {
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- xfs_verifier_error(bp);
+ fa = xfs_agf_verify(bp);
+ if (fa) {
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
return;
}
@@ -2496,6 +2510,7 @@ const struct xfs_buf_ops xfs_agf_buf_ops = {
.name = "xfs_agf",
.verify_read = xfs_agf_read_verify,
.verify_write = xfs_agf_write_verify,
+ .verify_struct = xfs_agf_verify,
};
/*
@@ -2981,3 +2996,22 @@ xfs_verify_fsbno(
return false;
return xfs_verify_agbno(mp, agno, XFS_FSB_TO_AGBNO(mp, fsbno));
}
+
+/* Is there a record covering a given extent? */
+int
+xfs_alloc_has_record(
+ struct xfs_btree_cur *cur,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ bool *exists)
+{
+ union xfs_btree_irec low;
+ union xfs_btree_irec high;
+
+ memset(&low, 0, sizeof(low));
+ low.a.ar_startblock = bno;
+ memset(&high, 0xFF, sizeof(high));
+ high.a.ar_startblock = bno + len - 1;
+
+ return xfs_btree_has_record(cur, &low, &high, exists);
+}
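
A hedged usage sketch for the new predicate: given a free-space btree cursor the caller has already constructed, ask whether any record overlaps a candidate extent. The wrapper and its corruption policy are assumptions for illustration, not part of this patch:

/* Sketch: an extent we believe is allocated must not appear in the bnobt. */
static int
example_check_not_free(
	struct xfs_btree_cur	*cur,		/* bnobt cursor, already set up */
	xfs_agblock_t		bno,
	xfs_extlen_t		len)
{
	bool			is_free;
	int			error;

	error = xfs_alloc_has_record(cur, bno, len, &is_free);
	if (error)
		return error;
	return is_free ? -EFSCORRUPTED : 0;	/* hypothetical policy */
}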
diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h
index 7ba2d129d504..65a0cafe06e4 100644
--- a/fs/xfs/libxfs/xfs_alloc.h
+++ b/fs/xfs/libxfs/xfs_alloc.h
@@ -198,6 +198,13 @@ xfs_free_extent(
enum xfs_ag_resv_type type); /* block reservation type */
int /* error */
+xfs_alloc_lookup_le(
+ struct xfs_btree_cur *cur, /* btree cursor */
+ xfs_agblock_t bno, /* starting block of extent */
+ xfs_extlen_t len, /* length of extent */
+ int *stat); /* success/failure */
+
+int /* error */
xfs_alloc_lookup_ge(
struct xfs_btree_cur *cur, /* btree cursor */
xfs_agblock_t bno, /* starting block of extent */
@@ -237,4 +244,7 @@ bool xfs_verify_agbno(struct xfs_mount *mp, xfs_agnumber_t agno,
xfs_agblock_t agbno);
bool xfs_verify_fsbno(struct xfs_mount *mp, xfs_fsblock_t fsbno);
+int xfs_alloc_has_record(struct xfs_btree_cur *cur, xfs_agblock_t bno,
+ xfs_extlen_t len, bool *exist);
+
#endif /* __XFS_ALLOC_H__ */
diff --git a/fs/xfs/libxfs/xfs_alloc_btree.c b/fs/xfs/libxfs/xfs_alloc_btree.c
index cfde0a0f9706..6840b588187e 100644
--- a/fs/xfs/libxfs/xfs_alloc_btree.c
+++ b/fs/xfs/libxfs/xfs_alloc_btree.c
@@ -307,13 +307,14 @@ xfs_cntbt_diff_two_keys(
be32_to_cpu(k2->alloc.ar_startblock);
}
-static bool
+static xfs_failaddr_t
xfs_allocbt_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
struct xfs_perag *pag = bp->b_pag;
+ xfs_failaddr_t fa;
unsigned int level;
/*
@@ -331,29 +332,31 @@ xfs_allocbt_verify(
level = be16_to_cpu(block->bb_level);
switch (block->bb_magic) {
case cpu_to_be32(XFS_ABTB_CRC_MAGIC):
- if (!xfs_btree_sblock_v5hdr_verify(bp))
- return false;
+ fa = xfs_btree_sblock_v5hdr_verify(bp);
+ if (fa)
+ return fa;
/* fall through */
case cpu_to_be32(XFS_ABTB_MAGIC):
if (pag && pag->pagf_init) {
if (level >= pag->pagf_levels[XFS_BTNUM_BNOi])
- return false;
+ return __this_address;
} else if (level >= mp->m_ag_maxlevels)
- return false;
+ return __this_address;
break;
case cpu_to_be32(XFS_ABTC_CRC_MAGIC):
- if (!xfs_btree_sblock_v5hdr_verify(bp))
- return false;
+ fa = xfs_btree_sblock_v5hdr_verify(bp);
+ if (fa)
+ return fa;
/* fall through */
case cpu_to_be32(XFS_ABTC_MAGIC):
if (pag && pag->pagf_init) {
if (level >= pag->pagf_levels[XFS_BTNUM_CNTi])
- return false;
+ return __this_address;
} else if (level >= mp->m_ag_maxlevels)
- return false;
+ return __this_address;
break;
default:
- return false;
+ return __this_address;
}
return xfs_btree_sblock_verify(bp, mp->m_alloc_mxr[level != 0]);
@@ -363,25 +366,30 @@ static void
xfs_allocbt_read_verify(
struct xfs_buf *bp)
{
+ xfs_failaddr_t fa;
+
if (!xfs_btree_sblock_verify_crc(bp))
- xfs_buf_ioerror(bp, -EFSBADCRC);
- else if (!xfs_allocbt_verify(bp))
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
+ xfs_verifier_error(bp, -EFSBADCRC, __this_address);
+ else {
+ fa = xfs_allocbt_verify(bp);
+ if (fa)
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
+ }
- if (bp->b_error) {
+ if (bp->b_error)
trace_xfs_btree_corrupt(bp, _RET_IP_);
- xfs_verifier_error(bp);
- }
}
static void
xfs_allocbt_write_verify(
struct xfs_buf *bp)
{
- if (!xfs_allocbt_verify(bp)) {
+ xfs_failaddr_t fa;
+
+ fa = xfs_allocbt_verify(bp);
+ if (fa) {
trace_xfs_btree_corrupt(bp, _RET_IP_);
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- xfs_verifier_error(bp);
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
return;
}
xfs_btree_sblock_calc_crc(bp);
@@ -392,6 +400,7 @@ const struct xfs_buf_ops xfs_allocbt_buf_ops = {
.name = "xfs_allocbt",
.verify_read = xfs_allocbt_read_verify,
.verify_write = xfs_allocbt_write_verify,
+ .verify_struct = xfs_allocbt_verify,
};
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index a76914db72ef..ce4a34a2751d 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -717,7 +717,6 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
return error;
out_defer_cancel:
xfs_defer_cancel(args->dfops);
- args->trans = NULL;
return error;
}
@@ -770,7 +769,6 @@ xfs_attr_leaf_removename(xfs_da_args_t *args)
return 0;
out_defer_cancel:
xfs_defer_cancel(args->dfops);
- args->trans = NULL;
return error;
}
@@ -1045,7 +1043,6 @@ out:
return retval;
out_defer_cancel:
xfs_defer_cancel(args->dfops);
- args->trans = NULL;
goto out;
}
@@ -1186,7 +1183,6 @@ out:
return error;
out_defer_cancel:
xfs_defer_cancel(args->dfops);
- args->trans = NULL;
goto out;
}
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index 601eaa36f1ad..2135b8e67dcc 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -247,14 +247,15 @@ xfs_attr3_leaf_hdr_to_disk(
}
}
-static bool
+static xfs_failaddr_t
xfs_attr3_leaf_verify(
- struct xfs_buf *bp)
+ struct xfs_buf *bp)
{
- struct xfs_mount *mp = bp->b_target->bt_mount;
- struct xfs_attr_leafblock *leaf = bp->b_addr;
- struct xfs_perag *pag = bp->b_pag;
- struct xfs_attr3_icleaf_hdr ichdr;
+ struct xfs_attr3_icleaf_hdr ichdr;
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+ struct xfs_attr_leafblock *leaf = bp->b_addr;
+ struct xfs_perag *pag = bp->b_pag;
+ struct xfs_attr_leaf_entry *entries;
xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf);
@@ -262,17 +263,17 @@ xfs_attr3_leaf_verify(
struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
if (ichdr.magic != XFS_ATTR3_LEAF_MAGIC)
- return false;
+ return __this_address;
if (!uuid_equal(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid))
- return false;
+ return __this_address;
if (be64_to_cpu(hdr3->info.blkno) != bp->b_bn)
- return false;
+ return __this_address;
if (!xfs_log_check_lsn(mp, be64_to_cpu(hdr3->info.lsn)))
- return false;
+ return __this_address;
} else {
if (ichdr.magic != XFS_ATTR_LEAF_MAGIC)
- return false;
+ return __this_address;
}
/*
* In recovery there is a transient state where count == 0 is valid
@@ -280,12 +281,27 @@ xfs_attr3_leaf_verify(
* if the attr didn't fit in shortform.
*/
if (pag && pag->pagf_init && ichdr.count == 0)
- return false;
+ return __this_address;
+
+ /*
+ * firstused is the block offset of the first name info structure.
+ * Make sure it doesn't go off the block or crash into the header.
+ */
+ if (ichdr.firstused > mp->m_attr_geo->blksize)
+ return __this_address;
+ if (ichdr.firstused < xfs_attr3_leaf_hdr_size(leaf))
+ return __this_address;
+
+ /* Make sure the entries array doesn't crash into the name info. */
+ entries = xfs_attr3_leaf_entryp(bp->b_addr);
+ if ((char *)&entries[ichdr.count] >
+ (char *)bp->b_addr + ichdr.firstused)
+ return __this_address;
/* XXX: need to range check rest of attr header values */
/* XXX: hash order check? */
- return true;
+ return NULL;
}
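
A worked example of the new firstused checks, with hypothetical geometry: a 4096-byte attr block, a 64-byte leaf header, and 8-byte entries. firstused must lie within [64, 4096], and count entries occupy bytes [64, 64 + 8 * count), which must end at or before firstused:

/* firstused = 2048: name info occupies [2048, 4096)			*/
/* count = 300: entries occupy [64, 2464)				*/
/* 2464 > 2048: entries overlap the name area -> __this_address	*/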
static void
@@ -293,12 +309,13 @@ xfs_attr3_leaf_write_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
- struct xfs_buf_log_item *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
struct xfs_attr3_leaf_hdr *hdr3 = bp->b_addr;
+ xfs_failaddr_t fa;
- if (!xfs_attr3_leaf_verify(bp)) {
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- xfs_verifier_error(bp);
+ fa = xfs_attr3_leaf_verify(bp);
+ if (fa) {
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
return;
}
@@ -322,21 +339,23 @@ xfs_attr3_leaf_read_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
+ xfs_failaddr_t fa;
if (xfs_sb_version_hascrc(&mp->m_sb) &&
!xfs_buf_verify_cksum(bp, XFS_ATTR3_LEAF_CRC_OFF))
- xfs_buf_ioerror(bp, -EFSBADCRC);
- else if (!xfs_attr3_leaf_verify(bp))
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
-
- if (bp->b_error)
- xfs_verifier_error(bp);
+ xfs_verifier_error(bp, -EFSBADCRC, __this_address);
+ else {
+ fa = xfs_attr3_leaf_verify(bp);
+ if (fa)
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
+ }
}
const struct xfs_buf_ops xfs_attr3_leaf_buf_ops = {
.name = "xfs_attr3_leaf",
.verify_read = xfs_attr3_leaf_read_verify,
.verify_write = xfs_attr3_leaf_write_verify,
+ .verify_struct = xfs_attr3_leaf_verify,
};
int
@@ -870,6 +889,80 @@ xfs_attr_shortform_allfit(
return xfs_attr_shortform_bytesfit(dp, bytes);
}
+/* Verify the consistency of an inline attribute fork. */
+xfs_failaddr_t
+xfs_attr_shortform_verify(
+ struct xfs_inode *ip)
+{
+ struct xfs_attr_shortform *sfp;
+ struct xfs_attr_sf_entry *sfep;
+ struct xfs_attr_sf_entry *next_sfep;
+ char *endp;
+ struct xfs_ifork *ifp;
+ int i;
+ int size;
+
+ ASSERT(ip->i_d.di_aformat == XFS_DINODE_FMT_LOCAL);
+ ifp = XFS_IFORK_PTR(ip, XFS_ATTR_FORK);
+ sfp = (struct xfs_attr_shortform *)ifp->if_u1.if_data;
+ size = ifp->if_bytes;
+
+ /*
+ * Give up if the attribute is way too short.
+ */
+ if (size < sizeof(struct xfs_attr_sf_hdr))
+ return __this_address;
+
+ endp = (char *)sfp + size;
+
+ /* Check all reported entries */
+ sfep = &sfp->list[0];
+ for (i = 0; i < sfp->hdr.count; i++) {
+ /*
+ * struct xfs_attr_sf_entry has a variable length.
+ * Check the fixed-offset parts of the structure are
+ * within the data buffer.
+ */
+ if (((char *)sfep + sizeof(*sfep)) >= endp)
+ return __this_address;
+
+ /* Don't allow names with known bad length. */
+ if (sfep->namelen == 0)
+ return __this_address;
+
+ /*
+ * Check that the variable-length part of the structure is
+ * within the data buffer. The next entry starts after the
+ * name component, so nextentry is an acceptable test.
+ */
+ next_sfep = XFS_ATTR_SF_NEXTENTRY(sfep);
+ if ((char *)next_sfep > endp)
+ return __this_address;
+
+ /*
+ * Check for unknown flags. Short form doesn't support
+ * the incomplete or local bits, so we can use the namespace
+ * mask here.
+ */
+ if (sfep->flags & ~XFS_ATTR_NSP_ONDISK_MASK)
+ return __this_address;
+
+ /*
+ * Check for invalid namespace combinations. We only allow
+ * one namespace flag per xattr, so we can just count the
+ * bits (i.e. hweight) here.
+ */
+ if (hweight8(sfep->flags & XFS_ATTR_NSP_ONDISK_MASK) > 1)
+ return __this_address;
+
+ sfep = next_sfep;
+ }
+ if ((void *)sfep != (void *)endp)
+ return __this_address;
+
+ return NULL;
+}
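
The loop above follows the usual recipe for walking a variable-length on-disk array without trusting any length field: confirm the fixed-size part of the record fits before reading it, derive the next record from the current one, confirm the variable-size part fits too, and finally insist the walk ends exactly at the buffer's end. In sketch form, with hypothetical record types and helpers:

	char	*p = (char *)first_rec;
	char	*endp = (char *)base + size;

	while (nr--) {
		if (p + sizeof(struct rec_fixed) > endp)  /* fixed part */
			return __this_address;
		p = rec_next(p);	/* hypothetical: uses rec's own length */
		if (p > endp)				  /* variable part */
			return __this_address;
	}
	if (p != endp)			/* no unaccounted trailing bytes */
		return __this_address;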
+
/*
* Convert a leaf attribute list to shortform attribute list
*/
@@ -2173,7 +2266,8 @@ xfs_attr3_leaf_lookup_int(
leaf = bp->b_addr;
xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf);
entries = xfs_attr3_leaf_entryp(leaf);
- ASSERT(ichdr.count < args->geo->blksize / 8);
+ if (ichdr.count >= args->geo->blksize / 8)
+ return -EFSCORRUPTED;
/*
* Binary search. (note: small blocks will skip this loop)
@@ -2189,8 +2283,10 @@ xfs_attr3_leaf_lookup_int(
else
break;
}
- ASSERT(probe >= 0 && (!ichdr.count || probe < ichdr.count));
- ASSERT(span <= 4 || be32_to_cpu(entry->hashval) == hashval);
+ if (!(probe >= 0 && (!ichdr.count || probe < ichdr.count)))
+ return -EFSCORRUPTED;
+ if (!(span <= 4 || be32_to_cpu(entry->hashval) == hashval))
+ return -EFSCORRUPTED;
/*
* Since we may have duplicate hashval's, find the first matching
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.h b/fs/xfs/libxfs/xfs_attr_leaf.h
index 894124efb421..4da08af5b134 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.h
+++ b/fs/xfs/libxfs/xfs_attr_leaf.h
@@ -53,6 +53,7 @@ int xfs_attr_shortform_to_leaf(struct xfs_da_args *args,
int xfs_attr_shortform_remove(struct xfs_da_args *args);
int xfs_attr_shortform_allfit(struct xfs_buf *bp, struct xfs_inode *dp);
int xfs_attr_shortform_bytesfit(struct xfs_inode *dp, int bytes);
+xfs_failaddr_t xfs_attr_shortform_verify(struct xfs_inode *ip);
void xfs_attr_fork_remove(struct xfs_inode *ip, struct xfs_trans *tp);
/*
diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c
index d56caf037ca0..21be186067a2 100644
--- a/fs/xfs/libxfs/xfs_attr_remote.c
+++ b/fs/xfs/libxfs/xfs_attr_remote.c
@@ -65,7 +65,7 @@ xfs_attr3_rmt_blocks(
* does CRC, location and bounds checking, the unpacking function checks the
* attribute parameters and owner.
*/
-static bool
+static xfs_failaddr_t
xfs_attr3_rmt_hdr_ok(
void *ptr,
xfs_ino_t ino,
@@ -76,19 +76,19 @@ xfs_attr3_rmt_hdr_ok(
struct xfs_attr3_rmt_hdr *rmt = ptr;
if (bno != be64_to_cpu(rmt->rm_blkno))
- return false;
+ return __this_address;
if (offset != be32_to_cpu(rmt->rm_offset))
- return false;
+ return __this_address;
if (size != be32_to_cpu(rmt->rm_bytes))
- return false;
+ return __this_address;
if (ino != be64_to_cpu(rmt->rm_owner))
- return false;
+ return __this_address;
/* ok */
- return true;
+ return NULL;
}
-static bool
+static xfs_failaddr_t
xfs_attr3_rmt_verify(
struct xfs_mount *mp,
void *ptr,
@@ -98,27 +98,29 @@ xfs_attr3_rmt_verify(
struct xfs_attr3_rmt_hdr *rmt = ptr;
if (!xfs_sb_version_hascrc(&mp->m_sb))
- return false;
+ return __this_address;
if (rmt->rm_magic != cpu_to_be32(XFS_ATTR3_RMT_MAGIC))
- return false;
+ return __this_address;
if (!uuid_equal(&rmt->rm_uuid, &mp->m_sb.sb_meta_uuid))
- return false;
+ return __this_address;
if (be64_to_cpu(rmt->rm_blkno) != bno)
- return false;
+ return __this_address;
if (be32_to_cpu(rmt->rm_bytes) > fsbsize - sizeof(*rmt))
- return false;
+ return __this_address;
if (be32_to_cpu(rmt->rm_offset) +
be32_to_cpu(rmt->rm_bytes) > XFS_XATTR_SIZE_MAX)
- return false;
+ return __this_address;
if (rmt->rm_owner == 0)
- return false;
+ return __this_address;
- return true;
+ return NULL;
}
-static void
-xfs_attr3_rmt_read_verify(
- struct xfs_buf *bp)
+static int
+__xfs_attr3_rmt_read_verify(
+ struct xfs_buf *bp,
+ bool check_crc,
+ xfs_failaddr_t *failaddr)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
char *ptr;
@@ -128,7 +130,7 @@ xfs_attr3_rmt_read_verify(
/* no verification of non-crc buffers */
if (!xfs_sb_version_hascrc(&mp->m_sb))
- return;
+ return 0;
ptr = bp->b_addr;
bno = bp->b_bn;
@@ -136,23 +138,48 @@ xfs_attr3_rmt_read_verify(
ASSERT(len >= blksize);
while (len > 0) {
- if (!xfs_verify_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF)) {
- xfs_buf_ioerror(bp, -EFSBADCRC);
- break;
- }
- if (!xfs_attr3_rmt_verify(mp, ptr, blksize, bno)) {
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- break;
+ if (check_crc &&
+ !xfs_verify_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF)) {
+ *failaddr = __this_address;
+ return -EFSBADCRC;
}
+ *failaddr = xfs_attr3_rmt_verify(mp, ptr, blksize, bno);
+ if (*failaddr)
+ return -EFSCORRUPTED;
len -= blksize;
ptr += blksize;
bno += BTOBB(blksize);
}
- if (bp->b_error)
- xfs_verifier_error(bp);
- else
- ASSERT(len == 0);
+ if (len != 0) {
+ *failaddr = __this_address;
+ return -EFSCORRUPTED;
+ }
+
+ return 0;
+}
+
+static void
+xfs_attr3_rmt_read_verify(
+ struct xfs_buf *bp)
+{
+ xfs_failaddr_t fa;
+ int error;
+
+ error = __xfs_attr3_rmt_read_verify(bp, true, &fa);
+ if (error)
+ xfs_verifier_error(bp, error, fa);
+}
+
+static xfs_failaddr_t
+xfs_attr3_rmt_verify_struct(
+ struct xfs_buf *bp)
+{
+ xfs_failaddr_t fa;
+ int error;
+
+ error = __xfs_attr3_rmt_read_verify(bp, false, &fa);
+ return error ? fa : NULL;
}
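
The remote-attr refactor folds the CRC walk and the structure walk into one helper so that ->verify_read and the new ->verify_struct cannot drift apart; the check_crc flag is the only difference between the two callers. The same shape in miniature, all names hypothetical:

static int
__example_walk(struct xfs_buf *bp, bool check_crc, xfs_failaddr_t *fa)
{
	/* per-block loop: CRC check only if check_crc, then structure check */
	*fa = NULL;
	return 0;
}

static void
example_read_verify(struct xfs_buf *bp)
{
	xfs_failaddr_t	fa;
	int		error = __example_walk(bp, true, &fa);

	if (error)
		xfs_verifier_error(bp, error, fa);
}

static xfs_failaddr_t
example_verify_struct(struct xfs_buf *bp)
{
	xfs_failaddr_t	fa;

	return __example_walk(bp, false, &fa) ? fa : NULL;
}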
static void
@@ -160,6 +187,7 @@ xfs_attr3_rmt_write_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
+ xfs_failaddr_t fa;
int blksize = mp->m_attr_geo->blksize;
char *ptr;
int len;
@@ -177,9 +205,9 @@ xfs_attr3_rmt_write_verify(
while (len > 0) {
struct xfs_attr3_rmt_hdr *rmt = (struct xfs_attr3_rmt_hdr *)ptr;
- if (!xfs_attr3_rmt_verify(mp, ptr, blksize, bno)) {
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- xfs_verifier_error(bp);
+ fa = xfs_attr3_rmt_verify(mp, ptr, blksize, bno);
+ if (fa) {
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
return;
}
@@ -188,8 +216,7 @@ xfs_attr3_rmt_write_verify(
* xfs_attr3_rmt_hdr_set() for the explanation.
*/
if (rmt->rm_lsn != cpu_to_be64(NULLCOMMITLSN)) {
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- xfs_verifier_error(bp);
+ xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
return;
}
xfs_update_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF);
@@ -198,13 +225,16 @@ xfs_attr3_rmt_write_verify(
ptr += blksize;
bno += BTOBB(blksize);
}
- ASSERT(len == 0);
+
+ if (len != 0)
+ xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
}
const struct xfs_buf_ops xfs_attr3_rmt_buf_ops = {
.name = "xfs_attr3_rmt",
.verify_read = xfs_attr3_rmt_read_verify,
.verify_write = xfs_attr3_rmt_write_verify,
+ .verify_struct = xfs_attr3_rmt_verify_struct,
};
STATIC int
@@ -269,7 +299,7 @@ xfs_attr_rmtval_copyout(
byte_cnt = min(*valuelen, byte_cnt);
if (xfs_sb_version_hascrc(&mp->m_sb)) {
- if (!xfs_attr3_rmt_hdr_ok(src, ino, *offset,
+ if (xfs_attr3_rmt_hdr_ok(src, ino, *offset,
byte_cnt, bno)) {
xfs_alert(mp,
"remote attribute header mismatch bno/off/len/owner (0x%llx/0x%x/Ox%x/0x%llx)",
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 1bddbba6b80c..daae00ed30c5 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -400,7 +400,7 @@ xfs_bmap_check_leaf_extents(
pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
bno = be64_to_cpu(*pp);
XFS_WANT_CORRUPTED_GOTO(mp,
- XFS_FSB_SANITY_CHECK(mp, bno), error0);
+ xfs_verify_fsbno(mp, bno), error0);
if (bp_release) {
bp_release = 0;
xfs_trans_brelse(NULL, bp);
@@ -1220,7 +1220,7 @@ xfs_iread_extents(
pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
bno = be64_to_cpu(*pp);
XFS_WANT_CORRUPTED_GOTO(mp,
- XFS_FSB_SANITY_CHECK(mp, bno), out_brelse);
+ xfs_verify_fsbno(mp, bno), out_brelse);
xfs_trans_brelse(tp, bp);
}
@@ -3337,6 +3337,49 @@ xfs_bmap_btalloc_filestreams(
return 0;
}
+/* Update all inode and quota accounting for the allocation we just did. */
+static void
+xfs_bmap_btalloc_accounting(
+ struct xfs_bmalloca *ap,
+ struct xfs_alloc_arg *args)
+{
+ if (ap->flags & XFS_BMAPI_COWFORK) {
+ /*
+ * COW fork blocks are in-core only and thus are treated as
+ * in-core quota reservation (like delalloc blocks) even when
+ * converted to real blocks. The quota reservation is not
+ * accounted to disk until blocks are remapped to the data
+ * fork. So if these blocks were previously delalloc, we
+ * already have quota reservation and there's nothing to do
+ * yet.
+ */
+ if (ap->wasdel)
+ return;
+
+ /*
+ * Otherwise, we've allocated blocks in a hole. The transaction
+ * has acquired in-core quota reservation for this extent.
+ * Rather than account these as real blocks, however, we reduce
+ * the transaction quota reservation based on the allocation.
+ * This essentially transfers the transaction quota reservation
+ * to that of a delalloc extent.
+ */
+ ap->ip->i_delayed_blks += args->len;
+ xfs_trans_mod_dquot_byino(ap->tp, ap->ip, XFS_TRANS_DQ_RES_BLKS,
+ -(long)args->len);
+ return;
+ }
+
+ /* data/attr fork only */
+ ap->ip->i_d.di_nblocks += args->len;
+ xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
+ if (ap->wasdel)
+ ap->ip->i_delayed_blks -= args->len;
+ xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
+ ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT : XFS_TRANS_DQ_BCOUNT,
+ args->len);
+}
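
The net effect of the CoW branch above, with hypothetical numbers: allocating 16 blocks into a CoW-fork hole leaves on-disk quota untouched and moves the reservation from the transaction to the inode's in-core delalloc accounting:

/* before: ip->i_delayed_blks == D, transaction quota reservation == R	*/
/* after : ip->i_delayed_blks == D + 16, reservation == R - 16		*/
/* wasdel: the blocks were already reserved as delalloc; nothing to do	*/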
+
STATIC int
xfs_bmap_btalloc(
struct xfs_bmalloca *ap) /* bmap alloc argument struct */
@@ -3347,6 +3390,8 @@ xfs_bmap_btalloc(
xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
xfs_agnumber_t ag;
xfs_alloc_arg_t args;
+ xfs_fileoff_t orig_offset;
+ xfs_extlen_t orig_length;
xfs_extlen_t blen;
xfs_extlen_t nextminlen = 0;
int nullfb; /* true if ap->firstblock isn't set */
@@ -3356,6 +3401,8 @@ xfs_bmap_btalloc(
int stripe_align;
ASSERT(ap->length);
+ orig_offset = ap->offset;
+ orig_length = ap->length;
mp = ap->ip->i_mount;
@@ -3571,19 +3618,23 @@ xfs_bmap_btalloc(
*ap->firstblock = args.fsbno;
ASSERT(nullfb || fb_agno <= args.agno);
ap->length = args.len;
- if (!(ap->flags & XFS_BMAPI_COWFORK))
- ap->ip->i_d.di_nblocks += args.len;
- xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
- if (ap->wasdel)
- ap->ip->i_delayed_blks -= args.len;
/*
- * Adjust the disk quota also. This was reserved
- * earlier.
+ * If the extent size hint is active, we tried to round the
+ * caller's allocation request offset down to extsz and the
+ * length up to another extsz boundary. If we found a free
+ * extent we mapped it in starting at this new offset. If the
+ * newly mapped space isn't long enough to cover any of the
+ * range of offsets that was originally requested, move the
+ * mapping up so that we can fill as much of the caller's
+ * original request as possible. Free space is apparently
+ * very fragmented so we're unlikely to be able to satisfy the
+ * hints anyway.
*/
- xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
- ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT :
- XFS_TRANS_DQ_BCOUNT,
- (long) args.len);
+ if (ap->length <= orig_length)
+ ap->offset = orig_offset;
+ else if (ap->offset + ap->length < orig_offset + orig_length)
+ ap->offset = orig_offset + orig_length - ap->length;
+ xfs_bmap_btalloc_accounting(ap, &args);
} else {
ap->blkno = NULLFSBLOCK;
ap->length = 0;
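
A worked example of that fixup, with hypothetical numbers: the caller asked for offset 100, length 10; the extent size hint realigned the request down to offset 80, and the allocator found a 16-block extent, so at this point ap->offset = 80 and ap->length = 16. Since 16 > 10 and 80 + 16 = 96 < 100 + 10 = 110, the mapping is slid up to offset 110 - 16 = 94, and [94, 110) now covers the whole requested [100, 110). Had only 8 blocks been found (8 <= 10), ap->offset would simply be put back to 100.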
@@ -3876,8 +3927,6 @@ xfs_bmapi_reserve_delalloc(
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
xfs_extlen_t alen;
xfs_extlen_t indlen;
- char rt = XFS_IS_REALTIME_INODE(ip);
- xfs_extlen_t extsz;
int error;
xfs_fileoff_t aoff = off;
@@ -3892,31 +3941,25 @@ xfs_bmapi_reserve_delalloc(
prealloc = alen - len;
/* Figure out the extent size, adjust alen */
- if (whichfork == XFS_COW_FORK)
- extsz = xfs_get_cowextsz_hint(ip);
- else
- extsz = xfs_get_extsz_hint(ip);
- if (extsz) {
+ if (whichfork == XFS_COW_FORK) {
struct xfs_bmbt_irec prev;
+ xfs_extlen_t extsz = xfs_get_cowextsz_hint(ip);
if (!xfs_iext_peek_prev_extent(ifp, icur, &prev))
prev.br_startoff = NULLFILEOFF;
- error = xfs_bmap_extsize_align(mp, got, &prev, extsz, rt, eof,
+ error = xfs_bmap_extsize_align(mp, got, &prev, extsz, 0, eof,
1, 0, &aoff, &alen);
ASSERT(!error);
}
- if (rt)
- extsz = alen / mp->m_sb.sb_rextsize;
-
/*
* Make a transaction-less quota reservation for delayed allocation
* blocks. This number gets adjusted later. We return if we haven't
* allocated blocks already inside this loop.
*/
error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0,
- rt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
+ XFS_QMOPT_RES_REGBLKS);
if (error)
return error;
@@ -3927,12 +3970,7 @@ xfs_bmapi_reserve_delalloc(
indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
ASSERT(indlen > 0);
- if (rt) {
- error = xfs_mod_frextents(mp, -((int64_t)extsz));
- } else {
- error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
- }
-
+ error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
if (error)
goto out_unreserve_quota;
@@ -3963,14 +4001,11 @@ xfs_bmapi_reserve_delalloc(
return 0;
out_unreserve_blocks:
- if (rt)
- xfs_mod_frextents(mp, extsz);
- else
- xfs_mod_fdblocks(mp, alen, false);
+ xfs_mod_fdblocks(mp, alen, false);
out_unreserve_quota:
if (XFS_IS_QUOTA_ON(mp))
- xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0, rt ?
- XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
+ xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0,
+ XFS_QMOPT_RES_REGBLKS);
return error;
}
@@ -4304,8 +4339,16 @@ xfs_bmapi_write(
while (bno < end && n < *nmap) {
bool need_alloc = false, wasdelay = false;
- /* in hole or beyoned EOF? */
+ /* in hole or beyond EOF? */
if (eof || bma.got.br_startoff > bno) {
+ /*
+ * CoW fork conversions should /never/ hit EOF or
+ * holes. There should always be something for us
+ * to work on.
+ */
+ ASSERT(!((flags & XFS_BMAPI_CONVERT) &&
+ (flags & XFS_BMAPI_COWFORK)));
+
if (flags & XFS_BMAPI_DELALLOC) {
/*
* For the COW fork we can reasonably get a
@@ -4824,6 +4867,7 @@ xfs_bmap_del_extent_cow(
xfs_iext_insert(ip, icur, &new, state);
break;
}
+ ip->i_delayed_blks -= del->br_blockcount;
}
/*
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
index c10aecaaae44..9faf479aba49 100644
--- a/fs/xfs/libxfs/xfs_bmap_btree.c
+++ b/fs/xfs/libxfs/xfs_bmap_btree.c
@@ -425,33 +425,29 @@ xfs_bmbt_diff_two_keys(
be64_to_cpu(k2->bmbt.br_startoff);
}
-static bool
+static xfs_failaddr_t
xfs_bmbt_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
+ xfs_failaddr_t fa;
unsigned int level;
switch (block->bb_magic) {
case cpu_to_be32(XFS_BMAP_CRC_MAGIC):
- if (!xfs_sb_version_hascrc(&mp->m_sb))
- return false;
- if (!uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid))
- return false;
- if (be64_to_cpu(block->bb_u.l.bb_blkno) != bp->b_bn)
- return false;
/*
* XXX: need a better way of verifying the owner here. Right now
* just make sure there has been one set.
*/
- if (be64_to_cpu(block->bb_u.l.bb_owner) == 0)
- return false;
+ fa = xfs_btree_lblock_v5hdr_verify(bp, XFS_RMAP_OWN_UNKNOWN);
+ if (fa)
+ return fa;
/* fall through */
case cpu_to_be32(XFS_BMAP_MAGIC):
break;
default:
- return false;
+ return __this_address;
}
/*
@@ -463,46 +459,39 @@ xfs_bmbt_verify(
*/
level = be16_to_cpu(block->bb_level);
if (level > max(mp->m_bm_maxlevels[0], mp->m_bm_maxlevels[1]))
- return false;
- if (be16_to_cpu(block->bb_numrecs) > mp->m_bmap_dmxr[level != 0])
- return false;
-
- /* sibling pointer verification */
- if (!block->bb_u.l.bb_leftsib ||
- (block->bb_u.l.bb_leftsib != cpu_to_be64(NULLFSBLOCK) &&
- !XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_u.l.bb_leftsib))))
- return false;
- if (!block->bb_u.l.bb_rightsib ||
- (block->bb_u.l.bb_rightsib != cpu_to_be64(NULLFSBLOCK) &&
- !XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_u.l.bb_rightsib))))
- return false;
-
- return true;
+ return __this_address;
+
+ return xfs_btree_lblock_verify(bp, mp->m_bmap_dmxr[level != 0]);
}
static void
xfs_bmbt_read_verify(
struct xfs_buf *bp)
{
+ xfs_failaddr_t fa;
+
if (!xfs_btree_lblock_verify_crc(bp))
- xfs_buf_ioerror(bp, -EFSBADCRC);
- else if (!xfs_bmbt_verify(bp))
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
+ xfs_verifier_error(bp, -EFSBADCRC, __this_address);
+ else {
+ fa = xfs_bmbt_verify(bp);
+ if (fa)
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
+ }
- if (bp->b_error) {
+ if (bp->b_error)
trace_xfs_btree_corrupt(bp, _RET_IP_);
- xfs_verifier_error(bp);
- }
}
static void
xfs_bmbt_write_verify(
struct xfs_buf *bp)
{
- if (!xfs_bmbt_verify(bp)) {
+ xfs_failaddr_t fa;
+
+ fa = xfs_bmbt_verify(bp);
+ if (fa) {
trace_xfs_btree_corrupt(bp, _RET_IP_);
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- xfs_verifier_error(bp);
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
return;
}
xfs_btree_lblock_calc_crc(bp);
@@ -512,6 +501,7 @@ const struct xfs_buf_ops xfs_bmbt_buf_ops = {
.name = "xfs_bmbt",
.verify_read = xfs_bmbt_read_verify,
.verify_write = xfs_bmbt_write_verify,
+ .verify_struct = xfs_bmbt_verify,
};
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index 5f33adf8eecb..79ee4a1951d1 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -273,7 +273,7 @@ xfs_btree_lblock_calc_crc(
struct xfs_buf *bp)
{
struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
- struct xfs_buf_log_item *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
if (!xfs_sb_version_hascrc(&bp->b_target->bt_mount->m_sb))
return;
@@ -311,7 +311,7 @@ xfs_btree_sblock_calc_crc(
struct xfs_buf *bp)
{
struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
- struct xfs_buf_log_item *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
if (!xfs_sb_version_hascrc(&bp->b_target->bt_mount->m_sb))
return;
@@ -329,7 +329,7 @@ xfs_btree_sblock_verify_crc(
if (xfs_sb_version_hascrc(&mp->m_sb)) {
if (!xfs_log_check_lsn(mp, be64_to_cpu(block->bb_u.s.bb_lsn)))
- return false;
+ return __this_address;
return xfs_buf_verify_cksum(bp, XFS_BTREE_SBLOCK_CRC_OFF);
}
@@ -853,7 +853,7 @@ xfs_btree_read_bufl(
xfs_daddr_t d; /* real disk block address */
int error;
- if (!XFS_FSB_SANITY_CHECK(mp, fsbno))
+ if (!xfs_verify_fsbno(mp, fsbno))
return -EFSCORRUPTED;
d = XFS_FSB_TO_DADDR(mp, fsbno);
error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, d,
@@ -4529,6 +4529,51 @@ xfs_btree_change_owner(
&bbcoi);
}
+/* Verify the v5 fields of a long-format btree block. */
+xfs_failaddr_t
+xfs_btree_lblock_v5hdr_verify(
+ struct xfs_buf *bp,
+ uint64_t owner)
+{
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+ struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
+
+ if (!xfs_sb_version_hascrc(&mp->m_sb))
+ return __this_address;
+ if (!uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid))
+ return __this_address;
+ if (block->bb_u.l.bb_blkno != cpu_to_be64(bp->b_bn))
+ return __this_address;
+ if (owner != XFS_RMAP_OWN_UNKNOWN &&
+ be64_to_cpu(block->bb_u.l.bb_owner) != owner)
+ return __this_address;
+ return NULL;
+}
+
+/* Verify a long-format btree block. */
+xfs_failaddr_t
+xfs_btree_lblock_verify(
+ struct xfs_buf *bp,
+ unsigned int max_recs)
+{
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+ struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
+
+ /* numrecs verification */
+ if (be16_to_cpu(block->bb_numrecs) > max_recs)
+ return __this_address;
+
+ /* sibling pointer verification */
+ if (block->bb_u.l.bb_leftsib != cpu_to_be64(NULLFSBLOCK) &&
+ !xfs_verify_fsbno(mp, be64_to_cpu(block->bb_u.l.bb_leftsib)))
+ return __this_address;
+ if (block->bb_u.l.bb_rightsib != cpu_to_be64(NULLFSBLOCK) &&
+ !xfs_verify_fsbno(mp, be64_to_cpu(block->bb_u.l.bb_rightsib)))
+ return __this_address;
+
+ return NULL;
+}
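
The two new helpers centralize the long-format header and sibling checks that xfs_bmbt_verify previously open-coded. The sibling convention is worth spelling out: NULLFSBLOCK is the "no sibling" sentinel, and anything else must pass xfs_verify_fsbno(). A hedged sketch of a block that satisfies both checks:

/* A lone root block: both sibling pointers hold the sentinel. */
block->bb_u.l.bb_leftsib = cpu_to_be64(NULLFSBLOCK);
block->bb_u.l.bb_rightsib = cpu_to_be64(NULLFSBLOCK);
/* A real sibling must name a block the filesystem can actually contain. */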
+
/**
* xfs_btree_sblock_v5hdr_verify() -- verify the v5 fields of a short-format
* btree block
@@ -4537,7 +4582,7 @@ xfs_btree_change_owner(
* @max_recs: pointer to the m_*_mxr max records field in the xfs mount
* @pag_max_level: pointer to the per-ag max level field
*/
-bool
+xfs_failaddr_t
xfs_btree_sblock_v5hdr_verify(
struct xfs_buf *bp)
{
@@ -4546,14 +4591,14 @@ xfs_btree_sblock_v5hdr_verify(
struct xfs_perag *pag = bp->b_pag;
if (!xfs_sb_version_hascrc(&mp->m_sb))
- return false;
+ return __this_address;
if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid))
- return false;
+ return __this_address;
if (block->bb_u.s.bb_blkno != cpu_to_be64(bp->b_bn))
- return false;
+ return __this_address;
if (pag && be32_to_cpu(block->bb_u.s.bb_owner) != pag->pag_agno)
- return false;
- return true;
+ return __this_address;
+ return NULL;
}
/**
@@ -4562,29 +4607,29 @@ xfs_btree_sblock_v5hdr_verify(
* @bp: buffer containing the btree block
* @max_recs: maximum records allowed in this btree node
*/
-bool
+xfs_failaddr_t
xfs_btree_sblock_verify(
struct xfs_buf *bp,
unsigned int max_recs)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
+ xfs_agblock_t agno;
/* numrecs verification */
if (be16_to_cpu(block->bb_numrecs) > max_recs)
- return false;
+ return __this_address;
/* sibling pointer verification */
- if (!block->bb_u.s.bb_leftsib ||
- (be32_to_cpu(block->bb_u.s.bb_leftsib) >= mp->m_sb.sb_agblocks &&
- block->bb_u.s.bb_leftsib != cpu_to_be32(NULLAGBLOCK)))
- return false;
- if (!block->bb_u.s.bb_rightsib ||
- (be32_to_cpu(block->bb_u.s.bb_rightsib) >= mp->m_sb.sb_agblocks &&
- block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK)))
- return false;
+ agno = xfs_daddr_to_agno(mp, XFS_BUF_ADDR(bp));
+ if (block->bb_u.s.bb_leftsib != cpu_to_be32(NULLAGBLOCK) &&
+ !xfs_verify_agbno(mp, agno, be32_to_cpu(block->bb_u.s.bb_leftsib)))
+ return __this_address;
+ if (block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK) &&
+ !xfs_verify_agbno(mp, agno, be32_to_cpu(block->bb_u.s.bb_rightsib)))
+ return __this_address;
- return true;
+ return NULL;
}
/*
@@ -4953,3 +4998,33 @@ xfs_btree_diff_two_ptrs(
return (int64_t)be64_to_cpu(a->l) - be64_to_cpu(b->l);
return (int64_t)be32_to_cpu(a->s) - be32_to_cpu(b->s);
}
+
+/* If there's an extent, we're done. */
+STATIC int
+xfs_btree_has_record_helper(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_rec *rec,
+ void *priv)
+{
+ return XFS_BTREE_QUERY_RANGE_ABORT;
+}
+
+/* Is there a record covering a given range of keys? */
+int
+xfs_btree_has_record(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_irec *low,
+ union xfs_btree_irec *high,
+ bool *exists)
+{
+ int error;
+
+ error = xfs_btree_query_range(cur, low, high,
+ &xfs_btree_has_record_helper, NULL);
+ if (error == XFS_BTREE_QUERY_RANGE_ABORT) {
+ *exists = true;
+ return 0;
+ }
+ *exists = false;
+ return error;
+}
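
xfs_btree_has_record turns the range-query machinery into an existence test: the helper aborts at the first record it visits, and the caller reinterprets the distinguished XFS_BTREE_QUERY_RANGE_ABORT return as "found one" rather than as a failure. The same idiom in a hedged sketch that also captures which record was hit (names hypothetical):

struct first_hit {
	union xfs_btree_rec	rec;	/* copy of the first match */
};

static int
example_first_hit_helper(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec,
	void			*priv)
{
	struct first_hit	*fh = priv;

	fh->rec = *rec;				/* remember the match */
	return XFS_BTREE_QUERY_RANGE_ABORT;	/* stop the walk early */
}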
diff --git a/fs/xfs/libxfs/xfs_btree.h b/fs/xfs/libxfs/xfs_btree.h
index b57501c6f71d..50440b5618e8 100644
--- a/fs/xfs/libxfs/xfs_btree.h
+++ b/fs/xfs/libxfs/xfs_btree.h
@@ -473,10 +473,6 @@ static inline int xfs_btree_get_level(struct xfs_btree_block *block)
#define XFS_FILBLKS_MIN(a,b) min_t(xfs_filblks_t, (a), (b))
#define XFS_FILBLKS_MAX(a,b) max_t(xfs_filblks_t, (a), (b))
-#define XFS_FSB_SANITY_CHECK(mp,fsb) \
- (fsb && XFS_FSB_TO_AGNO(mp, fsb) < mp->m_sb.sb_agcount && \
- XFS_FSB_TO_AGBNO(mp, fsb) < mp->m_sb.sb_agblocks)
-
/*
* Trace hooks. Currently not implemented as they need to be ported
* over to the generic tracing functionality, which is some effort.
@@ -496,8 +492,14 @@ static inline int xfs_btree_get_level(struct xfs_btree_block *block)
#define XFS_BTREE_TRACE_ARGR(c, r)
#define XFS_BTREE_TRACE_CURSOR(c, t)
-bool xfs_btree_sblock_v5hdr_verify(struct xfs_buf *bp);
-bool xfs_btree_sblock_verify(struct xfs_buf *bp, unsigned int max_recs);
+xfs_failaddr_t xfs_btree_sblock_v5hdr_verify(struct xfs_buf *bp);
+xfs_failaddr_t xfs_btree_sblock_verify(struct xfs_buf *bp,
+ unsigned int max_recs);
+xfs_failaddr_t xfs_btree_lblock_v5hdr_verify(struct xfs_buf *bp,
+ uint64_t owner);
+xfs_failaddr_t xfs_btree_lblock_verify(struct xfs_buf *bp,
+ unsigned int max_recs);
+
uint xfs_btree_compute_maxlevels(struct xfs_mount *mp, uint *limits,
unsigned long len);
xfs_extlen_t xfs_btree_calc_size(struct xfs_mount *mp, uint *limits,
@@ -545,5 +547,7 @@ void xfs_btree_get_keys(struct xfs_btree_cur *cur,
struct xfs_btree_block *block, union xfs_btree_key *key);
union xfs_btree_key *xfs_btree_high_key_from_key(struct xfs_btree_cur *cur,
union xfs_btree_key *key);
+int xfs_btree_has_record(struct xfs_btree_cur *cur, union xfs_btree_irec *low,
+ union xfs_btree_irec *high, bool *exists);
#endif /* __XFS_BTREE_H__ */
diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
index 651611530d2f..ea187b4a7991 100644
--- a/fs/xfs/libxfs/xfs_da_btree.c
+++ b/fs/xfs/libxfs/xfs_da_btree.c
@@ -128,7 +128,7 @@ xfs_da_state_free(xfs_da_state_t *state)
kmem_zone_free(xfs_da_state_zone, state);
}
-static bool
+static xfs_failaddr_t
xfs_da3_node_verify(
struct xfs_buf *bp)
{
@@ -145,24 +145,24 @@ xfs_da3_node_verify(
struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
if (ichdr.magic != XFS_DA3_NODE_MAGIC)
- return false;
+ return __this_address;
if (!uuid_equal(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid))
- return false;
+ return __this_address;
if (be64_to_cpu(hdr3->info.blkno) != bp->b_bn)
- return false;
+ return __this_address;
if (!xfs_log_check_lsn(mp, be64_to_cpu(hdr3->info.lsn)))
- return false;
+ return __this_address;
} else {
if (ichdr.magic != XFS_DA_NODE_MAGIC)
- return false;
+ return __this_address;
}
if (ichdr.level == 0)
- return false;
+ return __this_address;
if (ichdr.level > XFS_DA_NODE_MAXDEPTH)
- return false;
+ return __this_address;
if (ichdr.count == 0)
- return false;
+ return __this_address;
/*
* we don't know if the node is for an attribute or directory tree,
@@ -170,11 +170,11 @@ xfs_da3_node_verify(
*/
if (ichdr.count > mp->m_dir_geo->node_ents &&
ichdr.count > mp->m_attr_geo->node_ents)
- return false;
+ return __this_address;
/* XXX: hash order check? */
- return true;
+ return NULL;
}
static void
@@ -182,12 +182,13 @@ xfs_da3_node_write_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
- struct xfs_buf_log_item *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
+ xfs_failaddr_t fa;
- if (!xfs_da3_node_verify(bp)) {
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- xfs_verifier_error(bp);
+ fa = xfs_da3_node_verify(bp);
+ if (fa) {
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
return;
}
@@ -211,19 +212,20 @@ xfs_da3_node_read_verify(
struct xfs_buf *bp)
{
struct xfs_da_blkinfo *info = bp->b_addr;
+ xfs_failaddr_t fa;
switch (be16_to_cpu(info->magic)) {
case XFS_DA3_NODE_MAGIC:
if (!xfs_buf_verify_cksum(bp, XFS_DA3_NODE_CRC_OFF)) {
- xfs_buf_ioerror(bp, -EFSBADCRC);
+ xfs_verifier_error(bp, -EFSBADCRC,
+ __this_address);
break;
}
/* fall through */
case XFS_DA_NODE_MAGIC:
- if (!xfs_da3_node_verify(bp)) {
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- break;
- }
+ fa = xfs_da3_node_verify(bp);
+ if (fa)
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
return;
case XFS_ATTR_LEAF_MAGIC:
case XFS_ATTR3_LEAF_MAGIC:
@@ -236,18 +238,40 @@ xfs_da3_node_read_verify(
bp->b_ops->verify_read(bp);
return;
default:
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
+ xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
break;
}
+}
+
+/* Verify the structure of a da3 block. */
+static xfs_failaddr_t
+xfs_da3_node_verify_struct(
+ struct xfs_buf *bp)
+{
+ struct xfs_da_blkinfo *info = bp->b_addr;
- /* corrupt block */
- xfs_verifier_error(bp);
+ switch (be16_to_cpu(info->magic)) {
+ case XFS_DA3_NODE_MAGIC:
+ case XFS_DA_NODE_MAGIC:
+ return xfs_da3_node_verify(bp);
+ case XFS_ATTR_LEAF_MAGIC:
+ case XFS_ATTR3_LEAF_MAGIC:
+ bp->b_ops = &xfs_attr3_leaf_buf_ops;
+ return bp->b_ops->verify_struct(bp);
+ case XFS_DIR2_LEAFN_MAGIC:
+ case XFS_DIR3_LEAFN_MAGIC:
+ bp->b_ops = &xfs_dir3_leafn_buf_ops;
+ return bp->b_ops->verify_struct(bp);
+ default:
+ return __this_address;
+ }
}
const struct xfs_buf_ops xfs_da3_node_buf_ops = {
.name = "xfs_da3_node",
.verify_read = xfs_da3_node_read_verify,
.verify_write = xfs_da3_node_write_verify,
+ .verify_struct = xfs_da3_node_verify_struct,
};
int
diff --git a/fs/xfs/libxfs/xfs_da_format.h b/fs/xfs/libxfs/xfs_da_format.h
index 3771edcb301d..7e77299b7789 100644
--- a/fs/xfs/libxfs/xfs_da_format.h
+++ b/fs/xfs/libxfs/xfs_da_format.h
@@ -875,4 +875,10 @@ struct xfs_attr3_rmt_hdr {
((bufsize) - (xfs_sb_version_hascrc(&(mp)->m_sb) ? \
sizeof(struct xfs_attr3_rmt_hdr) : 0))
+/* Number of bytes in a directory block. */
+static inline unsigned int xfs_dir2_dirblock_bytes(struct xfs_sb *sbp)
+{
+ return 1 << (sbp->sb_blocklog + sbp->sb_dirblklog);
+}
+
#endif /* __XFS_DA_FORMAT_H__ */
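
The new helper just composes the two logarithms; with hypothetical superblock values sb_blocklog = 12 (4096-byte filesystem blocks) and sb_dirblklog = 1 (two filesystem blocks per directory block):

	1 << (12 + 1) == 8192	/* bytes in one directory block */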
diff --git a/fs/xfs/libxfs/xfs_dir2.c b/fs/xfs/libxfs/xfs_dir2.c
index e10778c102ea..92f94e190f04 100644
--- a/fs/xfs/libxfs/xfs_dir2.c
+++ b/fs/xfs/libxfs/xfs_dir2.c
@@ -119,8 +119,7 @@ xfs_da_mount(
ASSERT(mp->m_sb.sb_versionnum & XFS_SB_VERSION_DIRV2BIT);
- ASSERT((1 << (mp->m_sb.sb_blocklog + mp->m_sb.sb_dirblklog)) <=
- XFS_MAX_BLOCKSIZE);
+ ASSERT(xfs_dir2_dirblock_bytes(&mp->m_sb) <= XFS_MAX_BLOCKSIZE);
mp->m_dir_inode_ops = xfs_dir_get_ops(mp, NULL);
mp->m_nondir_inode_ops = xfs_nondir_get_ops(mp, NULL);
@@ -140,7 +139,7 @@ xfs_da_mount(
dageo = mp->m_dir_geo;
dageo->blklog = mp->m_sb.sb_blocklog + mp->m_sb.sb_dirblklog;
dageo->fsblog = mp->m_sb.sb_blocklog;
- dageo->blksize = 1 << dageo->blklog;
+ dageo->blksize = xfs_dir2_dirblock_bytes(&mp->m_sb);
dageo->fsbcount = 1 << mp->m_sb.sb_dirblklog;
/*
diff --git a/fs/xfs/libxfs/xfs_dir2.h b/fs/xfs/libxfs/xfs_dir2.h
index 1a8f2cf977ca..388d67c5c903 100644
--- a/fs/xfs/libxfs/xfs_dir2.h
+++ b/fs/xfs/libxfs/xfs_dir2.h
@@ -340,5 +340,7 @@ xfs_dir2_leaf_tail_p(struct xfs_da_geometry *geo, struct xfs_dir2_leaf *lp)
#define XFS_READDIR_BUFSIZE (32768)
unsigned char xfs_dir3_get_dtype(struct xfs_mount *mp, uint8_t filetype);
+void *xfs_dir3_data_endp(struct xfs_da_geometry *geo,
+ struct xfs_dir2_data_hdr *hdr);
#endif /* __XFS_DIR2_H__ */
diff --git a/fs/xfs/libxfs/xfs_dir2_block.c b/fs/xfs/libxfs/xfs_dir2_block.c
index 43c902f7a68d..2da86a394bcf 100644
--- a/fs/xfs/libxfs/xfs_dir2_block.c
+++ b/fs/xfs/libxfs/xfs_dir2_block.c
@@ -58,7 +58,7 @@ xfs_dir_startup(void)
xfs_dir_hash_dotdot = xfs_da_hashname((unsigned char *)"..", 2);
}
-static bool
+static xfs_failaddr_t
xfs_dir3_block_verify(
struct xfs_buf *bp)
{
@@ -67,20 +67,18 @@ xfs_dir3_block_verify(
if (xfs_sb_version_hascrc(&mp->m_sb)) {
if (hdr3->magic != cpu_to_be32(XFS_DIR3_BLOCK_MAGIC))
- return false;
+ return __this_address;
if (!uuid_equal(&hdr3->uuid, &mp->m_sb.sb_meta_uuid))
- return false;
+ return __this_address;
if (be64_to_cpu(hdr3->blkno) != bp->b_bn)
- return false;
+ return __this_address;
if (!xfs_log_check_lsn(mp, be64_to_cpu(hdr3->lsn)))
- return false;
+ return __this_address;
} else {
if (hdr3->magic != cpu_to_be32(XFS_DIR2_BLOCK_MAGIC))
- return false;
+ return __this_address;
}
- if (__xfs_dir3_data_check(NULL, bp))
- return false;
- return true;
+ return __xfs_dir3_data_check(NULL, bp);
}
static void
@@ -88,15 +86,16 @@ xfs_dir3_block_read_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
+ xfs_failaddr_t fa;
if (xfs_sb_version_hascrc(&mp->m_sb) &&
!xfs_buf_verify_cksum(bp, XFS_DIR3_DATA_CRC_OFF))
- xfs_buf_ioerror(bp, -EFSBADCRC);
- else if (!xfs_dir3_block_verify(bp))
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
-
- if (bp->b_error)
- xfs_verifier_error(bp);
+ xfs_verifier_error(bp, -EFSBADCRC, __this_address);
+ else {
+ fa = xfs_dir3_block_verify(bp);
+ if (fa)
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
+ }
}
static void
@@ -104,12 +103,13 @@ xfs_dir3_block_write_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
- struct xfs_buf_log_item *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr;
+ xfs_failaddr_t fa;
- if (!xfs_dir3_block_verify(bp)) {
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- xfs_verifier_error(bp);
+ fa = xfs_dir3_block_verify(bp);
+ if (fa) {
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
return;
}
@@ -126,6 +126,7 @@ const struct xfs_buf_ops xfs_dir3_block_buf_ops = {
.name = "xfs_dir3_block",
.verify_read = xfs_dir3_block_read_verify,
.verify_write = xfs_dir3_block_write_verify,
+ .verify_struct = xfs_dir3_block_verify,
};
int
diff --git a/fs/xfs/libxfs/xfs_dir2_data.c b/fs/xfs/libxfs/xfs_dir2_data.c
index 8727a43115ef..920279485275 100644
--- a/fs/xfs/libxfs/xfs_dir2_data.c
+++ b/fs/xfs/libxfs/xfs_dir2_data.c
@@ -36,9 +36,9 @@
/*
* Check the consistency of the data block.
* The input can also be a block-format directory.
- * Return 0 is the buffer is good, otherwise an error.
+ * Return NULL if the buffer is good, otherwise the address of the error.
*/
-int
+xfs_failaddr_t
__xfs_dir3_data_check(
struct xfs_inode *dp, /* incore inode pointer */
struct xfs_buf *bp) /* data block's buffer */
@@ -73,6 +73,14 @@ __xfs_dir3_data_check(
*/
ops = xfs_dir_get_ops(mp, dp);
+ /*
+ * If this isn't a directory, or we don't get handed the dir ops,
+ * something is seriously wrong. Bail out.
+ */
+ if ((dp && !S_ISDIR(VFS_I(dp)->i_mode)) ||
+ ops != xfs_dir_get_ops(mp, NULL))
+ return __this_address;
+
hdr = bp->b_addr;
p = (char *)ops->data_entry_p(hdr);
@@ -81,7 +89,6 @@ __xfs_dir3_data_check(
case cpu_to_be32(XFS_DIR2_BLOCK_MAGIC):
btp = xfs_dir2_block_tail_p(geo, hdr);
lep = xfs_dir2_block_leaf_p(btp);
- endp = (char *)lep;
/*
* The number of leaf entries is limited by the size of the
@@ -90,17 +97,19 @@ __xfs_dir3_data_check(
* so just ensure that the count falls somewhere inside the
* block right now.
*/
- XFS_WANT_CORRUPTED_RETURN(mp, be32_to_cpu(btp->count) <
- ((char *)btp - p) / sizeof(struct xfs_dir2_leaf_entry));
+ if (be32_to_cpu(btp->count) >=
+ ((char *)btp - p) / sizeof(struct xfs_dir2_leaf_entry))
+ return __this_address;
break;
case cpu_to_be32(XFS_DIR3_DATA_MAGIC):
case cpu_to_be32(XFS_DIR2_DATA_MAGIC):
- endp = (char *)hdr + geo->blksize;
break;
default:
- XFS_ERROR_REPORT("Bad Magic", XFS_ERRLEVEL_LOW, mp);
- return -EFSCORRUPTED;
+ return __this_address;
}
+ endp = xfs_dir3_data_endp(geo, hdr);
+ if (!endp)
+ return __this_address;
/*
* Account for zero bestfree entries.
@@ -108,22 +117,25 @@ __xfs_dir3_data_check(
bf = ops->data_bestfree_p(hdr);
count = lastfree = freeseen = 0;
if (!bf[0].length) {
- XFS_WANT_CORRUPTED_RETURN(mp, !bf[0].offset);
+ if (bf[0].offset)
+ return __this_address;
freeseen |= 1 << 0;
}
if (!bf[1].length) {
- XFS_WANT_CORRUPTED_RETURN(mp, !bf[1].offset);
+ if (bf[1].offset)
+ return __this_address;
freeseen |= 1 << 1;
}
if (!bf[2].length) {
- XFS_WANT_CORRUPTED_RETURN(mp, !bf[2].offset);
+ if (bf[2].offset)
+ return __this_address;
freeseen |= 1 << 2;
}
- XFS_WANT_CORRUPTED_RETURN(mp, be16_to_cpu(bf[0].length) >=
- be16_to_cpu(bf[1].length));
- XFS_WANT_CORRUPTED_RETURN(mp, be16_to_cpu(bf[1].length) >=
- be16_to_cpu(bf[2].length));
+ if (be16_to_cpu(bf[0].length) < be16_to_cpu(bf[1].length))
+ return __this_address;
+ if (be16_to_cpu(bf[1].length) < be16_to_cpu(bf[2].length))
+ return __this_address;
/*
* Loop over the data/unused entries.
*/
@@ -135,22 +147,23 @@ __xfs_dir3_data_check(
* doesn't need to be there.
*/
if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
- XFS_WANT_CORRUPTED_RETURN(mp, lastfree == 0);
- XFS_WANT_CORRUPTED_RETURN(mp, endp >=
- p + be16_to_cpu(dup->length));
- XFS_WANT_CORRUPTED_RETURN(mp,
- be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup)) ==
- (char *)dup - (char *)hdr);
+ if (lastfree != 0)
+ return __this_address;
+ if (endp < p + be16_to_cpu(dup->length))
+ return __this_address;
+ if (be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup)) !=
+ (char *)dup - (char *)hdr)
+ return __this_address;
dfp = xfs_dir2_data_freefind(hdr, bf, dup);
if (dfp) {
i = (int)(dfp - bf);
- XFS_WANT_CORRUPTED_RETURN(mp,
- (freeseen & (1 << i)) == 0);
+ if ((freeseen & (1 << i)) != 0)
+ return __this_address;
freeseen |= 1 << i;
} else {
- XFS_WANT_CORRUPTED_RETURN(mp,
- be16_to_cpu(dup->length) <=
- be16_to_cpu(bf[2].length));
+ if (be16_to_cpu(dup->length) >
+ be16_to_cpu(bf[2].length))
+ return __this_address;
}
p += be16_to_cpu(dup->length);
lastfree = 1;
@@ -163,16 +176,17 @@ __xfs_dir3_data_check(
* The linear search is crude but this is DEBUG code.
*/
dep = (xfs_dir2_data_entry_t *)p;
- XFS_WANT_CORRUPTED_RETURN(mp, dep->namelen != 0);
- XFS_WANT_CORRUPTED_RETURN(mp,
- !xfs_dir_ino_validate(mp, be64_to_cpu(dep->inumber)));
- XFS_WANT_CORRUPTED_RETURN(mp, endp >=
- p + ops->data_entsize(dep->namelen));
- XFS_WANT_CORRUPTED_RETURN(mp,
- be16_to_cpu(*ops->data_entry_tag_p(dep)) ==
- (char *)dep - (char *)hdr);
- XFS_WANT_CORRUPTED_RETURN(mp,
- ops->data_get_ftype(dep) < XFS_DIR3_FT_MAX);
+ if (dep->namelen == 0)
+ return __this_address;
+ if (xfs_dir_ino_validate(mp, be64_to_cpu(dep->inumber)))
+ return __this_address;
+ if (endp < p + ops->data_entsize(dep->namelen))
+ return __this_address;
+ if (be16_to_cpu(*ops->data_entry_tag_p(dep)) !=
+ (char *)dep - (char *)hdr)
+ return __this_address;
+ if (ops->data_get_ftype(dep) >= XFS_DIR3_FT_MAX)
+ return __this_address;
count++;
lastfree = 0;
if (hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC) ||
@@ -188,34 +202,52 @@ __xfs_dir3_data_check(
be32_to_cpu(lep[i].hashval) == hash)
break;
}
- XFS_WANT_CORRUPTED_RETURN(mp,
- i < be32_to_cpu(btp->count));
+ if (i >= be32_to_cpu(btp->count))
+ return __this_address;
}
p += ops->data_entsize(dep->namelen);
}
/*
* Need to have seen all the entries and all the bestfree slots.
*/
- XFS_WANT_CORRUPTED_RETURN(mp, freeseen == 7);
+ if (freeseen != 7)
+ return __this_address;
if (hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC) ||
hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC)) {
for (i = stale = 0; i < be32_to_cpu(btp->count); i++) {
if (lep[i].address ==
cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
stale++;
- if (i > 0)
- XFS_WANT_CORRUPTED_RETURN(mp,
- be32_to_cpu(lep[i].hashval) >=
- be32_to_cpu(lep[i - 1].hashval));
+ if (i > 0 && be32_to_cpu(lep[i].hashval) <
+ be32_to_cpu(lep[i - 1].hashval))
+ return __this_address;
}
- XFS_WANT_CORRUPTED_RETURN(mp, count ==
- be32_to_cpu(btp->count) - be32_to_cpu(btp->stale));
- XFS_WANT_CORRUPTED_RETURN(mp, stale == be32_to_cpu(btp->stale));
+ if (count != be32_to_cpu(btp->count) - be32_to_cpu(btp->stale))
+ return __this_address;
+ if (stale != be32_to_cpu(btp->stale))
+ return __this_address;
}
- return 0;
+ return NULL;
+}
+
+#ifdef DEBUG
+void
+xfs_dir3_data_check(
+ struct xfs_inode *dp,
+ struct xfs_buf *bp)
+{
+ xfs_failaddr_t fa;
+
+ fa = __xfs_dir3_data_check(dp, bp);
+ if (!fa)
+ return;
+ xfs_corruption_error(__func__, XFS_ERRLEVEL_LOW, dp->i_mount,
+ bp->b_addr, __FILE__, __LINE__, fa);
+ ASSERT(0);
}
+#endif
-static bool
+static xfs_failaddr_t
xfs_dir3_data_verify(
struct xfs_buf *bp)
{
@@ -224,20 +256,18 @@ xfs_dir3_data_verify(
if (xfs_sb_version_hascrc(&mp->m_sb)) {
if (hdr3->magic != cpu_to_be32(XFS_DIR3_DATA_MAGIC))
- return false;
+ return __this_address;
if (!uuid_equal(&hdr3->uuid, &mp->m_sb.sb_meta_uuid))
- return false;
+ return __this_address;
if (be64_to_cpu(hdr3->blkno) != bp->b_bn)
- return false;
+ return __this_address;
if (!xfs_log_check_lsn(mp, be64_to_cpu(hdr3->lsn)))
- return false;
+ return __this_address;
} else {
if (hdr3->magic != cpu_to_be32(XFS_DIR2_DATA_MAGIC))
- return false;
+ return __this_address;
}
- if (__xfs_dir3_data_check(NULL, bp))
- return false;
- return true;
+ return __xfs_dir3_data_check(NULL, bp);
}
/*
@@ -263,8 +293,7 @@ xfs_dir3_data_reada_verify(
bp->b_ops->verify_read(bp);
return;
default:
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- xfs_verifier_error(bp);
+ xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
break;
}
}
@@ -274,15 +303,16 @@ xfs_dir3_data_read_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
+ xfs_failaddr_t fa;
if (xfs_sb_version_hascrc(&mp->m_sb) &&
- !xfs_buf_verify_cksum(bp, XFS_DIR3_DATA_CRC_OFF))
- xfs_buf_ioerror(bp, -EFSBADCRC);
- else if (!xfs_dir3_data_verify(bp))
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
-
- if (bp->b_error)
- xfs_verifier_error(bp);
+ !xfs_buf_verify_cksum(bp, XFS_DIR3_DATA_CRC_OFF))
+ xfs_verifier_error(bp, -EFSBADCRC, __this_address);
+ else {
+ fa = xfs_dir3_data_verify(bp);
+ if (fa)
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
+ }
}
static void
@@ -290,12 +320,13 @@ xfs_dir3_data_write_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
- struct xfs_buf_log_item *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr;
+ xfs_failaddr_t fa;
- if (!xfs_dir3_data_verify(bp)) {
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- xfs_verifier_error(bp);
+ fa = xfs_dir3_data_verify(bp);
+ if (fa) {
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
return;
}
@@ -312,6 +343,7 @@ const struct xfs_buf_ops xfs_dir3_data_buf_ops = {
.name = "xfs_dir3_data",
.verify_read = xfs_dir3_data_read_verify,
.verify_write = xfs_dir3_data_write_verify,
+ .verify_struct = xfs_dir3_data_verify,
};
static const struct xfs_buf_ops xfs_dir3_data_reada_buf_ops = {
@@ -515,7 +547,6 @@ xfs_dir2_data_freescan_int(
struct xfs_dir2_data_hdr *hdr,
int *loghead)
{
- xfs_dir2_block_tail_t *btp; /* block tail */
xfs_dir2_data_entry_t *dep; /* active data entry */
xfs_dir2_data_unused_t *dup; /* unused data entry */
struct xfs_dir2_data_free *bf;
@@ -537,12 +568,7 @@ xfs_dir2_data_freescan_int(
* Set up pointers.
*/
p = (char *)ops->data_entry_p(hdr);
- if (hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC) ||
- hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC)) {
- btp = xfs_dir2_block_tail_p(geo, hdr);
- endp = (char *)xfs_dir2_block_leaf_p(btp);
- } else
- endp = (char *)hdr + geo->blksize;
+ endp = xfs_dir3_data_endp(geo, hdr);
/*
* Loop over the block's entries.
*/
@@ -755,17 +781,9 @@ xfs_dir2_data_make_free(
/*
* Figure out where the end of the data area is.
*/
- if (hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
- hdr->magic == cpu_to_be32(XFS_DIR3_DATA_MAGIC))
- endptr = (char *)hdr + args->geo->blksize;
- else {
- xfs_dir2_block_tail_t *btp; /* block tail */
+ endptr = xfs_dir3_data_endp(args->geo, hdr);
+ ASSERT(endptr != NULL);
- ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC) ||
- hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC));
- btp = xfs_dir2_block_tail_p(args->geo, hdr);
- endptr = (char *)xfs_dir2_block_leaf_p(btp);
- }
/*
* If this isn't the start of the block, then back up to
* the previous entry and see if it's free.
@@ -1067,3 +1085,21 @@ xfs_dir2_data_use_free(
}
*needscanp = needscan;
}
+
+/* Find the end of the entry data in a data/block format dir block. */
+void *
+xfs_dir3_data_endp(
+ struct xfs_da_geometry *geo,
+ struct xfs_dir2_data_hdr *hdr)
+{
+ switch (hdr->magic) {
+ case cpu_to_be32(XFS_DIR3_BLOCK_MAGIC):
+ case cpu_to_be32(XFS_DIR2_BLOCK_MAGIC):
+ return xfs_dir2_block_leaf_p(xfs_dir2_block_tail_p(geo, hdr));
+ case cpu_to_be32(XFS_DIR3_DATA_MAGIC):
+ case cpu_to_be32(XFS_DIR2_DATA_MAGIC):
+ return (char *)hdr + geo->blksize;
+ default:
+ return NULL;
+ }
+}
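
Callers of the new helper must treat a NULL return as an unrecognized block rather than assume one of the four magics; xfs_dir2_data_make_free above merely asserts because its callers have already validated the buffer. A hedged sketch of the defensive form used in the verifiers (the wrapper itself is hypothetical):

static xfs_failaddr_t
example_check_entries(
	struct xfs_da_geometry		*geo,
	struct xfs_dir2_data_hdr	*hdr)
{
	void				*endp;

	endp = xfs_dir3_data_endp(geo, hdr);
	if (!endp)
		return __this_address;	/* unknown magic: report, don't walk */
	/* ... walk entries strictly below endp ... */
	return NULL;
}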
diff --git a/fs/xfs/libxfs/xfs_dir2_leaf.c b/fs/xfs/libxfs/xfs_dir2_leaf.c
index 27297a689d9c..d7e630f41f9c 100644
--- a/fs/xfs/libxfs/xfs_dir2_leaf.c
+++ b/fs/xfs/libxfs/xfs_dir2_leaf.c
@@ -50,13 +50,7 @@ static void xfs_dir3_leaf_log_tail(struct xfs_da_args *args,
* Pop an assert if something is wrong.
*/
#ifdef DEBUG
-#define xfs_dir3_leaf_check(dp, bp) \
-do { \
- if (!xfs_dir3_leaf1_check((dp), (bp))) \
- ASSERT(0); \
-} while (0);
-
-STATIC bool
+static xfs_failaddr_t
xfs_dir3_leaf1_check(
struct xfs_inode *dp,
struct xfs_buf *bp)
@@ -69,17 +63,32 @@ xfs_dir3_leaf1_check(
if (leafhdr.magic == XFS_DIR3_LEAF1_MAGIC) {
struct xfs_dir3_leaf_hdr *leaf3 = bp->b_addr;
if (be64_to_cpu(leaf3->info.blkno) != bp->b_bn)
- return false;
+ return __this_address;
} else if (leafhdr.magic != XFS_DIR2_LEAF1_MAGIC)
- return false;
+ return __this_address;
return xfs_dir3_leaf_check_int(dp->i_mount, dp, &leafhdr, leaf);
}
+
+static inline void
+xfs_dir3_leaf_check(
+ struct xfs_inode *dp,
+ struct xfs_buf *bp)
+{
+ xfs_failaddr_t fa;
+
+ fa = xfs_dir3_leaf1_check(dp, bp);
+ if (!fa)
+ return;
+ xfs_corruption_error(__func__, XFS_ERRLEVEL_LOW, dp->i_mount,
+ bp->b_addr, __FILE__, __LINE__, fa);
+ ASSERT(0);
+}
#else
#define xfs_dir3_leaf_check(dp, bp)
#endif
-bool
+xfs_failaddr_t
xfs_dir3_leaf_check_int(
struct xfs_mount *mp,
struct xfs_inode *dp,
@@ -114,27 +123,27 @@ xfs_dir3_leaf_check_int(
* We can deduce a value for that from di_size.
*/
if (hdr->count > ops->leaf_max_ents(geo))
- return false;
+ return __this_address;
/* Leaves and bests don't overlap in leaf format. */
if ((hdr->magic == XFS_DIR2_LEAF1_MAGIC ||
hdr->magic == XFS_DIR3_LEAF1_MAGIC) &&
(char *)&ents[hdr->count] > (char *)xfs_dir2_leaf_bests_p(ltp))
- return false;
+ return __this_address;
/* Check hash value order, count stale entries. */
for (i = stale = 0; i < hdr->count; i++) {
if (i + 1 < hdr->count) {
if (be32_to_cpu(ents[i].hashval) >
be32_to_cpu(ents[i + 1].hashval))
- return false;
+ return __this_address;
}
if (ents[i].address == cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
stale++;
}
if (hdr->stale != stale)
- return false;
- return true;
+ return __this_address;
+ return NULL;
}
/*
@@ -142,7 +151,7 @@ xfs_dir3_leaf_check_int(
* kernels we don't get assertion failures in xfs_dir3_leaf_hdr_from_disk() due
* to incorrect magic numbers.
*/
-static bool
+static xfs_failaddr_t
xfs_dir3_leaf_verify(
struct xfs_buf *bp,
uint16_t magic)
@@ -160,16 +169,16 @@ xfs_dir3_leaf_verify(
: XFS_DIR3_LEAFN_MAGIC;
if (leaf3->info.hdr.magic != cpu_to_be16(magic3))
- return false;
+ return __this_address;
if (!uuid_equal(&leaf3->info.uuid, &mp->m_sb.sb_meta_uuid))
- return false;
+ return __this_address;
if (be64_to_cpu(leaf3->info.blkno) != bp->b_bn)
- return false;
+ return __this_address;
if (!xfs_log_check_lsn(mp, be64_to_cpu(leaf3->info.lsn)))
- return false;
+ return __this_address;
} else {
if (leaf->hdr.info.magic != cpu_to_be16(magic))
- return false;
+ return __this_address;
}
return xfs_dir3_leaf_check_int(mp, NULL, NULL, leaf);
@@ -181,15 +190,16 @@ __read_verify(
uint16_t magic)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
+ xfs_failaddr_t fa;
if (xfs_sb_version_hascrc(&mp->m_sb) &&
!xfs_buf_verify_cksum(bp, XFS_DIR3_LEAF_CRC_OFF))
- xfs_buf_ioerror(bp, -EFSBADCRC);
- else if (!xfs_dir3_leaf_verify(bp, magic))
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
-
- if (bp->b_error)
- xfs_verifier_error(bp);
+ xfs_verifier_error(bp, -EFSBADCRC, __this_address);
+ else {
+ fa = xfs_dir3_leaf_verify(bp, magic);
+ if (fa)
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
+ }
}
static void
@@ -198,12 +208,13 @@ __write_verify(
uint16_t magic)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
- struct xfs_buf_log_item *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
struct xfs_dir3_leaf_hdr *hdr3 = bp->b_addr;
+ xfs_failaddr_t fa;
- if (!xfs_dir3_leaf_verify(bp, magic)) {
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- xfs_verifier_error(bp);
+ fa = xfs_dir3_leaf_verify(bp, magic);
+ if (fa) {
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
return;
}
@@ -216,6 +227,13 @@ __write_verify(
xfs_buf_update_cksum(bp, XFS_DIR3_LEAF_CRC_OFF);
}
+static xfs_failaddr_t
+xfs_dir3_leaf1_verify(
+ struct xfs_buf *bp)
+{
+ return xfs_dir3_leaf_verify(bp, XFS_DIR2_LEAF1_MAGIC);
+}
+
static void
xfs_dir3_leaf1_read_verify(
struct xfs_buf *bp)
@@ -230,6 +248,13 @@ xfs_dir3_leaf1_write_verify(
__write_verify(bp, XFS_DIR2_LEAF1_MAGIC);
}
+static xfs_failaddr_t
+xfs_dir3_leafn_verify(
+ struct xfs_buf *bp)
+{
+ return xfs_dir3_leaf_verify(bp, XFS_DIR2_LEAFN_MAGIC);
+}
+
static void
xfs_dir3_leafn_read_verify(
struct xfs_buf *bp)
@@ -248,12 +273,14 @@ const struct xfs_buf_ops xfs_dir3_leaf1_buf_ops = {
.name = "xfs_dir3_leaf1",
.verify_read = xfs_dir3_leaf1_read_verify,
.verify_write = xfs_dir3_leaf1_write_verify,
+ .verify_struct = xfs_dir3_leaf1_verify,
};
const struct xfs_buf_ops xfs_dir3_leafn_buf_ops = {
.name = "xfs_dir3_leafn",
.verify_read = xfs_dir3_leafn_read_verify,
.verify_write = xfs_dir3_leafn_write_verify,
+ .verify_struct = xfs_dir3_leafn_verify,
};
int
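
Editorial note on the pattern repeated throughout this series: verifiers now return NULL on success and __this_address — the address of the failing check, which xfs_verifier_error() can resolve to a symbol — on failure. A minimal hedged sketch of the shape, not taken from the patch:

static xfs_failaddr_t
example_verify(
	struct xfs_buf		*bp)
{
	struct xfs_dir3_leaf_hdr *hdr3 = bp->b_addr;

	/* Failure returns the address of the check that tripped. */
	if (be64_to_cpu(hdr3->info.blkno) != bp->b_bn)
		return __this_address;
	return NULL;			/* NULL means no corruption found */
}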
diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c
index 682e2bf370c7..239d97a64296 100644
--- a/fs/xfs/libxfs/xfs_dir2_node.c
+++ b/fs/xfs/libxfs/xfs_dir2_node.c
@@ -53,13 +53,7 @@ static int xfs_dir2_node_addname_int(xfs_da_args_t *args,
* Check internal consistency of a leafn block.
*/
#ifdef DEBUG
-#define xfs_dir3_leaf_check(dp, bp) \
-do { \
- if (!xfs_dir3_leafn_check((dp), (bp))) \
- ASSERT(0); \
-} while (0);
-
-static bool
+static xfs_failaddr_t
xfs_dir3_leafn_check(
struct xfs_inode *dp,
struct xfs_buf *bp)
@@ -72,17 +66,32 @@ xfs_dir3_leafn_check(
if (leafhdr.magic == XFS_DIR3_LEAFN_MAGIC) {
struct xfs_dir3_leaf_hdr *leaf3 = bp->b_addr;
if (be64_to_cpu(leaf3->info.blkno) != bp->b_bn)
- return false;
+ return __this_address;
} else if (leafhdr.magic != XFS_DIR2_LEAFN_MAGIC)
- return false;
+ return __this_address;
return xfs_dir3_leaf_check_int(dp->i_mount, dp, &leafhdr, leaf);
}
+
+static inline void
+xfs_dir3_leaf_check(
+ struct xfs_inode *dp,
+ struct xfs_buf *bp)
+{
+ xfs_failaddr_t fa;
+
+ fa = xfs_dir3_leafn_check(dp, bp);
+ if (!fa)
+ return;
+ xfs_corruption_error(__func__, XFS_ERRLEVEL_LOW, dp->i_mount,
+ bp->b_addr, __FILE__, __LINE__, fa);
+ ASSERT(0);
+}
#else
#define xfs_dir3_leaf_check(dp, bp)
#endif
-static bool
+static xfs_failaddr_t
xfs_dir3_free_verify(
struct xfs_buf *bp)
{
@@ -93,21 +102,21 @@ xfs_dir3_free_verify(
struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr;
if (hdr3->magic != cpu_to_be32(XFS_DIR3_FREE_MAGIC))
- return false;
+ return __this_address;
if (!uuid_equal(&hdr3->uuid, &mp->m_sb.sb_meta_uuid))
- return false;
+ return __this_address;
if (be64_to_cpu(hdr3->blkno) != bp->b_bn)
- return false;
+ return __this_address;
if (!xfs_log_check_lsn(mp, be64_to_cpu(hdr3->lsn)))
- return false;
+ return __this_address;
} else {
if (hdr->magic != cpu_to_be32(XFS_DIR2_FREE_MAGIC))
- return false;
+ return __this_address;
}
/* XXX: should bounds check the xfs_dir3_icfree_hdr here */
- return true;
+ return NULL;
}
static void
@@ -115,15 +124,16 @@ xfs_dir3_free_read_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
+ xfs_failaddr_t fa;
if (xfs_sb_version_hascrc(&mp->m_sb) &&
!xfs_buf_verify_cksum(bp, XFS_DIR3_FREE_CRC_OFF))
- xfs_buf_ioerror(bp, -EFSBADCRC);
- else if (!xfs_dir3_free_verify(bp))
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
-
- if (bp->b_error)
- xfs_verifier_error(bp);
+ xfs_verifier_error(bp, -EFSBADCRC, __this_address);
+ else {
+ fa = xfs_dir3_free_verify(bp);
+ if (fa)
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
+ }
}
static void
@@ -131,12 +141,13 @@ xfs_dir3_free_write_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
- struct xfs_buf_log_item *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr;
+ xfs_failaddr_t fa;
- if (!xfs_dir3_free_verify(bp)) {
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- xfs_verifier_error(bp);
+ fa = xfs_dir3_free_verify(bp);
+ if (fa) {
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
return;
}
@@ -153,10 +164,11 @@ const struct xfs_buf_ops xfs_dir3_free_buf_ops = {
.name = "xfs_dir3_free",
.verify_read = xfs_dir3_free_read_verify,
.verify_write = xfs_dir3_free_write_verify,
+ .verify_struct = xfs_dir3_free_verify,
};
/* Everything ok in the free block header? */
-static bool
+static xfs_failaddr_t
xfs_dir3_free_header_check(
struct xfs_inode *dp,
xfs_dablk_t fbno,
@@ -174,22 +186,22 @@ xfs_dir3_free_header_check(
struct xfs_dir3_free_hdr *hdr3 = bp->b_addr;
if (be32_to_cpu(hdr3->firstdb) != firstdb)
- return false;
+ return __this_address;
if (be32_to_cpu(hdr3->nvalid) > maxbests)
- return false;
+ return __this_address;
if (be32_to_cpu(hdr3->nvalid) < be32_to_cpu(hdr3->nused))
- return false;
+ return __this_address;
} else {
struct xfs_dir2_free_hdr *hdr = bp->b_addr;
if (be32_to_cpu(hdr->firstdb) != firstdb)
- return false;
+ return __this_address;
if (be32_to_cpu(hdr->nvalid) > maxbests)
- return false;
+ return __this_address;
if (be32_to_cpu(hdr->nvalid) < be32_to_cpu(hdr->nused))
- return false;
+ return __this_address;
}
- return true;
+ return NULL;
}
static int
@@ -200,6 +212,7 @@ __xfs_dir3_free_read(
xfs_daddr_t mappedbno,
struct xfs_buf **bpp)
{
+ xfs_failaddr_t fa;
int err;
err = xfs_da_read_buf(tp, dp, fbno, mappedbno, bpp,
@@ -208,9 +221,9 @@ __xfs_dir3_free_read(
return err;
/* Check things that we can't do in the verifier. */
- if (!xfs_dir3_free_header_check(dp, fbno, *bpp)) {
- xfs_buf_ioerror(*bpp, -EFSCORRUPTED);
- xfs_verifier_error(*bpp);
+ fa = xfs_dir3_free_header_check(dp, fbno, *bpp);
+ if (fa) {
+ xfs_verifier_error(*bpp, -EFSCORRUPTED, fa);
xfs_trans_brelse(tp, *bpp);
return -EFSCORRUPTED;
}
@@ -1906,7 +1919,7 @@ xfs_dir2_node_addname_int(
(unsigned long long)ifbno, lastfbno);
if (fblk) {
xfs_alert(mp,
- " fblk 0x%p blkno %llu index %d magic 0x%x",
+ " fblk "PTR_FMT" blkno %llu index %d magic 0x%x",
fblk,
(unsigned long long)fblk->blkno,
fblk->index,
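
The 0x%p to PTR_FMT switch follows the kernel's pointer-hashing of %p output. Roughly how XFS defines the macro in xfs_linux.h — quoted from memory, not from this diff:

#ifdef DEBUG
# define PTR_FMT "%px"	/* raw pointer value on debug kernels */
#else
# define PTR_FMT "%p"	/* hashed pointer everywhere else */
#endif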
diff --git a/fs/xfs/libxfs/xfs_dir2_priv.h b/fs/xfs/libxfs/xfs_dir2_priv.h
index 4badd26c47e6..753aeeeffc18 100644
--- a/fs/xfs/libxfs/xfs_dir2_priv.h
+++ b/fs/xfs/libxfs/xfs_dir2_priv.h
@@ -39,12 +39,13 @@ extern int xfs_dir2_leaf_to_block(struct xfs_da_args *args,
/* xfs_dir2_data.c */
#ifdef DEBUG
-#define xfs_dir3_data_check(dp,bp) __xfs_dir3_data_check(dp, bp);
+extern void xfs_dir3_data_check(struct xfs_inode *dp, struct xfs_buf *bp);
#else
#define xfs_dir3_data_check(dp,bp)
#endif
-extern int __xfs_dir3_data_check(struct xfs_inode *dp, struct xfs_buf *bp);
+extern xfs_failaddr_t __xfs_dir3_data_check(struct xfs_inode *dp,
+ struct xfs_buf *bp);
extern int xfs_dir3_data_read(struct xfs_trans *tp, struct xfs_inode *dp,
xfs_dablk_t bno, xfs_daddr_t mapped_bno, struct xfs_buf **bpp);
extern int xfs_dir3_data_readahead(struct xfs_inode *dp, xfs_dablk_t bno,
@@ -89,8 +90,9 @@ xfs_dir3_leaf_find_entry(struct xfs_dir3_icleaf_hdr *leafhdr,
int lowstale, int highstale, int *lfloglow, int *lfloghigh);
extern int xfs_dir2_node_to_leaf(struct xfs_da_state *state);
-extern bool xfs_dir3_leaf_check_int(struct xfs_mount *mp, struct xfs_inode *dp,
- struct xfs_dir3_icleaf_hdr *hdr, struct xfs_dir2_leaf *leaf);
+extern xfs_failaddr_t xfs_dir3_leaf_check_int(struct xfs_mount *mp,
+ struct xfs_inode *dp, struct xfs_dir3_icleaf_hdr *hdr,
+ struct xfs_dir2_leaf *leaf);
/* xfs_dir2_node.c */
extern int xfs_dir2_leaf_to_node(struct xfs_da_args *args,
@@ -127,7 +129,7 @@ extern int xfs_dir2_sf_create(struct xfs_da_args *args, xfs_ino_t pino);
extern int xfs_dir2_sf_lookup(struct xfs_da_args *args);
extern int xfs_dir2_sf_removename(struct xfs_da_args *args);
extern int xfs_dir2_sf_replace(struct xfs_da_args *args);
-extern int xfs_dir2_sf_verify(struct xfs_inode *ip);
+extern xfs_failaddr_t xfs_dir2_sf_verify(struct xfs_inode *ip);
/* xfs_dir2_readdir.c */
extern int xfs_readdir(struct xfs_trans *tp, struct xfs_inode *dp,
diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c
index be8b9755f66a..0c75a7f00883 100644
--- a/fs/xfs/libxfs/xfs_dir2_sf.c
+++ b/fs/xfs/libxfs/xfs_dir2_sf.c
@@ -156,7 +156,6 @@ xfs_dir2_block_to_sf(
xfs_dir2_sf_hdr_t *sfhp) /* shortform directory hdr */
{
xfs_dir2_data_hdr_t *hdr; /* block header */
- xfs_dir2_block_tail_t *btp; /* block tail pointer */
xfs_dir2_data_entry_t *dep; /* data entry pointer */
xfs_inode_t *dp; /* incore directory inode */
xfs_dir2_data_unused_t *dup; /* unused data pointer */
@@ -192,9 +191,8 @@ xfs_dir2_block_to_sf(
/*
* Set up to loop over the block's entries.
*/
- btp = xfs_dir2_block_tail_p(args->geo, hdr);
ptr = (char *)dp->d_ops->data_entry_p(hdr);
- endptr = (char *)xfs_dir2_block_leaf_p(btp);
+ endptr = xfs_dir3_data_endp(args->geo, hdr);
sfep = xfs_dir2_sf_firstentry(sfp);
/*
* Loop over the active and unused entries.
@@ -630,7 +628,7 @@ xfs_dir2_sf_check(
#endif /* DEBUG */
/* Verify the consistency of an inline directory. */
-int
+xfs_failaddr_t
xfs_dir2_sf_verify(
struct xfs_inode *ip)
{
@@ -665,7 +663,7 @@ xfs_dir2_sf_verify(
*/
if (size <= offsetof(struct xfs_dir2_sf_hdr, parent) ||
size < xfs_dir2_sf_hdr_size(sfp->i8count))
- return -EFSCORRUPTED;
+ return __this_address;
endp = (char *)sfp + size;
@@ -674,7 +672,7 @@ xfs_dir2_sf_verify(
i8count = ino > XFS_DIR2_MAX_SHORT_INUM;
error = xfs_dir_ino_validate(mp, ino);
if (error)
- return error;
+ return __this_address;
offset = dops->data_first_offset;
/* Check all reported entries */
@@ -686,11 +684,11 @@ xfs_dir2_sf_verify(
* within the data buffer.
*/
if (((char *)sfep + sizeof(*sfep)) >= endp)
- return -EFSCORRUPTED;
+ return __this_address;
/* Don't allow names with known bad length. */
if (sfep->namelen == 0)
- return -EFSCORRUPTED;
+ return __this_address;
/*
* Check that the variable-length part of the structure is
@@ -699,23 +697,23 @@ xfs_dir2_sf_verify(
*/
next_sfep = dops->sf_nextentry(sfp, sfep);
if (endp < (char *)next_sfep)
- return -EFSCORRUPTED;
+ return __this_address;
/* Check that the offsets always increase. */
if (xfs_dir2_sf_get_offset(sfep) < offset)
- return -EFSCORRUPTED;
+ return __this_address;
/* Check the inode number. */
ino = dops->sf_get_ino(sfp, sfep);
i8count += ino > XFS_DIR2_MAX_SHORT_INUM;
error = xfs_dir_ino_validate(mp, ino);
if (error)
- return error;
+ return __this_address;
/* Check the file type. */
filetype = dops->sf_get_ftype(sfep);
if (filetype >= XFS_DIR3_FT_MAX)
- return -EFSCORRUPTED;
+ return __this_address;
offset = xfs_dir2_sf_get_offset(sfep) +
dops->data_entsize(sfep->namelen);
@@ -723,16 +721,16 @@ xfs_dir2_sf_verify(
sfep = next_sfep;
}
if (i8count != sfp->i8count)
- return -EFSCORRUPTED;
+ return __this_address;
if ((void *)sfep != (void *)endp)
- return -EFSCORRUPTED;
+ return __this_address;
/* Make sure this whole thing ought to be in local format. */
if (offset + (sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) +
(uint)sizeof(xfs_dir2_block_tail_t) > mp->m_dir_geo->blksize)
- return -EFSCORRUPTED;
+ return __this_address;
- return 0;
+ return NULL;
}
/*
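
Since xfs_dir2_sf_verify() now reports a failure address instead of an errno, the caller owns the error reporting. A hedged sketch using xfs_inode_verifier_error(), whose signature appears in the xfs_inode_buf.c hunk further down; passing a NULL buffer is assumed acceptable here:

static void
example_check_sf_dir(
	struct xfs_inode	*ip)
{
	xfs_failaddr_t		fa;

	fa = xfs_dir2_sf_verify(ip);
	if (fa)
		xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__,
				NULL, 0, fa);
}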
diff --git a/fs/xfs/libxfs/xfs_dquot_buf.c b/fs/xfs/libxfs/xfs_dquot_buf.c
index 747085b4ef44..8b7a6c3cb599 100644
--- a/fs/xfs/libxfs/xfs_dquot_buf.c
+++ b/fs/xfs/libxfs/xfs_dquot_buf.c
@@ -42,18 +42,14 @@ xfs_calc_dquots_per_chunk(
/*
* Do some primitive error checking on ondisk dquot data structures.
*/
-int
-xfs_dqcheck(
+xfs_failaddr_t
+xfs_dquot_verify(
struct xfs_mount *mp,
xfs_disk_dquot_t *ddq,
xfs_dqid_t id,
uint type, /* used only when IO_dorepair is true */
- uint flags,
- const char *str)
+ uint flags)
{
- xfs_dqblk_t *d = (xfs_dqblk_t *)ddq;
- int errs = 0;
-
/*
* We can encounter an uninitialized dquot buffer, for instance if we
* crashed while deleting the quotainode(s) and those blocks were later
* reused for user data.
* This is all fine; things are still consistent, and we haven't lost
* This is all fine; things are still consistent, and we haven't lost
* any quota information. Just don't complain about bad dquot blks.
*/
- if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC)) {
- if (flags & XFS_QMOPT_DOWARN)
- xfs_alert(mp,
- "%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
- str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC);
- errs++;
- }
- if (ddq->d_version != XFS_DQUOT_VERSION) {
- if (flags & XFS_QMOPT_DOWARN)
- xfs_alert(mp,
- "%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
- str, id, ddq->d_version, XFS_DQUOT_VERSION);
- errs++;
- }
+ if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC))
+ return __this_address;
+ if (ddq->d_version != XFS_DQUOT_VERSION)
+ return __this_address;
if (ddq->d_flags != XFS_DQ_USER &&
ddq->d_flags != XFS_DQ_PROJ &&
- ddq->d_flags != XFS_DQ_GROUP) {
- if (flags & XFS_QMOPT_DOWARN)
- xfs_alert(mp,
- "%s : XFS dquot ID 0x%x, unknown flags 0x%x",
- str, id, ddq->d_flags);
- errs++;
- }
+ ddq->d_flags != XFS_DQ_GROUP)
+ return __this_address;
- if (id != -1 && id != be32_to_cpu(ddq->d_id)) {
- if (flags & XFS_QMOPT_DOWARN)
- xfs_alert(mp,
- "%s : ondisk-dquot 0x%p, ID mismatch: "
- "0x%x expected, found id 0x%x",
- str, ddq, id, be32_to_cpu(ddq->d_id));
- errs++;
- }
+ if (id != -1 && id != be32_to_cpu(ddq->d_id))
+ return __this_address;
- if (!errs && ddq->d_id) {
- if (ddq->d_blk_softlimit &&
- be64_to_cpu(ddq->d_bcount) >
- be64_to_cpu(ddq->d_blk_softlimit)) {
- if (!ddq->d_btimer) {
- if (flags & XFS_QMOPT_DOWARN)
- xfs_alert(mp,
- "%s : Dquot ID 0x%x (0x%p) BLK TIMER NOT STARTED",
- str, (int)be32_to_cpu(ddq->d_id), ddq);
- errs++;
- }
- }
- if (ddq->d_ino_softlimit &&
- be64_to_cpu(ddq->d_icount) >
- be64_to_cpu(ddq->d_ino_softlimit)) {
- if (!ddq->d_itimer) {
- if (flags & XFS_QMOPT_DOWARN)
- xfs_alert(mp,
- "%s : Dquot ID 0x%x (0x%p) INODE TIMER NOT STARTED",
- str, (int)be32_to_cpu(ddq->d_id), ddq);
- errs++;
- }
- }
- if (ddq->d_rtb_softlimit &&
- be64_to_cpu(ddq->d_rtbcount) >
- be64_to_cpu(ddq->d_rtb_softlimit)) {
- if (!ddq->d_rtbtimer) {
- if (flags & XFS_QMOPT_DOWARN)
- xfs_alert(mp,
- "%s : Dquot ID 0x%x (0x%p) RTBLK TIMER NOT STARTED",
- str, (int)be32_to_cpu(ddq->d_id), ddq);
- errs++;
- }
- }
- }
+ if (!ddq->d_id)
+ return NULL;
+
+ if (ddq->d_blk_softlimit &&
+ be64_to_cpu(ddq->d_bcount) > be64_to_cpu(ddq->d_blk_softlimit) &&
+ !ddq->d_btimer)
+ return __this_address;
+
+ if (ddq->d_ino_softlimit &&
+ be64_to_cpu(ddq->d_icount) > be64_to_cpu(ddq->d_ino_softlimit) &&
+ !ddq->d_itimer)
+ return __this_address;
- if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
- return errs;
+ if (ddq->d_rtb_softlimit &&
+ be64_to_cpu(ddq->d_rtbcount) > be64_to_cpu(ddq->d_rtb_softlimit) &&
+ !ddq->d_rtbtimer)
+ return __this_address;
+
+ return NULL;
+}
+
+/*
+ * Do some primitive repair on an ondisk dquot data structure.
+ */
+int
+xfs_dquot_repair(
+ struct xfs_mount *mp,
+ struct xfs_disk_dquot *ddq,
+ xfs_dqid_t id,
+ uint type)
+{
+ struct xfs_dqblk *d = (struct xfs_dqblk *)ddq;
- if (flags & XFS_QMOPT_DOWARN)
- xfs_notice(mp, "Re-initializing dquot ID 0x%x", id);
/*
* Typically, a repair is only requested by quotacheck.
*/
ASSERT(id != -1);
- ASSERT(flags & XFS_QMOPT_DQREPAIR);
memset(d, 0, sizeof(xfs_dqblk_t));
d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
@@ -163,7 +129,7 @@ xfs_dqcheck(
XFS_DQUOT_CRC_OFF);
}
- return errs;
+ return 0;
}
STATIC bool
@@ -198,13 +164,13 @@ xfs_dquot_buf_verify_crc(
return true;
}
-STATIC bool
+STATIC xfs_failaddr_t
xfs_dquot_buf_verify(
struct xfs_mount *mp,
- struct xfs_buf *bp,
- int warn)
+ struct xfs_buf *bp)
{
struct xfs_dqblk *d = (struct xfs_dqblk *)bp->b_addr;
+ xfs_failaddr_t fa;
xfs_dqid_t id = 0;
int ndquots;
int i;
@@ -228,33 +194,43 @@ xfs_dquot_buf_verify(
*/
for (i = 0; i < ndquots; i++) {
struct xfs_disk_dquot *ddq;
- int error;
ddq = &d[i].dd_diskdq;
if (i == 0)
id = be32_to_cpu(ddq->d_id);
- error = xfs_dqcheck(mp, ddq, id + i, 0, warn, __func__);
- if (error)
- return false;
+ fa = xfs_dquot_verify(mp, ddq, id + i, 0, 0);
+ if (fa)
+ return fa;
}
- return true;
+
+ return NULL;
+}
+
+static xfs_failaddr_t
+xfs_dquot_buf_verify_struct(
+ struct xfs_buf *bp)
+{
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+
+ return xfs_dquot_buf_verify(mp, bp);
}
static void
xfs_dquot_buf_read_verify(
- struct xfs_buf *bp)
+ struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
+ xfs_failaddr_t fa;
if (!xfs_dquot_buf_verify_crc(mp, bp))
- xfs_buf_ioerror(bp, -EFSBADCRC);
- else if (!xfs_dquot_buf_verify(mp, bp, XFS_QMOPT_DOWARN))
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
-
- if (bp->b_error)
- xfs_verifier_error(bp);
+ xfs_verifier_error(bp, -EFSBADCRC, __this_address);
+ else {
+ fa = xfs_dquot_buf_verify(mp, bp);
+ if (fa)
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
+ }
}
/*
@@ -270,7 +246,7 @@ xfs_dquot_buf_readahead_verify(
struct xfs_mount *mp = bp->b_target->bt_mount;
if (!xfs_dquot_buf_verify_crc(mp, bp) ||
- !xfs_dquot_buf_verify(mp, bp, 0)) {
+ xfs_dquot_buf_verify(mp, bp) != NULL) {
xfs_buf_ioerror(bp, -EIO);
bp->b_flags &= ~XBF_DONE;
}
@@ -283,21 +259,21 @@ xfs_dquot_buf_readahead_verify(
*/
static void
xfs_dquot_buf_write_verify(
- struct xfs_buf *bp)
+ struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
+ xfs_failaddr_t fa;
- if (!xfs_dquot_buf_verify(mp, bp, XFS_QMOPT_DOWARN)) {
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- xfs_verifier_error(bp);
- return;
- }
+ fa = xfs_dquot_buf_verify(mp, bp);
+ if (fa)
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
}
const struct xfs_buf_ops xfs_dquot_buf_ops = {
.name = "xfs_dquot",
.verify_read = xfs_dquot_buf_read_verify,
.verify_write = xfs_dquot_buf_write_verify,
+ .verify_struct = xfs_dquot_buf_verify_struct,
};
const struct xfs_buf_ops xfs_dquot_buf_ra_ops = {
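
The new .verify_struct hook lets code such as online scrub re-run the structural checks on a buffer already in memory, without the I/O-path error handling. A hedged sketch:

static int
example_recheck_buffer(
	struct xfs_buf		*bp)
{
	xfs_failaddr_t		fa;

	if (!bp->b_ops || !bp->b_ops->verify_struct)
		return 0;		/* nothing to recheck */
	fa = bp->b_ops->verify_struct(bp);
	return fa ? -EFSCORRUPTED : 0;
}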
diff --git a/fs/xfs/libxfs/xfs_fs.h b/fs/xfs/libxfs/xfs_fs.h
index b90924104596..faf1a4edd618 100644
--- a/fs/xfs/libxfs/xfs_fs.h
+++ b/fs/xfs/libxfs/xfs_fs.h
@@ -233,6 +233,13 @@ typedef struct xfs_fsop_resblks {
#define XFS_MAX_LOG_BLOCKS (1024 * 1024ULL)
#define XFS_MIN_LOG_BYTES (10 * 1024 * 1024ULL)
+/*
+ * Limits on sb_agblocks/sb_agblklog -- mkfs won't format AGs smaller than
+ * 16MB or larger than 1TB.
+ */
+#define XFS_MIN_AG_BYTES (1ULL << 24) /* 16 MB */
+#define XFS_MAX_AG_BYTES (1ULL << 40) /* 1 TB */
+
/* keep the maximum size under 2^31 by a small amount */
#define XFS_MAX_LOG_BYTES \
((2 * 1024 * 1024 * 1024ULL) - XFS_MIN_LOG_BYTES)
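
Worked out for a 4096-byte block size, the new limits allow from 4096 blocks (16 MB) up to 1 << 28 blocks (1 TB) per AG. A hedged sketch of the bounds test the superblock verifier below applies through XFS_FSB_TO_B():

static bool
example_agblocks_in_range(
	struct xfs_mount	*mp,
	xfs_agblock_t		agblocks)
{
	uint64_t		bytes = XFS_FSB_TO_B(mp, agblocks);

	return bytes >= XFS_MIN_AG_BYTES && bytes <= XFS_MAX_AG_BYTES;
}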
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index 3b57ef0f2f76..0e2cf5f0be1f 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -2491,7 +2491,7 @@ xfs_check_agi_unlinked(
#define xfs_check_agi_unlinked(agi)
#endif
-static bool
+static xfs_failaddr_t
xfs_agi_verify(
struct xfs_buf *bp)
{
@@ -2500,28 +2500,28 @@ xfs_agi_verify(
if (xfs_sb_version_hascrc(&mp->m_sb)) {
if (!uuid_equal(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid))
- return false;
+ return __this_address;
if (!xfs_log_check_lsn(mp,
be64_to_cpu(XFS_BUF_TO_AGI(bp)->agi_lsn)))
- return false;
+ return __this_address;
}
/*
* Validate the magic number of the agi block.
*/
if (agi->agi_magicnum != cpu_to_be32(XFS_AGI_MAGIC))
- return false;
+ return __this_address;
if (!XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum)))
- return false;
+ return __this_address;
if (be32_to_cpu(agi->agi_level) < 1 ||
be32_to_cpu(agi->agi_level) > XFS_BTREE_MAXLEVELS)
- return false;
+ return __this_address;
if (xfs_sb_version_hasfinobt(&mp->m_sb) &&
(be32_to_cpu(agi->agi_free_level) < 1 ||
be32_to_cpu(agi->agi_free_level) > XFS_BTREE_MAXLEVELS))
- return false;
+ return __this_address;
/*
* during growfs operations, the perag is not fully initialised,
@@ -2530,10 +2530,10 @@ xfs_agi_verify(
* so we can detect and avoid this problem.
*/
if (bp->b_pag && be32_to_cpu(agi->agi_seqno) != bp->b_pag->pag_agno)
- return false;
+ return __this_address;
xfs_check_agi_unlinked(agi);
- return true;
+ return NULL;
}
static void
@@ -2541,28 +2541,29 @@ xfs_agi_read_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
+ xfs_failaddr_t fa;
if (xfs_sb_version_hascrc(&mp->m_sb) &&
!xfs_buf_verify_cksum(bp, XFS_AGI_CRC_OFF))
- xfs_buf_ioerror(bp, -EFSBADCRC);
- else if (XFS_TEST_ERROR(!xfs_agi_verify(bp), mp,
- XFS_ERRTAG_IALLOC_READ_AGI))
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
-
- if (bp->b_error)
- xfs_verifier_error(bp);
+ xfs_verifier_error(bp, -EFSBADCRC, __this_address);
+ else {
+ fa = xfs_agi_verify(bp);
+ if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_IALLOC_READ_AGI))
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
+ }
}
static void
xfs_agi_write_verify(
struct xfs_buf *bp)
{
- struct xfs_mount *mp = bp->b_target->bt_mount;
- struct xfs_buf_log_item *bip = bp->b_fspriv;
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
+ xfs_failaddr_t fa;
- if (!xfs_agi_verify(bp)) {
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- xfs_verifier_error(bp);
+ fa = xfs_agi_verify(bp);
+ if (fa) {
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
return;
}
@@ -2578,6 +2579,7 @@ const struct xfs_buf_ops xfs_agi_buf_ops = {
.name = "xfs_agi",
.verify_read = xfs_agi_read_verify,
.verify_write = xfs_agi_write_verify,
+ .verify_struct = xfs_agi_verify,
};
/*
@@ -2751,3 +2753,102 @@ xfs_verify_dir_ino(
return false;
return xfs_verify_ino(mp, ino);
}
+
+/* Is there an inode record covering a given range of inode numbers? */
+int
+xfs_ialloc_has_inode_record(
+ struct xfs_btree_cur *cur,
+ xfs_agino_t low,
+ xfs_agino_t high,
+ bool *exists)
+{
+ struct xfs_inobt_rec_incore irec;
+ xfs_agino_t agino;
+ uint16_t holemask;
+ int has_record;
+ int i;
+ int error;
+
+ *exists = false;
+ error = xfs_inobt_lookup(cur, low, XFS_LOOKUP_LE, &has_record);
+ while (error == 0 && has_record) {
+ error = xfs_inobt_get_rec(cur, &irec, &has_record);
+ if (error || irec.ir_startino > high)
+ break;
+
+ agino = irec.ir_startino;
+ holemask = irec.ir_holemask;
+ for (i = 0; i < XFS_INOBT_HOLEMASK_BITS; holemask >>= 1,
+ i++, agino += XFS_INODES_PER_HOLEMASK_BIT) {
+ if (holemask & 1)
+ continue;
+ if (agino + XFS_INODES_PER_HOLEMASK_BIT > low &&
+ agino <= high) {
+ *exists = true;
+ return 0;
+ }
+ }
+
+ error = xfs_btree_increment(cur, 0, &has_record);
+ }
+ return error;
+}
+
+/* Is there an inode record covering a given extent? */
+int
+xfs_ialloc_has_inodes_at_extent(
+ struct xfs_btree_cur *cur,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ bool *exists)
+{
+ xfs_agino_t low;
+ xfs_agino_t high;
+
+ low = XFS_OFFBNO_TO_AGINO(cur->bc_mp, bno, 0);
+ high = XFS_OFFBNO_TO_AGINO(cur->bc_mp, bno + len, 0) - 1;
+
+ return xfs_ialloc_has_inode_record(cur, low, high, exists);
+}
+
+struct xfs_ialloc_count_inodes {
+ xfs_agino_t count;
+ xfs_agino_t freecount;
+};
+
+/* Record inode counts across all inobt records. */
+STATIC int
+xfs_ialloc_count_inodes_rec(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_rec *rec,
+ void *priv)
+{
+ struct xfs_inobt_rec_incore irec;
+ struct xfs_ialloc_count_inodes *ci = priv;
+
+ xfs_inobt_btrec_to_irec(cur->bc_mp, rec, &irec);
+ ci->count += irec.ir_count;
+ ci->freecount += irec.ir_freecount;
+
+ return 0;
+}
+
+/* Count allocated and free inodes under an inobt. */
+int
+xfs_ialloc_count_inodes(
+ struct xfs_btree_cur *cur,
+ xfs_agino_t *count,
+ xfs_agino_t *freecount)
+{
+ struct xfs_ialloc_count_inodes ci = {0};
+ int error;
+
+ ASSERT(cur->bc_btnum == XFS_BTNUM_INO);
+ error = xfs_btree_query_all(cur, xfs_ialloc_count_inodes_rec, &ci);
+ if (error)
+ return error;
+
+ *count = ci.count;
+ *freecount = ci.freecount;
+ return 0;
+}
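
A hedged usage sketch for the new counting helper; xfs_inobt_init_cursor() and xfs_btree_del_cursor() are existing XFS APIs assumed here, not part of this diff:

static int
example_count_ag_inodes(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	xfs_agnumber_t		agno,
	xfs_agino_t		*count,
	xfs_agino_t		*freecount)
{
	struct xfs_btree_cur	*cur;
	int			error;

	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);
	error = xfs_ialloc_count_inodes(cur, count, freecount);
	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	return error;
}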
diff --git a/fs/xfs/libxfs/xfs_ialloc.h b/fs/xfs/libxfs/xfs_ialloc.h
index 66a8de0b1caa..c5402bb4ce0c 100644
--- a/fs/xfs/libxfs/xfs_ialloc.h
+++ b/fs/xfs/libxfs/xfs_ialloc.h
@@ -170,6 +170,12 @@ int xfs_read_agi(struct xfs_mount *mp, struct xfs_trans *tp,
union xfs_btree_rec;
void xfs_inobt_btrec_to_irec(struct xfs_mount *mp, union xfs_btree_rec *rec,
struct xfs_inobt_rec_incore *irec);
+int xfs_ialloc_has_inodes_at_extent(struct xfs_btree_cur *cur,
+ xfs_agblock_t bno, xfs_extlen_t len, bool *exists);
+int xfs_ialloc_has_inode_record(struct xfs_btree_cur *cur, xfs_agino_t low,
+ xfs_agino_t high, bool *exists);
+int xfs_ialloc_count_inodes(struct xfs_btree_cur *cur, xfs_agino_t *count,
+ xfs_agino_t *freecount);
int xfs_ialloc_cluster_alignment(struct xfs_mount *mp);
void xfs_ialloc_agino_range(struct xfs_mount *mp, xfs_agnumber_t agno,
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
index 317caba9faa6..af197a5f3a82 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
@@ -141,21 +141,42 @@ xfs_finobt_alloc_block(
union xfs_btree_ptr *new,
int *stat)
{
+ if (cur->bc_mp->m_inotbt_nores)
+ return xfs_inobt_alloc_block(cur, start, new, stat);
return __xfs_inobt_alloc_block(cur, start, new, stat,
XFS_AG_RESV_METADATA);
}
STATIC int
-xfs_inobt_free_block(
+__xfs_inobt_free_block(
struct xfs_btree_cur *cur,
- struct xfs_buf *bp)
+ struct xfs_buf *bp,
+ enum xfs_ag_resv_type resv)
{
struct xfs_owner_info oinfo;
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT);
return xfs_free_extent(cur->bc_tp,
XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(bp)), 1,
- &oinfo, XFS_AG_RESV_NONE);
+ &oinfo, resv);
+}
+
+STATIC int
+xfs_inobt_free_block(
+ struct xfs_btree_cur *cur,
+ struct xfs_buf *bp)
+{
+ return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_NONE);
+}
+
+STATIC int
+xfs_finobt_free_block(
+ struct xfs_btree_cur *cur,
+ struct xfs_buf *bp)
+{
+ if (cur->bc_mp->m_inotbt_nores)
+ return xfs_inobt_free_block(cur, bp);
+ return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_METADATA);
}
STATIC int
@@ -250,12 +271,13 @@ xfs_inobt_diff_two_keys(
be32_to_cpu(k2->inobt.ir_startino);
}
-static int
+static xfs_failaddr_t
xfs_inobt_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
+ xfs_failaddr_t fa;
unsigned int level;
/*
@@ -271,20 +293,21 @@ xfs_inobt_verify(
switch (block->bb_magic) {
case cpu_to_be32(XFS_IBT_CRC_MAGIC):
case cpu_to_be32(XFS_FIBT_CRC_MAGIC):
- if (!xfs_btree_sblock_v5hdr_verify(bp))
- return false;
+ fa = xfs_btree_sblock_v5hdr_verify(bp);
+ if (fa)
+ return fa;
/* fall through */
case cpu_to_be32(XFS_IBT_MAGIC):
case cpu_to_be32(XFS_FIBT_MAGIC):
break;
default:
- return 0;
+ return __this_address;
}
/* level verification */
level = be16_to_cpu(block->bb_level);
if (level >= mp->m_in_maxlevels)
- return false;
+ return __this_address;
return xfs_btree_sblock_verify(bp, mp->m_inobt_mxr[level != 0]);
}
@@ -293,25 +316,30 @@ static void
xfs_inobt_read_verify(
struct xfs_buf *bp)
{
+ xfs_failaddr_t fa;
+
if (!xfs_btree_sblock_verify_crc(bp))
- xfs_buf_ioerror(bp, -EFSBADCRC);
- else if (!xfs_inobt_verify(bp))
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
+ xfs_verifier_error(bp, -EFSBADCRC, __this_address);
+ else {
+ fa = xfs_inobt_verify(bp);
+ if (fa)
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
+ }
- if (bp->b_error) {
+ if (bp->b_error)
trace_xfs_btree_corrupt(bp, _RET_IP_);
- xfs_verifier_error(bp);
- }
}
static void
xfs_inobt_write_verify(
struct xfs_buf *bp)
{
- if (!xfs_inobt_verify(bp)) {
+ xfs_failaddr_t fa;
+
+ fa = xfs_inobt_verify(bp);
+ if (fa) {
trace_xfs_btree_corrupt(bp, _RET_IP_);
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- xfs_verifier_error(bp);
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
return;
}
xfs_btree_sblock_calc_crc(bp);
@@ -322,6 +350,7 @@ const struct xfs_buf_ops xfs_inobt_buf_ops = {
.name = "xfs_inobt",
.verify_read = xfs_inobt_read_verify,
.verify_write = xfs_inobt_write_verify,
+ .verify_struct = xfs_inobt_verify,
};
STATIC int
@@ -372,7 +401,7 @@ static const struct xfs_btree_ops xfs_finobt_ops = {
.dup_cursor = xfs_inobt_dup_cursor,
.set_root = xfs_finobt_set_root,
.alloc_block = xfs_finobt_alloc_block,
- .free_block = xfs_inobt_free_block,
+ .free_block = xfs_finobt_free_block,
.get_minrecs = xfs_inobt_get_minrecs,
.get_maxrecs = xfs_inobt_get_maxrecs,
.init_key_from_rec = xfs_inobt_init_key_from_rec,
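
m_inotbt_nores records that the finobt's per-AG metadata reservation could not be made at mount time; with this change the allocation and free paths fall back symmetrically. A hedged restatement of the policy:

static enum xfs_ag_resv_type
example_finobt_resv(
	struct xfs_btree_cur	*cur)
{
	/* No reservation was made at mount time? Use ordinary free space. */
	if (cur->bc_mp->m_inotbt_nores)
		return XFS_AG_RESV_NONE;
	return XFS_AG_RESV_METADATA;
}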
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index 6b7989038d75..4fe17b368316 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -32,6 +32,8 @@
#include "xfs_ialloc.h"
#include "xfs_dir2.h"
+#include <linux/iversion.h>
+
/*
* Check that none of the inodes in the buffer have a next
* unlinked field of 0.
@@ -113,8 +115,7 @@ xfs_inode_buf_verify(
return;
}
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- xfs_verifier_error(bp);
+ xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
#ifdef DEBUG
xfs_alert(mp,
"bad inode magic/vsn daddr %lld #%d (magic=%x)",
@@ -264,7 +265,8 @@ xfs_inode_from_disk(
to->di_flags = be16_to_cpu(from->di_flags);
if (to->di_version == 3) {
- inode->i_version = be64_to_cpu(from->di_changecount);
+ inode_set_iversion_queried(inode,
+ be64_to_cpu(from->di_changecount));
to->di_crtime.t_sec = be32_to_cpu(from->di_crtime.t_sec);
to->di_crtime.t_nsec = be32_to_cpu(from->di_crtime.t_nsec);
to->di_flags2 = be64_to_cpu(from->di_flags2);
@@ -314,7 +316,7 @@ xfs_inode_to_disk(
to->di_flags = cpu_to_be16(from->di_flags);
if (from->di_version == 3) {
- to->di_changecount = cpu_to_be64(inode->i_version);
+ to->di_changecount = cpu_to_be64(inode_peek_iversion(inode));
to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
to->di_flags2 = cpu_to_be64(from->di_flags2);
@@ -381,7 +383,7 @@ xfs_log_dinode_to_disk(
}
}
-bool
+xfs_failaddr_t
xfs_dinode_verify(
struct xfs_mount *mp,
xfs_ino_t ino,
@@ -390,53 +392,122 @@ xfs_dinode_verify(
uint16_t mode;
uint16_t flags;
uint64_t flags2;
+ uint64_t di_size;
if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))
- return false;
+ return __this_address;
+
+ /* Verify v3 integrity information first */
+ if (dip->di_version >= 3) {
+ if (!xfs_sb_version_hascrc(&mp->m_sb))
+ return __this_address;
+ if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
+ XFS_DINODE_CRC_OFF))
+ return __this_address;
+ if (be64_to_cpu(dip->di_ino) != ino)
+ return __this_address;
+ if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_meta_uuid))
+ return __this_address;
+ }
/* don't allow invalid i_size */
- if (be64_to_cpu(dip->di_size) & (1ULL << 63))
- return false;
+ di_size = be64_to_cpu(dip->di_size);
+ if (di_size & (1ULL << 63))
+ return __this_address;
mode = be16_to_cpu(dip->di_mode);
if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN)
- return false;
+ return __this_address;
/* No zero-length symlinks/dirs. */
- if ((S_ISLNK(mode) || S_ISDIR(mode)) && dip->di_size == 0)
- return false;
+ if ((S_ISLNK(mode) || S_ISDIR(mode)) && di_size == 0)
+ return __this_address;
+
+ /* Fork checks carried over from xfs_iformat_fork */
+ if (mode &&
+ be32_to_cpu(dip->di_nextents) + be16_to_cpu(dip->di_anextents) >
+ be64_to_cpu(dip->di_nblocks))
+ return __this_address;
+
+ if (mode && XFS_DFORK_BOFF(dip) > mp->m_sb.sb_inodesize)
+ return __this_address;
+
+ flags = be16_to_cpu(dip->di_flags);
+
+ if (mode && (flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp)
+ return __this_address;
+
+ /* Do we have appropriate data fork formats for the mode? */
+ switch (mode & S_IFMT) {
+ case S_IFIFO:
+ case S_IFCHR:
+ case S_IFBLK:
+ case S_IFSOCK:
+ if (dip->di_format != XFS_DINODE_FMT_DEV)
+ return __this_address;
+ break;
+ case S_IFREG:
+ case S_IFLNK:
+ case S_IFDIR:
+ switch (dip->di_format) {
+ case XFS_DINODE_FMT_LOCAL:
+ /*
+ * no local regular files yet
+ */
+ if (S_ISREG(mode))
+ return __this_address;
+ if (di_size > XFS_DFORK_DSIZE(dip, mp))
+ return __this_address;
+ /* fall through */
+ case XFS_DINODE_FMT_EXTENTS:
+ case XFS_DINODE_FMT_BTREE:
+ break;
+ default:
+ return __this_address;
+ }
+ break;
+ case 0:
+ /* Uninitialized inode ok. */
+ break;
+ default:
+ return __this_address;
+ }
+
+ if (XFS_DFORK_Q(dip)) {
+ switch (dip->di_aformat) {
+ case XFS_DINODE_FMT_LOCAL:
+ case XFS_DINODE_FMT_EXTENTS:
+ case XFS_DINODE_FMT_BTREE:
+ break;
+ default:
+ return __this_address;
+ }
+ }
/* only version 3 or greater inodes are extensively verified here */
if (dip->di_version < 3)
- return true;
-
- if (!xfs_sb_version_hascrc(&mp->m_sb))
- return false;
- if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
- XFS_DINODE_CRC_OFF))
- return false;
- if (be64_to_cpu(dip->di_ino) != ino)
- return false;
- if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_meta_uuid))
- return false;
+ return NULL;
- flags = be16_to_cpu(dip->di_flags);
flags2 = be64_to_cpu(dip->di_flags2);
/* don't allow reflink/cowextsize if we don't have reflink */
if ((flags2 & (XFS_DIFLAG2_REFLINK | XFS_DIFLAG2_COWEXTSIZE)) &&
!xfs_sb_version_hasreflink(&mp->m_sb))
- return false;
+ return __this_address;
+
+ /* only regular files get reflink */
+ if ((flags2 & XFS_DIFLAG2_REFLINK) && (mode & S_IFMT) != S_IFREG)
+ return __this_address;
/* don't let reflink and realtime mix */
if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags & XFS_DIFLAG_REALTIME))
- return false;
+ return __this_address;
/* don't let reflink and dax mix */
if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags2 & XFS_DIFLAG2_DAX))
- return false;
+ return __this_address;
- return true;
+ return NULL;
}
void
@@ -476,6 +547,7 @@ xfs_iread(
{
xfs_buf_t *bp;
xfs_dinode_t *dip;
+ xfs_failaddr_t fa;
int error;
/*
@@ -507,11 +579,10 @@ xfs_iread(
return error;
/* even unallocated inodes are verified */
- if (!xfs_dinode_verify(mp, ip->i_ino, dip)) {
- xfs_alert(mp, "%s: validation failed for inode %lld",
- __func__, ip->i_ino);
-
- XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, dip);
+ fa = xfs_dinode_verify(mp, ip->i_ino, dip);
+ if (fa) {
+ xfs_inode_verifier_error(ip, -EFSCORRUPTED, "dinode", dip,
+ sizeof(*dip), fa);
error = -EFSCORRUPTED;
goto out_brelse;
}
diff --git a/fs/xfs/libxfs/xfs_inode_buf.h b/fs/xfs/libxfs/xfs_inode_buf.h
index a9c97a356c30..8a5e1da52d74 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.h
+++ b/fs/xfs/libxfs/xfs_inode_buf.h
@@ -82,7 +82,7 @@ void xfs_inobp_check(struct xfs_mount *, struct xfs_buf *);
#define xfs_inobp_check(mp, bp)
#endif /* DEBUG */
-bool xfs_dinode_verify(struct xfs_mount *mp, xfs_ino_t ino,
- struct xfs_dinode *dip);
+xfs_failaddr_t xfs_dinode_verify(struct xfs_mount *mp, xfs_ino_t ino,
+ struct xfs_dinode *dip);
#endif /* __XFS_INODE_BUF_H__ */
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
index c79a1616b79d..866d2861c625 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.c
+++ b/fs/xfs/libxfs/xfs_inode_fork.c
@@ -35,6 +35,8 @@
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2_priv.h"
+#include "xfs_attr_leaf.h"
+#include "xfs_shared.h"
kmem_zone_t *xfs_ifork_zone;
@@ -62,69 +64,11 @@ xfs_iformat_fork(
int error = 0;
xfs_fsize_t di_size;
- if (unlikely(be32_to_cpu(dip->di_nextents) +
- be16_to_cpu(dip->di_anextents) >
- be64_to_cpu(dip->di_nblocks))) {
- xfs_warn(ip->i_mount,
- "corrupt dinode %Lu, extent total = %d, nblocks = %Lu.",
- (unsigned long long)ip->i_ino,
- (int)(be32_to_cpu(dip->di_nextents) +
- be16_to_cpu(dip->di_anextents)),
- (unsigned long long)
- be64_to_cpu(dip->di_nblocks));
- XFS_CORRUPTION_ERROR("xfs_iformat(1)", XFS_ERRLEVEL_LOW,
- ip->i_mount, dip);
- return -EFSCORRUPTED;
- }
-
- if (unlikely(dip->di_forkoff > ip->i_mount->m_sb.sb_inodesize)) {
- xfs_warn(ip->i_mount, "corrupt dinode %Lu, forkoff = 0x%x.",
- (unsigned long long)ip->i_ino,
- dip->di_forkoff);
- XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW,
- ip->i_mount, dip);
- return -EFSCORRUPTED;
- }
-
- if (unlikely((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) &&
- !ip->i_mount->m_rtdev_targp)) {
- xfs_warn(ip->i_mount,
- "corrupt dinode %Lu, has realtime flag set.",
- ip->i_ino);
- XFS_CORRUPTION_ERROR("xfs_iformat(realtime)",
- XFS_ERRLEVEL_LOW, ip->i_mount, dip);
- return -EFSCORRUPTED;
- }
-
- if (unlikely(xfs_is_reflink_inode(ip) && !S_ISREG(inode->i_mode))) {
- xfs_warn(ip->i_mount,
- "corrupt dinode %llu, wrong file type for reflink.",
- ip->i_ino);
- XFS_CORRUPTION_ERROR("xfs_iformat(reflink)",
- XFS_ERRLEVEL_LOW, ip->i_mount, dip);
- return -EFSCORRUPTED;
- }
-
- if (unlikely(xfs_is_reflink_inode(ip) &&
- (ip->i_d.di_flags & XFS_DIFLAG_REALTIME))) {
- xfs_warn(ip->i_mount,
- "corrupt dinode %llu, has reflink+realtime flag set.",
- ip->i_ino);
- XFS_CORRUPTION_ERROR("xfs_iformat(reflink)",
- XFS_ERRLEVEL_LOW, ip->i_mount, dip);
- return -EFSCORRUPTED;
- }
-
switch (inode->i_mode & S_IFMT) {
case S_IFIFO:
case S_IFCHR:
case S_IFBLK:
case S_IFSOCK:
- if (unlikely(dip->di_format != XFS_DINODE_FMT_DEV)) {
- XFS_CORRUPTION_ERROR("xfs_iformat(3)", XFS_ERRLEVEL_LOW,
- ip->i_mount, dip);
- return -EFSCORRUPTED;
- }
ip->i_d.di_size = 0;
inode->i_rdev = xfs_to_linux_dev_t(xfs_dinode_get_rdev(dip));
break;
@@ -134,32 +78,7 @@ xfs_iformat_fork(
case S_IFDIR:
switch (dip->di_format) {
case XFS_DINODE_FMT_LOCAL:
- /*
- * no local regular files yet
- */
- if (unlikely(S_ISREG(be16_to_cpu(dip->di_mode)))) {
- xfs_warn(ip->i_mount,
- "corrupt inode %Lu (local format for regular file).",
- (unsigned long long) ip->i_ino);
- XFS_CORRUPTION_ERROR("xfs_iformat(4)",
- XFS_ERRLEVEL_LOW,
- ip->i_mount, dip);
- return -EFSCORRUPTED;
- }
-
di_size = be64_to_cpu(dip->di_size);
- if (unlikely(di_size < 0 ||
- di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) {
- xfs_warn(ip->i_mount,
- "corrupt inode %Lu (bad size %Ld for local inode).",
- (unsigned long long) ip->i_ino,
- (long long) di_size);
- XFS_CORRUPTION_ERROR("xfs_iformat(5)",
- XFS_ERRLEVEL_LOW,
- ip->i_mount, dip);
- return -EFSCORRUPTED;
- }
-
size = (int)di_size;
error = xfs_iformat_local(ip, dip, XFS_DATA_FORK, size);
break;
@@ -170,28 +89,16 @@ xfs_iformat_fork(
error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK);
break;
default:
- XFS_ERROR_REPORT("xfs_iformat(6)", XFS_ERRLEVEL_LOW,
- ip->i_mount);
return -EFSCORRUPTED;
}
break;
default:
- XFS_ERROR_REPORT("xfs_iformat(7)", XFS_ERRLEVEL_LOW, ip->i_mount);
return -EFSCORRUPTED;
}
if (error)
return error;
- /* Check inline dir contents. */
- if (S_ISDIR(inode->i_mode) && dip->di_format == XFS_DINODE_FMT_LOCAL) {
- error = xfs_dir2_sf_verify(ip);
- if (error) {
- xfs_idestroy_fork(ip, XFS_DATA_FORK);
- return error;
- }
- }
-
if (xfs_is_reflink_inode(ip)) {
ASSERT(ip->i_cowfp == NULL);
xfs_ifork_init_cow(ip);
@@ -208,18 +115,6 @@ xfs_iformat_fork(
atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip);
size = be16_to_cpu(atp->hdr.totsize);
- if (unlikely(size < sizeof(struct xfs_attr_sf_hdr))) {
- xfs_warn(ip->i_mount,
- "corrupt inode %Lu (bad attr fork size %Ld).",
- (unsigned long long) ip->i_ino,
- (long long) size);
- XFS_CORRUPTION_ERROR("xfs_iformat(8)",
- XFS_ERRLEVEL_LOW,
- ip->i_mount, dip);
- error = -EFSCORRUPTED;
- break;
- }
-
error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size);
break;
case XFS_DINODE_FMT_EXTENTS:
@@ -403,6 +298,7 @@ xfs_iformat_btree(
*/
if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <=
XFS_IFORK_MAXEXT(ip, whichfork) ||
+ nrecs == 0 ||
XFS_BMDR_SPACE_CALC(nrecs) >
XFS_DFORK_SIZE(dip, mp, whichfork) ||
XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks) ||
@@ -827,3 +723,45 @@ xfs_ifork_init_cow(
ip->i_cformat = XFS_DINODE_FMT_EXTENTS;
ip->i_cnextents = 0;
}
+
+/* Default fork content verifiers. */
+struct xfs_ifork_ops xfs_default_ifork_ops = {
+ .verify_attr = xfs_attr_shortform_verify,
+ .verify_dir = xfs_dir2_sf_verify,
+ .verify_symlink = xfs_symlink_shortform_verify,
+};
+
+/* Verify the inline contents of the data fork of an inode. */
+xfs_failaddr_t
+xfs_ifork_verify_data(
+ struct xfs_inode *ip,
+ struct xfs_ifork_ops *ops)
+{
+ /* Non-local data fork, we're done. */
+ if (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
+ return NULL;
+
+ /* Check the inline data fork if there is one. */
+ switch (VFS_I(ip)->i_mode & S_IFMT) {
+ case S_IFDIR:
+ return ops->verify_dir(ip);
+ case S_IFLNK:
+ return ops->verify_symlink(ip);
+ default:
+ return NULL;
+ }
+}
+
+/* Verify the inline contents of the attr fork of an inode. */
+xfs_failaddr_t
+xfs_ifork_verify_attr(
+ struct xfs_inode *ip,
+ struct xfs_ifork_ops *ops)
+{
+ /* There has to be an attr fork allocated if aformat is local. */
+ if (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
+ return NULL;
+ if (!XFS_IFORK_PTR(ip, XFS_ATTR_FORK))
+ return __this_address;
+ return ops->verify_attr(ip);
+}
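
A hedged sketch of how the fork verifier table is meant to be driven; the helper name is hypothetical:

static xfs_failaddr_t
example_verify_inline_forks(
	struct xfs_inode	*ip)
{
	xfs_failaddr_t		fa;

	fa = xfs_ifork_verify_data(ip, &xfs_default_ifork_ops);
	if (fa)
		return fa;
	return xfs_ifork_verify_attr(ip, &xfs_default_ifork_ops);
}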
diff --git a/fs/xfs/libxfs/xfs_inode_fork.h b/fs/xfs/libxfs/xfs_inode_fork.h
index b9f0098e33b8..dd8aba0dd119 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.h
+++ b/fs/xfs/libxfs/xfs_inode_fork.h
@@ -186,4 +186,18 @@ extern struct kmem_zone *xfs_ifork_zone;
extern void xfs_ifork_init_cow(struct xfs_inode *ip);
+typedef xfs_failaddr_t (*xfs_ifork_verifier_t)(struct xfs_inode *);
+
+struct xfs_ifork_ops {
+ xfs_ifork_verifier_t verify_symlink;
+ xfs_ifork_verifier_t verify_dir;
+ xfs_ifork_verifier_t verify_attr;
+};
+extern struct xfs_ifork_ops xfs_default_ifork_ops;
+
+xfs_failaddr_t xfs_ifork_verify_data(struct xfs_inode *ip,
+ struct xfs_ifork_ops *ops);
+xfs_failaddr_t xfs_ifork_verify_attr(struct xfs_inode *ip,
+ struct xfs_ifork_ops *ops);
+
#endif /* __XFS_INODE_FORK_H__ */
diff --git a/fs/xfs/libxfs/xfs_log_rlimit.c b/fs/xfs/libxfs/xfs_log_rlimit.c
index c10597973333..cc4cbe290939 100644
--- a/fs/xfs/libxfs/xfs_log_rlimit.c
+++ b/fs/xfs/libxfs/xfs_log_rlimit.c
@@ -55,7 +55,7 @@ xfs_log_calc_max_attrsetm_res(
* the maximum one in terms of the pre-calculated values which were done
* at mount time.
*/
-STATIC void
+void
xfs_log_get_max_trans_res(
struct xfs_mount *mp,
struct xfs_trans_res *max_resp)
diff --git a/fs/xfs/libxfs/xfs_quota_defs.h b/fs/xfs/libxfs/xfs_quota_defs.h
index d69c772271cb..bb1b13a9b5f4 100644
--- a/fs/xfs/libxfs/xfs_quota_defs.h
+++ b/fs/xfs/libxfs/xfs_quota_defs.h
@@ -112,8 +112,6 @@ typedef uint16_t xfs_qwarncnt_t;
#define XFS_QMOPT_PQUOTA 0x0000008 /* project dquot requested */
#define XFS_QMOPT_FORCE_RES 0x0000010 /* ignore quota limits */
#define XFS_QMOPT_SBVERSION 0x0000040 /* change superblock version num */
-#define XFS_QMOPT_DOWARN 0x0000400 /* increase warning cnt if needed */
-#define XFS_QMOPT_DQREPAIR 0x0001000 /* repair dquot if damaged */
#define XFS_QMOPT_GQUOTA 0x0002000 /* group dquot requested */
#define XFS_QMOPT_ENOSPC 0x0004000 /* enospc instead of edquot (prj) */
#define XFS_QMOPT_DQNEXT 0x0008000 /* return next dquot >= this ID */
@@ -153,8 +151,11 @@ typedef uint16_t xfs_qwarncnt_t;
(XFS_QMOPT_UQUOTA | XFS_QMOPT_PQUOTA | XFS_QMOPT_GQUOTA)
#define XFS_QMOPT_RESBLK_MASK (XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_RES_RTBLKS)
-extern int xfs_dqcheck(struct xfs_mount *mp, xfs_disk_dquot_t *ddq,
- xfs_dqid_t id, uint type, uint flags, const char *str);
+extern xfs_failaddr_t xfs_dquot_verify(struct xfs_mount *mp,
+ struct xfs_disk_dquot *ddq, xfs_dqid_t id, uint type,
+ uint flags);
extern int xfs_calc_dquots_per_chunk(unsigned int nbblks);
+extern int xfs_dquot_repair(struct xfs_mount *mp, struct xfs_disk_dquot *ddq,
+ xfs_dqid_t id, uint type);
#endif /* __XFS_QUOTA_H__ */
diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c
index c40d26763075..bee68c23d612 100644
--- a/fs/xfs/libxfs/xfs_refcount.c
+++ b/fs/xfs/libxfs/xfs_refcount.c
@@ -1696,3 +1696,22 @@ out_cursor:
xfs_trans_brelse(tp, agbp);
goto out_trans;
}
+
+/* Is there a record covering a given extent? */
+int
+xfs_refcount_has_record(
+ struct xfs_btree_cur *cur,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ bool *exists)
+{
+ union xfs_btree_irec low;
+ union xfs_btree_irec high;
+
+ memset(&low, 0, sizeof(low));
+ low.rc.rc_startblock = bno;
+ memset(&high, 0xFF, sizeof(high));
+ high.rc.rc_startblock = bno + len - 1;
+
+ return xfs_btree_has_record(cur, &low, &high, exists);
+}
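
The all-0xFF memset builds a high key that sorts at or after any real record, so only rc_startblock needs to be pinned at either end of the query. A hedged sketch of a scrub-style cross-check built on the helper:

static int
example_expect_unshared(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	xfs_extlen_t		len)
{
	bool			exists = false;
	int			error;

	error = xfs_refcount_has_record(cur, bno, len, &exists);
	if (error)
		return error;
	/* Caller expected no sharing; treat an overlap as corruption. */
	return exists ? -EFSCORRUPTED : 0;
}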
diff --git a/fs/xfs/libxfs/xfs_refcount.h b/fs/xfs/libxfs/xfs_refcount.h
index eafb9d1f3b37..2a731ac68fe4 100644
--- a/fs/xfs/libxfs/xfs_refcount.h
+++ b/fs/xfs/libxfs/xfs_refcount.h
@@ -83,4 +83,7 @@ static inline xfs_fileoff_t xfs_refcount_max_unmap(int log_res)
return (log_res * 3 / 4) / XFS_REFCOUNT_ITEM_OVERHEAD;
}
+extern int xfs_refcount_has_record(struct xfs_btree_cur *cur,
+ xfs_agblock_t bno, xfs_extlen_t len, bool *exists);
+
#endif /* __XFS_REFCOUNT_H__ */
diff --git a/fs/xfs/libxfs/xfs_refcount_btree.c b/fs/xfs/libxfs/xfs_refcount_btree.c
index 3c59dd3d58d7..8479769e470d 100644
--- a/fs/xfs/libxfs/xfs_refcount_btree.c
+++ b/fs/xfs/libxfs/xfs_refcount_btree.c
@@ -223,29 +223,31 @@ xfs_refcountbt_diff_two_keys(
be32_to_cpu(k2->refc.rc_startblock);
}
-STATIC bool
+STATIC xfs_failaddr_t
xfs_refcountbt_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
struct xfs_perag *pag = bp->b_pag;
+ xfs_failaddr_t fa;
unsigned int level;
if (block->bb_magic != cpu_to_be32(XFS_REFC_CRC_MAGIC))
- return false;
+ return __this_address;
if (!xfs_sb_version_hasreflink(&mp->m_sb))
- return false;
- if (!xfs_btree_sblock_v5hdr_verify(bp))
- return false;
+ return __this_address;
+ fa = xfs_btree_sblock_v5hdr_verify(bp);
+ if (fa)
+ return fa;
level = be16_to_cpu(block->bb_level);
if (pag && pag->pagf_init) {
if (level >= pag->pagf_refcount_level)
- return false;
+ return __this_address;
} else if (level >= mp->m_refc_maxlevels)
- return false;
+ return __this_address;
return xfs_btree_sblock_verify(bp, mp->m_refc_mxr[level != 0]);
}
@@ -254,25 +256,30 @@ STATIC void
xfs_refcountbt_read_verify(
struct xfs_buf *bp)
{
+ xfs_failaddr_t fa;
+
if (!xfs_btree_sblock_verify_crc(bp))
- xfs_buf_ioerror(bp, -EFSBADCRC);
- else if (!xfs_refcountbt_verify(bp))
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
+ xfs_verifier_error(bp, -EFSBADCRC, __this_address);
+ else {
+ fa = xfs_refcountbt_verify(bp);
+ if (fa)
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
+ }
- if (bp->b_error) {
+ if (bp->b_error)
trace_xfs_btree_corrupt(bp, _RET_IP_);
- xfs_verifier_error(bp);
- }
}
STATIC void
xfs_refcountbt_write_verify(
struct xfs_buf *bp)
{
- if (!xfs_refcountbt_verify(bp)) {
+ xfs_failaddr_t fa;
+
+ fa = xfs_refcountbt_verify(bp);
+ if (fa) {
trace_xfs_btree_corrupt(bp, _RET_IP_);
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- xfs_verifier_error(bp);
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
return;
}
xfs_btree_sblock_calc_crc(bp);
@@ -283,6 +290,7 @@ const struct xfs_buf_ops xfs_refcountbt_buf_ops = {
.name = "xfs_refcountbt",
.verify_read = xfs_refcountbt_read_verify,
.verify_write = xfs_refcountbt_write_verify,
+ .verify_struct = xfs_refcountbt_verify,
};
STATIC int
diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c
index 50db920ceeeb..79822cf6ebe3 100644
--- a/fs/xfs/libxfs/xfs_rmap.c
+++ b/fs/xfs/libxfs/xfs_rmap.c
@@ -2387,3 +2387,70 @@ xfs_rmap_compare(
else
return 0;
}
+
+/* Is there a record covering a given extent? */
+int
+xfs_rmap_has_record(
+ struct xfs_btree_cur *cur,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ bool *exists)
+{
+ union xfs_btree_irec low;
+ union xfs_btree_irec high;
+
+ memset(&low, 0, sizeof(low));
+ low.r.rm_startblock = bno;
+ memset(&high, 0xFF, sizeof(high));
+ high.r.rm_startblock = bno + len - 1;
+
+ return xfs_btree_has_record(cur, &low, &high, exists);
+}
+
+/*
+ * Is there a record for this owner completely covering a given physical
+ * extent? If so, *has_rmap will be set to true. If there is no record
+ * or the record only covers part of the range, we set *has_rmap to false.
+ * This function doesn't perform range lookups or offset checks, so it is
+ * not suitable for checking data fork blocks.
+ */
+int
+xfs_rmap_record_exists(
+ struct xfs_btree_cur *cur,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ struct xfs_owner_info *oinfo,
+ bool *has_rmap)
+{
+ uint64_t owner;
+ uint64_t offset;
+ unsigned int flags;
+ int has_record;
+ struct xfs_rmap_irec irec;
+ int error;
+
+ xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
+ ASSERT(XFS_RMAP_NON_INODE_OWNER(owner) ||
+ (flags & XFS_RMAP_BMBT_BLOCK));
+
+ error = xfs_rmap_lookup_le(cur, bno, len, owner, offset, flags,
+ &has_record);
+ if (error)
+ return error;
+ if (!has_record) {
+ *has_rmap = false;
+ return 0;
+ }
+
+ error = xfs_rmap_get_rec(cur, &irec, &has_record);
+ if (error)
+ return error;
+ if (!has_record) {
+ *has_rmap = false;
+ return 0;
+ }
+
+ *has_rmap = (irec.rm_owner == owner && irec.rm_startblock <= bno &&
+ irec.rm_startblock + irec.rm_blockcount >= bno + len);
+ return 0;
+}
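
A hedged usage sketch; XFS_RMAP_OWN_AG is a non-inode owner, satisfying the ASSERT above, and xfs_rmap_ag_owner() already appears earlier in this diff:

static int
example_ag_block_owned(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	bool			*owned)
{
	struct xfs_owner_info	oinfo;

	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
	return xfs_rmap_record_exists(cur, bno, len, &oinfo, owned);
}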
diff --git a/fs/xfs/libxfs/xfs_rmap.h b/fs/xfs/libxfs/xfs_rmap.h
index 0fcd5b1ba729..380e53be98d5 100644
--- a/fs/xfs/libxfs/xfs_rmap.h
+++ b/fs/xfs/libxfs/xfs_rmap.h
@@ -233,5 +233,10 @@ int xfs_rmap_compare(const struct xfs_rmap_irec *a,
union xfs_btree_rec;
int xfs_rmap_btrec_to_irec(union xfs_btree_rec *rec,
struct xfs_rmap_irec *irec);
+int xfs_rmap_has_record(struct xfs_btree_cur *cur, xfs_agblock_t bno,
+ xfs_extlen_t len, bool *exists);
+int xfs_rmap_record_exists(struct xfs_btree_cur *cur, xfs_agblock_t bno,
+ xfs_extlen_t len, struct xfs_owner_info *oinfo,
+ bool *has_rmap);
#endif /* __XFS_RMAP_H__ */
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c
index 9d9c9192584c..e829c3e489ea 100644
--- a/fs/xfs/libxfs/xfs_rmap_btree.c
+++ b/fs/xfs/libxfs/xfs_rmap_btree.c
@@ -303,13 +303,14 @@ xfs_rmapbt_diff_two_keys(
return 0;
}
-static bool
+static xfs_failaddr_t
xfs_rmapbt_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
struct xfs_perag *pag = bp->b_pag;
+ xfs_failaddr_t fa;
unsigned int level;
/*
@@ -325,19 +326,20 @@ xfs_rmapbt_verify(
* in this case.
*/
if (block->bb_magic != cpu_to_be32(XFS_RMAP_CRC_MAGIC))
- return false;
+ return __this_address;
if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
- return false;
- if (!xfs_btree_sblock_v5hdr_verify(bp))
- return false;
+ return __this_address;
+ fa = xfs_btree_sblock_v5hdr_verify(bp);
+ if (fa)
+ return fa;
level = be16_to_cpu(block->bb_level);
if (pag && pag->pagf_init) {
if (level >= pag->pagf_levels[XFS_BTNUM_RMAPi])
- return false;
+ return __this_address;
} else if (level >= mp->m_rmap_maxlevels)
- return false;
+ return __this_address;
return xfs_btree_sblock_verify(bp, mp->m_rmap_mxr[level != 0]);
}
@@ -346,25 +348,30 @@ static void
xfs_rmapbt_read_verify(
struct xfs_buf *bp)
{
+ xfs_failaddr_t fa;
+
if (!xfs_btree_sblock_verify_crc(bp))
- xfs_buf_ioerror(bp, -EFSBADCRC);
- else if (!xfs_rmapbt_verify(bp))
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
+ xfs_verifier_error(bp, -EFSBADCRC, __this_address);
+ else {
+ fa = xfs_rmapbt_verify(bp);
+ if (fa)
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
+ }
- if (bp->b_error) {
+ if (bp->b_error)
trace_xfs_btree_corrupt(bp, _RET_IP_);
- xfs_verifier_error(bp);
- }
}
static void
xfs_rmapbt_write_verify(
struct xfs_buf *bp)
{
- if (!xfs_rmapbt_verify(bp)) {
+ xfs_failaddr_t fa;
+
+ fa = xfs_rmapbt_verify(bp);
+ if (fa) {
trace_xfs_btree_corrupt(bp, _RET_IP_);
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- xfs_verifier_error(bp);
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
return;
}
xfs_btree_sblock_calc_crc(bp);
@@ -375,6 +382,7 @@ const struct xfs_buf_ops xfs_rmapbt_buf_ops = {
.name = "xfs_rmapbt",
.verify_read = xfs_rmapbt_read_verify,
.verify_write = xfs_rmapbt_write_verify,
+ .verify_struct = xfs_rmapbt_verify,
};
STATIC int
diff --git a/fs/xfs/libxfs/xfs_rtbitmap.c b/fs/xfs/libxfs/xfs_rtbitmap.c
index 3fb29a5ea915..106be2d0bb88 100644
--- a/fs/xfs/libxfs/xfs_rtbitmap.c
+++ b/fs/xfs/libxfs/xfs_rtbitmap.c
@@ -1097,3 +1097,24 @@ xfs_verify_rtbno(
{
return rtbno < mp->m_sb.sb_rblocks;
}
+
+/* Is the given extent all free? */
+int
+xfs_rtalloc_extent_is_free(
+ struct xfs_mount *mp,
+ struct xfs_trans *tp,
+ xfs_rtblock_t start,
+ xfs_extlen_t len,
+ bool *is_free)
+{
+ xfs_rtblock_t end;
+ int matches;
+ int error;
+
+ error = xfs_rtcheck_range(mp, tp, start, len, 1, &end, &matches);
+ if (error)
+ return error;
+
+ *is_free = matches;
+ return 0;
+}
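
A hedged sketch of the intended caller pattern — scrub-type code checking that a supposedly free realtime extent really is free:

static int
example_check_rt_free(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_rtblock_t		rtbno,
	xfs_extlen_t		len)
{
	bool			is_free = false;
	int			error;

	error = xfs_rtalloc_extent_is_free(mp, tp, rtbno, len, &is_free);
	if (error)
		return error;
	return is_free ? 0 : -EFSCORRUPTED;
}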
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index 9b5aae2bcc0b..46af6aa60a8e 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -40,6 +40,8 @@
#include "xfs_rmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_refcount_btree.h"
+#include "xfs_da_format.h"
+#include "xfs_da_btree.h"
/*
* Physical superblock buffer manipulations. Shared with libxfs in userspace.
@@ -116,6 +118,9 @@ xfs_mount_validate_sb(
bool check_inprogress,
bool check_version)
{
+ u32 agcount = 0;
+ u32 rem;
+
if (sbp->sb_magicnum != XFS_SB_MAGIC) {
xfs_warn(mp, "bad magic number");
return -EWRONGFS;
@@ -226,6 +231,13 @@ xfs_mount_validate_sb(
return -EINVAL;
}
+ /* Compute agcount for this number of dblocks and agblocks */
+ if (sbp->sb_agblocks) {
+ agcount = div_u64_rem(sbp->sb_dblocks, sbp->sb_agblocks, &rem);
+ if (rem)
+ agcount++;
+ }
+
/*
* More sanity checking. Most of these were stolen directly from
* xfs_repair.
@@ -250,6 +262,10 @@ xfs_mount_validate_sb(
sbp->sb_inodesize != (1 << sbp->sb_inodelog) ||
sbp->sb_logsunit > XLOG_MAX_RECORD_BSIZE ||
sbp->sb_inopblock != howmany(sbp->sb_blocksize,sbp->sb_inodesize) ||
+ XFS_FSB_TO_B(mp, sbp->sb_agblocks) < XFS_MIN_AG_BYTES ||
+ XFS_FSB_TO_B(mp, sbp->sb_agblocks) > XFS_MAX_AG_BYTES ||
+ sbp->sb_agblklog != xfs_highbit32(sbp->sb_agblocks - 1) + 1 ||
+ agcount == 0 || agcount != sbp->sb_agcount ||
(sbp->sb_blocklog - sbp->sb_inodelog != sbp->sb_inopblog) ||
(sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE) ||
(sbp->sb_rextsize * sbp->sb_blocksize < XFS_MIN_RTEXTSIZE) ||
@@ -640,11 +656,10 @@ xfs_sb_read_verify(
error = xfs_sb_verify(bp, true);
out_error:
- if (error) {
+ if (error == -EFSCORRUPTED || error == -EFSBADCRC)
+ xfs_verifier_error(bp, error, __this_address);
+ else if (error)
xfs_buf_ioerror(bp, error);
- if (error == -EFSCORRUPTED || error == -EFSBADCRC)
- xfs_verifier_error(bp);
- }
}
/*
@@ -673,13 +688,12 @@ xfs_sb_write_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
- struct xfs_buf_log_item *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
int error;
error = xfs_sb_verify(bp, false);
if (error) {
- xfs_buf_ioerror(bp, error);
- xfs_verifier_error(bp);
+ xfs_verifier_error(bp, error, __this_address);
return;
}
@@ -876,3 +890,88 @@ xfs_sync_sb(
xfs_trans_set_sync(tp);
return xfs_trans_commit(tp);
}
+
+int
+xfs_fs_geometry(
+ struct xfs_sb *sbp,
+ struct xfs_fsop_geom *geo,
+ int struct_version)
+{
+ memset(geo, 0, sizeof(struct xfs_fsop_geom));
+
+ geo->blocksize = sbp->sb_blocksize;
+ geo->rtextsize = sbp->sb_rextsize;
+ geo->agblocks = sbp->sb_agblocks;
+ geo->agcount = sbp->sb_agcount;
+ geo->logblocks = sbp->sb_logblocks;
+ geo->sectsize = sbp->sb_sectsize;
+ geo->inodesize = sbp->sb_inodesize;
+ geo->imaxpct = sbp->sb_imax_pct;
+ geo->datablocks = sbp->sb_dblocks;
+ geo->rtblocks = sbp->sb_rblocks;
+ geo->rtextents = sbp->sb_rextents;
+ geo->logstart = sbp->sb_logstart;
+ BUILD_BUG_ON(sizeof(geo->uuid) != sizeof(sbp->sb_uuid));
+ memcpy(geo->uuid, &sbp->sb_uuid, sizeof(sbp->sb_uuid));
+
+ if (struct_version < 2)
+ return 0;
+
+ geo->sunit = sbp->sb_unit;
+ geo->swidth = sbp->sb_width;
+
+ if (struct_version < 3)
+ return 0;
+
+ geo->version = XFS_FSOP_GEOM_VERSION;
+ geo->flags = XFS_FSOP_GEOM_FLAGS_NLINK |
+ XFS_FSOP_GEOM_FLAGS_DIRV2;
+ if (xfs_sb_version_hasattr(sbp))
+ geo->flags |= XFS_FSOP_GEOM_FLAGS_ATTR;
+ if (xfs_sb_version_hasquota(sbp))
+ geo->flags |= XFS_FSOP_GEOM_FLAGS_QUOTA;
+ if (xfs_sb_version_hasalign(sbp))
+ geo->flags |= XFS_FSOP_GEOM_FLAGS_IALIGN;
+ if (xfs_sb_version_hasdalign(sbp))
+ geo->flags |= XFS_FSOP_GEOM_FLAGS_DALIGN;
+ if (xfs_sb_version_hasextflgbit(sbp))
+ geo->flags |= XFS_FSOP_GEOM_FLAGS_EXTFLG;
+ if (xfs_sb_version_hassector(sbp))
+ geo->flags |= XFS_FSOP_GEOM_FLAGS_SECTOR;
+ if (xfs_sb_version_hasasciici(sbp))
+ geo->flags |= XFS_FSOP_GEOM_FLAGS_DIRV2CI;
+ if (xfs_sb_version_haslazysbcount(sbp))
+ geo->flags |= XFS_FSOP_GEOM_FLAGS_LAZYSB;
+ if (xfs_sb_version_hasattr2(sbp))
+ geo->flags |= XFS_FSOP_GEOM_FLAGS_ATTR2;
+ if (xfs_sb_version_hasprojid32bit(sbp))
+ geo->flags |= XFS_FSOP_GEOM_FLAGS_PROJID32;
+ if (xfs_sb_version_hascrc(sbp))
+ geo->flags |= XFS_FSOP_GEOM_FLAGS_V5SB;
+ if (xfs_sb_version_hasftype(sbp))
+ geo->flags |= XFS_FSOP_GEOM_FLAGS_FTYPE;
+ if (xfs_sb_version_hasfinobt(sbp))
+ geo->flags |= XFS_FSOP_GEOM_FLAGS_FINOBT;
+ if (xfs_sb_version_hassparseinodes(sbp))
+ geo->flags |= XFS_FSOP_GEOM_FLAGS_SPINODES;
+ if (xfs_sb_version_hasrmapbt(sbp))
+ geo->flags |= XFS_FSOP_GEOM_FLAGS_RMAPBT;
+ if (xfs_sb_version_hasreflink(sbp))
+ geo->flags |= XFS_FSOP_GEOM_FLAGS_REFLINK;
+ if (xfs_sb_version_hassector(sbp))
+ geo->logsectsize = sbp->sb_logsectsize;
+ else
+ geo->logsectsize = BBSIZE;
+ geo->rtsectsize = sbp->sb_blocksize;
+ geo->dirblocksize = xfs_dir2_dirblock_bytes(sbp);
+
+ if (struct_version < 4)
+ return 0;
+
+ if (xfs_sb_version_haslogv2(sbp))
+ geo->flags |= XFS_FSOP_GEOM_FLAGS_LOGV2;
+
+ geo->logsunit = sbp->sb_logsunit;
+
+ return 0;
+}
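xfs_fs_geometry() backs every revision of the geometry ioctl with one routine: it fills the fields a given revision knows about and returns early once struct_version is satisfied, up to XFS_FS_GEOM_MAX_STRUCT_VER. A sketch of how an ioctl handler might drive it (the handler below is illustrative; only xfs_fs_geometry() itself comes from this patch):

	STATIC int
	xfs_ioc_fsgeometry(
		struct xfs_mount	*mp,
		void			__user *arg,
		int			struct_version)
	{
		struct xfs_fsop_geom	fsgeo;
		int			error;

		error = xfs_fs_geometry(&mp->m_sb, &fsgeo, struct_version);
		if (error)
			return error;
		if (copy_to_user(arg, &fsgeo, sizeof(fsgeo)))
			return -EFAULT;
		return 0;
	}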
diff --git a/fs/xfs/libxfs/xfs_sb.h b/fs/xfs/libxfs/xfs_sb.h
index 961e6475a309..63dcd2a1a657 100644
--- a/fs/xfs/libxfs/xfs_sb.h
+++ b/fs/xfs/libxfs/xfs_sb.h
@@ -34,4 +34,8 @@ extern void xfs_sb_from_disk(struct xfs_sb *to, struct xfs_dsb *from);
extern void xfs_sb_to_disk(struct xfs_dsb *to, struct xfs_sb *from);
extern void xfs_sb_quota_from_disk(struct xfs_sb *sbp);
+#define XFS_FS_GEOM_MAX_STRUCT_VER (4)
+extern int xfs_fs_geometry(struct xfs_sb *sbp, struct xfs_fsop_geom *geo,
+ int struct_version);
+
#endif /* __XFS_SB_H__ */
diff --git a/fs/xfs/libxfs/xfs_shared.h b/fs/xfs/libxfs/xfs_shared.h
index c6f4eb46fe26..d0b84da0cb1e 100644
--- a/fs/xfs/libxfs/xfs_shared.h
+++ b/fs/xfs/libxfs/xfs_shared.h
@@ -76,6 +76,9 @@ struct xfs_log_item_desc {
int xfs_log_calc_unit_res(struct xfs_mount *mp, int unit_bytes);
int xfs_log_calc_minimum_size(struct xfs_mount *);
+struct xfs_trans_res;
+void xfs_log_get_max_trans_res(struct xfs_mount *mp,
+ struct xfs_trans_res *max_resp);
/*
* Values for t_flags.
@@ -143,5 +146,6 @@ bool xfs_symlink_hdr_ok(xfs_ino_t ino, uint32_t offset,
uint32_t size, struct xfs_buf *bp);
void xfs_symlink_local_to_remote(struct xfs_trans *tp, struct xfs_buf *bp,
struct xfs_inode *ip, struct xfs_ifork *ifp);
+xfs_failaddr_t xfs_symlink_shortform_verify(struct xfs_inode *ip);
#endif /* __XFS_SHARED_H__ */
diff --git a/fs/xfs/libxfs/xfs_symlink_remote.c b/fs/xfs/libxfs/xfs_symlink_remote.c
index c484877129a0..5ef5f354587e 100644
--- a/fs/xfs/libxfs/xfs_symlink_remote.c
+++ b/fs/xfs/libxfs/xfs_symlink_remote.c
@@ -98,7 +98,7 @@ xfs_symlink_hdr_ok(
return true;
}
-static bool
+static xfs_failaddr_t
xfs_symlink_verify(
struct xfs_buf *bp)
{
@@ -106,22 +106,22 @@ xfs_symlink_verify(
struct xfs_dsymlink_hdr *dsl = bp->b_addr;
if (!xfs_sb_version_hascrc(&mp->m_sb))
- return false;
+ return __this_address;
if (dsl->sl_magic != cpu_to_be32(XFS_SYMLINK_MAGIC))
- return false;
+ return __this_address;
if (!uuid_equal(&dsl->sl_uuid, &mp->m_sb.sb_meta_uuid))
- return false;
+ return __this_address;
if (bp->b_bn != be64_to_cpu(dsl->sl_blkno))
- return false;
+ return __this_address;
if (be32_to_cpu(dsl->sl_offset) +
be32_to_cpu(dsl->sl_bytes) >= XFS_SYMLINK_MAXLEN)
- return false;
+ return __this_address;
if (dsl->sl_owner == 0)
- return false;
+ return __this_address;
if (!xfs_log_check_lsn(mp, be64_to_cpu(dsl->sl_lsn)))
- return false;
+ return __this_address;
- return true;
+ return NULL;
}
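Returning xfs_failaddr_t instead of bool means each failed predicate reports the address of its own check, so a corruption report points at the exact test that tripped. __this_address is, approximately, a GCC labels-as-values expression that takes the address of the current statement; a standalone approximation:

	#include <stdio.h>

	/* Roughly what fs/xfs/xfs_linux.h does with __this_address. */
	#define THIS_ADDRESS ({ __label__ __here; __here: (void *)&&__here; })

	typedef void *failaddr_t;

	static failaddr_t verify_example(int magic_ok, int crc_ok)
	{
		if (!magic_ok)
			return THIS_ADDRESS;	/* names the magic check */
		if (!crc_ok)
			return THIS_ADDRESS;	/* names the CRC check */
		return NULL;			/* all checks passed */
	}

	int main(void)
	{
		printf("failed at %p\n", verify_example(1, 0));
		return 0;
	}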
static void
@@ -129,18 +129,19 @@ xfs_symlink_read_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
+ xfs_failaddr_t fa;
/* no verification of non-crc buffers */
if (!xfs_sb_version_hascrc(&mp->m_sb))
return;
if (!xfs_buf_verify_cksum(bp, XFS_SYMLINK_CRC_OFF))
- xfs_buf_ioerror(bp, -EFSBADCRC);
- else if (!xfs_symlink_verify(bp))
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
-
- if (bp->b_error)
- xfs_verifier_error(bp);
+ xfs_verifier_error(bp, -EFSBADCRC, __this_address);
+ else {
+ fa = xfs_symlink_verify(bp);
+ if (fa)
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
+ }
}
static void
@@ -148,15 +149,16 @@ xfs_symlink_write_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
- struct xfs_buf_log_item *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
+ xfs_failaddr_t fa;
/* no verification of non-crc buffers */
if (!xfs_sb_version_hascrc(&mp->m_sb))
return;
- if (!xfs_symlink_verify(bp)) {
- xfs_buf_ioerror(bp, -EFSCORRUPTED);
- xfs_verifier_error(bp);
+ fa = xfs_symlink_verify(bp);
+ if (fa) {
+ xfs_verifier_error(bp, -EFSCORRUPTED, fa);
return;
}
@@ -171,6 +173,7 @@ const struct xfs_buf_ops xfs_symlink_buf_ops = {
.name = "xfs_symlink",
.verify_read = xfs_symlink_read_verify,
.verify_write = xfs_symlink_write_verify,
+ .verify_struct = xfs_symlink_verify,
};
void
@@ -207,3 +210,37 @@ xfs_symlink_local_to_remote(
xfs_trans_log_buf(tp, bp, 0, sizeof(struct xfs_dsymlink_hdr) +
ifp->if_bytes - 1);
}
+
+/* Verify the consistency of an inline symlink. */
+xfs_failaddr_t
+xfs_symlink_shortform_verify(
+ struct xfs_inode *ip)
+{
+ char *sfp;
+ char *endp;
+ struct xfs_ifork *ifp;
+ int size;
+
+ ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_LOCAL);
+ ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+ sfp = (char *)ifp->if_u1.if_data;
+ size = ifp->if_bytes;
+ endp = sfp + size;
+
+ /* Zero length symlinks can exist while we're deleting a remote one. */
+ if (size == 0)
+ return NULL;
+
+ /* No negative sizes or overly long symlink targets. */
+ if (size < 0 || size > XFS_SYMLINK_MAXLEN)
+ return __this_address;
+
+ /* No NULLs in the target either. */
+ if (memchr(sfp, 0, size - 1))
+ return __this_address;
+
+ /* We /did/ null-terminate the buffer, right? */
+ if (*endp != 0)
+ return __this_address;
+ return NULL;
+}
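The same rules, re-expressed as standalone C over a plain buffer. Two assumptions are baked in and labeled: the maximum target length is a stand-in for XFS_SYMLINK_MAXLEN, and the inline buffer is taken to store the target with a trailing NUL:

	#include <string.h>

	#define SF_SYMLINK_MAXLEN	1024	/* stand-in for XFS_SYMLINK_MAXLEN */

	/* Return 0 if the inline target looks sane, -1 if corrupt. */
	static int symlink_sf_ok(const char *sfp, int size)
	{
		if (size == 0)
			return 0;	/* transient while deleting a remote symlink */
		if (size < 0 || size > SF_SYMLINK_MAXLEN)
			return -1;
		if (memchr(sfp, 0, size - 1))
			return -1;	/* no NULs inside the target */
		if (sfp[size - 1] != 0)
			return -1;	/* assumed trailing NUL terminator */
		return 0;
	}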
diff --git a/fs/xfs/libxfs/xfs_trans_resv.c b/fs/xfs/libxfs/xfs_trans_resv.c
index 6bd916bd35e2..5f17641f040f 100644
--- a/fs/xfs/libxfs/xfs_trans_resv.c
+++ b/fs/xfs/libxfs/xfs_trans_resv.c
@@ -34,6 +34,9 @@
#include "xfs_trans_space.h"
#include "xfs_trace.h"
+#define _ALLOC true
+#define _FREE false
+
/*
* A buffer has a format structure overhead in the log in addition
* to the data, so we need to take this into account when reserving
@@ -132,43 +135,77 @@ xfs_calc_inode_res(
}
/*
- * The free inode btree is a conditional feature and the log reservation
- * requirements differ slightly from that of the traditional inode allocation
- * btree. The finobt tracks records for inode chunks with at least one free
- * inode. A record can be removed from the tree for an inode allocation
- * or free and thus the finobt reservation is unconditional across:
+ * Inode btree record insertion/removal modifies the inode btree and free space
+ * btrees (since the inobt does not use the agfl). This requires the following
+ * reservation:
*
- * - inode allocation
- * - inode free
- * - inode chunk allocation
+ * the inode btree: max depth * blocksize
+ * the allocation btrees: 2 trees * (max depth - 1) * block size
*
- * The 'modify' param indicates to include the record modification scenario. The
- * 'alloc' param indicates to include the reservation for free space btree
- * modifications on behalf of finobt modifications. This is required only for
- * transactions that do not already account for free space btree modifications.
+ * The caller must account for SB and AG header modifications, etc.
+ */
+STATIC uint
+xfs_calc_inobt_res(
+ struct xfs_mount *mp)
+{
+ return xfs_calc_buf_res(mp->m_in_maxlevels, XFS_FSB_TO_B(mp, 1)) +
+ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
+ XFS_FSB_TO_B(mp, 1));
+}
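For a sense of scale, with numbers assumed purely for illustration (4 KiB blocks, a 3-level inobt, and xfs_allocfree_log_count(mp, 1) coming to 18 blocks for the two free space btrees):

	res = (m_in_maxlevels + allocfree) * blocksize
	    = (3 + 18) * 4096 = 86016 bytes

plus the per-buffer log format overhead that xfs_calc_buf_res() folds into each term.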
+
+/*
+ * The free inode btree is a conditional feature. The behavior differs slightly
+ * from that of the traditional inode btree in that the finobt tracks records
+ * for inode chunks with at least one free inode. A record can be removed from
+ * the tree during individual inode allocation. Therefore the finobt
+ * reservation is unconditional for both the inode chunk allocation and
+ * individual inode allocation (modify) cases.
*
- * the free inode btree: max depth * block size
- * the allocation btrees: 2 trees * (max depth - 1) * block size
- * the free inode btree entry: block size
+ * Behavior aside, the reservation for finobt modification is equivalent to the
+ * traditional inobt: cover a full finobt shape change plus block allocation.
*/
STATIC uint
xfs_calc_finobt_res(
- struct xfs_mount *mp,
- int alloc,
- int modify)
+ struct xfs_mount *mp)
{
- uint res;
-
if (!xfs_sb_version_hasfinobt(&mp->m_sb))
return 0;
- res = xfs_calc_buf_res(mp->m_in_maxlevels, XFS_FSB_TO_B(mp, 1));
- if (alloc)
- res += xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
- XFS_FSB_TO_B(mp, 1));
- if (modify)
- res += (uint)XFS_FSB_TO_B(mp, 1);
+ return xfs_calc_inobt_res(mp);
+}
+/*
+ * Calculate the reservation required to allocate or free an inode chunk. This
+ * includes:
+ *
+ * the allocation btrees: 2 trees * (max depth - 1) * block size
+ * the inode chunk: m_ialloc_blks * N
+ *
+ * The size N of the inode chunk reservation depends on whether it is for
+ * allocation or free and which type of create transaction is in use. An inode
+ * chunk free always invalidates the buffers and only requires reservation for
+ * headers (N == 0). An inode chunk allocation requires a chunk sized
+ * reservation on v4 and older superblocks to initialize the chunk. No chunk
+ * reservation is required for allocation on v5 supers, which use ordered
+ * buffers to initialize.
+ */
+STATIC uint
+xfs_calc_inode_chunk_res(
+ struct xfs_mount *mp,
+ bool alloc)
+{
+ uint res, size = 0;
+
+ res = xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
+ XFS_FSB_TO_B(mp, 1));
+ if (alloc) {
+ /* icreate tx uses ordered buffers */
+ if (xfs_sb_version_hascrc(&mp->m_sb))
+ return res;
+ size = XFS_FSB_TO_B(mp, 1);
+ }
+
+ res += xfs_calc_buf_res(mp->m_ialloc_blks, size);
return res;
}
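Spelled out, the three cases the helper covers (all on top of the unconditional allocfree term for the extent work itself):

	v5 alloc:  allocfree only -- ordered buffers initialize the chunk,
	           so the early return skips the chunk-sized term entirely
	v4 alloc:  allocfree + m_ialloc_blks buffers of one FSB each
	free:      allocfree + m_ialloc_blks zero-size buffers, i.e. log
	           format headers only, since the buffers are invalidated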
@@ -232,8 +269,6 @@ xfs_calc_write_reservation(
* the super block to reflect the freed blocks: sector size
* worst case split in allocation btrees per extent assuming 4 extents:
* 4 exts * 2 trees * (2 * max depth - 1) * block size
- * the inode btree: max depth * blocksize
- * the allocation btrees: 2 trees * (max depth - 1) * block size
*/
STATIC uint
xfs_calc_itruncate_reservation(
@@ -245,12 +280,7 @@ xfs_calc_itruncate_reservation(
XFS_FSB_TO_B(mp, 1))),
(xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) +
xfs_calc_buf_res(xfs_allocfree_log_count(mp, 4),
- XFS_FSB_TO_B(mp, 1)) +
- xfs_calc_buf_res(5, 0) +
- xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
- XFS_FSB_TO_B(mp, 1)) +
- xfs_calc_buf_res(2 + mp->m_ialloc_blks +
- mp->m_in_maxlevels, 0)));
+ XFS_FSB_TO_B(mp, 1))));
}
/*
@@ -282,13 +312,14 @@ xfs_calc_rename_reservation(
* For removing an inode from unlinked list at first, we can modify:
* the agi hash list and counters: sector size
* the on disk inode before ours in the agi hash list: inode cluster size
+ * the on disk inode in the agi hash list: inode cluster size
*/
STATIC uint
xfs_calc_iunlink_remove_reservation(
struct xfs_mount *mp)
{
return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
- max_t(uint, XFS_FSB_TO_B(mp, 1), mp->m_inode_cluster_size);
+ 2 * max_t(uint, XFS_FSB_TO_B(mp, 1), mp->m_inode_cluster_size);
}
/*
@@ -320,13 +351,13 @@ xfs_calc_link_reservation(
/*
* For adding an inode to unlinked list we can modify:
* the agi hash list: sector size
- * the unlinked inode: inode size
+ * the on disk inode: inode cluster size
*/
STATIC uint
xfs_calc_iunlink_add_reservation(xfs_mount_t *mp)
{
return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
- xfs_calc_inode_res(mp, 1);
+ max_t(uint, XFS_FSB_TO_B(mp, 1), mp->m_inode_cluster_size);
}
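Both unlinked-list reservations now price the on-disk inode at a full inode cluster buffer rather than just the inode core. With an assumed 512-byte sector and 8 KiB inode clusters on 4 KiB blocks:

	add:    512 + max(4096, 8192)     = 8704 bytes
	remove: 512 + 2 * max(4096, 8192) = 16896 bytes

remove logs two cluster buffers because unlinking rewrites the previous inode's forward pointer as well as the departing inode itself.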
/*
@@ -379,45 +410,16 @@ xfs_calc_create_resv_modify(
xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
(uint)XFS_FSB_TO_B(mp, 1) +
xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp), XFS_FSB_TO_B(mp, 1)) +
- xfs_calc_finobt_res(mp, 1, 1);
-}
-
-/*
- * For create we can allocate some inodes giving:
- * the agi and agf of the ag getting the new inodes: 2 * sectorsize
- * the superblock for the nlink flag: sector size
- * the inode blocks allocated: mp->m_ialloc_blks * blocksize
- * the inode btree: max depth * blocksize
- * the allocation btrees: 2 trees * (max depth - 1) * block size
- */
-STATIC uint
-xfs_calc_create_resv_alloc(
- struct xfs_mount *mp)
-{
- return xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
- mp->m_sb.sb_sectsize +
- xfs_calc_buf_res(mp->m_ialloc_blks, XFS_FSB_TO_B(mp, 1)) +
- xfs_calc_buf_res(mp->m_in_maxlevels, XFS_FSB_TO_B(mp, 1)) +
- xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
- XFS_FSB_TO_B(mp, 1));
-}
-
-STATIC uint
-__xfs_calc_create_reservation(
- struct xfs_mount *mp)
-{
- return XFS_DQUOT_LOGRES(mp) +
- MAX(xfs_calc_create_resv_alloc(mp),
- xfs_calc_create_resv_modify(mp));
+ xfs_calc_finobt_res(mp);
}
/*
* For icreate we can allocate some inodes giving:
* the agi and agf of the ag getting the new inodes: 2 * sectorsize
* the superblock for the nlink flag: sector size
- * the inode btree: max depth * blocksize
- * the allocation btrees: 2 trees * (max depth - 1) * block size
- * the finobt (record insertion)
+ * the inode chunk (allocation, optional init)
+ * the inobt (record insertion)
+ * the finobt (optional, record insertion)
*/
STATIC uint
xfs_calc_icreate_resv_alloc(
@@ -425,10 +427,9 @@ xfs_calc_icreate_resv_alloc(
{
return xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
mp->m_sb.sb_sectsize +
- xfs_calc_buf_res(mp->m_in_maxlevels, XFS_FSB_TO_B(mp, 1)) +
- xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
- XFS_FSB_TO_B(mp, 1)) +
- xfs_calc_finobt_res(mp, 0, 0);
+ xfs_calc_inode_chunk_res(mp, _ALLOC) +
+ xfs_calc_inobt_res(mp) +
+ xfs_calc_finobt_res(mp);
}
STATIC uint
@@ -440,26 +441,12 @@ xfs_calc_icreate_reservation(xfs_mount_t *mp)
}
STATIC uint
-xfs_calc_create_reservation(
- struct xfs_mount *mp)
-{
- if (xfs_sb_version_hascrc(&mp->m_sb))
- return xfs_calc_icreate_reservation(mp);
- return __xfs_calc_create_reservation(mp);
-
-}
-
-STATIC uint
xfs_calc_create_tmpfile_reservation(
struct xfs_mount *mp)
{
uint res = XFS_DQUOT_LOGRES(mp);
- if (xfs_sb_version_hascrc(&mp->m_sb))
- res += xfs_calc_icreate_resv_alloc(mp);
- else
- res += xfs_calc_create_resv_alloc(mp);
-
+ res += xfs_calc_icreate_resv_alloc(mp);
return res + xfs_calc_iunlink_add_reservation(mp);
}
@@ -470,7 +457,7 @@ STATIC uint
xfs_calc_mkdir_reservation(
struct xfs_mount *mp)
{
- return xfs_calc_create_reservation(mp);
+ return xfs_calc_icreate_reservation(mp);
}
@@ -483,20 +470,24 @@ STATIC uint
xfs_calc_symlink_reservation(
struct xfs_mount *mp)
{
- return xfs_calc_create_reservation(mp) +
+ return xfs_calc_icreate_reservation(mp) +
xfs_calc_buf_res(1, XFS_SYMLINK_MAXLEN);
}
/*
* In freeing an inode we can modify:
* the inode being freed: inode size
- * the super block free inode counter: sector size
- * the agi hash list and counters: sector size
- * the inode btree entry: block size
- * the on disk inode before ours in the agi hash list: inode cluster size
- * the inode btree: max depth * blocksize
- * the allocation btrees: 2 trees * (max depth - 1) * block size
+ * the super block free inode counter, AGF and AGFL: sector size
+ * the on disk inode (agi unlinked list removal)
+ * the inode chunk (invalidated, headers only)
+ * the inode btree
* the finobt (record insertion, removal or modification)
+ *
+ * Note that the inode chunk reservation includes an allocfree reservation for
+ * freeing of the inode chunk. This is technically extraneous because the inode
+ * chunk free is deferred (it occurs after a transaction roll). Include the
+ * extra reservation anyway, since we've had reports of ifree transaction
+ * overruns due to too many agfl fixups during inode chunk frees.
*/
STATIC uint
xfs_calc_ifree_reservation(
@@ -504,15 +495,11 @@ xfs_calc_ifree_reservation(
{
return XFS_DQUOT_LOGRES(mp) +
xfs_calc_inode_res(mp, 1) +
- xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
- xfs_calc_buf_res(1, XFS_FSB_TO_B(mp, 1)) +
+ xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
xfs_calc_iunlink_remove_reservation(mp) +
- xfs_calc_buf_res(1, 0) +
- xfs_calc_buf_res(2 + mp->m_ialloc_blks +
- mp->m_in_maxlevels, 0) +
- xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
- XFS_FSB_TO_B(mp, 1)) +
- xfs_calc_finobt_res(mp, 0, 1);
+ xfs_calc_inode_chunk_res(mp, _FREE) +
+ xfs_calc_inobt_res(mp) +
+ xfs_calc_finobt_res(mp);
}
/*
@@ -842,7 +829,7 @@ xfs_trans_resv_calc(
resp->tr_symlink.tr_logcount = XFS_SYMLINK_LOG_COUNT;
resp->tr_symlink.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
- resp->tr_create.tr_logres = xfs_calc_create_reservation(mp);
+ resp->tr_create.tr_logres = xfs_calc_icreate_reservation(mp);
resp->tr_create.tr_logcount = XFS_CREATE_LOG_COUNT;
resp->tr_create.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
diff --git a/fs/xfs/scrub/agheader.c b/fs/xfs/scrub/agheader.c
index 2a9b4f9e93c6..fd975524f460 100644
--- a/fs/xfs/scrub/agheader.c
+++ b/fs/xfs/scrub/agheader.c
@@ -32,30 +32,17 @@
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
+#include "xfs_rmap.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
/*
- * Set up scrub to check all the static metadata in each AG.
- * This means the SB, AGF, AGI, and AGFL headers.
+ * Walk all the blocks in the AGFL. The fn function can return any negative
+ * error code or XFS_BTREE_QUERY_RANGE_ABORT.
*/
int
-xfs_scrub_setup_ag_header(
- struct xfs_scrub_context *sc,
- struct xfs_inode *ip)
-{
- struct xfs_mount *mp = sc->mp;
-
- if (sc->sm->sm_agno >= mp->m_sb.sb_agcount ||
- sc->sm->sm_ino || sc->sm->sm_gen)
- return -EINVAL;
- return xfs_scrub_setup_fs(sc, ip);
-}
-
-/* Walk all the blocks in the AGFL. */
-int
xfs_scrub_walk_agfl(
struct xfs_scrub_context *sc,
int (*fn)(struct xfs_scrub_context *,
@@ -115,6 +102,36 @@ xfs_scrub_walk_agfl(
/* Superblock */
+/* Cross-reference with the other btrees. */
+STATIC void
+xfs_scrub_superblock_xref(
+ struct xfs_scrub_context *sc,
+ struct xfs_buf *bp)
+{
+ struct xfs_owner_info oinfo;
+ struct xfs_mount *mp = sc->mp;
+ xfs_agnumber_t agno = sc->sm->sm_agno;
+ xfs_agblock_t agbno;
+ int error;
+
+ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return;
+
+ agbno = XFS_SB_BLOCK(mp);
+
+ error = xfs_scrub_ag_init(sc, agno, &sc->sa);
+ if (!xfs_scrub_xref_process_error(sc, agno, agbno, &error))
+ return;
+
+ xfs_scrub_xref_is_used_space(sc, agbno, 1);
+ xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
+ xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
+ xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo);
+ xfs_scrub_xref_is_not_shared(sc, agbno, 1);
+
+ /* scrub teardown will take care of sc->sa for us */
+}
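Each AG header scrubber gains an _xref companion with the same shape: skip cross-referencing if the primary check already found corruption, set up the per-AG state (xfs_scrub_ag_init here for the superblock, xfs_scrub_ag_btcur_init for the AGF/AGFL/AGI below), run the generic checks, and leave sc->sa for teardown. The skeleton, with xfs_foo as a placeholder:

	STATIC void
	xfs_scrub_foo_xref(
		struct xfs_scrub_context	*sc)
	{
		if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			return;		/* don't xref known-bad metadata */

		if (xfs_scrub_ag_btcur_init(sc, &sc->sa))
			return;		/* xref is best-effort */

		/* per-header checks go here */

		/* scrub teardown releases sc->sa */
	}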
+
/*
* Scrub the filesystem superblock.
*
@@ -143,6 +160,22 @@ xfs_scrub_superblock(
error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp,
XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_sb_buf_ops);
+ /*
+ * The superblock verifier can return several different error codes
+ * if it thinks the superblock doesn't look right. For a mount these
+ * would all get bounced back to userspace, but if we're here then the
+ * fs mounted successfully, which means that this secondary superblock
+ * is simply incorrect. Treat all these codes the same way we treat
+ * any corruption.
+ */
+ switch (error) {
+ case -EINVAL: /* also -EWRONGFS */
+ case -ENOSYS:
+ case -EFBIG:
+ error = -EFSCORRUPTED;
+ default:
+ break;
+ }
if (!xfs_scrub_process_error(sc, agno, XFS_SB_BLOCK(mp), &error))
return error;
@@ -387,11 +420,175 @@ xfs_scrub_superblock(
BBTOB(bp->b_length) - sizeof(struct xfs_dsb)))
xfs_scrub_block_set_corrupt(sc, bp);
+ xfs_scrub_superblock_xref(sc, bp);
+
return error;
}
/* AGF */
+/* Tally freespace record lengths. */
+STATIC int
+xfs_scrub_agf_record_bno_lengths(
+ struct xfs_btree_cur *cur,
+ struct xfs_alloc_rec_incore *rec,
+ void *priv)
+{
+ xfs_extlen_t *blocks = priv;
+
+ (*blocks) += rec->ar_blockcount;
+ return 0;
+}
+
+/* Check agf_freeblks */
+static inline void
+xfs_scrub_agf_xref_freeblks(
+ struct xfs_scrub_context *sc)
+{
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
+ xfs_extlen_t blocks = 0;
+ int error;
+
+ if (!sc->sa.bno_cur)
+ return;
+
+ error = xfs_alloc_query_all(sc->sa.bno_cur,
+ xfs_scrub_agf_record_bno_lengths, &blocks);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.bno_cur))
+ return;
+ if (blocks != be32_to_cpu(agf->agf_freeblks))
+ xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
+}
+
+/* Cross reference the AGF with the cntbt (freespace by length btree) */
+static inline void
+xfs_scrub_agf_xref_cntbt(
+ struct xfs_scrub_context *sc)
+{
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
+ xfs_agblock_t agbno;
+ xfs_extlen_t blocks;
+ int have;
+ int error;
+
+ if (!sc->sa.cnt_cur)
+ return;
+
+ /* Any freespace at all? */
+ error = xfs_alloc_lookup_le(sc->sa.cnt_cur, 0, -1U, &have);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.cnt_cur))
+ return;
+ if (!have) {
+ if (agf->agf_freeblks != be32_to_cpu(0))
+ xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
+ return;
+ }
+
+ /* Check agf_longest */
+ error = xfs_alloc_get_rec(sc->sa.cnt_cur, &agbno, &blocks, &have);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.cnt_cur))
+ return;
+ if (!have || blocks != be32_to_cpu(agf->agf_longest))
+ xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
+}
+
+/* Check the btree block counts in the AGF against the btrees. */
+STATIC void
+xfs_scrub_agf_xref_btreeblks(
+ struct xfs_scrub_context *sc)
+{
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
+ struct xfs_mount *mp = sc->mp;
+ xfs_agblock_t blocks;
+ xfs_agblock_t btreeblks;
+ int error;
+
+ /* Check agf_rmap_blocks; set up for agf_btreeblks check */
+ if (sc->sa.rmap_cur) {
+ error = xfs_btree_count_blocks(sc->sa.rmap_cur, &blocks);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
+ return;
+ btreeblks = blocks - 1;
+ if (blocks != be32_to_cpu(agf->agf_rmap_blocks))
+ xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
+ } else {
+ btreeblks = 0;
+ }
+
+ /*
+ * No rmap cursor; we can't xref if we have the rmapbt feature.
+ * We also can't do it if we're missing the free space btree cursors.
+ */
+ if ((xfs_sb_version_hasrmapbt(&mp->m_sb) && !sc->sa.rmap_cur) ||
+ !sc->sa.bno_cur || !sc->sa.cnt_cur)
+ return;
+
+ /* Check agf_btreeblks */
+ error = xfs_btree_count_blocks(sc->sa.bno_cur, &blocks);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.bno_cur))
+ return;
+ btreeblks += blocks - 1;
+
+ error = xfs_btree_count_blocks(sc->sa.cnt_cur, &blocks);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.cnt_cur))
+ return;
+ btreeblks += blocks - 1;
+
+ if (btreeblks != be32_to_cpu(agf->agf_btreeblks))
+ xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
+}
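As the arithmetic reads, agf_rmap_blocks counts every rmapbt block, while agf_btreeblks excludes one block -- the root -- from each of the three trees it covers. So for, say, a 5-block rmapbt, a 3-block bnobt and a 3-block cntbt:

	agf_rmap_blocks = 5
	agf_btreeblks   = (5 - 1) + (3 - 1) + (3 - 1) = 8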
+
+/* Check agf_refcount_blocks against tree size */
+static inline void
+xfs_scrub_agf_xref_refcblks(
+ struct xfs_scrub_context *sc)
+{
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
+ xfs_agblock_t blocks;
+ int error;
+
+ if (!sc->sa.refc_cur)
+ return;
+
+ error = xfs_btree_count_blocks(sc->sa.refc_cur, &blocks);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.refc_cur))
+ return;
+ if (blocks != be32_to_cpu(agf->agf_refcount_blocks))
+ xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
+}
+
+/* Cross-reference with the other btrees. */
+STATIC void
+xfs_scrub_agf_xref(
+ struct xfs_scrub_context *sc)
+{
+ struct xfs_owner_info oinfo;
+ struct xfs_mount *mp = sc->mp;
+ xfs_agblock_t agbno;
+ int error;
+
+ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return;
+
+ agbno = XFS_AGF_BLOCK(mp);
+
+ error = xfs_scrub_ag_btcur_init(sc, &sc->sa);
+ if (error)
+ return;
+
+ xfs_scrub_xref_is_used_space(sc, agbno, 1);
+ xfs_scrub_agf_xref_freeblks(sc);
+ xfs_scrub_agf_xref_cntbt(sc);
+ xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
+ xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
+ xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo);
+ xfs_scrub_agf_xref_btreeblks(sc);
+ xfs_scrub_xref_is_not_shared(sc, agbno, 1);
+ xfs_scrub_agf_xref_refcblks(sc);
+
+ /* scrub teardown will take care of sc->sa for us */
+}
+
/* Scrub the AGF. */
int
xfs_scrub_agf(
@@ -414,6 +611,7 @@ xfs_scrub_agf(
&sc->sa.agf_bp, &sc->sa.agfl_bp);
if (!xfs_scrub_process_error(sc, agno, XFS_AGF_BLOCK(sc->mp), &error))
goto out;
+ xfs_scrub_buffer_recheck(sc, sc->sa.agf_bp);
agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
@@ -470,6 +668,7 @@ xfs_scrub_agf(
if (agfl_count != 0 && fl_count != agfl_count)
xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
+ xfs_scrub_agf_xref(sc);
out:
return error;
}
@@ -477,11 +676,28 @@ out:
/* AGFL */
struct xfs_scrub_agfl_info {
+ struct xfs_owner_info oinfo;
unsigned int sz_entries;
unsigned int nr_entries;
xfs_agblock_t *entries;
};
+/* Cross-reference with the other btrees. */
+STATIC void
+xfs_scrub_agfl_block_xref(
+ struct xfs_scrub_context *sc,
+ xfs_agblock_t agbno,
+ struct xfs_owner_info *oinfo)
+{
+ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return;
+
+ xfs_scrub_xref_is_used_space(sc, agbno, 1);
+ xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
+ xfs_scrub_xref_is_owned_by(sc, agbno, 1, oinfo);
+ xfs_scrub_xref_is_not_shared(sc, agbno, 1);
+}
+
/* Scrub an AGFL block. */
STATIC int
xfs_scrub_agfl_block(
@@ -499,6 +715,8 @@ xfs_scrub_agfl_block(
else
xfs_scrub_block_set_corrupt(sc, sc->sa.agfl_bp);
+ xfs_scrub_agfl_block_xref(sc, agbno, priv);
+
return 0;
}
@@ -513,6 +731,37 @@ xfs_scrub_agblock_cmp(
return (int)*a - (int)*b;
}
+/* Cross-reference with the other btrees. */
+STATIC void
+xfs_scrub_agfl_xref(
+ struct xfs_scrub_context *sc)
+{
+ struct xfs_owner_info oinfo;
+ struct xfs_mount *mp = sc->mp;
+ xfs_agblock_t agbno;
+ int error;
+
+ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return;
+
+ agbno = XFS_AGFL_BLOCK(mp);
+
+ error = xfs_scrub_ag_btcur_init(sc, &sc->sa);
+ if (error)
+ return;
+
+ xfs_scrub_xref_is_used_space(sc, agbno, 1);
+ xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
+ xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
+ xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo);
+ xfs_scrub_xref_is_not_shared(sc, agbno, 1);
+
+ /*
+ * Scrub teardown will take care of sc->sa for us. Leave sc->sa
+ * active so that the agfl block xref can use it too.
+ */
+}
+
/* Scrub the AGFL. */
int
xfs_scrub_agfl(
@@ -532,6 +781,12 @@ xfs_scrub_agfl(
goto out;
if (!sc->sa.agf_bp)
return -EFSCORRUPTED;
+ xfs_scrub_buffer_recheck(sc, sc->sa.agfl_bp);
+
+ xfs_scrub_agfl_xref(sc);
+
+ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ goto out;
/* Allocate buffer to ensure uniqueness of AGFL entries. */
agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
@@ -548,6 +803,7 @@ xfs_scrub_agfl(
}
/* Check the blocks in the AGFL. */
+ xfs_rmap_ag_owner(&sai.oinfo, XFS_RMAP_OWN_AG);
error = xfs_scrub_walk_agfl(sc, xfs_scrub_agfl_block, &sai);
if (error)
goto out_free;
@@ -575,6 +831,56 @@ out:
/* AGI */
+/* Check agi_count/agi_freecount */
+static inline void
+xfs_scrub_agi_xref_icounts(
+ struct xfs_scrub_context *sc)
+{
+ struct xfs_agi *agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);
+ xfs_agino_t icount;
+ xfs_agino_t freecount;
+ int error;
+
+ if (!sc->sa.ino_cur)
+ return;
+
+ error = xfs_ialloc_count_inodes(sc->sa.ino_cur, &icount, &freecount);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.ino_cur))
+ return;
+ if (be32_to_cpu(agi->agi_count) != icount ||
+ be32_to_cpu(agi->agi_freecount) != freecount)
+ xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agi_bp);
+}
+
+/* Cross-reference with the other btrees. */
+STATIC void
+xfs_scrub_agi_xref(
+ struct xfs_scrub_context *sc)
+{
+ struct xfs_owner_info oinfo;
+ struct xfs_mount *mp = sc->mp;
+ xfs_agblock_t agbno;
+ int error;
+
+ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return;
+
+ agbno = XFS_AGI_BLOCK(mp);
+
+ error = xfs_scrub_ag_btcur_init(sc, &sc->sa);
+ if (error)
+ return;
+
+ xfs_scrub_xref_is_used_space(sc, agbno, 1);
+ xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
+ xfs_scrub_agi_xref_icounts(sc);
+ xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
+ xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo);
+ xfs_scrub_xref_is_not_shared(sc, agbno, 1);
+
+ /* scrub teardown will take care of sc->sa for us */
+}
+
/* Scrub the AGI. */
int
xfs_scrub_agi(
@@ -598,6 +904,7 @@ xfs_scrub_agi(
&sc->sa.agf_bp, &sc->sa.agfl_bp);
if (!xfs_scrub_process_error(sc, agno, XFS_AGI_BLOCK(sc->mp), &error))
goto out;
+ xfs_scrub_buffer_recheck(sc, sc->sa.agi_bp);
agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);
@@ -653,6 +960,7 @@ xfs_scrub_agi(
if (agi->agi_pad32 != cpu_to_be32(0))
xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
+ xfs_scrub_agi_xref(sc);
out:
return error;
}
diff --git a/fs/xfs/scrub/alloc.c b/fs/xfs/scrub/alloc.c
index 059663e13414..517c079d3f68 100644
--- a/fs/xfs/scrub/alloc.c
+++ b/fs/xfs/scrub/alloc.c
@@ -31,6 +31,7 @@
#include "xfs_sb.h"
#include "xfs_alloc.h"
#include "xfs_rmap.h"
+#include "xfs_alloc.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
@@ -49,6 +50,64 @@ xfs_scrub_setup_ag_allocbt(
}
/* Free space btree scrubber. */
+/*
+ * Ensure there's a corresponding cntbt/bnobt record matching this
+ * bnobt/cntbt record, respectively.
+ */
+STATIC void
+xfs_scrub_allocbt_xref_other(
+ struct xfs_scrub_context *sc,
+ xfs_agblock_t agbno,
+ xfs_extlen_t len)
+{
+ struct xfs_btree_cur **pcur;
+ xfs_agblock_t fbno;
+ xfs_extlen_t flen;
+ int has_otherrec;
+ int error;
+
+ if (sc->sm->sm_type == XFS_SCRUB_TYPE_BNOBT)
+ pcur = &sc->sa.cnt_cur;
+ else
+ pcur = &sc->sa.bno_cur;
+ if (!*pcur)
+ return;
+
+ error = xfs_alloc_lookup_le(*pcur, agbno, len, &has_otherrec);
+ if (!xfs_scrub_should_check_xref(sc, &error, pcur))
+ return;
+ if (!has_otherrec) {
+ xfs_scrub_btree_xref_set_corrupt(sc, *pcur, 0);
+ return;
+ }
+
+ error = xfs_alloc_get_rec(*pcur, &fbno, &flen, &has_otherrec);
+ if (!xfs_scrub_should_check_xref(sc, &error, pcur))
+ return;
+ if (!has_otherrec) {
+ xfs_scrub_btree_xref_set_corrupt(sc, *pcur, 0);
+ return;
+ }
+
+ if (fbno != agbno || flen != len)
+ xfs_scrub_btree_xref_set_corrupt(sc, *pcur, 0);
+}
+
+/* Cross-reference with the other btrees. */
+STATIC void
+xfs_scrub_allocbt_xref(
+ struct xfs_scrub_context *sc,
+ xfs_agblock_t agbno,
+ xfs_extlen_t len)
+{
+ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return;
+
+ xfs_scrub_allocbt_xref_other(sc, agbno, len);
+ xfs_scrub_xref_is_not_inode_chunk(sc, agbno, len);
+ xfs_scrub_xref_has_no_owner(sc, agbno, len);
+ xfs_scrub_xref_is_not_shared(sc, agbno, len);
+}
/* Scrub a bnobt/cntbt record. */
STATIC int
@@ -70,6 +129,8 @@ xfs_scrub_allocbt_rec(
!xfs_verify_agbno(mp, agno, bno + len - 1))
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xfs_scrub_allocbt_xref(bs->sc, bno, len);
+
return error;
}
@@ -100,3 +161,23 @@ xfs_scrub_cntbt(
{
return xfs_scrub_allocbt(sc, XFS_BTNUM_CNT);
}
+
+/* xref check that the extent is not free */
+void
+xfs_scrub_xref_is_used_space(
+ struct xfs_scrub_context *sc,
+ xfs_agblock_t agbno,
+ xfs_extlen_t len)
+{
+ bool is_freesp;
+ int error;
+
+ if (!sc->sa.bno_cur)
+ return;
+
+ error = xfs_alloc_has_record(sc->sa.bno_cur, agbno, len, &is_freesp);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.bno_cur))
+ return;
+ if (is_freesp)
+ xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.bno_cur, 0);
+}
diff --git a/fs/xfs/scrub/bmap.c b/fs/xfs/scrub/bmap.c
index 42fec0bcd9e1..d00282130492 100644
--- a/fs/xfs/scrub/bmap.c
+++ b/fs/xfs/scrub/bmap.c
@@ -37,6 +37,7 @@
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rmap.h"
+#include "xfs_refcount.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
@@ -99,6 +100,201 @@ struct xfs_scrub_bmap_info {
int whichfork;
};
+/* Look for a corresponding rmap for this irec. */
+static inline bool
+xfs_scrub_bmap_get_rmap(
+ struct xfs_scrub_bmap_info *info,
+ struct xfs_bmbt_irec *irec,
+ xfs_agblock_t agbno,
+ uint64_t owner,
+ struct xfs_rmap_irec *rmap)
+{
+ xfs_fileoff_t offset;
+ unsigned int rflags = 0;
+ int has_rmap;
+ int error;
+
+ if (info->whichfork == XFS_ATTR_FORK)
+ rflags |= XFS_RMAP_ATTR_FORK;
+
+ /*
+ * CoW staging extents are owned (on disk) by the refcountbt, so
+ * their rmaps do not have offsets.
+ */
+ if (info->whichfork == XFS_COW_FORK)
+ offset = 0;
+ else
+ offset = irec->br_startoff;
+
+ /*
+ * If the caller thinks this could be a shared bmbt extent (IOWs,
+ * any data fork extent of a reflink inode) then we have to use the
+ * range rmap lookup to make sure we get the correct owner/offset.
+ */
+ if (info->is_shared) {
+ error = xfs_rmap_lookup_le_range(info->sc->sa.rmap_cur, agbno,
+ owner, offset, rflags, rmap, &has_rmap);
+ if (!xfs_scrub_should_check_xref(info->sc, &error,
+ &info->sc->sa.rmap_cur))
+ return false;
+ goto out;
+ }
+
+ /*
+ * Otherwise, use the (faster) regular lookup.
+ */
+ error = xfs_rmap_lookup_le(info->sc->sa.rmap_cur, agbno, 0, owner,
+ offset, rflags, &has_rmap);
+ if (!xfs_scrub_should_check_xref(info->sc, &error,
+ &info->sc->sa.rmap_cur))
+ return false;
+ if (!has_rmap)
+ goto out;
+
+ error = xfs_rmap_get_rec(info->sc->sa.rmap_cur, rmap, &has_rmap);
+ if (!xfs_scrub_should_check_xref(info->sc, &error,
+ &info->sc->sa.rmap_cur))
+ return false;
+
+out:
+ if (!has_rmap)
+ xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
+ irec->br_startoff);
+ return has_rmap;
+}
+
+/* Make sure that we have rmapbt records for this extent. */
+STATIC void
+xfs_scrub_bmap_xref_rmap(
+ struct xfs_scrub_bmap_info *info,
+ struct xfs_bmbt_irec *irec,
+ xfs_agblock_t agbno)
+{
+ struct xfs_rmap_irec rmap;
+ unsigned long long rmap_end;
+ uint64_t owner;
+
+ if (!info->sc->sa.rmap_cur)
+ return;
+
+ if (info->whichfork == XFS_COW_FORK)
+ owner = XFS_RMAP_OWN_COW;
+ else
+ owner = info->sc->ip->i_ino;
+
+ /* Find the rmap record for this irec. */
+ if (!xfs_scrub_bmap_get_rmap(info, irec, agbno, owner, &rmap))
+ return;
+
+ /* Check the rmap. */
+ rmap_end = (unsigned long long)rmap.rm_startblock + rmap.rm_blockcount;
+ if (rmap.rm_startblock > agbno ||
+ agbno + irec->br_blockcount > rmap_end)
+ xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
+ irec->br_startoff);
+
+ /*
+ * Check the logical offsets if applicable. CoW staging extents
+ * don't track logical offsets since the mappings only exist in
+ * memory.
+ */
+ if (info->whichfork != XFS_COW_FORK) {
+ rmap_end = (unsigned long long)rmap.rm_offset +
+ rmap.rm_blockcount;
+ if (rmap.rm_offset > irec->br_startoff ||
+ irec->br_startoff + irec->br_blockcount > rmap_end)
+ xfs_scrub_fblock_xref_set_corrupt(info->sc,
+ info->whichfork, irec->br_startoff);
+ }
+
+ if (rmap.rm_owner != owner)
+ xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
+ irec->br_startoff);
+
+ /*
+ * Check for discrepancies between the unwritten flag in the irec and
+ * the rmap. Note that the (in-memory) CoW fork distinguishes between
+ * unwritten and written extents, but we don't track that in the rmap
+ * records because the blocks are owned (on-disk) by the refcountbt,
+ * which doesn't track unwritten state.
+ */
+ if (owner != XFS_RMAP_OWN_COW &&
+ irec->br_state == XFS_EXT_UNWRITTEN &&
+ !(rmap.rm_flags & XFS_RMAP_UNWRITTEN))
+ xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
+ irec->br_startoff);
+
+ if (info->whichfork == XFS_ATTR_FORK &&
+ !(rmap.rm_flags & XFS_RMAP_ATTR_FORK))
+ xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
+ irec->br_startoff);
+ if (rmap.rm_flags & XFS_RMAP_BMBT_BLOCK)
+ xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
+ irec->br_startoff);
+}
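The containment test is plain interval arithmetic: the rmap record must cover the whole extent in both block space and, for non-CoW forks, logical offset space:

	ok:	rm_startblock <= agbno &&
		agbno + br_blockcount <= rm_startblock + rm_blockcount

For example, with an rmap covering agbno [100, 120): an extent at agbno 104 for 8 blocks ([104, 112)) passes, while one at agbno 116 for 8 blocks ([116, 124)) runs past rmap_end and is flagged.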
+
+/* Cross-reference a single rtdev extent record. */
+STATIC void
+xfs_scrub_bmap_rt_extent_xref(
+ struct xfs_scrub_bmap_info *info,
+ struct xfs_inode *ip,
+ struct xfs_btree_cur *cur,
+ struct xfs_bmbt_irec *irec)
+{
+ if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return;
+
+ xfs_scrub_xref_is_used_rt_space(info->sc, irec->br_startblock,
+ irec->br_blockcount);
+}
+
+/* Cross-reference a single datadev extent record. */
+STATIC void
+xfs_scrub_bmap_extent_xref(
+ struct xfs_scrub_bmap_info *info,
+ struct xfs_inode *ip,
+ struct xfs_btree_cur *cur,
+ struct xfs_bmbt_irec *irec)
+{
+ struct xfs_mount *mp = info->sc->mp;
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
+ xfs_extlen_t len;
+ int error;
+
+ if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return;
+
+ agno = XFS_FSB_TO_AGNO(mp, irec->br_startblock);
+ agbno = XFS_FSB_TO_AGBNO(mp, irec->br_startblock);
+ len = irec->br_blockcount;
+
+ error = xfs_scrub_ag_init(info->sc, agno, &info->sc->sa);
+ if (!xfs_scrub_fblock_process_error(info->sc, info->whichfork,
+ irec->br_startoff, &error))
+ return;
+
+ xfs_scrub_xref_is_used_space(info->sc, agbno, len);
+ xfs_scrub_xref_is_not_inode_chunk(info->sc, agbno, len);
+ xfs_scrub_bmap_xref_rmap(info, irec, agbno);
+ switch (info->whichfork) {
+ case XFS_DATA_FORK:
+ if (xfs_is_reflink_inode(info->sc->ip))
+ break;
+ /* fall through */
+ case XFS_ATTR_FORK:
+ xfs_scrub_xref_is_not_shared(info->sc, agbno,
+ irec->br_blockcount);
+ break;
+ case XFS_COW_FORK:
+ xfs_scrub_xref_is_cow_staging(info->sc, agbno,
+ irec->br_blockcount);
+ break;
+ }
+
+ xfs_scrub_ag_free(info->sc, &info->sc->sa);
+}
+
/* Scrub a single extent record. */
STATIC int
xfs_scrub_bmap_extent(
@@ -109,6 +305,7 @@ xfs_scrub_bmap_extent(
{
struct xfs_mount *mp = info->sc->mp;
struct xfs_buf *bp = NULL;
+ xfs_filblks_t end;
int error = 0;
if (cur)
@@ -136,19 +333,23 @@ xfs_scrub_bmap_extent(
irec->br_startoff);
/* Make sure the extent points to a valid place. */
+ if (irec->br_blockcount > MAXEXTLEN)
+ xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
+ irec->br_startoff);
if (irec->br_startblock + irec->br_blockcount <= irec->br_startblock)
xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
+ end = irec->br_startblock + irec->br_blockcount - 1;
if (info->is_rt &&
(!xfs_verify_rtbno(mp, irec->br_startblock) ||
- !xfs_verify_rtbno(mp, irec->br_startblock +
- irec->br_blockcount - 1)))
+ !xfs_verify_rtbno(mp, end)))
xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
if (!info->is_rt &&
(!xfs_verify_fsbno(mp, irec->br_startblock) ||
- !xfs_verify_fsbno(mp, irec->br_startblock +
- irec->br_blockcount - 1)))
+ !xfs_verify_fsbno(mp, end) ||
+ XFS_FSB_TO_AGNO(mp, irec->br_startblock) !=
+ XFS_FSB_TO_AGNO(mp, end)))
xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
@@ -158,6 +359,11 @@ xfs_scrub_bmap_extent(
xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
irec->br_startoff);
+ if (info->is_rt)
+ xfs_scrub_bmap_rt_extent_xref(info, ip, cur, irec);
+ else
+ xfs_scrub_bmap_extent_xref(info, ip, cur, irec);
+
info->lastoff = irec->br_startoff + irec->br_blockcount;
return error;
}
@@ -235,7 +441,6 @@ xfs_scrub_bmap(
struct xfs_ifork *ifp;
xfs_fileoff_t endoff;
struct xfs_iext_cursor icur;
- bool found;
int error = 0;
ifp = XFS_IFORK_PTR(ip, whichfork);
@@ -314,9 +519,7 @@ xfs_scrub_bmap(
/* Scrub extent records. */
info.lastoff = 0;
ifp = XFS_IFORK_PTR(ip, whichfork);
- for (found = xfs_iext_lookup_extent(ip, ifp, 0, &icur, &irec);
- found != 0;
- found = xfs_iext_next_extent(ifp, &icur, &irec)) {
+ for_each_xfs_iext(ifp, &icur, &irec) {
if (xfs_scrub_should_terminate(sc, &error))
break;
if (isnullstartblock(irec.br_startblock))
diff --git a/fs/xfs/scrub/btree.c b/fs/xfs/scrub/btree.c
index df0766132ace..54218168c8f9 100644
--- a/fs/xfs/scrub/btree.c
+++ b/fs/xfs/scrub/btree.c
@@ -42,12 +42,14 @@
* Check for btree operation errors. See the section about handling
* operational errors in common.c.
*/
-bool
-xfs_scrub_btree_process_error(
+static bool
+__xfs_scrub_btree_process_error(
struct xfs_scrub_context *sc,
struct xfs_btree_cur *cur,
int level,
- int *error)
+ int *error,
+ __u32 errflag,
+ void *ret_ip)
{
if (*error == 0)
return true;
@@ -60,36 +62,80 @@ xfs_scrub_btree_process_error(
case -EFSBADCRC:
case -EFSCORRUPTED:
/* Note the badness but don't abort. */
- sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
+ sc->sm->sm_flags |= errflag;
*error = 0;
/* fall through */
default:
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
trace_xfs_scrub_ifork_btree_op_error(sc, cur, level,
- *error, __return_address);
+ *error, ret_ip);
else
trace_xfs_scrub_btree_op_error(sc, cur, level,
- *error, __return_address);
+ *error, ret_ip);
break;
}
return false;
}
+bool
+xfs_scrub_btree_process_error(
+ struct xfs_scrub_context *sc,
+ struct xfs_btree_cur *cur,
+ int level,
+ int *error)
+{
+ return __xfs_scrub_btree_process_error(sc, cur, level, error,
+ XFS_SCRUB_OFLAG_CORRUPT, __return_address);
+}
+
+bool
+xfs_scrub_btree_xref_process_error(
+ struct xfs_scrub_context *sc,
+ struct xfs_btree_cur *cur,
+ int level,
+ int *error)
+{
+ return __xfs_scrub_btree_process_error(sc, cur, level, error,
+ XFS_SCRUB_OFLAG_XFAIL, __return_address);
+}
+
/* Record btree block corruption. */
-void
-xfs_scrub_btree_set_corrupt(
+static void
+__xfs_scrub_btree_set_corrupt(
struct xfs_scrub_context *sc,
struct xfs_btree_cur *cur,
- int level)
+ int level,
+ __u32 errflag,
+ void *ret_ip)
{
- sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
+ sc->sm->sm_flags |= errflag;
if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
trace_xfs_scrub_ifork_btree_error(sc, cur, level,
- __return_address);
+ ret_ip);
else
trace_xfs_scrub_btree_error(sc, cur, level,
- __return_address);
+ ret_ip);
+}
+
+void
+xfs_scrub_btree_set_corrupt(
+ struct xfs_scrub_context *sc,
+ struct xfs_btree_cur *cur,
+ int level)
+{
+ __xfs_scrub_btree_set_corrupt(sc, cur, level, XFS_SCRUB_OFLAG_CORRUPT,
+ __return_address);
+}
+
+void
+xfs_scrub_btree_xref_set_corrupt(
+ struct xfs_scrub_context *sc,
+ struct xfs_btree_cur *cur,
+ int level)
+{
+ __xfs_scrub_btree_set_corrupt(sc, cur, level, XFS_SCRUB_OFLAG_XCORRUPT,
+ __return_address);
}
/*
@@ -268,6 +314,8 @@ xfs_scrub_btree_block_check_sibling(
pp = xfs_btree_ptr_addr(ncur, ncur->bc_ptrs[level + 1], pblock);
if (!xfs_scrub_btree_ptr_ok(bs, level + 1, pp))
goto out;
+ if (pbp)
+ xfs_scrub_buffer_recheck(bs->sc, pbp);
if (xfs_btree_diff_two_ptrs(cur, pp, sibling))
xfs_scrub_btree_set_corrupt(bs->sc, cur, level);
@@ -315,6 +363,97 @@ out:
return error;
}
+struct check_owner {
+ struct list_head list;
+ xfs_daddr_t daddr;
+ int level;
+};
+
+/*
+ * Make sure this btree block isn't in the free list and that there's
+ * an rmap record for it.
+ */
+STATIC int
+xfs_scrub_btree_check_block_owner(
+ struct xfs_scrub_btree *bs,
+ int level,
+ xfs_daddr_t daddr)
+{
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
+ xfs_btnum_t btnum;
+ bool init_sa;
+ int error = 0;
+
+ if (!bs->cur)
+ return 0;
+
+ btnum = bs->cur->bc_btnum;
+ agno = xfs_daddr_to_agno(bs->cur->bc_mp, daddr);
+ agbno = xfs_daddr_to_agbno(bs->cur->bc_mp, daddr);
+
+ init_sa = bs->cur->bc_flags & XFS_BTREE_LONG_PTRS;
+ if (init_sa) {
+ error = xfs_scrub_ag_init(bs->sc, agno, &bs->sc->sa);
+ if (!xfs_scrub_btree_xref_process_error(bs->sc, bs->cur,
+ level, &error))
+ return error;
+ }
+
+ xfs_scrub_xref_is_used_space(bs->sc, agbno, 1);
+ /*
+ * The bnobt scrubber aliases bs->cur to bs->sc->sa.bno_cur, so we
+ * have to nullify it (to shut down further block owner checks) if
+ * self-xref encounters problems.
+ */
+ if (!bs->sc->sa.bno_cur && btnum == XFS_BTNUM_BNO)
+ bs->cur = NULL;
+
+ xfs_scrub_xref_is_owned_by(bs->sc, agbno, 1, bs->oinfo);
+ if (!bs->sc->sa.rmap_cur && btnum == XFS_BTNUM_RMAP)
+ bs->cur = NULL;
+
+ if (init_sa)
+ xfs_scrub_ag_free(bs->sc, &bs->sc->sa);
+
+ return error;
+}
+
+/* Check the owner of a btree block. */
+STATIC int
+xfs_scrub_btree_check_owner(
+ struct xfs_scrub_btree *bs,
+ int level,
+ struct xfs_buf *bp)
+{
+ struct xfs_btree_cur *cur = bs->cur;
+ struct check_owner *co;
+
+ if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) && bp == NULL)
+ return 0;
+
+ /*
+ * We want to cross-reference each btree block with the bnobt
+ * and the rmapbt. We cannot cross-reference the bnobt or
+ * rmapbt while scanning the bnobt or rmapbt, respectively,
+ * because we cannot alter the cursor and we'd prefer not to
+ * duplicate cursors. Therefore, save the buffer daddr for
+ * later scanning.
+ */
+ if (cur->bc_btnum == XFS_BTNUM_BNO || cur->bc_btnum == XFS_BTNUM_RMAP) {
+ co = kmem_alloc(sizeof(struct check_owner),
+ KM_MAYFAIL | KM_NOFS);
+ if (!co)
+ return -ENOMEM;
+ co->level = level;
+ co->daddr = XFS_BUF_ADDR(bp);
+ list_add_tail(&co->list, &bs->to_check);
+ return 0;
+ }
+
+ return xfs_scrub_btree_check_block_owner(bs, level, XFS_BUF_ADDR(bp));
+}
+
/*
* Grab and scrub a btree block given a btree pointer. Returns block
* and buffer pointers (if applicable) if they're ok to use.
@@ -349,6 +488,16 @@ xfs_scrub_btree_get_block(
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, level);
return 0;
}
+ if (*pbp)
+ xfs_scrub_buffer_recheck(bs->sc, *pbp);
+
+ /*
+ * Check the block's owner; this function absorbs error codes
+ * for us.
+ */
+ error = xfs_scrub_btree_check_owner(bs, level, *pbp);
+ if (error)
+ return error;
/*
* Check the block's siblings; this function absorbs error codes
@@ -421,6 +570,8 @@ xfs_scrub_btree(
struct xfs_btree_block *block;
int level;
struct xfs_buf *bp;
+ struct check_owner *co;
+ struct check_owner *n;
int i;
int error = 0;
@@ -512,5 +663,14 @@ xfs_scrub_btree(
}
out:
+ /* Process deferred owner checks on btree blocks. */
+ list_for_each_entry_safe(co, n, &bs.to_check, list) {
+ if (!error && bs.cur)
+ error = xfs_scrub_btree_check_block_owner(&bs,
+ co->level, co->daddr);
+ list_del(&co->list);
+ kmem_free(co);
+ }
+
return error;
}
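The defer-and-drain idiom above -- queue the checks that cannot run while the btree is being walked, then process them once the walk finishes -- re-expressed as standalone C with a simple singly linked list:

	#include <stdlib.h>

	struct deferred {
		struct deferred	*next;
		unsigned long	daddr;
		int		level;
	};

	static int defer_check(struct deferred **head, unsigned long daddr,
			       int level)
	{
		struct deferred	*d = malloc(sizeof(*d));

		if (!d)
			return -1;	/* like the KM_MAYFAIL failure path */
		d->daddr = daddr;
		d->level = level;
		d->next = *head;
		*head = d;
		return 0;
	}

	static void drain_checks(struct deferred **head,
				 void (*check)(unsigned long, int))
	{
		struct deferred	*d, *n;

		for (d = *head; d; d = n) {
			n = d->next;
			check(d->daddr, d->level);
			free(d);
		}
		*head = NULL;
	}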
diff --git a/fs/xfs/scrub/btree.h b/fs/xfs/scrub/btree.h
index 4de825a626d1..e2b868ede70b 100644
--- a/fs/xfs/scrub/btree.h
+++ b/fs/xfs/scrub/btree.h
@@ -26,10 +26,19 @@
bool xfs_scrub_btree_process_error(struct xfs_scrub_context *sc,
struct xfs_btree_cur *cur, int level, int *error);
+/* Check for btree xref operation errors. */
+bool xfs_scrub_btree_xref_process_error(struct xfs_scrub_context *sc,
+ struct xfs_btree_cur *cur, int level,
+ int *error);
+
/* Check for btree corruption. */
void xfs_scrub_btree_set_corrupt(struct xfs_scrub_context *sc,
struct xfs_btree_cur *cur, int level);
+/* Check for btree xref discrepancies. */
+void xfs_scrub_btree_xref_set_corrupt(struct xfs_scrub_context *sc,
+ struct xfs_btree_cur *cur, int level);
+
struct xfs_scrub_btree;
typedef int (*xfs_scrub_btree_rec_fn)(
struct xfs_scrub_btree *bs,
diff --git a/fs/xfs/scrub/common.c b/fs/xfs/scrub/common.c
index ac95fe911d96..8033ab9d8f47 100644
--- a/fs/xfs/scrub/common.c
+++ b/fs/xfs/scrub/common.c
@@ -78,12 +78,14 @@
*/
/* Check for operational errors. */
-bool
-xfs_scrub_process_error(
+static bool
+__xfs_scrub_process_error(
struct xfs_scrub_context *sc,
xfs_agnumber_t agno,
xfs_agblock_t bno,
- int *error)
+ int *error,
+ __u32 errflag,
+ void *ret_ip)
{
switch (*error) {
case 0:
@@ -95,24 +97,48 @@ xfs_scrub_process_error(
case -EFSBADCRC:
case -EFSCORRUPTED:
/* Note the badness but don't abort. */
- sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
+ sc->sm->sm_flags |= errflag;
*error = 0;
/* fall through */
default:
trace_xfs_scrub_op_error(sc, agno, bno, *error,
- __return_address);
+ ret_ip);
break;
}
return false;
}
-/* Check for operational errors for a file offset. */
bool
-xfs_scrub_fblock_process_error(
+xfs_scrub_process_error(
+ struct xfs_scrub_context *sc,
+ xfs_agnumber_t agno,
+ xfs_agblock_t bno,
+ int *error)
+{
+ return __xfs_scrub_process_error(sc, agno, bno, error,
+ XFS_SCRUB_OFLAG_CORRUPT, __return_address);
+}
+
+bool
+xfs_scrub_xref_process_error(
+ struct xfs_scrub_context *sc,
+ xfs_agnumber_t agno,
+ xfs_agblock_t bno,
+ int *error)
+{
+ return __xfs_scrub_process_error(sc, agno, bno, error,
+ XFS_SCRUB_OFLAG_XFAIL, __return_address);
+}
+
+/* Check for operational errors for a file offset. */
+static bool
+__xfs_scrub_fblock_process_error(
struct xfs_scrub_context *sc,
int whichfork,
xfs_fileoff_t offset,
- int *error)
+ int *error,
+ __u32 errflag,
+ void *ret_ip)
{
switch (*error) {
case 0:
@@ -124,17 +150,39 @@ xfs_scrub_fblock_process_error(
case -EFSBADCRC:
case -EFSCORRUPTED:
/* Note the badness but don't abort. */
- sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
+ sc->sm->sm_flags |= errflag;
*error = 0;
/* fall through */
default:
trace_xfs_scrub_file_op_error(sc, whichfork, offset, *error,
- __return_address);
+ ret_ip);
break;
}
return false;
}
+bool
+xfs_scrub_fblock_process_error(
+ struct xfs_scrub_context *sc,
+ int whichfork,
+ xfs_fileoff_t offset,
+ int *error)
+{
+ return __xfs_scrub_fblock_process_error(sc, whichfork, offset, error,
+ XFS_SCRUB_OFLAG_CORRUPT, __return_address);
+}
+
+bool
+xfs_scrub_fblock_xref_process_error(
+ struct xfs_scrub_context *sc,
+ int whichfork,
+ xfs_fileoff_t offset,
+ int *error)
+{
+ return __xfs_scrub_fblock_process_error(sc, whichfork, offset, error,
+ XFS_SCRUB_OFLAG_XFAIL, __return_address);
+}
+
/*
* Handling scrub corruption/optimization/warning checks.
*
@@ -183,6 +231,16 @@ xfs_scrub_block_set_corrupt(
trace_xfs_scrub_block_error(sc, bp->b_bn, __return_address);
}
+/* Record a corruption while cross-referencing. */
+void
+xfs_scrub_block_xref_set_corrupt(
+ struct xfs_scrub_context *sc,
+ struct xfs_buf *bp)
+{
+ sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
+ trace_xfs_scrub_block_error(sc, bp->b_bn, __return_address);
+}
+
/*
* Record a corrupt inode. The trace data will include the block given
* by bp if bp is given; otherwise it will use the block location of the
@@ -198,6 +256,17 @@ xfs_scrub_ino_set_corrupt(
trace_xfs_scrub_ino_error(sc, ino, bp ? bp->b_bn : 0, __return_address);
}
+/* Record a corruption while cross-referencing with an inode. */
+void
+xfs_scrub_ino_xref_set_corrupt(
+ struct xfs_scrub_context *sc,
+ xfs_ino_t ino,
+ struct xfs_buf *bp)
+{
+ sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
+ trace_xfs_scrub_ino_error(sc, ino, bp ? bp->b_bn : 0, __return_address);
+}
+
/* Record corruption in a block indexed by a file fork. */
void
xfs_scrub_fblock_set_corrupt(
@@ -209,6 +278,17 @@ xfs_scrub_fblock_set_corrupt(
trace_xfs_scrub_fblock_error(sc, whichfork, offset, __return_address);
}
+/* Record a corruption while cross-referencing a fork block. */
+void
+xfs_scrub_fblock_xref_set_corrupt(
+ struct xfs_scrub_context *sc,
+ int whichfork,
+ xfs_fileoff_t offset)
+{
+ sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
+ trace_xfs_scrub_fblock_error(sc, whichfork, offset, __return_address);
+}
+
/*
* Warn about inodes that need administrative review but are not
* incorrect.
@@ -245,6 +325,59 @@ xfs_scrub_set_incomplete(
}
/*
+ * rmap scrubbing -- compute the number of blocks with a given owner,
+ * at least according to the reverse mapping data.
+ */
+
+struct xfs_scrub_rmap_ownedby_info {
+ struct xfs_owner_info *oinfo;
+ xfs_filblks_t *blocks;
+};
+
+STATIC int
+xfs_scrub_count_rmap_ownedby_irec(
+ struct xfs_btree_cur *cur,
+ struct xfs_rmap_irec *rec,
+ void *priv)
+{
+ struct xfs_scrub_rmap_ownedby_info *sroi = priv;
+ bool irec_attr;
+ bool oinfo_attr;
+
+ irec_attr = rec->rm_flags & XFS_RMAP_ATTR_FORK;
+ oinfo_attr = sroi->oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK;
+
+ if (rec->rm_owner != sroi->oinfo->oi_owner)
+ return 0;
+
+ if (XFS_RMAP_NON_INODE_OWNER(rec->rm_owner) || irec_attr == oinfo_attr)
+ (*sroi->blocks) += rec->rm_blockcount;
+
+ return 0;
+}
+
+/*
+ * Calculate the number of blocks the rmap thinks are owned by something.
+ * The caller should pass us an rmapbt cursor.
+ */
+int
+xfs_scrub_count_rmap_ownedby_ag(
+ struct xfs_scrub_context *sc,
+ struct xfs_btree_cur *cur,
+ struct xfs_owner_info *oinfo,
+ xfs_filblks_t *blocks)
+{
+ struct xfs_scrub_rmap_ownedby_info sroi;
+
+ sroi.oinfo = oinfo;
+ *blocks = 0;
+ sroi.blocks = blocks;
+
+ return xfs_rmap_query_all(cur, xfs_scrub_count_rmap_ownedby_irec,
+ &sroi);
+}
+
+/*
* AG scrubbing
*
* These helpers facilitate locking an allocation group's header
@@ -302,7 +435,7 @@ xfs_scrub_ag_read_headers(
error = xfs_alloc_read_agfl(mp, sc->tp, agno, agfl);
if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGFL))
goto out;
-
+ error = 0;
out:
return error;
}
@@ -472,7 +605,7 @@ xfs_scrub_setup_ag_btree(
return error;
}
- error = xfs_scrub_setup_ag_header(sc, ip);
+ error = xfs_scrub_setup_fs(sc, ip);
if (error)
return error;
@@ -503,18 +636,11 @@ xfs_scrub_get_inode(
struct xfs_scrub_context *sc,
struct xfs_inode *ip_in)
{
+ struct xfs_imap imap;
struct xfs_mount *mp = sc->mp;
struct xfs_inode *ip = NULL;
int error;
- /*
- * If userspace passed us an AG number or a generation number
- * without an inode number, they haven't got a clue so bail out
- * immediately.
- */
- if (sc->sm->sm_agno || (sc->sm->sm_gen && !sc->sm->sm_ino))
- return -EINVAL;
-
/* We want to scan the inode we already had opened. */
if (sc->sm->sm_ino == 0 || sc->sm->sm_ino == ip_in->i_ino) {
sc->ip = ip_in;
@@ -526,10 +652,33 @@ xfs_scrub_get_inode(
return -ENOENT;
error = xfs_iget(mp, NULL, sc->sm->sm_ino,
XFS_IGET_UNTRUSTED | XFS_IGET_DONTCACHE, 0, &ip);
- if (error == -ENOENT || error == -EINVAL) {
- /* inode doesn't exist... */
- return -ENOENT;
- } else if (error) {
+ switch (error) {
+ case -ENOENT:
+ /* Inode doesn't exist, just bail out. */
+ return error;
+ case 0:
+ /* Got an inode, continue. */
+ break;
+ case -EINVAL:
+ /*
+ * -EINVAL with IGET_UNTRUSTED could mean one of several
+ * things: userspace gave us an inode number that doesn't
+ * correspond to fs space, or doesn't have an inobt entry;
+ * or it could simply mean that the inode buffer failed the
+ * read verifiers.
+ *
+ * Try just the inode mapping lookup -- if it succeeds, then
+ * the inode buffer verifier failed and something needs fixing.
+ * Otherwise, we really couldn't find it so tell userspace
+ * that it no longer exists.
+ */
+ error = xfs_imap(sc->mp, sc->tp, sc->sm->sm_ino, &imap,
+ XFS_IGET_UNTRUSTED | XFS_IGET_DONTCACHE);
+ if (error)
+ return -ENOENT;
+ error = -EFSCORRUPTED;
+ /* fall through */
+ default:
trace_xfs_scrub_op_error(sc,
XFS_INO_TO_AGNO(mp, sc->sm->sm_ino),
XFS_INO_TO_AGBNO(mp, sc->sm->sm_ino),
@@ -572,3 +721,61 @@ out:
/* scrub teardown will unlock and release the inode for us */
return error;
}
+
+/*
+ * Predicate that decides if we need to evaluate the cross-reference check.
+ * If there was an error accessing the cross-reference btree, just delete
+ * the cursor and skip the check.
+ */
+bool
+xfs_scrub_should_check_xref(
+ struct xfs_scrub_context *sc,
+ int *error,
+ struct xfs_btree_cur **curpp)
+{
+ if (*error == 0)
+ return true;
+
+ if (curpp) {
+ /* If we've already given up on xref, just bail out. */
+ if (!*curpp)
+ return false;
+
+ /* xref error, delete cursor and bail out. */
+ xfs_btree_del_cursor(*curpp, XFS_BTREE_ERROR);
+ *curpp = NULL;
+ }
+
+ sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XFAIL;
+ trace_xfs_scrub_xref_error(sc, *error, __return_address);
+
+ /*
+ * Errors encountered during cross-referencing with another
+ * data structure should not cause this scrubber to abort.
+ */
+ *error = 0;
+ return false;
+}
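A typical call site folds three outcomes into one test: success (returns true), a cursor that was already torn down by an earlier xref failure (returns false immediately), or a fresh error, in which case the cursor is deleted, XFAIL is flagged, and the error is squashed so the main scrub continues. From the pattern used throughout this series:

	error = xfs_btree_count_blocks(sc->sa.refc_cur, &blocks);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	/* cursor still valid and error == 0: safe to compare results */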
+
+/* Run the structure verifiers on in-memory buffers to detect bad memory. */
+void
+xfs_scrub_buffer_recheck(
+ struct xfs_scrub_context *sc,
+ struct xfs_buf *bp)
+{
+ xfs_failaddr_t fa;
+
+ if (bp->b_ops == NULL) {
+ xfs_scrub_block_set_corrupt(sc, bp);
+ return;
+ }
+ if (bp->b_ops->verify_struct == NULL) {
+ xfs_scrub_set_incomplete(sc);
+ return;
+ }
+ fa = bp->b_ops->verify_struct(bp);
+ if (!fa)
+ return;
+ sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
+ trace_xfs_scrub_block_error(sc, bp->b_bn, fa);
+}
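Buffers are recheckable only if their ops table exposes the new verify_struct hook: no b_ops at all marks the block corrupt, ops without verify_struct mark the scrub incomplete, and a hook that returns a failure address records corruption. Wiring the hook up mirrors the xfs_symlink_buf_ops change earlier in this patch (the xfs_foo_* names are placeholders):

	const struct xfs_buf_ops xfs_foo_buf_ops = {
		.name		= "xfs_foo",
		.verify_read	= xfs_foo_read_verify,
		.verify_write	= xfs_foo_write_verify,
		.verify_struct	= xfs_foo_verify,	/* media-independent checks */
	};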
diff --git a/fs/xfs/scrub/common.h b/fs/xfs/scrub/common.h
index 5c043855570e..ddb65d22c76a 100644
--- a/fs/xfs/scrub/common.h
+++ b/fs/xfs/scrub/common.h
@@ -56,6 +56,11 @@ bool xfs_scrub_process_error(struct xfs_scrub_context *sc, xfs_agnumber_t agno,
bool xfs_scrub_fblock_process_error(struct xfs_scrub_context *sc, int whichfork,
xfs_fileoff_t offset, int *error);
+bool xfs_scrub_xref_process_error(struct xfs_scrub_context *sc,
+ xfs_agnumber_t agno, xfs_agblock_t bno, int *error);
+bool xfs_scrub_fblock_xref_process_error(struct xfs_scrub_context *sc,
+ int whichfork, xfs_fileoff_t offset, int *error);
+
void xfs_scrub_block_set_preen(struct xfs_scrub_context *sc,
struct xfs_buf *bp);
void xfs_scrub_ino_set_preen(struct xfs_scrub_context *sc, xfs_ino_t ino,
@@ -68,6 +73,13 @@ void xfs_scrub_ino_set_corrupt(struct xfs_scrub_context *sc, xfs_ino_t ino,
void xfs_scrub_fblock_set_corrupt(struct xfs_scrub_context *sc, int whichfork,
xfs_fileoff_t offset);
+void xfs_scrub_block_xref_set_corrupt(struct xfs_scrub_context *sc,
+ struct xfs_buf *bp);
+void xfs_scrub_ino_xref_set_corrupt(struct xfs_scrub_context *sc, xfs_ino_t ino,
+ struct xfs_buf *bp);
+void xfs_scrub_fblock_xref_set_corrupt(struct xfs_scrub_context *sc,
+ int whichfork, xfs_fileoff_t offset);
+
void xfs_scrub_ino_set_warning(struct xfs_scrub_context *sc, xfs_ino_t ino,
struct xfs_buf *bp);
void xfs_scrub_fblock_set_warning(struct xfs_scrub_context *sc, int whichfork,
@@ -76,10 +88,12 @@ void xfs_scrub_fblock_set_warning(struct xfs_scrub_context *sc, int whichfork,
void xfs_scrub_set_incomplete(struct xfs_scrub_context *sc);
int xfs_scrub_checkpoint_log(struct xfs_mount *mp);
+/* Are we set up for a cross-referencing check? */
+bool xfs_scrub_should_check_xref(struct xfs_scrub_context *sc, int *error,
+ struct xfs_btree_cur **curpp);
+
/* Setup functions */
int xfs_scrub_setup_fs(struct xfs_scrub_context *sc, struct xfs_inode *ip);
-int xfs_scrub_setup_ag_header(struct xfs_scrub_context *sc,
- struct xfs_inode *ip);
int xfs_scrub_setup_ag_allocbt(struct xfs_scrub_context *sc,
struct xfs_inode *ip);
int xfs_scrub_setup_ag_iallocbt(struct xfs_scrub_context *sc,
@@ -134,11 +148,16 @@ int xfs_scrub_walk_agfl(struct xfs_scrub_context *sc,
int (*fn)(struct xfs_scrub_context *, xfs_agblock_t bno,
void *),
void *priv);
+int xfs_scrub_count_rmap_ownedby_ag(struct xfs_scrub_context *sc,
+ struct xfs_btree_cur *cur,
+ struct xfs_owner_info *oinfo,
+ xfs_filblks_t *blocks);
int xfs_scrub_setup_ag_btree(struct xfs_scrub_context *sc,
struct xfs_inode *ip, bool force_log);
int xfs_scrub_get_inode(struct xfs_scrub_context *sc, struct xfs_inode *ip_in);
int xfs_scrub_setup_inode_contents(struct xfs_scrub_context *sc,
struct xfs_inode *ip, unsigned int resblks);
+void xfs_scrub_buffer_recheck(struct xfs_scrub_context *sc, struct xfs_buf *bp);
#endif /* __XFS_SCRUB_COMMON_H__ */
diff --git a/fs/xfs/scrub/dabtree.c b/fs/xfs/scrub/dabtree.c
index d94edd93cba8..bffdb7dc09bf 100644
--- a/fs/xfs/scrub/dabtree.c
+++ b/fs/xfs/scrub/dabtree.c
@@ -233,11 +233,28 @@ xfs_scrub_da_btree_write_verify(
return;
}
}
+static void *
+xfs_scrub_da_btree_verify(
+ struct xfs_buf *bp)
+{
+ struct xfs_da_blkinfo *info = bp->b_addr;
+
+ switch (be16_to_cpu(info->magic)) {
+ case XFS_DIR2_LEAF1_MAGIC:
+ case XFS_DIR3_LEAF1_MAGIC:
+ bp->b_ops = &xfs_dir3_leaf1_buf_ops;
+ return bp->b_ops->verify_struct(bp);
+ default:
+ bp->b_ops = &xfs_da3_node_buf_ops;
+ return bp->b_ops->verify_struct(bp);
+ }
+}
static const struct xfs_buf_ops xfs_scrub_da_btree_buf_ops = {
.name = "xfs_scrub_da_btree",
.verify_read = xfs_scrub_da_btree_read_verify,
.verify_write = xfs_scrub_da_btree_write_verify,
+ .verify_struct = xfs_scrub_da_btree_verify,
};
/* Check a block's sibling. */
@@ -276,6 +293,9 @@ xfs_scrub_da_btree_block_check_sibling(
xfs_scrub_da_set_corrupt(ds, level);
return error;
}
+ if (ds->state->altpath.blk[level].bp)
+ xfs_scrub_buffer_recheck(ds->sc,
+ ds->state->altpath.blk[level].bp);
/* Compare upper level pointer to sibling pointer. */
if (ds->state->altpath.blk[level].blkno != sibling)
@@ -358,6 +378,8 @@ xfs_scrub_da_btree_block(
&xfs_scrub_da_btree_buf_ops);
if (!xfs_scrub_da_process_error(ds, level, &error))
goto out_nobuf;
+ if (blk->bp)
+ xfs_scrub_buffer_recheck(ds->sc, blk->bp);
/*
* We didn't find a dir btree root block, which means that
diff --git a/fs/xfs/scrub/dir.c b/fs/xfs/scrub/dir.c
index 69e1efdd4019..50b6a26b0299 100644
--- a/fs/xfs/scrub/dir.c
+++ b/fs/xfs/scrub/dir.c
@@ -92,7 +92,7 @@ xfs_scrub_dir_check_ftype(
* inodes can trigger immediate inactive cleanup of the inode.
*/
error = xfs_iget(mp, sdc->sc->tp, inum, 0, 0, &ip);
- if (!xfs_scrub_fblock_process_error(sdc->sc, XFS_DATA_FORK, offset,
+ if (!xfs_scrub_fblock_xref_process_error(sdc->sc, XFS_DATA_FORK, offset,
&error))
goto out;
@@ -200,6 +200,7 @@ xfs_scrub_dir_rec(
struct xfs_inode *dp = ds->dargs.dp;
struct xfs_dir2_data_entry *dent;
struct xfs_buf *bp;
+ char *p, *endp;
xfs_ino_t ino;
xfs_dablk_t rec_bno;
xfs_dir2_db_t db;
@@ -237,9 +238,37 @@ xfs_scrub_dir_rec(
xfs_scrub_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
goto out;
}
+ xfs_scrub_buffer_recheck(ds->sc, bp);
- /* Retrieve the entry, sanity check it, and compare hashes. */
dent = (struct xfs_dir2_data_entry *)(((char *)bp->b_addr) + off);
+
+	/*
+	 * Make sure we got a real directory entry: walk the block's live
+	 * entries, skipping unused space, and confirm the leaf entry's
+	 * address points at the start of an entry, not into the middle
+	 * of one.
+	 */
+ p = (char *)mp->m_dir_inode_ops->data_entry_p(bp->b_addr);
+ endp = xfs_dir3_data_endp(mp->m_dir_geo, bp->b_addr);
+ if (!endp) {
+ xfs_scrub_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
+ goto out_relse;
+ }
+ while (p < endp) {
+ struct xfs_dir2_data_entry *dep;
+ struct xfs_dir2_data_unused *dup;
+
+ dup = (struct xfs_dir2_data_unused *)p;
+ if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
+ p += be16_to_cpu(dup->length);
+ continue;
+ }
+ dep = (struct xfs_dir2_data_entry *)p;
+ if (dep == dent)
+ break;
+ p += mp->m_dir_inode_ops->data_entsize(dep->namelen);
+ }
+ if (p >= endp) {
+ xfs_scrub_fblock_set_corrupt(ds->sc, XFS_DATA_FORK, rec_bno);
+ goto out_relse;
+ }
+
+ /* Retrieve the entry, sanity check it, and compare hashes. */
ino = be64_to_cpu(dent->inumber);
hash = be32_to_cpu(ent->hashval);
tag = be16_to_cpup(dp->d_ops->data_entry_tag_p(dent));
@@ -324,6 +353,7 @@ xfs_scrub_directory_data_bestfree(
}
if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
goto out;
+ xfs_scrub_buffer_recheck(sc, bp);
/* XXX: Check xfs_dir3_data_hdr.pad is zero once we start setting it. */
@@ -361,13 +391,7 @@ xfs_scrub_directory_data_bestfree(
/* Make sure the bestfrees are actually the best free spaces. */
ptr = (char *)d_ops->data_entry_p(bp->b_addr);
- if (is_block) {
- struct xfs_dir2_block_tail *btp;
-
- btp = xfs_dir2_block_tail_p(mp->m_dir_geo, bp->b_addr);
- endptr = (char *)xfs_dir2_block_leaf_p(btp);
- } else
- endptr = (char *)bp->b_addr + BBTOB(bp->b_length);
+ endptr = xfs_dir3_data_endp(mp->m_dir_geo, bp->b_addr);
/* Iterate the entries, stopping when we hit or go past the end. */
while (ptr < endptr) {
@@ -474,6 +498,7 @@ xfs_scrub_directory_leaf1_bestfree(
error = xfs_dir3_leaf_read(sc->tp, sc->ip, lblk, -1, &bp);
if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
goto out;
+ xfs_scrub_buffer_recheck(sc, bp);
leaf = bp->b_addr;
d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
@@ -559,6 +584,7 @@ xfs_scrub_directory_free_bestfree(
error = xfs_dir2_free_read(sc->tp, sc->ip, lblk, &bp);
if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, lblk, &error))
goto out;
+ xfs_scrub_buffer_recheck(sc, bp);
if (xfs_sb_version_hascrc(&sc->mp->m_sb)) {
struct xfs_dir3_free_hdr *hdr3 = bp->b_addr;
diff --git a/fs/xfs/scrub/ialloc.c b/fs/xfs/scrub/ialloc.c
index 496d6f2fbb9e..63ab3f98430d 100644
--- a/fs/xfs/scrub/ialloc.c
+++ b/fs/xfs/scrub/ialloc.c
@@ -58,6 +58,56 @@ xfs_scrub_setup_ag_iallocbt(
/* Inode btree scrubber. */
+/*
+ * If we're checking the finobt, cross-reference with the inobt.
+ * Otherwise we're checking the inobt; if there is a finobt, make sure
+ * it has a record for this chunk if and only if the chunk has free inodes.
+ */
+static inline void
+xfs_scrub_iallocbt_chunk_xref_other(
+ struct xfs_scrub_context *sc,
+ struct xfs_inobt_rec_incore *irec,
+ xfs_agino_t agino)
+{
+ struct xfs_btree_cur **pcur;
+ bool has_irec;
+ int error;
+
+ if (sc->sm->sm_type == XFS_SCRUB_TYPE_FINOBT)
+ pcur = &sc->sa.ino_cur;
+ else
+ pcur = &sc->sa.fino_cur;
+ if (!(*pcur))
+ return;
+ error = xfs_ialloc_has_inode_record(*pcur, agino, agino, &has_irec);
+ if (!xfs_scrub_should_check_xref(sc, &error, pcur))
+ return;
+	if ((irec->ir_freecount > 0 && !has_irec) ||
+	    (irec->ir_freecount == 0 && has_irec))
+ xfs_scrub_btree_xref_set_corrupt(sc, *pcur, 0);
+}
+
+/* Cross-reference with the other btrees. */
+STATIC void
+xfs_scrub_iallocbt_chunk_xref(
+ struct xfs_scrub_context *sc,
+ struct xfs_inobt_rec_incore *irec,
+ xfs_agino_t agino,
+ xfs_agblock_t agbno,
+ xfs_extlen_t len)
+{
+ struct xfs_owner_info oinfo;
+
+ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return;
+
+ xfs_scrub_xref_is_used_space(sc, agbno, len);
+ xfs_scrub_iallocbt_chunk_xref_other(sc, irec, agino);
+ xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES);
+ xfs_scrub_xref_is_owned_by(sc, agbno, len, &oinfo);
+ xfs_scrub_xref_is_not_shared(sc, agbno, len);
+}
+
/* Is this chunk worth checking? */
STATIC bool
xfs_scrub_iallocbt_chunk(
@@ -76,6 +126,8 @@ xfs_scrub_iallocbt_chunk(
!xfs_verify_agbno(mp, agno, bno + len - 1))
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xfs_scrub_iallocbt_chunk_xref(bs->sc, irec, agino, bno, len);
+
return true;
}
@@ -190,8 +242,14 @@ xfs_scrub_iallocbt_check_freemask(
}
/* If any part of this is a hole, skip it. */
- if (ir_holemask)
+ if (ir_holemask) {
+ xfs_scrub_xref_is_not_owned_by(bs->sc, agbno,
+ blks_per_cluster, &oinfo);
continue;
+ }
+
+ xfs_scrub_xref_is_owned_by(bs->sc, agbno, blks_per_cluster,
+ &oinfo);
/* Grab the inode cluster buffer. */
imap.im_blkno = XFS_AGB_TO_DADDR(mp, bs->cur->bc_private.a.agno,
@@ -227,6 +285,7 @@ xfs_scrub_iallocbt_rec(
union xfs_btree_rec *rec)
{
struct xfs_mount *mp = bs->cur->bc_mp;
+ xfs_filblks_t *inode_blocks = bs->private;
struct xfs_inobt_rec_incore irec;
uint64_t holes;
xfs_agnumber_t agno = bs->cur->bc_private.a.agno;
@@ -264,6 +323,9 @@ xfs_scrub_iallocbt_rec(
(agbno & (xfs_icluster_size_fsb(mp) - 1)))
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ *inode_blocks += XFS_B_TO_FSB(mp,
+ irec.ir_count * mp->m_sb.sb_inodesize);
+
/* Handle non-sparse inodes */
if (!xfs_inobt_issparse(irec.ir_holemask)) {
len = XFS_B_TO_FSB(mp,
@@ -308,6 +370,72 @@ out:
return error;
}
+/*
+ * Make sure the inode btrees are as large as the rmap thinks they are.
+ * Don't bother if we're missing btree cursors, as we're already corrupt.
+ */
+STATIC void
+xfs_scrub_iallocbt_xref_rmap_btreeblks(
+ struct xfs_scrub_context *sc,
+ int which)
+{
+ struct xfs_owner_info oinfo;
+ xfs_filblks_t blocks;
+ xfs_extlen_t inobt_blocks = 0;
+ xfs_extlen_t finobt_blocks = 0;
+ int error;
+
+ if (!sc->sa.ino_cur || !sc->sa.rmap_cur ||
+ (xfs_sb_version_hasfinobt(&sc->mp->m_sb) && !sc->sa.fino_cur))
+ return;
+
+ /* Check that we saw as many inobt blocks as the rmap says. */
+ error = xfs_btree_count_blocks(sc->sa.ino_cur, &inobt_blocks);
+ if (!xfs_scrub_process_error(sc, 0, 0, &error))
+ return;
+
+ if (sc->sa.fino_cur) {
+ error = xfs_btree_count_blocks(sc->sa.fino_cur, &finobt_blocks);
+ if (!xfs_scrub_process_error(sc, 0, 0, &error))
+ return;
+ }
+
+ xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT);
+ error = xfs_scrub_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, &oinfo,
+ &blocks);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
+ return;
+ if (blocks != inobt_blocks + finobt_blocks)
+ xfs_scrub_btree_set_corrupt(sc, sc->sa.ino_cur, 0);
+}
+
+/*
+ * Make sure that the inobt records point to the same number of blocks as
+ * the rmap says are owned by inodes.
+ */
+STATIC void
+xfs_scrub_iallocbt_xref_rmap_inodes(
+ struct xfs_scrub_context *sc,
+ int which,
+ xfs_filblks_t inode_blocks)
+{
+ struct xfs_owner_info oinfo;
+ xfs_filblks_t blocks;
+ int error;
+
+ if (!sc->sa.rmap_cur)
+ return;
+
+ /* Check that we saw as many inode blocks as the rmap knows about. */
+ xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES);
+ error = xfs_scrub_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, &oinfo,
+ &blocks);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
+ return;
+ if (blocks != inode_blocks)
+ xfs_scrub_btree_set_corrupt(sc, sc->sa.ino_cur, 0);
+}
+
/* Scrub the inode btrees for some AG. */
STATIC int
xfs_scrub_iallocbt(
@@ -316,10 +444,29 @@ xfs_scrub_iallocbt(
{
struct xfs_btree_cur *cur;
struct xfs_owner_info oinfo;
+ xfs_filblks_t inode_blocks = 0;
+ int error;
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT);
cur = which == XFS_BTNUM_INO ? sc->sa.ino_cur : sc->sa.fino_cur;
- return xfs_scrub_btree(sc, cur, xfs_scrub_iallocbt_rec, &oinfo, NULL);
+ error = xfs_scrub_btree(sc, cur, xfs_scrub_iallocbt_rec, &oinfo,
+ &inode_blocks);
+ if (error)
+ return error;
+
+ xfs_scrub_iallocbt_xref_rmap_btreeblks(sc, which);
+
+ /*
+ * If we're scrubbing the inode btree, inode_blocks is the number of
+ * blocks pointed to by all the inode chunk records. Therefore, we
+ * should compare to the number of inode chunk blocks that the rmap
+ * knows about. We can't do this for the finobt since it only points
+ * to inode chunks with free inodes.
+ */
+ if (which == XFS_BTNUM_INO)
+ xfs_scrub_iallocbt_xref_rmap_inodes(sc, which, inode_blocks);
+
+ return error;
}
int
@@ -335,3 +482,46 @@ xfs_scrub_finobt(
{
return xfs_scrub_iallocbt(sc, XFS_BTNUM_FINO);
}
+
+/* See if an inode btree has (or doesn't have) an inode chunk record. */
+static inline void
+xfs_scrub_xref_inode_check(
+ struct xfs_scrub_context *sc,
+ xfs_agblock_t agbno,
+ xfs_extlen_t len,
+ struct xfs_btree_cur **icur,
+ bool should_have_inodes)
+{
+ bool has_inodes;
+ int error;
+
+ if (!(*icur))
+ return;
+
+ error = xfs_ialloc_has_inodes_at_extent(*icur, agbno, len, &has_inodes);
+ if (!xfs_scrub_should_check_xref(sc, &error, icur))
+ return;
+ if (has_inodes != should_have_inodes)
+ xfs_scrub_btree_xref_set_corrupt(sc, *icur, 0);
+}
+
+/* xref check that the extent is not covered by inodes */
+void
+xfs_scrub_xref_is_not_inode_chunk(
+ struct xfs_scrub_context *sc,
+ xfs_agblock_t agbno,
+ xfs_extlen_t len)
+{
+ xfs_scrub_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, false);
+ xfs_scrub_xref_inode_check(sc, agbno, len, &sc->sa.fino_cur, false);
+}
+
+/* xref check that the extent is covered by inodes */
+void
+xfs_scrub_xref_is_inode_chunk(
+ struct xfs_scrub_context *sc,
+ xfs_agblock_t agbno,
+ xfs_extlen_t len)
+{
+ xfs_scrub_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, true);
+}
diff --git a/fs/xfs/scrub/inode.c b/fs/xfs/scrub/inode.c
index f120fb20452f..21297bef8df1 100644
--- a/fs/xfs/scrub/inode.c
+++ b/fs/xfs/scrub/inode.c
@@ -36,9 +36,13 @@
#include "xfs_ialloc.h"
#include "xfs_da_format.h"
#include "xfs_reflink.h"
+#include "xfs_rmap.h"
+#include "xfs_bmap.h"
+#include "xfs_bmap_util.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
+#include "scrub/btree.h"
#include "scrub/trace.h"
/*
@@ -64,7 +68,7 @@ xfs_scrub_setup_inode(
break;
case -EFSCORRUPTED:
case -EFSBADCRC:
- return 0;
+ return xfs_scrub_trans_alloc(sc->sm, mp, &sc->tp);
default:
return error;
}
@@ -392,6 +396,14 @@ xfs_scrub_dinode(
break;
}
+ /* di_[amc]time.nsec */
+ if (be32_to_cpu(dip->di_atime.t_nsec) >= NSEC_PER_SEC)
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
+ if (be32_to_cpu(dip->di_mtime.t_nsec) >= NSEC_PER_SEC)
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
+ if (be32_to_cpu(dip->di_ctime.t_nsec) >= NSEC_PER_SEC)
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
+
/*
* di_size. xfs_dinode_verify checks for things that screw up
* the VFS such as the upper bit being set and zero-length
@@ -495,6 +507,8 @@ xfs_scrub_dinode(
}
if (dip->di_version >= 3) {
+ if (be32_to_cpu(dip->di_crtime.t_nsec) >= NSEC_PER_SEC)
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
xfs_scrub_inode_flags2(sc, bp, dip, ino, mode, flags, flags2);
xfs_scrub_inode_cowextsize(sc, bp, dip, ino, mode, flags,
flags2);
@@ -546,7 +560,7 @@ xfs_scrub_inode_map_raw(
*/
bp->b_ops = &xfs_inode_buf_ops;
dip = xfs_buf_offset(bp, imap.im_boffset);
- if (!xfs_dinode_verify(mp, ino, dip) ||
+ if (xfs_dinode_verify(mp, ino, dip) != NULL ||
!xfs_dinode_good_version(mp, dip->di_version)) {
xfs_scrub_ino_set_corrupt(sc, ino, bp);
goto out_buf;
@@ -567,18 +581,155 @@ out_buf:
return error;
}
+/*
+ * Make sure the finobt doesn't think this inode is free.
+ * We don't have to check the inobt ourselves because we got the inode via
+ * IGET_UNTRUSTED, which checks the inobt for us.
+ */
+static void
+xfs_scrub_inode_xref_finobt(
+ struct xfs_scrub_context *sc,
+ xfs_ino_t ino)
+{
+ struct xfs_inobt_rec_incore rec;
+ xfs_agino_t agino;
+ int has_record;
+ int error;
+
+ if (!sc->sa.fino_cur)
+ return;
+
+ agino = XFS_INO_TO_AGINO(sc->mp, ino);
+
+ /*
+ * Try to get the finobt record. If we can't get it, then we're
+ * in good shape.
+ */
+ error = xfs_inobt_lookup(sc->sa.fino_cur, agino, XFS_LOOKUP_LE,
+ &has_record);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.fino_cur) ||
+ !has_record)
+ return;
+
+ error = xfs_inobt_get_rec(sc->sa.fino_cur, &rec, &has_record);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.fino_cur) ||
+ !has_record)
+ return;
+
+ /*
+ * Otherwise, make sure this record either doesn't cover this inode,
+ * or that it does but it's marked present.
+ */
+ if (rec.ir_startino > agino ||
+ rec.ir_startino + XFS_INODES_PER_CHUNK <= agino)
+ return;
+
+ if (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino))
+ xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.fino_cur, 0);
+}
+
+/* Cross reference the inode fields with the forks. */
+STATIC void
+xfs_scrub_inode_xref_bmap(
+ struct xfs_scrub_context *sc,
+ struct xfs_dinode *dip)
+{
+ xfs_extnum_t nextents;
+ xfs_filblks_t count;
+ xfs_filblks_t acount;
+ int error;
+
+ /* Walk all the extents to check nextents/naextents/nblocks. */
+ error = xfs_bmap_count_blocks(sc->tp, sc->ip, XFS_DATA_FORK,
+ &nextents, &count);
+ if (!xfs_scrub_should_check_xref(sc, &error, NULL))
+ return;
+ if (nextents < be32_to_cpu(dip->di_nextents))
+ xfs_scrub_ino_xref_set_corrupt(sc, sc->ip->i_ino, NULL);
+
+ error = xfs_bmap_count_blocks(sc->tp, sc->ip, XFS_ATTR_FORK,
+ &nextents, &acount);
+ if (!xfs_scrub_should_check_xref(sc, &error, NULL))
+ return;
+ if (nextents != be16_to_cpu(dip->di_anextents))
+ xfs_scrub_ino_xref_set_corrupt(sc, sc->ip->i_ino, NULL);
+
+ /* Check nblocks against the inode. */
+ if (count + acount != be64_to_cpu(dip->di_nblocks))
+ xfs_scrub_ino_xref_set_corrupt(sc, sc->ip->i_ino, NULL);
+}
+
+/* Cross-reference with the other btrees. */
+STATIC void
+xfs_scrub_inode_xref(
+ struct xfs_scrub_context *sc,
+ xfs_ino_t ino,
+ struct xfs_dinode *dip)
+{
+ struct xfs_owner_info oinfo;
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
+ int error;
+
+ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return;
+
+ agno = XFS_INO_TO_AGNO(sc->mp, ino);
+ agbno = XFS_INO_TO_AGBNO(sc->mp, ino);
+
+ error = xfs_scrub_ag_init(sc, agno, &sc->sa);
+ if (!xfs_scrub_xref_process_error(sc, agno, agbno, &error))
+ return;
+
+ xfs_scrub_xref_is_used_space(sc, agbno, 1);
+ xfs_scrub_inode_xref_finobt(sc, ino);
+ xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES);
+ xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo);
+ xfs_scrub_xref_is_not_shared(sc, agbno, 1);
+ xfs_scrub_inode_xref_bmap(sc, dip);
+
+ xfs_scrub_ag_free(sc, &sc->sa);
+}
+
+/*
+ * If the reflink iflag disagrees with a scan for shared data fork extents,
+ * either flag an error (shared extents w/ no flag) or a preen (flag set w/o
+ * any shared extents). We already checked for the reflink iflag set on a
+ * non-reflink filesystem.
+ */
+static void
+xfs_scrub_inode_check_reflink_iflag(
+ struct xfs_scrub_context *sc,
+ xfs_ino_t ino,
+ struct xfs_buf *bp)
+{
+ struct xfs_mount *mp = sc->mp;
+ bool has_shared;
+ int error;
+
+ if (!xfs_sb_version_hasreflink(&mp->m_sb))
+ return;
+
+ error = xfs_reflink_inode_has_shared_extents(sc->tp, sc->ip,
+ &has_shared);
+ if (!xfs_scrub_xref_process_error(sc, XFS_INO_TO_AGNO(mp, ino),
+ XFS_INO_TO_AGBNO(mp, ino), &error))
+ return;
+ if (xfs_is_reflink_inode(sc->ip) && !has_shared)
+ xfs_scrub_ino_set_preen(sc, ino, bp);
+ else if (!xfs_is_reflink_inode(sc->ip) && has_shared)
+ xfs_scrub_ino_set_corrupt(sc, ino, bp);
+}
+
/* Scrub an inode. */
int
xfs_scrub_inode(
struct xfs_scrub_context *sc)
{
struct xfs_dinode di;
- struct xfs_mount *mp = sc->mp;
struct xfs_buf *bp = NULL;
struct xfs_dinode *dip;
xfs_ino_t ino;
-
- bool has_shared;
int error = 0;
/* Did we get the in-core inode, or are we doing this manually? */
@@ -603,19 +754,14 @@ xfs_scrub_inode(
goto out;
/*
- * Does this inode have the reflink flag set but no shared extents?
- * Set the preening flag if this is the case.
+ * Look for discrepancies between file's data blocks and the reflink
+ * iflag. We already checked the iflag against the file mode when
+ * we scrubbed the dinode.
*/
- if (xfs_is_reflink_inode(sc->ip)) {
- error = xfs_reflink_inode_has_shared_extents(sc->tp, sc->ip,
- &has_shared);
- if (!xfs_scrub_process_error(sc, XFS_INO_TO_AGNO(mp, ino),
- XFS_INO_TO_AGBNO(mp, ino), &error))
- goto out;
- if (!has_shared)
- xfs_scrub_ino_set_preen(sc, ino, bp);
- }
+ if (S_ISREG(VFS_I(sc->ip)->i_mode))
+ xfs_scrub_inode_check_reflink_iflag(sc, ino, bp);
+ xfs_scrub_inode_xref(sc, ino, dip);
out:
if (bp)
xfs_trans_brelse(sc->tp, bp);
diff --git a/fs/xfs/scrub/parent.c b/fs/xfs/scrub/parent.c
index 63a25334fc83..0d3851410c74 100644
--- a/fs/xfs/scrub/parent.c
+++ b/fs/xfs/scrub/parent.c
@@ -169,9 +169,9 @@ xfs_scrub_parent_validate(
* immediate inactive cleanup of the inode.
*/
error = xfs_iget(mp, sc->tp, dnum, 0, 0, &dp);
- if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
+ if (!xfs_scrub_fblock_xref_process_error(sc, XFS_DATA_FORK, 0, &error))
goto out;
- if (dp == sc->ip) {
+ if (dp == sc->ip || !S_ISDIR(VFS_I(dp)->i_mode)) {
xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
goto out_rele;
}
@@ -185,7 +185,7 @@ xfs_scrub_parent_validate(
*/
if (xfs_ilock_nowait(dp, XFS_IOLOCK_SHARED)) {
error = xfs_scrub_parent_count_parent_dentries(sc, dp, &nlink);
- if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, 0,
+ if (!xfs_scrub_fblock_xref_process_error(sc, XFS_DATA_FORK, 0,
&error))
goto out_unlock;
if (nlink != expected_nlink)
@@ -205,7 +205,7 @@ xfs_scrub_parent_validate(
/* Go looking for our dentry. */
error = xfs_scrub_parent_count_parent_dentries(sc, dp, &nlink);
- if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
+ if (!xfs_scrub_fblock_xref_process_error(sc, XFS_DATA_FORK, 0, &error))
goto out_unlock;
/* Drop the parent lock, relock this inode. */
diff --git a/fs/xfs/scrub/quota.c b/fs/xfs/scrub/quota.c
index 3d9037eceaf1..51daa4ae2627 100644
--- a/fs/xfs/scrub/quota.c
+++ b/fs/xfs/scrub/quota.c
@@ -67,13 +67,6 @@ xfs_scrub_setup_quota(
{
uint dqtype;
- /*
- * If userspace gave us an AG number or inode data, they don't
- * know what they're doing. Get out.
- */
- if (sc->sm->sm_agno || sc->sm->sm_ino || sc->sm->sm_gen)
- return -EINVAL;
-
dqtype = xfs_scrub_quota_to_dqtype(sc);
if (dqtype == 0)
return -EINVAL;
diff --git a/fs/xfs/scrub/refcount.c b/fs/xfs/scrub/refcount.c
index 2f88a8d44bd0..400f1561cd3d 100644
--- a/fs/xfs/scrub/refcount.c
+++ b/fs/xfs/scrub/refcount.c
@@ -31,6 +31,7 @@
#include "xfs_sb.h"
#include "xfs_alloc.h"
#include "xfs_rmap.h"
+#include "xfs_refcount.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
@@ -50,6 +51,307 @@ xfs_scrub_setup_ag_refcountbt(
/* Reference count btree scrubber. */
+/*
+ * Confirming Reference Counts via Reverse Mappings
+ *
+ * We want to count the reverse mappings overlapping a refcount record
+ * (bno, len, refcount), allowing for the possibility that some of the
+ * overlap may come from smaller adjoining reverse mappings, while some
+ * comes from single extents which overlap the range entirely. The
+ * outer loop is as follows:
+ *
+ * 1. For all reverse mappings overlapping the refcount extent,
+ * a. If a given rmap completely overlaps, mark it as seen.
+ * b. Otherwise, record the fragment (in agbno order) for later
+ * processing.
+ *
+ * Once we've seen all the rmaps, we know that for all blocks in the
+ * refcount record we want to find $refcount owners and we've already
+ * visited $seen extents that overlap all the blocks. Therefore, we
+ * need to find ($refcount - $seen) owners for every block in the
+ * extent; call that quantity $target_nr. Proceed as follows:
+ *
+ * 2. Pull the first $target_nr fragments from the list; all of them
+ * should start at or before the start of the extent.
+ * Call this subset of fragments the working set.
+ * 3. Until there are no more unprocessed fragments,
+ * a. Find the shortest fragments in the set and remove them.
+ * b. Note the block number of the end of these fragments.
+ * c. Pull the same number of fragments from the list. All of these
+ * fragments should start at the block number recorded in the
+ * previous step.
+ * d. Put those fragments in the set.
+ * 4. Check that there are $target_nr fragments remaining in the list,
+ * and that they all end at or beyond the end of the refcount extent.
+ *
+ * If the refcount is correct, all the check conditions in the algorithm
+ * should always hold true. If not, the refcount is incorrect.
+ */
+struct xfs_scrub_refcnt_frag {
+ struct list_head list;
+ struct xfs_rmap_irec rm;
+};
+
+struct xfs_scrub_refcnt_check {
+ struct xfs_scrub_context *sc;
+ struct list_head fragments;
+
+ /* refcount extent we're examining */
+ xfs_agblock_t bno;
+ xfs_extlen_t len;
+ xfs_nlink_t refcount;
+
+ /* number of owners seen */
+ xfs_nlink_t seen;
+};
+
+/*
+ * Decide if the given rmap is large enough that we can redeem it
+ * towards refcount verification now, or if it's a fragment, in
+ * which case we'll hang onto it in the hopes that we'll later
+ * discover that we've collected exactly the correct number of
+ * fragments as the refcountbt says we should have.
+ */
+STATIC int
+xfs_scrub_refcountbt_rmap_check(
+ struct xfs_btree_cur *cur,
+ struct xfs_rmap_irec *rec,
+ void *priv)
+{
+ struct xfs_scrub_refcnt_check *refchk = priv;
+ struct xfs_scrub_refcnt_frag *frag;
+ xfs_agblock_t rm_last;
+ xfs_agblock_t rc_last;
+ int error = 0;
+
+ if (xfs_scrub_should_terminate(refchk->sc, &error))
+ return error;
+
+ rm_last = rec->rm_startblock + rec->rm_blockcount - 1;
+ rc_last = refchk->bno + refchk->len - 1;
+
+ /* Confirm that a single-owner refc extent is a CoW stage. */
+ if (refchk->refcount == 1 && rec->rm_owner != XFS_RMAP_OWN_COW) {
+ xfs_scrub_btree_xref_set_corrupt(refchk->sc, cur, 0);
+ return 0;
+ }
+
+ if (rec->rm_startblock <= refchk->bno && rm_last >= rc_last) {
+ /*
+ * The rmap overlaps the refcount record, so we can confirm
+ * one refcount owner seen.
+ */
+ refchk->seen++;
+ } else {
+ /*
+ * This rmap covers only part of the refcount record, so
+ * save the fragment for later processing. If the rmapbt
+ * is healthy each rmap_irec we see will be in agbno order
+ * so we don't need insertion sort here.
+ */
+ frag = kmem_alloc(sizeof(struct xfs_scrub_refcnt_frag),
+ KM_MAYFAIL | KM_NOFS);
+ if (!frag)
+ return -ENOMEM;
+ memcpy(&frag->rm, rec, sizeof(frag->rm));
+ list_add_tail(&frag->list, &refchk->fragments);
+ }
+
+ return 0;
+}
+
+/*
+ * Given a bunch of rmap fragments, iterate through them, keeping
+ * a running tally of the refcount. If this ever deviates from
+ * what we expect (which is the refcountbt's refcount minus the
+ * number of extents that totally covered the refcountbt extent),
+ * we have a refcountbt error.
+ */
+STATIC void
+xfs_scrub_refcountbt_process_rmap_fragments(
+ struct xfs_scrub_refcnt_check *refchk)
+{
+ struct list_head worklist;
+ struct xfs_scrub_refcnt_frag *frag;
+ struct xfs_scrub_refcnt_frag *n;
+ xfs_agblock_t bno;
+ xfs_agblock_t rbno;
+ xfs_agblock_t next_rbno;
+ xfs_nlink_t nr;
+ xfs_nlink_t target_nr;
+
+ target_nr = refchk->refcount - refchk->seen;
+ if (target_nr == 0)
+ return;
+
+ /*
+	 * There are (refchk->refcount - refchk->seen) references we
+	 * haven't found yet.  Pull that many off the
+ * fragment list and figure out where the smallest rmap ends
+ * (and therefore the next rmap should start). All the rmaps
+ * we pull off should start at or before the beginning of the
+ * refcount record's range.
+ */
+ INIT_LIST_HEAD(&worklist);
+ rbno = NULLAGBLOCK;
+ nr = 1;
+
+ /* Make sure the fragments actually /are/ in agbno order. */
+ bno = 0;
+ list_for_each_entry(frag, &refchk->fragments, list) {
+ if (frag->rm.rm_startblock < bno)
+ goto done;
+ bno = frag->rm.rm_startblock;
+ }
+
+ /*
+ * Find all the rmaps that start at or before the refc extent,
+ * and put them on the worklist.
+ */
+ list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
+ if (frag->rm.rm_startblock > refchk->bno)
+ goto done;
+ bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
+ if (bno < rbno)
+ rbno = bno;
+ list_move_tail(&frag->list, &worklist);
+ if (nr == target_nr)
+ break;
+ nr++;
+ }
+
+ /*
+ * We should have found exactly $target_nr rmap fragments starting
+ * at or before the refcount extent.
+ */
+ if (nr != target_nr)
+ goto done;
+
+ while (!list_empty(&refchk->fragments)) {
+ /* Discard any fragments ending at rbno from the worklist. */
+ nr = 0;
+ next_rbno = NULLAGBLOCK;
+ list_for_each_entry_safe(frag, n, &worklist, list) {
+ bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
+ if (bno != rbno) {
+ if (bno < next_rbno)
+ next_rbno = bno;
+ continue;
+ }
+ list_del(&frag->list);
+ kmem_free(frag);
+ nr++;
+ }
+
+ /* Try to add nr rmaps starting at rbno to the worklist. */
+ list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
+ bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
+ if (frag->rm.rm_startblock != rbno)
+ goto done;
+ list_move_tail(&frag->list, &worklist);
+ if (next_rbno > bno)
+ next_rbno = bno;
+ nr--;
+ if (nr == 0)
+ break;
+ }
+
+ /*
+ * If we get here and nr > 0, this means that we added fewer
+ * items to the worklist than we discarded because the fragment
+ * list ran out of items. Therefore, we cannot maintain the
+ * required refcount. Something is wrong, so we're done.
+ */
+ if (nr)
+ goto done;
+
+ rbno = next_rbno;
+ }
+
+ /*
+ * Make sure the last extent we processed ends at or beyond
+ * the end of the refcount extent.
+ */
+ if (rbno < refchk->bno + refchk->len)
+ goto done;
+
+ /* Actually record us having seen the remaining refcount. */
+ refchk->seen = refchk->refcount;
+done:
+ /* Delete fragments and work list. */
+ list_for_each_entry_safe(frag, n, &worklist, list) {
+ list_del(&frag->list);
+ kmem_free(frag);
+ }
+ list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
+ list_del(&frag->list);
+ kmem_free(frag);
+ }
+}
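A concrete walkthrough may help, assuming a refcount record (bno 10, len 10, refcount 3): suppose one rmap spans blocks 5-24 and covers the record entirely, so seen = 1 and target_nr = 2, and the fragments arrive in agbno order as [8,14), [10,14), [14,20), and [14,22). The first two fragments start at or before block 10 and move to the worklist; the smallest end among them gives rbno = 14. Both end at 14, so both are discarded and two replacement fragments starting at block 14 are pulled in, setting next_rbno = 20. The fragment list is now empty and rbno = 20 reaches the end of the record (10 + 10 = 20), so every block had exactly three owners and seen is promoted to the full refcount.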
+
+/* Use the rmap entries covering this extent to verify the refcount. */
+STATIC void
+xfs_scrub_refcountbt_xref_rmap(
+ struct xfs_scrub_context *sc,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ xfs_nlink_t refcount)
+{
+ struct xfs_scrub_refcnt_check refchk = {
+ .sc = sc,
+ .bno = bno,
+ .len = len,
+ .refcount = refcount,
+ .seen = 0,
+ };
+ struct xfs_rmap_irec low;
+ struct xfs_rmap_irec high;
+ struct xfs_scrub_refcnt_frag *frag;
+ struct xfs_scrub_refcnt_frag *n;
+ int error;
+
+ if (!sc->sa.rmap_cur)
+ return;
+
+ /* Cross-reference with the rmapbt to confirm the refcount. */
+ memset(&low, 0, sizeof(low));
+ low.rm_startblock = bno;
+ memset(&high, 0xFF, sizeof(high));
+ high.rm_startblock = bno + len - 1;
+
+ INIT_LIST_HEAD(&refchk.fragments);
+ error = xfs_rmap_query_range(sc->sa.rmap_cur, &low, &high,
+ &xfs_scrub_refcountbt_rmap_check, &refchk);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
+ goto out_free;
+
+ xfs_scrub_refcountbt_process_rmap_fragments(&refchk);
+ if (refcount != refchk.seen)
+ xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
+
+out_free:
+ list_for_each_entry_safe(frag, n, &refchk.fragments, list) {
+ list_del(&frag->list);
+ kmem_free(frag);
+ }
+}
+
+/* Cross-reference with the other btrees. */
+STATIC void
+xfs_scrub_refcountbt_xref(
+ struct xfs_scrub_context *sc,
+ xfs_agblock_t agbno,
+ xfs_extlen_t len,
+ xfs_nlink_t refcount)
+{
+ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return;
+
+ xfs_scrub_xref_is_used_space(sc, agbno, len);
+ xfs_scrub_xref_is_not_inode_chunk(sc, agbno, len);
+ xfs_scrub_refcountbt_xref_rmap(sc, agbno, len, refcount);
+}
+
/* Scrub a refcountbt record. */
STATIC int
xfs_scrub_refcountbt_rec(
@@ -57,6 +359,7 @@ xfs_scrub_refcountbt_rec(
union xfs_btree_rec *rec)
{
struct xfs_mount *mp = bs->cur->bc_mp;
+ xfs_agblock_t *cow_blocks = bs->private;
xfs_agnumber_t agno = bs->cur->bc_private.a.agno;
xfs_agblock_t bno;
xfs_extlen_t len;
@@ -72,6 +375,8 @@ xfs_scrub_refcountbt_rec(
has_cowflag = (bno & XFS_REFC_COW_START);
if ((refcount == 1 && !has_cowflag) || (refcount != 1 && has_cowflag))
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ if (has_cowflag)
+ (*cow_blocks) += len;
/* Check the extent. */
bno &= ~XFS_REFC_COW_START;
@@ -83,17 +388,128 @@ xfs_scrub_refcountbt_rec(
if (refcount == 0)
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
+ xfs_scrub_refcountbt_xref(bs->sc, bno, len, refcount);
+
return error;
}
+/* Make sure we have as many refc blocks as the rmap says. */
+STATIC void
+xfs_scrub_refcount_xref_rmap(
+ struct xfs_scrub_context *sc,
+ struct xfs_owner_info *oinfo,
+ xfs_filblks_t cow_blocks)
+{
+ xfs_extlen_t refcbt_blocks = 0;
+ xfs_filblks_t blocks;
+ int error;
+
+ if (!sc->sa.rmap_cur)
+ return;
+
+ /* Check that we saw as many refcbt blocks as the rmap knows about. */
+ error = xfs_btree_count_blocks(sc->sa.refc_cur, &refcbt_blocks);
+ if (!xfs_scrub_btree_process_error(sc, sc->sa.refc_cur, 0, &error))
+ return;
+ error = xfs_scrub_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, oinfo,
+ &blocks);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
+ return;
+ if (blocks != refcbt_blocks)
+ xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
+
+ /* Check that we saw as many cow blocks as the rmap knows about. */
+ xfs_rmap_ag_owner(oinfo, XFS_RMAP_OWN_COW);
+ error = xfs_scrub_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, oinfo,
+ &blocks);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
+ return;
+ if (blocks != cow_blocks)
+ xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
+}
+
/* Scrub the refcount btree for some AG. */
int
xfs_scrub_refcountbt(
struct xfs_scrub_context *sc)
{
struct xfs_owner_info oinfo;
+ xfs_agblock_t cow_blocks = 0;
+ int error;
xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_REFC);
- return xfs_scrub_btree(sc, sc->sa.refc_cur, xfs_scrub_refcountbt_rec,
- &oinfo, NULL);
+ error = xfs_scrub_btree(sc, sc->sa.refc_cur, xfs_scrub_refcountbt_rec,
+ &oinfo, &cow_blocks);
+ if (error)
+ return error;
+
+ xfs_scrub_refcount_xref_rmap(sc, &oinfo, cow_blocks);
+
+ return 0;
+}
+
+/* xref check that a cow staging extent is marked in the refcountbt. */
+void
+xfs_scrub_xref_is_cow_staging(
+ struct xfs_scrub_context *sc,
+ xfs_agblock_t agbno,
+ xfs_extlen_t len)
+{
+ struct xfs_refcount_irec rc;
+ bool has_cowflag;
+ int has_refcount;
+ int error;
+
+ if (!sc->sa.refc_cur)
+ return;
+
+ /* Find the CoW staging extent. */
+ error = xfs_refcount_lookup_le(sc->sa.refc_cur,
+ agbno + XFS_REFC_COW_START, &has_refcount);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.refc_cur))
+ return;
+ if (!has_refcount) {
+ xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
+ return;
+ }
+
+ error = xfs_refcount_get_rec(sc->sa.refc_cur, &rc, &has_refcount);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.refc_cur))
+ return;
+ if (!has_refcount) {
+ xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
+ return;
+ }
+
+ /* CoW flag must be set, refcount must be 1. */
+ has_cowflag = (rc.rc_startblock & XFS_REFC_COW_START);
+ if (!has_cowflag || rc.rc_refcount != 1)
+ xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
+
+ /* Must be at least as long as what was passed in */
+ if (rc.rc_blockcount < len)
+ xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
+}
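Note that CoW staging extents live in the same refcount btree but are keyed with XFS_REFC_COW_START added to the start block, which is why the lookup above offsets agbno before searching, and why xfs_scrub_refcountbt_rec() masks the flag back off with ~XFS_REFC_COW_START before checking the extent.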
+
+/*
+ * xref check that the extent is not shared. Only file data blocks
+ * can have multiple owners.
+ */
+void
+xfs_scrub_xref_is_not_shared(
+ struct xfs_scrub_context *sc,
+ xfs_agblock_t agbno,
+ xfs_extlen_t len)
+{
+ bool shared;
+ int error;
+
+ if (!sc->sa.refc_cur)
+ return;
+
+ error = xfs_refcount_has_record(sc->sa.refc_cur, agbno, len, &shared);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.refc_cur))
+ return;
+ if (shared)
+ xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
}
diff --git a/fs/xfs/scrub/rmap.c b/fs/xfs/scrub/rmap.c
index 97846c424690..8f2a7c3ff455 100644
--- a/fs/xfs/scrub/rmap.c
+++ b/fs/xfs/scrub/rmap.c
@@ -32,6 +32,7 @@
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_rmap.h"
+#include "xfs_refcount.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
@@ -51,6 +52,61 @@ xfs_scrub_setup_ag_rmapbt(
/* Reverse-mapping scrubber. */
+/* Cross-reference a rmap against the refcount btree. */
+STATIC void
+xfs_scrub_rmapbt_xref_refc(
+ struct xfs_scrub_context *sc,
+ struct xfs_rmap_irec *irec)
+{
+ xfs_agblock_t fbno;
+ xfs_extlen_t flen;
+ bool non_inode;
+ bool is_bmbt;
+ bool is_attr;
+ bool is_unwritten;
+ int error;
+
+ if (!sc->sa.refc_cur)
+ return;
+
+ non_inode = XFS_RMAP_NON_INODE_OWNER(irec->rm_owner);
+ is_bmbt = irec->rm_flags & XFS_RMAP_BMBT_BLOCK;
+ is_attr = irec->rm_flags & XFS_RMAP_ATTR_FORK;
+ is_unwritten = irec->rm_flags & XFS_RMAP_UNWRITTEN;
+
+ /* If this is shared, must be a data fork extent. */
+ error = xfs_refcount_find_shared(sc->sa.refc_cur, irec->rm_startblock,
+ irec->rm_blockcount, &fbno, &flen, false);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.refc_cur))
+ return;
+ if (flen != 0 && (non_inode || is_attr || is_bmbt || is_unwritten))
+ xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
+}
+
+/* Cross-reference with the other btrees. */
+STATIC void
+xfs_scrub_rmapbt_xref(
+ struct xfs_scrub_context *sc,
+ struct xfs_rmap_irec *irec)
+{
+ xfs_agblock_t agbno = irec->rm_startblock;
+ xfs_extlen_t len = irec->rm_blockcount;
+
+ if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+ return;
+
+ xfs_scrub_xref_is_used_space(sc, agbno, len);
+ if (irec->rm_owner == XFS_RMAP_OWN_INODES)
+ xfs_scrub_xref_is_inode_chunk(sc, agbno, len);
+ else
+ xfs_scrub_xref_is_not_inode_chunk(sc, agbno, len);
+ if (irec->rm_owner == XFS_RMAP_OWN_COW)
+ xfs_scrub_xref_is_cow_staging(sc, irec->rm_startblock,
+ irec->rm_blockcount);
+ else
+ xfs_scrub_rmapbt_xref_refc(sc, irec);
+}
+
/* Scrub an rmapbt record. */
STATIC int
xfs_scrub_rmapbt_rec(
@@ -121,6 +177,8 @@ xfs_scrub_rmapbt_rec(
irec.rm_owner > XFS_RMAP_OWN_FS)
xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
}
+
+ xfs_scrub_rmapbt_xref(bs->sc, &irec);
out:
return error;
}
@@ -136,3 +194,68 @@ xfs_scrub_rmapbt(
return xfs_scrub_btree(sc, sc->sa.rmap_cur, xfs_scrub_rmapbt_rec,
&oinfo, NULL);
}
+
+/* xref check that the extent is owned by a given owner */
+static inline void
+xfs_scrub_xref_check_owner(
+ struct xfs_scrub_context *sc,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ struct xfs_owner_info *oinfo,
+ bool should_have_rmap)
+{
+ bool has_rmap;
+ int error;
+
+ if (!sc->sa.rmap_cur)
+ return;
+
+ error = xfs_rmap_record_exists(sc->sa.rmap_cur, bno, len, oinfo,
+ &has_rmap);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
+ return;
+ if (has_rmap != should_have_rmap)
+ xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
+}
+
+/* xref check that the extent is owned by a given owner */
+void
+xfs_scrub_xref_is_owned_by(
+ struct xfs_scrub_context *sc,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ struct xfs_owner_info *oinfo)
+{
+ xfs_scrub_xref_check_owner(sc, bno, len, oinfo, true);
+}
+
+/* xref check that the extent is not owned by a given owner */
+void
+xfs_scrub_xref_is_not_owned_by(
+ struct xfs_scrub_context *sc,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ struct xfs_owner_info *oinfo)
+{
+ xfs_scrub_xref_check_owner(sc, bno, len, oinfo, false);
+}
+
+/* xref check that the extent has no reverse mapping at all */
+void
+xfs_scrub_xref_has_no_owner(
+ struct xfs_scrub_context *sc,
+ xfs_agblock_t bno,
+ xfs_extlen_t len)
+{
+ bool has_rmap;
+ int error;
+
+ if (!sc->sa.rmap_cur)
+ return;
+
+ error = xfs_rmap_has_record(sc->sa.rmap_cur, bno, len, &has_rmap);
+ if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
+ return;
+ if (has_rmap)
+ xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
+}
diff --git a/fs/xfs/scrub/rtbitmap.c b/fs/xfs/scrub/rtbitmap.c
index c6fedb698008..26390991369a 100644
--- a/fs/xfs/scrub/rtbitmap.c
+++ b/fs/xfs/scrub/rtbitmap.c
@@ -43,22 +43,14 @@ xfs_scrub_setup_rt(
struct xfs_scrub_context *sc,
struct xfs_inode *ip)
{
- struct xfs_mount *mp = sc->mp;
- int error = 0;
-
- /*
- * If userspace gave us an AG number or inode data, they don't
- * know what they're doing. Get out.
- */
- if (sc->sm->sm_agno || sc->sm->sm_ino || sc->sm->sm_gen)
- return -EINVAL;
+ int error;
error = xfs_scrub_setup_fs(sc, ip);
if (error)
return error;
sc->ilock_flags = XFS_ILOCK_EXCL | XFS_ILOCK_RTBITMAP;
- sc->ip = mp->m_rbmip;
+ sc->ip = sc->mp->m_rbmip;
xfs_ilock(sc->ip, sc->ilock_flags);
return 0;
@@ -106,3 +98,26 @@ xfs_scrub_rtsummary(
/* XXX: implement this some day */
return -ENOENT;
}
+
+/* xref check that the extent is not free in the rtbitmap */
+void
+xfs_scrub_xref_is_used_rt_space(
+ struct xfs_scrub_context *sc,
+ xfs_rtblock_t fsbno,
+ xfs_extlen_t len)
+{
+ bool is_free;
+ int error;
+
+ xfs_ilock(sc->mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP);
+ error = xfs_rtalloc_extent_is_free(sc->mp, sc->tp, fsbno, len,
+ &is_free);
+ if (!xfs_scrub_should_check_xref(sc, &error, NULL))
+ goto out_unlock;
+ if (is_free)
+ xfs_scrub_ino_xref_set_corrupt(sc, sc->mp->m_rbmip->i_ino,
+ NULL);
+out_unlock:
+ xfs_iunlock(sc->mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP);
+}
diff --git a/fs/xfs/scrub/scrub.c b/fs/xfs/scrub/scrub.c
index ab3aef2ae823..26c75967a072 100644
--- a/fs/xfs/scrub/scrub.c
+++ b/fs/xfs/scrub/scrub.c
@@ -110,6 +110,16 @@
* structure itself is corrupt, the CORRUPT flag will be set. If
* the metadata is correct but otherwise suboptimal, the PREEN flag
* will be set.
+ *
+ * We perform secondary validation of filesystem metadata by
+ * cross-referencing every record with all other available metadata.
+ * For example, for block mapping extents, we verify that there are no
+ * records in the free space and inode btrees corresponding to that
+ * space extent and that there is a corresponding entry in the reverse
+ * mapping btree. Inconsistent metadata is noted by setting the
+ * XCORRUPT flag; btree query function errors are noted by setting the
+ * XFAIL flag and deleting the cursor to prevent further attempts to
+ * cross-reference with a defective btree.
*/
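From userspace, the outcome of cross-referencing is visible in the output flags of the scrub ioctl. A hedged sketch, assuming the uapi structure and flag definitions from xfs_fs.h are in scope (the header path shown is an assumption and varies by distribution):

#include <stdio.h>
#include <sys/ioctl.h>
#include <xfs/xfs.h>	/* assumed path to struct xfs_scrub_metadata */

/* Scrub one AG's free space btree and report cross-reference results. */
static int
scrub_bnobt(int fd, unsigned int agno)
{
	struct xfs_scrub_metadata	sm = {
		.sm_type = XFS_SCRUB_TYPE_BNOBT,
		.sm_agno = agno,
	};

	if (ioctl(fd, XFS_IOC_SCRUB_METADATA, &sm) < 0)
		return -1;
	if (sm.sm_flags & XFS_SCRUB_OFLAG_XCORRUPT)
		printf("AG %u bnobt disagrees with other metadata\n", agno);
	if (sm.sm_flags & XFS_SCRUB_OFLAG_XFAIL)
		printf("AG %u cross-referencing failed\n", agno);
	return 0;
}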
/*
@@ -128,8 +138,6 @@ xfs_scrub_probe(
{
int error = 0;
- if (sc->sm->sm_ino || sc->sm->sm_agno)
- return -EINVAL;
if (xfs_scrub_should_terminate(sc, &error))
return error;
@@ -151,7 +159,8 @@ xfs_scrub_teardown(
sc->tp = NULL;
}
if (sc->ip) {
- xfs_iunlock(sc->ip, sc->ilock_flags);
+ if (sc->ilock_flags)
+ xfs_iunlock(sc->ip, sc->ilock_flags);
if (sc->ip != ip_in &&
!xfs_internal_inum(sc->mp, sc->ip->i_ino))
iput(VFS_I(sc->ip));
@@ -167,106 +176,130 @@ xfs_scrub_teardown(
/* Scrubbing dispatch. */
static const struct xfs_scrub_meta_ops meta_scrub_ops[] = {
- { /* ioctl presence test */
+ [XFS_SCRUB_TYPE_PROBE] = { /* ioctl presence test */
+ .type = ST_NONE,
.setup = xfs_scrub_setup_fs,
.scrub = xfs_scrub_probe,
},
- { /* superblock */
- .setup = xfs_scrub_setup_ag_header,
+ [XFS_SCRUB_TYPE_SB] = { /* superblock */
+ .type = ST_PERAG,
+ .setup = xfs_scrub_setup_fs,
.scrub = xfs_scrub_superblock,
},
- { /* agf */
- .setup = xfs_scrub_setup_ag_header,
+ [XFS_SCRUB_TYPE_AGF] = { /* agf */
+ .type = ST_PERAG,
+ .setup = xfs_scrub_setup_fs,
.scrub = xfs_scrub_agf,
},
- { /* agfl */
- .setup = xfs_scrub_setup_ag_header,
+	[XFS_SCRUB_TYPE_AGFL] = { /* agfl */
+ .type = ST_PERAG,
+ .setup = xfs_scrub_setup_fs,
.scrub = xfs_scrub_agfl,
},
- { /* agi */
- .setup = xfs_scrub_setup_ag_header,
+ [XFS_SCRUB_TYPE_AGI] = { /* agi */
+ .type = ST_PERAG,
+ .setup = xfs_scrub_setup_fs,
.scrub = xfs_scrub_agi,
},
- { /* bnobt */
+ [XFS_SCRUB_TYPE_BNOBT] = { /* bnobt */
+ .type = ST_PERAG,
.setup = xfs_scrub_setup_ag_allocbt,
.scrub = xfs_scrub_bnobt,
},
- { /* cntbt */
+ [XFS_SCRUB_TYPE_CNTBT] = { /* cntbt */
+ .type = ST_PERAG,
.setup = xfs_scrub_setup_ag_allocbt,
.scrub = xfs_scrub_cntbt,
},
- { /* inobt */
+ [XFS_SCRUB_TYPE_INOBT] = { /* inobt */
+ .type = ST_PERAG,
.setup = xfs_scrub_setup_ag_iallocbt,
.scrub = xfs_scrub_inobt,
},
- { /* finobt */
+ [XFS_SCRUB_TYPE_FINOBT] = { /* finobt */
+ .type = ST_PERAG,
.setup = xfs_scrub_setup_ag_iallocbt,
.scrub = xfs_scrub_finobt,
.has = xfs_sb_version_hasfinobt,
},
- { /* rmapbt */
+ [XFS_SCRUB_TYPE_RMAPBT] = { /* rmapbt */
+ .type = ST_PERAG,
.setup = xfs_scrub_setup_ag_rmapbt,
.scrub = xfs_scrub_rmapbt,
.has = xfs_sb_version_hasrmapbt,
},
- { /* refcountbt */
+ [XFS_SCRUB_TYPE_REFCNTBT] = { /* refcountbt */
+ .type = ST_PERAG,
.setup = xfs_scrub_setup_ag_refcountbt,
.scrub = xfs_scrub_refcountbt,
.has = xfs_sb_version_hasreflink,
},
- { /* inode record */
+ [XFS_SCRUB_TYPE_INODE] = { /* inode record */
+ .type = ST_INODE,
.setup = xfs_scrub_setup_inode,
.scrub = xfs_scrub_inode,
},
- { /* inode data fork */
+ [XFS_SCRUB_TYPE_BMBTD] = { /* inode data fork */
+ .type = ST_INODE,
.setup = xfs_scrub_setup_inode_bmap,
.scrub = xfs_scrub_bmap_data,
},
- { /* inode attr fork */
+ [XFS_SCRUB_TYPE_BMBTA] = { /* inode attr fork */
+ .type = ST_INODE,
.setup = xfs_scrub_setup_inode_bmap,
.scrub = xfs_scrub_bmap_attr,
},
- { /* inode CoW fork */
+ [XFS_SCRUB_TYPE_BMBTC] = { /* inode CoW fork */
+ .type = ST_INODE,
.setup = xfs_scrub_setup_inode_bmap,
.scrub = xfs_scrub_bmap_cow,
},
- { /* directory */
+ [XFS_SCRUB_TYPE_DIR] = { /* directory */
+ .type = ST_INODE,
.setup = xfs_scrub_setup_directory,
.scrub = xfs_scrub_directory,
},
- { /* extended attributes */
+ [XFS_SCRUB_TYPE_XATTR] = { /* extended attributes */
+ .type = ST_INODE,
.setup = xfs_scrub_setup_xattr,
.scrub = xfs_scrub_xattr,
},
- { /* symbolic link */
+ [XFS_SCRUB_TYPE_SYMLINK] = { /* symbolic link */
+ .type = ST_INODE,
.setup = xfs_scrub_setup_symlink,
.scrub = xfs_scrub_symlink,
},
- { /* parent pointers */
+ [XFS_SCRUB_TYPE_PARENT] = { /* parent pointers */
+ .type = ST_INODE,
.setup = xfs_scrub_setup_parent,
.scrub = xfs_scrub_parent,
},
- { /* realtime bitmap */
+ [XFS_SCRUB_TYPE_RTBITMAP] = { /* realtime bitmap */
+ .type = ST_FS,
.setup = xfs_scrub_setup_rt,
.scrub = xfs_scrub_rtbitmap,
.has = xfs_sb_version_hasrealtime,
},
- { /* realtime summary */
+ [XFS_SCRUB_TYPE_RTSUM] = { /* realtime summary */
+ .type = ST_FS,
.setup = xfs_scrub_setup_rt,
.scrub = xfs_scrub_rtsummary,
.has = xfs_sb_version_hasrealtime,
},
- { /* user quota */
- .setup = xfs_scrub_setup_quota,
- .scrub = xfs_scrub_quota,
+ [XFS_SCRUB_TYPE_UQUOTA] = { /* user quota */
+ .type = ST_FS,
+ .setup = xfs_scrub_setup_quota,
+ .scrub = xfs_scrub_quota,
},
- { /* group quota */
- .setup = xfs_scrub_setup_quota,
- .scrub = xfs_scrub_quota,
+ [XFS_SCRUB_TYPE_GQUOTA] = { /* group quota */
+ .type = ST_FS,
+ .setup = xfs_scrub_setup_quota,
+ .scrub = xfs_scrub_quota,
},
- { /* project quota */
- .setup = xfs_scrub_setup_quota,
- .scrub = xfs_scrub_quota,
+ [XFS_SCRUB_TYPE_PQUOTA] = { /* project quota */
+ .type = ST_FS,
+ .setup = xfs_scrub_setup_quota,
+ .scrub = xfs_scrub_quota,
},
};
@@ -284,44 +317,56 @@ xfs_scrub_experimental_warning(
"EXPERIMENTAL online scrub feature in use. Use at your own risk!");
}
-/* Dispatch metadata scrubbing. */
-int
-xfs_scrub_metadata(
- struct xfs_inode *ip,
+static int
+xfs_scrub_validate_inputs(
+ struct xfs_mount *mp,
struct xfs_scrub_metadata *sm)
{
- struct xfs_scrub_context sc;
- struct xfs_mount *mp = ip->i_mount;
+ int error;
const struct xfs_scrub_meta_ops *ops;
- bool try_harder = false;
- int error = 0;
-
- trace_xfs_scrub_start(ip, sm, error);
-
- /* Forbidden if we are shut down or mounted norecovery. */
- error = -ESHUTDOWN;
- if (XFS_FORCED_SHUTDOWN(mp))
- goto out;
- error = -ENOTRECOVERABLE;
- if (mp->m_flags & XFS_MOUNT_NORECOVERY)
- goto out;
- /* Check our inputs. */
error = -EINVAL;
+ /* Check our inputs. */
sm->sm_flags &= ~XFS_SCRUB_FLAGS_OUT;
if (sm->sm_flags & ~XFS_SCRUB_FLAGS_IN)
goto out;
+ /* sm_reserved[] must be zero */
if (memchr_inv(sm->sm_reserved, 0, sizeof(sm->sm_reserved)))
goto out;
- /* Do we know about this type of metadata? */
error = -ENOENT;
+ /* Do we know about this type of metadata? */
if (sm->sm_type >= XFS_SCRUB_TYPE_NR)
goto out;
ops = &meta_scrub_ops[sm->sm_type];
- if (ops->scrub == NULL)
+ if (ops->setup == NULL || ops->scrub == NULL)
goto out;
+ /* Does this fs even support this type of metadata? */
+ if (ops->has && !ops->has(&mp->m_sb))
+ goto out;
+
+ error = -EINVAL;
+ /* restricting fields must be appropriate for type */
+ switch (ops->type) {
+ case ST_NONE:
+ case ST_FS:
+ if (sm->sm_ino || sm->sm_gen || sm->sm_agno)
+ goto out;
+ break;
+ case ST_PERAG:
+ if (sm->sm_ino || sm->sm_gen ||
+ sm->sm_agno >= mp->m_sb.sb_agcount)
+ goto out;
+ break;
+ case ST_INODE:
+ if (sm->sm_agno || (sm->sm_gen && !sm->sm_ino))
+ goto out;
+ break;
+ default:
+ goto out;
+ }
+ error = -EOPNOTSUPP;
/*
* We won't scrub any filesystem that doesn't have the ability
* to record unwritten extents. The option was made default in
@@ -331,20 +376,46 @@ xfs_scrub_metadata(
* We also don't support v1-v3 filesystems, which aren't
* mountable.
*/
- error = -EOPNOTSUPP;
if (!xfs_sb_version_hasextflgbit(&mp->m_sb))
goto out;
- /* Does this fs even support this type of metadata? */
- error = -ENOENT;
- if (ops->has && !ops->has(&mp->m_sb))
- goto out;
-
/* We don't know how to repair anything yet. */
- error = -EOPNOTSUPP;
if (sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR)
goto out;
+ error = 0;
+out:
+ return error;
+}
+
+/* Dispatch metadata scrubbing. */
+int
+xfs_scrub_metadata(
+ struct xfs_inode *ip,
+ struct xfs_scrub_metadata *sm)
+{
+ struct xfs_scrub_context sc;
+ struct xfs_mount *mp = ip->i_mount;
+ bool try_harder = false;
+ int error = 0;
+
+ BUILD_BUG_ON(sizeof(meta_scrub_ops) !=
+ (sizeof(struct xfs_scrub_meta_ops) * XFS_SCRUB_TYPE_NR));
+
+ trace_xfs_scrub_start(ip, sm, error);
+
+ /* Forbidden if we are shut down or mounted norecovery. */
+ error = -ESHUTDOWN;
+ if (XFS_FORCED_SHUTDOWN(mp))
+ goto out;
+ error = -ENOTRECOVERABLE;
+ if (mp->m_flags & XFS_MOUNT_NORECOVERY)
+ goto out;
+
+ error = xfs_scrub_validate_inputs(mp, sm);
+ if (error)
+ goto out;
+
xfs_scrub_experimental_warning(mp);
retry_op:
@@ -352,7 +423,7 @@ retry_op:
memset(&sc, 0, sizeof(sc));
sc.mp = ip->i_mount;
sc.sm = sm;
- sc.ops = ops;
+ sc.ops = &meta_scrub_ops[sm->sm_type];
sc.try_harder = try_harder;
sc.sa.agno = NULLAGNUMBER;
error = sc.ops->setup(&sc, ip);
diff --git a/fs/xfs/scrub/scrub.h b/fs/xfs/scrub/scrub.h
index e9ec041cf713..0d92af86f67a 100644
--- a/fs/xfs/scrub/scrub.h
+++ b/fs/xfs/scrub/scrub.h
@@ -22,6 +22,14 @@
struct xfs_scrub_context;
+/* Type info and names for the scrub types. */
+enum xfs_scrub_type {
+ ST_NONE = 1, /* disabled */
+ ST_PERAG, /* per-AG metadata */
+ ST_FS, /* per-FS metadata */
+ ST_INODE, /* per-inode metadata */
+};
+
struct xfs_scrub_meta_ops {
/* Acquire whatever resources are needed for the operation. */
int (*setup)(struct xfs_scrub_context *,
@@ -32,6 +40,9 @@ struct xfs_scrub_meta_ops {
/* Decide if we even have this piece of metadata. */
bool (*has)(struct xfs_sb *);
+
+ /* type describing required/allowed inputs */
+ enum xfs_scrub_type type;
};
/* Buffer pointers and btree cursors for an entire AG. */
@@ -112,4 +123,30 @@ xfs_scrub_quota(struct xfs_scrub_context *sc)
}
#endif
+/* cross-referencing helpers */
+void xfs_scrub_xref_is_used_space(struct xfs_scrub_context *sc,
+ xfs_agblock_t agbno, xfs_extlen_t len);
+void xfs_scrub_xref_is_not_inode_chunk(struct xfs_scrub_context *sc,
+ xfs_agblock_t agbno, xfs_extlen_t len);
+void xfs_scrub_xref_is_inode_chunk(struct xfs_scrub_context *sc,
+ xfs_agblock_t agbno, xfs_extlen_t len);
+void xfs_scrub_xref_is_owned_by(struct xfs_scrub_context *sc,
+ xfs_agblock_t agbno, xfs_extlen_t len,
+ struct xfs_owner_info *oinfo);
+void xfs_scrub_xref_is_not_owned_by(struct xfs_scrub_context *sc,
+ xfs_agblock_t agbno, xfs_extlen_t len,
+ struct xfs_owner_info *oinfo);
+void xfs_scrub_xref_has_no_owner(struct xfs_scrub_context *sc,
+ xfs_agblock_t agbno, xfs_extlen_t len);
+void xfs_scrub_xref_is_cow_staging(struct xfs_scrub_context *sc,
+ xfs_agblock_t bno, xfs_extlen_t len);
+void xfs_scrub_xref_is_not_shared(struct xfs_scrub_context *sc,
+ xfs_agblock_t bno, xfs_extlen_t len);
+#ifdef CONFIG_XFS_RT
+void xfs_scrub_xref_is_used_rt_space(struct xfs_scrub_context *sc,
+ xfs_rtblock_t rtbno, xfs_extlen_t len);
+#else
+# define xfs_scrub_xref_is_used_rt_space(sc, rtbno, len) do { } while (0)
+#endif
+
#endif /* __XFS_SCRUB_SCRUB_H__ */
diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h
index c4ebfb5c1ee8..4dc896852bf0 100644
--- a/fs/xfs/scrub/trace.h
+++ b/fs/xfs/scrub/trace.h
@@ -50,7 +50,7 @@ DECLARE_EVENT_CLASS(xfs_scrub_class,
__entry->flags = sm->sm_flags;
__entry->error = error;
),
- TP_printk("dev %d:%d ino %llu type %u agno %u inum %llu gen %u flags 0x%x error %d",
+ TP_printk("dev %d:%d ino 0x%llx type %u agno %u inum %llu gen %u flags 0x%x error %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->type,
@@ -90,7 +90,7 @@ TRACE_EVENT(xfs_scrub_op_error,
__entry->error = error;
__entry->ret_ip = ret_ip;
),
- TP_printk("dev %d:%d type %u agno %u agbno %u error %d ret_ip %pF",
+ TP_printk("dev %d:%d type %u agno %u agbno %u error %d ret_ip %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->type,
__entry->agno,
@@ -121,7 +121,7 @@ TRACE_EVENT(xfs_scrub_file_op_error,
__entry->error = error;
__entry->ret_ip = ret_ip;
),
- TP_printk("dev %d:%d ino %llu fork %d type %u offset %llu error %d ret_ip %pF",
+ TP_printk("dev %d:%d ino 0x%llx fork %d type %u offset %llu error %d ret_ip %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->whichfork,
@@ -156,7 +156,7 @@ DECLARE_EVENT_CLASS(xfs_scrub_block_error_class,
__entry->bno = bno;
__entry->ret_ip = ret_ip;
),
- TP_printk("dev %d:%d type %u agno %u agbno %u ret_ip %pF",
+ TP_printk("dev %d:%d type %u agno %u agbno %u ret_ip %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->type,
__entry->agno,
@@ -207,7 +207,7 @@ DECLARE_EVENT_CLASS(xfs_scrub_ino_error_class,
__entry->bno = bno;
__entry->ret_ip = ret_ip;
),
- TP_printk("dev %d:%d ino %llu type %u agno %u agbno %u ret_ip %pF",
+ TP_printk("dev %d:%d ino 0x%llx type %u agno %u agbno %u ret_ip %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->type,
@@ -246,7 +246,7 @@ DECLARE_EVENT_CLASS(xfs_scrub_fblock_error_class,
__entry->offset = offset;
__entry->ret_ip = ret_ip;
),
- TP_printk("dev %d:%d ino %llu fork %d type %u offset %llu ret_ip %pF",
+ TP_printk("dev %d:%d ino 0x%llx fork %d type %u offset %llu ret_ip %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->whichfork,
@@ -277,7 +277,7 @@ TRACE_EVENT(xfs_scrub_incomplete,
__entry->type = sc->sm->sm_type;
__entry->ret_ip = ret_ip;
),
- TP_printk("dev %d:%d type %u ret_ip %pF",
+ TP_printk("dev %d:%d type %u ret_ip %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->type,
__entry->ret_ip)
@@ -311,7 +311,7 @@ TRACE_EVENT(xfs_scrub_btree_op_error,
__entry->error = error;
__entry->ret_ip = ret_ip;
),
- TP_printk("dev %d:%d type %u btnum %d level %d ptr %d agno %u agbno %u error %d ret_ip %pF",
+ TP_printk("dev %d:%d type %u btnum %d level %d ptr %d agno %u agbno %u error %d ret_ip %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->type,
__entry->btnum,
@@ -354,7 +354,7 @@ TRACE_EVENT(xfs_scrub_ifork_btree_op_error,
__entry->error = error;
__entry->ret_ip = ret_ip;
),
- TP_printk("dev %d:%d ino %llu fork %d type %u btnum %d level %d ptr %d agno %u agbno %u error %d ret_ip %pF",
+ TP_printk("dev %d:%d ino 0x%llx fork %d type %u btnum %d level %d ptr %d agno %u agbno %u error %d ret_ip %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->whichfork,
@@ -393,7 +393,7 @@ TRACE_EVENT(xfs_scrub_btree_error,
__entry->ptr = cur->bc_ptrs[level];
__entry->ret_ip = ret_ip;
),
- TP_printk("dev %d:%d type %u btnum %d level %d ptr %d agno %u agbno %u ret_ip %pF",
+ TP_printk("dev %d:%d type %u btnum %d level %d ptr %d agno %u agbno %u ret_ip %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->type,
__entry->btnum,
@@ -433,7 +433,7 @@ TRACE_EVENT(xfs_scrub_ifork_btree_error,
__entry->ptr = cur->bc_ptrs[level];
__entry->ret_ip = ret_ip;
),
- TP_printk("dev %d:%d ino %llu fork %d type %u btnum %d level %d ptr %d agno %u agbno %u ret_ip %pF",
+ TP_printk("dev %d:%d ino 0x%llx fork %d type %u btnum %d level %d ptr %d agno %u agbno %u ret_ip %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->whichfork,
@@ -491,6 +491,28 @@ DEFINE_EVENT(xfs_scrub_sbtree_class, name, \
DEFINE_SCRUB_SBTREE_EVENT(xfs_scrub_btree_rec);
DEFINE_SCRUB_SBTREE_EVENT(xfs_scrub_btree_key);
+TRACE_EVENT(xfs_scrub_xref_error,
+ TP_PROTO(struct xfs_scrub_context *sc, int error, void *ret_ip),
+ TP_ARGS(sc, error, ret_ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(int, type)
+ __field(int, error)
+ __field(void *, ret_ip)
+ ),
+ TP_fast_assign(
+ __entry->dev = sc->mp->m_super->s_dev;
+ __entry->type = sc->sm->sm_type;
+ __entry->error = error;
+ __entry->ret_ip = ret_ip;
+ ),
+	TP_printk("dev %d:%d type %u xref error %d ret_ip %pS",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->type,
+ __entry->error,
+ __entry->ret_ip)
+);
+
#endif /* _TRACE_XFS_SCRUB_TRACE_H */
#undef TRACE_INCLUDE_PATH
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 4fc526a27a94..9c6a830da0ee 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -390,6 +390,19 @@ xfs_map_blocks(
if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
+ /*
+ * Truncate can race with writeback since writeback doesn't take the
+ * iolock and truncate decreases the file size before it starts
+ * truncating the pages between new_size and old_size. Therefore, we
+ * can end up in the situation where writeback gets a CoW fork mapping
+ * but the truncate makes the mapping invalid and we end up in here
+ * trying to get a new mapping. Bail out here so that we simply never
+ * get a valid mapping and so we drop the write altogether. The page
+ * truncation will kill the contents anyway.
+ */
+ if (type == XFS_IO_COW && offset > i_size_read(inode))
+ return 0;
+
ASSERT(type != XFS_IO_COW);
if (type == XFS_IO_UNWRITTEN)
bmapi_flags |= XFS_BMAPI_IGSTATE;
@@ -791,7 +804,7 @@ xfs_aops_discard_page(
goto out_invalidate;
xfs_alert(ip->i_mount,
- "page discard on page %p, inode 0x%llx, offset %llu.",
+ "page discard on page "PTR_FMT", inode 0x%llx, offset %llu.",
page, ip->i_ino, offset);
xfs_ilock(ip, XFS_ILOCK_EXCL);
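
The new early return is the entire fix for the race described in the
comment: writeback re-samples the inode size and silently drops CoW
writes that have fallen beyond EOF, because the racing truncate will
invalidate those pages anyway. A minimal sketch of the idea, with
invented names (i_size, map_blocks) standing in for the real inode
plumbing:

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic long i_size = 4096; /* truncate may shrink this anytime */

    static int map_blocks(long offset)
    {
            /* CoW writeback past the current EOF: pretend it never happened */
            if (offset > atomic_load(&i_size))
                    return 0;           /* no mapping; the write is dropped */
            printf("mapping blocks at offset %ld\n", offset);
            return 1;
    }

    int main(void)
    {
            map_blocks(1024);           /* inside EOF: mapped */
            atomic_store(&i_size, 512); /* racing truncate shrinks the file */
            map_blocks(1024);           /* now past EOF: dropped */
            return 0;
    }
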
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 6d37ab43195f..c83f549dc17b 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1872,7 +1872,7 @@ xfs_swap_extents(
*/
lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
lock_flags = XFS_MMAPLOCK_EXCL;
- xfs_lock_two_inodes(ip, tip, XFS_MMAPLOCK_EXCL);
+ xfs_lock_two_inodes(ip, XFS_MMAPLOCK_EXCL, tip, XFS_MMAPLOCK_EXCL);
/* Verify that both files have the same format */
if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
@@ -1919,7 +1919,7 @@ xfs_swap_extents(
 * Lock and join the inodes to the transaction so that transaction commit
* or cancel will unlock the inodes from this point onwards.
*/
- xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
+ xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
lock_flags |= XFS_ILOCK_EXCL;
xfs_trans_ijoin(tp, ip, 0);
xfs_trans_ijoin(tp, tip, 0);
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 4c6e86d861fd..d1da2ee9e6db 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -236,6 +236,7 @@ _xfs_buf_alloc(
init_completion(&bp->b_iowait);
INIT_LIST_HEAD(&bp->b_lru);
INIT_LIST_HEAD(&bp->b_list);
+ INIT_LIST_HEAD(&bp->b_li_list);
sema_init(&bp->b_sema, 0); /* held, no waiters */
spin_lock_init(&bp->b_lock);
XB_SET_OWNER(bp);
@@ -585,7 +586,7 @@ _xfs_buf_find(
* returning a specific error on buffer lookup failures.
*/
xfs_alert(btp->bt_mount,
- "%s: Block out of range: block 0x%llx, EOFS 0x%llx ",
+ "%s: daddr 0x%llx out of range, EOFS 0x%llx",
__func__, cmap.bm_bn, eofs);
WARN_ON(1);
return NULL;
@@ -1180,13 +1181,14 @@ xfs_buf_ioend_async(
}
void
-xfs_buf_ioerror(
+__xfs_buf_ioerror(
xfs_buf_t *bp,
- int error)
+ int error,
+ xfs_failaddr_t failaddr)
{
ASSERT(error <= 0 && error >= -1000);
bp->b_error = error;
- trace_xfs_buf_ioerror(bp, error, _RET_IP_);
+ trace_xfs_buf_ioerror(bp, error, failaddr);
}
void
@@ -1195,8 +1197,9 @@ xfs_buf_ioerror_alert(
const char *func)
{
xfs_alert(bp->b_target->bt_mount,
-"metadata I/O error: block 0x%llx (\"%s\") error %d numblks %d",
- (uint64_t)XFS_BUF_ADDR(bp), func, -bp->b_error, bp->b_length);
+"metadata I/O error in \"%s\" at daddr 0x%llx len %d error %d",
+ func, (uint64_t)XFS_BUF_ADDR(bp), bp->b_length,
+ -bp->b_error);
}
int
@@ -1378,9 +1381,10 @@ _xfs_buf_ioapply(
*/
if (xfs_sb_version_hascrc(&mp->m_sb)) {
xfs_warn(mp,
- "%s: no ops on block 0x%llx/0x%x",
+ "%s: no buf ops on daddr 0x%llx len %d",
__func__, bp->b_bn, bp->b_length);
- xfs_hex_dump(bp->b_addr, 64);
+ xfs_hex_dump(bp->b_addr,
+ XFS_CORRUPTION_DUMP_LEN);
dump_stack();
}
}
@@ -1671,7 +1675,7 @@ xfs_wait_buftarg(
list_del_init(&bp->b_lru);
if (bp->b_flags & XBF_WRITE_FAIL) {
xfs_alert(btp->bt_mount,
-"Corruption Alert: Buffer at block 0x%llx had permanent write failures!",
+"Corruption Alert: Buffer at daddr 0x%llx had permanent write failures!",
(long long)bp->b_bn);
xfs_alert(btp->bt_mount,
"Please run xfs_repair to determine the extent of the problem.");
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index f873bb786824..2f4c91452861 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -140,6 +140,7 @@ struct xfs_buf_ops {
char *name;
void (*verify_read)(struct xfs_buf *);
void (*verify_write)(struct xfs_buf *);
+ xfs_failaddr_t (*verify_struct)(struct xfs_buf *bp);
};
typedef struct xfs_buf {
@@ -175,7 +176,8 @@ typedef struct xfs_buf {
struct workqueue_struct *b_ioend_wq; /* I/O completion wq */
xfs_buf_iodone_t b_iodone; /* I/O completion function */
struct completion b_iowait; /* queue for I/O waiters */
- void *b_fspriv;
+ void *b_log_item;
+ struct list_head b_li_list; /* Log items list head */
struct xfs_trans *b_transp;
struct page **b_pages; /* array of page pointers */
struct page *b_page_array[XB_PAGES]; /* inline pages */
@@ -315,7 +317,9 @@ extern void xfs_buf_unlock(xfs_buf_t *);
/* Buffer Read and Write Routines */
extern int xfs_bwrite(struct xfs_buf *bp);
extern void xfs_buf_ioend(struct xfs_buf *bp);
-extern void xfs_buf_ioerror(xfs_buf_t *, int);
+extern void __xfs_buf_ioerror(struct xfs_buf *bp, int error,
+ xfs_failaddr_t failaddr);
+#define xfs_buf_ioerror(bp, err) __xfs_buf_ioerror((bp), (err), __this_address)
extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func);
extern void xfs_buf_submit(struct xfs_buf *bp);
extern int xfs_buf_submit_wait(struct xfs_buf *bp);
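
The macro at the end of this hunk is the interesting part: every existing
xfs_buf_ioerror() caller keeps its two-argument form, but the failure
address is now captured at the call site instead of inside the callee.
A minimal userspace sketch of the same trick, built on the GCC/clang
labels-as-values extension that the kernel's __this_address uses (all
identifiers here are illustrative):

    #include <stdio.h>

    typedef void *fail_addr_t;

    /* capture the address of this point in the code (GCC/clang extension) */
    #define this_address \
            ({ __label__ __here; __here: (void)0; (void *)&&__here; })

    struct buf { int error; fail_addr_t fail; };

    static void __buf_ioerror(struct buf *bp, int error, fail_addr_t fa)
    {
            bp->error = error;
            bp->fail = fa;          /* record who raised the error */
    }

    /* the old two-argument form survives; the macro adds the location */
    #define buf_ioerror(bp, err) __buf_ioerror((bp), (err), this_address)

    int main(void)
    {
            struct buf b = { 0 };

            buf_ioerror(&b, -5);
            printf("error %d raised at %p\n", b.error, b.fail);
            return 0;
    }
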
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index e0a0af0946f2..270ddb4d2313 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -61,14 +61,14 @@ xfs_buf_log_format_size(
*/
STATIC void
xfs_buf_item_size_segment(
- struct xfs_buf_log_item *bip,
- struct xfs_buf_log_format *blfp,
- int *nvecs,
- int *nbytes)
+ struct xfs_buf_log_item *bip,
+ struct xfs_buf_log_format *blfp,
+ int *nvecs,
+ int *nbytes)
{
- struct xfs_buf *bp = bip->bli_buf;
- int next_bit;
- int last_bit;
+ struct xfs_buf *bp = bip->bli_buf;
+ int next_bit;
+ int last_bit;
last_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
if (last_bit == -1)
@@ -218,12 +218,12 @@ xfs_buf_item_format_segment(
uint offset,
struct xfs_buf_log_format *blfp)
{
- struct xfs_buf *bp = bip->bli_buf;
- uint base_size;
- int first_bit;
- int last_bit;
- int next_bit;
- uint nbits;
+ struct xfs_buf *bp = bip->bli_buf;
+ uint base_size;
+ int first_bit;
+ int last_bit;
+ int next_bit;
+ uint nbits;
/* copy the flags across from the base format item */
blfp->blf_flags = bip->__bli_format.blf_flags;
@@ -406,12 +406,12 @@ xfs_buf_item_unpin(
int remove)
{
struct xfs_buf_log_item *bip = BUF_ITEM(lip);
- xfs_buf_t *bp = bip->bli_buf;
- struct xfs_ail *ailp = lip->li_ailp;
- int stale = bip->bli_flags & XFS_BLI_STALE;
- int freed;
+ xfs_buf_t *bp = bip->bli_buf;
+ struct xfs_ail *ailp = lip->li_ailp;
+ int stale = bip->bli_flags & XFS_BLI_STALE;
+ int freed;
- ASSERT(bp->b_fspriv == bip);
+ ASSERT(bp->b_log_item == bip);
ASSERT(atomic_read(&bip->bli_refcount) > 0);
trace_xfs_buf_item_unpin(bip);
@@ -456,13 +456,14 @@ xfs_buf_item_unpin(
*/
if (bip->bli_flags & XFS_BLI_STALE_INODE) {
xfs_buf_do_callbacks(bp);
- bp->b_fspriv = NULL;
+ bp->b_log_item = NULL;
+ list_del_init(&bp->b_li_list);
bp->b_iodone = NULL;
} else {
spin_lock(&ailp->xa_lock);
xfs_trans_ail_delete(ailp, lip, SHUTDOWN_LOG_IO_ERROR);
xfs_buf_item_relse(bp);
- ASSERT(bp->b_fspriv == NULL);
+ ASSERT(bp->b_log_item == NULL);
}
xfs_buf_relse(bp);
} else if (freed && remove) {
@@ -722,18 +723,15 @@ xfs_buf_item_free_format(
/*
* Allocate a new buf log item to go with the given buffer.
- * Set the buffer's b_fsprivate field to point to the new
- * buf log item. If there are other item's attached to the
- * buffer (see xfs_buf_attach_iodone() below), then put the
- * buf log item at the front.
+ * Set the buffer's b_log_item field to point to the new
+ * buf log item.
*/
int
xfs_buf_item_init(
struct xfs_buf *bp,
struct xfs_mount *mp)
{
- struct xfs_log_item *lip = bp->b_fspriv;
- struct xfs_buf_log_item *bip;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
int chunks;
int map_size;
int error;
@@ -741,13 +739,14 @@ xfs_buf_item_init(
/*
* Check to see if there is already a buf log item for
- * this buffer. If there is, it is guaranteed to be
- * the first. If we do already have one, there is
+ * this buffer. If we do already have one, there is
* nothing to do here so return.
*/
ASSERT(bp->b_target->bt_mount == mp);
- if (lip != NULL && lip->li_type == XFS_LI_BUF)
+ if (bip != NULL) {
+ ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
return 0;
+ }
bip = kmem_zone_zalloc(xfs_buf_item_zone, KM_SLEEP);
xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
@@ -781,13 +780,7 @@ xfs_buf_item_init(
bip->bli_formats[i].blf_map_size = map_size;
}
- /*
- * Put the buf item into the list of items attached to the
- * buffer at the front.
- */
- if (bp->b_fspriv)
- bip->bli_item.li_bio_list = bp->b_fspriv;
- bp->b_fspriv = bip;
+ bp->b_log_item = bip;
xfs_buf_hold(bp);
return 0;
}
@@ -880,7 +873,7 @@ xfs_buf_item_log_segment(
*/
void
xfs_buf_item_log(
- xfs_buf_log_item_t *bip,
+ struct xfs_buf_log_item *bip,
uint first,
uint last)
{
@@ -943,7 +936,7 @@ xfs_buf_item_dirty_format(
STATIC void
xfs_buf_item_free(
- xfs_buf_log_item_t *bip)
+ struct xfs_buf_log_item *bip)
{
xfs_buf_item_free_format(bip);
kmem_free(bip->bli_item.li_lv_shadow);
@@ -961,13 +954,13 @@ void
xfs_buf_item_relse(
xfs_buf_t *bp)
{
- xfs_buf_log_item_t *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
trace_xfs_buf_item_relse(bp, _RET_IP_);
ASSERT(!(bip->bli_item.li_flags & XFS_LI_IN_AIL));
- bp->b_fspriv = bip->bli_item.li_bio_list;
- if (bp->b_fspriv == NULL)
+ bp->b_log_item = NULL;
+ if (list_empty(&bp->b_li_list))
bp->b_iodone = NULL;
xfs_buf_rele(bp);
@@ -980,9 +973,7 @@ xfs_buf_item_relse(
* to be called when the buffer's I/O completes. If it is not set
* already, set the buffer's b_iodone() routine to be
* xfs_buf_iodone_callbacks() and link the log item into the list of
- * items rooted at b_fsprivate. Items are always added as the second
- * entry in the list if there is a first, because the buf item code
- * assumes that the buf log item is first.
+ * items rooted at b_li_list.
*/
void
xfs_buf_attach_iodone(
@@ -990,18 +981,10 @@ xfs_buf_attach_iodone(
void (*cb)(xfs_buf_t *, xfs_log_item_t *),
xfs_log_item_t *lip)
{
- xfs_log_item_t *head_lip;
-
ASSERT(xfs_buf_islocked(bp));
lip->li_cb = cb;
- head_lip = bp->b_fspriv;
- if (head_lip) {
- lip->li_bio_list = head_lip->li_bio_list;
- head_lip->li_bio_list = lip;
- } else {
- bp->b_fspriv = lip;
- }
+ list_add_tail(&lip->li_bio_list, &bp->b_li_list);
ASSERT(bp->b_iodone == NULL ||
bp->b_iodone == xfs_buf_iodone_callbacks);
@@ -1011,12 +994,12 @@ xfs_buf_attach_iodone(
/*
* We can have many callbacks on a buffer. Running the callbacks individually
* can cause a lot of contention on the AIL lock, so we allow for a single
- * callback to be able to scan the remaining lip->li_bio_list for other items
- * of the same type and callback to be processed in the first call.
+ * callback to be able to scan the remaining items in bp->b_li_list for other
+ * items of the same type and callback to be processed in the first call.
*
* As a result, the loop walking the callback list below will also modify the
 * list. It removes the first item from the list and then runs the callback.
- * The loop then restarts from the new head of the list. This allows the
+ * The loop then restarts from the new first item in the list. This allows the
* callback to scan and modify the list attached to the buffer and we don't
* have to care about maintaining a next item pointer.
*/
@@ -1024,18 +1007,26 @@ STATIC void
xfs_buf_do_callbacks(
struct xfs_buf *bp)
{
+ struct xfs_buf_log_item *blip = bp->b_log_item;
struct xfs_log_item *lip;
- while ((lip = bp->b_fspriv) != NULL) {
- bp->b_fspriv = lip->li_bio_list;
- ASSERT(lip->li_cb != NULL);
+ /* If there is a buf_log_item attached, run its callback */
+ if (blip) {
+ lip = &blip->bli_item;
+ lip->li_cb(bp, lip);
+ }
+
+ while (!list_empty(&bp->b_li_list)) {
+ lip = list_first_entry(&bp->b_li_list, struct xfs_log_item,
+ li_bio_list);
+
/*
- * Clear the next pointer so we don't have any
+ * Remove the item from the list, so we don't have any
* confusion if the item is added to another buf.
* Don't touch the log item after calling its
* callback, because it could have freed itself.
*/
- lip->li_bio_list = NULL;
+ list_del_init(&lip->li_bio_list);
lip->li_cb(bp, lip);
}
}
@@ -1052,13 +1043,22 @@ STATIC void
xfs_buf_do_callbacks_fail(
struct xfs_buf *bp)
{
- struct xfs_log_item *next;
- struct xfs_log_item *lip = bp->b_fspriv;
- struct xfs_ail *ailp = lip->li_ailp;
+ struct xfs_log_item *lip;
+ struct xfs_ail *ailp;
+ /*
+ * Buffer log item errors are handled directly by xfs_buf_item_push()
+ * and xfs_buf_iodone_callback_error, and they have no IO error
+ * callbacks. Check only for items in b_li_list.
+ */
+ if (list_empty(&bp->b_li_list))
+ return;
+
+ lip = list_first_entry(&bp->b_li_list, struct xfs_log_item,
+ li_bio_list);
+ ailp = lip->li_ailp;
spin_lock(&ailp->xa_lock);
- for (; lip; lip = next) {
- next = lip->li_bio_list;
+ list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
if (lip->li_ops->iop_error)
lip->li_ops->iop_error(lip, bp);
}
@@ -1069,13 +1069,23 @@ static bool
xfs_buf_iodone_callback_error(
struct xfs_buf *bp)
{
- struct xfs_log_item *lip = bp->b_fspriv;
- struct xfs_mount *mp = lip->li_mountp;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
+ struct xfs_log_item *lip;
+ struct xfs_mount *mp;
static ulong lasttime;
static xfs_buftarg_t *lasttarg;
struct xfs_error_cfg *cfg;
/*
+ * The failed buffer might not have a buf_log_item attached or the
+ * log_item list might be empty. Get the mp from the available
+ * xfs_log_item.
+ */
+ lip = list_first_entry_or_null(&bp->b_li_list, struct xfs_log_item,
+ li_bio_list);
+ mp = lip ? lip->li_mountp : bip->bli_item.li_mountp;
+
+ /*
* If we've already decided to shutdown the filesystem because of
* I/O errors, there's no point in giving this a retry.
*/
@@ -1183,7 +1193,8 @@ xfs_buf_iodone_callbacks(
bp->b_first_retry_time = 0;
xfs_buf_do_callbacks(bp);
- bp->b_fspriv = NULL;
+ bp->b_log_item = NULL;
+ list_del_init(&bp->b_li_list);
bp->b_iodone = NULL;
xfs_buf_ioend(bp);
}
@@ -1228,10 +1239,9 @@ xfs_buf_iodone(
bool
xfs_buf_resubmit_failed_buffers(
struct xfs_buf *bp,
- struct xfs_log_item *lip,
struct list_head *buffer_list)
{
- struct xfs_log_item *next;
+ struct xfs_log_item *lip;
/*
* Clear XFS_LI_FAILED flag from all items before resubmit
@@ -1239,10 +1249,8 @@ xfs_buf_resubmit_failed_buffers(
 * XFS_LI_FAILED set/clear is protected by xa_lock; the caller of this
 * function already has it acquired
*/
- for (; lip; lip = next) {
- next = lip->li_bio_list;
+ list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
xfs_clear_li_failed(lip);
- }
/* Add this buffer back to the delayed write list */
return xfs_buf_delwri_queue(bp, buffer_list);
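
All of the b_fspriv churn in this file replaces a hand-maintained singly
linked chain (li_bio_list as a bare next pointer) with a kernel-style
list_head, so iteration with removal no longer needs the prev/next
bookkeeping the deleted hunks were full of. A compact userspace
re-creation of the callback loop above, assuming nothing beyond standard
C (the real helpers live in <linux/list.h>):

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    #define LIST_HEAD_INIT(n)     { &(n), &(n) }
    #define container_of(p, t, m) ((t *)((char *)(p) - offsetof(t, m)))

    static void list_add_tail(struct list_head *e, struct list_head *head)
    {
            e->prev = head->prev;
            e->next = head;
            head->prev->next = e;
            head->prev = e;
    }

    static void list_del_init(struct list_head *e)
    {
            e->prev->next = e->next;
            e->next->prev = e->prev;
            e->next = e->prev = e;
    }

    struct item { int id; struct list_head node; };

    int main(void)
    {
            struct list_head head = LIST_HEAD_INIT(head);
            struct item a = { 1, { NULL, NULL } }, b = { 2, { NULL, NULL } };

            list_add_tail(&a.node, &head);
            list_add_tail(&b.node, &head);

            /* the "remove first, run callback, restart" loop from above */
            while (head.next != &head) {
                    struct item *it =
                            container_of(head.next, struct item, node);

                    list_del_init(&it->node); /* unlink before the callback */
                    printf("callback for item %d\n", it->id);
            }
            return 0;
    }
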
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h
index 9690ce62c9a7..643f53dcfe51 100644
--- a/fs/xfs/xfs_buf_item.h
+++ b/fs/xfs/xfs_buf_item.h
@@ -50,7 +50,7 @@ struct xfs_buf_log_item;
* needed to log buffers. It tracks how many times the lock has been
* locked, and which 128 byte chunks of the buffer are dirty.
*/
-typedef struct xfs_buf_log_item {
+struct xfs_buf_log_item {
xfs_log_item_t bli_item; /* common item structure */
struct xfs_buf *bli_buf; /* real buffer pointer */
unsigned int bli_flags; /* misc flags */
@@ -59,11 +59,11 @@ typedef struct xfs_buf_log_item {
int bli_format_count; /* count of headers */
struct xfs_buf_log_format *bli_formats; /* array of in-log header ptrs */
struct xfs_buf_log_format __bli_format; /* embedded in-log header */
-} xfs_buf_log_item_t;
+};
int xfs_buf_item_init(struct xfs_buf *, struct xfs_mount *);
void xfs_buf_item_relse(struct xfs_buf *);
-void xfs_buf_item_log(xfs_buf_log_item_t *, uint, uint);
+void xfs_buf_item_log(struct xfs_buf_log_item *, uint, uint);
bool xfs_buf_item_dirty_format(struct xfs_buf_log_item *);
void xfs_buf_attach_iodone(struct xfs_buf *,
void(*)(struct xfs_buf *, xfs_log_item_t *),
@@ -71,7 +71,6 @@ void xfs_buf_attach_iodone(struct xfs_buf *,
void xfs_buf_iodone_callbacks(struct xfs_buf *);
void xfs_buf_iodone(struct xfs_buf *, struct xfs_log_item *);
bool xfs_buf_resubmit_failed_buffers(struct xfs_buf *,
- struct xfs_log_item *,
struct list_head *);
extern kmem_zone_t *xfs_buf_item_zone;
diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
index 0c58918bc0ad..b6ae3597bfb0 100644
--- a/fs/xfs/xfs_dir2_readdir.c
+++ b/fs/xfs/xfs_dir2_readdir.c
@@ -152,7 +152,6 @@ xfs_dir2_block_getdents(
struct xfs_inode *dp = args->dp; /* incore directory inode */
xfs_dir2_data_hdr_t *hdr; /* block header */
struct xfs_buf *bp; /* buffer for block */
- xfs_dir2_block_tail_t *btp; /* block tail */
xfs_dir2_data_entry_t *dep; /* block data entry */
xfs_dir2_data_unused_t *dup; /* block unused entry */
char *endptr; /* end of the data entries */
@@ -185,9 +184,8 @@ xfs_dir2_block_getdents(
/*
* Set up values for the loop.
*/
- btp = xfs_dir2_block_tail_p(geo, hdr);
ptr = (char *)dp->d_ops->data_entry_p(hdr);
- endptr = (char *)xfs_dir2_block_leaf_p(btp);
+ endptr = xfs_dir3_data_endp(geo, hdr);
/*
* Loop over the data portion of the block.
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index f248708c10ff..43572f8a1b8e 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -399,52 +399,6 @@ error0:
return error;
}
-STATIC int
-xfs_qm_dqrepair(
- struct xfs_mount *mp,
- struct xfs_trans *tp,
- struct xfs_dquot *dqp,
- xfs_dqid_t firstid,
- struct xfs_buf **bpp)
-{
- int error;
- struct xfs_disk_dquot *ddq;
- struct xfs_dqblk *d;
- int i;
-
- /*
- * Read the buffer without verification so we get the corrupted
- * buffer returned to us. make sure we verify it on write, though.
- */
- error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, dqp->q_blkno,
- mp->m_quotainfo->qi_dqchunklen,
- 0, bpp, NULL);
-
- if (error) {
- ASSERT(*bpp == NULL);
- return error;
- }
- (*bpp)->b_ops = &xfs_dquot_buf_ops;
-
- ASSERT(xfs_buf_islocked(*bpp));
- d = (struct xfs_dqblk *)(*bpp)->b_addr;
-
- /* Do the actual repair of dquots in this buffer */
- for (i = 0; i < mp->m_quotainfo->qi_dqperchunk; i++) {
- ddq = &d[i].dd_diskdq;
- error = xfs_dqcheck(mp, ddq, firstid + i,
- dqp->dq_flags & XFS_DQ_ALLTYPES,
- XFS_QMOPT_DQREPAIR, "xfs_qm_dqrepair");
- if (error) {
- /* repair failed, we're screwed */
- xfs_trans_brelse(tp, *bpp);
- return -EIO;
- }
- }
-
- return 0;
-}
-
/*
* Maps a dquot to the buffer containing its on-disk version.
* This returns a ptr to the buffer containing the on-disk dquot
@@ -526,14 +480,6 @@ xfs_qm_dqtobp(
dqp->q_blkno,
mp->m_quotainfo->qi_dqchunklen,
0, &bp, &xfs_dquot_buf_ops);
-
- if (error == -EFSCORRUPTED && (flags & XFS_QMOPT_DQREPAIR)) {
- xfs_dqid_t firstid = (xfs_dqid_t)map.br_startoff *
- mp->m_quotainfo->qi_dqperchunk;
- ASSERT(bp == NULL);
- error = xfs_qm_dqrepair(mp, tp, dqp, firstid, &bp);
- }
-
if (error) {
ASSERT(bp == NULL);
return error;
@@ -1010,6 +956,7 @@ xfs_qm_dqflush(
struct xfs_mount *mp = dqp->q_mount;
struct xfs_buf *bp;
struct xfs_disk_dquot *ddqp;
+ xfs_failaddr_t fa;
int error;
ASSERT(XFS_DQ_IS_LOCKED(dqp));
@@ -1056,9 +1003,10 @@ xfs_qm_dqflush(
/*
* A simple sanity check in case we got a corrupted dquot..
*/
- error = xfs_dqcheck(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
- XFS_QMOPT_DOWARN, "dqflush (incore copy)");
- if (error) {
+ fa = xfs_dquot_verify(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0, 0);
+ if (fa) {
+ xfs_alert(mp, "corrupt dquot ID 0x%x in memory at %pS",
+ be32_to_cpu(ddqp->d_id), fa);
xfs_buf_relse(bp);
xfs_dqfunlock(dqp);
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
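
This hunk shows the new verifier convention end to end: xfs_dquot_verify()
returns an xfs_failaddr_t (NULL on success) rather than an errno, so the
caller both learns that the dquot is bad and can report which check
tripped. A toy version of the convention, reusing the labels-as-values
capture sketched earlier (all identifiers invented):

    #include <stdio.h>

    typedef void *failaddr_t;
    #define this_address \
            ({ __label__ __here; __here: (void)0; (void *)&&__here; })

    struct dquot { unsigned short magic; unsigned int id; };

    /* NULL means "passed"; otherwise the address of the failed check */
    static failaddr_t dquot_verify(const struct dquot *d, unsigned int want_id)
    {
            if (d->magic != 0x4451)     /* "DQ" */
                    return this_address;
            if (d->id != want_id)
                    return this_address;
            return NULL;
    }

    int main(void)
    {
            struct dquot d = { 0x4451, 7 };
            failaddr_t fa = dquot_verify(&d, 8);

            if (fa)
                    printf("corrupt dquot ID 0x%x, check at %p failed\n",
                           d.id, fa);
            return 0;
    }
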
diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c
index 664dea105e76..96eaa6933709 100644
--- a/fs/xfs/xfs_dquot_item.c
+++ b/fs/xfs/xfs_dquot_item.c
@@ -150,10 +150,7 @@ xfs_dquot_item_error(
struct xfs_log_item *lip,
struct xfs_buf *bp)
{
- struct xfs_dquot *dqp;
-
- dqp = DQUOT_ITEM(lip)->qli_dquot;
- ASSERT(!completion_done(&dqp->q_flush));
+ ASSERT(!completion_done(&DQUOT_ITEM(lip)->qli_dquot->q_flush));
xfs_set_li_failed(lip, bp);
}
@@ -179,7 +176,7 @@ xfs_qm_dquot_logitem_push(
if (!xfs_buf_trylock(bp))
return XFS_ITEM_LOCKED;
- if (!xfs_buf_resubmit_failed_buffers(bp, lip, buffer_list))
+ if (!xfs_buf_resubmit_failed_buffers(bp, buffer_list))
rval = XFS_ITEM_FLUSHING;
xfs_buf_unlock(bp);
@@ -212,7 +209,7 @@ xfs_qm_dquot_logitem_push(
error = xfs_qm_dqflush(dqp, &bp);
if (error) {
- xfs_warn(dqp->q_mount, "%s: push error %d on dqp %p",
+ xfs_warn(dqp->q_mount, "%s: push error %d on dqp "PTR_FMT,
__func__, error, dqp);
} else {
if (!xfs_buf_delwri_queue(bp, buffer_list))
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index 4c9f35d983b2..ccf520f0b00d 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -24,6 +24,7 @@
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_sysfs.h"
+#include "xfs_inode.h"
#ifdef DEBUG
@@ -314,12 +315,12 @@ xfs_error_report(
struct xfs_mount *mp,
const char *filename,
int linenum,
- void *ra)
+ xfs_failaddr_t failaddr)
{
if (level <= xfs_error_level) {
xfs_alert_tag(mp, XFS_PTAG_ERROR_REPORT,
"Internal error %s at line %d of file %s. Caller %pS",
- tag, linenum, filename, ra);
+ tag, linenum, filename, failaddr);
xfs_stack_trace();
}
@@ -333,11 +334,11 @@ xfs_corruption_error(
void *p,
const char *filename,
int linenum,
- void *ra)
+ xfs_failaddr_t failaddr)
{
if (level <= xfs_error_level)
- xfs_hex_dump(p, 64);
- xfs_error_report(tag, level, mp, filename, linenum, ra);
+ xfs_hex_dump(p, XFS_CORRUPTION_DUMP_LEN);
+ xfs_error_report(tag, level, mp, filename, linenum, failaddr);
xfs_alert(mp, "Corruption detected. Unmount and run xfs_repair");
}
@@ -347,19 +348,62 @@ xfs_corruption_error(
*/
void
xfs_verifier_error(
- struct xfs_buf *bp)
+ struct xfs_buf *bp,
+ int error,
+ xfs_failaddr_t failaddr)
{
- struct xfs_mount *mp = bp->b_target->bt_mount;
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+ xfs_failaddr_t fa;
+
+ fa = failaddr ? failaddr : __return_address;
+ __xfs_buf_ioerror(bp, error, fa);
xfs_alert(mp, "Metadata %s detected at %pS, %s block 0x%llx",
bp->b_error == -EFSBADCRC ? "CRC error" : "corruption",
- __return_address, bp->b_ops->name, bp->b_bn);
+ fa, bp->b_ops->name, bp->b_bn);
xfs_alert(mp, "Unmount and run xfs_repair");
if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
- xfs_alert(mp, "First 64 bytes of corrupted metadata buffer:");
- xfs_hex_dump(xfs_buf_offset(bp, 0), 64);
+ xfs_alert(mp, "First %d bytes of corrupted metadata buffer:",
+ XFS_CORRUPTION_DUMP_LEN);
+ xfs_hex_dump(xfs_buf_offset(bp, 0), XFS_CORRUPTION_DUMP_LEN);
+ }
+
+ if (xfs_error_level >= XFS_ERRLEVEL_HIGH)
+ xfs_stack_trace();
+}
+
+/*
+ * Warnings for inode corruption problems. Don't bother with the stack
+ * trace unless the error level is turned up high.
+ */
+void
+xfs_inode_verifier_error(
+ struct xfs_inode *ip,
+ int error,
+ const char *name,
+ void *buf,
+ size_t bufsz,
+ xfs_failaddr_t failaddr)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ xfs_failaddr_t fa;
+ int sz;
+
+ fa = failaddr ? failaddr : __return_address;
+
+ xfs_alert(mp, "Metadata %s detected at %pS, inode 0x%llx %s",
+ error == -EFSBADCRC ? "CRC error" : "corruption",
+ fa, ip->i_ino, name);
+
+ xfs_alert(mp, "Unmount and run xfs_repair");
+
+ if (buf && xfs_error_level >= XFS_ERRLEVEL_LOW) {
+ sz = min_t(size_t, XFS_CORRUPTION_DUMP_LEN, bufsz);
+ xfs_alert(mp, "First %d bytes of corrupted metadata buffer:",
+ sz);
+ xfs_hex_dump(buf, sz);
}
if (xfs_error_level >= XFS_ERRLEVEL_HIGH)
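
Note the asymmetry above: the buffer-based xfs_verifier_error() can always
dump XFS_CORRUPTION_DUMP_LEN bytes, but xfs_inode_verifier_error() clamps
the dump to min(bufsz, XFS_CORRUPTION_DUMP_LEN) because an inline fork can
be shorter than the cap. A self-contained sketch of that clamping, with
invented names:

    #include <stdio.h>
    #include <string.h>

    #define CORRUPTION_DUMP_LEN 128

    static void hex_dump(const unsigned char *buf, size_t len)
    {
            for (size_t i = 0; i < len; i++)
                    printf("%02x%c", buf[i], (i % 16 == 15) ? '\n' : ' ');
            putchar('\n');
    }

    static void report_corruption(const void *buf, size_t bufsz)
    {
            size_t sz = bufsz < CORRUPTION_DUMP_LEN
                            ? bufsz : CORRUPTION_DUMP_LEN;

            printf("First %zu bytes of corrupted metadata buffer:\n", sz);
            hex_dump(buf, sz);
    }

    int main(void)
    {
            unsigned char fork[32];

            memset(fork, 0xab, sizeof(fork));
            report_corruption(fork, sizeof(fork)); /* dumps 32, not 128 */
            return 0;
    }
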
diff --git a/fs/xfs/xfs_error.h b/fs/xfs/xfs_error.h
index ea816c1bf8db..7e728c5a46b8 100644
--- a/fs/xfs/xfs_error.h
+++ b/fs/xfs/xfs_error.h
@@ -21,11 +21,16 @@
struct xfs_mount;
extern void xfs_error_report(const char *tag, int level, struct xfs_mount *mp,
- const char *filename, int linenum, void *ra);
+ const char *filename, int linenum,
+ xfs_failaddr_t failaddr);
extern void xfs_corruption_error(const char *tag, int level,
struct xfs_mount *mp, void *p, const char *filename,
- int linenum, void *ra);
-extern void xfs_verifier_error(struct xfs_buf *bp);
+ int linenum, xfs_failaddr_t failaddr);
+extern void xfs_verifier_error(struct xfs_buf *bp, int error,
+ xfs_failaddr_t failaddr);
+extern void xfs_inode_verifier_error(struct xfs_inode *ip, int error,
+ const char *name, void *buf, size_t bufsz,
+ xfs_failaddr_t failaddr);
#define XFS_ERROR_REPORT(e, lvl, mp) \
xfs_error_report(e, lvl, mp, __FILE__, __LINE__, __return_address)
@@ -37,6 +42,9 @@ extern void xfs_verifier_error(struct xfs_buf *bp);
#define XFS_ERRLEVEL_LOW 1
#define XFS_ERRLEVEL_HIGH 5
+/* Dump 128 bytes of any corrupt buffer */
+#define XFS_CORRUPTION_DUMP_LEN (128)
+
/*
* Macros to set EFSCORRUPTED & return/branch.
*/
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 60a2e128cb6a..8b4545623e25 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -49,83 +49,6 @@
* File system operations
*/
-int
-xfs_fs_geometry(
- xfs_mount_t *mp,
- xfs_fsop_geom_t *geo,
- int new_version)
-{
-
- memset(geo, 0, sizeof(*geo));
-
- geo->blocksize = mp->m_sb.sb_blocksize;
- geo->rtextsize = mp->m_sb.sb_rextsize;
- geo->agblocks = mp->m_sb.sb_agblocks;
- geo->agcount = mp->m_sb.sb_agcount;
- geo->logblocks = mp->m_sb.sb_logblocks;
- geo->sectsize = mp->m_sb.sb_sectsize;
- geo->inodesize = mp->m_sb.sb_inodesize;
- geo->imaxpct = mp->m_sb.sb_imax_pct;
- geo->datablocks = mp->m_sb.sb_dblocks;
- geo->rtblocks = mp->m_sb.sb_rblocks;
- geo->rtextents = mp->m_sb.sb_rextents;
- geo->logstart = mp->m_sb.sb_logstart;
- ASSERT(sizeof(geo->uuid)==sizeof(mp->m_sb.sb_uuid));
- memcpy(geo->uuid, &mp->m_sb.sb_uuid, sizeof(mp->m_sb.sb_uuid));
- if (new_version >= 2) {
- geo->sunit = mp->m_sb.sb_unit;
- geo->swidth = mp->m_sb.sb_width;
- }
- if (new_version >= 3) {
- geo->version = XFS_FSOP_GEOM_VERSION;
- geo->flags = XFS_FSOP_GEOM_FLAGS_NLINK |
- XFS_FSOP_GEOM_FLAGS_DIRV2 |
- (xfs_sb_version_hasattr(&mp->m_sb) ?
- XFS_FSOP_GEOM_FLAGS_ATTR : 0) |
- (xfs_sb_version_hasquota(&mp->m_sb) ?
- XFS_FSOP_GEOM_FLAGS_QUOTA : 0) |
- (xfs_sb_version_hasalign(&mp->m_sb) ?
- XFS_FSOP_GEOM_FLAGS_IALIGN : 0) |
- (xfs_sb_version_hasdalign(&mp->m_sb) ?
- XFS_FSOP_GEOM_FLAGS_DALIGN : 0) |
- (xfs_sb_version_hasextflgbit(&mp->m_sb) ?
- XFS_FSOP_GEOM_FLAGS_EXTFLG : 0) |
- (xfs_sb_version_hassector(&mp->m_sb) ?
- XFS_FSOP_GEOM_FLAGS_SECTOR : 0) |
- (xfs_sb_version_hasasciici(&mp->m_sb) ?
- XFS_FSOP_GEOM_FLAGS_DIRV2CI : 0) |
- (xfs_sb_version_haslazysbcount(&mp->m_sb) ?
- XFS_FSOP_GEOM_FLAGS_LAZYSB : 0) |
- (xfs_sb_version_hasattr2(&mp->m_sb) ?
- XFS_FSOP_GEOM_FLAGS_ATTR2 : 0) |
- (xfs_sb_version_hasprojid32bit(&mp->m_sb) ?
- XFS_FSOP_GEOM_FLAGS_PROJID32 : 0) |
- (xfs_sb_version_hascrc(&mp->m_sb) ?
- XFS_FSOP_GEOM_FLAGS_V5SB : 0) |
- (xfs_sb_version_hasftype(&mp->m_sb) ?
- XFS_FSOP_GEOM_FLAGS_FTYPE : 0) |
- (xfs_sb_version_hasfinobt(&mp->m_sb) ?
- XFS_FSOP_GEOM_FLAGS_FINOBT : 0) |
- (xfs_sb_version_hassparseinodes(&mp->m_sb) ?
- XFS_FSOP_GEOM_FLAGS_SPINODES : 0) |
- (xfs_sb_version_hasrmapbt(&mp->m_sb) ?
- XFS_FSOP_GEOM_FLAGS_RMAPBT : 0) |
- (xfs_sb_version_hasreflink(&mp->m_sb) ?
- XFS_FSOP_GEOM_FLAGS_REFLINK : 0);
- geo->logsectsize = xfs_sb_version_hassector(&mp->m_sb) ?
- mp->m_sb.sb_logsectsize : BBSIZE;
- geo->rtsectsize = mp->m_sb.sb_blocksize;
- geo->dirblocksize = mp->m_dir_geo->blksize;
- }
- if (new_version >= 4) {
- geo->flags |=
- (xfs_sb_version_haslogv2(&mp->m_sb) ?
- XFS_FSOP_GEOM_FLAGS_LOGV2 : 0);
- geo->logsunit = mp->m_sb.sb_logsunit;
- }
- return 0;
-}
-
static struct xfs_buf *
xfs_growfs_get_hdr_buf(
struct xfs_mount *mp,
@@ -955,7 +878,7 @@ xfs_do_force_shutdown(
if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
xfs_notice(mp,
- "%s(0x%x) called from line %d of file %s. Return address = 0x%p",
+ "%s(0x%x) called from line %d of file %s. Return address = "PTR_FMT,
__func__, flags, lnnum, fname, __return_address);
}
/*
diff --git a/fs/xfs/xfs_fsops.h b/fs/xfs/xfs_fsops.h
index 2954c13a3acd..20484ed5e919 100644
--- a/fs/xfs/xfs_fsops.h
+++ b/fs/xfs/xfs_fsops.h
@@ -18,7 +18,6 @@
#ifndef __XFS_FSOPS_H__
#define __XFS_FSOPS_H__
-extern int xfs_fs_geometry(xfs_mount_t *mp, xfs_fsop_geom_t *geo, int nversion);
extern int xfs_growfs_data(xfs_mount_t *mp, xfs_growfs_data_t *in);
extern int xfs_growfs_log(xfs_mount_t *mp, xfs_growfs_log_t *in);
extern int xfs_fs_counts(xfs_mount_t *mp, xfs_fsop_counts_t *cnt);
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 3861d61fb265..d53a316162d6 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -37,6 +37,7 @@
#include <linux/kthread.h>
#include <linux/freezer.h>
+#include <linux/iversion.h>
/*
* Allocate and initialise an xfs_inode.
@@ -293,15 +294,17 @@ xfs_reinit_inode(
int error;
uint32_t nlink = inode->i_nlink;
uint32_t generation = inode->i_generation;
- uint64_t version = inode->i_version;
+ uint64_t version = inode_peek_iversion(inode);
umode_t mode = inode->i_mode;
+ dev_t dev = inode->i_rdev;
error = inode_init_always(mp->m_super, inode);
set_nlink(inode, nlink);
inode->i_generation = generation;
- inode->i_version = version;
+ inode_set_iversion_queried(inode, version);
inode->i_mode = mode;
+ inode->i_rdev = dev;
return error;
}
@@ -473,6 +476,11 @@ xfs_iget_cache_miss(
if (error)
goto out_destroy;
+ if (!xfs_inode_verify_forks(ip)) {
+ error = -EFSCORRUPTED;
+ goto out_destroy;
+ }
+
trace_xfs_iget_miss(ip);
if ((VFS_I(ip)->i_mode == 0) && !(flags & XFS_IGET_CREATE)) {
@@ -1650,28 +1658,15 @@ xfs_inode_clear_eofblocks_tag(
}
/*
- * Automatic CoW Reservation Freeing
- *
- * These functions automatically garbage collect leftover CoW reservations
- * that were made on behalf of a cowextsize hint when we start to run out
- * of quota or when the reservations sit around for too long. If the file
- * has dirty pages or is undergoing writeback, its CoW reservations will
- * be retained.
- *
- * The actual garbage collection piggybacks off the same code that runs
- * the speculative EOF preallocation garbage collector.
+ * Set ourselves up to free CoW blocks from this file. If it's already clean
+ * then we can bail out quickly, but otherwise we must back off if the file
+ * is undergoing some kind of write.
*/
-STATIC int
-xfs_inode_free_cowblocks(
+static bool
+xfs_prep_free_cowblocks(
struct xfs_inode *ip,
- int flags,
- void *args)
+ struct xfs_ifork *ifp)
{
- int ret;
- struct xfs_eofblocks *eofb = args;
- int match;
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
-
/*
* Just clear the tag if we have an empty cow fork or none at all. It's
* possible the inode was fully unshared since it was originally tagged.
@@ -1679,7 +1674,7 @@ xfs_inode_free_cowblocks(
if (!xfs_is_reflink_inode(ip) || !ifp->if_bytes) {
trace_xfs_inode_free_cowblocks_invalid(ip);
xfs_inode_clear_cowblocks_tag(ip);
- return 0;
+ return false;
}
/*
@@ -1690,6 +1685,35 @@ xfs_inode_free_cowblocks(
mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
atomic_read(&VFS_I(ip)->i_dio_count))
+ return false;
+
+ return true;
+}
+
+/*
+ * Automatic CoW Reservation Freeing
+ *
+ * These functions automatically garbage collect leftover CoW reservations
+ * that were made on behalf of a cowextsize hint when we start to run out
+ * of quota or when the reservations sit around for too long. If the file
+ * has dirty pages or is undergoing writeback, its CoW reservations will
+ * be retained.
+ *
+ * The actual garbage collection piggybacks off the same code that runs
+ * the speculative EOF preallocation garbage collector.
+ */
+STATIC int
+xfs_inode_free_cowblocks(
+ struct xfs_inode *ip,
+ int flags,
+ void *args)
+{
+ struct xfs_eofblocks *eofb = args;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
+ int match;
+ int ret = 0;
+
+ if (!xfs_prep_free_cowblocks(ip, ifp))
return 0;
if (eofb) {
@@ -1710,7 +1734,12 @@ xfs_inode_free_cowblocks(
xfs_ilock(ip, XFS_IOLOCK_EXCL);
xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
- ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
+ /*
+ * Check again, nobody else should be able to dirty blocks or change
+ * the reflink iflag now that we have the first two locks held.
+ */
+ if (xfs_prep_free_cowblocks(ip, ifp))
+ ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
xfs_iunlock(ip, XFS_IOLOCK_EXCL);
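
The whole point of factoring out xfs_prep_free_cowblocks() is to run the
same predicate twice: once unlocked as a cheap filter, and once more after
the iolock and mmaplock are held, since the file may have gained dirty
pages or I/O in between. The classic shape of that check/lock/re-check
pattern, in a minimal pthread sketch (names invented):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static bool has_cow_blocks = true;

    static bool prep_free_cowblocks(void) { return has_cow_blocks; }

    static int free_cowblocks(void)
    {
            int ret = 0;

            if (!prep_free_cowblocks())     /* unlocked fast path */
                    return 0;

            pthread_mutex_lock(&lock);
            if (prep_free_cowblocks()) {    /* re-check under the lock */
                    has_cow_blocks = false; /* the real work goes here */
                    ret = 1;
            }
            pthread_mutex_unlock(&lock);
            return ret;
    }

    int main(void)
    {
            printf("freed: %d\n", free_cowblocks());
            printf("freed: %d\n", free_cowblocks()); /* second call skips */
            return 0;
    }
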
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 6f95bdb408ce..604ee384a00a 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -16,6 +16,7 @@
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/log2.h>
+#include <linux/iversion.h>
#include "xfs.h"
#include "xfs_fs.h"
@@ -546,23 +547,36 @@ again:
/*
* xfs_lock_two_inodes() can only be used to lock one type of lock at a time -
- * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
- * lock more than one at a time, lockdep will report false positives saying we
- * have violated locking orders.
+ * the mmaplock or the ilock, but not more than one type at a time. If we lock
+ * more than one at a time, lockdep will report false positives saying we have
+ * violated locking orders. The iolock must be double-locked separately since
+ * we use i_rwsem for that. We now support taking one lock EXCL and the other
+ * SHARED.
*/
void
xfs_lock_two_inodes(
- xfs_inode_t *ip0,
- xfs_inode_t *ip1,
- uint lock_mode)
+ struct xfs_inode *ip0,
+ uint ip0_mode,
+ struct xfs_inode *ip1,
+ uint ip1_mode)
{
- xfs_inode_t *temp;
+ struct xfs_inode *temp;
+ uint mode_temp;
int attempts = 0;
xfs_log_item_t *lp;
- ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
- if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL))
- ASSERT(!(lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
+ ASSERT(hweight32(ip0_mode) == 1);
+ ASSERT(hweight32(ip1_mode) == 1);
+ ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
+ ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
+ ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
+ !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
+ ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
+ !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
+ ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
+ !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
+ ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
+ !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
ASSERT(ip0->i_ino != ip1->i_ino);
@@ -570,10 +584,13 @@ xfs_lock_two_inodes(
temp = ip0;
ip0 = ip1;
ip1 = temp;
+ mode_temp = ip0_mode;
+ ip0_mode = ip1_mode;
+ ip1_mode = mode_temp;
}
again:
- xfs_ilock(ip0, xfs_lock_inumorder(lock_mode, 0));
+ xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0));
/*
* If the first lock we have locked is in the AIL, we must TRY to get
@@ -582,18 +599,17 @@ xfs_lock_two_inodes(
*/
lp = (xfs_log_item_t *)ip0->i_itemp;
if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
- if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(lock_mode, 1))) {
- xfs_iunlock(ip0, lock_mode);
+ if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) {
+ xfs_iunlock(ip0, ip0_mode);
if ((++attempts % 5) == 0)
delay(1); /* Don't just spin the CPU */
goto again;
}
} else {
- xfs_ilock(ip1, xfs_lock_inumorder(lock_mode, 1));
+ xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1));
}
}
-
void
__xfs_iflock(
struct xfs_inode *ip)
@@ -832,7 +848,7 @@ xfs_ialloc(
ip->i_d.di_flags = 0;
if (ip->i_d.di_version == 3) {
- inode->i_version = 1;
+ inode_set_iversion(inode, 1);
ip->i_d.di_flags2 = 0;
ip->i_d.di_cowextsize = 0;
ip->i_d.di_crtime.t_sec = (int32_t)tv.tv_sec;
@@ -1421,7 +1437,7 @@ xfs_link(
if (error)
goto std_return;
- xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL);
+ xfs_lock_two_inodes(sip, XFS_ILOCK_EXCL, tdp, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);
@@ -2214,7 +2230,7 @@ xfs_ifree_cluster(
xfs_buf_t *bp;
xfs_inode_t *ip;
xfs_inode_log_item_t *iip;
- xfs_log_item_t *lip;
+ struct xfs_log_item *lip;
struct xfs_perag *pag;
xfs_ino_t inum;
@@ -2272,8 +2288,7 @@ xfs_ifree_cluster(
* stale first, we will not attempt to lock them in the loop
* below as the XFS_ISTALE flag will be set.
*/
- lip = bp->b_fspriv;
- while (lip) {
+ list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
if (lip->li_type == XFS_LI_INODE) {
iip = (xfs_inode_log_item_t *)lip;
ASSERT(iip->ili_logged == 1);
@@ -2283,7 +2298,6 @@ xfs_ifree_cluster(
&iip->ili_item.li_lsn);
xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
}
- lip = lip->li_bio_list;
}
@@ -2451,6 +2465,7 @@ xfs_ifree(
VFS_I(ip)->i_mode = 0; /* mark incore inode as free */
ip->i_d.di_flags = 0;
+ ip->i_d.di_flags2 = 0;
ip->i_d.di_dmevmask = 0;
ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */
ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
@@ -2586,7 +2601,7 @@ xfs_remove(
goto std_return;
}
- xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL);
+ xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
@@ -3479,6 +3494,36 @@ abort_out:
return error;
}
+/*
+ * If there are inline format data / attr forks attached to this inode,
+ * make sure they're not corrupt.
+ */
+bool
+xfs_inode_verify_forks(
+ struct xfs_inode *ip)
+{
+ struct xfs_ifork *ifp;
+ xfs_failaddr_t fa;
+
+ fa = xfs_ifork_verify_data(ip, &xfs_default_ifork_ops);
+ if (fa) {
+ ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+ xfs_inode_verifier_error(ip, -EFSCORRUPTED, "data fork",
+ ifp->if_u1.if_data, ifp->if_bytes, fa);
+ return false;
+ }
+
+ fa = xfs_ifork_verify_attr(ip, &xfs_default_ifork_ops);
+ if (fa) {
+ ifp = XFS_IFORK_PTR(ip, XFS_ATTR_FORK);
+ xfs_inode_verifier_error(ip, -EFSCORRUPTED, "attr fork",
+ ifp ? ifp->if_u1.if_data : NULL,
+ ifp ? ifp->if_bytes : 0, fa);
+ return false;
+ }
+ return true;
+}
+
STATIC int
xfs_iflush_int(
struct xfs_inode *ip,
@@ -3501,7 +3546,7 @@ xfs_iflush_int(
if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
mp, XFS_ERRTAG_IFLUSH_1)) {
xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
- "%s: Bad inode %Lu magic number 0x%x, ptr 0x%p",
+ "%s: Bad inode %Lu magic number 0x%x, ptr "PTR_FMT,
__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
goto corrupt_out;
}
@@ -3511,7 +3556,7 @@ xfs_iflush_int(
(ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
mp, XFS_ERRTAG_IFLUSH_3)) {
xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
- "%s: Bad regular inode %Lu, ptr 0x%p",
+ "%s: Bad regular inode %Lu, ptr "PTR_FMT,
__func__, ip->i_ino, ip);
goto corrupt_out;
}
@@ -3522,7 +3567,7 @@ xfs_iflush_int(
(ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
mp, XFS_ERRTAG_IFLUSH_4)) {
xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
- "%s: Bad directory inode %Lu, ptr 0x%p",
+ "%s: Bad directory inode %Lu, ptr "PTR_FMT,
__func__, ip->i_ino, ip);
goto corrupt_out;
}
@@ -3531,7 +3576,7 @@ xfs_iflush_int(
ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
"%s: detected corrupt incore inode %Lu, "
- "total extents = %d, nblocks = %Ld, ptr 0x%p",
+ "total extents = %d, nblocks = %Ld, ptr "PTR_FMT,
__func__, ip->i_ino,
ip->i_d.di_nextents + ip->i_d.di_anextents,
ip->i_d.di_nblocks, ip);
@@ -3540,7 +3585,7 @@ xfs_iflush_int(
if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
mp, XFS_ERRTAG_IFLUSH_6)) {
xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
- "%s: bad inode %Lu, forkoff 0x%x, ptr 0x%p",
+ "%s: bad inode %Lu, forkoff 0x%x, ptr "PTR_FMT,
__func__, ip->i_ino, ip->i_d.di_forkoff, ip);
goto corrupt_out;
}
@@ -3557,10 +3602,8 @@ xfs_iflush_int(
if (ip->i_d.di_version < 3)
ip->i_d.di_flushiter++;
- /* Check the inline directory data. */
- if (S_ISDIR(VFS_I(ip)->i_mode) &&
- ip->i_d.di_format == XFS_DINODE_FMT_LOCAL &&
- xfs_dir2_sf_verify(ip))
+ /* Check the inline fork data before we write out. */
+ if (!xfs_inode_verify_forks(ip))
goto corrupt_out;
/*
@@ -3623,7 +3666,7 @@ xfs_iflush_int(
/* generate the checksum. */
xfs_dinode_calc_crc(mp, dip);
- ASSERT(bp->b_fspriv != NULL);
+ ASSERT(!list_empty(&bp->b_li_list));
ASSERT(bp->b_iodone != NULL);
return 0;
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index d383e392ec9d..3e8dc990d41c 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -423,7 +423,8 @@ void xfs_iunpin_wait(xfs_inode_t *);
#define xfs_ipincount(ip) ((unsigned int) atomic_read(&ip->i_pincount))
int xfs_iflush(struct xfs_inode *, struct xfs_buf **);
-void xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint);
+void xfs_lock_two_inodes(struct xfs_inode *ip0, uint ip0_mode,
+ struct xfs_inode *ip1, uint ip1_mode);
xfs_extlen_t xfs_get_extsz_hint(struct xfs_inode *ip);
xfs_extlen_t xfs_get_cowextsz_hint(struct xfs_inode *ip);
@@ -491,4 +492,6 @@ extern struct kmem_zone *xfs_inode_zone;
/* The default CoW extent size hint. */
#define XFS_DEFAULT_COWEXTSZ_HINT 32
+bool xfs_inode_verify_forks(struct xfs_inode *ip);
+
#endif /* __XFS_INODE_H__ */
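
The prototype change mirrors the new calling convention seen throughout
this series: each inode carries its own lock mode, and xfs_lock_two_inodes()
still sorts by inode number but must swap the modes together with the
pointers. A hedged pthread sketch of the ordering rule, with a plain mutex
standing in for the shared/exclusive ilock:

    #include <pthread.h>
    #include <stdio.h>

    struct inode { unsigned long ino; pthread_mutex_t lock; };

    /* lock the lower inode number first to avoid ABBA deadlocks; the
     * (inode, mode) pairs must travel together through the swap */
    static void lock_two_inodes(struct inode *i0, int mode0,
                                struct inode *i1, int mode1)
    {
            if (i0->ino > i1->ino) {
                    struct inode *ti = i0; i0 = i1; i1 = ti;
                    int tm = mode0; mode0 = mode1; mode1 = tm;
            }
            pthread_mutex_lock(&i0->lock); /* a real ilock honours mode0 */
            pthread_mutex_lock(&i1->lock); /* ... and mode1 here */
            (void)mode0; (void)mode1;
    }

    int main(void)
    {
            struct inode a = { 42, PTHREAD_MUTEX_INITIALIZER };
            struct inode b = {  7, PTHREAD_MUTEX_INITIALIZER };

            lock_two_inodes(&a, 1, &b, 2); /* locks b (ino 7) first */
            pthread_mutex_unlock(&a.lock);
            pthread_mutex_unlock(&b.lock);
            printf("locked and unlocked in ino order\n");
            return 0;
    }
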
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 6ee5c3bf19ad..d5037f060d6f 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -30,6 +30,7 @@
#include "xfs_buf_item.h"
#include "xfs_log.h"
+#include <linux/iversion.h>
kmem_zone_t *xfs_ili_zone; /* inode log item zone */
@@ -354,7 +355,7 @@ xfs_inode_to_log_dinode(
to->di_next_unlinked = NULLAGINO;
if (from->di_version == 3) {
- to->di_changecount = inode->i_version;
+ to->di_changecount = inode_peek_iversion(inode);
to->di_crtime.t_sec = from->di_crtime.t_sec;
to->di_crtime.t_nsec = from->di_crtime.t_nsec;
to->di_flags2 = from->di_flags2;
@@ -521,7 +522,7 @@ xfs_inode_item_push(
if (!xfs_buf_trylock(bp))
return XFS_ITEM_LOCKED;
- if (!xfs_buf_resubmit_failed_buffers(bp, lip, buffer_list))
+ if (!xfs_buf_resubmit_failed_buffers(bp, buffer_list))
rval = XFS_ITEM_FLUSHING;
xfs_buf_unlock(bp);
@@ -712,37 +713,23 @@ xfs_iflush_done(
struct xfs_log_item *lip)
{
struct xfs_inode_log_item *iip;
- struct xfs_log_item *blip;
- struct xfs_log_item *next;
- struct xfs_log_item *prev;
+ struct xfs_log_item *blip, *n;
struct xfs_ail *ailp = lip->li_ailp;
int need_ail = 0;
+ LIST_HEAD(tmp);
/*
* Scan the buffer IO completions for other inodes being completed and
* attach them to the current inode log item.
*/
- blip = bp->b_fspriv;
- prev = NULL;
- while (blip != NULL) {
- if (blip->li_cb != xfs_iflush_done) {
- prev = blip;
- blip = blip->li_bio_list;
- continue;
- }
- /* remove from list */
- next = blip->li_bio_list;
- if (!prev) {
- bp->b_fspriv = next;
- } else {
- prev->li_bio_list = next;
- }
+ list_add_tail(&lip->li_bio_list, &tmp);
- /* add to current list */
- blip->li_bio_list = lip->li_bio_list;
- lip->li_bio_list = blip;
+ list_for_each_entry_safe(blip, n, &bp->b_li_list, li_bio_list) {
+ if (blip->li_cb != xfs_iflush_done)
+ continue;
+ list_move_tail(&blip->li_bio_list, &tmp);
/*
* while we have the item, do the unlocked check for needing
* the AIL lock.
@@ -751,8 +738,6 @@ xfs_iflush_done(
if ((iip->ili_logged && blip->li_lsn == iip->ili_flush_lsn) ||
(blip->li_flags & XFS_LI_FAILED))
need_ail++;
-
- blip = next;
}
/* make sure we capture the state of the initial inode. */
@@ -775,7 +760,7 @@ xfs_iflush_done(
/* this is an opencoded batch version of xfs_trans_ail_delete */
spin_lock(&ailp->xa_lock);
- for (blip = lip; blip; blip = blip->li_bio_list) {
+ list_for_each_entry(blip, &tmp, li_bio_list) {
if (INODE_ITEM(blip)->ili_logged &&
blip->li_lsn == INODE_ITEM(blip)->ili_flush_lsn)
mlip_changed |= xfs_ail_delete_one(ailp, blip);
@@ -801,15 +786,14 @@ xfs_iflush_done(
* ili_last_fields bits now that we know that the data corresponding to
* them is safely on disk.
*/
- for (blip = lip; blip; blip = next) {
- next = blip->li_bio_list;
- blip->li_bio_list = NULL;
-
+ list_for_each_entry_safe(blip, n, &tmp, li_bio_list) {
+ list_del_init(&blip->li_bio_list);
iip = INODE_ITEM(blip);
iip->ili_logged = 0;
iip->ili_last_fields = 0;
xfs_ifunlock(iip->ili_inode);
}
+ list_del(&tmp);
}
/*
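
The xfs_iflush_done() rework keeps the two-phase shape the old pointer
chain had: gather every completed inode item onto a private list without
the AIL lock, then take xa_lock once for the whole batch instead of once
per item. Stripped to its essentials (arrays instead of list_heads, all
names invented):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t ail_lock = PTHREAD_MUTEX_INITIALIZER;

    static void complete_batch(const int *items, int n)
    {
            int batch[16], m = 0;

            /* phase 1: decide what belongs to the batch, no lock needed */
            for (int i = 0; i < n && m < 16; i++)
                    if (items[i] % 2 == 0)  /* stand-in for the li_cb test */
                            batch[m++] = items[i];

            /* phase 2: one lock round trip covers the entire batch */
            pthread_mutex_lock(&ail_lock);
            for (int i = 0; i < m; i++)
                    printf("removing item %d from the AIL\n", batch[i]);
            pthread_mutex_unlock(&ail_lock);
    }

    int main(void)
    {
            int items[] = { 1, 2, 3, 4, 6 };

            complete_batch(items, 5);
            return 0;
    }
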
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 20dc65fef6a4..89fb1eb80aae 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -45,6 +45,7 @@
#include <linux/fsmap.h>
#include "xfs_fsmap.h"
#include "scrub/xfs_scrub.h"
+#include "xfs_sb.h"
#include <linux/capability.h>
#include <linux/cred.h>
@@ -809,7 +810,7 @@ xfs_ioc_fsgeometry_v1(
xfs_fsop_geom_t fsgeo;
int error;
- error = xfs_fs_geometry(mp, &fsgeo, 3);
+ error = xfs_fs_geometry(&mp->m_sb, &fsgeo, 3);
if (error)
return error;
@@ -831,7 +832,7 @@ xfs_ioc_fsgeometry(
xfs_fsop_geom_t fsgeo;
int error;
- error = xfs_fs_geometry(mp, &fsgeo, 4);
+ error = xfs_fs_geometry(&mp->m_sb, &fsgeo, 4);
if (error)
return error;
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index 35c79e246fde..10fbde359649 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -37,6 +37,7 @@
#include "xfs_ioctl.h"
#include "xfs_ioctl32.h"
#include "xfs_trace.h"
+#include "xfs_sb.h"
#define _NATIVE_IOC(cmd, type) \
_IOC(_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), sizeof(type))
@@ -66,7 +67,7 @@ xfs_compat_ioc_fsgeometry_v1(
xfs_fsop_geom_t fsgeo;
int error;
- error = xfs_fs_geometry(mp, &fsgeo, 3);
+ error = xfs_fs_geometry(&mp->m_sb, &fsgeo, 3);
if (error)
return error;
/* The 32-bit variant simply has some padding at the end */
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index 99562ec0de56..bee51a14a906 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -285,8 +285,22 @@ static inline uint64_t howmany_64(uint64_t x, uint32_t y)
#define XFS_IS_REALTIME_INODE(ip) \
(((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME) && \
(ip)->i_mount->m_rtdev_targp)
+#define XFS_IS_REALTIME_MOUNT(mp) ((mp)->m_rtdev_targp ? 1 : 0)
#else
#define XFS_IS_REALTIME_INODE(ip) (0)
+#define XFS_IS_REALTIME_MOUNT(mp) (0)
+#endif
+
+/*
+ * Starting in Linux 4.15, the %p (raw pointer value) printk modifier
+ * prints a hashed version of the pointer to avoid leaking kernel
+ * pointers into dmesg. If we're trying to debug the kernel we want the
+ * raw values, so override this behavior as best we can.
+ */
+#ifdef DEBUG
+# define PTR_FMT "%px"
+#else
+# define PTR_FMT "%p"
#endif
#endif /* __XFS_LINUX__ */
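
PTR_FMT works because adjacent C string literals are concatenated at
compile time, which is why the conversions in this series can splice
"ptr "PTR_FMT into existing format strings without touching the argument
lists. The mechanism in miniature (userspace printf has no %px, so both
branches fall back to %p here; the point is the compile-time selection):

    #include <stdio.h>

    #ifdef DEBUG
    # define PTR_FMT "%p"   /* the kernel would use raw %px here */
    #else
    # define PTR_FMT "%p"   /* ... and hashed %p here */
    #endif

    int main(void)
    {
            int obj;

            /* adjacent string literals merge into one format string */
            printf("object at " PTR_FMT "\n", (void *)&obj);
            return 0;
    }
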
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index a503af96d780..3e5ba1ecc080 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1047,6 +1047,7 @@ xfs_log_item_init(
INIT_LIST_HEAD(&item->li_ail);
INIT_LIST_HEAD(&item->li_cil);
+ INIT_LIST_HEAD(&item->li_bio_list);
}
/*
@@ -1242,7 +1243,7 @@ xlog_space_left(
static void
xlog_iodone(xfs_buf_t *bp)
{
- struct xlog_in_core *iclog = bp->b_fspriv;
+ struct xlog_in_core *iclog = bp->b_log_item;
struct xlog *l = iclog->ic_log;
int aborted = 0;
@@ -1773,7 +1774,7 @@ STATIC int
xlog_bdstrat(
struct xfs_buf *bp)
{
- struct xlog_in_core *iclog = bp->b_fspriv;
+ struct xlog_in_core *iclog = bp->b_log_item;
xfs_buf_lock(bp);
if (iclog->ic_state & XLOG_STATE_IOERROR) {
@@ -1919,7 +1920,7 @@ xlog_sync(
}
bp->b_io_length = BTOBB(count);
- bp->b_fspriv = iclog;
+ bp->b_log_item = iclog;
bp->b_flags &= ~XBF_FLUSH;
bp->b_flags |= (XBF_ASYNC | XBF_SYNCIO | XBF_WRITE | XBF_FUA);
@@ -1958,7 +1959,7 @@ xlog_sync(
XFS_BUF_SET_ADDR(bp, 0); /* logical 0 */
xfs_buf_associate_memory(bp,
(char *)&iclog->ic_header + count, split);
- bp->b_fspriv = iclog;
+ bp->b_log_item = iclog;
bp->b_flags &= ~XBF_FLUSH;
bp->b_flags |= (XBF_ASYNC | XBF_SYNCIO | XBF_WRITE | XBF_FUA);
@@ -2117,7 +2118,9 @@ xlog_print_trans(
/* dump core transaction and ticket info */
xfs_warn(mp, "transaction summary:");
- xfs_warn(mp, " flags = 0x%x", tp->t_flags);
+ xfs_warn(mp, " log res = %d", tp->t_log_res);
+ xfs_warn(mp, " log count = %d", tp->t_log_count);
+ xfs_warn(mp, " flags = 0x%x", tp->t_flags);
xlog_print_tic_res(mp, tp->t_ticket);
@@ -2242,7 +2245,7 @@ xlog_write_setup_ophdr(
break;
default:
xfs_warn(log->l_mp,
- "Bad XFS transaction clientid 0x%x in ticket 0x%p",
+ "Bad XFS transaction clientid 0x%x in ticket "PTR_FMT,
ophdr->oh_clientid, ticket);
return NULL;
}
@@ -3924,7 +3927,7 @@ xlog_verify_iclog(
}
if (clientid != XFS_TRANSACTION && clientid != XFS_LOG)
xfs_warn(log->l_mp,
- "%s: invalid clientid %d op 0x%p offset 0x%lx",
+ "%s: invalid clientid %d op "PTR_FMT" offset 0x%lx",
__func__, clientid, ophead,
(unsigned long)field_offset);
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 28d1abfe835e..00240c9ee72e 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -400,9 +400,9 @@ xlog_recover_iodone(
* On v5 supers, a bli could be attached to update the metadata LSN.
* Clean it up.
*/
- if (bp->b_fspriv)
+ if (bp->b_log_item)
xfs_buf_item_relse(bp);
- ASSERT(bp->b_fspriv == NULL);
+ ASSERT(bp->b_log_item == NULL);
bp->b_iodone = NULL;
xfs_buf_ioend(bp);
@@ -2218,7 +2218,7 @@ xlog_recover_do_inode_buffer(
next_unlinked_offset - reg_buf_offset;
if (unlikely(*logged_nextp == 0)) {
xfs_alert(mp,
- "Bad inode buffer log record (ptr = 0x%p, bp = 0x%p). "
+ "Bad inode buffer log record (ptr = "PTR_FMT", bp = "PTR_FMT"). "
"Trying to replay bad (0) inode di_next_unlinked field.",
item, bp);
XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
@@ -2630,7 +2630,7 @@ xlog_recover_validate_buf_type(
ASSERT(!bp->b_iodone || bp->b_iodone == xlog_recover_iodone);
bp->b_iodone = xlog_recover_iodone;
xfs_buf_item_init(bp, mp);
- bip = bp->b_fspriv;
+ bip = bp->b_log_item;
bip->bli_item.li_lsn = current_lsn;
}
}
@@ -2652,7 +2652,7 @@ xlog_recover_do_reg_buffer(
int i;
int bit;
int nbits;
- int error;
+ xfs_failaddr_t fa;
trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
@@ -2687,7 +2687,7 @@ xlog_recover_do_reg_buffer(
* the first dquot in the buffer should do. XXXThis is
* probably a good thing to do for other buf types also.
*/
- error = 0;
+ fa = NULL;
if (buf_f->blf_flags &
(XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
if (item->ri_buf[i].i_addr == NULL) {
@@ -2701,11 +2701,14 @@ xlog_recover_do_reg_buffer(
item->ri_buf[i].i_len, __func__);
goto next;
}
- error = xfs_dqcheck(mp, item->ri_buf[i].i_addr,
- -1, 0, XFS_QMOPT_DOWARN,
- "dquot_buf_recover");
- if (error)
+ fa = xfs_dquot_verify(mp, item->ri_buf[i].i_addr,
+ -1, 0, 0);
+ if (fa) {
+ xfs_alert(mp,
+ "dquot corrupt at %pS trying to replay into block 0x%llx",
+ fa, bp->b_bn);
goto next;
+ }
}
memcpy(xfs_buf_offset(bp,
@@ -2957,6 +2960,10 @@ xfs_recover_inode_owner_change(
if (error)
goto out_free_ip;
+ if (!xfs_inode_verify_forks(ip)) {
+ error = -EFSCORRUPTED;
+ goto out_free_ip;
+ }
if (in_f->ilf_fields & XFS_ILOG_DOWNER) {
ASSERT(in_f->ilf_fields & XFS_ILOG_DBROOT);
@@ -3042,7 +3049,7 @@ xlog_recover_inode_pass2(
*/
if (unlikely(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))) {
xfs_alert(mp,
- "%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld",
+ "%s: Bad inode magic number, dip = "PTR_FMT", dino bp = "PTR_FMT", ino = %Ld",
__func__, dip, bp, in_f->ilf_ino);
XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
XFS_ERRLEVEL_LOW, mp);
@@ -3052,7 +3059,7 @@ xlog_recover_inode_pass2(
ldip = item->ri_buf[1].i_addr;
if (unlikely(ldip->di_magic != XFS_DINODE_MAGIC)) {
xfs_alert(mp,
- "%s: Bad inode log record, rec ptr 0x%p, ino %Ld",
+ "%s: Bad inode log record, rec ptr "PTR_FMT", ino %Ld",
__func__, item, in_f->ilf_ino);
XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
XFS_ERRLEVEL_LOW, mp);
@@ -3110,8 +3117,8 @@ xlog_recover_inode_pass2(
XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
XFS_ERRLEVEL_LOW, mp, ldip);
xfs_alert(mp,
- "%s: Bad regular inode log record, rec ptr 0x%p, "
- "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
+ "%s: Bad regular inode log record, rec ptr "PTR_FMT", "
+ "ino ptr = "PTR_FMT", ino bp = "PTR_FMT", ino %Ld",
__func__, item, dip, bp, in_f->ilf_ino);
error = -EFSCORRUPTED;
goto out_release;
@@ -3123,8 +3130,8 @@ xlog_recover_inode_pass2(
XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
XFS_ERRLEVEL_LOW, mp, ldip);
xfs_alert(mp,
- "%s: Bad dir inode log record, rec ptr 0x%p, "
- "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
+ "%s: Bad dir inode log record, rec ptr "PTR_FMT", "
+ "ino ptr = "PTR_FMT", ino bp = "PTR_FMT", ino %Ld",
__func__, item, dip, bp, in_f->ilf_ino);
error = -EFSCORRUPTED;
goto out_release;
@@ -3134,8 +3141,8 @@ xlog_recover_inode_pass2(
XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
XFS_ERRLEVEL_LOW, mp, ldip);
xfs_alert(mp,
- "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
- "dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
+ "%s: Bad inode log record, rec ptr "PTR_FMT", dino ptr "PTR_FMT", "
+ "dino bp "PTR_FMT", ino %Ld, total extents = %d, nblocks = %Ld",
__func__, item, dip, bp, in_f->ilf_ino,
ldip->di_nextents + ldip->di_anextents,
ldip->di_nblocks);
@@ -3146,8 +3153,8 @@ xlog_recover_inode_pass2(
XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
XFS_ERRLEVEL_LOW, mp, ldip);
xfs_alert(mp,
- "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
- "dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__,
+ "%s: Bad inode log record, rec ptr "PTR_FMT", dino ptr "PTR_FMT", "
+ "dino bp "PTR_FMT", ino %Ld, forkoff 0x%x", __func__,
item, dip, bp, in_f->ilf_ino, ldip->di_forkoff);
error = -EFSCORRUPTED;
goto out_release;
@@ -3157,7 +3164,7 @@ xlog_recover_inode_pass2(
XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
XFS_ERRLEVEL_LOW, mp, ldip);
xfs_alert(mp,
- "%s: Bad inode log record length %d, rec ptr 0x%p",
+ "%s: Bad inode log record length %d, rec ptr "PTR_FMT,
__func__, item->ri_buf[1].i_len, item);
error = -EFSCORRUPTED;
goto out_release;
@@ -3303,6 +3310,7 @@ xlog_recover_dquot_pass2(
xfs_mount_t *mp = log->l_mp;
xfs_buf_t *bp;
struct xfs_disk_dquot *ddq, *recddq;
+ xfs_failaddr_t fa;
int error;
xfs_dq_logformat_t *dq_f;
uint type;
@@ -3345,10 +3353,12 @@ xlog_recover_dquot_pass2(
*/
dq_f = item->ri_buf[0].i_addr;
ASSERT(dq_f);
- error = xfs_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
- "xlog_recover_dquot_pass2 (log copy)");
- if (error)
+ fa = xfs_dquot_verify(mp, recddq, dq_f->qlf_id, 0, 0);
+ if (fa) {
+ xfs_alert(mp, "corrupt dquot ID 0x%x in log at %pS",
+ dq_f->qlf_id, fa);
return -EIO;
+ }
ASSERT(dq_f->qlf_len == 1);
/*
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index c879b517cc94..98fd41cbb9e1 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -162,6 +162,7 @@ xfs_free_perag(
ASSERT(pag);
ASSERT(atomic_read(&pag->pag_ref) == 0);
xfs_buf_hash_destroy(pag);
+ mutex_destroy(&pag->pag_ici_reclaim_lock);
call_rcu(&pag->rcu_head, __xfs_free_perag);
}
}
@@ -248,6 +249,7 @@ xfs_initialize_perag(
out_hash_destroy:
xfs_buf_hash_destroy(pag);
out_free_pag:
+ mutex_destroy(&pag->pag_ici_reclaim_lock);
kmem_free(pag);
out_unwind_new_pags:
/* unwind any prior newly initialized pags */
@@ -256,6 +258,7 @@ out_unwind_new_pags:
if (!pag)
break;
xfs_buf_hash_destroy(pag);
+ mutex_destroy(&pag->pag_ici_reclaim_lock);
kmem_free(pag);
}
return error;
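
These three hunks complete a resource-pairing rule: the per-AG reclaim
mutex initialised in xfs_initialize_perag() must be destroyed on every
exit path, including the error unwinds. A minimal sketch of that paired
init/teardown with reverse-order goto unwinding (names invented):

    #include <pthread.h>
    #include <stdlib.h>

    struct perag { pthread_mutex_t reclaim_lock; void *hash; };

    static int perag_init(struct perag *pag)
    {
            pthread_mutex_init(&pag->reclaim_lock, NULL);
            pag->hash = malloc(64);
            if (!pag->hash)
                    goto out_destroy_mutex; /* unwind in reverse order */
            return 0;

    out_destroy_mutex:
            pthread_mutex_destroy(&pag->reclaim_lock);
            return -1;
    }

    static void perag_free(struct perag *pag)
    {
            free(pag->hash);
            pthread_mutex_destroy(&pag->reclaim_lock); /* pairs with init */
    }

    int main(void)
    {
            struct perag pag;

            if (perag_init(&pag) == 0)
                    perag_free(&pag);
            return 0;
    }
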
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index b897b11afb2c..5b848f4b637f 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -162,7 +162,7 @@ xfs_qm_dqpurge(
*/
error = xfs_qm_dqflush(dqp, &bp);
if (error) {
- xfs_warn(mp, "%s: dquot %p flush failed",
+ xfs_warn(mp, "%s: dquot "PTR_FMT" flush failed",
__func__, dqp);
} else {
error = xfs_bwrite(bp);
@@ -291,8 +291,7 @@ xfs_qm_dqattach_one(
* exist on disk and we didn't ask it to allocate; ESRCH if quotas got
* turned off suddenly.
*/
- error = xfs_qm_dqget(ip->i_mount, ip, id, type,
- doalloc | XFS_QMOPT_DOWARN, &dqp);
+ error = xfs_qm_dqget(ip->i_mount, ip, id, type, doalloc, &dqp);
if (error)
return error;
@@ -481,7 +480,7 @@ xfs_qm_dquot_isolate(
error = xfs_qm_dqflush(dqp, &bp);
if (error) {
- xfs_warn(dqp->q_mount, "%s: dquot %p flush failed",
+ xfs_warn(dqp->q_mount, "%s: dquot "PTR_FMT" flush failed",
__func__, dqp);
goto out_unlock_dirty;
}
@@ -574,7 +573,7 @@ xfs_qm_set_defquota(
struct xfs_def_quota *defq;
int error;
- error = xfs_qm_dqread(mp, 0, type, XFS_QMOPT_DOWARN, &dqp);
+ error = xfs_qm_dqread(mp, 0, type, 0, &dqp);
if (!error) {
xfs_disk_dquot_t *ddqp = &dqp->q_core;
@@ -652,7 +651,7 @@ xfs_qm_init_quotainfo(
XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
(XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
XFS_DQ_PROJ),
- XFS_QMOPT_DOWARN, &dqp);
+ 0, &dqp);
if (!error) {
xfs_disk_dquot_t *ddqp = &dqp->q_core;
@@ -843,6 +842,7 @@ xfs_qm_reset_dqcounts(
{
struct xfs_dqblk *dqb;
int j;
+ xfs_failaddr_t fa;
trace_xfs_reset_dqcounts(bp, _RET_IP_);
@@ -864,10 +864,13 @@ xfs_qm_reset_dqcounts(
/*
* Do a sanity check, and if needed, repair the dqblk. Don't
* output any warnings because it's perfectly possible to
- * find uninitialised dquot blks. See comment in xfs_dqcheck.
+ * find uninitialised dquot blks. See comment in
+ * xfs_dquot_verify.
*/
- xfs_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
- "xfs_quotacheck");
+ fa = xfs_dquot_verify(mp, ddq, id + j, type, 0);
+ if (fa)
+ xfs_dquot_repair(mp, ddq, id + j, type);
+
/*
* Reset type in case we are reusing group quota file for
* project quotas or vice versa
@@ -1074,8 +1077,7 @@ xfs_qm_quotacheck_dqadjust(
struct xfs_dquot *dqp;
int error;
- error = xfs_qm_dqget(mp, ip, id, type,
- XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp);
+ error = xfs_qm_dqget(mp, ip, id, type, XFS_QMOPT_DQALLOC, &dqp);
if (error) {
/*
* Shouldn't be able to turn off quotas here.
@@ -1696,8 +1698,7 @@ xfs_qm_vop_dqalloc(
xfs_iunlock(ip, lockflags);
error = xfs_qm_dqget(mp, NULL, uid,
XFS_DQ_USER,
- XFS_QMOPT_DQALLOC |
- XFS_QMOPT_DOWARN,
+ XFS_QMOPT_DQALLOC,
&uq);
if (error) {
ASSERT(error != -ENOENT);
@@ -1723,8 +1724,7 @@ xfs_qm_vop_dqalloc(
xfs_iunlock(ip, lockflags);
error = xfs_qm_dqget(mp, NULL, gid,
XFS_DQ_GROUP,
- XFS_QMOPT_DQALLOC |
- XFS_QMOPT_DOWARN,
+ XFS_QMOPT_DQALLOC,
&gq);
if (error) {
ASSERT(error != -ENOENT);
@@ -1743,8 +1743,7 @@ xfs_qm_vop_dqalloc(
xfs_iunlock(ip, lockflags);
error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
XFS_DQ_PROJ,
- XFS_QMOPT_DQALLOC |
- XFS_QMOPT_DOWARN,
+ XFS_QMOPT_DQALLOC,
&pq);
if (error) {
ASSERT(error != -ENOENT);
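
The %p → PTR_FMT switches above track this release's move to hash %p output in printk so that stray debug messages stop leaking kernel addresses (and weakening KASLR); PTR_FMT is an XFS macro which, as these hunks imply, prints the raw pointer only on debug builds. A toy userspace illustration of why hashing is wanted (the mixer below is invented, not the kernel's siphash):

    #include <stdint.h>
    #include <stdio.h>

    static uintptr_t hash_ptr(const void *p)
    {
        uintptr_t v = (uintptr_t)p;

        v ^= v >> 17;                        /* toy mixer, NOT siphash */
        return v * (uintptr_t)0x9E3779B97F4A7C15ull;
    }

    int main(void)
    {
        int obj;

        printf("raw    %p\n", (void *)&obj);   /* leaks the address */
        printf("hashed %#lx\n", (unsigned long)hash_ptr(&obj));
        return 0;
    }
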
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 47aea2e82c26..270246943a06 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -464,6 +464,13 @@ retry:
error = xfs_trans_commit(tp);
if (error)
return error;
+
+ /*
+ * Allocation succeeded but the requested range was not even partially
+ * satisfied? Bail out!
+ */
+ if (nimaps == 0)
+ return -ENOSPC;
convert:
return xfs_reflink_convert_cow_extent(ip, imap, offset_fsb, count_fsb,
&dfops);
@@ -599,10 +606,6 @@ xfs_reflink_cancel_cow_blocks(
del.br_startblock, del.br_blockcount,
NULL);
- /* Update quota accounting */
- xfs_trans_mod_dquot_byino(*tpp, ip, XFS_TRANS_DQ_BCOUNT,
- -(long)del.br_blockcount);
-
/* Roll the transaction */
xfs_defer_ijoin(&dfops, ip);
error = xfs_defer_finish(tpp, &dfops);
@@ -613,6 +616,13 @@ xfs_reflink_cancel_cow_blocks(
/* Remove the mapping from the CoW fork. */
xfs_bmap_del_extent_cow(ip, &icur, &got, &del);
+
+ /* Remove the quota reservation */
+ error = xfs_trans_reserve_quota_nblks(NULL, ip,
+ -(long)del.br_blockcount, 0,
+ XFS_QMOPT_RES_REGBLKS);
+ if (error)
+ break;
} else {
/* Didn't do anything, push cursor back. */
xfs_iext_prev(ifp, &icur);
@@ -795,6 +805,10 @@ xfs_reflink_end_cow(
if (error)
goto out_defer;
+ /* Charge this new data fork mapping to the on-disk quota. */
+ xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_DELBCOUNT,
+ (long)del.br_blockcount);
+
/* Remove the mapping from the CoW fork. */
xfs_bmap_del_extent_cow(ip, &icur, &got, &del);
@@ -944,7 +958,7 @@ xfs_reflink_set_inode_flag(
if (src->i_ino == dest->i_ino)
xfs_ilock(src, XFS_ILOCK_EXCL);
else
- xfs_lock_two_inodes(src, dest, XFS_ILOCK_EXCL);
+ xfs_lock_two_inodes(src, XFS_ILOCK_EXCL, dest, XFS_ILOCK_EXCL);
if (!xfs_is_reflink_inode(src)) {
trace_xfs_reflink_set_inode_flag(src);
@@ -1202,13 +1216,16 @@ xfs_reflink_remap_blocks(
/* drange = (destoff, destoff + len); srange = (srcoff, srcoff + len) */
while (len) {
+ uint lock_mode;
+
trace_xfs_reflink_remap_blocks_loop(src, srcoff, len,
dest, destoff);
+
/* Read extent from the source file */
nimaps = 1;
- xfs_ilock(src, XFS_ILOCK_EXCL);
+ lock_mode = xfs_ilock_data_map_shared(src);
error = xfs_bmapi_read(src, srcoff, len, &imap, &nimaps, 0);
- xfs_iunlock(src, XFS_ILOCK_EXCL);
+ xfs_iunlock(src, lock_mode);
if (error)
goto err;
ASSERT(nimaps == 1);
@@ -1245,6 +1262,50 @@ err:
}
/*
+ * Grab the exclusive iolock for a data copy from src to dest, making
+ * sure to abide vfs locking order (lowest pointer value goes first) and
+ * breaking the pnfs layout leases on dest before proceeding. The loop
+ * is needed because we cannot call the blocking break_layout() with the
+ * src iolock held, and therefore have to back out both locks.
+ */
+static int
+xfs_iolock_two_inodes_and_break_layout(
+ struct inode *src,
+ struct inode *dest)
+{
+ int error;
+
+retry:
+ if (src < dest) {
+ inode_lock_shared(src);
+ inode_lock_nested(dest, I_MUTEX_NONDIR2);
+ } else {
+ /* src >= dest */
+ inode_lock(dest);
+ }
+
+ error = break_layout(dest, false);
+ if (error == -EWOULDBLOCK) {
+ inode_unlock(dest);
+ if (src < dest)
+ inode_unlock_shared(src);
+ error = break_layout(dest, true);
+ if (error)
+ return error;
+ goto retry;
+ }
+ if (error) {
+ inode_unlock(dest);
+ if (src < dest)
+ inode_unlock_shared(src);
+ return error;
+ }
+ if (src > dest)
+ inode_lock_shared_nested(src, I_MUTEX_NONDIR2);
+ return 0;
+}
+
+/*
* Link a range of blocks from one file to another.
*/
int
@@ -1274,11 +1335,14 @@ xfs_reflink_remap_range(
return -EIO;
/* Lock both files against IO */
- lock_two_nondirectories(inode_in, inode_out);
+ ret = xfs_iolock_two_inodes_and_break_layout(inode_in, inode_out);
+ if (ret)
+ return ret;
if (same_inode)
xfs_ilock(src, XFS_MMAPLOCK_EXCL);
else
- xfs_lock_two_inodes(src, dest, XFS_MMAPLOCK_EXCL);
+ xfs_lock_two_inodes(src, XFS_MMAPLOCK_SHARED, dest,
+ XFS_MMAPLOCK_EXCL);
/* Check file eligibility and prepare for block sharing. */
ret = -EINVAL;
@@ -1295,6 +1359,11 @@ xfs_reflink_remap_range(
if (ret <= 0)
goto out_unlock;
+ /* Attach dquots to dest inode before changing block map */
+ ret = xfs_qm_dqattach(dest, 0);
+ if (ret)
+ goto out_unlock;
+
trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
/*
@@ -1341,10 +1410,12 @@ xfs_reflink_remap_range(
is_dedupe);
out_unlock:
- xfs_iunlock(src, XFS_MMAPLOCK_EXCL);
+ xfs_iunlock(dest, XFS_MMAPLOCK_EXCL);
+ if (!same_inode)
+ xfs_iunlock(src, XFS_MMAPLOCK_SHARED);
+ inode_unlock(inode_out);
if (!same_inode)
- xfs_iunlock(dest, XFS_MMAPLOCK_EXCL);
- unlock_two_nondirectories(inode_in, inode_out);
+ inode_unlock_shared(inode_in);
if (ret)
trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
return ret;
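
xfs_iolock_two_inodes_and_break_layout() above encodes the VFS rule that the lower-addressed inode is locked first; that rule is what makes two concurrent remaps of the same pair (in opposite src/dest roles) deadlock-free. A userspace sketch of address-ordered locking (pthreads, assumed names):

    #include <pthread.h>

    struct obj {
        pthread_mutex_t lock;
    };

    /* Both threads sort the pair the same way, so no ABBA deadlock. */
    static void lock_two(struct obj *a, struct obj *b)
    {
        if (a == b) {
            pthread_mutex_lock(&a->lock);
            return;
        }
        if (a > b) {                     /* lower address locks first */
            struct obj *tmp = a;
            a = b;
            b = tmp;
        }
        pthread_mutex_lock(&a->lock);
        pthread_mutex_lock(&b->lock);
    }

    static void unlock_two(struct obj *a, struct obj *b)
    {
        pthread_mutex_unlock(&a->lock);
        if (a != b)
            pthread_mutex_unlock(&b->lock);
    }

    int main(void)
    {
        struct obj x = { PTHREAD_MUTEX_INITIALIZER };
        struct obj y = { PTHREAD_MUTEX_INITIALIZER };

        lock_two(&x, &y);
        unlock_two(&x, &y);
        return 0;
    }
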
diff --git a/fs/xfs/xfs_rtalloc.h b/fs/xfs/xfs_rtalloc.h
index 3f30f846d7f2..dfee3c991155 100644
--- a/fs/xfs/xfs_rtalloc.h
+++ b/fs/xfs/xfs_rtalloc.h
@@ -139,6 +139,9 @@ int xfs_rtalloc_query_all(struct xfs_trans *tp,
xfs_rtalloc_query_range_fn fn,
void *priv);
bool xfs_verify_rtbno(struct xfs_mount *mp, xfs_rtblock_t rtbno);
+int xfs_rtalloc_extent_is_free(struct xfs_mount *mp, struct xfs_trans *tp,
+ xfs_rtblock_t start, xfs_extlen_t len,
+ bool *is_free);
#else
# define xfs_rtallocate_extent(t,b,min,max,l,f,p,rb) (ENOSYS)
# define xfs_rtfree_extent(t,b,l) (ENOSYS)
@@ -148,6 +151,7 @@ bool xfs_verify_rtbno(struct xfs_mount *mp, xfs_rtblock_t rtbno);
# define xfs_rtalloc_query_all(t,f,p) (ENOSYS)
# define xfs_rtbuf_get(m,t,b,i,p) (ENOSYS)
# define xfs_verify_rtbno(m, r) (false)
+# define xfs_rtalloc_extent_is_free(m,t,s,l,i) (ENOSYS)
static inline int /* error */
xfs_rtmount_init(
xfs_mount_t *mp) /* file system mount structure */
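
The #else branch above gains a matching stub so callers of xfs_rtalloc_extent_is_free() need no ifdefs of their own when the realtime subsystem is compiled out. A generic sketch of the pattern (hypothetical names; note these legacy XFS stubs return a positive ENOSYS):

    #include <errno.h>
    #include <stdio.h>

    /* #define HAVE_RT */                /* feature compiled in? */

    #ifdef HAVE_RT
    int rt_extent_is_free(unsigned long start, unsigned long len,
                          int *is_free);
    #else
    # define rt_extent_is_free(s, l, i)  (ENOSYS)
    #endif

    int main(void)
    {
        int free_flag = 0;
        int ret = rt_extent_is_free(0UL, 16UL, &free_flag);

        (void)free_flag;                 /* unused under the stub */
        printf("ret = %d\n", ret);       /* ENOSYS when HAVE_RT unset */
        return 0;
    }
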
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 1dacccc367f8..f3e0001f9992 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1153,6 +1153,14 @@ xfs_fs_statfs(
((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
(XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
xfs_qm_statvfs(ip, statp);
+
+ if (XFS_IS_REALTIME_MOUNT(mp) &&
+ (ip->i_d.di_flags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
+ statp->f_blocks = sbp->sb_rblocks;
+ statp->f_bavail = statp->f_bfree =
+ sbp->sb_frextents * sbp->sb_rextsize;
+ }
+
return 0;
}
@@ -1660,7 +1668,7 @@ xfs_fs_fill_super(
}
if (xfs_sb_version_hasreflink(&mp->m_sb))
xfs_alert(mp,
- "DAX and reflink have not been tested together!");
+ "DAX and reflink cannot be used together!");
}
if (mp->m_flags & XFS_MOUNT_DISCARD) {
@@ -1684,10 +1692,6 @@ xfs_fs_fill_super(
"EXPERIMENTAL reverse mapping btree feature enabled. Use at your own risk!");
}
- if (xfs_sb_version_hasreflink(&mp->m_sb))
- xfs_alert(mp,
- "EXPERIMENTAL reflink feature enabled. Use at your own risk!");
-
error = xfs_mountfs(mp);
if (error)
goto out_filestream_unmount;
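
For the statfs hunk above: when an inode lives on (or inherits into) the realtime device, capacity is reported from sb_rblocks and free space is free realtime extents times blocks per extent. With invented geometry:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long rblocks   = 1048576; /* rt device, in blocks */
        unsigned long long frextents = 2000;    /* free rt extents */
        unsigned long long rextsize  = 16;      /* blocks per rt extent */

        printf("f_blocks = %llu\n", rblocks);
        printf("f_bfree  = %llu\n", frextents * rextsize); /* 32000 */
        return 0;
    }
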
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index d718a10c2271..945de08af7ba 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -72,7 +72,7 @@ DECLARE_EVENT_CLASS(xfs_attr_list_class,
__entry->flags = ctx->flags;
),
TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u "
- "alist 0x%p size %u count %u firstu %u flags %d %s",
+ "alist %p size %u count %u firstu %u flags %d %s",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->hashval,
@@ -119,7 +119,7 @@ DECLARE_EVENT_CLASS(xfs_perag_class,
__entry->refcount = refcount;
__entry->caller_ip = caller_ip;
),
- TP_printk("dev %d:%d agno %u refcount %d caller %ps",
+ TP_printk("dev %d:%d agno %u refcount %d caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->agno,
__entry->refcount,
@@ -200,7 +200,7 @@ TRACE_EVENT(xfs_attr_list_node_descend,
__entry->bt_before = be32_to_cpu(btree->before);
),
TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u "
- "alist 0x%p size %u count %u firstu %u flags %d %s "
+ "alist %p size %u count %u firstu %u flags %d %s "
"node hashval %u, node before %u",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
@@ -251,8 +251,8 @@ DECLARE_EVENT_CLASS(xfs_bmap_class,
__entry->bmap_state = state;
__entry->caller_ip = caller_ip;
),
- TP_printk("dev %d:%d ino 0x%llx state %s cur 0x%p/%d "
- "offset %lld block %lld count %lld flag %d caller %ps",
+ TP_printk("dev %d:%d ino 0x%llx state %s cur %p/%d "
+ "offset %lld block %lld count %lld flag %d caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS),
@@ -301,7 +301,7 @@ DECLARE_EVENT_CLASS(xfs_buf_class,
__entry->caller_ip = caller_ip;
),
TP_printk("dev %d:%d bno 0x%llx nblks 0x%x hold %d pincount %d "
- "lock %d flags %s caller %ps",
+ "lock %d flags %s caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->bno,
__entry->nblks,
@@ -370,7 +370,7 @@ DECLARE_EVENT_CLASS(xfs_buf_flags_class,
__entry->caller_ip = caller_ip;
),
TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
- "lock %d flags %s caller %ps",
+ "lock %d flags %s caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->bno,
__entry->buffer_length,
@@ -390,7 +390,7 @@ DEFINE_BUF_FLAGS_EVENT(xfs_buf_get);
DEFINE_BUF_FLAGS_EVENT(xfs_buf_read);
TRACE_EVENT(xfs_buf_ioerror,
- TP_PROTO(struct xfs_buf *bp, int error, unsigned long caller_ip),
+ TP_PROTO(struct xfs_buf *bp, int error, xfs_failaddr_t caller_ip),
TP_ARGS(bp, error, caller_ip),
TP_STRUCT__entry(
__field(dev_t, dev)
@@ -401,7 +401,7 @@ TRACE_EVENT(xfs_buf_ioerror,
__field(int, pincount)
__field(unsigned, lockval)
__field(int, error)
- __field(unsigned long, caller_ip)
+ __field(xfs_failaddr_t, caller_ip)
),
TP_fast_assign(
__entry->dev = bp->b_target->bt_dev;
@@ -415,7 +415,7 @@ TRACE_EVENT(xfs_buf_ioerror,
__entry->caller_ip = caller_ip;
),
TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
- "lock %d error %d flags %s caller %ps",
+ "lock %d error %d flags %s caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->bno,
__entry->buffer_length,
@@ -460,7 +460,7 @@ DECLARE_EVENT_CLASS(xfs_buf_item_class,
),
TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
"lock %d flags %s recur %d refcount %d bliflags %s "
- "lidesc 0x%p liflags %s",
+ "lidesc %p liflags %s",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->buf_bno,
__entry->buf_len,
@@ -579,7 +579,7 @@ DECLARE_EVENT_CLASS(xfs_lock_class,
__entry->lock_flags = lock_flags;
__entry->caller_ip = caller_ip;
),
- TP_printk("dev %d:%d ino 0x%llx flags %s caller %ps",
+ TP_printk("dev %d:%d ino 0x%llx flags %s caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__print_flags(__entry->lock_flags, "|", XFS_LOCK_FLAGS),
@@ -697,7 +697,7 @@ DECLARE_EVENT_CLASS(xfs_iref_class,
__entry->pincount = atomic_read(&ip->i_pincount);
__entry->caller_ip = caller_ip;
),
- TP_printk("dev %d:%d ino 0x%llx count %d pincount %d caller %ps",
+ TP_printk("dev %d:%d ino 0x%llx count %d pincount %d caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->count,
@@ -1028,7 +1028,7 @@ DECLARE_EVENT_CLASS(xfs_log_item_class,
__entry->flags = lip->li_flags;
__entry->lsn = lip->li_lsn;
),
- TP_printk("dev %d:%d lip 0x%p lsn %d/%d type %s flags %s",
+ TP_printk("dev %d:%d lip %p lsn %d/%d type %s flags %s",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->lip,
CYCLE_LSN(__entry->lsn), BLOCK_LSN(__entry->lsn),
@@ -1049,7 +1049,7 @@ TRACE_EVENT(xfs_log_force,
__entry->lsn = lsn;
__entry->caller_ip = caller_ip;
),
- TP_printk("dev %d:%d lsn 0x%llx caller %ps",
+ TP_printk("dev %d:%d lsn 0x%llx caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->lsn, (void *)__entry->caller_ip)
)
@@ -1082,7 +1082,7 @@ DECLARE_EVENT_CLASS(xfs_ail_class,
__entry->old_lsn = old_lsn;
__entry->new_lsn = new_lsn;
),
- TP_printk("dev %d:%d lip 0x%p old lsn %d/%d new lsn %d/%d type %s flags %s",
+ TP_printk("dev %d:%d lip %p old lsn %d/%d new lsn %d/%d type %s flags %s",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->lip,
CYCLE_LSN(__entry->old_lsn), BLOCK_LSN(__entry->old_lsn),
@@ -1403,7 +1403,7 @@ TRACE_EVENT(xfs_bunmap,
__entry->flags = flags;
),
TP_printk("dev %d:%d ino 0x%llx size 0x%llx bno 0x%llx len 0x%llx"
- "flags %s caller %ps",
+ "flags %s caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->size,
@@ -1517,7 +1517,7 @@ TRACE_EVENT(xfs_agf,
),
TP_printk("dev %d:%d agno %u flags %s length %u roots b %u c %u "
"levels b %u c %u flfirst %u fllast %u flcount %u "
- "freeblks %u longest %u caller %ps",
+ "freeblks %u longest %u caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->agno,
__print_flags(__entry->flags, "|", XFS_AGF_FLAGS),
@@ -2014,7 +2014,7 @@ DECLARE_EVENT_CLASS(xfs_log_recover_item_class,
__entry->count = item->ri_cnt;
__entry->total = item->ri_total;
),
- TP_printk("dev %d:%d tid 0x%x lsn 0x%llx, pass %d, item 0x%p, "
+ TP_printk("dev %d:%d tid 0x%x lsn 0x%llx, pass %d, item %p, "
"item type %s item region count/total %d/%d",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->tid,
@@ -2486,7 +2486,7 @@ DECLARE_EVENT_CLASS(xfs_ag_error_class,
__entry->error = error;
__entry->caller_ip = caller_ip;
),
- TP_printk("dev %d:%d agno %u error %d caller %ps",
+ TP_printk("dev %d:%d agno %u error %d caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->agno,
__entry->error,
@@ -2977,7 +2977,7 @@ DECLARE_EVENT_CLASS(xfs_inode_error_class,
__entry->error = error;
__entry->caller_ip = caller_ip;
),
- TP_printk("dev %d:%d ino %llx error %d caller %ps",
+ TP_printk("dev %d:%d ino %llx error %d caller %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->error,
@@ -3313,6 +3313,32 @@ DEFINE_GETFSMAP_EVENT(xfs_getfsmap_low_key);
DEFINE_GETFSMAP_EVENT(xfs_getfsmap_high_key);
DEFINE_GETFSMAP_EVENT(xfs_getfsmap_mapping);
+TRACE_EVENT(xfs_trans_resv_calc,
+ TP_PROTO(struct xfs_mount *mp, unsigned int type,
+ struct xfs_trans_res *res),
+ TP_ARGS(mp, type, res),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(int, type)
+ __field(uint, logres)
+ __field(int, logcount)
+ __field(int, logflags)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->type = type;
+ __entry->logres = res->tr_logres;
+ __entry->logcount = res->tr_logcount;
+ __entry->logflags = res->tr_logflags;
+ ),
+ TP_printk("dev %d:%d type %d logres %u logcount %d flags 0x%x",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->type,
+ __entry->logres,
+ __entry->logcount,
+ __entry->logflags)
+);
+
#endif /* _TRACE_XFS_H */
#undef TRACE_INCLUDE_PATH
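
Most of the trace-format churn above is %ps → %pS. A recorded caller address normally points into the middle of a function, and %pS renders symbol+offset where %ps prints only the bare symbol name, so the offset variant is the useful one for caller_ip fields. A rough userspace analogue using glibc's backtrace_symbols(), which likewise prints an offset (build with -rdynamic for local symbol names):

    #include <execinfo.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        void *addr[1] = { __builtin_return_address(0) };
        char **sym = backtrace_symbols(addr, 1);

        if (sym) {
            /* e.g. "/lib/libc.so.6(__libc_start_main+0x85) [0x...]" */
            printf("caller: %s\n", sym[0]);
            free(sym);
        }
        return 0;
    }
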
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index a87f657f59c9..86f92df32c42 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -35,6 +35,27 @@
kmem_zone_t *xfs_trans_zone;
kmem_zone_t *xfs_log_item_desc_zone;
+#if defined(CONFIG_TRACEPOINTS)
+static void
+xfs_trans_trace_reservations(
+ struct xfs_mount *mp)
+{
+ struct xfs_trans_res resv;
+ struct xfs_trans_res *res;
+ struct xfs_trans_res *end_res;
+ int i;
+
+ res = (struct xfs_trans_res *)M_RES(mp);
+ end_res = (struct xfs_trans_res *)(M_RES(mp) + 1);
+ for (i = 0; res < end_res; i++, res++)
+ trace_xfs_trans_resv_calc(mp, i, res);
+ xfs_log_get_max_trans_res(mp, &resv);
+ trace_xfs_trans_resv_calc(mp, -1, &resv);
+}
+#else
+# define xfs_trans_trace_reservations(mp)
+#endif
+
/*
* Initialize the precomputed transaction reservation values
* in the mount structure.
@@ -44,6 +65,7 @@ xfs_trans_init(
struct xfs_mount *mp)
{
xfs_trans_resv_calc(mp, M_RES(mp));
+ xfs_trans_trace_reservations(mp);
}
/*
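
xfs_trans_trace_reservations() above leans on the fact that M_RES(mp) points at a struct containing nothing but struct xfs_trans_res members, so it can be walked by pointer arithmetic from the struct's start to one-past-the-end. A self-contained sketch of the trick (made-up reservation names):

    #include <stdio.h>

    struct res {
        unsigned logres;
        int      logcount;
    };

    struct all_res {                 /* only struct res members allowed */
        struct res write;
        struct res itruncate;
        struct res rename;
    };

    int main(void)
    {
        struct all_res all = { { 100, 2 }, { 200, 3 }, { 50, 1 } };
        struct res *r   = (struct res *)&all;
        struct res *end = (struct res *)(&all + 1);
        int i;

        for (i = 0; r < end; i++, r++)
            printf("type %d logres %u logcount %d\n",
                   i, r->logres, r->logcount);
        return 0;
    }
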
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 815b53d20e26..9d542dfe0052 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -50,7 +50,7 @@ typedef struct xfs_log_item {
uint li_type; /* item type */
uint li_flags; /* misc flags */
struct xfs_buf *li_buf; /* real buffer pointer */
- struct xfs_log_item *li_bio_list; /* buffer item list */
+ struct list_head li_bio_list; /* buffer item list */
void (*li_cb)(struct xfs_buf *,
struct xfs_log_item *);
/* buffer item iodone */
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 3ba7a96a8abd..653ce379d36b 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -82,12 +82,12 @@ _xfs_trans_bjoin(
ASSERT(bp->b_transp == NULL);
/*
- * The xfs_buf_log_item pointer is stored in b_fsprivate. If
+ * The xfs_buf_log_item pointer is stored in b_log_item. If
* it doesn't have one yet, then allocate one and initialize it.
* The checks to see if one is there are in xfs_buf_item_init().
*/
xfs_buf_item_init(bp, tp->t_mountp);
- bip = bp->b_fspriv;
+ bip = bp->b_log_item;
ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
@@ -118,7 +118,7 @@ xfs_trans_bjoin(
struct xfs_buf *bp)
{
_xfs_trans_bjoin(tp, bp, 0);
- trace_xfs_trans_bjoin(bp->b_fspriv);
+ trace_xfs_trans_bjoin(bp->b_log_item);
}
/*
@@ -139,7 +139,7 @@ xfs_trans_get_buf_map(
xfs_buf_flags_t flags)
{
xfs_buf_t *bp;
- xfs_buf_log_item_t *bip;
+ struct xfs_buf_log_item *bip;
if (!tp)
return xfs_buf_get_map(target, map, nmaps, flags);
@@ -159,7 +159,7 @@ xfs_trans_get_buf_map(
}
ASSERT(bp->b_transp == tp);
- bip = bp->b_fspriv;
+ bip = bp->b_log_item;
ASSERT(bip != NULL);
ASSERT(atomic_read(&bip->bli_refcount) > 0);
bip->bli_recur++;
@@ -175,7 +175,7 @@ xfs_trans_get_buf_map(
ASSERT(!bp->b_error);
_xfs_trans_bjoin(tp, bp, 1);
- trace_xfs_trans_get_buf(bp->b_fspriv);
+ trace_xfs_trans_get_buf(bp->b_log_item);
return bp;
}
@@ -188,12 +188,13 @@ xfs_trans_get_buf_map(
* mount structure.
*/
xfs_buf_t *
-xfs_trans_getsb(xfs_trans_t *tp,
- struct xfs_mount *mp,
- int flags)
+xfs_trans_getsb(
+ xfs_trans_t *tp,
+ struct xfs_mount *mp,
+ int flags)
{
xfs_buf_t *bp;
- xfs_buf_log_item_t *bip;
+ struct xfs_buf_log_item *bip;
/*
* Default to just trying to lock the superblock buffer
@@ -210,7 +211,7 @@ xfs_trans_getsb(xfs_trans_t *tp,
*/
bp = mp->m_sb_bp;
if (bp->b_transp == tp) {
- bip = bp->b_fspriv;
+ bip = bp->b_log_item;
ASSERT(bip != NULL);
ASSERT(atomic_read(&bip->bli_refcount) > 0);
bip->bli_recur++;
@@ -223,7 +224,7 @@ xfs_trans_getsb(xfs_trans_t *tp,
return NULL;
_xfs_trans_bjoin(tp, bp, 1);
- trace_xfs_trans_getsb(bp->b_fspriv);
+ trace_xfs_trans_getsb(bp->b_log_item);
return bp;
}
@@ -266,7 +267,7 @@ xfs_trans_read_buf_map(
if (bp) {
ASSERT(xfs_buf_islocked(bp));
ASSERT(bp->b_transp == tp);
- ASSERT(bp->b_fspriv != NULL);
+ ASSERT(bp->b_log_item != NULL);
ASSERT(!bp->b_error);
ASSERT(bp->b_flags & XBF_DONE);
@@ -279,7 +280,7 @@ xfs_trans_read_buf_map(
return -EIO;
}
- bip = bp->b_fspriv;
+ bip = bp->b_log_item;
bip->bli_recur++;
ASSERT(atomic_read(&bip->bli_refcount) > 0);
@@ -329,7 +330,7 @@ xfs_trans_read_buf_map(
if (tp) {
_xfs_trans_bjoin(tp, bp, 1);
- trace_xfs_trans_read_buf(bp->b_fspriv);
+ trace_xfs_trans_read_buf(bp->b_log_item);
}
*bpp = bp;
return 0;
@@ -352,10 +353,11 @@ xfs_trans_read_buf_map(
* brelse() call.
*/
void
-xfs_trans_brelse(xfs_trans_t *tp,
- xfs_buf_t *bp)
+xfs_trans_brelse(
+ xfs_trans_t *tp,
+ xfs_buf_t *bp)
{
- xfs_buf_log_item_t *bip;
+ struct xfs_buf_log_item *bip;
int freed;
/*
@@ -368,7 +370,7 @@ xfs_trans_brelse(xfs_trans_t *tp,
}
ASSERT(bp->b_transp == tp);
- bip = bp->b_fspriv;
+ bip = bp->b_log_item;
ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
@@ -456,10 +458,11 @@ xfs_trans_brelse(xfs_trans_t *tp,
*/
/* ARGSUSED */
void
-xfs_trans_bhold(xfs_trans_t *tp,
- xfs_buf_t *bp)
+xfs_trans_bhold(
+ xfs_trans_t *tp,
+ xfs_buf_t *bp)
{
- xfs_buf_log_item_t *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
ASSERT(bp->b_transp == tp);
ASSERT(bip != NULL);
@@ -476,10 +479,11 @@ xfs_trans_bhold(xfs_trans_t *tp,
* for this transaction.
*/
void
-xfs_trans_bhold_release(xfs_trans_t *tp,
- xfs_buf_t *bp)
+xfs_trans_bhold_release(
+ xfs_trans_t *tp,
+ xfs_buf_t *bp)
{
- xfs_buf_log_item_t *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
ASSERT(bp->b_transp == tp);
ASSERT(bip != NULL);
@@ -500,7 +504,7 @@ xfs_trans_dirty_buf(
struct xfs_trans *tp,
struct xfs_buf *bp)
{
- struct xfs_buf_log_item *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
ASSERT(bp->b_transp == tp);
ASSERT(bip != NULL);
@@ -557,7 +561,7 @@ xfs_trans_log_buf(
uint first,
uint last)
{
- struct xfs_buf_log_item *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
ASSERT(first <= last && last < BBTOB(bp->b_length));
ASSERT(!(bip->bli_flags & XFS_BLI_ORDERED));
@@ -600,10 +604,10 @@ xfs_trans_log_buf(
*/
void
xfs_trans_binval(
- xfs_trans_t *tp,
- xfs_buf_t *bp)
+ xfs_trans_t *tp,
+ xfs_buf_t *bp)
{
- xfs_buf_log_item_t *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
int i;
ASSERT(bp->b_transp == tp);
@@ -655,10 +659,10 @@ xfs_trans_binval(
*/
void
xfs_trans_inode_buf(
- xfs_trans_t *tp,
- xfs_buf_t *bp)
+ xfs_trans_t *tp,
+ xfs_buf_t *bp)
{
- xfs_buf_log_item_t *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
ASSERT(bp->b_transp == tp);
ASSERT(bip != NULL);
@@ -679,10 +683,10 @@ xfs_trans_inode_buf(
*/
void
xfs_trans_stale_inode_buf(
- xfs_trans_t *tp,
- xfs_buf_t *bp)
+ xfs_trans_t *tp,
+ xfs_buf_t *bp)
{
- xfs_buf_log_item_t *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
ASSERT(bp->b_transp == tp);
ASSERT(bip != NULL);
@@ -704,10 +708,10 @@ xfs_trans_stale_inode_buf(
/* ARGSUSED */
void
xfs_trans_inode_alloc_buf(
- xfs_trans_t *tp,
- xfs_buf_t *bp)
+ xfs_trans_t *tp,
+ xfs_buf_t *bp)
{
- xfs_buf_log_item_t *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
ASSERT(bp->b_transp == tp);
ASSERT(bip != NULL);
@@ -729,7 +733,7 @@ xfs_trans_ordered_buf(
struct xfs_trans *tp,
struct xfs_buf *bp)
{
- struct xfs_buf_log_item *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
ASSERT(bp->b_transp == tp);
ASSERT(bip != NULL);
@@ -759,7 +763,7 @@ xfs_trans_buf_set_type(
struct xfs_buf *bp,
enum xfs_blft type)
{
- struct xfs_buf_log_item *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
if (!tp)
return;
@@ -776,8 +780,8 @@ xfs_trans_buf_copy_type(
struct xfs_buf *dst_bp,
struct xfs_buf *src_bp)
{
- struct xfs_buf_log_item *sbip = src_bp->b_fspriv;
- struct xfs_buf_log_item *dbip = dst_bp->b_fspriv;
+ struct xfs_buf_log_item *sbip = src_bp->b_log_item;
+ struct xfs_buf_log_item *dbip = dst_bp->b_log_item;
enum xfs_blft type;
type = xfs_blft_from_flags(&sbip->__bli_format);
@@ -797,11 +801,11 @@ xfs_trans_buf_copy_type(
/* ARGSUSED */
void
xfs_trans_dquot_buf(
- xfs_trans_t *tp,
- xfs_buf_t *bp,
- uint type)
+ xfs_trans_t *tp,
+ xfs_buf_t *bp,
+ uint type)
{
- struct xfs_buf_log_item *bip = bp->b_fspriv;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
ASSERT(type == XFS_BLF_UDQUOT_BUF ||
type == XFS_BLF_PDQUOT_BUF ||
diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c
index daa7615497f9..4a89da4b6fe7 100644
--- a/fs/xfs/xfs_trans_inode.c
+++ b/fs/xfs/xfs_trans_inode.c
@@ -28,6 +28,8 @@
#include "xfs_inode_item.h"
#include "xfs_trace.h"
+#include <linux/iversion.h>
+
/*
* Add a locked inode to the transaction.
*
@@ -110,15 +112,17 @@ xfs_trans_log_inode(
/*
* First time we log the inode in a transaction, bump the inode change
- * counter if it is configured for this to occur. We don't use
- * inode_inc_version() because there is no need for extra locking around
- * i_version as we already hold the inode locked exclusively for
- * metadata modification.
+ * counter if it is configured for this to occur. While we have the
+ * inode locked exclusively for metadata modification, we can usually
+ * avoid setting XFS_ILOG_CORE if no one has queried the value since
+ * the last time it was incremented. If we have XFS_ILOG_CORE already
+ * set however, then go ahead and bump the i_version counter
+ * unconditionally.
*/
if (!(ip->i_itemp->ili_item.li_desc->lid_flags & XFS_LID_DIRTY) &&
IS_I_VERSION(VFS_I(ip))) {
- VFS_I(ip)->i_version++;
- flags |= XFS_ILOG_CORE;
+ if (inode_maybe_inc_iversion(VFS_I(ip), flags & XFS_ILOG_CORE))
+ flags |= XFS_ILOG_CORE;
}
tp->t_flags |= XFS_TRANS_DIRTY;
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index 6db3b4668b1a..ffe364fa4040 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -145,9 +145,9 @@
#define ACPI_ADDRESS_RANGE_MAX 2
-/* Maximum number of While() loops before abort */
+/* Maximum time (default 30s) a While() loop may run before abort */
-#define ACPI_MAX_LOOP_COUNT 0x000FFFFF
+#define ACPI_MAX_LOOP_TIMEOUT 30
/******************************************************************************
*
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index 17d61b1f2511..3c46f0ef5f7a 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -130,8 +130,9 @@ struct acpi_exception_info {
#define AE_HEX_OVERFLOW EXCEP_ENV (0x0020)
#define AE_DECIMAL_OVERFLOW EXCEP_ENV (0x0021)
#define AE_OCTAL_OVERFLOW EXCEP_ENV (0x0022)
+#define AE_END_OF_TABLE EXCEP_ENV (0x0023)
-#define AE_CODE_ENV_MAX 0x0022
+#define AE_CODE_ENV_MAX 0x0023
/*
* Programmer exceptions
@@ -195,7 +196,7 @@ struct acpi_exception_info {
#define AE_AML_CIRCULAR_REFERENCE EXCEP_AML (0x001E)
#define AE_AML_BAD_RESOURCE_LENGTH EXCEP_AML (0x001F)
#define AE_AML_ILLEGAL_ADDRESS EXCEP_AML (0x0020)
-#define AE_AML_INFINITE_LOOP EXCEP_AML (0x0021)
+#define AE_AML_LOOP_TIMEOUT EXCEP_AML (0x0021)
#define AE_AML_UNINITIALIZED_NODE EXCEP_AML (0x0022)
#define AE_AML_TARGET_TYPE EXCEP_AML (0x0023)
@@ -275,7 +276,8 @@ static const struct acpi_exception_info acpi_gbl_exception_names_env[] = {
EXCEP_TXT("AE_DECIMAL_OVERFLOW",
"Overflow during ASCII decimal-to-binary conversion"),
EXCEP_TXT("AE_OCTAL_OVERFLOW",
- "Overflow during ASCII octal-to-binary conversion")
+ "Overflow during ASCII octal-to-binary conversion"),
+ EXCEP_TXT("AE_END_OF_TABLE", "Reached the end of table")
};
static const struct acpi_exception_info acpi_gbl_exception_names_pgm[] = {
@@ -368,8 +370,8 @@ static const struct acpi_exception_info acpi_gbl_exception_names_aml[] = {
"The length of a Resource Descriptor in the AML is incorrect"),
EXCEP_TXT("AE_AML_ILLEGAL_ADDRESS",
"A memory, I/O, or PCI configuration address is invalid"),
- EXCEP_TXT("AE_AML_INFINITE_LOOP",
- "An apparent infinite AML While loop, method was aborted"),
+ EXCEP_TXT("AE_AML_LOOP_TIMEOUT",
+ "An AML While loop exceeded the maximum execution time"),
EXCEP_TXT("AE_AML_UNINITIALIZED_NODE",
"A namespace node is uninitialized or unresolved"),
EXCEP_TXT("AE_AML_TARGET_TYPE",
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 79287629c888..c9608b0b80c6 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -91,6 +91,9 @@ acpi_evaluate_dsm_typed(acpi_handle handle, const guid_t *guid, u64 rev,
bool acpi_dev_found(const char *hid);
bool acpi_dev_present(const char *hid, const char *uid, s64 hrv);
+const char *
+acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv);
+
#ifdef CONFIG_ACPI
#include <linux/proc_fs.h>
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index e1dd1a8d42b6..c589c3e12d90 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -46,7 +46,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20170831
+#define ACPI_CA_VERSION 0x20171215
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
@@ -260,11 +260,11 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_osi_data, 0);
ACPI_INIT_GLOBAL(u8, acpi_gbl_reduced_hardware, FALSE);
/*
- * Maximum number of While() loop iterations before forced method abort.
+ * Maximum timeout for While() loop iterations before forced method abort.
* This mechanism is intended to prevent infinite loops during interpreter
* execution within a host kernel.
*/
-ACPI_INIT_GLOBAL(u32, acpi_gbl_max_loop_iterations, ACPI_MAX_LOOP_COUNT);
+ACPI_INIT_GLOBAL(u32, acpi_gbl_max_loop_iterations, ACPI_MAX_LOOP_TIMEOUT);
/*
* This mechanism is used to trace a specified AML method. The method is
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 7a89e6de94da..4c304bf4d591 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -69,9 +69,10 @@
#define ACPI_SIG_HEST "HEST" /* Hardware Error Source Table */
#define ACPI_SIG_MADT "APIC" /* Multiple APIC Description Table */
#define ACPI_SIG_MSCT "MSCT" /* Maximum System Characteristics Table */
-#define ACPI_SIG_PDTT "PDTT" /* Processor Debug Trigger Table */
+#define ACPI_SIG_PDTT "PDTT" /* Platform Debug Trigger Table */
#define ACPI_SIG_PPTT "PPTT" /* Processor Properties Topology Table */
#define ACPI_SIG_SBST "SBST" /* Smart Battery Specification Table */
+#define ACPI_SIG_SDEV "SDEV" /* Secure Devices table */
#define ACPI_SIG_SLIT "SLIT" /* System Locality Distance Information Table */
#define ACPI_SIG_SRAT "SRAT" /* System Resource Affinity Table */
#define ACPI_SIG_NFIT "NFIT" /* NVDIMM Firmware Interface Table */
@@ -1149,7 +1150,8 @@ enum acpi_nfit_type {
ACPI_NFIT_TYPE_CONTROL_REGION = 4,
ACPI_NFIT_TYPE_DATA_REGION = 5,
ACPI_NFIT_TYPE_FLUSH_ADDRESS = 6,
- ACPI_NFIT_TYPE_RESERVED = 7 /* 7 and greater are reserved */
+ ACPI_NFIT_TYPE_CAPABILITIES = 7,
+ ACPI_NFIT_TYPE_RESERVED = 8 /* 8 and greater are reserved */
};
/*
@@ -1162,7 +1164,7 @@ struct acpi_nfit_system_address {
struct acpi_nfit_header header;
u16 range_index;
u16 flags;
- u32 reserved; /* Reseved, must be zero */
+ u32 reserved; /* Reserved, must be zero */
u32 proximity_domain;
u8 range_guid[16];
u64 address;
@@ -1281,9 +1283,72 @@ struct acpi_nfit_flush_address {
u64 hint_address[1]; /* Variable length */
};
+/* 7: Platform Capabilities Structure */
+
+struct acpi_nfit_capabilities {
+ struct acpi_nfit_header header;
+ u8 highest_capability;
+ u8 reserved[3]; /* Reserved, must be zero */
+ u32 capabilities;
+ u32 reserved2;
+};
+
+/* Capabilities Flags */
+
+#define ACPI_NFIT_CAPABILITY_CACHE_FLUSH (1) /* 00: Cache Flush to NVDIMM capable */
+#define ACPI_NFIT_CAPABILITY_MEM_FLUSH (1<<1) /* 01: Memory Flush to NVDIMM capable */
+#define ACPI_NFIT_CAPABILITY_MEM_MIRRORING (1<<2) /* 02: Memory Mirroring capable */
+
+/*
+ * NFIT/NVDIMM device handle support - used as the _ADR for each NVDIMM
+ */
+struct nfit_device_handle {
+ u32 handle;
+};
+
+/* Device handle construction and extraction macros */
+
+#define ACPI_NFIT_DIMM_NUMBER_MASK 0x0000000F
+#define ACPI_NFIT_CHANNEL_NUMBER_MASK 0x000000F0
+#define ACPI_NFIT_MEMORY_ID_MASK 0x00000F00
+#define ACPI_NFIT_SOCKET_ID_MASK 0x0000F000
+#define ACPI_NFIT_NODE_ID_MASK 0x0FFF0000
+
+#define ACPI_NFIT_DIMM_NUMBER_OFFSET 0
+#define ACPI_NFIT_CHANNEL_NUMBER_OFFSET 4
+#define ACPI_NFIT_MEMORY_ID_OFFSET 8
+#define ACPI_NFIT_SOCKET_ID_OFFSET 12
+#define ACPI_NFIT_NODE_ID_OFFSET 16
+
+/* Macro to construct a NFIT/NVDIMM device handle */
+
+#define ACPI_NFIT_BUILD_DEVICE_HANDLE(dimm, channel, memory, socket, node) \
+ ((dimm) | \
+ ((channel) << ACPI_NFIT_CHANNEL_NUMBER_OFFSET) | \
+ ((memory) << ACPI_NFIT_MEMORY_ID_OFFSET) | \
+ ((socket) << ACPI_NFIT_SOCKET_ID_OFFSET) | \
+ ((node) << ACPI_NFIT_NODE_ID_OFFSET))
+
+/* Macros to extract individual fields from a NFIT/NVDIMM device handle */
+
+#define ACPI_NFIT_GET_DIMM_NUMBER(handle) \
+ ((handle) & ACPI_NFIT_DIMM_NUMBER_MASK)
+
+#define ACPI_NFIT_GET_CHANNEL_NUMBER(handle) \
+ (((handle) & ACPI_NFIT_CHANNEL_NUMBER_MASK) >> ACPI_NFIT_CHANNEL_NUMBER_OFFSET)
+
+#define ACPI_NFIT_GET_MEMORY_ID(handle) \
+ (((handle) & ACPI_NFIT_MEMORY_ID_MASK) >> ACPI_NFIT_MEMORY_ID_OFFSET)
+
+#define ACPI_NFIT_GET_SOCKET_ID(handle) \
+ (((handle) & ACPI_NFIT_SOCKET_ID_MASK) >> ACPI_NFIT_SOCKET_ID_OFFSET)
+
+#define ACPI_NFIT_GET_NODE_ID(handle) \
+ (((handle) & ACPI_NFIT_NODE_ID_MASK) >> ACPI_NFIT_NODE_ID_OFFSET)
+
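A quick self-contained round-trip of the handle packing above, with the mask/offset values copied from the macros (toy names, arbitrary field values):

    #include <stdint.h>
    #include <stdio.h>

    #define CHANNEL_OFF 4
    #define MEMORY_OFF  8
    #define SOCKET_OFF  12
    #define NODE_OFF    16

    #define BUILD_HANDLE(dimm, ch, mem, sock, node)                \
        ((dimm) | ((ch) << CHANNEL_OFF) | ((mem) << MEMORY_OFF) |  \
         ((sock) << SOCKET_OFF) | ((node) << NODE_OFF))

    int main(void)
    {
        uint32_t h = BUILD_HANDLE(2, 1, 0, 3, 7);

        printf("handle 0x%08x dimm %u socket %u node %u\n", h,
               h & 0xF, (h >> SOCKET_OFF) & 0xF, (h >> NODE_OFF) & 0xFFF);
        return 0;   /* handle 0x00073012 dimm 2 socket 3 node 7 */
    }
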
/*******************************************************************************
*
- * PDTT - Processor Debug Trigger Table (ACPI 6.2)
+ * PDTT - Platform Debug Trigger Table (ACPI 6.2)
* Version 0
*
******************************************************************************/
@@ -1301,14 +1366,14 @@ struct acpi_table_pdtt {
* starting at array_offset.
*/
struct acpi_pdtt_channel {
- u16 sub_channel_id;
+ u8 subchannel_id;
+ u8 flags;
};
-/* Mask and Flags for above */
+/* Flags for above */
-#define ACPI_PDTT_SUBCHANNEL_ID_MASK 0x00FF
-#define ACPI_PDTT_RUNTIME_TRIGGER (1<<8)
-#define ACPI_PPTT_WAIT_COMPLETION (1<<9)
+#define ACPI_PDTT_RUNTIME_TRIGGER (1)
+#define ACPI_PDTT_WAIT_COMPLETION (1<<1)
/*******************************************************************************
*
@@ -1376,6 +1441,20 @@ struct acpi_pptt_cache {
#define ACPI_PPTT_MASK_CACHE_TYPE (0x0C) /* Cache type */
#define ACPI_PPTT_MASK_WRITE_POLICY (0x10) /* Write policy */
+/* Attributes describing cache */
+#define ACPI_PPTT_CACHE_READ_ALLOCATE (0x0) /* Cache line is allocated on read */
+#define ACPI_PPTT_CACHE_WRITE_ALLOCATE (0x01) /* Cache line is allocated on write */
+#define ACPI_PPTT_CACHE_RW_ALLOCATE (0x02) /* Cache line is allocated on read and write */
+#define ACPI_PPTT_CACHE_RW_ALLOCATE_ALT (0x03) /* Alternate representation of above */
+
+#define ACPI_PPTT_CACHE_TYPE_DATA (0x0) /* Data cache */
+#define ACPI_PPTT_CACHE_TYPE_INSTR (1<<2) /* Instruction cache */
+#define ACPI_PPTT_CACHE_TYPE_UNIFIED (2<<2) /* Unified I & D cache */
+#define ACPI_PPTT_CACHE_TYPE_UNIFIED_ALT (3<<2) /* Alternate representation of above */
+
+#define ACPI_PPTT_CACHE_POLICY_WB (0x0) /* Cache is write back */
+#define ACPI_PPTT_CACHE_POLICY_WT (1<<4) /* Cache is write through */
+
/* 2: ID Structure */
struct acpi_pptt_id {
@@ -1405,6 +1484,68 @@ struct acpi_table_sbst {
/*******************************************************************************
*
+ * SDEV - Secure Devices Table (ACPI 6.2)
+ * Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_sdev {
+ struct acpi_table_header header; /* Common ACPI table header */
+};
+
+struct acpi_sdev_header {
+ u8 type;
+ u8 flags;
+ u16 length;
+};
+
+/* Values for subtable type above */
+
+enum acpi_sdev_type {
+ ACPI_SDEV_TYPE_NAMESPACE_DEVICE = 0,
+ ACPI_SDEV_TYPE_PCIE_ENDPOINT_DEVICE = 1,
+ ACPI_SDEV_TYPE_RESERVED = 2 /* 2 and greater are reserved */
+};
+
+/* Values for flags above */
+
+#define ACPI_SDEV_HANDOFF_TO_UNSECURE_OS (1)
+
+/*
+ * SDEV subtables
+ */
+
+/* 0: Namespace Device Based Secure Device Structure */
+
+struct acpi_sdev_namespace {
+ struct acpi_sdev_header header;
+ u16 device_id_offset;
+ u16 device_id_length;
+ u16 vendor_data_offset;
+ u16 vendor_data_length;
+};
+
+/* 1: PCIe Endpoint Device Based Device Structure */
+
+struct acpi_sdev_pcie {
+ struct acpi_sdev_header header;
+ u16 segment;
+ u16 start_bus;
+ u16 path_offset;
+ u16 path_length;
+ u16 vendor_data_offset;
+ u16 vendor_data_length;
+};
+
+/* 1a: PCIe Endpoint path entry */
+
+struct acpi_sdev_pcie_path {
+ u8 device;
+ u8 function;
+};
+
+/*******************************************************************************
+ *
* SLIT - System Locality Distance Information Table
* Version 1
*
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index 686b6f8c09dc..0d60d5df14f8 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -810,6 +810,7 @@ struct acpi_iort_smmu_v3 {
u8 pxm;
u8 reserved1;
u16 reserved2;
+ u32 id_mapping_index;
};
/* Values for Model field above */
@@ -1246,6 +1247,8 @@ enum acpi_spmi_interface_types {
* TCPA - Trusted Computing Platform Alliance table
* Version 2
*
+ * TCG Hardware Interface Table for TPM 1.2 Clients and Servers
+ *
* Conforms to "TCG ACPI Specification, Family 1.2 and 2.0",
* Version 1.2, Revision 8
* February 27, 2017
@@ -1310,6 +1313,8 @@ struct acpi_table_tcpa_server {
* TPM2 - Trusted Platform Module (TPM) 2.0 Hardware Interface Table
* Version 4
*
+ * TCG Hardware Interface Table for TPM 2.0 Clients and Servers
+ *
* Conforms to "TCG ACPI Specification, Family 1.2 and 2.0",
* Version 1.2, Revision 8
* February 27, 2017
@@ -1329,15 +1334,23 @@ struct acpi_table_tpm2 {
/* Values for start_method above */
#define ACPI_TPM2_NOT_ALLOWED 0
+#define ACPI_TPM2_RESERVED1 1
#define ACPI_TPM2_START_METHOD 2
+#define ACPI_TPM2_RESERVED3 3
+#define ACPI_TPM2_RESERVED4 4
+#define ACPI_TPM2_RESERVED5 5
#define ACPI_TPM2_MEMORY_MAPPED 6
#define ACPI_TPM2_COMMAND_BUFFER 7
#define ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD 8
+#define ACPI_TPM2_RESERVED9 9
+#define ACPI_TPM2_RESERVED10 10
#define ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC 11 /* V1.2 Rev 8 */
+#define ACPI_TPM2_RESERVED 12
-/* Trailer appears after any start_method subtables */
+/* Optional trailer appears after any start_method subtables */
struct acpi_tpm2_trailer {
+ u8 method_parameters[12];
u32 minimum_log_length; /* Minimum length for the event log area */
u64 log_address; /* Address of the event log area */
};
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 4f077edb9b81..31f1be74dd16 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -468,6 +468,8 @@ typedef void *acpi_handle; /* Actually a ptr to a NS Node */
#define ACPI_NSEC_PER_MSEC 1000000L
#define ACPI_NSEC_PER_SEC 1000000000L
+#define ACPI_TIME_AFTER(a, b) ((s64)((b) - (a)) < 0)
+
/* Owner IDs are used to track namespace nodes for selective deletion */
typedef u8 acpi_owner_id;
@@ -1299,6 +1301,8 @@ typedef enum {
#define ACPI_OSI_WIN_7 0x0B
#define ACPI_OSI_WIN_8 0x0C
#define ACPI_OSI_WIN_10 0x0D
+#define ACPI_OSI_WIN_10_RS1 0x0E
+#define ACPI_OSI_WIN_10_RS2 0x0F
/* Definitions of getopt */
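
ACPI_TIME_AFTER() above is the classic wrap-safe tick comparison (the same idea as the kernel's time_after()): subtract in unsigned arithmetic, then test the sign of the result as a signed value. A self-test with a counter that has wrapped past zero:

    #include <stdint.h>
    #include <stdio.h>

    #define TIME_AFTER(a, b) ((int64_t)((b) - (a)) < 0)

    int main(void)
    {
        uint64_t before_wrap = UINT64_MAX - 5;
        uint64_t after_wrap  = 10;      /* counter wrapped past zero */

        printf("%d\n", TIME_AFTER(after_wrap, before_wrap));  /* 1 */
        printf("%d\n", TIME_AFTER(before_wrap, after_wrap));  /* 0 */
        return 0;
    }
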
diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h
new file mode 100644
index 000000000000..880a292d792f
--- /dev/null
+++ b/include/asm-generic/dma-mapping.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_DMA_MAPPING_H
+#define _ASM_GENERIC_DMA_MAPPING_H
+
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
+{
+ return &dma_direct_ops;
+}
+
+#endif /* _ASM_GENERIC_DMA_MAPPING_H */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index ebe544e048cd..1ab0e520d6fc 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -277,7 +277,11 @@
#define INIT_TASK_DATA(align) \
. = ALIGN(align); \
VMLINUX_SYMBOL(__start_init_task) = .; \
+ VMLINUX_SYMBOL(init_thread_union) = .; \
+ VMLINUX_SYMBOL(init_stack) = .; \
*(.data..init_task) \
+ *(.data..init_thread_info) \
+ . = VMLINUX_SYMBOL(__start_init_task) + THREAD_SIZE; \
VMLINUX_SYMBOL(__end_init_task) = .;
/*
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 03b97629442c..1e26f790b03f 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -327,7 +327,12 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
*/
static inline int crypto_aead_encrypt(struct aead_request *req)
{
- return crypto_aead_alg(crypto_aead_reqtfm(req))->encrypt(req);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+
+ if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
+ return crypto_aead_alg(aead)->encrypt(req);
}
/**
@@ -356,6 +361,9 @@ static inline int crypto_aead_decrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
if (req->cryptlen < crypto_aead_authsize(aead))
return -EINVAL;
diff --git a/include/crypto/chacha20.h b/include/crypto/chacha20.h
index caaa470389e0..b83d66073db0 100644
--- a/include/crypto/chacha20.h
+++ b/include/crypto/chacha20.h
@@ -13,12 +13,13 @@
#define CHACHA20_IV_SIZE 16
#define CHACHA20_KEY_SIZE 32
#define CHACHA20_BLOCK_SIZE 64
+#define CHACHA20_BLOCK_WORDS (CHACHA20_BLOCK_SIZE / sizeof(u32))
struct chacha20_ctx {
u32 key[8];
};
-void chacha20_block(u32 *state, void *stream);
+void chacha20_block(u32 *state, u32 *stream);
void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv);
int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keysize);
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 0ed31fd80242..2d1849dffb80 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -71,12 +71,11 @@ struct ahash_request {
/**
* struct ahash_alg - asynchronous message digest definition
- * @init: Initialize the transformation context. Intended only to initialize the
+ * @init: **[mandatory]** Initialize the transformation context. Intended only to initialize the
* state of the HASH transformation at the beginning. This shall fill in
* the internal structures used during the entire duration of the whole
* transformation. No data processing happens at this point.
- * Note: mandatory.
- * @update: Push a chunk of data into the driver for transformation. This
+ * @update: **[mandatory]** Push a chunk of data into the driver for transformation. This
* function actually pushes blocks of data from upper layers into the
* driver, which then passes those to the hardware as seen fit. This
* function must not finalize the HASH transformation by calculating the
@@ -85,20 +84,17 @@ struct ahash_request {
* context, as this function may be called in parallel with the same
* transformation object. Data processing can happen synchronously
* [SHASH] or asynchronously [AHASH] at this point.
- * Note: mandatory.
- * @final: Retrieve result from the driver. This function finalizes the
+ * @final: **[mandatory]** Retrieve result from the driver. This function finalizes the
* transformation and retrieves the resulting hash from the driver and
* pushes it back to upper layers. No data processing happens at this
* point unless hardware requires it to finish the transformation
* (then the data buffered by the device driver is processed).
- * Note: mandatory.
- * @finup: Combination of @update and @final. This function is effectively a
+ * @finup: **[optional]** Combination of @update and @final. This function is effectively a
* combination of @update and @final calls issued in sequence. As some
* hardware cannot do @update and @final separately, this callback was
* added to allow such hardware to be used at least by IPsec. Data
* processing can happen synchronously [SHASH] or asynchronously [AHASH]
* at this point.
- * Note: optional.
* @digest: Combination of @init and @update and @final. This function
* effectively behaves as the entire chain of operations, @init,
* @update and @final issued in sequence. Just like @finup, this was
@@ -210,7 +206,6 @@ struct crypto_ahash {
unsigned int keylen);
unsigned int reqsize;
- bool has_setkey;
struct crypto_tfm base;
};
@@ -410,11 +405,6 @@ static inline void *ahash_request_ctx(struct ahash_request *req)
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen);
-static inline bool crypto_ahash_has_setkey(struct crypto_ahash *tfm)
-{
- return tfm->has_setkey;
-}
-
/**
* crypto_ahash_finup() - update and finalize message digest
* @req: reference to the ahash_request handle that holds all information
@@ -487,7 +477,12 @@ static inline int crypto_ahash_export(struct ahash_request *req, void *out)
*/
static inline int crypto_ahash_import(struct ahash_request *req, const void *in)
{
- return crypto_ahash_reqtfm(req)->import(req, in);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+ if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
+ return tfm->import(req, in);
}
/**
@@ -503,7 +498,12 @@ static inline int crypto_ahash_import(struct ahash_request *req, const void *in)
*/
static inline int crypto_ahash_init(struct ahash_request *req)
{
- return crypto_ahash_reqtfm(req)->init(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+ if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
+ return tfm->init(req);
}
/**
@@ -855,7 +855,12 @@ static inline int crypto_shash_export(struct shash_desc *desc, void *out)
*/
static inline int crypto_shash_import(struct shash_desc *desc, const void *in)
{
- return crypto_shash_alg(desc->tfm)->import(desc, in);
+ struct crypto_shash *tfm = desc->tfm;
+
+ if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
+ return crypto_shash_alg(tfm)->import(desc, in);
}
/**
@@ -871,7 +876,12 @@ static inline int crypto_shash_import(struct shash_desc *desc, const void *in)
*/
static inline int crypto_shash_init(struct shash_desc *desc)
{
- return crypto_shash_alg(desc->tfm)->init(desc);
+ struct crypto_shash *tfm = desc->tfm;
+
+ if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
+ return crypto_shash_alg(tfm)->init(desc);
}
/**
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index f38227a78eae..482461d8931d 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -245,7 +245,7 @@ ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
int offset, size_t size, int flags);
void af_alg_free_resources(struct af_alg_async_req *areq);
void af_alg_async_cb(struct crypto_async_request *_req, int err);
-unsigned int af_alg_poll(struct file *file, struct socket *sock,
+__poll_t af_alg_poll(struct file *file, struct socket *sock,
poll_table *wait);
struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
unsigned int areqlen);
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index c2bae8da642c..27040a46d50a 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -90,6 +90,8 @@ static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
return alg->setkey != shash_no_setkey;
}
+bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg);
+
int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
struct hash_alg_common *alg,
struct crypto_instance *inst);
diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h
index ccad9b2c9bd6..0f6ddac1acfc 100644
--- a/include/crypto/internal/scompress.h
+++ b/include/crypto/internal/scompress.h
@@ -28,17 +28,6 @@ struct crypto_scomp {
* @free_ctx: Function frees context allocated with alloc_ctx
* @compress: Function performs a compress operation
* @decompress: Function performs a de-compress operation
- * @init: Initialize the cryptographic transformation object.
- * This function is used to initialize the cryptographic
- * transformation object. This function is called only once at
- * the instantiation time, right after the transformation context
- * was allocated. In case the cryptographic hardware has some
- * special requirements which need to be handled by software, this
- * function shall check for the precise requirement of the
- * transformation and put any software fallbacks in place.
- * @exit: Deinitialize the cryptographic transformation object. This is a
- * counterpart to @init, used to remove various changes set in
- * @init.
* @base: Common crypto API algorithm data structure
*/
struct scomp_alg {
diff --git a/include/crypto/null.h b/include/crypto/null.h
index 5757c0a4b321..15aeef6e30ef 100644
--- a/include/crypto/null.h
+++ b/include/crypto/null.h
@@ -12,14 +12,4 @@
struct crypto_skcipher *crypto_get_default_null_skcipher(void);
void crypto_put_default_null_skcipher(void);
-static inline struct crypto_skcipher *crypto_get_default_null_skcipher2(void)
-{
- return crypto_get_default_null_skcipher();
-}
-
-static inline void crypto_put_default_null_skcipher2(void)
-{
- crypto_put_default_null_skcipher();
-}
-
#endif
diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h
index c65567d01e8e..f718a19da82f 100644
--- a/include/crypto/poly1305.h
+++ b/include/crypto/poly1305.h
@@ -31,8 +31,6 @@ struct poly1305_desc_ctx {
};
int crypto_poly1305_init(struct shash_desc *desc);
-int crypto_poly1305_setkey(struct crypto_shash *tfm,
- const u8 *key, unsigned int keylen);
unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
const u8 *src, unsigned int srclen);
int crypto_poly1305_update(struct shash_desc *desc,
diff --git a/include/crypto/salsa20.h b/include/crypto/salsa20.h
new file mode 100644
index 000000000000..19ed48aefc86
--- /dev/null
+++ b/include/crypto/salsa20.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common values for the Salsa20 algorithm
+ */
+
+#ifndef _CRYPTO_SALSA20_H
+#define _CRYPTO_SALSA20_H
+
+#include <linux/types.h>
+
+#define SALSA20_IV_SIZE 8
+#define SALSA20_MIN_KEY_SIZE 16
+#define SALSA20_MAX_KEY_SIZE 32
+#define SALSA20_BLOCK_SIZE 64
+
+struct crypto_skcipher;
+
+struct salsa20_ctx {
+ u32 initial_state[16];
+};
+
+void crypto_salsa20_init(u32 *state, const struct salsa20_ctx *ctx,
+ const u8 *iv);
+int crypto_salsa20_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keysize);
+
+#endif /* _CRYPTO_SALSA20_H */
diff --git a/include/crypto/sha3.h b/include/crypto/sha3.h
index b9d9bd553b48..080f60c2e6b1 100644
--- a/include/crypto/sha3.h
+++ b/include/crypto/sha3.h
@@ -19,7 +19,6 @@
struct sha3_state {
u64 st[25];
- unsigned int md_len;
unsigned int rsiz;
unsigned int rsizw;
@@ -27,4 +26,9 @@ struct sha3_state {
u8 buf[SHA3_224_BLOCK_SIZE];
};
+int crypto_sha3_init(struct shash_desc *desc);
+int crypto_sha3_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len);
+int crypto_sha3_final(struct shash_desc *desc, u8 *out);
+
#endif
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 562001cb412b..2f327f090c3e 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -401,11 +401,6 @@ static inline int crypto_skcipher_setkey(struct crypto_skcipher *tfm,
return tfm->setkey(tfm, key, keylen);
}
-static inline bool crypto_skcipher_has_setkey(struct crypto_skcipher *tfm)
-{
- return tfm->keysize;
-}
-
static inline unsigned int crypto_skcipher_default_keysize(
struct crypto_skcipher *tfm)
{
@@ -442,6 +437,9 @@ static inline int crypto_skcipher_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
return tfm->encrypt(req);
}
@@ -460,6 +458,9 @@ static inline int crypto_skcipher_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
return tfm->decrypt(req);
}
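
The aead, ahash, shash and skcipher hunks in this series all install the same gate: a transform is born with CRYPTO_TFM_NEED_KEY set, a successful setkey() clears it, and until then every operation entry point fails fast with -ENOKEY rather than running with an unset key. A userspace caricature of that state machine (assumed names; ENOKEY is Linux-specific):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    #ifndef ENOKEY
    # define ENOKEY 126
    #endif

    #define NEED_KEY 0x1

    struct tfm {
        unsigned flags;
        unsigned char key[32];
    };

    static int tfm_setkey(struct tfm *t, const unsigned char *key)
    {
        memcpy(t->key, key, sizeof(t->key));
        t->flags &= ~NEED_KEY;     /* key accepted: operations allowed */
        return 0;
    }

    static int tfm_encrypt(struct tfm *t)
    {
        if (t->flags & NEED_KEY)   /* mirrors crypto_skcipher_encrypt() */
            return -ENOKEY;
        return 0;                  /* real work would happen here */
    }

    int main(void)
    {
        struct tfm t = { .flags = NEED_KEY };
        unsigned char key[32] = { 0 };

        printf("%d\n", tfm_encrypt(&t));   /* -ENOKEY before setkey */
        tfm_setkey(&t, key);
        printf("%d\n", tfm_encrypt(&t));   /* 0 afterwards */
        return 0;
    }
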
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index 0e0c868451a5..5176c3797680 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -364,7 +364,7 @@ int drm_open(struct inode *inode, struct file *filp);
ssize_t drm_read(struct file *filp, char __user *buffer,
size_t count, loff_t *offset);
int drm_release(struct inode *inode, struct file *filp);
-unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
+__poll_t drm_poll(struct file *filp, struct poll_table_struct *wait);
int drm_event_reserve_init_locked(struct drm_device *dev,
struct drm_file *file_priv,
struct drm_pending_event *p,
diff --git a/include/dt-bindings/gpio/gpio.h b/include/dt-bindings/gpio/gpio.h
index dd549ff04295..2cc10ae4bbb7 100644
--- a/include/dt-bindings/gpio/gpio.h
+++ b/include/dt-bindings/gpio/gpio.h
@@ -29,8 +29,8 @@
#define GPIO_OPEN_DRAIN (GPIO_SINGLE_ENDED | GPIO_LINE_OPEN_DRAIN)
#define GPIO_OPEN_SOURCE (GPIO_SINGLE_ENDED | GPIO_LINE_OPEN_SOURCE)
-/* Bit 3 express GPIO suspend/resume persistence */
-#define GPIO_SLEEP_MAINTAIN_VALUE 0
-#define GPIO_SLEEP_MAY_LOSE_VALUE 8
+/* Bit 3 expresses GPIO suspend/resume and reset persistence */
+#define GPIO_PERSISTENT 0
+#define GPIO_TRANSITORY 8
#endif
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index f05b9b6cd43f..e6d41b65d396 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -453,6 +453,7 @@ void __init acpi_no_s4_hw_signature(void);
void __init acpi_old_suspend_ordering(void);
void __init acpi_nvs_nosave(void);
void __init acpi_nvs_nosave_s3(void);
+void __init acpi_sleep_no_blacklist(void);
#endif /* CONFIG_PM_SLEEP */
struct acpi_osc_context {
@@ -586,6 +587,7 @@ extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *),
const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
const struct device *dev);
+void *acpi_get_match_data(const struct device *dev);
extern bool acpi_driver_match_device(struct device *dev,
const struct device_driver *drv);
int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *);
@@ -643,6 +645,12 @@ static inline bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
return false;
}
+static inline const char *
+acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv)
+{
+ return NULL;
+}
+
static inline bool is_acpi_node(struct fwnode_handle *fwnode)
{
return false;
@@ -758,6 +766,11 @@ static inline const struct acpi_device_id *acpi_match_device(
return NULL;
}
+static inline void *acpi_get_match_data(const struct device *dev)
+{
+ return NULL;
+}
+
static inline bool acpi_driver_match_device(struct device *dev,
const struct device_driver *drv)
{
@@ -981,6 +994,11 @@ struct acpi_gpio_mapping {
const char *name;
const struct acpi_gpio_params *data;
unsigned int size;
+
+/* Ignore IoRestriction field */
+#define ACPI_GPIO_QUIRK_NO_IO_RESTRICTION BIT(0)
+
+ unsigned int quirks;
};
#if defined(CONFIG_ACPI) && defined(CONFIG_GPIOLIB)
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index 304511267c82..2b709416de05 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -27,7 +27,7 @@ void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity);
DECLARE_PER_CPU(unsigned long, freq_scale);
static inline
-unsigned long topology_get_freq_scale(struct sched_domain *sd, int cpu)
+unsigned long topology_get_freq_scale(int cpu)
{
return per_cpu(freq_scale, cpu);
}
diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h
new file mode 100644
index 000000000000..942afbd544b7
--- /dev/null
+++ b/include/linux/arm_sdei.h
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2017 Arm Ltd.
+#ifndef __LINUX_ARM_SDEI_H
+#define __LINUX_ARM_SDEI_H
+
+#include <uapi/linux/arm_sdei.h>
+
+enum sdei_conduit_types {
+ CONDUIT_INVALID = 0,
+ CONDUIT_SMC,
+ CONDUIT_HVC,
+};
+
+#include <asm/sdei.h>
+
+/* Arch code should override this to set the entry point from firmware... */
+#ifndef sdei_arch_get_entry_point
+#define sdei_arch_get_entry_point(conduit) (0)
+#endif
+
+/*
+ * When an event occurs, sdei_event_handler() will call a user-provided callback
+ * like this in NMI context on the CPU that received the event.
+ */
+typedef int (sdei_event_callback)(u32 event, struct pt_regs *regs, void *arg);
+
+/*
+ * Register your callback to claim an event. The event must be described
+ * by firmware.
+ */
+int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg);
+
+/*
+ * Calls to sdei_event_unregister() may return EINPROGRESS. Keep calling
+ * it until it succeeds.
+ */
+int sdei_event_unregister(u32 event_num);
+
+int sdei_event_enable(u32 event_num);
+int sdei_event_disable(u32 event_num);
+
+#ifdef CONFIG_ARM_SDE_INTERFACE
+/* For use by arch code when CPU hotplug notifiers are not appropriate. */
+int sdei_mask_local_cpu(void);
+int sdei_unmask_local_cpu(void);
+#else
+static inline int sdei_mask_local_cpu(void) { return 0; }
+static inline int sdei_unmask_local_cpu(void) { return 0; }
+#endif /* CONFIG_ARM_SDE_INTERFACE */
+
+
+/*
+ * This struct represents an event that has been registered. The driver
+ * maintains a list of all events, and which ones are registered. (Private
+ * events have one entry in the list, but are registered on each CPU).
+ * A pointer to this struct is passed to firmware, and back to the event
+ * handler. The event handler can then use this to invoke the registered
+ * callback, without having to walk the list.
+ *
+ * For CPU private events, this structure is per-cpu.
+ */
+struct sdei_registered_event {
+ /* For use by arch code: */
+ struct pt_regs interrupted_regs;
+
+ sdei_event_callback *callback;
+ void *callback_arg;
+ u32 event_num;
+ u8 priority;
+};
+
+/* The arch code entry point should then call this when an event arrives. */
+int notrace sdei_event_handler(struct pt_regs *regs,
+ struct sdei_registered_event *arg);
+
+/* arch code may use this to retrieve the extra registers. */
+int sdei_api_event_context(u32 query, u64 *result);
+
+#endif /* __LINUX_ARM_SDEI_H */
diff --git a/include/linux/ata.h b/include/linux/ata.h
index c7a353825450..40d150ad7e07 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -448,6 +448,8 @@ enum {
ATA_SET_MAX_LOCK = 0x02,
ATA_SET_MAX_UNLOCK = 0x03,
ATA_SET_MAX_FREEZE_LOCK = 0x04,
+ ATA_SET_MAX_PASSWD_DMA = 0x05,
+ ATA_SET_MAX_UNLOCK_DMA = 0x06,
/* feature values for DEVICE CONFIGURATION OVERLAY */
ATA_DCO_RESTORE = 0xC0,
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index e54e7e0033eb..3e4ce54d84ab 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -332,7 +332,7 @@ static inline bool inode_to_wb_is_valid(struct inode *inode)
* holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the
* associated wb's list_lock.
*/
-static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
+static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
WARN_ON_ONCE(debug_locks &&
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 23d29b39f71e..d0eb659fa733 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -300,6 +300,29 @@ static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
bv->bv_len = iter.bi_bvec_done;
}
+static inline unsigned bio_pages_all(struct bio *bio)
+{
+ WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
+ return bio->bi_vcnt;
+}
+
+static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
+{
+ WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
+ return bio->bi_io_vec;
+}
+
+static inline struct page *bio_first_page_all(struct bio *bio)
+{
+ return bio_first_bvec_all(bio)->bv_page;
+}
+
+static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
+{
+ WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
+ return &bio->bi_io_vec[bio->bi_vcnt - 1];
+}
+
enum bip_flags {
BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */
@@ -477,7 +500,6 @@ static inline void bio_flush_dcache_pages(struct bio *bi)
#endif
extern void bio_copy_data(struct bio *dst, struct bio *src);
-extern int bio_alloc_pages(struct bio *bio, gfp_t gfp);
extern void bio_free_pages(struct bio *bio);
extern struct bio *bio_copy_user_iov(struct request_queue *,
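
The new *_all accessors above are only valid on a bio that has never been cloned (they WARN otherwise), since a clone's vector table may be shared and offset. A sketch of use in an owner's completion path (my_end_io is hypothetical):

static void my_end_io(struct bio *bio)
{
	struct bio_vec *last = bio_last_bvec_all(bio);
	unsigned int nr = bio_pages_all(bio);

	pr_debug("bio: %u pages, first page %p, last segment %u bytes\n",
		 nr, bio_first_page_all(bio), last->bv_len);
}
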
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
index 1030651f8309..cf2588d81148 100644
--- a/include/linux/bitfield.h
+++ b/include/linux/bitfield.h
@@ -16,6 +16,7 @@
#define _LINUX_BITFIELD_H
#include <linux/build_bug.h>
+#include <asm/byteorder.h>
/*
* Bitfield access macros
@@ -103,4 +104,49 @@
(typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \
})
+extern void __compiletime_warning("value doesn't fit into mask")
+__field_overflow(void);
+extern void __compiletime_error("bad bitfield mask")
+__bad_mask(void);
+static __always_inline u64 field_multiplier(u64 field)
+{
+ if ((field | (field - 1)) & ((field | (field - 1)) + 1))
+ __bad_mask();
+ return field & -field;
+}
+static __always_inline u64 field_mask(u64 field)
+{
+ return field / field_multiplier(field);
+}
+#define ____MAKE_OP(type,base,to,from) \
+static __always_inline __##type type##_encode_bits(base v, base field) \
+{ \
+ if (__builtin_constant_p(v) && (v & ~field_mask(field))) \
+ __field_overflow(); \
+ return to((v & field_mask(field)) * field_multiplier(field)); \
+} \
+static __always_inline __##type type##_replace_bits(__##type old, \
+ base val, base field) \
+{ \
+ return (old & ~to(field)) | type##_encode_bits(val, field); \
+} \
+static __always_inline void type##p_replace_bits(__##type *p, \
+ base val, base field) \
+{ \
+ *p = (*p & ~to(field)) | type##_encode_bits(val, field); \
+} \
+static __always_inline base type##_get_bits(__##type v, base field) \
+{ \
+ return (from(v) & field)/field_multiplier(field); \
+}
+#define __MAKE_OP(size) \
+ ____MAKE_OP(le##size,u##size,cpu_to_le##size,le##size##_to_cpu) \
+ ____MAKE_OP(be##size,u##size,cpu_to_be##size,be##size##_to_cpu) \
+ ____MAKE_OP(u##size,u##size,,)
+__MAKE_OP(16)
+__MAKE_OP(32)
+__MAKE_OP(64)
+#undef __MAKE_OP
+#undef ____MAKE_OP
+
#endif
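
The generated helpers behave like typed FIELD_GET()/FIELD_PREP(): masks must be contiguous (enforced at compile time by field_multiplier()), constant values that do not fit the field trip the __field_overflow() warning, and the le/be variants take and return the matching __le/__be types. A sketch, with a hypothetical 4-bit MODE field in a little-endian register:

#define MY_CTRL_MODE	GENMASK(7, 4)	/* hypothetical field layout */

static void set_mode(__le32 *reg, u32 mode)
{
	/* read-modify-write of just the MODE bits, in place */
	le32p_replace_bits(reg, mode, MY_CTRL_MODE);
}

static u32 get_mode(__le32 reg)
{
	return le32_get_bits(reg, MY_CTRL_MODE);
}
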
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index e9825ff57b15..69bea82ebeb1 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -660,12 +660,14 @@ static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
struct blkg_rwstat *from)
{
- struct blkg_rwstat v = blkg_rwstat_read(from);
+ u64 sum[BLKG_RWSTAT_NR];
int i;
for (i = 0; i < BLKG_RWSTAT_NR; i++)
- atomic64_add(atomic64_read(&v.aux_cnt[i]) +
- atomic64_read(&from->aux_cnt[i]),
+ sum[i] = percpu_counter_sum_positive(&from->cpu_cnt[i]);
+
+ for (i = 0; i < BLKG_RWSTAT_NR; i++)
+ atomic64_add(sum[i] + atomic64_read(&from->aux_cnt[i]),
&to->aux_cnt[i]);
}
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 95c9a5c862e2..8efcf49796a3 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -51,6 +51,7 @@ struct blk_mq_hw_ctx {
unsigned int queue_num;
atomic_t nr_active;
+ unsigned int nr_expired;
struct hlist_node cpuhp_dead;
struct kobject kobj;
@@ -65,7 +66,7 @@ struct blk_mq_hw_ctx {
#endif
/* Must be the last member - see also blk_mq_hw_ctx_size(). */
- struct srcu_struct queue_rq_srcu[0];
+ struct srcu_struct srcu[0];
};
struct blk_mq_tag_set {
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 9e7d8bd776d2..c5d3db0d83f8 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -39,6 +39,34 @@ typedef u8 __bitwise blk_status_t;
#define BLK_STS_AGAIN ((__force blk_status_t)12)
+/**
+ * blk_path_error - returns true if error may be path related
+ * @error: status the request was completed with
+ *
+ * Description:
+ * This classifies block error status into non-retryable errors and ones
+ * that may be successful if retried on a failover path.
+ *
+ * Return:
+ * %false - retrying failover path will not help
+ * %true - may succeed if retried
+ */
+static inline bool blk_path_error(blk_status_t error)
+{
+ switch (error) {
+ case BLK_STS_NOTSUPP:
+ case BLK_STS_NOSPC:
+ case BLK_STS_TARGET:
+ case BLK_STS_NEXUS:
+ case BLK_STS_MEDIUM:
+ case BLK_STS_PROTECTION:
+ return false;
+ }
+
+ /* Anything else could be a path failure, so should be retried */
+ return true;
+}
+
struct blk_issue_stat {
u64 stat;
};
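
A multipath completion handler would typically use blk_path_error() to separate target errors from path errors; a hedged sketch (the my_* names are hypothetical):

static void my_complete_rq(struct request *rq, blk_status_t error)
{
	if (error && blk_path_error(error)) {
		/* Possibly a path failure: retry on another path. */
		my_failover_requeue(rq);	/* hypothetical helper */
		return;
	}
	blk_mq_end_request(rq, error);
}
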
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0ce8a372d506..4f3df807cf8f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -27,6 +27,8 @@
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>
#include <linux/blkzoned.h>
+#include <linux/seqlock.h>
+#include <linux/u64_stats_sync.h>
struct module;
struct scsi_ioctl_command;
@@ -121,6 +123,12 @@ typedef __u32 __bitwise req_flags_t;
/* Look at ->special_vec for the actual data payload instead of the
bio chain. */
#define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18))
+/* The per-zone write lock is held for this request */
+#define RQF_ZONE_WRITE_LOCKED ((__force req_flags_t)(1 << 19))
+/* timeout has expired */
+#define RQF_MQ_TIMEOUT_EXPIRED ((__force req_flags_t)(1 << 20))
+/* already slept for hybrid poll */
+#define RQF_MQ_POLL_SLEPT ((__force req_flags_t)(1 << 21))
/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \
@@ -133,12 +141,6 @@ typedef __u32 __bitwise req_flags_t;
* especially blk_mq_rq_ctx_init() to take care of the added fields.
*/
struct request {
- struct list_head queuelist;
- union {
- struct __call_single_data csd;
- u64 fifo_time;
- };
-
struct request_queue *q;
struct blk_mq_ctx *mq_ctx;
@@ -148,8 +150,6 @@ struct request {
int internal_tag;
- unsigned long atomic_flags;
-
/* the following two fields are internal, NEVER access directly */
unsigned int __data_len; /* total data len */
int tag;
@@ -158,6 +158,8 @@ struct request {
struct bio *bio;
struct bio *biotail;
+ struct list_head queuelist;
+
/*
* The hash is used inside the scheduler, and killed once the
* request reaches the dispatch list. The ipi_list is only used
@@ -205,19 +207,16 @@ struct request {
struct hd_struct *part;
unsigned long start_time;
struct blk_issue_stat issue_stat;
-#ifdef CONFIG_BLK_CGROUP
- struct request_list *rl; /* rl this rq is alloced from */
- unsigned long long start_time_ns;
- unsigned long long io_start_time_ns; /* when passed to hardware */
-#endif
/* Number of scatter-gather DMA addr+len pairs after
* physical address coalescing is performed.
*/
unsigned short nr_phys_segments;
+
#if defined(CONFIG_BLK_DEV_INTEGRITY)
unsigned short nr_integrity_segments;
#endif
+ unsigned short write_hint;
unsigned short ioprio;
unsigned int timeout;
@@ -226,11 +225,37 @@ struct request {
unsigned int extra_len; /* length of alignment and padding */
- unsigned short write_hint;
+ /*
+ * On blk-mq, the lower bits of ->gstate (generation number and
+ * state) carry the MQ_RQ_* state value and the upper bits the
+ * generation number which is monotonically incremented and used to
+ * distinguish the reuse instances.
+ *
+ * ->gstate_seq allows updates to ->gstate and other fields
+ * (currently ->deadline) during request start to be read
+ * atomically from the timeout path, so that it can operate on a
+ * coherent set of information.
+ */
+ seqcount_t gstate_seq;
+ u64 gstate;
+
+ /*
+ * ->aborted_gstate is used by the timeout to claim a specific
+ * recycle instance of this request. See blk_mq_timeout_work().
+ */
+ struct u64_stats_sync aborted_gstate_sync;
+ u64 aborted_gstate;
+
+ /* access through blk_rq_set_deadline, blk_rq_deadline */
+ unsigned long __deadline;
- unsigned long deadline;
struct list_head timeout_list;
+ union {
+ struct __call_single_data csd;
+ u64 fifo_time;
+ };
+
/*
* completion callback.
*/
@@ -239,6 +264,12 @@ struct request {
/* for bidi */
struct request *next_rq;
+
+#ifdef CONFIG_BLK_CGROUP
+ struct request_list *rl; /* rl this rq is alloced from */
+ unsigned long long start_time_ns;
+ unsigned long long io_start_time_ns; /* when passed to hardware */
+#endif
};
static inline bool blk_op_is_scsi(unsigned int op)
@@ -564,6 +595,22 @@ struct request_queue {
struct queue_limits limits;
/*
+ * Zoned block device information for request dispatch control.
+ * nr_zones is the total number of zones of the device. This is always
+ * 0 for regular block devices. seq_zones_bitmap is a bitmap of nr_zones
+ * bits which indicates if a zone is conventional (bit clear) or
+ * sequential (bit set). seq_zones_wlock is a bitmap of nr_zones
+ * bits which indicates if a zone is write locked, that is, if a write
+ * request targeting the zone was dispatched. All three fields are
+ * initialized by the low level device driver (e.g. scsi/sd.c).
+ * Stacking drivers (device mappers) may or may not initialize
+ * these fields.
+ */
+ unsigned int nr_zones;
+ unsigned long *seq_zones_bitmap;
+ unsigned long *seq_zones_wlock;
+
+ /*
* sg stuff
*/
unsigned int sg_timeout;
@@ -807,6 +854,27 @@ static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
}
+static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
+{
+ return q->nr_zones;
+}
+
+static inline unsigned int blk_queue_zone_no(struct request_queue *q,
+ sector_t sector)
+{
+ if (!blk_queue_is_zoned(q))
+ return 0;
+ return sector >> ilog2(q->limits.chunk_sectors);
+}
+
+static inline bool blk_queue_zone_is_seq(struct request_queue *q,
+ sector_t sector)
+{
+ if (!blk_queue_is_zoned(q) || !q->seq_zones_bitmap)
+ return false;
+ return test_bit(blk_queue_zone_no(q, sector), q->seq_zones_bitmap);
+}
+
static inline bool rq_is_sync(struct request *rq)
{
return op_is_sync(rq->cmd_flags);
@@ -1046,6 +1114,16 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
return blk_rq_cur_bytes(rq) >> 9;
}
+static inline unsigned int blk_rq_zone_no(struct request *rq)
+{
+ return blk_queue_zone_no(rq->q, blk_rq_pos(rq));
+}
+
+static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
+{
+ return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq));
+}
+
/*
* Some commands like WRITE SAME have a payload or data transfer size which
* is different from the size of the request. Any driver that supports such
@@ -1595,7 +1673,15 @@ static inline unsigned int bdev_zone_sectors(struct block_device *bdev)
if (q)
return blk_queue_zone_sectors(q);
+ return 0;
+}
+
+static inline unsigned int bdev_nr_zones(struct block_device *bdev)
+{
+ struct request_queue *q = bdev_get_queue(bdev);
+ if (q)
+ return blk_queue_nr_zones(q);
return 0;
}
@@ -1731,8 +1817,6 @@ static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
int kblockd_schedule_work(struct work_struct *work);
int kblockd_schedule_work_on(int cpu, struct work_struct *work);
-int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
-int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
#ifdef CONFIG_BLK_CGROUP
@@ -1971,6 +2055,60 @@ extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
extern int bdev_read_page(struct block_device *, sector_t, struct page *);
extern int bdev_write_page(struct block_device *, sector_t, struct page *,
struct writeback_control *);
+
+#ifdef CONFIG_BLK_DEV_ZONED
+bool blk_req_needs_zone_write_lock(struct request *rq);
+void __blk_req_zone_write_lock(struct request *rq);
+void __blk_req_zone_write_unlock(struct request *rq);
+
+static inline void blk_req_zone_write_lock(struct request *rq)
+{
+ if (blk_req_needs_zone_write_lock(rq))
+ __blk_req_zone_write_lock(rq);
+}
+
+static inline void blk_req_zone_write_unlock(struct request *rq)
+{
+ if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
+ __blk_req_zone_write_unlock(rq);
+}
+
+static inline bool blk_req_zone_is_write_locked(struct request *rq)
+{
+ return rq->q->seq_zones_wlock &&
+ test_bit(blk_rq_zone_no(rq), rq->q->seq_zones_wlock);
+}
+
+static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
+{
+ if (!blk_req_needs_zone_write_lock(rq))
+ return true;
+ return !blk_req_zone_is_write_locked(rq);
+}
+#else
+static inline bool blk_req_needs_zone_write_lock(struct request *rq)
+{
+ return false;
+}
+
+static inline void blk_req_zone_write_lock(struct request *rq)
+{
+}
+
+static inline void blk_req_zone_write_unlock(struct request *rq)
+{
+}
+static inline bool blk_req_zone_is_write_locked(struct request *rq)
+{
+ return false;
+}
+
+static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
+{
+ return true;
+}
+#endif /* CONFIG_BLK_DEV_ZONED */
+
#else /* CONFIG_BLOCK */
struct block_device;
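
The zone write lock helpers above are meant to bracket dispatch so that at most one write per sequential zone is in flight; a sketch of the dispatch/completion pairing (my_* names hypothetical, error paths elided):

static blk_status_t my_queue_rq(struct request *rq)
{
	/* Only one write may be in flight per sequential zone. */
	if (!blk_req_can_dispatch_to_zone(rq))
		return BLK_STS_RESOURCE;

	blk_req_zone_write_lock(rq);
	/* ... hand rq to the hardware ... */
	return BLK_STS_OK;
}

static void my_complete_rq(struct request *rq)
{
	blk_req_zone_write_unlock(rq);	/* no-op unless the lock is held */
}
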
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 8b1bf8d3d4a2..58a82f58e44e 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -151,7 +151,6 @@ void buffer_check_dirty_writeback(struct page *page,
void mark_buffer_dirty(struct buffer_head *bh);
void mark_buffer_write_io_error(struct buffer_head *bh);
-void init_buffer(struct buffer_head *, bh_end_io_t *, void *);
void touch_buffer(struct buffer_head *bh);
void set_bh_page(struct buffer_head *bh,
struct page *page, unsigned long offset);
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index ec8a4d7af6bd..fe7a22dd133b 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -125,4 +125,13 @@ static inline bool bvec_iter_rewind(const struct bio_vec *bv,
((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \
bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
+/* for iterating one bio from start to end */
+#define BVEC_ITER_ALL_INIT (struct bvec_iter) \
+{ \
+ .bi_sector = 0, \
+ .bi_size = UINT_MAX, \
+ .bi_idx = 0, \
+ .bi_bvec_done = 0, \
+}
+
#endif /* __LINUX_BVEC_ITER_H */
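
Because BVEC_ITER_ALL_INIT sets bi_size to UINT_MAX, it does not terminate iteration by itself; the caller supplies the bound, here the segment count. A sketch of walking a bio one owns from its very first segment, regardless of how far bi_iter has already advanced:

static void walk_from_start(struct bio *bio)
{
	struct bvec_iter iter = BVEC_ITER_ALL_INIT;
	struct bio_vec bv;
	unsigned int i;

	for (i = 0; i < bio->bi_vcnt; i++) {	/* caller-supplied bound */
		bv = bvec_iter_bvec(bio->bi_io_vec, iter);
		/* ... use bv.bv_page, bv.bv_offset, bv.bv_len ... */
		bvec_iter_advance(bio->bi_io_vec, &iter, bv.bv_len);
	}
}
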
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 8b7fd8eeccee..9f242b876fde 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -561,7 +561,7 @@ struct cftype {
/*
* Control Group subsystem type.
- * See Documentation/cgroups/cgroups.txt for details
+ * See Documentation/cgroup-v1/cgroups.txt for details
*/
struct cgroup_subsys {
struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 0fc36406f32c..8a9643857c4a 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -157,6 +157,104 @@ struct compat_sigaction {
compat_sigset_t sa_mask __packed;
};
+typedef union compat_sigval {
+ compat_int_t sival_int;
+ compat_uptr_t sival_ptr;
+} compat_sigval_t;
+
+typedef struct compat_siginfo {
+ int si_signo;
+#ifndef __ARCH_HAS_SWAPPED_SIGINFO
+ int si_errno;
+ int si_code;
+#else
+ int si_code;
+ int si_errno;
+#endif
+
+ union {
+ int _pad[128/sizeof(int) - 3];
+
+ /* kill() */
+ struct {
+ compat_pid_t _pid; /* sender's pid */
+ __compat_uid32_t _uid; /* sender's uid */
+ } _kill;
+
+ /* POSIX.1b timers */
+ struct {
+ compat_timer_t _tid; /* timer id */
+ int _overrun; /* overrun count */
+ compat_sigval_t _sigval; /* same as below */
+ } _timer;
+
+ /* POSIX.1b signals */
+ struct {
+ compat_pid_t _pid; /* sender's pid */
+ __compat_uid32_t _uid; /* sender's uid */
+ compat_sigval_t _sigval;
+ } _rt;
+
+ /* SIGCHLD */
+ struct {
+ compat_pid_t _pid; /* which child */
+ __compat_uid32_t _uid; /* sender's uid */
+ int _status; /* exit code */
+ compat_clock_t _utime;
+ compat_clock_t _stime;
+ } _sigchld;
+
+#ifdef CONFIG_X86_X32_ABI
+ /* SIGCHLD (x32 version) */
+ struct {
+ compat_pid_t _pid; /* which child */
+ __compat_uid32_t _uid; /* sender's uid */
+ int _status; /* exit code */
+ compat_s64 _utime;
+ compat_s64 _stime;
+ } _sigchld_x32;
+#endif
+
+ /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */
+ struct {
+ compat_uptr_t _addr; /* faulting insn/memory ref. */
+#ifdef __ARCH_SI_TRAPNO
+ int _trapno; /* TRAP # which caused the signal */
+#endif
+ union {
+ /*
+ * used when si_code is BUS_MCEERR_AR
+ * or BUS_MCEERR_AO
+ */
+ short int _addr_lsb; /* Valid LSB of the reported address. */
+ /* used when si_code=SEGV_BNDERR */
+ struct {
+ short _dummy_bnd;
+ compat_uptr_t _lower;
+ compat_uptr_t _upper;
+ } _addr_bnd;
+ /* used when si_code=SEGV_PKUERR */
+ struct {
+ short _dummy_pkey;
+ u32 _pkey;
+ } _addr_pkey;
+ };
+ } _sigfault;
+
+ /* SIGPOLL */
+ struct {
+ compat_long_t _band; /* POLL_IN, POLL_OUT, POLL_MSG */
+ int _fd;
+ } _sigpoll;
+
+ struct {
+ compat_uptr_t _call_addr; /* calling user insn */
+ int _syscall; /* triggering system call number */
+ unsigned int _arch; /* AUDIT_ARCH_* of syscall */
+ } _sigsys;
+ } _sifields;
+} compat_siginfo_t;
+
/*
* These functions operate on 32- or 64-bit specs depending on
* COMPAT_USE_64BIT_TIME, hence the void user pointer arguments.
@@ -412,7 +510,7 @@ long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
unsigned long bitmap_size);
long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
unsigned long bitmap_size);
-int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from);
+int copy_siginfo_from_user32(siginfo_t *to, const struct compat_siginfo __user *from);
int copy_siginfo_to_user32(struct compat_siginfo __user *to, const siginfo_t *from);
int get_compat_sigevent(struct sigevent *event,
const struct compat_sigevent __user *u_event);
diff --git a/include/linux/cper.h b/include/linux/cper.h
index 723e952fde0d..d14ef4e77c8a 100644
--- a/include/linux/cper.h
+++ b/include/linux/cper.h
@@ -275,6 +275,50 @@ enum {
#define CPER_ARM_INFO_FLAGS_PROPAGATED BIT(2)
#define CPER_ARM_INFO_FLAGS_OVERFLOW BIT(3)
+#define CPER_ARM_CACHE_ERROR 0
+#define CPER_ARM_TLB_ERROR 1
+#define CPER_ARM_BUS_ERROR 2
+#define CPER_ARM_VENDOR_ERROR 3
+#define CPER_ARM_MAX_TYPE CPER_ARM_VENDOR_ERROR
+
+#define CPER_ARM_ERR_VALID_TRANSACTION_TYPE BIT(0)
+#define CPER_ARM_ERR_VALID_OPERATION_TYPE BIT(1)
+#define CPER_ARM_ERR_VALID_LEVEL BIT(2)
+#define CPER_ARM_ERR_VALID_PROC_CONTEXT_CORRUPT BIT(3)
+#define CPER_ARM_ERR_VALID_CORRECTED BIT(4)
+#define CPER_ARM_ERR_VALID_PRECISE_PC BIT(5)
+#define CPER_ARM_ERR_VALID_RESTARTABLE_PC BIT(6)
+#define CPER_ARM_ERR_VALID_PARTICIPATION_TYPE BIT(7)
+#define CPER_ARM_ERR_VALID_TIME_OUT BIT(8)
+#define CPER_ARM_ERR_VALID_ADDRESS_SPACE BIT(9)
+#define CPER_ARM_ERR_VALID_MEM_ATTRIBUTES BIT(10)
+#define CPER_ARM_ERR_VALID_ACCESS_MODE BIT(11)
+
+#define CPER_ARM_ERR_TRANSACTION_SHIFT 16
+#define CPER_ARM_ERR_TRANSACTION_MASK GENMASK(1, 0)
+#define CPER_ARM_ERR_OPERATION_SHIFT 18
+#define CPER_ARM_ERR_OPERATION_MASK GENMASK(3, 0)
+#define CPER_ARM_ERR_LEVEL_SHIFT 22
+#define CPER_ARM_ERR_LEVEL_MASK GENMASK(2, 0)
+#define CPER_ARM_ERR_PC_CORRUPT_SHIFT 25
+#define CPER_ARM_ERR_PC_CORRUPT_MASK GENMASK(0, 0)
+#define CPER_ARM_ERR_CORRECTED_SHIFT 26
+#define CPER_ARM_ERR_CORRECTED_MASK GENMASK(0, 0)
+#define CPER_ARM_ERR_PRECISE_PC_SHIFT 27
+#define CPER_ARM_ERR_PRECISE_PC_MASK GENMASK(0, 0)
+#define CPER_ARM_ERR_RESTARTABLE_PC_SHIFT 28
+#define CPER_ARM_ERR_RESTARTABLE_PC_MASK GENMASK(0, 0)
+#define CPER_ARM_ERR_PARTICIPATION_TYPE_SHIFT 29
+#define CPER_ARM_ERR_PARTICIPATION_TYPE_MASK GENMASK(1, 0)
+#define CPER_ARM_ERR_TIME_OUT_SHIFT 31
+#define CPER_ARM_ERR_TIME_OUT_MASK GENMASK(0, 0)
+#define CPER_ARM_ERR_ADDRESS_SPACE_SHIFT 32
+#define CPER_ARM_ERR_ADDRESS_SPACE_MASK GENMASK(1, 0)
+#define CPER_ARM_ERR_MEM_ATTRIBUTES_SHIFT 34
+#define CPER_ARM_ERR_MEM_ATTRIBUTES_MASK GENMASK(8, 0)
+#define CPER_ARM_ERR_ACCESS_MODE_SHIFT 43
+#define CPER_ARM_ERR_ACCESS_MODE_MASK GENMASK(0, 0)
+
/*
* All tables and structs must be byte-packed to match CPER
* specification, since the tables are provided by the system BIOS
@@ -494,6 +538,8 @@ struct cper_sec_pcie {
/* Reset to default packing */
#pragma pack()
+extern const char * const cper_proc_error_type_strs[4];
+
u64 cper_next_record_id(void);
const char *cper_severity_str(unsigned int);
const char *cper_mem_err_type_str(unsigned int);
@@ -503,5 +549,7 @@ void cper_mem_err_pack(const struct cper_sec_mem_err *,
struct cper_mem_err_compact *);
const char *cper_mem_err_unpack(struct trace_seq *,
struct cper_mem_err_compact *);
+void cper_print_proc_arm(const char *pfx,
+ const struct cper_sec_proc_arm *proc);
#endif
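
The SHIFT/MASK pairs above decode the packed error_info word of a CPER ARM error record; each mask is applied after shifting down, gated by the matching VALID bit in the same word. For example:

static u32 arm_err_transaction_type(u64 error_info)
{
	if (!(error_info & CPER_ARM_ERR_VALID_TRANSACTION_TYPE))
		return UINT_MAX;	/* field not reported */
	return (error_info >> CPER_ARM_ERR_TRANSACTION_SHIFT) &
	       CPER_ARM_ERR_TRANSACTION_MASK;
}
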
diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h
index d4292ebc5c8b..de0dafb9399d 100644
--- a/include/linux/cpu_cooling.h
+++ b/include/linux/cpu_cooling.h
@@ -30,9 +30,6 @@
struct cpufreq_policy;
-typedef int (*get_static_t)(cpumask_t *cpumask, int interval,
- unsigned long voltage, u32 *power);
-
#ifdef CONFIG_CPU_THERMAL
/**
* cpufreq_cooling_register - function to create cpufreq cooling device.
@@ -41,43 +38,6 @@ typedef int (*get_static_t)(cpumask_t *cpumask, int interval,
struct thermal_cooling_device *
cpufreq_cooling_register(struct cpufreq_policy *policy);
-struct thermal_cooling_device *
-cpufreq_power_cooling_register(struct cpufreq_policy *policy,
- u32 capacitance, get_static_t plat_static_func);
-
-/**
- * of_cpufreq_cooling_register - create cpufreq cooling device based on DT.
- * @np: a valid struct device_node to the cooling device device tree node.
- * @policy: cpufreq policy.
- */
-#ifdef CONFIG_THERMAL_OF
-struct thermal_cooling_device *
-of_cpufreq_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy);
-
-struct thermal_cooling_device *
-of_cpufreq_power_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy,
- u32 capacitance,
- get_static_t plat_static_func);
-#else
-static inline struct thermal_cooling_device *
-of_cpufreq_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy)
-{
- return ERR_PTR(-ENOSYS);
-}
-
-static inline struct thermal_cooling_device *
-of_cpufreq_power_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy,
- u32 capacitance,
- get_static_t plat_static_func)
-{
- return NULL;
-}
-#endif
-
/**
* cpufreq_cooling_unregister - function to remove cpufreq cooling device.
* @cdev: thermal cooling device pointer.
@@ -90,34 +50,27 @@ cpufreq_cooling_register(struct cpufreq_policy *policy)
{
return ERR_PTR(-ENOSYS);
}
-static inline struct thermal_cooling_device *
-cpufreq_power_cooling_register(struct cpufreq_policy *policy,
- u32 capacitance, get_static_t plat_static_func)
-{
- return NULL;
-}
-static inline struct thermal_cooling_device *
-of_cpufreq_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy)
+static inline
+void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
- return ERR_PTR(-ENOSYS);
+ return;
}
+#endif /* CONFIG_CPU_THERMAL */
+#if defined(CONFIG_THERMAL_OF) && defined(CONFIG_CPU_THERMAL)
+/**
+ * of_cpufreq_cooling_register - create cpufreq cooling device based on DT.
+ * @policy: cpufreq policy.
+ */
+struct thermal_cooling_device *
+of_cpufreq_cooling_register(struct cpufreq_policy *policy);
+#else
static inline struct thermal_cooling_device *
-of_cpufreq_power_cooling_register(struct device_node *np,
- struct cpufreq_policy *policy,
- u32 capacitance,
- get_static_t plat_static_func)
+of_cpufreq_cooling_register(struct cpufreq_policy *policy)
{
return NULL;
}
-
-static inline
-void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
-{
- return;
-}
-#endif /* CONFIG_CPU_THERMAL */
+#endif /* defined(CONFIG_THERMAL_OF) && defined(CONFIG_CPU_THERMAL) */
#endif /* __CPU_COOLING_H__ */
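
With the static-power callback and the device_node argument gone, a cpufreq driver's ->ready() hook reduces to roughly the following sketch (cooling-device storage elided; my_cpufreq_ready is hypothetical):

static void my_cpufreq_ready(struct cpufreq_policy *policy)
{
	struct thermal_cooling_device *cdev;

	cdev = of_cpufreq_cooling_register(policy);
	if (IS_ERR_OR_NULL(cdev))
		return;		/* no DT cooling map, or THERMAL_OF disabled */
	/* remember cdev for cpufreq_cooling_unregister() on teardown */
}
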
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 1a32e558eb11..2c787c5cad3d 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -109,6 +109,7 @@ enum cpuhp_state {
CPUHP_AP_PERF_XTENSA_STARTING,
CPUHP_AP_PERF_METAG_STARTING,
CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
+ CPUHP_AP_ARM_SDEI_STARTING,
CPUHP_AP_ARM_VFP_STARTING,
CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 8f7788d23b57..871f9e21810c 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -257,22 +257,30 @@ static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
{return 0;}
#endif
-#define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx) \
-({ \
- int __ret; \
- \
- if (!idx) { \
- cpu_do_idle(); \
- return idx; \
- } \
- \
- __ret = cpu_pm_enter(); \
- if (!__ret) { \
- __ret = low_level_idle_enter(idx); \
- cpu_pm_exit(); \
- } \
- \
- __ret ? -1 : idx; \
+#define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, is_retention) \
+({ \
+ int __ret = 0; \
+ \
+ if (!idx) { \
+ cpu_do_idle(); \
+ return idx; \
+ } \
+ \
+ if (!is_retention) \
+ __ret = cpu_pm_enter(); \
+ if (!__ret) { \
+ __ret = low_level_idle_enter(idx); \
+ if (!is_retention) \
+ cpu_pm_exit(); \
+ } \
+ \
+ __ret ? -1 : idx; \
})
+#define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx) \
+ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, 0)
+
+#define CPU_PM_CPU_IDLE_ENTER_RETENTION(low_level_idle_enter, idx) \
+ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, 1)
+
#endif /* _LINUX_CPUIDLE_H */
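
The retention variant skips cpu_pm_enter()/cpu_pm_exit(), for idle states that keep all CPU context and therefore need no save/restore notifications. A platform enter hook would use it like this (my_fw_suspend is a hypothetical firmware call):

static int my_enter_retention(struct cpuidle_device *dev,
			      struct cpuidle_driver *drv, int idx)
{
	/* No CPU PM notifiers: this state retains all CPU context. */
	return CPU_PM_CPU_IDLE_ENTER_RETENTION(my_fw_suspend, idx);
}
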
diff --git a/include/linux/crc-ccitt.h b/include/linux/crc-ccitt.h
index cd4f420231ba..72c92c396bb8 100644
--- a/include/linux/crc-ccitt.h
+++ b/include/linux/crc-ccitt.h
@@ -5,12 +5,19 @@
#include <linux/types.h>
extern u16 const crc_ccitt_table[256];
+extern u16 const crc_ccitt_false_table[256];
extern u16 crc_ccitt(u16 crc, const u8 *buffer, size_t len);
+extern u16 crc_ccitt_false(u16 crc, const u8 *buffer, size_t len);
static inline u16 crc_ccitt_byte(u16 crc, const u8 c)
{
return (crc >> 8) ^ crc_ccitt_table[(crc ^ c) & 0xff];
}
+static inline u16 crc_ccitt_false_byte(u16 crc, const u8 c)
+{
+ return (crc << 8) ^ crc_ccitt_false_table[(crc >> 8) ^ c];
+}
+
#endif /* _LINUX_CRC_CCITT_H */
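
The FALSE variant shifts in the opposite direction from plain crc_ccitt() and is conventionally seeded with 0xffff (the CRC-16/CCITT-FALSE preset):

static u16 frame_crc(const u8 *buf, size_t len)
{
	return crc_ccitt_false(0xffff, buf, len);
}
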
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 78508ca4b108..7e6e84cf6383 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -107,8 +107,16 @@
#define CRYPTO_ALG_INTERNAL 0x00002000
/*
+ * Set if the algorithm has a ->setkey() method but can be used without
+ * calling it first, i.e. there is a default key.
+ */
+#define CRYPTO_ALG_OPTIONAL_KEY 0x00004000
+
+/*
* Transform masks and values (for crt_flags).
*/
+#define CRYPTO_TFM_NEED_KEY 0x00000001
+
#define CRYPTO_TFM_REQ_MASK 0x000fff00
#define CRYPTO_TFM_RES_MASK 0xfff00000
@@ -447,7 +455,7 @@ struct crypto_alg {
unsigned int cra_alignmask;
int cra_priority;
- atomic_t cra_refcnt;
+ refcount_t cra_refcnt;
char cra_name[CRYPTO_MAX_ALG_NAME];
char cra_driver_name[CRYPTO_MAX_ALG_NAME];
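
CRYPTO_ALG_OPTIONAL_KEY is set by an algorithm that has a usable default key; for everything else the API layers set CRYPTO_TFM_NEED_KEY when the transform is created and clear it in ->setkey(), so that use of an unkeyed transform fails. A sketch of the observable effect for a hash transform (assumed behavior of the hash layer):

static int my_check_keyed(struct crypto_shash *tfm)
{
	/* Keyed algorithm, no key set yet: digesting would be refused. */
	if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;	/* call crypto_shash_setkey() first */
	return 0;
}
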
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index a5538433c927..da83f64952e7 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -28,6 +28,7 @@ enum dm_queue_mode {
DM_TYPE_REQUEST_BASED = 2,
DM_TYPE_MQ_REQUEST_BASED = 3,
DM_TYPE_DAX_BIO_BASED = 4,
+ DM_TYPE_NVME_BIO_BASED = 5,
};
typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
@@ -221,14 +222,6 @@ struct target_type {
#define dm_target_is_wildcard(type) ((type)->features & DM_TARGET_WILDCARD)
/*
- * Some targets need to be sent the same WRITE bio severals times so
- * that they can send copies of it to different devices. This function
- * examines any supplied bio and returns the number of copies of it the
- * target requires.
- */
-typedef unsigned (*dm_num_write_bios_fn) (struct dm_target *ti, struct bio *bio);
-
-/*
* A target implements own bio data integrity.
*/
#define DM_TARGET_INTEGRITY 0x00000010
@@ -291,13 +284,6 @@ struct dm_target {
*/
unsigned per_io_data_size;
- /*
- * If defined, this function is called to find out how many
- * duplicate bios should be sent to the target when writing
- * data.
- */
- dm_num_write_bios_fn num_write_bios;
-
/* target specific data */
void *private;
@@ -329,35 +315,9 @@ struct dm_target_callbacks {
int (*congested_fn) (struct dm_target_callbacks *, int);
};
-/*
- * For bio-based dm.
- * One of these is allocated for each bio.
- * This structure shouldn't be touched directly by target drivers.
- * It is here so that we can inline dm_per_bio_data and
- * dm_bio_from_per_bio_data
- */
-struct dm_target_io {
- struct dm_io *io;
- struct dm_target *ti;
- unsigned target_bio_nr;
- unsigned *len_ptr;
- struct bio clone;
-};
-
-static inline void *dm_per_bio_data(struct bio *bio, size_t data_size)
-{
- return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
-}
-
-static inline struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
-{
- return (struct bio *)((char *)data + data_size + offsetof(struct dm_target_io, clone));
-}
-
-static inline unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
-{
- return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
-}
+void *dm_per_bio_data(struct bio *bio, size_t data_size);
+struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
+unsigned dm_bio_get_target_bio_nr(const struct bio *bio);
int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);
@@ -500,6 +460,11 @@ void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type);
int dm_table_complete(struct dm_table *t);
/*
+ * Destroy the table when finished.
+ */
+void dm_table_destroy(struct dm_table *t);
+
+/*
* Target may require that it is never sent I/O larger than len.
*/
int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);
@@ -585,6 +550,7 @@ do { \
#define DM_ENDIO_DONE 0
#define DM_ENDIO_INCOMPLETE 1
#define DM_ENDIO_REQUEUE 2
+#define DM_ENDIO_DELAY_REQUEUE 3
/*
* Definitions of return values from target map function.
@@ -592,7 +558,7 @@ do { \
#define DM_MAPIO_SUBMITTED 0
#define DM_MAPIO_REMAPPED 1
#define DM_MAPIO_REQUEUE DM_ENDIO_REQUEUE
-#define DM_MAPIO_DELAY_REQUEUE 3
+#define DM_MAPIO_DELAY_REQUEUE DM_ENDIO_DELAY_REQUEUE
#define DM_MAPIO_KILL 4
#define dm_sector_div64(x, y)( \
diff --git a/include/linux/device.h b/include/linux/device.h
index 9d32000725da..46ac622e5c6f 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -575,6 +575,9 @@ ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
#define DEVICE_ATTR(_name, _mode, _show, _store) \
struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
+#define DEVICE_ATTR_PREALLOC(_name, _mode, _show, _store) \
+ struct device_attribute dev_attr_##_name = \
+ __ATTR_PREALLOC(_name, _mode, _show, _store)
#define DEVICE_ATTR_RW(_name) \
struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
#define DEVICE_ATTR_RO(_name) \
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 79f27d60ec66..085db2fee2d7 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -301,7 +301,7 @@ struct dma_buf {
struct dma_fence_cb cb;
wait_queue_head_t *poll;
- unsigned long active;
+ __poll_t active;
} cb_excl, cb_shared;
};
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
new file mode 100644
index 000000000000..bcdb1a3e4b1f
--- /dev/null
+++ b/include/linux/dma-direct.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_DMA_DIRECT_H
+#define _LINUX_DMA_DIRECT_H 1
+
+#include <linux/dma-mapping.h>
+
+#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
+#include <asm/dma-direct.h>
+#else
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ dma_addr_t dev_addr = (dma_addr_t)paddr;
+
+ return dev_addr - ((dma_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
+{
+ phys_addr_t paddr = (phys_addr_t)dev_addr;
+
+ return paddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
+}
+
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+ if (!dev->dma_mask)
+ return false;
+
+ return addr + size - 1 <= *dev->dma_mask;
+}
+#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
+
+#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN
+void dma_mark_clean(void *addr, size_t size);
+#else
+static inline void dma_mark_clean(void *addr, size_t size)
+{
+}
+#endif /* CONFIG_ARCH_HAS_DMA_MARK_CLEAN */
+
+void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, unsigned long attrs);
+void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_addr, unsigned long attrs);
+int dma_direct_supported(struct device *dev, u64 mask);
+
+#endif /* _LINUX_DMA_DIRECT_H */
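
For the generic fallbacks, the translation is a constant PFN offset in each direction. A worked example, assuming a hypothetical device with dev->dma_pfn_offset = 0x80000 (a 2 GiB offset at 4 KiB pages):

static void dma_direct_demo(struct device *dev)
{
	/* assumes dev->dma_pfn_offset == 0x80000 */
	dma_addr_t dev_addr = phys_to_dma(dev, 0xc0000000);	/* 0x40000000 */
	phys_addr_t paddr = dma_to_phys(dev, dev_addr);		/* 0xc0000000 */

	WARN_ON(paddr != 0xc0000000);
	WARN_ON(!dma_capable(dev, dev_addr, SZ_4K));
}
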
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 81ed9b2d84dc..34fe8463d10e 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -136,7 +136,7 @@ struct dma_map_ops {
int is_phys;
};
-extern const struct dma_map_ops dma_noop_ops;
+extern const struct dma_map_ops dma_direct_ops;
extern const struct dma_map_ops dma_virt_ops;
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
@@ -513,10 +513,18 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size,
void *cpu_addr;
BUG_ON(!ops);
+ WARN_ON_ONCE(dev && !dev->coherent_dma_mask);
if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
return cpu_addr;
+ /*
+ * Let the implementation decide on the zone to allocate from, and on
+ * how to zero the memory, given that the memory returned should always
+ * be zeroed.
+ */
+ flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM | __GFP_ZERO);
+
if (!arch_dma_alloc_attrs(&dev, &flag))
return NULL;
if (!ops->alloc)
@@ -568,6 +576,14 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
return 0;
}
+/*
+ * This is a hack for the legacy x86 forbid_dac and iommu_sac_force. Please
+ * don't use this in new code.
+ */
+#ifndef arch_dma_supported
+#define arch_dma_supported(dev, mask) (1)
+#endif
+
static inline void dma_check_mask(struct device *dev, u64 mask)
{
if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
@@ -580,6 +596,9 @@ static inline int dma_supported(struct device *dev, u64 mask)
if (!ops)
return 0;
+ if (!arch_dma_supported(dev, mask))
+ return 0;
+
if (!ops->dma_supported)
return 1;
return ops->dma_supported(dev, mask);
@@ -692,7 +711,7 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
- return *dev->dma_mask >> PAGE_SHIFT;
+ return (*dev->dma_mask >> PAGE_SHIFT) + dev->dma_pfn_offset;
}
#endif
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 29fdf8029cf6..f5083aa72eae 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -475,6 +475,39 @@ typedef struct {
u64 get_all;
} apple_properties_protocol_64_t;
+typedef struct {
+ u32 get_capability;
+ u32 get_event_log;
+ u32 hash_log_extend_event;
+ u32 submit_command;
+ u32 get_active_pcr_banks;
+ u32 set_active_pcr_banks;
+ u32 get_result_of_set_active_pcr_banks;
+} efi_tcg2_protocol_32_t;
+
+typedef struct {
+ u64 get_capability;
+ u64 get_event_log;
+ u64 hash_log_extend_event;
+ u64 submit_command;
+ u64 get_active_pcr_banks;
+ u64 set_active_pcr_banks;
+ u64 get_result_of_set_active_pcr_banks;
+} efi_tcg2_protocol_64_t;
+
+typedef u32 efi_tcg2_event_log_format;
+
+typedef struct {
+ void *get_capability;
+ efi_status_t (*get_event_log)(efi_handle_t, efi_tcg2_event_log_format,
+ efi_physical_addr_t *, efi_physical_addr_t *, efi_bool_t *);
+ void *hash_log_extend_event;
+ void *submit_command;
+ void *get_active_pcr_banks;
+ void *set_active_pcr_banks;
+ void *get_result_of_set_active_pcr_banks;
+} efi_tcg2_protocol_t;
+
/*
* Types and defines for EFI ResetSystem
*/
@@ -625,6 +658,7 @@ void efi_native_runtime_setup(void);
#define EFI_MEMORY_ATTRIBUTES_TABLE_GUID EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20)
#define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
#define APPLE_PROPERTIES_PROTOCOL_GUID EFI_GUID(0x91bd12fe, 0xf6c3, 0x44fb, 0xa5, 0xb7, 0x51, 0x22, 0xab, 0x30, 0x3a, 0xe0)
+#define EFI_TCG2_PROTOCOL_GUID EFI_GUID(0x607f766c, 0x7455, 0x42be, 0x93, 0x0b, 0xe4, 0xd7, 0x6d, 0xb2, 0x72, 0x0f)
#define EFI_IMAGE_SECURITY_DATABASE_GUID EFI_GUID(0xd719b2cb, 0x3d3a, 0x4596, 0xa3, 0xbc, 0xda, 0xd0, 0x0e, 0x67, 0x65, 0x6f)
#define EFI_SHIM_LOCK_GUID EFI_GUID(0x605dab50, 0xe046, 0x4300, 0xab, 0xb6, 0x3d, 0xd8, 0x10, 0xdd, 0x8b, 0x23)
@@ -637,6 +671,7 @@ void efi_native_runtime_setup(void);
#define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95)
#define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f)
#define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b)
+#define LINUX_EFI_TPM_EVENT_LOG_GUID EFI_GUID(0xb7799cb0, 0xeca2, 0x4943, 0x96, 0x67, 0x1f, 0xae, 0x07, 0xb7, 0x47, 0xfa)
typedef struct {
efi_guid_t guid;
@@ -911,6 +946,7 @@ extern struct efi {
unsigned long properties_table; /* properties table */
unsigned long mem_attr_table; /* memory attributes table */
unsigned long rng_seed; /* UEFI firmware random seed */
+ unsigned long tpm_log; /* TPM2 Event Log table */
efi_get_time_t *get_time;
efi_set_time_t *set_time;
efi_get_wakeup_time_t *get_wakeup_time;
@@ -1536,6 +1572,8 @@ static inline void
efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg) { }
#endif
+void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table);
+
/*
* Arch code can implement the following three template macros, avoiding
* repetition for the void/non-void return cases of {__,}efi_call_virt():
@@ -1603,4 +1641,12 @@ struct linux_efi_random_seed {
u8 bits[];
};
+struct linux_efi_tpm_eventlog {
+ u32 size;
+ u8 version;
+ u8 log[];
+};
+
+extern int efi_tpm_eventlog_init(void);
+
#endif /* _LINUX_EFI_H */
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 3d794b3dc532..6d9e230dffd2 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -198,8 +198,6 @@ extern bool elv_attempt_insert_merge(struct request_queue *, struct request *);
extern void elv_requeue_request(struct request_queue *, struct request *);
extern struct request *elv_former_request(struct request_queue *, struct request *);
extern struct request *elv_latter_request(struct request_queue *, struct request *);
-extern int elv_register_queue(struct request_queue *q);
-extern void elv_unregister_queue(struct request_queue *q);
extern int elv_may_queue(struct request_queue *, unsigned int);
extern void elv_completed_request(struct request_queue *, struct request *);
extern int elv_set_request(struct request_queue *q, struct request *rq,
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index 60b2985e8a18..7094718b653b 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -26,18 +26,16 @@
#define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
#define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE)
+struct eventfd_ctx;
struct file;
#ifdef CONFIG_EVENTFD
-struct file *eventfd_file_create(unsigned int count, int flags);
-struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx);
void eventfd_ctx_put(struct eventfd_ctx *ctx);
struct file *eventfd_fget(int fd);
struct eventfd_ctx *eventfd_ctx_fdget(int fd);
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
-ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt);
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
__u64 *cnt);
@@ -47,10 +45,6 @@ int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *w
* Ugly ugly ugly error layer to support modules that use eventfd but
* pretend to work in !CONFIG_EVENTFD configurations. Namely, AIO.
*/
-static inline struct file *eventfd_file_create(unsigned int count, int flags)
-{
- return ERR_PTR(-ENOSYS);
-}
static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd)
{
@@ -67,12 +61,6 @@ static inline void eventfd_ctx_put(struct eventfd_ctx *ctx)
}
-static inline ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait,
- __u64 *cnt)
-{
- return -ENOSYS;
-}
-
static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx,
wait_queue_entry_t *wait, __u64 *cnt)
{
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 43e98d30d2df..58aecb60ea51 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -117,6 +117,7 @@ struct f2fs_super_block {
/*
* For checkpoint
*/
+#define CP_NOCRC_RECOVERY_FLAG 0x00000200
#define CP_TRIMMED_FLAG 0x00000100
#define CP_NAT_BITS_FLAG 0x00000080
#define CP_CRC_RECOVERY_FLAG 0x00000040
@@ -212,6 +213,7 @@ struct f2fs_extent {
#define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */
#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries */
#define F2FS_EXTRA_ATTR 0x20 /* file having extra attribute */
+#define F2FS_PIN_FILE 0x40 /* file should not be gced */
struct f2fs_inode {
__le16 i_mode; /* file mode */
@@ -229,7 +231,13 @@ struct f2fs_inode {
__le32 i_ctime_nsec; /* change time in nano scale */
__le32 i_mtime_nsec; /* modification time in nano scale */
__le32 i_generation; /* file version (for NFS) */
- __le32 i_current_depth; /* only for directory depth */
+ union {
+ __le32 i_current_depth; /* only for directory depth */
+ __le16 i_gc_failures; /*
+ * # of gc failures on pinned file.
+ * only for regular files.
+ */
+ };
__le32 i_xattr_nid; /* nid to save xattr */
__le32 i_flags; /* file attributes */
__le32 i_pino; /* parent inode number */
@@ -245,8 +253,10 @@ struct f2fs_inode {
__le16 i_inline_xattr_size; /* inline xattr size, unit: 4 bytes */
__le32 i_projid; /* project id */
__le32 i_inode_checksum;/* inode meta checksum */
+ __le64 i_crtime; /* creation time */
+ __le32 i_crtime_nsec; /* creation time in nano scale */
__le32 i_extra_end[0]; /* for attribute size calculation */
- };
+ } __packed;
__le32 i_addr[DEF_ADDRS_PER_INODE]; /* Pointers to data blocks */
};
__le32 i_nid[DEF_NIDS_PER_INODE]; /* direct(2), indirect(2),
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 511fbaabf624..2a815560fda0 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -639,7 +639,7 @@ struct inode {
struct hlist_head i_dentry;
struct rcu_head i_rcu;
};
- u64 i_version;
+ atomic64_t i_version;
atomic_t i_count;
atomic_t i_dio_count;
atomic_t i_writecount;
@@ -748,6 +748,11 @@ static inline void inode_lock_nested(struct inode *inode, unsigned subclass)
down_write_nested(&inode->i_rwsem, subclass);
}
+static inline void inode_lock_shared_nested(struct inode *inode, unsigned subclass)
+{
+ down_read_nested(&inode->i_rwsem, subclass);
+}
+
void lock_two_nondirectories(struct inode *, struct inode*);
void unlock_two_nondirectories(struct inode *, struct inode*);
@@ -1359,7 +1364,7 @@ struct super_block {
const struct fscrypt_operations *s_cop;
- struct hlist_bl_head s_anon; /* anonymous dentries for (nfs) exporting */
+ struct hlist_bl_head s_roots; /* alternate root dentries for NFS */
struct list_head s_mounts; /* list of mounts; _not_ for fs use */
struct block_device *s_bdev;
struct backing_dev_info *s_bdi;
@@ -1608,6 +1613,10 @@ extern int vfs_whiteout(struct inode *, struct dentry *);
extern struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode,
int open_flag);
+int vfs_mkobj(struct dentry *, umode_t,
+ int (*f)(struct dentry *, umode_t, void *),
+ void *);
+
/*
* VFS file helper functions.
*/
@@ -1698,7 +1707,7 @@ struct file_operations {
ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
int (*iterate) (struct file *, struct dir_context *);
int (*iterate_shared) (struct file *, struct dir_context *);
- unsigned int (*poll) (struct file *, struct poll_table_struct *);
+ __poll_t (*poll) (struct file *, struct poll_table_struct *);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
int (*mmap) (struct file *, struct vm_area_struct *);
@@ -2036,21 +2045,6 @@ static inline void inode_dec_link_count(struct inode *inode)
mark_inode_dirty(inode);
}
-/**
- * inode_inc_iversion - increments i_version
- * @inode: inode that need to be updated
- *
- * Every time the inode is modified, the i_version field will be incremented.
- * The filesystem has to be mounted with i_version flag
- */
-
-static inline void inode_inc_iversion(struct inode *inode)
-{
- spin_lock(&inode->i_lock);
- inode->i_version++;
- spin_unlock(&inode->i_lock);
-}
-
enum file_time_flags {
S_ATIME = 1,
S_MTIME = 2,
@@ -2699,7 +2693,6 @@ extern sector_t bmap(struct inode *, sector_t);
#endif
extern int notify_change(struct dentry *, struct iattr *, struct inode **);
extern int inode_permission(struct inode *, int);
-extern int __inode_permission(struct inode *, int);
extern int generic_permission(struct inode *, int);
extern int __check_sticky(struct inode *dir, struct inode *inode);
@@ -2992,6 +2985,7 @@ enum {
};
void dio_end_io(struct bio *bio);
+void dio_warn_stale_pagecache(struct file *filp);
ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, struct iov_iter *iter,
@@ -3239,6 +3233,8 @@ static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags)
ki->ki_flags |= IOCB_DSYNC;
if (flags & RWF_SYNC)
ki->ki_flags |= (IOCB_DSYNC | IOCB_SYNC);
+ if (flags & RWF_APPEND)
+ ki->ki_flags |= IOCB_APPEND;
return 0;
}
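
The open-coded i_version helper removed above is superseded in this series by the accessors in <linux/iversion.h>, which pair the now-atomic64_t counter with query tracking. Filesystems now do roughly (function names from iversion.h; my_* wrappers hypothetical):

#include <linux/iversion.h>

static void my_modify(struct inode *inode)
{
	/* on any change that should bump the change attribute */
	inode_inc_iversion(inode);
}

static u64 my_report(struct inode *inode)
{
	/* when handing the value out, e.g. to NFSD */
	return inode_query_iversion(inode);
}
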
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 3319df9727aa..9c3c9a319e48 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -766,9 +766,6 @@ typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-/* for init task */
-#define INIT_FTRACE_GRAPH .ret_stack = NULL,
-
/*
* Stack of return addresses for functions
* of a thread.
@@ -846,7 +843,6 @@ static inline void unpause_graph_tracing(void)
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */
#define __notrace_funcgraph
-#define INIT_FTRACE_GRAPH
static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
@@ -925,10 +921,6 @@ extern int tracepoint_printk;
extern void disable_trace_on_warning(void);
extern int __disable_trace_on_warning;
-#ifdef CONFIG_PREEMPT
-#define INIT_TRACE_RECURSION .trace_recursion = 0,
-#endif
-
int tracepoint_printk_sysctl(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos);
@@ -937,10 +929,6 @@ int tracepoint_printk_sysctl(struct ctl_table *table, int write,
static inline void disable_trace_on_warning(void) { }
#endif /* CONFIG_TRACING */
-#ifndef INIT_TRACE_RECURSION
-#define INIT_TRACE_RECURSION
-#endif
-
#ifdef CONFIG_FTRACE_SYSCALLS
unsigned long arch_syscall_addr(int nr);
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 411a84c6c400..4fa1a489efe4 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -15,6 +15,7 @@
#include <linux/types.h>
struct fwnode_operations;
+struct device;
struct fwnode_handle {
struct fwnode_handle *secondary;
@@ -51,6 +52,7 @@ struct fwnode_reference_args {
* struct fwnode_operations - Operations for fwnode interface
* @get: Get a reference to an fwnode.
* @put: Put a reference to an fwnode.
+ * @device_get_match_data: Return the device driver match data.
* @property_present: Return true if a property is present.
* @property_read_integer_array: Read an array of integer properties. Return
* zero on success, a negative error code
@@ -71,6 +73,8 @@ struct fwnode_operations {
struct fwnode_handle *(*get)(struct fwnode_handle *fwnode);
void (*put)(struct fwnode_handle *fwnode);
bool (*device_is_available)(const struct fwnode_handle *fwnode);
+ void *(*device_get_match_data)(const struct fwnode_handle *fwnode,
+ const struct device *dev);
bool (*property_present)(const struct fwnode_handle *fwnode,
const char *propname);
int (*property_read_int_array)(const struct fwnode_handle *fwnode,
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h
index ecc2928e8046..bc738504ab4a 100644
--- a/include/linux/genetlink.h
+++ b/include/linux/genetlink.h
@@ -31,8 +31,7 @@ extern wait_queue_head_t genl_sk_destructing_waitq;
* @p: The pointer to read, prior to dereferencing
*
* Return the value of the specified RCU-protected pointer, but omit
- * both the smp_read_barrier_depends() and the READ_ONCE(), because
- * caller holds genl mutex.
+ * the READ_ONCE(), because caller holds genl mutex.
*/
#define genl_dereference(p) \
rcu_dereference_protected(p, lockdep_genl_is_held())
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 5144ebe046c9..5e3531027b51 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -395,6 +395,11 @@ static inline void add_disk(struct gendisk *disk)
{
device_add_disk(NULL, disk);
}
+extern void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk);
+static inline void add_disk_no_queue_reg(struct gendisk *disk)
+{
+ device_add_disk_no_queue_reg(NULL, disk);
+}
extern void del_gendisk(struct gendisk *gp);
extern struct gendisk *get_gendisk(dev_t dev, int *partno);
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 8ef7fc0ce0f0..91ed23468530 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -1,4 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * <linux/gpio.h>
+ *
+ * This is the LEGACY GPIO bulk include file, including legacy APIs. It is
+ * used for GPIO drivers still referencing the global GPIO numberspace,
+ * and should not be included in new code.
+ *
+ * If you're implementing a GPIO driver, only include <linux/gpio/driver.h>
+ * If you're implementing a GPIO consumer, only include <linux/gpio/consumer.h>
+ */
#ifndef __LINUX_GPIO_H
#define __LINUX_GPIO_H
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 7447d85dbe2f..dbd065963296 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -139,6 +139,7 @@ void gpiod_set_raw_array_value_cansleep(unsigned int array_size,
int *value_array);
int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
+int gpiod_set_transitory(struct gpio_desc *desc, bool transitory);
int gpiod_is_active_low(const struct gpio_desc *desc);
int gpiod_cansleep(const struct gpio_desc *desc);
@@ -150,8 +151,14 @@ struct gpio_desc *gpio_to_desc(unsigned gpio);
int desc_to_gpio(const struct gpio_desc *desc);
/* Child properties interface */
+struct device_node;
struct fwnode_handle;
+struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
+ struct device_node *node,
+ const char *propname, int index,
+ enum gpiod_flags dflags,
+ const char *label);
struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
const char *propname, int index,
enum gpiod_flags dflags,
@@ -431,6 +438,13 @@ static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
return -ENOSYS;
}
+static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+ return -ENOSYS;
+}
+
static inline int gpiod_is_active_low(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
@@ -464,9 +478,20 @@ static inline int desc_to_gpio(const struct gpio_desc *desc)
}
/* Child properties interface */
+struct device_node;
struct fwnode_handle;
static inline
+struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
+ struct device_node *node,
+ const char *propname, int index,
+ enum gpiod_flags dflags,
+ const char *label)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline
struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
const char *propname, int index,
enum gpiod_flags dflags,
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 7258cd676df4..1ba9a331ec51 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -436,6 +436,9 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
struct lock_class_key *lock_key,
struct lock_class_key *request_key);
+bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gpiochip,
+ unsigned int offset);
+
#ifdef CONFIG_LOCKDEP
/*
diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h
index 846be7c69a52..b2f2dc638463 100644
--- a/include/linux/gpio/machine.h
+++ b/include/linux/gpio/machine.h
@@ -10,8 +10,8 @@ enum gpio_lookup_flags {
GPIO_ACTIVE_LOW = (1 << 0),
GPIO_OPEN_DRAIN = (1 << 1),
GPIO_OPEN_SOURCE = (1 << 2),
- GPIO_SLEEP_MAINTAIN_VALUE = (0 << 3),
- GPIO_SLEEP_MAY_LOSE_VALUE = (1 << 3),
+ GPIO_PERSISTENT = (0 << 3),
+ GPIO_TRANSITORY = (1 << 3),
};
/**
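
The rename pairs with the new gpiod_set_transitory() consumer call: board code can now mark a line whose state need not persist across suspend or reset. A hypothetical machine table entry:

static struct gpiod_lookup_table my_gpios = {
	.dev_id = "my-device",
	.table = {
		GPIO_LOOKUP("gpiochip0", 17, "reset",
			    GPIO_ACTIVE_LOW | GPIO_TRANSITORY),
		{ },
	},
};
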
diff --git a/include/linux/hid.h b/include/linux/hid.h
index d491027a7c22..091a81cf330f 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -281,6 +281,7 @@ struct hid_item {
#define HID_DG_DEVICECONFIG 0x000d000e
#define HID_DG_DEVICESETTINGS 0x000d0023
+#define HID_DG_AZIMUTH 0x000d003f
#define HID_DG_CONFIDENCE 0x000d0047
#define HID_DG_WIDTH 0x000d0048
#define HID_DG_HEIGHT 0x000d0049
@@ -342,6 +343,7 @@ struct hid_item {
#define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000
#define HID_QUIRK_SKIP_OUTPUT_REPORT_ID 0x00020000
#define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP 0x00040000
+#define HID_QUIRK_HAVE_SPECIAL_DRIVER 0x00080000
#define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000
#define HID_QUIRK_NO_INIT_REPORTS 0x20000000
#define HID_QUIRK_NO_IGNORE 0x40000000
@@ -671,6 +673,7 @@ struct hid_usage_id {
* to be called)
* @dyn_list: list of dynamically added device ids
* @dyn_lock: lock protecting @dyn_list
+ * @match: check if the given device is handled by this driver
* @probe: new device inserted
* @remove: device removed (NULL if not a hot-plug capable driver)
* @report_table: on which reports to call raw_event (NULL means all)
@@ -683,6 +686,8 @@ struct hid_usage_id {
* @input_mapped: invoked on input registering after mapping a usage
* @input_configured: invoked just before the device is registered
* @feature_mapping: invoked on feature registering
+ * @bus_add_driver: invoked when a HID driver is about to be added
+ * @bus_removed_driver: invoked when a HID driver has been removed
* @suspend: invoked on suspend (NULL means nop)
* @resume: invoked on resume if device was not reset (NULL means nop)
* @reset_resume: invoked on resume if device was reset (NULL means nop)
@@ -711,6 +716,7 @@ struct hid_driver {
struct list_head dyn_list;
spinlock_t dyn_lock;
+ bool (*match)(struct hid_device *dev, bool ignore_special_driver);
int (*probe)(struct hid_device *dev, const struct hid_device_id *id);
void (*remove)(struct hid_device *dev);
@@ -736,6 +742,8 @@ struct hid_driver {
void (*feature_mapping)(struct hid_device *hdev,
struct hid_field *field,
struct hid_usage *usage);
+ void (*bus_add_driver)(struct hid_driver *driver);
+ void (*bus_removed_driver)(struct hid_driver *driver);
#ifdef CONFIG_PM
int (*suspend)(struct hid_device *hdev, pm_message_t message);
int (*resume)(struct hid_device *hdev);
@@ -814,6 +822,8 @@ extern bool hid_ignore(struct hid_device *);
extern int hid_add_device(struct hid_device *);
extern void hid_destroy_device(struct hid_device *);
+extern struct bus_type hid_bus_type;
+
extern int __must_check __hid_register_driver(struct hid_driver *,
struct module *, const char *mod_name);
@@ -860,8 +870,12 @@ int hid_open_report(struct hid_device *device);
int hid_check_keys_pressed(struct hid_device *hid);
int hid_connect(struct hid_device *hid, unsigned int connect_mask);
void hid_disconnect(struct hid_device *hid);
-const struct hid_device_id *hid_match_id(struct hid_device *hdev,
+bool hid_match_one_id(const struct hid_device *hdev,
+ const struct hid_device_id *id);
+const struct hid_device_id *hid_match_id(const struct hid_device *hdev,
const struct hid_device_id *id);
+const struct hid_device_id *hid_match_device(struct hid_device *hdev,
+ struct hid_driver *hdrv);
s32 hid_snto32(__u32 value, unsigned n);
__u32 hid_field_extract(const struct hid_device *hid, __u8 *report,
unsigned offset, unsigned n);
@@ -1098,9 +1112,9 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
int interrupt);
/* HID quirks API */
-u32 usbhid_lookup_quirk(const u16 idVendor, const u16 idProduct);
-int usbhid_quirks_init(char **quirks_param);
-void usbhid_quirks_exit(void);
+unsigned long hid_lookup_quirk(const struct hid_device *hdev);
+int hid_quirks_init(char **quirks_param, __u16 bus, int count);
+void hid_quirks_exit(__u16 bus);
#ifdef CONFIG_HID_PID
int hid_pidff_init(struct hid_device *hid);
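
As an illustration of the new ->match() hook, a catch-all driver could decline devices flagged for a specialized driver; the demo_* names below are hypothetical:

#include <linux/hid.h>

static bool demo_hid_match(struct hid_device *hdev,
			   bool ignore_special_driver)
{
	/* Bind to anything unless a specialized driver is flagged. */
	if (ignore_special_driver)
		return true;

	return !(hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER);
}

static struct hid_driver demo_hid_driver = {
	.name  = "demo-hid",
	.match = demo_hid_match,
};
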
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 012c37fdb688..c7902ca7c9f4 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -28,13 +28,29 @@ struct hrtimer_cpu_base;
/*
* Mode arguments of xxx_hrtimer functions:
+ *
+ * HRTIMER_MODE_ABS - Time value is absolute
+ * HRTIMER_MODE_REL - Time value is relative to now
+ * HRTIMER_MODE_PINNED - Timer is bound to CPU (is only considered
+ * when starting the timer)
+ * HRTIMER_MODE_SOFT - Timer callback function will be executed in
+ * soft irq context
*/
enum hrtimer_mode {
- HRTIMER_MODE_ABS = 0x0, /* Time value is absolute */
- HRTIMER_MODE_REL = 0x1, /* Time value is relative to now */
- HRTIMER_MODE_PINNED = 0x02, /* Timer is bound to CPU */
- HRTIMER_MODE_ABS_PINNED = 0x02,
- HRTIMER_MODE_REL_PINNED = 0x03,
+ HRTIMER_MODE_ABS = 0x00,
+ HRTIMER_MODE_REL = 0x01,
+ HRTIMER_MODE_PINNED = 0x02,
+ HRTIMER_MODE_SOFT = 0x04,
+
+ HRTIMER_MODE_ABS_PINNED = HRTIMER_MODE_ABS | HRTIMER_MODE_PINNED,
+ HRTIMER_MODE_REL_PINNED = HRTIMER_MODE_REL | HRTIMER_MODE_PINNED,
+
+ HRTIMER_MODE_ABS_SOFT = HRTIMER_MODE_ABS | HRTIMER_MODE_SOFT,
+ HRTIMER_MODE_REL_SOFT = HRTIMER_MODE_REL | HRTIMER_MODE_SOFT,
+
+ HRTIMER_MODE_ABS_PINNED_SOFT = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_SOFT,
+ HRTIMER_MODE_REL_PINNED_SOFT = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_SOFT,
+
};
/*
@@ -87,6 +103,7 @@ enum hrtimer_restart {
* @base: pointer to the timer base (per cpu and per clock)
* @state: state information (See bit values above)
* @is_rel: Set if the timer was armed relative
+ * @is_soft: Set if hrtimer will be expired in soft interrupt context.
*
* The hrtimer structure must be initialized by hrtimer_init()
*/
@@ -97,6 +114,7 @@ struct hrtimer {
struct hrtimer_clock_base *base;
u8 state;
u8 is_rel;
+ u8 is_soft;
};
/**
@@ -112,9 +130,9 @@ struct hrtimer_sleeper {
};
#ifdef CONFIG_64BIT
-# define HRTIMER_CLOCK_BASE_ALIGN 64
+# define __hrtimer_clock_base_align ____cacheline_aligned
#else
-# define HRTIMER_CLOCK_BASE_ALIGN 32
+# define __hrtimer_clock_base_align
#endif
/**
@@ -123,48 +141,57 @@ struct hrtimer_sleeper {
* @index: clock type index for per_cpu support when moving a
* timer to a base on another cpu.
* @clockid: clock id for per_cpu support
+ * @seq: seqcount around __run_hrtimer
+ * @running: pointer to the currently running hrtimer
* @active: red black tree root node for the active timers
* @get_time: function to retrieve the current time of the clock
* @offset: offset of this clock to the monotonic base
*/
struct hrtimer_clock_base {
struct hrtimer_cpu_base *cpu_base;
- int index;
+ unsigned int index;
clockid_t clockid;
+ seqcount_t seq;
+ struct hrtimer *running;
struct timerqueue_head active;
ktime_t (*get_time)(void);
ktime_t offset;
-} __attribute__((__aligned__(HRTIMER_CLOCK_BASE_ALIGN)));
+} __hrtimer_clock_base_align;
enum hrtimer_base_type {
HRTIMER_BASE_MONOTONIC,
HRTIMER_BASE_REALTIME,
HRTIMER_BASE_BOOTTIME,
HRTIMER_BASE_TAI,
+ HRTIMER_BASE_MONOTONIC_SOFT,
+ HRTIMER_BASE_REALTIME_SOFT,
+ HRTIMER_BASE_BOOTTIME_SOFT,
+ HRTIMER_BASE_TAI_SOFT,
HRTIMER_MAX_CLOCK_BASES,
};
-/*
+/**
* struct hrtimer_cpu_base - the per cpu clock bases
* @lock: lock protecting the base and associated clock bases
* and timers
- * @seq: seqcount around __run_hrtimer
- * @running: pointer to the currently running hrtimer
* @cpu: cpu number
* @active_bases: Bitfield to mark bases with active timers
* @clock_was_set_seq: Sequence counter of clock was set events
- * @migration_enabled: The migration of hrtimers to other cpus is enabled
- * @nohz_active: The nohz functionality is enabled
- * @expires_next: absolute time of the next event which was scheduled
- * via clock_set_next_event()
- * @next_timer: Pointer to the first expiring timer
- * @in_hrtirq: hrtimer_interrupt() is currently executing
* @hres_active: State of high resolution mode
+ * @in_hrtirq: hrtimer_interrupt() is currently executing
* @hang_detected: The last hrtimer interrupt detected a hang
+ * @softirq_activated:	set when the timer softirq has been raised; no further
+ *			update of softirq related settings is required then.
* @nr_events: Total number of hrtimer interrupt events
* @nr_retries: Total number of hrtimer interrupt retries
* @nr_hangs: Total number of hrtimer interrupt hangs
* @max_hang_time: Maximum time spent in hrtimer_interrupt
+ * @expires_next:	absolute time of the next event, required for remote
+ *			hrtimer enqueue; it is the earliest overall expiry time
+ *			(both hard and soft hrtimers are taken into account)
+ * @next_timer: Pointer to the first expiring timer
+ * @softirq_expires_next: Time when the soft queues need to be checked for expiry
+ * @softirq_next_timer: Pointer to the first expiring softirq based timer
* @clock_base: array of clock bases for this cpu
*
* Note: next_timer is just an optimization for __remove_hrtimer().
@@ -173,31 +200,28 @@ enum hrtimer_base_type {
*/
struct hrtimer_cpu_base {
raw_spinlock_t lock;
- seqcount_t seq;
- struct hrtimer *running;
unsigned int cpu;
unsigned int active_bases;
unsigned int clock_was_set_seq;
- bool migration_enabled;
- bool nohz_active;
+ unsigned int hres_active : 1,
+ in_hrtirq : 1,
+ hang_detected : 1,
+ softirq_activated : 1;
#ifdef CONFIG_HIGH_RES_TIMERS
- unsigned int in_hrtirq : 1,
- hres_active : 1,
- hang_detected : 1;
- ktime_t expires_next;
- struct hrtimer *next_timer;
unsigned int nr_events;
- unsigned int nr_retries;
- unsigned int nr_hangs;
+ unsigned short nr_retries;
+ unsigned short nr_hangs;
unsigned int max_hang_time;
#endif
+ ktime_t expires_next;
+ struct hrtimer *next_timer;
+ ktime_t softirq_expires_next;
+ struct hrtimer *softirq_next_timer;
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
} ____cacheline_aligned;
static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
{
- BUILD_BUG_ON(sizeof(struct hrtimer_clock_base) > HRTIMER_CLOCK_BASE_ALIGN);
-
timer->node.expires = time;
timer->_softexpires = time;
}
@@ -266,16 +290,17 @@ static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
return timer->base->get_time();
}
+static inline int hrtimer_is_hres_active(struct hrtimer *timer)
+{
+ return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
+ timer->base->cpu_base->hres_active : 0;
+}
+
#ifdef CONFIG_HIGH_RES_TIMERS
struct clock_event_device;
extern void hrtimer_interrupt(struct clock_event_device *dev);
-static inline int hrtimer_is_hres_active(struct hrtimer *timer)
-{
- return timer->base->cpu_base->hres_active;
-}
-
/*
* The resolution of the clocks. The resolution value is returned in
* the clock_getres() system call to give application programmers an
@@ -298,11 +323,6 @@ extern unsigned int hrtimer_resolution;
#define hrtimer_resolution (unsigned int)LOW_RES_NSEC
-static inline int hrtimer_is_hres_active(struct hrtimer *timer)
-{
- return 0;
-}
-
static inline void clock_was_set_delayed(void) { }
#endif
@@ -365,11 +385,12 @@ extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
u64 range_ns, const enum hrtimer_mode mode);
/**
- * hrtimer_start - (re)start an hrtimer on the current CPU
+ * hrtimer_start - (re)start an hrtimer
* @timer: the timer to be added
* @tim: expiry time
- * @mode: expiry mode: absolute (HRTIMER_MODE_ABS) or
- * relative (HRTIMER_MODE_REL)
+ * @mode: timer mode: absolute (HRTIMER_MODE_ABS) or
+ * relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED);
+ * softirq based mode is considered for debug purposes only!
*/
static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim,
const enum hrtimer_mode mode)
@@ -422,7 +443,7 @@ static inline int hrtimer_is_queued(struct hrtimer *timer)
*/
static inline int hrtimer_callback_running(struct hrtimer *timer)
{
- return timer->base->cpu_base->running == timer;
+ return timer->base->running == timer;
}
/* Forward a hrtimer so it expires after now: */
@@ -466,7 +487,7 @@ extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta,
extern int schedule_hrtimeout_range_clock(ktime_t *expires,
u64 delta,
const enum hrtimer_mode mode,
- int clock);
+ clockid_t clock_id);
extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
/* Soft interrupt function to run the hrtimer queues: */
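
A minimal sketch of the new soft-expiry modes (the demo_* names are hypothetical): the callback of a timer armed with a *_SOFT mode runs in softirq context instead of hard interrupt context:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer demo_timer;

static enum hrtimer_restart demo_timer_fn(struct hrtimer *timer)
{
	/* Runs in softirq context because the timer was armed with _SOFT. */
	return HRTIMER_NORESTART;
}

static void demo_arm(void)
{
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
	demo_timer.function = demo_timer_fn;
	hrtimer_start(&demo_timer, ms_to_ktime(10), HRTIMER_MODE_REL_SOFT);
}
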
diff --git a/include/linux/iio/adc/stm32-dfsdm-adc.h b/include/linux/iio/adc/stm32-dfsdm-adc.h
new file mode 100644
index 000000000000..e7dc7a542a4e
--- /dev/null
+++ b/include/linux/iio/adc/stm32-dfsdm-adc.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This file describes the STM32 DFSDM IIO driver API for the audio part
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author(s): Arnaud Pouliquen <arnaud.pouliquen@st.com>.
+ */
+
+#ifndef STM32_DFSDM_ADC_H
+#define STM32_DFSDM_ADC_H
+
+int stm32_dfsdm_get_buff_cb(struct iio_dev *iio_dev,
+ int (*cb)(const void *data, size_t size,
+ void *private),
+ void *private);
+int stm32_dfsdm_release_buff_cb(struct iio_dev *iio_dev);
+
+#endif
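
A hedged sketch of how an audio consumer might use this API; the callback body and the demo_* names are hypothetical:

#include <linux/iio/iio.h>
#include <linux/iio/adc/stm32-dfsdm-adc.h>

static int demo_dfsdm_cb(const void *data, size_t size, void *private)
{
	/* Forward the DFSDM samples, e.g. into an audio buffer. */
	return 0;
}

static int demo_start_capture(struct iio_dev *iio_dev, void *ctx)
{
	return stm32_dfsdm_get_buff_cb(iio_dev, demo_dfsdm_cb, ctx);
}

static void demo_stop_capture(struct iio_dev *iio_dev)
{
	stm32_dfsdm_release_buff_cb(iio_dev);
}
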
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index 5e347a9805fd..9887f4f8e2a8 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -134,6 +134,17 @@ struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
void *private),
void *private);
/**
+ * iio_channel_cb_set_buffer_watermark() - set the buffer watermark.
+ * @cb_buffer: The callback buffer whose watermark is to be set.
+ * @watermark: buffer watermark in bytes.
+ *
+ * This function allows configuring the buffer watermark.
+ */
+int iio_channel_cb_set_buffer_watermark(struct iio_cb_buffer *cb_buffer,
+ size_t watermark);
+
+/**
* iio_channel_release_all_cb() - release and unregister the callback.
* @cb_buffer: The callback buffer that was allocated.
*/
@@ -216,6 +227,32 @@ int iio_read_channel_average_raw(struct iio_channel *chan, int *val);
int iio_read_channel_processed(struct iio_channel *chan, int *val);
/**
+ * iio_write_channel_attribute() - Write values to the device attribute.
+ * @chan: The channel being queried.
+ * @val: Value being written.
+ * @val2: Value being written. val2 use depends on attribute type.
+ * @attribute: info attribute to be written.
+ *
+ * Returns an error code or 0.
+ */
+int iio_write_channel_attribute(struct iio_channel *chan, int val,
+ int val2, enum iio_chan_info_enum attribute);
+
+/**
+ * iio_read_channel_attribute() - Read values from the device attribute.
+ * @chan: The channel being queried.
+ * @val: Value being read.
+ * @val2: Value being read. val2 use depends on attribute type.
+ * @attribute: info attribute to be read.
+ *
+ * Returns an error code on failure. Otherwise returns a description of what is
+ * in val and val2, such as IIO_VAL_INT_PLUS_MICRO, which tells us we have a
+ * value of val + val2/1e6
+ */
+int iio_read_channel_attribute(struct iio_channel *chan, int *val,
+ int *val2, enum iio_chan_info_enum attribute);
+
+/**
* iio_write_channel_raw() - write to a given channel
* @chan: The channel being queried.
* @val: Value being written.
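
To illustrate the newly exported accessors, a consumer could read a channel's scale as below (demo_get_scale is hypothetical); the return value tells the caller how val/val2 encode the number:

#include <linux/iio/consumer.h>
#include <linux/iio/types.h>

static int demo_get_scale(struct iio_channel *chan, int *val, int *val2)
{
	/* e.g. IIO_VAL_INT_PLUS_MICRO: scale = *val + *val2 / 1e6 */
	return iio_read_channel_attribute(chan, val, val2,
					  IIO_CHAN_INFO_SCALE);
}
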
diff --git a/include/linux/iio/hw-consumer.h b/include/linux/iio/hw-consumer.h
new file mode 100644
index 000000000000..44d48bb1d39f
--- /dev/null
+++ b/include/linux/iio/hw-consumer.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Industrial I/O in kernel hardware consumer interface
+ *
+ * Copyright 2017 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ */
+
+#ifndef LINUX_IIO_HW_CONSUMER_H
+#define LINUX_IIO_HW_CONSUMER_H
+
+struct iio_hw_consumer;
+
+struct iio_hw_consumer *iio_hw_consumer_alloc(struct device *dev);
+void iio_hw_consumer_free(struct iio_hw_consumer *hwc);
+struct iio_hw_consumer *devm_iio_hw_consumer_alloc(struct device *dev);
+void devm_iio_hw_consumer_free(struct device *dev, struct iio_hw_consumer *hwc);
+int iio_hw_consumer_enable(struct iio_hw_consumer *hwc);
+void iio_hw_consumer_disable(struct iio_hw_consumer *hwc);
+
+#endif
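
A minimal sketch of the hardware-consumer API (demo_* is hypothetical; the ERR_PTR return convention of the devm allocator is assumed here):

#include <linux/err.h>
#include <linux/iio/hw-consumer.h>

static int demo_enable_hw_path(struct device *dev)
{
	struct iio_hw_consumer *hwc;
	int ret;

	hwc = devm_iio_hw_consumer_alloc(dev);
	if (IS_ERR(hwc))
		return PTR_ERR(hwc);

	/* Switch the providing device into hardware-buffer mode. */
	ret = iio_hw_consumer_enable(hwc);
	if (ret)
		return ret;

	/* ... data now flows over the hardware path ... */

	iio_hw_consumer_disable(hwc);
	return 0;
}
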
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 20b61347ea58..f12a61be1ede 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -20,34 +20,6 @@
* Currently assumes nano seconds.
*/
-enum iio_chan_info_enum {
- IIO_CHAN_INFO_RAW = 0,
- IIO_CHAN_INFO_PROCESSED,
- IIO_CHAN_INFO_SCALE,
- IIO_CHAN_INFO_OFFSET,
- IIO_CHAN_INFO_CALIBSCALE,
- IIO_CHAN_INFO_CALIBBIAS,
- IIO_CHAN_INFO_PEAK,
- IIO_CHAN_INFO_PEAK_SCALE,
- IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW,
- IIO_CHAN_INFO_AVERAGE_RAW,
- IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY,
- IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY,
- IIO_CHAN_INFO_SAMP_FREQ,
- IIO_CHAN_INFO_FREQUENCY,
- IIO_CHAN_INFO_PHASE,
- IIO_CHAN_INFO_HARDWAREGAIN,
- IIO_CHAN_INFO_HYSTERESIS,
- IIO_CHAN_INFO_INT_TIME,
- IIO_CHAN_INFO_ENABLE,
- IIO_CHAN_INFO_CALIBHEIGHT,
- IIO_CHAN_INFO_CALIBWEIGHT,
- IIO_CHAN_INFO_DEBOUNCE_COUNT,
- IIO_CHAN_INFO_DEBOUNCE_TIME,
- IIO_CHAN_INFO_CALIBEMISSIVITY,
- IIO_CHAN_INFO_OVERSAMPLING_RATIO,
-};
-
enum iio_shared_by {
IIO_SEPARATE,
IIO_SHARED_BY_TYPE,
diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h
index 2aa7b6384d64..6eb3d683ef62 100644
--- a/include/linux/iio/types.h
+++ b/include/linux/iio/types.h
@@ -34,4 +34,32 @@ enum iio_available_type {
IIO_AVAIL_RANGE,
};
+enum iio_chan_info_enum {
+ IIO_CHAN_INFO_RAW = 0,
+ IIO_CHAN_INFO_PROCESSED,
+ IIO_CHAN_INFO_SCALE,
+ IIO_CHAN_INFO_OFFSET,
+ IIO_CHAN_INFO_CALIBSCALE,
+ IIO_CHAN_INFO_CALIBBIAS,
+ IIO_CHAN_INFO_PEAK,
+ IIO_CHAN_INFO_PEAK_SCALE,
+ IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW,
+ IIO_CHAN_INFO_AVERAGE_RAW,
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY,
+ IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY,
+ IIO_CHAN_INFO_SAMP_FREQ,
+ IIO_CHAN_INFO_FREQUENCY,
+ IIO_CHAN_INFO_PHASE,
+ IIO_CHAN_INFO_HARDWAREGAIN,
+ IIO_CHAN_INFO_HYSTERESIS,
+ IIO_CHAN_INFO_INT_TIME,
+ IIO_CHAN_INFO_ENABLE,
+ IIO_CHAN_INFO_CALIBHEIGHT,
+ IIO_CHAN_INFO_CALIBWEIGHT,
+ IIO_CHAN_INFO_DEBOUNCE_COUNT,
+ IIO_CHAN_INFO_DEBOUNCE_TIME,
+ IIO_CHAN_INFO_CALIBEMISSIVITY,
+ IIO_CHAN_INFO_OVERSAMPLING_RATIO,
+};
+
#endif /* _IIO_TYPES_H_ */
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 6a532629c983..a454b8aeb938 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -21,22 +21,11 @@
#include <asm/thread_info.h>
-#ifdef CONFIG_SMP
-# define INIT_PUSHABLE_TASKS(tsk) \
- .pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO),
-#else
-# define INIT_PUSHABLE_TASKS(tsk)
-#endif
-
extern struct files_struct init_files;
extern struct fs_struct init_fs;
-
-#ifdef CONFIG_CPUSETS
-#define INIT_CPUSET_SEQ(tsk) \
- .mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq),
-#else
-#define INIT_CPUSET_SEQ(tsk)
-#endif
+extern struct nsproxy init_nsproxy;
+extern struct group_info init_groups;
+extern struct cred init_cred;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#define INIT_PREV_CPUTIME(x) .prev_cputime = { \
@@ -47,67 +36,16 @@ extern struct fs_struct init_fs;
#endif
#ifdef CONFIG_POSIX_TIMERS
-#define INIT_POSIX_TIMERS(s) \
- .posix_timers = LIST_HEAD_INIT(s.posix_timers),
#define INIT_CPU_TIMERS(s) \
.cpu_timers = { \
LIST_HEAD_INIT(s.cpu_timers[0]), \
LIST_HEAD_INIT(s.cpu_timers[1]), \
- LIST_HEAD_INIT(s.cpu_timers[2]), \
- },
-#define INIT_CPUTIMER(s) \
- .cputimer = { \
- .cputime_atomic = INIT_CPUTIME_ATOMIC, \
- .running = false, \
- .checking_timer = false, \
+ LIST_HEAD_INIT(s.cpu_timers[2]), \
},
#else
-#define INIT_POSIX_TIMERS(s)
#define INIT_CPU_TIMERS(s)
-#define INIT_CPUTIMER(s)
#endif
-#define INIT_SIGNALS(sig) { \
- .nr_threads = 1, \
- .thread_head = LIST_HEAD_INIT(init_task.thread_node), \
- .wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
- .shared_pending = { \
- .list = LIST_HEAD_INIT(sig.shared_pending.list), \
- .signal = {{0}}}, \
- INIT_POSIX_TIMERS(sig) \
- INIT_CPU_TIMERS(sig) \
- .rlim = INIT_RLIMITS, \
- INIT_CPUTIMER(sig) \
- INIT_PREV_CPUTIME(sig) \
- .cred_guard_mutex = \
- __MUTEX_INITIALIZER(sig.cred_guard_mutex), \
-}
-
-extern struct nsproxy init_nsproxy;
-
-#define INIT_SIGHAND(sighand) { \
- .count = ATOMIC_INIT(1), \
- .action = { { { .sa_handler = SIG_DFL, } }, }, \
- .siglock = __SPIN_LOCK_UNLOCKED(sighand.siglock), \
- .signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(sighand.signalfd_wqh), \
-}
-
-extern struct group_info init_groups;
-
-#define INIT_STRUCT_PID { \
- .count = ATOMIC_INIT(1), \
- .tasks = { \
- { .first = NULL }, \
- { .first = NULL }, \
- { .first = NULL }, \
- }, \
- .level = 0, \
- .numbers = { { \
- .nr = 0, \
- .ns = &init_pid_ns, \
- }, } \
-}
-
#define INIT_PID_LINK(type) \
{ \
.node = { \
@@ -117,192 +55,16 @@ extern struct group_info init_groups;
.pid = &init_struct_pid, \
}
-#ifdef CONFIG_AUDITSYSCALL
-#define INIT_IDS \
- .loginuid = INVALID_UID, \
- .sessionid = (unsigned int)-1,
-#else
-#define INIT_IDS
-#endif
-
-#ifdef CONFIG_PREEMPT_RCU
-#define INIT_TASK_RCU_PREEMPT(tsk) \
- .rcu_read_lock_nesting = 0, \
- .rcu_read_unlock_special.s = 0, \
- .rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry), \
- .rcu_blocked_node = NULL,
-#else
-#define INIT_TASK_RCU_PREEMPT(tsk)
-#endif
-#ifdef CONFIG_TASKS_RCU
-#define INIT_TASK_RCU_TASKS(tsk) \
- .rcu_tasks_holdout = false, \
- .rcu_tasks_holdout_list = \
- LIST_HEAD_INIT(tsk.rcu_tasks_holdout_list), \
- .rcu_tasks_idle_cpu = -1,
-#else
-#define INIT_TASK_RCU_TASKS(tsk)
-#endif
-
-extern struct cred init_cred;
-
-#ifdef CONFIG_CGROUP_SCHED
-# define INIT_CGROUP_SCHED(tsk) \
- .sched_task_group = &root_task_group,
-#else
-# define INIT_CGROUP_SCHED(tsk)
-#endif
-
-#ifdef CONFIG_PERF_EVENTS
-# define INIT_PERF_EVENTS(tsk) \
- .perf_event_mutex = \
- __MUTEX_INITIALIZER(tsk.perf_event_mutex), \
- .perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list),
-#else
-# define INIT_PERF_EVENTS(tsk)
-#endif
-
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-# define INIT_VTIME(tsk) \
- .vtime.seqcount = SEQCNT_ZERO(tsk.vtime.seqcount), \
- .vtime.starttime = 0, \
- .vtime.state = VTIME_SYS,
-#else
-# define INIT_VTIME(tsk)
-#endif
-
#define INIT_TASK_COMM "swapper"
-#ifdef CONFIG_RT_MUTEXES
-# define INIT_RT_MUTEXES(tsk) \
- .pi_waiters = RB_ROOT_CACHED, \
- .pi_top_task = NULL,
-#else
-# define INIT_RT_MUTEXES(tsk)
-#endif
-
-#ifdef CONFIG_NUMA_BALANCING
-# define INIT_NUMA_BALANCING(tsk) \
- .numa_preferred_nid = -1, \
- .numa_group = NULL, \
- .numa_faults = NULL,
-#else
-# define INIT_NUMA_BALANCING(tsk)
-#endif
-
-#ifdef CONFIG_KASAN
-# define INIT_KASAN(tsk) \
- .kasan_depth = 1,
-#else
-# define INIT_KASAN(tsk)
-#endif
-
-#ifdef CONFIG_LIVEPATCH
-# define INIT_LIVEPATCH(tsk) \
- .patch_state = KLP_UNDEFINED,
-#else
-# define INIT_LIVEPATCH(tsk)
-#endif
-
-#ifdef CONFIG_THREAD_INFO_IN_TASK
-# define INIT_TASK_TI(tsk) \
- .thread_info = INIT_THREAD_INFO(tsk), \
- .stack_refcount = ATOMIC_INIT(1),
-#else
-# define INIT_TASK_TI(tsk)
-#endif
-
-#ifdef CONFIG_SECURITY
-#define INIT_TASK_SECURITY .security = NULL,
-#else
-#define INIT_TASK_SECURITY
-#endif
-
-/*
- * INIT_TASK is used to set up the first task table, touch at
- * your own risk!. Base=0, limit=0x1fffff (=2MB)
- */
-#define INIT_TASK(tsk) \
-{ \
- INIT_TASK_TI(tsk) \
- .state = 0, \
- .stack = init_stack, \
- .usage = ATOMIC_INIT(2), \
- .flags = PF_KTHREAD, \
- .prio = MAX_PRIO-20, \
- .static_prio = MAX_PRIO-20, \
- .normal_prio = MAX_PRIO-20, \
- .policy = SCHED_NORMAL, \
- .cpus_allowed = CPU_MASK_ALL, \
- .nr_cpus_allowed= NR_CPUS, \
- .mm = NULL, \
- .active_mm = &init_mm, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
- .se = { \
- .group_node = LIST_HEAD_INIT(tsk.se.group_node), \
- }, \
- .rt = { \
- .run_list = LIST_HEAD_INIT(tsk.rt.run_list), \
- .time_slice = RR_TIMESLICE, \
- }, \
- .tasks = LIST_HEAD_INIT(tsk.tasks), \
- INIT_PUSHABLE_TASKS(tsk) \
- INIT_CGROUP_SCHED(tsk) \
- .ptraced = LIST_HEAD_INIT(tsk.ptraced), \
- .ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \
- .real_parent = &tsk, \
- .parent = &tsk, \
- .children = LIST_HEAD_INIT(tsk.children), \
- .sibling = LIST_HEAD_INIT(tsk.sibling), \
- .group_leader = &tsk, \
- RCU_POINTER_INITIALIZER(real_cred, &init_cred), \
- RCU_POINTER_INITIALIZER(cred, &init_cred), \
- .comm = INIT_TASK_COMM, \
- .thread = INIT_THREAD, \
- .fs = &init_fs, \
- .files = &init_files, \
- .signal = &init_signals, \
- .sighand = &init_sighand, \
- .nsproxy = &init_nsproxy, \
- .pending = { \
- .list = LIST_HEAD_INIT(tsk.pending.list), \
- .signal = {{0}}}, \
- .blocked = {{0}}, \
- .alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \
- .journal_info = NULL, \
- INIT_CPU_TIMERS(tsk) \
- .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
- .timer_slack_ns = 50000, /* 50 usec default slack */ \
- .pids = { \
- [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
- [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
- [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \
- }, \
- .thread_group = LIST_HEAD_INIT(tsk.thread_group), \
- .thread_node = LIST_HEAD_INIT(init_signals.thread_head), \
- INIT_IDS \
- INIT_PERF_EVENTS(tsk) \
- INIT_TRACE_IRQFLAGS \
- INIT_LOCKDEP \
- INIT_FTRACE_GRAPH \
- INIT_TRACE_RECURSION \
- INIT_TASK_RCU_PREEMPT(tsk) \
- INIT_TASK_RCU_TASKS(tsk) \
- INIT_CPUSET_SEQ(tsk) \
- INIT_RT_MUTEXES(tsk) \
- INIT_PREV_CPUTIME(tsk) \
- INIT_VTIME(tsk) \
- INIT_NUMA_BALANCING(tsk) \
- INIT_KASAN(tsk) \
- INIT_LIVEPATCH(tsk) \
- INIT_TASK_SECURITY \
-}
-
-
/* Attach to the init_task data structure for proper alignment */
+#ifdef CONFIG_ARCH_TASK_STRUCT_ON_STACK
#define __init_task_data __attribute__((__section__(".data..init_task")))
+#else
+#define __init_task_data /**/
+#endif
+/* Attach to the thread_info data structure for proper alignment */
+#define __init_thread_info __attribute__((__section__(".data..init_thread_info")))
#endif
diff --git a/include/linux/integrity.h b/include/linux/integrity.h
index c2d6082a1a4c..858d3f4a2241 100644
--- a/include/linux/integrity.h
+++ b/include/linux/integrity.h
@@ -14,6 +14,7 @@
enum integrity_status {
INTEGRITY_PASS = 0,
+ INTEGRITY_PASS_IMMUTABLE,
INTEGRITY_FAIL,
INTEGRITY_NOLABEL,
INTEGRITY_NOXATTRS,
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index 0e81035b678f..b11fcdfd0770 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -13,10 +13,13 @@
* busy NULL, 2 -> {free, claimed} : callback in progress, can be claimed
*/
-#define IRQ_WORK_PENDING 1UL
-#define IRQ_WORK_BUSY 2UL
-#define IRQ_WORK_FLAGS 3UL
-#define IRQ_WORK_LAZY 4UL /* Doesn't want IPI, wait for tick */
+#define IRQ_WORK_PENDING BIT(0)
+#define IRQ_WORK_BUSY BIT(1)
+
+/* Doesn't want IPI, wait for tick: */
+#define IRQ_WORK_LAZY BIT(2)
+
+#define IRQ_WORK_CLAIMED (IRQ_WORK_PENDING | IRQ_WORK_BUSY)
struct irq_work {
unsigned long flags;
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 1b3996ff3f16..9700f00bbc04 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -40,7 +40,6 @@ do { \
do { \
current->softirq_context--; \
} while (0)
-# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1,
#else
# define trace_hardirqs_on() do { } while (0)
# define trace_hardirqs_off() do { } while (0)
@@ -54,7 +53,6 @@ do { \
# define trace_hardirq_exit() do { } while (0)
# define lockdep_softirq_enter() do { } while (0)
# define lockdep_softirq_exit() do { } while (0)
-# define INIT_TRACE_IRQFLAGS
#endif
#if defined(CONFIG_IRQSOFF_TRACER) || \
diff --git a/include/linux/iversion.h b/include/linux/iversion.h
new file mode 100644
index 000000000000..3d2fd06495ec
--- /dev/null
+++ b/include/linux/iversion.h
@@ -0,0 +1,337 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_IVERSION_H
+#define _LINUX_IVERSION_H
+
+#include <linux/fs.h>
+
+/*
+ * The inode->i_version field:
+ * ---------------------------
+ * The change attribute (i_version) is mandated by NFSv4 and is mostly for
+ * knfsd, but is also used for other purposes (e.g. IMA). The i_version must
+ * appear different to observers if there was a change to the inode's data or
+ * metadata since it was last queried.
+ *
+ * Observers see the i_version as a 64-bit number that never decreases. If it
+ * remains the same since it was last checked, then nothing has changed in the
+ * inode. If it's different then something has changed. Observers cannot infer
+ * anything about the nature or magnitude of the changes from the value, only
+ * that the inode has changed in some fashion.
+ *
+ * Not all filesystems properly implement the i_version counter. Subsystems that
+ * want to use the i_version field on an inode should first check whether the
+ * filesystem sets the SB_I_VERSION flag (usually via the IS_I_VERSION macro).
+ *
+ * Those that set SB_I_VERSION will automatically have their i_version counter
+ * incremented on writes to normal files. If SB_I_VERSION is not set, then
+ * the VFS will not touch it on writes, and the filesystem can use it how it
+ * wishes. Note that the filesystem is always responsible for updating the
+ * i_version on namespace changes in directories (mkdir, rmdir, unlink, etc.).
+ * We consider these sorts of filesystems to have a kernel-managed i_version.
+ *
+ * It may be impractical for filesystems to keep i_version updates atomic with
+ * respect to the changes that cause them. They should, however, guarantee
+ * that i_version updates are never visible before the changes that caused
+ * them. Also, i_version updates should never be delayed longer than it takes
+ * the original change to reach disk.
+ *
+ * This implementation uses the low bit in the i_version field as a flag to
+ * track when the value has been queried. If it has not been queried since it
+ * was last incremented, we can skip the increment in most cases.
+ *
+ * In the event that we're updating the ctime, we will usually go ahead and
+ * bump the i_version anyway. Since that has to go to stable storage in some
+ * fashion, we might as well increment it as well.
+ *
+ * With this implementation, the value should always appear to observers to
+ * increase over time if the file has changed. It's recommended to use the
+ * inode_cmp_iversion() helper to compare values.
+ *
+ * Note that some filesystems (e.g. NFS and AFS) just use the field to store
+ * a server-provided value (for the most part). For that reason, those
+ * filesystems do not set SB_I_VERSION. These filesystems are considered to
+ * have a self-managed i_version.
+ *
+ * Persistently storing the i_version
+ * ----------------------------------
+ * Queries of the i_version field are not gated on them hitting the backing
+ * store. It's always possible that the host could crash after allowing
+ * a query of the value but before it has made it to disk.
+ *
+ * To mitigate this problem, filesystems should always use
+ * inode_set_iversion_queried when loading an existing inode from disk. This
+ * ensures that the next attempted inode increment will result in the value
+ * changing.
+ *
+ * Storing the value to disk therefore does not count as a query, so those
+ * filesystems should use inode_peek_iversion to grab the value to be stored.
+ * There is no need to flag the value as having been queried in that case.
+ */
+
+/*
+ * We borrow the lowest bit in the i_version to use as a flag to tell whether
+ * it has been queried since we last incremented it. If it has, then we must
+ * increment it on the next change. After that, we can clear the flag and
+ * avoid incrementing it again until it has again been queried.
+ */
+#define I_VERSION_QUERIED_SHIFT (1)
+#define I_VERSION_QUERIED (1ULL << (I_VERSION_QUERIED_SHIFT - 1))
+#define I_VERSION_INCREMENT (1ULL << I_VERSION_QUERIED_SHIFT)
+
+/**
+ * inode_set_iversion_raw - set i_version to the specified raw value
+ * @inode: inode to set
+ * @val: new i_version value to set
+ *
+ * Set @inode's i_version field to @val. This function is for use by
+ * filesystems that self-manage the i_version.
+ *
+ * For example, the NFS client stores its NFSv4 change attribute in this way,
+ * and the AFS client stores the data_version from the server here.
+ */
+static inline void
+inode_set_iversion_raw(struct inode *inode, u64 val)
+{
+ atomic64_set(&inode->i_version, val);
+}
+
+/**
+ * inode_peek_iversion_raw - grab a "raw" iversion value
+ * @inode: inode from which i_version should be read
+ *
+ * Grab a "raw" inode->i_version value and return it. The i_version is not
+ * flagged or converted in any way. This is mostly used to access a self-managed
+ * i_version.
+ *
+ * With those filesystems, we want to treat the i_version as an entirely
+ * opaque value.
+ */
+static inline u64
+inode_peek_iversion_raw(const struct inode *inode)
+{
+ return atomic64_read(&inode->i_version);
+}
+
+/**
+ * inode_set_iversion - set i_version to a particular value
+ * @inode: inode to set
+ * @val: new i_version value to set
+ *
+ * Set @inode's i_version field to @val. This function is for filesystems with
+ * a kernel-managed i_version, for initializing a newly-created inode from
+ * scratch.
+ *
+ * In this case, we do not set the QUERIED flag since we know that this value
+ * has never been queried.
+ */
+static inline void
+inode_set_iversion(struct inode *inode, u64 val)
+{
+ inode_set_iversion_raw(inode, val << I_VERSION_QUERIED_SHIFT);
+}
+
+/**
+ * inode_set_iversion_queried - set i_version to a particular value as queried
+ * @inode: inode to set
+ * @val: new i_version value to set
+ *
+ * Set @inode's i_version field to @val, and flag it for increment on the next
+ * change.
+ *
+ * Filesystems that persistently store the i_version on disk should use this
+ * when loading an existing inode from disk.
+ *
+ * When loading in an i_version value from a backing store, we can't be certain
+ * that it wasn't previously viewed before being stored. Thus, we must assume
+ * that it was, to ensure that we don't end up handing out the same value for
+ * different versions of the same inode.
+ */
+static inline void
+inode_set_iversion_queried(struct inode *inode, u64 val)
+{
+ inode_set_iversion_raw(inode, (val << I_VERSION_QUERIED_SHIFT) |
+ I_VERSION_QUERIED);
+}
+
+/**
+ * inode_maybe_inc_iversion - increments i_version
+ * @inode: inode with the i_version that should be updated
+ * @force: increment the counter even if it's not necessary?
+ *
+ * Every time the inode is modified, the i_version field must be seen to have
+ * changed by any observer.
+ *
+ * If "force" is set or the QUERIED flag is set, then ensure that we increment
+ * the value, and clear the queried flag.
+ *
+ * In the common case where neither is set, then we can return "false" without
+ * updating i_version.
+ *
+ * If this function returns false, and no other metadata has changed, then we
+ * can avoid logging the metadata.
+ */
+static inline bool
+inode_maybe_inc_iversion(struct inode *inode, bool force)
+{
+ u64 cur, old, new;
+
+ /*
+ * The i_version field is not strictly ordered with any other inode
+ * information, but the legacy inode_inc_iversion code used a spinlock
+ * to serialize increments.
+ *
+ * Here, we add full memory barriers to ensure that any de-facto
+ * ordering with other info is preserved.
+ *
+ * This barrier pairs with the barrier in inode_query_iversion()
+ */
+ smp_mb();
+ cur = inode_peek_iversion_raw(inode);
+ for (;;) {
+ /* If flag is clear then we needn't do anything */
+ if (!force && !(cur & I_VERSION_QUERIED))
+ return false;
+
+ /* Since lowest bit is flag, add 2 to avoid it */
+ new = (cur & ~I_VERSION_QUERIED) + I_VERSION_INCREMENT;
+
+ old = atomic64_cmpxchg(&inode->i_version, cur, new);
+ if (likely(old == cur))
+ break;
+ cur = old;
+ }
+ return true;
+}
+
+
+/**
+ * inode_inc_iversion - forcibly increment i_version
+ * @inode: inode that needs to be updated
+ *
+ * Forcibly increment the i_version field. This always results in a change to
+ * the observable value.
+ */
+static inline void
+inode_inc_iversion(struct inode *inode)
+{
+ inode_maybe_inc_iversion(inode, true);
+}
+
+/**
+ * inode_iversion_need_inc - is the i_version in need of being incremented?
+ * @inode: inode to check
+ *
+ * Returns whether the inode->i_version counter needs incrementing on the next
+ * change. Just fetch the value and check the QUERIED flag.
+ */
+static inline bool
+inode_iversion_need_inc(struct inode *inode)
+{
+ return inode_peek_iversion_raw(inode) & I_VERSION_QUERIED;
+}
+
+/**
+ * inode_inc_iversion_raw - forcibly increment raw i_version
+ * @inode: inode that needs to be updated
+ *
+ * Forcibly increment the raw i_version field. This always results in a change
+ * to the raw value.
+ *
+ * NFS will use the i_version field to store the value from the server. It
+ * mostly treats it as opaque, but in the case where it holds a write
+ * delegation, it must increment the value itself. This function does that.
+ */
+static inline void
+inode_inc_iversion_raw(struct inode *inode)
+{
+ atomic64_inc(&inode->i_version);
+}
+
+/**
+ * inode_peek_iversion - read i_version without flagging it to be incremented
+ * @inode: inode from which i_version should be read
+ *
+ * Read the inode i_version counter for an inode without registering it as a
+ * query.
+ *
+ * This is typically used by local filesystems that need to store an i_version
+ * on disk. In that situation, it's not necessary to flag it as having been
+ * viewed, as the result won't be used to gauge changes from that point.
+ */
+static inline u64
+inode_peek_iversion(const struct inode *inode)
+{
+ return inode_peek_iversion_raw(inode) >> I_VERSION_QUERIED_SHIFT;
+}
+
+/**
+ * inode_query_iversion - read i_version for later use
+ * @inode: inode from which i_version should be read
+ *
+ * Read the inode i_version counter. This should be used by callers that wish
+ * to store the returned i_version for later comparison. This will guarantee
+ * that a later query of the i_version will result in a different value if
+ * anything has changed.
+ *
+ * In this implementation, we fetch the current value and, if the QUERIED flag
+ * is not already set, set it and try to swap the result into place with a
+ * cmpxchg. If that fails, we try again with the newly fetched value from the
+ * cmpxchg.
+ */
+static inline u64
+inode_query_iversion(struct inode *inode)
+{
+ u64 cur, old, new;
+
+ cur = inode_peek_iversion_raw(inode);
+ for (;;) {
+ /* If flag is already set, then no need to swap */
+ if (cur & I_VERSION_QUERIED) {
+ /*
+ * This barrier (and the implicit barrier in the
+ * cmpxchg below) pairs with the barrier in
+ * inode_maybe_inc_iversion().
+ */
+ smp_mb();
+ break;
+ }
+
+ new = cur | I_VERSION_QUERIED;
+ old = atomic64_cmpxchg(&inode->i_version, cur, new);
+ if (likely(old == cur))
+ break;
+ cur = old;
+ }
+ return cur >> I_VERSION_QUERIED_SHIFT;
+}
+
+/**
+ * inode_cmp_iversion_raw - check whether the raw i_version counter has changed
+ * @inode: inode to check
+ * @old: old value to check against its i_version
+ *
+ * Compare the current raw i_version counter with a previous one. Returns false
+ * if they are the same or true if they are different.
+ */
+static inline bool
+inode_cmp_iversion_raw(const struct inode *inode, u64 old)
+{
+ return inode_peek_iversion_raw(inode) != old;
+}
+
+/**
+ * inode_cmp_iversion - check whether the i_version counter has changed
+ * @inode: inode to check
+ * @old: old value to check against its i_version
+ *
+ * Compare an i_version counter with a previous one. Returns false if they are
+ * the same, and true if they are different.
+ *
+ * Note that we don't need to set the QUERIED flag in this case, as the value
+ * in the inode is not being recorded for later use.
+ */
+static inline bool
+inode_cmp_iversion(const struct inode *inode, u64 old)
+{
+ return inode_peek_iversion(inode) != old;
+}
+#endif
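
A minimal filesystem-side sketch of the intended pattern (demo_* names are hypothetical): writes only dirty the inode when a prior query made the increment necessary, and a cache revalidates against a previously queried value:

#include <linux/fs.h>
#include <linux/iversion.h>

static void demo_data_changed(struct inode *inode)
{
	/* Increment only if i_version was queried since the last bump. */
	if (inode_maybe_inc_iversion(inode, false))
		mark_inode_dirty(inode);
}

static u64 demo_revalidate(struct inode *inode, u64 cached)
{
	if (inode_cmp_iversion(inode, cached)) {
		/* Something changed; record the new value for next time. */
		cached = inode_query_iversion(inode);
	}
	return cached;
}
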
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index c7b368c734af..e0340ca08d98 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -160,6 +160,8 @@ extern void arch_jump_label_transform_static(struct jump_entry *entry,
extern int jump_label_text_reserved(void *start, void *end);
extern void static_key_slow_inc(struct static_key *key);
extern void static_key_slow_dec(struct static_key *key);
+extern void static_key_slow_inc_cpuslocked(struct static_key *key);
+extern void static_key_slow_dec_cpuslocked(struct static_key *key);
extern void jump_label_apply_nops(struct module *mod);
extern int static_key_count(struct static_key *key);
extern void static_key_enable(struct static_key *key);
@@ -222,6 +224,9 @@ static inline void static_key_slow_dec(struct static_key *key)
atomic_dec(&key->enabled);
}
+#define static_key_slow_inc_cpuslocked(key) static_key_slow_inc(key)
+#define static_key_slow_dec_cpuslocked(key) static_key_slow_dec(key)
+
static inline int jump_label_text_reserved(void *start, void *end)
{
return 0;
@@ -416,6 +421,8 @@ extern bool ____wrong_branch_error(void);
#define static_branch_inc(x) static_key_slow_inc(&(x)->key)
#define static_branch_dec(x) static_key_slow_dec(&(x)->key)
+#define static_branch_inc_cpuslocked(x) static_key_slow_inc_cpuslocked(&(x)->key)
+#define static_branch_dec_cpuslocked(x) static_key_slow_dec_cpuslocked(&(x)->key)
/*
* Normal usage; boolean enable/disable.
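
As an illustration, a caller that already holds the CPU hotplug read lock can use the new _cpuslocked variants to avoid recursive locking (demo_* is hypothetical):

#include <linux/cpu.h>
#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(demo_key);

static void demo_enable(void)
{
	cpus_read_lock();
	/* _cpuslocked: don't re-take the hotplug lock we already hold. */
	static_branch_inc_cpuslocked(&demo_key);
	cpus_read_unlock();
}
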
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 2d1d9de06728..7f4b60abdf27 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -50,10 +50,7 @@ struct nvm_id;
struct nvm_dev;
struct nvm_tgt_dev;
-typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
-typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
- nvm_l2p_update_fn *, void *);
typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
@@ -66,7 +63,6 @@ typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t);
struct nvm_dev_ops {
nvm_id_fn *identity;
- nvm_get_l2p_tbl_fn *get_l2p_tbl;
nvm_op_bb_tbl_fn *get_bb_tbl;
nvm_op_set_bb_fn *set_bb_tbl;
@@ -112,8 +108,6 @@ enum {
NVM_RSP_WARN_HIGHECC = 0x4700,
/* Device opcodes */
- NVM_OP_HBREAD = 0x02,
- NVM_OP_HBWRITE = 0x81,
NVM_OP_PWRITE = 0x91,
NVM_OP_PREAD = 0x92,
NVM_OP_ERASE = 0x90,
@@ -165,12 +159,16 @@ struct nvm_id_group {
u8 fmtype;
u8 num_ch;
u8 num_lun;
- u8 num_pln;
- u16 num_blk;
- u16 num_pg;
- u16 fpg_sz;
+ u16 num_chk;
+ u16 clba;
u16 csecs;
u16 sos;
+
+ u16 ws_min;
+ u16 ws_opt;
+ u16 ws_seq;
+ u16 ws_per_chk;
+
u32 trdt;
u32 trdm;
u32 tprt;
@@ -181,7 +179,10 @@ struct nvm_id_group {
u32 mccap;
u16 cpar;
- struct nvm_id_lp_tbl lptbl;
+ /* 1.2 compatibility */
+ u8 num_pln;
+ u16 num_pg;
+ u16 fpg_sz;
};
struct nvm_addr_format {
@@ -217,6 +218,10 @@ struct nvm_target {
#define ADDR_EMPTY (~0ULL)
+#define NVM_TARGET_DEFAULT_OP (101)
+#define NVM_TARGET_MIN_OP (3)
+#define NVM_TARGET_MAX_OP (80)
+
#define NVM_VERSION_MAJOR 1
#define NVM_VERSION_MINOR 0
#define NVM_VERSION_PATCH 0
@@ -239,7 +244,6 @@ struct nvm_rq {
void *meta_list;
dma_addr_t dma_meta_list;
- struct completion *wait;
nvm_end_io_fn *end_io;
uint8_t opcode;
@@ -268,31 +272,38 @@ enum {
NVM_BLK_ST_BAD = 0x8, /* Bad block */
};
+
/* Device generic information */
struct nvm_geo {
+ /* generic geometry */
int nr_chnls;
- int nr_luns;
- int luns_per_chnl; /* -1 if channels are not symmetric */
- int nr_planes;
- int sec_per_pg; /* only sectors for a single page */
- int pgs_per_blk;
- int blks_per_lun;
- int fpg_size;
- int pfpg_size; /* size of buffer if all pages are to be read */
+ int all_luns; /* across channels */
+ int nr_luns; /* per channel */
+ int nr_chks; /* per lun */
+
int sec_size;
int oob_size;
int mccap;
- struct nvm_addr_format ppaf;
- /* Calculated/Cached values. These do not reflect the actual usable
- * blocks at run-time.
- */
+ int sec_per_chk;
+ int sec_per_lun;
+
+ int ws_min;
+ int ws_opt;
+ int ws_seq;
+ int ws_per_chk;
+
int max_rq_size;
- int plane_mode; /* drive device in single, double or quad mode */
+ int op;
+
+ struct nvm_addr_format ppaf;
+
+ /* Legacy 1.2 specific geometry */
+ int plane_mode; /* drive device in single, double or quad mode */
+ int nr_planes;
+ int sec_per_pg; /* only sectors for a single page */
int sec_per_pl; /* all sectors across planes */
- int sec_per_blk;
- int sec_per_lun;
};
/* sub-device structure */
@@ -320,10 +331,6 @@ struct nvm_dev {
/* Device information */
struct nvm_geo geo;
- /* lower page table */
- int lps_per_blk;
- int *lptbl;
-
unsigned long total_secs;
unsigned long *lun_map;
@@ -346,36 +353,6 @@ struct nvm_dev {
struct list_head targets;
};
-static inline struct ppa_addr linear_to_generic_addr(struct nvm_geo *geo,
- u64 pba)
-{
- struct ppa_addr l;
- int secs, pgs, blks, luns;
- sector_t ppa = pba;
-
- l.ppa = 0;
-
- div_u64_rem(ppa, geo->sec_per_pg, &secs);
- l.g.sec = secs;
-
- sector_div(ppa, geo->sec_per_pg);
- div_u64_rem(ppa, geo->pgs_per_blk, &pgs);
- l.g.pg = pgs;
-
- sector_div(ppa, geo->pgs_per_blk);
- div_u64_rem(ppa, geo->blks_per_lun, &blks);
- l.g.blk = blks;
-
- sector_div(ppa, geo->blks_per_lun);
- div_u64_rem(ppa, geo->luns_per_chnl, &luns);
- l.g.lun = luns;
-
- sector_div(ppa, geo->luns_per_chnl);
- l.g.ch = ppa;
-
- return l;
-}
-
static inline struct ppa_addr generic_to_dev_addr(struct nvm_tgt_dev *tgt_dev,
struct ppa_addr r)
{
@@ -418,25 +395,6 @@ static inline struct ppa_addr dev_to_generic_addr(struct nvm_tgt_dev *tgt_dev,
return l;
}
-static inline int ppa_empty(struct ppa_addr ppa_addr)
-{
- return (ppa_addr.ppa == ADDR_EMPTY);
-}
-
-static inline void ppa_set_empty(struct ppa_addr *ppa_addr)
-{
- ppa_addr->ppa = ADDR_EMPTY;
-}
-
-static inline int ppa_cmp_blk(struct ppa_addr ppa1, struct ppa_addr ppa2)
-{
- if (ppa_empty(ppa1) || ppa_empty(ppa2))
- return 0;
-
- return ((ppa1.g.ch == ppa2.g.ch) && (ppa1.g.lun == ppa2.g.lun) &&
- (ppa1.g.blk == ppa2.g.blk));
-}
-
typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
typedef sector_t (nvm_tgt_capacity_fn)(void *);
typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *,
@@ -481,17 +439,10 @@ extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *,
extern int nvm_max_phys_sects(struct nvm_tgt_dev *);
extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *);
extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *);
-extern int nvm_erase_sync(struct nvm_tgt_dev *, struct ppa_addr *, int);
-extern int nvm_get_l2p_tbl(struct nvm_tgt_dev *, u64, u32, nvm_l2p_update_fn *,
- void *);
-extern int nvm_get_area(struct nvm_tgt_dev *, sector_t *, sector_t);
-extern void nvm_put_area(struct nvm_tgt_dev *, sector_t);
extern void nvm_end_io(struct nvm_rq *);
extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int);
extern int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr, u8 *);
-extern void nvm_part_to_tgt(struct nvm_dev *, sector_t *, int);
-
#else /* CONFIG_NVM */
struct nvm_dev_ops;
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index fc5c1be3f6f4..4754f01c1abb 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -40,7 +40,6 @@
* @new_func: pointer to the patched function code
* @old_sympos: a hint indicating which symbol position the old function
* can be found (optional)
- * @immediate: patch the func immediately, bypassing safety mechanisms
* @old_addr: the address of the function being patched
* @kobj: kobject for sysfs resources
* @stack_node: list node for klp_ops func_stack list
@@ -76,7 +75,6 @@ struct klp_func {
* in kallsyms for the given object is used.
*/
unsigned long old_sympos;
- bool immediate;
/* internal */
unsigned long old_addr;
@@ -137,7 +135,6 @@ struct klp_object {
* struct klp_patch - patch structure for live patching
* @mod: reference to the live patch module
* @objs: object entries for kernel objects to be patched
- * @immediate: patch all funcs immediately, bypassing safety mechanisms
* @list: list node for global list of registered patches
* @kobj: kobject for sysfs resources
* @enabled: the patch is enabled (but operation may be incomplete)
@@ -147,7 +144,6 @@ struct klp_patch {
/* external */
struct module *mod;
struct klp_object *objs;
- bool immediate;
/* internal */
struct list_head list;
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index d7d313fb9cd4..4fd95dbeb52f 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -17,6 +17,7 @@
#include <net/ipv6.h>
#include <linux/fs.h>
#include <linux/kref.h>
+#include <linux/refcount.h>
#include <linux/utsname.h>
#include <linux/lockd/bind.h>
#include <linux/lockd/xdr.h>
@@ -58,7 +59,7 @@ struct nlm_host {
u32 h_state; /* pseudo-state counter */
u32 h_nsmstate; /* true remote NSM state */
u32 h_pidcount; /* Pseudopids */
- atomic_t h_count; /* reference count */
+ refcount_t h_count; /* reference count */
struct mutex h_mutex; /* mutex for pmap binding */
unsigned long h_nextrebind; /* next portmap call */
unsigned long h_expires; /* eligible for GC */
@@ -83,7 +84,7 @@ struct nlm_host {
struct nsm_handle {
struct list_head sm_link;
- atomic_t sm_count;
+ refcount_t sm_count;
char *sm_mon_name;
char *sm_name;
struct sockaddr_storage sm_addr;
@@ -122,7 +123,7 @@ static inline struct sockaddr *nlm_srcaddr(const struct nlm_host *host)
*/
struct nlm_lockowner {
struct list_head list;
- atomic_t count;
+ refcount_t count;
struct nlm_host *host;
fl_owner_t owner;
@@ -136,7 +137,7 @@ struct nlm_wait;
*/
#define NLMCLNT_OHSIZE ((__NEW_UTS_LEN) + 10u)
struct nlm_rqst {
- atomic_t a_count;
+ refcount_t a_count;
unsigned int a_flags; /* initial RPC task flags */
struct nlm_host * a_host; /* host handle */
struct nlm_args a_args; /* arguments */
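
The conversion pattern, sketched with hypothetical demo_* helpers (the real lockd release paths differ): refcount_t keeps the same inc/dec-and-test shape as atomic_t but traps overflow and use-after-zero at runtime:

#include <linux/lockd/lockd.h>
#include <linux/refcount.h>
#include <linux/slab.h>

static struct nsm_handle *demo_nsm_get(struct nsm_handle *nsm)
{
	refcount_inc(&nsm->sm_count);			/* was atomic_inc() */
	return nsm;
}

static void demo_nsm_put(struct nsm_handle *nsm)
{
	if (refcount_dec_and_test(&nsm->sm_count))	/* was atomic_dec_and_test() */
		kfree(nsm);
}
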
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 3251d9c0d313..6fc77d4dbdcd 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -337,9 +337,9 @@ extern void lock_release(struct lockdep_map *lock, int nested,
/*
* Same "read" as for lock_acquire(), except -1 means any.
*/
-extern int lock_is_held_type(struct lockdep_map *lock, int read);
+extern int lock_is_held_type(const struct lockdep_map *lock, int read);
-static inline int lock_is_held(struct lockdep_map *lock)
+static inline int lock_is_held(const struct lockdep_map *lock)
{
return lock_is_held_type(lock, -1);
}
@@ -367,8 +367,6 @@ extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
-# define INIT_LOCKDEP .lockdep_recursion = 0,
-
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
#define lockdep_assert_held(l) do { \
@@ -426,7 +424,6 @@ static inline void lockdep_on(void)
* #ifdef the call himself.
*/
-# define INIT_LOCKDEP
# define lockdep_reset() do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size) do { } while (0)
# define lockdep_sys_exit() do { } while (0)
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index 78dc85365c4f..080798f17ece 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -645,11 +645,6 @@ struct axp20x_dev {
const struct regmap_irq_chip *regmap_irq_chip;
};
-struct axp288_extcon_pdata {
- /* GPIO pin control to switch D+/D- lines b/w PMIC and SOC */
- struct gpio_desc *gpio_mux_cntl;
-};
-
/* generic helper function for reading 9-16 bit wide regs */
static inline int axp20x_read_variable_width(struct regmap *regmap,
unsigned int reg, unsigned int width)
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index 4e887ba22635..c61535979b8f 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -322,6 +322,10 @@ extern struct attribute_group cros_ec_attr_group;
extern struct attribute_group cros_ec_lightbar_attr_group;
extern struct attribute_group cros_ec_vbc_attr_group;
+/* debugfs stuff */
+int cros_ec_debugfs_init(struct cros_ec_dev *ec);
+void cros_ec_debugfs_remove(struct cros_ec_dev *ec);
+
/* ACPI GPE handler */
#ifdef CONFIG_ACPI
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
index 2b16e95b9bb8..a83f6498b95e 100644
--- a/include/linux/mfd/cros_ec_commands.h
+++ b/include/linux/mfd/cros_ec_commands.h
@@ -2904,16 +2904,33 @@ enum usb_pd_control_mux {
USB_PD_CTRL_MUX_AUTO = 5,
};
+enum usb_pd_control_swap {
+ USB_PD_CTRL_SWAP_NONE = 0,
+ USB_PD_CTRL_SWAP_DATA = 1,
+ USB_PD_CTRL_SWAP_POWER = 2,
+ USB_PD_CTRL_SWAP_VCONN = 3,
+ USB_PD_CTRL_SWAP_COUNT
+};
+
struct ec_params_usb_pd_control {
uint8_t port;
uint8_t role;
uint8_t mux;
+ uint8_t swap;
} __packed;
#define PD_CTRL_RESP_ENABLED_COMMS (1 << 0) /* Communication enabled */
#define PD_CTRL_RESP_ENABLED_CONNECTED (1 << 1) /* Device connected */
#define PD_CTRL_RESP_ENABLED_PD_CAPABLE (1 << 2) /* Partner is PD capable */
+#define PD_CTRL_RESP_ROLE_POWER BIT(0) /* 0=SNK/1=SRC */
+#define PD_CTRL_RESP_ROLE_DATA BIT(1) /* 0=UFP/1=DFP */
+#define PD_CTRL_RESP_ROLE_VCONN BIT(2) /* Vconn status */
+#define PD_CTRL_RESP_ROLE_DR_POWER BIT(3) /* Partner is dualrole power */
+#define PD_CTRL_RESP_ROLE_DR_DATA BIT(4) /* Partner is dualrole data */
+#define PD_CTRL_RESP_ROLE_USB_COMM BIT(5) /* Partner USB comm capable */
+#define PD_CTRL_RESP_ROLE_EXT_POWERED BIT(6) /* Partner externally powered */
+
struct ec_response_usb_pd_control_v1 {
uint8_t enabled;
uint8_t role;
diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h
index 3c8568aa82a5..75e5c8ff85fc 100644
--- a/include/linux/mfd/palmas.h
+++ b/include/linux/mfd/palmas.h
@@ -3733,6 +3733,9 @@ enum usb_irq_events {
#define TPS65917_REGEN3_CTRL_MODE_ACTIVE 0x01
#define TPS65917_REGEN3_CTRL_MODE_ACTIVE_SHIFT 0x00
+/* POWERHOLD Mask field for PRIMARY_SECONDARY_PAD2 register */
+#define TPS65917_PRIMARY_SECONDARY_PAD2_GPIO_5_MASK 0xC
+
/* Registers for function RESOURCE */
#define TPS65917_REGEN1_CTRL 0x2
#define TPS65917_PLLEN_CTRL 0x3
diff --git a/include/linux/mfd/rave-sp.h b/include/linux/mfd/rave-sp.h
new file mode 100644
index 000000000000..796fb9794c9e
--- /dev/null
+++ b/include/linux/mfd/rave-sp.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+/*
+ * Core definitions for RAVE SP MFD driver.
+ *
+ * Copyright (C) 2017 Zodiac Inflight Innovations
+ */
+
+#ifndef _LINUX_RAVE_SP_H_
+#define _LINUX_RAVE_SP_H_
+
+#include <linux/notifier.h>
+
+enum rave_sp_command {
+ RAVE_SP_CMD_GET_FIRMWARE_VERSION = 0x20,
+ RAVE_SP_CMD_GET_BOOTLOADER_VERSION = 0x21,
+ RAVE_SP_CMD_BOOT_SOURCE = 0x26,
+ RAVE_SP_CMD_GET_BOARD_COPPER_REV = 0x2B,
+ RAVE_SP_CMD_GET_GPIO_STATE = 0x2F,
+
+ RAVE_SP_CMD_STATUS = 0xA0,
+ RAVE_SP_CMD_SW_WDT = 0xA1,
+ RAVE_SP_CMD_PET_WDT = 0xA2,
+ RAVE_SP_CMD_RESET = 0xA7,
+ RAVE_SP_CMD_RESET_REASON = 0xA8,
+
+ RAVE_SP_CMD_REQ_COPPER_REV = 0xB6,
+ RAVE_SP_CMD_GET_I2C_DEVICE_STATUS = 0xBA,
+ RAVE_SP_CMD_GET_SP_SILICON_REV = 0xB9,
+ RAVE_SP_CMD_CONTROL_EVENTS = 0xBB,
+
+ RAVE_SP_EVNT_BASE = 0xE0,
+};
+
+struct rave_sp;
+
+static inline unsigned long rave_sp_action_pack(u8 event, u8 value)
+{
+ return ((unsigned long)value << 8) | event;
+}
+
+static inline u8 rave_sp_action_unpack_event(unsigned long action)
+{
+ return action;
+}
+
+static inline u8 rave_sp_action_unpack_value(unsigned long action)
+{
+ return action >> 8;
+}
+
+int rave_sp_exec(struct rave_sp *sp,
+ void *__data, size_t data_size,
+ void *reply_data, size_t reply_data_size);
+
+struct device;
+int devm_rave_sp_register_event_notifier(struct device *dev,
+ struct notifier_block *nb);
+
+#endif /* _LINUX_RAVE_SP_H_ */
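
A client sketch for this API (demo_* is hypothetical; the two-byte command framing, with the second byte reserved for an ID filled in by the rave-sp core, is an assumption):

#include <linux/mfd/rave-sp.h>

static int demo_get_boot_source(struct rave_sp *sp, u8 *boot_source)
{
	u8 cmd[] = {
		[0] = RAVE_SP_CMD_BOOT_SOURCE,
		[1] = 0,	/* filled in by the rave-sp core */
	};

	return rave_sp_exec(sp, cmd, sizeof(cmd),
			    boot_source, sizeof(*boot_source));
}
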
diff --git a/include/linux/mfd/stm32-lptimer.h b/include/linux/mfd/stm32-lptimer.h
index 77c7cf40d9b4..605f62264825 100644
--- a/include/linux/mfd/stm32-lptimer.h
+++ b/include/linux/mfd/stm32-lptimer.h
@@ -1,13 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* STM32 Low-Power Timer parent driver.
- *
* Copyright (C) STMicroelectronics 2017
- *
* Author: Fabrice Gasnier <fabrice.gasnier@st.com>
- *
* Inspired by Benjamin Gaignard's stm32-timers driver
- *
- * License terms: GNU General Public License (GPL), version 2
*/
#ifndef _LINUX_STM32_LPTIMER_H_
diff --git a/include/linux/mfd/stm32-timers.h b/include/linux/mfd/stm32-timers.h
index ce7346e7f77a..2aadab6f34a1 100644
--- a/include/linux/mfd/stm32-timers.h
+++ b/include/linux/mfd/stm32-timers.h
@@ -1,9 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) STMicroelectronics 2016
- *
* Author: Benjamin Gaignard <benjamin.gaignard@st.com>
- *
- * License terms: GNU General Public License (GPL), version 2
*/
#ifndef _LINUX_STM32_GPTIMER_H_
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index e1cfe9194129..396a103c8bc6 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -25,26 +25,6 @@
writew((val) >> 16, (addr) + 2); \
} while (0)
-#define CNF_CMD 0x04
-#define CNF_CTL_BASE 0x10
-#define CNF_INT_PIN 0x3d
-#define CNF_STOP_CLK_CTL 0x40
-#define CNF_GCLK_CTL 0x41
-#define CNF_SD_CLK_MODE 0x42
-#define CNF_PIN_STATUS 0x44
-#define CNF_PWR_CTL_1 0x48
-#define CNF_PWR_CTL_2 0x49
-#define CNF_PWR_CTL_3 0x4a
-#define CNF_CARD_DETECT_MODE 0x4c
-#define CNF_SD_SLOT 0x50
-#define CNF_EXT_GCLK_CTL_1 0xf0
-#define CNF_EXT_GCLK_CTL_2 0xf1
-#define CNF_EXT_GCLK_CTL_3 0xf9
-#define CNF_SD_LED_EN_1 0xfa
-#define CNF_SD_LED_EN_2 0xfe
-
-#define SDCREN 0x2 /* Enable access to MMC CTL regs. (flag in COMMAND_REG)*/
-
#define sd_config_write8(base, shift, reg, val) \
tmio_iowrite8((val), (base) + ((reg) << (shift)))
#define sd_config_write16(base, shift, reg, val) \
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 409ffb14298a..e5258ee4e38b 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -79,6 +79,11 @@
<< __mlx5_dw_bit_off(typ, fld))); \
} while (0)
+#define MLX5_ARRAY_SET(typ, p, fld, idx, v) do { \
+ BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 32); \
+ MLX5_SET(typ, p, fld[idx], v); \
+} while (0)
+
#define MLX5_SET_TO_ONES(typ, p, fld) do { \
BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
@@ -244,6 +249,8 @@ enum {
MLX5_NON_FP_BFREGS_PER_UAR,
MLX5_UARS_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
MLX5_NON_FP_BFREGS_IN_PAGE = MLX5_NON_FP_BFREGS_PER_UAR * MLX5_UARS_IN_PAGE,
+ MLX5_MIN_DYN_BFREGS = 512,
+ MLX5_MAX_DYN_BFREGS = 1024,
};
enum {
@@ -284,6 +291,7 @@ enum {
MLX5_EVENT_QUEUE_TYPE_QP = 0,
MLX5_EVENT_QUEUE_TYPE_RQ = 1,
MLX5_EVENT_QUEUE_TYPE_SQ = 2,
+ MLX5_EVENT_QUEUE_TYPE_DCT = 6,
};
enum mlx5_event {
@@ -319,6 +327,8 @@ enum mlx5_event {
MLX5_EVENT_TYPE_PAGE_FAULT = 0xc,
MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd,
+ MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c,
+
MLX5_EVENT_TYPE_FPGA_ERROR = 0x20,
};
@@ -611,6 +621,11 @@ struct mlx5_eqe_pps {
u8 rsvd2[12];
} __packed;
+struct mlx5_eqe_dct {
+ __be32 reserved[6];
+ __be32 dctn;
+};
+
union ev_data {
__be32 raw[7];
struct mlx5_eqe_cmd cmd;
@@ -626,6 +641,7 @@ union ev_data {
struct mlx5_eqe_vport_change vport_change;
struct mlx5_eqe_port_module port_module;
struct mlx5_eqe_pps pps;
+ struct mlx5_eqe_dct dct;
} __packed;
struct mlx5_eqe {
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index a0610427e168..fb7e8b205eb9 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -155,6 +155,13 @@ enum mlx5_dcbx_oper_mode {
MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3,
};
+enum mlx5_dct_atomic_mode {
+ MLX5_ATOMIC_MODE_DCT_OFF = 20,
+ MLX5_ATOMIC_MODE_DCT_NONE = 0 << MLX5_ATOMIC_MODE_DCT_OFF,
+ MLX5_ATOMIC_MODE_DCT_IB_COMP = 1 << MLX5_ATOMIC_MODE_DCT_OFF,
+ MLX5_ATOMIC_MODE_DCT_CX = 2 << MLX5_ATOMIC_MODE_DCT_OFF,
+};
+
enum {
MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0,
MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1,
@@ -231,6 +238,9 @@ struct mlx5_bfreg_info {
u32 ver;
bool lib_uar_4k;
u32 num_sys_pages;
+ u32 num_static_sys_pages;
+ u32 total_num_bfregs;
+ u32 num_dyn_bfregs;
};
struct mlx5_cmd_first {
@@ -430,6 +440,7 @@ enum mlx5_res_type {
MLX5_RES_SRQ = 3,
MLX5_RES_XSRQ = 4,
MLX5_RES_XRQ = 5,
+ MLX5_RES_DCT = MLX5_EVENT_QUEUE_TYPE_DCT,
};
struct mlx5_core_rsc_common {
@@ -788,6 +799,7 @@ struct mlx5_clock {
u32 nominal_c_mult;
unsigned long overflow_period;
struct delayed_work overflow_work;
+ struct mlx5_core_dev *mdev;
struct ptp_clock *ptp;
struct ptp_clock_info ptp_info;
struct mlx5_pps pps_info;
@@ -826,7 +838,7 @@ struct mlx5_core_dev {
struct mlx5e_resources mlx5e_res;
struct {
struct mlx5_rsvd_gids reserved_gids;
- atomic_t roce_en;
+ u32 roce_en;
} roce;
#ifdef CONFIG_MLX5_FPGA
struct mlx5_fpga_device *fpga;
@@ -835,6 +847,8 @@ struct mlx5_core_dev {
struct cpu_rmap *rmap;
#endif
struct mlx5_clock clock;
+ struct mlx5_ib_clock_info *clock_info;
+ struct page *clock_info_page;
};
struct mlx5_db {
@@ -1103,7 +1117,7 @@ void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);
unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev);
int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
u8 roce_version, u8 roce_l3_type, const u8 *gid,
- const u8 *mac, bool vlan, u16 vlan_id);
+ const u8 *mac, bool vlan, u16 vlan_id, u8 port_num);
static inline int fw_initializing(struct mlx5_core_dev *dev)
{
@@ -1225,6 +1239,31 @@ static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev)
return !!(dev->priv.rl_table.max_size);
}
+static inline int mlx5_core_is_mp_slave(struct mlx5_core_dev *dev)
+{
+ return MLX5_CAP_GEN(dev, affiliate_nic_vport_criteria) &&
+ MLX5_CAP_GEN(dev, num_vhca_ports) <= 1;
+}
+
+static inline int mlx5_core_is_mp_master(struct mlx5_core_dev *dev)
+{
+ return MLX5_CAP_GEN(dev, num_vhca_ports) > 1;
+}
+
+static inline int mlx5_core_mp_enabled(struct mlx5_core_dev *dev)
+{
+ return mlx5_core_is_mp_slave(dev) ||
+ mlx5_core_is_mp_master(dev);
+}
+
+static inline int mlx5_core_native_port_num(struct mlx5_core_dev *dev)
+{
+ if (!mlx5_core_mp_enabled(dev))
+ return 1;
+
+ return MLX5_CAP_GEN(dev, native_port_num);
+}
+
enum {
MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};
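The mlx5_core_is_mp_slave()/_master() helpers added above encode a small decision table over two firmware capabilities. A userspace re-statement of that logic with stubbed capability reads; the real code queries MLX5_CAP_GEN() on the device, so the struct below is only a stand-in:

#include <stdbool.h>
#include <stdio.h>

struct caps {
	bool affiliate_nic_vport_criteria;
	int num_vhca_ports;
	int native_port_num;
};

static bool is_mp_slave(const struct caps *c)
{
	return c->affiliate_nic_vport_criteria && c->num_vhca_ports <= 1;
}

static bool is_mp_master(const struct caps *c)
{
	return c->num_vhca_ports > 1;
}

static int native_port_num(const struct caps *c)
{
	/* Devices outside a multi-port group always report port 1. */
	if (!is_mp_slave(c) && !is_mp_master(c))
		return 1;
	return c->native_port_num;
}

int main(void)
{
	struct caps master = { false, 2, 1 };
	struct caps slave = { true, 1, 2 };

	printf("master native port: %d\n", native_port_num(&master));
	printf("slave native port: %d\n", native_port_num(&slave));
	return 0;
}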
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 199bfcd2f2ce..f4e417686f62 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -502,7 +502,7 @@ struct mlx5_ifc_ads_bits {
u8 dei_cfi[0x1];
u8 eth_prio[0x3];
u8 sl[0x4];
- u8 port[0x8];
+ u8 vhca_port_num[0x8];
u8 rmac_47_32[0x10];
u8 rmac_31_0[0x20];
@@ -1078,7 +1078,12 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_5f8[0x3];
u8 log_max_xrq[0x5];
- u8 reserved_at_600[0x200];
+ u8 affiliate_nic_vport_criteria[0x8];
+ u8 native_port_num[0x8];
+ u8 num_vhca_ports[0x8];
+ u8 reserved_at_618[0x6];
+ u8 sw_owner_id[0x1];
+ u8 reserved_at_61f[0x1e1];
};
enum mlx5_flow_destination_type {
@@ -2646,7 +2651,12 @@ struct mlx5_ifc_nic_vport_context_bits {
u8 event_on_mc_address_change[0x1];
u8 event_on_uc_address_change[0x1];
- u8 reserved_at_40[0xf0];
+ u8 reserved_at_40[0xc];
+
+ u8 affiliation_criteria[0x4];
+ u8 affiliated_vhca_id[0x10];
+
+ u8 reserved_at_60[0xd0];
u8 mtu[0x10];
@@ -3289,7 +3299,8 @@ struct mlx5_ifc_set_roce_address_in_bits {
u8 op_mod[0x10];
u8 roce_address_index[0x10];
- u8 reserved_at_50[0x10];
+ u8 reserved_at_50[0xc];
+ u8 vhca_port_num[0x4];
u8 reserved_at_60[0x20];
@@ -3909,7 +3920,8 @@ struct mlx5_ifc_query_roce_address_in_bits {
u8 op_mod[0x10];
u8 roce_address_index[0x10];
- u8 reserved_at_50[0x10];
+ u8 reserved_at_50[0xc];
+ u8 vhca_port_num[0x4];
u8 reserved_at_60[0x20];
};
@@ -5341,7 +5353,9 @@ struct mlx5_ifc_modify_nic_vport_context_out_bits {
};
struct mlx5_ifc_modify_nic_vport_field_select_bits {
- u8 reserved_at_0[0x14];
+ u8 reserved_at_0[0x12];
+ u8 affiliation[0x1];
+ u8 reserved_at_e[0x1];
u8 disable_uc_local_lb[0x1];
u8 disable_mc_local_lb[0x1];
u8 node_guid[0x1];
@@ -5562,6 +5576,7 @@ struct mlx5_ifc_init_hca_in_bits {
u8 op_mod[0x10];
u8 reserved_at_40[0x40];
+ u8 sw_owner_id[4][0x20];
};
struct mlx5_ifc_init2rtr_qp_out_bits {
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 62af7512dabb..4778d41085d4 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -473,6 +473,11 @@ struct mlx5_core_qp {
int pid;
};
+struct mlx5_core_dct {
+ struct mlx5_core_qp mqp;
+ struct completion drained;
+};
+
struct mlx5_qp_path {
u8 fl_free_ar;
u8 rsvd3;
@@ -549,6 +554,9 @@ static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev,
return radix_tree_lookup(&dev->priv.mkey_table.tree, key);
}
+int mlx5_core_create_dct(struct mlx5_core_dev *dev,
+ struct mlx5_core_dct *qp,
+ u32 *in, int inlen);
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp,
u32 *in,
@@ -558,8 +566,12 @@ int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
struct mlx5_core_qp *qp);
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp);
+int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
+ struct mlx5_core_dct *dct);
int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
u32 *out, int outlen);
+int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
+ u32 *out, int outlen);
int mlx5_core_set_delay_drop(struct mlx5_core_dev *dev,
u32 timeout_usec);
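struct mlx5_core_dct pairs the QP with a completion, which suggests the teardown flow: destroy kicks off a firmware drain, the MLX5_EVENT_TYPE_DCT_DRAINED event (added in device.h above) completes it, and only then is the object freed. A single-threaded model of that handshake; the real driver uses struct completion and an EQ handler, and the ordering below is an inference from the declarations, not taken from the patch itself:

#include <stdbool.h>
#include <stdio.h>

struct completion { bool done; };

static void complete(struct completion *c) { c->done = true; }
static void wait_for_completion(struct completion *c)
{
	while (!c->done)
		; /* the kernel sleeps here instead of spinning */
}

struct core_dct {
	int dctn;
	struct completion drained;
};

/* Would be called from the EQ handler on MLX5_EVENT_TYPE_DCT_DRAINED. */
static void dct_drained_event(struct core_dct *dct)
{
	complete(&dct->drained);
}

static void destroy_dct(struct core_dct *dct)
{
	/* 1. issue the firmware drain/destroy command (omitted) */
	dct_drained_event(dct);	/* 2. firmware raises the drained event */
	wait_for_completion(&dct->drained);	/* 3. now safe to free */
	printf("dct %d drained and destroyed\n", dct->dctn);
}

int main(void)
{
	struct core_dct dct = { .dctn = 42 };

	destroy_dct(&dct);
	return 0;
}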
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index aaa0bb9e7655..64e193e87394 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -116,4 +116,8 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
struct mlx5_hca_vport_context *req);
int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable);
int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status);
+
+int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
+ struct mlx5_core_dev *port_mdev);
+int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev);
#endif /* __MLX5_VPORT_H__ */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ea818ff739cd..7fc92384977e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2570,8 +2570,8 @@ enum mf_flags {
MF_MUST_KILL = 1 << 2,
MF_SOFT_OFFLINE = 1 << 3,
};
-extern int memory_failure(unsigned long pfn, int trapno, int flags);
-extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
+extern int memory_failure(unsigned long pfn, int flags);
+extern void memory_failure_queue(unsigned long pfn, int flags);
extern int unpoison_memory(unsigned long pfn);
extern int get_hwpoison_page(struct page *page);
#define put_hwpoison_page(page) put_page(page)
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index e7743eca1021..85146235231e 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -324,6 +324,7 @@ struct mmc_host {
#define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */
#define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */
#define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */
+#define MMC_CAP_DONE_COMPLETE (1 << 27) /* RW reqs can be completed within mmc_request_done() */
#define MMC_CAP_CD_WAKE (1 << 28) /* Enable card detect wake */
#define MMC_CAP_CMD_DURING_TFR (1 << 29) /* Commands during data transfer */
#define MMC_CAP_CMD23 (1 << 30) /* CMD23 supported. */
@@ -380,6 +381,7 @@ struct mmc_host {
unsigned int doing_retune:1; /* re-tuning in progress */
unsigned int retune_now:1; /* do re-tuning at next req */
unsigned int retune_paused:1; /* re-tuning is temporarily disabled */
+ unsigned int use_blk_mq:1; /* use blk-mq */
int rescan_disable; /* disable card detection */
int rescan_entered; /* used with nonremovable devices */
@@ -422,9 +424,6 @@ struct mmc_host {
struct dentry *debugfs_root;
- struct mmc_async_req *areq; /* active async req */
- struct mmc_context_info context_info; /* async synchronization info */
-
/* Ongoing data transfer that allows commands during transfer */
struct mmc_request *ongoing_mrq;
diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h
index 82f0d289f110..91f1ba0663c8 100644
--- a/include/linux/mmc/slot-gpio.h
+++ b/include/linux/mmc/slot-gpio.h
@@ -33,5 +33,6 @@ void mmc_gpio_set_cd_isr(struct mmc_host *host,
irqreturn_t (*isr)(int irq, void *dev_id));
void mmc_gpiod_request_cd_irq(struct mmc_host *host);
bool mmc_can_gpio_cd(struct mmc_host *host);
+bool mmc_can_gpio_ro(struct mmc_host *host);
#endif
diff --git a/include/linux/module.h b/include/linux/module.h
index 9642d3116718..70245f1a3590 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -807,6 +807,15 @@ static inline void module_bug_finalize(const Elf_Ehdr *hdr,
static inline void module_bug_cleanup(struct module *mod) {}
#endif /* CONFIG_GENERIC_BUG */
+#ifdef RETPOLINE
+extern bool retpoline_module_ok(bool has_retpoline);
+#else
+static inline bool retpoline_module_ok(bool has_retpoline)
+{
+ return true;
+}
+#endif
+
#ifdef CONFIG_MODULE_SIG
static inline bool module_sig_ok(struct module *module)
{
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index 3aa56e3104bb..b5b43f94f311 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -270,75 +270,67 @@ void map_destroy(struct mtd_info *mtd);
#define INVALIDATE_CACHED_RANGE(map, from, size) \
do { if (map->inval_cache) map->inval_cache(map, from, size); } while (0)
-
-static inline int map_word_equal(struct map_info *map, map_word val1, map_word val2)
-{
- int i;
-
- for (i = 0; i < map_words(map); i++) {
- if (val1.x[i] != val2.x[i])
- return 0;
- }
-
- return 1;
-}
-
-static inline map_word map_word_and(struct map_info *map, map_word val1, map_word val2)
-{
- map_word r;
- int i;
-
- for (i = 0; i < map_words(map); i++)
- r.x[i] = val1.x[i] & val2.x[i];
-
- return r;
-}
-
-static inline map_word map_word_clr(struct map_info *map, map_word val1, map_word val2)
-{
- map_word r;
- int i;
-
- for (i = 0; i < map_words(map); i++)
- r.x[i] = val1.x[i] & ~val2.x[i];
-
- return r;
-}
-
-static inline map_word map_word_or(struct map_info *map, map_word val1, map_word val2)
-{
- map_word r;
- int i;
-
- for (i = 0; i < map_words(map); i++)
- r.x[i] = val1.x[i] | val2.x[i];
-
- return r;
-}
-
-static inline int map_word_andequal(struct map_info *map, map_word val1, map_word val2, map_word val3)
-{
- int i;
-
- for (i = 0; i < map_words(map); i++) {
- if ((val1.x[i] & val2.x[i]) != val3.x[i])
- return 0;
- }
-
- return 1;
-}
-
-static inline int map_word_bitsset(struct map_info *map, map_word val1, map_word val2)
-{
- int i;
-
- for (i = 0; i < map_words(map); i++) {
- if (val1.x[i] & val2.x[i])
- return 1;
- }
-
- return 0;
-}
+#define map_word_equal(map, val1, val2) \
+({ \
+ int i, ret = 1; \
+ for (i = 0; i < map_words(map); i++) \
+ if ((val1).x[i] != (val2).x[i]) { \
+ ret = 0; \
+ break; \
+ } \
+ ret; \
+})
+
+#define map_word_and(map, val1, val2) \
+({ \
+ map_word r; \
+ int i; \
+ for (i = 0; i < map_words(map); i++) \
+ r.x[i] = (val1).x[i] & (val2).x[i]; \
+ r; \
+})
+
+#define map_word_clr(map, val1, val2) \
+({ \
+ map_word r; \
+ int i; \
+ for (i = 0; i < map_words(map); i++) \
+ r.x[i] = (val1).x[i] & ~(val2).x[i]; \
+ r; \
+})
+
+#define map_word_or(map, val1, val2) \
+({ \
+ map_word r; \
+ int i; \
+ for (i = 0; i < map_words(map); i++) \
+ r.x[i] = (val1).x[i] | (val2).x[i]; \
+ r; \
+})
+
+#define map_word_andequal(map, val1, val2, val3) \
+({ \
+ int i, ret = 1; \
+ for (i = 0; i < map_words(map); i++) { \
+ if (((val1).x[i] & (val2).x[i]) != (val3).x[i]) { \
+ ret = 0; \
+ break; \
+ } \
+ } \
+ ret; \
+})
+
+#define map_word_bitsset(map, val1, val2) \
+({ \
+ int i, ret = 0; \
+ for (i = 0; i < map_words(map); i++) { \
+ if ((val1).x[i] & (val2).x[i]) { \
+ ret = 1; \
+ break; \
+ } \
+ } \
+ ret; \
+})
static inline map_word map_word_load(struct map_info *map, const void *ptr)
{
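The map_word_* helpers above were converted from static inline functions to GNU statement-expression macros: a ({ ... }) block is an expression whose value is its final statement, so a macro can loop over map_words() and still yield a result, letting the compiler constant-fold the loop when the map width is known. A standalone demonstration of the idiom (a gcc/clang extension; WORDS stands in for map_words()):

#include <stdio.h>

#define WORDS 4

typedef struct { unsigned long x[WORDS]; } word_vec;

#define vec_equal(a, b)					\
({							\
	int i, ret = 1;					\
	for (i = 0; i < WORDS; i++)			\
		if ((a).x[i] != (b).x[i]) {		\
			ret = 0;			\
			break;				\
		}					\
	ret;						\
})

int main(void)
{
	word_vec p = { { 1, 2, 3, 4 } };
	word_vec q = { { 1, 2, 3, 5 } };

	printf("equal: %d\n", vec_equal(p, q));	/* prints 0 */
	return 0;
}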
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index cd55bf14ad51..205ededccc60 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -489,6 +489,34 @@ static inline uint32_t mtd_mod_by_eb(uint64_t sz, struct mtd_info *mtd)
return do_div(sz, mtd->erasesize);
}
+/**
+ * mtd_align_erase_req - Adjust an erase request to align things on eraseblock
+ * boundaries.
+ * @mtd: the MTD device this erase request applies on
+ * @req: the erase request to adjust
+ *
+ * This function will adjust @req->addr and @req->len to align them on
+ * @mtd->erasesize. Of course we expect @mtd->erasesize to be != 0.
+ */
+static inline void mtd_align_erase_req(struct mtd_info *mtd,
+ struct erase_info *req)
+{
+ u32 mod;
+
+ if (WARN_ON(!mtd->erasesize))
+ return;
+
+ mod = mtd_mod_by_eb(req->addr, mtd);
+ if (mod) {
+ req->addr -= mod;
+ req->len += mod;
+ }
+
+ mod = mtd_mod_by_eb(req->addr + req->len, mtd);
+ if (mod)
+ req->len += mtd->erasesize - mod;
+}
+
static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd)
{
if (mtd->writesize_shift)
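The arithmetic in mtd_align_erase_req() rounds the start of the request down and its end up to eraseblock boundaries. Re-running it in userspace with an assumed 128KiB eraseblock makes the expansion concrete:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t erasesize = 0x20000;	/* assumed 128KiB block */
	uint64_t addr = 0x21000, len = 0x1000;
	uint64_t mod;

	mod = addr % erasesize;		/* round the start down */
	if (mod) {
		addr -= mod;
		len += mod;
	}

	mod = (addr + len) % erasesize;	/* round the end up */
	if (mod)
		len += erasesize - mod;

	printf("addr=0x%llx len=0x%llx\n",
	       (unsigned long long)addr, (unsigned long long)len);
	/* prints addr=0x20000 len=0x20000 */
	return 0;
}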
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index 749bb08c4772..56c5570aadbe 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -133,12 +133,6 @@ enum nand_ecc_algo {
*/
#define NAND_ECC_GENERIC_ERASED_CHECK BIT(0)
#define NAND_ECC_MAXIMIZE BIT(1)
-/*
- * If your controller already sends the required NAND commands when
- * reading or writing a page, then the framework is not supposed to
- * send READ0 and SEQIN/PAGEPROG respectively.
- */
-#define NAND_ECC_CUSTOM_PAGE_ACCESS BIT(2)
/* Bit mask for flags passed to do_nand_read_ecc */
#define NAND_GET_DEVICE 0x80
@@ -191,11 +185,6 @@ enum nand_ecc_algo {
/* Non chip related options */
/* This option skips the bbt scan during initialization. */
#define NAND_SKIP_BBTSCAN 0x00010000
-/*
- * This option is defined if the board driver allocates its own buffers
- * (e.g. because it needs them DMA-coherent).
- */
-#define NAND_OWN_BUFFERS 0x00020000
/* Chip may not exist, so silence any errors in scan */
#define NAND_SCAN_SILENT_NODEV 0x00040000
/*
@@ -525,6 +514,8 @@ static const struct nand_ecc_caps __name = { \
* @postpad: padding information for syndrome based ECC generators
* @options: ECC specific options (see NAND_ECC_XXX flags defined above)
* @priv: pointer to private ECC control data
+ * @calc_buf: buffer for calculated ECC, size is oobsize.
+ * @code_buf: buffer for ECC read from flash, size is oobsize.
* @hwctl: function to control hardware ECC generator. Must only
* be provided if a hardware ECC is available
* @calculate: function for ECC calculation or readback from ECC hardware
@@ -575,6 +566,8 @@ struct nand_ecc_ctrl {
int postpad;
unsigned int options;
void *priv;
+ u8 *calc_buf;
+ u8 *code_buf;
void (*hwctl)(struct mtd_info *mtd, int mode);
int (*calculate)(struct mtd_info *mtd, const uint8_t *dat,
uint8_t *ecc_code);
@@ -602,26 +595,6 @@ struct nand_ecc_ctrl {
int page);
};
-static inline int nand_standard_page_accessors(struct nand_ecc_ctrl *ecc)
-{
- return !(ecc->options & NAND_ECC_CUSTOM_PAGE_ACCESS);
-}
-
-/**
- * struct nand_buffers - buffer structure for read/write
- * @ecccalc: buffer pointer for calculated ECC, size is oobsize.
- * @ecccode: buffer pointer for ECC read from flash, size is oobsize.
- * @databuf: buffer pointer for data, size is (page size + oobsize).
- *
- * Do not change the order of buffers. databuf and oobrbuf must be in
- * consecutive order.
- */
-struct nand_buffers {
- uint8_t *ecccalc;
- uint8_t *ecccode;
- uint8_t *databuf;
-};
-
/**
* struct nand_sdr_timings - SDR NAND chip timings
*
@@ -762,6 +735,350 @@ struct nand_manufacturer_ops {
};
/**
+ * struct nand_op_cmd_instr - Definition of a command instruction
+ * @opcode: the command to issue in one cycle
+ */
+struct nand_op_cmd_instr {
+ u8 opcode;
+};
+
+/**
+ * struct nand_op_addr_instr - Definition of an address instruction
+ * @naddrs: length of the @addrs array
+ * @addrs: array containing the address cycles to issue
+ */
+struct nand_op_addr_instr {
+ unsigned int naddrs;
+ const u8 *addrs;
+};
+
+/**
+ * struct nand_op_data_instr - Definition of a data instruction
+ * @len: number of data bytes to move
+ * @in: buffer to fill when reading from the NAND chip
+ * @out: buffer to read from when writing to the NAND chip
+ * @force_8bit: force 8-bit access
+ *
+ * Please note that "in" and "out" are inverted from the ONFI specification
+ * and are from the controller perspective, so an "in" is a read from the
+ * NAND chip while an "out" is a write to the NAND chip.
+ */
+struct nand_op_data_instr {
+ unsigned int len;
+ union {
+ void *in;
+ const void *out;
+ } buf;
+ bool force_8bit;
+};
+
+/**
+ * struct nand_op_waitrdy_instr - Definition of a wait ready instruction
+ * @timeout_ms: maximum delay while waiting for the ready/busy pin in ms
+ */
+struct nand_op_waitrdy_instr {
+ unsigned int timeout_ms;
+};
+
+/**
+ * enum nand_op_instr_type - Definition of all instruction types
+ * @NAND_OP_CMD_INSTR: command instruction
+ * @NAND_OP_ADDR_INSTR: address instruction
+ * @NAND_OP_DATA_IN_INSTR: data in instruction
+ * @NAND_OP_DATA_OUT_INSTR: data out instruction
+ * @NAND_OP_WAITRDY_INSTR: wait ready instruction
+ */
+enum nand_op_instr_type {
+ NAND_OP_CMD_INSTR,
+ NAND_OP_ADDR_INSTR,
+ NAND_OP_DATA_IN_INSTR,
+ NAND_OP_DATA_OUT_INSTR,
+ NAND_OP_WAITRDY_INSTR,
+};
+
+/**
+ * struct nand_op_instr - Instruction object
+ * @type: the instruction type
+ * @cmd/@addr/@data/@waitrdy: extra data associated with the instruction.
+ * You'll have to use the appropriate element
+ * depending on @type.
+ * @delay_ns: delay the controller should apply after the instruction has been
+ * issued on the bus. Most modern controllers have internal timings
+ * control logic, and in this case, the controller driver can ignore
+ * this field.
+ */
+struct nand_op_instr {
+ enum nand_op_instr_type type;
+ union {
+ struct nand_op_cmd_instr cmd;
+ struct nand_op_addr_instr addr;
+ struct nand_op_data_instr data;
+ struct nand_op_waitrdy_instr waitrdy;
+ } ctx;
+ unsigned int delay_ns;
+};
+
+/*
+ * Special handling must be done for the WAITRDY timeout parameter as it
+ * usually is either tPROG (after a prog), tR (before a read), tRST (during a
+ * reset) or tBERS (during an erase), all of which are u64 values that cannot
+ * be divided by the usual kernel macros and must be handled with the special
+ * DIV_ROUND_UP_ULL() macro.
+ */
+#define __DIVIDE(dividend, divisor) ({ \
+ sizeof(dividend) == sizeof(u32) ? \
+ DIV_ROUND_UP(dividend, divisor) : \
+ DIV_ROUND_UP_ULL(dividend, divisor); \
+ })
+#define PSEC_TO_NSEC(x) __DIVIDE(x, 1000)
+#define PSEC_TO_MSEC(x) __DIVIDE(x, 1000000000)
+
+#define NAND_OP_CMD(id, ns) \
+ { \
+ .type = NAND_OP_CMD_INSTR, \
+ .ctx.cmd.opcode = id, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_ADDR(ncycles, cycles, ns) \
+ { \
+ .type = NAND_OP_ADDR_INSTR, \
+ .ctx.addr = { \
+ .naddrs = ncycles, \
+ .addrs = cycles, \
+ }, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_DATA_IN(l, b, ns) \
+ { \
+ .type = NAND_OP_DATA_IN_INSTR, \
+ .ctx.data = { \
+ .len = l, \
+ .buf.in = b, \
+ .force_8bit = false, \
+ }, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_DATA_OUT(l, b, ns) \
+ { \
+ .type = NAND_OP_DATA_OUT_INSTR, \
+ .ctx.data = { \
+ .len = l, \
+ .buf.out = b, \
+ .force_8bit = false, \
+ }, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_8BIT_DATA_IN(l, b, ns) \
+ { \
+ .type = NAND_OP_DATA_IN_INSTR, \
+ .ctx.data = { \
+ .len = l, \
+ .buf.in = b, \
+ .force_8bit = true, \
+ }, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_8BIT_DATA_OUT(l, b, ns) \
+ { \
+ .type = NAND_OP_DATA_OUT_INSTR, \
+ .ctx.data = { \
+ .len = l, \
+ .buf.out = b, \
+ .force_8bit = true, \
+ }, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_WAIT_RDY(tout_ms, ns) \
+ { \
+ .type = NAND_OP_WAITRDY_INSTR, \
+ .ctx.waitrdy.timeout_ms = tout_ms, \
+ .delay_ns = ns, \
+ }
+
+/**
+ * struct nand_subop - a sub operation
+ * @instrs: array of instructions
+ * @ninstrs: length of the @instrs array
+ * @first_instr_start_off: offset to start from for the first instruction
+ * of the sub-operation
+ * @last_instr_end_off: offset to end at (excluded) for the last instruction
+ * of the sub-operation
+ *
+ * Both @first_instr_start_off and @last_instr_end_off only apply to data or
+ * address instructions.
+ *
+ * When an operation cannot be handled as is by the NAND controller, it will
+ * be split by the parser into sub-operations which will be passed to the
+ * controller driver.
+ */
+struct nand_subop {
+ const struct nand_op_instr *instrs;
+ unsigned int ninstrs;
+ unsigned int first_instr_start_off;
+ unsigned int last_instr_end_off;
+};
+
+int nand_subop_get_addr_start_off(const struct nand_subop *subop,
+ unsigned int op_id);
+int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
+ unsigned int op_id);
+int nand_subop_get_data_start_off(const struct nand_subop *subop,
+ unsigned int op_id);
+int nand_subop_get_data_len(const struct nand_subop *subop,
+ unsigned int op_id);
+
+/**
+ * struct nand_op_parser_addr_constraints - Constraints for address instructions
+ * @maxcycles: maximum number of address cycles the controller can issue in a
+ * single step
+ */
+struct nand_op_parser_addr_constraints {
+ unsigned int maxcycles;
+};
+
+/**
+ * struct nand_op_parser_data_constraints - Constraints for data instructions
+ * @maxlen: maximum data length that the controller can handle in a single step
+ */
+struct nand_op_parser_data_constraints {
+ unsigned int maxlen;
+};
+
+/**
+ * struct nand_op_parser_pattern_elem - One element of a pattern
+ * @type: the instruction type
+ * @optional: whether this element of the pattern is optional or mandatory
+ * @addr/@data: address or data constraint (number of cycles or data length)
+ */
+struct nand_op_parser_pattern_elem {
+ enum nand_op_instr_type type;
+ bool optional;
+ union {
+ struct nand_op_parser_addr_constraints addr;
+ struct nand_op_parser_data_constraints data;
+ } ctx;
+};
+
+#define NAND_OP_PARSER_PAT_CMD_ELEM(_opt) \
+ { \
+ .type = NAND_OP_CMD_INSTR, \
+ .optional = _opt, \
+ }
+
+#define NAND_OP_PARSER_PAT_ADDR_ELEM(_opt, _maxcycles) \
+ { \
+ .type = NAND_OP_ADDR_INSTR, \
+ .optional = _opt, \
+ .ctx.addr.maxcycles = _maxcycles, \
+ }
+
+#define NAND_OP_PARSER_PAT_DATA_IN_ELEM(_opt, _maxlen) \
+ { \
+ .type = NAND_OP_DATA_IN_INSTR, \
+ .optional = _opt, \
+ .ctx.data.maxlen = _maxlen, \
+ }
+
+#define NAND_OP_PARSER_PAT_DATA_OUT_ELEM(_opt, _maxlen) \
+ { \
+ .type = NAND_OP_DATA_OUT_INSTR, \
+ .optional = _opt, \
+ .ctx.data.maxlen = _maxlen, \
+ }
+
+#define NAND_OP_PARSER_PAT_WAITRDY_ELEM(_opt) \
+ { \
+ .type = NAND_OP_WAITRDY_INSTR, \
+ .optional = _opt, \
+ }
+
+/**
+ * struct nand_op_parser_pattern - NAND sub-operation pattern descriptor
+ * @elems: array of pattern elements
+ * @nelems: number of pattern elements in @elems array
+ * @exec: the function that will issue a sub-operation
+ *
+ * A pattern is a list of elements, each element representing one instruction
+ * with its constraints. The pattern itself is used by the core to match NAND
+ * chip operations with NAND controller operations.
+ * Once a match between a NAND controller operation pattern and a NAND chip
+ * operation (or a sub-set of a NAND operation) is found, the pattern ->exec()
+ * hook is called so that the controller driver can issue the operation on the
+ * bus.
+ *
+ * Controller drivers should declare as many patterns as they support and pass
+ * this list of patterns (created with the help of the following macro) to
+ * the nand_op_parser_exec_op() helper.
+ */
+struct nand_op_parser_pattern {
+ const struct nand_op_parser_pattern_elem *elems;
+ unsigned int nelems;
+ int (*exec)(struct nand_chip *chip, const struct nand_subop *subop);
+};
+
+#define NAND_OP_PARSER_PATTERN(_exec, ...) \
+ { \
+ .exec = _exec, \
+ .elems = (struct nand_op_parser_pattern_elem[]) { __VA_ARGS__ }, \
+ .nelems = sizeof((struct nand_op_parser_pattern_elem[]) { __VA_ARGS__ }) / \
+ sizeof(struct nand_op_parser_pattern_elem), \
+ }
+
+/**
+ * struct nand_op_parser - NAND controller operation parser descriptor
+ * @patterns: array of supported patterns
+ * @npatterns: length of the @patterns array
+ *
+ * The parser descriptor is just an array of supported patterns which will be
+ * iterated by nand_op_parser_exec_op() every time it tries to execute a
+ * NAND operation (or tries to determine if a specific operation is supported).
+ *
+ * It is worth mentioning that patterns will be tested in their declaration
+ * order, and the first match will be taken, so it's important to order patterns
+ * appropriately so that simple/inefficient patterns are placed at the end of
+ * the list. Usually, this is where you put single instruction patterns.
+ */
+struct nand_op_parser {
+ const struct nand_op_parser_pattern *patterns;
+ unsigned int npatterns;
+};
+
+#define NAND_OP_PARSER(...) \
+ { \
+ .patterns = (struct nand_op_parser_pattern[]) { __VA_ARGS__ }, \
+ .npatterns = sizeof((struct nand_op_parser_pattern[]) { __VA_ARGS__ }) / \
+ sizeof(struct nand_op_parser_pattern), \
+ }
+
+/**
+ * struct nand_operation - NAND operation descriptor
+ * @instrs: array of instructions to execute
+ * @ninstrs: length of the @instrs array
+ *
+ * The actual operation structure that will be passed to chip->exec_op().
+ */
+struct nand_operation {
+ const struct nand_op_instr *instrs;
+ unsigned int ninstrs;
+};
+
+#define NAND_OPERATION(_instrs) \
+ { \
+ .instrs = _instrs, \
+ .ninstrs = ARRAY_SIZE(_instrs), \
+ }
+
+int nand_op_parser_exec_op(struct nand_chip *chip,
+ const struct nand_op_parser *parser,
+ const struct nand_operation *op, bool check_only);
+
+/**
* struct nand_chip - NAND Private Flash Chip Data
* @mtd: MTD device registered to the MTD framework
* @IO_ADDR_R: [BOARDSPECIFIC] address to read the 8 I/O lines of the
@@ -787,10 +1104,13 @@ struct nand_manufacturer_ops {
* commands to the chip.
* @waitfunc: [REPLACEABLE] hardwarespecific function for wait on
* ready.
+ * @exec_op: controller specific method to execute NAND operations.
+ * This method replaces ->cmdfunc(),
+ * ->{read,write}_{buf,byte,word}(), ->dev_ready() and
+ * ->waitfunc().
* @setup_read_retry: [FLASHSPECIFIC] flash (vendor) specific function for
* setting the read-retry mode. Mostly needed for MLC NAND.
* @ecc: [BOARDSPECIFIC] ECC control structure
- * @buffers: buffer structure for read/write
* @buf_align: minimum buffer alignment required by a platform
* @hwcontrol: platform-specific hardware control structure
* @erase: [REPLACEABLE] erase function
@@ -830,6 +1150,7 @@ struct nand_manufacturer_ops {
* @numchips: [INTERN] number of physical chips
* @chipsize: [INTERN] the size of one chip for multichip arrays
* @pagemask: [INTERN] page number mask = number of (pages / chip) - 1
+ * @data_buf: [INTERN] buffer for data, size is (page size + oobsize).
* @pagebuf: [INTERN] holds the pagenumber which is currently in
* data_buf.
* @pagebuf_bitflips: [INTERN] holds the bitflip count for the page which is
@@ -886,6 +1207,9 @@ struct nand_chip {
void (*cmdfunc)(struct mtd_info *mtd, unsigned command, int column,
int page_addr);
int(*waitfunc)(struct mtd_info *mtd, struct nand_chip *this);
+ int (*exec_op)(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only);
int (*erase)(struct mtd_info *mtd, int page);
int (*scan_bbt)(struct mtd_info *mtd);
int (*onfi_set_features)(struct mtd_info *mtd, struct nand_chip *chip,
@@ -896,7 +1220,6 @@ struct nand_chip {
int (*setup_data_interface)(struct mtd_info *mtd, int chipnr,
const struct nand_data_interface *conf);
-
int chip_delay;
unsigned int options;
unsigned int bbt_options;
@@ -908,6 +1231,7 @@ struct nand_chip {
int numchips;
uint64_t chipsize;
int pagemask;
+ u8 *data_buf;
int pagebuf;
unsigned int pagebuf_bitflips;
int subpagesize;
@@ -928,7 +1252,7 @@ struct nand_chip {
u16 max_bb_per_die;
u32 blocks_per_die;
- struct nand_data_interface *data_interface;
+ struct nand_data_interface data_interface;
int read_retries;
@@ -938,7 +1262,6 @@ struct nand_chip {
struct nand_hw_control *controller;
struct nand_ecc_ctrl ecc;
- struct nand_buffers *buffers;
unsigned long buf_align;
struct nand_hw_control hwcontrol;
@@ -956,6 +1279,15 @@ struct nand_chip {
} manufacturer;
};
+static inline int nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op)
+{
+ if (!chip->exec_op)
+ return -ENOTSUPP;
+
+ return chip->exec_op(chip, op, false);
+}
+
extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops;
extern const struct mtd_ooblayout_ops nand_ooblayout_lp_ops;
@@ -1225,8 +1557,7 @@ static inline int onfi_get_sync_timing_mode(struct nand_chip *chip)
return le16_to_cpu(chip->onfi_params.src_sync_timing_mode);
}
-int onfi_init_data_interface(struct nand_chip *chip,
- struct nand_data_interface *iface,
+int onfi_fill_data_interface(struct nand_chip *chip,
enum nand_data_interface_type type,
int timing_mode);
@@ -1269,8 +1600,6 @@ static inline int jedec_feature(struct nand_chip *chip)
/* get timing characteristics from ONFI timing mode. */
const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode);
-/* get data interface from ONFI timing mode 0, used after reset. */
-const struct nand_data_interface *nand_get_default_data_interface(void);
int nand_check_erased_ecc_chunk(void *data, int datalen,
void *ecc, int ecclen,
@@ -1316,9 +1645,45 @@ int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
/* Reset and initialize a NAND device */
int nand_reset(struct nand_chip *chip, int chipnr);
+/* NAND operation helpers */
+int nand_reset_op(struct nand_chip *chip);
+int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
+ unsigned int len);
+int nand_status_op(struct nand_chip *chip, u8 *status);
+int nand_exit_status_op(struct nand_chip *chip);
+int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock);
+int nand_read_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf, unsigned int len);
+int nand_change_read_column_op(struct nand_chip *chip,
+ unsigned int offset_in_page, void *buf,
+ unsigned int len, bool force_8bit);
+int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf, unsigned int len);
+int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len);
+int nand_prog_page_end_op(struct nand_chip *chip);
+int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len);
+int nand_change_write_column_op(struct nand_chip *chip,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len, bool force_8bit);
+int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
+ bool force_8bit);
+int nand_write_data_op(struct nand_chip *chip, const void *buf,
+ unsigned int len, bool force_8bit);
+
/* Free resources held by the NAND device */
void nand_cleanup(struct nand_chip *chip);
/* Default extended ID decoding function */
void nand_decode_ext_id(struct nand_chip *chip);
+
+/*
+ * External helper for controller drivers that have to implement the WAITRDY
+ * instruction and have no physical pin to check it.
+ */
+int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms);
+
#endif /* __LINUX_MTD_RAWNAND_H */
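Putting the new pieces together, a controller driver describes work as instruction arrays and exposes parser patterns for what its engine can do. A sketch of both, built from the declarations above: the 0xff RESET opcode is ONFI-standard, nand_get_sdr_timings() and the tWB_max/tRST_max fields are assumed from struct nand_sdr_timings, and my_exec_rw() plus the 5-cycle/512-byte limits are hypothetical driver details:

/* Hedged sketch, not from the patch: issue a RESET via ->exec_op(). */
static int my_nand_reset(struct nand_chip *chip)
{
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(&chip->data_interface);
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(0xff, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0),
	};
	struct nand_operation op = NAND_OPERATION(instrs);

	return nand_exec_op(chip, &op);
}

/* One pattern: CMD [ADDR] [DATA_IN] [WAITRDY], dispatched to a
 * hypothetical my_exec_rw() hook; the limits are made up for the example. */
static const struct nand_op_parser my_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(
		my_exec_rw,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(true, 5),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 512),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)));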
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index d0c66a0975cf..de36969eb359 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -61,6 +61,7 @@
#define SPINOR_OP_RDSFDP 0x5a /* Read SFDP */
#define SPINOR_OP_RDCR 0x35 /* Read configuration register */
#define SPINOR_OP_RDFSR 0x70 /* Read flag status register */
+#define SPINOR_OP_CLFSR 0x50 /* Clear flag status register */
/* 4-byte address opcodes - used on Spansion and some Macronix flashes. */
#define SPINOR_OP_READ_4B 0x13 /* Read data bytes (low frequency) */
@@ -130,7 +131,10 @@
#define EVCR_QUAD_EN_MICRON BIT(7) /* Micron Quad I/O */
/* Flag Status Register bits */
-#define FSR_READY BIT(7)
+#define FSR_READY BIT(7) /* Device status, 0 = Busy, 1 = Ready */
+#define FSR_E_ERR BIT(5) /* Erase operation status */
+#define FSR_P_ERR BIT(4) /* Program operation status */
+#define FSR_PT_ERR BIT(1) /* Protection error bit */
/* Configuration Register bits. */
#define CR_QUAD_EN_SPAN BIT(1) /* Spansion Quad I/O */
@@ -399,4 +403,10 @@ struct spi_nor_hwcaps {
int spi_nor_scan(struct spi_nor *nor, const char *name,
const struct spi_nor_hwcaps *hwcaps);
+/**
+ * spi_nor_restore() - restore the status of SPI NOR
+ * @nor: the spi_nor structure
+ */
+void spi_nor_restore(struct spi_nor *nor);
+
#endif
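The new flag status register bits classify why a flash finished, or refused, an operation. A hypothetical decode helper showing how a driver might fold them into errno values; a real driver reads the FSR over SPI and clears latched errors with SPINOR_OP_CLFSR (0x50) rather than taking the byte as an argument:

#include <stdint.h>
#include <stdio.h>

#define FSR_READY	(1 << 7)	/* 0 = busy, 1 = ready */
#define FSR_E_ERR	(1 << 5)	/* erase failed */
#define FSR_P_ERR	(1 << 4)	/* program failed */
#define FSR_PT_ERR	(1 << 1)	/* protection violation */

static int fsr_to_errno(uint8_t fsr)
{
	if (!(fsr & FSR_READY))
		return -16;		/* -EBUSY: operation in flight */
	if (fsr & (FSR_E_ERR | FSR_P_ERR))
		return -5;		/* -EIO: erase/program error */
	if (fsr & FSR_PT_ERR)
		return -13;		/* -EACCES: protected region */
	return 0;
}

int main(void)
{
	printf("%d\n", fsr_to_errno(FSR_READY | FSR_P_ERR));	/* -5 */
	return 0;
}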
diff --git a/include/linux/net.h b/include/linux/net.h
index 68acc54976bf..91216b16feb7 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -147,7 +147,7 @@ struct proto_ops {
int (*getname) (struct socket *sock,
struct sockaddr *addr,
int *sockaddr_len, int peer);
- unsigned int (*poll) (struct file *file, struct socket *sock,
+ __poll_t (*poll) (struct file *file, struct socket *sock,
struct poll_table_struct *wait);
int (*ioctl) (struct socket *sock, unsigned int cmd,
unsigned long arg);
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 495ba4dd9da5..34551f8aaf9d 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -67,8 +67,7 @@ static inline bool lockdep_nfnl_is_held(__u8 subsys_id)
* @ss: The nfnetlink subsystem ID
*
* Return the value of the specified RCU-protected pointer, but omit
- * both the smp_read_barrier_depends() and the READ_ONCE(), because
- * caller holds the NFNL subsystem mutex.
+ * the READ_ONCE(), because caller holds the NFNL subsystem mutex.
*/
#define nfnl_dereference(p, ss) \
rcu_dereference_protected(p, lockdep_nfnl_is_held(ss))
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 47adac640191..57ffaa20d564 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -457,7 +457,12 @@ enum lock_type4 {
#define NFS4_DEBUG 1
-/* Index of predefined Linux client operations */
+/*
+ * Index of predefined Linux client operations
+ *
+ * To ensure that /proc/net/rpc/nfs remains correctly ordered, please
+ * append only to this enum when adding new client operations.
+ */
enum {
NFSPROC4_CLNT_NULL = 0, /* Unused */
@@ -480,7 +485,6 @@ enum {
NFSPROC4_CLNT_ACCESS,
NFSPROC4_CLNT_GETATTR,
NFSPROC4_CLNT_LOOKUP,
- NFSPROC4_CLNT_LOOKUPP,
NFSPROC4_CLNT_LOOKUP_ROOT,
NFSPROC4_CLNT_REMOVE,
NFSPROC4_CLNT_RENAME,
@@ -500,7 +504,6 @@ enum {
NFSPROC4_CLNT_SECINFO,
NFSPROC4_CLNT_FSID_PRESENT,
- /* nfs41 */
NFSPROC4_CLNT_EXCHANGE_ID,
NFSPROC4_CLNT_CREATE_SESSION,
NFSPROC4_CLNT_DESTROY_SESSION,
@@ -518,13 +521,14 @@ enum {
NFSPROC4_CLNT_BIND_CONN_TO_SESSION,
NFSPROC4_CLNT_DESTROY_CLIENTID,
- /* nfs42 */
NFSPROC4_CLNT_SEEK,
NFSPROC4_CLNT_ALLOCATE,
NFSPROC4_CLNT_DEALLOCATE,
NFSPROC4_CLNT_LAYOUTSTATS,
NFSPROC4_CLNT_CLONE,
NFSPROC4_CLNT_COPY,
+
+ NFSPROC4_CLNT_LOOKUPP,
};
/* nfs41 types */
diff --git a/include/linux/nubus.h b/include/linux/nubus.h
index 11ce6b1117a8..6e8200215321 100644
--- a/include/linux/nubus.h
+++ b/include/linux/nubus.h
@@ -5,20 +5,36 @@
Originally written by Alan Cox.
Hacked to death by C. Scott Ananian and David Huggins-Daines.
-
- Some of the constants in here are from the corresponding
- NetBSD/OpenBSD header file, by Allen Briggs. We figured out the
- rest of them on our own. */
+*/
+
#ifndef LINUX_NUBUS_H
#define LINUX_NUBUS_H
+#include <linux/device.h>
#include <asm/nubus.h>
#include <uapi/linux/nubus.h>
+struct proc_dir_entry;
+struct seq_file;
+
+struct nubus_dir {
+ unsigned char *base;
+ unsigned char *ptr;
+ int done;
+ int mask;
+ struct proc_dir_entry *procdir;
+};
+
+struct nubus_dirent {
+ unsigned char *base;
+ unsigned char type;
+ __u32 data; /* Actually 24 bits used */
+ int mask;
+};
+
struct nubus_board {
- struct nubus_board* next;
- struct nubus_dev* first_dev;
-
+ struct device dev;
+
/* Only 9-E actually exist, though 0-8 are also theoretically
possible, and 0 is a special case which represents the
motherboard and onboard peripherals (Ethernet, video) */
@@ -27,10 +43,10 @@ struct nubus_board {
char name[64];
/* Format block */
- unsigned char* fblock;
+ unsigned char *fblock;
/* Root directory (does *not* always equal fblock + doffset!) */
- unsigned char* directory;
-
+ unsigned char *directory;
+
unsigned long slot_addr;
/* Offset to root directory (sometimes) */
unsigned long doffset;
@@ -41,15 +57,15 @@ struct nubus_board {
unsigned char rev;
unsigned char format;
unsigned char lanes;
-};
-struct nubus_dev {
- /* Next link in device list */
- struct nubus_dev* next;
/* Directory entry in /proc/bus/nubus */
- struct proc_dir_entry* procdir;
+ struct proc_dir_entry *procdir;
+};
+
+struct nubus_rsrc {
+ struct list_head list;
- /* The functional resource ID of this device */
+ /* The functional resource ID */
unsigned char resid;
/* These are mostly here for convenience; we could always read
them from the ROMs if we wanted to */
@@ -57,79 +73,116 @@ struct nubus_dev {
unsigned short type;
unsigned short dr_sw;
unsigned short dr_hw;
- /* This is the device's name rather than the board's.
- Sometimes they are different. Usually the board name is
- more correct. */
- char name[64];
- /* MacOS driver (I kid you not) */
- unsigned char* driver;
- /* Actually this is an offset */
- unsigned long iobase;
- unsigned long iosize;
- unsigned char flags, hwdevid;
-
+
/* Functional directory */
- unsigned char* directory;
+ unsigned char *directory;
/* Much of our info comes from here */
- struct nubus_board* board;
+ struct nubus_board *board;
+};
+
+/* This is all NuBus functional resources (used to find devices later on) */
+extern struct list_head nubus_func_rsrcs;
+
+struct nubus_driver {
+ struct device_driver driver;
+ int (*probe)(struct nubus_board *board);
+ int (*remove)(struct nubus_board *board);
};
-/* This is all NuBus devices (used to find devices later on) */
-extern struct nubus_dev* nubus_devices;
-/* This is all NuBus cards */
-extern struct nubus_board* nubus_boards;
+extern struct bus_type nubus_bus_type;
/* Generic NuBus interface functions, modelled after the PCI interface */
-void nubus_scan_bus(void);
#ifdef CONFIG_PROC_FS
-extern void nubus_proc_init(void);
+void nubus_proc_init(void);
+struct proc_dir_entry *nubus_proc_add_board(struct nubus_board *board);
+struct proc_dir_entry *nubus_proc_add_rsrc_dir(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent,
+ struct nubus_board *board);
+void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent,
+ unsigned int size);
+void nubus_proc_add_rsrc(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent);
#else
static inline void nubus_proc_init(void) {}
+static inline
+struct proc_dir_entry *nubus_proc_add_board(struct nubus_board *board)
+{ return NULL; }
+static inline
+struct proc_dir_entry *nubus_proc_add_rsrc_dir(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent,
+ struct nubus_board *board)
+{ return NULL; }
+static inline void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent,
+ unsigned int size) {}
+static inline void nubus_proc_add_rsrc(struct proc_dir_entry *procdir,
+ const struct nubus_dirent *ent) {}
#endif
-int get_nubus_list(char *buf);
-int nubus_proc_attach_device(struct nubus_dev *dev);
-/* If we need more precision we can add some more of these */
-struct nubus_dev* nubus_find_device(unsigned short category,
- unsigned short type,
- unsigned short dr_hw,
- unsigned short dr_sw,
- const struct nubus_dev* from);
-struct nubus_dev* nubus_find_type(unsigned short category,
- unsigned short type,
- const struct nubus_dev* from);
-/* Might have more than one device in a slot, you know... */
-struct nubus_dev* nubus_find_slot(unsigned int slot,
- const struct nubus_dev* from);
+
+struct nubus_rsrc *nubus_first_rsrc_or_null(void);
+struct nubus_rsrc *nubus_next_rsrc_or_null(struct nubus_rsrc *from);
+
+#define for_each_func_rsrc(f) \
+ for (f = nubus_first_rsrc_or_null(); f; f = nubus_next_rsrc_or_null(f))
+
+#define for_each_board_func_rsrc(b, f) \
+ for_each_func_rsrc(f) if (f->board != b) {} else
/* These are somewhat more NuBus-specific. They all return 0 for
success and -1 for failure, as you'd expect. */
/* The root directory which contains the board and functional
directories */
-int nubus_get_root_dir(const struct nubus_board* board,
- struct nubus_dir* dir);
+int nubus_get_root_dir(const struct nubus_board *board,
+ struct nubus_dir *dir);
/* The board directory */
-int nubus_get_board_dir(const struct nubus_board* board,
- struct nubus_dir* dir);
+int nubus_get_board_dir(const struct nubus_board *board,
+ struct nubus_dir *dir);
/* The functional directory */
-int nubus_get_func_dir(const struct nubus_dev* dev,
- struct nubus_dir* dir);
+int nubus_get_func_dir(const struct nubus_rsrc *fres, struct nubus_dir *dir);
/* These work on any directory gotten via the above */
-int nubus_readdir(struct nubus_dir* dir,
- struct nubus_dirent* ent);
-int nubus_find_rsrc(struct nubus_dir* dir,
+int nubus_readdir(struct nubus_dir *dir,
+ struct nubus_dirent *ent);
+int nubus_find_rsrc(struct nubus_dir *dir,
unsigned char rsrc_type,
- struct nubus_dirent* ent);
-int nubus_rewinddir(struct nubus_dir* dir);
+ struct nubus_dirent *ent);
+int nubus_rewinddir(struct nubus_dir *dir);
/* Things to do with directory entries */
-int nubus_get_subdir(const struct nubus_dirent* ent,
- struct nubus_dir* dir);
-void nubus_get_rsrc_mem(void* dest,
- const struct nubus_dirent *dirent,
- int len);
-void nubus_get_rsrc_str(void* dest,
- const struct nubus_dirent *dirent,
- int maxlen);
+int nubus_get_subdir(const struct nubus_dirent *ent,
+ struct nubus_dir *dir);
+void nubus_get_rsrc_mem(void *dest, const struct nubus_dirent *dirent,
+ unsigned int len);
+unsigned int nubus_get_rsrc_str(char *dest, const struct nubus_dirent *dirent,
+ unsigned int len);
+void nubus_seq_write_rsrc_mem(struct seq_file *m,
+ const struct nubus_dirent *dirent,
+ unsigned int len);
+unsigned char *nubus_dirptr(const struct nubus_dirent *nd);
+
+/* Declarations relating to driver model objects */
+int nubus_bus_register(void);
+int nubus_device_register(struct nubus_board *board);
+int nubus_driver_register(struct nubus_driver *ndrv);
+void nubus_driver_unregister(struct nubus_driver *ndrv);
+int nubus_proc_show(struct seq_file *m, void *data);
+
+static inline void nubus_set_drvdata(struct nubus_board *board, void *data)
+{
+ dev_set_drvdata(&board->dev, data);
+}
+
+static inline void *nubus_get_drvdata(struct nubus_board *board)
+{
+ return dev_get_drvdata(&board->dev);
+}
+
+/* Returns a pointer to the "standard" slot space. */
+static inline void *nubus_slot_addr(int slot)
+{
+ return (void *)(0xF0000000 | (slot << 24));
+}
+
#endif /* LINUX_NUBUS_H */
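for_each_board_func_rsrc() above uses a neat trick: appending `if (f->board != b) {} else` to the underlying for-each skips non-matching resources while keeping the construct usable as an ordinary single-statement loop header. A standalone demonstration of the idiom with a made-up item list:

#include <stdio.h>

struct item { int group; const char *name; };

static struct item items[] = {
	{ 1, "a" }, { 2, "b" }, { 1, "c" },
};

#define for_each_item(p) \
	for (p = items; p < items + sizeof(items) / sizeof(items[0]); p++)

/* The empty then-branch eats non-matching items; the caller's loop
 * body becomes the else-branch, so `break` and `continue` still work. */
#define for_each_group_item(g, p) \
	for_each_item(p) if ((p)->group != (g)) {} else

int main(void)
{
	struct item *p;

	for_each_group_item(1, p)
		printf("%s\n", p->name);	/* prints a, c */
	return 0;
}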
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index aea87f0d917b..4112e2bd747f 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -124,14 +124,20 @@ enum {
#define NVME_CMB_BIR(cmbloc) ((cmbloc) & 0x7)
#define NVME_CMB_OFST(cmbloc) (((cmbloc) >> 12) & 0xfffff)
-#define NVME_CMB_SZ(cmbsz) (((cmbsz) >> 12) & 0xfffff)
-#define NVME_CMB_SZU(cmbsz) (((cmbsz) >> 8) & 0xf)
-
-#define NVME_CMB_WDS(cmbsz) ((cmbsz) & 0x10)
-#define NVME_CMB_RDS(cmbsz) ((cmbsz) & 0x8)
-#define NVME_CMB_LISTS(cmbsz) ((cmbsz) & 0x4)
-#define NVME_CMB_CQS(cmbsz) ((cmbsz) & 0x2)
-#define NVME_CMB_SQS(cmbsz) ((cmbsz) & 0x1)
+
+enum {
+ NVME_CMBSZ_SQS = 1 << 0,
+ NVME_CMBSZ_CQS = 1 << 1,
+ NVME_CMBSZ_LISTS = 1 << 2,
+ NVME_CMBSZ_RDS = 1 << 3,
+ NVME_CMBSZ_WDS = 1 << 4,
+
+ NVME_CMBSZ_SZ_SHIFT = 12,
+ NVME_CMBSZ_SZ_MASK = 0xfffff,
+
+ NVME_CMBSZ_SZU_SHIFT = 8,
+ NVME_CMBSZ_SZU_MASK = 0xf,
+};
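With the masks and shifts now grouped in an enum, computing the controller memory buffer size reads as SZ units of (4KiB << 4 * SZU), per the NVMe specification. A standalone check with an example register value, not taken from the patch:

#include <stdint.h>
#include <stdio.h>

#define NVME_CMBSZ_SZ_SHIFT	12
#define NVME_CMBSZ_SZ_MASK	0xfffff
#define NVME_CMBSZ_SZU_SHIFT	8
#define NVME_CMBSZ_SZU_MASK	0xf

int main(void)
{
	uint32_t cmbsz = (16 << NVME_CMBSZ_SZ_SHIFT) |	/* SZ = 16 units */
			 (1 << NVME_CMBSZ_SZU_SHIFT);	/* SZU = 1: 64KiB units */
	uint64_t szu = 1ULL << (12 + 4 * ((cmbsz >> NVME_CMBSZ_SZU_SHIFT) &
					  NVME_CMBSZ_SZU_MASK));
	uint64_t size = szu * ((cmbsz >> NVME_CMBSZ_SZ_SHIFT) &
			       NVME_CMBSZ_SZ_MASK);

	printf("CMB size: %llu bytes\n", (unsigned long long)size);
	/* 16 * 64KiB = 1048576 */
	return 0;
}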
/*
* Submission and Completion Queue Entry Sizes for the NVM command set.
diff --git a/include/linux/of.h b/include/linux/of.h
index d3dea1d1e3a9..173102dafb07 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -544,6 +544,8 @@ const char *of_prop_next_string(struct property *prop, const char *cur);
bool of_console_check(struct device_node *dn, char *name, int index);
+extern int of_cpu_node_to_id(struct device_node *np);
+
#else /* CONFIG_OF */
static inline void of_core_init(void)
@@ -916,6 +918,11 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag
{
}
+static inline int of_cpu_node_to_id(struct device_node *np)
+{
+ return -ENODEV;
+}
+
#define of_match_ptr(_ptr) NULL
#define of_match_node(_matches, _node) NULL
#endif /* CONFIG_OF */
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index 1fe205582111..18a7f03e1182 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -31,7 +31,7 @@ enum of_gpio_flags {
OF_GPIO_ACTIVE_LOW = 0x1,
OF_GPIO_SINGLE_ENDED = 0x2,
OF_GPIO_OPEN_DRAIN = 0x4,
- OF_GPIO_SLEEP_MAY_LOSE_VALUE = 0x8,
+ OF_GPIO_TRANSITORY = 0x8,
};
#ifdef CONFIG_OF_GPIO
diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h
index edfa280c3d56..053feb41510a 100644
--- a/include/linux/omap-gpmc.h
+++ b/include/linux/omap-gpmc.h
@@ -25,15 +25,43 @@ struct gpmc_nand_ops {
struct gpmc_nand_regs;
+struct gpmc_onenand_info {
+ bool sync_read;
+ bool sync_write;
+ int burst_len;
+};
+
#if IS_ENABLED(CONFIG_OMAP_GPMC)
struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs,
int cs);
+/**
+ * gpmc_omap_onenand_set_timings - set optimized sync timings.
+ * @dev: parent GPMC device
+ * @cs: Chip Select Region
+ * @freq: Chip frequency
+ * @latency: Burst latency cycle count
+ * @info: Structure describing parameters used
+ *
+ * Sets optimized timings for the @cs region based on @freq and @latency.
+ * Updates the @info structure based on the GPMC settings.
+ */
+int gpmc_omap_onenand_set_timings(struct device *dev, int cs, int freq,
+ int latency,
+ struct gpmc_onenand_info *info);
+
#else
static inline struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs,
int cs)
{
return NULL;
}
+
+static inline
+int gpmc_omap_onenand_set_timings(struct device *dev, int cs, int freq,
+ int latency,
+ struct gpmc_onenand_info *info)
+{
+ return -EINVAL;
+}
#endif /* CONFIG_OMAP_GPMC */
extern int gpmc_calc_timings(struct gpmc_timings *gpmc_t,
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 6658d9ee5257..864d167a1073 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -139,12 +139,12 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref,
* when using it as a pointer, __PERCPU_REF_ATOMIC may be set in
* between contaminating the pointer value, meaning that
* READ_ONCE() is required when fetching it.
+ *
+ * The smp_read_barrier_depends() implied by READ_ONCE() pairs
+ * with smp_store_release() in __percpu_ref_switch_to_percpu().
*/
percpu_ptr = READ_ONCE(ref->percpu_count_ptr);
- /* paired with smp_store_release() in __percpu_ref_switch_to_percpu() */
- smp_read_barrier_depends();
-
/*
* Theoretically, the following could test just ATOMIC; however,
* then we'd have to mask off DEAD separately as DEAD may be
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 73a7bf30fe9a..4f052496cdfd 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -86,7 +86,7 @@ static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
return 0;
}
-static inline int percpu_counter_initialized(struct percpu_counter *fbc)
+static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
return (fbc->counters != NULL);
}
@@ -167,9 +167,9 @@ static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
return percpu_counter_read(fbc);
}
-static inline int percpu_counter_initialized(struct percpu_counter *fbc)
+static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
- return 1;
+ return true;
}
#endif /* CONFIG_SMP */
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index ec6dadcc1fde..6c0680641108 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -94,6 +94,7 @@
* or latch delay (on outputs) this parameter (in a custom format)
* specifies the clock skew or latch delay. It typically controls how
* many double inverters are put in front of the line.
+ * @PIN_CONFIG_PERSIST_STATE: retain pin state across sleep or controller reset
* @PIN_CONFIG_END: this is the last enumerator for pin configurations, if
* you need to pass in custom configurations to the pin controller, use
* PIN_CONFIG_END+1 as the base offset.
@@ -122,6 +123,7 @@ enum pin_config_param {
PIN_CONFIG_SLEEP_HARDWARE_STATE,
PIN_CONFIG_SLEW_RATE,
PIN_CONFIG_SKEW_DELAY,
+ PIN_CONFIG_PERSIST_STATE,
PIN_CONFIG_END = 0x7F,
PIN_CONFIG_MAX = 0xFF,
};
diff --git a/include/linux/platform_data/mtd-onenand-omap2.h b/include/linux/platform_data/mtd-onenand-omap2.h
deleted file mode 100644
index 56ff0e6f5ad1..000000000000
--- a/include/linux/platform_data/mtd-onenand-omap2.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (C) 2006 Nokia Corporation
- * Author: Juha Yrjola
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __MTD_ONENAND_OMAP2_H
-#define __MTD_ONENAND_OMAP2_H
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-
-#define ONENAND_SYNC_READ (1 << 0)
-#define ONENAND_SYNC_READWRITE (1 << 1)
-#define ONENAND_IN_OMAP34XX (1 << 2)
-
-struct omap_onenand_platform_data {
- int cs;
- int gpio_irq;
- struct mtd_partition *parts;
- int nr_parts;
- int (*onenand_setup)(void __iomem *, int *freq_ptr);
- int dma_channel;
- u8 flags;
- u8 regulator_can_sleep;
- u8 skip_initial_unlocking;
-
- /* for passing the partitions */
- struct device_node *of_node;
-};
-#endif
diff --git a/include/linux/platform_data/spi-s3c64xx.h b/include/linux/platform_data/spi-s3c64xx.h
index da79774078a7..773daf7915a3 100644
--- a/include/linux/platform_data/spi-s3c64xx.h
+++ b/include/linux/platform_data/spi-s3c64xx.h
@@ -1,10 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
/*
* Copyright (C) 2009 Samsung Electronics Ltd.
* Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __SPI_S3C64XX_H
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 492ed473ba7e..e723b78d8357 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -556,9 +556,10 @@ struct pm_subsys_data {
* These flags can be set by device drivers at the probe time. They need not be
* cleared by the drivers as the driver core will take care of that.
*
- * NEVER_SKIP: Do not skip system suspend/resume callbacks for the device.
+ * NEVER_SKIP: Do not skip all system suspend/resume callbacks for the device.
* SMART_PREPARE: Check the return value of the driver's ->prepare callback.
* SMART_SUSPEND: No need to resume the device from runtime suspend.
+ * LEAVE_SUSPENDED: Avoid resuming the device during system resume if possible.
*
* Setting SMART_PREPARE instructs bus types and PM domains which may want
* system suspend/resume callbacks to be skipped for the device to return 0 from
@@ -572,10 +573,14 @@ struct pm_subsys_data {
* necessary from the driver's perspective. It also may cause them to skip
* invocations of the ->suspend_late and ->suspend_noirq callbacks provided by
* the driver if they decide to leave the device in runtime suspend.
+ *
+ * Setting LEAVE_SUSPENDED informs the PM core and middle-layer code that the
+ * driver prefers the device to be left in suspend after system resume.
*/
-#define DPM_FLAG_NEVER_SKIP BIT(0)
-#define DPM_FLAG_SMART_PREPARE BIT(1)
-#define DPM_FLAG_SMART_SUSPEND BIT(2)
+#define DPM_FLAG_NEVER_SKIP BIT(0)
+#define DPM_FLAG_SMART_PREPARE BIT(1)
+#define DPM_FLAG_SMART_SUSPEND BIT(2)
+#define DPM_FLAG_LEAVE_SUSPENDED BIT(3)
struct dev_pm_info {
pm_message_t power_state;
@@ -597,6 +602,8 @@ struct dev_pm_info {
bool wakeup_path:1;
bool syscore:1;
bool no_pm_callbacks:1; /* Owned by the PM core */
+ unsigned int must_resume:1; /* Owned by the PM core */
+ unsigned int may_skip_resume:1; /* Set by subsystems */
#else
unsigned int should_wakeup:1;
#endif
@@ -766,6 +773,7 @@ extern int pm_generic_poweroff(struct device *dev);
extern void pm_generic_complete(struct device *dev);
extern void dev_pm_skip_next_resume_phases(struct device *dev);
+extern bool dev_pm_may_skip_resume(struct device *dev);
extern bool dev_pm_smart_suspend_and_suspended(struct device *dev);
#else /* !CONFIG_PM_SLEEP */
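A driver opts in to these flags once at probe time; the snippet below assumes the dev_pm_set_driver_flags() helper declared elsewhere in this header, and the particular flag combination is only an example:

/* Hedged sketch: let the PM core skip redundant resume work for us. */
static int my_probe(struct device *dev)
{
	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
				     DPM_FLAG_LEAVE_SUSPENDED);
	return 0;
}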
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h
index 4c2cba7ec1d4..4238dde0aaf0 100644
--- a/include/linux/pm_wakeup.h
+++ b/include/linux/pm_wakeup.h
@@ -88,6 +88,11 @@ static inline bool device_may_wakeup(struct device *dev)
return dev->power.can_wakeup && !!dev->power.wakeup;
}
+static inline void device_set_wakeup_path(struct device *dev)
+{
+ dev->power.wakeup_path = true;
+}
+
/* drivers/base/power/wakeup.c */
extern void wakeup_source_prepare(struct wakeup_source *ws, const char *name);
extern struct wakeup_source *wakeup_source_create(const char *name);
@@ -174,6 +179,8 @@ static inline bool device_may_wakeup(struct device *dev)
return dev->power.can_wakeup && dev->power.should_wakeup;
}
+static inline void device_set_wakeup_path(struct device *dev) {}
+
static inline void __pm_stay_awake(struct wakeup_source *ws) {}
static inline void pm_stay_awake(struct device *dev) {}
diff --git a/include/linux/poll.h b/include/linux/poll.h
index d384f12abdd5..04781a753326 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -37,7 +37,7 @@ typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_
*/
typedef struct poll_table_struct {
poll_queue_proc _qproc;
- unsigned long _key;
+ __poll_t _key;
} poll_table;
static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
@@ -62,20 +62,20 @@ static inline bool poll_does_not_wait(const poll_table *p)
* to be started implicitly on poll(). You typically only want to do that
* if the application is actually polling for POLLIN and/or POLLOUT.
*/
-static inline unsigned long poll_requested_events(const poll_table *p)
+static inline __poll_t poll_requested_events(const poll_table *p)
{
- return p ? p->_key : ~0UL;
+ return p ? p->_key : ~(__poll_t)0;
}
static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
{
pt->_qproc = qproc;
- pt->_key = ~0UL; /* all events enabled */
+ pt->_key = ~(__poll_t)0; /* all events enabled */
}
struct poll_table_entry {
struct file *filp;
- unsigned long key;
+ __poll_t key;
wait_queue_entry_t wait;
wait_queue_head_t *wait_address;
};
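With _key and the ->poll() methods now typed __poll_t, implementations return EPOLL* bits through a bitwise-safe type that sparse can check. The usual shape of such a method; my_dev, data_available() and space_available() are assumed for the illustration:

static __poll_t my_poll(struct file *file, poll_table *wait)
{
	struct my_dev *dev = file->private_data;	/* assumed type */
	__poll_t mask = 0;

	poll_wait(file, &dev->waitq, wait);	/* registers, never sleeps */
	if (data_available(dev))		/* assumed helper */
		mask |= EPOLLIN | EPOLLRDNORM;
	if (space_available(dev))		/* assumed helper */
		mask |= EPOLLOUT | EPOLLWRNORM;
	return mask;
}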
diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h
index 38d8225510f1..3a3bc71017d5 100644
--- a/include/linux/posix-clock.h
+++ b/include/linux/posix-clock.h
@@ -68,7 +68,7 @@ struct posix_clock_operations {
int (*open) (struct posix_clock *pc, fmode_t f_mode);
- uint (*poll) (struct posix_clock *pc,
+ __poll_t (*poll) (struct posix_clock *pc,
struct file *file, poll_table *wait);
int (*release) (struct posix_clock *pc);
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index 672c4f32311e..c85704fcdbd2 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -42,13 +42,26 @@ struct cpu_timer_list {
#define CLOCKFD CPUCLOCK_MAX
#define CLOCKFD_MASK (CPUCLOCK_PERTHREAD_MASK|CPUCLOCK_CLOCK_MASK)
-#define MAKE_PROCESS_CPUCLOCK(pid, clock) \
- ((~(clockid_t) (pid) << 3) | (clockid_t) (clock))
-#define MAKE_THREAD_CPUCLOCK(tid, clock) \
- MAKE_PROCESS_CPUCLOCK((tid), (clock) | CPUCLOCK_PERTHREAD_MASK)
+static inline clockid_t make_process_cpuclock(const unsigned int pid,
+ const clockid_t clock)
+{
+ return ((~pid) << 3) | clock;
+}
+static inline clockid_t make_thread_cpuclock(const unsigned int tid,
+ const clockid_t clock)
+{
+ return make_process_cpuclock(tid, clock | CPUCLOCK_PERTHREAD_MASK);
+}
-#define FD_TO_CLOCKID(fd) ((~(clockid_t) (fd) << 3) | CLOCKFD)
-#define CLOCKID_TO_FD(clk) ((unsigned int) ~((clk) >> 3))
+static inline clockid_t fd_to_clockid(const int fd)
+{
+ return make_process_cpuclock((unsigned int) fd, CLOCKFD);
+}
+
+static inline int clockid_to_fd(const clockid_t clk)
+{
+ return ~(clk >> 3);
+}
#define REQUEUE_PENDING 1
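The macro-to-inline conversion keeps the same dynamic-clock encoding: a clock file descriptor becomes ((~fd) << 3) | CLOCKFD and is recovered with ~(clk >> 3). A standalone round-trip check; CLOCKFD mirrors the header's CPUCLOCK_MAX value of 3, and the shift of a negative value relies on the same implementation-defined behaviour the kernel does:

#include <stdio.h>

#define CLOCKFD 3	/* mirrors CPUCLOCK_MAX in the header */

static int fd_to_clockid(int fd)
{
	return ((~fd) << 3) | CLOCKFD;
}

static int clockid_to_fd(int clk)
{
	return ~(clk >> 3);	/* arithmetic shift undoes the encoding */
}

int main(void)
{
	int fd = 5;
	int clk = fd_to_clockid(fd);

	printf("fd=%d -> clockid=%d -> fd=%d\n", fd, clk, clockid_to_fd(clk));
	return 0;
}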
diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
index b2b7255ec7f5..540595a321a7 100644
--- a/include/linux/posix_acl.h
+++ b/include/linux/posix_acl.h
@@ -12,6 +12,7 @@
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
+#include <linux/refcount.h>
#include <uapi/linux/posix_acl.h>
struct posix_acl_entry {
@@ -24,7 +25,7 @@ struct posix_acl_entry {
};
struct posix_acl {
- atomic_t a_refcount;
+ refcount_t a_refcount;
struct rcu_head a_rcu;
unsigned int a_count;
struct posix_acl_entry a_entries[0];
@@ -41,7 +42,7 @@ static inline struct posix_acl *
posix_acl_dup(struct posix_acl *acl)
{
if (acl)
- atomic_inc(&acl->a_refcount);
+ refcount_inc(&acl->a_refcount);
return acl;
}
@@ -51,7 +52,7 @@ posix_acl_dup(struct posix_acl *acl)
static inline void
posix_acl_release(struct posix_acl *acl)
{
- if (acl && atomic_dec_and_test(&acl->a_refcount))
+ if (acl && refcount_dec_and_test(&acl->a_refcount))
kfree_rcu(acl, a_rcu);
}
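
The refcount_t conversion keeps the same get/put shape while gaining saturation and a WARN on overflow/underflow instead of a silent wrap. The generic lifetime pattern, sketched on an invented object rather than the ACL code itself:

#include <linux/refcount.h>
#include <linux/slab.h>

struct demo_obj {
	refcount_t ref;
};

static struct demo_obj *demo_obj_alloc(void)
{
	struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj)
		refcount_set(&obj->ref, 1);	/* caller holds one reference */
	return obj;
}

static void demo_obj_put(struct demo_obj *obj)
{
	if (obj && refcount_dec_and_test(&obj->ref))
		kfree(obj);
}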
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
index e6187f524f2c..01fbf1b16258 100644
--- a/include/linux/power/bq27xxx_battery.h
+++ b/include/linux/power/bq27xxx_battery.h
@@ -16,6 +16,7 @@ enum bq27xxx_chip {
BQ27520G2, /* bq27520G2 */
BQ27520G3, /* bq27520G3 */
BQ27520G4, /* bq27520G4 */
+ BQ27521, /* bq27521 */
BQ27530, /* bq27530, bq27531 */
BQ27531,
BQ27541, /* bq27541, bq27542, bq27546, bq27742 */
diff --git a/include/linux/property.h b/include/linux/property.h
index 5b0563ad79a5..1106bc62dd8c 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -283,6 +283,8 @@ bool device_dma_supported(struct device *dev);
enum dev_dma_attr device_get_dma_attr(struct device *dev);
+void *device_get_match_data(struct device *dev);
+
int device_get_phy_mode(struct device *dev);
void *device_get_mac_address(struct device *dev, char *addr, int alen);
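
device_get_match_data() looks up the per-compatible driver data through the fwnode layer, so one probe() can serve DT and ACPI matches alike. A hedged sketch with invented names:

#include <linux/platform_device.h>
#include <linux/property.h>

struct demo_variant {
	unsigned int num_channels;	/* hypothetical per-SoC parameter */
};

static int demo_probe(struct platform_device *pdev)
{
	const struct demo_variant *variant;

	variant = device_get_match_data(&pdev->dev);
	if (!variant)
		return -ENODEV;

	dev_info(&pdev->dev, "%u channels\n", variant->num_channels);
	return 0;
}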
diff --git a/include/linux/psci.h b/include/linux/psci.h
index bdea1cb5e1db..f724fd8c78e8 100644
--- a/include/linux/psci.h
+++ b/include/linux/psci.h
@@ -26,6 +26,7 @@ int psci_cpu_init_idle(unsigned int cpu);
int psci_cpu_suspend_enter(unsigned long index);
struct psci_operations {
+ u32 (*get_version)(void);
int (*cpu_suspend)(u32 state, unsigned long entry_point);
int (*cpu_off)(u32 state);
int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
@@ -46,10 +47,11 @@ static inline int psci_dt_init(void) { return 0; }
#if defined(CONFIG_ARM_PSCI_FW) && defined(CONFIG_ACPI)
int __init psci_acpi_init(void);
bool __init acpi_psci_present(void);
-bool __init acpi_psci_use_hvc(void);
+bool acpi_psci_use_hvc(void);
#else
static inline int psci_acpi_init(void) { return 0; }
static inline bool acpi_psci_present(void) { return false; }
+static inline bool acpi_psci_use_hvc(void) { return false; }
#endif
#endif /* __LINUX_PSCI_H */
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index a6ddc42f87a5..043d04784675 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -197,7 +197,7 @@ static inline void exit_tasks_rcu_finish(void) { }
#define cond_resched_rcu_qs() \
do { \
if (!cond_resched()) \
- rcu_note_voluntary_context_switch(current); \
+ rcu_note_voluntary_context_switch_lite(current); \
} while (0)
/*
@@ -433,12 +433,12 @@ static inline void rcu_preempt_sleep_check(void) { }
* @p: The pointer to read
*
* Return the value of the specified RCU-protected pointer, but omit the
- * smp_read_barrier_depends() and keep the READ_ONCE(). This is useful
- * when the value of this pointer is accessed, but the pointer is not
- * dereferenced, for example, when testing an RCU-protected pointer against
- * NULL. Although rcu_access_pointer() may also be used in cases where
- * update-side locks prevent the value of the pointer from changing, you
- * should instead use rcu_dereference_protected() for this use case.
+ * lockdep checks for being in an RCU read-side critical section. This is
+ * useful when the value of this pointer is accessed, but the pointer is
+ * not dereferenced, for example, when testing an RCU-protected pointer
+ * against NULL. Although rcu_access_pointer() may also be used in cases
+ * where update-side locks prevent the value of the pointer from changing,
+ * you should instead use rcu_dereference_protected() for this use case.
*
* It is also permissible to use rcu_access_pointer() when read-side
* access to the pointer was removed at least one grace period ago, as
@@ -521,12 +521,11 @@ static inline void rcu_preempt_sleep_check(void) { }
* @c: The conditions under which the dereference will take place
*
* Return the value of the specified RCU-protected pointer, but omit
- * both the smp_read_barrier_depends() and the READ_ONCE(). This
- * is useful in cases where update-side locks prevent the value of the
- * pointer from changing. Please note that this primitive does *not*
- * prevent the compiler from repeating this reference or combining it
- * with other references, so it should not be used without protection
- * of appropriate locks.
+ * the READ_ONCE(). This is useful in cases where update-side locks
+ * prevent the value of the pointer from changing. Please note that this
+ * primitive does *not* prevent the compiler from repeating this reference
+ * or combining it with other references, so it should not be used without
+ * protection of appropriate locks.
*
* This function is only for update-side use. Using this function
* when protected only by rcu_read_lock() will result in infrequent
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index b3dbf9502fd0..ce9beec35e34 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -111,7 +111,6 @@ static inline void rcu_cpu_stall_reset(void) { }
static inline void rcu_idle_enter(void) { }
static inline void rcu_idle_exit(void) { }
static inline void rcu_irq_enter(void) { }
-static inline bool rcu_irq_enter_disabled(void) { return false; }
static inline void rcu_irq_exit_irqson(void) { }
static inline void rcu_irq_enter_irqson(void) { }
static inline void rcu_irq_exit(void) { }
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 37d6fd3b7ff8..fd996cdf1833 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -85,7 +85,6 @@ void rcu_irq_enter(void);
void rcu_irq_exit(void);
void rcu_irq_enter_irqson(void);
void rcu_irq_exit_irqson(void);
-bool rcu_irq_enter_disabled(void);
void exit_rcu(void);
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 15eddc1353ba..20268b7d5001 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -30,6 +30,7 @@ struct regmap;
struct regmap_range_cfg;
struct regmap_field;
struct snd_ac97;
+struct sdw_slave;
/* An enum of all the supported cache types */
enum regcache_type {
@@ -264,6 +265,9 @@ typedef void (*regmap_unlock)(void *);
* field is NULL but precious_table (see below) is not, the
* check is performed on such table (a register is precious if
* it belongs to one of the ranges specified by precious_table).
+ * @disable_locking: This regmap is either protected by external means or
+ * is guaranteed not to be accessed from multiple threads.
+ * Don't use any locking mechanisms.
* @lock: Optional lock callback (overrides regmap's default lock
* function, based on spinlock or mutex).
* @unlock: As above for unlocking.
@@ -296,7 +300,10 @@ typedef void (*regmap_unlock)(void *);
* a read.
* @write_flag_mask: Mask to be set in the top bytes of the register when doing
* a write. If both read_flag_mask and write_flag_mask are
- * empty the regmap_bus default masks are used.
+ * empty and zero_flag_mask is not set, the regmap_bus default
+ * masks are used.
+ * @zero_flag_mask: If set, read_flag_mask and write_flag_mask are used even
+ * if they are both empty.
* @use_single_rw: If set, converts the bulk read and write operations into
* a series of single read and write operations. This is useful
* for devices that do not support bulk read and write.
@@ -317,6 +324,7 @@ typedef void (*regmap_unlock)(void *);
*
* @ranges: Array of configuration entries for virtual address ranges.
* @num_ranges: Number of range configuration entries.
+ * @use_hwlock: Indicate if a hardware spinlock should be used.
* @hwlock_id: Specify the hardware spinlock id.
* @hwlock_mode: The hardware spinlock mode, should be HWLOCK_IRQSTATE,
* HWLOCK_IRQ or 0.
@@ -333,6 +341,8 @@ struct regmap_config {
bool (*readable_reg)(struct device *dev, unsigned int reg);
bool (*volatile_reg)(struct device *dev, unsigned int reg);
bool (*precious_reg)(struct device *dev, unsigned int reg);
+
+ bool disable_locking;
regmap_lock lock;
regmap_unlock unlock;
void *lock_arg;
@@ -355,6 +365,7 @@ struct regmap_config {
unsigned long read_flag_mask;
unsigned long write_flag_mask;
+ bool zero_flag_mask;
bool use_single_rw;
bool can_multi_write;
@@ -365,6 +376,7 @@ struct regmap_config {
const struct regmap_range_cfg *ranges;
unsigned int num_ranges;
+ bool use_hwlock;
unsigned int hwlock_id;
unsigned int hwlock_mode;
};
@@ -524,6 +536,10 @@ struct regmap *__regmap_init_ac97(struct snd_ac97 *ac97,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
+struct regmap *__regmap_init_sdw(struct sdw_slave *sdw,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
struct regmap *__devm_regmap_init(struct device *dev,
const struct regmap_bus *bus,
@@ -561,6 +577,10 @@ struct regmap *__devm_regmap_init_ac97(struct snd_ac97 *ac97,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
+struct regmap *__devm_regmap_init_sdw(struct sdw_slave *sdw,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
/*
* Wrapper for regmap_init macros to include a unique lockdep key and name
@@ -710,6 +730,20 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
/**
+ * regmap_init_sdw() - Initialise register map
+ *
+ * @sdw: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_sdw(sdw, config) \
+ __regmap_lockdep_wrapper(__regmap_init_sdw, #config, \
+ sdw, config)
+
+
+/**
* devm_regmap_init() - Initialise managed register map
*
* @dev: Device that will be interacted with
@@ -839,6 +873,20 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
__regmap_lockdep_wrapper(__devm_regmap_init_ac97, #config, \
ac97, config)
+/**
+ * devm_regmap_init_sdw() - Initialise managed register map
+ *
+ * @sdw: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_sdw(sdw, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_sdw, #config, \
+ sdw, config)
+
void regmap_exit(struct regmap *map);
int regmap_reinit_cache(struct regmap *map,
const struct regmap_config *config);
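
The SoundWire wrappers follow the same pattern as the existing I2C/SPI/AC'97 initializers: pass the struct sdw_slave plus a regmap_config and get a (managed) regmap back. A hedged sketch; the register layout is invented:

#include <linux/err.h>
#include <linux/regmap.h>
#include <linux/soundwire/sdw.h>

static const struct regmap_config demo_sdw_regmap_cfg = {
	.reg_bits = 32,			/* hypothetical layout */
	.val_bits = 8,
	.max_register = 0x1000,
};

static int demo_sdw_setup(struct sdw_slave *slave)
{
	struct regmap *map;

	map = devm_regmap_init_sdw(slave, &demo_sdw_regmap_cfg);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* Accesses are now routed over the SoundWire bus. */
	return regmap_write(map, 0x10, 0x01);
}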
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 94417b4226bd..4c00486b7a78 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -214,6 +214,8 @@ struct regulator_ops {
/* set regulator suspend operating mode (defined in consumer.h) */
int (*set_suspend_mode) (struct regulator_dev *, unsigned int mode);
+ int (*resume_early)(struct regulator_dev *rdev);
+
int (*set_pull_down) (struct regulator_dev *);
};
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 9cd4fef37203..93a04893c739 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -42,6 +42,16 @@ struct regulator;
#define REGULATOR_CHANGE_DRMS 0x10
#define REGULATOR_CHANGE_BYPASS 0x20
+/*
+ * operations in suspend mode
+ * DO_NOTHING_IN_SUSPEND - the default value
+ * DISABLE_IN_SUSPEND - turn off regulator in suspend states
+ * ENABLE_IN_SUSPEND - keep regulator on in suspend states
+ */
+#define DO_NOTHING_IN_SUSPEND (-1)
+#define DISABLE_IN_SUSPEND 0
+#define ENABLE_IN_SUSPEND 1
+
/* Regulator active discharge flags */
enum regulator_active_discharge {
REGULATOR_ACTIVE_DISCHARGE_DEFAULT,
@@ -56,16 +66,24 @@ enum regulator_active_discharge {
* state. One of enabled or disabled must be set for the
* configuration to be applied.
*
- * @uV: Operating voltage during suspend.
+ * @uV: Default operating voltage during suspend; it can be adjusted
+ * within the [min_uV, max_uV] window.
+ * @min_uV: Minimum suspend voltage that may be set.
+ * @max_uV: Maximum suspend voltage that may be set.
 * @mode: Operating mode during suspend.
- * @enabled: Enabled during suspend.
- * @disabled: Disabled during suspend.
+ * @enabled: Operation during suspend, one of:
+ * - DO_NOTHING_IN_SUSPEND
+ * - DISABLE_IN_SUSPEND
+ * - ENABLE_IN_SUSPEND
+ * @changeable: Whether this state can be switched between enabled/disabled.
*/
struct regulator_state {
- int uV; /* suspend voltage */
- unsigned int mode; /* suspend regulator operating mode */
- int enabled; /* is regulator enabled in this suspend state */
- int disabled; /* is the regulator disabled in this suspend state */
+ int uV;
+ int min_uV;
+ int max_uV;
+ unsigned int mode;
+ int enabled;
+ bool changeable;
};
/**
@@ -225,12 +243,12 @@ struct regulator_init_data {
#ifdef CONFIG_REGULATOR
void regulator_has_full_constraints(void);
-int regulator_suspend_prepare(suspend_state_t state);
-int regulator_suspend_finish(void);
#else
static inline void regulator_has_full_constraints(void)
{
}
+#endif
+
static inline int regulator_suspend_prepare(suspend_state_t state)
{
return 0;
@@ -239,6 +257,5 @@ static inline int regulator_suspend_finish(void)
{
return 0;
}
-#endif
#endif
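
With the reworked struct regulator_state, board constraints can describe, per suspend state, whether the regulator stays on and in which voltage window it may move. An illustrative (made-up) constraints fragment:

#include <linux/regulator/machine.h>

static struct regulator_init_data demo_buck_init = {
	.constraints = {
		.min_uV = 800000,
		.max_uV = 1200000,
		.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
		.state_mem = {
			.uV = 900000,		/* default suspend voltage */
			.min_uV = 850000,	/* adjustable window */
			.max_uV = 950000,
			.enabled = ENABLE_IN_SUSPEND,
			.changeable = true,
		},
	},
};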
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 289e4d54e3e0..7d9eb39fa76a 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -96,7 +96,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
})
int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full);
-int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
+__poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
struct file *filp, poll_table *poll_table);
diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h
index 10d6ae8bbb7d..ca07366c4c33 100644
--- a/include/linux/rpmsg.h
+++ b/include/linux/rpmsg.h
@@ -157,7 +157,7 @@ int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst);
int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
void *data, int len);
-unsigned int rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
+__poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
poll_table *wait);
#else
@@ -258,7 +258,7 @@ static inline int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src,
return -ENXIO;
}
-static inline unsigned int rpmsg_poll(struct rpmsg_endpoint *ept,
+static inline __poll_t rpmsg_poll(struct rpmsg_endpoint *ept,
struct file *filp, poll_table *wait)
{
/* This shouldn't be possible */
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 0514cc36ac34..1fdcde96eb65 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -71,8 +71,7 @@ static inline bool lockdep_rtnl_is_held(void)
* @p: The pointer to read, prior to dereferencing
*
* Return the value of the specified RCU-protected pointer, but omit
- * both the smp_read_barrier_depends() and the READ_ONCE(), because
- * caller holds RTNL.
+ * the READ_ONCE(), because caller holds RTNL.
*/
#define rtnl_dereference(p) \
rcu_dereference_protected(p, lockdep_rtnl_is_held())
diff --git a/include/linux/mfd/rtsx_common.h b/include/linux/rtsx_common.h
index 443176ee1ab0..443176ee1ab0 100644
--- a/include/linux/mfd/rtsx_common.h
+++ b/include/linux/rtsx_common.h
diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/rtsx_pci.h
index c3d3f04d8cc6..478acf6efac6 100644
--- a/include/linux/mfd/rtsx_pci.h
+++ b/include/linux/rtsx_pci.h
@@ -24,7 +24,7 @@
#include <linux/sched.h>
#include <linux/pci.h>
-#include <linux/mfd/rtsx_common.h>
+#include <linux/rtsx_common.h>
#define MAX_RW_REG_CNT 1024
@@ -203,6 +203,7 @@
#define SD_DDR_MODE 0x04
#define SD_30_MODE 0x08
#define SD_CLK_DIVIDE_MASK 0xC0
+#define SD_MODE_SELECT_MASK 0x0C
#define SD_CFG2 0xFDA1
#define SD_CALCULATE_CRC7 0x00
#define SD_NO_CALCULATE_CRC7 0x80
@@ -226,6 +227,7 @@
#define SD_RSP_TYPE_R6 0x01
#define SD_RSP_TYPE_R7 0x01
#define SD_CFG3 0xFDA2
+#define SD30_CLK_END_EN 0x10
#define SD_RSP_80CLK_TIMEOUT_EN 0x01
#define SD_STAT1 0xFDA3
@@ -309,6 +311,12 @@
#define SD_DATA_STATE 0xFDB6
#define SD_DATA_IDLE 0x80
+#define REG_SD_STOP_SDCLK_CFG 0xFDB8
+#define SD30_CLK_STOP_CFG_EN 0x04
+#define SD30_CLK_STOP_CFG1 0x02
+#define SD30_CLK_STOP_CFG0 0x01
+#define REG_PRE_RW_MODE 0xFD70
+#define EN_INFINITE_MODE 0x01
#define SRCTL 0xFC13
@@ -434,6 +442,7 @@
#define CARD_CLK_EN 0xFD69
#define SD_CLK_EN 0x04
#define MS_CLK_EN 0x08
+#define SD40_CLK_EN 0x10
#define SDIO_CTRL 0xFD6B
#define CD_PAD_CTL 0xFD73
#define CD_DISABLE_MASK 0x07
@@ -453,8 +462,8 @@
#define FPDCTL 0xFC00
#define SSC_POWER_DOWN 0x01
#define SD_OC_POWER_DOWN 0x02
-#define ALL_POWER_DOWN 0x07
-#define OC_POWER_DOWN 0x06
+#define ALL_POWER_DOWN 0x03
+#define OC_POWER_DOWN 0x02
#define PDINFO 0xFC01
#define CLK_CTL 0xFC02
@@ -490,6 +499,9 @@
#define FPGA_PULL_CTL 0xFC1D
#define OLT_LED_CTL 0xFC1E
+#define LED_SHINE_MASK 0x08
+#define LED_SHINE_EN 0x08
+#define LED_SHINE_DISABLE 0x00
#define GPIO_CTL 0xFC1F
#define LDO_CTL 0xFC1E
@@ -511,7 +523,11 @@
#define BPP_LDO_ON 0x00
#define BPP_LDO_SUSPEND 0x02
#define BPP_LDO_OFF 0x03
+#define EFUSE_CTL 0xFC30
+#define EFUSE_ADD 0xFC31
#define SYS_VER 0xFC32
+#define EFUSE_DATAL 0xFC34
+#define EFUSE_DATAH 0xFC35
#define CARD_PULL_CTL1 0xFD60
#define CARD_PULL_CTL2 0xFD61
@@ -553,6 +569,9 @@
#define RBBC1 0xFE2F
#define RBDAT 0xFE30
#define RBCTL 0xFE34
+#define U_AUTO_DMA_EN_MASK 0x20
+#define U_AUTO_DMA_DISABLE 0x00
+#define RB_FLUSH 0x80
#define CFGADDR0 0xFE35
#define CFGADDR1 0xFE36
#define CFGDATA0 0xFE37
@@ -581,6 +600,8 @@
#define LTR_LATENCY_MODE_HW 0
#define LTR_LATENCY_MODE_SW BIT(6)
#define OBFF_CFG 0xFE4C
+#define OBFF_EN_MASK 0x03
+#define OBFF_DISABLE 0x00
#define CDRESUMECTL 0xFE52
#define WAKE_SEL_CTL 0xFE54
@@ -595,6 +616,7 @@
#define FORCE_ASPM_L0_EN 0x01
#define FORCE_ASPM_NO_ASPM 0x00
#define PM_CLK_FORCE_CTL 0xFE58
+#define CLK_PM_EN 0x01
#define FUNC_FORCE_CTL 0xFE59
#define FUNC_FORCE_UPME_XMT_DBG 0x02
#define PERST_GLITCH_WIDTH 0xFE5C
@@ -620,14 +642,23 @@
#define LDO_PWR_SEL 0xFE78
#define L1SUB_CONFIG1 0xFE8D
+#define AUX_CLK_ACTIVE_SEL_MASK 0x01
+#define MAC_CKSW_DONE 0x00
#define L1SUB_CONFIG2 0xFE8E
#define L1SUB_AUTO_CFG 0x02
#define L1SUB_CONFIG3 0xFE8F
#define L1OFF_MBIAS2_EN_5250 BIT(7)
#define DUMMY_REG_RESET_0 0xFE90
+#define IC_VERSION_MASK 0x0F
+#define REG_VREF 0xFE97
+#define PWD_SUSPND_EN 0x10
+#define RTS5260_DMA_RST_CTL_0 0xFEBF
+#define RTS5260_DMA_RST 0x80
+#define RTS5260_ADMA3_RST 0x40
#define AUTOLOAD_CFG_BASE 0xFF00
+#define RELINK_TIME_MASK 0x01
#define PETXCFG 0xFF03
#define FORCE_CLKREQ_DELINK_MASK BIT(7)
#define FORCE_CLKREQ_LOW 0x80
@@ -667,15 +698,24 @@
#define LDO_DV18_CFG 0xFF70
#define LDO_DV18_SR_MASK 0xC0
#define LDO_DV18_SR_DF 0x40
+#define DV331812_MASK 0x70
+#define DV331812_33 0x70
+#define DV331812_17 0x30
#define LDO_CONFIG2 0xFF71
#define LDO_D3318_MASK 0x07
#define LDO_D3318_33V 0x07
#define LDO_D3318_18V 0x02
+#define DV331812_VDD1 0x04
+#define DV331812_POWERON 0x08
+#define DV331812_POWEROFF 0x00
#define LDO_VCC_CFG0 0xFF72
#define LDO_VCC_LMTVTH_MASK 0x30
#define LDO_VCC_LMTVTH_2A 0x10
+/* RTS5260 */
+#define RTS5260_DVCC_TUNE_MASK 0x70
+#define RTS5260_DVCC_33 0x70
#define LDO_VCC_CFG1 0xFF73
#define LDO_VCC_REF_TUNE_MASK 0x30
@@ -684,6 +724,10 @@
#define LDO_VCC_1V8 0x04
#define LDO_VCC_3V3 0x07
#define LDO_VCC_LMT_EN 0x08
+/* RTS5260 */
+#define LDO_POW_SDVDD1_MASK 0x08
+#define LDO_POW_SDVDD1_ON 0x08
+#define LDO_POW_SDVDD1_OFF 0x00
#define LDO_VIO_CFG 0xFF75
#define LDO_VIO_SR_MASK 0xC0
@@ -711,6 +755,160 @@
#define SD_VIO_LDO_1V8 0x40
#define SD_VIO_LDO_3V3 0x70
+#define RTS5260_AUTOLOAD_CFG4 0xFF7F
+#define RTS5260_MIMO_DISABLE 0x8A
+
+#define RTS5260_REG_GPIO_CTL0 0xFC1A
+#define RTS5260_REG_GPIO_MASK 0x01
+#define RTS5260_REG_GPIO_ON 0x01
+#define RTS5260_REG_GPIO_OFF 0x00
+
+#define PWR_GLOBAL_CTRL 0xF200
+#define PCIE_L1_2_EN 0x0C
+#define PCIE_L1_1_EN 0x0A
+#define PCIE_L1_0_EN 0x09
+#define PWR_FE_CTL 0xF201
+#define PCIE_L1_2_PD_FE_EN 0x0C
+#define PCIE_L1_1_PD_FE_EN 0x0A
+#define PCIE_L1_0_PD_FE_EN 0x09
+#define CFG_PCIE_APHY_OFF_0 0xF204
+#define CFG_PCIE_APHY_OFF_0_DEFAULT 0xBF
+#define CFG_PCIE_APHY_OFF_1 0xF205
+#define CFG_PCIE_APHY_OFF_1_DEFAULT 0xFF
+#define CFG_PCIE_APHY_OFF_2 0xF206
+#define CFG_PCIE_APHY_OFF_2_DEFAULT 0x01
+#define CFG_PCIE_APHY_OFF_3 0xF207
+#define CFG_PCIE_APHY_OFF_3_DEFAULT 0x00
+#define CFG_L1_0_PCIE_MAC_RET_VALUE 0xF20C
+#define CFG_L1_0_PCIE_DPHY_RET_VALUE 0xF20E
+#define CFG_L1_0_SYS_RET_VALUE 0xF210
+#define CFG_L1_0_CRC_MISC_RET_VALUE 0xF212
+#define CFG_L1_0_CRC_SD30_RET_VALUE 0xF214
+#define CFG_L1_0_CRC_SD40_RET_VALUE 0xF216
+#define CFG_LP_FPWM_VALUE 0xF219
+#define CFG_LP_FPWM_VALUE_DEFAULT 0x18
+#define PWC_CDR 0xF253
+#define PWC_CDR_DEFAULT 0x03
+#define CFG_L1_0_RET_VALUE_DEFAULT 0x1B
+#define CFG_L1_0_CRC_MISC_RET_VALUE_DEFAULT 0x0C
+
+/* OCPCTL */
+#define SD_DETECT_EN 0x08
+#define SD_OCP_INT_EN 0x04
+#define SD_OCP_INT_CLR 0x02
+#define SD_OC_CLR 0x01
+
+#define SDVIO_DETECT_EN (1 << 7)
+#define SDVIO_OCP_INT_EN (1 << 6)
+#define SDVIO_OCP_INT_CLR (1 << 5)
+#define SDVIO_OC_CLR (1 << 4)
+
+/* OCPSTAT */
+#define SD_OCP_DETECT 0x08
+#define SD_OC_NOW 0x04
+#define SD_OC_EVER 0x02
+
+#define SDVIO_OC_NOW (1 << 6)
+#define SDVIO_OC_EVER (1 << 5)
+
+#define REG_OCPCTL 0xFD6A
+#define REG_OCPSTAT 0xFD6E
+#define REG_OCPGLITCH 0xFD6C
+#define REG_OCPPARA1 0xFD6B
+#define REG_OCPPARA2 0xFD6D
+
+/* rts5260 DV3318 OCP-related registers */
+#define REG_DV3318_OCPCTL 0xFD89
+#define DV3318_OCP_TIME_MASK 0xF0
+#define DV3318_DETECT_EN 0x08
+#define DV3318_OCP_INT_EN 0x04
+#define DV3318_OCP_INT_CLR 0x02
+#define DV3318_OCP_CLR 0x01
+
+#define REG_DV3318_OCPSTAT 0xFD8A
+#define DV3318_OCP_GlITCH_TIME_MASK 0xF0
+#define DV3318_OCP_DETECT 0x08
+#define DV3318_OCP_NOW 0x04
+#define DV3318_OCP_EVER 0x02
+
+#define SD_OCP_GLITCH_MASK 0x0F
+
+/* OCPPARA1 */
+#define SDVIO_OCP_TIME_60 0x00
+#define SDVIO_OCP_TIME_100 0x10
+#define SDVIO_OCP_TIME_200 0x20
+#define SDVIO_OCP_TIME_400 0x30
+#define SDVIO_OCP_TIME_600 0x40
+#define SDVIO_OCP_TIME_800 0x50
+#define SDVIO_OCP_TIME_1100 0x60
+#define SDVIO_OCP_TIME_MASK 0x70
+
+#define SD_OCP_TIME_60 0x00
+#define SD_OCP_TIME_100 0x01
+#define SD_OCP_TIME_200 0x02
+#define SD_OCP_TIME_400 0x03
+#define SD_OCP_TIME_600 0x04
+#define SD_OCP_TIME_800 0x05
+#define SD_OCP_TIME_1100 0x06
+#define SD_OCP_TIME_MASK 0x07
+
+/* OCPPARA2 */
+#define SDVIO_OCP_THD_190 0x00
+#define SDVIO_OCP_THD_250 0x10
+#define SDVIO_OCP_THD_320 0x20
+#define SDVIO_OCP_THD_380 0x30
+#define SDVIO_OCP_THD_440 0x40
+#define SDVIO_OCP_THD_500 0x50
+#define SDVIO_OCP_THD_570 0x60
+#define SDVIO_OCP_THD_630 0x70
+#define SDVIO_OCP_THD_MASK 0x70
+
+#define SD_OCP_THD_450 0x00
+#define SD_OCP_THD_550 0x01
+#define SD_OCP_THD_650 0x02
+#define SD_OCP_THD_750 0x03
+#define SD_OCP_THD_850 0x04
+#define SD_OCP_THD_950 0x05
+#define SD_OCP_THD_1050 0x06
+#define SD_OCP_THD_1150 0x07
+#define SD_OCP_THD_MASK 0x07
+
+#define SDVIO_OCP_GLITCH_MASK 0xF0
+#define SDVIO_OCP_GLITCH_NONE 0x00
+#define SDVIO_OCP_GLITCH_50U 0x10
+#define SDVIO_OCP_GLITCH_100U 0x20
+#define SDVIO_OCP_GLITCH_200U 0x30
+#define SDVIO_OCP_GLITCH_600U 0x40
+#define SDVIO_OCP_GLITCH_800U 0x50
+#define SDVIO_OCP_GLITCH_1M 0x60
+#define SDVIO_OCP_GLITCH_2M 0x70
+#define SDVIO_OCP_GLITCH_3M 0x80
+#define SDVIO_OCP_GLITCH_4M 0x90
+#define SDVIO_OCP_GLIVCH_5M 0xA0
+#define SDVIO_OCP_GLITCH_6M 0xB0
+#define SDVIO_OCP_GLITCH_7M 0xC0
+#define SDVIO_OCP_GLITCH_8M 0xD0
+#define SDVIO_OCP_GLITCH_9M 0xE0
+#define SDVIO_OCP_GLITCH_10M 0xF0
+
+#define SD_OCP_GLITCH_MASK 0x0F
+#define SD_OCP_GLITCH_NONE 0x00
+#define SD_OCP_GLITCH_50U 0x01
+#define SD_OCP_GLITCH_100U 0x02
+#define SD_OCP_GLITCH_200U 0x03
+#define SD_OCP_GLITCH_600U 0x04
+#define SD_OCP_GLITCH_800U 0x05
+#define SD_OCP_GLITCH_1M 0x06
+#define SD_OCP_GLITCH_2M 0x07
+#define SD_OCP_GLITCH_3M 0x08
+#define SD_OCP_GLITCH_4M 0x09
+#define SD_OCP_GLIVCH_5M 0x0A
+#define SD_OCP_GLITCH_6M 0x0B
+#define SD_OCP_GLITCH_7M 0x0C
+#define SD_OCP_GLITCH_8M 0x0D
+#define SD_OCP_GLITCH_9M 0x0E
+#define SD_OCP_GLITCH_10M 0x0F
+
/* Phy register */
#define PHY_PCR 0x00
#define PHY_PCR_FORCE_CODE 0xB000
@@ -857,6 +1055,7 @@
#define PCR_ASPM_SETTING_REG1 0x160
#define PCR_ASPM_SETTING_REG2 0x168
+#define PCR_ASPM_SETTING_5260 0x178
#define PCR_SETTING_REG1 0x724
#define PCR_SETTING_REG2 0x814
@@ -890,6 +1089,7 @@ struct pcr_ops {
int (*conv_clk_and_div_n)(int clk, int dir);
void (*fetch_vendor_settings)(struct rtsx_pcr *pcr);
void (*force_power_down)(struct rtsx_pcr *pcr, u8 pm_state);
+ void (*stop_cmd)(struct rtsx_pcr *pcr);
void (*set_aspm)(struct rtsx_pcr *pcr, bool enable);
int (*set_ltr_latency)(struct rtsx_pcr *pcr, u32 latency);
@@ -897,6 +1097,12 @@ struct pcr_ops {
void (*set_l1off_cfg_sub_d0)(struct rtsx_pcr *pcr, int active);
void (*full_on)(struct rtsx_pcr *pcr);
void (*power_saving)(struct rtsx_pcr *pcr);
+ void (*enable_ocp)(struct rtsx_pcr *pcr);
+ void (*disable_ocp)(struct rtsx_pcr *pcr);
+ void (*init_ocp)(struct rtsx_pcr *pcr);
+ void (*process_ocp)(struct rtsx_pcr *pcr);
+ int (*get_ocpstat)(struct rtsx_pcr *pcr, u8 *val);
+ void (*clear_ocpstat)(struct rtsx_pcr *pcr);
};
enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN};
@@ -935,6 +1141,9 @@ enum dev_aspm_mode {
* @l1_snooze_delay: l1 snooze delay
* @ltr_l1off_sspwrgate: ltr l1off sspwrgate
* @ltr_l1off_snooze_sspwrgate: ltr l1off snooze sspwrgate
+ * @ocp_en: over-current protection (OCP) enable flag
+ * @sd_400mA_ocp_thd: 400mA OCP threshold
+ * @sd_800mA_ocp_thd: 800mA OCP threshold
*/
struct rtsx_cr_option {
u32 dev_flags;
@@ -949,6 +1158,19 @@ struct rtsx_cr_option {
u32 l1_snooze_delay;
u8 ltr_l1off_sspwrgate;
u8 ltr_l1off_snooze_sspwrgate;
+ bool ocp_en;
+ u8 sd_400mA_ocp_thd;
+ u8 sd_800mA_ocp_thd;
+};
+
+/*
+ * struct rtsx_hw_param - card reader hardware parameters
+ * @interrupt_en: indicates which interrupts are enabled
+ * @ocp_glitch: OCP glitch time
+ */
+struct rtsx_hw_param {
+ u32 interrupt_en;
+ u8 ocp_glitch;
};
#define rtsx_set_dev_flag(cr, flag) \
@@ -963,6 +1185,7 @@ struct rtsx_pcr {
unsigned int id;
int pcie_cap;
struct rtsx_cr_option option;
+ struct rtsx_hw_param hw_param;
/* pci resources */
unsigned long addr;
@@ -1042,12 +1265,15 @@ struct rtsx_pcr {
struct rtsx_slot *slots;
u8 dma_error_count;
+ u8 ocp_stat;
+ u8 ocp_stat2;
};
#define PID_524A 0x524A
-#define PID_5249 0x5249
-#define PID_5250 0x5250
+#define PID_5249 0x5249
+#define PID_5250 0x5250
#define PID_525A 0x525A
+#define PID_5260 0x5260
#define CHK_PCI_PID(pcr, pid) ((pcr)->pci->device == (pid))
#define PCI_VID(pcr) ((pcr)->pci->vendor)
diff --git a/include/linux/mfd/rtsx_usb.h b/include/linux/rtsx_usb.h
index c446e4fd6b5c..c446e4fd6b5c 100644
--- a/include/linux/mfd/rtsx_usb.h
+++ b/include/linux/rtsx_usb.h
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index b7c83254c566..22b2131bcdcd 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -276,6 +276,17 @@ int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
unsigned int n_pages, unsigned int offset,
unsigned long size, gfp_t gfp_mask);
+#ifdef CONFIG_SGL_ALLOC
+struct scatterlist *sgl_alloc_order(unsigned long long length,
+ unsigned int order, bool chainable,
+ gfp_t gfp, unsigned int *nent_p);
+struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
+ unsigned int *nent_p);
+void sgl_free_n_order(struct scatterlist *sgl, int nents, int order);
+void sgl_free_order(struct scatterlist *sgl, int order);
+void sgl_free(struct scatterlist *sgl);
+#endif /* CONFIG_SGL_ALLOC */
+
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
size_t buflen, off_t skip, bool to_buffer);
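
The CONFIG_SGL_ALLOC helpers build a scatterlist covering the requested number of bytes from page allocations and free both the list and its pages in one call, replacing the usual open-coded loop. A short sketch:

#include <linux/scatterlist.h>

static int demo_map_buffer(unsigned long long len)
{
	struct scatterlist *sgl;
	unsigned int nents;

	sgl = sgl_alloc(len, GFP_KERNEL, &nents);
	if (!sgl)
		return -ENOMEM;

	/* ... hand (sgl, nents) to the DMA layer here ... */

	sgl_free(sgl);		/* releases the pages and the list */
	return 0;
}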
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d2588263a989..166144c04ef6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -472,11 +472,15 @@ struct sched_dl_entity {
* has not been executed yet. This flag is useful to avoid race
* conditions between the inactive timer handler and the wakeup
* code.
+ *
+ * @dl_overrun tells whether the task asked to be informed about runtime
+ * overruns.
*/
unsigned int dl_throttled : 1;
unsigned int dl_boosted : 1;
unsigned int dl_yielded : 1;
unsigned int dl_non_contending : 1;
+ unsigned int dl_overrun : 1;
/*
* Bandwidth enforcement timer. Each -deadline task has its
@@ -1427,6 +1431,7 @@ extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
extern int sched_setattr(struct task_struct *, const struct sched_attr *);
+extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);
/**
@@ -1446,12 +1451,21 @@ extern void ia64_set_curr_task(int cpu, struct task_struct *p);
void yield(void);
union thread_union {
+#ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK
+ struct task_struct task;
+#endif
#ifndef CONFIG_THREAD_INFO_IN_TASK
struct thread_info thread_info;
#endif
unsigned long stack[THREAD_SIZE/sizeof(long)];
};
+#ifndef CONFIG_THREAD_INFO_IN_TASK
+extern struct thread_info init_thread_info;
+#endif
+
+extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];
+
#ifdef CONFIG_THREAD_INFO_IN_TASK
static inline struct thread_info *task_thread_info(struct task_struct *task)
{
diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h
index d1ad3d825561..0b55834efd46 100644
--- a/include/linux/sched/cpufreq.h
+++ b/include/linux/sched/cpufreq.h
@@ -12,8 +12,6 @@
#define SCHED_CPUFREQ_DL (1U << 1)
#define SCHED_CPUFREQ_IOWAIT (1U << 2)
-#define SCHED_CPUFREQ_RT_DL (SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL)
-
#ifdef CONFIG_CPU_FREQ
struct update_util_data {
void (*func)(struct update_util_data *data, u64 time, unsigned int flags);
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 0aa4548fb492..23b4f9cb82db 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -285,6 +285,34 @@ static inline void kernel_signal_stop(void)
schedule();
}
+#ifdef __ARCH_SI_TRAPNO
+# define ___ARCH_SI_TRAPNO(_a1) , _a1
+#else
+# define ___ARCH_SI_TRAPNO(_a1)
+#endif
+#ifdef __ia64__
+# define ___ARCH_SI_IA64(_a1, _a2, _a3) , _a1, _a2, _a3
+#else
+# define ___ARCH_SI_IA64(_a1, _a2, _a3)
+#endif
+
+int force_sig_fault(int sig, int code, void __user *addr
+ ___ARCH_SI_TRAPNO(int trapno)
+ ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
+ , struct task_struct *t);
+int send_sig_fault(int sig, int code, void __user *addr
+ ___ARCH_SI_TRAPNO(int trapno)
+ ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
+ , struct task_struct *t);
+
+int force_sig_mceerr(int code, void __user *, short, struct task_struct *);
+int send_sig_mceerr(int code, void __user *, short, struct task_struct *);
+
+int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper);
+int force_sig_pkuerr(void __user *addr, u32 pkey);
+
+int force_sig_ptrace_errno_trap(int errno, void __user *addr);
+
extern int send_sig_info(int, struct siginfo *, struct task_struct *);
extern int force_sigsegv(int, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
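
The fault helpers hide the arch-conditional siginfo arguments: ___ARCH_SI_TRAPNO and ___ARCH_SI_IA64 expand to nothing on most architectures. On such an architecture a fault path reduces to the sketch below (the address is illustrative):

#include <linux/sched/signal.h>

static void demo_report_fault(unsigned long bad_addr)
{
	/* SIGSEGV with the SIL_FAULT layout, aimed at current. */
	force_sig_fault(SIGSEGV, SEGV_MAPERR,
			(void __user *)bad_addr, current);
}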
diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h
index cb4828aaa34f..6a841929073f 100644
--- a/include/linux/sched/task_stack.h
+++ b/include/linux/sched/task_stack.h
@@ -78,7 +78,7 @@ static inline void put_task_stack(struct task_struct *tsk) {}
#define task_stack_end_corrupted(task) \
(*(end_of_stack(task)) != STACK_END_MAGIC)
-static inline int object_is_on_stack(void *obj)
+static inline int object_is_on_stack(const void *obj)
{
void *stack = task_stack_page(current);
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index cf257c2e728d..26347741ba50 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -7,6 +7,12 @@
#include <linux/sched/idle.h>
/*
+ * Increase resolution of cpu_capacity calculations
+ */
+#define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT
+#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
+
+/*
* sched-domains (multiprocessor balancing) declarations:
*/
#ifdef CONFIG_SMP
@@ -27,12 +33,6 @@
#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */
#define SD_NUMA 0x4000 /* cross-node balancing */
-/*
- * Increase resolution of cpu_capacity calculations
- */
-#define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT
-#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
-
#ifdef CONFIG_SCHED_SMT
static inline int cpu_smt_flags(void)
{
diff --git a/include/linux/scif.h b/include/linux/scif.h
index 49a35d6edc94..7046111b8d0a 100644
--- a/include/linux/scif.h
+++ b/include/linux/scif.h
@@ -123,8 +123,8 @@ struct scif_range {
*/
struct scif_pollepd {
scif_epd_t epd;
- short events;
- short revents;
+ __poll_t events;
+ __poll_t revents;
};
/**
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index 10f25f7e4304..c723a5c4e3ff 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -95,11 +95,19 @@ static inline void get_seccomp_filter(struct task_struct *tsk)
#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE)
extern long seccomp_get_filter(struct task_struct *task,
unsigned long filter_off, void __user *data);
+extern long seccomp_get_metadata(struct task_struct *task,
+ unsigned long filter_off, void __user *data);
#else
static inline long seccomp_get_filter(struct task_struct *task,
unsigned long n, void __user *data)
{
return -EINVAL;
}
+static inline long seccomp_get_metadata(struct task_struct *task,
+ unsigned long filter_off,
+ void __user *data)
+{
+ return -EINVAL;
+}
#endif /* CONFIG_SECCOMP_FILTER && CONFIG_CHECKPOINT_RESTORE */
#endif /* _LINUX_SECCOMP_H */
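
seccomp_get_metadata() backs a ptrace request intended for checkpoint/restore, returning a filter's flags by index. A hedged userspace sketch, assuming the libc headers expose PTRACE_SECCOMP_GET_METADATA and mirroring the uapi struct added alongside this hook:

#include <stdint.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

struct seccomp_metadata {		/* mirrors the kernel uapi layout */
	uint64_t filter_off;		/* filter index, 0 = most recent */
	uint64_t flags;			/* e.g. SECCOMP_FILTER_FLAG_LOG */
};

static int dump_filter_flags(pid_t traced, uint64_t idx)
{
	struct seccomp_metadata md = { .filter_off = idx };

	if (ptrace(PTRACE_SECCOMP_GET_METADATA, traced,
		   (void *)sizeof(md), &md) < 0)
		return -1;

	printf("filter %llu flags: %#llx\n",
	       (unsigned long long)idx, (unsigned long long)md.flags);
	return 0;
}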
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index f189a8a3bbb8..bcf4cf26b8c8 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -278,9 +278,8 @@ static inline void raw_write_seqcount_barrier(seqcount_t *s)
static inline int raw_read_seqcount_latch(seqcount_t *s)
{
- int seq = READ_ONCE(s->sequence);
/* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */
- smp_read_barrier_depends();
+ int seq = READ_ONCE(s->sequence); /* ^^^ */
return seq;
}
diff --git a/include/linux/serdev.h b/include/linux/serdev.h
index a73d87b483b4..531031a15cdd 100644
--- a/include/linux/serdev.h
+++ b/include/linux/serdev.h
@@ -200,6 +200,7 @@ static inline int serdev_controller_receive_buf(struct serdev_controller *ctrl,
int serdev_device_open(struct serdev_device *);
void serdev_device_close(struct serdev_device *);
+int devm_serdev_device_open(struct device *, struct serdev_device *);
unsigned int serdev_device_set_baudrate(struct serdev_device *, unsigned int);
void serdev_device_set_flow_control(struct serdev_device *, bool);
int serdev_device_write_buf(struct serdev_device *, const unsigned char *, size_t);
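
devm_serdev_device_open() ties the opened port to the device lifetime, so remove() and error paths need no matching close. A hedged probe sketch:

#include <linux/serdev.h>

static int demo_serdev_probe(struct serdev_device *serdev)
{
	int ret;

	ret = devm_serdev_device_open(&serdev->dev, serdev);
	if (ret)
		return ret;

	serdev_device_set_baudrate(serdev, 115200);
	serdev_device_set_flow_control(serdev, false);
	return 0;
}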
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 042968dd98f0..a9bc7e1b077e 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -11,13 +11,14 @@ struct task_struct;
/* for sysctl */
extern int print_fatal_signals;
-static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
+static inline void copy_siginfo(struct siginfo *to, const struct siginfo *from)
{
- if (from->si_code < 0)
- memcpy(to, from, sizeof(*to));
- else
- /* _sigchld is currently the largest know union member */
- memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
+ memcpy(to, from, sizeof(*to));
+}
+
+static inline void clear_siginfo(struct siginfo *info)
+{
+ memset(info, 0, sizeof(*info));
}
int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from);
@@ -29,9 +30,7 @@ enum siginfo_layout {
SIL_FAULT,
SIL_CHLD,
SIL_RT,
-#ifdef __ARCH_SIGSYS
SIL_SYS,
-#endif
};
enum siginfo_layout siginfo_layout(int sig, int si_code);
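
With copy_siginfo() now copying the full structure, senders must not leave any part of a stack-allocated siginfo uninitialized; clear_siginfo() exists for exactly that. The intended zero-then-fill pattern:

#include <linux/sched/signal.h>
#include <linux/signal.h>

static void demo_notify(struct task_struct *t)
{
	struct siginfo info;

	clear_siginfo(&info);	/* no stale stack bytes reach userspace */
	info.si_signo = SIGUSR1;
	info.si_code = SI_KERNEL;

	send_sig_info(SIGUSR1, &info, t);
}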
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index b8e0da6c27d6..ac89a93b7c83 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3246,7 +3246,7 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
int *peeked, int *off, int *err);
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
int *err);
-unsigned int datagram_poll(struct file *file, struct socket *sock,
+__poll_t datagram_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait);
int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
struct iov_iter *to, int size);
diff --git a/include/linux/sound.h b/include/linux/sound.h
index 3c6d393c7f29..ec85b7a1f8d1 100644
--- a/include/linux/sound.h
+++ b/include/linux/sound.h
@@ -12,11 +12,9 @@ struct device;
extern int register_sound_special(const struct file_operations *fops, int unit);
extern int register_sound_special_device(const struct file_operations *fops, int unit, struct device *dev);
extern int register_sound_mixer(const struct file_operations *fops, int dev);
-extern int register_sound_midi(const struct file_operations *fops, int dev);
extern int register_sound_dsp(const struct file_operations *fops, int dev);
extern void unregister_sound_special(int unit);
extern void unregister_sound_mixer(int unit);
-extern void unregister_sound_midi(int unit);
extern void unregister_sound_dsp(int unit);
#endif /* _LINUX_SOUND_H */
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 62be8966e837..33c1c698df09 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -92,7 +92,7 @@ void synchronize_srcu(struct srcu_struct *sp);
* relies on normal RCU, it can be called from the CPU which
* is in the idle loop from an RCU point of view or offline.
*/
-static inline int srcu_read_lock_held(struct srcu_struct *sp)
+static inline int srcu_read_lock_held(const struct srcu_struct *sp)
{
if (!debug_lockdep_rcu_enabled())
return 1;
@@ -101,7 +101,7 @@ static inline int srcu_read_lock_held(struct srcu_struct *sp)
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-static inline int srcu_read_lock_held(struct srcu_struct *sp)
+static inline int srcu_read_lock_held(const struct srcu_struct *sp)
{
return 1;
}
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index a949f4f9e4d7..4eda108abee0 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -40,7 +40,7 @@ struct srcu_data {
unsigned long srcu_unlock_count[2]; /* Unlocks per CPU. */
/* Update-side state. */
- raw_spinlock_t __private lock ____cacheline_internodealigned_in_smp;
+ spinlock_t __private lock ____cacheline_internodealigned_in_smp;
struct rcu_segcblist srcu_cblist; /* List of callbacks.*/
unsigned long srcu_gp_seq_needed; /* Furthest future GP needed. */
unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */
@@ -58,7 +58,7 @@ struct srcu_data {
* Node in SRCU combining tree, similar in function to rcu_data.
*/
struct srcu_node {
- raw_spinlock_t __private lock;
+ spinlock_t __private lock;
unsigned long srcu_have_cbs[4]; /* GP seq for children */
/* having CBs, but only */
/* is > ->srcu_gq_seq. */
@@ -78,7 +78,7 @@ struct srcu_struct {
struct srcu_node *level[RCU_NUM_LVLS + 1];
/* First node at each level. */
struct mutex srcu_cb_mutex; /* Serialize CB preparation. */
- raw_spinlock_t __private lock; /* Protect counters */
+ spinlock_t __private lock; /* Protect counters */
struct mutex srcu_gp_mutex; /* Serialize GP work. */
unsigned int srcu_idx; /* Current rdr array element. */
unsigned long srcu_gp_seq; /* Grace-period seq #. */
@@ -107,7 +107,7 @@ struct srcu_struct {
#define __SRCU_STRUCT_INIT(name) \
{ \
.sda = &name##_srcu_data, \
- .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
+ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
.srcu_gp_seq_needed = 0 - 1, \
__SRCU_DEP_MAP_INIT(name) \
}
diff --git a/include/linux/string.h b/include/linux/string.h
index cfd83eb2f926..dd39a690c841 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -11,6 +11,7 @@
extern char *strndup_user(const char __user *, long);
extern void *memdup_user(const void __user *, size_t);
+extern void *vmemdup_user(const void __user *, size_t);
extern void *memdup_user_nul(const void __user *, size_t);
/*
@@ -28,7 +29,7 @@ extern char * strncpy(char *,const char *, __kernel_size_t);
size_t strlcpy(char *, const char *, size_t);
#endif
#ifndef __HAVE_ARCH_STRSCPY
-ssize_t __must_check strscpy(char *, const char *, size_t);
+ssize_t strscpy(char *, const char *, size_t);
#endif
#ifndef __HAVE_ARCH_STRCAT
extern char * strcat(char *, const char *);
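
vmemdup_user() duplicates a user buffer like memdup_user() but may back large copies with vmalloc, so the result must be released with kvfree(). A short sketch:

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/string.h>

static int demo_copy_blob(const void __user *ubuf, size_t len)
{
	void *buf = vmemdup_user(ubuf, len);

	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* ... parse buf ... */

	kvfree(buf);		/* pairs with either backing allocator */
	return 0;
}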
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 71c237e8240e..ed761f751ecb 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -179,7 +179,6 @@ struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred,
int rpc_restart_call_prepare(struct rpc_task *);
int rpc_restart_call(struct rpc_task *);
void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int);
-int rpc_protocol(struct rpc_clnt *);
struct net * rpc_net_ns(struct rpc_clnt *);
size_t rpc_max_payload(struct rpc_clnt *);
size_t rpc_max_bc_payload(struct rpc_clnt *);
diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h
index 221b7a2e5406..5859563e3c1f 100644
--- a/include/linux/sunrpc/xprtrdma.h
+++ b/include/linux/sunrpc/xprtrdma.h
@@ -64,7 +64,7 @@ enum rpcrdma_memreg {
RPCRDMA_MEMWINDOWS,
RPCRDMA_MEMWINDOWS_ASYNC,
RPCRDMA_MTHCAFMR,
- RPCRDMA_FRMR,
+ RPCRDMA_FRWR,
RPCRDMA_ALLPHYSICAL,
RPCRDMA_LAST
};
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index d60b0f5c38d5..cc22a24516d6 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -443,32 +443,8 @@ extern bool pm_save_wakeup_count(unsigned int count);
extern void pm_wakep_autosleep_enabled(bool set);
extern void pm_print_active_wakeup_sources(void);
-static inline void lock_system_sleep(void)
-{
- current->flags |= PF_FREEZER_SKIP;
- mutex_lock(&pm_mutex);
-}
-
-static inline void unlock_system_sleep(void)
-{
- /*
- * Don't use freezer_count() because we don't want the call to
- * try_to_freeze() here.
- *
- * Reason:
- * Fundamentally, we just don't need it, because freezing condition
- * doesn't come into effect until we release the pm_mutex lock,
- * since the freezer always works with pm_mutex held.
- *
- * More importantly, in the case of hibernation,
- * unlock_system_sleep() gets called in snapshot_read() and
- * snapshot_write() when the freezing condition is still in effect.
- * Which means, if we use try_to_freeze() here, it would make them
- * enter the refrigerator, thus causing hibernation to lockup.
- */
- current->flags &= ~PF_FREEZER_SKIP;
- mutex_unlock(&pm_mutex);
-}
+extern void lock_system_sleep(void);
+extern void unlock_system_sleep(void);
#else /* !CONFIG_PM_SLEEP */
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 24ed817082ee..5b1f2a00491c 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -66,6 +66,12 @@ extern void swiotlb_tbl_sync_single(struct device *hwdev,
enum dma_sync_target target);
/* Accessory functions. */
+
+void *swiotlb_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle,
+ gfp_t flags, unsigned long attrs);
+void swiotlb_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_addr, unsigned long attrs);
+
extern void
*swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags);
@@ -115,10 +121,10 @@ extern int
swiotlb_dma_supported(struct device *hwdev, u64 mask);
#ifdef CONFIG_SWIOTLB
-extern void __init swiotlb_free(void);
+extern void __init swiotlb_exit(void);
unsigned int swiotlb_max_segment(void);
#else
-static inline void swiotlb_free(void) { }
+static inline void swiotlb_exit(void) { }
static inline unsigned int swiotlb_max_segment(void) { return 0; }
#endif
@@ -126,4 +132,6 @@ extern void swiotlb_print_info(void);
extern int is_swiotlb_buffer(phys_addr_t paddr);
extern void swiotlb_set_max_segment(unsigned int);
+extern const struct dma_map_ops swiotlb_dma_ops;
+
#endif /* __LINUX_SWIOTLB_H */
diff --git a/include/linux/torture.h b/include/linux/torture.h
index a45702eb3e7b..66272862070b 100644
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -79,7 +79,7 @@ void stutter_wait(const char *title);
int torture_stutter_init(int s);
/* Initialization and cleanup. */
-bool torture_init_begin(char *ttype, bool v, int *runnable);
+bool torture_init_begin(char *ttype, bool v);
void torture_init_end(void);
bool torture_cleanup_begin(void);
void torture_cleanup_end(void);
@@ -96,4 +96,10 @@ void _torture_stop_kthread(char *m, struct task_struct **tp);
#define torture_stop_kthread(n, tp) \
_torture_stop_kthread("Stopping " #n " task", &(tp))
+#ifdef CONFIG_PREEMPT
+#define torture_preempt_schedule() preempt_schedule()
+#else
+#define torture_preempt_schedule()
+#endif
+
#endif /* __LINUX_TORTURE_H */
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index 5a090f5ab335..bcdd3790e94d 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -24,11 +24,6 @@
#define TPM_DIGEST_SIZE 20 /* Max TPM v1.2 PCR size */
-/*
- * Chip num is this value or a valid tpm idx
- */
-#define TPM_ANY_NUM 0xFFFF
-
struct tpm_chip;
struct trusted_key_payload;
struct trusted_key_options;
@@ -50,46 +45,52 @@ struct tpm_class_ops {
unsigned long *timeout_cap);
int (*request_locality)(struct tpm_chip *chip, int loc);
void (*relinquish_locality)(struct tpm_chip *chip, int loc);
+ void (*clk_enable)(struct tpm_chip *chip, bool value);
};
#if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE)
-extern int tpm_is_tpm2(u32 chip_num);
-extern int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf);
-extern int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash);
-extern int tpm_send(u32 chip_num, void *cmd, size_t buflen);
-extern int tpm_get_random(u32 chip_num, u8 *data, size_t max);
-extern int tpm_seal_trusted(u32 chip_num,
+extern int tpm_is_tpm2(struct tpm_chip *chip);
+extern int tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf);
+extern int tpm_pcr_extend(struct tpm_chip *chip, int pcr_idx, const u8 *hash);
+extern int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen);
+extern int tpm_get_random(struct tpm_chip *chip, u8 *data, size_t max);
+extern int tpm_seal_trusted(struct tpm_chip *chip,
struct trusted_key_payload *payload,
struct trusted_key_options *options);
-extern int tpm_unseal_trusted(u32 chip_num,
+extern int tpm_unseal_trusted(struct tpm_chip *chip,
struct trusted_key_payload *payload,
struct trusted_key_options *options);
#else
-static inline int tpm_is_tpm2(u32 chip_num)
+static inline int tpm_is_tpm2(struct tpm_chip *chip)
{
return -ENODEV;
}
-static inline int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf) {
+static inline int tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
+{
return -ENODEV;
}
-static inline int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash) {
+static inline int tpm_pcr_extend(struct tpm_chip *chip, int pcr_idx,
+ const u8 *hash)
+{
return -ENODEV;
}
-static inline int tpm_send(u32 chip_num, void *cmd, size_t buflen) {
+static inline int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen)
+{
return -ENODEV;
}
-static inline int tpm_get_random(u32 chip_num, u8 *data, size_t max) {
+static inline int tpm_get_random(struct tpm_chip *chip, u8 *data, size_t max)
+{
return -ENODEV;
}
-static inline int tpm_seal_trusted(u32 chip_num,
+static inline int tpm_seal_trusted(struct tpm_chip *chip,
struct trusted_key_payload *payload,
struct trusted_key_options *options)
{
return -ENODEV;
}
-static inline int tpm_unseal_trusted(u32 chip_num,
+static inline int tpm_unseal_trusted(struct tpm_chip *chip,
struct trusted_key_payload *payload,
struct trusted_key_options *options)
{
	return -ENODEV;
}
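
Callers now pass an explicit struct tpm_chip * instead of the removed TPM_ANY_NUM; as used elsewhere in this series, a NULL chip appears to select the default (first) TPM, though treat that as an assumption here. A hedged sketch:

#include <linux/tpm.h>

static int demo_seed_from_tpm(u8 *buf, size_t len)
{
	/* NULL asks the TPM core for the default chip (assumption). */
	int rc = tpm_get_random(NULL, buf, len);

	return rc < 0 ? rc : 0;	/* rc is a byte count on success */
}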
diff --git a/drivers/char/tpm/tpm_eventlog.h b/include/linux/tpm_eventlog.h
index 204466cc4d05..20d9da77fc11 100644
--- a/drivers/char/tpm/tpm_eventlog.h
+++ b/include/linux/tpm_eventlog.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __TPM_EVENTLOG_H__
-#define __TPM_EVENTLOG_H__
+#ifndef __LINUX_TPM_EVENTLOG_H__
+#define __LINUX_TPM_EVENTLOG_H__
#include <crypto/hash_info.h>
@@ -10,6 +10,9 @@
#define ACPI_TCPA_SIG "TCPA" /* 0x41504354 /'TCPA' */
#define TPM2_ACTIVE_PCR_BANKS 3
+#define EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2 0x1
+#define EFI_TCG2_EVENT_LOG_FORMAT_TCG_2 0x2
+
#ifdef CONFIG_PPC64
#define do_endian_conversion(x) be32_to_cpu(x)
#else
@@ -105,6 +108,11 @@ struct tcg_event_field {
u8 event[0];
} __packed;
+struct tpm2_digest {
+ u16 alg_id;
+ u8 digest[SHA512_DIGEST_SIZE];
+} __packed;
+
struct tcg_pcr_event2 {
u32 pcr_idx;
u32 event_type;
@@ -113,26 +121,4 @@ struct tcg_pcr_event2 {
struct tcg_event_field event;
} __packed;
-extern const struct seq_operations tpm2_binary_b_measurements_seqops;
-
-#if defined(CONFIG_ACPI)
-int tpm_read_log_acpi(struct tpm_chip *chip);
-#else
-static inline int tpm_read_log_acpi(struct tpm_chip *chip)
-{
- return -ENODEV;
-}
-#endif
-#if defined(CONFIG_OF)
-int tpm_read_log_of(struct tpm_chip *chip);
-#else
-static inline int tpm_read_log_of(struct tpm_chip *chip)
-{
- return -ENODEV;
-}
-#endif
-
-int tpm_bios_log_setup(struct tpm_chip *chip);
-void tpm_bios_log_teardown(struct tpm_chip *chip);
-
#endif
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index a26ffbe09e71..c94f466d57ef 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -137,11 +137,8 @@ extern void syscall_unregfunc(void);
\
if (!(cond)) \
return; \
- if (rcucheck) { \
- if (WARN_ON_ONCE(rcu_irq_enter_disabled())) \
- return; \
+ if (rcucheck) \
rcu_irq_enter_irqson(); \
- } \
rcu_read_lock_sched_notrace(); \
it_func_ptr = rcu_dereference_sched((tp)->funcs); \
if (it_func_ptr) { \
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
index 3bc5144b1c7e..1ef64d4ad887 100644
--- a/include/linux/tty_ldisc.h
+++ b/include/linux/tty_ldisc.h
@@ -187,7 +187,7 @@ struct tty_ldisc_ops {
long (*compat_ioctl)(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg);
void (*set_termios)(struct tty_struct *tty, struct ktermios *old);
- unsigned int (*poll)(struct tty_struct *, struct file *,
+ __poll_t (*poll)(struct tty_struct *, struct file *,
struct poll_table_struct *);
int (*hangup)(struct tty_struct *tty);
diff --git a/include/linux/w1-gpio.h b/include/linux/w1-gpio.h
index d58594a32324..78901ecd2f95 100644
--- a/include/linux/w1-gpio.h
+++ b/include/linux/w1-gpio.h
@@ -10,16 +10,15 @@
#ifndef _LINUX_W1_GPIO_H
#define _LINUX_W1_GPIO_H
+struct gpio_desc;
+
/**
* struct w1_gpio_platform_data - Platform-dependent data for w1-gpio
- * @pin: GPIO pin to use
- * @is_open_drain: GPIO pin is configured as open drain
*/
struct w1_gpio_platform_data {
- unsigned int pin;
- unsigned int is_open_drain:1;
+ struct gpio_desc *gpiod;
+ struct gpio_desc *pullup_gpiod;
void (*enable_external_pullup)(int enable);
- unsigned int ext_pullup_enable_pin;
unsigned int pullup_duration;
};
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 158715445ffb..55a611486bac 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -206,14 +206,16 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
/*
* Wakeup macros to be used to report events to the targets.
*/
+#define poll_to_key(m) ((void *)(__force uintptr_t)(__poll_t)(m))
+#define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m))
#define wake_up_poll(x, m) \
- __wake_up(x, TASK_NORMAL, 1, (void *) (m))
+ __wake_up(x, TASK_NORMAL, 1, poll_to_key(m))
#define wake_up_locked_poll(x, m) \
- __wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
+ __wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m))
#define wake_up_interruptible_poll(x, m) \
- __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
+ __wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m))
#define wake_up_interruptible_sync_poll(x, m) \
- __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
+ __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, poll_to_key(m))
#define ___wait_cond_timeout(condition) \
({ \
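
poll_to_key()/key_to_poll() make explicit the old convention of passing a poll mask through the wait-queue key pointer, now type-safe under __poll_t. A producer-side sketch:

#include <linux/poll.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_waitq);

static void demo_data_arrived(void)
{
	/* Wake only waiters interested in readability. */
	wake_up_interruptible_poll(&demo_waitq, EPOLLIN | EPOLLRDNORM);
}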
diff --git a/include/media/lirc_dev.h b/include/media/lirc_dev.h
index 857da67bd931..d9c143d17f70 100644
--- a/include/media/lirc_dev.h
+++ b/include/media/lirc_dev.h
@@ -185,7 +185,7 @@ void *lirc_get_pdata(struct file *file);
*/
int lirc_dev_fop_open(struct inode *inode, struct file *file);
int lirc_dev_fop_close(struct inode *inode, struct file *file);
-unsigned int lirc_dev_fop_poll(struct file *file, poll_table *wait);
+__poll_t lirc_dev_fop_poll(struct file *file, poll_table *wait);
long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
ssize_t lirc_dev_fop_read(struct file *file, char __user *buffer, size_t length,
loff_t *ppos);
diff --git a/include/media/media-devnode.h b/include/media/media-devnode.h
index 511615d3bf6f..dc2f64e1b08f 100644
--- a/include/media/media-devnode.h
+++ b/include/media/media-devnode.h
@@ -56,7 +56,7 @@ struct media_file_operations {
struct module *owner;
ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
- unsigned int (*poll) (struct file *, struct poll_table_struct *);
+ __poll_t (*poll) (struct file *, struct poll_table_struct *);
long (*ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
int (*open) (struct file *);
diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h
index 4d8cb0796bc6..b7e42a1b0910 100644
--- a/include/media/soc_camera.h
+++ b/include/media/soc_camera.h
@@ -117,7 +117,7 @@ struct soc_camera_host_ops {
int (*get_parm)(struct soc_camera_device *, struct v4l2_streamparm *);
int (*set_parm)(struct soc_camera_device *, struct v4l2_streamparm *);
int (*enum_framesizes)(struct soc_camera_device *, struct v4l2_frmsizeenum *);
- unsigned int (*poll)(struct file *, poll_table *);
+ __poll_t (*poll)(struct file *, poll_table *);
};
#define SOCAM_SENSOR_INVERT_PCLK (1 << 0)
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index dacfe54057f8..a9ced6bbee55 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -1037,7 +1037,7 @@ int v4l2_ctrl_subscribe_event(struct v4l2_fh *fh,
* @file: pointer to struct file
* @wait: pointer to struct poll_table_struct
*/
-unsigned int v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait);
+__poll_t v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait);
/* Helpers for ioctl_ops */
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index 28a686eb7d09..fa99f6f66712 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -152,7 +152,7 @@ struct v4l2_file_operations {
struct module *owner;
ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
- unsigned int (*poll) (struct file *, struct poll_table_struct *);
+ __poll_t (*poll) (struct file *, struct poll_table_struct *);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
long (*compat_ioctl32) (struct file *, unsigned int, unsigned long);
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index e157d5c9b224..3d07ba3a8262 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -297,7 +297,7 @@ int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
* indicate that a non-blocking write can be performed, while read will be
* returned in case of the destination queue.
*/
-unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct poll_table_struct *wait);
/**
@@ -601,7 +601,7 @@ int v4l2_m2m_ioctl_streamon(struct file *file, void *fh,
int v4l2_m2m_ioctl_streamoff(struct file *file, void *fh,
enum v4l2_buf_type type);
int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma);
-unsigned int v4l2_m2m_fop_poll(struct file *file, poll_table *wait);
+__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait);
#endif /* _MEDIA_V4L2_MEM2MEM_H */
diff --git a/include/media/videobuf-core.h b/include/media/videobuf-core.h
index d760aa73ebbb..0bda0adc744f 100644
--- a/include/media/videobuf-core.h
+++ b/include/media/videobuf-core.h
@@ -219,7 +219,7 @@ ssize_t videobuf_read_stream(struct videobuf_queue *q,
ssize_t videobuf_read_one(struct videobuf_queue *q,
char __user *data, size_t count, loff_t *ppos,
int nonblocking);
-unsigned int videobuf_poll_stream(struct file *file,
+__poll_t videobuf_poll_stream(struct file *file,
struct videobuf_queue *q,
poll_table *wait);
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index ef9b64398c8c..e55efc62a950 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -871,7 +871,7 @@ unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
* The return values from this function are intended to be directly returned
* from poll handler in driver.
*/
-unsigned int vb2_core_poll(struct vb2_queue *q, struct file *file,
+__poll_t vb2_core_poll(struct vb2_queue *q, struct file *file,
poll_table *wait);
size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
diff --git a/include/media/videobuf2-v4l2.h b/include/media/videobuf2-v4l2.h
index 036127c54bbf..c2fa55657440 100644
--- a/include/media/videobuf2-v4l2.h
+++ b/include/media/videobuf2-v4l2.h
@@ -226,8 +226,7 @@ void vb2_queue_release(struct vb2_queue *q);
* The return values from this function are intended to be directly returned
* from poll handler in driver.
*/
-unsigned int vb2_poll(struct vb2_queue *q, struct file *file,
- poll_table *wait);
+__poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait);
/*
* The following functions are not part of the vb2 core API, but are simple
@@ -262,7 +261,7 @@ ssize_t vb2_fop_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos);
ssize_t vb2_fop_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos);
-unsigned int vb2_fop_poll(struct file *file, poll_table *wait);
+__poll_t vb2_fop_poll(struct file *file, poll_table *wait);
#ifndef CONFIG_MMU
unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags);
diff --git a/include/misc/cxl.h b/include/misc/cxl.h
index 480d50a0b8ba..b712be544f8c 100644
--- a/include/misc/cxl.h
+++ b/include/misc/cxl.h
@@ -267,7 +267,7 @@ int cxl_fd_open(struct inode *inode, struct file *file);
int cxl_fd_release(struct inode *inode, struct file *file);
long cxl_fd_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int cxl_fd_mmap(struct file *file, struct vm_area_struct *vm);
-unsigned int cxl_fd_poll(struct file *file, struct poll_table_struct *poll);
+__poll_t cxl_fd_poll(struct file *file, struct poll_table_struct *poll);
ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count,
loff_t *off);
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index e89cff0c4c23..ec9d6bc65855 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -271,7 +271,7 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
int flags);
int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
size_t len, int flags);
-uint bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait);
+__poll_t bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait);
int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
int bt_sock_wait_ready(struct sock *sk, unsigned long flags);
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 8e1bf9ae4a5e..6692d67e9245 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -307,7 +307,7 @@ void inet_csk_prepare_forced_close(struct sock *sk);
/*
* LISTEN is a special case for poll.
*/
-static inline unsigned int inet_csk_listen_poll(const struct sock *sk)
+static inline __poll_t inet_csk_listen_poll(const struct sock *sk)
{
return !reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue) ?
(POLLIN | POLLRDNORM) : 0;
diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
index 070e93a17c59..f4c21b5a1242 100644
--- a/include/net/iucv/af_iucv.h
+++ b/include/net/iucv/af_iucv.h
@@ -153,7 +153,7 @@ struct iucv_sock_list {
atomic_t autobind_name;
};
-unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
+__poll_t iucv_sock_poll(struct file *file, struct socket *sock,
poll_table *wait);
void iucv_sock_link(struct iucv_sock_list *l, struct sock *s);
void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s);
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 20c0c1be2ca7..f7ae6b0a21d0 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -107,7 +107,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
int sctp_inet_listen(struct socket *sock, int backlog);
void sctp_write_space(struct sock *sk);
void sctp_data_ready(struct sock *sk);
-unsigned int sctp_poll(struct file *file, struct socket *sock,
+__poll_t sctp_poll(struct file *file, struct socket *sock,
poll_table *wait);
void sctp_sock_rfree(struct sk_buff *skb);
void sctp_copy_sock(struct sock *newsk, struct sock *sk,
diff --git a/include/net/sock.h b/include/net/sock.h
index 73b7830b0bb8..63731289186a 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1447,10 +1447,8 @@ do { \
} while (0)
#ifdef CONFIG_LOCKDEP
-static inline bool lockdep_sock_is_held(const struct sock *csk)
+static inline bool lockdep_sock_is_held(const struct sock *sk)
{
- struct sock *sk = (struct sock *)csk;
-
return lockdep_is_held(&sk->sk_lock) ||
lockdep_is_held(&sk->sk_lock.slock);
}
@@ -1585,7 +1583,7 @@ int sock_no_connect(struct socket *, struct sockaddr *, int, int);
int sock_no_socketpair(struct socket *, struct socket *);
int sock_no_accept(struct socket *, struct socket *, int, bool);
int sock_no_getname(struct socket *, struct sockaddr *, int *, int);
-unsigned int sock_no_poll(struct file *, struct socket *,
+__poll_t sock_no_poll(struct file *, struct socket *,
struct poll_table_struct *);
int sock_no_ioctl(struct socket *, unsigned int, unsigned long);
int sock_no_listen(struct socket *, int);
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 093e967a2960..58278669cc55 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -387,7 +387,7 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
void tcp_close(struct sock *sk, long timeout);
void tcp_init_sock(struct sock *sk);
void tcp_init_transfer(struct sock *sk, int bpf_op);
-unsigned int tcp_poll(struct file *file, struct socket *sock,
+__poll_t tcp_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait);
int tcp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
diff --git a/include/net/udp.h b/include/net/udp.h
index 6c759c8594e2..850a8e581cce 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -275,7 +275,7 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
int udp_init_sock(struct sock *sk);
int __udp_disconnect(struct sock *sk, int flags);
int udp_disconnect(struct sock *sk, int flags);
-unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait);
+__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait);
struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
netdev_features_t features,
bool is_ipv6);
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index 18c564f60e93..d656809f1217 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -94,7 +94,7 @@ struct rdma_dev_addr {
* The dev_addr->net field must be initialized.
*/
int rdma_translate_ip(const struct sockaddr *addr,
- struct rdma_dev_addr *dev_addr, u16 *vlan_id);
+ struct rdma_dev_addr *dev_addr);
/**
* rdma_resolve_ip - Resolve source and destination IP addresses to
@@ -131,10 +131,9 @@ void rdma_copy_addr(struct rdma_dev_addr *dev_addr,
int rdma_addr_size(struct sockaddr *addr);
-int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id);
int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
const union ib_gid *dgid,
- u8 *smac, u16 *vlan_id, int *if_index,
+ u8 *dmac, const struct net_device *ndev,
int *hoplimit);
static inline u16 ib_addr_get_pkey(struct rdma_dev_addr *dev_addr)
@@ -198,34 +197,15 @@ static inline void rdma_gid2ip(struct sockaddr *out, const union ib_gid *gid)
}
}
-static inline void iboe_addr_get_sgid(struct rdma_dev_addr *dev_addr,
- union ib_gid *gid)
-{
- struct net_device *dev;
- struct in_device *ip4;
-
- dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
- if (dev) {
- ip4 = in_dev_get(dev);
- if (ip4 && ip4->ifa_list && ip4->ifa_list->ifa_address)
- ipv6_addr_set_v4mapped(ip4->ifa_list->ifa_address,
- (struct in6_addr *)gid);
-
- if (ip4)
- in_dev_put(ip4);
-
- dev_put(dev);
- }
-}
-
+/*
+ * The rdma_get/set_sgid/dgid() APIs are applicable to IB and iWARP.
+ * They are not applicable to RoCE, whose GIDs are derived from the
+ * IP addresses.
+ */
static inline void rdma_addr_get_sgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid)
{
- if (dev_addr->transport == RDMA_TRANSPORT_IB &&
- dev_addr->dev_type != ARPHRD_INFINIBAND)
- iboe_addr_get_sgid(dev_addr, gid);
- else
- memcpy(gid, dev_addr->src_dev_addr +
- rdma_addr_gid_offset(dev_addr), sizeof *gid);
+ memcpy(gid, dev_addr->src_dev_addr + rdma_addr_gid_offset(dev_addr),
+ sizeof(*gid));
}
static inline void rdma_addr_set_sgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid)
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index 1f7f604db5aa..811cfcfcbe3d 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -549,12 +549,12 @@ int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
struct rdma_ah_attr *ah_attr);
/**
- * ib_init_ah_from_path - Initialize address handle attributes based on an SA
- * path record.
+ * ib_init_ah_attr_from_path - Initialize address handle attributes based on
+ * an SA path record.
*/
-int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
- struct sa_path_rec *rec,
- struct rdma_ah_attr *ah_attr);
+int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
+ struct sa_path_rec *rec,
+ struct rdma_ah_attr *ah_attr);
/**
* ib_sa_pack_path - Convert a path record from struct ib_sa_path_rec
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index fd84cda5ed7c..5263c86fd103 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -63,6 +63,7 @@
#include <linux/uaccess.h>
#include <linux/cgroup_rdma.h>
#include <uapi/rdma/ib_user_verbs.h>
+#include <rdma/restrack.h>
#define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN
@@ -300,11 +301,6 @@ struct ib_tm_caps {
u32 max_sge;
};
-enum ib_cq_creation_flags {
- IB_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0,
- IB_CQ_FLAGS_IGNORE_OVERRUN = 1 << 1,
-};
-
struct ib_cq_init_attr {
unsigned int cqe;
int comp_vector;
@@ -983,9 +979,9 @@ struct ib_wc {
u32 invalidate_rkey;
} ex;
u32 src_qp;
+ u32 slid;
int wc_flags;
u16 pkey_index;
- u32 slid;
u8 sl;
u8 dlid_path_bits;
u8 port_num; /* valid only for DR SMPs on switches */
@@ -1082,6 +1078,7 @@ enum ib_qp_type {
IB_QPT_XRC_INI = 9,
IB_QPT_XRC_TGT,
IB_QPT_MAX,
+ IB_QPT_DRIVER = 0xFF,
/* Reserve a range for qp types internal to the low level driver.
* These qp types will not be visible at the IB core layer, so the
* IB_QPT_MAX usages should not be affected in the core layer
@@ -1529,6 +1526,7 @@ struct ib_pd {
* Implementation details of the RDMA core, don't use in drivers:
*/
struct ib_mr *__internal_mr;
+ struct rdma_restrack_entry res;
};
struct ib_xrcd {
@@ -1538,6 +1536,10 @@ struct ib_xrcd {
struct mutex tgt_qp_mutex;
struct list_head tgt_qp_list;
+ /*
+ * Implementation details of the RDMA core, don't use in drivers:
+ */
+ struct rdma_restrack_entry res;
};
struct ib_ah {
@@ -1569,6 +1571,10 @@ struct ib_cq {
struct irq_poll iop;
struct work_struct work;
};
+ /*
+ * Implementation details of the RDMA core, don't use in drivers:
+ */
+ struct rdma_restrack_entry res;
};
struct ib_srq {
@@ -1745,6 +1751,11 @@ struct ib_qp {
struct ib_rwq_ind_table *rwq_ind_tbl;
struct ib_qp_security *qp_sec;
u8 port;
+
+ /*
+ * Implementation details of the RDMA core, don't use in drivers:
+ */
+ struct rdma_restrack_entry res;
};
struct ib_mr {
@@ -2351,6 +2362,10 @@ struct ib_device {
#endif
u32 index;
+ /*
+ * Implementation details of the RDMA core, don't use in drivers
+ */
+ struct rdma_restrack_root res;
/**
* The following mandatory functions are used only at device
@@ -2836,8 +2851,7 @@ int ib_modify_port(struct ib_device *device,
struct ib_port_modify *port_modify);
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
- enum ib_gid_type gid_type, struct net_device *ndev,
- u8 *port_num, u16 *index);
+ struct net_device *ndev, u8 *port_num, u16 *index);
int ib_find_pkey(struct ib_device *device,
u8 port_num, u16 pkey, u16 *index);
@@ -2858,7 +2872,7 @@ enum ib_pd_flags {
struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
const char *caller);
#define ib_alloc_pd(device, flags) \
- __ib_alloc_pd((device), (flags), __func__)
+ __ib_alloc_pd((device), (flags), KBUILD_MODNAME)
void ib_dealloc_pd(struct ib_pd *pd);
/**
@@ -2905,7 +2919,7 @@ int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
/**
- * ib_init_ah_from_wc - Initializes address handle attributes from a
+ * ib_init_ah_attr_from_wc - Initializes address handle attributes from a
* work completion.
* @device: Device on which the received message arrived.
* @port_num: Port on which the received message arrived.
@@ -2915,9 +2929,9 @@ int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
* @ah_attr: Returned attributes that can be used when creating an address
* handle for replying to the message.
*/
-int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
- const struct ib_wc *wc, const struct ib_grh *grh,
- struct rdma_ah_attr *ah_attr);
+int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
+ const struct ib_wc *wc, const struct ib_grh *grh,
+ struct rdma_ah_attr *ah_attr);
/**
* ib_create_ah_from_wc - Creates an address handle associated with the
@@ -3135,8 +3149,12 @@ static inline int ib_post_recv(struct ib_qp *qp,
return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
}
-struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
- int nr_cqe, int comp_vector, enum ib_poll_context poll_ctx);
+struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
+ int nr_cqe, int comp_vector,
+ enum ib_poll_context poll_ctx, const char *caller);
+#define ib_alloc_cq(device, priv, nr_cqe, comp_vect, poll_ctx) \
+ __ib_alloc_cq((device), (priv), (nr_cqe), (comp_vect), (poll_ctx), KBUILD_MODNAME)
+
void ib_free_cq(struct ib_cq *cq);
int ib_process_cq_direct(struct ib_cq *cq, int budget);
@@ -3560,8 +3578,11 @@ int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
/**
* ib_alloc_xrcd - Allocates an XRC domain.
* @device: The device on which to allocate the XRC domain.
+ * @caller: Module name for kernel consumers
*/
-struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);
+struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller);
+#define ib_alloc_xrcd(device) \
+ __ib_alloc_xrcd((device), KBUILD_MODNAME)
/**
* ib_dealloc_xrcd - Deallocates an XRC domain.
@@ -3793,8 +3814,7 @@ static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
u32 port_num)
{
- if ((rdma_protocol_roce(dev, port_num)) ||
- (rdma_protocol_iwarp(dev, port_num)))
+ if (rdma_protocol_roce(dev, port_num))
return RDMA_AH_ATTR_TYPE_ROCE;
else if ((rdma_protocol_ib(dev, port_num)) &&
(rdma_cap_opa_ah(dev, port_num)))
@@ -3850,4 +3870,12 @@ ib_get_vector_affinity(struct ib_device *device, int comp_vector)
}
+/**
+ * rdma_roce_rescan_device - Rescan all of the network devices in the system
+ * and add their gids, as needed, to the relevant RoCE devices.
+ *
+ * @device: the rdma device
+ */
+void rdma_roce_rescan_device(struct ib_device *ibdev);
+
#endif /* IB_VERBS_H */
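
The switch from __func__ to KBUILD_MODNAME in the ib_alloc_pd() wrapper, together with the new __ib_alloc_cq()/__ib_alloc_xrcd() wrappers built the same way, attributes a kernel-owned resource to the module that allocated it, which is what the restrack entries introduced below record in their kern_name field. A hedged sketch of the caller side (everything except the verbs calls is invented):

	/* Compiled into foo_ulp.ko, so KBUILD_MODNAME expands to "foo_ulp". */
	struct ib_pd *pd = ib_alloc_pd(device, 0);
	struct ib_cq *cq = ib_alloc_cq(device, NULL, 16, 0, IB_POLL_SOFTIRQ);

	if (IS_ERR(pd) || IS_ERR(cq))
		return -ENOMEM;	/* simplified error handling */
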
diff --git a/include/rdma/opa_addr.h b/include/rdma/opa_addr.h
index f68fca296631..2bbb7a67e643 100644
--- a/include/rdma/opa_addr.h
+++ b/include/rdma/opa_addr.h
@@ -114,4 +114,20 @@ static inline u32 opa_get_mcast_base(u32 nr_top_bits)
return (be32_to_cpu(OPA_LID_PERMISSIVE) << (32 - nr_top_bits));
}
+/* Check for a valid unicast LID for non-SM traffic types */
+static inline bool rdma_is_valid_unicast_lid(struct rdma_ah_attr *attr)
+{
+ if (attr->type == RDMA_AH_ATTR_TYPE_IB) {
+ if (!rdma_ah_get_dlid(attr) ||
+ rdma_ah_get_dlid(attr) >=
+ be32_to_cpu(IB_MULTICAST_LID_BASE))
+ return false;
+ } else if (attr->type == RDMA_AH_ATTR_TYPE_OPA) {
+ if (!rdma_ah_get_dlid(attr) ||
+ rdma_ah_get_dlid(attr) >=
+ opa_get_mcast_base(OPA_MCAST_NR))
+ return false;
+ }
+ return true;
+}
#endif /* OPA_ADDR_H */
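
Since rdma_is_valid_unicast_lid() returns true for any address handle type other than IB or OPA, callers can apply it unconditionally; a minimal usage sketch (the surrounding context is assumed):

	if (!rdma_is_valid_unicast_lid(&ah_attr))
		return -EINVAL;	/* DLID is zero or falls in the multicast range */
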
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index 3d2eed3c4e75..6538a5cc27b6 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -413,4 +413,23 @@ bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason);
const void *rdma_consumer_reject_data(struct rdma_cm_id *id,
struct rdma_cm_event *ev, u8 *data_len);
+/**
+ * rdma_read_gids - Return the SGID and DGID used for establishing
+ * a connection. This can be used after rdma_resolve_addr()
+ * on the client side, and on a new connection on the
+ * server side. This is applicable to IB, RoCE and iWARP.
+ * If the cm_id is not yet bound to an RDMA device, it doesn't
+ * copy an SGID or DGID to the given pointers.
+ * @id: Communication identifier whose GIDs are queried.
+ * @sgid: Pointer to SGID where SGID will be returned. It is optional.
+ * @dgid: Pointer to DGID where DGID will be returned. It is optional.
+ * Note: This API should not be used by any new ULPs or new code.
+ * Instead, users interested in querying GIDs should refer to the path
+ * record of the rdma_cm_id to query the GIDs.
+ * This API is provided for compatibility with existing users.
+ */
+
+void rdma_read_gids(struct rdma_cm_id *cm_id, union ib_gid *sgid,
+ union ib_gid *dgid);
+
#endif /* RDMA_CM_H */
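
A short usage sketch for the compatibility helper above (cm_id is assumed to be an established rdma_cm_id); either output pointer may be NULL when only one GID is of interest:

	union ib_gid sgid, dgid;

	rdma_read_gids(cm_id, &sgid, &dgid);	/* both GIDs */
	rdma_read_gids(cm_id, &sgid, NULL);	/* source GID only */
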
diff --git a/include/rdma/rdma_cm_ib.h b/include/rdma/rdma_cm_ib.h
index 6947a6ba2557..6a69d71a21a5 100644
--- a/include/rdma/rdma_cm_ib.h
+++ b/include/rdma/rdma_cm_ib.h
@@ -36,17 +36,17 @@
#include <rdma/rdma_cm.h>
/**
- * rdma_set_ib_paths - Manually sets the path records used to establish a
+ * rdma_set_ib_path - Manually sets the path record used to establish a
* connection.
* @id: Connection identifier associated with the request.
* @path_rec: Reference to the path record
*
* This call permits a user to specify routing information for rdma_cm_id's
- * bound to Infiniband devices. It is called on the client side of a
+ * bound to InfiniBand devices. It is called on the client side of a
* connection and replaces the call to rdma_resolve_route.
*/
-int rdma_set_ib_paths(struct rdma_cm_id *id,
- struct sa_path_rec *path_rec, int num_paths);
+int rdma_set_ib_path(struct rdma_cm_id *id,
+ struct sa_path_rec *path_rec);
/* Global qkey for UDP QPs and multicast groups. */
#define RDMA_UDP_QKEY 0x01234567
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
index 1ba84a78f1c5..4118324a0310 100644
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -228,13 +228,6 @@ struct rvt_driver_provided {
int (*port_callback)(struct ib_device *, u8, struct kobject *);
/*
- * Returns a string to represent the device for which is being
- * registered. This is primarily used for error and debug messages on
- * the console.
- */
- const char * (*get_card_name)(struct rvt_dev_info *rdi);
-
- /*
* Returns a pointer to the underlying hardware's PCI device. This is
* used to display information as to what hardware is being referenced
* in an output message
@@ -419,6 +412,30 @@ struct rvt_dev_info {
};
+/**
+ * rvt_set_ibdev_name - Craft an IB device name from client info
+ * @rdi: pointer to the client rvt_dev_info structure
+ * @name: client specific name
+ * @unit: client specific unit number.
+ */
+static inline void rvt_set_ibdev_name(struct rvt_dev_info *rdi,
+ const char *fmt, const char *name,
+ const int unit)
+{
+ snprintf(rdi->ibdev.name, sizeof(rdi->ibdev.name), fmt, name, unit);
+}
+
+/**
+ * rvt_get_ibdev_name - return the IB name
+ * @rdi: rdmavt device
+ *
+ * Return the registered name of the device.
+ */
+static inline const char *rvt_get_ibdev_name(const struct rvt_dev_info *rdi)
+{
+ return rdi->ibdev.name;
+}
+
static inline struct rvt_pd *ibpd_to_rvtpd(struct ib_pd *ibpd)
{
return container_of(ibpd, struct rvt_pd, ibpd);
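
With the get_card_name() callback gone, a driver supplies its device name once at registration time via the new helper; a sketch of what that call might look like for a driver whose devices are named like "foo_0" (the format string and names are illustrative):

	rvt_set_ibdev_name(rdi, "%s_%d", "foo", unit);
	pr_info("registered %s\n", rvt_get_ibdev_name(rdi));
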
diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h
new file mode 100644
index 000000000000..c2d81167c858
--- /dev/null
+++ b/include/rdma/restrack.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
+ */
+
+#ifndef _RDMA_RESTRACK_H_
+#define _RDMA_RESTRACK_H_
+
+#include <linux/typecheck.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/kref.h>
+#include <linux/completion.h>
+
+/**
+ * enum rdma_restrack_type - HW objects to track
+ */
+enum rdma_restrack_type {
+ /**
+ * @RDMA_RESTRACK_PD: Protection domain (PD)
+ */
+ RDMA_RESTRACK_PD,
+ /**
+ * @RDMA_RESTRACK_CQ: Completion queue (CQ)
+ */
+ RDMA_RESTRACK_CQ,
+ /**
+ * @RDMA_RESTRACK_QP: Queue pair (QP)
+ */
+ RDMA_RESTRACK_QP,
+ /**
+ * @RDMA_RESTRACK_XRCD: XRC domain (XRCD)
+ */
+ RDMA_RESTRACK_XRCD,
+ /**
+ * @RDMA_RESTRACK_MAX: Last entry, used for array declarations
+ */
+ RDMA_RESTRACK_MAX
+};
+
+#define RDMA_RESTRACK_HASH_BITS 8
+/**
+ * struct rdma_restrack_root - main resource tracking management
+ * entity, per-device
+ */
+struct rdma_restrack_root {
+ /**
+ * @rwsem: Read/write lock to protect lists
+ */
+ struct rw_semaphore rwsem;
+ /**
+ * @hash: global database for all resources per-device
+ */
+ DECLARE_HASHTABLE(hash, RDMA_RESTRACK_HASH_BITS);
+};
+
+/**
+ * struct rdma_restrack_entry - metadata per-entry
+ */
+struct rdma_restrack_entry {
+ /**
+ * @valid: validity indicator
+ *
+ * The entries are filled during rdma_restrack_add() and
+ * an attempt to free them can be made during rdma_restrack_del().
+ *
+ * As an example of that, see the mlx5 QPs with type MLX5_IB_QPT_HW_GSI.
+ */
+ bool valid;
+ /**
+ * @kref: Protect destroy of the resource
+ */
+ struct kref kref;
+ /**
+ * @comp: Signal that all consumers of the resource have completed their work
+ */
+ struct completion comp;
+ /**
+ * @task: owner of resource tracking entity
+ *
+ * There are two types of entities: created by user and created
+ * by kernel.
+ *
+ * This is relevant for the entities created by users.
+ * For the entities created by kernel, this pointer will be NULL.
+ */
+ struct task_struct *task;
+ /**
+ * @kern_name: name of owner for the kernel created entities.
+ */
+ const char *kern_name;
+ /**
+ * @node: hash table entry
+ */
+ struct hlist_node node;
+ /**
+ * @type: various objects in restrack database
+ */
+ enum rdma_restrack_type type;
+};
+
+/**
+ * rdma_restrack_init() - initialize resource tracking
+ * @res: resource tracking root
+ */
+void rdma_restrack_init(struct rdma_restrack_root *res);
+
+/**
+ * rdma_restrack_clean() - clean resource tracking
+ * @res: resource tracking root
+ */
+void rdma_restrack_clean(struct rdma_restrack_root *res);
+
+/**
+ * rdma_restrack_count() - return the current usage of a specific object type
+ * @res: resource tracking root
+ * @type: actual type of the object to operate on
+ * @ns: PID namespace
+ */
+int rdma_restrack_count(struct rdma_restrack_root *res,
+ enum rdma_restrack_type type,
+ struct pid_namespace *ns);
+
+/**
+ * rdma_restrack_add() - add object to the resource tracking database
+ * @res: resource entry
+ */
+void rdma_restrack_add(struct rdma_restrack_entry *res);
+
+/**
+ * rdma_restrack_del() - delete object from the resource tracking database
+ * @res: resource entry
+ */
+void rdma_restrack_del(struct rdma_restrack_entry *res);
+
+/**
+ * rdma_is_kernel_res() - check the owner of resource
+ * @res: resource entry
+ */
+static inline bool rdma_is_kernel_res(struct rdma_restrack_entry *res)
+{
+ return !res->task;
+}
+
+/**
+ * rdma_restrack_get() - grab to protect resource from release
+ * @res: resource entry
+ */
+int __must_check rdma_restrack_get(struct rdma_restrack_entry *res);
+
+/**
+ * rdma_restrack_put() - release resource
+ * @res: resource entry
+ */
+int rdma_restrack_put(struct rdma_restrack_entry *res);
+#endif /* _RDMA_RESTRACK_H_ */
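
Putting the pieces together, a rough sketch of the intended lifecycle as driven from the RDMA core (drivers are explicitly told not to touch the res fields): the core fills in the entry's type and adds it after creating the object, and deletion blocks until every rdma_restrack_get() has been balanced by rdma_restrack_put():

	/* After the core creates a PD: */
	pd->res.type = RDMA_RESTRACK_PD;
	rdma_restrack_add(&pd->res);

	/* Later, e.g. from a netlink dumper in a given PID namespace: */
	n = rdma_restrack_count(&device->res, RDMA_RESTRACK_PD,
				task_active_pid_ns(current));

	/* On destroy; waits via the kref + completion for outstanding users: */
	rdma_restrack_del(&pd->res);
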
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 6df6fe0c2198..225ab7783dfd 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -75,16 +75,15 @@ enum phy_event {
PHYE_OOB_ERROR,
PHYE_SPINUP_HOLD, /* hot plug SATA, no COMWAKE sent */
PHYE_RESUME_TIMEOUT,
+ PHYE_SHUTDOWN,
PHY_NUM_EVENTS,
};
enum discover_event {
DISCE_DISCOVER_DOMAIN = 0U,
DISCE_REVALIDATE_DOMAIN,
- DISCE_PROBE,
DISCE_SUSPEND,
DISCE_RESUME,
- DISCE_DESTRUCT,
DISC_NUM_EVENTS,
};
@@ -261,6 +260,7 @@ struct asd_sas_port {
struct list_head dev_list;
struct list_head disco_list;
struct list_head destroy_list;
+ struct list_head sas_port_del_list;
enum sas_linkrate linkrate;
struct sas_work work;
@@ -292,6 +292,7 @@ struct asd_sas_port {
struct asd_sas_event {
struct sas_work work;
struct asd_sas_phy *phy;
+ int event;
};
static inline struct asd_sas_event *to_asd_sas_event(struct work_struct *work)
@@ -301,17 +302,24 @@ static inline struct asd_sas_event *to_asd_sas_event(struct work_struct *work)
return ev;
}
+static inline void INIT_SAS_EVENT(struct asd_sas_event *ev,
+ void (*fn)(struct work_struct *),
+ struct asd_sas_phy *phy, int event)
+{
+ INIT_SAS_WORK(&ev->work, fn);
+ ev->phy = phy;
+ ev->event = event;
+}
+
+#define SAS_PHY_SHUTDOWN_THRES 1024
+
/* The phy pretty much is controlled by the LLDD.
* The class only reads those fields.
*/
struct asd_sas_phy {
/* private: */
- struct asd_sas_event port_events[PORT_NUM_EVENTS];
- struct asd_sas_event phy_events[PHY_NUM_EVENTS];
-
- unsigned long port_events_pending;
- unsigned long phy_events_pending;
-
+ atomic_t event_nr;
+ int in_shutdown;
int error;
int suspended;
@@ -380,6 +388,9 @@ struct sas_ha_struct {
struct device *dev; /* should be set */
struct module *lldd_module; /* should be set */
+ struct workqueue_struct *event_q;
+ struct workqueue_struct *disco_q;
+
u8 *sas_addr; /* must be set */
u8 hashed_sas_addr[HASHED_SAS_ADDR_SIZE];
@@ -399,6 +410,8 @@ struct sas_ha_struct {
struct list_head eh_done_q; /* complete via scsi_eh_flush_done_q */
struct list_head eh_ata_q; /* scmds to promote from sas to ata eh */
+
+ int event_thres;
};
#define SHOST_TO_SAS_HA(_shost) (*(struct sas_ha_struct **)(_shost)->hostdata)
@@ -670,6 +683,7 @@ extern int sas_bios_param(struct scsi_device *,
sector_t capacity, int *hsc);
extern struct scsi_transport_template *
sas_domain_attach_transport(struct sas_domain_function_template *);
+extern struct device_attribute dev_attr_phy_event_threshold;
int sas_discover_root_expander(struct domain_device *);
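
The net effect of the libsas changes is that phy and port events are no longer preallocated one-per-event-type in asd_sas_phy; a notification allocates its own asd_sas_event, stamps it with INIT_SAS_EVENT(), and the per-phy event_nr counter, checked against event_thres (defaulting to SAS_PHY_SHUTDOWN_THRES), lets libsas shut down a phy that floods the queue. A heavily hedged sketch of the allocation side (the worker function and queueing details are elided):

	struct asd_sas_event *ev = kzalloc(sizeof(*ev), GFP_ATOMIC);

	if (!ev)
		return;
	INIT_SAS_EVENT(ev, my_phy_event_worker, phy, PHYE_SHUTDOWN);
	atomic_inc(&phy->event_nr);
	/* then queue &ev->work on the ha->event_q workqueue */
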
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 7fb57e905526..949a016dd7fa 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -171,7 +171,6 @@ extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
extern void scsi_kunmap_atomic_sg(void *virt);
extern int scsi_init_io(struct scsi_cmnd *cmd);
-extern void scsi_initialize_rq(struct request *rq);
extern int scsi_dma_map(struct scsi_cmnd *cmd);
extern void scsi_dma_unmap(struct scsi_cmnd *cmd);
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index a8b7bf879ced..1a1df0d21ee3 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -571,6 +571,8 @@ struct Scsi_Host {
struct blk_mq_tag_set tag_set;
};
+ struct rcu_head rcu;
+
atomic_t host_busy; /* commands actually active on low-level */
atomic_t host_blocked;
diff --git a/include/scsi/scsi_proto.h b/include/scsi/scsi_proto.h
index 1df8efb0ee01..c36860111932 100644
--- a/include/scsi/scsi_proto.h
+++ b/include/scsi/scsi_proto.h
@@ -236,6 +236,7 @@ struct scsi_varlen_cdb_hdr {
#define UNIT_ATTENTION 0x06
#define DATA_PROTECT 0x07
#define BLANK_CHECK 0x08
+#define VENDOR_SPECIFIC 0x09
#define COPY_ABORTED 0x0a
#define ABORTED_COMMAND 0x0b
#define VOLUME_OVERFLOW 0x0d
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index 8cf30215c177..15da45dc2a5d 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -139,8 +139,8 @@ enum fc_vport_state {
#define FC_PORTSPEED_50GBIT 0x200
#define FC_PORTSPEED_100GBIT 0x400
#define FC_PORTSPEED_25GBIT 0x800
-#define FC_PORTSPEED_64BIT 0x1000
-#define FC_PORTSPEED_128BIT 0x2000
+#define FC_PORTSPEED_64GBIT 0x1000
+#define FC_PORTSPEED_128GBIT 0x2000
#define FC_PORTSPEED_NOT_NEGOTIATED (1 << 15) /* Speed not established */
/*
diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h
index 62895b405933..05ec927a3c72 100644
--- a/include/scsi/scsi_transport_sas.h
+++ b/include/scsi/scsi_transport_sas.h
@@ -156,6 +156,7 @@ struct sas_port {
struct mutex phy_list_mutex;
struct list_head phy_list;
+ struct list_head del_list; /* libsas only */
};
#define dev_to_sas_port(d) \
diff --git a/include/scsi/srp.h b/include/scsi/srp.h
index 5be834de491a..c16a3c9a4d9b 100644
--- a/include/scsi/srp.h
+++ b/include/scsi/srp.h
@@ -129,6 +129,23 @@ struct srp_login_req {
u8 target_port_id[16];
};
+/**
+ * struct srp_login_req_rdma - RDMA/CM login parameters.
+ *
+ * RDMA/CM over InfiniBand can only carry 92 - 36 = 56 bytes of private
+ * data. The %srp_login_req_rdma structure contains the same information as
+ * %srp_login_req but with the reserved data removed.
+ */
+struct srp_login_req_rdma {
+ u64 tag;
+ __be16 req_buf_fmt;
+ u8 req_flags;
+ u8 opcode;
+ __be32 req_it_iu_len;
+ u8 initiator_port_id[16];
+ u8 target_port_id[16];
+};
+
/*
* The SRP spec defines the size of the LOGIN_RSP structure to be 52
* bytes, so it needs to be packed to avoid having it padded to 56
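
Given the 56-byte budget called out in the comment above, a consumer of srp_login_req_rdma might lock that assumption in at compile time; a sketch:

	/* The reduced login request must fit in the RDMA/CM private data. */
	BUILD_BUG_ON(sizeof(struct srp_login_req_rdma) > 56);
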
diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h
index ca00130cb028..9c14e21dda85 100644
--- a/include/sound/hdaudio_ext.h
+++ b/include/sound/hdaudio_ext.h
@@ -193,7 +193,7 @@ struct hda_dai_map {
* @pvt_data - private data, for asoc contains asoc codec object
*/
struct hdac_ext_device {
- struct hdac_device hdac;
+ struct hdac_device hdev;
struct hdac_ext_bus *ebus;
/* soc-dai to nid map */
@@ -213,7 +213,7 @@ struct hdac_ext_dma_params {
u8 stream_tag;
};
#define to_ehdac_device(dev) (container_of((dev), \
- struct hdac_ext_device, hdac))
+ struct hdac_ext_device, hdev))
/*
* HD-audio codec base driver
*/
diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
index ab9fcb2f97f0..afeca593188a 100644
--- a/include/sound/hwdep.h
+++ b/include/sound/hwdep.h
@@ -37,7 +37,7 @@ struct snd_hwdep_ops {
long count, loff_t *offset);
int (*open)(struct snd_hwdep *hw, struct file * file);
int (*release)(struct snd_hwdep *hw, struct file * file);
- unsigned int (*poll)(struct snd_hwdep *hw, struct file *file,
+ __poll_t (*poll)(struct snd_hwdep *hw, struct file *file,
poll_table *wait);
int (*ioctl)(struct snd_hwdep *hw, struct file *file,
unsigned int cmd, unsigned long arg);
diff --git a/include/sound/info.h b/include/sound/info.h
index 67390ee846aa..becdf66d2825 100644
--- a/include/sound/info.h
+++ b/include/sound/info.h
@@ -62,7 +62,7 @@ struct snd_info_entry_ops {
loff_t (*llseek)(struct snd_info_entry *entry,
void *file_private_data, struct file *file,
loff_t offset, int orig);
- unsigned int (*poll)(struct snd_info_entry *entry,
+ __poll_t (*poll)(struct snd_info_entry *entry,
void *file_private_data, struct file *file,
poll_table *wait);
int (*ioctl)(struct snd_info_entry *entry, void *file_private_data,
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 24febf9e177c..e054c583d3b3 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -169,6 +169,10 @@ struct snd_pcm_ops {
#define SNDRV_PCM_FMTBIT_IMA_ADPCM _SNDRV_PCM_FMTBIT(IMA_ADPCM)
#define SNDRV_PCM_FMTBIT_MPEG _SNDRV_PCM_FMTBIT(MPEG)
#define SNDRV_PCM_FMTBIT_GSM _SNDRV_PCM_FMTBIT(GSM)
+#define SNDRV_PCM_FMTBIT_S20_LE _SNDRV_PCM_FMTBIT(S20_LE)
+#define SNDRV_PCM_FMTBIT_U20_LE _SNDRV_PCM_FMTBIT(U20_LE)
+#define SNDRV_PCM_FMTBIT_S20_BE _SNDRV_PCM_FMTBIT(S20_BE)
+#define SNDRV_PCM_FMTBIT_U20_BE _SNDRV_PCM_FMTBIT(U20_BE)
#define SNDRV_PCM_FMTBIT_SPECIAL _SNDRV_PCM_FMTBIT(SPECIAL)
#define SNDRV_PCM_FMTBIT_S24_3LE _SNDRV_PCM_FMTBIT(S24_3LE)
#define SNDRV_PCM_FMTBIT_U24_3LE _SNDRV_PCM_FMTBIT(U24_3LE)
@@ -202,6 +206,8 @@ struct snd_pcm_ops {
#define SNDRV_PCM_FMTBIT_FLOAT SNDRV_PCM_FMTBIT_FLOAT_LE
#define SNDRV_PCM_FMTBIT_FLOAT64 SNDRV_PCM_FMTBIT_FLOAT64_LE
#define SNDRV_PCM_FMTBIT_IEC958_SUBFRAME SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE
+#define SNDRV_PCM_FMTBIT_S20 SNDRV_PCM_FMTBIT_S20_LE
+#define SNDRV_PCM_FMTBIT_U20 SNDRV_PCM_FMTBIT_U20_LE
#endif
#ifdef SNDRV_BIG_ENDIAN
#define SNDRV_PCM_FMTBIT_S16 SNDRV_PCM_FMTBIT_S16_BE
@@ -213,6 +219,8 @@ struct snd_pcm_ops {
#define SNDRV_PCM_FMTBIT_FLOAT SNDRV_PCM_FMTBIT_FLOAT_BE
#define SNDRV_PCM_FMTBIT_FLOAT64 SNDRV_PCM_FMTBIT_FLOAT64_BE
#define SNDRV_PCM_FMTBIT_IEC958_SUBFRAME SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_BE
+#define SNDRV_PCM_FMTBIT_S20 SNDRV_PCM_FMTBIT_S20_BE
+#define SNDRV_PCM_FMTBIT_U20 SNDRV_PCM_FMTBIT_U20_BE
#endif
struct snd_pcm_file {
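
The new S20/U20 format bits (20-bit samples carried LSB-justified in a 32-bit container) compose with the existing ones, so a codec or DAI driver opts in by OR-ing them into its format mask; an illustrative (not from this patch) driver-side define:

	#define FOO_DAI_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
				 SNDRV_PCM_FMTBIT_S20_LE | \
				 SNDRV_PCM_FMTBIT_S24_LE | \
				 SNDRV_PCM_FMTBIT_S32_LE)
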
diff --git a/include/sound/rt5514.h b/include/sound/rt5514.h
index ef18494769ee..64d027dbaaca 100644
--- a/include/sound/rt5514.h
+++ b/include/sound/rt5514.h
@@ -14,6 +14,8 @@
struct rt5514_platform_data {
unsigned int dmic_init_delay;
+ const char *dsp_calib_clk_name;
+ unsigned int dsp_calib_clk_rate;
};
#endif
diff --git a/include/sound/rt5645.h b/include/sound/rt5645.h
index d0c33a9972b9..f218c742f08e 100644
--- a/include/sound/rt5645.h
+++ b/include/sound/rt5645.h
@@ -25,6 +25,9 @@ struct rt5645_platform_data {
bool level_trigger_irq;
/* Invert JD1_1 status polarity */
bool inv_jd1_1;
+
+ /* Value to assign to snd_soc_card.long_name */
+ const char *long_name;
};
#endif
diff --git a/include/sound/soc-acpi-intel-match.h b/include/sound/soc-acpi-intel-match.h
index 1a9191cd4bb3..9da6388c20a1 100644
--- a/include/sound/soc-acpi-intel-match.h
+++ b/include/sound/soc-acpi-intel-match.h
@@ -16,6 +16,7 @@
#ifndef __LINUX_SND_SOC_ACPI_INTEL_MATCH_H
#define __LINUX_SND_SOC_ACPI_INTEL_MATCH_H
+#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/acpi.h>
diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h
index a7d8d335b043..082224275f52 100644
--- a/include/sound/soc-acpi.h
+++ b/include/sound/soc-acpi.h
@@ -17,6 +17,7 @@
#include <linux/stddef.h>
#include <linux/acpi.h>
+#include <linux/mod_devicetable.h>
struct snd_soc_acpi_package_context {
char *name; /* package name */
@@ -26,17 +27,13 @@ struct snd_soc_acpi_package_context {
bool data_valid;
};
+/* the codec name used in DAIs is i2c-<HID>:00, with HID being 8 chars */
+#define SND_ACPI_I2C_ID_LEN (4 + ACPI_ID_LEN + 3 + 1)
+
#if IS_ENABLED(CONFIG_ACPI)
-/* translation fron HID to I2C name, needed for DAI codec_name */
-const char *snd_soc_acpi_find_name_from_hid(const u8 hid[ACPI_ID_LEN]);
bool snd_soc_acpi_find_package_from_hid(const u8 hid[ACPI_ID_LEN],
struct snd_soc_acpi_package_context *ctx);
#else
-static inline const char *
-snd_soc_acpi_find_name_from_hid(const u8 hid[ACPI_ID_LEN])
-{
- return NULL;
-}
static inline bool
snd_soc_acpi_find_package_from_hid(const u8 hid[ACPI_ID_LEN],
struct snd_soc_acpi_package_context *ctx)
@@ -49,9 +46,6 @@ snd_soc_acpi_find_package_from_hid(const u8 hid[ACPI_ID_LEN],
struct snd_soc_acpi_mach *
snd_soc_acpi_find_machine(struct snd_soc_acpi_mach *machines);
-/* acpi check hid */
-bool snd_soc_acpi_check_hid(const u8 hid[ACPI_ID_LEN]);
-
/**
* snd_soc_acpi_mach: ACPI-based machine descriptor. Most of the fields are
* related to the hardware, except for the firmware and topology file names.
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index 58acd00cae19..8ad11669e4d8 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -102,6 +102,8 @@ struct snd_compr_stream;
SNDRV_PCM_FMTBIT_S16_BE |\
SNDRV_PCM_FMTBIT_S20_3LE |\
SNDRV_PCM_FMTBIT_S20_3BE |\
+ SNDRV_PCM_FMTBIT_S20_LE |\
+ SNDRV_PCM_FMTBIT_S20_BE |\
SNDRV_PCM_FMTBIT_S24_3LE |\
SNDRV_PCM_FMTBIT_S24_3BE |\
SNDRV_PCM_FMTBIT_S32_LE |\
@@ -294,9 +296,6 @@ struct snd_soc_dai {
/* DAI runtime info */
unsigned int capture_active:1; /* stream is in use */
unsigned int playback_active:1; /* stream is in use */
- unsigned int symmetric_rates:1;
- unsigned int symmetric_channels:1;
- unsigned int symmetric_samplebits:1;
unsigned int probed:1;
unsigned int active;
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 1a7323238c49..b655d987fbe7 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -494,6 +494,8 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num);
int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num);
#endif
+void snd_soc_disconnect_sync(struct device *dev);
+
struct snd_pcm_substream *snd_soc_get_dai_substream(struct snd_soc_card *card,
const char *dai_link, int stream);
struct snd_soc_pcm_runtime *snd_soc_get_pcm_runtime(struct snd_soc_card *card,
@@ -802,6 +804,9 @@ struct snd_soc_component_driver {
int (*suspend)(struct snd_soc_component *);
int (*resume)(struct snd_soc_component *);
+ unsigned int (*read)(struct snd_soc_component *, unsigned int);
+ int (*write)(struct snd_soc_component *, unsigned int, unsigned int);
+
/* pcm creation and destruction */
int (*pcm_new)(struct snd_soc_pcm_runtime *);
void (*pcm_free)(struct snd_pcm *);
@@ -858,12 +863,10 @@ struct snd_soc_component {
struct list_head card_aux_list; /* for auxiliary bound components */
struct list_head card_list;
- struct snd_soc_dai_driver *dai_drv;
- int num_dai;
-
const struct snd_soc_component_driver *driver;
struct list_head dai_list;
+ int num_dai;
int (*read)(struct snd_soc_component *, unsigned int, unsigned int *);
int (*write)(struct snd_soc_component *, unsigned int, unsigned int);
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 4342a329821f..c3ac5ec86519 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -193,7 +193,6 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
__print_flags(flag, "|", \
{ (1 << EXTENT_FLAG_PINNED), "PINNED" },\
{ (1 << EXTENT_FLAG_COMPRESSED), "COMPRESSED" },\
- { (1 << EXTENT_FLAG_VACANCY), "VACANCY" },\
{ (1 << EXTENT_FLAG_PREALLOC), "PREALLOC" },\
{ (1 << EXTENT_FLAG_LOGGING), "LOGGING" },\
{ (1 << EXTENT_FLAG_FILLING), "FILLING" },\
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 8f8dd42fa57b..06c87f9f720c 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -147,7 +147,8 @@ TRACE_DEFINE_ENUM(CP_TRIMMED);
{ CP_NO_SPC_ROLL, "no space roll forward" }, \
{ CP_NODE_NEED_CP, "node needs cp" }, \
{ CP_FASTBOOT_MODE, "fastboot mode" }, \
- { CP_SPEC_LOG_NUM, "log type is 2" })
+ { CP_SPEC_LOG_NUM, "log type is 2" }, \
+ { CP_RECOVER_DIR, "dir needs recovery" })
struct victim_sel_policy;
struct f2fs_map_blocks;
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 59d40c454aa0..0b50fda80db0 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -243,6 +243,7 @@ TRACE_EVENT(rcu_exp_funnel_lock,
__entry->grphi, __entry->gpevent)
);
+#ifdef CONFIG_RCU_NOCB_CPU
/*
* Tracepoint for RCU no-CBs CPU callback handoffs. This event is intended
* to assist debugging of these handoffs.
@@ -285,6 +286,7 @@ TRACE_EVENT(rcu_nocb_wake,
TP_printk("%s %d %s", __entry->rcuname, __entry->cpu, __entry->reason)
);
+#endif
/*
* Tracepoint for tasks blocking within preemptible-RCU read-side
@@ -421,76 +423,40 @@ TRACE_EVENT(rcu_fqs,
/*
* Tracepoint for dyntick-idle entry/exit events. These take a string
- * as argument: "Start" for entering dyntick-idle mode, "End" for
- * leaving it, "--=" for events moving towards idle, and "++=" for events
- * moving away from idle. "Error on entry: not idle task" and "Error on
- * exit: not idle task" indicate that a non-idle task is erroneously
- * toying with the idle loop.
+ * as argument: "Start" for entering dyntick-idle mode, "Startirq" for
+ * entering it from irq/NMI, "End" for leaving it, "Endirq" for leaving it
+ * to irq/NMI, "--=" for events moving towards idle, and "++=" for events
+ * moving away from idle.
*
* These events also take a pair of numbers, which indicate the nesting
- * depth before and after the event of interest. Note that task-related
- * events use the upper bits of each number, while interrupt-related
- * events use the lower bits.
+ * depth before and after the event of interest, and a third number that is
+ * the ->dynticks counter. Note that task-related and interrupt-related
+ * events use two separate counters, and that the "++=" and "--=" events
+ * for irq/NMI will change the counter by two, otherwise by one.
*/
TRACE_EVENT(rcu_dyntick,
- TP_PROTO(const char *polarity, long long oldnesting, long long newnesting),
+ TP_PROTO(const char *polarity, long oldnesting, long newnesting, atomic_t dynticks),
- TP_ARGS(polarity, oldnesting, newnesting),
+ TP_ARGS(polarity, oldnesting, newnesting, dynticks),
TP_STRUCT__entry(
__field(const char *, polarity)
- __field(long long, oldnesting)
- __field(long long, newnesting)
+ __field(long, oldnesting)
+ __field(long, newnesting)
+ __field(int, dynticks)
),
TP_fast_assign(
__entry->polarity = polarity;
__entry->oldnesting = oldnesting;
__entry->newnesting = newnesting;
+ __entry->dynticks = atomic_read(&dynticks);
),
- TP_printk("%s %llx %llx", __entry->polarity,
- __entry->oldnesting, __entry->newnesting)
-);
-
-/*
- * Tracepoint for RCU preparation for idle, the goal being to get RCU
- * processing done so that the current CPU can shut off its scheduling
- * clock and enter dyntick-idle mode. One way to accomplish this is
- * to drain all RCU callbacks from this CPU, and the other is to have
- * done everything RCU requires for the current grace period. In this
- * latter case, the CPU will be awakened at the end of the current grace
- * period in order to process the remainder of its callbacks.
- *
- * These tracepoints take a string as argument:
- *
- * "No callbacks": Nothing to do, no callbacks on this CPU.
- * "In holdoff": Nothing to do, holding off after unsuccessful attempt.
- * "Begin holdoff": Attempt failed, don't retry until next jiffy.
- * "Dyntick with callbacks": Entering dyntick-idle despite callbacks.
- * "Dyntick with lazy callbacks": Entering dyntick-idle w/lazy callbacks.
- * "More callbacks": Still more callbacks, try again to clear them out.
- * "Callbacks drained": All callbacks processed, off to dyntick idle!
- * "Timer": Timer fired to cause CPU to continue processing callbacks.
- * "Demigrate": Timer fired on wrong CPU, woke up correct CPU.
- * "Cleanup after idle": Idle exited, timer canceled.
- */
-TRACE_EVENT(rcu_prep_idle,
-
- TP_PROTO(const char *reason),
-
- TP_ARGS(reason),
-
- TP_STRUCT__entry(
- __field(const char *, reason)
- ),
-
- TP_fast_assign(
- __entry->reason = reason;
- ),
-
- TP_printk("%s", __entry->reason)
+ TP_printk("%s %lx %lx %#3x", __entry->polarity,
+ __entry->oldnesting, __entry->newnesting,
+ __entry->dynticks & 0xfff)
);
/*
@@ -799,8 +765,7 @@ TRACE_EVENT(rcu_barrier,
grplo, grphi, gp_tasks) do { } \
while (0)
#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
-#define trace_rcu_dyntick(polarity, oldnesting, newnesting) do { } while (0)
-#define trace_rcu_prep_idle(reason) do { } while (0)
+#define trace_rcu_dyntick(polarity, oldnesting, newnesting, dyntick) do { } while (0)
#define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
do { } while (0)
diff --git a/include/trace/events/rdma.h b/include/trace/events/rdma.h
new file mode 100644
index 000000000000..aa19afc73a4e
--- /dev/null
+++ b/include/trace/events/rdma.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017 Oracle. All rights reserved.
+ */
+
+/*
+ * enum ib_event_type, from include/rdma/ib_verbs.h
+ */
+
+#define IB_EVENT_LIST \
+ ib_event(CQ_ERR) \
+ ib_event(QP_FATAL) \
+ ib_event(QP_REQ_ERR) \
+ ib_event(QP_ACCESS_ERR) \
+ ib_event(COMM_EST) \
+ ib_event(SQ_DRAINED) \
+ ib_event(PATH_MIG) \
+ ib_event(PATH_MIG_ERR) \
+ ib_event(DEVICE_FATAL) \
+ ib_event(PORT_ACTIVE) \
+ ib_event(PORT_ERR) \
+ ib_event(LID_CHANGE) \
+ ib_event(PKEY_CHANGE) \
+ ib_event(SM_CHANGE) \
+ ib_event(SRQ_ERR) \
+ ib_event(SRQ_LIMIT_REACHED) \
+ ib_event(QP_LAST_WQE_REACHED) \
+ ib_event(CLIENT_REREGISTER) \
+ ib_event(GID_CHANGE) \
+ ib_event_end(WQ_FATAL)
+
+#undef ib_event
+#undef ib_event_end
+
+#define ib_event(x) TRACE_DEFINE_ENUM(IB_EVENT_##x);
+#define ib_event_end(x) TRACE_DEFINE_ENUM(IB_EVENT_##x);
+
+IB_EVENT_LIST
+
+#undef ib_event
+#undef ib_event_end
+
+#define ib_event(x) { IB_EVENT_##x, #x },
+#define ib_event_end(x) { IB_EVENT_##x, #x }
+
+#define rdma_show_ib_event(x) \
+ __print_symbolic(x, IB_EVENT_LIST)
+
+/*
+ * enum ib_wc_status type, from include/rdma/ib_verbs.h
+ */
+#define IB_WC_STATUS_LIST \
+ ib_wc_status(SUCCESS) \
+ ib_wc_status(LOC_LEN_ERR) \
+ ib_wc_status(LOC_QP_OP_ERR) \
+ ib_wc_status(LOC_EEC_OP_ERR) \
+ ib_wc_status(LOC_PROT_ERR) \
+ ib_wc_status(WR_FLUSH_ERR) \
+ ib_wc_status(MW_BIND_ERR) \
+ ib_wc_status(BAD_RESP_ERR) \
+ ib_wc_status(LOC_ACCESS_ERR) \
+ ib_wc_status(REM_INV_REQ_ERR) \
+ ib_wc_status(REM_ACCESS_ERR) \
+ ib_wc_status(REM_OP_ERR) \
+ ib_wc_status(RETRY_EXC_ERR) \
+ ib_wc_status(RNR_RETRY_EXC_ERR) \
+ ib_wc_status(LOC_RDD_VIOL_ERR) \
+ ib_wc_status(REM_INV_RD_REQ_ERR) \
+ ib_wc_status(REM_ABORT_ERR) \
+ ib_wc_status(INV_EECN_ERR) \
+ ib_wc_status(INV_EEC_STATE_ERR) \
+ ib_wc_status(FATAL_ERR) \
+ ib_wc_status(RESP_TIMEOUT_ERR) \
+ ib_wc_status_end(GENERAL_ERR)
+
+#undef ib_wc_status
+#undef ib_wc_status_end
+
+#define ib_wc_status(x) TRACE_DEFINE_ENUM(IB_WC_##x);
+#define ib_wc_status_end(x) TRACE_DEFINE_ENUM(IB_WC_##x);
+
+IB_WC_STATUS_LIST
+
+#undef ib_wc_status
+#undef ib_wc_status_end
+
+#define ib_wc_status(x) { IB_WC_##x, #x },
+#define ib_wc_status_end(x) { IB_WC_##x, #x }
+
+#define rdma_show_wc_status(x) \
+ __print_symbolic(x, IB_WC_STATUS_LIST)
+
+/*
+ * enum rdma_cm_event_type, from include/rdma/rdma_cm.h
+ */
+#define RDMA_CM_EVENT_LIST \
+ rdma_cm_event(ADDR_RESOLVED) \
+ rdma_cm_event(ADDR_ERROR) \
+ rdma_cm_event(ROUTE_RESOLVED) \
+ rdma_cm_event(ROUTE_ERROR) \
+ rdma_cm_event(CONNECT_REQUEST) \
+ rdma_cm_event(CONNECT_RESPONSE) \
+ rdma_cm_event(CONNECT_ERROR) \
+ rdma_cm_event(UNREACHABLE) \
+ rdma_cm_event(REJECTED) \
+ rdma_cm_event(ESTABLISHED) \
+ rdma_cm_event(DISCONNECTED) \
+ rdma_cm_event(DEVICE_REMOVAL) \
+ rdma_cm_event(MULTICAST_JOIN) \
+ rdma_cm_event(MULTICAST_ERROR) \
+ rdma_cm_event(ADDR_CHANGE) \
+ rdma_cm_event_end(TIMEWAIT_EXIT)
+
+#undef rdma_cm_event
+#undef rdma_cm_event_end
+
+#define rdma_cm_event(x) TRACE_DEFINE_ENUM(RDMA_CM_EVENT_##x);
+#define rdma_cm_event_end(x) TRACE_DEFINE_ENUM(RDMA_CM_EVENT_##x);
+
+RDMA_CM_EVENT_LIST
+
+#undef rdma_cm_event
+#undef rdma_cm_event_end
+
+#define rdma_cm_event(x) { RDMA_CM_EVENT_##x, #x },
+#define rdma_cm_event_end(x) { RDMA_CM_EVENT_##x, #x }
+
+#define rdma_show_cm_event(x) \
+ __print_symbolic(x, RDMA_CM_EVENT_LIST)
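
The repeated #undef/#define dance in this new header is a standard x-macro: each *_LIST is expanded once with the per-entry macro defined as TRACE_DEFINE_ENUM() (so trace tooling can resolve the enum values) and once as { value, "string" } pairs for __print_symbolic(), with the *_end variant dropping the trailing comma. Stripped to its skeleton, the trick looks like:

	#define COLOR_LIST \
		clr(RED)   \
		clr_end(BLUE)

	#define clr(x)		TRACE_DEFINE_ENUM(COLOR_##x);
	#define clr_end(x)	TRACE_DEFINE_ENUM(COLOR_##x);
	COLOR_LIST
	#undef clr
	#undef clr_end

	#define clr(x)		{ COLOR_##x, #x },
	#define clr_end(x)	{ COLOR_##x, #x }
	#define show_color(x)	__print_symbolic(x, COLOR_LIST)
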
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
new file mode 100644
index 000000000000..50ed3f8bf534
--- /dev/null
+++ b/include/trace/events/rpcrdma.h
@@ -0,0 +1,890 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017 Oracle. All rights reserved.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rpcrdma
+
+#if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RPCRDMA_H
+
+#include <linux/tracepoint.h>
+#include <trace/events/rdma.h>
+
+/**
+ ** Event classes
+ **/
+
+DECLARE_EVENT_CLASS(xprtrdma_reply_event,
+ TP_PROTO(
+ const struct rpcrdma_rep *rep
+ ),
+
+ TP_ARGS(rep),
+
+ TP_STRUCT__entry(
+ __field(const void *, rep)
+ __field(const void *, r_xprt)
+ __field(u32, xid)
+ __field(u32, version)
+ __field(u32, proc)
+ ),
+
+ TP_fast_assign(
+ __entry->rep = rep;
+ __entry->r_xprt = rep->rr_rxprt;
+ __entry->xid = be32_to_cpu(rep->rr_xid);
+ __entry->version = be32_to_cpu(rep->rr_vers);
+ __entry->proc = be32_to_cpu(rep->rr_proc);
+ ),
+
+ TP_printk("rxprt %p xid=0x%08x rep=%p: version %u proc %u",
+ __entry->r_xprt, __entry->xid, __entry->rep,
+ __entry->version, __entry->proc
+ )
+);
+
+#define DEFINE_REPLY_EVENT(name) \
+ DEFINE_EVENT(xprtrdma_reply_event, name, \
+ TP_PROTO( \
+ const struct rpcrdma_rep *rep \
+ ), \
+ TP_ARGS(rep))
+
+DECLARE_EVENT_CLASS(xprtrdma_rxprt,
+ TP_PROTO(
+ const struct rpcrdma_xprt *r_xprt
+ ),
+
+ TP_ARGS(r_xprt),
+
+ TP_STRUCT__entry(
+ __field(const void *, r_xprt)
+ __string(addr, rpcrdma_addrstr(r_xprt))
+ __string(port, rpcrdma_portstr(r_xprt))
+ ),
+
+ TP_fast_assign(
+ __entry->r_xprt = r_xprt;
+ __assign_str(addr, rpcrdma_addrstr(r_xprt));
+ __assign_str(port, rpcrdma_portstr(r_xprt));
+ ),
+
+ TP_printk("peer=[%s]:%s r_xprt=%p",
+ __get_str(addr), __get_str(port), __entry->r_xprt
+ )
+);
+
+#define DEFINE_RXPRT_EVENT(name) \
+ DEFINE_EVENT(xprtrdma_rxprt, name, \
+ TP_PROTO( \
+ const struct rpcrdma_xprt *r_xprt \
+ ), \
+ TP_ARGS(r_xprt))
+
+DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
+ TP_PROTO(
+ const struct rpc_task *task,
+ unsigned int pos,
+ struct rpcrdma_mr *mr,
+ int nsegs
+ ),
+
+ TP_ARGS(task, pos, mr, nsegs),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(const void *, mr)
+ __field(unsigned int, pos)
+ __field(int, nents)
+ __field(u32, handle)
+ __field(u32, length)
+ __field(u64, offset)
+ __field(int, nsegs)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->mr = mr;
+ __entry->pos = pos;
+ __entry->nents = mr->mr_nents;
+ __entry->handle = mr->mr_handle;
+ __entry->length = mr->mr_length;
+ __entry->offset = mr->mr_offset;
+ __entry->nsegs = nsegs;
+ ),
+
+ TP_printk("task:%u@%u mr=%p pos=%u %u@0x%016llx:0x%08x (%s)",
+ __entry->task_id, __entry->client_id, __entry->mr,
+ __entry->pos, __entry->length,
+ (unsigned long long)__entry->offset, __entry->handle,
+ __entry->nents < __entry->nsegs ? "more" : "last"
+ )
+);
+
+#define DEFINE_RDCH_EVENT(name) \
+ DEFINE_EVENT(xprtrdma_rdch_event, name, \
+ TP_PROTO( \
+ const struct rpc_task *task, \
+ unsigned int pos, \
+ struct rpcrdma_mr *mr, \
+ int nsegs \
+ ), \
+ TP_ARGS(task, pos, mr, nsegs))
+
+DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
+ TP_PROTO(
+ const struct rpc_task *task,
+ struct rpcrdma_mr *mr,
+ int nsegs
+ ),
+
+ TP_ARGS(task, mr, nsegs),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(const void *, mr)
+ __field(int, nents)
+ __field(u32, handle)
+ __field(u32, length)
+ __field(u64, offset)
+ __field(int, nsegs)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->mr = mr;
+ __entry->nents = mr->mr_nents;
+ __entry->handle = mr->mr_handle;
+ __entry->length = mr->mr_length;
+ __entry->offset = mr->mr_offset;
+ __entry->nsegs = nsegs;
+ ),
+
+ TP_printk("task:%u@%u mr=%p %u@0x%016llx:0x%08x (%s)",
+ __entry->task_id, __entry->client_id, __entry->mr,
+ __entry->length, (unsigned long long)__entry->offset,
+ __entry->handle,
+ __entry->nents < __entry->nsegs ? "more" : "last"
+ )
+);
+
+#define DEFINE_WRCH_EVENT(name) \
+ DEFINE_EVENT(xprtrdma_wrch_event, name, \
+ TP_PROTO( \
+ const struct rpc_task *task, \
+ struct rpcrdma_mr *mr, \
+ int nsegs \
+ ), \
+ TP_ARGS(task, mr, nsegs))
+
+TRACE_DEFINE_ENUM(FRWR_IS_INVALID);
+TRACE_DEFINE_ENUM(FRWR_IS_VALID);
+TRACE_DEFINE_ENUM(FRWR_FLUSHED_FR);
+TRACE_DEFINE_ENUM(FRWR_FLUSHED_LI);
+
+#define xprtrdma_show_frwr_state(x) \
+ __print_symbolic(x, \
+ { FRWR_IS_INVALID, "INVALID" }, \
+ { FRWR_IS_VALID, "VALID" }, \
+ { FRWR_FLUSHED_FR, "FLUSHED_FR" }, \
+ { FRWR_FLUSHED_LI, "FLUSHED_LI" })
+
+DECLARE_EVENT_CLASS(xprtrdma_frwr_done,
+ TP_PROTO(
+ const struct ib_wc *wc,
+ const struct rpcrdma_frwr *frwr
+ ),
+
+ TP_ARGS(wc, frwr),
+
+ TP_STRUCT__entry(
+ __field(const void *, mr)
+ __field(unsigned int, state)
+ __field(unsigned int, status)
+ __field(unsigned int, vendor_err)
+ ),
+
+ TP_fast_assign(
+ __entry->mr = container_of(frwr, struct rpcrdma_mr, frwr);
+ __entry->state = frwr->fr_state;
+ __entry->status = wc->status;
+ __entry->vendor_err = __entry->status ? wc->vendor_err : 0;
+ ),
+
+ TP_printk(
+ "mr=%p state=%s: %s (%u/0x%x)",
+ __entry->mr, xprtrdma_show_frwr_state(__entry->state),
+ rdma_show_wc_status(__entry->status),
+ __entry->status, __entry->vendor_err
+ )
+);
+
+#define DEFINE_FRWR_DONE_EVENT(name) \
+ DEFINE_EVENT(xprtrdma_frwr_done, name, \
+ TP_PROTO( \
+ const struct ib_wc *wc, \
+ const struct rpcrdma_frwr *frwr \
+ ), \
+ TP_ARGS(wc, frwr))
+
+DECLARE_EVENT_CLASS(xprtrdma_mr,
+ TP_PROTO(
+ const struct rpcrdma_mr *mr
+ ),
+
+ TP_ARGS(mr),
+
+ TP_STRUCT__entry(
+ __field(const void *, mr)
+ __field(u32, handle)
+ __field(u32, length)
+ __field(u64, offset)
+ ),
+
+ TP_fast_assign(
+ __entry->mr = mr;
+ __entry->handle = mr->mr_handle;
+ __entry->length = mr->mr_length;
+ __entry->offset = mr->mr_offset;
+ ),
+
+ TP_printk("mr=%p %u@0x%016llx:0x%08x",
+ __entry->mr, __entry->length,
+ (unsigned long long)__entry->offset,
+ __entry->handle
+ )
+);
+
+#define DEFINE_MR_EVENT(name) \
+ DEFINE_EVENT(xprtrdma_mr, name, \
+ TP_PROTO( \
+ const struct rpcrdma_mr *mr \
+ ), \
+ TP_ARGS(mr))
+
+DECLARE_EVENT_CLASS(xprtrdma_cb_event,
+ TP_PROTO(
+ const struct rpc_rqst *rqst
+ ),
+
+ TP_ARGS(rqst),
+
+ TP_STRUCT__entry(
+ __field(const void *, rqst)
+ __field(const void *, rep)
+ __field(const void *, req)
+ __field(u32, xid)
+ ),
+
+ TP_fast_assign(
+ __entry->rqst = rqst;
+ __entry->req = rpcr_to_rdmar(rqst);
+ __entry->rep = rpcr_to_rdmar(rqst)->rl_reply;
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
+ ),
+
+ TP_printk("xid=0x%08x, rqst=%p req=%p rep=%p",
+ __entry->xid, __entry->rqst, __entry->req, __entry->rep
+ )
+);
+
+#define DEFINE_CB_EVENT(name) \
+ DEFINE_EVENT(xprtrdma_cb_event, name, \
+ TP_PROTO( \
+ const struct rpc_rqst *rqst \
+ ), \
+ TP_ARGS(rqst))
+
+/**
+ ** Connection events
+ **/
+
+TRACE_EVENT(xprtrdma_conn_upcall,
+ TP_PROTO(
+ const struct rpcrdma_xprt *r_xprt,
+ struct rdma_cm_event *event
+ ),
+
+ TP_ARGS(r_xprt, event),
+
+ TP_STRUCT__entry(
+ __field(const void *, r_xprt)
+ __field(unsigned int, event)
+ __field(int, status)
+ __string(addr, rpcrdma_addrstr(r_xprt))
+ __string(port, rpcrdma_portstr(r_xprt))
+ ),
+
+ TP_fast_assign(
+ __entry->r_xprt = r_xprt;
+ __entry->event = event->event;
+ __entry->status = event->status;
+ __assign_str(addr, rpcrdma_addrstr(r_xprt));
+ __assign_str(port, rpcrdma_portstr(r_xprt));
+ ),
+
+ TP_printk("peer=[%s]:%s r_xprt=%p: %s (%u/%d)",
+ __get_str(addr), __get_str(port),
+ __entry->r_xprt, rdma_show_cm_event(__entry->event),
+ __entry->event, __entry->status
+ )
+);
+
+TRACE_EVENT(xprtrdma_disconnect,
+ TP_PROTO(
+ const struct rpcrdma_xprt *r_xprt,
+ int status
+ ),
+
+ TP_ARGS(r_xprt, status),
+
+ TP_STRUCT__entry(
+ __field(const void *, r_xprt)
+ __field(int, status)
+ __field(int, connected)
+ __string(addr, rpcrdma_addrstr(r_xprt))
+ __string(port, rpcrdma_portstr(r_xprt))
+ ),
+
+ TP_fast_assign(
+ __entry->r_xprt = r_xprt;
+ __entry->status = status;
+ __entry->connected = r_xprt->rx_ep.rep_connected;
+ __assign_str(addr, rpcrdma_addrstr(r_xprt));
+ __assign_str(port, rpcrdma_portstr(r_xprt));
+ ),
+
+ TP_printk("peer=[%s]:%s r_xprt=%p: status=%d %sconnected",
+ __get_str(addr), __get_str(port),
+ __entry->r_xprt, __entry->status,
+ __entry->connected == 1 ? "still " : "dis"
+ )
+);
+
+DEFINE_RXPRT_EVENT(xprtrdma_conn_start);
+DEFINE_RXPRT_EVENT(xprtrdma_conn_tout);
+DEFINE_RXPRT_EVENT(xprtrdma_create);
+DEFINE_RXPRT_EVENT(xprtrdma_destroy);
+DEFINE_RXPRT_EVENT(xprtrdma_remove);
+DEFINE_RXPRT_EVENT(xprtrdma_reinsert);
+DEFINE_RXPRT_EVENT(xprtrdma_reconnect);
+DEFINE_RXPRT_EVENT(xprtrdma_inject_dsc);
+
+TRACE_EVENT(xprtrdma_qp_error,
+ TP_PROTO(
+ const struct rpcrdma_xprt *r_xprt,
+ const struct ib_event *event
+ ),
+
+ TP_ARGS(r_xprt, event),
+
+ TP_STRUCT__entry(
+ __field(const void *, r_xprt)
+ __field(unsigned int, event)
+ __string(name, event->device->name)
+ __string(addr, rpcrdma_addrstr(r_xprt))
+ __string(port, rpcrdma_portstr(r_xprt))
+ ),
+
+ TP_fast_assign(
+ __entry->r_xprt = r_xprt;
+ __entry->event = event->event;
+ __assign_str(name, event->device->name);
+ __assign_str(addr, rpcrdma_addrstr(r_xprt));
+ __assign_str(port, rpcrdma_portstr(r_xprt));
+ ),
+
+ TP_printk("peer=[%s]:%s r_xprt=%p: dev %s: %s (%u)",
+ __get_str(addr), __get_str(port), __entry->r_xprt,
+ __get_str(name), rdma_show_ib_event(__entry->event),
+ __entry->event
+ )
+);
+
+/**
+ ** Call events
+ **/
+
+TRACE_EVENT(xprtrdma_createmrs,
+ TP_PROTO(
+ const struct rpcrdma_xprt *r_xprt,
+ unsigned int count
+ ),
+
+ TP_ARGS(r_xprt, count),
+
+ TP_STRUCT__entry(
+ __field(const void *, r_xprt)
+ __field(unsigned int, count)
+ ),
+
+ TP_fast_assign(
+ __entry->r_xprt = r_xprt;
+ __entry->count = count;
+ ),
+
+ TP_printk("r_xprt=%p: created %u MRs",
+ __entry->r_xprt, __entry->count
+ )
+);
+
+DEFINE_RXPRT_EVENT(xprtrdma_nomrs);
+
+DEFINE_RDCH_EVENT(xprtrdma_read_chunk);
+DEFINE_WRCH_EVENT(xprtrdma_write_chunk);
+DEFINE_WRCH_EVENT(xprtrdma_reply_chunk);
+
+TRACE_DEFINE_ENUM(rpcrdma_noch);
+TRACE_DEFINE_ENUM(rpcrdma_readch);
+TRACE_DEFINE_ENUM(rpcrdma_areadch);
+TRACE_DEFINE_ENUM(rpcrdma_writech);
+TRACE_DEFINE_ENUM(rpcrdma_replych);
+
+#define xprtrdma_show_chunktype(x) \
+ __print_symbolic(x, \
+ { rpcrdma_noch, "inline" }, \
+ { rpcrdma_readch, "read list" }, \
+ { rpcrdma_areadch, "*read list" }, \
+ { rpcrdma_writech, "write list" }, \
+ { rpcrdma_replych, "reply chunk" })
+
+TRACE_EVENT(xprtrdma_marshal,
+ TP_PROTO(
+ const struct rpc_rqst *rqst,
+ unsigned int hdrlen,
+ unsigned int rtype,
+ unsigned int wtype
+ ),
+
+ TP_ARGS(rqst, hdrlen, rtype, wtype),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(u32, xid)
+ __field(unsigned int, hdrlen)
+ __field(unsigned int, headlen)
+ __field(unsigned int, pagelen)
+ __field(unsigned int, taillen)
+ __field(unsigned int, rtype)
+ __field(unsigned int, wtype)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = rqst->rq_task->tk_pid;
+ __entry->client_id = rqst->rq_task->tk_client->cl_clid;
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
+ __entry->hdrlen = hdrlen;
+ __entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
+ __entry->pagelen = rqst->rq_snd_buf.page_len;
+ __entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
+ __entry->rtype = rtype;
+ __entry->wtype = wtype;
+ ),
+
+ TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",
+ __entry->task_id, __entry->client_id, __entry->xid,
+ __entry->hdrlen,
+ __entry->headlen, __entry->pagelen, __entry->taillen,
+ xprtrdma_show_chunktype(__entry->rtype),
+ xprtrdma_show_chunktype(__entry->wtype)
+ )
+);
+
+TRACE_EVENT(xprtrdma_post_send,
+ TP_PROTO(
+ const struct rpcrdma_req *req,
+ int status
+ ),
+
+ TP_ARGS(req, status),
+
+ TP_STRUCT__entry(
+ __field(const void *, req)
+ __field(int, num_sge)
+ __field(bool, signaled)
+ __field(int, status)
+ ),
+
+ TP_fast_assign(
+ __entry->req = req;
+ __entry->num_sge = req->rl_sendctx->sc_wr.num_sge;
+ __entry->signaled = req->rl_sendctx->sc_wr.send_flags &
+ IB_SEND_SIGNALED;
+ __entry->status = status;
+ ),
+
+ TP_printk("req=%p, %d SGEs%s, status=%d",
+ __entry->req, __entry->num_sge,
+ (__entry->signaled ? ", signaled" : ""),
+ __entry->status
+ )
+);
+
+TRACE_EVENT(xprtrdma_post_recv,
+ TP_PROTO(
+ const struct rpcrdma_rep *rep,
+ int status
+ ),
+
+ TP_ARGS(rep, status),
+
+ TP_STRUCT__entry(
+ __field(const void *, rep)
+ __field(int, status)
+ ),
+
+ TP_fast_assign(
+ __entry->rep = rep;
+ __entry->status = status;
+ ),
+
+ TP_printk("rep=%p status=%d",
+ __entry->rep, __entry->status
+ )
+);
+
+/**
+ ** Completion events
+ **/
+
+TRACE_EVENT(xprtrdma_wc_send,
+ TP_PROTO(
+ const struct rpcrdma_sendctx *sc,
+ const struct ib_wc *wc
+ ),
+
+ TP_ARGS(sc, wc),
+
+ TP_STRUCT__entry(
+ __field(const void *, req)
+ __field(unsigned int, unmap_count)
+ __field(unsigned int, status)
+ __field(unsigned int, vendor_err)
+ ),
+
+ TP_fast_assign(
+ __entry->req = sc->sc_req;
+ __entry->unmap_count = sc->sc_unmap_count;
+ __entry->status = wc->status;
+ __entry->vendor_err = __entry->status ? wc->vendor_err : 0;
+ ),
+
+ TP_printk("req=%p, unmapped %u pages: %s (%u/0x%x)",
+ __entry->req, __entry->unmap_count,
+ rdma_show_wc_status(__entry->status),
+ __entry->status, __entry->vendor_err
+ )
+);
+
+TRACE_EVENT(xprtrdma_wc_receive,
+ TP_PROTO(
+ const struct rpcrdma_rep *rep,
+ const struct ib_wc *wc
+ ),
+
+ TP_ARGS(rep, wc),
+
+ TP_STRUCT__entry(
+ __field(const void *, rep)
+ __field(unsigned int, byte_len)
+ __field(unsigned int, status)
+ __field(unsigned int, vendor_err)
+ ),
+
+ TP_fast_assign(
+ __entry->rep = rep;
+ __entry->byte_len = wc->byte_len;
+ __entry->status = wc->status;
+ __entry->vendor_err = __entry->status ? wc->vendor_err : 0;
+ ),
+
+ TP_printk("rep=%p, %u bytes: %s (%u/0x%x)",
+ __entry->rep, __entry->byte_len,
+ rdma_show_wc_status(__entry->status),
+ __entry->status, __entry->vendor_err
+ )
+);
+
+DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg);
+DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li);
+DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake);
+
+DEFINE_MR_EVENT(xprtrdma_localinv);
+DEFINE_MR_EVENT(xprtrdma_dma_unmap);
+DEFINE_MR_EVENT(xprtrdma_remoteinv);
+DEFINE_MR_EVENT(xprtrdma_recover_mr);
+
+/**
+ ** Reply events
+ **/
+
+TRACE_EVENT(xprtrdma_reply,
+ TP_PROTO(
+ const struct rpc_task *task,
+ const struct rpcrdma_rep *rep,
+ const struct rpcrdma_req *req,
+ unsigned int credits
+ ),
+
+ TP_ARGS(task, rep, req, credits),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(const void *, rep)
+ __field(const void *, req)
+ __field(u32, xid)
+ __field(unsigned int, credits)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->rep = rep;
+ __entry->req = req;
+ __entry->xid = be32_to_cpu(rep->rr_xid);
+ __entry->credits = credits;
+ ),
+
+ TP_printk("task:%u@%u xid=0x%08x, %u credits, rep=%p -> req=%p",
+ __entry->task_id, __entry->client_id, __entry->xid,
+ __entry->credits, __entry->rep, __entry->req
+ )
+);
+
+TRACE_EVENT(xprtrdma_defer_cmp,
+ TP_PROTO(
+ const struct rpcrdma_rep *rep
+ ),
+
+ TP_ARGS(rep),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(const void *, rep)
+ __field(u32, xid)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = rep->rr_rqst->rq_task->tk_pid;
+ __entry->client_id = rep->rr_rqst->rq_task->tk_client->cl_clid;
+ __entry->rep = rep;
+ __entry->xid = be32_to_cpu(rep->rr_xid);
+ ),
+
+ TP_printk("task:%u@%u xid=0x%08x rep=%p",
+ __entry->task_id, __entry->client_id, __entry->xid,
+ __entry->rep
+ )
+);
+
+DEFINE_REPLY_EVENT(xprtrdma_reply_vers);
+DEFINE_REPLY_EVENT(xprtrdma_reply_rqst);
+DEFINE_REPLY_EVENT(xprtrdma_reply_short);
+DEFINE_REPLY_EVENT(xprtrdma_reply_hdr);
+
+TRACE_EVENT(xprtrdma_fixup,
+ TP_PROTO(
+ const struct rpc_rqst *rqst,
+ int len,
+ int hdrlen
+ ),
+
+ TP_ARGS(rqst, len, hdrlen),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(const void *, base)
+ __field(int, len)
+ __field(int, hdrlen)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = rqst->rq_task->tk_pid;
+ __entry->client_id = rqst->rq_task->tk_client->cl_clid;
+ __entry->base = rqst->rq_rcv_buf.head[0].iov_base;
+ __entry->len = len;
+ __entry->hdrlen = hdrlen;
+ ),
+
+ TP_printk("task:%u@%u base=%p len=%d hdrlen=%d",
+ __entry->task_id, __entry->client_id,
+ __entry->base, __entry->len, __entry->hdrlen
+ )
+);
+
+TRACE_EVENT(xprtrdma_fixup_pg,
+ TP_PROTO(
+ const struct rpc_rqst *rqst,
+ int pageno,
+ const void *pos,
+ int len,
+ int curlen
+ ),
+
+ TP_ARGS(rqst, pageno, pos, len, curlen),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(const void *, pos)
+ __field(int, pageno)
+ __field(int, len)
+ __field(int, curlen)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = rqst->rq_task->tk_pid;
+ __entry->client_id = rqst->rq_task->tk_client->cl_clid;
+ __entry->pos = pos;
+ __entry->pageno = pageno;
+ __entry->len = len;
+ __entry->curlen = curlen;
+ ),
+
+ TP_printk("task:%u@%u pageno=%d pos=%p len=%d curlen=%d",
+ __entry->task_id, __entry->client_id,
+ __entry->pageno, __entry->pos, __entry->len, __entry->curlen
+ )
+);
+
+TRACE_EVENT(xprtrdma_decode_seg,
+ TP_PROTO(
+ u32 handle,
+ u32 length,
+ u64 offset
+ ),
+
+ TP_ARGS(handle, length, offset),
+
+ TP_STRUCT__entry(
+ __field(u32, handle)
+ __field(u32, length)
+ __field(u64, offset)
+ ),
+
+ TP_fast_assign(
+ __entry->handle = handle;
+ __entry->length = length;
+ __entry->offset = offset;
+ ),
+
+ TP_printk("%u@0x%016llx:0x%08x",
+ __entry->length, (unsigned long long)__entry->offset,
+ __entry->handle
+ )
+);
+
+/**
+ ** Allocation/release of rpcrdma_reqs and rpcrdma_reps
+ **/
+
+TRACE_EVENT(xprtrdma_allocate,
+ TP_PROTO(
+ const struct rpc_task *task,
+ const struct rpcrdma_req *req
+ ),
+
+ TP_ARGS(task, req),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(const void *, req)
+ __field(const void *, rep)
+ __field(size_t, callsize)
+ __field(size_t, rcvsize)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->req = req;
+ __entry->rep = req ? req->rl_reply : NULL;
+ __entry->callsize = task->tk_rqstp->rq_callsize;
+ __entry->rcvsize = task->tk_rqstp->rq_rcvsize;
+ ),
+
+ TP_printk("task:%u@%u req=%p rep=%p (%zu, %zu)",
+ __entry->task_id, __entry->client_id,
+ __entry->req, __entry->rep,
+ __entry->callsize, __entry->rcvsize
+ )
+);
+
+TRACE_EVENT(xprtrdma_rpc_done,
+ TP_PROTO(
+ const struct rpc_task *task,
+ const struct rpcrdma_req *req
+ ),
+
+ TP_ARGS(task, req),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(const void *, req)
+ __field(const void *, rep)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client->cl_clid;
+ __entry->req = req;
+ __entry->rep = req->rl_reply;
+ ),
+
+ TP_printk("task:%u@%u req=%p rep=%p",
+ __entry->task_id, __entry->client_id,
+ __entry->req, __entry->rep
+ )
+);
+
+DEFINE_RXPRT_EVENT(xprtrdma_noreps);
+
+/**
+ ** Callback events
+ **/
+
+TRACE_EVENT(xprtrdma_cb_setup,
+ TP_PROTO(
+ const struct rpcrdma_xprt *r_xprt,
+ unsigned int reqs
+ ),
+
+ TP_ARGS(r_xprt, reqs),
+
+ TP_STRUCT__entry(
+ __field(const void *, r_xprt)
+ __field(unsigned int, reqs)
+ __string(addr, rpcrdma_addrstr(r_xprt))
+ __string(port, rpcrdma_portstr(r_xprt))
+ ),
+
+ TP_fast_assign(
+ __entry->r_xprt = r_xprt;
+ __entry->reqs = reqs;
+ __assign_str(addr, rpcrdma_addrstr(r_xprt));
+ __assign_str(port, rpcrdma_portstr(r_xprt));
+ ),
+
+ TP_printk("peer=[%s]:%s r_xprt=%p: %u reqs",
+ __get_str(addr), __get_str(port),
+ __entry->r_xprt, __entry->reqs
+ )
+);
+
+DEFINE_CB_EVENT(xprtrdma_cb_call);
+DEFINE_CB_EVENT(xprtrdma_cb_reply);
+
+#endif /* _TRACE_RPCRDMA_H */
+
+#include <trace/define_trace.h>
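The DECLARE_EVENT_CLASS/DEFINE_EVENT split above lets many tracepoints share one TP_STRUCT__entry layout and format string; each DEFINE_MR_EVENT or DEFINE_CB_EVENT line only adds a new event name. Firing one of the generated points is a single call whose arguments match the TP_PROTO; a minimal sketch (the calling function is hypothetical):

    /* Sketch: invoking a tracepoint generated by TRACE_EVENT() above.
     * rpcrdma_do_marshal() is a made-up caller; trace_xprtrdma_marshal()
     * is the wrapper the macro generates. */
    static int rpcrdma_do_marshal(struct rpc_rqst *rqst, unsigned int hdrlen,
                                  unsigned int rtype, unsigned int wtype)
    {
            /* One call per event; compiles to a no-op when disabled. */
            trace_xprtrdma_marshal(rqst, hdrlen, rtype, wtype);
            return 0;
    }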
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 8c153f68509e..970c91a83173 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -32,7 +32,7 @@ DECLARE_EVENT_CLASS(rpc_task_status,
__entry->status = task->tk_status;
),
- TP_printk("task:%u@%u, status %d",
+ TP_printk("task:%u@%u status=%d",
__entry->task_id, __entry->client_id,
__entry->status)
);
@@ -66,7 +66,7 @@ TRACE_EVENT(rpc_connect_status,
__entry->status = status;
),
- TP_printk("task:%u@%u, status %d",
+ TP_printk("task:%u@%u status=%d",
__entry->task_id, __entry->client_id,
__entry->status)
);
@@ -175,7 +175,7 @@ DECLARE_EVENT_CLASS(rpc_task_queued,
),
TP_fast_assign(
- __entry->client_id = clnt->cl_clid;
+ __entry->client_id = clnt ? clnt->cl_clid : -1;
__entry->task_id = task->tk_pid;
__entry->timeout = task->tk_timeout;
__entry->runstate = task->tk_runstate;
@@ -184,7 +184,7 @@ DECLARE_EVENT_CLASS(rpc_task_queued,
__assign_str(q_name, rpc_qname(q));
),
- TP_printk("task:%u@%u flags=%4.4x state=%4.4lx status=%d timeout=%lu queue=%s",
+ TP_printk("task:%u@%d flags=%4.4x state=%4.4lx status=%d timeout=%lu queue=%s",
__entry->task_id, __entry->client_id,
__entry->flags,
__entry->runstate,
@@ -390,6 +390,10 @@ DECLARE_EVENT_CLASS(rpc_xprt_event,
__entry->status)
);
+DEFINE_EVENT(rpc_xprt_event, xprt_timer,
+ TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status),
+ TP_ARGS(xprt, xid, status));
+
DEFINE_EVENT(rpc_xprt_event, xprt_lookup_rqst,
TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status),
TP_ARGS(xprt, xid, status));
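The rpc_task_queued change above guards against tasks that have no rpc_clnt attached, recording -1 instead of dereferencing a NULL pointer; the matching printk moves from task:%u@%u to task:%u@%d so the sentinel renders as -1. The same idiom, sketched with a hypothetical helper:

    /* Sketch: tolerate a missing client when capturing an id. */
    static int rpc_example_client_id(const struct rpc_task *task)
    {
            /* Some tasks are queued without a client attached;
             * -1 marks them in the trace output. */
            return task->tk_client ? task->tk_client->cl_clid : -1;
    }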
diff --git a/include/trace/events/thermal.h b/include/trace/events/thermal.h
index 78946640fe03..135e5421f003 100644
--- a/include/trace/events/thermal.h
+++ b/include/trace/events/thermal.h
@@ -94,9 +94,9 @@ TRACE_EVENT(thermal_zone_trip,
#ifdef CONFIG_CPU_THERMAL
TRACE_EVENT(thermal_power_cpu_get_power,
TP_PROTO(const struct cpumask *cpus, unsigned long freq, u32 *load,
- size_t load_len, u32 dynamic_power, u32 static_power),
+ size_t load_len, u32 dynamic_power),
- TP_ARGS(cpus, freq, load, load_len, dynamic_power, static_power),
+ TP_ARGS(cpus, freq, load, load_len, dynamic_power),
TP_STRUCT__entry(
__bitmask(cpumask, num_possible_cpus())
@@ -104,7 +104,6 @@ TRACE_EVENT(thermal_power_cpu_get_power,
__dynamic_array(u32, load, load_len)
__field(size_t, load_len )
__field(u32, dynamic_power )
- __field(u32, static_power )
),
TP_fast_assign(
@@ -115,13 +114,12 @@ TRACE_EVENT(thermal_power_cpu_get_power,
load_len * sizeof(*load));
__entry->load_len = load_len;
__entry->dynamic_power = dynamic_power;
- __entry->static_power = static_power;
),
- TP_printk("cpus=%s freq=%lu load={%s} dynamic_power=%d static_power=%d",
+ TP_printk("cpus=%s freq=%lu load={%s} dynamic_power=%d",
__get_bitmask(cpumask), __entry->freq,
__print_array(__get_dynamic_array(load), __entry->load_len, 4),
- __entry->dynamic_power, __entry->static_power)
+ __entry->dynamic_power)
);
TRACE_EVENT(thermal_power_cpu_limit,
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index 16e305e69f34..a57e4ee989d6 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -136,6 +136,24 @@ DEFINE_EVENT(timer_class, timer_cancel,
TP_ARGS(timer)
);
+#define decode_clockid(type) \
+ __print_symbolic(type, \
+ { CLOCK_REALTIME, "CLOCK_REALTIME" }, \
+ { CLOCK_MONOTONIC, "CLOCK_MONOTONIC" }, \
+ { CLOCK_BOOTTIME, "CLOCK_BOOTTIME" }, \
+ { CLOCK_TAI, "CLOCK_TAI" })
+
+#define decode_hrtimer_mode(mode) \
+ __print_symbolic(mode, \
+ { HRTIMER_MODE_ABS, "ABS" }, \
+ { HRTIMER_MODE_REL, "REL" }, \
+ { HRTIMER_MODE_ABS_PINNED, "ABS|PINNED" }, \
+ { HRTIMER_MODE_REL_PINNED, "REL|PINNED" }, \
+ { HRTIMER_MODE_ABS_SOFT, "ABS|SOFT" }, \
+ { HRTIMER_MODE_REL_SOFT, "REL|SOFT" }, \
+ { HRTIMER_MODE_ABS_PINNED_SOFT, "ABS|PINNED|SOFT" }, \
+ { HRTIMER_MODE_REL_PINNED_SOFT, "REL|PINNED|SOFT" })
+
/**
* hrtimer_init - called when the hrtimer is initialized
* @hrtimer: pointer to struct hrtimer
@@ -162,10 +180,8 @@ TRACE_EVENT(hrtimer_init,
),
TP_printk("hrtimer=%p clockid=%s mode=%s", __entry->hrtimer,
- __entry->clockid == CLOCK_REALTIME ?
- "CLOCK_REALTIME" : "CLOCK_MONOTONIC",
- __entry->mode == HRTIMER_MODE_ABS ?
- "HRTIMER_MODE_ABS" : "HRTIMER_MODE_REL")
+ decode_clockid(__entry->clockid),
+ decode_hrtimer_mode(__entry->mode))
);
/**
@@ -174,15 +190,16 @@ TRACE_EVENT(hrtimer_init,
*/
TRACE_EVENT(hrtimer_start,
- TP_PROTO(struct hrtimer *hrtimer),
+ TP_PROTO(struct hrtimer *hrtimer, enum hrtimer_mode mode),
- TP_ARGS(hrtimer),
+ TP_ARGS(hrtimer, mode),
TP_STRUCT__entry(
__field( void *, hrtimer )
__field( void *, function )
__field( s64, expires )
__field( s64, softexpires )
+ __field( enum hrtimer_mode, mode )
),
TP_fast_assign(
@@ -190,12 +207,14 @@ TRACE_EVENT(hrtimer_start,
__entry->function = hrtimer->function;
__entry->expires = hrtimer_get_expires(hrtimer);
__entry->softexpires = hrtimer_get_softexpires(hrtimer);
+ __entry->mode = mode;
),
- TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu",
- __entry->hrtimer, __entry->function,
+ TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu "
+ "mode=%s", __entry->hrtimer, __entry->function,
(unsigned long long) __entry->expires,
- (unsigned long long) __entry->softexpires)
+ (unsigned long long) __entry->softexpires,
+ decode_hrtimer_mode(__entry->mode))
);
/**
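decode_clockid() and decode_hrtimer_mode() use __print_symbolic(), so the ring buffer stores only the raw enum value and the value-to-string mapping runs at output time. An open-coded equivalent of the hrtimer mapping, for illustration only (the function name is made up):

    /* Illustrative expansion of decode_hrtimer_mode() above. */
    static const char *hrtimer_mode_name(enum hrtimer_mode mode)
    {
            switch (mode) {
            case HRTIMER_MODE_ABS:                  return "ABS";
            case HRTIMER_MODE_REL:                  return "REL";
            case HRTIMER_MODE_ABS_PINNED:           return "ABS|PINNED";
            case HRTIMER_MODE_REL_PINNED:           return "REL|PINNED";
            case HRTIMER_MODE_ABS_SOFT:             return "ABS|SOFT";
            case HRTIMER_MODE_REL_SOFT:             return "REL|SOFT";
            case HRTIMER_MODE_ABS_PINNED_SOFT:      return "ABS|PINNED|SOFT";
            case HRTIMER_MODE_REL_PINNED_SOFT:      return "REL|PINNED|SOFT";
            default:                                return "UNKNOWN";
            }
    }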
diff --git a/include/uapi/asm-generic/poll.h b/include/uapi/asm-generic/poll.h
index fefb3d2c3fac..639fade14b23 100644
--- a/include/uapi/asm-generic/poll.h
+++ b/include/uapi/asm-generic/poll.h
@@ -3,35 +3,49 @@
#define __ASM_GENERIC_POLL_H
/* These are specified by iBCS2 */
-#define POLLIN 0x0001
-#define POLLPRI 0x0002
-#define POLLOUT 0x0004
-#define POLLERR 0x0008
-#define POLLHUP 0x0010
-#define POLLNVAL 0x0020
+#define POLLIN (__force __poll_t)0x0001
+#define POLLPRI (__force __poll_t)0x0002
+#define POLLOUT (__force __poll_t)0x0004
+#define POLLERR (__force __poll_t)0x0008
+#define POLLHUP (__force __poll_t)0x0010
+#define POLLNVAL (__force __poll_t)0x0020
/* The rest seem to be more-or-less nonstandard. Check them! */
-#define POLLRDNORM 0x0040
-#define POLLRDBAND 0x0080
+#define POLLRDNORM (__force __poll_t)0x0040
+#define POLLRDBAND (__force __poll_t)0x0080
#ifndef POLLWRNORM
-#define POLLWRNORM 0x0100
+#define POLLWRNORM (__force __poll_t)0x0100
#endif
#ifndef POLLWRBAND
-#define POLLWRBAND 0x0200
+#define POLLWRBAND (__force __poll_t)0x0200
#endif
#ifndef POLLMSG
-#define POLLMSG 0x0400
+#define POLLMSG (__force __poll_t)0x0400
#endif
#ifndef POLLREMOVE
-#define POLLREMOVE 0x1000
+#define POLLREMOVE (__force __poll_t)0x1000
#endif
#ifndef POLLRDHUP
-#define POLLRDHUP 0x2000
+#define POLLRDHUP (__force __poll_t)0x2000
#endif
-#define POLLFREE 0x4000 /* currently only for epoll */
+#define POLLFREE (__force __poll_t)0x4000 /* currently only for epoll */
-#define POLL_BUSY_LOOP 0x8000
+#define POLL_BUSY_LOOP (__force __poll_t)0x8000
+
+#ifdef __KERNEL__
+#ifndef __ARCH_HAS_MANGLED_POLL
+static inline __u16 mangle_poll(__poll_t val)
+{
+ return (__force __u16)val;
+}
+
+static inline __poll_t demangle_poll(__u16 v)
+{
+ return (__force __poll_t)v;
+}
+#endif
+#endif
struct pollfd {
int fd;
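mangle_poll() and demangle_poll() are the default no-op translations between the kernel-internal __poll_t and the 16-bit events/revents fields of struct pollfd; an architecture with a historically different poll bit layout can define __ARCH_HAS_MANGLED_POLL and supply real conversions. A sketch of where they sit (the function is hypothetical; the real call sites are in the poll core):

    /* Sketch: the poll core converts at the userspace boundary. */
    static void do_pollfd_example(struct pollfd *pfd, __poll_t mask)
    {
            /* Userspace bits come in as __u16 ... */
            __poll_t wanted = demangle_poll(pfd->events) |
                              POLLERR | POLLHUP;

            /* ... and kernel results go back out as __u16. */
            pfd->revents = mangle_poll(mask & wanted);
    }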
diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h
index e447283b8f52..254afc31e3be 100644
--- a/include/uapi/asm-generic/siginfo.h
+++ b/include/uapi/asm-generic/siginfo.h
@@ -23,10 +23,6 @@ typedef union sigval {
#define SI_PAD_SIZE ((SI_MAX_SIZE - __ARCH_SI_PREAMBLE_SIZE) / sizeof(int))
#endif
-#ifndef __ARCH_SI_UID_T
-#define __ARCH_SI_UID_T __kernel_uid32_t
-#endif
-
/*
* The default "si_band" type is "long", as specified by POSIX.
* However, some architectures want to override this to "int"
@@ -44,12 +40,15 @@ typedef union sigval {
#define __ARCH_SI_ATTRIBUTES
#endif
-#ifndef HAVE_ARCH_SIGINFO_T
-
typedef struct siginfo {
int si_signo;
+#ifndef __ARCH_HAS_SWAPPED_SIGINFO
int si_errno;
int si_code;
+#else
+ int si_code;
+ int si_errno;
+#endif
union {
int _pad[SI_PAD_SIZE];
@@ -57,14 +56,13 @@ typedef struct siginfo {
/* kill() */
struct {
__kernel_pid_t _pid; /* sender's pid */
- __ARCH_SI_UID_T _uid; /* sender's uid */
+ __kernel_uid32_t _uid; /* sender's uid */
} _kill;
/* POSIX.1b timers */
struct {
__kernel_timer_t _tid; /* timer id */
int _overrun; /* overrun count */
- char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)];
sigval_t _sigval; /* same as below */
int _sys_private; /* not to be passed to user */
} _timer;
@@ -72,34 +70,47 @@ typedef struct siginfo {
/* POSIX.1b signals */
struct {
__kernel_pid_t _pid; /* sender's pid */
- __ARCH_SI_UID_T _uid; /* sender's uid */
+ __kernel_uid32_t _uid; /* sender's uid */
sigval_t _sigval;
} _rt;
/* SIGCHLD */
struct {
__kernel_pid_t _pid; /* which child */
- __ARCH_SI_UID_T _uid; /* sender's uid */
+ __kernel_uid32_t _uid; /* sender's uid */
int _status; /* exit code */
__ARCH_SI_CLOCK_T _utime;
__ARCH_SI_CLOCK_T _stime;
} _sigchld;
- /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
+ /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */
struct {
void __user *_addr; /* faulting insn/memory ref. */
#ifdef __ARCH_SI_TRAPNO
int _trapno; /* TRAP # which caused the signal */
#endif
- short _addr_lsb; /* LSB of the reported address */
+#ifdef __ia64__
+ int _imm; /* immediate value for "break" */
+ unsigned int _flags; /* see ia64 si_flags */
+ unsigned long _isr; /* isr */
+#endif
union {
+ /*
+ * used when si_code=BUS_MCEERR_AR or BUS_MCEERR_AO
+ */
+ short _addr_lsb; /* LSB of the reported address */
/* used when si_code=SEGV_BNDERR */
struct {
+ short _dummy_bnd;
void __user *_lower;
void __user *_upper;
} _addr_bnd;
/* used when si_code=SEGV_PKUERR */
- __u32 _pkey;
+ struct {
+ short _dummy_pkey;
+ __u32 _pkey;
+ } _addr_pkey;
};
} _sigfault;
@@ -118,10 +129,6 @@ typedef struct siginfo {
} _sifields;
} __ARCH_SI_ATTRIBUTES siginfo_t;
-/* If the arch shares siginfo, then it has SIGSYS. */
-#define __ARCH_SIGSYS
-#endif
-
/*
* How these fields are to be accessed.
*/
@@ -143,14 +150,12 @@ typedef struct siginfo {
#define si_addr_lsb _sifields._sigfault._addr_lsb
#define si_lower _sifields._sigfault._addr_bnd._lower
#define si_upper _sifields._sigfault._addr_bnd._upper
-#define si_pkey _sifields._sigfault._pkey
+#define si_pkey _sifields._sigfault._addr_pkey._pkey
#define si_band _sifields._sigpoll._band
#define si_fd _sifields._sigpoll._fd
-#ifdef __ARCH_SIGSYS
#define si_call_addr _sifields._sigsys._call_addr
#define si_syscall _sifields._sigsys._syscall
#define si_arch _sifields._sigsys._arch
-#endif
/*
* si_code values
@@ -165,6 +170,7 @@ typedef struct siginfo {
#define SI_SIGIO -5 /* sent by queued SIGIO */
#define SI_TKILL -6 /* sent by tkill system call */
#define SI_DETHREAD -7 /* sent by execve() killing subsidiary threads */
+#define SI_ASYNCNL -60 /* sent by glibc async name lookup completion */
#define SI_FROMUSER(siptr) ((siptr)->si_code <= 0)
#define SI_FROMKERNEL(siptr) ((siptr)->si_code > 0)
@@ -173,14 +179,34 @@ typedef struct siginfo {
* SIGILL si_codes
*/
#define ILL_ILLOPC 1 /* illegal opcode */
+#ifdef __bfin__
+# define ILL_ILLPARAOP 2 /* illegal opcode combine */
+#endif
#define ILL_ILLOPN 2 /* illegal operand */
#define ILL_ILLADR 3 /* illegal addressing mode */
#define ILL_ILLTRP 4 /* illegal trap */
+#ifdef __bfin__
+# define ILL_ILLEXCPT 4 /* unrecoverable exception */
+#endif
#define ILL_PRVOPC 5 /* privileged opcode */
#define ILL_PRVREG 6 /* privileged register */
#define ILL_COPROC 7 /* coprocessor error */
#define ILL_BADSTK 8 /* internal stack error */
-#define NSIGILL 8
+#ifdef __bfin__
+# define ILL_CPLB_VI 9 /* D/I CPLB protect violation */
+# define ILL_CPLB_MISS 10 /* D/I CPLB miss */
+# define ILL_CPLB_MULHIT 11 /* D/I CPLB multiple hit */
+#endif
+#ifdef __tile__
+# define ILL_DBLFLT 9 /* double fault */
+# define ILL_HARDWALL 10 /* user networks hardwall violation */
+#endif
+#ifdef __ia64__
+# define ILL_BADIADDR 9 /* unimplemented instruction address */
+# define __ILL_BREAK 10 /* illegal break */
+# define __ILL_BNDMOD 11 /* bundle-update (modification) in progress */
+#endif
+#define NSIGILL 11
/*
* SIGFPE si_codes
@@ -193,15 +219,33 @@ typedef struct siginfo {
#define FPE_FLTRES 6 /* floating point inexact result */
#define FPE_FLTINV 7 /* floating point invalid operation */
#define FPE_FLTSUB 8 /* subscript out of range */
-#define NSIGFPE 8
+#ifdef __frv__
+# define FPE_MDAOVF 9 /* media overflow */
+#endif
+#ifdef __ia64__
+# define __FPE_DECOVF 9 /* decimal overflow */
+# define __FPE_DECDIV 10 /* decimal division by zero */
+# define __FPE_DECERR 11 /* packed decimal error */
+# define __FPE_INVASC 12 /* invalid ASCII digit */
+# define __FPE_INVDEC 13 /* invalid decimal digit */
+#endif
+#define NSIGFPE 13
/*
* SIGSEGV si_codes
*/
#define SEGV_MAPERR 1 /* address not mapped to object */
#define SEGV_ACCERR 2 /* invalid permissions for mapped object */
-#define SEGV_BNDERR 3 /* failed address bound checks */
-#define SEGV_PKUERR 4 /* failed protection key checks */
+#ifdef __bfin__
+# define SEGV_STACKFLOW 3 /* stack overflow */
+#else
+# define SEGV_BNDERR 3 /* failed address bound checks */
+#endif
+#ifdef __ia64__
+# define __SEGV_PSTKOVF 4 /* paragraph stack overflow */
+#else
+# define SEGV_PKUERR 4 /* failed protection key checks */
+#endif
#define NSIGSEGV 4
/*
@@ -210,8 +254,12 @@ typedef struct siginfo {
#define BUS_ADRALN 1 /* invalid address alignment */
#define BUS_ADRERR 2 /* non-existent physical address */
#define BUS_OBJERR 3 /* object specific hardware error */
+#ifdef __bfin__
+# define BUS_OPFETCH 4 /* error from instruction fetch */
+#else
/* hardware memory error consumed on a machine check: action required */
-#define BUS_MCEERR_AR 4
+# define BUS_MCEERR_AR 4
+#endif
/* hardware memory error detected in process but not consumed: action optional */
#define BUS_MCEERR_AO 5
#define NSIGBUS 5
@@ -223,9 +271,20 @@ typedef struct siginfo {
#define TRAP_TRACE 2 /* process trace trap */
#define TRAP_BRANCH 3 /* process taken branch trap */
#define TRAP_HWBKPT 4 /* hardware breakpoint/watchpoint */
+#ifdef __bfin__
+# define TRAP_STEP 1 /* single-step breakpoint */
+# define TRAP_TRACEFLOW 2 /* trace buffer overflow */
+# define TRAP_WATCHPT 3 /* watchpoint match */
+# define TRAP_ILLTRAP 4 /* illegal trap */
+#endif
#define NSIGTRAP 4
/*
+ * There is an additional set of SIGTRAP si_codes used by ptrace
+ * that are of the form: ((PTRACE_EVENT_XXX << 8) | SIGTRAP)
+ */
+
+/*
* SIGCHLD si_codes
*/
#define CLD_EXITED 1 /* child has exited */
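The short _dummy_bnd and _dummy_pkey members above are pure padding: _addr_lsb used to sit outside the union, so _lower and _pkey historically started after a short plus alignment padding. Re-creating that padding inside the union keeps their userspace offsets unchanged. A standalone toy program (hypothetical struct names) demonstrating the equivalence:

    #include <stddef.h>
    #include <stdio.h>

    /* Toy copies of the old and new _sigfault layouts (illustrative). */
    struct old_sigfault {
            void *_addr;
            short _addr_lsb;        /* used to live outside the union */
            union {
                    struct { void *_lower; void *_upper; } _addr_bnd;
            };
    };

    struct new_sigfault {
            void *_addr;
            union {
                    short _addr_lsb;        /* now inside the union */
                    struct {
                            short _dummy_bnd;       /* re-creates padding */
                            void *_lower;
                            void *_upper;
                    } _addr_bnd;
            };
    };

    int main(void)
    {
            /* Both prints should show the same offset (16 on LP64). */
            printf("old _lower: %zu\n",
                   offsetof(struct old_sigfault, _addr_bnd._lower));
            printf("new _lower: %zu\n",
                   offsetof(struct new_sigfault, _addr_bnd._lower));
            return 0;
    }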
diff --git a/include/uapi/linux/arm_sdei.h b/include/uapi/linux/arm_sdei.h
new file mode 100644
index 000000000000..af0630ba5437
--- /dev/null
+++ b/include/uapi/linux/arm_sdei.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* Copyright (C) 2017 Arm Ltd. */
+#ifndef _UAPI_LINUX_ARM_SDEI_H
+#define _UAPI_LINUX_ARM_SDEI_H
+
+#define SDEI_1_0_FN_BASE 0xC4000020
+#define SDEI_1_0_MASK 0xFFFFFFE0
+#define SDEI_1_0_FN(n) (SDEI_1_0_FN_BASE + (n))
+
+#define SDEI_1_0_FN_SDEI_VERSION SDEI_1_0_FN(0x00)
+#define SDEI_1_0_FN_SDEI_EVENT_REGISTER SDEI_1_0_FN(0x01)
+#define SDEI_1_0_FN_SDEI_EVENT_ENABLE SDEI_1_0_FN(0x02)
+#define SDEI_1_0_FN_SDEI_EVENT_DISABLE SDEI_1_0_FN(0x03)
+#define SDEI_1_0_FN_SDEI_EVENT_CONTEXT SDEI_1_0_FN(0x04)
+#define SDEI_1_0_FN_SDEI_EVENT_COMPLETE SDEI_1_0_FN(0x05)
+#define SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME SDEI_1_0_FN(0x06)
+#define SDEI_1_0_FN_SDEI_EVENT_UNREGISTER SDEI_1_0_FN(0x07)
+#define SDEI_1_0_FN_SDEI_EVENT_STATUS SDEI_1_0_FN(0x08)
+#define SDEI_1_0_FN_SDEI_EVENT_GET_INFO SDEI_1_0_FN(0x09)
+#define SDEI_1_0_FN_SDEI_EVENT_ROUTING_SET SDEI_1_0_FN(0x0A)
+#define SDEI_1_0_FN_SDEI_PE_MASK SDEI_1_0_FN(0x0B)
+#define SDEI_1_0_FN_SDEI_PE_UNMASK SDEI_1_0_FN(0x0C)
+#define SDEI_1_0_FN_SDEI_INTERRUPT_BIND SDEI_1_0_FN(0x0D)
+#define SDEI_1_0_FN_SDEI_INTERRUPT_RELEASE SDEI_1_0_FN(0x0E)
+#define SDEI_1_0_FN_SDEI_PRIVATE_RESET SDEI_1_0_FN(0x11)
+#define SDEI_1_0_FN_SDEI_SHARED_RESET SDEI_1_0_FN(0x12)
+
+#define SDEI_VERSION_MAJOR_SHIFT 48
+#define SDEI_VERSION_MAJOR_MASK 0x7fff
+#define SDEI_VERSION_MINOR_SHIFT 32
+#define SDEI_VERSION_MINOR_MASK 0xffff
+#define SDEI_VERSION_VENDOR_SHIFT 0
+#define SDEI_VERSION_VENDOR_MASK 0xffffffff
+
+#define SDEI_VERSION_MAJOR(x) (x>>SDEI_VERSION_MAJOR_SHIFT & SDEI_VERSION_MAJOR_MASK)
+#define SDEI_VERSION_MINOR(x) (x>>SDEI_VERSION_MINOR_SHIFT & SDEI_VERSION_MINOR_MASK)
+#define SDEI_VERSION_VENDOR(x) (x>>SDEI_VERSION_VENDOR_SHIFT & SDEI_VERSION_VENDOR_MASK)
+
+/* SDEI return values */
+#define SDEI_SUCCESS 0
+#define SDEI_NOT_SUPPORTED -1
+#define SDEI_INVALID_PARAMETERS -2
+#define SDEI_DENIED -3
+#define SDEI_PENDING -5
+#define SDEI_OUT_OF_RESOURCE -10
+
+/* EVENT_REGISTER flags */
+#define SDEI_EVENT_REGISTER_RM_ANY 0
+#define SDEI_EVENT_REGISTER_RM_PE 1
+
+/* EVENT_STATUS return value bits */
+#define SDEI_EVENT_STATUS_RUNNING 2
+#define SDEI_EVENT_STATUS_ENABLED 1
+#define SDEI_EVENT_STATUS_REGISTERED 0
+
+/* EVENT_COMPLETE status values */
+#define SDEI_EV_HANDLED 0
+#define SDEI_EV_FAILED 1
+
+/* GET_INFO values */
+#define SDEI_EVENT_INFO_EV_TYPE 0
+#define SDEI_EVENT_INFO_EV_SIGNALED 1
+#define SDEI_EVENT_INFO_EV_PRIORITY 2
+#define SDEI_EVENT_INFO_EV_ROUTING_MODE 3
+#define SDEI_EVENT_INFO_EV_ROUTING_AFF 4
+
+/* and their results */
+#define SDEI_EVENT_TYPE_PRIVATE 0
+#define SDEI_EVENT_TYPE_SHARED 1
+#define SDEI_EVENT_PRIORITY_NORMAL 0
+#define SDEI_EVENT_PRIORITY_CRITICAL 1
+
+#endif /* _UAPI_LINUX_ARM_SDEI_H */
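The SDEI_VERSION_* macros slice one 64-bit SDEI_VERSION result into a 15-bit major, 16-bit minor and 32-bit vendor field. A small standalone check with a made-up version value:

    #include <stdint.h>
    #include <stdio.h>

    #define SDEI_VERSION_MAJOR_SHIFT  48
    #define SDEI_VERSION_MAJOR_MASK   0x7fff
    #define SDEI_VERSION_MINOR_SHIFT  32
    #define SDEI_VERSION_MINOR_MASK   0xffff

    int main(void)
    {
            /* Hypothetical firmware reply: major=1, minor=0, vendor=0. */
            uint64_t v = (uint64_t)1 << SDEI_VERSION_MAJOR_SHIFT;

            printf("SDEI %llu.%llu\n",
                   (unsigned long long)(v >> SDEI_VERSION_MAJOR_SHIFT &
                                        SDEI_VERSION_MAJOR_MASK),
                   (unsigned long long)(v >> SDEI_VERSION_MINOR_SHIFT &
                                        SDEI_VERSION_MINOR_MASK));
            return 0;   /* prints "SDEI 1.0" */
    }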
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index ce615b75e855..c8d99b9ca550 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -33,7 +33,12 @@ struct btrfs_ioctl_vol_args {
char name[BTRFS_PATH_NAME_MAX + 1];
};
-#define BTRFS_DEVICE_PATH_NAME_MAX 1024
+#define BTRFS_DEVICE_PATH_NAME_MAX 1024
+#define BTRFS_SUBVOL_NAME_MAX 4039
+
+#define BTRFS_SUBVOL_CREATE_ASYNC (1ULL << 0)
+#define BTRFS_SUBVOL_RDONLY (1ULL << 1)
+#define BTRFS_SUBVOL_QGROUP_INHERIT (1ULL << 2)
#define BTRFS_DEVICE_SPEC_BY_ID (1ULL << 3)
@@ -101,11 +106,7 @@ struct btrfs_ioctl_qgroup_limit_args {
* - BTRFS_IOC_SUBVOL_GETFLAGS
* - BTRFS_IOC_SUBVOL_SETFLAGS
*/
-#define BTRFS_SUBVOL_CREATE_ASYNC (1ULL << 0)
-#define BTRFS_SUBVOL_RDONLY (1ULL << 1)
-#define BTRFS_SUBVOL_QGROUP_INHERIT (1ULL << 2)
-#define BTRFS_SUBVOL_NAME_MAX 4039
struct btrfs_ioctl_vol_args_v2 {
__s64 fd;
__u64 transid;
diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h
index 6d6e5da51527..aff1356c2bb8 100644
--- a/include/uapi/linux/btrfs_tree.h
+++ b/include/uapi/linux/btrfs_tree.h
@@ -456,6 +456,8 @@ struct btrfs_free_space_header {
#define BTRFS_SUPER_FLAG_SEEDING (1ULL << 32)
#define BTRFS_SUPER_FLAG_METADUMP (1ULL << 33)
+#define BTRFS_SUPER_FLAG_METADUMP_V2 (1ULL << 34)
+#define BTRFS_SUPER_FLAG_CHANGING_FSID (1ULL << 35)
/*
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index 4199f8acbce5..d2a8313fabd7 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -377,7 +377,11 @@ typedef int __bitwise __kernel_rwf_t;
/* per-IO, return -EAGAIN if operation would block */
#define RWF_NOWAIT ((__force __kernel_rwf_t)0x00000008)
+/* per-IO O_APPEND */
+#define RWF_APPEND ((__force __kernel_rwf_t)0x00000010)
+
/* mask of flags supported by the kernel */
-#define RWF_SUPPORTED (RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT)
+#define RWF_SUPPORTED (RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT |\
+ RWF_APPEND)
#endif /* _UAPI_LINUX_FS_H */
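RWF_APPEND gives a single write O_APPEND semantics without the descriptor itself being opened O_APPEND. A userspace sketch, assuming a glibc that exposes pwritev2() and a kernel accepting the flag:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/uio.h>
    #include <unistd.h>

    #ifndef RWF_APPEND
    #define RWF_APPEND 0x00000010   /* matches the uapi value above */
    #endif

    int main(void)
    {
            struct iovec iov = { .iov_base = (void *)"hello\n", .iov_len = 6 };
            int fd = open("log.txt", O_WRONLY | O_CREAT, 0644);

            if (fd < 0)
                    return 1;
            /* This one call appends at end-of-file, whatever the
             * current file position is. */
            if (pwritev2(fd, &iov, 1, -1, RWF_APPEND) < 0)
                    perror("pwritev2");
            close(fd);
            return 0;
    }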
diff --git a/include/uapi/linux/gfs2_ondisk.h b/include/uapi/linux/gfs2_ondisk.h
index 5156bad77b47..2dc10a034de1 100644
--- a/include/uapi/linux/gfs2_ondisk.h
+++ b/include/uapi/linux/gfs2_ondisk.h
@@ -187,10 +187,19 @@ struct gfs2_rgrp {
__be32 rg_flags;
__be32 rg_free;
__be32 rg_dinodes;
- __be32 __pad;
+ union {
+ __be32 __pad;
+ __be32 rg_skip; /* Distance to the next rgrp in fs blocks */
+ };
__be64 rg_igeneration;
-
- __u8 rg_reserved[80]; /* Several fields from gfs1 now reserved */
+ /* The following 3 fields are duplicated from gfs2_rindex to reduce
+ reliance on the rindex */
+ __be64 rg_data0; /* First data location */
+ __be32 rg_data; /* Number of data blocks in rgrp */
+ __be32 rg_bitbytes; /* Number of bytes in data bitmaps */
+ __be32 rg_crc; /* crc32 of the structure with this field 0 */
+
+ __u8 rg_reserved[60]; /* Several fields from gfs1 now reserved */
};
/*
@@ -394,7 +403,36 @@ struct gfs2_ea_header {
* Log header structure
*/
-#define GFS2_LOG_HEAD_UNMOUNT 0x00000001 /* log is clean */
+#define GFS2_LOG_HEAD_UNMOUNT 0x00000001 /* log is clean */
+#define GFS2_LOG_HEAD_FLUSH_NORMAL 0x00000002 /* normal log flush */
+#define GFS2_LOG_HEAD_FLUSH_SYNC 0x00000004 /* Sync log flush */
+#define GFS2_LOG_HEAD_FLUSH_SHUTDOWN 0x00000008 /* Shutdown log flush */
+#define GFS2_LOG_HEAD_FLUSH_FREEZE 0x00000010 /* Freeze flush */
+#define GFS2_LOG_HEAD_RECOVERY 0x00000020 /* Journal recovery */
+#define GFS2_LOG_HEAD_USERSPACE 0x80000000 /* Written by gfs2-utils */
+
+/* Log flush callers */
+#define GFS2_LFC_SHUTDOWN 0x00000100
+#define GFS2_LFC_JDATA_WPAGES 0x00000200
+#define GFS2_LFC_SET_FLAGS 0x00000400
+#define GFS2_LFC_AIL_EMPTY_GL 0x00000800
+#define GFS2_LFC_AIL_FLUSH 0x00001000
+#define GFS2_LFC_RGRP_GO_SYNC 0x00002000
+#define GFS2_LFC_INODE_GO_SYNC 0x00004000
+#define GFS2_LFC_INODE_GO_INVAL 0x00008000
+#define GFS2_LFC_FREEZE_GO_SYNC 0x00010000
+#define GFS2_LFC_KILL_SB 0x00020000
+#define GFS2_LFC_DO_SYNC 0x00040000
+#define GFS2_LFC_INPLACE_RESERVE 0x00080000
+#define GFS2_LFC_WRITE_INODE 0x00100000
+#define GFS2_LFC_MAKE_FS_RO 0x00200000
+#define GFS2_LFC_SYNC_FS 0x00400000
+#define GFS2_LFC_EVICT_INODE 0x00800000
+#define GFS2_LFC_TRANS_END 0x01000000
+#define GFS2_LFC_LOGD_JFLUSH_REQD 0x02000000
+#define GFS2_LFC_LOGD_AIL_FLUSH_REQD 0x04000000
+
+#define LH_V1_SIZE (offsetofend(struct gfs2_log_header, lh_hash))
struct gfs2_log_header {
struct gfs2_meta_header lh_header;
@@ -403,7 +441,21 @@ struct gfs2_log_header {
__be32 lh_flags; /* GFS2_LOG_HEAD_... */
__be32 lh_tail; /* Block number of log tail */
__be32 lh_blkno;
- __be32 lh_hash;
+ __be32 lh_hash; /* crc up to here with this field 0 */
+
+ /* Version 2 additional fields start here */
+ __be32 lh_crc; /* crc32c from lh_nsec to end of block */
+ __be32 lh_nsec; /* Nanoseconds of timestamp */
+ __be64 lh_sec; /* Seconds of timestamp */
+ __be64 lh_addr; /* Block addr of this log header (absolute) */
+ __be64 lh_jinode; /* Journal inode number */
+ __be64 lh_statfs_addr; /* Local statfs inode number */
+ __be64 lh_quota_addr; /* Local quota change inode number */
+
+ /* Statfs local changes (i.e. diff from global statfs) */
+ __be64 lh_local_total;
+ __be64 lh_local_free;
+ __be64 lh_local_dinodes;
};
/*
diff --git a/include/uapi/linux/lightnvm.h b/include/uapi/linux/lightnvm.h
index 42d1a434af29..f9a1be7fc696 100644
--- a/include/uapi/linux/lightnvm.h
+++ b/include/uapi/linux/lightnvm.h
@@ -75,14 +75,23 @@ struct nvm_ioctl_create_simple {
__u32 lun_end;
};
+struct nvm_ioctl_create_extended {
+ __u16 lun_begin;
+ __u16 lun_end;
+ __u16 op;
+ __u16 rsv;
+};
+
enum {
NVM_CONFIG_TYPE_SIMPLE = 0,
+ NVM_CONFIG_TYPE_EXTENDED = 1,
};
struct nvm_ioctl_create_conf {
__u32 type;
union {
struct nvm_ioctl_create_simple s;
+ struct nvm_ioctl_create_extended e;
};
};
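The union is discriminated by type: NVM_CONFIG_TYPE_SIMPLE selects the legacy lun range, NVM_CONFIG_TYPE_EXTENDED adds the op field. A sketch of filling the extended form (the op value is illustrative; the surrounding ioctl plumbing is omitted):

    #include <linux/lightnvm.h>
    #include <string.h>

    /* Sketch: request the extended create layout. */
    static void fill_create_conf(struct nvm_ioctl_create_conf *conf)
    {
            memset(conf, 0, sizeof(*conf));
            conf->type = NVM_CONFIG_TYPE_EXTENDED;
            conf->e.lun_begin = 0;
            conf->e.lun_end = 127;
            conf->e.op = 20;        /* illustrative value */
    }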
diff --git a/include/uapi/linux/nfs.h b/include/uapi/linux/nfs.h
index 057d22a48416..946cb62d64b0 100644
--- a/include/uapi/linux/nfs.h
+++ b/include/uapi/linux/nfs.h
@@ -12,6 +12,7 @@
#define NFS_PROGRAM 100003
#define NFS_PORT 2049
+#define NFS_RDMA_PORT 20049
#define NFS_MAXDATA 8192
#define NFS_MAXPATHLEN 1024
#define NFS_MAXNAMLEN 255
diff --git a/include/uapi/linux/nubus.h b/include/uapi/linux/nubus.h
index f3776cc80f4d..48031e7858f1 100644
--- a/include/uapi/linux/nubus.h
+++ b/include/uapi/linux/nubus.h
@@ -221,27 +221,4 @@ enum nubus_display_res_id {
NUBUS_RESID_SIXTHMODE = 0x0085
};
-struct nubus_dir
-{
- unsigned char *base;
- unsigned char *ptr;
- int done;
- int mask;
-};
-
-struct nubus_dirent
-{
- unsigned char *base;
- unsigned char type;
- __u32 data; /* Actually 24bits used */
- int mask;
-};
-
-
-/* We'd like to get rid of this eventually. Only daynaport.c uses it now. */
-static inline void *nubus_slot_addr(int slot)
-{
- return (void *)(0xF0000000|(slot<<24));
-}
-
#endif /* _UAPILINUX_NUBUS_H */
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 769533696483..e0739a1aa4b2 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -634,9 +634,12 @@ struct perf_event_mmap_page {
*/
#define PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT (1 << 12)
/*
- * PERF_RECORD_MISC_MMAP_DATA and PERF_RECORD_MISC_COMM_EXEC are used on
- * different events so can reuse the same bit position.
- * Ditto PERF_RECORD_MISC_SWITCH_OUT.
+ * The following PERF_RECORD_MISC_* flags are used on different
+ * events, so they can reuse the same bit position:
+ *
+ * PERF_RECORD_MISC_MMAP_DATA - PERF_RECORD_MMAP* events
+ * PERF_RECORD_MISC_COMM_EXEC - PERF_RECORD_COMM event
+ * PERF_RECORD_MISC_SWITCH_OUT - PERF_RECORD_SWITCH* events
*/
#define PERF_RECORD_MISC_MMAP_DATA (1 << 13)
#define PERF_RECORD_MISC_COMM_EXEC (1 << 13)
@@ -886,6 +889,7 @@ enum perf_event_type {
* struct perf_event_header header;
* u32 pid;
* u32 tid;
+ * struct sample_id sample_id;
* };
*/
PERF_RECORD_ITRACE_START = 12,
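Since bit 13 of the misc field is shared, a perf consumer has to dispatch on the record type before interpreting it. A sketch (the helper name is made up; the constants come from this header):

    #include <linux/perf_event.h>

    /* Sketch: what bit 13 of misc means for a given record type. */
    static const char *perf_misc_bit13(unsigned int type)
    {
            switch (type) {
            case PERF_RECORD_MMAP:
            case PERF_RECORD_MMAP2:
                    return "MISC_MMAP_DATA";
            case PERF_RECORD_COMM:
                    return "MISC_COMM_EXEC";
            case PERF_RECORD_SWITCH:
            case PERF_RECORD_SWITCH_CPU_WIDE:
                    return "MISC_SWITCH_OUT";
            default:
                    return "unused";
            }
    }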
diff --git a/include/uapi/linux/ptrace.h b/include/uapi/linux/ptrace.h
index e3939e00980b..e46d82b91166 100644
--- a/include/uapi/linux/ptrace.h
+++ b/include/uapi/linux/ptrace.h
@@ -66,6 +66,12 @@ struct ptrace_peeksiginfo_args {
#define PTRACE_SETSIGMASK 0x420b
#define PTRACE_SECCOMP_GET_FILTER 0x420c
+#define PTRACE_SECCOMP_GET_METADATA 0x420d
+
+struct seccomp_metadata {
+ unsigned long filter_off; /* Input: which filter */
+ unsigned int flags; /* Output: filter's flags */
+};
/* Read signals from a shared (process wide) queue */
#define PTRACE_PEEKSIGINFO_SHARED (1 << 0)
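PTRACE_SECCOMP_GET_METADATA reads back the flags of a stopped tracee's seccomp filters: addr carries the size of the struct and data points at it, with filter_off as the input field. A userspace sketch (assumes the tracee is already attached and stopped):

    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>

    #ifndef PTRACE_SECCOMP_GET_METADATA
    #define PTRACE_SECCOMP_GET_METADATA 0x420d
    #endif

    struct seccomp_metadata {               /* mirror of the uapi struct */
            unsigned long filter_off;       /* Input: which filter */
            unsigned int flags;             /* Output: filter's flags */
    };

    /* Sketch: query the flags of the tracee's first filter. */
    static int get_seccomp_flags(pid_t pid)
    {
            struct seccomp_metadata md = { .filter_off = 0 };

            if (ptrace(PTRACE_SECCOMP_GET_METADATA, pid,
                       sizeof(md), &md) < 0)
                    return -1;
            printf("filter 0 flags: %#x\n", md.flags);
            return 0;
    }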
diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h
index 30a9e51bbb1e..22627f80063e 100644
--- a/include/uapi/linux/sched.h
+++ b/include/uapi/linux/sched.h
@@ -49,5 +49,10 @@
*/
#define SCHED_FLAG_RESET_ON_FORK 0x01
#define SCHED_FLAG_RECLAIM 0x02
+#define SCHED_FLAG_DL_OVERRUN 0x04
+
+#define SCHED_FLAG_ALL (SCHED_FLAG_RESET_ON_FORK | \
+ SCHED_FLAG_RECLAIM | \
+ SCHED_FLAG_DL_OVERRUN)
#endif /* _UAPI_LINUX_SCHED_H */
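SCHED_FLAG_DL_OVERRUN asks the kernel to deliver SIGXCPU to a SCHED_DEADLINE task that overruns its runtime. There was no glibc wrapper for sched_setattr(), so a raw-syscall sketch (the struct mirror and the timing values are illustrative):

    #define _GNU_SOURCE
    #include <stdint.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef SCHED_FLAG_DL_OVERRUN
    #define SCHED_FLAG_DL_OVERRUN 0x04      /* matches the uapi value */
    #endif

    struct sched_attr_min {                 /* minimal sched_attr mirror */
            uint32_t size;
            uint32_t sched_policy;
            uint64_t sched_flags;
            int32_t  sched_nice;
            uint32_t sched_priority;
            uint64_t sched_runtime;
            uint64_t sched_deadline;
            uint64_t sched_period;
    };

    /* Sketch: deadline task that wants SIGXCPU on runtime overrun. */
    static int set_dl_with_overrun_signal(void)
    {
            struct sched_attr_min attr = {
                    .size           = sizeof(attr),
                    .sched_policy   = 6,    /* SCHED_DEADLINE */
                    .sched_flags    = SCHED_FLAG_DL_OVERRUN,
                    .sched_runtime  = 10 * 1000 * 1000,     /* 10 ms */
                    .sched_deadline = 30 * 1000 * 1000,     /* 30 ms */
                    .sched_period   = 30 * 1000 * 1000,
            };

            return syscall(SYS_sched_setattr, 0, &attr, 0);
    }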
diff --git a/include/uapi/linux/types.h b/include/uapi/linux/types.h
index e3d1d0c78f3c..cd4f0b897a48 100644
--- a/include/uapi/linux/types.h
+++ b/include/uapi/linux/types.h
@@ -49,5 +49,11 @@ typedef __u32 __bitwise __wsum;
#define __aligned_be64 __be64 __attribute__((aligned(8)))
#define __aligned_le64 __le64 __attribute__((aligned(8)))
+#ifdef __CHECK_POLL
+typedef unsigned __bitwise __poll_t;
+#else
+typedef unsigned __poll_t;
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* _UAPI_LINUX_TYPES_H */
diff --git a/include/uapi/rdma/bnxt_re-abi.h b/include/uapi/rdma/bnxt_re-abi.h
index 398a514ee446..db54115be044 100644
--- a/include/uapi/rdma/bnxt_re-abi.h
+++ b/include/uapi/rdma/bnxt_re-abi.h
@@ -82,6 +82,15 @@ struct bnxt_re_qp_resp {
__u32 rsvd;
};
+struct bnxt_re_srq_req {
+ __u64 srqva;
+ __u64 srq_handle;
+};
+
+struct bnxt_re_srq_resp {
+ __u32 srqid;
+};
+
enum bnxt_re_shpg_offt {
BNXT_RE_BEG_RESV_OFFT = 0x00,
BNXT_RE_AVID_OFFT = 0x10,
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 7e11bb8651b6..04d0e67b1312 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -402,13 +402,18 @@ struct ib_uverbs_create_cq {
__u64 driver_data[0];
};
+enum ib_uverbs_ex_create_cq_flags {
+ IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION = 1 << 0,
+ IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN = 1 << 1,
+};
+
struct ib_uverbs_ex_create_cq {
__u64 user_handle;
__u32 cqe;
__u32 comp_vector;
__s32 comp_channel;
__u32 comp_mask;
- __u32 flags;
+ __u32 flags; /* bitmask of ib_uverbs_ex_create_cq_flags */
__u32 reserved;
};
@@ -449,7 +454,7 @@ struct ib_uverbs_wc {
__u32 vendor_err;
__u32 byte_len;
union {
- __u32 imm_data;
+ __be32 imm_data;
__u32 invalidate_rkey;
} ex;
__u32 qp_num;
@@ -765,7 +770,7 @@ struct ib_uverbs_send_wr {
__u32 opcode;
__u32 send_flags;
union {
- __u32 imm_data;
+ __be32 imm_data;
__u32 invalidate_rkey;
} ex;
union {
diff --git a/include/uapi/rdma/mlx4-abi.h b/include/uapi/rdma/mlx4-abi.h
index 224b52b6279c..7f9c37346613 100644
--- a/include/uapi/rdma/mlx4-abi.h
+++ b/include/uapi/rdma/mlx4-abi.h
@@ -97,8 +97,8 @@ struct mlx4_ib_create_srq_resp {
};
struct mlx4_ib_create_qp_rss {
- __u64 rx_hash_fields_mask;
- __u8 rx_hash_function;
+ __u64 rx_hash_fields_mask; /* Use enum mlx4_ib_rx_hash_fields */
+ __u8 rx_hash_function; /* Use enum mlx4_ib_rx_hash_function_flags */
__u8 reserved[7];
__u8 rx_hash_key[40];
__u32 comp_mask;
@@ -152,7 +152,8 @@ enum mlx4_ib_rx_hash_fields {
MLX4_IB_RX_HASH_SRC_PORT_TCP = 1 << 4,
MLX4_IB_RX_HASH_DST_PORT_TCP = 1 << 5,
MLX4_IB_RX_HASH_SRC_PORT_UDP = 1 << 6,
- MLX4_IB_RX_HASH_DST_PORT_UDP = 1 << 7
+ MLX4_IB_RX_HASH_DST_PORT_UDP = 1 << 7,
+ MLX4_IB_RX_HASH_INNER = 1ULL << 31,
};
#endif /* MLX4_ABI_USER_H */
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index a33e0517d3fd..1111aa4e7c1e 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -41,6 +41,9 @@ enum {
MLX5_QP_FLAG_SIGNATURE = 1 << 0,
MLX5_QP_FLAG_SCATTER_CQE = 1 << 1,
MLX5_QP_FLAG_TUNNEL_OFFLOADS = 1 << 2,
+ MLX5_QP_FLAG_BFREG_INDEX = 1 << 3,
+ MLX5_QP_FLAG_TYPE_DCT = 1 << 4,
+ MLX5_QP_FLAG_TYPE_DCI = 1 << 5,
};
enum {
@@ -121,10 +124,12 @@ struct mlx5_ib_alloc_ucontext_resp {
__u8 cqe_version;
__u8 cmds_supp_uhw;
__u8 eth_min_inline;
- __u8 reserved2;
+ __u8 clock_info_versions;
__u64 hca_core_clock_offset;
__u32 log_uar_size;
__u32 num_uars_per_page;
+ __u32 num_dyn_bfregs;
+ __u32 reserved3;
};
struct mlx5_ib_alloc_pd_resp {
@@ -280,8 +285,11 @@ struct mlx5_ib_create_qp {
__u32 rq_wqe_shift;
__u32 flags;
__u32 uidx;
- __u32 reserved0;
- __u64 sq_buf_addr;
+ __u32 bfreg_index;
+ union {
+ __u64 sq_buf_addr;
+ __u64 access_key;
+ };
};
/* RX Hash function flags */
@@ -307,7 +315,7 @@ enum mlx5_rx_hash_fields {
MLX5_RX_HASH_SRC_PORT_UDP = 1 << 6,
MLX5_RX_HASH_DST_PORT_UDP = 1 << 7,
/* Save bits for future fields */
- MLX5_RX_HASH_INNER = 1 << 31
+ MLX5_RX_HASH_INNER = (1UL << 31),
};
struct mlx5_ib_create_qp_rss {
@@ -354,6 +362,11 @@ struct mlx5_ib_create_ah_resp {
__u8 reserved[6];
};
+struct mlx5_ib_modify_qp_resp {
+ __u32 response_length;
+ __u32 dctn;
+};
+
struct mlx5_ib_create_wq_resp {
__u32 response_length;
__u32 reserved;
@@ -368,4 +381,36 @@ struct mlx5_ib_modify_wq {
__u32 comp_mask;
__u32 reserved;
};
+
+struct mlx5_ib_clock_info {
+ __u32 sign;
+ __u32 resv;
+ __u64 nsec;
+ __u64 cycles;
+ __u64 frac;
+ __u32 mult;
+ __u32 shift;
+ __u64 mask;
+ __u64 overflow_period;
+};
+
+enum mlx5_ib_mmap_cmd {
+ MLX5_IB_MMAP_REGULAR_PAGE = 0,
+ MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES = 1,
+ MLX5_IB_MMAP_WC_PAGE = 2,
+ MLX5_IB_MMAP_NC_PAGE = 3,
+ /* 5 is chosen in order to be compatible with old versions of libmlx5 */
+ MLX5_IB_MMAP_CORE_CLOCK = 5,
+ MLX5_IB_MMAP_ALLOC_WC = 6,
+ MLX5_IB_MMAP_CLOCK_INFO = 7,
+};
+
+enum {
+ MLX5_IB_CLOCK_INFO_KERNEL_UPDATING = 1,
+};
+
+/* Bit indexes for the mlx5_alloc_ucontext_resp.clock_info_versions bitmap */
+enum {
+ MLX5_IB_CLOCK_INFO_V1 = 0,
+};
#endif /* MLX5_ABI_USER_H */
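struct mlx5_ib_clock_info describes a page the kernel keeps updating and userspace maps read-only; sign doubles as a sequence counter, with MLX5_IB_CLOCK_INFO_KERNEL_UPDATING flagging a write in progress, so readers retry seqlock-style. A sketch of the read side (the struct mirror and barrier macro are illustrative; real consumers live in rdma-core):

    #include <stdint.h>

    /* Compiler barrier only; a production reader also needs the
     * architecture's read memory barriers. */
    #define READ_BARRIER()  __asm__ __volatile__("" ::: "memory")

    #define CLOCK_INFO_KERNEL_UPDATING 1    /* matches the uapi enum */

    struct clock_info_min {                 /* mirrors mlx5_ib_clock_info */
            uint32_t sign;
            uint32_t resv;
            uint64_t nsec;
            uint64_t cycles;
            uint64_t frac;
            uint32_t mult;
            uint32_t shift;
            uint64_t mask;
            uint64_t overflow_period;
    };

    /* Sketch: take a consistent snapshot of the clock page. */
    static void clock_info_read(const volatile struct clock_info_min *ci,
                                struct clock_info_min *snap)
    {
            uint32_t sign;

            do {
                    sign = ci->sign;
                    READ_BARRIER();
                    snap->nsec   = ci->nsec;
                    snap->cycles = ci->cycles;
                    snap->mult   = ci->mult;
                    snap->shift  = ci->shift;
                    snap->mask   = ci->mask;
                    READ_BARRIER();
            } while ((sign & CLOCK_INFO_KERNEL_UPDATING) ||
                     sign != ci->sign);
    }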
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index cc002e316d09..17e59bec169e 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -236,6 +236,10 @@ enum rdma_nldev_command {
RDMA_NLDEV_CMD_PORT_NEW,
RDMA_NLDEV_CMD_PORT_DEL,
+ RDMA_NLDEV_CMD_RES_GET, /* can dump */
+
+ RDMA_NLDEV_CMD_RES_QP_GET, /* can dump */
+
RDMA_NLDEV_NUM_OPS
};
@@ -303,6 +307,51 @@ enum rdma_nldev_attr {
RDMA_NLDEV_ATTR_DEV_NODE_TYPE, /* u8 */
+ RDMA_NLDEV_ATTR_RES_SUMMARY, /* nested table */
+ RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY, /* nested table */
+ RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, /* string */
+ RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, /* u64 */
+
+ RDMA_NLDEV_ATTR_RES_QP, /* nested table */
+ RDMA_NLDEV_ATTR_RES_QP_ENTRY, /* nested table */
+ /*
+ * Local QPN
+ */
+ RDMA_NLDEV_ATTR_RES_LQPN, /* u32 */
+ /*
+ * Remote QPN;
+ * applicable for RC and UC only (IBTA 11.2.5.3 QUERY QUEUE PAIR)
+ */
+ RDMA_NLDEV_ATTR_RES_RQPN, /* u32 */
+ /*
+ * Receive Queue PSN;
+ * applicable for RC and UC only (IBTA 11.2.5.3 QUERY QUEUE PAIR)
+ */
+ RDMA_NLDEV_ATTR_RES_RQ_PSN, /* u32 */
+ /*
+ * Send Queue PSN
+ */
+ RDMA_NLDEV_ATTR_RES_SQ_PSN, /* u32 */
+ RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE, /* u8 */
+ /*
+ * QP types as visible to RDMA/core; the reserved QPT
+ * values are not exported through this interface.
+ */
+ RDMA_NLDEV_ATTR_RES_TYPE, /* u8 */
+ RDMA_NLDEV_ATTR_RES_STATE, /* u8 */
+ /*
+ * Process ID of the object's creator;
+ * absent when the object is of kernel origin.
+ */
+ RDMA_NLDEV_ATTR_RES_PID, /* u32 */
+ /*
+ * Name of the process that created the resource;
+ * set only for kernel objects. For user-created
+ * objects, read the /proc/PID/comm file instead.
+ */
+ RDMA_NLDEV_ATTR_RES_KERN_NAME, /* string */
+
RDMA_NLDEV_ATTR_MAX
};
#endif /* _UAPI_RDMA_NETLINK_H */
diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h
index aaa352f2f110..02ca0d0f1eb7 100644
--- a/include/uapi/rdma/vmw_pvrdma-abi.h
+++ b/include/uapi/rdma/vmw_pvrdma-abi.h
@@ -52,12 +52,14 @@
#define PVRDMA_UVERBS_ABI_VERSION 3 /* ABI Version. */
#define PVRDMA_UAR_HANDLE_MASK 0x00FFFFFF /* Bottom 24 bits. */
#define PVRDMA_UAR_QP_OFFSET 0 /* QP doorbell. */
-#define PVRDMA_UAR_QP_SEND BIT(30) /* Send bit. */
-#define PVRDMA_UAR_QP_RECV BIT(31) /* Recv bit. */
+#define PVRDMA_UAR_QP_SEND (1 << 30) /* Send bit. */
+#define PVRDMA_UAR_QP_RECV (1 << 31) /* Recv bit. */
#define PVRDMA_UAR_CQ_OFFSET 4 /* CQ doorbell. */
-#define PVRDMA_UAR_CQ_ARM_SOL BIT(29) /* Arm solicited bit. */
-#define PVRDMA_UAR_CQ_ARM BIT(30) /* Arm bit. */
-#define PVRDMA_UAR_CQ_POLL BIT(31) /* Poll bit. */
+#define PVRDMA_UAR_CQ_ARM_SOL (1 << 29) /* Arm solicited bit. */
+#define PVRDMA_UAR_CQ_ARM (1 << 30) /* Arm bit. */
+#define PVRDMA_UAR_CQ_POLL (1 << 31) /* Poll bit. */
+#define PVRDMA_UAR_SRQ_OFFSET 8 /* SRQ doorbell. */
+#define PVRDMA_UAR_SRQ_RECV (1 << 30) /* Recv bit. */
enum pvrdma_wr_opcode {
PVRDMA_WR_RDMA_WRITE,
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index c227ccba60ae..07d61583fd02 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -214,6 +214,11 @@ typedef int __bitwise snd_pcm_format_t;
#define SNDRV_PCM_FORMAT_IMA_ADPCM ((__force snd_pcm_format_t) 22)
#define SNDRV_PCM_FORMAT_MPEG ((__force snd_pcm_format_t) 23)
#define SNDRV_PCM_FORMAT_GSM ((__force snd_pcm_format_t) 24)
+#define SNDRV_PCM_FORMAT_S20_LE ((__force snd_pcm_format_t) 25) /* in four bytes, LSB justified */
+#define SNDRV_PCM_FORMAT_S20_BE ((__force snd_pcm_format_t) 26) /* in four bytes, LSB justified */
+#define SNDRV_PCM_FORMAT_U20_LE ((__force snd_pcm_format_t) 27) /* in four bytes, LSB justified */
+#define SNDRV_PCM_FORMAT_U20_BE ((__force snd_pcm_format_t) 28) /* in four bytes, LSB justified */
+/* gap in the numbering for a future standard linear format */
#define SNDRV_PCM_FORMAT_SPECIAL ((__force snd_pcm_format_t) 31)
#define SNDRV_PCM_FORMAT_S24_3LE ((__force snd_pcm_format_t) 32) /* in three bytes */
#define SNDRV_PCM_FORMAT_S24_3BE ((__force snd_pcm_format_t) 33) /* in three bytes */
@@ -248,6 +253,8 @@ typedef int __bitwise snd_pcm_format_t;
#define SNDRV_PCM_FORMAT_FLOAT SNDRV_PCM_FORMAT_FLOAT_LE
#define SNDRV_PCM_FORMAT_FLOAT64 SNDRV_PCM_FORMAT_FLOAT64_LE
#define SNDRV_PCM_FORMAT_IEC958_SUBFRAME SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE
+#define SNDRV_PCM_FORMAT_S20 SNDRV_PCM_FORMAT_S20_LE
+#define SNDRV_PCM_FORMAT_U20 SNDRV_PCM_FORMAT_U20_LE
#endif
#ifdef SNDRV_BIG_ENDIAN
#define SNDRV_PCM_FORMAT_S16 SNDRV_PCM_FORMAT_S16_BE
@@ -259,6 +266,8 @@ typedef int __bitwise snd_pcm_format_t;
#define SNDRV_PCM_FORMAT_FLOAT SNDRV_PCM_FORMAT_FLOAT_BE
#define SNDRV_PCM_FORMAT_FLOAT64 SNDRV_PCM_FORMAT_FLOAT64_BE
#define SNDRV_PCM_FORMAT_IEC958_SUBFRAME SNDRV_PCM_FORMAT_IEC958_SUBFRAME_BE
+#define SNDRV_PCM_FORMAT_S20 SNDRV_PCM_FORMAT_S20_BE
+#define SNDRV_PCM_FORMAT_U20 SNDRV_PCM_FORMAT_U20_BE
#endif
typedef int __bitwise snd_pcm_subformat_t;
diff --git a/include/uapi/sound/snd_sst_tokens.h b/include/uapi/sound/snd_sst_tokens.h
index 326054a72bc7..8ba0112e5336 100644
--- a/include/uapi/sound/snd_sst_tokens.h
+++ b/include/uapi/sound/snd_sst_tokens.h
@@ -222,6 +222,17 @@
* %SKL_TKN_MM_U32_NUM_IN_FMT: Number of input formats
* %SKL_TKN_MM_U32_NUM_OUT_FMT: Number of output formats
*
+ * %SKL_TKN_U32_ASTATE_IDX: Table Index for the A-State entry to be filled
+ * with kcps and clock source
+ *
+ * %SKL_TKN_U32_ASTATE_COUNT: Number of valid entries in A-State table
+ *
+ * %SKL_TKN_U32_ASTATE_KCPS: Specifies the core load threshold (in kilo
+ * cycles per second) below which the DSP is clocked
+ * from the source specified by %SKL_TKN_U32_ASTATE_CLK_SRC.
+ *
+ * %SKL_TKN_U32_ASTATE_CLK_SRC: Clock source for A-State entry
+ *
 * module_id and loadable flags don't have tokens as these values will be
* read from the DSP FW manifest
*
@@ -309,7 +320,11 @@ enum SKL_TKNS {
SKL_TKN_MM_U32_NUM_IN_FMT,
SKL_TKN_MM_U32_NUM_OUT_FMT,
- SKL_TKN_MAX = SKL_TKN_MM_U32_NUM_OUT_FMT,
+ SKL_TKN_U32_ASTATE_IDX,
+ SKL_TKN_U32_ASTATE_COUNT,
+ SKL_TKN_U32_ASTATE_KCPS,
+ SKL_TKN_U32_ASTATE_CLK_SRC,
+ SKL_TKN_MAX = SKL_TKN_U32_ASTATE_CLK_SRC,
};
#endif
diff --git a/init/Makefile b/init/Makefile
index 1dbb23787290..a3e5ce2bcf08 100644
--- a/init/Makefile
+++ b/init/Makefile
@@ -13,9 +13,7 @@ obj-$(CONFIG_BLK_DEV_INITRD) += initramfs.o
endif
obj-$(CONFIG_GENERIC_CALIBRATE_DELAY) += calibrate.o
-ifneq ($(CONFIG_ARCH_INIT_TASK),y)
obj-y += init_task.o
-endif
mounts-y := do_mounts.o
mounts-$(CONFIG_BLK_DEV_RAM) += do_mounts_rd.o
diff --git a/init/init_task.c b/init/init_task.c
index 9325fee7dc82..3ac6e754cf64 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -13,19 +13,175 @@
#include <asm/pgtable.h>
#include <linux/uaccess.h>
-static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
-static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+static struct signal_struct init_signals = {
+ .nr_threads = 1,
+ .thread_head = LIST_HEAD_INIT(init_task.thread_node),
+ .wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(init_signals.wait_chldexit),
+ .shared_pending = {
+ .list = LIST_HEAD_INIT(init_signals.shared_pending.list),
+ .signal = {{0}}
+ },
+ .rlim = INIT_RLIMITS,
+ .cred_guard_mutex = __MUTEX_INITIALIZER(init_signals.cred_guard_mutex),
+#ifdef CONFIG_POSIX_TIMERS
+ .posix_timers = LIST_HEAD_INIT(init_signals.posix_timers),
+ .cputimer = {
+ .cputime_atomic = INIT_CPUTIME_ATOMIC,
+ .running = false,
+ .checking_timer = false,
+ },
+#endif
+ INIT_CPU_TIMERS(init_signals)
+ INIT_PREV_CPUTIME(init_signals)
+};
+
+static struct sighand_struct init_sighand = {
+ .count = ATOMIC_INIT(1),
+ .action = { { { .sa_handler = SIG_DFL, } }, },
+ .siglock = __SPIN_LOCK_UNLOCKED(init_sighand.siglock),
+ .signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(init_sighand.signalfd_wqh),
+};
-/* Initial task structure */
-struct task_struct init_task = INIT_TASK(init_task);
+/*
+ * Set up the first task table; touch at your own risk! Base=0,
+ * limit=0x1fffff (=2MB)
+ */
+struct task_struct init_task
+#ifdef CONFIG_ARCH_TASK_STRUCT_ON_STACK
+ __init_task_data
+#endif
+= {
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ .thread_info = INIT_THREAD_INFO(init_task),
+ .stack_refcount = ATOMIC_INIT(1),
+#endif
+ .state = 0,
+ .stack = init_stack,
+ .usage = ATOMIC_INIT(2),
+ .flags = PF_KTHREAD,
+ .prio = MAX_PRIO - 20,
+ .static_prio = MAX_PRIO - 20,
+ .normal_prio = MAX_PRIO - 20,
+ .policy = SCHED_NORMAL,
+ .cpus_allowed = CPU_MASK_ALL,
+ .nr_cpus_allowed= NR_CPUS,
+ .mm = NULL,
+ .active_mm = &init_mm,
+ .restart_block = {
+ .fn = do_no_restart_syscall,
+ },
+ .se = {
+ .group_node = LIST_HEAD_INIT(init_task.se.group_node),
+ },
+ .rt = {
+ .run_list = LIST_HEAD_INIT(init_task.rt.run_list),
+ .time_slice = RR_TIMESLICE,
+ },
+ .tasks = LIST_HEAD_INIT(init_task.tasks),
+#ifdef CONFIG_SMP
+ .pushable_tasks = PLIST_NODE_INIT(init_task.pushable_tasks, MAX_PRIO),
+#endif
+#ifdef CONFIG_CGROUP_SCHED
+ .sched_task_group = &root_task_group,
+#endif
+ .ptraced = LIST_HEAD_INIT(init_task.ptraced),
+ .ptrace_entry = LIST_HEAD_INIT(init_task.ptrace_entry),
+ .real_parent = &init_task,
+ .parent = &init_task,
+ .children = LIST_HEAD_INIT(init_task.children),
+ .sibling = LIST_HEAD_INIT(init_task.sibling),
+ .group_leader = &init_task,
+ RCU_POINTER_INITIALIZER(real_cred, &init_cred),
+ RCU_POINTER_INITIALIZER(cred, &init_cred),
+ .comm = INIT_TASK_COMM,
+ .thread = INIT_THREAD,
+ .fs = &init_fs,
+ .files = &init_files,
+ .signal = &init_signals,
+ .sighand = &init_sighand,
+ .nsproxy = &init_nsproxy,
+ .pending = {
+ .list = LIST_HEAD_INIT(init_task.pending.list),
+ .signal = {{0}}
+ },
+ .blocked = {{0}},
+ .alloc_lock = __SPIN_LOCK_UNLOCKED(init_task.alloc_lock),
+ .journal_info = NULL,
+ INIT_CPU_TIMERS(init_task)
+ .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(init_task.pi_lock),
+ .timer_slack_ns = 50000, /* 50 usec default slack */
+ .pids = {
+ [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID),
+ [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID),
+ [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID),
+ },
+ .thread_group = LIST_HEAD_INIT(init_task.thread_group),
+ .thread_node = LIST_HEAD_INIT(init_signals.thread_head),
+#ifdef CONFIG_AUDITSYSCALL
+ .loginuid = INVALID_UID,
+ .sessionid = (unsigned int)-1,
+#endif
+#ifdef CONFIG_PERF_EVENTS
+ .perf_event_mutex = __MUTEX_INITIALIZER(init_task.perf_event_mutex),
+ .perf_event_list = LIST_HEAD_INIT(init_task.perf_event_list),
+#endif
+#ifdef CONFIG_PREEMPT_RCU
+ .rcu_read_lock_nesting = 0,
+ .rcu_read_unlock_special.s = 0,
+ .rcu_node_entry = LIST_HEAD_INIT(init_task.rcu_node_entry),
+ .rcu_blocked_node = NULL,
+#endif
+#ifdef CONFIG_TASKS_RCU
+ .rcu_tasks_holdout = false,
+ .rcu_tasks_holdout_list = LIST_HEAD_INIT(init_task.rcu_tasks_holdout_list),
+ .rcu_tasks_idle_cpu = -1,
+#endif
+#ifdef CONFIG_CPUSETS
+ .mems_allowed_seq = SEQCNT_ZERO(init_task.mems_allowed_seq),
+#endif
+#ifdef CONFIG_RT_MUTEXES
+ .pi_waiters = RB_ROOT_CACHED,
+ .pi_top_task = NULL,
+#endif
+ INIT_PREV_CPUTIME(init_task)
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+ .vtime.seqcount = SEQCNT_ZERO(init_task.vtime_seqcount),
+ .vtime.starttime = 0,
+ .vtime.state = VTIME_SYS,
+#endif
+#ifdef CONFIG_NUMA_BALANCING
+ .numa_preferred_nid = -1,
+ .numa_group = NULL,
+ .numa_faults = NULL,
+#endif
+#ifdef CONFIG_KASAN
+ .kasan_depth = 1,
+#endif
+#ifdef CONFIG_TRACE_IRQFLAGS
+ .softirqs_enabled = 1,
+#endif
+#ifdef CONFIG_LOCKDEP
+ .lockdep_recursion = 0,
+#endif
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ .ret_stack = NULL,
+#endif
+#if defined(CONFIG_TRACING) && defined(CONFIG_PREEMPT)
+ .trace_recursion = 0,
+#endif
+#ifdef CONFIG_LIVEPATCH
+ .patch_state = KLP_UNDEFINED,
+#endif
+#ifdef CONFIG_SECURITY
+ .security = NULL,
+#endif
+};
EXPORT_SYMBOL(init_task);
/*
* Initial thread structure. Alignment of this is handled by a special
* linker map entry.
*/
-union thread_union init_thread_union __init_task_data = {
#ifndef CONFIG_THREAD_INFO_IN_TASK
- INIT_THREAD_INFO(init_task)
+struct thread_info init_thread_info __init_thread_info = INIT_THREAD_INFO(init_task);
#endif
-};
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 9649ecd8a73a..690ae6665500 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -270,13 +270,30 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
* that means the min(mq_maxmsg, max_priorities) * struct
* posix_msg_tree_node.
*/
+
+ ret = -EINVAL;
+ if (info->attr.mq_maxmsg <= 0 || info->attr.mq_msgsize <= 0)
+ goto out_inode;
+ if (capable(CAP_SYS_RESOURCE)) {
+ if (info->attr.mq_maxmsg > HARD_MSGMAX ||
+ info->attr.mq_msgsize > HARD_MSGSIZEMAX)
+ goto out_inode;
+ } else {
+ if (info->attr.mq_maxmsg > ipc_ns->mq_msg_max ||
+ info->attr.mq_msgsize > ipc_ns->mq_msgsize_max)
+ goto out_inode;
+ }
+ ret = -EOVERFLOW;
+ /* check for overflow */
+ if (info->attr.mq_msgsize > ULONG_MAX/info->attr.mq_maxmsg)
+ goto out_inode;
mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
sizeof(struct posix_msg_tree_node);
-
- mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
- info->attr.mq_msgsize);
-
+ mq_bytes = info->attr.mq_maxmsg * info->attr.mq_msgsize;
+ if (mq_bytes + mq_treesize < mq_bytes)
+ goto out_inode;
+ mq_bytes += mq_treesize;
spin_lock(&mq_lock);
if (u->mq_bytes + mq_bytes < u->mq_bytes ||
u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
@@ -308,8 +325,9 @@ err:
static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
{
struct inode *inode;
- struct ipc_namespace *ns = sb->s_fs_info;
+ struct ipc_namespace *ns = data;
+ sb->s_fs_info = ns;
sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV;
sb->s_blocksize = PAGE_SIZE;
sb->s_blocksize_bits = PAGE_SHIFT;
@@ -326,18 +344,44 @@ static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
return 0;
}
+static struct file_system_type mqueue_fs_type;
+/*
+ * Return value is pinned only by reference in ->mq_mnt; it will
+ * live until ipcns dies. Caller does not need to drop it.
+ */
+static struct vfsmount *mq_internal_mount(void)
+{
+ struct ipc_namespace *ns = current->nsproxy->ipc_ns;
+ struct vfsmount *m = ns->mq_mnt;
+ if (m)
+ return m;
+ m = kern_mount_data(&mqueue_fs_type, ns);
+ spin_lock(&mq_lock);
+ if (unlikely(ns->mq_mnt)) {
+ spin_unlock(&mq_lock);
+ if (!IS_ERR(m))
+ kern_unmount(m);
+ return ns->mq_mnt;
+ }
+ if (!IS_ERR(m))
+ ns->mq_mnt = m;
+ spin_unlock(&mq_lock);
+ return m;
+}
+
static struct dentry *mqueue_mount(struct file_system_type *fs_type,
int flags, const char *dev_name,
void *data)
{
- struct ipc_namespace *ns;
- if (flags & SB_KERNMOUNT) {
- ns = data;
- data = NULL;
- } else {
- ns = current->nsproxy->ipc_ns;
- }
- return mount_ns(fs_type, flags, data, ns, ns->user_ns, mqueue_fill_super);
+ struct vfsmount *m;
+ if (flags & SB_KERNMOUNT)
+ return mount_nodev(fs_type, flags, data, mqueue_fill_super);
+ m = mq_internal_mount();
+ if (IS_ERR(m))
+ return ERR_CAST(m);
+ atomic_inc(&m->mnt_sb->s_active);
+ down_write(&m->mnt_sb->s_umount);
+ return dget(m->mnt_root);
}
static void init_once(void *foo)
@@ -416,11 +460,11 @@ static void mqueue_evict_inode(struct inode *inode)
put_ipc_ns(ipc_ns);
}
-static int mqueue_create(struct inode *dir, struct dentry *dentry,
- umode_t mode, bool excl)
+static int mqueue_create_attr(struct dentry *dentry, umode_t mode, void *arg)
{
+ struct inode *dir = dentry->d_parent->d_inode;
struct inode *inode;
- struct mq_attr *attr = dentry->d_fsdata;
+ struct mq_attr *attr = arg;
int error;
struct ipc_namespace *ipc_ns;
@@ -461,6 +505,12 @@ out_unlock:
return error;
}
+static int mqueue_create(struct inode *dir, struct dentry *dentry,
+ umode_t mode, bool excl)
+{
+ return mqueue_create_attr(dentry, mode, NULL);
+}
+
static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = d_inode(dentry);
@@ -519,10 +569,10 @@ static int mqueue_flush_file(struct file *filp, fl_owner_t id)
return 0;
}
-static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
+static __poll_t mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
- int retval = 0;
+ __poll_t retval = 0;
poll_wait(filp, &info->wait_q, poll_tab);
@@ -639,6 +689,7 @@ static void __do_notify(struct mqueue_inode_info *info)
case SIGEV_SIGNAL:
/* sends signal */
+ clear_siginfo(&sig_i);
sig_i.si_signo = info->notify.sigev_signo;
sig_i.si_errno = 0;
sig_i.si_code = SI_MESGQ;
@@ -690,96 +741,46 @@ static void remove_notification(struct mqueue_inode_info *info)
info->notify_user_ns = NULL;
}
-static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr)
-{
- int mq_treesize;
- unsigned long total_size;
-
- if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
- return -EINVAL;
- if (capable(CAP_SYS_RESOURCE)) {
- if (attr->mq_maxmsg > HARD_MSGMAX ||
- attr->mq_msgsize > HARD_MSGSIZEMAX)
- return -EINVAL;
- } else {
- if (attr->mq_maxmsg > ipc_ns->mq_msg_max ||
- attr->mq_msgsize > ipc_ns->mq_msgsize_max)
- return -EINVAL;
- }
- /* check for overflow */
- if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
- return -EOVERFLOW;
- mq_treesize = attr->mq_maxmsg * sizeof(struct msg_msg) +
- min_t(unsigned int, attr->mq_maxmsg, MQ_PRIO_MAX) *
- sizeof(struct posix_msg_tree_node);
- total_size = attr->mq_maxmsg * attr->mq_msgsize;
- if (total_size + mq_treesize < total_size)
- return -EOVERFLOW;
- return 0;
-}
-
-/*
- * Invoked when creating a new queue via sys_mq_open
- */
-static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir,
- struct path *path, int oflag, umode_t mode,
+static int prepare_open(struct dentry *dentry, int oflag, int ro,
+ umode_t mode, struct filename *name,
struct mq_attr *attr)
{
- const struct cred *cred = current_cred();
- int ret;
-
- if (attr) {
- ret = mq_attr_ok(ipc_ns, attr);
- if (ret)
- return ERR_PTR(ret);
- /* store for use during create */
- path->dentry->d_fsdata = attr;
- } else {
- struct mq_attr def_attr;
-
- def_attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
- ipc_ns->mq_msg_default);
- def_attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
- ipc_ns->mq_msgsize_default);
- ret = mq_attr_ok(ipc_ns, &def_attr);
- if (ret)
- return ERR_PTR(ret);
- }
-
- mode &= ~current_umask();
- ret = vfs_create(dir, path->dentry, mode, true);
- path->dentry->d_fsdata = NULL;
- if (ret)
- return ERR_PTR(ret);
- return dentry_open(path, oflag, cred);
-}
-
-/* Opens existing queue */
-static struct file *do_open(struct path *path, int oflag)
-{
static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
MAY_READ | MAY_WRITE };
int acc;
+
+ if (d_really_is_negative(dentry)) {
+ if (!(oflag & O_CREAT))
+ return -ENOENT;
+ if (ro)
+ return ro;
+ audit_inode_parent_hidden(name, dentry->d_parent);
+ return vfs_mkobj(dentry, mode & ~current_umask(),
+ mqueue_create_attr, attr);
+ }
+ /* it already existed */
+ audit_inode(name, dentry, 0);
+ if ((oflag & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
+ return -EEXIST;
if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
acc = oflag2acc[oflag & O_ACCMODE];
- if (inode_permission(d_inode(path->dentry), acc))
- return ERR_PTR(-EACCES);
- return dentry_open(path, oflag, current_cred());
+ return inode_permission(d_inode(dentry), acc);
}
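prepare_open() folds the old do_create()/do_open() split into one decision tree keyed on whether the looked-up dentry is negative. The oflag2acc[] table maps the three valid O_ACCMODE encodings straight to MAY_* permission masks, and the (O_RDWR | O_WRONLY) test rejects the fourth encoding (value 3), which is not a valid access mode. A standalone userspace sketch of that mapping (MAY_* values as in linux/fs.h):

	#include <fcntl.h>
	#include <stdio.h>

	#define MAY_WRITE	0x2
	#define MAY_READ	0x4

	static const int oflag2acc[O_ACCMODE] = {
		[O_RDONLY] = MAY_READ,
		[O_WRONLY] = MAY_WRITE,
		[O_RDWR]   = MAY_READ | MAY_WRITE,
	};

	int main(void)
	{
		int oflag = O_WRONLY | O_CREAT;

		if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))	/* 3: invalid */
			return 1;
		printf("acc mask: %d\n", oflag2acc[oflag & O_ACCMODE]);
		return 0;
	}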
static int do_mq_open(const char __user *u_name, int oflag, umode_t mode,
struct mq_attr *attr)
{
- struct path path;
- struct file *filp;
+ struct vfsmount *mnt = mq_internal_mount();
+ struct dentry *root;
struct filename *name;
+ struct path path;
int fd, error;
- struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
- struct vfsmount *mnt = ipc_ns->mq_mnt;
- struct dentry *root = mnt->mnt_root;
int ro;
+ if (IS_ERR(mnt))
+ return PTR_ERR(mnt);
+
audit_mq_open(oflag, mode, attr);
if (IS_ERR(name = getname(u_name)))
@@ -790,7 +791,7 @@ static int do_mq_open(const char __user *u_name, int oflag, umode_t mode,
goto out_putname;
ro = mnt_want_write(mnt); /* we'll drop it in any case */
- error = 0;
+ root = mnt->mnt_root;
inode_lock(d_inode(root));
path.dentry = lookup_one_len(name->name, root, strlen(name->name));
if (IS_ERR(path.dentry)) {
@@ -798,38 +799,14 @@ static int do_mq_open(const char __user *u_name, int oflag, umode_t mode,
goto out_putfd;
}
path.mnt = mntget(mnt);
-
- if (oflag & O_CREAT) {
- if (d_really_is_positive(path.dentry)) { /* entry already exists */
- audit_inode(name, path.dentry, 0);
- if (oflag & O_EXCL) {
- error = -EEXIST;
- goto out;
- }
- filp = do_open(&path, oflag);
- } else {
- if (ro) {
- error = ro;
- goto out;
- }
- audit_inode_parent_hidden(name, root);
- filp = do_create(ipc_ns, d_inode(root), &path,
- oflag, mode, attr);
- }
- } else {
- if (d_really_is_negative(path.dentry)) {
- error = -ENOENT;
- goto out;
- }
- audit_inode(name, path.dentry, 0);
- filp = do_open(&path, oflag);
+ error = prepare_open(path.dentry, oflag, ro, mode, name, attr);
+ if (!error) {
+ struct file *file = dentry_open(&path, oflag, current_cred());
+ if (!IS_ERR(file))
+ fd_install(fd, file);
+ else
+ error = PTR_ERR(file);
}
-
- if (!IS_ERR(filp))
- fd_install(fd, filp);
- else
- error = PTR_ERR(filp);
-out:
path_put(&path);
out_putfd:
if (error) {
@@ -863,6 +840,9 @@ SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
struct vfsmount *mnt = ipc_ns->mq_mnt;
+ if (!mnt)
+ return -ENOENT;
+
name = getname(u_name);
if (IS_ERR(name))
return PTR_ERR(name);
@@ -1589,28 +1569,26 @@ int mq_init_ns(struct ipc_namespace *ns)
ns->mq_msgsize_max = DFLT_MSGSIZEMAX;
ns->mq_msg_default = DFLT_MSG;
ns->mq_msgsize_default = DFLT_MSGSIZE;
+ ns->mq_mnt = NULL;
- ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns);
- if (IS_ERR(ns->mq_mnt)) {
- int err = PTR_ERR(ns->mq_mnt);
- ns->mq_mnt = NULL;
- return err;
- }
return 0;
}
void mq_clear_sbinfo(struct ipc_namespace *ns)
{
- ns->mq_mnt->mnt_sb->s_fs_info = NULL;
+ if (ns->mq_mnt)
+ ns->mq_mnt->mnt_sb->s_fs_info = NULL;
}
void mq_put_mnt(struct ipc_namespace *ns)
{
- kern_unmount(ns->mq_mnt);
+ if (ns->mq_mnt)
+ kern_unmount(ns->mq_mnt);
}
static int __init init_mqueue_fs(void)
{
+ struct vfsmount *m;
int error;
mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
@@ -1632,6 +1610,10 @@ static int __init init_mqueue_fs(void)
if (error)
goto out_filesystem;
+ m = kern_mount_data(&mqueue_fs_type, &init_ipc_ns);
+ if (IS_ERR(m)) {
+ error = PTR_ERR(m);
+ goto out_filesystem;
+ }
+ init_ipc_ns.mq_mnt = m;
return 0;
out_filesystem:
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index 5bb5e49ef4c3..81e2f6995adb 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -150,39 +150,29 @@ static int bpf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
return 0;
}
-static int bpf_mkobj_ops(struct inode *dir, struct dentry *dentry,
- umode_t mode, const struct inode_operations *iops)
+static int bpf_mkobj_ops(struct dentry *dentry, umode_t mode, void *raw,
+ const struct inode_operations *iops)
{
- struct inode *inode;
-
- inode = bpf_get_inode(dir->i_sb, dir, mode | S_IFREG);
+ struct inode *dir = dentry->d_parent->d_inode;
+ struct inode *inode = bpf_get_inode(dir->i_sb, dir, mode);
if (IS_ERR(inode))
return PTR_ERR(inode);
inode->i_op = iops;
- inode->i_private = dentry->d_fsdata;
+ inode->i_private = raw;
bpf_dentry_finalize(dentry, inode, dir);
return 0;
}
-static int bpf_mkobj(struct inode *dir, struct dentry *dentry, umode_t mode,
- dev_t devt)
+static int bpf_mkprog(struct dentry *dentry, umode_t mode, void *arg)
{
- enum bpf_type type = MINOR(devt);
-
- if (MAJOR(devt) != UNNAMED_MAJOR || !S_ISREG(mode) ||
- dentry->d_fsdata == NULL)
- return -EPERM;
+ return bpf_mkobj_ops(dentry, mode, arg, &bpf_prog_iops);
+}
- switch (type) {
- case BPF_TYPE_PROG:
- return bpf_mkobj_ops(dir, dentry, mode, &bpf_prog_iops);
- case BPF_TYPE_MAP:
- return bpf_mkobj_ops(dir, dentry, mode, &bpf_map_iops);
- default:
- return -EPERM;
- }
+static int bpf_mkmap(struct dentry *dentry, umode_t mode, void *arg)
+{
+ return bpf_mkobj_ops(dentry, mode, arg, &bpf_map_iops);
}
static struct dentry *
@@ -218,7 +208,6 @@ static int bpf_symlink(struct inode *dir, struct dentry *dentry,
static const struct inode_operations bpf_dir_iops = {
.lookup = bpf_lookup,
- .mknod = bpf_mkobj,
.mkdir = bpf_mkdir,
.symlink = bpf_symlink,
.rmdir = simple_rmdir,
@@ -234,7 +223,6 @@ static int bpf_obj_do_pin(const struct filename *pathname, void *raw,
struct inode *dir;
struct path path;
umode_t mode;
- dev_t devt;
int ret;
dentry = kern_path_create(AT_FDCWD, pathname->name, &path, 0);
@@ -242,9 +230,8 @@ static int bpf_obj_do_pin(const struct filename *pathname, void *raw,
return PTR_ERR(dentry);
mode = S_IFREG | ((S_IRUSR | S_IWUSR) & ~current_umask());
- devt = MKDEV(UNNAMED_MAJOR, type);
- ret = security_path_mknod(&path, dentry, mode, devt);
+ ret = security_path_mknod(&path, dentry, mode, 0);
if (ret)
goto out;
@@ -254,9 +241,16 @@ static int bpf_obj_do_pin(const struct filename *pathname, void *raw,
goto out;
}
- dentry->d_fsdata = raw;
- ret = vfs_mknod(dir, dentry, mode, devt);
- dentry->d_fsdata = NULL;
+ switch (type) {
+ case BPF_TYPE_PROG:
+ ret = vfs_mkobj(dentry, mode, bpf_mkprog, raw);
+ break;
+ case BPF_TYPE_MAP:
+ ret = vfs_mkobj(dentry, mode, bpf_mkmap, raw);
+ break;
+ default:
+ ret = -EPERM;
+ }
out:
done_path_create(&path, dentry);
return ret;
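This is the consumer side of the vfs_mkobj() helper this series adds to fs/namei.c: rather than smuggling the object pointer through dentry->d_fsdata and encoding its type in a fake device number, the pin path hands the pointer straight to a typed constructor callback. A simplified, hedged sketch of the helper's shape (the real one also performs may_create(), mode sanitizing and security checks):

	#include <linux/fs.h>
	#include <linux/fsnotify.h>

	int vfs_mkobj_sketch(struct dentry *dentry, umode_t mode,
			     int (*f)(struct dentry *, umode_t, void *),
			     void *arg)
	{
		struct inode *dir = dentry->d_parent->d_inode;
		int error = f(dentry, mode, arg);	/* fs-specific constructor */

		if (!error)
			fsnotify_create(dir, dentry);
		return error;
	}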
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 7e4c44538119..8cda3bc3ae22 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -1397,7 +1397,7 @@ static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
cgroup_on_dfl(cgrp) ? ss->name : ss->legacy_name,
cft->name);
else
- strlcpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
+ strscpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
return buf;
}
@@ -1864,9 +1864,9 @@ void init_cgroup_root(struct cgroup_root *root, struct cgroup_sb_opts *opts)
root->flags = opts->flags;
if (opts->release_agent)
- strlcpy(root->release_agent_path, opts->release_agent, PATH_MAX);
+ strscpy(root->release_agent_path, opts->release_agent, PATH_MAX);
if (opts->name)
- strlcpy(root->name, opts->name, MAX_CGROUP_ROOT_NAMELEN);
+ strscpy(root->name, opts->name, MAX_CGROUP_ROOT_NAMELEN);
if (opts->cpuset_clone_children)
set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags);
}
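strscpy() is preferred over strlcpy() because strlcpy() must traverse the whole source to compute its return value (an over-read risk for unterminated sources), while strscpy() stops at the destination size and reports truncation directly: the return value is the number of characters copied, or -E2BIG when the source did not fit (the destination is still NUL-terminated). A hedged usage sketch:

	#include <linux/errno.h>
	#include <linux/string.h>

	static int copy_name(char *dst, size_t dst_size, const char *src)
	{
		ssize_t n = strscpy(dst, src, dst_size);

		if (n < 0)		/* -E2BIG: truncated */
			return -E2BIG;
		return 0;		/* n == strlen(dst) */
	}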
diff --git a/kernel/configs/nopm.config b/kernel/configs/nopm.config
new file mode 100644
index 000000000000..81ff07863576
--- /dev/null
+++ b/kernel/configs/nopm.config
@@ -0,0 +1,15 @@
+CONFIG_PM=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+
+# Triggers PM on OMAP
+CONFIG_CPU_IDLE=n
+
+# Triggers enablement via hibernate callbacks
+CONFIG_XEN=n
+
+# ARM/ARM64 architectures that select PM unconditionally
+CONFIG_ARCH_OMAP2PLUS_TYPICAL=n
+CONFIG_ARCH_RENESAS=n
+CONFIG_ARCH_TEGRA=n
+CONFIG_ARCH_VEXPRESS=n
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index c8146d53ca67..dbb0781a0533 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -2441,7 +2441,6 @@ static int kdb_kill(int argc, const char **argv)
long sig, pid;
char *endp;
struct task_struct *p;
- struct siginfo info;
if (argc != 2)
return KDB_ARGCOUNT;
@@ -2449,7 +2448,7 @@ static int kdb_kill(int argc, const char **argv)
sig = simple_strtol(argv[1], &endp, 0);
if (*endp)
return KDB_BADINT;
- if (sig >= 0) {
+ if ((sig >= 0) || !valid_signal(-sig)) {
kdb_printf("Invalid signal parameter.<-signal>\n");
return 0;
}
@@ -2470,12 +2469,7 @@ static int kdb_kill(int argc, const char **argv)
return 0;
}
p = p->group_leader;
- info.si_signo = sig;
- info.si_errno = 0;
- info.si_code = SI_USER;
- info.si_pid = pid; /* same capabilities as process being signalled */
- info.si_uid = 0; /* kdb has root authority */
- kdb_send_sig_info(p, &info);
+ kdb_send_sig(p, sig);
return 0;
}
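kdb's kill takes the signal as a leading-dash argument ("kill -9 <pid>"), so a well-formed value parses negative; the added !valid_signal(-sig) test additionally bounds it to 1.._NSIG before it is used. A hedged sketch of that validation (valid_signal() as defined in include/linux/signal.h; parse_kdb_sig() is hypothetical):

	#include <linux/kernel.h>	/* simple_strtol() */
	#include <linux/signal.h>	/* valid_signal(), _NSIG */

	/* Return the positive signal number, or 0 if @arg is not of the
	 * form "-<1.._NSIG>". (Sketch; kdb itself prints an error.) */
	static int parse_kdb_sig(const char *arg)
	{
		char *endp;
		long sig = simple_strtol(arg, &endp, 0);

		if (*endp || sig >= 0 || !valid_signal(-sig))
			return 0;
		return (int)-sig;
	}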
diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h
index fc224fbcf954..1e5a502ba4a7 100644
--- a/kernel/debug/kdb/kdb_private.h
+++ b/kernel/debug/kdb/kdb_private.h
@@ -208,7 +208,7 @@ extern unsigned long kdb_task_state(const struct task_struct *p,
extern void kdb_ps_suppressed(void);
extern void kdb_ps1(const struct task_struct *p);
extern void kdb_print_nameval(const char *name, unsigned long val);
-extern void kdb_send_sig_info(struct task_struct *p, struct siginfo *info);
+extern void kdb_send_sig(struct task_struct *p, int sig);
extern void kdb_meminfo_proc_show(void);
extern char *kdb_getstr(char *, size_t, const char *);
extern void kdb_gdb_state_pass(char *buf);
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index 1b2be63c8528..772a43fea825 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -179,21 +179,6 @@ put_callchain_entry(int rctx)
}
struct perf_callchain_entry *
-perf_callchain(struct perf_event *event, struct pt_regs *regs)
-{
- bool kernel = !event->attr.exclude_callchain_kernel;
- bool user = !event->attr.exclude_callchain_user;
- /* Disallow cross-task user callchains. */
- bool crosstask = event->ctx->task && event->ctx->task != current;
- const u32 max_stack = event->attr.sample_max_stack;
-
- if (!kernel && !user)
- return NULL;
-
- return get_perf_callchain(regs, 0, kernel, user, max_stack, crosstask, true);
-}
-
-struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
u32 max_stack, bool crosstask, bool add_mark)
{
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 878d86c513d6..f0549e79978b 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1231,6 +1231,10 @@ static void put_ctx(struct perf_event_context *ctx)
* perf_event_context::lock
* perf_event::mmap_mutex
* mmap_sem
+ *
+ * cpu_hotplug_lock
+ * pmus_lock
+ * cpuctx->mutex / perf_event_context::mutex
*/
static struct perf_event_context *
perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
@@ -4196,6 +4200,7 @@ int perf_event_release_kernel(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
struct perf_event *child, *tmp;
+ LIST_HEAD(free_list);
/*
* If we got here through err_file: fput(event_file); we will not have
@@ -4268,8 +4273,7 @@ again:
struct perf_event, child_list);
if (tmp == child) {
perf_remove_from_context(child, DETACH_GROUP);
- list_del(&child->child_list);
- free_event(child);
+ list_move(&child->child_list, &free_list);
/*
* This matches the refcount bump in inherit_event();
* this can't be the last reference.
@@ -4284,6 +4288,11 @@ again:
}
mutex_unlock(&event->child_mutex);
+ list_for_each_entry_safe(child, tmp, &free_list, child_list) {
+ list_del(&child->child_list);
+ free_event(child);
+ }
+
no_ctx:
put_event(event); /* Must be the 'last' reference */
return 0;
@@ -4511,11 +4520,11 @@ perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
return ret;
}
-static unsigned int perf_poll(struct file *file, poll_table *wait)
+static __poll_t perf_poll(struct file *file, poll_table *wait)
{
struct perf_event *event = file->private_data;
struct ring_buffer *rb;
- unsigned int events = POLLHUP;
+ __poll_t events = POLLHUP;
poll_wait(file, &event->waitq, wait);
@@ -4907,6 +4916,7 @@ void perf_event_update_userpage(struct perf_event *event)
unlock:
rcu_read_unlock();
}
+EXPORT_SYMBOL_GPL(perf_event_update_userpage);
static int perf_mmap_fault(struct vm_fault *vmf)
{
@@ -5818,19 +5828,11 @@ void perf_output_sample(struct perf_output_handle *handle,
perf_output_read(handle, event);
if (sample_type & PERF_SAMPLE_CALLCHAIN) {
- if (data->callchain) {
- int size = 1;
-
- if (data->callchain)
- size += data->callchain->nr;
-
- size *= sizeof(u64);
+ int size = 1;
- __output_copy(handle, data->callchain, size);
- } else {
- u64 nr = 0;
- perf_output_put(handle, nr);
- }
+ size += data->callchain->nr;
+ size *= sizeof(u64);
+ __output_copy(handle, data->callchain, size);
}
if (sample_type & PERF_SAMPLE_RAW) {
@@ -5983,6 +5985,26 @@ static u64 perf_virt_to_phys(u64 virt)
return phys_addr;
}
+static struct perf_callchain_entry __empty_callchain = { .nr = 0, };
+
+static struct perf_callchain_entry *
+perf_callchain(struct perf_event *event, struct pt_regs *regs)
+{
+ bool kernel = !event->attr.exclude_callchain_kernel;
+ bool user = !event->attr.exclude_callchain_user;
+ /* Disallow cross-task user callchains. */
+ bool crosstask = event->ctx->task && event->ctx->task != current;
+ const u32 max_stack = event->attr.sample_max_stack;
+ struct perf_callchain_entry *callchain;
+
+ if (!kernel && !user)
+ return &__empty_callchain;
+
+ callchain = get_perf_callchain(regs, 0, kernel, user,
+ max_stack, crosstask, true);
+ return callchain ?: &__empty_callchain;
+}
+
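Returning a static empty entry instead of NULL lets every consumer drop its NULL checks: data->callchain->nr is always safe to dereference, and an empty chain serializes as a single zero length word, which matches the old "u64 nr = 0" fallback removed from perf_output_sample() above. The ?: form is GCC's two-operand conditional; without evaluating the operand twice, it is equivalent to:

	struct perf_callchain_entry *tmp = callchain;

	return tmp ? tmp : &__empty_callchain;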
void perf_prepare_sample(struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event,
@@ -6005,9 +6027,7 @@ void perf_prepare_sample(struct perf_event_header *header,
int size = 1;
data->callchain = perf_callchain(event, regs);
-
- if (data->callchain)
- size += data->callchain->nr;
+ size += data->callchain->nr;
header->size += size * sizeof(u64);
}
@@ -8526,6 +8546,29 @@ fail_clear_files:
return ret;
}
+static int
+perf_tracepoint_set_filter(struct perf_event *event, char *filter_str)
+{
+ struct perf_event_context *ctx = event->ctx;
+ int ret;
+
+ /*
+ * Beware, here be dragons!!
+ *
+ * The tracepoint muck will deadlock against ctx->mutex, but the tracepoint
+ * stuff does not actually need it. So temporarily drop ctx->mutex. As per
+ * perf_event_ctx_lock() we already have a reference on ctx.
+ *
+ * This can result in the event getting moved to a different ctx, but that
+ * does not affect the tracepoint state.
+ */
+ mutex_unlock(&ctx->mutex);
+ ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
+ mutex_lock(&ctx->mutex);
+
+ return ret;
+}
+
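The unlock/relock dance is the standard escape from an ABBA inversion when a callee takes a lock (here the tracepoint code's event_mutex) that elsewhere nests outside the mutex we hold: drop ours across the call and rely on a held reference to keep the object alive. A generic hedged sketch (struct obj and helper() are hypothetical):

	#include <linux/mutex.h>

	struct obj { struct mutex lock; };
	int helper(struct obj *o);	/* hypothetical; takes the outer lock */

	static int call_without_inversion(struct obj *o)
	{
		int ret;

		mutex_unlock(&o->lock);		/* break the A -> B ordering */
		ret = helper(o);		/* may take the outer lock */
		mutex_lock(&o->lock);		/* restore our invariant */

		/* Any state derived from o before the unlock must be
		 * re-validated here: it can have changed meanwhile. */
		return ret;
	}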
static int perf_event_set_filter(struct perf_event *event, void __user *arg)
{
char *filter_str;
@@ -8542,8 +8585,7 @@ static int perf_event_set_filter(struct perf_event *event, void __user *arg)
if (IS_ENABLED(CONFIG_EVENT_TRACING) &&
event->attr.type == PERF_TYPE_TRACEPOINT)
- ret = ftrace_profile_set_filter(event, event->attr.config,
- filter_str);
+ ret = perf_tracepoint_set_filter(event, filter_str);
else if (has_addr_filter(event))
ret = perf_event_set_addr_filter(event, filter_str);
@@ -9178,7 +9220,13 @@ static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
if (!try_module_get(pmu->module))
return -ENODEV;
- if (event->group_leader != event) {
+ /*
+ * A number of pmu->event_init() methods iterate the sibling_list to,
+ * for example, validate if the group fits on the PMU. Therefore,
+ * if this is a sibling event, acquire the ctx->mutex to protect
+ * the sibling_list.
+ */
+ if (event->group_leader != event && pmu->task_ctx_nr != perf_sw_context) {
/*
* This ctx->mutex can nest when we're called through
* inheritance. See the perf_event_ctx_lock_nested() comment.
@@ -10713,6 +10761,19 @@ inherit_event(struct perf_event *parent_event,
if (IS_ERR(child_event))
return child_event;
+
+ if ((child_event->attach_state & PERF_ATTACH_TASK_DATA) &&
+ !child_ctx->task_ctx_data) {
+ struct pmu *pmu = child_event->pmu;
+
+ child_ctx->task_ctx_data = kzalloc(pmu->task_ctx_size,
+ GFP_KERNEL);
+ if (!child_ctx->task_ctx_data) {
+ free_event(child_event);
+ return NULL;
+ }
+ }
+
/*
* is_orphaned_event() and list_add_tail(&parent_event->child_list)
* must be under the same lock in order to serialize against
@@ -10723,6 +10784,7 @@ inherit_event(struct perf_event *parent_event,
if (is_orphaned_event(parent_event) ||
!atomic_long_inc_not_zero(&parent_event->refcount)) {
mutex_unlock(&parent_event->child_mutex);
+ /* task_ctx_data is freed with child_ctx */
free_event(child_event);
return NULL;
}
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 09b1537ae06c..6dc725a7e7bc 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -201,10 +201,6 @@ arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
-/* Callchain handling */
-extern struct perf_callchain_entry *
-perf_callchain(struct perf_event *event, struct pt_regs *regs);
-
static inline int get_recursion_context(int *recursion)
{
int rctx;
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 267f6ef91d97..ce6848e46e94 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1167,8 +1167,8 @@ static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
}
ret = 0;
- smp_wmb(); /* pairs with get_xol_area() */
- mm->uprobes_state.xol_area = area;
+ /* pairs with get_xol_area() */
+ smp_store_release(&mm->uprobes_state.xol_area, area); /* ^^^ */
fail:
up_write(&mm->mmap_sem);
@@ -1230,8 +1230,8 @@ static struct xol_area *get_xol_area(void)
if (!mm->uprobes_state.xol_area)
__create_xol_area(0);
- area = mm->uprobes_state.xol_area;
- smp_read_barrier_depends(); /* pairs with wmb in xol_add_vma() */
+ /* Pairs with xol_add_vma() smp_store_release() */
+ area = READ_ONCE(mm->uprobes_state.xol_area); /* ^^^ */
return area;
}
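All three xol_area hunks perform the same publish/consume upgrade applied across the tree once smp_read_barrier_depends() was folded into READ_ONCE(): the writer publishes a fully initialized object with smp_store_release(), and readers use a plain READ_ONCE(), with the address dependency ordering the subsequent dereference. In miniature:

	#include <linux/compiler.h>
	#include <asm/barrier.h>

	struct xol_like { unsigned long vaddr; };

	static struct xol_like *shared;

	static void publisher(struct xol_like *a)
	{
		a->vaddr = 0x1000;			/* initialize first ... */
		smp_store_release(&shared, a);		/* ... then publish */
	}

	static unsigned long consumer(void)
	{
		struct xol_like *a = READ_ONCE(shared);	/* dependency head */

		return a ? a->vaddr : 0;	/* ordered after the load */
	}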
@@ -1528,8 +1528,8 @@ static unsigned long get_trampoline_vaddr(void)
struct xol_area *area;
unsigned long trampoline_vaddr = -1;
- area = current->mm->uprobes_state.xol_area;
- smp_read_barrier_depends();
+ /* Pairs with xol_add_vma() smp_store_release() */
+ area = READ_ONCE(current->mm->uprobes_state.xol_area); /* ^^^ */
if (area)
trampoline_vaddr = area->vaddr;
diff --git a/kernel/futex.c b/kernel/futex.c
index 8c5424dd5924..7f719d110908 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2311,9 +2311,6 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
oldowner = pi_state->owner;
- /* Owner died? */
- if (!pi_state->owner)
- newtid |= FUTEX_OWNER_DIED;
/*
* We are here because either:
@@ -2374,6 +2371,9 @@ retry:
}
newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
+ /* Owner died? */
+ if (!pi_state->owner)
+ newtid |= FUTEX_OWNER_DIED;
if (get_futex_value_locked(&uval, uaddr))
goto handle_fault;
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 89e355866450..6fc87ccda1d7 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -103,16 +103,6 @@ config GENERIC_IRQ_MATRIX_ALLOCATOR
config GENERIC_IRQ_RESERVATION_MODE
bool
-config IRQ_DOMAIN_DEBUG
- bool "Expose hardware/virtual IRQ mapping via debugfs"
- depends on IRQ_DOMAIN && DEBUG_FS
- help
- This option will show the mapping relationship between hardware irq
- numbers and Linux irq numbers. The mapping is exposed via debugfs
- in the file "irq_domain_mapping".
-
- If you don't know what this means you don't need it.
-
# Support forced irq threading
config IRQ_FORCED_THREADING
bool
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index e12d35108225..a37a3b4b6342 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -39,7 +39,7 @@ static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
}
}
-static cpumask_var_t *alloc_node_to_present_cpumask(void)
+static cpumask_var_t *alloc_node_to_possible_cpumask(void)
{
cpumask_var_t *masks;
int node;
@@ -62,7 +62,7 @@ out_unwind:
return NULL;
}
-static void free_node_to_present_cpumask(cpumask_var_t *masks)
+static void free_node_to_possible_cpumask(cpumask_var_t *masks)
{
int node;
@@ -71,22 +71,22 @@ static void free_node_to_present_cpumask(cpumask_var_t *masks)
kfree(masks);
}
-static void build_node_to_present_cpumask(cpumask_var_t *masks)
+static void build_node_to_possible_cpumask(cpumask_var_t *masks)
{
int cpu;
- for_each_present_cpu(cpu)
+ for_each_possible_cpu(cpu)
cpumask_set_cpu(cpu, masks[cpu_to_node(cpu)]);
}
-static int get_nodes_in_cpumask(cpumask_var_t *node_to_present_cpumask,
+static int get_nodes_in_cpumask(cpumask_var_t *node_to_possible_cpumask,
const struct cpumask *mask, nodemask_t *nodemsk)
{
int n, nodes = 0;
/* Calculate the number of nodes in the supplied affinity mask */
for_each_node(n) {
- if (cpumask_intersects(mask, node_to_present_cpumask[n])) {
+ if (cpumask_intersects(mask, node_to_possible_cpumask[n])) {
node_set(n, *nodemsk);
nodes++;
}
@@ -109,7 +109,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
int last_affv = affv + affd->pre_vectors;
nodemask_t nodemsk = NODE_MASK_NONE;
struct cpumask *masks;
- cpumask_var_t nmsk, *node_to_present_cpumask;
+ cpumask_var_t nmsk, *node_to_possible_cpumask;
/*
* If there aren't any vectors left after applying the pre/post
@@ -125,8 +125,8 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
if (!masks)
goto out;
- node_to_present_cpumask = alloc_node_to_present_cpumask();
- if (!node_to_present_cpumask)
+ node_to_possible_cpumask = alloc_node_to_possible_cpumask();
+ if (!node_to_possible_cpumask)
goto out;
/* Fill out vectors at the beginning that don't need affinity */
@@ -135,8 +135,8 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
/* Stabilize the cpumasks */
get_online_cpus();
- build_node_to_present_cpumask(node_to_present_cpumask);
- nodes = get_nodes_in_cpumask(node_to_present_cpumask, cpu_present_mask,
+ build_node_to_possible_cpumask(node_to_possible_cpumask);
+ nodes = get_nodes_in_cpumask(node_to_possible_cpumask, cpu_possible_mask,
&nodemsk);
/*
@@ -146,7 +146,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
if (affv <= nodes) {
for_each_node_mask(n, nodemsk) {
cpumask_copy(masks + curvec,
- node_to_present_cpumask[n]);
+ node_to_possible_cpumask[n]);
if (++curvec == last_affv)
break;
}
@@ -160,7 +160,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
vecs_per_node = (affv - (curvec - affd->pre_vectors)) / nodes;
/* Get the cpus on this node which are in the mask */
- cpumask_and(nmsk, cpu_present_mask, node_to_present_cpumask[n]);
+ cpumask_and(nmsk, cpu_possible_mask, node_to_possible_cpumask[n]);
/* Calculate the number of cpus per vector */
ncpus = cpumask_weight(nmsk);
@@ -192,7 +192,7 @@ done:
/* Fill out vectors at the end that don't need affinity */
for (; curvec < nvecs; curvec++)
cpumask_copy(masks + curvec, irq_default_affinity);
- free_node_to_present_cpumask(node_to_present_cpumask);
+ free_node_to_possible_cpumask(node_to_possible_cpumask);
out:
free_cpumask_var(nmsk);
return masks;
@@ -214,7 +214,7 @@ int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity
return 0;
get_online_cpus();
- ret = min_t(int, cpumask_weight(cpu_present_mask), vecs) + resv;
+ ret = min_t(int, cpumask_weight(cpu_possible_mask), vecs) + resv;
put_online_cpus();
return ret;
}
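Switching the spreading from cpu_present_mask to cpu_possible_mask makes the computed affinity stable across CPU hotplug: the possible set is fixed at boot, so a vector aimed at a currently-offline CPU simply sits idle until that CPU appears, rather than the masks needing recomputation after hot-add. The vector-count calculation above, restated as a standalone sketch:

	#include <linux/cpumask.h>
	#include <linux/kernel.h>	/* min_t() */

	static int stable_vector_count(int maxvec, int resv)
	{
		int vecs = maxvec - resv;

		/* Depends only on the boot-time-fixed possible set. */
		return min_t(int, cpumask_weight(cpu_possible_mask), vecs) + resv;
	}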
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 62068ad46930..e6a9c36470ee 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -897,124 +897,6 @@ unsigned int irq_find_mapping(struct irq_domain *domain,
}
EXPORT_SYMBOL_GPL(irq_find_mapping);
-#ifdef CONFIG_IRQ_DOMAIN_DEBUG
-static void virq_debug_show_one(struct seq_file *m, struct irq_desc *desc)
-{
- struct irq_domain *domain;
- struct irq_data *data;
-
- domain = desc->irq_data.domain;
- data = &desc->irq_data;
-
- while (domain) {
- unsigned int irq = data->irq;
- unsigned long hwirq = data->hwirq;
- struct irq_chip *chip;
- bool direct;
-
- if (data == &desc->irq_data)
- seq_printf(m, "%5d ", irq);
- else
- seq_printf(m, "%5d+ ", irq);
- seq_printf(m, "0x%05lx ", hwirq);
-
- chip = irq_data_get_irq_chip(data);
- seq_printf(m, "%-15s ", (chip && chip->name) ? chip->name : "none");
-
- seq_printf(m, "0x%p ", irq_data_get_irq_chip_data(data));
-
- seq_printf(m, " %c ", (desc->action && desc->action->handler) ? '*' : ' ');
- direct = (irq == hwirq) && (irq < domain->revmap_direct_max_irq);
- seq_printf(m, "%6s%-8s ",
- (hwirq < domain->revmap_size) ? "LINEAR" : "RADIX",
- direct ? "(DIRECT)" : "");
- seq_printf(m, "%s\n", domain->name);
-#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
- domain = domain->parent;
- data = data->parent_data;
-#else
- domain = NULL;
-#endif
- }
-}
-
-static int virq_debug_show(struct seq_file *m, void *private)
-{
- unsigned long flags;
- struct irq_desc *desc;
- struct irq_domain *domain;
- struct radix_tree_iter iter;
- void __rcu **slot;
- int i;
-
- seq_printf(m, " %-16s %-6s %-10s %-10s %s\n",
- "name", "mapped", "linear-max", "direct-max", "devtree-node");
- mutex_lock(&irq_domain_mutex);
- list_for_each_entry(domain, &irq_domain_list, link) {
- struct device_node *of_node;
- const char *name;
-
- int count = 0;
-
- of_node = irq_domain_get_of_node(domain);
- if (of_node)
- name = of_node_full_name(of_node);
- else if (is_fwnode_irqchip(domain->fwnode))
- name = container_of(domain->fwnode, struct irqchip_fwid,
- fwnode)->name;
- else
- name = "";
-
- radix_tree_for_each_slot(slot, &domain->revmap_tree, &iter, 0)
- count++;
- seq_printf(m, "%c%-16s %6u %10u %10u %s\n",
- domain == irq_default_domain ? '*' : ' ', domain->name,
- domain->revmap_size + count, domain->revmap_size,
- domain->revmap_direct_max_irq,
- name);
- }
- mutex_unlock(&irq_domain_mutex);
-
- seq_printf(m, "%-5s %-7s %-15s %-*s %6s %-14s %s\n", "irq", "hwirq",
- "chip name", (int)(2 * sizeof(void *) + 2), "chip data",
- "active", "type", "domain");
-
- for (i = 1; i < nr_irqs; i++) {
- desc = irq_to_desc(i);
- if (!desc)
- continue;
-
- raw_spin_lock_irqsave(&desc->lock, flags);
- virq_debug_show_one(m, desc);
- raw_spin_unlock_irqrestore(&desc->lock, flags);
- }
-
- return 0;
-}
-
-static int virq_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, virq_debug_show, inode->i_private);
-}
-
-static const struct file_operations virq_debug_fops = {
- .open = virq_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int __init irq_debugfs_init(void)
-{
- if (debugfs_create_file("irq_domain_mapping", S_IRUGO, NULL,
- NULL, &virq_debug_fops) == NULL)
- return -ENOMEM;
-
- return 0;
-}
-__initcall(irq_debugfs_init);
-#endif /* CONFIG_IRQ_DOMAIN_DEBUG */
-
/**
* irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
*
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 40e9d739c169..6b7cdf17ccf8 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -36,7 +36,7 @@ static bool irq_work_claim(struct irq_work *work)
*/
flags = work->flags & ~IRQ_WORK_PENDING;
for (;;) {
- nflags = flags | IRQ_WORK_FLAGS;
+ nflags = flags | IRQ_WORK_CLAIMED;
oflags = cmpxchg(&work->flags, flags, nflags);
if (oflags == flags)
break;
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 8594d24e4adc..b4517095db6a 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -79,7 +79,7 @@ int static_key_count(struct static_key *key)
}
EXPORT_SYMBOL_GPL(static_key_count);
-static void static_key_slow_inc_cpuslocked(struct static_key *key)
+void static_key_slow_inc_cpuslocked(struct static_key *key)
{
int v, v1;
@@ -180,7 +180,7 @@ void static_key_disable(struct static_key *key)
}
EXPORT_SYMBOL_GPL(static_key_disable);
-static void static_key_slow_dec_cpuslocked(struct static_key *key,
+static void __static_key_slow_dec_cpuslocked(struct static_key *key,
unsigned long rate_limit,
struct delayed_work *work)
{
@@ -211,7 +211,7 @@ static void __static_key_slow_dec(struct static_key *key,
struct delayed_work *work)
{
cpus_read_lock();
- static_key_slow_dec_cpuslocked(key, rate_limit, work);
+ __static_key_slow_dec_cpuslocked(key, rate_limit, work);
cpus_read_unlock();
}
@@ -229,6 +229,12 @@ void static_key_slow_dec(struct static_key *key)
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);
+void static_key_slow_dec_cpuslocked(struct static_key *key)
+{
+ STATIC_KEY_CHECK_USE(key);
+ __static_key_slow_dec_cpuslocked(key, 0, NULL);
+}
+
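The _cpuslocked suffix follows the convention used for cpu_hotplug_lock-aware APIs: the plain entry point takes cpus_read_lock() internally, while the _cpuslocked variant requires the caller to hold it already, so code that is already inside a hotplug read-side section avoids recursive locking. Hedged usage sketch:

	#include <linux/cpu.h>
	#include <linux/jump_label.h>

	static void flip_key_in_hotplug_section(struct static_key *key, bool on)
	{
		cpus_read_lock();		/* hotplug read-side section */
		if (on)
			static_key_slow_inc_cpuslocked(key);
		else
			static_key_slow_dec_cpuslocked(key);
		cpus_read_unlock();
	}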
void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
STATIC_KEY_CHECK_USE(key);
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index de9e45dca70f..3a4656fb7047 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -366,11 +366,6 @@ static int __klp_enable_patch(struct klp_patch *patch)
/*
* A reference is taken on the patch module to prevent it from being
* unloaded.
- *
- * Note: For immediate (no consistency model) patches we don't allow
- * patch modules to unload since there is no safe/sane method to
- * determine if a thread is still running in the patched code contained
- * in the patch module once the ftrace registration is successful.
*/
if (!try_module_get(patch->mod))
return -ENODEV;
@@ -454,6 +449,8 @@ EXPORT_SYMBOL_GPL(klp_enable_patch);
* /sys/kernel/livepatch/<patch>
* /sys/kernel/livepatch/<patch>/enabled
* /sys/kernel/livepatch/<patch>/transition
+ * /sys/kernel/livepatch/<patch>/signal
+ * /sys/kernel/livepatch/<patch>/force
* /sys/kernel/livepatch/<patch>/<object>
* /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
*/
@@ -528,11 +525,73 @@ static ssize_t transition_show(struct kobject *kobj,
patch == klp_transition_patch);
}
+static ssize_t signal_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct klp_patch *patch;
+ int ret;
+ bool val;
+
+ ret = kstrtobool(buf, &val);
+ if (ret)
+ return ret;
+
+ if (!val)
+ return count;
+
+ mutex_lock(&klp_mutex);
+
+ patch = container_of(kobj, struct klp_patch, kobj);
+ if (patch != klp_transition_patch) {
+ mutex_unlock(&klp_mutex);
+ return -EINVAL;
+ }
+
+ klp_send_signals();
+
+ mutex_unlock(&klp_mutex);
+
+ return count;
+}
+
+static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct klp_patch *patch;
+ int ret;
+ bool val;
+
+ ret = kstrtobool(buf, &val);
+ if (ret)
+ return ret;
+
+ if (!val)
+ return count;
+
+ mutex_lock(&klp_mutex);
+
+ patch = container_of(kobj, struct klp_patch, kobj);
+ if (patch != klp_transition_patch) {
+ mutex_unlock(&klp_mutex);
+ return -EINVAL;
+ }
+
+ klp_force_transition();
+
+ mutex_unlock(&klp_mutex);
+
+ return count;
+}
+
static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
+static struct kobj_attribute signal_kobj_attr = __ATTR_WO(signal);
+static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
static struct attribute *klp_patch_attrs[] = {
&enabled_kobj_attr.attr,
&transition_kobj_attr.attr,
+ &signal_kobj_attr.attr,
+ &force_kobj_attr.attr,
NULL
};
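Both new attributes use the same write-only sysfs idiom: parse with kstrtobool(), consume falsy writes as successful no-ops, and only act when the kobject's patch is actually mid-transition. From userspace the whole interface is an "echo 1" into the file. A skeletal store handler of this shape (the attribute name and do_the_action() are hypothetical):

	#include <linux/kernel.h>	/* kstrtobool() */
	#include <linux/kobject.h>

	void do_the_action(void);	/* hypothetical */

	static ssize_t poke_store(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  const char *buf, size_t count)
	{
		bool val;
		int ret = kstrtobool(buf, &val);

		if (ret)
			return ret;
		if (!val)
			return count;	/* "0"/"n": accepted, ignored */

		do_the_action();
		return count;
	}
	static struct kobj_attribute poke_attr = __ATTR_WO(poke);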
@@ -830,12 +889,7 @@ int klp_register_patch(struct klp_patch *patch)
if (!klp_initialized())
return -ENODEV;
- /*
- * Architectures without reliable stack traces have to set
- * patch->immediate because there's currently no way to patch kthreads
- * with the consistency model.
- */
- if (!klp_have_reliable_stack() && !patch->immediate) {
+ if (!klp_have_reliable_stack()) {
pr_err("This architecture doesn't have support for the livepatch consistency model.\n");
return -ENOSYS;
}
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index 56add6327736..7c6631e693bc 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -33,6 +33,8 @@ struct klp_patch *klp_transition_patch;
static int klp_target_state = KLP_UNDEFINED;
+static bool klp_forced = false;
+
/*
* This work can be performed periodically to finish patching or unpatching any
* "straggler" tasks which failed to transition in the first attempt.
@@ -80,7 +82,6 @@ static void klp_complete_transition(void)
struct klp_func *func;
struct task_struct *g, *task;
unsigned int cpu;
- bool immediate_func = false;
pr_debug("'%s': completing %s transition\n",
klp_transition_patch->mod->name,
@@ -102,16 +103,9 @@ static void klp_complete_transition(void)
klp_synchronize_transition();
}
- if (klp_transition_patch->immediate)
- goto done;
-
- klp_for_each_object(klp_transition_patch, obj) {
- klp_for_each_func(obj, func) {
+ klp_for_each_object(klp_transition_patch, obj)
+ klp_for_each_func(obj, func)
func->transition = false;
- if (func->immediate)
- immediate_func = true;
- }
- }
/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
if (klp_target_state == KLP_PATCHED)
@@ -130,7 +124,6 @@ static void klp_complete_transition(void)
task->patch_state = KLP_UNDEFINED;
}
-done:
klp_for_each_object(klp_transition_patch, obj) {
if (!klp_is_object_loaded(obj))
continue;
@@ -144,13 +137,11 @@ done:
klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
/*
- * See complementary comment in __klp_enable_patch() for why we
- * keep the module reference for immediate patches.
+ * Once a transition has been forced (klp_forced), the module reference
+ * is never dropped, which implies an unbounded ref count increase if
+ * the patch is disabled and re-enabled in a loop.
*/
- if (!klp_transition_patch->immediate && !immediate_func &&
- klp_target_state == KLP_UNPATCHED) {
+ if (!klp_forced && klp_target_state == KLP_UNPATCHED)
module_put(klp_transition_patch->mod);
- }
klp_target_state = KLP_UNDEFINED;
klp_transition_patch = NULL;
@@ -218,9 +209,6 @@ static int klp_check_stack_func(struct klp_func *func,
struct klp_ops *ops;
int i;
- if (func->immediate)
- return 0;
-
for (i = 0; i < trace->nr_entries; i++) {
address = trace->entries[i];
@@ -383,13 +371,6 @@ void klp_try_complete_transition(void)
WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);
/*
- * If the patch can be applied or reverted immediately, skip the
- * per-task transitions.
- */
- if (klp_transition_patch->immediate)
- goto success;
-
- /*
* Try to switch the tasks to the target patch state by walking their
* stacks and looking for any to-be-patched or to-be-unpatched
* functions. If such functions are found on a stack, or if the stack
@@ -432,7 +413,6 @@ void klp_try_complete_transition(void)
return;
}
-success:
/* we're done, now cleanup the data structures */
klp_complete_transition();
}
@@ -453,13 +433,6 @@ void klp_start_transition(void)
klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
/*
- * If the patch can be applied or reverted immediately, skip the
- * per-task transitions.
- */
- if (klp_transition_patch->immediate)
- return;
-
- /*
* Mark all normal tasks as needing a patch state update. They'll
* switch either in klp_try_complete_transition() or as they exit the
* kernel.
@@ -509,13 +482,6 @@ void klp_init_transition(struct klp_patch *patch, int state)
klp_target_state == KLP_PATCHED ? "patching" : "unpatching");
/*
- * If the patch can be applied or reverted immediately, skip the
- * per-task transitions.
- */
- if (patch->immediate)
- return;
-
- /*
* Initialize all tasks to the initial patch state to prepare them for
* switching to the target state.
*/
@@ -608,3 +574,71 @@ void klp_copy_process(struct task_struct *child)
/* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
}
+
+/*
+ * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
+ * Kthreads with TIF_PATCH_PENDING set are woken up. Only the admin can
+ * currently request this action.
+ */
+void klp_send_signals(void)
+{
+ struct task_struct *g, *task;
+
+ pr_notice("signaling remaining tasks\n");
+
+ read_lock(&tasklist_lock);
+ for_each_process_thread(g, task) {
+ if (!klp_patch_pending(task))
+ continue;
+
+ /*
+ * There is a small race here. We could see TIF_PATCH_PENDING
+ * set and decide to wake up a kthread or send a fake signal.
+ * Meanwhile the task could migrate itself and the action
+ * would be meaningless. It is not serious though.
+ */
+ if (task->flags & PF_KTHREAD) {
+ /*
+ * Wake up a kthread which sleeps interruptibly and
+ * still has not been migrated.
+ */
+ wake_up_state(task, TASK_INTERRUPTIBLE);
+ } else {
+ /*
+ * Send fake signal to all non-kthread tasks which are
+ * still not migrated.
+ */
+ spin_lock_irq(&task->sighand->siglock);
+ signal_wake_up(task, 0);
+ spin_unlock_irq(&task->sighand->siglock);
+ }
+ }
+ read_unlock(&tasklist_lock);
+}
+
+/*
+ * Drop TIF_PATCH_PENDING of all tasks on admin's request. This forces an
+ * existing transition to finish.
+ *
+ * NOTE: klp_update_patch_state(task) requires the task to be inactive or
+ * 'current'. This is not the case here and the consistency model could be
+ * broken. The administrator, the only one able to execute
+ * klp_force_transition(), has to be aware of this.
+ */
+void klp_force_transition(void)
+{
+ struct task_struct *g, *task;
+ unsigned int cpu;
+
+ pr_warn("forcing remaining tasks to the patched state\n");
+
+ read_lock(&tasklist_lock);
+ for_each_process_thread(g, task)
+ klp_update_patch_state(task);
+ read_unlock(&tasklist_lock);
+
+ for_each_possible_cpu(cpu)
+ klp_update_patch_state(idle_task(cpu));
+
+ klp_forced = true;
+}
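Both helpers only prod tasks; the migration itself happens when each task next hits the patch-state check on its way out of the kernel (the force path just performs it on the task's behalf). Roughly, and hedged, the per-task hook this relies on looks like:

	#include <linux/livepatch.h>
	#include <linux/sched.h>

	/* Sketch: the real hook sits in the exit-to-usermode work loop
	 * (and in the scheduler for kthreads); klp_update_patch_state()
	 * clears TIF_PATCH_PENDING itself. */
	static void sketch_exit_to_user(struct task_struct *task)
	{
		if (test_tsk_thread_flag(task, TIF_PATCH_PENDING))
			klp_update_patch_state(task);
	}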
diff --git a/kernel/livepatch/transition.h b/kernel/livepatch/transition.h
index 0f6e27c481f9..f9d0bc016067 100644
--- a/kernel/livepatch/transition.h
+++ b/kernel/livepatch/transition.h
@@ -11,5 +11,7 @@ void klp_cancel_transition(void);
void klp_start_transition(void);
void klp_try_complete_transition(void);
void klp_reverse_transition(void);
+void klp_send_signals(void);
+void klp_force_transition(void);
#endif /* _LIVEPATCH_TRANSITION_H */
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 5fa1324a4f29..89b5f83f1969 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -49,6 +49,7 @@
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jhash.h>
+#include <linux/nmi.h>
#include <asm/sections.h>
@@ -647,18 +648,12 @@ static int count_matching_names(struct lock_class *new_class)
return count + 1;
}
-/*
- * Register a lock's class in the hash-table, if the class is not present
- * yet. Otherwise we look it up. We cache the result in the lock object
- * itself, so actual lookup of the hash should be once per lock object.
- */
static inline struct lock_class *
-look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
+look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
{
struct lockdep_subclass_key *key;
struct hlist_head *hash_head;
struct lock_class *class;
- bool is_static = false;
if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
debug_locks_off();
@@ -671,24 +666,11 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
}
/*
- * Static locks do not have their class-keys yet - for them the key
- * is the lock object itself. If the lock is in the per cpu area,
- * the canonical address of the lock (per cpu offset removed) is
- * used.
+ * If it is not initialised then it has never been locked,
+ * so it won't be present in the hash table.
*/
- if (unlikely(!lock->key)) {
- unsigned long can_addr, addr = (unsigned long)lock;
-
- if (__is_kernel_percpu_address(addr, &can_addr))
- lock->key = (void *)can_addr;
- else if (__is_module_percpu_address(addr, &can_addr))
- lock->key = (void *)can_addr;
- else if (static_obj(lock))
- lock->key = (void *)lock;
- else
- return ERR_PTR(-EINVAL);
- is_static = true;
- }
+ if (unlikely(!lock->key))
+ return NULL;
/*
* NOTE: the class-key must be unique. For dynamic locks, a static
@@ -720,7 +702,35 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
}
}
- return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL);
+ return NULL;
+}
+
+/*
+ * Static locks do not have their class-keys yet - for them the key is
+ * the lock object itself. If the lock is in the per cpu area, the
+ * canonical address of the lock (per cpu offset removed) is used.
+ */
+static bool assign_lock_key(struct lockdep_map *lock)
+{
+ unsigned long can_addr, addr = (unsigned long)lock;
+
+ if (__is_kernel_percpu_address(addr, &can_addr))
+ lock->key = (void *)can_addr;
+ else if (__is_module_percpu_address(addr, &can_addr))
+ lock->key = (void *)can_addr;
+ else if (static_obj(lock))
+ lock->key = (void *)lock;
+ else {
+ /* Debug-check: all keys must be persistent! */
+ debug_locks_off();
+ pr_err("INFO: trying to register non-static key.\n");
+ pr_err("the code is fine but needs lockdep annotation.\n");
+ pr_err("turning off the locking correctness validator.\n");
+ dump_stack();
+ return false;
+ }
+
+ return true;
}
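assign_lock_key() pulls the "no explicit key yet" handling out of the lookup path, which lets look_up_lock_class() become const and side-effect free and replaces the awkward NULL/ERR_PTR tri-state with a plain NULL. The invariant is unchanged: a class key must have a persistent address (a static object, or the per-cpu-canonicalized lock itself). Code with dynamically allocated locks satisfies it with a static key, e.g.:

	#include <linux/spinlock.h>

	/* The key's *address* names the lockdep class, so it must never
	 * live on the stack or heap; one static key per lock class. */
	static struct lock_class_key mydev_lock_key;

	static void mydev_lock_init(spinlock_t *lock)
	{
		spin_lock_init(lock);
		lockdep_set_class(lock, &mydev_lock_key);
	}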
/*
@@ -738,18 +748,13 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
DEBUG_LOCKS_WARN_ON(!irqs_disabled());
class = look_up_lock_class(lock, subclass);
- if (likely(!IS_ERR_OR_NULL(class)))
+ if (likely(class))
goto out_set_class_cache;
- /*
- * Debug-check: all keys must be persistent!
- */
- if (IS_ERR(class)) {
- debug_locks_off();
- printk("INFO: trying to register non-static key.\n");
- printk("the code is fine but needs lockdep annotation.\n");
- printk("turning off the locking correctness validator.\n");
- dump_stack();
+ if (!lock->key) {
+ if (!assign_lock_key(lock))
+ return NULL;
+ } else if (!static_obj(lock->key)) {
return NULL;
}
@@ -3272,7 +3277,7 @@ print_lock_nested_lock_not_held(struct task_struct *curr,
return 0;
}
-static int __lock_is_held(struct lockdep_map *lock, int read);
+static int __lock_is_held(const struct lockdep_map *lock, int read);
/*
* This gets called for every mutex_lock*()/spin_lock*() operation.
@@ -3481,13 +3486,14 @@ print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
return 0;
}
-static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
+static int match_held_lock(const struct held_lock *hlock,
+ const struct lockdep_map *lock)
{
if (hlock->instance == lock)
return 1;
if (hlock->references) {
- struct lock_class *class = lock->class_cache[0];
+ const struct lock_class *class = lock->class_cache[0];
if (!class)
class = look_up_lock_class(lock, 0);
@@ -3498,7 +3504,7 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
* Clearly if the lock hasn't been acquired _ever_, we're not
* holding it either, so report failure.
*/
- if (IS_ERR_OR_NULL(class))
+ if (!class)
return 0;
/*
@@ -3723,7 +3729,7 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
return 1;
}
-static int __lock_is_held(struct lockdep_map *lock, int read)
+static int __lock_is_held(const struct lockdep_map *lock, int read)
{
struct task_struct *curr = current;
int i;
@@ -3937,7 +3943,7 @@ void lock_release(struct lockdep_map *lock, int nested,
}
EXPORT_SYMBOL_GPL(lock_release);
-int lock_is_held_type(struct lockdep_map *lock, int read)
+int lock_is_held_type(const struct lockdep_map *lock, int read)
{
unsigned long flags;
int ret = 0;
@@ -4294,7 +4300,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
* If the class exists we look it up and zap it:
*/
class = look_up_lock_class(lock, j);
- if (!IS_ERR_OR_NULL(class))
+ if (class)
zap_class(class);
}
/*
@@ -4490,6 +4496,7 @@ retry:
if (!unlock)
if (read_trylock(&tasklist_lock))
unlock = 1;
+ touch_nmi_watchdog();
} while_each_thread(g, p);
pr_warn("\n");
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index f24582d4dad3..6850ffd69125 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -77,10 +77,6 @@ struct lock_stress_stats {
long n_lock_acquired;
};
-int torture_runnable = IS_ENABLED(MODULE);
-module_param(torture_runnable, int, 0444);
-MODULE_PARM_DESC(torture_runnable, "Start locktorture at module init");
-
/* Forward reference. */
static void lock_torture_cleanup(void);
@@ -130,10 +126,8 @@ static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
if (!(torture_random(trsp) %
(cxt.nrealwriters_stress * 2000 * longdelay_ms)))
mdelay(longdelay_ms);
-#ifdef CONFIG_PREEMPT
if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
- preempt_schedule(); /* Allow test to be preempted. */
-#endif
+ torture_preempt_schedule(); /* Allow test to be preempted. */
}
static void torture_lock_busted_write_unlock(void)
@@ -179,10 +173,8 @@ static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
if (!(torture_random(trsp) %
(cxt.nrealwriters_stress * 2 * shortdelay_us)))
udelay(shortdelay_us);
-#ifdef CONFIG_PREEMPT
if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
- preempt_schedule(); /* Allow test to be preempted. */
-#endif
+ torture_preempt_schedule(); /* Allow test to be preempted. */
}
static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock)
@@ -352,10 +344,8 @@ static void torture_mutex_delay(struct torture_random_state *trsp)
mdelay(longdelay_ms * 5);
else
mdelay(longdelay_ms / 5);
-#ifdef CONFIG_PREEMPT
if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
- preempt_schedule(); /* Allow test to be preempted. */
-#endif
+ torture_preempt_schedule(); /* Allow test to be preempted. */
}
static void torture_mutex_unlock(void) __releases(torture_mutex)
@@ -507,10 +497,8 @@ static void torture_rtmutex_delay(struct torture_random_state *trsp)
if (!(torture_random(trsp) %
(cxt.nrealwriters_stress * 2 * shortdelay_us)))
udelay(shortdelay_us);
-#ifdef CONFIG_PREEMPT
if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
- preempt_schedule(); /* Allow test to be preempted. */
-#endif
+ torture_preempt_schedule(); /* Allow test to be preempted. */
}
static void torture_rtmutex_unlock(void) __releases(torture_rtmutex)
@@ -547,10 +535,8 @@ static void torture_rwsem_write_delay(struct torture_random_state *trsp)
mdelay(longdelay_ms * 10);
else
mdelay(longdelay_ms / 10);
-#ifdef CONFIG_PREEMPT
if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
- preempt_schedule(); /* Allow test to be preempted. */
-#endif
+ torture_preempt_schedule(); /* Allow test to be preempted. */
}
static void torture_rwsem_up_write(void) __releases(torture_rwsem)
@@ -570,14 +556,12 @@ static void torture_rwsem_read_delay(struct torture_random_state *trsp)
/* We want a long delay occasionally to force massive contention. */
if (!(torture_random(trsp) %
- (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
+ (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
mdelay(longdelay_ms * 2);
else
mdelay(longdelay_ms / 2);
-#ifdef CONFIG_PREEMPT
if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
- preempt_schedule(); /* Allow test to be preempted. */
-#endif
+ torture_preempt_schedule(); /* Allow test to be preempted. */
}
static void torture_rwsem_up_read(void) __releases(torture_rwsem)
@@ -715,8 +699,7 @@ static void __torture_print_stats(char *page,
{
bool fail = 0;
int i, n_stress;
- long max = 0;
- long min = statp[0].n_lock_acquired;
+ long max = 0, min = statp ? statp[0].n_lock_acquired : 0;
long long sum = 0;
n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
@@ -823,7 +806,7 @@ static void lock_torture_cleanup(void)
* such, only perform the underlying torture-specific cleanups,
* and avoid anything related to locktorture.
*/
- if (!cxt.lwsa)
+ if (!cxt.lwsa && !cxt.lrsa)
goto end;
if (writer_tasks) {
@@ -879,7 +862,7 @@ static int __init lock_torture_init(void)
&percpu_rwsem_lock_ops,
};
- if (!torture_init_begin(torture_type, verbose, &torture_runnable))
+ if (!torture_init_begin(torture_type, verbose))
return -EBUSY;
/* Process args and tell the world that the torturer is on the job. */
@@ -898,6 +881,13 @@ static int __init lock_torture_init(void)
firsterr = -EINVAL;
goto unwind;
}
+
+ if (nwriters_stress == 0 && nreaders_stress == 0) {
+ pr_alert("lock-torture: must run at least one locking thread\n");
+ firsterr = -EINVAL;
+ goto unwind;
+ }
+
if (cxt.cur_ops->init)
cxt.cur_ops->init();
@@ -921,17 +911,19 @@ static int __init lock_torture_init(void)
#endif
/* Initialize the statistics so that each run gets its own numbers. */
+ if (nwriters_stress) {
+ lock_is_write_held = 0;
+ cxt.lwsa = kmalloc(sizeof(*cxt.lwsa) * cxt.nrealwriters_stress, GFP_KERNEL);
+ if (cxt.lwsa == NULL) {
+ VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
+ firsterr = -ENOMEM;
+ goto unwind;
+ }
- lock_is_write_held = 0;
- cxt.lwsa = kmalloc(sizeof(*cxt.lwsa) * cxt.nrealwriters_stress, GFP_KERNEL);
- if (cxt.lwsa == NULL) {
- VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
- firsterr = -ENOMEM;
- goto unwind;
- }
- for (i = 0; i < cxt.nrealwriters_stress; i++) {
- cxt.lwsa[i].n_lock_fail = 0;
- cxt.lwsa[i].n_lock_acquired = 0;
+ for (i = 0; i < cxt.nrealwriters_stress; i++) {
+ cxt.lwsa[i].n_lock_fail = 0;
+ cxt.lwsa[i].n_lock_acquired = 0;
+ }
}
if (cxt.cur_ops->readlock) {
@@ -948,19 +940,21 @@ static int __init lock_torture_init(void)
cxt.nrealreaders_stress = cxt.nrealwriters_stress;
}
- lock_is_read_held = 0;
- cxt.lrsa = kmalloc(sizeof(*cxt.lrsa) * cxt.nrealreaders_stress, GFP_KERNEL);
- if (cxt.lrsa == NULL) {
- VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
- firsterr = -ENOMEM;
- kfree(cxt.lwsa);
- cxt.lwsa = NULL;
- goto unwind;
- }
-
- for (i = 0; i < cxt.nrealreaders_stress; i++) {
- cxt.lrsa[i].n_lock_fail = 0;
- cxt.lrsa[i].n_lock_acquired = 0;
+ if (nreaders_stress) {
+ lock_is_read_held = 0;
+ cxt.lrsa = kmalloc(sizeof(*cxt.lrsa) * cxt.nrealreaders_stress, GFP_KERNEL);
+ if (cxt.lrsa == NULL) {
+ VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
+ firsterr = -ENOMEM;
+ kfree(cxt.lwsa);
+ cxt.lwsa = NULL;
+ goto unwind;
+ }
+
+ for (i = 0; i < cxt.nrealreaders_stress; i++) {
+ cxt.lrsa[i].n_lock_fail = 0;
+ cxt.lrsa[i].n_lock_acquired = 0;
+ }
}
}
@@ -990,12 +984,14 @@ static int __init lock_torture_init(void)
goto unwind;
}
- writer_tasks = kzalloc(cxt.nrealwriters_stress * sizeof(writer_tasks[0]),
- GFP_KERNEL);
- if (writer_tasks == NULL) {
- VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory");
- firsterr = -ENOMEM;
- goto unwind;
+ if (nwriters_stress) {
+ writer_tasks = kzalloc(cxt.nrealwriters_stress * sizeof(writer_tasks[0]),
+ GFP_KERNEL);
+ if (writer_tasks == NULL) {
+ VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory");
+ firsterr = -ENOMEM;
+ goto unwind;
+ }
}
if (cxt.cur_ops->readlock) {
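Each of the delay-function hunks above replaces the same open-coded #ifdef CONFIG_PREEMPT block with torture_preempt_schedule() from linux/torture.h, which compiles away on non-preemptible kernels. Its definition is approximately:

	#ifdef CONFIG_PREEMPT
	#define torture_preempt_schedule()	preempt_schedule()
	#else
	#define torture_preempt_schedule()	do { } while (0)
	#endif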
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 294294c71ba4..38ece035039e 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -170,7 +170,7 @@ static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
* @tail : The new queue tail code word
* Return: The previous queue tail code word
*
- * xchg(lock, tail)
+ * xchg(lock, tail), which heads an address dependency
*
* p,*,* -> n,*,* ; prev = xchg(lock, node)
*/
@@ -409,13 +409,11 @@ queue:
if (old & _Q_TAIL_MASK) {
prev = decode_tail(old);
/*
- * The above xchg_tail() is also a load of @lock which generates,
- * through decode_tail(), a pointer.
- *
- * The address dependency matches the RELEASE of xchg_tail()
- * such that the access to @prev must happen after.
+ * The above xchg_tail() is also a load of @lock which
+ * generates, through decode_tail(), a pointer. The address
+ * dependency matches the RELEASE of xchg_tail() such that
+ * the subsequent access to @prev happens after.
*/
- smp_read_barrier_depends();
WRITE_ONCE(prev->next, node);
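With the Alpha barrier folded into READ_ONCE() and dependent loads, an explicit smp_read_barrier_depends() after a dependency-heading load is dead weight: the compiler-visible flow from the xchg_tail() return value through decode_tail() to the dereference is what orders the store to prev->next. The general pattern, in miniature (struct qnode_like is illustrative):

	#include <linux/compiler.h>

	struct qnode_like { struct qnode_like *next; };

	static struct qnode_like *head;

	static void link_after_load(struct qnode_like *me)
	{
		struct qnode_like *prev = READ_ONCE(head);	/* heads dependency */

		if (prev)
			WRITE_ONCE(prev->next, me);	/* ordered via the
							 * address dependency */
	}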
diff --git a/kernel/module.c b/kernel/module.c
index 601494d4b7ea..1d65b2cc4f80 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2863,6 +2863,15 @@ static int check_modinfo_livepatch(struct module *mod, struct load_info *info)
}
#endif /* CONFIG_LIVEPATCH */
+static void check_modinfo_retpoline(struct module *mod, struct load_info *info)
+{
+ if (retpoline_module_ok(get_modinfo(info, "retpoline")))
+ return;
+
+ pr_warn("%s: loading module not compiled with retpoline compiler.\n",
+ mod->name);
+}
+
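retpoline_module_ok() is an x86 nospec-branch helper; the "retpoline" string it is handed comes from the module's .modinfo section, where retpoline-built modules get a tag stamped at build time (roughly MODULE_INFO(retpoline, "Y"); the exact emission site is not shown in this diff). By contract, hedged, the check amounts to:

	#include <linux/kconfig.h>	/* IS_ENABLED() */

	/* Assumption: mirrors the helper's contract, not its exact body. */
	static inline bool retpoline_module_ok_sketch(bool has_retpoline_tag)
	{
		/* Fine if the kernel itself isn't retpoline-built, or if
		 * the module carries the tag. */
		return !IS_ENABLED(CONFIG_RETPOLINE) || has_retpoline_tag;
	}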
/* Sets info->hdr and info->len. */
static int copy_module_from_user(const void __user *umod, unsigned long len,
struct load_info *info)
@@ -3029,6 +3038,8 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK);
}
+ check_modinfo_retpoline(mod, info);
+
if (get_modinfo(info, "staging")) {
add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK);
pr_warn("%s: module is from the staging directory, the quality "
diff --git a/kernel/padata.c b/kernel/padata.c
index 57c0074d50cc..d568cc56405f 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* padata.c - generic interface to process data streams in parallel
*
diff --git a/kernel/pid.c b/kernel/pid.c
index 1e8bb6550ec4..5d30c87e3c42 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -41,7 +41,19 @@
#include <linux/sched/task.h>
#include <linux/idr.h>
-struct pid init_struct_pid = INIT_STRUCT_PID;
+struct pid init_struct_pid = {
+ .count = ATOMIC_INIT(1),
+ .tasks = {
+ { .first = NULL },
+ { .first = NULL },
+ { .first = NULL },
+ },
+ .level = 0,
+ .numbers = { {
+ .nr = 0,
+ .ns = &init_pid_ns,
+ }, }
+};
int pid_max = PID_MAX_DEFAULT;
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 3a2ca9066583..705c2366dafe 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -22,6 +22,35 @@ DEFINE_MUTEX(pm_mutex);
#ifdef CONFIG_PM_SLEEP
+void lock_system_sleep(void)
+{
+ current->flags |= PF_FREEZER_SKIP;
+ mutex_lock(&pm_mutex);
+}
+EXPORT_SYMBOL_GPL(lock_system_sleep);
+
+void unlock_system_sleep(void)
+{
+ /*
+ * Don't use freezer_count() because we don't want the call to
+ * try_to_freeze() here.
+ *
+ * Reason:
+ * Fundamentally, we just don't need it, because the freezing condition
+ * doesn't come into effect until we release the pm_mutex lock,
+ * since the freezer always works with pm_mutex held.
+ *
+ * More importantly, in the case of hibernation,
+ * unlock_system_sleep() gets called in snapshot_read() and
+ * snapshot_write() when the freezing condition is still in effect.
+ * This means that if we used try_to_freeze() here, it would make
+ * them enter the refrigerator, thus causing hibernation to lock up.
+ */
+ current->flags &= ~PF_FREEZER_SKIP;
+ mutex_unlock(&pm_mutex);
+}
+EXPORT_SYMBOL_GPL(unlock_system_sleep);
+
/* Routines for PM-transition notifications */
static BLOCKING_NOTIFIER_HEAD(pm_chain_head);
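A hedged sketch of the intended caller pattern for the pair moved above (hypothetical driver code): PF_FREEZER_SKIP tells the freezer not to wait for a task that may itself be blocked on pm_mutex, avoiding a freeze-time deadlock.

    static void example_touch_pm_state(void)
    {
            lock_system_sleep();    /* Set PF_FREEZER_SKIP, take pm_mutex. */
            /* ... state that must not race with suspend/hibernate ... */
            unlock_system_sleep();  /* Clear PF_FREEZER_SKIP, drop lock. */
    }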
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index bce0464524d8..3d37c279c090 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1645,8 +1645,7 @@ static unsigned long free_unnecessary_pages(void)
* [number of saveable pages] - [number of pages that can be freed in theory]
*
* where the second term is the sum of (1) reclaimable slab pages, (2) active
- * and (3) inactive anonymous pages, (4) active and (5) inactive file pages,
- * minus mapped file pages.
+ * and (3) inactive anonymous pages, (4) active and (5) inactive file pages.
*/
static unsigned long minimum_image_size(unsigned long saveable)
{
@@ -1656,8 +1655,7 @@ static unsigned long minimum_image_size(unsigned long saveable)
+ global_node_page_state(NR_ACTIVE_ANON)
+ global_node_page_state(NR_INACTIVE_ANON)
+ global_node_page_state(NR_ACTIVE_FILE)
- + global_node_page_state(NR_INACTIVE_FILE)
- - global_node_page_state(NR_FILE_MAPPED);
+ + global_node_page_state(NR_INACTIVE_FILE);
return saveable <= size ? 0 : saveable - size;
}
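After this change the estimate is simply the saveable count minus the reclaimable total, clamped at zero; a hedged restatement with the NR_FILE_MAPPED term gone (illustrative helper, same arithmetic as minimum_image_size()):

    static unsigned long min_image_size(unsigned long saveable,
                                        unsigned long reclaimable)
    {
            /*
             * reclaimable = reclaimable slab + active/inactive anon
             *             + active/inactive file pages; mapped file
             *             pages are no longer subtracted.
             */
            return saveable <= reclaimable ? 0 : saveable - reclaimable;
    }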
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 293ead59eccc..11b4282c2d20 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -240,7 +240,7 @@ static void hib_init_batch(struct hib_bio_batch *hb)
static void hib_end_io(struct bio *bio)
{
struct hib_bio_batch *hb = bio->bi_private;
- struct page *page = bio->bi_io_vec[0].bv_page;
+ struct page *page = bio_first_page_all(bio);
if (bio->bi_status) {
pr_alert("Read-error on swap-device (%u:%u:%Lu)\n",
@@ -879,7 +879,7 @@ out_clean:
* space available from the resume partition.
*/
-static int enough_swap(unsigned int nr_pages, unsigned int flags)
+static int enough_swap(unsigned int nr_pages)
{
unsigned int free_swap = count_swap_pages(root_swap, 1);
unsigned int required;
@@ -915,7 +915,7 @@ int swsusp_write(unsigned int flags)
return error;
}
if (flags & SF_NOCOMPRESS_MODE) {
- if (!enough_swap(pages, flags)) {
+ if (!enough_swap(pages)) {
pr_err("Not enough free swap\n");
error = -ENOSPC;
goto out_finish;
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index b9006617710f..c2e713f6ae2e 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -920,10 +920,10 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
return ret;
}
-static unsigned int devkmsg_poll(struct file *file, poll_table *wait)
+static __poll_t devkmsg_poll(struct file *file, poll_table *wait)
{
struct devkmsg_user *user = file->private_data;
- int ret = 0;
+ __poll_t ret = 0;
if (!user)
return POLLERR|POLLNVAL;
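__poll_t is a bitwise-annotated type, so sparse can now flag code that mixes poll masks with ordinary integers. A hedged sketch of the converted handler shape (the wait queue and readiness predicate are placeholders):

    static DECLARE_WAIT_QUEUE_HEAD(example_wq);

    static __poll_t example_poll(struct file *file, poll_table *wait)
    {
            __poll_t mask = 0;              /* bitwise type, not int */

            poll_wait(file, &example_wq, wait);
            if (example_data_ready(file))   /* placeholder predicate */
                    mask |= POLLIN | POLLRDNORM;
            return mask;
    }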
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 84b1367935e4..5e1d713c8e61 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -659,7 +659,7 @@ static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
if (lock_task_sighand(child, &flags)) {
error = -EINVAL;
if (likely(child->last_siginfo != NULL)) {
- *info = *child->last_siginfo;
+ copy_siginfo(info, child->last_siginfo);
error = 0;
}
unlock_task_sighand(child, &flags);
@@ -675,7 +675,7 @@ static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
if (lock_task_sighand(child, &flags)) {
error = -EINVAL;
if (likely(child->last_siginfo != NULL)) {
- *child->last_siginfo = *info;
+ copy_siginfo(child->last_siginfo, info);
error = 0;
}
unlock_task_sighand(child, &flags);
@@ -1092,6 +1092,10 @@ int ptrace_request(struct task_struct *child, long request,
ret = seccomp_get_filter(child, addr, datavp);
break;
+ case PTRACE_SECCOMP_GET_METADATA:
+ ret = seccomp_get_metadata(child, addr, datavp);
+ break;
+
default:
break;
}
@@ -1226,7 +1230,6 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
break;
case PTRACE_SETSIGINFO:
- memset(&siginfo, 0, sizeof siginfo);
if (copy_siginfo_from_user32(
&siginfo, (struct compat_siginfo __user *) datap))
ret = -EFAULT;
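A hedged user-space sketch of the new PTRACE_SECCOMP_GET_METADATA request added above (error handling elided; per this cycle's seccomp UAPI, the third ptrace argument carries the size of the struct):

    #include <sys/ptrace.h>
    #include <linux/ptrace.h>

    struct seccomp_metadata md = { .filter_off = 0 };

    /* Fetch the flags (e.g. the log bit) of the tracee's first filter. */
    long ret = ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md);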
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index 59c471de342a..6334f2c1abd0 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -30,31 +30,8 @@
#define RCU_TRACE(stmt)
#endif /* #else #ifdef CONFIG_RCU_TRACE */
-/*
- * Process-level increment to ->dynticks_nesting field. This allows for
- * architectures that use half-interrupts and half-exceptions from
- * process context.
- *
- * DYNTICK_TASK_NEST_MASK defines a field of width DYNTICK_TASK_NEST_WIDTH
- * that counts the number of process-based reasons why RCU cannot
- * consider the corresponding CPU to be idle, and DYNTICK_TASK_NEST_VALUE
- * is the value used to increment or decrement this field.
- *
- * The rest of the bits could in principle be used to count interrupts,
- * but this would mean that a negative-one value in the interrupt
- * field could incorrectly zero out the DYNTICK_TASK_NEST_MASK field.
- * We therefore provide a two-bit guard field defined by DYNTICK_TASK_MASK
- * that is set to DYNTICK_TASK_FLAG upon initial exit from idle.
- * The DYNTICK_TASK_EXIT_IDLE value is thus the combined value used upon
- * initial exit from idle.
- */
-#define DYNTICK_TASK_NEST_WIDTH 7
-#define DYNTICK_TASK_NEST_VALUE ((LLONG_MAX >> DYNTICK_TASK_NEST_WIDTH) + 1)
-#define DYNTICK_TASK_NEST_MASK (LLONG_MAX - DYNTICK_TASK_NEST_VALUE + 1)
-#define DYNTICK_TASK_FLAG ((DYNTICK_TASK_NEST_VALUE / 8) * 2)
-#define DYNTICK_TASK_MASK ((DYNTICK_TASK_NEST_VALUE / 8) * 3)
-#define DYNTICK_TASK_EXIT_IDLE (DYNTICK_TASK_NEST_VALUE + \
- DYNTICK_TASK_FLAG)
+/* Offset to allow for unmatched rcu_irq_{enter,exit}(). */
+#define DYNTICK_IRQ_NONIDLE ((LONG_MAX / 2) + 1)
/*
diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c
index 1f87a02c3399..d1ebdf9868bb 100644
--- a/kernel/rcu/rcuperf.c
+++ b/kernel/rcu/rcuperf.c
@@ -106,10 +106,6 @@ static int rcu_perf_writer_state;
#define MAX_MEAS 10000
#define MIN_MEAS 100
-static int perf_runnable = IS_ENABLED(MODULE);
-module_param(perf_runnable, int, 0444);
-MODULE_PARM_DESC(perf_runnable, "Start rcuperf at boot");
-
/*
* Operations vector for selecting different types of tests.
*/
@@ -646,7 +642,7 @@ rcu_perf_init(void)
&tasks_ops,
};
- if (!torture_init_begin(perf_type, verbose, &perf_runnable))
+ if (!torture_init_begin(perf_type, verbose))
return -EBUSY;
/* Process args and tell the world that the perf'er is on the job. */
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 74f6b0146b98..308e6fdbced8 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -187,10 +187,6 @@ static const char *rcu_torture_writer_state_getname(void)
return rcu_torture_writer_state_names[i];
}
-static int torture_runnable = IS_ENABLED(MODULE);
-module_param(torture_runnable, int, 0444);
-MODULE_PARM_DESC(torture_runnable, "Start rcutorture at boot");
-
#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
@@ -315,11 +311,9 @@ static void rcu_read_delay(struct torture_random_state *rrsp)
}
if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
udelay(shortdelay_us);
-#ifdef CONFIG_PREEMPT
if (!preempt_count() &&
- !(torture_random(rrsp) % (nrealreaders * 20000)))
- preempt_schedule(); /* No QS if preempt_disable() in effect */
-#endif
+ !(torture_random(rrsp) % (nrealreaders * 500)))
+ torture_preempt_schedule(); /* QS only if preemptible. */
}
static void rcu_torture_read_unlock(int idx) __releases(RCU)
@@ -1731,7 +1725,7 @@ rcu_torture_init(void)
&sched_ops, &tasks_ops,
};
- if (!torture_init_begin(torture_type, verbose, &torture_runnable))
+ if (!torture_init_begin(torture_type, verbose))
return -EBUSY;
/* Process args and tell the world that the torturer is on the job. */
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index 6d5880089ff6..d5cea81378cc 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -53,6 +53,33 @@ static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay);
static void process_srcu(struct work_struct *work);
+/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
+#define spin_lock_rcu_node(p) \
+do { \
+ spin_lock(&ACCESS_PRIVATE(p, lock)); \
+ smp_mb__after_unlock_lock(); \
+} while (0)
+
+#define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))
+
+#define spin_lock_irq_rcu_node(p) \
+do { \
+ spin_lock_irq(&ACCESS_PRIVATE(p, lock)); \
+ smp_mb__after_unlock_lock(); \
+} while (0)
+
+#define spin_unlock_irq_rcu_node(p) \
+ spin_unlock_irq(&ACCESS_PRIVATE(p, lock))
+
+#define spin_lock_irqsave_rcu_node(p, flags) \
+do { \
+ spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
+ smp_mb__after_unlock_lock(); \
+} while (0)
+
+#define spin_unlock_irqrestore_rcu_node(p, flags) \
+ spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)
+
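These wrappers pair every acquisition with smp_mb__after_unlock_lock(), upgrading unlock-then-lock sequences into full barriers for the grace-period ordering below. Usage shape, as a hedged sketch:

    spin_lock_irqsave_rcu_node(sdp, flags);
    /* Fully ordered after any prior *_unlock_rcu_node() elsewhere. */
    spin_unlock_irqrestore_rcu_node(sdp, flags);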
/*
* Initialize SRCU combining tree. Note that statically allocated
* srcu_struct structures might already have srcu_read_lock() and
@@ -77,7 +104,7 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
/* Each pass through this loop initializes one srcu_node structure. */
rcu_for_each_node_breadth_first(sp, snp) {
- raw_spin_lock_init(&ACCESS_PRIVATE(snp, lock));
+ spin_lock_init(&ACCESS_PRIVATE(snp, lock));
WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
ARRAY_SIZE(snp->srcu_data_have_cbs));
for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
@@ -111,7 +138,7 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
snp_first = sp->level[level];
for_each_possible_cpu(cpu) {
sdp = per_cpu_ptr(sp->sda, cpu);
- raw_spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
+ spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
rcu_segcblist_init(&sdp->srcu_cblist);
sdp->srcu_cblist_invoking = false;
sdp->srcu_gp_seq_needed = sp->srcu_gp_seq;
@@ -170,7 +197,7 @@ int __init_srcu_struct(struct srcu_struct *sp, const char *name,
/* Don't re-initialize a lock while it is held. */
debug_check_no_locks_freed((void *)sp, sizeof(*sp));
lockdep_init_map(&sp->dep_map, name, key, 0);
- raw_spin_lock_init(&ACCESS_PRIVATE(sp, lock));
+ spin_lock_init(&ACCESS_PRIVATE(sp, lock));
return init_srcu_struct_fields(sp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);
@@ -187,7 +214,7 @@ EXPORT_SYMBOL_GPL(__init_srcu_struct);
*/
int init_srcu_struct(struct srcu_struct *sp)
{
- raw_spin_lock_init(&ACCESS_PRIVATE(sp, lock));
+ spin_lock_init(&ACCESS_PRIVATE(sp, lock));
return init_srcu_struct_fields(sp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);
@@ -210,13 +237,13 @@ static void check_init_srcu_struct(struct srcu_struct *sp)
/* The smp_load_acquire() pairs with the smp_store_release(). */
if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed))) /*^^^*/
return; /* Already initialized. */
- raw_spin_lock_irqsave_rcu_node(sp, flags);
+ spin_lock_irqsave_rcu_node(sp, flags);
if (!rcu_seq_state(sp->srcu_gp_seq_needed)) {
- raw_spin_unlock_irqrestore_rcu_node(sp, flags);
+ spin_unlock_irqrestore_rcu_node(sp, flags);
return;
}
init_srcu_struct_fields(sp, true);
- raw_spin_unlock_irqrestore_rcu_node(sp, flags);
+ spin_unlock_irqrestore_rcu_node(sp, flags);
}
/*
@@ -513,7 +540,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
mutex_lock(&sp->srcu_cb_mutex);
/* End the current grace period. */
- raw_spin_lock_irq_rcu_node(sp);
+ spin_lock_irq_rcu_node(sp);
idx = rcu_seq_state(sp->srcu_gp_seq);
WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
cbdelay = srcu_get_delay(sp);
@@ -522,7 +549,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
gpseq = rcu_seq_current(&sp->srcu_gp_seq);
if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq))
sp->srcu_gp_seq_needed_exp = gpseq;
- raw_spin_unlock_irq_rcu_node(sp);
+ spin_unlock_irq_rcu_node(sp);
mutex_unlock(&sp->srcu_gp_mutex);
/* A new grace period can start at this point. But only one. */
@@ -530,7 +557,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
idxnext = (idx + 1) % ARRAY_SIZE(snp->srcu_have_cbs);
rcu_for_each_node_breadth_first(sp, snp) {
- raw_spin_lock_irq_rcu_node(snp);
+ spin_lock_irq_rcu_node(snp);
cbs = false;
if (snp >= sp->level[rcu_num_lvls - 1])
cbs = snp->srcu_have_cbs[idx] == gpseq;
@@ -540,7 +567,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
snp->srcu_gp_seq_needed_exp = gpseq;
mask = snp->srcu_data_have_cbs[idx];
snp->srcu_data_have_cbs[idx] = 0;
- raw_spin_unlock_irq_rcu_node(snp);
+ spin_unlock_irq_rcu_node(snp);
if (cbs)
srcu_schedule_cbs_snp(sp, snp, mask, cbdelay);
@@ -548,11 +575,11 @@ static void srcu_gp_end(struct srcu_struct *sp)
if (!(gpseq & counter_wrap_check))
for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
sdp = per_cpu_ptr(sp->sda, cpu);
- raw_spin_lock_irqsave_rcu_node(sdp, flags);
+ spin_lock_irqsave_rcu_node(sdp, flags);
if (ULONG_CMP_GE(gpseq,
sdp->srcu_gp_seq_needed + 100))
sdp->srcu_gp_seq_needed = gpseq;
- raw_spin_unlock_irqrestore_rcu_node(sdp, flags);
+ spin_unlock_irqrestore_rcu_node(sdp, flags);
}
}
@@ -560,17 +587,17 @@ static void srcu_gp_end(struct srcu_struct *sp)
mutex_unlock(&sp->srcu_cb_mutex);
/* Start a new grace period if needed. */
- raw_spin_lock_irq_rcu_node(sp);
+ spin_lock_irq_rcu_node(sp);
gpseq = rcu_seq_current(&sp->srcu_gp_seq);
if (!rcu_seq_state(gpseq) &&
ULONG_CMP_LT(gpseq, sp->srcu_gp_seq_needed)) {
srcu_gp_start(sp);
- raw_spin_unlock_irq_rcu_node(sp);
+ spin_unlock_irq_rcu_node(sp);
/* Throttle expedited grace periods: Should be rare! */
srcu_reschedule(sp, rcu_seq_ctr(gpseq) & 0x3ff
? 0 : SRCU_INTERVAL);
} else {
- raw_spin_unlock_irq_rcu_node(sp);
+ spin_unlock_irq_rcu_node(sp);
}
}
@@ -590,18 +617,18 @@ static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
if (rcu_seq_done(&sp->srcu_gp_seq, s) ||
ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
return;
- raw_spin_lock_irqsave_rcu_node(snp, flags);
+ spin_lock_irqsave_rcu_node(snp, flags);
if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
- raw_spin_unlock_irqrestore_rcu_node(snp, flags);
+ spin_unlock_irqrestore_rcu_node(snp, flags);
return;
}
WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
- raw_spin_unlock_irqrestore_rcu_node(snp, flags);
+ spin_unlock_irqrestore_rcu_node(snp, flags);
}
- raw_spin_lock_irqsave_rcu_node(sp, flags);
+ spin_lock_irqsave_rcu_node(sp, flags);
if (!ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
sp->srcu_gp_seq_needed_exp = s;
- raw_spin_unlock_irqrestore_rcu_node(sp, flags);
+ spin_unlock_irqrestore_rcu_node(sp, flags);
}
/*
@@ -623,12 +650,12 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
for (; snp != NULL; snp = snp->srcu_parent) {
if (rcu_seq_done(&sp->srcu_gp_seq, s) && snp != sdp->mynode)
return; /* GP already done and CBs recorded. */
- raw_spin_lock_irqsave_rcu_node(snp, flags);
+ spin_lock_irqsave_rcu_node(snp, flags);
if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
snp_seq = snp->srcu_have_cbs[idx];
if (snp == sdp->mynode && snp_seq == s)
snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
- raw_spin_unlock_irqrestore_rcu_node(snp, flags);
+ spin_unlock_irqrestore_rcu_node(snp, flags);
if (snp == sdp->mynode && snp_seq != s) {
srcu_schedule_cbs_sdp(sdp, do_norm
? SRCU_INTERVAL
@@ -644,11 +671,11 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
snp->srcu_gp_seq_needed_exp = s;
- raw_spin_unlock_irqrestore_rcu_node(snp, flags);
+ spin_unlock_irqrestore_rcu_node(snp, flags);
}
/* Top of tree, must ensure the grace period will be started. */
- raw_spin_lock_irqsave_rcu_node(sp, flags);
+ spin_lock_irqsave_rcu_node(sp, flags);
if (ULONG_CMP_LT(sp->srcu_gp_seq_needed, s)) {
/*
* Record need for grace period s. Pair with load
@@ -667,7 +694,7 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
queue_delayed_work(system_power_efficient_wq, &sp->work,
srcu_get_delay(sp));
}
- raw_spin_unlock_irqrestore_rcu_node(sp, flags);
+ spin_unlock_irqrestore_rcu_node(sp, flags);
}
/*
@@ -830,7 +857,7 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
rhp->func = func;
local_irq_save(flags);
sdp = this_cpu_ptr(sp->sda);
- raw_spin_lock_rcu_node(sdp);
+ spin_lock_rcu_node(sdp);
rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
rcu_segcblist_advance(&sdp->srcu_cblist,
rcu_seq_current(&sp->srcu_gp_seq));
@@ -844,7 +871,7 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
sdp->srcu_gp_seq_needed_exp = s;
needexp = true;
}
- raw_spin_unlock_irqrestore_rcu_node(sdp, flags);
+ spin_unlock_irqrestore_rcu_node(sdp, flags);
if (needgp)
srcu_funnel_gp_start(sp, sdp, s, do_norm);
else if (needexp)
@@ -900,7 +927,7 @@ static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm)
/*
* Make sure that later code is ordered after the SRCU grace
- * period. This pairs with the raw_spin_lock_irq_rcu_node()
+ * period. This pairs with the spin_lock_irq_rcu_node()
* in srcu_invoke_callbacks(). Unlike Tree RCU, this is needed
* because the current CPU might have been totally uninvolved with
* (and thus unordered against) that grace period.
@@ -1024,7 +1051,7 @@ void srcu_barrier(struct srcu_struct *sp)
*/
for_each_possible_cpu(cpu) {
sdp = per_cpu_ptr(sp->sda, cpu);
- raw_spin_lock_irq_rcu_node(sdp);
+ spin_lock_irq_rcu_node(sdp);
atomic_inc(&sp->srcu_barrier_cpu_cnt);
sdp->srcu_barrier_head.func = srcu_barrier_cb;
debug_rcu_head_queue(&sdp->srcu_barrier_head);
@@ -1033,7 +1060,7 @@ void srcu_barrier(struct srcu_struct *sp)
debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
atomic_dec(&sp->srcu_barrier_cpu_cnt);
}
- raw_spin_unlock_irq_rcu_node(sdp);
+ spin_unlock_irq_rcu_node(sdp);
}
/* Remove the initial count, at which point reaching zero can happen. */
@@ -1082,17 +1109,17 @@ static void srcu_advance_state(struct srcu_struct *sp)
*/
idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq)); /* ^^^ */
if (idx == SRCU_STATE_IDLE) {
- raw_spin_lock_irq_rcu_node(sp);
+ spin_lock_irq_rcu_node(sp);
if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq));
- raw_spin_unlock_irq_rcu_node(sp);
+ spin_unlock_irq_rcu_node(sp);
mutex_unlock(&sp->srcu_gp_mutex);
return;
}
idx = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
if (idx == SRCU_STATE_IDLE)
srcu_gp_start(sp);
- raw_spin_unlock_irq_rcu_node(sp);
+ spin_unlock_irq_rcu_node(sp);
if (idx != SRCU_STATE_IDLE) {
mutex_unlock(&sp->srcu_gp_mutex);
return; /* Someone else started the grace period. */
@@ -1141,19 +1168,19 @@ static void srcu_invoke_callbacks(struct work_struct *work)
sdp = container_of(work, struct srcu_data, work.work);
sp = sdp->sp;
rcu_cblist_init(&ready_cbs);
- raw_spin_lock_irq_rcu_node(sdp);
+ spin_lock_irq_rcu_node(sdp);
rcu_segcblist_advance(&sdp->srcu_cblist,
rcu_seq_current(&sp->srcu_gp_seq));
if (sdp->srcu_cblist_invoking ||
!rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
- raw_spin_unlock_irq_rcu_node(sdp);
+ spin_unlock_irq_rcu_node(sdp);
return; /* Someone else on the job or nothing to do. */
}
/* We are on the job! Extract and invoke ready callbacks. */
sdp->srcu_cblist_invoking = true;
rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
- raw_spin_unlock_irq_rcu_node(sdp);
+ spin_unlock_irq_rcu_node(sdp);
rhp = rcu_cblist_dequeue(&ready_cbs);
for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
debug_rcu_head_unqueue(rhp);
@@ -1166,13 +1193,13 @@ static void srcu_invoke_callbacks(struct work_struct *work)
* Update counts, accelerate new callbacks, and if needed,
* schedule another round of callback invocation.
*/
- raw_spin_lock_irq_rcu_node(sdp);
+ spin_lock_irq_rcu_node(sdp);
rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
rcu_seq_snap(&sp->srcu_gp_seq));
sdp->srcu_cblist_invoking = false;
more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
- raw_spin_unlock_irq_rcu_node(sdp);
+ spin_unlock_irq_rcu_node(sdp);
if (more)
srcu_schedule_cbs_sdp(sdp, 0);
}
@@ -1185,7 +1212,7 @@ static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
{
bool pushgp = true;
- raw_spin_lock_irq_rcu_node(sp);
+ spin_lock_irq_rcu_node(sp);
if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
if (!WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq))) {
/* All requests fulfilled, time to go idle. */
@@ -1195,7 +1222,7 @@ static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
/* Outstanding request and no GP. Start one. */
srcu_gp_start(sp);
}
- raw_spin_unlock_irq_rcu_node(sp);
+ spin_unlock_irq_rcu_node(sp);
if (pushgp)
queue_delayed_work(system_power_efficient_wq, &sp->work, delay);
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index f9c0ca2ccf0c..491bdf39f276 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -265,25 +265,12 @@ void rcu_bh_qs(void)
#endif
static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
- .dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
+ .dynticks_nesting = 1,
+ .dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
.dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
};
/*
- * There's a few places, currently just in the tracing infrastructure,
- * that uses rcu_irq_enter() to make sure RCU is watching. But there's
- * a small location where that will not even work. In those cases
- * rcu_irq_enter_disabled() needs to be checked to make sure rcu_irq_enter()
- * can be called.
- */
-static DEFINE_PER_CPU(bool, disable_rcu_irq_enter);
-
-bool rcu_irq_enter_disabled(void)
-{
- return this_cpu_read(disable_rcu_irq_enter);
-}
-
-/*
* Record entry into an extended quiescent state. This is only to be
* called when not already in an extended quiescent state.
*/
@@ -762,68 +749,39 @@ cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
}
/*
- * rcu_eqs_enter_common - current CPU is entering an extended quiescent state
+ * Enter an RCU extended quiescent state, which can be either the
+ * idle loop or adaptive-tickless usermode execution.
*
- * Enter idle, doing appropriate accounting. The caller must have
- * disabled interrupts.
+ * We crowbar the ->dynticks_nmi_nesting field to zero to allow for
+ * the possibility of usermode upcalls having messed up our count
+ * of interrupt nesting level during the prior busy period.
*/
-static void rcu_eqs_enter_common(bool user)
+static void rcu_eqs_enter(bool user)
{
struct rcu_state *rsp;
struct rcu_data *rdp;
- struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+ struct rcu_dynticks *rdtp;
- lockdep_assert_irqs_disabled();
- trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0);
- if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
- !user && !is_idle_task(current)) {
- struct task_struct *idle __maybe_unused =
- idle_task(smp_processor_id());
-
- trace_rcu_dyntick(TPS("Error on entry: not idle task"), rdtp->dynticks_nesting, 0);
- rcu_ftrace_dump(DUMP_ORIG);
- WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
- current->pid, current->comm,
- idle->pid, idle->comm); /* must be idle task! */
+ rdtp = this_cpu_ptr(&rcu_dynticks);
+ WRITE_ONCE(rdtp->dynticks_nmi_nesting, 0);
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
+ rdtp->dynticks_nesting == 0);
+ if (rdtp->dynticks_nesting != 1) {
+ rdtp->dynticks_nesting--;
+ return;
}
+
+ lockdep_assert_irqs_disabled();
+ trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0, rdtp->dynticks);
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
for_each_rcu_flavor(rsp) {
rdp = this_cpu_ptr(rsp->rda);
do_nocb_deferred_wakeup(rdp);
}
rcu_prepare_for_idle();
- __this_cpu_inc(disable_rcu_irq_enter);
- rdtp->dynticks_nesting = 0; /* Breaks tracing momentarily. */
- rcu_dynticks_eqs_enter(); /* After this, tracing works again. */
- __this_cpu_dec(disable_rcu_irq_enter);
+ WRITE_ONCE(rdtp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
+ rcu_dynticks_eqs_enter();
rcu_dynticks_task_enter();
-
- /*
- * It is illegal to enter an extended quiescent state while
- * in an RCU read-side critical section.
- */
- RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map),
- "Illegal idle entry in RCU read-side critical section.");
- RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map),
- "Illegal idle entry in RCU-bh read-side critical section.");
- RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map),
- "Illegal idle entry in RCU-sched read-side critical section.");
-}
-
-/*
- * Enter an RCU extended quiescent state, which can be either the
- * idle loop or adaptive-tickless usermode execution.
- */
-static void rcu_eqs_enter(bool user)
-{
- struct rcu_dynticks *rdtp;
-
- rdtp = this_cpu_ptr(&rcu_dynticks);
- WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
- (rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
- if ((rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE)
- rcu_eqs_enter_common(user);
- else
- rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
}
/**
@@ -834,10 +792,6 @@ static void rcu_eqs_enter(bool user)
* critical sections can occur in irq handlers in idle, a possibility
* handled by irq_enter() and irq_exit().)
*
- * We crowbar the ->dynticks_nesting field to zero to allow for
- * the possibility of usermode upcalls having messed up our count
- * of interrupt nesting level during the prior busy period.
- *
* If you add or remove a call to rcu_idle_enter(), be sure to test with
* CONFIG_RCU_EQS_DEBUG=y.
*/
@@ -867,6 +821,46 @@ void rcu_user_enter(void)
#endif /* CONFIG_NO_HZ_FULL */
/**
+ * rcu_nmi_exit - inform RCU of exit from NMI context
+ *
+ * If we are returning from the outermost NMI handler that interrupted an
+ * RCU-idle period, update rdtp->dynticks and rdtp->dynticks_nmi_nesting
+ * to let the RCU grace-period handling know that the CPU is back to
+ * being RCU-idle.
+ *
+ * If you add or remove a call to rcu_nmi_exit(), be sure to test
+ * with CONFIG_RCU_EQS_DEBUG=y.
+ */
+void rcu_nmi_exit(void)
+{
+ struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+
+ /*
+ * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
+ * (We are exiting an NMI handler, so RCU better be paying attention
+ * to us!)
+ */
+ WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
+ WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());
+
+ /*
+ * If the nesting level is not 1, the CPU wasn't RCU-idle, so
+ * leave it in non-RCU-idle state.
+ */
+ if (rdtp->dynticks_nmi_nesting != 1) {
+ trace_rcu_dyntick(TPS("--="), rdtp->dynticks_nmi_nesting, rdtp->dynticks_nmi_nesting - 2, rdtp->dynticks);
+ WRITE_ONCE(rdtp->dynticks_nmi_nesting, /* No store tearing. */
+ rdtp->dynticks_nmi_nesting - 2);
+ return;
+ }
+
+ /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
+ trace_rcu_dyntick(TPS("Startirq"), rdtp->dynticks_nmi_nesting, 0, rdtp->dynticks);
+ WRITE_ONCE(rdtp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
+ rcu_dynticks_eqs_enter();
+}
+
+/**
* rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
*
* Exit from an interrupt handler, which might possibly result in entering
@@ -875,8 +869,8 @@ void rcu_user_enter(void)
*
* This code assumes that the idle loop never does anything that might
* result in unbalanced calls to irq_enter() and irq_exit(). If your
- * architecture violates this assumption, RCU will give you what you
- * deserve, good and hard. But very infrequently and irreproducibly.
+ * architecture's idle loop violates this assumption, RCU will give you what
+ * you deserve, good and hard. But very infrequently and irreproducibly.
*
* Use things like work queues to work around this limitation.
*
@@ -887,23 +881,14 @@ void rcu_user_enter(void)
*/
void rcu_irq_exit(void)
{
- struct rcu_dynticks *rdtp;
+ struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
lockdep_assert_irqs_disabled();
- rdtp = this_cpu_ptr(&rcu_dynticks);
-
- /* Page faults can happen in NMI handlers, so check... */
- if (rdtp->dynticks_nmi_nesting)
- return;
-
- WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
- rdtp->dynticks_nesting < 1);
- if (rdtp->dynticks_nesting <= 1) {
- rcu_eqs_enter_common(true);
- } else {
- trace_rcu_dyntick(TPS("--="), rdtp->dynticks_nesting, rdtp->dynticks_nesting - 1);
- rdtp->dynticks_nesting--;
- }
+ if (rdtp->dynticks_nmi_nesting == 1)
+ rcu_prepare_for_idle();
+ rcu_nmi_exit();
+ if (rdtp->dynticks_nmi_nesting == 0)
+ rcu_dynticks_task_enter();
}
/*
@@ -922,55 +907,33 @@ void rcu_irq_exit_irqson(void)
}
/*
- * rcu_eqs_exit_common - current CPU moving away from extended quiescent state
- *
- * If the new value of the ->dynticks_nesting counter was previously zero,
- * we really have exited idle, and must do the appropriate accounting.
- * The caller must have disabled interrupts.
- */
-static void rcu_eqs_exit_common(long long oldval, int user)
-{
- RCU_TRACE(struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);)
-
- rcu_dynticks_task_exit();
- rcu_dynticks_eqs_exit();
- rcu_cleanup_after_idle();
- trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
- if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
- !user && !is_idle_task(current)) {
- struct task_struct *idle __maybe_unused =
- idle_task(smp_processor_id());
-
- trace_rcu_dyntick(TPS("Error on exit: not idle task"),
- oldval, rdtp->dynticks_nesting);
- rcu_ftrace_dump(DUMP_ORIG);
- WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
- current->pid, current->comm,
- idle->pid, idle->comm); /* must be idle task! */
- }
-}
-
-/*
* Exit an RCU extended quiescent state, which can be either the
* idle loop or adaptive-tickless usermode execution.
+ *
+ * We crowbar the ->dynticks_nmi_nesting field to DYNTICK_IRQ_NONIDLE to
+ * allow for the possibility of usermode upcalls messing up our count of
+ * interrupt nesting level during the busy period that is just now starting.
*/
static void rcu_eqs_exit(bool user)
{
struct rcu_dynticks *rdtp;
- long long oldval;
+ long oldval;
lockdep_assert_irqs_disabled();
rdtp = this_cpu_ptr(&rcu_dynticks);
oldval = rdtp->dynticks_nesting;
WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
- if (oldval & DYNTICK_TASK_NEST_MASK) {
- rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
- } else {
- __this_cpu_inc(disable_rcu_irq_enter);
- rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
- rcu_eqs_exit_common(oldval, user);
- __this_cpu_dec(disable_rcu_irq_enter);
+ if (oldval) {
+ rdtp->dynticks_nesting++;
+ return;
}
+ rcu_dynticks_task_exit();
+ rcu_dynticks_eqs_exit();
+ rcu_cleanup_after_idle();
+ trace_rcu_dyntick(TPS("End"), rdtp->dynticks_nesting, 1, rdtp->dynticks);
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
+ WRITE_ONCE(rdtp->dynticks_nesting, 1);
+ WRITE_ONCE(rdtp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
}
/**
@@ -979,11 +942,6 @@ static void rcu_eqs_exit(bool user)
* Exit idle mode, in other words, -enter- the mode in which RCU
* read-side critical sections can occur.
*
- * We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NEST to
- * allow for the possibility of usermode upcalls messing up our count
- * of interrupt nesting level during the busy period that is just
- * now starting.
- *
* If you add or remove a call to rcu_idle_exit(), be sure to test with
* CONFIG_RCU_EQS_DEBUG=y.
*/
@@ -1013,65 +971,6 @@ void rcu_user_exit(void)
#endif /* CONFIG_NO_HZ_FULL */
/**
- * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
- *
- * Enter an interrupt handler, which might possibly result in exiting
- * idle mode, in other words, entering the mode in which read-side critical
- * sections can occur. The caller must have disabled interrupts.
- *
- * Note that the Linux kernel is fully capable of entering an interrupt
- * handler that it never exits, for example when doing upcalls to
- * user mode! This code assumes that the idle loop never does upcalls to
- * user mode. If your architecture does do upcalls from the idle loop (or
- * does anything else that results in unbalanced calls to the irq_enter()
- * and irq_exit() functions), RCU will give you what you deserve, good
- * and hard. But very infrequently and irreproducibly.
- *
- * Use things like work queues to work around this limitation.
- *
- * You have been warned.
- *
- * If you add or remove a call to rcu_irq_enter(), be sure to test with
- * CONFIG_RCU_EQS_DEBUG=y.
- */
-void rcu_irq_enter(void)
-{
- struct rcu_dynticks *rdtp;
- long long oldval;
-
- lockdep_assert_irqs_disabled();
- rdtp = this_cpu_ptr(&rcu_dynticks);
-
- /* Page faults can happen in NMI handlers, so check... */
- if (rdtp->dynticks_nmi_nesting)
- return;
-
- oldval = rdtp->dynticks_nesting;
- rdtp->dynticks_nesting++;
- WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
- rdtp->dynticks_nesting == 0);
- if (oldval)
- trace_rcu_dyntick(TPS("++="), oldval, rdtp->dynticks_nesting);
- else
- rcu_eqs_exit_common(oldval, true);
-}
-
-/*
- * Wrapper for rcu_irq_enter() where interrupts are enabled.
- *
- * If you add or remove a call to rcu_irq_enter_irqson(), be sure to test
- * with CONFIG_RCU_EQS_DEBUG=y.
- */
-void rcu_irq_enter_irqson(void)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- rcu_irq_enter();
- local_irq_restore(flags);
-}
-
-/**
* rcu_nmi_enter - inform RCU of entry to NMI context
*
* If the CPU was idle from RCU's viewpoint, update rdtp->dynticks and
@@ -1086,7 +985,7 @@ void rcu_irq_enter_irqson(void)
void rcu_nmi_enter(void)
{
struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
- int incby = 2;
+ long incby = 2;
/* Complain about underflow. */
WARN_ON_ONCE(rdtp->dynticks_nmi_nesting < 0);
@@ -1103,45 +1002,61 @@ void rcu_nmi_enter(void)
rcu_dynticks_eqs_exit();
incby = 1;
}
- rdtp->dynticks_nmi_nesting += incby;
+ trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
+ rdtp->dynticks_nmi_nesting,
+ rdtp->dynticks_nmi_nesting + incby, rdtp->dynticks);
+ WRITE_ONCE(rdtp->dynticks_nmi_nesting, /* Prevent store tearing. */
+ rdtp->dynticks_nmi_nesting + incby);
barrier();
}
/**
- * rcu_nmi_exit - inform RCU of exit from NMI context
+ * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
*
- * If we are returning from the outermost NMI handler that interrupted an
- * RCU-idle period, update rdtp->dynticks and rdtp->dynticks_nmi_nesting
- * to let the RCU grace-period handling know that the CPU is back to
- * being RCU-idle.
+ * Enter an interrupt handler, which might possibly result in exiting
+ * idle mode, in other words, entering the mode in which read-side critical
+ * sections can occur. The caller must have disabled interrupts.
*
- * If you add or remove a call to rcu_nmi_exit(), be sure to test
- * with CONFIG_RCU_EQS_DEBUG=y.
+ * Note that the Linux kernel is fully capable of entering an interrupt
+ * handler that it never exits, for example when doing upcalls to user mode!
+ * This code assumes that the idle loop never does upcalls to user mode.
+ * If your architecture's idle loop does do upcalls to user mode (or does
+ * anything else that results in unbalanced calls to the irq_enter() and
+ * irq_exit() functions), RCU will give you what you deserve, good and hard.
+ * But very infrequently and irreproducibly.
+ *
+ * Use things like work queues to work around this limitation.
+ *
+ * You have been warned.
+ *
+ * If you add or remove a call to rcu_irq_enter(), be sure to test with
+ * CONFIG_RCU_EQS_DEBUG=y.
*/
-void rcu_nmi_exit(void)
+void rcu_irq_enter(void)
{
struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
- /*
- * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
- * (We are exiting an NMI handler, so RCU better be paying attention
- * to us!)
- */
- WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
- WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());
+ lockdep_assert_irqs_disabled();
+ if (rdtp->dynticks_nmi_nesting == 0)
+ rcu_dynticks_task_exit();
+ rcu_nmi_enter();
+ if (rdtp->dynticks_nmi_nesting == 1)
+ rcu_cleanup_after_idle();
+}
- /*
- * If the nesting level is not 1, the CPU wasn't RCU-idle, so
- * leave it in non-RCU-idle state.
- */
- if (rdtp->dynticks_nmi_nesting != 1) {
- rdtp->dynticks_nmi_nesting -= 2;
- return;
- }
+/*
+ * Wrapper for rcu_irq_enter() where interrupts are enabled.
+ *
+ * If you add or remove a call to rcu_irq_enter_irqson(), be sure to test
+ * with CONFIG_RCU_EQS_DEBUG=y.
+ */
+void rcu_irq_enter_irqson(void)
+{
+ unsigned long flags;
- /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
- rdtp->dynticks_nmi_nesting = 0;
- rcu_dynticks_eqs_enter();
+ local_irq_save(flags);
+ rcu_irq_enter();
+ local_irq_restore(flags);
}
/**
@@ -1233,7 +1148,8 @@ EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
*/
static int rcu_is_cpu_rrupt_from_idle(void)
{
- return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 1;
+ return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 0 &&
+ __this_cpu_read(rcu_dynticks.dynticks_nmi_nesting) <= 1;
}
/*
@@ -2789,6 +2705,11 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
rdp->n_force_qs_snap = rsp->n_force_qs;
} else if (count < rdp->qlen_last_fqs_check - qhimark)
rdp->qlen_last_fqs_check = count;
+
+ /*
+ * The following usually indicates a double call_rcu(). To track
+ * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.
+ */
WARN_ON_ONCE(rcu_segcblist_empty(&rdp->cblist) != (count == 0));
local_irq_restore(flags);
@@ -3723,7 +3644,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
raw_spin_lock_irqsave_rcu_node(rnp, flags);
rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
- WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
+ WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != 1);
WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp->dynticks)));
rdp->cpu = cpu;
rdp->rsp = rsp;
@@ -3752,7 +3673,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
if (rcu_segcblist_empty(&rdp->cblist) && /* No early-boot CBs? */
!init_nocb_callback_list(rdp))
rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */
- rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
+ rdp->dynticks->dynticks_nesting = 1; /* CPU not up, no tearing. */
rcu_dynticks_eqs_online();
raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
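Across this tree.c rewrite, ->dynticks_nesting shrinks to a simple process-level count (1 while non-idle, 0 in an extended quiescent state) and all irq/NMI accounting moves to ->dynticks_nmi_nesting. A hedged summary of the resulting states:

    /*
     * dynticks_nesting  dynticks_nmi_nesting   CPU state
     * ----------------  --------------------   ---------
     * 1                 DYNTICK_IRQ_NONIDLE    process context, non-idle
     * 0                 0                      idle or nohz_full userspace
     * 0                 1                      irq/NMI taken from idle
     * 0                 3, 5, ...              nested irq/NMI from idle
     *
     * DYNTICK_IRQ_NONIDLE = (LONG_MAX / 2) + 1 leaves headroom for
     * unmatched rcu_irq_{enter,exit}() pairs without reaching zero.
     */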
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 46a5d1991450..6488a3b0e729 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -38,9 +38,8 @@
* Dynticks per-CPU state.
*/
struct rcu_dynticks {
- long long dynticks_nesting; /* Track irq/process nesting level. */
- /* Process level is worth LLONG_MAX/2. */
- int dynticks_nmi_nesting; /* Track NMI nesting level. */
+ long dynticks_nesting; /* Track process nesting level. */
+ long dynticks_nmi_nesting; /* Track irq/NMI nesting level. */
atomic_t dynticks; /* Even value for idle, else odd. */
bool rcu_need_heavy_qs; /* GP old, need heavy quiescent state. */
unsigned long rcu_qs_ctr; /* Light universal quiescent state ctr. */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index db85ca3975f1..fb88a028deec 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -61,7 +61,6 @@ DEFINE_PER_CPU(char, rcu_cpu_has_work);
#ifdef CONFIG_RCU_NOCB_CPU
static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
-static bool have_rcu_nocb_mask; /* Was rcu_nocb_mask allocated? */
static bool __read_mostly rcu_nocb_poll; /* Offload kthreads are to poll. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
@@ -1687,7 +1686,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
}
print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
delta = rdp->mynode->gpnum - rdp->rcu_iw_gpnum;
- pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u fqs=%ld %s\n",
+ pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%ld softirq=%u/%u fqs=%ld %s\n",
cpu,
"O."[!!cpu_online(cpu)],
"o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
@@ -1752,7 +1751,6 @@ static void increment_cpu_stall_ticks(void)
static int __init rcu_nocb_setup(char *str)
{
alloc_bootmem_cpumask_var(&rcu_nocb_mask);
- have_rcu_nocb_mask = true;
cpulist_parse(str, rcu_nocb_mask);
return 1;
}
@@ -1801,7 +1799,7 @@ static void rcu_init_one_nocb(struct rcu_node *rnp)
/* Is the specified CPU a no-CBs CPU? */
bool rcu_is_nocb_cpu(int cpu)
{
- if (have_rcu_nocb_mask)
+ if (cpumask_available(rcu_nocb_mask))
return cpumask_test_cpu(cpu, rcu_nocb_mask);
return false;
}
@@ -2295,14 +2293,13 @@ void __init rcu_init_nohz(void)
need_rcu_nocb_mask = true;
#endif /* #if defined(CONFIG_NO_HZ_FULL) */
- if (!have_rcu_nocb_mask && need_rcu_nocb_mask) {
+ if (!cpumask_available(rcu_nocb_mask) && need_rcu_nocb_mask) {
if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
return;
}
- have_rcu_nocb_mask = true;
}
- if (!have_rcu_nocb_mask)
+ if (!cpumask_available(rcu_nocb_mask))
return;
#if defined(CONFIG_NO_HZ_FULL)
@@ -2428,7 +2425,7 @@ static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp)
struct rcu_data *rdp_leader = NULL; /* Suppress misguided gcc warn. */
struct rcu_data *rdp_prev = NULL;
- if (!have_rcu_nocb_mask)
+ if (!cpumask_available(rcu_nocb_mask))
return;
if (ls == -1) {
ls = int_sqrt(nr_cpu_ids);
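cpumask_available() replaces the hand-maintained have_rcu_nocb_mask flag: with CONFIG_CPUMASK_OFFSTACK=y a cpumask_var_t is a pointer that stays NULL until allocated, and the helper hides the on-stack/off-stack difference. A hedged sketch of the pattern:

    static cpumask_var_t example_mask;

    static bool example_cpu_selected(int cpu)
    {
            if (!cpumask_available(example_mask))
                    return false;   /* Mask was never allocated. */
            return cpumask_test_cpu(cpu, example_mask);
    }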
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index fbd56d6e575b..68fa19a5e7bd 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -422,11 +422,13 @@ void init_rcu_head(struct rcu_head *head)
{
debug_object_init(head, &rcuhead_debug_descr);
}
+EXPORT_SYMBOL_GPL(init_rcu_head);
void destroy_rcu_head(struct rcu_head *head)
{
debug_object_free(head, &rcuhead_debug_descr);
}
+EXPORT_SYMBOL_GPL(destroy_rcu_head);
static bool rcuhead_is_static_object(void *addr)
{
diff --git a/kernel/relay.c b/kernel/relay.c
index 39a9dfc69486..41280033a4c5 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -919,9 +919,9 @@ static int relay_file_mmap(struct file *filp, struct vm_area_struct *vma)
*
* Poll implementation.
*/
-static unsigned int relay_file_poll(struct file *filp, poll_table *wait)
+static __poll_t relay_file_poll(struct file *filp, poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
struct rchan_buf *buf = filp->private_data;
if (buf->finalized)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a7bf32aabfda..3da7a2444a91 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -508,7 +508,8 @@ void resched_cpu(int cpu)
unsigned long flags;
raw_spin_lock_irqsave(&rq->lock, flags);
- resched_curr(rq);
+ if (cpu_online(cpu) || cpu == smp_processor_id())
+ resched_curr(rq);
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
@@ -2045,7 +2046,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
* If the owning (remote) CPU is still in the middle of schedule() with
* this task as prev, wait until it's done referencing the task.
*
- * Pairs with the smp_store_release() in finish_lock_switch().
+ * Pairs with the smp_store_release() in finish_task().
*
* This ensures that tasks getting woken will be fully ordered against
* their previous state and preserve Program Order.
@@ -2571,6 +2572,50 @@ fire_sched_out_preempt_notifiers(struct task_struct *curr,
#endif /* CONFIG_PREEMPT_NOTIFIERS */
+static inline void prepare_task(struct task_struct *next)
+{
+#ifdef CONFIG_SMP
+ /*
+ * Claim the task as running, we do this before switching to it
+ * such that any running task will have this set.
+ */
+ next->on_cpu = 1;
+#endif
+}
+
+static inline void finish_task(struct task_struct *prev)
+{
+#ifdef CONFIG_SMP
+ /*
+ * After ->on_cpu is cleared, the task can be moved to a different CPU.
+ * We must ensure this doesn't happen until the switch is completely
+ * finished.
+ *
+ * In particular, the load of prev->state in finish_task_switch() must
+ * happen before this.
+ *
+ * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
+ */
+ smp_store_release(&prev->on_cpu, 0);
+#endif
+}
+
+static inline void finish_lock_switch(struct rq *rq)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+ /* this is a valid case when another task releases the spinlock */
+ rq->lock.owner = current;
+#endif
+ /*
+ * If we are tracking spinlock dependencies then we have to
+ * fix up the runqueue lock - which gets 'carried over' from
+ * prev into current:
+ */
+ spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
+
+ raw_spin_unlock_irq(&rq->lock);
+}
+
/**
* prepare_task_switch - prepare to switch tasks
* @rq: the runqueue preparing to switch
@@ -2591,7 +2636,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
sched_info_switch(rq, prev, next);
perf_event_task_sched_out(prev, next);
fire_sched_out_preempt_notifiers(prev, next);
- prepare_lock_switch(rq, next);
+ prepare_task(next);
prepare_arch_switch(next);
}
@@ -2646,7 +2691,7 @@ static struct rq *finish_task_switch(struct task_struct *prev)
* the scheduled task must drop that reference.
*
* We must observe prev->state before clearing prev->on_cpu (in
- * finish_lock_switch), otherwise a concurrent wakeup can get prev
+ * finish_task), otherwise a concurrent wakeup can get prev
* running on another CPU and we could race with its RUNNING -> DEAD
* transition, resulting in a double drop.
*/
@@ -2663,7 +2708,8 @@ static struct rq *finish_task_switch(struct task_struct *prev)
* to use.
*/
smp_mb__after_unlock_lock();
- finish_lock_switch(rq, prev);
+ finish_task(prev);
+ finish_lock_switch(rq);
finish_arch_post_lock_switch();
fire_sched_in_preempt_notifiers(current);
@@ -4040,8 +4086,7 @@ recheck:
return -EINVAL;
}
- if (attr->sched_flags &
- ~(SCHED_FLAG_RESET_ON_FORK | SCHED_FLAG_RECLAIM))
+ if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV))
return -EINVAL;
/*
@@ -4108,6 +4153,9 @@ recheck:
}
if (user) {
+ if (attr->sched_flags & SCHED_FLAG_SUGOV)
+ return -EINVAL;
+
retval = security_task_setscheduler(p);
if (retval)
return retval;
@@ -4163,7 +4211,8 @@ change:
}
#endif
#ifdef CONFIG_SMP
- if (dl_bandwidth_enabled() && dl_policy(policy)) {
+ if (dl_bandwidth_enabled() && dl_policy(policy) &&
+ !(attr->sched_flags & SCHED_FLAG_SUGOV)) {
cpumask_t *span = rq->rd->span;
/*
@@ -4293,6 +4342,11 @@ int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
}
EXPORT_SYMBOL_GPL(sched_setattr);
+int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
+{
+ return __sched_setscheduler(p, attr, false, true);
+}
+
/**
* sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
* @p: the task in question.
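The split above isolates the ->on_cpu handshake that try_to_wake_up() depends on; a hedged sketch of the two sides of that pairing:

    /* Switch-out side, finish_task(): all prior accesses to prev,
     * including the prev->state load, complete before on_cpu clears. */
    smp_store_release(&prev->on_cpu, 0);

    /* Wakeup side, try_to_wake_up(): spin until on_cpu reads 0, with
     * acquire ordering so later wakeup steps see the completed switch. */
    smp_cond_load_acquire(&p->on_cpu, !VAL);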
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index d6717a3331a1..dd062a1c8cf0 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -60,7 +60,8 @@ struct sugov_cpu {
u64 last_update;
/* The fields below are only needed when sharing a policy. */
- unsigned long util;
+ unsigned long util_cfs;
+ unsigned long util_dl;
unsigned long max;
unsigned int flags;
@@ -176,21 +177,28 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
return cpufreq_driver_resolve_freq(policy, freq);
}
-static void sugov_get_util(unsigned long *util, unsigned long *max, int cpu)
+static void sugov_get_util(struct sugov_cpu *sg_cpu)
{
- struct rq *rq = cpu_rq(cpu);
- unsigned long cfs_max;
+ struct rq *rq = cpu_rq(sg_cpu->cpu);
- cfs_max = arch_scale_cpu_capacity(NULL, cpu);
+ sg_cpu->max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);
+ sg_cpu->util_cfs = cpu_util_cfs(rq);
+ sg_cpu->util_dl = cpu_util_dl(rq);
+}
- *util = min(rq->cfs.avg.util_avg, cfs_max);
- *max = cfs_max;
+static unsigned long sugov_aggregate_util(struct sugov_cpu *sg_cpu)
+{
+ /*
+ * Ideally we would like to set util_dl as min/guaranteed freq and
+ * util_cfs + util_dl as requested freq. However, cpufreq is not yet
+ * ready for such an interface. So, we only do the latter for now.
+ */
+ return min(sg_cpu->util_cfs + sg_cpu->util_dl, sg_cpu->max);
}
-static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
- unsigned int flags)
+static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time)
{
- if (flags & SCHED_CPUFREQ_IOWAIT) {
+ if (sg_cpu->flags & SCHED_CPUFREQ_IOWAIT) {
if (sg_cpu->iowait_boost_pending)
return;
@@ -264,7 +272,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
unsigned int next_f;
bool busy;
- sugov_set_iowait_boost(sg_cpu, time, flags);
+ sugov_set_iowait_boost(sg_cpu, time);
sg_cpu->last_update = time;
if (!sugov_should_update_freq(sg_policy, time))
@@ -272,10 +280,12 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
busy = sugov_cpu_is_busy(sg_cpu);
- if (flags & SCHED_CPUFREQ_RT_DL) {
+ if (flags & SCHED_CPUFREQ_RT) {
next_f = policy->cpuinfo.max_freq;
} else {
- sugov_get_util(&util, &max, sg_cpu->cpu);
+ sugov_get_util(sg_cpu);
+ max = sg_cpu->max;
+ util = sugov_aggregate_util(sg_cpu);
sugov_iowait_boost(sg_cpu, &util, &max);
next_f = get_next_freq(sg_policy, util, max);
/*
@@ -305,23 +315,27 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
s64 delta_ns;
/*
- * If the CPU utilization was last updated before the previous
- * frequency update and the time elapsed between the last update
- * of the CPU utilization and the last frequency update is long
- * enough, don't take the CPU into account as it probably is
- * idle now (and clear iowait_boost for it).
+ * If the CFS CPU utilization was last updated before the
+ * previous frequency update and the time elapsed between the
+ * last update of the CPU utilization and the last frequency
+ * update is long enough, reset iowait_boost and util_cfs, as
+ * they are now probably stale. However, still consider the
+ * CPU contribution if it has some DEADLINE utilization
+ * (util_dl).
*/
delta_ns = time - j_sg_cpu->last_update;
if (delta_ns > TICK_NSEC) {
j_sg_cpu->iowait_boost = 0;
j_sg_cpu->iowait_boost_pending = false;
- continue;
+ j_sg_cpu->util_cfs = 0;
+ if (j_sg_cpu->util_dl == 0)
+ continue;
}
- if (j_sg_cpu->flags & SCHED_CPUFREQ_RT_DL)
+ if (j_sg_cpu->flags & SCHED_CPUFREQ_RT)
return policy->cpuinfo.max_freq;
- j_util = j_sg_cpu->util;
j_max = j_sg_cpu->max;
+ j_util = sugov_aggregate_util(j_sg_cpu);
if (j_util * max > j_max * util) {
util = j_util;
max = j_max;
@@ -338,22 +352,18 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
{
struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
struct sugov_policy *sg_policy = sg_cpu->sg_policy;
- unsigned long util, max;
unsigned int next_f;
- sugov_get_util(&util, &max, sg_cpu->cpu);
-
raw_spin_lock(&sg_policy->update_lock);
- sg_cpu->util = util;
- sg_cpu->max = max;
+ sugov_get_util(sg_cpu);
sg_cpu->flags = flags;
- sugov_set_iowait_boost(sg_cpu, time, flags);
+ sugov_set_iowait_boost(sg_cpu, time);
sg_cpu->last_update = time;
if (sugov_should_update_freq(sg_policy, time)) {
- if (flags & SCHED_CPUFREQ_RT_DL)
+ if (flags & SCHED_CPUFREQ_RT)
next_f = sg_policy->policy->cpuinfo.max_freq;
else
next_f = sugov_next_freq_shared(sg_cpu, time);
@@ -383,9 +393,9 @@ static void sugov_irq_work(struct irq_work *irq_work)
sg_policy = container_of(irq_work, struct sugov_policy, irq_work);
/*
- * For RT and deadline tasks, the schedutil governor shoots the
- * frequency to maximum. Special care must be taken to ensure that this
- * kthread doesn't result in the same behavior.
+ * For RT tasks, the schedutil governor shoots the frequency to maximum.
+ * Special care must be taken to ensure that this kthread doesn't result
+ * in the same behavior.
*
* This is (mostly) guaranteed by the work_in_progress flag. The flag is
* updated only at the end of the sugov_work() function and before that
@@ -470,7 +480,20 @@ static void sugov_policy_free(struct sugov_policy *sg_policy)
static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
struct task_struct *thread;
- struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO / 2 };
+ struct sched_attr attr = {
+ .size = sizeof(struct sched_attr),
+ .sched_policy = SCHED_DEADLINE,
+ .sched_flags = SCHED_FLAG_SUGOV,
+ .sched_nice = 0,
+ .sched_priority = 0,
+ /*
+ * Fake (unused) bandwidth; workaround to "fix"
+ * priority inheritance.
+ */
+ .sched_runtime = 1000000,
+ .sched_deadline = 10000000,
+ .sched_period = 10000000,
+ };
struct cpufreq_policy *policy = sg_policy->policy;
int ret;
@@ -488,10 +511,10 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
return PTR_ERR(thread);
}
- ret = sched_setscheduler_nocheck(thread, SCHED_FIFO, &param);
+ ret = sched_setattr_nocheck(thread, &attr);
if (ret) {
kthread_stop(thread);
- pr_warn("%s: failed to set SCHED_FIFO\n", __func__);
+ pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
return ret;
}
@@ -655,7 +678,7 @@ static int sugov_start(struct cpufreq_policy *policy)
memset(sg_cpu, 0, sizeof(*sg_cpu));
sg_cpu->cpu = cpu;
sg_cpu->sg_policy = sg_policy;
- sg_cpu->flags = SCHED_CPUFREQ_RT;
+ sg_cpu->flags = 0;
sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
}
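With the split into util_cfs and util_dl, a CPU's request becomes min(util_cfs + util_dl, max). A hedged arithmetic example with made-up numbers (capacity units, 1024 = full):

    unsigned long util_cfs = 300, util_dl = 200, max = 1024;
    unsigned long util = min(util_cfs + util_dl, max);      /* 500 */
    /* get_next_freq() then maps this to roughly
     * 1.25 * max_freq * util / max, i.e. ~61% of max_freq here. */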
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 2473736c7616..9bb0e0c412ec 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -78,7 +78,7 @@ static inline int dl_bw_cpus(int i)
#endif
static inline
-void add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
+void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
u64 old = dl_rq->running_bw;
@@ -86,10 +86,12 @@ void add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
dl_rq->running_bw += dl_bw;
SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
+ /* kick cpufreq (see the comment in kernel/sched/sched.h). */
+ cpufreq_update_util(rq_of_dl_rq(dl_rq), SCHED_CPUFREQ_DL);
}
static inline
-void sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
+void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
u64 old = dl_rq->running_bw;
@@ -98,10 +100,12 @@ void sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
if (dl_rq->running_bw > old)
dl_rq->running_bw = 0;
+ /* kick cpufreq (see the comment in kernel/sched/sched.h). */
+ cpufreq_update_util(rq_of_dl_rq(dl_rq), SCHED_CPUFREQ_DL);
}
static inline
-void add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
+void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
u64 old = dl_rq->this_bw;
@@ -111,7 +115,7 @@ void add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
}
static inline
-void sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
+void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
u64 old = dl_rq->this_bw;
@@ -123,16 +127,46 @@ void sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
}
+static inline
+void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
+{
+ if (!dl_entity_is_special(dl_se))
+ __add_rq_bw(dl_se->dl_bw, dl_rq);
+}
+
+static inline
+void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
+{
+ if (!dl_entity_is_special(dl_se))
+ __sub_rq_bw(dl_se->dl_bw, dl_rq);
+}
+
+static inline
+void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
+{
+ if (!dl_entity_is_special(dl_se))
+ __add_running_bw(dl_se->dl_bw, dl_rq);
+}
+
+static inline
+void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
+{
+ if (!dl_entity_is_special(dl_se))
+ __sub_running_bw(dl_se->dl_bw, dl_rq);
+}
+
void dl_change_utilization(struct task_struct *p, u64 new_bw)
{
struct rq *rq;
+ BUG_ON(p->dl.flags & SCHED_FLAG_SUGOV);
+
if (task_on_rq_queued(p))
return;
rq = task_rq(p);
if (p->dl.dl_non_contending) {
- sub_running_bw(p->dl.dl_bw, &rq->dl);
+ sub_running_bw(&p->dl, &rq->dl);
p->dl.dl_non_contending = 0;
/*
* If the timer handler is currently running and the
@@ -144,8 +178,8 @@ void dl_change_utilization(struct task_struct *p, u64 new_bw)
if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
put_task_struct(p);
}
- sub_rq_bw(p->dl.dl_bw, &rq->dl);
- add_rq_bw(new_bw, &rq->dl);
+ __sub_rq_bw(p->dl.dl_bw, &rq->dl);
+ __add_rq_bw(new_bw, &rq->dl);
}
/*
@@ -217,6 +251,9 @@ static void task_non_contending(struct task_struct *p)
if (dl_se->dl_runtime == 0)
return;
+ if (dl_entity_is_special(dl_se))
+ return;
+
WARN_ON(hrtimer_active(&dl_se->inactive_timer));
WARN_ON(dl_se->dl_non_contending);
@@ -236,12 +273,12 @@ static void task_non_contending(struct task_struct *p)
*/
if (zerolag_time < 0) {
if (dl_task(p))
- sub_running_bw(dl_se->dl_bw, dl_rq);
+ sub_running_bw(dl_se, dl_rq);
if (!dl_task(p) || p->state == TASK_DEAD) {
struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
if (p->state == TASK_DEAD)
- sub_rq_bw(p->dl.dl_bw, &rq->dl);
+ sub_rq_bw(&p->dl, &rq->dl);
raw_spin_lock(&dl_b->lock);
__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
__dl_clear_params(p);
@@ -268,7 +305,7 @@ static void task_contending(struct sched_dl_entity *dl_se, int flags)
return;
if (flags & ENQUEUE_MIGRATED)
- add_rq_bw(dl_se->dl_bw, dl_rq);
+ add_rq_bw(dl_se, dl_rq);
if (dl_se->dl_non_contending) {
dl_se->dl_non_contending = 0;
@@ -289,7 +326,7 @@ static void task_contending(struct sched_dl_entity *dl_se, int flags)
* when the "inactive timer" fired).
* So, add it back.
*/
- add_running_bw(dl_se->dl_bw, dl_rq);
+ add_running_bw(dl_se, dl_rq);
}
}
@@ -1114,7 +1151,8 @@ static void update_curr_dl(struct rq *rq)
{
struct task_struct *curr = rq->curr;
struct sched_dl_entity *dl_se = &curr->dl;
- u64 delta_exec;
+ u64 delta_exec, scaled_delta_exec;
+ int cpu = cpu_of(rq);
if (!dl_task(curr) || !on_dl_rq(dl_se))
return;
@@ -1134,9 +1172,6 @@ static void update_curr_dl(struct rq *rq)
return;
}
- /* kick cpufreq (see the comment in kernel/sched/sched.h). */
- cpufreq_update_util(rq, SCHED_CPUFREQ_DL);
-
schedstat_set(curr->se.statistics.exec_max,
max(curr->se.statistics.exec_max, delta_exec));
@@ -1148,13 +1183,39 @@ static void update_curr_dl(struct rq *rq)
sched_rt_avg_update(rq, delta_exec);
- if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM))
- delta_exec = grub_reclaim(delta_exec, rq, &curr->dl);
- dl_se->runtime -= delta_exec;
+ if (dl_entity_is_special(dl_se))
+ return;
+
+ /*
+ * For tasks that participate in GRUB, we implement GRUB-PA: the
+ * spare reclaimed bandwidth is used to clock down frequency.
+ *
+ * For the others, we still need to scale reservation parameters
+ * according to current frequency and CPU maximum capacity.
+ */
+ if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
+ scaled_delta_exec = grub_reclaim(delta_exec,
+ rq,
+ &curr->dl);
+ } else {
+ unsigned long scale_freq = arch_scale_freq_capacity(cpu);
+ unsigned long scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
+
+ scaled_delta_exec = cap_scale(delta_exec, scale_freq);
+ scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
+ }
+
+ dl_se->runtime -= scaled_delta_exec;
throttle:
if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
dl_se->dl_throttled = 1;
+
+ /* If requested, inform the user about runtime overruns. */
+ if (dl_runtime_exceeded(dl_se) &&
+ (dl_se->flags & SCHED_FLAG_DL_OVERRUN))
+ dl_se->dl_overrun = 1;
+
__dequeue_task_dl(rq, curr, 0);
if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
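
A worked example of the non-GRUB branch above, with invented capacity numbers: cap_scale() multiplies and shifts by SCHED_CAPACITY_SHIFT, so running for 1ms at half the maximum frequency on a CPU with 75% of the maximum capacity charges only 0.375ms of runtime:

    /* Sketch with hypothetical scale values; cap_scale() is the macro the
     * patch moves into kernel/sched/sched.h. */
    #include <stdio.h>
    #include <stdint.h>

    #define SCHED_CAPACITY_SHIFT 10
    #define cap_scale(v, s) ((v) * (s) >> SCHED_CAPACITY_SHIFT)

    int main(void)
    {
        uint64_t delta_exec = 1000000;   /* 1 ms of wall-clock execution */
        unsigned long scale_freq = 512;  /* CPU running at half of fmax */
        unsigned long scale_cpu = 768;   /* CPU at 75% of max capacity */
        uint64_t scaled = cap_scale(cap_scale(delta_exec, scale_freq),
                                    scale_cpu);

        printf("%llu\n", (unsigned long long)scaled);  /* 375000 */
        return 0;
    }
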
@@ -1204,8 +1265,8 @@ static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
if (p->state == TASK_DEAD && dl_se->dl_non_contending) {
- sub_running_bw(p->dl.dl_bw, dl_rq_of_se(&p->dl));
- sub_rq_bw(p->dl.dl_bw, dl_rq_of_se(&p->dl));
+ sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
+ sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
dl_se->dl_non_contending = 0;
}
@@ -1222,7 +1283,7 @@ static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
sched_clock_tick();
update_rq_clock(rq);
- sub_running_bw(dl_se->dl_bw, &rq->dl);
+ sub_running_bw(dl_se, &rq->dl);
dl_se->dl_non_contending = 0;
unlock:
task_rq_unlock(rq, p, &rf);
@@ -1416,8 +1477,8 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
dl_check_constrained_dl(&p->dl);
if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) {
- add_rq_bw(p->dl.dl_bw, &rq->dl);
- add_running_bw(p->dl.dl_bw, &rq->dl);
+ add_rq_bw(&p->dl, &rq->dl);
+ add_running_bw(&p->dl, &rq->dl);
}
/*
@@ -1457,8 +1518,8 @@ static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
__dequeue_task_dl(rq, p, flags);
if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
- sub_running_bw(p->dl.dl_bw, &rq->dl);
- sub_rq_bw(p->dl.dl_bw, &rq->dl);
+ sub_running_bw(&p->dl, &rq->dl);
+ sub_rq_bw(&p->dl, &rq->dl);
}
/*
@@ -1564,7 +1625,7 @@ static void migrate_task_rq_dl(struct task_struct *p)
*/
raw_spin_lock(&rq->lock);
if (p->dl.dl_non_contending) {
- sub_running_bw(p->dl.dl_bw, &rq->dl);
+ sub_running_bw(&p->dl, &rq->dl);
p->dl.dl_non_contending = 0;
/*
* If the timer handler is currently running and the
@@ -1576,7 +1637,7 @@ static void migrate_task_rq_dl(struct task_struct *p)
if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
put_task_struct(p);
}
- sub_rq_bw(p->dl.dl_bw, &rq->dl);
+ sub_rq_bw(&p->dl, &rq->dl);
raw_spin_unlock(&rq->lock);
}
@@ -2019,11 +2080,11 @@ retry:
}
deactivate_task(rq, next_task, 0);
- sub_running_bw(next_task->dl.dl_bw, &rq->dl);
- sub_rq_bw(next_task->dl.dl_bw, &rq->dl);
+ sub_running_bw(&next_task->dl, &rq->dl);
+ sub_rq_bw(&next_task->dl, &rq->dl);
set_task_cpu(next_task, later_rq->cpu);
- add_rq_bw(next_task->dl.dl_bw, &later_rq->dl);
- add_running_bw(next_task->dl.dl_bw, &later_rq->dl);
+ add_rq_bw(&next_task->dl, &later_rq->dl);
+ add_running_bw(&next_task->dl, &later_rq->dl);
activate_task(later_rq, next_task, 0);
ret = 1;
@@ -2111,11 +2172,11 @@ static void pull_dl_task(struct rq *this_rq)
resched = true;
deactivate_task(src_rq, p, 0);
- sub_running_bw(p->dl.dl_bw, &src_rq->dl);
- sub_rq_bw(p->dl.dl_bw, &src_rq->dl);
+ sub_running_bw(&p->dl, &src_rq->dl);
+ sub_rq_bw(&p->dl, &src_rq->dl);
set_task_cpu(p, this_cpu);
- add_rq_bw(p->dl.dl_bw, &this_rq->dl);
- add_running_bw(p->dl.dl_bw, &this_rq->dl);
+ add_rq_bw(&p->dl, &this_rq->dl);
+ add_running_bw(&p->dl, &this_rq->dl);
activate_task(this_rq, p, 0);
dmin = p->dl.deadline;
@@ -2224,7 +2285,7 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
task_non_contending(p);
if (!task_on_rq_queued(p))
- sub_rq_bw(p->dl.dl_bw, &rq->dl);
+ sub_rq_bw(&p->dl, &rq->dl);
/*
* We cannot use inactive_task_timer() to invoke sub_running_bw()
@@ -2256,7 +2317,7 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
/* If p is not queued we will update its parameters at next wakeup. */
if (!task_on_rq_queued(p)) {
- add_rq_bw(p->dl.dl_bw, &rq->dl);
+ add_rq_bw(&p->dl, &rq->dl);
return;
}
@@ -2435,6 +2496,9 @@ int sched_dl_overflow(struct task_struct *p, int policy,
u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
int cpus, err = -1;
+ if (attr->sched_flags & SCHED_FLAG_SUGOV)
+ return 0;
+
/* !deadline task may carry old deadline bandwidth */
if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
return 0;
@@ -2521,6 +2585,10 @@ void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
*/
bool __checkparam_dl(const struct sched_attr *attr)
{
+ /* special dl tasks don't actually use any parameter */
+ if (attr->sched_flags & SCHED_FLAG_SUGOV)
+ return true;
+
/* deadline != 0 */
if (attr->sched_deadline == 0)
return false;
@@ -2566,6 +2634,7 @@ void __dl_clear_params(struct task_struct *p)
dl_se->dl_throttled = 0;
dl_se->dl_yielded = 0;
dl_se->dl_non_contending = 0;
+ dl_se->dl_overrun = 0;
}
bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2fe3aa853e4d..7b6535987500 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3020,9 +3020,7 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
/*
* There are a few boundary cases this might miss but it should
* get called often enough that that should (hopefully) not be
- * a real problem -- added to that it only calls on the local
- * CPU, so if we enqueue remotely we'll miss an update, but
- * the next tick/schedule should update.
+ * a real problem.
*
* It will not get called when we go idle, because the idle
* thread is a different class (!fair), nor will the utilization
@@ -3091,8 +3089,6 @@ static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
return c1 + c2 + c3;
}
-#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
-
/*
* Accumulate the three separate parts of the sum; d1 the remainder
* of the last (incomplete) period, d2 the span of full periods and d3
@@ -3122,7 +3118,7 @@ accumulate_sum(u64 delta, int cpu, struct sched_avg *sa,
u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */
u64 periods;
- scale_freq = arch_scale_freq_capacity(NULL, cpu);
+ scale_freq = arch_scale_freq_capacity(cpu);
scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
delta += sa->period_contrib;
@@ -4365,12 +4361,12 @@ static inline bool cfs_bandwidth_used(void)
void cfs_bandwidth_usage_inc(void)
{
- static_key_slow_inc(&__cfs_bandwidth_used);
+ static_key_slow_inc_cpuslocked(&__cfs_bandwidth_used);
}
void cfs_bandwidth_usage_dec(void)
{
- static_key_slow_dec(&__cfs_bandwidth_used);
+ static_key_slow_dec_cpuslocked(&__cfs_bandwidth_used);
}
#else /* HAVE_JUMP_LABEL */
static bool cfs_bandwidth_used(void)
@@ -5689,8 +5685,8 @@ static int wake_wide(struct task_struct *p)
* soonest. For the purpose of speed we only consider the waking and previous
* CPU.
*
- * wake_affine_idle() - only considers 'now', it check if the waking CPU is (or
- * will be) idle.
+ * wake_affine_idle() - only considers 'now', it checks if the waking CPU is
+ * cache-affine and is (or will be) idle.
*
* wake_affine_weight() - considers the weight to reflect the average
* scheduling latency of the CPUs. This seems to work
@@ -5701,7 +5697,13 @@ static bool
wake_affine_idle(struct sched_domain *sd, struct task_struct *p,
int this_cpu, int prev_cpu, int sync)
{
- if (idle_cpu(this_cpu))
+ /*
+ * If this_cpu is idle, it implies the wakeup is from interrupt
+ * context. Only allow the move if cache is shared. Otherwise an
+ * interrupt intensive workload could force all tasks onto one
+ * node depending on the IO topology or IRQ affinity settings.
+ */
+ if (idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu))
return true;
if (sync && cpu_rq(this_cpu)->nr_running == 1)
@@ -5765,12 +5767,12 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
return affine;
}
-static inline int task_util(struct task_struct *p);
-static int cpu_util_wake(int cpu, struct task_struct *p);
+static inline unsigned long task_util(struct task_struct *p);
+static unsigned long cpu_util_wake(int cpu, struct task_struct *p);
static unsigned long capacity_spare_wake(int cpu, struct task_struct *p)
{
- return capacity_orig_of(cpu) - cpu_util_wake(cpu, p);
+ return max_t(long, capacity_of(cpu) - cpu_util_wake(cpu, p), 0);
}
/*
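
The max_t() clamp above matters because capacity_of(), unlike capacity_orig_of(), is reduced by RT/IRQ pressure and can drop below the task-free utilization estimate; the unsigned subtraction would then wrap to a huge "spare" value. A sketch with invented numbers:

    /* Sketch of the underflow the clamp prevents (values hypothetical). */
    #include <stdio.h>

    int main(void)
    {
        unsigned long cap = 430;   /* capacity_of(): squeezed by RT/IRQ time */
        unsigned long util = 500;  /* cpu_util_wake() estimate for the task */

        unsigned long unclamped = cap - util;  /* wraps to a huge value */
        long clamped = (long)(cap - util) > 0 ? (long)(cap - util) : 0;

        printf("%lu vs %ld\n", unclamped, clamped);  /* garbage vs 0 */
        return 0;
    }
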
@@ -5950,7 +5952,7 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this
}
} else if (shallowest_idle_cpu == -1) {
load = weighted_cpuload(cpu_rq(i));
- if (load < min_load || (load == min_load && i == this_cpu)) {
+ if (load < min_load) {
min_load = load;
least_loaded_cpu = i;
}
@@ -6247,7 +6249,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
* capacity_orig) as it useful for predicting the capacity required after task
* migrations (scheduler-driven DVFS).
*/
-static int cpu_util(int cpu)
+static unsigned long cpu_util(int cpu)
{
unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
unsigned long capacity = capacity_orig_of(cpu);
@@ -6255,7 +6257,7 @@ static int cpu_util(int cpu)
return (util >= capacity) ? capacity : util;
}
-static inline int task_util(struct task_struct *p)
+static inline unsigned long task_util(struct task_struct *p)
{
return p->se.avg.util_avg;
}
@@ -6264,7 +6266,7 @@ static inline int task_util(struct task_struct *p)
* cpu_util_wake: Compute cpu utilization with any contributions from
* the waking task p removed.
*/
-static int cpu_util_wake(int cpu, struct task_struct *p)
+static unsigned long cpu_util_wake(int cpu, struct task_struct *p)
{
unsigned long util, capacity;
@@ -6449,8 +6451,7 @@ static void task_dead_fair(struct task_struct *p)
}
#endif /* CONFIG_SMP */
-static unsigned long
-wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
+static unsigned long wakeup_gran(struct sched_entity *se)
{
unsigned long gran = sysctl_sched_wakeup_granularity;
@@ -6492,7 +6493,7 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
if (vdiff <= 0)
return -1;
- gran = wakeup_gran(curr, se);
+ gran = wakeup_gran(se);
if (vdiff > gran)
return 1;
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 665ace2fc558..862a513adca3 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2212,7 +2212,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
queue_push_tasks(rq);
#endif /* CONFIG_SMP */
- if (p->prio < rq->curr->prio)
+ if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
resched_curr(rq);
}
}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b19552a212de..2e95505e23c6 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -156,13 +156,39 @@ static inline int task_has_dl_policy(struct task_struct *p)
return dl_policy(p->policy);
}
+#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
+
+/*
+ * !! For sched_setattr_nocheck() (kernel) only !!
+ *
+ * This is actually gross. :(
+ *
+ * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE
+ * tasks, but still be able to sleep. We need this on platforms that cannot
+ * atomically change clock frequency. Remove once fast switching is
+ * available on such platforms.
+ *
+ * SUGOV stands for SchedUtil GOVernor.
+ */
+#define SCHED_FLAG_SUGOV 0x10000000
+
+static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
+{
+#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
+ return unlikely(dl_se->flags & SCHED_FLAG_SUGOV);
+#else
+ return false;
+#endif
+}
+
/*
* Tells if entity @a should preempt entity @b.
*/
static inline bool
dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
{
- return dl_time_before(a->deadline, b->deadline);
+ return dl_entity_is_special(a) ||
+ dl_time_before(a->deadline, b->deadline);
}
/*
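
The net effect of the special flag on preemption, modeled standalone with invented values: a sugov entity wins regardless of deadlines. Struct and helper names below only mirror the kernel's; the harness is hypothetical.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SCHED_FLAG_SUGOV 0x10000000

    struct ent { uint64_t deadline; unsigned int flags; };

    /* Same shape as dl_entity_preempt(): special entities short-circuit,
     * everyone else falls back to earliest-deadline-first. */
    static bool preempts(const struct ent *a, const struct ent *b)
    {
        return (a->flags & SCHED_FLAG_SUGOV) ||
               (int64_t)(a->deadline - b->deadline) < 0;
    }

    int main(void)
    {
        struct ent sugov = { .deadline = 0, .flags = SCHED_FLAG_SUGOV };
        struct ent dl    = { .deadline = 100, .flags = 0 };

        printf("%d\n", preempts(&sugov, &dl));  /* 1: always preempts */
        return 0;
    }
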
@@ -1328,47 +1354,6 @@ static inline int task_on_rq_migrating(struct task_struct *p)
# define finish_arch_post_lock_switch() do { } while (0)
#endif
-static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
-{
-#ifdef CONFIG_SMP
- /*
- * We can optimise this out completely for !SMP, because the
- * SMP rebalancing from interrupt is the only thing that cares
- * here.
- */
- next->on_cpu = 1;
-#endif
-}
-
-static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
-{
-#ifdef CONFIG_SMP
- /*
- * After ->on_cpu is cleared, the task can be moved to a different CPU.
- * We must ensure this doesn't happen until the switch is completely
- * finished.
- *
- * In particular, the load of prev->state in finish_task_switch() must
- * happen before this.
- *
- * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
- */
- smp_store_release(&prev->on_cpu, 0);
-#endif
-#ifdef CONFIG_DEBUG_SPINLOCK
- /* this is a valid case when another task releases the spinlock */
- rq->lock.owner = current;
-#endif
- /*
- * If we are tracking spinlock dependencies then we have to
- * fix up the runqueue lock - which gets 'carried over' from
- * prev into current:
- */
- spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
-
- raw_spin_unlock_irq(&rq->lock);
-}
-
/*
* wake flags
*/
@@ -1687,17 +1672,17 @@ static inline int hrtick_enabled(struct rq *rq)
#endif /* CONFIG_SCHED_HRTICK */
-#ifdef CONFIG_SMP
-extern void sched_avg_update(struct rq *rq);
-
#ifndef arch_scale_freq_capacity
static __always_inline
-unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
+unsigned long arch_scale_freq_capacity(int cpu)
{
return SCHED_CAPACITY_SCALE;
}
#endif
+#ifdef CONFIG_SMP
+extern void sched_avg_update(struct rq *rq);
+
#ifndef arch_scale_cpu_capacity
static __always_inline
unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
@@ -1711,10 +1696,17 @@ unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
- rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq));
+ rq->rt_avg += rt_delta * arch_scale_freq_capacity(cpu_of(rq));
sched_avg_update(rq);
}
#else
+#ifndef arch_scale_cpu_capacity
+static __always_inline
+unsigned long arch_scale_cpu_capacity(void __always_unused *sd, int cpu)
+{
+ return SCHED_CAPACITY_SCALE;
+}
+#endif
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
static inline void sched_avg_update(struct rq *rq) { }
#endif
@@ -2096,14 +2088,14 @@ DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
* The way cpufreq is currently arranged requires it to evaluate the CPU
* performance state (frequency/voltage) on a regular basis to prevent it from
* being stuck in a completely inadequate performance level for too long.
- * That is not guaranteed to happen if the updates are only triggered from CFS,
- * though, because they may not be coming in if RT or deadline tasks are active
- * all the time (or there are RT and DL tasks only).
+ * That is not guaranteed to happen if the updates are only triggered from CFS
+ * and DL, though, because they may not be coming in if only RT tasks are
+ * active all the time (or there are RT tasks only).
*
- * As a workaround for that issue, this function is called by the RT and DL
- * sched classes to trigger extra cpufreq updates to prevent it from stalling,
+ * As a workaround for that issue, this function is called periodically by the
+ * RT sched class to trigger extra cpufreq updates to prevent it from stalling,
* but that really is a band-aid. Going forward it should be replaced with
- * solutions targeted more specifically at RT and DL tasks.
+ * solutions targeted more specifically at RT tasks.
*/
static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
{
@@ -2125,3 +2117,17 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
#else /* arch_scale_freq_capacity */
#define arch_scale_freq_invariant() (false)
#endif
+
+#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
+
+static inline unsigned long cpu_util_dl(struct rq *rq)
+{
+ return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
+}
+
+static inline unsigned long cpu_util_cfs(struct rq *rq)
+{
+ return rq->cfs.avg.util_avg;
+}
+
+#endif
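
cpu_util_dl() simply rescales the Q20 running_bw into the 0..SCHED_CAPACITY_SCALE range schedutil works in; for example, a running_bw of roughly 0.1 becomes about 102 out of 1024:

    /* Worked example (BW_SHIFT == 20, SCHED_CAPACITY_SCALE == 1024). */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t running_bw = 104857;            /* ~0.1 in Q20 fixed point */
        unsigned long util = (running_bw * 1024) >> 20;

        printf("%lu\n", util);                   /* 102, i.e. ~10% capacity */
        return 0;
    }
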
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 5f0dfb2abb8d..940fa408a288 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -515,7 +515,7 @@ void put_seccomp_filter(struct task_struct *tsk)
static void seccomp_init_siginfo(siginfo_t *info, int syscall, int reason)
{
- memset(info, 0, sizeof(*info));
+ clear_siginfo(info);
info->si_signo = SIGSYS;
info->si_code = SYS_SECCOMP;
info->si_call_addr = (void __user *)KSTK_EIP(current);
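
clear_siginfo(), used throughout the signal changes below as well, is at this point a thin wrapper over memset(); the point of routing every initialization through it is to guarantee the padding of the siginfo union is zeroed and never leaks kernel stack to userspace. For reference, quoted from memory and therefore approximate:

    /* include/linux/signal.h */
    static inline void clear_siginfo(struct siginfo *info)
    {
        memset(info, 0, sizeof(*info));
    }
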
@@ -978,49 +978,68 @@ long prctl_set_seccomp(unsigned long seccomp_mode, char __user *filter)
}
#if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE)
-long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
- void __user *data)
+static struct seccomp_filter *get_nth_filter(struct task_struct *task,
+ unsigned long filter_off)
{
- struct seccomp_filter *filter;
- struct sock_fprog_kern *fprog;
- long ret;
- unsigned long count = 0;
-
- if (!capable(CAP_SYS_ADMIN) ||
- current->seccomp.mode != SECCOMP_MODE_DISABLED) {
- return -EACCES;
- }
+ struct seccomp_filter *orig, *filter;
+ unsigned long count;
+ /*
+ * Note: this is only correct because the caller should be the (ptrace)
+ * tracer of the task, otherwise lock_task_sighand is needed.
+ */
spin_lock_irq(&task->sighand->siglock);
+
if (task->seccomp.mode != SECCOMP_MODE_FILTER) {
- ret = -EINVAL;
- goto out;
+ spin_unlock_irq(&task->sighand->siglock);
+ return ERR_PTR(-EINVAL);
}
- filter = task->seccomp.filter;
- while (filter) {
- filter = filter->prev;
+ orig = task->seccomp.filter;
+ __get_seccomp_filter(orig);
+ spin_unlock_irq(&task->sighand->siglock);
+
+ count = 0;
+ for (filter = orig; filter; filter = filter->prev)
count++;
- }
if (filter_off >= count) {
- ret = -ENOENT;
+ filter = ERR_PTR(-ENOENT);
goto out;
}
- count -= filter_off;
- filter = task->seccomp.filter;
- while (filter && count > 1) {
- filter = filter->prev;
+ count -= filter_off;
+ for (filter = orig; filter && count > 1; filter = filter->prev)
count--;
- }
if (WARN_ON(count != 1 || !filter)) {
- /* The filter tree shouldn't shrink while we're using it. */
- ret = -ENOENT;
+ filter = ERR_PTR(-ENOENT);
goto out;
}
+ __get_seccomp_filter(filter);
+
+out:
+ __put_seccomp_filter(orig);
+ return filter;
+}
+
+long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
+ void __user *data)
+{
+ struct seccomp_filter *filter;
+ struct sock_fprog_kern *fprog;
+ long ret;
+
+ if (!capable(CAP_SYS_ADMIN) ||
+ current->seccomp.mode != SECCOMP_MODE_DISABLED) {
+ return -EACCES;
+ }
+
+ filter = get_nth_filter(task, filter_off);
+ if (IS_ERR(filter))
+ return PTR_ERR(filter);
+
fprog = filter->prog->orig_prog;
if (!fprog) {
/* This must be a new non-cBPF filter, since we save
@@ -1035,17 +1054,44 @@ long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
if (!data)
goto out;
- __get_seccomp_filter(filter);
- spin_unlock_irq(&task->sighand->siglock);
-
if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
ret = -EFAULT;
+out:
__put_seccomp_filter(filter);
return ret;
+}
-out:
- spin_unlock_irq(&task->sighand->siglock);
+long seccomp_get_metadata(struct task_struct *task,
+ unsigned long size, void __user *data)
+{
+ long ret;
+ struct seccomp_filter *filter;
+ struct seccomp_metadata kmd = {};
+
+ if (!capable(CAP_SYS_ADMIN) ||
+ current->seccomp.mode != SECCOMP_MODE_DISABLED) {
+ return -EACCES;
+ }
+
+ size = min_t(unsigned long, size, sizeof(kmd));
+
+ if (copy_from_user(&kmd, data, size))
+ return -EFAULT;
+
+ filter = get_nth_filter(task, kmd.filter_off);
+ if (IS_ERR(filter))
+ return PTR_ERR(filter);
+
+ memset(&kmd, 0, sizeof(kmd));
+ if (filter->log)
+ kmd.flags |= SECCOMP_FILTER_FLAG_LOG;
+
+ ret = size;
+ if (copy_to_user(data, &kmd, size))
+ ret = -EFAULT;
+
+ __put_seccomp_filter(filter);
return ret;
}
#endif
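
The new seccomp_get_metadata() is wired up to ptrace as PTRACE_SECCOMP_GET_METADATA elsewhere in this series. A hedged userspace sketch: the request constant and struct layout are supplied by hand in case libc headers predate them, and the tracer must already be attached with the tracee stopped.

    #include <stdio.h>
    #include <stdint.h>
    #include <sys/types.h>
    #include <sys/ptrace.h>

    #ifndef PTRACE_SECCOMP_GET_METADATA
    #define PTRACE_SECCOMP_GET_METADATA 0x420d
    #endif

    struct seccomp_metadata {
        uint64_t filter_off;   /* input: which filter to query */
        uint64_t flags;        /* output: e.g. SECCOMP_FILTER_FLAG_LOG */
    };

    static int dump_filter_flags(pid_t pid, uint64_t off)
    {
        struct seccomp_metadata md = { .filter_off = off };

        if (ptrace(PTRACE_SECCOMP_GET_METADATA, pid,
                   (void *)sizeof(md), &md) < 0)
            return -1;
        printf("filter %llu flags %#llx\n",
               (unsigned long long)off, (unsigned long long)md.flags);
        return 0;
    }
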
diff --git a/kernel/signal.c b/kernel/signal.c
index 9558664bd9ec..c6e4c83dc090 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -40,6 +40,7 @@
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
+#include <linux/livepatch.h>
#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>
@@ -165,7 +166,8 @@ void recalc_sigpending_and_wake(struct task_struct *t)
void recalc_sigpending(void)
{
- if (!recalc_sigpending_tsk(current) && !freezing(current))
+ if (!recalc_sigpending_tsk(current) && !freezing(current) &&
+ !klp_patch_pending(current))
clear_thread_flag(TIF_SIGPENDING);
}
@@ -549,6 +551,7 @@ still_pending:
* a fast-pathed signal or we must have been
* out of queue space. So zero out the info.
*/
+ clear_siginfo(info);
info->si_signo = sig;
info->si_errno = 0;
info->si_code = SI_USER;
@@ -642,6 +645,9 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
spin_unlock(&tsk->sighand->siglock);
posixtimer_rearm(info);
spin_lock(&tsk->sighand->siglock);
+
+ /* Don't expose the si_sys_private value to userspace */
+ info->si_sys_private = 0;
}
#endif
return signr;
@@ -1043,6 +1049,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
list_add_tail(&q->list, &pending->list);
switch ((unsigned long) info) {
case (unsigned long) SEND_SIG_NOINFO:
+ clear_siginfo(&q->info);
q->info.si_signo = sig;
q->info.si_errno = 0;
q->info.si_code = SI_USER;
@@ -1051,6 +1058,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
break;
case (unsigned long) SEND_SIG_PRIV:
+ clear_siginfo(&q->info);
q->info.si_signo = sig;
q->info.si_errno = 0;
q->info.si_code = SI_KERNEL;
@@ -1485,6 +1493,129 @@ force_sigsegv(int sig, struct task_struct *p)
return 0;
}
+int force_sig_fault(int sig, int code, void __user *addr
+ ___ARCH_SI_TRAPNO(int trapno)
+ ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
+ , struct task_struct *t)
+{
+ struct siginfo info;
+
+ clear_siginfo(&info);
+ info.si_signo = sig;
+ info.si_errno = 0;
+ info.si_code = code;
+ info.si_addr = addr;
+#ifdef __ARCH_SI_TRAPNO
+ info.si_trapno = trapno;
+#endif
+#ifdef __ia64__
+ info.si_imm = imm;
+ info.si_flags = flags;
+ info.si_isr = isr;
+#endif
+ return force_sig_info(info.si_signo, &info, t);
+}
+
+int send_sig_fault(int sig, int code, void __user *addr
+ ___ARCH_SI_TRAPNO(int trapno)
+ ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
+ , struct task_struct *t)
+{
+ struct siginfo info;
+
+ clear_siginfo(&info);
+ info.si_signo = sig;
+ info.si_errno = 0;
+ info.si_code = code;
+ info.si_addr = addr;
+#ifdef __ARCH_SI_TRAPNO
+ info.si_trapno = trapno;
+#endif
+#ifdef __ia64__
+ info.si_imm = imm;
+ info.si_flags = flags;
+ info.si_isr = isr;
+#endif
+ return send_sig_info(info.si_signo, &info, t);
+}
+
+#if defined(BUS_MCEERR_AO) && defined(BUS_MCEERR_AR)
+int force_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
+{
+ struct siginfo info;
+
+ WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
+ clear_siginfo(&info);
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = code;
+ info.si_addr = addr;
+ info.si_addr_lsb = lsb;
+ return force_sig_info(info.si_signo, &info, t);
+}
+
+int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
+{
+ struct siginfo info;
+
+ WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
+ clear_siginfo(&info);
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = code;
+ info.si_addr = addr;
+ info.si_addr_lsb = lsb;
+ return send_sig_info(info.si_signo, &info, t);
+}
+EXPORT_SYMBOL(send_sig_mceerr);
+#endif
+
+#ifdef SEGV_BNDERR
+int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
+{
+ struct siginfo info;
+
+ clear_siginfo(&info);
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ info.si_code = SEGV_BNDERR;
+ info.si_addr = addr;
+ info.si_lower = lower;
+ info.si_upper = upper;
+ return force_sig_info(info.si_signo, &info, current);
+}
+#endif
+
+#ifdef SEGV_PKUERR
+int force_sig_pkuerr(void __user *addr, u32 pkey)
+{
+ struct siginfo info;
+
+ clear_siginfo(&info);
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ info.si_code = SEGV_PKUERR;
+ info.si_addr = addr;
+ info.si_pkey = pkey;
+ return force_sig_info(info.si_signo, &info, current);
+}
+#endif
+
+/* For the crazy architectures that include trap information in
+ * the errno field, instead of an actual errno value.
+ */
+int force_sig_ptrace_errno_trap(int errno, void __user *addr)
+{
+ struct siginfo info;
+
+ clear_siginfo(&info);
+ info.si_signo = SIGTRAP;
+ info.si_errno = errno;
+ info.si_code = TRAP_HWBKPT;
+ info.si_addr = addr;
+ return force_sig_info(info.si_signo, &info, current);
+}
+
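
These helpers let architecture trap handlers stop open-coding siginfo setup. A hypothetical caller follows; note that the trapno/ia64 arguments exist only on architectures defining __ARCH_SI_TRAPNO or on ia64, so on most arches the call reduces to four arguments:

    /* Hypothetical arch fault handler using the new helper; the function
     * name and fault type are invented for illustration. */
    void do_alignment_fault(struct pt_regs *regs, unsigned long addr)
    {
        force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)addr, current);
    }
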
int kill_pgrp(struct pid *pid, int sig, int priv)
{
int ret;
@@ -1623,6 +1754,7 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
sig = SIGCHLD;
}
+ clear_siginfo(&info);
info.si_signo = sig;
info.si_errno = 0;
/*
@@ -1717,6 +1849,7 @@ static void do_notify_parent_cldstop(struct task_struct *tsk,
parent = tsk->real_parent;
}
+ clear_siginfo(&info);
info.si_signo = SIGCHLD;
info.si_errno = 0;
/*
@@ -1929,7 +2062,7 @@ static void ptrace_do_notify(int signr, int exit_code, int why)
{
siginfo_t info;
- memset(&info, 0, sizeof info);
+ clear_siginfo(&info);
info.si_signo = signr;
info.si_code = exit_code;
info.si_pid = task_pid_vnr(current);
@@ -2136,6 +2269,7 @@ static int ptrace_signal(int signr, siginfo_t *info)
* have updated *info via PTRACE_SETSIGINFO.
*/
if (signr != info->si_signo) {
+ clear_siginfo(info);
info->si_signo = signr;
info->si_errno = 0;
info->si_code = SI_USER;
@@ -2688,9 +2822,7 @@ enum siginfo_layout siginfo_layout(int sig, int si_code)
#endif
[SIGCHLD] = { NSIGCHLD, SIL_CHLD },
[SIGPOLL] = { NSIGPOLL, SIL_POLL },
-#ifdef __ARCH_SIGSYS
[SIGSYS] = { NSIGSYS, SIL_SYS },
-#endif
};
if ((sig < ARRAY_SIZE(filter)) && (si_code <= filter[sig].limit))
layout = filter[sig].layout;
@@ -2712,12 +2844,14 @@ enum siginfo_layout siginfo_layout(int sig, int si_code)
if ((sig == SIGFPE) && (si_code == FPE_FIXME))
layout = SIL_FAULT;
#endif
+#ifdef BUS_FIXME
+ if ((sig == SIGBUS) && (si_code == BUS_FIXME))
+ layout = SIL_FAULT;
+#endif
}
return layout;
}
-#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
-
int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
{
int err;
@@ -2756,13 +2890,21 @@ int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
#ifdef __ARCH_SI_TRAPNO
err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
-#ifdef BUS_MCEERR_AO
+#ifdef __ia64__
+ err |= __put_user(from->si_imm, &to->si_imm);
+ err |= __put_user(from->si_flags, &to->si_flags);
+ err |= __put_user(from->si_isr, &to->si_isr);
+#endif
/*
* Other callers might not initialize the si_lsb field,
* so check explicitly for the right codes here.
*/
- if (from->si_signo == SIGBUS &&
- (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
+#ifdef BUS_MCEERR_AR
+ if (from->si_signo == SIGBUS && from->si_code == BUS_MCEERR_AR)
+ err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
+#endif
+#ifdef BUS_MCEERR_AO
+ if (from->si_signo == SIGBUS && from->si_code == BUS_MCEERR_AO)
err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
#endif
#ifdef SEGV_BNDERR
@@ -2788,18 +2930,185 @@ int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
err |= __put_user(from->si_uid, &to->si_uid);
err |= __put_user(from->si_ptr, &to->si_ptr);
break;
-#ifdef __ARCH_SIGSYS
case SIL_SYS:
err |= __put_user(from->si_call_addr, &to->si_call_addr);
err |= __put_user(from->si_syscall, &to->si_syscall);
err |= __put_user(from->si_arch, &to->si_arch);
break;
-#endif
}
return err;
}
+#ifdef CONFIG_COMPAT
+int copy_siginfo_to_user32(struct compat_siginfo __user *to,
+ const struct siginfo *from)
+#if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
+{
+ return __copy_siginfo_to_user32(to, from, in_x32_syscall());
+}
+int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
+ const struct siginfo *from, bool x32_ABI)
+#endif
+{
+ struct compat_siginfo new;
+ memset(&new, 0, sizeof(new));
+
+ new.si_signo = from->si_signo;
+ new.si_errno = from->si_errno;
+ new.si_code = from->si_code;
+ switch(siginfo_layout(from->si_signo, from->si_code)) {
+ case SIL_KILL:
+ new.si_pid = from->si_pid;
+ new.si_uid = from->si_uid;
+ break;
+ case SIL_TIMER:
+ new.si_tid = from->si_tid;
+ new.si_overrun = from->si_overrun;
+ new.si_int = from->si_int;
+ break;
+ case SIL_POLL:
+ new.si_band = from->si_band;
+ new.si_fd = from->si_fd;
+ break;
+ case SIL_FAULT:
+ new.si_addr = ptr_to_compat(from->si_addr);
+#ifdef __ARCH_SI_TRAPNO
+ new.si_trapno = from->si_trapno;
+#endif
+#ifdef BUS_MCEERR_AR
+ if ((from->si_signo == SIGBUS) && (from->si_code == BUS_MCEERR_AR))
+ new.si_addr_lsb = from->si_addr_lsb;
+#endif
+#ifdef BUS_MCEERR_AO
+ if ((from->si_signo == SIGBUS) && (from->si_code == BUS_MCEERR_AO))
+ new.si_addr_lsb = from->si_addr_lsb;
+#endif
+#ifdef SEGV_BNDERR
+ if ((from->si_signo == SIGSEGV) &&
+ (from->si_code == SEGV_BNDERR)) {
+ new.si_lower = ptr_to_compat(from->si_lower);
+ new.si_upper = ptr_to_compat(from->si_upper);
+ }
+#endif
+#ifdef SEGV_PKUERR
+ if ((from->si_signo == SIGSEGV) &&
+ (from->si_code == SEGV_PKUERR))
+ new.si_pkey = from->si_pkey;
+#endif
+
+ break;
+ case SIL_CHLD:
+ new.si_pid = from->si_pid;
+ new.si_uid = from->si_uid;
+ new.si_status = from->si_status;
+#ifdef CONFIG_X86_X32_ABI
+ if (x32_ABI) {
+ new._sifields._sigchld_x32._utime = from->si_utime;
+ new._sifields._sigchld_x32._stime = from->si_stime;
+ } else
+#endif
+ {
+ new.si_utime = from->si_utime;
+ new.si_stime = from->si_stime;
+ }
+ break;
+ case SIL_RT:
+ new.si_pid = from->si_pid;
+ new.si_uid = from->si_uid;
+ new.si_int = from->si_int;
+ break;
+ case SIL_SYS:
+ new.si_call_addr = ptr_to_compat(from->si_call_addr);
+ new.si_syscall = from->si_syscall;
+ new.si_arch = from->si_arch;
+ break;
+ }
+
+ if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
+ return -EFAULT;
+
+ return 0;
+}
+
+int copy_siginfo_from_user32(struct siginfo *to,
+ const struct compat_siginfo __user *ufrom)
+{
+ struct compat_siginfo from;
+
+ if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
+ return -EFAULT;
+
+ clear_siginfo(to);
+ to->si_signo = from.si_signo;
+ to->si_errno = from.si_errno;
+ to->si_code = from.si_code;
+ switch(siginfo_layout(from.si_signo, from.si_code)) {
+ case SIL_KILL:
+ to->si_pid = from.si_pid;
+ to->si_uid = from.si_uid;
+ break;
+ case SIL_TIMER:
+ to->si_tid = from.si_tid;
+ to->si_overrun = from.si_overrun;
+ to->si_int = from.si_int;
+ break;
+ case SIL_POLL:
+ to->si_band = from.si_band;
+ to->si_fd = from.si_fd;
+ break;
+ case SIL_FAULT:
+ to->si_addr = compat_ptr(from.si_addr);
+#ifdef __ARCH_SI_TRAPNO
+ to->si_trapno = from.si_trapno;
+#endif
+#ifdef BUS_MCEERR_AR
+ if ((from.si_signo == SIGBUS) && (from.si_code == BUS_MCEERR_AR))
+ to->si_addr_lsb = from.si_addr_lsb;
+#endif
+#ifdef BUS_MCEERR_AO
+ if ((from.si_signo == SIGBUS) && (from.si_code == BUS_MCEERR_AO))
+ to->si_addr_lsb = from.si_addr_lsb;
+#endif
+#ifdef SEGV_BNDERR
+ if ((from.si_signo == SIGSEGV) && (from.si_code == SEGV_BNDERR)) {
+ to->si_lower = compat_ptr(from.si_lower);
+ to->si_upper = compat_ptr(from.si_upper);
+ }
+#endif
+#ifdef SEGV_PKUERR
+ if ((from.si_signo == SIGSEGV) && (from.si_code == SEGV_PKUERR))
+ to->si_pkey = from.si_pkey;
+#endif
+ break;
+ case SIL_CHLD:
+ to->si_pid = from.si_pid;
+ to->si_uid = from.si_uid;
+ to->si_status = from.si_status;
+#ifdef CONFIG_X86_X32_ABI
+ if (in_x32_syscall()) {
+ to->si_utime = from._sifields._sigchld_x32._utime;
+ to->si_stime = from._sifields._sigchld_x32._stime;
+ } else
#endif
+ {
+ to->si_utime = from.si_utime;
+ to->si_stime = from.si_stime;
+ }
+ break;
+ case SIL_RT:
+ to->si_pid = from.si_pid;
+ to->si_uid = from.si_uid;
+ to->si_int = from.si_int;
+ break;
+ case SIL_SYS:
+ to->si_call_addr = compat_ptr(from.si_call_addr);
+ to->si_syscall = from.si_syscall;
+ to->si_arch = from.si_arch;
+ break;
+ }
+ return 0;
+}
+#endif /* CONFIG_COMPAT */
/**
* do_sigtimedwait - wait for queued signals specified in @which
@@ -2937,6 +3246,7 @@ SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
struct siginfo info;
+ clear_siginfo(&info);
info.si_signo = sig;
info.si_errno = 0;
info.si_code = SI_USER;
@@ -2978,8 +3288,9 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
- struct siginfo info = {};
+ struct siginfo info;
+ clear_siginfo(&info);
info.si_signo = sig;
info.si_errno = 0;
info.si_code = SI_TKILL;
@@ -3060,7 +3371,7 @@ COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
int, sig,
struct compat_siginfo __user *, uinfo)
{
- siginfo_t info = {};
+ siginfo_t info;
int ret = copy_siginfo_from_user32(&info, uinfo);
if (unlikely(ret))
return ret;
@@ -3104,7 +3415,7 @@ COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
int, sig,
struct compat_siginfo __user *, uinfo)
{
- siginfo_t info = {};
+ siginfo_t info;
if (copy_siginfo_from_user32(&info, uinfo))
return -EFAULT;
@@ -3677,6 +3988,7 @@ void __init signals_init(void)
/* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong! */
BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE
!= offsetof(struct siginfo, _sifields._pad));
+ BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}
@@ -3684,26 +3996,25 @@ void __init signals_init(void)
#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
- * kdb_send_sig_info - Allows kdb to send signals without exposing
+ * kdb_send_sig - Allows kdb to send signals without exposing
* signal internals. This function checks if the required locks are
* available before calling the main signal code, to avoid kdb
* deadlocks.
*/
-void
-kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
+void kdb_send_sig(struct task_struct *t, int sig)
{
static struct task_struct *kdb_prev_t;
- int sig, new_t;
+ int new_t, ret;
if (!spin_trylock(&t->sighand->siglock)) {
kdb_printf("Can't do kill command now.\n"
"The sigmask lock is held somewhere else in "
"kernel, try again later\n");
return;
}
- spin_unlock(&t->sighand->siglock);
new_t = kdb_prev_t != t;
kdb_prev_t = t;
if (t->state != TASK_RUNNING && new_t) {
+ spin_unlock(&t->sighand->siglock);
kdb_printf("Process is not RUNNING, sending a signal from "
"kdb risks deadlock\n"
"on the run queue locks. "
@@ -3712,8 +4023,9 @@ kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
"the deadlock.\n");
return;
}
- sig = info->si_signo;
- if (send_sig_info(sig, info, t))
+ ret = send_signal(sig, SEND_SIG_PRIV, t, false);
+ spin_unlock(&t->sighand->siglock);
+ if (ret)
kdb_printf("Fail to deliver Signal %d to process %d.\n",
sig, t->pid);
else
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 2f5e87f1bae2..24d243ef8e71 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -665,7 +665,7 @@ static void run_ksoftirqd(unsigned int cpu)
*/
__do_softirq();
local_irq_enable();
- cond_resched_rcu_qs();
+ cond_resched();
return;
}
local_irq_enable();
diff --git a/kernel/sys.c b/kernel/sys.c
index 83ffd7dccf23..f2289de20e19 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -135,7 +135,7 @@ EXPORT_SYMBOL(overflowgid);
*/
int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
-int fs_overflowgid = DEFAULT_FS_OVERFLOWUID;
+int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;
EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index d32520840fde..ae0c8a411fe7 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -60,6 +60,15 @@
#include "tick-internal.h"
/*
+ * Masks for selecting the soft and hard context timers from
+ * cpu_base->active
+ */
+#define MASK_SHIFT (HRTIMER_BASE_MONOTONIC_SOFT)
+#define HRTIMER_ACTIVE_HARD ((1U << MASK_SHIFT) - 1)
+#define HRTIMER_ACTIVE_SOFT (HRTIMER_ACTIVE_HARD << MASK_SHIFT)
+#define HRTIMER_ACTIVE_ALL (HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD)
+
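
With the four soft bases appended after the hard ones in the enum, as the clock_base initializer below does, HRTIMER_BASE_MONOTONIC_SOFT is 4, so the masks work out to:

    MASK_SHIFT          == 4
    HRTIMER_ACTIVE_HARD == 0x0f   /* bits 0-3: the hard clock bases */
    HRTIMER_ACTIVE_SOFT == 0xf0   /* bits 4-7: the soft clock bases */
    HRTIMER_ACTIVE_ALL  == 0xff
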
+/*
* The timer bases:
*
* There are more clockids than hrtimer bases. Thus, we index
@@ -70,7 +79,6 @@
DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{
.lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
- .seq = SEQCNT_ZERO(hrtimer_bases.seq),
.clock_base =
{
{
@@ -93,6 +101,26 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
.clockid = CLOCK_TAI,
.get_time = &ktime_get_clocktai,
},
+ {
+ .index = HRTIMER_BASE_MONOTONIC_SOFT,
+ .clockid = CLOCK_MONOTONIC,
+ .get_time = &ktime_get,
+ },
+ {
+ .index = HRTIMER_BASE_REALTIME_SOFT,
+ .clockid = CLOCK_REALTIME,
+ .get_time = &ktime_get_real,
+ },
+ {
+ .index = HRTIMER_BASE_BOOTTIME_SOFT,
+ .clockid = CLOCK_BOOTTIME,
+ .get_time = &ktime_get_boottime,
+ },
+ {
+ .index = HRTIMER_BASE_TAI_SOFT,
+ .clockid = CLOCK_TAI,
+ .get_time = &ktime_get_clocktai,
+ },
}
};
@@ -118,7 +146,6 @@ static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
* timer->base->cpu_base
*/
static struct hrtimer_cpu_base migration_cpu_base = {
- .seq = SEQCNT_ZERO(migration_cpu_base),
.clock_base = { { .cpu_base = &migration_cpu_base, }, },
};
@@ -156,45 +183,33 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
}
/*
- * With HIGHRES=y we do not migrate the timer when it is expiring
- * before the next event on the target cpu because we cannot reprogram
- * the target cpu hardware and we would cause it to fire late.
+ * We do not migrate the timer when it is expiring before the next
+ * event on the target cpu. When high resolution is enabled, we cannot
+ * reprogram the target cpu hardware and we would cause it to fire
+ * late. To keep it simple, we handle the high resolution enabled and
+ * disabled cases the same way.
*
* Called with cpu_base->lock of target cpu held.
*/
static int
hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
{
-#ifdef CONFIG_HIGH_RES_TIMERS
ktime_t expires;
- if (!new_base->cpu_base->hres_active)
- return 0;
-
expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
- return expires <= new_base->cpu_base->expires_next;
-#else
- return 0;
-#endif
+ return expires < new_base->cpu_base->expires_next;
}
-#ifdef CONFIG_NO_HZ_COMMON
-static inline
-struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
- int pinned)
-{
- if (pinned || !base->migration_enabled)
- return base;
- return &per_cpu(hrtimer_bases, get_nohz_timer_target());
-}
-#else
static inline
struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
int pinned)
{
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+ if (static_branch_likely(&timers_migration_enabled) && !pinned)
+ return &per_cpu(hrtimer_bases, get_nohz_timer_target());
+#endif
return base;
}
-#endif
/*
* We switch the timer base to a power-optimized selected CPU target,
@@ -396,7 +411,8 @@ static inline void debug_hrtimer_init(struct hrtimer *timer)
debug_object_init(timer, &hrtimer_debug_descr);
}
-static inline void debug_hrtimer_activate(struct hrtimer *timer)
+static inline void debug_hrtimer_activate(struct hrtimer *timer,
+ enum hrtimer_mode mode)
{
debug_object_activate(timer, &hrtimer_debug_descr);
}
@@ -429,8 +445,10 @@ void destroy_hrtimer_on_stack(struct hrtimer *timer)
EXPORT_SYMBOL_GPL(destroy_hrtimer_on_stack);
#else
+
static inline void debug_hrtimer_init(struct hrtimer *timer) { }
-static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
+static inline void debug_hrtimer_activate(struct hrtimer *timer,
+ enum hrtimer_mode mode) { }
static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
#endif
@@ -442,10 +460,11 @@ debug_init(struct hrtimer *timer, clockid_t clockid,
trace_hrtimer_init(timer, clockid, mode);
}
-static inline void debug_activate(struct hrtimer *timer)
+static inline void debug_activate(struct hrtimer *timer,
+ enum hrtimer_mode mode)
{
- debug_hrtimer_activate(timer);
- trace_hrtimer_start(timer);
+ debug_hrtimer_activate(timer, mode);
+ trace_hrtimer_start(timer, mode);
}
static inline void debug_deactivate(struct hrtimer *timer)
@@ -454,35 +473,43 @@ static inline void debug_deactivate(struct hrtimer *timer)
trace_hrtimer_cancel(timer);
}
-#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
-static inline void hrtimer_update_next_timer(struct hrtimer_cpu_base *cpu_base,
- struct hrtimer *timer)
+static struct hrtimer_clock_base *
+__next_base(struct hrtimer_cpu_base *cpu_base, unsigned int *active)
{
-#ifdef CONFIG_HIGH_RES_TIMERS
- cpu_base->next_timer = timer;
-#endif
+ unsigned int idx;
+
+ if (!*active)
+ return NULL;
+
+ idx = __ffs(*active);
+ *active &= ~(1U << idx);
+
+ return &cpu_base->clock_base[idx];
}
-static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
+#define for_each_active_base(base, cpu_base, active) \
+ while ((base = __next_base((cpu_base), &(active))))
+
+static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,
+ unsigned int active,
+ ktime_t expires_next)
{
- struct hrtimer_clock_base *base = cpu_base->clock_base;
- unsigned int active = cpu_base->active_bases;
- ktime_t expires, expires_next = KTIME_MAX;
+ struct hrtimer_clock_base *base;
+ ktime_t expires;
- hrtimer_update_next_timer(cpu_base, NULL);
- for (; active; base++, active >>= 1) {
+ for_each_active_base(base, cpu_base, active) {
struct timerqueue_node *next;
struct hrtimer *timer;
- if (!(active & 0x01))
- continue;
-
next = timerqueue_getnext(&base->active);
timer = container_of(next, struct hrtimer, node);
expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
if (expires < expires_next) {
expires_next = expires;
- hrtimer_update_next_timer(cpu_base, timer);
+ if (timer->is_soft)
+ cpu_base->softirq_next_timer = timer;
+ else
+ cpu_base->next_timer = timer;
}
}
/*
@@ -494,7 +521,47 @@ static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
expires_next = 0;
return expires_next;
}
-#endif
+
+/*
+ * Recomputes cpu_base::*next_timer and returns the earliest expires_next but
+ * does not set cpu_base::*expires_next, that is done by hrtimer_reprogram.
+ *
+ * When a softirq is pending, we can ignore the HRTIMER_ACTIVE_SOFT bases,
+ * those timers will get run whenever the softirq gets handled, at the end of
+ * hrtimer_run_softirq(), hrtimer_update_softirq_timer() will re-add these bases.
+ *
+ * Therefore softirq values are those from the HRTIMER_ACTIVE_SOFT clock bases.
+ * The !softirq values are the minima across HRTIMER_ACTIVE_ALL, unless an actual
+ * softirq is pending, in which case they're the minima of HRTIMER_ACTIVE_HARD.
+ *
+ * @active_mask must be one of:
+ * - HRTIMER_ACTIVE_ALL,
+ * - HRTIMER_ACTIVE_SOFT, or
+ * - HRTIMER_ACTIVE_HARD.
+ */
+static ktime_t
+__hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_mask)
+{
+ unsigned int active;
+ struct hrtimer *next_timer = NULL;
+ ktime_t expires_next = KTIME_MAX;
+
+ if (!cpu_base->softirq_activated && (active_mask & HRTIMER_ACTIVE_SOFT)) {
+ active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
+ cpu_base->softirq_next_timer = NULL;
+ expires_next = __hrtimer_next_event_base(cpu_base, active, KTIME_MAX);
+
+ next_timer = cpu_base->softirq_next_timer;
+ }
+
+ if (active_mask & HRTIMER_ACTIVE_HARD) {
+ active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
+ cpu_base->next_timer = next_timer;
+ expires_next = __hrtimer_next_event_base(cpu_base, active, expires_next);
+ }
+
+ return expires_next;
+}
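
__next_base() peels off set bits lowest-first, so for_each_active_base() visits only the bases that have queued timers. A standalone model of the walk, with __builtin_ctz standing in for the kernel's __ffs():

    #include <stdio.h>

    int main(void)
    {
        unsigned int active = 0x12;        /* bases 1 and 4 have timers */

        while (active) {
            unsigned int idx = __builtin_ctz(active);

            active &= ~(1U << idx);
            printf("visit clock_base[%u]\n", idx);  /* 1, then 4 */
        }
        return 0;
    }
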
static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
{
@@ -502,36 +569,14 @@ static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;
- return ktime_get_update_offsets_now(&base->clock_was_set_seq,
+ ktime_t now = ktime_get_update_offsets_now(&base->clock_was_set_seq,
offs_real, offs_boot, offs_tai);
-}
-/* High resolution timer related functions */
-#ifdef CONFIG_HIGH_RES_TIMERS
-
-/*
- * High resolution timer enabled ?
- */
-static bool hrtimer_hres_enabled __read_mostly = true;
-unsigned int hrtimer_resolution __read_mostly = LOW_RES_NSEC;
-EXPORT_SYMBOL_GPL(hrtimer_resolution);
-
-/*
- * Enable / Disable high resolution mode
- */
-static int __init setup_hrtimer_hres(char *str)
-{
- return (kstrtobool(str, &hrtimer_hres_enabled) == 0);
-}
-
-__setup("highres=", setup_hrtimer_hres);
+ base->clock_base[HRTIMER_BASE_REALTIME_SOFT].offset = *offs_real;
+ base->clock_base[HRTIMER_BASE_BOOTTIME_SOFT].offset = *offs_boot;
+ base->clock_base[HRTIMER_BASE_TAI_SOFT].offset = *offs_tai;
-/*
- * hrtimer_high_res_enabled - query, if the highres mode is enabled
- */
-static inline int hrtimer_is_hres_enabled(void)
-{
- return hrtimer_hres_enabled;
+ return now;
}
/*
@@ -539,7 +584,8 @@ static inline int hrtimer_is_hres_enabled(void)
*/
static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
{
- return cpu_base->hres_active;
+ return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
+ cpu_base->hres_active : 0;
}
static inline int hrtimer_hres_active(void)
@@ -557,10 +603,23 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
{
ktime_t expires_next;
- if (!cpu_base->hres_active)
- return;
+ /*
+ * Find the current next expiration time.
+ */
+ expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
- expires_next = __hrtimer_get_next_event(cpu_base);
+ if (cpu_base->next_timer && cpu_base->next_timer->is_soft) {
+ /*
+ * When the softirq is activated, hrtimer has to be
+ * programmed with the first hard hrtimer because soft
+ * timer interrupt could occur too late.
+ */
+ if (cpu_base->softirq_activated)
+ expires_next = __hrtimer_get_next_event(cpu_base,
+ HRTIMER_ACTIVE_HARD);
+ else
+ cpu_base->softirq_expires_next = expires_next;
+ }
if (skip_equal && expires_next == cpu_base->expires_next)
return;
@@ -568,6 +627,9 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
cpu_base->expires_next = expires_next;
/*
+ * If hres is not active, hardware does not have to be
+ * reprogrammed yet.
+ *
* If a hang was detected in the last timer interrupt then we
* leave the hang delay active in the hardware. We want the
* system to make progress. That also prevents the following
@@ -581,81 +643,38 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
* set. So we'd effectivly block all timers until the T2 event
* fires.
*/
- if (cpu_base->hang_detected)
+ if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
return;
tick_program_event(cpu_base->expires_next, 1);
}
+/* High resolution timer related functions */
+#ifdef CONFIG_HIGH_RES_TIMERS
+
/*
- * When a timer is enqueued and expires earlier than the already enqueued
- * timers, we have to check, whether it expires earlier than the timer for
- * which the clock event device was armed.
- *
- * Called with interrupts disabled and base->cpu_base.lock held
+ * High resolution timer enabled ?
*/
-static void hrtimer_reprogram(struct hrtimer *timer,
- struct hrtimer_clock_base *base)
-{
- struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
- ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
-
- WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);
-
- /*
- * If the timer is not on the current cpu, we cannot reprogram
- * the other cpus clock event device.
- */
- if (base->cpu_base != cpu_base)
- return;
-
- /*
- * If the hrtimer interrupt is running, then it will
- * reevaluate the clock bases and reprogram the clock event
- * device. The callbacks are always executed in hard interrupt
- * context so we don't need an extra check for a running
- * callback.
- */
- if (cpu_base->in_hrtirq)
- return;
-
- /*
- * CLOCK_REALTIME timer might be requested with an absolute
- * expiry time which is less than base->offset. Set it to 0.
- */
- if (expires < 0)
- expires = 0;
-
- if (expires >= cpu_base->expires_next)
- return;
-
- /* Update the pointer to the next expiring timer */
- cpu_base->next_timer = timer;
-
- /*
- * If a hang was detected in the last timer interrupt then we
- * do not schedule a timer which is earlier than the expiry
- * which we enforced in the hang detection. We want the system
- * to make progress.
- */
- if (cpu_base->hang_detected)
- return;
+static bool hrtimer_hres_enabled __read_mostly = true;
+unsigned int hrtimer_resolution __read_mostly = LOW_RES_NSEC;
+EXPORT_SYMBOL_GPL(hrtimer_resolution);
- /*
- * Program the timer hardware. We enforce the expiry for
- * events which are already in the past.
- */
- cpu_base->expires_next = expires;
- tick_program_event(expires, 1);
+/*
+ * Enable / Disable high resolution mode
+ */
+static int __init setup_hrtimer_hres(char *str)
+{
+ return (kstrtobool(str, &hrtimer_hres_enabled) == 0);
}
+__setup("highres=", setup_hrtimer_hres);
+
/*
- * Initialize the high resolution related parts of cpu_base
+ * hrtimer_high_res_enabled - query, if the highres mode is enabled
*/
-static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
+static inline int hrtimer_is_hres_enabled(void)
{
- base->expires_next = KTIME_MAX;
- base->hres_active = 0;
+ return hrtimer_hres_enabled;
}
/*
@@ -667,7 +686,7 @@ static void retrigger_next_event(void *arg)
{
struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
- if (!base->hres_active)
+ if (!__hrtimer_hres_active(base))
return;
raw_spin_lock(&base->lock);
@@ -714,23 +733,102 @@ void clock_was_set_delayed(void)
#else
-static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *b) { return 0; }
-static inline int hrtimer_hres_active(void) { return 0; }
static inline int hrtimer_is_hres_enabled(void) { return 0; }
static inline void hrtimer_switch_to_hres(void) { }
-static inline void
-hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
-static inline int hrtimer_reprogram(struct hrtimer *timer,
- struct hrtimer_clock_base *base)
-{
- return 0;
-}
-static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void retrigger_next_event(void *arg) { }
#endif /* CONFIG_HIGH_RES_TIMERS */
/*
+ * When a timer is enqueued and expires earlier than the already enqueued
+ * timers, we have to check, whether it expires earlier than the timer for
+ * which the clock event device was armed.
+ *
+ * Called with interrupts disabled and base->cpu_base.lock held
+ */
+static void hrtimer_reprogram(struct hrtimer *timer, bool reprogram)
+{
+ struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
+ struct hrtimer_clock_base *base = timer->base;
+ ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
+
+ WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);
+
+ /*
+ * CLOCK_REALTIME timer might be requested with an absolute
+ * expiry time which is less than base->offset. Set it to 0.
+ */
+ if (expires < 0)
+ expires = 0;
+
+ if (timer->is_soft) {
+ /*
+ * soft hrtimer could be started on a remote CPU. In this
+ * case softirq_expires_next needs to be updated on the
+ * remote CPU. The soft hrtimer will not expire before the
+ * first hard hrtimer on the remote CPU -
+ * hrtimer_check_target() prevents this case.
+ */
+ struct hrtimer_cpu_base *timer_cpu_base = base->cpu_base;
+
+ if (timer_cpu_base->softirq_activated)
+ return;
+
+ if (!ktime_before(expires, timer_cpu_base->softirq_expires_next))
+ return;
+
+ timer_cpu_base->softirq_next_timer = timer;
+ timer_cpu_base->softirq_expires_next = expires;
+
+ if (!ktime_before(expires, timer_cpu_base->expires_next) ||
+ !reprogram)
+ return;
+ }
+
+ /*
+ * If the timer is not on the current cpu, we cannot reprogram
+ * the other cpus clock event device.
+ */
+ if (base->cpu_base != cpu_base)
+ return;
+
+ /*
+ * If the hrtimer interrupt is running, then it will
+ * reevaluate the clock bases and reprogram the clock event
+ * device. The callbacks are always executed in hard interrupt
+ * context so we don't need an extra check for a running
+ * callback.
+ */
+ if (cpu_base->in_hrtirq)
+ return;
+
+ if (expires >= cpu_base->expires_next)
+ return;
+
+ /* Update the pointer to the next expiring timer */
+ cpu_base->next_timer = timer;
+ cpu_base->expires_next = expires;
+
+ /*
+ * If hres is not active, hardware does not have to be
+ * programmed yet.
+ *
+ * If a hang was detected in the last timer interrupt then we
+ * do not schedule a timer which is earlier than the expiry
+ * which we enforced in the hang detection. We want the system
+ * to make progress.
+ */
+ if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
+ return;
+
+ /*
+ * Program the timer hardware. We enforce the expiry for
+ * events which are already in the past.
+ */
+ tick_program_event(expires, 1);
+}
+
+/*
* Clock realtime was set
*
* Change the offset of the realtime clock vs. the monotonic
@@ -835,9 +933,10 @@ EXPORT_SYMBOL_GPL(hrtimer_forward);
* Returns 1 when the new timer is the leftmost timer in the tree.
*/
static int enqueue_hrtimer(struct hrtimer *timer,
- struct hrtimer_clock_base *base)
+ struct hrtimer_clock_base *base,
+ enum hrtimer_mode mode)
{
- debug_activate(timer);
+ debug_activate(timer, mode);
base->cpu_base->active_bases |= 1 << base->index;
@@ -870,7 +969,6 @@ static void __remove_hrtimer(struct hrtimer *timer,
if (!timerqueue_del(&base->active, &timer->node))
cpu_base->active_bases &= ~(1 << base->index);
-#ifdef CONFIG_HIGH_RES_TIMERS
/*
* Note: If reprogram is false we do not update
* cpu_base->next_timer. This happens when we remove the first
@@ -881,7 +979,6 @@ static void __remove_hrtimer(struct hrtimer *timer,
*/
if (reprogram && timer == cpu_base->next_timer)
hrtimer_force_reprogram(cpu_base, 1);
-#endif
}
/*
@@ -930,22 +1027,36 @@ static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
return tim;
}
-/**
- * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
- * @timer: the timer to be added
- * @tim: expiry time
- * @delta_ns: "slack" range for the timer
- * @mode: expiry mode: absolute (HRTIMER_MODE_ABS) or
- * relative (HRTIMER_MODE_REL)
- */
-void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
- u64 delta_ns, const enum hrtimer_mode mode)
+static void
+hrtimer_update_softirq_timer(struct hrtimer_cpu_base *cpu_base, bool reprogram)
{
- struct hrtimer_clock_base *base, *new_base;
- unsigned long flags;
- int leftmost;
+ ktime_t expires;
- base = lock_hrtimer_base(timer, &flags);
+ /*
+ * Find the next SOFT expiration.
+ */
+ expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_SOFT);
+
+ /*
+ * Reprogramming needs to be triggered even if the next soft
+ * hrtimer expires at the same time as the next hard
+ * hrtimer. cpu_base->softirq_expires_next needs to be updated!
+ */
+ if (expires == KTIME_MAX)
+ return;
+
+ /*
+ * cpu_base->*next_timer is recomputed by __hrtimer_get_next_event()
+ * cpu_base->*expires_next is only set by hrtimer_reprogram()
+ */
+ hrtimer_reprogram(cpu_base->softirq_next_timer, reprogram);
+}
+
+static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+ u64 delta_ns, const enum hrtimer_mode mode,
+ struct hrtimer_clock_base *base)
+{
+ struct hrtimer_clock_base *new_base;
/* Remove an active timer from the queue: */
remove_hrtimer(timer, base, true);
@@ -960,21 +1071,35 @@ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
/* Switch the timer base, if necessary: */
new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
- leftmost = enqueue_hrtimer(timer, new_base);
- if (!leftmost)
- goto unlock;
+ return enqueue_hrtimer(timer, new_base, mode);
+}
+
+/**
+ * hrtimer_start_range_ns - (re)start an hrtimer
+ * @timer: the timer to be added
+ * @tim: expiry time
+ * @delta_ns: "slack" range for the timer
+ * @mode: timer mode: absolute (HRTIMER_MODE_ABS) or
+ * relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED);
+ * softirq based mode is considered for debug purposes only!
+ */
+void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+ u64 delta_ns, const enum hrtimer_mode mode)
+{
+ struct hrtimer_clock_base *base;
+ unsigned long flags;
+
+ /*
+ * Check whether the HRTIMER_MODE_SOFT bit and hrtimer.is_soft
+ * match.
+ */
+ WARN_ON_ONCE(!(mode & HRTIMER_MODE_SOFT) ^ !timer->is_soft);
+
+ base = lock_hrtimer_base(timer, &flags);
+
+ if (__hrtimer_start_range_ns(timer, tim, delta_ns, mode, base))
+ hrtimer_reprogram(timer, true);
- if (!hrtimer_is_hres_active(timer)) {
- /*
- * Kick to reschedule the next tick to handle the new timer
- * on dynticks target.
- */
- if (new_base->cpu_base->nohz_active)
- wake_up_nohz_cpu(new_base->cpu_base->cpu);
- } else {
- hrtimer_reprogram(timer, new_base);
- }
-unlock:
unlock_hrtimer_base(timer, &flags);
}
EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
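
A hedged usage sketch of the soft expiry mode added here. The WARN_ON_ONCE()
above requires that the SOFT bit used at start time matches the one given to
hrtimer_init(), since that is where timer->is_soft is derived; the callback
and function names below are illustrative:

	static enum hrtimer_restart my_soft_cb(struct hrtimer *t)
	{
		/* Invoked from HRTIMER_SOFTIRQ context rather than hard irq. */
		return HRTIMER_NORESTART;
	}

	static void my_start_soft_timer(struct hrtimer *t)
	{
		hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
		t->function = my_soft_cb;
		hrtimer_start(t, ms_to_ktime(10), HRTIMER_MODE_REL_SOFT);
	}
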
@@ -1072,7 +1197,7 @@ u64 hrtimer_get_next_event(void)
raw_spin_lock_irqsave(&cpu_base->lock, flags);
if (!__hrtimer_hres_active(cpu_base))
- expires = __hrtimer_get_next_event(cpu_base);
+ expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
@@ -1095,17 +1220,24 @@ static inline int hrtimer_clockid_to_base(clockid_t clock_id)
static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
enum hrtimer_mode mode)
{
+ bool softtimer = !!(mode & HRTIMER_MODE_SOFT);
+ int base = softtimer ? HRTIMER_MAX_CLOCK_BASES / 2 : 0;
struct hrtimer_cpu_base *cpu_base;
- int base;
memset(timer, 0, sizeof(struct hrtimer));
cpu_base = raw_cpu_ptr(&hrtimer_bases);
- if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
+ /*
+ * POSIX magic: Relative CLOCK_REALTIME timers are not affected by
+ * clock modifications, so they need to become CLOCK_MONOTONIC to
+ * ensure POSIX compliance.
+ */
+ if (clock_id == CLOCK_REALTIME && mode & HRTIMER_MODE_REL)
clock_id = CLOCK_MONOTONIC;
- base = hrtimer_clockid_to_base(clock_id);
+ base += hrtimer_clockid_to_base(clock_id);
+ timer->is_soft = softtimer;
timer->base = &cpu_base->clock_base[base];
timerqueue_init(&timer->node);
}
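
The effect of the POSIX remapping above, sketched from the caller's
perspective (a sketch, not new API: both timers end up on a CLOCK_MONOTONIC
clock base):

	static void my_init_timers(struct hrtimer *t1, struct hrtimer *t2)
	{
		hrtimer_init(t1, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		/* Remapped to CLOCK_MONOTONIC by the POSIX rule above:
		 * a relative CLOCK_REALTIME timer must not move when the
		 * wall clock is set. */
		hrtimer_init(t2, CLOCK_REALTIME, HRTIMER_MODE_REL);
	}
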
@@ -1114,7 +1246,13 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
* hrtimer_init - initialize a timer to the given clock
* @timer: the timer to be initialized
* @clock_id: the clock to be used
- * @mode: timer mode abs/rel
+ * @mode: The modes which are relevant for initialization:
+ * HRTIMER_MODE_ABS, HRTIMER_MODE_REL, HRTIMER_MODE_ABS_SOFT,
+ * HRTIMER_MODE_REL_SOFT
+ *
+ * The PINNED variants of the above can be handed in,
+ * but the PINNED bit is ignored as pinning happens
+ * when the hrtimer is started
*/
void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
enum hrtimer_mode mode)
@@ -1133,19 +1271,19 @@ EXPORT_SYMBOL_GPL(hrtimer_init);
*/
bool hrtimer_active(const struct hrtimer *timer)
{
- struct hrtimer_cpu_base *cpu_base;
+ struct hrtimer_clock_base *base;
unsigned int seq;
do {
- cpu_base = READ_ONCE(timer->base->cpu_base);
- seq = raw_read_seqcount_begin(&cpu_base->seq);
+ base = READ_ONCE(timer->base);
+ seq = raw_read_seqcount_begin(&base->seq);
if (timer->state != HRTIMER_STATE_INACTIVE ||
- cpu_base->running == timer)
+ base->running == timer)
return true;
- } while (read_seqcount_retry(&cpu_base->seq, seq) ||
- cpu_base != READ_ONCE(timer->base->cpu_base));
+ } while (read_seqcount_retry(&base->seq, seq) ||
+ base != READ_ONCE(timer->base));
return false;
}
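
For readers unfamiliar with the retry loop above: it is the standard seqcount
pattern, now keyed on the per-clock-base seqcount instead of the per-cpu one.
A generic sketch with hypothetical names (hrtimer_active() uses the raw_
variants and relies on the write barriers in __run_hrtimer() rather than a
begin/end pair, but the reader structure is the same):

	#include <linux/seqlock.h>

	static seqcount_t my_seq = SEQCNT_ZERO(my_seq);
	static int my_state;

	static void my_writer(int v)	/* writers serialized externally */
	{
		write_seqcount_begin(&my_seq);
		my_state = v;
		write_seqcount_end(&my_seq);
	}

	static int my_reader(void)
	{
		unsigned int seq;
		int v;

		do {
			seq = read_seqcount_begin(&my_seq);
			v = my_state;
		} while (read_seqcount_retry(&my_seq, seq));
		return v;
	}
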
@@ -1171,7 +1309,8 @@ EXPORT_SYMBOL_GPL(hrtimer_active);
static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
struct hrtimer_clock_base *base,
- struct hrtimer *timer, ktime_t *now)
+ struct hrtimer *timer, ktime_t *now,
+ unsigned long flags)
{
enum hrtimer_restart (*fn)(struct hrtimer *);
int restart;
@@ -1179,16 +1318,16 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
lockdep_assert_held(&cpu_base->lock);
debug_deactivate(timer);
- cpu_base->running = timer;
+ base->running = timer;
/*
* Separate the ->running assignment from the ->state assignment.
*
* As with a regular write barrier, this ensures the read side in
- * hrtimer_active() cannot observe cpu_base->running == NULL &&
+ * hrtimer_active() cannot observe base->running == NULL &&
* timer->state == INACTIVE.
*/
- raw_write_seqcount_barrier(&cpu_base->seq);
+ raw_write_seqcount_barrier(&base->seq);
__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
fn = timer->function;
@@ -1202,15 +1341,15 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
timer->is_rel = false;
/*
- * Because we run timers from hardirq context, there is no chance
- * they get migrated to another cpu, therefore its safe to unlock
- * the timer base.
+ * The timer is marked as running in the CPU base, so it is
+ * protected against migration to a different CPU even if the lock
+ * is dropped.
*/
- raw_spin_unlock(&cpu_base->lock);
+ raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
trace_hrtimer_expire_entry(timer, now);
restart = fn(timer);
trace_hrtimer_expire_exit(timer);
- raw_spin_lock(&cpu_base->lock);
+ raw_spin_lock_irq(&cpu_base->lock);
/*
* Note: We clear the running state after enqueue_hrtimer and
@@ -1223,33 +1362,31 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
*/
if (restart != HRTIMER_NORESTART &&
!(timer->state & HRTIMER_STATE_ENQUEUED))
- enqueue_hrtimer(timer, base);
+ enqueue_hrtimer(timer, base, HRTIMER_MODE_ABS);
/*
* Separate the ->running assignment from the ->state assignment.
*
* As with a regular write barrier, this ensures the read side in
- * hrtimer_active() cannot observe cpu_base->running == NULL &&
+ * hrtimer_active() cannot observe base->running == NULL &&
* timer->state == INACTIVE.
*/
- raw_write_seqcount_barrier(&cpu_base->seq);
+ raw_write_seqcount_barrier(&base->seq);
- WARN_ON_ONCE(cpu_base->running != timer);
- cpu_base->running = NULL;
+ WARN_ON_ONCE(base->running != timer);
+ base->running = NULL;
}
-static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
+static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
+ unsigned long flags, unsigned int active_mask)
{
- struct hrtimer_clock_base *base = cpu_base->clock_base;
- unsigned int active = cpu_base->active_bases;
+ struct hrtimer_clock_base *base;
+ unsigned int active = cpu_base->active_bases & active_mask;
- for (; active; base++, active >>= 1) {
+ for_each_active_base(base, cpu_base, active) {
struct timerqueue_node *node;
ktime_t basenow;
- if (!(active & 0x01))
- continue;
-
basenow = ktime_add(now, base->offset);
while ((node = timerqueue_getnext(&base->active))) {
@@ -1272,11 +1409,28 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
if (basenow < hrtimer_get_softexpires_tv64(timer))
break;
- __run_hrtimer(cpu_base, base, timer, &basenow);
+ __run_hrtimer(cpu_base, base, timer, &basenow, flags);
}
}
}
+static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h)
+{
+ struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
+ unsigned long flags;
+ ktime_t now;
+
+ raw_spin_lock_irqsave(&cpu_base->lock, flags);
+
+ now = hrtimer_update_base(cpu_base);
+ __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_SOFT);
+
+ cpu_base->softirq_activated = 0;
+ hrtimer_update_softirq_timer(cpu_base, true);
+
+ raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
+}
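
hrtimer_run_softirq() is registered for HRTIMER_SOFTIRQ via open_softirq() in
hrtimers_init() further down. A minimal sketch of that core-kernel pattern
(not usable from modules; the handler name is illustrative):

	#include <linux/interrupt.h>

	static void my_softirq_action(struct softirq_action *h)
	{
		/* Runs in softirq context with interrupts enabled. */
	}

	static void __init my_subsystem_init(void)
	{
		open_softirq(HRTIMER_SOFTIRQ, my_softirq_action);
	}

	/* Later, from hard interrupt context with interrupts disabled:
	 *	raise_softirq_irqoff(HRTIMER_SOFTIRQ);
	 */
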
+
#ifdef CONFIG_HIGH_RES_TIMERS
/*
@@ -1287,13 +1441,14 @@ void hrtimer_interrupt(struct clock_event_device *dev)
{
struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
ktime_t expires_next, now, entry_time, delta;
+ unsigned long flags;
int retries = 0;
BUG_ON(!cpu_base->hres_active);
cpu_base->nr_events++;
dev->next_event = KTIME_MAX;
- raw_spin_lock(&cpu_base->lock);
+ raw_spin_lock_irqsave(&cpu_base->lock, flags);
entry_time = now = hrtimer_update_base(cpu_base);
retry:
cpu_base->in_hrtirq = 1;
@@ -1306,17 +1461,23 @@ retry:
*/
cpu_base->expires_next = KTIME_MAX;
- __hrtimer_run_queues(cpu_base, now);
+ if (!ktime_before(now, cpu_base->softirq_expires_next)) {
+ cpu_base->softirq_expires_next = KTIME_MAX;
+ cpu_base->softirq_activated = 1;
+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+ }
+
+ __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
/* Reevaluate the clock bases for the next expiry */
- expires_next = __hrtimer_get_next_event(cpu_base);
+ expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
/*
* Store the new expiry value so the migration code can verify
* against it.
*/
cpu_base->expires_next = expires_next;
cpu_base->in_hrtirq = 0;
- raw_spin_unlock(&cpu_base->lock);
+ raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
/* Reprogramming necessary ? */
if (!tick_program_event(expires_next, 0)) {
@@ -1337,7 +1498,7 @@ retry:
* Acquire base lock for updating the offsets and retrieving
* the current time.
*/
- raw_spin_lock(&cpu_base->lock);
+ raw_spin_lock_irqsave(&cpu_base->lock, flags);
now = hrtimer_update_base(cpu_base);
cpu_base->nr_retries++;
if (++retries < 3)
@@ -1350,7 +1511,8 @@ retry:
*/
cpu_base->nr_hangs++;
cpu_base->hang_detected = 1;
- raw_spin_unlock(&cpu_base->lock);
+ raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
+
delta = ktime_sub(now, entry_time);
if ((unsigned int)delta > cpu_base->max_hang_time)
cpu_base->max_hang_time = (unsigned int) delta;
@@ -1392,6 +1554,7 @@ static inline void __hrtimer_peek_ahead_timers(void) { }
void hrtimer_run_queues(void)
{
struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
+ unsigned long flags;
ktime_t now;
if (__hrtimer_hres_active(cpu_base))
@@ -1409,10 +1572,17 @@ void hrtimer_run_queues(void)
return;
}
- raw_spin_lock(&cpu_base->lock);
+ raw_spin_lock_irqsave(&cpu_base->lock, flags);
now = hrtimer_update_base(cpu_base);
- __hrtimer_run_queues(cpu_base, now);
- raw_spin_unlock(&cpu_base->lock);
+
+ if (!ktime_before(now, cpu_base->softirq_expires_next)) {
+ cpu_base->softirq_expires_next = KTIME_MAX;
+ cpu_base->softirq_activated = 1;
+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+ }
+
+ __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
+ raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
}
/*
@@ -1590,7 +1760,13 @@ int hrtimers_prepare_cpu(unsigned int cpu)
}
cpu_base->cpu = cpu;
- hrtimer_init_hres(cpu_base);
+ cpu_base->active_bases = 0;
+ cpu_base->hres_active = 0;
+ cpu_base->hang_detected = 0;
+ cpu_base->next_timer = NULL;
+ cpu_base->softirq_next_timer = NULL;
+ cpu_base->expires_next = KTIME_MAX;
+ cpu_base->softirq_expires_next = KTIME_MAX;
return 0;
}
@@ -1622,7 +1798,7 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
* sort out already expired timers and reprogram the
* event device.
*/
- enqueue_hrtimer(timer, new_base);
+ enqueue_hrtimer(timer, new_base, HRTIMER_MODE_ABS);
}
}
@@ -1634,6 +1810,12 @@ int hrtimers_dead_cpu(unsigned int scpu)
BUG_ON(cpu_online(scpu));
tick_cancel_sched_timer(scpu);
+ /*
+ * This BH disable ensures that raise_softirq_irqoff() does
+ * not wake up ksoftirqd (and acquire the pi-lock) while
+ * holding the cpu_base lock.
+ */
+ local_bh_disable();
local_irq_disable();
old_base = &per_cpu(hrtimer_bases, scpu);
new_base = this_cpu_ptr(&hrtimer_bases);
@@ -1649,12 +1831,19 @@ int hrtimers_dead_cpu(unsigned int scpu)
&new_base->clock_base[i]);
}
+ /*
+ * The migration might have changed the first expiring softirq
+ * timer on this CPU. Update it.
+ */
+ hrtimer_update_softirq_timer(new_base, false);
+
raw_spin_unlock(&old_base->lock);
raw_spin_unlock(&new_base->lock);
/* Check, if we got expired work to do */
__hrtimer_peek_ahead_timers();
local_irq_enable();
+ local_bh_enable();
return 0;
}
@@ -1663,18 +1852,19 @@ int hrtimers_dead_cpu(unsigned int scpu)
void __init hrtimers_init(void)
{
hrtimers_prepare_cpu(smp_processor_id());
+ open_softirq(HRTIMER_SOFTIRQ, hrtimer_run_softirq);
}
/**
* schedule_hrtimeout_range_clock - sleep until timeout
* @expires: timeout value (ktime_t)
* @delta: slack in expires timeout (ktime_t)
- * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
- * @clock: timer clock, CLOCK_MONOTONIC or CLOCK_REALTIME
+ * @mode: timer mode
+ * @clock_id: timer clock to be used
*/
int __sched
schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
- const enum hrtimer_mode mode, int clock)
+ const enum hrtimer_mode mode, clockid_t clock_id)
{
struct hrtimer_sleeper t;
@@ -1695,7 +1885,7 @@ schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
return -EINTR;
}
- hrtimer_init_on_stack(&t.timer, clock, mode);
+ hrtimer_init_on_stack(&t.timer, clock_id, mode);
hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
hrtimer_init_sleeper(&t, current);
@@ -1717,7 +1907,7 @@ schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
* schedule_hrtimeout_range - sleep until timeout
* @expires: timeout value (ktime_t)
* @delta: slack in expires timeout (ktime_t)
- * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
+ * @mode: timer mode
*
* Make the current task sleep until the given expiry time has
* elapsed. The routine will return immediately unless
@@ -1756,7 +1946,7 @@ EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
/**
* schedule_hrtimeout - sleep until timeout
* @expires: timeout value (ktime_t)
- * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
+ * @mode: timer mode
*
* Make the current task sleep until the given expiry time has
* elapsed. The routine will return immediately unless
diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
index 17cdc554c9fe..94ad46d50b56 100644
--- a/kernel/time/posix-clock.c
+++ b/kernel/time/posix-clock.c
@@ -68,10 +68,10 @@ static ssize_t posix_clock_read(struct file *fp, char __user *buf,
return err;
}
-static unsigned int posix_clock_poll(struct file *fp, poll_table *wait)
+static __poll_t posix_clock_poll(struct file *fp, poll_table *wait)
{
struct posix_clock *clk = get_posix_clock(fp);
- unsigned int result = 0;
+ __poll_t result = 0;
if (!clk)
return POLLERR;
@@ -216,7 +216,7 @@ struct posix_clock_desc {
static int get_clock_desc(const clockid_t id, struct posix_clock_desc *cd)
{
- struct file *fp = fget(CLOCKID_TO_FD(id));
+ struct file *fp = fget(clockid_to_fd(id));
int err = -EINVAL;
if (!fp)
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 1f27887aa194..2541bd89f20e 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -14,6 +14,7 @@
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/compat.h>
+#include <linux/sched/deadline.h>
#include "posix-timers.h"
@@ -791,6 +792,14 @@ check_timers_list(struct list_head *timers,
return 0;
}
+static inline void check_dl_overrun(struct task_struct *tsk)
+{
+ if (tsk->dl.dl_overrun) {
+ tsk->dl.dl_overrun = 0;
+ __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
+ }
+}
+
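From user space this surfaces as SIGXCPU on a SCHED_DEADLINE runtime overrun,
provided the task opted in. A hedged sketch, assuming the
SCHED_FLAG_DL_OVERRUN flag and the uapi struct sched_attr from the same patch
series:

	#include <signal.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/sched.h>
	#include <linux/sched/types.h>

	static void on_overrun(int sig)
	{
		/* Kernel reported a deadline runtime overrun. */
	}

	int main(void)
	{
		struct sched_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.sched_policy = SCHED_DEADLINE;
		attr.sched_flags = SCHED_FLAG_DL_OVERRUN;	/* assumed flag */
		attr.sched_runtime = 2 * 1000 * 1000;		/* 2 ms */
		attr.sched_deadline = 10 * 1000 * 1000;		/* 10 ms */
		attr.sched_period = 10 * 1000 * 1000;

		signal(SIGXCPU, on_overrun);
		if (syscall(SYS_sched_setattr, 0, &attr, 0))
			return 1;
		/* ... periodic work that may exceed its runtime budget ... */
		return 0;
	}
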
/*
* Check for any per-thread CPU timers that have fired and move them off
* the tsk->cpu_timers[N] list onto the firing list. Here we update the
@@ -804,6 +813,9 @@ static void check_thread_timers(struct task_struct *tsk,
u64 expires;
unsigned long soft;
+ if (dl_task(tsk))
+ check_dl_overrun(tsk);
+
/*
* If cputime_expires is zero, then there are no active
* per thread CPU timers.
@@ -906,6 +918,9 @@ static void check_process_timers(struct task_struct *tsk,
struct task_cputime cputime;
unsigned long soft;
+ if (dl_task(tsk))
+ check_dl_overrun(tsk);
+
/*
* If cputimer is not running, then there are no active
* process wide timers (POSIX 1.b, itimers, RLIMIT_CPU).
@@ -1111,6 +1126,9 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
return 1;
}
+ if (dl_task(tsk) && tsk->dl.dl_overrun)
+ return 1;
+
return 0;
}
@@ -1189,9 +1207,8 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
u64 now;
WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
- cpu_timer_sample_group(clock_idx, tsk, &now);
- if (oldval) {
+ if (oldval && cpu_timer_sample_group(clock_idx, tsk, &now) != -EINVAL) {
/*
* We are setting itimer. The *oldval is absolute and we update
* it to be relative, *newval argument is relative and we update
@@ -1363,8 +1380,8 @@ static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
return do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t);
}
-#define PROCESS_CLOCK MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
-#define THREAD_CLOCK MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)
+#define PROCESS_CLOCK make_process_cpuclock(0, CPUCLOCK_SCHED)
+#define THREAD_CLOCK make_thread_cpuclock(0, CPUCLOCK_SCHED)
static int process_cpu_clock_getres(const clockid_t which_clock,
struct timespec64 *tp)
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index ec999f32c840..75043046914e 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -462,7 +462,7 @@ static struct k_itimer * alloc_posix_timer(void)
kmem_cache_free(posix_timers_cache, tmr);
return NULL;
}
- memset(&tmr->sigq->info, 0, sizeof(siginfo_t));
+ clear_siginfo(&tmr->sigq->info);
return tmr;
}
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index f8e1845aa464..e277284c2831 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -150,16 +150,15 @@ static inline void tick_nohz_init(void) { }
#ifdef CONFIG_NO_HZ_COMMON
extern unsigned long tick_nohz_active;
-#else
+extern void timers_update_nohz(void);
+# ifdef CONFIG_SMP
+extern struct static_key_false timers_migration_enabled;
+# endif
+#else /* CONFIG_NO_HZ_COMMON */
+static inline void timers_update_nohz(void) { }
#define tick_nohz_active (0)
#endif
-#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
-extern void timers_update_migration(bool update_nohz);
-#else
-static inline void timers_update_migration(bool update_nohz) { }
-#endif
-
DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
extern u64 get_next_timer_interrupt(unsigned long basej, u64 basem);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index f7cc7abfcf25..29a5733eff83 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -1107,7 +1107,7 @@ static inline void tick_nohz_activate(struct tick_sched *ts, int mode)
ts->nohz_mode = mode;
/* One update is enough */
if (!test_and_set_bit(0, &tick_nohz_active))
- timers_update_migration(true);
+ timers_update_nohz();
}
/**
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 0bcf00e3ce48..48150ab42de9 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -200,8 +200,6 @@ struct timer_base {
unsigned long clk;
unsigned long next_expiry;
unsigned int cpu;
- bool migration_enabled;
- bool nohz_active;
bool is_idle;
bool must_forward_clk;
DECLARE_BITMAP(pending_map, WHEEL_SIZE);
@@ -210,45 +208,64 @@ struct timer_base {
static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
-#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+#ifdef CONFIG_NO_HZ_COMMON
+
+static DEFINE_STATIC_KEY_FALSE(timers_nohz_active);
+static DEFINE_MUTEX(timer_keys_mutex);
+
+static void timer_update_keys(struct work_struct *work);
+static DECLARE_WORK(timer_update_work, timer_update_keys);
+
+#ifdef CONFIG_SMP
unsigned int sysctl_timer_migration = 1;
-void timers_update_migration(bool update_nohz)
+DEFINE_STATIC_KEY_FALSE(timers_migration_enabled);
+
+static void timers_update_migration(void)
{
- bool on = sysctl_timer_migration && tick_nohz_active;
- unsigned int cpu;
+ if (sysctl_timer_migration && tick_nohz_active)
+ static_branch_enable(&timers_migration_enabled);
+ else
+ static_branch_disable(&timers_migration_enabled);
+}
+#else
+static inline void timers_update_migration(void) { }
+#endif /* !CONFIG_SMP */
- /* Avoid the loop, if nothing to update */
- if (this_cpu_read(timer_bases[BASE_STD].migration_enabled) == on)
- return;
+static void timer_update_keys(struct work_struct *work)
+{
+ mutex_lock(&timer_keys_mutex);
+ timers_update_migration();
+ static_branch_enable(&timers_nohz_active);
+ mutex_unlock(&timer_keys_mutex);
+}
- for_each_possible_cpu(cpu) {
- per_cpu(timer_bases[BASE_STD].migration_enabled, cpu) = on;
- per_cpu(timer_bases[BASE_DEF].migration_enabled, cpu) = on;
- per_cpu(hrtimer_bases.migration_enabled, cpu) = on;
- if (!update_nohz)
- continue;
- per_cpu(timer_bases[BASE_STD].nohz_active, cpu) = true;
- per_cpu(timer_bases[BASE_DEF].nohz_active, cpu) = true;
- per_cpu(hrtimer_bases.nohz_active, cpu) = true;
- }
+void timers_update_nohz(void)
+{
+ schedule_work(&timer_update_work);
}
int timer_migration_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
{
- static DEFINE_MUTEX(mutex);
int ret;
- mutex_lock(&mutex);
+ mutex_lock(&timer_keys_mutex);
ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (!ret && write)
- timers_update_migration(false);
- mutex_unlock(&mutex);
+ timers_update_migration();
+ mutex_unlock(&timer_keys_mutex);
return ret;
}
-#endif
+
+static inline bool is_timers_nohz_active(void)
+{
+ return static_branch_unlikely(&timers_nohz_active);
+}
+#else
+static inline bool is_timers_nohz_active(void) { return false; }
+#endif /* NO_HZ_COMMON */
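
The conversion to static keys is worth spelling out: static_branch_enable()
may sleep because it patches kernel text, which is why timers_update_nohz()
defers the flip to a work item instead of doing it from the (atomic) enqueue
path. A minimal sketch of this pattern with hypothetical names:

	#include <linux/jump_label.h>
	#include <linux/workqueue.h>

	static DEFINE_STATIC_KEY_FALSE(my_feature_key);

	static void my_key_update(struct work_struct *work)
	{
		static_branch_enable(&my_feature_key);	/* may sleep */
	}
	static DECLARE_WORK(my_key_work, my_key_update);

	void my_feature_request(void)	/* safe from atomic context */
	{
		schedule_work(&my_key_work);
	}

	bool my_feature_active(void)
	{
		return static_branch_unlikely(&my_feature_key);
	}
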
static unsigned long round_jiffies_common(unsigned long j, int cpu,
bool force_up)
@@ -534,7 +551,7 @@ __internal_add_timer(struct timer_base *base, struct timer_list *timer)
static void
trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
{
- if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
+ if (!is_timers_nohz_active())
return;
/*
@@ -849,21 +866,20 @@ static inline struct timer_base *get_timer_base(u32 tflags)
return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
}
-#ifdef CONFIG_NO_HZ_COMMON
static inline struct timer_base *
get_target_base(struct timer_base *base, unsigned tflags)
{
-#ifdef CONFIG_SMP
- if ((tflags & TIMER_PINNED) || !base->migration_enabled)
- return get_timer_this_cpu_base(tflags);
- return get_timer_cpu_base(tflags, get_nohz_timer_target());
-#else
- return get_timer_this_cpu_base(tflags);
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+ if (static_branch_likely(&timers_migration_enabled) &&
+ !(tflags & TIMER_PINNED))
+ return get_timer_cpu_base(tflags, get_nohz_timer_target());
#endif
+ return get_timer_this_cpu_base(tflags);
}
static inline void forward_timer_base(struct timer_base *base)
{
+#ifdef CONFIG_NO_HZ_COMMON
unsigned long jnow;
/*
@@ -887,16 +903,8 @@ static inline void forward_timer_base(struct timer_base *base)
base->clk = jnow;
else
base->clk = base->next_expiry;
-}
-#else
-static inline struct timer_base *
-get_target_base(struct timer_base *base, unsigned tflags)
-{
- return get_timer_this_cpu_base(tflags);
-}
-
-static inline void forward_timer_base(struct timer_base *base) { }
#endif
+}
/*
diff --git a/kernel/torture.c b/kernel/torture.c
index 637e172835d8..37b94012a3f8 100644
--- a/kernel/torture.c
+++ b/kernel/torture.c
@@ -47,6 +47,7 @@
#include <linux/ktime.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
+#include "rcu/rcu.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");
@@ -60,7 +61,6 @@ static bool verbose;
#define FULLSTOP_RMMOD 2 /* Normal rmmod of torture. */
static int fullstop = FULLSTOP_RMMOD;
static DEFINE_MUTEX(fullstop_mutex);
-static int *torture_runnable;
#ifdef CONFIG_HOTPLUG_CPU
@@ -500,7 +500,7 @@ static int torture_shutdown(void *arg)
torture_shutdown_hook();
else
VERBOSE_TOROUT_STRING("No torture_shutdown_hook(), skipping.");
- ftrace_dump(DUMP_ALL);
+ rcu_ftrace_dump(DUMP_ALL);
kernel_power_off(); /* Shut down the system. */
return 0;
}
@@ -572,17 +572,19 @@ static int stutter;
*/
void stutter_wait(const char *title)
{
+ int spt;
+
cond_resched_rcu_qs();
- while (READ_ONCE(stutter_pause_test) ||
- (torture_runnable && !READ_ONCE(*torture_runnable))) {
- if (stutter_pause_test)
- if (READ_ONCE(stutter_pause_test) == 1)
- schedule_timeout_interruptible(1);
- else
- while (READ_ONCE(stutter_pause_test))
- cond_resched();
- else
+ spt = READ_ONCE(stutter_pause_test);
+ for (; spt; spt = READ_ONCE(stutter_pause_test)) {
+ if (spt == 1) {
+ schedule_timeout_interruptible(1);
+ } else if (spt == 2) {
+ while (READ_ONCE(stutter_pause_test))
+ cond_resched();
+ } else {
schedule_timeout_interruptible(round_jiffies_relative(HZ));
+ }
torture_shutdown_absorb(title);
}
}
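
The reworked wait loop pairs with torture_stutter() below through a small
three-valued flag; the timeline, assuming a stutter period of N jiffies,
looks roughly like this:

	/* torture_stutter() writer side (assumed period N > 1 jiffies):
	 *
	 *	stutter_pause_test = 1;		sleep N-1	(waiters doze)
	 *	stutter_pause_test = 2;		sleep 1		(waiters spin hot)
	 *	stutter_pause_test = 0;		sleep N		(test runs freely)
	 *
	 * stutter_wait() picks the matching behaviour for each value and
	 * falls back to a ~1 second sleep for any other non-zero value.
	 */
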
@@ -596,17 +598,15 @@ static int torture_stutter(void *arg)
{
VERBOSE_TOROUT_STRING("torture_stutter task started");
do {
- if (!torture_must_stop()) {
- if (stutter > 1) {
- schedule_timeout_interruptible(stutter - 1);
- WRITE_ONCE(stutter_pause_test, 2);
- }
- schedule_timeout_interruptible(1);
+ if (!torture_must_stop() && stutter > 1) {
WRITE_ONCE(stutter_pause_test, 1);
+ schedule_timeout_interruptible(stutter - 1);
+ WRITE_ONCE(stutter_pause_test, 2);
+ schedule_timeout_interruptible(1);
}
+ WRITE_ONCE(stutter_pause_test, 0);
if (!torture_must_stop())
schedule_timeout_interruptible(stutter);
- WRITE_ONCE(stutter_pause_test, 0);
torture_shutdown_absorb("torture_stutter");
} while (!torture_must_stop());
torture_kthread_stopping("torture_stutter");
@@ -647,7 +647,7 @@ static void torture_stutter_cleanup(void)
* The runnable parameter points to a flag that controls whether or not
* the test is currently runnable. If there is no such flag, pass in NULL.
*/
-bool torture_init_begin(char *ttype, bool v, int *runnable)
+bool torture_init_begin(char *ttype, bool v)
{
mutex_lock(&fullstop_mutex);
if (torture_type != NULL) {
@@ -659,7 +659,6 @@ bool torture_init_begin(char *ttype, bool v, int *runnable)
}
torture_type = ttype;
verbose = v;
- torture_runnable = runnable;
fullstop = FULLSTOP_DONTSTOP;
return true;
}
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 5af2842dea96..ca6930e0d25e 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -630,7 +630,7 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
* Returns POLLIN | POLLRDNORM if data exists in the buffers,
* zero otherwise.
*/
-int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
+__poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
struct file *filp, poll_table *poll_table)
{
struct ring_buffer_per_cpu *cpu_buffer;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8e3f20a18a06..32c069bbf41b 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2689,17 +2689,6 @@ void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
if (unlikely(in_nmi()))
return;
- /*
- * It is possible that a function is being traced in a
- * location that RCU is not watching. A call to
- * rcu_irq_enter() will make sure that it is, but there's
- * a few internal rcu functions that could be traced
- * where that wont work either. In those cases, we just
- * do nothing.
- */
- if (unlikely(rcu_irq_enter_disabled()))
- return;
-
rcu_irq_enter_irqson();
__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
rcu_irq_exit_irqson();
@@ -5627,7 +5616,7 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
return 0;
}
-static unsigned int
+static __poll_t
trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
{
struct trace_array *tr = iter->tr;
@@ -5646,7 +5635,7 @@ trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_tabl
filp, poll_table);
}
-static unsigned int
+static __poll_t
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
struct trace_iterator *iter = filp->private_data;
@@ -6600,7 +6589,7 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
return ret;
}
-static unsigned int
+static __poll_t
tracing_buffers_poll(struct file *filp, poll_table *poll_table)
{
struct ftrace_buffer_info *info = filp->private_data;
diff --git a/kernel/trace/trace_benchmark.c b/kernel/trace/trace_benchmark.c
index 79f838a75077..22fee766081b 100644
--- a/kernel/trace/trace_benchmark.c
+++ b/kernel/trace/trace_benchmark.c
@@ -165,7 +165,7 @@ static int benchmark_event_kthread(void *arg)
* this thread will never voluntarily schedule which would
* block synchronize_rcu_tasks() indefinitely.
*/
- cond_resched_rcu_qs();
+ cond_resched();
}
return 0;
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index 685c50ae6300..671b13457387 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -212,11 +212,10 @@ static int tracepoint_add_func(struct tracepoint *tp,
}
/*
- * rcu_assign_pointer has a smp_wmb() which makes sure that the new
- * probe callbacks array is consistent before setting a pointer to it.
- * This array is referenced by __DO_TRACE from
- * include/linux/tracepoints.h. A matching smp_read_barrier_depends()
- * is used.
+ * rcu_assign_pointer has an smp_store_release() which makes sure
+ * that the new probe callbacks array is consistent before setting
+ * a pointer to it. This array is referenced by __DO_TRACE from
+ * include/linux/tracepoint.h using rcu_dereference_sched().
*/
rcu_assign_pointer(tp->funcs, tp_funcs);
if (!static_key_enabled(&tp->key))
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f699122dab32..8dd2e66e8383 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2136,7 +2136,7 @@ __acquires(&pool->lock)
* stop_machine. At the same time, report a quiescent RCU state so
* the same condition doesn't freeze RCU.
*/
- cond_resched_rcu_qs();
+ cond_resched();
spin_lock_irq(&pool->lock);
@@ -3940,6 +3940,37 @@ static int wq_clamp_max_active(int max_active, unsigned int flags,
return clamp_val(max_active, 1, lim);
}
+/*
+ * Workqueues which may be used during memory reclaim should have a rescuer
+ * to guarantee forward progress.
+ */
+static int init_rescuer(struct workqueue_struct *wq)
+{
+ struct worker *rescuer;
+ int ret;
+
+ if (!(wq->flags & WQ_MEM_RECLAIM))
+ return 0;
+
+ rescuer = alloc_worker(NUMA_NO_NODE);
+ if (!rescuer)
+ return -ENOMEM;
+
+ rescuer->rescue_wq = wq;
+ rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", wq->name);
+ ret = PTR_ERR_OR_ZERO(rescuer->task);
+ if (ret) {
+ kfree(rescuer);
+ return ret;
+ }
+
+ wq->rescuer = rescuer;
+ kthread_bind_mask(rescuer->task, cpu_possible_mask);
+ wake_up_process(rescuer->task);
+
+ return 0;
+}
+
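Nothing changes for workqueue users: a WQ_MEM_RECLAIM workqueue still gets
its rescuer implicitly, now via init_rescuer(). A hedged usage sketch with
illustrative names:

	static struct workqueue_struct *my_wq;

	static int my_setup(void)
	{
		my_wq = alloc_workqueue("my_reclaim_wq", WQ_MEM_RECLAIM, 0);
		if (!my_wq)
			return -ENOMEM;
		/* init_rescuer() has attached a rescuer thread behind the
		 * scenes, guaranteeing forward progress even when worker
		 * creation stalls under memory pressure. */
		return 0;
	}
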
struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
unsigned int flags,
int max_active,
@@ -4002,29 +4033,8 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
if (alloc_and_link_pwqs(wq) < 0)
goto err_free_wq;
- /*
- * Workqueues which may be used during memory reclaim should
- * have a rescuer to guarantee forward progress.
- */
- if (flags & WQ_MEM_RECLAIM) {
- struct worker *rescuer;
-
- rescuer = alloc_worker(NUMA_NO_NODE);
- if (!rescuer)
- goto err_destroy;
-
- rescuer->rescue_wq = wq;
- rescuer->task = kthread_create(rescuer_thread, rescuer, "%s",
- wq->name);
- if (IS_ERR(rescuer->task)) {
- kfree(rescuer);
- goto err_destroy;
- }
-
- wq->rescuer = rescuer;
- kthread_bind_mask(rescuer->task, cpu_possible_mask);
- wake_up_process(rescuer->task);
- }
+ if (wq_online && init_rescuer(wq) < 0)
+ goto err_destroy;
if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
goto err_destroy;
@@ -5642,6 +5652,8 @@ int __init workqueue_init(void)
* archs such as power and arm64. As per-cpu pools created
* previously could be missing node hint and unbound pools NUMA
* affinity, fix them up.
+ *
+ * Also, while iterating workqueues, create rescuers if requested.
*/
wq_numa_init();
@@ -5653,8 +5665,12 @@ int __init workqueue_init(void)
}
}
- list_for_each_entry(wq, &workqueues, list)
+ list_for_each_entry(wq, &workqueues, list) {
wq_update_unbound_numa(wq, smp_processor_id(), true);
+ WARN(init_rescuer(wq),
+ "workqueue: failed to create early rescuer for %s",
+ wq->name);
+ }
mutex_unlock(&wq_pool_mutex);
diff --git a/lib/Kconfig b/lib/Kconfig
index c5e84fbcb30b..e96089499371 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -409,7 +409,11 @@ config HAS_DMA
depends on !NO_DMA
default y
-config DMA_NOOP_OPS
+config SGL_ALLOC
+ bool
+ default n
+
+config DMA_DIRECT_OPS
bool
depends on HAS_DMA && (!64BIT || ARCH_DMA_ADDR_T_64BIT)
default n
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 890d4766cef3..64d7c19d3167 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1966,7 +1966,7 @@ config STRICT_DEVMEM
bool "Filter access to /dev/mem"
depends on MMU && DEVMEM
depends on ARCH_HAS_DEVMEM_IS_ALLOWED
- default y if TILE || PPC
+ default y if TILE || PPC || X86 || ARM64
---help---
If this option is disabled, you allow userspace (root) access to all
of memory, including kernel and userspace memory. Accidental
diff --git a/lib/Makefile b/lib/Makefile
index 75ec13778cd8..7adb066692b3 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -28,7 +28,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
lib-$(CONFIG_MMU) += ioremap.o
lib-$(CONFIG_SMP) += cpumask.o
-lib-$(CONFIG_DMA_NOOP_OPS) += dma-noop.o
+lib-$(CONFIG_DMA_DIRECT_OPS) += dma-direct.o
lib-$(CONFIG_DMA_VIRT_OPS) += dma-virt.o
lib-y += kobject.o klist.o
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index b77d51da8c73..c6659cb37033 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -38,12 +38,10 @@ begin_node:
if (assoc_array_ptr_is_shortcut(cursor)) {
/* Descend through a shortcut */
shortcut = assoc_array_ptr_to_shortcut(cursor);
- smp_read_barrier_depends();
- cursor = READ_ONCE(shortcut->next_node);
+ cursor = READ_ONCE(shortcut->next_node); /* Address dependency. */
}
node = assoc_array_ptr_to_node(cursor);
- smp_read_barrier_depends();
slot = 0;
/* We perform two passes of each node.
@@ -55,15 +53,12 @@ begin_node:
*/
has_meta = 0;
for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
- ptr = READ_ONCE(node->slots[slot]);
+ ptr = READ_ONCE(node->slots[slot]); /* Address dependency. */
has_meta |= (unsigned long)ptr;
if (ptr && assoc_array_ptr_is_leaf(ptr)) {
- /* We need a barrier between the read of the pointer
- * and dereferencing the pointer - but only if we are
- * actually going to dereference it.
+ /* The barrier between the read of the pointer and
+ * dereferencing it is supplied by the above READ_ONCE().
*/
- smp_read_barrier_depends();
-
/* Invoke the callback */
ret = iterator(assoc_array_ptr_to_leaf(ptr),
iterator_data);
@@ -86,10 +81,8 @@ begin_node:
continue_node:
node = assoc_array_ptr_to_node(cursor);
- smp_read_barrier_depends();
-
for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
- ptr = READ_ONCE(node->slots[slot]);
+ ptr = READ_ONCE(node->slots[slot]); /* Address dependency. */
if (assoc_array_ptr_is_meta(ptr)) {
cursor = ptr;
goto begin_node;
@@ -98,16 +91,15 @@ continue_node:
finished_node:
/* Move up to the parent (may need to skip back over a shortcut) */
- parent = READ_ONCE(node->back_pointer);
+ parent = READ_ONCE(node->back_pointer); /* Address dependency. */
slot = node->parent_slot;
if (parent == stop)
return 0;
if (assoc_array_ptr_is_shortcut(parent)) {
shortcut = assoc_array_ptr_to_shortcut(parent);
- smp_read_barrier_depends();
cursor = parent;
- parent = READ_ONCE(shortcut->back_pointer);
+ parent = READ_ONCE(shortcut->back_pointer); /* Address dependency. */
slot = shortcut->parent_slot;
if (parent == stop)
return 0;
@@ -147,7 +139,7 @@ int assoc_array_iterate(const struct assoc_array *array,
void *iterator_data),
void *iterator_data)
{
- struct assoc_array_ptr *root = READ_ONCE(array->root);
+ struct assoc_array_ptr *root = READ_ONCE(array->root); /* Address dependency. */
if (!root)
return 0;
@@ -194,7 +186,7 @@ assoc_array_walk(const struct assoc_array *array,
pr_devel("-->%s()\n", __func__);
- cursor = READ_ONCE(array->root);
+ cursor = READ_ONCE(array->root); /* Address dependency. */
if (!cursor)
return assoc_array_walk_tree_empty;
@@ -216,11 +208,9 @@ jumped:
consider_node:
node = assoc_array_ptr_to_node(cursor);
- smp_read_barrier_depends();
-
slot = segments >> (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
slot &= ASSOC_ARRAY_FAN_MASK;
- ptr = READ_ONCE(node->slots[slot]);
+ ptr = READ_ONCE(node->slots[slot]); /* Address dependency. */
pr_devel("consider slot %x [ix=%d type=%lu]\n",
slot, level, (unsigned long)ptr & 3);
@@ -254,7 +244,6 @@ consider_node:
cursor = ptr;
follow_shortcut:
shortcut = assoc_array_ptr_to_shortcut(cursor);
- smp_read_barrier_depends();
pr_devel("shortcut to %d\n", shortcut->skip_to_level);
sc_level = level + ASSOC_ARRAY_LEVEL_STEP;
BUG_ON(sc_level > shortcut->skip_to_level);
@@ -294,7 +283,7 @@ follow_shortcut:
} while (sc_level < shortcut->skip_to_level);
/* The shortcut matches the leaf's index to this point. */
- cursor = READ_ONCE(shortcut->next_node);
+ cursor = READ_ONCE(shortcut->next_node); /* Address dependency. */
if (((level ^ sc_level) & ~ASSOC_ARRAY_KEY_CHUNK_MASK) != 0) {
level = sc_level;
goto jumped;
@@ -331,20 +320,18 @@ void *assoc_array_find(const struct assoc_array *array,
return NULL;
node = result.terminal_node.node;
- smp_read_barrier_depends();
/* If the target key is available to us, it has to be pointed to by
* the terminal node.
*/
for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
- ptr = READ_ONCE(node->slots[slot]);
+ ptr = READ_ONCE(node->slots[slot]); /* Address dependency. */
if (ptr && assoc_array_ptr_is_leaf(ptr)) {
/* We need a barrier between the read of the pointer
* and dereferencing the pointer - but only if we are
* actually going to dereference it.
*/
leaf = assoc_array_ptr_to_leaf(ptr);
- smp_read_barrier_depends();
if (ops->compare_object(leaf, index_key))
return (void *)leaf;
}
diff --git a/lib/chacha20.c b/lib/chacha20.c
index 250ceed9ec9a..c1cc50fb68c9 100644
--- a/lib/chacha20.c
+++ b/lib/chacha20.c
@@ -16,12 +16,7 @@
#include <asm/unaligned.h>
#include <crypto/chacha20.h>
-static inline u32 rotl32(u32 v, u8 n)
-{
- return (v << n) | (v >> (sizeof(v) * 8 - n));
-}
-
-extern void chacha20_block(u32 *state, void *stream)
+void chacha20_block(u32 *state, u32 *stream)
{
u32 x[16], *out = stream;
int i;
@@ -30,45 +25,45 @@ extern void chacha20_block(u32 *state, void *stream)
x[i] = state[i];
for (i = 0; i < 20; i += 2) {
- x[0] += x[4]; x[12] = rotl32(x[12] ^ x[0], 16);
- x[1] += x[5]; x[13] = rotl32(x[13] ^ x[1], 16);
- x[2] += x[6]; x[14] = rotl32(x[14] ^ x[2], 16);
- x[3] += x[7]; x[15] = rotl32(x[15] ^ x[3], 16);
+ x[0] += x[4]; x[12] = rol32(x[12] ^ x[0], 16);
+ x[1] += x[5]; x[13] = rol32(x[13] ^ x[1], 16);
+ x[2] += x[6]; x[14] = rol32(x[14] ^ x[2], 16);
+ x[3] += x[7]; x[15] = rol32(x[15] ^ x[3], 16);
- x[8] += x[12]; x[4] = rotl32(x[4] ^ x[8], 12);
- x[9] += x[13]; x[5] = rotl32(x[5] ^ x[9], 12);
- x[10] += x[14]; x[6] = rotl32(x[6] ^ x[10], 12);
- x[11] += x[15]; x[7] = rotl32(x[7] ^ x[11], 12);
+ x[8] += x[12]; x[4] = rol32(x[4] ^ x[8], 12);
+ x[9] += x[13]; x[5] = rol32(x[5] ^ x[9], 12);
+ x[10] += x[14]; x[6] = rol32(x[6] ^ x[10], 12);
+ x[11] += x[15]; x[7] = rol32(x[7] ^ x[11], 12);
- x[0] += x[4]; x[12] = rotl32(x[12] ^ x[0], 8);
- x[1] += x[5]; x[13] = rotl32(x[13] ^ x[1], 8);
- x[2] += x[6]; x[14] = rotl32(x[14] ^ x[2], 8);
- x[3] += x[7]; x[15] = rotl32(x[15] ^ x[3], 8);
+ x[0] += x[4]; x[12] = rol32(x[12] ^ x[0], 8);
+ x[1] += x[5]; x[13] = rol32(x[13] ^ x[1], 8);
+ x[2] += x[6]; x[14] = rol32(x[14] ^ x[2], 8);
+ x[3] += x[7]; x[15] = rol32(x[15] ^ x[3], 8);
- x[8] += x[12]; x[4] = rotl32(x[4] ^ x[8], 7);
- x[9] += x[13]; x[5] = rotl32(x[5] ^ x[9], 7);
- x[10] += x[14]; x[6] = rotl32(x[6] ^ x[10], 7);
- x[11] += x[15]; x[7] = rotl32(x[7] ^ x[11], 7);
+ x[8] += x[12]; x[4] = rol32(x[4] ^ x[8], 7);
+ x[9] += x[13]; x[5] = rol32(x[5] ^ x[9], 7);
+ x[10] += x[14]; x[6] = rol32(x[6] ^ x[10], 7);
+ x[11] += x[15]; x[7] = rol32(x[7] ^ x[11], 7);
- x[0] += x[5]; x[15] = rotl32(x[15] ^ x[0], 16);
- x[1] += x[6]; x[12] = rotl32(x[12] ^ x[1], 16);
- x[2] += x[7]; x[13] = rotl32(x[13] ^ x[2], 16);
- x[3] += x[4]; x[14] = rotl32(x[14] ^ x[3], 16);
+ x[0] += x[5]; x[15] = rol32(x[15] ^ x[0], 16);
+ x[1] += x[6]; x[12] = rol32(x[12] ^ x[1], 16);
+ x[2] += x[7]; x[13] = rol32(x[13] ^ x[2], 16);
+ x[3] += x[4]; x[14] = rol32(x[14] ^ x[3], 16);
- x[10] += x[15]; x[5] = rotl32(x[5] ^ x[10], 12);
- x[11] += x[12]; x[6] = rotl32(x[6] ^ x[11], 12);
- x[8] += x[13]; x[7] = rotl32(x[7] ^ x[8], 12);
- x[9] += x[14]; x[4] = rotl32(x[4] ^ x[9], 12);
+ x[10] += x[15]; x[5] = rol32(x[5] ^ x[10], 12);
+ x[11] += x[12]; x[6] = rol32(x[6] ^ x[11], 12);
+ x[8] += x[13]; x[7] = rol32(x[7] ^ x[8], 12);
+ x[9] += x[14]; x[4] = rol32(x[4] ^ x[9], 12);
- x[0] += x[5]; x[15] = rotl32(x[15] ^ x[0], 8);
- x[1] += x[6]; x[12] = rotl32(x[12] ^ x[1], 8);
- x[2] += x[7]; x[13] = rotl32(x[13] ^ x[2], 8);
- x[3] += x[4]; x[14] = rotl32(x[14] ^ x[3], 8);
+ x[0] += x[5]; x[15] = rol32(x[15] ^ x[0], 8);
+ x[1] += x[6]; x[12] = rol32(x[12] ^ x[1], 8);
+ x[2] += x[7]; x[13] = rol32(x[13] ^ x[2], 8);
+ x[3] += x[4]; x[14] = rol32(x[14] ^ x[3], 8);
- x[10] += x[15]; x[5] = rotl32(x[5] ^ x[10], 7);
- x[11] += x[12]; x[6] = rotl32(x[6] ^ x[11], 7);
- x[8] += x[13]; x[7] = rotl32(x[7] ^ x[8], 7);
- x[9] += x[14]; x[4] = rotl32(x[4] ^ x[9], 7);
+ x[10] += x[15]; x[5] = rol32(x[5] ^ x[10], 7);
+ x[11] += x[12]; x[6] = rol32(x[6] ^ x[11], 7);
+ x[8] += x[13]; x[7] = rol32(x[7] ^ x[8], 7);
+ x[9] += x[14]; x[4] = rol32(x[4] ^ x[9], 7);
}
for (i = 0; i < ARRAY_SIZE(x); i++)
diff --git a/lib/crc-ccitt.c b/lib/crc-ccitt.c
index 7f6dd68d2d09..d873b34039ff 100644
--- a/lib/crc-ccitt.c
+++ b/lib/crc-ccitt.c
@@ -51,8 +51,49 @@ u16 const crc_ccitt_table[256] = {
};
EXPORT_SYMBOL(crc_ccitt_table);
+/*
+ * Similar table to calculate the CRC16 variant known as CRC-CCITT-FALSE.
+ * Non-reflected (MSB-first) bit order, does not augment the final value.
+ */
+u16 const crc_ccitt_false_table[256] = {
+ 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50A5, 0x60C6, 0x70E7,
+ 0x8108, 0x9129, 0xA14A, 0xB16B, 0xC18C, 0xD1AD, 0xE1CE, 0xF1EF,
+ 0x1231, 0x0210, 0x3273, 0x2252, 0x52B5, 0x4294, 0x72F7, 0x62D6,
+ 0x9339, 0x8318, 0xB37B, 0xA35A, 0xD3BD, 0xC39C, 0xF3FF, 0xE3DE,
+ 0x2462, 0x3443, 0x0420, 0x1401, 0x64E6, 0x74C7, 0x44A4, 0x5485,
+ 0xA56A, 0xB54B, 0x8528, 0x9509, 0xE5EE, 0xF5CF, 0xC5AC, 0xD58D,
+ 0x3653, 0x2672, 0x1611, 0x0630, 0x76D7, 0x66F6, 0x5695, 0x46B4,
+ 0xB75B, 0xA77A, 0x9719, 0x8738, 0xF7DF, 0xE7FE, 0xD79D, 0xC7BC,
+ 0x48C4, 0x58E5, 0x6886, 0x78A7, 0x0840, 0x1861, 0x2802, 0x3823,
+ 0xC9CC, 0xD9ED, 0xE98E, 0xF9AF, 0x8948, 0x9969, 0xA90A, 0xB92B,
+ 0x5AF5, 0x4AD4, 0x7AB7, 0x6A96, 0x1A71, 0x0A50, 0x3A33, 0x2A12,
+ 0xDBFD, 0xCBDC, 0xFBBF, 0xEB9E, 0x9B79, 0x8B58, 0xBB3B, 0xAB1A,
+ 0x6CA6, 0x7C87, 0x4CE4, 0x5CC5, 0x2C22, 0x3C03, 0x0C60, 0x1C41,
+ 0xEDAE, 0xFD8F, 0xCDEC, 0xDDCD, 0xAD2A, 0xBD0B, 0x8D68, 0x9D49,
+ 0x7E97, 0x6EB6, 0x5ED5, 0x4EF4, 0x3E13, 0x2E32, 0x1E51, 0x0E70,
+ 0xFF9F, 0xEFBE, 0xDFDD, 0xCFFC, 0xBF1B, 0xAF3A, 0x9F59, 0x8F78,
+ 0x9188, 0x81A9, 0xB1CA, 0xA1EB, 0xD10C, 0xC12D, 0xF14E, 0xE16F,
+ 0x1080, 0x00A1, 0x30C2, 0x20E3, 0x5004, 0x4025, 0x7046, 0x6067,
+ 0x83B9, 0x9398, 0xA3FB, 0xB3DA, 0xC33D, 0xD31C, 0xE37F, 0xF35E,
+ 0x02B1, 0x1290, 0x22F3, 0x32D2, 0x4235, 0x5214, 0x6277, 0x7256,
+ 0xB5EA, 0xA5CB, 0x95A8, 0x8589, 0xF56E, 0xE54F, 0xD52C, 0xC50D,
+ 0x34E2, 0x24C3, 0x14A0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
+ 0xA7DB, 0xB7FA, 0x8799, 0x97B8, 0xE75F, 0xF77E, 0xC71D, 0xD73C,
+ 0x26D3, 0x36F2, 0x0691, 0x16B0, 0x6657, 0x7676, 0x4615, 0x5634,
+ 0xD94C, 0xC96D, 0xF90E, 0xE92F, 0x99C8, 0x89E9, 0xB98A, 0xA9AB,
+ 0x5844, 0x4865, 0x7806, 0x6827, 0x18C0, 0x08E1, 0x3882, 0x28A3,
+ 0xCB7D, 0xDB5C, 0xEB3F, 0xFB1E, 0x8BF9, 0x9BD8, 0xABBB, 0xBB9A,
+ 0x4A75, 0x5A54, 0x6A37, 0x7A16, 0x0AF1, 0x1AD0, 0x2AB3, 0x3A92,
+ 0xFD2E, 0xED0F, 0xDD6C, 0xCD4D, 0xBDAA, 0xAD8B, 0x9DE8, 0x8DC9,
+ 0x7C26, 0x6C07, 0x5C64, 0x4C45, 0x3CA2, 0x2C83, 0x1CE0, 0x0CC1,
+ 0xEF1F, 0xFF3E, 0xCF5D, 0xDF7C, 0xAF9B, 0xBFBA, 0x8FD9, 0x9FF8,
+ 0x6E17, 0x7E36, 0x4E55, 0x5E74, 0x2E93, 0x3EB2, 0x0ED1, 0x1EF0
+};
+EXPORT_SYMBOL(crc_ccitt_false_table);
+
/**
- * crc_ccitt - recompute the CRC for the data buffer
+ * crc_ccitt - recompute the CRC (CRC-CCITT variant) for the data
+ * buffer
* @crc: previous CRC value
* @buffer: data pointer
* @len: number of bytes in the buffer
@@ -65,5 +106,20 @@ u16 crc_ccitt(u16 crc, u8 const *buffer, size_t len)
}
EXPORT_SYMBOL(crc_ccitt);
+/**
+ * crc_ccitt_false - recompute the CRC (CRC-CCITT-FALSE variant)
+ * for the data buffer
+ * @crc: previous CRC value
+ * @buffer: data pointer
+ * @len: number of bytes in the buffer
+ */
+u16 crc_ccitt_false(u16 crc, u8 const *buffer, size_t len)
+{
+ while (len--)
+ crc = crc_ccitt_false_byte(crc, *buffer++);
+ return crc;
+}
+EXPORT_SYMBOL(crc_ccitt_false);
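
A short usage sketch. CRC-CCITT-FALSE is conventionally seeded with 0xffff;
the seed and the expected check value below are assumptions about the
caller's convention, not something this helper enforces:

	static u16 example_crc(void)
	{
		static const u8 msg[] = "123456789";

		/* Expected result for this vector is 0x29b1 (assumed
		 * check value for CRC-16/CCITT-FALSE). */
		return crc_ccitt_false(0xffff, msg, sizeof(msg) - 1);
	}
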
+
MODULE_DESCRIPTION("CRC-CCITT calculations");
MODULE_LICENSE("GPL");
diff --git a/lib/dma-direct.c b/lib/dma-direct.c
new file mode 100644
index 000000000000..40b1f92f2214
--- /dev/null
+++ b/lib/dma-direct.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DMA operations that map physical memory directly without using an IOMMU or
+ * flushing caches.
+ */
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/dma-direct.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-contiguous.h>
+#include <linux/pfn.h>
+
+#define DIRECT_MAPPING_ERROR 0
+
+/*
+ * Most architectures use ZONE_DMA for the first 16 Megabytes, but
+ * some use it for entirely different regions:
+ */
+#ifndef ARCH_ZONE_DMA_BITS
+#define ARCH_ZONE_DMA_BITS 24
+#endif
+
+static bool
+check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
+ const char *caller)
+{
+ if (unlikely(dev && !dma_capable(dev, dma_addr, size))) {
+ if (*dev->dma_mask >= DMA_BIT_MASK(32)) {
+ dev_err(dev,
+ "%s: overflow %pad+%zu of device mask %llx\n",
+ caller, &dma_addr, size, *dev->dma_mask);
+ }
+ return false;
+ }
+ return true;
+}
+
+static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
+{
+ return phys_to_dma(dev, phys) + size - 1 <= dev->coherent_dma_mask;
+}
+
+void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, unsigned long attrs)
+{
+ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ int page_order = get_order(size);
+ struct page *page = NULL;
+
+ /* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */
+ if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
+ gfp |= GFP_DMA;
+ if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
+ gfp |= GFP_DMA32;
+
+again:
+ /* CMA can be used only in the context which permits sleeping */
+ if (gfpflags_allow_blocking(gfp)) {
+ page = dma_alloc_from_contiguous(dev, count, page_order, gfp);
+ if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
+ dma_release_from_contiguous(dev, page, count);
+ page = NULL;
+ }
+ }
+ if (!page)
+ page = alloc_pages_node(dev_to_node(dev), gfp, page_order);
+
+ if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
+ __free_pages(page, page_order);
+ page = NULL;
+
+ if (dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
+ !(gfp & GFP_DMA)) {
+ gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
+ goto again;
+ }
+ }
+
+ if (!page)
+ return NULL;
+
+ *dma_handle = phys_to_dma(dev, page_to_phys(page));
+ memset(page_address(page), 0, size);
+ return page_address(page);
+}
+
+void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_addr, unsigned long attrs)
+{
+ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+ if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
+ free_pages((unsigned long)cpu_addr, get_order(size));
+}
+
+static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ dma_addr_t dma_addr = phys_to_dma(dev, page_to_phys(page)) + offset;
+
+ if (!check_addr(dev, dma_addr, size, __func__))
+ return DIRECT_MAPPING_ERROR;
+ return dma_addr;
+}
+
+static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+ int i;
+ struct scatterlist *sg;
+
+ for_each_sg(sgl, sg, nents, i) {
+ BUG_ON(!sg_page(sg));
+
+ sg_dma_address(sg) = phys_to_dma(dev, sg_phys(sg));
+ if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__))
+ return 0;
+ sg_dma_len(sg) = sg->length;
+ }
+
+ return nents;
+}
+
+int dma_direct_supported(struct device *dev, u64 mask)
+{
+#ifdef CONFIG_ZONE_DMA
+ if (mask < DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
+ return 0;
+#else
+ /*
+ * Because 32-bit DMA masks are so common we expect every architecture
+ * to be able to satisfy them - either by not supporting more physical
+ * memory, or by providing a ZONE_DMA32. If neither is the case, the
+ * architecture needs to use an IOMMU instead of the direct mapping.
+ */
+ if (mask < DMA_BIT_MASK(32))
+ return 0;
+#endif
+ return 1;
+}
+
+static int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr == DIRECT_MAPPING_ERROR;
+}
+
+const struct dma_map_ops dma_direct_ops = {
+ .alloc = dma_direct_alloc,
+ .free = dma_direct_free,
+ .map_page = dma_direct_map_page,
+ .map_sg = dma_direct_map_sg,
+ .dma_supported = dma_direct_supported,
+ .mapping_error = dma_direct_mapping_error,
+};
+EXPORT_SYMBOL(dma_direct_ops);
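
Drivers do not call these ops directly; they reach dma_direct_ops through the
generic DMA API once an architecture installs it as a device's dma_map_ops.
A hedged consumer-side sketch:

	#include <linux/dma-mapping.h>
	#include <linux/sizes.h>

	static int my_setup_dma(struct device *dev)
	{
		dma_addr_t handle;
		void *cpu;

		cpu = dma_alloc_coherent(dev, SZ_4K, &handle, GFP_KERNEL);
		if (!cpu)
			return -ENOMEM;
		/* Program 'handle' into the device; access 'cpu' from
		 * the CPU side. */
		dma_free_coherent(dev, SZ_4K, cpu, handle);
		return 0;
	}
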
diff --git a/lib/dma-noop.c b/lib/dma-noop.c
deleted file mode 100644
index a10185b0c2d4..000000000000
--- a/lib/dma-noop.c
+++ /dev/null
@@ -1,68 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * lib/dma-noop.c
- *
- * DMA operations that map to physical addresses without flushing memory.
- */
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/dma-mapping.h>
-#include <linux/scatterlist.h>
-#include <linux/pfn.h>
-
-static void *dma_noop_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp,
- unsigned long attrs)
-{
- void *ret;
-
- ret = (void *)__get_free_pages(gfp, get_order(size));
- if (ret)
- *dma_handle = virt_to_phys(ret) - PFN_PHYS(dev->dma_pfn_offset);
-
- return ret;
-}
-
-static void dma_noop_free(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_addr,
- unsigned long attrs)
-{
- free_pages((unsigned long)cpu_addr, get_order(size));
-}
-
-static dma_addr_t dma_noop_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- return page_to_phys(page) + offset - PFN_PHYS(dev->dma_pfn_offset);
-}
-
-static int dma_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sgl, sg, nents, i) {
- dma_addr_t offset = PFN_PHYS(dev->dma_pfn_offset);
- void *va;
-
- BUG_ON(!sg_page(sg));
- va = sg_virt(sg);
- sg_dma_address(sg) = (dma_addr_t)virt_to_phys(va) - offset;
- sg_dma_len(sg) = sg->length;
- }
-
- return nents;
-}
-
-const struct dma_map_ops dma_noop_ops = {
- .alloc = dma_noop_alloc,
- .free = dma_noop_free,
- .map_page = dma_noop_map_page,
- .map_sg = dma_noop_map_sg,
-};
-
-EXPORT_SYMBOL(dma_noop_ops);
diff --git a/lib/kobject.c b/lib/kobject.c
index 763d70a18941..06b849eee0ca 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -1039,6 +1039,7 @@ void *kobj_ns_grab_current(enum kobj_ns_type type)
return ns;
}
+EXPORT_SYMBOL_GPL(kobj_ns_grab_current);
const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk)
{
@@ -1074,3 +1075,4 @@ void kobj_ns_drop(enum kobj_ns_type type, void *ns)
kobj_ns_ops_tbl[type]->drop_ns(ns);
spin_unlock(&kobj_ns_type_lock);
}
+EXPORT_SYMBOL_GPL(kobj_ns_drop);
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index fe03c6d52761..30e7dd88148b 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -197,10 +197,10 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);
/*
- * Restore per-cpu operation. smp_store_release() is paired with
- * smp_read_barrier_depends() in __ref_is_percpu() and guarantees
- * that the zeroing is visible to all percpu accesses which can see
- * the following __PERCPU_REF_ATOMIC clearing.
+ * Restore per-cpu operation. smp_store_release() is paired
+ * with READ_ONCE() in __ref_is_percpu() and guarantees that the
+ * zeroing is visible to all percpu accesses which can see the
+ * following __PERCPU_REF_ATOMIC clearing.
*/
for_each_possible_cpu(cpu)
*per_cpu_ptr(percpu_count, cpu) = 0;
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 80aa8d5463fa..42b5ca0acf93 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -462,7 +462,7 @@ static void sbq_wake_up(struct sbitmap_queue *sbq)
*/
atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wait_cnt + wake_batch);
sbq_index_atomic_inc(&sbq->wake_index);
- wake_up(&ws->wait);
+ wake_up_nr(&ws->wait, wake_batch);
}
}
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 7c1c55f7daaa..53728d391d3a 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -474,6 +474,133 @@ int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);
+#ifdef CONFIG_SGL_ALLOC
+
+/**
+ * sgl_alloc_order - allocate a scatterlist and its pages
+ * @length: Length in bytes of the scatterlist. Must be at least one
+ * @order: Second argument for alloc_pages()
+ * @chainable: Whether or not to allocate an extra element in the scatterlist
+ * for scatterlist chaining purposes
+ * @gfp: Memory allocation flags
+ * @nent_p: [out] Number of entries in the scatterlist that have pages
+ *
+ * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
+ */
+struct scatterlist *sgl_alloc_order(unsigned long long length,
+ unsigned int order, bool chainable,
+ gfp_t gfp, unsigned int *nent_p)
+{
+ struct scatterlist *sgl, *sg;
+ struct page *page;
+ unsigned int nent, nalloc;
+ u32 elem_len;
+
+ nent = round_up(length, PAGE_SIZE << order) >> (PAGE_SHIFT + order);
+ /* Check for integer overflow */
+ if (length > (nent << (PAGE_SHIFT + order)))
+ return NULL;
+ nalloc = nent;
+ if (chainable) {
+ /* Check for integer overflow */
+ if (nalloc + 1 < nalloc)
+ return NULL;
+ nalloc++;
+ }
+ sgl = kmalloc_array(nalloc, sizeof(struct scatterlist),
+ (gfp & ~GFP_DMA) | __GFP_ZERO);
+ if (!sgl)
+ return NULL;
+
+ sg_init_table(sgl, nalloc);
+ sg = sgl;
+ while (length) {
+ elem_len = min_t(u64, length, PAGE_SIZE << order);
+ page = alloc_pages(gfp, order);
+ if (!page) {
+ sgl_free(sgl);
+ return NULL;
+ }
+
+ sg_set_page(sg, page, elem_len, 0);
+ length -= elem_len;
+ sg = sg_next(sg);
+ }
+ WARN_ONCE(length, "length = %llu\n", length);
+ if (nent_p)
+ *nent_p = nent;
+ return sgl;
+}
+EXPORT_SYMBOL(sgl_alloc_order);
+
+/**
+ * sgl_alloc - allocate a scatterlist and its pages
+ * @length: Length in bytes of the scatterlist
+ * @gfp: Memory allocation flags
+ * @nent_p: [out] Number of entries in the scatterlist
+ *
+ * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
+ */
+struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
+ unsigned int *nent_p)
+{
+ return sgl_alloc_order(length, 0, false, gfp, nent_p);
+}
+EXPORT_SYMBOL(sgl_alloc);
+
+/**
+ * sgl_free_n_order - free a scatterlist and its pages
+ * @sgl: Scatterlist with one or more elements
+ * @nents: Maximum number of elements to free
+ * @order: Second argument for __free_pages()
+ *
+ * Notes:
+ * - If several scatterlists have been chained and each chain element is
+ *   freed separately then it's essential to set nents correctly to avoid
+ *   freeing a page twice.
+ * - All pages in a chained scatterlist can be freed at once by setting @nents
+ * to a high number.
+ */
+void sgl_free_n_order(struct scatterlist *sgl, int nents, int order)
+{
+ struct scatterlist *sg;
+ struct page *page;
+ int i;
+
+ for_each_sg(sgl, sg, nents, i) {
+ if (!sg)
+ break;
+ page = sg_page(sg);
+ if (page)
+ __free_pages(page, order);
+ }
+ kfree(sgl);
+}
+EXPORT_SYMBOL(sgl_free_n_order);
+
+/**
+ * sgl_free_order - free a scatterlist and its pages
+ * @sgl: Scatterlist with one or more elements
+ * @order: Second argument for __free_pages()
+ */
+void sgl_free_order(struct scatterlist *sgl, int order)
+{
+ sgl_free_n_order(sgl, INT_MAX, order);
+}
+EXPORT_SYMBOL(sgl_free_order);
+
+/**
+ * sgl_free - free a scatterlist and its pages
+ * @sgl: Scatterlist with one or more elements
+ */
+void sgl_free(struct scatterlist *sgl)
+{
+ sgl_free_order(sgl, 0);
+}
+EXPORT_SYMBOL(sgl_free);
+
+#endif /* CONFIG_SGL_ALLOC */
+
void __sg_page_iter_start(struct sg_page_iter *piter,
struct scatterlist *sglist, unsigned int nents,
unsigned long pgoffset)
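
A minimal usage sketch for the new CONFIG_SGL_ALLOC helpers, assuming a
driver that needs a page-backed scatterlist; the example_* names and the
1 MiB size are illustrative, not part of this patch:

	/* Allocate a 1 MiB scatterlist backed by order-0 pages. */
	static struct scatterlist *example_alloc(unsigned int *nents)
	{
		return sgl_alloc(1ULL << 20, GFP_KERNEL, nents);
	}

	static void example_free(struct scatterlist *sgl)
	{
		/* Releases both the pages and the sg array in one call. */
		sgl_free(sgl);
	}
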
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index cea19aaf303c..c43ec2271469 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -18,7 +18,7 @@
*/
#include <linux/cache.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/spinlock.h>
@@ -417,7 +417,7 @@ cleanup2:
return -ENOMEM;
}
-void __init swiotlb_free(void)
+void __init swiotlb_exit(void)
{
if (!io_tlb_orig_addr)
return;
@@ -586,7 +586,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
not_found:
spin_unlock_irqrestore(&io_tlb_lock, flags);
- if (printk_ratelimit())
+ if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size);
return SWIOTLB_MAP_ERROR;
found:
@@ -605,7 +605,6 @@ found:
return tlb_addr;
}
-EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
/*
* Allocates bounce buffer and returns its kernel virtual address.
@@ -675,7 +674,6 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
}
spin_unlock_irqrestore(&io_tlb_lock, flags);
}
-EXPORT_SYMBOL_GPL(swiotlb_tbl_unmap_single);
void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
size_t size, enum dma_data_direction dir,
@@ -707,92 +705,107 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
BUG();
}
}
-EXPORT_SYMBOL_GPL(swiotlb_tbl_sync_single);
+
+static inline bool dma_coherent_ok(struct device *dev, dma_addr_t addr,
+ size_t size)
+{
+ u64 mask = DMA_BIT_MASK(32);
+
+ if (dev && dev->coherent_dma_mask)
+ mask = dev->coherent_dma_mask;
+ return addr + size - 1 <= mask;
+}
+
+static void *
+swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ unsigned long attrs)
+{
+ phys_addr_t phys_addr;
+
+ if (swiotlb_force == SWIOTLB_NO_FORCE)
+ goto out_warn;
+
+ phys_addr = swiotlb_tbl_map_single(dev,
+ swiotlb_phys_to_dma(dev, io_tlb_start),
+ 0, size, DMA_FROM_DEVICE, 0);
+ if (phys_addr == SWIOTLB_MAP_ERROR)
+ goto out_warn;
+
+ *dma_handle = swiotlb_phys_to_dma(dev, phys_addr);
+ if (!dma_coherent_ok(dev, *dma_handle, size))
+ goto out_unmap;
+
+ memset(phys_to_virt(phys_addr), 0, size);
+ return phys_to_virt(phys_addr);
+
+out_unmap:
+ dev_warn(dev, "hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
+ (unsigned long long)(dev ? dev->coherent_dma_mask : 0),
+ (unsigned long long)*dma_handle);
+
+ /*
+ * DMA_TO_DEVICE to avoid memcpy in unmap_single.
+ * DMA_ATTR_SKIP_CPU_SYNC is optional.
+ */
+ swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+out_warn:
+ if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) {
+ dev_warn(dev,
+ "swiotlb: coherent allocation failed, size=%zu\n",
+ size);
+ dump_stack();
+ }
+ return NULL;
+}
void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags)
{
- dma_addr_t dev_addr;
- void *ret;
int order = get_order(size);
- u64 dma_mask = DMA_BIT_MASK(32);
-
- if (hwdev && hwdev->coherent_dma_mask)
- dma_mask = hwdev->coherent_dma_mask;
+ unsigned long attrs = (flags & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0;
+ void *ret;
ret = (void *)__get_free_pages(flags, order);
if (ret) {
- dev_addr = swiotlb_virt_to_bus(hwdev, ret);
- if (dev_addr + size - 1 > dma_mask) {
- /*
- * The allocated memory isn't reachable by the device.
- */
- free_pages((unsigned long) ret, order);
- ret = NULL;
+ *dma_handle = swiotlb_virt_to_bus(hwdev, ret);
+ if (dma_coherent_ok(hwdev, *dma_handle, size)) {
+ memset(ret, 0, size);
+ return ret;
}
+ free_pages((unsigned long)ret, order);
}
- if (!ret) {
- /*
- * We are either out of memory or the device can't DMA to
- * GFP_DMA memory; fall back on map_single(), which
- * will grab memory from the lowest available address range.
- */
- phys_addr_t paddr = map_single(hwdev, 0, size,
- DMA_FROM_DEVICE, 0);
- if (paddr == SWIOTLB_MAP_ERROR)
- goto err_warn;
- ret = phys_to_virt(paddr);
- dev_addr = swiotlb_phys_to_dma(hwdev, paddr);
-
- /* Confirm address can be DMA'd by device */
- if (dev_addr + size - 1 > dma_mask) {
- printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
- (unsigned long long)dma_mask,
- (unsigned long long)dev_addr);
-
- /*
- * DMA_TO_DEVICE to avoid memcpy in unmap_single.
- * The DMA_ATTR_SKIP_CPU_SYNC is optional.
- */
- swiotlb_tbl_unmap_single(hwdev, paddr,
- size, DMA_TO_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
- goto err_warn;
- }
- }
+ return swiotlb_alloc_buffer(hwdev, size, dma_handle, attrs);
+}
+EXPORT_SYMBOL(swiotlb_alloc_coherent);
- *dma_handle = dev_addr;
- memset(ret, 0, size);
+static bool swiotlb_free_buffer(struct device *dev, size_t size,
+ dma_addr_t dma_addr)
+{
+ phys_addr_t phys_addr = dma_to_phys(dev, dma_addr);
- return ret;
+ WARN_ON_ONCE(irqs_disabled());
-err_warn:
- pr_warn("swiotlb: coherent allocation failed for device %s size=%zu\n",
- dev_name(hwdev), size);
- dump_stack();
+ if (!is_swiotlb_buffer(phys_addr))
+ return false;
- return NULL;
+ /*
+ * DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single.
+ * DMA_ATTR_SKIP_CPU_SYNC is optional.
+ */
+ swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ return true;
}
-EXPORT_SYMBOL(swiotlb_alloc_coherent);
void
swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
dma_addr_t dev_addr)
{
- phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
-
- WARN_ON(irqs_disabled());
- if (!is_swiotlb_buffer(paddr))
+ if (!swiotlb_free_buffer(hwdev, size, dev_addr))
free_pages((unsigned long)vaddr, get_order(size));
- else
- /*
- * DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single.
- * DMA_ATTR_SKIP_CPU_SYNC is optional.
- */
- swiotlb_tbl_unmap_single(hwdev, paddr, size, DMA_TO_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
}
EXPORT_SYMBOL(swiotlb_free_coherent);
@@ -868,7 +881,6 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer);
}
-EXPORT_SYMBOL_GPL(swiotlb_map_page);
/*
* Unmap a single streaming mode DMA translation. The dma_addr and size must
@@ -909,7 +921,6 @@ void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
{
unmap_single(hwdev, dev_addr, size, dir, attrs);
}
-EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
/*
* Make physical memory consistent for a single streaming mode DMA translation
@@ -947,7 +958,6 @@ swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
{
swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
-EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
@@ -955,7 +965,6 @@ swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
{
swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
-EXPORT_SYMBOL(swiotlb_sync_single_for_device);
/*
* Map a set of buffers described by scatterlist in streaming mode for DMA.
@@ -1007,7 +1016,6 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
}
return nelems;
}
-EXPORT_SYMBOL(swiotlb_map_sg_attrs);
/*
* Unmap a set of streaming mode DMA translations. Again, cpu read rules
@@ -1027,7 +1035,6 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir,
attrs);
}
-EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
/*
* Make physical memory consistent for a set of streaming mode DMA translations
@@ -1055,7 +1062,6 @@ swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
{
swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
-EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
@@ -1063,14 +1069,12 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
{
swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
-EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
return (dma_addr == swiotlb_phys_to_dma(hwdev, io_tlb_overflow_buffer));
}
-EXPORT_SYMBOL(swiotlb_dma_mapping_error);
/*
* Return whether the given device DMA address mask can be supported
@@ -1083,4 +1087,49 @@ swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
return swiotlb_phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
}
-EXPORT_SYMBOL(swiotlb_dma_supported);
+
+#ifdef CONFIG_DMA_DIRECT_OPS
+void *swiotlb_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, unsigned long attrs)
+{
+ void *vaddr;
+
+ /* temporary workaround: */
+ if (gfp & __GFP_NOWARN)
+ attrs |= DMA_ATTR_NO_WARN;
+
+ /*
+ * Don't print a warning when the first allocation attempt fails.
+ * swiotlb_alloc_coherent() will print a warning when the DMA memory
+ * allocation ultimately failed.
+ */
+ gfp |= __GFP_NOWARN;
+
+ vaddr = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
+ if (!vaddr)
+ vaddr = swiotlb_alloc_buffer(dev, size, dma_handle, attrs);
+ return vaddr;
+}
+
+void swiotlb_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_addr, unsigned long attrs)
+{
+ if (!swiotlb_free_buffer(dev, size, dma_addr))
+ dma_direct_free(dev, size, vaddr, dma_addr, attrs);
+}
+
+const struct dma_map_ops swiotlb_dma_ops = {
+ .mapping_error = swiotlb_dma_mapping_error,
+ .alloc = swiotlb_alloc,
+ .free = swiotlb_free,
+ .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
+ .sync_single_for_device = swiotlb_sync_single_for_device,
+ .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
+ .sync_sg_for_device = swiotlb_sync_sg_for_device,
+ .map_sg = swiotlb_map_sg_attrs,
+ .unmap_sg = swiotlb_unmap_sg_attrs,
+ .map_page = swiotlb_map_page,
+ .unmap_page = swiotlb_unmap_page,
+ .dma_supported = swiotlb_dma_supported,
+};
+#endif /* CONFIG_DMA_DIRECT_OPS */
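
With CONFIG_DMA_DIRECT_OPS an architecture can point its DMA hook at the
generic ops table exported above instead of carrying a private copy; a
rough sketch, assuming the usual get_arch_dma_ops() hook (hypothetical
arch glue, not part of this patch):

	/* Hypothetical arch glue: route all DMA through swiotlb. */
	static inline const struct dma_map_ops *
	get_arch_dma_ops(struct bus_type *bus)
	{
		return &swiotlb_dma_ops;
	}
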
diff --git a/lib/usercopy.c b/lib/usercopy.c
index 15e2e6fb060e..3744b2a8e591 100644
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -20,7 +20,7 @@ EXPORT_SYMBOL(_copy_from_user);
#endif
#ifndef INLINE_COPY_TO_USER
-unsigned long _copy_to_user(void *to, const void __user *from, unsigned long n)
+unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
if (likely(access_ok(VERIFY_WRITE, to, n))) {
diff --git a/mm/gup.c b/mm/gup.c
index e0d82b6706d7..9e17d8db2d6b 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -848,7 +848,7 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
unsigned long nr_pages,
struct page **pages,
struct vm_area_struct **vmas,
- int *locked, bool notify_drop,
+ int *locked,
unsigned int flags)
{
long ret, pages_done;
@@ -922,7 +922,7 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
pages++;
start += PAGE_SIZE;
}
- if (notify_drop && lock_dropped && *locked) {
+ if (lock_dropped && *locked) {
/*
* We must let the caller know we temporarily dropped the lock
* and so the critical section protected by it was lost.
@@ -959,36 +959,12 @@ long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
int *locked)
{
return __get_user_pages_locked(current, current->mm, start, nr_pages,
- pages, NULL, locked, true,
+ pages, NULL, locked,
gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages_locked);
/*
- * Same as get_user_pages_unlocked(...., FOLL_TOUCH) but it allows for
- * tsk, mm to be specified.
- *
- * NOTE: here FOLL_TOUCH is not set implicitly and must be set by the
- * caller if required (just like with __get_user_pages). "FOLL_GET"
- * is set implicitly if "pages" is non-NULL.
- */
-static __always_inline long __get_user_pages_unlocked(struct task_struct *tsk,
- struct mm_struct *mm, unsigned long start,
- unsigned long nr_pages, struct page **pages,
- unsigned int gup_flags)
-{
- long ret;
- int locked = 1;
-
- down_read(&mm->mmap_sem);
- ret = __get_user_pages_locked(tsk, mm, start, nr_pages, pages, NULL,
- &locked, false, gup_flags);
- if (locked)
- up_read(&mm->mmap_sem);
- return ret;
-}
-
-/*
* get_user_pages_unlocked() is suitable to replace the form:
*
* down_read(&mm->mmap_sem);
@@ -1006,8 +982,16 @@ static __always_inline long __get_user_pages_unlocked(struct task_struct *tsk,
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
struct page **pages, unsigned int gup_flags)
{
- return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
- pages, gup_flags | FOLL_TOUCH);
+ struct mm_struct *mm = current->mm;
+ int locked = 1;
+ long ret;
+
+ down_read(&mm->mmap_sem);
+ ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL,
+ &locked, gup_flags | FOLL_TOUCH);
+ if (locked)
+ up_read(&mm->mmap_sem);
+ return ret;
}
EXPORT_SYMBOL(get_user_pages_unlocked);
@@ -1073,7 +1057,7 @@ long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
struct vm_area_struct **vmas, int *locked)
{
return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
- locked, true,
+ locked,
gup_flags | FOLL_TOUCH | FOLL_REMOTE);
}
EXPORT_SYMBOL(get_user_pages_remote);
@@ -1090,7 +1074,7 @@ long get_user_pages(unsigned long start, unsigned long nr_pages,
struct vm_area_struct **vmas)
{
return __get_user_pages_locked(current, current->mm, start, nr_pages,
- pages, vmas, NULL, false,
+ pages, vmas, NULL,
gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages);
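
The removed __get_user_pages_unlocked() wrapper is folded into its only
remaining caller; from a user of the API nothing changes. A caller-side
sketch (the example_* name is illustrative):

	static int example_pin_one_page(unsigned long addr)
	{
		struct page *page;
		long got;

		/* Takes and drops current->mm->mmap_sem internally. */
		got = get_user_pages_unlocked(addr, 1, &page, FOLL_WRITE);
		if (got != 1)
			return got < 0 ? got : -EFAULT;
		/* ... access the page ... */
		put_page(page);
		return 0;
	}
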
diff --git a/mm/hwpoison-inject.c b/mm/hwpoison-inject.c
index 356df057a2a8..b6ac70616c32 100644
--- a/mm/hwpoison-inject.c
+++ b/mm/hwpoison-inject.c
@@ -52,7 +52,7 @@ static int hwpoison_inject(void *data, u64 val)
inject:
pr_info("Injecting memory failure at pfn %#lx\n", pfn);
- return memory_failure(pfn, 18, MF_COUNT_INCREASED);
+ return memory_failure(pfn, MF_COUNT_INCREASED);
put_out:
put_hwpoison_page(p);
return 0;
diff --git a/mm/ksm.c b/mm/ksm.c
index be8f4576f842..c406f75957ad 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -675,15 +675,8 @@ static struct page *get_ksm_page(struct stable_node *stable_node, bool lock_it)
expected_mapping = (void *)((unsigned long)stable_node |
PAGE_MAPPING_KSM);
again:
- kpfn = READ_ONCE(stable_node->kpfn);
+ kpfn = READ_ONCE(stable_node->kpfn); /* Address dependency. */
page = pfn_to_page(kpfn);
-
- /*
- * page is computed from kpfn, so on most architectures reading
- * page->mapping is naturally ordered after reading node->kpfn,
- * but on Alpha we need to be more careful.
- */
- smp_read_barrier_depends();
if (READ_ONCE(page->mapping) != expected_mapping)
goto stale;
diff --git a/mm/madvise.c b/mm/madvise.c
index 751e97aa2210..4d3c922ea1a1 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -661,7 +661,7 @@ static int madvise_inject_error(int behavior,
pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n",
page_to_pfn(page), start);
- ret = memory_failure(page_to_pfn(page), 0, MF_COUNT_INCREASED);
+ ret = memory_failure(page_to_pfn(page), MF_COUNT_INCREASED);
if (ret)
return ret;
}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ac2ffd5e02b9..9011997d8a5c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3777,7 +3777,7 @@ static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
struct mem_cgroup_event *event =
container_of(wait, struct mem_cgroup_event, wait);
struct mem_cgroup *memcg = event->memcg;
- unsigned long flags = (unsigned long)key;
+ __poll_t flags = key_to_poll(key);
if (flags & POLLHUP) {
/*
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 4acdf393a801..4b80ccee4535 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -178,25 +178,19 @@ EXPORT_SYMBOL_GPL(hwpoison_filter);
* ``action optional'' if they are not immediately affected by the error
* ``action required'' if error happened in current execution context
*/
-static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
+static int kill_proc(struct task_struct *t, unsigned long addr,
unsigned long pfn, struct page *page, int flags)
{
- struct siginfo si;
+ short addr_lsb;
int ret;
pr_err("Memory failure: %#lx: Killing %s:%d due to hardware memory corruption\n",
pfn, t->comm, t->pid);
- si.si_signo = SIGBUS;
- si.si_errno = 0;
- si.si_addr = (void *)addr;
-#ifdef __ARCH_SI_TRAPNO
- si.si_trapno = trapno;
-#endif
- si.si_addr_lsb = compound_order(compound_head(page)) + PAGE_SHIFT;
+ addr_lsb = compound_order(compound_head(page)) + PAGE_SHIFT;
if ((flags & MF_ACTION_REQUIRED) && t->mm == current->mm) {
- si.si_code = BUS_MCEERR_AR;
- ret = force_sig_info(SIGBUS, &si, current);
+ ret = force_sig_mceerr(BUS_MCEERR_AR, (void __user *)addr,
+ addr_lsb, current);
} else {
/*
* Don't use force here, it's convenient if the signal
@@ -204,8 +198,8 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
* This could cause a loop when the user sets SIGBUS
* to SIG_IGN, but hopefully no one will do that?
*/
- si.si_code = BUS_MCEERR_AO;
- ret = send_sig_info(SIGBUS, &si, t); /* synchronous? */
+ ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)addr,
+ addr_lsb, t); /* synchronous? */
}
if (ret < 0)
pr_info("Memory failure: Error sending signal to %s:%d: %d\n",
@@ -323,7 +317,7 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
* Also when FAIL is set do a force kill because something went
* wrong earlier.
*/
-static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
+static void kill_procs(struct list_head *to_kill, int forcekill,
bool fail, struct page *page, unsigned long pfn,
int flags)
{
@@ -348,7 +342,7 @@ static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
* check for that, but we need to tell the
* process anyways.
*/
- else if (kill_proc(tk->tsk, tk->addr, trapno,
+ else if (kill_proc(tk->tsk, tk->addr,
pfn, page, flags) < 0)
pr_err("Memory failure: %#lx: Cannot send advisory machine check signal to %s:%d\n",
pfn, tk->tsk->comm, tk->tsk->pid);
@@ -927,7 +921,7 @@ EXPORT_SYMBOL_GPL(get_hwpoison_page);
* the pages and send SIGBUS to the processes if the data was dirty.
*/
static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
- int trapno, int flags, struct page **hpagep)
+ int flags, struct page **hpagep)
{
enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
struct address_space *mapping;
@@ -1017,7 +1011,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
* any accesses to the poisoned memory.
*/
forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL);
- kill_procs(&tokill, forcekill, trapno, !unmap_success, p, pfn, flags);
+ kill_procs(&tokill, forcekill, !unmap_success, p, pfn, flags);
return unmap_success;
}
@@ -1045,7 +1039,7 @@ static int identify_page_state(unsigned long pfn, struct page *p,
return page_action(ps, p, pfn);
}
-static int memory_failure_hugetlb(unsigned long pfn, int trapno, int flags)
+static int memory_failure_hugetlb(unsigned long pfn, int flags)
{
struct page *p = pfn_to_page(pfn);
struct page *head = compound_head(p);
@@ -1090,7 +1084,7 @@ static int memory_failure_hugetlb(unsigned long pfn, int trapno, int flags)
return 0;
}
- if (!hwpoison_user_mappings(p, pfn, trapno, flags, &head)) {
+ if (!hwpoison_user_mappings(p, pfn, flags, &head)) {
action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
res = -EBUSY;
goto out;
@@ -1105,7 +1099,6 @@ out:
/**
* memory_failure - Handle memory failure of a page.
* @pfn: Page Number of the corrupted page
- * @trapno: Trap number reported in the signal to user space.
* @flags: fine tune action taken
*
* This function is called by the low level machine check code
@@ -1120,7 +1113,7 @@ out:
* Must run in process context (e.g. a work queue) with interrupts
* enabled and no spinlocks hold.
*/
-int memory_failure(unsigned long pfn, int trapno, int flags)
+int memory_failure(unsigned long pfn, int flags)
{
struct page *p;
struct page *hpage;
@@ -1129,7 +1122,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
unsigned long page_flags;
if (!sysctl_memory_failure_recovery)
- panic("Memory failure from trap %d on page %lx", trapno, pfn);
+ panic("Memory failure on page %lx", pfn);
if (!pfn_valid(pfn)) {
pr_err("Memory failure: %#lx: memory outside kernel control\n",
@@ -1139,7 +1132,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
p = pfn_to_page(pfn);
if (PageHuge(p))
- return memory_failure_hugetlb(pfn, trapno, flags);
+ return memory_failure_hugetlb(pfn, flags);
if (TestSetPageHWPoison(p)) {
pr_err("Memory failure: %#lx: already hardware poisoned\n",
pfn);
@@ -1268,7 +1261,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
* When the raw error page is thp tail page, hpage points to the raw
* page after thp split.
*/
- if (!hwpoison_user_mappings(p, pfn, trapno, flags, &hpage)) {
+ if (!hwpoison_user_mappings(p, pfn, flags, &hpage)) {
action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
res = -EBUSY;
goto out;
@@ -1296,7 +1289,6 @@ EXPORT_SYMBOL_GPL(memory_failure);
struct memory_failure_entry {
unsigned long pfn;
- int trapno;
int flags;
};
@@ -1312,7 +1304,6 @@ static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu);
/**
* memory_failure_queue - Schedule handling memory failure of a page.
* @pfn: Page Number of the corrupted page
- * @trapno: Trap number reported in the signal to user space.
* @flags: Flags for memory failure handling
*
* This function is called by the low level hardware error handler
@@ -1326,13 +1317,12 @@ static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu);
*
* Can run in IRQ context.
*/
-void memory_failure_queue(unsigned long pfn, int trapno, int flags)
+void memory_failure_queue(unsigned long pfn, int flags)
{
struct memory_failure_cpu *mf_cpu;
unsigned long proc_flags;
struct memory_failure_entry entry = {
.pfn = pfn,
- .trapno = trapno,
.flags = flags,
};
@@ -1365,7 +1355,7 @@ static void memory_failure_work_func(struct work_struct *work)
if (entry.flags & MF_SOFT_OFFLINE)
soft_offline_page(pfn_to_page(entry.pfn), entry.flags);
else
- memory_failure(entry.pfn, entry.trapno, entry.flags);
+ memory_failure(entry.pfn, entry.flags);
}
}
diff --git a/mm/mlock.c b/mm/mlock.c
index 30472d438794..f7f54fd2e13f 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -779,7 +779,7 @@ static int apply_mlockall_flags(int flags)
/* Ignore errors */
mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
- cond_resched_rcu_qs();
+ cond_resched();
}
out:
return 0;
diff --git a/mm/page_io.c b/mm/page_io.c
index e93f1a4cacd7..b41cf9644585 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -50,7 +50,7 @@ static struct bio *get_swap_bio(gfp_t gfp_flags,
void end_swap_bio_write(struct bio *bio)
{
- struct page *page = bio->bi_io_vec[0].bv_page;
+ struct page *page = bio_first_page_all(bio);
if (bio->bi_status) {
SetPageError(page);
@@ -122,7 +122,7 @@ static void swap_slot_free_notify(struct page *page)
static void end_swap_bio_read(struct bio *bio)
{
- struct page *page = bio->bi_io_vec[0].bv_page;
+ struct page *page = bio_first_page_all(bio);
struct task_struct *waiter = bio->bi_private;
if (bio->bi_status) {
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 3074b02eaa09..42fe5653814a 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2697,7 +2697,7 @@ out:
}
#ifdef CONFIG_PROC_FS
-static unsigned swaps_poll(struct file *file, poll_table *wait)
+static __poll_t swaps_poll(struct file *file, poll_table *wait)
{
struct seq_file *seq = file->private_data;
diff --git a/mm/util.c b/mm/util.c
index 34e57fae959d..c1250501364f 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -150,18 +150,14 @@ EXPORT_SYMBOL(kmemdup_nul);
* @src: source address in user space
* @len: number of bytes to copy
*
- * Returns an ERR_PTR() on failure.
+ * Returns an ERR_PTR() on failure. Result is physically
+ * contiguous, to be freed by kfree().
*/
void *memdup_user(const void __user *src, size_t len)
{
void *p;
- /*
- * Always use GFP_KERNEL, since copy_from_user() can sleep and
- * cause pagefault, which makes it pointless to use GFP_NOFS
- * or GFP_ATOMIC.
- */
- p = kmalloc_track_caller(len, GFP_KERNEL);
+ p = kmalloc_track_caller(len, GFP_USER);
if (!p)
return ERR_PTR(-ENOMEM);
@@ -174,6 +170,32 @@ void *memdup_user(const void __user *src, size_t len)
}
EXPORT_SYMBOL(memdup_user);
+/**
+ * vmemdup_user - duplicate memory region from user space
+ *
+ * @src: source address in user space
+ * @len: number of bytes to copy
+ *
+ * Returns an ERR_PTR() on failure. Result may not be
+ * physically contiguous. Use kvfree() to free.
+ */
+void *vmemdup_user(const void __user *src, size_t len)
+{
+ void *p;
+
+ p = kvmalloc(len, GFP_USER);
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+
+ if (copy_from_user(p, src, len)) {
+ kvfree(p);
+ return ERR_PTR(-EFAULT);
+ }
+
+ return p;
+}
+EXPORT_SYMBOL(vmemdup_user);
+
/*
* strndup_user - duplicate an existing string from user space
* @s: The string to duplicate
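
The sctp conversion later in this diff is the intended consumer of
vmemdup_user(); the pattern in isolation (illustrative sketch, example_*
name is not from the patch):

	static int example_copy_args(const void __user *uptr, size_t len)
	{
		void *buf = vmemdup_user(uptr, len);

		if (IS_ERR(buf))
			return PTR_ERR(buf);
		/* ... parse buf ... */
		kvfree(buf);	/* may be vmalloc-backed; never kfree() */
		return 0;
	}
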
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 80f5c79053a4..d6f7f7cb79c4 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -228,32 +228,31 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
}
}
-static int
-p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt)
+static __poll_t
+p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt, int *err)
{
- int ret, n;
+ __poll_t ret, n;
struct p9_trans_fd *ts = NULL;
if (client && client->status == Connected)
ts = client->trans;
- if (!ts)
- return -EREMOTEIO;
+ if (!ts) {
+ if (err)
+ *err = -EREMOTEIO;
+ return POLLERR;
+ }
if (!ts->rd->f_op->poll)
- return -EIO;
-
- if (!ts->wr->f_op->poll)
- return -EIO;
-
- ret = ts->rd->f_op->poll(ts->rd, pt);
- if (ret < 0)
- return ret;
+ ret = DEFAULT_POLLMASK;
+ else
+ ret = ts->rd->f_op->poll(ts->rd, pt);
if (ts->rd != ts->wr) {
- n = ts->wr->f_op->poll(ts->wr, pt);
- if (n < 0)
- return n;
+ if (!ts->wr->f_op->poll)
+ n = DEFAULT_POLLMASK;
+ else
+ n = ts->wr->f_op->poll(ts->wr, pt);
ret = (ret & ~POLLOUT) | (n & ~POLLIN);
}
@@ -298,7 +297,8 @@ static int p9_fd_read(struct p9_client *client, void *v, int len)
static void p9_read_work(struct work_struct *work)
{
- int n, err;
+ __poll_t n;
+ int err;
struct p9_conn *m;
int status = REQ_STATUS_ERROR;
@@ -398,7 +398,7 @@ end_clear:
if (test_and_clear_bit(Rpending, &m->wsched))
n = POLLIN;
else
- n = p9_fd_poll(m->client, NULL);
+ n = p9_fd_poll(m->client, NULL, NULL);
if ((n & POLLIN) && !test_and_set_bit(Rworksched, &m->wsched)) {
p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m);
@@ -448,7 +448,8 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
static void p9_write_work(struct work_struct *work)
{
- int n, err;
+ __poll_t n;
+ int err;
struct p9_conn *m;
struct p9_req_t *req;
@@ -506,7 +507,7 @@ end_clear:
if (test_and_clear_bit(Wpending, &m->wsched))
n = POLLOUT;
else
- n = p9_fd_poll(m->client, NULL);
+ n = p9_fd_poll(m->client, NULL, NULL);
if ((n & POLLOUT) &&
!test_and_set_bit(Wworksched, &m->wsched)) {
@@ -581,7 +582,7 @@ p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
static void p9_conn_create(struct p9_client *client)
{
- int n;
+ __poll_t n;
struct p9_trans_fd *ts = client->trans;
struct p9_conn *m = &ts->conn;
@@ -597,7 +598,7 @@ static void p9_conn_create(struct p9_client *client)
INIT_LIST_HEAD(&m->poll_pending_link);
init_poll_funcptr(&m->pt, p9_pollwait);
- n = p9_fd_poll(client, &m->pt);
+ n = p9_fd_poll(client, &m->pt, NULL);
if (n & POLLIN) {
p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m);
set_bit(Rpending, &m->wsched);
@@ -617,17 +618,16 @@ static void p9_conn_create(struct p9_client *client)
static void p9_poll_mux(struct p9_conn *m)
{
- int n;
+ __poll_t n;
+ int err = -ECONNRESET;
if (m->err < 0)
return;
- n = p9_fd_poll(m->client, NULL);
- if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) {
+ n = p9_fd_poll(m->client, NULL, &err);
+ if (n & (POLLERR | POLLHUP | POLLNVAL)) {
p9_debug(P9_DEBUG_TRANS, "error mux %p err %d\n", m, n);
- if (n >= 0)
- n = -ECONNRESET;
- p9_conn_cancel(m, n);
+ p9_conn_cancel(m, err);
}
if (n & POLLIN) {
@@ -663,7 +663,7 @@ static void p9_poll_mux(struct p9_conn *m)
static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
{
- int n;
+ __poll_t n;
struct p9_trans_fd *ts = client->trans;
struct p9_conn *m = &ts->conn;
@@ -680,7 +680,7 @@ static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
if (test_and_clear_bit(Wpending, &m->wsched))
n = POLLOUT;
else
- n = p9_fd_poll(m->client, NULL);
+ n = p9_fd_poll(m->client, NULL, NULL);
if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
schedule_work(&m->wq);
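
Under the new signature the error cause travels through an optional int
out-parameter while the return value stays a pure event mask; roughly as
p9_poll_mux() above uses it (sketch, not additional patch content):

	static void example_poll_check(struct p9_conn *m)
	{
		int err = -ECONNRESET;	/* reason reported on trouble */
		__poll_t n = p9_fd_poll(m->client, NULL, &err);

		if (n & (POLLERR | POLLHUP | POLLNVAL))
			p9_conn_cancel(m, err);
	}
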
diff --git a/net/atm/common.c b/net/atm/common.c
index 5763fd241dc3..6523f38c4957 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -648,11 +648,11 @@ out:
return error;
}
-unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
+__poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
{
struct sock *sk = sock->sk;
struct atm_vcc *vcc;
- unsigned int mask;
+ __poll_t mask;
sock_poll_wait(file, sk_sleep(sk), wait);
mask = 0;
diff --git a/net/atm/common.h b/net/atm/common.h
index d9d583712a91..5850649068bb 100644
--- a/net/atm/common.h
+++ b/net/atm/common.h
@@ -17,7 +17,7 @@ int vcc_connect(struct socket *sock, int itf, short vpi, int vci);
int vcc_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
int flags);
int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len);
-unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait);
+__poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait);
int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
int vcc_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
int vcc_setsockopt(struct socket *sock, int level, int optname,
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index 8041cf106c37..581375d0eed2 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -297,7 +297,7 @@ out:
return len;
}
-static unsigned int batadv_socket_poll(struct file *file, poll_table *wait)
+static __poll_t batadv_socket_poll(struct file *file, poll_table *wait)
{
struct batadv_socket_client *socket_client = file->private_data;
diff --git a/net/batman-adv/log.c b/net/batman-adv/log.c
index da004980ab8b..9be74a44e99d 100644
--- a/net/batman-adv/log.c
+++ b/net/batman-adv/log.c
@@ -185,7 +185,7 @@ static ssize_t batadv_log_read(struct file *file, char __user *buf,
return error;
}
-static unsigned int batadv_log_poll(struct file *file, poll_table *wait)
+static __poll_t batadv_log_poll(struct file *file, poll_table *wait)
{
struct batadv_priv *bat_priv = file->private_data;
struct batadv_priv_debug_log *debug_log = bat_priv->debug_log;
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index f044202346c6..f897681780db 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -421,7 +421,7 @@ out:
}
EXPORT_SYMBOL(bt_sock_stream_recvmsg);
-static inline unsigned int bt_accept_poll(struct sock *parent)
+static inline __poll_t bt_accept_poll(struct sock *parent)
{
struct bt_sock *s, *n;
struct sock *sk;
@@ -437,11 +437,11 @@ static inline unsigned int bt_accept_poll(struct sock *parent)
return 0;
}
-unsigned int bt_sock_poll(struct file *file, struct socket *sock,
+__poll_t bt_sock_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
- unsigned int mask = 0;
+ __poll_t mask = 0;
BT_DBG("sock %p, sk %p", sock, sk);
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index f2cec70d520c..1036e4fa1ea2 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -789,7 +789,7 @@ static int hidp_setup_hid(struct hidp_session *session,
hid->dev.parent = &session->conn->hcon->dev;
hid->ll_driver = &hidp_hid_driver;
- /* True if device is blacklisted in drivers/hid/hid-core.c */
+ /* True if device is blacklisted in drivers/hid/hid-quirks.c */
if (hid_ignore(hid)) {
hid_destroy_device(session->hid);
session->hid = NULL;
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 632d5a416d97..64048cec41e0 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -934,11 +934,11 @@ static int caif_release(struct socket *sock)
}
/* Copied from af_unix.c:unix_poll(), added CAIF tx_flow handling */
-static unsigned int caif_poll(struct file *file,
+static __poll_t caif_poll(struct file *file,
struct socket *sock, poll_table *wait)
{
struct sock *sk = sock->sk;
- unsigned int mask;
+ __poll_t mask;
struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
sock_poll_wait(file, sk_sleep(sk), wait);
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 522873ed120b..b7d9293940b5 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -72,12 +72,10 @@ static inline int connection_based(struct sock *sk)
static int receiver_wake_function(wait_queue_entry_t *wait, unsigned int mode, int sync,
void *key)
{
- unsigned long bits = (unsigned long)key;
-
/*
* Avoid a wakeup if event not interesting for us
*/
- if (bits && !(bits & (POLLIN | POLLERR)))
+ if (key && !(key_to_poll(key) & (POLLIN | POLLERR)))
return 0;
return autoremove_wake_function(wait, mode, sync, key);
}
@@ -833,11 +831,11 @@ EXPORT_SYMBOL(skb_copy_and_csum_datagram_msg);
* and you use a different write policy from sock_writeable()
* then please supply your own write_space callback.
*/
-unsigned int datagram_poll(struct file *file, struct socket *sock,
+__poll_t datagram_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
- unsigned int mask;
+ __poll_t mask;
sock_poll_wait(file, sk_sleep(sk), wait);
mask = 0;
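
Every poll conversion in this series follows the same shape; a minimal
protocol poll method under the new typing (hypothetical protocol, shown
only to make the __poll_t contract concrete):

	static __poll_t example_proto_poll(struct file *file,
					   struct socket *sock,
					   poll_table *wait)
	{
		struct sock *sk = sock->sk;
		__poll_t mask = datagram_poll(file, sock, wait);

		/* Layer protocol-specific bits over the generic mask. */
		if (!skb_queue_empty(&sk->sk_error_queue))
			mask |= POLLERR;
		return mask;
	}
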
diff --git a/net/core/sock.c b/net/core/sock.c
index abf4cbff99b2..1033f8ab0547 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2506,7 +2506,7 @@ int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
}
EXPORT_SYMBOL(sock_no_getname);
-unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
+__poll_t sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
{
return 0;
}
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 0c55ffb859bf..f91e3816806b 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -316,7 +316,7 @@ int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
int flags, int *addr_len);
void dccp_shutdown(struct sock *sk, int how);
int inet_dccp_listen(struct socket *sock, int backlog);
-unsigned int dccp_poll(struct file *file, struct socket *sock,
+__poll_t dccp_poll(struct file *file, struct socket *sock,
poll_table *wait);
int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
void dccp_req_err(struct sock *sk, u64 seq);
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index fa7e92e08920..74685fecfdb9 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -321,10 +321,10 @@ EXPORT_SYMBOL_GPL(dccp_disconnect);
* take care of normal races (between the test and the event) and we don't
* go look at any of the socket buffers directly.
*/
-unsigned int dccp_poll(struct file *file, struct socket *sock,
+__poll_t dccp_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
- unsigned int mask;
+ __poll_t mask;
struct sock *sk = sock->sk;
sock_poll_wait(file, sk_sleep(sk), wait);
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index d93e5b887f03..cc1b505453a8 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1209,11 +1209,11 @@ static int dn_getname(struct socket *sock, struct sockaddr *uaddr,int *uaddr_len
}
-static unsigned int dn_poll(struct file *file, struct socket *sock, poll_table *wait)
+static __poll_t dn_poll(struct file *file, struct socket *sock, poll_table *wait)
{
struct sock *sk = sock->sk;
struct dn_scp *scp = DN_SK(sk);
- int mask = datagram_poll(file, sock, wait);
+ __poll_t mask = datagram_poll(file, sock, wait);
if (!skb_queue_empty(&scp->other_receive_queue))
mask |= POLLRDBAND;
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 5f7c0d643fb3..4ffe302f9b82 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -202,13 +202,8 @@ unsigned int arpt_do_table(struct sk_buff *skb,
local_bh_disable();
addend = xt_write_recseq_begin();
- private = table->private;
+ private = READ_ONCE(table->private); /* Address dependency. */
cpu = smp_processor_id();
- /*
- * Ensure we load private-> members after we've fetched the base
- * pointer.
- */
- smp_read_barrier_depends();
table_base = private->entries;
jumpstack = (struct arpt_entry **)private->jumpstack[cpu];
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 1f534aec22f0..9a71f3149507 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -260,13 +260,8 @@ ipt_do_table(struct sk_buff *skb,
WARN_ON(!(table->valid_hooks & (1 << hook)));
local_bh_disable();
addend = xt_write_recseq_begin();
- private = table->private;
+ private = READ_ONCE(table->private); /* Address dependency. */
cpu = smp_processor_id();
- /*
- * Ensure we load private-> members after we've fetched the base
- * pointer.
- */
- smp_read_barrier_depends();
table_base = private->entries;
jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 874c9317b8df..c059aa7df0a9 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -491,9 +491,9 @@ static void tcp_tx_timestamp(struct sock *sk, u16 tsflags)
* take care of normal races (between the test and the event) and we don't
* go look at any of the socket buffers directly.
*/
-unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
+__poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
- unsigned int mask;
+ __poll_t mask;
struct sock *sk = sock->sk;
const struct tcp_sock *tp = tcp_sk(sk);
int state;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 3f018f34cf56..f81f969f9c06 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2495,9 +2495,9 @@ int compat_udp_getsockopt(struct sock *sk, int level, int optname,
* but then block when reading it. Add special case code
* to work around these arguably broken applications.
*/
-unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
+__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
- unsigned int mask = datagram_poll(file, sock, wait);
+ __poll_t mask = datagram_poll(file, sock, wait);
struct sock *sk = sock->sk;
if (!skb_queue_empty(&udp_sk(sk)->reader_queue))
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 37fa76ee5130..af4c917e0836 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -282,12 +282,7 @@ ip6t_do_table(struct sk_buff *skb,
local_bh_disable();
addend = xt_write_recseq_begin();
- private = table->private;
- /*
- * Ensure we load private-> members after we've fetched the base
- * pointer.
- */
- smp_read_barrier_depends();
+ private = READ_ONCE(table->private); /* Address dependency. */
cpu = smp_processor_id();
table_base = private->entries;
jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
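
All three xt variants rely on the same guarantee: READ_ONCE() now
carries the address dependency that the removed
smp_read_barrier_depends() made explicit. In isolation (sketch):

	static void *example_fetch_entries(struct xt_table *table)
	{
		/* Loads through 'private' below are ordered after this
		 * pointer fetch; no separate barrier is needed. */
		struct xt_table_info *private = READ_ONCE(table->private);

		return private->entries;
	}
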
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 148533169b1d..64331158d693 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1474,7 +1474,7 @@ done:
return copied;
}
-static inline unsigned int iucv_accept_poll(struct sock *parent)
+static inline __poll_t iucv_accept_poll(struct sock *parent)
{
struct iucv_sock *isk, *n;
struct sock *sk;
@@ -1489,11 +1489,11 @@ static inline unsigned int iucv_accept_poll(struct sock *parent)
return 0;
}
-unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
+__poll_t iucv_sock_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
- unsigned int mask = 0;
+ __poll_t mask = 0;
sock_poll_wait(file, sk_sleep(sk), wait);
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 9ee71cb276d7..fbaf3bd05b2e 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -1636,17 +1636,14 @@ static int
ip_vs_receive(struct socket *sock, char *buffer, const size_t buflen)
{
struct msghdr msg = {NULL,};
- struct kvec iov;
+ struct kvec iov = {buffer, buflen};
int len;
EnterFunction(7);
/* Receive a packet */
- iov.iov_base = buffer;
- iov.iov_len = (size_t)buflen;
-
- len = kernel_recvmsg(sock, &msg, &iov, 1, buflen, MSG_DONTWAIT);
-
+ iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, buflen);
+ len = sock_recvmsg(sock, &msg, MSG_DONTWAIT);
if (len < 0)
return len;
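
The kernel_recvmsg() call is replaced by seeding the msghdr's iterator
up front; the same pattern recurs in smc_clc and svcsock below. In
isolation (sketch, example_* name is illustrative):

	static int example_recv(struct socket *sock, void *buf, size_t buflen)
	{
		struct kvec iov = {buf, buflen};
		struct msghdr msg = {NULL,};

		iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, buflen);
		return sock_recvmsg(sock, &msg, MSG_DONTWAIT);
	}
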
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 3d72a0842c01..705198de671d 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1062,7 +1062,7 @@ static void gc_worker(struct work_struct *work)
* we will just continue with next hash slot.
*/
rcu_read_unlock();
- cond_resched_rcu_qs();
+ cond_resched();
} while (++buckets < goal);
if (gc_work->exiting)
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index fb7afcaa3004..985909f105eb 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -531,7 +531,7 @@ static int llcp_sock_getname(struct socket *sock, struct sockaddr *uaddr,
return 0;
}
-static inline unsigned int llcp_accept_poll(struct sock *parent)
+static inline __poll_t llcp_accept_poll(struct sock *parent)
{
struct nfc_llcp_sock *llcp_sock, *parent_sock;
struct sock *sk;
@@ -549,11 +549,11 @@ static inline unsigned int llcp_accept_poll(struct sock *parent)
return 0;
}
-static unsigned int llcp_sock_poll(struct file *file, struct socket *sock,
+static __poll_t llcp_sock_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
- unsigned int mask = 0;
+ __poll_t mask = 0;
pr_debug("%p\n", sk);
diff --git a/net/nfc/nci/uart.c b/net/nfc/nci/uart.c
index 8d104c1db628..a66f102c6c01 100644
--- a/net/nfc/nci/uart.c
+++ b/net/nfc/nci/uart.c
@@ -305,7 +305,7 @@ static ssize_t nci_uart_tty_write(struct tty_struct *tty, struct file *file,
return 0;
}
-static unsigned int nci_uart_tty_poll(struct tty_struct *tty,
+static __poll_t nci_uart_tty_poll(struct tty_struct *tty,
struct file *filp, poll_table *wait)
{
return 0;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 05d31864a34e..1d1483007e46 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -4074,12 +4074,12 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd,
return 0;
}
-static unsigned int packet_poll(struct file *file, struct socket *sock,
+static __poll_t packet_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
struct packet_sock *po = pkt_sk(sk);
- unsigned int mask = datagram_poll(file, sock, wait);
+ __poll_t mask = datagram_poll(file, sock, wait);
spin_lock_bh(&sk->sk_receive_queue.lock);
if (po->rx_ring.pg_vec) {
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index fa2f13a8938f..08f6751d2030 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -341,12 +341,12 @@ static int pn_socket_getname(struct socket *sock, struct sockaddr *addr,
return 0;
}
-static unsigned int pn_socket_poll(struct file *file, struct socket *sock,
+static __poll_t pn_socket_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
struct pep_sock *pn = pep_sk(sk);
- unsigned int mask = 0;
+ __poll_t mask = 0;
poll_wait(file, sk_sleep(sk), wait);
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index b405f77d664c..88aa8ad0f5b6 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -152,12 +152,12 @@ static int rds_getname(struct socket *sock, struct sockaddr *uaddr,
* to send to a congested destination, the system call may still fail (and
* return ENOBUFS).
*/
-static unsigned int rds_poll(struct file *file, struct socket *sock,
+static __poll_t rds_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
struct rds_sock *rs = rds_sk_to_rs(sk);
- unsigned int mask = 0;
+ __poll_t mask = 0;
unsigned long flags;
poll_wait(file, sk_sleep(sk), wait);
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 36dd2099048a..b2a5067b4afe 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -301,13 +301,11 @@ static int rds_ib_conn_info_visitor(struct rds_connection *conn,
memset(&iinfo->dst_gid, 0, sizeof(iinfo->dst_gid));
if (rds_conn_state(conn) == RDS_CONN_UP) {
struct rds_ib_device *rds_ibdev;
- struct rdma_dev_addr *dev_addr;
ic = conn->c_transport_data;
- dev_addr = &ic->i_cm_id->route.addr.dev_addr;
- rdma_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid);
- rdma_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid);
+ rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo->src_gid,
+ (union ib_gid *)&iinfo->dst_gid);
rds_ibdev = ic->rds_ibdev;
iinfo->max_send_wr = ic->i_send_ring.w_nr;
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 2064c3a35ef8..124c77e9d058 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -1139,10 +1139,10 @@ static int rfkill_fop_open(struct inode *inode, struct file *file)
return -ENOMEM;
}
-static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait)
+static __poll_t rfkill_fop_poll(struct file *file, poll_table *wait)
{
struct rfkill_data *data = file->private_data;
- unsigned int res = POLLOUT | POLLWRNORM;
+ __poll_t res = POLLOUT | POLLWRNORM;
poll_wait(file, &data->read_wait, wait);
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index dcd818fa837e..21ad6a3a465c 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -729,12 +729,12 @@ static int rxrpc_getsockopt(struct socket *sock, int level, int optname,
/*
* permit an RxRPC socket to be polled
*/
-static unsigned int rxrpc_poll(struct file *file, struct socket *sock,
+static __poll_t rxrpc_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
struct rxrpc_sock *rx = rxrpc_sk(sk);
- unsigned int mask;
+ __poll_t mask;
sock_poll_wait(file, sk_sleep(sk), wait);
mask = 0;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index a40fa53c93ef..356e387f82e7 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -984,13 +984,6 @@ int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw)
* This is used for tunneling the sctp_bindx() request through sctp_setsockopt()
* from userspace.
*
- * We don't use copy_from_user() for optimization: we first do the
- * sanity checks (buffer size -fast- and access check-healthy
- * pointer); if all of those succeed, then we can alloc the memory
- * (expensive operation) needed to copy the data to kernel. Then we do
- * the copying without checking the user space area
- * (__copy_from_user()).
- *
* On exit there is no need to do sockfd_put(), sys_setsockopt() does
* it.
*
@@ -1020,25 +1013,15 @@ static int sctp_setsockopt_bindx(struct sock *sk,
if (unlikely(addrs_size <= 0))
return -EINVAL;
- /* Check the user passed a healthy pointer. */
- if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
- return -EFAULT;
-
- /* Alloc space for the address array in kernel memory. */
- kaddrs = kmalloc(addrs_size, GFP_USER | __GFP_NOWARN);
- if (unlikely(!kaddrs))
- return -ENOMEM;
-
- if (__copy_from_user(kaddrs, addrs, addrs_size)) {
- kfree(kaddrs);
- return -EFAULT;
- }
+ kaddrs = vmemdup_user(addrs, addrs_size);
+ if (unlikely(IS_ERR(kaddrs)))
+ return PTR_ERR(kaddrs);
/* Walk through the addrs buffer and count the number of addresses. */
addr_buf = kaddrs;
while (walk_size < addrs_size) {
if (walk_size + sizeof(sa_family_t) > addrs_size) {
- kfree(kaddrs);
+ kvfree(kaddrs);
return -EINVAL;
}
@@ -1049,7 +1032,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
* causes the address buffer to overflow return EINVAL.
*/
if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
- kfree(kaddrs);
+ kvfree(kaddrs);
return -EINVAL;
}
addrcnt++;
@@ -1079,7 +1062,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
}
out:
- kfree(kaddrs);
+ kvfree(kaddrs);
return err;
}
@@ -1337,13 +1320,6 @@ out_free:
* land and invoking either sctp_connectx(). This is used for tunneling
* the sctp_connectx() request through sctp_setsockopt() from userspace.
*
- * We don't use copy_from_user() for optimization: we first do the
- * sanity checks (buffer size -fast- and access check-healthy
- * pointer); if all of those succeed, then we can alloc the memory
- * (expensive operation) needed to copy the data to kernel. Then we do
- * the copying without checking the user space area
- * (__copy_from_user()).
- *
* On exit there is no need to do sockfd_put(), sys_setsockopt() does
* it.
*
@@ -1359,7 +1335,6 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
sctp_assoc_t *assoc_id)
{
struct sockaddr *kaddrs;
- gfp_t gfp = GFP_KERNEL;
int err = 0;
pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n",
@@ -1368,24 +1343,12 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
if (unlikely(addrs_size <= 0))
return -EINVAL;
- /* Check the user passed a healthy pointer. */
- if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
- return -EFAULT;
-
- /* Alloc space for the address array in kernel memory. */
- if (sk->sk_socket->file)
- gfp = GFP_USER | __GFP_NOWARN;
- kaddrs = kmalloc(addrs_size, gfp);
- if (unlikely(!kaddrs))
- return -ENOMEM;
-
- if (__copy_from_user(kaddrs, addrs, addrs_size)) {
- err = -EFAULT;
- } else {
- err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id);
- }
+ kaddrs = vmemdup_user(addrs, addrs_size);
+ if (unlikely(IS_ERR(kaddrs)))
+ return PTR_ERR(kaddrs);
- kfree(kaddrs);
+ err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id);
+ kvfree(kaddrs);
return err;
}
@@ -7624,11 +7587,11 @@ out:
* here, again, by modeling the current TCP/UDP code. We don't have
* a good way to test with it yet.
*/
-unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
+__poll_t sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
struct sock *sk = sock->sk;
struct sctp_sock *sp = sctp_sk(sk);
- unsigned int mask;
+ __poll_t mask;
poll_wait(file, sk_sleep(sk), wait);
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 267e68379110..3583c8ab1bae 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -1138,7 +1138,7 @@ out:
return rc;
}
-static unsigned int smc_accept_poll(struct sock *parent)
+static __poll_t smc_accept_poll(struct sock *parent)
{
struct smc_sock *isk = smc_sk(parent);
int mask = 0;
@@ -1151,11 +1151,11 @@ static unsigned int smc_accept_poll(struct sock *parent)
return mask;
}
-static unsigned int smc_poll(struct file *file, struct socket *sock,
+static __poll_t smc_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
- unsigned int mask = 0;
+ __poll_t mask = 0;
struct smc_sock *smc;
int rc;
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index abf7ceb6690b..8ac51583a063 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -83,7 +83,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
struct smc_clc_msg_hdr *clcm = buf;
struct msghdr msg = {NULL, 0};
int reason_code = 0;
- struct kvec vec;
+ struct kvec vec = {buf, buflen};
int len, datlen;
int krflags;
@@ -91,12 +91,15 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
* so we don't consume any subsequent CLC message or payload data
* in the TCP byte stream
*/
- vec.iov_base = buf;
- vec.iov_len = buflen;
+ /*
+ * Caller must make sure that buflen is no less than
+ * sizeof(struct smc_clc_msg_hdr)
+ */
krflags = MSG_PEEK | MSG_WAITALL;
smc->clcsock->sk->sk_rcvtimeo = CLC_WAIT_TIME;
- len = kernel_recvmsg(smc->clcsock, &msg, &vec, 1,
- sizeof(struct smc_clc_msg_hdr), krflags);
+ iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1,
+ sizeof(struct smc_clc_msg_hdr));
+ len = sock_recvmsg(smc->clcsock, &msg, krflags);
if (signal_pending(current)) {
reason_code = -EINTR;
clc_sk->sk_err = EINTR;
@@ -129,12 +132,11 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
}
/* receive the complete CLC message */
- vec.iov_base = buf;
- vec.iov_len = buflen;
memset(&msg, 0, sizeof(struct msghdr));
+ iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1, buflen);
krflags = MSG_WAITALL;
smc->clcsock->sk->sk_rcvtimeo = CLC_WAIT_TIME;
- len = kernel_recvmsg(smc->clcsock, &msg, &vec, 1, datlen, krflags);
+ len = sock_recvmsg(smc->clcsock, &msg, krflags);
if (len < datlen || !smc_clc_msg_hdr_valid(clcm)) {
smc->sk.sk_err = EPROTO;
reason_code = -EPROTO;
diff --git a/net/socket.c b/net/socket.c
index 11cc2cd0f37b..a93c99b518ca 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -118,7 +118,7 @@ static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from);
static int sock_mmap(struct file *file, struct vm_area_struct *vma);
static int sock_close(struct inode *inode, struct file *file);
-static unsigned int sock_poll(struct file *file,
+static __poll_t sock_poll(struct file *file,
struct poll_table_struct *wait);
static long sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
#ifdef CONFIG_COMPAT
@@ -1115,9 +1115,9 @@ out_release:
EXPORT_SYMBOL(sock_create_lite);
/* No kernel lock held - perfect */
-static unsigned int sock_poll(struct file *file, poll_table *wait)
+static __poll_t sock_poll(struct file *file, poll_table *wait)
{
- unsigned int busy_flag = 0;
+ __poll_t busy_flag = 0;
struct socket *sock;
/*
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index e68943895be4..aa36dad32db1 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -930,10 +930,10 @@ out:
static DECLARE_WAIT_QUEUE_HEAD(queue_wait);
-static unsigned int cache_poll(struct file *filp, poll_table *wait,
+static __poll_t cache_poll(struct file *filp, poll_table *wait,
struct cache_detail *cd)
{
- unsigned int mask;
+ __poll_t mask;
struct cache_reader *rp = filp->private_data;
struct cache_queue *cq;
@@ -1501,7 +1501,7 @@ static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
return cache_write(filp, buf, count, ppos, cd);
}
-static unsigned int cache_poll_procfs(struct file *filp, poll_table *wait)
+static __poll_t cache_poll_procfs(struct file *filp, poll_table *wait)
{
struct cache_detail *cd = PDE_DATA(file_inode(filp));
@@ -1720,7 +1720,7 @@ static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
return cache_write(filp, buf, count, ppos, cd);
}
-static unsigned int cache_poll_pipefs(struct file *filp, poll_table *wait)
+static __poll_t cache_poll_pipefs(struct file *filp, poll_table *wait)
{
struct cache_detail *cd = RPC_I(file_inode(filp))->private;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index e2a4184f3c5d..6e432ecd7f99 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1376,22 +1376,6 @@ rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize
EXPORT_SYMBOL_GPL(rpc_setbufsize);
/**
- * rpc_protocol - Get transport protocol number for an RPC client
- * @clnt: RPC client to query
- *
- */
-int rpc_protocol(struct rpc_clnt *clnt)
-{
- int protocol;
-
- rcu_read_lock();
- protocol = rcu_dereference(clnt->cl_xprt)->prot;
- rcu_read_unlock();
- return protocol;
-}
-EXPORT_SYMBOL_GPL(rpc_protocol);
-
-/**
* rpc_net_ns - Get the network namespace for this RPC client
* @clnt: RPC client to query
*
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 7803f3b6aa53..5c4330325787 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -340,12 +340,12 @@ rpc_pipe_write(struct file *filp, const char __user *buf, size_t len, loff_t *of
return res;
}
-static unsigned int
+static __poll_t
rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait)
{
struct inode *inode = file_inode(filp);
struct rpc_inode *rpci = RPC_I(inode);
- unsigned int mask = POLLOUT | POLLWRNORM;
+ __poll_t mask = POLLOUT | POLLWRNORM;
poll_wait(filp, &rpci->waitq, wait);
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index b1b49edd7c4d..896691afbb1a 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -755,22 +755,20 @@ static void __rpc_execute(struct rpc_task *task)
void (*do_action)(struct rpc_task *);
/*
- * Execute any pending callback first.
+ * Perform the next FSM step or a pending callback.
+ *
+ * tk_action may be NULL if the task has been killed.
+ * In particular, note that rpc_killall_tasks may
+ * do this at any time, so beware when dereferencing.
*/
- do_action = task->tk_callback;
- task->tk_callback = NULL;
- if (do_action == NULL) {
- /*
- * Perform the next FSM step.
- * tk_action may be NULL if the task has been killed.
- * In particular, note that rpc_killall_tasks may
- * do this at any time, so beware when dereferencing.
- */
- do_action = task->tk_action;
- if (do_action == NULL)
- break;
+ do_action = task->tk_action;
+ if (task->tk_callback) {
+ do_action = task->tk_callback;
+ task->tk_callback = NULL;
}
- trace_rpc_task_run_action(task->tk_client, task, task->tk_action);
+ if (!do_action)
+ break;
+ trace_rpc_task_run_action(task->tk_client, task, do_action);
do_action(task);
/*
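
Read without the diff markers, the rewritten top of the __rpc_execute() loop is:

	do_action = task->tk_action;
	if (task->tk_callback) {		/* a pending callback wins... */
		do_action = task->tk_callback;
		task->tk_callback = NULL;	/* ...and is one-shot */
	}
	if (!do_action)		/* killed task: both pointers are NULL */
		break;
	trace_rpc_task_run_action(task->tk_client, task, do_action);
	do_action(task);

Besides flattening the nested branches, this fixes the tracepoint: it now logs do_action, the routine actually about to run, where the old code always logged tk_action even while a callback was executing.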
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index ff8e06cd067e..5570719e4787 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -338,8 +338,8 @@ static int svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr,
rqstp->rq_xprt_hlen = 0;
clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
- len = kernel_recvmsg(svsk->sk_sock, &msg, iov, nr, buflen,
- msg.msg_flags);
+ iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, iov, nr, buflen);
+ len = sock_recvmsg(svsk->sk_sock, &msg, msg.msg_flags);
/* If we read a full record, then assume there may be more
* data to read (stream based sockets only!)
*/
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 33b74fd84051..2436fd1125fc 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -940,8 +940,8 @@ static void xprt_timer(struct rpc_task *task)
if (task->tk_status != -ETIMEDOUT)
return;
- dprintk("RPC: %5u xprt_timer\n", task->tk_pid);
+ trace_xprt_timer(xprt, req->rq_xid, task->tk_status);
if (!req->rq_reply_bytes_recvd) {
if (xprt->ops->timer)
xprt->ops->timer(xprt, task);
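
Here and throughout the series, dprintk() call sites give way to static tracepoints whose definitions live in trace headers outside this excerpt. From its call site above, trace_xprt_timer() presumably follows the standard TRACE_EVENT() pattern, along these lines (a sketch for orientation, not the in-tree definition):

TRACE_EVENT(xprt_timer,
	TP_PROTO(struct rpc_xprt *xprt, __be32 xid, int status),
	TP_ARGS(xprt, xid, status),
	TP_STRUCT__entry(
		__field(u32, xid)
		__field(int, status)
	),
	TP_fast_assign(
		__entry->xid = be32_to_cpu(xid);
		__entry->status = status;
	),
	TP_printk("xid=0x%08x status=%d", __entry->xid, __entry->status)
);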
diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
index 8b818bb3518a..ed1a4a3065ee 100644
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -43,7 +43,6 @@ static int rpcrdma_bc_setup_rqst(struct rpcrdma_xprt *r_xprt,
req = rpcrdma_create_req(r_xprt);
if (IS_ERR(req))
return PTR_ERR(req);
- __set_bit(RPCRDMA_REQ_F_BACKCHANNEL, &req->rl_flags);
rb = rpcrdma_alloc_regbuf(RPCRDMA_HDRBUF_SIZE,
DMA_TO_DEVICE, GFP_KERNEL);
@@ -74,21 +73,13 @@ out_fail:
static int rpcrdma_bc_setup_reps(struct rpcrdma_xprt *r_xprt,
unsigned int count)
{
- struct rpcrdma_rep *rep;
int rc = 0;
while (count--) {
- rep = rpcrdma_create_rep(r_xprt);
- if (IS_ERR(rep)) {
- pr_err("RPC: %s: reply buffer alloc failed\n",
- __func__);
- rc = PTR_ERR(rep);
+ rc = rpcrdma_create_rep(r_xprt);
+ if (rc)
break;
- }
-
- rpcrdma_recv_buffer_put(rep);
}
-
return rc;
}
@@ -129,6 +120,7 @@ int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
rqst->rq_xprt = &r_xprt->rx_xprt;
INIT_LIST_HEAD(&rqst->rq_list);
INIT_LIST_HEAD(&rqst->rq_bc_list);
+ __set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
if (rpcrdma_bc_setup_rqst(r_xprt, rqst))
goto out_free;
@@ -148,7 +140,7 @@ int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
buffer->rb_bc_srv_max_requests = reqs;
request_module("svcrdma");
-
+ trace_xprtrdma_cb_setup(r_xprt, reqs);
return 0;
out_free:
@@ -196,13 +188,7 @@ size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *xprt)
return maxmsg - RPCRDMA_HDRLEN_MIN;
}
-/**
- * rpcrdma_bc_marshal_reply - Send backwards direction reply
- * @rqst: buffer containing RPC reply data
- *
- * Returns zero on success.
- */
-int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
+static int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
{
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
@@ -226,7 +212,46 @@ int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
if (rpcrdma_prepare_send_sges(r_xprt, req, RPCRDMA_HDRLEN_MIN,
&rqst->rq_snd_buf, rpcrdma_noch))
return -EIO;
+
+ trace_xprtrdma_cb_reply(rqst);
+ return 0;
+}
+
+/**
+ * xprt_rdma_bc_send_reply - marshal and send a backchannel reply
+ * @rqst: RPC rqst with a backchannel RPC reply in rq_snd_buf
+ *
+ * Caller holds the transport's write lock.
+ *
+ * Returns:
+ * %0 if the RPC message has been sent
+ * %-ENOTCONN if the caller should reconnect and call again
+ * %-EIO if a permanent error occurred and the request was not
+ * sent. Do not try to send this message again.
+ */
+int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst)
+{
+ struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
+ struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
+ int rc;
+
+ if (!xprt_connected(rqst->rq_xprt))
+ goto drop_connection;
+
+ rc = rpcrdma_bc_marshal_reply(rqst);
+ if (rc < 0)
+ goto failed_marshal;
+
+ if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
+ goto drop_connection;
return 0;
+
+failed_marshal:
+ if (rc != -ENOTCONN)
+ return rc;
+drop_connection:
+ xprt_disconnect_done(rqst->rq_xprt);
+ return -ENOTCONN;
}
/**
@@ -262,11 +287,6 @@ void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
dprintk("RPC: %s: freeing rqst %p (req %p)\n",
__func__, rqst, rpcr_to_rdmar(rqst));
- smp_mb__before_atomic();
- WARN_ON_ONCE(!test_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state));
- clear_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
- smp_mb__after_atomic();
-
spin_lock_bh(&xprt->bc_pa_lock);
list_add_tail(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
spin_unlock_bh(&xprt->bc_pa_lock);
@@ -274,7 +294,7 @@ void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
/**
* rpcrdma_bc_receive_call - Handle a backward direction call
- * @xprt: transport receiving the call
+ * @r_xprt: transport receiving the call
* @rep: receive buffer containing the call
*
* Operational assumptions:
@@ -313,7 +333,6 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
struct rpc_rqst, rq_bc_pa_list);
list_del(&rqst->rq_bc_pa_list);
spin_unlock(&xprt->bc_pa_lock);
- dprintk("RPC: %s: using rqst %p\n", __func__, rqst);
/* Prepare rqst */
rqst->rq_reply_bytes_recvd = 0;
@@ -321,7 +340,6 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
rqst->rq_xid = *p;
rqst->rq_private_buf.len = size;
- set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
buf = &rqst->rq_rcv_buf;
memset(buf, 0, sizeof(*buf));
@@ -335,12 +353,8 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
* the Upper Layer is done decoding it.
*/
req = rpcr_to_rdmar(rqst);
- dprintk("RPC: %s: attaching rep %p to req %p\n",
- __func__, rep, req);
req->rl_reply = rep;
-
- /* Defeat the retransmit detection logic in send_request */
- req->rl_connect_cookie = 0;
+ trace_xprtrdma_cb_call(rqst);
/* Queue rqst for ULP's callback service */
bc_serv = xprt->bc_serv;
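
Two related simplifications in this file: RPC_BC_PA_IN_USE is now set once when the backchannel rqst is created in xprt_rdma_bc_setup() and never toggled per call, and the RPCRDMA_REQ_F_BACKCHANNEL flag disappears altogether. Neither flag is needed to route a backchannel reply any more, because such an rqst never passes through xprt_rdma_allocate() and therefore has no send buffer; the transport.c hunk later in the series keys off exactly that:

	/* in xprt_rdma_send_request() */
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	if (unlikely(!rqst->rq_buffer))
		return xprt_rdma_bc_send_reply(rqst);
#endif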
diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c
index 29fc84c7ff98..d5f95bb39300 100644
--- a/net/sunrpc/xprtrdma/fmr_ops.c
+++ b/net/sunrpc/xprtrdma/fmr_ops.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2015 Oracle. All rights reserved.
+ * Copyright (c) 2015, 2017 Oracle. All rights reserved.
* Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
*/
@@ -47,7 +47,7 @@ fmr_is_supported(struct rpcrdma_ia *ia)
}
static int
-fmr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *mw)
+fmr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
{
static struct ib_fmr_attr fmr_attr = {
.max_pages = RPCRDMA_MAX_FMR_SGES,
@@ -55,106 +55,108 @@ fmr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *mw)
.page_shift = PAGE_SHIFT
};
- mw->fmr.fm_physaddrs = kcalloc(RPCRDMA_MAX_FMR_SGES,
+ mr->fmr.fm_physaddrs = kcalloc(RPCRDMA_MAX_FMR_SGES,
sizeof(u64), GFP_KERNEL);
- if (!mw->fmr.fm_physaddrs)
+ if (!mr->fmr.fm_physaddrs)
goto out_free;
- mw->mw_sg = kcalloc(RPCRDMA_MAX_FMR_SGES,
- sizeof(*mw->mw_sg), GFP_KERNEL);
- if (!mw->mw_sg)
+ mr->mr_sg = kcalloc(RPCRDMA_MAX_FMR_SGES,
+ sizeof(*mr->mr_sg), GFP_KERNEL);
+ if (!mr->mr_sg)
goto out_free;
- sg_init_table(mw->mw_sg, RPCRDMA_MAX_FMR_SGES);
+ sg_init_table(mr->mr_sg, RPCRDMA_MAX_FMR_SGES);
- mw->fmr.fm_mr = ib_alloc_fmr(ia->ri_pd, RPCRDMA_FMR_ACCESS_FLAGS,
+ mr->fmr.fm_mr = ib_alloc_fmr(ia->ri_pd, RPCRDMA_FMR_ACCESS_FLAGS,
&fmr_attr);
- if (IS_ERR(mw->fmr.fm_mr))
+ if (IS_ERR(mr->fmr.fm_mr))
goto out_fmr_err;
return 0;
out_fmr_err:
dprintk("RPC: %s: ib_alloc_fmr returned %ld\n", __func__,
- PTR_ERR(mw->fmr.fm_mr));
+ PTR_ERR(mr->fmr.fm_mr));
out_free:
- kfree(mw->mw_sg);
- kfree(mw->fmr.fm_physaddrs);
+ kfree(mr->mr_sg);
+ kfree(mr->fmr.fm_physaddrs);
return -ENOMEM;
}
static int
-__fmr_unmap(struct rpcrdma_mw *mw)
+__fmr_unmap(struct rpcrdma_mr *mr)
{
LIST_HEAD(l);
int rc;
- list_add(&mw->fmr.fm_mr->list, &l);
+ list_add(&mr->fmr.fm_mr->list, &l);
rc = ib_unmap_fmr(&l);
- list_del(&mw->fmr.fm_mr->list);
+ list_del(&mr->fmr.fm_mr->list);
return rc;
}
static void
-fmr_op_release_mr(struct rpcrdma_mw *r)
+fmr_op_release_mr(struct rpcrdma_mr *mr)
{
LIST_HEAD(unmap_list);
int rc;
/* Ensure MW is not on any rl_registered list */
- if (!list_empty(&r->mw_list))
- list_del(&r->mw_list);
+ if (!list_empty(&mr->mr_list))
+ list_del(&mr->mr_list);
- kfree(r->fmr.fm_physaddrs);
- kfree(r->mw_sg);
+ kfree(mr->fmr.fm_physaddrs);
+ kfree(mr->mr_sg);
/* In case this one was left mapped, try to unmap it
* to prevent dealloc_fmr from failing with EBUSY
*/
- rc = __fmr_unmap(r);
+ rc = __fmr_unmap(mr);
if (rc)
pr_err("rpcrdma: final ib_unmap_fmr for %p failed %i\n",
- r, rc);
+ mr, rc);
- rc = ib_dealloc_fmr(r->fmr.fm_mr);
+ rc = ib_dealloc_fmr(mr->fmr.fm_mr);
if (rc)
pr_err("rpcrdma: final ib_dealloc_fmr for %p returned %i\n",
- r, rc);
+ mr, rc);
- kfree(r);
+ kfree(mr);
}
/* Reset of a single FMR.
*/
static void
-fmr_op_recover_mr(struct rpcrdma_mw *mw)
+fmr_op_recover_mr(struct rpcrdma_mr *mr)
{
- struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
+ struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
int rc;
/* ORDER: invalidate first */
- rc = __fmr_unmap(mw);
-
- /* ORDER: then DMA unmap */
- ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
- mw->mw_sg, mw->mw_nents, mw->mw_dir);
+ rc = __fmr_unmap(mr);
if (rc)
goto out_release;
- rpcrdma_put_mw(r_xprt, mw);
+ /* ORDER: then DMA unmap */
+ rpcrdma_mr_unmap_and_put(mr);
+
r_xprt->rx_stats.mrs_recovered++;
return;
out_release:
- pr_err("rpcrdma: FMR reset failed (%d), %p released\n", rc, mw);
+ pr_err("rpcrdma: FMR reset failed (%d), %p released\n", rc, mr);
r_xprt->rx_stats.mrs_orphaned++;
- spin_lock(&r_xprt->rx_buf.rb_mwlock);
- list_del(&mw->mw_all);
- spin_unlock(&r_xprt->rx_buf.rb_mwlock);
+ trace_xprtrdma_dma_unmap(mr);
+ ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
+ mr->mr_sg, mr->mr_nents, mr->mr_dir);
+
+ spin_lock(&r_xprt->rx_buf.rb_mrlock);
+ list_del(&mr->mr_all);
+ spin_unlock(&r_xprt->rx_buf.rb_mrlock);
- fmr_op_release_mr(mw);
+ fmr_op_release_mr(mr);
}
static int
@@ -180,15 +182,15 @@ fmr_op_maxpages(struct rpcrdma_xprt *r_xprt)
*/
static struct rpcrdma_mr_seg *
fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
- int nsegs, bool writing, struct rpcrdma_mw **out)
+ int nsegs, bool writing, struct rpcrdma_mr **out)
{
struct rpcrdma_mr_seg *seg1 = seg;
int len, pageoff, i, rc;
- struct rpcrdma_mw *mw;
+ struct rpcrdma_mr *mr;
u64 *dma_pages;
- mw = rpcrdma_get_mw(r_xprt);
- if (!mw)
+ mr = rpcrdma_mr_get(r_xprt);
+ if (!mr)
return ERR_PTR(-ENOBUFS);
pageoff = offset_in_page(seg1->mr_offset);
@@ -199,12 +201,12 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
nsegs = RPCRDMA_MAX_FMR_SGES;
for (i = 0; i < nsegs;) {
if (seg->mr_page)
- sg_set_page(&mw->mw_sg[i],
+ sg_set_page(&mr->mr_sg[i],
seg->mr_page,
seg->mr_len,
offset_in_page(seg->mr_offset));
else
- sg_set_buf(&mw->mw_sg[i], seg->mr_offset,
+ sg_set_buf(&mr->mr_sg[i], seg->mr_offset,
seg->mr_len);
len += seg->mr_len;
++seg;
@@ -214,40 +216,38 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
break;
}
- mw->mw_dir = rpcrdma_data_dir(writing);
+ mr->mr_dir = rpcrdma_data_dir(writing);
- mw->mw_nents = ib_dma_map_sg(r_xprt->rx_ia.ri_device,
- mw->mw_sg, i, mw->mw_dir);
- if (!mw->mw_nents)
+ mr->mr_nents = ib_dma_map_sg(r_xprt->rx_ia.ri_device,
+ mr->mr_sg, i, mr->mr_dir);
+ if (!mr->mr_nents)
goto out_dmamap_err;
- for (i = 0, dma_pages = mw->fmr.fm_physaddrs; i < mw->mw_nents; i++)
- dma_pages[i] = sg_dma_address(&mw->mw_sg[i]);
- rc = ib_map_phys_fmr(mw->fmr.fm_mr, dma_pages, mw->mw_nents,
+ for (i = 0, dma_pages = mr->fmr.fm_physaddrs; i < mr->mr_nents; i++)
+ dma_pages[i] = sg_dma_address(&mr->mr_sg[i]);
+ rc = ib_map_phys_fmr(mr->fmr.fm_mr, dma_pages, mr->mr_nents,
dma_pages[0]);
if (rc)
goto out_maperr;
- mw->mw_handle = mw->fmr.fm_mr->rkey;
- mw->mw_length = len;
- mw->mw_offset = dma_pages[0] + pageoff;
+ mr->mr_handle = mr->fmr.fm_mr->rkey;
+ mr->mr_length = len;
+ mr->mr_offset = dma_pages[0] + pageoff;
- *out = mw;
+ *out = mr;
return seg;
out_dmamap_err:
pr_err("rpcrdma: failed to DMA map sg %p sg_nents %d\n",
- mw->mw_sg, i);
- rpcrdma_put_mw(r_xprt, mw);
+ mr->mr_sg, i);
+ rpcrdma_mr_put(mr);
return ERR_PTR(-EIO);
out_maperr:
pr_err("rpcrdma: ib_map_phys_fmr %u@0x%llx+%i (%d) status %i\n",
len, (unsigned long long)dma_pages[0],
- pageoff, mw->mw_nents, rc);
- ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
- mw->mw_sg, mw->mw_nents, mw->mw_dir);
- rpcrdma_put_mw(r_xprt, mw);
+ pageoff, mr->mr_nents, rc);
+ rpcrdma_mr_unmap_and_put(mr);
return ERR_PTR(-EIO);
}
@@ -256,13 +256,13 @@ out_maperr:
* Sleeps until it is safe for the host CPU to access the
* previously mapped memory regions.
*
- * Caller ensures that @mws is not empty before the call. This
+ * Caller ensures that @mrs is not empty before the call. This
* function empties the list.
*/
static void
-fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
+fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs)
{
- struct rpcrdma_mw *mw;
+ struct rpcrdma_mr *mr;
LIST_HEAD(unmap_list);
int rc;
@@ -271,10 +271,11 @@ fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
* ib_unmap_fmr() is slow, so use a single call instead
* of one call per mapped FMR.
*/
- list_for_each_entry(mw, mws, mw_list) {
+ list_for_each_entry(mr, mrs, mr_list) {
dprintk("RPC: %s: unmapping fmr %p\n",
- __func__, &mw->fmr);
- list_add_tail(&mw->fmr.fm_mr->list, &unmap_list);
+ __func__, &mr->fmr);
+ trace_xprtrdma_localinv(mr);
+ list_add_tail(&mr->fmr.fm_mr->list, &unmap_list);
}
r_xprt->rx_stats.local_inv_needed++;
rc = ib_unmap_fmr(&unmap_list);
@@ -284,14 +285,10 @@ fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
/* ORDER: Now DMA unmap all of the req's MRs, and return
* them to the free MW list.
*/
- while (!list_empty(mws)) {
- mw = rpcrdma_pop_mw(mws);
- dprintk("RPC: %s: DMA unmapping fmr %p\n",
- __func__, &mw->fmr);
- list_del(&mw->fmr.fm_mr->list);
- ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
- mw->mw_sg, mw->mw_nents, mw->mw_dir);
- rpcrdma_put_mw(r_xprt, mw);
+ while (!list_empty(mrs)) {
+ mr = rpcrdma_mr_pop(mrs);
+ list_del(&mr->fmr.fm_mr->list);
+ rpcrdma_mr_unmap_and_put(mr);
}
return;
@@ -299,10 +296,10 @@ fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
out_reset:
pr_err("rpcrdma: ib_unmap_fmr failed (%i)\n", rc);
- while (!list_empty(mws)) {
- mw = rpcrdma_pop_mw(mws);
- list_del(&mw->fmr.fm_mr->list);
- fmr_op_recover_mr(mw);
+ while (!list_empty(mrs)) {
+ mr = rpcrdma_mr_pop(mrs);
+ list_del(&mr->fmr.fm_mr->list);
+ fmr_op_recover_mr(mr);
}
}
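
Most of the fmr_ops.c churn (and the frwr_ops.c churn that follows) is a mechanical rename: struct rpcrdma_mw becomes struct rpcrdma_mr, matching the memory-region objects it actually tracks. The correspondences visible in this excerpt:

	struct rpcrdma_mw           ->  struct rpcrdma_mr
	rpcrdma_get_mw()            ->  rpcrdma_mr_get()
	rpcrdma_put_mw()            ->  rpcrdma_mr_put()
	rpcrdma_push_mw()           ->  rpcrdma_mr_push()
	rpcrdma_pop_mw()            ->  rpcrdma_mr_pop()
	rpcrdma_defer_mr_recovery() ->  rpcrdma_mr_defer_recovery()
	mw_sg, mw_nents, mw_dir     ->  mr_sg, mr_nents, mr_dir
	rb_mwlock, rb_mws           ->  rb_mrlock, rb_mrs

The one genuinely new helper is rpcrdma_mr_unmap_and_put(), whose body is not part of this excerpt. Judging from the open-coded sequences it replaces above, it plausibly amounts to:

void
rpcrdma_mr_unmap_and_put(struct rpcrdma_mr *mr)
{
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;

	trace_xprtrdma_dma_unmap(mr);
	ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
			mr->mr_sg, mr->mr_nents, mr->mr_dir);
	rpcrdma_mr_put(mr);	/* back onto the transport's free list */
}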
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index 773e66e10a15..90f688f19783 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -1,11 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2015 Oracle. All rights reserved.
+ * Copyright (c) 2015, 2017 Oracle. All rights reserved.
* Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
*/
/* Lightweight memory registration using Fast Registration Work
- * Requests (FRWR). Also referred to sometimes as FRMR mode.
+ * Requests (FRWR).
*
* FRWR features ordered asynchronous registration and deregistration
* of arbitrarily sized memory regions. This is the fastest and safest
@@ -15,9 +15,9 @@
/* Normal operation
*
* A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG
- * Work Request (frmr_op_map). When the RDMA operation is finished, this
+ * Work Request (frwr_op_map). When the RDMA operation is finished, this
* Memory Region is invalidated using a LOCAL_INV Work Request
- * (frmr_op_unmap).
+ * (frwr_op_unmap_sync).
*
* Typically these Work Requests are not signaled, and neither are RDMA
* SEND Work Requests (with the exception of signaling occasionally to
@@ -26,7 +26,7 @@
*
* As an optimization, frwr_op_unmap marks MRs INVALID before the
* LOCAL_INV WR is posted. If posting succeeds, the MR is placed on
- * rb_mws immediately so that no work (like managing a linked list
+ * rb_mrs immediately so that no work (like managing a linked list
* under a spinlock) is needed in the completion upcall.
*
* But this means that frwr_op_map() can occasionally encounter an MR
@@ -60,7 +60,7 @@
* When frwr_op_map encounters FLUSHED and VALID MRs, they are recovered
* with ib_dereg_mr and then are re-initialized. Because MR recovery
* allocates fresh resources, it is deferred to a workqueue, and the
- * recovered MRs are placed back on the rb_mws list when recovery is
+ * recovered MRs are placed back on the rb_mrs list when recovery is
* complete. frwr_op_map allocates another MR for the current RPC while
* the broken MR is reset.
*
@@ -96,26 +96,26 @@ out_not_supported:
}
static int
-frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
+frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
{
- unsigned int depth = ia->ri_max_frmr_depth;
- struct rpcrdma_frmr *f = &r->frmr;
+ unsigned int depth = ia->ri_max_frwr_depth;
+ struct rpcrdma_frwr *frwr = &mr->frwr;
int rc;
- f->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, depth);
- if (IS_ERR(f->fr_mr))
+ frwr->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, depth);
+ if (IS_ERR(frwr->fr_mr))
goto out_mr_err;
- r->mw_sg = kcalloc(depth, sizeof(*r->mw_sg), GFP_KERNEL);
- if (!r->mw_sg)
+ mr->mr_sg = kcalloc(depth, sizeof(*mr->mr_sg), GFP_KERNEL);
+ if (!mr->mr_sg)
goto out_list_err;
- sg_init_table(r->mw_sg, depth);
- init_completion(&f->fr_linv_done);
+ sg_init_table(mr->mr_sg, depth);
+ init_completion(&frwr->fr_linv_done);
return 0;
out_mr_err:
- rc = PTR_ERR(f->fr_mr);
+ rc = PTR_ERR(frwr->fr_mr);
dprintk("RPC: %s: ib_alloc_mr status %i\n",
__func__, rc);
return rc;
@@ -124,83 +124,85 @@ out_list_err:
rc = -ENOMEM;
dprintk("RPC: %s: sg allocation failure\n",
__func__);
- ib_dereg_mr(f->fr_mr);
+ ib_dereg_mr(frwr->fr_mr);
return rc;
}
static void
-frwr_op_release_mr(struct rpcrdma_mw *r)
+frwr_op_release_mr(struct rpcrdma_mr *mr)
{
int rc;
- /* Ensure MW is not on any rl_registered list */
- if (!list_empty(&r->mw_list))
- list_del(&r->mw_list);
+ /* Ensure MR is not on any rl_registered list */
+ if (!list_empty(&mr->mr_list))
+ list_del(&mr->mr_list);
- rc = ib_dereg_mr(r->frmr.fr_mr);
+ rc = ib_dereg_mr(mr->frwr.fr_mr);
if (rc)
pr_err("rpcrdma: final ib_dereg_mr for %p returned %i\n",
- r, rc);
- kfree(r->mw_sg);
- kfree(r);
+ mr, rc);
+ kfree(mr->mr_sg);
+ kfree(mr);
}
static int
-__frwr_reset_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
+__frwr_mr_reset(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
{
- struct rpcrdma_frmr *f = &r->frmr;
+ struct rpcrdma_frwr *frwr = &mr->frwr;
int rc;
- rc = ib_dereg_mr(f->fr_mr);
+ rc = ib_dereg_mr(frwr->fr_mr);
if (rc) {
pr_warn("rpcrdma: ib_dereg_mr status %d, frwr %p orphaned\n",
- rc, r);
+ rc, mr);
return rc;
}
- f->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype,
- ia->ri_max_frmr_depth);
- if (IS_ERR(f->fr_mr)) {
+ frwr->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype,
+ ia->ri_max_frwr_depth);
+ if (IS_ERR(frwr->fr_mr)) {
pr_warn("rpcrdma: ib_alloc_mr status %ld, frwr %p orphaned\n",
- PTR_ERR(f->fr_mr), r);
- return PTR_ERR(f->fr_mr);
+ PTR_ERR(frwr->fr_mr), mr);
+ return PTR_ERR(frwr->fr_mr);
}
- dprintk("RPC: %s: recovered FRMR %p\n", __func__, f);
- f->fr_state = FRMR_IS_INVALID;
+ dprintk("RPC: %s: recovered FRWR %p\n", __func__, frwr);
+ frwr->fr_state = FRWR_IS_INVALID;
return 0;
}
-/* Reset of a single FRMR. Generate a fresh rkey by replacing the MR.
+/* Reset of a single FRWR. Generate a fresh rkey by replacing the MR.
*/
static void
-frwr_op_recover_mr(struct rpcrdma_mw *mw)
+frwr_op_recover_mr(struct rpcrdma_mr *mr)
{
- enum rpcrdma_frmr_state state = mw->frmr.fr_state;
- struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
+ enum rpcrdma_frwr_state state = mr->frwr.fr_state;
+ struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
int rc;
- rc = __frwr_reset_mr(ia, mw);
- if (state != FRMR_FLUSHED_LI)
+ rc = __frwr_mr_reset(ia, mr);
+ if (state != FRWR_FLUSHED_LI) {
+ trace_xprtrdma_dma_unmap(mr);
ib_dma_unmap_sg(ia->ri_device,
- mw->mw_sg, mw->mw_nents, mw->mw_dir);
+ mr->mr_sg, mr->mr_nents, mr->mr_dir);
+ }
if (rc)
goto out_release;
- rpcrdma_put_mw(r_xprt, mw);
+ rpcrdma_mr_put(mr);
r_xprt->rx_stats.mrs_recovered++;
return;
out_release:
- pr_err("rpcrdma: FRMR reset failed %d, %p release\n", rc, mw);
+ pr_err("rpcrdma: FRWR reset failed %d, %p release\n", rc, mr);
r_xprt->rx_stats.mrs_orphaned++;
- spin_lock(&r_xprt->rx_buf.rb_mwlock);
- list_del(&mw->mw_all);
- spin_unlock(&r_xprt->rx_buf.rb_mwlock);
+ spin_lock(&r_xprt->rx_buf.rb_mrlock);
+ list_del(&mr->mr_all);
+ spin_unlock(&r_xprt->rx_buf.rb_mrlock);
- frwr_op_release_mr(mw);
+ frwr_op_release_mr(mr);
}
static int
@@ -214,31 +216,31 @@ frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
ia->ri_mrtype = IB_MR_TYPE_SG_GAPS;
- ia->ri_max_frmr_depth =
+ ia->ri_max_frwr_depth =
min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
attrs->max_fast_reg_page_list_len);
dprintk("RPC: %s: device's max FR page list len = %u\n",
- __func__, ia->ri_max_frmr_depth);
-
- /* Add room for frmr register and invalidate WRs.
- * 1. FRMR reg WR for head
- * 2. FRMR invalidate WR for head
- * 3. N FRMR reg WRs for pagelist
- * 4. N FRMR invalidate WRs for pagelist
- * 5. FRMR reg WR for tail
- * 6. FRMR invalidate WR for tail
+ __func__, ia->ri_max_frwr_depth);
+
+ /* Add room for frwr register and invalidate WRs.
+ * 1. FRWR reg WR for head
+ * 2. FRWR invalidate WR for head
+ * 3. N FRWR reg WRs for pagelist
+ * 4. N FRWR invalidate WRs for pagelist
+ * 5. FRWR reg WR for tail
+ * 6. FRWR invalidate WR for tail
* 7. The RDMA_SEND WR
*/
depth = 7;
- /* Calculate N if the device max FRMR depth is smaller than
+ /* Calculate N if the device max FRWR depth is smaller than
* RPCRDMA_MAX_DATA_SEGS.
*/
- if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
- delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frmr_depth;
+ if (ia->ri_max_frwr_depth < RPCRDMA_MAX_DATA_SEGS) {
+ delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frwr_depth;
do {
- depth += 2; /* FRMR reg + invalidate */
- delta -= ia->ri_max_frmr_depth;
+ depth += 2; /* FRWR reg + invalidate */
+ delta -= ia->ri_max_frwr_depth;
} while (delta > 0);
}
@@ -252,7 +254,7 @@ frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
}
ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
- ia->ri_max_frmr_depth);
+ ia->ri_max_frwr_depth);
return 0;
}
@@ -265,7 +267,7 @@ frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
- RPCRDMA_MAX_HDR_SEGS * ia->ri_max_frmr_depth);
+ RPCRDMA_MAX_HDR_SEGS * ia->ri_max_frwr_depth);
}
static void
@@ -286,16 +288,16 @@ __frwr_sendcompletion_flush(struct ib_wc *wc, const char *wr)
static void
frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
- struct rpcrdma_frmr *frmr;
- struct ib_cqe *cqe;
+ struct ib_cqe *cqe = wc->wr_cqe;
+ struct rpcrdma_frwr *frwr =
+ container_of(cqe, struct rpcrdma_frwr, fr_cqe);
/* WARNING: Only wr_cqe and status are reliable at this point */
if (wc->status != IB_WC_SUCCESS) {
- cqe = wc->wr_cqe;
- frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
- frmr->fr_state = FRMR_FLUSHED_FR;
+ frwr->fr_state = FRWR_FLUSHED_FR;
__frwr_sendcompletion_flush(wc, "fastreg");
}
+ trace_xprtrdma_wc_fastreg(wc, frwr);
}
/**
@@ -307,16 +309,16 @@ frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
static void
frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
- struct rpcrdma_frmr *frmr;
- struct ib_cqe *cqe;
+ struct ib_cqe *cqe = wc->wr_cqe;
+ struct rpcrdma_frwr *frwr = container_of(cqe, struct rpcrdma_frwr,
+ fr_cqe);
/* WARNING: Only wr_cqe and status are reliable at this point */
if (wc->status != IB_WC_SUCCESS) {
- cqe = wc->wr_cqe;
- frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
- frmr->fr_state = FRMR_FLUSHED_LI;
+ frwr->fr_state = FRWR_FLUSHED_LI;
__frwr_sendcompletion_flush(wc, "localinv");
}
+ trace_xprtrdma_wc_li(wc, frwr);
}
/**
@@ -329,17 +331,17 @@ frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
static void
frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
- struct rpcrdma_frmr *frmr;
- struct ib_cqe *cqe;
+ struct ib_cqe *cqe = wc->wr_cqe;
+ struct rpcrdma_frwr *frwr = container_of(cqe, struct rpcrdma_frwr,
+ fr_cqe);
/* WARNING: Only wr_cqe and status are reliable at this point */
- cqe = wc->wr_cqe;
- frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
if (wc->status != IB_WC_SUCCESS) {
- frmr->fr_state = FRMR_FLUSHED_LI;
+ frwr->fr_state = FRWR_FLUSHED_LI;
__frwr_sendcompletion_flush(wc, "localinv");
}
- complete(&frmr->fr_linv_done);
+ complete(&frwr->fr_linv_done);
+ trace_xprtrdma_wc_li_wake(wc, frwr);
}
/* Post a REG_MR Work Request to register a memory region
@@ -347,41 +349,39 @@ frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
*/
static struct rpcrdma_mr_seg *
frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
- int nsegs, bool writing, struct rpcrdma_mw **out)
+ int nsegs, bool writing, struct rpcrdma_mr **out)
{
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
bool holes_ok = ia->ri_mrtype == IB_MR_TYPE_SG_GAPS;
- struct rpcrdma_mw *mw;
- struct rpcrdma_frmr *frmr;
- struct ib_mr *mr;
+ struct rpcrdma_frwr *frwr;
+ struct rpcrdma_mr *mr;
+ struct ib_mr *ibmr;
struct ib_reg_wr *reg_wr;
struct ib_send_wr *bad_wr;
int rc, i, n;
u8 key;
- mw = NULL;
+ mr = NULL;
do {
- if (mw)
- rpcrdma_defer_mr_recovery(mw);
- mw = rpcrdma_get_mw(r_xprt);
- if (!mw)
+ if (mr)
+ rpcrdma_mr_defer_recovery(mr);
+ mr = rpcrdma_mr_get(r_xprt);
+ if (!mr)
return ERR_PTR(-ENOBUFS);
- } while (mw->frmr.fr_state != FRMR_IS_INVALID);
- frmr = &mw->frmr;
- frmr->fr_state = FRMR_IS_VALID;
- mr = frmr->fr_mr;
- reg_wr = &frmr->fr_regwr;
-
- if (nsegs > ia->ri_max_frmr_depth)
- nsegs = ia->ri_max_frmr_depth;
+ } while (mr->frwr.fr_state != FRWR_IS_INVALID);
+ frwr = &mr->frwr;
+ frwr->fr_state = FRWR_IS_VALID;
+
+ if (nsegs > ia->ri_max_frwr_depth)
+ nsegs = ia->ri_max_frwr_depth;
for (i = 0; i < nsegs;) {
if (seg->mr_page)
- sg_set_page(&mw->mw_sg[i],
+ sg_set_page(&mr->mr_sg[i],
seg->mr_page,
seg->mr_len,
offset_in_page(seg->mr_offset));
else
- sg_set_buf(&mw->mw_sg[i], seg->mr_offset,
+ sg_set_buf(&mr->mr_sg[i], seg->mr_offset,
seg->mr_len);
++seg;
@@ -392,30 +392,29 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
break;
}
- mw->mw_dir = rpcrdma_data_dir(writing);
+ mr->mr_dir = rpcrdma_data_dir(writing);
- mw->mw_nents = ib_dma_map_sg(ia->ri_device, mw->mw_sg, i, mw->mw_dir);
- if (!mw->mw_nents)
+ mr->mr_nents = ib_dma_map_sg(ia->ri_device, mr->mr_sg, i, mr->mr_dir);
+ if (!mr->mr_nents)
goto out_dmamap_err;
- n = ib_map_mr_sg(mr, mw->mw_sg, mw->mw_nents, NULL, PAGE_SIZE);
- if (unlikely(n != mw->mw_nents))
+ ibmr = frwr->fr_mr;
+ n = ib_map_mr_sg(ibmr, mr->mr_sg, mr->mr_nents, NULL, PAGE_SIZE);
+ if (unlikely(n != mr->mr_nents))
goto out_mapmr_err;
- dprintk("RPC: %s: Using frmr %p to map %u segments (%llu bytes)\n",
- __func__, frmr, mw->mw_nents, mr->length);
-
- key = (u8)(mr->rkey & 0x000000FF);
- ib_update_fast_reg_key(mr, ++key);
+ key = (u8)(ibmr->rkey & 0x000000FF);
+ ib_update_fast_reg_key(ibmr, ++key);
+ reg_wr = &frwr->fr_regwr;
reg_wr->wr.next = NULL;
reg_wr->wr.opcode = IB_WR_REG_MR;
- frmr->fr_cqe.done = frwr_wc_fastreg;
- reg_wr->wr.wr_cqe = &frmr->fr_cqe;
+ frwr->fr_cqe.done = frwr_wc_fastreg;
+ reg_wr->wr.wr_cqe = &frwr->fr_cqe;
reg_wr->wr.num_sge = 0;
reg_wr->wr.send_flags = 0;
- reg_wr->mr = mr;
- reg_wr->key = mr->rkey;
+ reg_wr->mr = ibmr;
+ reg_wr->key = ibmr->rkey;
reg_wr->access = writing ?
IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
IB_ACCESS_REMOTE_READ;
@@ -424,47 +423,64 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
if (rc)
goto out_senderr;
- mw->mw_handle = mr->rkey;
- mw->mw_length = mr->length;
- mw->mw_offset = mr->iova;
+ mr->mr_handle = ibmr->rkey;
+ mr->mr_length = ibmr->length;
+ mr->mr_offset = ibmr->iova;
- *out = mw;
+ *out = mr;
return seg;
out_dmamap_err:
pr_err("rpcrdma: failed to DMA map sg %p sg_nents %d\n",
- mw->mw_sg, i);
- frmr->fr_state = FRMR_IS_INVALID;
- rpcrdma_put_mw(r_xprt, mw);
+ mr->mr_sg, i);
+ frwr->fr_state = FRWR_IS_INVALID;
+ rpcrdma_mr_put(mr);
return ERR_PTR(-EIO);
out_mapmr_err:
pr_err("rpcrdma: failed to map mr %p (%d/%d)\n",
- frmr->fr_mr, n, mw->mw_nents);
- rpcrdma_defer_mr_recovery(mw);
+ frwr->fr_mr, n, mr->mr_nents);
+ rpcrdma_mr_defer_recovery(mr);
return ERR_PTR(-EIO);
out_senderr:
- pr_err("rpcrdma: FRMR registration ib_post_send returned %i\n", rc);
- rpcrdma_defer_mr_recovery(mw);
+ pr_err("rpcrdma: FRWR registration ib_post_send returned %i\n", rc);
+ rpcrdma_mr_defer_recovery(mr);
return ERR_PTR(-ENOTCONN);
}
+/* Handle a remotely invalidated mr on the @mrs list
+ */
+static void
+frwr_op_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
+{
+ struct rpcrdma_mr *mr;
+
+ list_for_each_entry(mr, mrs, mr_list)
+ if (mr->mr_handle == rep->rr_inv_rkey) {
+ list_del(&mr->mr_list);
+ trace_xprtrdma_remoteinv(mr);
+ mr->frwr.fr_state = FRWR_IS_INVALID;
+ rpcrdma_mr_unmap_and_put(mr);
+ break; /* only one invalidated MR per RPC */
+ }
+}
+
/* Invalidate all memory regions that were registered for "req".
*
* Sleeps until it is safe for the host CPU to access the
* previously mapped memory regions.
*
- * Caller ensures that @mws is not empty before the call. This
+ * Caller ensures that @mrs is not empty before the call. This
* function empties the list.
*/
static void
-frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
+frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs)
{
struct ib_send_wr *first, **prev, *last, *bad_wr;
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
- struct rpcrdma_frmr *f;
- struct rpcrdma_mw *mw;
+ struct rpcrdma_frwr *frwr;
+ struct rpcrdma_mr *mr;
int count, rc;
/* ORDER: Invalidate all of the MRs first
@@ -472,31 +488,27 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
* Chain the LOCAL_INV Work Requests and post them with
* a single ib_post_send() call.
*/
- f = NULL;
+ frwr = NULL;
count = 0;
prev = &first;
- list_for_each_entry(mw, mws, mw_list) {
- mw->frmr.fr_state = FRMR_IS_INVALID;
+ list_for_each_entry(mr, mrs, mr_list) {
+ mr->frwr.fr_state = FRWR_IS_INVALID;
- if (mw->mw_flags & RPCRDMA_MW_F_RI)
- continue;
+ frwr = &mr->frwr;
+ trace_xprtrdma_localinv(mr);
- f = &mw->frmr;
- dprintk("RPC: %s: invalidating frmr %p\n",
- __func__, f);
-
- f->fr_cqe.done = frwr_wc_localinv;
- last = &f->fr_invwr;
+ frwr->fr_cqe.done = frwr_wc_localinv;
+ last = &frwr->fr_invwr;
memset(last, 0, sizeof(*last));
- last->wr_cqe = &f->fr_cqe;
+ last->wr_cqe = &frwr->fr_cqe;
last->opcode = IB_WR_LOCAL_INV;
- last->ex.invalidate_rkey = mw->mw_handle;
+ last->ex.invalidate_rkey = mr->mr_handle;
count++;
*prev = last;
prev = &last->next;
}
- if (!f)
+ if (!frwr)
goto unmap;
/* Strong send queue ordering guarantees that when the
@@ -504,8 +516,8 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
* are complete.
*/
last->send_flags = IB_SEND_SIGNALED;
- f->fr_cqe.done = frwr_wc_localinv_wake;
- reinit_completion(&f->fr_linv_done);
+ frwr->fr_cqe.done = frwr_wc_localinv_wake;
+ reinit_completion(&frwr->fr_linv_done);
/* Transport disconnect drains the receive CQ before it
* replaces the QP. The RPC reply handler won't call us
@@ -515,36 +527,32 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
bad_wr = NULL;
rc = ib_post_send(ia->ri_id->qp, first, &bad_wr);
if (bad_wr != first)
- wait_for_completion(&f->fr_linv_done);
+ wait_for_completion(&frwr->fr_linv_done);
if (rc)
goto reset_mrs;
/* ORDER: Now DMA unmap all of the MRs, and return
- * them to the free MW list.
+ * them to the free MR list.
*/
unmap:
- while (!list_empty(mws)) {
- mw = rpcrdma_pop_mw(mws);
- dprintk("RPC: %s: DMA unmapping frmr %p\n",
- __func__, &mw->frmr);
- ib_dma_unmap_sg(ia->ri_device,
- mw->mw_sg, mw->mw_nents, mw->mw_dir);
- rpcrdma_put_mw(r_xprt, mw);
+ while (!list_empty(mrs)) {
+ mr = rpcrdma_mr_pop(mrs);
+ rpcrdma_mr_unmap_and_put(mr);
}
return;
reset_mrs:
- pr_err("rpcrdma: FRMR invalidate ib_post_send returned %i\n", rc);
+ pr_err("rpcrdma: FRWR invalidate ib_post_send returned %i\n", rc);
/* Find and reset the MRs in the LOCAL_INV WRs that did not
* get posted.
*/
while (bad_wr) {
- f = container_of(bad_wr, struct rpcrdma_frmr,
- fr_invwr);
- mw = container_of(f, struct rpcrdma_mw, frmr);
+ frwr = container_of(bad_wr, struct rpcrdma_frwr,
+ fr_invwr);
+ mr = container_of(frwr, struct rpcrdma_mr, frwr);
- __frwr_reset_mr(ia, mw);
+ __frwr_mr_reset(ia, mr);
bad_wr = bad_wr->next;
}
@@ -553,6 +561,7 @@ reset_mrs:
const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
.ro_map = frwr_op_map,
+ .ro_reminv = frwr_op_reminv,
.ro_unmap_sync = frwr_op_unmap_sync,
.ro_recover_mr = frwr_op_recover_mr,
.ro_open = frwr_op_open,
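
frwr_op_reminv(), wired up as the new ro_reminv method just above, replaces a two-phase scheme: previously the reply path only tagged a remotely invalidated MR with RPCRDMA_MW_F_RI (see the rpcrdma_mark_remote_invalidation() removal in rpc_rdma.c below), and frwr_op_unmap_sync() skipped that MR later. Now the deferred-completion path invokes the method directly whenever the Receive completion carries an invalidate rkey, and the matching MR is unmapped and returned to the free list on the spot:

	/* rpcrdma_deferred_completion(), after this series */
	if (rep->rr_wc_flags & IB_WC_WITH_INVALIDATE)
		r_xprt->rx_ia.ri_ops->ro_reminv(rep, &req->rl_registered);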
diff --git a/net/sunrpc/xprtrdma/module.c b/net/sunrpc/xprtrdma/module.c
index 560712bd9fa2..a762d192372b 100644
--- a/net/sunrpc/xprtrdma/module.c
+++ b/net/sunrpc/xprtrdma/module.c
@@ -1,18 +1,20 @@
/*
- * Copyright (c) 2015 Oracle. All rights reserved.
+ * Copyright (c) 2015, 2017 Oracle. All rights reserved.
*/
/* rpcrdma.ko module initialization
*/
+#include <linux/types.h>
+#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sunrpc/svc_rdma.h>
-#include "xprt_rdma.h"
-#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
-# define RPCDBG_FACILITY RPCDBG_TRANS
-#endif
+#include <asm/swab.h>
+
+#define CREATE_TRACE_POINTS
+#include "xprt_rdma.h"
MODULE_AUTHOR("Open Grid Computing and Network Appliance, Inc.");
MODULE_DESCRIPTION("RPC/RDMA Transport");
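
module.c adopts the standard tracepoint-instantiation idiom: exactly one translation unit per module defines CREATE_TRACE_POINTS before including the header that declares the trace events, which expands the declarations into definitions; every other file includes the same header plainly. Here the event declarations are evidently reached through xprt_rdma.h. Schematically (assuming that include chain; the trace header itself is not in this excerpt):

	/* in exactly one .c file of the module */
	#define CREATE_TRACE_POINTS
	#include "xprt_rdma.h"	/* pulls in the events header */

	/* in every other file */
	#include "xprt_rdma.h"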
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index a3f2ab283aeb..162e5dd82466 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -292,15 +292,15 @@ encode_item_not_present(struct xdr_stream *xdr)
}
static void
-xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mw *mw)
+xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mr *mr)
{
- *iptr++ = cpu_to_be32(mw->mw_handle);
- *iptr++ = cpu_to_be32(mw->mw_length);
- xdr_encode_hyper(iptr, mw->mw_offset);
+ *iptr++ = cpu_to_be32(mr->mr_handle);
+ *iptr++ = cpu_to_be32(mr->mr_length);
+ xdr_encode_hyper(iptr, mr->mr_offset);
}
static int
-encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mw *mw)
+encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr)
{
__be32 *p;
@@ -308,12 +308,12 @@ encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mw *mw)
if (unlikely(!p))
return -EMSGSIZE;
- xdr_encode_rdma_segment(p, mw);
+ xdr_encode_rdma_segment(p, mr);
return 0;
}
static int
-encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mw *mw,
+encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr,
u32 position)
{
__be32 *p;
@@ -324,7 +324,7 @@ encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mw *mw,
*p++ = xdr_one; /* Item present */
*p++ = cpu_to_be32(position);
- xdr_encode_rdma_segment(p, mw);
+ xdr_encode_rdma_segment(p, mr);
return 0;
}
@@ -348,7 +348,7 @@ rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
{
struct xdr_stream *xdr = &req->rl_stream;
struct rpcrdma_mr_seg *seg;
- struct rpcrdma_mw *mw;
+ struct rpcrdma_mr *mr;
unsigned int pos;
int nsegs;
@@ -363,21 +363,17 @@ rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
do {
seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
- false, &mw);
+ false, &mr);
if (IS_ERR(seg))
return PTR_ERR(seg);
- rpcrdma_push_mw(mw, &req->rl_registered);
+ rpcrdma_mr_push(mr, &req->rl_registered);
- if (encode_read_segment(xdr, mw, pos) < 0)
+ if (encode_read_segment(xdr, mr, pos) < 0)
return -EMSGSIZE;
- dprintk("RPC: %5u %s: pos %u %u@0x%016llx:0x%08x (%s)\n",
- rqst->rq_task->tk_pid, __func__, pos,
- mw->mw_length, (unsigned long long)mw->mw_offset,
- mw->mw_handle, mw->mw_nents < nsegs ? "more" : "last");
-
+ trace_xprtrdma_read_chunk(rqst->rq_task, pos, mr, nsegs);
r_xprt->rx_stats.read_chunk_count++;
- nsegs -= mw->mw_nents;
+ nsegs -= mr->mr_nents;
} while (nsegs);
return 0;
@@ -404,7 +400,7 @@ rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
{
struct xdr_stream *xdr = &req->rl_stream;
struct rpcrdma_mr_seg *seg;
- struct rpcrdma_mw *mw;
+ struct rpcrdma_mr *mr;
int nsegs, nchunks;
__be32 *segcount;
@@ -425,23 +421,19 @@ rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
nchunks = 0;
do {
seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
- true, &mw);
+ true, &mr);
if (IS_ERR(seg))
return PTR_ERR(seg);
- rpcrdma_push_mw(mw, &req->rl_registered);
+ rpcrdma_mr_push(mr, &req->rl_registered);
- if (encode_rdma_segment(xdr, mw) < 0)
+ if (encode_rdma_segment(xdr, mr) < 0)
return -EMSGSIZE;
- dprintk("RPC: %5u %s: %u@0x016%llx:0x%08x (%s)\n",
- rqst->rq_task->tk_pid, __func__,
- mw->mw_length, (unsigned long long)mw->mw_offset,
- mw->mw_handle, mw->mw_nents < nsegs ? "more" : "last");
-
+ trace_xprtrdma_write_chunk(rqst->rq_task, mr, nsegs);
r_xprt->rx_stats.write_chunk_count++;
- r_xprt->rx_stats.total_rdma_request += seg->mr_len;
+ r_xprt->rx_stats.total_rdma_request += mr->mr_length;
nchunks++;
- nsegs -= mw->mw_nents;
+ nsegs -= mr->mr_nents;
} while (nsegs);
/* Update count of segments in this Write chunk */
@@ -468,7 +460,7 @@ rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
{
struct xdr_stream *xdr = &req->rl_stream;
struct rpcrdma_mr_seg *seg;
- struct rpcrdma_mw *mw;
+ struct rpcrdma_mr *mr;
int nsegs, nchunks;
__be32 *segcount;
@@ -487,23 +479,19 @@ rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
nchunks = 0;
do {
seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
- true, &mw);
+ true, &mr);
if (IS_ERR(seg))
return PTR_ERR(seg);
- rpcrdma_push_mw(mw, &req->rl_registered);
+ rpcrdma_mr_push(mr, &req->rl_registered);
- if (encode_rdma_segment(xdr, mw) < 0)
+ if (encode_rdma_segment(xdr, mr) < 0)
return -EMSGSIZE;
- dprintk("RPC: %5u %s: %u@0x%016llx:0x%08x (%s)\n",
- rqst->rq_task->tk_pid, __func__,
- mw->mw_length, (unsigned long long)mw->mw_offset,
- mw->mw_handle, mw->mw_nents < nsegs ? "more" : "last");
-
+ trace_xprtrdma_reply_chunk(rqst->rq_task, mr, nsegs);
r_xprt->rx_stats.reply_chunk_count++;
- r_xprt->rx_stats.total_rdma_request += seg->mr_len;
+ r_xprt->rx_stats.total_rdma_request += mr->mr_length;
nchunks++;
- nsegs -= mw->mw_nents;
+ nsegs -= mr->mr_nents;
} while (nsegs);
/* Update count of segments in the Reply chunk */
@@ -524,9 +512,6 @@ rpcrdma_unmap_sendctx(struct rpcrdma_sendctx *sc)
struct ib_sge *sge;
unsigned int count;
- dprintk("RPC: %s: unmapping %u sges for sc=%p\n",
- __func__, sc->sc_unmap_count, sc);
-
/* The first two SGEs contain the transport header and
* the inline buffer. These are always left mapped so
* they can be cheaply re-used.
@@ -754,11 +739,6 @@ rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
__be32 *p;
int ret;
-#if defined(CONFIG_SUNRPC_BACKCHANNEL)
- if (test_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state))
- return rpcrdma_bc_marshal_reply(rqst);
-#endif
-
rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
xdr_init_encode(xdr, &req->rl_hdrbuf,
req->rl_rdmabuf->rg_base);
@@ -821,6 +801,17 @@ rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
rtype = rpcrdma_areadch;
}
+ /* If this is a retransmit, discard previously registered
+ * chunks. Very likely the connection has been replaced,
+ * so these registrations are invalid and unusable.
+ */
+ while (unlikely(!list_empty(&req->rl_registered))) {
+ struct rpcrdma_mr *mr;
+
+ mr = rpcrdma_mr_pop(&req->rl_registered);
+ rpcrdma_mr_defer_recovery(mr);
+ }
+
/* This implementation supports the following combinations
* of chunk lists in one RPC-over-RDMA Call message:
*
@@ -868,10 +859,7 @@ rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
if (ret)
goto out_err;
- dprintk("RPC: %5u %s: %s/%s: hdrlen %u rpclen\n",
- rqst->rq_task->tk_pid, __func__,
- transfertypes[rtype], transfertypes[wtype],
- xdr_stream_pos(xdr));
+ trace_xprtrdma_marshal(rqst, xdr_stream_pos(xdr), rtype, wtype);
ret = rpcrdma_prepare_send_sges(r_xprt, req, xdr_stream_pos(xdr),
&rqst->rq_snd_buf, rtype);
@@ -926,8 +914,7 @@ rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
curlen = rqst->rq_rcv_buf.head[0].iov_len;
if (curlen > copy_len)
curlen = copy_len;
- dprintk("RPC: %s: srcp 0x%p len %d hdrlen %d\n",
- __func__, srcp, copy_len, curlen);
+ trace_xprtrdma_fixup(rqst, copy_len, curlen);
srcp += curlen;
copy_len -= curlen;
@@ -947,9 +934,8 @@ rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
if (curlen > pagelist_len)
curlen = pagelist_len;
- dprintk("RPC: %s: page %d"
- " srcp 0x%p len %d curlen %d\n",
- __func__, i, srcp, copy_len, curlen);
+ trace_xprtrdma_fixup_pg(rqst, i, srcp,
+ copy_len, curlen);
destp = kmap_atomic(ppages[i]);
memcpy(destp + page_base, srcp, curlen);
flush_dcache_page(ppages[i]);
@@ -984,24 +970,6 @@ rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
return fixup_copy_count;
}
-/* Caller must guarantee @rep remains stable during this call.
- */
-static void
-rpcrdma_mark_remote_invalidation(struct list_head *mws,
- struct rpcrdma_rep *rep)
-{
- struct rpcrdma_mw *mw;
-
- if (!(rep->rr_wc_flags & IB_WC_WITH_INVALIDATE))
- return;
-
- list_for_each_entry(mw, mws, mw_list)
- if (mw->mw_handle == rep->rr_inv_rkey) {
- mw->mw_flags = RPCRDMA_MW_F_RI;
- break; /* only one invalidated MR per RPC */
- }
-}
-
/* By convention, backchannel calls arrive via rdma_msg type
* messages, and never populate the chunk lists. This makes
* the RPC/RDMA header small and fixed in size, so it is
@@ -1058,26 +1026,19 @@ out_short:
static int decode_rdma_segment(struct xdr_stream *xdr, u32 *length)
{
+ u32 handle;
+ u64 offset;
__be32 *p;
p = xdr_inline_decode(xdr, 4 * sizeof(*p));
if (unlikely(!p))
return -EIO;
- ifdebug(FACILITY) {
- u64 offset;
- u32 handle;
-
- handle = be32_to_cpup(p++);
- *length = be32_to_cpup(p++);
- xdr_decode_hyper(p, &offset);
- dprintk("RPC: %s: segment %u@0x%016llx:0x%08x\n",
- __func__, *length, (unsigned long long)offset,
- handle);
- } else {
- *length = be32_to_cpup(p + 1);
- }
+ handle = be32_to_cpup(p++);
+ *length = be32_to_cpup(p++);
+ xdr_decode_hyper(p, &offset);
+ trace_xprtrdma_decode_seg(handle, *length, offset);
return 0;
}
@@ -1098,8 +1059,6 @@ static int decode_write_chunk(struct xdr_stream *xdr, u32 *length)
*length += seglength;
}
- dprintk("RPC: %s: segcount=%u, %u bytes\n",
- __func__, be32_to_cpup(p), *length);
return 0;
}
@@ -1296,8 +1255,7 @@ out:
* being marshaled.
*/
out_badheader:
- dprintk("RPC: %5u %s: invalid rpcrdma reply (type %u)\n",
- rqst->rq_task->tk_pid, __func__, be32_to_cpu(rep->rr_proc));
+ trace_xprtrdma_reply_hdr(rep);
r_xprt->rx_stats.bad_reply_count++;
status = -EIO;
goto out;
@@ -1339,9 +1297,12 @@ void rpcrdma_deferred_completion(struct work_struct *work)
struct rpcrdma_rep *rep =
container_of(work, struct rpcrdma_rep, rr_work);
struct rpcrdma_req *req = rpcr_to_rdmar(rep->rr_rqst);
+ struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
- rpcrdma_mark_remote_invalidation(&req->rl_registered, rep);
- rpcrdma_release_rqst(rep->rr_rxprt, req);
+ trace_xprtrdma_defer_cmp(rep);
+ if (rep->rr_wc_flags & IB_WC_WITH_INVALIDATE)
+ r_xprt->rx_ia.ri_ops->ro_reminv(rep, &req->rl_registered);
+ rpcrdma_release_rqst(r_xprt, req);
rpcrdma_complete_rqst(rep);
}
@@ -1360,8 +1321,6 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
u32 credits;
__be32 *p;
- dprintk("RPC: %s: incoming rep %p\n", __func__, rep);
-
if (rep->rr_hdrbuf.head[0].iov_len == 0)
goto out_badstatus;
@@ -1405,8 +1364,7 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
rep->rr_rqst = rqst;
clear_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags);
- dprintk("RPC: %s: reply %p completes request %p (xid 0x%08x)\n",
- __func__, rep, req, be32_to_cpu(rep->rr_xid));
+ trace_xprtrdma_reply(rqst->rq_task, rep, req, credits);
queue_work_on(req->rl_cpu, rpcrdma_receive_wq, &rep->rr_work);
return;
@@ -1420,8 +1378,7 @@ out_badstatus:
return;
out_badversion:
- dprintk("RPC: %s: invalid version %d\n",
- __func__, be32_to_cpu(rep->rr_vers));
+ trace_xprtrdma_reply_vers(rep);
goto repost;
/* The RPC transaction has already been terminated, or the header
@@ -1429,12 +1386,11 @@ out_badversion:
*/
out_norqst:
spin_unlock(&xprt->recv_lock);
- dprintk("RPC: %s: no match for incoming xid 0x%08x\n",
- __func__, be32_to_cpu(rep->rr_xid));
+ trace_xprtrdma_reply_rqst(rep);
goto repost;
out_shortreply:
- dprintk("RPC: %s: short/invalid reply\n", __func__);
+ trace_xprtrdma_reply_short(rep);
/* If no pending RPC transaction was matched, post a replacement
* receive buffer before returning.
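
Retransmit cleanup moves from send time to marshal time. xprt_rdma_send_request() used to strip stale chunks from a retransmitted request with ro_unmap_sync(), a call documented above as sleeping until invalidation completes (the deletion is in the transport.c hunk below); rpcrdma_marshal_req() now pops each leftover registration and defers it to the recovery workqueue instead:

	/* consolidated from the rpcrdma_marshal_req() hunk above */
	while (unlikely(!list_empty(&req->rl_registered))) {
		struct rpcrdma_mr *mr;

		mr = rpcrdma_mr_pop(&req->rl_registered);
		rpcrdma_mr_defer_recovery(mr);
	}

As the new comment explains, a retransmit very likely means the connection was replaced, so the old registrations are unusable anyway; deferring their teardown keeps the synchronous wait out of this path.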
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 6ee1ad8978f3..4b1ecfe979cf 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -67,8 +67,7 @@
static unsigned int xprt_rdma_slot_table_entries = RPCRDMA_DEF_SLOT_TABLE;
unsigned int xprt_rdma_max_inline_read = RPCRDMA_DEF_INLINE;
static unsigned int xprt_rdma_max_inline_write = RPCRDMA_DEF_INLINE;
-static unsigned int xprt_rdma_inline_write_padding;
-unsigned int xprt_rdma_memreg_strategy = RPCRDMA_FRMR;
+unsigned int xprt_rdma_memreg_strategy = RPCRDMA_FRWR;
int xprt_rdma_pad_optimize;
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
@@ -81,6 +80,7 @@ static unsigned int zero;
static unsigned int max_padding = PAGE_SIZE;
static unsigned int min_memreg = RPCRDMA_BOUNCEBUFFERS;
static unsigned int max_memreg = RPCRDMA_LAST - 1;
+static unsigned int dummy;
static struct ctl_table_header *sunrpc_table_header;
@@ -114,7 +114,7 @@ static struct ctl_table xr_tunables_table[] = {
},
{
.procname = "rdma_inline_write_padding",
- .data = &xprt_rdma_inline_write_padding,
+ .data = &dummy,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
@@ -259,13 +259,10 @@ xprt_rdma_connect_worker(struct work_struct *work)
xprt_clear_connected(xprt);
- dprintk("RPC: %s: %sconnect\n", __func__,
- r_xprt->rx_ep.rep_connected != 0 ? "re" : "");
rc = rpcrdma_ep_connect(&r_xprt->rx_ep, &r_xprt->rx_ia);
if (rc)
xprt_wake_pending_tasks(xprt, rc);
- dprintk("RPC: %s: exit\n", __func__);
xprt_clear_connecting(xprt);
}
@@ -275,7 +272,7 @@ xprt_rdma_inject_disconnect(struct rpc_xprt *xprt)
struct rpcrdma_xprt *r_xprt = container_of(xprt, struct rpcrdma_xprt,
rx_xprt);
- pr_info("rpcrdma: injecting transport disconnect on xprt=%p\n", xprt);
+ trace_xprtrdma_inject_dsc(r_xprt);
rdma_disconnect(r_xprt->rx_ia.ri_id);
}
@@ -295,7 +292,7 @@ xprt_rdma_destroy(struct rpc_xprt *xprt)
{
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
- dprintk("RPC: %s: called\n", __func__);
+ trace_xprtrdma_destroy(r_xprt);
cancel_delayed_work_sync(&r_xprt->rx_connect_worker);
@@ -306,11 +303,8 @@ xprt_rdma_destroy(struct rpc_xprt *xprt)
rpcrdma_ia_close(&r_xprt->rx_ia);
xprt_rdma_free_addresses(xprt);
-
xprt_free(xprt);
- dprintk("RPC: %s: returning\n", __func__);
-
module_put(THIS_MODULE);
}
@@ -361,9 +355,7 @@ xprt_setup_rdma(struct xprt_create *args)
/*
* Set up RDMA-specific connect data.
*/
-
- sap = (struct sockaddr *)&cdata.addr;
- memcpy(sap, args->dstaddr, args->addrlen);
+ sap = args->dstaddr;
/* Ensure xprt->addr holds valid server TCP (not RDMA)
* address, for any side protocols which peek at it */
@@ -373,6 +365,7 @@ xprt_setup_rdma(struct xprt_create *args)
if (rpc_get_port(sap))
xprt_set_bound(xprt);
+ xprt_rdma_format_addresses(xprt, sap);
cdata.max_requests = xprt->max_reqs;
@@ -387,8 +380,6 @@ xprt_setup_rdma(struct xprt_create *args)
if (cdata.inline_rsize > cdata.rsize)
cdata.inline_rsize = cdata.rsize;
- cdata.padding = xprt_rdma_inline_write_padding;
-
/*
* Create new transport instance, which includes initialized
* o ia
@@ -398,7 +389,7 @@ xprt_setup_rdma(struct xprt_create *args)
new_xprt = rpcx_to_rdmax(xprt);
- rc = rpcrdma_ia_open(new_xprt, sap);
+ rc = rpcrdma_ia_open(new_xprt);
if (rc)
goto out1;
@@ -407,31 +398,19 @@ xprt_setup_rdma(struct xprt_create *args)
*/
new_xprt->rx_data = cdata;
new_ep = &new_xprt->rx_ep;
- new_ep->rep_remote_addr = cdata.addr;
rc = rpcrdma_ep_create(&new_xprt->rx_ep,
&new_xprt->rx_ia, &new_xprt->rx_data);
if (rc)
goto out2;
- /*
- * Allocate pre-registered send and receive buffers for headers and
- * any inline data. Also specify any padding which will be provided
- * from a preregistered zero buffer.
- */
rc = rpcrdma_buffer_create(new_xprt);
if (rc)
goto out3;
- /*
- * Register a callback for connection events. This is necessary because
- * connection loss notification is async. We also catch connection loss
- * when reaping receives.
- */
INIT_DELAYED_WORK(&new_xprt->rx_connect_worker,
xprt_rdma_connect_worker);
- xprt_rdma_format_addresses(xprt, sap);
xprt->max_payload = new_xprt->rx_ia.ri_ops->ro_maxpages(new_xprt);
if (xprt->max_payload == 0)
goto out4;
@@ -445,16 +424,19 @@ xprt_setup_rdma(struct xprt_create *args)
dprintk("RPC: %s: %s:%s\n", __func__,
xprt->address_strings[RPC_DISPLAY_ADDR],
xprt->address_strings[RPC_DISPLAY_PORT]);
+ trace_xprtrdma_create(new_xprt);
return xprt;
out4:
- xprt_rdma_free_addresses(xprt);
- rc = -EINVAL;
+ rpcrdma_buffer_destroy(&new_xprt->rx_buf);
+ rc = -ENODEV;
out3:
rpcrdma_ep_destroy(new_ep, &new_xprt->rx_ia);
out2:
rpcrdma_ia_close(&new_xprt->rx_ia);
out1:
+ trace_xprtrdma_destroy(new_xprt);
+ xprt_rdma_free_addresses(xprt);
xprt_free(xprt);
return ERR_PTR(rc);
}
@@ -488,16 +470,34 @@ xprt_rdma_close(struct rpc_xprt *xprt)
rpcrdma_ep_disconnect(ep, ia);
}
+/**
+ * xprt_rdma_set_port - update server port with rpcbind result
+ * @xprt: controlling RPC transport
+ * @port: new port value
+ *
+ * Transport connect status is unchanged.
+ */
static void
xprt_rdma_set_port(struct rpc_xprt *xprt, u16 port)
{
- struct sockaddr_in *sap;
+ struct sockaddr *sap = (struct sockaddr *)&xprt->addr;
+ char buf[8];
- sap = (struct sockaddr_in *)&xprt->addr;
- sap->sin_port = htons(port);
- sap = (struct sockaddr_in *)&rpcx_to_rdmad(xprt).addr;
- sap->sin_port = htons(port);
- dprintk("RPC: %s: %u\n", __func__, port);
+ dprintk("RPC: %s: setting port for xprt %p (%s:%s) to %u\n",
+ __func__, xprt,
+ xprt->address_strings[RPC_DISPLAY_ADDR],
+ xprt->address_strings[RPC_DISPLAY_PORT],
+ port);
+
+ rpc_set_port(sap, port);
+
+ kfree(xprt->address_strings[RPC_DISPLAY_PORT]);
+ snprintf(buf, sizeof(buf), "%u", port);
+ xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);
+
+ kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
+ snprintf(buf, sizeof(buf), "%4hx", port);
+ xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
}
/**
@@ -516,8 +516,6 @@ xprt_rdma_set_port(struct rpc_xprt *xprt, u16 port)
static void
xprt_rdma_timer(struct rpc_xprt *xprt, struct rpc_task *task)
{
- dprintk("RPC: %5u %s: xprt = %p\n", task->tk_pid, __func__, xprt);
-
xprt_force_disconnect(xprt);
}
@@ -640,7 +638,7 @@ xprt_rdma_allocate(struct rpc_task *task)
req = rpcrdma_buffer_get(&r_xprt->rx_buf);
if (req == NULL)
- return -ENOMEM;
+ goto out_get;
flags = RPCRDMA_DEF_GFP;
if (RPC_IS_SWAPPER(task))
@@ -653,19 +651,18 @@ xprt_rdma_allocate(struct rpc_task *task)
if (!rpcrdma_get_recvbuf(r_xprt, req, rqst->rq_rcvsize, flags))
goto out_fail;
- dprintk("RPC: %5u %s: send size = %zd, recv size = %zd, req = %p\n",
- task->tk_pid, __func__, rqst->rq_callsize,
- rqst->rq_rcvsize, req);
-
req->rl_cpu = smp_processor_id();
req->rl_connect_cookie = 0; /* our reserved value */
rpcrdma_set_xprtdata(rqst, req);
rqst->rq_buffer = req->rl_sendbuf->rg_base;
rqst->rq_rbuffer = req->rl_recvbuf->rg_base;
+ trace_xprtrdma_allocate(task, req);
return 0;
out_fail:
rpcrdma_buffer_put(req);
+out_get:
+ trace_xprtrdma_allocate(task, NULL);
return -ENOMEM;
}
@@ -682,13 +679,9 @@ xprt_rdma_free(struct rpc_task *task)
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
- if (test_bit(RPCRDMA_REQ_F_BACKCHANNEL, &req->rl_flags))
- return;
-
- dprintk("RPC: %s: called on 0x%p\n", __func__, req->rl_reply);
-
if (test_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags))
rpcrdma_release_rqst(r_xprt, req);
+ trace_xprtrdma_rpc_done(task, req);
rpcrdma_buffer_put(req);
}
@@ -698,22 +691,12 @@ xprt_rdma_free(struct rpc_task *task)
*
* Caller holds the transport's write lock.
*
- * Return values:
- * 0: The request has been sent
- * ENOTCONN: Caller needs to invoke connect logic then call again
- * ENOBUFS: Call again later to send the request
- * EIO: A permanent error occurred. The request was not sent,
- * and don't try it again
- *
- * send_request invokes the meat of RPC RDMA. It must do the following:
- *
- * 1. Marshal the RPC request into an RPC RDMA request, which means
- * putting a header in front of data, and creating IOVs for RDMA
- * from those in the request.
- * 2. In marshaling, detect opportunities for RDMA, and use them.
- * 3. Post a recv message to set up asynch completion, then send
- * the request (rpcrdma_ep_post).
- * 4. No partial sends are possible in the RPC-RDMA protocol (as in UDP).
+ * Returns:
+ * %0 if the RPC message has been sent
+ * %-ENOTCONN if the caller should reconnect and call again
+ * %-ENOBUFS if the caller should call again later
+ * %-EIO if a permanent error occurred and the request was not
+ * sent. Do not try to send this message again.
*/
static int
xprt_rdma_send_request(struct rpc_task *task)
@@ -724,14 +707,14 @@ xprt_rdma_send_request(struct rpc_task *task)
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
int rc = 0;
+#if defined(CONFIG_SUNRPC_BACKCHANNEL)
+ if (unlikely(!rqst->rq_buffer))
+ return xprt_rdma_bc_send_reply(rqst);
+#endif /* CONFIG_SUNRPC_BACKCHANNEL */
+
if (!xprt_connected(xprt))
goto drop_connection;
- /* On retransmit, remove any previously registered chunks */
- if (unlikely(!list_empty(&req->rl_registered)))
- r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt,
- &req->rl_registered);
-
rc = rpcrdma_marshal_req(r_xprt, rqst);
if (rc < 0)
goto failed_marshal;
@@ -744,7 +727,7 @@ xprt_rdma_send_request(struct rpc_task *task)
goto drop_connection;
req->rl_connect_cookie = xprt->connect_cookie;
- set_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags);
+ __set_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags);
if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
goto drop_connection;
@@ -904,8 +887,7 @@ int xprt_rdma_init(void)
"\tMaxInlineRead %d\n\tMaxInlineWrite %d\n",
xprt_rdma_slot_table_entries,
xprt_rdma_max_inline_read, xprt_rdma_max_inline_write);
- dprintk("\tPadding %d\n\tMemreg %d\n",
- xprt_rdma_inline_write_padding, xprt_rdma_memreg_strategy);
+ dprintk("\tPadding 0\n\tMemreg %d\n", xprt_rdma_memreg_strategy);
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
if (!sunrpc_table_header)
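
The xprt_rdma_set_port() rewrite above drops the AF_INET-only sin_port poke in favor of rpc_set_port(), the generic sunrpc helper, which handles both address families, roughly (a sketch of the helper, defined elsewhere in the sunrpc headers):

static inline void rpc_set_port(struct sockaddr *sap,
				const unsigned short port)
{
	switch (sap->sa_family) {
	case AF_INET:
		((struct sockaddr_in *)sap)->sin_port = htons(port);
		break;
	case AF_INET6:
		((struct sockaddr_in6 *)sap)->sin6_port = htons(port);
		break;
	}
}

The new code also regenerates the cached RPC_DISPLAY_PORT and RPC_DISPLAY_HEX_PORT strings, which the old code left stale after an rpcbind update.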
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 8607c029c0dd..f4eb63e8e689 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -71,8 +71,8 @@
/*
* internal functions
*/
-static void rpcrdma_create_mrs(struct rpcrdma_xprt *r_xprt);
-static void rpcrdma_destroy_mrs(struct rpcrdma_buffer *buf);
+static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
+static void rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf);
static void rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb);
struct workqueue_struct *rpcrdma_receive_wq __read_mostly;
@@ -108,7 +108,10 @@ static void
rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
{
struct rpcrdma_ep *ep = context;
+ struct rpcrdma_xprt *r_xprt = container_of(ep, struct rpcrdma_xprt,
+ rx_ep);
+ trace_xprtrdma_qp_error(r_xprt, event);
pr_err("rpcrdma: %s on device %s ep %p\n",
ib_event_msg(event->event), event->device->name, context);
@@ -133,6 +136,7 @@ rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
container_of(cqe, struct rpcrdma_sendctx, sc_cqe);
/* WARNING: Only wr_cqe and status are reliable at this point */
+ trace_xprtrdma_wc_send(sc, wc);
if (wc->status != IB_WC_SUCCESS && wc->status != IB_WC_WR_FLUSH_ERR)
pr_err("rpcrdma: Send: %s (%u/0x%x)\n",
ib_wc_status_msg(wc->status),
@@ -155,13 +159,11 @@ rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
rr_cqe);
/* WARNING: Only wr_id and status are reliable at this point */
+ trace_xprtrdma_wc_receive(rep, wc);
if (wc->status != IB_WC_SUCCESS)
goto out_fail;
/* status == SUCCESS means all fields in wc are trustworthy */
- dprintk("RPC: %s: rep %p opcode 'recv', length %u: success\n",
- __func__, rep, wc->byte_len);
-
rpcrdma_set_xdrlen(&rep->rr_hdrbuf, wc->byte_len);
rep->rr_wc_flags = wc->wc_flags;
rep->rr_inv_rkey = wc->ex.invalidate_rkey;
@@ -192,7 +194,6 @@ rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt,
unsigned int rsize, wsize;
/* Default settings for RPC-over-RDMA Version One */
- r_xprt->rx_ia.ri_reminv_expected = false;
r_xprt->rx_ia.ri_implicit_roundup = xprt_rdma_pad_optimize;
rsize = RPCRDMA_V1_DEF_INLINE_SIZE;
wsize = RPCRDMA_V1_DEF_INLINE_SIZE;
@@ -200,7 +201,6 @@ rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt,
if (pmsg &&
pmsg->cp_magic == rpcrdma_cmp_magic &&
pmsg->cp_version == RPCRDMA_CMP_VERSION) {
- r_xprt->rx_ia.ri_reminv_expected = true;
r_xprt->rx_ia.ri_implicit_roundup = true;
rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size);
wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size);
@@ -221,11 +221,9 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
struct rpcrdma_xprt *xprt = id->context;
struct rpcrdma_ia *ia = &xprt->rx_ia;
struct rpcrdma_ep *ep = &xprt->rx_ep;
-#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
- struct sockaddr *sap = (struct sockaddr *)&ep->rep_remote_addr;
-#endif
int connstate = 0;
+ trace_xprtrdma_conn_upcall(xprt, event);
switch (event->event) {
case RDMA_CM_EVENT_ADDR_RESOLVED:
case RDMA_CM_EVENT_ROUTE_RESOLVED:
@@ -234,21 +232,17 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
break;
case RDMA_CM_EVENT_ADDR_ERROR:
ia->ri_async_rc = -EHOSTUNREACH;
- dprintk("RPC: %s: CM address resolution error, ep 0x%p\n",
- __func__, ep);
complete(&ia->ri_done);
break;
case RDMA_CM_EVENT_ROUTE_ERROR:
ia->ri_async_rc = -ENETUNREACH;
- dprintk("RPC: %s: CM route resolution error, ep 0x%p\n",
- __func__, ep);
complete(&ia->ri_done);
break;
case RDMA_CM_EVENT_DEVICE_REMOVAL:
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
- pr_info("rpcrdma: removing device %s for %pIS:%u\n",
+ pr_info("rpcrdma: removing device %s for %s:%s\n",
ia->ri_device->name,
- sap, rpc_get_port(sap));
+ rpcrdma_addrstr(xprt), rpcrdma_portstr(xprt));
#endif
set_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags);
ep->rep_connected = -ENODEV;
@@ -271,8 +265,8 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
connstate = -ENETDOWN;
goto connected;
case RDMA_CM_EVENT_REJECTED:
- dprintk("rpcrdma: connection to %pIS:%u rejected: %s\n",
- sap, rpc_get_port(sap),
+ dprintk("rpcrdma: connection to %s:%s rejected: %s\n",
+ rpcrdma_addrstr(xprt), rpcrdma_portstr(xprt),
rdma_reject_msg(id, event->status));
connstate = -ECONNREFUSED;
if (event->status == IB_CM_REJ_STALE_CONN)
@@ -287,8 +281,9 @@ connected:
wake_up_all(&ep->rep_connect_wait);
/*FALLTHROUGH*/
default:
- dprintk("RPC: %s: %pIS:%u on %s/%s (ep 0x%p): %s\n",
- __func__, sap, rpc_get_port(sap),
+ dprintk("RPC: %s: %s:%s on %s/%s (ep 0x%p): %s\n",
+ __func__,
+ rpcrdma_addrstr(xprt), rpcrdma_portstr(xprt),
ia->ri_device->name, ia->ri_ops->ro_displayname,
ep, rdma_event_msg(event->event));
break;
@@ -298,13 +293,14 @@ connected:
}
static struct rdma_cm_id *
-rpcrdma_create_id(struct rpcrdma_xprt *xprt,
- struct rpcrdma_ia *ia, struct sockaddr *addr)
+rpcrdma_create_id(struct rpcrdma_xprt *xprt, struct rpcrdma_ia *ia)
{
unsigned long wtimeout = msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1;
struct rdma_cm_id *id;
int rc;
+ trace_xprtrdma_conn_start(xprt);
+
init_completion(&ia->ri_done);
init_completion(&ia->ri_remove_done);
@@ -318,7 +314,9 @@ rpcrdma_create_id(struct rpcrdma_xprt *xprt,
}
ia->ri_async_rc = -ETIMEDOUT;
- rc = rdma_resolve_addr(id, NULL, addr, RDMA_RESOLVE_TIMEOUT);
+ rc = rdma_resolve_addr(id, NULL,
+ (struct sockaddr *)&xprt->rx_xprt.addr,
+ RDMA_RESOLVE_TIMEOUT);
if (rc) {
dprintk("RPC: %s: rdma_resolve_addr() failed %i\n",
__func__, rc);
@@ -326,8 +324,7 @@ rpcrdma_create_id(struct rpcrdma_xprt *xprt,
}
rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
if (rc < 0) {
- dprintk("RPC: %s: wait() exited: %i\n",
- __func__, rc);
+ trace_xprtrdma_conn_tout(xprt);
goto out;
}
@@ -344,8 +341,7 @@ rpcrdma_create_id(struct rpcrdma_xprt *xprt,
}
rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
if (rc < 0) {
- dprintk("RPC: %s: wait() exited: %i\n",
- __func__, rc);
+ trace_xprtrdma_conn_tout(xprt);
goto out;
}
rc = ia->ri_async_rc;
@@ -365,19 +361,18 @@ out:
/**
* rpcrdma_ia_open - Open and initialize an Interface Adapter.
- * @xprt: controlling transport
- * @addr: IP address of remote peer
+ * @xprt: transport with IA to (re)initialize
*
* Returns 0 on success, negative errno if an appropriate
* Interface Adapter could not be found and opened.
*/
int
-rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr)
+rpcrdma_ia_open(struct rpcrdma_xprt *xprt)
{
struct rpcrdma_ia *ia = &xprt->rx_ia;
int rc;
- ia->ri_id = rpcrdma_create_id(xprt, ia, addr);
+ ia->ri_id = rpcrdma_create_id(xprt, ia);
if (IS_ERR(ia->ri_id)) {
rc = PTR_ERR(ia->ri_id);
goto out_err;
@@ -392,7 +387,7 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr)
}
switch (xprt_rdma_memreg_strategy) {
- case RPCRDMA_FRMR:
+ case RPCRDMA_FRWR:
if (frwr_is_supported(ia)) {
ia->ri_ops = &rpcrdma_frwr_memreg_ops;
break;
@@ -462,10 +457,12 @@ rpcrdma_ia_remove(struct rpcrdma_ia *ia)
rpcrdma_dma_unmap_regbuf(req->rl_sendbuf);
rpcrdma_dma_unmap_regbuf(req->rl_recvbuf);
}
- rpcrdma_destroy_mrs(buf);
+ rpcrdma_mrs_destroy(buf);
/* Allow waiters to continue */
complete(&ia->ri_remove_done);
+
+ trace_xprtrdma_remove(r_xprt);
}
/**
@@ -476,7 +473,6 @@ rpcrdma_ia_remove(struct rpcrdma_ia *ia)
void
rpcrdma_ia_close(struct rpcrdma_ia *ia)
{
- dprintk("RPC: %s: entering\n", __func__);
if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
if (ia->ri_id->qp)
rdma_destroy_qp(ia->ri_id);
@@ -630,9 +626,6 @@ out1:
void
rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
- dprintk("RPC: %s: entering, connected is %d\n",
- __func__, ep->rep_connected);
-
cancel_delayed_work_sync(&ep->rep_connect_worker);
if (ia->ri_id->qp) {
@@ -653,13 +646,12 @@ static int
rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
- struct sockaddr *sap = (struct sockaddr *)&r_xprt->rx_data.addr;
int rc, err;
- pr_info("%s: r_xprt = %p\n", __func__, r_xprt);
+ trace_xprtrdma_reinsert(r_xprt);
rc = -EHOSTUNREACH;
- if (rpcrdma_ia_open(r_xprt, sap))
+ if (rpcrdma_ia_open(r_xprt))
goto out1;
rc = -ENOMEM;
@@ -676,7 +668,7 @@ rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
goto out3;
}
- rpcrdma_create_mrs(r_xprt);
+ rpcrdma_mrs_create(r_xprt);
return 0;
out3:
@@ -691,16 +683,15 @@ static int
rpcrdma_ep_reconnect(struct rpcrdma_xprt *r_xprt, struct rpcrdma_ep *ep,
struct rpcrdma_ia *ia)
{
- struct sockaddr *sap = (struct sockaddr *)&r_xprt->rx_data.addr;
struct rdma_cm_id *id, *old;
int err, rc;
- dprintk("RPC: %s: reconnecting...\n", __func__);
+ trace_xprtrdma_reconnect(r_xprt);
rpcrdma_ep_disconnect(ep, ia);
rc = -EHOSTUNREACH;
- id = rpcrdma_create_id(r_xprt, ia, sap);
+ id = rpcrdma_create_id(r_xprt, ia);
if (IS_ERR(id))
goto out;
@@ -817,16 +808,14 @@ rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
int rc;
rc = rdma_disconnect(ia->ri_id);
- if (!rc) {
+ if (!rc)
/* returns without wait if not connected */
wait_event_interruptible(ep->rep_connect_wait,
ep->rep_connected != 1);
- dprintk("RPC: %s: after wait, %sconnected\n", __func__,
- (ep->rep_connected == 1) ? "still " : "dis");
- } else {
- dprintk("RPC: %s: rdma_disconnect %i\n", __func__, rc);
+ else
ep->rep_connected = rc;
- }
+ trace_xprtrdma_disconnect(container_of(ep, struct rpcrdma_xprt,
+ rx_ep), rc);
ib_drain_qp(ia->ri_id->qp);
}
@@ -998,15 +987,15 @@ rpcrdma_mr_recovery_worker(struct work_struct *work)
{
struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
rb_recovery_worker.work);
- struct rpcrdma_mw *mw;
+ struct rpcrdma_mr *mr;
spin_lock(&buf->rb_recovery_lock);
while (!list_empty(&buf->rb_stale_mrs)) {
- mw = rpcrdma_pop_mw(&buf->rb_stale_mrs);
+ mr = rpcrdma_mr_pop(&buf->rb_stale_mrs);
spin_unlock(&buf->rb_recovery_lock);
- dprintk("RPC: %s: recovering MR %p\n", __func__, mw);
- mw->mw_xprt->rx_ia.ri_ops->ro_recover_mr(mw);
+ trace_xprtrdma_recover_mr(mr);
+ mr->mr_xprt->rx_ia.ri_ops->ro_recover_mr(mr);
spin_lock(&buf->rb_recovery_lock);
}
@@ -1014,20 +1003,20 @@ rpcrdma_mr_recovery_worker(struct work_struct *work)
}
void
-rpcrdma_defer_mr_recovery(struct rpcrdma_mw *mw)
+rpcrdma_mr_defer_recovery(struct rpcrdma_mr *mr)
{
- struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
+ struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
spin_lock(&buf->rb_recovery_lock);
- rpcrdma_push_mw(mw, &buf->rb_stale_mrs);
+ rpcrdma_mr_push(mr, &buf->rb_stale_mrs);
spin_unlock(&buf->rb_recovery_lock);
schedule_delayed_work(&buf->rb_recovery_worker, 0);
}
static void
-rpcrdma_create_mrs(struct rpcrdma_xprt *r_xprt)
+rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
{
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
@@ -1036,32 +1025,32 @@ rpcrdma_create_mrs(struct rpcrdma_xprt *r_xprt)
LIST_HEAD(all);
for (count = 0; count < 32; count++) {
- struct rpcrdma_mw *mw;
+ struct rpcrdma_mr *mr;
int rc;
- mw = kzalloc(sizeof(*mw), GFP_KERNEL);
- if (!mw)
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
break;
- rc = ia->ri_ops->ro_init_mr(ia, mw);
+ rc = ia->ri_ops->ro_init_mr(ia, mr);
if (rc) {
- kfree(mw);
+ kfree(mr);
break;
}
- mw->mw_xprt = r_xprt;
+ mr->mr_xprt = r_xprt;
- list_add(&mw->mw_list, &free);
- list_add(&mw->mw_all, &all);
+ list_add(&mr->mr_list, &free);
+ list_add(&mr->mr_all, &all);
}
- spin_lock(&buf->rb_mwlock);
- list_splice(&free, &buf->rb_mws);
+ spin_lock(&buf->rb_mrlock);
+ list_splice(&free, &buf->rb_mrs);
list_splice(&all, &buf->rb_all);
r_xprt->rx_stats.mrs_allocated += count;
- spin_unlock(&buf->rb_mwlock);
+ spin_unlock(&buf->rb_mrlock);
- dprintk("RPC: %s: created %u MRs\n", __func__, count);
+ trace_xprtrdma_createmrs(r_xprt, count);
}
static void
@@ -1072,7 +1061,7 @@ rpcrdma_mr_refresh_worker(struct work_struct *work)
struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
rx_buf);
- rpcrdma_create_mrs(r_xprt);
+ rpcrdma_mrs_create(r_xprt);
}
struct rpcrdma_req *
@@ -1093,10 +1082,17 @@ rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
return req;
}
-struct rpcrdma_rep *
+/**
+ * rpcrdma_create_rep - Allocate an rpcrdma_rep object
+ * @r_xprt: controlling transport
+ *
+ * Returns 0 on success or a negative errno on failure.
+ */
+int
rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
{
struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
+ struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
struct rpcrdma_rep *rep;
int rc;
@@ -1121,12 +1117,18 @@ rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
rep->rr_recv_wr.wr_cqe = &rep->rr_cqe;
rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
rep->rr_recv_wr.num_sge = 1;
- return rep;
+
+ spin_lock(&buf->rb_lock);
+ list_add(&rep->rr_list, &buf->rb_recv_bufs);
+ spin_unlock(&buf->rb_lock);
+ return 0;
out_free:
kfree(rep);
out:
- return ERR_PTR(rc);
+ dprintk("RPC: %s: reply buffer %d alloc failed\n",
+ __func__, rc);
+ return rc;
}
int
@@ -1137,10 +1139,10 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
buf->rb_max_requests = r_xprt->rx_data.max_requests;
buf->rb_bc_srv_max_requests = 0;
- spin_lock_init(&buf->rb_mwlock);
+ spin_lock_init(&buf->rb_mrlock);
spin_lock_init(&buf->rb_lock);
spin_lock_init(&buf->rb_recovery_lock);
- INIT_LIST_HEAD(&buf->rb_mws);
+ INIT_LIST_HEAD(&buf->rb_mrs);
INIT_LIST_HEAD(&buf->rb_all);
INIT_LIST_HEAD(&buf->rb_stale_mrs);
INIT_DELAYED_WORK(&buf->rb_refresh_worker,
@@ -1148,7 +1150,7 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
INIT_DELAYED_WORK(&buf->rb_recovery_worker,
rpcrdma_mr_recovery_worker);
- rpcrdma_create_mrs(r_xprt);
+ rpcrdma_mrs_create(r_xprt);
INIT_LIST_HEAD(&buf->rb_send_bufs);
INIT_LIST_HEAD(&buf->rb_allreqs);
@@ -1167,17 +1169,10 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
}
INIT_LIST_HEAD(&buf->rb_recv_bufs);
- for (i = 0; i < buf->rb_max_requests + RPCRDMA_MAX_BC_REQUESTS; i++) {
- struct rpcrdma_rep *rep;
-
- rep = rpcrdma_create_rep(r_xprt);
- if (IS_ERR(rep)) {
- dprintk("RPC: %s: reply buffer %d alloc failed\n",
- __func__, i);
- rc = PTR_ERR(rep);
+ for (i = 0; i <= buf->rb_max_requests; i++) {
+ rc = rpcrdma_create_rep(r_xprt);
+ if (rc)
goto out;
- }
- list_add(&rep->rr_list, &buf->rb_recv_bufs);
}
rc = rpcrdma_sendctxs_create(r_xprt);
@@ -1229,26 +1224,26 @@ rpcrdma_destroy_req(struct rpcrdma_req *req)
}
static void
-rpcrdma_destroy_mrs(struct rpcrdma_buffer *buf)
+rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf)
{
struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
rx_buf);
struct rpcrdma_ia *ia = rdmab_to_ia(buf);
- struct rpcrdma_mw *mw;
+ struct rpcrdma_mr *mr;
unsigned int count;
count = 0;
- spin_lock(&buf->rb_mwlock);
+ spin_lock(&buf->rb_mrlock);
while (!list_empty(&buf->rb_all)) {
- mw = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
- list_del(&mw->mw_all);
+ mr = list_entry(buf->rb_all.next, struct rpcrdma_mr, mr_all);
+ list_del(&mr->mr_all);
- spin_unlock(&buf->rb_mwlock);
- ia->ri_ops->ro_release_mr(mw);
+ spin_unlock(&buf->rb_mrlock);
+ ia->ri_ops->ro_release_mr(mr);
count++;
- spin_lock(&buf->rb_mwlock);
+ spin_lock(&buf->rb_mrlock);
}
- spin_unlock(&buf->rb_mwlock);
+ spin_unlock(&buf->rb_mrlock);
r_xprt->rx_stats.mrs_allocated = 0;
dprintk("RPC: %s: released %u MRs\n", __func__, count);
@@ -1285,27 +1280,33 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
spin_unlock(&buf->rb_reqslock);
buf->rb_recv_count = 0;
- rpcrdma_destroy_mrs(buf);
+ rpcrdma_mrs_destroy(buf);
}
-struct rpcrdma_mw *
-rpcrdma_get_mw(struct rpcrdma_xprt *r_xprt)
+/**
+ * rpcrdma_mr_get - Allocate an rpcrdma_mr object
+ * @r_xprt: controlling transport
+ *
+ * Returns an initialized rpcrdma_mr or NULL if no free
+ * rpcrdma_mr objects are available.
+ */
+struct rpcrdma_mr *
+rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt)
{
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
- struct rpcrdma_mw *mw = NULL;
+ struct rpcrdma_mr *mr = NULL;
- spin_lock(&buf->rb_mwlock);
- if (!list_empty(&buf->rb_mws))
- mw = rpcrdma_pop_mw(&buf->rb_mws);
- spin_unlock(&buf->rb_mwlock);
+ spin_lock(&buf->rb_mrlock);
+ if (!list_empty(&buf->rb_mrs))
+ mr = rpcrdma_mr_pop(&buf->rb_mrs);
+ spin_unlock(&buf->rb_mrlock);
- if (!mw)
- goto out_nomws;
- mw->mw_flags = 0;
- return mw;
+ if (!mr)
+ goto out_nomrs;
+ return mr;
-out_nomws:
- dprintk("RPC: %s: no MWs available\n", __func__);
+out_nomrs:
+ trace_xprtrdma_nomrs(r_xprt);
if (r_xprt->rx_ep.rep_connected != -ENODEV)
schedule_delayed_work(&buf->rb_refresh_worker, 0);
@@ -1315,14 +1316,39 @@ out_nomws:
return NULL;
}
+static void
+__rpcrdma_mr_put(struct rpcrdma_buffer *buf, struct rpcrdma_mr *mr)
+{
+ spin_lock(&buf->rb_mrlock);
+ rpcrdma_mr_push(mr, &buf->rb_mrs);
+ spin_unlock(&buf->rb_mrlock);
+}
+
+/**
+ * rpcrdma_mr_put - Release an rpcrdma_mr object
+ * @mr: object to release
+ *
+ */
void
-rpcrdma_put_mw(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mw *mw)
+rpcrdma_mr_put(struct rpcrdma_mr *mr)
{
- struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
+ __rpcrdma_mr_put(&mr->mr_xprt->rx_buf, mr);
+}
+
+/**
+ * rpcrdma_mr_unmap_and_put - DMA unmap an MR and release it
+ * @mr: object to release
+ *
+ */
+void
+rpcrdma_mr_unmap_and_put(struct rpcrdma_mr *mr)
+{
+ struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
- spin_lock(&buf->rb_mwlock);
- rpcrdma_push_mw(mw, &buf->rb_mws);
- spin_unlock(&buf->rb_mwlock);
+ trace_xprtrdma_dma_unmap(mr);
+ ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
+ mr->mr_sg, mr->mr_nents, mr->mr_dir);
+ __rpcrdma_mr_put(&r_xprt->rx_buf, mr);
}
static struct rpcrdma_rep *
@@ -1359,11 +1385,11 @@ rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
req = rpcrdma_buffer_get_req_locked(buffers);
req->rl_reply = rpcrdma_buffer_get_rep(buffers);
spin_unlock(&buffers->rb_lock);
+
return req;
out_reqbuf:
spin_unlock(&buffers->rb_lock);
- pr_warn("RPC: %s: out of request buffers\n", __func__);
return NULL;
}
@@ -1519,9 +1545,6 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia,
req->rl_reply = NULL;
}
- dprintk("RPC: %s: posting %d s/g entries\n",
- __func__, send_wr->num_sge);
-
if (!ep->rep_send_count ||
test_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags)) {
send_wr->send_flags |= IB_SEND_SIGNALED;
@@ -1530,14 +1553,12 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia,
send_wr->send_flags &= ~IB_SEND_SIGNALED;
--ep->rep_send_count;
}
+
rc = ib_post_send(ia->ri_id->qp, send_wr, &send_wr_fail);
+ trace_xprtrdma_post_send(req, rc);
if (rc)
- goto out_postsend_err;
+ return -ENOTCONN;
return 0;
-
-out_postsend_err:
- pr_err("rpcrdma: RDMA Send ib_post_send returned %i\n", rc);
- return -ENOTCONN;
}
int
@@ -1550,23 +1571,20 @@ rpcrdma_ep_post_recv(struct rpcrdma_ia *ia,
if (!rpcrdma_dma_map_regbuf(ia, rep->rr_rdmabuf))
goto out_map;
rc = ib_post_recv(ia->ri_id->qp, &rep->rr_recv_wr, &recv_wr_fail);
+ trace_xprtrdma_post_recv(rep, rc);
if (rc)
- goto out_postrecv;
+ return -ENOTCONN;
return 0;
out_map:
pr_err("rpcrdma: failed to DMA map the Receive buffer\n");
return -EIO;
-
-out_postrecv:
- pr_err("rpcrdma: ib_post_recv returned %i\n", rc);
- return -ENOTCONN;
}
/**
* rpcrdma_ep_post_extra_recv - Post buffers for incoming backchannel requests
* @r_xprt: transport associated with these backchannel resources
- * @min_reqs: minimum number of incoming requests expected
+ * @count: minimum number of incoming requests expected
*
* Returns zero if all requested buffers were posted, or a negative errno.
*/
@@ -1594,7 +1612,7 @@ rpcrdma_ep_post_extra_recv(struct rpcrdma_xprt *r_xprt, unsigned int count)
out_reqbuf:
spin_unlock(&buffers->rb_lock);
- pr_warn("%s: no extra receive buffers\n", __func__);
+ trace_xprtrdma_noreps(r_xprt);
return -ENOMEM;
out_rc:
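
The rpcrdma_mr_get()/rpcrdma_mr_put() pair introduced above is a lock-protected free list: get pops an MR under rb_mrlock and returns NULL (kicking the refresh worker) when the list is empty; put pushes the MR back. A self-contained userspace sketch of the same pattern, with a singly linked list standing in for list_head (the mr_get/mr_put names mirror the kernel's but are illustrative only):

#include <pthread.h>
#include <stdio.h>

struct mr {
        struct mr *next;                /* stand-in for mr_list */
        int id;
};

static pthread_mutex_t mr_lock = PTHREAD_MUTEX_INITIALIZER;
static struct mr *mr_free;              /* stand-in for buf->rb_mrs */

static struct mr *mr_get(void)          /* cf. rpcrdma_mr_get() */
{
        struct mr *mr;

        pthread_mutex_lock(&mr_lock);
        mr = mr_free;
        if (mr)
                mr_free = mr->next;     /* pop the head of the free list */
        pthread_mutex_unlock(&mr_lock);
        return mr;                      /* NULL: none free, replenish later */
}

static void mr_put(struct mr *mr)       /* cf. rpcrdma_mr_put() */
{
        pthread_mutex_lock(&mr_lock);
        mr->next = mr_free;             /* push back onto the free list */
        mr_free = mr;
        pthread_mutex_unlock(&mr_lock);
}

int main(void)
{
        struct mr a = { .next = NULL, .id = 1 };
        struct mr *got;

        mr_put(&a);
        got = mr_get();
        printf("got MR %d\n", got ? got->id : -1);
        return 0;
}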
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 1342f743f1c4..69883a960a3f 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -73,11 +73,10 @@ struct rpcrdma_ia {
struct completion ri_remove_done;
int ri_async_rc;
unsigned int ri_max_segs;
- unsigned int ri_max_frmr_depth;
+ unsigned int ri_max_frwr_depth;
unsigned int ri_max_inline_write;
unsigned int ri_max_inline_read;
unsigned int ri_max_send_sges;
- bool ri_reminv_expected;
bool ri_implicit_roundup;
enum ib_mr_type ri_mrtype;
unsigned long ri_flags;
@@ -101,7 +100,6 @@ struct rpcrdma_ep {
wait_queue_head_t rep_connect_wait;
struct rpcrdma_connect_private rep_cm_private;
struct rdma_conn_param rep_remote_cma;
- struct sockaddr_storage rep_remote_addr;
struct delayed_work rep_connect_worker;
};
@@ -232,29 +230,29 @@ enum {
};
/*
- * struct rpcrdma_mw - external memory region metadata
+ * struct rpcrdma_mr - external memory region metadata
*
* An external memory region is any buffer or page that is registered
* on the fly (ie, not pre-registered).
*
- * Each rpcrdma_buffer has a list of free MWs anchored in rb_mws. During
+ * Each rpcrdma_buffer has a list of free MRs anchored in rb_mrs. During
* call_allocate, rpcrdma_buffer_get() assigns one to each segment in
* an rpcrdma_req. Then rpcrdma_register_external() grabs these to keep
* track of registration metadata while each RPC is pending.
* rpcrdma_deregister_external() uses this metadata to unmap and
* release these resources when an RPC is complete.
*/
-enum rpcrdma_frmr_state {
- FRMR_IS_INVALID, /* ready to be used */
- FRMR_IS_VALID, /* in use */
- FRMR_FLUSHED_FR, /* flushed FASTREG WR */
- FRMR_FLUSHED_LI, /* flushed LOCALINV WR */
+enum rpcrdma_frwr_state {
+ FRWR_IS_INVALID, /* ready to be used */
+ FRWR_IS_VALID, /* in use */
+ FRWR_FLUSHED_FR, /* flushed FASTREG WR */
+ FRWR_FLUSHED_LI, /* flushed LOCALINV WR */
};
-struct rpcrdma_frmr {
+struct rpcrdma_frwr {
struct ib_mr *fr_mr;
struct ib_cqe fr_cqe;
- enum rpcrdma_frmr_state fr_state;
+ enum rpcrdma_frwr_state fr_state;
struct completion fr_linv_done;
union {
struct ib_reg_wr fr_regwr;
@@ -267,26 +265,20 @@ struct rpcrdma_fmr {
u64 *fm_physaddrs;
};
-struct rpcrdma_mw {
- struct list_head mw_list;
- struct scatterlist *mw_sg;
- int mw_nents;
- enum dma_data_direction mw_dir;
- unsigned long mw_flags;
+struct rpcrdma_mr {
+ struct list_head mr_list;
+ struct scatterlist *mr_sg;
+ int mr_nents;
+ enum dma_data_direction mr_dir;
union {
struct rpcrdma_fmr fmr;
- struct rpcrdma_frmr frmr;
+ struct rpcrdma_frwr frwr;
};
- struct rpcrdma_xprt *mw_xprt;
- u32 mw_handle;
- u32 mw_length;
- u64 mw_offset;
- struct list_head mw_all;
-};
-
-/* mw_flags */
-enum {
- RPCRDMA_MW_F_RI = 1,
+ struct rpcrdma_xprt *mr_xprt;
+ u32 mr_handle;
+ u32 mr_length;
+ u64 mr_offset;
+ struct list_head mr_all;
};
/*
@@ -362,8 +354,7 @@ struct rpcrdma_req {
/* rl_flags */
enum {
- RPCRDMA_REQ_F_BACKCHANNEL = 0,
- RPCRDMA_REQ_F_PENDING,
+ RPCRDMA_REQ_F_PENDING = 0,
RPCRDMA_REQ_F_TX_RESOURCES,
};
@@ -374,25 +365,25 @@ rpcrdma_set_xprtdata(struct rpc_rqst *rqst, struct rpcrdma_req *req)
}
static inline struct rpcrdma_req *
-rpcr_to_rdmar(struct rpc_rqst *rqst)
+rpcr_to_rdmar(const struct rpc_rqst *rqst)
{
return rqst->rq_xprtdata;
}
static inline void
-rpcrdma_push_mw(struct rpcrdma_mw *mw, struct list_head *list)
+rpcrdma_mr_push(struct rpcrdma_mr *mr, struct list_head *list)
{
- list_add_tail(&mw->mw_list, list);
+ list_add_tail(&mr->mr_list, list);
}
-static inline struct rpcrdma_mw *
-rpcrdma_pop_mw(struct list_head *list)
+static inline struct rpcrdma_mr *
+rpcrdma_mr_pop(struct list_head *list)
{
- struct rpcrdma_mw *mw;
+ struct rpcrdma_mr *mr;
- mw = list_first_entry(list, struct rpcrdma_mw, mw_list);
- list_del(&mw->mw_list);
- return mw;
+ mr = list_first_entry(list, struct rpcrdma_mr, mr_list);
+ list_del(&mr->mr_list);
+ return mr;
}
/*
@@ -402,8 +393,8 @@ rpcrdma_pop_mw(struct list_head *list)
* One of these is associated with a transport instance
*/
struct rpcrdma_buffer {
- spinlock_t rb_mwlock; /* protect rb_mws list */
- struct list_head rb_mws;
+ spinlock_t rb_mrlock; /* protect rb_mrs list */
+ struct list_head rb_mrs;
struct list_head rb_all;
unsigned long rb_sc_head;
@@ -438,13 +429,11 @@ struct rpcrdma_buffer {
* This data should be set with mount options
*/
struct rpcrdma_create_data_internal {
- struct sockaddr_storage addr; /* RDMA server address */
unsigned int max_requests; /* max requests (slots) in flight */
unsigned int rsize; /* mount rsize - max read hdr+data */
unsigned int wsize; /* mount wsize - max write hdr+data */
unsigned int inline_rsize; /* max non-rdma read data payload */
unsigned int inline_wsize; /* max non-rdma write data payload */
- unsigned int padding; /* non-rdma write header padding */
};
/*
@@ -484,17 +473,19 @@ struct rpcrdma_memreg_ops {
struct rpcrdma_mr_seg *
(*ro_map)(struct rpcrdma_xprt *,
struct rpcrdma_mr_seg *, int, bool,
- struct rpcrdma_mw **);
+ struct rpcrdma_mr **);
+ void (*ro_reminv)(struct rpcrdma_rep *rep,
+ struct list_head *mrs);
void (*ro_unmap_sync)(struct rpcrdma_xprt *,
struct list_head *);
- void (*ro_recover_mr)(struct rpcrdma_mw *);
+ void (*ro_recover_mr)(struct rpcrdma_mr *mr);
int (*ro_open)(struct rpcrdma_ia *,
struct rpcrdma_ep *,
struct rpcrdma_create_data_internal *);
size_t (*ro_maxpages)(struct rpcrdma_xprt *);
int (*ro_init_mr)(struct rpcrdma_ia *,
- struct rpcrdma_mw *);
- void (*ro_release_mr)(struct rpcrdma_mw *);
+ struct rpcrdma_mr *);
+ void (*ro_release_mr)(struct rpcrdma_mr *mr);
const char *ro_displayname;
const int ro_send_w_inv_ok;
};
@@ -525,6 +516,18 @@ struct rpcrdma_xprt {
#define rpcx_to_rdmax(x) container_of(x, struct rpcrdma_xprt, rx_xprt)
#define rpcx_to_rdmad(x) (rpcx_to_rdmax(x)->rx_data)
+static inline const char *
+rpcrdma_addrstr(const struct rpcrdma_xprt *r_xprt)
+{
+ return r_xprt->rx_xprt.address_strings[RPC_DISPLAY_ADDR];
+}
+
+static inline const char *
+rpcrdma_portstr(const struct rpcrdma_xprt *r_xprt)
+{
+ return r_xprt->rx_xprt.address_strings[RPC_DISPLAY_PORT];
+}
+
/* Setting this to 0 ensures interoperability with early servers.
* Setting this to 1 enhances certain unaligned read/write performance.
* Default is 0, see sysctl entry and rpc_rdma.c rpcrdma_convert_iovs() */
@@ -538,7 +541,7 @@ extern unsigned int xprt_rdma_memreg_strategy;
/*
* Interface Adapter calls - xprtrdma/verbs.c
*/
-int rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr);
+int rpcrdma_ia_open(struct rpcrdma_xprt *xprt);
void rpcrdma_ia_remove(struct rpcrdma_ia *ia);
void rpcrdma_ia_close(struct rpcrdma_ia *);
bool frwr_is_supported(struct rpcrdma_ia *);
@@ -564,22 +567,23 @@ int rpcrdma_ep_post_recv(struct rpcrdma_ia *, struct rpcrdma_rep *);
* Buffer calls - xprtrdma/verbs.c
*/
struct rpcrdma_req *rpcrdma_create_req(struct rpcrdma_xprt *);
-struct rpcrdma_rep *rpcrdma_create_rep(struct rpcrdma_xprt *);
void rpcrdma_destroy_req(struct rpcrdma_req *);
+int rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt);
int rpcrdma_buffer_create(struct rpcrdma_xprt *);
void rpcrdma_buffer_destroy(struct rpcrdma_buffer *);
struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_buffer *buf);
void rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc);
-struct rpcrdma_mw *rpcrdma_get_mw(struct rpcrdma_xprt *);
-void rpcrdma_put_mw(struct rpcrdma_xprt *, struct rpcrdma_mw *);
+struct rpcrdma_mr *rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt);
+void rpcrdma_mr_put(struct rpcrdma_mr *mr);
+void rpcrdma_mr_unmap_and_put(struct rpcrdma_mr *mr);
+void rpcrdma_mr_defer_recovery(struct rpcrdma_mr *mr);
+
struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *);
void rpcrdma_buffer_put(struct rpcrdma_req *);
void rpcrdma_recv_buffer_get(struct rpcrdma_req *);
void rpcrdma_recv_buffer_put(struct rpcrdma_rep *);
-void rpcrdma_defer_mr_recovery(struct rpcrdma_mw *);
-
struct rpcrdma_regbuf *rpcrdma_alloc_regbuf(size_t, enum dma_data_direction,
gfp_t);
bool __rpcrdma_dma_map_regbuf(struct rpcrdma_ia *, struct rpcrdma_regbuf *);
@@ -663,7 +667,7 @@ int xprt_rdma_bc_up(struct svc_serv *, struct net *);
size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *);
int rpcrdma_bc_post_recv(struct rpcrdma_xprt *, unsigned int);
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *, struct rpcrdma_rep *);
-int rpcrdma_bc_marshal_reply(struct rpc_rqst *);
+int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst);
void xprt_rdma_bc_free_rqst(struct rpc_rqst *);
void xprt_rdma_bc_destroy(struct rpc_xprt *, unsigned int);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
@@ -671,3 +675,5 @@ void xprt_rdma_bc_destroy(struct rpc_xprt *, unsigned int);
extern struct xprt_class xprt_rdma_bc;
#endif /* _LINUX_SUNRPC_XPRT_RDMA_H */
+
+#include <trace/events/rpcrdma.h>
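
Several hunks above recover the rpcrdma_xprt from an embedded member, e.g. container_of(ep, struct rpcrdma_xprt, rx_ep) in the QP error upcall, and the header's rpcx_to_rdmax() is the same idiom. A tiny userspace demonstration of how container_of() works (illustrative; the kernel macro additionally type-checks the member):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct ep { int connected; };
struct xprt { int id; struct ep rx_ep; };       /* ep embedded in xprt */

int main(void)
{
        struct xprt x = { .id = 42, .rx_ep = { .connected = 1 } };
        struct ep *ep = &x.rx_ep;               /* a callback sees only this */
        struct xprt *back = container_of(ep, struct xprt, rx_ep);

        printf("recovered xprt id %d\n", back->id);
        return 0;
}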
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 6d0cc3b8f932..18803021f242 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -52,6 +52,8 @@
#include "sunrpc.h"
+#define RPC_TCP_READ_CHUNK_SZ (3*512*1024)
+
static void xs_close(struct rpc_xprt *xprt);
static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt,
struct socket *sock);
@@ -1003,6 +1005,7 @@ static void xs_local_data_receive(struct sock_xprt *transport)
struct sock *sk;
int err;
+restart:
mutex_lock(&transport->recv_mutex);
sk = transport->inet;
if (sk == NULL)
@@ -1016,6 +1019,11 @@ static void xs_local_data_receive(struct sock_xprt *transport)
}
if (!test_and_clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
break;
+ if (need_resched()) {
+ mutex_unlock(&transport->recv_mutex);
+ cond_resched();
+ goto restart;
+ }
}
out:
mutex_unlock(&transport->recv_mutex);
@@ -1094,6 +1102,7 @@ static void xs_udp_data_receive(struct sock_xprt *transport)
struct sock *sk;
int err;
+restart:
mutex_lock(&transport->recv_mutex);
sk = transport->inet;
if (sk == NULL)
@@ -1107,6 +1116,11 @@ static void xs_udp_data_receive(struct sock_xprt *transport)
}
if (!test_and_clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
break;
+ if (need_resched()) {
+ mutex_unlock(&transport->recv_mutex);
+ cond_resched();
+ goto restart;
+ }
}
out:
mutex_unlock(&transport->recv_mutex);
@@ -1479,6 +1493,7 @@ static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, uns
.offset = offset,
.count = len,
};
+ size_t ret;
dprintk("RPC: xs_tcp_data_recv started\n");
do {
@@ -1507,9 +1522,14 @@ static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, uns
/* Skip over any trailing bytes on short reads */
xs_tcp_read_discard(transport, &desc);
} while (desc.count);
+ ret = len - desc.count;
+ if (ret < rd_desc->count)
+ rd_desc->count -= ret;
+ else
+ rd_desc->count = 0;
trace_xs_tcp_data_recv(transport);
dprintk("RPC: xs_tcp_data_recv done\n");
- return len - desc.count;
+ return ret;
}
static void xs_tcp_data_receive(struct sock_xprt *transport)
@@ -1517,30 +1537,34 @@ static void xs_tcp_data_receive(struct sock_xprt *transport)
struct rpc_xprt *xprt = &transport->xprt;
struct sock *sk;
read_descriptor_t rd_desc = {
- .count = 2*1024*1024,
.arg.data = xprt,
};
unsigned long total = 0;
- int loop;
int read = 0;
+restart:
mutex_lock(&transport->recv_mutex);
sk = transport->inet;
if (sk == NULL)
goto out;
/* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
- for (loop = 0; loop < 64; loop++) {
+ for (;;) {
+ rd_desc.count = RPC_TCP_READ_CHUNK_SZ;
lock_sock(sk);
read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
- if (read <= 0) {
+ if (rd_desc.count != 0 || read < 0) {
clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
release_sock(sk);
break;
}
release_sock(sk);
total += read;
- rd_desc.count = 65536;
+ if (need_resched()) {
+ mutex_unlock(&transport->recv_mutex);
+ cond_resched();
+ goto restart;
+ }
}
if (test_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
queue_work(xprtiod_workqueue, &transport->recv_worker);
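
The pattern added to all three xs_*_data_receive() loops above is the same: when need_resched() fires, drop recv_mutex, call cond_resched(), and restart from the top rather than monopolizing the CPU while holding the mutex. A userspace analogue of the shape of that loop (sched_yield() stands in for cond_resched(); the budget check is a stand-in for need_resched()):

#include <pthread.h>
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t recv_mutex = PTHREAD_MUTEX_INITIALIZER;

static bool data_ready(int *budget)     /* stand-in for the socket check */
{
        return (*budget)-- > 0;
}

static void data_receive(void)
{
        int budget = 100000;

restart:
        pthread_mutex_lock(&recv_mutex);
        while (data_ready(&budget)) {
                /* ... consume one chunk of data ... */
                if (budget % 1000 == 0) {       /* "need_resched()" point */
                        pthread_mutex_unlock(&recv_mutex);
                        sched_yield();          /* cf. cond_resched() */
                        goto restart;           /* re-take the mutex */
                }
        }
        pthread_mutex_unlock(&recv_mutex);
}

int main(void)
{
        data_receive();
        printf("receive loop finished\n");
        return 0;
}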
diff --git a/net/tipc/server.c b/net/tipc/server.c
index c0d331f13eee..df0c563c90cd 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -259,8 +259,8 @@ static int tipc_receive_from_sock(struct tipc_conn *con)
iov.iov_base = buf;
iov.iov_len = s->max_rcvbuf_size;
msg.msg_name = &addr;
- ret = kernel_recvmsg(con->sock, &msg, &iov, 1, iov.iov_len,
- MSG_DONTWAIT);
+ iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, iov.iov_len);
+ ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT);
if (ret <= 0) {
kmem_cache_free(s->rcvbuf_cache, buf);
goto out_close;
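
The tipc_receive_from_sock() hunk above replaces kernel_recvmsg() with seeding msg.msg_iter via iov_iter_kvec() and calling sock_recvmsg() directly, part of the tree-wide move to iov_iter-based socket I/O. For reference, the userspace equivalent of the resulting call shape is a plain recvmsg() with a one-element iovec:

#include <stdio.h>
#include <sys/socket.h>
#include <sys/uio.h>

int main(void)
{
        int sv[2];
        char buf[64];
        struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
        struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
        ssize_t ret;

        if (socketpair(AF_UNIX, SOCK_DGRAM, 0, sv))
                return 1;
        send(sv[1], "hello", 5, 0);

        ret = recvmsg(sv[0], &msg, MSG_DONTWAIT);       /* cf. sock_recvmsg() */
        printf("received %zd bytes: %.*s\n", ret, (int)ret, buf);
        return 0;
}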
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 473a096b6fba..163f3a547501 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -711,12 +711,12 @@ static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
* imply that the operation will succeed, merely that it should be performed
* and will not block.
*/
-static unsigned int tipc_poll(struct file *file, struct socket *sock,
+static __poll_t tipc_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
struct tipc_sock *tsk = tipc_sk(sk);
- u32 revents = 0;
+ __poll_t revents = 0;
sock_poll_wait(file, sk_sleep(sk), wait);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 90a3784e3084..0214acbd6bff 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -367,7 +367,7 @@ static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int
/* relaying can only happen while the wq still exists */
u_sleep = sk_sleep(&u->sk);
if (u_sleep)
- wake_up_interruptible_poll(u_sleep, key);
+ wake_up_interruptible_poll(u_sleep, key_to_poll(key));
return 0;
}
@@ -638,8 +638,8 @@ static int unix_stream_connect(struct socket *, struct sockaddr *,
static int unix_socketpair(struct socket *, struct socket *);
static int unix_accept(struct socket *, struct socket *, int, bool);
static int unix_getname(struct socket *, struct sockaddr *, int *, int);
-static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
-static unsigned int unix_dgram_poll(struct file *, struct socket *,
+static __poll_t unix_poll(struct file *, struct socket *, poll_table *);
+static __poll_t unix_dgram_poll(struct file *, struct socket *,
poll_table *);
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
static int unix_shutdown(struct socket *, int);
@@ -2640,10 +2640,10 @@ static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
return err;
}
-static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
+static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
{
struct sock *sk = sock->sk;
- unsigned int mask;
+ __poll_t mask;
sock_poll_wait(file, sk_sleep(sk), wait);
mask = 0;
@@ -2675,11 +2675,12 @@ static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table
return mask;
}
-static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
+static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk, *other;
- unsigned int mask, writable;
+ unsigned int writable;
+ __poll_t mask;
sock_poll_wait(file, sk_sleep(sk), wait);
mask = 0;
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index c9473d698525..9d95e773f4c8 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -850,11 +850,11 @@ static int vsock_shutdown(struct socket *sock, int mode)
return err;
}
-static unsigned int vsock_poll(struct file *file, struct socket *sock,
+static __poll_t vsock_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk;
- unsigned int mask;
+ __poll_t mask;
struct vsock_sock *vsk;
sk = sock->sk;
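
The unsigned int -> __poll_t conversions in the hunks above (and throughout this series) are not cosmetic: __poll_t is a sparse __bitwise type, so mixing a poll mask with an ordinary integer produces a static-checker warning. A compilable sketch of the idea using a private type (my_poll_t and its flags are hypothetical; the real annotations are in <linux/types.h> and <uapi/asm-generic/poll.h>):

#include <stdio.h>

#ifdef __CHECKER__                      /* building under sparse */
#define __bitwise __attribute__((bitwise))
#define __force   __attribute__((force))
#else                                   /* ordinary compilers see plain ints */
#define __bitwise
#define __force
#endif

typedef unsigned int __bitwise my_poll_t;

#define MY_POLLIN  ((__force my_poll_t)0x0001)
#define MY_POLLOUT ((__force my_poll_t)0x0004)

static my_poll_t my_poll(void)
{
        my_poll_t mask = 0;

        mask |= MY_POLLIN | MY_POLLOUT; /* only poll flags may be OR'ed in */
        return mask;
}

int main(void)
{
        printf("mask = %#x\n", (__force unsigned int)my_poll());
        return 0;
}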
diff --git a/samples/livepatch/livepatch-callbacks-demo.c b/samples/livepatch/livepatch-callbacks-demo.c
index 3d115bd68442..72f9e6d1387b 100644
--- a/samples/livepatch/livepatch-callbacks-demo.c
+++ b/samples/livepatch/livepatch-callbacks-demo.c
@@ -197,21 +197,6 @@ static int livepatch_callbacks_demo_init(void)
{
int ret;
- if (!klp_have_reliable_stack() && !patch.immediate) {
- /*
- * WARNING: Be very careful when using 'patch.immediate' in
- * your patches. It's ok to use it for simple patches like
- * this, but for more complex patches which change function
- * semantics, locking semantics, or data structures, it may not
- * be safe. Use of this option will also prevent removal of
- * the patch.
- *
- * See Documentation/livepatch/livepatch.txt for more details.
- */
- patch.immediate = true;
- pr_notice("The consistency model isn't supported for your architecture. Bypassing safety mechanisms and applying the patch immediately.\n");
- }
-
ret = klp_register_patch(&patch);
if (ret)
return ret;
diff --git a/samples/livepatch/livepatch-sample.c b/samples/livepatch/livepatch-sample.c
index 84795223f15f..2d554dd930e2 100644
--- a/samples/livepatch/livepatch-sample.c
+++ b/samples/livepatch/livepatch-sample.c
@@ -71,21 +71,6 @@ static int livepatch_init(void)
{
int ret;
- if (!klp_have_reliable_stack() && !patch.immediate) {
- /*
- * WARNING: Be very careful when using 'patch.immediate' in
- * your patches. It's ok to use it for simple patches like
- * this, but for more complex patches which change function
- * semantics, locking semantics, or data structures, it may not
- * be safe. Use of this option will also prevent removal of
- * the patch.
- *
- * See Documentation/livepatch/livepatch.txt for more details.
- */
- patch.immediate = true;
- pr_notice("The consistency model isn't supported for your architecture. Bypassing safety mechanisms and applying the patch immediately.\n");
- }
-
ret = klp_register_patch(&patch);
if (ret)
return ret;
diff --git a/samples/livepatch/livepatch-shadow-fix1.c b/samples/livepatch/livepatch-shadow-fix1.c
index fbe0a1f3d99b..830c55514f9f 100644
--- a/samples/livepatch/livepatch-shadow-fix1.c
+++ b/samples/livepatch/livepatch-shadow-fix1.c
@@ -133,21 +133,6 @@ static int livepatch_shadow_fix1_init(void)
{
int ret;
- if (!klp_have_reliable_stack() && !patch.immediate) {
- /*
- * WARNING: Be very careful when using 'patch.immediate' in
- * your patches. It's ok to use it for simple patches like
- * this, but for more complex patches which change function
- * semantics, locking semantics, or data structures, it may not
- * be safe. Use of this option will also prevent removal of
- * the patch.
- *
- * See Documentation/livepatch/livepatch.txt for more details.
- */
- patch.immediate = true;
- pr_notice("The consistency model isn't supported for your architecture. Bypassing safety mechanisms and applying the patch immediately.\n");
- }
-
ret = klp_register_patch(&patch);
if (ret)
return ret;
diff --git a/samples/livepatch/livepatch-shadow-fix2.c b/samples/livepatch/livepatch-shadow-fix2.c
index 53c1794bdc5f..ff9948f0ec00 100644
--- a/samples/livepatch/livepatch-shadow-fix2.c
+++ b/samples/livepatch/livepatch-shadow-fix2.c
@@ -128,21 +128,6 @@ static int livepatch_shadow_fix2_init(void)
{
int ret;
- if (!klp_have_reliable_stack() && !patch.immediate) {
- /*
- * WARNING: Be very careful when using 'patch.immediate' in
- * your patches. It's ok to use it for simple patches like
- * this, but for more complex patches which change function
- * semantics, locking semantics, or data structures, it may not
- * be safe. Use of this option will also prevent removal of
- * the patch.
- *
- * See Documentation/livepatch/livepatch.txt for more details.
- */
- patch.immediate = true;
- pr_notice("The consistency model isn't supported for your architecture. Bypassing safety mechanisms and applying the patch immediately.\n");
- }
-
ret = klp_register_patch(&patch);
if (ret)
return ret;
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 31031f10fe56..ba03f17ff662 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -5586,6 +5586,12 @@ sub process {
}
}
+# check for smp_read_barrier_depends and read_barrier_depends
+ if (!$file && $line =~ /\b(smp_|)read_barrier_depends\s*\(/) {
+ WARN("READ_BARRIER_DEPENDS",
+ "$1read_barrier_depends should only be used in READ_ONCE or DEC Alpha code\n" . $herecurr);
+ }
+
# check of hardware specific defines
if ($line =~ m@^.\s*\#\s*if.*\b(__i386__|__powerpc64__|__sun__|__s390x__)\b@ && $realfile !~ m@include/asm-@) {
CHK("ARCH_DEFINES",
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index f51cf977c65b..6510536c06df 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -2165,6 +2165,14 @@ static void add_intree_flag(struct buffer *b, int is_intree)
buf_printf(b, "\nMODULE_INFO(intree, \"Y\");\n");
}
+/* Cannot check for assembler */
+static void add_retpoline(struct buffer *b)
+{
+ buf_printf(b, "\n#ifdef RETPOLINE\n");
+ buf_printf(b, "MODULE_INFO(retpoline, \"Y\");\n");
+ buf_printf(b, "#endif\n");
+}
+
static void add_staging_flag(struct buffer *b, const char *name)
{
static const char *staging_dir = "drivers/staging";
@@ -2506,6 +2514,7 @@ int main(int argc, char **argv)
err |= check_modname_len(mod);
add_header(&buf, mod);
add_intree_flag(&buf, !external_module);
+ add_retpoline(&buf);
add_staging_flag(&buf, mod->name);
err |= add_versions(&buf, mod);
add_depends(&buf, mod, modules);
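
For reference, the add_retpoline() hunk above makes modpost append the following to every generated *.mod.c (reconstructed from the buf_printf() calls), so a loaded module carries a retpoline marker the kernel can inspect:

#ifdef RETPOLINE
MODULE_INFO(retpoline, "Y");
#endif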
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index d4fa04d91439..4d202b73a0e1 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -571,10 +571,10 @@ static int ns_revision_open(struct inode *inode, struct file *file)
return 0;
}
-static unsigned int ns_revision_poll(struct file *file, poll_table *pt)
+static __poll_t ns_revision_poll(struct file *file, poll_table *pt)
{
struct aa_revision *rev = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
if (rev) {
mutex_lock_nested(&rev->ns->lock, rev->ns->level);
diff --git a/security/integrity/evm/evm.h b/security/integrity/evm/evm.h
index 241aca315b0c..04825393facb 100644
--- a/security/integrity/evm/evm.h
+++ b/security/integrity/evm/evm.h
@@ -23,9 +23,12 @@
#define EVM_INIT_HMAC 0x0001
#define EVM_INIT_X509 0x0002
-#define EVM_SETUP 0x80000000 /* userland has signaled key load */
+#define EVM_ALLOW_METADATA_WRITES 0x0004
+#define EVM_SETUP_COMPLETE 0x80000000 /* userland has signaled key load */
-#define EVM_INIT_MASK (EVM_INIT_HMAC | EVM_INIT_X509 | EVM_SETUP)
+#define EVM_KEY_MASK (EVM_INIT_HMAC | EVM_INIT_X509)
+#define EVM_INIT_MASK (EVM_INIT_HMAC | EVM_INIT_X509 | EVM_SETUP_COMPLETE | \
+ EVM_ALLOW_METADATA_WRITES)
extern int evm_initialized;
extern char *evm_hmac;
@@ -51,7 +54,7 @@ int evm_calc_hmac(struct dentry *dentry, const char *req_xattr_name,
size_t req_xattr_value_len, char *digest);
int evm_calc_hash(struct dentry *dentry, const char *req_xattr_name,
const char *req_xattr_value,
- size_t req_xattr_value_len, char *digest);
+ size_t req_xattr_value_len, char type, char *digest);
int evm_init_hmac(struct inode *inode, const struct xattr *xattr,
char *hmac_val);
int evm_init_secfs(void);
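
The reworked evm.h flag masks above split "a key is loaded" (EVM_KEY_MASK) from "setup is finished" (EVM_SETUP_COMPLETE) and add the new metadata-writes bit. A quick userspace check of the arithmetic (values copied from the header; the evm_key_loaded() test itself appears later in evm_main.c):

#include <stdio.h>

#define EVM_INIT_HMAC              0x0001
#define EVM_INIT_X509              0x0002
#define EVM_ALLOW_METADATA_WRITES  0x0004
#define EVM_SETUP_COMPLETE         0x80000000

#define EVM_KEY_MASK  (EVM_INIT_HMAC | EVM_INIT_X509)
#define EVM_INIT_MASK (EVM_INIT_HMAC | EVM_INIT_X509 | EVM_SETUP_COMPLETE | \
                       EVM_ALLOW_METADATA_WRITES)

int main(void)
{
        unsigned int evm_initialized = EVM_INIT_X509 | EVM_ALLOW_METADATA_WRITES;

        /* a key is loaded iff one of the EVM_KEY_MASK bits is set */
        printf("key loaded: %d\n", !!(evm_initialized & EVM_KEY_MASK));
        /* evm_write_key() rejects input with bits outside EVM_INIT_MASK */
        printf("0x10 accepted: %d\n", (0x10 & ~EVM_INIT_MASK) == 0);
        return 0;
}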
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index bcd64baf8788..691f3e09154c 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -138,7 +138,7 @@ out:
* protection.)
*/
static void hmac_add_misc(struct shash_desc *desc, struct inode *inode,
- char *digest)
+ char type, char *digest)
{
struct h_misc {
unsigned long ino;
@@ -149,8 +149,13 @@ static void hmac_add_misc(struct shash_desc *desc, struct inode *inode,
} hmac_misc;
memset(&hmac_misc, 0, sizeof(hmac_misc));
- hmac_misc.ino = inode->i_ino;
- hmac_misc.generation = inode->i_generation;
+ /* Don't include the inode or generation number in portable
+ * signatures
+ */
+ if (type != EVM_XATTR_PORTABLE_DIGSIG) {
+ hmac_misc.ino = inode->i_ino;
+ hmac_misc.generation = inode->i_generation;
+ }
/* The hmac uid and gid must be encoded in the initial user
 * namespace (not the filesystem's user namespace) as encoding
 * them in the filesystem's user namespace allows an attack
@@ -163,7 +168,8 @@ static void hmac_add_misc(struct shash_desc *desc, struct inode *inode,
hmac_misc.gid = from_kgid(&init_user_ns, inode->i_gid);
hmac_misc.mode = inode->i_mode;
crypto_shash_update(desc, (const u8 *)&hmac_misc, sizeof(hmac_misc));
- if (evm_hmac_attrs & EVM_ATTR_FSUUID)
+ if ((evm_hmac_attrs & EVM_ATTR_FSUUID) &&
+ type != EVM_XATTR_PORTABLE_DIGSIG)
crypto_shash_update(desc, &inode->i_sb->s_uuid.b[0],
sizeof(inode->i_sb->s_uuid));
crypto_shash_final(desc, digest);
@@ -189,6 +195,7 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry,
char *xattr_value = NULL;
int error;
int size;
+ bool ima_present = false;
if (!(inode->i_opflags & IOP_XATTR))
return -EOPNOTSUPP;
@@ -199,11 +206,18 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry,
error = -ENODATA;
for (xattrname = evm_config_xattrnames; *xattrname != NULL; xattrname++) {
+ bool is_ima = false;
+
+ if (strcmp(*xattrname, XATTR_NAME_IMA) == 0)
+ is_ima = true;
+
if ((req_xattr_name && req_xattr_value)
&& !strcmp(*xattrname, req_xattr_name)) {
error = 0;
crypto_shash_update(desc, (const u8 *)req_xattr_value,
req_xattr_value_len);
+ if (is_ima)
+ ima_present = true;
continue;
}
size = vfs_getxattr_alloc(dentry, *xattrname,
@@ -218,9 +232,14 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry,
error = 0;
xattr_size = size;
crypto_shash_update(desc, (const u8 *)xattr_value, xattr_size);
+ if (is_ima)
+ ima_present = true;
}
- hmac_add_misc(desc, inode, digest);
+ hmac_add_misc(desc, inode, type, digest);
+ /* Portable EVM signatures must include an IMA hash */
+ if (type == EVM_XATTR_PORTABLE_DIGSIG && !ima_present)
+ return -EPERM;
out:
kfree(xattr_value);
kfree(desc);
@@ -232,17 +251,45 @@ int evm_calc_hmac(struct dentry *dentry, const char *req_xattr_name,
char *digest)
{
return evm_calc_hmac_or_hash(dentry, req_xattr_name, req_xattr_value,
- req_xattr_value_len, EVM_XATTR_HMAC, digest);
+ req_xattr_value_len, EVM_XATTR_HMAC, digest);
}
int evm_calc_hash(struct dentry *dentry, const char *req_xattr_name,
const char *req_xattr_value, size_t req_xattr_value_len,
- char *digest)
+ char type, char *digest)
{
return evm_calc_hmac_or_hash(dentry, req_xattr_name, req_xattr_value,
- req_xattr_value_len, IMA_XATTR_DIGEST, digest);
+ req_xattr_value_len, type, digest);
+}
+
+static int evm_is_immutable(struct dentry *dentry, struct inode *inode)
+{
+ const struct evm_ima_xattr_data *xattr_data = NULL;
+ struct integrity_iint_cache *iint;
+ int rc = 0;
+
+ iint = integrity_iint_find(inode);
+ if (iint && (iint->flags & EVM_IMMUTABLE_DIGSIG))
+ return 1;
+
+ /* Do this the hard way */
+ rc = vfs_getxattr_alloc(dentry, XATTR_NAME_EVM, (char **)&xattr_data, 0,
+ GFP_NOFS);
+ if (rc <= 0) {
+ if (rc == -ENODATA)
+ return 0;
+ return rc;
+ }
+ if (xattr_data->type == EVM_XATTR_PORTABLE_DIGSIG)
+ rc = 1;
+ else
+ rc = 0;
+
+ kfree(xattr_data);
+ return rc;
}
+
/*
* Calculate the hmac and update security.evm xattr
*
@@ -255,6 +302,16 @@ int evm_update_evmxattr(struct dentry *dentry, const char *xattr_name,
struct evm_ima_xattr_data xattr_data;
int rc = 0;
+ /*
+ * Don't permit any transformation of the EVM xattr if the signature
+ * is of an immutable type
+ */
+ rc = evm_is_immutable(dentry, inode);
+ if (rc < 0)
+ return rc;
+ if (rc)
+ return -EPERM;
+
rc = evm_calc_hmac(dentry, xattr_name, xattr_value,
xattr_value_len, xattr_data.digest);
if (rc == 0) {
@@ -280,7 +337,7 @@ int evm_init_hmac(struct inode *inode, const struct xattr *lsm_xattr,
}
crypto_shash_update(desc, lsm_xattr->value, lsm_xattr->value_len);
- hmac_add_misc(desc, inode, hmac_val);
+ hmac_add_misc(desc, inode, EVM_XATTR_HMAC, hmac_val);
kfree(desc);
return 0;
}
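
The point of the hmac_add_misc() changes above is that a portable signature must still verify after the file is copied elsewhere, so inode-local data (i_ino, i_generation, and the fs UUID) is left out of the digested blob. A simplified userspace illustration (the xattr type values here are made up; the kernel's live in the evm_ima_xattr_type enum):

#include <stdio.h>
#include <string.h>

struct h_misc {
        unsigned long ino, generation;
        unsigned int uid, gid, mode;
};

enum { XATTR_HMAC = 1, XATTR_PORTABLE_DIGSIG = 2 };     /* illustrative */

static void fill_misc(struct h_misc *m, int type,
                      unsigned long ino, unsigned long gen)
{
        memset(m, 0, sizeof(*m));
        if (type != XATTR_PORTABLE_DIGSIG) {
                m->ino = ino;           /* ties the digest to this inode */
                m->generation = gen;
        }
        m->uid = 1000;
        m->gid = 1000;
        m->mode = 0100644;
}

int main(void)
{
        struct h_misc a, b;

        /* same file metadata on two different inodes */
        fill_misc(&a, XATTR_PORTABLE_DIGSIG, 111, 1);
        fill_misc(&b, XATTR_PORTABLE_DIGSIG, 222, 9);
        printf("portable blobs identical: %d\n", !memcmp(&a, &b, sizeof(a)));
        return 0;
}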
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
index 9826c02e2db8..a8d502827270 100644
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -31,7 +31,7 @@
int evm_initialized;
static char *integrity_status_msg[] = {
- "pass", "fail", "no_label", "no_xattrs", "unknown"
+ "pass", "pass_immutable", "fail", "no_label", "no_xattrs", "unknown"
};
char *evm_hmac = "hmac(sha1)";
char *evm_hash = "sha1";
@@ -76,6 +76,11 @@ static void __init evm_init_config(void)
pr_info("HMAC attrs: 0x%x\n", evm_hmac_attrs);
}
+static bool evm_key_loaded(void)
+{
+ return (bool)(evm_initialized & EVM_KEY_MASK);
+}
+
static int evm_find_protected_xattrs(struct dentry *dentry)
{
struct inode *inode = d_backing_inode(dentry);
@@ -123,7 +128,8 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
enum integrity_status evm_status = INTEGRITY_PASS;
int rc, xattr_len;
- if (iint && iint->evm_status == INTEGRITY_PASS)
+ if (iint && (iint->evm_status == INTEGRITY_PASS ||
+ iint->evm_status == INTEGRITY_PASS_IMMUTABLE))
return iint->evm_status;
/* if status is not PASS, try to check again - against -ENOMEM */
@@ -164,22 +170,26 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
rc = -EINVAL;
break;
case EVM_IMA_XATTR_DIGSIG:
+ case EVM_XATTR_PORTABLE_DIGSIG:
rc = evm_calc_hash(dentry, xattr_name, xattr_value,
- xattr_value_len, calc.digest);
+ xattr_value_len, xattr_data->type,
+ calc.digest);
if (rc)
break;
rc = integrity_digsig_verify(INTEGRITY_KEYRING_EVM,
(const char *)xattr_data, xattr_len,
calc.digest, sizeof(calc.digest));
if (!rc) {
- /* Replace RSA with HMAC if not mounted readonly and
- * not immutable
- */
- if (!IS_RDONLY(d_backing_inode(dentry)) &&
- !IS_IMMUTABLE(d_backing_inode(dentry)))
+ if (xattr_data->type == EVM_XATTR_PORTABLE_DIGSIG) {
+ if (iint)
+ iint->flags |= EVM_IMMUTABLE_DIGSIG;
+ evm_status = INTEGRITY_PASS_IMMUTABLE;
+ } else if (!IS_RDONLY(d_backing_inode(dentry)) &&
+ !IS_IMMUTABLE(d_backing_inode(dentry))) {
evm_update_evmxattr(dentry, xattr_name,
xattr_value,
xattr_value_len);
+ }
}
break;
default:
@@ -241,7 +251,7 @@ enum integrity_status evm_verifyxattr(struct dentry *dentry,
void *xattr_value, size_t xattr_value_len,
struct integrity_iint_cache *iint)
{
- if (!evm_initialized || !evm_protected_xattr(xattr_name))
+ if (!evm_key_loaded() || !evm_protected_xattr(xattr_name))
return INTEGRITY_UNKNOWN;
if (!iint) {
@@ -265,7 +275,7 @@ static enum integrity_status evm_verify_current_integrity(struct dentry *dentry)
{
struct inode *inode = d_backing_inode(dentry);
- if (!evm_initialized || !S_ISREG(inode->i_mode) || evm_fixmode)
+ if (!evm_key_loaded() || !S_ISREG(inode->i_mode) || evm_fixmode)
return 0;
return evm_verify_hmac(dentry, NULL, NULL, 0, NULL);
}
@@ -280,7 +290,7 @@ static enum integrity_status evm_verify_current_integrity(struct dentry *dentry)
* affect security.evm. An interesting side effect of writing posix xattr
* acls is their modifying of the i_mode, which is included in security.evm.
* For posix xattr acls only, permit security.evm, even if it currently
- * doesn't exist, to be updated.
+ * doesn't exist, to be updated unless the EVM signature is immutable.
*/
static int evm_protect_xattr(struct dentry *dentry, const char *xattr_name,
const void *xattr_value, size_t xattr_value_len)
@@ -299,6 +309,7 @@ static int evm_protect_xattr(struct dentry *dentry, const char *xattr_name,
return 0;
goto out;
}
+
evm_status = evm_verify_current_integrity(dentry);
if (evm_status == INTEGRITY_NOXATTRS) {
struct integrity_iint_cache *iint;
@@ -345,10 +356,17 @@ int evm_inode_setxattr(struct dentry *dentry, const char *xattr_name,
{
const struct evm_ima_xattr_data *xattr_data = xattr_value;
+ /* Policy permits modification of the protected xattrs even though
+ * there's no HMAC key loaded
+ */
+ if (evm_initialized & EVM_ALLOW_METADATA_WRITES)
+ return 0;
+
if (strcmp(xattr_name, XATTR_NAME_EVM) == 0) {
if (!xattr_value_len)
return -EINVAL;
- if (xattr_data->type != EVM_IMA_XATTR_DIGSIG)
+ if (xattr_data->type != EVM_IMA_XATTR_DIGSIG &&
+ xattr_data->type != EVM_XATTR_PORTABLE_DIGSIG)
return -EPERM;
}
return evm_protect_xattr(dentry, xattr_name, xattr_value,
@@ -365,6 +383,12 @@ int evm_inode_setxattr(struct dentry *dentry, const char *xattr_name,
*/
int evm_inode_removexattr(struct dentry *dentry, const char *xattr_name)
{
+ /* Policy permits modification of the protected xattrs even though
+ * there's no HMAC key loaded
+ */
+ if (evm_initialized & EVM_ALLOW_METADATA_WRITES)
+ return 0;
+
return evm_protect_xattr(dentry, xattr_name, NULL, 0);
}
@@ -393,8 +417,8 @@ static void evm_reset_status(struct inode *inode)
void evm_inode_post_setxattr(struct dentry *dentry, const char *xattr_name,
const void *xattr_value, size_t xattr_value_len)
{
- if (!evm_initialized || (!evm_protected_xattr(xattr_name)
- && !posix_xattr_acl(xattr_name)))
+ if (!evm_key_loaded() || (!evm_protected_xattr(xattr_name)
+ && !posix_xattr_acl(xattr_name)))
return;
evm_reset_status(dentry->d_inode);
@@ -414,7 +438,7 @@ void evm_inode_post_setxattr(struct dentry *dentry, const char *xattr_name,
*/
void evm_inode_post_removexattr(struct dentry *dentry, const char *xattr_name)
{
- if (!evm_initialized || !evm_protected_xattr(xattr_name))
+ if (!evm_key_loaded() || !evm_protected_xattr(xattr_name))
return;
evm_reset_status(dentry->d_inode);
@@ -425,12 +449,21 @@ void evm_inode_post_removexattr(struct dentry *dentry, const char *xattr_name)
/**
* evm_inode_setattr - prevent updating an invalid EVM extended attribute
* @dentry: pointer to the affected dentry
+ *
+ * Permit update of file attributes when files have a valid EVM signature,
+ * except in the case of them having an immutable portable signature.
*/
int evm_inode_setattr(struct dentry *dentry, struct iattr *attr)
{
unsigned int ia_valid = attr->ia_valid;
enum integrity_status evm_status;
+ /* Policy permits modification of the protected attrs even though
+ * there's no HMAC key loaded
+ */
+ if (evm_initialized & EVM_ALLOW_METADATA_WRITES)
+ return 0;
+
if (!(ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID)))
return 0;
evm_status = evm_verify_current_integrity(dentry);
@@ -456,7 +489,7 @@ int evm_inode_setattr(struct dentry *dentry, struct iattr *attr)
*/
void evm_inode_post_setattr(struct dentry *dentry, int ia_valid)
{
- if (!evm_initialized)
+ if (!evm_key_loaded())
return;
if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID))
@@ -473,7 +506,7 @@ int evm_inode_init_security(struct inode *inode,
struct evm_ima_xattr_data *xattr_data;
int rc;
- if (!evm_initialized || !evm_protected_xattr(lsm_xattr->name))
+ if (!evm_key_loaded() || !evm_protected_xattr(lsm_xattr->name))
return 0;
xattr_data = kzalloc(sizeof(*xattr_data), GFP_NOFS);
diff --git a/security/integrity/evm/evm_secfs.c b/security/integrity/evm/evm_secfs.c
index 319cf16d6603..feba03bbedae 100644
--- a/security/integrity/evm/evm_secfs.c
+++ b/security/integrity/evm/evm_secfs.c
@@ -40,7 +40,7 @@ static ssize_t evm_read_key(struct file *filp, char __user *buf,
if (*ppos != 0)
return 0;
- sprintf(temp, "%d", (evm_initialized & ~EVM_SETUP));
+ sprintf(temp, "%d", (evm_initialized & ~EVM_SETUP_COMPLETE));
rc = simple_read_from_buffer(buf, count, ppos, temp, strlen(temp));
return rc;
@@ -63,7 +63,7 @@ static ssize_t evm_write_key(struct file *file, const char __user *buf,
{
int i, ret;
- if (!capable(CAP_SYS_ADMIN) || (evm_initialized & EVM_SETUP))
+ if (!capable(CAP_SYS_ADMIN) || (evm_initialized & EVM_SETUP_COMPLETE))
return -EPERM;
ret = kstrtoint_from_user(buf, count, 0, &i);
@@ -75,16 +75,30 @@ static ssize_t evm_write_key(struct file *file, const char __user *buf,
if (!i || (i & ~EVM_INIT_MASK) != 0)
return -EINVAL;
+ /* Don't allow a request to freshly enable metadata writes if
+ * keys are loaded.
+ */
+ if ((i & EVM_ALLOW_METADATA_WRITES) &&
+ ((evm_initialized & EVM_KEY_MASK) != 0) &&
+ !(evm_initialized & EVM_ALLOW_METADATA_WRITES))
+ return -EPERM;
+
if (i & EVM_INIT_HMAC) {
ret = evm_init_key();
if (ret != 0)
return ret;
/* Forbid further writes after the symmetric key is loaded */
- i |= EVM_SETUP;
+ i |= EVM_SETUP_COMPLETE;
}
evm_initialized |= i;
+ /* Don't allow protected metadata modification if a symmetric key
+ * is loaded
+ */
+ if (evm_initialized & EVM_INIT_HMAC)
+ evm_initialized &= ~(EVM_ALLOW_METADATA_WRITES);
+
return count;
}
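
Taken together, the evm_write_key() changes above implement a small state machine: metadata writes can only be enabled before any key is loaded, and loading the symmetric HMAC key both completes setup and revokes the metadata-writes bit. A simplified userspace model of that logic (it omits the EVM_SETUP_COMPLETE and capability checks the kernel also performs):

#include <stdio.h>

#define EVM_INIT_HMAC              0x0001
#define EVM_INIT_X509              0x0002
#define EVM_ALLOW_METADATA_WRITES  0x0004
#define EVM_SETUP_COMPLETE         0x80000000
#define EVM_KEY_MASK  (EVM_INIT_HMAC | EVM_INIT_X509)

static int evm_apply_write(unsigned int *evm_initialized, unsigned int i)
{
        /* can't freshly enable metadata writes once a key is loaded */
        if ((i & EVM_ALLOW_METADATA_WRITES) &&
            (*evm_initialized & EVM_KEY_MASK) != 0 &&
            !(*evm_initialized & EVM_ALLOW_METADATA_WRITES))
                return -1;                      /* -EPERM in the kernel */

        if (i & EVM_INIT_HMAC)
                i |= EVM_SETUP_COMPLETE;        /* forbid further writes */

        *evm_initialized |= i;

        /* a loaded symmetric key revokes metadata writes */
        if (*evm_initialized & EVM_INIT_HMAC)
                *evm_initialized &= ~EVM_ALLOW_METADATA_WRITES;
        return 0;
}

int main(void)
{
        unsigned int flags = 0;

        evm_apply_write(&flags, EVM_ALLOW_METADATA_WRITES);
        printf("after enabling writes: %#x\n", flags);
        evm_apply_write(&flags, EVM_INIT_HMAC);
        printf("after loading HMAC key: %#x\n", flags);
        printf("re-enabling writes now returns: %d\n",
               evm_apply_write(&flags, EVM_ALLOW_METADATA_WRITES));
        return 0;
}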
diff --git a/security/integrity/iint.c b/security/integrity/iint.c
index c84e05866052..fc38ca08dbb5 100644
--- a/security/integrity/iint.c
+++ b/security/integrity/iint.c
@@ -153,14 +153,12 @@ static void init_once(void *foo)
struct integrity_iint_cache *iint = foo;
memset(iint, 0, sizeof(*iint));
- iint->version = 0;
- iint->flags = 0UL;
iint->ima_file_status = INTEGRITY_UNKNOWN;
iint->ima_mmap_status = INTEGRITY_UNKNOWN;
iint->ima_bprm_status = INTEGRITY_UNKNOWN;
iint->ima_read_status = INTEGRITY_UNKNOWN;
iint->evm_status = INTEGRITY_UNKNOWN;
- iint->measured_pcrs = 0;
+ mutex_init(&iint->mutex);
}
static int __init integrity_iintcache_init(void)
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
index c7e8db0ea4c0..08fe405338e1 100644
--- a/security/integrity/ima/ima_api.c
+++ b/security/integrity/ima/ima_api.c
@@ -18,6 +18,7 @@
#include <linux/fs.h>
#include <linux/xattr.h>
#include <linux/evm.h>
+#include <linux/iversion.h>
#include "ima.h"
@@ -174,7 +175,7 @@ err_out:
*/
int ima_get_action(struct inode *inode, int mask, enum ima_hooks func, int *pcr)
{
- int flags = IMA_MEASURE | IMA_AUDIT | IMA_APPRAISE;
+ int flags = IMA_MEASURE | IMA_AUDIT | IMA_APPRAISE | IMA_HASH;
flags &= ima_policy_flag;
@@ -215,7 +216,7 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
* which do not support i_version, support is limited to an initial
* measurement/appraisal/audit.
*/
- i_version = file_inode(file)->i_version;
+ i_version = inode_query_iversion(inode);
hash.hdr.algo = algo;
/* Initialize hash digest to 0's in case of failure */
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index 65fbcf3c32c7..f2803a40ff82 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -46,14 +46,15 @@ bool is_ima_appraise_enabled(void)
/*
* ima_must_appraise - set appraise flag
*
- * Return 1 to appraise
+ * Return 1 to appraise or hash
*/
int ima_must_appraise(struct inode *inode, int mask, enum ima_hooks func)
{
if (!ima_appraise)
return 0;
- return ima_match_policy(inode, func, mask, IMA_APPRAISE, NULL);
+ return ima_match_policy(inode, func, mask, IMA_APPRAISE | IMA_HASH,
+ NULL);
}
static int ima_fix_xattr(struct dentry *dentry,
@@ -223,13 +224,16 @@ int ima_appraise_measurement(enum ima_hooks func,
if (opened & FILE_CREATED)
iint->flags |= IMA_NEW_FILE;
if ((iint->flags & IMA_NEW_FILE) &&
- !(iint->flags & IMA_DIGSIG_REQUIRED))
+ (!(iint->flags & IMA_DIGSIG_REQUIRED) ||
+ (inode->i_size == 0)))
status = INTEGRITY_PASS;
goto out;
}
status = evm_verifyxattr(dentry, XATTR_NAME_IMA, xattr_value, rc, iint);
- if ((status != INTEGRITY_PASS) && (status != INTEGRITY_UNKNOWN)) {
+ if ((status != INTEGRITY_PASS) &&
+ (status != INTEGRITY_PASS_IMMUTABLE) &&
+ (status != INTEGRITY_UNKNOWN)) {
if ((status == INTEGRITY_NOLABEL)
|| (status == INTEGRITY_NOXATTRS))
cause = "missing-HMAC";
@@ -248,6 +252,7 @@ int ima_appraise_measurement(enum ima_hooks func,
status = INTEGRITY_FAIL;
break;
}
+ clear_bit(IMA_DIGSIG, &iint->atomic_flags);
if (xattr_len - sizeof(xattr_value->type) - hash_start >=
iint->ima_hash->length)
/* xattr length may be longer. md5 hash in previous
@@ -266,7 +271,7 @@ int ima_appraise_measurement(enum ima_hooks func,
status = INTEGRITY_PASS;
break;
case EVM_IMA_XATTR_DIGSIG:
- iint->flags |= IMA_DIGSIG;
+ set_bit(IMA_DIGSIG, &iint->atomic_flags);
rc = integrity_digsig_verify(INTEGRITY_KEYRING_IMA,
(const char *)xattr_value, rc,
iint->ima_hash->digest,
@@ -317,17 +322,20 @@ void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file)
int rc = 0;
/* do not collect and update hash for digital signatures */
- if (iint->flags & IMA_DIGSIG)
+ if (test_bit(IMA_DIGSIG, &iint->atomic_flags))
return;
- if (iint->ima_file_status != INTEGRITY_PASS)
+ if ((iint->ima_file_status != INTEGRITY_PASS) &&
+ !(iint->flags & IMA_HASH))
return;
rc = ima_collect_measurement(iint, file, NULL, 0, ima_hash_algo);
if (rc < 0)
return;
+ inode_lock(file_inode(file));
ima_fix_xattr(dentry, iint);
+ inode_unlock(file_inode(file));
}
/**
@@ -343,23 +351,21 @@ void ima_inode_post_setattr(struct dentry *dentry)
{
struct inode *inode = d_backing_inode(dentry);
struct integrity_iint_cache *iint;
- int must_appraise;
+ int action;
if (!(ima_policy_flag & IMA_APPRAISE) || !S_ISREG(inode->i_mode)
|| !(inode->i_opflags & IOP_XATTR))
return;
- must_appraise = ima_must_appraise(inode, MAY_ACCESS, POST_SETATTR);
+ action = ima_must_appraise(inode, MAY_ACCESS, POST_SETATTR);
+ if (!action)
+ __vfs_removexattr(dentry, XATTR_NAME_IMA);
iint = integrity_iint_find(inode);
if (iint) {
- iint->flags &= ~(IMA_APPRAISE | IMA_APPRAISED |
- IMA_APPRAISE_SUBMASK | IMA_APPRAISED_SUBMASK |
- IMA_ACTION_RULE_FLAGS);
- if (must_appraise)
- iint->flags |= IMA_APPRAISE;
+ set_bit(IMA_CHANGE_ATTR, &iint->atomic_flags);
+ if (!action)
+ clear_bit(IMA_UPDATE_XATTR, &iint->atomic_flags);
}
- if (!must_appraise)
- __vfs_removexattr(dentry, XATTR_NAME_IMA);
}
/*
@@ -388,12 +394,12 @@ static void ima_reset_appraise_flags(struct inode *inode, int digsig)
iint = integrity_iint_find(inode);
if (!iint)
return;
-
- iint->flags &= ~IMA_DONE_MASK;
iint->measured_pcrs = 0;
+ set_bit(IMA_CHANGE_XATTR, &iint->atomic_flags);
if (digsig)
- iint->flags |= IMA_DIGSIG;
- return;
+ set_bit(IMA_DIGSIG, &iint->atomic_flags);
+ else
+ clear_bit(IMA_DIGSIG, &iint->atomic_flags);
}
int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
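
The recurring change in this file is moving bits that get flipped from the setattr/setxattr paths out of the mutex-protected iint->flags word into iint->atomic_flags, where set_bit()/test_and_clear_bit() provide atomic updates without the lock. A sketch of the split, with hypothetical demo_* names:

#include <linux/bitops.h>
#include <linux/mutex.h>

#define DEMO_CHANGE_ATTR 2     /* a bit number, like IMA_CHANGE_ATTR */

struct demo_iint {
	struct mutex mutex;          /* protects flags */
	unsigned long flags;         /* only touched under mutex */
	unsigned long atomic_flags;  /* lock-free bitops */
};

/* may be called with no locks held: atomic RMW on one bit */
static void demo_mark_attr_changed(struct demo_iint *iint)
{
	set_bit(DEMO_CHANGE_ATTR, &iint->atomic_flags);
}

/* the consumer drains the notification under its own lock */
static void demo_process(struct demo_iint *iint)
{
	mutex_lock(&iint->mutex);
	if (test_and_clear_bit(DEMO_CHANGE_ATTR, &iint->atomic_flags))
		iint->flags = 0;     /* reset cached appraisal state */
	mutex_unlock(&iint->mutex);
}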
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index 9057b163c378..205bc69361ea 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -632,7 +632,7 @@ static void __init ima_pcrread(int idx, u8 *pcr)
if (!ima_used_chip)
return;
- if (tpm_pcr_read(TPM_ANY_NUM, idx, pcr) != 0)
+ if (tpm_pcr_read(NULL, idx, pcr) != 0)
pr_err("Error Communicating to TPM chip\n");
}
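
This and the following TPM call sites switch from the old TPM_ANY_NUM integer id to passing a struct tpm_chip pointer, where NULL asks the TPM core to pick the default chip. A reduced sketch (read_pcr_zero() is a hypothetical caller):

#include <linux/tpm.h>

static int read_pcr_zero(u8 *digest)
{
	/* NULL: let the TPM core choose the registered default chip */
	return tpm_pcr_read(NULL, 0, digest);
}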
diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c
index 2967d497a665..29b72cd2502e 100644
--- a/security/integrity/ima/ima_init.c
+++ b/security/integrity/ima/ima_init.c
@@ -110,7 +110,7 @@ int __init ima_init(void)
int rc;
ima_used_chip = 0;
- rc = tpm_pcr_read(TPM_ANY_NUM, 0, pcr_i);
+ rc = tpm_pcr_read(NULL, 0, pcr_i);
if (rc == 0)
ima_used_chip = 1;
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 770654694efc..061425dd6400 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/xattr.h>
#include <linux/ima.h>
+#include <linux/iversion.h>
#include "ima.h"
@@ -84,10 +85,10 @@ static void ima_rdwr_violation_check(struct file *file,
struct integrity_iint_cache *iint,
int must_measure,
char **pathbuf,
- const char **pathname)
+ const char **pathname,
+ char *filename)
{
struct inode *inode = file_inode(file);
- char filename[NAME_MAX];
fmode_t mode = file->f_mode;
bool send_tomtou = false, send_writers = false;
@@ -96,10 +97,13 @@ static void ima_rdwr_violation_check(struct file *file,
if (!iint)
iint = integrity_iint_find(inode);
/* IMA_MEASURE is set from reader side */
- if (iint && (iint->flags & IMA_MEASURE))
+ if (iint && test_bit(IMA_MUST_MEASURE,
+ &iint->atomic_flags))
send_tomtou = true;
}
} else {
+ if (must_measure)
+ set_bit(IMA_MUST_MEASURE, &iint->atomic_flags);
if ((atomic_read(&inode->i_writecount) > 0) && must_measure)
send_writers = true;
}
@@ -121,21 +125,25 @@ static void ima_check_last_writer(struct integrity_iint_cache *iint,
struct inode *inode, struct file *file)
{
fmode_t mode = file->f_mode;
+ bool update;
if (!(mode & FMODE_WRITE))
return;
- inode_lock(inode);
+ mutex_lock(&iint->mutex);
if (atomic_read(&inode->i_writecount) == 1) {
- if ((iint->version != inode->i_version) ||
+ update = test_and_clear_bit(IMA_UPDATE_XATTR,
+ &iint->atomic_flags);
+ if (!IS_I_VERSION(inode) ||
+ inode_cmp_iversion(inode, iint->version) ||
(iint->flags & IMA_NEW_FILE)) {
iint->flags &= ~(IMA_DONE_MASK | IMA_NEW_FILE);
iint->measured_pcrs = 0;
- if (iint->flags & IMA_APPRAISE)
+ if (update)
ima_update_xattr(iint, file);
}
}
- inode_unlock(inode);
+ mutex_unlock(&iint->mutex);
}
/**
@@ -168,7 +176,7 @@ static int process_measurement(struct file *file, char *buf, loff_t size,
char *pathbuf = NULL;
char filename[NAME_MAX];
const char *pathname = NULL;
- int rc = -ENOMEM, action, must_appraise;
+ int rc = 0, action, must_appraise = 0;
int pcr = CONFIG_IMA_MEASURE_PCR_IDX;
struct evm_ima_xattr_data *xattr_value = NULL;
int xattr_len = 0;
@@ -199,17 +207,31 @@ static int process_measurement(struct file *file, char *buf, loff_t size,
if (action) {
iint = integrity_inode_get(inode);
if (!iint)
- goto out;
+ rc = -ENOMEM;
}
- if (violation_check) {
+ if (!rc && violation_check)
ima_rdwr_violation_check(file, iint, action & IMA_MEASURE,
- &pathbuf, &pathname);
- if (!action) {
- rc = 0;
- goto out_free;
- }
- }
+ &pathbuf, &pathname, filename);
+
+ inode_unlock(inode);
+
+ if (rc)
+ goto out;
+ if (!action)
+ goto out;
+
+ mutex_lock(&iint->mutex);
+
+ if (test_and_clear_bit(IMA_CHANGE_ATTR, &iint->atomic_flags))
+ /* reset appraisal flags if ima_inode_post_setattr was called */
+ iint->flags &= ~(IMA_APPRAISE | IMA_APPRAISED |
+ IMA_APPRAISE_SUBMASK | IMA_APPRAISED_SUBMASK |
+ IMA_ACTION_FLAGS);
+
+ if (test_and_clear_bit(IMA_CHANGE_XATTR, &iint->atomic_flags))
+ /* reset all flags if ima_inode_setxattr was called */
+ iint->flags &= ~IMA_DONE_MASK;
/* Determine if already appraised/measured based on bitmask
* (IMA_MEASURE, IMA_MEASURED, IMA_XXXX_APPRAISE, IMA_XXXX_APPRAISED,
@@ -223,11 +245,23 @@ static int process_measurement(struct file *file, char *buf, loff_t size,
if ((action & IMA_MEASURE) && (iint->measured_pcrs & (0x1 << pcr)))
action ^= IMA_MEASURE;
+ /* HASH sets the digital signature and update flags, nothing else */
+ if ((action & IMA_HASH) &&
+ !(test_bit(IMA_DIGSIG, &iint->atomic_flags))) {
+ xattr_len = ima_read_xattr(file_dentry(file), &xattr_value);
+ if ((xattr_value && xattr_len > 2) &&
+ (xattr_value->type == EVM_IMA_XATTR_DIGSIG))
+ set_bit(IMA_DIGSIG, &iint->atomic_flags);
+ iint->flags |= IMA_HASHED;
+ action ^= IMA_HASH;
+ set_bit(IMA_UPDATE_XATTR, &iint->atomic_flags);
+ }
+
/* Nothing to do, just return existing appraised status */
if (!action) {
if (must_appraise)
rc = ima_get_cache_status(iint, func);
- goto out_digsig;
+ goto out_locked;
}
template_desc = ima_template_desc_current();
@@ -240,7 +274,7 @@ static int process_measurement(struct file *file, char *buf, loff_t size,
rc = ima_collect_measurement(iint, file, buf, size, hash_algo);
if (rc != 0 && rc != -EBADF && rc != -EINVAL)
- goto out_digsig;
+ goto out_locked;
if (!pathbuf) /* ima_rdwr_violation possibly pre-fetched */
pathname = ima_d_path(&file->f_path, &pathbuf, filename);
@@ -248,26 +282,32 @@ static int process_measurement(struct file *file, char *buf, loff_t size,
if (action & IMA_MEASURE)
ima_store_measurement(iint, file, pathname,
xattr_value, xattr_len, pcr);
- if (rc == 0 && (action & IMA_APPRAISE_SUBMASK))
+ if (rc == 0 && (action & IMA_APPRAISE_SUBMASK)) {
+ inode_lock(inode);
rc = ima_appraise_measurement(func, iint, file, pathname,
xattr_value, xattr_len, opened);
+ inode_unlock(inode);
+ }
if (action & IMA_AUDIT)
ima_audit_measurement(iint, pathname);
if ((file->f_flags & O_DIRECT) && (iint->flags & IMA_PERMIT_DIRECTIO))
rc = 0;
-out_digsig:
- if ((mask & MAY_WRITE) && (iint->flags & IMA_DIGSIG) &&
+out_locked:
+ if ((mask & MAY_WRITE) && test_bit(IMA_DIGSIG, &iint->atomic_flags) &&
!(iint->flags & IMA_NEW_FILE))
rc = -EACCES;
+ mutex_unlock(&iint->mutex);
kfree(xattr_value);
-out_free:
+out:
if (pathbuf)
__putname(pathbuf);
-out:
- inode_unlock(inode);
- if ((rc && must_appraise) && (ima_appraise & IMA_APPRAISE_ENFORCE))
- return -EACCES;
+ if (must_appraise) {
+ if (rc && (ima_appraise & IMA_APPRAISE_ENFORCE))
+ return -EACCES;
+ if (file->f_mode & FMODE_WRITE)
+ set_bit(IMA_UPDATE_XATTR, &iint->atomic_flags);
+ }
return 0;
}
@@ -366,8 +406,10 @@ int ima_read_file(struct file *file, enum kernel_read_file_id read_id)
if (!file && read_id == READING_MODULE) {
if (!sig_enforce && (ima_appraise & IMA_APPRAISE_MODULES) &&
- (ima_appraise & IMA_APPRAISE_ENFORCE))
+ (ima_appraise & IMA_APPRAISE_ENFORCE)) {
+ pr_err("impossible to appraise a module without a file descriptor. sig_enforce kernel parameter might help\n");
return -EACCES; /* INTEGRITY_UNKNOWN */
+ }
return 0; /* We rely on module signature checking */
}
return 0;
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index ee4613fa5840..915f5572c6ff 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -40,6 +40,8 @@
#define APPRAISE 0x0004 /* same as IMA_APPRAISE */
#define DONT_APPRAISE 0x0008
#define AUDIT 0x0040
+#define HASH 0x0100
+#define DONT_HASH 0x0200
#define INVALID_PCR(a) (((a) < 0) || \
(a) >= (FIELD_SIZEOF(struct integrity_iint_cache, measured_pcrs) * 8))
@@ -380,8 +382,10 @@ int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask,
action |= entry->flags & IMA_ACTION_FLAGS;
action |= entry->action & IMA_DO_MASK;
- if (entry->action & IMA_APPRAISE)
+ if (entry->action & IMA_APPRAISE) {
action |= get_subaction(entry, func);
+ action ^= IMA_HASH;
+ }
if (entry->action & IMA_DO_MASK)
actmask &= ~(entry->action | entry->action << 1);
@@ -521,7 +525,7 @@ enum {
Opt_err = -1,
Opt_measure = 1, Opt_dont_measure,
Opt_appraise, Opt_dont_appraise,
- Opt_audit,
+ Opt_audit, Opt_hash, Opt_dont_hash,
Opt_obj_user, Opt_obj_role, Opt_obj_type,
Opt_subj_user, Opt_subj_role, Opt_subj_type,
Opt_func, Opt_mask, Opt_fsmagic,
@@ -538,6 +542,8 @@ static match_table_t policy_tokens = {
{Opt_appraise, "appraise"},
{Opt_dont_appraise, "dont_appraise"},
{Opt_audit, "audit"},
+ {Opt_hash, "hash"},
+ {Opt_dont_hash, "dont_hash"},
{Opt_obj_user, "obj_user=%s"},
{Opt_obj_role, "obj_role=%s"},
{Opt_obj_type, "obj_type=%s"},
@@ -671,6 +677,22 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
entry->action = AUDIT;
break;
+ case Opt_hash:
+ ima_log_string(ab, "action", "hash");
+
+ if (entry->action != UNKNOWN)
+ result = -EINVAL;
+
+ entry->action = HASH;
+ break;
+ case Opt_dont_hash:
+ ima_log_string(ab, "action", "dont_hash");
+
+ if (entry->action != UNKNOWN)
+ result = -EINVAL;
+
+ entry->action = DONT_HASH;
+ break;
case Opt_func:
ima_log_string(ab, "func", args[0].from);
@@ -743,7 +765,7 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
case Opt_fsuuid:
ima_log_string(ab, "fsuuid", args[0].from);
- if (uuid_is_null(&entry->fsuuid)) {
+ if (!uuid_is_null(&entry->fsuuid)) {
result = -EINVAL;
break;
}
@@ -1040,6 +1062,10 @@ int ima_policy_show(struct seq_file *m, void *v)
seq_puts(m, pt(Opt_dont_appraise));
if (entry->action & AUDIT)
seq_puts(m, pt(Opt_audit));
+ if (entry->action & HASH)
+ seq_puts(m, pt(Opt_hash));
+ if (entry->action & DONT_HASH)
+ seq_puts(m, pt(Opt_dont_hash));
seq_puts(m, " ");
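
With the tokens added above, hash/dont_hash become rule actions alongside measure/appraise/audit: a matching file gets its hash written to security.ima without full appraisal enforcement. An illustrative policy fragment, loaded through securityfs like any other IMA policy; the conditions chosen here are examples, not taken from this patch:

	# write the file hash to security.ima for root's reads
	hash func=FILE_CHECK mask=MAY_READ uid=0
	# but never for tmpfs
	dont_hash fsmagic=0x01021994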
diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
index a02a86d51102..418f35e38015 100644
--- a/security/integrity/ima/ima_queue.c
+++ b/security/integrity/ima/ima_queue.c
@@ -145,7 +145,7 @@ static int ima_pcr_extend(const u8 *hash, int pcr)
if (!ima_used_chip)
return result;
- result = tpm_pcr_extend(TPM_ANY_NUM, pcr, hash);
+ result = tpm_pcr_extend(NULL, pcr, hash);
if (result != 0)
pr_err("Error Communicating to TPM chip, result: %d\n", result);
return result;
diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c
index 7412d0291ab9..30db39b23804 100644
--- a/security/integrity/ima/ima_template.c
+++ b/security/integrity/ima/ima_template.c
@@ -377,8 +377,7 @@ int ima_restore_measurement_list(loff_t size, void *buf)
break;
if (hdr[HDR_TEMPLATE_NAME].len >= MAX_TEMPLATE_NAME_LEN) {
- pr_err("attempting to restore a template name \
- that is too long\n");
+ pr_err("attempting to restore a template name that is too long\n");
ret = -EINVAL;
break;
}
@@ -389,8 +388,8 @@ int ima_restore_measurement_list(loff_t size, void *buf)
template_name[hdr[HDR_TEMPLATE_NAME].len] = 0;
if (strcmp(template_name, "ima") == 0) {
- pr_err("attempting to restore an unsupported \
- template \"%s\" failed\n", template_name);
+ pr_err("attempting to restore an unsupported template \"%s\" failed\n",
+ template_name);
ret = -EINVAL;
break;
}
@@ -410,8 +409,8 @@ int ima_restore_measurement_list(loff_t size, void *buf)
&(template_desc->fields),
&(template_desc->num_fields));
if (ret < 0) {
- pr_err("attempting to restore the template fmt \"%s\" \
- failed\n", template_desc->fmt);
+ pr_err("attempting to restore the template fmt \"%s\" failed\n",
+ template_desc->fmt);
ret = -EINVAL;
break;
}
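
The pr_err() rewrites above fix a classic string-literal bug: a backslash-newline continuation keeps the next line's leading whitespace inside the message, while adjacent literals concatenate cleanly. A two-line userspace demonstration:

#include <stdio.h>

int main(void)
{
	printf("broken: a \
		b\n");                  /* embeds the tabs in the output */
	printf("fixed: a "
	       "b\n");                  /* adjacent literals, clean join */
	return 0;
}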
diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
index e1bf040fb110..50a8e3365df7 100644
--- a/security/integrity/integrity.h
+++ b/security/integrity/integrity.h
@@ -25,39 +25,50 @@
#define IMA_COLLECTED 0x00000020
#define IMA_AUDIT 0x00000040
#define IMA_AUDITED 0x00000080
+#define IMA_HASH 0x00000100
+#define IMA_HASHED 0x00000200
/* iint cache flags */
#define IMA_ACTION_FLAGS 0xff000000
#define IMA_ACTION_RULE_FLAGS 0x06000000
-#define IMA_DIGSIG 0x01000000
-#define IMA_DIGSIG_REQUIRED 0x02000000
-#define IMA_PERMIT_DIRECTIO 0x04000000
-#define IMA_NEW_FILE 0x08000000
+#define IMA_DIGSIG_REQUIRED 0x01000000
+#define IMA_PERMIT_DIRECTIO 0x02000000
+#define IMA_NEW_FILE 0x04000000
+#define EVM_IMMUTABLE_DIGSIG 0x08000000
#define IMA_DO_MASK (IMA_MEASURE | IMA_APPRAISE | IMA_AUDIT | \
- IMA_APPRAISE_SUBMASK)
+ IMA_HASH | IMA_APPRAISE_SUBMASK)
#define IMA_DONE_MASK (IMA_MEASURED | IMA_APPRAISED | IMA_AUDITED | \
- IMA_COLLECTED | IMA_APPRAISED_SUBMASK)
+ IMA_HASHED | IMA_COLLECTED | \
+ IMA_APPRAISED_SUBMASK)
/* iint subaction appraise cache flags */
-#define IMA_FILE_APPRAISE 0x00000100
-#define IMA_FILE_APPRAISED 0x00000200
-#define IMA_MMAP_APPRAISE 0x00000400
-#define IMA_MMAP_APPRAISED 0x00000800
-#define IMA_BPRM_APPRAISE 0x00001000
-#define IMA_BPRM_APPRAISED 0x00002000
-#define IMA_READ_APPRAISE 0x00004000
-#define IMA_READ_APPRAISED 0x00008000
+#define IMA_FILE_APPRAISE 0x00001000
+#define IMA_FILE_APPRAISED 0x00002000
+#define IMA_MMAP_APPRAISE 0x00004000
+#define IMA_MMAP_APPRAISED 0x00008000
+#define IMA_BPRM_APPRAISE 0x00010000
+#define IMA_BPRM_APPRAISED 0x00020000
+#define IMA_READ_APPRAISE 0x00040000
+#define IMA_READ_APPRAISED 0x00080000
#define IMA_APPRAISE_SUBMASK (IMA_FILE_APPRAISE | IMA_MMAP_APPRAISE | \
IMA_BPRM_APPRAISE | IMA_READ_APPRAISE)
#define IMA_APPRAISED_SUBMASK (IMA_FILE_APPRAISED | IMA_MMAP_APPRAISED | \
IMA_BPRM_APPRAISED | IMA_READ_APPRAISED)
+/* iint cache atomic_flags */
+#define IMA_CHANGE_XATTR 0
+#define IMA_UPDATE_XATTR 1
+#define IMA_CHANGE_ATTR 2
+#define IMA_DIGSIG 3
+#define IMA_MUST_MEASURE 4
+
enum evm_ima_xattr_type {
IMA_XATTR_DIGEST = 0x01,
EVM_XATTR_HMAC,
EVM_IMA_XATTR_DIGSIG,
IMA_XATTR_DIGEST_NG,
+ EVM_XATTR_PORTABLE_DIGSIG,
IMA_XATTR_LAST
};
@@ -100,10 +111,12 @@ struct signature_v2_hdr {
/* integrity data associated with an inode */
struct integrity_iint_cache {
struct rb_node rb_node; /* rooted in integrity_iint_tree */
+ struct mutex mutex; /* protects: version, flags, digest */
struct inode *inode; /* back pointer to inode in question */
u64 version; /* track inode changes */
unsigned long flags;
unsigned long measured_pcrs;
+ unsigned long atomic_flags;
enum integrity_status ima_file_status:4;
enum integrity_status ima_mmap_status:4;
enum integrity_status ima_bprm_status:4;
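
Note the two conventions that now coexist above: iint->flags entries are bit masks (0x00000100, ...) tested with '&', while the new atomic_flags entries are bit numbers (0, 1, 2, ...) because set_bit()/test_bit() take an index. Mixing the two is an easy mistake; a compact userspace illustration:

#include <stdio.h>

#define FLAG_HASH     0x00000100UL  /* mask: test with (flags & FLAG_HASH) */
#define ATOMIC_DIGSIG 3             /* index: test with (1UL << n) */

int main(void)
{
	unsigned long flags = FLAG_HASH;
	unsigned long atomic_flags = 1UL << ATOMIC_DIGSIG;

	printf("%d %d\n", !!(flags & FLAG_HASH),
	       !!(atomic_flags & (1UL << ATOMIC_DIGSIG)));
	return 0;
}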
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index d0bccebbd3b5..41bcf57e96f2 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -713,7 +713,6 @@ descend_to_keyring:
* doesn't contain any keyring pointers.
*/
shortcut = assoc_array_ptr_to_shortcut(ptr);
- smp_read_barrier_depends();
if ((shortcut->index_key[0] & ASSOC_ARRAY_FAN_MASK) != 0)
goto not_this_keyring;
@@ -723,8 +722,6 @@ descend_to_keyring:
}
node = assoc_array_ptr_to_node(ptr);
- smp_read_barrier_depends();
-
ptr = node->slots[0];
if (!assoc_array_ptr_is_meta(ptr))
goto begin_node;
@@ -736,7 +733,6 @@ descend_to_node:
kdebug("descend");
if (assoc_array_ptr_is_shortcut(ptr)) {
shortcut = assoc_array_ptr_to_shortcut(ptr);
- smp_read_barrier_depends();
ptr = READ_ONCE(shortcut->next_node);
BUG_ON(!assoc_array_ptr_is_node(ptr));
}
@@ -744,7 +740,6 @@ descend_to_node:
begin_node:
kdebug("begin_node");
- smp_read_barrier_depends();
slot = 0;
ascend_to_node:
/* Go through the slots in a node */
@@ -792,14 +787,12 @@ ascend_to_node:
if (ptr && assoc_array_ptr_is_shortcut(ptr)) {
shortcut = assoc_array_ptr_to_shortcut(ptr);
- smp_read_barrier_depends();
ptr = READ_ONCE(shortcut->back_pointer);
slot = shortcut->parent_slot;
}
if (!ptr)
goto not_this_keyring;
node = assoc_array_ptr_to_node(ptr);
- smp_read_barrier_depends();
slot++;
/* If we've ascended to the root (zero backpointer), we must have just
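
The deletions above rely on READ_ONCE() now implying dependency ordering (the Alpha-only read barrier was folded into it), so a pointer loaded with READ_ONCE() can be dereferenced without a separate smp_read_barrier_depends(). Sketch with a hypothetical demo_node head:

#include <linux/compiler.h>

struct demo_node {
	int payload;
};

static struct demo_node *demo_head;

static int demo_read_payload(void)
{
	/* dependency-ordered load; no extra barrier needed */
	struct demo_node *n = READ_ONCE(demo_head);

	return n ? n->payload : -1;
}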
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index 98aa89ff7bfd..423776682025 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -355,13 +355,12 @@ out:
* For key specific tpm requests, we will generate and send our
* own TPM command packets using the drivers send function.
*/
-static int trusted_tpm_send(const u32 chip_num, unsigned char *cmd,
- size_t buflen)
+static int trusted_tpm_send(unsigned char *cmd, size_t buflen)
{
int rc;
dump_tpm_buf(cmd);
- rc = tpm_send(chip_num, cmd, buflen);
+ rc = tpm_send(NULL, cmd, buflen);
dump_tpm_buf(cmd);
if (rc > 0)
/* Can't return positive return codes values to keyctl */
@@ -382,10 +381,10 @@ static int pcrlock(const int pcrnum)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- ret = tpm_get_random(TPM_ANY_NUM, hash, SHA1_DIGEST_SIZE);
+ ret = tpm_get_random(NULL, hash, SHA1_DIGEST_SIZE);
if (ret != SHA1_DIGEST_SIZE)
return ret;
- return tpm_pcr_extend(TPM_ANY_NUM, pcrnum, hash) ? -EINVAL : 0;
+ return tpm_pcr_extend(NULL, pcrnum, hash) ? -EINVAL : 0;
}
/*
@@ -398,7 +397,7 @@ static int osap(struct tpm_buf *tb, struct osapsess *s,
unsigned char ononce[TPM_NONCE_SIZE];
int ret;
- ret = tpm_get_random(TPM_ANY_NUM, ononce, TPM_NONCE_SIZE);
+ ret = tpm_get_random(NULL, ononce, TPM_NONCE_SIZE);
if (ret != TPM_NONCE_SIZE)
return ret;
@@ -410,7 +409,7 @@ static int osap(struct tpm_buf *tb, struct osapsess *s,
store32(tb, handle);
storebytes(tb, ononce, TPM_NONCE_SIZE);
- ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE);
+ ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
if (ret < 0)
return ret;
@@ -434,7 +433,7 @@ static int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce)
store16(tb, TPM_TAG_RQU_COMMAND);
store32(tb, TPM_OIAP_SIZE);
store32(tb, TPM_ORD_OIAP);
- ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE);
+ ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
if (ret < 0)
return ret;
@@ -493,7 +492,7 @@ static int tpm_seal(struct tpm_buf *tb, uint16_t keytype,
if (ret < 0)
goto out;
- ret = tpm_get_random(TPM_ANY_NUM, td->nonceodd, TPM_NONCE_SIZE);
+ ret = tpm_get_random(NULL, td->nonceodd, TPM_NONCE_SIZE);
if (ret != TPM_NONCE_SIZE)
goto out;
ordinal = htonl(TPM_ORD_SEAL);
@@ -542,7 +541,7 @@ static int tpm_seal(struct tpm_buf *tb, uint16_t keytype,
store8(tb, cont);
storebytes(tb, td->pubauth, SHA1_DIGEST_SIZE);
- ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE);
+ ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
if (ret < 0)
goto out;
@@ -603,7 +602,7 @@ static int tpm_unseal(struct tpm_buf *tb,
ordinal = htonl(TPM_ORD_UNSEAL);
keyhndl = htonl(SRKHANDLE);
- ret = tpm_get_random(TPM_ANY_NUM, nonceodd, TPM_NONCE_SIZE);
+ ret = tpm_get_random(NULL, nonceodd, TPM_NONCE_SIZE);
if (ret != TPM_NONCE_SIZE) {
pr_info("trusted_key: tpm_get_random failed (%d)\n", ret);
return ret;
@@ -635,7 +634,7 @@ static int tpm_unseal(struct tpm_buf *tb,
store8(tb, cont);
storebytes(tb, authdata2, SHA1_DIGEST_SIZE);
- ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE);
+ ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
if (ret < 0) {
pr_info("trusted_key: authhmac failed (%d)\n", ret);
return ret;
@@ -748,7 +747,7 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
int i;
int tpm2;
- tpm2 = tpm_is_tpm2(TPM_ANY_NUM);
+ tpm2 = tpm_is_tpm2(NULL);
if (tpm2 < 0)
return tpm2;
@@ -917,7 +916,7 @@ static struct trusted_key_options *trusted_options_alloc(void)
struct trusted_key_options *options;
int tpm2;
- tpm2 = tpm_is_tpm2(TPM_ANY_NUM);
+ tpm2 = tpm_is_tpm2(NULL);
if (tpm2 < 0)
return NULL;
@@ -967,7 +966,7 @@ static int trusted_instantiate(struct key *key,
size_t key_len;
int tpm2;
- tpm2 = tpm_is_tpm2(TPM_ANY_NUM);
+ tpm2 = tpm_is_tpm2(NULL);
if (tpm2 < 0)
return tpm2;
@@ -1008,7 +1007,7 @@ static int trusted_instantiate(struct key *key,
switch (key_cmd) {
case Opt_load:
if (tpm2)
- ret = tpm_unseal_trusted(TPM_ANY_NUM, payload, options);
+ ret = tpm_unseal_trusted(NULL, payload, options);
else
ret = key_unseal(payload, options);
dump_payload(payload);
@@ -1018,13 +1017,13 @@ static int trusted_instantiate(struct key *key,
break;
case Opt_new:
key_len = payload->key_len;
- ret = tpm_get_random(TPM_ANY_NUM, payload->key, key_len);
+ ret = tpm_get_random(NULL, payload->key, key_len);
if (ret != key_len) {
pr_info("trusted_key: key_create failed (%d)\n", ret);
goto out;
}
if (tpm2)
- ret = tpm_seal_trusted(TPM_ANY_NUM, payload, options);
+ ret = tpm_seal_trusted(NULL, payload, options);
else
ret = key_seal(payload, options);
if (ret < 0)
diff --git a/security/selinux/include/netlabel.h b/security/selinux/include/netlabel.h
index 75686d53df07..e77a5e307955 100644
--- a/security/selinux/include/netlabel.h
+++ b/security/selinux/include/netlabel.h
@@ -19,8 +19,7 @@
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/security/selinux/netlabel.c b/security/selinux/netlabel.c
index aaba6677ee2e..2c297b995b16 100644
--- a/security/selinux/netlabel.c
+++ b/security/selinux/netlabel.c
@@ -22,8 +22,7 @@
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 33cfe5d3d6cb..8900ea5cbabf 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -867,6 +867,9 @@ int security_bounded_transition(u32 old_sid, u32 new_sid)
int index;
int rc;
+ if (!ss_initialized)
+ return 0;
+
read_lock(&policy_rwlock);
rc = -EINVAL;
@@ -1413,27 +1416,25 @@ static int security_context_to_sid_core(const char *scontext, u32 scontext_len,
if (!scontext_len)
return -EINVAL;
+ /* Copy the string to allow changes and ensure a NUL terminator */
+ scontext2 = kmemdup_nul(scontext, scontext_len, gfp_flags);
+ if (!scontext2)
+ return -ENOMEM;
+
if (!ss_initialized) {
int i;
for (i = 1; i < SECINITSID_NUM; i++) {
- if (!strcmp(initial_sid_to_string[i], scontext)) {
+ if (!strcmp(initial_sid_to_string[i], scontext2)) {
*sid = i;
- return 0;
+ goto out;
}
}
*sid = SECINITSID_KERNEL;
- return 0;
+ goto out;
}
*sid = SECSID_NULL;
- /* Copy the string so that we can modify the copy as we parse it. */
- scontext2 = kmalloc(scontext_len + 1, gfp_flags);
- if (!scontext2)
- return -ENOMEM;
- memcpy(scontext2, scontext, scontext_len);
- scontext2[scontext_len] = 0;
-
if (force) {
/* Save another copy for storing in uninterpreted form */
rc = -ENOMEM;
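
The conversion above replaces an open-coded kmalloc()+memcpy()+NUL-terminate sequence with kmemdup_nul(), and moves the copy before the !ss_initialized path so strcmp() never sees a possibly unterminated caller buffer. The helper in a hypothetical parser:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

static int demo_parse(const char *buf, size_t len, gfp_t gfp)
{
	char *copy = kmemdup_nul(buf, len, gfp);

	if (!copy)
		return -ENOMEM;
	/* safe: copy[len] == '\0' even if buf was not terminated */
	kfree(copy);
	return 0;
}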
diff --git a/security/smack/smack.h b/security/smack/smack.h
index 6a71fc7831ab..f7db791fb566 100644
--- a/security/smack/smack.h
+++ b/security/smack/smack.h
@@ -321,6 +321,7 @@ struct smack_known *smk_import_entry(const char *, int);
void smk_insert_entry(struct smack_known *skp);
struct smack_known *smk_find_entry(const char *);
bool smack_privileged(int cap);
+bool smack_privileged_cred(int cap, const struct cred *cred);
void smk_destroy_label_list(struct list_head *list);
/*
diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
index 1a3004189447..9a4c0ad46518 100644
--- a/security/smack/smack_access.c
+++ b/security/smack/smack_access.c
@@ -623,26 +623,24 @@ struct smack_known *smack_from_secid(const u32 secid)
LIST_HEAD(smack_onlycap_list);
DEFINE_MUTEX(smack_onlycap_lock);
-/*
+/**
+ * smack_privileged_cred - are all privilege requirements met by cred
+ * @cap: The requested capability
+ * @cred: the credential to use
+ *
* Is the task privileged and allowed to be privileged
* by the onlycap rule.
*
* Returns true if the task is allowed to be privileged, false if it's not.
*/
-bool smack_privileged(int cap)
+bool smack_privileged_cred(int cap, const struct cred *cred)
{
- struct smack_known *skp = smk_of_current();
+ struct task_smack *tsp = cred->security;
+ struct smack_known *skp = tsp->smk_task;
struct smack_known_list_elem *sklep;
int rc;
- /*
- * All kernel tasks are privileged
- */
- if (unlikely(current->flags & PF_KTHREAD))
- return true;
-
- rc = cap_capable(current_cred(), &init_user_ns, cap,
- SECURITY_CAP_AUDIT);
+ rc = cap_capable(cred, &init_user_ns, cap, SECURITY_CAP_AUDIT);
if (rc)
return false;
@@ -662,3 +660,23 @@ bool smack_privileged(int cap)
return false;
}
+
+/**
+ * smack_privileged - are all privilege requirements met
+ * @cap: The requested capability
+ *
+ * Is the task privileged and allowed to be privileged
+ * by the onlycap rule.
+ *
+ * Returns true if the task is allowed to be privileged, false if it's not.
+ */
+bool smack_privileged(int cap)
+{
+ /*
+ * All kernel tasks are privileged
+ */
+ if (unlikely(current->flags & PF_KTHREAD))
+ return true;
+
+ return smack_privileged_cred(cap, current_cred());
+}
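
The refactor above follows a common LSM shape: the current-task API becomes a thin wrapper over a helper taking an explicit const struct cred *, so paths that hold someone else's credentials (key_permission below) can reuse the check. Reduced sketch with hypothetical demo_* names:

#include <linux/cred.h>
#include <linux/sched.h>

static bool demo_priv_cred(const struct cred *cred)
{
	/* capability and policy checks against @cred would go here */
	return false;
}

static bool demo_priv(void)
{
	if (unlikely(current->flags & PF_KTHREAD))
		return true;    /* kernel threads stay privileged */
	return demo_priv_cred(current_cred());
}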
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 14cc7940b36d..03fdecba93bb 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -2866,12 +2866,16 @@ static int smack_socket_connect(struct socket *sock, struct sockaddr *sap,
#endif
#ifdef SMACK_IPV6_SECMARK_LABELING
struct smack_known *rsp;
- struct socket_smack *ssp = sock->sk->sk_security;
+ struct socket_smack *ssp;
#endif
if (sock->sk == NULL)
return 0;
+#ifdef SMACK_IPV6_SECMARK_LABELING
+ ssp = sock->sk->sk_security;
+#endif
+
switch (sock->sk->sk_family) {
case PF_INET:
if (addrlen < sizeof(struct sockaddr_in))
@@ -4365,6 +4369,10 @@ static int smack_key_permission(key_ref_t key_ref,
*/
if (tkp == NULL)
return -EACCES;
+
+ if (smack_privileged_cred(CAP_MAC_OVERRIDE, cred))
+ return 0;
+
#ifdef CONFIG_AUDIT
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_KEY);
ad.a.u.key_struct.key = keyp->serial;
diff --git a/security/tomoyo/audit.c b/security/tomoyo/audit.c
index 0f73fe30e37a..558e3076d38c 100644
--- a/security/tomoyo/audit.c
+++ b/security/tomoyo/audit.c
@@ -458,7 +458,7 @@ void tomoyo_read_log(struct tomoyo_io_buffer *head)
*
* Returns POLLIN | POLLRDNORM when ready to read an audit log.
*/
-unsigned int tomoyo_poll_log(struct file *file, poll_table *wait)
+__poll_t tomoyo_poll_log(struct file *file, poll_table *wait)
{
if (tomoyo_log_count)
return POLLIN | POLLRDNORM;
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index 25eed4b0b0e8..70c73bf66c88 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -2120,7 +2120,7 @@ static struct tomoyo_domain_info *tomoyo_find_domain_by_qid
*
* Waits for access requests which violated policy in enforcing mode.
*/
-static unsigned int tomoyo_poll_query(struct file *file, poll_table *wait)
+static __poll_t tomoyo_poll_query(struct file *file, poll_table *wait)
{
if (!list_empty(&tomoyo_query_list))
return POLLIN | POLLRDNORM;
@@ -2453,7 +2453,7 @@ int tomoyo_open_control(const u8 type, struct file *file)
* Returns POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM if ready to read/write,
* POLLOUT | POLLWRNORM otherwise.
*/
-unsigned int tomoyo_poll_control(struct file *file, poll_table *wait)
+__poll_t tomoyo_poll_control(struct file *file, poll_table *wait)
{
struct tomoyo_io_buffer *head = file->private_data;
if (head->poll)
diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h
index 7adccdd8e36d..539bcdd30bb8 100644
--- a/security/tomoyo/common.h
+++ b/security/tomoyo/common.h
@@ -789,7 +789,7 @@ struct tomoyo_acl_param {
struct tomoyo_io_buffer {
void (*read) (struct tomoyo_io_buffer *);
int (*write) (struct tomoyo_io_buffer *);
- unsigned int (*poll) (struct file *file, poll_table *wait);
+ __poll_t (*poll) (struct file *file, poll_table *wait);
/* Exclusive lock for this structure. */
struct mutex io_sem;
char __user *read_user_buf;
@@ -981,8 +981,8 @@ int tomoyo_path_number_perm(const u8 operation, const struct path *path,
unsigned long number);
int tomoyo_path_perm(const u8 operation, const struct path *path,
const char *target);
-unsigned int tomoyo_poll_control(struct file *file, poll_table *wait);
-unsigned int tomoyo_poll_log(struct file *file, poll_table *wait);
+__poll_t tomoyo_poll_control(struct file *file, poll_table *wait);
+__poll_t tomoyo_poll_log(struct file *file, poll_table *wait);
int tomoyo_socket_bind_permission(struct socket *sock, struct sockaddr *addr,
int addr_len);
int tomoyo_socket_connect_permission(struct socket *sock,
diff --git a/security/tomoyo/securityfs_if.c b/security/tomoyo/securityfs_if.c
index 49393c2a3f8b..fb9bf99deb35 100644
--- a/security/tomoyo/securityfs_if.c
+++ b/security/tomoyo/securityfs_if.c
@@ -157,7 +157,7 @@ static int tomoyo_release(struct inode *inode, struct file *file)
* Returns POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM if ready to read/write,
* POLLOUT | POLLWRNORM otherwise.
*/
-static unsigned int tomoyo_poll(struct file *file, poll_table *wait)
+static __poll_t tomoyo_poll(struct file *file, poll_table *wait)
{
return tomoyo_poll_control(file, wait);
}
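
From here on the recurring change is the __poll_t conversion: poll handlers and their mask variables switch from unsigned int to the bitwise __poll_t type so sparse can flag poll bits mixed with plain integers. The converted shape, reduced to a sketch:

#include <linux/poll.h>

static __poll_t demo_poll(struct file *file, poll_table *wait)
{
	__poll_t mask = 0;

	/* poll_wait(file, &some_waitqueue, wait); */
	mask |= POLLIN | POLLRDNORM;
	return mask;
}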
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index 4490a699030b..a12b9555e910 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -396,7 +396,7 @@ static int snd_compr_mmap(struct file *f, struct vm_area_struct *vma)
return -ENXIO;
}
-static inline int snd_compr_get_poll(struct snd_compr_stream *stream)
+static __poll_t snd_compr_get_poll(struct snd_compr_stream *stream)
{
if (stream->direction == SND_COMPRESS_PLAYBACK)
return POLLOUT | POLLWRNORM;
@@ -404,12 +404,12 @@ static inline int snd_compr_get_poll(struct snd_compr_stream *stream)
return POLLIN | POLLRDNORM;
}
-static unsigned int snd_compr_poll(struct file *f, poll_table *wait)
+static __poll_t snd_compr_poll(struct file *f, poll_table *wait)
{
struct snd_compr_file *data = f->private_data;
struct snd_compr_stream *stream;
size_t avail;
- int retval = 0;
+ __poll_t retval = 0;
if (snd_BUG_ON(!data))
return POLLERR;
diff --git a/sound/core/control.c b/sound/core/control.c
index 56b3e2d49c82..50fa16022f1f 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/time.h>
+#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <sound/core.h>
#include <sound/minors.h>
@@ -1129,7 +1130,7 @@ static int replace_user_tlv(struct snd_kcontrol *kctl, unsigned int __user *buf,
if (size > 1024 * 128) /* sane value */
return -EINVAL;
- container = memdup_user(buf, size);
+ container = vmemdup_user(buf, size);
if (IS_ERR(container))
return PTR_ERR(container);
@@ -1137,7 +1138,7 @@ static int replace_user_tlv(struct snd_kcontrol *kctl, unsigned int __user *buf,
if (!change)
change = memcmp(ue->tlv_data, container, size) != 0;
if (!change) {
- kfree(container);
+ kvfree(container);
return 0;
}
@@ -1148,7 +1149,7 @@ static int replace_user_tlv(struct snd_kcontrol *kctl, unsigned int __user *buf,
mask = SNDRV_CTL_EVENT_MASK_INFO;
}
- kfree(ue->tlv_data);
+ kvfree(ue->tlv_data);
ue->tlv_data = container;
ue->tlv_data_size = size;
@@ -1197,7 +1198,7 @@ static int snd_ctl_elem_init_enum_names(struct user_element *ue)
if (ue->info.value.enumerated.names_length > 64 * 1024)
return -EINVAL;
- names = memdup_user((const void __user *)user_ptrval,
+ names = vmemdup_user((const void __user *)user_ptrval,
ue->info.value.enumerated.names_length);
if (IS_ERR(names))
return PTR_ERR(names);
@@ -1208,7 +1209,7 @@ static int snd_ctl_elem_init_enum_names(struct user_element *ue)
for (i = 0; i < ue->info.value.enumerated.items; ++i) {
name_len = strnlen(p, buf_len);
if (name_len == 0 || name_len >= 64 || name_len == buf_len) {
- kfree(names);
+ kvfree(names);
return -EINVAL;
}
p += name_len + 1;
@@ -1225,8 +1226,8 @@ static void snd_ctl_elem_user_free(struct snd_kcontrol *kcontrol)
{
struct user_element *ue = kcontrol->private_data;
- kfree(ue->tlv_data);
- kfree(ue->priv_data);
+ kvfree(ue->tlv_data);
+ kvfree(ue->priv_data);
kfree(ue);
}
@@ -1666,9 +1667,9 @@ static ssize_t snd_ctl_read(struct file *file, char __user *buffer,
return result > 0 ? result : err;
}
-static unsigned int snd_ctl_poll(struct file *file, poll_table * wait)
+static __poll_t snd_ctl_poll(struct file *file, poll_table * wait)
{
- unsigned int mask;
+ __poll_t mask;
struct snd_ctl_file *ctl;
ctl = file->private_data;
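
The control.c changes pair vmemdup_user() with kvfree(): the copy falls back to vmalloc when the user-sized allocation is too large for kmalloc, so every free site of such a buffer must use kvfree(), never plain kfree(). Sketch with a hypothetical demo_copy_tlv():

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static int demo_copy_tlv(const void __user *buf, size_t size)
{
	void *container = vmemdup_user(buf, size);

	if (IS_ERR(container))
		return PTR_ERR(container);
	/* ... validate and consume container ... */
	kvfree(container);      /* matches kmalloc or vmalloc backing */
	return 0;
}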
diff --git a/sound/core/hwdep.c b/sound/core/hwdep.c
index 8faae3d1455d..26e71cf05f1e 100644
--- a/sound/core/hwdep.c
+++ b/sound/core/hwdep.c
@@ -177,7 +177,7 @@ static int snd_hwdep_release(struct inode *inode, struct file * file)
return err;
}
-static unsigned int snd_hwdep_poll(struct file * file, poll_table * wait)
+static __poll_t snd_hwdep_poll(struct file * file, poll_table * wait)
{
struct snd_hwdep *hw = file->private_data;
if (hw->ops.poll)
@@ -233,8 +233,6 @@ static int snd_hwdep_dsp_load(struct snd_hwdep *hw,
/* check whether the dsp was already loaded */
if (hw->dsp_loaded & (1 << info.index))
return -EBUSY;
- if (!access_ok(VERIFY_READ, info.image, info.length))
- return -EFAULT;
err = hw->ops.dsp_load(hw, &info);
if (err < 0)
return err;
diff --git a/sound/core/info.c b/sound/core/info.c
index bcf6a48cc70d..aa86f3f8e056 100644
--- a/sound/core/info.c
+++ b/sound/core/info.c
@@ -203,11 +203,11 @@ static ssize_t snd_info_entry_write(struct file *file, const char __user *buffer
return size;
}
-static unsigned int snd_info_entry_poll(struct file *file, poll_table *wait)
+static __poll_t snd_info_entry_poll(struct file *file, poll_table *wait)
{
struct snd_info_private_data *data = file->private_data;
struct snd_info_entry *entry = data->entry;
- unsigned int mask = 0;
+ __poll_t mask = 0;
if (entry->c.ops->poll)
return entry->c.ops->poll(entry,
diff --git a/sound/core/init.c b/sound/core/init.c
index 168ae03d3a1c..8753440c3a6e 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -344,7 +344,7 @@ static int snd_disconnect_release(struct inode *inode, struct file *file)
panic("%s(%p, %p) failed!", __func__, inode, file);
}
-static unsigned int snd_disconnect_poll(struct file * file, poll_table * wait)
+static __poll_t snd_disconnect_poll(struct file * file, poll_table * wait)
{
return POLLERR | POLLNVAL;
}
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index c2db7e905f7d..3ebba9c7f86e 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -186,7 +186,7 @@ static int _snd_pcm_hw_param_mask(struct snd_pcm_hw_params *params,
{
int changed;
changed = snd_mask_refine(hw_param_mask(params, var), val);
- if (changed) {
+ if (changed > 0) {
params->cmask |= 1 << var;
params->rmask |= 1 << var;
}
@@ -233,7 +233,7 @@ static int _snd_pcm_hw_param_min(struct snd_pcm_hw_params *params,
val, open);
else
return -EINVAL;
- if (changed) {
+ if (changed > 0) {
params->cmask |= 1 << var;
params->rmask |= 1 << var;
}
@@ -294,7 +294,7 @@ static int _snd_pcm_hw_param_max(struct snd_pcm_hw_params *params,
val, open);
else
return -EINVAL;
- if (changed) {
+ if (changed > 0) {
params->cmask |= 1 << var;
params->rmask |= 1 << var;
}
@@ -499,7 +499,7 @@ static int _snd_pcm_hw_param_set(struct snd_pcm_hw_params *params,
}
} else
return -EINVAL;
- if (changed) {
+ if (changed > 0) {
params->cmask |= 1 << var;
params->rmask |= 1 << var;
}
@@ -539,7 +539,7 @@ static int _snd_pcm_hw_param_setinteger(struct snd_pcm_hw_params *params,
{
int changed;
changed = snd_interval_setinteger(hw_param_interval(params, var));
- if (changed) {
+ if (changed > 0) {
params->cmask |= 1 << var;
params->rmask |= 1 << var;
}
@@ -842,7 +842,7 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
if (!(mutex_trylock(&runtime->oss.params_lock)))
return -EAGAIN;
} else if (mutex_lock_interruptible(&runtime->oss.params_lock))
- return -EINTR;
+ return -ERESTARTSYS;
sw_params = kzalloc(sizeof(*sw_params), GFP_KERNEL);
params = kmalloc(sizeof(*params), GFP_KERNEL);
sparams = kmalloc(sizeof(*sparams), GFP_KERNEL);
@@ -2686,10 +2686,10 @@ static int snd_pcm_oss_capture_ready(struct snd_pcm_substream *substream)
runtime->oss.period_frames;
}
-static unsigned int snd_pcm_oss_poll(struct file *file, poll_table * wait)
+static __poll_t snd_pcm_oss_poll(struct file *file, poll_table * wait)
{
struct snd_pcm_oss_file *pcm_oss_file;
- unsigned int mask;
+ __poll_t mask;
struct snd_pcm_substream *psubstream = NULL, *csubstream = NULL;
pcm_oss_file = file->private_data;
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index faa67861cbc1..a83152e7d387 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -1602,7 +1602,7 @@ static int _snd_pcm_hw_param_first(struct snd_pcm_hw_params *params,
changed = snd_interval_refine_first(hw_param_interval(params, var));
else
return -EINVAL;
- if (changed) {
+ if (changed > 0) {
params->cmask |= 1 << var;
params->rmask |= 1 << var;
}
@@ -1648,7 +1648,7 @@ static int _snd_pcm_hw_param_last(struct snd_pcm_hw_params *params,
changed = snd_interval_refine_last(hw_param_interval(params, var));
else
return -EINVAL;
- if (changed) {
+ if (changed > 0) {
params->cmask |= 1 << var;
params->rmask |= 1 << var;
}
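
The repeated "> 0" fix in pcm_oss.c and here matters because the refine helpers return a negative errno on failure, 0 for no change, and positive for a real change; a bare if (changed) would also dirty cmask/rmask on error. A short userspace illustration:

#include <stdio.h>

static int refine(int val)
{
	return val < 0 ? -22 /* like -EINVAL */ : val;
}

int main(void)
{
	int changed = refine(-1);

	if (changed > 0)                 /* skipped on error, as intended */
		printf("mark cmask/rmask\n");
	return changed < 0;
}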
diff --git a/sound/core/pcm_misc.c b/sound/core/pcm_misc.c
index 9be81025372f..c4eb561d2008 100644
--- a/sound/core/pcm_misc.c
+++ b/sound/core/pcm_misc.c
@@ -163,13 +163,30 @@ static struct pcm_format_data pcm_formats[(INT)SNDRV_PCM_FORMAT_LAST+1] = {
.width = 32, .phys = 32, .le = 0, .signd = 0,
.silence = { 0x69, 0x69, 0x69, 0x69 },
},
- /* FIXME: the following three formats are not defined properly yet */
+ /* FIXME: the following two formats are not defined properly yet */
[SNDRV_PCM_FORMAT_MPEG] = {
.le = -1, .signd = -1,
},
[SNDRV_PCM_FORMAT_GSM] = {
.le = -1, .signd = -1,
},
+ [SNDRV_PCM_FORMAT_S20_LE] = {
+ .width = 20, .phys = 32, .le = 1, .signd = 1,
+ .silence = {},
+ },
+ [SNDRV_PCM_FORMAT_S20_BE] = {
+ .width = 20, .phys = 32, .le = 0, .signd = 1,
+ .silence = {},
+ },
+ [SNDRV_PCM_FORMAT_U20_LE] = {
+ .width = 20, .phys = 32, .le = 1, .signd = 0,
+ .silence = { 0x00, 0x00, 0x08, 0x00 },
+ },
+ [SNDRV_PCM_FORMAT_U20_BE] = {
+ .width = 20, .phys = 32, .le = 0, .signd = 0,
+ .silence = { 0x00, 0x08, 0x00, 0x00 },
+ },
+ /* FIXME: the following format is not defined properly yet */
[SNDRV_PCM_FORMAT_SPECIAL] = {
.le = -1, .signd = -1,
},
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index f08772568c17..51104df924e1 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -3135,12 +3135,12 @@ static ssize_t snd_pcm_writev(struct kiocb *iocb, struct iov_iter *from)
return result;
}
-static unsigned int snd_pcm_playback_poll(struct file *file, poll_table * wait)
+static __poll_t snd_pcm_playback_poll(struct file *file, poll_table * wait)
{
struct snd_pcm_file *pcm_file;
struct snd_pcm_substream *substream;
struct snd_pcm_runtime *runtime;
- unsigned int mask;
+ __poll_t mask;
snd_pcm_uframes_t avail;
pcm_file = file->private_data;
@@ -3174,12 +3174,12 @@ static unsigned int snd_pcm_playback_poll(struct file *file, poll_table * wait)
return mask;
}
-static unsigned int snd_pcm_capture_poll(struct file *file, poll_table * wait)
+static __poll_t snd_pcm_capture_poll(struct file *file, poll_table * wait)
{
struct snd_pcm_file *pcm_file;
struct snd_pcm_substream *substream;
struct snd_pcm_runtime *runtime;
- unsigned int mask;
+ __poll_t mask;
snd_pcm_uframes_t avail;
pcm_file = file->private_data;
@@ -3446,7 +3446,7 @@ EXPORT_SYMBOL_GPL(snd_pcm_lib_default_mmap);
int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream,
struct vm_area_struct *area)
{
- struct snd_pcm_runtime *runtime = substream->runtime;;
+ struct snd_pcm_runtime *runtime = substream->runtime;
area->vm_page_prot = pgprot_noncached(area->vm_page_prot);
return vm_iomap_memory(area, runtime->dma_addr, runtime->dma_bytes);
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index f055ca10bbc1..fae21311723f 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -1366,11 +1366,11 @@ static ssize_t snd_rawmidi_write(struct file *file, const char __user *buf,
return result;
}
-static unsigned int snd_rawmidi_poll(struct file *file, poll_table * wait)
+static __poll_t snd_rawmidi_poll(struct file *file, poll_table * wait)
{
struct snd_rawmidi_file *rfile;
struct snd_rawmidi_runtime *runtime;
- unsigned int mask;
+ __poll_t mask;
rfile = file->private_data;
if (rfile->input != NULL) {
diff --git a/sound/core/seq/oss/seq_oss.c b/sound/core/seq/oss/seq_oss.c
index 8cdf489df80e..5f64d0d88320 100644
--- a/sound/core/seq/oss/seq_oss.c
+++ b/sound/core/seq/oss/seq_oss.c
@@ -59,7 +59,7 @@ static int odev_release(struct inode *inode, struct file *file);
static ssize_t odev_read(struct file *file, char __user *buf, size_t count, loff_t *offset);
static ssize_t odev_write(struct file *file, const char __user *buf, size_t count, loff_t *offset);
static long odev_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-static unsigned int odev_poll(struct file *file, poll_table * wait);
+static __poll_t odev_poll(struct file *file, poll_table * wait);
/*
@@ -197,7 +197,7 @@ static long odev_ioctl_compat(struct file *file, unsigned int cmd,
#define odev_ioctl_compat NULL
#endif
-static unsigned int
+static __poll_t
odev_poll(struct file *file, poll_table * wait)
{
struct seq_oss_devinfo *dp;
diff --git a/sound/core/seq/oss/seq_oss_device.h b/sound/core/seq/oss/seq_oss_device.h
index afa007c0cc2d..2d0e9eaf13aa 100644
--- a/sound/core/seq/oss/seq_oss_device.h
+++ b/sound/core/seq/oss/seq_oss_device.h
@@ -124,7 +124,7 @@ void snd_seq_oss_release(struct seq_oss_devinfo *dp);
int snd_seq_oss_ioctl(struct seq_oss_devinfo *dp, unsigned int cmd, unsigned long arg);
int snd_seq_oss_read(struct seq_oss_devinfo *dev, char __user *buf, int count);
int snd_seq_oss_write(struct seq_oss_devinfo *dp, const char __user *buf, int count, struct file *opt);
-unsigned int snd_seq_oss_poll(struct seq_oss_devinfo *dp, struct file *file, poll_table * wait);
+__poll_t snd_seq_oss_poll(struct seq_oss_devinfo *dp, struct file *file, poll_table * wait);
void snd_seq_oss_reset(struct seq_oss_devinfo *dp);
diff --git a/sound/core/seq/oss/seq_oss_rw.c b/sound/core/seq/oss/seq_oss_rw.c
index 6a7b6aceeca9..c538e78ca310 100644
--- a/sound/core/seq/oss/seq_oss_rw.c
+++ b/sound/core/seq/oss/seq_oss_rw.c
@@ -196,10 +196,10 @@ insert_queue(struct seq_oss_devinfo *dp, union evrec *rec, struct file *opt)
* select / poll
*/
-unsigned int
+__poll_t
snd_seq_oss_poll(struct seq_oss_devinfo *dp, struct file *file, poll_table * wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
/* input */
if (dp->readq && is_read_mode(dp->file_mode)) {
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index d01913404581..b611deef81f5 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -1087,10 +1087,10 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
/*
* handle polling
*/
-static unsigned int snd_seq_poll(struct file *file, poll_table * wait)
+static __poll_t snd_seq_poll(struct file *file, poll_table * wait)
{
struct snd_seq_client *client = file->private_data;
- unsigned int mask = 0;
+ __poll_t mask = 0;
/* check client structures are in place */
if (snd_BUG_ON(!client))
diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
index 79e0c5604ef8..0428e9061b47 100644
--- a/sound/core/seq/seq_queue.c
+++ b/sound/core/seq/seq_queue.c
@@ -497,9 +497,7 @@ int snd_seq_queue_timer_set_tempo(int queueid, int client,
return -EPERM;
}
- result = snd_seq_timer_set_tempo(q->timer, info->tempo);
- if (result >= 0)
- result = snd_seq_timer_set_ppq(q->timer, info->ppq);
+ result = snd_seq_timer_set_tempo_ppq(q->timer, info->tempo, info->ppq);
if (result >= 0 && info->skew_base > 0)
result = snd_seq_timer_set_skew(q->timer, info->skew_value,
info->skew_base);
diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
index b80985fbc334..23167578231f 100644
--- a/sound/core/seq/seq_timer.c
+++ b/sound/core/seq/seq_timer.c
@@ -191,14 +191,15 @@ int snd_seq_timer_set_tempo(struct snd_seq_timer * tmr, int tempo)
return 0;
}
-/* set current ppq */
-int snd_seq_timer_set_ppq(struct snd_seq_timer * tmr, int ppq)
+/* set current tempo and ppq in a shot */
+int snd_seq_timer_set_tempo_ppq(struct snd_seq_timer *tmr, int tempo, int ppq)
{
+ int changed;
unsigned long flags;
if (snd_BUG_ON(!tmr))
return -EINVAL;
- if (ppq <= 0)
+ if (tempo <= 0 || ppq <= 0)
return -EINVAL;
spin_lock_irqsave(&tmr->lock, flags);
if (tmr->running && (ppq != tmr->ppq)) {
@@ -208,9 +209,11 @@ int snd_seq_timer_set_ppq(struct snd_seq_timer * tmr, int ppq)
pr_debug("ALSA: seq: cannot change ppq of a running timer\n");
return -EBUSY;
}
-
+ changed = (tempo != tmr->tempo) || (ppq != tmr->ppq);
+ tmr->tempo = tempo;
tmr->ppq = ppq;
- snd_seq_timer_set_tick_resolution(tmr);
+ if (changed)
+ snd_seq_timer_set_tick_resolution(tmr);
spin_unlock_irqrestore(&tmr->lock, flags);
return 0;
}
diff --git a/sound/core/seq/seq_timer.h b/sound/core/seq/seq_timer.h
index 9506b661fe5b..62f390671096 100644
--- a/sound/core/seq/seq_timer.h
+++ b/sound/core/seq/seq_timer.h
@@ -131,7 +131,7 @@ int snd_seq_timer_stop(struct snd_seq_timer *tmr);
int snd_seq_timer_start(struct snd_seq_timer *tmr);
int snd_seq_timer_continue(struct snd_seq_timer *tmr);
int snd_seq_timer_set_tempo(struct snd_seq_timer *tmr, int tempo);
-int snd_seq_timer_set_ppq(struct snd_seq_timer *tmr, int ppq);
+int snd_seq_timer_set_tempo_ppq(struct snd_seq_timer *tmr, int tempo, int ppq);
int snd_seq_timer_set_position_tick(struct snd_seq_timer *tmr, snd_seq_tick_time_t position);
int snd_seq_timer_set_position_time(struct snd_seq_timer *tmr, snd_seq_real_time_t position);
int snd_seq_timer_set_skew(struct snd_seq_timer *tmr, unsigned int skew, unsigned int base);
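
Merging the two setters above into snd_seq_timer_set_tempo_ppq() makes tempo and ppq change under a single spinlock acquisition, closing the window where a reader could observe one updated and one stale value. The locking shape, sketched with hypothetical demo_* names:

#include <linux/errno.h>
#include <linux/spinlock.h>

struct demo_timer {
	spinlock_t lock;
	int tempo;
	int ppq;
};

static int demo_set_tempo_ppq(struct demo_timer *tmr, int tempo, int ppq)
{
	unsigned long flags;

	if (tempo <= 0 || ppq <= 0)
		return -EINVAL;
	spin_lock_irqsave(&tmr->lock, flags);
	tmr->tempo = tempo;     /* both fields change under one lock */
	tmr->ppq = ppq;
	spin_unlock_irqrestore(&tmr->lock, flags);
	return 0;
}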
diff --git a/sound/core/timer.c b/sound/core/timer.c
index ee09dace8bb1..da05e314917f 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -2072,9 +2072,9 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
return result > 0 ? result : err;
}
-static unsigned int snd_timer_user_poll(struct file *file, poll_table * wait)
+static __poll_t snd_timer_user_poll(struct file *file, poll_table * wait)
{
- unsigned int mask;
+ __poll_t mask;
struct snd_timer_user *tu;
tu = file->private_data;
diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
index 7b2b1f766b00..8fb9a54fe8ba 100644
--- a/sound/drivers/dummy.c
+++ b/sound/drivers/dummy.c
@@ -375,17 +375,9 @@ struct dummy_hrtimer_pcm {
ktime_t period_time;
atomic_t running;
struct hrtimer timer;
- struct tasklet_struct tasklet;
struct snd_pcm_substream *substream;
};
-static void dummy_hrtimer_pcm_elapsed(unsigned long priv)
-{
- struct dummy_hrtimer_pcm *dpcm = (struct dummy_hrtimer_pcm *)priv;
- if (atomic_read(&dpcm->running))
- snd_pcm_period_elapsed(dpcm->substream);
-}
-
static enum hrtimer_restart dummy_hrtimer_callback(struct hrtimer *timer)
{
struct dummy_hrtimer_pcm *dpcm;
@@ -393,7 +385,14 @@ static enum hrtimer_restart dummy_hrtimer_callback(struct hrtimer *timer)
dpcm = container_of(timer, struct dummy_hrtimer_pcm, timer);
if (!atomic_read(&dpcm->running))
return HRTIMER_NORESTART;
- tasklet_schedule(&dpcm->tasklet);
+ /*
+ * In cases of XRUN and draining, this calls .trigger to stop PCM
+ * substream.
+ */
+ snd_pcm_period_elapsed(dpcm->substream);
+ if (!atomic_read(&dpcm->running))
+ return HRTIMER_NORESTART;
+
hrtimer_forward_now(timer, dpcm->period_time);
return HRTIMER_RESTART;
}
@@ -403,7 +402,7 @@ static int dummy_hrtimer_start(struct snd_pcm_substream *substream)
struct dummy_hrtimer_pcm *dpcm = substream->runtime->private_data;
dpcm->base_time = hrtimer_cb_get_time(&dpcm->timer);
- hrtimer_start(&dpcm->timer, dpcm->period_time, HRTIMER_MODE_REL);
+ hrtimer_start(&dpcm->timer, dpcm->period_time, HRTIMER_MODE_REL_SOFT);
atomic_set(&dpcm->running, 1);
return 0;
}
@@ -413,14 +412,14 @@ static int dummy_hrtimer_stop(struct snd_pcm_substream *substream)
struct dummy_hrtimer_pcm *dpcm = substream->runtime->private_data;
atomic_set(&dpcm->running, 0);
- hrtimer_cancel(&dpcm->timer);
+ if (!hrtimer_callback_running(&dpcm->timer))
+ hrtimer_cancel(&dpcm->timer);
return 0;
}
static inline void dummy_hrtimer_sync(struct dummy_hrtimer_pcm *dpcm)
{
hrtimer_cancel(&dpcm->timer);
- tasklet_kill(&dpcm->tasklet);
}
static snd_pcm_uframes_t
@@ -465,12 +464,10 @@ static int dummy_hrtimer_create(struct snd_pcm_substream *substream)
if (!dpcm)
return -ENOMEM;
substream->runtime->private_data = dpcm;
- hrtimer_init(&dpcm->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_init(&dpcm->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
dpcm->timer.function = dummy_hrtimer_callback;
dpcm->substream = substream;
atomic_set(&dpcm->running, 0);
- tasklet_init(&dpcm->tasklet, dummy_hrtimer_pcm_elapsed,
- (unsigned long)dpcm);
return 0;
}
@@ -830,7 +827,7 @@ static int snd_dummy_capsrc_put(struct snd_kcontrol *kcontrol, struct snd_ctl_el
static int snd_dummy_iobox_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *info)
{
- const char *const names[] = { "None", "CD Player" };
+ static const char *const names[] = { "None", "CD Player" };
return snd_ctl_enum_info(info, 1, 2, names);
}
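
The dummy driver changes above drop the tasklet because HRTIMER_MODE_REL_SOFT timers fire their callback in softirq context already, so the period work can run directly in the callback. Minimal sketch of that mode (demo_* names are hypothetical):

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static enum hrtimer_restart demo_cb(struct hrtimer *t)
{
	/* period work runs here; no tasklet_schedule() indirection */
	hrtimer_forward_now(t, ms_to_ktime(10));
	return HRTIMER_RESTART;
}

static void demo_start(struct hrtimer *t)
{
	hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
	t->function = demo_cb;
	hrtimer_start(t, ms_to_ktime(10), HRTIMER_MODE_REL_SOFT);
}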
diff --git a/sound/firewire/bebob/bebob_hwdep.c b/sound/firewire/bebob/bebob_hwdep.c
index 2b367c21b80c..83e791810c52 100644
--- a/sound/firewire/bebob/bebob_hwdep.c
+++ b/sound/firewire/bebob/bebob_hwdep.c
@@ -53,11 +53,11 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
return count;
}
-static unsigned int
+static __poll_t
hwdep_poll(struct snd_hwdep *hwdep, struct file *file, poll_table *wait)
{
struct snd_bebob *bebob = hwdep->private_data;
- unsigned int events;
+ __poll_t events;
poll_wait(file, &bebob->hwdep_wait, wait);
diff --git a/sound/firewire/dice/dice-hwdep.c b/sound/firewire/dice/dice-hwdep.c
index a4dc02a86f12..7a8af0f91c96 100644
--- a/sound/firewire/dice/dice-hwdep.c
+++ b/sound/firewire/dice/dice-hwdep.c
@@ -52,11 +52,11 @@ static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf,
return count;
}
-static unsigned int hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
+static __poll_t hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
poll_table *wait)
{
struct snd_dice *dice = hwdep->private_data;
- unsigned int events;
+ __poll_t events;
poll_wait(file, &dice->hwdep_wait, wait);
diff --git a/sound/firewire/digi00x/digi00x-hwdep.c b/sound/firewire/digi00x/digi00x-hwdep.c
index 463c6b8e864d..a084c2a834db 100644
--- a/sound/firewire/digi00x/digi00x-hwdep.c
+++ b/sound/firewire/digi00x/digi00x-hwdep.c
@@ -60,11 +60,11 @@ static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
return count;
}
-static unsigned int hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
+static __poll_t hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
poll_table *wait)
{
struct snd_dg00x *dg00x = hwdep->private_data;
- unsigned int events;
+ __poll_t events;
poll_wait(file, &dg00x->hwdep_wait, wait);
diff --git a/sound/firewire/fireface/ff-hwdep.c b/sound/firewire/fireface/ff-hwdep.c
index 3ee04b054585..68e273fa5d23 100644
--- a/sound/firewire/fireface/ff-hwdep.c
+++ b/sound/firewire/fireface/ff-hwdep.c
@@ -52,11 +52,11 @@ static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
return count;
}
-static unsigned int hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
+static __poll_t hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
poll_table *wait)
{
struct snd_ff *ff = hwdep->private_data;
- unsigned int events;
+ __poll_t events;
poll_wait(file, &ff->hwdep_wait, wait);
diff --git a/sound/firewire/fireworks/fireworks_hwdep.c b/sound/firewire/fireworks/fireworks_hwdep.c
index a3a3a16f5e08..e0eff9328ee1 100644
--- a/sound/firewire/fireworks/fireworks_hwdep.c
+++ b/sound/firewire/fireworks/fireworks_hwdep.c
@@ -184,11 +184,11 @@ end:
return count;
}
-static unsigned int
+static __poll_t
hwdep_poll(struct snd_hwdep *hwdep, struct file *file, poll_table *wait)
{
struct snd_efw *efw = hwdep->private_data;
- unsigned int events;
+ __poll_t events;
poll_wait(file, &efw->hwdep_wait, wait);
diff --git a/sound/firewire/motu/motu-hwdep.c b/sound/firewire/motu/motu-hwdep.c
index b87ccb69d597..7b6a086866e7 100644
--- a/sound/firewire/motu/motu-hwdep.c
+++ b/sound/firewire/motu/motu-hwdep.c
@@ -59,11 +59,11 @@ static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
return count;
}
-static unsigned int hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
+static __poll_t hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
poll_table *wait)
{
struct snd_motu *motu = hwdep->private_data;
- unsigned int events;
+ __poll_t events;
poll_wait(file, &motu->hwdep_wait, wait);
diff --git a/sound/firewire/oxfw/oxfw-hwdep.c b/sound/firewire/oxfw/oxfw-hwdep.c
index ff2687ad0460..6c1828aff672 100644
--- a/sound/firewire/oxfw/oxfw-hwdep.c
+++ b/sound/firewire/oxfw/oxfw-hwdep.c
@@ -52,11 +52,11 @@ static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
return count;
}
-static unsigned int hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
+static __poll_t hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
poll_table *wait)
{
struct snd_oxfw *oxfw = hwdep->private_data;
- unsigned int events;
+ __poll_t events;
poll_wait(file, &oxfw->hwdep_wait, wait);
diff --git a/sound/firewire/tascam/tascam-hwdep.c b/sound/firewire/tascam/tascam-hwdep.c
index 8c4437d0051d..37b21647b471 100644
--- a/sound/firewire/tascam/tascam-hwdep.c
+++ b/sound/firewire/tascam/tascam-hwdep.c
@@ -50,11 +50,11 @@ static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
return count;
}
-static unsigned int hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
+static __poll_t hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
poll_table *wait)
{
struct snd_tscm *tscm = hwdep->private_data;
- unsigned int events;
+ __poll_t events;
poll_wait(file, &tscm->hwdep_wait, wait);
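The firewire hwdep hunks above are all one mechanical conversion: poll callbacks now return __poll_t, a sparse-checkable bitwise type, instead of a plain unsigned int. A minimal sketch of the resulting callback shape — my_dev and its fields are hypothetical stand-ins for the per-driver structures:

    /* Sketch only; my_dev and its members are hypothetical. */
    static __poll_t my_hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
                                  poll_table *wait)
    {
            struct my_dev *dev = hwdep->private_data;
            __poll_t events;

            poll_wait(file, &dev->hwdep_wait, wait);

            spin_lock_irq(&dev->lock);
            if (dev->pending_event)
                    events = POLLIN | POLLRDNORM;   /* something to read */
            else
                    events = 0;
            spin_unlock_irq(&dev->lock);

            return events;
    }

The functional requirement is simply that both the return type and the local accumulator use __poll_t, so sparse can verify that no non-poll bits leak into the mask.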
diff --git a/sound/hda/ext/hdac_ext_bus.c b/sound/hda/ext/hdac_ext_bus.c
index 31b510c5ca0b..0daf31383084 100644
--- a/sound/hda/ext/hdac_ext_bus.c
+++ b/sound/hda/ext/hdac_ext_bus.c
@@ -146,7 +146,7 @@ int snd_hdac_ext_bus_device_init(struct hdac_ext_bus *ebus, int addr)
edev = kzalloc(sizeof(*edev), GFP_KERNEL);
if (!edev)
return -ENOMEM;
- hdev = &edev->hdac;
+ hdev = &edev->hdev;
edev->ebus = ebus;
snprintf(name, sizeof(name), "ehdaudio%dD%d", ebus->idx, addr);
diff --git a/sound/isa/gus/gus_dma.c b/sound/isa/gus/gus_dma.c
index 36c27c832360..7f95f452f106 100644
--- a/sound/isa/gus/gus_dma.c
+++ b/sound/isa/gus/gus_dma.c
@@ -201,10 +201,9 @@ int snd_gf1_dma_transfer_block(struct snd_gus_card * gus,
struct snd_gf1_dma_block *block;
block = kmalloc(sizeof(*block), atomic ? GFP_ATOMIC : GFP_KERNEL);
- if (block == NULL) {
- snd_printk(KERN_ERR "gf1: DMA transfer failure; not enough memory\n");
+ if (!block)
return -ENOMEM;
- }
+
*block = *__block;
block->next = NULL;
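The gus_dma hunk drops the error printk on kmalloc() failure: the page allocator already emits a warning with a backtrace when an allocation fails, so callers should just propagate -ENOMEM. A sketch of the resulting idiom (names hypothetical):

    struct my_block *block;

    block = kmalloc(sizeof(*block), atomic ? GFP_ATOMIC : GFP_KERNEL);
    if (!block)
            return -ENOMEM; /* no message: the allocator already warned */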
diff --git a/sound/mips/hal2.c b/sound/mips/hal2.c
index 37d378a26a50..c8904e732aaa 100644
--- a/sound/mips/hal2.c
+++ b/sound/mips/hal2.c
@@ -814,7 +814,7 @@ static int hal2_create(struct snd_card *card, struct snd_hal2 **rchip)
struct hpc3_regs *hpc3 = hpc3c0;
int err;
- hal2 = kzalloc(sizeof(struct snd_hal2), GFP_KERNEL);
+ hal2 = kzalloc(sizeof(*hal2), GFP_KERNEL);
if (!hal2)
return -ENOMEM;
diff --git a/sound/mips/sgio2audio.c b/sound/mips/sgio2audio.c
index 71c942162c25..9fb68b35de5a 100644
--- a/sound/mips/sgio2audio.c
+++ b/sound/mips/sgio2audio.c
@@ -840,7 +840,7 @@ static int snd_sgio2audio_create(struct snd_card *card,
if (!(readq(&mace->perif.audio.control) & AUDIO_CONTROL_CODEC_PRESENT))
return -ENOENT;
- chip = kzalloc(sizeof(struct snd_sgio2audio), GFP_KERNEL);
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
if (chip == NULL)
return -ENOMEM;
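hal2 and sgio2audio switch kzalloc(sizeof(struct foo), ...) to kzalloc(sizeof(*ptr), ...). Both spell the same size, but the pointer-based form cannot go stale if the variable's type is ever changed. Sketch, with a hypothetical struct:

    struct my_chip *chip;

    /* sizeof(*chip) follows chip's type; sizeof(struct my_chip) would not */
    chip = kzalloc(sizeof(*chip), GFP_KERNEL);
    if (!chip)
            return -ENOMEM;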
diff --git a/sound/oss/dmasound/dmasound_core.c b/sound/oss/dmasound/dmasound_core.c
index fb3bbceb1fef..6b57f8aac1b7 100644
--- a/sound/oss/dmasound/dmasound_core.c
+++ b/sound/oss/dmasound/dmasound_core.c
@@ -670,9 +670,9 @@ static ssize_t sq_write(struct file *file, const char __user *src, size_t uLeft,
return uUsed < 0? uUsed: uWritten;
}
-static unsigned int sq_poll(struct file *file, struct poll_table_struct *wait)
+static __poll_t sq_poll(struct file *file, struct poll_table_struct *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
int retVal;
if (write_sq.locked == 0) {
diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
index 7f3b5ed81995..f7a492c382d9 100644
--- a/sound/pci/hda/Kconfig
+++ b/sound/pci/hda/Kconfig
@@ -88,7 +88,6 @@ config SND_HDA_PATCH_LOADER
config SND_HDA_CODEC_REALTEK
tristate "Build Realtek HD-audio codec support"
select SND_HDA_GENERIC
- select INPUT
help
Say Y or M here to include Realtek HD-audio codec support in
snd-hda-intel driver, such as ALC880.
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 9aafc6c86132..23475888192b 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -3154,11 +3154,13 @@ static void alc256_shutup(struct hda_codec *codec)
if (hp_pin_sense)
msleep(85);
+ /* 3k pull low control for Headset jack. */
+ /* NOTE: call this before clearing the pin, otherwise codec stalls */
+ alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
+
snd_hda_codec_write(codec, hp_pin, 0,
AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
- alc_update_coef_idx(codec, 0x46, 0, 3 << 12); /* 3k pull low control for Headset jack. */
-
if (hp_pin_sense)
msleep(100);
@@ -3166,6 +3168,93 @@ static void alc256_shutup(struct hda_codec *codec)
snd_hda_shutup_pins(codec);
}
+static void alc225_init(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+ hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+ bool hp1_pin_sense, hp2_pin_sense;
+
+ if (!hp_pin)
+ return;
+
+ msleep(30);
+
+ hp1_pin_sense = snd_hda_jack_detect(codec, hp_pin);
+ hp2_pin_sense = snd_hda_jack_detect(codec, 0x16);
+
+ if (hp1_pin_sense || hp2_pin_sense)
+ msleep(2);
+
+ alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x1); /* Low power */
+
+ if (hp1_pin_sense)
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+ if (hp2_pin_sense)
+ snd_hda_codec_write(codec, 0x16, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+
+ if (hp1_pin_sense || hp2_pin_sense)
+ msleep(85);
+
+ if (hp1_pin_sense)
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
+ if (hp2_pin_sense)
+ snd_hda_codec_write(codec, 0x16, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
+
+ if (hp1_pin_sense || hp2_pin_sense)
+ msleep(100);
+
+ alc_update_coef_idx(codec, 0x4a, 3 << 10, 0);
+ alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x4); /* High power */
+}
+
+static void alc225_shutup(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+ hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+ bool hp1_pin_sense, hp2_pin_sense;
+
+ if (!hp_pin) {
+ alc269_shutup(codec);
+ return;
+ }
+
+ /* 3k pull low control for Headset jack. */
+ alc_update_coef_idx(codec, 0x4a, 0, 3 << 10);
+
+ hp1_pin_sense = snd_hda_jack_detect(codec, hp_pin);
+ hp2_pin_sense = snd_hda_jack_detect(codec, 0x16);
+
+ if (hp1_pin_sense || hp2_pin_sense)
+ msleep(2);
+
+ if (hp1_pin_sense)
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+ if (hp2_pin_sense)
+ snd_hda_codec_write(codec, 0x16, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+
+ if (hp1_pin_sense || hp2_pin_sense)
+ msleep(85);
+
+ if (hp1_pin_sense)
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+ if (hp2_pin_sense)
+ snd_hda_codec_write(codec, 0x16, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+
+ if (hp1_pin_sense || hp2_pin_sense)
+ msleep(100);
+
+ alc_auto_setup_eapd(codec, false);
+ snd_hda_shutup_pins(codec);
+}
+
static void alc_default_init(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
@@ -3723,6 +3812,7 @@ static void alc280_fixup_hp_gpio4(struct hda_codec *codec,
}
}
+#if IS_REACHABLE(CONFIG_INPUT)
static void gpio2_mic_hotkey_event(struct hda_codec *codec,
struct hda_jack_callback *event)
{
@@ -3855,6 +3945,10 @@ static void alc233_fixup_lenovo_line2_mic_hotkey(struct hda_codec *codec,
spec->kb_dev = NULL;
}
}
+#else /* !CONFIG_INPUT */
+#define alc280_fixup_hp_gpio2_mic_hotkey NULL
+#define alc233_fixup_lenovo_line2_mic_hotkey NULL
+#endif /* CONFIG_INPUT */
static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
@@ -3994,8 +4088,11 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
case 0x10ec0668:
alc_process_coef_fw(codec, coef0668);
break;
+ case 0x10ec0215:
case 0x10ec0225:
+ case 0x10ec0285:
case 0x10ec0295:
+ case 0x10ec0289:
case 0x10ec0299:
alc_process_coef_fw(codec, coef0225);
break;
@@ -4117,8 +4214,11 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
alc_process_coef_fw(codec, coef0688);
snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
break;
+ case 0x10ec0215:
case 0x10ec0225:
+ case 0x10ec0285:
case 0x10ec0295:
+ case 0x10ec0289:
case 0x10ec0299:
alc_process_coef_fw(codec, alc225_pre_hsmode);
alc_update_coef_idx(codec, 0x45, 0x3f<<10, 0x31<<10);
@@ -4189,8 +4289,11 @@ static void alc_headset_mode_default(struct hda_codec *codec)
};
switch (codec->core.vendor_id) {
+ case 0x10ec0215:
case 0x10ec0225:
+ case 0x10ec0285:
case 0x10ec0295:
+ case 0x10ec0289:
case 0x10ec0299:
alc_process_coef_fw(codec, alc225_pre_hsmode);
alc_process_coef_fw(codec, coef0225);
@@ -4332,8 +4435,11 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
case 0x10ec0668:
alc_process_coef_fw(codec, coef0688);
break;
+ case 0x10ec0215:
case 0x10ec0225:
+ case 0x10ec0285:
case 0x10ec0295:
+ case 0x10ec0289:
case 0x10ec0299:
val = alc_read_coef_idx(codec, 0x45);
if (val & (1 << 9))
@@ -4436,8 +4542,11 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
case 0x10ec0668:
alc_process_coef_fw(codec, coef0688);
break;
+ case 0x10ec0215:
case 0x10ec0225:
+ case 0x10ec0285:
case 0x10ec0295:
+ case 0x10ec0289:
case 0x10ec0299:
alc_process_coef_fw(codec, coef0225);
break;
@@ -4566,9 +4675,18 @@ static void alc_determine_headset_type(struct hda_codec *codec)
val = alc_read_coef_idx(codec, 0xbe);
is_ctia = (val & 0x1c02) == 0x1c02;
break;
+ case 0x10ec0215:
case 0x10ec0225:
+ case 0x10ec0285:
case 0x10ec0295:
+ case 0x10ec0289:
case 0x10ec0299:
+ snd_hda_codec_write(codec, 0x21, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+ msleep(80);
+ snd_hda_codec_write(codec, 0x21, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+
alc_process_coef_fw(codec, alc225_pre_hsmode);
alc_update_coef_idx(codec, 0x67, 0xf000, 0x1000);
val = alc_read_coef_idx(codec, 0x45);
@@ -4588,6 +4706,12 @@ static void alc_determine_headset_type(struct hda_codec *codec)
alc_update_coef_idx(codec, 0x4a, 7<<6, 7<<6);
alc_update_coef_idx(codec, 0x4a, 3<<4, 3<<4);
alc_update_coef_idx(codec, 0x67, 0xf000, 0x3000);
+
+ snd_hda_codec_write(codec, 0x21, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
+ msleep(80);
+ snd_hda_codec_write(codec, 0x21, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
break;
case 0x10ec0867:
is_ctia = true;
@@ -6920,16 +7044,17 @@ static int patch_alc269(struct hda_codec *codec)
case 0x10ec0285:
case 0x10ec0289:
spec->codec_variant = ALC269_TYPE_ALC215;
+ spec->shutup = alc225_shutup;
+ spec->init_hook = alc225_init;
spec->gen.mixer_nid = 0;
break;
case 0x10ec0225:
case 0x10ec0295:
- spec->codec_variant = ALC269_TYPE_ALC225;
- spec->gen.mixer_nid = 0; /* no loopback on ALC225 ALC295 */
- break;
case 0x10ec0299:
spec->codec_variant = ALC269_TYPE_ALC225;
- spec->gen.mixer_nid = 0; /* no loopback on ALC299 */
+ spec->shutup = alc225_shutup;
+ spec->init_hook = alc225_init;
+ spec->gen.mixer_nid = 0; /* no loopback on ALC225, ALC295 and ALC299 */
break;
case 0x10ec0234:
case 0x10ec0274:
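The Kconfig hunk above stops SND_HDA_CODEC_REALTEK from force-selecting INPUT; instead, the two mic-hotkey fixups are compiled only when the input core is reachable, and their quirk-table entries degrade to NULL (and thus do nothing) otherwise. A sketch of that guard pattern — foo_hotkey_fixup is a hypothetical stand-in:

    #if IS_REACHABLE(CONFIG_INPUT)
    static void foo_hotkey_fixup(struct hda_codec *codec,
                                 const struct hda_fixup *fix, int action)
    {
            /* full handler; free to call input_allocate_device() etc. */
    }
    #else
    /* Input core absent, or modular while this driver is built in:
     * a NULL fixup leaves the quirk-table entry inert. */
    #define foo_hotkey_fixup NULL
    #endif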
diff --git a/sound/pci/ice1712/prodigy_hifi.c b/sound/pci/ice1712/prodigy_hifi.c
index 2697402b5195..8dabd4d0211d 100644
--- a/sound/pci/ice1712/prodigy_hifi.c
+++ b/sound/pci/ice1712/prodigy_hifi.c
@@ -965,13 +965,32 @@ static int prodigy_hd2_add_controls(struct snd_ice1712 *ice)
return 0;
}
+static void wm8766_init(struct snd_ice1712 *ice)
+{
+ static unsigned short wm8766_inits[] = {
+ WM8766_RESET, 0x0000,
+ WM8766_DAC_CTRL, 0x0120,
+ WM8766_INT_CTRL, 0x0022, /* I2S Normal Mode, 24 bit */
+ WM8766_DAC_CTRL2, 0x0001,
+ WM8766_DAC_CTRL3, 0x0080,
+ WM8766_LDA1, 0x0100,
+ WM8766_LDA2, 0x0100,
+ WM8766_LDA3, 0x0100,
+ WM8766_RDA1, 0x0100,
+ WM8766_RDA2, 0x0100,
+ WM8766_RDA3, 0x0100,
+ WM8766_MUTE1, 0x0000,
+ WM8766_MUTE2, 0x0000,
+ };
+ unsigned int i;
-/*
- * initialize the chip
- */
-static int prodigy_hifi_init(struct snd_ice1712 *ice)
+ for (i = 0; i < ARRAY_SIZE(wm8766_inits); i += 2)
+ wm8766_spi_write(ice, wm8766_inits[i], wm8766_inits[i + 1]);
+}
+
+static void wm8776_init(struct snd_ice1712 *ice)
{
- static unsigned short wm_inits[] = {
+ static unsigned short wm8776_inits[] = {
/* These come first to reduce init pop noise */
WM_ADC_MUX, 0x0003, /* ADC mute */
/* 0x00c0 replaced by 0x0003 */
@@ -982,7 +1001,76 @@ static int prodigy_hifi_init(struct snd_ice1712 *ice)
WM_POWERDOWN, 0x0008, /* All power-up except HP */
WM_RESET, 0x0000, /* reset */
};
- static unsigned short wm_inits2[] = {
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(wm8776_inits); i += 2)
+ wm_put(ice, wm8776_inits[i], wm8776_inits[i + 1]);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int prodigy_hifi_resume(struct snd_ice1712 *ice)
+{
+ static unsigned short wm8776_reinit_registers[] = {
+ WM_MASTER_CTRL,
+ WM_DAC_INT,
+ WM_ADC_INT,
+ WM_OUT_MUX,
+ WM_HP_ATTEN_L,
+ WM_HP_ATTEN_R,
+ WM_PHASE_SWAP,
+ WM_DAC_CTRL2,
+ WM_ADC_ATTEN_L,
+ WM_ADC_ATTEN_R,
+ WM_ALC_CTRL1,
+ WM_ALC_CTRL2,
+ WM_ALC_CTRL3,
+ WM_NOISE_GATE,
+ WM_ADC_MUX,
+ /* no DAC attenuation here */
+ };
+ struct prodigy_hifi_spec *spec = ice->spec;
+ int i, ch;
+
+ mutex_lock(&ice->gpio_mutex);
+
+ /* reinitialize WM8776 and re-apply old register values */
+ wm8776_init(ice);
+ schedule_timeout_uninterruptible(1);
+ for (i = 0; i < ARRAY_SIZE(wm8776_reinit_registers); i++)
+ wm_put(ice, wm8776_reinit_registers[i],
+ wm_get(ice, wm8776_reinit_registers[i]));
+
+ /* reinitialize WM8766 and re-apply volumes for all DACs */
+ wm8766_init(ice);
+ for (ch = 0; ch < 2; ch++) {
+ wm_set_vol(ice, WM_DAC_ATTEN_L + ch,
+ spec->vol[2 + ch], spec->master[ch]);
+
+ wm8766_set_vol(ice, WM8766_LDA1 + ch,
+ spec->vol[0 + ch], spec->master[ch]);
+
+ wm8766_set_vol(ice, WM8766_LDA2 + ch,
+ spec->vol[4 + ch], spec->master[ch]);
+
+ wm8766_set_vol(ice, WM8766_LDA3 + ch,
+ spec->vol[6 + ch], spec->master[ch]);
+ }
+
+ /* unmute WM8776 DAC */
+ wm_put(ice, WM_DAC_MUTE, 0x00);
+ wm_put(ice, WM_DAC_CTRL1, 0x90);
+
+ mutex_unlock(&ice->gpio_mutex);
+ return 0;
+}
+#endif
+
+/*
+ * initialize the chip
+ */
+static int prodigy_hifi_init(struct snd_ice1712 *ice)
+{
+ static unsigned short wm8776_defaults[] = {
WM_MASTER_CTRL, 0x0022, /* 256fs, slave mode */
WM_DAC_INT, 0x0022, /* I2S, normal polarity, 24bit */
WM_ADC_INT, 0x0022, /* I2S, normal polarity, 24bit */
@@ -1010,22 +1098,6 @@ static int prodigy_hifi_init(struct snd_ice1712 *ice)
WM_DAC_MUTE, 0x0000, /* DAC unmute */
WM_ADC_MUX, 0x0003, /* ADC unmute, both CD/Line On */
};
- static unsigned short wm8766_inits[] = {
- WM8766_RESET, 0x0000,
- WM8766_DAC_CTRL, 0x0120,
- WM8766_INT_CTRL, 0x0022, /* I2S Normal Mode, 24 bit */
- WM8766_DAC_CTRL2, 0x0001,
- WM8766_DAC_CTRL3, 0x0080,
- WM8766_LDA1, 0x0100,
- WM8766_LDA2, 0x0100,
- WM8766_LDA3, 0x0100,
- WM8766_RDA1, 0x0100,
- WM8766_RDA2, 0x0100,
- WM8766_RDA3, 0x0100,
- WM8766_MUTE1, 0x0000,
- WM8766_MUTE2, 0x0000,
- };
-
struct prodigy_hifi_spec *spec;
unsigned int i;
@@ -1052,16 +1124,17 @@ static int prodigy_hifi_init(struct snd_ice1712 *ice)
ice->spec = spec;
/* initialize WM8776 codec */
- for (i = 0; i < ARRAY_SIZE(wm_inits); i += 2)
- wm_put(ice, wm_inits[i], wm_inits[i+1]);
+ wm8776_init(ice);
schedule_timeout_uninterruptible(1);
- for (i = 0; i < ARRAY_SIZE(wm_inits2); i += 2)
- wm_put(ice, wm_inits2[i], wm_inits2[i+1]);
+ for (i = 0; i < ARRAY_SIZE(wm8776_defaults); i += 2)
+ wm_put(ice, wm8776_defaults[i], wm8776_defaults[i + 1]);
- /* initialize WM8766 codec */
- for (i = 0; i < ARRAY_SIZE(wm8766_inits); i += 2)
- wm8766_spi_write(ice, wm8766_inits[i], wm8766_inits[i+1]);
+ wm8766_init(ice);
+#ifdef CONFIG_PM_SLEEP
+ ice->pm_resume = &prodigy_hifi_resume;
+ ice->pm_suspend_enabled = 1;
+#endif
return 0;
}
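The prodigy_hifi refactor exists so that system resume can replay the codec setup: the WM8766/WM8776 bring-up tables become wm8766_init()/wm8776_init(), and prodigy_hifi_resume() reruns them before re-applying the cached volumes. The tables are flat register/value pairs walked with a stride of two; a self-contained sketch of the idiom (registers and names hypothetical):

    #define MY_RESET    0x00
    #define MY_DAC_CTRL 0x02
    #define MY_MUTE     0x0e

    static const unsigned short my_codec_inits[] = {
            MY_RESET,    0x0000,
            MY_DAC_CTRL, 0x0120,
            MY_MUTE,     0x0000,
    };

    static void my_codec_init(struct my_ice *ice)
    {
            unsigned int i;

            /* entries are (register, value) pairs, hence i += 2 */
            for (i = 0; i < ARRAY_SIZE(my_codec_inits); i += 2)
                    my_codec_write(ice, my_codec_inits[i],
                                   my_codec_inits[i + 1]);
    }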
diff --git a/sound/pci/korg1212/korg1212.c b/sound/pci/korg1212/korg1212.c
index c7b007164c99..4206ba44d8bb 100644
--- a/sound/pci/korg1212/korg1212.c
+++ b/sound/pci/korg1212/korg1212.c
@@ -2348,7 +2348,6 @@ static int snd_korg1212_create(struct snd_card *card, struct pci_dev *pci,
err = request_firmware(&dsp_code, "korg/k1212.dsp", &pci->dev);
if (err < 0) {
- release_firmware(dsp_code);
snd_printk(KERN_ERR "firmware not available\n");
snd_korg1212_free(korg1212);
return err;
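The korg1212 hunk deletes release_firmware() from the request_firmware() failure path: on failure the firmware pointer is left NULL (and release_firmware(NULL) is a no-op), so the call released nothing and only obscured the error path. The release belongs after a successful load. Sketch of the convention, with device-specific cleanup elided:

    const struct firmware *dsp_code;
    int err;

    err = request_firmware(&dsp_code, "korg/k1212.dsp", &pci->dev);
    if (err < 0)
            return err;     /* dsp_code is NULL here; nothing to release */

    /* ... download dsp_code->data / dsp_code->size to the device ... */

    release_firmware(dsp_code);     /* release only a successful request */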
diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig
index d22758165496..84c3582f3982 100644
--- a/sound/soc/Kconfig
+++ b/sound/soc/Kconfig
@@ -71,6 +71,7 @@ source "sound/soc/stm/Kconfig"
source "sound/soc/sunxi/Kconfig"
source "sound/soc/tegra/Kconfig"
source "sound/soc/txx9/Kconfig"
+source "sound/soc/uniphier/Kconfig"
source "sound/soc/ux500/Kconfig"
source "sound/soc/xtensa/Kconfig"
source "sound/soc/zte/Kconfig"
diff --git a/sound/soc/Makefile b/sound/soc/Makefile
index 5327f4d6c668..74cd1858d38b 100644
--- a/sound/soc/Makefile
+++ b/sound/soc/Makefile
@@ -55,6 +55,7 @@ obj-$(CONFIG_SND_SOC) += stm/
obj-$(CONFIG_SND_SOC) += sunxi/
obj-$(CONFIG_SND_SOC) += tegra/
obj-$(CONFIG_SND_SOC) += txx9/
+obj-$(CONFIG_SND_SOC) += uniphier/
obj-$(CONFIG_SND_SOC) += ux500/
obj-$(CONFIG_SND_SOC) += xtensa/
obj-$(CONFIG_SND_SOC) += zte/
diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c
index b5e41df6bb3a..c33a512283a4 100644
--- a/sound/soc/amd/acp-pcm-dma.c
+++ b/sound/soc/amd/acp-pcm-dma.c
@@ -850,6 +850,9 @@ static snd_pcm_uframes_t acp_dma_pointer(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime = substream->runtime;
struct audio_substream_data *rtd = runtime->private_data;
+ if (!rtd)
+ return -EINVAL;
+
buffersize = frames_to_bytes(runtime, runtime->buffer_size);
bytescount = acp_get_byte_count(rtd->acp_mmio, substream->stream);
@@ -875,6 +878,8 @@ static int acp_dma_prepare(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime = substream->runtime;
struct audio_substream_data *rtd = runtime->private_data;
+ if (!rtd)
+ return -EINVAL;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
config_acp_dma_channel(rtd->acp_mmio, SYSRAM_TO_ACP_CH_NUM,
PLAYBACK_START_DMA_DESCR_CH12,
@@ -1091,7 +1096,11 @@ static int acp_audio_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, audio_drv_data);
/* Initialize the ACP */
- acp_init(audio_drv_data->acp_mmio, audio_drv_data->asic_type);
+ status = acp_init(audio_drv_data->acp_mmio, audio_drv_data->asic_type);
+ if (status) {
+ dev_err(&pdev->dev, "ACP Init failed status:%d\n", status);
+ return status;
+ }
status = snd_soc_register_platform(&pdev->dev, &acp_asoc_platform);
if (status != 0) {
@@ -1108,9 +1117,12 @@ static int acp_audio_probe(struct platform_device *pdev)
static int acp_audio_remove(struct platform_device *pdev)
{
+ int status;
struct audio_drv_data *adata = dev_get_drvdata(&pdev->dev);
- acp_deinit(adata->acp_mmio);
+ status = acp_deinit(adata->acp_mmio);
+ if (status)
+ dev_err(&pdev->dev, "ACP Deinit failed status:%d\n", status);
snd_soc_unregister_platform(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -1120,9 +1132,14 @@ static int acp_audio_remove(struct platform_device *pdev)
static int acp_pcm_resume(struct device *dev)
{
u16 bank;
+ int status;
struct audio_drv_data *adata = dev_get_drvdata(dev);
- acp_init(adata->acp_mmio, adata->asic_type);
+ status = acp_init(adata->acp_mmio, adata->asic_type);
+ if (status) {
+ dev_err(dev, "ACP Init failed status:%d\n", status);
+ return status;
+ }
if (adata->play_stream && adata->play_stream->runtime) {
 /* For Stoney, Memory gating is disabled, i.e. SRAM Banks
@@ -1154,18 +1171,26 @@ static int acp_pcm_resume(struct device *dev)
static int acp_pcm_runtime_suspend(struct device *dev)
{
+ int status;
struct audio_drv_data *adata = dev_get_drvdata(dev);
- acp_deinit(adata->acp_mmio);
+ status = acp_deinit(adata->acp_mmio);
+ if (status)
+ dev_err(dev, "ACP Deinit failed status:%d\n", status);
acp_reg_write(0, adata->acp_mmio, mmACP_EXTERNAL_INTR_ENB);
return 0;
}
static int acp_pcm_runtime_resume(struct device *dev)
{
+ int status;
struct audio_drv_data *adata = dev_get_drvdata(dev);
- acp_init(adata->acp_mmio, adata->asic_type);
+ status = acp_init(adata->acp_mmio, adata->asic_type);
+ if (status) {
+ dev_err(dev, "ACP Init failed status:%d\n", status);
+ return status;
+ }
acp_reg_write(1, adata->acp_mmio, mmACP_EXTERNAL_INTR_ENB);
return 0;
}
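The acp-pcm-dma changes are all one theme: acp_init() and acp_deinit() return a status that was previously discarded, so the probe, resume and runtime-PM paths now check and propagate it. A sketch of the resulting PM callback shape (driver names hypothetical):

    static int my_runtime_resume(struct device *dev)
    {
            struct my_drv_data *adata = dev_get_drvdata(dev);
            int status;

            status = my_hw_init(adata->mmio, adata->asic_type);
            if (status) {
                    dev_err(dev, "hw init failed: %d\n", status);
                    return status;  /* let the PM core see the failure */
            }

            my_enable_interrupts(adata);
            return 0;
    }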
diff --git a/sound/soc/atmel/atmel-classd.c b/sound/soc/atmel/atmel-classd.c
index 8445edd06737..ebabed69f0e6 100644
--- a/sound/soc/atmel/atmel-classd.c
+++ b/sound/soc/atmel/atmel-classd.c
@@ -308,15 +308,9 @@ static int atmel_classd_codec_resume(struct snd_soc_codec *codec)
return regcache_sync(dd->regmap);
}
-static struct regmap *atmel_classd_codec_get_remap(struct device *dev)
-{
- return dev_get_regmap(dev, NULL);
-}
-
static struct snd_soc_codec_driver soc_codec_dev_classd = {
.probe = atmel_classd_codec_probe,
.resume = atmel_classd_codec_resume,
- .get_regmap = atmel_classd_codec_get_remap,
.component_driver = {
.controls = atmel_classd_snd_controls,
.num_controls = ARRAY_SIZE(atmel_classd_snd_controls),
diff --git a/sound/soc/au1x/ac97c.c b/sound/soc/au1x/ac97c.c
index 29a97d52e8ad..66d6c52e7761 100644
--- a/sound/soc/au1x/ac97c.c
+++ b/sound/soc/au1x/ac97c.c
@@ -91,8 +91,8 @@ static unsigned short au1xac97c_ac97_read(struct snd_ac97 *ac97,
do {
mutex_lock(&ctx->lock);
- tmo = 5;
- while ((RD(ctx, AC97_STATUS) & STAT_CP) && tmo--)
+ tmo = 6;
+ while ((RD(ctx, AC97_STATUS) & STAT_CP) && --tmo)
udelay(21); /* wait an ac97 frame time */
if (!tmo) {
pr_debug("ac97rd timeout #1\n");
@@ -105,7 +105,7 @@ static unsigned short au1xac97c_ac97_read(struct snd_ac97 *ac97,
* poll, Forrest, poll...
*/
tmo = 0x10000;
- while ((RD(ctx, AC97_STATUS) & STAT_CP) && tmo--)
+ while ((RD(ctx, AC97_STATUS) & STAT_CP) && --tmo)
asm volatile ("nop");
data = RD(ctx, AC97_CMDRESP);
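The au1x change fixes a classic countdown bug: with a post-decrement in the loop condition, a timeout exits the loop with tmo at -1, so the following if (!tmo) never fires. Pre-decrement makes tmo land on exactly 0 at timeout, and the initial value is bumped from 5 to 6 to keep the same number of polls. The idiom, sketched with a hypothetical device_ready():

    int tmo = 6;    /* one extra so --tmo still allows five delay cycles */

    while (!device_ready() && --tmo)
            udelay(21);
    if (!tmo)       /* 0 here now reliably means "timed out" */
            return -ETIMEDOUT;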
diff --git a/sound/soc/bcm/bcm2835-i2s.c b/sound/soc/bcm/bcm2835-i2s.c
index 2e449d7173fc..d5f73a8ab893 100644
--- a/sound/soc/bcm/bcm2835-i2s.c
+++ b/sound/soc/bcm/bcm2835-i2s.c
@@ -130,6 +130,7 @@ struct bcm2835_i2s_dev {
struct regmap *i2s_regmap;
struct clk *clk;
bool clk_prepared;
+ int clk_rate;
};
static void bcm2835_i2s_start_clock(struct bcm2835_i2s_dev *dev)
@@ -419,10 +420,19 @@ static int bcm2835_i2s_hw_params(struct snd_pcm_substream *substream,
}
/* Clock should only be set up here if CPU is clock master */
- if (bit_clock_master) {
- ret = clk_set_rate(dev->clk, bclk_rate);
- if (ret)
- return ret;
+ if (bit_clock_master &&
+ (!dev->clk_prepared || dev->clk_rate != bclk_rate)) {
+ if (dev->clk_prepared)
+ bcm2835_i2s_stop_clock(dev);
+
+ if (dev->clk_rate != bclk_rate) {
+ ret = clk_set_rate(dev->clk, bclk_rate);
+ if (ret)
+ return ret;
+ dev->clk_rate = bclk_rate;
+ }
+
+ bcm2835_i2s_start_clock(dev);
}
/* Setup the frame format */
@@ -618,8 +628,6 @@ static int bcm2835_i2s_prepare(struct snd_pcm_substream *substream,
struct bcm2835_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
uint32_t cs_reg;
- bcm2835_i2s_start_clock(dev);
-
/*
* Clear both FIFOs if the one that should be started
* is not empty at the moment. This should only happen
diff --git a/sound/soc/cirrus/ep93xx-ac97.c b/sound/soc/cirrus/ep93xx-ac97.c
index bbf7a9266a99..cd5a939ad608 100644
--- a/sound/soc/cirrus/ep93xx-ac97.c
+++ b/sound/soc/cirrus/ep93xx-ac97.c
@@ -365,7 +365,7 @@ static int ep93xx_ac97_probe(struct platform_device *pdev)
{
struct ep93xx_ac97_info *info;
struct resource *res;
- unsigned int irq;
+ int irq;
int ret;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
@@ -378,8 +378,8 @@ static int ep93xx_ac97_probe(struct platform_device *pdev)
return PTR_ERR(info->regs);
irq = platform_get_irq(pdev, 0);
- if (!irq)
- return -ENODEV;
+ if (irq <= 0)
+ return irq < 0 ? irq : -ENODEV;
ret = devm_request_irq(&pdev->dev, irq, ep93xx_ac97_interrupt,
IRQF_TRIGGER_HIGH, pdev->name, info);
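The ep93xx-ac97 probe fix encodes the platform_get_irq() contract: the return value is signed and is a negative errno on failure, and 0 is never a valid IRQ, so testing !irq on an unsigned variable was doubly wrong. Sketch (my_irq_handler is hypothetical):

    int irq;        /* signed: may carry -EPROBE_DEFER etc. */

    irq = platform_get_irq(pdev, 0);
    if (irq < 0)
            return irq;             /* propagate the errno */
    if (irq == 0)
            return -ENODEV;         /* 0 is not a usable IRQ number */

    ret = devm_request_irq(&pdev->dev, irq, my_irq_handler,
                           IRQF_TRIGGER_HIGH, pdev->name, info);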
diff --git a/sound/soc/codecs/88pm860x-codec.c b/sound/soc/codecs/88pm860x-codec.c
index 848c5fe49bc7..be8ea723dff9 100644
--- a/sound/soc/codecs/88pm860x-codec.c
+++ b/sound/soc/codecs/88pm860x-codec.c
@@ -1319,6 +1319,7 @@ static int pm860x_probe(struct snd_soc_codec *codec)
int i, ret;
pm860x->codec = codec;
+ snd_soc_codec_init_regmap(codec, pm860x->regmap);
for (i = 0; i < 4; i++) {
ret = request_threaded_irq(pm860x->irq[i], NULL,
@@ -1348,18 +1349,10 @@ static int pm860x_remove(struct snd_soc_codec *codec)
return 0;
}
-static struct regmap *pm860x_get_regmap(struct device *dev)
-{
- struct pm860x_priv *pm860x = dev_get_drvdata(dev);
-
- return pm860x->regmap;
-}
-
static const struct snd_soc_codec_driver soc_codec_dev_pm860x = {
.probe = pm860x_probe,
.remove = pm860x_remove,
.set_bias_level = pm860x_set_bias_level,
- .get_regmap = pm860x_get_regmap,
.component_driver = {
.controls = pm860x_snd_controls,
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index a42ddbc93f3d..2b331f7266ab 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -95,6 +95,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_MAX98925 if I2C
select SND_SOC_MAX98926 if I2C
select SND_SOC_MAX98927 if I2C
+ select SND_SOC_MAX98373 if I2C
select SND_SOC_MAX9850 if I2C
select SND_SOC_MAX9860 if I2C
select SND_SOC_MAX9768 if I2C
@@ -109,6 +110,8 @@ config SND_SOC_ALL_CODECS
select SND_SOC_PCM1681 if I2C
select SND_SOC_PCM179X_I2C if I2C
select SND_SOC_PCM179X_SPI if SPI_MASTER
+ select SND_SOC_PCM186X_I2C if I2C
+ select SND_SOC_PCM186X_SPI if SPI_MASTER
select SND_SOC_PCM3008
select SND_SOC_PCM3168A_I2C if I2C
select SND_SOC_PCM3168A_SPI if SPI_MASTER
@@ -133,7 +136,6 @@ config SND_SOC_ALL_CODECS
select SND_SOC_SGTL5000 if I2C
select SND_SOC_SI476X if MFD_SI476X_CORE
select SND_SOC_SIRF_AUDIO_CODEC
- select SND_SOC_SN95031 if INTEL_SCU_IPC
select SND_SOC_SPDIF
select SND_SOC_SSM2518 if I2C
select SND_SOC_SSM2602_SPI if SPI_MASTER
@@ -148,6 +150,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_TAS5086 if I2C
select SND_SOC_TAS571X if I2C
select SND_SOC_TAS5720 if I2C
+ select SND_SOC_TAS6424 if I2C
select SND_SOC_TFA9879 if I2C
select SND_SOC_TLV320AIC23_I2C if I2C
select SND_SOC_TLV320AIC23_SPI if SPI_MASTER
@@ -158,6 +161,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_TLV320AIC3X if I2C
select SND_SOC_TPA6130A2 if I2C
select SND_SOC_TLV320DAC33 if I2C
+ select SND_SOC_TSCS42XX if I2C
select SND_SOC_TS3A227E if I2C
select SND_SOC_TWL4030 if TWL4030_CORE
select SND_SOC_TWL6040 if TWL6040_CORE
@@ -623,6 +627,10 @@ config SND_SOC_MAX98927
tristate "Maxim Integrated MAX98927 Speaker Amplifier"
depends on I2C
+config SND_SOC_MAX98373
+ tristate "Maxim Integrated MAX98373 Speaker Amplifier"
+ depends on I2C
+
config SND_SOC_MAX9850
tristate
@@ -661,6 +669,21 @@ config SND_SOC_PCM179X_SPI
Enable support for Texas Instruments PCM179x CODEC.
Select this if your PCM179x is connected via an SPI bus.
+config SND_SOC_PCM186X
+ tristate
+
+config SND_SOC_PCM186X_I2C
+ tristate "Texas Instruments PCM186x CODECs - I2C"
+ depends on I2C
+ select SND_SOC_PCM186X
+ select REGMAP_I2C
+
+config SND_SOC_PCM186X_SPI
+ tristate "Texas Instruments PCM186x CODECs - SPI"
+ depends on SPI_MASTER
+ select SND_SOC_PCM186X
+ select REGMAP_SPI
+
config SND_SOC_PCM3008
tristate
@@ -818,9 +841,6 @@ config SND_SOC_SIRF_AUDIO_CODEC
tristate "SiRF SoC internal audio codec"
select REGMAP_MMIO
-config SND_SOC_SN95031
- tristate
-
config SND_SOC_SPDIF
tristate "S/PDIF CODEC"
@@ -883,6 +903,13 @@ config SND_SOC_TAS5720
Enable support for Texas Instruments TAS5720L/M high-efficiency mono
Class-D audio power amplifiers.
+config SND_SOC_TAS6424
+ tristate "Texas Instruments TAS6424 Quad-Channel Audio amplifier"
+ depends on I2C
+ help
+ Enable support for Texas Instruments TAS6424 high-efficiency
+ digital input quad-channel Class-D audio power amplifiers.
+
config SND_SOC_TFA9879
tristate "NXP Semiconductors TFA9879 amplifier"
depends on I2C
@@ -913,12 +940,12 @@ config SND_SOC_TLV320AIC32X4
tristate
config SND_SOC_TLV320AIC32X4_I2C
- tristate
+ tristate "Texas Instruments TLV320AIC32x4 audio CODECs - I2C"
depends on I2C
select SND_SOC_TLV320AIC32X4
config SND_SOC_TLV320AIC32X4_SPI
- tristate
+ tristate "Texas Instruments TLV320AIC32x4 audio CODECs - SPI"
depends on SPI_MASTER
select SND_SOC_TLV320AIC32X4
@@ -933,6 +960,13 @@ config SND_SOC_TS3A227E
tristate "TI Headset/Mic detect and keypress chip"
depends on I2C
+config SND_SOC_TSCS42XX
+ tristate "Tempo Semiconductor TSCS42xx CODEC"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Add support for Tempo Semiconductor's TSCS42xx audio CODEC.
+
config SND_SOC_TWL4030
select MFD_TWL4030_AUDIO
tristate
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index 0001069ce2a7..da1571336f1e 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -90,6 +90,7 @@ snd-soc-max9867-objs := max9867.o
snd-soc-max98925-objs := max98925.o
snd-soc-max98926-objs := max98926.o
snd-soc-max98927-objs := max98927.o
+snd-soc-max98373-objs := max98373.o
snd-soc-max9850-objs := max9850.o
snd-soc-max9860-objs := max9860.o
snd-soc-mc13783-objs := mc13783.o
@@ -105,6 +106,9 @@ snd-soc-pcm1681-objs := pcm1681.o
snd-soc-pcm179x-codec-objs := pcm179x.o
snd-soc-pcm179x-i2c-objs := pcm179x-i2c.o
snd-soc-pcm179x-spi-objs := pcm179x-spi.o
+snd-soc-pcm186x-objs := pcm186x.o
+snd-soc-pcm186x-i2c-objs := pcm186x-i2c.o
+snd-soc-pcm186x-spi-objs := pcm186x-spi.o
snd-soc-pcm3008-objs := pcm3008.o
snd-soc-pcm3168a-objs := pcm3168a.o
snd-soc-pcm3168a-i2c-objs := pcm3168a-i2c.o
@@ -140,7 +144,6 @@ snd-soc-sigmadsp-i2c-objs := sigmadsp-i2c.o
snd-soc-sigmadsp-regmap-objs := sigmadsp-regmap.o
snd-soc-si476x-objs := si476x.o
snd-soc-sirf-audio-codec-objs := sirf-audio-codec.o
-snd-soc-sn95031-objs := sn95031.o
snd-soc-spdif-tx-objs := spdif_transmitter.o
snd-soc-spdif-rx-objs := spdif_receiver.o
snd-soc-ssm2518-objs := ssm2518.o
@@ -156,6 +159,7 @@ snd-soc-sti-sas-objs := sti-sas.o
snd-soc-tas5086-objs := tas5086.o
snd-soc-tas571x-objs := tas571x.o
snd-soc-tas5720-objs := tas5720.o
+snd-soc-tas6424-objs := tas6424.o
snd-soc-tfa9879-objs := tfa9879.o
snd-soc-tlv320aic23-objs := tlv320aic23.o
snd-soc-tlv320aic23-i2c-objs := tlv320aic23-i2c.o
@@ -167,6 +171,7 @@ snd-soc-tlv320aic32x4-i2c-objs := tlv320aic32x4-i2c.o
snd-soc-tlv320aic32x4-spi-objs := tlv320aic32x4-spi.o
snd-soc-tlv320aic3x-objs := tlv320aic3x.o
snd-soc-tlv320dac33-objs := tlv320dac33.o
+snd-soc-tscs42xx-objs := tscs42xx.o
snd-soc-ts3a227e-objs := ts3a227e.o
snd-soc-twl4030-objs := twl4030.o
snd-soc-twl6040-objs := twl6040.o
@@ -330,6 +335,7 @@ obj-$(CONFIG_SND_SOC_MAX9867) += snd-soc-max9867.o
obj-$(CONFIG_SND_SOC_MAX98925) += snd-soc-max98925.o
obj-$(CONFIG_SND_SOC_MAX98926) += snd-soc-max98926.o
obj-$(CONFIG_SND_SOC_MAX98927) += snd-soc-max98927.o
+obj-$(CONFIG_SND_SOC_MAX98373) += snd-soc-max98373.o
obj-$(CONFIG_SND_SOC_MAX9850) += snd-soc-max9850.o
obj-$(CONFIG_SND_SOC_MAX9860) += snd-soc-max9860.o
obj-$(CONFIG_SND_SOC_MC13783) += snd-soc-mc13783.o
@@ -345,6 +351,9 @@ obj-$(CONFIG_SND_SOC_PCM1681) += snd-soc-pcm1681.o
obj-$(CONFIG_SND_SOC_PCM179X) += snd-soc-pcm179x-codec.o
obj-$(CONFIG_SND_SOC_PCM179X_I2C) += snd-soc-pcm179x-i2c.o
obj-$(CONFIG_SND_SOC_PCM179X_SPI) += snd-soc-pcm179x-spi.o
+obj-$(CONFIG_SND_SOC_PCM186X) += snd-soc-pcm186x.o
+obj-$(CONFIG_SND_SOC_PCM186X_I2C) += snd-soc-pcm186x-i2c.o
+obj-$(CONFIG_SND_SOC_PCM186X_SPI) += snd-soc-pcm186x-spi.o
obj-$(CONFIG_SND_SOC_PCM3008) += snd-soc-pcm3008.o
obj-$(CONFIG_SND_SOC_PCM3168A) += snd-soc-pcm3168a.o
obj-$(CONFIG_SND_SOC_PCM3168A_I2C) += snd-soc-pcm3168a-i2c.o
@@ -395,6 +404,7 @@ obj-$(CONFIG_SND_SOC_TAS2552) += snd-soc-tas2552.o
obj-$(CONFIG_SND_SOC_TAS5086) += snd-soc-tas5086.o
obj-$(CONFIG_SND_SOC_TAS571X) += snd-soc-tas571x.o
obj-$(CONFIG_SND_SOC_TAS5720) += snd-soc-tas5720.o
+obj-$(CONFIG_SND_SOC_TAS6424) += snd-soc-tas6424.o
obj-$(CONFIG_SND_SOC_TFA9879) += snd-soc-tfa9879.o
obj-$(CONFIG_SND_SOC_TLV320AIC23) += snd-soc-tlv320aic23.o
obj-$(CONFIG_SND_SOC_TLV320AIC23_I2C) += snd-soc-tlv320aic23-i2c.o
@@ -406,6 +416,7 @@ obj-$(CONFIG_SND_SOC_TLV320AIC32X4_I2C) += snd-soc-tlv320aic32x4-i2c.o
obj-$(CONFIG_SND_SOC_TLV320AIC32X4_SPI) += snd-soc-tlv320aic32x4-spi.o
obj-$(CONFIG_SND_SOC_TLV320AIC3X) += snd-soc-tlv320aic3x.o
obj-$(CONFIG_SND_SOC_TLV320DAC33) += snd-soc-tlv320dac33.o
+obj-$(CONFIG_SND_SOC_TSCS42XX) += snd-soc-tscs42xx.o
obj-$(CONFIG_SND_SOC_TS3A227E) += snd-soc-ts3a227e.o
obj-$(CONFIG_SND_SOC_TWL4030) += snd-soc-twl4030.o
obj-$(CONFIG_SND_SOC_TWL6040) += snd-soc-twl6040.o
diff --git a/sound/soc/codecs/cq93vc.c b/sound/soc/codecs/cq93vc.c
index 6ed2cc374768..3bf93652bb31 100644
--- a/sound/soc/codecs/cq93vc.c
+++ b/sound/soc/codecs/cq93vc.c
@@ -121,17 +121,19 @@ static struct snd_soc_dai_driver cq93vc_dai = {
.ops = &cq93vc_dai_ops,
};
-static struct regmap *cq93vc_get_regmap(struct device *dev)
+static int cq93vc_probe(struct snd_soc_component *component)
{
- struct davinci_vc *davinci_vc = dev->platform_data;
+ struct davinci_vc *davinci_vc = component->dev->platform_data;
- return davinci_vc->regmap;
+ snd_soc_component_init_regmap(component, davinci_vc->regmap);
+
+ return 0;
}
static const struct snd_soc_codec_driver soc_codec_dev_cq93vc = {
.set_bias_level = cq93vc_set_bias_level,
- .get_regmap = cq93vc_get_regmap,
.component_driver = {
+ .probe = cq93vc_probe,
.controls = cq93vc_snd_controls,
.num_controls = ARRAY_SIZE(cq93vc_snd_controls),
},
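atmel-classd, 88pm860x, cq93vc and (below) cs47l24 all migrate off the .get_regmap codec callback: instead of the core calling back into the driver for a regmap, the driver hands its regmap to the core once, from its probe, via snd_soc_codec_init_regmap() or snd_soc_component_init_regmap(). The component-level shape, sketched with hypothetical driver names:

    static int my_component_probe(struct snd_soc_component *component)
    {
            struct my_priv *priv = dev_get_drvdata(component->dev);

            /* register the already-created regmap with the ASoC core */
            snd_soc_component_init_regmap(component, priv->regmap);

            return 0;
    }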
diff --git a/sound/soc/codecs/cs35l32.c b/sound/soc/codecs/cs35l32.c
index 7e9806206648..bc3a72e4c4ed 100644
--- a/sound/soc/codecs/cs35l32.c
+++ b/sound/soc/codecs/cs35l32.c
@@ -355,13 +355,9 @@ static int cs35l32_i2c_probe(struct i2c_client *i2c_client,
unsigned int devid = 0;
unsigned int reg;
-
- cs35l32 = devm_kzalloc(&i2c_client->dev, sizeof(struct cs35l32_private),
- GFP_KERNEL);
- if (!cs35l32) {
- dev_err(&i2c_client->dev, "could not allocate codec\n");
+ cs35l32 = devm_kzalloc(&i2c_client->dev, sizeof(*cs35l32), GFP_KERNEL);
+ if (!cs35l32)
return -ENOMEM;
- }
i2c_set_clientdata(i2c_client, cs35l32);
@@ -375,13 +371,11 @@ static int cs35l32_i2c_probe(struct i2c_client *i2c_client,
if (pdata) {
cs35l32->pdata = *pdata;
} else {
- pdata = devm_kzalloc(&i2c_client->dev,
- sizeof(struct cs35l32_platform_data),
- GFP_KERNEL);
- if (!pdata) {
- dev_err(&i2c_client->dev, "could not allocate pdata\n");
+ pdata = devm_kzalloc(&i2c_client->dev, sizeof(*pdata),
+ GFP_KERNEL);
+ if (!pdata)
return -ENOMEM;
- }
+
if (i2c_client->dev.of_node) {
ret = cs35l32_handle_of_data(i2c_client,
&cs35l32->pdata);
diff --git a/sound/soc/codecs/cs35l34.c b/sound/soc/codecs/cs35l34.c
index 1e05026bedca..0600d5264c4c 100644
--- a/sound/soc/codecs/cs35l34.c
+++ b/sound/soc/codecs/cs35l34.c
@@ -1004,13 +1004,9 @@ static int cs35l34_i2c_probe(struct i2c_client *i2c_client,
unsigned int devid = 0;
unsigned int reg;
- cs35l34 = devm_kzalloc(&i2c_client->dev,
- sizeof(struct cs35l34_private),
- GFP_KERNEL);
- if (!cs35l34) {
- dev_err(&i2c_client->dev, "could not allocate codec\n");
+ cs35l34 = devm_kzalloc(&i2c_client->dev, sizeof(*cs35l34), GFP_KERNEL);
+ if (!cs35l34)
return -ENOMEM;
- }
i2c_set_clientdata(i2c_client, cs35l34);
cs35l34->regmap = devm_regmap_init_i2c(i2c_client, &cs35l34_regmap);
@@ -1044,14 +1040,11 @@ static int cs35l34_i2c_probe(struct i2c_client *i2c_client,
if (pdata) {
cs35l34->pdata = *pdata;
} else {
- pdata = devm_kzalloc(&i2c_client->dev,
- sizeof(struct cs35l34_platform_data),
- GFP_KERNEL);
- if (!pdata) {
- dev_err(&i2c_client->dev,
- "could not allocate pdata\n");
+ pdata = devm_kzalloc(&i2c_client->dev, sizeof(*pdata),
+ GFP_KERNEL);
+ if (!pdata)
return -ENOMEM;
- }
+
if (i2c_client->dev.of_node) {
ret = cs35l34_handle_of_data(i2c_client, pdata);
if (ret != 0)
diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c
index 0d9c4a57301b..9731e5dff291 100644
--- a/sound/soc/codecs/cs42l52.c
+++ b/sound/soc/codecs/cs42l52.c
@@ -1100,8 +1100,7 @@ static int cs42l52_i2c_probe(struct i2c_client *i2c_client,
unsigned int reg;
u32 val32;
- cs42l52 = devm_kzalloc(&i2c_client->dev, sizeof(struct cs42l52_private),
- GFP_KERNEL);
+ cs42l52 = devm_kzalloc(&i2c_client->dev, sizeof(*cs42l52), GFP_KERNEL);
if (cs42l52 == NULL)
return -ENOMEM;
cs42l52->dev = &i2c_client->dev;
@@ -1115,13 +1114,11 @@ static int cs42l52_i2c_probe(struct i2c_client *i2c_client,
if (pdata) {
cs42l52->pdata = *pdata;
} else {
- pdata = devm_kzalloc(&i2c_client->dev,
- sizeof(struct cs42l52_platform_data),
- GFP_KERNEL);
- if (!pdata) {
- dev_err(&i2c_client->dev, "could not allocate pdata\n");
+ pdata = devm_kzalloc(&i2c_client->dev, sizeof(*pdata),
+ GFP_KERNEL);
+ if (!pdata)
return -ENOMEM;
- }
+
if (i2c_client->dev.of_node) {
if (of_property_read_bool(i2c_client->dev.of_node,
"cirrus,mica-differential-cfg"))
diff --git a/sound/soc/codecs/cs42l56.c b/sound/soc/codecs/cs42l56.c
index cb6ca85f1536..fd7b8d32c2b2 100644
--- a/sound/soc/codecs/cs42l56.c
+++ b/sound/soc/codecs/cs42l56.c
@@ -1190,9 +1190,7 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client,
unsigned int alpha_rev, metal_rev;
unsigned int reg;
- cs42l56 = devm_kzalloc(&i2c_client->dev,
- sizeof(struct cs42l56_private),
- GFP_KERNEL);
+ cs42l56 = devm_kzalloc(&i2c_client->dev, sizeof(*cs42l56), GFP_KERNEL);
if (cs42l56 == NULL)
return -ENOMEM;
cs42l56->dev = &i2c_client->dev;
@@ -1207,14 +1205,11 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client,
if (pdata) {
cs42l56->pdata = *pdata;
} else {
- pdata = devm_kzalloc(&i2c_client->dev,
- sizeof(struct cs42l56_platform_data),
+ pdata = devm_kzalloc(&i2c_client->dev, sizeof(*pdata),
GFP_KERNEL);
- if (!pdata) {
- dev_err(&i2c_client->dev,
- "could not allocate pdata\n");
+ if (!pdata)
return -ENOMEM;
- }
+
if (i2c_client->dev.of_node) {
ret = cs42l56_handle_of_data(i2c_client,
&cs42l56->pdata);
diff --git a/sound/soc/codecs/cs42l73.c b/sound/soc/codecs/cs42l73.c
index 3df2c473ab88..aebaa97490b6 100644
--- a/sound/soc/codecs/cs42l73.c
+++ b/sound/soc/codecs/cs42l73.c
@@ -1289,8 +1289,7 @@ static int cs42l73_i2c_probe(struct i2c_client *i2c_client,
unsigned int reg;
u32 val32;
- cs42l73 = devm_kzalloc(&i2c_client->dev, sizeof(struct cs42l73_private),
- GFP_KERNEL);
+ cs42l73 = devm_kzalloc(&i2c_client->dev, sizeof(*cs42l73), GFP_KERNEL);
if (!cs42l73)
return -ENOMEM;
@@ -1304,13 +1303,11 @@ static int cs42l73_i2c_probe(struct i2c_client *i2c_client,
if (pdata) {
cs42l73->pdata = *pdata;
} else {
- pdata = devm_kzalloc(&i2c_client->dev,
- sizeof(struct cs42l73_platform_data),
- GFP_KERNEL);
- if (!pdata) {
- dev_err(&i2c_client->dev, "could not allocate pdata\n");
+ pdata = devm_kzalloc(&i2c_client->dev, sizeof(*pdata),
+ GFP_KERNEL);
+ if (!pdata)
return -ENOMEM;
- }
+
if (i2c_client->dev.of_node) {
if (of_property_read_u32(i2c_client->dev.of_node,
"chgfreq", &val32) >= 0)
@@ -1358,7 +1355,7 @@ static int cs42l73_i2c_probe(struct i2c_client *i2c_client,
ret = regmap_read(cs42l73->regmap, CS42L73_REVID, &reg);
if (ret < 0) {
dev_err(&i2c_client->dev, "Get Revision ID failed\n");
- return ret;;
+ return ret;
}
dev_info(&i2c_client->dev,
diff --git a/sound/soc/codecs/cs47l24.c b/sound/soc/codecs/cs47l24.c
index 94c0209977d0..be2750680838 100644
--- a/sound/soc/codecs/cs47l24.c
+++ b/sound/soc/codecs/cs47l24.c
@@ -1120,9 +1120,11 @@ static int cs47l24_codec_probe(struct snd_soc_codec *codec)
struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
struct cs47l24_priv *priv = snd_soc_codec_get_drvdata(codec);
+ struct arizona *arizona = priv->core.arizona;
int ret;
- priv->core.arizona->dapm = dapm;
+ arizona->dapm = dapm;
+ snd_soc_codec_init_regmap(codec, arizona->regmap);
ret = arizona_init_spk(codec);
if (ret < 0)
@@ -1175,17 +1177,9 @@ static unsigned int cs47l24_digital_vu[] = {
ARIZONA_DAC_DIGITAL_VOLUME_4L,
};
-static struct regmap *cs47l24_get_regmap(struct device *dev)
-{
- struct cs47l24_priv *priv = dev_get_drvdata(dev);
-
- return priv->core.arizona->regmap;
-}
-
static const struct snd_soc_codec_driver soc_codec_dev_cs47l24 = {
.probe = cs47l24_codec_probe,
.remove = cs47l24_codec_remove,
- .get_regmap = cs47l24_get_regmap,
.idle_bias_off = true,
diff --git a/sound/soc/codecs/cx20442.c b/sound/soc/codecs/cx20442.c
index 46b1fbb66eba..95bb10ba80dc 100644
--- a/sound/soc/codecs/cx20442.c
+++ b/sound/soc/codecs/cx20442.c
@@ -26,8 +26,9 @@
struct cx20442_priv {
- void *control_data;
+ struct tty_struct *tty;
struct regulator *por;
+ u8 reg_cache;
};
#define CX20442_PM 0x0
@@ -89,14 +90,14 @@ static const struct snd_soc_dapm_route cx20442_audio_map[] = {
};
static unsigned int cx20442_read_reg_cache(struct snd_soc_codec *codec,
- unsigned int reg)
+ unsigned int reg)
{
- u8 *reg_cache = codec->reg_cache;
+ struct cx20442_priv *cx20442 = snd_soc_codec_get_drvdata(codec);
- if (reg >= codec->driver->reg_cache_size)
+ if (reg >= 1)
return -EINVAL;
- return reg_cache[reg];
+ return cx20442->reg_cache;
}
enum v253_vls {
@@ -156,20 +157,19 @@ static int cx20442_write(struct snd_soc_codec *codec, unsigned int reg,
unsigned int value)
{
struct cx20442_priv *cx20442 = snd_soc_codec_get_drvdata(codec);
- u8 *reg_cache = codec->reg_cache;
int vls, vsp, old, len;
char buf[18];
- if (reg >= codec->driver->reg_cache_size)
+ if (reg >= 1)
return -EINVAL;
- /* hw_write and control_data pointers required for talking to the modem
+ /* tty and write pointers required for talking to the modem
* are expected to be set by the line discipline initialization code */
- if (!codec->hw_write || !cx20442->control_data)
+ if (!cx20442->tty || !cx20442->tty->ops->write)
return -EIO;
- old = reg_cache[reg];
- reg_cache[reg] = value;
+ old = cx20442->reg_cache;
+ cx20442->reg_cache = value;
vls = cx20442_pm_to_v253_vls(value);
if (vls < 0)
@@ -194,13 +194,12 @@ static int cx20442_write(struct snd_soc_codec *codec, unsigned int reg,
return -ENOMEM;
dev_dbg(codec->dev, "%s: %s\n", __func__, buf);
- if (codec->hw_write(cx20442->control_data, buf, len) != len)
+ if (cx20442->tty->ops->write(cx20442->tty, buf, len) != len)
return -EIO;
return 0;
}
-
/*
 * Line discipline related code
*
@@ -252,8 +251,7 @@ static void v253_close(struct tty_struct *tty)
cx20442 = snd_soc_codec_get_drvdata(codec);
/* Prevent the codec driver from further accessing the modem */
- codec->hw_write = NULL;
- cx20442->control_data = NULL;
+ cx20442->tty = NULL;
codec->component.card->pop_time = 0;
}
@@ -276,12 +274,11 @@ static void v253_receive(struct tty_struct *tty,
cx20442 = snd_soc_codec_get_drvdata(codec);
- if (!cx20442->control_data) {
+ if (!cx20442->tty) {
/* First modem response, complete setup procedure */
/* Set up codec driver access to modem controls */
- cx20442->control_data = tty;
- codec->hw_write = (hw_write_t)tty->ops->write;
+ cx20442->tty = tty;
codec->component.card->pop_time = 1;
}
}
@@ -367,10 +364,9 @@ static int cx20442_codec_probe(struct snd_soc_codec *codec)
cx20442->por = regulator_get(codec->dev, "POR");
if (IS_ERR(cx20442->por))
dev_warn(codec->dev, "failed to get the regulator");
- cx20442->control_data = NULL;
+ cx20442->tty = NULL;
snd_soc_codec_set_drvdata(codec, cx20442);
- codec->hw_write = NULL;
codec->component.card->pop_time = 0;
return 0;
@@ -381,8 +377,8 @@ static int cx20442_codec_remove(struct snd_soc_codec *codec)
{
struct cx20442_priv *cx20442 = snd_soc_codec_get_drvdata(codec);
- if (cx20442->control_data) {
- struct tty_struct *tty = cx20442->control_data;
+ if (cx20442->tty) {
+ struct tty_struct *tty = cx20442->tty;
tty_hangup(tty);
}
@@ -396,17 +392,13 @@ static int cx20442_codec_remove(struct snd_soc_codec *codec)
return 0;
}
-static const u8 cx20442_reg;
-
static const struct snd_soc_codec_driver cx20442_codec_dev = {
.probe = cx20442_codec_probe,
.remove = cx20442_codec_remove,
.set_bias_level = cx20442_set_bias_level,
- .reg_cache_default = &cx20442_reg,
- .reg_cache_size = 1,
- .reg_word_size = sizeof(u8),
.read = cx20442_read_reg_cache,
.write = cx20442_write,
+
.component_driver = {
.dapm_widgets = cx20442_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(cx20442_dapm_widgets),
diff --git a/sound/soc/codecs/da7213.c b/sound/soc/codecs/da7213.c
index 41d9b1da27c2..b2b4e90fc02a 100644
--- a/sound/soc/codecs/da7213.c
+++ b/sound/soc/codecs/da7213.c
@@ -1654,10 +1654,8 @@ static struct da7213_platform_data
u32 fw_val32;
pdata = devm_kzalloc(codec->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- dev_warn(codec->dev, "Failed to allocate memory for pdata\n");
+ if (!pdata)
return NULL;
- }
if (device_property_read_u32(dev, "dlg,micbias1-lvl", &fw_val32) >= 0)
pdata->micbias1_lvl = da7213_of_micbias_lvl(codec, fw_val32);
@@ -1855,8 +1853,7 @@ static int da7213_i2c_probe(struct i2c_client *i2c,
struct da7213_priv *da7213;
int ret;
- da7213 = devm_kzalloc(&i2c->dev, sizeof(struct da7213_priv),
- GFP_KERNEL);
+ da7213 = devm_kzalloc(&i2c->dev, sizeof(*da7213), GFP_KERNEL);
if (!da7213)
return -ENOMEM;
diff --git a/sound/soc/codecs/da7218.c b/sound/soc/codecs/da7218.c
index 56564ce90cb6..96c644a15b11 100644
--- a/sound/soc/codecs/da7218.c
+++ b/sound/soc/codecs/da7218.c
@@ -2455,10 +2455,8 @@ static struct da7218_pdata *da7218_of_to_pdata(struct snd_soc_codec *codec)
u32 of_val32;
pdata = devm_kzalloc(codec->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- dev_warn(codec->dev, "Failed to allocate memory for pdata\n");
+ if (!pdata)
return NULL;
- }
if (of_property_read_u32(np, "dlg,micbias1-lvl-millivolt", &of_val32) >= 0)
pdata->micbias1_lvl = da7218_of_micbias_lvl(codec, of_val32);
@@ -2527,8 +2525,6 @@ static struct da7218_pdata *da7218_of_to_pdata(struct snd_soc_codec *codec)
hpldet_pdata = devm_kzalloc(codec->dev, sizeof(*hpldet_pdata),
GFP_KERNEL);
if (!hpldet_pdata) {
- dev_warn(codec->dev,
- "Failed to allocate memory for hpldet pdata\n");
of_node_put(hpldet_np);
return pdata;
}
@@ -3273,8 +3269,7 @@ static int da7218_i2c_probe(struct i2c_client *i2c,
struct da7218_priv *da7218;
int ret;
- da7218 = devm_kzalloc(&i2c->dev, sizeof(struct da7218_priv),
- GFP_KERNEL);
+ da7218 = devm_kzalloc(&i2c->dev, sizeof(*da7218), GFP_KERNEL);
if (!da7218)
return -ENOMEM;
diff --git a/sound/soc/codecs/dmic.c b/sound/soc/codecs/dmic.c
index b88a1ee66f80..c88f974ebe3e 100644
--- a/sound/soc/codecs/dmic.c
+++ b/sound/soc/codecs/dmic.c
@@ -107,8 +107,30 @@ static const struct snd_soc_codec_driver soc_dmic = {
static int dmic_dev_probe(struct platform_device *pdev)
{
+ int err;
+ u32 chans;
+ struct snd_soc_dai_driver *dai_drv = &dmic_dai;
+
+ if (pdev->dev.of_node) {
+ err = of_property_read_u32(pdev->dev.of_node, "num-channels", &chans);
+ if (err && (err != -ENOENT))
+ return err;
+
+ if (!err) {
+ if (chans < 1 || chans > 8)
+ return -EINVAL;
+
+ dai_drv = devm_kzalloc(&pdev->dev, sizeof(*dai_drv), GFP_KERNEL);
+ if (!dai_drv)
+ return -ENOMEM;
+
+ memcpy(dai_drv, &dmic_dai, sizeof(*dai_drv));
+ dai_drv->capture.channels_max = chans;
+ }
+ }
+
return snd_soc_register_codec(&pdev->dev,
- &soc_dmic, &dmic_dai, 1);
+ &soc_dmic, dai_drv, 1);
}
static int dmic_dev_remove(struct platform_device *pdev)
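The dmic probe gains an optional num-channels DT property: when present and in range, it overrides the default DAI template's capture.channels_max. Note the clone-before-patch step — the static template is shared across devices, so it must never be written through directly. A sketch using devm_kmemdup() (equivalent to the patch's devm_kzalloc() plus memcpy()):

    struct snd_soc_dai_driver *dai_drv = &dmic_dai;  /* shared default */
    u32 chans;

    if (!of_property_read_u32(pdev->dev.of_node, "num-channels", &chans)) {
            if (chans < 1 || chans > 8)
                    return -EINVAL;

            /* copy the template, then adjust only this device's copy */
            dai_drv = devm_kmemdup(&pdev->dev, &dmic_dai,
                                   sizeof(dmic_dai), GFP_KERNEL);
            if (!dai_drv)
                    return -ENOMEM;
            dai_drv->capture.channels_max = chans;
    }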
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
index f3b4f4dfae6a..dba6f4c5074a 100644
--- a/sound/soc/codecs/hdac_hdmi.c
+++ b/sound/soc/codecs/hdac_hdmi.c
@@ -136,8 +136,11 @@ struct hdac_hdmi_priv {
struct mutex pin_mutex;
struct hdac_chmap chmap;
struct hdac_hdmi_drv_data *drv_data;
+ struct snd_soc_dai_driver *dai_drv;
};
+#define hdev_to_hdmi_priv(_hdev) ((to_ehdac_device(_hdev))->private_data)
+
static struct hdac_hdmi_pcm *
hdac_hdmi_get_pcm_from_cvt(struct hdac_hdmi_priv *hdmi,
struct hdac_hdmi_cvt *cvt)
@@ -169,7 +172,7 @@ static void hdac_hdmi_jack_report(struct hdac_hdmi_pcm *pcm,
* ports.
*/
if (pcm->jack_event == 0) {
- dev_dbg(&edev->hdac.dev,
+ dev_dbg(&edev->hdev.dev,
"jack report for pcm=%d\n",
pcm->pcm_id);
snd_soc_jack_report(pcm->jack, SND_JACK_AVOUT,
@@ -195,18 +198,18 @@ static void hdac_hdmi_jack_report(struct hdac_hdmi_pcm *pcm,
/*
 * Get the number of devices that can be connected to a port on the Pin widget.
*/
-static int hdac_hdmi_get_port_len(struct hdac_ext_device *hdac, hda_nid_t nid)
+static int hdac_hdmi_get_port_len(struct hdac_ext_device *edev, hda_nid_t nid)
{
unsigned int caps;
unsigned int type, param;
- caps = get_wcaps(&hdac->hdac, nid);
+ caps = get_wcaps(&edev->hdev, nid);
type = get_wcaps_type(caps);
if (!(caps & AC_WCAP_DIGITAL) || (type != AC_WID_PIN))
return 0;
- param = snd_hdac_read_parm_uncached(&hdac->hdac, nid,
+ param = snd_hdac_read_parm_uncached(&edev->hdev, nid,
AC_PAR_DEVLIST_LEN);
if (param == -1)
return param;
@@ -219,10 +222,10 @@ static int hdac_hdmi_get_port_len(struct hdac_ext_device *hdac, hda_nid_t nid)
* id selected on the pin. Return 0 means the first port entry
* is selected or MST is not supported.
*/
-static int hdac_hdmi_port_select_get(struct hdac_ext_device *hdac,
+static int hdac_hdmi_port_select_get(struct hdac_ext_device *edev,
struct hdac_hdmi_port *port)
{
- return snd_hdac_codec_read(&hdac->hdac, port->pin->nid,
+ return snd_hdac_codec_read(&edev->hdev, port->pin->nid,
0, AC_VERB_GET_DEVICE_SEL, 0);
}
@@ -230,7 +233,7 @@ static int hdac_hdmi_port_select_get(struct hdac_ext_device *hdac,
* Sets the selected port entry for the configuring Pin widget verb.
 * Returns an error if the port read back does not match the port set, otherwise success.
*/
-static int hdac_hdmi_port_select_set(struct hdac_ext_device *hdac,
+static int hdac_hdmi_port_select_set(struct hdac_ext_device *edev,
struct hdac_hdmi_port *port)
{
int num_ports;
@@ -239,7 +242,7 @@ static int hdac_hdmi_port_select_set(struct hdac_ext_device *hdac,
return 0;
/* AC_PAR_DEVLIST_LEN is 0 based. */
- num_ports = hdac_hdmi_get_port_len(hdac, port->pin->nid);
+ num_ports = hdac_hdmi_get_port_len(edev, port->pin->nid);
if (num_ports < 0)
return -EIO;
@@ -250,13 +253,13 @@ static int hdac_hdmi_port_select_set(struct hdac_ext_device *hdac,
if (num_ports + 1 < port->id)
return 0;
- snd_hdac_codec_write(&hdac->hdac, port->pin->nid, 0,
+ snd_hdac_codec_write(&edev->hdev, port->pin->nid, 0,
AC_VERB_SET_DEVICE_SEL, port->id);
- if (port->id != hdac_hdmi_port_select_get(hdac, port))
+ if (port->id != hdac_hdmi_port_select_get(edev, port))
return -EIO;
- dev_dbg(&hdac->hdac.dev, "Selected the port=%d\n", port->id);
+ dev_dbg(&edev->hdev.dev, "Selected the port=%d\n", port->id);
return 0;
}
@@ -276,9 +279,9 @@ static struct hdac_hdmi_pcm *get_hdmi_pcm_from_id(struct hdac_hdmi_priv *hdmi,
static inline struct hdac_ext_device *to_hda_ext_device(struct device *dev)
{
- struct hdac_device *hdac = dev_to_hdac_dev(dev);
+ struct hdac_device *hdev = dev_to_hdac_dev(dev);
- return to_ehdac_device(hdac);
+ return to_ehdac_device(hdev);
}
static unsigned int sad_format(const u8 *sad)
@@ -321,14 +324,14 @@ format_constraint:
}
static void
-hdac_hdmi_set_dip_index(struct hdac_ext_device *hdac, hda_nid_t pin_nid,
+hdac_hdmi_set_dip_index(struct hdac_ext_device *edev, hda_nid_t pin_nid,
int packet_index, int byte_index)
{
int val;
val = (packet_index << 5) | (byte_index & 0x1f);
- snd_hdac_codec_write(&hdac->hdac, pin_nid, 0,
+ snd_hdac_codec_write(&edev->hdev, pin_nid, 0,
AC_VERB_SET_HDMI_DIP_INDEX, val);
}
@@ -344,14 +347,14 @@ struct dp_audio_infoframe {
u8 LFEPBL01_LSV36_DM_INH7;
};
-static int hdac_hdmi_setup_audio_infoframe(struct hdac_ext_device *hdac,
+static int hdac_hdmi_setup_audio_infoframe(struct hdac_ext_device *edev,
struct hdac_hdmi_pcm *pcm, struct hdac_hdmi_port *port)
{
uint8_t buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AUDIO_INFOFRAME_SIZE];
struct hdmi_audio_infoframe frame;
struct hdac_hdmi_pin *pin = port->pin;
struct dp_audio_infoframe dp_ai;
- struct hdac_hdmi_priv *hdmi = hdac->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
struct hdac_hdmi_cvt *cvt = pcm->cvt;
u8 *dip;
int ret;
@@ -360,11 +363,11 @@ static int hdac_hdmi_setup_audio_infoframe(struct hdac_ext_device *hdac,
u8 conn_type;
int channels, ca;
- ca = snd_hdac_channel_allocation(&hdac->hdac, port->eld.info.spk_alloc,
+ ca = snd_hdac_channel_allocation(&edev->hdev, port->eld.info.spk_alloc,
pcm->channels, pcm->chmap_set, true, pcm->chmap);
channels = snd_hdac_get_active_channels(ca);
- hdmi->chmap.ops.set_channel_count(&hdac->hdac, cvt->nid, channels);
+ hdmi->chmap.ops.set_channel_count(&edev->hdev, cvt->nid, channels);
snd_hdac_setup_channel_mapping(&hdmi->chmap, pin->nid, false, ca,
pcm->channels, pcm->chmap, pcm->chmap_set);
@@ -397,32 +400,32 @@ static int hdac_hdmi_setup_audio_infoframe(struct hdac_ext_device *hdac,
break;
default:
- dev_err(&hdac->hdac.dev, "Invalid connection type: %d\n",
+ dev_err(&edev->hdev.dev, "Invalid connection type: %d\n",
conn_type);
return -EIO;
}
/* stop infoframe transmission */
- hdac_hdmi_set_dip_index(hdac, pin->nid, 0x0, 0x0);
- snd_hdac_codec_write(&hdac->hdac, pin->nid, 0,
+ hdac_hdmi_set_dip_index(edev, pin->nid, 0x0, 0x0);
+ snd_hdac_codec_write(&edev->hdev, pin->nid, 0,
AC_VERB_SET_HDMI_DIP_XMIT, AC_DIPXMIT_DISABLE);
/* Fill infoframe. Index auto-incremented */
- hdac_hdmi_set_dip_index(hdac, pin->nid, 0x0, 0x0);
+ hdac_hdmi_set_dip_index(edev, pin->nid, 0x0, 0x0);
if (conn_type == DRM_ELD_CONN_TYPE_HDMI) {
for (i = 0; i < sizeof(buffer); i++)
- snd_hdac_codec_write(&hdac->hdac, pin->nid, 0,
+ snd_hdac_codec_write(&edev->hdev, pin->nid, 0,
AC_VERB_SET_HDMI_DIP_DATA, buffer[i]);
} else {
for (i = 0; i < sizeof(dp_ai); i++)
- snd_hdac_codec_write(&hdac->hdac, pin->nid, 0,
+ snd_hdac_codec_write(&edev->hdev, pin->nid, 0,
AC_VERB_SET_HDMI_DIP_DATA, dip[i]);
}
/* Start infoframe */
- hdac_hdmi_set_dip_index(hdac, pin->nid, 0x0, 0x0);
- snd_hdac_codec_write(&hdac->hdac, pin->nid, 0,
+ hdac_hdmi_set_dip_index(edev, pin->nid, 0x0, 0x0);
+ snd_hdac_codec_write(&edev->hdev, pin->nid, 0,
AC_VERB_SET_HDMI_DIP_XMIT, AC_DIPXMIT_BEST);
return 0;
@@ -433,11 +436,11 @@ static int hdac_hdmi_set_tdm_slot(struct snd_soc_dai *dai,
int slots, int slot_width)
{
struct hdac_ext_device *edev = snd_soc_dai_get_drvdata(dai);
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
struct hdac_hdmi_dai_port_map *dai_map;
struct hdac_hdmi_pcm *pcm;
- dev_dbg(&edev->hdac.dev, "%s: strm_tag: %d\n", __func__, tx_mask);
+ dev_dbg(&edev->hdev.dev, "%s: strm_tag: %d\n", __func__, tx_mask);
dai_map = &hdmi->dai_map[dai->id];
@@ -452,8 +455,8 @@ static int hdac_hdmi_set_tdm_slot(struct snd_soc_dai *dai,
static int hdac_hdmi_set_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hparams, struct snd_soc_dai *dai)
{
- struct hdac_ext_device *hdac = snd_soc_dai_get_drvdata(dai);
- struct hdac_hdmi_priv *hdmi = hdac->private_data;
+ struct hdac_ext_device *edev = snd_soc_dai_get_drvdata(dai);
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
struct hdac_hdmi_dai_port_map *dai_map;
struct hdac_hdmi_port *port;
struct hdac_hdmi_pcm *pcm;
@@ -466,7 +469,7 @@ static int hdac_hdmi_set_hw_params(struct snd_pcm_substream *substream,
return -ENODEV;
if ((!port->eld.monitor_present) || (!port->eld.eld_valid)) {
- dev_err(&hdac->hdac.dev,
+ dev_err(&edev->hdev.dev,
"device is not configured for this pin:port%d:%d\n",
port->pin->nid, port->id);
return -ENODEV;
@@ -486,28 +489,28 @@ static int hdac_hdmi_set_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int hdac_hdmi_query_port_connlist(struct hdac_ext_device *hdac,
+static int hdac_hdmi_query_port_connlist(struct hdac_ext_device *edev,
struct hdac_hdmi_pin *pin,
struct hdac_hdmi_port *port)
{
- if (!(get_wcaps(&hdac->hdac, pin->nid) & AC_WCAP_CONN_LIST)) {
- dev_warn(&hdac->hdac.dev,
+ if (!(get_wcaps(&edev->hdev, pin->nid) & AC_WCAP_CONN_LIST)) {
+ dev_warn(&edev->hdev.dev,
"HDMI: pin %d wcaps %#x does not support connection list\n",
- pin->nid, get_wcaps(&hdac->hdac, pin->nid));
+ pin->nid, get_wcaps(&edev->hdev, pin->nid));
return -EINVAL;
}
- if (hdac_hdmi_port_select_set(hdac, port) < 0)
+ if (hdac_hdmi_port_select_set(edev, port) < 0)
return -EIO;
- port->num_mux_nids = snd_hdac_get_connections(&hdac->hdac, pin->nid,
+ port->num_mux_nids = snd_hdac_get_connections(&edev->hdev, pin->nid,
port->mux_nids, HDA_MAX_CONNECTIONS);
if (port->num_mux_nids == 0)
- dev_warn(&hdac->hdac.dev,
+ dev_warn(&edev->hdev.dev,
"No connections found for pin:port %d:%d\n",
pin->nid, port->id);
- dev_dbg(&hdac->hdac.dev, "num_mux_nids %d for pin:port %d:%d\n",
+ dev_dbg(&edev->hdev.dev, "num_mux_nids %d for pin:port %d:%d\n",
port->num_mux_nids, pin->nid, port->id);
return port->num_mux_nids;
@@ -565,8 +568,8 @@ static struct hdac_hdmi_port *hdac_hdmi_get_port_from_cvt(
static int hdac_hdmi_pcm_open(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
- struct hdac_ext_device *hdac = snd_soc_dai_get_drvdata(dai);
- struct hdac_hdmi_priv *hdmi = hdac->private_data;
+ struct hdac_ext_device *edev = snd_soc_dai_get_drvdata(dai);
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
struct hdac_hdmi_dai_port_map *dai_map;
struct hdac_hdmi_cvt *cvt;
struct hdac_hdmi_port *port;
@@ -575,7 +578,7 @@ static int hdac_hdmi_pcm_open(struct snd_pcm_substream *substream,
dai_map = &hdmi->dai_map[dai->id];
cvt = dai_map->cvt;
- port = hdac_hdmi_get_port_from_cvt(hdac, hdmi, cvt);
+ port = hdac_hdmi_get_port_from_cvt(edev, hdmi, cvt);
/*
* To make PA and other userland happy.
@@ -586,7 +589,7 @@ static int hdac_hdmi_pcm_open(struct snd_pcm_substream *substream,
if ((!port->eld.monitor_present) ||
(!port->eld.eld_valid)) {
- dev_warn(&hdac->hdac.dev,
+ dev_warn(&edev->hdev.dev,
"Failed: present?:%d ELD valid?:%d pin:port: %d:%d\n",
port->eld.monitor_present, port->eld.eld_valid,
port->pin->nid, port->id);
@@ -608,8 +611,8 @@ static int hdac_hdmi_pcm_open(struct snd_pcm_substream *substream,
static void hdac_hdmi_pcm_close(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
- struct hdac_ext_device *hdac = snd_soc_dai_get_drvdata(dai);
- struct hdac_hdmi_priv *hdmi = hdac->private_data;
+ struct hdac_ext_device *edev = snd_soc_dai_get_drvdata(dai);
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
struct hdac_hdmi_dai_port_map *dai_map;
struct hdac_hdmi_pcm *pcm;
@@ -630,14 +633,13 @@ static void hdac_hdmi_pcm_close(struct snd_pcm_substream *substream,
}
static int
-hdac_hdmi_query_cvt_params(struct hdac_device *hdac, struct hdac_hdmi_cvt *cvt)
+hdac_hdmi_query_cvt_params(struct hdac_device *hdev, struct hdac_hdmi_cvt *cvt)
{
unsigned int chans;
- struct hdac_ext_device *edev = to_ehdac_device(hdac);
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
int err;
- chans = get_wcaps(hdac, cvt->nid);
+ chans = get_wcaps(hdev, cvt->nid);
chans = get_wcaps_channels(chans);
cvt->params.channels_min = 2;
@@ -646,12 +648,12 @@ hdac_hdmi_query_cvt_params(struct hdac_device *hdac, struct hdac_hdmi_cvt *cvt)
if (chans > hdmi->chmap.channels_max)
hdmi->chmap.channels_max = chans;
- err = snd_hdac_query_supported_pcm(hdac, cvt->nid,
+ err = snd_hdac_query_supported_pcm(hdev, cvt->nid,
&cvt->params.rates,
&cvt->params.formats,
&cvt->params.maxbps);
if (err < 0)
- dev_err(&hdac->dev,
+ dev_err(&hdev->dev,
"Failed to query pcm params for nid %d: %d\n",
cvt->nid, err);
@@ -696,7 +698,7 @@ static void hdac_hdmi_fill_route(struct snd_soc_dapm_route *route,
static struct hdac_hdmi_pcm *hdac_hdmi_get_pcm(struct hdac_ext_device *edev,
struct hdac_hdmi_port *port)
{
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
struct hdac_hdmi_pcm *pcm = NULL;
struct hdac_hdmi_port *p;
@@ -716,9 +718,9 @@ static struct hdac_hdmi_pcm *hdac_hdmi_get_pcm(struct hdac_ext_device *edev,
static void hdac_hdmi_set_power_state(struct hdac_ext_device *edev,
hda_nid_t nid, unsigned int pwr_state)
{
- if (get_wcaps(&edev->hdac, nid) & AC_WCAP_POWER) {
- if (!snd_hdac_check_power_state(&edev->hdac, nid, pwr_state))
- snd_hdac_codec_write(&edev->hdac, nid, 0,
+ if (get_wcaps(&edev->hdev, nid) & AC_WCAP_POWER) {
+ if (!snd_hdac_check_power_state(&edev->hdev, nid, pwr_state))
+ snd_hdac_codec_write(&edev->hdev, nid, 0,
AC_VERB_SET_POWER_STATE, pwr_state);
}
}
@@ -726,8 +728,8 @@ static void hdac_hdmi_set_power_state(struct hdac_ext_device *edev,
static void hdac_hdmi_set_amp(struct hdac_ext_device *edev,
hda_nid_t nid, int val)
{
- if (get_wcaps(&edev->hdac, nid) & AC_WCAP_OUT_AMP)
- snd_hdac_codec_write(&edev->hdac, nid, 0,
+ if (get_wcaps(&edev->hdev, nid) & AC_WCAP_OUT_AMP)
+ snd_hdac_codec_write(&edev->hdev, nid, 0,
AC_VERB_SET_AMP_GAIN_MUTE, val);
}
@@ -739,7 +741,7 @@ static int hdac_hdmi_pin_output_widget_event(struct snd_soc_dapm_widget *w,
struct hdac_ext_device *edev = to_hda_ext_device(w->dapm->dev);
struct hdac_hdmi_pcm *pcm;
- dev_dbg(&edev->hdac.dev, "%s: widget: %s event: %x\n",
+ dev_dbg(&edev->hdev.dev, "%s: widget: %s event: %x\n",
__func__, w->name, event);
pcm = hdac_hdmi_get_pcm(edev, port);
@@ -755,7 +757,7 @@ static int hdac_hdmi_pin_output_widget_event(struct snd_soc_dapm_widget *w,
hdac_hdmi_set_power_state(edev, port->pin->nid, AC_PWRST_D0);
/* Enable out path for this pin widget */
- snd_hdac_codec_write(&edev->hdac, port->pin->nid, 0,
+ snd_hdac_codec_write(&edev->hdev, port->pin->nid, 0,
AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
hdac_hdmi_set_amp(edev, port->pin->nid, AMP_OUT_UNMUTE);
@@ -766,7 +768,7 @@ static int hdac_hdmi_pin_output_widget_event(struct snd_soc_dapm_widget *w,
hdac_hdmi_set_amp(edev, port->pin->nid, AMP_OUT_MUTE);
/* Disable out path for this pin widget */
- snd_hdac_codec_write(&edev->hdac, port->pin->nid, 0,
+ snd_hdac_codec_write(&edev->hdev, port->pin->nid, 0,
AC_VERB_SET_PIN_WIDGET_CONTROL, 0);
hdac_hdmi_set_power_state(edev, port->pin->nid, AC_PWRST_D3);
@@ -782,10 +784,10 @@ static int hdac_hdmi_cvt_output_widget_event(struct snd_soc_dapm_widget *w,
{
struct hdac_hdmi_cvt *cvt = w->priv;
struct hdac_ext_device *edev = to_hda_ext_device(w->dapm->dev);
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
struct hdac_hdmi_pcm *pcm;
- dev_dbg(&edev->hdac.dev, "%s: widget: %s event: %x\n",
+ dev_dbg(&edev->hdev.dev, "%s: widget: %s event: %x\n",
__func__, w->name, event);
pcm = hdac_hdmi_get_pcm_from_cvt(hdmi, cvt);
@@ -797,23 +799,23 @@ static int hdac_hdmi_cvt_output_widget_event(struct snd_soc_dapm_widget *w,
hdac_hdmi_set_power_state(edev, cvt->nid, AC_PWRST_D0);
/* Enable transmission */
- snd_hdac_codec_write(&edev->hdac, cvt->nid, 0,
+ snd_hdac_codec_write(&edev->hdev, cvt->nid, 0,
AC_VERB_SET_DIGI_CONVERT_1, 1);
/* Category Code (CC) to zero */
- snd_hdac_codec_write(&edev->hdac, cvt->nid, 0,
+ snd_hdac_codec_write(&edev->hdev, cvt->nid, 0,
AC_VERB_SET_DIGI_CONVERT_2, 0);
- snd_hdac_codec_write(&edev->hdac, cvt->nid, 0,
+ snd_hdac_codec_write(&edev->hdev, cvt->nid, 0,
AC_VERB_SET_CHANNEL_STREAMID, pcm->stream_tag);
- snd_hdac_codec_write(&edev->hdac, cvt->nid, 0,
+ snd_hdac_codec_write(&edev->hdev, cvt->nid, 0,
AC_VERB_SET_STREAM_FORMAT, pcm->format);
break;
case SND_SOC_DAPM_POST_PMD:
- snd_hdac_codec_write(&edev->hdac, cvt->nid, 0,
+ snd_hdac_codec_write(&edev->hdev, cvt->nid, 0,
AC_VERB_SET_CHANNEL_STREAMID, 0);
- snd_hdac_codec_write(&edev->hdac, cvt->nid, 0,
+ snd_hdac_codec_write(&edev->hdev, cvt->nid, 0,
AC_VERB_SET_STREAM_FORMAT, 0);
hdac_hdmi_set_power_state(edev, cvt->nid, AC_PWRST_D3);
@@ -831,7 +833,7 @@ static int hdac_hdmi_pin_mux_widget_event(struct snd_soc_dapm_widget *w,
struct hdac_ext_device *edev = to_hda_ext_device(w->dapm->dev);
int mux_idx;
- dev_dbg(&edev->hdac.dev, "%s: widget: %s event: %x\n",
+ dev_dbg(&edev->hdev.dev, "%s: widget: %s event: %x\n",
__func__, w->name, event);
if (!kc)
@@ -844,7 +846,7 @@ static int hdac_hdmi_pin_mux_widget_event(struct snd_soc_dapm_widget *w,
return -EIO;
if (mux_idx > 0) {
- snd_hdac_codec_write(&edev->hdac, port->pin->nid, 0,
+ snd_hdac_codec_write(&edev->hdev, port->pin->nid, 0,
AC_VERB_SET_CONNECT_SEL, (mux_idx - 1));
}
@@ -864,7 +866,7 @@ static int hdac_hdmi_set_pin_port_mux(struct snd_kcontrol *kcontrol,
struct snd_soc_dapm_context *dapm = w->dapm;
struct hdac_hdmi_port *port = w->priv;
struct hdac_ext_device *edev = to_hda_ext_device(dapm->dev);
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
struct hdac_hdmi_pcm *pcm = NULL;
const char *cvt_name = e->texts[ucontrol->value.enumerated.item[0]];
@@ -922,7 +924,7 @@ static int hdac_hdmi_create_pin_port_muxs(struct hdac_ext_device *edev,
struct snd_soc_dapm_widget *widget,
const char *widget_name)
{
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
struct hdac_hdmi_pin *pin = port->pin;
struct snd_kcontrol_new *kc;
struct hdac_hdmi_cvt *cvt;
@@ -934,17 +936,17 @@ static int hdac_hdmi_create_pin_port_muxs(struct hdac_ext_device *edev,
int i = 0;
int num_items = hdmi->num_cvt + 1;
- kc = devm_kzalloc(&edev->hdac.dev, sizeof(*kc), GFP_KERNEL);
+ kc = devm_kzalloc(&edev->hdev.dev, sizeof(*kc), GFP_KERNEL);
if (!kc)
return -ENOMEM;
- se = devm_kzalloc(&edev->hdac.dev, sizeof(*se), GFP_KERNEL);
+ se = devm_kzalloc(&edev->hdev.dev, sizeof(*se), GFP_KERNEL);
if (!se)
return -ENOMEM;
snprintf(kc_name, NAME_SIZE, "Pin %d port %d Input",
pin->nid, port->id);
- kc->name = devm_kstrdup(&edev->hdac.dev, kc_name, GFP_KERNEL);
+ kc->name = devm_kstrdup(&edev->hdev.dev, kc_name, GFP_KERNEL);
if (!kc->name)
return -ENOMEM;
@@ -962,24 +964,24 @@ static int hdac_hdmi_create_pin_port_muxs(struct hdac_ext_device *edev,
se->mask = roundup_pow_of_two(se->items) - 1;
sprintf(mux_items, "NONE");
- items[i] = devm_kstrdup(&edev->hdac.dev, mux_items, GFP_KERNEL);
+ items[i] = devm_kstrdup(&edev->hdev.dev, mux_items, GFP_KERNEL);
if (!items[i])
return -ENOMEM;
list_for_each_entry(cvt, &hdmi->cvt_list, head) {
i++;
sprintf(mux_items, "cvt %d", cvt->nid);
- items[i] = devm_kstrdup(&edev->hdac.dev, mux_items, GFP_KERNEL);
+ items[i] = devm_kstrdup(&edev->hdev.dev, mux_items, GFP_KERNEL);
if (!items[i])
return -ENOMEM;
}
- se->texts = devm_kmemdup(&edev->hdac.dev, items,
+ se->texts = devm_kmemdup(&edev->hdev.dev, items,
(num_items * sizeof(char *)), GFP_KERNEL);
if (!se->texts)
return -ENOMEM;
- return hdac_hdmi_fill_widget_info(&edev->hdac.dev, widget,
+ return hdac_hdmi_fill_widget_info(&edev->hdev.dev, widget,
snd_soc_dapm_mux, port, widget_name, NULL, kc, 1,
hdac_hdmi_pin_mux_widget_event,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_REG);
@@ -990,7 +992,7 @@ static void hdac_hdmi_add_pinmux_cvt_route(struct hdac_ext_device *edev,
struct snd_soc_dapm_widget *widgets,
struct snd_soc_dapm_route *route, int rindex)
{
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
const struct snd_kcontrol_new *kc;
struct soc_enum *se;
int mux_index = hdmi->num_cvt + hdmi->num_ports;
@@ -1033,8 +1035,8 @@ static int create_fill_widget_route_map(struct snd_soc_dapm_context *dapm)
struct snd_soc_dapm_widget *widgets;
struct snd_soc_dapm_route *route;
struct hdac_ext_device *edev = to_hda_ext_device(dapm->dev);
- struct hdac_hdmi_priv *hdmi = edev->private_data;
- struct snd_soc_dai_driver *dai_drv = dapm->component->dai_drv;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
+ struct snd_soc_dai_driver *dai_drv = hdmi->dai_drv;
char widget_name[NAME_SIZE];
struct hdac_hdmi_cvt *cvt;
struct hdac_hdmi_pin *pin;
@@ -1134,7 +1136,7 @@ static int create_fill_widget_route_map(struct snd_soc_dapm_context *dapm)
static int hdac_hdmi_init_dai_map(struct hdac_ext_device *edev)
{
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
struct hdac_hdmi_dai_port_map *dai_map;
struct hdac_hdmi_cvt *cvt;
int dai_id = 0;
@@ -1150,7 +1152,7 @@ static int hdac_hdmi_init_dai_map(struct hdac_ext_device *edev)
dai_id++;
if (dai_id == HDA_MAX_CVTS) {
- dev_warn(&edev->hdac.dev,
+ dev_warn(&edev->hdev.dev,
"Max dais supported: %d\n", dai_id);
break;
}
@@ -1161,7 +1163,7 @@ static int hdac_hdmi_init_dai_map(struct hdac_ext_device *edev)
static int hdac_hdmi_add_cvt(struct hdac_ext_device *edev, hda_nid_t nid)
{
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
struct hdac_hdmi_cvt *cvt;
char name[NAME_SIZE];
@@ -1176,7 +1178,7 @@ static int hdac_hdmi_add_cvt(struct hdac_ext_device *edev, hda_nid_t nid)
list_add_tail(&cvt->head, &hdmi->cvt_list);
hdmi->num_cvt++;
- return hdac_hdmi_query_cvt_params(&edev->hdac, cvt);
+ return hdac_hdmi_query_cvt_params(&edev->hdev, cvt);
}
static int hdac_hdmi_parse_eld(struct hdac_ext_device *edev,
@@ -1188,7 +1190,7 @@ static int hdac_hdmi_parse_eld(struct hdac_ext_device *edev,
>> DRM_ELD_VER_SHIFT;
if (ver != ELD_VER_CEA_861D && ver != ELD_VER_PARTIAL) {
- dev_err(&edev->hdac.dev, "HDMI: Unknown ELD version %d\n", ver);
+ dev_err(&edev->hdev.dev, "HDMI: Unknown ELD version %d\n", ver);
return -EINVAL;
}
@@ -1196,7 +1198,7 @@ static int hdac_hdmi_parse_eld(struct hdac_ext_device *edev,
DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT;
if (mnl > ELD_MAX_MNL) {
- dev_err(&edev->hdac.dev, "HDMI: MNL Invalid %d\n", mnl);
+ dev_err(&edev->hdev.dev, "HDMI: MNL Invalid %d\n", mnl);
return -EINVAL;
}
@@ -1209,7 +1211,7 @@ static void hdac_hdmi_present_sense(struct hdac_hdmi_pin *pin,
struct hdac_hdmi_port *port)
{
struct hdac_ext_device *edev = pin->edev;
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
struct hdac_hdmi_pcm *pcm;
int size = 0;
int port_id = -1;
@@ -1227,7 +1229,7 @@ static void hdac_hdmi_present_sense(struct hdac_hdmi_pin *pin,
if (pin->mst_capable)
port_id = port->id;
- size = snd_hdac_acomp_get_eld(&edev->hdac, pin->nid, port_id,
+ size = snd_hdac_acomp_get_eld(&edev->hdev, pin->nid, port_id,
&port->eld.monitor_present,
port->eld.eld_buffer,
ELD_MAX_SIZE);
@@ -1250,7 +1252,7 @@ static void hdac_hdmi_present_sense(struct hdac_hdmi_pin *pin,
if (!port->eld.monitor_present || !port->eld.eld_valid) {
- dev_err(&edev->hdac.dev, "%s: disconnect for pin:port %d:%d\n",
+ dev_err(&edev->hdev.dev, "%s: disconnect for pin:port %d:%d\n",
__func__, pin->nid, port->id);
/*
@@ -1304,7 +1306,7 @@ static int hdac_hdmi_add_ports(struct hdac_hdmi_priv *hdmi,
static int hdac_hdmi_add_pin(struct hdac_ext_device *edev, hda_nid_t nid)
{
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
struct hdac_hdmi_pin *pin;
int ret;
@@ -1333,40 +1335,38 @@ static int hdac_hdmi_add_pin(struct hdac_ext_device *edev, hda_nid_t nid)
#define INTEL_EN_DP12 0x02 /* enable DP 1.2 features */
#define INTEL_EN_ALL_PIN_CVTS 0x01 /* enable 2nd & 3rd pins and converters */
-static void hdac_hdmi_skl_enable_all_pins(struct hdac_device *hdac)
+static void hdac_hdmi_skl_enable_all_pins(struct hdac_device *hdev)
{
unsigned int vendor_param;
- struct hdac_ext_device *edev = to_ehdac_device(hdac);
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
unsigned int vendor_nid = hdmi->drv_data->vendor_nid;
- vendor_param = snd_hdac_codec_read(hdac, vendor_nid, 0,
+ vendor_param = snd_hdac_codec_read(hdev, vendor_nid, 0,
INTEL_GET_VENDOR_VERB, 0);
if (vendor_param == -1 || vendor_param & INTEL_EN_ALL_PIN_CVTS)
return;
vendor_param |= INTEL_EN_ALL_PIN_CVTS;
- vendor_param = snd_hdac_codec_read(hdac, vendor_nid, 0,
+ vendor_param = snd_hdac_codec_read(hdev, vendor_nid, 0,
INTEL_SET_VENDOR_VERB, vendor_param);
if (vendor_param == -1)
return;
}
-static void hdac_hdmi_skl_enable_dp12(struct hdac_device *hdac)
+static void hdac_hdmi_skl_enable_dp12(struct hdac_device *hdev)
{
unsigned int vendor_param;
- struct hdac_ext_device *edev = to_ehdac_device(hdac);
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
unsigned int vendor_nid = hdmi->drv_data->vendor_nid;
- vendor_param = snd_hdac_codec_read(hdac, vendor_nid, 0,
+ vendor_param = snd_hdac_codec_read(hdev, vendor_nid, 0,
INTEL_GET_VENDOR_VERB, 0);
if (vendor_param == -1 || vendor_param & INTEL_EN_DP12)
return;
/* enable DP1.2 mode */
vendor_param |= INTEL_EN_DP12;
- vendor_param = snd_hdac_codec_read(hdac, vendor_nid, 0,
+ vendor_param = snd_hdac_codec_read(hdev, vendor_nid, 0,
INTEL_SET_VENDOR_VERB, vendor_param);
if (vendor_param == -1)
return;
@@ -1384,7 +1384,7 @@ static const struct snd_soc_dai_ops hdmi_dai_ops = {
* Each converter can support a stream independently. So a dai is created
* based on the number of converters queried.
*/
-static int hdac_hdmi_create_dais(struct hdac_device *hdac,
+static int hdac_hdmi_create_dais(struct hdac_device *hdev,
struct snd_soc_dai_driver **dais,
struct hdac_hdmi_priv *hdmi, int num_dais)
{
@@ -1397,20 +1397,20 @@ static int hdac_hdmi_create_dais(struct hdac_device *hdac,
u64 formats;
int ret;
- hdmi_dais = devm_kzalloc(&hdac->dev,
+ hdmi_dais = devm_kzalloc(&hdev->dev,
(sizeof(*hdmi_dais) * num_dais),
GFP_KERNEL);
if (!hdmi_dais)
return -ENOMEM;
list_for_each_entry(cvt, &hdmi->cvt_list, head) {
- ret = snd_hdac_query_supported_pcm(hdac, cvt->nid,
+ ret = snd_hdac_query_supported_pcm(hdev, cvt->nid,
&rates, &formats, &bps);
if (ret)
return ret;
sprintf(dai_name, "intel-hdmi-hifi%d", i+1);
- hdmi_dais[i].name = devm_kstrdup(&hdac->dev,
+ hdmi_dais[i].name = devm_kstrdup(&hdev->dev,
dai_name, GFP_KERNEL);
if (!hdmi_dais[i].name)
@@ -1418,7 +1418,7 @@ static int hdac_hdmi_create_dais(struct hdac_device *hdac,
snprintf(name, sizeof(name), "hifi%d", i+1);
hdmi_dais[i].playback.stream_name =
- devm_kstrdup(&hdac->dev, name, GFP_KERNEL);
+ devm_kstrdup(&hdev->dev, name, GFP_KERNEL);
if (!hdmi_dais[i].playback.stream_name)
return -ENOMEM;
@@ -1438,6 +1438,7 @@ static int hdac_hdmi_create_dais(struct hdac_device *hdac,
}
*dais = hdmi_dais;
+ hdmi->dai_drv = hdmi_dais;
return 0;
}
@@ -1451,29 +1452,26 @@ static int hdac_hdmi_parse_and_map_nid(struct hdac_ext_device *edev,
{
hda_nid_t nid;
int i, num_nodes;
- struct hdac_device *hdac = &edev->hdac;
- struct hdac_hdmi_priv *hdmi = edev->private_data;
struct hdac_hdmi_cvt *temp_cvt, *cvt_next;
struct hdac_hdmi_pin *temp_pin, *pin_next;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
+ struct hdac_device *hdev = &edev->hdev;
int ret;
- hdac_hdmi_skl_enable_all_pins(hdac);
- hdac_hdmi_skl_enable_dp12(hdac);
+ hdac_hdmi_skl_enable_all_pins(hdev);
+ hdac_hdmi_skl_enable_dp12(hdev);
- num_nodes = snd_hdac_get_sub_nodes(hdac, hdac->afg, &nid);
+ num_nodes = snd_hdac_get_sub_nodes(hdev, hdev->afg, &nid);
if (!nid || num_nodes <= 0) {
- dev_warn(&hdac->dev, "HDMI: failed to get afg sub nodes\n");
+ dev_warn(&hdev->dev, "HDMI: failed to get afg sub nodes\n");
return -EINVAL;
}
- hdac->num_nodes = num_nodes;
- hdac->start_nid = nid;
-
- for (i = 0; i < hdac->num_nodes; i++, nid++) {
+ for (i = 0; i < num_nodes; i++, nid++) {
unsigned int caps;
unsigned int type;
- caps = get_wcaps(hdac, nid);
+ caps = get_wcaps(hdev, nid);
type = get_wcaps_type(caps);
if (!(caps & AC_WCAP_DIGITAL))
@@ -1495,16 +1493,14 @@ static int hdac_hdmi_parse_and_map_nid(struct hdac_ext_device *edev,
}
}
- hdac->end_nid = nid;
-
if (!hdmi->num_pin || !hdmi->num_cvt) {
ret = -EIO;
goto free_widgets;
}
- ret = hdac_hdmi_create_dais(hdac, dais, hdmi, hdmi->num_cvt);
+ ret = hdac_hdmi_create_dais(hdev, dais, hdmi, hdmi->num_cvt);
if (ret) {
- dev_err(&hdac->dev, "Failed to create dais with err: %d\n",
+ dev_err(&hdev->dev, "Failed to create dais with err: %d\n",
ret);
goto free_widgets;
}
@@ -1537,7 +1533,7 @@ free_widgets:
static void hdac_hdmi_eld_notify_cb(void *aptr, int port, int pipe)
{
struct hdac_ext_device *edev = aptr;
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
struct hdac_hdmi_pin *pin = NULL;
struct hdac_hdmi_port *hport = NULL;
struct snd_soc_codec *codec = edev->scodec;
@@ -1546,7 +1542,7 @@ static void hdac_hdmi_eld_notify_cb(void *aptr, int port, int pipe)
/* Don't know how this mapping is derived */
hda_nid_t pin_nid = port + 0x04;
- dev_dbg(&edev->hdac.dev, "%s: for pin:%d port=%d\n", __func__,
+ dev_dbg(&edev->hdev.dev, "%s: for pin:%d port=%d\n", __func__,
pin_nid, pipe);
/*
@@ -1559,7 +1555,7 @@ static void hdac_hdmi_eld_notify_cb(void *aptr, int port, int pipe)
SNDRV_CTL_POWER_D0)
return;
- if (atomic_read(&edev->hdac.in_pm))
+ if (atomic_read(&edev->hdev.in_pm))
return;
list_for_each_entry(pin, &hdmi->pin_list, head) {
@@ -1614,7 +1610,7 @@ static int create_fill_jack_kcontrols(struct snd_soc_card *card,
char *name;
int i = 0, j;
struct snd_soc_codec *codec = edev->scodec;
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
kc = devm_kcalloc(codec->dev, hdmi->num_ports,
sizeof(*kc), GFP_KERNEL);
@@ -1652,7 +1648,7 @@ int hdac_hdmi_jack_port_init(struct snd_soc_codec *codec,
struct snd_soc_dapm_context *dapm)
{
struct hdac_ext_device *edev = snd_soc_codec_get_drvdata(codec);
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
struct hdac_hdmi_pin *pin;
struct snd_soc_dapm_widget *widgets;
struct snd_soc_dapm_route *route;
@@ -1728,7 +1724,7 @@ int hdac_hdmi_jack_init(struct snd_soc_dai *dai, int device,
{
struct snd_soc_codec *codec = dai->codec;
struct hdac_ext_device *edev = snd_soc_codec_get_drvdata(codec);
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
struct hdac_hdmi_pcm *pcm;
struct snd_pcm *snd_pcm;
int err;
@@ -1750,7 +1746,7 @@ int hdac_hdmi_jack_init(struct snd_soc_dai *dai, int device,
if (snd_pcm) {
err = snd_hdac_add_chmap_ctls(snd_pcm, device, &hdmi->chmap);
if (err < 0) {
- dev_err(&edev->hdac.dev,
+ dev_err(&edev->hdev.dev,
"chmap control add failed with err: %d for pcm: %d\n",
err, device);
kfree(pcm);
@@ -1791,7 +1787,7 @@ static void hdac_hdmi_present_sense_all_pins(struct hdac_ext_device *edev,
static int hdmi_codec_probe(struct snd_soc_codec *codec)
{
struct hdac_ext_device *edev = snd_soc_codec_get_drvdata(codec);
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
struct snd_soc_dapm_context *dapm =
snd_soc_component_get_dapm(&codec->component);
struct hdac_ext_link *hlink = NULL;
@@ -1803,9 +1799,9 @@ static int hdmi_codec_probe(struct snd_soc_codec *codec)
* hold the ref while we probe, also no need to drop the ref on
* exit, we call pm_runtime_suspend() so that will do for us
*/
- hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdac.dev));
+ hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdev.dev));
if (!hlink) {
- dev_err(&edev->hdac.dev, "hdac link not found\n");
+ dev_err(&edev->hdev.dev, "hdac link not found\n");
return -EIO;
}
@@ -1818,7 +1814,7 @@ static int hdmi_codec_probe(struct snd_soc_codec *codec)
aops.audio_ptr = edev;
ret = snd_hdac_i915_register_notifier(&aops);
if (ret < 0) {
- dev_err(&edev->hdac.dev, "notifier register failed: err: %d\n",
+ dev_err(&edev->hdev.dev, "notifier register failed: err: %d\n",
ret);
return ret;
}
@@ -1831,9 +1827,9 @@ static int hdmi_codec_probe(struct snd_soc_codec *codec)
* hdac_device core already sets the state to active and calls
* get_noresume. So enable runtime and set the device to suspend.
*/
- pm_runtime_enable(&edev->hdac.dev);
- pm_runtime_put(&edev->hdac.dev);
- pm_runtime_suspend(&edev->hdac.dev);
+ pm_runtime_enable(&edev->hdev.dev);
+ pm_runtime_put(&edev->hdev.dev);
+ pm_runtime_suspend(&edev->hdev.dev);
return 0;
}
@@ -1842,7 +1838,7 @@ static int hdmi_codec_remove(struct snd_soc_codec *codec)
{
struct hdac_ext_device *edev = snd_soc_codec_get_drvdata(codec);
- pm_runtime_disable(&edev->hdac.dev);
+ pm_runtime_disable(&edev->hdev.dev);
return 0;
}
@@ -1850,9 +1846,9 @@ static int hdmi_codec_remove(struct snd_soc_codec *codec)
static int hdmi_codec_prepare(struct device *dev)
{
struct hdac_ext_device *edev = to_hda_ext_device(dev);
- struct hdac_device *hdac = &edev->hdac;
+ struct hdac_device *hdev = &edev->hdev;
- pm_runtime_get_sync(&edev->hdac.dev);
+ pm_runtime_get_sync(&edev->hdev.dev);
/*
* Power down afg.
@@ -1861,7 +1857,7 @@ static int hdmi_codec_prepare(struct device *dev)
* is received. So setting power state is ensured without using loop
* to read the state.
*/
- snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE,
+ snd_hdac_codec_read(hdev, hdev->afg, 0, AC_VERB_SET_POWER_STATE,
AC_PWRST_D3);
return 0;
@@ -1870,15 +1866,15 @@ static int hdmi_codec_prepare(struct device *dev)
static void hdmi_codec_complete(struct device *dev)
{
struct hdac_ext_device *edev = to_hda_ext_device(dev);
- struct hdac_hdmi_priv *hdmi = edev->private_data;
- struct hdac_device *hdac = &edev->hdac;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
+ struct hdac_device *hdev = &edev->hdev;
/* Power up afg */
- snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE,
+ snd_hdac_codec_read(hdev, hdev->afg, 0, AC_VERB_SET_POWER_STATE,
AC_PWRST_D0);
- hdac_hdmi_skl_enable_all_pins(&edev->hdac);
- hdac_hdmi_skl_enable_dp12(&edev->hdac);
+ hdac_hdmi_skl_enable_all_pins(&edev->hdev);
+ hdac_hdmi_skl_enable_dp12(&edev->hdev);
/*
* As the ELD notify callback request is not entertained while the
@@ -1888,7 +1884,7 @@ static void hdmi_codec_complete(struct device *dev)
*/
hdac_hdmi_present_sense_all_pins(edev, hdmi, false);
- pm_runtime_put_sync(&edev->hdac.dev);
+ pm_runtime_put_sync(&edev->hdev.dev);
}
#else
#define hdmi_codec_prepare NULL
@@ -1901,21 +1897,20 @@ static const struct snd_soc_codec_driver hdmi_hda_codec = {
.idle_bias_off = true,
};
-static void hdac_hdmi_get_chmap(struct hdac_device *hdac, int pcm_idx,
+static void hdac_hdmi_get_chmap(struct hdac_device *hdev, int pcm_idx,
unsigned char *chmap)
{
- struct hdac_ext_device *edev = to_ehdac_device(hdac);
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
struct hdac_hdmi_pcm *pcm = get_hdmi_pcm_from_id(hdmi, pcm_idx);
memcpy(chmap, pcm->chmap, ARRAY_SIZE(pcm->chmap));
}
-static void hdac_hdmi_set_chmap(struct hdac_device *hdac, int pcm_idx,
+static void hdac_hdmi_set_chmap(struct hdac_device *hdev, int pcm_idx,
unsigned char *chmap, int prepared)
{
- struct hdac_ext_device *edev = to_ehdac_device(hdac);
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_ext_device *edev = to_ehdac_device(hdev);
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
struct hdac_hdmi_pcm *pcm = get_hdmi_pcm_from_id(hdmi, pcm_idx);
struct hdac_hdmi_port *port;
@@ -1934,10 +1929,9 @@ static void hdac_hdmi_set_chmap(struct hdac_device *hdac, int pcm_idx,
mutex_unlock(&pcm->lock);
}
-static bool is_hdac_hdmi_pcm_attached(struct hdac_device *hdac, int pcm_idx)
+static bool is_hdac_hdmi_pcm_attached(struct hdac_device *hdev, int pcm_idx)
{
- struct hdac_ext_device *edev = to_ehdac_device(hdac);
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
struct hdac_hdmi_pcm *pcm = get_hdmi_pcm_from_id(hdmi, pcm_idx);
if (!pcm)
@@ -1949,10 +1943,9 @@ static bool is_hdac_hdmi_pcm_attached(struct hdac_device *hdac, int pcm_idx)
return true;
}
-static int hdac_hdmi_get_spk_alloc(struct hdac_device *hdac, int pcm_idx)
+static int hdac_hdmi_get_spk_alloc(struct hdac_device *hdev, int pcm_idx)
{
- struct hdac_ext_device *edev = to_ehdac_device(hdac);
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
struct hdac_hdmi_pcm *pcm = get_hdmi_pcm_from_id(hdmi, pcm_idx);
struct hdac_hdmi_port *port;
@@ -1983,30 +1976,30 @@ static struct hdac_hdmi_drv_data intel_drv_data = {
static int hdac_hdmi_dev_probe(struct hdac_ext_device *edev)
{
- struct hdac_device *codec = &edev->hdac;
+ struct hdac_device *hdev = &edev->hdev;
struct hdac_hdmi_priv *hdmi_priv;
struct snd_soc_dai_driver *hdmi_dais = NULL;
struct hdac_ext_link *hlink = NULL;
int num_dais = 0;
int ret = 0;
- struct hdac_driver *hdrv = drv_to_hdac_driver(codec->dev.driver);
- const struct hda_device_id *hdac_id = hdac_get_device_id(codec, hdrv);
+ struct hdac_driver *hdrv = drv_to_hdac_driver(hdev->dev.driver);
+ const struct hda_device_id *hdac_id = hdac_get_device_id(hdev, hdrv);
/* hold the ref while we probe */
- hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdac.dev));
+ hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdev.dev));
if (!hlink) {
- dev_err(&edev->hdac.dev, "hdac link not found\n");
+ dev_err(&edev->hdev.dev, "hdac link not found\n");
return -EIO;
}
snd_hdac_ext_bus_link_get(edev->ebus, hlink);
- hdmi_priv = devm_kzalloc(&codec->dev, sizeof(*hdmi_priv), GFP_KERNEL);
+ hdmi_priv = devm_kzalloc(&hdev->dev, sizeof(*hdmi_priv), GFP_KERNEL);
if (hdmi_priv == NULL)
return -ENOMEM;
edev->private_data = hdmi_priv;
- snd_hdac_register_chmap_ops(codec, &hdmi_priv->chmap);
+ snd_hdac_register_chmap_ops(hdev, &hdmi_priv->chmap);
hdmi_priv->chmap.ops.get_chmap = hdac_hdmi_get_chmap;
hdmi_priv->chmap.ops.set_chmap = hdac_hdmi_set_chmap;
hdmi_priv->chmap.ops.is_pcm_attached = is_hdac_hdmi_pcm_attached;
@@ -2021,7 +2014,7 @@ static int hdac_hdmi_dev_probe(struct hdac_ext_device *edev)
else
hdmi_priv->drv_data = &intel_drv_data;
- dev_set_drvdata(&codec->dev, edev);
+ dev_set_drvdata(&hdev->dev, edev);
INIT_LIST_HEAD(&hdmi_priv->pin_list);
INIT_LIST_HEAD(&hdmi_priv->cvt_list);
@@ -2032,9 +2025,9 @@ static int hdac_hdmi_dev_probe(struct hdac_ext_device *edev)
* Turned off in the runtime_suspend during the first explicit
* pm_runtime_suspend call.
*/
- ret = snd_hdac_display_power(edev->hdac.bus, true);
+ ret = snd_hdac_display_power(edev->hdev.bus, true);
if (ret < 0) {
- dev_err(&edev->hdac.dev,
+ dev_err(&edev->hdev.dev,
"Cannot turn on display power on i915 err: %d\n",
ret);
return ret;
@@ -2042,13 +2035,14 @@ static int hdac_hdmi_dev_probe(struct hdac_ext_device *edev)
ret = hdac_hdmi_parse_and_map_nid(edev, &hdmi_dais, &num_dais);
if (ret < 0) {
- dev_err(&codec->dev,
+ dev_err(&hdev->dev,
"Failed in parse and map nid with err: %d\n", ret);
return ret;
}
+ snd_hdac_refresh_widgets(hdev, true);
/* ASoC specific initialization */
- ret = snd_soc_register_codec(&codec->dev, &hdmi_hda_codec,
+ ret = snd_soc_register_codec(&hdev->dev, &hdmi_hda_codec,
hdmi_dais, num_dais);
snd_hdac_ext_bus_link_put(edev->ebus, hlink);
@@ -2058,14 +2052,14 @@ static int hdac_hdmi_dev_probe(struct hdac_ext_device *edev)
static int hdac_hdmi_dev_remove(struct hdac_ext_device *edev)
{
- struct hdac_hdmi_priv *hdmi = edev->private_data;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(&edev->hdev);
struct hdac_hdmi_pin *pin, *pin_next;
struct hdac_hdmi_cvt *cvt, *cvt_next;
struct hdac_hdmi_pcm *pcm, *pcm_next;
struct hdac_hdmi_port *port, *port_next;
int i;
- snd_soc_unregister_codec(&edev->hdac.dev);
+ snd_soc_unregister_codec(&edev->hdev.dev);
list_for_each_entry_safe(pcm, pcm_next, &hdmi->pcm_list, head) {
pcm->cvt = NULL;
@@ -2101,8 +2095,8 @@ static int hdac_hdmi_dev_remove(struct hdac_ext_device *edev)
static int hdac_hdmi_runtime_suspend(struct device *dev)
{
struct hdac_ext_device *edev = to_hda_ext_device(dev);
- struct hdac_device *hdac = &edev->hdac;
- struct hdac_bus *bus = hdac->bus;
+ struct hdac_device *hdev = &edev->hdev;
+ struct hdac_bus *bus = hdev->bus;
struct hdac_ext_bus *ebus = hbus_to_ebus(bus);
struct hdac_ext_link *hlink = NULL;
int err;
@@ -2120,7 +2114,7 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
* is received. So setting power state is ensured without using loop
* to read the state.
*/
- snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE,
+ snd_hdac_codec_read(hdev, hdev->afg, 0, AC_VERB_SET_POWER_STATE,
AC_PWRST_D3);
err = snd_hdac_display_power(bus, false);
if (err < 0) {
@@ -2142,8 +2136,8 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
static int hdac_hdmi_runtime_resume(struct device *dev)
{
struct hdac_ext_device *edev = to_hda_ext_device(dev);
- struct hdac_device *hdac = &edev->hdac;
- struct hdac_bus *bus = hdac->bus;
+ struct hdac_device *hdev = &edev->hdev;
+ struct hdac_bus *bus = hdev->bus;
struct hdac_ext_bus *ebus = hbus_to_ebus(bus);
struct hdac_ext_link *hlink = NULL;
int err;
@@ -2168,11 +2162,11 @@ static int hdac_hdmi_runtime_resume(struct device *dev)
return err;
}
- hdac_hdmi_skl_enable_all_pins(&edev->hdac);
- hdac_hdmi_skl_enable_dp12(&edev->hdac);
+ hdac_hdmi_skl_enable_all_pins(&edev->hdev);
+ hdac_hdmi_skl_enable_dp12(&edev->hdev);
/* Power up afg */
- snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE,
+ snd_hdac_codec_read(hdev, hdev->afg, 0, AC_VERB_SET_POWER_STATE,
AC_PWRST_D0);
return 0;
@@ -2192,6 +2186,8 @@ static const struct hda_device_id hdmi_list[] = {
HDA_CODEC_EXT_ENTRY(0x80862809, 0x100000, "Skylake HDMI", 0),
HDA_CODEC_EXT_ENTRY(0x8086280a, 0x100000, "Broxton HDMI", 0),
HDA_CODEC_EXT_ENTRY(0x8086280b, 0x100000, "Kabylake HDMI", 0),
+ HDA_CODEC_EXT_ENTRY(0x8086280c, 0x100000, "Cannonlake HDMI",
+ &intel_glk_drv_data),
HDA_CODEC_EXT_ENTRY(0x8086280d, 0x100000, "Geminilake HDMI",
&intel_glk_drv_data),
{}
diff --git a/sound/soc/codecs/max98373.c b/sound/soc/codecs/max98373.c
new file mode 100644
index 000000000000..31b0864583e8
--- /dev/null
+++ b/sound/soc/codecs/max98373.c
@@ -0,0 +1,976 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2017, Maxim Integrated */
+
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/cdev.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <sound/tlv.h>
+#include "max98373.h"
+
+static struct reg_default max98373_reg[] = {
+ {MAX98373_R2000_SW_RESET, 0x00},
+ {MAX98373_R2001_INT_RAW1, 0x00},
+ {MAX98373_R2002_INT_RAW2, 0x00},
+ {MAX98373_R2003_INT_RAW3, 0x00},
+ {MAX98373_R2004_INT_STATE1, 0x00},
+ {MAX98373_R2005_INT_STATE2, 0x00},
+ {MAX98373_R2006_INT_STATE3, 0x00},
+ {MAX98373_R2007_INT_FLAG1, 0x00},
+ {MAX98373_R2008_INT_FLAG2, 0x00},
+ {MAX98373_R2009_INT_FLAG3, 0x00},
+ {MAX98373_R200A_INT_EN1, 0x00},
+ {MAX98373_R200B_INT_EN2, 0x00},
+ {MAX98373_R200C_INT_EN3, 0x00},
+ {MAX98373_R200D_INT_FLAG_CLR1, 0x00},
+ {MAX98373_R200E_INT_FLAG_CLR2, 0x00},
+ {MAX98373_R200F_INT_FLAG_CLR3, 0x00},
+ {MAX98373_R2010_IRQ_CTRL, 0x00},
+ {MAX98373_R2014_THERM_WARN_THRESH, 0x10},
+ {MAX98373_R2015_THERM_SHDN_THRESH, 0x27},
+ {MAX98373_R2016_THERM_HYSTERESIS, 0x01},
+ {MAX98373_R2017_THERM_FOLDBACK_SET, 0xC0},
+ {MAX98373_R2018_THERM_FOLDBACK_EN, 0x00},
+ {MAX98373_R201E_PIN_DRIVE_STRENGTH, 0x55},
+ {MAX98373_R2020_PCM_TX_HIZ_EN_1, 0xFE},
+ {MAX98373_R2021_PCM_TX_HIZ_EN_2, 0xFF},
+ {MAX98373_R2022_PCM_TX_SRC_1, 0x00},
+ {MAX98373_R2023_PCM_TX_SRC_2, 0x00},
+ {MAX98373_R2024_PCM_DATA_FMT_CFG, 0xC0},
+ {MAX98373_R2025_AUDIO_IF_MODE, 0x00},
+ {MAX98373_R2026_PCM_CLOCK_RATIO, 0x04},
+ {MAX98373_R2027_PCM_SR_SETUP_1, 0x08},
+ {MAX98373_R2028_PCM_SR_SETUP_2, 0x88},
+ {MAX98373_R2029_PCM_TO_SPK_MONO_MIX_1, 0x00},
+ {MAX98373_R202A_PCM_TO_SPK_MONO_MIX_2, 0x00},
+ {MAX98373_R202B_PCM_RX_EN, 0x00},
+ {MAX98373_R202C_PCM_TX_EN, 0x00},
+ {MAX98373_R202E_ICC_RX_CH_EN_1, 0x00},
+ {MAX98373_R202F_ICC_RX_CH_EN_2, 0x00},
+ {MAX98373_R2030_ICC_TX_HIZ_EN_1, 0xFF},
+ {MAX98373_R2031_ICC_TX_HIZ_EN_2, 0xFF},
+ {MAX98373_R2032_ICC_LINK_EN_CFG, 0x30},
+ {MAX98373_R2034_ICC_TX_CNTL, 0x00},
+ {MAX98373_R2035_ICC_TX_EN, 0x00},
+ {MAX98373_R2036_SOUNDWIRE_CTRL, 0x05},
+ {MAX98373_R203D_AMP_DIG_VOL_CTRL, 0x00},
+ {MAX98373_R203E_AMP_PATH_GAIN, 0x08},
+ {MAX98373_R203F_AMP_DSP_CFG, 0x02},
+ {MAX98373_R2040_TONE_GEN_CFG, 0x00},
+ {MAX98373_R2041_AMP_CFG, 0x03},
+ {MAX98373_R2042_AMP_EDGE_RATE_CFG, 0x00},
+ {MAX98373_R2043_AMP_EN, 0x00},
+ {MAX98373_R2046_IV_SENSE_ADC_DSP_CFG, 0x04},
+ {MAX98373_R2047_IV_SENSE_ADC_EN, 0x00},
+ {MAX98373_R2051_MEAS_ADC_SAMPLING_RATE, 0x00},
+ {MAX98373_R2052_MEAS_ADC_PVDD_FLT_CFG, 0x00},
+ {MAX98373_R2053_MEAS_ADC_THERM_FLT_CFG, 0x00},
+ {MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK, 0x00},
+ {MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK, 0x00},
+ {MAX98373_R2056_MEAS_ADC_PVDD_CH_EN, 0x00},
+ {MAX98373_R2090_BDE_LVL_HOLD, 0x00},
+ {MAX98373_R2091_BDE_GAIN_ATK_REL_RATE, 0x00},
+ {MAX98373_R2092_BDE_CLIPPER_MODE, 0x00},
+ {MAX98373_R2097_BDE_L1_THRESH, 0x00},
+ {MAX98373_R2098_BDE_L2_THRESH, 0x00},
+ {MAX98373_R2099_BDE_L3_THRESH, 0x00},
+ {MAX98373_R209A_BDE_L4_THRESH, 0x00},
+ {MAX98373_R209B_BDE_THRESH_HYST, 0x00},
+ {MAX98373_R20A8_BDE_L1_CFG_1, 0x00},
+ {MAX98373_R20A9_BDE_L1_CFG_2, 0x00},
+ {MAX98373_R20AA_BDE_L1_CFG_3, 0x00},
+ {MAX98373_R20AB_BDE_L2_CFG_1, 0x00},
+ {MAX98373_R20AC_BDE_L2_CFG_2, 0x00},
+ {MAX98373_R20AD_BDE_L2_CFG_3, 0x00},
+ {MAX98373_R20AE_BDE_L3_CFG_1, 0x00},
+ {MAX98373_R20AF_BDE_L3_CFG_2, 0x00},
+ {MAX98373_R20B0_BDE_L3_CFG_3, 0x00},
+ {MAX98373_R20B1_BDE_L4_CFG_1, 0x00},
+ {MAX98373_R20B2_BDE_L4_CFG_2, 0x00},
+ {MAX98373_R20B3_BDE_L4_CFG_3, 0x00},
+ {MAX98373_R20B4_BDE_INFINITE_HOLD_RELEASE, 0x00},
+ {MAX98373_R20B5_BDE_EN, 0x00},
+ {MAX98373_R20B6_BDE_CUR_STATE_READBACK, 0x00},
+ {MAX98373_R20D1_DHT_CFG, 0x01},
+ {MAX98373_R20D2_DHT_ATTACK_CFG, 0x02},
+ {MAX98373_R20D3_DHT_RELEASE_CFG, 0x03},
+ {MAX98373_R20D4_DHT_EN, 0x00},
+ {MAX98373_R20E0_LIMITER_THRESH_CFG, 0x00},
+ {MAX98373_R20E1_LIMITER_ATK_REL_RATES, 0x00},
+ {MAX98373_R20E2_LIMITER_EN, 0x00},
+ {MAX98373_R20FE_DEVICE_AUTO_RESTART_CFG, 0x00},
+ {MAX98373_R20FF_GLOBAL_SHDN, 0x00},
+ {MAX98373_R21FF_REV_ID, 0x42},
+};
+
+static int max98373_dai_set_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct max98373_priv *max98373 = snd_soc_codec_get_drvdata(codec);
+ unsigned int format = 0;
+ unsigned int invert = 0;
+
+ dev_dbg(codec->dev, "%s: fmt 0x%08X\n", __func__, fmt);
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ invert = MAX98373_PCM_MODE_CFG_PCM_BCLKEDGE;
+ break;
+ default:
+ dev_err(codec->dev, "DAI invert mode unsupported\n");
+ return -EINVAL;
+ }
+
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2026_PCM_CLOCK_RATIO,
+ MAX98373_PCM_MODE_CFG_PCM_BCLKEDGE,
+ invert);
+
+ /* interface format */
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ format = MAX98373_PCM_FORMAT_I2S;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ format = MAX98373_PCM_FORMAT_LJ;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ format = MAX98373_PCM_FORMAT_TDM_MODE1;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ format = MAX98373_PCM_FORMAT_TDM_MODE0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2024_PCM_DATA_FMT_CFG,
+ MAX98373_PCM_MODE_CFG_FORMAT_MASK,
+ format << MAX98373_PCM_MODE_CFG_FORMAT_SHIFT);
+
+ return 0;
+}
+
+/* BCLKs per LRCLK */
+static const int bclk_sel_table[] = {
+ 32, 48, 64, 96, 128, 192, 256, 384, 512, 320,
+};
+
+static int max98373_get_bclk_sel(int bclk)
+{
+ int i;
+ /* match BCLKs per LRCLK */
+ for (i = 0; i < ARRAY_SIZE(bclk_sel_table); i++) {
+ if (bclk_sel_table[i] == bclk)
+ return i + 2;
+ }
+ return 0;
+}
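+
+/*
+ * Worked example (our arithmetic, not from a datasheet): a 2-channel,
+ * 32-bit-slot stream gives 2 * 32 = 64 BCLKs per LRCLK; 64 is index 2
+ * in bclk_sel_table, so this helper returns 2 + 2 = 4 for the BSEL
+ * field. A ratio not in the table (e.g. 24) returns 0, which callers
+ * treat as unsupported.
+ */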
+
+static int max98373_set_clock(struct snd_soc_codec *codec,
+ struct snd_pcm_hw_params *params)
+{
+ struct max98373_priv *max98373 = snd_soc_codec_get_drvdata(codec);
+ /* BCLK/LRCLK ratio calculation */
+ int blr_clk_ratio = params_channels(params) * max98373->ch_size;
+ int value;
+
+ if (!max98373->tdm_mode) {
+ /* BCLK configuration */
+ value = max98373_get_bclk_sel(blr_clk_ratio);
+ if (!value) {
+ dev_err(codec->dev, "format unsupported %d\n",
+ params_format(params));
+ return -EINVAL;
+ }
+
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2026_PCM_CLOCK_RATIO,
+ MAX98373_PCM_CLK_SETUP_BSEL_MASK,
+ value);
+ }
+ return 0;
+}
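+
+/*
+ * Note on blr_clk_ratio above: it assumes one slot per channel at
+ * ch_size bits (e.g. 2 ch x 16 bit => 32 BCLKs/LRCLK). TDM layouts
+ * with extra or wider slots bypass this path; they program BSEL in
+ * max98373_dai_tdm_slot() from slots * slot_width instead.
+ */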
+
+static int max98373_dai_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct max98373_priv *max98373 = snd_soc_codec_get_drvdata(codec);
+ unsigned int sampling_rate = 0;
+ unsigned int chan_sz = 0;
+
+ /* pcm mode configuration */
+ switch (snd_pcm_format_width(params_format(params))) {
+ case 16:
+ chan_sz = MAX98373_PCM_MODE_CFG_CHANSZ_16;
+ break;
+ case 24:
+ chan_sz = MAX98373_PCM_MODE_CFG_CHANSZ_24;
+ break;
+ case 32:
+ chan_sz = MAX98373_PCM_MODE_CFG_CHANSZ_32;
+ break;
+ default:
+ dev_err(codec->dev, "format unsupported %d\n",
+ params_format(params));
+ goto err;
+ }
+
+ max98373->ch_size = snd_pcm_format_width(params_format(params));
+
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2024_PCM_DATA_FMT_CFG,
+ MAX98373_PCM_MODE_CFG_CHANSZ_MASK, chan_sz);
+
+ dev_dbg(codec->dev, "format supported %d",
+ params_format(params));
+
+ /* sampling rate configuration */
+ switch (params_rate(params)) {
+ case 8000:
+ sampling_rate = MAX98373_PCM_SR_SET1_SR_8000;
+ break;
+ case 11025:
+ sampling_rate = MAX98373_PCM_SR_SET1_SR_11025;
+ break;
+ case 12000:
+ sampling_rate = MAX98373_PCM_SR_SET1_SR_12000;
+ break;
+ case 16000:
+ sampling_rate = MAX98373_PCM_SR_SET1_SR_16000;
+ break;
+ case 22050:
+ sampling_rate = MAX98373_PCM_SR_SET1_SR_22050;
+ break;
+ case 24000:
+ sampling_rate = MAX98373_PCM_SR_SET1_SR_24000;
+ break;
+ case 32000:
+ sampling_rate = MAX98373_PCM_SR_SET1_SR_32000;
+ break;
+ case 44100:
+ sampling_rate = MAX98373_PCM_SR_SET1_SR_44100;
+ break;
+ case 48000:
+ sampling_rate = MAX98373_PCM_SR_SET1_SR_48000;
+ break;
+ default:
+ dev_err(codec->dev, "rate %d not supported\n",
+ params_rate(params));
+ goto err;
+ }
+
+ /* set DAI_SR to correct LRCLK frequency */
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2027_PCM_SR_SETUP_1,
+ MAX98373_PCM_SR_SET1_SR_MASK,
+ sampling_rate);
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2028_PCM_SR_SETUP_2,
+ MAX98373_PCM_SR_SET2_SR_MASK,
+ sampling_rate << MAX98373_PCM_SR_SET2_SR_SHIFT);
+
+ /* set sampling rate of IV */
+ if (max98373->interleave_mode &&
+ sampling_rate > MAX98373_PCM_SR_SET1_SR_16000)
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2028_PCM_SR_SETUP_2,
+ MAX98373_PCM_SR_SET2_IVADC_SR_MASK,
+ sampling_rate - 3);
+ else
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2028_PCM_SR_SETUP_2,
+ MAX98373_PCM_SR_SET2_IVADC_SR_MASK,
+ sampling_rate);
+
+ return max98373_set_clock(codec, params);
+err:
+ return -EINVAL;
+}
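+
+/*
+ * Note on the interleave branch above (our reading of the SR codes):
+ * the rate table repeats the 8/11.025/12 kHz pattern per octave, so
+ * "sampling_rate - 3" selects the code one octave down, i.e. the
+ * IV-sense ADC runs at half the PCM rate (48 kHz PCM -> 24 kHz IV)
+ * when V and I samples are interleaved on a single TX slot.
+ */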
+
+static int max98373_dai_tdm_slot(struct snd_soc_dai *dai,
+ unsigned int tx_mask, unsigned int rx_mask,
+ int slots, int slot_width)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct max98373_priv *max98373 = snd_soc_codec_get_drvdata(codec);
+ int bsel = 0;
+ unsigned int chan_sz = 0;
+ unsigned int mask;
+ int x, slot_found;
+
+ if (!tx_mask && !rx_mask && !slots && !slot_width)
+ max98373->tdm_mode = false;
+ else
+ max98373->tdm_mode = true;
+
+ /* BCLK configuration */
+ bsel = max98373_get_bclk_sel(slots * slot_width);
+ if (bsel == 0) {
+ dev_err(codec->dev, "BCLK %d not supported\n",
+ slots * slot_width);
+ return -EINVAL;
+ }
+
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2026_PCM_CLOCK_RATIO,
+ MAX98373_PCM_CLK_SETUP_BSEL_MASK,
+ bsel);
+
+ /* Channel size configuration */
+ switch (slot_width) {
+ case 16:
+ chan_sz = MAX98373_PCM_MODE_CFG_CHANSZ_16;
+ break;
+ case 24:
+ chan_sz = MAX98373_PCM_MODE_CFG_CHANSZ_24;
+ break;
+ case 32:
+ chan_sz = MAX98373_PCM_MODE_CFG_CHANSZ_32;
+ break;
+ default:
+ dev_err(codec->dev, "format unsupported %d\n",
+ slot_width);
+ return -EINVAL;
+ }
+
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2024_PCM_DATA_FMT_CFG,
+ MAX98373_PCM_MODE_CFG_CHANSZ_MASK, chan_sz);
+
+ /* Rx slot configuration */
+ slot_found = 0;
+ mask = rx_mask;
+ for (x = 0 ; x < 16 ; x++, mask >>= 1) {
+ if (mask & 0x1) {
+ if (slot_found == 0)
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2029_PCM_TO_SPK_MONO_MIX_1,
+ MAX98373_PCM_TO_SPK_CH0_SRC_MASK, x);
+ else
+ regmap_write(max98373->regmap,
+ MAX98373_R202A_PCM_TO_SPK_MONO_MIX_2,
+ x);
+ slot_found++;
+ if (slot_found > 1)
+ break;
+ }
+ }
+
+ /* Tx slot Hi-Z configuration */
+ regmap_write(max98373->regmap,
+ MAX98373_R2020_PCM_TX_HIZ_EN_1,
+ ~tx_mask & 0xFF);
+ regmap_write(max98373->regmap,
+ MAX98373_R2021_PCM_TX_HIZ_EN_2,
+ (~tx_mask & 0xFF00) >> 8);
+
+ return 0;
+}
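+
+/*
+ * Worked example for the Hi-Z writes above (illustrative masks): with
+ * tx_mask = 0x0003 (V on slot 0, I on slot 1), ~tx_mask & 0xFF = 0xFC
+ * tri-states TX slots 2-7 and (~tx_mask & 0xFF00) >> 8 = 0xFF
+ * tri-states slots 8-15, so only the claimed slots drive the bus.
+ */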
+
+#define MAX98373_RATES SNDRV_PCM_RATE_8000_96000
+
+#define MAX98373_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+static const struct snd_soc_dai_ops max98373_dai_ops = {
+ .set_fmt = max98373_dai_set_fmt,
+ .hw_params = max98373_dai_hw_params,
+ .set_tdm_slot = max98373_dai_tdm_slot,
+};
+
+static int max98373_dac_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct max98373_priv *max98373 = snd_soc_codec_get_drvdata(codec);
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R20FF_GLOBAL_SHDN,
+ MAX98373_GLOBAL_EN_MASK, 1);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R20FF_GLOBAL_SHDN,
+ MAX98373_GLOBAL_EN_MASK, 0);
+ max98373->tdm_mode = 0;
+ break;
+ default:
+ return 0;
+ }
+ return 0;
+}
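+
+/*
+ * The event handler above ties MAX98373_R20FF_GLOBAL_SHDN to the DAPM
+ * power sequence: global enable is set only after the "Amp Enable"
+ * widget powers up and cleared again after power-down, which also
+ * resets the cached tdm_mode so the next stream renegotiates clocking.
+ */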
+
+static const char * const max98373_switch_text[] = {
+ "Left", "Right", "LeftRight"};
+
+static const struct soc_enum dai_sel_enum =
+ SOC_ENUM_SINGLE(MAX98373_R2029_PCM_TO_SPK_MONO_MIX_1,
+ MAX98373_PCM_TO_SPK_MONOMIX_CFG_SHIFT,
+ 3, max98373_switch_text);
+
+static const struct snd_kcontrol_new max98373_dai_controls =
+ SOC_DAPM_ENUM("DAI Sel", dai_sel_enum);
+
+static const struct snd_kcontrol_new max98373_vi_control =
+ SOC_DAPM_SINGLE("Switch", MAX98373_R202C_PCM_TX_EN, 0, 1, 0);
+
+static const struct snd_kcontrol_new max98373_spkfb_control =
+ SOC_DAPM_SINGLE("Switch", MAX98373_R2043_AMP_EN, 1, 1, 0);
+
+static const struct snd_soc_dapm_widget max98373_dapm_widgets[] = {
+SND_SOC_DAPM_DAC_E("Amp Enable", "HiFi Playback",
+ MAX98373_R202B_PCM_RX_EN, 0, 0, max98373_dac_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+SND_SOC_DAPM_MUX("DAI Sel Mux", SND_SOC_NOPM, 0, 0,
+ &max98373_dai_controls),
+SND_SOC_DAPM_OUTPUT("BE_OUT"),
+SND_SOC_DAPM_AIF_OUT("Voltage Sense", "HiFi Capture", 0,
+ MAX98373_R2047_IV_SENSE_ADC_EN, 0, 0),
+SND_SOC_DAPM_AIF_OUT("Current Sense", "HiFi Capture", 0,
+ MAX98373_R2047_IV_SENSE_ADC_EN, 1, 0),
+SND_SOC_DAPM_AIF_OUT("Speaker FB Sense", "HiFi Capture", 0,
+ SND_SOC_NOPM, 0, 0),
+SND_SOC_DAPM_SWITCH("VI Sense", SND_SOC_NOPM, 0, 0,
+ &max98373_vi_control),
+SND_SOC_DAPM_SWITCH("SpkFB Sense", SND_SOC_NOPM, 0, 0,
+ &max98373_spkfb_control),
+SND_SOC_DAPM_SIGGEN("VMON"),
+SND_SOC_DAPM_SIGGEN("IMON"),
+SND_SOC_DAPM_SIGGEN("FBMON"),
+};
+
+static DECLARE_TLV_DB_SCALE(max98373_digital_tlv, 0, -50, 0);
+static const DECLARE_TLV_DB_RANGE(max98373_spk_tlv,
+ 0, 8, TLV_DB_SCALE_ITEM(0, 50, 0),
+ 9, 10, TLV_DB_SCALE_ITEM(500, 100, 0),
+);
+static const DECLARE_TLV_DB_RANGE(max98373_spkgain_max_tlv,
+ 0, 9, TLV_DB_SCALE_ITEM(800, 100, 0),
+);
+static const DECLARE_TLV_DB_RANGE(max98373_dht_step_size_tlv,
+ 0, 1, TLV_DB_SCALE_ITEM(25, 25, 0),
+ 2, 4, TLV_DB_SCALE_ITEM(100, 100, 0),
+);
+static const DECLARE_TLV_DB_RANGE(max98373_dht_spkgain_min_tlv,
+ 0, 9, TLV_DB_SCALE_ITEM(800, 100, 0),
+);
+static const DECLARE_TLV_DB_RANGE(max98373_dht_rotation_point_tlv,
+ 0, 1, TLV_DB_SCALE_ITEM(-50, -50, 0),
+ 2, 7, TLV_DB_SCALE_ITEM(-200, -100, 0),
+ 8, 9, TLV_DB_SCALE_ITEM(-1000, -200, 0),
+ 10, 11, TLV_DB_SCALE_ITEM(-1500, -300, 0),
+ 12, 13, TLV_DB_SCALE_ITEM(-2000, -200, 0),
+ 14, 15, TLV_DB_SCALE_ITEM(-2500, -500, 0),
+);
+static const DECLARE_TLV_DB_RANGE(max98373_limiter_thresh_tlv,
+ 0, 15, TLV_DB_SCALE_ITEM(0, -100, 0),
+);
+
+static const DECLARE_TLV_DB_RANGE(max98373_bde_gain_tlv,
+ 0, 60, TLV_DB_SCALE_ITEM(0, -25, 0),
+);
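+
+/*
+ * TLV sanity check (our arithmetic): max98373_digital_tlv starts at
+ * 0 dB with -0.5 dB steps, so value 0 is 0 dB and 0x7F is -63.5 dB;
+ * max98373_spk_tlv maps 0..8 to 0..+4 dB in 0.5 dB steps and 9..10
+ * to +5 and +6 dB.
+ */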
+
+static bool max98373_readable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX98373_R2001_INT_RAW1 ... MAX98373_R200C_INT_EN3:
+ case MAX98373_R2010_IRQ_CTRL:
+ case MAX98373_R2014_THERM_WARN_THRESH
+ ... MAX98373_R2018_THERM_FOLDBACK_EN:
+ case MAX98373_R201E_PIN_DRIVE_STRENGTH
+ ... MAX98373_R2036_SOUNDWIRE_CTRL:
+ case MAX98373_R203D_AMP_DIG_VOL_CTRL ... MAX98373_R2043_AMP_EN:
+ case MAX98373_R2046_IV_SENSE_ADC_DSP_CFG
+ ... MAX98373_R2047_IV_SENSE_ADC_EN:
+ case MAX98373_R2051_MEAS_ADC_SAMPLING_RATE
+ ... MAX98373_R2056_MEAS_ADC_PVDD_CH_EN:
+ case MAX98373_R2090_BDE_LVL_HOLD ... MAX98373_R2092_BDE_CLIPPER_MODE:
+ case MAX98373_R2097_BDE_L1_THRESH
+ ... MAX98373_R209B_BDE_THRESH_HYST:
+ case MAX98373_R20A8_BDE_L1_CFG_1 ... MAX98373_R20B3_BDE_L4_CFG_3:
+ case MAX98373_R20B5_BDE_EN ... MAX98373_R20B6_BDE_CUR_STATE_READBACK:
+ case MAX98373_R20D1_DHT_CFG ... MAX98373_R20D4_DHT_EN:
+ case MAX98373_R20E0_LIMITER_THRESH_CFG ... MAX98373_R20E2_LIMITER_EN:
+ case MAX98373_R20FE_DEVICE_AUTO_RESTART_CFG
+ ... MAX98373_R20FF_GLOBAL_SHDN:
+ case MAX98373_R21FF_REV_ID:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool max98373_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX98373_R2000_SW_RESET ... MAX98373_R2009_INT_FLAG3:
+ case MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK:
+ case MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK:
+ case MAX98373_R20B6_BDE_CUR_STATE_READBACK:
+ case MAX98373_R21FF_REV_ID:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const char * const max98373_output_voltage_lvl_text[] = {
+ "5.43V", "6.09V", "6.83V", "7.67V", "8.60V",
+ "9.65V", "10.83V", "12.15V", "13.63V", "15.29V"
+};
+
+static SOC_ENUM_SINGLE_DECL(max98373_out_volt_enum,
+ MAX98373_R203E_AMP_PATH_GAIN, 0,
+ max98373_output_voltage_lvl_text);
+
+static const char * const max98373_dht_attack_rate_text[] = {
+ "17.5us", "35us", "70us", "140us",
+ "280us", "560us", "1120us", "2240us"
+};
+
+static SOC_ENUM_SINGLE_DECL(max98373_dht_attack_rate_enum,
+ MAX98373_R20D2_DHT_ATTACK_CFG, 0,
+ max98373_dht_attack_rate_text);
+
+static const char * const max98373_dht_release_rate_text[] = {
+ "45ms", "225ms", "450ms", "1150ms",
+ "2250ms", "3100ms", "4500ms", "6750ms"
+};
+
+static SOC_ENUM_SINGLE_DECL(max98373_dht_release_rate_enum,
+ MAX98373_R20D3_DHT_RELEASE_CFG, 0,
+ max98373_dht_release_rate_text);
+
+static const char * const max98373_limiter_attack_rate_text[] = {
+ "10us", "20us", "40us", "80us",
+ "160us", "320us", "640us", "1.28ms",
+ "2.56ms", "5.12ms", "10.24ms", "20.48ms",
+ "40.96ms", "81.92ms", "16.384ms", "32.768ms"
+};
+
+static SOC_ENUM_SINGLE_DECL(max98373_limiter_attack_rate_enum,
+ MAX98373_R20E1_LIMITER_ATK_REL_RATES, 4,
+ max98373_limiter_attack_rate_text);
+
+static const char * const max98373_limiter_release_rate_text[] = {
+ "40us", "80us", "160us", "320us",
+ "640us", "1.28ms", "2.56ms", "5.120ms",
+ "10.24ms", "20.48ms", "40.96ms", "81.92ms",
+ "163.84ms", "327.68ms", "655.36ms", "1310.72ms"
+};
+
+static SOC_ENUM_SINGLE_DECL(max98373_limiter_release_rate_enum,
+ MAX98373_R20E1_LIMITER_ATK_REL_RATES, 0,
+ max98373_limiter_release_rate_text);
+
+static const char * const max98373_ADC_samplerate_text[] = {
+ "333kHz", "192kHz", "64kHz", "48kHz"
+};
+
+static SOC_ENUM_SINGLE_DECL(max98373_adc_samplerate_enum,
+ MAX98373_R2051_MEAS_ADC_SAMPLING_RATE, 0,
+ max98373_ADC_samplerate_text);
+
+static const struct snd_kcontrol_new max98373_snd_controls[] = {
+SOC_SINGLE("Digital Vol Sel Switch", MAX98373_R203F_AMP_DSP_CFG,
+ MAX98373_AMP_VOL_SEL_SHIFT, 1, 0),
+SOC_SINGLE("Volume Location Switch", MAX98373_R203F_AMP_DSP_CFG,
+ MAX98373_AMP_VOL_SEL_SHIFT, 1, 0),
+SOC_SINGLE("Ramp Up Switch", MAX98373_R203F_AMP_DSP_CFG,
+ MAX98373_AMP_DSP_CFG_RMP_UP_SHIFT, 1, 0),
+SOC_SINGLE("Ramp Down Switch", MAX98373_R203F_AMP_DSP_CFG,
+ MAX98373_AMP_DSP_CFG_RMP_DN_SHIFT, 1, 0),
+SOC_SINGLE("CLK Monitor Switch", MAX98373_R20FE_DEVICE_AUTO_RESTART_CFG,
+ MAX98373_CLOCK_MON_SHIFT, 1, 0),
+SOC_SINGLE("Dither Switch", MAX98373_R203F_AMP_DSP_CFG,
+ MAX98373_AMP_DSP_CFG_DITH_SHIFT, 1, 0),
+SOC_SINGLE("DC Blocker Switch", MAX98373_R203F_AMP_DSP_CFG,
+ MAX98373_AMP_DSP_CFG_DCBLK_SHIFT, 1, 0),
+SOC_SINGLE_TLV("Digital Volume", MAX98373_R203D_AMP_DIG_VOL_CTRL,
+ 0, 0x7F, 0, max98373_digital_tlv),
+SOC_SINGLE_TLV("Speaker Volume", MAX98373_R203E_AMP_PATH_GAIN,
+ MAX98373_SPK_DIGI_GAIN_SHIFT, 10, 0, max98373_spk_tlv),
+SOC_SINGLE_TLV("FS Max Volume", MAX98373_R203E_AMP_PATH_GAIN,
+ MAX98373_FS_GAIN_MAX_SHIFT, 9, 0, max98373_spkgain_max_tlv),
+SOC_ENUM("Output Voltage", max98373_out_volt_enum),
+/* Dynamic Headroom Tracking */
+SOC_SINGLE("DHT Switch", MAX98373_R20D4_DHT_EN,
+ MAX98373_DHT_EN_SHIFT, 1, 0),
+SOC_SINGLE_TLV("DHT Min Volume", MAX98373_R20D1_DHT_CFG,
+ MAX98373_DHT_SPK_GAIN_MIN_SHIFT, 9, 0, max98373_dht_spkgain_min_tlv),
+SOC_SINGLE_TLV("DHT Rot Pnt Volume", MAX98373_R20D1_DHT_CFG,
+ MAX98373_DHT_ROT_PNT_SHIFT, 15, 0, max98373_dht_rotation_point_tlv),
+SOC_SINGLE_TLV("DHT Attack Step Volume", MAX98373_R20D2_DHT_ATTACK_CFG,
+ MAX98373_DHT_ATTACK_STEP_SHIFT, 4, 0, max98373_dht_step_size_tlv),
+SOC_SINGLE_TLV("DHT Release Step Volume", MAX98373_R20D3_DHT_RELEASE_CFG,
+ MAX98373_DHT_RELEASE_STEP_SHIFT, 4, 0, max98373_dht_step_size_tlv),
+SOC_ENUM("DHT Attack Rate", max98373_dht_attack_rate_enum),
+SOC_ENUM("DHT Release Rate", max98373_dht_release_rate_enum),
+/* ADC configuration */
+SOC_SINGLE("ADC PVDD CH Switch", MAX98373_R2056_MEAS_ADC_PVDD_CH_EN, 0, 1, 0),
+SOC_SINGLE("ADC PVDD FLT Switch", MAX98373_R2052_MEAS_ADC_PVDD_FLT_CFG,
+ MAX98373_FLT_EN_SHIFT, 1, 0),
+SOC_SINGLE("ADC TEMP FLT Switch", MAX98373_R2053_MEAS_ADC_THERM_FLT_CFG,
+ MAX98373_FLT_EN_SHIFT, 1, 0),
+SOC_SINGLE("ADC PVDD", MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK, 0, 0xFF, 0),
+SOC_SINGLE("ADC TEMP", MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK, 0, 0xFF, 0),
+SOC_SINGLE("ADC PVDD FLT Coeff", MAX98373_R2052_MEAS_ADC_PVDD_FLT_CFG,
+ 0, 0x3, 0),
+SOC_SINGLE("ADC TEMP FLT Coeff", MAX98373_R2053_MEAS_ADC_THERM_FLT_CFG,
+ 0, 0x3, 0),
+SOC_ENUM("ADC SampleRate", max98373_adc_samplerate_enum),
+/* Brownout Detection Engine */
+SOC_SINGLE("BDE Switch", MAX98373_R20B5_BDE_EN, MAX98373_BDE_EN_SHIFT, 1, 0),
+SOC_SINGLE("BDE LVL4 Mute Switch", MAX98373_R20B2_BDE_L4_CFG_2,
+ MAX98373_LVL4_MUTE_EN_SHIFT, 1, 0),
+SOC_SINGLE("BDE LVL4 Hold Switch", MAX98373_R20B2_BDE_L4_CFG_2,
+ MAX98373_LVL4_HOLD_EN_SHIFT, 1, 0),
+SOC_SINGLE("BDE LVL1 Thresh", MAX98373_R2097_BDE_L1_THRESH, 0, 0xFF, 0),
+SOC_SINGLE("BDE LVL2 Thresh", MAX98373_R2098_BDE_L2_THRESH, 0, 0xFF, 0),
+SOC_SINGLE("BDE LVL3 Thresh", MAX98373_R2099_BDE_L3_THRESH, 0, 0xFF, 0),
+SOC_SINGLE("BDE LVL4 Thresh", MAX98373_R209A_BDE_L4_THRESH, 0, 0xFF, 0),
+SOC_SINGLE("BDE Active Level", MAX98373_R20B6_BDE_CUR_STATE_READBACK, 0, 8, 0),
+SOC_SINGLE("BDE Clip Mode Switch", MAX98373_R2092_BDE_CLIPPER_MODE, 0, 1, 0),
+SOC_SINGLE("BDE Thresh Hysteresis", MAX98373_R209B_BDE_THRESH_HYST, 0, 0xFF, 0),
+SOC_SINGLE("BDE Hold Time", MAX98373_R2090_BDE_LVL_HOLD, 0, 0xFF, 0),
+SOC_SINGLE("BDE Attack Rate", MAX98373_R2091_BDE_GAIN_ATK_REL_RATE, 4, 0xF, 0),
+SOC_SINGLE("BDE Release Rate", MAX98373_R2091_BDE_GAIN_ATK_REL_RATE, 0, 0xF, 0),
+SOC_SINGLE_TLV("BDE LVL1 Clip Thresh Volume", MAX98373_R20A9_BDE_L1_CFG_2,
+ 0, 0x3C, 0, max98373_bde_gain_tlv),
+SOC_SINGLE_TLV("BDE LVL2 Clip Thresh Volume", MAX98373_R20AC_BDE_L2_CFG_2,
+ 0, 0x3C, 0, max98373_bde_gain_tlv),
+SOC_SINGLE_TLV("BDE LVL3 Clip Thresh Volume", MAX98373_R20AF_BDE_L3_CFG_2,
+ 0, 0x3C, 0, max98373_bde_gain_tlv),
+SOC_SINGLE_TLV("BDE LVL4 Clip Thresh Volume", MAX98373_R20B2_BDE_L4_CFG_2,
+ 0, 0x3C, 0, max98373_bde_gain_tlv),
+SOC_SINGLE_TLV("BDE LVL1 Clip Reduction Volume", MAX98373_R20AA_BDE_L1_CFG_3,
+ 0, 0x3C, 0, max98373_bde_gain_tlv),
+SOC_SINGLE_TLV("BDE LVL2 Clip Reduction Volume", MAX98373_R20AD_BDE_L2_CFG_3,
+ 0, 0x3C, 0, max98373_bde_gain_tlv),
+SOC_SINGLE_TLV("BDE LVL3 Clip Reduction Volume", MAX98373_R20B0_BDE_L3_CFG_3,
+ 0, 0x3C, 0, max98373_bde_gain_tlv),
+SOC_SINGLE_TLV("BDE LVL4 Clip Reduction Volume", MAX98373_R20B3_BDE_L4_CFG_3,
+ 0, 0x3C, 0, max98373_bde_gain_tlv),
+SOC_SINGLE_TLV("BDE LVL1 Limiter Thresh Volume", MAX98373_R20A8_BDE_L1_CFG_1,
+ 0, 0xF, 0, max98373_limiter_thresh_tlv),
+SOC_SINGLE_TLV("BDE LVL2 Limiter Thresh Volume", MAX98373_R20AB_BDE_L2_CFG_1,
+ 0, 0xF, 0, max98373_limiter_thresh_tlv),
+SOC_SINGLE_TLV("BDE LVL3 Limiter Thresh Volume", MAX98373_R20AE_BDE_L3_CFG_1,
+ 0, 0xF, 0, max98373_limiter_thresh_tlv),
+SOC_SINGLE_TLV("BDE LVL4 Limiter Thresh Volume", MAX98373_R20B1_BDE_L4_CFG_1,
+ 0, 0xF, 0, max98373_limiter_thresh_tlv),
+/* Limiter */
+SOC_SINGLE("Limiter Switch", MAX98373_R20E2_LIMITER_EN,
+ MAX98373_LIMITER_EN_SHIFT, 1, 0),
+SOC_SINGLE("Limiter Src Switch", MAX98373_R20E0_LIMITER_THRESH_CFG,
+ MAX98373_LIMITER_THRESH_SRC_SHIFT, 1, 0),
+SOC_SINGLE_TLV("Limiter Thresh Volume", MAX98373_R20E0_LIMITER_THRESH_CFG,
+ MAX98373_LIMITER_THRESH_SHIFT, 15, 0, max98373_limiter_thresh_tlv),
+SOC_ENUM("Limiter Attack Rate", max98373_limiter_attack_rate_enum),
+SOC_ENUM("Limiter Release Rate", max98373_limiter_release_rate_enum),
+};
+
+static const struct snd_soc_dapm_route max98373_audio_map[] = {
+ /* Playback */
+ {"DAI Sel Mux", "Left", "Amp Enable"},
+ {"DAI Sel Mux", "Right", "Amp Enable"},
+ {"DAI Sel Mux", "LeftRight", "Amp Enable"},
+ {"BE_OUT", NULL, "DAI Sel Mux"},
+ /* Capture */
+ { "VI Sense", "Switch", "VMON" },
+ { "VI Sense", "Switch", "IMON" },
+ { "SpkFB Sense", "Switch", "FBMON" },
+ { "Voltage Sense", NULL, "VI Sense" },
+ { "Current Sense", NULL, "VI Sense" },
+ { "Speaker FB Sense", NULL, "SpkFB Sense" },
+};
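+
+/*
+ * Reading the capture routes above: VMON, IMON and FBMON are signal
+ * generators, so flipping the "VI Sense" or "SpkFB Sense" switch is
+ * what completes the path and powers the AIF_OUT widgets (and, via
+ * MAX98373_R2047_IV_SENSE_ADC_EN, the I/V sense ADCs).
+ */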
+
+static struct snd_soc_dai_driver max98373_dai[] = {
+ {
+ .name = "max98373-aif1",
+ .playback = {
+ .stream_name = "HiFi Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MAX98373_RATES,
+ .formats = MAX98373_FORMATS,
+ },
+ .capture = {
+ .stream_name = "HiFi Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MAX98373_RATES,
+ .formats = MAX98373_FORMATS,
+ },
+ .ops = &max98373_dai_ops,
+ }
+};
+
+static int max98373_probe(struct snd_soc_codec *codec)
+{
+ struct max98373_priv *max98373 = snd_soc_codec_get_drvdata(codec);
+
+ codec->control_data = max98373->regmap;
+
+ /* Software Reset */
+ regmap_write(max98373->regmap,
+ MAX98373_R2000_SW_RESET, MAX98373_SOFT_RESET);
+
+ /* IV default slot configuration */
+ regmap_write(max98373->regmap,
+ MAX98373_R2020_PCM_TX_HIZ_EN_1,
+ 0xFF);
+ regmap_write(max98373->regmap,
+ MAX98373_R2021_PCM_TX_HIZ_EN_2,
+ 0xFF);
+ /* L/R mix configuration */
+ regmap_write(max98373->regmap,
+ MAX98373_R2029_PCM_TO_SPK_MONO_MIX_1,
+ 0x80);
+ regmap_write(max98373->regmap,
+ MAX98373_R202A_PCM_TO_SPK_MONO_MIX_2,
+ 0x1);
+ /* Set initial volume (0dB) */
+ regmap_write(max98373->regmap,
+ MAX98373_R203D_AMP_DIG_VOL_CTRL,
+ 0x00);
+ regmap_write(max98373->regmap,
+ MAX98373_R203E_AMP_PATH_GAIN,
+ 0x00);
+ /* Enable DC blocker */
+ regmap_write(max98373->regmap,
+ MAX98373_R203F_AMP_DSP_CFG,
+ 0x3);
+ /* Enable IMON VMON DC blocker */
+ regmap_write(max98373->regmap,
+ MAX98373_R2046_IV_SENSE_ADC_DSP_CFG,
+ 0x7);
+ /* voltage, current slot configuration */
+ regmap_write(max98373->regmap,
+ MAX98373_R2022_PCM_TX_SRC_1,
+ (max98373->i_slot << MAX98373_PCM_TX_CH_SRC_A_I_SHIFT |
+ max98373->v_slot) & 0xFF);
+ if (max98373->v_slot < 8)
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2020_PCM_TX_HIZ_EN_1,
+ 1 << max98373->v_slot, 0);
+ else
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2021_PCM_TX_HIZ_EN_2,
+ 1 << (max98373->v_slot - 8), 0);
+
+ if (max98373->i_slot < 8)
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2020_PCM_TX_HIZ_EN_1,
+ 1 << max98373->i_slot, 0);
+ else
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2021_PCM_TX_HIZ_EN_2,
+ 1 << (max98373->i_slot - 8), 0);
+
+ /* speaker feedback slot configuration */
+ regmap_write(max98373->regmap,
+ MAX98373_R2023_PCM_TX_SRC_2,
+ max98373->spkfb_slot & 0xFF);
+
+ /* Set interleave mode */
+ if (max98373->interleave_mode)
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2024_PCM_DATA_FMT_CFG,
+ MAX98373_PCM_TX_CH_INTERLEAVE_MASK,
+ MAX98373_PCM_TX_CH_INTERLEAVE_MASK);
+
+ /* Speaker enable */
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2043_AMP_EN,
+ MAX98373_SPK_EN_MASK, 1);
+
+ return 0;
+}
+
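The two v_slot/i_slot branches above repeat the same Hi-Z unmasking, with only the register and bit position changing. A minimal sketch of a helper that could fold both cases, using only the register names and regmap calls already present in this patch (helper name hypothetical):

/* Hypothetical helper, not part of this patch: clear the TX Hi-Z
 * bit for one PCM slot. Slots 0-7 live in PCM_TX_HIZ_EN_1 and
 * slots 8-15 in PCM_TX_HIZ_EN_2.
 */
static void max98373_slot_unhiz(struct regmap *regmap, unsigned int slot)
{
	unsigned int reg = (slot < 8) ? MAX98373_R2020_PCM_TX_HIZ_EN_1
				      : MAX98373_R2021_PCM_TX_HIZ_EN_2;

	regmap_update_bits(regmap, reg, 1 << (slot % 8), 0);
}

With this, the probe path would call max98373_slot_unhiz() once for v_slot and once for i_slot.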
+#ifdef CONFIG_PM_SLEEP
+static int max98373_suspend(struct device *dev)
+{
+ struct max98373_priv *max98373 = dev_get_drvdata(dev);
+
+ regcache_cache_only(max98373->regmap, true);
+ regcache_mark_dirty(max98373->regmap);
+ return 0;
+}
+static int max98373_resume(struct device *dev)
+{
+ struct max98373_priv *max98373 = dev_get_drvdata(dev);
+
+ regmap_write(max98373->regmap,
+ MAX98373_R2000_SW_RESET, MAX98373_SOFT_RESET);
+ regcache_cache_only(max98373->regmap, false);
+ regcache_sync(max98373->regmap);
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops max98373_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(max98373_suspend, max98373_resume)
+};
+
+static const struct snd_soc_codec_driver soc_codec_dev_max98373 = {
+ .probe = max98373_probe,
+ .component_driver = {
+ .controls = max98373_snd_controls,
+ .num_controls = ARRAY_SIZE(max98373_snd_controls),
+ .dapm_widgets = max98373_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(max98373_dapm_widgets),
+ .dapm_routes = max98373_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(max98373_audio_map),
+ },
+};
+
+static const struct regmap_config max98373_regmap = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .max_register = MAX98373_R21FF_REV_ID,
+ .reg_defaults = max98373_reg,
+ .num_reg_defaults = ARRAY_SIZE(max98373_reg),
+ .readable_reg = max98373_readable_register,
+ .volatile_reg = max98373_volatile_reg,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static void max98373_slot_config(struct i2c_client *i2c,
+ struct max98373_priv *max98373)
+{
+ int value;
+ struct device *dev = &i2c->dev;
+
+ if (!device_property_read_u32(dev, "maxim,vmon-slot-no", &value))
+ max98373->v_slot = value & 0xF;
+ else
+ max98373->v_slot = 0;
+
+ if (!device_property_read_u32(dev, "maxim,imon-slot-no", &value))
+ max98373->i_slot = value & 0xF;
+ else
+ max98373->i_slot = 1;
+
+ if (!device_property_read_u32(dev, "maxim,spkfb-slot-no", &value))
+ max98373->spkfb_slot = value & 0xF;
+ else
+ max98373->spkfb_slot = 2;
+}
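Each property read above falls back to a fixed default and masks the value to four bits. A minimal sketch of that read-with-default idiom as a helper, assuming only device_property_read_u32() (helper name hypothetical):

static unsigned int max98373_slot_prop(struct device *dev,
				       const char *prop, unsigned int def)
{
	u32 value;

	/* device_property_read_u32() returns 0 on success. */
	if (device_property_read_u32(dev, prop, &value))
		return def;
	return value & 0xF;	/* slot numbers are 4-bit, as above */
}

max98373_slot_config() would then reduce to three one-line assignments.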
+
+static int max98373_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ int ret = 0;
+ int reg = 0;
+ struct max98373_priv *max98373;
+
+ max98373 = devm_kzalloc(&i2c->dev, sizeof(*max98373), GFP_KERNEL);
+ if (!max98373)
+ return -ENOMEM;
+ i2c_set_clientdata(i2c, max98373);
+
+ /* update interleave mode info */
+ if (device_property_read_bool(&i2c->dev, "maxim,interleave_mode"))
+ max98373->interleave_mode = 1;
+ else
+ max98373->interleave_mode = 0;
+
+ /* regmap initialization */
+ max98373->regmap
+ = devm_regmap_init_i2c(i2c, &max98373_regmap);
+ if (IS_ERR(max98373->regmap)) {
+ ret = PTR_ERR(max98373->regmap);
+ dev_err(&i2c->dev,
+ "Failed to allocate regmap: %d\n", ret);
+ return ret;
+ }
+
+ /* Check Revision ID */
+ ret = regmap_read(max98373->regmap,
+ MAX98373_R21FF_REV_ID, &reg);
+ if (ret < 0) {
+ dev_err(&i2c->dev,
+ "Failed to read: 0x%02X\n", MAX98373_R21FF_REV_ID);
+ return ret;
+ }
+ dev_info(&i2c->dev, "MAX98373 revision ID: 0x%02X\n", reg);
+
+ /* voltage/current slot configuration */
+ max98373_slot_config(i2c, max98373);
+
+ /* codec registration */
+ ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_max98373,
+ max98373_dai, ARRAY_SIZE(max98373_dai));
+ if (ret < 0)
+ dev_err(&i2c->dev, "Failed to register codec: %d\n", ret);
+
+ return ret;
+}
+
+static int max98373_i2c_remove(struct i2c_client *client)
+{
+ snd_soc_unregister_codec(&client->dev);
+ return 0;
+}
+
+static const struct i2c_device_id max98373_i2c_id[] = {
+ { "max98373", 0},
+ { },
+};
+
+MODULE_DEVICE_TABLE(i2c, max98373_i2c_id);
+
+#if defined(CONFIG_OF)
+static const struct of_device_id max98373_of_match[] = {
+ { .compatible = "maxim,max98373", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max98373_of_match);
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id max98373_acpi_match[] = {
+ { "MX98373", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, max98373_acpi_match);
+#endif
+
+static struct i2c_driver max98373_i2c_driver = {
+ .driver = {
+ .name = "max98373",
+ .of_match_table = of_match_ptr(max98373_of_match),
+ .acpi_match_table = ACPI_PTR(max98373_acpi_match),
+ .pm = &max98373_pm,
+ },
+ .probe = max98373_i2c_probe,
+ .remove = max98373_i2c_remove,
+ .id_table = max98373_i2c_id,
+};
+
+module_i2c_driver(max98373_i2c_driver);
+
+MODULE_DESCRIPTION("ALSA SoC MAX98373 driver");
+MODULE_AUTHOR("Ryan Lee <ryans.lee@maximintegrated.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/max98373.h b/sound/soc/codecs/max98373.h
new file mode 100644
index 000000000000..d0b359d0cf8c
--- /dev/null
+++ b/sound/soc/codecs/max98373.h
@@ -0,0 +1,212 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2017, Maxim Integrated */
+#ifndef _MAX98373_H
+#define _MAX98373_H
+
+#define MAX98373_R2000_SW_RESET 0x2000
+#define MAX98373_R2001_INT_RAW1 0x2001
+#define MAX98373_R2002_INT_RAW2 0x2002
+#define MAX98373_R2003_INT_RAW3 0x2003
+#define MAX98373_R2004_INT_STATE1 0x2004
+#define MAX98373_R2005_INT_STATE2 0x2005
+#define MAX98373_R2006_INT_STATE3 0x2006
+#define MAX98373_R2007_INT_FLAG1 0x2007
+#define MAX98373_R2008_INT_FLAG2 0x2008
+#define MAX98373_R2009_INT_FLAG3 0x2009
+#define MAX98373_R200A_INT_EN1 0x200A
+#define MAX98373_R200B_INT_EN2 0x200B
+#define MAX98373_R200C_INT_EN3 0x200C
+#define MAX98373_R200D_INT_FLAG_CLR1 0x200D
+#define MAX98373_R200E_INT_FLAG_CLR2 0x200E
+#define MAX98373_R200F_INT_FLAG_CLR3 0x200F
+#define MAX98373_R2010_IRQ_CTRL 0x2010
+#define MAX98373_R2014_THERM_WARN_THRESH 0x2014
+#define MAX98373_R2015_THERM_SHDN_THRESH 0x2015
+#define MAX98373_R2016_THERM_HYSTERESIS 0x2016
+#define MAX98373_R2017_THERM_FOLDBACK_SET 0x2017
+#define MAX98373_R2018_THERM_FOLDBACK_EN 0x2018
+#define MAX98373_R201E_PIN_DRIVE_STRENGTH 0x201E
+#define MAX98373_R2020_PCM_TX_HIZ_EN_1 0x2020
+#define MAX98373_R2021_PCM_TX_HIZ_EN_2 0x2021
+#define MAX98373_R2022_PCM_TX_SRC_1 0x2022
+#define MAX98373_R2023_PCM_TX_SRC_2 0x2023
+#define MAX98373_R2024_PCM_DATA_FMT_CFG 0x2024
+#define MAX98373_R2025_AUDIO_IF_MODE 0x2025
+#define MAX98373_R2026_PCM_CLOCK_RATIO 0x2026
+#define MAX98373_R2027_PCM_SR_SETUP_1 0x2027
+#define MAX98373_R2028_PCM_SR_SETUP_2 0x2028
+#define MAX98373_R2029_PCM_TO_SPK_MONO_MIX_1 0x2029
+#define MAX98373_R202A_PCM_TO_SPK_MONO_MIX_2 0x202A
+#define MAX98373_R202B_PCM_RX_EN 0x202B
+#define MAX98373_R202C_PCM_TX_EN 0x202C
+#define MAX98373_R202E_ICC_RX_CH_EN_1 0x202E
+#define MAX98373_R202F_ICC_RX_CH_EN_2 0x202F
+#define MAX98373_R2030_ICC_TX_HIZ_EN_1 0x2030
+#define MAX98373_R2031_ICC_TX_HIZ_EN_2 0x2031
+#define MAX98373_R2032_ICC_LINK_EN_CFG 0x2032
+#define MAX98373_R2034_ICC_TX_CNTL 0x2034
+#define MAX98373_R2035_ICC_TX_EN 0x2035
+#define MAX98373_R2036_SOUNDWIRE_CTRL 0x2036
+#define MAX98373_R203D_AMP_DIG_VOL_CTRL 0x203D
+#define MAX98373_R203E_AMP_PATH_GAIN 0x203E
+#define MAX98373_R203F_AMP_DSP_CFG 0x203F
+#define MAX98373_R2040_TONE_GEN_CFG 0x2040
+#define MAX98373_R2041_AMP_CFG 0x2041
+#define MAX98373_R2042_AMP_EDGE_RATE_CFG 0x2042
+#define MAX98373_R2043_AMP_EN 0x2043
+#define MAX98373_R2046_IV_SENSE_ADC_DSP_CFG 0x2046
+#define MAX98373_R2047_IV_SENSE_ADC_EN 0x2047
+#define MAX98373_R2051_MEAS_ADC_SAMPLING_RATE 0x2051
+#define MAX98373_R2052_MEAS_ADC_PVDD_FLT_CFG 0x2052
+#define MAX98373_R2053_MEAS_ADC_THERM_FLT_CFG 0x2053
+#define MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK 0x2054
+#define MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK 0x2055
+#define MAX98373_R2056_MEAS_ADC_PVDD_CH_EN 0x2056
+#define MAX98373_R2090_BDE_LVL_HOLD 0x2090
+#define MAX98373_R2091_BDE_GAIN_ATK_REL_RATE 0x2091
+#define MAX98373_R2092_BDE_CLIPPER_MODE 0x2092
+#define MAX98373_R2097_BDE_L1_THRESH 0x2097
+#define MAX98373_R2098_BDE_L2_THRESH 0x2098
+#define MAX98373_R2099_BDE_L3_THRESH 0x2099
+#define MAX98373_R209A_BDE_L4_THRESH 0x209A
+#define MAX98373_R209B_BDE_THRESH_HYST 0x209B
+#define MAX98373_R20A8_BDE_L1_CFG_1 0x20A8
+#define MAX98373_R20A9_BDE_L1_CFG_2 0x20A9
+#define MAX98373_R20AA_BDE_L1_CFG_3 0x20AA
+#define MAX98373_R20AB_BDE_L2_CFG_1 0x20AB
+#define MAX98373_R20AC_BDE_L2_CFG_2 0x20AC
+#define MAX98373_R20AD_BDE_L2_CFG_3 0x20AD
+#define MAX98373_R20AE_BDE_L3_CFG_1 0x20AE
+#define MAX98373_R20AF_BDE_L3_CFG_2 0x20AF
+#define MAX98373_R20B0_BDE_L3_CFG_3 0x20B0
+#define MAX98373_R20B1_BDE_L4_CFG_1 0x20B1
+#define MAX98373_R20B2_BDE_L4_CFG_2 0x20B2
+#define MAX98373_R20B3_BDE_L4_CFG_3 0x20B3
+#define MAX98373_R20B4_BDE_INFINITE_HOLD_RELEASE 0x20B4
+#define MAX98373_R20B5_BDE_EN 0x20B5
+#define MAX98373_R20B6_BDE_CUR_STATE_READBACK 0x20B6
+#define MAX98373_R20D1_DHT_CFG 0x20D1
+#define MAX98373_R20D2_DHT_ATTACK_CFG 0x20D2
+#define MAX98373_R20D3_DHT_RELEASE_CFG 0x20D3
+#define MAX98373_R20D4_DHT_EN 0x20D4
+#define MAX98373_R20E0_LIMITER_THRESH_CFG 0x20E0
+#define MAX98373_R20E1_LIMITER_ATK_REL_RATES 0x20E1
+#define MAX98373_R20E2_LIMITER_EN 0x20E2
+#define MAX98373_R20FE_DEVICE_AUTO_RESTART_CFG 0x20FE
+#define MAX98373_R20FF_GLOBAL_SHDN 0x20FF
+#define MAX98373_R21FF_REV_ID 0x21FF
+
+/* MAX98373_R2022_PCM_TX_SRC_1 */
+#define MAX98373_PCM_TX_CH_SRC_A_V_SHIFT (0)
+#define MAX98373_PCM_TX_CH_SRC_A_I_SHIFT (4)
+
+/* MAX98373_R2024_PCM_DATA_FMT_CFG */
+#define MAX98373_PCM_MODE_CFG_FORMAT_MASK (0x7 << 3)
+#define MAX98373_PCM_MODE_CFG_FORMAT_SHIFT (3)
+#define MAX98373_PCM_TX_CH_INTERLEAVE_MASK (0x1 << 2)
+#define MAX98373_PCM_FORMAT_I2S (0x0 << 0)
+#define MAX98373_PCM_FORMAT_LJ (0x1 << 0)
+#define MAX98373_PCM_FORMAT_TDM_MODE0 (0x3 << 0)
+#define MAX98373_PCM_FORMAT_TDM_MODE1 (0x4 << 0)
+#define MAX98373_PCM_FORMAT_TDM_MODE2 (0x5 << 0)
+#define MAX98373_PCM_MODE_CFG_CHANSZ_MASK (0x3 << 6)
+#define MAX98373_PCM_MODE_CFG_CHANSZ_16 (0x1 << 6)
+#define MAX98373_PCM_MODE_CFG_CHANSZ_24 (0x2 << 6)
+#define MAX98373_PCM_MODE_CFG_CHANSZ_32 (0x3 << 6)
+
+/* MAX98373_R2026_PCM_CLOCK_RATIO */
+#define MAX98373_PCM_MODE_CFG_PCM_BCLKEDGE (0x1 << 4)
+#define MAX98373_PCM_CLK_SETUP_BSEL_MASK (0xF << 0)
+
+/* MAX98373_R2027_PCM_SR_SETUP_1 */
+#define MAX98373_PCM_SR_SET1_SR_MASK (0xF << 0)
+#define MAX98373_PCM_SR_SET1_SR_8000 (0x0 << 0)
+#define MAX98373_PCM_SR_SET1_SR_11025 (0x1 << 0)
+#define MAX98373_PCM_SR_SET1_SR_12000 (0x2 << 0)
+#define MAX98373_PCM_SR_SET1_SR_16000 (0x3 << 0)
+#define MAX98373_PCM_SR_SET1_SR_22050 (0x4 << 0)
+#define MAX98373_PCM_SR_SET1_SR_24000 (0x5 << 0)
+#define MAX98373_PCM_SR_SET1_SR_32000 (0x6 << 0)
+#define MAX98373_PCM_SR_SET1_SR_44100 (0x7 << 0)
+#define MAX98373_PCM_SR_SET1_SR_48000 (0x8 << 0)
+
+/* MAX98373_R2028_PCM_SR_SETUP_2 */
+#define MAX98373_PCM_SR_SET2_SR_MASK (0xF << 4)
+#define MAX98373_PCM_SR_SET2_SR_SHIFT (4)
+#define MAX98373_PCM_SR_SET2_IVADC_SR_MASK (0xF << 0)
+
+/* MAX98373_R2029_PCM_TO_SPK_MONO_MIX_1 */
+#define MAX98373_PCM_TO_SPK_MONOMIX_CFG_MASK (0x3 << 6)
+#define MAX98373_PCM_TO_SPK_MONOMIX_CFG_SHIFT (6)
+#define MAX98373_PCM_TO_SPK_CH0_SRC_MASK (0xF << 0)
+
+/* MAX98373_R203E_AMP_PATH_GAIN */
+#define MAX98373_SPK_DIGI_GAIN_MASK (0xF << 4)
+#define MAX98373_SPK_DIGI_GAIN_SHIFT (4)
+#define MAX98373_FS_GAIN_MAX_MASK (0xF << 0)
+#define MAX98373_FS_GAIN_MAX_SHIFT (0)
+
+/* MAX98373_R203F_AMP_DSP_CFG */
+#define MAX98373_AMP_DSP_CFG_DCBLK_SHIFT (0)
+#define MAX98373_AMP_DSP_CFG_DITH_SHIFT (1)
+#define MAX98373_AMP_DSP_CFG_RMP_UP_SHIFT (2)
+#define MAX98373_AMP_DSP_CFG_RMP_DN_SHIFT (3)
+#define MAX98373_AMP_DSP_CFG_DAC_INV_SHIFT (5)
+#define MAX98373_AMP_VOL_SEL_SHIFT (7)
+
+/* MAX98373_R2043_AMP_EN */
+#define MAX98373_SPKFB_EN_MASK (0x1 << 1)
+#define MAX98373_SPK_EN_MASK (0x1 << 0)
+#define MAX98373_SPKFB_EN_SHIFT (1)
+
+/* MAX98373_R2052_MEAS_ADC_PVDD_FLT_CFG */
+#define MAX98373_FLT_EN_SHIFT (4)
+
+/* MAX98373_R20B2_BDE_L4_CFG_2 */
+#define MAX98373_LVL4_MUTE_EN_SHIFT (7)
+#define MAX98373_LVL4_HOLD_EN_SHIFT (6)
+
+/* MAX98373_R20B5_BDE_EN */
+#define MAX98373_BDE_EN_SHIFT (0)
+
+/* MAX98373_R20D1_DHT_CFG */
+#define MAX98373_DHT_SPK_GAIN_MIN_SHIFT (4)
+#define MAX98373_DHT_ROT_PNT_SHIFT (0)
+
+/* MAX98373_R20D2_DHT_ATTACK_CFG */
+#define MAX98373_DHT_ATTACK_STEP_SHIFT (3)
+#define MAX98373_DHT_ATTACK_RATE_SHIFT (0)
+
+/* MAX98373_R20D3_DHT_RELEASE_CFG */
+#define MAX98373_DHT_RELEASE_STEP_SHIFT (3)
+#define MAX98373_DHT_RELEASE_RATE_SHIFT (0)
+
+/* MAX98373_R20D4_DHT_EN */
+#define MAX98373_DHT_EN_SHIFT (0)
+
+/* MAX98373_R20E0_LIMITER_THRESH_CFG */
+#define MAX98373_LIMITER_THRESH_SHIFT (2)
+#define MAX98373_LIMITER_THRESH_SRC_SHIFT (0)
+
+/* MAX98373_R20E2_LIMITER_EN */
+#define MAX98373_LIMITER_EN_SHIFT (0)
+
+/* MAX98373_R20FE_DEVICE_AUTO_RESTART_CFG */
+#define MAX98373_CLOCK_MON_SHIFT (0)
+
+/* MAX98373_R20FF_GLOBAL_SHDN */
+#define MAX98373_GLOBAL_EN_MASK (0x1 << 0)
+
+/* MAX98373_R2000_SW_RESET */
+#define MAX98373_SOFT_RESET (0x1 << 0)
+
+struct max98373_priv {
+ struct regmap *regmap;
+ unsigned int v_slot;
+ unsigned int i_slot;
+ unsigned int spkfb_slot;
+ bool interleave_mode;
+ unsigned int ch_size;
+ bool tdm_mode;
+};
+#endif
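As a quick sanity check of the bitfield macros, composing the PCM_TX_SRC_1 value written in max98373_probe() with the default slots from max98373_slot_config() (v_slot = 0, i_slot = 1) gives:

/* Worked example only, using the defaults from the probe path:
 * (1 << MAX98373_PCM_TX_CH_SRC_A_I_SHIFT) == (1 << 4) == 0x10,
 * OR'd with v_slot 0 at shift 0 and masked to a byte: 0x10.
 */
unsigned int tx_src = ((1 << MAX98373_PCM_TX_CH_SRC_A_I_SHIFT) |
		       (0 << MAX98373_PCM_TX_CH_SRC_A_V_SHIFT)) & 0xFF;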
diff --git a/sound/soc/codecs/max98926.c b/sound/soc/codecs/max98926.c
index 03d07bf4d942..7b1d1b0fa879 100644
--- a/sound/soc/codecs/max98926.c
+++ b/sound/soc/codecs/max98926.c
@@ -490,7 +490,7 @@ static int max98926_probe(struct snd_soc_codec *codec)
struct max98926_priv *max98926 = snd_soc_codec_get_drvdata(codec);
max98926->codec = codec;
- codec->control_data = max98926->regmap;
+
/* Hi-Z all the slots */
regmap_write(max98926->regmap, MAX98926_DOUT_HIZ_CFG4, 0xF0);
return 0;
diff --git a/sound/soc/codecs/max98927.c b/sound/soc/codecs/max98927.c
index a1d39353719d..f701fdc81175 100644
--- a/sound/soc/codecs/max98927.c
+++ b/sound/soc/codecs/max98927.c
@@ -682,7 +682,6 @@ static int max98927_probe(struct snd_soc_codec *codec)
struct max98927_priv *max98927 = snd_soc_codec_get_drvdata(codec);
max98927->codec = codec;
- codec->control_data = max98927->regmap;
/* Software Reset */
regmap_write(max98927->regmap,
diff --git a/sound/soc/codecs/mc13783.c b/sound/soc/codecs/mc13783.c
index 4fd8d1dc4eef..be7a45f05bbf 100644
--- a/sound/soc/codecs/mc13783.c
+++ b/sound/soc/codecs/mc13783.c
@@ -610,6 +610,9 @@ static int mc13783_probe(struct snd_soc_codec *codec)
{
struct mc13783_priv *priv = snd_soc_codec_get_drvdata(codec);
+ snd_soc_codec_init_regmap(codec,
+ dev_get_regmap(codec->dev->parent, NULL));
+
/* these are the reset values */
mc13xxx_reg_write(priv->mc13xxx, MC13783_AUDIO_RX0, 0x25893);
mc13xxx_reg_write(priv->mc13xxx, MC13783_AUDIO_RX1, 0x00d35A);
@@ -728,15 +731,9 @@ static struct snd_soc_dai_driver mc13783_dai_sync[] = {
}
};
-static struct regmap *mc13783_get_regmap(struct device *dev)
-{
- return dev_get_regmap(dev->parent, NULL);
-}
-
static const struct snd_soc_codec_driver soc_codec_dev_mc13783 = {
.probe = mc13783_probe,
.remove = mc13783_remove,
- .get_regmap = mc13783_get_regmap,
.component_driver = {
.controls = mc13783_control_list,
.num_controls = ARRAY_SIZE(mc13783_control_list),
diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c
index 066ea2f4ce7b..44062bb7bf2f 100644
--- a/sound/soc/codecs/msm8916-wcd-analog.c
+++ b/sound/soc/codecs/msm8916-wcd-analog.c
@@ -712,6 +712,8 @@ static int pm8916_wcd_analog_probe(struct snd_soc_codec *codec)
return err;
}
+ snd_soc_codec_init_regmap(codec,
+ dev_get_regmap(codec->dev->parent, NULL));
snd_soc_codec_set_drvdata(codec, priv);
priv->pmic_rev = snd_soc_read(codec, CDC_D_REVISION1);
priv->codec_version = snd_soc_read(codec, CDC_D_PERPH_SUBTYPE);
@@ -943,11 +945,6 @@ static int pm8916_wcd_analog_set_jack(struct snd_soc_codec *codec,
return 0;
}
-static struct regmap *pm8916_get_regmap(struct device *dev)
-{
- return dev_get_regmap(dev->parent, NULL);
-}
-
static irqreturn_t mbhc_btn_release_irq_handler(int irq, void *arg)
{
struct pm8916_wcd_analog_priv *priv = arg;
@@ -1082,7 +1079,6 @@ static const struct snd_soc_codec_driver pm8916_wcd_analog = {
.probe = pm8916_wcd_analog_probe,
.remove = pm8916_wcd_analog_remove,
.set_jack = pm8916_wcd_analog_set_jack,
- .get_regmap = pm8916_get_regmap,
.component_driver = {
.controls = pm8916_wcd_analog_snd_controls,
.num_controls = ARRAY_SIZE(pm8916_wcd_analog_snd_controls),
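Both MFD children above (mc13783 and msm8916-wcd-analog) now resolve the parent's regmap at probe time instead of through a .get_regmap callback. A minimal sketch of the replacement pattern, assuming a codec whose parent device has registered a regmap (codec name hypothetical):

static int foo_codec_probe(struct snd_soc_codec *codec)
{
	/* Attach the parent MFD's regmap to the codec up front,
	 * replacing the removed .get_regmap callback.
	 */
	snd_soc_codec_init_regmap(codec,
			dev_get_regmap(codec->dev->parent, NULL));
	return 0;
}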
diff --git a/sound/soc/codecs/nau8540.c b/sound/soc/codecs/nau8540.c
index f9c9933acffb..b08fb7e243c3 100644
--- a/sound/soc/codecs/nau8540.c
+++ b/sound/soc/codecs/nau8540.c
@@ -233,6 +233,41 @@ static SOC_ENUM_SINGLE_DECL(
static const struct snd_kcontrol_new digital_ch1_mux =
SOC_DAPM_ENUM("Digital CH1 Select", digital_ch1_enum);
+static int adc_power_control(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct nau8540 *nau8540 = snd_soc_codec_get_drvdata(codec);
+
+ if (SND_SOC_DAPM_EVENT_ON(event)) {
+ msleep(300);
+ /* DO12 and DO34 pad output enable */
+ regmap_update_bits(nau8540->regmap, NAU8540_REG_PCM_CTRL1,
+ NAU8540_I2S_DO12_TRI, 0);
+ regmap_update_bits(nau8540->regmap, NAU8540_REG_PCM_CTRL2,
+ NAU8540_I2S_DO34_TRI, 0);
+ } else if (SND_SOC_DAPM_EVENT_OFF(event)) {
+ regmap_update_bits(nau8540->regmap, NAU8540_REG_PCM_CTRL1,
+ NAU8540_I2S_DO12_TRI, NAU8540_I2S_DO12_TRI);
+ regmap_update_bits(nau8540->regmap, NAU8540_REG_PCM_CTRL2,
+ NAU8540_I2S_DO34_TRI, NAU8540_I2S_DO34_TRI);
+ }
+ return 0;
+}
+
+static int aiftx_power_control(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct nau8540 *nau8540 = snd_soc_codec_get_drvdata(codec);
+
+ if (SND_SOC_DAPM_EVENT_OFF(event)) {
+ regmap_write(nau8540->regmap, NAU8540_REG_RST, 0x0001);
+ regmap_write(nau8540->regmap, NAU8540_REG_RST, 0x0000);
+ }
+ return 0;
+}
+
static const struct snd_soc_dapm_widget nau8540_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("MICBIAS2", NAU8540_REG_MIC_BIAS, 11, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("MICBIAS1", NAU8540_REG_MIC_BIAS, 10, 0, NULL, 0),
@@ -247,14 +282,18 @@ static const struct snd_soc_dapm_widget nau8540_dapm_widgets[] = {
SND_SOC_DAPM_PGA("Frontend PGA3", NAU8540_REG_PWR, 14, 0, NULL, 0),
SND_SOC_DAPM_PGA("Frontend PGA4", NAU8540_REG_PWR, 15, 0, NULL, 0),
- SND_SOC_DAPM_ADC("ADC1", NULL,
- NAU8540_REG_POWER_MANAGEMENT, 0, 0),
- SND_SOC_DAPM_ADC("ADC2", NULL,
- NAU8540_REG_POWER_MANAGEMENT, 1, 0),
- SND_SOC_DAPM_ADC("ADC3", NULL,
- NAU8540_REG_POWER_MANAGEMENT, 2, 0),
- SND_SOC_DAPM_ADC("ADC4", NULL,
- NAU8540_REG_POWER_MANAGEMENT, 3, 0),
+ SND_SOC_DAPM_ADC_E("ADC1", NULL,
+ NAU8540_REG_POWER_MANAGEMENT, 0, 0, adc_power_control,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+ SND_SOC_DAPM_ADC_E("ADC2", NULL,
+ NAU8540_REG_POWER_MANAGEMENT, 1, 0, adc_power_control,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+ SND_SOC_DAPM_ADC_E("ADC3", NULL,
+ NAU8540_REG_POWER_MANAGEMENT, 2, 0, adc_power_control,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+ SND_SOC_DAPM_ADC_E("ADC4", NULL,
+ NAU8540_REG_POWER_MANAGEMENT, 3, 0, adc_power_control,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
SND_SOC_DAPM_PGA("ADC CH1", NAU8540_REG_ANALOG_PWR, 0, 0, NULL, 0),
SND_SOC_DAPM_PGA("ADC CH2", NAU8540_REG_ANALOG_PWR, 1, 0, NULL, 0),
@@ -270,7 +309,8 @@ static const struct snd_soc_dapm_widget nau8540_dapm_widgets[] = {
SND_SOC_DAPM_MUX("Digital CH1 Mux",
SND_SOC_NOPM, 0, 0, &digital_ch1_mux),
- SND_SOC_DAPM_AIF_OUT("AIFTX", "Capture", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT_E("AIFTX", "Capture", 0, SND_SOC_NOPM, 0, 0,
+ aiftx_power_control, SND_SOC_DAPM_POST_PMD),
};
static const struct snd_soc_dapm_route nau8540_dapm_routes[] = {
@@ -575,7 +615,8 @@ static void nau8540_fll_apply(struct regmap *regmap,
NAU8540_CLK_SRC_MASK | NAU8540_CLK_MCLK_SRC_MASK,
NAU8540_CLK_SRC_MCLK | fll_param->mclk_src);
regmap_update_bits(regmap, NAU8540_REG_FLL1,
- NAU8540_FLL_RATIO_MASK, fll_param->ratio);
+ NAU8540_FLL_RATIO_MASK | NAU8540_ICTRL_LATCH_MASK,
+ fll_param->ratio | (0x6 << NAU8540_ICTRL_LATCH_SFT));
/* FLL 16-bit fractional input */
regmap_write(regmap, NAU8540_REG_FLL2, fll_param->fll_frac);
/* FLL 10-bit integer input */
@@ -596,13 +637,14 @@ static void nau8540_fll_apply(struct regmap *regmap,
NAU8540_FLL_PDB_DAC_EN | NAU8540_FLL_LOOP_FTR_EN |
NAU8540_FLL_FTR_SW_FILTER);
regmap_update_bits(regmap, NAU8540_REG_FLL6,
- NAU8540_SDM_EN, NAU8540_SDM_EN);
+ NAU8540_SDM_EN | NAU8540_CUTOFF500,
+ NAU8540_SDM_EN | NAU8540_CUTOFF500);
} else {
regmap_update_bits(regmap, NAU8540_REG_FLL5,
NAU8540_FLL_PDB_DAC_EN | NAU8540_FLL_LOOP_FTR_EN |
NAU8540_FLL_FTR_SW_MASK, NAU8540_FLL_FTR_SW_ACCU);
- regmap_update_bits(regmap,
- NAU8540_REG_FLL6, NAU8540_SDM_EN, 0);
+ regmap_update_bits(regmap, NAU8540_REG_FLL6,
+ NAU8540_SDM_EN | NAU8540_CUTOFF500, 0);
}
}
@@ -617,17 +659,22 @@ static int nau8540_set_pll(struct snd_soc_codec *codec, int pll_id, int source,
switch (pll_id) {
case NAU8540_CLK_FLL_MCLK:
regmap_update_bits(nau8540->regmap, NAU8540_REG_FLL3,
- NAU8540_FLL_CLK_SRC_MASK, NAU8540_FLL_CLK_SRC_MCLK);
+ NAU8540_FLL_CLK_SRC_MASK | NAU8540_GAIN_ERR_MASK,
+ NAU8540_FLL_CLK_SRC_MCLK | 0);
break;
case NAU8540_CLK_FLL_BLK:
regmap_update_bits(nau8540->regmap, NAU8540_REG_FLL3,
- NAU8540_FLL_CLK_SRC_MASK, NAU8540_FLL_CLK_SRC_BLK);
+ NAU8540_FLL_CLK_SRC_MASK | NAU8540_GAIN_ERR_MASK,
+ NAU8540_FLL_CLK_SRC_BLK |
+ (0xf << NAU8540_GAIN_ERR_SFT));
break;
case NAU8540_CLK_FLL_FS:
regmap_update_bits(nau8540->regmap, NAU8540_REG_FLL3,
- NAU8540_FLL_CLK_SRC_MASK, NAU8540_FLL_CLK_SRC_FS);
+ NAU8540_FLL_CLK_SRC_MASK | NAU8540_GAIN_ERR_MASK,
+ NAU8540_FLL_CLK_SRC_FS |
+ (0xf << NAU8540_GAIN_ERR_SFT));
break;
default:
@@ -710,9 +757,24 @@ static void nau8540_init_regs(struct nau8540 *nau8540)
regmap_update_bits(regmap, NAU8540_REG_CLOCK_CTRL,
NAU8540_CLK_ADC_EN | NAU8540_CLK_I2S_EN,
NAU8540_CLK_ADC_EN | NAU8540_CLK_I2S_EN);
- /* ADC OSR selection, CLK_ADC = Fs * OSR */
+ /* ADC OSR selection, CLK_ADC = Fs * OSR;
+ * Enable channel time alignment.
+ */
regmap_update_bits(regmap, NAU8540_REG_ADC_SAMPLE_RATE,
- NAU8540_ADC_OSR_MASK, NAU8540_ADC_OSR_64);
+ NAU8540_CH_SYNC | NAU8540_ADC_OSR_MASK,
+ NAU8540_CH_SYNC | NAU8540_ADC_OSR_64);
+ /* PGA input mode selection */
+ regmap_update_bits(regmap, NAU8540_REG_FEPGA1,
+ NAU8540_FEPGA1_MODCH2_SHT | NAU8540_FEPGA1_MODCH1_SHT,
+ NAU8540_FEPGA1_MODCH2_SHT | NAU8540_FEPGA1_MODCH1_SHT);
+ regmap_update_bits(regmap, NAU8540_REG_FEPGA2,
+ NAU8540_FEPGA2_MODCH4_SHT | NAU8540_FEPGA2_MODCH3_SHT,
+ NAU8540_FEPGA2_MODCH4_SHT | NAU8540_FEPGA2_MODCH3_SHT);
+ /* DO12 and DO34 pad output disable */
+ regmap_update_bits(regmap, NAU8540_REG_PCM_CTRL1,
+ NAU8540_I2S_DO12_TRI, NAU8540_I2S_DO12_TRI);
+ regmap_update_bits(regmap, NAU8540_REG_PCM_CTRL2,
+ NAU8540_I2S_DO34_TRI, NAU8540_I2S_DO34_TRI);
}
static int __maybe_unused nau8540_suspend(struct snd_soc_codec *codec)
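A worked example of the OSR comment in nau8540_init_regs(), assuming a hypothetical 48 kHz sample rate with the OSR_64 setting chosen there:

/* CLK_ADC = Fs * OSR: 48000 Hz * 64 = 3.072 MHz ADC clock. */
unsigned int fs = 48000, osr = 64;
unsigned int clk_adc = fs * osr;	/* 3072000 Hz */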
diff --git a/sound/soc/codecs/nau8540.h b/sound/soc/codecs/nau8540.h
index 5db5b224944d..732b490edf81 100644
--- a/sound/soc/codecs/nau8540.h
+++ b/sound/soc/codecs/nau8540.h
@@ -100,9 +100,13 @@
#define NAU8540_CLK_MCLK_SRC_MASK 0xf
/* FLL1 (0x04) */
+#define NAU8540_ICTRL_LATCH_SFT 10
+#define NAU8540_ICTRL_LATCH_MASK (0x7 << NAU8540_ICTRL_LATCH_SFT)
#define NAU8540_FLL_RATIO_MASK 0x7f
/* FLL3 (0x06) */
+#define NAU8540_GAIN_ERR_SFT 12
+#define NAU8540_GAIN_ERR_MASK (0xf << NAU8540_GAIN_ERR_SFT)
#define NAU8540_FLL_CLK_SRC_SFT 10
#define NAU8540_FLL_CLK_SRC_MASK (0x3 << NAU8540_FLL_CLK_SRC_SFT)
#define NAU8540_FLL_CLK_SRC_MCLK (0 << NAU8540_FLL_CLK_SRC_SFT)
@@ -127,6 +131,7 @@
/* FLL6 (0x9) */
#define NAU8540_DCO_EN (0x1 << 15)
#define NAU8540_SDM_EN (0x1 << 14)
+#define NAU8540_CUTOFF500 (0x1 << 13)
/* PCM_CTRL0 (0x10) */
#define NAU8540_I2S_BP_SFT 7
@@ -146,6 +151,7 @@
#define NAU8540_I2S_DF_PCM_AB 0x3
/* PCM_CTRL1 (0x11) */
+#define NAU8540_I2S_DO12_TRI (0x1 << 15)
#define NAU8540_I2S_LRC_DIV_SFT 12
#define NAU8540_I2S_LRC_DIV_MASK (0x3 << NAU8540_I2S_LRC_DIV_SFT)
#define NAU8540_I2S_DO12_OE (0x1 << 4)
@@ -156,6 +162,7 @@
#define NAU8540_I2S_BLK_DIV_MASK 0x7
 /* PCM_CTRL2 (0x12) */
+#define NAU8540_I2S_DO34_TRI (0x1 << 15)
#define NAU8540_I2S_DO34_OE (0x1 << 11)
#define NAU8540_I2S_TSLOT_L_MASK 0x3ff
@@ -165,6 +172,7 @@
#define NAU8540_TDM_TX_MASK 0xf
/* ADC_SAMPLE_RATE (0x3A) */
+#define NAU8540_CH_SYNC (0x1 << 14)
#define NAU8540_ADC_OSR_MASK 0x3
#define NAU8540_ADC_OSR_256 0x3
#define NAU8540_ADC_OSR_128 0x2
@@ -183,6 +191,18 @@
#define NAU8540_PRECHARGE_DIS (0x1 << 13)
#define NAU8540_GLOBAL_BIAS_EN (0x1 << 12)
+/* FEPGA1 (0x69) */
+#define NAU8540_FEPGA1_MODCH2_SHT_SFT 7
+#define NAU8540_FEPGA1_MODCH2_SHT (0x1 << NAU8540_FEPGA1_MODCH2_SHT_SFT)
+#define NAU8540_FEPGA1_MODCH1_SHT_SFT 3
+#define NAU8540_FEPGA1_MODCH1_SHT (0x1 << NAU8540_FEPGA1_MODCH1_SHT_SFT)
+
+/* FEPGA2 (0x6A) */
+#define NAU8540_FEPGA2_MODCH4_SHT_SFT 7
+#define NAU8540_FEPGA2_MODCH4_SHT (0x1 << NAU8540_FEPGA2_MODCH4_SHT_SFT)
+#define NAU8540_FEPGA2_MODCH3_SHT_SFT 3
+#define NAU8540_FEPGA2_MODCH3_SHT (0x1 << NAU8540_FEPGA2_MODCH3_SHT_SFT)
+
/* System Clock Source */
enum {
diff --git a/sound/soc/codecs/nau8824.c b/sound/soc/codecs/nau8824.c
index 0240759f951c..088e0cef4cb8 100644
--- a/sound/soc/codecs/nau8824.c
+++ b/sound/soc/codecs/nau8824.c
@@ -43,7 +43,7 @@ static bool nau8824_is_jack_inserted(struct nau8824 *nau8824);
/* the parameter threshold of FLL */
#define NAU_FREF_MAX 13500000
-#define NAU_FVCO_MAX 124000000
+#define NAU_FVCO_MAX 100000000
#define NAU_FVCO_MIN 90000000
/* scaling for mclk from sysclk_src output */
@@ -811,7 +811,8 @@ static void nau8824_eject_jack(struct nau8824 *nau8824)
NAU8824_JD_SLEEP_MODE, NAU8824_JD_SLEEP_MODE);
/* Close clock for jack type detection at manual mode */
- nau8824_config_sysclk(nau8824, NAU8824_CLK_DIS, 0);
+ if (dapm->bias_level < SND_SOC_BIAS_PREPARE)
+ nau8824_config_sysclk(nau8824, NAU8824_CLK_DIS, 0);
}
static void nau8824_jdet_work(struct work_struct *work)
@@ -843,6 +844,11 @@ static void nau8824_jdet_work(struct work_struct *work)
event_mask |= SND_JACK_HEADSET;
snd_soc_jack_report(nau8824->jack, event, event_mask);
+ /* Enable the short key press and release interrupts. */
+ regmap_update_bits(regmap, NAU8824_REG_INTERRUPT_SETTING,
+ NAU8824_IRQ_KEY_RELEASE_DIS |
+ NAU8824_IRQ_KEY_SHORT_PRESS_DIS, 0);
+
nau8824_sema_release(nau8824);
}
@@ -850,15 +856,15 @@ static void nau8824_setup_auto_irq(struct nau8824 *nau8824)
{
struct regmap *regmap = nau8824->regmap;
- /* Enable jack ejection, short key press and release interruption. */
+ /* Enable the jack ejection interrupt. */
regmap_update_bits(regmap, NAU8824_REG_INTERRUPT_SETTING_1,
NAU8824_IRQ_INSERT_EN | NAU8824_IRQ_EJECT_EN,
NAU8824_IRQ_EJECT_EN);
regmap_update_bits(regmap, NAU8824_REG_INTERRUPT_SETTING,
- NAU8824_IRQ_EJECT_DIS | NAU8824_IRQ_KEY_RELEASE_DIS |
- NAU8824_IRQ_KEY_SHORT_PRESS_DIS, 0);
+ NAU8824_IRQ_EJECT_DIS, 0);
/* Enable internal VCO needed for interruptions */
- nau8824_config_sysclk(nau8824, NAU8824_CLK_INTERNAL, 0);
+ if (nau8824->dapm->bias_level < SND_SOC_BIAS_PREPARE)
+ nau8824_config_sysclk(nau8824, NAU8824_CLK_INTERNAL, 0);
regmap_update_bits(regmap, NAU8824_REG_ENA_CTRL,
NAU8824_JD_SLEEP_MODE, 0);
}
diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c
index e853a6dfd33b..a1b697b6fb64 100644
--- a/sound/soc/codecs/nau8825.c
+++ b/sound/soc/codecs/nau8825.c
@@ -194,10 +194,10 @@ static const struct reg_default nau8825_reg_defaults[] = {
/* register backup table when cross talk detection */
static struct reg_default nau8825_xtalk_baktab[] = {
- { NAU8825_REG_ADC_DGAIN_CTRL, 0 },
+ { NAU8825_REG_ADC_DGAIN_CTRL, 0x00cf },
{ NAU8825_REG_HSVOL_CTRL, 0 },
- { NAU8825_REG_DACL_CTRL, 0 },
- { NAU8825_REG_DACR_CTRL, 0 },
+ { NAU8825_REG_DACL_CTRL, 0x00cf },
+ { NAU8825_REG_DACR_CTRL, 0x02cf },
};
static const unsigned short logtable[256] = {
@@ -245,13 +245,14 @@ static const unsigned short logtable[256] = {
* tasks are allowed to acquire the semaphore, calling this function will
* put the task to sleep. If the semaphore is not released within the
* specified number of jiffies, this function returns.
- * Acquires the semaphore without jiffies. If no more tasks are allowed
- * to acquire the semaphore, calling this function will put the task to
- * sleep until the semaphore is released.
* If the semaphore is not released within the specified number of jiffies,
- * this function returns -ETIME.
- * If the sleep is interrupted by a signal, this function will return -EINTR.
- * It returns 0 if the semaphore was acquired successfully.
+ * this function returns -ETIME. If the sleep is interrupted by a signal,
+ * this function will return -EINTR. It returns 0 if the semaphore was
+ * acquired successfully.
+ *
+ * Acquires the semaphore without jiffies. Tries to take the semaphore
+ * atomically. Returns 0 if the semaphore has been acquired successfully,
+ * or 1 if it cannot be acquired.
*/
static int nau8825_sema_acquire(struct nau8825 *nau8825, long timeout)
{
@@ -262,8 +263,8 @@ static int nau8825_sema_acquire(struct nau8825 *nau8825, long timeout)
if (ret < 0)
dev_warn(nau8825->dev, "Acquire semaphore timeout\n");
} else {
- ret = down_interruptible(&nau8825->xtalk_sem);
- if (ret < 0)
+ ret = down_trylock(&nau8825->xtalk_sem);
+ if (ret)
dev_warn(nau8825->dev, "Acquire semaphore fail\n");
}
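The rewritten comment distinguishes the two acquisition paths. A minimal standalone sketch of the same pair of primitives, assuming a binary semaphore; everything here is illustrative, not driver code:

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/semaphore.h>

static DEFINE_SEMAPHORE(sem);	/* binary semaphore, count 1 */

static void sema_demo(void)
{
	/* Blocking path: sleep up to 3 seconds; -ETIME on timeout. */
	if (down_timeout(&sem, 3 * HZ) < 0)
		pr_warn("semaphore timeout\n");
	else
		up(&sem);

	/* Non-blocking path: 0 on success, 1 when already held. */
	if (down_trylock(&sem))
		pr_warn("semaphore busy\n");
	else
		up(&sem);
}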
@@ -454,22 +455,32 @@ static void nau8825_xtalk_backup(struct nau8825 *nau8825)
{
int i;
+ if (nau8825->xtalk_baktab_initialized)
+ return;
+
/* Backup some register values to backup table */
for (i = 0; i < ARRAY_SIZE(nau8825_xtalk_baktab); i++)
regmap_read(nau8825->regmap, nau8825_xtalk_baktab[i].reg,
&nau8825_xtalk_baktab[i].def);
+
+ nau8825->xtalk_baktab_initialized = true;
}
-static void nau8825_xtalk_restore(struct nau8825 *nau8825)
+static void nau8825_xtalk_restore(struct nau8825 *nau8825, bool cause_cancel)
{
int i, volume;
+ if (!nau8825->xtalk_baktab_initialized)
+ return;
+
/* Restore register values from backup table; When the driver restores
- * the headphone volumem, it needs recover to original level gradually
- * with 3dB per step for less pop noise.
+ * the headphone volume in the XTALK_DONE state, it needs to recover
+ * to the original level gradually, 3 dB per step, for less pop noise.
+ * Otherwise, the restore should happen immediately.
*/
for (i = 0; i < ARRAY_SIZE(nau8825_xtalk_baktab); i++) {
- if (nau8825_xtalk_baktab[i].reg == NAU8825_REG_HSVOL_CTRL) {
+ if (!cause_cancel && nau8825_xtalk_baktab[i].reg ==
+ NAU8825_REG_HSVOL_CTRL) {
/* Ramping up the volume change to reduce pop noise */
volume = nau8825_xtalk_baktab[i].def &
NAU8825_HPR_VOL_MASK;
@@ -479,6 +490,8 @@ static void nau8825_xtalk_restore(struct nau8825 *nau8825)
regmap_write(nau8825->regmap, nau8825_xtalk_baktab[i].reg,
nau8825_xtalk_baktab[i].def);
}
+
+ nau8825->xtalk_baktab_initialized = false;
}
static void nau8825_xtalk_prepare_dac(struct nau8825 *nau8825)
@@ -644,7 +657,7 @@ static void nau8825_xtalk_clean_adc(struct nau8825 *nau8825)
NAU8825_POWERUP_ADCL | NAU8825_ADC_VREFSEL_MASK, 0);
}
-static void nau8825_xtalk_clean(struct nau8825 *nau8825)
+static void nau8825_xtalk_clean(struct nau8825 *nau8825, bool cause_cancel)
{
/* Enable internal VCO needed for interruptions */
nau8825_configure_sysclk(nau8825, NAU8825_CLK_INTERNAL, 0);
@@ -660,7 +673,7 @@ static void nau8825_xtalk_clean(struct nau8825 *nau8825)
NAU8825_I2S_MS_MASK | NAU8825_I2S_LRC_DIV_MASK |
NAU8825_I2S_BLK_DIV_MASK, NAU8825_I2S_MS_SLAVE);
/* Restore value of specific register for cross talk */
- nau8825_xtalk_restore(nau8825);
+ nau8825_xtalk_restore(nau8825, cause_cancel);
}
static void nau8825_xtalk_imm_start(struct nau8825 *nau8825, int vol)
@@ -779,7 +792,7 @@ static void nau8825_xtalk_measure(struct nau8825 *nau8825)
dev_dbg(nau8825->dev, "cross talk sidetone: %x\n", sidetone);
regmap_write(nau8825->regmap, NAU8825_REG_DAC_DGAIN_CTRL,
(sidetone << 8) | sidetone);
- nau8825_xtalk_clean(nau8825);
+ nau8825_xtalk_clean(nau8825, false);
nau8825->xtalk_state = NAU8825_XTALK_DONE;
break;
default:
@@ -815,13 +828,14 @@ static void nau8825_xtalk_work(struct work_struct *work)
static void nau8825_xtalk_cancel(struct nau8825 *nau8825)
{
- /* If the xtalk_protect is true, that means the process is still
- * on going. The driver forces to cancel the cross talk task and
+ /* If crosstalk is enabled and the process is ongoing,
+ * the driver forces the crosstalk task to cancel and
+ * restores the configuration to its original status.
*/
- if (nau8825->xtalk_protect) {
+ if (nau8825->xtalk_enable && nau8825->xtalk_state !=
+ NAU8825_XTALK_DONE) {
cancel_work_sync(&nau8825->xtalk_work);
- nau8825_xtalk_clean(nau8825);
+ nau8825_xtalk_clean(nau8825, true);
}
/* Reset parameters for cross talk suppression function */
nau8825_sema_reset(nau8825);
@@ -1246,8 +1260,10 @@ static int nau8825_hw_params(struct snd_pcm_substream *substream,
regmap_read(nau8825->regmap, NAU8825_REG_DAC_CTRL1, &osr);
osr &= NAU8825_DAC_OVERSAMPLE_MASK;
if (nau8825_clock_check(nau8825, substream->stream,
- params_rate(params), osr))
+ params_rate(params), osr)) {
+ nau8825_sema_release(nau8825);
return -EINVAL;
+ }
regmap_update_bits(nau8825->regmap, NAU8825_REG_CLK_DIVIDER,
NAU8825_CLK_DAC_SRC_MASK,
osr_dac_sel[osr].clk_src << NAU8825_CLK_DAC_SRC_SFT);
@@ -1255,8 +1271,10 @@ static int nau8825_hw_params(struct snd_pcm_substream *substream,
regmap_read(nau8825->regmap, NAU8825_REG_ADC_RATE, &osr);
osr &= NAU8825_ADC_SYNC_DOWN_MASK;
if (nau8825_clock_check(nau8825, substream->stream,
- params_rate(params), osr))
+ params_rate(params), osr)) {
+ nau8825_sema_release(nau8825);
return -EINVAL;
+ }
regmap_update_bits(nau8825->regmap, NAU8825_REG_CLK_DIVIDER,
NAU8825_CLK_ADC_SRC_MASK,
osr_adc_sel[osr].clk_src << NAU8825_CLK_ADC_SRC_SFT);
@@ -1273,8 +1291,10 @@ static int nau8825_hw_params(struct snd_pcm_substream *substream,
bclk_div = 1;
else if (bclk_fs <= 128)
bclk_div = 0;
- else
+ else {
+ nau8825_sema_release(nau8825);
return -EINVAL;
+ }
regmap_update_bits(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL2,
NAU8825_I2S_LRC_DIV_MASK | NAU8825_I2S_BLK_DIV_MASK,
((bclk_div + 1) << NAU8825_I2S_LRC_DIV_SFT) | bclk_div);
@@ -1294,6 +1314,7 @@ static int nau8825_hw_params(struct snd_pcm_substream *substream,
val_len |= NAU8825_I2S_DL_32;
break;
default:
+ nau8825_sema_release(nau8825);
return -EINVAL;
}
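Each early exit in nau8825_hw_params() now releases the semaphore taken at entry. A hypothetical goto-style reshaping of the same fix, a common kernel idiom that keeps the release in one place (function name and flag are illustrative only):

static int example_hw_params(struct nau8825 *nau8825, bool bad_cfg)
{
	int ret = 0;

	nau8825_sema_acquire(nau8825, 3 * HZ);

	if (bad_cfg) {
		ret = -EINVAL;
		goto out;
	}
	/* ... register programming ... */
out:
	nau8825_sema_release(nau8825);
	return ret;
}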
@@ -1312,8 +1333,6 @@ static int nau8825_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
unsigned int ctrl1_val = 0, ctrl2_val = 0;
- nau8825_sema_acquire(nau8825, 3 * HZ);
-
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBM_CFM:
ctrl2_val |= NAU8825_I2S_MS_MASTER;
@@ -1355,6 +1374,8 @@ static int nau8825_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
return -EINVAL;
}
+ nau8825_sema_acquire(nau8825, 3 * HZ);
+
regmap_update_bits(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL1,
NAU8825_I2S_DL_MASK | NAU8825_I2S_DF_MASK |
NAU8825_I2S_BP_MASK | NAU8825_I2S_PCMB_MASK,
@@ -1687,7 +1708,7 @@ static irqreturn_t nau8825_interrupt(int irq, void *data)
} else if (active_irq & NAU8825_HEADSET_COMPLETION_IRQ) {
if (nau8825_is_jack_inserted(regmap)) {
event |= nau8825_jack_insert(nau8825);
- if (!nau8825->xtalk_bypass && !nau8825->high_imped) {
+ if (nau8825->xtalk_enable && !nau8825->high_imped) {
/* Apply the cross talk suppression in the
* headset without high impedance.
*/
@@ -1701,12 +1722,15 @@ static irqreturn_t nau8825_interrupt(int irq, void *data)
int ret;
nau8825->xtalk_protect = true;
ret = nau8825_sema_acquire(nau8825, 0);
- if (ret < 0)
+ if (ret)
nau8825->xtalk_protect = false;
}
/* Startup cross talk detection process */
- nau8825->xtalk_state = NAU8825_XTALK_PREPARE;
- schedule_work(&nau8825->xtalk_work);
+ if (nau8825->xtalk_protect) {
+ nau8825->xtalk_state =
+ NAU8825_XTALK_PREPARE;
+ schedule_work(&nau8825->xtalk_work);
+ }
} else {
/* The cross talk suppression shouldn't apply
* in the headset with high impedance. Thus,
@@ -1733,7 +1757,9 @@ static irqreturn_t nau8825_interrupt(int irq, void *data)
nau8825->xtalk_event_mask = event_mask;
}
} else if (active_irq & NAU8825_IMPEDANCE_MEAS_IRQ) {
- schedule_work(&nau8825->xtalk_work);
+ /* crosstalk detection enabled and process ongoing */
+ if (nau8825->xtalk_enable && nau8825->xtalk_protect)
+ schedule_work(&nau8825->xtalk_work);
clear_irq = NAU8825_IMPEDANCE_MEAS_IRQ;
} else if ((active_irq & NAU8825_JACK_INSERTION_IRQ_MASK) ==
NAU8825_JACK_INSERTION_DETECTED) {
@@ -2382,7 +2408,7 @@ static int __maybe_unused nau8825_resume(struct snd_soc_codec *codec)
regcache_sync(nau8825->regmap);
nau8825->xtalk_protect = true;
ret = nau8825_sema_acquire(nau8825, 0);
- if (ret < 0)
+ if (ret)
nau8825->xtalk_protect = false;
enable_irq(nau8825->irq);
@@ -2441,8 +2467,8 @@ static void nau8825_print_device_properties(struct nau8825 *nau8825)
nau8825->jack_insert_debounce);
dev_dbg(dev, "jack-eject-debounce: %d\n",
nau8825->jack_eject_debounce);
- dev_dbg(dev, "crosstalk-bypass: %d\n",
- nau8825->xtalk_bypass);
+ dev_dbg(dev, "crosstalk-enable: %d\n",
+ nau8825->xtalk_enable);
}
static int nau8825_read_device_properties(struct device *dev,
@@ -2507,8 +2533,8 @@ static int nau8825_read_device_properties(struct device *dev,
&nau8825->jack_eject_debounce);
if (ret)
nau8825->jack_eject_debounce = 0;
- nau8825->xtalk_bypass = device_property_read_bool(dev,
- "nuvoton,crosstalk-bypass");
+ nau8825->xtalk_enable = device_property_read_bool(dev,
+ "nuvoton,crosstalk-enable");
nau8825->mclk = devm_clk_get(dev, "mclk");
if (PTR_ERR(nau8825->mclk) == -EPROBE_DEFER) {
@@ -2569,6 +2595,7 @@ static int nau8825_i2c_probe(struct i2c_client *i2c,
*/
nau8825->xtalk_state = NAU8825_XTALK_DONE;
nau8825->xtalk_protect = false;
+ nau8825->xtalk_baktab_initialized = false;
sema_init(&nau8825->xtalk_sem, 1);
INIT_WORK(&nau8825->xtalk_work, nau8825_xtalk_work);
diff --git a/sound/soc/codecs/nau8825.h b/sound/soc/codecs/nau8825.h
index 8aee5c8647ae..f7e732125882 100644
--- a/sound/soc/codecs/nau8825.h
+++ b/sound/soc/codecs/nau8825.h
@@ -476,7 +476,8 @@ struct nau8825 {
int xtalk_event_mask;
bool xtalk_protect;
int imp_rms[NAU8825_XTALK_IMM];
- int xtalk_bypass;
+ int xtalk_enable;
+ bool xtalk_baktab_initialized; /* true once the backup table is captured */
};
int nau8825_enable_jack_detect(struct snd_soc_codec *codec,
diff --git a/sound/soc/codecs/pcm186x-i2c.c b/sound/soc/codecs/pcm186x-i2c.c
new file mode 100644
index 000000000000..543621232d60
--- /dev/null
+++ b/sound/soc/codecs/pcm186x-i2c.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments PCM186x Universal Audio ADC - I2C
+ *
+ * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com
+ * Andreas Dannenberg <dannenberg@ti.com>
+ * Andrew F. Davis <afd@ti.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+
+#include "pcm186x.h"
+
+static const struct of_device_id pcm186x_of_match[] = {
+ { .compatible = "ti,pcm1862", .data = (void *)PCM1862 },
+ { .compatible = "ti,pcm1863", .data = (void *)PCM1863 },
+ { .compatible = "ti,pcm1864", .data = (void *)PCM1864 },
+ { .compatible = "ti,pcm1865", .data = (void *)PCM1865 },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pcm186x_of_match);
+
+static int pcm186x_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ const enum pcm186x_type type = (enum pcm186x_type)id->driver_data;
+ int irq = i2c->irq;
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_i2c(i2c, &pcm186x_regmap);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ return pcm186x_probe(&i2c->dev, type, irq, regmap);
+}
+
+static int pcm186x_i2c_remove(struct i2c_client *i2c)
+{
+ pcm186x_remove(&i2c->dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id pcm186x_i2c_id[] = {
+ { "pcm1862", PCM1862 },
+ { "pcm1863", PCM1863 },
+ { "pcm1864", PCM1864 },
+ { "pcm1865", PCM1865 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, pcm186x_i2c_id);
+
+static struct i2c_driver pcm186x_i2c_driver = {
+ .probe = pcm186x_i2c_probe,
+ .remove = pcm186x_i2c_remove,
+ .id_table = pcm186x_i2c_id,
+ .driver = {
+ .name = "pcm186x",
+ .of_match_table = pcm186x_of_match,
+ },
+};
+module_i2c_driver(pcm186x_i2c_driver);
+
+MODULE_AUTHOR("Andreas Dannenberg <dannenberg@ti.com>");
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("PCM186x Universal Audio ADC I2C Interface Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/pcm186x-spi.c b/sound/soc/codecs/pcm186x-spi.c
new file mode 100644
index 000000000000..2366f8e4d4d4
--- /dev/null
+++ b/sound/soc/codecs/pcm186x-spi.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments PCM186x Universal Audio ADC - SPI
+ *
+ * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com
+ * Andreas Dannenberg <dannenberg@ti.com>
+ * Andrew F. Davis <afd@ti.com>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+
+#include "pcm186x.h"
+
+static const struct of_device_id pcm186x_of_match[] = {
+ { .compatible = "ti,pcm1862", .data = (void *)PCM1862 },
+ { .compatible = "ti,pcm1863", .data = (void *)PCM1863 },
+ { .compatible = "ti,pcm1864", .data = (void *)PCM1864 },
+ { .compatible = "ti,pcm1865", .data = (void *)PCM1865 },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pcm186x_of_match);
+
+static int pcm186x_spi_probe(struct spi_device *spi)
+{
+ const enum pcm186x_type type =
+ (enum pcm186x_type)spi_get_device_id(spi)->driver_data;
+ int irq = spi->irq;
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_spi(spi, &pcm186x_regmap);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ return pcm186x_probe(&spi->dev, type, irq, regmap);
+}
+
+static int pcm186x_spi_remove(struct spi_device *spi)
+{
+ pcm186x_remove(&spi->dev);
+
+ return 0;
+}
+
+static const struct spi_device_id pcm186x_spi_id[] = {
+ { "pcm1862", PCM1862 },
+ { "pcm1863", PCM1863 },
+ { "pcm1864", PCM1864 },
+ { "pcm1865", PCM1865 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, pcm186x_spi_id);
+
+static struct spi_driver pcm186x_spi_driver = {
+ .probe = pcm186x_spi_probe,
+ .remove = pcm186x_spi_remove,
+ .id_table = pcm186x_spi_id,
+ .driver = {
+ .name = "pcm186x",
+ .of_match_table = pcm186x_of_match,
+ },
+};
+module_spi_driver(pcm186x_spi_driver);
+
+MODULE_AUTHOR("Andreas Dannenberg <dannenberg@ti.com>");
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("PCM186x Universal Audio ADC SPI Interface Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/pcm186x.c b/sound/soc/codecs/pcm186x.c
new file mode 100644
index 000000000000..cdb51427facc
--- /dev/null
+++ b/sound/soc/codecs/pcm186x.c
@@ -0,0 +1,719 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments PCM186x Universal Audio ADC
+ *
+ * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com
+ * Andreas Dannenberg <dannenberg@ti.com>
+ * Andrew F. Davis <afd@ti.com>
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include "pcm186x.h"
+
+static const char * const pcm186x_supply_names[] = {
+ "avdd", /* Analog power supply. Connect to 3.3-V supply. */
+ "dvdd", /* Digital power supply. Connect to 3.3-V supply. */
+ "iovdd", /* I/O power supply. Connect to 3.3-V or 1.8-V. */
+};
+#define PCM186x_NUM_SUPPLIES ARRAY_SIZE(pcm186x_supply_names)
+
+struct pcm186x_priv {
+ struct regmap *regmap;
+ struct regulator_bulk_data supplies[PCM186x_NUM_SUPPLIES];
+ unsigned int sysclk;
+ unsigned int tdm_offset;
+ bool is_tdm_mode;
+ bool is_master_mode;
+};
+
+static const DECLARE_TLV_DB_SCALE(pcm186x_pga_tlv, -1200, 50, 0);
+
+static const struct snd_kcontrol_new pcm1863_snd_controls[] = {
+ SOC_DOUBLE_R_S_TLV("ADC Capture Volume", PCM186X_PGA_VAL_CH1_L,
+ PCM186X_PGA_VAL_CH1_R, 0, -24, 80, 7, 0,
+ pcm186x_pga_tlv),
+};
+
+static const struct snd_kcontrol_new pcm1865_snd_controls[] = {
+ SOC_DOUBLE_R_S_TLV("ADC1 Capture Volume", PCM186X_PGA_VAL_CH1_L,
+ PCM186X_PGA_VAL_CH1_R, 0, -24, 80, 7, 0,
+ pcm186x_pga_tlv),
+ SOC_DOUBLE_R_S_TLV("ADC2 Capture Volume", PCM186X_PGA_VAL_CH2_L,
+ PCM186X_PGA_VAL_CH2_R, 0, -24, 80, 7, 0,
+ pcm186x_pga_tlv),
+};
+
+static const unsigned int pcm186x_adc_input_channel_sel_value[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x20, 0x30
+};
+
+static const char * const pcm186x_adcl_input_channel_sel_text[] = {
+ "No Select",
+ "VINL1[SE]", /* Default for ADC1L */
+ "VINL2[SE]", /* Default for ADC2L */
+ "VINL2[SE] + VINL1[SE]",
+ "VINL3[SE]",
+ "VINL3[SE] + VINL1[SE]",
+ "VINL3[SE] + VINL2[SE]",
+ "VINL3[SE] + VINL2[SE] + VINL1[SE]",
+ "VINL4[SE]",
+ "VINL4[SE] + VINL1[SE]",
+ "VINL4[SE] + VINL2[SE]",
+ "VINL4[SE] + VINL2[SE] + VINL1[SE]",
+ "VINL4[SE] + VINL3[SE]",
+ "VINL4[SE] + VINL3[SE] + VINL1[SE]",
+ "VINL4[SE] + VINL3[SE] + VINL2[SE]",
+ "VINL4[SE] + VINL3[SE] + VINL2[SE] + VINL1[SE]",
+ "{VIN1P, VIN1M}[DIFF]",
+ "{VIN4P, VIN4M}[DIFF]",
+ "{VIN1P, VIN1M}[DIFF] + {VIN4P, VIN4M}[DIFF]"
+};
+
+static const char * const pcm186x_adcr_input_channel_sel_text[] = {
+ "No Select",
+ "VINR1[SE]", /* Default for ADC1R */
+ "VINR2[SE]", /* Default for ADC2R */
+ "VINR2[SE] + VINR1[SE]",
+ "VINR3[SE]",
+ "VINR3[SE] + VINR1[SE]",
+ "VINR3[SE] + VINR2[SE]",
+ "VINR3[SE] + VINR2[SE] + VINR1[SE]",
+ "VINR4[SE]",
+ "VINR4[SE] + VINR1[SE]",
+ "VINR4[SE] + VINR2[SE]",
+ "VINR4[SE] + VINR2[SE] + VINR1[SE]",
+ "VINR4[SE] + VINR3[SE]",
+ "VINR4[SE] + VINR3[SE] + VINR1[SE]",
+ "VINR4[SE] + VINR3[SE] + VINR2[SE]",
+ "VINR4[SE] + VINR3[SE] + VINR2[SE] + VINR1[SE]",
+ "{VIN2P, VIN2M}[DIFF]",
+ "{VIN3P, VIN3M}[DIFF]",
+ "{VIN2P, VIN2M}[DIFF] + {VIN3P, VIN3M}[DIFF]"
+};
+
+static const struct soc_enum pcm186x_adc_input_channel_sel[] = {
+ SOC_VALUE_ENUM_SINGLE(PCM186X_ADC1_INPUT_SEL_L, 0,
+ PCM186X_ADC_INPUT_SEL_MASK,
+ ARRAY_SIZE(pcm186x_adcl_input_channel_sel_text),
+ pcm186x_adcl_input_channel_sel_text,
+ pcm186x_adc_input_channel_sel_value),
+ SOC_VALUE_ENUM_SINGLE(PCM186X_ADC1_INPUT_SEL_R, 0,
+ PCM186X_ADC_INPUT_SEL_MASK,
+ ARRAY_SIZE(pcm186x_adcr_input_channel_sel_text),
+ pcm186x_adcr_input_channel_sel_text,
+ pcm186x_adc_input_channel_sel_value),
+ SOC_VALUE_ENUM_SINGLE(PCM186X_ADC2_INPUT_SEL_L, 0,
+ PCM186X_ADC_INPUT_SEL_MASK,
+ ARRAY_SIZE(pcm186x_adcl_input_channel_sel_text),
+ pcm186x_adcl_input_channel_sel_text,
+ pcm186x_adc_input_channel_sel_value),
+ SOC_VALUE_ENUM_SINGLE(PCM186X_ADC2_INPUT_SEL_R, 0,
+ PCM186X_ADC_INPUT_SEL_MASK,
+ ARRAY_SIZE(pcm186x_adcr_input_channel_sel_text),
+ pcm186x_adcr_input_channel_sel_text,
+ pcm186x_adc_input_channel_sel_value),
+};
+
+static const struct snd_kcontrol_new pcm186x_adc_mux_controls[] = {
+ SOC_DAPM_ENUM("ADC1 Left Input", pcm186x_adc_input_channel_sel[0]),
+ SOC_DAPM_ENUM("ADC1 Right Input", pcm186x_adc_input_channel_sel[1]),
+ SOC_DAPM_ENUM("ADC2 Left Input", pcm186x_adc_input_channel_sel[2]),
+ SOC_DAPM_ENUM("ADC2 Right Input", pcm186x_adc_input_channel_sel[3]),
+};
+
+static const struct snd_soc_dapm_widget pcm1863_dapm_widgets[] = {
+ SND_SOC_DAPM_INPUT("VINL1"),
+ SND_SOC_DAPM_INPUT("VINR1"),
+ SND_SOC_DAPM_INPUT("VINL2"),
+ SND_SOC_DAPM_INPUT("VINR2"),
+ SND_SOC_DAPM_INPUT("VINL3"),
+ SND_SOC_DAPM_INPUT("VINR3"),
+ SND_SOC_DAPM_INPUT("VINL4"),
+ SND_SOC_DAPM_INPUT("VINR4"),
+
+ SND_SOC_DAPM_MUX("ADC Left Capture Source", SND_SOC_NOPM, 0, 0,
+ &pcm186x_adc_mux_controls[0]),
+ SND_SOC_DAPM_MUX("ADC Right Capture Source", SND_SOC_NOPM, 0, 0,
+ &pcm186x_adc_mux_controls[1]),
+
+ /*
+ * Put the codec into SLEEP mode when not in use, allowing the
+ * Energysense mechanism to operate.
+ */
+ SND_SOC_DAPM_ADC("ADC", "HiFi Capture", PCM186X_POWER_CTRL, 1, 0),
+};
+
+static const struct snd_soc_dapm_widget pcm1865_dapm_widgets[] = {
+ SND_SOC_DAPM_INPUT("VINL1"),
+ SND_SOC_DAPM_INPUT("VINR1"),
+ SND_SOC_DAPM_INPUT("VINL2"),
+ SND_SOC_DAPM_INPUT("VINR2"),
+ SND_SOC_DAPM_INPUT("VINL3"),
+ SND_SOC_DAPM_INPUT("VINR3"),
+ SND_SOC_DAPM_INPUT("VINL4"),
+ SND_SOC_DAPM_INPUT("VINR4"),
+
+ SND_SOC_DAPM_MUX("ADC1 Left Capture Source", SND_SOC_NOPM, 0, 0,
+ &pcm186x_adc_mux_controls[0]),
+ SND_SOC_DAPM_MUX("ADC1 Right Capture Source", SND_SOC_NOPM, 0, 0,
+ &pcm186x_adc_mux_controls[1]),
+ SND_SOC_DAPM_MUX("ADC2 Left Capture Source", SND_SOC_NOPM, 0, 0,
+ &pcm186x_adc_mux_controls[2]),
+ SND_SOC_DAPM_MUX("ADC2 Right Capture Source", SND_SOC_NOPM, 0, 0,
+ &pcm186x_adc_mux_controls[3]),
+
+ /*
+ * Put the codec into SLEEP mode when not in use, allowing the
+ * Energysense mechanism to operate.
+ */
+ SND_SOC_DAPM_ADC("ADC1", "HiFi Capture 1", PCM186X_POWER_CTRL, 1, 0),
+ SND_SOC_DAPM_ADC("ADC2", "HiFi Capture 2", PCM186X_POWER_CTRL, 1, 0),
+};
+
+static const struct snd_soc_dapm_route pcm1863_dapm_routes[] = {
+ { "ADC Left Capture Source", NULL, "VINL1" },
+ { "ADC Left Capture Source", NULL, "VINR1" },
+ { "ADC Left Capture Source", NULL, "VINL2" },
+ { "ADC Left Capture Source", NULL, "VINR2" },
+ { "ADC Left Capture Source", NULL, "VINL3" },
+ { "ADC Left Capture Source", NULL, "VINR3" },
+ { "ADC Left Capture Source", NULL, "VINL4" },
+ { "ADC Left Capture Source", NULL, "VINR4" },
+
+ { "ADC", NULL, "ADC Left Capture Source" },
+
+ { "ADC Right Capture Source", NULL, "VINL1" },
+ { "ADC Right Capture Source", NULL, "VINR1" },
+ { "ADC Right Capture Source", NULL, "VINL2" },
+ { "ADC Right Capture Source", NULL, "VINR2" },
+ { "ADC Right Capture Source", NULL, "VINL3" },
+ { "ADC Right Capture Source", NULL, "VINR3" },
+ { "ADC Right Capture Source", NULL, "VINL4" },
+ { "ADC Right Capture Source", NULL, "VINR4" },
+
+ { "ADC", NULL, "ADC Right Capture Source" },
+};
+
+static const struct snd_soc_dapm_route pcm1865_dapm_routes[] = {
+ { "ADC1 Left Capture Source", NULL, "VINL1" },
+ { "ADC1 Left Capture Source", NULL, "VINR1" },
+ { "ADC1 Left Capture Source", NULL, "VINL2" },
+ { "ADC1 Left Capture Source", NULL, "VINR2" },
+ { "ADC1 Left Capture Source", NULL, "VINL3" },
+ { "ADC1 Left Capture Source", NULL, "VINR3" },
+ { "ADC1 Left Capture Source", NULL, "VINL4" },
+ { "ADC1 Left Capture Source", NULL, "VINR4" },
+
+ { "ADC1", NULL, "ADC1 Left Capture Source" },
+
+ { "ADC1 Right Capture Source", NULL, "VINL1" },
+ { "ADC1 Right Capture Source", NULL, "VINR1" },
+ { "ADC1 Right Capture Source", NULL, "VINL2" },
+ { "ADC1 Right Capture Source", NULL, "VINR2" },
+ { "ADC1 Right Capture Source", NULL, "VINL3" },
+ { "ADC1 Right Capture Source", NULL, "VINR3" },
+ { "ADC1 Right Capture Source", NULL, "VINL4" },
+ { "ADC1 Right Capture Source", NULL, "VINR4" },
+
+ { "ADC1", NULL, "ADC1 Right Capture Source" },
+
+ { "ADC2 Left Capture Source", NULL, "VINL1" },
+ { "ADC2 Left Capture Source", NULL, "VINR1" },
+ { "ADC2 Left Capture Source", NULL, "VINL2" },
+ { "ADC2 Left Capture Source", NULL, "VINR2" },
+ { "ADC2 Left Capture Source", NULL, "VINL3" },
+ { "ADC2 Left Capture Source", NULL, "VINR3" },
+ { "ADC2 Left Capture Source", NULL, "VINL4" },
+ { "ADC2 Left Capture Source", NULL, "VINR4" },
+
+ { "ADC2", NULL, "ADC2 Left Capture Source" },
+
+ { "ADC2 Right Capture Source", NULL, "VINL1" },
+ { "ADC2 Right Capture Source", NULL, "VINR1" },
+ { "ADC2 Right Capture Source", NULL, "VINL2" },
+ { "ADC2 Right Capture Source", NULL, "VINR2" },
+ { "ADC2 Right Capture Source", NULL, "VINL3" },
+ { "ADC2 Right Capture Source", NULL, "VINR3" },
+ { "ADC2 Right Capture Source", NULL, "VINL4" },
+ { "ADC2 Right Capture Source", NULL, "VINR4" },
+
+ { "ADC2", NULL, "ADC2 Right Capture Source" },
+};
+
+static int pcm186x_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct pcm186x_priv *priv = snd_soc_codec_get_drvdata(codec);
+ unsigned int rate = params_rate(params);
+ unsigned int format = params_format(params);
+ unsigned int width = params_width(params);
+ unsigned int channels = params_channels(params);
+ unsigned int div_lrck;
+ unsigned int div_bck;
+ u8 tdm_tx_sel = 0;
+ u8 pcm_cfg = 0;
+
+ dev_dbg(codec->dev, "%s() rate=%u format=0x%x width=%u channels=%u\n",
+ __func__, rate, format, width, channels);
+
+ switch (width) {
+ case 16:
+ pcm_cfg = PCM186X_PCM_CFG_RX_WLEN_16 <<
+ PCM186X_PCM_CFG_RX_WLEN_SHIFT |
+ PCM186X_PCM_CFG_TX_WLEN_16 <<
+ PCM186X_PCM_CFG_TX_WLEN_SHIFT;
+ break;
+ case 20:
+ pcm_cfg = PCM186X_PCM_CFG_RX_WLEN_20 <<
+ PCM186X_PCM_CFG_RX_WLEN_SHIFT |
+ PCM186X_PCM_CFG_TX_WLEN_20 <<
+ PCM186X_PCM_CFG_TX_WLEN_SHIFT;
+ break;
+ case 24:
+ pcm_cfg = PCM186X_PCM_CFG_RX_WLEN_24 <<
+ PCM186X_PCM_CFG_RX_WLEN_SHIFT |
+ PCM186X_PCM_CFG_TX_WLEN_24 <<
+ PCM186X_PCM_CFG_TX_WLEN_SHIFT;
+ break;
+ case 32:
+ pcm_cfg = PCM186X_PCM_CFG_RX_WLEN_32 <<
+ PCM186X_PCM_CFG_RX_WLEN_SHIFT |
+ PCM186X_PCM_CFG_TX_WLEN_32 <<
+ PCM186X_PCM_CFG_TX_WLEN_SHIFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ snd_soc_update_bits(codec, PCM186X_PCM_CFG,
+ PCM186X_PCM_CFG_RX_WLEN_MASK |
+ PCM186X_PCM_CFG_TX_WLEN_MASK,
+ pcm_cfg);
+
+ div_lrck = width * channels;
+
+ if (priv->is_tdm_mode) {
+ /* Select TDM transmission data */
+ switch (channels) {
+ case 2:
+ tdm_tx_sel = PCM186X_TDM_TX_SEL_2CH;
+ break;
+ case 4:
+ tdm_tx_sel = PCM186X_TDM_TX_SEL_4CH;
+ break;
+ case 6:
+ tdm_tx_sel = PCM186X_TDM_TX_SEL_6CH;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ snd_soc_update_bits(codec, PCM186X_TDM_TX_SEL,
+ PCM186X_TDM_TX_SEL_MASK, tdm_tx_sel);
+
+ /* In DSP/TDM mode, the LRCLK divider must be 256 */
+ div_lrck = 256;
+
+ /* Configure 1/256 duty cycle for LRCK */
+ snd_soc_update_bits(codec, PCM186X_PCM_CFG,
+ PCM186X_PCM_CFG_TDM_LRCK_MODE,
+ PCM186X_PCM_CFG_TDM_LRCK_MODE);
+ }
+
+ /* Only configure clock dividers in master mode. */
+ if (priv->is_master_mode) {
+ div_bck = priv->sysclk / (div_lrck * rate);
+
+ dev_dbg(codec->dev,
+ "%s() master_clk=%u div_bck=%u div_lrck=%u\n",
+ __func__, priv->sysclk, div_bck, div_lrck);
+
+ snd_soc_write(codec, PCM186X_BCK_DIV, div_bck - 1);
+ snd_soc_write(codec, PCM186X_LRK_DIV, div_lrck - 1);
+ }
+
+ return 0;
+}
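A quick worked pass through the master-mode divider math above, assuming a hypothetical 24.576 MHz system clock, a 48 kHz rate, and two 32-bit channels:

/* div_lrck = width * channels = 32 * 2 = 64
 * div_bck  = sysclk / (div_lrck * rate)
 *          = 24576000 / (64 * 48000) = 8
 * The driver then writes div_bck - 1 and div_lrck - 1 into the
 * BCK_DIV and LRK_DIV registers.
 */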
+
+static int pcm186x_set_fmt(struct snd_soc_dai *dai, unsigned int format)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct pcm186x_priv *priv = snd_soc_codec_get_drvdata(codec);
+ u8 clk_ctrl = 0;
+ u8 pcm_cfg = 0;
+
+ dev_dbg(codec->dev, "%s() format=0x%x\n", __func__, format);
+
+ /* set master/slave audio interface */
+ switch (format & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ if (!priv->sysclk) {
+ dev_err(codec->dev, "operating in master mode requires sysclock to be configured\n");
+ return -EINVAL;
+ }
+ clk_ctrl |= PCM186X_CLK_CTRL_MST_MODE;
+ priv->is_master_mode = true;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ priv->is_master_mode = false;
+ break;
+ default:
+ dev_err(codec->dev, "Invalid DAI master/slave interface\n");
+ return -EINVAL;
+ }
+
+ /* set interface polarity */
+ switch (format & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ default:
+ dev_err(codec->dev, "Inverted DAI clocks not supported\n");
+ return -EINVAL;
+ }
+
+ /* set interface format */
+ switch (format & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ pcm_cfg = PCM186X_PCM_CFG_FMT_I2S;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ pcm_cfg = PCM186X_PCM_CFG_FMT_LEFTJ;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ priv->tdm_offset += 1;
+ /* Fall through... DSP_A uses the same basic config as DSP_B,
+ * except the TDM output must be shifted by one BCK cycle.
+ */
+ case SND_SOC_DAIFMT_DSP_B:
+ priv->is_tdm_mode = true;
+ pcm_cfg = PCM186X_PCM_CFG_FMT_TDM;
+ break;
+ default:
+ dev_err(codec->dev, "Invalid DAI format\n");
+ return -EINVAL;
+ }
+
+ snd_soc_update_bits(codec, PCM186X_CLK_CTRL,
+ PCM186X_CLK_CTRL_MST_MODE, clk_ctrl);
+
+ snd_soc_write(codec, PCM186X_TDM_TX_OFFSET, priv->tdm_offset);
+
+ snd_soc_update_bits(codec, PCM186X_PCM_CFG,
+ PCM186X_PCM_CFG_FMT_MASK, pcm_cfg);
+
+ return 0;
+}
+
+static int pcm186x_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
+ unsigned int rx_mask, int slots, int slot_width)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct pcm186x_priv *priv = snd_soc_codec_get_drvdata(codec);
+ unsigned int first_slot, last_slot, tdm_offset;
+
+ dev_dbg(codec->dev,
+ "%s() tx_mask=0x%x rx_mask=0x%x slots=%d slot_width=%d\n",
+ __func__, tx_mask, rx_mask, slots, slot_width);
+
+ if (!tx_mask) {
+ dev_err(codec->dev, "tdm tx mask must not be 0\n");
+ return -EINVAL;
+ }
+
+ first_slot = __ffs(tx_mask);
+ last_slot = __fls(tx_mask);
+
+ if (last_slot - first_slot != hweight32(tx_mask) - 1) {
+ dev_err(codec->dev, "tdm tx mask must be contiguous\n");
+ return -EINVAL;
+ }
+
+ tdm_offset = first_slot * slot_width;
+
+ if (tdm_offset > 255) {
+ dev_err(codec->dev, "tdm tx slot selection out of bounds\n");
+ return -EINVAL;
+ }
+
+ priv->tdm_offset = tdm_offset;
+
+ return 0;
+}
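
The mask check above exploits that a contiguous run of bits spans exactly
hweight32(tx_mask) - 1 positions between its lowest and highest set bit,
and the resulting offset is simply the first slot times the slot width in
BCK cycles (pcm186x_set_fmt() above adds one extra cycle for DSP_A). A
standalone model, with GCC builtins standing in for the kernel helpers:

#include <stdint.h>
#include <stdio.h>

/* __builtin_ctz/clz/popcount play the role of __ffs()/__fls()/hweight32() */
static int model_tdm_offset(uint32_t tx_mask, unsigned int slot_width)
{
	unsigned int first = __builtin_ctz(tx_mask);
	unsigned int last = 31 - __builtin_clz(tx_mask);
	unsigned int nbits = __builtin_popcount(tx_mask);

	/* contiguous iff the span matches the population count */
	if (last - first != nbits - 1)
		return -1;

	return first * slot_width;	/* TDM offset in BCK cycles */
}

int main(void)
{
	printf("0x3c -> %d\n", model_tdm_offset(0x3c, 32));	/* 64 */
	printf("0x05 -> %d\n", model_tdm_offset(0x05, 32));	/* -1 */
	return 0;
}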
+
+static int pcm186x_set_dai_sysclk(struct snd_soc_dai *dai, int clk_id,
+ unsigned int freq, int dir)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct pcm186x_priv *priv = snd_soc_codec_get_drvdata(codec);
+
+ dev_dbg(codec->dev, "%s() clk_id=%d freq=%u dir=%d\n",
+ __func__, clk_id, freq, dir);
+
+ priv->sysclk = freq;
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops pcm186x_dai_ops = {
+ .set_sysclk = pcm186x_set_dai_sysclk,
+ .set_tdm_slot = pcm186x_set_tdm_slot,
+ .set_fmt = pcm186x_set_fmt,
+ .hw_params = pcm186x_hw_params,
+};
+
+static struct snd_soc_dai_driver pcm1863_dai = {
+ .name = "pcm1863-aif",
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = PCM186X_RATES,
+ .formats = PCM186X_FORMATS,
+ },
+ .ops = &pcm186x_dai_ops,
+};
+
+static struct snd_soc_dai_driver pcm1865_dai = {
+ .name = "pcm1865-aif",
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = 4,
+ .rates = PCM186X_RATES,
+ .formats = PCM186X_FORMATS,
+ },
+ .ops = &pcm186x_dai_ops,
+};
+
+static int pcm186x_power_on(struct snd_soc_codec *codec)
+{
+ struct pcm186x_priv *priv = snd_soc_codec_get_drvdata(codec);
+ int ret = 0;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(priv->supplies),
+ priv->supplies);
+ if (ret)
+ return ret;
+
+ regcache_cache_only(priv->regmap, false);
+ ret = regcache_sync(priv->regmap);
+ if (ret) {
+ dev_err(codec->dev, "Failed to restore cache\n");
+ regcache_cache_only(priv->regmap, true);
+ regulator_bulk_disable(ARRAY_SIZE(priv->supplies),
+ priv->supplies);
+ return ret;
+ }
+
+ snd_soc_update_bits(codec, PCM186X_POWER_CTRL,
+ PCM186X_PWR_CTRL_PWRDN, 0);
+
+ return 0;
+}
+
+static int pcm186x_power_off(struct snd_soc_codec *codec)
+{
+ struct pcm186x_priv *priv = snd_soc_codec_get_drvdata(codec);
+ int ret;
+
+ snd_soc_update_bits(codec, PCM186X_POWER_CTRL,
+ PCM186X_PWR_CTRL_PWRDN, PCM186X_PWR_CTRL_PWRDN);
+
+ regcache_cache_only(priv->regmap, true);
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(priv->supplies),
+ priv->supplies);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int pcm186x_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ dev_dbg(codec->dev, "## %s: %d -> %d\n", __func__,
+ snd_soc_codec_get_bias_level(codec), level);
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ break;
+ case SND_SOC_BIAS_PREPARE:
+ break;
+ case SND_SOC_BIAS_STANDBY:
+ if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_OFF)
+ pcm186x_power_on(codec);
+ break;
+ case SND_SOC_BIAS_OFF:
+ pcm186x_power_off(codec);
+ break;
+ }
+
+ return 0;
+}
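
Only the OFF <-> STANDBY edges do real work here; PREPARE and ON are
no-ops. A toy walk through the transitions DAPM typically performs around
a capture stream, assuming the usual level ordering:

#include <stdio.h>

enum bias { OFF, STANDBY, PREPARE, ON };

static enum bias cur = OFF;

static void set_bias(enum bias next)
{
	if (next == STANDBY && cur == OFF)
		printf("power_on: enable supplies, sync regcache, clear PWRDN\n");
	else if (next == OFF)
		printf("power_off: set PWRDN, cache-only, disable supplies\n");
	cur = next;
}

int main(void)
{
	enum bias walk[] = { STANDBY, PREPARE, ON, PREPARE, STANDBY, OFF };

	for (unsigned int i = 0; i < sizeof(walk) / sizeof(walk[0]); i++)
		set_bias(walk[i]);
	return 0;
}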
+
+static struct snd_soc_codec_driver soc_codec_dev_pcm1863 = {
+ .set_bias_level = pcm186x_set_bias_level,
+
+ .component_driver = {
+ .controls = pcm1863_snd_controls,
+ .num_controls = ARRAY_SIZE(pcm1863_snd_controls),
+ .dapm_widgets = pcm1863_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(pcm1863_dapm_widgets),
+ .dapm_routes = pcm1863_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(pcm1863_dapm_routes),
+ },
+};
+
+static struct snd_soc_codec_driver soc_codec_dev_pcm1865 = {
+ .set_bias_level = pcm186x_set_bias_level,
+ .suspend_bias_off = true,
+
+ .component_driver = {
+ .controls = pcm1865_snd_controls,
+ .num_controls = ARRAY_SIZE(pcm1865_snd_controls),
+ .dapm_widgets = pcm1865_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(pcm1865_dapm_widgets),
+ .dapm_routes = pcm1865_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(pcm1865_dapm_routes),
+ },
+};
+
+static bool pcm186x_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case PCM186X_PAGE:
+ case PCM186X_DEVICE_STATUS:
+ case PCM186X_FSAMPLE_STATUS:
+ case PCM186X_DIV_STATUS:
+ case PCM186X_CLK_STATUS:
+ case PCM186X_SUPPLY_STATUS:
+ case PCM186X_MMAP_STAT_CTRL:
+ case PCM186X_MMAP_ADDRESS:
+ return true;
+ }
+
+ return false;
+}
+
+static const struct regmap_range_cfg pcm186x_range = {
+ .name = "Pages",
+ .range_max = PCM186X_MAX_REGISTER,
+ .selector_reg = PCM186X_PAGE,
+ .selector_mask = 0xff,
+ .window_len = PCM186X_PAGE_LEN,
+};
+
+const struct regmap_config pcm186x_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .volatile_reg = pcm186x_volatile,
+
+ .ranges = &pcm186x_range,
+ .num_ranges = 1,
+
+ .max_register = PCM186X_MAX_REGISTER,
+
+ .cache_type = REGCACHE_RBTREE,
+};
+EXPORT_SYMBOL_GPL(pcm186x_regmap);
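
The range config above is what lets the register #defines in pcm186x.h
(further down in this patch) encode page and in-page offset in one flat
value: regmap writes the page-select register and then accesses the
offset within the 256-byte window. A small illustration of that
decomposition, with PAGE_LEN mirroring PCM186X_PAGE_LEN:

#include <stdio.h>

#define PAGE_LEN 0x0100

int main(void)
{
	/* PCM186X_PCM_CFG, PCM186X_MMAP_ADDRESS, PCM186X_CURR_TRIM_CTRL */
	unsigned int regs[] = { 11, 1 * PAGE_LEN + 2, 253 * PAGE_LEN + 20 };

	for (unsigned int i = 0; i < sizeof(regs) / sizeof(regs[0]); i++)
		printf("virtual 0x%04x -> page %u, offset %u\n",
		       regs[i], regs[i] / PAGE_LEN, regs[i] % PAGE_LEN);
	return 0;
}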
+
+int pcm186x_probe(struct device *dev, enum pcm186x_type type, int irq,
+ struct regmap *regmap)
+{
+ struct pcm186x_priv *priv;
+ int i, ret;
+
+ priv = devm_kzalloc(dev, sizeof(struct pcm186x_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, priv);
+ priv->regmap = regmap;
+
+ for (i = 0; i < ARRAY_SIZE(priv->supplies); i++)
+ priv->supplies[i].supply = pcm186x_supply_names[i];
+
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(priv->supplies),
+ priv->supplies);
+ if (ret) {
+ dev_err(dev, "failed to request supplies: %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(priv->supplies),
+ priv->supplies);
+ if (ret) {
+ dev_err(dev, "failed enable supplies: %d\n", ret);
+ return ret;
+ }
+
+ /* Reset device registers for a consistent power-on-like state */
+ ret = regmap_write(regmap, PCM186X_PAGE, PCM186X_RESET);
+ if (ret) {
+ dev_err(dev, "failed to write device: %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(priv->supplies),
+ priv->supplies);
+ if (ret) {
+ dev_err(dev, "failed disable supplies: %d\n", ret);
+ return ret;
+ }
+
+ switch (type) {
+ case PCM1865:
+ case PCM1864:
+ ret = snd_soc_register_codec(dev, &soc_codec_dev_pcm1865,
+ &pcm1865_dai, 1);
+ break;
+ case PCM1863:
+ case PCM1862:
+ default:
+ ret = snd_soc_register_codec(dev, &soc_codec_dev_pcm1863,
+ &pcm1863_dai, 1);
+ }
+ if (ret) {
+ dev_err(dev, "failed to register CODEC: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pcm186x_probe);
+
+int pcm186x_remove(struct device *dev)
+{
+ snd_soc_unregister_codec(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pcm186x_remove);
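
pcm186x_probe(), pcm186x_remove() and pcm186x_regmap are exported so that
thin bus-glue modules can share this core. A minimal sketch of what a
hypothetical I2C front-end could look like (driver and device-ID names
here are illustrative, not the actual companion file in this patch set):

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/regmap.h>

#include "pcm186x.h"

static int pcm186x_i2c_probe(struct i2c_client *i2c,
			     const struct i2c_device_id *id)
{
	struct regmap *regmap;

	/* Reuse the shared, page-aware register map exported above */
	regmap = devm_regmap_init_i2c(i2c, &pcm186x_regmap);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	return pcm186x_probe(&i2c->dev, (enum pcm186x_type)id->driver_data,
			     i2c->irq, regmap);
}

static int pcm186x_i2c_remove(struct i2c_client *i2c)
{
	return pcm186x_remove(&i2c->dev);
}

static const struct i2c_device_id pcm186x_i2c_id[] = {
	{ "pcm1863", PCM1863 },
	{ "pcm1865", PCM1865 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, pcm186x_i2c_id);

static struct i2c_driver pcm186x_i2c_driver = {
	.probe = pcm186x_i2c_probe,
	.remove = pcm186x_i2c_remove,
	.id_table = pcm186x_i2c_id,
	.driver = {
		.name = "pcm186x",
	},
};
module_i2c_driver(pcm186x_i2c_driver);

MODULE_LICENSE("GPL v2");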
+
+MODULE_AUTHOR("Andreas Dannenberg <dannenberg@ti.com>");
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("PCM186x Universal Audio ADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/pcm186x.h b/sound/soc/codecs/pcm186x.h
new file mode 100644
index 000000000000..b630111bb3c4
--- /dev/null
+++ b/sound/soc/codecs/pcm186x.h
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments PCM186x Universal Audio ADC
+ *
+ * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com
+ * Andreas Dannenberg <dannenberg@ti.com>
+ * Andrew F. Davis <afd@ti.com>
+ */
+
+#ifndef _PCM186X_H_
+#define _PCM186X_H_
+
+#include <linux/pm.h>
+#include <linux/regmap.h>
+
+enum pcm186x_type {
+ PCM1862,
+ PCM1863,
+ PCM1864,
+ PCM1865,
+};
+
+#define PCM186X_RATES SNDRV_PCM_RATE_8000_192000
+#define PCM186X_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S20_3LE |\
+ SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE)
+
+#define PCM186X_PAGE_LEN 0x0100
+#define PCM186X_PAGE_BASE(n) (PCM186X_PAGE_LEN * (n))
+
+/* The page selection register address is the same on all pages */
+#define PCM186X_PAGE 0
+
+/* Register Definitions - Page 0 */
+#define PCM186X_PGA_VAL_CH1_L (PCM186X_PAGE_BASE(0) + 1)
+#define PCM186X_PGA_VAL_CH1_R (PCM186X_PAGE_BASE(0) + 2)
+#define PCM186X_PGA_VAL_CH2_L (PCM186X_PAGE_BASE(0) + 3)
+#define PCM186X_PGA_VAL_CH2_R (PCM186X_PAGE_BASE(0) + 4)
+#define PCM186X_PGA_CTRL (PCM186X_PAGE_BASE(0) + 5)
+#define PCM186X_ADC1_INPUT_SEL_L (PCM186X_PAGE_BASE(0) + 6)
+#define PCM186X_ADC1_INPUT_SEL_R (PCM186X_PAGE_BASE(0) + 7)
+#define PCM186X_ADC2_INPUT_SEL_L (PCM186X_PAGE_BASE(0) + 8)
+#define PCM186X_ADC2_INPUT_SEL_R (PCM186X_PAGE_BASE(0) + 9)
+#define PCM186X_AUXADC_INPUT_SEL (PCM186X_PAGE_BASE(0) + 10)
+#define PCM186X_PCM_CFG (PCM186X_PAGE_BASE(0) + 11)
+#define PCM186X_TDM_TX_SEL (PCM186X_PAGE_BASE(0) + 12)
+#define PCM186X_TDM_TX_OFFSET (PCM186X_PAGE_BASE(0) + 13)
+#define PCM186X_TDM_RX_OFFSET (PCM186X_PAGE_BASE(0) + 14)
+#define PCM186X_DPGA_VAL_CH1_L (PCM186X_PAGE_BASE(0) + 15)
+#define PCM186X_GPIO1_0_CTRL (PCM186X_PAGE_BASE(0) + 16)
+#define PCM186X_GPIO3_2_CTRL (PCM186X_PAGE_BASE(0) + 17)
+#define PCM186X_GPIO1_0_DIR_CTRL (PCM186X_PAGE_BASE(0) + 18)
+#define PCM186X_GPIO3_2_DIR_CTRL (PCM186X_PAGE_BASE(0) + 19)
+#define PCM186X_GPIO_IN_OUT (PCM186X_PAGE_BASE(0) + 20)
+#define PCM186X_GPIO_PULL_CTRL (PCM186X_PAGE_BASE(0) + 21)
+#define PCM186X_DPGA_VAL_CH1_R (PCM186X_PAGE_BASE(0) + 22)
+#define PCM186X_DPGA_VAL_CH2_L (PCM186X_PAGE_BASE(0) + 23)
+#define PCM186X_DPGA_VAL_CH2_R (PCM186X_PAGE_BASE(0) + 24)
+#define PCM186X_DPGA_GAIN_CTRL (PCM186X_PAGE_BASE(0) + 25)
+#define PCM186X_DPGA_MIC_CTRL (PCM186X_PAGE_BASE(0) + 26)
+#define PCM186X_DIN_RESAMP_CTRL (PCM186X_PAGE_BASE(0) + 27)
+#define PCM186X_CLK_CTRL (PCM186X_PAGE_BASE(0) + 32)
+#define PCM186X_DSP1_CLK_DIV (PCM186X_PAGE_BASE(0) + 33)
+#define PCM186X_DSP2_CLK_DIV (PCM186X_PAGE_BASE(0) + 34)
+#define PCM186X_ADC_CLK_DIV (PCM186X_PAGE_BASE(0) + 35)
+#define PCM186X_PLL_SCK_DIV (PCM186X_PAGE_BASE(0) + 37)
+#define PCM186X_BCK_DIV (PCM186X_PAGE_BASE(0) + 38)
+#define PCM186X_LRK_DIV (PCM186X_PAGE_BASE(0) + 39)
+#define PCM186X_PLL_CTRL (PCM186X_PAGE_BASE(0) + 40)
+#define PCM186X_PLL_P_DIV (PCM186X_PAGE_BASE(0) + 41)
+#define PCM186X_PLL_R_DIV (PCM186X_PAGE_BASE(0) + 42)
+#define PCM186X_PLL_J_DIV (PCM186X_PAGE_BASE(0) + 43)
+#define PCM186X_PLL_D_DIV_LSB (PCM186X_PAGE_BASE(0) + 44)
+#define PCM186X_PLL_D_DIV_MSB (PCM186X_PAGE_BASE(0) + 45)
+#define PCM186X_SIGDET_MODE (PCM186X_PAGE_BASE(0) + 48)
+#define PCM186X_SIGDET_MASK (PCM186X_PAGE_BASE(0) + 49)
+#define PCM186X_SIGDET_STAT (PCM186X_PAGE_BASE(0) + 50)
+#define PCM186X_SIGDET_LOSS_TIME (PCM186X_PAGE_BASE(0) + 52)
+#define PCM186X_SIGDET_SCAN_TIME (PCM186X_PAGE_BASE(0) + 53)
+#define PCM186X_SIGDET_INT_INTVL (PCM186X_PAGE_BASE(0) + 54)
+#define PCM186X_SIGDET_DC_REF_CH1_L (PCM186X_PAGE_BASE(0) + 64)
+#define PCM186X_SIGDET_DC_DIFF_CH1_L (PCM186X_PAGE_BASE(0) + 65)
+#define PCM186X_SIGDET_DC_LEV_CH1_L (PCM186X_PAGE_BASE(0) + 66)
+#define PCM186X_SIGDET_DC_REF_CH1_R (PCM186X_PAGE_BASE(0) + 67)
+#define PCM186X_SIGDET_DC_DIFF_CH1_R (PCM186X_PAGE_BASE(0) + 68)
+#define PCM186X_SIGDET_DC_LEV_CH1_R (PCM186X_PAGE_BASE(0) + 69)
+#define PCM186X_SIGDET_DC_REF_CH2_L (PCM186X_PAGE_BASE(0) + 70)
+#define PCM186X_SIGDET_DC_DIFF_CH2_L (PCM186X_PAGE_BASE(0) + 71)
+#define PCM186X_SIGDET_DC_LEV_CH2_L (PCM186X_PAGE_BASE(0) + 72)
+#define PCM186X_SIGDET_DC_REF_CH2_R (PCM186X_PAGE_BASE(0) + 73)
+#define PCM186X_SIGDET_DC_DIFF_CH2_R (PCM186X_PAGE_BASE(0) + 74)
+#define PCM186X_SIGDET_DC_LEV_CH2_R (PCM186X_PAGE_BASE(0) + 75)
+#define PCM186X_SIGDET_DC_REF_CH3_L (PCM186X_PAGE_BASE(0) + 76)
+#define PCM186X_SIGDET_DC_DIFF_CH3_L (PCM186X_PAGE_BASE(0) + 77)
+#define PCM186X_SIGDET_DC_LEV_CH3_L (PCM186X_PAGE_BASE(0) + 78)
+#define PCM186X_SIGDET_DC_REF_CH3_R (PCM186X_PAGE_BASE(0) + 79)
+#define PCM186X_SIGDET_DC_DIFF_CH3_R (PCM186X_PAGE_BASE(0) + 80)
+#define PCM186X_SIGDET_DC_LEV_CH3_R (PCM186X_PAGE_BASE(0) + 81)
+#define PCM186X_SIGDET_DC_REF_CH4_L (PCM186X_PAGE_BASE(0) + 82)
+#define PCM186X_SIGDET_DC_DIFF_CH4_L (PCM186X_PAGE_BASE(0) + 83)
+#define PCM186X_SIGDET_DC_LEV_CH4_L (PCM186X_PAGE_BASE(0) + 84)
+#define PCM186X_SIGDET_DC_REF_CH4_R (PCM186X_PAGE_BASE(0) + 85)
+#define PCM186X_SIGDET_DC_DIFF_CH4_R (PCM186X_PAGE_BASE(0) + 86)
+#define PCM186X_SIGDET_DC_LEV_CH4_R (PCM186X_PAGE_BASE(0) + 87)
+#define PCM186X_AUXADC_DATA_CTRL (PCM186X_PAGE_BASE(0) + 88)
+#define PCM186X_AUXADC_DATA_LSB (PCM186X_PAGE_BASE(0) + 89)
+#define PCM186X_AUXADC_DATA_MSB (PCM186X_PAGE_BASE(0) + 90)
+#define PCM186X_INT_ENABLE (PCM186X_PAGE_BASE(0) + 96)
+#define PCM186X_INT_FLAG (PCM186X_PAGE_BASE(0) + 97)
+#define PCM186X_INT_POL_WIDTH (PCM186X_PAGE_BASE(0) + 98)
+#define PCM186X_POWER_CTRL (PCM186X_PAGE_BASE(0) + 112)
+#define PCM186X_FILTER_MUTE_CTRL (PCM186X_PAGE_BASE(0) + 113)
+#define PCM186X_DEVICE_STATUS (PCM186X_PAGE_BASE(0) + 114)
+#define PCM186X_FSAMPLE_STATUS (PCM186X_PAGE_BASE(0) + 115)
+#define PCM186X_DIV_STATUS (PCM186X_PAGE_BASE(0) + 116)
+#define PCM186X_CLK_STATUS (PCM186X_PAGE_BASE(0) + 117)
+#define PCM186X_SUPPLY_STATUS (PCM186X_PAGE_BASE(0) + 120)
+
+/* Register Definitions - Page 1 */
+#define PCM186X_MMAP_STAT_CTRL (PCM186X_PAGE_BASE(1) + 1)
+#define PCM186X_MMAP_ADDRESS (PCM186X_PAGE_BASE(1) + 2)
+#define PCM186X_MEM_WDATA0 (PCM186X_PAGE_BASE(1) + 4)
+#define PCM186X_MEM_WDATA1 (PCM186X_PAGE_BASE(1) + 5)
+#define PCM186X_MEM_WDATA2 (PCM186X_PAGE_BASE(1) + 6)
+#define PCM186X_MEM_WDATA3 (PCM186X_PAGE_BASE(1) + 7)
+#define PCM186X_MEM_RDATA0 (PCM186X_PAGE_BASE(1) + 8)
+#define PCM186X_MEM_RDATA1 (PCM186X_PAGE_BASE(1) + 9)
+#define PCM186X_MEM_RDATA2 (PCM186X_PAGE_BASE(1) + 10)
+#define PCM186X_MEM_RDATA3 (PCM186X_PAGE_BASE(1) + 11)
+
+/* Register Definitions - Page 3 */
+#define PCM186X_OSC_PWR_DOWN_CTRL (PCM186X_PAGE_BASE(3) + 18)
+#define PCM186X_MIC_BIAS_CTRL (PCM186X_PAGE_BASE(3) + 21)
+
+/* Register Definitions - Page 253 */
+#define PCM186X_CURR_TRIM_CTRL (PCM186X_PAGE_BASE(253) + 20)
+
+#define PCM186X_MAX_REGISTER PCM186X_CURR_TRIM_CTRL
+
+/* PCM186X_PAGE */
+#define PCM186X_RESET 0xff
+
+/* PCM186X_ADCX_INPUT_SEL_X */
+#define PCM186X_ADC_INPUT_SEL_POL BIT(7)
+#define PCM186X_ADC_INPUT_SEL_MASK GENMASK(5, 0)
+
+/* PCM186X_PCM_CFG */
+#define PCM186X_PCM_CFG_RX_WLEN_MASK GENMASK(7, 6)
+#define PCM186X_PCM_CFG_RX_WLEN_SHIFT 6
+#define PCM186X_PCM_CFG_RX_WLEN_32 0x00
+#define PCM186X_PCM_CFG_RX_WLEN_24 0x01
+#define PCM186X_PCM_CFG_RX_WLEN_20 0x02
+#define PCM186X_PCM_CFG_RX_WLEN_16 0x03
+#define PCM186X_PCM_CFG_TDM_LRCK_MODE BIT(4)
+#define PCM186X_PCM_CFG_TX_WLEN_MASK GENMASK(3, 2)
+#define PCM186X_PCM_CFG_TX_WLEN_SHIFT 2
+#define PCM186X_PCM_CFG_TX_WLEN_32 0x00
+#define PCM186X_PCM_CFG_TX_WLEN_24 0x01
+#define PCM186X_PCM_CFG_TX_WLEN_20 0x02
+#define PCM186X_PCM_CFG_TX_WLEN_16 0x03
+#define PCM186X_PCM_CFG_FMT_MASK GENMASK(1, 0)
+#define PCM186X_PCM_CFG_FMT_SHIFT 0
+#define PCM186X_PCM_CFG_FMT_I2S 0x00
+#define PCM186X_PCM_CFG_FMT_LEFTJ 0x01
+#define PCM186X_PCM_CFG_FMT_RIGHTJ 0x02
+#define PCM186X_PCM_CFG_FMT_TDM 0x03
+
+/* PCM186X_TDM_TX_SEL */
+#define PCM186X_TDM_TX_SEL_2CH 0x00
+#define PCM186X_TDM_TX_SEL_4CH 0x01
+#define PCM186X_TDM_TX_SEL_6CH 0x02
+#define PCM186X_TDM_TX_SEL_MASK 0x03
+
+/* PCM186X_CLK_CTRL */
+#define PCM186X_CLK_CTRL_SCK_XI_SEL1 BIT(7)
+#define PCM186X_CLK_CTRL_SCK_XI_SEL0 BIT(6)
+#define PCM186X_CLK_CTRL_SCK_SRC_PLL BIT(5)
+#define PCM186X_CLK_CTRL_MST_MODE BIT(4)
+#define PCM186X_CLK_CTRL_ADC_SRC_PLL BIT(3)
+#define PCM186X_CLK_CTRL_DSP2_SRC_PLL BIT(2)
+#define PCM186X_CLK_CTRL_DSP1_SRC_PLL BIT(1)
+#define PCM186X_CLK_CTRL_CLKDET_EN BIT(0)
+
+/* PCM186X_PLL_CTRL */
+#define PCM186X_PLL_CTRL_LOCK BIT(4)
+#define PCM186X_PLL_CTRL_REF_SEL BIT(1)
+#define PCM186X_PLL_CTRL_EN BIT(0)
+
+/* PCM186X_POWER_CTRL */
+#define PCM186X_PWR_CTRL_PWRDN BIT(2)
+#define PCM186X_PWR_CTRL_SLEEP BIT(1)
+#define PCM186X_PWR_CTRL_STBY BIT(0)
+
+/* PCM186X_CLK_STATUS */
+#define PCM186X_CLK_STATUS_LRCKHLT BIT(6)
+#define PCM186X_CLK_STATUS_BCKHLT BIT(5)
+#define PCM186X_CLK_STATUS_SCKHLT BIT(4)
+#define PCM186X_CLK_STATUS_LRCKERR BIT(2)
+#define PCM186X_CLK_STATUS_BCKERR BIT(1)
+#define PCM186X_CLK_STATUS_SCKERR BIT(0)
+
+/* PCM186X_SUPPLY_STATUS */
+#define PCM186X_SUPPLY_STATUS_DVDD BIT(2)
+#define PCM186X_SUPPLY_STATUS_AVDD BIT(1)
+#define PCM186X_SUPPLY_STATUS_LDO BIT(0)
+
+/* PCM186X_MMAP_STAT_CTRL */
+#define PCM186X_MMAP_STAT_DONE BIT(4)
+#define PCM186X_MMAP_STAT_BUSY BIT(2)
+#define PCM186X_MMAP_STAT_R_REQ BIT(1)
+#define PCM186X_MMAP_STAT_W_REQ BIT(0)
+
+extern const struct regmap_config pcm186x_regmap;
+
+int pcm186x_probe(struct device *dev, enum pcm186x_type type, int irq,
+ struct regmap *regmap);
+int pcm186x_remove(struct device *dev);
+
+#endif /* _PCM186X_H_ */
diff --git a/sound/soc/codecs/pcm512x-spi.c b/sound/soc/codecs/pcm512x-spi.c
index 25c63510ae15..7cdd2dc4fd79 100644
--- a/sound/soc/codecs/pcm512x-spi.c
+++ b/sound/soc/codecs/pcm512x-spi.c
@@ -70,3 +70,7 @@ static struct spi_driver pcm512x_spi_driver = {
};
module_spi_driver(pcm512x_spi_driver);
+
+MODULE_DESCRIPTION("ASoC PCM512x codec driver - SPI");
+MODULE_AUTHOR("Mark Brown <broonie@kernel.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/rl6231.c b/sound/soc/codecs/rl6231.c
index 974a9040651d..7ef3b5476bcc 100644
--- a/sound/soc/codecs/rl6231.c
+++ b/sound/soc/codecs/rl6231.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/regmap.h>
+#include <linux/gcd.h>
#include "rl6231.h"
/**
@@ -106,6 +107,25 @@ static const struct pll_calc_map pll_preset_table[] = {
{19200000, 24576000, 3, 30, 3, false},
};
+static unsigned int find_best_div(unsigned int in,
+ unsigned int max, unsigned int div)
+{
+ unsigned int d;
+
+ if (in <= max)
+ return 1;
+
+ d = in / max;
+ if (in % max)
+ d++;
+
+ while (div % d != 0)
+ d++;
+
+ return d;
+}
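
find_best_div() picks the smallest divisor of "div" that still brings
"in" down to at most "max": round in/max up, then bump the candidate
until it divides div, so the reduced input and output rates keep their
exact ratio. A worked run (the 511 ceiling used for RL6231_PLL_N_MAX
below is an assumption for this example):

#include <stdio.h>

static unsigned int find_best_div(unsigned int in, unsigned int max,
				  unsigned int div)
{
	unsigned int d;

	if (in <= max)
		return 1;

	d = in / max;
	if (in % max)
		d++;
	while (div % d != 0)
		d++;
	return d;
}

int main(void)
{
	/* gcd(19200000, 24576000) = 768000; 0xffffffff / 511 = 8405024 */
	printf("%u\n", find_best_div(19200000, 8405024, 768000)); /* 3 */
	return 0;
}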
+
/**
* rl6231_pll_calc - Calculate PLL M/N/K code.
* @freq_in: external clock provided to codec.
@@ -120,9 +140,11 @@ int rl6231_pll_calc(const unsigned int freq_in,
const unsigned int freq_out, struct rl6231_pll_code *pll_code)
{
int max_n = RL6231_PLL_N_MAX, max_m = RL6231_PLL_M_MAX;
- int i, k, red, n_t, pll_out, in_t, out_t;
- int n = 0, m = 0, m_t = 0;
- int red_t = abs(freq_out - freq_in);
+ int i, k, n_t;
+ int k_t, min_k, max_k, n = 0, m = 0, m_t = 0;
+ unsigned int red, pll_out, in_t, out_t, div, div_t;
+ unsigned int red_t = abs(freq_out - freq_in);
+ unsigned int f_in, f_out, f_max;
bool bypass = false;
if (RL6231_PLL_INP_MAX < freq_in || RL6231_PLL_INP_MIN > freq_in)
@@ -140,39 +162,52 @@ int rl6231_pll_calc(const unsigned int freq_in,
}
}
- k = 100000000 / freq_out - 2;
- if (k > RL6231_PLL_K_MAX)
- k = RL6231_PLL_K_MAX;
- for (n_t = 0; n_t <= max_n; n_t++) {
- in_t = freq_in / (k + 2);
- pll_out = freq_out / (n_t + 2);
- if (in_t < 0)
- continue;
- if (in_t == pll_out) {
- bypass = true;
- n = n_t;
- goto code_find;
- }
- red = abs(in_t - pll_out);
- if (red < red_t) {
- bypass = true;
- n = n_t;
- m = m_t;
- if (red == 0)
+ min_k = 80000000 / freq_out - 2;
+ max_k = 150000000 / freq_out - 2;
+ if (max_k > RL6231_PLL_K_MAX)
+ max_k = RL6231_PLL_K_MAX;
+ if (min_k > RL6231_PLL_K_MAX)
+ min_k = max_k = RL6231_PLL_K_MAX;
+ div_t = gcd(freq_in, freq_out);
+ f_max = 0xffffffff / RL6231_PLL_N_MAX;
+ div = find_best_div(freq_in, f_max, div_t);
+ f_in = freq_in / div;
+ f_out = freq_out / div;
+ k = min_k;
+ for (k_t = min_k; k_t <= max_k; k_t++) {
+ for (n_t = 0; n_t <= max_n; n_t++) {
+ in_t = f_in * (n_t + 2);
+ pll_out = f_out * (k_t + 2);
+ if (in_t == pll_out) {
+ bypass = true;
+ n = n_t;
+ k = k_t;
goto code_find;
- red_t = red;
- }
- for (m_t = 0; m_t <= max_m; m_t++) {
- out_t = in_t / (m_t + 2);
- red = abs(out_t - pll_out);
+ }
+ out_t = in_t / (k_t + 2);
+ red = abs(f_out - out_t);
if (red < red_t) {
- bypass = false;
+ bypass = true;
n = n_t;
- m = m_t;
+ m = 0;
+ k = k_t;
if (red == 0)
goto code_find;
red_t = red;
}
+ for (m_t = 0; m_t <= max_m; m_t++) {
+ out_t = in_t / ((m_t + 2) * (k_t + 2));
+ red = abs(f_out - out_t);
+ if (red < red_t) {
+ bypass = false;
+ n = n_t;
+ m = m_t;
+ k = k_t;
+ if (red == 0)
+ goto code_find;
+ red_t = red;
+ }
+ }
}
}
pr_debug("Only get approximation about PLL\n");
diff --git a/sound/soc/codecs/rt5514-spi.c b/sound/soc/codecs/rt5514-spi.c
index 64bf26cec20d..2144edca97b0 100644
--- a/sound/soc/codecs/rt5514-spi.c
+++ b/sound/soc/codecs/rt5514-spi.c
@@ -381,6 +381,7 @@ int rt5514_spi_burst_read(unsigned int addr, u8 *rxbuf, size_t len)
return true;
}
+EXPORT_SYMBOL_GPL(rt5514_spi_burst_read);
/**
* rt5514_spi_burst_write - Write data to SPI by rt5514 address.
diff --git a/sound/soc/codecs/rt5514.c b/sound/soc/codecs/rt5514.c
index 2dd6e9f990a4..198df016802f 100644
--- a/sound/soc/codecs/rt5514.c
+++ b/sound/soc/codecs/rt5514.c
@@ -295,6 +295,33 @@ static int rt5514_dsp_voice_wake_up_get(struct snd_kcontrol *kcontrol,
return 0;
}
+static int rt5514_calibration(struct rt5514_priv *rt5514, bool on)
+{
+ if (on) {
+ regmap_write(rt5514->regmap, RT5514_ANA_CTRL_PLL3, 0x0000000a);
+ regmap_update_bits(rt5514->regmap, RT5514_PLL_SOURCE_CTRL, 0xf,
+ 0xa);
+ regmap_update_bits(rt5514->regmap, RT5514_PWR_ANA1, 0x301,
+ 0x301);
+ regmap_write(rt5514->regmap, RT5514_PLL3_CALIB_CTRL4,
+ 0x80000000 | rt5514->pll3_cal_value);
+ regmap_write(rt5514->regmap, RT5514_PLL3_CALIB_CTRL1,
+ 0x8bb80800);
+ regmap_update_bits(rt5514->regmap, RT5514_PLL3_CALIB_CTRL5,
+ 0xc0000000, 0x80000000);
+ regmap_update_bits(rt5514->regmap, RT5514_PLL3_CALIB_CTRL5,
+ 0xc0000000, 0xc0000000);
+ } else {
+ regmap_update_bits(rt5514->regmap, RT5514_PLL3_CALIB_CTRL5,
+ 0xc0000000, 0x40000000);
+ regmap_update_bits(rt5514->regmap, RT5514_PWR_ANA1, 0x301, 0);
+ regmap_update_bits(rt5514->regmap, RT5514_PLL_SOURCE_CTRL, 0xf,
+ 0x4);
+ }
+
+ return 0;
+}
+
static int rt5514_dsp_voice_wake_up_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -302,6 +329,7 @@ static int rt5514_dsp_voice_wake_up_put(struct snd_kcontrol *kcontrol,
struct rt5514_priv *rt5514 = snd_soc_component_get_drvdata(component);
struct snd_soc_codec *codec = rt5514->codec;
const struct firmware *fw = NULL;
+ u8 buf[8];
if (ucontrol->value.integer.value[0] == rt5514->dsp_enabled)
return 0;
@@ -310,6 +338,35 @@ static int rt5514_dsp_voice_wake_up_put(struct snd_kcontrol *kcontrol,
rt5514->dsp_enabled = ucontrol->value.integer.value[0];
if (rt5514->dsp_enabled) {
+ if (rt5514->pdata.dsp_calib_clk_name &&
+ !IS_ERR(rt5514->dsp_calib_clk)) {
+ if (clk_set_rate(rt5514->dsp_calib_clk,
+ rt5514->pdata.dsp_calib_clk_rate))
+ dev_err(codec->dev,
+ "Can't set rate for mclk");
+
+ if (clk_prepare_enable(rt5514->dsp_calib_clk))
+ dev_err(codec->dev,
+ "Can't enable dsp_calib_clk");
+
+ rt5514_calibration(rt5514, true);
+
+ msleep(20);
+#if IS_ENABLED(CONFIG_SND_SOC_RT5514_SPI)
+ rt5514_spi_burst_read(RT5514_PLL3_CALIB_CTRL6 |
+ RT5514_DSP_MAPPING,
+ (u8 *)&buf, sizeof(buf));
+#else
+ dev_err(codec->dev, "There is no SPI driver for"
+ " loading the firmware\n");
+#endif
+ rt5514->pll3_cal_value = buf[0] | buf[1] << 8 |
+ buf[2] << 16 | buf[3] << 24;
+
+ rt5514_calibration(rt5514, false);
+ clk_disable_unprepare(rt5514->dsp_calib_clk);
+ }
+
rt5514_enable_dsp_prepare(rt5514);
request_firmware(&fw, RT5514_FIRMWARE1, codec->dev);
@@ -341,6 +398,20 @@ static int rt5514_dsp_voice_wake_up_put(struct snd_kcontrol *kcontrol,
/* DSP run */
regmap_write(rt5514->i2c_regmap, 0x18002f00,
0x00055148);
+
+ if (rt5514->pdata.dsp_calib_clk_name &&
+ !IS_ERR(rt5514->dsp_calib_clk)) {
+ msleep(20);
+
+ regmap_write(rt5514->i2c_regmap, 0x1800211c,
+ rt5514->pll3_cal_value);
+ regmap_write(rt5514->i2c_regmap, 0x18002124,
+ 0x00220012);
+ regmap_write(rt5514->i2c_regmap, 0x18002124,
+ 0x80220042);
+ regmap_write(rt5514->i2c_regmap, 0x18002124,
+ 0xe0220042);
+ }
} else {
regmap_multi_reg_write(rt5514->i2c_regmap,
rt5514_i2c_patch, ARRAY_SIZE(rt5514_i2c_patch));
@@ -1024,12 +1095,22 @@ static int rt5514_set_bias_level(struct snd_soc_codec *codec,
static int rt5514_probe(struct snd_soc_codec *codec)
{
struct rt5514_priv *rt5514 = snd_soc_codec_get_drvdata(codec);
+ struct platform_device *pdev = container_of(codec->dev,
+ struct platform_device, dev);
rt5514->mclk = devm_clk_get(codec->dev, "mclk");
if (PTR_ERR(rt5514->mclk) == -EPROBE_DEFER)
return -EPROBE_DEFER;
+ if (rt5514->pdata.dsp_calib_clk_name) {
+ rt5514->dsp_calib_clk = devm_clk_get(&pdev->dev,
+ rt5514->pdata.dsp_calib_clk_name);
+ if (PTR_ERR(rt5514->dsp_calib_clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ }
+
rt5514->codec = codec;
+ rt5514->pll3_cal_value = 0x0078b000;
return 0;
}
@@ -1147,6 +1228,10 @@ static int rt5514_parse_dp(struct rt5514_priv *rt5514, struct device *dev)
{
device_property_read_u32(dev, "realtek,dmic-init-delay-ms",
&rt5514->pdata.dmic_init_delay);
+ device_property_read_string(dev, "realtek,dsp-calib-clk-name",
+ &rt5514->pdata.dsp_calib_clk_name);
+ device_property_read_u32(dev, "realtek,dsp-calib-clk-rate",
+ &rt5514->pdata.dsp_calib_clk_rate);
return 0;
}
diff --git a/sound/soc/codecs/rt5514.h b/sound/soc/codecs/rt5514.h
index 2dc40e6d8b3f..f0f3400ce6b1 100644
--- a/sound/soc/codecs/rt5514.h
+++ b/sound/soc/codecs/rt5514.h
@@ -34,7 +34,9 @@
#define RT5514_CLK_CTRL1 0x2104
#define RT5514_CLK_CTRL2 0x2108
#define RT5514_PLL3_CALIB_CTRL1 0x2110
+#define RT5514_PLL3_CALIB_CTRL4 0x2120
#define RT5514_PLL3_CALIB_CTRL5 0x2124
+#define RT5514_PLL3_CALIB_CTRL6 0x2128
#define RT5514_DELAY_BUF_CTRL1 0x2140
#define RT5514_DELAY_BUF_CTRL3 0x2148
#define RT5514_ASRC_IN_CTRL1 0x2180
@@ -272,7 +274,7 @@ struct rt5514_priv {
struct rt5514_platform_data pdata;
struct snd_soc_codec *codec;
struct regmap *i2c_regmap, *regmap;
- struct clk *mclk;
+ struct clk *mclk, *dsp_calib_clk;
int sysclk;
int sysclk_src;
int lrck;
@@ -281,6 +283,7 @@ struct rt5514_priv {
int pll_in;
int pll_out;
int dsp_enabled;
+ unsigned int pll3_cal_value;
};
#endif /* __RT5514_H__ */
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index edc152c8a1fe..8f140c8b93ac 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -1943,6 +1943,56 @@ static int rt5650_hp_event(struct snd_soc_dapm_widget *w,
return 0;
}
+static int rt5645_set_micbias1_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ snd_soc_update_bits(codec, RT5645_GEN_CTRL2,
+ RT5645_MICBIAS1_POW_CTRL_SEL_MASK,
+ RT5645_MICBIAS1_POW_CTRL_SEL_M);
+ break;
+
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_update_bits(codec, RT5645_GEN_CTRL2,
+ RT5645_MICBIAS1_POW_CTRL_SEL_MASK,
+ RT5645_MICBIAS1_POW_CTRL_SEL_A);
+ break;
+
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+static int rt5645_set_micbias2_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ snd_soc_update_bits(codec, RT5645_GEN_CTRL2,
+ RT5645_MICBIAS2_POW_CTRL_SEL_MASK,
+ RT5645_MICBIAS2_POW_CTRL_SEL_M);
+ break;
+
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_update_bits(codec, RT5645_GEN_CTRL2,
+ RT5645_MICBIAS2_POW_CTRL_SEL_MASK,
+ RT5645_MICBIAS2_POW_CTRL_SEL_A);
+ break;
+
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
static const struct snd_soc_dapm_widget rt5645_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("LDO2", RT5645_PWR_MIXER,
RT5645_PWR_LDO2_BIT, 0, NULL, 0),
@@ -1980,10 +2030,12 @@ static const struct snd_soc_dapm_widget rt5645_dapm_widgets[] = {
/* Input Side */
/* micbias */
- SND_SOC_DAPM_MICBIAS("micbias1", RT5645_PWR_ANLG2,
- RT5645_PWR_MB1_BIT, 0),
- SND_SOC_DAPM_MICBIAS("micbias2", RT5645_PWR_ANLG2,
- RT5645_PWR_MB2_BIT, 0),
+ SND_SOC_DAPM_SUPPLY("micbias1", RT5645_PWR_ANLG2,
+ RT5645_PWR_MB1_BIT, 0, rt5645_set_micbias1_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY("micbias2", RT5645_PWR_ANLG2,
+ RT5645_PWR_MB2_BIT, 0, rt5645_set_micbias2_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
/* Input Lines */
SND_SOC_DAPM_INPUT("DMIC L1"),
SND_SOC_DAPM_INPUT("DMIC R1"),
@@ -3394,6 +3446,9 @@ static int rt5645_probe(struct snd_soc_codec *codec)
snd_soc_dapm_sync(dapm);
}
+ if (rt5645->pdata.long_name)
+ codec->component.card->long_name = rt5645->pdata.long_name;
+
rt5645->eq_param = devm_kzalloc(codec->dev,
RT5645_HWEQ_NUM * sizeof(struct rt5645_eq_param_s), GFP_KERNEL);
@@ -3570,63 +3625,74 @@ static const struct acpi_device_id rt5645_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, rt5645_acpi_match);
#endif
-static const struct rt5645_platform_data general_platform_data = {
+static const struct rt5645_platform_data intel_braswell_platform_data = {
.dmic1_data_pin = RT5645_DMIC1_DISABLE,
.dmic2_data_pin = RT5645_DMIC_DATA_IN2P,
.jd_mode = 3,
};
-static const struct dmi_system_id dmi_platform_intel_braswell[] = {
+static const struct rt5645_platform_data buddy_platform_data = {
+ .dmic1_data_pin = RT5645_DMIC_DATA_GPIO5,
+ .dmic2_data_pin = RT5645_DMIC_DATA_IN2P,
+ .jd_mode = 3,
+ .level_trigger_irq = true,
+};
+
+static const struct rt5645_platform_data gpd_win_platform_data = {
+ .jd_mode = 3,
+ .inv_jd1_1 = true,
+ .long_name = "gpd-win-pocket-rt5645",
+ /* The GPD Pocket has a differential mic; for the Win this does not matter. */
+ .in2_diff = true,
+};
+
+static const struct rt5645_platform_data asus_t100ha_platform_data = {
+ .dmic1_data_pin = RT5645_DMIC_DATA_IN2N,
+ .dmic2_data_pin = RT5645_DMIC2_DISABLE,
+ .jd_mode = 3,
+ .inv_jd1_1 = true,
+};
+
+static const struct rt5645_platform_data jd_mode3_platform_data = {
+ .jd_mode = 3,
+};
+
+static const struct dmi_system_id dmi_platform_data[] = {
+ {
+ .ident = "Chrome Buddy",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Buddy"),
+ },
+ .driver_data = (void *)&buddy_platform_data,
+ },
{
.ident = "Intel Strago",
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "Strago"),
},
+ .driver_data = (void *)&intel_braswell_platform_data,
},
{
.ident = "Google Chrome",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
},
+ .driver_data = (void *)&intel_braswell_platform_data,
},
{
.ident = "Google Setzer",
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
},
+ .driver_data = (void *)&intel_braswell_platform_data,
},
{
.ident = "Microsoft Surface 3",
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "Surface 3"),
},
+ .driver_data = (void *)&intel_braswell_platform_data,
},
- { }
-};
-
-static const struct rt5645_platform_data buddy_platform_data = {
- .dmic1_data_pin = RT5645_DMIC_DATA_GPIO5,
- .dmic2_data_pin = RT5645_DMIC_DATA_IN2P,
- .jd_mode = 3,
- .level_trigger_irq = true,
-};
-
-static const struct dmi_system_id dmi_platform_intel_broadwell[] = {
- {
- .ident = "Chrome Buddy",
- .matches = {
- DMI_MATCH(DMI_PRODUCT_NAME, "Buddy"),
- },
- },
- { }
-};
-
-static const struct rt5645_platform_data gpd_win_platform_data = {
- .jd_mode = 3,
- .inv_jd1_1 = true,
-};
-
-static const struct dmi_system_id dmi_platform_gpd_win[] = {
{
/*
* Match for the GPDwin which unfortunately uses somewhat
@@ -3637,46 +3703,38 @@ static const struct dmi_system_id dmi_platform_gpd_win[] = {
* the same default product_name. Also the GPDwin is the
* only device to have both board_ and product_name not set.
*/
- .ident = "GPD Win",
+ .ident = "GPD Win / Pocket",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
DMI_MATCH(DMI_BOARD_NAME, "Default string"),
DMI_MATCH(DMI_BOARD_SERIAL, "Default string"),
DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
},
+ .driver_data = (void *)&gpd_win_platform_data,
},
- {}
-};
-
-static const struct rt5645_platform_data general_platform_data2 = {
- .dmic1_data_pin = RT5645_DMIC_DATA_IN2N,
- .dmic2_data_pin = RT5645_DMIC2_DISABLE,
- .jd_mode = 3,
- .inv_jd1_1 = true,
-};
-
-static const struct dmi_system_id dmi_platform_asus_t100ha[] = {
{
.ident = "ASUS T100HAN",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "T100HAN"),
},
+ .driver_data = (void *)&asus_t100ha_platform_data,
},
- { }
-};
-
-static const struct rt5645_platform_data minix_z83_4_platform_data = {
- .jd_mode = 3,
-};
-
-static const struct dmi_system_id dmi_platform_minix_z83_4[] = {
{
.ident = "MINIX Z83-4",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "MINIX"),
DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
},
+ .driver_data = (void *)&jd_mode3_platform_data,
+ },
+ {
+ .ident = "Teclast X80 Pro",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TECLAST"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X80 Pro"),
+ },
+ .driver_data = (void *)&jd_mode3_platform_data,
},
{ }
};
@@ -3684,9 +3742,9 @@ static const struct dmi_system_id dmi_platform_minix_z83_4[] = {
static bool rt5645_check_dp(struct device *dev)
{
if (device_property_present(dev, "realtek,in2-differential") ||
- device_property_present(dev, "realtek,dmic1-data-pin") ||
- device_property_present(dev, "realtek,dmic2-data-pin") ||
- device_property_present(dev, "realtek,jd-mode"))
+ device_property_present(dev, "realtek,dmic1-data-pin") ||
+ device_property_present(dev, "realtek,dmic2-data-pin") ||
+ device_property_present(dev, "realtek,jd-mode"))
return true;
return false;
@@ -3710,6 +3768,7 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct rt5645_platform_data *pdata = dev_get_platdata(&i2c->dev);
+ const struct dmi_system_id *dmi_data;
struct rt5645_priv *rt5645;
int ret, i;
unsigned int val;
@@ -3723,20 +3782,18 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
rt5645->i2c = i2c;
i2c_set_clientdata(i2c, rt5645);
+ dmi_data = dmi_first_match(dmi_platform_data);
+ if (dmi_data) {
+ dev_info(&i2c->dev, "Detected %s platform\n", dmi_data->ident);
+ pdata = dmi_data->driver_data;
+ }
+
if (pdata)
rt5645->pdata = *pdata;
- else if (dmi_check_system(dmi_platform_intel_broadwell))
- rt5645->pdata = buddy_platform_data;
else if (rt5645_check_dp(&i2c->dev))
rt5645_parse_dt(rt5645, &i2c->dev);
- else if (dmi_check_system(dmi_platform_intel_braswell))
- rt5645->pdata = general_platform_data;
- else if (dmi_check_system(dmi_platform_gpd_win))
- rt5645->pdata = gpd_win_platform_data;
- else if (dmi_check_system(dmi_platform_asus_t100ha))
- rt5645->pdata = general_platform_data2;
- else if (dmi_check_system(dmi_platform_minix_z83_4))
- rt5645->pdata = minix_z83_4_platform_data;
+ else
+ rt5645->pdata = jd_mode3_platform_data;
if (quirk != -1) {
rt5645->pdata.in2_diff = QUIRK_IN2_DIFF(quirk);
diff --git a/sound/soc/codecs/rt5645.h b/sound/soc/codecs/rt5645.h
index cfc5f97549eb..940325b28c29 100644
--- a/sound/soc/codecs/rt5645.h
+++ b/sound/soc/codecs/rt5645.h
@@ -2117,6 +2117,12 @@ enum {
#define RT5645_RXDC_SRC_STO (0x0 << 7)
#define RT5645_RXDC_SRC_MONO (0x1 << 7)
#define RT5645_RXDC_SRC_SFT (7)
+#define RT5645_MICBIAS1_POW_CTRL_SEL_MASK (0x1 << 5)
+#define RT5645_MICBIAS1_POW_CTRL_SEL_A (0x0 << 5)
+#define RT5645_MICBIAS1_POW_CTRL_SEL_M (0x1 << 5)
+#define RT5645_MICBIAS2_POW_CTRL_SEL_MASK (0x1 << 4)
+#define RT5645_MICBIAS2_POW_CTRL_SEL_A (0x0 << 4)
+#define RT5645_MICBIAS2_POW_CTRL_SEL_M (0x1 << 4)
#define RT5645_RXDP2_SEL_MASK (0x1 << 3)
#define RT5645_RXDP2_SEL_IF2 (0x0 << 3)
#define RT5645_RXDP2_SEL_ADC (0x1 << 3)
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index f2bb4feba3b6..633cdcfc933d 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -1332,10 +1332,13 @@ static int sgtl5000_i2c_probe(struct i2c_client *client,
sgtl5000->mclk = devm_clk_get(&client->dev, NULL);
if (IS_ERR(sgtl5000->mclk)) {
ret = PTR_ERR(sgtl5000->mclk);
- dev_err(&client->dev, "Failed to get mclock: %d\n", ret);
/* Defer the probe to see if the clk will be provided later */
if (ret == -ENOENT)
ret = -EPROBE_DEFER;
+
+ if (ret != -EPROBE_DEFER)
+ dev_err(&client->dev, "Failed to get mclock: %d\n",
+ ret);
goto disable_regs;
}
diff --git a/sound/soc/codecs/si476x.c b/sound/soc/codecs/si476x.c
index 354dc0d64f11..7b91ee267b4e 100644
--- a/sound/soc/codecs/si476x.c
+++ b/sound/soc/codecs/si476x.c
@@ -231,14 +231,17 @@ static struct snd_soc_dai_driver si476x_dai = {
.ops = &si476x_dai_ops,
};
-static struct regmap *si476x_get_regmap(struct device *dev)
+static int si476x_probe(struct snd_soc_component *component)
{
- return dev_get_regmap(dev->parent, NULL);
+ snd_soc_component_init_regmap(component,
+ dev_get_regmap(component->dev->parent, NULL));
+
+ return 0;
}
static const struct snd_soc_codec_driver soc_codec_dev_si476x = {
- .get_regmap = si476x_get_regmap,
.component_driver = {
+ .probe = si476x_probe,
.dapm_widgets = si476x_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(si476x_dapm_widgets),
.dapm_routes = si476x_dapm_routes,
diff --git a/sound/soc/codecs/sn95031.c b/sound/soc/codecs/sn95031.c
deleted file mode 100644
index 887923e68849..000000000000
--- a/sound/soc/codecs/sn95031.c
+++ /dev/null
@@ -1,936 +0,0 @@
-/*
- * sn95031.c - TI sn95031 Codec driver
- *
- * Copyright (C) 2010 Intel Corp
- * Author: Vinod Koul <vinod.koul@intel.com>
- * Author: Harsha Priya <priya.harsha@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- *
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-
-#include <asm/intel_scu_ipc.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-#include <sound/soc-dapm.h>
-#include <sound/initval.h>
-#include <sound/tlv.h>
-#include <sound/jack.h>
-#include "sn95031.h"
-
-#define SN95031_RATES (SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_44100)
-#define SN95031_FORMATS (SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S16_LE)
-
-/* adc helper functions */
-
-/* enables mic bias voltage */
-static void sn95031_enable_mic_bias(struct snd_soc_codec *codec)
-{
- snd_soc_write(codec, SN95031_VAUD, BIT(2)|BIT(1)|BIT(0));
- snd_soc_update_bits(codec, SN95031_MICBIAS, BIT(2), BIT(2));
-}
-
-/* Enable/Disable the ADC depending on the argument */
-static void configure_adc(struct snd_soc_codec *sn95031_codec, int val)
-{
- int value = snd_soc_read(sn95031_codec, SN95031_ADC1CNTL1);
-
- if (val) {
- /* Enable and start the ADC */
- value |= (SN95031_ADC_ENBL | SN95031_ADC_START);
- value &= (~SN95031_ADC_NO_LOOP);
- } else {
- /* Just stop the ADC */
- value &= (~SN95031_ADC_START);
- }
- snd_soc_write(sn95031_codec, SN95031_ADC1CNTL1, value);
-}
-
-/*
- * finds an empty channel for conversion
- * If the ADC is not enabled then start using 0th channel
- * itself. Otherwise find an empty channel by looking for a
- * channel in which the stopbit is set to 1. returns the index
- * of the first free channel if succeeds or an error code.
- *
- * Context: can sleep
- *
- */
-static int find_free_channel(struct snd_soc_codec *sn95031_codec)
-{
- int i, value;
-
- /* check whether ADC is enabled */
- value = snd_soc_read(sn95031_codec, SN95031_ADC1CNTL1);
-
- if ((value & SN95031_ADC_ENBL) == 0)
- return 0;
-
- /* ADC is already enabled; Looking for an empty channel */
- for (i = 0; i < SN95031_ADC_CHANLS_MAX; i++) {
- value = snd_soc_read(sn95031_codec,
- SN95031_ADC_CHNL_START_ADDR + i);
- if (value & SN95031_STOPBIT_MASK)
- break;
- }
- return (i == SN95031_ADC_CHANLS_MAX) ? (-EINVAL) : i;
-}
-
-/* Initialize the ADC for reading micbias values. Can sleep. */
-static int sn95031_initialize_adc(struct snd_soc_codec *sn95031_codec)
-{
- int base_addr, chnl_addr;
- int value;
- int channel_index;
-
- /* Index of the first channel in which the stop bit is set */
- channel_index = find_free_channel(sn95031_codec);
- if (channel_index < 0) {
- pr_err("No free ADC channels");
- return channel_index;
- }
-
- base_addr = SN95031_ADC_CHNL_START_ADDR + channel_index;
-
- if (!(channel_index == 0 || channel_index == SN95031_ADC_LOOP_MAX)) {
- /* Reset stop bit for channels other than 0 and 12 */
- value = snd_soc_read(sn95031_codec, base_addr);
- /* Set the stop bit to zero */
- snd_soc_write(sn95031_codec, base_addr, value & 0xEF);
- /* Index of the first free channel */
- base_addr++;
- channel_index++;
- }
-
- /* Since this is the last channel, set the stop bit
- to 1 by ORing the DIE_SENSOR_CODE with 0x10 */
- snd_soc_write(sn95031_codec, base_addr,
- SN95031_AUDIO_DETECT_CODE | 0x10);
-
- chnl_addr = SN95031_ADC_DATA_START_ADDR + 2 * channel_index;
- pr_debug("mid_initialize : %x", chnl_addr);
- configure_adc(sn95031_codec, 1);
- return chnl_addr;
-}
-
-
-/* reads the ADC registers and gets the mic bias value in mV. */
-static unsigned int sn95031_get_mic_bias(struct snd_soc_codec *codec)
-{
- u16 adc_adr = sn95031_initialize_adc(codec);
- u16 adc_val1, adc_val2;
- unsigned int mic_bias;
-
- sn95031_enable_mic_bias(codec);
-
- /* Enable the sound card for conversion before reading */
- snd_soc_write(codec, SN95031_ADC1CNTL3, 0x05);
- /* Re-toggle the RRDATARD bit */
- snd_soc_write(codec, SN95031_ADC1CNTL3, 0x04);
-
- /* Read the higher bits of data */
- msleep(1000);
- adc_val1 = snd_soc_read(codec, adc_adr);
- adc_adr++;
- adc_val2 = snd_soc_read(codec, adc_adr);
-
- /* Adding lower two bits to the higher bits */
- mic_bias = (adc_val1 << 2) + (adc_val2 & 3);
- mic_bias = (mic_bias * SN95031_ADC_ONE_LSB_MULTIPLIER) / 1000;
- pr_debug("mic bias = %dmV\n", mic_bias);
- return mic_bias;
-}
-/*end - adc helper functions */
-
-static int sn95031_read(void *ctx, unsigned int reg, unsigned int *val)
-{
- u8 value = 0;
- int ret;
-
- ret = intel_scu_ipc_ioread8(reg, &value);
- if (ret == 0)
- *val = value;
-
- return ret;
-}
-
-static int sn95031_write(void *ctx, unsigned int reg, unsigned int value)
-{
- return intel_scu_ipc_iowrite8(reg, value);
-}
-
-static const struct regmap_config sn95031_regmap = {
- .reg_read = sn95031_read,
- .reg_write = sn95031_write,
-};
-
-static int sn95031_set_vaud_bias(struct snd_soc_codec *codec,
- enum snd_soc_bias_level level)
-{
- switch (level) {
- case SND_SOC_BIAS_ON:
- break;
-
- case SND_SOC_BIAS_PREPARE:
- if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_STANDBY) {
- pr_debug("vaud_bias powering up pll\n");
- /* power up the pll */
- snd_soc_write(codec, SN95031_AUDPLLCTRL, BIT(5));
- /* enable pcm 2 */
- snd_soc_update_bits(codec, SN95031_PCM2C2,
- BIT(0), BIT(0));
- }
- break;
-
- case SND_SOC_BIAS_STANDBY:
- switch (snd_soc_codec_get_bias_level(codec)) {
- case SND_SOC_BIAS_OFF:
- pr_debug("vaud_bias power up rail\n");
- /* power up the rail */
- snd_soc_write(codec, SN95031_VAUD,
- BIT(2)|BIT(1)|BIT(0));
- msleep(1);
- break;
- case SND_SOC_BIAS_PREPARE:
- /* turn off pcm */
- pr_debug("vaud_bias power dn pcm\n");
- snd_soc_update_bits(codec, SN95031_PCM2C2, BIT(0), 0);
- snd_soc_write(codec, SN95031_AUDPLLCTRL, 0);
- break;
- default:
- break;
- }
- break;
-
-
- case SND_SOC_BIAS_OFF:
- pr_debug("vaud_bias _OFF doing rail shutdown\n");
- snd_soc_write(codec, SN95031_VAUD, BIT(3));
- break;
- }
-
- return 0;
-}
-
-static int sn95031_vhs_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-
- if (SND_SOC_DAPM_EVENT_ON(event)) {
- pr_debug("VHS SND_SOC_DAPM_EVENT_ON doing rail startup now\n");
- /* power up the rail */
- snd_soc_write(codec, SN95031_VHSP, 0x3D);
- snd_soc_write(codec, SN95031_VHSN, 0x3F);
- msleep(1);
- } else if (SND_SOC_DAPM_EVENT_OFF(event)) {
- pr_debug("VHS SND_SOC_DAPM_EVENT_OFF doing rail shutdown\n");
- snd_soc_write(codec, SN95031_VHSP, 0xC4);
- snd_soc_write(codec, SN95031_VHSN, 0x04);
- }
- return 0;
-}
-
-static int sn95031_vihf_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-
- if (SND_SOC_DAPM_EVENT_ON(event)) {
- pr_debug("VIHF SND_SOC_DAPM_EVENT_ON doing rail startup now\n");
- /* power up the rail */
- snd_soc_write(codec, SN95031_VIHF, 0x27);
- msleep(1);
- } else if (SND_SOC_DAPM_EVENT_OFF(event)) {
- pr_debug("VIHF SND_SOC_DAPM_EVENT_OFF doing rail shutdown\n");
- snd_soc_write(codec, SN95031_VIHF, 0x24);
- }
- return 0;
-}
-
-static int sn95031_dmic12_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *k, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- unsigned int ldo = 0, clk_dir = 0, data_dir = 0;
-
- if (SND_SOC_DAPM_EVENT_ON(event)) {
- ldo = BIT(5)|BIT(4);
- clk_dir = BIT(0);
- data_dir = BIT(7);
- }
- /* program DMIC LDO, clock and set clock */
- snd_soc_update_bits(codec, SN95031_MICBIAS, BIT(5)|BIT(4), ldo);
- snd_soc_update_bits(codec, SN95031_DMICBUF0123, BIT(0), clk_dir);
- snd_soc_update_bits(codec, SN95031_DMICBUF0123, BIT(7), data_dir);
- return 0;
-}
-
-static int sn95031_dmic34_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *k, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- unsigned int ldo = 0, clk_dir = 0, data_dir = 0;
-
- if (SND_SOC_DAPM_EVENT_ON(event)) {
- ldo = BIT(5)|BIT(4);
- clk_dir = BIT(2);
- data_dir = BIT(1);
- }
- /* program DMIC LDO, clock and set clock */
- snd_soc_update_bits(codec, SN95031_MICBIAS, BIT(5)|BIT(4), ldo);
- snd_soc_update_bits(codec, SN95031_DMICBUF0123, BIT(2), clk_dir);
- snd_soc_update_bits(codec, SN95031_DMICBUF45, BIT(1), data_dir);
- return 0;
-}
-
-static int sn95031_dmic56_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *k, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- unsigned int ldo = 0;
-
- if (SND_SOC_DAPM_EVENT_ON(event))
- ldo = BIT(7)|BIT(6);
-
- /* program DMIC LDO */
- snd_soc_update_bits(codec, SN95031_MICBIAS, BIT(7)|BIT(6), ldo);
- return 0;
-}
-
-/* mux controls */
-static const char *sn95031_mic_texts[] = { "AMIC", "LineIn" };
-
-static SOC_ENUM_SINGLE_DECL(sn95031_micl_enum,
- SN95031_ADCCONFIG, 1, sn95031_mic_texts);
-
-static const struct snd_kcontrol_new sn95031_micl_mux_control =
- SOC_DAPM_ENUM("Route", sn95031_micl_enum);
-
-static SOC_ENUM_SINGLE_DECL(sn95031_micr_enum,
- SN95031_ADCCONFIG, 3, sn95031_mic_texts);
-
-static const struct snd_kcontrol_new sn95031_micr_mux_control =
- SOC_DAPM_ENUM("Route", sn95031_micr_enum);
-
-static const char *sn95031_input_texts[] = { "DMIC1", "DMIC2", "DMIC3",
- "DMIC4", "DMIC5", "DMIC6",
- "ADC Left", "ADC Right" };
-
-static SOC_ENUM_SINGLE_DECL(sn95031_input1_enum,
- SN95031_AUDIOMUX12, 0, sn95031_input_texts);
-
-static const struct snd_kcontrol_new sn95031_input1_mux_control =
- SOC_DAPM_ENUM("Route", sn95031_input1_enum);
-
-static SOC_ENUM_SINGLE_DECL(sn95031_input2_enum,
- SN95031_AUDIOMUX12, 4, sn95031_input_texts);
-
-static const struct snd_kcontrol_new sn95031_input2_mux_control =
- SOC_DAPM_ENUM("Route", sn95031_input2_enum);
-
-static SOC_ENUM_SINGLE_DECL(sn95031_input3_enum,
- SN95031_AUDIOMUX34, 0, sn95031_input_texts);
-
-static const struct snd_kcontrol_new sn95031_input3_mux_control =
- SOC_DAPM_ENUM("Route", sn95031_input3_enum);
-
-static SOC_ENUM_SINGLE_DECL(sn95031_input4_enum,
- SN95031_AUDIOMUX34, 4, sn95031_input_texts);
-
-static const struct snd_kcontrol_new sn95031_input4_mux_control =
- SOC_DAPM_ENUM("Route", sn95031_input4_enum);
-
-/* capture path controls */
-
-static const char *sn95031_micmode_text[] = {"Single Ended", "Differential"};
-
-/* 0dB to 30dB in 10dB steps */
-static const DECLARE_TLV_DB_SCALE(mic_tlv, 0, 10, 0);
-
-static SOC_ENUM_SINGLE_DECL(sn95031_micmode1_enum,
- SN95031_MICAMP1, 1, sn95031_micmode_text);
-static SOC_ENUM_SINGLE_DECL(sn95031_micmode2_enum,
- SN95031_MICAMP2, 1, sn95031_micmode_text);
-
-static const char *sn95031_dmic_cfg_text[] = {"GPO", "DMIC"};
-
-static SOC_ENUM_SINGLE_DECL(sn95031_dmic12_cfg_enum,
- SN95031_DMICMUX, 0, sn95031_dmic_cfg_text);
-static SOC_ENUM_SINGLE_DECL(sn95031_dmic34_cfg_enum,
- SN95031_DMICMUX, 1, sn95031_dmic_cfg_text);
-static SOC_ENUM_SINGLE_DECL(sn95031_dmic56_cfg_enum,
- SN95031_DMICMUX, 2, sn95031_dmic_cfg_text);
-
-static const struct snd_kcontrol_new sn95031_snd_controls[] = {
- SOC_ENUM("Mic1Mode Capture Route", sn95031_micmode1_enum),
- SOC_ENUM("Mic2Mode Capture Route", sn95031_micmode2_enum),
- SOC_ENUM("DMIC12 Capture Route", sn95031_dmic12_cfg_enum),
- SOC_ENUM("DMIC34 Capture Route", sn95031_dmic34_cfg_enum),
- SOC_ENUM("DMIC56 Capture Route", sn95031_dmic56_cfg_enum),
- SOC_SINGLE_TLV("Mic1 Capture Volume", SN95031_MICAMP1,
- 2, 4, 0, mic_tlv),
- SOC_SINGLE_TLV("Mic2 Capture Volume", SN95031_MICAMP2,
- 2, 4, 0, mic_tlv),
-};
-
-/* DAPM widgets */
-static const struct snd_soc_dapm_widget sn95031_dapm_widgets[] = {
-
- /* all end points mic, hs etc */
- SND_SOC_DAPM_OUTPUT("HPOUTL"),
- SND_SOC_DAPM_OUTPUT("HPOUTR"),
- SND_SOC_DAPM_OUTPUT("EPOUT"),
- SND_SOC_DAPM_OUTPUT("IHFOUTL"),
- SND_SOC_DAPM_OUTPUT("IHFOUTR"),
- SND_SOC_DAPM_OUTPUT("LINEOUTL"),
- SND_SOC_DAPM_OUTPUT("LINEOUTR"),
- SND_SOC_DAPM_OUTPUT("VIB1OUT"),
- SND_SOC_DAPM_OUTPUT("VIB2OUT"),
-
- SND_SOC_DAPM_INPUT("AMIC1"), /* headset mic */
- SND_SOC_DAPM_INPUT("AMIC2"),
- SND_SOC_DAPM_INPUT("DMIC1"),
- SND_SOC_DAPM_INPUT("DMIC2"),
- SND_SOC_DAPM_INPUT("DMIC3"),
- SND_SOC_DAPM_INPUT("DMIC4"),
- SND_SOC_DAPM_INPUT("DMIC5"),
- SND_SOC_DAPM_INPUT("DMIC6"),
- SND_SOC_DAPM_INPUT("LINEINL"),
- SND_SOC_DAPM_INPUT("LINEINR"),
-
- SND_SOC_DAPM_MICBIAS("AMIC1Bias", SN95031_MICBIAS, 2, 0),
- SND_SOC_DAPM_MICBIAS("AMIC2Bias", SN95031_MICBIAS, 3, 0),
- SND_SOC_DAPM_MICBIAS("DMIC12Bias", SN95031_DMICMUX, 3, 0),
- SND_SOC_DAPM_MICBIAS("DMIC34Bias", SN95031_DMICMUX, 4, 0),
- SND_SOC_DAPM_MICBIAS("DMIC56Bias", SN95031_DMICMUX, 5, 0),
-
- SND_SOC_DAPM_SUPPLY("DMIC12supply", SN95031_DMICLK, 0, 0,
- sn95031_dmic12_event,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_SUPPLY("DMIC34supply", SN95031_DMICLK, 1, 0,
- sn95031_dmic34_event,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_SUPPLY("DMIC56supply", SN95031_DMICLK, 2, 0,
- sn95031_dmic56_event,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
-
- SND_SOC_DAPM_AIF_OUT("PCM_Out", "Capture", 0,
- SND_SOC_NOPM, 0, 0),
-
- SND_SOC_DAPM_SUPPLY("Headset Rail", SND_SOC_NOPM, 0, 0,
- sn95031_vhs_event,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_SUPPLY("Speaker Rail", SND_SOC_NOPM, 0, 0,
- sn95031_vihf_event,
- SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
-
- /* playback path driver enables */
- SND_SOC_DAPM_PGA("Headset Left Playback",
- SN95031_DRIVEREN, 0, 0, NULL, 0),
- SND_SOC_DAPM_PGA("Headset Right Playback",
- SN95031_DRIVEREN, 1, 0, NULL, 0),
- SND_SOC_DAPM_PGA("Speaker Left Playback",
- SN95031_DRIVEREN, 2, 0, NULL, 0),
- SND_SOC_DAPM_PGA("Speaker Right Playback",
- SN95031_DRIVEREN, 3, 0, NULL, 0),
- SND_SOC_DAPM_PGA("Vibra1 Playback",
- SN95031_DRIVEREN, 4, 0, NULL, 0),
- SND_SOC_DAPM_PGA("Vibra2 Playback",
- SN95031_DRIVEREN, 5, 0, NULL, 0),
- SND_SOC_DAPM_PGA("Earpiece Playback",
- SN95031_DRIVEREN, 6, 0, NULL, 0),
- SND_SOC_DAPM_PGA("Lineout Left Playback",
- SN95031_LOCTL, 0, 0, NULL, 0),
- SND_SOC_DAPM_PGA("Lineout Right Playback",
- SN95031_LOCTL, 4, 0, NULL, 0),
-
- /* playback path filter enable */
- SND_SOC_DAPM_PGA("Headset Left Filter",
- SN95031_HSEPRXCTRL, 4, 0, NULL, 0),
- SND_SOC_DAPM_PGA("Headset Right Filter",
- SN95031_HSEPRXCTRL, 5, 0, NULL, 0),
- SND_SOC_DAPM_PGA("Speaker Left Filter",
- SN95031_IHFRXCTRL, 0, 0, NULL, 0),
- SND_SOC_DAPM_PGA("Speaker Right Filter",
- SN95031_IHFRXCTRL, 1, 0, NULL, 0),
-
- /* DACs */
- SND_SOC_DAPM_DAC("HSDAC Left", "Headset",
- SN95031_DACCONFIG, 0, 0),
- SND_SOC_DAPM_DAC("HSDAC Right", "Headset",
- SN95031_DACCONFIG, 1, 0),
- SND_SOC_DAPM_DAC("IHFDAC Left", "Speaker",
- SN95031_DACCONFIG, 2, 0),
- SND_SOC_DAPM_DAC("IHFDAC Right", "Speaker",
- SN95031_DACCONFIG, 3, 0),
- SND_SOC_DAPM_DAC("Vibra1 DAC", "Vibra1",
- SN95031_VIB1C5, 1, 0),
- SND_SOC_DAPM_DAC("Vibra2 DAC", "Vibra2",
- SN95031_VIB2C5, 1, 0),
-
- /* capture widgets */
- SND_SOC_DAPM_PGA("LineIn Enable Left", SN95031_MICAMP1,
- 7, 0, NULL, 0),
- SND_SOC_DAPM_PGA("LineIn Enable Right", SN95031_MICAMP2,
- 7, 0, NULL, 0),
-
- SND_SOC_DAPM_PGA("MIC1 Enable", SN95031_MICAMP1, 0, 0, NULL, 0),
- SND_SOC_DAPM_PGA("MIC2 Enable", SN95031_MICAMP2, 0, 0, NULL, 0),
- SND_SOC_DAPM_PGA("TX1 Enable", SN95031_AUDIOTXEN, 2, 0, NULL, 0),
- SND_SOC_DAPM_PGA("TX2 Enable", SN95031_AUDIOTXEN, 3, 0, NULL, 0),
- SND_SOC_DAPM_PGA("TX3 Enable", SN95031_AUDIOTXEN, 4, 0, NULL, 0),
- SND_SOC_DAPM_PGA("TX4 Enable", SN95031_AUDIOTXEN, 5, 0, NULL, 0),
-
- /* ADC have null stream as they will be turned ON by TX path */
- SND_SOC_DAPM_ADC("ADC Left", NULL,
- SN95031_ADCCONFIG, 0, 0),
- SND_SOC_DAPM_ADC("ADC Right", NULL,
- SN95031_ADCCONFIG, 2, 0),
-
- SND_SOC_DAPM_MUX("Mic_InputL Capture Route",
- SND_SOC_NOPM, 0, 0, &sn95031_micl_mux_control),
- SND_SOC_DAPM_MUX("Mic_InputR Capture Route",
- SND_SOC_NOPM, 0, 0, &sn95031_micr_mux_control),
-
- SND_SOC_DAPM_MUX("Txpath1 Capture Route",
- SND_SOC_NOPM, 0, 0, &sn95031_input1_mux_control),
- SND_SOC_DAPM_MUX("Txpath2 Capture Route",
- SND_SOC_NOPM, 0, 0, &sn95031_input2_mux_control),
- SND_SOC_DAPM_MUX("Txpath3 Capture Route",
- SND_SOC_NOPM, 0, 0, &sn95031_input3_mux_control),
- SND_SOC_DAPM_MUX("Txpath4 Capture Route",
- SND_SOC_NOPM, 0, 0, &sn95031_input4_mux_control),
-
-};
-
-static const struct snd_soc_dapm_route sn95031_audio_map[] = {
- /* headset and earpiece map */
- { "HPOUTL", NULL, "Headset Rail"},
- { "HPOUTR", NULL, "Headset Rail"},
- { "HPOUTL", NULL, "Headset Left Playback" },
- { "HPOUTR", NULL, "Headset Right Playback" },
- { "EPOUT", NULL, "Earpiece Playback" },
- { "Headset Left Playback", NULL, "Headset Left Filter"},
- { "Headset Right Playback", NULL, "Headset Right Filter"},
- { "Earpiece Playback", NULL, "Headset Left Filter"},
- { "Headset Left Filter", NULL, "HSDAC Left"},
- { "Headset Right Filter", NULL, "HSDAC Right"},
-
- /* speaker map */
- { "IHFOUTL", NULL, "Speaker Rail"},
- { "IHFOUTR", NULL, "Speaker Rail"},
- { "IHFOUTL", NULL, "Speaker Left Playback"},
- { "IHFOUTR", NULL, "Speaker Right Playback"},
- { "Speaker Left Playback", NULL, "Speaker Left Filter"},
- { "Speaker Right Playback", NULL, "Speaker Right Filter"},
- { "Speaker Left Filter", NULL, "IHFDAC Left"},
- { "Speaker Right Filter", NULL, "IHFDAC Right"},
-
- /* vibra map */
- { "VIB1OUT", NULL, "Vibra1 Playback"},
- { "Vibra1 Playback", NULL, "Vibra1 DAC"},
-
- { "VIB2OUT", NULL, "Vibra2 Playback"},
- { "Vibra2 Playback", NULL, "Vibra2 DAC"},
-
- /* lineout */
- { "LINEOUTL", NULL, "Lineout Left Playback"},
- { "LINEOUTR", NULL, "Lineout Right Playback"},
- { "Lineout Left Playback", NULL, "Headset Left Filter"},
- { "Lineout Left Playback", NULL, "Speaker Left Filter"},
- { "Lineout Left Playback", NULL, "Vibra1 DAC"},
- { "Lineout Right Playback", NULL, "Headset Right Filter"},
- { "Lineout Right Playback", NULL, "Speaker Right Filter"},
- { "Lineout Right Playback", NULL, "Vibra2 DAC"},
-
- /* Headset (AMIC1) mic */
- { "AMIC1Bias", NULL, "AMIC1"},
- { "MIC1 Enable", NULL, "AMIC1Bias"},
- { "Mic_InputL Capture Route", "AMIC", "MIC1 Enable"},
-
- /* AMIC2 */
- { "AMIC2Bias", NULL, "AMIC2"},
- { "MIC2 Enable", NULL, "AMIC2Bias"},
- { "Mic_InputR Capture Route", "AMIC", "MIC2 Enable"},
-
-
- /* Linein */
- { "LineIn Enable Left", NULL, "LINEINL"},
- { "LineIn Enable Right", NULL, "LINEINR"},
- { "Mic_InputL Capture Route", "LineIn", "LineIn Enable Left"},
- { "Mic_InputR Capture Route", "LineIn", "LineIn Enable Right"},
-
- /* ADC connection */
- { "ADC Left", NULL, "Mic_InputL Capture Route"},
- { "ADC Right", NULL, "Mic_InputR Capture Route"},
-
- /*DMIC connections */
- { "DMIC1", NULL, "DMIC12supply"},
- { "DMIC2", NULL, "DMIC12supply"},
- { "DMIC3", NULL, "DMIC34supply"},
- { "DMIC4", NULL, "DMIC34supply"},
- { "DMIC5", NULL, "DMIC56supply"},
- { "DMIC6", NULL, "DMIC56supply"},
-
- { "DMIC12Bias", NULL, "DMIC1"},
- { "DMIC12Bias", NULL, "DMIC2"},
- { "DMIC34Bias", NULL, "DMIC3"},
- { "DMIC34Bias", NULL, "DMIC4"},
- { "DMIC56Bias", NULL, "DMIC5"},
- { "DMIC56Bias", NULL, "DMIC6"},
-
- /*TX path inputs*/
- { "Txpath1 Capture Route", "ADC Left", "ADC Left"},
- { "Txpath2 Capture Route", "ADC Left", "ADC Left"},
- { "Txpath3 Capture Route", "ADC Left", "ADC Left"},
- { "Txpath4 Capture Route", "ADC Left", "ADC Left"},
- { "Txpath1 Capture Route", "ADC Right", "ADC Right"},
- { "Txpath2 Capture Route", "ADC Right", "ADC Right"},
- { "Txpath3 Capture Route", "ADC Right", "ADC Right"},
- { "Txpath4 Capture Route", "ADC Right", "ADC Right"},
- { "Txpath1 Capture Route", "DMIC1", "DMIC1"},
- { "Txpath2 Capture Route", "DMIC1", "DMIC1"},
- { "Txpath3 Capture Route", "DMIC1", "DMIC1"},
- { "Txpath4 Capture Route", "DMIC1", "DMIC1"},
- { "Txpath1 Capture Route", "DMIC2", "DMIC2"},
- { "Txpath2 Capture Route", "DMIC2", "DMIC2"},
- { "Txpath3 Capture Route", "DMIC2", "DMIC2"},
- { "Txpath4 Capture Route", "DMIC2", "DMIC2"},
- { "Txpath1 Capture Route", "DMIC3", "DMIC3"},
- { "Txpath2 Capture Route", "DMIC3", "DMIC3"},
- { "Txpath3 Capture Route", "DMIC3", "DMIC3"},
- { "Txpath4 Capture Route", "DMIC3", "DMIC3"},
- { "Txpath1 Capture Route", "DMIC4", "DMIC4"},
- { "Txpath2 Capture Route", "DMIC4", "DMIC4"},
- { "Txpath3 Capture Route", "DMIC4", "DMIC4"},
- { "Txpath4 Capture Route", "DMIC4", "DMIC4"},
- { "Txpath1 Capture Route", "DMIC5", "DMIC5"},
- { "Txpath2 Capture Route", "DMIC5", "DMIC5"},
- { "Txpath3 Capture Route", "DMIC5", "DMIC5"},
- { "Txpath4 Capture Route", "DMIC5", "DMIC5"},
- { "Txpath1 Capture Route", "DMIC6", "DMIC6"},
- { "Txpath2 Capture Route", "DMIC6", "DMIC6"},
- { "Txpath3 Capture Route", "DMIC6", "DMIC6"},
- { "Txpath4 Capture Route", "DMIC6", "DMIC6"},
-
- /* tx path */
- { "TX1 Enable", NULL, "Txpath1 Capture Route"},
- { "TX2 Enable", NULL, "Txpath2 Capture Route"},
- { "TX3 Enable", NULL, "Txpath3 Capture Route"},
- { "TX4 Enable", NULL, "Txpath4 Capture Route"},
- { "PCM_Out", NULL, "TX1 Enable"},
- { "PCM_Out", NULL, "TX2 Enable"},
- { "PCM_Out", NULL, "TX3 Enable"},
- { "PCM_Out", NULL, "TX4 Enable"},
-
-};
-
-/* speaker and headset mutes, for audio pops and clicks */
-static int sn95031_pcm_hs_mute(struct snd_soc_dai *dai, int mute)
-{
- snd_soc_update_bits(dai->codec,
- SN95031_HSLVOLCTRL, BIT(7), (!mute << 7));
- snd_soc_update_bits(dai->codec,
- SN95031_HSRVOLCTRL, BIT(7), (!mute << 7));
- return 0;
-}
-
-static int sn95031_pcm_spkr_mute(struct snd_soc_dai *dai, int mute)
-{
- snd_soc_update_bits(dai->codec,
- SN95031_IHFLVOLCTRL, BIT(7), (!mute << 7));
- snd_soc_update_bits(dai->codec,
- SN95031_IHFRVOLCTRL, BIT(7), (!mute << 7));
- return 0;
-}
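
The two mute callbacks above hinge on snd_soc_update_bits() touching only the bits selected by the mask, with the value computed as (!mute << 7). A minimal plain-C model of that read-modify-write (illustrative, not the kernel implementation):

    #include <stdio.h>
    #include <stdint.h>

    /* Model of update_bits(): only the bits selected by mask may change. */
    static uint8_t update_bits(uint8_t reg, uint8_t mask, uint8_t val)
    {
            return (reg & ~mask) | (val & mask);
    }

    int main(void)
    {
            uint8_t vol = 0x88;                       /* bit 7 set: enabled */

            vol = update_bits(vol, 1 << 7, !1 << 7);  /* mute = 1 clears bit 7 */
            printf("muted:   0x%02x\n", vol);         /* prints 0x08 */
            vol = update_bits(vol, 1 << 7, !0 << 7);  /* mute = 0 sets bit 7 */
            printf("unmuted: 0x%02x\n", vol);         /* prints 0x88 */
            return 0;
    }
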
-
-static int sn95031_pcm_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
-{
- unsigned int format, rate;
-
- switch (params_width(params)) {
- case 16:
- format = BIT(4)|BIT(5);
- break;
-
- case 24:
- format = 0;
- break;
- default:
- return -EINVAL;
- }
- snd_soc_update_bits(dai->codec, SN95031_PCM2C2,
- BIT(4)|BIT(5), format);
-
- switch (params_rate(params)) {
- case 48000:
- pr_debug("RATE_48000\n");
- rate = 0;
- break;
-
- case 44100:
- pr_debug("RATE_44100\n");
- rate = BIT(7);
- break;
-
- default:
- pr_err("ERR rate %d\n", params_rate(params));
- return -EINVAL;
- }
- snd_soc_update_bits(dai->codec, SN95031_PCM1C1, BIT(7), rate);
-
- return 0;
-}
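
hw_params() above encodes only two widths and two rates: 16-bit data sets bits 4 and 5 of SN95031_PCM2C2, 24-bit clears them, and anything else is rejected. A quick plain-C check of that encoding (a sketch mirroring the switch above):

    #include <assert.h>
    #include <stdint.h>

    #define BIT(n) (1u << (n))

    /* Mirrors the width encoding written to SN95031_PCM2C2 above. */
    static int width_to_fmt(int width, uint8_t *fmt)
    {
            switch (width) {
            case 16: *fmt = BIT(4) | BIT(5); return 0;
            case 24: *fmt = 0; return 0;
            default: return -1;
            }
    }

    int main(void)
    {
            uint8_t fmt;

            assert(width_to_fmt(16, &fmt) == 0 && fmt == 0x30);
            assert(width_to_fmt(24, &fmt) == 0 && fmt == 0x00);
            assert(width_to_fmt(32, &fmt) < 0);  /* rejected, like -EINVAL above */
            return 0;
    }
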
-
-/* Codec DAI section */
-static const struct snd_soc_dai_ops sn95031_headset_dai_ops = {
- .digital_mute = sn95031_pcm_hs_mute,
- .hw_params = sn95031_pcm_hw_params,
-};
-
-static const struct snd_soc_dai_ops sn95031_speaker_dai_ops = {
- .digital_mute = sn95031_pcm_spkr_mute,
- .hw_params = sn95031_pcm_hw_params,
-};
-
-static const struct snd_soc_dai_ops sn95031_vib1_dai_ops = {
- .hw_params = sn95031_pcm_hw_params,
-};
-
-static const struct snd_soc_dai_ops sn95031_vib2_dai_ops = {
- .hw_params = sn95031_pcm_hw_params,
-};
-
-static struct snd_soc_dai_driver sn95031_dais[] = {
-{
- .name = "SN95031 Headset",
- .playback = {
- .stream_name = "Headset",
- .channels_min = 2,
- .channels_max = 2,
- .rates = SN95031_RATES,
- .formats = SN95031_FORMATS,
- },
- .capture = {
- .stream_name = "Capture",
- .channels_min = 1,
- .channels_max = 5,
- .rates = SN95031_RATES,
- .formats = SN95031_FORMATS,
- },
- .ops = &sn95031_headset_dai_ops,
-},
-{ .name = "SN95031 Speaker",
- .playback = {
- .stream_name = "Speaker",
- .channels_min = 2,
- .channels_max = 2,
- .rates = SN95031_RATES,
- .formats = SN95031_FORMATS,
- },
- .ops = &sn95031_speaker_dai_ops,
-},
-{ .name = "SN95031 Vibra1",
- .playback = {
- .stream_name = "Vibra1",
- .channels_min = 1,
- .channels_max = 1,
- .rates = SN95031_RATES,
- .formats = SN95031_FORMATS,
- },
- .ops = &sn95031_vib1_dai_ops,
-},
-{ .name = "SN95031 Vibra2",
- .playback = {
- .stream_name = "Vibra2",
- .channels_min = 1,
- .channels_max = 1,
- .rates = SN95031_RATES,
- .formats = SN95031_FORMATS,
- },
- .ops = &sn95031_vib2_dai_ops,
-},
-};
-
-static inline void sn95031_disable_jack_btn(struct snd_soc_codec *codec)
-{
- snd_soc_write(codec, SN95031_BTNCTRL2, 0x00);
-}
-
-static inline void sn95031_enable_jack_btn(struct snd_soc_codec *codec)
-{
- snd_soc_write(codec, SN95031_BTNCTRL1, 0x77);
- snd_soc_write(codec, SN95031_BTNCTRL2, 0x01);
-}
-
-static int sn95031_get_headset_state(struct snd_soc_codec *codec,
- struct snd_soc_jack *mfld_jack)
-{
- int micbias = sn95031_get_mic_bias(codec);
-
- int jack_type = snd_soc_jack_get_type(mfld_jack, micbias);
-
- pr_debug("jack type detected = %d\n", jack_type);
- if (jack_type == SND_JACK_HEADSET)
- sn95031_enable_jack_btn(codec);
- return jack_type;
-}
-
-void sn95031_jack_detection(struct snd_soc_codec *codec,
- struct mfld_jack_data *jack_data)
-{
- unsigned int status;
- unsigned int mask = SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_HEADSET;
-
- pr_debug("interrupt id read in sram = 0x%x\n", jack_data->intr_id);
- if (jack_data->intr_id & 0x1) {
- pr_debug("short_push detected\n");
- status = SND_JACK_HEADSET | SND_JACK_BTN_0;
- } else if (jack_data->intr_id & 0x2) {
- pr_debug("long_push detected\n");
- status = SND_JACK_HEADSET | SND_JACK_BTN_1;
- } else if (jack_data->intr_id & 0x4) {
- pr_debug("headset or headphones inserted\n");
- status = sn95031_get_headset_state(codec, jack_data->mfld_jack);
- } else if (jack_data->intr_id & 0x8) {
- pr_debug("headset or headphones removed\n");
- status = 0;
- sn95031_disable_jack_btn(codec);
- } else {
- pr_err("unidentified interrupt\n");
- return;
- }
-
- snd_soc_jack_report(jack_data->mfld_jack, status, mask);
-	/* button pressed and released, so send an explicit button release */
- if ((status & SND_JACK_BTN_0) | (status & SND_JACK_BTN_1))
- snd_soc_jack_report(jack_data->mfld_jack,
- SND_JACK_HEADSET, mask);
-}
-EXPORT_SYMBOL_GPL(sn95031_jack_detection);
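
sn95031_jack_detection() treats intr_id as a bitfield: bit 0 short press, bit 1 long press, bit 2 insertion, bit 3 removal, tested in that priority order. A compact stand-alone decode of the same layout (the helper name is invented for illustration):

    #include <stdio.h>

    /* Same bit layout as jack_data->intr_id above; helper name is made up. */
    static const char *intr_name(unsigned int id)
    {
            if (id & 0x1) return "short press";
            if (id & 0x2) return "long press";
            if (id & 0x4) return "insertion";
            if (id & 0x8) return "removal";
            return "unidentified";
    }

    int main(void)
    {
            for (unsigned int id = 1; id <= 8; id <<= 1)
                    printf("intr_id 0x%x -> %s\n", id, intr_name(id));
            return 0;
    }
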
-
-/* codec registration */
-static int sn95031_codec_probe(struct snd_soc_codec *codec)
-{
- pr_debug("codec_probe called\n");
-
- /* PCM interface config
-	 * This sets the PCM RX slot configuration to a max of 6 slots
-	 * for a max of 4 DAIs (2 stereo and 2 mono)
- */
- snd_soc_write(codec, SN95031_PCM2RXSLOT01, 0x10);
- snd_soc_write(codec, SN95031_PCM2RXSLOT23, 0x32);
- snd_soc_write(codec, SN95031_PCM2RXSLOT45, 0x54);
- snd_soc_write(codec, SN95031_PCM2TXSLOT01, 0x10);
- snd_soc_write(codec, SN95031_PCM2TXSLOT23, 0x32);
- /* pcm port setting
-	 * This sets the PCM port to slave and the clock to 19.2 MHz, which
-	 * can support 6 slots; the sampling rate is set per stream in hw_params
- */
- snd_soc_write(codec, SN95031_PCM1C1, 0x00);
- snd_soc_write(codec, SN95031_PCM2C1, 0x01);
- snd_soc_write(codec, SN95031_PCM2C2, 0x0A);
- snd_soc_write(codec, SN95031_HSMIXER, BIT(0)|BIT(4));
-	/* vendor vibra workaround: the vibras are muted by a
-	 * custom register, so unmute them
- */
- snd_soc_write(codec, SN95031_SSR5, 0x80);
- snd_soc_write(codec, SN95031_SSR6, 0x80);
- snd_soc_write(codec, SN95031_VIB1C5, 0x00);
- snd_soc_write(codec, SN95031_VIB2C5, 0x00);
- /* configure vibras for pcm port */
- snd_soc_write(codec, SN95031_VIB1C3, 0x00);
- snd_soc_write(codec, SN95031_VIB2C3, 0x00);
-
- /* soft mute ramp time */
- snd_soc_write(codec, SN95031_SOFTMUTE, 0x3);
-	/* fix the initial volume at 1dB;
-	 * the default is +9dB,
-	 * and 1dB gives optimal swing on the DAC and amps
- */
- snd_soc_write(codec, SN95031_HSLVOLCTRL, 0x08);
- snd_soc_write(codec, SN95031_HSRVOLCTRL, 0x08);
- snd_soc_write(codec, SN95031_IHFLVOLCTRL, 0x08);
- snd_soc_write(codec, SN95031_IHFRVOLCTRL, 0x08);
- /* dac mode and lineout workaround */
- snd_soc_write(codec, SN95031_SSR2, 0x10);
- snd_soc_write(codec, SN95031_SSR3, 0x40);
-
- return 0;
-}
-
-static const struct snd_soc_codec_driver sn95031_codec = {
- .probe = sn95031_codec_probe,
- .set_bias_level = sn95031_set_vaud_bias,
- .idle_bias_off = true,
-
- .component_driver = {
- .controls = sn95031_snd_controls,
- .num_controls = ARRAY_SIZE(sn95031_snd_controls),
- .dapm_widgets = sn95031_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(sn95031_dapm_widgets),
- .dapm_routes = sn95031_audio_map,
- .num_dapm_routes = ARRAY_SIZE(sn95031_audio_map),
- },
-};
-
-static int sn95031_device_probe(struct platform_device *pdev)
-{
- struct regmap *regmap;
-
- pr_debug("codec device probe called for %s\n", dev_name(&pdev->dev));
-
- regmap = devm_regmap_init(&pdev->dev, NULL, NULL, &sn95031_regmap);
- if (IS_ERR(regmap))
- return PTR_ERR(regmap);
-
- return snd_soc_register_codec(&pdev->dev, &sn95031_codec,
- sn95031_dais, ARRAY_SIZE(sn95031_dais));
-}
-
-static int sn95031_device_remove(struct platform_device *pdev)
-{
- pr_debug("codec device remove called\n");
- snd_soc_unregister_codec(&pdev->dev);
- return 0;
-}
-
-static struct platform_driver sn95031_codec_driver = {
- .driver = {
- .name = "sn95031",
- },
- .probe = sn95031_device_probe,
- .remove = sn95031_device_remove,
-};
-
-module_platform_driver(sn95031_codec_driver);
-
-MODULE_DESCRIPTION("ASoC TI SN95031 codec driver");
-MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
-MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:sn95031");
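
For context, this codec bound purely by platform-device name: MODULE_ALIAS("platform:sn95031") let the module autoload when a matching device appeared. A sketch of how a board file would have registered that device (kernel-style, illustrative only):

    #include <linux/err.h>
    #include <linux/init.h>
    #include <linux/platform_device.h>

    /* Illustrative board-file registration; the name must match "sn95031". */
    static struct platform_device *sn95031_pdev;

    static int __init board_audio_init(void)
    {
            sn95031_pdev = platform_device_register_simple("sn95031", -1,
                                                           NULL, 0);
            return PTR_ERR_OR_ZERO(sn95031_pdev);
    }
    device_initcall(board_audio_init);
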
diff --git a/sound/soc/codecs/sn95031.h b/sound/soc/codecs/sn95031.h
deleted file mode 100644
index 7651fe4e6a45..000000000000
--- a/sound/soc/codecs/sn95031.h
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * sn95031.h - TI sn95031 Codec driver
- *
- * Copyright (C) 2010 Intel Corp
- * Author: Vinod Koul <vinod.koul@intel.com>
- * Author: Harsha Priya <priya.harsha@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- *
- */
-#ifndef _SN95031_H
-#define _SN95031_H
-
-/*register map*/
-#define SN95031_VAUD 0xDB
-#define SN95031_VHSP 0xDC
-#define SN95031_VHSN 0xDD
-#define SN95031_VIHF 0xC9
-
-#define SN95031_AUDPLLCTRL 0x240
-#define SN95031_DMICBUF0123 0x241
-#define SN95031_DMICBUF45 0x242
-#define SN95031_DMICGPO 0x244
-#define SN95031_DMICMUX 0x245
-#define SN95031_DMICLK 0x246
-#define SN95031_MICBIAS 0x247
-#define SN95031_ADCCONFIG 0x248
-#define SN95031_MICAMP1 0x249
-#define SN95031_MICAMP2 0x24A
-#define SN95031_NOISEMUX 0x24B
-#define SN95031_AUDIOMUX12 0x24C
-#define SN95031_AUDIOMUX34 0x24D
-#define SN95031_AUDIOSINC 0x24E
-#define SN95031_AUDIOTXEN 0x24F
-#define SN95031_HSEPRXCTRL 0x250
-#define SN95031_IHFRXCTRL 0x251
-#define SN95031_HSMIXER 0x256
-#define SN95031_DACCONFIG 0x257
-#define SN95031_SOFTMUTE 0x258
-#define SN95031_HSLVOLCTRL 0x259
-#define SN95031_HSRVOLCTRL 0x25A
-#define SN95031_IHFLVOLCTRL 0x25B
-#define SN95031_IHFRVOLCTRL 0x25C
-#define SN95031_DRIVEREN 0x25D
-#define SN95031_LOCTL 0x25E
-#define SN95031_VIB1C1 0x25F
-#define SN95031_VIB1C2 0x260
-#define SN95031_VIB1C3 0x261
-#define SN95031_VIB1SPIPCM1 0x262
-#define SN95031_VIB1SPIPCM2 0x263
-#define SN95031_VIB1C5 0x264
-#define SN95031_VIB2C1 0x265
-#define SN95031_VIB2C2 0x266
-#define SN95031_VIB2C3 0x267
-#define SN95031_VIB2SPIPCM1 0x268
-#define SN95031_VIB2SPIPCM2 0x269
-#define SN95031_VIB2C5 0x26A
-#define SN95031_BTNCTRL1 0x26B
-#define SN95031_BTNCTRL2 0x26C
-#define SN95031_PCM1TXSLOT01 0x26D
-#define SN95031_PCM1TXSLOT23 0x26E
-#define SN95031_PCM1TXSLOT45 0x26F
-#define SN95031_PCM1RXSLOT0_3 0x270
-#define SN95031_PCM1RXSLOT45 0x271
-#define SN95031_PCM2TXSLOT01 0x272
-#define SN95031_PCM2TXSLOT23 0x273
-#define SN95031_PCM2TXSLOT45 0x274
-#define SN95031_PCM2RXSLOT01 0x275
-#define SN95031_PCM2RXSLOT23 0x276
-#define SN95031_PCM2RXSLOT45 0x277
-#define SN95031_PCM1C1 0x278
-#define SN95031_PCM1C2 0x279
-#define SN95031_PCM1C3 0x27A
-#define SN95031_PCM2C1 0x27B
-#define SN95031_PCM2C2 0x27C
-/*end codec register defn*/
-
-/* vendor defines; these are not part of AVP */
-#define SN95031_SSR2 0x381
-#define SN95031_SSR3 0x382
-#define SN95031_SSR5 0x384
-#define SN95031_SSR6 0x385
-
-/* ADC registers */
-
-#define SN95031_ADC1CNTL1 0x1C0
-#define SN95031_ADC_ENBL 0x10
-#define SN95031_ADC_START 0x08
-#define SN95031_ADC1CNTL3 0x1C2
-#define SN95031_ADCTHERM_ENBL 0x04
-#define SN95031_ADCRRDATA_ENBL 0x05
-#define SN95031_STOPBIT_MASK 16
-#define SN95031_ADCTHERM_MASK 4
-#define SN95031_ADC_CHANLS_MAX 15 /* Number of ADC channels */
-#define SN95031_ADC_LOOP_MAX (SN95031_ADC_CHANLS_MAX - 1)
-#define SN95031_ADC_NO_LOOP 0x07
-#define SN95031_AUDIO_GPIO_CTRL 0x070
-
-/* ADC channel code values */
-#define SN95031_AUDIO_DETECT_CODE 0x06
-
-/* ADC base addresses */
-#define SN95031_ADC_CHNL_START_ADDR 0x1C5 /* increments by 1 */
-#define SN95031_ADC_DATA_START_ADDR 0x1D4 /* increments by 2 */
-/* multiplier to convert to mV */
-#define SN95031_ADC_ONE_LSB_MULTIPLIER 2346
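
The multiplier suggests one ADC LSB corresponds to 2.346 mV, with the value scaled by 1000 to stay in integer math. A hedged conversion sketch (the /1000 normalization is an assumption, not spelled out in this header):

    #include <stdio.h>

    #define SN95031_ADC_ONE_LSB_MULTIPLIER  2346

    /* Assumed scaling: raw * 2346 / 1000 => millivolts (1 LSB = 2.346 mV). */
    static unsigned int adc_raw_to_mv(unsigned int raw)
    {
            return raw * SN95031_ADC_ONE_LSB_MULTIPLIER / 1000;
    }

    int main(void)
    {
            printf("raw 0x3ff -> %u mV\n", adc_raw_to_mv(0x3ff)); /* 2399 mV */
            return 0;
    }
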
-
-
-struct mfld_jack_data {
- int intr_id;
- int micbias_vol;
- struct snd_soc_jack *mfld_jack;
-};
-
-extern void sn95031_jack_detection(struct snd_soc_codec *codec,
- struct mfld_jack_data *jack_data);
-
-#endif
diff --git a/sound/soc/codecs/spdif_receiver.c b/sound/soc/codecs/spdif_receiver.c
index 7acd05140a81..c8fd6367f6c0 100644
--- a/sound/soc/codecs/spdif_receiver.c
+++ b/sound/soc/codecs/spdif_receiver.c
@@ -34,10 +34,11 @@ static const struct snd_soc_dapm_route dir_routes[] = {
#define STUB_RATES SNDRV_PCM_RATE_8000_192000
#define STUB_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
SNDRV_PCM_FMTBIT_S20_3LE | \
- SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE | \
SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE)
-static const struct snd_soc_codec_driver soc_codec_spdif_dir = {
+static struct snd_soc_codec_driver soc_codec_spdif_dir = {
.component_driver = {
.dapm_widgets = dir_widgets,
.num_dapm_widgets = ARRAY_SIZE(dir_widgets),
diff --git a/sound/soc/codecs/spdif_transmitter.c b/sound/soc/codecs/spdif_transmitter.c
index 063a64ff82d3..037aa1d45559 100644
--- a/sound/soc/codecs/spdif_transmitter.c
+++ b/sound/soc/codecs/spdif_transmitter.c
@@ -27,7 +27,8 @@
#define STUB_RATES SNDRV_PCM_RATE_8000_192000
#define STUB_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
SNDRV_PCM_FMTBIT_S20_3LE | \
- SNDRV_PCM_FMTBIT_S24_LE)
+ SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE)
static const struct snd_soc_dapm_widget dit_widgets[] = {
SND_SOC_DAPM_OUTPUT("spdif-out"),
@@ -37,7 +38,7 @@ static const struct snd_soc_dapm_route dit_routes[] = {
{ "spdif-out", NULL, "Playback" },
};
-static const struct snd_soc_codec_driver soc_codec_spdif_dit = {
+static struct snd_soc_codec_driver soc_codec_spdif_dit = {
.component_driver = {
.dapm_widgets = dit_widgets,
.num_dapm_widgets = ARRAY_SIZE(dit_widgets),
diff --git a/sound/soc/codecs/tas5720.c b/sound/soc/codecs/tas5720.c
index a736a2a6976c..f3006f301fe8 100644
--- a/sound/soc/codecs/tas5720.c
+++ b/sound/soc/codecs/tas5720.c
@@ -36,6 +36,11 @@
/* Define how often to check (and clear) the fault status register (in ms) */
#define TAS5720_FAULT_CHECK_INTERVAL 200
+enum tas572x_type {
+ TAS5720,
+ TAS5722,
+};
+
static const char * const tas5720_supply_names[] = {
"dvdd", /* Digital power supply. Connect to 3.3-V supply. */
"pvdd", /* Class-D amp and analog power supply (connected). */
@@ -47,6 +52,7 @@ struct tas5720_data {
struct snd_soc_codec *codec;
struct regmap *regmap;
struct i2c_client *tas5720_client;
+ enum tas572x_type devtype;
struct regulator_bulk_data supplies[TAS5720_NUM_SUPPLIES];
struct delayed_work fault_check_work;
unsigned int last_fault;
@@ -264,7 +270,7 @@ out:
static int tas5720_codec_probe(struct snd_soc_codec *codec)
{
struct tas5720_data *tas5720 = snd_soc_codec_get_drvdata(codec);
- unsigned int device_id;
+ unsigned int device_id, expected_device_id;
int ret;
tas5720->codec = codec;
@@ -276,6 +282,11 @@ static int tas5720_codec_probe(struct snd_soc_codec *codec)
return ret;
}
+ /*
+ * Take a liberal approach to checking the device ID to allow the
+ * driver to be used even if the device ID does not match, however
+ * issue a warning if there is a mismatch.
+ */
ret = regmap_read(tas5720->regmap, TAS5720_DEVICE_ID_REG, &device_id);
if (ret < 0) {
dev_err(codec->dev, "failed to read device ID register: %d\n",
@@ -283,13 +294,22 @@ static int tas5720_codec_probe(struct snd_soc_codec *codec)
goto probe_fail;
}
- if (device_id != TAS5720_DEVICE_ID) {
- dev_err(codec->dev, "wrong device ID. expected: %u read: %u\n",
- TAS5720_DEVICE_ID, device_id);
- ret = -ENODEV;
- goto probe_fail;
+ switch (tas5720->devtype) {
+ case TAS5720:
+ expected_device_id = TAS5720_DEVICE_ID;
+ break;
+ case TAS5722:
+ expected_device_id = TAS5722_DEVICE_ID;
+ break;
+ default:
+ dev_err(codec->dev, "unexpected private driver data\n");
+ return -EINVAL;
}
+ if (device_id != expected_device_id)
+ dev_warn(codec->dev, "wrong device ID. expected: %u read: %u\n",
+ expected_device_id, device_id);
+
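
The net effect of this hunk is a policy change: the device ID becomes advisory, so a mismatching revision logs a warning instead of failing probe with -ENODEV. The old and new behaviour side by side, as a plain-C model (names invented):

    #include <stdio.h>

    /* Old policy: any mismatch is fatal. New policy: warn and carry on. */
    static int probe_check_id(unsigned int got, unsigned int want, int strict)
    {
            if (got != want) {
                    fprintf(stderr, "wrong device ID. expected: %u read: %u\n",
                            want, got);
                    if (strict)
                            return -19;     /* -ENODEV, as the old code did */
            }
            return 0;
    }

    int main(void)
    {
            printf("strict:  %d\n", probe_check_id(0x12, 0x01, 1)); /* -19 */
            printf("lenient: %d\n", probe_check_id(0x12, 0x01, 0)); /*   0 */
            return 0;
    }
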
/* Set device to mute */
ret = snd_soc_update_bits(codec, TAS5720_DIGITAL_CTRL2_REG,
TAS5720_MUTE, TAS5720_MUTE);
@@ -446,6 +466,15 @@ static const struct regmap_config tas5720_regmap_config = {
.volatile_reg = tas5720_is_volatile_reg,
};
+static const struct regmap_config tas5722_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = TAS5722_MAX_REG,
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_reg = tas5720_is_volatile_reg,
+};
+
/*
* DAC analog gain. There are four discrete values to select from, ranging
  * from 19.2 dB to 26.3 dB.
@@ -544,6 +573,7 @@ static int tas5720_probe(struct i2c_client *client,
{
struct device *dev = &client->dev;
struct tas5720_data *data;
+ const struct regmap_config *regmap_config;
int ret;
int i;
@@ -552,7 +582,20 @@ static int tas5720_probe(struct i2c_client *client,
return -ENOMEM;
data->tas5720_client = client;
- data->regmap = devm_regmap_init_i2c(client, &tas5720_regmap_config);
+ data->devtype = id->driver_data;
+
+ switch (id->driver_data) {
+ case TAS5720:
+ regmap_config = &tas5720_regmap_config;
+ break;
+ case TAS5722:
+ regmap_config = &tas5722_regmap_config;
+ break;
+ default:
+ dev_err(dev, "unexpected private driver data\n");
+ return -EINVAL;
+ }
+ data->regmap = devm_regmap_init_i2c(client, regmap_config);
if (IS_ERR(data->regmap)) {
ret = PTR_ERR(data->regmap);
dev_err(dev, "failed to allocate register map: %d\n", ret);
@@ -592,7 +635,8 @@ static int tas5720_remove(struct i2c_client *client)
}
static const struct i2c_device_id tas5720_id[] = {
- { "tas5720", 0 },
+ { "tas5720", TAS5720 },
+ { "tas5722", TAS5722 },
{ }
};
MODULE_DEVICE_TABLE(i2c, tas5720_id);
@@ -600,6 +644,7 @@ MODULE_DEVICE_TABLE(i2c, tas5720_id);
#if IS_ENABLED(CONFIG_OF)
static const struct of_device_id tas5720_of_match[] = {
{ .compatible = "ti,tas5720", },
+ { .compatible = "ti,tas5722", },
{ },
};
MODULE_DEVICE_TABLE(of, tas5720_of_match);
diff --git a/sound/soc/codecs/tas5720.h b/sound/soc/codecs/tas5720.h
index 3d077c779b12..1dda3095961d 100644
--- a/sound/soc/codecs/tas5720.h
+++ b/sound/soc/codecs/tas5720.h
@@ -30,8 +30,14 @@
#define TAS5720_DIGITAL_CLIP1_REG 0x11
#define TAS5720_MAX_REG TAS5720_DIGITAL_CLIP1_REG
+/* Additional TAS5722-specific Registers */
+#define TAS5722_DIGITAL_CTRL2_REG 0x13
+#define TAS5722_ANALOG_CTRL2_REG 0x14
+#define TAS5722_MAX_REG TAS5722_ANALOG_CTRL2_REG
+
/* TAS5720_DEVICE_ID_REG */
#define TAS5720_DEVICE_ID 0x01
+#define TAS5722_DEVICE_ID 0x12
/* TAS5720_POWER_CTRL_REG */
#define TAS5720_DIG_CLIP_MASK GENMASK(7, 2)
@@ -51,6 +57,7 @@
#define TAS5720_SAIF_FORMAT_MASK GENMASK(2, 0)
/* TAS5720_DIGITAL_CTRL2_REG */
+#define TAS5722_VOL_RAMP_RATE BIT(6)
#define TAS5720_MUTE BIT(4)
#define TAS5720_TDM_SLOT_SEL_MASK GENMASK(2, 0)
@@ -87,4 +94,28 @@
#define TAS5720_CLIP1_MASK GENMASK(7, 2)
#define TAS5720_CLIP1_SHIFT (0x2)
+/* TAS5722_DIGITAL_CTRL2_REG */
+#define TAS5722_HPF_3_7HZ (0x0 << 5)
+#define TAS5722_HPF_7_4HZ (0x1 << 5)
+#define TAS5722_HPF_14_9HZ (0x2 << 5)
+#define TAS5722_HPF_29_7HZ (0x3 << 5)
+#define TAS5722_HPF_59_4HZ (0x4 << 5)
+#define TAS5722_HPF_118_4HZ (0x5 << 5)
+#define TAS5722_HPF_235_0HZ (0x6 << 5)
+#define TAS5722_HPF_463_2HZ (0x7 << 5)
+#define TAS5722_HPF_MASK GENMASK(7, 5)
+#define TAS5722_AUTO_SLEEP_OFF (0x0 << 3)
+#define TAS5722_AUTO_SLEEP_1024LR (0x1 << 3)
+#define TAS5722_AUTO_SLEEP_65536LR (0x2 << 3)
+#define TAS5722_AUTO_SLEEP_262144LR (0x3 << 3)
+#define TAS5722_AUTO_SLEEP_MASK GENMASK(4, 3)
+#define TAS5722_TDM_SLOT_16B BIT(2)
+#define TAS5722_MCLK_PIN_CFG BIT(1)
+#define TAS5722_VOL_CONTROL_LSB BIT(0)
+
+/* TAS5722_ANALOG_CTRL2_REG */
+#define TAS5722_FAULTZ_PU BIT(3)
+#define TAS5722_VREG_LVL BIT(2)
+#define TAS5722_PWR_TUNE BIT(0)
+
#endif /* __TAS5720_H__ */
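
The new TAS5722 fields follow the usual pattern of a GENMASK() plus shifted selector values, e.g. the high-pass corner occupies bits 7:5. A plain-C sanity check of the packing (using a 32-bit variant of the kernel macro):

    #include <assert.h>

    /* 32-bit variant of the kernel's GENMASK(h, l) macro. */
    #define GENMASK(h, l) ((~0u << (l)) & (~0u >> (31 - (h))))

    int main(void)
    {
            assert(GENMASK(7, 5) == 0xE0u);    /* TAS5722_HPF_MASK */
            assert((0x3u << 5) == 0x60u);      /* TAS5722_HPF_29_7HZ */
            assert((0x2u << 3) == 0x10u);      /* TAS5722_AUTO_SLEEP_65536LR */
            return 0;
    }
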
diff --git a/sound/soc/codecs/tas6424.c b/sound/soc/codecs/tas6424.c
new file mode 100644
index 000000000000..49b87f6e85bf
--- /dev/null
+++ b/sound/soc/codecs/tas6424.c
@@ -0,0 +1,707 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ALSA SoC Texas Instruments TAS6424 Quad-Channel Audio Amplifier
+ *
+ * Copyright (C) 2016-2017 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Andreas Dannenberg <dannenberg@ti.com>
+ * Andrew F. Davis <afd@ti.com>
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+#include <linux/delay.h>
+
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/tlv.h>
+
+#include "tas6424.h"
+
+/* Define how often to check (and clear) the fault status register (in ms) */
+#define TAS6424_FAULT_CHECK_INTERVAL 200
+
+static const char * const tas6424_supply_names[] = {
+ "dvdd", /* Digital power supply. Connect to 3.3-V supply. */
+ "vbat", /* Supply used for higher voltage analog circuits. */
+ "pvdd", /* Class-D amp output FETs supply. */
+};
+#define TAS6424_NUM_SUPPLIES ARRAY_SIZE(tas6424_supply_names)
+
+struct tas6424_data {
+ struct device *dev;
+ struct regmap *regmap;
+ struct regulator_bulk_data supplies[TAS6424_NUM_SUPPLIES];
+ struct delayed_work fault_check_work;
+ unsigned int last_fault1;
+ unsigned int last_fault2;
+ unsigned int last_warn;
+};
+
+/*
+ * DAC digital volumes. From -103.5 to 24 dB in 0.5 dB steps. Note that
+ * setting the gain below -100 dB (register value <0x7) is effectively a MUTE
+ * as per device datasheet.
+ */
+static DECLARE_TLV_DB_SCALE(dac_tlv, -10350, 50, 0);
+
+static const struct snd_kcontrol_new tas6424_snd_controls[] = {
+ SOC_SINGLE_TLV("Speaker Driver CH1 Playback Volume",
+ TAS6424_CH1_VOL_CTRL, 0, 0xff, 0, dac_tlv),
+ SOC_SINGLE_TLV("Speaker Driver CH2 Playback Volume",
+ TAS6424_CH2_VOL_CTRL, 0, 0xff, 0, dac_tlv),
+ SOC_SINGLE_TLV("Speaker Driver CH3 Playback Volume",
+ TAS6424_CH3_VOL_CTRL, 0, 0xff, 0, dac_tlv),
+ SOC_SINGLE_TLV("Speaker Driver CH4 Playback Volume",
+ TAS6424_CH4_VOL_CTRL, 0, 0xff, 0, dac_tlv),
+};
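
DECLARE_TLV_DB_SCALE(dac_tlv, -10350, 50, 0) encodes gain in 0.01 dB units: dB = -10350 + 50 * raw. That makes raw 0x00 the -103.5 dB floor, the register default 0xcf exactly 0 dB, and 0xff the +24 dB ceiling. Checked in plain C:

    #include <stdio.h>

    /* Gain encoded by DECLARE_TLV_DB_SCALE(-10350, 50, 0), in 0.01 dB. */
    static int tlv_db_cents(unsigned int raw)
    {
            return -10350 + (int)raw * 50;
    }

    int main(void)
    {
            printf("raw 0x00 -> %d cdB\n", tlv_db_cents(0x00)); /* -10350 */
            printf("raw 0xcf -> %d cdB\n", tlv_db_cents(0xcf)); /* 0: default */
            printf("raw 0xff -> %d cdB\n", tlv_db_cents(0xff)); /* +2400 */
            return 0;
    }
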
+
+static int tas6424_dac_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct tas6424_data *tas6424 = snd_soc_codec_get_drvdata(codec);
+
+ dev_dbg(codec->dev, "%s() event=0x%0x\n", __func__, event);
+
+ if (event & SND_SOC_DAPM_POST_PMU) {
+ /* Observe codec shutdown-to-active time */
+ msleep(12);
+
+ /* Turn on TAS6424 periodic fault checking/handling */
+ tas6424->last_fault1 = 0;
+ tas6424->last_fault2 = 0;
+ tas6424->last_warn = 0;
+ schedule_delayed_work(&tas6424->fault_check_work,
+ msecs_to_jiffies(TAS6424_FAULT_CHECK_INTERVAL));
+ } else if (event & SND_SOC_DAPM_PRE_PMD) {
+ /* Disable TAS6424 periodic fault checking/handling */
+ cancel_delayed_work_sync(&tas6424->fault_check_work);
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dapm_widget tas6424_dapm_widgets[] = {
+ SND_SOC_DAPM_AIF_IN("DAC IN", "Playback", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_DAC_E("DAC", NULL, SND_SOC_NOPM, 0, 0, tas6424_dac_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+ SND_SOC_DAPM_OUTPUT("OUT")
+};
+
+static const struct snd_soc_dapm_route tas6424_audio_map[] = {
+ { "DAC", NULL, "DAC IN" },
+ { "OUT", NULL, "DAC" },
+};
+
+static int tas6424_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ unsigned int rate = params_rate(params);
+ unsigned int width = params_width(params);
+ u8 sap_ctrl = 0;
+
+ dev_dbg(codec->dev, "%s() rate=%u width=%u\n", __func__, rate, width);
+
+ switch (rate) {
+ case 44100:
+ sap_ctrl |= TAS6424_SAP_RATE_44100;
+ break;
+ case 48000:
+ sap_ctrl |= TAS6424_SAP_RATE_48000;
+ break;
+ case 96000:
+ sap_ctrl |= TAS6424_SAP_RATE_96000;
+ break;
+ default:
+ dev_err(codec->dev, "unsupported sample rate: %u\n", rate);
+ return -EINVAL;
+ }
+
+ switch (width) {
+ case 16:
+ sap_ctrl |= TAS6424_SAP_TDM_SLOT_SZ_16;
+ break;
+ case 24:
+ break;
+ default:
+ dev_err(codec->dev, "unsupported sample width: %u\n", width);
+ return -EINVAL;
+ }
+
+ snd_soc_update_bits(codec, TAS6424_SAP_CTRL,
+ TAS6424_SAP_RATE_MASK |
+ TAS6424_SAP_TDM_SLOT_SZ_16,
+ sap_ctrl);
+
+ return 0;
+}
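
hw_params() packs rate and slot size into a single SAP_CTRL update: the rate selector sits in bits 7:6 and the 16-bit TDM slot flag in bit 4 (values copied from the header added later in this patch). A quick plain-C check:

    #include <assert.h>

    #define TAS6424_SAP_RATE_48000          (0x01 << 6)
    #define TAS6424_SAP_RATE_96000          (0x02 << 6)
    #define TAS6424_SAP_TDM_SLOT_SZ_16      (1 << 4)

    int main(void)
    {
            unsigned int sap = TAS6424_SAP_RATE_48000 |
                               TAS6424_SAP_TDM_SLOT_SZ_16;

            assert(sap == 0x50);    /* 48 kHz with 16-bit TDM slots */
            assert((TAS6424_SAP_RATE_96000 >> 6) == 0x02);
            return 0;
    }
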
+
+static int tas6424_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ u8 serial_format = 0;
+
+ dev_dbg(codec->dev, "%s() fmt=0x%0x\n", __func__, fmt);
+
+ /* clock masters */
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ break;
+ default:
+ dev_err(codec->dev, "Invalid DAI master/slave interface\n");
+ return -EINVAL;
+ }
+
+ /* signal polarity */
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ default:
+ dev_err(codec->dev, "Invalid DAI clock signal polarity\n");
+ return -EINVAL;
+ }
+
+ /* interface format */
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ serial_format |= TAS6424_SAP_I2S;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ serial_format |= TAS6424_SAP_DSP;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ /*
+ * We can use the fact that the TAS6424 does not care about the
+ * LRCLK duty cycle during TDM to receive DSP_B formatted data
+ * in LEFTJ mode (no delaying of the 1st data bit).
+ */
+ serial_format |= TAS6424_SAP_LEFTJ;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ serial_format |= TAS6424_SAP_LEFTJ;
+ break;
+ default:
+ dev_err(codec->dev, "Invalid DAI interface format\n");
+ return -EINVAL;
+ }
+
+ snd_soc_update_bits(codec, TAS6424_SAP_CTRL,
+ TAS6424_SAP_FMT_MASK, serial_format);
+
+ return 0;
+}
+
+static int tas6424_set_dai_tdm_slot(struct snd_soc_dai *dai,
+ unsigned int tx_mask, unsigned int rx_mask,
+ int slots, int slot_width)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ unsigned int first_slot, last_slot;
+ bool sap_tdm_slot_last;
+
+ dev_dbg(codec->dev, "%s() tx_mask=%d rx_mask=%d\n", __func__,
+ tx_mask, rx_mask);
+
+ if (!tx_mask || !rx_mask)
+ return 0; /* nothing needed to disable TDM mode */
+
+ /*
+	 * Determine the first slot and last slot that are being requested so
+ * we'll be able to more easily enforce certain constraints as the
+ * TAS6424's TDM interface is not fully configurable.
+ */
+ first_slot = __ffs(tx_mask);
+ last_slot = __fls(rx_mask);
+
+ if (last_slot - first_slot != 4) {
+ dev_err(codec->dev, "tdm mask must cover 4 contiguous slots\n");
+ return -EINVAL;
+ }
+
+ switch (first_slot) {
+ case 0:
+ sap_tdm_slot_last = false;
+ break;
+ case 4:
+ sap_tdm_slot_last = true;
+ break;
+ default:
+ dev_err(codec->dev, "tdm mask must start at slot 0 or 4\n");
+ return -EINVAL;
+ }
+
+ snd_soc_update_bits(codec, TAS6424_SAP_CTRL, TAS6424_SAP_TDM_SLOT_LAST,
+ sap_tdm_slot_last ? TAS6424_SAP_TDM_SLOT_LAST : 0);
+
+ return 0;
+}
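
The TDM check uses __ffs()/__fls() to find the lowest and highest requested slot bits before enforcing the start-at-0-or-4 constraint. Plain-C stand-ins using GCC builtins behave the same for non-zero masks (which the early return above guarantees):

    #include <assert.h>

    /* Stand-ins for the kernel's __ffs()/__fls(); mask must be non-zero. */
    static unsigned int first_bit(unsigned int mask)
    {
            return __builtin_ctz(mask);
    }

    static unsigned int last_bit(unsigned int mask)
    {
            return 31 - __builtin_clz(mask);
    }

    int main(void)
    {
            assert(first_bit(0x0f) == 0 && last_bit(0x0f) == 3); /* slots 0..3 */
            assert(first_bit(0xf0) == 4 && last_bit(0xf0) == 7); /* slots 4..7 */
            return 0;
    }
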
+
+static int tas6424_mute(struct snd_soc_dai *dai, int mute)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ unsigned int val;
+
+ dev_dbg(codec->dev, "%s() mute=%d\n", __func__, mute);
+
+ if (mute)
+ val = TAS6424_ALL_STATE_MUTE;
+ else
+ val = TAS6424_ALL_STATE_PLAY;
+
+ snd_soc_write(codec, TAS6424_CH_STATE_CTRL, val);
+
+ return 0;
+}
+
+static int tas6424_power_off(struct snd_soc_codec *codec)
+{
+ struct tas6424_data *tas6424 = snd_soc_codec_get_drvdata(codec);
+ int ret;
+
+ snd_soc_write(codec, TAS6424_CH_STATE_CTRL, TAS6424_ALL_STATE_HIZ);
+
+ regcache_cache_only(tas6424->regmap, true);
+ regcache_mark_dirty(tas6424->regmap);
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(tas6424->supplies),
+ tas6424->supplies);
+ if (ret < 0) {
+ dev_err(codec->dev, "failed to disable supplies: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tas6424_power_on(struct snd_soc_codec *codec)
+{
+ struct tas6424_data *tas6424 = snd_soc_codec_get_drvdata(codec);
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(tas6424->supplies),
+ tas6424->supplies);
+ if (ret < 0) {
+ dev_err(codec->dev, "failed to enable supplies: %d\n", ret);
+ return ret;
+ }
+
+ regcache_cache_only(tas6424->regmap, false);
+
+ ret = regcache_sync(tas6424->regmap);
+ if (ret < 0) {
+ dev_err(codec->dev, "failed to sync regcache: %d\n", ret);
+ return ret;
+ }
+
+ snd_soc_write(codec, TAS6424_CH_STATE_CTRL, TAS6424_ALL_STATE_MUTE);
+
+	/* any time we come out of HIZ, the output channels automatically run
+	 * DC load diagnostics; wait here until this completes
+ */
+ msleep(230);
+
+ return 0;
+}
+
+static int tas6424_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ dev_dbg(codec->dev, "%s() level=%d\n", __func__, level);
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ case SND_SOC_BIAS_PREPARE:
+ break;
+ case SND_SOC_BIAS_STANDBY:
+ if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_OFF)
+ tas6424_power_on(codec);
+ break;
+ case SND_SOC_BIAS_OFF:
+ tas6424_power_off(codec);
+ break;
+ }
+
+ return 0;
+}
+
+static struct snd_soc_codec_driver soc_codec_dev_tas6424 = {
+ .set_bias_level = tas6424_set_bias_level,
+ .idle_bias_off = true,
+
+ .component_driver = {
+ .controls = tas6424_snd_controls,
+ .num_controls = ARRAY_SIZE(tas6424_snd_controls),
+ .dapm_widgets = tas6424_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(tas6424_dapm_widgets),
+ .dapm_routes = tas6424_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(tas6424_audio_map),
+ },
+};
+
+static struct snd_soc_dai_ops tas6424_speaker_dai_ops = {
+ .hw_params = tas6424_hw_params,
+ .set_fmt = tas6424_set_dai_fmt,
+ .set_tdm_slot = tas6424_set_dai_tdm_slot,
+ .digital_mute = tas6424_mute,
+};
+
+static struct snd_soc_dai_driver tas6424_dai[] = {
+ {
+ .name = "tas6424-amplifier",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 1,
+ .channels_max = 4,
+ .rates = TAS6424_RATES,
+ .formats = TAS6424_FORMATS,
+ },
+ .ops = &tas6424_speaker_dai_ops,
+ },
+};
+
+static void tas6424_fault_check_work(struct work_struct *work)
+{
+ struct tas6424_data *tas6424 = container_of(work, struct tas6424_data,
+ fault_check_work.work);
+ struct device *dev = tas6424->dev;
+ unsigned int reg;
+ int ret;
+
+ ret = regmap_read(tas6424->regmap, TAS6424_GLOB_FAULT1, &reg);
+ if (ret < 0) {
+ dev_err(dev, "failed to read FAULT1 register: %d\n", ret);
+ goto out;
+ }
+
+ /*
+ * Ignore any clock faults as there is no clean way to check for them.
+ * We would need to start checking for those faults *after* the SAIF
+	 * stream has been set up, and stop checking *before* the stream is
+ * stopped to avoid any false-positives. However there are no
+ * appropriate hooks to monitor these events.
+ */
+ reg &= TAS6424_FAULT_PVDD_OV |
+ TAS6424_FAULT_VBAT_OV |
+ TAS6424_FAULT_PVDD_UV |
+ TAS6424_FAULT_VBAT_UV;
+
+ if (reg)
+ goto check_global_fault2_reg;
+
+ /*
+ * Only flag errors once for a given occurrence. This is needed as
+ * the TAS6424 will take time clearing the fault condition internally
+ * during which we don't want to bombard the system with the same
+ * error message over and over.
+ */
+ if ((reg & TAS6424_FAULT_PVDD_OV) && !(tas6424->last_fault1 & TAS6424_FAULT_PVDD_OV))
+ dev_crit(dev, "experienced a PVDD overvoltage fault\n");
+
+ if ((reg & TAS6424_FAULT_VBAT_OV) && !(tas6424->last_fault1 & TAS6424_FAULT_VBAT_OV))
+ dev_crit(dev, "experienced a VBAT overvoltage fault\n");
+
+ if ((reg & TAS6424_FAULT_PVDD_UV) && !(tas6424->last_fault1 & TAS6424_FAULT_PVDD_UV))
+ dev_crit(dev, "experienced a PVDD undervoltage fault\n");
+
+ if ((reg & TAS6424_FAULT_VBAT_UV) && !(tas6424->last_fault1 & TAS6424_FAULT_VBAT_UV))
+ dev_crit(dev, "experienced a VBAT undervoltage fault\n");
+
+ /* Store current fault1 value so we can detect any changes next time */
+ tas6424->last_fault1 = reg;
+
+check_global_fault2_reg:
+ ret = regmap_read(tas6424->regmap, TAS6424_GLOB_FAULT2, &reg);
+ if (ret < 0) {
+ dev_err(dev, "failed to read FAULT2 register: %d\n", ret);
+ goto out;
+ }
+
+ reg &= TAS6424_FAULT_OTSD |
+ TAS6424_FAULT_OTSD_CH1 |
+ TAS6424_FAULT_OTSD_CH2 |
+ TAS6424_FAULT_OTSD_CH3 |
+ TAS6424_FAULT_OTSD_CH4;
+
+ if (!reg)
+ goto check_warn_reg;
+
+ if ((reg & TAS6424_FAULT_OTSD) && !(tas6424->last_fault2 & TAS6424_FAULT_OTSD))
+ dev_crit(dev, "experienced a global overtemp shutdown\n");
+
+ if ((reg & TAS6424_FAULT_OTSD_CH1) && !(tas6424->last_fault2 & TAS6424_FAULT_OTSD_CH1))
+ dev_crit(dev, "experienced an overtemp shutdown on CH1\n");
+
+ if ((reg & TAS6424_FAULT_OTSD_CH2) && !(tas6424->last_fault2 & TAS6424_FAULT_OTSD_CH2))
+ dev_crit(dev, "experienced an overtemp shutdown on CH2\n");
+
+ if ((reg & TAS6424_FAULT_OTSD_CH3) && !(tas6424->last_fault2 & TAS6424_FAULT_OTSD_CH3))
+ dev_crit(dev, "experienced an overtemp shutdown on CH3\n");
+
+ if ((reg & TAS6424_FAULT_OTSD_CH4) && !(tas6424->last_fault2 & TAS6424_FAULT_OTSD_CH4))
+ dev_crit(dev, "experienced an overtemp shutdown on CH4\n");
+
+ /* Store current fault2 value so we can detect any changes next time */
+ tas6424->last_fault2 = reg;
+
+check_warn_reg:
+ ret = regmap_read(tas6424->regmap, TAS6424_WARN, &reg);
+ if (ret < 0) {
+ dev_err(dev, "failed to read WARN register: %d\n", ret);
+ goto out;
+ }
+
+ reg &= TAS6424_WARN_VDD_UV |
+ TAS6424_WARN_VDD_POR |
+ TAS6424_WARN_VDD_OTW |
+ TAS6424_WARN_VDD_OTW_CH1 |
+ TAS6424_WARN_VDD_OTW_CH2 |
+ TAS6424_WARN_VDD_OTW_CH3 |
+ TAS6424_WARN_VDD_OTW_CH4;
+
+ if (!reg)
+ goto out;
+
+ if ((reg & TAS6424_WARN_VDD_UV) && !(tas6424->last_warn & TAS6424_WARN_VDD_UV))
+ dev_warn(dev, "experienced a VDD under voltage condition\n");
+
+ if ((reg & TAS6424_WARN_VDD_POR) && !(tas6424->last_warn & TAS6424_WARN_VDD_POR))
+ dev_warn(dev, "experienced a VDD POR condition\n");
+
+ if ((reg & TAS6424_WARN_VDD_OTW) && !(tas6424->last_warn & TAS6424_WARN_VDD_OTW))
+ dev_warn(dev, "experienced a global overtemp warning\n");
+
+ if ((reg & TAS6424_WARN_VDD_OTW_CH1) && !(tas6424->last_warn & TAS6424_WARN_VDD_OTW_CH1))
+ dev_warn(dev, "experienced an overtemp warning on CH1\n");
+
+ if ((reg & TAS6424_WARN_VDD_OTW_CH2) && !(tas6424->last_warn & TAS6424_WARN_VDD_OTW_CH2))
+ dev_warn(dev, "experienced an overtemp warning on CH2\n");
+
+ if ((reg & TAS6424_WARN_VDD_OTW_CH3) && !(tas6424->last_warn & TAS6424_WARN_VDD_OTW_CH3))
+ dev_warn(dev, "experienced an overtemp warning on CH3\n");
+
+ if ((reg & TAS6424_WARN_VDD_OTW_CH4) && !(tas6424->last_warn & TAS6424_WARN_VDD_OTW_CH4))
+ dev_warn(dev, "experienced an overtemp warning on CH4\n");
+
+ /* Store current warn value so we can detect any changes next time */
+ tas6424->last_warn = reg;
+
+ /* Clear any faults by toggling the CLEAR_FAULT control bit */
+ ret = regmap_write_bits(tas6424->regmap, TAS6424_MISC_CTRL3,
+ TAS6424_CLEAR_FAULT, TAS6424_CLEAR_FAULT);
+ if (ret < 0)
+ dev_err(dev, "failed to write MISC_CTRL3 register: %d\n", ret);
+
+ ret = regmap_write_bits(tas6424->regmap, TAS6424_MISC_CTRL3,
+ TAS6424_CLEAR_FAULT, 0);
+ if (ret < 0)
+ dev_err(dev, "failed to write MISC_CTRL3 register: %d\n", ret);
+
+out:
+ /* Schedule the next fault check at the specified interval */
+ schedule_delayed_work(&tas6424->fault_check_work,
+ msecs_to_jiffies(TAS6424_FAULT_CHECK_INTERVAL));
+}
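
Every fault and warning above is reported only on its rising edge: set in this poll and clear in the previous one, which is what keeps the log from repeating while the part recovers. The bit logic, verified in plain C:

    #include <assert.h>

    /* Report a fault only when it is newly set relative to the last poll. */
    static int newly_set(unsigned int now, unsigned int last, unsigned int bit)
    {
            return (now & bit) && !(last & bit);
    }

    int main(void)
    {
            unsigned int f = 1 << 3;        /* e.g. the PVDD overvoltage bit */

            assert(newly_set(f, 0, f));     /* first occurrence: report */
            assert(!newly_set(f, f, f));    /* still set: stay quiet */
            assert(!newly_set(0, f, f));    /* cleared: nothing to report */
            return 0;
    }
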
+
+static const struct reg_default tas6424_reg_defaults[] = {
+ { TAS6424_MODE_CTRL, 0x00 },
+ { TAS6424_MISC_CTRL1, 0x32 },
+ { TAS6424_MISC_CTRL2, 0x62 },
+ { TAS6424_SAP_CTRL, 0x04 },
+ { TAS6424_CH_STATE_CTRL, 0x55 },
+ { TAS6424_CH1_VOL_CTRL, 0xcf },
+ { TAS6424_CH2_VOL_CTRL, 0xcf },
+ { TAS6424_CH3_VOL_CTRL, 0xcf },
+ { TAS6424_CH4_VOL_CTRL, 0xcf },
+ { TAS6424_DC_DIAG_CTRL1, 0x00 },
+ { TAS6424_DC_DIAG_CTRL2, 0x11 },
+ { TAS6424_DC_DIAG_CTRL3, 0x11 },
+ { TAS6424_PIN_CTRL, 0xff },
+ { TAS6424_AC_DIAG_CTRL1, 0x00 },
+ { TAS6424_MISC_CTRL3, 0x00 },
+ { TAS6424_CLIP_CTRL, 0x01 },
+ { TAS6424_CLIP_WINDOW, 0x14 },
+ { TAS6424_CLIP_WARN, 0x00 },
+ { TAS6424_CBC_STAT, 0x00 },
+ { TAS6424_MISC_CTRL4, 0x40 },
+};
+
+static bool tas6424_is_writable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TAS6424_MODE_CTRL:
+ case TAS6424_MISC_CTRL1:
+ case TAS6424_MISC_CTRL2:
+ case TAS6424_SAP_CTRL:
+ case TAS6424_CH_STATE_CTRL:
+ case TAS6424_CH1_VOL_CTRL:
+ case TAS6424_CH2_VOL_CTRL:
+ case TAS6424_CH3_VOL_CTRL:
+ case TAS6424_CH4_VOL_CTRL:
+ case TAS6424_DC_DIAG_CTRL1:
+ case TAS6424_DC_DIAG_CTRL2:
+ case TAS6424_DC_DIAG_CTRL3:
+ case TAS6424_PIN_CTRL:
+ case TAS6424_AC_DIAG_CTRL1:
+ case TAS6424_MISC_CTRL3:
+ case TAS6424_CLIP_CTRL:
+ case TAS6424_CLIP_WINDOW:
+ case TAS6424_CLIP_WARN:
+ case TAS6424_CBC_STAT:
+ case TAS6424_MISC_CTRL4:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool tas6424_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TAS6424_DC_LOAD_DIAG_REP12:
+ case TAS6424_DC_LOAD_DIAG_REP34:
+ case TAS6424_DC_LOAD_DIAG_REPLO:
+ case TAS6424_CHANNEL_STATE:
+ case TAS6424_CHANNEL_FAULT:
+ case TAS6424_GLOB_FAULT1:
+ case TAS6424_GLOB_FAULT2:
+ case TAS6424_WARN:
+ case TAS6424_AC_LOAD_DIAG_REP1:
+ case TAS6424_AC_LOAD_DIAG_REP2:
+ case TAS6424_AC_LOAD_DIAG_REP3:
+ case TAS6424_AC_LOAD_DIAG_REP4:
+ return true;
+ default:
+ return false;
+ }
+}
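
Marking the fault, warning, and diagnostic registers volatile is what makes the polling above work: regmap serves non-volatile reads from the RBTREE cache, so without it the work function would keep seeing stale values. A toy model of that lookup policy (illustrative only):

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy read model: volatile registers bypass the cache and hit hardware. */
    static unsigned int read_reg(bool is_volatile, unsigned int cached,
                                 unsigned int hw)
    {
            return is_volatile ? hw : cached;
    }

    int main(void)
    {
            /* Cache still says 0x0 while the hardware reports a fault bit. */
            printf("volatile read: 0x%x\n", read_reg(true, 0x0, 0x8));  /* 0x8 */
            printf("cached read:   0x%x\n", read_reg(false, 0x0, 0x8)); /* 0x0 */
            return 0;
    }
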
+
+static const struct regmap_config tas6424_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .writeable_reg = tas6424_is_writable_reg,
+ .volatile_reg = tas6424_is_volatile_reg,
+
+ .max_register = TAS6424_MAX,
+ .reg_defaults = tas6424_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(tas6424_reg_defaults),
+ .cache_type = REGCACHE_RBTREE,
+};
+
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id tas6424_of_ids[] = {
+ { .compatible = "ti,tas6424", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tas6424_of_ids);
+#endif
+
+static int tas6424_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct tas6424_data *tas6424;
+ int ret;
+ int i;
+
+ tas6424 = devm_kzalloc(dev, sizeof(*tas6424), GFP_KERNEL);
+ if (!tas6424)
+ return -ENOMEM;
+ dev_set_drvdata(dev, tas6424);
+
+ tas6424->dev = dev;
+
+ tas6424->regmap = devm_regmap_init_i2c(client, &tas6424_regmap_config);
+ if (IS_ERR(tas6424->regmap)) {
+ ret = PTR_ERR(tas6424->regmap);
+ dev_err(dev, "unable to allocate register map: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tas6424->supplies); i++)
+ tas6424->supplies[i].supply = tas6424_supply_names[i];
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(tas6424->supplies),
+ tas6424->supplies);
+ if (ret) {
+ dev_err(dev, "unable to request supplies: %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(tas6424->supplies),
+ tas6424->supplies);
+ if (ret) {
+ dev_err(dev, "unable to enable supplies: %d\n", ret);
+ return ret;
+ }
+
+ /* Reset device to establish well-defined startup state */
+ ret = regmap_update_bits(tas6424->regmap, TAS6424_MODE_CTRL,
+ TAS6424_RESET, TAS6424_RESET);
+ if (ret) {
+ dev_err(dev, "unable to reset device: %d\n", ret);
+ return ret;
+ }
+
+ INIT_DELAYED_WORK(&tas6424->fault_check_work, tas6424_fault_check_work);
+
+ ret = snd_soc_register_codec(dev, &soc_codec_dev_tas6424,
+ tas6424_dai, ARRAY_SIZE(tas6424_dai));
+ if (ret < 0) {
+ dev_err(dev, "unable to register codec: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tas6424_i2c_remove(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct tas6424_data *tas6424 = dev_get_drvdata(dev);
+ int ret;
+
+ snd_soc_unregister_codec(dev);
+
+ cancel_delayed_work_sync(&tas6424->fault_check_work);
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(tas6424->supplies),
+ tas6424->supplies);
+ if (ret < 0) {
+ dev_err(dev, "unable to disable supplies: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct i2c_device_id tas6424_i2c_ids[] = {
+ { "tas6424", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tas6424_i2c_ids);
+
+static struct i2c_driver tas6424_i2c_driver = {
+ .driver = {
+ .name = "tas6424",
+ .of_match_table = of_match_ptr(tas6424_of_ids),
+ },
+ .probe = tas6424_i2c_probe,
+ .remove = tas6424_i2c_remove,
+ .id_table = tas6424_i2c_ids,
+};
+module_i2c_driver(tas6424_i2c_driver);
+
+MODULE_AUTHOR("Andreas Dannenberg <dannenberg@ti.com>");
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("TAS6424 Audio amplifier driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/tas6424.h b/sound/soc/codecs/tas6424.h
new file mode 100644
index 000000000000..430588328a06
--- /dev/null
+++ b/sound/soc/codecs/tas6424.h
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ALSA SoC Texas Instruments TAS6424 Quad-Channel Audio Amplifier
+ *
+ * Copyright (C) 2016-2017 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Andreas Dannenberg <dannenberg@ti.com>
+ * Andrew F. Davis <afd@ti.com>
+ */
+
+#ifndef __TAS6424_H__
+#define __TAS6424_H__
+
+#define TAS6424_RATES (SNDRV_PCM_RATE_44100 | \
+ SNDRV_PCM_RATE_48000 | \
+ SNDRV_PCM_RATE_96000)
+
+#define TAS6424_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE)
+
+/* Register Address Map */
+#define TAS6424_MODE_CTRL 0x00
+#define TAS6424_MISC_CTRL1 0x01
+#define TAS6424_MISC_CTRL2 0x02
+#define TAS6424_SAP_CTRL 0x03
+#define TAS6424_CH_STATE_CTRL 0x04
+#define TAS6424_CH1_VOL_CTRL 0x05
+#define TAS6424_CH2_VOL_CTRL 0x06
+#define TAS6424_CH3_VOL_CTRL 0x07
+#define TAS6424_CH4_VOL_CTRL 0x08
+#define TAS6424_DC_DIAG_CTRL1 0x09
+#define TAS6424_DC_DIAG_CTRL2 0x0a
+#define TAS6424_DC_DIAG_CTRL3 0x0b
+#define TAS6424_DC_LOAD_DIAG_REP12 0x0c
+#define TAS6424_DC_LOAD_DIAG_REP34 0x0d
+#define TAS6424_DC_LOAD_DIAG_REPLO 0x0e
+#define TAS6424_CHANNEL_STATE 0x0f
+#define TAS6424_CHANNEL_FAULT 0x10
+#define TAS6424_GLOB_FAULT1 0x11
+#define TAS6424_GLOB_FAULT2 0x12
+#define TAS6424_WARN 0x13
+#define TAS6424_PIN_CTRL 0x14
+#define TAS6424_AC_DIAG_CTRL1 0x15
+#define TAS6424_AC_DIAG_CTRL2 0x16
+#define TAS6424_AC_LOAD_DIAG_REP1 0x17
+#define TAS6424_AC_LOAD_DIAG_REP2 0x18
+#define TAS6424_AC_LOAD_DIAG_REP3 0x19
+#define TAS6424_AC_LOAD_DIAG_REP4 0x1a
+#define TAS6424_MISC_CTRL3 0x21
+#define TAS6424_CLIP_CTRL 0x22
+#define TAS6424_CLIP_WINDOW 0x23
+#define TAS6424_CLIP_WARN 0x24
+#define TAS6424_CBC_STAT 0x25
+#define TAS6424_MISC_CTRL4 0x26
+#define TAS6424_MAX TAS6424_MISC_CTRL4
+
+/* TAS6424_MODE_CTRL_REG */
+#define TAS6424_RESET BIT(7)
+
+/* TAS6424_SAP_CTRL_REG */
+#define TAS6424_SAP_RATE_MASK GENMASK(7, 6)
+#define TAS6424_SAP_RATE_44100 (0x00 << 6)
+#define TAS6424_SAP_RATE_48000 (0x01 << 6)
+#define TAS6424_SAP_RATE_96000 (0x02 << 6)
+#define TAS6424_SAP_TDM_SLOT_LAST BIT(5)
+#define TAS6424_SAP_TDM_SLOT_SZ_16 BIT(4)
+#define TAS6424_SAP_TDM_SLOT_SWAP BIT(3)
+#define TAS6424_SAP_FMT_MASK GENMASK(2, 0)
+#define TAS6424_SAP_RIGHTJ_24 (0x00 << 0)
+#define TAS6424_SAP_RIGHTJ_20 (0x01 << 0)
+#define TAS6424_SAP_RIGHTJ_18 (0x02 << 0)
+#define TAS6424_SAP_RIGHTJ_16 (0x03 << 0)
+#define TAS6424_SAP_I2S (0x04 << 0)
+#define TAS6424_SAP_LEFTJ (0x05 << 0)
+#define TAS6424_SAP_DSP (0x06 << 0)
+
+/* TAS6424_CH_STATE_CTRL_REG */
+#define TAS6424_CH1_STATE_MASK GENMASK(7, 6)
+#define TAS6424_CH1_STATE_PLAY (0x00 << 6)
+#define TAS6424_CH1_STATE_HIZ (0x01 << 6)
+#define TAS6424_CH1_STATE_MUTE (0x02 << 6)
+#define TAS6424_CH1_STATE_DIAG (0x03 << 6)
+#define TAS6424_CH2_STATE_MASK GENMASK(5, 4)
+#define TAS6424_CH2_STATE_PLAY (0x00 << 4)
+#define TAS6424_CH2_STATE_HIZ (0x01 << 4)
+#define TAS6424_CH2_STATE_MUTE (0x02 << 4)
+#define TAS6424_CH2_STATE_DIAG (0x03 << 4)
+#define TAS6424_CH3_STATE_MASK GENMASK(3, 2)
+#define TAS6424_CH3_STATE_PLAY (0x00 << 2)
+#define TAS6424_CH3_STATE_HIZ (0x01 << 2)
+#define TAS6424_CH3_STATE_MUTE (0x02 << 2)
+#define TAS6424_CH3_STATE_DIAG (0x03 << 2)
+#define TAS6424_CH4_STATE_MASK GENMASK(1, 0)
+#define TAS6424_CH4_STATE_PLAY (0x00 << 0)
+#define TAS6424_CH4_STATE_HIZ (0x01 << 0)
+#define TAS6424_CH4_STATE_MUTE (0x02 << 0)
+#define TAS6424_CH4_STATE_DIAG (0x03 << 0)
+#define TAS6424_ALL_STATE_PLAY (TAS6424_CH1_STATE_PLAY | \
+ TAS6424_CH2_STATE_PLAY | \
+ TAS6424_CH3_STATE_PLAY | \
+ TAS6424_CH4_STATE_PLAY)
+#define TAS6424_ALL_STATE_HIZ (TAS6424_CH1_STATE_HIZ | \
+ TAS6424_CH2_STATE_HIZ | \
+ TAS6424_CH3_STATE_HIZ | \
+ TAS6424_CH4_STATE_HIZ)
+#define TAS6424_ALL_STATE_MUTE (TAS6424_CH1_STATE_MUTE | \
+ TAS6424_CH2_STATE_MUTE | \
+ TAS6424_CH3_STATE_MUTE | \
+ TAS6424_CH4_STATE_MUTE)
+#define TAS6424_ALL_STATE_DIAG (TAS6424_CH1_STATE_DIAG | \
+ TAS6424_CH2_STATE_DIAG | \
+ TAS6424_CH3_STATE_DIAG | \
+ TAS6424_CH4_STATE_DIAG)
+
+/* TAS6424_GLOB_FAULT1_REG */
+#define TAS6424_FAULT_CLOCK BIT(4)
+#define TAS6424_FAULT_PVDD_OV BIT(3)
+#define TAS6424_FAULT_VBAT_OV BIT(2)
+#define TAS6424_FAULT_PVDD_UV BIT(1)
+#define TAS6424_FAULT_VBAT_UV BIT(0)
+
+/* TAS6424_GLOB_FAULT2_REG */
+#define TAS6424_FAULT_OTSD BIT(4)
+#define TAS6424_FAULT_OTSD_CH1 BIT(3)
+#define TAS6424_FAULT_OTSD_CH2 BIT(2)
+#define TAS6424_FAULT_OTSD_CH3 BIT(1)
+#define TAS6424_FAULT_OTSD_CH4 BIT(0)
+
+/* TAS6424_WARN_REG */
+#define TAS6424_WARN_VDD_UV BIT(6)
+#define TAS6424_WARN_VDD_POR BIT(5)
+#define TAS6424_WARN_VDD_OTW BIT(4)
+#define TAS6424_WARN_VDD_OTW_CH1 BIT(3)
+#define TAS6424_WARN_VDD_OTW_CH2 BIT(2)
+#define TAS6424_WARN_VDD_OTW_CH3 BIT(1)
+#define TAS6424_WARN_VDD_OTW_CH4 BIT(0)
+
+/* TAS6424_MISC_CTRL3_REG */
+#define TAS6424_CLEAR_FAULT BIT(7)
+#define TAS6424_PBTL_CH_SEL BIT(6)
+#define TAS6424_MASK_CBC_WARN BIT(5)
+#define TAS6424_MASK_VDD_UV BIT(4)
+#define TAS6424_OTSD_AUTO_RECOVERY BIT(3)
+
+#endif /* __TAS6424_H__ */
diff --git a/sound/soc/codecs/tfa9879.c b/sound/soc/codecs/tfa9879.c
index f8dd67ca0744..e7ca764b5729 100644
--- a/sound/soc/codecs/tfa9879.c
+++ b/sound/soc/codecs/tfa9879.c
@@ -316,6 +316,7 @@ static const struct of_device_id tfa9879_of_match[] = {
{ .compatible = "nxp,tfa9879", },
{ }
};
+MODULE_DEVICE_TABLE(of, tfa9879_of_match);
static struct i2c_driver tfa9879_i2c_driver = {
.driver = {
diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c
index e2862372c26e..858cb8be445f 100644
--- a/sound/soc/codecs/tlv320aic31xx.c
+++ b/sound/soc/codecs/tlv320aic31xx.c
@@ -1,22 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * ALSA SoC TLV320AIC31XX codec driver
+ * ALSA SoC TLV320AIC31xx CODEC Driver
*
- * Copyright (C) 2014 Texas Instruments, Inc.
- *
- * Author: Jyri Sarha <jsarha@ti.com>
+ * Copyright (C) 2014-2017 Texas Instruments Incorporated - http://www.ti.com/
+ * Jyri Sarha <jsarha@ti.com>
*
* Based on ground work by: Ajit Kulkarni <x0175765@ti.com>
*
- * This package is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * THIS PACKAGE IS PROVIDED AS IS AND WITHOUT ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
- * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
- *
- * The TLV320AIC31xx series of audio codec is a low-power, highly integrated
- * high performance codec which provides a stereo DAC, a mono ADC,
+ * The TLV320AIC31xx series of audio codecs are low-power, highly integrated
+ * high performance codecs which provide a stereo DAC, a mono ADC,
* and mono/stereo Class-D speaker driver.
*/
@@ -26,7 +18,7 @@
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/regulator/consumer.h>
#include <linux/acpi.h>
#include <linux/of.h>
@@ -144,8 +136,7 @@ static const struct regmap_config aic31xx_i2c_regmap = {
.max_register = 12 * 128,
};
-#define AIC31XX_NUM_SUPPLIES 6
-static const char * const aic31xx_supply_names[AIC31XX_NUM_SUPPLIES] = {
+static const char * const aic31xx_supply_names[] = {
"HPVDD",
"SPRVDD",
"SPLVDD",
@@ -154,6 +145,8 @@ static const char * const aic31xx_supply_names[AIC31XX_NUM_SUPPLIES] = {
"DVDD",
};
+#define AIC31XX_NUM_SUPPLIES ARRAY_SIZE(aic31xx_supply_names)
+
struct aic31xx_disable_nb {
struct notifier_block nb;
struct aic31xx_priv *aic31xx;
@@ -164,6 +157,9 @@ struct aic31xx_priv {
u8 i2c_regs_status;
struct device *dev;
struct regmap *regmap;
+ enum aic31xx_type codec_type;
+ struct gpio_desc *gpio_reset;
+ int micbias_vg;
struct aic31xx_pdata pdata;
struct regulator_bulk_data supplies[AIC31XX_NUM_SUPPLIES];
struct aic31xx_disable_nb disable_nb[AIC31XX_NUM_SUPPLIES];
@@ -185,7 +181,7 @@ struct aic31xx_rate_divs {
u8 madc;
};
-/* ADC dividers can be disabled by cofiguring them to 0 */
+/* ADC dividers can be disabled by configuring them to 0 */
static const struct aic31xx_rate_divs aic31xx_divs[] = {
/* mclk/p rate pll: j d dosr ndac mdac aors nadc madc */
/* 8k rate */
@@ -456,7 +452,7 @@ static int mic_bias_event(struct snd_soc_dapm_widget *w,
/* change mic bias voltage to user defined */
snd_soc_update_bits(codec, AIC31XX_MICBIAS,
AIC31XX_MICBIAS_MASK,
- aic31xx->pdata.micbias_vg <<
+ aic31xx->micbias_vg <<
AIC31XX_MICBIAS_SHIFT);
dev_dbg(codec->dev, "%s: turned on\n", __func__);
break;
@@ -679,14 +675,14 @@ static int aic31xx_add_controls(struct snd_soc_codec *codec)
int ret = 0;
struct aic31xx_priv *aic31xx = snd_soc_codec_get_drvdata(codec);
- if (!(aic31xx->pdata.codec_type & DAC31XX_BIT))
+ if (!(aic31xx->codec_type & DAC31XX_BIT))
ret = snd_soc_add_codec_controls(
codec, aic31xx_snd_controls,
ARRAY_SIZE(aic31xx_snd_controls));
if (ret)
return ret;
- if (aic31xx->pdata.codec_type & AIC31XX_STEREO_CLASS_D_BIT)
+ if (aic31xx->codec_type & AIC31XX_STEREO_CLASS_D_BIT)
ret = snd_soc_add_codec_controls(
codec, aic311x_snd_controls,
ARRAY_SIZE(aic311x_snd_controls));
@@ -704,7 +700,7 @@ static int aic31xx_add_widgets(struct snd_soc_codec *codec)
struct aic31xx_priv *aic31xx = snd_soc_codec_get_drvdata(codec);
int ret = 0;
- if (aic31xx->pdata.codec_type & DAC31XX_BIT) {
+ if (aic31xx->codec_type & DAC31XX_BIT) {
ret = snd_soc_dapm_new_controls(
dapm, dac31xx_dapm_widgets,
ARRAY_SIZE(dac31xx_dapm_widgets));
@@ -728,7 +724,7 @@ static int aic31xx_add_widgets(struct snd_soc_codec *codec)
return ret;
}
- if (aic31xx->pdata.codec_type & AIC31XX_STEREO_CLASS_D_BIT) {
+ if (aic31xx->codec_type & AIC31XX_STEREO_CLASS_D_BIT) {
ret = snd_soc_dapm_new_controls(
dapm, aic311x_dapm_widgets,
ARRAY_SIZE(aic311x_dapm_widgets));
@@ -760,11 +756,17 @@ static int aic31xx_setup_pll(struct snd_soc_codec *codec,
{
struct aic31xx_priv *aic31xx = snd_soc_codec_get_drvdata(codec);
int bclk_score = snd_soc_params_to_frame_size(params);
- int mclk_p = aic31xx->sysclk / aic31xx->p_div;
+ int mclk_p;
int bclk_n = 0;
int match = -1;
int i;
+ if (!aic31xx->sysclk || !aic31xx->p_div) {
+ dev_err(codec->dev, "Master clock not supplied\n");
+ return -EINVAL;
+ }
+ mclk_p = aic31xx->sysclk / aic31xx->p_div;
+
/* Use PLL as CODEC_CLKIN and DAC_CLK as BDIV_CLKIN */
snd_soc_update_bits(codec, AIC31XX_CLKMUX,
AIC31XX_CODEC_CLKIN_MASK, AIC31XX_CODEC_CLKIN_PLL);
@@ -840,11 +842,17 @@ static int aic31xx_setup_pll(struct snd_soc_codec *codec,
dev_dbg(codec->dev,
"pll %d.%04d/%d dosr %d n %d m %d aosr %d n %d m %d bclk_n %d\n",
- aic31xx_divs[i].pll_j, aic31xx_divs[i].pll_d,
- aic31xx->p_div, aic31xx_divs[i].dosr,
- aic31xx_divs[i].ndac, aic31xx_divs[i].mdac,
- aic31xx_divs[i].aosr, aic31xx_divs[i].nadc,
- aic31xx_divs[i].madc, bclk_n);
+ aic31xx_divs[i].pll_j,
+ aic31xx_divs[i].pll_d,
+ aic31xx->p_div,
+ aic31xx_divs[i].dosr,
+ aic31xx_divs[i].ndac,
+ aic31xx_divs[i].mdac,
+ aic31xx_divs[i].aosr,
+ aic31xx_divs[i].nadc,
+ aic31xx_divs[i].madc,
+ bclk_n
+ );
return 0;
}
@@ -919,8 +927,28 @@ static int aic31xx_set_dai_fmt(struct snd_soc_dai *codec_dai,
case SND_SOC_DAIFMT_CBM_CFM:
iface_reg1 |= AIC31XX_BCLK_MASTER | AIC31XX_WCLK_MASTER;
break;
+ case SND_SOC_DAIFMT_CBS_CFM:
+ iface_reg1 |= AIC31XX_WCLK_MASTER;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFS:
+ iface_reg1 |= AIC31XX_BCLK_MASTER;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ break;
+ default:
+ dev_err(codec->dev, "Invalid DAI master/slave interface\n");
+ return -EINVAL;
+ }
+
+ /* signal polarity */
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ iface_reg2 |= AIC31XX_BCLKINV_MASK;
+ break;
default:
- dev_alert(codec->dev, "Invalid DAI master/slave interface\n");
+ dev_err(codec->dev, "Invalid DAI clock signal polarity\n");
return -EINVAL;
}
@@ -931,16 +959,12 @@ static int aic31xx_set_dai_fmt(struct snd_soc_dai *codec_dai,
case SND_SOC_DAIFMT_DSP_A:
dsp_a_val = 0x1; /* fall through */
case SND_SOC_DAIFMT_DSP_B:
- /* NOTE: BCLKINV bit value 1 equas NB and 0 equals IB */
- switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
- case SND_SOC_DAIFMT_NB_NF:
- iface_reg2 |= AIC31XX_BCLKINV_MASK;
- break;
- case SND_SOC_DAIFMT_IB_NF:
- break;
- default:
- return -EINVAL;
- }
+ /*
+ * NOTE: This CODEC samples on the falling edge of BCLK in
+	 * DSP mode; this is inverted compared to what most DAIs
+ * expect, so we invert for this mode
+ */
+ iface_reg2 ^= AIC31XX_BCLKINV_MASK;
iface_reg1 |= (AIC31XX_DSP_MODE <<
AIC31XX_IFACE1_DATATYPE_SHIFT);
break;
@@ -981,8 +1005,9 @@ static int aic31xx_set_dai_sysclk(struct snd_soc_dai *codec_dai,
dev_dbg(codec->dev, "## %s: clk_id = %d, freq = %d, dir = %d\n",
__func__, clk_id, freq, dir);
- for (i = 1; freq/i > 20000000 && i < 8; i++)
- ;
+ for (i = 1; i < 8; i++)
+ if (freq / i <= 20000000)
+ break;
if (freq/i > 20000000) {
dev_err(aic31xx->dev, "%s: Too high mclk frequency %u\n",
__func__, freq);
@@ -990,9 +1015,9 @@ static int aic31xx_set_dai_sysclk(struct snd_soc_dai *codec_dai,
}
aic31xx->p_div = i;
- for (i = 0; i < ARRAY_SIZE(aic31xx_divs) &&
- aic31xx_divs[i].mclk_p != freq/aic31xx->p_div; i++)
- ;
+ for (i = 0; i < ARRAY_SIZE(aic31xx_divs); i++)
+ if (aic31xx_divs[i].mclk_p == freq / aic31xx->p_div)
+ break;
if (i == ARRAY_SIZE(aic31xx_divs)) {
dev_err(aic31xx->dev, "%s: Unsupported frequency %d\n",
__func__, freq);
@@ -1004,6 +1029,7 @@ static int aic31xx_set_dai_sysclk(struct snd_soc_dai *codec_dai,
clk_id << AIC31XX_PLL_CLKIN_SHIFT);
aic31xx->sysclk = freq;
+
return 0;
}
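
The rewritten search picks the smallest P divider (1..7) that brings MCLK/P down to the 20 MHz limit; for instance 19.2 MHz passes with p_div = 1 while 25 MHz needs p_div = 2. The same search in stand-alone C:

    #include <assert.h>

    /* Smallest divider in 1..7 with freq/i <= 20 MHz; 0 if none fits. */
    static int pick_p_div(unsigned int freq)
    {
            for (int i = 1; i < 8; i++)
                    if (freq / i <= 20000000)
                            return i;
            return 0;
    }

    int main(void)
    {
            assert(pick_p_div(19200000) == 1);
            assert(pick_p_div(25000000) == 2);
            assert(pick_p_div(168000000) == 0); /* too fast even with p = 7 */
            return 0;
    }
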
@@ -1019,8 +1045,8 @@ static int aic31xx_regulator_event(struct notifier_block *nb,
* Put codec to reset and as at least one of the
* supplies was disabled.
*/
- if (gpio_is_valid(aic31xx->pdata.gpio_reset))
- gpio_set_value(aic31xx->pdata.gpio_reset, 0);
+ if (aic31xx->gpio_reset)
+ gpiod_set_value(aic31xx->gpio_reset, 1);
regcache_mark_dirty(aic31xx->regmap);
dev_dbg(aic31xx->dev, "## %s: DISABLE received\n", __func__);
@@ -1029,6 +1055,22 @@ static int aic31xx_regulator_event(struct notifier_block *nb,
return 0;
}
+static int aic31xx_reset(struct aic31xx_priv *aic31xx)
+{
+ int ret = 0;
+
+ if (aic31xx->gpio_reset) {
+ gpiod_set_value(aic31xx->gpio_reset, 1);
+ ndelay(10); /* At least 10ns */
+ gpiod_set_value(aic31xx->gpio_reset, 0);
+ } else {
+ ret = regmap_write(aic31xx->regmap, AIC31XX_RESET, 1);
+ }
+ mdelay(1); /* At least 1ms */
+
+ return ret;
+}
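
With gpiod the reset callback asserts and deasserts logically: gpiod_set_value(gpio, 1) drives whatever physical level the devicetree declared active, so the old active-low juggling disappears. A consumer-side sketch under that assumption (kernel-style, function name invented):

    #include <linux/err.h>
    #include <linux/gpio/consumer.h>

    /* Illustrative: request the reset line asserted, then release it. */
    static int example_get_reset(struct device *dev, struct gpio_desc **out)
    {
            struct gpio_desc *gpio;

            gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
            if (IS_ERR(gpio))
                    return PTR_ERR(gpio);

            if (gpio)
                    gpiod_set_value(gpio, 0); /* deassert; DT fixes polarity */
            *out = gpio;
            return 0;
    }
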
+
static void aic31xx_clk_on(struct snd_soc_codec *codec)
{
struct aic31xx_priv *aic31xx = snd_soc_codec_get_drvdata(codec);
@@ -1065,20 +1107,22 @@ static void aic31xx_clk_off(struct snd_soc_codec *codec)
static int aic31xx_power_on(struct snd_soc_codec *codec)
{
struct aic31xx_priv *aic31xx = snd_soc_codec_get_drvdata(codec);
- int ret = 0;
+ int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(aic31xx->supplies),
aic31xx->supplies);
if (ret)
return ret;
- if (gpio_is_valid(aic31xx->pdata.gpio_reset)) {
- gpio_set_value(aic31xx->pdata.gpio_reset, 1);
- udelay(100);
- }
regcache_cache_only(aic31xx->regmap, false);
+
+ /* Reset device registers for a consistent power-on like state */
+ ret = aic31xx_reset(aic31xx);
+ if (ret < 0)
+ dev_err(aic31xx->dev, "Could not reset device: %d\n", ret);
+
ret = regcache_sync(aic31xx->regmap);
- if (ret != 0) {
+ if (ret) {
dev_err(codec->dev,
"Failed to restore cache: %d\n", ret);
regcache_cache_only(aic31xx->regmap, true);
@@ -1086,19 +1130,17 @@ static int aic31xx_power_on(struct snd_soc_codec *codec)
aic31xx->supplies);
return ret;
}
+
return 0;
}
-static int aic31xx_power_off(struct snd_soc_codec *codec)
+static void aic31xx_power_off(struct snd_soc_codec *codec)
{
struct aic31xx_priv *aic31xx = snd_soc_codec_get_drvdata(codec);
- int ret = 0;
regcache_cache_only(aic31xx->regmap, true);
- ret = regulator_bulk_disable(ARRAY_SIZE(aic31xx->supplies),
- aic31xx->supplies);
-
- return ret;
+ regulator_bulk_disable(ARRAY_SIZE(aic31xx->supplies),
+ aic31xx->supplies);
}
static int aic31xx_set_bias_level(struct snd_soc_codec *codec,
@@ -1137,14 +1179,11 @@ static int aic31xx_set_bias_level(struct snd_soc_codec *codec,
static int aic31xx_codec_probe(struct snd_soc_codec *codec)
{
- int ret = 0;
struct aic31xx_priv *aic31xx = snd_soc_codec_get_drvdata(codec);
- int i;
+ int i, ret;
dev_dbg(aic31xx->dev, "## %s\n", __func__);
- aic31xx = snd_soc_codec_get_drvdata(codec);
-
aic31xx->codec = codec;
for (i = 0; i < ARRAY_SIZE(aic31xx->supplies); i++) {
@@ -1169,8 +1208,10 @@ static int aic31xx_codec_probe(struct snd_soc_codec *codec)
return ret;
ret = aic31xx_add_widgets(codec);
+ if (ret)
+ return ret;
- return ret;
+ return 0;
}
static int aic31xx_codec_remove(struct snd_soc_codec *codec)
@@ -1258,89 +1299,31 @@ static const struct of_device_id tlv320aic31xx_of_match[] = {
{},
};
MODULE_DEVICE_TABLE(of, tlv320aic31xx_of_match);
-
-static void aic31xx_pdata_from_of(struct aic31xx_priv *aic31xx)
-{
- struct device_node *np = aic31xx->dev->of_node;
- unsigned int value = MICBIAS_2_0V;
- int ret;
-
- of_property_read_u32(np, "ai31xx-micbias-vg", &value);
- switch (value) {
- case MICBIAS_2_0V:
- case MICBIAS_2_5V:
- case MICBIAS_AVDDV:
- aic31xx->pdata.micbias_vg = value;
- break;
- default:
- dev_err(aic31xx->dev,
- "Bad ai31xx-micbias-vg value %d DT\n",
- value);
- aic31xx->pdata.micbias_vg = MICBIAS_2_0V;
- }
-
- ret = of_get_named_gpio(np, "gpio-reset", 0);
- if (ret > 0)
- aic31xx->pdata.gpio_reset = ret;
-}
-#else /* CONFIG_OF */
-static void aic31xx_pdata_from_of(struct aic31xx_priv *aic31xx)
-{
-}
#endif /* CONFIG_OF */
-static int aic31xx_device_init(struct aic31xx_priv *aic31xx)
-{
- int ret, i;
-
- dev_set_drvdata(aic31xx->dev, aic31xx);
-
- if (dev_get_platdata(aic31xx->dev))
- memcpy(&aic31xx->pdata, dev_get_platdata(aic31xx->dev),
- sizeof(aic31xx->pdata));
- else if (aic31xx->dev->of_node)
- aic31xx_pdata_from_of(aic31xx);
-
- if (aic31xx->pdata.gpio_reset) {
- ret = devm_gpio_request_one(aic31xx->dev,
- aic31xx->pdata.gpio_reset,
- GPIOF_OUT_INIT_HIGH,
- "aic31xx-reset-pin");
- if (ret < 0) {
- dev_err(aic31xx->dev, "not able to acquire gpio\n");
- return ret;
- }
- }
-
- for (i = 0; i < ARRAY_SIZE(aic31xx->supplies); i++)
- aic31xx->supplies[i].supply = aic31xx_supply_names[i];
-
- ret = devm_regulator_bulk_get(aic31xx->dev,
- ARRAY_SIZE(aic31xx->supplies),
- aic31xx->supplies);
- if (ret != 0)
- dev_err(aic31xx->dev, "Failed to request supplies: %d\n", ret);
-
- return ret;
-}
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id aic31xx_acpi_match[] = {
+ { "10TI3100", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, aic31xx_acpi_match);
+#endif
static int aic31xx_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct aic31xx_priv *aic31xx;
- int ret;
- const struct regmap_config *regmap_config;
+ unsigned int micbias_value = MICBIAS_2_0V;
+ int i, ret;
dev_dbg(&i2c->dev, "## %s: %s codec_type = %d\n", __func__,
- id->name, (int) id->driver_data);
-
- regmap_config = &aic31xx_i2c_regmap;
+ id->name, (int)id->driver_data);
aic31xx = devm_kzalloc(&i2c->dev, sizeof(*aic31xx), GFP_KERNEL);
- if (aic31xx == NULL)
+ if (!aic31xx)
return -ENOMEM;
- aic31xx->regmap = devm_regmap_init_i2c(i2c, regmap_config);
+ aic31xx->regmap = devm_regmap_init_i2c(i2c, &aic31xx_i2c_regmap);
if (IS_ERR(aic31xx->regmap)) {
ret = PTR_ERR(aic31xx->regmap);
dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
@@ -1349,13 +1332,49 @@ static int aic31xx_i2c_probe(struct i2c_client *i2c,
}
aic31xx->dev = &i2c->dev;
- aic31xx->pdata.codec_type = id->driver_data;
+ aic31xx->codec_type = id->driver_data;
- ret = aic31xx_device_init(aic31xx);
- if (ret)
+ dev_set_drvdata(aic31xx->dev, aic31xx);
+
+ fwnode_property_read_u32(aic31xx->dev->fwnode, "ai31xx-micbias-vg",
+ &micbias_value);
+ switch (micbias_value) {
+ case MICBIAS_2_0V:
+ case MICBIAS_2_5V:
+ case MICBIAS_AVDDV:
+ aic31xx->micbias_vg = micbias_value;
+ break;
+ default:
+ dev_err(aic31xx->dev, "Bad ai31xx-micbias-vg value %d\n",
+ micbias_value);
+ aic31xx->micbias_vg = MICBIAS_2_0V;
+ }
+
+ if (dev_get_platdata(aic31xx->dev)) {
+ memcpy(&aic31xx->pdata, dev_get_platdata(aic31xx->dev), sizeof(aic31xx->pdata));
+ aic31xx->codec_type = aic31xx->pdata.codec_type;
+ aic31xx->micbias_vg = aic31xx->pdata.micbias_vg;
+ }
+
+ aic31xx->gpio_reset = devm_gpiod_get_optional(aic31xx->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(aic31xx->gpio_reset)) {
+ dev_err(aic31xx->dev, "not able to acquire gpio\n");
+ return PTR_ERR(aic31xx->gpio_reset);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(aic31xx->supplies); i++)
+ aic31xx->supplies[i].supply = aic31xx_supply_names[i];
+
+ ret = devm_regulator_bulk_get(aic31xx->dev,
+ ARRAY_SIZE(aic31xx->supplies),
+ aic31xx->supplies);
+ if (ret) {
+ dev_err(aic31xx->dev, "Failed to request supplies: %d\n", ret);
return ret;
+ }
- if (aic31xx->pdata.codec_type & DAC31XX_BIT)
+ if (aic31xx->codec_type & DAC31XX_BIT)
return snd_soc_register_codec(&i2c->dev,
&soc_codec_driver_aic31xx,
dac31xx_dai_driver,
@@ -1386,14 +1405,6 @@ static const struct i2c_device_id aic31xx_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, aic31xx_i2c_id);
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id aic31xx_acpi_match[] = {
- { "10TI3100", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(acpi, aic31xx_acpi_match);
-#endif
-
static struct i2c_driver aic31xx_i2c_driver = {
.driver = {
.name = "tlv320aic31xx-codec",
@@ -1404,9 +1415,8 @@ static struct i2c_driver aic31xx_i2c_driver = {
.remove = aic31xx_i2c_remove,
.id_table = aic31xx_i2c_id,
};
-
module_i2c_driver(aic31xx_i2c_driver);
-MODULE_DESCRIPTION("ASoC TLV320AIC3111 codec driver");
-MODULE_AUTHOR("Jyri Sarha");
-MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jyri Sarha <jsarha@ti.com>");
+MODULE_DESCRIPTION("ASoC TLV320AIC31xx CODEC Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/tlv320aic31xx.h b/sound/soc/codecs/tlv320aic31xx.h
index 1ff3edb7bbb6..15ac7cba86fe 100644
--- a/sound/soc/codecs/tlv320aic31xx.h
+++ b/sound/soc/codecs/tlv320aic31xx.h
@@ -1,36 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * ALSA SoC TLV320AIC31XX codec driver
- *
- * Copyright (C) 2013 Texas Instruments, Inc.
- *
- * This package is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
- * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ * ALSA SoC TLV320AIC31xx CODEC Driver Definitions
*
+ * Copyright (C) 2014-2017 Texas Instruments Incorporated - http://www.ti.com/
*/
+
#ifndef _TLV320AIC31XX_H
#define _TLV320AIC31XX_H
#define AIC31XX_RATES SNDRV_PCM_RATE_8000_192000
-#define AIC31XX_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE \
- | SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S24_LE \
- | SNDRV_PCM_FMTBIT_S32_LE)
-
+#define AIC31XX_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S20_3LE | \
+ SNDRV_PCM_FMTBIT_S24_3LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE)
-#define AIC31XX_STEREO_CLASS_D_BIT 0x1
-#define AIC31XX_MINIDSP_BIT 0x2
-#define DAC31XX_BIT 0x4
+#define AIC31XX_STEREO_CLASS_D_BIT BIT(1)
+#define AIC31XX_MINIDSP_BIT BIT(2)
+#define DAC31XX_BIT BIT(3)
enum aic31xx_type {
AIC3100 = 0,
AIC3110 = AIC31XX_STEREO_CLASS_D_BIT,
AIC3120 = AIC31XX_MINIDSP_BIT,
- AIC3111 = (AIC31XX_STEREO_CLASS_D_BIT | AIC31XX_MINIDSP_BIT),
+ AIC3111 = AIC31XX_STEREO_CLASS_D_BIT | AIC31XX_MINIDSP_BIT,
DAC3100 = DAC31XX_BIT,
DAC3101 = DAC31XX_BIT | AIC31XX_STEREO_CLASS_D_BIT,
};
@@ -43,222 +37,167 @@ struct aic31xx_pdata {
#define AIC31XX_REG(page, reg) ((page * 128) + reg)
-/* Page Control Register */
-#define AIC31XX_PAGECTL AIC31XX_REG(0, 0)
+#define AIC31XX_PAGECTL AIC31XX_REG(0, 0) /* Page Control Register */
/* Page 0 Registers */
-/* Software reset register */
-#define AIC31XX_RESET AIC31XX_REG(0, 1)
-/* OT FLAG register */
-#define AIC31XX_OT_FLAG AIC31XX_REG(0, 3)
-/* Clock clock Gen muxing, Multiplexers*/
-#define AIC31XX_CLKMUX AIC31XX_REG(0, 4)
-/* PLL P and R-VAL register */
-#define AIC31XX_PLLPR AIC31XX_REG(0, 5)
-/* PLL J-VAL register */
-#define AIC31XX_PLLJ AIC31XX_REG(0, 6)
-/* PLL D-VAL MSB register */
-#define AIC31XX_PLLDMSB AIC31XX_REG(0, 7)
-/* PLL D-VAL LSB register */
-#define AIC31XX_PLLDLSB AIC31XX_REG(0, 8)
-/* DAC NDAC_VAL register*/
-#define AIC31XX_NDAC AIC31XX_REG(0, 11)
-/* DAC MDAC_VAL register */
-#define AIC31XX_MDAC AIC31XX_REG(0, 12)
-/* DAC OSR setting register 1, MSB value */
-#define AIC31XX_DOSRMSB AIC31XX_REG(0, 13)
-/* DAC OSR setting register 2, LSB value */
-#define AIC31XX_DOSRLSB AIC31XX_REG(0, 14)
+#define AIC31XX_RESET AIC31XX_REG(0, 1) /* Software reset register */
+#define AIC31XX_OT_FLAG AIC31XX_REG(0, 3) /* OT FLAG register */
+#define AIC31XX_CLKMUX AIC31XX_REG(0, 4) /* Clock Gen muxing, Multiplexers */
+#define AIC31XX_PLLPR AIC31XX_REG(0, 5) /* PLL P and R-VAL register */
+#define AIC31XX_PLLJ AIC31XX_REG(0, 6) /* PLL J-VAL register */
+#define AIC31XX_PLLDMSB AIC31XX_REG(0, 7) /* PLL D-VAL MSB register */
+#define AIC31XX_PLLDLSB AIC31XX_REG(0, 8) /* PLL D-VAL LSB register */
+#define AIC31XX_NDAC AIC31XX_REG(0, 11) /* DAC NDAC_VAL register */
+#define AIC31XX_MDAC AIC31XX_REG(0, 12) /* DAC MDAC_VAL register */
+#define AIC31XX_DOSRMSB AIC31XX_REG(0, 13) /* DAC OSR setting register 1, MSB value */
+#define AIC31XX_DOSRLSB AIC31XX_REG(0, 14) /* DAC OSR setting register 2, LSB value */
#define AIC31XX_MINI_DSP_INPOL AIC31XX_REG(0, 16)
-/* Clock setting register 8, PLL */
-#define AIC31XX_NADC AIC31XX_REG(0, 18)
-/* Clock setting register 9, PLL */
-#define AIC31XX_MADC AIC31XX_REG(0, 19)
-/* ADC Oversampling (AOSR) Register */
-#define AIC31XX_AOSR AIC31XX_REG(0, 20)
-/* Clock setting register 9, Multiplexers */
-#define AIC31XX_CLKOUTMUX AIC31XX_REG(0, 25)
-/* Clock setting register 10, CLOCKOUT M divider value */
-#define AIC31XX_CLKOUTMVAL AIC31XX_REG(0, 26)
-/* Audio Interface Setting Register 1 */
-#define AIC31XX_IFACE1 AIC31XX_REG(0, 27)
-/* Audio Data Slot Offset Programming */
-#define AIC31XX_DATA_OFFSET AIC31XX_REG(0, 28)
-/* Audio Interface Setting Register 2 */
-#define AIC31XX_IFACE2 AIC31XX_REG(0, 29)
-/* Clock setting register 11, BCLK N Divider */
-#define AIC31XX_BCLKN AIC31XX_REG(0, 30)
-/* Audio Interface Setting Register 3, Secondary Audio Interface */
-#define AIC31XX_IFACESEC1 AIC31XX_REG(0, 31)
-/* Audio Interface Setting Register 4 */
-#define AIC31XX_IFACESEC2 AIC31XX_REG(0, 32)
-/* Audio Interface Setting Register 5 */
-#define AIC31XX_IFACESEC3 AIC31XX_REG(0, 33)
-/* I2C Bus Condition */
-#define AIC31XX_I2C AIC31XX_REG(0, 34)
-/* ADC FLAG */
-#define AIC31XX_ADCFLAG AIC31XX_REG(0, 36)
-/* DAC Flag Registers */
-#define AIC31XX_DACFLAG1 AIC31XX_REG(0, 37)
+#define AIC31XX_NADC AIC31XX_REG(0, 18) /* Clock setting register 8, PLL */
+#define AIC31XX_MADC AIC31XX_REG(0, 19) /* Clock setting register 9, PLL */
+#define AIC31XX_AOSR AIC31XX_REG(0, 20) /* ADC Oversampling (AOSR) Register */
+#define AIC31XX_CLKOUTMUX AIC31XX_REG(0, 25) /* Clock setting register 9, Multiplexers */
+#define AIC31XX_CLKOUTMVAL AIC31XX_REG(0, 26) /* Clock setting register 10, CLOCKOUT M divider value */
+#define AIC31XX_IFACE1 AIC31XX_REG(0, 27) /* Audio Interface Setting Register 1 */
+#define AIC31XX_DATA_OFFSET AIC31XX_REG(0, 28) /* Audio Data Slot Offset Programming */
+#define AIC31XX_IFACE2 AIC31XX_REG(0, 29) /* Audio Interface Setting Register 2 */
+#define AIC31XX_BCLKN AIC31XX_REG(0, 30) /* Clock setting register 11, BCLK N Divider */
+#define AIC31XX_IFACESEC1 AIC31XX_REG(0, 31) /* Audio Interface Setting Register 3, Secondary Audio Interface */
+#define AIC31XX_IFACESEC2 AIC31XX_REG(0, 32) /* Audio Interface Setting Register 4 */
+#define AIC31XX_IFACESEC3 AIC31XX_REG(0, 33) /* Audio Interface Setting Register 5 */
+#define AIC31XX_I2C AIC31XX_REG(0, 34) /* I2C Bus Condition */
+#define AIC31XX_ADCFLAG AIC31XX_REG(0, 36) /* ADC FLAG */
+#define AIC31XX_DACFLAG1 AIC31XX_REG(0, 37) /* DAC Flag Registers */
#define AIC31XX_DACFLAG2 AIC31XX_REG(0, 38)
-/* Sticky Interrupt flag (overflow) */
-#define AIC31XX_OFFLAG AIC31XX_REG(0, 39)
-/* Sticy DAC Interrupt flags */
-#define AIC31XX_INTRDACFLAG AIC31XX_REG(0, 44)
-/* Sticy ADC Interrupt flags */
-#define AIC31XX_INTRADCFLAG AIC31XX_REG(0, 45)
-/* DAC Interrupt flags 2 */
-#define AIC31XX_INTRDACFLAG2 AIC31XX_REG(0, 46)
-/* ADC Interrupt flags 2 */
-#define AIC31XX_INTRADCFLAG2 AIC31XX_REG(0, 47)
-/* INT1 interrupt control */
-#define AIC31XX_INT1CTRL AIC31XX_REG(0, 48)
-/* INT2 interrupt control */
-#define AIC31XX_INT2CTRL AIC31XX_REG(0, 49)
-/* GPIO1 control */
-#define AIC31XX_GPIO1 AIC31XX_REG(0, 51)
-
+#define AIC31XX_OFFLAG AIC31XX_REG(0, 39) /* Sticky Interrupt flag (overflow) */
+#define AIC31XX_INTRDACFLAG AIC31XX_REG(0, 44) /* Sticky DAC Interrupt flags */
+#define AIC31XX_INTRADCFLAG AIC31XX_REG(0, 45) /* Sticky ADC Interrupt flags */
+#define AIC31XX_INTRDACFLAG2 AIC31XX_REG(0, 46) /* DAC Interrupt flags 2 */
+#define AIC31XX_INTRADCFLAG2 AIC31XX_REG(0, 47) /* ADC Interrupt flags 2 */
+#define AIC31XX_INT1CTRL AIC31XX_REG(0, 48) /* INT1 interrupt control */
+#define AIC31XX_INT2CTRL AIC31XX_REG(0, 49) /* INT2 interrupt control */
+#define AIC31XX_GPIO1 AIC31XX_REG(0, 51) /* GPIO1 control */
#define AIC31XX_DACPRB AIC31XX_REG(0, 60)
-/* ADC Instruction Set Register */
-#define AIC31XX_ADCPRB AIC31XX_REG(0, 61)
-/* DAC channel setup register */
-#define AIC31XX_DACSETUP AIC31XX_REG(0, 63)
-/* DAC Mute and volume control register */
-#define AIC31XX_DACMUTE AIC31XX_REG(0, 64)
-/* Left DAC channel digital volume control */
-#define AIC31XX_LDACVOL AIC31XX_REG(0, 65)
-/* Right DAC channel digital volume control */
-#define AIC31XX_RDACVOL AIC31XX_REG(0, 66)
-/* Headset detection */
-#define AIC31XX_HSDETECT AIC31XX_REG(0, 67)
-/* ADC Digital Mic */
-#define AIC31XX_ADCSETUP AIC31XX_REG(0, 81)
-/* ADC Digital Volume Control Fine Adjust */
-#define AIC31XX_ADCFGA AIC31XX_REG(0, 82)
-/* ADC Digital Volume Control Coarse Adjust */
-#define AIC31XX_ADCVOL AIC31XX_REG(0, 83)
-
+#define AIC31XX_ADCPRB AIC31XX_REG(0, 61) /* ADC Instruction Set Register */
+#define AIC31XX_DACSETUP AIC31XX_REG(0, 63) /* DAC channel setup register */
+#define AIC31XX_DACMUTE AIC31XX_REG(0, 64) /* DAC Mute and volume control register */
+#define AIC31XX_LDACVOL AIC31XX_REG(0, 65) /* Left DAC channel digital volume control */
+#define AIC31XX_RDACVOL AIC31XX_REG(0, 66) /* Right DAC channel digital volume control */
+#define AIC31XX_HSDETECT AIC31XX_REG(0, 67) /* Headset detection */
+#define AIC31XX_ADCSETUP AIC31XX_REG(0, 81) /* ADC Digital Mic */
+#define AIC31XX_ADCFGA AIC31XX_REG(0, 82) /* ADC Digital Volume Control Fine Adjust */
+#define AIC31XX_ADCVOL AIC31XX_REG(0, 83) /* ADC Digital Volume Control Coarse Adjust */
/* Page 1 Registers */
-/* Headphone drivers */
-#define AIC31XX_HPDRIVER AIC31XX_REG(1, 31)
-/* Class-D Speakear Amplifier */
-#define AIC31XX_SPKAMP AIC31XX_REG(1, 32)
-/* HP Output Drivers POP Removal Settings */
-#define AIC31XX_HPPOP AIC31XX_REG(1, 33)
-/* Output Driver PGA Ramp-Down Period Control */
-#define AIC31XX_SPPGARAMP AIC31XX_REG(1, 34)
-/* DAC_L and DAC_R Output Mixer Routing */
-#define AIC31XX_DACMIXERROUTE AIC31XX_REG(1, 35)
-/* Left Analog Vol to HPL */
-#define AIC31XX_LANALOGHPL AIC31XX_REG(1, 36)
-/* Right Analog Vol to HPR */
-#define AIC31XX_RANALOGHPR AIC31XX_REG(1, 37)
-/* Left Analog Vol to SPL */
-#define AIC31XX_LANALOGSPL AIC31XX_REG(1, 38)
-/* Right Analog Vol to SPR */
-#define AIC31XX_RANALOGSPR AIC31XX_REG(1, 39)
-/* HPL Driver */
-#define AIC31XX_HPLGAIN AIC31XX_REG(1, 40)
-/* HPR Driver */
-#define AIC31XX_HPRGAIN AIC31XX_REG(1, 41)
-/* SPL Driver */
-#define AIC31XX_SPLGAIN AIC31XX_REG(1, 42)
-/* SPR Driver */
-#define AIC31XX_SPRGAIN AIC31XX_REG(1, 43)
-/* HP Driver Control */
-#define AIC31XX_HPCONTROL AIC31XX_REG(1, 44)
-/* MIC Bias Control */
-#define AIC31XX_MICBIAS AIC31XX_REG(1, 46)
-/* MIC PGA*/
-#define AIC31XX_MICPGA AIC31XX_REG(1, 47)
-/* Delta-Sigma Mono ADC Channel Fine-Gain Input Selection for P-Terminal */
-#define AIC31XX_MICPGAPI AIC31XX_REG(1, 48)
-/* ADC Input Selection for M-Terminal */
-#define AIC31XX_MICPGAMI AIC31XX_REG(1, 49)
-/* Input CM Settings */
-#define AIC31XX_MICPGACM AIC31XX_REG(1, 50)
-
-/* Bits, masks and shifts */
+#define AIC31XX_HPDRIVER AIC31XX_REG(1, 31) /* Headphone drivers */
+#define AIC31XX_SPKAMP AIC31XX_REG(1, 32) /* Class-D Speaker Amplifier */
+#define AIC31XX_HPPOP AIC31XX_REG(1, 33) /* HP Output Drivers POP Removal Settings */
+#define AIC31XX_SPPGARAMP AIC31XX_REG(1, 34) /* Output Driver PGA Ramp-Down Period Control */
+#define AIC31XX_DACMIXERROUTE AIC31XX_REG(1, 35) /* DAC_L and DAC_R Output Mixer Routing */
+#define AIC31XX_LANALOGHPL AIC31XX_REG(1, 36) /* Left Analog Vol to HPL */
+#define AIC31XX_RANALOGHPR AIC31XX_REG(1, 37) /* Right Analog Vol to HPR */
+#define AIC31XX_LANALOGSPL AIC31XX_REG(1, 38) /* Left Analog Vol to SPL */
+#define AIC31XX_RANALOGSPR AIC31XX_REG(1, 39) /* Right Analog Vol to SPR */
+#define AIC31XX_HPLGAIN AIC31XX_REG(1, 40) /* HPL Driver */
+#define AIC31XX_HPRGAIN AIC31XX_REG(1, 41) /* HPR Driver */
+#define AIC31XX_SPLGAIN AIC31XX_REG(1, 42) /* SPL Driver */
+#define AIC31XX_SPRGAIN AIC31XX_REG(1, 43) /* SPR Driver */
+#define AIC31XX_HPCONTROL AIC31XX_REG(1, 44) /* HP Driver Control */
+#define AIC31XX_MICBIAS AIC31XX_REG(1, 46) /* MIC Bias Control */
+#define AIC31XX_MICPGA AIC31XX_REG(1, 47) /* MIC PGA */
+#define AIC31XX_MICPGAPI AIC31XX_REG(1, 48) /* Delta-Sigma Mono ADC Channel Fine-Gain Input Selection for P-Terminal */
+#define AIC31XX_MICPGAMI AIC31XX_REG(1, 49) /* ADC Input Selection for M-Terminal */
+#define AIC31XX_MICPGACM AIC31XX_REG(1, 50) /* Input CM Settings */
+
+/* Bits, masks, and shifts */
/* AIC31XX_CLKMUX */
-#define AIC31XX_PLL_CLKIN_MASK 0x0c
-#define AIC31XX_PLL_CLKIN_SHIFT 2
-#define AIC31XX_PLL_CLKIN_MCLK 0
-#define AIC31XX_CODEC_CLKIN_MASK 0x03
-#define AIC31XX_CODEC_CLKIN_SHIFT 0
-#define AIC31XX_CODEC_CLKIN_PLL 3
-#define AIC31XX_CODEC_CLKIN_BCLK 1
-
-/* AIC31XX_PLLPR, AIC31XX_NDAC, AIC31XX_MDAC, AIC31XX_NADC, AIC31XX_MADC,
- AIC31XX_BCLKN */
-#define AIC31XX_PLL_MASK 0x7f
-#define AIC31XX_PM_MASK 0x80
+#define AIC31XX_PLL_CLKIN_MASK GENMASK(3, 2)
+#define AIC31XX_PLL_CLKIN_SHIFT (2)
+#define AIC31XX_PLL_CLKIN_MCLK 0x00
+#define AIC31XX_PLL_CLKIN_BCKL 0x01
+#define AIC31XX_PLL_CLKIN_GPIO1 0x02
+#define AIC31XX_PLL_CLKIN_DIN 0x03
+#define AIC31XX_CODEC_CLKIN_MASK GENMASK(1, 0)
+#define AIC31XX_CODEC_CLKIN_SHIFT (0)
+#define AIC31XX_CODEC_CLKIN_MCLK 0x00
+#define AIC31XX_CODEC_CLKIN_BCLK 0x01
+#define AIC31XX_CODEC_CLKIN_GPIO1 0x02
+#define AIC31XX_CODEC_CLKIN_PLL 0x03
+
+/* AIC31XX_PLLPR */
+/* AIC31XX_NDAC */
+/* AIC31XX_MDAC */
+/* AIC31XX_NADC */
+/* AIC31XX_MADC */
+/* AIC31XX_BCLKN */
+#define AIC31XX_PLL_MASK GENMASK(6, 0)
+#define AIC31XX_PM_MASK BIT(7)
/* AIC31XX_IFACE1 */
-#define AIC31XX_WORD_LEN_16BITS 0x00
-#define AIC31XX_WORD_LEN_20BITS 0x01
-#define AIC31XX_WORD_LEN_24BITS 0x02
-#define AIC31XX_WORD_LEN_32BITS 0x03
-#define AIC31XX_IFACE1_DATALEN_MASK 0x30
-#define AIC31XX_IFACE1_DATALEN_SHIFT (4)
-#define AIC31XX_IFACE1_DATATYPE_MASK 0xC0
+#define AIC31XX_IFACE1_DATATYPE_MASK GENMASK(7, 6)
#define AIC31XX_IFACE1_DATATYPE_SHIFT (6)
#define AIC31XX_I2S_MODE 0x00
#define AIC31XX_DSP_MODE 0x01
#define AIC31XX_RIGHT_JUSTIFIED_MODE 0x02
#define AIC31XX_LEFT_JUSTIFIED_MODE 0x03
-#define AIC31XX_IFACE1_MASTER_MASK 0x0C
-#define AIC31XX_BCLK_MASTER 0x08
-#define AIC31XX_WCLK_MASTER 0x04
+#define AIC31XX_IFACE1_DATALEN_MASK GENMASK(5, 4)
+#define AIC31XX_IFACE1_DATALEN_SHIFT (4)
+#define AIC31XX_WORD_LEN_16BITS 0x00
+#define AIC31XX_WORD_LEN_20BITS 0x01
+#define AIC31XX_WORD_LEN_24BITS 0x02
+#define AIC31XX_WORD_LEN_32BITS 0x03
+#define AIC31XX_IFACE1_MASTER_MASK GENMASK(3, 2)
+#define AIC31XX_BCLK_MASTER BIT(3)
+#define AIC31XX_WCLK_MASTER BIT(2)
/* AIC31XX_DATA_OFFSET */
-#define AIC31XX_DATA_OFFSET_MASK 0xFF
+#define AIC31XX_DATA_OFFSET_MASK GENMASK(7, 0)
/* AIC31XX_IFACE2 */
-#define AIC31XX_BCLKINV_MASK 0x08
-#define AIC31XX_BDIVCLK_MASK 0x03
+#define AIC31XX_BCLKINV_MASK BIT(3)
+#define AIC31XX_BDIVCLK_MASK GENMASK(1, 0)
#define AIC31XX_DAC2BCLK 0x00
#define AIC31XX_DACMOD2BCLK 0x01
#define AIC31XX_ADC2BCLK 0x02
#define AIC31XX_ADCMOD2BCLK 0x03
/* AIC31XX_ADCFLAG */
-#define AIC31XX_ADCPWRSTATUS_MASK 0x40
+#define AIC31XX_ADCPWRSTATUS_MASK BIT(6)
/* AIC31XX_DACFLAG1 */
-#define AIC31XX_LDACPWRSTATUS_MASK 0x80
-#define AIC31XX_RDACPWRSTATUS_MASK 0x08
-#define AIC31XX_HPLDRVPWRSTATUS_MASK 0x20
-#define AIC31XX_HPRDRVPWRSTATUS_MASK 0x02
-#define AIC31XX_SPLDRVPWRSTATUS_MASK 0x10
-#define AIC31XX_SPRDRVPWRSTATUS_MASK 0x01
+#define AIC31XX_LDACPWRSTATUS_MASK BIT(7)
+#define AIC31XX_HPLDRVPWRSTATUS_MASK BIT(5)
+#define AIC31XX_SPLDRVPWRSTATUS_MASK BIT(4)
+#define AIC31XX_RDACPWRSTATUS_MASK BIT(3)
+#define AIC31XX_HPRDRVPWRSTATUS_MASK BIT(1)
+#define AIC31XX_SPRDRVPWRSTATUS_MASK BIT(0)
/* AIC31XX_INTRDACFLAG */
-#define AIC31XX_HPSCDETECT_MASK 0x80
-#define AIC31XX_BUTTONPRESS_MASK 0x20
-#define AIC31XX_HSPLUG_MASK 0x10
-#define AIC31XX_LDRCTHRES_MASK 0x08
-#define AIC31XX_RDRCTHRES_MASK 0x04
-#define AIC31XX_DACSINT_MASK 0x02
-#define AIC31XX_DACAINT_MASK 0x01
+#define AIC31XX_HPLSCDETECT BIT(7)
+#define AIC31XX_HPRSCDETECT BIT(6)
+#define AIC31XX_BUTTONPRESS BIT(5)
+#define AIC31XX_HSPLUG BIT(4)
+#define AIC31XX_LDRCTHRES BIT(3)
+#define AIC31XX_RDRCTHRES BIT(2)
+#define AIC31XX_DACSINT BIT(1)
+#define AIC31XX_DACAINT BIT(0)
/* AIC31XX_INT1CTRL */
-#define AIC31XX_HSPLUGDET_MASK 0x80
-#define AIC31XX_BUTTONPRESSDET_MASK 0x40
-#define AIC31XX_DRCTHRES_MASK 0x20
-#define AIC31XX_AGCNOISE_MASK 0x10
-#define AIC31XX_OC_MASK 0x08
-#define AIC31XX_ENGINE_MASK 0x04
+#define AIC31XX_HSPLUGDET BIT(7)
+#define AIC31XX_BUTTONPRESSDET BIT(6)
+#define AIC31XX_DRCTHRES BIT(5)
+#define AIC31XX_AGCNOISE BIT(4)
+#define AIC31XX_SC BIT(3)
+#define AIC31XX_ENGINE BIT(2)
/* AIC31XX_DACSETUP */
-#define AIC31XX_SOFTSTEP_MASK 0x03
+#define AIC31XX_SOFTSTEP_MASK GENMASK(1, 0)
/* AIC31XX_DACMUTE */
-#define AIC31XX_DACMUTE_MASK 0x0C
+#define AIC31XX_DACMUTE_MASK GENMASK(3, 2)
/* AIC31XX_MICBIAS */
-#define AIC31XX_MICBIAS_MASK 0x03
-#define AIC31XX_MICBIAS_SHIFT 0
+#define AIC31XX_MICBIAS_MASK GENMASK(1, 0)
+#define AIC31XX_MICBIAS_SHIFT 0
#endif /* _TLV320AIC31XX_H */
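
Since this GENMASK()/BIT() conversion is meant to be value-preserving (the old literals were 0x08 for BCLK master and 0x04 for WCLK master), a compile-time check against the old masks is cheap insurance. This sketch redefines the two helpers locally (in the kernel they come from the bitops headers), so it builds as a standalone C11 file:

    #define BIT(n)        (1U << (n))
    #define GENMASK(h, l) (((~0U) >> (31 - (h))) & ~((1U << (l)) - 1U))

    _Static_assert(GENMASK(3, 2) == 0x0c, "IFACE1 master mask unchanged");
    _Static_assert(BIT(3) == 0x08 && BIT(2) == 0x04, "BCLK/WCLK master bits unchanged");
    _Static_assert(GENMASK(7, 6) == 0xc0, "IFACE1 datatype mask unchanged");

    int main(void) { return 0; }
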
diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
index e694f5f04eb9..fea019343c3b 100644
--- a/sound/soc/codecs/tlv320aic32x4.c
+++ b/sound/soc/codecs/tlv320aic32x4.c
@@ -281,34 +281,34 @@ static const struct snd_kcontrol_new aic32x4_snd_controls[] = {
static const struct aic32x4_rate_divs aic32x4_divs[] = {
/* 8k rate */
- {AIC32X4_FREQ_12000000, 8000, 1, 7, 6800, 768, 5, 3, 128, 5, 18, 24},
- {AIC32X4_FREQ_24000000, 8000, 2, 7, 6800, 768, 15, 1, 64, 45, 4, 24},
- {AIC32X4_FREQ_25000000, 8000, 2, 7, 3728, 768, 15, 1, 64, 45, 4, 24},
+ {12000000, 8000, 1, 7, 6800, 768, 5, 3, 128, 5, 18, 24},
+ {24000000, 8000, 2, 7, 6800, 768, 15, 1, 64, 45, 4, 24},
+ {25000000, 8000, 2, 7, 3728, 768, 15, 1, 64, 45, 4, 24},
/* 11.025k rate */
- {AIC32X4_FREQ_12000000, 11025, 1, 7, 5264, 512, 8, 2, 128, 8, 8, 16},
- {AIC32X4_FREQ_24000000, 11025, 2, 7, 5264, 512, 16, 1, 64, 32, 4, 16},
+ {12000000, 11025, 1, 7, 5264, 512, 8, 2, 128, 8, 8, 16},
+ {24000000, 11025, 2, 7, 5264, 512, 16, 1, 64, 32, 4, 16},
/* 16k rate */
- {AIC32X4_FREQ_12000000, 16000, 1, 7, 6800, 384, 5, 3, 128, 5, 9, 12},
- {AIC32X4_FREQ_24000000, 16000, 2, 7, 6800, 384, 15, 1, 64, 18, 5, 12},
- {AIC32X4_FREQ_25000000, 16000, 2, 7, 3728, 384, 15, 1, 64, 18, 5, 12},
+ {12000000, 16000, 1, 7, 6800, 384, 5, 3, 128, 5, 9, 12},
+ {24000000, 16000, 2, 7, 6800, 384, 15, 1, 64, 18, 5, 12},
+ {25000000, 16000, 2, 7, 3728, 384, 15, 1, 64, 18, 5, 12},
/* 22.05k rate */
- {AIC32X4_FREQ_12000000, 22050, 1, 7, 5264, 256, 4, 4, 128, 4, 8, 8},
- {AIC32X4_FREQ_24000000, 22050, 2, 7, 5264, 256, 16, 1, 64, 16, 4, 8},
- {AIC32X4_FREQ_25000000, 22050, 2, 7, 2253, 256, 16, 1, 64, 16, 4, 8},
+ {12000000, 22050, 1, 7, 5264, 256, 4, 4, 128, 4, 8, 8},
+ {24000000, 22050, 2, 7, 5264, 256, 16, 1, 64, 16, 4, 8},
+ {25000000, 22050, 2, 7, 2253, 256, 16, 1, 64, 16, 4, 8},
/* 32k rate */
- {AIC32X4_FREQ_12000000, 32000, 1, 7, 1680, 192, 2, 7, 64, 2, 21, 6},
- {AIC32X4_FREQ_24000000, 32000, 2, 7, 1680, 192, 7, 2, 64, 7, 6, 6},
+ {12000000, 32000, 1, 7, 1680, 192, 2, 7, 64, 2, 21, 6},
+ {24000000, 32000, 2, 7, 1680, 192, 7, 2, 64, 7, 6, 6},
/* 44.1k rate */
- {AIC32X4_FREQ_12000000, 44100, 1, 7, 5264, 128, 2, 8, 128, 2, 8, 4},
- {AIC32X4_FREQ_24000000, 44100, 2, 7, 5264, 128, 8, 2, 64, 8, 4, 4},
- {AIC32X4_FREQ_25000000, 44100, 2, 7, 2253, 128, 8, 2, 64, 8, 4, 4},
+ {12000000, 44100, 1, 7, 5264, 128, 2, 8, 128, 2, 8, 4},
+ {24000000, 44100, 2, 7, 5264, 128, 8, 2, 64, 8, 4, 4},
+ {25000000, 44100, 2, 7, 2253, 128, 8, 2, 64, 8, 4, 4},
/* 48k rate */
- {AIC32X4_FREQ_12000000, 48000, 1, 8, 1920, 128, 2, 8, 128, 2, 8, 4},
- {AIC32X4_FREQ_24000000, 48000, 2, 8, 1920, 128, 8, 2, 64, 8, 4, 4},
- {AIC32X4_FREQ_25000000, 48000, 2, 7, 8643, 128, 8, 2, 64, 8, 4, 4},
+ {12000000, 48000, 1, 8, 1920, 128, 2, 8, 128, 2, 8, 4},
+ {24000000, 48000, 2, 8, 1920, 128, 8, 2, 64, 8, 4, 4},
+ {25000000, 48000, 2, 7, 8643, 128, 8, 2, 64, 8, 4, 4},
/* 96k rate */
- {AIC32X4_FREQ_25000000, 96000, 2, 7, 8643, 64, 4, 4, 64, 4, 4, 1},
+ {25000000, 96000, 2, 7, 8643, 64, 4, 4, 64, 4, 4, 1},
};
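
Each row of this table encodes a full clock tree. Assuming the field order mclk, rate, p, j, d, dosr, ndac, mdac, aosr, nadc, madc, bclk_n, the PLL output is MCLK * (J + D/10000) / P and the DAC sample rate is PLL / (NDAC * MDAC * DOSR). Checking the 12 MHz / 48 kHz row as a worked example:

    #include <stdio.h>

    int main(void)
    {
            /* row {12000000, 48000, 1, 8, 1920, 128, 2, 8, ...}: P=1, J=8, D=1920 */
            double pll = 12000000.0 * (8 + 1920 / 10000.0) / 1; /* 98.304 MHz */

            printf("fs = %.0f Hz\n", pll / (2 * 8 * 128)); /* fs = 48000 Hz */
            return 0;
    }
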
static const struct snd_kcontrol_new hpl_output_mixer_controls[] = {
@@ -601,9 +601,9 @@ static int aic32x4_set_dai_sysclk(struct snd_soc_dai *codec_dai,
struct aic32x4_priv *aic32x4 = snd_soc_codec_get_drvdata(codec);
switch (freq) {
- case AIC32X4_FREQ_12000000:
- case AIC32X4_FREQ_24000000:
- case AIC32X4_FREQ_25000000:
+ case 12000000:
+ case 24000000:
+ case 25000000:
aic32x4->sysclk = freq;
return 0;
}
@@ -614,16 +614,9 @@ static int aic32x4_set_dai_sysclk(struct snd_soc_dai *codec_dai,
static int aic32x4_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
{
struct snd_soc_codec *codec = codec_dai->codec;
- u8 iface_reg_1;
- u8 iface_reg_2;
- u8 iface_reg_3;
-
- iface_reg_1 = snd_soc_read(codec, AIC32X4_IFACE1);
- iface_reg_1 = iface_reg_1 & ~(3 << 6 | 3 << 2);
- iface_reg_2 = snd_soc_read(codec, AIC32X4_IFACE2);
- iface_reg_2 = 0;
- iface_reg_3 = snd_soc_read(codec, AIC32X4_IFACE3);
- iface_reg_3 = iface_reg_3 & ~(1 << 3);
+ u8 iface_reg_1 = 0;
+ u8 iface_reg_2 = 0;
+ u8 iface_reg_3 = 0;
/* set master/slave audio interface */
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
@@ -641,30 +634,37 @@ static int aic32x4_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
case SND_SOC_DAIFMT_I2S:
break;
case SND_SOC_DAIFMT_DSP_A:
- iface_reg_1 |= (AIC32X4_DSP_MODE << AIC32X4_PLLJ_SHIFT);
- iface_reg_3 |= (1 << 3); /* invert bit clock */
+ iface_reg_1 |= (AIC32X4_DSP_MODE <<
+ AIC32X4_IFACE1_DATATYPE_SHIFT);
+ iface_reg_3 |= AIC32X4_BCLKINV_MASK; /* invert bit clock */
iface_reg_2 = 0x01; /* add offset 1 */
break;
case SND_SOC_DAIFMT_DSP_B:
- iface_reg_1 |= (AIC32X4_DSP_MODE << AIC32X4_PLLJ_SHIFT);
- iface_reg_3 |= (1 << 3); /* invert bit clock */
+ iface_reg_1 |= (AIC32X4_DSP_MODE <<
+ AIC32X4_IFACE1_DATATYPE_SHIFT);
+ iface_reg_3 |= AIC32X4_BCLKINV_MASK; /* invert bit clock */
break;
case SND_SOC_DAIFMT_RIGHT_J:
- iface_reg_1 |=
- (AIC32X4_RIGHT_JUSTIFIED_MODE << AIC32X4_PLLJ_SHIFT);
+ iface_reg_1 |= (AIC32X4_RIGHT_JUSTIFIED_MODE <<
+ AIC32X4_IFACE1_DATATYPE_SHIFT);
break;
case SND_SOC_DAIFMT_LEFT_J:
- iface_reg_1 |=
- (AIC32X4_LEFT_JUSTIFIED_MODE << AIC32X4_PLLJ_SHIFT);
+ iface_reg_1 |= (AIC32X4_LEFT_JUSTIFIED_MODE <<
+ AIC32X4_IFACE1_DATATYPE_SHIFT);
break;
default:
printk(KERN_ERR "aic32x4: invalid DAI interface format\n");
return -EINVAL;
}
- snd_soc_write(codec, AIC32X4_IFACE1, iface_reg_1);
- snd_soc_write(codec, AIC32X4_IFACE2, iface_reg_2);
- snd_soc_write(codec, AIC32X4_IFACE3, iface_reg_3);
+ snd_soc_update_bits(codec, AIC32X4_IFACE1,
+ AIC32X4_IFACE1_DATATYPE_MASK |
+ AIC32X4_IFACE1_MASTER_MASK, iface_reg_1);
+ snd_soc_update_bits(codec, AIC32X4_IFACE2,
+ AIC32X4_DATA_OFFSET_MASK, iface_reg_2);
+ snd_soc_update_bits(codec, AIC32X4_IFACE3,
+ AIC32X4_BCLKINV_MASK, iface_reg_3);
+
return 0;
}
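
The switch to snd_soc_update_bits() matters because it is a read-modify-write: only bits inside the mask change, so fields configured elsewhere in the same registers (clock muxing, BDIV source) survive a format change. A minimal model of the operation:

    #include <stdio.h>

    static unsigned int update_bits(unsigned int old, unsigned int mask,
                                    unsigned int val)
    {
            return (old & ~mask) | (val & mask);
    }

    int main(void)
    {
            /* set the datatype field (bits 7:6) without clobbering bits 3:2 */
            printf("%#x\n", update_bits(0x0c, 0xc0, 0x40)); /* 0x4c */
            return 0;
    }
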
@@ -674,7 +674,8 @@ static int aic32x4_hw_params(struct snd_pcm_substream *substream,
{
struct snd_soc_codec *codec = dai->codec;
struct aic32x4_priv *aic32x4 = snd_soc_codec_get_drvdata(codec);
- u8 data;
+ u8 iface1_reg = 0;
+ u8 dacsetup_reg = 0;
int i;
i = aic32x4_get_divs(aic32x4->sysclk, params_rate(params));
@@ -683,82 +684,88 @@ static int aic32x4_hw_params(struct snd_pcm_substream *substream,
return i;
}
- /* Use PLL as CODEC_CLKIN and DAC_MOD_CLK as BDIV_CLKIN */
- snd_soc_write(codec, AIC32X4_CLKMUX, AIC32X4_PLLCLKIN);
- snd_soc_write(codec, AIC32X4_IFACE3, AIC32X4_DACMOD2BCLK);
+ /* MCLK as PLL_CLKIN */
+ snd_soc_update_bits(codec, AIC32X4_CLKMUX, AIC32X4_PLL_CLKIN_MASK,
+ AIC32X4_PLL_CLKIN_MCLK << AIC32X4_PLL_CLKIN_SHIFT);
+ /* PLL as CODEC_CLKIN */
+ snd_soc_update_bits(codec, AIC32X4_CLKMUX, AIC32X4_CODEC_CLKIN_MASK,
+ AIC32X4_CODEC_CLKIN_PLL << AIC32X4_CODEC_CLKIN_SHIFT);
+ /* DAC_MOD_CLK as BDIV_CLKIN */
+ snd_soc_update_bits(codec, AIC32X4_IFACE3, AIC32X4_BDIVCLK_MASK,
+ AIC32X4_DACMOD2BCLK << AIC32X4_BDIVCLK_SHIFT);
+
+ /* We will fix R value to 1 and will make P & J=K.D as variable */
+ snd_soc_update_bits(codec, AIC32X4_PLLPR, AIC32X4_PLL_R_MASK, 0x01);
- /* We will fix R value to 1 and will make P & J=K.D as varialble */
- data = snd_soc_read(codec, AIC32X4_PLLPR);
- data &= ~(7 << 4);
- snd_soc_write(codec, AIC32X4_PLLPR,
- (data | (aic32x4_divs[i].p_val << 4) | 0x01));
+ /* PLL P value */
+ snd_soc_update_bits(codec, AIC32X4_PLLPR, AIC32X4_PLL_P_MASK,
+ aic32x4_divs[i].p_val << AIC32X4_PLL_P_SHIFT);
+ /* PLL J value */
snd_soc_write(codec, AIC32X4_PLLJ, aic32x4_divs[i].pll_j);
+ /* PLL D value */
snd_soc_write(codec, AIC32X4_PLLDMSB, (aic32x4_divs[i].pll_d >> 8));
- snd_soc_write(codec, AIC32X4_PLLDLSB,
- (aic32x4_divs[i].pll_d & 0xff));
+ snd_soc_write(codec, AIC32X4_PLLDLSB, (aic32x4_divs[i].pll_d & 0xff));
/* NDAC divider value */
- data = snd_soc_read(codec, AIC32X4_NDAC);
- data &= ~(0x7f);
- snd_soc_write(codec, AIC32X4_NDAC, data | aic32x4_divs[i].ndac);
+ snd_soc_update_bits(codec, AIC32X4_NDAC,
+ AIC32X4_NDAC_MASK, aic32x4_divs[i].ndac);
/* MDAC divider value */
- data = snd_soc_read(codec, AIC32X4_MDAC);
- data &= ~(0x7f);
- snd_soc_write(codec, AIC32X4_MDAC, data | aic32x4_divs[i].mdac);
+ snd_soc_update_bits(codec, AIC32X4_MDAC,
+ AIC32X4_MDAC_MASK, aic32x4_divs[i].mdac);
/* DOSR MSB & LSB values */
snd_soc_write(codec, AIC32X4_DOSRMSB, aic32x4_divs[i].dosr >> 8);
- snd_soc_write(codec, AIC32X4_DOSRLSB,
- (aic32x4_divs[i].dosr & 0xff));
+ snd_soc_write(codec, AIC32X4_DOSRLSB, (aic32x4_divs[i].dosr & 0xff));
/* NADC divider value */
- data = snd_soc_read(codec, AIC32X4_NADC);
- data &= ~(0x7f);
- snd_soc_write(codec, AIC32X4_NADC, data | aic32x4_divs[i].nadc);
+ snd_soc_update_bits(codec, AIC32X4_NADC,
+ AIC32X4_NADC_MASK, aic32x4_divs[i].nadc);
/* MADC divider value */
- data = snd_soc_read(codec, AIC32X4_MADC);
- data &= ~(0x7f);
- snd_soc_write(codec, AIC32X4_MADC, data | aic32x4_divs[i].madc);
+ snd_soc_update_bits(codec, AIC32X4_MADC,
+ AIC32X4_MADC_MASK, aic32x4_divs[i].madc);
/* AOSR value */
snd_soc_write(codec, AIC32X4_AOSR, aic32x4_divs[i].aosr);
/* BCLK N divider */
- data = snd_soc_read(codec, AIC32X4_BCLKN);
- data &= ~(0x7f);
- snd_soc_write(codec, AIC32X4_BCLKN, data | aic32x4_divs[i].blck_N);
+ snd_soc_update_bits(codec, AIC32X4_BCLKN,
+ AIC32X4_BCLK_MASK, aic32x4_divs[i].blck_N);
- data = snd_soc_read(codec, AIC32X4_IFACE1);
- data = data & ~(3 << 4);
switch (params_width(params)) {
case 16:
+ iface1_reg |= (AIC32X4_WORD_LEN_16BITS <<
+ AIC32X4_IFACE1_DATALEN_SHIFT);
break;
case 20:
- data |= (AIC32X4_WORD_LEN_20BITS << AIC32X4_DOSRMSB_SHIFT);
+ iface1_reg |= (AIC32X4_WORD_LEN_20BITS <<
+ AIC32X4_IFACE1_DATALEN_SHIFT);
break;
case 24:
- data |= (AIC32X4_WORD_LEN_24BITS << AIC32X4_DOSRMSB_SHIFT);
+ iface1_reg |= (AIC32X4_WORD_LEN_24BITS <<
+ AIC32X4_IFACE1_DATALEN_SHIFT);
break;
case 32:
- data |= (AIC32X4_WORD_LEN_32BITS << AIC32X4_DOSRMSB_SHIFT);
+ iface1_reg |= (AIC32X4_WORD_LEN_32BITS <<
+ AIC32X4_IFACE1_DATALEN_SHIFT);
break;
}
- snd_soc_write(codec, AIC32X4_IFACE1, data);
+ snd_soc_update_bits(codec, AIC32X4_IFACE1,
+ AIC32X4_IFACE1_DATALEN_MASK, iface1_reg);
if (params_channels(params) == 1) {
- data = AIC32X4_RDAC2LCHN | AIC32X4_LDAC2LCHN;
+ dacsetup_reg = AIC32X4_RDAC2LCHN | AIC32X4_LDAC2LCHN;
} else {
if (aic32x4->swapdacs)
- data = AIC32X4_RDAC2LCHN | AIC32X4_LDAC2RCHN;
+ dacsetup_reg = AIC32X4_RDAC2LCHN | AIC32X4_LDAC2RCHN;
else
- data = AIC32X4_LDAC2LCHN | AIC32X4_RDAC2RCHN;
+ dacsetup_reg = AIC32X4_LDAC2LCHN | AIC32X4_RDAC2RCHN;
}
- snd_soc_update_bits(codec, AIC32X4_DACSETUP, AIC32X4_DAC_CHAN_MASK,
- data);
+ snd_soc_update_bits(codec, AIC32X4_DACSETUP,
+ AIC32X4_DAC_CHAN_MASK, dacsetup_reg);
return 0;
}
@@ -766,13 +773,10 @@ static int aic32x4_hw_params(struct snd_pcm_substream *substream,
static int aic32x4_mute(struct snd_soc_dai *dai, int mute)
{
struct snd_soc_codec *codec = dai->codec;
- u8 dac_reg;
- dac_reg = snd_soc_read(codec, AIC32X4_DACMUTE) & ~AIC32X4_MUTEON;
- if (mute)
- snd_soc_write(codec, AIC32X4_DACMUTE, dac_reg | AIC32X4_MUTEON);
- else
- snd_soc_write(codec, AIC32X4_DACMUTE, dac_reg);
+ snd_soc_update_bits(codec, AIC32X4_DACMUTE,
+ AIC32X4_MUTEON, mute ? AIC32X4_MUTEON : 0);
+
return 0;
}
diff --git a/sound/soc/codecs/tlv320aic32x4.h b/sound/soc/codecs/tlv320aic32x4.h
index da7cec482bcb..e9df49edbf19 100644
--- a/sound/soc/codecs/tlv320aic32x4.h
+++ b/sound/soc/codecs/tlv320aic32x4.h
@@ -19,141 +19,189 @@ int aic32x4_remove(struct device *dev);
/* tlv320aic32x4 register space (in decimal to match datasheet) */
-#define AIC32X4_PAGE1 128
-
-#define AIC32X4_PSEL 0
-#define AIC32X4_RESET 1
-#define AIC32X4_CLKMUX 4
-#define AIC32X4_PLLPR 5
-#define AIC32X4_PLLJ 6
-#define AIC32X4_PLLDMSB 7
-#define AIC32X4_PLLDLSB 8
-#define AIC32X4_NDAC 11
-#define AIC32X4_MDAC 12
-#define AIC32X4_DOSRMSB 13
-#define AIC32X4_DOSRLSB 14
-#define AIC32X4_NADC 18
-#define AIC32X4_MADC 19
-#define AIC32X4_AOSR 20
-#define AIC32X4_CLKMUX2 25
-#define AIC32X4_CLKOUTM 26
-#define AIC32X4_IFACE1 27
-#define AIC32X4_IFACE2 28
-#define AIC32X4_IFACE3 29
-#define AIC32X4_BCLKN 30
-#define AIC32X4_IFACE4 31
-#define AIC32X4_IFACE5 32
-#define AIC32X4_IFACE6 33
-#define AIC32X4_GPIOCTL 52
-#define AIC32X4_DOUTCTL 53
-#define AIC32X4_DINCTL 54
-#define AIC32X4_MISOCTL 55
-#define AIC32X4_SCLKCTL 56
-#define AIC32X4_DACSPB 60
-#define AIC32X4_ADCSPB 61
-#define AIC32X4_DACSETUP 63
-#define AIC32X4_DACMUTE 64
-#define AIC32X4_LDACVOL 65
-#define AIC32X4_RDACVOL 66
-#define AIC32X4_ADCSETUP 81
-#define AIC32X4_ADCFGA 82
-#define AIC32X4_LADCVOL 83
-#define AIC32X4_RADCVOL 84
-#define AIC32X4_LAGC1 86
-#define AIC32X4_LAGC2 87
-#define AIC32X4_LAGC3 88
-#define AIC32X4_LAGC4 89
-#define AIC32X4_LAGC5 90
-#define AIC32X4_LAGC6 91
-#define AIC32X4_LAGC7 92
-#define AIC32X4_RAGC1 94
-#define AIC32X4_RAGC2 95
-#define AIC32X4_RAGC3 96
-#define AIC32X4_RAGC4 97
-#define AIC32X4_RAGC5 98
-#define AIC32X4_RAGC6 99
-#define AIC32X4_RAGC7 100
-#define AIC32X4_PWRCFG (AIC32X4_PAGE1 + 1)
-#define AIC32X4_LDOCTL (AIC32X4_PAGE1 + 2)
-#define AIC32X4_OUTPWRCTL (AIC32X4_PAGE1 + 9)
-#define AIC32X4_CMMODE (AIC32X4_PAGE1 + 10)
-#define AIC32X4_HPLROUTE (AIC32X4_PAGE1 + 12)
-#define AIC32X4_HPRROUTE (AIC32X4_PAGE1 + 13)
-#define AIC32X4_LOLROUTE (AIC32X4_PAGE1 + 14)
-#define AIC32X4_LORROUTE (AIC32X4_PAGE1 + 15)
-#define AIC32X4_HPLGAIN (AIC32X4_PAGE1 + 16)
-#define AIC32X4_HPRGAIN (AIC32X4_PAGE1 + 17)
-#define AIC32X4_LOLGAIN (AIC32X4_PAGE1 + 18)
-#define AIC32X4_LORGAIN (AIC32X4_PAGE1 + 19)
-#define AIC32X4_HEADSTART (AIC32X4_PAGE1 + 20)
-#define AIC32X4_MICBIAS (AIC32X4_PAGE1 + 51)
-#define AIC32X4_LMICPGAPIN (AIC32X4_PAGE1 + 52)
-#define AIC32X4_LMICPGANIN (AIC32X4_PAGE1 + 54)
-#define AIC32X4_RMICPGAPIN (AIC32X4_PAGE1 + 55)
-#define AIC32X4_RMICPGANIN (AIC32X4_PAGE1 + 57)
-#define AIC32X4_FLOATINGINPUT (AIC32X4_PAGE1 + 58)
-#define AIC32X4_LMICPGAVOL (AIC32X4_PAGE1 + 59)
-#define AIC32X4_RMICPGAVOL (AIC32X4_PAGE1 + 60)
-
-#define AIC32X4_FREQ_12000000 12000000
-#define AIC32X4_FREQ_24000000 24000000
-#define AIC32X4_FREQ_25000000 25000000
-
-#define AIC32X4_WORD_LEN_16BITS 0x00
-#define AIC32X4_WORD_LEN_20BITS 0x01
-#define AIC32X4_WORD_LEN_24BITS 0x02
-#define AIC32X4_WORD_LEN_32BITS 0x03
-
-#define AIC32X4_LADC_EN (1 << 7)
-#define AIC32X4_RADC_EN (1 << 6)
-
-#define AIC32X4_I2S_MODE 0x00
-#define AIC32X4_DSP_MODE 0x01
-#define AIC32X4_RIGHT_JUSTIFIED_MODE 0x02
-#define AIC32X4_LEFT_JUSTIFIED_MODE 0x03
-
-#define AIC32X4_AVDDWEAKDISABLE 0x08
-#define AIC32X4_LDOCTLEN 0x01
-
-#define AIC32X4_LDOIN_18_36 0x01
-#define AIC32X4_LDOIN2HP 0x02
-
-#define AIC32X4_DACSPBLOCK_MASK 0x1f
-#define AIC32X4_ADCSPBLOCK_MASK 0x1f
-
-#define AIC32X4_PLLJ_SHIFT 6
-#define AIC32X4_DOSRMSB_SHIFT 4
-
-#define AIC32X4_PLLCLKIN 0x03
-
-#define AIC32X4_MICBIAS_LDOIN 0x08
+#define AIC32X4_REG(page, reg) (((page) * 128) + (reg))
+
+#define AIC32X4_PSEL AIC32X4_REG(0, 0)
+
+#define AIC32X4_RESET AIC32X4_REG(0, 1)
+#define AIC32X4_CLKMUX AIC32X4_REG(0, 4)
+#define AIC32X4_PLLPR AIC32X4_REG(0, 5)
+#define AIC32X4_PLLJ AIC32X4_REG(0, 6)
+#define AIC32X4_PLLDMSB AIC32X4_REG(0, 7)
+#define AIC32X4_PLLDLSB AIC32X4_REG(0, 8)
+#define AIC32X4_NDAC AIC32X4_REG(0, 11)
+#define AIC32X4_MDAC AIC32X4_REG(0, 12)
+#define AIC32X4_DOSRMSB AIC32X4_REG(0, 13)
+#define AIC32X4_DOSRLSB AIC32X4_REG(0, 14)
+#define AIC32X4_NADC AIC32X4_REG(0, 18)
+#define AIC32X4_MADC AIC32X4_REG(0, 19)
+#define AIC32X4_AOSR AIC32X4_REG(0, 20)
+#define AIC32X4_CLKMUX2 AIC32X4_REG(0, 25)
+#define AIC32X4_CLKOUTM AIC32X4_REG(0, 26)
+#define AIC32X4_IFACE1 AIC32X4_REG(0, 27)
+#define AIC32X4_IFACE2 AIC32X4_REG(0, 28)
+#define AIC32X4_IFACE3 AIC32X4_REG(0, 29)
+#define AIC32X4_BCLKN AIC32X4_REG(0, 30)
+#define AIC32X4_IFACE4 AIC32X4_REG(0, 31)
+#define AIC32X4_IFACE5 AIC32X4_REG(0, 32)
+#define AIC32X4_IFACE6 AIC32X4_REG(0, 33)
+#define AIC32X4_GPIOCTL AIC32X4_REG(0, 52)
+#define AIC32X4_DOUTCTL AIC32X4_REG(0, 53)
+#define AIC32X4_DINCTL AIC32X4_REG(0, 54)
+#define AIC32X4_MISOCTL AIC32X4_REG(0, 55)
+#define AIC32X4_SCLKCTL AIC32X4_REG(0, 56)
+#define AIC32X4_DACSPB AIC32X4_REG(0, 60)
+#define AIC32X4_ADCSPB AIC32X4_REG(0, 61)
+#define AIC32X4_DACSETUP AIC32X4_REG(0, 63)
+#define AIC32X4_DACMUTE AIC32X4_REG(0, 64)
+#define AIC32X4_LDACVOL AIC32X4_REG(0, 65)
+#define AIC32X4_RDACVOL AIC32X4_REG(0, 66)
+#define AIC32X4_ADCSETUP AIC32X4_REG(0, 81)
+#define AIC32X4_ADCFGA AIC32X4_REG(0, 82)
+#define AIC32X4_LADCVOL AIC32X4_REG(0, 83)
+#define AIC32X4_RADCVOL AIC32X4_REG(0, 84)
+#define AIC32X4_LAGC1 AIC32X4_REG(0, 86)
+#define AIC32X4_LAGC2 AIC32X4_REG(0, 87)
+#define AIC32X4_LAGC3 AIC32X4_REG(0, 88)
+#define AIC32X4_LAGC4 AIC32X4_REG(0, 89)
+#define AIC32X4_LAGC5 AIC32X4_REG(0, 90)
+#define AIC32X4_LAGC6 AIC32X4_REG(0, 91)
+#define AIC32X4_LAGC7 AIC32X4_REG(0, 92)
+#define AIC32X4_RAGC1 AIC32X4_REG(0, 94)
+#define AIC32X4_RAGC2 AIC32X4_REG(0, 95)
+#define AIC32X4_RAGC3 AIC32X4_REG(0, 96)
+#define AIC32X4_RAGC4 AIC32X4_REG(0, 97)
+#define AIC32X4_RAGC5 AIC32X4_REG(0, 98)
+#define AIC32X4_RAGC6 AIC32X4_REG(0, 99)
+#define AIC32X4_RAGC7 AIC32X4_REG(0, 100)
+
+#define AIC32X4_PWRCFG AIC32X4_REG(1, 1)
+#define AIC32X4_LDOCTL AIC32X4_REG(1, 2)
+#define AIC32X4_OUTPWRCTL AIC32X4_REG(1, 9)
+#define AIC32X4_CMMODE AIC32X4_REG(1, 10)
+#define AIC32X4_HPLROUTE AIC32X4_REG(1, 12)
+#define AIC32X4_HPRROUTE AIC32X4_REG(1, 13)
+#define AIC32X4_LOLROUTE AIC32X4_REG(1, 14)
+#define AIC32X4_LORROUTE AIC32X4_REG(1, 15)
+#define AIC32X4_HPLGAIN AIC32X4_REG(1, 16)
+#define AIC32X4_HPRGAIN AIC32X4_REG(1, 17)
+#define AIC32X4_LOLGAIN AIC32X4_REG(1, 18)
+#define AIC32X4_LORGAIN AIC32X4_REG(1, 19)
+#define AIC32X4_HEADSTART AIC32X4_REG(1, 20)
+#define AIC32X4_MICBIAS AIC32X4_REG(1, 51)
+#define AIC32X4_LMICPGAPIN AIC32X4_REG(1, 52)
+#define AIC32X4_LMICPGANIN AIC32X4_REG(1, 54)
+#define AIC32X4_RMICPGAPIN AIC32X4_REG(1, 55)
+#define AIC32X4_RMICPGANIN AIC32X4_REG(1, 57)
+#define AIC32X4_FLOATINGINPUT AIC32X4_REG(1, 58)
+#define AIC32X4_LMICPGAVOL AIC32X4_REG(1, 59)
+#define AIC32X4_RMICPGAVOL AIC32X4_REG(1, 60)
+
+/* Bits, masks, and shifts */
+
+/* AIC32X4_CLKMUX */
+#define AIC32X4_PLL_CLKIN_MASK GENMASK(3, 2)
+#define AIC32X4_PLL_CLKIN_SHIFT (2)
+#define AIC32X4_PLL_CLKIN_MCLK (0x00)
+#define AIC32X4_PLL_CLKIN_BCKL (0x01)
+#define AIC32X4_PLL_CLKIN_GPIO1 (0x02)
+#define AIC32X4_PLL_CLKIN_DIN (0x03)
+#define AIC32X4_CODEC_CLKIN_MASK GENMASK(1, 0)
+#define AIC32X4_CODEC_CLKIN_SHIFT (0)
+#define AIC32X4_CODEC_CLKIN_MCLK (0x00)
+#define AIC32X4_CODEC_CLKIN_BCLK (0x01)
+#define AIC32X4_CODEC_CLKIN_GPIO1 (0x02)
+#define AIC32X4_CODEC_CLKIN_PLL (0x03)
+
+/* AIC32X4_PLLPR */
+#define AIC32X4_PLLEN BIT(7)
+#define AIC32X4_PLL_P_MASK GENMASK(6, 4)
+#define AIC32X4_PLL_P_SHIFT (4)
+#define AIC32X4_PLL_R_MASK GENMASK(3, 0)
+
+/* AIC32X4_NDAC */
+#define AIC32X4_NDACEN BIT(7)
+#define AIC32X4_NDAC_MASK GENMASK(6, 0)
+
+/* AIC32X4_MDAC */
+#define AIC32X4_MDACEN BIT(7)
+#define AIC32X4_MDAC_MASK GENMASK(6, 0)
+
+/* AIC32X4_NADC */
+#define AIC32X4_NADCEN BIT(7)
+#define AIC32X4_NADC_MASK GENMASK(6, 0)
+
+/* AIC32X4_MADC */
+#define AIC32X4_MADCEN BIT(7)
+#define AIC32X4_MADC_MASK GENMASK(6, 0)
+
+/* AIC32X4_BCLKN */
+#define AIC32X4_BCLKEN BIT(7)
+#define AIC32X4_BCLK_MASK GENMASK(6, 0)
+
+/* AIC32X4_IFACE1 */
+#define AIC32X4_IFACE1_DATATYPE_MASK GENMASK(7, 6)
+#define AIC32X4_IFACE1_DATATYPE_SHIFT (6)
+#define AIC32X4_I2S_MODE (0x00)
+#define AIC32X4_DSP_MODE (0x01)
+#define AIC32X4_RIGHT_JUSTIFIED_MODE (0x02)
+#define AIC32X4_LEFT_JUSTIFIED_MODE (0x03)
+#define AIC32X4_IFACE1_DATALEN_MASK GENMASK(5, 4)
+#define AIC32X4_IFACE1_DATALEN_SHIFT (4)
+#define AIC32X4_WORD_LEN_16BITS (0x00)
+#define AIC32X4_WORD_LEN_20BITS (0x01)
+#define AIC32X4_WORD_LEN_24BITS (0x02)
+#define AIC32X4_WORD_LEN_32BITS (0x03)
+#define AIC32X4_IFACE1_MASTER_MASK GENMASK(3, 2)
+#define AIC32X4_BCLKMASTER BIT(3)
+#define AIC32X4_WCLKMASTER BIT(2)
+
+/* AIC32X4_IFACE2 */
+#define AIC32X4_DATA_OFFSET_MASK GENMASK(7, 0)
+
+/* AIC32X4_IFACE3 */
+#define AIC32X4_BCLKINV_MASK BIT(3)
+#define AIC32X4_BDIVCLK_MASK GENMASK(1, 0)
+#define AIC32X4_BDIVCLK_SHIFT (0)
+#define AIC32X4_DAC2BCLK (0x00)
+#define AIC32X4_DACMOD2BCLK (0x01)
+#define AIC32X4_ADC2BCLK (0x02)
+#define AIC32X4_ADCMOD2BCLK (0x03)
+
+/* AIC32X4_DACSETUP */
+#define AIC32X4_DAC_CHAN_MASK GENMASK(5, 2)
+#define AIC32X4_LDAC2RCHN BIT(5)
+#define AIC32X4_LDAC2LCHN BIT(4)
+#define AIC32X4_RDAC2LCHN BIT(3)
+#define AIC32X4_RDAC2RCHN BIT(2)
+
+/* AIC32X4_DACMUTE */
+#define AIC32X4_MUTEON 0x0C
+
+/* AIC32X4_ADCSETUP */
+#define AIC32X4_LADC_EN BIT(7)
+#define AIC32X4_RADC_EN BIT(6)
+
+/* AIC32X4_PWRCFG */
+#define AIC32X4_AVDDWEAKDISABLE BIT(3)
+
+/* AIC32X4_LDOCTL */
+#define AIC32X4_LDOCTLEN BIT(0)
+
+/* AIC32X4_CMMODE */
+#define AIC32X4_LDOIN_18_36 BIT(0)
+#define AIC32X4_LDOIN2HP BIT(1)
+
+/* AIC32X4_MICBIAS */
+#define AIC32X4_MICBIAS_LDOIN BIT(3)
#define AIC32X4_MICBIAS_2075V 0x60
+/* AIC32X4_LMICPGANIN */
#define AIC32X4_LMICPGANIN_IN2R_10K 0x10
#define AIC32X4_LMICPGANIN_CM1L_10K 0x40
+
+/* AIC32X4_RMICPGANIN */
#define AIC32X4_RMICPGANIN_IN1L_10K 0x10
#define AIC32X4_RMICPGANIN_CM1R_10K 0x40
-#define AIC32X4_LMICPGAVOL_NOGAIN 0x80
-#define AIC32X4_RMICPGAVOL_NOGAIN 0x80
-
-#define AIC32X4_BCLKMASTER 0x08
-#define AIC32X4_WCLKMASTER 0x04
-#define AIC32X4_PLLEN (0x01 << 7)
-#define AIC32X4_NDACEN (0x01 << 7)
-#define AIC32X4_MDACEN (0x01 << 7)
-#define AIC32X4_NADCEN (0x01 << 7)
-#define AIC32X4_MADCEN (0x01 << 7)
-#define AIC32X4_BCLKEN (0x01 << 7)
-#define AIC32X4_DACEN (0x03 << 6)
-#define AIC32X4_RDAC2LCHN (0x02 << 2)
-#define AIC32X4_LDAC2RCHN (0x02 << 4)
-#define AIC32X4_LDAC2LCHN (0x01 << 4)
-#define AIC32X4_RDAC2RCHN (0x01 << 2)
-#define AIC32X4_DAC_CHAN_MASK 0x3c
-
-#define AIC32X4_SSTEP2WCLK 0x01
-#define AIC32X4_MUTEON 0x0C
-#define AIC32X4_DACMOD2BCLK 0x01
-
#endif /* _TLV320AIC32X4_H */
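
The AIC32X4_REG(page, reg) scheme flattens the datasheet's page/offset numbering into one linear space of 128 registers per page, which lets regmap handle page switching behind the scenes. A compile-time spot check of two entries from this header:

    #define AIC32X4_REG(page, reg) (((page) * 128) + (reg))

    _Static_assert(AIC32X4_REG(0, 64) == 64, "DACMUTE: page 0, reg 64");
    _Static_assert(AIC32X4_REG(1, 1) == 129, "PWRCFG: page 1, reg 1");

    int main(void) { return 0; }
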
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index 06f92571eba4..b751cad545da 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -1804,11 +1804,18 @@ static int aic3x_i2c_probe(struct i2c_client *i2c,
if (!ai3x_setup)
return -ENOMEM;
- ret = of_get_named_gpio(np, "gpio-reset", 0);
- if (ret >= 0)
+ ret = of_get_named_gpio(np, "reset-gpios", 0);
+ if (ret >= 0) {
aic3x->gpio_reset = ret;
- else
- aic3x->gpio_reset = -1;
+ } else {
+ ret = of_get_named_gpio(np, "gpio-reset", 0);
+ if (ret > 0) {
+ dev_warn(&i2c->dev, "Using deprecated property \"gpio-reset\", please update your DT\n");
+ aic3x->gpio_reset = ret;
+ } else {
+ aic3x->gpio_reset = -1;
+ }
+ }
if (of_property_read_u32_array(np, "ai3x-gpio-func",
ai3x_setup->gpio_func, 2) >= 0) {
diff --git a/sound/soc/codecs/tlv320dac33.c b/sound/soc/codecs/tlv320dac33.c
index 5b94a151539c..8c71d2f876ff 100644
--- a/sound/soc/codecs/tlv320dac33.c
+++ b/sound/soc/codecs/tlv320dac33.c
@@ -106,6 +106,7 @@ struct tlv320dac33_priv {
int mode1_latency; /* latency caused by the i2c writes in
* us */
u8 burst_bclkdiv; /* BCLK divider value in burst mode */
+ u8 *reg_cache;
unsigned int burst_rate; /* Interface speed in Burst modes */
int keep_bclk; /* Keep the BCLK continuously running
@@ -121,7 +122,7 @@ struct tlv320dac33_priv {
unsigned int uthr;
enum dac33_state state;
- void *control_data;
+ struct i2c_client *i2c;
};
static const u8 dac33_reg[DAC33_CACHEREGNUM] = {
@@ -173,7 +174,8 @@ static const u8 dac33_reg[DAC33_CACHEREGNUM] = {
static inline unsigned int dac33_read_reg_cache(struct snd_soc_codec *codec,
unsigned reg)
{
- u8 *cache = codec->reg_cache;
+ struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
+ u8 *cache = dac33->reg_cache;
if (reg >= DAC33_CACHEREGNUM)
return 0;
@@ -183,7 +185,8 @@ static inline unsigned int dac33_read_reg_cache(struct snd_soc_codec *codec,
static inline void dac33_write_reg_cache(struct snd_soc_codec *codec,
u8 reg, u8 value)
{
- u8 *cache = codec->reg_cache;
+ struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
+ u8 *cache = dac33->reg_cache;
if (reg >= DAC33_CACHEREGNUM)
return;
@@ -200,7 +203,7 @@ static int dac33_read(struct snd_soc_codec *codec, unsigned int reg,
/* If powered off, return the cached value */
if (dac33->chip_power) {
- val = i2c_smbus_read_byte_data(codec->control_data, value[0]);
+ val = i2c_smbus_read_byte_data(dac33->i2c, value[0]);
if (val < 0) {
dev_err(codec->dev, "Read failed (%d)\n", val);
value[0] = dac33_read_reg_cache(codec, reg);
@@ -233,7 +236,7 @@ static int dac33_write(struct snd_soc_codec *codec, unsigned int reg,
dac33_write_reg_cache(codec, data[0], data[1]);
if (dac33->chip_power) {
- ret = codec->hw_write(codec->control_data, data, 2);
+ ret = i2c_master_send(dac33->i2c, data, 2);
if (ret != 2)
dev_err(codec->dev, "Write failed (%d)\n", ret);
else
@@ -244,7 +247,7 @@ static int dac33_write(struct snd_soc_codec *codec, unsigned int reg,
}
static int dac33_write_locked(struct snd_soc_codec *codec, unsigned int reg,
- unsigned int value)
+ unsigned int value)
{
struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
int ret;
@@ -280,7 +283,7 @@ static int dac33_write16(struct snd_soc_codec *codec, unsigned int reg,
if (dac33->chip_power) {
/* We need to set autoincrement mode for 16 bit writes */
data[0] |= DAC33_I2C_ADDR_AUTOINC;
- ret = codec->hw_write(codec->control_data, data, 3);
+ ret = i2c_master_send(dac33->i2c, data, 3);
if (ret != 3)
dev_err(codec->dev, "Write failed (%d)\n", ret);
else
@@ -1379,8 +1382,6 @@ static int dac33_soc_probe(struct snd_soc_codec *codec)
struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
int ret = 0;
- codec->control_data = dac33->control_data;
- codec->hw_write = (hw_write_t) i2c_master_send;
dac33->codec = codec;
/* Read the tlv320dac33 ID registers */
@@ -1438,9 +1439,7 @@ static const struct snd_soc_codec_driver soc_codec_dev_tlv320dac33 = {
.write = dac33_write_locked,
.set_bias_level = dac33_set_bias_level,
.idle_bias_off = true,
- .reg_cache_size = ARRAY_SIZE(dac33_reg),
- .reg_word_size = sizeof(u8),
- .reg_cache_default = dac33_reg,
+
.probe = dac33_soc_probe,
.remove = dac33_soc_remove,
@@ -1499,7 +1498,14 @@ static int dac33_i2c_probe(struct i2c_client *client,
if (dac33 == NULL)
return -ENOMEM;
- dac33->control_data = client;
+ dac33->reg_cache = devm_kmemdup(&client->dev,
+ dac33_reg,
+ ARRAY_SIZE(dac33_reg) * sizeof(u8),
+ GFP_KERNEL);
+ if (!dac33->reg_cache)
+ return -ENOMEM;
+
+ dac33->i2c = client;
mutex_init(&dac33->mutex);
spin_lock_init(&dac33->lock);
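
With the reg_cache_* fields gone from the codec driver struct, the register cache becomes the driver's own responsibility: devm_kmemdup() hands each device a private writable copy of the read-only defaults and frees it automatically on unbind. A userspace model of the duplication step (names hypothetical):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* stand-in for devm_kmemdup(): copy an immutable default table */
    static unsigned char *memdup(const unsigned char *src, size_t n)
    {
            unsigned char *p = malloc(n);

            if (p)
                    memcpy(p, src, n);
            return p;
    }

    int main(void)
    {
            static const unsigned char defaults[] = { 0x00, 0x1f, 0x80 };
            unsigned char *cache = memdup(defaults, sizeof(defaults));

            if (!cache)
                    return 1;
            cache[1] = 0x2a; /* per-device writes leave the defaults intact */
            printf("%#x %#x\n", defaults[1], cache[1]); /* 0x1f 0x2a */
            free(cache);
            return 0;
    }
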
diff --git a/sound/soc/codecs/ts3a227e.c b/sound/soc/codecs/ts3a227e.c
index 738e04b09116..1271e7e1fc78 100644
--- a/sound/soc/codecs/ts3a227e.c
+++ b/sound/soc/codecs/ts3a227e.c
@@ -241,7 +241,7 @@ int ts3a227e_enable_jack_detect(struct snd_soc_component *component,
{
struct ts3a227e *ts3a227e = snd_soc_component_get_drvdata(component);
- snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_MEDIA);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND);
snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP);
snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN);
diff --git a/sound/soc/codecs/tscs42xx.c b/sound/soc/codecs/tscs42xx.c
new file mode 100644
index 000000000000..e7661d0315e6
--- /dev/null
+++ b/sound/soc/codecs/tscs42xx.c
@@ -0,0 +1,1456 @@
+// SPDX-License-Identifier: GPL-2.0
+// tscs42xx.c -- TSCS42xx ALSA SoC Audio driver
+// Copyright 2017 Tempo Semiconductor, Inc.
+// Author: Steven Eckhoff <steven.eckhoff.opensource@gmail.com>
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/tlv.h>
+
+#include "tscs42xx.h"
+
+#define COEFF_SIZE 3
+#define BIQUAD_COEFF_COUNT 5
+#define BIQUAD_SIZE (COEFF_SIZE * BIQUAD_COEFF_COUNT)
+
+#define COEFF_RAM_MAX_ADDR 0xcd
+#define COEFF_RAM_COEFF_COUNT (COEFF_RAM_MAX_ADDR + 1)
+#define COEFF_RAM_SIZE (COEFF_SIZE * COEFF_RAM_COEFF_COUNT)
+
+struct tscs42xx {
+
+ int bclk_ratio;
+ int samplerate;
+ unsigned int blrcm;
+ struct mutex audio_params_lock;
+
+ u8 coeff_ram[COEFF_RAM_SIZE];
+ bool coeff_ram_synced;
+ struct mutex coeff_ram_lock;
+
+ struct mutex pll_lock;
+
+ struct regmap *regmap;
+
+ struct device *dev;
+};
+
+struct coeff_ram_ctl {
+ unsigned int addr;
+ struct soc_bytes_ext bytes_ext;
+};
+
+static bool tscs42xx_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case R_DACCRWRL:
+ case R_DACCRWRM:
+ case R_DACCRWRH:
+ case R_DACCRRDL:
+ case R_DACCRRDM:
+ case R_DACCRRDH:
+ case R_DACCRSTAT:
+ case R_DACCRADDR:
+ case R_PLLCTL0:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool tscs42xx_precious(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case R_DACCRWRL:
+ case R_DACCRWRM:
+ case R_DACCRWRH:
+ case R_DACCRRDL:
+ case R_DACCRRDM:
+ case R_DACCRRDH:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config tscs42xx_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .volatile_reg = tscs42xx_volatile,
+ .precious_reg = tscs42xx_precious,
+ .max_register = R_DACMBCREL3H,
+
+ .cache_type = REGCACHE_RBTREE,
+ .can_multi_write = true,
+};
+
+#define MAX_PLL_LOCK_20MS_WAITS 1
+static bool plls_locked(struct snd_soc_codec *codec)
+{
+ int ret;
+ int count = MAX_PLL_LOCK_20MS_WAITS;
+
+ do {
+ ret = snd_soc_read(codec, R_PLLCTL0);
+ if (ret < 0) {
+ dev_err(codec->dev,
+ "Failed to read PLL lock status (%d)\n", ret);
+ return false;
+ } else if (ret > 0) {
+ return true;
+ }
+ msleep(20);
+ } while (count--);
+
+ return false;
+}
+
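
plls_locked() is an open-coded bounded poll: a couple of reads of R_PLLCTL0 separated by 20 ms sleeps. If the helper is available in the target tree, regmap_read_poll_timeout() expresses the same idea more compactly; an untested sketch, assuming the driver data is fetched first:

    struct tscs42xx *tscs42xx = snd_soc_codec_get_drvdata(codec);
    unsigned int val;
    int err;

    /* poll every 20 ms, give up after 40 ms, succeed when val is non-zero */
    err = regmap_read_poll_timeout(tscs42xx->regmap, R_PLLCTL0,
                                   val, val, 20000, 40000);
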
+static int sample_rate_to_pll_freq_out(int sample_rate)
+{
+ switch (sample_rate) {
+ case 11025:
+ case 22050:
+ case 44100:
+ case 88200:
+ return 112896000;
+ case 8000:
+ case 16000:
+ case 32000:
+ case 48000:
+ case 96000:
+ return 122880000;
+ default:
+ return -EINVAL;
+ }
+}
+
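
Both PLL targets are exactly 2560 times the base rate of their family, so every supported rate divides the selected output evenly: 112896000 = 2560 * 44100 and 122880000 = 2560 * 48000. Quick arithmetic check:

    #include <stdio.h>

    int main(void)
    {
            printf("%d %d\n", 112896000 / 44100, 122880000 / 48000); /* 2560 2560 */
            printf("%d %d\n", 112896000 % 44100, 122880000 % 48000); /* 0 0 */
            return 0;
    }
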
+#define DACCRSTAT_MAX_TRYS 10
+static int write_coeff_ram(struct snd_soc_codec *codec, u8 *coeff_ram,
+ unsigned int addr, unsigned int coeff_cnt)
+{
+ struct tscs42xx *tscs42xx = snd_soc_codec_get_drvdata(codec);
+ int cnt;
+ int trys;
+ int ret;
+
+ for (cnt = 0; cnt < coeff_cnt; cnt++, addr++) {
+
+ for (trys = 0; trys < DACCRSTAT_MAX_TRYS; trys++) {
+ ret = snd_soc_read(codec, R_DACCRSTAT);
+ if (ret < 0) {
+ dev_err(codec->dev,
+ "Failed to read stat (%d)\n", ret);
+ return ret;
+ }
+ if (!ret)
+ break;
+ }
+
+ if (trys == DACCRSTAT_MAX_TRYS) {
+ ret = -EIO;
+ dev_err(codec->dev,
+ "dac coefficient write error (%d)\n", ret);
+ return ret;
+ }
+
+ ret = regmap_write(tscs42xx->regmap, R_DACCRADDR, addr);
+ if (ret < 0) {
+ dev_err(codec->dev,
+ "Failed to write dac ram address (%d)\n", ret);
+ return ret;
+ }
+
+ ret = regmap_bulk_write(tscs42xx->regmap, R_DACCRWRL,
+ &coeff_ram[addr * COEFF_SIZE],
+ COEFF_SIZE);
+ if (ret < 0) {
+ dev_err(codec->dev,
+ "Failed to write dac ram (%d)\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int power_up_audio_plls(struct snd_soc_codec *codec)
+{
+ struct tscs42xx *tscs42xx = snd_soc_codec_get_drvdata(codec);
+ int freq_out;
+ int ret;
+ unsigned int mask;
+ unsigned int val;
+
+ freq_out = sample_rate_to_pll_freq_out(tscs42xx->samplerate);
+ switch (freq_out) {
+ case 122880000: /* 48k */
+ mask = RM_PLLCTL1C_PDB_PLL1;
+ val = RV_PLLCTL1C_PDB_PLL1_ENABLE;
+ break;
+ case 112896000: /* 44.1k */
+ mask = RM_PLLCTL1C_PDB_PLL2;
+ val = RV_PLLCTL1C_PDB_PLL2_ENABLE;
+ break;
+ default:
+ ret = -EINVAL;
+ dev_err(codec->dev, "Unrecognized PLL output freq (%d)\n", ret);
+ return ret;
+ }
+
+ mutex_lock(&tscs42xx->pll_lock);
+
+ ret = snd_soc_update_bits(codec, R_PLLCTL1C, mask, val);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to turn PLL on (%d)\n", ret);
+ goto exit;
+ }
+
+ if (!plls_locked(codec)) {
+ dev_err(codec->dev, "Failed to lock plls\n");
+ ret = -ENOMSG;
+ goto exit;
+ }
+
+ ret = 0;
+exit:
+ mutex_unlock(&tscs42xx->pll_lock);
+
+ return ret;
+}
+
+static int power_down_audio_plls(struct snd_soc_codec *codec)
+{
+ struct tscs42xx *tscs42xx = snd_soc_codec_get_drvdata(codec);
+ int ret;
+
+ mutex_lock(&tscs42xx->pll_lock);
+
+ ret = snd_soc_update_bits(codec, R_PLLCTL1C,
+ RM_PLLCTL1C_PDB_PLL1,
+ RV_PLLCTL1C_PDB_PLL1_DISABLE);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to turn PLL off (%d)\n", ret);
+ goto exit;
+ }
+ ret = snd_soc_update_bits(codec, R_PLLCTL1C,
+ RM_PLLCTL1C_PDB_PLL2,
+ RV_PLLCTL1C_PDB_PLL2_DISABLE);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to turn PLL off (%d)\n", ret);
+ goto exit;
+ }
+
+ ret = 0;
+exit:
+ mutex_unlock(&tscs42xx->pll_lock);
+
+ return ret;
+}
+
+static int coeff_ram_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct tscs42xx *tscs42xx = snd_soc_codec_get_drvdata(codec);
+ struct coeff_ram_ctl *ctl =
+ (struct coeff_ram_ctl *)kcontrol->private_value;
+ struct soc_bytes_ext *params = &ctl->bytes_ext;
+
+ mutex_lock(&tscs42xx->coeff_ram_lock);
+
+ memcpy(ucontrol->value.bytes.data,
+ &tscs42xx->coeff_ram[ctl->addr * COEFF_SIZE], params->max);
+
+ mutex_unlock(&tscs42xx->coeff_ram_lock);
+
+ return 0;
+}
+
+static int coeff_ram_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct tscs42xx *tscs42xx = snd_soc_codec_get_drvdata(codec);
+ struct coeff_ram_ctl *ctl =
+ (struct coeff_ram_ctl *)kcontrol->private_value;
+ struct soc_bytes_ext *params = &ctl->bytes_ext;
+ unsigned int coeff_cnt = params->max / COEFF_SIZE;
+ int ret;
+
+ mutex_lock(&tscs42xx->coeff_ram_lock);
+
+ tscs42xx->coeff_ram_synced = false;
+
+ memcpy(&tscs42xx->coeff_ram[ctl->addr * COEFF_SIZE],
+ ucontrol->value.bytes.data, params->max);
+
+ mutex_lock(&tscs42xx->pll_lock);
+
+ if (plls_locked(codec)) {
+ ret = write_coeff_ram(codec, tscs42xx->coeff_ram,
+ ctl->addr, coeff_cnt);
+ if (ret < 0) {
+ dev_err(codec->dev,
+ "Failed to flush coeff ram cache (%d)\n", ret);
+ goto exit;
+ }
+ tscs42xx->coeff_ram_synced = true;
+ }
+
+ ret = 0;
+exit:
+ mutex_unlock(&tscs42xx->pll_lock);
+
+ mutex_unlock(&tscs42xx->coeff_ram_lock);
+
+ return ret;
+}
+
+/* Input L Capture Route */
+static char const * const input_select_text[] = {
+ "Line 1", "Line 2", "Line 3", "D2S"
+};
+
+static const struct soc_enum left_input_select_enum =
+SOC_ENUM_SINGLE(R_INSELL, FB_INSELL, ARRAY_SIZE(input_select_text),
+ input_select_text);
+
+static const struct snd_kcontrol_new left_input_select =
+SOC_DAPM_ENUM("LEFT_INPUT_SELECT_ENUM", left_input_select_enum);
+
+/* Input R Capture Route */
+static const struct soc_enum right_input_select_enum =
+SOC_ENUM_SINGLE(R_INSELR, FB_INSELR, ARRAY_SIZE(input_select_text),
+ input_select_text);
+
+static const struct snd_kcontrol_new right_input_select =
+SOC_DAPM_ENUM("RIGHT_INPUT_SELECT_ENUM", right_input_select_enum);
+
+/* Input Channel Mapping */
+static char const * const ch_map_select_text[] = {
+ "Normal", "Left to Right", "Right to Left", "Swap"
+};
+
+static const struct soc_enum ch_map_select_enum =
+SOC_ENUM_SINGLE(R_AIC2, FB_AIC2_ADCDSEL, ARRAY_SIZE(ch_map_select_text),
+ ch_map_select_text);
+
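+/*
+ * The VREF and mic bias supplies are given 20 ms to settle after power-up
+ * and before power-down.
+ */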
+static int dapm_vref_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ msleep(20);
+ return 0;
+}
+
+static int dapm_micb_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ msleep(20);
+ return 0;
+}
+
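+/*
+ * DAPM supply event: bring the required PLL up before the audio path powers
+ * up, and take it back down after the path powers down.
+ */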
+static int pll_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ int ret;
+
+ if (SND_SOC_DAPM_EVENT_ON(event))
+ ret = power_up_audio_plls(codec);
+ else
+ ret = power_down_audio_plls(codec);
+
+ return ret;
+}
+
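+/*
+ * DAC power-up event: if a coefficient RAM write was deferred because the
+ * PLLs were not locked, flush the cached coefficients to the hardware now.
+ */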
+static int dac_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct tscs42xx *tscs42xx = snd_soc_codec_get_drvdata(codec);
+ int ret;
+
+ mutex_lock(&tscs42xx->coeff_ram_lock);
+
+ if (!tscs42xx->coeff_ram_synced) {
+ ret = write_coeff_ram(codec, tscs42xx->coeff_ram, 0x00,
+ COEFF_RAM_COEFF_COUNT);
+ if (ret < 0)
+ goto exit;
+ tscs42xx->coeff_ram_synced = true;
+ }
+
+ ret = 0;
+exit:
+ mutex_unlock(&tscs42xx->coeff_ram_lock);
+
+ return ret;
+}
+
+static const struct snd_soc_dapm_widget tscs42xx_dapm_widgets[] = {
+ /* Vref */
+ SND_SOC_DAPM_SUPPLY_S("Vref", 1, R_PWRM2, FB_PWRM2_VREF, 0,
+ dapm_vref_event, SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_PRE_PMD),
+
+ /* PLL */
+ SND_SOC_DAPM_SUPPLY("PLL", SND_SOC_NOPM, 0, 0, pll_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+ /* Headphone */
+ SND_SOC_DAPM_DAC_E("DAC L", "HiFi Playback", R_PWRM2, FB_PWRM2_HPL, 0,
+ dac_event, SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_DAC_E("DAC R", "HiFi Playback", R_PWRM2, FB_PWRM2_HPR, 0,
+ dac_event, SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_OUTPUT("Headphone L"),
+ SND_SOC_DAPM_OUTPUT("Headphone R"),
+
+ /* Speaker */
+ SND_SOC_DAPM_DAC_E("ClassD L", "HiFi Playback",
+ R_PWRM2, FB_PWRM2_SPKL, 0,
+ dac_event, SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_DAC_E("ClassD R", "HiFi Playback",
+ R_PWRM2, FB_PWRM2_SPKR, 0,
+ dac_event, SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_OUTPUT("Speaker L"),
+ SND_SOC_DAPM_OUTPUT("Speaker R"),
+
+ /* Capture */
+ SND_SOC_DAPM_PGA("Analog In PGA L", R_PWRM1, FB_PWRM1_PGAL, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("Analog In PGA R", R_PWRM1, FB_PWRM1_PGAR, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("Analog Boost L", R_PWRM1, FB_PWRM1_BSTL, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("Analog Boost R", R_PWRM1, FB_PWRM1_BSTR, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("ADC Mute", R_CNVRTR0, FB_CNVRTR0_HPOR, true, NULL, 0),
+ SND_SOC_DAPM_ADC("ADC L", "HiFi Capture", R_PWRM1, FB_PWRM1_ADCL, 0),
+ SND_SOC_DAPM_ADC("ADC R", "HiFi Capture", R_PWRM1, FB_PWRM1_ADCR, 0),
+
+ /* Capture Input */
+ SND_SOC_DAPM_MUX("Input L Capture Route", R_PWRM2,
+ FB_PWRM2_INSELL, 0, &left_input_select),
+ SND_SOC_DAPM_MUX("Input R Capture Route", R_PWRM2,
+ FB_PWRM2_INSELR, 0, &right_input_select),
+
+ /* Digital Mic */
+ SND_SOC_DAPM_SUPPLY_S("Digital Mic Enable", 2, R_DMICCTL,
+ FB_DMICCTL_DMICEN, 0, NULL,
+ SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_PRE_PMD),
+
+ /* Analog Mic */
+ SND_SOC_DAPM_SUPPLY_S("Mic Bias", 2, R_PWRM1, FB_PWRM1_MICB,
+ 0, dapm_micb_event, SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_PRE_PMD),
+
+ /* Line In */
+ SND_SOC_DAPM_INPUT("Line In 1 L"),
+ SND_SOC_DAPM_INPUT("Line In 1 R"),
+ SND_SOC_DAPM_INPUT("Line In 2 L"),
+ SND_SOC_DAPM_INPUT("Line In 2 R"),
+ SND_SOC_DAPM_INPUT("Line In 3 L"),
+ SND_SOC_DAPM_INPUT("Line In 3 R"),
+};
+
+static const struct snd_soc_dapm_route tscs42xx_intercon[] = {
+ {"DAC L", NULL, "PLL"},
+ {"DAC R", NULL, "PLL"},
+ {"DAC L", NULL, "Vref"},
+ {"DAC R", NULL, "Vref"},
+ {"Headphone L", NULL, "DAC L"},
+ {"Headphone R", NULL, "DAC R"},
+
+ {"ClassD L", NULL, "PLL"},
+ {"ClassD R", NULL, "PLL"},
+ {"ClassD L", NULL, "Vref"},
+ {"ClassD R", NULL, "Vref"},
+ {"Speaker L", NULL, "ClassD L"},
+ {"Speaker R", NULL, "ClassD R"},
+
+ {"Input L Capture Route", NULL, "Vref"},
+ {"Input R Capture Route", NULL, "Vref"},
+
+ {"Mic Bias", NULL, "Vref"},
+
+ {"Input L Capture Route", "Line 1", "Line In 1 L"},
+ {"Input R Capture Route", "Line 1", "Line In 1 R"},
+ {"Input L Capture Route", "Line 2", "Line In 2 L"},
+ {"Input R Capture Route", "Line 2", "Line In 2 R"},
+ {"Input L Capture Route", "Line 3", "Line In 3 L"},
+ {"Input R Capture Route", "Line 3", "Line In 3 R"},
+
+ {"Analog In PGA L", NULL, "Input L Capture Route"},
+ {"Analog In PGA R", NULL, "Input R Capture Route"},
+ {"Analog Boost L", NULL, "Analog In PGA L"},
+ {"Analog Boost R", NULL, "Analog In PGA R"},
+ {"ADC Mute", NULL, "Analog Boost L"},
+ {"ADC Mute", NULL, "Analog Boost R"},
+ {"ADC L", NULL, "PLL"},
+ {"ADC R", NULL, "PLL"},
+ {"ADC L", NULL, "ADC Mute"},
+ {"ADC R", NULL, "ADC Mute"},
+};
+
+/************
+ * CONTROLS *
+ ************/
+
+static char const * const eq_band_enable_text[] = {
+ "Prescale only",
+ "Band1",
+ "Band1:2",
+ "Band1:3",
+ "Band1:4",
+ "Band1:5",
+ "Band1:6",
+};
+
+static char const * const level_detection_text[] = {
+ "Average",
+ "Peak",
+};
+
+static char const * const level_detection_window_text[] = {
+ "512 Samples",
+ "64 Samples",
+};
+
+static char const * const compressor_ratio_text[] = {
+ "Reserved", "1.5:1", "2:1", "3:1", "4:1", "5:1", "6:1",
+ "7:1", "8:1", "9:1", "10:1", "11:1", "12:1", "13:1", "14:1",
+ "15:1", "16:1", "17:1", "18:1", "19:1", "20:1",
+};
+
+static DECLARE_TLV_DB_SCALE(hpvol_scale, -8850, 75, 0);
+static DECLARE_TLV_DB_SCALE(spkvol_scale, -7725, 75, 0);
+static DECLARE_TLV_DB_SCALE(dacvol_scale, -9563, 38, 0);
+static DECLARE_TLV_DB_SCALE(adcvol_scale, -7125, 38, 0);
+static DECLARE_TLV_DB_SCALE(invol_scale, -1725, 75, 0);
+static DECLARE_TLV_DB_SCALE(mic_boost_scale, 0, 1000, 0);
+static DECLARE_TLV_DB_MINMAX(mugain_scale, 0, 4650);
+static DECLARE_TLV_DB_MINMAX(compth_scale, -9562, 0);
+
+static const struct soc_enum eq1_band_enable_enum =
+ SOC_ENUM_SINGLE(R_CONFIG1, FB_CONFIG1_EQ1_BE,
+ ARRAY_SIZE(eq_band_enable_text), eq_band_enable_text);
+
+static const struct soc_enum eq2_band_enable_enum =
+ SOC_ENUM_SINGLE(R_CONFIG1, FB_CONFIG1_EQ2_BE,
+ ARRAY_SIZE(eq_band_enable_text), eq_band_enable_text);
+
+static const struct soc_enum cle_level_detection_enum =
+ SOC_ENUM_SINGLE(R_CLECTL, FB_CLECTL_LVL_MODE,
+ ARRAY_SIZE(level_detection_text),
+ level_detection_text);
+
+static const struct soc_enum cle_level_detection_window_enum =
+ SOC_ENUM_SINGLE(R_CLECTL, FB_CLECTL_WINDOWSEL,
+ ARRAY_SIZE(level_detection_window_text),
+ level_detection_window_text);
+
+static const struct soc_enum mbc_level_detection_enums[] = {
+ SOC_ENUM_SINGLE(R_DACMBCCTL, FB_DACMBCCTL_LVLMODE1,
+ ARRAY_SIZE(level_detection_text),
+ level_detection_text),
+ SOC_ENUM_SINGLE(R_DACMBCCTL, FB_DACMBCCTL_LVLMODE2,
+ ARRAY_SIZE(level_detection_text),
+ level_detection_text),
+ SOC_ENUM_SINGLE(R_DACMBCCTL, FB_DACMBCCTL_LVLMODE3,
+ ARRAY_SIZE(level_detection_text),
+ level_detection_text),
+};
+
+static const struct soc_enum mbc_level_detection_window_enums[] = {
+ SOC_ENUM_SINGLE(R_DACMBCCTL, FB_DACMBCCTL_WINSEL1,
+ ARRAY_SIZE(level_detection_window_text),
+ level_detection_window_text),
+ SOC_ENUM_SINGLE(R_DACMBCCTL, FB_DACMBCCTL_WINSEL2,
+ ARRAY_SIZE(level_detection_window_text),
+ level_detection_window_text),
+ SOC_ENUM_SINGLE(R_DACMBCCTL, FB_DACMBCCTL_WINSEL3,
+ ARRAY_SIZE(level_detection_window_text),
+ level_detection_window_text),
+};
+
+static const struct soc_enum compressor_ratio_enum =
+ SOC_ENUM_SINGLE(R_CMPRAT, FB_CMPRAT,
+ ARRAY_SIZE(compressor_ratio_text), compressor_ratio_text);
+
+static const struct soc_enum dac_mbc1_compressor_ratio_enum =
+ SOC_ENUM_SINGLE(R_DACMBCRAT1, FB_DACMBCRAT1_RATIO,
+ ARRAY_SIZE(compressor_ratio_text), compressor_ratio_text);
+
+static const struct soc_enum dac_mbc2_compressor_ratio_enum =
+ SOC_ENUM_SINGLE(R_DACMBCRAT2, FB_DACMBCRAT2_RATIO,
+ ARRAY_SIZE(compressor_ratio_text), compressor_ratio_text);
+
+static const struct soc_enum dac_mbc3_compressor_ratio_enum =
+ SOC_ENUM_SINGLE(R_DACMBCRAT3, FB_DACMBCRAT3_RATIO,
+ ARRAY_SIZE(compressor_ratio_text), compressor_ratio_text);
+
+static int bytes_info_ext(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ struct coeff_ram_ctl *ctl =
+ (struct coeff_ram_ctl *)kcontrol->private_value;
+ struct soc_bytes_ext *params = &ctl->bytes_ext;
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+ uinfo->count = params->max;
+
+ return 0;
+}
+
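+/* Bytes control of xcount bytes backed by coefficient RAM at address xaddr */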
+#define COEFF_RAM_CTL(xname, xcount, xaddr) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .info = bytes_info_ext, \
+ .get = coeff_ram_get, .put = coeff_ram_put, \
+ .private_value = (unsigned long)&(struct coeff_ram_ctl) { \
+ .addr = xaddr, \
+ .bytes_ext = {.max = xcount, }, \
+ } \
+}
+
+static const struct snd_kcontrol_new tscs42xx_snd_controls[] = {
+ /* Volumes */
+ SOC_DOUBLE_R_TLV("Headphone Playback Volume", R_HPVOLL, R_HPVOLR,
+ FB_HPVOLL, 0x7F, 0, hpvol_scale),
+ SOC_DOUBLE_R_TLV("Speaker Playback Volume", R_SPKVOLL, R_SPKVOLR,
+ FB_SPKVOLL, 0x7F, 0, spkvol_scale),
+ SOC_DOUBLE_R_TLV("Master Playback Volume", R_DACVOLL, R_DACVOLR,
+ FB_DACVOLL, 0xFF, 0, dacvol_scale),
+ SOC_DOUBLE_R_TLV("PCM Capture Volume", R_ADCVOLL, R_ADCVOLR,
+ FB_ADCVOLL, 0xFF, 0, adcvol_scale),
+ SOC_DOUBLE_R_TLV("Master Capture Volume", R_INVOLL, R_INVOLR,
+ FB_INVOLL, 0x3F, 0, invol_scale),
+
+ /* INSEL */
+ SOC_DOUBLE_R_TLV("Mic Boost Capture Volume", R_INSELL, R_INSELR,
+ FB_INSELL_MICBSTL, FV_INSELL_MICBSTL_30DB,
+ 0, mic_boost_scale),
+
+ /* Input Channel Map */
+ SOC_ENUM("Input Channel Map", ch_map_select_enum),
+
+ /* Coefficient Ram */
+ COEFF_RAM_CTL("Cascade1L BiQuad1", BIQUAD_SIZE, 0x00),
+ COEFF_RAM_CTL("Cascade1L BiQuad2", BIQUAD_SIZE, 0x05),
+ COEFF_RAM_CTL("Cascade1L BiQuad3", BIQUAD_SIZE, 0x0a),
+ COEFF_RAM_CTL("Cascade1L BiQuad4", BIQUAD_SIZE, 0x0f),
+ COEFF_RAM_CTL("Cascade1L BiQuad5", BIQUAD_SIZE, 0x14),
+ COEFF_RAM_CTL("Cascade1L BiQuad6", BIQUAD_SIZE, 0x19),
+
+ COEFF_RAM_CTL("Cascade1R BiQuad1", BIQUAD_SIZE, 0x20),
+ COEFF_RAM_CTL("Cascade1R BiQuad2", BIQUAD_SIZE, 0x25),
+ COEFF_RAM_CTL("Cascade1R BiQuad3", BIQUAD_SIZE, 0x2a),
+ COEFF_RAM_CTL("Cascade1R BiQuad4", BIQUAD_SIZE, 0x2f),
+ COEFF_RAM_CTL("Cascade1R BiQuad5", BIQUAD_SIZE, 0x34),
+ COEFF_RAM_CTL("Cascade1R BiQuad6", BIQUAD_SIZE, 0x39),
+
+ COEFF_RAM_CTL("Cascade1L Prescale", COEFF_SIZE, 0x1f),
+ COEFF_RAM_CTL("Cascade1R Prescale", COEFF_SIZE, 0x3f),
+
+ COEFF_RAM_CTL("Cascade2L BiQuad1", BIQUAD_SIZE, 0x40),
+ COEFF_RAM_CTL("Cascade2L BiQuad2", BIQUAD_SIZE, 0x45),
+ COEFF_RAM_CTL("Cascade2L BiQuad3", BIQUAD_SIZE, 0x4a),
+ COEFF_RAM_CTL("Cascade2L BiQuad4", BIQUAD_SIZE, 0x4f),
+ COEFF_RAM_CTL("Cascade2L BiQuad5", BIQUAD_SIZE, 0x54),
+ COEFF_RAM_CTL("Cascade2L BiQuad6", BIQUAD_SIZE, 0x59),
+
+ COEFF_RAM_CTL("Cascade2R BiQuad1", BIQUAD_SIZE, 0x60),
+ COEFF_RAM_CTL("Cascade2R BiQuad2", BIQUAD_SIZE, 0x65),
+ COEFF_RAM_CTL("Cascade2R BiQuad3", BIQUAD_SIZE, 0x6a),
+ COEFF_RAM_CTL("Cascade2R BiQuad4", BIQUAD_SIZE, 0x6f),
+ COEFF_RAM_CTL("Cascade2R BiQuad5", BIQUAD_SIZE, 0x74),
+ COEFF_RAM_CTL("Cascade2R BiQuad6", BIQUAD_SIZE, 0x79),
+
+ COEFF_RAM_CTL("Cascade2L Prescale", COEFF_SIZE, 0x5f),
+ COEFF_RAM_CTL("Cascade2R Prescale", COEFF_SIZE, 0x7f),
+
+ COEFF_RAM_CTL("Bass Extraction BiQuad1", BIQUAD_SIZE, 0x80),
+ COEFF_RAM_CTL("Bass Extraction BiQuad2", BIQUAD_SIZE, 0x85),
+
+ COEFF_RAM_CTL("Bass Non Linear Function 1", COEFF_SIZE, 0x8a),
+ COEFF_RAM_CTL("Bass Non Linear Function 2", COEFF_SIZE, 0x8b),
+
+ COEFF_RAM_CTL("Bass Limiter BiQuad", BIQUAD_SIZE, 0x8c),
+
+ COEFF_RAM_CTL("Bass Cut Off BiQuad", BIQUAD_SIZE, 0x91),
+
+ COEFF_RAM_CTL("Bass Mix", COEFF_SIZE, 0x96),
+
+ COEFF_RAM_CTL("Treb Extraction BiQuad1", BIQUAD_SIZE, 0x97),
+ COEFF_RAM_CTL("Treb Extraction BiQuad2", BIQUAD_SIZE, 0x9c),
+
+ COEFF_RAM_CTL("Treb Non Linear Function 1", COEFF_SIZE, 0xa1),
+ COEFF_RAM_CTL("Treb Non Linear Function 2", COEFF_SIZE, 0xa2),
+
+ COEFF_RAM_CTL("Treb Limiter BiQuad", BIQUAD_SIZE, 0xa3),
+
+ COEFF_RAM_CTL("Treb Cut Off BiQuad", BIQUAD_SIZE, 0xa8),
+
+ COEFF_RAM_CTL("Treb Mix", COEFF_SIZE, 0xad),
+
+ COEFF_RAM_CTL("3D", COEFF_SIZE, 0xae),
+
+ COEFF_RAM_CTL("3D Mix", COEFF_SIZE, 0xaf),
+
+ COEFF_RAM_CTL("MBC1 BiQuad1", BIQUAD_SIZE, 0xb0),
+ COEFF_RAM_CTL("MBC1 BiQuad2", BIQUAD_SIZE, 0xb5),
+
+ COEFF_RAM_CTL("MBC2 BiQuad1", BIQUAD_SIZE, 0xba),
+ COEFF_RAM_CTL("MBC2 BiQuad2", BIQUAD_SIZE, 0xbf),
+
+ COEFF_RAM_CTL("MBC3 BiQuad1", BIQUAD_SIZE, 0xc4),
+ COEFF_RAM_CTL("MBC3 BiQuad2", BIQUAD_SIZE, 0xc9),
+
+ /* EQ */
+ SOC_SINGLE("EQ1 Switch", R_CONFIG1, FB_CONFIG1_EQ1_EN, 1, 0),
+ SOC_SINGLE("EQ2 Switch", R_CONFIG1, FB_CONFIG1_EQ2_EN, 1, 0),
+ SOC_ENUM("EQ1 Band Enable", eq1_band_enable_enum),
+ SOC_ENUM("EQ2 Band Enable", eq2_band_enable_enum),
+
+ /* CLE */
+ SOC_ENUM("CLE Level Detect",
+ cle_level_detection_enum),
+ SOC_ENUM("CLE Level Detect Win",
+ cle_level_detection_window_enum),
+ SOC_SINGLE("Expander Switch",
+ R_CLECTL, FB_CLECTL_EXP_EN, 1, 0),
+ SOC_SINGLE("Limiter Switch",
+ R_CLECTL, FB_CLECTL_LIMIT_EN, 1, 0),
+ SOC_SINGLE("Comp Switch",
+ R_CLECTL, FB_CLECTL_COMP_EN, 1, 0),
+ SOC_SINGLE_TLV("CLE Make-Up Gain Playback Volume",
+ R_MUGAIN, FB_MUGAIN_CLEMUG, 0x1f, 0, mugain_scale),
+ SOC_SINGLE_TLV("Comp Thresh Playback Volume",
+ R_COMPTH, FB_COMPTH, 0xff, 0, compth_scale),
+ SOC_ENUM("Comp Ratio", compressor_ratio_enum),
+ SND_SOC_BYTES("Comp Atk Time", R_CATKTCL, 2),
+
+ /* Effects */
+ SOC_SINGLE("3D Switch", R_FXCTL, FB_FXCTL_3DEN, 1, 0),
+ SOC_SINGLE("Treble Switch", R_FXCTL, FB_FXCTL_TEEN, 1, 0),
+ SOC_SINGLE("Treble Bypass Switch", R_FXCTL, FB_FXCTL_TNLFBYPASS, 1, 0),
+ SOC_SINGLE("Bass Switch", R_FXCTL, FB_FXCTL_BEEN, 1, 0),
+ SOC_SINGLE("Bass Bypass Switch", R_FXCTL, FB_FXCTL_BNLFBYPASS, 1, 0),
+
+ /* MBC */
+ SOC_SINGLE("MBC Band1 Switch", R_DACMBCEN, FB_DACMBCEN_MBCEN1, 1, 0),
+ SOC_SINGLE("MBC Band2 Switch", R_DACMBCEN, FB_DACMBCEN_MBCEN2, 1, 0),
+ SOC_SINGLE("MBC Band3 Switch", R_DACMBCEN, FB_DACMBCEN_MBCEN3, 1, 0),
+ SOC_ENUM("MBC Band1 Level Detect",
+ mbc_level_detection_enums[0]),
+ SOC_ENUM("MBC Band2 Level Detect",
+ mbc_level_detection_enums[1]),
+ SOC_ENUM("MBC Band3 Level Detect",
+ mbc_level_detection_enums[2]),
+ SOC_ENUM("MBC Band1 Level Detect Win",
+ mbc_level_detection_window_enums[0]),
+ SOC_ENUM("MBC Band2 Level Detect Win",
+ mbc_level_detection_window_enums[1]),
+ SOC_ENUM("MBC Band3 Level Detect Win",
+ mbc_level_detection_window_enums[2]),
+
+ SOC_SINGLE("MBC1 Phase Invert Switch",
+ R_DACMBCMUG1, FB_DACMBCMUG1_PHASE, 1, 0),
+ SOC_SINGLE_TLV("DAC MBC1 Make-Up Gain Playback Volume",
+ R_DACMBCMUG1, FB_DACMBCMUG1_MUGAIN, 0x1f, 0, mugain_scale),
+ SOC_SINGLE_TLV("DAC MBC1 Comp Thresh Playback Volume",
+ R_DACMBCTHR1, FB_DACMBCTHR1_THRESH, 0xff, 0, compth_scale),
+ SOC_ENUM("DAC MBC1 Comp Ratio",
+ dac_mbc1_compressor_ratio_enum),
+ SND_SOC_BYTES("DAC MBC1 Comp Atk Time", R_DACMBCATK1L, 2),
+ SND_SOC_BYTES("DAC MBC1 Comp Rel Time Const",
+ R_DACMBCREL1L, 2),
+
+ SOC_SINGLE("MBC2 Phase Invert Switch",
+ R_DACMBCMUG2, FB_DACMBCMUG2_PHASE, 1, 0),
+ SOC_SINGLE_TLV("DAC MBC2 Make-Up Gain Playback Volume",
+ R_DACMBCMUG2, FB_DACMBCMUG2_MUGAIN, 0x1f, 0, mugain_scale),
+ SOC_SINGLE_TLV("DAC MBC2 Comp Thresh Playback Volume",
+ R_DACMBCTHR2, FB_DACMBCTHR2_THRESH, 0xff, 0, compth_scale),
+ SOC_ENUM("DAC MBC2 Comp Ratio",
+ dac_mbc2_compressor_ratio_enum),
+ SND_SOC_BYTES("DAC MBC2 Comp Atk Time", R_DACMBCATK2L, 2),
+ SND_SOC_BYTES("DAC MBC2 Comp Rel Time Const",
+ R_DACMBCREL2L, 2),
+
+ SOC_SINGLE("MBC3 Phase Invert Switch",
+ R_DACMBCMUG3, FB_DACMBCMUG3_PHASE, 1, 0),
+ SOC_SINGLE_TLV("DAC MBC3 Make-Up Gain Playback Volume",
+ R_DACMBCMUG3, FB_DACMBCMUG3_MUGAIN, 0x1f, 0, mugain_scale),
+ SOC_SINGLE_TLV("DAC MBC3 Comp Thresh Playback Volume",
+ R_DACMBCTHR3, FB_DACMBCTHR3_THRESH, 0xff, 0, compth_scale),
+ SOC_ENUM("DAC MBC3 Comp Ratio",
+ dac_mbc3_compressor_ratio_enum),
+ SND_SOC_BYTES("DAC MBC3 Comp Atk Time", R_DACMBCATK3L, 2),
+ SND_SOC_BYTES("DAC MBC3 Comp Rel Time Const",
+ R_DACMBCREL3L, 2),
+};
+
+static int setup_sample_format(struct snd_soc_codec *codec,
+ snd_pcm_format_t format)
+{
+ unsigned int width;
+ int ret;
+
+ switch (format) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ width = RV_AIC1_WL_16;
+ break;
+ case SNDRV_PCM_FORMAT_S20_3LE:
+ width = RV_AIC1_WL_20;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ width = RV_AIC1_WL_24;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ width = RV_AIC1_WL_32;
+ break;
+ default:
+ ret = -EINVAL;
+ dev_err(codec->dev, "Unsupported format width (%d)\n", ret);
+ return ret;
+ }
+ ret = snd_soc_update_bits(codec, R_AIC1, RM_AIC1_WL, width);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to set sample width (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
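+/*
+ * The converters derive their rate from a base rate (32 kHz, 44.1 kHz or
+ * 48 kHz) and a multiplier (0.25x, 0.5x, 1x or 2x); e.g. 96 kHz is the
+ * 48 kHz base rate with a 2x multiplier.
+ */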
+static int setup_sample_rate(struct snd_soc_codec *codec, unsigned int rate)
+{
+ struct tscs42xx *tscs42xx = snd_soc_codec_get_drvdata(codec);
+ unsigned int br, bm;
+ int ret;
+
+ switch (rate) {
+ case 8000:
+ br = RV_DACSR_DBR_32;
+ bm = RV_DACSR_DBM_PT25;
+ break;
+ case 16000:
+ br = RV_DACSR_DBR_32;
+ bm = RV_DACSR_DBM_PT5;
+ break;
+ case 24000:
+ br = RV_DACSR_DBR_48;
+ bm = RV_DACSR_DBM_PT5;
+ break;
+ case 32000:
+ br = RV_DACSR_DBR_32;
+ bm = RV_DACSR_DBM_1;
+ break;
+ case 48000:
+ br = RV_DACSR_DBR_48;
+ bm = RV_DACSR_DBM_1;
+ break;
+ case 96000:
+ br = RV_DACSR_DBR_48;
+ bm = RV_DACSR_DBM_2;
+ break;
+ case 11025:
+ br = RV_DACSR_DBR_44_1;
+ bm = RV_DACSR_DBM_PT25;
+ break;
+ case 22050:
+ br = RV_DACSR_DBR_44_1;
+ bm = RV_DACSR_DBM_PT5;
+ break;
+ case 44100:
+ br = RV_DACSR_DBR_44_1;
+ bm = RV_DACSR_DBM_1;
+ break;
+ case 88200:
+ br = RV_DACSR_DBR_44_1;
+ bm = RV_DACSR_DBM_2;
+ break;
+ default:
+ dev_err(codec->dev, "Unsupported sample rate %d\n", rate);
+ return -EINVAL;
+ }
+
+ /* DAC and ADC share bit and frame clock */
+ ret = snd_soc_update_bits(codec, R_DACSR, RM_DACSR_DBR, br);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to update register (%d)\n", ret);
+ return ret;
+ }
+ ret = snd_soc_update_bits(codec, R_DACSR, RM_DACSR_DBM, bm);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to update register (%d)\n", ret);
+ return ret;
+ }
+ ret = snd_soc_update_bits(codec, R_ADCSR, RM_DACSR_DBR, br);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to update register (%d)\n", ret);
+ return ret;
+ }
+ ret = snd_soc_update_bits(codec, R_ADCSR, RM_DACSR_DBM, bm);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to update register (%d)\n", ret);
+ return ret;
+ }
+
+ mutex_lock(&tscs42xx->audio_params_lock);
+
+ tscs42xx->samplerate = rate;
+
+ mutex_unlock(&tscs42xx->audio_params_lock);
+
+ return 0;
+}
+
+struct reg_setting {
+ unsigned int addr;
+ unsigned int val;
+ unsigned int mask;
+};
+
+#define PLL_REG_SETTINGS_COUNT 13
+struct pll_ctl {
+ int input_freq;
+ struct reg_setting settings[PLL_REG_SETTINGS_COUNT];
+};
+
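+/*
+ * Per-reference-frequency PLL register settings. Each entry programs the
+ * timebase and the PLL control registers for one supported input frequency.
+ */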
+#define PLL_CTL(f, rt, rd, r1b_l, r9, ra, rb, \
+ rc, r12, r1b_h, re, rf, r10, r11) \
+ { \
+ .input_freq = f, \
+ .settings = { \
+ {R_TIMEBASE, rt, 0xFF}, \
+ {R_PLLCTLD, rd, 0xFF}, \
+ {R_PLLCTL1B, r1b_l, 0x0F}, \
+ {R_PLLCTL9, r9, 0xFF}, \
+ {R_PLLCTLA, ra, 0xFF}, \
+ {R_PLLCTLB, rb, 0xFF}, \
+ {R_PLLCTLC, rc, 0xFF}, \
+ {R_PLLCTL12, r12, 0xFF}, \
+ {R_PLLCTL1B, r1b_h, 0xF0}, \
+ {R_PLLCTLE, re, 0xFF}, \
+ {R_PLLCTLF, rf, 0xFF}, \
+ {R_PLLCTL10, r10, 0xFF}, \
+ {R_PLLCTL11, r11, 0xFF}, \
+ }, \
+ }
+
+static const struct pll_ctl pll_ctls[] = {
+ PLL_CTL(1411200, 0x05,
+ 0x39, 0x04, 0x07, 0x02, 0xC3, 0x04,
+ 0x1B, 0x10, 0x03, 0x03, 0xD0, 0x02),
+ PLL_CTL(1536000, 0x05,
+ 0x1A, 0x04, 0x02, 0x03, 0xE0, 0x01,
+ 0x1A, 0x10, 0x02, 0x03, 0xB9, 0x01),
+ PLL_CTL(2822400, 0x0A,
+ 0x23, 0x04, 0x07, 0x04, 0xC3, 0x04,
+ 0x22, 0x10, 0x05, 0x03, 0x58, 0x02),
+ PLL_CTL(3072000, 0x0B,
+ 0x22, 0x04, 0x07, 0x03, 0x48, 0x03,
+ 0x1A, 0x10, 0x04, 0x03, 0xB9, 0x01),
+ PLL_CTL(5644800, 0x15,
+ 0x23, 0x04, 0x0E, 0x04, 0xC3, 0x04,
+ 0x1A, 0x10, 0x08, 0x03, 0xE0, 0x01),
+ PLL_CTL(6144000, 0x17,
+ 0x1A, 0x04, 0x08, 0x03, 0xE0, 0x01,
+ 0x1A, 0x10, 0x08, 0x03, 0xB9, 0x01),
+ PLL_CTL(12000000, 0x2E,
+ 0x1B, 0x04, 0x19, 0x03, 0x00, 0x03,
+ 0x2A, 0x10, 0x19, 0x05, 0x98, 0x04),
+ PLL_CTL(19200000, 0x4A,
+ 0x13, 0x04, 0x14, 0x03, 0x80, 0x01,
+ 0x1A, 0x10, 0x19, 0x03, 0xB9, 0x01),
+ PLL_CTL(22000000, 0x55,
+ 0x2A, 0x04, 0x37, 0x05, 0x00, 0x06,
+ 0x22, 0x10, 0x26, 0x03, 0x49, 0x02),
+ PLL_CTL(22579200, 0x57,
+ 0x22, 0x04, 0x31, 0x03, 0x20, 0x03,
+ 0x1A, 0x10, 0x1D, 0x03, 0xB3, 0x01),
+ PLL_CTL(24000000, 0x5D,
+ 0x13, 0x04, 0x19, 0x03, 0x80, 0x01,
+ 0x1B, 0x10, 0x19, 0x05, 0x4C, 0x02),
+ PLL_CTL(24576000, 0x5F,
+ 0x13, 0x04, 0x1D, 0x03, 0xB3, 0x01,
+ 0x22, 0x10, 0x40, 0x03, 0x72, 0x03),
+ PLL_CTL(27000000, 0x68,
+ 0x22, 0x04, 0x4B, 0x03, 0x00, 0x04,
+ 0x2A, 0x10, 0x7D, 0x03, 0x20, 0x06),
+ PLL_CTL(36000000, 0x8C,
+ 0x1B, 0x04, 0x4B, 0x03, 0x00, 0x03,
+ 0x2A, 0x10, 0x7D, 0x03, 0x98, 0x04),
+ PLL_CTL(25000000, 0x61,
+ 0x1B, 0x04, 0x37, 0x03, 0x2B, 0x03,
+ 0x1A, 0x10, 0x2A, 0x03, 0x39, 0x02),
+ PLL_CTL(26000000, 0x65,
+ 0x23, 0x04, 0x41, 0x05, 0x00, 0x06,
+ 0x1A, 0x10, 0x26, 0x03, 0xEF, 0x01),
+ PLL_CTL(12288000, 0x2F,
+ 0x1A, 0x04, 0x12, 0x03, 0x1C, 0x02,
+ 0x22, 0x10, 0x20, 0x03, 0x72, 0x03),
+ PLL_CTL(40000000, 0x9B,
+ 0x22, 0x08, 0x7D, 0x03, 0x80, 0x04,
+ 0x23, 0x10, 0x7D, 0x05, 0xE4, 0x06),
+ PLL_CTL(512000, 0x01,
+ 0x22, 0x04, 0x01, 0x03, 0xD0, 0x02,
+ 0x1B, 0x10, 0x01, 0x04, 0x72, 0x03),
+ PLL_CTL(705600, 0x02,
+ 0x22, 0x04, 0x02, 0x03, 0x15, 0x04,
+ 0x22, 0x10, 0x01, 0x04, 0x80, 0x02),
+ PLL_CTL(1024000, 0x03,
+ 0x22, 0x04, 0x02, 0x03, 0xD0, 0x02,
+ 0x1B, 0x10, 0x02, 0x04, 0x72, 0x03),
+ PLL_CTL(2048000, 0x07,
+ 0x22, 0x04, 0x04, 0x03, 0xD0, 0x02,
+ 0x1B, 0x10, 0x04, 0x04, 0x72, 0x03),
+ PLL_CTL(2400000, 0x08,
+ 0x22, 0x04, 0x05, 0x03, 0x00, 0x03,
+ 0x23, 0x10, 0x05, 0x05, 0x98, 0x04),
+};
+
+static const struct pll_ctl *get_pll_ctl(int input_freq)
+{
+ int i;
+ const struct pll_ctl *pll_ctl = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(pll_ctls); ++i)
+ if (input_freq == pll_ctls[i].input_freq) {
+ pll_ctl = &pll_ctls[i];
+ break;
+ }
+
+ return pll_ctl;
+}
+
+static int set_pll_ctl_from_input_freq(struct snd_soc_codec *codec,
+ const int input_freq)
+{
+ int ret;
+ int i;
+ const struct pll_ctl *pll_ctl;
+
+ pll_ctl = get_pll_ctl(input_freq);
+ if (!pll_ctl) {
+ ret = -EINVAL;
+ dev_err(codec->dev, "No PLL input entry for %d (%d)\n",
+ input_freq, ret);
+ return ret;
+ }
+
+ for (i = 0; i < PLL_REG_SETTINGS_COUNT; ++i) {
+ ret = snd_soc_update_bits(codec,
+ pll_ctl->settings[i].addr,
+ pll_ctl->settings[i].mask,
+ pll_ctl->settings[i].val);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to set pll ctl (%d)\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int tscs42xx_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *codec_dai)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ int ret;
+
+ ret = setup_sample_format(codec, params_format(params));
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to setup sample format (%d)\n",
+ ret);
+ return ret;
+ }
+
+ ret = setup_sample_rate(codec, params_rate(params));
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to setup sample rate (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline int dac_mute(struct snd_soc_codec *codec)
+{
+ int ret;
+
+ ret = snd_soc_update_bits(codec, R_CNVRTR1, RM_CNVRTR1_DACMU,
+ RV_CNVRTR1_DACMU_ENABLE);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to mute DAC (%d)\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline int dac_unmute(struct snd_soc_codec *codec)
+{
+ int ret;
+
+ ret = snd_soc_update_bits(codec, R_CNVRTR1, RM_CNVRTR1_DACMU,
+ RV_CNVRTR1_DACMU_DISABLE);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to unmute DAC (%d)\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline int adc_mute(struct snd_soc_codec *codec)
+{
+ int ret;
+
+ ret = snd_soc_update_bits(codec, R_CNVRTR0, RM_CNVRTR0_ADCMU,
+ RV_CNVRTR0_ADCMU_ENABLE);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to mute ADC (%d)\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline int adc_unmute(struct snd_soc_codec *codec)
+{
+ int ret;
+
+ ret = snd_soc_update_bits(codec, R_CNVRTR0, RM_CNVRTR0_ADCMU,
+ RV_CNVRTR0_ADCMU_DISABLE);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to unmute ADC (%d)\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tscs42xx_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ int ret;
+
+ if (mute) {
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+ ret = dac_mute(codec);
+ else
+ ret = adc_mute(codec);
+ } else {
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+ ret = dac_unmute(codec);
+ else
+ ret = adc_unmute(codec);
+ }
+
+ return ret;
+}
+
+static int tscs42xx_set_dai_fmt(struct snd_soc_dai *codec_dai,
+ unsigned int fmt)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ int ret;
+
+ /* Slave mode not supported since it needs always-on frame clock */
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ ret = snd_soc_update_bits(codec, R_AIC1, RM_AIC1_MS,
+ RV_AIC1_MS_MASTER);
+ if (ret < 0) {
+ dev_err(codec->dev,
+ "Failed to set codec DAI master (%d)\n", ret);
+ return ret;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ dev_err(codec->dev, "Unsupported format (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
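+/*
+ * The BCLK ratio is programmed identically into the DAC and ADC serial
+ * interfaces since they share bit and frame clocks.
+ */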
+static int tscs42xx_set_dai_bclk_ratio(struct snd_soc_dai *codec_dai,
+ unsigned int ratio)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct tscs42xx *tscs42xx = snd_soc_codec_get_drvdata(codec);
+ unsigned int value;
+ int ret = 0;
+
+ switch (ratio) {
+ case 32:
+ value = RV_DACSR_DBCM_32;
+ break;
+ case 40:
+ value = RV_DACSR_DBCM_40;
+ break;
+ case 64:
+ value = RV_DACSR_DBCM_64;
+ break;
+ default:
+ dev_err(codec->dev, "Unsupported bclk ratio (%d)\n", ret);
+ return -EINVAL;
+ }
+
+ ret = snd_soc_update_bits(codec, R_DACSR, RM_DACSR_DBCM, value);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to set DAC BCLK ratio (%d)\n", ret);
+ return ret;
+ }
+ ret = snd_soc_update_bits(codec, R_ADCSR, RM_ADCSR_ABCM, value);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to set ADC BCLK ratio (%d)\n", ret);
+ return ret;
+ }
+
+ mutex_lock(&tscs42xx->audio_params_lock);
+
+ tscs42xx->bclk_ratio = ratio;
+
+ mutex_unlock(&tscs42xx->audio_params_lock);
+
+ return 0;
+}
+
+static int tscs42xx_set_dai_sysclk(struct snd_soc_dai *codec_dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ int ret;
+
+ switch (clk_id) {
+ case TSCS42XX_PLL_SRC_XTAL:
+ case TSCS42XX_PLL_SRC_MCLK1:
+ ret = snd_soc_write(codec, R_PLLREFSEL,
+ RV_PLLREFSEL_PLL1_REF_SEL_XTAL_MCLK1 |
+ RV_PLLREFSEL_PLL2_REF_SEL_XTAL_MCLK1);
+ if (ret < 0) {
+ dev_err(codec->dev,
+ "Failed to set pll reference input (%d)\n",
+ ret);
+ return ret;
+ }
+ break;
+ case TSCS42XX_PLL_SRC_MCLK2:
+ ret = snd_soc_write(codec, R_PLLREFSEL,
+ RV_PLLREFSEL_PLL1_REF_SEL_MCLK2 |
+ RV_PLLREFSEL_PLL2_REF_SEL_MCLK2);
+ if (ret < 0) {
+ dev_err(codec->dev,
+ "Failed to set PLL reference (%d)\n", ret);
+ return ret;
+ }
+ break;
+ default:
+ dev_err(codec->dev, "pll src is unsupported\n");
+ return -EINVAL;
+ }
+
+ ret = set_pll_ctl_from_input_freq(codec, freq);
+ if (ret < 0) {
+ dev_err(codec->dev,
+ "Failed to setup PLL input freq (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops tscs42xx_dai_ops = {
+ .hw_params = tscs42xx_hw_params,
+ .mute_stream = tscs42xx_mute_stream,
+ .set_fmt = tscs42xx_set_dai_fmt,
+ .set_bclk_ratio = tscs42xx_set_dai_bclk_ratio,
+ .set_sysclk = tscs42xx_set_dai_sysclk,
+};
+
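+/*
+ * Check the device against the known TSCS42xx device IDs. Returns 1 for a
+ * recognized part, 0 for an unknown ID, or a negative error code if the ID
+ * registers could not be read.
+ */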
+static int part_is_valid(struct tscs42xx *tscs42xx)
+{
+ int val;
+ int ret;
+ unsigned int reg;
+
+ ret = regmap_read(tscs42xx->regmap, R_DEVIDH, &reg);
+ if (ret < 0)
+ return ret;
+
+ val = reg << 8;
+ ret = regmap_read(tscs42xx->regmap, R_DEVIDL, &reg);
+ if (ret < 0)
+ return ret;
+
+ val |= reg;
+
+ switch (val) {
+ case 0x4A74:
+ case 0x4A73:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static struct snd_soc_codec_driver soc_codec_dev_tscs42xx = {
+ .component_driver = {
+ .dapm_widgets = tscs42xx_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(tscs42xx_dapm_widgets),
+ .dapm_routes = tscs42xx_intercon,
+ .num_dapm_routes = ARRAY_SIZE(tscs42xx_intercon),
+ .controls = tscs42xx_snd_controls,
+ .num_controls = ARRAY_SIZE(tscs42xx_snd_controls),
+ },
+};
+
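+/*
+ * Seed the coefficient RAM cache: set the high byte of the coefficient at
+ * each listed address to 0x40 so those DSP blocks default to a non-zero
+ * (likely unity) coefficient instead of all zeros.
+ */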
+static inline void init_coeff_ram_cache(struct tscs42xx *tscs42xx)
+{
+ static const u8 norm_addrs[] = { 0x00, 0x05, 0x0a, 0x0f, 0x14, 0x19, 0x1f,
+ 0x20, 0x25, 0x2a, 0x2f, 0x34, 0x39, 0x3f, 0x40, 0x45, 0x4a,
+ 0x4f, 0x54, 0x59, 0x5f, 0x60, 0x65, 0x6a, 0x6f, 0x74, 0x79,
+ 0x7f, 0x80, 0x85, 0x8c, 0x91, 0x96, 0x97, 0x9c, 0xa3, 0xa8,
+ 0xad, 0xaf, 0xb0, 0xb5, 0xba, 0xbf, 0xc4, 0xc9, };
+ u8 *coeff_ram = tscs42xx->coeff_ram;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(norm_addrs); i++)
+ coeff_ram[((norm_addrs[i] + 1) * COEFF_SIZE) - 1] = 0x40;
+}
+
+#define TSCS42XX_RATES SNDRV_PCM_RATE_8000_96000
+
+#define TSCS42XX_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE \
+ | SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_driver tscs42xx_dai = {
+ .name = "tscs42xx-HiFi",
+ .playback = {
+ .stream_name = "HiFi Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = TSCS42XX_RATES,
+ .formats = TSCS42XX_FORMATS,},
+ .capture = {
+ .stream_name = "HiFi Capture",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = TSCS42XX_RATES,
+ .formats = TSCS42XX_FORMATS,},
+ .ops = &tscs42xx_dai_ops,
+ .symmetric_rates = 1,
+ .symmetric_channels = 1,
+ .symmetric_samplebits = 1,
+};
+
+static const struct reg_sequence tscs42xx_patch[] = {
+ { R_AIC2, RV_AIC2_BLRCM_DAC_BCLK_LRCLK_SHARED },
+};
+
+static int tscs42xx_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct tscs42xx *tscs42xx;
+ int ret = 0;
+
+ tscs42xx = devm_kzalloc(&i2c->dev, sizeof(*tscs42xx), GFP_KERNEL);
+ if (!tscs42xx)
+ return -ENOMEM;
+ i2c_set_clientdata(i2c, tscs42xx);
+ tscs42xx->dev = &i2c->dev;
+
+ tscs42xx->regmap = devm_regmap_init_i2c(i2c, &tscs42xx_regmap);
+ if (IS_ERR(tscs42xx->regmap)) {
+ ret = PTR_ERR(tscs42xx->regmap);
+ dev_err(tscs42xx->dev, "Failed to allocate regmap (%d)\n", ret);
+ return ret;
+ }
+
+ init_coeff_ram_cache(tscs42xx);
+
+ ret = part_is_valid(tscs42xx);
+ if (ret <= 0) {
+ dev_err(tscs42xx->dev, "No valid part (%d)\n", ret);
+ ret = -ENODEV;
+ return ret;
+ }
+
+ ret = regmap_write(tscs42xx->regmap, R_RESET, RV_RESET_ENABLE);
+ if (ret < 0) {
+ dev_err(tscs42xx->dev, "Failed to reset device (%d)\n", ret);
+ return ret;
+ }
+
+ ret = regmap_register_patch(tscs42xx->regmap, tscs42xx_patch,
+ ARRAY_SIZE(tscs42xx_patch));
+ if (ret < 0) {
+ dev_err(tscs42xx->dev, "Failed to apply patch (%d)\n", ret);
+ return ret;
+ }
+
+ mutex_init(&tscs42xx->audio_params_lock);
+ mutex_init(&tscs42xx->coeff_ram_lock);
+ mutex_init(&tscs42xx->pll_lock);
+
+ ret = snd_soc_register_codec(tscs42xx->dev, &soc_codec_dev_tscs42xx,
+ &tscs42xx_dai, 1);
+ if (ret) {
+ dev_err(tscs42xx->dev, "Failed to register codec (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tscs42xx_i2c_remove(struct i2c_client *client)
+{
+ snd_soc_unregister_codec(&client->dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id tscs42xx_i2c_id[] = {
+ { "tscs42A1", 0 },
+ { "tscs42A2", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tscs42xx_i2c_id);
+
+static const struct of_device_id tscs42xx_of_match[] = {
+ { .compatible = "tempo,tscs42A1", },
+ { .compatible = "tempo,tscs42A2", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tscs42xx_of_match);
+
+static struct i2c_driver tscs42xx_i2c_driver = {
+ .driver = {
+ .name = "tscs42xx",
+ .of_match_table = tscs42xx_of_match,
+ },
+ .probe = tscs42xx_i2c_probe,
+ .remove = tscs42xx_i2c_remove,
+ .id_table = tscs42xx_i2c_id,
+};
+
+module_i2c_driver(tscs42xx_i2c_driver);
+
+MODULE_AUTHOR("Tempo Semiconductor <steven.eckhoff.opensource@gmail.com");
+MODULE_DESCRIPTION("ASoC TSCS42xx driver");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/tscs42xx.h b/sound/soc/codecs/tscs42xx.h
new file mode 100644
index 000000000000..d4a30bcbf64b
--- /dev/null
+++ b/sound/soc/codecs/tscs42xx.h
@@ -0,0 +1,2693 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * tscs42xx.h -- TSCS42xx ALSA SoC Audio driver
+ * Copyright 2017 Tempo Semiconductor, Inc.
+ * Author: Steven Eckhoff <steven.eckhoff.opensource@gmail.com>
+ */
+
+#ifndef __WOOKIE_H__
+#define __WOOKIE_H__
+
+enum {
+ TSCS42XX_PLL_SRC_NONE,
+ TSCS42XX_PLL_SRC_XTAL,
+ TSCS42XX_PLL_SRC_MCLK1,
+ TSCS42XX_PLL_SRC_MCLK2,
+};
+
+#define R_HPVOLL 0x0
+#define R_HPVOLR 0x1
+#define R_SPKVOLL 0x2
+#define R_SPKVOLR 0x3
+#define R_DACVOLL 0x4
+#define R_DACVOLR 0x5
+#define R_ADCVOLL 0x6
+#define R_ADCVOLR 0x7
+#define R_INVOLL 0x8
+#define R_INVOLR 0x9
+#define R_INMODE 0x0B
+#define R_INSELL 0x0C
+#define R_INSELR 0x0D
+#define R_AIC1 0x13
+#define R_AIC2 0x14
+#define R_CNVRTR0 0x16
+#define R_ADCSR 0x17
+#define R_CNVRTR1 0x18
+#define R_DACSR 0x19
+#define R_PWRM1 0x1A
+#define R_PWRM2 0x1B
+#define R_CONFIG0 0x1F
+#define R_CONFIG1 0x20
+#define R_DMICCTL 0x24
+#define R_CLECTL 0x25
+#define R_MUGAIN 0x26
+#define R_COMPTH 0x27
+#define R_CMPRAT 0x28
+#define R_CATKTCL 0x29
+#define R_CATKTCH 0x2A
+#define R_CRELTCL 0x2B
+#define R_CRELTCH 0x2C
+#define R_LIMTH 0x2D
+#define R_LIMTGT 0x2E
+#define R_LATKTCL 0x2F
+#define R_LATKTCH 0x30
+#define R_LRELTCL 0x31
+#define R_LRELTCH 0x32
+#define R_EXPTH 0x33
+#define R_EXPRAT 0x34
+#define R_XATKTCL 0x35
+#define R_XATKTCH 0x36
+#define R_XRELTCL 0x37
+#define R_XRELTCH 0x38
+#define R_FXCTL 0x39
+#define R_DACCRWRL 0x3A
+#define R_DACCRWRM 0x3B
+#define R_DACCRWRH 0x3C
+#define R_DACCRRDL 0x3D
+#define R_DACCRRDM 0x3E
+#define R_DACCRRDH 0x3F
+#define R_DACCRADDR 0x40
+#define R_DCOFSEL 0x41
+#define R_PLLCTL9 0x4E
+#define R_PLLCTLA 0x4F
+#define R_PLLCTLB 0x50
+#define R_PLLCTLC 0x51
+#define R_PLLCTLD 0x52
+#define R_PLLCTLE 0x53
+#define R_PLLCTLF 0x54
+#define R_PLLCTL10 0x55
+#define R_PLLCTL11 0x56
+#define R_PLLCTL12 0x57
+#define R_PLLCTL1B 0x60
+#define R_PLLCTL1C 0x61
+#define R_TIMEBASE 0x77
+#define R_DEVIDL 0x7D
+#define R_DEVIDH 0x7E
+#define R_RESET 0x80
+#define R_DACCRSTAT 0x8A
+#define R_PLLCTL0 0x8E
+#define R_PLLREFSEL 0x8F
+#define R_DACMBCEN 0xC7
+#define R_DACMBCCTL 0xC8
+#define R_DACMBCMUG1 0xC9
+#define R_DACMBCTHR1 0xCA
+#define R_DACMBCRAT1 0xCB
+#define R_DACMBCATK1L 0xCC
+#define R_DACMBCATK1H 0xCD
+#define R_DACMBCREL1L 0xCE
+#define R_DACMBCREL1H 0xCF
+#define R_DACMBCMUG2 0xD0
+#define R_DACMBCTHR2 0xD1
+#define R_DACMBCRAT2 0xD2
+#define R_DACMBCATK2L 0xD3
+#define R_DACMBCATK2H 0xD4
+#define R_DACMBCREL2L 0xD5
+#define R_DACMBCREL2H 0xD6
+#define R_DACMBCMUG3 0xD7
+#define R_DACMBCTHR3 0xD8
+#define R_DACMBCRAT3 0xD9
+#define R_DACMBCATK3L 0xDA
+#define R_DACMBCATK3H 0xDB
+#define R_DACMBCREL3L 0xDC
+#define R_DACMBCREL3H 0xDD
+
+/* Helpers: shift a field mask (RM) or field value (RV) into register position */
+#define RM(m, b) ((m)<<(b))
+#define RV(v, b) ((v)<<(b))
+
+/****************************
+ * R_HPVOLL (0x0) *
+ ****************************/
+
+/* Field Offsets */
+#define FB_HPVOLL 0
+
+/* Field Masks */
+#define FM_HPVOLL 0X7F
+
+/* Field Values */
+#define FV_HPVOLL_P6DB 0x7F
+#define FV_HPVOLL_N88PT5DB 0x1
+#define FV_HPVOLL_MUTE 0x0
+
+/* Register Masks */
+#define RM_HPVOLL RM(FM_HPVOLL, FB_HPVOLL)
+
+/* Register Values */
+#define RV_HPVOLL_P6DB RV(FV_HPVOLL_P6DB, FB_HPVOLL)
+#define RV_HPVOLL_N88PT5DB RV(FV_HPVOLL_N88PT5DB, FB_HPVOLL)
+#define RV_HPVOLL_MUTE RV(FV_HPVOLL_MUTE, FB_HPVOLL)
+
+/****************************
+ * R_HPVOLR (0x1) *
+ ****************************/
+
+/* Field Offsets */
+#define FB_HPVOLR 0
+
+/* Field Masks */
+#define FM_HPVOLR 0X7F
+
+/* Field Values */
+#define FV_HPVOLR_P6DB 0x7F
+#define FV_HPVOLR_N88PT5DB 0x1
+#define FV_HPVOLR_MUTE 0x0
+
+/* Register Masks */
+#define RM_HPVOLR RM(FM_HPVOLR, FB_HPVOLR)
+
+/* Register Values */
+#define RV_HPVOLR_P6DB RV(FV_HPVOLR_P6DB, FB_HPVOLR)
+#define RV_HPVOLR_N88PT5DB RV(FV_HPVOLR_N88PT5DB, FB_HPVOLR)
+#define RV_HPVOLR_MUTE RV(FV_HPVOLR_MUTE, FB_HPVOLR)
+
+/*****************************
+ * R_SPKVOLL (0x2) *
+ *****************************/
+
+/* Field Offsets */
+#define FB_SPKVOLL 0
+
+/* Field Masks */
+#define FM_SPKVOLL 0X7F
+
+/* Field Values */
+#define FV_SPKVOLL_P12DB 0x7F
+#define FV_SPKVOLL_N77PT25DB 0x8
+#define FV_SPKVOLL_MUTE 0x0
+
+/* Register Masks */
+#define RM_SPKVOLL RM(FM_SPKVOLL, FB_SPKVOLL)
+
+/* Register Values */
+#define RV_SPKVOLL_P12DB RV(FV_SPKVOLL_P12DB, FB_SPKVOLL)
+#define RV_SPKVOLL_N77PT25DB \
+ RV(FV_SPKVOLL_N77PT25DB, FB_SPKVOLL)
+
+#define RV_SPKVOLL_MUTE RV(FV_SPKVOLL_MUTE, FB_SPKVOLL)
+
+/*****************************
+ * R_SPKVOLR (0x3) *
+ *****************************/
+
+/* Field Offsets */
+#define FB_SPKVOLR 0
+
+/* Field Masks */
+#define FM_SPKVOLR 0X7F
+
+/* Field Values */
+#define FV_SPKVOLR_P12DB 0x7F
+#define FV_SPKVOLR_N77PT25DB 0x8
+#define FV_SPKVOLR_MUTE 0x0
+
+/* Register Masks */
+#define RM_SPKVOLR RM(FM_SPKVOLR, FB_SPKVOLR)
+
+/* Register Values */
+#define RV_SPKVOLR_P12DB RV(FV_SPKVOLR_P12DB, FB_SPKVOLR)
+#define RV_SPKVOLR_N77PT25DB \
+ RV(FV_SPKVOLR_N77PT25DB, FB_SPKVOLR)
+
+#define RV_SPKVOLR_MUTE RV(FV_SPKVOLR_MUTE, FB_SPKVOLR)
+
+/*****************************
+ * R_DACVOLL (0x4) *
+ *****************************/
+
+/* Field Offsets */
+#define FB_DACVOLL 0
+
+/* Field Masks */
+#define FM_DACVOLL 0XFF
+
+/* Field Values */
+#define FV_DACVOLL_0DB 0xFF
+#define FV_DACVOLL_N95PT625DB 0x1
+#define FV_DACVOLL_MUTE 0x0
+
+/* Register Masks */
+#define RM_DACVOLL RM(FM_DACVOLL, FB_DACVOLL)
+
+/* Register Values */
+#define RV_DACVOLL_0DB RV(FV_DACVOLL_0DB, FB_DACVOLL)
+#define RV_DACVOLL_N95PT625DB \
+ RV(FV_DACVOLL_N95PT625DB, FB_DACVOLL)
+
+#define RV_DACVOLL_MUTE RV(FV_DACVOLL_MUTE, FB_DACVOLL)
+
+/*****************************
+ * R_DACVOLR (0x5) *
+ *****************************/
+
+/* Field Offsets */
+#define FB_DACVOLR 0
+
+/* Field Masks */
+#define FM_DACVOLR 0XFF
+
+/* Field Values */
+#define FV_DACVOLR_0DB 0xFF
+#define FV_DACVOLR_N95PT625DB 0x1
+#define FV_DACVOLR_MUTE 0x0
+
+/* Register Masks */
+#define RM_DACVOLR RM(FM_DACVOLR, FB_DACVOLR)
+
+/* Register Values */
+#define RV_DACVOLR_0DB RV(FV_DACVOLR_0DB, FB_DACVOLR)
+#define RV_DACVOLR_N95PT625DB \
+ RV(FV_DACVOLR_N95PT625DB, FB_DACVOLR)
+
+#define RV_DACVOLR_MUTE RV(FV_DACVOLR_MUTE, FB_DACVOLR)
+
+/*****************************
+ * R_ADCVOLL (0x6) *
+ *****************************/
+
+/* Field Offsets */
+#define FB_ADCVOLL 0
+
+/* Field Masks */
+#define FM_ADCVOLL 0XFF
+
+/* Field Values */
+#define FV_ADCVOLL_P24DB 0xFF
+#define FV_ADCVOLL_N71PT25DB 0x1
+#define FV_ADCVOLL_MUTE 0x0
+
+/* Register Masks */
+#define RM_ADCVOLL RM(FM_ADCVOLL, FB_ADCVOLL)
+
+/* Register Values */
+#define RV_ADCVOLL_P24DB RV(FV_ADCVOLL_P24DB, FB_ADCVOLL)
+#define RV_ADCVOLL_N71PT25DB \
+ RV(FV_ADCVOLL_N71PT25DB, FB_ADCVOLL)
+
+#define RV_ADCVOLL_MUTE RV(FV_ADCVOLL_MUTE, FB_ADCVOLL)
+
+/*****************************
+ * R_ADCVOLR (0x7) *
+ *****************************/
+
+/* Field Offsets */
+#define FB_ADCVOLR 0
+
+/* Field Masks */
+#define FM_ADCVOLR 0XFF
+
+/* Field Values */
+#define FV_ADCVOLR_P24DB 0xFF
+#define FV_ADCVOLR_N71PT25DB 0x1
+#define FV_ADCVOLR_MUTE 0x0
+
+/* Register Masks */
+#define RM_ADCVOLR RM(FM_ADCVOLR, FB_ADCVOLR)
+
+/* Register Values */
+#define RV_ADCVOLR_P24DB RV(FV_ADCVOLR_P24DB, FB_ADCVOLR)
+#define RV_ADCVOLR_N71PT25DB \
+ RV(FV_ADCVOLR_N71PT25DB, FB_ADCVOLR)
+
+#define RV_ADCVOLR_MUTE RV(FV_ADCVOLR_MUTE, FB_ADCVOLR)
+
+/****************************
+ * R_INVOLL (0x8) *
+ ****************************/
+
+/* Field Offsets */
+#define FB_INVOLL_INMUTEL 7
+#define FB_INVOLL_IZCL 6
+#define FB_INVOLL 0
+
+/* Field Masks */
+#define FM_INVOLL_INMUTEL 0X1
+#define FM_INVOLL_IZCL 0X1
+#define FM_INVOLL 0X3F
+
+/* Field Values */
+#define FV_INVOLL_INMUTEL_ENABLE 0x1
+#define FV_INVOLL_INMUTEL_DISABLE 0x0
+#define FV_INVOLL_IZCL_ENABLE 0x1
+#define FV_INVOLL_IZCL_DISABLE 0x0
+#define FV_INVOLL_P30DB 0x3F
+#define FV_INVOLL_N17PT25DB 0x0
+
+/* Register Masks */
+#define RM_INVOLL_INMUTEL \
+ RM(FM_INVOLL_INMUTEL, FB_INVOLL_INMUTEL)
+
+#define RM_INVOLL_IZCL RM(FM_INVOLL_IZCL, FB_INVOLL_IZCL)
+#define RM_INVOLL RM(FM_INVOLL, FB_INVOLL)
+
+/* Register Values */
+#define RV_INVOLL_INMUTEL_ENABLE \
+ RV(FV_INVOLL_INMUTEL_ENABLE, FB_INVOLL_INMUTEL)
+
+#define RV_INVOLL_INMUTEL_DISABLE \
+ RV(FV_INVOLL_INMUTEL_DISABLE, FB_INVOLL_INMUTEL)
+
+#define RV_INVOLL_IZCL_ENABLE \
+ RV(FV_INVOLL_IZCL_ENABLE, FB_INVOLL_IZCL)
+
+#define RV_INVOLL_IZCL_DISABLE \
+ RV(FV_INVOLL_IZCL_DISABLE, FB_INVOLL_IZCL)
+
+#define RV_INVOLL_P30DB RV(FV_INVOLL_P30DB, FB_INVOLL)
+#define RV_INVOLL_N17PT25DB RV(FV_INVOLL_N17PT25DB, FB_INVOLL)
+
+/****************************
+ * R_INVOLR (0x9) *
+ ****************************/
+
+/* Field Offsets */
+#define FB_INVOLR_INMUTER 7
+#define FB_INVOLR_IZCR 6
+#define FB_INVOLR 0
+
+/* Field Masks */
+#define FM_INVOLR_INMUTER 0X1
+#define FM_INVOLR_IZCR 0X1
+#define FM_INVOLR 0X3F
+
+/* Field Values */
+#define FV_INVOLR_INMUTER_ENABLE 0x1
+#define FV_INVOLR_INMUTER_DISABLE 0x0
+#define FV_INVOLR_IZCR_ENABLE 0x1
+#define FV_INVOLR_IZCR_DISABLE 0x0
+#define FV_INVOLR_P30DB 0x3F
+#define FV_INVOLR_N17PT25DB 0x0
+
+/* Register Masks */
+#define RM_INVOLR_INMUTER \
+ RM(FM_INVOLR_INMUTER, FB_INVOLR_INMUTER)
+
+#define RM_INVOLR_IZCR RM(FM_INVOLR_IZCR, FB_INVOLR_IZCR)
+#define RM_INVOLR RM(FM_INVOLR, FB_INVOLR)
+
+/* Register Values */
+#define RV_INVOLR_INMUTER_ENABLE \
+ RV(FV_INVOLR_INMUTER_ENABLE, FB_INVOLR_INMUTER)
+
+#define RV_INVOLR_INMUTER_DISABLE \
+ RV(FV_INVOLR_INMUTER_DISABLE, FB_INVOLR_INMUTER)
+
+#define RV_INVOLR_IZCR_ENABLE \
+ RV(FV_INVOLR_IZCR_ENABLE, FB_INVOLR_IZCR)
+
+#define RV_INVOLR_IZCR_DISABLE \
+ RV(FV_INVOLR_IZCR_DISABLE, FB_INVOLR_IZCR)
+
+#define RV_INVOLR_P30DB RV(FV_INVOLR_P30DB, FB_INVOLR)
+#define RV_INVOLR_N17PT25DB RV(FV_INVOLR_N17PT25DB, FB_INVOLR)
+
+/*****************************
+ * R_INMODE (0x0B) *
+ *****************************/
+
+/* Field Offsets */
+#define FB_INMODE_DS 0
+
+/* Field Masks */
+#define FM_INMODE_DS 0X1
+
+/* Field Values */
+#define FV_INMODE_DS_LRIN1 0x0
+#define FV_INMODE_DS_LRIN2 0x1
+
+/* Register Masks */
+#define RM_INMODE_DS RM(FM_INMODE_DS, FB_INMODE_DS)
+
+/* Register Values */
+#define RV_INMODE_DS_LRIN1 \
+ RV(FV_INMODE_DS_LRIN1, FB_INMODE_DS)
+
+#define RV_INMODE_DS_LRIN2 \
+ RV(FV_INMODE_DS_LRIN2, FB_INMODE_DS)
+
+
+/*****************************
+ * R_INSELL (0x0C) *
+ *****************************/
+
+/* Field Offsets */
+#define FB_INSELL 6
+#define FB_INSELL_MICBSTL 4
+
+/* Field Masks */
+#define FM_INSELL 0X3
+#define FM_INSELL_MICBSTL 0X3
+
+/* Field Values */
+#define FV_INSELL_IN1 0x0
+#define FV_INSELL_IN2 0x1
+#define FV_INSELL_IN3 0x2
+#define FV_INSELL_D2S 0x3
+#define FV_INSELL_MICBSTL_OFF 0x0
+#define FV_INSELL_MICBSTL_10DB 0x1
+#define FV_INSELL_MICBSTL_20DB 0x2
+#define FV_INSELL_MICBSTL_30DB 0x3
+
+/* Register Masks */
+#define RM_INSELL RM(FM_INSELL, FB_INSELL)
+#define RM_INSELL_MICBSTL \
+ RM(FM_INSELL_MICBSTL, FB_INSELL_MICBSTL)
+
+
+/* Register Values */
+#define RV_INSELL_IN1 RV(FV_INSELL_IN1, FB_INSELL)
+#define RV_INSELL_IN2 RV(FV_INSELL_IN2, FB_INSELL)
+#define RV_INSELL_IN3 RV(FV_INSELL_IN3, FB_INSELL)
+#define RV_INSELL_D2S RV(FV_INSELL_D2S, FB_INSELL)
+#define RV_INSELL_MICBSTL_OFF \
+ RV(FV_INSELL_MICBSTL_OFF, FB_INSELL_MICBSTL)
+
+#define RV_INSELL_MICBSTL_10DB \
+ RV(FV_INSELL_MICBSTL_10DB, FB_INSELL_MICBSTL)
+
+#define RV_INSELL_MICBSTL_20DB \
+ RV(FV_INSELL_MICBSTL_20DB, FB_INSELL_MICBSTL)
+
+#define RV_INSELL_MICBSTL_30DB \
+ RV(FV_INSELL_MICBSTL_30DB, FB_INSELL_MICBSTL)
+
+
+/*****************************
+ * R_INSELR (0x0D) *
+ *****************************/
+
+/* Field Offsets */
+#define FB_INSELR 6
+#define FB_INSELR_MICBSTR 4
+
+/* Field Masks */
+#define FM_INSELR 0X3
+#define FM_INSELR_MICBSTR 0X3
+
+/* Field Values */
+#define FV_INSELR_IN1 0x0
+#define FV_INSELR_IN2 0x1
+#define FV_INSELR_IN3 0x2
+#define FV_INSELR_D2S 0x3
+#define FV_INSELR_MICBSTR_OFF 0x0
+#define FV_INSELR_MICBSTR_10DB 0x1
+#define FV_INSELR_MICBSTR_20DB 0x2
+#define FV_INSELR_MICBSTR_30DB 0x3
+
+/* Register Masks */
+#define RM_INSELR RM(FM_INSELR, FB_INSELR)
+#define RM_INSELR_MICBSTR \
+ RM(FM_INSELR_MICBSTR, FB_INSELR_MICBSTR)
+
+
+/* Register Values */
+#define RV_INSELR_IN1 RV(FV_INSELR_IN1, FB_INSELR)
+#define RV_INSELR_IN2 RV(FV_INSELR_IN2, FB_INSELR)
+#define RV_INSELR_IN3 RV(FV_INSELR_IN3, FB_INSELR)
+#define RV_INSELR_D2S RV(FV_INSELR_D2S, FB_INSELR)
+#define RV_INSELR_MICBSTR_OFF \
+ RV(FV_INSELR_MICBSTR_OFF, FB_INSELR_MICBSTR)
+
+#define RV_INSELR_MICBSTR_10DB \
+ RV(FV_INSELR_MICBSTR_10DB, FB_INSELR_MICBSTR)
+
+#define RV_INSELR_MICBSTR_20DB \
+ RV(FV_INSELR_MICBSTR_20DB, FB_INSELR_MICBSTR)
+
+#define RV_INSELR_MICBSTR_30DB \
+ RV(FV_INSELR_MICBSTR_30DB, FB_INSELR_MICBSTR)
+
+
+/***************************
+ * R_AIC1 (0x13) *
+ ***************************/
+
+/* Field Offsets */
+#define FB_AIC1_BCLKINV 6
+#define FB_AIC1_MS 5
+#define FB_AIC1_LRP 4
+#define FB_AIC1_WL 2
+#define FB_AIC1_FORMAT 0
+
+/* Field Masks */
+#define FM_AIC1_BCLKINV 0X1
+#define FM_AIC1_MS 0X1
+#define FM_AIC1_LRP 0X1
+#define FM_AIC1_WL 0X3
+#define FM_AIC1_FORMAT 0X3
+
+/* Field Values */
+#define FV_AIC1_BCLKINV_ENABLE 0x1
+#define FV_AIC1_BCLKINV_DISABLE 0x0
+#define FV_AIC1_MS_MASTER 0x1
+#define FV_AIC1_MS_SLAVE 0x0
+#define FV_AIC1_LRP_INVERT 0x1
+#define FV_AIC1_LRP_NORMAL 0x0
+#define FV_AIC1_WL_16 0x0
+#define FV_AIC1_WL_20 0x1
+#define FV_AIC1_WL_24 0x2
+#define FV_AIC1_WL_32 0x3
+#define FV_AIC1_FORMAT_RIGHT 0x0
+#define FV_AIC1_FORMAT_LEFT 0x1
+#define FV_AIC1_FORMAT_I2S 0x2
+
+/* Register Masks */
+#define RM_AIC1_BCLKINV \
+ RM(FM_AIC1_BCLKINV, FB_AIC1_BCLKINV)
+
+#define RM_AIC1_MS RM(FM_AIC1_MS, FB_AIC1_MS)
+#define RM_AIC1_LRP RM(FM_AIC1_LRP, FB_AIC1_LRP)
+#define RM_AIC1_WL RM(FM_AIC1_WL, FB_AIC1_WL)
+#define RM_AIC1_FORMAT RM(FM_AIC1_FORMAT, FB_AIC1_FORMAT)
+
+/* Register Values */
+#define RV_AIC1_BCLKINV_ENABLE \
+ RV(FV_AIC1_BCLKINV_ENABLE, FB_AIC1_BCLKINV)
+
+#define RV_AIC1_BCLKINV_DISABLE \
+ RV(FV_AIC1_BCLKINV_DISABLE, FB_AIC1_BCLKINV)
+
+#define RV_AIC1_MS_MASTER RV(FV_AIC1_MS_MASTER, FB_AIC1_MS)
+#define RV_AIC1_MS_SLAVE RV(FV_AIC1_MS_SLAVE, FB_AIC1_MS)
+#define RV_AIC1_LRP_INVERT \
+ RV(FV_AIC1_LRP_INVERT, FB_AIC1_LRP)
+
+#define RV_AIC1_LRP_NORMAL \
+ RV(FV_AIC1_LRP_NORMAL, FB_AIC1_LRP)
+
+#define RV_AIC1_WL_16 RV(FV_AIC1_WL_16, FB_AIC1_WL)
+#define RV_AIC1_WL_20 RV(FV_AIC1_WL_20, FB_AIC1_WL)
+#define RV_AIC1_WL_24 RV(FV_AIC1_WL_24, FB_AIC1_WL)
+#define RV_AIC1_WL_32 RV(FV_AIC1_WL_32, FB_AIC1_WL)
+#define RV_AIC1_FORMAT_RIGHT \
+ RV(FV_AIC1_FORMAT_RIGHT, FB_AIC1_FORMAT)
+
+#define RV_AIC1_FORMAT_LEFT \
+ RV(FV_AIC1_FORMAT_LEFT, FB_AIC1_FORMAT)
+
+#define RV_AIC1_FORMAT_I2S \
+ RV(FV_AIC1_FORMAT_I2S, FB_AIC1_FORMAT)
+
+
+/***************************
+ * R_AIC2 (0x14) *
+ ***************************/
+
+/* Field Offsets */
+#define FB_AIC2_DACDSEL 6
+#define FB_AIC2_ADCDSEL 4
+#define FB_AIC2_TRI 3
+#define FB_AIC2_BLRCM 0
+
+/* Field Masks */
+#define FM_AIC2_DACDSEL 0X3
+#define FM_AIC2_ADCDSEL 0X3
+#define FM_AIC2_TRI 0X1
+#define FM_AIC2_BLRCM 0X7
+
+/* Field Values */
+#define FV_AIC2_BLRCM_DAC_BCLK_LRCLK_SHARED 0x3
+
+/* Register Masks */
+#define RM_AIC2_DACDSEL \
+ RM(FM_AIC2_DACDSEL, FB_AIC2_DACDSEL)
+
+#define RM_AIC2_ADCDSEL \
+ RM(FM_AIC2_ADCDSEL, FB_AIC2_ADCDSEL)
+
+#define RM_AIC2_TRI RM(FM_AIC2_TRI, FB_AIC2_TRI)
+#define RM_AIC2_BLRCM RM(FM_AIC2_BLRCM, FB_AIC2_BLRCM)
+
+/* Register Values */
+#define RV_AIC2_BLRCM_DAC_BCLK_LRCLK_SHARED \
+ RV(FV_AIC2_BLRCM_DAC_BCLK_LRCLK_SHARED, FB_AIC2_BLRCM)
+
+
+/******************************
+ * R_CNVRTR0 (0x16) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_CNVRTR0_ADCPOLR 7
+#define FB_CNVRTR0_ADCPOLL 6
+#define FB_CNVRTR0_AMONOMIX 4
+#define FB_CNVRTR0_ADCMU 3
+#define FB_CNVRTR0_HPOR 2
+#define FB_CNVRTR0_ADCHPDR 1
+#define FB_CNVRTR0_ADCHPDL 0
+
+/* Field Masks */
+#define FM_CNVRTR0_ADCPOLR 0X1
+#define FM_CNVRTR0_ADCPOLL 0X1
+#define FM_CNVRTR0_AMONOMIX 0X3
+#define FM_CNVRTR0_ADCMU 0X1
+#define FM_CNVRTR0_HPOR 0X1
+#define FM_CNVRTR0_ADCHPDR 0X1
+#define FM_CNVRTR0_ADCHPDL 0X1
+
+/* Field Values */
+#define FV_CNVRTR0_ADCPOLR_INVERT 0x1
+#define FV_CNVRTR0_ADCPOLR_NORMAL 0x0
+#define FV_CNVRTR0_ADCPOLL_INVERT 0x1
+#define FV_CNVRTR0_ADCPOLL_NORMAL 0x0
+#define FV_CNVRTR0_ADCMU_ENABLE 0x1
+#define FV_CNVRTR0_ADCMU_DISABLE 0x0
+#define FV_CNVRTR0_ADCHPDR_ENABLE 0x1
+#define FV_CNVRTR0_ADCHPDR_DISABLE 0x0
+#define FV_CNVRTR0_ADCHPDL_ENABLE 0x1
+#define FV_CNVRTR0_ADCHPDL_DISABLE 0x0
+
+/* Register Masks */
+#define RM_CNVRTR0_ADCPOLR \
+ RM(FM_CNVRTR0_ADCPOLR, FB_CNVRTR0_ADCPOLR)
+
+#define RM_CNVRTR0_ADCPOLL \
+ RM(FM_CNVRTR0_ADCPOLL, FB_CNVRTR0_ADCPOLL)
+
+#define RM_CNVRTR0_AMONOMIX \
+ RM(FM_CNVRTR0_AMONOMIX, FB_CNVRTR0_AMONOMIX)
+
+#define RM_CNVRTR0_ADCMU \
+ RM(FM_CNVRTR0_ADCMU, FB_CNVRTR0_ADCMU)
+
+#define RM_CNVRTR0_HPOR \
+ RM(FM_CNVRTR0_HPOR, FB_CNVRTR0_HPOR)
+
+#define RM_CNVRTR0_ADCHPDR \
+ RM(FM_CNVRTR0_ADCHPDR, FB_CNVRTR0_ADCHPDR)
+
+#define RM_CNVRTR0_ADCHPDL \
+ RM(FM_CNVRTR0_ADCHPDL, FB_CNVRTR0_ADCHPDL)
+
+
+/* Register Values */
+#define RV_CNVRTR0_ADCPOLR_INVERT \
+ RV(FV_CNVRTR0_ADCPOLR_INVERT, FB_CNVRTR0_ADCPOLR)
+
+#define RV_CNVRTR0_ADCPOLR_NORMAL \
+ RV(FV_CNVRTR0_ADCPOLR_NORMAL, FB_CNVRTR0_ADCPOLR)
+
+#define RV_CNVRTR0_ADCPOLL_INVERT \
+ RV(FV_CNVRTR0_ADCPOLL_INVERT, FB_CNVRTR0_ADCPOLL)
+
+#define RV_CNVRTR0_ADCPOLL_NORMAL \
+ RV(FV_CNVRTR0_ADCPOLL_NORMAL, FB_CNVRTR0_ADCPOLL)
+
+#define RV_CNVRTR0_ADCMU_ENABLE \
+ RV(FV_CNVRTR0_ADCMU_ENABLE, FB_CNVRTR0_ADCMU)
+
+#define RV_CNVRTR0_ADCMU_DISABLE \
+ RV(FV_CNVRTR0_ADCMU_DISABLE, FB_CNVRTR0_ADCMU)
+
+#define RV_CNVRTR0_ADCHPDR_ENABLE \
+ RV(FV_CNVRTR0_ADCHPDR_ENABLE, FB_CNVRTR0_ADCHPDR)
+
+#define RV_CNVRTR0_ADCHPDR_DISABLE \
+ RV(FV_CNVRTR0_ADCHPDR_DISABLE, FB_CNVRTR0_ADCHPDR)
+
+#define RV_CNVRTR0_ADCHPDL_ENABLE \
+ RV(FV_CNVRTR0_ADCHPDL_ENABLE, FB_CNVRTR0_ADCHPDL)
+
+#define RV_CNVRTR0_ADCHPDL_DISABLE \
+ RV(FV_CNVRTR0_ADCHPDL_DISABLE, FB_CNVRTR0_ADCHPDL)
+
+
+/****************************
+ * R_ADCSR (0x17) *
+ ****************************/
+
+/* Field Offsets */
+#define FB_ADCSR_ABCM 6
+#define FB_ADCSR_ABR 3
+#define FB_ADCSR_ABM 0
+
+/* Field Masks */
+#define FM_ADCSR_ABCM 0X3
+#define FM_ADCSR_ABR 0X3
+#define FM_ADCSR_ABM 0X7
+
+/* Field Values */
+#define FV_ADCSR_ABCM_AUTO 0x0
+#define FV_ADCSR_ABCM_32 0x1
+#define FV_ADCSR_ABCM_40 0x2
+#define FV_ADCSR_ABCM_64 0x3
+#define FV_ADCSR_ABR_32 0x0
+#define FV_ADCSR_ABR_44_1 0x1
+#define FV_ADCSR_ABR_48 0x2
+#define FV_ADCSR_ABM_PT25 0x0
+#define FV_ADCSR_ABM_PT5 0x1
+#define FV_ADCSR_ABM_1 0x2
+#define FV_ADCSR_ABM_2 0x3
+
+/* Register Masks */
+#define RM_ADCSR_ABCM RM(FM_ADCSR_ABCM, FB_ADCSR_ABCM)
+#define RM_ADCSR_ABR RM(FM_ADCSR_ABR, FB_ADCSR_ABR)
+#define RM_ADCSR_ABM RM(FM_ADCSR_ABM, FB_ADCSR_ABM)
+
+/* Register Values */
+#define RV_ADCSR_ABCM_AUTO \
+ RV(FV_ADCSR_ABCM_AUTO, FB_ADCSR_ABCM)
+
+#define RV_ADCSR_ABCM_32 \
+ RV(FV_ADCSR_ABCM_32, FB_ADCSR_ABCM)
+
+#define RV_ADCSR_ABCM_40 \
+ RV(FV_ADCSR_ABCM_40, FB_ADCSR_ABCM)
+
+#define RV_ADCSR_ABCM_64 \
+ RV(FV_ADCSR_ABCM_64, FB_ADCSR_ABCM)
+
+#define RV_ADCSR_ABR_32 RV(FV_ADCSR_ABR_32, FB_ADCSR_ABR)
+#define RV_ADCSR_ABR_44_1 \
+ RV(FV_ADCSR_ABR_44_1, FB_ADCSR_ABR)
+
+#define RV_ADCSR_ABR_48 RV(FV_ADCSR_ABR_48, FB_ADCSR_ABR)
+#define RV_ADCSR_ABM_PT25 \
+ RV(FV_ADCSR_ABM_PT25, FB_ADCSR_ABM)
+
+#define RV_ADCSR_ABM_PT5 RV(FV_ADCSR_ABM_PT5, FB_ADCSR_ABM)
+#define RV_ADCSR_ABM_1 RV(FV_ADCSR_ABM_1, FB_ADCSR_ABM)
+#define RV_ADCSR_ABM_2 RV(FV_ADCSR_ABM_2, FB_ADCSR_ABM)
+
+/******************************
+ * R_CNVRTR1 (0x18) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_CNVRTR1_DACPOLR 7
+#define FB_CNVRTR1_DACPOLL 6
+#define FB_CNVRTR1_DMONOMIX 4
+#define FB_CNVRTR1_DACMU 3
+#define FB_CNVRTR1_DEEMPH 2
+#define FB_CNVRTR1_DACDITH 0
+
+/* Field Masks */
+#define FM_CNVRTR1_DACPOLR 0X1
+#define FM_CNVRTR1_DACPOLL 0X1
+#define FM_CNVRTR1_DMONOMIX 0X3
+#define FM_CNVRTR1_DACMU 0X1
+#define FM_CNVRTR1_DEEMPH 0X1
+#define FM_CNVRTR1_DACDITH 0X3
+
+/* Field Values */
+#define FV_CNVRTR1_DACPOLR_INVERT 0x1
+#define FV_CNVRTR1_DACPOLR_NORMAL 0x0
+#define FV_CNVRTR1_DACPOLL_INVERT 0x1
+#define FV_CNVRTR1_DACPOLL_NORMAL 0x0
+#define FV_CNVRTR1_DMONOMIX_ENABLE 0x1
+#define FV_CNVRTR1_DMONOMIX_DISABLE 0x0
+#define FV_CNVRTR1_DACMU_ENABLE 0x1
+#define FV_CNVRTR1_DACMU_DISABLE 0x0
+
+/* Register Masks */
+#define RM_CNVRTR1_DACPOLR \
+ RM(FM_CNVRTR1_DACPOLR, FB_CNVRTR1_DACPOLR)
+
+#define RM_CNVRTR1_DACPOLL \
+ RM(FM_CNVRTR1_DACPOLL, FB_CNVRTR1_DACPOLL)
+
+#define RM_CNVRTR1_DMONOMIX \
+ RM(FM_CNVRTR1_DMONOMIX, FB_CNVRTR1_DMONOMIX)
+
+#define RM_CNVRTR1_DACMU \
+ RM(FM_CNVRTR1_DACMU, FB_CNVRTR1_DACMU)
+
+#define RM_CNVRTR1_DEEMPH \
+ RM(FM_CNVRTR1_DEEMPH, FB_CNVRTR1_DEEMPH)
+
+#define RM_CNVRTR1_DACDITH \
+ RM(FM_CNVRTR1_DACDITH, FB_CNVRTR1_DACDITH)
+
+
+/* Register Values */
+#define RV_CNVRTR1_DACPOLR_INVERT \
+ RV(FV_CNVRTR1_DACPOLR_INVERT, FB_CNVRTR1_DACPOLR)
+
+#define RV_CNVRTR1_DACPOLR_NORMAL \
+ RV(FV_CNVRTR1_DACPOLR_NORMAL, FB_CNVRTR1_DACPOLR)
+
+#define RV_CNVRTR1_DACPOLL_INVERT \
+ RV(FV_CNVRTR1_DACPOLL_INVERT, FB_CNVRTR1_DACPOLL)
+
+#define RV_CNVRTR1_DACPOLL_NORMAL \
+ RV(FV_CNVRTR1_DACPOLL_NORMAL, FB_CNVRTR1_DACPOLL)
+
+#define RV_CNVRTR1_DMONOMIX_ENABLE \
+ RV(FV_CNVRTR1_DMONOMIX_ENABLE, FB_CNVRTR1_DMONOMIX)
+
+#define RV_CNVRTR1_DMONOMIX_DISABLE \
+ RV(FV_CNVRTR1_DMONOMIX_DISABLE, FB_CNVRTR1_DMONOMIX)
+
+#define RV_CNVRTR1_DACMU_ENABLE \
+ RV(FV_CNVRTR1_DACMU_ENABLE, FB_CNVRTR1_DACMU)
+
+#define RV_CNVRTR1_DACMU_DISABLE \
+ RV(FV_CNVRTR1_DACMU_DISABLE, FB_CNVRTR1_DACMU)
+
+
+/****************************
+ * R_DACSR (0x19) *
+ ****************************/
+
+/* Field Offsets */
+#define FB_DACSR_DBCM 6
+#define FB_DACSR_DBR 3
+#define FB_DACSR_DBM 0
+
+/* Field Masks */
+#define FM_DACSR_DBCM 0X3
+#define FM_DACSR_DBR 0X3
+#define FM_DACSR_DBM 0X7
+
+/* Field Values */
+#define FV_DACSR_DBCM_AUTO 0x0
+#define FV_DACSR_DBCM_32 0x1
+#define FV_DACSR_DBCM_40 0x2
+#define FV_DACSR_DBCM_64 0x3
+#define FV_DACSR_DBR_32 0x0
+#define FV_DACSR_DBR_44_1 0x1
+#define FV_DACSR_DBR_48 0x2
+#define FV_DACSR_DBM_PT25 0x0
+#define FV_DACSR_DBM_PT5 0x1
+#define FV_DACSR_DBM_1 0x2
+#define FV_DACSR_DBM_2 0x3
+
+/* Register Masks */
+#define RM_DACSR_DBCM RM(FM_DACSR_DBCM, FB_DACSR_DBCM)
+#define RM_DACSR_DBR RM(FM_DACSR_DBR, FB_DACSR_DBR)
+#define RM_DACSR_DBM RM(FM_DACSR_DBM, FB_DACSR_DBM)
+
+/* Register Values */
+#define RV_DACSR_DBCM_AUTO \
+ RV(FV_DACSR_DBCM_AUTO, FB_DACSR_DBCM)
+
+#define RV_DACSR_DBCM_32 \
+ RV(FV_DACSR_DBCM_32, FB_DACSR_DBCM)
+
+#define RV_DACSR_DBCM_40 \
+ RV(FV_DACSR_DBCM_40, FB_DACSR_DBCM)
+
+#define RV_DACSR_DBCM_64 \
+ RV(FV_DACSR_DBCM_64, FB_DACSR_DBCM)
+
+#define RV_DACSR_DBR_32 RV(FV_DACSR_DBR_32, FB_DACSR_DBR)
+#define RV_DACSR_DBR_44_1 \
+ RV(FV_DACSR_DBR_44_1, FB_DACSR_DBR)
+
+#define RV_DACSR_DBR_48 RV(FV_DACSR_DBR_48, FB_DACSR_DBR)
+#define RV_DACSR_DBM_PT25 \
+ RV(FV_DACSR_DBM_PT25, FB_DACSR_DBM)
+
+#define RV_DACSR_DBM_PT5 RV(FV_DACSR_DBM_PT5, FB_DACSR_DBM)
+#define RV_DACSR_DBM_1 RV(FV_DACSR_DBM_1, FB_DACSR_DBM)
+#define RV_DACSR_DBM_2 RV(FV_DACSR_DBM_2, FB_DACSR_DBM)
+
+/****************************
+ * R_PWRM1 (0x1A) *
+ ****************************/
+
+/* Field Offsets */
+#define FB_PWRM1_BSTL 7
+#define FB_PWRM1_BSTR 6
+#define FB_PWRM1_PGAL 5
+#define FB_PWRM1_PGAR 4
+#define FB_PWRM1_ADCL 3
+#define FB_PWRM1_ADCR 2
+#define FB_PWRM1_MICB 1
+#define FB_PWRM1_DIGENB 0
+
+/* Field Masks */
+#define FM_PWRM1_BSTL 0X1
+#define FM_PWRM1_BSTR 0X1
+#define FM_PWRM1_PGAL 0X1
+#define FM_PWRM1_PGAR 0X1
+#define FM_PWRM1_ADCL 0X1
+#define FM_PWRM1_ADCR 0X1
+#define FM_PWRM1_MICB 0X1
+#define FM_PWRM1_DIGENB 0X1
+
+/* Field Values */
+#define FV_PWRM1_BSTL_ENABLE 0x1
+#define FV_PWRM1_BSTL_DISABLE 0x0
+#define FV_PWRM1_BSTR_ENABLE 0x1
+#define FV_PWRM1_BSTR_DISABLE 0x0
+#define FV_PWRM1_PGAL_ENABLE 0x1
+#define FV_PWRM1_PGAL_DISABLE 0x0
+#define FV_PWRM1_PGAR_ENABLE 0x1
+#define FV_PWRM1_PGAR_DISABLE 0x0
+#define FV_PWRM1_ADCL_ENABLE 0x1
+#define FV_PWRM1_ADCL_DISABLE 0x0
+#define FV_PWRM1_ADCR_ENABLE 0x1
+#define FV_PWRM1_ADCR_DISABLE 0x0
+#define FV_PWRM1_MICB_ENABLE 0x1
+#define FV_PWRM1_MICB_DISABLE 0x0
+#define FV_PWRM1_DIGENB_DISABLE 0x1
+#define FV_PWRM1_DIGENB_ENABLE 0x0
+
+/* Register Masks */
+#define RM_PWRM1_BSTL RM(FM_PWRM1_BSTL, FB_PWRM1_BSTL)
+#define RM_PWRM1_BSTR RM(FM_PWRM1_BSTR, FB_PWRM1_BSTR)
+#define RM_PWRM1_PGAL RM(FM_PWRM1_PGAL, FB_PWRM1_PGAL)
+#define RM_PWRM1_PGAR RM(FM_PWRM1_PGAR, FB_PWRM1_PGAR)
+#define RM_PWRM1_ADCL RM(FM_PWRM1_ADCL, FB_PWRM1_ADCL)
+#define RM_PWRM1_ADCR RM(FM_PWRM1_ADCR, FB_PWRM1_ADCR)
+#define RM_PWRM1_MICB RM(FM_PWRM1_MICB, FB_PWRM1_MICB)
+#define RM_PWRM1_DIGENB \
+ RM(FM_PWRM1_DIGENB, FB_PWRM1_DIGENB)
+
+
+/* Register Values */
+#define RV_PWRM1_BSTL_ENABLE \
+ RV(FV_PWRM1_BSTL_ENABLE, FB_PWRM1_BSTL)
+
+#define RV_PWRM1_BSTL_DISABLE \
+ RV(FV_PWRM1_BSTL_DISABLE, FB_PWRM1_BSTL)
+
+#define RV_PWRM1_BSTR_ENABLE \
+ RV(FV_PWRM1_BSTR_ENABLE, FB_PWRM1_BSTR)
+
+#define RV_PWRM1_BSTR_DISABLE \
+ RV(FV_PWRM1_BSTR_DISABLE, FB_PWRM1_BSTR)
+
+#define RV_PWRM1_PGAL_ENABLE \
+ RV(FV_PWRM1_PGAL_ENABLE, FB_PWRM1_PGAL)
+
+#define RV_PWRM1_PGAL_DISABLE \
+ RV(FV_PWRM1_PGAL_DISABLE, FB_PWRM1_PGAL)
+
+#define RV_PWRM1_PGAR_ENABLE \
+ RV(FV_PWRM1_PGAR_ENABLE, FB_PWRM1_PGAR)
+
+#define RV_PWRM1_PGAR_DISABLE \
+ RV(FV_PWRM1_PGAR_DISABLE, FB_PWRM1_PGAR)
+
+#define RV_PWRM1_ADCL_ENABLE \
+ RV(FV_PWRM1_ADCL_ENABLE, FB_PWRM1_ADCL)
+
+#define RV_PWRM1_ADCL_DISABLE \
+ RV(FV_PWRM1_ADCL_DISABLE, FB_PWRM1_ADCL)
+
+#define RV_PWRM1_ADCR_ENABLE \
+ RV(FV_PWRM1_ADCR_ENABLE, FB_PWRM1_ADCR)
+
+#define RV_PWRM1_ADCR_DISABLE \
+ RV(FV_PWRM1_ADCR_DISABLE, FB_PWRM1_ADCR)
+
+#define RV_PWRM1_MICB_ENABLE \
+ RV(FV_PWRM1_MICB_ENABLE, FB_PWRM1_MICB)
+
+#define RV_PWRM1_MICB_DISABLE \
+ RV(FV_PWRM1_MICB_DISABLE, FB_PWRM1_MICB)
+
+#define RV_PWRM1_DIGENB_DISABLE \
+ RV(FV_PWRM1_DIGENB_DISABLE, FB_PWRM1_DIGENB)
+
+#define RV_PWRM1_DIGENB_ENABLE \
+ RV(FV_PWRM1_DIGENB_ENABLE, FB_PWRM1_DIGENB)
+
+
+/****************************
+ * R_PWRM2 (0x1B) *
+ ****************************/
+
+/* Field Offsets */
+#define FB_PWRM2_D2S 7
+#define FB_PWRM2_HPL 6
+#define FB_PWRM2_HPR 5
+#define FB_PWRM2_SPKL 4
+#define FB_PWRM2_SPKR 3
+#define FB_PWRM2_INSELL 2
+#define FB_PWRM2_INSELR 1
+#define FB_PWRM2_VREF 0
+
+/* Field Masks */
+#define FM_PWRM2_D2S 0X1
+#define FM_PWRM2_HPL 0X1
+#define FM_PWRM2_HPR 0X1
+#define FM_PWRM2_SPKL 0X1
+#define FM_PWRM2_SPKR 0X1
+#define FM_PWRM2_INSELL 0X1
+#define FM_PWRM2_INSELR 0X1
+#define FM_PWRM2_VREF 0X1
+
+/* Field Values */
+#define FV_PWRM2_D2S_ENABLE 0x1
+#define FV_PWRM2_D2S_DISABLE 0x0
+#define FV_PWRM2_HPL_ENABLE 0x1
+#define FV_PWRM2_HPL_DISABLE 0x0
+#define FV_PWRM2_HPR_ENABLE 0x1
+#define FV_PWRM2_HPR_DISABLE 0x0
+#define FV_PWRM2_SPKL_ENABLE 0x1
+#define FV_PWRM2_SPKL_DISABLE 0x0
+#define FV_PWRM2_SPKR_ENABLE 0x1
+#define FV_PWRM2_SPKR_DISABLE 0x0
+#define FV_PWRM2_INSELL_ENABLE 0x1
+#define FV_PWRM2_INSELL_DISABLE 0x0
+#define FV_PWRM2_INSELR_ENABLE 0x1
+#define FV_PWRM2_INSELR_DISABLE 0x0
+#define FV_PWRM2_VREF_ENABLE 0x1
+#define FV_PWRM2_VREF_DISABLE 0x0
+
+/* Register Masks */
+#define RM_PWRM2_D2S RM(FM_PWRM2_D2S, FB_PWRM2_D2S)
+#define RM_PWRM2_HPL RM(FM_PWRM2_HPL, FB_PWRM2_HPL)
+#define RM_PWRM2_HPR RM(FM_PWRM2_HPR, FB_PWRM2_HPR)
+#define RM_PWRM2_SPKL RM(FM_PWRM2_SPKL, FB_PWRM2_SPKL)
+#define RM_PWRM2_SPKR RM(FM_PWRM2_SPKR, FB_PWRM2_SPKR)
+#define RM_PWRM2_INSELL \
+ RM(FM_PWRM2_INSELL, FB_PWRM2_INSELL)
+
+#define RM_PWRM2_INSELR \
+ RM(FM_PWRM2_INSELR, FB_PWRM2_INSELR)
+
+#define RM_PWRM2_VREF RM(FM_PWRM2_VREF, FB_PWRM2_VREF)
+
+/* Register Values */
+#define RV_PWRM2_D2S_ENABLE \
+ RV(FV_PWRM2_D2S_ENABLE, FB_PWRM2_D2S)
+
+#define RV_PWRM2_D2S_DISABLE \
+ RV(FV_PWRM2_D2S_DISABLE, FB_PWRM2_D2S)
+
+#define RV_PWRM2_HPL_ENABLE \
+ RV(FV_PWRM2_HPL_ENABLE, FB_PWRM2_HPL)
+
+#define RV_PWRM2_HPL_DISABLE \
+ RV(FV_PWRM2_HPL_DISABLE, FB_PWRM2_HPL)
+
+#define RV_PWRM2_HPR_ENABLE \
+ RV(FV_PWRM2_HPR_ENABLE, FB_PWRM2_HPR)
+
+#define RV_PWRM2_HPR_DISABLE \
+ RV(FV_PWRM2_HPR_DISABLE, FB_PWRM2_HPR)
+
+#define RV_PWRM2_SPKL_ENABLE \
+ RV(FV_PWRM2_SPKL_ENABLE, FB_PWRM2_SPKL)
+
+#define RV_PWRM2_SPKL_DISABLE \
+ RV(FV_PWRM2_SPKL_DISABLE, FB_PWRM2_SPKL)
+
+#define RV_PWRM2_SPKR_ENABLE \
+ RV(FV_PWRM2_SPKR_ENABLE, FB_PWRM2_SPKR)
+
+#define RV_PWRM2_SPKR_DISABLE \
+ RV(FV_PWRM2_SPKR_DISABLE, FB_PWRM2_SPKR)
+
+#define RV_PWRM2_INSELL_ENABLE \
+ RV(FV_PWRM2_INSELL_ENABLE, FB_PWRM2_INSELL)
+
+#define RV_PWRM2_INSELL_DISABLE \
+ RV(FV_PWRM2_INSELL_DISABLE, FB_PWRM2_INSELL)
+
+#define RV_PWRM2_INSELR_ENABLE \
+ RV(FV_PWRM2_INSELR_ENABLE, FB_PWRM2_INSELR)
+
+#define RV_PWRM2_INSELR_DISABLE \
+ RV(FV_PWRM2_INSELR_DISABLE, FB_PWRM2_INSELR)
+
+#define RV_PWRM2_VREF_ENABLE \
+ RV(FV_PWRM2_VREF_ENABLE, FB_PWRM2_VREF)
+
+#define RV_PWRM2_VREF_DISABLE \
+ RV(FV_PWRM2_VREF_DISABLE, FB_PWRM2_VREF)
+
+
+/******************************
+ * R_CONFIG0 (0x1F) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_CONFIG0_ASDM 6
+#define FB_CONFIG0_DSDM 4
+#define FB_CONFIG0_DC_BYPASS 1
+#define FB_CONFIG0_SD_FORCE_ON 0
+
+/* Field Masks */
+#define FM_CONFIG0_ASDM 0X3
+#define FM_CONFIG0_DSDM 0X3
+#define FM_CONFIG0_DC_BYPASS 0X1
+#define FM_CONFIG0_SD_FORCE_ON 0X1
+
+/* Field Values */
+#define FV_CONFIG0_ASDM_HALF 0x1
+#define FV_CONFIG0_ASDM_FULL 0x2
+#define FV_CONFIG0_ASDM_AUTO 0x3
+#define FV_CONFIG0_DSDM_HALF 0x1
+#define FV_CONFIG0_DSDM_FULL 0x2
+#define FV_CONFIG0_DSDM_AUTO 0x3
+#define FV_CONFIG0_DC_BYPASS_ENABLE 0x1
+#define FV_CONFIG0_DC_BYPASS_DISABLE 0x0
+#define FV_CONFIG0_SD_FORCE_ON_ENABLE 0x1
+#define FV_CONFIG0_SD_FORCE_ON_DISABLE 0x0
+
+/* Register Masks */
+#define RM_CONFIG0_ASDM \
+ RM(FM_CONFIG0_ASDM, FB_CONFIG0_ASDM)
+
+#define RM_CONFIG0_DSDM \
+ RM(FM_CONFIG0_DSDM, FB_CONFIG0_DSDM)
+
+#define RM_CONFIG0_DC_BYPASS \
+ RM(FM_CONFIG0_DC_BYPASS, FB_CONFIG0_DC_BYPASS)
+
+#define RM_CONFIG0_SD_FORCE_ON \
+ RM(FM_CONFIG0_SD_FORCE_ON, FB_CONFIG0_SD_FORCE_ON)
+
+
+/* Register Values */
+#define RV_CONFIG0_ASDM_HALF \
+ RV(FV_CONFIG0_ASDM_HALF, FB_CONFIG0_ASDM)
+
+#define RV_CONFIG0_ASDM_FULL \
+ RV(FV_CONFIG0_ASDM_FULL, FB_CONFIG0_ASDM)
+
+#define RV_CONFIG0_ASDM_AUTO \
+ RV(FV_CONFIG0_ASDM_AUTO, FB_CONFIG0_ASDM)
+
+#define RV_CONFIG0_DSDM_HALF \
+ RV(FV_CONFIG0_DSDM_HALF, FB_CONFIG0_DSDM)
+
+#define RV_CONFIG0_DSDM_FULL \
+ RV(FV_CONFIG0_DSDM_FULL, FB_CONFIG0_DSDM)
+
+#define RV_CONFIG0_DSDM_AUTO \
+ RV(FV_CONFIG0_DSDM_AUTO, FB_CONFIG0_DSDM)
+
+#define RV_CONFIG0_DC_BYPASS_ENABLE \
+ RV(FV_CONFIG0_DC_BYPASS_ENABLE, FB_CONFIG0_DC_BYPASS)
+
+#define RV_CONFIG0_DC_BYPASS_DISABLE \
+ RV(FV_CONFIG0_DC_BYPASS_DISABLE, FB_CONFIG0_DC_BYPASS)
+
+#define RV_CONFIG0_SD_FORCE_ON_ENABLE \
+ RV(FV_CONFIG0_SD_FORCE_ON_ENABLE, FB_CONFIG0_SD_FORCE_ON)
+
+#define RV_CONFIG0_SD_FORCE_ON_DISABLE \
+ RV(FV_CONFIG0_SD_FORCE_ON_DISABLE, FB_CONFIG0_SD_FORCE_ON)
+
+
+/******************************
+ * R_CONFIG1 (0x20) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_CONFIG1_EQ2_EN 7
+#define FB_CONFIG1_EQ2_BE 4
+#define FB_CONFIG1_EQ1_EN 3
+#define FB_CONFIG1_EQ1_BE 0
+
+/* Field Masks */
+#define FM_CONFIG1_EQ2_EN 0X1
+#define FM_CONFIG1_EQ2_BE 0X7
+#define FM_CONFIG1_EQ1_EN 0X1
+#define FM_CONFIG1_EQ1_BE 0X7
+
+/* Field Values */
+#define FV_CONFIG1_EQ2_EN_ENABLE 0x1
+#define FV_CONFIG1_EQ2_EN_DISABLE 0x0
+#define FV_CONFIG1_EQ2_BE_PRE 0x0
+#define FV_CONFIG1_EQ2_BE_PRE_EQ_0 0x1
+#define FV_CONFIG1_EQ2_BE_PRE_EQ0_1 0x2
+#define FV_CONFIG1_EQ2_BE_PRE_EQ0_2 0x3
+#define FV_CONFIG1_EQ2_BE_PRE_EQ0_3 0x4
+#define FV_CONFIG1_EQ2_BE_PRE_EQ0_4 0x5
+#define FV_CONFIG1_EQ2_BE_PRE_EQ0_5 0x6
+#define FV_CONFIG1_EQ1_EN_ENABLE 0x1
+#define FV_CONFIG1_EQ1_EN_DISABLE 0x0
+#define FV_CONFIG1_EQ1_BE_PRE 0x0
+#define FV_CONFIG1_EQ1_BE_PRE_EQ_0 0x1
+#define FV_CONFIG1_EQ1_BE_PRE_EQ0_1 0x2
+#define FV_CONFIG1_EQ1_BE_PRE_EQ0_2 0x3
+#define FV_CONFIG1_EQ1_BE_PRE_EQ0_3 0x4
+#define FV_CONFIG1_EQ1_BE_PRE_EQ0_4 0x5
+#define FV_CONFIG1_EQ1_BE_PRE_EQ0_5 0x6
+
+/* Register Masks */
+#define RM_CONFIG1_EQ2_EN \
+ RM(FM_CONFIG1_EQ2_EN, FB_CONFIG1_EQ2_EN)
+
+#define RM_CONFIG1_EQ2_BE \
+ RM(FM_CONFIG1_EQ2_BE, FB_CONFIG1_EQ2_BE)
+
+#define RM_CONFIG1_EQ1_EN \
+ RM(FM_CONFIG1_EQ1_EN, FB_CONFIG1_EQ1_EN)
+
+#define RM_CONFIG1_EQ1_BE \
+ RM(FM_CONFIG1_EQ1_BE, FB_CONFIG1_EQ1_BE)
+
+
+/* Register Values */
+#define RV_CONFIG1_EQ2_EN_ENABLE \
+ RV(FV_CONFIG1_EQ2_EN_ENABLE, FB_CONFIG1_EQ2_EN)
+
+#define RV_CONFIG1_EQ2_EN_DISABLE \
+ RV(FV_CONFIG1_EQ2_EN_DISABLE, FB_CONFIG1_EQ2_EN)
+
+#define RV_CONFIG1_EQ2_BE_PRE \
+ RV(FV_CONFIG1_EQ2_BE_PRE, FB_CONFIG1_EQ2_BE)
+
+#define RV_CONFIG1_EQ2_BE_PRE_EQ_0 \
+ RV(FV_CONFIG1_EQ2_BE_PRE_EQ_0, FB_CONFIG1_EQ2_BE)
+
+#define RV_CONFIG1_EQ2_BE_PRE_EQ0_1 \
+ RV(FV_CONFIG1_EQ2_BE_PRE_EQ0_1, FB_CONFIG1_EQ2_BE)
+
+#define RV_CONFIG1_EQ2_BE_PRE_EQ0_2 \
+ RV(FV_CONFIG1_EQ2_BE_PRE_EQ0_2, FB_CONFIG1_EQ2_BE)
+
+#define RV_CONFIG1_EQ2_BE_PRE_EQ0_3 \
+ RV(FV_CONFIG1_EQ2_BE_PRE_EQ0_3, FB_CONFIG1_EQ2_BE)
+
+#define RV_CONFIG1_EQ2_BE_PRE_EQ0_4 \
+ RV(FV_CONFIG1_EQ2_BE_PRE_EQ0_4, FB_CONFIG1_EQ2_BE)
+
+#define RV_CONFIG1_EQ2_BE_PRE_EQ0_5 \
+ RV(FV_CONFIG1_EQ2_BE_PRE_EQ0_5, FB_CONFIG1_EQ2_BE)
+
+#define RV_CONFIG1_EQ1_EN_ENABLE \
+ RV(FV_CONFIG1_EQ1_EN_ENABLE, FB_CONFIG1_EQ1_EN)
+
+#define RV_CONFIG1_EQ1_EN_DISABLE \
+ RV(FV_CONFIG1_EQ1_EN_DISABLE, FB_CONFIG1_EQ1_EN)
+
+#define RV_CONFIG1_EQ1_BE_PRE \
+ RV(FV_CONFIG1_EQ1_BE_PRE, FB_CONFIG1_EQ1_BE)
+
+#define RV_CONFIG1_EQ1_BE_PRE_EQ_0 \
+ RV(FV_CONFIG1_EQ1_BE_PRE_EQ_0, FB_CONFIG1_EQ1_BE)
+
+#define RV_CONFIG1_EQ1_BE_PRE_EQ0_1 \
+ RV(FV_CONFIG1_EQ1_BE_PRE_EQ0_1, FB_CONFIG1_EQ1_BE)
+
+#define RV_CONFIG1_EQ1_BE_PRE_EQ0_2 \
+ RV(FV_CONFIG1_EQ1_BE_PRE_EQ0_2, FB_CONFIG1_EQ1_BE)
+
+#define RV_CONFIG1_EQ1_BE_PRE_EQ0_3 \
+ RV(FV_CONFIG1_EQ1_BE_PRE_EQ0_3, FB_CONFIG1_EQ1_BE)
+
+#define RV_CONFIG1_EQ1_BE_PRE_EQ0_4 \
+ RV(FV_CONFIG1_EQ1_BE_PRE_EQ0_4, FB_CONFIG1_EQ1_BE)
+
+#define RV_CONFIG1_EQ1_BE_PRE_EQ0_5 \
+ RV(FV_CONFIG1_EQ1_BE_PRE_EQ0_5, FB_CONFIG1_EQ1_BE)
+
+
+/******************************
+ * R_DMICCTL (0x24) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_DMICCTL_DMICEN 7
+#define FB_DMICCTL_DMONO 4
+#define FB_DMICCTL_DMPHADJ 2
+#define FB_DMICCTL_DMRATE 0
+
+/* Field Masks */
+#define FM_DMICCTL_DMICEN 0X1
+#define FM_DMICCTL_DMONO 0X1
+#define FM_DMICCTL_DMPHADJ 0X3
+#define FM_DMICCTL_DMRATE 0X3
+
+/* Field Values */
+#define FV_DMICCTL_DMICEN_ENABLE 0x1
+#define FV_DMICCTL_DMICEN_DISABLE 0x0
+#define FV_DMICCTL_DMONO_STEREO 0x0
+#define FV_DMICCTL_DMONO_MONO 0x1
+
+/* Register Masks */
+#define RM_DMICCTL_DMICEN \
+ RM(FM_DMICCTL_DMICEN, FB_DMICCTL_DMICEN)
+
+#define RM_DMICCTL_DMONO \
+ RM(FM_DMICCTL_DMONO, FB_DMICCTL_DMONO)
+
+#define RM_DMICCTL_DMPHADJ \
+ RM(FM_DMICCTL_DMPHADJ, FB_DMICCTL_DMPHADJ)
+
+#define RM_DMICCTL_DMRATE \
+ RM(FM_DMICCTL_DMRATE, FB_DMICCTL_DMRATE)
+
+
+/* Register Values */
+#define RV_DMICCTL_DMICEN_ENABLE \
+ RV(FV_DMICCTL_DMICEN_ENABLE, FB_DMICCTL_DMICEN)
+
+#define RV_DMICCTL_DMICEN_DISABLE \
+ RV(FV_DMICCTL_DMICEN_DISABLE, FB_DMICCTL_DMICEN)
+
+#define RV_DMICCTL_DMONO_STEREO \
+ RV(FV_DMICCTL_DMONO_STEREO, FB_DMICCTL_DMONO)
+
+#define RV_DMICCTL_DMONO_MONO \
+ RV(FV_DMICCTL_DMONO_MONO, FB_DMICCTL_DMONO)
+
+
+/*****************************
+ * R_CLECTL (0x25) *
+ *****************************/
+
+/* Field Offsets */
+#define FB_CLECTL_LVL_MODE 4
+#define FB_CLECTL_WINDOWSEL 3
+#define FB_CLECTL_EXP_EN 2
+#define FB_CLECTL_LIMIT_EN 1
+#define FB_CLECTL_COMP_EN 0
+
+/* Field Masks */
+#define FM_CLECTL_LVL_MODE 0X1
+#define FM_CLECTL_WINDOWSEL 0X1
+#define FM_CLECTL_EXP_EN 0X1
+#define FM_CLECTL_LIMIT_EN 0X1
+#define FM_CLECTL_COMP_EN 0X1
+
+/* Field Values */
+#define FV_CLECTL_LVL_MODE_AVG 0x0
+#define FV_CLECTL_LVL_MODE_PEAK 0x1
+#define FV_CLECTL_WINDOWSEL_512 0x0
+#define FV_CLECTL_WINDOWSEL_64 0x1
+#define FV_CLECTL_EXP_EN_ENABLE 0x1
+#define FV_CLECTL_EXP_EN_DISABLE 0x0
+#define FV_CLECTL_LIMIT_EN_ENABLE 0x1
+#define FV_CLECTL_LIMIT_EN_DISABLE 0x0
+#define FV_CLECTL_COMP_EN_ENABLE 0x1
+#define FV_CLECTL_COMP_EN_DISABLE 0x0
+
+/* Register Masks */
+#define RM_CLECTL_LVL_MODE \
+ RM(FM_CLECTL_LVL_MODE, FB_CLECTL_LVL_MODE)
+
+#define RM_CLECTL_WINDOWSEL \
+ RM(FM_CLECTL_WINDOWSEL, FB_CLECTL_WINDOWSEL)
+
+#define RM_CLECTL_EXP_EN \
+ RM(FM_CLECTL_EXP_EN, FB_CLECTL_EXP_EN)
+
+#define RM_CLECTL_LIMIT_EN \
+ RM(FM_CLECTL_LIMIT_EN, FB_CLECTL_LIMIT_EN)
+
+#define RM_CLECTL_COMP_EN \
+ RM(FM_CLECTL_COMP_EN, FB_CLECTL_COMP_EN)
+
+
+/* Register Values */
+#define RV_CLECTL_LVL_MODE_AVG \
+ RV(FV_CLECTL_LVL_MODE_AVG, FB_CLECTL_LVL_MODE)
+
+#define RV_CLECTL_LVL_MODE_PEAK \
+ RV(FV_CLECTL_LVL_MODE_PEAK, FB_CLECTL_LVL_MODE)
+
+#define RV_CLECTL_WINDOWSEL_512 \
+ RV(FV_CLECTL_WINDOWSEL_512, FB_CLECTL_WINDOWSEL)
+
+#define RV_CLECTL_WINDOWSEL_64 \
+ RV(FV_CLECTL_WINDOWSEL_64, FB_CLECTL_WINDOWSEL)
+
+#define RV_CLECTL_EXP_EN_ENABLE \
+ RV(FV_CLECTL_EXP_EN_ENABLE, FB_CLECTL_EXP_EN)
+
+#define RV_CLECTL_EXP_EN_DISABLE \
+ RV(FV_CLECTL_EXP_EN_DISABLE, FB_CLECTL_EXP_EN)
+
+#define RV_CLECTL_LIMIT_EN_ENABLE \
+ RV(FV_CLECTL_LIMIT_EN_ENABLE, FB_CLECTL_LIMIT_EN)
+
+#define RV_CLECTL_LIMIT_EN_DISABLE \
+ RV(FV_CLECTL_LIMIT_EN_DISABLE, FB_CLECTL_LIMIT_EN)
+
+#define RV_CLECTL_COMP_EN_ENABLE \
+ RV(FV_CLECTL_COMP_EN_ENABLE, FB_CLECTL_COMP_EN)
+
+#define RV_CLECTL_COMP_EN_DISABLE \
+ RV(FV_CLECTL_COMP_EN_DISABLE, FB_CLECTL_COMP_EN)
+
+
+/*****************************
+ * R_MUGAIN (0x26) *
+ *****************************/
+
+/* Field Offsets */
+#define FB_MUGAIN_CLEMUG 0
+
+/* Field Masks */
+#define FM_MUGAIN_CLEMUG 0X1F
+
+/* Field Values */
+#define FV_MUGAIN_CLEMUG_46PT5DB 0x1F
+#define FV_MUGAIN_CLEMUG_0DB 0x0
+
+/* Register Masks */
+#define RM_MUGAIN_CLEMUG \
+ RM(FM_MUGAIN_CLEMUG, FB_MUGAIN_CLEMUG)
+
+
+/* Register Values */
+#define RV_MUGAIN_CLEMUG_46PT5DB \
+ RV(FV_MUGAIN_CLEMUG_46PT5DB, FB_MUGAIN_CLEMUG)
+
+#define RV_MUGAIN_CLEMUG_0DB \
+ RV(FV_MUGAIN_CLEMUG_0DB, FB_MUGAIN_CLEMUG)
+
+
+/*****************************
+ * R_COMPTH (0x27) *
+ *****************************/
+
+/* Field Offsets */
+#define FB_COMPTH 0
+
+/* Field Masks */
+#define FM_COMPTH 0XFF
+
+/* Field Values */
+#define FV_COMPTH_0DB 0xFF
+#define FV_COMPTH_N95PT625DB 0x0
+
+/* Register Masks */
+#define RM_COMPTH RM(FM_COMPTH, FB_COMPTH)
+
+/* Register Values */
+#define RV_COMPTH_0DB RV(FV_COMPTH_0DB, FB_COMPTH)
+#define RV_COMPTH_N95PT625DB \
+ RV(FV_COMPTH_N95PT625DB, FB_COMPTH)
+
+
+/*****************************
+ * R_CMPRAT (0x28) *
+ *****************************/
+
+/* Field Offsets */
+#define FB_CMPRAT 0
+
+/* Field Masks */
+#define FM_CMPRAT 0X1F
+
+/* Register Masks */
+#define RM_CMPRAT RM(FM_CMPRAT, FB_CMPRAT)
+
+/******************************
+ * R_CATKTCL (0x29) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_CATKTCL 0
+
+/* Field Masks */
+#define FM_CATKTCL 0XFF
+
+/* Register Masks */
+#define RM_CATKTCL RM(FM_CATKTCL, FB_CATKTCL)
+
+/******************************
+ * R_CATKTCH (0x2A) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_CATKTCH 0
+
+/* Field Masks */
+#define FM_CATKTCH 0XFF
+
+/* Register Masks */
+#define RM_CATKTCH RM(FM_CATKTCH, FB_CATKTCH)
+
+/******************************
+ * R_CRELTCL (0x2B) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_CRELTCL 0
+
+/* Field Masks */
+#define FM_CRELTCL 0XFF
+
+/* Register Masks */
+#define RM_CRELTCL RM(FM_CRELTCL, FB_CRELTCL)
+
+/******************************
+ * R_CRELTCH (0x2C) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_CRELTCH 0
+
+/* Field Masks */
+#define FM_CRELTCH 0XFF
+
+/* Register Masks */
+#define RM_CRELTCH RM(FM_CRELTCH, FB_CRELTCH)
+
+/****************************
+ * R_LIMTH (0x2D) *
+ ****************************/
+
+/* Field Offsets */
+#define FB_LIMTH 0
+
+/* Field Masks */
+#define FM_LIMTH 0XFF
+
+/* Field Values */
+#define FV_LIMTH_0DB 0xFF
+#define FV_LIMTH_N95PT625DB 0x0
+
+/* Register Masks */
+#define RM_LIMTH RM(FM_LIMTH, FB_LIMTH)
+
+/* Register Values */
+#define RV_LIMTH_0DB RV(FV_LIMTH_0DB, FB_LIMTH)
+#define RV_LIMTH_N95PT625DB RV(FV_LIMTH_N95PT625DB, FB_LIMTH)
+
+/*****************************
+ * R_LIMTGT (0x2E) *
+ *****************************/
+
+/* Field Offsets */
+#define FB_LIMTGT 0
+
+/* Field Masks */
+#define FM_LIMTGT 0XFF
+
+/* Field Values */
+#define FV_LIMTGT_0DB 0xFF
+#define FV_LIMTGT_N95PT625DB 0x0
+
+/* Register Masks */
+#define RM_LIMTGT RM(FM_LIMTGT, FB_LIMTGT)
+
+/* Register Values */
+#define RV_LIMTGT_0DB RV(FV_LIMTGT_0DB, FB_LIMTGT)
+#define RV_LIMTGT_N95PT625DB \
+ RV(FV_LIMTGT_N95PT625DB, FB_LIMTGT)
+
+
+/******************************
+ * R_LATKTCL (0x2F) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_LATKTCL 0
+
+/* Field Masks */
+#define FM_LATKTCL 0XFF
+
+/* Register Masks */
+#define RM_LATKTCL RM(FM_LATKTCL, FB_LATKTCL)
+
+/******************************
+ * R_LATKTCH (0x30) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_LATKTCH 0
+
+/* Field Masks */
+#define FM_LATKTCH 0XFF
+
+/* Register Masks */
+#define RM_LATKTCH RM(FM_LATKTCH, FB_LATKTCH)
+
+/******************************
+ * R_LRELTCL (0x31) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_LRELTCL 0
+
+/* Field Masks */
+#define FM_LRELTCL 0XFF
+
+/* Register Masks */
+#define RM_LRELTCL RM(FM_LRELTCL, FB_LRELTCL)
+
+/******************************
+ * R_LRELTCH (0x32) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_LRELTCH 0
+
+/* Field Masks */
+#define FM_LRELTCH 0XFF
+
+/* Register Masks */
+#define RM_LRELTCH RM(FM_LRELTCH, FB_LRELTCH)
+
+/****************************
+ * R_EXPTH (0x33) *
+ ****************************/
+
+/* Field Offsets */
+#define FB_EXPTH 0
+
+/* Field Masks */
+#define FM_EXPTH 0XFF
+
+/* Field Values */
+#define FV_EXPTH_0DB 0xFF
+#define FV_EXPTH_N95PT625DB 0x0
+
+/* Register Masks */
+#define RM_EXPTH RM(FM_EXPTH, FB_EXPTH)
+
+/* Register Values */
+#define RV_EXPTH_0DB RV(FV_EXPTH_0DB, FB_EXPTH)
+#define RV_EXPTH_N95PT625DB RV(FV_EXPTH_N95PT625DB, FB_EXPTH)
+
+/*****************************
+ * R_EXPRAT (0x34) *
+ *****************************/
+
+/* Field Offsets */
+#define FB_EXPRAT 0
+
+/* Field Masks */
+#define FM_EXPRAT 0X7
+
+/* Register Masks */
+#define RM_EXPRAT RM(FM_EXPRAT, FB_EXPRAT)
+
+/******************************
+ * R_XATKTCL (0x35) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_XATKTCL 0
+
+/* Field Masks */
+#define FM_XATKTCL 0XFF
+
+/* Register Masks */
+#define RM_XATKTCL RM(FM_XATKTCL, FB_XATKTCL)
+
+/******************************
+ * R_XATKTCH (0x36) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_XATKTCH 0
+
+/* Field Masks */
+#define FM_XATKTCH 0XFF
+
+/* Register Masks */
+#define RM_XATKTCH RM(FM_XATKTCH, FB_XATKTCH)
+
+/******************************
+ * R_XRELTCL (0x37) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_XRELTCL 0
+
+/* Field Masks */
+#define FM_XRELTCL 0XFF
+
+/* Register Masks */
+#define RM_XRELTCL RM(FM_XRELTCL, FB_XRELTCL)
+
+/******************************
+ * R_XRELTCH (0x38) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_XRELTCH 0
+
+/* Field Masks */
+#define FM_XRELTCH 0XFF
+
+/* Register Masks */
+#define RM_XRELTCH RM(FM_XRELTCH, FB_XRELTCH)
+
+/****************************
+ * R_FXCTL (0x39) *
+ ****************************/
+
+/* Field Offsets */
+#define FB_FXCTL_3DEN 4
+#define FB_FXCTL_TEEN 3
+#define FB_FXCTL_TNLFBYPASS 2
+#define FB_FXCTL_BEEN 1
+#define FB_FXCTL_BNLFBYPASS 0
+
+/* Field Masks */
+#define FM_FXCTL_3DEN 0X1
+#define FM_FXCTL_TEEN 0X1
+#define FM_FXCTL_TNLFBYPASS 0X1
+#define FM_FXCTL_BEEN 0X1
+#define FM_FXCTL_BNLFBYPASS 0X1
+
+/* Field Values */
+#define FV_FXCTL_3DEN_ENABLE 0x1
+#define FV_FXCTL_3DEN_DISABLE 0x0
+#define FV_FXCTL_TEEN_ENABLE 0x1
+#define FV_FXCTL_TEEN_DISABLE 0x0
+#define FV_FXCTL_TNLFBYPASS_ENABLE 0x1
+#define FV_FXCTL_TNLFBYPASS_DISABLE 0x0
+#define FV_FXCTL_BEEN_ENABLE 0x1
+#define FV_FXCTL_BEEN_DISABLE 0x0
+#define FV_FXCTL_BNLFBYPASS_ENABLE 0x1
+#define FV_FXCTL_BNLFBYPASS_DISABLE 0x0
+
+/* Register Masks */
+#define RM_FXCTL_3DEN RM(FM_FXCTL_3DEN, FB_FXCTL_3DEN)
+#define RM_FXCTL_TEEN RM(FM_FXCTL_TEEN, FB_FXCTL_TEEN)
+#define RM_FXCTL_TNLFBYPASS \
+ RM(FM_FXCTL_TNLFBYPASS, FB_FXCTL_TNLFBYPASS)
+
+#define RM_FXCTL_BEEN RM(FM_FXCTL_BEEN, FB_FXCTL_BEEN)
+#define RM_FXCTL_BNLFBYPASS \
+ RM(FM_FXCTL_BNLFBYPASS, FB_FXCTL_BNLFBYPASS)
+
+
+/* Register Values */
+#define RV_FXCTL_3DEN_ENABLE \
+ RV(FV_FXCTL_3DEN_ENABLE, FB_FXCTL_3DEN)
+
+#define RV_FXCTL_3DEN_DISABLE \
+ RV(FV_FXCTL_3DEN_DISABLE, FB_FXCTL_3DEN)
+
+#define RV_FXCTL_TEEN_ENABLE \
+ RV(FV_FXCTL_TEEN_ENABLE, FB_FXCTL_TEEN)
+
+#define RV_FXCTL_TEEN_DISABLE \
+ RV(FV_FXCTL_TEEN_DISABLE, FB_FXCTL_TEEN)
+
+#define RV_FXCTL_TNLFBYPASS_ENABLE \
+ RV(FV_FXCTL_TNLFBYPASS_ENABLE, FB_FXCTL_TNLFBYPASS)
+
+#define RV_FXCTL_TNLFBYPASS_DISABLE \
+ RV(FV_FXCTL_TNLFBYPASS_DISABLE, FB_FXCTL_TNLFBYPASS)
+
+#define RV_FXCTL_BEEN_ENABLE \
+ RV(FV_FXCTL_BEEN_ENABLE, FB_FXCTL_BEEN)
+
+#define RV_FXCTL_BEEN_DISABLE \
+ RV(FV_FXCTL_BEEN_DISABLE, FB_FXCTL_BEEN)
+
+#define RV_FXCTL_BNLFBYPASS_ENABLE \
+ RV(FV_FXCTL_BNLFBYPASS_ENABLE, FB_FXCTL_BNLFBYPASS)
+
+#define RV_FXCTL_BNLFBYPASS_DISABLE \
+ RV(FV_FXCTL_BNLFBYPASS_DISABLE, FB_FXCTL_BNLFBYPASS)
+
+
+/*******************************
+ * R_DACCRWRL (0x3A) *
+ *******************************/
+
+/* Field Offsets */
+#define FB_DACCRWRL_DACCRWDL 0
+
+/* Field Masks */
+#define FM_DACCRWRL_DACCRWDL 0XFF
+
+/* Register Masks */
+#define RM_DACCRWRL_DACCRWDL \
+ RM(FM_DACCRWRL_DACCRWDL, FB_DACCRWRL_DACCRWDL)
+
+
+/*******************************
+ * R_DACCRWRM (0x3B) *
+ *******************************/
+
+/* Field Offsets */
+#define FB_DACCRWRM_DACCRWDM 0
+
+/* Field Masks */
+#define FM_DACCRWRM_DACCRWDM 0XFF
+
+/* Register Masks */
+#define RM_DACCRWRM_DACCRWDM \
+ RM(FM_DACCRWRM_DACCRWDM, FB_DACCRWRM_DACCRWDM)
+
+
+/*******************************
+ * R_DACCRWRH (0x3C) *
+ *******************************/
+
+/* Field Offsets */
+#define FB_DACCRWRH_DACCRWDH 0
+
+/* Field Masks */
+#define FM_DACCRWRH_DACCRWDH 0XFF
+
+/* Register Masks */
+#define RM_DACCRWRH_DACCRWDH \
+ RM(FM_DACCRWRH_DACCRWDH, FB_DACCRWRH_DACCRWDH)
+
+
+/*******************************
+ * R_DACCRRDL (0x3D) *
+ *******************************/
+
+/* Field Offsets */
+#define FB_DACCRRDL 0
+
+/* Field Masks */
+#define FM_DACCRRDL 0XFF
+
+/* Register Masks */
+#define RM_DACCRRDL RM(FM_DACCRRDL, FB_DACCRRDL)
+
+/*******************************
+ * R_DACCRRDM (0x3E) *
+ *******************************/
+
+/* Field Offsets */
+#define FB_DACCRRDM 0
+
+/* Field Masks */
+#define FM_DACCRRDM 0XFF
+
+/* Register Masks */
+#define RM_DACCRRDM RM(FM_DACCRRDM, FB_DACCRRDM)
+
+/*******************************
+ * R_DACCRRDH (0x3F) *
+ *******************************/
+
+/* Field Offsets */
+#define FB_DACCRRDH 0
+
+/* Field Masks */
+#define FM_DACCRRDH 0XFF
+
+/* Register Masks */
+#define RM_DACCRRDH RM(FM_DACCRRDH, FB_DACCRRDH)
+
+/********************************
+ * R_DACCRADDR (0x40) *
+ ********************************/
+
+/* Field Offsets */
+#define FB_DACCRADDR_DACCRADD 0
+
+/* Field Masks */
+#define FM_DACCRADDR_DACCRADD 0XFF
+
+/* Register Masks */
+#define RM_DACCRADDR_DACCRADD \
+ RM(FM_DACCRADDR_DACCRADD, FB_DACCRADDR_DACCRADD)
+
+
+/******************************
+ * R_DCOFSEL (0x41) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_DCOFSEL_DC_COEF_SEL 0
+
+/* Field Masks */
+#define FM_DCOFSEL_DC_COEF_SEL 0X7
+
+/* Field Values */
+#define FV_DCOFSEL_DC_COEF_SEL_2_N8 0x0
+#define FV_DCOFSEL_DC_COEF_SEL_2_N9 0x1
+#define FV_DCOFSEL_DC_COEF_SEL_2_N10 0x2
+#define FV_DCOFSEL_DC_COEF_SEL_2_N11 0x3
+#define FV_DCOFSEL_DC_COEF_SEL_2_N12 0x4
+#define FV_DCOFSEL_DC_COEF_SEL_2_N13 0x5
+#define FV_DCOFSEL_DC_COEF_SEL_2_N14 0x6
+#define FV_DCOFSEL_DC_COEF_SEL_2_N15 0x7
+
+/* Register Masks */
+#define RM_DCOFSEL_DC_COEF_SEL \
+ RM(FM_DCOFSEL_DC_COEF_SEL, FB_DCOFSEL_DC_COEF_SEL)
+
+
+/* Register Values */
+#define RV_DCOFSEL_DC_COEF_SEL_2_N8 \
+ RV(FV_DCOFSEL_DC_COEF_SEL_2_N8, FB_DCOFSEL_DC_COEF_SEL)
+
+#define RV_DCOFSEL_DC_COEF_SEL_2_N9 \
+ RV(FV_DCOFSEL_DC_COEF_SEL_2_N9, FB_DCOFSEL_DC_COEF_SEL)
+
+#define RV_DCOFSEL_DC_COEF_SEL_2_N10 \
+ RV(FV_DCOFSEL_DC_COEF_SEL_2_N10, FB_DCOFSEL_DC_COEF_SEL)
+
+#define RV_DCOFSEL_DC_COEF_SEL_2_N11 \
+ RV(FV_DCOFSEL_DC_COEF_SEL_2_N11, FB_DCOFSEL_DC_COEF_SEL)
+
+#define RV_DCOFSEL_DC_COEF_SEL_2_N12 \
+ RV(FV_DCOFSEL_DC_COEF_SEL_2_N12, FB_DCOFSEL_DC_COEF_SEL)
+
+#define RV_DCOFSEL_DC_COEF_SEL_2_N13 \
+ RV(FV_DCOFSEL_DC_COEF_SEL_2_N13, FB_DCOFSEL_DC_COEF_SEL)
+
+#define RV_DCOFSEL_DC_COEF_SEL_2_N14 \
+ RV(FV_DCOFSEL_DC_COEF_SEL_2_N14, FB_DCOFSEL_DC_COEF_SEL)
+
+#define RV_DCOFSEL_DC_COEF_SEL_2_N15 \
+ RV(FV_DCOFSEL_DC_COEF_SEL_2_N15, FB_DCOFSEL_DC_COEF_SEL)
+
+
+/******************************
+ * R_PLLCTL9 (0x4E) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_PLLCTL9_REFDIV_PLL1 0
+
+/* Field Masks */
+#define FM_PLLCTL9_REFDIV_PLL1 0XFF
+
+/* Register Masks */
+#define RM_PLLCTL9_REFDIV_PLL1 \
+ RM(FM_PLLCTL9_REFDIV_PLL1, FB_PLLCTL9_REFDIV_PLL1)
+
+
+/******************************
+ * R_PLLCTLA (0x4F) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_PLLCTLA_OUTDIV_PLL1 0
+
+/* Field Masks */
+#define FM_PLLCTLA_OUTDIV_PLL1 0XFF
+
+/* Register Masks */
+#define RM_PLLCTLA_OUTDIV_PLL1 \
+ RM(FM_PLLCTLA_OUTDIV_PLL1, FB_PLLCTLA_OUTDIV_PLL1)
+
+
+/******************************
+ * R_PLLCTLB (0x50) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_PLLCTLB_FBDIV_PLL1L 0
+
+/* Field Masks */
+#define FM_PLLCTLB_FBDIV_PLL1L 0XFF
+
+/* Register Masks */
+#define RM_PLLCTLB_FBDIV_PLL1L \
+ RM(FM_PLLCTLB_FBDIV_PLL1L, FB_PLLCTLB_FBDIV_PLL1L)
+
+
+/******************************
+ * R_PLLCTLC (0x51) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_PLLCTLC_FBDIV_PLL1H 0
+
+/* Field Masks */
+#define FM_PLLCTLC_FBDIV_PLL1H 0X7
+
+/* Register Masks */
+#define RM_PLLCTLC_FBDIV_PLL1H \
+ RM(FM_PLLCTLC_FBDIV_PLL1H, FB_PLLCTLC_FBDIV_PLL1H)
+
+
+/******************************
+ * R_PLLCTLD (0x52) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_PLLCTLD_RZ_PLL1 3
+#define FB_PLLCTLD_CP_PLL1 0
+
+/* Field Masks */
+#define FM_PLLCTLD_RZ_PLL1 0X7
+#define FM_PLLCTLD_CP_PLL1 0X7
+
+/* Register Masks */
+#define RM_PLLCTLD_RZ_PLL1 \
+ RM(FM_PLLCTLD_RZ_PLL1, FB_PLLCTLD_RZ_PLL1)
+
+#define RM_PLLCTLD_CP_PLL1 \
+ RM(FM_PLLCTLD_CP_PLL1, FB_PLLCTLD_CP_PLL1)
+
+
+/******************************
+ * R_PLLCTLE (0x53) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_PLLCTLE_REFDIV_PLL2 0
+
+/* Field Masks */
+#define FM_PLLCTLE_REFDIV_PLL2 0XFF
+
+/* Register Masks */
+#define RM_PLLCTLE_REFDIV_PLL2 \
+ RM(FM_PLLCTLE_REFDIV_PLL2, FB_PLLCTLE_REFDIV_PLL2)
+
+
+/******************************
+ * R_PLLCTLF (0x54) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_PLLCTLF_OUTDIV_PLL2 0
+
+/* Field Masks */
+#define FM_PLLCTLF_OUTDIV_PLL2 0XFF
+
+/* Register Masks */
+#define RM_PLLCTLF_OUTDIV_PLL2 \
+ RM(FM_PLLCTLF_OUTDIV_PLL2, FB_PLLCTLF_OUTDIV_PLL2)
+
+
+/*******************************
+ * R_PLLCTL10 (0x55) *
+ *******************************/
+
+/* Field Offsets */
+#define FB_PLLCTL10_FBDIV_PLL2L 0
+
+/* Field Masks */
+#define FM_PLLCTL10_FBDIV_PLL2L 0XFF
+
+/* Register Masks */
+#define RM_PLLCTL10_FBDIV_PLL2L \
+ RM(FM_PLLCTL10_FBDIV_PLL2L, FB_PLLCTL10_FBDIV_PLL2L)
+
+
+/*******************************
+ * R_PLLCTL11 (0x56) *
+ *******************************/
+
+/* Field Offsets */
+#define FB_PLLCTL11_FBDIV_PLL2H 0
+
+/* Field Masks */
+#define FM_PLLCTL11_FBDIV_PLL2H 0X7
+
+/* Register Masks */
+#define RM_PLLCTL11_FBDIV_PLL2H \
+ RM(FM_PLLCTL11_FBDIV_PLL2H, FB_PLLCTL11_FBDIV_PLL2H)
+
+
+/*******************************
+ * R_PLLCTL12 (0x57) *
+ *******************************/
+
+/* Field Offsets */
+#define FB_PLLCTL12_RZ_PLL2 3
+#define FB_PLLCTL12_CP_PLL2 0
+
+/* Field Masks */
+#define FM_PLLCTL12_RZ_PLL2 0X7
+#define FM_PLLCTL12_CP_PLL2 0X7
+
+/* Register Masks */
+#define RM_PLLCTL12_RZ_PLL2 \
+ RM(FM_PLLCTL12_RZ_PLL2, FB_PLLCTL12_RZ_PLL2)
+
+#define RM_PLLCTL12_CP_PLL2 \
+ RM(FM_PLLCTL12_CP_PLL2, FB_PLLCTL12_CP_PLL2)
+
+
+/*******************************
+ * R_PLLCTL1B (0x60) *
+ *******************************/
+
+/* Field Offsets */
+#define FB_PLLCTL1B_VCOI_PLL2 4
+#define FB_PLLCTL1B_VCOI_PLL1 2
+
+/* Field Masks */
+#define FM_PLLCTL1B_VCOI_PLL2 0X3
+#define FM_PLLCTL1B_VCOI_PLL1 0X3
+
+/* Register Masks */
+#define RM_PLLCTL1B_VCOI_PLL2 \
+ RM(FM_PLLCTL1B_VCOI_PLL2, FB_PLLCTL1B_VCOI_PLL2)
+
+#define RM_PLLCTL1B_VCOI_PLL1 \
+ RM(FM_PLLCTL1B_VCOI_PLL1, FB_PLLCTL1B_VCOI_PLL1)
+
+
+/*******************************
+ * R_PLLCTL1C (0x61) *
+ *******************************/
+
+/* Field Offsets */
+#define FB_PLLCTL1C_PDB_PLL2 2
+#define FB_PLLCTL1C_PDB_PLL1 1
+
+/* Field Masks */
+#define FM_PLLCTL1C_PDB_PLL2 0X1
+#define FM_PLLCTL1C_PDB_PLL1 0X1
+
+/* Field Values */
+#define FV_PLLCTL1C_PDB_PLL2_ENABLE 0x1
+#define FV_PLLCTL1C_PDB_PLL2_DISABLE 0x0
+#define FV_PLLCTL1C_PDB_PLL1_ENABLE 0x1
+#define FV_PLLCTL1C_PDB_PLL1_DISABLE 0x0
+
+/* Register Masks */
+#define RM_PLLCTL1C_PDB_PLL2 \
+ RM(FM_PLLCTL1C_PDB_PLL2, FB_PLLCTL1C_PDB_PLL2)
+
+#define RM_PLLCTL1C_PDB_PLL1 \
+ RM(FM_PLLCTL1C_PDB_PLL1, FB_PLLCTL1C_PDB_PLL1)
+
+
+/* Register Values */
+#define RV_PLLCTL1C_PDB_PLL2_ENABLE \
+ RV(FV_PLLCTL1C_PDB_PLL2_ENABLE, FB_PLLCTL1C_PDB_PLL2)
+
+#define RV_PLLCTL1C_PDB_PLL2_DISABLE \
+ RV(FV_PLLCTL1C_PDB_PLL2_DISABLE, FB_PLLCTL1C_PDB_PLL2)
+
+#define RV_PLLCTL1C_PDB_PLL1_ENABLE \
+ RV(FV_PLLCTL1C_PDB_PLL1_ENABLE, FB_PLLCTL1C_PDB_PLL1)
+
+#define RV_PLLCTL1C_PDB_PLL1_DISABLE \
+ RV(FV_PLLCTL1C_PDB_PLL1_DISABLE, FB_PLLCTL1C_PDB_PLL1)
+
+
+/*******************************
+ * R_TIMEBASE (0x77) *
+ *******************************/
+
+/* Field Offsets */
+#define FB_TIMEBASE_DIVIDER 0
+
+/* Field Masks */
+#define FM_TIMEBASE_DIVIDER 0XFF
+
+/* Register Masks */
+#define RM_TIMEBASE_DIVIDER \
+ RM(FM_TIMEBASE_DIVIDER, FB_TIMEBASE_DIVIDER)
+
+
+/*****************************
+ * R_DEVIDL (0x7D) *
+ *****************************/
+
+/* Field Offsets */
+#define FB_DEVIDL_DIDL 0
+
+/* Field Masks */
+#define FM_DEVIDL_DIDL 0XFF
+
+/* Register Masks */
+#define RM_DEVIDL_DIDL RM(FM_DEVIDL_DIDL, FB_DEVIDL_DIDL)
+
+/*****************************
+ * R_DEVIDH (0x7E) *
+ *****************************/
+
+/* Field Offsets */
+#define FB_DEVIDH_DIDH 0
+
+/* Field Masks */
+#define FM_DEVIDH_DIDH 0XFF
+
+/* Register Masks */
+#define RM_DEVIDH_DIDH RM(FM_DEVIDH_DIDH, FB_DEVIDH_DIDH)
+
+/****************************
+ * R_RESET (0x80) *
+ ****************************/
+
+/* Field Offsets */
+#define FB_RESET 0
+
+/* Field Masks */
+#define FM_RESET 0XFF
+
+/* Field Values */
+#define FV_RESET_ENABLE 0x85
+
+/* Register Masks */
+#define RM_RESET RM(FM_RESET, FB_RESET)
+
+/* Register Values */
+#define RV_RESET_ENABLE RV(FV_RESET_ENABLE, FB_RESET)
+
+/********************************
+ * R_DACCRSTAT (0x8A) *
+ ********************************/
+
+/* Field Offsets */
+#define FB_DACCRSTAT_DACCR_BUSY 7
+
+/* Field Masks */
+#define FM_DACCRSTAT_DACCR_BUSY 0X1
+
+/* Register Masks */
+#define RM_DACCRSTAT_DACCR_BUSY \
+ RM(FM_DACCRSTAT_DACCR_BUSY, FB_DACCRSTAT_DACCR_BUSY)
+
+
+/******************************
+ * R_PLLCTL0 (0x8E) *
+ ******************************/
+
+/* Field Offsets */
+#define FB_PLLCTL0_PLL2_LOCK 1
+#define FB_PLLCTL0_PLL1_LOCK 0
+
+/* Field Masks */
+#define FM_PLLCTL0_PLL2_LOCK 0X1
+#define FM_PLLCTL0_PLL1_LOCK 0X1
+
+/* Register Masks */
+#define RM_PLLCTL0_PLL2_LOCK \
+ RM(FM_PLLCTL0_PLL2_LOCK, FB_PLLCTL0_PLL2_LOCK)
+
+#define RM_PLLCTL0_PLL1_LOCK \
+ RM(FM_PLLCTL0_PLL1_LOCK, FB_PLLCTL0_PLL1_LOCK)
+
+
+/********************************
+ * R_PLLREFSEL (0x8F) *
+ ********************************/
+
+/* Field Offsets */
+#define FB_PLLREFSEL_PLL2_REF_SEL 4
+#define FB_PLLREFSEL_PLL1_REF_SEL 0
+
+/* Field Masks */
+#define FM_PLLREFSEL_PLL2_REF_SEL 0X7
+#define FM_PLLREFSEL_PLL1_REF_SEL 0X7
+
+/* Field Values */
+#define FV_PLLREFSEL_PLL2_REF_SEL_XTAL_MCLK1 0x0
+#define FV_PLLREFSEL_PLL2_REF_SEL_MCLK2 0x1
+#define FV_PLLREFSEL_PLL1_REF_SEL_XTAL_MCLK1 0x0
+#define FV_PLLREFSEL_PLL1_REF_SEL_MCLK2 0x1
+
+/* Register Masks */
+#define RM_PLLREFSEL_PLL2_REF_SEL \
+ RM(FM_PLLREFSEL_PLL2_REF_SEL, FB_PLLREFSEL_PLL2_REF_SEL)
+
+#define RM_PLLREFSEL_PLL1_REF_SEL \
+ RM(FM_PLLREFSEL_PLL1_REF_SEL, FB_PLLREFSEL_PLL1_REF_SEL)
+
+
+/* Register Values */
+#define RV_PLLREFSEL_PLL2_REF_SEL_XTAL_MCLK1 \
+ RV(FV_PLLREFSEL_PLL2_REF_SEL_XTAL_MCLK1, FB_PLLREFSEL_PLL2_REF_SEL)
+
+#define RV_PLLREFSEL_PLL2_REF_SEL_MCLK2 \
+ RV(FV_PLLREFSEL_PLL2_REF_SEL_MCLK2, FB_PLLREFSEL_PLL2_REF_SEL)
+
+#define RV_PLLREFSEL_PLL1_REF_SEL_XTAL_MCLK1 \
+ RV(FV_PLLREFSEL_PLL1_REF_SEL_XTAL_MCLK1, FB_PLLREFSEL_PLL1_REF_SEL)
+
+#define RV_PLLREFSEL_PLL1_REF_SEL_MCLK2 \
+ RV(FV_PLLREFSEL_PLL1_REF_SEL_MCLK2, FB_PLLREFSEL_PLL1_REF_SEL)
+
+
+/*******************************
+ * R_DACMBCEN (0xC7) *
+ *******************************/
+
+/* Field Offsets */
+#define FB_DACMBCEN_MBCEN3 2
+#define FB_DACMBCEN_MBCEN2 1
+#define FB_DACMBCEN_MBCEN1 0
+
+/* Field Masks */
+#define FM_DACMBCEN_MBCEN3 0X1
+#define FM_DACMBCEN_MBCEN2 0X1
+#define FM_DACMBCEN_MBCEN1 0X1
+
+/* Register Masks */
+#define RM_DACMBCEN_MBCEN3 \
+ RM(FM_DACMBCEN_MBCEN3, FB_DACMBCEN_MBCEN3)
+
+#define RM_DACMBCEN_MBCEN2 \
+ RM(FM_DACMBCEN_MBCEN2, FB_DACMBCEN_MBCEN2)
+
+#define RM_DACMBCEN_MBCEN1 \
+ RM(FM_DACMBCEN_MBCEN1, FB_DACMBCEN_MBCEN1)
+
+
+/********************************
+ * R_DACMBCCTL (0xC8) *
+ ********************************/
+
+/* Field Offsets */
+#define FB_DACMBCCTL_LVLMODE3 5
+#define FB_DACMBCCTL_WINSEL3 4
+#define FB_DACMBCCTL_LVLMODE2 3
+#define FB_DACMBCCTL_WINSEL2 2
+#define FB_DACMBCCTL_LVLMODE1 1
+#define FB_DACMBCCTL_WINSEL1 0
+
+/* Field Masks */
+#define FM_DACMBCCTL_LVLMODE3 0X1
+#define FM_DACMBCCTL_WINSEL3 0X1
+#define FM_DACMBCCTL_LVLMODE2 0X1
+#define FM_DACMBCCTL_WINSEL2 0X1
+#define FM_DACMBCCTL_LVLMODE1 0X1
+#define FM_DACMBCCTL_WINSEL1 0X1
+
+/* Register Masks */
+#define RM_DACMBCCTL_LVLMODE3 \
+ RM(FM_DACMBCCTL_LVLMODE3, FB_DACMBCCTL_LVLMODE3)
+
+#define RM_DACMBCCTL_WINSEL3 \
+ RM(FM_DACMBCCTL_WINSEL3, FB_DACMBCCTL_WINSEL3)
+
+#define RM_DACMBCCTL_LVLMODE2 \
+ RM(FM_DACMBCCTL_LVLMODE2, FB_DACMBCCTL_LVLMODE2)
+
+#define RM_DACMBCCTL_WINSEL2 \
+ RM(FM_DACMBCCTL_WINSEL2, FB_DACMBCCTL_WINSEL2)
+
+#define RM_DACMBCCTL_LVLMODE1 \
+ RM(FM_DACMBCCTL_LVLMODE1, FB_DACMBCCTL_LVLMODE1)
+
+#define RM_DACMBCCTL_WINSEL1 \
+ RM(FM_DACMBCCTL_WINSEL1, FB_DACMBCCTL_WINSEL1)
+
+
+/*********************************
+ * R_DACMBCMUG1 (0xC9) *
+ *********************************/
+
+/* Field Offsets */
+#define FB_DACMBCMUG1_PHASE 5
+#define FB_DACMBCMUG1_MUGAIN 0
+
+/* Field Masks */
+#define FM_DACMBCMUG1_PHASE 0X1
+#define FM_DACMBCMUG1_MUGAIN 0X1F
+
+/* Register Masks */
+#define RM_DACMBCMUG1_PHASE \
+ RM(FM_DACMBCMUG1_PHASE, FB_DACMBCMUG1_PHASE)
+
+#define RM_DACMBCMUG1_MUGAIN \
+ RM(FM_DACMBCMUG1_MUGAIN, FB_DACMBCMUG1_MUGAIN)
+
+
+/*********************************
+ * R_DACMBCTHR1 (0xCA) *
+ *********************************/
+
+/* Field Offsets */
+#define FB_DACMBCTHR1_THRESH 0
+
+/* Field Masks */
+#define FM_DACMBCTHR1_THRESH 0XFF
+
+/* Register Masks */
+#define RM_DACMBCTHR1_THRESH \
+ RM(FM_DACMBCTHR1_THRESH, FB_DACMBCTHR1_THRESH)
+
+
+/*********************************
+ * R_DACMBCRAT1 (0xCB) *
+ *********************************/
+
+/* Field Offsets */
+#define FB_DACMBCRAT1_RATIO 0
+
+/* Field Masks */
+#define FM_DACMBCRAT1_RATIO 0X1F
+
+/* Register Masks */
+#define RM_DACMBCRAT1_RATIO \
+ RM(FM_DACMBCRAT1_RATIO, FB_DACMBCRAT1_RATIO)
+
+
+/**********************************
+ * R_DACMBCATK1L (0xCC) *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCATK1L_TCATKL 0
+
+/* Field Masks */
+#define FM_DACMBCATK1L_TCATKL 0XFF
+
+/* Register Masks */
+#define RM_DACMBCATK1L_TCATKL \
+ RM(FM_DACMBCATK1L_TCATKL, FB_DACMBCATK1L_TCATKL)
+
+
+/**********************************
+ * R_DACMBCATK1H (0xCD) *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCATK1H_TCATKH 0
+
+/* Field Masks */
+#define FM_DACMBCATK1H_TCATKH 0XFF
+
+/* Register Masks */
+#define RM_DACMBCATK1H_TCATKH \
+ RM(FM_DACMBCATK1H_TCATKH, FB_DACMBCATK1H_TCATKH)
+
+
+/**********************************
+ * R_DACMBCREL1L (0xCE) *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCREL1L_TCRELL 0
+
+/* Field Masks */
+#define FM_DACMBCREL1L_TCRELL 0XFF
+
+/* Register Masks */
+#define RM_DACMBCREL1L_TCRELL \
+ RM(FM_DACMBCREL1L_TCRELL, FB_DACMBCREL1L_TCRELL)
+
+
+/**********************************
+ * R_DACMBCREL1H (0xCF) *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCREL1H_TCRELH 0
+
+/* Field Masks */
+#define FM_DACMBCREL1H_TCRELH 0XFF
+
+/* Register Masks */
+#define RM_DACMBCREL1H_TCRELH \
+ RM(FM_DACMBCREL1H_TCRELH, FB_DACMBCREL1H_TCRELH)
+
+
+/*********************************
+ * R_DACMBCMUG2 (0xD0) *
+ *********************************/
+
+/* Field Offsets */
+#define FB_DACMBCMUG2_PHASE 5
+#define FB_DACMBCMUG2_MUGAIN 0
+
+/* Field Masks */
+#define FM_DACMBCMUG2_PHASE 0X1
+#define FM_DACMBCMUG2_MUGAIN 0X1F
+
+/* Register Masks */
+#define RM_DACMBCMUG2_PHASE \
+ RM(FM_DACMBCMUG2_PHASE, FB_DACMBCMUG2_PHASE)
+
+#define RM_DACMBCMUG2_MUGAIN \
+ RM(FM_DACMBCMUG2_MUGAIN, FB_DACMBCMUG2_MUGAIN)
+
+
+/*********************************
+ * R_DACMBCTHR2 (0xD1) *
+ *********************************/
+
+/* Field Offsets */
+#define FB_DACMBCTHR2_THRESH 0
+
+/* Field Masks */
+#define FM_DACMBCTHR2_THRESH 0XFF
+
+/* Register Masks */
+#define RM_DACMBCTHR2_THRESH \
+ RM(FM_DACMBCTHR2_THRESH, FB_DACMBCTHR2_THRESH)
+
+
+/*********************************
+ * R_DACMBCRAT2 (0xD2) *
+ *********************************/
+
+/* Field Offsets */
+#define FB_DACMBCRAT2_RATIO 0
+
+/* Field Masks */
+#define FM_DACMBCRAT2_RATIO 0X1F
+
+/* Register Masks */
+#define RM_DACMBCRAT2_RATIO \
+ RM(FM_DACMBCRAT2_RATIO, FB_DACMBCRAT2_RATIO)
+
+
+/**********************************
+ * R_DACMBCATK2L (0xD3) *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCATK2L_TCATKL 0
+
+/* Field Masks */
+#define FM_DACMBCATK2L_TCATKL 0XFF
+
+/* Register Masks */
+#define RM_DACMBCATK2L_TCATKL \
+ RM(FM_DACMBCATK2L_TCATKL, FB_DACMBCATK2L_TCATKL)
+
+
+/**********************************
+ * R_DACMBCATK2H (0xD4) *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCATK2H_TCATKH 0
+
+/* Field Masks */
+#define FM_DACMBCATK2H_TCATKH 0XFF
+
+/* Register Masks */
+#define RM_DACMBCATK2H_TCATKH \
+ RM(FM_DACMBCATK2H_TCATKH, FB_DACMBCATK2H_TCATKH)
+
+
+/**********************************
+ * R_DACMBCREL2L (0xD5) *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCREL2L_TCRELL 0
+
+/* Field Masks */
+#define FM_DACMBCREL2L_TCRELL 0XFF
+
+/* Register Masks */
+#define RM_DACMBCREL2L_TCRELL \
+ RM(FM_DACMBCREL2L_TCRELL, FB_DACMBCREL2L_TCRELL)
+
+
+/**********************************
+ * R_DACMBCREL2H (0xD6) *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCREL2H_TCRELH 0
+
+/* Field Masks */
+#define FM_DACMBCREL2H_TCRELH 0XFF
+
+/* Register Masks */
+#define RM_DACMBCREL2H_TCRELH \
+ RM(FM_DACMBCREL2H_TCRELH, FB_DACMBCREL2H_TCRELH)
+
+
+/*********************************
+ * R_DACMBCMUG3 (0xD7) *
+ *********************************/
+
+/* Field Offsets */
+#define FB_DACMBCMUG3_PHASE 5
+#define FB_DACMBCMUG3_MUGAIN 0
+
+/* Field Masks */
+#define FM_DACMBCMUG3_PHASE 0X1
+#define FM_DACMBCMUG3_MUGAIN 0X1F
+
+/* Register Masks */
+#define RM_DACMBCMUG3_PHASE \
+ RM(FM_DACMBCMUG3_PHASE, FB_DACMBCMUG3_PHASE)
+
+#define RM_DACMBCMUG3_MUGAIN \
+ RM(FM_DACMBCMUG3_MUGAIN, FB_DACMBCMUG3_MUGAIN)
+
+
+/*********************************
+ * R_DACMBCTHR3 (0xD8) *
+ *********************************/
+
+/* Field Offsets */
+#define FB_DACMBCTHR3_THRESH 0
+
+/* Field Masks */
+#define FM_DACMBCTHR3_THRESH 0XFF
+
+/* Register Masks */
+#define RM_DACMBCTHR3_THRESH \
+ RM(FM_DACMBCTHR3_THRESH, FB_DACMBCTHR3_THRESH)
+
+
+/*********************************
+ * R_DACMBCRAT3 (0xD9) *
+ *********************************/
+
+/* Field Offsets */
+#define FB_DACMBCRAT3_RATIO 0
+
+/* Field Masks */
+#define FM_DACMBCRAT3_RATIO 0X1F
+
+/* Register Masks */
+#define RM_DACMBCRAT3_RATIO \
+ RM(FM_DACMBCRAT3_RATIO, FB_DACMBCRAT3_RATIO)
+
+
+/**********************************
+ * R_DACMBCATK3L (0xDA) *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCATK3L_TCATKL 0
+
+/* Field Masks */
+#define FM_DACMBCATK3L_TCATKL 0XFF
+
+/* Register Masks */
+#define RM_DACMBCATK3L_TCATKL \
+ RM(FM_DACMBCATK3L_TCATKL, FB_DACMBCATK3L_TCATKL)
+
+
+/**********************************
+ * R_DACMBCATK3H (0xDB) *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCATK3H_TCATKH 0
+
+/* Field Masks */
+#define FM_DACMBCATK3H_TCATKH 0XFF
+
+/* Register Masks */
+#define RM_DACMBCATK3H_TCATKH \
+ RM(FM_DACMBCATK3H_TCATKH, FB_DACMBCATK3H_TCATKH)
+
+
+/**********************************
+ * R_DACMBCREL3L (0xDC) *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCREL3L_TCRELL 0
+
+/* Field Masks */
+#define FM_DACMBCREL3L_TCRELL 0XFF
+
+/* Register Masks */
+#define RM_DACMBCREL3L_TCRELL \
+ RM(FM_DACMBCREL3L_TCRELL, FB_DACMBCREL3L_TCRELL)
+
+
+/**********************************
+ * R_DACMBCREL3H (0xDD) *
+ **********************************/
+
+/* Field Offsets */
+#define FB_DACMBCREL3H_TCRELH 0
+
+/* Field Masks */
+#define FM_DACMBCREL3H_TCRELH 0XFF
+
+/* Register Masks */
+#define RM_DACMBCREL3H_TCRELH \
+ RM(FM_DACMBCREL3H_TCRELH, FB_DACMBCREL3H_TCRELH)
+
+
+#endif /* __WOOKIE_H__ */
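
Throughout the header above, FB_* gives a field's bit offset, FM_* its width mask, and FV_* a named field value; the RM()/RV() helpers defined near the top of the file presumably expand to ((m) << (b)) and ((v) << (b)), producing register-positioned masks and values. Under that assumption, a minimal sketch of driving one of these fields (R_PWRM2 and the regmap handle stand in for the driver's own definitions):

	unsigned int val;

	/* Power up both headphone drivers in R_PWRM2 (0x1B). */
	regmap_read(regmap, R_PWRM2, &val);
	val &= ~(RM_PWRM2_HPL | RM_PWRM2_HPR);
	val |= RV_PWRM2_HPL_ENABLE | RV_PWRM2_HPR_ENABLE;
	regmap_write(regmap, R_PWRM2, val);

	/* Or, equivalently, as a single masked update: */
	regmap_update_bits(regmap, R_PWRM2,
			   RM_PWRM2_HPL | RM_PWRM2_HPR,
			   RV_PWRM2_HPL_ENABLE | RV_PWRM2_HPR_ENABLE);
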
diff --git a/sound/soc/codecs/twl4030.c b/sound/soc/codecs/twl4030.c
index cfe72b9d4356..8798182959c1 100644
--- a/sound/soc/codecs/twl4030.c
+++ b/sound/soc/codecs/twl4030.c
@@ -240,7 +240,6 @@ static struct twl4030_codec_data *twl4030_get_pdata(struct snd_soc_codec *codec)
sizeof(struct twl4030_codec_data),
GFP_KERNEL);
if (!pdata) {
- dev_err(codec->dev, "Can not allocate memory\n");
of_node_put(twl4030_codec_node);
return NULL;
}
@@ -851,14 +850,14 @@ static int snd_soc_get_volsw_twl4030(struct snd_kcontrol *kcontrol,
int mask = (1 << fls(max)) - 1;
ucontrol->value.integer.value[0] =
- (snd_soc_read(codec, reg) >> shift) & mask;
+ (twl4030_read(codec, reg) >> shift) & mask;
if (ucontrol->value.integer.value[0])
ucontrol->value.integer.value[0] =
max + 1 - ucontrol->value.integer.value[0];
if (shift != rshift) {
ucontrol->value.integer.value[1] =
- (snd_soc_read(codec, reg) >> rshift) & mask;
+ (twl4030_read(codec, reg) >> rshift) & mask;
if (ucontrol->value.integer.value[1])
ucontrol->value.integer.value[1] =
max + 1 - ucontrol->value.integer.value[1];
@@ -909,9 +908,9 @@ static int snd_soc_get_volsw_r2_twl4030(struct snd_kcontrol *kcontrol,
int mask = (1<<fls(max))-1;
ucontrol->value.integer.value[0] =
- (snd_soc_read(codec, reg) >> shift) & mask;
+ (twl4030_read(codec, reg) >> shift) & mask;
ucontrol->value.integer.value[1] =
- (snd_soc_read(codec, reg2) >> shift) & mask;
+ (twl4030_read(codec, reg2) >> shift) & mask;
if (ucontrol->value.integer.value[0])
ucontrol->value.integer.value[0] =
@@ -2196,8 +2195,6 @@ static int twl4030_soc_remove(struct snd_soc_codec *codec)
static const struct snd_soc_codec_driver soc_codec_dev_twl4030 = {
.probe = twl4030_soc_probe,
.remove = twl4030_soc_remove,
- .read = twl4030_read,
- .write = twl4030_write,
.set_bias_level = twl4030_set_bias_level,
.idle_bias_off = true,
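
Two idioms drive the twl4030 hunks above. The allocation-failure dev_err() goes away because the allocator core already logs on failure, so a bare check suffices; and with the codec-level .read/.write callbacks removed from soc_codec_dev_twl4030, helpers must call the driver's own twl4030_read() rather than snd_soc_read(). A hedged sketch of the first idiom (assuming the surrounding call is devm_kzalloc(), as the context suggests):

	pdata = devm_kzalloc(codec->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {			/* no OOM message needed here */
		of_node_put(twl4030_codec_node);
		return NULL;
	}
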
diff --git a/sound/soc/codecs/twl6040.c b/sound/soc/codecs/twl6040.c
index 1773ff84ee3b..3b895b4b451c 100644
--- a/sound/soc/codecs/twl6040.c
+++ b/sound/soc/codecs/twl6040.c
@@ -106,10 +106,12 @@ static const struct snd_pcm_hw_constraint_list sysclk_constraints[] = {
{ .count = ARRAY_SIZE(hp_rates), .list = hp_rates, },
};
+#define to_twl6040(codec) dev_get_drvdata((codec)->dev->parent)
+
static unsigned int twl6040_read(struct snd_soc_codec *codec, unsigned int reg)
{
struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
- struct twl6040 *twl6040 = codec->control_data;
+ struct twl6040 *twl6040 = to_twl6040(codec);
u8 value;
if (reg >= TWL6040_CACHEREGNUM)
@@ -171,7 +173,7 @@ static inline void twl6040_update_dl12_cache(struct snd_soc_codec *codec,
static int twl6040_write(struct snd_soc_codec *codec,
unsigned int reg, unsigned int value)
{
- struct twl6040 *twl6040 = codec->control_data;
+ struct twl6040 *twl6040 = to_twl6040(codec);
if (reg >= TWL6040_CACHEREGNUM)
return -EIO;
@@ -541,7 +543,7 @@ int twl6040_get_dl1_gain(struct snd_soc_codec *codec)
if (snd_soc_dapm_get_pin_status(dapm, "HSOR") ||
snd_soc_dapm_get_pin_status(dapm, "HSOL")) {
- u8 val = snd_soc_read(codec, TWL6040_REG_HSLCTL);
+ u8 val = twl6040_read(codec, TWL6040_REG_HSLCTL);
if (val & TWL6040_HSDACMODE)
/* HSDACL in LP mode */
return -8; /* -8dB */
@@ -572,7 +574,7 @@ EXPORT_SYMBOL_GPL(twl6040_get_trim_value);
int twl6040_get_hs_step_size(struct snd_soc_codec *codec)
{
- struct twl6040 *twl6040 = codec->control_data;
+ struct twl6040 *twl6040 = to_twl6040(codec);
if (twl6040_get_revid(twl6040) < TWL6040_REV_ES1_3)
/* For ES under ES_1.3 HS step is 2 mV */
@@ -830,7 +832,7 @@ static const struct snd_soc_dapm_route intercon[] = {
static int twl6040_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
- struct twl6040 *twl6040 = codec->control_data;
+ struct twl6040 *twl6040 = to_twl6040(codec);
struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
int ret = 0;
@@ -922,7 +924,7 @@ static int twl6040_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
- struct twl6040 *twl6040 = codec->control_data;
+ struct twl6040 *twl6040 = to_twl6040(codec);
struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
int ret;
@@ -964,7 +966,7 @@ static int twl6040_set_dai_sysclk(struct snd_soc_dai *codec_dai,
static void twl6040_mute_path(struct snd_soc_codec *codec, enum twl6040_dai_id id,
int mute)
{
- struct twl6040 *twl6040 = codec->control_data;
+ struct twl6040 *twl6040 = to_twl6040(codec);
struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
int hslctl, hsrctl, earctl;
int hflctl, hfrctl;
@@ -1108,7 +1110,6 @@ static struct snd_soc_dai_driver twl6040_dai[] = {
static int twl6040_probe(struct snd_soc_codec *codec)
{
struct twl6040_data *priv;
- struct twl6040 *twl6040 = dev_get_drvdata(codec->dev->parent);
struct platform_device *pdev = to_platform_device(codec->dev);
int ret = 0;
@@ -1119,7 +1120,6 @@ static int twl6040_probe(struct snd_soc_codec *codec)
snd_soc_codec_set_drvdata(codec, priv);
priv->codec = codec;
- codec->control_data = twl6040;
priv->plug_irq = platform_get_irq(pdev, 0);
if (priv->plug_irq < 0) {
@@ -1158,8 +1158,6 @@ static int twl6040_remove(struct snd_soc_codec *codec)
static const struct snd_soc_codec_driver soc_codec_dev_twl6040 = {
.probe = twl6040_probe,
.remove = twl6040_remove,
- .read = twl6040_read,
- .write = twl6040_write,
.set_bias_level = twl6040_set_bias_level,
.suspend_bias_off = true,
.ignore_pmdown_time = true,
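
The new to_twl6040() macro replaces the pointer previously stashed in codec->control_data: the struct twl6040 MFD core is the codec's parent device, so its driver data can always be looked up on demand. Open-coded, the macro is simply (assuming the MFD core set its drvdata at probe time, as the twl6040_probe() hunk implies):

	struct twl6040 *twl6040 = dev_get_drvdata(codec->dev->parent);

This removes the last user of control_data in the driver, which in turn is what allows the .read/.write callbacks to be dropped from soc_codec_dev_twl6040.
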
diff --git a/sound/soc/codecs/uda1380.c b/sound/soc/codecs/uda1380.c
index 926c81ae8185..c73e6a192224 100644
--- a/sound/soc/codecs/uda1380.c
+++ b/sound/soc/codecs/uda1380.c
@@ -37,7 +37,8 @@ struct uda1380_priv {
struct snd_soc_codec *codec;
unsigned int dac_clk;
struct work_struct work;
- void *control_data;
+ struct i2c_client *i2c;
+ u16 *reg_cache;
};
/*
@@ -63,7 +64,9 @@ static unsigned long uda1380_cache_dirty;
static inline unsigned int uda1380_read_reg_cache(struct snd_soc_codec *codec,
unsigned int reg)
{
- u16 *cache = codec->reg_cache;
+ struct uda1380_priv *uda1380 = snd_soc_codec_get_drvdata(codec);
+ u16 *cache = uda1380->reg_cache;
+
if (reg == UDA1380_RESET)
return 0;
if (reg >= UDA1380_CACHEREGNUM)
@@ -77,7 +80,8 @@ static inline unsigned int uda1380_read_reg_cache(struct snd_soc_codec *codec,
static inline void uda1380_write_reg_cache(struct snd_soc_codec *codec,
u16 reg, unsigned int value)
{
- u16 *cache = codec->reg_cache;
+ struct uda1380_priv *uda1380 = snd_soc_codec_get_drvdata(codec);
+ u16 *cache = uda1380->reg_cache;
if (reg >= UDA1380_CACHEREGNUM)
return;
@@ -92,6 +96,7 @@ static inline void uda1380_write_reg_cache(struct snd_soc_codec *codec,
static int uda1380_write(struct snd_soc_codec *codec, unsigned int reg,
unsigned int value)
{
+ struct uda1380_priv *uda1380 = snd_soc_codec_get_drvdata(codec);
u8 data[3];
/* data is
@@ -111,10 +116,10 @@ static int uda1380_write(struct snd_soc_codec *codec, unsigned int reg,
if (!snd_soc_codec_is_active(codec) && (reg >= UDA1380_MVOL))
return 0;
pr_debug("uda1380: hw write %x val %x\n", reg, value);
- if (codec->hw_write(codec->control_data, data, 3) == 3) {
+ if (i2c_master_send(uda1380->i2c, data, 3) == 3) {
unsigned int val;
- i2c_master_send(codec->control_data, data, 1);
- i2c_master_recv(codec->control_data, data, 2);
+ i2c_master_send(uda1380->i2c, data, 1);
+ i2c_master_recv(uda1380->i2c, data, 2);
val = (data[0]<<8) | data[1];
if (val != value) {
pr_debug("uda1380: READ BACK VAL %x\n",
@@ -130,16 +135,17 @@ static int uda1380_write(struct snd_soc_codec *codec, unsigned int reg,
static void uda1380_sync_cache(struct snd_soc_codec *codec)
{
+ struct uda1380_priv *uda1380 = snd_soc_codec_get_drvdata(codec);
int reg;
u8 data[3];
- u16 *cache = codec->reg_cache;
+ u16 *cache = uda1380->reg_cache;
/* Sync reg_cache with the hardware */
for (reg = 0; reg < UDA1380_MVOL; reg++) {
data[0] = reg;
data[1] = (cache[reg] & 0xff00) >> 8;
data[2] = cache[reg] & 0x00ff;
- if (codec->hw_write(codec->control_data, data, 3) != 3)
+ if (i2c_master_send(uda1380->i2c, data, 3) != 3)
dev_err(codec->dev, "%s: write to reg 0x%x failed\n",
__func__, reg);
}
@@ -148,6 +154,7 @@ static void uda1380_sync_cache(struct snd_soc_codec *codec)
static int uda1380_reset(struct snd_soc_codec *codec)
{
struct uda1380_platform_data *pdata = codec->dev->platform_data;
+ struct uda1380_priv *uda1380 = snd_soc_codec_get_drvdata(codec);
if (gpio_is_valid(pdata->gpio_reset)) {
gpio_set_value(pdata->gpio_reset, 1);
@@ -160,7 +167,7 @@ static int uda1380_reset(struct snd_soc_codec *codec)
data[1] = 0;
data[2] = 0;
- if (codec->hw_write(codec->control_data, data, 3) != 3) {
+ if (i2c_master_send(uda1380->i2c, data, 3) != 3) {
dev_err(codec->dev, "%s: failed\n", __func__);
return -EIO;
}
@@ -695,9 +702,6 @@ static int uda1380_probe(struct snd_soc_codec *codec)
uda1380->codec = codec;
- codec->hw_write = (hw_write_t)i2c_master_send;
- codec->control_data = uda1380->control_data;
-
if (!gpio_is_valid(pdata->gpio_power)) {
ret = uda1380_reset(codec);
if (ret)
@@ -727,11 +731,6 @@ static const struct snd_soc_codec_driver soc_codec_dev_uda1380 = {
.set_bias_level = uda1380_set_bias_level,
.suspend_bias_off = true,
- .reg_cache_size = ARRAY_SIZE(uda1380_reg),
- .reg_word_size = sizeof(u16),
- .reg_cache_default = uda1380_reg,
- .reg_cache_step = 1,
-
.component_driver = {
.controls = uda1380_snd_controls,
.num_controls = ARRAY_SIZE(uda1380_snd_controls),
@@ -771,8 +770,15 @@ static int uda1380_i2c_probe(struct i2c_client *i2c,
return ret;
}
+ uda1380->reg_cache = devm_kmemdup(&i2c->dev,
+ uda1380_reg,
+ ARRAY_SIZE(uda1380_reg) * sizeof(u16),
+ GFP_KERNEL);
+ if (!uda1380->reg_cache)
+ return -ENOMEM;
+
i2c_set_clientdata(i2c, uda1380);
- uda1380->control_data = i2c;
+ uda1380->i2c = i2c;
ret = snd_soc_register_codec(&i2c->dev,
&soc_codec_dev_uda1380, uda1380_dai, ARRAY_SIZE(uda1380_dai));
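
The uda1380 conversion moves the register cache out of the ASoC-core-managed codec->reg_cache (dropping the reg_cache_* fields from the codec driver) and into driver-private storage seeded from the uda1380_reg defaults. devm_kmemdup() provides a writable, device-lifetime copy of the template; an open-coded equivalent would be roughly:

	uda1380->reg_cache = devm_kmalloc(&i2c->dev, sizeof(uda1380_reg),
					  GFP_KERNEL);
	if (!uda1380->reg_cache)
		return -ENOMEM;
	memcpy(uda1380->reg_cache, uda1380_reg, sizeof(uda1380_reg));

Here sizeof(uda1380_reg) equals ARRAY_SIZE(uda1380_reg) * sizeof(u16) as long as uda1380_reg is a u16 array, which the cache accessors above assume.
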
diff --git a/sound/soc/codecs/wm0010.c b/sound/soc/codecs/wm0010.c
index 4f5f5710b569..0147d2fb7b0a 100644
--- a/sound/soc/codecs/wm0010.c
+++ b/sound/soc/codecs/wm0010.c
@@ -655,11 +655,8 @@ static int wm0010_boot(struct snd_soc_codec *codec)
ret = -ENOMEM;
len = pll_rec.length + 8;
out = kzalloc(len, GFP_KERNEL | GFP_DMA);
- if (!out) {
- dev_err(codec->dev,
- "Failed to allocate RX buffer\n");
+ if (!out)
goto abort;
- }
img_swap = kzalloc(len, GFP_KERNEL | GFP_DMA);
if (!img_swap)
diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c
index 23cde3a0dc11..abfa052c07d8 100644
--- a/sound/soc/codecs/wm2000.c
+++ b/sound/soc/codecs/wm2000.c
@@ -13,7 +13,7 @@
* 'wm2000_anc.bin' by default (overridable via platform data) at
* runtime and is expected to be in flat binary format. This is
* generated by Wolfson configuration tools and includes
- * system-specific callibration information. If supplied as a
+ * system-specific calibration information. If supplied as a
 * sequence of ASCII-encoded hexadecimal bytes this can be converted
* into a flat binary with a command such as this on the command line:
*
@@ -826,8 +826,7 @@ static int wm2000_i2c_probe(struct i2c_client *i2c,
int reg;
u16 id;
- wm2000 = devm_kzalloc(&i2c->dev, sizeof(struct wm2000_priv),
- GFP_KERNEL);
+ wm2000 = devm_kzalloc(&i2c->dev, sizeof(*wm2000), GFP_KERNEL);
if (!wm2000)
return -ENOMEM;
@@ -902,7 +901,6 @@ static int wm2000_i2c_probe(struct i2c_client *i2c,
wm2000->anc_download_size,
GFP_KERNEL);
if (wm2000->anc_download == NULL) {
- dev_err(&i2c->dev, "Out of memory\n");
ret = -ENOMEM;
goto err_supplies;
}
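
The wm2000 allocation hunk switches to the preferred sizeof(*wm2000) spelling, which stays correct even if the pointee type is later renamed, and drops the redundant OOM message for the same reason as in twl4030 above:

	wm2000 = devm_kzalloc(&i2c->dev, sizeof(*wm2000), GFP_KERNEL);
	if (!wm2000)
		return -ENOMEM;	/* allocator core already warned */
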
diff --git a/sound/soc/codecs/wm2200.c b/sound/soc/codecs/wm2200.c
index d83dab57a1d1..5c2f5727244d 100644
--- a/sound/soc/codecs/wm2200.c
+++ b/sound/soc/codecs/wm2200.c
@@ -98,6 +98,8 @@ struct wm2200_priv {
int rev;
int sysclk;
+
+ unsigned int symmetric_rates:1;
};
#define WM2200_DSP_RANGE_BASE (WM2200_MAX_REGISTER + 1)
@@ -1550,7 +1552,7 @@ static const struct snd_soc_dapm_route wm2200_dapm_routes[] = {
static int wm2200_probe(struct snd_soc_codec *codec)
{
- struct wm2200_priv *wm2200 = dev_get_drvdata(codec->dev);
+ struct wm2200_priv *wm2200 = snd_soc_codec_get_drvdata(codec);
int ret;
wm2200->codec = codec;
@@ -1758,7 +1760,7 @@ static int wm2200_hw_params(struct snd_pcm_substream *substream,
lrclk = bclk_rates[bclk] / params_rate(params);
dev_dbg(codec->dev, "Setting %dHz LRCLK\n", bclk_rates[bclk] / lrclk);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK ||
- dai->symmetric_rates)
+ wm2200->symmetric_rates)
snd_soc_update_bits(codec, WM2200_AUDIO_IF_1_7,
WM2200_AIF1RX_BCPF_MASK, lrclk);
else
@@ -2059,13 +2061,14 @@ static int wm2200_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
static int wm2200_dai_probe(struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
+ struct wm2200_priv *wm2200 = snd_soc_codec_get_drvdata(codec);
unsigned int val = 0;
int ret;
ret = snd_soc_read(codec, WM2200_GPIO_CTRL_1);
if (ret >= 0) {
if ((ret & WM2200_GP1_FN_MASK) != 0) {
- dai->symmetric_rates = true;
+ wm2200->symmetric_rates = true;
val = WM2200_AIF1TX_LRCLK_SRC;
}
} else {
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index 4f0481d3c7a7..fc066caa1918 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -1935,8 +1935,11 @@ static int wm5102_codec_probe(struct snd_soc_codec *codec)
struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
struct wm5102_priv *priv = snd_soc_codec_get_drvdata(codec);
+ struct arizona *arizona = priv->core.arizona;
int ret;
+ snd_soc_codec_init_regmap(codec, arizona->regmap);
+
ret = wm_adsp2_codec_probe(&priv->core.adsp[0], codec);
if (ret)
return ret;
@@ -1989,17 +1992,9 @@ static unsigned int wm5102_digital_vu[] = {
ARIZONA_DAC_DIGITAL_VOLUME_5R,
};
-static struct regmap *wm5102_get_regmap(struct device *dev)
-{
- struct wm5102_priv *priv = dev_get_drvdata(dev);
-
- return priv->core.arizona->regmap;
-}
-
static const struct snd_soc_codec_driver soc_codec_dev_wm5102 = {
.probe = wm5102_codec_probe,
.remove = wm5102_codec_remove,
- .get_regmap = wm5102_get_regmap,
.idle_bias_off = true,
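
This wm5102 hunk is the template for the remaining codec changes in this series: the per-driver .get_regmap callback is deleted and the probe function hands the MFD-owned regmap to the core directly via snd_soc_codec_init_regmap(). A minimal before/after sketch (foo_* names assumed for illustration):

	/* Before: the core calls back into the driver for the regmap */
	static struct regmap *foo_get_regmap(struct device *dev)
	{
		struct foo_priv *priv = dev_get_drvdata(dev);

		return priv->regmap;
	}

	/* After: probe registers the regmap itself, no callback needed */
	static int foo_codec_probe(struct snd_soc_codec *codec)
	{
		struct foo_priv *priv = snd_soc_codec_get_drvdata(codec);

		snd_soc_codec_init_regmap(codec, priv->regmap);
		return 0;
	}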
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index 6ed1e1f9ce51..fb0cf9c61f48 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -2280,9 +2280,11 @@ static int wm5110_codec_probe(struct snd_soc_codec *codec)
struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
struct wm5110_priv *priv = snd_soc_codec_get_drvdata(codec);
+ struct arizona *arizona = priv->core.arizona;
int i, ret;
- priv->core.arizona->dapm = dapm;
+ arizona->dapm = dapm;
+ snd_soc_codec_init_regmap(codec, arizona->regmap);
ret = arizona_init_spk(codec);
if (ret < 0)
@@ -2344,17 +2346,9 @@ static unsigned int wm5110_digital_vu[] = {
ARIZONA_DAC_DIGITAL_VOLUME_6R,
};
-static struct regmap *wm5110_get_regmap(struct device *dev)
-{
- struct wm5110_priv *priv = dev_get_drvdata(dev);
-
- return priv->core.arizona->regmap;
-}
-
static const struct snd_soc_codec_driver soc_codec_dev_wm5110 = {
.probe = wm5110_codec_probe,
.remove = wm5110_codec_remove,
- .get_regmap = wm5110_get_regmap,
.idle_bias_off = true,
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
index 2efc5b41ad0f..fc79c6725d06 100644
--- a/sound/soc/codecs/wm8350.c
+++ b/sound/soc/codecs/wm8350.c
@@ -1472,6 +1472,8 @@ static int wm8350_codec_probe(struct snd_soc_codec *codec)
GFP_KERNEL);
if (priv == NULL)
return -ENOMEM;
+
+ snd_soc_codec_init_regmap(codec, wm8350->regmap);
snd_soc_codec_set_drvdata(codec, priv);
priv->wm8350 = wm8350;
@@ -1580,17 +1582,9 @@ static int wm8350_codec_remove(struct snd_soc_codec *codec)
return 0;
}
-static struct regmap *wm8350_get_regmap(struct device *dev)
-{
- struct wm8350 *wm8350 = dev_get_platdata(dev);
-
- return wm8350->regmap;
-}
-
static const struct snd_soc_codec_driver soc_codec_dev_wm8350 = {
.probe = wm8350_codec_probe,
.remove = wm8350_codec_remove,
- .get_regmap = wm8350_get_regmap,
.set_bias_level = wm8350_set_bias_level,
.suspend_bias_off = true,
diff --git a/sound/soc/codecs/wm8400.c b/sound/soc/codecs/wm8400.c
index 6c59fb933bd6..a36adf881bca 100644
--- a/sound/soc/codecs/wm8400.c
+++ b/sound/soc/codecs/wm8400.c
@@ -1285,6 +1285,7 @@ static int wm8400_codec_probe(struct snd_soc_codec *codec)
if (priv == NULL)
return -ENOMEM;
+ snd_soc_codec_init_regmap(codec, wm8400->regmap);
snd_soc_codec_set_drvdata(codec, priv);
priv->wm8400 = wm8400;
@@ -1325,17 +1326,9 @@ static int wm8400_codec_remove(struct snd_soc_codec *codec)
return 0;
}
-static struct regmap *wm8400_get_regmap(struct device *dev)
-{
- struct wm8400 *wm8400 = dev_get_platdata(dev);
-
- return wm8400->regmap;
-}
-
static const struct snd_soc_codec_driver soc_codec_dev_wm8400 = {
.probe = wm8400_codec_probe,
.remove = wm8400_codec_remove,
- .get_regmap = wm8400_get_regmap,
.set_bias_level = wm8400_set_bias_level,
.suspend_bias_off = true,
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index 237eeb9a8b97..cba90f21161f 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -1995,8 +1995,7 @@ static int wm8903_i2c_probe(struct i2c_client *i2c,
unsigned int val, irq_pol;
int ret, i;
- wm8903 = devm_kzalloc(&i2c->dev, sizeof(struct wm8903_priv),
- GFP_KERNEL);
+ wm8903 = devm_kzalloc(&i2c->dev, sizeof(*wm8903), GFP_KERNEL);
if (wm8903 == NULL)
return -ENOMEM;
@@ -2017,13 +2016,10 @@ static int wm8903_i2c_probe(struct i2c_client *i2c,
if (pdata) {
wm8903->pdata = pdata;
} else {
- wm8903->pdata = devm_kzalloc(&i2c->dev,
- sizeof(struct wm8903_platform_data),
- GFP_KERNEL);
- if (wm8903->pdata == NULL) {
- dev_err(&i2c->dev, "Failed to allocate pdata\n");
+ wm8903->pdata = devm_kzalloc(&i2c->dev, sizeof(*wm8903->pdata),
+ GFP_KERNEL);
+ if (!wm8903->pdata)
return -ENOMEM;
- }
if (i2c->irq) {
ret = wm8903_set_pdata_irq_trigger(i2c, wm8903->pdata);
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index f91b49e1ece3..21ffd6403173 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -3993,6 +3993,8 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
unsigned int reg;
int ret, i;
+ snd_soc_codec_init_regmap(codec, control->regmap);
+
wm8994->hubs.codec = codec;
mutex_init(&wm8994->accdet_lock);
@@ -4434,19 +4436,11 @@ static int wm8994_codec_remove(struct snd_soc_codec *codec)
return 0;
}
-static struct regmap *wm8994_get_regmap(struct device *dev)
-{
- struct wm8994 *control = dev_get_drvdata(dev->parent);
-
- return control->regmap;
-}
-
static const struct snd_soc_codec_driver soc_codec_dev_wm8994 = {
.probe = wm8994_codec_probe,
.remove = wm8994_codec_remove,
.suspend = wm8994_codec_suspend,
.resume = wm8994_codec_resume,
- .get_regmap = wm8994_get_regmap,
.set_bias_level = wm8994_set_bias_level,
};
diff --git a/sound/soc/codecs/wm8997.c b/sound/soc/codecs/wm8997.c
index 77f512767273..cac9b3e7e15d 100644
--- a/sound/soc/codecs/wm8997.c
+++ b/sound/soc/codecs/wm8997.c
@@ -1062,8 +1062,11 @@ static int wm8997_codec_probe(struct snd_soc_codec *codec)
struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
struct wm8997_priv *priv = snd_soc_codec_get_drvdata(codec);
+ struct arizona *arizona = priv->core.arizona;
int ret;
+ snd_soc_codec_init_regmap(codec, arizona->regmap);
+
ret = arizona_init_spk(codec);
if (ret < 0)
return ret;
@@ -1095,17 +1098,9 @@ static unsigned int wm8997_digital_vu[] = {
ARIZONA_DAC_DIGITAL_VOLUME_5R,
};
-static struct regmap *wm8997_get_regmap(struct device *dev)
-{
- struct wm8997_priv *priv = dev_get_drvdata(dev);
-
- return priv->core.arizona->regmap;
-}
-
static const struct snd_soc_codec_driver soc_codec_dev_wm8997 = {
.probe = wm8997_codec_probe,
.remove = wm8997_codec_remove,
- .get_regmap = wm8997_get_regmap,
.idle_bias_off = true,
diff --git a/sound/soc/codecs/wm8998.c b/sound/soc/codecs/wm8998.c
index 2d211dbe7422..1288e1f67dcf 100644
--- a/sound/soc/codecs/wm8998.c
+++ b/sound/soc/codecs/wm8998.c
@@ -1275,9 +1275,11 @@ static int wm8998_codec_probe(struct snd_soc_codec *codec)
struct wm8998_priv *priv = snd_soc_codec_get_drvdata(codec);
struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
struct snd_soc_component *component = snd_soc_dapm_to_component(dapm);
+ struct arizona *arizona = priv->core.arizona;
int ret;
- priv->core.arizona->dapm = dapm;
+ arizona->dapm = dapm;
+ snd_soc_codec_init_regmap(codec, arizona->regmap);
ret = arizona_init_spk(codec);
if (ret < 0)
@@ -1313,17 +1315,9 @@ static unsigned int wm8998_digital_vu[] = {
ARIZONA_DAC_DIGITAL_VOLUME_5R,
};
-static struct regmap *wm8998_get_regmap(struct device *dev)
-{
- struct wm8998_priv *priv = dev_get_drvdata(dev);
-
- return priv->core.arizona->regmap;
-}
-
static const struct snd_soc_codec_driver soc_codec_dev_wm8998 = {
.probe = wm8998_codec_probe,
.remove = wm8998_codec_remove,
- .get_regmap = wm8998_get_regmap,
.idle_bias_off = true,
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index 804c6f2bcf21..03ba218160ca 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -1242,6 +1242,20 @@ static int davinci_mcasp_hw_rule_format(struct snd_pcm_hw_params *params,
return snd_mask_refine(fmt, &nfmt);
}
+static int davinci_mcasp_hw_rule_min_periodsize(
+ struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule)
+{
+ struct snd_interval *period_size = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
+ struct snd_interval frames;
+
+ snd_interval_any(&frames);
+ frames.min = 64;
+ frames.integer = 1;
+
+ return snd_interval_refine(period_size, &frames);
+}
+
static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai)
{
@@ -1333,6 +1347,11 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
return ret;
}
+ snd_pcm_hw_rule_add(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+ davinci_mcasp_hw_rule_min_periodsize, NULL,
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
+
return 0;
}
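
Since the new rule only imposes a constant lower bound, the same effect could presumably be achieved with the stock constraint helper instead of a custom rule; a hedged alternative sketch:

	/* Alternative sketch (assumes a fixed 64-frame minimum is all
	 * that is needed): let ALSA refine the interval directly. */
	ret = snd_pcm_hw_constraint_minmax(substream->runtime,
					   SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
					   64, UINT_MAX);
	if (ret < 0)
		return ret;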
diff --git a/sound/soc/fsl/eukrea-tlv320.c b/sound/soc/fsl/eukrea-tlv320.c
index 84ef6385736c..191426a6d9ad 100644
--- a/sound/soc/fsl/eukrea-tlv320.c
+++ b/sound/soc/fsl/eukrea-tlv320.c
@@ -29,7 +29,6 @@
#include "../codecs/tlv320aic23.h"
#include "imx-ssi.h"
-#include "fsl_ssi.h"
#include "imx-audmux.h"
#define CODEC_CLOCK 12000000
diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
index 1225e0399de8..989be518c4ed 100644
--- a/sound/soc/fsl/fsl-asoc-card.c
+++ b/sound/soc/fsl/fsl-asoc-card.c
@@ -442,8 +442,8 @@ static int fsl_asoc_card_late_probe(struct snd_soc_card *card)
if (fsl_asoc_card_is_ac97(priv)) {
#if IS_ENABLED(CONFIG_SND_AC97_CODEC)
- struct snd_soc_codec *codec = rtd->codec;
- struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
+ struct snd_soc_component *component = rtd->codec_dai->component;
+ struct snd_ac97 *ac97 = snd_soc_component_get_drvdata(component);
/*
* Use slots 3/4 for S/PDIF so SSI won't try to enable
diff --git a/sound/soc/fsl/fsl_asrc.h b/sound/soc/fsl/fsl_asrc.h
index 52c27a358933..2c5856ac5bc3 100644
--- a/sound/soc/fsl/fsl_asrc.h
+++ b/sound/soc/fsl/fsl_asrc.h
@@ -57,7 +57,7 @@
#define REG_ASRDOC 0x74
#define REG_ASRDI(i) (REG_ASRDIA + (i << 3))
#define REG_ASRDO(i) (REG_ASRDOA + (i << 3))
-#define REG_ASRDx(x, i) (x == IN ? REG_ASRDI(i) : REG_ASRDO(i))
+#define REG_ASRDx(x, i) ((x) == IN ? REG_ASRDI(i) : REG_ASRDO(i))
#define REG_ASRIDRHA 0x80
#define REG_ASRIDRLA 0x84
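
The REG_ASRDx() fix applies the standard macro-hygiene rule: every use of a macro parameter gets its own parentheses, otherwise an argument containing a low-precedence operator binds wrongly in the expansion. A small illustration:

	#define IN 0
	#define BAD(x, i)  (x == IN ? (i) : -(i))	/* unhygienic */
	#define GOOD(x, i) ((x) == IN ? (i) : -(i))	/* fixed */

	/* BAD(dir & 1, 2) expands to (dir & 1 == IN ? ...); since '=='
	 * binds tighter than '&', it actually tests dir & (1 == IN). */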
diff --git a/sound/soc/fsl/fsl_dma.c b/sound/soc/fsl/fsl_dma.c
index 0c11f434a374..8c2981b70f64 100644
--- a/sound/soc/fsl/fsl_dma.c
+++ b/sound/soc/fsl/fsl_dma.c
@@ -913,8 +913,8 @@ static int fsl_soc_dma_probe(struct platform_device *pdev)
dma->dai.pcm_free = fsl_dma_free_dma_buffers;
/* Store the SSI-specific information that we need */
- dma->ssi_stx_phys = res.start + CCSR_SSI_STX0;
- dma->ssi_srx_phys = res.start + CCSR_SSI_SRX0;
+ dma->ssi_stx_phys = res.start + REG_SSI_STX0;
+ dma->ssi_srx_phys = res.start + REG_SSI_SRX0;
iprop = of_get_property(ssi_np, "fsl,fifo-depth", NULL);
if (iprop)
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index 424bafaf51ef..aecd00f7929d 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -69,21 +69,35 @@
* samples will be written to STX properly.
*/
#ifdef __BIG_ENDIAN
-#define FSLSSI_I2S_FORMATS (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_BE | \
- SNDRV_PCM_FMTBIT_S18_3BE | SNDRV_PCM_FMTBIT_S20_3BE | \
- SNDRV_PCM_FMTBIT_S24_3BE | SNDRV_PCM_FMTBIT_S24_BE)
+#define FSLSSI_I2S_FORMATS \
+ (SNDRV_PCM_FMTBIT_S8 | \
+ SNDRV_PCM_FMTBIT_S16_BE | \
+ SNDRV_PCM_FMTBIT_S18_3BE | \
+ SNDRV_PCM_FMTBIT_S20_3BE | \
+ SNDRV_PCM_FMTBIT_S24_3BE | \
+ SNDRV_PCM_FMTBIT_S24_BE)
#else
-#define FSLSSI_I2S_FORMATS (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE | \
- SNDRV_PCM_FMTBIT_S18_3LE | SNDRV_PCM_FMTBIT_S20_3LE | \
- SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S24_LE)
+#define FSLSSI_I2S_FORMATS \
+ (SNDRV_PCM_FMTBIT_S8 | \
+ SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S18_3LE | \
+ SNDRV_PCM_FMTBIT_S20_3LE | \
+ SNDRV_PCM_FMTBIT_S24_3LE | \
+ SNDRV_PCM_FMTBIT_S24_LE)
#endif
-#define FSLSSI_SIER_DBG_RX_FLAGS (CCSR_SSI_SIER_RFF0_EN | \
- CCSR_SSI_SIER_RLS_EN | CCSR_SSI_SIER_RFS_EN | \
- CCSR_SSI_SIER_ROE0_EN | CCSR_SSI_SIER_RFRC_EN)
-#define FSLSSI_SIER_DBG_TX_FLAGS (CCSR_SSI_SIER_TFE0_EN | \
- CCSR_SSI_SIER_TLS_EN | CCSR_SSI_SIER_TFS_EN | \
- CCSR_SSI_SIER_TUE0_EN | CCSR_SSI_SIER_TFRC_EN)
+#define FSLSSI_SIER_DBG_RX_FLAGS \
+ (SSI_SIER_RFF0_EN | \
+ SSI_SIER_RLS_EN | \
+ SSI_SIER_RFS_EN | \
+ SSI_SIER_ROE0_EN | \
+ SSI_SIER_RFRC_EN)
+#define FSLSSI_SIER_DBG_TX_FLAGS \
+ (SSI_SIER_TFE0_EN | \
+ SSI_SIER_TLS_EN | \
+ SSI_SIER_TFS_EN | \
+ SSI_SIER_TUE0_EN | \
+ SSI_SIER_TFRC_EN)
enum fsl_ssi_type {
FSL_SSI_MCP8610,
@@ -92,23 +106,18 @@ enum fsl_ssi_type {
FSL_SSI_MX51,
};
-struct fsl_ssi_reg_val {
+struct fsl_ssi_regvals {
u32 sier;
u32 srcr;
u32 stcr;
u32 scr;
};
-struct fsl_ssi_rxtx_reg_val {
- struct fsl_ssi_reg_val rx;
- struct fsl_ssi_reg_val tx;
-};
-
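
Replacing the nested rx/tx struct with a two-element array lets later code index register values by direction instead of naming a member; a sketch assuming RX and TX are 0/1 index constants defined in fsl_ssi.h:

	/* Assumed index constants (from fsl_ssi.h) */
	enum { RX = 0, TX = 1 };

	struct fsl_ssi_regvals regvals[2];

	regvals[RX].sier |= SSI_SIER_RDMAE;	/* was rxtx_reg_val.rx.sier */
	regvals[TX].sier |= SSI_SIER_TDMAE;	/* was rxtx_reg_val.tx.sier */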
static bool fsl_ssi_readable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
- case CCSR_SSI_SACCEN:
- case CCSR_SSI_SACCDIS:
+ case REG_SSI_SACCEN:
+ case REG_SSI_SACCDIS:
return false;
default:
return true;
@@ -118,18 +127,18 @@ static bool fsl_ssi_readable_reg(struct device *dev, unsigned int reg)
static bool fsl_ssi_volatile_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
- case CCSR_SSI_STX0:
- case CCSR_SSI_STX1:
- case CCSR_SSI_SRX0:
- case CCSR_SSI_SRX1:
- case CCSR_SSI_SISR:
- case CCSR_SSI_SFCSR:
- case CCSR_SSI_SACNT:
- case CCSR_SSI_SACADD:
- case CCSR_SSI_SACDAT:
- case CCSR_SSI_SATAG:
- case CCSR_SSI_SACCST:
- case CCSR_SSI_SOR:
+ case REG_SSI_STX0:
+ case REG_SSI_STX1:
+ case REG_SSI_SRX0:
+ case REG_SSI_SRX1:
+ case REG_SSI_SISR:
+ case REG_SSI_SFCSR:
+ case REG_SSI_SACNT:
+ case REG_SSI_SACADD:
+ case REG_SSI_SACDAT:
+ case REG_SSI_SATAG:
+ case REG_SSI_SACCST:
+ case REG_SSI_SOR:
return true;
default:
return false;
@@ -139,12 +148,12 @@ static bool fsl_ssi_volatile_reg(struct device *dev, unsigned int reg)
static bool fsl_ssi_precious_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
- case CCSR_SSI_SRX0:
- case CCSR_SSI_SRX1:
- case CCSR_SSI_SISR:
- case CCSR_SSI_SACADD:
- case CCSR_SSI_SACDAT:
- case CCSR_SSI_SATAG:
+ case REG_SSI_SRX0:
+ case REG_SSI_SRX1:
+ case REG_SSI_SISR:
+ case REG_SSI_SACADD:
+ case REG_SSI_SACDAT:
+ case REG_SSI_SATAG:
return true;
default:
return false;
@@ -154,9 +163,9 @@ static bool fsl_ssi_precious_reg(struct device *dev, unsigned int reg)
static bool fsl_ssi_writeable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
- case CCSR_SSI_SRX0:
- case CCSR_SSI_SRX1:
- case CCSR_SSI_SACCST:
+ case REG_SSI_SRX0:
+ case REG_SSI_SRX1:
+ case REG_SSI_SACCST:
return false;
default:
return true;
@@ -164,12 +173,12 @@ static bool fsl_ssi_writeable_reg(struct device *dev, unsigned int reg)
}
static const struct regmap_config fsl_ssi_regconfig = {
- .max_register = CCSR_SSI_SACCDIS,
+ .max_register = REG_SSI_SACCDIS,
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.val_format_endian = REGMAP_ENDIAN_NATIVE,
- .num_reg_defaults_raw = CCSR_SSI_SACCDIS / sizeof(uint32_t) + 1,
+ .num_reg_defaults_raw = REG_SSI_SACCDIS / sizeof(uint32_t) + 1,
.readable_reg = fsl_ssi_readable_reg,
.volatile_reg = fsl_ssi_volatile_reg,
.precious_reg = fsl_ssi_precious_reg,
@@ -185,78 +194,79 @@ struct fsl_ssi_soc_data {
};
/**
- * fsl_ssi_private: per-SSI private data
+ * fsl_ssi: per-SSI private data
*
- * @reg: Pointer to the regmap registers
+ * @regs: Pointer to the regmap registers
* @irq: IRQ of this SSI
* @cpu_dai_drv: CPU DAI driver for this device
*
* @dai_fmt: DAI configuration this device is currently used with
- * @i2s_mode: i2s and network mode configuration of the device. Is used to
- * switch between normal and i2s/network mode
- * mode depending on the number of channels
+ * @i2s_net: I2S and Network mode configurations of SCR register
* @use_dma: DMA is used or FIQ with stream filter
- * @use_dual_fifo: DMA with support for both FIFOs used
- * @fifo_deph: Depth of the SSI FIFOs
- * @slot_width: width of each DAI slot
- * @slots: number of slots
- * @rxtx_reg_val: Specific register settings for receive/transmit configuration
+ * @use_dual_fifo: DMA with support for dual FIFO mode
+ * @has_ipg_clk_name: If "ipg" is in the clock name list of the device tree
+ * @fifo_depth: Depth of the SSI FIFOs
+ * @slot_width: Width of each DAI slot
+ * @slots: Number of slots
+ * @regvals: Specific RX/TX register settings
*
- * @clk: SSI clock
- * @baudclk: SSI baud clock for master mode
+ * @clk: Clock source to access register
+ * @baudclk: Clock source to generate bit and frame-sync clocks
* @baudclk_streams: Active streams that are using baudclk
*
+ * @regcache_sfcsr: Cache sfcsr register value during suspend and resume
+ * @regcache_sacnt: Cache sacnt register value during suspend and resume
+ *
* @dma_params_tx: DMA transmit parameters
* @dma_params_rx: DMA receive parameters
* @ssi_phys: physical address of the SSI registers
*
* @fiq_params: FIQ stream filtering parameters
*
- * @pdev: Pointer to pdev used for deprecated fsl-ssi sound card
+ * @pdev: Pointer to pdev when using fsl-ssi as sound card (ppc only)
+ * TODO: Should be replaced with simple-sound-card
*
* @dbg_stats: Debugging statistics
*
* @soc: SoC specific data
+ * @dev: Pointer to &pdev->dev
*
- * @fifo_watermark: the FIFO watermark setting. Notifies DMA when
- * there are @fifo_watermark or fewer words in TX fifo or
- * @fifo_watermark or more empty words in RX fifo.
- * @dma_maxburst: max number of words to transfer in one go. So far,
- * this is always the same as fifo_watermark.
+ * @fifo_watermark: The FIFO watermark setting. Notifies DMA when there are
+ * @fifo_watermark or fewer words in TX fifo or
+ * @fifo_watermark or more empty words in RX fifo.
+ * @dma_maxburst: Max number of words to transfer in one go. So far,
+ * this is always the same as fifo_watermark.
+ *
+ * @ac97_reg_lock: Mutex lock to serialize AC97 register access operations
*/
-struct fsl_ssi_private {
+struct fsl_ssi {
struct regmap *regs;
int irq;
struct snd_soc_dai_driver cpu_dai_drv;
unsigned int dai_fmt;
- u8 i2s_mode;
+ u8 i2s_net;
bool use_dma;
bool use_dual_fifo;
bool has_ipg_clk_name;
unsigned int fifo_depth;
unsigned int slot_width;
unsigned int slots;
- struct fsl_ssi_rxtx_reg_val rxtx_reg_val;
+ struct fsl_ssi_regvals regvals[2];
struct clk *clk;
struct clk *baudclk;
unsigned int baudclk_streams;
- /* regcache for volatile regs */
u32 regcache_sfcsr;
u32 regcache_sacnt;
- /* DMA params */
struct snd_dmaengine_dai_dma_data dma_params_tx;
struct snd_dmaengine_dai_dma_data dma_params_rx;
dma_addr_t ssi_phys;
- /* params for non-dma FIQ stream filtered mode */
struct imx_pcm_fiq_params fiq_params;
- /* Used when using fsl-ssi as sound-card. This is only used by ppc and
- * should be replaced with simple-sound-card. */
struct platform_device *pdev;
struct fsl_ssi_dbg dbg_stats;
@@ -271,27 +281,27 @@ struct fsl_ssi_private {
};
/*
- * imx51 and later SoCs have a slightly different IP that allows the
- * SSI configuration while the SSI unit is running.
- *
- * More important, it is necessary on those SoCs to configure the
- * sperate TX/RX DMA bits just before starting the stream
- * (fsl_ssi_trigger). The SDMA unit has to be configured before fsl_ssi
- * sends any DMA requests to the SDMA unit, otherwise it is not defined
- * how the SDMA unit handles the DMA request.
+ * SoC specific data
*
- * SDMA units are present on devices starting at imx35 but the imx35
- * reference manual states that the DMA bits should not be changed
- * while the SSI unit is running (SSIEN). So we support the necessary
- * online configuration of fsl-ssi starting at imx51.
+ * Notes:
+ * 1) SSI in earlier SoCs has critical bits in control registers that
+ * cannot be changed after SSI starts running -- a software reset
+ * (set SSIEN to 0) is required to change their values, so an
+ * offline_config flag is set for these SoCs.
+ * 2) SDMA is available since imx35. However, imx35 does not support
+ * DMA bits changing when SSI is running, so set offline_config.
+ * 3) imx51 and later versions support register configurations when
+ * SSI is running (SSIEN); For these versions, DMA needs to be
+ * configured before SSI sends DMA request to avoid an undefined
+ * DMA request on the SDMA side.
*/
static struct fsl_ssi_soc_data fsl_ssi_mpc8610 = {
.imx = false,
.offline_config = true,
- .sisr_write_mask = CCSR_SSI_SISR_RFRC | CCSR_SSI_SISR_TFRC |
- CCSR_SSI_SISR_ROE0 | CCSR_SSI_SISR_ROE1 |
- CCSR_SSI_SISR_TUE0 | CCSR_SSI_SISR_TUE1,
+ .sisr_write_mask = SSI_SISR_RFRC | SSI_SISR_TFRC |
+ SSI_SISR_ROE0 | SSI_SISR_ROE1 |
+ SSI_SISR_TUE0 | SSI_SISR_TUE1,
};
static struct fsl_ssi_soc_data fsl_ssi_imx21 = {
@@ -304,16 +314,16 @@ static struct fsl_ssi_soc_data fsl_ssi_imx21 = {
static struct fsl_ssi_soc_data fsl_ssi_imx35 = {
.imx = true,
.offline_config = true,
- .sisr_write_mask = CCSR_SSI_SISR_RFRC | CCSR_SSI_SISR_TFRC |
- CCSR_SSI_SISR_ROE0 | CCSR_SSI_SISR_ROE1 |
- CCSR_SSI_SISR_TUE0 | CCSR_SSI_SISR_TUE1,
+ .sisr_write_mask = SSI_SISR_RFRC | SSI_SISR_TFRC |
+ SSI_SISR_ROE0 | SSI_SISR_ROE1 |
+ SSI_SISR_TUE0 | SSI_SISR_TUE1,
};
static struct fsl_ssi_soc_data fsl_ssi_imx51 = {
.imx = true,
.offline_config = false,
- .sisr_write_mask = CCSR_SSI_SISR_ROE0 | CCSR_SSI_SISR_ROE1 |
- CCSR_SSI_SISR_TUE0 | CCSR_SSI_SISR_TUE1,
+ .sisr_write_mask = SSI_SISR_ROE0 | SSI_SISR_ROE1 |
+ SSI_SISR_TUE0 | SSI_SISR_TUE1,
};
static const struct of_device_id fsl_ssi_ids[] = {
@@ -325,108 +335,86 @@ static const struct of_device_id fsl_ssi_ids[] = {
};
MODULE_DEVICE_TABLE(of, fsl_ssi_ids);
-static bool fsl_ssi_is_ac97(struct fsl_ssi_private *ssi_private)
+static bool fsl_ssi_is_ac97(struct fsl_ssi *ssi)
{
- return (ssi_private->dai_fmt & SND_SOC_DAIFMT_FORMAT_MASK) ==
+ return (ssi->dai_fmt & SND_SOC_DAIFMT_FORMAT_MASK) ==
SND_SOC_DAIFMT_AC97;
}
-static bool fsl_ssi_is_i2s_master(struct fsl_ssi_private *ssi_private)
+static bool fsl_ssi_is_i2s_master(struct fsl_ssi *ssi)
{
- return (ssi_private->dai_fmt & SND_SOC_DAIFMT_MASTER_MASK) ==
+ return (ssi->dai_fmt & SND_SOC_DAIFMT_MASTER_MASK) ==
SND_SOC_DAIFMT_CBS_CFS;
}
-static bool fsl_ssi_is_i2s_cbm_cfs(struct fsl_ssi_private *ssi_private)
+static bool fsl_ssi_is_i2s_cbm_cfs(struct fsl_ssi *ssi)
{
- return (ssi_private->dai_fmt & SND_SOC_DAIFMT_MASTER_MASK) ==
+ return (ssi->dai_fmt & SND_SOC_DAIFMT_MASTER_MASK) ==
SND_SOC_DAIFMT_CBM_CFS;
}
+
/**
- * fsl_ssi_isr: SSI interrupt handler
- *
- * Although it's possible to use the interrupt handler to send and receive
- * data to/from the SSI, we use the DMA instead. Programming is more
- * complicated, but the performance is much better.
- *
- * This interrupt handler is used only to gather statistics.
- *
- * @irq: IRQ of the SSI device
- * @dev_id: pointer to the ssi_private structure for this SSI device
+ * Interrupt handler to gather statistics
*/
static irqreturn_t fsl_ssi_isr(int irq, void *dev_id)
{
- struct fsl_ssi_private *ssi_private = dev_id;
- struct regmap *regs = ssi_private->regs;
+ struct fsl_ssi *ssi = dev_id;
+ struct regmap *regs = ssi->regs;
__be32 sisr;
__be32 sisr2;
- /* We got an interrupt, so read the status register to see what we
- were interrupted for. We mask it with the Interrupt Enable register
- so that we only check for events that we're interested in.
- */
- regmap_read(regs, CCSR_SSI_SISR, &sisr);
+ regmap_read(regs, REG_SSI_SISR, &sisr);
- sisr2 = sisr & ssi_private->soc->sisr_write_mask;
+ sisr2 = sisr & ssi->soc->sisr_write_mask;
/* Clear the bits that we set */
if (sisr2)
- regmap_write(regs, CCSR_SSI_SISR, sisr2);
+ regmap_write(regs, REG_SSI_SISR, sisr2);
- fsl_ssi_dbg_isr(&ssi_private->dbg_stats, sisr);
+ fsl_ssi_dbg_isr(&ssi->dbg_stats, sisr);
return IRQ_HANDLED;
}
-/*
- * Enable/Disable all rx/tx config flags at once.
+/**
+ * Enable or disable all rx/tx config flags at once
*/
-static void fsl_ssi_rxtx_config(struct fsl_ssi_private *ssi_private,
- bool enable)
+static void fsl_ssi_rxtx_config(struct fsl_ssi *ssi, bool enable)
{
- struct regmap *regs = ssi_private->regs;
- struct fsl_ssi_rxtx_reg_val *vals = &ssi_private->rxtx_reg_val;
+ struct regmap *regs = ssi->regs;
+ struct fsl_ssi_regvals *vals = ssi->regvals;
if (enable) {
- regmap_update_bits(regs, CCSR_SSI_SIER,
- vals->rx.sier | vals->tx.sier,
- vals->rx.sier | vals->tx.sier);
- regmap_update_bits(regs, CCSR_SSI_SRCR,
- vals->rx.srcr | vals->tx.srcr,
- vals->rx.srcr | vals->tx.srcr);
- regmap_update_bits(regs, CCSR_SSI_STCR,
- vals->rx.stcr | vals->tx.stcr,
- vals->rx.stcr | vals->tx.stcr);
+ regmap_update_bits(regs, REG_SSI_SIER,
+ vals[RX].sier | vals[TX].sier,
+ vals[RX].sier | vals[TX].sier);
+ regmap_update_bits(regs, REG_SSI_SRCR,
+ vals[RX].srcr | vals[TX].srcr,
+ vals[RX].srcr | vals[TX].srcr);
+ regmap_update_bits(regs, REG_SSI_STCR,
+ vals[RX].stcr | vals[TX].stcr,
+ vals[RX].stcr | vals[TX].stcr);
} else {
- regmap_update_bits(regs, CCSR_SSI_SRCR,
- vals->rx.srcr | vals->tx.srcr, 0);
- regmap_update_bits(regs, CCSR_SSI_STCR,
- vals->rx.stcr | vals->tx.stcr, 0);
- regmap_update_bits(regs, CCSR_SSI_SIER,
- vals->rx.sier | vals->tx.sier, 0);
+ regmap_update_bits(regs, REG_SSI_SRCR,
+ vals[RX].srcr | vals[TX].srcr, 0);
+ regmap_update_bits(regs, REG_SSI_STCR,
+ vals[RX].stcr | vals[TX].stcr, 0);
+ regmap_update_bits(regs, REG_SSI_SIER,
+ vals[RX].sier | vals[TX].sier, 0);
}
}
-/*
- * Clear RX or TX FIFO to remove samples from the previous
- * stream session which may be still present in the FIFO and
- * may introduce bad samples and/or channel slipping.
- *
- * Note: The SOR is not documented in recent IMX datasheet, but
- * is described in IMX51 reference manual at section 56.3.3.15.
+/**
+ * Clear remaining data in the FIFO to avoid dirty data or channel slipping
*/
-static void fsl_ssi_fifo_clear(struct fsl_ssi_private *ssi_private,
- bool is_rx)
+static void fsl_ssi_fifo_clear(struct fsl_ssi *ssi, bool is_rx)
{
- if (is_rx) {
- regmap_update_bits(ssi_private->regs, CCSR_SSI_SOR,
- CCSR_SSI_SOR_RX_CLR, CCSR_SSI_SOR_RX_CLR);
- } else {
- regmap_update_bits(ssi_private->regs, CCSR_SSI_SOR,
- CCSR_SSI_SOR_TX_CLR, CCSR_SSI_SOR_TX_CLR);
- }
+ bool tx = !is_rx;
+
+ regmap_update_bits(ssi->regs, REG_SSI_SOR,
+ SSI_SOR_xX_CLR(tx), SSI_SOR_xX_CLR(tx));
}
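
SSI_SOR_xX_CLR() folds the old if/else into a single register update; a plausible shape for the helper (the real definition lives in fsl_ssi.h) would be:

	/* Assumed helper: select the TX or RX FIFO-clear bit of the
	 * SOR register by boolean direction. */
	#define SSI_SOR_xX_CLR(tx)	((tx) ? SSI_SOR_TX_CLR : SSI_SOR_RX_CLR)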
-/*
+/**
* Calculate the bits that have to be disabled for the current stream that is
* getting disabled. This keeps the bits enabled that are necessary for the
* second stream to work if 'stream_active' is true.
@@ -446,261 +434,239 @@ static void fsl_ssi_fifo_clear(struct fsl_ssi_private *ssi_private,
((vals_disable) & \
((vals_disable) ^ ((vals_stream) * (u32)!!(stream_active))))
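
The macro reads more easily with a worked example: when the other stream stays active, the XOR-then-AND strips the shared bits out of the disable mask, so only bits exclusive to the closing stream get cleared:

	/* vals_disable  = 0b110 (bits of the stream being stopped)
	 * vals_stream   = 0b011 (bits of the other, still-active stream)
	 * stream_active = 1
	 *
	 * 0b110 & (0b110 ^ 0b011) = 0b110 & 0b101 = 0b100
	 * -> only the non-shared bit is disabled.
	 *
	 * With stream_active = 0 the multiplier zeroes vals_stream and
	 * the whole vals_disable mask is returned unchanged. */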
-/*
- * Enable/Disable a ssi configuration. You have to pass either
- * ssi_private->rxtx_reg_val.rx or tx as vals parameter.
+/**
+ * Enable or disable SSI configuration.
*/
-static void fsl_ssi_config(struct fsl_ssi_private *ssi_private, bool enable,
- struct fsl_ssi_reg_val *vals)
+static void fsl_ssi_config(struct fsl_ssi *ssi, bool enable,
+ struct fsl_ssi_regvals *vals)
{
- struct regmap *regs = ssi_private->regs;
- struct fsl_ssi_reg_val *avals;
+ struct regmap *regs = ssi->regs;
+ struct fsl_ssi_regvals *avals;
int nr_active_streams;
- u32 scr_val;
+ u32 scr;
int keep_active;
- regmap_read(regs, CCSR_SSI_SCR, &scr_val);
+ regmap_read(regs, REG_SSI_SCR, &scr);
- nr_active_streams = !!(scr_val & CCSR_SSI_SCR_TE) +
- !!(scr_val & CCSR_SSI_SCR_RE);
+ nr_active_streams = !!(scr & SSI_SCR_TE) + !!(scr & SSI_SCR_RE);
if (nr_active_streams - 1 > 0)
keep_active = 1;
else
keep_active = 0;
- /* Find the other direction values rx or tx which we do not want to
- * modify */
- if (&ssi_private->rxtx_reg_val.rx == vals)
- avals = &ssi_private->rxtx_reg_val.tx;
+ /* Get the opposite direction to keep its values untouched */
+ if (&ssi->regvals[RX] == vals)
+ avals = &ssi->regvals[TX];
else
- avals = &ssi_private->rxtx_reg_val.rx;
+ avals = &ssi->regvals[RX];
- /* If vals should be disabled, start with disabling the unit */
if (!enable) {
+ /*
+ * To keep the other stream safe, exclude shared bits between
+ * both streams, and get safe bits to disable current stream
+ */
u32 scr = fsl_ssi_disable_val(vals->scr, avals->scr,
- keep_active);
- regmap_update_bits(regs, CCSR_SSI_SCR, scr, 0);
+ keep_active);
+ /* Safely disable SCR register for the stream */
+ regmap_update_bits(regs, REG_SSI_SCR, scr, 0);
}
/*
- * We are running on a SoC which does not support online SSI
- * reconfiguration, so we have to enable all necessary flags at once
- * even if we do not use them later (capture and playback configuration)
+ * For cases where online configuration is not supported,
+ * 1) Enable all necessary bits of both streams when 1st stream starts
+ * even if the opposite stream will not start
+ * 2) Disable all remaining bits of both streams when last stream ends
*/
- if (ssi_private->soc->offline_config) {
- if ((enable && !nr_active_streams) ||
- (!enable && !keep_active))
- fsl_ssi_rxtx_config(ssi_private, enable);
+ if (ssi->soc->offline_config) {
+ if ((enable && !nr_active_streams) || (!enable && !keep_active))
+ fsl_ssi_rxtx_config(ssi, enable);
goto config_done;
}
- /*
- * Configure single direction units while the SSI unit is running
- * (online configuration)
- */
+ /* Online configure single direction while SSI is running */
if (enable) {
- fsl_ssi_fifo_clear(ssi_private, vals->scr & CCSR_SSI_SCR_RE);
+ fsl_ssi_fifo_clear(ssi, vals->scr & SSI_SCR_RE);
- regmap_update_bits(regs, CCSR_SSI_SRCR, vals->srcr, vals->srcr);
- regmap_update_bits(regs, CCSR_SSI_STCR, vals->stcr, vals->stcr);
- regmap_update_bits(regs, CCSR_SSI_SIER, vals->sier, vals->sier);
+ regmap_update_bits(regs, REG_SSI_SRCR, vals->srcr, vals->srcr);
+ regmap_update_bits(regs, REG_SSI_STCR, vals->stcr, vals->stcr);
+ regmap_update_bits(regs, REG_SSI_SIER, vals->sier, vals->sier);
} else {
u32 sier;
u32 srcr;
u32 stcr;
/*
- * Disabling the necessary flags for one of rx/tx while the
- * other stream is active is a little bit more difficult. We
- * have to disable only those flags that differ between both
- * streams (rx XOR tx) and that are set in the stream that is
- * disabled now. Otherwise we could alter flags of the other
- * stream
+ * To keep the other stream safe, exclude shared bits between
+ * both streams, and get safe bits to disable current stream
*/
-
- /* These assignments are simply vals without bits set in avals*/
sier = fsl_ssi_disable_val(vals->sier, avals->sier,
- keep_active);
+ keep_active);
srcr = fsl_ssi_disable_val(vals->srcr, avals->srcr,
- keep_active);
+ keep_active);
stcr = fsl_ssi_disable_val(vals->stcr, avals->stcr,
- keep_active);
+ keep_active);
- regmap_update_bits(regs, CCSR_SSI_SRCR, srcr, 0);
- regmap_update_bits(regs, CCSR_SSI_STCR, stcr, 0);
- regmap_update_bits(regs, CCSR_SSI_SIER, sier, 0);
+ /* Safely disable other control registers for the stream */
+ regmap_update_bits(regs, REG_SSI_SRCR, srcr, 0);
+ regmap_update_bits(regs, REG_SSI_STCR, stcr, 0);
+ regmap_update_bits(regs, REG_SSI_SIER, sier, 0);
}
config_done:
/* Enabling of subunits is done after configuration */
if (enable) {
- if (ssi_private->use_dma && (vals->scr & CCSR_SSI_SCR_TE)) {
- /*
- * Be sure the Tx FIFO is filled when TE is set.
- * Otherwise, there are some chances to start the
- * playback with some void samples inserted first,
- * generating a channel slip.
- *
- * First, SSIEN must be set, to let the FIFO be filled.
- *
- * Notes:
- * - Limit this fix to the DMA case until FIQ cases can
- * be tested.
- * - Limit the length of the busy loop to not lock the
- * system too long, even if 1-2 loops are sufficient
- * in general.
- */
+ /*
+ * Start DMA before setting TE to avoid FIFO underrun
+ * which may cause a channel slip or a channel swap
+ *
+ * TODO: FIQ cases might also need this upon testing
+ */
+ if (ssi->use_dma && (vals->scr & SSI_SCR_TE)) {
int i;
int max_loop = 100;
- regmap_update_bits(regs, CCSR_SSI_SCR,
- CCSR_SSI_SCR_SSIEN, CCSR_SSI_SCR_SSIEN);
+
+ /* Enable SSI first to send TX DMA request */
+ regmap_update_bits(regs, REG_SSI_SCR,
+ SSI_SCR_SSIEN, SSI_SCR_SSIEN);
+
+ /* Busy wait until TX FIFO not empty -- DMA working */
for (i = 0; i < max_loop; i++) {
u32 sfcsr;
- regmap_read(regs, CCSR_SSI_SFCSR, &sfcsr);
- if (CCSR_SSI_SFCSR_TFCNT0(sfcsr))
+ regmap_read(regs, REG_SSI_SFCSR, &sfcsr);
+ if (SSI_SFCSR_TFCNT0(sfcsr))
break;
}
if (i == max_loop) {
- dev_err(ssi_private->dev,
+ dev_err(ssi->dev,
"Timeout waiting TX FIFO filling\n");
}
}
- regmap_update_bits(regs, CCSR_SSI_SCR, vals->scr, vals->scr);
+ /* Enable all remaining bits */
+ regmap_update_bits(regs, REG_SSI_SCR, vals->scr, vals->scr);
}
}
+static void fsl_ssi_rx_config(struct fsl_ssi *ssi, bool enable)
+{
+ fsl_ssi_config(ssi, enable, &ssi->regvals[RX]);
+}
-static void fsl_ssi_rx_config(struct fsl_ssi_private *ssi_private, bool enable)
+static void fsl_ssi_tx_ac97_saccst_setup(struct fsl_ssi *ssi)
{
- fsl_ssi_config(ssi_private, enable, &ssi_private->rxtx_reg_val.rx);
+ struct regmap *regs = ssi->regs;
+
+ /* no SACC{ST,EN,DIS} regs on imx21-class SSI */
+ if (!ssi->soc->imx21regs) {
+ /* Disable all channel slots */
+ regmap_write(regs, REG_SSI_SACCDIS, 0xff);
+ /* Enable slots 3 & 4 -- PCM Playback Left & Right channels */
+ regmap_write(regs, REG_SSI_SACCEN, 0x300);
+ }
}
-static void fsl_ssi_tx_config(struct fsl_ssi_private *ssi_private, bool enable)
+static void fsl_ssi_tx_config(struct fsl_ssi *ssi, bool enable)
{
- fsl_ssi_config(ssi_private, enable, &ssi_private->rxtx_reg_val.tx);
+ /*
+ * SACCST might be modified via the AC Link by a CODEC if it sends
+ * extra bits in its SLOTREQ requests, which would accidentally
+ * send valid data to slots other than the normal playback slots.
+ *
+ * To be safe, configure SACCST right before TX starts.
+ */
+ if (enable && fsl_ssi_is_ac97(ssi))
+ fsl_ssi_tx_ac97_saccst_setup(ssi);
+
+ fsl_ssi_config(ssi, enable, &ssi->regvals[TX]);
}
-/*
- * Setup rx/tx register values used to enable/disable the streams. These will
- * be used later in fsl_ssi_config to setup the streams without the need to
- * check for all different SSI modes.
+/**
+ * Cache critical bits of SIER, SRCR, STCR and SCR to later set them safely
*/
-static void fsl_ssi_setup_reg_vals(struct fsl_ssi_private *ssi_private)
+static void fsl_ssi_setup_regvals(struct fsl_ssi *ssi)
{
- struct fsl_ssi_rxtx_reg_val *reg = &ssi_private->rxtx_reg_val;
-
- reg->rx.sier = CCSR_SSI_SIER_RFF0_EN;
- reg->rx.srcr = CCSR_SSI_SRCR_RFEN0;
- reg->rx.scr = 0;
- reg->tx.sier = CCSR_SSI_SIER_TFE0_EN;
- reg->tx.stcr = CCSR_SSI_STCR_TFEN0;
- reg->tx.scr = 0;
-
- if (!fsl_ssi_is_ac97(ssi_private)) {
- reg->rx.scr = CCSR_SSI_SCR_SSIEN | CCSR_SSI_SCR_RE;
- reg->rx.sier |= CCSR_SSI_SIER_RFF0_EN;
- reg->tx.scr = CCSR_SSI_SCR_SSIEN | CCSR_SSI_SCR_TE;
- reg->tx.sier |= CCSR_SSI_SIER_TFE0_EN;
+ struct fsl_ssi_regvals *vals = ssi->regvals;
+
+ vals[RX].sier = SSI_SIER_RFF0_EN;
+ vals[RX].srcr = SSI_SRCR_RFEN0;
+ vals[RX].scr = 0;
+ vals[TX].sier = SSI_SIER_TFE0_EN;
+ vals[TX].stcr = SSI_STCR_TFEN0;
+ vals[TX].scr = 0;
+
+ /* AC97 has already enabled SSIEN, RE and TE, so ignore them */
+ if (!fsl_ssi_is_ac97(ssi)) {
+ vals[RX].scr = SSI_SCR_SSIEN | SSI_SCR_RE;
+ vals[TX].scr = SSI_SCR_SSIEN | SSI_SCR_TE;
}
- if (ssi_private->use_dma) {
- reg->rx.sier |= CCSR_SSI_SIER_RDMAE;
- reg->tx.sier |= CCSR_SSI_SIER_TDMAE;
+ if (ssi->use_dma) {
+ vals[RX].sier |= SSI_SIER_RDMAE;
+ vals[TX].sier |= SSI_SIER_TDMAE;
} else {
- reg->rx.sier |= CCSR_SSI_SIER_RIE;
- reg->tx.sier |= CCSR_SSI_SIER_TIE;
+ vals[RX].sier |= SSI_SIER_RIE;
+ vals[TX].sier |= SSI_SIER_TIE;
}
- reg->rx.sier |= FSLSSI_SIER_DBG_RX_FLAGS;
- reg->tx.sier |= FSLSSI_SIER_DBG_TX_FLAGS;
+ vals[RX].sier |= FSLSSI_SIER_DBG_RX_FLAGS;
+ vals[TX].sier |= FSLSSI_SIER_DBG_TX_FLAGS;
}
-static void fsl_ssi_setup_ac97(struct fsl_ssi_private *ssi_private)
+static void fsl_ssi_setup_ac97(struct fsl_ssi *ssi)
{
- struct regmap *regs = ssi_private->regs;
-
- /*
- * Setup the clock control register
- */
- regmap_write(regs, CCSR_SSI_STCCR,
- CCSR_SSI_SxCCR_WL(17) | CCSR_SSI_SxCCR_DC(13));
- regmap_write(regs, CCSR_SSI_SRCCR,
- CCSR_SSI_SxCCR_WL(17) | CCSR_SSI_SxCCR_DC(13));
+ struct regmap *regs = ssi->regs;
- /*
- * Enable AC97 mode and startup the SSI
- */
- regmap_write(regs, CCSR_SSI_SACNT,
- CCSR_SSI_SACNT_AC97EN | CCSR_SSI_SACNT_FV);
+ /* Setup the clock control register */
+ regmap_write(regs, REG_SSI_STCCR, SSI_SxCCR_WL(17) | SSI_SxCCR_DC(13));
+ regmap_write(regs, REG_SSI_SRCCR, SSI_SxCCR_WL(17) | SSI_SxCCR_DC(13));
- /* no SACC{ST,EN,DIS} regs on imx21-class SSI */
- if (!ssi_private->soc->imx21regs) {
- regmap_write(regs, CCSR_SSI_SACCDIS, 0xff);
- regmap_write(regs, CCSR_SSI_SACCEN, 0x300);
- }
+ /* Enable AC97 mode and startup the SSI */
+ regmap_write(regs, REG_SSI_SACNT, SSI_SACNT_AC97EN | SSI_SACNT_FV);
- /*
- * Enable SSI, Transmit and Receive. AC97 has to communicate with the
- * codec before a stream is started.
- */
- regmap_update_bits(regs, CCSR_SSI_SCR,
- CCSR_SSI_SCR_SSIEN | CCSR_SSI_SCR_TE | CCSR_SSI_SCR_RE,
- CCSR_SSI_SCR_SSIEN | CCSR_SSI_SCR_TE | CCSR_SSI_SCR_RE);
+ /* AC97 has to communicate with codec before starting a stream */
+ regmap_update_bits(regs, REG_SSI_SCR,
+ SSI_SCR_SSIEN | SSI_SCR_TE | SSI_SCR_RE,
+ SSI_SCR_SSIEN | SSI_SCR_TE | SSI_SCR_RE);
- regmap_write(regs, CCSR_SSI_SOR, CCSR_SSI_SOR_WAIT(3));
+ regmap_write(regs, REG_SSI_SOR, SSI_SOR_WAIT(3));
}
-/**
- * fsl_ssi_startup: create a new substream
- *
- * This is the first function called when a stream is opened.
- *
- * If this is the first stream open, then grab the IRQ and program most of
- * the SSI registers.
- */
static int fsl_ssi_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct fsl_ssi_private *ssi_private =
- snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(rtd->cpu_dai);
int ret;
- ret = clk_prepare_enable(ssi_private->clk);
+ ret = clk_prepare_enable(ssi->clk);
if (ret)
return ret;
- /* When using dual fifo mode, it is safer to ensure an even period
+ /*
+ * When using dual fifo mode, it is safer to ensure an even period
* size. If the period size happens to be odd while DMA always
* starts its task from fifo0, fifo1 would be neglected at the end
* of each period, yet SSI would still access fifo1 with invalid data.
*/
- if (ssi_private->use_dual_fifo)
+ if (ssi->use_dual_fifo)
snd_pcm_hw_constraint_step(substream->runtime, 0,
- SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 2);
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 2);
return 0;
}
-/**
- * fsl_ssi_shutdown: shutdown the SSI
- *
- */
static void fsl_ssi_shutdown(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
+ struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct fsl_ssi_private *ssi_private =
- snd_soc_dai_get_drvdata(rtd->cpu_dai);
-
- clk_disable_unprepare(ssi_private->clk);
+ struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ clk_disable_unprepare(ssi->clk);
}
/**
- * fsl_ssi_set_bclk - configure Digital Audio Interface bit clock
+ * Configure Digital Audio Interface bit clock
*
* Note: This function can be only called when using SSI as DAI master
*
@@ -709,12 +675,13 @@ static void fsl_ssi_shutdown(struct snd_pcm_substream *substream,
* (In 2-channel I2S Master mode, slot_width is fixed 32)
*/
static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
- struct snd_soc_dai *cpu_dai,
- struct snd_pcm_hw_params *hw_params)
+ struct snd_soc_dai *dai,
+ struct snd_pcm_hw_params *hw_params)
{
- struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(cpu_dai);
- struct regmap *regs = ssi_private->regs;
- int synchronous = ssi_private->cpu_dai_drv.symmetric_rates, ret;
+ bool tx2, tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+ struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(dai);
+ struct regmap *regs = ssi->regs;
+ int synchronous = ssi->cpu_dai_drv.symmetric_rates, ret;
u32 pm = 999, div2, psr, stccr, mask, afreq, factor, i;
unsigned long clkrate, baudrate, tmprate;
unsigned int slots = params_channels(hw_params);
@@ -724,29 +691,29 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
bool baudclk_is_used;
/* Override slots and slot_width if being specifically set... */
- if (ssi_private->slots)
- slots = ssi_private->slots;
+ if (ssi->slots)
+ slots = ssi->slots;
/* ...but keep 32 bits if slots is 2 -- I2S Master mode */
- if (ssi_private->slot_width && slots != 2)
- slot_width = ssi_private->slot_width;
+ if (ssi->slot_width && slots != 2)
+ slot_width = ssi->slot_width;
/* Generate bit clock based on the slot number and slot width */
freq = slots * slot_width * params_rate(hw_params);
/* Don't apply it to any non-baudclk circumstance */
- if (IS_ERR(ssi_private->baudclk))
+ if (IS_ERR(ssi->baudclk))
return -EINVAL;
/*
* Hardware limitation: The bclk rate must be
* never greater than 1/5 IPG clock rate
*/
- if (freq * 5 > clk_get_rate(ssi_private->clk)) {
- dev_err(cpu_dai->dev, "bitclk > ipgclk/5\n");
+ if (freq * 5 > clk_get_rate(ssi->clk)) {
+ dev_err(dai->dev, "bitclk > ipgclk / 5\n");
return -EINVAL;
}
- baudclk_is_used = ssi_private->baudclk_streams & ~(BIT(substream->stream));
+ baudclk_is_used = ssi->baudclk_streams & ~(BIT(substream->stream));
/* It should be already enough to divide clock by setting pm alone */
psr = 0;
@@ -758,9 +725,9 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
tmprate = freq * factor * (i + 1);
if (baudclk_is_used)
- clkrate = clk_get_rate(ssi_private->baudclk);
+ clkrate = clk_get_rate(ssi->baudclk);
else
- clkrate = clk_round_rate(ssi_private->baudclk, tmprate);
+ clkrate = clk_round_rate(ssi->baudclk, tmprate);
clkrate /= factor;
afreq = clkrate / (i + 1);
@@ -791,24 +758,22 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
/* No proper pm was found if it still remains at the initial value */
if (pm == 999) {
- dev_err(cpu_dai->dev, "failed to handle the required sysclk\n");
+ dev_err(dai->dev, "failed to handle the required sysclk\n");
return -EINVAL;
}
- stccr = CCSR_SSI_SxCCR_PM(pm + 1) | (div2 ? CCSR_SSI_SxCCR_DIV2 : 0) |
- (psr ? CCSR_SSI_SxCCR_PSR : 0);
- mask = CCSR_SSI_SxCCR_PM_MASK | CCSR_SSI_SxCCR_DIV2 |
- CCSR_SSI_SxCCR_PSR;
+ stccr = SSI_SxCCR_PM(pm + 1) | (div2 ? SSI_SxCCR_DIV2 : 0) |
+ (psr ? SSI_SxCCR_PSR : 0);
+ mask = SSI_SxCCR_PM_MASK | SSI_SxCCR_DIV2 | SSI_SxCCR_PSR;
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK || synchronous)
- regmap_update_bits(regs, CCSR_SSI_STCCR, mask, stccr);
- else
- regmap_update_bits(regs, CCSR_SSI_SRCCR, mask, stccr);
+ /* STCCR is used for RX in synchronous mode */
+ tx2 = tx || synchronous;
+ regmap_update_bits(regs, REG_SSI_SxCCR(tx2), mask, stccr);
if (!baudclk_is_used) {
- ret = clk_set_rate(ssi_private->baudclk, baudrate);
+ ret = clk_set_rate(ssi->baudclk, baudrate);
if (ret) {
- dev_err(cpu_dai->dev, "failed to set baudclk rate\n");
+ dev_err(dai->dev, "failed to set baudclk rate\n");
return -EINVAL;
}
}
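
The arithmetic in fsl_ssi_set_bclk() is worth one concrete number: for 2 slots of 32 bits at 48 kHz the required bit clock is 2 * 32 * 48000 = 3.072 MHz, so the IPG clock must run at 15.36 MHz or faster to satisfy the 1/5 limit. A hedged sketch of the derivation and check:

	/* Sketch of the bclk derivation and hardware limit check */
	unsigned int slots = 2, slot_width = 32, rate = 48000;
	unsigned long freq = slots * slot_width * rate;	/* 3072000 Hz */

	if (freq * 5 > clk_get_rate(ssi->clk))	/* bclk must be <= ipg / 5 */
		return -EINVAL;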
@@ -817,185 +782,165 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
}
/**
- * fsl_ssi_hw_params - program the sample size
- *
- * Most of the SSI registers have been programmed in the startup function,
- * but the word length must be programmed here. Unfortunately, programming
- * the SxCCR.WL bits requires the SSI to be temporarily disabled. This can
- * cause a problem with supporting simultaneous playback and capture. If
- * the SSI is already playing a stream, then that stream may be temporarily
- * stopped when you start capture.
+ * Configure SSI based on PCM hardware parameters
*
- * Note: The SxCCR.DC and SxCCR.PM bits are only used if the SSI is the
- * clock master.
+ * Notes:
+ * 1) SxCCR.WL bits are critical bits that require SSI to be temporarily
+ * disabled on offline_config SoCs. Even for online configurable SoCs
+ * running in synchronous mode (both TX and RX use STCCR), it is not
+ * safe to re-configure them when both two streams start running.
+ * 2) SxCCR.PM, SxCCR.DIV2 and SxCCR.PSR bits will be configured in the
+ * fsl_ssi_set_bclk() if SSI is the DAI clock master.
*/
static int fsl_ssi_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *hw_params, struct snd_soc_dai *cpu_dai)
+ struct snd_pcm_hw_params *hw_params,
+ struct snd_soc_dai *dai)
{
- struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(cpu_dai);
- struct regmap *regs = ssi_private->regs;
+ bool tx2, tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+ struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(dai);
+ struct regmap *regs = ssi->regs;
unsigned int channels = params_channels(hw_params);
unsigned int sample_size = params_width(hw_params);
- u32 wl = CCSR_SSI_SxCCR_WL(sample_size);
+ u32 wl = SSI_SxCCR_WL(sample_size);
int ret;
- u32 scr_val;
+ u32 scr;
int enabled;
- regmap_read(regs, CCSR_SSI_SCR, &scr_val);
- enabled = scr_val & CCSR_SSI_SCR_SSIEN;
+ regmap_read(regs, REG_SSI_SCR, &scr);
+ enabled = scr & SSI_SCR_SSIEN;
/*
- * If we're in synchronous mode, and the SSI is already enabled,
- * then STCCR is already set properly.
+ * SSI is properly configured if it is enabled and running in
+ * synchronous mode; note that AC97 mode is an exception that
+ * must set separate configurations for STCCR and SRCCR despite
+ * running in synchronous mode.
*/
- if (enabled && ssi_private->cpu_dai_drv.symmetric_rates)
+ if (enabled && ssi->cpu_dai_drv.symmetric_rates)
return 0;
- if (fsl_ssi_is_i2s_master(ssi_private)) {
- ret = fsl_ssi_set_bclk(substream, cpu_dai, hw_params);
+ if (fsl_ssi_is_i2s_master(ssi)) {
+ ret = fsl_ssi_set_bclk(substream, dai, hw_params);
if (ret)
return ret;
/* Do not enable the clock if it is already enabled */
- if (!(ssi_private->baudclk_streams & BIT(substream->stream))) {
- ret = clk_prepare_enable(ssi_private->baudclk);
+ if (!(ssi->baudclk_streams & BIT(substream->stream))) {
+ ret = clk_prepare_enable(ssi->baudclk);
if (ret)
return ret;
- ssi_private->baudclk_streams |= BIT(substream->stream);
+ ssi->baudclk_streams |= BIT(substream->stream);
}
}
- if (!fsl_ssi_is_ac97(ssi_private)) {
- u8 i2smode;
- /*
- * Switch to normal net mode in order to have a frame sync
- * signal every 32 bits instead of 16 bits
- */
- if (fsl_ssi_is_i2s_cbm_cfs(ssi_private) && sample_size == 16)
- i2smode = CCSR_SSI_SCR_I2S_MODE_NORMAL |
- CCSR_SSI_SCR_NET;
+ if (!fsl_ssi_is_ac97(ssi)) {
+ u8 i2s_net;
+ /* Normal + Network mode to send 16-bit data in 32-bit frames */
+ if (fsl_ssi_is_i2s_cbm_cfs(ssi) && sample_size == 16)
+ i2s_net = SSI_SCR_I2S_MODE_NORMAL | SSI_SCR_NET;
else
- i2smode = ssi_private->i2s_mode;
+ i2s_net = ssi->i2s_net;
- regmap_update_bits(regs, CCSR_SSI_SCR,
- CCSR_SSI_SCR_NET | CCSR_SSI_SCR_I2S_MODE_MASK,
- channels == 1 ? 0 : i2smode);
+ regmap_update_bits(regs, REG_SSI_SCR,
+ SSI_SCR_I2S_NET_MASK,
+ channels == 1 ? 0 : i2s_net);
}
- /*
- * FIXME: The documentation says that SxCCR[WL] should not be
- * modified while the SSI is enabled. The only time this can
- * happen is if we're trying to do simultaneous playback and
- * capture in asynchronous mode. Unfortunately, I have been enable
- * to get that to work at all on the P1022DS. Therefore, we don't
- * bother to disable/enable the SSI when setting SxCCR[WL], because
- * the SSI will stop anyway. Maybe one day, this will get fixed.
- */
-
/* In synchronous mode, the SSI uses STCCR for capture */
- if ((substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ||
- ssi_private->cpu_dai_drv.symmetric_rates)
- regmap_update_bits(regs, CCSR_SSI_STCCR, CCSR_SSI_SxCCR_WL_MASK,
- wl);
- else
- regmap_update_bits(regs, CCSR_SSI_SRCCR, CCSR_SSI_SxCCR_WL_MASK,
- wl);
+ tx2 = tx || ssi->cpu_dai_drv.symmetric_rates;
+ regmap_update_bits(regs, REG_SSI_SxCCR(tx2), SSI_SxCCR_WL_MASK, wl);
return 0;
}
static int fsl_ssi_hw_free(struct snd_pcm_substream *substream,
- struct snd_soc_dai *cpu_dai)
+ struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct fsl_ssi_private *ssi_private =
- snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(rtd->cpu_dai);
- if (fsl_ssi_is_i2s_master(ssi_private) &&
- ssi_private->baudclk_streams & BIT(substream->stream)) {
- clk_disable_unprepare(ssi_private->baudclk);
- ssi_private->baudclk_streams &= ~BIT(substream->stream);
+ if (fsl_ssi_is_i2s_master(ssi) &&
+ ssi->baudclk_streams & BIT(substream->stream)) {
+ clk_disable_unprepare(ssi->baudclk);
+ ssi->baudclk_streams &= ~BIT(substream->stream);
}
return 0;
}
static int _fsl_ssi_set_dai_fmt(struct device *dev,
- struct fsl_ssi_private *ssi_private,
- unsigned int fmt)
+ struct fsl_ssi *ssi, unsigned int fmt)
{
- struct regmap *regs = ssi_private->regs;
+ struct regmap *regs = ssi->regs;
u32 strcr = 0, stcr, srcr, scr, mask;
u8 wm;
- ssi_private->dai_fmt = fmt;
+ ssi->dai_fmt = fmt;
- if (fsl_ssi_is_i2s_master(ssi_private) && IS_ERR(ssi_private->baudclk)) {
- dev_err(dev, "baudclk is missing which is necessary for master mode\n");
+ if (fsl_ssi_is_i2s_master(ssi) && IS_ERR(ssi->baudclk)) {
+ dev_err(dev, "missing baudclk for master mode\n");
return -EINVAL;
}
- fsl_ssi_setup_reg_vals(ssi_private);
+ fsl_ssi_setup_regvals(ssi);
- regmap_read(regs, CCSR_SSI_SCR, &scr);
- scr &= ~(CCSR_SSI_SCR_SYN | CCSR_SSI_SCR_I2S_MODE_MASK);
- scr |= CCSR_SSI_SCR_SYNC_TX_FS;
+ regmap_read(regs, REG_SSI_SCR, &scr);
+ scr &= ~(SSI_SCR_SYN | SSI_SCR_I2S_MODE_MASK);
+ /* Synchronize frame sync clock for TE to avoid data slipping */
+ scr |= SSI_SCR_SYNC_TX_FS;
- mask = CCSR_SSI_STCR_TXBIT0 | CCSR_SSI_STCR_TFDIR | CCSR_SSI_STCR_TXDIR |
- CCSR_SSI_STCR_TSCKP | CCSR_SSI_STCR_TFSI | CCSR_SSI_STCR_TFSL |
- CCSR_SSI_STCR_TEFS;
- regmap_read(regs, CCSR_SSI_STCR, &stcr);
- regmap_read(regs, CCSR_SSI_SRCR, &srcr);
+ mask = SSI_STCR_TXBIT0 | SSI_STCR_TFDIR | SSI_STCR_TXDIR |
+ SSI_STCR_TSCKP | SSI_STCR_TFSI | SSI_STCR_TFSL | SSI_STCR_TEFS;
+ regmap_read(regs, REG_SSI_STCR, &stcr);
+ regmap_read(regs, REG_SSI_SRCR, &srcr);
stcr &= ~mask;
srcr &= ~mask;
- ssi_private->i2s_mode = CCSR_SSI_SCR_NET;
+ /* Use Network mode as default */
+ ssi->i2s_net = SSI_SCR_NET;
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
- regmap_update_bits(regs, CCSR_SSI_STCCR,
- CCSR_SSI_SxCCR_DC_MASK,
- CCSR_SSI_SxCCR_DC(2));
- regmap_update_bits(regs, CCSR_SSI_SRCCR,
- CCSR_SSI_SxCCR_DC_MASK,
- CCSR_SSI_SxCCR_DC(2));
+ regmap_update_bits(regs, REG_SSI_STCCR,
+ SSI_SxCCR_DC_MASK, SSI_SxCCR_DC(2));
+ regmap_update_bits(regs, REG_SSI_SRCCR,
+ SSI_SxCCR_DC_MASK, SSI_SxCCR_DC(2));
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBM_CFS:
case SND_SOC_DAIFMT_CBS_CFS:
- ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_MASTER;
+ ssi->i2s_net |= SSI_SCR_I2S_MODE_MASTER;
break;
case SND_SOC_DAIFMT_CBM_CFM:
- ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_SLAVE;
+ ssi->i2s_net |= SSI_SCR_I2S_MODE_SLAVE;
break;
default:
return -EINVAL;
}
/* Data on rising edge of bclk, frame low, 1clk before data */
- strcr |= CCSR_SSI_STCR_TFSI | CCSR_SSI_STCR_TSCKP |
- CCSR_SSI_STCR_TXBIT0 | CCSR_SSI_STCR_TEFS;
+ strcr |= SSI_STCR_TFSI | SSI_STCR_TSCKP |
+ SSI_STCR_TXBIT0 | SSI_STCR_TEFS;
break;
case SND_SOC_DAIFMT_LEFT_J:
/* Data on rising edge of bclk, frame high */
- strcr |= CCSR_SSI_STCR_TXBIT0 | CCSR_SSI_STCR_TSCKP;
+ strcr |= SSI_STCR_TXBIT0 | SSI_STCR_TSCKP;
break;
case SND_SOC_DAIFMT_DSP_A:
/* Data on rising edge of bclk, frame high, 1clk before data */
- strcr |= CCSR_SSI_STCR_TFSL | CCSR_SSI_STCR_TSCKP |
- CCSR_SSI_STCR_TXBIT0 | CCSR_SSI_STCR_TEFS;
+ strcr |= SSI_STCR_TFSL | SSI_STCR_TSCKP |
+ SSI_STCR_TXBIT0 | SSI_STCR_TEFS;
break;
case SND_SOC_DAIFMT_DSP_B:
/* Data on rising edge of bclk, frame high */
- strcr |= CCSR_SSI_STCR_TFSL | CCSR_SSI_STCR_TSCKP |
- CCSR_SSI_STCR_TXBIT0;
+ strcr |= SSI_STCR_TFSL | SSI_STCR_TSCKP | SSI_STCR_TXBIT0;
break;
case SND_SOC_DAIFMT_AC97:
- ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_NORMAL;
+ /* Data on falling edge of bclk, frame high, 1clk before data */
+ ssi->i2s_net |= SSI_SCR_I2S_MODE_NORMAL;
break;
default:
return -EINVAL;
}
- scr |= ssi_private->i2s_mode;
+ scr |= ssi->i2s_net;
/* DAI clock inversion */
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
@@ -1004,16 +949,16 @@ static int _fsl_ssi_set_dai_fmt(struct device *dev,
break;
case SND_SOC_DAIFMT_IB_NF:
/* Invert bit clock */
- strcr ^= CCSR_SSI_STCR_TSCKP;
+ strcr ^= SSI_STCR_TSCKP;
break;
case SND_SOC_DAIFMT_NB_IF:
/* Invert frame clock */
- strcr ^= CCSR_SSI_STCR_TFSI;
+ strcr ^= SSI_STCR_TFSI;
break;
case SND_SOC_DAIFMT_IB_IF:
/* Invert both clocks */
- strcr ^= CCSR_SSI_STCR_TSCKP;
- strcr ^= CCSR_SSI_STCR_TFSI;
+ strcr ^= SSI_STCR_TSCKP;
+ strcr ^= SSI_STCR_TFSI;
break;
default:
return -EINVAL;
@@ -1022,123 +967,122 @@ static int _fsl_ssi_set_dai_fmt(struct device *dev,
/* DAI clock master masks */
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBS_CFS:
- strcr |= CCSR_SSI_STCR_TFDIR | CCSR_SSI_STCR_TXDIR;
- scr |= CCSR_SSI_SCR_SYS_CLK_EN;
+ /* Output bit and frame sync clocks */
+ strcr |= SSI_STCR_TFDIR | SSI_STCR_TXDIR;
+ scr |= SSI_SCR_SYS_CLK_EN;
break;
case SND_SOC_DAIFMT_CBM_CFM:
- scr &= ~CCSR_SSI_SCR_SYS_CLK_EN;
+ /* Input bit or frame sync clocks */
+ scr &= ~SSI_SCR_SYS_CLK_EN;
break;
case SND_SOC_DAIFMT_CBM_CFS:
- strcr &= ~CCSR_SSI_STCR_TXDIR;
- strcr |= CCSR_SSI_STCR_TFDIR;
- scr &= ~CCSR_SSI_SCR_SYS_CLK_EN;
+ /* Input bit clock but output frame sync clock */
+ strcr &= ~SSI_STCR_TXDIR;
+ strcr |= SSI_STCR_TFDIR;
+ scr &= ~SSI_SCR_SYS_CLK_EN;
break;
default:
- if (!fsl_ssi_is_ac97(ssi_private))
+ if (!fsl_ssi_is_ac97(ssi))
return -EINVAL;
}
stcr |= strcr;
srcr |= strcr;
- if (ssi_private->cpu_dai_drv.symmetric_rates
- || fsl_ssi_is_ac97(ssi_private)) {
- /* Need to clear RXDIR when using SYNC or AC97 mode */
- srcr &= ~CCSR_SSI_SRCR_RXDIR;
- scr |= CCSR_SSI_SCR_SYN;
+ /* Set SYN mode and clear RXDIR bit when using SYN or AC97 mode */
+ if (ssi->cpu_dai_drv.symmetric_rates || fsl_ssi_is_ac97(ssi)) {
+ srcr &= ~SSI_SRCR_RXDIR;
+ scr |= SSI_SCR_SYN;
}
- regmap_write(regs, CCSR_SSI_STCR, stcr);
- regmap_write(regs, CCSR_SSI_SRCR, srcr);
- regmap_write(regs, CCSR_SSI_SCR, scr);
+ regmap_write(regs, REG_SSI_STCR, stcr);
+ regmap_write(regs, REG_SSI_SRCR, srcr);
+ regmap_write(regs, REG_SSI_SCR, scr);
- wm = ssi_private->fifo_watermark;
+ wm = ssi->fifo_watermark;
- regmap_write(regs, CCSR_SSI_SFCSR,
- CCSR_SSI_SFCSR_TFWM0(wm) | CCSR_SSI_SFCSR_RFWM0(wm) |
- CCSR_SSI_SFCSR_TFWM1(wm) | CCSR_SSI_SFCSR_RFWM1(wm));
+ regmap_write(regs, REG_SSI_SFCSR,
+ SSI_SFCSR_TFWM0(wm) | SSI_SFCSR_RFWM0(wm) |
+ SSI_SFCSR_TFWM1(wm) | SSI_SFCSR_RFWM1(wm));
- if (ssi_private->use_dual_fifo) {
- regmap_update_bits(regs, CCSR_SSI_SRCR, CCSR_SSI_SRCR_RFEN1,
- CCSR_SSI_SRCR_RFEN1);
- regmap_update_bits(regs, CCSR_SSI_STCR, CCSR_SSI_STCR_TFEN1,
- CCSR_SSI_STCR_TFEN1);
- regmap_update_bits(regs, CCSR_SSI_SCR, CCSR_SSI_SCR_TCH_EN,
- CCSR_SSI_SCR_TCH_EN);
+ if (ssi->use_dual_fifo) {
+ regmap_update_bits(regs, REG_SSI_SRCR,
+ SSI_SRCR_RFEN1, SSI_SRCR_RFEN1);
+ regmap_update_bits(regs, REG_SSI_STCR,
+ SSI_STCR_TFEN1, SSI_STCR_TFEN1);
+ regmap_update_bits(regs, REG_SSI_SCR,
+ SSI_SCR_TCH_EN, SSI_SCR_TCH_EN);
}
if ((fmt & SND_SOC_DAIFMT_FORMAT_MASK) == SND_SOC_DAIFMT_AC97)
- fsl_ssi_setup_ac97(ssi_private);
+ fsl_ssi_setup_ac97(ssi);
return 0;
-
}
/**
- * fsl_ssi_set_dai_fmt - configure Digital Audio Interface Format.
+ * Configure Digital Audio Interface (DAI) Format
*/
-static int fsl_ssi_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
+static int fsl_ssi_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
- struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(cpu_dai);
+ struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(dai);
- return _fsl_ssi_set_dai_fmt(cpu_dai->dev, ssi_private, fmt);
+ /* AC97 configured DAIFMT earlier in the probe() */
+ if (fsl_ssi_is_ac97(ssi))
+ return 0;
+
+ return _fsl_ssi_set_dai_fmt(dai->dev, ssi, fmt);
}
/**
- * fsl_ssi_set_dai_tdm_slot - set TDM slot number
- *
- * Note: This function can be only called when using SSI as DAI master
+ * Set TDM slot number and slot width
*/
-static int fsl_ssi_set_dai_tdm_slot(struct snd_soc_dai *cpu_dai, u32 tx_mask,
- u32 rx_mask, int slots, int slot_width)
+static int fsl_ssi_set_dai_tdm_slot(struct snd_soc_dai *dai, u32 tx_mask,
+ u32 rx_mask, int slots, int slot_width)
{
- struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(cpu_dai);
- struct regmap *regs = ssi_private->regs;
+ struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(dai);
+ struct regmap *regs = ssi->regs;
u32 val;
/* The word length should be 8, 10, 12, 16, 18, 20, 22 or 24 */
if (slot_width & 1 || slot_width < 8 || slot_width > 24) {
- dev_err(cpu_dai->dev, "invalid slot width: %d\n", slot_width);
+ dev_err(dai->dev, "invalid slot width: %d\n", slot_width);
return -EINVAL;
}
/* The slot number should be >= 2 if using Network mode or I2S mode */
- regmap_read(regs, CCSR_SSI_SCR, &val);
- val &= CCSR_SSI_SCR_I2S_MODE_MASK | CCSR_SSI_SCR_NET;
+ regmap_read(regs, REG_SSI_SCR, &val);
+ val &= SSI_SCR_I2S_MODE_MASK | SSI_SCR_NET;
if (val && slots < 2) {
- dev_err(cpu_dai->dev, "slot number should be >= 2 in I2S or NET\n");
+ dev_err(dai->dev, "slot number should be >= 2 in I2S or NET\n");
return -EINVAL;
}
- regmap_update_bits(regs, CCSR_SSI_STCCR, CCSR_SSI_SxCCR_DC_MASK,
- CCSR_SSI_SxCCR_DC(slots));
- regmap_update_bits(regs, CCSR_SSI_SRCCR, CCSR_SSI_SxCCR_DC_MASK,
- CCSR_SSI_SxCCR_DC(slots));
+ regmap_update_bits(regs, REG_SSI_STCCR,
+ SSI_SxCCR_DC_MASK, SSI_SxCCR_DC(slots));
+ regmap_update_bits(regs, REG_SSI_SRCCR,
+ SSI_SxCCR_DC_MASK, SSI_SxCCR_DC(slots));
- /* The register SxMSKs needs SSI to provide essential clock due to
- * hardware design. So we here temporarily enable SSI to set them.
- */
- regmap_read(regs, CCSR_SSI_SCR, &val);
- val &= CCSR_SSI_SCR_SSIEN;
- regmap_update_bits(regs, CCSR_SSI_SCR, CCSR_SSI_SCR_SSIEN,
- CCSR_SSI_SCR_SSIEN);
+ /* Save SSIEN bit of the SCR register */
+ regmap_read(regs, REG_SSI_SCR, &val);
+ val &= SSI_SCR_SSIEN;
+ /* Temporarily enable SSI to allow SxMSKs to be configurable */
+ regmap_update_bits(regs, REG_SSI_SCR, SSI_SCR_SSIEN, SSI_SCR_SSIEN);
- regmap_write(regs, CCSR_SSI_STMSK, ~tx_mask);
- regmap_write(regs, CCSR_SSI_SRMSK, ~rx_mask);
+ regmap_write(regs, REG_SSI_STMSK, ~tx_mask);
+ regmap_write(regs, REG_SSI_SRMSK, ~rx_mask);
- regmap_update_bits(regs, CCSR_SSI_SCR, CCSR_SSI_SCR_SSIEN, val);
+ /* Restore the value of SSIEN bit */
+ regmap_update_bits(regs, REG_SSI_SCR, SSI_SCR_SSIEN, val);
- ssi_private->slot_width = slot_width;
- ssi_private->slots = slots;
+ ssi->slot_width = slot_width;
+ ssi->slots = slots;
return 0;
}
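The ~tx_mask/~rx_mask writes above are intentional: the SSI time-slot mask registers (STMSK/SRMSK) flag slots to be skipped, not used, so the driver stores the complement of the ALSA-style masks. A minimal machine-driver sketch, assuming a cpu_dai handle and a 4-slot, 16-bit TDM link (names illustrative, not from this patch):

	/* Activate slots 0 and 1 of 4; the driver above converts each
	 * mask to ~mask before writing STMSK/SRMSK.
	 */
	ret = snd_soc_dai_set_tdm_slot(cpu_dai, 0x3, 0x3, 4, 16);
	if (ret < 0)
		dev_err(card->dev, "failed to set TDM slots: %d\n", ret);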
/**
- * fsl_ssi_trigger: start and stop the DMA transfer.
- *
- * This function is called by ALSA to start, stop, pause, and resume the DMA
- * transfer of data.
+ * Start or stop SSI and corresponding DMA transaction.
*
* The DMA channel is in external master start and pause mode, which
* means the SSI completely controls the flow of data.
@@ -1147,37 +1091,38 @@ static int fsl_ssi_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(rtd->cpu_dai);
- struct regmap *regs = ssi_private->regs;
+ struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ struct regmap *regs = ssi->regs;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- fsl_ssi_tx_config(ssi_private, true);
+ fsl_ssi_tx_config(ssi, true);
else
- fsl_ssi_rx_config(ssi_private, true);
+ fsl_ssi_rx_config(ssi, true);
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- fsl_ssi_tx_config(ssi_private, false);
+ fsl_ssi_tx_config(ssi, false);
else
- fsl_ssi_rx_config(ssi_private, false);
+ fsl_ssi_rx_config(ssi, false);
break;
default:
return -EINVAL;
}
- if (fsl_ssi_is_ac97(ssi_private)) {
+ /* Clear corresponding FIFO */
+ if (fsl_ssi_is_ac97(ssi)) {
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- regmap_write(regs, CCSR_SSI_SOR, CCSR_SSI_SOR_TX_CLR);
+ regmap_write(regs, REG_SSI_SOR, SSI_SOR_TX_CLR);
else
- regmap_write(regs, CCSR_SSI_SOR, CCSR_SSI_SOR_RX_CLR);
+ regmap_write(regs, REG_SSI_SOR, SSI_SOR_RX_CLR);
}
return 0;
@@ -1185,27 +1130,26 @@ static int fsl_ssi_trigger(struct snd_pcm_substream *substream, int cmd,
static int fsl_ssi_dai_probe(struct snd_soc_dai *dai)
{
- struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(dai);
+ struct fsl_ssi *ssi = snd_soc_dai_get_drvdata(dai);
- if (ssi_private->soc->imx && ssi_private->use_dma) {
- dai->playback_dma_data = &ssi_private->dma_params_tx;
- dai->capture_dma_data = &ssi_private->dma_params_rx;
+ if (ssi->soc->imx && ssi->use_dma) {
+ dai->playback_dma_data = &ssi->dma_params_tx;
+ dai->capture_dma_data = &ssi->dma_params_rx;
}
return 0;
}
static const struct snd_soc_dai_ops fsl_ssi_dai_ops = {
- .startup = fsl_ssi_startup,
- .shutdown = fsl_ssi_shutdown,
- .hw_params = fsl_ssi_hw_params,
- .hw_free = fsl_ssi_hw_free,
- .set_fmt = fsl_ssi_set_dai_fmt,
- .set_tdm_slot = fsl_ssi_set_dai_tdm_slot,
- .trigger = fsl_ssi_trigger,
+ .startup = fsl_ssi_startup,
+ .shutdown = fsl_ssi_shutdown,
+ .hw_params = fsl_ssi_hw_params,
+ .hw_free = fsl_ssi_hw_free,
+ .set_fmt = fsl_ssi_set_dai_fmt,
+ .set_tdm_slot = fsl_ssi_set_dai_tdm_slot,
+ .trigger = fsl_ssi_trigger,
};
-/* Template for the CPU dai driver structure */
static struct snd_soc_dai_driver fsl_ssi_dai_template = {
.probe = fsl_ssi_dai_probe,
.playback = {
@@ -1226,7 +1170,7 @@ static struct snd_soc_dai_driver fsl_ssi_dai_template = {
};
static const struct snd_soc_component_driver fsl_ssi_component = {
- .name = "fsl-ssi",
+ .name = "fsl-ssi",
};
static struct snd_soc_dai_driver fsl_ssi_ac97_dai = {
@@ -1237,23 +1181,23 @@ static struct snd_soc_dai_driver fsl_ssi_ac97_dai = {
.channels_min = 2,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_8000_48000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .formats = SNDRV_PCM_FMTBIT_S16 | SNDRV_PCM_FMTBIT_S20,
},
.capture = {
.stream_name = "AC97 Capture",
.channels_min = 2,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_48000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ /* 16-bit capture is broken (errata ERR003778) */
+ .formats = SNDRV_PCM_FMTBIT_S20,
},
.ops = &fsl_ssi_dai_ops,
};
-
-static struct fsl_ssi_private *fsl_ac97_data;
+static struct fsl_ssi *fsl_ac97_data;
static void fsl_ssi_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
- unsigned short val)
+ unsigned short val)
{
struct regmap *regs = fsl_ac97_data->regs;
unsigned int lreg;
@@ -1273,13 +1217,13 @@ static void fsl_ssi_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
}
lreg = reg << 12;
- regmap_write(regs, CCSR_SSI_SACADD, lreg);
+ regmap_write(regs, REG_SSI_SACADD, lreg);
lval = val << 4;
- regmap_write(regs, CCSR_SSI_SACDAT, lval);
+ regmap_write(regs, REG_SSI_SACDAT, lval);
- regmap_update_bits(regs, CCSR_SSI_SACNT, CCSR_SSI_SACNT_RDWR_MASK,
- CCSR_SSI_SACNT_WR);
+ regmap_update_bits(regs, REG_SSI_SACNT,
+ SSI_SACNT_RDWR_MASK, SSI_SACNT_WR);
udelay(100);
clk_disable_unprepare(fsl_ac97_data->clk);
@@ -1289,10 +1233,9 @@ ret_unlock:
}
static unsigned short fsl_ssi_ac97_read(struct snd_ac97 *ac97,
- unsigned short reg)
+ unsigned short reg)
{
struct regmap *regs = fsl_ac97_data->regs;
-
unsigned short val = 0;
u32 reg_val;
unsigned int lreg;
@@ -1302,19 +1245,18 @@ static unsigned short fsl_ssi_ac97_read(struct snd_ac97 *ac97,
ret = clk_prepare_enable(fsl_ac97_data->clk);
if (ret) {
- pr_err("ac97 read clk_prepare_enable failed: %d\n",
- ret);
+ pr_err("ac97 read clk_prepare_enable failed: %d\n", ret);
goto ret_unlock;
}
lreg = (reg & 0x7f) << 12;
- regmap_write(regs, CCSR_SSI_SACADD, lreg);
- regmap_update_bits(regs, CCSR_SSI_SACNT, CCSR_SSI_SACNT_RDWR_MASK,
- CCSR_SSI_SACNT_RD);
+ regmap_write(regs, REG_SSI_SACADD, lreg);
+ regmap_update_bits(regs, REG_SSI_SACNT,
+ SSI_SACNT_RDWR_MASK, SSI_SACNT_RD);
udelay(100);
- regmap_read(regs, CCSR_SSI_SACDAT, &reg_val);
+ regmap_read(regs, REG_SSI_SACDAT, &reg_val);
val = (reg_val >> 4) & 0xffff;
clk_disable_unprepare(fsl_ac97_data->clk);
@@ -1325,8 +1267,8 @@ ret_unlock:
}
static struct snd_ac97_bus_ops fsl_ssi_ac97_ops = {
- .read = fsl_ssi_ac97_read,
- .write = fsl_ssi_ac97_write,
+ .read = fsl_ssi_ac97_read,
+ .write = fsl_ssi_ac97_write,
};
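The shift constants in these two callbacks mirror the SACADD/SACDAT field layout: the 7-bit codec register index sits at bit 12 of SACADD and the 16-bit payload at bit 4 of SACDAT. A hedged sketch of the packing under that assumption (helpers are illustrative, not part of the patch):

	static inline u32 ac97_sacadd(unsigned short reg)
	{
		return (reg & 0x7f) << 12;	/* register index field */
	}

	static inline u16 ac97_sacdat_to_val(u32 sacdat)
	{
		return (sacdat >> 4) & 0xffff;	/* strip the 4-bit pad */
	}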
/**
@@ -1341,70 +1283,67 @@ static void make_lowercase(char *s)
}
static int fsl_ssi_imx_probe(struct platform_device *pdev,
- struct fsl_ssi_private *ssi_private, void __iomem *iomem)
+ struct fsl_ssi *ssi, void __iomem *iomem)
{
struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
u32 dmas[4];
int ret;
- if (ssi_private->has_ipg_clk_name)
- ssi_private->clk = devm_clk_get(&pdev->dev, "ipg");
+ /* Backward compatible for a DT without ipg clock name assigned */
+ if (ssi->has_ipg_clk_name)
+ ssi->clk = devm_clk_get(dev, "ipg");
else
- ssi_private->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(ssi_private->clk)) {
- ret = PTR_ERR(ssi_private->clk);
- dev_err(&pdev->dev, "could not get clock: %d\n", ret);
+ ssi->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(ssi->clk)) {
+ ret = PTR_ERR(ssi->clk);
+ dev_err(dev, "failed to get clock: %d\n", ret);
return ret;
}
- if (!ssi_private->has_ipg_clk_name) {
- ret = clk_prepare_enable(ssi_private->clk);
+ /* Enable the clock since regmap will not handle it in this case */
+ if (!ssi->has_ipg_clk_name) {
+ ret = clk_prepare_enable(ssi->clk);
if (ret) {
- dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
+ dev_err(dev, "clk_prepare_enable failed: %d\n", ret);
return ret;
}
}
- /* For those SLAVE implementations, we ignore non-baudclk cases
- * and, instead, abandon MASTER mode that needs baud clock.
- */
- ssi_private->baudclk = devm_clk_get(&pdev->dev, "baud");
- if (IS_ERR(ssi_private->baudclk))
- dev_dbg(&pdev->dev, "could not get baud clock: %ld\n",
- PTR_ERR(ssi_private->baudclk));
+ /* Do not error out for slave cases that live without a baud clock */
+ ssi->baudclk = devm_clk_get(dev, "baud");
+ if (IS_ERR(ssi->baudclk))
+ dev_dbg(dev, "failed to get baud clock: %ld\n",
+ PTR_ERR(ssi->baudclk));
- ssi_private->dma_params_tx.maxburst = ssi_private->dma_maxburst;
- ssi_private->dma_params_rx.maxburst = ssi_private->dma_maxburst;
- ssi_private->dma_params_tx.addr = ssi_private->ssi_phys + CCSR_SSI_STX0;
- ssi_private->dma_params_rx.addr = ssi_private->ssi_phys + CCSR_SSI_SRX0;
+ ssi->dma_params_tx.maxburst = ssi->dma_maxburst;
+ ssi->dma_params_rx.maxburst = ssi->dma_maxburst;
+ ssi->dma_params_tx.addr = ssi->ssi_phys + REG_SSI_STX0;
+ ssi->dma_params_rx.addr = ssi->ssi_phys + REG_SSI_SRX0;
+ /* Set to dual FIFO mode according to the SDMA script */
ret = of_property_read_u32_array(np, "dmas", dmas, 4);
- if (ssi_private->use_dma && !ret && dmas[2] == IMX_DMATYPE_SSI_DUAL) {
- ssi_private->use_dual_fifo = true;
- /* When using dual fifo mode, we need to keep watermark
- * as even numbers due to dma script limitation.
+ if (ssi->use_dma && !ret && dmas[2] == IMX_DMATYPE_SSI_DUAL) {
+ ssi->use_dual_fifo = true;
+ /*
+ * Use even numbers to avoid channel swap due to SDMA
+ * script design
*/
- ssi_private->dma_params_tx.maxburst &= ~0x1;
- ssi_private->dma_params_rx.maxburst &= ~0x1;
+ ssi->dma_params_tx.maxburst &= ~0x1;
+ ssi->dma_params_rx.maxburst &= ~0x1;
}
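The maxburst adjustment above clears bit 0 so that every DMA burst moves whole left/right sample pairs; with the dual-FIFO SDMA script, an odd burst length would misalign the pairs and swap channels. Illustrative arithmetic only:

	unsigned int maxburst = 9;

	maxburst &= ~0x1;	/* 9 -> 8: bursts stay in whole L/R pairs */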
- if (!ssi_private->use_dma) {
-
+ if (!ssi->use_dma) {
/*
- * Some boards use an incompatible codec. To get it
- * working, we are using imx-fiq-pcm-audio, that
- * can handle those codecs. DMA is not possible in this
- * situation.
+ * Some boards use an incompatible codec. Use imx-fiq-pcm-audio
+ * to get it working, as DMA is not possible in this situation.
*/
+ ssi->fiq_params.irq = ssi->irq;
+ ssi->fiq_params.base = iomem;
+ ssi->fiq_params.dma_params_rx = &ssi->dma_params_rx;
+ ssi->fiq_params.dma_params_tx = &ssi->dma_params_tx;
- ssi_private->fiq_params.irq = ssi_private->irq;
- ssi_private->fiq_params.base = iomem;
- ssi_private->fiq_params.dma_params_rx =
- &ssi_private->dma_params_rx;
- ssi_private->fiq_params.dma_params_tx =
- &ssi_private->dma_params_tx;
-
- ret = imx_pcm_fiq_init(pdev, &ssi_private->fiq_params);
+ ret = imx_pcm_fiq_init(pdev, &ssi->fiq_params);
if (ret)
goto error_pcm;
} else {
@@ -1416,26 +1355,26 @@ static int fsl_ssi_imx_probe(struct platform_device *pdev,
return 0;
error_pcm:
+ if (!ssi->has_ipg_clk_name)
+ clk_disable_unprepare(ssi->clk);
- if (!ssi_private->has_ipg_clk_name)
- clk_disable_unprepare(ssi_private->clk);
return ret;
}
-static void fsl_ssi_imx_clean(struct platform_device *pdev,
- struct fsl_ssi_private *ssi_private)
+static void fsl_ssi_imx_clean(struct platform_device *pdev, struct fsl_ssi *ssi)
{
- if (!ssi_private->use_dma)
+ if (!ssi->use_dma)
imx_pcm_fiq_exit(pdev);
- if (!ssi_private->has_ipg_clk_name)
- clk_disable_unprepare(ssi_private->clk);
+ if (!ssi->has_ipg_clk_name)
+ clk_disable_unprepare(ssi->clk);
}
static int fsl_ssi_probe(struct platform_device *pdev)
{
- struct fsl_ssi_private *ssi_private;
+ struct fsl_ssi *ssi;
int ret = 0;
struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
const struct of_device_id *of_id;
const char *p, *sprop;
const uint32_t *iprop;
@@ -1444,185 +1383,159 @@ static int fsl_ssi_probe(struct platform_device *pdev)
char name[64];
struct regmap_config regconfig = fsl_ssi_regconfig;
- of_id = of_match_device(fsl_ssi_ids, &pdev->dev);
+ of_id = of_match_device(fsl_ssi_ids, dev);
if (!of_id || !of_id->data)
return -EINVAL;
- ssi_private = devm_kzalloc(&pdev->dev, sizeof(*ssi_private),
- GFP_KERNEL);
- if (!ssi_private)
+ ssi = devm_kzalloc(dev, sizeof(*ssi), GFP_KERNEL);
+ if (!ssi)
return -ENOMEM;
- ssi_private->soc = of_id->data;
- ssi_private->dev = &pdev->dev;
+ ssi->soc = of_id->data;
+ ssi->dev = dev;
+ /* Check if being used in AC97 mode */
sprop = of_get_property(np, "fsl,mode", NULL);
if (sprop) {
if (!strcmp(sprop, "ac97-slave"))
- ssi_private->dai_fmt = SND_SOC_DAIFMT_AC97;
+ ssi->dai_fmt = SND_SOC_DAIFMT_AC97;
}
- ssi_private->use_dma = !of_property_read_bool(np,
- "fsl,fiq-stream-filter");
-
- if (fsl_ssi_is_ac97(ssi_private)) {
- memcpy(&ssi_private->cpu_dai_drv, &fsl_ssi_ac97_dai,
- sizeof(fsl_ssi_ac97_dai));
+ /* Select DMA or FIQ */
+ ssi->use_dma = !of_property_read_bool(np, "fsl,fiq-stream-filter");
- fsl_ac97_data = ssi_private;
+ if (fsl_ssi_is_ac97(ssi)) {
+ memcpy(&ssi->cpu_dai_drv, &fsl_ssi_ac97_dai,
+ sizeof(fsl_ssi_ac97_dai));
+ fsl_ac97_data = ssi;
} else {
- /* Initialize this copy of the CPU DAI driver structure */
- memcpy(&ssi_private->cpu_dai_drv, &fsl_ssi_dai_template,
+ memcpy(&ssi->cpu_dai_drv, &fsl_ssi_dai_template,
sizeof(fsl_ssi_dai_template));
}
- ssi_private->cpu_dai_drv.name = dev_name(&pdev->dev);
+ ssi->cpu_dai_drv.name = dev_name(dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- iomem = devm_ioremap_resource(&pdev->dev, res);
+ iomem = devm_ioremap_resource(dev, res);
if (IS_ERR(iomem))
return PTR_ERR(iomem);
- ssi_private->ssi_phys = res->start;
+ ssi->ssi_phys = res->start;
- if (ssi_private->soc->imx21regs) {
- /*
- * According to datasheet imx21-class SSI
- * don't have SACC{ST,EN,DIS} regs.
- */
- regconfig.max_register = CCSR_SSI_SRMSK;
+ if (ssi->soc->imx21regs) {
+ /* No SACC{ST,EN,DIS} regs in imx21-class SSI */
+ regconfig.max_register = REG_SSI_SRMSK;
regconfig.num_reg_defaults_raw =
- CCSR_SSI_SRMSK / sizeof(uint32_t) + 1;
+ REG_SSI_SRMSK / sizeof(uint32_t) + 1;
}
ret = of_property_match_string(np, "clock-names", "ipg");
if (ret < 0) {
- ssi_private->has_ipg_clk_name = false;
- ssi_private->regs = devm_regmap_init_mmio(&pdev->dev, iomem,
- &regconfig);
+ ssi->has_ipg_clk_name = false;
+ ssi->regs = devm_regmap_init_mmio(dev, iomem, &regconfig);
} else {
- ssi_private->has_ipg_clk_name = true;
- ssi_private->regs = devm_regmap_init_mmio_clk(&pdev->dev,
- "ipg", iomem, &regconfig);
+ ssi->has_ipg_clk_name = true;
+ ssi->regs = devm_regmap_init_mmio_clk(dev, "ipg", iomem,
+ &regconfig);
}
- if (IS_ERR(ssi_private->regs)) {
- dev_err(&pdev->dev, "Failed to init register map\n");
- return PTR_ERR(ssi_private->regs);
+ if (IS_ERR(ssi->regs)) {
+ dev_err(dev, "failed to init register map\n");
+ return PTR_ERR(ssi->regs);
}
- ssi_private->irq = platform_get_irq(pdev, 0);
- if (ssi_private->irq < 0) {
- dev_err(&pdev->dev, "no irq for node %s\n", pdev->name);
- return ssi_private->irq;
+ ssi->irq = platform_get_irq(pdev, 0);
+ if (ssi->irq < 0) {
+ dev_err(dev, "no irq for node %s\n", pdev->name);
+ return ssi->irq;
}
- /* Are the RX and the TX clocks locked? */
+ /* Set software limitations for synchronous mode */
if (!of_find_property(np, "fsl,ssi-asynchronous", NULL)) {
- if (!fsl_ssi_is_ac97(ssi_private))
- ssi_private->cpu_dai_drv.symmetric_rates = 1;
+ if (!fsl_ssi_is_ac97(ssi)) {
+ ssi->cpu_dai_drv.symmetric_rates = 1;
+ ssi->cpu_dai_drv.symmetric_samplebits = 1;
+ }
- ssi_private->cpu_dai_drv.symmetric_channels = 1;
- ssi_private->cpu_dai_drv.symmetric_samplebits = 1;
+ ssi->cpu_dai_drv.symmetric_channels = 1;
}
- /* Determine the FIFO depth. */
+ /* Fetch FIFO depth; fall back to 8 for older DTs without the property */
iprop = of_get_property(np, "fsl,fifo-depth", NULL);
if (iprop)
- ssi_private->fifo_depth = be32_to_cpup(iprop);
+ ssi->fifo_depth = be32_to_cpup(iprop);
else
- /* Older 8610 DTs didn't have the fifo-depth property */
- ssi_private->fifo_depth = 8;
+ ssi->fifo_depth = 8;
/*
- * Set the watermark for transmit FIFO 0 and receive FIFO 0. We don't
- * use FIFO 1 but set the watermark appropriately nontheless.
- * We program the transmit water to signal a DMA transfer
- * if there are N elements left in the FIFO. For chips with 15-deep
- * FIFOs, set watermark to 8. This allows the SSI to operate at a
- * high data rate without channel slipping. Behavior is unchanged
- * for the older chips with a fifo depth of only 8. A value of 4
- * might be appropriate for the older chips, but is left at
- * fifo_depth-2 until sombody has a chance to test.
+ * Configure TX and RX DMA watermarks -- when to send a DMA request
*
- * We set the watermark on the same level as the DMA burstsize. For
- * fiq it is probably better to use the biggest possible watermark
- * size.
+ * Values should be tested to avoid FIFO under/overrun. Set maxburst
+ * to fifo_watermark to maximize each DMA transaction and reduce overhead.
*/
- switch (ssi_private->fifo_depth) {
+ switch (ssi->fifo_depth) {
case 15:
/*
- * 2 samples is not enough when running at high data
- * rates (like 48kHz @ 16 bits/channel, 16 channels)
- * 8 seems to split things evenly and leave enough time
- * for the DMA to fill the FIFO before it's over/under
- * run.
+ * Set to 8 as a balanced configuration -- when the TX FIFO has 8
+ * empty slots, send a DMA request to fill those 8 slots. The
+ * remaining 7 slots give the DMA enough time to finish the
+ * transaction before the TX FIFO underruns; the same applies to RX.
+ *
+ * Tested with cases running at 48kHz @ 16 bits x 16 channels
*/
- ssi_private->fifo_watermark = 8;
- ssi_private->dma_maxburst = 8;
+ ssi->fifo_watermark = 8;
+ ssi->dma_maxburst = 8;
break;
case 8:
default:
- /*
- * maintain old behavior for older chips.
- * Keeping it the same because I don't have an older
- * board to test with.
- * I suspect this could be changed to be something to
- * leave some more space in the fifo.
- */
- ssi_private->fifo_watermark = ssi_private->fifo_depth - 2;
- ssi_private->dma_maxburst = ssi_private->fifo_depth - 2;
+ /* Safely use old watermark configurations for older chips */
+ ssi->fifo_watermark = ssi->fifo_depth - 2;
+ ssi->dma_maxburst = ssi->fifo_depth - 2;
break;
}
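The headroom reasoning in the 15-deep case can be sanity-checked directly; this is only a sketch of the arithmetic in the comment above:

	unsigned int fifo_depth = 15, watermark = 8;
	/* A TX DMA request fires once 8 slots are empty; the remaining
	 * filled slots cover the latency of the refilling burst.
	 */
	unsigned int headroom = fifo_depth - watermark;	/* 7 slots */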
- dev_set_drvdata(&pdev->dev, ssi_private);
+ dev_set_drvdata(dev, ssi);
- if (ssi_private->soc->imx) {
- ret = fsl_ssi_imx_probe(pdev, ssi_private, iomem);
+ if (ssi->soc->imx) {
+ ret = fsl_ssi_imx_probe(pdev, ssi, iomem);
if (ret)
return ret;
}
- if (fsl_ssi_is_ac97(ssi_private)) {
- mutex_init(&ssi_private->ac97_reg_lock);
+ if (fsl_ssi_is_ac97(ssi)) {
+ mutex_init(&ssi->ac97_reg_lock);
ret = snd_soc_set_ac97_ops_of_reset(&fsl_ssi_ac97_ops, pdev);
if (ret) {
- dev_err(&pdev->dev, "could not set AC'97 ops\n");
+ dev_err(dev, "failed to set AC'97 ops\n");
goto error_ac97_ops;
}
}
- ret = devm_snd_soc_register_component(&pdev->dev, &fsl_ssi_component,
- &ssi_private->cpu_dai_drv, 1);
+ ret = devm_snd_soc_register_component(dev, &fsl_ssi_component,
+ &ssi->cpu_dai_drv, 1);
if (ret) {
- dev_err(&pdev->dev, "failed to register DAI: %d\n", ret);
+ dev_err(dev, "failed to register DAI: %d\n", ret);
goto error_asoc_register;
}
- if (ssi_private->use_dma) {
- ret = devm_request_irq(&pdev->dev, ssi_private->irq,
- fsl_ssi_isr, 0, dev_name(&pdev->dev),
- ssi_private);
+ if (ssi->use_dma) {
+ ret = devm_request_irq(dev, ssi->irq, fsl_ssi_isr, 0,
+ dev_name(dev), ssi);
if (ret < 0) {
- dev_err(&pdev->dev, "could not claim irq %u\n",
- ssi_private->irq);
+ dev_err(dev, "failed to claim irq %u\n", ssi->irq);
goto error_asoc_register;
}
}
- ret = fsl_ssi_debugfs_create(&ssi_private->dbg_stats, &pdev->dev);
+ ret = fsl_ssi_debugfs_create(&ssi->dbg_stats, dev);
if (ret)
goto error_asoc_register;
- /*
- * If codec-handle property is missing from SSI node, we assume
- * that the machine driver uses new binding which does not require
- * SSI driver to trigger machine driver's probe.
- */
+ /* Bypass it if using newer DT bindings of ASoC machine drivers */
if (!of_get_property(np, "codec-handle", NULL))
goto done;
- /* Trigger the machine driver's probe function. The platform driver
- * name of the machine driver is taken from /compatible property of the
- * device tree. We also pass the address of the CPU DAI driver
- * structure.
+ /*
+ * Backward compatible for older bindings: manually trigger the
+ * machine driver's probe(), deriving the machine driver's platform
+ * device name from the root /compatible property.
*/
sprop = of_get_property(of_find_node_by_path("/"), "compatible", NULL);
/* Sometimes the compatible name has a "fsl," prefix, so we strip it. */
@@ -1632,34 +1545,31 @@ static int fsl_ssi_probe(struct platform_device *pdev)
snprintf(name, sizeof(name), "snd-soc-%s", sprop);
make_lowercase(name);
- ssi_private->pdev =
- platform_device_register_data(&pdev->dev, name, 0, NULL, 0);
- if (IS_ERR(ssi_private->pdev)) {
- ret = PTR_ERR(ssi_private->pdev);
- dev_err(&pdev->dev, "failed to register platform: %d\n", ret);
+ ssi->pdev = platform_device_register_data(dev, name, 0, NULL, 0);
+ if (IS_ERR(ssi->pdev)) {
+ ret = PTR_ERR(ssi->pdev);
+ dev_err(dev, "failed to register platform: %d\n", ret);
goto error_sound_card;
}
done:
- if (ssi_private->dai_fmt)
- _fsl_ssi_set_dai_fmt(&pdev->dev, ssi_private,
- ssi_private->dai_fmt);
+ if (ssi->dai_fmt)
+ _fsl_ssi_set_dai_fmt(dev, ssi, ssi->dai_fmt);
- if (fsl_ssi_is_ac97(ssi_private)) {
+ if (fsl_ssi_is_ac97(ssi)) {
u32 ssi_idx;
ret = of_property_read_u32(np, "cell-index", &ssi_idx);
if (ret) {
- dev_err(&pdev->dev, "cannot get SSI index property\n");
+ dev_err(dev, "failed to get SSI index property\n");
goto error_sound_card;
}
- ssi_private->pdev =
- platform_device_register_data(NULL,
- "ac97-codec", ssi_idx, NULL, 0);
- if (IS_ERR(ssi_private->pdev)) {
- ret = PTR_ERR(ssi_private->pdev);
- dev_err(&pdev->dev,
+ ssi->pdev = platform_device_register_data(NULL, "ac97-codec",
+ ssi_idx, NULL, 0);
+ if (IS_ERR(ssi->pdev)) {
+ ret = PTR_ERR(ssi->pdev);
+ dev_err(dev,
"failed to register AC97 codec platform: %d\n",
ret);
goto error_sound_card;
@@ -1669,37 +1579,35 @@ done:
return 0;
error_sound_card:
- fsl_ssi_debugfs_remove(&ssi_private->dbg_stats);
-
+ fsl_ssi_debugfs_remove(&ssi->dbg_stats);
error_asoc_register:
- if (fsl_ssi_is_ac97(ssi_private))
+ if (fsl_ssi_is_ac97(ssi))
snd_soc_set_ac97_ops(NULL);
-
error_ac97_ops:
- if (fsl_ssi_is_ac97(ssi_private))
- mutex_destroy(&ssi_private->ac97_reg_lock);
+ if (fsl_ssi_is_ac97(ssi))
+ mutex_destroy(&ssi->ac97_reg_lock);
- if (ssi_private->soc->imx)
- fsl_ssi_imx_clean(pdev, ssi_private);
+ if (ssi->soc->imx)
+ fsl_ssi_imx_clean(pdev, ssi);
return ret;
}
static int fsl_ssi_remove(struct platform_device *pdev)
{
- struct fsl_ssi_private *ssi_private = dev_get_drvdata(&pdev->dev);
+ struct fsl_ssi *ssi = dev_get_drvdata(&pdev->dev);
- fsl_ssi_debugfs_remove(&ssi_private->dbg_stats);
+ fsl_ssi_debugfs_remove(&ssi->dbg_stats);
- if (ssi_private->pdev)
- platform_device_unregister(ssi_private->pdev);
+ if (ssi->pdev)
+ platform_device_unregister(ssi->pdev);
- if (ssi_private->soc->imx)
- fsl_ssi_imx_clean(pdev, ssi_private);
+ if (ssi->soc->imx)
+ fsl_ssi_imx_clean(pdev, ssi);
- if (fsl_ssi_is_ac97(ssi_private)) {
+ if (fsl_ssi_is_ac97(ssi)) {
snd_soc_set_ac97_ops(NULL);
- mutex_destroy(&ssi_private->ac97_reg_lock);
+ mutex_destroy(&ssi->ac97_reg_lock);
}
return 0;
@@ -1708,13 +1616,11 @@ static int fsl_ssi_remove(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int fsl_ssi_suspend(struct device *dev)
{
- struct fsl_ssi_private *ssi_private = dev_get_drvdata(dev);
- struct regmap *regs = ssi_private->regs;
+ struct fsl_ssi *ssi = dev_get_drvdata(dev);
+ struct regmap *regs = ssi->regs;
- regmap_read(regs, CCSR_SSI_SFCSR,
- &ssi_private->regcache_sfcsr);
- regmap_read(regs, CCSR_SSI_SACNT,
- &ssi_private->regcache_sacnt);
+ regmap_read(regs, REG_SSI_SFCSR, &ssi->regcache_sfcsr);
+ regmap_read(regs, REG_SSI_SACNT, &ssi->regcache_sacnt);
regcache_cache_only(regs, true);
regcache_mark_dirty(regs);
@@ -1724,17 +1630,16 @@ static int fsl_ssi_suspend(struct device *dev)
static int fsl_ssi_resume(struct device *dev)
{
- struct fsl_ssi_private *ssi_private = dev_get_drvdata(dev);
- struct regmap *regs = ssi_private->regs;
+ struct fsl_ssi *ssi = dev_get_drvdata(dev);
+ struct regmap *regs = ssi->regs;
regcache_cache_only(regs, false);
- regmap_update_bits(regs, CCSR_SSI_SFCSR,
- CCSR_SSI_SFCSR_RFWM1_MASK | CCSR_SSI_SFCSR_TFWM1_MASK |
- CCSR_SSI_SFCSR_RFWM0_MASK | CCSR_SSI_SFCSR_TFWM0_MASK,
- ssi_private->regcache_sfcsr);
- regmap_write(regs, CCSR_SSI_SACNT,
- ssi_private->regcache_sacnt);
+ regmap_update_bits(regs, REG_SSI_SFCSR,
+ SSI_SFCSR_RFWM1_MASK | SSI_SFCSR_TFWM1_MASK |
+ SSI_SFCSR_RFWM0_MASK | SSI_SFCSR_TFWM0_MASK,
+ ssi->regcache_sfcsr);
+ regmap_write(regs, REG_SSI_SACNT, ssi->regcache_sacnt);
return regcache_sync(regs);
}
diff --git a/sound/soc/fsl/fsl_ssi.h b/sound/soc/fsl/fsl_ssi.h
index 506510540d0a..de2fdc5db726 100644
--- a/sound/soc/fsl/fsl_ssi.h
+++ b/sound/soc/fsl/fsl_ssi.h
@@ -1,5 +1,5 @@
/*
- * fsl_ssi.h - ALSA SSI interface for the Freescale MPC8610 SoC
+ * fsl_ssi.h - ALSA SSI interface for the Freescale MPC8610 and i.MX SoCs
*
* Author: Timur Tabi <timur@freescale.com>
*
@@ -12,198 +12,261 @@
#ifndef _MPC8610_I2S_H
#define _MPC8610_I2S_H
-/* SSI registers */
-#define CCSR_SSI_STX0 0x00
-#define CCSR_SSI_STX1 0x04
-#define CCSR_SSI_SRX0 0x08
-#define CCSR_SSI_SRX1 0x0c
-#define CCSR_SSI_SCR 0x10
-#define CCSR_SSI_SISR 0x14
-#define CCSR_SSI_SIER 0x18
-#define CCSR_SSI_STCR 0x1c
-#define CCSR_SSI_SRCR 0x20
-#define CCSR_SSI_STCCR 0x24
-#define CCSR_SSI_SRCCR 0x28
-#define CCSR_SSI_SFCSR 0x2c
-#define CCSR_SSI_STR 0x30
-#define CCSR_SSI_SOR 0x34
-#define CCSR_SSI_SACNT 0x38
-#define CCSR_SSI_SACADD 0x3c
-#define CCSR_SSI_SACDAT 0x40
-#define CCSR_SSI_SATAG 0x44
-#define CCSR_SSI_STMSK 0x48
-#define CCSR_SSI_SRMSK 0x4c
-#define CCSR_SSI_SACCST 0x50
-#define CCSR_SSI_SACCEN 0x54
-#define CCSR_SSI_SACCDIS 0x58
+#define RX 0
+#define TX 1
-#define CCSR_SSI_SCR_SYNC_TX_FS 0x00001000
-#define CCSR_SSI_SCR_RFR_CLK_DIS 0x00000800
-#define CCSR_SSI_SCR_TFR_CLK_DIS 0x00000400
-#define CCSR_SSI_SCR_TCH_EN 0x00000100
-#define CCSR_SSI_SCR_SYS_CLK_EN 0x00000080
-#define CCSR_SSI_SCR_I2S_MODE_MASK 0x00000060
-#define CCSR_SSI_SCR_I2S_MODE_NORMAL 0x00000000
-#define CCSR_SSI_SCR_I2S_MODE_MASTER 0x00000020
-#define CCSR_SSI_SCR_I2S_MODE_SLAVE 0x00000040
-#define CCSR_SSI_SCR_SYN 0x00000010
-#define CCSR_SSI_SCR_NET 0x00000008
-#define CCSR_SSI_SCR_RE 0x00000004
-#define CCSR_SSI_SCR_TE 0x00000002
-#define CCSR_SSI_SCR_SSIEN 0x00000001
+/* -- SSI Register Map -- */
-#define CCSR_SSI_SISR_RFRC 0x01000000
-#define CCSR_SSI_SISR_TFRC 0x00800000
-#define CCSR_SSI_SISR_CMDAU 0x00040000
-#define CCSR_SSI_SISR_CMDDU 0x00020000
-#define CCSR_SSI_SISR_RXT 0x00010000
-#define CCSR_SSI_SISR_RDR1 0x00008000
-#define CCSR_SSI_SISR_RDR0 0x00004000
-#define CCSR_SSI_SISR_TDE1 0x00002000
-#define CCSR_SSI_SISR_TDE0 0x00001000
-#define CCSR_SSI_SISR_ROE1 0x00000800
-#define CCSR_SSI_SISR_ROE0 0x00000400
-#define CCSR_SSI_SISR_TUE1 0x00000200
-#define CCSR_SSI_SISR_TUE0 0x00000100
-#define CCSR_SSI_SISR_TFS 0x00000080
-#define CCSR_SSI_SISR_RFS 0x00000040
-#define CCSR_SSI_SISR_TLS 0x00000020
-#define CCSR_SSI_SISR_RLS 0x00000010
-#define CCSR_SSI_SISR_RFF1 0x00000008
-#define CCSR_SSI_SISR_RFF0 0x00000004
-#define CCSR_SSI_SISR_TFE1 0x00000002
-#define CCSR_SSI_SISR_TFE0 0x00000001
+/* SSI Transmit Data Register 0 */
+#define REG_SSI_STX0 0x00
+/* SSI Transmit Data Register 1 */
+#define REG_SSI_STX1 0x04
+/* SSI Receive Data Register 0 */
+#define REG_SSI_SRX0 0x08
+/* SSI Receive Data Register 1 */
+#define REG_SSI_SRX1 0x0c
+/* SSI Control Register */
+#define REG_SSI_SCR 0x10
+/* SSI Interrupt Status Register */
+#define REG_SSI_SISR 0x14
+/* SSI Interrupt Enable Register */
+#define REG_SSI_SIER 0x18
+/* SSI Transmit Configuration Register */
+#define REG_SSI_STCR 0x1c
+/* SSI Receive Configuration Register */
+#define REG_SSI_SRCR 0x20
+#define REG_SSI_SxCR(tx) ((tx) ? REG_SSI_STCR : REG_SSI_SRCR)
+/* SSI Transmit Clock Control Register */
+#define REG_SSI_STCCR 0x24
+/* SSI Receive Clock Control Register */
+#define REG_SSI_SRCCR 0x28
+#define REG_SSI_SxCCR(tx) ((tx) ? REG_SSI_STCCR : REG_SSI_SRCCR)
+/* SSI FIFO Control/Status Register */
+#define REG_SSI_SFCSR 0x2c
+/*
+ * SSI Test Register (Intended for debugging purposes only)
+ *
+ * Note: STR is not documented in recent IMX datasheets, but
+ * is described in the IMX51 reference manual at section 56.3.3.14
+ */
+#define REG_SSI_STR 0x30
+/*
+ * SSI Option Register (Intended for internal use only)
+ *
+ * Note: SOR is not documented in recent IMX datasheets, but
+ * is described in the IMX51 reference manual at section 56.3.3.15
+ */
+#define REG_SSI_SOR 0x34
+/* SSI AC97 Control Register */
+#define REG_SSI_SACNT 0x38
+/* SSI AC97 Command Address Register */
+#define REG_SSI_SACADD 0x3c
+/* SSI AC97 Command Data Register */
+#define REG_SSI_SACDAT 0x40
+/* SSI AC97 Tag Register */
+#define REG_SSI_SATAG 0x44
+/* SSI Transmit Time Slot Mask Register */
+#define REG_SSI_STMSK 0x48
+/* SSI Receive Time Slot Mask Register */
+#define REG_SSI_SRMSK 0x4c
+#define REG_SSI_SxMSK(tx) ((tx) ? REG_SSI_STMSK : REG_SSI_SRMSK)
+/*
+ * SSI AC97 Channel Status Register
+ *
+ * The status could be changed by:
+ * 1) Writing a '1' bit at some position in SACCEN sets the relevant bit in SACCST
+ * 2) Writing a '1' bit at some position in SACCDIS unsets the relevant bit
+ * 3) Receiving a '1' in the SLOTREQ bit from the external CODEC via AC Link
+ */
+#define REG_SSI_SACCST 0x50
+/* SSI AC97 Channel Enable Register -- Set bits in SACCST */
+#define REG_SSI_SACCEN 0x54
+/* SSI AC97 Channel Disable Register -- Clear bits in SACCST */
+#define REG_SSI_SACCDIS 0x58
+
+/* -- SSI Register Field Maps -- */
-#define CCSR_SSI_SIER_RFRC_EN 0x01000000
-#define CCSR_SSI_SIER_TFRC_EN 0x00800000
-#define CCSR_SSI_SIER_RDMAE 0x00400000
-#define CCSR_SSI_SIER_RIE 0x00200000
-#define CCSR_SSI_SIER_TDMAE 0x00100000
-#define CCSR_SSI_SIER_TIE 0x00080000
-#define CCSR_SSI_SIER_CMDAU_EN 0x00040000
-#define CCSR_SSI_SIER_CMDDU_EN 0x00020000
-#define CCSR_SSI_SIER_RXT_EN 0x00010000
-#define CCSR_SSI_SIER_RDR1_EN 0x00008000
-#define CCSR_SSI_SIER_RDR0_EN 0x00004000
-#define CCSR_SSI_SIER_TDE1_EN 0x00002000
-#define CCSR_SSI_SIER_TDE0_EN 0x00001000
-#define CCSR_SSI_SIER_ROE1_EN 0x00000800
-#define CCSR_SSI_SIER_ROE0_EN 0x00000400
-#define CCSR_SSI_SIER_TUE1_EN 0x00000200
-#define CCSR_SSI_SIER_TUE0_EN 0x00000100
-#define CCSR_SSI_SIER_TFS_EN 0x00000080
-#define CCSR_SSI_SIER_RFS_EN 0x00000040
-#define CCSR_SSI_SIER_TLS_EN 0x00000020
-#define CCSR_SSI_SIER_RLS_EN 0x00000010
-#define CCSR_SSI_SIER_RFF1_EN 0x00000008
-#define CCSR_SSI_SIER_RFF0_EN 0x00000004
-#define CCSR_SSI_SIER_TFE1_EN 0x00000002
-#define CCSR_SSI_SIER_TFE0_EN 0x00000001
+/* SSI Control Register -- REG_SSI_SCR 0x10 */
+#define SSI_SCR_SYNC_TX_FS 0x00001000
+#define SSI_SCR_RFR_CLK_DIS 0x00000800
+#define SSI_SCR_TFR_CLK_DIS 0x00000400
+#define SSI_SCR_TCH_EN 0x00000100
+#define SSI_SCR_SYS_CLK_EN 0x00000080
+#define SSI_SCR_I2S_MODE_MASK 0x00000060
+#define SSI_SCR_I2S_MODE_NORMAL 0x00000000
+#define SSI_SCR_I2S_MODE_MASTER 0x00000020
+#define SSI_SCR_I2S_MODE_SLAVE 0x00000040
+#define SSI_SCR_SYN 0x00000010
+#define SSI_SCR_NET 0x00000008
+#define SSI_SCR_I2S_NET_MASK (SSI_SCR_NET | SSI_SCR_I2S_MODE_MASK)
+#define SSI_SCR_RE 0x00000004
+#define SSI_SCR_TE 0x00000002
+#define SSI_SCR_SSIEN 0x00000001
-#define CCSR_SSI_STCR_TXBIT0 0x00000200
-#define CCSR_SSI_STCR_TFEN1 0x00000100
-#define CCSR_SSI_STCR_TFEN0 0x00000080
-#define CCSR_SSI_STCR_TFDIR 0x00000040
-#define CCSR_SSI_STCR_TXDIR 0x00000020
-#define CCSR_SSI_STCR_TSHFD 0x00000010
-#define CCSR_SSI_STCR_TSCKP 0x00000008
-#define CCSR_SSI_STCR_TFSI 0x00000004
-#define CCSR_SSI_STCR_TFSL 0x00000002
-#define CCSR_SSI_STCR_TEFS 0x00000001
+/* SSI Interrupt Status Register -- REG_SSI_SISR 0x14 */
+#define SSI_SISR_RFRC 0x01000000
+#define SSI_SISR_TFRC 0x00800000
+#define SSI_SISR_CMDAU 0x00040000
+#define SSI_SISR_CMDDU 0x00020000
+#define SSI_SISR_RXT 0x00010000
+#define SSI_SISR_RDR1 0x00008000
+#define SSI_SISR_RDR0 0x00004000
+#define SSI_SISR_TDE1 0x00002000
+#define SSI_SISR_TDE0 0x00001000
+#define SSI_SISR_ROE1 0x00000800
+#define SSI_SISR_ROE0 0x00000400
+#define SSI_SISR_TUE1 0x00000200
+#define SSI_SISR_TUE0 0x00000100
+#define SSI_SISR_TFS 0x00000080
+#define SSI_SISR_RFS 0x00000040
+#define SSI_SISR_TLS 0x00000020
+#define SSI_SISR_RLS 0x00000010
+#define SSI_SISR_RFF1 0x00000008
+#define SSI_SISR_RFF0 0x00000004
+#define SSI_SISR_TFE1 0x00000002
+#define SSI_SISR_TFE0 0x00000001
-#define CCSR_SSI_SRCR_RXEXT 0x00000400
-#define CCSR_SSI_SRCR_RXBIT0 0x00000200
-#define CCSR_SSI_SRCR_RFEN1 0x00000100
-#define CCSR_SSI_SRCR_RFEN0 0x00000080
-#define CCSR_SSI_SRCR_RFDIR 0x00000040
-#define CCSR_SSI_SRCR_RXDIR 0x00000020
-#define CCSR_SSI_SRCR_RSHFD 0x00000010
-#define CCSR_SSI_SRCR_RSCKP 0x00000008
-#define CCSR_SSI_SRCR_RFSI 0x00000004
-#define CCSR_SSI_SRCR_RFSL 0x00000002
-#define CCSR_SSI_SRCR_REFS 0x00000001
+/* SSI Interrupt Enable Register -- REG_SSI_SIER 0x18 */
+#define SSI_SIER_RFRC_EN 0x01000000
+#define SSI_SIER_TFRC_EN 0x00800000
+#define SSI_SIER_RDMAE 0x00400000
+#define SSI_SIER_RIE 0x00200000
+#define SSI_SIER_TDMAE 0x00100000
+#define SSI_SIER_TIE 0x00080000
+#define SSI_SIER_CMDAU_EN 0x00040000
+#define SSI_SIER_CMDDU_EN 0x00020000
+#define SSI_SIER_RXT_EN 0x00010000
+#define SSI_SIER_RDR1_EN 0x00008000
+#define SSI_SIER_RDR0_EN 0x00004000
+#define SSI_SIER_TDE1_EN 0x00002000
+#define SSI_SIER_TDE0_EN 0x00001000
+#define SSI_SIER_ROE1_EN 0x00000800
+#define SSI_SIER_ROE0_EN 0x00000400
+#define SSI_SIER_TUE1_EN 0x00000200
+#define SSI_SIER_TUE0_EN 0x00000100
+#define SSI_SIER_TFS_EN 0x00000080
+#define SSI_SIER_RFS_EN 0x00000040
+#define SSI_SIER_TLS_EN 0x00000020
+#define SSI_SIER_RLS_EN 0x00000010
+#define SSI_SIER_RFF1_EN 0x00000008
+#define SSI_SIER_RFF0_EN 0x00000004
+#define SSI_SIER_TFE1_EN 0x00000002
+#define SSI_SIER_TFE0_EN 0x00000001
-/* STCCR and SRCCR */
-#define CCSR_SSI_SxCCR_DIV2_SHIFT 18
-#define CCSR_SSI_SxCCR_DIV2 0x00040000
-#define CCSR_SSI_SxCCR_PSR_SHIFT 17
-#define CCSR_SSI_SxCCR_PSR 0x00020000
-#define CCSR_SSI_SxCCR_WL_SHIFT 13
-#define CCSR_SSI_SxCCR_WL_MASK 0x0001E000
-#define CCSR_SSI_SxCCR_WL(x) \
- (((((x) / 2) - 1) << CCSR_SSI_SxCCR_WL_SHIFT) & CCSR_SSI_SxCCR_WL_MASK)
-#define CCSR_SSI_SxCCR_DC_SHIFT 8
-#define CCSR_SSI_SxCCR_DC_MASK 0x00001F00
-#define CCSR_SSI_SxCCR_DC(x) \
- ((((x) - 1) << CCSR_SSI_SxCCR_DC_SHIFT) & CCSR_SSI_SxCCR_DC_MASK)
-#define CCSR_SSI_SxCCR_PM_SHIFT 0
-#define CCSR_SSI_SxCCR_PM_MASK 0x000000FF
-#define CCSR_SSI_SxCCR_PM(x) \
- ((((x) - 1) << CCSR_SSI_SxCCR_PM_SHIFT) & CCSR_SSI_SxCCR_PM_MASK)
+/* SSI Transmit Configuration Register -- REG_SSI_STCR 0x1C */
+#define SSI_STCR_TXBIT0 0x00000200
+#define SSI_STCR_TFEN1 0x00000100
+#define SSI_STCR_TFEN0 0x00000080
+#define SSI_STCR_TFDIR 0x00000040
+#define SSI_STCR_TXDIR 0x00000020
+#define SSI_STCR_TSHFD 0x00000010
+#define SSI_STCR_TSCKP 0x00000008
+#define SSI_STCR_TFSI 0x00000004
+#define SSI_STCR_TFSL 0x00000002
+#define SSI_STCR_TEFS 0x00000001
+
+/* SSI Receive Configuration Register -- REG_SSI_SRCR 0x20 */
+#define SSI_SRCR_RXEXT 0x00000400
+#define SSI_SRCR_RXBIT0 0x00000200
+#define SSI_SRCR_RFEN1 0x00000100
+#define SSI_SRCR_RFEN0 0x00000080
+#define SSI_SRCR_RFDIR 0x00000040
+#define SSI_SRCR_RXDIR 0x00000020
+#define SSI_SRCR_RSHFD 0x00000010
+#define SSI_SRCR_RSCKP 0x00000008
+#define SSI_SRCR_RFSI 0x00000004
+#define SSI_SRCR_RFSL 0x00000002
+#define SSI_SRCR_REFS 0x00000001
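The SxCR/SxCCR/SxMSK helpers above let later code parameterize on direction instead of duplicating TX and RX paths. A sketch of the pattern, assuming a regmap handle (the helper name is illustrative, not part of the patch):

	static void fsl_ssi_fifo0_enable(struct regmap *regs, bool tx)
	{
		u32 bit = tx ? SSI_STCR_TFEN0 : SSI_SRCR_RFEN0;

		regmap_update_bits(regs, REG_SSI_SxCR(tx), bit, bit);
	}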
/*
- * The xFCNT bits are read-only, and the xFWM bits are read/write. Use the
- * CCSR_SSI_SFCSR_xFCNTy() macros to read the FIFO counters, and use the
- * CCSR_SSI_SFCSR_xFWMy() macros to set the watermarks.
+ * SSI Transmit Clock Control Register -- REG_SSI_STCCR 0x24
+ * SSI Receive Clock Control Register -- REG_SSI_SRCCR 0x28
+ */
+#define SSI_SxCCR_DIV2_SHIFT 18
+#define SSI_SxCCR_DIV2 0x00040000
+#define SSI_SxCCR_PSR_SHIFT 17
+#define SSI_SxCCR_PSR 0x00020000
+#define SSI_SxCCR_WL_SHIFT 13
+#define SSI_SxCCR_WL_MASK 0x0001E000
+#define SSI_SxCCR_WL(x) \
+ (((((x) / 2) - 1) << SSI_SxCCR_WL_SHIFT) & SSI_SxCCR_WL_MASK)
+#define SSI_SxCCR_DC_SHIFT 8
+#define SSI_SxCCR_DC_MASK 0x00001F00
+#define SSI_SxCCR_DC(x) \
+ ((((x) - 1) << SSI_SxCCR_DC_SHIFT) & SSI_SxCCR_DC_MASK)
+#define SSI_SxCCR_PM_SHIFT 0
+#define SSI_SxCCR_PM_MASK 0x000000FF
+#define SSI_SxCCR_PM(x) \
+ ((((x) - 1) << SSI_SxCCR_PM_SHIFT) & SSI_SxCCR_PM_MASK)
+
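A worked example of the SxCCR field macros, assuming a 16-bit word length in a 2-slot frame (values computed from the definitions above):

	/* SSI_SxCCR_WL(16) = ((16 / 2) - 1) << 13 = 0x0000E000
	 * SSI_SxCCR_DC(2)  = (2 - 1) << 8         = 0x00000100
	 */
	regmap_update_bits(regs, REG_SSI_STCCR,
			   SSI_SxCCR_WL_MASK | SSI_SxCCR_DC_MASK,
			   SSI_SxCCR_WL(16) | SSI_SxCCR_DC(2));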
+/*
+ * SSI FIFO Control/Status Register -- REG_SSI_SFCSR 0x2c
+ *
+ * Tx or Rx FIFO Counter -- SSI_SFCSR_xFCNTy Read-Only
+ * Tx or Rx FIFO Watermarks -- SSI_SFCSR_xFWMy Read/Write
*/
-#define CCSR_SSI_SFCSR_RFCNT1_SHIFT 28
-#define CCSR_SSI_SFCSR_RFCNT1_MASK 0xF0000000
-#define CCSR_SSI_SFCSR_RFCNT1(x) \
- (((x) & CCSR_SSI_SFCSR_RFCNT1_MASK) >> CCSR_SSI_SFCSR_RFCNT1_SHIFT)
-#define CCSR_SSI_SFCSR_TFCNT1_SHIFT 24
-#define CCSR_SSI_SFCSR_TFCNT1_MASK 0x0F000000
-#define CCSR_SSI_SFCSR_TFCNT1(x) \
- (((x) & CCSR_SSI_SFCSR_TFCNT1_MASK) >> CCSR_SSI_SFCSR_TFCNT1_SHIFT)
-#define CCSR_SSI_SFCSR_RFWM1_SHIFT 20
-#define CCSR_SSI_SFCSR_RFWM1_MASK 0x00F00000
-#define CCSR_SSI_SFCSR_RFWM1(x) \
- (((x) << CCSR_SSI_SFCSR_RFWM1_SHIFT) & CCSR_SSI_SFCSR_RFWM1_MASK)
-#define CCSR_SSI_SFCSR_TFWM1_SHIFT 16
-#define CCSR_SSI_SFCSR_TFWM1_MASK 0x000F0000
-#define CCSR_SSI_SFCSR_TFWM1(x) \
- (((x) << CCSR_SSI_SFCSR_TFWM1_SHIFT) & CCSR_SSI_SFCSR_TFWM1_MASK)
-#define CCSR_SSI_SFCSR_RFCNT0_SHIFT 12
-#define CCSR_SSI_SFCSR_RFCNT0_MASK 0x0000F000
-#define CCSR_SSI_SFCSR_RFCNT0(x) \
- (((x) & CCSR_SSI_SFCSR_RFCNT0_MASK) >> CCSR_SSI_SFCSR_RFCNT0_SHIFT)
-#define CCSR_SSI_SFCSR_TFCNT0_SHIFT 8
-#define CCSR_SSI_SFCSR_TFCNT0_MASK 0x00000F00
-#define CCSR_SSI_SFCSR_TFCNT0(x) \
- (((x) & CCSR_SSI_SFCSR_TFCNT0_MASK) >> CCSR_SSI_SFCSR_TFCNT0_SHIFT)
-#define CCSR_SSI_SFCSR_RFWM0_SHIFT 4
-#define CCSR_SSI_SFCSR_RFWM0_MASK 0x000000F0
-#define CCSR_SSI_SFCSR_RFWM0(x) \
- (((x) << CCSR_SSI_SFCSR_RFWM0_SHIFT) & CCSR_SSI_SFCSR_RFWM0_MASK)
-#define CCSR_SSI_SFCSR_TFWM0_SHIFT 0
-#define CCSR_SSI_SFCSR_TFWM0_MASK 0x0000000F
-#define CCSR_SSI_SFCSR_TFWM0(x) \
- (((x) << CCSR_SSI_SFCSR_TFWM0_SHIFT) & CCSR_SSI_SFCSR_TFWM0_MASK)
+#define SSI_SFCSR_RFCNT1_SHIFT 28
+#define SSI_SFCSR_RFCNT1_MASK 0xF0000000
+#define SSI_SFCSR_RFCNT1(x) \
+ (((x) & SSI_SFCSR_RFCNT1_MASK) >> SSI_SFCSR_RFCNT1_SHIFT)
+#define SSI_SFCSR_TFCNT1_SHIFT 24
+#define SSI_SFCSR_TFCNT1_MASK 0x0F000000
+#define SSI_SFCSR_TFCNT1(x) \
+ (((x) & SSI_SFCSR_TFCNT1_MASK) >> SSI_SFCSR_TFCNT1_SHIFT)
+#define SSI_SFCSR_RFWM1_SHIFT 20
+#define SSI_SFCSR_RFWM1_MASK 0x00F00000
+#define SSI_SFCSR_RFWM1(x) \
+ (((x) << SSI_SFCSR_RFWM1_SHIFT) & SSI_SFCSR_RFWM1_MASK)
+#define SSI_SFCSR_TFWM1_SHIFT 16
+#define SSI_SFCSR_TFWM1_MASK 0x000F0000
+#define SSI_SFCSR_TFWM1(x) \
+ (((x) << SSI_SFCSR_TFWM1_SHIFT) & SSI_SFCSR_TFWM1_MASK)
+#define SSI_SFCSR_RFCNT0_SHIFT 12
+#define SSI_SFCSR_RFCNT0_MASK 0x0000F000
+#define SSI_SFCSR_RFCNT0(x) \
+ (((x) & SSI_SFCSR_RFCNT0_MASK) >> SSI_SFCSR_RFCNT0_SHIFT)
+#define SSI_SFCSR_TFCNT0_SHIFT 8
+#define SSI_SFCSR_TFCNT0_MASK 0x00000F00
+#define SSI_SFCSR_TFCNT0(x) \
+ (((x) & SSI_SFCSR_TFCNT0_MASK) >> SSI_SFCSR_TFCNT0_SHIFT)
+#define SSI_SFCSR_RFWM0_SHIFT 4
+#define SSI_SFCSR_RFWM0_MASK 0x000000F0
+#define SSI_SFCSR_RFWM0(x) \
+ (((x) << SSI_SFCSR_RFWM0_SHIFT) & SSI_SFCSR_RFWM0_MASK)
+#define SSI_SFCSR_TFWM0_SHIFT 0
+#define SSI_SFCSR_TFWM0_MASK 0x0000000F
+#define SSI_SFCSR_TFWM0(x) \
+ (((x) << SSI_SFCSR_TFWM0_SHIFT) & SSI_SFCSR_TFWM0_MASK)
-#define CCSR_SSI_STR_TEST 0x00008000
-#define CCSR_SSI_STR_RCK2TCK 0x00004000
-#define CCSR_SSI_STR_RFS2TFS 0x00002000
-#define CCSR_SSI_STR_RXSTATE(x) (((x) >> 8) & 0x1F)
-#define CCSR_SSI_STR_TXD2RXD 0x00000080
-#define CCSR_SSI_STR_TCK2RCK 0x00000040
-#define CCSR_SSI_STR_TFS2RFS 0x00000020
-#define CCSR_SSI_STR_TXSTATE(x) ((x) & 0x1F)
+/* SSI Test Register -- REG_SSI_STR 0x30 */
+#define SSI_STR_TEST 0x00008000
+#define SSI_STR_RCK2TCK 0x00004000
+#define SSI_STR_RFS2TFS 0x00002000
+#define SSI_STR_RXSTATE(x) (((x) >> 8) & 0x1F)
+#define SSI_STR_TXD2RXD 0x00000080
+#define SSI_STR_TCK2RCK 0x00000040
+#define SSI_STR_TFS2RFS 0x00000020
+#define SSI_STR_TXSTATE(x) ((x) & 0x1F)
-#define CCSR_SSI_SOR_CLKOFF 0x00000040
-#define CCSR_SSI_SOR_RX_CLR 0x00000020
-#define CCSR_SSI_SOR_TX_CLR 0x00000010
-#define CCSR_SSI_SOR_INIT 0x00000008
-#define CCSR_SSI_SOR_WAIT_SHIFT 1
-#define CCSR_SSI_SOR_WAIT_MASK 0x00000006
-#define CCSR_SSI_SOR_WAIT(x) (((x) & 3) << CCSR_SSI_SOR_WAIT_SHIFT)
-#define CCSR_SSI_SOR_SYNRST 0x00000001
+/* SSI Option Register -- REG_SSI_SOR 0x34 */
+#define SSI_SOR_CLKOFF 0x00000040
+#define SSI_SOR_RX_CLR 0x00000020
+#define SSI_SOR_TX_CLR 0x00000010
+#define SSI_SOR_xX_CLR(tx) ((tx) ? SSI_SOR_TX_CLR : SSI_SOR_RX_CLR)
+#define SSI_SOR_INIT 0x00000008
+#define SSI_SOR_WAIT_SHIFT 1
+#define SSI_SOR_WAIT_MASK 0x00000006
+#define SSI_SOR_WAIT(x) (((x) & 3) << SSI_SOR_WAIT_SHIFT)
+#define SSI_SOR_SYNRST 0x00000001
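The xX_CLR(tx) helper collapses the separate TX/RX FIFO-clear writes seen in fsl_ssi_trigger() into one expression; a sketch, assuming a regmap handle and a tx flag:

	/* Clear the FIFO of whichever direction is being (re)started */
	regmap_write(regs, REG_SSI_SOR, SSI_SOR_xX_CLR(tx));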
-#define CCSR_SSI_SACNT_FRDIV(x) (((x) & 0x3f) << 5)
-#define CCSR_SSI_SACNT_WR 0x00000010
-#define CCSR_SSI_SACNT_RD 0x00000008
-#define CCSR_SSI_SACNT_RDWR_MASK 0x00000018
-#define CCSR_SSI_SACNT_TIF 0x00000004
-#define CCSR_SSI_SACNT_FV 0x00000002
-#define CCSR_SSI_SACNT_AC97EN 0x00000001
+/* SSI AC97 Control Register -- REG_SSI_SACNT 0x38 */
+#define SSI_SACNT_FRDIV(x) (((x) & 0x3f) << 5)
+#define SSI_SACNT_WR 0x00000010
+#define SSI_SACNT_RD 0x00000008
+#define SSI_SACNT_RDWR_MASK 0x00000018
+#define SSI_SACNT_TIF 0x00000004
+#define SSI_SACNT_FV 0x00000002
+#define SSI_SACNT_AC97EN 0x00000001
struct device;
@@ -255,7 +318,7 @@ static inline void fsl_ssi_dbg_isr(struct fsl_ssi_dbg *stats, u32 sisr)
}
static inline int fsl_ssi_debugfs_create(struct fsl_ssi_dbg *ssi_dbg,
- struct device *dev)
+ struct device *dev)
{
return 0;
}
diff --git a/sound/soc/fsl/fsl_ssi_dbg.c b/sound/soc/fsl/fsl_ssi_dbg.c
index 5469ffbc0253..7aac63e2c561 100644
--- a/sound/soc/fsl/fsl_ssi_dbg.c
+++ b/sound/soc/fsl/fsl_ssi_dbg.c
@@ -18,86 +18,86 @@
void fsl_ssi_dbg_isr(struct fsl_ssi_dbg *dbg, u32 sisr)
{
- if (sisr & CCSR_SSI_SISR_RFRC)
+ if (sisr & SSI_SISR_RFRC)
dbg->stats.rfrc++;
- if (sisr & CCSR_SSI_SISR_TFRC)
+ if (sisr & SSI_SISR_TFRC)
dbg->stats.tfrc++;
- if (sisr & CCSR_SSI_SISR_CMDAU)
+ if (sisr & SSI_SISR_CMDAU)
dbg->stats.cmdau++;
- if (sisr & CCSR_SSI_SISR_CMDDU)
+ if (sisr & SSI_SISR_CMDDU)
dbg->stats.cmddu++;
- if (sisr & CCSR_SSI_SISR_RXT)
+ if (sisr & SSI_SISR_RXT)
dbg->stats.rxt++;
- if (sisr & CCSR_SSI_SISR_RDR1)
+ if (sisr & SSI_SISR_RDR1)
dbg->stats.rdr1++;
- if (sisr & CCSR_SSI_SISR_RDR0)
+ if (sisr & SSI_SISR_RDR0)
dbg->stats.rdr0++;
- if (sisr & CCSR_SSI_SISR_TDE1)
+ if (sisr & SSI_SISR_TDE1)
dbg->stats.tde1++;
- if (sisr & CCSR_SSI_SISR_TDE0)
+ if (sisr & SSI_SISR_TDE0)
dbg->stats.tde0++;
- if (sisr & CCSR_SSI_SISR_ROE1)
+ if (sisr & SSI_SISR_ROE1)
dbg->stats.roe1++;
- if (sisr & CCSR_SSI_SISR_ROE0)
+ if (sisr & SSI_SISR_ROE0)
dbg->stats.roe0++;
- if (sisr & CCSR_SSI_SISR_TUE1)
+ if (sisr & SSI_SISR_TUE1)
dbg->stats.tue1++;
- if (sisr & CCSR_SSI_SISR_TUE0)
+ if (sisr & SSI_SISR_TUE0)
dbg->stats.tue0++;
- if (sisr & CCSR_SSI_SISR_TFS)
+ if (sisr & SSI_SISR_TFS)
dbg->stats.tfs++;
- if (sisr & CCSR_SSI_SISR_RFS)
+ if (sisr & SSI_SISR_RFS)
dbg->stats.rfs++;
- if (sisr & CCSR_SSI_SISR_TLS)
+ if (sisr & SSI_SISR_TLS)
dbg->stats.tls++;
- if (sisr & CCSR_SSI_SISR_RLS)
+ if (sisr & SSI_SISR_RLS)
dbg->stats.rls++;
- if (sisr & CCSR_SSI_SISR_RFF1)
+ if (sisr & SSI_SISR_RFF1)
dbg->stats.rff1++;
- if (sisr & CCSR_SSI_SISR_RFF0)
+ if (sisr & SSI_SISR_RFF0)
dbg->stats.rff0++;
- if (sisr & CCSR_SSI_SISR_TFE1)
+ if (sisr & SSI_SISR_TFE1)
dbg->stats.tfe1++;
- if (sisr & CCSR_SSI_SISR_TFE0)
+ if (sisr & SSI_SISR_TFE0)
dbg->stats.tfe0++;
}
-/* Show the statistics of a flag only if its interrupt is enabled. The
- * compiler will optimze this code to a no-op if the interrupt is not
- * enabled.
+/**
+ * Show the statistics of a flag only if its interrupt is enabled
+ *
+ * Compilers will optimize it to a no-op if the interrupt is disabled
*/
#define SIER_SHOW(flag, name) \
do { \
- if (CCSR_SSI_SIER_##flag) \
+ if (SSI_SIER_##flag) \
seq_printf(s, #name "=%u\n", ssi_dbg->stats.name); \
} while (0)
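For instance, the statistics routine invokes the macro once per interrupt flag; each expansion reduces to a seq_printf() guarded by the compile-time constant SSI_SIER_* value:

	SIER_SHOW(RFRC_EN, rfrc);
	SIER_SHOW(TFRC_EN, tfrc);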
/**
- * fsl_sysfs_ssi_show: display SSI statistics
+ * Display the statistics for the current SSI device
*
- * Display the statistics for the current SSI device. To avoid confusion,
- * we only show those counts that are enabled.
+ * To avoid confusion, only show those counts that are enabled
*/
static int fsl_ssi_stats_show(struct seq_file *s, void *unused)
{
@@ -147,7 +147,8 @@ int fsl_ssi_debugfs_create(struct fsl_ssi_dbg *ssi_dbg, struct device *dev)
return -ENOMEM;
ssi_dbg->dbg_stats = debugfs_create_file("stats", S_IRUGO,
- ssi_dbg->dbg_dir, ssi_dbg, &fsl_ssi_stats_ops);
+ ssi_dbg->dbg_dir, ssi_dbg,
+ &fsl_ssi_stats_ops);
if (!ssi_dbg->dbg_stats) {
debugfs_remove(ssi_dbg->dbg_dir);
return -ENOMEM;
diff --git a/sound/soc/hisilicon/hi6210-i2s.c b/sound/soc/hisilicon/hi6210-i2s.c
index 0c8f86d4020e..07a57209e055 100644
--- a/sound/soc/hisilicon/hi6210-i2s.c
+++ b/sound/soc/hisilicon/hi6210-i2s.c
@@ -36,7 +36,6 @@
#include <linux/of_irq.h>
#include <linux/mfd/syscon.h>
#include <linux/reset-controller.h>
-#include <linux/clk.h>
#include "hi6210-i2s.h"
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index 7b49d04e3c60..f2c9e8c5970a 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -1,71 +1,122 @@
+config SND_SOC_INTEL_SST_TOPLEVEL
+ bool "Intel ASoC SST drivers"
+ default y
+ depends on X86 || COMPILE_TEST
+ select SND_SOC_INTEL_MACH
+ help
+ Intel ASoC SST Platform Drivers. If you have an Intel machine that
+ has an audio controller with a DSP and I2S or DMIC port, then
+ enable this option by saying Y
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Intel SST drivers.
+
+if SND_SOC_INTEL_SST_TOPLEVEL
+
config SND_SST_IPC
tristate
+ # This option controls the IPC core for HiFi2 platforms
config SND_SST_IPC_PCI
tristate
select SND_SST_IPC
+ # This option controls the PCI-based IPC for HiFi2 platforms
+ # (Medfield, Merrifield).
config SND_SST_IPC_ACPI
tristate
select SND_SST_IPC
- select SND_SOC_INTEL_SST
- select IOSF_MBI
+ # This option controls the ACPI-based IPC for HiFi2 platforms
+ # (Baytrail, Cherrytrail)
-config SND_SOC_INTEL_COMMON
+config SND_SOC_INTEL_SST_ACPI
tristate
+ # This option controls ACPI-based probing on
+ # Haswell/Broadwell/Baytrail legacy and will be set
+ # when these platforms are enabled
config SND_SOC_INTEL_SST
tristate
- select SND_SOC_INTEL_SST_ACPI if ACPI
config SND_SOC_INTEL_SST_FIRMWARE
tristate
select DW_DMAC_CORE
-
-config SND_SOC_INTEL_SST_ACPI
- tristate
-
-config SND_SOC_ACPI_INTEL_MATCH
- tristate
- select SND_SOC_ACPI if ACPI
-
-config SND_SOC_INTEL_SST_TOPLEVEL
- tristate "Intel ASoC SST drivers"
- depends on X86 || COMPILE_TEST
- select SND_SOC_INTEL_MACH
- select SND_SOC_INTEL_COMMON
- help
- Intel ASoC Audio Drivers. If you have a Intel machine that
- has audio controller with a DSP and I2S or DMIC port, then
- enable this option by saying Y or M
- If unsure select "N".
+ # This option controls firmware download on
+ # Haswell/Broadwell/Baytrail legacy and will be set
+ # when these platforms are enabled
config SND_SOC_INTEL_HASWELL
- tristate "Intel ASoC SST driver for Haswell/Broadwell"
- depends on SND_SOC_INTEL_SST_TOPLEVEL && SND_DMA_SGBUF
- depends on DMADEVICES
+ tristate "Haswell/Broadwell Platforms"
+ depends on SND_DMA_SGBUF
+ depends on DMADEVICES && ACPI
select SND_SOC_INTEL_SST
+ select SND_SOC_INTEL_SST_ACPI
select SND_SOC_INTEL_SST_FIRMWARE
+ select SND_SOC_ACPI_INTEL_MATCH
+ help
+ If you have an Intel Haswell or Broadwell platform connected to
+ an I2S codec, then enable this option by saying Y or m. This is
+ typically used for Chromebooks. This is a recommended option.
config SND_SOC_INTEL_BAYTRAIL
- tristate "Intel ASoC SST driver for Baytrail (legacy)"
- depends on SND_SOC_INTEL_SST_TOPLEVEL
- depends on DMADEVICES
+ tristate "Baytrail (legacy) Platforms"
+ depends on DMADEVICES && ACPI
select SND_SOC_INTEL_SST
+ select SND_SOC_INTEL_SST_ACPI
select SND_SOC_INTEL_SST_FIRMWARE
+ select SND_SOC_ACPI_INTEL_MATCH
+ help
+ If you have an Intel Baytrail platform connected to an I2S codec,
+ then enable this option by saying Y or m. This was typically used
+ for Baytrail Chromebooks, but this option is now deprecated and is
+ not recommended; use SND_SST_ATOM_HIFI2_PLATFORM instead.
+
+config SND_SST_ATOM_HIFI2_PLATFORM_PCI
+ tristate "PCI HiFi2 (Medfield, Merrifield) Platforms"
+ depends on X86 && PCI
+ select SND_SST_IPC_PCI
+ select SND_SOC_COMPRESS
+ help
+ If you have an Intel Medfield or Merrifield/Edison platform, then
+ enable this option by saying Y or m. Distros will typically not
+ enable this option: Medfield devices are not available to
+ developers, and while Merrifield/Edison can run a mainline kernel
+ with limited functionality, it requires a firmware file which is
+ not in the standard firmware tree.
config SND_SST_ATOM_HIFI2_PLATFORM
- tristate "Intel ASoC SST driver for HiFi2 platforms (*field, *trail)"
- depends on SND_SOC_INTEL_SST_TOPLEVEL && X86
+ tristate "ACPI HiFi2 (Baytrail, Cherrytrail) Platforms"
+ depends on X86 && ACPI
+ select SND_SST_IPC_ACPI
select SND_SOC_COMPRESS
+ select SND_SOC_ACPI_INTEL_MATCH
+ select IOSF_MBI
+ help
+ If you have an Intel Baytrail or Cherrytrail platform with an I2S
+ codec, then enable this option by saying Y or m. This is a
+ recommended option.
config SND_SOC_INTEL_SKYLAKE
- tristate "Intel ASoC SST driver for SKL/BXT/KBL/GLK/CNL"
- depends on SND_SOC_INTEL_SST_TOPLEVEL && PCI && ACPI
+ tristate "SKL/BXT/KBL/GLK/CNL... Platforms"
+ depends on PCI && ACPI
select SND_HDA_EXT_CORE
select SND_HDA_DSP_LOADER
select SND_SOC_TOPOLOGY
select SND_SOC_INTEL_SST
+ select SND_SOC_ACPI_INTEL_MATCH
+ help
+ If you have an Intel Skylake/Broxton/ApolloLake/KabyLake/
+ GeminiLake or CannonLake platform with the DSP enabled in the BIOS,
+ then enable this option by saying Y or m.
+
+config SND_SOC_ACPI_INTEL_MATCH
+ tristate
+ select SND_SOC_ACPI if ACPI
+ # This option controls the compilation of ACPI matching tables and
+ # helpers and is not meant to be selected by the user.
+
+endif ## SND_SOC_INTEL_SST_TOPLEVEL
# ASoC codec drivers
source "sound/soc/intel/boards/Kconfig"
diff --git a/sound/soc/intel/Makefile b/sound/soc/intel/Makefile
index b973d457e834..8160520fd74c 100644
--- a/sound/soc/intel/Makefile
+++ b/sound/soc/intel/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
# Core support
-obj-$(CONFIG_SND_SOC_INTEL_COMMON) += common/
+obj-$(CONFIG_SND_SOC) += common/
# Platform Support
obj-$(CONFIG_SND_SOC_INTEL_HASWELL) += haswell/
diff --git a/sound/soc/intel/atom/sst/sst_acpi.c b/sound/soc/intel/atom/sst/sst_acpi.c
index 32d6e02e2104..6cd481bec275 100644
--- a/sound/soc/intel/atom/sst/sst_acpi.c
+++ b/sound/soc/intel/atom/sst/sst_acpi.c
@@ -236,6 +236,9 @@ static int sst_platform_get_resources(struct intel_sst_drv *ctx)
/* Find the IRQ */
ctx->irq_num = platform_get_irq(pdev,
ctx->pdata->res_info->acpi_ipc_irq_index);
+ if (ctx->irq_num <= 0)
+ return ctx->irq_num < 0 ? ctx->irq_num : -EIO;
+
return 0;
}
diff --git a/sound/soc/intel/atom/sst/sst_stream.c b/sound/soc/intel/atom/sst/sst_stream.c
index 65e257b17a7e..7ee6aeb7e0af 100644
--- a/sound/soc/intel/atom/sst/sst_stream.c
+++ b/sound/soc/intel/atom/sst/sst_stream.c
@@ -220,10 +220,10 @@ int sst_send_byte_stream_mrfld(struct intel_sst_drv *sst_drv_ctx,
sst_free_block(sst_drv_ctx, block);
out:
test_and_clear_bit(pvt_id, &sst_drv_ctx->pvt_id);
- return 0;
+ return ret;
}
-/*
+/**
* sst_pause_stream - Send msg for a pausing stream
* @str_id: stream ID
*
@@ -261,7 +261,7 @@ int sst_pause_stream(struct intel_sst_drv *sst_drv_ctx, int str_id)
}
} else {
retval = -EBADRQC;
- dev_dbg(sst_drv_ctx->dev, "SST DBG:BADRQC for stream\n ");
+ dev_dbg(sst_drv_ctx->dev, "SST DBG:BADRQC for stream\n");
}
return retval;
@@ -284,7 +284,7 @@ int sst_resume_stream(struct intel_sst_drv *sst_drv_ctx, int str_id)
if (!str_info)
return -EINVAL;
if (str_info->status == STREAM_RUNNING)
- return 0;
+ return 0;
if (str_info->status == STREAM_PAUSED) {
retval = sst_prepare_and_post_msg(sst_drv_ctx, str_info->task_id,
IPC_CMD, IPC_IA_RESUME_STREAM_MRFLD,
diff --git a/sound/soc/intel/boards/Kconfig b/sound/soc/intel/boards/Kconfig
index 6f754708a48c..d4e103615f51 100644
--- a/sound/soc/intel/boards/Kconfig
+++ b/sound/soc/intel/boards/Kconfig
@@ -1,183 +1,183 @@
-config SND_SOC_INTEL_MACH
- tristate "Intel Audio machine drivers"
+menuconfig SND_SOC_INTEL_MACH
+ bool "Intel Machine drivers"
depends on SND_SOC_INTEL_SST_TOPLEVEL
- select SND_SOC_ACPI_INTEL_MATCH if ACPI
+ help
+ Intel ASoC Machine Drivers. If you have an Intel machine that
+ has an audio controller with a DSP and I2S or DMIC port, then
+ enable this option by saying Y
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Intel ASoC machine drivers.
if SND_SOC_INTEL_MACH
-config SND_MFLD_MACHINE
- tristate "SOC Machine Audio driver for Intel Medfield MID platform"
- depends on INTEL_SCU_IPC
- select SND_SOC_SN95031
- depends on SND_SST_ATOM_HIFI2_PLATFORM
- select SND_SST_IPC_PCI
- help
- This adds support for ASoC machine driver for Intel(R) MID Medfield platform
- used as alsa device in audio substem in Intel(R) MID devices
- Say Y if you have such a device.
- If unsure select "N".
+if SND_SOC_INTEL_HASWELL
config SND_SOC_INTEL_HASWELL_MACH
- tristate "ASoC Audio DSP support for Intel Haswell Lynxpoint"
+ tristate "Haswell Lynxpoint"
depends on X86_INTEL_LPSS && I2C && I2C_DESIGNWARE_PLATFORM
- depends on SND_SOC_INTEL_HASWELL
select SND_SOC_RT5640
help
This adds support for the Lynxpoint Audio DSP on Intel(R) Haswell
- Ultrabook platforms.
- Say Y if you have such a device.
+ Ultrabook platforms. This is a recommended option.
+ Say Y or m if you have such a device.
If unsure select "N".
config SND_SOC_INTEL_BDW_RT5677_MACH
- tristate "ASoC Audio driver for Intel Broadwell with RT5677 codec"
- depends on X86_INTEL_LPSS && GPIOLIB && I2C
- depends on SND_SOC_INTEL_HASWELL
+ tristate "Broadwell with RT5677 codec"
+ depends on X86_INTEL_LPSS && I2C && I2C_DESIGNWARE_PLATFORM && GPIOLIB
select SND_SOC_RT5677
help
This adds support for Intel Broadwell platform based boards with
- the RT5677 audio codec.
+ the RT5677 audio codec. This is a recommended option.
+ Say Y or m if you have such a device.
+ If unsure select "N".
config SND_SOC_INTEL_BROADWELL_MACH
- tristate "ASoC Audio DSP support for Intel Broadwell Wildcatpoint"
+ tristate "Broadwell Wildcatpoint"
depends on X86_INTEL_LPSS && I2C && I2C_DESIGNWARE_PLATFORM
- depends on SND_SOC_INTEL_HASWELL
select SND_SOC_RT286
help
This adds support for the Wilcatpoint Audio DSP on Intel(R) Broadwell
Ultrabook platforms.
- Say Y if you have such a device.
+ Say Y or m if you have such a device. This is a recommended option.
If unsure select "N".
+endif ## SND_SOC_INTEL_HASWELL
+
+if SND_SOC_INTEL_BAYTRAIL
config SND_SOC_INTEL_BYT_MAX98090_MACH
- tristate "ASoC Audio driver for Intel Baytrail with MAX98090 codec"
+ tristate "Baytrail with MAX98090 codec"
depends on X86_INTEL_LPSS && I2C
- depends on SND_SST_IPC_ACPI = n
- depends on SND_SOC_INTEL_BAYTRAIL
select SND_SOC_MAX98090
help
This adds audio driver for Intel Baytrail platform based boards
- with the MAX98090 audio codec.
+ with the MAX98090 audio codec. This driver is deprecated, use
+ SND_SOC_INTEL_CHT_BSW_MAX98090_TI_MACH instead for better
+ functionality.
config SND_SOC_INTEL_BYT_RT5640_MACH
- tristate "ASoC Audio driver for Intel Baytrail with RT5640 codec"
+ tristate "Baytrail with RT5640 codec"
depends on X86_INTEL_LPSS && I2C
- depends on SND_SST_IPC_ACPI = n
- depends on SND_SOC_INTEL_BAYTRAIL
select SND_SOC_RT5640
help
This adds audio driver for Intel Baytrail platform based boards
with the RT5640 audio codec. This driver is deprecated, use
SND_SOC_INTEL_BYTCR_RT5640_MACH instead for better functionality.
+endif ## SND_SOC_INTEL_BAYTRAIL
+
+if SND_SST_ATOM_HIFI2_PLATFORM
+
config SND_SOC_INTEL_BYTCR_RT5640_MACH
- tristate "ASoC Audio driver for Intel Baytrail and Baytrail-CR with RT5640 codec"
- depends on X86 && I2C && ACPI
+ tristate "Baytrail and Baytrail-CR with RT5640 codec"
+ depends on X86_INTEL_LPSS && I2C && ACPI
+ select SND_SOC_ACPI
select SND_SOC_RT5640
- depends on SND_SST_ATOM_HIFI2_PLATFORM
- select SND_SST_IPC_ACPI
help
- This adds support for ASoC machine driver for Intel(R) Baytrail and Baytrail-CR
- platforms with RT5640 audio codec.
- Say Y if you have such a device.
- If unsure select "N".
+ This adds support for ASoC machine driver for Intel(R) Baytrail and Baytrail-CR
+ platforms with RT5640 audio codec.
+ Say Y or m if you have such a device. This is a recommended option.
+ If unsure select "N".
config SND_SOC_INTEL_BYTCR_RT5651_MACH
- tristate "ASoC Audio driver for Intel Baytrail and Baytrail-CR with RT5651 codec"
- depends on X86 && I2C && ACPI
+ tristate "Baytrail and Baytrail-CR with RT5651 codec"
+ depends on X86_INTEL_LPSS && I2C && ACPI
+ select SND_SOC_ACPI
select SND_SOC_RT5651
- depends on SND_SST_ATOM_HIFI2_PLATFORM
- select SND_SST_IPC_ACPI
help
- This adds support for ASoC machine driver for Intel(R) Baytrail and Baytrail-CR
- platforms with RT5651 audio codec.
- Say Y if you have such a device.
- If unsure select "N".
+ This adds support for ASoC machine driver for Intel(R) Baytrail and Baytrail-CR
+ platforms with RT5651 audio codec.
+ Say Y or m if you have such a device. This is a recommended option.
+ If unsure select "N".
config SND_SOC_INTEL_CHT_BSW_RT5672_MACH
- tristate "ASoC Audio driver for Intel Cherrytrail & Braswell with RT5672 codec"
+ tristate "Cherrytrail & Braswell with RT5672 codec"
depends on X86_INTEL_LPSS && I2C && ACPI
- select SND_SOC_RT5670
- depends on SND_SST_ATOM_HIFI2_PLATFORM
- select SND_SST_IPC_ACPI
+ select SND_SOC_ACPI
+ select SND_SOC_RT5670
help
This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell
platforms with RT5672 audio codec.
- Say Y if you have such a device.
+ Say Y or m if you have such a device. This is a recommended option.
If unsure select "N".
config SND_SOC_INTEL_CHT_BSW_RT5645_MACH
- tristate "ASoC Audio driver for Intel Cherrytrail & Braswell with RT5645/5650 codec"
+ tristate "Cherrytrail & Braswell with RT5645/5650 codec"
depends on X86_INTEL_LPSS && I2C && ACPI
+ select SND_SOC_ACPI
select SND_SOC_RT5645
- depends on SND_SST_ATOM_HIFI2_PLATFORM
- select SND_SST_IPC_ACPI
help
This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell
platforms with RT5645/5650 audio codec.
+ Say Y or m if you have such a device. This is a recommended option.
If unsure select "N".
config SND_SOC_INTEL_CHT_BSW_MAX98090_TI_MACH
- tristate "ASoC Audio driver for Intel Cherrytrail & Braswell with MAX98090 & TI codec"
+ tristate "Cherrytrail & Braswell with MAX98090 & TI codec"
depends on X86_INTEL_LPSS && I2C && ACPI
select SND_SOC_MAX98090
select SND_SOC_TS3A227E
- depends on SND_SST_ATOM_HIFI2_PLATFORM
- select SND_SST_IPC_ACPI
help
This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell
platforms with the MAX98090 audio codec; it can also support the TI jack chip as an aux device.
+ Say Y or m if you have such a device. This is a recommended option.
If unsure select "N".
config SND_SOC_INTEL_BYT_CHT_DA7213_MACH
- tristate "ASoC Audio driver for Intel Baytrail & Cherrytrail with DA7212/7213 codec"
+ tristate "Baytrail & Cherrytrail with DA7212/7213 codec"
depends on X86_INTEL_LPSS && I2C && ACPI
+ select SND_SOC_ACPI
select SND_SOC_DA7213
- depends on SND_SST_ATOM_HIFI2_PLATFORM
- select SND_SST_IPC_ACPI
help
This adds support for ASoC machine driver for Intel(R) Baytrail & CherryTrail
platforms with DA7212/7213 audio codec.
+ Say Y or m if you have such a device. This is a recommended option.
If unsure select "N".
config SND_SOC_INTEL_BYT_CHT_ES8316_MACH
- tristate "ASoC Audio driver for Intel Baytrail & Cherrytrail with ES8316 codec"
+ tristate "Baytrail & Cherrytrail with ES8316 codec"
depends on X86_INTEL_LPSS && I2C && ACPI
+ select SND_SOC_ACPI
select SND_SOC_ES8316
- depends on SND_SST_ATOM_HIFI2_PLATFORM
- select SND_SST_IPC_ACPI
help
This adds support for ASoC machine driver for Intel(R) Baytrail &
Cherrytrail platforms with ES8316 audio codec.
+ Say Y or m if you have such a device. This is a recommended option.
If unsure select "N".
config SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH
- tristate "ASoC Audio driver for Intel Baytrail & Cherrytrail platform with no codec (MinnowBoard MAX, Up)"
+ tristate "Baytrail & Cherrytrail platform with no codec (MinnowBoard MAX, Up)"
depends on X86_INTEL_LPSS && I2C && ACPI
- depends on SND_SST_ATOM_HIFI2_PLATFORM
- select SND_SST_IPC_ACPI
help
This adds support for ASoC machine driver for the MinnowBoard Max or
Up boards and provides access to I2S signals on the Low-Speed
- connector
+ connector. This is not a recommended option outside of these cases.
+ It is not intended to be enabled by distros by default.
+ Say Y or m if you have such a device.
+
If unsure select "N".
+endif ## SND_SST_ATOM_HIFI2_PLATFORM
+
+if SND_SOC_INTEL_SKYLAKE
+
config SND_SOC_INTEL_SKL_RT286_MACH
- tristate "ASoC Audio driver for SKL with RT286 I2S mode"
- depends on X86 && ACPI && I2C
- depends on SND_SOC_INTEL_SKYLAKE
+ tristate "SKL with RT286 I2S mode"
+ depends on MFD_INTEL_LPSS && I2C && ACPI
select SND_SOC_RT286
select SND_SOC_DMIC
select SND_SOC_HDAC_HDMI
help
This adds support for ASoC machine driver for Skylake platforms
with RT286 I2S audio codec.
- Say Y if you have such a device.
+ Say Y or m if you have such a device.
If unsure select "N".
config SND_SOC_INTEL_SKL_NAU88L25_SSM4567_MACH
- tristate "ASoC Audio driver for SKL with NAU88L25 and SSM4567 in I2S Mode"
- depends on X86_INTEL_LPSS && I2C
- depends on SND_SOC_INTEL_SKYLAKE
+ tristate "SKL with NAU88L25 and SSM4567 in I2S Mode"
+ depends on MFD_INTEL_LPSS && I2C && ACPI
select SND_SOC_NAU8825
select SND_SOC_SSM4567
select SND_SOC_DMIC
@@ -185,13 +185,12 @@ config SND_SOC_INTEL_SKL_NAU88L25_SSM4567_MACH
help
This adds support for ASoC Onboard Codec I2S machine driver. This will
create an alsa sound card for NAU88L25 + SSM4567.
- Say Y if you have such a device.
+ Say Y or m if you have such a device. This is a recommended option.
If unsure select "N".
config SND_SOC_INTEL_SKL_NAU88L25_MAX98357A_MACH
- tristate "ASoC Audio driver for SKL with NAU88L25 and MAX98357A in I2S Mode"
- depends on X86_INTEL_LPSS && I2C
- depends on SND_SOC_INTEL_SKYLAKE
+ tristate "SKL with NAU88L25 and MAX98357A in I2S Mode"
+ depends on MFD_INTEL_LPSS && I2C && ACPI
select SND_SOC_NAU8825
select SND_SOC_MAX98357A
select SND_SOC_DMIC
@@ -199,13 +198,12 @@ config SND_SOC_INTEL_SKL_NAU88L25_MAX98357A_MACH
help
This adds support for ASoC Onboard Codec I2S machine driver. This will
create an alsa sound card for NAU88L25 + MAX98357A.
- Say Y if you have such a device.
+ Say Y or m if you have such a device. This is a recommended option.
If unsure select "N".
config SND_SOC_INTEL_BXT_DA7219_MAX98357A_MACH
- tristate "ASoC Audio driver for Broxton with DA7219 and MAX98357A in I2S Mode"
- depends on X86 && ACPI && I2C
- depends on SND_SOC_INTEL_SKYLAKE
+ tristate "Broxton with DA7219 and MAX98357A in I2S Mode"
+ depends on MFD_INTEL_LPSS && I2C && ACPI
select SND_SOC_DA7219
select SND_SOC_MAX98357A
select SND_SOC_DMIC
@@ -214,13 +212,12 @@ config SND_SOC_INTEL_BXT_DA7219_MAX98357A_MACH
help
This adds support for ASoC machine driver for Broxton-P platforms
with DA7219 + MAX98357A I2S audio codec.
- Say Y if you have such a device.
+ Say Y or m if you have such a device. This is a recommended option.
If unsure select "N".
config SND_SOC_INTEL_BXT_RT298_MACH
- tristate "ASoC Audio driver for Broxton with RT298 I2S mode"
- depends on X86 && ACPI && I2C
- depends on SND_SOC_INTEL_SKYLAKE
+ tristate "Broxton with RT298 I2S mode"
+ depends on MFD_INTEL_LPSS && I2C && ACPI
select SND_SOC_RT298
select SND_SOC_DMIC
select SND_SOC_HDAC_HDMI
@@ -228,14 +225,12 @@ config SND_SOC_INTEL_BXT_RT298_MACH
help
This adds support for ASoC machine driver for Broxton platforms
with RT298 I2S audio codec.
- Say Y if you have such a device.
+ Say Y or m if you have such a device. This is a recommended option.
If unsure select "N".
config SND_SOC_INTEL_KBL_RT5663_MAX98927_MACH
- tristate "ASoC Audio driver for KBL with RT5663 and MAX98927 in I2S Mode"
- depends on X86_INTEL_LPSS && I2C
- select SND_SOC_INTEL_SST
- depends on SND_SOC_INTEL_SKYLAKE
+ tristate "KBL with RT5663 and MAX98927 in I2S Mode"
+ depends on MFD_INTEL_LPSS && I2C && ACPI
select SND_SOC_RT5663
select SND_SOC_MAX98927
select SND_SOC_DMIC
@@ -243,14 +238,13 @@ config SND_SOC_INTEL_KBL_RT5663_MAX98927_MACH
help
This adds support for ASoC Onboard Codec I2S machine driver. This will
create an alsa sound card for RT5663 + MAX98927.
- Say Y if you have such a device.
+ Say Y or m if you have such a device. This is a recommended option.
If unsure select "N".
config SND_SOC_INTEL_KBL_RT5663_RT5514_MAX98927_MACH
- tristate "ASoC Audio driver for KBL with RT5663, RT5514 and MAX98927 in I2S Mode"
- depends on X86_INTEL_LPSS && I2C && SPI
- select SND_SOC_INTEL_SST
- depends on SND_SOC_INTEL_SKYLAKE
+ tristate "KBL with RT5663, RT5514 and MAX98927 in I2S Mode"
+ depends on MFD_INTEL_LPSS && I2C && ACPI
+ depends on SPI
select SND_SOC_RT5663
select SND_SOC_RT5514
select SND_SOC_RT5514_SPI
@@ -259,7 +253,8 @@ config SND_SOC_INTEL_KBL_RT5663_RT5514_MAX98927_MACH
help
This adds support for ASoC Onboard Codec I2S machine driver. This will
create an alsa sound card for RT5663 + RT5514 + MAX98927.
- Say Y if you have such a device.
+ Say Y or m if you have such a device. This is a recommended option.
If unsure select "N".
+endif ## SND_SOC_INTEL_SKYLAKE
-endif
+endif ## SND_SOC_INTEL_MACH
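The Kconfig rework above replaces the repeated per-machine "depends on" lines with if/endif platform blocks, so every entry inside a block inherits the platform dependency implicitly. A minimal sketch of the pattern, with hypothetical symbol names:

    if SND_SOC_INTEL_FOO

    config SND_SOC_INTEL_FOO_EXAMPLE_MACH
    	tristate "Example machine driver"
    	depends on X86_INTEL_LPSS && I2C
    	select SND_SOC_EXAMPLE_CODEC
    	help
    	  The enclosing if-block supplies "depends on SND_SOC_INTEL_FOO",
    	  so it no longer needs to be spelled out per entry.

    endif ## SND_SOC_INTEL_FOO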
diff --git a/sound/soc/intel/boards/bytcht_da7213.c b/sound/soc/intel/boards/bytcht_da7213.c
index c4d82ad41bd7..2179dedb28ad 100644
--- a/sound/soc/intel/boards/bytcht_da7213.c
+++ b/sound/soc/intel/boards/bytcht_da7213.c
@@ -219,7 +219,7 @@ static struct snd_soc_card bytcht_da7213_card = {
.num_dapm_routes = ARRAY_SIZE(audio_map),
};
-static char codec_name[16]; /* i2c-<HID>:00 with HID being 8 chars */
+static char codec_name[SND_ACPI_I2C_ID_LEN];
static int bytcht_da7213_probe(struct platform_device *pdev)
{
@@ -243,7 +243,7 @@ static int bytcht_da7213_probe(struct platform_device *pdev)
}
/* fixup codec name based on HID */
- i2c_name = snd_soc_acpi_find_name_from_hid(mach->id);
+ i2c_name = acpi_dev_get_first_match_name(mach->id, NULL, -1);
if (i2c_name) {
snprintf(codec_name, sizeof(codec_name),
"%s%s", "i2c-", i2c_name);
diff --git a/sound/soc/intel/boards/bytcht_es8316.c b/sound/soc/intel/boards/bytcht_es8316.c
index 8088396717e3..305e7f4fe55a 100644
--- a/sound/soc/intel/boards/bytcht_es8316.c
+++ b/sound/soc/intel/boards/bytcht_es8316.c
@@ -232,15 +232,39 @@ static struct snd_soc_card byt_cht_es8316_card = {
.fully_routed = true,
};
+static char codec_name[SND_ACPI_I2C_ID_LEN];
+
static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
{
- int ret = 0;
struct byt_cht_es8316_private *priv;
+ struct snd_soc_acpi_mach *mach;
+ const char *i2c_name = NULL;
+ int dai_index = 0;
+ int i;
+ int ret = 0;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_ATOMIC);
if (!priv)
return -ENOMEM;
+ mach = (&pdev->dev)->platform_data;
+ /* fix index of codec dai */
+ for (i = 0; i < ARRAY_SIZE(byt_cht_es8316_dais); i++) {
+ if (!strcmp(byt_cht_es8316_dais[i].codec_name,
+ "i2c-ESSX8316:00")) {
+ dai_index = i;
+ break;
+ }
+ }
+
+ /* fixup codec name based on HID */
+ i2c_name = acpi_dev_get_first_match_name(mach->id, NULL, -1);
+ if (i2c_name) {
+ snprintf(codec_name, sizeof(codec_name),
+ "%s%s", "i2c-", i2c_name);
+ byt_cht_es8316_dais[dai_index].codec_name = codec_name;
+ }
+
/* register the soc card */
byt_cht_es8316_card.dev = &pdev->dev;
snd_soc_card_set_drvdata(&byt_cht_es8316_card, priv);
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index f2c0fc415e52..b6a1cfeec830 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -713,7 +713,7 @@ static struct snd_soc_card byt_rt5640_card = {
.fully_routed = true,
};
-static char byt_rt5640_codec_name[16]; /* i2c-<HID>:00 with HID being 8 chars */
+static char byt_rt5640_codec_name[SND_ACPI_I2C_ID_LEN];
static char byt_rt5640_codec_aif_name[12]; /* = "rt5640-aif[1|2]" */
static char byt_rt5640_cpu_dai_name[10]; /* = "ssp[0|2]-port" */
@@ -762,7 +762,7 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
}
/* fixup codec name based on HID */
- i2c_name = snd_soc_acpi_find_name_from_hid(mach->id);
+ i2c_name = acpi_dev_get_first_match_name(mach->id, NULL, -1);
if (i2c_name) {
snprintf(byt_rt5640_codec_name, sizeof(byt_rt5640_codec_name),
"%s%s", "i2c-", i2c_name);
diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c
index d955836c6870..456526a93dd5 100644
--- a/sound/soc/intel/boards/bytcr_rt5651.c
+++ b/sound/soc/intel/boards/bytcr_rt5651.c
@@ -38,6 +38,8 @@ enum {
BYT_RT5651_DMIC_MAP,
BYT_RT5651_IN1_MAP,
BYT_RT5651_IN2_MAP,
+ BYT_RT5651_IN1_IN2_MAP,
+ BYT_RT5651_IN3_MAP,
};
#define BYT_RT5651_MAP(quirk) ((quirk) & GENMASK(7, 0))
@@ -62,6 +64,8 @@ static void log_quirks(struct device *dev)
dev_info(dev, "quirk IN1_MAP enabled");
if (BYT_RT5651_MAP(byt_rt5651_quirk) == BYT_RT5651_IN2_MAP)
dev_info(dev, "quirk IN2_MAP enabled");
+ if (BYT_RT5651_MAP(byt_rt5651_quirk) == BYT_RT5651_IN3_MAP)
+ dev_info(dev, "quirk IN3_MAP enabled");
if (byt_rt5651_quirk & BYT_RT5651_DMIC_EN)
dev_info(dev, "quirk DMIC enabled");
if (byt_rt5651_quirk & BYT_RT5651_MCLK_EN)
@@ -127,6 +131,7 @@ static const struct snd_soc_dapm_widget byt_rt5651_widgets[] = {
SND_SOC_DAPM_MIC("Headset Mic", NULL),
SND_SOC_DAPM_MIC("Internal Mic", NULL),
SND_SOC_DAPM_SPK("Speaker", NULL),
+ SND_SOC_DAPM_LINE("Line In", NULL),
SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0,
platform_clock_control, SND_SOC_DAPM_PRE_PMU |
SND_SOC_DAPM_POST_PMD),
@@ -138,6 +143,7 @@ static const struct snd_soc_dapm_route byt_rt5651_audio_map[] = {
{"Headset Mic", NULL, "Platform Clock"},
{"Internal Mic", NULL, "Platform Clock"},
{"Speaker", NULL, "Platform Clock"},
+ {"Line In", NULL, "Platform Clock"},
{"AIF1 Playback", NULL, "ssp2 Tx"},
{"ssp2 Tx", NULL, "codec_out0"},
@@ -151,6 +157,9 @@ static const struct snd_soc_dapm_route byt_rt5651_audio_map[] = {
{"Headphone", NULL, "HPOR"},
{"Speaker", NULL, "LOUTL"},
{"Speaker", NULL, "LOUTR"},
+ {"IN2P", NULL, "Line In"},
+ {"IN2N", NULL, "Line In"},
+
};
static const struct snd_soc_dapm_route byt_rt5651_intmic_dmic_map[] = {
@@ -171,11 +180,25 @@ static const struct snd_soc_dapm_route byt_rt5651_intmic_in2_map[] = {
{"IN2P", NULL, "Internal Mic"},
};
+static const struct snd_soc_dapm_route byt_rt5651_intmic_in1_in2_map[] = {
+ {"Internal Mic", NULL, "micbias1"},
+ {"IN1P", NULL, "Internal Mic"},
+ {"IN2P", NULL, "Internal Mic"},
+ {"IN3P", NULL, "Headset Mic"},
+};
+
+static const struct snd_soc_dapm_route byt_rt5651_intmic_in3_map[] = {
+ {"Internal Mic", NULL, "micbias1"},
+ {"IN3P", NULL, "Headset Mic"},
+ {"IN1P", NULL, "Internal Mic"},
+};
+
static const struct snd_kcontrol_new byt_rt5651_controls[] = {
SOC_DAPM_PIN_SWITCH("Headphone"),
SOC_DAPM_PIN_SWITCH("Headset Mic"),
SOC_DAPM_PIN_SWITCH("Internal Mic"),
SOC_DAPM_PIN_SWITCH("Speaker"),
+ SOC_DAPM_PIN_SWITCH("Line In"),
};
static struct snd_soc_jack_pin bytcr_jack_pins[] = {
@@ -247,8 +270,16 @@ static const struct dmi_system_id byt_rt5651_quirk_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "Circuitco"),
DMI_MATCH(DMI_PRODUCT_NAME, "Minnowboard Max B3 PLATFORM"),
},
- .driver_data = (void *)(BYT_RT5651_DMIC_MAP |
- BYT_RT5651_DMIC_EN),
+ .driver_data = (void *)(BYT_RT5651_IN3_MAP),
+ },
+ {
+ .callback = byt_rt5651_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ADI"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Minnowboard Turbot"),
+ },
+ .driver_data = (void *)(BYT_RT5651_MCLK_EN |
+ BYT_RT5651_IN3_MAP),
},
{
.callback = byt_rt5651_quirk_cb,
@@ -256,7 +287,8 @@ static const struct dmi_system_id byt_rt5651_quirk_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "KIANO"),
DMI_MATCH(DMI_PRODUCT_NAME, "KIANO SlimNote 14.2"),
},
- .driver_data = (void *)(BYT_RT5651_IN2_MAP),
+ .driver_data = (void *)(BYT_RT5651_MCLK_EN |
+ BYT_RT5651_IN1_IN2_MAP),
},
{}
};
@@ -281,6 +313,14 @@ static int byt_rt5651_init(struct snd_soc_pcm_runtime *runtime)
custom_map = byt_rt5651_intmic_in2_map;
num_routes = ARRAY_SIZE(byt_rt5651_intmic_in2_map);
break;
+ case BYT_RT5651_IN1_IN2_MAP:
+ custom_map = byt_rt5651_intmic_in1_in2_map;
+ num_routes = ARRAY_SIZE(byt_rt5651_intmic_in1_in2_map);
+ break;
+ case BYT_RT5651_IN3_MAP:
+ custom_map = byt_rt5651_intmic_in3_map;
+ num_routes = ARRAY_SIZE(byt_rt5651_intmic_in3_map);
+ break;
default:
custom_map = byt_rt5651_intmic_dmic_map;
num_routes = ARRAY_SIZE(byt_rt5651_intmic_dmic_map);
@@ -469,7 +509,7 @@ static struct snd_soc_card byt_rt5651_card = {
.fully_routed = true,
};
-static char byt_rt5651_codec_name[16]; /* i2c-<HID>:00 with HID being 8 chars */
+static char byt_rt5651_codec_name[SND_ACPI_I2C_ID_LEN];
static int snd_byt_rt5651_mc_probe(struct platform_device *pdev)
{
@@ -499,7 +539,7 @@ static int snd_byt_rt5651_mc_probe(struct platform_device *pdev)
}
/* fixup codec name based on HID */
- i2c_name = snd_soc_acpi_find_name_from_hid(mach->id);
+ i2c_name = acpi_dev_get_first_match_name(mach->id, NULL, -1);
if (i2c_name) {
snprintf(byt_rt5651_codec_name, sizeof(byt_rt5651_codec_name),
"%s%s", "i2c-", i2c_name);
diff --git a/sound/soc/intel/boards/cht_bsw_rt5645.c b/sound/soc/intel/boards/cht_bsw_rt5645.c
index 18d129caa974..31641aab62cd 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5645.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5645.c
@@ -49,7 +49,7 @@ struct cht_acpi_card {
struct cht_mc_private {
struct snd_soc_jack jack;
struct cht_acpi_card *acpi_card;
- char codec_name[16];
+ char codec_name[SND_ACPI_I2C_ID_LEN];
struct clk *mclk;
};
@@ -118,6 +118,7 @@ static const struct snd_soc_dapm_widget cht_dapm_widgets[] = {
SND_SOC_DAPM_HP("Headphone", NULL),
SND_SOC_DAPM_MIC("Headset Mic", NULL),
SND_SOC_DAPM_MIC("Int Mic", NULL),
+ SND_SOC_DAPM_MIC("Int Analog Mic", NULL),
SND_SOC_DAPM_SPK("Ext Spk", NULL),
SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0,
platform_clock_control, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
@@ -128,6 +129,8 @@ static const struct snd_soc_dapm_route cht_rt5645_audio_map[] = {
{"IN1N", NULL, "Headset Mic"},
{"DMIC L1", NULL, "Int Mic"},
{"DMIC R1", NULL, "Int Mic"},
+ {"IN2P", NULL, "Int Analog Mic"},
+ {"IN2N", NULL, "Int Analog Mic"},
{"Headphone", NULL, "HPOL"},
{"Headphone", NULL, "HPOR"},
{"Ext Spk", NULL, "SPOL"},
@@ -135,6 +138,9 @@ static const struct snd_soc_dapm_route cht_rt5645_audio_map[] = {
{"Headphone", NULL, "Platform Clock"},
{"Headset Mic", NULL, "Platform Clock"},
{"Int Mic", NULL, "Platform Clock"},
+ {"Int Analog Mic", NULL, "Platform Clock"},
+ {"Int Analog Mic", NULL, "micbias1"},
+ {"Int Analog Mic", NULL, "micbias2"},
{"Ext Spk", NULL, "Platform Clock"},
};
@@ -189,6 +195,7 @@ static const struct snd_kcontrol_new cht_mc_controls[] = {
SOC_DAPM_PIN_SWITCH("Headphone"),
SOC_DAPM_PIN_SWITCH("Headset Mic"),
SOC_DAPM_PIN_SWITCH("Int Mic"),
+ SOC_DAPM_PIN_SWITCH("Int Analog Mic"),
SOC_DAPM_PIN_SWITCH("Ext Spk"),
};
@@ -499,7 +506,7 @@ static struct cht_acpi_card snd_soc_cards[] = {
{"10EC5650", CODEC_TYPE_RT5650, &snd_soc_card_chtrt5650},
};
-static char cht_rt5645_codec_name[16]; /* i2c-<HID>:00 with HID being 8 chars */
+static char cht_rt5645_codec_name[SND_ACPI_I2C_ID_LEN];
static char cht_rt5645_codec_aif_name[12]; /* = "rt5645-aif[1|2]" */
static char cht_rt5645_cpu_dai_name[10]; /* = "ssp[0|2]-port" */
@@ -566,7 +573,7 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
}
/* fixup codec name based on HID */
- i2c_name = snd_soc_acpi_find_name_from_hid(mach->id);
+ i2c_name = acpi_dev_get_first_match_name(mach->id, NULL, -1);
if (i2c_name) {
snprintf(cht_rt5645_codec_name, sizeof(cht_rt5645_codec_name),
"%s%s", "i2c-", i2c_name);
diff --git a/sound/soc/intel/boards/cht_bsw_rt5672.c b/sound/soc/intel/boards/cht_bsw_rt5672.c
index f8f21eee9b2d..c14a52d2f714 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5672.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5672.c
@@ -35,7 +35,7 @@
struct cht_mc_private {
struct snd_soc_jack headset;
- char codec_name[16];
+ char codec_name[SND_ACPI_I2C_ID_LEN];
struct clk *mclk;
};
@@ -396,7 +396,7 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
/* fixup codec name based on HID */
if (mach) {
- i2c_name = snd_soc_acpi_find_name_from_hid(mach->id);
+ i2c_name = acpi_dev_get_first_match_name(mach->id, NULL, -1);
if (i2c_name) {
snprintf(drv->codec_name, sizeof(drv->codec_name),
"i2c-%s", i2c_name);
diff --git a/sound/soc/intel/boards/haswell.c b/sound/soc/intel/boards/haswell.c
index 5e1ea0371c90..3c5160779204 100644
--- a/sound/soc/intel/boards/haswell.c
+++ b/sound/soc/intel/boards/haswell.c
@@ -76,7 +76,7 @@ static int haswell_rt5640_hw_params(struct snd_pcm_substream *substream,
}
/* set correct codec filter for DAI format and clock config */
- snd_soc_update_bits(rtd->codec, 0x83, 0xffff, 0x8000);
+ snd_soc_component_update_bits(codec_dai->component, 0x83, 0xffff, 0x8000);
return ret;
}
diff --git a/sound/soc/intel/boards/kbl_rt5663_max98927.c b/sound/soc/intel/boards/kbl_rt5663_max98927.c
index 6dcad0a8a0d0..bf7014ca486f 100644
--- a/sound/soc/intel/boards/kbl_rt5663_max98927.c
+++ b/sound/soc/intel/boards/kbl_rt5663_max98927.c
@@ -225,7 +225,7 @@ static int kabylake_rt5663_codec_init(struct snd_soc_pcm_runtime *rtd)
}
jack = &ctx->kabylake_headset;
- snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_MEDIA);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND);
snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP);
snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN);
diff --git a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
index 271ae3c2c535..90ea98f01c4c 100644
--- a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
+++ b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
@@ -195,7 +195,7 @@ static int kabylake_rt5663_codec_init(struct snd_soc_pcm_runtime *rtd)
}
jack = &ctx->kabylake_headset;
- snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_MEDIA);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND);
snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP);
snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN);
diff --git a/sound/soc/intel/boards/mfld_machine.c b/sound/soc/intel/boards/mfld_machine.c
deleted file mode 100644
index 6f44acfb4aae..000000000000
--- a/sound/soc/intel/boards/mfld_machine.c
+++ /dev/null
@@ -1,428 +0,0 @@
-/*
- * mfld_machine.c - ASoc Machine driver for Intel Medfield MID platform
- *
- * Copyright (C) 2010 Intel Corp
- * Author: Vinod Koul <vinod.koul@intel.com>
- * Author: Harsha Priya <priya.harsha@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-#include <sound/jack.h>
-#include "../codecs/sn95031.h"
-
-#define MID_MONO 1
-#define MID_STEREO 2
-#define MID_MAX_CAP 5
-#define MFLD_JACK_INSERT 0x04
-
-enum soc_mic_bias_zones {
- MFLD_MV_START = 0,
- /* mic bias volutage range for Headphones*/
- MFLD_MV_HP = 400,
- /* mic bias volutage range for American Headset*/
- MFLD_MV_AM_HS = 650,
- /* mic bias volutage range for Headset*/
- MFLD_MV_HS = 2000,
- MFLD_MV_UNDEFINED,
-};
-
-static unsigned int hs_switch;
-static unsigned int lo_dac;
-static struct snd_soc_codec *mfld_codec;
-
-struct mfld_mc_private {
- void __iomem *int_base;
- u8 interrupt_status;
-};
-
-struct snd_soc_jack mfld_jack;
-
-/*Headset jack detection DAPM pins */
-static struct snd_soc_jack_pin mfld_jack_pins[] = {
- {
- .pin = "Headphones",
- .mask = SND_JACK_HEADPHONE,
- },
- {
- .pin = "AMIC1",
- .mask = SND_JACK_MICROPHONE,
- },
-};
-
-/* jack detection voltage zones */
-static struct snd_soc_jack_zone mfld_zones[] = {
- {MFLD_MV_START, MFLD_MV_AM_HS, SND_JACK_HEADPHONE},
- {MFLD_MV_AM_HS, MFLD_MV_HS, SND_JACK_HEADSET},
-};
-
-/* sound card controls */
-static const char * const headset_switch_text[] = {"Earpiece", "Headset"};
-
-static const char * const lo_text[] = {"Vibra", "Headset", "IHF", "None"};
-
-static const struct soc_enum headset_enum =
- SOC_ENUM_SINGLE_EXT(2, headset_switch_text);
-
-static const struct soc_enum lo_enum =
- SOC_ENUM_SINGLE_EXT(4, lo_text);
-
-static int headset_get_switch(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- ucontrol->value.enumerated.item[0] = hs_switch;
- return 0;
-}
-
-static int headset_set_switch(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
- struct snd_soc_dapm_context *dapm = &card->dapm;
-
- if (ucontrol->value.enumerated.item[0] == hs_switch)
- return 0;
-
- snd_soc_dapm_mutex_lock(dapm);
-
- if (ucontrol->value.enumerated.item[0]) {
- pr_debug("hs_set HS path\n");
- snd_soc_dapm_enable_pin_unlocked(dapm, "Headphones");
- snd_soc_dapm_disable_pin_unlocked(dapm, "EPOUT");
- } else {
- pr_debug("hs_set EP path\n");
- snd_soc_dapm_disable_pin_unlocked(dapm, "Headphones");
- snd_soc_dapm_enable_pin_unlocked(dapm, "EPOUT");
- }
-
- snd_soc_dapm_sync_unlocked(dapm);
-
- snd_soc_dapm_mutex_unlock(dapm);
-
- hs_switch = ucontrol->value.enumerated.item[0];
-
- return 0;
-}
-
-static void lo_enable_out_pins(struct snd_soc_dapm_context *dapm)
-{
- snd_soc_dapm_enable_pin_unlocked(dapm, "IHFOUTL");
- snd_soc_dapm_enable_pin_unlocked(dapm, "IHFOUTR");
- snd_soc_dapm_enable_pin_unlocked(dapm, "LINEOUTL");
- snd_soc_dapm_enable_pin_unlocked(dapm, "LINEOUTR");
- snd_soc_dapm_enable_pin_unlocked(dapm, "VIB1OUT");
- snd_soc_dapm_enable_pin_unlocked(dapm, "VIB2OUT");
- if (hs_switch) {
- snd_soc_dapm_enable_pin_unlocked(dapm, "Headphones");
- snd_soc_dapm_disable_pin_unlocked(dapm, "EPOUT");
- } else {
- snd_soc_dapm_disable_pin_unlocked(dapm, "Headphones");
- snd_soc_dapm_enable_pin_unlocked(dapm, "EPOUT");
- }
-}
-
-static int lo_get_switch(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- ucontrol->value.enumerated.item[0] = lo_dac;
- return 0;
-}
-
-static int lo_set_switch(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
- struct snd_soc_dapm_context *dapm = &card->dapm;
-
- if (ucontrol->value.enumerated.item[0] == lo_dac)
- return 0;
-
- snd_soc_dapm_mutex_lock(dapm);
-
- /* we dont want to work with last state of lineout so just enable all
- * pins and then disable pins not required
- */
- lo_enable_out_pins(dapm);
-
- switch (ucontrol->value.enumerated.item[0]) {
- case 0:
- pr_debug("set vibra path\n");
- snd_soc_dapm_disable_pin_unlocked(dapm, "VIB1OUT");
- snd_soc_dapm_disable_pin_unlocked(dapm, "VIB2OUT");
- snd_soc_update_bits(mfld_codec, SN95031_LOCTL, 0x66, 0);
- break;
-
- case 1:
- pr_debug("set hs path\n");
- snd_soc_dapm_disable_pin_unlocked(dapm, "Headphones");
- snd_soc_dapm_disable_pin_unlocked(dapm, "EPOUT");
- snd_soc_update_bits(mfld_codec, SN95031_LOCTL, 0x66, 0x22);
- break;
-
- case 2:
- pr_debug("set spkr path\n");
- snd_soc_dapm_disable_pin_unlocked(dapm, "IHFOUTL");
- snd_soc_dapm_disable_pin_unlocked(dapm, "IHFOUTR");
- snd_soc_update_bits(mfld_codec, SN95031_LOCTL, 0x66, 0x44);
- break;
-
- case 3:
- pr_debug("set null path\n");
- snd_soc_dapm_disable_pin_unlocked(dapm, "LINEOUTL");
- snd_soc_dapm_disable_pin_unlocked(dapm, "LINEOUTR");
- snd_soc_update_bits(mfld_codec, SN95031_LOCTL, 0x66, 0x66);
- break;
- }
-
- snd_soc_dapm_sync_unlocked(dapm);
-
- snd_soc_dapm_mutex_unlock(dapm);
-
- lo_dac = ucontrol->value.enumerated.item[0];
- return 0;
-}
-
-static const struct snd_kcontrol_new mfld_snd_controls[] = {
- SOC_ENUM_EXT("Playback Switch", headset_enum,
- headset_get_switch, headset_set_switch),
- SOC_ENUM_EXT("Lineout Mux", lo_enum,
- lo_get_switch, lo_set_switch),
-};
-
-static const struct snd_soc_dapm_widget mfld_widgets[] = {
- SND_SOC_DAPM_HP("Headphones", NULL),
- SND_SOC_DAPM_MIC("Mic", NULL),
-};
-
-static const struct snd_soc_dapm_route mfld_map[] = {
- {"Headphones", NULL, "HPOUTR"},
- {"Headphones", NULL, "HPOUTL"},
- {"Mic", NULL, "AMIC1"},
-};
-
-static void mfld_jack_check(unsigned int intr_status)
-{
- struct mfld_jack_data jack_data;
-
- if (!mfld_codec)
- return;
-
- jack_data.mfld_jack = &mfld_jack;
- jack_data.intr_id = intr_status;
-
- sn95031_jack_detection(mfld_codec, &jack_data);
- /* TODO: add american headset detection post gpiolib support */
-}
-
-static int mfld_init(struct snd_soc_pcm_runtime *runtime)
-{
- struct snd_soc_dapm_context *dapm = &runtime->card->dapm;
- int ret_val;
-
- /* default is earpiece pin, userspace sets it explcitly */
- snd_soc_dapm_disable_pin(dapm, "Headphones");
- /* default is lineout NC, userspace sets it explcitly */
- snd_soc_dapm_disable_pin(dapm, "LINEOUTL");
- snd_soc_dapm_disable_pin(dapm, "LINEOUTR");
- lo_dac = 3;
- hs_switch = 0;
- /* we dont use linein in this so set to NC */
- snd_soc_dapm_disable_pin(dapm, "LINEINL");
- snd_soc_dapm_disable_pin(dapm, "LINEINR");
-
- /* Headset and button jack detection */
- ret_val = snd_soc_card_jack_new(runtime->card,
- "Intel(R) MID Audio Jack", SND_JACK_HEADSET |
- SND_JACK_BTN_0 | SND_JACK_BTN_1, &mfld_jack,
- mfld_jack_pins, ARRAY_SIZE(mfld_jack_pins));
- if (ret_val) {
- pr_err("jack creation failed\n");
- return ret_val;
- }
-
- ret_val = snd_soc_jack_add_zones(&mfld_jack,
- ARRAY_SIZE(mfld_zones), mfld_zones);
- if (ret_val) {
- pr_err("adding jack zones failed\n");
- return ret_val;
- }
-
- mfld_codec = runtime->codec;
-
- /* we want to check if anything is inserted at boot,
- * so send a fake event to codec and it will read adc
- * to find if anything is there or not */
- mfld_jack_check(MFLD_JACK_INSERT);
- return ret_val;
-}
-
-static struct snd_soc_dai_link mfld_msic_dailink[] = {
- {
- .name = "Medfield Headset",
- .stream_name = "Headset",
- .cpu_dai_name = "Headset-cpu-dai",
- .codec_dai_name = "SN95031 Headset",
- .codec_name = "sn95031",
- .platform_name = "sst-platform",
- .init = mfld_init,
- },
- {
- .name = "Medfield Speaker",
- .stream_name = "Speaker",
- .cpu_dai_name = "Speaker-cpu-dai",
- .codec_dai_name = "SN95031 Speaker",
- .codec_name = "sn95031",
- .platform_name = "sst-platform",
- .init = NULL,
- },
- {
- .name = "Medfield Vibra",
- .stream_name = "Vibra1",
- .cpu_dai_name = "Vibra1-cpu-dai",
- .codec_dai_name = "SN95031 Vibra1",
- .codec_name = "sn95031",
- .platform_name = "sst-platform",
- .init = NULL,
- },
- {
- .name = "Medfield Haptics",
- .stream_name = "Vibra2",
- .cpu_dai_name = "Vibra2-cpu-dai",
- .codec_dai_name = "SN95031 Vibra2",
- .codec_name = "sn95031",
- .platform_name = "sst-platform",
- .init = NULL,
- },
- {
- .name = "Medfield Compress",
- .stream_name = "Speaker",
- .cpu_dai_name = "Compress-cpu-dai",
- .codec_dai_name = "SN95031 Speaker",
- .codec_name = "sn95031",
- .platform_name = "sst-platform",
- .init = NULL,
- },
-};
-
-/* SoC card */
-static struct snd_soc_card snd_soc_card_mfld = {
- .name = "medfield_audio",
- .owner = THIS_MODULE,
- .dai_link = mfld_msic_dailink,
- .num_links = ARRAY_SIZE(mfld_msic_dailink),
-
- .controls = mfld_snd_controls,
- .num_controls = ARRAY_SIZE(mfld_snd_controls),
- .dapm_widgets = mfld_widgets,
- .num_dapm_widgets = ARRAY_SIZE(mfld_widgets),
- .dapm_routes = mfld_map,
- .num_dapm_routes = ARRAY_SIZE(mfld_map),
-};
-
-static irqreturn_t snd_mfld_jack_intr_handler(int irq, void *dev)
-{
- struct mfld_mc_private *mc_private = (struct mfld_mc_private *) dev;
-
- memcpy_fromio(&mc_private->interrupt_status,
- ((void *)(mc_private->int_base)),
- sizeof(u8));
- return IRQ_WAKE_THREAD;
-}
-
-static irqreturn_t snd_mfld_jack_detection(int irq, void *data)
-{
- struct mfld_mc_private *mc_drv_ctx = (struct mfld_mc_private *) data;
-
- mfld_jack_check(mc_drv_ctx->interrupt_status);
-
- return IRQ_HANDLED;
-}
-
-static int snd_mfld_mc_probe(struct platform_device *pdev)
-{
- int ret_val = 0, irq;
- struct mfld_mc_private *mc_drv_ctx;
- struct resource *irq_mem;
-
- pr_debug("snd_mfld_mc_probe called\n");
-
- /* retrive the irq number */
- irq = platform_get_irq(pdev, 0);
-
- /* audio interrupt base of SRAM location where
- * interrupts are stored by System FW */
- mc_drv_ctx = devm_kzalloc(&pdev->dev, sizeof(*mc_drv_ctx), GFP_ATOMIC);
- if (!mc_drv_ctx)
- return -ENOMEM;
-
- irq_mem = platform_get_resource_byname(
- pdev, IORESOURCE_MEM, "IRQ_BASE");
- if (!irq_mem) {
- pr_err("no mem resource given\n");
- return -ENODEV;
- }
- mc_drv_ctx->int_base = devm_ioremap_nocache(&pdev->dev, irq_mem->start,
- resource_size(irq_mem));
- if (!mc_drv_ctx->int_base) {
- pr_err("Mapping of cache failed\n");
- return -ENOMEM;
- }
- /* register for interrupt */
- ret_val = devm_request_threaded_irq(&pdev->dev, irq,
- snd_mfld_jack_intr_handler,
- snd_mfld_jack_detection,
- IRQF_SHARED, pdev->dev.driver->name, mc_drv_ctx);
- if (ret_val) {
- pr_err("cannot register IRQ\n");
- return ret_val;
- }
- /* register the soc card */
- snd_soc_card_mfld.dev = &pdev->dev;
- ret_val = devm_snd_soc_register_card(&pdev->dev, &snd_soc_card_mfld);
- if (ret_val) {
- pr_debug("snd_soc_register_card failed %d\n", ret_val);
- return ret_val;
- }
- platform_set_drvdata(pdev, mc_drv_ctx);
- pr_debug("successfully exited probe\n");
- return 0;
-}
-
-static struct platform_driver snd_mfld_mc_driver = {
- .driver = {
- .name = "msic_audio",
- },
- .probe = snd_mfld_mc_probe,
-};
-
-module_platform_driver(snd_mfld_mc_driver);
-
-MODULE_DESCRIPTION("ASoC Intel(R) MID Machine driver");
-MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
-MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:msic-audio");
diff --git a/sound/soc/intel/common/sst-dsp.c b/sound/soc/intel/common/sst-dsp.c
index 11c0805393ff..fd82f4b1d4a0 100644
--- a/sound/soc/intel/common/sst-dsp.c
+++ b/sound/soc/intel/common/sst-dsp.c
@@ -269,7 +269,7 @@ int sst_dsp_register_poll(struct sst_dsp *ctx, u32 offset, u32 mask,
*/
timeout = jiffies + msecs_to_jiffies(time);
- while (((sst_dsp_shim_read_unlocked(ctx, offset) & mask) != target)
+ while ((((reg = sst_dsp_shim_read_unlocked(ctx, offset)) & mask) != target)
&& time_before(jiffies, timeout)) {
k++;
if (k > 10)
@@ -278,8 +278,6 @@ int sst_dsp_register_poll(struct sst_dsp *ctx, u32 offset, u32 mask,
usleep_range(s, 2*s);
}
- reg = sst_dsp_shim_read_unlocked(ctx, offset);
-
if ((reg & mask) == target) {
dev_dbg(ctx->dev, "FW Poll Status: reg=%#x %s successful\n",
reg, operation);
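The sst_dsp_register_poll() change above closes a small window: the old code re-read the register after the loop, so the final check could observe a different value than the one that actually ended the polling. A simplified sketch of the corrected idiom, reusing the function's variable names and omitting its sleep-granularity bookkeeping:

    u32 reg;
    unsigned long timeout = jiffies + msecs_to_jiffies(time);

    /* Read in the loop condition; 'reg' keeps the value that ended the loop. */
    while ((((reg = sst_dsp_shim_read_unlocked(ctx, offset)) & mask) != target)
    		&& time_before(jiffies, timeout))
    	usleep_range(s, 2 * s);

    return ((reg & mask) == target) ? 0 : -ETIMEDOUT; /* illustrative error code */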
diff --git a/sound/soc/intel/skylake/bxt-sst.c b/sound/soc/intel/skylake/bxt-sst.c
index 4524211960e4..440bca7afbf1 100644
--- a/sound/soc/intel/skylake/bxt-sst.c
+++ b/sound/soc/intel/skylake/bxt-sst.c
@@ -595,7 +595,7 @@ int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
INIT_DELAYED_WORK(&skl->d0i3.work, bxt_set_dsp_D0i3);
skl->d0i3.state = SKL_DSP_D0I3_NONE;
- return 0;
+ return skl_dsp_acquire_irq(sst);
}
EXPORT_SYMBOL_GPL(bxt_sst_dsp_init);
diff --git a/sound/soc/intel/skylake/cnl-sst.c b/sound/soc/intel/skylake/cnl-sst.c
index 387de388ce29..245df1067ba8 100644
--- a/sound/soc/intel/skylake/cnl-sst.c
+++ b/sound/soc/intel/skylake/cnl-sst.c
@@ -458,7 +458,7 @@ int cnl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
cnl->boot_complete = false;
init_waitqueue_head(&cnl->boot_wait);
- return 0;
+ return skl_dsp_acquire_irq(sst);
}
EXPORT_SYMBOL_GPL(cnl_sst_dsp_init);
diff --git a/sound/soc/intel/skylake/skl-i2s.h b/sound/soc/intel/skylake/skl-i2s.h
new file mode 100644
index 000000000000..dcf819bc688f
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-i2s.h
@@ -0,0 +1,64 @@
+/*
+ * skl-i2s.h - i2s blob mapping
+ *
+ * Copyright (C) 2017 Intel Corp
+ * Author: Subhransu S. Prusty <subhransu.s.prusty@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+
+#ifndef __SOUND_SOC_SKL_I2S_H
+#define __SOUND_SOC_SKL_I2S_H
+
+#define SKL_I2S_MAX_TIME_SLOTS 8
+#define SKL_MCLK_DIV_CLK_SRC_MASK GENMASK(17, 16)
+
+#define SKL_MNDSS_DIV_CLK_SRC_MASK GENMASK(21, 20)
+#define SKL_SHIFT(x) (ffs(x) - 1)
+#define SKL_MCLK_DIV_RATIO_MASK GENMASK(11, 0)
+
+struct skl_i2s_config {
+ u32 ssc0;
+ u32 ssc1;
+ u32 sscto;
+ u32 sspsp;
+ u32 sstsa;
+ u32 ssrsa;
+ u32 ssc2;
+ u32 sspsp2;
+ u32 ssc3;
+ u32 ssioc;
+} __packed;
+
+struct skl_i2s_config_mclk {
+ u32 mdivctrl;
+ u32 mdivr;
+};
+
+/**
+ * struct skl_i2s_config_blob_legacy - Structure defines I2S Gateway
+ * configuration legacy blob
+ *
+ * @gtw_attr: Gateway attribute for the I2S Gateway
+ * @tdm_ts_group: TDM slot mapping against channels in the Gateway.
+ * @i2s_cfg: I2S HW registers
+ * @mclk: MCLK clock source and divider values
+ */
+struct skl_i2s_config_blob_legacy {
+ u32 gtw_attr;
+ u32 tdm_ts_group[SKL_I2S_MAX_TIME_SLOTS];
+ struct skl_i2s_config i2s_cfg;
+ struct skl_i2s_config_mclk mclk;
+};
+
+#endif /* __SOUND_SOC_SKL_I2S_H */
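The masks in this header are consumed with the SKL_SHIFT() idiom: ffs(x) - 1 is the bit position of a mask's lowest set bit, so (val & MASK) >> SKL_SHIFT(MASK) extracts the field. A small worked example and a hypothetical helper:

    /*
     * SKL_MCLK_DIV_CLK_SRC_MASK = GENMASK(17, 16) = 0x30000,
     * SKL_SHIFT(0x30000) = ffs(0x30000) - 1 = 16, so for
     * mdivctrl = 0x20000: (0x20000 & 0x30000) >> 16 = 0x2.
     */
    static inline u8 example_mclk_clk_src(u32 mdivctrl)
    {
    	return (mdivctrl & SKL_MCLK_DIV_CLK_SRC_MASK) >>
    		SKL_SHIFT(SKL_MCLK_DIV_CLK_SRC_MASK);
    }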
diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c
index 61b5bfa79d13..8cbf080c38b3 100644
--- a/sound/soc/intel/skylake/skl-messages.c
+++ b/sound/soc/intel/skylake/skl-messages.c
@@ -55,6 +55,19 @@ static int skl_free_dma_buf(struct device *dev, struct snd_dma_buffer *dmab)
return 0;
}
+#define SKL_ASTATE_PARAM_ID 4
+
+void skl_dsp_set_astate_cfg(struct skl_sst *ctx, u32 cnt, void *data)
+{
+ struct skl_ipc_large_config_msg msg = {0};
+
+ msg.large_param_id = SKL_ASTATE_PARAM_ID;
+ msg.param_data_size = (cnt * sizeof(struct skl_astate_param) +
+ sizeof(cnt));
+
+ skl_ipc_set_large_config(&ctx->ipc, &msg, data);
+}
+
#define NOTIFICATION_PARAM_ID 3
#define NOTIFICATION_MASK 0xf
@@ -404,11 +417,20 @@ int skl_resume_dsp(struct skl *skl)
if (skl->skl_sst->is_first_boot == true)
return 0;
+ /* disable dynamic clock gating during fw and lib download */
+ ctx->enable_miscbdcge(ctx->dev, false);
+
ret = skl_dsp_wake(ctx->dsp);
+ ctx->enable_miscbdcge(ctx->dev, true);
if (ret < 0)
return ret;
skl_dsp_enable_notification(skl->skl_sst, false);
+
+ if (skl->cfg.astate_cfg != NULL) {
+ skl_dsp_set_astate_cfg(skl->skl_sst, skl->cfg.astate_cfg->count,
+ skl->cfg.astate_cfg);
+ }
return ret;
}
diff --git a/sound/soc/intel/skylake/skl-nhlt.c b/sound/soc/intel/skylake/skl-nhlt.c
index 3eaac41090ca..3b1d2b828c1b 100644
--- a/sound/soc/intel/skylake/skl-nhlt.c
+++ b/sound/soc/intel/skylake/skl-nhlt.c
@@ -19,6 +19,7 @@
*/
#include <linux/pci.h>
#include "skl.h"
+#include "skl-i2s.h"
#define NHLT_ACPI_HEADER_SIG "NHLT"
@@ -43,7 +44,8 @@ struct nhlt_acpi_table *skl_nhlt_init(struct device *dev)
obj = acpi_evaluate_dsm(handle, &osc_guid, 1, 1, NULL);
if (obj && obj->type == ACPI_TYPE_BUFFER) {
nhlt_ptr = (struct nhlt_resource_desc *)obj->buffer.pointer;
- nhlt_table = (struct nhlt_acpi_table *)
+ if (nhlt_ptr->length)
+ nhlt_table = (struct nhlt_acpi_table *)
memremap(nhlt_ptr->min_addr, nhlt_ptr->length,
MEMREMAP_WB);
ACPI_FREE(obj);
@@ -276,3 +278,157 @@ void skl_nhlt_remove_sysfs(struct skl *skl)
sysfs_remove_file(&dev->kobj, &dev_attr_platform_id.attr);
}
+
+/*
+ * Queries NHLT for all the fmt configuration for a particular endpoint and
+ * stores all possible rates supported in a rate table for the corresponding
+ * sclk/sclkfs.
+ */
+static void skl_get_ssp_clks(struct skl *skl, struct skl_ssp_clk *ssp_clks,
+ struct nhlt_fmt *fmt, u8 id)
+{
+ struct skl_i2s_config_blob_legacy *i2s_config;
+ struct skl_clk_parent_src *parent;
+ struct skl_ssp_clk *sclk, *sclkfs;
+ struct nhlt_fmt_cfg *fmt_cfg;
+ struct wav_fmt_ext *wav_fmt;
+ unsigned long rate = 0;
+ bool present = false;
+ int rate_index = 0;
+ u16 channels, bps;
+ u8 clk_src;
+ int i, j;
+ u32 fs;
+
+ sclk = &ssp_clks[SKL_SCLK_OFS];
+ sclkfs = &ssp_clks[SKL_SCLKFS_OFS];
+
+ if (fmt->fmt_count == 0)
+ return;
+
+ for (i = 0; i < fmt->fmt_count; i++) {
+ fmt_cfg = &fmt->fmt_config[i];
+ wav_fmt = &fmt_cfg->fmt_ext;
+
+ channels = wav_fmt->fmt.channels;
+ bps = wav_fmt->fmt.bits_per_sample;
+ fs = wav_fmt->fmt.samples_per_sec;
+
+ /*
+ * In case of TDM configuration on a ssp, there can
+ * be more than one blob in which channel masks are
+ * different for each usecase for a specific rate and bps.
+ * But the sclk rate will be generated for the total
+ * number of channels used for that endpoint.
+ *
+ * So for the given fs and bps, choose blob which has
+ * the superset of all channels for that endpoint and
+ * derive the rate.
+ */
+ for (j = i; j < fmt->fmt_count; j++) {
+ fmt_cfg = &fmt->fmt_config[j];
+ wav_fmt = &fmt_cfg->fmt_ext;
+ if ((fs == wav_fmt->fmt.samples_per_sec) &&
+ (bps == wav_fmt->fmt.bits_per_sample))
+ channels = max_t(u16, channels,
+ wav_fmt->fmt.channels);
+ }
+
+ rate = channels * bps * fs;
+
+ /* check if the rate is added already to the given SSP's sclk */
+ for (j = 0; (j < SKL_MAX_CLK_RATES) &&
+ (sclk[id].rate_cfg[j].rate != 0); j++) {
+ if (sclk[id].rate_cfg[j].rate == rate) {
+ present = true;
+ break;
+ }
+ }
+
+ /* Fill rate and parent for sclk/sclkfs */
+ if (!present) {
+ /* MCLK Divider Source Select */
+ i2s_config = (struct skl_i2s_config_blob_legacy *)
+ fmt->fmt_config[0].config.caps;
+ clk_src = ((i2s_config->mclk.mdivctrl)
+ & SKL_MNDSS_DIV_CLK_SRC_MASK) >>
+ SKL_SHIFT(SKL_MNDSS_DIV_CLK_SRC_MASK);
+
+ parent = skl_get_parent_clk(clk_src);
+
+ /*
+ * Do not copy the config data if there is no parent
+ * clock available for this clock source select
+ */
+ if (!parent)
+ continue;
+
+ sclk[id].rate_cfg[rate_index].rate = rate;
+ sclk[id].rate_cfg[rate_index].config = fmt_cfg;
+ sclkfs[id].rate_cfg[rate_index].rate = rate;
+ sclkfs[id].rate_cfg[rate_index].config = fmt_cfg;
+ sclk[id].parent_name = parent->name;
+ sclkfs[id].parent_name = parent->name;
+
+ rate_index++;
+ }
+ }
+}
+
+static void skl_get_mclk(struct skl *skl, struct skl_ssp_clk *mclk,
+ struct nhlt_fmt *fmt, u8 id)
+{
+ struct skl_i2s_config_blob_legacy *i2s_config;
+ struct nhlt_specific_cfg *fmt_cfg;
+ struct skl_clk_parent_src *parent;
+ u32 clkdiv, div_ratio;
+ u8 clk_src;
+
+ fmt_cfg = &fmt->fmt_config[0].config;
+ i2s_config = (struct skl_i2s_config_blob_legacy *)fmt_cfg->caps;
+
+ /* MCLK Divider Source Select */
+ clk_src = ((i2s_config->mclk.mdivctrl) & SKL_MCLK_DIV_CLK_SRC_MASK) >>
+ SKL_SHIFT(SKL_MCLK_DIV_CLK_SRC_MASK);
+
+ clkdiv = i2s_config->mclk.mdivr & SKL_MCLK_DIV_RATIO_MASK;
+
+ /* bypass divider */
+ div_ratio = 1;
+
+ if (clkdiv != SKL_MCLK_DIV_RATIO_MASK)
+ /* Divider is 2 + clkdiv */
+ div_ratio = clkdiv + 2;
+
+ /* Calculate MCLK rate from source using div value */
+ parent = skl_get_parent_clk(clk_src);
+ if (!parent)
+ return;
+
+ mclk[id].rate_cfg[0].rate = parent->rate/div_ratio;
+ mclk[id].rate_cfg[0].config = &fmt->fmt_config[0];
+ mclk[id].parent_name = parent->name;
+}
+
+void skl_get_clks(struct skl *skl, struct skl_ssp_clk *ssp_clks)
+{
+ struct nhlt_acpi_table *nhlt = (struct nhlt_acpi_table *)skl->nhlt;
+ struct nhlt_endpoint *epnt;
+ struct nhlt_fmt *fmt;
+ int i;
+ u8 id;
+
+ epnt = (struct nhlt_endpoint *)nhlt->desc;
+ for (i = 0; i < nhlt->endpoint_count; i++) {
+ if (epnt->linktype == NHLT_LINK_SSP) {
+ id = epnt->virtual_bus_id;
+
+ fmt = (struct nhlt_fmt *)(epnt->config.caps
+ + epnt->config.size);
+
+ skl_get_ssp_clks(skl, ssp_clks, fmt, id);
+ skl_get_mclk(skl, ssp_clks, fmt, id);
+ }
+ epnt = (struct nhlt_endpoint *)((u8 *)epnt + epnt->length);
+ }
+}
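Worked numbers for the two derivations above, with illustrative values:

    /*
     * sclk:  rate = channels * bps * fs
     *        2 channels * 16 bits * 48000 Hz = 1,536,000 Hz
     * mclk:  divider is bypassed (ratio 1) when clkdiv reads back as
     *        SKL_MCLK_DIV_RATIO_MASK (all ones); otherwise ratio = clkdiv + 2.
     *        parent 24,576,000 Hz, clkdiv = 0 -> 24,576,000 / 2 = 12,288,000 Hz
     */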
diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
index 1dd97479e0c0..e46828533826 100644
--- a/sound/soc/intel/skylake/skl-pcm.c
+++ b/sound/soc/intel/skylake/skl-pcm.c
@@ -537,7 +537,7 @@ static int skl_link_hw_params(struct snd_pcm_substream *substream,
snd_soc_dai_set_dma_data(dai, substream, (void *)link_dev);
- link = snd_hdac_ext_bus_get_link(ebus, rtd->codec->component.name);
+ link = snd_hdac_ext_bus_get_link(ebus, codec_dai->component->name);
if (!link)
return -EINVAL;
@@ -620,7 +620,7 @@ static int skl_link_hw_free(struct snd_pcm_substream *substream,
link_dev->link_prepared = 0;
- link = snd_hdac_ext_bus_get_link(ebus, rtd->codec->component.name);
+ link = snd_hdac_ext_bus_get_link(ebus, rtd->codec_dai->component->name);
if (!link)
return -EINVAL;
@@ -1343,7 +1343,11 @@ static int skl_platform_soc_probe(struct snd_soc_platform *platform)
return -EIO;
}
+ /* disable dynamic clock gating during fw and lib download */
+ skl->skl_sst->enable_miscbdcge(platform->dev, false);
+
ret = ops->init_fw(platform->dev, skl->skl_sst);
+ skl->skl_sst->enable_miscbdcge(platform->dev, true);
if (ret < 0) {
dev_err(platform->dev, "Failed to boot first fw: %d\n", ret);
return ret;
@@ -1351,6 +1355,12 @@ static int skl_platform_soc_probe(struct snd_soc_platform *platform)
skl_populate_modules(skl);
skl->skl_sst->update_d0i3c = skl_update_d0i3c;
skl_dsp_enable_notification(skl->skl_sst, false);
+
+ if (skl->cfg.astate_cfg != NULL) {
+ skl_dsp_set_astate_cfg(skl->skl_sst,
+ skl->cfg.astate_cfg->count,
+ skl->cfg.astate_cfg);
+ }
}
pm_runtime_mark_last_busy(platform->dev);
pm_runtime_put_autosuspend(platform->dev);
diff --git a/sound/soc/intel/skylake/skl-ssp-clk.h b/sound/soc/intel/skylake/skl-ssp-clk.h
new file mode 100644
index 000000000000..c9ea84004260
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-ssp-clk.h
@@ -0,0 +1,79 @@
+/*
+ * skl-ssp-clk.h - Skylake ssp clock information and ipc structure
+ *
+ * Copyright (C) 2017 Intel Corp
+ * Author: Jaikrishna Nemallapudi <jaikrishnax.nemallapudi@intel.com>
+ * Author: Subhransu S. Prusty <subhransu.s.prusty@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+
+#ifndef SOUND_SOC_SKL_SSP_CLK_H
+#define SOUND_SOC_SKL_SSP_CLK_H
+
+#define SKL_MAX_SSP 6
+/* xtal/cardinal/pll, parent of ssp clocks and mclk */
+#define SKL_MAX_CLK_SRC 3
+#define SKL_MAX_SSP_CLK_TYPES 3 /* mclk, sclk, sclkfs */
+
+#define SKL_MAX_CLK_CNT (SKL_MAX_SSP * SKL_MAX_SSP_CLK_TYPES)
+
+/* Max number of configurations supported for each clock */
+#define SKL_MAX_CLK_RATES 10
+
+#define SKL_SCLK_OFS SKL_MAX_SSP
+#define SKL_SCLKFS_OFS (SKL_SCLK_OFS + SKL_MAX_SSP)
+
+enum skl_clk_type {
+ SKL_MCLK,
+ SKL_SCLK,
+ SKL_SCLK_FS,
+};
+
+enum skl_clk_src_type {
+ SKL_XTAL,
+ SKL_CARDINAL,
+ SKL_PLL,
+};
+
+struct skl_clk_parent_src {
+ u8 clk_id;
+ const char *name;
+ unsigned long rate;
+ const char *parent_name;
+};
+
+struct skl_clk_rate_cfg_table {
+ unsigned long rate;
+ void *config;
+};
+
+/*
+ * rate for mclk will be in rates[0]. For sclk and sclkfs, rates[] store
+ * all possible clocks ssp can generate for that platform.
+ */
+struct skl_ssp_clk {
+ const char *name;
+ const char *parent_name;
+ struct skl_clk_rate_cfg_table rate_cfg[SKL_MAX_CLK_RATES];
+};
+
+struct skl_clk_pdata {
+ struct skl_clk_parent_src *parent_clks;
+ int num_clks;
+ struct skl_ssp_clk *ssp_clks;
+ void *pvt_data;
+};
+
+#endif /* SOUND_SOC_SKL_SSP_CLK_H */
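How the flat clock table defined above is laid out, as used by skl_get_ssp_clks() and skl_get_mclk() in skl-nhlt.c:

    /*
     * ssp_clks[id]                   mclk   for ssp<id>, id in 0..SKL_MAX_SSP-1
     * ssp_clks[SKL_SCLK_OFS + id]    sclk   for ssp<id>
     * ssp_clks[SKL_SCLKFS_OFS + id]  sclkfs for ssp<id>
     *
     * SKL_MAX_SSP * SKL_MAX_SSP_CLK_TYPES = 6 * 3 = 18 entries, matching the
     * skl_ssp_clks[] name table registered from skl.c.
     */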
diff --git a/sound/soc/intel/skylake/skl-sst-dsp.c b/sound/soc/intel/skylake/skl-sst-dsp.c
index 19ee1d4f3bdf..71e31ad0bb3f 100644
--- a/sound/soc/intel/skylake/skl-sst-dsp.c
+++ b/sound/soc/intel/skylake/skl-sst-dsp.c
@@ -435,16 +435,22 @@ struct sst_dsp *skl_dsp_ctx_init(struct device *dev,
return NULL;
}
+ return sst;
+}
+
+int skl_dsp_acquire_irq(struct sst_dsp *sst)
+{
+ struct sst_dsp_device *sst_dev = sst->sst_dev;
+ int ret;
+
/* Register the ISR */
ret = request_threaded_irq(sst->irq, sst->ops->irq_handler,
sst_dev->thread, IRQF_SHARED, "AudioDSP", sst);
- if (ret) {
+ if (ret)
dev_err(sst->dev, "unable to grab threaded IRQ %d, disabling device\n",
sst->irq);
- return NULL;
- }
- return sst;
+ return ret;
}
void skl_dsp_free(struct sst_dsp *dsp)
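With request_threaded_irq() moved into skl_dsp_acquire_irq(), context init and ISR registration become two stages; the skl/bxt/cnl init paths in this series end with return skl_dsp_acquire_irq(sst). A hedged sketch of the resulting call order (the wrapper name is hypothetical):

    static int example_platform_dsp_init(struct device *dev,
    				     struct sst_dsp_device *sst_dev, int irq)
    {
    	struct sst_dsp *sst = skl_dsp_ctx_init(dev, sst_dev, irq);

    	if (!sst)
    		return -ENODEV;	/* illustrative error code */

    	/* platform-specific setup that must complete before the ISR runs */

    	return skl_dsp_acquire_irq(sst);	/* registers the threaded ISR */
    }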
diff --git a/sound/soc/intel/skylake/skl-sst-dsp.h b/sound/soc/intel/skylake/skl-sst-dsp.h
index eba20d37ba8c..12fc9a73dc8a 100644
--- a/sound/soc/intel/skylake/skl-sst-dsp.h
+++ b/sound/soc/intel/skylake/skl-sst-dsp.h
@@ -206,6 +206,7 @@ int skl_cldma_wait_interruptible(struct sst_dsp *ctx);
void skl_dsp_set_state_locked(struct sst_dsp *ctx, int state);
struct sst_dsp *skl_dsp_ctx_init(struct device *dev,
struct sst_dsp_device *sst_dev, int irq);
+int skl_dsp_acquire_irq(struct sst_dsp *sst);
bool is_skl_dsp_running(struct sst_dsp *ctx);
unsigned int skl_dsp_get_enabled_cores(struct sst_dsp *ctx);
@@ -251,6 +252,9 @@ void skl_freeup_uuid_list(struct skl_sst *ctx);
int skl_dsp_strip_extended_manifest(struct firmware *fw);
void skl_dsp_enable_notification(struct skl_sst *ctx, bool enable);
+
+void skl_dsp_set_astate_cfg(struct skl_sst *ctx, u32 cnt, void *data);
+
int skl_sst_ctx_init(struct device *dev, int irq, const char *fw_name,
struct skl_dsp_loader_ops dsp_ops, struct skl_sst **dsp,
struct sst_dsp_device *skl_dev);
diff --git a/sound/soc/intel/skylake/skl-sst-utils.c b/sound/soc/intel/skylake/skl-sst-utils.c
index 8ff89280d9fd..2ae405617876 100644
--- a/sound/soc/intel/skylake/skl-sst-utils.c
+++ b/sound/soc/intel/skylake/skl-sst-utils.c
@@ -178,7 +178,8 @@ static inline int skl_pvtid_128(struct uuid_module *module)
* skl_get_pvt_id: generate a private id for use as module id
*
* @ctx: driver context
- * @mconfig: module configuration data
+ * @uuid_mod: module's uuid
+ * @instance_id: module's instance id
*
* This generates a 128 bit private unique id for a module TYPE so that
* module instance is unique
@@ -208,7 +209,8 @@ EXPORT_SYMBOL_GPL(skl_get_pvt_id);
* skl_put_pvt_id: free up the private id allocated
*
* @ctx: driver context
- * @mconfig: module configuration data
+ * @uuid_mod: module's uuid
+ * @pvt_id: module pvt id
*
* This frees a 128 bit private unique id previously generated
*/
diff --git a/sound/soc/intel/skylake/skl-sst.c b/sound/soc/intel/skylake/skl-sst.c
index a436abf2fe3f..5a7e41b65ef3 100644
--- a/sound/soc/intel/skylake/skl-sst.c
+++ b/sound/soc/intel/skylake/skl-sst.c
@@ -569,7 +569,7 @@ int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
sst->fw_ops = skl_fw_ops;
- return 0;
+ return skl_dsp_acquire_irq(sst);
}
EXPORT_SYMBOL_GPL(skl_sst_dsp_init);
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
index 81923da18ac2..73af6e19ebbd 100644
--- a/sound/soc/intel/skylake/skl-topology.c
+++ b/sound/soc/intel/skylake/skl-topology.c
@@ -190,7 +190,6 @@ skl_tplg_free_pipe_mcps(struct skl *skl, struct skl_module_cfg *mconfig)
u8 res_idx = mconfig->res_idx;
struct skl_module_res *res = &mconfig->module->resources[res_idx];
- res = &mconfig->module->resources[res_idx];
skl->resource.mcps -= res->cps;
}
@@ -3056,11 +3055,13 @@ static int skl_tplg_get_int_tkn(struct device *dev,
struct snd_soc_tplg_vendor_value_elem *tkn_elem,
struct skl *skl)
{
- int tkn_count = 0, ret;
+ int tkn_count = 0, ret, size;
static int mod_idx, res_val_idx, intf_val_idx, dir, pin_idx;
struct skl_module_res *res = NULL;
struct skl_module_iface *fmt = NULL;
struct skl_module *mod = NULL;
+ static struct skl_astate_param *astate_table;
+ static int astate_cfg_idx, count;
int i;
if (skl->modules) {
@@ -3093,6 +3094,46 @@ static int skl_tplg_get_int_tkn(struct device *dev,
mod_idx = tkn_elem->value;
break;
+ case SKL_TKN_U32_ASTATE_COUNT:
+ if (astate_table != NULL) {
+ dev_err(dev, "More than one entry for A-State count");
+ return -EINVAL;
+ }
+
+ if (tkn_elem->value > SKL_MAX_ASTATE_CFG) {
+ dev_err(dev, "Invalid A-State count %d\n",
+ tkn_elem->value);
+ return -EINVAL;
+ }
+
+ size = tkn_elem->value * sizeof(struct skl_astate_param) +
+ sizeof(count);
+ skl->cfg.astate_cfg = devm_kzalloc(dev, size, GFP_KERNEL);
+ if (!skl->cfg.astate_cfg)
+ return -ENOMEM;
+
+ astate_table = skl->cfg.astate_cfg->astate_table;
+ count = skl->cfg.astate_cfg->count = tkn_elem->value;
+ break;
+
+ case SKL_TKN_U32_ASTATE_IDX:
+ if (tkn_elem->value >= count) {
+ dev_err(dev, "Invalid A-State index %d\n",
+ tkn_elem->value);
+ return -EINVAL;
+ }
+
+ astate_cfg_idx = tkn_elem->value;
+ break;
+
+ case SKL_TKN_U32_ASTATE_KCPS:
+ astate_table[astate_cfg_idx].kcps = tkn_elem->value;
+ break;
+
+ case SKL_TKN_U32_ASTATE_CLK_SRC:
+ astate_table[astate_cfg_idx].clk_src = tkn_elem->value;
+ break;
+
case SKL_TKN_U8_IN_PIN_TYPE:
case SKL_TKN_U8_OUT_PIN_TYPE:
case SKL_TKN_U8_IN_QUEUE_COUNT:
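The A-state token handlers above fill a count-prefixed table. The struct definitions live in headers outside this diff, so the following shape is inferred from the usage here (field names as used by the handlers):

    struct skl_astate_param {
    	u32 kcps;	/* SKL_TKN_U32_ASTATE_KCPS */
    	u32 clk_src;	/* SKL_TKN_U32_ASTATE_CLK_SRC */
    };

    struct skl_astate_config {
    	u32 count;				/* SKL_TKN_U32_ASTATE_COUNT */
    	struct skl_astate_param astate_table[];	/* 'count' entries */
    };

    /* Matches the parser's allocation:
     * size = count * sizeof(struct skl_astate_param) + sizeof(count);
     */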
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index 31d8634e8aa1..32ce64c6b2dc 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -355,6 +355,7 @@ static int skl_resume(struct device *dev)
if (ebus->cmd_dma_state)
snd_hdac_bus_init_cmd_io(&ebus->bus);
+ ret = 0;
} else {
ret = _skl_resume(ebus);
@@ -435,19 +436,51 @@ static int skl_free(struct hdac_ext_bus *ebus)
return 0;
}
-static int skl_machine_device_register(struct skl *skl, void *driver_data)
+/*
+ * For each ssp there are 3 clocks (mclk/sclk/sclkfs).
+ * e.g. for ssp0, clocks will be named as
+ * "ssp0_mclk", "ssp0_sclk", "ssp0_sclkfs"
+ * So for skl+, there are 6 ssps, so 18 clocks will be created.
+ */
+static struct skl_ssp_clk skl_ssp_clks[] = {
+ {.name = "ssp0_mclk"}, {.name = "ssp1_mclk"}, {.name = "ssp2_mclk"},
+ {.name = "ssp3_mclk"}, {.name = "ssp4_mclk"}, {.name = "ssp5_mclk"},
+ {.name = "ssp0_sclk"}, {.name = "ssp1_sclk"}, {.name = "ssp2_sclk"},
+ {.name = "ssp3_sclk"}, {.name = "ssp4_sclk"}, {.name = "ssp5_sclk"},
+ {.name = "ssp0_sclkfs"}, {.name = "ssp1_sclkfs"},
+ {.name = "ssp2_sclkfs"},
+ {.name = "ssp3_sclkfs"}, {.name = "ssp4_sclkfs"},
+ {.name = "ssp5_sclkfs"},
+};
+
+static int skl_find_machine(struct skl *skl, void *driver_data)
{
- struct hdac_bus *bus = ebus_to_hbus(&skl->ebus);
- struct platform_device *pdev;
struct snd_soc_acpi_mach *mach = driver_data;
- int ret;
+ struct hdac_bus *bus = ebus_to_hbus(&skl->ebus);
+ struct skl_machine_pdata *pdata;
mach = snd_soc_acpi_find_machine(mach);
if (mach == NULL) {
dev_err(bus->dev, "No matching machine driver found\n");
return -ENODEV;
}
+
+ skl->mach = mach;
skl->fw_name = mach->fw_filename;
+ pdata = skl->mach->pdata;
+
+ if (mach->pdata)
+ skl->use_tplg_pcm = pdata->use_tplg_pcm;
+
+ return 0;
+}
+
+static int skl_machine_device_register(struct skl *skl)
+{
+ struct hdac_bus *bus = ebus_to_hbus(&skl->ebus);
+ struct snd_soc_acpi_mach *mach = skl->mach;
+ struct platform_device *pdev;
+ int ret;
pdev = platform_device_alloc(mach->drv_name, -1);
if (pdev == NULL) {
@@ -462,11 +495,8 @@ static int skl_machine_device_register(struct skl *skl, void *driver_data)
return -EIO;
}
- if (mach->pdata) {
- skl->use_tplg_pcm =
- ((struct skl_machine_pdata *)mach->pdata)->use_tplg_pcm;
+ if (mach->pdata)
dev_set_drvdata(&pdev->dev, mach->pdata);
- }
skl->i2s_dev = pdev;
@@ -509,6 +539,74 @@ static void skl_dmic_device_unregister(struct skl *skl)
platform_device_unregister(skl->dmic_dev);
}
+static struct skl_clk_parent_src skl_clk_src[] = {
+ { .clk_id = SKL_XTAL, .name = "xtal" },
+ { .clk_id = SKL_CARDINAL, .name = "cardinal", .rate = 24576000 },
+ { .clk_id = SKL_PLL, .name = "pll", .rate = 96000000 },
+};
+
+struct skl_clk_parent_src *skl_get_parent_clk(u8 clk_id)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(skl_clk_src); i++) {
+ if (skl_clk_src[i].clk_id == clk_id)
+ return &skl_clk_src[i];
+ }
+
+ return NULL;
+}
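
A hedged usage sketch of the lookup helper above; callers must handle the NULL return for an unknown id (exact field types live in skl-ssp-clk.h):

    struct skl_clk_parent_src *src = skl_get_parent_clk(SKL_XTAL);

    if (!src)
            return -EINVAL;         /* unknown clk_id */
    pr_debug("parent %s, rate %u\n", src->name, src->rate);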
+
+static void init_skl_xtal_rate(int pci_id)
+{
+ switch (pci_id) {
+ case 0x9d70:
+ case 0x9d71:
+ skl_clk_src[0].rate = 24000000;
+ return;
+
+ default:
+ skl_clk_src[0].rate = 19200000;
+ return;
+ }
+}
+
+static int skl_clock_device_register(struct skl *skl)
+{
+ struct platform_device_info pdevinfo = {NULL};
+ struct skl_clk_pdata *clk_pdata;
+
+ clk_pdata = devm_kzalloc(&skl->pci->dev, sizeof(*clk_pdata),
+ GFP_KERNEL);
+ if (!clk_pdata)
+ return -ENOMEM;
+
+ init_skl_xtal_rate(skl->pci->device);
+
+ clk_pdata->parent_clks = skl_clk_src;
+ clk_pdata->ssp_clks = skl_ssp_clks;
+ clk_pdata->num_clks = ARRAY_SIZE(skl_ssp_clks);
+
+ /* Query NHLT to fill the rates and parents */
+ skl_get_clks(skl, clk_pdata->ssp_clks);
+ clk_pdata->pvt_data = skl;
+
+ /* Register Platform device */
+ pdevinfo.parent = &skl->pci->dev;
+ pdevinfo.id = -1;
+ pdevinfo.name = "skl-ssp-clk";
+ pdevinfo.data = clk_pdata;
+ pdevinfo.size_data = sizeof(*clk_pdata);
+ skl->clk_dev = platform_device_register_full(&pdevinfo);
+ return PTR_ERR_OR_ZERO(skl->clk_dev);
+}
+
+static void skl_clock_device_unregister(struct skl *skl)
+{
+ if (skl->clk_dev)
+ platform_device_unregister(skl->clk_dev);
+}
+
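platform_device_register_full() copies pdevinfo.data into the child's platform data, so the clock driver bound to "skl-ssp-clk" can fetch it with dev_get_platdata(); a hedged sketch of that consumer side (the probe function name is an assumption):

    static int skl_clk_dev_probe(struct platform_device *pdev)
    {
            /* pdata was copied by platform_device_register_full() */
            struct skl_clk_pdata *pdata = dev_get_platdata(&pdev->dev);

            if (!pdata)
                    return -EINVAL;
            /* pdata->parent_clks / ->ssp_clks / ->num_clks drive clk creation */
            return 0;
    }
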
/*
* Probe the given codec address
*/
@@ -615,18 +713,30 @@ static void skl_probe_work(struct work_struct *work)
/* create codec instances */
skl_codec_create(ebus);
+ /* register platform dai and controls */
+ err = skl_platform_register(bus->dev);
+ if (err < 0) {
+ dev_err(bus->dev, "platform register failed: %d\n", err);
+ return;
+ }
+
+ if (bus->ppcap) {
+ err = skl_machine_device_register(skl);
+ if (err < 0) {
+ dev_err(bus->dev, "machine register failed: %d\n", err);
+ goto out_err;
+ }
+ }
+
if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
err = snd_hdac_display_power(bus, false);
if (err < 0) {
dev_err(bus->dev, "Cannot turn off display power on i915\n");
+ skl_machine_device_unregister(skl);
return;
}
}
- /* register platform dai and controls */
- err = skl_platform_register(bus->dev);
- if (err < 0)
- return;
/*
* we are done probing so decrement link counts
*/
@@ -791,18 +901,21 @@ static int skl_probe(struct pci_dev *pci,
/* check if dsp is there */
if (bus->ppcap) {
- err = skl_machine_device_register(skl,
- (void *)pci_id->driver_data);
+ /* create device for dsp clk */
+ err = skl_clock_device_register(skl);
+ if (err < 0)
+ goto out_clk_free;
+
+ err = skl_find_machine(skl, (void *)pci_id->driver_data);
if (err < 0)
goto out_nhlt_free;
err = skl_init_dsp(skl);
if (err < 0) {
dev_dbg(bus->dev, "error failed to register dsp\n");
- goto out_mach_free;
+ goto out_nhlt_free;
}
skl->skl_sst->enable_miscbdcge = skl_enable_miscbdcge;
-
}
if (bus->mlcap)
snd_hdac_ext_bus_get_ml_capabilities(ebus);
@@ -820,8 +933,8 @@ static int skl_probe(struct pci_dev *pci,
out_dsp_free:
skl_free_dsp(skl);
-out_mach_free:
- skl_machine_device_unregister(skl);
+out_clk_free:
+ skl_clock_device_unregister(skl);
out_nhlt_free:
skl_nhlt_free(skl->nhlt);
out_free:
@@ -872,6 +985,7 @@ static void skl_remove(struct pci_dev *pci)
skl_free_dsp(skl);
skl_machine_device_unregister(skl);
skl_dmic_device_unregister(skl);
+ skl_clock_device_unregister(skl);
skl_nhlt_remove_sysfs(skl);
skl_nhlt_free(skl->nhlt);
skl_free(ebus);
diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h
index e00cde8200dd..f411579bc713 100644
--- a/sound/soc/intel/skylake/skl.h
+++ b/sound/soc/intel/skylake/skl.h
@@ -25,9 +25,12 @@
#include <sound/hdaudio_ext.h>
#include <sound/soc.h>
#include "skl-nhlt.h"
+#include "skl-ssp-clk.h"
#define SKL_SUSPEND_DELAY 2000
+#define SKL_MAX_ASTATE_CFG 3
+
#define AZX_PCIREG_PGCTL 0x44
#define AZX_PGCTL_LSRMD_MASK (1 << 4)
#define AZX_PCIREG_CGCTL 0x48
@@ -45,6 +48,20 @@ struct skl_dsp_resource {
struct skl_debug;
+struct skl_astate_param {
+ u32 kcps;
+ u32 clk_src;
+};
+
+struct skl_astate_config {
+ u32 count;
+ struct skl_astate_param astate_table[0];
+};
+
+struct skl_fw_config {
+ struct skl_astate_config *astate_cfg;
+};
+
struct skl {
struct hdac_ext_bus ebus;
struct pci_dev *pci;
@@ -52,6 +69,7 @@ struct skl {
unsigned int init_done:1; /* delayed init status */
struct platform_device *dmic_dev;
struct platform_device *i2s_dev;
+ struct platform_device *clk_dev;
struct snd_soc_platform *platform;
struct snd_soc_dai_driver *dais;
@@ -75,6 +93,8 @@ struct skl {
u8 nr_modules;
struct skl_module **modules;
bool use_tplg_pcm;
+ struct skl_fw_config cfg;
+ struct snd_soc_acpi_mach *mach;
};
#define skl_to_ebus(s) (&(s)->ebus)
@@ -125,6 +145,8 @@ const struct skl_dsp_ops *skl_get_dsp_ops(int pci_id);
void skl_update_d0i3c(struct device *dev, bool enable);
int skl_nhlt_create_sysfs(struct skl *skl);
void skl_nhlt_remove_sysfs(struct skl *skl);
+void skl_get_clks(struct skl *skl, struct skl_ssp_clk *ssp_clks);
+struct skl_clk_parent_src *skl_get_parent_clk(u8 clk_id);
struct skl_module_cfg;
diff --git a/sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.c b/sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.c
index affa7fb25dd9..949fc3a1d025 100644
--- a/sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.c
+++ b/sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.c
@@ -14,451 +14,285 @@
* GNU General Public License for more details.
*/
-#include <sound/soc.h>
-#include <linux/regmap.h>
-#include <linux/pm_runtime.h>
-
#include "mt2701-afe-common.h"
#include "mt2701-afe-clock-ctrl.h"
-static const char *aud_clks[MT2701_CLOCK_NUM] = {
- [MT2701_AUD_INFRA_SYS_AUDIO] = "infra_sys_audio_clk",
- [MT2701_AUD_AUD_MUX1_SEL] = "top_audio_mux1_sel",
- [MT2701_AUD_AUD_MUX2_SEL] = "top_audio_mux2_sel",
- [MT2701_AUD_AUD_MUX1_DIV] = "top_audio_mux1_div",
- [MT2701_AUD_AUD_MUX2_DIV] = "top_audio_mux2_div",
- [MT2701_AUD_AUD_48K_TIMING] = "top_audio_48k_timing",
- [MT2701_AUD_AUD_44K_TIMING] = "top_audio_44k_timing",
- [MT2701_AUD_AUDPLL_MUX_SEL] = "top_audpll_mux_sel",
- [MT2701_AUD_APLL_SEL] = "top_apll_sel",
- [MT2701_AUD_AUD1PLL_98M] = "top_aud1_pll_98M",
- [MT2701_AUD_AUD2PLL_90M] = "top_aud2_pll_90M",
- [MT2701_AUD_HADDS2PLL_98M] = "top_hadds2_pll_98M",
- [MT2701_AUD_HADDS2PLL_294M] = "top_hadds2_pll_294M",
- [MT2701_AUD_AUDPLL] = "top_audpll",
- [MT2701_AUD_AUDPLL_D4] = "top_audpll_d4",
- [MT2701_AUD_AUDPLL_D8] = "top_audpll_d8",
- [MT2701_AUD_AUDPLL_D16] = "top_audpll_d16",
- [MT2701_AUD_AUDPLL_D24] = "top_audpll_d24",
- [MT2701_AUD_AUDINTBUS] = "top_audintbus_sel",
- [MT2701_AUD_CLK_26M] = "clk_26m",
- [MT2701_AUD_SYSPLL1_D4] = "top_syspll1_d4",
- [MT2701_AUD_AUD_K1_SRC_SEL] = "top_aud_k1_src_sel",
- [MT2701_AUD_AUD_K2_SRC_SEL] = "top_aud_k2_src_sel",
- [MT2701_AUD_AUD_K3_SRC_SEL] = "top_aud_k3_src_sel",
- [MT2701_AUD_AUD_K4_SRC_SEL] = "top_aud_k4_src_sel",
- [MT2701_AUD_AUD_K5_SRC_SEL] = "top_aud_k5_src_sel",
- [MT2701_AUD_AUD_K6_SRC_SEL] = "top_aud_k6_src_sel",
- [MT2701_AUD_AUD_K1_SRC_DIV] = "top_aud_k1_src_div",
- [MT2701_AUD_AUD_K2_SRC_DIV] = "top_aud_k2_src_div",
- [MT2701_AUD_AUD_K3_SRC_DIV] = "top_aud_k3_src_div",
- [MT2701_AUD_AUD_K4_SRC_DIV] = "top_aud_k4_src_div",
- [MT2701_AUD_AUD_K5_SRC_DIV] = "top_aud_k5_src_div",
- [MT2701_AUD_AUD_K6_SRC_DIV] = "top_aud_k6_src_div",
- [MT2701_AUD_AUD_I2S1_MCLK] = "top_aud_i2s1_mclk",
- [MT2701_AUD_AUD_I2S2_MCLK] = "top_aud_i2s2_mclk",
- [MT2701_AUD_AUD_I2S3_MCLK] = "top_aud_i2s3_mclk",
- [MT2701_AUD_AUD_I2S4_MCLK] = "top_aud_i2s4_mclk",
- [MT2701_AUD_AUD_I2S5_MCLK] = "top_aud_i2s5_mclk",
- [MT2701_AUD_AUD_I2S6_MCLK] = "top_aud_i2s6_mclk",
- [MT2701_AUD_ASM_M_SEL] = "top_asm_m_sel",
- [MT2701_AUD_ASM_H_SEL] = "top_asm_h_sel",
- [MT2701_AUD_UNIVPLL2_D4] = "top_univpll2_d4",
- [MT2701_AUD_UNIVPLL2_D2] = "top_univpll2_d2",
- [MT2701_AUD_SYSPLL_D5] = "top_syspll_d5",
+static const char *const base_clks[] = {
+ [MT2701_INFRA_SYS_AUDIO] = "infra_sys_audio_clk",
+ [MT2701_TOP_AUD_MCLK_SRC0] = "top_audio_mux1_sel",
+ [MT2701_TOP_AUD_MCLK_SRC1] = "top_audio_mux2_sel",
+ [MT2701_TOP_AUD_A1SYS] = "top_audio_a1sys_hp",
+ [MT2701_TOP_AUD_A2SYS] = "top_audio_a2sys_hp",
+ [MT2701_AUDSYS_AFE] = "audio_afe_pd",
+ [MT2701_AUDSYS_AFE_CONN] = "audio_afe_conn_pd",
+ [MT2701_AUDSYS_A1SYS] = "audio_a1sys_pd",
+ [MT2701_AUDSYS_A2SYS] = "audio_a2sys_pd",
};
int mt2701_init_clock(struct mtk_base_afe *afe)
{
struct mt2701_afe_private *afe_priv = afe->platform_priv;
- int i = 0;
-
- for (i = 0; i < MT2701_CLOCK_NUM; i++) {
- afe_priv->clocks[i] = devm_clk_get(afe->dev, aud_clks[i]);
- if (IS_ERR(afe_priv->clocks[i])) {
- dev_warn(afe->dev, "%s devm_clk_get %s fail\n",
- __func__, aud_clks[i]);
- return PTR_ERR(aud_clks[i]);
+ int i;
+
+ for (i = 0; i < MT2701_BASE_CLK_NUM; i++) {
+ afe_priv->base_ck[i] = devm_clk_get(afe->dev, base_clks[i]);
+ if (IS_ERR(afe_priv->base_ck[i])) {
+ dev_err(afe->dev, "failed to get %s\n", base_clks[i]);
+ return PTR_ERR(afe_priv->base_ck[i]);
+ }
+ }
+
+ /* Get I2S related clocks */
+ for (i = 0; i < MT2701_I2S_NUM; i++) {
+ struct mt2701_i2s_path *i2s_path = &afe_priv->i2s_path[i];
+ char name[13];
+
+ snprintf(name, sizeof(name), "i2s%d_src_sel", i);
+ i2s_path->sel_ck = devm_clk_get(afe->dev, name);
+ if (IS_ERR(i2s_path->sel_ck)) {
+ dev_err(afe->dev, "failed to get %s\n", name);
+ return PTR_ERR(i2s_path->sel_ck);
+ }
+
+ snprintf(name, sizeof(name), "i2s%d_src_div", i);
+ i2s_path->div_ck = devm_clk_get(afe->dev, name);
+ if (IS_ERR(i2s_path->div_ck)) {
+ dev_err(afe->dev, "failed to get %s\n", name);
+ return PTR_ERR(i2s_path->div_ck);
+ }
+
+ snprintf(name, sizeof(name), "i2s%d_mclk_en", i);
+ i2s_path->mclk_ck = devm_clk_get(afe->dev, name);
+ if (IS_ERR(i2s_path->mclk_ck)) {
+ dev_err(afe->dev, "failed to get %s\n", name);
+ return PTR_ERR(i2s_path->mclk_ck);
+ }
+
+ snprintf(name, sizeof(name), "i2so%d_hop_ck", i);
+ i2s_path->hop_ck[I2S_OUT] = devm_clk_get(afe->dev, name);
+ if (IS_ERR(i2s_path->hop_ck[I2S_OUT])) {
+ dev_err(afe->dev, "failed to get %s\n", name);
+ return PTR_ERR(i2s_path->hop_ck[I2S_OUT]);
+ }
+
+ snprintf(name, sizeof(name), "i2si%d_hop_ck", i);
+ i2s_path->hop_ck[I2S_IN] = devm_clk_get(afe->dev, name);
+ if (IS_ERR(i2s_path->hop_ck[I2S_IN])) {
+ dev_err(afe->dev, "failed to get %s\n", name);
+ return PTR_ERR(i2s_path->hop_ck[I2S_IN]);
+ }
+
+ snprintf(name, sizeof(name), "asrc%d_out_ck", i);
+ i2s_path->asrco_ck = devm_clk_get(afe->dev, name);
+ if (IS_ERR(i2s_path->asrco_ck)) {
+ dev_err(afe->dev, "failed to get %s\n", name);
+ return PTR_ERR(i2s_path->asrco_ck);
}
}
+ /* Some platforms may support BT path */
+ afe_priv->mrgif_ck = devm_clk_get(afe->dev, "audio_mrgif_pd");
+ if (IS_ERR(afe_priv->mrgif_ck)) {
+ if (PTR_ERR(afe_priv->mrgif_ck) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ afe_priv->mrgif_ck = NULL;
+ }
+
return 0;
}
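
The BT-merge clock above is optional: any lookup error other than -EPROBE_DEFER is swallowed and the handle set to NULL, which the clk API treats as a no-op. On later kernels the same pattern can be written with devm_clk_get_optional(); a hedged sketch (that helper did not exist when this patch was written):

    /* Sketch: devm_clk_get_optional() returns NULL when the clock is
     * simply absent, and an ERR_PTR (including -EPROBE_DEFER) otherwise. */
    struct clk *mrgif = devm_clk_get_optional(afe->dev, "audio_mrgif_pd");

    if (IS_ERR(mrgif))
            return PTR_ERR(mrgif);
    clk_prepare_enable(mrgif);      /* NULL clk: harmless no-op */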
-int mt2701_afe_enable_clock(struct mtk_base_afe *afe)
+int mt2701_afe_enable_i2s(struct mtk_base_afe *afe, int id, int dir)
{
- int ret = 0;
+ struct mt2701_afe_private *afe_priv = afe->platform_priv;
+ struct mt2701_i2s_path *i2s_path = &afe_priv->i2s_path[id];
+ int ret;
- ret = mt2701_turn_on_a1sys_clock(afe);
+ ret = clk_prepare_enable(i2s_path->asrco_ck);
if (ret) {
- dev_err(afe->dev, "%s turn_on_a1sys_clock fail %d\n",
- __func__, ret);
+ dev_err(afe->dev, "failed to enable ASRC clock %d\n", ret);
return ret;
}
- ret = mt2701_turn_on_a2sys_clock(afe);
+ ret = clk_prepare_enable(i2s_path->hop_ck[dir]);
if (ret) {
- dev_err(afe->dev, "%s turn_on_a2sys_clock fail %d\n",
- __func__, ret);
- mt2701_turn_off_a1sys_clock(afe);
- return ret;
+ dev_err(afe->dev, "failed to enable I2S clock %d\n", ret);
+ goto err_hop_ck;
}
- ret = mt2701_turn_on_afe_clock(afe);
- if (ret) {
- dev_err(afe->dev, "%s turn_on_afe_clock fail %d\n",
- __func__, ret);
- mt2701_turn_off_a1sys_clock(afe);
- mt2701_turn_off_a2sys_clock(afe);
- return ret;
- }
+ return 0;
- regmap_update_bits(afe->regmap, ASYS_TOP_CON,
- AUDIO_TOP_CON0_A1SYS_A2SYS_ON,
- AUDIO_TOP_CON0_A1SYS_A2SYS_ON);
- regmap_update_bits(afe->regmap, AFE_DAC_CON0,
- AFE_DAC_CON0_AFE_ON,
- AFE_DAC_CON0_AFE_ON);
- regmap_write(afe->regmap, PWR2_TOP_CON,
- PWR2_TOP_CON_INIT_VAL);
- regmap_write(afe->regmap, PWR1_ASM_CON1,
- PWR1_ASM_CON1_INIT_VAL);
- regmap_write(afe->regmap, PWR2_ASM_CON1,
- PWR2_ASM_CON1_INIT_VAL);
+err_hop_ck:
+ clk_disable_unprepare(i2s_path->asrco_ck);
- return 0;
+ return ret;
}
-void mt2701_afe_disable_clock(struct mtk_base_afe *afe)
+void mt2701_afe_disable_i2s(struct mtk_base_afe *afe, int id, int dir)
{
- mt2701_turn_off_afe_clock(afe);
- mt2701_turn_off_a1sys_clock(afe);
- mt2701_turn_off_a2sys_clock(afe);
- regmap_update_bits(afe->regmap, ASYS_TOP_CON,
- AUDIO_TOP_CON0_A1SYS_A2SYS_ON, 0);
- regmap_update_bits(afe->regmap, AFE_DAC_CON0,
- AFE_DAC_CON0_AFE_ON, 0);
+ struct mt2701_afe_private *afe_priv = afe->platform_priv;
+ struct mt2701_i2s_path *i2s_path = &afe_priv->i2s_path[id];
+
+ clk_disable_unprepare(i2s_path->hop_ck[dir]);
+ clk_disable_unprepare(i2s_path->asrco_ck);
}
-int mt2701_turn_on_a1sys_clock(struct mtk_base_afe *afe)
+int mt2701_afe_enable_mclk(struct mtk_base_afe *afe, int id)
{
struct mt2701_afe_private *afe_priv = afe->platform_priv;
- int ret = 0;
-
- /* Set Mux */
- ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_MUX1_SEL]);
- if (ret) {
- dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
- __func__, aud_clks[MT2701_AUD_AUD_MUX1_SEL], ret);
- goto A1SYS_CLK_AUD_MUX1_SEL_ERR;
- }
-
- ret = clk_set_parent(afe_priv->clocks[MT2701_AUD_AUD_MUX1_SEL],
- afe_priv->clocks[MT2701_AUD_AUD1PLL_98M]);
- if (ret) {
- dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n", __func__,
- aud_clks[MT2701_AUD_AUD_MUX1_SEL],
- aud_clks[MT2701_AUD_AUD1PLL_98M], ret);
- goto A1SYS_CLK_AUD_MUX1_SEL_ERR;
- }
-
- /* Set Divider */
- ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_MUX1_DIV]);
- if (ret) {
- dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
- __func__,
- aud_clks[MT2701_AUD_AUD_MUX1_DIV],
- ret);
- goto A1SYS_CLK_AUD_MUX1_DIV_ERR;
- }
-
- ret = clk_set_rate(afe_priv->clocks[MT2701_AUD_AUD_MUX1_DIV],
- MT2701_AUD_AUD_MUX1_DIV_RATE);
- if (ret) {
- dev_err(afe->dev, "%s clk_set_parent %s-%d fail %d\n", __func__,
- aud_clks[MT2701_AUD_AUD_MUX1_DIV],
- MT2701_AUD_AUD_MUX1_DIV_RATE, ret);
- goto A1SYS_CLK_AUD_MUX1_DIV_ERR;
- }
+ struct mt2701_i2s_path *i2s_path = &afe_priv->i2s_path[id];
- /* Enable clock gate */
- ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_48K_TIMING]);
- if (ret) {
- dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
- __func__, aud_clks[MT2701_AUD_AUD_48K_TIMING], ret);
- goto A1SYS_CLK_AUD_48K_ERR;
- }
+ return clk_prepare_enable(i2s_path->mclk_ck);
+}
- /* Enable infra audio */
- ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
- if (ret) {
- dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
- __func__, aud_clks[MT2701_AUD_INFRA_SYS_AUDIO], ret);
- goto A1SYS_CLK_INFRA_ERR;
- }
+void mt2701_afe_disable_mclk(struct mtk_base_afe *afe, int id)
+{
+ struct mt2701_afe_private *afe_priv = afe->platform_priv;
+ struct mt2701_i2s_path *i2s_path = &afe_priv->i2s_path[id];
- return 0;
+ clk_disable_unprepare(i2s_path->mclk_ck);
+}
-A1SYS_CLK_INFRA_ERR:
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
-A1SYS_CLK_AUD_48K_ERR:
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_48K_TIMING]);
-A1SYS_CLK_AUD_MUX1_DIV_ERR:
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX1_DIV]);
-A1SYS_CLK_AUD_MUX1_SEL_ERR:
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX1_SEL]);
+int mt2701_enable_btmrg_clk(struct mtk_base_afe *afe)
+{
+ struct mt2701_afe_private *afe_priv = afe->platform_priv;
- return ret;
+ return clk_prepare_enable(afe_priv->mrgif_ck);
}
-void mt2701_turn_off_a1sys_clock(struct mtk_base_afe *afe)
+void mt2701_disable_btmrg_clk(struct mtk_base_afe *afe)
{
struct mt2701_afe_private *afe_priv = afe->platform_priv;
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_48K_TIMING]);
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX1_DIV]);
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX1_SEL]);
+ clk_disable_unprepare(afe_priv->mrgif_ck);
}
-int mt2701_turn_on_a2sys_clock(struct mtk_base_afe *afe)
+static int mt2701_afe_enable_audsys(struct mtk_base_afe *afe)
{
struct mt2701_afe_private *afe_priv = afe->platform_priv;
- int ret = 0;
+ int ret;
- /* Set Mux */
- ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_MUX2_SEL]);
- if (ret) {
- dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
- __func__, aud_clks[MT2701_AUD_AUD_MUX2_SEL], ret);
- goto A2SYS_CLK_AUD_MUX2_SEL_ERR;
- }
+ /* Enable infra clock gate */
+ ret = clk_prepare_enable(afe_priv->base_ck[MT2701_INFRA_SYS_AUDIO]);
+ if (ret)
+ return ret;
- ret = clk_set_parent(afe_priv->clocks[MT2701_AUD_AUD_MUX2_SEL],
- afe_priv->clocks[MT2701_AUD_AUD2PLL_90M]);
- if (ret) {
- dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n", __func__,
- aud_clks[MT2701_AUD_AUD_MUX2_SEL],
- aud_clks[MT2701_AUD_AUD2PLL_90M], ret);
- goto A2SYS_CLK_AUD_MUX2_SEL_ERR;
- }
+ /* Enable top a1sys clock gate */
+ ret = clk_prepare_enable(afe_priv->base_ck[MT2701_TOP_AUD_A1SYS]);
+ if (ret)
+ goto err_a1sys;
- /* Set Divider */
- ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_MUX2_DIV]);
- if (ret) {
- dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
- __func__, aud_clks[MT2701_AUD_AUD_MUX2_DIV], ret);
- goto A2SYS_CLK_AUD_MUX2_DIV_ERR;
- }
+ /* Enable top a2sys clock gate */
+ ret = clk_prepare_enable(afe_priv->base_ck[MT2701_TOP_AUD_A2SYS]);
+ if (ret)
+ goto err_a2sys;
- ret = clk_set_rate(afe_priv->clocks[MT2701_AUD_AUD_MUX2_DIV],
- MT2701_AUD_AUD_MUX2_DIV_RATE);
- if (ret) {
- dev_err(afe->dev, "%s clk_set_parent %s-%d fail %d\n", __func__,
- aud_clks[MT2701_AUD_AUD_MUX2_DIV],
- MT2701_AUD_AUD_MUX2_DIV_RATE, ret);
- goto A2SYS_CLK_AUD_MUX2_DIV_ERR;
- }
+ /* Internal clock gates */
+ ret = clk_prepare_enable(afe_priv->base_ck[MT2701_AUDSYS_AFE]);
+ if (ret)
+ goto err_afe;
- /* Enable clock gate */
- ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUD_44K_TIMING]);
- if (ret) {
- dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
- __func__, aud_clks[MT2701_AUD_AUD_44K_TIMING], ret);
- goto A2SYS_CLK_AUD_44K_ERR;
- }
+ ret = clk_prepare_enable(afe_priv->base_ck[MT2701_AUDSYS_A1SYS]);
+ if (ret)
+ goto err_audio_a1sys;
- /* Enable infra audio */
- ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
- if (ret) {
- dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
- __func__, aud_clks[MT2701_AUD_INFRA_SYS_AUDIO], ret);
- goto A2SYS_CLK_INFRA_ERR;
- }
+ ret = clk_prepare_enable(afe_priv->base_ck[MT2701_AUDSYS_A2SYS]);
+ if (ret)
+ goto err_audio_a2sys;
+
+ ret = clk_prepare_enable(afe_priv->base_ck[MT2701_AUDSYS_AFE_CONN]);
+ if (ret)
+ goto err_afe_conn;
return 0;
-A2SYS_CLK_INFRA_ERR:
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
-A2SYS_CLK_AUD_44K_ERR:
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_44K_TIMING]);
-A2SYS_CLK_AUD_MUX2_DIV_ERR:
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX2_DIV]);
-A2SYS_CLK_AUD_MUX2_SEL_ERR:
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX2_SEL]);
+err_afe_conn:
+ clk_disable_unprepare(afe_priv->base_ck[MT2701_AUDSYS_A2SYS]);
+err_audio_a2sys:
+ clk_disable_unprepare(afe_priv->base_ck[MT2701_AUDSYS_A1SYS]);
+err_audio_a1sys:
+ clk_disable_unprepare(afe_priv->base_ck[MT2701_AUDSYS_AFE]);
+err_afe:
+ clk_disable_unprepare(afe_priv->base_ck[MT2701_TOP_AUD_A2SYS]);
+err_a2sys:
+ clk_disable_unprepare(afe_priv->base_ck[MT2701_TOP_AUD_A1SYS]);
+err_a1sys:
+ clk_disable_unprepare(afe_priv->base_ck[MT2701_INFRA_SYS_AUDIO]);
return ret;
}
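
The enable path above follows the standard kernel unwind idiom: each failing clk_prepare_enable() jumps to a label that disables, in reverse order, only the clocks already enabled. A minimal generic sketch (clock names are placeholders):

    ret = clk_prepare_enable(ck_a);
    if (ret)
            return ret;

    ret = clk_prepare_enable(ck_b);
    if (ret)
            goto err_a;

    return 0;

    err_a:
            clk_disable_unprepare(ck_a);    /* undo only what succeeded */
            return ret;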
-void mt2701_turn_off_a2sys_clock(struct mtk_base_afe *afe)
+static void mt2701_afe_disable_audsys(struct mtk_base_afe *afe)
{
struct mt2701_afe_private *afe_priv = afe->platform_priv;
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_44K_TIMING]);
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX2_DIV]);
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUD_MUX2_SEL]);
+ clk_disable_unprepare(afe_priv->base_ck[MT2701_AUDSYS_AFE_CONN]);
+ clk_disable_unprepare(afe_priv->base_ck[MT2701_AUDSYS_A2SYS]);
+ clk_disable_unprepare(afe_priv->base_ck[MT2701_AUDSYS_A1SYS]);
+ clk_disable_unprepare(afe_priv->base_ck[MT2701_AUDSYS_AFE]);
+ clk_disable_unprepare(afe_priv->base_ck[MT2701_TOP_AUD_A1SYS]);
+ clk_disable_unprepare(afe_priv->base_ck[MT2701_TOP_AUD_A2SYS]);
+ clk_disable_unprepare(afe_priv->base_ck[MT2701_INFRA_SYS_AUDIO]);
}
-int mt2701_turn_on_afe_clock(struct mtk_base_afe *afe)
+int mt2701_afe_enable_clock(struct mtk_base_afe *afe)
{
- struct mt2701_afe_private *afe_priv = afe->platform_priv;
int ret;
- /* enable INFRA_SYS */
- ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
- if (ret) {
- dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
- __func__, aud_clks[MT2701_AUD_INFRA_SYS_AUDIO], ret);
- goto AFE_AUD_INFRA_ERR;
- }
-
- /* Set MT2701_AUD_AUDINTBUS to MT2701_AUD_SYSPLL1_D4 */
- ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_AUDINTBUS]);
- if (ret) {
- dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
- __func__, aud_clks[MT2701_AUD_AUDINTBUS], ret);
- goto AFE_AUD_AUDINTBUS_ERR;
- }
-
- ret = clk_set_parent(afe_priv->clocks[MT2701_AUD_AUDINTBUS],
- afe_priv->clocks[MT2701_AUD_SYSPLL1_D4]);
- if (ret) {
- dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n", __func__,
- aud_clks[MT2701_AUD_AUDINTBUS],
- aud_clks[MT2701_AUD_SYSPLL1_D4], ret);
- goto AFE_AUD_AUDINTBUS_ERR;
- }
-
- /* Set MT2701_AUD_ASM_H_SEL to MT2701_AUD_UNIVPLL2_D2 */
- ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_ASM_H_SEL]);
+ /* Enable audio system */
+ ret = mt2701_afe_enable_audsys(afe);
if (ret) {
- dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
- __func__, aud_clks[MT2701_AUD_ASM_H_SEL], ret);
- goto AFE_AUD_ASM_H_ERR;
- }
-
- ret = clk_set_parent(afe_priv->clocks[MT2701_AUD_ASM_H_SEL],
- afe_priv->clocks[MT2701_AUD_UNIVPLL2_D2]);
- if (ret) {
- dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n", __func__,
- aud_clks[MT2701_AUD_ASM_H_SEL],
- aud_clks[MT2701_AUD_UNIVPLL2_D2], ret);
- goto AFE_AUD_ASM_H_ERR;
- }
-
- /* Set MT2701_AUD_ASM_M_SEL to MT2701_AUD_UNIVPLL2_D4 */
- ret = clk_prepare_enable(afe_priv->clocks[MT2701_AUD_ASM_M_SEL]);
- if (ret) {
- dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
- __func__, aud_clks[MT2701_AUD_ASM_M_SEL], ret);
- goto AFE_AUD_ASM_M_ERR;
+ dev_err(afe->dev, "failed to enable audio system %d\n", ret);
+ return ret;
}
- ret = clk_set_parent(afe_priv->clocks[MT2701_AUD_ASM_M_SEL],
- afe_priv->clocks[MT2701_AUD_UNIVPLL2_D4]);
- if (ret) {
- dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n", __func__,
- aud_clks[MT2701_AUD_ASM_M_SEL],
- aud_clks[MT2701_AUD_UNIVPLL2_D4], ret);
- goto AFE_AUD_ASM_M_ERR;
- }
+ regmap_update_bits(afe->regmap, ASYS_TOP_CON,
+ ASYS_TOP_CON_ASYS_TIMING_ON,
+ ASYS_TOP_CON_ASYS_TIMING_ON);
+ regmap_update_bits(afe->regmap, AFE_DAC_CON0,
+ AFE_DAC_CON0_AFE_ON,
+ AFE_DAC_CON0_AFE_ON);
- regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
- AUDIO_TOP_CON0_PDN_AFE, 0);
- regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
- AUDIO_TOP_CON0_PDN_APLL_CK, 0);
- regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
- AUDIO_TOP_CON4_PDN_A1SYS, 0);
- regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
- AUDIO_TOP_CON4_PDN_A2SYS, 0);
- regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
- AUDIO_TOP_CON4_PDN_AFE_CONN, 0);
+ /* Configure ASRC */
+ regmap_write(afe->regmap, PWR1_ASM_CON1, PWR1_ASM_CON1_INIT_VAL);
+ regmap_write(afe->regmap, PWR2_ASM_CON1, PWR2_ASM_CON1_INIT_VAL);
return 0;
-
-AFE_AUD_ASM_M_ERR:
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_ASM_M_SEL]);
-AFE_AUD_ASM_H_ERR:
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_ASM_H_SEL]);
-AFE_AUD_AUDINTBUS_ERR:
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUDINTBUS]);
-AFE_AUD_INFRA_ERR:
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
-
- return ret;
}
-void mt2701_turn_off_afe_clock(struct mtk_base_afe *afe)
+int mt2701_afe_disable_clock(struct mtk_base_afe *afe)
{
- struct mt2701_afe_private *afe_priv = afe->platform_priv;
+ regmap_update_bits(afe->regmap, ASYS_TOP_CON,
+ ASYS_TOP_CON_ASYS_TIMING_ON, 0);
+ regmap_update_bits(afe->regmap, AFE_DAC_CON0,
+ AFE_DAC_CON0_AFE_ON, 0);
+
+ mt2701_afe_disable_audsys(afe);
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_INFRA_SYS_AUDIO]);
-
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_AUDINTBUS]);
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_ASM_H_SEL]);
- clk_disable_unprepare(afe_priv->clocks[MT2701_AUD_ASM_M_SEL]);
-
- regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
- AUDIO_TOP_CON0_PDN_AFE, AUDIO_TOP_CON0_PDN_AFE);
- regmap_update_bits(afe->regmap, AUDIO_TOP_CON0,
- AUDIO_TOP_CON0_PDN_APLL_CK,
- AUDIO_TOP_CON0_PDN_APLL_CK);
- regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
- AUDIO_TOP_CON4_PDN_A1SYS,
- AUDIO_TOP_CON4_PDN_A1SYS);
- regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
- AUDIO_TOP_CON4_PDN_A2SYS,
- AUDIO_TOP_CON4_PDN_A2SYS);
- regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
- AUDIO_TOP_CON4_PDN_AFE_CONN,
- AUDIO_TOP_CON4_PDN_AFE_CONN);
+ return 0;
}
void mt2701_mclk_configuration(struct mtk_base_afe *afe, int id, int domain,
int mclk)
{
- struct mt2701_afe_private *afe_priv = afe->platform_priv;
+ struct mt2701_afe_private *priv = afe->platform_priv;
+ struct mt2701_i2s_path *i2s_path = &priv->i2s_path[id];
int ret;
- int aud_src_div_id = MT2701_AUD_AUD_K1_SRC_DIV + id;
- int aud_src_clk_id = MT2701_AUD_AUD_K1_SRC_SEL + id;
- /* Set MCLK Kx_SRC_SEL(domain) */
- ret = clk_prepare_enable(afe_priv->clocks[aud_src_clk_id]);
- if (ret)
- dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
- __func__, aud_clks[aud_src_clk_id], ret);
-
- if (domain == 0) {
- ret = clk_set_parent(afe_priv->clocks[aud_src_clk_id],
- afe_priv->clocks[MT2701_AUD_AUD_MUX1_SEL]);
- if (ret)
- dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
- __func__, aud_clks[aud_src_clk_id],
- aud_clks[MT2701_AUD_AUD_MUX1_SEL], ret);
- } else {
- ret = clk_set_parent(afe_priv->clocks[aud_src_clk_id],
- afe_priv->clocks[MT2701_AUD_AUD_MUX2_SEL]);
- if (ret)
- dev_err(afe->dev, "%s clk_set_parent %s-%s fail %d\n",
- __func__, aud_clks[aud_src_clk_id],
- aud_clks[MT2701_AUD_AUD_MUX2_SEL], ret);
- }
- clk_disable_unprepare(afe_priv->clocks[aud_src_clk_id]);
+ /* Set mclk source */
+ if (domain == 0)
+ ret = clk_set_parent(i2s_path->sel_ck,
+ priv->base_ck[MT2701_TOP_AUD_MCLK_SRC0]);
+ else
+ ret = clk_set_parent(i2s_path->sel_ck,
+ priv->base_ck[MT2701_TOP_AUD_MCLK_SRC1]);
- /* Set MCLK Kx_SRC_DIV(divider) */
- ret = clk_prepare_enable(afe_priv->clocks[aud_src_div_id]);
if (ret)
- dev_err(afe->dev, "%s clk_prepare_enable %s fail %d\n",
- __func__, aud_clks[aud_src_div_id], ret);
+ dev_err(afe->dev, "failed to set domain%d mclk source %d\n",
+ domain, ret);
- ret = clk_set_rate(afe_priv->clocks[aud_src_div_id], mclk);
+ /* Set mclk divider */
+ ret = clk_set_rate(i2s_path->div_ck, mclk);
if (ret)
- dev_err(afe->dev, "%s clk_set_rate %s-%d fail %d\n", __func__,
- aud_clks[aud_src_div_id], mclk, ret);
- clk_disable_unprepare(afe_priv->clocks[aud_src_div_id]);
+ dev_err(afe->dev, "failed to set mclk divider %d\n", ret);
}
-
-MODULE_DESCRIPTION("MT2701 afe clock control");
-MODULE_AUTHOR("Garlic Tseng <garlic.tseng@mediatek.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.h b/sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.h
index 6497d570cf09..15417d9d6597 100644
--- a/sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.h
+++ b/sound/soc/mediatek/mt2701/mt2701-afe-clock-ctrl.h
@@ -21,16 +21,15 @@ struct mtk_base_afe;
int mt2701_init_clock(struct mtk_base_afe *afe);
int mt2701_afe_enable_clock(struct mtk_base_afe *afe);
-void mt2701_afe_disable_clock(struct mtk_base_afe *afe);
+int mt2701_afe_disable_clock(struct mtk_base_afe *afe);
-int mt2701_turn_on_a1sys_clock(struct mtk_base_afe *afe);
-void mt2701_turn_off_a1sys_clock(struct mtk_base_afe *afe);
+int mt2701_afe_enable_i2s(struct mtk_base_afe *afe, int id, int dir);
+void mt2701_afe_disable_i2s(struct mtk_base_afe *afe, int id, int dir);
+int mt2701_afe_enable_mclk(struct mtk_base_afe *afe, int id);
+void mt2701_afe_disable_mclk(struct mtk_base_afe *afe, int id);
-int mt2701_turn_on_a2sys_clock(struct mtk_base_afe *afe);
-void mt2701_turn_off_a2sys_clock(struct mtk_base_afe *afe);
-
-int mt2701_turn_on_afe_clock(struct mtk_base_afe *afe);
-void mt2701_turn_off_afe_clock(struct mtk_base_afe *afe);
+int mt2701_enable_btmrg_clk(struct mtk_base_afe *afe);
+void mt2701_disable_btmrg_clk(struct mtk_base_afe *afe);
void mt2701_mclk_configuration(struct mtk_base_afe *afe, int id, int domain,
int mclk);
diff --git a/sound/soc/mediatek/mt2701/mt2701-afe-common.h b/sound/soc/mediatek/mt2701/mt2701-afe-common.h
index c19430e98adf..ae8ddeacfbfe 100644
--- a/sound/soc/mediatek/mt2701/mt2701-afe-common.h
+++ b/sound/soc/mediatek/mt2701/mt2701-afe-common.h
@@ -16,6 +16,7 @@
#ifndef _MT_2701_AFE_COMMON_H_
#define _MT_2701_AFE_COMMON_H_
+
#include <sound/soc.h>
#include <linux/clk.h>
#include <linux/regmap.h>
@@ -25,16 +26,7 @@
#define MT2701_STREAM_DIR_NUM (SNDRV_PCM_STREAM_LAST + 1)
#define MT2701_PLL_DOMAIN_0_RATE 98304000
#define MT2701_PLL_DOMAIN_1_RATE 90316800
-#define MT2701_AUD_AUD_MUX1_DIV_RATE (MT2701_PLL_DOMAIN_0_RATE / 2)
-#define MT2701_AUD_AUD_MUX2_DIV_RATE (MT2701_PLL_DOMAIN_1_RATE / 2)
-
-enum {
- MT2701_I2S_1,
- MT2701_I2S_2,
- MT2701_I2S_3,
- MT2701_I2S_4,
- MT2701_I2S_NUM,
-};
+#define MT2701_I2S_NUM 4
enum {
MT2701_MEMIF_DL1,
@@ -62,60 +54,23 @@ enum {
};
enum {
- MT2701_IRQ_ASYS_START,
- MT2701_IRQ_ASYS_IRQ1 = MT2701_IRQ_ASYS_START,
+ MT2701_IRQ_ASYS_IRQ1,
MT2701_IRQ_ASYS_IRQ2,
MT2701_IRQ_ASYS_IRQ3,
MT2701_IRQ_ASYS_END,
};
-/* 2701 clock def */
-enum audio_system_clock_type {
- MT2701_AUD_INFRA_SYS_AUDIO,
- MT2701_AUD_AUD_MUX1_SEL,
- MT2701_AUD_AUD_MUX2_SEL,
- MT2701_AUD_AUD_MUX1_DIV,
- MT2701_AUD_AUD_MUX2_DIV,
- MT2701_AUD_AUD_48K_TIMING,
- MT2701_AUD_AUD_44K_TIMING,
- MT2701_AUD_AUDPLL_MUX_SEL,
- MT2701_AUD_APLL_SEL,
- MT2701_AUD_AUD1PLL_98M,
- MT2701_AUD_AUD2PLL_90M,
- MT2701_AUD_HADDS2PLL_98M,
- MT2701_AUD_HADDS2PLL_294M,
- MT2701_AUD_AUDPLL,
- MT2701_AUD_AUDPLL_D4,
- MT2701_AUD_AUDPLL_D8,
- MT2701_AUD_AUDPLL_D16,
- MT2701_AUD_AUDPLL_D24,
- MT2701_AUD_AUDINTBUS,
- MT2701_AUD_CLK_26M,
- MT2701_AUD_SYSPLL1_D4,
- MT2701_AUD_AUD_K1_SRC_SEL,
- MT2701_AUD_AUD_K2_SRC_SEL,
- MT2701_AUD_AUD_K3_SRC_SEL,
- MT2701_AUD_AUD_K4_SRC_SEL,
- MT2701_AUD_AUD_K5_SRC_SEL,
- MT2701_AUD_AUD_K6_SRC_SEL,
- MT2701_AUD_AUD_K1_SRC_DIV,
- MT2701_AUD_AUD_K2_SRC_DIV,
- MT2701_AUD_AUD_K3_SRC_DIV,
- MT2701_AUD_AUD_K4_SRC_DIV,
- MT2701_AUD_AUD_K5_SRC_DIV,
- MT2701_AUD_AUD_K6_SRC_DIV,
- MT2701_AUD_AUD_I2S1_MCLK,
- MT2701_AUD_AUD_I2S2_MCLK,
- MT2701_AUD_AUD_I2S3_MCLK,
- MT2701_AUD_AUD_I2S4_MCLK,
- MT2701_AUD_AUD_I2S5_MCLK,
- MT2701_AUD_AUD_I2S6_MCLK,
- MT2701_AUD_ASM_M_SEL,
- MT2701_AUD_ASM_H_SEL,
- MT2701_AUD_UNIVPLL2_D4,
- MT2701_AUD_UNIVPLL2_D2,
- MT2701_AUD_SYSPLL_D5,
- MT2701_CLOCK_NUM
+enum audio_base_clock {
+ MT2701_INFRA_SYS_AUDIO,
+ MT2701_TOP_AUD_MCLK_SRC0,
+ MT2701_TOP_AUD_MCLK_SRC1,
+ MT2701_TOP_AUD_A1SYS,
+ MT2701_TOP_AUD_A2SYS,
+ MT2701_AUDSYS_AFE,
+ MT2701_AUDSYS_AFE_CONN,
+ MT2701_AUDSYS_A1SYS,
+ MT2701_AUDSYS_A2SYS,
+ MT2701_BASE_CLK_NUM,
};
static const unsigned int mt2701_afe_backup_list[] = {
@@ -139,12 +94,8 @@ static const unsigned int mt2701_afe_backup_list[] = {
AFE_MEMIF_PBUF_SIZE,
};
-struct snd_pcm_substream;
-struct mtk_base_irq_data;
-
struct mt2701_i2s_data {
int i2s_ctrl_reg;
- int i2s_pwn_shift;
int i2s_asrc_fs_shift;
int i2s_asrc_fs_mask;
};
@@ -160,12 +111,18 @@ struct mt2701_i2s_path {
int mclk_rate;
int on[I2S_DIR_NUM];
int occupied[I2S_DIR_NUM];
- const struct mt2701_i2s_data *i2s_data[2];
+ const struct mt2701_i2s_data *i2s_data[I2S_DIR_NUM];
+ struct clk *hop_ck[I2S_DIR_NUM];
+ struct clk *sel_ck;
+ struct clk *div_ck;
+ struct clk *mclk_ck;
+ struct clk *asrco_ck;
};
struct mt2701_afe_private {
- struct clk *clocks[MT2701_CLOCK_NUM];
struct mt2701_i2s_path i2s_path[MT2701_I2S_NUM];
+ struct clk *base_ck[MT2701_BASE_CLK_NUM];
+ struct clk *mrgif_ck;
bool mrg_enable[MT2701_STREAM_DIR_NUM];
};
diff --git a/sound/soc/mediatek/mt2701/mt2701-afe-pcm.c b/sound/soc/mediatek/mt2701/mt2701-afe-pcm.c
index 8fda182f849b..5bc4e00a4a29 100644
--- a/sound/soc/mediatek/mt2701/mt2701-afe-pcm.c
+++ b/sound/soc/mediatek/mt2701/mt2701-afe-pcm.c
@@ -17,19 +17,16 @@
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pm_runtime.h>
-#include <sound/soc.h>
#include "mt2701-afe-common.h"
-
#include "mt2701-afe-clock-ctrl.h"
#include "../common/mtk-afe-platform-driver.h"
#include "../common/mtk-afe-fe-dai.h"
-#define AFE_IRQ_STATUS_BITS 0xff
-
static const struct snd_pcm_hardware mt2701_afe_hardware = {
.info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED
| SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_MMAP_VALID,
@@ -97,40 +94,26 @@ static int mt2701_afe_i2s_startup(struct snd_pcm_substream *substream,
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
- struct mt2701_afe_private *afe_priv = afe->platform_priv;
int i2s_num = mt2701_dai_num_to_i2s(afe, dai->id);
- int clk_num = MT2701_AUD_AUD_I2S1_MCLK + i2s_num;
- int ret = 0;
if (i2s_num < 0)
return i2s_num;
- /* enable mclk */
- ret = clk_prepare_enable(afe_priv->clocks[clk_num]);
- if (ret)
- dev_err(afe->dev, "Failed to enable mclk for I2S: %d\n",
- i2s_num);
-
- return ret;
+ return mt2701_afe_enable_mclk(afe, i2s_num);
}
static int mt2701_afe_i2s_path_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai,
+ int i2s_num,
int dir_invert)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
struct mt2701_afe_private *afe_priv = afe->platform_priv;
- int i2s_num = mt2701_dai_num_to_i2s(afe, dai->id);
- struct mt2701_i2s_path *i2s_path;
+ struct mt2701_i2s_path *i2s_path = &afe_priv->i2s_path[i2s_num];
const struct mt2701_i2s_data *i2s_data;
int stream_dir = substream->stream;
- if (i2s_num < 0)
- return i2s_num;
-
- i2s_path = &afe_priv->i2s_path[i2s_num];
-
if (dir_invert) {
if (stream_dir == SNDRV_PCM_STREAM_PLAYBACK)
stream_dir = SNDRV_PCM_STREAM_CAPTURE;
@@ -151,9 +134,9 @@ static int mt2701_afe_i2s_path_shutdown(struct snd_pcm_substream *substream,
/* disable i2s */
regmap_update_bits(afe->regmap, i2s_data->i2s_ctrl_reg,
ASYS_I2S_CON_I2S_EN, 0);
- regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
- 1 << i2s_data->i2s_pwn_shift,
- 1 << i2s_data->i2s_pwn_shift);
+
+ mt2701_afe_disable_i2s(afe, i2s_num, stream_dir);
+
return 0;
}
@@ -165,7 +148,6 @@ static void mt2701_afe_i2s_shutdown(struct snd_pcm_substream *substream,
struct mt2701_afe_private *afe_priv = afe->platform_priv;
int i2s_num = mt2701_dai_num_to_i2s(afe, dai->id);
struct mt2701_i2s_path *i2s_path;
- int clk_num = MT2701_AUD_AUD_I2S1_MCLK + i2s_num;
if (i2s_num < 0)
return;
@@ -177,37 +159,32 @@ static void mt2701_afe_i2s_shutdown(struct snd_pcm_substream *substream,
else
goto I2S_UNSTART;
- mt2701_afe_i2s_path_shutdown(substream, dai, 0);
+ mt2701_afe_i2s_path_shutdown(substream, dai, i2s_num, 0);
/* need to disable i2s-out path when disable i2s-in */
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
- mt2701_afe_i2s_path_shutdown(substream, dai, 1);
+ mt2701_afe_i2s_path_shutdown(substream, dai, i2s_num, 1);
I2S_UNSTART:
/* disable mclk */
- clk_disable_unprepare(afe_priv->clocks[clk_num]);
+ mt2701_afe_disable_mclk(afe, i2s_num);
}
static int mt2701_i2s_path_prepare_enable(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai,
+ int i2s_num,
int dir_invert)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
struct mt2701_afe_private *afe_priv = afe->platform_priv;
- int i2s_num = mt2701_dai_num_to_i2s(afe, dai->id);
- struct mt2701_i2s_path *i2s_path;
+ struct mt2701_i2s_path *i2s_path = &afe_priv->i2s_path[i2s_num];
const struct mt2701_i2s_data *i2s_data;
struct snd_pcm_runtime * const runtime = substream->runtime;
int reg, fs, w_len = 1; /* now we support bck 64bits only */
int stream_dir = substream->stream;
unsigned int mask = 0, val = 0;
- if (i2s_num < 0)
- return i2s_num;
-
- i2s_path = &afe_priv->i2s_path[i2s_num];
-
if (dir_invert) {
if (stream_dir == SNDRV_PCM_STREAM_PLAYBACK)
stream_dir = SNDRV_PCM_STREAM_CAPTURE;
@@ -251,9 +228,7 @@ static int mt2701_i2s_path_prepare_enable(struct snd_pcm_substream *substream,
fs << i2s_data->i2s_asrc_fs_shift);
/* enable i2s */
- regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
- 1 << i2s_data->i2s_pwn_shift,
- 0 << i2s_data->i2s_pwn_shift);
+ mt2701_afe_enable_i2s(afe, i2s_num, stream_dir);
/* reset i2s hw status before enable */
regmap_update_bits(afe->regmap, i2s_data->i2s_ctrl_reg,
@@ -300,13 +275,13 @@ static int mt2701_afe_i2s_prepare(struct snd_pcm_substream *substream,
mt2701_mclk_configuration(afe, i2s_num, clk_domain, mclk_rate);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- mt2701_i2s_path_prepare_enable(substream, dai, 0);
+ mt2701_i2s_path_prepare_enable(substream, dai, i2s_num, 0);
} else {
/* need to enable i2s-out path when enable i2s-in */
/* prepare for another direction "out" */
- mt2701_i2s_path_prepare_enable(substream, dai, 1);
+ mt2701_i2s_path_prepare_enable(substream, dai, i2s_num, 1);
/* prepare for "in" */
- mt2701_i2s_path_prepare_enable(substream, dai, 0);
+ mt2701_i2s_path_prepare_enable(substream, dai, i2s_num, 0);
}
return 0;
@@ -339,9 +314,11 @@ static int mt2701_btmrg_startup(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
struct mt2701_afe_private *afe_priv = afe->platform_priv;
+ int ret;
- regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
- AUDIO_TOP_CON4_PDN_MRGIF, 0);
+ ret = mt2701_enable_btmrg_clk(afe);
+ if (ret)
+ return ret;
afe_priv->mrg_enable[substream->stream] = 1;
return 0;
@@ -406,9 +383,7 @@ static void mt2701_btmrg_shutdown(struct snd_pcm_substream *substream,
AFE_MRGIF_CON_MRG_EN, 0);
regmap_update_bits(afe->regmap, AFE_MRGIF_CON,
AFE_MRGIF_CON_MRG_I2S_EN, 0);
- regmap_update_bits(afe->regmap, AUDIO_TOP_CON4,
- AUDIO_TOP_CON4_PDN_MRGIF,
- AUDIO_TOP_CON4_PDN_MRGIF);
+ mt2701_disable_btmrg_clk(afe);
}
afe_priv->mrg_enable[substream->stream] = 0;
}
@@ -574,7 +549,6 @@ static const struct snd_soc_dai_ops mt2701_single_memif_dai_ops = {
.hw_free = mtk_afe_fe_hw_free,
.prepare = mtk_afe_fe_prepare,
.trigger = mtk_afe_fe_trigger,
-
};
static const struct snd_soc_dai_ops mt2701_dlm_memif_dai_ops = {
@@ -915,31 +889,6 @@ static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_i2s4[] = {
PWR2_TOP_CON, 19, 1, 0),
};
-static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_asrc0[] = {
- SOC_DAPM_SINGLE_AUTODISABLE("Asrc0 out Switch", AUDIO_TOP_CON4, 14, 1,
- 1),
-};
-
-static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_asrc1[] = {
- SOC_DAPM_SINGLE_AUTODISABLE("Asrc1 out Switch", AUDIO_TOP_CON4, 15, 1,
- 1),
-};
-
-static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_asrc2[] = {
- SOC_DAPM_SINGLE_AUTODISABLE("Asrc2 out Switch", PWR2_TOP_CON, 6, 1,
- 1),
-};
-
-static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_asrc3[] = {
- SOC_DAPM_SINGLE_AUTODISABLE("Asrc3 out Switch", PWR2_TOP_CON, 7, 1,
- 1),
-};
-
-static const struct snd_kcontrol_new mt2701_afe_multi_ch_out_asrc4[] = {
- SOC_DAPM_SINGLE_AUTODISABLE("Asrc4 out Switch", PWR2_TOP_CON, 8, 1,
- 1),
-};
-
static const struct snd_soc_dapm_widget mt2701_afe_pcm_widgets[] = {
/* inter-connections */
SND_SOC_DAPM_MIXER("I00", SND_SOC_NOPM, 0, 0, NULL, 0),
@@ -999,19 +948,6 @@ static const struct snd_soc_dapm_widget mt2701_afe_pcm_widgets[] = {
SND_SOC_DAPM_MIXER("I18I19", SND_SOC_NOPM, 0, 0,
mt2701_afe_multi_ch_out_i2s3,
ARRAY_SIZE(mt2701_afe_multi_ch_out_i2s3)),
-
- SND_SOC_DAPM_MIXER("ASRC_O0", SND_SOC_NOPM, 0, 0,
- mt2701_afe_multi_ch_out_asrc0,
- ARRAY_SIZE(mt2701_afe_multi_ch_out_asrc0)),
- SND_SOC_DAPM_MIXER("ASRC_O1", SND_SOC_NOPM, 0, 0,
- mt2701_afe_multi_ch_out_asrc1,
- ARRAY_SIZE(mt2701_afe_multi_ch_out_asrc1)),
- SND_SOC_DAPM_MIXER("ASRC_O2", SND_SOC_NOPM, 0, 0,
- mt2701_afe_multi_ch_out_asrc2,
- ARRAY_SIZE(mt2701_afe_multi_ch_out_asrc2)),
- SND_SOC_DAPM_MIXER("ASRC_O3", SND_SOC_NOPM, 0, 0,
- mt2701_afe_multi_ch_out_asrc3,
- ARRAY_SIZE(mt2701_afe_multi_ch_out_asrc3)),
};
static const struct snd_soc_dapm_route mt2701_afe_pcm_routes[] = {
@@ -1021,7 +957,6 @@ static const struct snd_soc_dapm_route mt2701_afe_pcm_routes[] = {
{"I2S0 Playback", NULL, "O15"},
{"I2S0 Playback", NULL, "O16"},
-
{"I2S1 Playback", NULL, "O17"},
{"I2S1 Playback", NULL, "O18"},
{"I2S2 Playback", NULL, "O19"},
@@ -1038,7 +973,6 @@ static const struct snd_soc_dapm_route mt2701_afe_pcm_routes[] = {
{"I00", NULL, "I2S0 Capture"},
{"I01", NULL, "I2S0 Capture"},
-
{"I02", NULL, "I2S1 Capture"},
{"I03", NULL, "I2S1 Capture"},
/* I02,03 link to UL2, also need to open I2S0 */
@@ -1046,15 +980,10 @@ static const struct snd_soc_dapm_route mt2701_afe_pcm_routes[] = {
{"I26", NULL, "BT Capture"},
- {"ASRC_O0", "Asrc0 out Switch", "DLM"},
- {"ASRC_O1", "Asrc1 out Switch", "DLM"},
- {"ASRC_O2", "Asrc2 out Switch", "DLM"},
- {"ASRC_O3", "Asrc3 out Switch", "DLM"},
-
- {"I12I13", "Multich I2S0 Out Switch", "ASRC_O0"},
- {"I14I15", "Multich I2S1 Out Switch", "ASRC_O1"},
- {"I16I17", "Multich I2S2 Out Switch", "ASRC_O2"},
- {"I18I19", "Multich I2S3 Out Switch", "ASRC_O3"},
+ {"I12I13", "Multich I2S0 Out Switch", "DLM"},
+ {"I14I15", "Multich I2S1 Out Switch", "DLM"},
+ {"I16I17", "Multich I2S2 Out Switch", "DLM"},
+ {"I18I19", "Multich I2S3 Out Switch", "DLM"},
{ "I12", NULL, "I12I13" },
{ "I13", NULL, "I12I13" },
@@ -1079,7 +1008,6 @@ static const struct snd_soc_dapm_route mt2701_afe_pcm_routes[] = {
{ "O21", "I18 Switch", "I18" },
{ "O22", "I19 Switch", "I19" },
{ "O31", "I35 Switch", "I35" },
-
};
static const struct snd_soc_component_driver mt2701_afe_pcm_dai_component = {
@@ -1386,14 +1314,12 @@ static const struct mt2701_i2s_data mt2701_i2s_data[MT2701_I2S_NUM][2] = {
{
{
.i2s_ctrl_reg = ASYS_I2SO1_CON,
- .i2s_pwn_shift = 6,
.i2s_asrc_fs_shift = 0,
.i2s_asrc_fs_mask = 0x1f,
},
{
.i2s_ctrl_reg = ASYS_I2SIN1_CON,
- .i2s_pwn_shift = 0,
.i2s_asrc_fs_shift = 0,
.i2s_asrc_fs_mask = 0x1f,
@@ -1402,14 +1328,12 @@ static const struct mt2701_i2s_data mt2701_i2s_data[MT2701_I2S_NUM][2] = {
{
{
.i2s_ctrl_reg = ASYS_I2SO2_CON,
- .i2s_pwn_shift = 7,
.i2s_asrc_fs_shift = 5,
.i2s_asrc_fs_mask = 0x1f,
},
{
.i2s_ctrl_reg = ASYS_I2SIN2_CON,
- .i2s_pwn_shift = 1,
.i2s_asrc_fs_shift = 5,
.i2s_asrc_fs_mask = 0x1f,
@@ -1418,14 +1342,12 @@ static const struct mt2701_i2s_data mt2701_i2s_data[MT2701_I2S_NUM][2] = {
{
{
.i2s_ctrl_reg = ASYS_I2SO3_CON,
- .i2s_pwn_shift = 8,
.i2s_asrc_fs_shift = 10,
.i2s_asrc_fs_mask = 0x1f,
},
{
.i2s_ctrl_reg = ASYS_I2SIN3_CON,
- .i2s_pwn_shift = 2,
.i2s_asrc_fs_shift = 10,
.i2s_asrc_fs_mask = 0x1f,
@@ -1434,14 +1356,12 @@ static const struct mt2701_i2s_data mt2701_i2s_data[MT2701_I2S_NUM][2] = {
{
{
.i2s_ctrl_reg = ASYS_I2SO4_CON,
- .i2s_pwn_shift = 9,
.i2s_asrc_fs_shift = 15,
.i2s_asrc_fs_mask = 0x1f,
},
{
.i2s_ctrl_reg = ASYS_I2SIN4_CON,
- .i2s_pwn_shift = 3,
.i2s_asrc_fs_shift = 15,
.i2s_asrc_fs_mask = 0x1f,
@@ -1449,14 +1369,6 @@ static const struct mt2701_i2s_data mt2701_i2s_data[MT2701_I2S_NUM][2] = {
},
};
-static const struct regmap_config mt2701_afe_regmap_config = {
- .reg_bits = 32,
- .reg_stride = 4,
- .val_bits = 32,
- .max_register = AFE_END_ADDR,
- .cache_type = REGCACHE_NONE,
-};
-
static irqreturn_t mt2701_asys_isr(int irq_id, void *dev)
{
int id;
@@ -1483,8 +1395,7 @@ static int mt2701_afe_runtime_suspend(struct device *dev)
{
struct mtk_base_afe *afe = dev_get_drvdata(dev);
- mt2701_afe_disable_clock(afe);
- return 0;
+ return mt2701_afe_disable_clock(afe);
}
static int mt2701_afe_runtime_resume(struct device *dev)
@@ -1496,21 +1407,22 @@ static int mt2701_afe_runtime_resume(struct device *dev)
static int mt2701_afe_pcm_dev_probe(struct platform_device *pdev)
{
+ struct snd_soc_component *component;
struct mtk_base_afe *afe;
struct mt2701_afe_private *afe_priv;
- struct resource *res;
struct device *dev;
int i, irq_id, ret;
afe = devm_kzalloc(&pdev->dev, sizeof(*afe), GFP_KERNEL);
if (!afe)
return -ENOMEM;
+
afe->platform_priv = devm_kzalloc(&pdev->dev, sizeof(*afe_priv),
GFP_KERNEL);
if (!afe->platform_priv)
return -ENOMEM;
- afe_priv = afe->platform_priv;
+ afe_priv = afe->platform_priv;
afe->dev = &pdev->dev;
dev = afe->dev;
@@ -1527,17 +1439,11 @@ static int mt2701_afe_pcm_dev_probe(struct platform_device *pdev)
return ret;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- afe->base_addr = devm_ioremap_resource(&pdev->dev, res);
-
- if (IS_ERR(afe->base_addr))
- return PTR_ERR(afe->base_addr);
-
- afe->regmap = devm_regmap_init_mmio(&pdev->dev, afe->base_addr,
- &mt2701_afe_regmap_config);
- if (IS_ERR(afe->regmap))
+ afe->regmap = syscon_node_to_regmap(dev->parent->of_node);
+ if (IS_ERR(afe->regmap)) {
+ dev_err(dev, "could not get regmap from parent\n");
return PTR_ERR(afe->regmap);
+ }
mutex_init(&afe->irq_alloc_lock);
@@ -1545,7 +1451,6 @@ static int mt2701_afe_pcm_dev_probe(struct platform_device *pdev)
afe->memif_size = MT2701_MEMIF_NUM;
afe->memif = devm_kcalloc(dev, afe->memif_size, sizeof(*afe->memif),
GFP_KERNEL);
-
if (!afe->memif)
return -ENOMEM;
@@ -1558,7 +1463,6 @@ static int mt2701_afe_pcm_dev_probe(struct platform_device *pdev)
afe->irqs_size = MT2701_IRQ_ASYS_END;
afe->irqs = devm_kcalloc(dev, afe->irqs_size, sizeof(*afe->irqs),
GFP_KERNEL);
-
if (!afe->irqs)
return -ENOMEM;
@@ -1573,10 +1477,15 @@ static int mt2701_afe_pcm_dev_probe(struct platform_device *pdev)
= &mt2701_i2s_data[i][I2S_IN];
}
+ component = kzalloc(sizeof(*component), GFP_KERNEL);
+ if (!component)
+ return -ENOMEM;
+
+ component->regmap = afe->regmap;
+
afe->mtk_afe_hardware = &mt2701_afe_hardware;
afe->memif_fs = mt2701_memif_fs;
afe->irq_fs = mt2701_irq_fs;
-
afe->reg_back_up_list = mt2701_afe_backup_list;
afe->reg_back_up_list_num = ARRAY_SIZE(mt2701_afe_backup_list);
afe->runtime_resume = mt2701_afe_runtime_resume;
@@ -1586,59 +1495,58 @@ static int mt2701_afe_pcm_dev_probe(struct platform_device *pdev)
ret = mt2701_init_clock(afe);
if (ret) {
dev_err(dev, "init clock error\n");
- return ret;
+ goto err_init_clock;
}
platform_set_drvdata(pdev, afe);
- pm_runtime_enable(&pdev->dev);
- if (!pm_runtime_enabled(&pdev->dev))
- goto err_pm_disable;
- pm_runtime_get_sync(&pdev->dev);
- ret = snd_soc_register_platform(&pdev->dev, &mtk_afe_pcm_platform);
+ pm_runtime_enable(dev);
+ if (!pm_runtime_enabled(dev)) {
+ ret = mt2701_afe_runtime_resume(dev);
+ if (ret)
+ goto err_pm_disable;
+ }
+ pm_runtime_get_sync(dev);
+
+ ret = snd_soc_register_platform(dev, &mtk_afe_pcm_platform);
if (ret) {
dev_warn(dev, "err_platform\n");
goto err_platform;
}
- ret = snd_soc_register_component(&pdev->dev,
- &mt2701_afe_pcm_dai_component,
- mt2701_afe_pcm_dais,
- ARRAY_SIZE(mt2701_afe_pcm_dais));
+ ret = snd_soc_add_component(dev, component,
+ &mt2701_afe_pcm_dai_component,
+ mt2701_afe_pcm_dais,
+ ARRAY_SIZE(mt2701_afe_pcm_dais));
if (ret) {
dev_warn(dev, "err_dai_component\n");
goto err_dai_component;
}
- mt2701_afe_runtime_resume(&pdev->dev);
-
return 0;
err_dai_component:
- snd_soc_unregister_component(&pdev->dev);
-
+ snd_soc_unregister_platform(dev);
err_platform:
- snd_soc_unregister_platform(&pdev->dev);
-
+ pm_runtime_put_sync(dev);
err_pm_disable:
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_disable(dev);
+err_init_clock:
+ kfree(component);
return ret;
}
static int mt2701_afe_pcm_dev_remove(struct platform_device *pdev)
{
- struct mtk_base_afe *afe = platform_get_drvdata(pdev);
-
+ pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
if (!pm_runtime_status_suspended(&pdev->dev))
mt2701_afe_runtime_suspend(&pdev->dev);
- pm_runtime_put_sync(&pdev->dev);
snd_soc_unregister_component(&pdev->dev);
snd_soc_unregister_platform(&pdev->dev);
- /* disable afe clock */
- mt2701_afe_disable_clock(afe);
+
return 0;
}
@@ -1670,4 +1578,3 @@ module_platform_driver(mt2701_afe_pcm_driver);
MODULE_DESCRIPTION("Mediatek ALSA SoC AFE platform driver for 2701");
MODULE_AUTHOR("Garlic Tseng <garlic.tseng@mediatek.com>");
MODULE_LICENSE("GPL v2");
-
diff --git a/sound/soc/mediatek/mt2701/mt2701-reg.h b/sound/soc/mediatek/mt2701/mt2701-reg.h
index bb62b1c55957..18e676974f22 100644
--- a/sound/soc/mediatek/mt2701/mt2701-reg.h
+++ b/sound/soc/mediatek/mt2701/mt2701-reg.h
@@ -17,17 +17,6 @@
#ifndef _MT2701_REG_H_
#define _MT2701_REG_H_
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/pm_runtime.h>
-#include <sound/soc.h>
-#include "mt2701-afe-common.h"
-
-/*****************************************************************************
- * R E G I S T E R D E F I N I T I O N
- *****************************************************************************/
#define AUDIO_TOP_CON0 0x0000
#define AUDIO_TOP_CON4 0x0010
#define AUDIO_TOP_CON5 0x0014
@@ -109,18 +98,6 @@
#define AFE_DAI_BASE 0x1370
#define AFE_DAI_CUR 0x137c
-/* AUDIO_TOP_CON0 (0x0000) */
-#define AUDIO_TOP_CON0_A1SYS_A2SYS_ON (0x3 << 0)
-#define AUDIO_TOP_CON0_PDN_AFE (0x1 << 2)
-#define AUDIO_TOP_CON0_PDN_APLL_CK (0x1 << 23)
-
-/* AUDIO_TOP_CON4 (0x0010) */
-#define AUDIO_TOP_CON4_I2SO1_PWN (0x1 << 6)
-#define AUDIO_TOP_CON4_PDN_A1SYS (0x1 << 21)
-#define AUDIO_TOP_CON4_PDN_A2SYS (0x1 << 22)
-#define AUDIO_TOP_CON4_PDN_AFE_CONN (0x1 << 23)
-#define AUDIO_TOP_CON4_PDN_MRGIF (0x1 << 25)
-
/* AFE_DAIBT_CON0 (0x001c) */
#define AFE_DAIBT_CON0_DAIBT_EN (0x1 << 0)
#define AFE_DAIBT_CON0_BT_FUNC_EN (0x1 << 1)
@@ -137,22 +114,8 @@
#define AFE_MRGIF_CON_I2S_MODE_MASK (0xf << 20)
#define AFE_MRGIF_CON_I2S_MODE_32K (0x4 << 20)
-/* ASYS_I2SO1_CON (0x061c) */
-#define ASYS_I2SO1_CON_FS (0x1f << 8)
-#define ASYS_I2SO1_CON_FS_SET(x) ((x) << 8)
-#define ASYS_I2SO1_CON_MULTI_CH (0x1 << 16)
-#define ASYS_I2SO1_CON_SIDEGEN (0x1 << 30)
-#define ASYS_I2SO1_CON_I2S_EN (0x1 << 0)
-/* 0:EIAJ 1:I2S */
-#define ASYS_I2SO1_CON_I2S_MODE (0x1 << 3)
-#define ASYS_I2SO1_CON_WIDE_MODE (0x1 << 1)
-#define ASYS_I2SO1_CON_WIDE_MODE_SET(x) ((x) << 1)
-
-/* PWR2_TOP_CON (0x0634) */
-#define PWR2_TOP_CON_INIT_VAL (0xffe1ffff)
-
-/* ASYS_IRQ_CLR (0x07c0) */
-#define ASYS_IRQ_CLR_ALL (0xffffffff)
+/* ASYS_TOP_CON (0x0600) */
+#define ASYS_TOP_CON_ASYS_TIMING_ON (0x3 << 0)
/* PWR2_ASM_CON1 (0x1070) */
#define PWR2_ASM_CON1_INIT_VAL (0x492492)
@@ -182,5 +145,4 @@
#define ASYS_I2S_CON_WIDE_MODE_SET(x) ((x) << 1)
#define ASYS_I2S_IN_PHASE_FIX (0x1 << 31)
-#define AFE_END_ADDR 0x15e0
#endif
diff --git a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
index 8a643a35d3d4..c7f7f8add5d9 100644
--- a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
+++ b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
@@ -1083,7 +1083,7 @@ static int mt8173_afe_init_audio_clk(struct mtk_base_afe *afe)
static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev)
{
int ret, i;
- unsigned int irq_id;
+ int irq_id;
struct mtk_base_afe *afe;
struct mt8173_afe_private *afe_priv;
struct resource *res;
@@ -1105,9 +1105,9 @@ static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev)
afe->dev = &pdev->dev;
irq_id = platform_get_irq(pdev, 0);
- if (!irq_id) {
+ if (irq_id <= 0) {
dev_err(afe->dev, "np %s no irq\n", afe->dev->of_node->name);
- return -ENXIO;
+ return irq_id < 0 ? irq_id : -ENXIO;
}
ret = devm_request_irq(afe->dev, irq_id, mt8173_afe_irq_handler,
0, "Afe_ISR_Handle", (void *)afe);
diff --git a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c
index 99c15219dbc8..5a9a5482976e 100644
--- a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c
+++ b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c
@@ -37,8 +37,6 @@ static const struct snd_soc_dapm_route mt8173_rt5650_rt5514_routes[] = {
{"Sub DMIC1R", NULL, "Int Mic"},
{"Headphone", NULL, "HPOL"},
{"Headphone", NULL, "HPOR"},
- {"Headset Mic", NULL, "micbias1"},
- {"Headset Mic", NULL, "micbias2"},
{"IN1P", NULL, "Headset Mic"},
{"IN1N", NULL, "Headset Mic"},
};
diff --git a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c
index 42de84ca8c84..b7248085ca04 100644
--- a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c
+++ b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c
@@ -40,8 +40,6 @@ static const struct snd_soc_dapm_route mt8173_rt5650_rt5676_routes[] = {
{"Headphone", NULL, "HPOL"},
{"Headphone", NULL, "HPOR"},
{"Headphone", NULL, "Sub AIF2TX"}, /* IF2 ADC to 5650 */
- {"Headset Mic", NULL, "micbias1"},
- {"Headset Mic", NULL, "micbias2"},
{"IN1P", NULL, "Headset Mic"},
{"IN1N", NULL, "Headset Mic"},
{"Sub AIF2RX", NULL, "Headset Mic"}, /* IF2 DAC from 5650 */
diff --git a/sound/soc/mediatek/mt8173/mt8173-rt5650.c b/sound/soc/mediatek/mt8173/mt8173-rt5650.c
index e69c141d8ed4..40ebefd625c1 100644
--- a/sound/soc/mediatek/mt8173/mt8173-rt5650.c
+++ b/sound/soc/mediatek/mt8173/mt8173-rt5650.c
@@ -51,8 +51,6 @@ static const struct snd_soc_dapm_route mt8173_rt5650_routes[] = {
{"DMIC R1", NULL, "Int Mic"},
{"Headphone", NULL, "HPOL"},
{"Headphone", NULL, "HPOR"},
- {"Headset Mic", NULL, "micbias1"},
- {"Headset Mic", NULL, "micbias2"},
{"IN1P", NULL, "Headset Mic"},
{"IN1N", NULL, "Headset Mic"},
};
diff --git a/sound/soc/mxs/mxs-sgtl5000.c b/sound/soc/mxs/mxs-sgtl5000.c
index 2ed3240cc682..2b3f2408301a 100644
--- a/sound/soc/mxs/mxs-sgtl5000.c
+++ b/sound/soc/mxs/mxs-sgtl5000.c
@@ -93,6 +93,14 @@ static struct snd_soc_dai_link mxs_sgtl5000_dai[] = {
},
};
+static const struct snd_soc_dapm_widget mxs_sgtl5000_dapm_widgets[] = {
+ SND_SOC_DAPM_MIC("Mic Jack", NULL),
+ SND_SOC_DAPM_LINE("Line In Jack", NULL),
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
+ SND_SOC_DAPM_SPK("Line Out Jack", NULL),
+ SND_SOC_DAPM_SPK("Ext Spk", NULL),
+};
+
static struct snd_soc_card mxs_sgtl5000 = {
.name = "mxs_sgtl5000",
.owner = THIS_MODULE,
@@ -141,10 +149,23 @@ static int mxs_sgtl5000_probe(struct platform_device *pdev)
card->dev = &pdev->dev;
+ if (of_find_property(np, "audio-routing", NULL)) {
+ card->dapm_widgets = mxs_sgtl5000_dapm_widgets;
+ card->num_dapm_widgets = ARRAY_SIZE(mxs_sgtl5000_dapm_widgets);
+
+ ret = snd_soc_of_parse_audio_routing(card, "audio-routing");
+ if (ret) {
+ dev_err(&pdev->dev, "failed to parse audio-routing (%d)\n",
+ ret);
+ return ret;
+ }
+ }
+
ret = devm_snd_soc_register_card(&pdev->dev, card);
if (ret) {
- dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n",
- ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n",
+ ret);
return ret;
}
diff --git a/sound/soc/nuc900/nuc900-ac97.c b/sound/soc/nuc900/nuc900-ac97.c
index b6615affe571..81b09d740ed9 100644
--- a/sound/soc/nuc900/nuc900-ac97.c
+++ b/sound/soc/nuc900/nuc900-ac97.c
@@ -67,7 +67,7 @@ static unsigned short nuc900_ac97_read(struct snd_ac97 *ac97,
/* polling the AC_R_FINISH */
while (!(AUDIO_READ(nuc900_audio->mmio + ACTL_ACCON) & AC_R_FINISH)
- && timeout--)
+ && --timeout)
mdelay(1);
if (!timeout) {
@@ -121,7 +121,7 @@ static void nuc900_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
/* polling the AC_W_FINISH */
while ((AUDIO_READ(nuc900_audio->mmio + ACTL_ACCON) & AC_W_FINISH)
- && timeout--)
+ && --timeout)
mdelay(1);
if (!timeout)
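
The switch from timeout-- to --timeout matters for the check that follows: with post-decrement, a loop that runs to exhaustion exits with timeout at -1, so `if (!timeout)` never fires; pre-decrement leaves it at exactly 0. A minimal sketch of both forms (done() is a placeholder for the hardware condition):

    int timeout = 100;

    while (!done() && timeout--)    /* BUG: exits with timeout == -1,  */
            mdelay(1);              /* so !timeout never triggers      */

    timeout = 100;
    while (!done() && --timeout)    /* fixed: exits with timeout == 0  */
            mdelay(1);
    if (!timeout)
            pr_err("timed out\n");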
@@ -345,11 +345,10 @@ static int nuc900_ac97_drvprobe(struct platform_device *pdev)
goto out;
}
- nuc900_audio->irq_num = platform_get_irq(pdev, 0);
- if (!nuc900_audio->irq_num) {
- ret = -EBUSY;
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
goto out;
- }
+ nuc900_audio->irq_num = ret;
nuc900_ac97_data = nuc900_audio;
diff --git a/sound/soc/omap/ams-delta.c b/sound/soc/omap/ams-delta.c
index d40219678700..cb72c1e57da0 100644
--- a/sound/soc/omap/ams-delta.c
+++ b/sound/soc/omap/ams-delta.c
@@ -105,7 +105,7 @@ static int ams_delta_set_audio_mode(struct snd_kcontrol *kcontrol,
int pin, changed = 0;
/* Refuse any mode changes if we are not able to control the codec. */
- if (!cx20442_codec->hw_write)
+ if (!cx20442_codec->component.card->pop_time)
return -EUNATCH;
if (ucontrol->value.enumerated.item[0] >= control->items)
@@ -345,7 +345,7 @@ static void cx81801_receive(struct tty_struct *tty,
if (!codec)
return;
- if (!codec->hw_write) {
+ if (!codec->component.card->pop_time) {
/* First modem response, complete setup procedure */
/* Initialize timer used for config pulse generation */
diff --git a/sound/soc/qcom/apq8016_sbc.c b/sound/soc/qcom/apq8016_sbc.c
index d49adc822a11..704428735e3c 100644
--- a/sound/soc/qcom/apq8016_sbc.c
+++ b/sound/soc/qcom/apq8016_sbc.c
@@ -43,7 +43,7 @@ struct apq8016_sbc_data {
static int apq8016_sbc_dai_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct snd_soc_codec *codec;
+ struct snd_soc_component *component;
struct snd_soc_dai_link *dai_link = rtd->dai_link;
struct snd_soc_card *card = rtd->card;
struct apq8016_sbc_data *pdata = snd_soc_card_get_drvdata(card);
@@ -92,7 +92,7 @@ static int apq8016_sbc_dai_init(struct snd_soc_pcm_runtime *rtd)
jack = pdata->jack.jack;
- snd_jack_set_key(jack, SND_JACK_BTN_0, KEY_MEDIA);
+ snd_jack_set_key(jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
snd_jack_set_key(jack, SND_JACK_BTN_1, KEY_VOICECOMMAND);
snd_jack_set_key(jack, SND_JACK_BTN_2, KEY_VOLUMEUP);
snd_jack_set_key(jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN);
@@ -102,15 +102,15 @@ static int apq8016_sbc_dai_init(struct snd_soc_pcm_runtime *rtd)
for (i = 0 ; i < dai_link->num_codecs; i++) {
struct snd_soc_dai *dai = rtd->codec_dais[i];
- codec = dai->codec;
+ component = dai->component;
/* Set default mclk for internal codec */
- rval = snd_soc_codec_set_sysclk(codec, 0, 0, DEFAULT_MCLK_RATE,
+ rval = snd_soc_component_set_sysclk(component, 0, 0, DEFAULT_MCLK_RATE,
SND_SOC_CLOCK_IN);
if (rval != 0 && rval != -ENOTSUPP) {
dev_warn(card->dev, "Failed to set mclk: %d\n", rval);
return rval;
}
- rval = snd_soc_codec_set_jack(codec, &pdata->jack, NULL);
+ rval = snd_soc_component_set_jack(component, &pdata->jack, NULL);
if (rval != 0 && rval != -ENOTSUPP) {
dev_warn(card->dev, "Failed to set jack: %d\n", rval);
return rval;
diff --git a/sound/soc/rockchip/rk3399_gru_sound.c b/sound/soc/rockchip/rk3399_gru_sound.c
index d64fbbd50544..fa6cd1de828b 100644
--- a/sound/soc/rockchip/rk3399_gru_sound.c
+++ b/sound/soc/rockchip/rk3399_gru_sound.c
@@ -206,7 +206,8 @@ static int rockchip_sound_da7219_init(struct snd_soc_pcm_runtime *rtd)
return ret;
}
- snd_jack_set_key(rockchip_sound_jack.jack, SND_JACK_BTN_0, KEY_MEDIA);
+ snd_jack_set_key(
+ rockchip_sound_jack.jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
snd_jack_set_key(
rockchip_sound_jack.jack, SND_JACK_BTN_1, KEY_VOLUMEUP);
snd_jack_set_key(
diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
index 908211e1d6fc..950823d69e9c 100644
--- a/sound/soc/rockchip/rockchip_i2s.c
+++ b/sound/soc/rockchip/rockchip_i2s.c
@@ -328,6 +328,7 @@ static int rockchip_i2s_hw_params(struct snd_pcm_substream *substream,
val |= I2S_CHN_4;
break;
case 2:
+ case 1:
val |= I2S_CHN_2;
break;
default:
@@ -460,7 +461,7 @@ static struct snd_soc_dai_driver rockchip_i2s_dai = {
},
.capture = {
.stream_name = "Capture",
- .channels_min = 2,
+ .channels_min = 1,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_8000_192000,
.formats = (SNDRV_PCM_FMTBIT_S8 |
@@ -504,6 +505,7 @@ static bool rockchip_i2s_rd_reg(struct device *dev, unsigned int reg)
case I2S_INTCR:
case I2S_XFER:
case I2S_CLR:
+ case I2S_TXDR:
case I2S_RXDR:
case I2S_FIFOLR:
case I2S_INTSR:
@@ -518,6 +520,9 @@ static bool rockchip_i2s_volatile_reg(struct device *dev, unsigned int reg)
switch (reg) {
case I2S_INTSR:
case I2S_CLR:
+ case I2S_FIFOLR:
+ case I2S_TXDR:
+ case I2S_RXDR:
return true;
default:
return false;
@@ -527,6 +532,8 @@ static bool rockchip_i2s_volatile_reg(struct device *dev, unsigned int reg)
static bool rockchip_i2s_precious_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
+ case I2S_RXDR:
+ return true;
default:
return false;
}
@@ -654,7 +661,7 @@ static int rockchip_i2s_probe(struct platform_device *pdev)
}
if (!of_property_read_u32(node, "rockchip,capture-channels", &val)) {
- if (val >= 2 && val <= 8)
+ if (val >= 1 && val <= 8)
soc_dai->capture.channels_max = val;
}
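
Background for the regmap hunks: I2S_TXDR/I2S_RXDR are FIFO windows, so their
values must never be served from the regmap cache (volatile), and RXDR must
additionally never be read as a side effect, e.g. by the regmap debugfs dump,
because every read pops the capture FIFO (precious). A condensed sketch of the
config these callbacks plug into; the field values here are illustrative, not
copied from the driver:

    static const struct regmap_config rockchip_i2s_regmap_config = {
        .reg_bits     = 32,                         /* illustrative */
        .val_bits     = 32,
        .reg_stride   = 4,
        .readable_reg = rockchip_i2s_rd_reg,
        .volatile_reg = rockchip_i2s_volatile_reg,  /* bypass the cache */
        .precious_reg = rockchip_i2s_precious_reg,  /* no debugfs reads */
    };

Together with channels_min = 1 and the `case 1` fall-through, this lets mono
capture streams run on the 2-channel bus setting.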
diff --git a/sound/soc/samsung/bells.c b/sound/soc/samsung/bells.c
index 34deba461ae1..0e66cd8ef2f9 100644
--- a/sound/soc/samsung/bells.c
+++ b/sound/soc/samsung/bells.c
@@ -60,13 +60,13 @@ static int bells_set_bias_level(struct snd_soc_card *card,
{
struct snd_soc_pcm_runtime *rtd;
struct snd_soc_dai *codec_dai;
- struct snd_soc_codec *codec;
+ struct snd_soc_component *component;
struct bells_drvdata *bells = card->drvdata;
int ret;
rtd = snd_soc_get_pcm_runtime(card, card->dai_link[DAI_DSP_CODEC].name);
codec_dai = rtd->codec_dai;
- codec = codec_dai->codec;
+ component = codec_dai->component;
if (dapm->dev != codec_dai->dev)
return 0;
@@ -76,7 +76,7 @@ static int bells_set_bias_level(struct snd_soc_card *card,
if (dapm->bias_level != SND_SOC_BIAS_STANDBY)
break;
- ret = snd_soc_codec_set_pll(codec, WM5102_FLL1,
+ ret = snd_soc_component_set_pll(component, WM5102_FLL1,
ARIZONA_FLL_SRC_MCLK1,
MCLK_RATE,
bells->sysclk_rate);
@@ -84,7 +84,7 @@ static int bells_set_bias_level(struct snd_soc_card *card,
pr_err("Failed to start FLL: %d\n", ret);
if (bells->asyncclk_rate) {
- ret = snd_soc_codec_set_pll(codec, WM5102_FLL2,
+ ret = snd_soc_component_set_pll(component, WM5102_FLL2,
ARIZONA_FLL_SRC_AIF2BCLK,
BCLK2_RATE,
bells->asyncclk_rate);
@@ -106,27 +106,27 @@ static int bells_set_bias_level_post(struct snd_soc_card *card,
{
struct snd_soc_pcm_runtime *rtd;
struct snd_soc_dai *codec_dai;
- struct snd_soc_codec *codec;
+ struct snd_soc_component *component;
struct bells_drvdata *bells = card->drvdata;
int ret;
rtd = snd_soc_get_pcm_runtime(card, card->dai_link[DAI_DSP_CODEC].name);
codec_dai = rtd->codec_dai;
- codec = codec_dai->codec;
+ component = codec_dai->component;
if (dapm->dev != codec_dai->dev)
return 0;
switch (level) {
case SND_SOC_BIAS_STANDBY:
- ret = snd_soc_codec_set_pll(codec, WM5102_FLL1, 0, 0, 0);
+ ret = snd_soc_component_set_pll(component, WM5102_FLL1, 0, 0, 0);
if (ret < 0) {
pr_err("Failed to stop FLL: %d\n", ret);
return ret;
}
if (bells->asyncclk_rate) {
- ret = snd_soc_codec_set_pll(codec, WM5102_FLL2,
+ ret = snd_soc_component_set_pll(component, WM5102_FLL2,
0, 0, 0);
if (ret < 0) {
pr_err("Failed to stop FLL: %d\n", ret);
@@ -148,8 +148,8 @@ static int bells_late_probe(struct snd_soc_card *card)
{
struct bells_drvdata *bells = card->drvdata;
struct snd_soc_pcm_runtime *rtd;
- struct snd_soc_codec *wm0010;
- struct snd_soc_codec *codec;
+ struct snd_soc_component *wm0010;
+ struct snd_soc_component *component;
struct snd_soc_dai *aif1_dai;
struct snd_soc_dai *aif2_dai;
struct snd_soc_dai *aif3_dai;
@@ -157,22 +157,22 @@ static int bells_late_probe(struct snd_soc_card *card)
int ret;
rtd = snd_soc_get_pcm_runtime(card, card->dai_link[DAI_AP_DSP].name);
- wm0010 = rtd->codec;
+ wm0010 = rtd->codec_dai->component;
rtd = snd_soc_get_pcm_runtime(card, card->dai_link[DAI_DSP_CODEC].name);
- codec = rtd->codec;
+ component = rtd->codec_dai->component;
aif1_dai = rtd->codec_dai;
- ret = snd_soc_codec_set_sysclk(codec, ARIZONA_CLK_SYSCLK,
+ ret = snd_soc_component_set_sysclk(component, ARIZONA_CLK_SYSCLK,
ARIZONA_CLK_SRC_FLL1,
bells->sysclk_rate,
SND_SOC_CLOCK_IN);
if (ret != 0) {
- dev_err(codec->dev, "Failed to set SYSCLK: %d\n", ret);
+ dev_err(component->dev, "Failed to set SYSCLK: %d\n", ret);
return ret;
}
- ret = snd_soc_codec_set_sysclk(wm0010, 0, 0, SYS_MCLK_RATE, 0);
+ ret = snd_soc_component_set_sysclk(wm0010, 0, 0, SYS_MCLK_RATE, 0);
if (ret != 0) {
dev_err(wm0010->dev, "Failed to set WM0010 clock: %d\n", ret);
return ret;
@@ -182,20 +182,20 @@ static int bells_late_probe(struct snd_soc_card *card)
if (ret != 0)
dev_err(aif1_dai->dev, "Failed to set AIF1 clock: %d\n", ret);
- ret = snd_soc_codec_set_sysclk(codec, ARIZONA_CLK_OPCLK, 0,
+ ret = snd_soc_component_set_sysclk(component, ARIZONA_CLK_OPCLK, 0,
SYS_MCLK_RATE, SND_SOC_CLOCK_OUT);
if (ret != 0)
- dev_err(codec->dev, "Failed to set OPCLK: %d\n", ret);
+ dev_err(component->dev, "Failed to set OPCLK: %d\n", ret);
if (card->num_rtd == DAI_CODEC_CP)
return 0;
- ret = snd_soc_codec_set_sysclk(codec, ARIZONA_CLK_ASYNCCLK,
+ ret = snd_soc_component_set_sysclk(component, ARIZONA_CLK_ASYNCCLK,
ARIZONA_CLK_SRC_FLL2,
bells->asyncclk_rate,
SND_SOC_CLOCK_IN);
if (ret != 0) {
- dev_err(codec->dev, "Failed to set ASYNCCLK: %d\n", ret);
+ dev_err(component->dev, "Failed to set ASYNCCLK: %d\n", ret);
return ret;
}
@@ -221,7 +221,7 @@ static int bells_late_probe(struct snd_soc_card *card)
return ret;
}
- ret = snd_soc_codec_set_sysclk(wm9081_dai->codec, WM9081_SYSCLK_MCLK,
+ ret = snd_soc_component_set_sysclk(wm9081_dai->component, WM9081_SYSCLK_MCLK,
0, SYS_MCLK_RATE, 0);
if (ret != 0) {
dev_err(wm9081_dai->dev, "Failed to set MCLK: %d\n", ret);
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index f12a88a21dfa..64d5ecb86528 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -197,16 +197,27 @@ int rsnd_io_is_working(struct rsnd_dai_stream *io)
return 0;
}
-int rsnd_runtime_channel_original(struct rsnd_dai_stream *io)
+int rsnd_runtime_channel_original_with_params(struct rsnd_dai_stream *io,
+ struct snd_pcm_hw_params *params)
{
struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
- return runtime->channels;
+ /*
+ * params is non-NULL while hw constraints are being
+ * refined; see
+ * __rsnd_soc_hw_rule_rate()
+ * __rsnd_soc_hw_rule_channels()
+ */
+ if (params)
+ return params_channels(params);
+ else
+ return runtime->channels;
}
-int rsnd_runtime_channel_after_ctu(struct rsnd_dai_stream *io)
+int rsnd_runtime_channel_after_ctu_with_params(struct rsnd_dai_stream *io,
+ struct snd_pcm_hw_params *params)
{
- int chan = rsnd_runtime_channel_original(io);
+ int chan = rsnd_runtime_channel_original_with_params(io, params);
struct rsnd_mod *ctu_mod = rsnd_io_to_mod_ctu(io);
if (ctu_mod) {
@@ -219,12 +230,13 @@ int rsnd_runtime_channel_after_ctu(struct rsnd_dai_stream *io)
return chan;
}
-int rsnd_runtime_channel_for_ssi(struct rsnd_dai_stream *io)
+int rsnd_runtime_channel_for_ssi_with_params(struct rsnd_dai_stream *io,
+ struct snd_pcm_hw_params *params)
{
struct rsnd_dai *rdai = rsnd_io_to_rdai(io);
int chan = rsnd_io_is_play(io) ?
- rsnd_runtime_channel_after_ctu(io) :
- rsnd_runtime_channel_original(io);
+ rsnd_runtime_channel_after_ctu_with_params(io, params) :
+ rsnd_runtime_channel_original_with_params(io, params);
/* Use Multi SSI */
if (rsnd_runtime_is_ssi_multi(io))
@@ -262,10 +274,10 @@ u32 rsnd_get_adinr_bit(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
struct device *dev = rsnd_priv_to_dev(priv);
- switch (runtime->sample_bits) {
+ switch (snd_pcm_format_width(runtime->format)) {
case 16:
return 8 << 16;
- case 32:
+ case 24:
return 0 << 16;
}
@@ -282,11 +294,12 @@ u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
struct rsnd_mod *ssiu = rsnd_io_to_mod_ssiu(io);
struct rsnd_mod *target;
struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
- u32 val = 0x76543210;
- u32 mask = ~0;
/*
- * *Hardware* L/R and *Software* L/R are inverted.
+ * *Hardware* L/R and *Software* L/R are inverted for 16bit data.
+ * 31..16 15...0
+ * HW: [L ch] [R ch]
+ * SW: [R ch] [L ch]
* We need to care about inversion timing to control
* Playback/Capture correctly.
* The point is [DVC] needs *Hardware* L/R, [MEM] needs *Software* L/R
@@ -313,27 +326,13 @@ u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
target = cmd ? cmd : ssiu;
}
- mask <<= runtime->channels * 4;
- val = val & mask;
-
- switch (runtime->sample_bits) {
- case 16:
- val |= 0x67452301 & ~mask;
- break;
- case 32:
- val |= 0x76543210 & ~mask;
- break;
- }
-
- /*
- * exchange channeles on SRC if possible,
- * otherwise, R/L volume settings on DVC
- * changes inverted channels
- */
- if (mod == target)
- return val;
- else
+ /* Non target mod or 24bit data needs normal DALIGN */
+ if ((snd_pcm_format_width(runtime->format) != 16) ||
+ (mod != target))
return 0x76543210;
+ /* Target mod needs inverted DALIGN when 16bit */
+ else
+ return 0x67452301;
}
u32 rsnd_get_busif_shift(struct rsnd_dai_stream *io, struct rsnd_mod *mod)
@@ -363,12 +362,8 @@ u32 rsnd_get_busif_shift(struct rsnd_dai_stream *io, struct rsnd_mod *mod)
* HW 24bit data is located as 0x******00
*
*/
- switch (runtime->sample_bits) {
- case 16:
+ if (snd_pcm_format_width(runtime->format) == 16)
return 0;
- case 32:
- break;
- }
for (i = 0; i < ARRAY_SIZE(playback_mods); i++) {
tmod = rsnd_io_to_mod(io, mods[i]);
@@ -616,8 +611,6 @@ static int rsnd_soc_dai_trigger(struct snd_pcm_substream *substream, int cmd,
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
- rsnd_dai_stream_init(io, substream);
-
ret = rsnd_dai_call(init, io, priv);
if (ret < 0)
goto dai_trigger_end;
@@ -639,7 +632,6 @@ static int rsnd_soc_dai_trigger(struct snd_pcm_substream *substream, int cmd,
ret |= rsnd_dai_call(quit, io, priv);
- rsnd_dai_stream_quit(io);
break;
default:
ret = -EINVAL;
@@ -784,8 +776,9 @@ static int rsnd_soc_hw_rule(struct rsnd_priv *priv,
return snd_interval_refine(iv, &p);
}
-static int rsnd_soc_hw_rule_rate(struct snd_pcm_hw_params *params,
- struct snd_pcm_hw_rule *rule)
+static int __rsnd_soc_hw_rule_rate(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule,
+ int is_play)
{
struct snd_interval *ic_ = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
struct snd_interval *ir = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
@@ -793,25 +786,37 @@ static int rsnd_soc_hw_rule_rate(struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai = rule->private;
struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
struct rsnd_priv *priv = rsnd_rdai_to_priv(rdai);
+ struct rsnd_dai_stream *io = is_play ? &rdai->playback : &rdai->capture;
/*
* possible sampling rate limitation is same as
* 2ch if it supports multi ssi
+ * and same as 8ch when 6ch TDM is in use (see rsnd_ssi_config_init())
*/
ic = *ic_;
- if (1 < rsnd_rdai_ssi_lane_get(rdai)) {
- ic.min = 2;
- ic.max = 2;
- }
+ ic.min =
+ ic.max = rsnd_runtime_channel_for_ssi_with_params(io, params);
return rsnd_soc_hw_rule(priv, rsnd_soc_hw_rate_list,
ARRAY_SIZE(rsnd_soc_hw_rate_list),
&ic, ir);
}
+static int rsnd_soc_hw_rule_rate_playback(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ return __rsnd_soc_hw_rule_rate(params, rule, 1);
+}
+
+static int rsnd_soc_hw_rule_rate_capture(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ return __rsnd_soc_hw_rule_rate(params, rule, 0);
+}
-static int rsnd_soc_hw_rule_channels(struct snd_pcm_hw_params *params,
- struct snd_pcm_hw_rule *rule)
+static int __rsnd_soc_hw_rule_channels(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule,
+ int is_play)
{
struct snd_interval *ic_ = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
struct snd_interval *ir = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
@@ -819,22 +824,34 @@ static int rsnd_soc_hw_rule_channels(struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai = rule->private;
struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
struct rsnd_priv *priv = rsnd_rdai_to_priv(rdai);
+ struct rsnd_dai_stream *io = is_play ? &rdai->playback : &rdai->capture;
/*
* possible sampling rate limitation is same as
* 2ch if it supports multi ssi
+ * and same as 8ch when 6ch TDM is in use (see rsnd_ssi_config_init())
*/
ic = *ic_;
- if (1 < rsnd_rdai_ssi_lane_get(rdai)) {
- ic.min = 2;
- ic.max = 2;
- }
+ ic.min =
+ ic.max = rsnd_runtime_channel_for_ssi_with_params(io, params);
return rsnd_soc_hw_rule(priv, rsnd_soc_hw_channels_list,
ARRAY_SIZE(rsnd_soc_hw_channels_list),
ir, &ic);
}
+static int rsnd_soc_hw_rule_channels_playback(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ return __rsnd_soc_hw_rule_channels(params, rule, 1);
+}
+
+static int rsnd_soc_hw_rule_channels_capture(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ return __rsnd_soc_hw_rule_channels(params, rule, 0);
+}
+
static const struct snd_pcm_hardware rsnd_pcm_hardware = {
.info = SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_MMAP |
@@ -859,6 +876,8 @@ static int rsnd_soc_dai_startup(struct snd_pcm_substream *substream,
int ret;
int i;
+ rsnd_dai_stream_init(io, substream);
+
/*
* Channel Limitation
* It depends on Platform design
@@ -886,11 +905,17 @@ static int rsnd_soc_dai_startup(struct snd_pcm_substream *substream,
* It depends on Clock Master Mode
*/
if (rsnd_rdai_is_clk_master(rdai)) {
+ int is_play = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+
snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
- rsnd_soc_hw_rule_rate, dai,
+ is_play ? rsnd_soc_hw_rule_rate_playback :
+ rsnd_soc_hw_rule_rate_capture,
+ dai,
SNDRV_PCM_HW_PARAM_CHANNELS, -1);
snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
- rsnd_soc_hw_rule_channels, dai,
+ is_play ? rsnd_soc_hw_rule_channels_playback :
+ rsnd_soc_hw_rule_channels_capture,
+ dai,
SNDRV_PCM_HW_PARAM_RATE, -1);
}
@@ -915,6 +940,8 @@ static void rsnd_soc_dai_shutdown(struct snd_pcm_substream *substream,
* call rsnd_dai_call without spinlock
*/
rsnd_dai_call(nolock_stop, io, priv);
+
+ rsnd_dai_stream_quit(io);
}
static const struct snd_soc_dai_ops rsnd_soc_dai_ops = {
@@ -990,7 +1017,7 @@ of_node_compatible:
static void __rsnd_dai_probe(struct rsnd_priv *priv,
struct device_node *dai_np,
- int dai_i, int is_graph)
+ int dai_i)
{
struct device_node *playback, *capture;
struct rsnd_dai_stream *io_playback;
@@ -1089,13 +1116,13 @@ static int rsnd_dai_probe(struct rsnd_priv *priv)
dai_i = 0;
if (is_graph) {
for_each_endpoint_of_node(dai_node, dai_np) {
- __rsnd_dai_probe(priv, dai_np, dai_i, is_graph);
+ __rsnd_dai_probe(priv, dai_np, dai_i);
rsnd_ssi_parse_hdmi_connection(priv, dai_np, dai_i);
dai_i++;
}
} else {
for_each_child_of_node(dai_node, dai_np)
- __rsnd_dai_probe(priv, dai_np, dai_i++, is_graph);
+ __rsnd_dai_probe(priv, dai_np, dai_i++);
}
return 0;
@@ -1496,6 +1523,8 @@ static int rsnd_remove(struct platform_device *pdev)
};
int ret = 0, i;
+ snd_soc_disconnect_sync(&pdev->dev);
+
pm_runtime_disable(&pdev->dev);
for_each_rsnd_dai(rdai, priv, i) {
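
Why the rule callbacks are split per direction: a struct snd_pcm_hw_rule
callback receives only the params and the rule, and rule->private already
carries the DAI, so there is no slot left for the stream direction; it has to
be baked into the callback that gets registered. Reduced to its shape, with
hypothetical names:

    static int my_rule(struct snd_pcm_hw_params *params,
                       struct snd_pcm_hw_rule *rule, int is_play)
    {
        /* shared body: pick &rdai->playback or &rdai->capture */
        return 0;
    }

    static int my_rule_playback(struct snd_pcm_hw_params *params,
                                struct snd_pcm_hw_rule *rule)
    {
        return my_rule(params, rule, 1);
    }

    static int my_rule_capture(struct snd_pcm_hw_params *params,
                               struct snd_pcm_hw_rule *rule)
    {
        return my_rule(params, rule, 0);
    }

The stream init/quit calls also move from trigger to startup/shutdown, so the
rsnd_dai_stream is already populated before any hw-rule refinement runs.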
diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
index 4d750bdf8e24..41de23417c4a 100644
--- a/sound/soc/sh/rcar/dma.c
+++ b/sound/soc/sh/rcar/dma.c
@@ -71,25 +71,7 @@ static struct rsnd_mod mem = {
static void __rsnd_dmaen_complete(struct rsnd_mod *mod,
struct rsnd_dai_stream *io)
{
- struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
- bool elapsed = false;
- unsigned long flags;
-
- /*
- * Renesas sound Gen1 needs 1 DMAC,
- * Gen2 needs 2 DMAC.
- * In Gen2 case, it are Audio-DMAC, and Audio-DMAC-peri-peri.
- * But, Audio-DMAC-peri-peri doesn't have interrupt,
- * and this driver is assuming that here.
- */
- spin_lock_irqsave(&priv->lock, flags);
-
if (rsnd_io_is_working(io))
- elapsed = true;
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if (elapsed)
rsnd_dai_period_elapsed(io);
}
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
index 57cd2bc773c2..ad6523595b0a 100644
--- a/sound/soc/sh/rcar/rsnd.h
+++ b/sound/soc/sh/rcar/rsnd.h
@@ -399,9 +399,18 @@ void rsnd_parse_connect_common(struct rsnd_dai *rdai,
struct device_node *playback,
struct device_node *capture);
-int rsnd_runtime_channel_original(struct rsnd_dai_stream *io);
-int rsnd_runtime_channel_after_ctu(struct rsnd_dai_stream *io);
-int rsnd_runtime_channel_for_ssi(struct rsnd_dai_stream *io);
+#define rsnd_runtime_channel_original(io) \
+ rsnd_runtime_channel_original_with_params(io, NULL)
+int rsnd_runtime_channel_original_with_params(struct rsnd_dai_stream *io,
+ struct snd_pcm_hw_params *params);
+#define rsnd_runtime_channel_after_ctu(io) \
+ rsnd_runtime_channel_after_ctu_with_params(io, NULL)
+int rsnd_runtime_channel_after_ctu_with_params(struct rsnd_dai_stream *io,
+ struct snd_pcm_hw_params *params);
+#define rsnd_runtime_channel_for_ssi(io) \
+ rsnd_runtime_channel_for_ssi_with_params(io, NULL)
+int rsnd_runtime_channel_for_ssi_with_params(struct rsnd_dai_stream *io,
+ struct snd_pcm_hw_params *params);
int rsnd_runtime_is_ssi_multi(struct rsnd_dai_stream *io);
int rsnd_runtime_is_ssi_tdm(struct rsnd_dai_stream *io);
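
The header keeps the old entry points alive as macros that forward a NULL
params, so only call sites that genuinely have a snd_pcm_hw_params to offer
need to change. The shape of the pattern, with hypothetical names:

    int foo_with_params(struct rsnd_dai_stream *io,
                        struct snd_pcm_hw_params *params);
    #define foo(io) foo_with_params(io, NULL)

Callers inside hw-rule refinement pass the candidate params; everyone else
implicitly keeps the old runtime->channels behaviour.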
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index cbf3bf312d23..97a9db892a8f 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -79,8 +79,8 @@ struct rsnd_ssi {
int irq;
unsigned int usrcnt;
+ /* for PIO */
int byte_pos;
- int period_pos;
int byte_per_period;
int next_period_byte;
};
@@ -371,11 +371,11 @@ static void rsnd_ssi_config_init(struct rsnd_mod *mod,
if (rsnd_io_is_play(io))
cr_own |= TRMD;
- switch (runtime->sample_bits) {
+ switch (snd_pcm_format_width(runtime->format)) {
case 16:
cr_own |= DWL_16;
break;
- case 32:
+ case 24:
cr_own |= DWL_24;
break;
}
@@ -414,63 +414,6 @@ static void rsnd_ssi_register_setup(struct rsnd_mod *mod)
ssi->cr_en);
}
-static void rsnd_ssi_pointer_init(struct rsnd_mod *mod,
- struct rsnd_dai_stream *io)
-{
- struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
- struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
-
- ssi->byte_pos = 0;
- ssi->period_pos = 0;
- ssi->byte_per_period = runtime->period_size *
- runtime->channels *
- samples_to_bytes(runtime, 1);
- ssi->next_period_byte = ssi->byte_per_period;
-}
-
-static int rsnd_ssi_pointer_offset(struct rsnd_mod *mod,
- struct rsnd_dai_stream *io,
- int additional)
-{
- struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
- struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
- int pos = ssi->byte_pos + additional;
-
- pos %= (runtime->periods * ssi->byte_per_period);
-
- return pos;
-}
-
-static bool rsnd_ssi_pointer_update(struct rsnd_mod *mod,
- struct rsnd_dai_stream *io,
- int byte)
-{
- struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
- bool ret = false;
- int byte_pos;
-
- byte_pos = ssi->byte_pos + byte;
-
- if (byte_pos >= ssi->next_period_byte) {
- struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
-
- ssi->period_pos++;
- ssi->next_period_byte += ssi->byte_per_period;
-
- if (ssi->period_pos >= runtime->periods) {
- byte_pos = 0;
- ssi->period_pos = 0;
- ssi->next_period_byte = ssi->byte_per_period;
- }
-
- ret = true;
- }
-
- WRITE_ONCE(ssi->byte_pos, byte_pos);
-
- return ret;
-}
-
/*
* SSI mod common functions
*/
@@ -484,8 +427,6 @@ static int rsnd_ssi_init(struct rsnd_mod *mod,
if (!rsnd_ssi_is_run_mods(mod, io))
return 0;
- rsnd_ssi_pointer_init(mod, io);
-
ssi->usrcnt++;
rsnd_mod_power_on(mod);
@@ -656,6 +597,8 @@ static int rsnd_ssi_irq(struct rsnd_mod *mod,
return 0;
}
+static bool rsnd_ssi_pio_interrupt(struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io);
static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
struct rsnd_dai_stream *io)
{
@@ -674,30 +617,8 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
status = rsnd_ssi_status_get(mod);
/* PIO only */
- if (!is_dma && (status & DIRQ)) {
- struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
- u32 *buf = (u32 *)(runtime->dma_area +
- rsnd_ssi_pointer_offset(mod, io, 0));
- int shift = 0;
-
- switch (runtime->sample_bits) {
- case 32:
- shift = 8;
- break;
- }
-
- /*
- * 8/16/32 data can be assesse to TDR/RDR register
- * directly as 32bit data
- * see rsnd_ssi_init()
- */
- if (rsnd_io_is_play(io))
- rsnd_mod_write(mod, SSITDR, (*buf) << shift);
- else
- *buf = (rsnd_mod_read(mod, SSIRDR) >> shift);
-
- elapsed = rsnd_ssi_pointer_update(mod, io, sizeof(*buf));
- }
+ if (!is_dma && (status & DIRQ))
+ elapsed = rsnd_ssi_pio_interrupt(mod, io);
/* DMA only */
if (is_dma && (status & (UIRQ | OIRQ)))
@@ -835,7 +756,71 @@ static int rsnd_ssi_common_remove(struct rsnd_mod *mod,
return 0;
}
-static int rsnd_ssi_pointer(struct rsnd_mod *mod,
+/*
+ * SSI PIO functions
+ */
+static bool rsnd_ssi_pio_interrupt(struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io)
+{
+ struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+ struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
+ u32 *buf = (u32 *)(runtime->dma_area + ssi->byte_pos);
+ int shift = 0;
+ int byte_pos;
+ bool elapsed = false;
+
+ if (snd_pcm_format_width(runtime->format) == 24)
+ shift = 8;
+
+ /*
+ * 8/16/32 bit data can be accessed via the TDR/RDR register
+ * directly as 32bit data
+ * see rsnd_ssi_init()
+ */
+ if (rsnd_io_is_play(io))
+ rsnd_mod_write(mod, SSITDR, (*buf) << shift);
+ else
+ *buf = (rsnd_mod_read(mod, SSIRDR) >> shift);
+
+ byte_pos = ssi->byte_pos + sizeof(*buf);
+
+ if (byte_pos >= ssi->next_period_byte) {
+ int period_pos = byte_pos / ssi->byte_per_period;
+
+ if (period_pos >= runtime->periods) {
+ byte_pos = 0;
+ period_pos = 0;
+ }
+
+ ssi->next_period_byte = (period_pos + 1) * ssi->byte_per_period;
+
+ elapsed = true;
+ }
+
+ WRITE_ONCE(ssi->byte_pos, byte_pos);
+
+ return elapsed;
+}
+
+static int rsnd_ssi_pio_init(struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io,
+ struct rsnd_priv *priv)
+{
+ struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+ struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
+
+ if (!rsnd_ssi_is_parent(mod, io)) {
+ ssi->byte_pos = 0;
+ ssi->byte_per_period = runtime->period_size *
+ runtime->channels *
+ samples_to_bytes(runtime, 1);
+ ssi->next_period_byte = ssi->byte_per_period;
+ }
+
+ return rsnd_ssi_init(mod, io, priv);
+}
+
+static int rsnd_ssi_pio_pointer(struct rsnd_mod *mod,
struct rsnd_dai_stream *io,
snd_pcm_uframes_t *pointer)
{
@@ -851,12 +836,12 @@ static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
.name = SSI_NAME,
.probe = rsnd_ssi_common_probe,
.remove = rsnd_ssi_common_remove,
- .init = rsnd_ssi_init,
+ .init = rsnd_ssi_pio_init,
.quit = rsnd_ssi_quit,
.start = rsnd_ssi_start,
.stop = rsnd_ssi_stop,
.irq = rsnd_ssi_irq,
- .pointer= rsnd_ssi_pointer,
+ .pointer = rsnd_ssi_pio_pointer,
.pcm_new = rsnd_ssi_pcm_new,
.hw_params = rsnd_ssi_hw_params,
};
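
A concrete walk-through of the PIO bookkeeping, with illustrative stream
parameters (2 channels, S16, period_size = 256 frames):

    /* byte_per_period = period_size * channels * bytes_per_sample
     *                 = 256 * 2 * 2 = 1024 bytes
     * Each DIRQ moves one u32 (4 bytes: an L+R pair at 16 bit), so
     * byte_pos crosses next_period_byte every 1024 / 4 = 256
     * interrupts; rsnd_ssi_pio_interrupt() then returns true and
     * __rsnd_ssi_interrupt() signals rsnd_dai_period_elapsed().
     */

Deriving period_pos from byte_pos on demand is what allows the separate
period_pos field to be dropped from struct rsnd_ssi.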
diff --git a/sound/soc/soc-acpi.c b/sound/soc/soc-acpi.c
index f21df28bc28e..3d7e1ff79139 100644
--- a/sound/soc/soc-acpi.c
+++ b/sound/soc/soc-acpi.c
@@ -16,79 +16,16 @@
#include <sound/soc-acpi.h>
-static acpi_status snd_soc_acpi_find_name(acpi_handle handle, u32 level,
- void *context, void **ret)
-{
- struct acpi_device *adev;
- const char *name = NULL;
-
- if (acpi_bus_get_device(handle, &adev))
- return AE_OK;
-
- if (adev->status.present && adev->status.functional) {
- name = acpi_dev_name(adev);
- *(const char **)ret = name;
- return AE_CTRL_TERMINATE;
- }
-
- return AE_OK;
-}
-
-const char *snd_soc_acpi_find_name_from_hid(const u8 hid[ACPI_ID_LEN])
-{
- const char *name = NULL;
- acpi_status status;
-
- status = acpi_get_devices(hid, snd_soc_acpi_find_name, NULL,
- (void **)&name);
-
- if (ACPI_FAILURE(status) || name[0] == '\0')
- return NULL;
-
- return name;
-}
-EXPORT_SYMBOL_GPL(snd_soc_acpi_find_name_from_hid);
-
-static acpi_status snd_soc_acpi_mach_match(acpi_handle handle, u32 level,
- void *context, void **ret)
-{
- unsigned long long sta;
- acpi_status status;
-
- *(bool *)context = true;
- status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
- if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT))
- *(bool *)context = false;
-
- return AE_OK;
-}
-
-bool snd_soc_acpi_check_hid(const u8 hid[ACPI_ID_LEN])
-{
- acpi_status status;
- bool found = false;
-
- status = acpi_get_devices(hid, snd_soc_acpi_mach_match, &found, NULL);
-
- if (ACPI_FAILURE(status))
- return false;
-
- return found;
-}
-EXPORT_SYMBOL_GPL(snd_soc_acpi_check_hid);
-
struct snd_soc_acpi_mach *
snd_soc_acpi_find_machine(struct snd_soc_acpi_mach *machines)
{
struct snd_soc_acpi_mach *mach;
for (mach = machines; mach->id[0]; mach++) {
- if (snd_soc_acpi_check_hid(mach->id) == true) {
- if (mach->machine_quirk == NULL)
- return mach;
-
- if (mach->machine_quirk(mach) != NULL)
- return mach;
+ if (acpi_dev_present(mach->id, NULL, -1)) {
+ if (mach->machine_quirk)
+ mach = mach->machine_quirk(mach);
+ return mach;
}
}
return NULL;
@@ -163,7 +100,7 @@ struct snd_soc_acpi_mach *snd_soc_acpi_codec_list(void *arg)
return mach;
for (i = 0; i < codec_list->num_codecs; i++) {
- if (snd_soc_acpi_check_hid(codec_list->codecs[i]) != true)
+ if (!acpi_dev_present(codec_list->codecs[i], NULL, -1))
return NULL;
}
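
acpi_dev_present() already performs the present/enabled (_STA) check that the
deleted helpers open-coded on top of acpi_get_devices(). Minimal usage, with
an illustrative HID; NULL uid and hrv = -1 mean "don't care":

    if (acpi_dev_present("10EC5640", NULL, -1))
        pr_info("codec is present and enabled\n");

Note the behavioural tweak in snd_soc_acpi_find_machine(): a machine_quirk
hook may now substitute a different mach entry (or NULL) rather than merely
confirming the current one.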
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index d9b1e6417fb9..81232f4ab614 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -1096,7 +1096,6 @@ static struct snd_compr_ops soc_compr_dyn_ops = {
*/
int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
{
- struct snd_soc_codec *codec = rtd->codec;
struct snd_soc_platform *platform = rtd->platform;
struct snd_soc_component *component;
struct snd_soc_rtdcom_list *rtdcom;
@@ -1199,8 +1198,9 @@ int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
ret = snd_compress_new(rtd->card->snd_card, num, direction,
new_name, compr);
if (ret < 0) {
+ component = rtd->codec_dai->component;
pr_err("compress asoc: can't create compress for codec %s\n",
- codec->component.name);
+ component->name);
goto compr_err;
}
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index c0edac80df34..e91879569a0f 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -213,7 +213,7 @@ static umode_t soc_dev_attr_is_visible(struct kobject *kobj,
if (attr == &dev_attr_pmdown_time.attr)
return attr->mode; /* always visible */
- return rtd->codec ? attr->mode : 0; /* enabled only with codec */
+ return rtd->num_codecs ? attr->mode : 0; /* enabled only with codec */
}
static const struct attribute_group soc_dapm_dev_group = {
@@ -349,120 +349,84 @@ static void soc_init_codec_debugfs(struct snd_soc_component *component)
"ASoC: Failed to create codec register debugfs file\n");
}
-static ssize_t codec_list_read_file(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
+static int codec_list_seq_show(struct seq_file *m, void *v)
{
- char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
- ssize_t len, ret = 0;
struct snd_soc_codec *codec;
- if (!buf)
- return -ENOMEM;
-
mutex_lock(&client_mutex);
- list_for_each_entry(codec, &codec_list, list) {
- len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n",
- codec->component.name);
- if (len >= 0)
- ret += len;
- if (ret > PAGE_SIZE) {
- ret = PAGE_SIZE;
- break;
- }
- }
+ list_for_each_entry(codec, &codec_list, list)
+ seq_printf(m, "%s\n", codec->component.name);
mutex_unlock(&client_mutex);
- if (ret >= 0)
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
-
- kfree(buf);
+ return 0;
+}
- return ret;
+static int codec_list_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, codec_list_seq_show, NULL);
}
static const struct file_operations codec_list_fops = {
- .read = codec_list_read_file,
- .llseek = default_llseek,/* read accesses f_pos */
+ .open = codec_list_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
};
-static ssize_t dai_list_read_file(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
+static int dai_list_seq_show(struct seq_file *m, void *v)
{
- char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
- ssize_t len, ret = 0;
struct snd_soc_component *component;
struct snd_soc_dai *dai;
- if (!buf)
- return -ENOMEM;
-
mutex_lock(&client_mutex);
- list_for_each_entry(component, &component_list, list) {
- list_for_each_entry(dai, &component->dai_list, list) {
- len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n",
- dai->name);
- if (len >= 0)
- ret += len;
- if (ret > PAGE_SIZE) {
- ret = PAGE_SIZE;
- break;
- }
- }
- }
+ list_for_each_entry(component, &component_list, list)
+ list_for_each_entry(dai, &component->dai_list, list)
+ seq_printf(m, "%s\n", dai->name);
mutex_unlock(&client_mutex);
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
-
- kfree(buf);
+ return 0;
+}
- return ret;
+static int dai_list_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dai_list_seq_show, NULL);
}
static const struct file_operations dai_list_fops = {
- .read = dai_list_read_file,
- .llseek = default_llseek,/* read accesses f_pos */
+ .open = dai_list_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
};
-static ssize_t platform_list_read_file(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
+static int platform_list_seq_show(struct seq_file *m, void *v)
{
- char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
- ssize_t len, ret = 0;
struct snd_soc_platform *platform;
- if (!buf)
- return -ENOMEM;
-
mutex_lock(&client_mutex);
- list_for_each_entry(platform, &platform_list, list) {
- len = snprintf(buf + ret, PAGE_SIZE - ret, "%s\n",
- platform->component.name);
- if (len >= 0)
- ret += len;
- if (ret > PAGE_SIZE) {
- ret = PAGE_SIZE;
- break;
- }
- }
+ list_for_each_entry(platform, &platform_list, list)
+ seq_printf(m, "%s\n", platform->component.name);
mutex_unlock(&client_mutex);
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
-
- kfree(buf);
+ return 0;
+}
- return ret;
+static int platform_list_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, platform_list_seq_show, NULL);
}
static const struct file_operations platform_list_fops = {
- .read = platform_list_read_file,
- .llseek = default_llseek,/* read accesses f_pos */
+ .open = platform_list_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
};
static void soc_init_card_debugfs(struct snd_soc_card *card)
@@ -491,7 +455,6 @@ static void soc_cleanup_card_debugfs(struct snd_soc_card *card)
debugfs_remove_recursive(card->debugfs_card_root);
}
-
static void snd_soc_debugfs_init(void)
{
snd_soc_debugfs_root = debugfs_create_dir("asoc", NULL);
@@ -598,6 +561,7 @@ struct snd_soc_component *snd_soc_rtdcom_lookup(struct snd_soc_pcm_runtime *rtd,
return NULL;
}
+EXPORT_SYMBOL_GPL(snd_soc_rtdcom_lookup);
struct snd_pcm_substream *snd_soc_get_dai_substream(struct snd_soc_card *card,
const char *dai_link, int stream)
@@ -1392,6 +1356,17 @@ static int soc_init_dai_link(struct snd_soc_card *card,
return 0;
}
+void snd_soc_disconnect_sync(struct device *dev)
+{
+ struct snd_soc_component *component = snd_soc_lookup_component(dev, NULL);
+
+ if (!component || !component->card)
+ return;
+
+ snd_card_disconnect_sync(component->card->snd_card);
+}
+EXPORT_SYMBOL_GPL(snd_soc_disconnect_sync);
+
/**
* snd_soc_add_dai_link - Add a DAI link dynamically
* @card: The ASoC card to which the DAI link is added
@@ -1945,7 +1920,9 @@ int snd_soc_runtime_set_dai_fmt(struct snd_soc_pcm_runtime *rtd,
}
/* Flip the polarity for the "CPU" end of a CODEC<->CODEC link */
- if (cpu_dai->codec) {
+ /* the component which has non_legacy_dai_naming is Codec */
+ if (cpu_dai->codec ||
+ cpu_dai->component->driver->non_legacy_dai_naming) {
unsigned int inv_dai_fmt;
inv_dai_fmt = dai_fmt & ~SND_SOC_DAIFMT_MASTER_MASK;
@@ -3149,7 +3126,7 @@ static struct snd_soc_dai *soc_add_dai(struct snd_soc_component *component,
if (!dai->driver->ops)
dai->driver->ops = &null_dai_ops;
- list_add(&dai->list, &component->dai_list);
+ list_add_tail(&dai->list, &component->dai_list);
component->num_dai++;
dev_dbg(dev, "ASoC: Registered DAI '%s'\n", dai->name);
@@ -3176,8 +3153,6 @@ static int snd_soc_register_dais(struct snd_soc_component *component,
dev_dbg(dev, "ASoC: dai register %s #%zu\n", dev_name(dev), count);
- component->dai_drv = dai_drv;
-
for (i = 0; i < count; i++) {
dai = soc_add_dai(component, dai_drv + i,
@@ -4354,6 +4329,7 @@ int snd_soc_get_dai_name(struct of_phandle_args *args,
args,
dai_name);
} else {
+ struct snd_soc_dai *dai;
int id = -1;
switch (args->args_count) {
@@ -4375,7 +4351,14 @@ int snd_soc_get_dai_name(struct of_phandle_args *args,
ret = 0;
- *dai_name = pos->dai_drv[id].name;
+ /* find target DAI */
+ list_for_each_entry(dai, &pos->dai_list, list) {
+ if (id == 0)
+ break;
+ id--;
+ }
+
+ *dai_name = dai->driver->name;
if (!*dai_name)
*dai_name = pos->name;
}
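
The list_add() to list_add_tail() switch in soc_add_dai() is what makes the
new index walk in snd_soc_get_dai_name() correct: DAIs are now kept in
registration order, so DT cell N resolves to the Nth registered DAI rather
than the Nth counted from the end, and the per-component dai_drv array pointer
can be dropped. The walk is an indexed lookup over a list (sketch):

    struct snd_soc_dai *dai;

    list_for_each_entry(dai, &component->dai_list, list) {
        if (id == 0)
            break;      /* dai is the id-th registered DAI */
        id--;
    }

The debugfs conversions in the same file are independent of this: seq_file
removes the former PAGE_SIZE ceiling, under which long codec/DAI/platform
lists were silently truncated.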
diff --git a/sound/soc/soc-io.c b/sound/soc/soc-io.c
index 20340ade20a7..2bc1c4c17896 100644
--- a/sound/soc/soc-io.c
+++ b/sound/soc/soc-io.c
@@ -34,6 +34,10 @@ int snd_soc_component_read(struct snd_soc_component *component,
ret = regmap_read(component->regmap, reg, val);
else if (component->read)
ret = component->read(component, reg, val);
+ else if (component->driver->read) {
+ *val = component->driver->read(component, reg);
+ ret = 0;
+ }
else
ret = -EIO;
@@ -70,6 +74,8 @@ int snd_soc_component_write(struct snd_soc_component *component,
return regmap_write(component->regmap, reg, val);
else if (component->write)
return component->write(component, reg, val);
+ else if (component->driver->write)
+ return component->driver->write(component, reg, val);
else
return -EIO;
}
diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
index 500f98c730b9..7144a51ddfa9 100644
--- a/sound/soc/soc-ops.c
+++ b/sound/soc/soc-ops.c
@@ -378,7 +378,7 @@ int snd_soc_get_volsw_sx(struct snd_kcontrol *kcontrol,
unsigned int rshift = mc->rshift;
int max = mc->max;
int min = mc->min;
- int mask = (1 << (fls(min + max) - 1)) - 1;
+ unsigned int mask = (1 << (fls(min + max) - 1)) - 1;
unsigned int val;
int ret;
@@ -423,7 +423,7 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
unsigned int rshift = mc->rshift;
int max = mc->max;
int min = mc->min;
- int mask = (1 << (fls(min + max) - 1)) - 1;
+ unsigned int mask = (1 << (fls(min + max) - 1)) - 1;
int err = 0;
unsigned int val, val_mask, val2 = 0;
diff --git a/sound/soc/soc-utils.c b/sound/soc/soc-utils.c
index e30aacbcfc29..bcd3da2739e2 100644
--- a/sound/soc/soc-utils.c
+++ b/sound/soc/soc-utils.c
@@ -288,7 +288,7 @@ static const struct snd_soc_platform_driver dummy_platform = {
.ops = &dummy_dma_ops,
};
-static struct snd_soc_codec_driver dummy_codec;
+static const struct snd_soc_codec_driver dummy_codec;
#define STUB_RATES SNDRV_PCM_RATE_8000_192000
#define STUB_FORMATS (SNDRV_PCM_FMTBIT_S8 | \
diff --git a/sound/soc/stm/Kconfig b/sound/soc/stm/Kconfig
index 3398e6c57f37..3ad881fc40a1 100644
--- a/sound/soc/stm/Kconfig
+++ b/sound/soc/stm/Kconfig
@@ -28,4 +28,16 @@ config SND_SOC_STM32_SPDIFRX
help
Say Y if you want to enable S/PDIF capture for STM32
+config SND_SOC_STM32_DFSDM
+ tristate "SoC Audio support for STM32 DFSDM"
+ depends on ARCH_STM32 || COMPILE_TEST
+ depends on SND_SOC
+ depends on STM32_DFSDM_ADC
+ select SND_SOC_GENERIC_DMAENGINE_PCM
+ select SND_SOC_DMIC
+ select IIO_BUFFER_CB
+ help
+ Select this option to enable the STM32 Digital Filter
+ for Sigma Delta Modulators (DFSDM) driver used
+ in various STM32 series for digital microphone capture.
endmenu
diff --git a/sound/soc/stm/Makefile b/sound/soc/stm/Makefile
index 5b7f0fab0bd6..3143c0b47042 100644
--- a/sound/soc/stm/Makefile
+++ b/sound/soc/stm/Makefile
@@ -13,3 +13,6 @@ obj-$(CONFIG_SND_SOC_STM32_I2S) += snd-soc-stm32-i2s.o
# SPDIFRX
snd-soc-stm32-spdifrx-objs := stm32_spdifrx.o
obj-$(CONFIG_SND_SOC_STM32_SPDIFRX) += snd-soc-stm32-spdifrx.o
+
+# DFSDM
+obj-$(CONFIG_SND_SOC_STM32_DFSDM) += stm32_adfsdm.o
diff --git a/sound/soc/stm/stm32_adfsdm.c b/sound/soc/stm/stm32_adfsdm.c
new file mode 100644
index 000000000000..7306e3eca9e1
--- /dev/null
+++ b/sound/soc/stm/stm32_adfsdm.c
@@ -0,0 +1,347 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file is part of the STM32 DFSDM ASoC DAI driver
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Authors: Arnaud Pouliquen <arnaud.pouliquen@st.com>
+ * Olivier Moysan <olivier.moysan@st.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/consumer.h>
+#include <linux/iio/adc/stm32-dfsdm-adc.h>
+
+#include <sound/pcm.h>
+#include <sound/soc.h>
+
+#define STM32_ADFSDM_DRV_NAME "stm32-adfsdm"
+
+#define DFSDM_MAX_PERIOD_SIZE (PAGE_SIZE / 2)
+#define DFSDM_MAX_PERIODS 6
+
+struct stm32_adfsdm_priv {
+ struct snd_soc_dai_driver dai_drv;
+ struct snd_pcm_substream *substream;
+ struct device *dev;
+
+ /* IIO */
+ struct iio_channel *iio_ch;
+ struct iio_cb_buffer *iio_cb;
+ bool iio_active;
+
+ /* PCM buffer */
+ unsigned char *pcm_buff;
+ unsigned int pos;
+};
+
+static const struct snd_pcm_hardware stm32_adfsdm_pcm_hw = {
+ .info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_PAUSE,
+ .formats = SNDRV_PCM_FMTBIT_S32_LE,
+
+ .rate_min = 8000,
+ .rate_max = 32000,
+
+ .channels_min = 1,
+ .channels_max = 1,
+
+ .periods_min = 2,
+ .periods_max = DFSDM_MAX_PERIODS,
+
+ .period_bytes_max = DFSDM_MAX_PERIOD_SIZE,
+ .buffer_bytes_max = DFSDM_MAX_PERIODS * DFSDM_MAX_PERIOD_SIZE
+};
+
+static void stm32_adfsdm_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct stm32_adfsdm_priv *priv = snd_soc_dai_get_drvdata(dai);
+
+ if (priv->iio_active) {
+ iio_channel_stop_all_cb(priv->iio_cb);
+ priv->iio_active = false;
+ }
+}
+
+static int stm32_adfsdm_dai_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct stm32_adfsdm_priv *priv = snd_soc_dai_get_drvdata(dai);
+ int ret;
+
+ ret = iio_write_channel_attribute(priv->iio_ch,
+ substream->runtime->rate, 0,
+ IIO_CHAN_INFO_SAMP_FREQ);
+ if (ret < 0) {
+ dev_err(dai->dev, "%s: Failed to set sampling rate to %d\n",
+ __func__, substream->runtime->rate);
+ return ret;
+ }
+
+ if (!priv->iio_active) {
+ ret = iio_channel_start_all_cb(priv->iio_cb);
+ if (!ret)
+ priv->iio_active = true;
+ else
+ dev_err(dai->dev, "%s: IIO channel start failed (%d)\n",
+ __func__, ret);
+ }
+
+ return ret;
+}
+
+static int stm32_adfsdm_set_sysclk(struct snd_soc_dai *dai, int clk_id,
+ unsigned int freq, int dir)
+{
+ struct stm32_adfsdm_priv *priv = snd_soc_dai_get_drvdata(dai);
+ ssize_t size;
+ char str_freq[10];
+
+ dev_dbg(dai->dev, "%s: Enter for freq %d\n", __func__, freq);
+
+ /* Set IIO frequency if CODEC is master as clock comes from SPI_IN */
+
+ snprintf(str_freq, sizeof(str_freq), "%d\n", freq);
+ size = iio_write_channel_ext_info(priv->iio_ch, "spi_clk_freq",
+ str_freq, sizeof(str_freq));
+ if (size != sizeof(str_freq)) {
+ dev_err(dai->dev, "%s: Failed to set SPI clock\n",
+ __func__);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static const struct snd_soc_dai_ops stm32_adfsdm_dai_ops = {
+ .shutdown = stm32_adfsdm_shutdown,
+ .prepare = stm32_adfsdm_dai_prepare,
+ .set_sysclk = stm32_adfsdm_set_sysclk,
+};
+
+static const struct snd_soc_dai_driver stm32_adfsdm_dai = {
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 1,
+ .formats = SNDRV_PCM_FMTBIT_S32_LE,
+ .rates = (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
+ SNDRV_PCM_RATE_32000),
+ },
+ .ops = &stm32_adfsdm_dai_ops,
+};
+
+static const struct snd_soc_component_driver stm32_adfsdm_dai_component = {
+ .name = "stm32_dfsdm_audio",
+};
+
+static int stm32_afsdm_pcm_cb(const void *data, size_t size, void *private)
+{
+ struct stm32_adfsdm_priv *priv = private;
+ struct snd_soc_pcm_runtime *rtd = priv->substream->private_data;
+ u8 *pcm_buff = priv->pcm_buff;
+ u8 *src_buff = (u8 *)data;
+ unsigned int buff_size = snd_pcm_lib_buffer_bytes(priv->substream);
+ unsigned int period_size = snd_pcm_lib_period_bytes(priv->substream);
+ unsigned int old_pos = priv->pos;
+ unsigned int cur_size = size;
+
+ dev_dbg(rtd->dev, "%s: buff_addr: %p, pos = %d, size = %zu\n",
+ __func__, &pcm_buff[priv->pos], priv->pos, size);
+
+ if ((priv->pos + size) > buff_size) {
+ memcpy(&pcm_buff[priv->pos], src_buff, buff_size - priv->pos);
+ cur_size -= buff_size - priv->pos;
+ priv->pos = 0;
+ }
+
+ memcpy(&pcm_buff[priv->pos], &src_buff[size - cur_size], cur_size);
+ priv->pos = (priv->pos + cur_size) % buff_size;
+
+ if (cur_size != size || (old_pos && (old_pos % period_size < size)))
+ snd_pcm_period_elapsed(priv->substream);
+
+ return 0;
+}
+
+static int stm32_adfsdm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct stm32_adfsdm_priv *priv =
+ snd_soc_dai_get_drvdata(rtd->cpu_dai);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ priv->pos = 0;
+ return stm32_dfsdm_get_buff_cb(priv->iio_ch->indio_dev,
+ stm32_afsdm_pcm_cb, priv);
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_STOP:
+ return stm32_dfsdm_release_buff_cb(priv->iio_ch->indio_dev);
+ }
+
+ return -EINVAL;
+}
+
+static int stm32_adfsdm_pcm_open(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct stm32_adfsdm_priv *priv = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ int ret;
+
+ ret = snd_soc_set_runtime_hwparams(substream, &stm32_adfsdm_pcm_hw);
+ if (!ret)
+ priv->substream = substream;
+
+ return ret;
+}
+
+static int stm32_adfsdm_pcm_close(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct stm32_adfsdm_priv *priv =
+ snd_soc_dai_get_drvdata(rtd->cpu_dai);
+
+ snd_pcm_lib_free_pages(substream);
+ priv->substream = NULL;
+
+ return 0;
+}
+
+static snd_pcm_uframes_t stm32_adfsdm_pcm_pointer(
+ struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct stm32_adfsdm_priv *priv =
+ snd_soc_dai_get_drvdata(rtd->cpu_dai);
+
+ return bytes_to_frames(substream->runtime, priv->pos);
+}
+
+static int stm32_adfsdm_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct stm32_adfsdm_priv *priv =
+ snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ int ret;
+
+ ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
+ if (ret < 0)
+ return ret;
+ priv->pcm_buff = substream->runtime->dma_area;
+
+ return iio_channel_cb_set_buffer_watermark(priv->iio_cb,
+ params_period_size(params));
+}
+
+static int stm32_adfsdm_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+ snd_pcm_lib_free_pages(substream);
+
+ return 0;
+}
+
+static struct snd_pcm_ops stm32_adfsdm_pcm_ops = {
+ .open = stm32_adfsdm_pcm_open,
+ .close = stm32_adfsdm_pcm_close,
+ .hw_params = stm32_adfsdm_pcm_hw_params,
+ .hw_free = stm32_adfsdm_pcm_hw_free,
+ .trigger = stm32_adfsdm_trigger,
+ .pointer = stm32_adfsdm_pcm_pointer,
+};
+
+static int stm32_adfsdm_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_pcm *pcm = rtd->pcm;
+ struct stm32_adfsdm_priv *priv =
+ snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ unsigned int size = DFSDM_MAX_PERIODS * DFSDM_MAX_PERIOD_SIZE;
+
+ return snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+ priv->dev, size, size);
+}
+
+static void stm32_adfsdm_pcm_free(struct snd_pcm *pcm)
+{
+ struct snd_pcm_substream *substream;
+ struct snd_soc_pcm_runtime *rtd;
+ struct stm32_adfsdm_priv *priv;
+
+ substream = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream;
+ if (substream) {
+ rtd = substream->private_data;
+ priv = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+
+ snd_pcm_lib_preallocate_free_for_all(pcm);
+ }
+}
+
+static struct snd_soc_platform_driver stm32_adfsdm_soc_platform = {
+ .ops = &stm32_adfsdm_pcm_ops,
+ .pcm_new = stm32_adfsdm_pcm_new,
+ .pcm_free = stm32_adfsdm_pcm_free,
+};
+
+static const struct of_device_id stm32_adfsdm_of_match[] = {
+ {.compatible = "st,stm32h7-dfsdm-dai"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, stm32_adfsdm_of_match);
+
+static int stm32_adfsdm_probe(struct platform_device *pdev)
+{
+ struct stm32_adfsdm_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = &pdev->dev;
+ priv->dai_drv = stm32_adfsdm_dai;
+
+ dev_set_drvdata(&pdev->dev, priv);
+
+ ret = devm_snd_soc_register_component(&pdev->dev,
+ &stm32_adfsdm_dai_component,
+ &priv->dai_drv, 1);
+ if (ret < 0)
+ return ret;
+
+ /* Associate iio channel */
+ priv->iio_ch = devm_iio_channel_get_all(&pdev->dev);
+ if (IS_ERR(priv->iio_ch))
+ return PTR_ERR(priv->iio_ch);
+
+ priv->iio_cb = iio_channel_get_all_cb(&pdev->dev, NULL, NULL);
+ if (IS_ERR(priv->iio_cb))
+ return PTR_ERR(priv->iio_cb);
+
+ ret = devm_snd_soc_register_platform(&pdev->dev,
+ &stm32_adfsdm_soc_platform);
+ if (ret < 0)
+ dev_err(&pdev->dev, "%s: Failed to register PCM platform\n",
+ __func__);
+
+ return ret;
+}
+
+static struct platform_driver stm32_adfsdm_driver = {
+ .driver = {
+ .name = STM32_ADFSDM_DRV_NAME,
+ .of_match_table = stm32_adfsdm_of_match,
+ },
+ .probe = stm32_adfsdm_probe,
+};
+
+module_platform_driver(stm32_adfsdm_driver);
+
+MODULE_DESCRIPTION("stm32 DFSDM DAI driver");
+MODULE_AUTHOR("Arnaud Pouliquen <arnaud.pouliquen@st.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" STM32_ADFSDM_DRV_NAME);
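
Worked example for the wraparound copy in stm32_afsdm_pcm_cb(), with
illustrative sizes (buff_size = 4096, period bytes = 1024, priv->pos = 3584,
size = 1024):

    /* pos + size = 4608 > 4096, so the copy is split:
     *   memcpy(&pcm_buff[3584], src,       512);   pos -> 0
     *   memcpy(&pcm_buff[0],    &src[512], 512);   pos -> 512
     * cur_size (512) != size (1024), so snd_pcm_period_elapsed()
     * is raised for the completed period.
     */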
diff --git a/sound/soc/stm/stm32_sai.c b/sound/soc/stm/stm32_sai.c
index d6f71a3406e9..d743b7dd52fb 100644
--- a/sound/soc/stm/stm32_sai.c
+++ b/sound/soc/stm/stm32_sai.c
@@ -28,16 +28,6 @@
#include "stm32_sai.h"
-static LIST_HEAD(sync_providers);
-static DEFINE_MUTEX(sync_mutex);
-
-struct sync_provider {
- struct list_head link;
- struct device_node *node;
- int (*sync_conf)(void *data, int synco);
- void *data;
-};
-
static const struct stm32_sai_conf stm32_sai_conf_f4 = {
.version = SAI_STM32F4,
};
@@ -70,9 +60,8 @@ static int stm32_sai_sync_conf_client(struct stm32_sai_data *sai, int synci)
return 0;
}
-static int stm32_sai_sync_conf_provider(void *data, int synco)
+static int stm32_sai_sync_conf_provider(struct stm32_sai_data *sai, int synco)
{
- struct stm32_sai_data *sai = (struct stm32_sai_data *)data;
u32 prev_synco;
int ret;
@@ -103,83 +92,42 @@ static int stm32_sai_sync_conf_provider(void *data, int synco)
return 0;
}
-static int stm32_sai_set_sync_provider(struct device_node *np, int synco)
+static int stm32_sai_set_sync(struct stm32_sai_data *sai_client,
+ struct device_node *np_provider,
+ int synco, int synci)
{
- struct sync_provider *provider;
+ struct platform_device *pdev = of_find_device_by_node(np_provider);
+ struct stm32_sai_data *sai_provider;
int ret;
- mutex_lock(&sync_mutex);
- list_for_each_entry(provider, &sync_providers, link) {
- if (provider->node == np) {
- ret = provider->sync_conf(provider->data, synco);
- mutex_unlock(&sync_mutex);
- return ret;
- }
+ if (!pdev) {
+ dev_err(&sai_client->pdev->dev,
+ "Device not found for node %s\n", np_provider->name);
+ return -ENODEV;
}
- mutex_unlock(&sync_mutex);
- /* SAI sync provider not found */
- return -ENODEV;
-}
-
-static int stm32_sai_set_sync(struct stm32_sai_data *sai,
- struct device_node *np_provider,
- int synco, int synci)
-{
- int ret;
+ sai_provider = platform_get_drvdata(pdev);
+ if (!sai_provider) {
+ dev_err(&sai_client->pdev->dev,
+ "SAI sync provider data not found\n");
+ return -EINVAL;
+ }
/* Configure sync client */
- stm32_sai_sync_conf_client(sai, synci);
+ ret = stm32_sai_sync_conf_client(sai_client, synci);
+ if (ret < 0)
+ return ret;
/* Configure sync provider */
- ret = stm32_sai_set_sync_provider(np_provider, synco);
-
- return ret;
-}
-
-static int stm32_sai_sync_add_provider(struct platform_device *pdev,
- void *data)
-{
- struct sync_provider *sp;
-
- sp = devm_kzalloc(&pdev->dev, sizeof(*sp), GFP_KERNEL);
- if (!sp)
- return -ENOMEM;
-
- sp->node = of_node_get(pdev->dev.of_node);
- sp->data = data;
- sp->sync_conf = &stm32_sai_sync_conf_provider;
-
- mutex_lock(&sync_mutex);
- list_add(&sp->link, &sync_providers);
- mutex_unlock(&sync_mutex);
-
- return 0;
-}
-
-static void stm32_sai_sync_del_provider(struct device_node *np)
-{
- struct sync_provider *sp;
-
- mutex_lock(&sync_mutex);
- list_for_each_entry(sp, &sync_providers, link) {
- if (sp->node == np) {
- list_del(&sp->link);
- of_node_put(sp->node);
- break;
- }
- }
- mutex_unlock(&sync_mutex);
+ return stm32_sai_sync_conf_provider(sai_provider, synco);
}
static int stm32_sai_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
struct stm32_sai_data *sai;
struct reset_control *rst;
struct resource *res;
const struct of_device_id *of_id;
- int ret;
sai = devm_kzalloc(&pdev->dev, sizeof(*sai), GFP_KERNEL);
if (!sai)
@@ -231,28 +179,11 @@ static int stm32_sai_probe(struct platform_device *pdev)
reset_control_deassert(rst);
}
- ret = stm32_sai_sync_add_provider(pdev, sai);
- if (ret < 0)
- return ret;
- sai->set_sync = &stm32_sai_set_sync;
-
sai->pdev = pdev;
+ sai->set_sync = &stm32_sai_set_sync;
platform_set_drvdata(pdev, sai);
- ret = of_platform_populate(np, NULL, NULL, &pdev->dev);
- if (ret < 0)
- stm32_sai_sync_del_provider(np);
-
- return ret;
-}
-
-static int stm32_sai_remove(struct platform_device *pdev)
-{
- of_platform_depopulate(&pdev->dev);
-
- stm32_sai_sync_del_provider(pdev->dev.of_node);
-
- return 0;
+ return devm_of_platform_populate(&pdev->dev);
}
MODULE_DEVICE_TABLE(of, stm32_sai_ids);
@@ -263,7 +194,6 @@ static struct platform_driver stm32_sai_driver = {
.of_match_table = stm32_sai_ids,
},
.probe = stm32_sai_probe,
- .remove = stm32_sai_remove,
};
module_platform_driver(stm32_sai_driver);
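
The rewrite replaces the module-global provider registry with a direct lookup:
the provider SAI is found through its device node and its drvdata, which
probe() has already set via platform_set_drvdata(). With
devm_of_platform_populate() handling depopulation on unbind, the whole
.remove() callback disappears. One caveat worth noting: of_find_device_by_node()
takes a reference on the returned device, so a strictly tidy version would
drop it once done (sketch):

    struct platform_device *pdev = of_find_device_by_node(np_provider);

    if (!pdev)
        return -ENODEV;
    /* ... use platform_get_drvdata(pdev) ... */
    put_device(&pdev->dev);    /* balance of_find_device_by_node() */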
diff --git a/sound/soc/sunxi/sun4i-codec.c b/sound/soc/sunxi/sun4i-codec.c
index 5da4efe7a550..886281673972 100644
--- a/sound/soc/sunxi/sun4i-codec.c
+++ b/sound/soc/sunxi/sun4i-codec.c
@@ -590,12 +590,28 @@ static int sun4i_codec_hw_params(struct snd_pcm_substream *substream,
hwrate);
}
+
+static unsigned int sun4i_codec_src_rates[] = {
+ 8000, 11025, 12000, 16000, 22050, 24000, 32000,
+ 44100, 48000, 96000, 192000
+};
+
+static struct snd_pcm_hw_constraint_list sun4i_codec_constraints = {
+ .count = ARRAY_SIZE(sun4i_codec_src_rates),
+ .list = sun4i_codec_src_rates,
+};
+
static int sun4i_codec_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct sun4i_codec *scodec = snd_soc_card_get_drvdata(rtd->card);
+ snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE, &sun4i_codec_constraints);
+
/*
* Stop issuing DRQ when we have room for less than 16 samples
* in our TX FIFO
@@ -633,9 +649,7 @@ static struct snd_soc_dai_driver sun4i_codec_dai = {
.channels_max = 2,
.rate_min = 8000,
.rate_max = 192000,
- .rates = SNDRV_PCM_RATE_8000_48000 |
- SNDRV_PCM_RATE_96000 |
- SNDRV_PCM_RATE_192000,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.sig_bits = 24,
@@ -645,11 +659,8 @@ static struct snd_soc_dai_driver sun4i_codec_dai = {
.channels_min = 1,
.channels_max = 2,
.rate_min = 8000,
- .rate_max = 192000,
- .rates = SNDRV_PCM_RATE_8000_48000 |
- SNDRV_PCM_RATE_96000 |
- SNDRV_PCM_RATE_192000 |
- SNDRV_PCM_RATE_KNOT,
+ .rate_max = 48000,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S32_LE,
.sig_bits = 24,
@@ -1128,7 +1139,7 @@ static const struct snd_soc_component_driver sun4i_codec_component = {
.name = "sun4i-codec",
};
-#define SUN4I_CODEC_RATES SNDRV_PCM_RATE_8000_192000
+#define SUN4I_CODEC_RATES SNDRV_PCM_RATE_CONTINUOUS
#define SUN4I_CODEC_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
SNDRV_PCM_FMTBIT_S32_LE)
diff --git a/sound/soc/sunxi/sun4i-i2s.c b/sound/soc/sunxi/sun4i-i2s.c
index 04f92583a969..dca1143c1150 100644
--- a/sound/soc/sunxi/sun4i-i2s.c
+++ b/sound/soc/sunxi/sun4i-i2s.c
@@ -269,10 +269,11 @@ static bool sun4i_i2s_oversample_is_valid(unsigned int oversample)
return false;
}
-static int sun4i_i2s_set_clk_rate(struct sun4i_i2s *i2s,
+static int sun4i_i2s_set_clk_rate(struct snd_soc_dai *dai,
unsigned int rate,
unsigned int word_size)
{
+ struct sun4i_i2s *i2s = snd_soc_dai_get_drvdata(dai);
unsigned int oversample_rate, clk_rate;
int bclk_div, mclk_div;
int ret;
@@ -300,6 +301,7 @@ static int sun4i_i2s_set_clk_rate(struct sun4i_i2s *i2s,
break;
default:
+ dev_err(dai->dev, "Unsupported sample rate: %u\n", rate);
return -EINVAL;
}
@@ -308,18 +310,25 @@ static int sun4i_i2s_set_clk_rate(struct sun4i_i2s *i2s,
return ret;
oversample_rate = i2s->mclk_freq / rate;
- if (!sun4i_i2s_oversample_is_valid(oversample_rate))
+ if (!sun4i_i2s_oversample_is_valid(oversample_rate)) {
+ dev_err(dai->dev, "Unsupported oversample rate: %d\n",
+ oversample_rate);
return -EINVAL;
+ }
bclk_div = sun4i_i2s_get_bclk_div(i2s, oversample_rate,
word_size);
- if (bclk_div < 0)
+ if (bclk_div < 0) {
+ dev_err(dai->dev, "Unsupported BCLK divider: %d\n", bclk_div);
return -EINVAL;
+ }
mclk_div = sun4i_i2s_get_mclk_div(i2s, oversample_rate,
clk_rate, rate);
- if (mclk_div < 0)
+ if (mclk_div < 0) {
+ dev_err(dai->dev, "Unsupported MCLK divider: %d\n", mclk_div);
return -EINVAL;
+ }
/* Adjust the clock division values if needed */
bclk_div += i2s->variant->bclk_offset;
@@ -349,8 +358,11 @@ static int sun4i_i2s_hw_params(struct snd_pcm_substream *substream,
u32 width;
channels = params_channels(params);
- if (channels != 2)
+ if (channels != 2) {
+ dev_err(dai->dev, "Unsupported number of channels: %d\n",
+ channels);
return -EINVAL;
+ }
if (i2s->variant->has_chcfg) {
regmap_update_bits(i2s->regmap, SUN8I_I2S_CHAN_CFG_REG,
@@ -382,6 +394,8 @@ static int sun4i_i2s_hw_params(struct snd_pcm_substream *substream,
width = DMA_SLAVE_BUSWIDTH_2_BYTES;
break;
default:
+ dev_err(dai->dev, "Unsupported physical sample width: %d\n",
+ params_physical_width(params));
return -EINVAL;
}
i2s->playback_dma_data.addr_width = width;
@@ -393,6 +407,8 @@ static int sun4i_i2s_hw_params(struct snd_pcm_substream *substream,
break;
default:
+ dev_err(dai->dev, "Unsupported sample width: %d\n",
+ params_width(params));
return -EINVAL;
}
@@ -401,7 +417,7 @@ static int sun4i_i2s_hw_params(struct snd_pcm_substream *substream,
regmap_field_write(i2s->field_fmt_sr,
sr + i2s->variant->fmt_offset);
- return sun4i_i2s_set_clk_rate(i2s, params_rate(params),
+ return sun4i_i2s_set_clk_rate(dai, params_rate(params),
params_width(params));
}
@@ -426,6 +442,8 @@ static int sun4i_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
val = SUN4I_I2S_FMT0_FMT_RIGHT_J;
break;
default:
+ dev_err(dai->dev, "Unsupported format: %d\n",
+ fmt & SND_SOC_DAIFMT_FORMAT_MASK);
return -EINVAL;
}
@@ -464,6 +482,8 @@ static int sun4i_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
case SND_SOC_DAIFMT_NB_NF:
break;
default:
+ dev_err(dai->dev, "Unsupported clock polarity: %d\n",
+ fmt & SND_SOC_DAIFMT_INV_MASK);
return -EINVAL;
}
@@ -482,6 +502,8 @@ static int sun4i_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
val = SUN4I_I2S_CTRL_MODE_SLAVE;
break;
default:
+ dev_err(dai->dev, "Unsupported slave setting: %d\n",
+ fmt & SND_SOC_DAIFMT_MASTER_MASK);
return -EINVAL;
}
regmap_update_bits(i2s->regmap, SUN4I_I2S_CTRL_REG,
@@ -504,6 +526,8 @@ static int sun4i_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
val = 0;
break;
default:
+ dev_err(dai->dev, "Unsupported slave setting: %d\n",
+ fmt & SND_SOC_DAIFMT_MASTER_MASK);
return -EINVAL;
}
regmap_update_bits(i2s->regmap, SUN4I_I2S_CTRL_REG,
@@ -897,6 +921,23 @@ static const struct sun4i_i2s_quirks sun6i_a31_i2s_quirks = {
.field_rxchansel = REG_FIELD(SUN4I_I2S_RX_CHAN_SEL_REG, 0, 2),
};
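+/*
+ * The A83T keeps the sun4i-style register layout (slave select in the
+ * control register, format fields in FMT0) but takes TX data at the
+ * sun8i FIFO offset.
+ */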
+static const struct sun4i_i2s_quirks sun8i_a83t_i2s_quirks = {
+ .has_reset = true,
+ .reg_offset_txdata = SUN8I_I2S_FIFO_TX_REG,
+ .sun4i_i2s_regmap = &sun4i_i2s_regmap_config,
+ .field_clkdiv_mclk_en = REG_FIELD(SUN4I_I2S_CLK_DIV_REG, 7, 7),
+ .field_fmt_wss = REG_FIELD(SUN4I_I2S_FMT0_REG, 2, 3),
+ .field_fmt_sr = REG_FIELD(SUN4I_I2S_FMT0_REG, 4, 5),
+ .field_fmt_bclk = REG_FIELD(SUN4I_I2S_FMT0_REG, 6, 6),
+ .field_fmt_lrclk = REG_FIELD(SUN4I_I2S_FMT0_REG, 7, 7),
+ .has_slave_select_bit = true,
+ .field_fmt_mode = REG_FIELD(SUN4I_I2S_FMT0_REG, 0, 1),
+ .field_txchanmap = REG_FIELD(SUN4I_I2S_TX_CHAN_MAP_REG, 0, 31),
+ .field_rxchanmap = REG_FIELD(SUN4I_I2S_RX_CHAN_MAP_REG, 0, 31),
+ .field_txchansel = REG_FIELD(SUN4I_I2S_TX_CHAN_SEL_REG, 0, 2),
+ .field_rxchansel = REG_FIELD(SUN4I_I2S_RX_CHAN_SEL_REG, 0, 2),
+};
+
static const struct sun4i_i2s_quirks sun8i_h3_i2s_quirks = {
.has_reset = true,
.reg_offset_txdata = SUN8I_I2S_FIFO_TX_REG,
@@ -1121,6 +1162,10 @@ static const struct of_device_id sun4i_i2s_match[] = {
.data = &sun6i_a31_i2s_quirks,
},
{
+ .compatible = "allwinner,sun8i-a83t-i2s",
+ .data = &sun8i_a83t_i2s_quirks,
+ },
+ {
.compatible = "allwinner,sun8i-h3-i2s",
.data = &sun8i_h3_i2s_quirks,
},
diff --git a/sound/soc/uniphier/Kconfig b/sound/soc/uniphier/Kconfig
new file mode 100644
index 000000000000..02886a457eaf
--- /dev/null
+++ b/sound/soc/uniphier/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+config SND_SOC_UNIPHIER
+ tristate "ASoC support for UniPhier"
+ depends on (ARCH_UNIPHIER || COMPILE_TEST)
+ help
+ Say Y or M if you want to add support for the Socionext
+ UniPhier SoC audio interfaces. You will also need to select the
+ audio interfaces to support below.
+ If unsure, select "N".
+
+config SND_SOC_UNIPHIER_EVEA_CODEC
+ tristate "UniPhier SoC internal audio codec"
+ depends on SND_SOC_UNIPHIER
+ select REGMAP_MMIO
+ help
+ This adds the codec driver for the Socionext UniPhier LD11/20
+ SoC internal DAC. The driver supports Line In / Out and
+ Headphone. Select Y if you use such a device.
+ If unsure, select "N".
diff --git a/sound/soc/uniphier/Makefile b/sound/soc/uniphier/Makefile
new file mode 100644
index 000000000000..3be00d72f5e5
--- /dev/null
+++ b/sound/soc/uniphier/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+snd-soc-uniphier-evea-objs := evea.o
+obj-$(CONFIG_SND_SOC_UNIPHIER_EVEA_CODEC) += snd-soc-uniphier-evea.o
diff --git a/sound/soc/uniphier/evea.c b/sound/soc/uniphier/evea.c
new file mode 100644
index 000000000000..0cc9efff1d9a
--- /dev/null
+++ b/sound/soc/uniphier/evea.c
@@ -0,0 +1,567 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Socionext UniPhier EVEA ADC/DAC codec driver.
+ *
+ * Copyright (c) 2016-2017 Socionext Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+
+#define DRV_NAME "evea"
+#define EVEA_RATES SNDRV_PCM_RATE_48000
+#define EVEA_FORMATS SNDRV_PCM_FMTBIT_S32_LE
+
+#define AADCPOW(n) (0x0078 + 0x04 * (n))
+#define AADCPOW_AADC_POWD BIT(0)
+#define AHPOUTPOW 0x0098
+#define AHPOUTPOW_HP_ON BIT(4)
+#define ALINEPOW 0x009c
+#define ALINEPOW_LIN2_POWD BIT(3)
+#define ALINEPOW_LIN1_POWD BIT(4)
+#define ALO1OUTPOW 0x00a8
+#define ALO1OUTPOW_LO1_ON BIT(4)
+#define ALO2OUTPOW 0x00ac
+#define ALO2OUTPOW_ADAC2_MUTE BIT(0)
+#define ALO2OUTPOW_LO2_ON BIT(4)
+#define AANAPOW 0x00b8
+#define AANAPOW_A_POWD BIT(4)
+#define ADACSEQ1(n) (0x0144 + 0x40 * (n))
+#define ADACSEQ1_MMUTE BIT(1)
+#define ADACSEQ2(n) (0x0160 + 0x40 * (n))
+#define ADACSEQ2_ADACIN_FIX BIT(0)
+#define ADAC1ODC 0x0200
+#define ADAC1ODC_HP_DIS_RES_MASK GENMASK(2, 1)
+#define ADAC1ODC_HP_DIS_RES_OFF (0x0 << 1)
+#define ADAC1ODC_HP_DIS_RES_ON (0x3 << 1)
+#define ADAC1ODC_ADAC_RAMPCLT_MASK GENMASK(8, 7)
+#define ADAC1ODC_ADAC_RAMPCLT_NORMAL (0x0 << 7)
+#define ADAC1ODC_ADAC_RAMPCLT_REDUCE (0x1 << 7)
+
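+/*
+ * Private driver state. The EVEA block needs two clocks ("evea" and
+ * "exiv") and three reset lines; the switch_* fields cache the
+ * user-visible switch settings so they can be replayed after resume.
+ */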
+struct evea_priv {
+ struct clk *clk, *clk_exiv;
+ struct reset_control *rst, *rst_exiv, *rst_adamv;
+ struct regmap *regmap;
+
+ int switch_lin;
+ int switch_lo;
+ int switch_hp;
+};
+
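+/*
+ * Three stereo line inputs feed the single ADC path; the DAC feeds
+ * both the headphone (HP1) and line out (LO2) outputs.
+ */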
+static const struct snd_soc_dapm_widget evea_widgets[] = {
+ SND_SOC_DAPM_ADC("ADC", "Capture", SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_INPUT("LIN1_LP"),
+ SND_SOC_DAPM_INPUT("LIN1_RP"),
+ SND_SOC_DAPM_INPUT("LIN2_LP"),
+ SND_SOC_DAPM_INPUT("LIN2_RP"),
+ SND_SOC_DAPM_INPUT("LIN3_LP"),
+ SND_SOC_DAPM_INPUT("LIN3_RP"),
+
+ SND_SOC_DAPM_DAC("DAC", "Playback", SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_OUTPUT("HP1_L"),
+ SND_SOC_DAPM_OUTPUT("HP1_R"),
+ SND_SOC_DAPM_OUTPUT("LO2_L"),
+ SND_SOC_DAPM_OUTPUT("LO2_R"),
+};
+
+static const struct snd_soc_dapm_route evea_routes[] = {
+ { "ADC", NULL, "LIN1_LP" },
+ { "ADC", NULL, "LIN1_RP" },
+ { "ADC", NULL, "LIN2_LP" },
+ { "ADC", NULL, "LIN2_RP" },
+ { "ADC", NULL, "LIN3_LP" },
+ { "ADC", NULL, "LIN3_RP" },
+
+ { "HP1_L", NULL, "DAC" },
+ { "HP1_R", NULL, "DAC" },
+ { "LO2_L", NULL, "DAC" },
+ { "LO2_R", NULL, "DAC" },
+};
+
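+/*
+ * Power the analog block up: enable the analog power domain, keep the
+ * headphone discharge resistor engaged, select the reduced DAC ramp
+ * clock, and clear the ADACIN fix bit on all three DAC sequencers.
+ */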
+static void evea_set_power_state_on(struct evea_priv *evea)
+{
+ struct regmap *map = evea->regmap;
+
+ regmap_update_bits(map, AANAPOW, AANAPOW_A_POWD,
+ AANAPOW_A_POWD);
+
+ regmap_update_bits(map, ADAC1ODC, ADAC1ODC_HP_DIS_RES_MASK,
+ ADAC1ODC_HP_DIS_RES_ON);
+
+ regmap_update_bits(map, ADAC1ODC, ADAC1ODC_ADAC_RAMPCLT_MASK,
+ ADAC1ODC_ADAC_RAMPCLT_REDUCE);
+
+ regmap_update_bits(map, ADACSEQ2(0), ADACSEQ2_ADACIN_FIX, 0);
+ regmap_update_bits(map, ADACSEQ2(1), ADACSEQ2_ADACIN_FIX, 0);
+ regmap_update_bits(map, ADACSEQ2(2), ADACSEQ2_ADACIN_FIX, 0);
+}
+
+static void evea_set_power_state_off(struct evea_priv *evea)
+{
+ struct regmap *map = evea->regmap;
+
+ regmap_update_bits(map, ADAC1ODC, ADAC1ODC_HP_DIS_RES_MASK,
+ ADAC1ODC_HP_DIS_RES_ON);
+
+ regmap_update_bits(map, ADACSEQ1(0), ADACSEQ1_MMUTE,
+ ADACSEQ1_MMUTE);
+ regmap_update_bits(map, ADACSEQ1(1), ADACSEQ1_MMUTE,
+ ADACSEQ1_MMUTE);
+ regmap_update_bits(map, ADACSEQ1(2), ADACSEQ1_MMUTE,
+ ADACSEQ1_MMUTE);
+
+ regmap_update_bits(map, ALO1OUTPOW, ALO1OUTPOW_LO1_ON, 0);
+ regmap_update_bits(map, ALO2OUTPOW, ALO2OUTPOW_LO2_ON, 0);
+ regmap_update_bits(map, AHPOUTPOW, AHPOUTPOW_HP_ON, 0);
+}
+
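+/*
+ * The evea_update_switch_*() helpers push the cached switch state to
+ * the hardware. Ordering matters: inputs are powered before their
+ * ADCs, and on the output side the DAC sequencers are unmuted before
+ * an output stage is switched on (and muted again before it goes off).
+ */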
+static int evea_update_switch_lin(struct evea_priv *evea)
+{
+ struct regmap *map = evea->regmap;
+
+ if (evea->switch_lin) {
+ regmap_update_bits(map, ALINEPOW,
+ ALINEPOW_LIN2_POWD | ALINEPOW_LIN1_POWD,
+ ALINEPOW_LIN2_POWD | ALINEPOW_LIN1_POWD);
+
+ regmap_update_bits(map, AADCPOW(0), AADCPOW_AADC_POWD,
+ AADCPOW_AADC_POWD);
+ regmap_update_bits(map, AADCPOW(1), AADCPOW_AADC_POWD,
+ AADCPOW_AADC_POWD);
+ } else {
+ regmap_update_bits(map, AADCPOW(0), AADCPOW_AADC_POWD, 0);
+ regmap_update_bits(map, AADCPOW(1), AADCPOW_AADC_POWD, 0);
+
+ regmap_update_bits(map, ALINEPOW,
+ ALINEPOW_LIN2_POWD | ALINEPOW_LIN1_POWD, 0);
+ }
+
+ return 0;
+}
+
+static int evea_update_switch_lo(struct evea_priv *evea)
+{
+ struct regmap *map = evea->regmap;
+
+ if (evea->switch_lo) {
+ regmap_update_bits(map, ADACSEQ1(0), ADACSEQ1_MMUTE, 0);
+ regmap_update_bits(map, ADACSEQ1(2), ADACSEQ1_MMUTE, 0);
+
+ regmap_update_bits(map, ALO1OUTPOW, ALO1OUTPOW_LO1_ON,
+ ALO1OUTPOW_LO1_ON);
+ regmap_update_bits(map, ALO2OUTPOW,
+ ALO2OUTPOW_ADAC2_MUTE | ALO2OUTPOW_LO2_ON,
+ ALO2OUTPOW_ADAC2_MUTE | ALO2OUTPOW_LO2_ON);
+ } else {
+ regmap_update_bits(map, ADACSEQ1(0), ADACSEQ1_MMUTE,
+ ADACSEQ1_MMUTE);
+ regmap_update_bits(map, ADACSEQ1(2), ADACSEQ1_MMUTE,
+ ADACSEQ1_MMUTE);
+
+ regmap_update_bits(map, ALO1OUTPOW, ALO1OUTPOW_LO1_ON, 0);
+ regmap_update_bits(map, ALO2OUTPOW,
+ ALO2OUTPOW_ADAC2_MUTE | ALO2OUTPOW_LO2_ON,
+ 0);
+ }
+
+ return 0;
+}
+
+static int evea_update_switch_hp(struct evea_priv *evea)
+{
+ struct regmap *map = evea->regmap;
+
+ if (evea->switch_hp) {
+ regmap_update_bits(map, ADACSEQ1(1), ADACSEQ1_MMUTE, 0);
+
+ regmap_update_bits(map, AHPOUTPOW, AHPOUTPOW_HP_ON,
+ AHPOUTPOW_HP_ON);
+
+ regmap_update_bits(map, ADAC1ODC, ADAC1ODC_HP_DIS_RES_MASK,
+ ADAC1ODC_HP_DIS_RES_OFF);
+ } else {
+ regmap_update_bits(map, ADAC1ODC, ADAC1ODC_HP_DIS_RES_MASK,
+ ADAC1ODC_HP_DIS_RES_ON);
+
+ regmap_update_bits(map, ADACSEQ1(1), ADACSEQ1_MMUTE,
+ ADACSEQ1_MMUTE);
+
+ regmap_update_bits(map, AHPOUTPOW, AHPOUTPOW_HP_ON, 0);
+ }
+
+ return 0;
+}
+
+static void evea_update_switch_all(struct evea_priv *evea)
+{
+ evea_update_switch_lin(evea);
+ evea_update_switch_lo(evea);
+ evea_update_switch_hp(evea);
+}
+
+static int evea_get_switch_lin(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct evea_priv *evea = snd_soc_codec_get_drvdata(codec);
+
+ ucontrol->value.integer.value[0] = evea->switch_lin;
+
+ return 0;
+}
+
+static int evea_set_switch_lin(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct evea_priv *evea = snd_soc_codec_get_drvdata(codec);
+
+ if (evea->switch_lin == ucontrol->value.integer.value[0])
+ return 0;
+
+ evea->switch_lin = ucontrol->value.integer.value[0];
+
+ return evea_update_switch_lin(evea);
+}
+
+static int evea_get_switch_lo(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct evea_priv *evea = snd_soc_codec_get_drvdata(codec);
+
+ ucontrol->value.integer.value[0] = evea->switch_lo;
+
+ return 0;
+}
+
+static int evea_set_switch_lo(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct evea_priv *evea = snd_soc_codec_get_drvdata(codec);
+
+ if (evea->switch_lo == ucontrol->value.integer.value[0])
+ return 0;
+
+ evea->switch_lo = ucontrol->value.integer.value[0];
+
+ return evea_update_switch_lo(evea);
+}
+
+static int evea_get_switch_hp(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct evea_priv *evea = snd_soc_codec_get_drvdata(codec);
+
+ ucontrol->value.integer.value[0] = evea->switch_hp;
+
+ return 0;
+}
+
+static int evea_set_switch_hp(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct evea_priv *evea = snd_soc_codec_get_drvdata(codec);
+
+ if (evea->switch_hp == ucontrol->value.integer.value[0])
+ return 0;
+
+ evea->switch_hp = ucontrol->value.integer.value[0];
+
+ return evea_update_switch_hp(evea);
+}
+
+static const struct snd_kcontrol_new eva_controls[] = {
+ SOC_SINGLE_BOOL_EXT("Line Capture Switch", 0,
+ evea_get_switch_lin, evea_set_switch_lin),
+ SOC_SINGLE_BOOL_EXT("Line Playback Switch", 0,
+ evea_get_switch_lo, evea_set_switch_lo),
+ SOC_SINGLE_BOOL_EXT("Headphone Playback Switch", 0,
+ evea_get_switch_hp, evea_set_switch_hp),
+};
+
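+/*
+ * All switches default to on; power the codec up and replay the
+ * cached switch state as soon as the codec registers.
+ */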
+static int evea_codec_probe(struct snd_soc_codec *codec)
+{
+ struct evea_priv *evea = snd_soc_codec_get_drvdata(codec);
+
+ evea->switch_lin = 1;
+ evea->switch_lo = 1;
+ evea->switch_hp = 1;
+
+ evea_set_power_state_on(evea);
+ evea_update_switch_all(evea);
+
+ return 0;
+}
+
+static int evea_codec_suspend(struct snd_soc_codec *codec)
+{
+ struct evea_priv *evea = snd_soc_codec_get_drvdata(codec);
+
+ evea_set_power_state_off(evea);
+
+ reset_control_assert(evea->rst_adamv);
+ reset_control_assert(evea->rst_exiv);
+ reset_control_assert(evea->rst);
+
+ clk_disable_unprepare(evea->clk_exiv);
+ clk_disable_unprepare(evea->clk);
+
+ return 0;
+}
+
+static int evea_codec_resume(struct snd_soc_codec *codec)
+{
+ struct evea_priv *evea = snd_soc_codec_get_drvdata(codec);
+ int ret;
+
+ ret = clk_prepare_enable(evea->clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(evea->clk_exiv);
+ if (ret)
+ goto err_out_clock;
+
+ ret = reset_control_deassert(evea->rst);
+ if (ret)
+ goto err_out_clock_exiv;
+
+ ret = reset_control_deassert(evea->rst_exiv);
+ if (ret)
+ goto err_out_reset;
+
+ ret = reset_control_deassert(evea->rst_adamv);
+ if (ret)
+ goto err_out_reset_exiv;
+
+ evea_set_power_state_on(evea);
+ evea_update_switch_all(evea);
+
+ return 0;
+
+err_out_reset_exiv:
+ reset_control_assert(evea->rst_exiv);
+
+err_out_reset:
+ reset_control_assert(evea->rst);
+
+err_out_clock_exiv:
+ clk_disable_unprepare(evea->clk_exiv);
+
+err_out_clock:
+ clk_disable_unprepare(evea->clk);
+
+ return ret;
+}
+
+static struct snd_soc_codec_driver soc_codec_evea = {
+ .probe = evea_codec_probe,
+ .suspend = evea_codec_suspend,
+ .resume = evea_codec_resume,
+
+ .component_driver = {
+ .dapm_widgets = evea_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(evea_widgets),
+ .dapm_routes = evea_routes,
+ .num_dapm_routes = ARRAY_SIZE(evea_routes),
+ .controls = eva_controls,
+ .num_controls = ARRAY_SIZE(eva_controls),
+ },
+};
+
+static struct snd_soc_dai_driver soc_dai_evea[] = {
+ {
+ .name = DRV_NAME "-line1",
+ .playback = {
+ .stream_name = "Line Out 1",
+ .formats = EVEA_FORMATS,
+ .rates = EVEA_RATES,
+ .channels_min = 2,
+ .channels_max = 2,
+ },
+ .capture = {
+ .stream_name = "Line In 1",
+ .formats = EVEA_FORMATS,
+ .rates = EVEA_RATES,
+ .channels_min = 2,
+ .channels_max = 2,
+ },
+ },
+ {
+ .name = DRV_NAME "-hp1",
+ .playback = {
+ .stream_name = "Headphone 1",
+ .formats = EVEA_FORMATS,
+ .rates = EVEA_RATES,
+ .channels_min = 2,
+ .channels_max = 2,
+ },
+ },
+ {
+ .name = DRV_NAME "-lo2",
+ .playback = {
+ .stream_name = "Line Out 2",
+ .formats = EVEA_FORMATS,
+ .rates = EVEA_RATES,
+ .channels_min = 2,
+ .channels_max = 2,
+ },
+ },
+};
+
+static const struct regmap_config evea_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xffc,
+ .cache_type = REGCACHE_NONE,
+};
+
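+/*
+ * Bring-up order matters: both clocks first, then the EVEA and EXIV
+ * resets, and only then the ADAMV reset (see the comment below). The
+ * error paths unwind in the reverse order.
+ */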
+static int evea_probe(struct platform_device *pdev)
+{
+ struct evea_priv *evea;
+ struct resource *res;
+ void __iomem *preg;
+ int ret;
+
+ evea = devm_kzalloc(&pdev->dev, sizeof(struct evea_priv), GFP_KERNEL);
+ if (!evea)
+ return -ENOMEM;
+
+ evea->clk = devm_clk_get(&pdev->dev, "evea");
+ if (IS_ERR(evea->clk))
+ return PTR_ERR(evea->clk);
+
+ evea->clk_exiv = devm_clk_get(&pdev->dev, "exiv");
+ if (IS_ERR(evea->clk_exiv))
+ return PTR_ERR(evea->clk_exiv);
+
+ evea->rst = devm_reset_control_get_shared(&pdev->dev, "evea");
+ if (IS_ERR(evea->rst))
+ return PTR_ERR(evea->rst);
+
+ evea->rst_exiv = devm_reset_control_get_shared(&pdev->dev, "exiv");
+ if (IS_ERR(evea->rst_exiv))
+ return PTR_ERR(evea->rst_exiv);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ preg = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(preg))
+ return PTR_ERR(preg);
+
+ evea->regmap = devm_regmap_init_mmio(&pdev->dev, preg,
+ &evea_regmap_config);
+ if (IS_ERR(evea->regmap))
+ return PTR_ERR(evea->regmap);
+
+ ret = clk_prepare_enable(evea->clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(evea->clk_exiv);
+ if (ret)
+ goto err_out_clock;
+
+ ret = reset_control_deassert(evea->rst);
+ if (ret)
+ goto err_out_clock_exiv;
+
+ ret = reset_control_deassert(evea->rst_exiv);
+ if (ret)
+ goto err_out_reset;
+
+ /* ADAMV will hang up if the EXIV reset is asserted */
+ evea->rst_adamv = devm_reset_control_get_shared(&pdev->dev, "adamv");
+ if (IS_ERR(evea->rst_adamv)) {
+ ret = PTR_ERR(evea->rst_adamv);
+ goto err_out_reset_exiv;
+ }
+
+ ret = reset_control_deassert(evea->rst_adamv);
+ if (ret)
+ goto err_out_reset_exiv;
+
+ platform_set_drvdata(pdev, evea);
+
+ ret = snd_soc_register_codec(&pdev->dev, &soc_codec_evea,
+ soc_dai_evea, ARRAY_SIZE(soc_dai_evea));
+ if (ret)
+ goto err_out_reset_adamv;
+
+ return 0;
+
+err_out_reset_adamv:
+ reset_control_assert(evea->rst_adamv);
+
+err_out_reset_exiv:
+ reset_control_assert(evea->rst_exiv);
+
+err_out_reset:
+ reset_control_assert(evea->rst);
+
+err_out_clock_exiv:
+ clk_disable_unprepare(evea->clk_exiv);
+
+err_out_clock:
+ clk_disable_unprepare(evea->clk);
+
+ return ret;
+}
+
+static int evea_remove(struct platform_device *pdev)
+{
+ struct evea_priv *evea = platform_get_drvdata(pdev);
+
+ snd_soc_unregister_codec(&pdev->dev);
+
+ reset_control_assert(evea->rst_adamv);
+ reset_control_assert(evea->rst_exiv);
+ reset_control_assert(evea->rst);
+
+ clk_disable_unprepare(evea->clk_exiv);
+ clk_disable_unprepare(evea->clk);
+
+ return 0;
+}
+
+static const struct of_device_id evea_of_match[] = {
+ { .compatible = "socionext,uniphier-evea", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, evea_of_match);
+
+static struct platform_driver evea_codec_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = of_match_ptr(evea_of_match),
+ },
+ .probe = evea_probe,
+ .remove = evea_remove,
+};
+module_platform_driver(evea_codec_driver);
+
+MODULE_AUTHOR("Katsuhiro Suzuki <suzuki.katsuhiro@socionext.com>");
+MODULE_DESCRIPTION("UniPhier EVEA codec driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/ux500/mop500.c b/sound/soc/ux500/mop500.c
index 070a6880980e..c60a57797640 100644
--- a/sound/soc/ux500/mop500.c
+++ b/sound/soc/ux500/mop500.c
@@ -163,3 +163,7 @@ static struct platform_driver snd_soc_mop500_driver = {
};
module_platform_driver(snd_soc_mop500_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ASoC MOP500 board driver");
+MODULE_AUTHOR("Ola Lilja");
diff --git a/sound/soc/ux500/ux500_pcm.c b/sound/soc/ux500/ux500_pcm.c
index f12c01dddc8d..d35ba7700f46 100644
--- a/sound/soc/ux500/ux500_pcm.c
+++ b/sound/soc/ux500/ux500_pcm.c
@@ -165,3 +165,8 @@ int ux500_pcm_unregister_platform(struct platform_device *pdev)
return 0;
}
EXPORT_SYMBOL_GPL(ux500_pcm_unregister_platform);
+
+MODULE_AUTHOR("Ola Lilja");
+MODULE_AUTHOR("Roger Nilsson");
+MODULE_DESCRIPTION("ASoC UX500 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/sound_core.c b/sound/sound_core.c
index 99b73c675743..b4efb22db561 100644
--- a/sound/sound_core.c
+++ b/sound/sound_core.c
@@ -119,13 +119,6 @@ struct sound_unit
char name[32];
};
-#ifdef CONFIG_SOUND_MSNDCLAS
-extern int msnd_classic_init(void);
-#endif
-#ifdef CONFIG_SOUND_MSNDPIN
-extern int msnd_pinnacle_init(void);
-#endif
-
/*
* By default, OSS sound_core claims full legacy minor range (0-255)
* of SOUND_MAJOR to trap open attempts to any sound minor and
@@ -452,26 +445,6 @@ int register_sound_mixer(const struct file_operations *fops, int dev)
EXPORT_SYMBOL(register_sound_mixer);
-/**
- * register_sound_midi - register a midi device
- * @fops: File operations for the driver
- * @dev: Unit number to allocate
- *
- * Allocate a midi device. Unit is the number of the midi device requested.
- * Pass -1 to request the next free midi unit.
- *
- * Return: On success, the allocated number is returned. On failure,
- * a negative error code is returned.
- */
-
-int register_sound_midi(const struct file_operations *fops, int dev)
-{
- return sound_insert_unit(&chains[2], fops, dev, 2, 130,
- "midi", S_IRUSR | S_IWUSR, NULL);
-}
-
-EXPORT_SYMBOL(register_sound_midi);
-
/*
* DSP's are registered as a triple. Register only one and cheat
* in open - see below.
@@ -533,21 +506,6 @@ void unregister_sound_mixer(int unit)
EXPORT_SYMBOL(unregister_sound_mixer);
/**
- * unregister_sound_midi - unregister a midi device
- * @unit: unit number to allocate
- *
- * Release a sound device that was allocated with register_sound_midi().
- * The unit passed is the return value from the register function.
- */
-
-void unregister_sound_midi(int unit)
-{
- sound_remove_unit(&chains[2], unit);
-}
-
-EXPORT_SYMBOL(unregister_sound_midi);
-
-/**
* unregister_sound_dsp - unregister a DSP device
* @unit: unit number to allocate
*
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 23d1d23aefec..8018d56cfecc 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -585,15 +585,24 @@ static int usb_audio_probe(struct usb_interface *intf,
* now look for an empty slot and create a new card instance
*/
for (i = 0; i < SNDRV_CARDS; i++)
- if (enable[i] && ! usb_chip[i] &&
+ if (!usb_chip[i] &&
(vid[i] == -1 || vid[i] == USB_ID_VENDOR(id)) &&
(pid[i] == -1 || pid[i] == USB_ID_PRODUCT(id))) {
- err = snd_usb_audio_create(intf, dev, i, quirk,
- id, &chip);
- if (err < 0)
+ if (enable[i]) {
+ err = snd_usb_audio_create(intf, dev, i, quirk,
+ id, &chip);
+ if (err < 0)
+ goto __error;
+ chip->pm_intf = intf;
+ break;
+ } else if (vid[i] != -1 || pid[i] != -1) {
+ dev_info(&dev->dev,
+ "device (%04x:%04x) is disabled\n",
+ USB_ID_VENDOR(id),
+ USB_ID_PRODUCT(id));
+ err = -ENOENT;
goto __error;
- chip->pm_intf = intf;
- break;
+ }
}
if (!chip) {
dev_err(&dev->dev, "no available usb audio device\n");
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 2b4ceda36291..9afb8ab524c7 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -656,10 +656,14 @@ static int get_term_name(struct mixer_build *state, struct usb_audio_term *iterm
unsigned char *name, int maxlen, int term_only)
{
struct iterm_name_combo *names;
+ int len;
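+	/*
+	 * Prefer the terminal's own string descriptor, but fall through
+	 * to the type-based names below if it reads back empty.
+	 */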
- if (iterm->name)
- return snd_usb_copy_string_desc(state, iterm->name,
+ if (iterm->name) {
+ len = snd_usb_copy_string_desc(state, iterm->name,
name, maxlen);
+ if (len)
+ return len;
+ }
/* virtual type - not a real terminal */
if (iterm->type >> 16) {
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index e1e7ce9ab217..05ccc7fdcc09 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -27,6 +27,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/hid.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/usb.h>
@@ -239,7 +240,7 @@ static long snd_usb_sbrc_hwdep_read(struct snd_hwdep *hw, char __user *buf,
return err < 0 ? err : count;
}
-static unsigned int snd_usb_sbrc_hwdep_poll(struct snd_hwdep *hw, struct file *file,
+static __poll_t snd_usb_sbrc_hwdep_poll(struct snd_hwdep *hw, struct file *file,
poll_table *wait)
{
struct usb_mixer_interface *mixer = hw->private_data;
@@ -1721,6 +1722,83 @@ static int snd_microii_controls_create(struct usb_mixer_interface *mixer)
return 0;
}
+/* Creative Sound Blaster E1 */
+
+static int snd_soundblaster_e1_switch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] = kcontrol->private_value;
+ return 0;
+}
+
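+/*
+ * The input mux is driven over the device's HID interface: a two byte
+ * HID_REQ_SET_REPORT (wValue 0x0202, interface 3) selects Mic (0x00)
+ * or Aux (0x02) as the capture source.
+ */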
+static int snd_soundblaster_e1_switch_update(struct usb_mixer_interface *mixer,
+ unsigned char state)
+{
+ struct snd_usb_audio *chip = mixer->chip;
+ int err;
+ unsigned char buff[2];
+
+ buff[0] = 0x02;
+ buff[1] = state ? 0x02 : 0x00;
+
+ err = snd_usb_lock_shutdown(chip);
+ if (err < 0)
+ return err;
+ err = snd_usb_ctl_msg(chip->dev,
+ usb_sndctrlpipe(chip->dev, 0), HID_REQ_SET_REPORT,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
+ 0x0202, 3, buff, 2);
+ snd_usb_unlock_shutdown(chip);
+ return err;
+}
+
+static int snd_soundblaster_e1_switch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_elem_list *list = snd_kcontrol_chip(kcontrol);
+ unsigned char value = !!ucontrol->value.integer.value[0];
+ int err;
+
+ if (kcontrol->private_value == value)
+ return 0;
+ kcontrol->private_value = value;
+ err = snd_soundblaster_e1_switch_update(list->mixer, value);
+ return err < 0 ? err : 1;
+}
+
+static int snd_soundblaster_e1_switch_resume(struct usb_mixer_elem_list *list)
+{
+ return snd_soundblaster_e1_switch_update(list->mixer,
+ list->kctl->private_value);
+}
+
+static int snd_soundblaster_e1_switch_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ static const char *const texts[2] = {
+ "Mic", "Aux"
+ };
+
+ return snd_ctl_enum_info(uinfo, 1, ARRAY_SIZE(texts), texts);
+}
+
+static struct snd_kcontrol_new snd_soundblaster_e1_input_switch = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Input Source",
+ .info = snd_soundblaster_e1_switch_info,
+ .get = snd_soundblaster_e1_switch_get,
+ .put = snd_soundblaster_e1_switch_put,
+ .private_value = 0,
+};
+
+static int snd_soundblaster_e1_switch_create(struct usb_mixer_interface *mixer)
+{
+ return add_single_ctl_with_resume(mixer, 0,
+ snd_soundblaster_e1_switch_resume,
+ &snd_soundblaster_e1_input_switch,
+ NULL);
+}
+
int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
{
int err = 0;
@@ -1802,6 +1880,10 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
case USB_ID(0x1235, 0x800c): /* Focusrite Scarlett 18i20 */
err = snd_scarlett_controls_create(mixer);
break;
+
+ case USB_ID(0x041e, 0x323b): /* Creative Sound Blaster E1 */
+ err = snd_soundblaster_e1_switch_create(mixer);
+ break;
}
return err;
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 8a59d4782a0f..50252046b01d 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -3277,4 +3277,52 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
}
},
+{
+ /*
+ * Nura's first-gen headphones use Cambridge Silicon Radio's vendor
+ * ID, but the product ID appears to be specific to Nura.
+ * The capture interface does not work at all (even on Windows),
+ * and the playback interface only works at a 48 kHz sample rate.
+ */
+ USB_DEVICE(0x0a12, 0x1243),
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = (const struct snd_usb_audio_quirk[]) {
+ {
+ .ifnum = 0,
+ .type = QUIRK_AUDIO_STANDARD_MIXER,
+ },
+ /* Capture */
+ {
+ .ifnum = 1,
+ .type = QUIRK_IGNORE_INTERFACE,
+ },
+ /* Playback */
+ {
+ .ifnum = 2,
+ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+ .data = &(const struct audioformat) {
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels = 2,
+ .iface = 2,
+ .altsetting = 1,
+ .altset_idx = 1,
+ .attributes = UAC_EP_CS_ATTR_FILL_MAX |
+ UAC_EP_CS_ATTR_SAMPLE_RATE,
+ .endpoint = 0x03,
+ .ep_attr = USB_ENDPOINT_XFER_ISOC,
+ .rates = SNDRV_PCM_RATE_48000,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ .nr_rates = 1,
+ .rate_table = (unsigned int[]) {
+ 48000
+ }
+ }
+ },
+ }
+ }
+},
+
#undef USB_DEVICE_VENDOR_SPEC
diff --git a/sound/usb/usx2y/us122l.c b/sound/usb/usx2y/us122l.c
index 159da1f3924e..e2be10d17118 100644
--- a/sound/usb/usx2y/us122l.c
+++ b/sound/usb/usx2y/us122l.c
@@ -271,12 +271,12 @@ out:
return err;
}
-static unsigned int usb_stream_hwdep_poll(struct snd_hwdep *hw,
+static __poll_t usb_stream_hwdep_poll(struct snd_hwdep *hw,
struct file *file, poll_table *wait)
{
struct us122l *us122l = hw->private_data;
unsigned *polled;
- unsigned int mask;
+ __poll_t mask;
poll_wait(file, &us122l->sk.sleep, wait);
@@ -378,7 +378,7 @@ out:
static int usb_stream_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
unsigned cmd, unsigned long arg)
{
- struct usb_stream_config *cfg;
+ struct usb_stream_config cfg;
struct us122l *us122l = hw->private_data;
struct usb_stream *s;
unsigned min_period_frames;
@@ -388,24 +388,21 @@ static int usb_stream_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
if (cmd != SNDRV_USB_STREAM_IOCTL_SET_PARAMS)
return -ENOTTY;
- cfg = memdup_user((void *)arg, sizeof(*cfg));
- if (IS_ERR(cfg))
- return PTR_ERR(cfg);
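+	/*
+	 * The config block is small and has a fixed size, so copy it to
+	 * the stack instead of using memdup_user(); this removes the
+	 * kfree() unwinding the heap copy needed on every error path.
+	 */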
+ if (copy_from_user(&cfg, (void __user *)arg, sizeof(cfg)))
+ return -EFAULT;
+
+ if (cfg.version != USB_STREAM_INTERFACE_VERSION)
+ return -ENXIO;
- if (cfg->version != USB_STREAM_INTERFACE_VERSION) {
- err = -ENXIO;
- goto free;
- }
high_speed = us122l->dev->speed == USB_SPEED_HIGH;
- if ((cfg->sample_rate != 44100 && cfg->sample_rate != 48000 &&
+ if ((cfg.sample_rate != 44100 && cfg.sample_rate != 48000 &&
(!high_speed ||
- (cfg->sample_rate != 88200 && cfg->sample_rate != 96000))) ||
- cfg->frame_size != 6 ||
- cfg->period_frames > 0x3000) {
- err = -EINVAL;
- goto free;
- }
- switch (cfg->sample_rate) {
+ (cfg.sample_rate != 88200 && cfg.sample_rate != 96000))) ||
+ cfg.frame_size != 6 ||
+ cfg.period_frames > 0x3000)
+ return -EINVAL;
+
+ switch (cfg.sample_rate) {
case 44100:
min_period_frames = 48;
break;
@@ -418,10 +415,8 @@ static int usb_stream_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
}
if (!high_speed)
min_period_frames <<= 1;
- if (cfg->period_frames < min_period_frames) {
- err = -EINVAL;
- goto free;
- }
+ if (cfg.period_frames < min_period_frames)
+ return -EINVAL;
snd_power_wait(hw->card, SNDRV_CTL_POWER_D0);
@@ -430,24 +425,22 @@ static int usb_stream_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
if (!us122l->master)
us122l->master = file;
else if (us122l->master != file) {
- if (!s || memcmp(cfg, &s->cfg, sizeof(*cfg))) {
+ if (!s || memcmp(&cfg, &s->cfg, sizeof(cfg))) {
err = -EIO;
goto unlock;
}
us122l->slave = file;
}
- if (!s || memcmp(cfg, &s->cfg, sizeof(*cfg)) ||
+ if (!s || memcmp(&cfg, &s->cfg, sizeof(cfg)) ||
s->state == usb_stream_xrun) {
us122l_stop(us122l);
- if (!us122l_start(us122l, cfg->sample_rate, cfg->period_frames))
+ if (!us122l_start(us122l, cfg.sample_rate, cfg.period_frames))
err = -EIO;
else
err = 1;
}
unlock:
mutex_unlock(&us122l->mutex);
-free:
- kfree(cfg);
wake_up_all(&us122l->sk.sleep);
return err;
}
diff --git a/sound/usb/usx2y/usX2Yhwdep.c b/sound/usb/usx2y/usX2Yhwdep.c
index f4b3cda412fc..07d15bae75bc 100644
--- a/sound/usb/usx2y/usX2Yhwdep.c
+++ b/sound/usb/usx2y/usX2Yhwdep.c
@@ -86,9 +86,9 @@ static int snd_us428ctls_mmap(struct snd_hwdep * hw, struct file *filp, struct v
return 0;
}
-static unsigned int snd_us428ctls_poll(struct snd_hwdep *hw, struct file *file, poll_table *wait)
+static __poll_t snd_us428ctls_poll(struct snd_hwdep *hw, struct file *file, poll_table *wait)
{
- unsigned int mask = 0;
+ __poll_t mask = 0;
struct usX2Ydev *us428 = hw->private_data;
struct us428ctls_sharedmem *shm = us428->us428ctls_sharedmem;
if (us428->chip_status & USX2Y_STAT_CHIP_HUP)
@@ -198,24 +198,22 @@ static int snd_usX2Y_hwdep_dsp_load(struct snd_hwdep *hw,
struct snd_hwdep_dsp_image *dsp)
{
struct usX2Ydev *priv = hw->private_data;
- int lret, err = -EINVAL;
- snd_printdd( "dsp_load %s\n", dsp->name);
+ struct usb_device *dev = priv->dev;
+ int lret, err;
+ char *buf;
- if (access_ok(VERIFY_READ, dsp->image, dsp->length)) {
- struct usb_device* dev = priv->dev;
- char *buf;
+ snd_printdd("dsp_load %s\n", dsp->name);
- buf = memdup_user(dsp->image, dsp->length);
- if (IS_ERR(buf))
- return PTR_ERR(buf);
+ buf = memdup_user(dsp->image, dsp->length);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
- err = usb_set_interface(dev, 0, 1);
- if (err)
- snd_printk(KERN_ERR "usb_set_interface error \n");
- else
- err = usb_bulk_msg(dev, usb_sndbulkpipe(dev, 2), buf, dsp->length, &lret, 6000);
- kfree(buf);
- }
+ err = usb_set_interface(dev, 0, 1);
+ if (err)
+ snd_printk(KERN_ERR "usb_set_interface error\n");
+ else
+ err = usb_bulk_msg(dev, usb_sndbulkpipe(dev, 2), buf, dsp->length, &lret, 6000);
+ kfree(buf);
if (err)
return err;
if (dsp->index == 1) {
diff --git a/tools/arch/alpha/include/uapi/asm/errno.h b/tools/arch/alpha/include/uapi/asm/errno.h
new file mode 100644
index 000000000000..3d265f6babaf
--- /dev/null
+++ b/tools/arch/alpha/include/uapi/asm/errno.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ALPHA_ERRNO_H
+#define _ALPHA_ERRNO_H
+
+#include <asm-generic/errno-base.h>
+
+#undef EAGAIN /* 11 in errno-base.h */
+
+#define EDEADLK 11 /* Resource deadlock would occur */
+
+#define EAGAIN 35 /* Try again */
+#define EWOULDBLOCK EAGAIN /* Operation would block */
+#define EINPROGRESS 36 /* Operation now in progress */
+#define EALREADY 37 /* Operation already in progress */
+#define ENOTSOCK 38 /* Socket operation on non-socket */
+#define EDESTADDRREQ 39 /* Destination address required */
+#define EMSGSIZE 40 /* Message too long */
+#define EPROTOTYPE 41 /* Protocol wrong type for socket */
+#define ENOPROTOOPT 42 /* Protocol not available */
+#define EPROTONOSUPPORT 43 /* Protocol not supported */
+#define ESOCKTNOSUPPORT 44 /* Socket type not supported */
+#define EOPNOTSUPP 45 /* Operation not supported on transport endpoint */
+#define EPFNOSUPPORT 46 /* Protocol family not supported */
+#define EAFNOSUPPORT 47 /* Address family not supported by protocol */
+#define EADDRINUSE 48 /* Address already in use */
+#define EADDRNOTAVAIL 49 /* Cannot assign requested address */
+#define ENETDOWN 50 /* Network is down */
+#define ENETUNREACH 51 /* Network is unreachable */
+#define ENETRESET 52 /* Network dropped connection because of reset */
+#define ECONNABORTED 53 /* Software caused connection abort */
+#define ECONNRESET 54 /* Connection reset by peer */
+#define ENOBUFS 55 /* No buffer space available */
+#define EISCONN 56 /* Transport endpoint is already connected */
+#define ENOTCONN 57 /* Transport endpoint is not connected */
+#define ESHUTDOWN 58 /* Cannot send after transport endpoint shutdown */
+#define ETOOMANYREFS 59 /* Too many references: cannot splice */
+#define ETIMEDOUT 60 /* Connection timed out */
+#define ECONNREFUSED 61 /* Connection refused */
+#define ELOOP 62 /* Too many symbolic links encountered */
+#define ENAMETOOLONG 63 /* File name too long */
+#define EHOSTDOWN 64 /* Host is down */
+#define EHOSTUNREACH 65 /* No route to host */
+#define ENOTEMPTY 66 /* Directory not empty */
+
+#define EUSERS 68 /* Too many users */
+#define EDQUOT 69 /* Quota exceeded */
+#define ESTALE 70 /* Stale file handle */
+#define EREMOTE 71 /* Object is remote */
+
+#define ENOLCK 77 /* No record locks available */
+#define ENOSYS 78 /* Function not implemented */
+
+#define ENOMSG 80 /* No message of desired type */
+#define EIDRM 81 /* Identifier removed */
+#define ENOSR 82 /* Out of streams resources */
+#define ETIME 83 /* Timer expired */
+#define EBADMSG 84 /* Not a data message */
+#define EPROTO 85 /* Protocol error */
+#define ENODATA 86 /* No data available */
+#define ENOSTR 87 /* Device not a stream */
+
+#define ENOPKG 92 /* Package not installed */
+
+#define EILSEQ 116 /* Illegal byte sequence */
+
+/* The following are just random noise.. */
+#define ECHRNG 88 /* Channel number out of range */
+#define EL2NSYNC 89 /* Level 2 not synchronized */
+#define EL3HLT 90 /* Level 3 halted */
+#define EL3RST 91 /* Level 3 reset */
+
+#define ELNRNG 93 /* Link number out of range */
+#define EUNATCH 94 /* Protocol driver not attached */
+#define ENOCSI 95 /* No CSI structure available */
+#define EL2HLT 96 /* Level 2 halted */
+#define EBADE 97 /* Invalid exchange */
+#define EBADR 98 /* Invalid request descriptor */
+#define EXFULL 99 /* Exchange full */
+#define ENOANO 100 /* No anode */
+#define EBADRQC 101 /* Invalid request code */
+#define EBADSLT 102 /* Invalid slot */
+
+#define EDEADLOCK EDEADLK
+
+#define EBFONT 104 /* Bad font file format */
+#define ENONET 105 /* Machine is not on the network */
+#define ENOLINK 106 /* Link has been severed */
+#define EADV 107 /* Advertise error */
+#define ESRMNT 108 /* Srmount error */
+#define ECOMM 109 /* Communication error on send */
+#define EMULTIHOP 110 /* Multihop attempted */
+#define EDOTDOT 111 /* RFS specific error */
+#define EOVERFLOW 112 /* Value too large for defined data type */
+#define ENOTUNIQ 113 /* Name not unique on network */
+#define EBADFD 114 /* File descriptor in bad state */
+#define EREMCHG 115 /* Remote address changed */
+
+#define EUCLEAN 117 /* Structure needs cleaning */
+#define ENOTNAM 118 /* Not a XENIX named type file */
+#define ENAVAIL 119 /* No XENIX semaphores available */
+#define EISNAM 120 /* Is a named type file */
+#define EREMOTEIO 121 /* Remote I/O error */
+
+#define ELIBACC 122 /* Can not access a needed shared library */
+#define ELIBBAD 123 /* Accessing a corrupted shared library */
+#define ELIBSCN 124 /* .lib section in a.out corrupted */
+#define ELIBMAX 125 /* Attempting to link in too many shared libraries */
+#define ELIBEXEC 126 /* Cannot exec a shared library directly */
+#define ERESTART 127 /* Interrupted system call should be restarted */
+#define ESTRPIPE 128 /* Streams pipe error */
+
+#define ENOMEDIUM 129 /* No medium found */
+#define EMEDIUMTYPE 130 /* Wrong medium type */
+#define ECANCELED 131 /* Operation Cancelled */
+#define ENOKEY 132 /* Required key not available */
+#define EKEYEXPIRED 133 /* Key has expired */
+#define EKEYREVOKED 134 /* Key has been revoked */
+#define EKEYREJECTED 135 /* Key was rejected by service */
+
+/* for robust mutexes */
+#define EOWNERDEAD 136 /* Owner died */
+#define ENOTRECOVERABLE 137 /* State not recoverable */
+
+#define ERFKILL 138 /* Operation not possible due to RF-kill */
+
+#define EHWPOISON 139 /* Memory page has hardware error */
+
+#endif
diff --git a/tools/arch/mips/include/asm/errno.h b/tools/arch/mips/include/asm/errno.h
new file mode 100644
index 000000000000..21d91cdfe3c9
--- /dev/null
+++ b/tools/arch/mips/include/asm/errno.h
@@ -0,0 +1,17 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 1999, 2001, 2002 by Ralf Baechle
+ */
+#ifndef _ASM_ERRNO_H
+#define _ASM_ERRNO_H
+
+#include <uapi/asm/errno.h>
+
+
+/* The biggest error number defined here or in <linux/errno.h>. */
+#define EMAXERRNO 1133
+
+#endif /* _ASM_ERRNO_H */
diff --git a/tools/arch/mips/include/uapi/asm/errno.h b/tools/arch/mips/include/uapi/asm/errno.h
new file mode 100644
index 000000000000..2fb714e2d6d8
--- /dev/null
+++ b/tools/arch/mips/include/uapi/asm/errno.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 1999, 2001, 2002 by Ralf Baechle
+ */
+#ifndef _UAPI_ASM_ERRNO_H
+#define _UAPI_ASM_ERRNO_H
+
+/*
+ * These error numbers are intended to be MIPS ABI compatible
+ */
+
+#include <asm-generic/errno-base.h>
+
+#define ENOMSG 35 /* No message of desired type */
+#define EIDRM 36 /* Identifier removed */
+#define ECHRNG 37 /* Channel number out of range */
+#define EL2NSYNC 38 /* Level 2 not synchronized */
+#define EL3HLT 39 /* Level 3 halted */
+#define EL3RST 40 /* Level 3 reset */
+#define ELNRNG 41 /* Link number out of range */
+#define EUNATCH 42 /* Protocol driver not attached */
+#define ENOCSI 43 /* No CSI structure available */
+#define EL2HLT 44 /* Level 2 halted */
+#define EDEADLK 45 /* Resource deadlock would occur */
+#define ENOLCK 46 /* No record locks available */
+#define EBADE 50 /* Invalid exchange */
+#define EBADR 51 /* Invalid request descriptor */
+#define EXFULL 52 /* Exchange full */
+#define ENOANO 53 /* No anode */
+#define EBADRQC 54 /* Invalid request code */
+#define EBADSLT 55 /* Invalid slot */
+#define EDEADLOCK 56 /* File locking deadlock error */
+#define EBFONT 59 /* Bad font file format */
+#define ENOSTR 60 /* Device not a stream */
+#define ENODATA 61 /* No data available */
+#define ETIME 62 /* Timer expired */
+#define ENOSR 63 /* Out of streams resources */
+#define ENONET 64 /* Machine is not on the network */
+#define ENOPKG 65 /* Package not installed */
+#define EREMOTE 66 /* Object is remote */
+#define ENOLINK 67 /* Link has been severed */
+#define EADV 68 /* Advertise error */
+#define ESRMNT 69 /* Srmount error */
+#define ECOMM 70 /* Communication error on send */
+#define EPROTO 71 /* Protocol error */
+#define EDOTDOT 73 /* RFS specific error */
+#define EMULTIHOP 74 /* Multihop attempted */
+#define EBADMSG 77 /* Not a data message */
+#define ENAMETOOLONG 78 /* File name too long */
+#define EOVERFLOW 79 /* Value too large for defined data type */
+#define ENOTUNIQ 80 /* Name not unique on network */
+#define EBADFD 81 /* File descriptor in bad state */
+#define EREMCHG 82 /* Remote address changed */
+#define ELIBACC 83 /* Can not access a needed shared library */
+#define ELIBBAD 84 /* Accessing a corrupted shared library */
+#define ELIBSCN 85 /* .lib section in a.out corrupted */
+#define ELIBMAX 86 /* Attempting to link in too many shared libraries */
+#define ELIBEXEC 87 /* Cannot exec a shared library directly */
+#define EILSEQ 88 /* Illegal byte sequence */
+#define ENOSYS 89 /* Function not implemented */
+#define ELOOP 90 /* Too many symbolic links encountered */
+#define ERESTART 91 /* Interrupted system call should be restarted */
+#define ESTRPIPE 92 /* Streams pipe error */
+#define ENOTEMPTY 93 /* Directory not empty */
+#define EUSERS 94 /* Too many users */
+#define ENOTSOCK 95 /* Socket operation on non-socket */
+#define EDESTADDRREQ 96 /* Destination address required */
+#define EMSGSIZE 97 /* Message too long */
+#define EPROTOTYPE 98 /* Protocol wrong type for socket */
+#define ENOPROTOOPT 99 /* Protocol not available */
+#define EPROTONOSUPPORT 120 /* Protocol not supported */
+#define ESOCKTNOSUPPORT 121 /* Socket type not supported */
+#define EOPNOTSUPP 122 /* Operation not supported on transport endpoint */
+#define EPFNOSUPPORT 123 /* Protocol family not supported */
+#define EAFNOSUPPORT 124 /* Address family not supported by protocol */
+#define EADDRINUSE 125 /* Address already in use */
+#define EADDRNOTAVAIL 126 /* Cannot assign requested address */
+#define ENETDOWN 127 /* Network is down */
+#define ENETUNREACH 128 /* Network is unreachable */
+#define ENETRESET 129 /* Network dropped connection because of reset */
+#define ECONNABORTED 130 /* Software caused connection abort */
+#define ECONNRESET 131 /* Connection reset by peer */
+#define ENOBUFS 132 /* No buffer space available */
+#define EISCONN 133 /* Transport endpoint is already connected */
+#define ENOTCONN 134 /* Transport endpoint is not connected */
+#define EUCLEAN 135 /* Structure needs cleaning */
+#define ENOTNAM 137 /* Not a XENIX named type file */
+#define ENAVAIL 138 /* No XENIX semaphores available */
+#define EISNAM 139 /* Is a named type file */
+#define EREMOTEIO 140 /* Remote I/O error */
+#define EINIT 141 /* Reserved */
+#define EREMDEV 142 /* Error 142 */
+#define ESHUTDOWN 143 /* Cannot send after transport endpoint shutdown */
+#define ETOOMANYREFS 144 /* Too many references: cannot splice */
+#define ETIMEDOUT 145 /* Connection timed out */
+#define ECONNREFUSED 146 /* Connection refused */
+#define EHOSTDOWN 147 /* Host is down */
+#define EHOSTUNREACH 148 /* No route to host */
+#define EWOULDBLOCK EAGAIN /* Operation would block */
+#define EALREADY 149 /* Operation already in progress */
+#define EINPROGRESS 150 /* Operation now in progress */
+#define ESTALE 151 /* Stale file handle */
+#define ECANCELED 158 /* AIO operation canceled */
+
+/*
+ * These error are Linux extensions.
+ */
+#define ENOMEDIUM 159 /* No medium found */
+#define EMEDIUMTYPE 160 /* Wrong medium type */
+#define ENOKEY 161 /* Required key not available */
+#define EKEYEXPIRED 162 /* Key has expired */
+#define EKEYREVOKED 163 /* Key has been revoked */
+#define EKEYREJECTED 164 /* Key was rejected by service */
+
+/* for robust mutexes */
+#define EOWNERDEAD 165 /* Owner died */
+#define ENOTRECOVERABLE 166 /* State not recoverable */
+
+#define ERFKILL 167 /* Operation not possible due to RF-kill */
+
+#define EHWPOISON 168 /* Memory page has hardware error */
+
+#define EDQUOT 1133 /* Quota exceeded */
+
+
+#endif /* _UAPI_ASM_ERRNO_H */
diff --git a/tools/arch/parisc/include/uapi/asm/errno.h b/tools/arch/parisc/include/uapi/asm/errno.h
new file mode 100644
index 000000000000..fc0df353ff0d
--- /dev/null
+++ b/tools/arch/parisc/include/uapi/asm/errno.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _PARISC_ERRNO_H
+#define _PARISC_ERRNO_H
+
+#include <asm-generic/errno-base.h>
+
+#define ENOMSG 35 /* No message of desired type */
+#define EIDRM 36 /* Identifier removed */
+#define ECHRNG 37 /* Channel number out of range */
+#define EL2NSYNC 38 /* Level 2 not synchronized */
+#define EL3HLT 39 /* Level 3 halted */
+#define EL3RST 40 /* Level 3 reset */
+#define ELNRNG 41 /* Link number out of range */
+#define EUNATCH 42 /* Protocol driver not attached */
+#define ENOCSI 43 /* No CSI structure available */
+#define EL2HLT 44 /* Level 2 halted */
+#define EDEADLK 45 /* Resource deadlock would occur */
+#define EDEADLOCK EDEADLK
+#define ENOLCK 46 /* No record locks available */
+#define EILSEQ 47 /* Illegal byte sequence */
+
+#define ENONET 50 /* Machine is not on the network */
+#define ENODATA 51 /* No data available */
+#define ETIME 52 /* Timer expired */
+#define ENOSR 53 /* Out of streams resources */
+#define ENOSTR 54 /* Device not a stream */
+#define ENOPKG 55 /* Package not installed */
+
+#define ENOLINK 57 /* Link has been severed */
+#define EADV 58 /* Advertise error */
+#define ESRMNT 59 /* Srmount error */
+#define ECOMM 60 /* Communication error on send */
+#define EPROTO 61 /* Protocol error */
+
+#define EMULTIHOP 64 /* Multihop attempted */
+
+#define EDOTDOT 66 /* RFS specific error */
+#define EBADMSG 67 /* Not a data message */
+#define EUSERS 68 /* Too many users */
+#define EDQUOT 69 /* Quota exceeded */
+#define ESTALE 70 /* Stale file handle */
+#define EREMOTE 71 /* Object is remote */
+#define EOVERFLOW 72 /* Value too large for defined data type */
+
+/* these errnos are defined by Linux but not HPUX. */
+
+#define EBADE 160 /* Invalid exchange */
+#define EBADR 161 /* Invalid request descriptor */
+#define EXFULL 162 /* Exchange full */
+#define ENOANO 163 /* No anode */
+#define EBADRQC 164 /* Invalid request code */
+#define EBADSLT 165 /* Invalid slot */
+#define EBFONT 166 /* Bad font file format */
+#define ENOTUNIQ 167 /* Name not unique on network */
+#define EBADFD 168 /* File descriptor in bad state */
+#define EREMCHG 169 /* Remote address changed */
+#define ELIBACC 170 /* Can not access a needed shared library */
+#define ELIBBAD 171 /* Accessing a corrupted shared library */
+#define ELIBSCN 172 /* .lib section in a.out corrupted */
+#define ELIBMAX 173 /* Attempting to link in too many shared libraries */
+#define ELIBEXEC 174 /* Cannot exec a shared library directly */
+#define ERESTART 175 /* Interrupted system call should be restarted */
+#define ESTRPIPE 176 /* Streams pipe error */
+#define EUCLEAN 177 /* Structure needs cleaning */
+#define ENOTNAM 178 /* Not a XENIX named type file */
+#define ENAVAIL 179 /* No XENIX semaphores available */
+#define EISNAM 180 /* Is a named type file */
+#define EREMOTEIO 181 /* Remote I/O error */
+#define ENOMEDIUM 182 /* No medium found */
+#define EMEDIUMTYPE 183 /* Wrong medium type */
+#define ENOKEY 184 /* Required key not available */
+#define EKEYEXPIRED 185 /* Key has expired */
+#define EKEYREVOKED 186 /* Key has been revoked */
+#define EKEYREJECTED 187 /* Key was rejected by service */
+
+/* We now return you to your regularly scheduled HPUX. */
+
+#define ENOSYM 215 /* symbol does not exist in executable */
+#define ENOTSOCK 216 /* Socket operation on non-socket */
+#define EDESTADDRREQ 217 /* Destination address required */
+#define EMSGSIZE 218 /* Message too long */
+#define EPROTOTYPE 219 /* Protocol wrong type for socket */
+#define ENOPROTOOPT 220 /* Protocol not available */
+#define EPROTONOSUPPORT 221 /* Protocol not supported */
+#define ESOCKTNOSUPPORT 222 /* Socket type not supported */
+#define EOPNOTSUPP 223 /* Operation not supported on transport endpoint */
+#define EPFNOSUPPORT 224 /* Protocol family not supported */
+#define EAFNOSUPPORT 225 /* Address family not supported by protocol */
+#define EADDRINUSE 226 /* Address already in use */
+#define EADDRNOTAVAIL 227 /* Cannot assign requested address */
+#define ENETDOWN 228 /* Network is down */
+#define ENETUNREACH 229 /* Network is unreachable */
+#define ENETRESET 230 /* Network dropped connection because of reset */
+#define ECONNABORTED 231 /* Software caused connection abort */
+#define ECONNRESET 232 /* Connection reset by peer */
+#define ENOBUFS 233 /* No buffer space available */
+#define EISCONN 234 /* Transport endpoint is already connected */
+#define ENOTCONN 235 /* Transport endpoint is not connected */
+#define ESHUTDOWN 236 /* Cannot send after transport endpoint shutdown */
+#define ETOOMANYREFS 237 /* Too many references: cannot splice */
+#define ETIMEDOUT 238 /* Connection timed out */
+#define ECONNREFUSED 239 /* Connection refused */
+#define EREFUSED ECONNREFUSED /* for HP's NFS apparently */
+#define EREMOTERELEASE 240 /* Remote peer released connection */
+#define EHOSTDOWN 241 /* Host is down */
+#define EHOSTUNREACH 242 /* No route to host */
+
+#define EALREADY 244 /* Operation already in progress */
+#define EINPROGRESS 245 /* Operation now in progress */
+#define EWOULDBLOCK EAGAIN /* Operation would block (Not HPUX compliant) */
+#define ENOTEMPTY 247 /* Directory not empty */
+#define ENAMETOOLONG 248 /* File name too long */
+#define ELOOP 249 /* Too many symbolic links encountered */
+#define ENOSYS 251 /* Function not implemented */
+
+#define ENOTSUP 252 /* Function not implemented (POSIX.4 / HPUX) */
+#define ECANCELLED 253 /* aio request was canceled before complete (POSIX.4 / HPUX) */
+#define ECANCELED ECANCELLED /* SuSv3 and Solaris wants one 'L' */
+
+/* for robust mutexes */
+#define EOWNERDEAD 254 /* Owner died */
+#define ENOTRECOVERABLE 255 /* State not recoverable */
+
+#define ERFKILL 256 /* Operation not possible due to RF-kill */
+
+#define EHWPOISON 257 /* Memory page has hardware error */
+
+#endif
diff --git a/tools/arch/powerpc/include/uapi/asm/errno.h b/tools/arch/powerpc/include/uapi/asm/errno.h
new file mode 100644
index 000000000000..cc79856896a1
--- /dev/null
+++ b/tools/arch/powerpc/include/uapi/asm/errno.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_POWERPC_ERRNO_H
+#define _ASM_POWERPC_ERRNO_H
+
+#include <asm-generic/errno.h>
+
+#undef EDEADLOCK
+#define EDEADLOCK 58 /* File locking deadlock error */
+
+#endif /* _ASM_POWERPC_ERRNO_H */
diff --git a/tools/arch/s390/include/uapi/asm/unistd.h b/tools/arch/s390/include/uapi/asm/unistd.h
new file mode 100644
index 000000000000..725120939051
--- /dev/null
+++ b/tools/arch/s390/include/uapi/asm/unistd.h
@@ -0,0 +1,412 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * S390 version
+ *
+ * Derived from "include/asm-i386/unistd.h"
+ */
+
+#ifndef _UAPI_ASM_S390_UNISTD_H_
+#define _UAPI_ASM_S390_UNISTD_H_
+
+/*
+ * This file contains the system call numbers.
+ */
+
+#define __NR_exit 1
+#define __NR_fork 2
+#define __NR_read 3
+#define __NR_write 4
+#define __NR_open 5
+#define __NR_close 6
+#define __NR_restart_syscall 7
+#define __NR_creat 8
+#define __NR_link 9
+#define __NR_unlink 10
+#define __NR_execve 11
+#define __NR_chdir 12
+#define __NR_mknod 14
+#define __NR_chmod 15
+#define __NR_lseek 19
+#define __NR_getpid 20
+#define __NR_mount 21
+#define __NR_umount 22
+#define __NR_ptrace 26
+#define __NR_alarm 27
+#define __NR_pause 29
+#define __NR_utime 30
+#define __NR_access 33
+#define __NR_nice 34
+#define __NR_sync 36
+#define __NR_kill 37
+#define __NR_rename 38
+#define __NR_mkdir 39
+#define __NR_rmdir 40
+#define __NR_dup 41
+#define __NR_pipe 42
+#define __NR_times 43
+#define __NR_brk 45
+#define __NR_signal 48
+#define __NR_acct 51
+#define __NR_umount2 52
+#define __NR_ioctl 54
+#define __NR_fcntl 55
+#define __NR_setpgid 57
+#define __NR_umask 60
+#define __NR_chroot 61
+#define __NR_ustat 62
+#define __NR_dup2 63
+#define __NR_getppid 64
+#define __NR_getpgrp 65
+#define __NR_setsid 66
+#define __NR_sigaction 67
+#define __NR_sigsuspend 72
+#define __NR_sigpending 73
+#define __NR_sethostname 74
+#define __NR_setrlimit 75
+#define __NR_getrusage 77
+#define __NR_gettimeofday 78
+#define __NR_settimeofday 79
+#define __NR_symlink 83
+#define __NR_readlink 85
+#define __NR_uselib 86
+#define __NR_swapon 87
+#define __NR_reboot 88
+#define __NR_readdir 89
+#define __NR_mmap 90
+#define __NR_munmap 91
+#define __NR_truncate 92
+#define __NR_ftruncate 93
+#define __NR_fchmod 94
+#define __NR_getpriority 96
+#define __NR_setpriority 97
+#define __NR_statfs 99
+#define __NR_fstatfs 100
+#define __NR_socketcall 102
+#define __NR_syslog 103
+#define __NR_setitimer 104
+#define __NR_getitimer 105
+#define __NR_stat 106
+#define __NR_lstat 107
+#define __NR_fstat 108
+#define __NR_lookup_dcookie 110
+#define __NR_vhangup 111
+#define __NR_idle 112
+#define __NR_wait4 114
+#define __NR_swapoff 115
+#define __NR_sysinfo 116
+#define __NR_ipc 117
+#define __NR_fsync 118
+#define __NR_sigreturn 119
+#define __NR_clone 120
+#define __NR_setdomainname 121
+#define __NR_uname 122
+#define __NR_adjtimex 124
+#define __NR_mprotect 125
+#define __NR_sigprocmask 126
+#define __NR_create_module 127
+#define __NR_init_module 128
+#define __NR_delete_module 129
+#define __NR_get_kernel_syms 130
+#define __NR_quotactl 131
+#define __NR_getpgid 132
+#define __NR_fchdir 133
+#define __NR_bdflush 134
+#define __NR_sysfs 135
+#define __NR_personality 136
+#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
+#define __NR_getdents 141
+#define __NR_flock 143
+#define __NR_msync 144
+#define __NR_readv 145
+#define __NR_writev 146
+#define __NR_getsid 147
+#define __NR_fdatasync 148
+#define __NR__sysctl 149
+#define __NR_mlock 150
+#define __NR_munlock 151
+#define __NR_mlockall 152
+#define __NR_munlockall 153
+#define __NR_sched_setparam 154
+#define __NR_sched_getparam 155
+#define __NR_sched_setscheduler 156
+#define __NR_sched_getscheduler 157
+#define __NR_sched_yield 158
+#define __NR_sched_get_priority_max 159
+#define __NR_sched_get_priority_min 160
+#define __NR_sched_rr_get_interval 161
+#define __NR_nanosleep 162
+#define __NR_mremap 163
+#define __NR_query_module 167
+#define __NR_poll 168
+#define __NR_nfsservctl 169
+#define __NR_prctl 172
+#define __NR_rt_sigreturn 173
+#define __NR_rt_sigaction 174
+#define __NR_rt_sigprocmask 175
+#define __NR_rt_sigpending 176
+#define __NR_rt_sigtimedwait 177
+#define __NR_rt_sigqueueinfo 178
+#define __NR_rt_sigsuspend 179
+#define __NR_pread64 180
+#define __NR_pwrite64 181
+#define __NR_getcwd 183
+#define __NR_capget 184
+#define __NR_capset 185
+#define __NR_sigaltstack 186
+#define __NR_sendfile 187
+#define __NR_getpmsg 188
+#define __NR_putpmsg 189
+#define __NR_vfork 190
+#define __NR_pivot_root 217
+#define __NR_mincore 218
+#define __NR_madvise 219
+#define __NR_getdents64 220
+#define __NR_readahead 222
+#define __NR_setxattr 224
+#define __NR_lsetxattr 225
+#define __NR_fsetxattr 226
+#define __NR_getxattr 227
+#define __NR_lgetxattr 228
+#define __NR_fgetxattr 229
+#define __NR_listxattr 230
+#define __NR_llistxattr 231
+#define __NR_flistxattr 232
+#define __NR_removexattr 233
+#define __NR_lremovexattr 234
+#define __NR_fremovexattr 235
+#define __NR_gettid 236
+#define __NR_tkill 237
+#define __NR_futex 238
+#define __NR_sched_setaffinity 239
+#define __NR_sched_getaffinity 240
+#define __NR_tgkill 241
+/* Number 242 is reserved for tux */
+#define __NR_io_setup 243
+#define __NR_io_destroy 244
+#define __NR_io_getevents 245
+#define __NR_io_submit 246
+#define __NR_io_cancel 247
+#define __NR_exit_group 248
+#define __NR_epoll_create 249
+#define __NR_epoll_ctl 250
+#define __NR_epoll_wait 251
+#define __NR_set_tid_address 252
+#define __NR_fadvise64 253
+#define __NR_timer_create 254
+#define __NR_timer_settime 255
+#define __NR_timer_gettime 256
+#define __NR_timer_getoverrun 257
+#define __NR_timer_delete 258
+#define __NR_clock_settime 259
+#define __NR_clock_gettime 260
+#define __NR_clock_getres 261
+#define __NR_clock_nanosleep 262
+/* Number 263 is reserved for vserver */
+#define __NR_statfs64 265
+#define __NR_fstatfs64 266
+#define __NR_remap_file_pages 267
+#define __NR_mbind 268
+#define __NR_get_mempolicy 269
+#define __NR_set_mempolicy 270
+#define __NR_mq_open 271
+#define __NR_mq_unlink 272
+#define __NR_mq_timedsend 273
+#define __NR_mq_timedreceive 274
+#define __NR_mq_notify 275
+#define __NR_mq_getsetattr 276
+#define __NR_kexec_load 277
+#define __NR_add_key 278
+#define __NR_request_key 279
+#define __NR_keyctl 280
+#define __NR_waitid 281
+#define __NR_ioprio_set 282
+#define __NR_ioprio_get 283
+#define __NR_inotify_init 284
+#define __NR_inotify_add_watch 285
+#define __NR_inotify_rm_watch 286
+#define __NR_migrate_pages 287
+#define __NR_openat 288
+#define __NR_mkdirat 289
+#define __NR_mknodat 290
+#define __NR_fchownat 291
+#define __NR_futimesat 292
+#define __NR_unlinkat 294
+#define __NR_renameat 295
+#define __NR_linkat 296
+#define __NR_symlinkat 297
+#define __NR_readlinkat 298
+#define __NR_fchmodat 299
+#define __NR_faccessat 300
+#define __NR_pselect6 301
+#define __NR_ppoll 302
+#define __NR_unshare 303
+#define __NR_set_robust_list 304
+#define __NR_get_robust_list 305
+#define __NR_splice 306
+#define __NR_sync_file_range 307
+#define __NR_tee 308
+#define __NR_vmsplice 309
+#define __NR_move_pages 310
+#define __NR_getcpu 311
+#define __NR_epoll_pwait 312
+#define __NR_utimes 313
+#define __NR_fallocate 314
+#define __NR_utimensat 315
+#define __NR_signalfd 316
+#define __NR_timerfd 317
+#define __NR_eventfd 318
+#define __NR_timerfd_create 319
+#define __NR_timerfd_settime 320
+#define __NR_timerfd_gettime 321
+#define __NR_signalfd4 322
+#define __NR_eventfd2 323
+#define __NR_inotify_init1 324
+#define __NR_pipe2 325
+#define __NR_dup3 326
+#define __NR_epoll_create1 327
+#define __NR_preadv 328
+#define __NR_pwritev 329
+#define __NR_rt_tgsigqueueinfo 330
+#define __NR_perf_event_open 331
+#define __NR_fanotify_init 332
+#define __NR_fanotify_mark 333
+#define __NR_prlimit64 334
+#define __NR_name_to_handle_at 335
+#define __NR_open_by_handle_at 336
+#define __NR_clock_adjtime 337
+#define __NR_syncfs 338
+#define __NR_setns 339
+#define __NR_process_vm_readv 340
+#define __NR_process_vm_writev 341
+#define __NR_s390_runtime_instr 342
+#define __NR_kcmp 343
+#define __NR_finit_module 344
+#define __NR_sched_setattr 345
+#define __NR_sched_getattr 346
+#define __NR_renameat2 347
+#define __NR_seccomp 348
+#define __NR_getrandom 349
+#define __NR_memfd_create 350
+#define __NR_bpf 351
+#define __NR_s390_pci_mmio_write 352
+#define __NR_s390_pci_mmio_read 353
+#define __NR_execveat 354
+#define __NR_userfaultfd 355
+#define __NR_membarrier 356
+#define __NR_recvmmsg 357
+#define __NR_sendmmsg 358
+#define __NR_socket 359
+#define __NR_socketpair 360
+#define __NR_bind 361
+#define __NR_connect 362
+#define __NR_listen 363
+#define __NR_accept4 364
+#define __NR_getsockopt 365
+#define __NR_setsockopt 366
+#define __NR_getsockname 367
+#define __NR_getpeername 368
+#define __NR_sendto 369
+#define __NR_sendmsg 370
+#define __NR_recvfrom 371
+#define __NR_recvmsg 372
+#define __NR_shutdown 373
+#define __NR_mlock2 374
+#define __NR_copy_file_range 375
+#define __NR_preadv2 376
+#define __NR_pwritev2 377
+#define __NR_s390_guarded_storage 378
+#define __NR_statx 379
+#define __NR_s390_sthyi 380
+#define NR_syscalls 381
+
+/*
+ * There are some system calls that are not present on 64 bit, and some
+ * have a different name although they do the same thing (e.g. __NR_chown32
+ * on 31 bit is __NR_chown on 64 bit).
+ */
+#ifndef __s390x__
+
+#define __NR_time 13
+#define __NR_lchown 16
+#define __NR_setuid 23
+#define __NR_getuid 24
+#define __NR_stime 25
+#define __NR_setgid 46
+#define __NR_getgid 47
+#define __NR_geteuid 49
+#define __NR_getegid 50
+#define __NR_setreuid 70
+#define __NR_setregid 71
+#define __NR_getrlimit 76
+#define __NR_getgroups 80
+#define __NR_setgroups 81
+#define __NR_fchown 95
+#define __NR_ioperm 101
+#define __NR_setfsuid 138
+#define __NR_setfsgid 139
+#define __NR__llseek 140
+#define __NR__newselect 142
+#define __NR_setresuid 164
+#define __NR_getresuid 165
+#define __NR_setresgid 170
+#define __NR_getresgid 171
+#define __NR_chown 182
+#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
+#define __NR_mmap2 192
+#define __NR_truncate64 193
+#define __NR_ftruncate64 194
+#define __NR_stat64 195
+#define __NR_lstat64 196
+#define __NR_fstat64 197
+#define __NR_lchown32 198
+#define __NR_getuid32 199
+#define __NR_getgid32 200
+#define __NR_geteuid32 201
+#define __NR_getegid32 202
+#define __NR_setreuid32 203
+#define __NR_setregid32 204
+#define __NR_getgroups32 205
+#define __NR_setgroups32 206
+#define __NR_fchown32 207
+#define __NR_setresuid32 208
+#define __NR_getresuid32 209
+#define __NR_setresgid32 210
+#define __NR_getresgid32 211
+#define __NR_chown32 212
+#define __NR_setuid32 213
+#define __NR_setgid32 214
+#define __NR_setfsuid32 215
+#define __NR_setfsgid32 216
+#define __NR_fcntl64 221
+#define __NR_sendfile64 223
+#define __NR_fadvise64_64 264
+#define __NR_fstatat64 293
+
+#else
+
+#define __NR_select 142
+#define __NR_getrlimit 191 /* SuS compliant getrlimit */
+#define __NR_lchown 198
+#define __NR_getuid 199
+#define __NR_getgid 200
+#define __NR_geteuid 201
+#define __NR_getegid 202
+#define __NR_setreuid 203
+#define __NR_setregid 204
+#define __NR_getgroups 205
+#define __NR_setgroups 206
+#define __NR_fchown 207
+#define __NR_setresuid 208
+#define __NR_getresuid 209
+#define __NR_setresgid 210
+#define __NR_getresgid 211
+#define __NR_chown 212
+#define __NR_setuid 213
+#define __NR_setgid 214
+#define __NR_setfsuid 215
+#define __NR_setfsgid 216
+#define __NR_newfstatat 293
+
+#endif
+
+#endif /* _UAPI_ASM_S390_UNISTD_H_ */
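A minimal sketch of how the dual tables above get consumed; raw_getuid() is a hypothetical helper, and the numbers come from the two #ifdef branches (24 on 31-bit, 199 on 64-bit):

#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>

/* The preprocessor picks the right number per ABI, so the same call
 * site builds for both 31-bit and 64-bit s390. */
static long raw_getuid(void)
{
	return syscall(__NR_getuid);
}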
diff --git a/tools/arch/sparc/include/uapi/asm/errno.h b/tools/arch/sparc/include/uapi/asm/errno.h
new file mode 100644
index 000000000000..81a732b902ee
--- /dev/null
+++ b/tools/arch/sparc/include/uapi/asm/errno.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _SPARC_ERRNO_H
+#define _SPARC_ERRNO_H
+
+/* These match the SunOS error numbering scheme. */
+
+#include <asm-generic/errno-base.h>
+
+#define EWOULDBLOCK EAGAIN /* Operation would block */
+#define EINPROGRESS 36 /* Operation now in progress */
+#define EALREADY 37 /* Operation already in progress */
+#define ENOTSOCK 38 /* Socket operation on non-socket */
+#define EDESTADDRREQ 39 /* Destination address required */
+#define EMSGSIZE 40 /* Message too long */
+#define EPROTOTYPE 41 /* Protocol wrong type for socket */
+#define ENOPROTOOPT 42 /* Protocol not available */
+#define EPROTONOSUPPORT 43 /* Protocol not supported */
+#define ESOCKTNOSUPPORT 44 /* Socket type not supported */
+#define EOPNOTSUPP 45 /* Op not supported on transport endpoint */
+#define EPFNOSUPPORT 46 /* Protocol family not supported */
+#define EAFNOSUPPORT 47 /* Address family not supported by protocol */
+#define EADDRINUSE 48 /* Address already in use */
+#define EADDRNOTAVAIL 49 /* Cannot assign requested address */
+#define ENETDOWN 50 /* Network is down */
+#define ENETUNREACH 51 /* Network is unreachable */
+#define ENETRESET 52 /* Net dropped connection because of reset */
+#define ECONNABORTED 53 /* Software caused connection abort */
+#define ECONNRESET 54 /* Connection reset by peer */
+#define ENOBUFS 55 /* No buffer space available */
+#define EISCONN 56 /* Transport endpoint is already connected */
+#define ENOTCONN 57 /* Transport endpoint is not connected */
+#define ESHUTDOWN 58 /* No send after transport endpoint shutdown */
+#define ETOOMANYREFS 59 /* Too many references: cannot splice */
+#define ETIMEDOUT 60 /* Connection timed out */
+#define ECONNREFUSED 61 /* Connection refused */
+#define ELOOP 62 /* Too many symbolic links encountered */
+#define ENAMETOOLONG 63 /* File name too long */
+#define EHOSTDOWN 64 /* Host is down */
+#define EHOSTUNREACH 65 /* No route to host */
+#define ENOTEMPTY 66 /* Directory not empty */
+#define EPROCLIM 67 /* SUNOS: Too many processes */
+#define EUSERS 68 /* Too many users */
+#define EDQUOT 69 /* Quota exceeded */
+#define ESTALE 70 /* Stale file handle */
+#define EREMOTE 71 /* Object is remote */
+#define ENOSTR 72 /* Device not a stream */
+#define ETIME 73 /* Timer expired */
+#define ENOSR 74 /* Out of streams resources */
+#define ENOMSG 75 /* No message of desired type */
+#define EBADMSG 76 /* Not a data message */
+#define EIDRM 77 /* Identifier removed */
+#define EDEADLK 78 /* Resource deadlock would occur */
+#define ENOLCK 79 /* No record locks available */
+#define ENONET 80 /* Machine is not on the network */
+#define ERREMOTE 81 /* SunOS: Too many lvls of remote in path */
+#define ENOLINK 82 /* Link has been severed */
+#define EADV 83 /* Advertise error */
+#define ESRMNT 84 /* Srmount error */
+#define ECOMM 85 /* Communication error on send */
+#define EPROTO 86 /* Protocol error */
+#define EMULTIHOP 87 /* Multihop attempted */
+#define EDOTDOT 88 /* RFS specific error */
+#define EREMCHG 89 /* Remote address changed */
+#define ENOSYS 90 /* Function not implemented */
+
+/* The rest have no SunOS equivalent. */
+#define ESTRPIPE 91 /* Streams pipe error */
+#define EOVERFLOW 92 /* Value too large for defined data type */
+#define EBADFD 93 /* File descriptor in bad state */
+#define ECHRNG 94 /* Channel number out of range */
+#define EL2NSYNC 95 /* Level 2 not synchronized */
+#define EL3HLT 96 /* Level 3 halted */
+#define EL3RST 97 /* Level 3 reset */
+#define ELNRNG 98 /* Link number out of range */
+#define EUNATCH 99 /* Protocol driver not attached */
+#define ENOCSI 100 /* No CSI structure available */
+#define EL2HLT 101 /* Level 2 halted */
+#define EBADE 102 /* Invalid exchange */
+#define EBADR 103 /* Invalid request descriptor */
+#define EXFULL 104 /* Exchange full */
+#define ENOANO 105 /* No anode */
+#define EBADRQC 106 /* Invalid request code */
+#define EBADSLT 107 /* Invalid slot */
+#define EDEADLOCK 108 /* File locking deadlock error */
+#define EBFONT 109 /* Bad font file format */
+#define ELIBEXEC 110 /* Cannot exec a shared library directly */
+#define ENODATA 111 /* No data available */
+#define ELIBBAD 112 /* Accessing a corrupted shared library */
+#define ENOPKG 113 /* Package not installed */
+#define ELIBACC 114 /* Can not access a needed shared library */
+#define ENOTUNIQ 115 /* Name not unique on network */
+#define ERESTART 116 /* Interrupted syscall should be restarted */
+#define EUCLEAN 117 /* Structure needs cleaning */
+#define ENOTNAM 118 /* Not a XENIX named type file */
+#define ENAVAIL 119 /* No XENIX semaphores available */
+#define EISNAM 120 /* Is a named type file */
+#define EREMOTEIO 121 /* Remote I/O error */
+#define EILSEQ 122 /* Illegal byte sequence */
+#define ELIBMAX 123 /* Atmpt to link in too many shared libs */
+#define ELIBSCN 124 /* .lib section in a.out corrupted */
+
+#define ENOMEDIUM 125 /* No medium found */
+#define EMEDIUMTYPE 126 /* Wrong medium type */
+#define ECANCELED 127 /* Operation Cancelled */
+#define ENOKEY 128 /* Required key not available */
+#define EKEYEXPIRED 129 /* Key has expired */
+#define EKEYREVOKED 130 /* Key has been revoked */
+#define EKEYREJECTED 131 /* Key was rejected by service */
+
+/* for robust mutexes */
+#define EOWNERDEAD 132 /* Owner died */
+#define ENOTRECOVERABLE 133 /* State not recoverable */
+
+#define ERFKILL 134 /* Operation not possible due to RF-kill */
+
+#define EHWPOISON 135 /* Memory page has hardware error */
+
+#endif
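Since this table aliases EWOULDBLOCK to EAGAIN (as asm-generic does too), here is a minimal sketch of the portable consumer idiom, assuming only that both names are integer-constant macros:

#include <errno.h>

/* Treat EAGAIN and EWOULDBLOCK as one condition; the extra test
 * compiles away on arches where they are the same constant. */
static int is_would_block(int err)
{
	return err == EAGAIN
#if EWOULDBLOCK != EAGAIN
	       || err == EWOULDBLOCK
#endif
	       ;
}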
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 800104c8a3ed..21ac898df2d8 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -197,11 +197,12 @@
#define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */
#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */
#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */
+#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */
#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
-
+#define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */
#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
#define X86_FEATURE_AVX512_4VNNIW ( 7*32+16) /* AVX-512 Neural Network Instructions */
@@ -340,5 +341,6 @@
#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */
#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */
#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */
+#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h
index 14d6d5007314..b027633e7300 100644
--- a/tools/arch/x86/include/asm/disabled-features.h
+++ b/tools/arch/x86/include/asm/disabled-features.h
@@ -50,6 +50,12 @@
# define DISABLE_LA57 (1<<(X86_FEATURE_LA57 & 31))
#endif
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+# define DISABLE_PTI 0
+#else
+# define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31))
+#endif
+
/*
* Make sure to add features to the correct mask
*/
@@ -60,7 +66,7 @@
#define DISABLED_MASK4 (DISABLE_PCID)
#define DISABLED_MASK5 0
#define DISABLED_MASK6 0
-#define DISABLED_MASK7 0
+#define DISABLED_MASK7 (DISABLE_PTI)
#define DISABLED_MASK8 0
#define DISABLED_MASK9 (DISABLE_MPX)
#define DISABLED_MASK10 0
diff --git a/tools/arch/x86/include/uapi/asm/errno.h b/tools/arch/x86/include/uapi/asm/errno.h
new file mode 100644
index 000000000000..4c82b503d92f
--- /dev/null
+++ b/tools/arch/x86/include/uapi/asm/errno.h
@@ -0,0 +1 @@
+#include <asm-generic/errno.h>
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index c71a05b9c984..c378f003b007 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -56,6 +56,7 @@ FEATURE_TESTS_BASIC := \
libunwind-arm \
libunwind-aarch64 \
pthread-attr-setaffinity-np \
+ pthread-barrier \
stackprotector-all \
timerfd \
libdw-dwarf-unwind \
@@ -65,7 +66,8 @@ FEATURE_TESTS_BASIC := \
bpf \
sched_getcpu \
sdt \
- setns
+ setns \
+ libopencsd
# FEATURE_TESTS_BASIC + FEATURE_TESTS_EXTRA is the complete list
# of all feature tests
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index bc715f6ac320..0a490cb15149 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -38,6 +38,7 @@ FILES= \
test-libunwind-debug-frame-arm.bin \
test-libunwind-debug-frame-aarch64.bin \
test-pthread-attr-setaffinity-np.bin \
+ test-pthread-barrier.bin \
test-stackprotector-all.bin \
test-timerfd.bin \
test-libdw-dwarf-unwind.bin \
@@ -52,7 +53,8 @@ FILES= \
test-cxx.bin \
test-jvmti.bin \
test-sched_getcpu.bin \
- test-setns.bin
+ test-setns.bin \
+ test-libopencsd.bin
FILES := $(addprefix $(OUTPUT),$(FILES))
@@ -80,6 +82,9 @@ $(OUTPUT)test-hello.bin:
$(OUTPUT)test-pthread-attr-setaffinity-np.bin:
$(BUILD) -D_GNU_SOURCE -lpthread
+$(OUTPUT)test-pthread-barrier.bin:
+ $(BUILD) -lpthread
+
$(OUTPUT)test-stackprotector-all.bin:
$(BUILD) -fstack-protector-all
@@ -101,6 +106,10 @@ $(OUTPUT)test-sched_getcpu.bin:
$(OUTPUT)test-setns.bin:
$(BUILD)
+$(OUTPUT)test-libopencsd.bin:
+ $(BUILD) # -lopencsd_c_api -lopencsd provided by
+ # $(FEATURE_CHECK_LDFLAGS-libopencsd)
+
DWARFLIBS := -ldw
ifeq ($(findstring -static,${LDFLAGS}),-static)
DWARFLIBS += -lelf -lebl -lz -llzma -lbz2
diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c
index 4112702e4aed..8dc20a61341f 100644
--- a/tools/build/feature/test-all.c
+++ b/tools/build/feature/test-all.c
@@ -118,6 +118,10 @@
# include "test-pthread-attr-setaffinity-np.c"
#undef main
+#define main main_test_pthread_barrier
+# include "test-pthread-barrier.c"
+#undef main
+
#define main main_test_sched_getcpu
# include "test-sched_getcpu.c"
#undef main
@@ -158,6 +162,10 @@
# include "test-setns.c"
#undef main
+#define main main_test_libopencsd
+# include "test-libopencsd.c"
+#undef main
+
int main(int argc, char *argv[])
{
main_test_libpython();
@@ -187,6 +195,7 @@ int main(int argc, char *argv[])
main_test_sync_compare_and_swap(argc, argv);
main_test_zlib();
main_test_pthread_attr_setaffinity_np();
+ main_test_pthread_barrier();
main_test_lzma();
main_test_get_cpuid();
main_test_bpf();
@@ -194,6 +203,7 @@ int main(int argc, char *argv[])
main_test_sched_getcpu();
main_test_sdt();
main_test_setns();
+ main_test_libopencsd();
return 0;
}
diff --git a/tools/build/feature/test-libopencsd.c b/tools/build/feature/test-libopencsd.c
new file mode 100644
index 000000000000..5ff1246e6194
--- /dev/null
+++ b/tools/build/feature/test-libopencsd.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <opencsd/c_api/opencsd_c_api.h>
+
+int main(void)
+{
+ (void)ocsd_get_version();
+ return 0;
+}
diff --git a/tools/build/feature/test-pthread-barrier.c b/tools/build/feature/test-pthread-barrier.c
new file mode 100644
index 000000000000..0558d9334d97
--- /dev/null
+++ b/tools/build/feature/test-pthread-barrier.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdint.h>
+#include <pthread.h>
+
+int main(void)
+{
+ pthread_barrier_t barrier;
+
+ pthread_barrier_init(&barrier, NULL, 1);
+ pthread_barrier_wait(&barrier);
+ return pthread_barrier_destroy(&barrier);
+}
diff --git a/tools/gpio/gpio-event-mon.c b/tools/gpio/gpio-event-mon.c
index 1c14c2595158..dac4d4131d9b 100644
--- a/tools/gpio/gpio-event-mon.c
+++ b/tools/gpio/gpio-event-mon.c
@@ -14,6 +14,7 @@
#include <unistd.h>
#include <stdlib.h>
#include <stdbool.h>
+#include <stdint.h>
#include <stdio.h>
#include <dirent.h>
#include <errno.h>
@@ -23,12 +24,13 @@
#include <getopt.h>
#include <inttypes.h>
#include <sys/ioctl.h>
+#include <sys/types.h>
#include <linux/gpio.h>
int monitor_device(const char *device_name,
unsigned int line,
- u_int32_t handleflags,
- u_int32_t eventflags,
+ uint32_t handleflags,
+ uint32_t eventflags,
unsigned int loops)
{
struct gpioevent_request req;
@@ -145,8 +147,8 @@ int main(int argc, char **argv)
const char *device_name = NULL;
unsigned int line = -1;
unsigned int loops = 0;
- u_int32_t handleflags = GPIOHANDLE_REQUEST_INPUT;
- u_int32_t eventflags = 0;
+ uint32_t handleflags = GPIOHANDLE_REQUEST_INPUT;
+ uint32_t eventflags = 0;
int c;
while ((c = getopt(argc, argv, "c:n:o:dsrf?")) != -1) {
diff --git a/tools/include/uapi/asm-generic/errno-base.h b/tools/include/uapi/asm-generic/errno-base.h
new file mode 100644
index 000000000000..9653140bff92
--- /dev/null
+++ b/tools/include/uapi/asm-generic/errno-base.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_GENERIC_ERRNO_BASE_H
+#define _ASM_GENERIC_ERRNO_BASE_H
+
+#define EPERM 1 /* Operation not permitted */
+#define ENOENT 2 /* No such file or directory */
+#define ESRCH 3 /* No such process */
+#define EINTR 4 /* Interrupted system call */
+#define EIO 5 /* I/O error */
+#define ENXIO 6 /* No such device or address */
+#define E2BIG 7 /* Argument list too long */
+#define ENOEXEC 8 /* Exec format error */
+#define EBADF 9 /* Bad file number */
+#define ECHILD 10 /* No child processes */
+#define EAGAIN 11 /* Try again */
+#define ENOMEM 12 /* Out of memory */
+#define EACCES 13 /* Permission denied */
+#define EFAULT 14 /* Bad address */
+#define ENOTBLK 15 /* Block device required */
+#define EBUSY 16 /* Device or resource busy */
+#define EEXIST 17 /* File exists */
+#define EXDEV 18 /* Cross-device link */
+#define ENODEV 19 /* No such device */
+#define ENOTDIR 20 /* Not a directory */
+#define EISDIR 21 /* Is a directory */
+#define EINVAL 22 /* Invalid argument */
+#define ENFILE 23 /* File table overflow */
+#define EMFILE 24 /* Too many open files */
+#define ENOTTY 25 /* Not a typewriter */
+#define ETXTBSY 26 /* Text file busy */
+#define EFBIG 27 /* File too large */
+#define ENOSPC 28 /* No space left on device */
+#define ESPIPE 29 /* Illegal seek */
+#define EROFS 30 /* Read-only file system */
+#define EMLINK 31 /* Too many links */
+#define EPIPE 32 /* Broken pipe */
+#define EDOM 33 /* Math argument out of domain of func */
+#define ERANGE 34 /* Math result not representable */
+
+#endif
diff --git a/tools/include/uapi/asm-generic/errno.h b/tools/include/uapi/asm-generic/errno.h
new file mode 100644
index 000000000000..cf9c51ac49f9
--- /dev/null
+++ b/tools/include/uapi/asm-generic/errno.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_GENERIC_ERRNO_H
+#define _ASM_GENERIC_ERRNO_H
+
+#include <asm-generic/errno-base.h>
+
+#define EDEADLK 35 /* Resource deadlock would occur */
+#define ENAMETOOLONG 36 /* File name too long */
+#define ENOLCK 37 /* No record locks available */
+
+/*
+ * This error code is special: arch syscall entry code will return
+ * -ENOSYS if users try to call a syscall that doesn't exist. To keep
+ * failures of syscalls that really do exist distinguishable from
+ * failures due to attempts to use a nonexistent syscall, syscall
+ * implementations should refrain from returning -ENOSYS.
+ */
+#define ENOSYS 38 /* Invalid system call number */
+
+#define ENOTEMPTY 39 /* Directory not empty */
+#define ELOOP 40 /* Too many symbolic links encountered */
+#define EWOULDBLOCK EAGAIN /* Operation would block */
+#define ENOMSG 42 /* No message of desired type */
+#define EIDRM 43 /* Identifier removed */
+#define ECHRNG 44 /* Channel number out of range */
+#define EL2NSYNC 45 /* Level 2 not synchronized */
+#define EL3HLT 46 /* Level 3 halted */
+#define EL3RST 47 /* Level 3 reset */
+#define ELNRNG 48 /* Link number out of range */
+#define EUNATCH 49 /* Protocol driver not attached */
+#define ENOCSI 50 /* No CSI structure available */
+#define EL2HLT 51 /* Level 2 halted */
+#define EBADE 52 /* Invalid exchange */
+#define EBADR 53 /* Invalid request descriptor */
+#define EXFULL 54 /* Exchange full */
+#define ENOANO 55 /* No anode */
+#define EBADRQC 56 /* Invalid request code */
+#define EBADSLT 57 /* Invalid slot */
+
+#define EDEADLOCK EDEADLK
+
+#define EBFONT 59 /* Bad font file format */
+#define ENOSTR 60 /* Device not a stream */
+#define ENODATA 61 /* No data available */
+#define ETIME 62 /* Timer expired */
+#define ENOSR 63 /* Out of streams resources */
+#define ENONET 64 /* Machine is not on the network */
+#define ENOPKG 65 /* Package not installed */
+#define EREMOTE 66 /* Object is remote */
+#define ENOLINK 67 /* Link has been severed */
+#define EADV 68 /* Advertise error */
+#define ESRMNT 69 /* Srmount error */
+#define ECOMM 70 /* Communication error on send */
+#define EPROTO 71 /* Protocol error */
+#define EMULTIHOP 72 /* Multihop attempted */
+#define EDOTDOT 73 /* RFS specific error */
+#define EBADMSG 74 /* Not a data message */
+#define EOVERFLOW 75 /* Value too large for defined data type */
+#define ENOTUNIQ 76 /* Name not unique on network */
+#define EBADFD 77 /* File descriptor in bad state */
+#define EREMCHG 78 /* Remote address changed */
+#define ELIBACC 79 /* Can not access a needed shared library */
+#define ELIBBAD 80 /* Accessing a corrupted shared library */
+#define ELIBSCN 81 /* .lib section in a.out corrupted */
+#define ELIBMAX 82 /* Attempting to link in too many shared libraries */
+#define ELIBEXEC 83 /* Cannot exec a shared library directly */
+#define EILSEQ 84 /* Illegal byte sequence */
+#define ERESTART 85 /* Interrupted system call should be restarted */
+#define ESTRPIPE 86 /* Streams pipe error */
+#define EUSERS 87 /* Too many users */
+#define ENOTSOCK 88 /* Socket operation on non-socket */
+#define EDESTADDRREQ 89 /* Destination address required */
+#define EMSGSIZE 90 /* Message too long */
+#define EPROTOTYPE 91 /* Protocol wrong type for socket */
+#define ENOPROTOOPT 92 /* Protocol not available */
+#define EPROTONOSUPPORT 93 /* Protocol not supported */
+#define ESOCKTNOSUPPORT 94 /* Socket type not supported */
+#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */
+#define EPFNOSUPPORT 96 /* Protocol family not supported */
+#define EAFNOSUPPORT 97 /* Address family not supported by protocol */
+#define EADDRINUSE 98 /* Address already in use */
+#define EADDRNOTAVAIL 99 /* Cannot assign requested address */
+#define ENETDOWN 100 /* Network is down */
+#define ENETUNREACH 101 /* Network is unreachable */
+#define ENETRESET 102 /* Network dropped connection because of reset */
+#define ECONNABORTED 103 /* Software caused connection abort */
+#define ECONNRESET 104 /* Connection reset by peer */
+#define ENOBUFS 105 /* No buffer space available */
+#define EISCONN 106 /* Transport endpoint is already connected */
+#define ENOTCONN 107 /* Transport endpoint is not connected */
+#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */
+#define ETOOMANYREFS 109 /* Too many references: cannot splice */
+#define ETIMEDOUT 110 /* Connection timed out */
+#define ECONNREFUSED 111 /* Connection refused */
+#define EHOSTDOWN 112 /* Host is down */
+#define EHOSTUNREACH 113 /* No route to host */
+#define EALREADY 114 /* Operation already in progress */
+#define EINPROGRESS 115 /* Operation now in progress */
+#define ESTALE 116 /* Stale file handle */
+#define EUCLEAN 117 /* Structure needs cleaning */
+#define ENOTNAM 118 /* Not a XENIX named type file */
+#define ENAVAIL 119 /* No XENIX semaphores available */
+#define EISNAM 120 /* Is a named type file */
+#define EREMOTEIO 121 /* Remote I/O error */
+#define EDQUOT 122 /* Quota exceeded */
+
+#define ENOMEDIUM 123 /* No medium found */
+#define EMEDIUMTYPE 124 /* Wrong medium type */
+#define ECANCELED 125 /* Operation Canceled */
+#define ENOKEY 126 /* Required key not available */
+#define EKEYEXPIRED 127 /* Key has expired */
+#define EKEYREVOKED 128 /* Key has been revoked */
+#define EKEYREJECTED 129 /* Key was rejected by service */
+
+/* for robust mutexes */
+#define EOWNERDEAD 130 /* Owner died */
+#define ENOTRECOVERABLE 131 /* State not recoverable */
+
+#define ERFKILL 132 /* Operation not possible due to RF-kill */
+
+#define EHWPOISON 133 /* Memory page has hardware error */
+
+#endif
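A short sketch (not part of the patch) of the userspace contract the ENOSYS comment above describes; the number 100000 is just an arbitrarily large value assumed to be unimplemented:

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* A syscall the kernel does not implement fails with ENOSYS,
	 * which is how userspace probes for feature availability. */
	if (syscall(100000) == -1 && errno == ENOSYS)
		printf("syscall 100000 not implemented on this kernel\n");
	return 0;
}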
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index 769533696483..e0739a1aa4b2 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -634,9 +634,12 @@ struct perf_event_mmap_page {
*/
#define PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT (1 << 12)
/*
- * PERF_RECORD_MISC_MMAP_DATA and PERF_RECORD_MISC_COMM_EXEC are used on
- * different events so can reuse the same bit position.
- * Ditto PERF_RECORD_MISC_SWITCH_OUT.
+ * Following PERF_RECORD_MISC_* are used on different
+ * events, so can reuse the same bit position:
+ *
+ * PERF_RECORD_MISC_MMAP_DATA - PERF_RECORD_MMAP* events
+ * PERF_RECORD_MISC_COMM_EXEC - PERF_RECORD_COMM event
+ * PERF_RECORD_MISC_SWITCH_OUT - PERF_RECORD_SWITCH* events
*/
#define PERF_RECORD_MISC_MMAP_DATA (1 << 13)
#define PERF_RECORD_MISC_COMM_EXEC (1 << 13)
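Because the three PERF_RECORD_MISC_* flags above share bit 13, a consumer must dispatch on the record type before testing the bit; a minimal sketch, with misc_is_comm_exec() as a hypothetical helper:

#include <linux/perf_event.h>

/* Bit 13 of header.misc means COMM_EXEC only on PERF_RECORD_COMM;
 * on PERF_RECORD_MMAP* records the same bit means MMAP_DATA. */
static int misc_is_comm_exec(const struct perf_event_header *hdr)
{
	return hdr->type == PERF_RECORD_COMM &&
	       (hdr->misc & PERF_RECORD_MISC_COMM_EXEC);
}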
@@ -886,6 +889,7 @@ enum perf_event_type {
* struct perf_event_header header;
* u32 pid;
* u32 tid;
+ * struct sample_id sample_id;
* };
*/
PERF_RECORD_ITRACE_START = 12,
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 7ce724fc0544..e5f2acbb70cc 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -1094,7 +1094,7 @@ static enum event_type __read_token(char **tok)
if (strcmp(*tok, "LOCAL_PR_FMT") == 0) {
free(*tok);
*tok = NULL;
- return force_token("\"\%s\" ", tok);
+ return force_token("\"%s\" ", tok);
} else if (strcmp(*tok, "STA_PR_FMT") == 0) {
free(*tok);
*tok = NULL;
@@ -3970,6 +3970,11 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
val &= ~fval;
}
}
+ if (val) {
+ if (print && arg->flags.delim)
+ trace_seq_puts(s, arg->flags.delim);
+ trace_seq_printf(s, "0x%llx", val);
+ }
break;
case PRINT_SYMBOL:
val = eval_num_arg(data, size, event, arg->symbol.field);
@@ -3980,6 +3985,8 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
break;
}
}
+ if (!flag)
+ trace_seq_printf(s, "0x%llx", val);
break;
case PRINT_HEX:
case PRINT_HEX_STR:
@@ -4293,6 +4300,26 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc
goto process_again;
case 'p':
ls = 1;
+ if (isalnum(ptr[1])) {
+ ptr++;
+ /* Check for special pointers */
+ switch (*ptr) {
+ case 's':
+ case 'S':
+ case 'f':
+ case 'F':
+ break;
+ default:
+ /*
+ * Older kernels do not process
+ * dereferenced pointers.
+ * Only process if the pointer
+ * value is printable.
+ */
+ if (isprint(*(char *)bptr))
+ goto process_string;
+ }
+ }
/* fall through */
case 'd':
case 'u':
@@ -4345,6 +4372,7 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc
break;
case 's':
+ process_string:
arg = alloc_arg();
if (!arg) {
do_warning_event(event, "%s(%d): not enough memory!",
@@ -4949,21 +4977,27 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
else
ls = 2;
- if (*(ptr+1) == 'F' || *(ptr+1) == 'f' ||
- *(ptr+1) == 'S' || *(ptr+1) == 's') {
+ if (isalnum(ptr[1]))
ptr++;
+
+ if (arg->type == PRINT_BSTRING) {
+ trace_seq_puts(s, arg->string.string);
+ break;
+ }
+
+ if (*ptr == 'F' || *ptr == 'f' ||
+ *ptr == 'S' || *ptr == 's') {
show_func = *ptr;
- } else if (*(ptr+1) == 'M' || *(ptr+1) == 'm') {
- print_mac_arg(s, *(ptr+1), data, size, event, arg);
- ptr++;
+ } else if (*ptr == 'M' || *ptr == 'm') {
+ print_mac_arg(s, *ptr, data, size, event, arg);
arg = arg->next;
break;
- } else if (*(ptr+1) == 'I' || *(ptr+1) == 'i') {
+ } else if (*ptr == 'I' || *ptr == 'i') {
int n;
- n = print_ip_arg(s, ptr+1, data, size, event, arg);
+ n = print_ip_arg(s, ptr, data, size, event, arg);
if (n > 0) {
- ptr += n;
+ ptr += n - 1;
arg = arg->next;
break;
}
@@ -5532,8 +5566,14 @@ void pevent_print_event(struct pevent *pevent, struct trace_seq *s,
event = pevent_find_event_by_record(pevent, record);
if (!event) {
- do_warning("ug! no event found for type %d",
- trace_parse_common_type(pevent, record->data));
+ int i;
+ int type = trace_parse_common_type(pevent, record->data);
+
+ do_warning("ug! no event found for type %d", type);
+ trace_seq_printf(s, "[UNKNOWN TYPE %d]", type);
+ for (i = 0; i < record->size; i++)
+ trace_seq_printf(s, " %02x",
+ ((unsigned char *)record->data)[i]);
return;
}
diff --git a/tools/lib/traceevent/event-plugin.c b/tools/lib/traceevent/event-plugin.c
index a16756ae3526..d542cb60ca1a 100644
--- a/tools/lib/traceevent/event-plugin.c
+++ b/tools/lib/traceevent/event-plugin.c
@@ -120,12 +120,12 @@ char **traceevent_plugin_list_options(void)
for (op = reg->options; op->name; op++) {
char *alias = op->plugin_alias ? op->plugin_alias : op->file;
char **temp = list;
+ int ret;
- name = malloc(strlen(op->name) + strlen(alias) + 2);
- if (!name)
+ ret = asprintf(&name, "%s:%s", alias, op->name);
+ if (ret < 0)
goto err;
- sprintf(name, "%s:%s", alias, op->name);
list = realloc(list, count + 2);
if (!list) {
list = temp;
@@ -290,17 +290,14 @@ load_plugin(struct pevent *pevent, const char *path,
const char *alias;
char *plugin;
void *handle;
+ int ret;
- plugin = malloc(strlen(path) + strlen(file) + 2);
- if (!plugin) {
+ ret = asprintf(&plugin, "%s/%s", path, file);
+ if (ret < 0) {
warning("could not allocate plugin memory\n");
return;
}
- strcpy(plugin, path);
- strcat(plugin, "/");
- strcat(plugin, file);
-
handle = dlopen(plugin, RTLD_NOW | RTLD_GLOBAL);
if (!handle) {
warning("could not load plugin '%s'\n%s\n",
@@ -391,6 +388,7 @@ load_plugins(struct pevent *pevent, const char *suffix,
char *home;
char *path;
char *envdir;
+ int ret;
if (pevent->flags & PEVENT_DISABLE_PLUGINS)
return;
@@ -421,16 +419,12 @@ load_plugins(struct pevent *pevent, const char *suffix,
if (!home)
return;
- path = malloc(strlen(home) + strlen(LOCAL_PLUGIN_DIR) + 2);
- if (!path) {
+ ret = asprintf(&path, "%s/%s", home, LOCAL_PLUGIN_DIR);
+ if (ret < 0) {
warning("could not allocate plugin memory\n");
return;
}
- strcpy(path, home);
- strcat(path, "/");
- strcat(path, LOCAL_PLUGIN_DIR);
-
load_plugins_dir(pevent, suffix, path, load_plugin, data);
free(path);
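The hunks above all move from malloc(strlen(...) + N)/sprintf to asprintf; a minimal sketch of the pattern, with plugin_path() as a hypothetical helper (glibc asprintf, so _GNU_SOURCE is required):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

/* asprintf() sizes, allocates and formats in one call, removing the
 * fragile strlen()+2 arithmetic; the caller still frees the result. */
static char *plugin_path(const char *dir, const char *file)
{
	char *path;

	if (asprintf(&path, "%s/%s", dir, file) < 0)
		return NULL;	/* path is undefined on failure */
	return path;
}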
diff --git a/tools/lib/traceevent/kbuffer-parse.c b/tools/lib/traceevent/kbuffer-parse.c
index c94e3641b046..ca424b157e46 100644
--- a/tools/lib/traceevent/kbuffer-parse.c
+++ b/tools/lib/traceevent/kbuffer-parse.c
@@ -24,8 +24,8 @@
#include "kbuffer.h"
-#define MISSING_EVENTS (1 << 31)
-#define MISSING_STORED (1 << 30)
+#define MISSING_EVENTS (1UL << 31)
+#define MISSING_STORED (1UL << 30)
#define COMMIT_MASK ((1 << 27) - 1)
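Why the 1UL matters in the two masks above, shown as a standalone sketch for a typical LP64 host:

#include <stdio.h>

int main(void)
{
	/* 1 << 31 is evaluated in (signed) int; shifting into the sign
	 * bit is formally undefined, and the usual negative result
	 * sign-extends when widened to unsigned long. */
	unsigned long bad  = 1 << 31;	/* typically 0xffffffff80000000 */
	unsigned long good = 1UL << 31;	/* 0x0000000080000000 */

	printf("bad=%lx good=%lx\n", bad, good);
	return 0;
}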
diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c
index 315df0a70265..431e8b309f6e 100644
--- a/tools/lib/traceevent/parse-filter.c
+++ b/tools/lib/traceevent/parse-filter.c
@@ -287,12 +287,10 @@ find_event(struct pevent *pevent, struct event_list **events,
sys_name = NULL;
}
- reg = malloc(strlen(event_name) + 3);
- if (reg == NULL)
+ ret = asprintf(&reg, "^%s$", event_name);
+ if (ret < 0)
return PEVENT_ERRNO__MEM_ALLOC_FAILED;
- sprintf(reg, "^%s$", event_name);
-
ret = regcomp(&ereg, reg, REG_ICASE|REG_NOSUB);
free(reg);
@@ -300,13 +298,12 @@ find_event(struct pevent *pevent, struct event_list **events,
return PEVENT_ERRNO__INVALID_EVENT_NAME;
if (sys_name) {
- reg = malloc(strlen(sys_name) + 3);
- if (reg == NULL) {
+ ret = asprintf(&reg, "^%s$", sys_name);
+ if (ret < 0) {
regfree(&ereg);
return PEVENT_ERRNO__MEM_ALLOC_FAILED;
}
- sprintf(reg, "^%s$", sys_name);
ret = regcomp(&sreg, reg, REG_ICASE|REG_NOSUB);
free(reg);
if (ret) {
@@ -1634,6 +1631,7 @@ int pevent_filter_clear_trivial(struct event_filter *filter,
case FILTER_TRIVIAL_FALSE:
if (filter_type->filter->boolean.value)
continue;
+ break;
case FILTER_TRIVIAL_TRUE:
if (!filter_type->filter->boolean.value)
continue;
@@ -1879,17 +1877,25 @@ static const char *get_field_str(struct filter_arg *arg, struct pevent_record *r
struct pevent *pevent;
unsigned long long addr;
const char *val = NULL;
+ unsigned int size;
char hex[64];
/* If the field is not a string convert it */
if (arg->str.field->flags & FIELD_IS_STRING) {
val = record->data + arg->str.field->offset;
+ size = arg->str.field->size;
+
+ if (arg->str.field->flags & FIELD_IS_DYNAMIC) {
+ addr = *(unsigned int *)val;
+ val = record->data + (addr & 0xffff);
+ size = addr >> 16;
+ }
/*
* We need to copy the data since we can't be sure the field
* is null terminated.
*/
- if (*(val + arg->str.field->size - 1)) {
+ if (*(val + size - 1)) {
/* copy it */
memcpy(arg->str.buffer, val, arg->str.field->size);
/* the buffer is already NULL terminated */
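The FIELD_IS_DYNAMIC branch added above unpacks a 32-bit word stored in the field slot: the low 16 bits hold the string's offset inside the record, the high 16 bits its length. A sketch of that decode, with dynamic_field_str() as a hypothetical helper:

/* Unpack an ftrace dynamic-string descriptor: offset in the low
 * 16 bits, size in the high 16 bits, both relative to record data. */
static const char *dynamic_field_str(const void *record_data,
				     unsigned int packed,
				     unsigned int *sizep)
{
	*sizep = packed >> 16;
	return (const char *)record_data + (packed & 0xffff);
}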
diff --git a/tools/perf/Build b/tools/perf/Build
index b48ca40fccf9..e5232d567611 100644
--- a/tools/perf/Build
+++ b/tools/perf/Build
@@ -25,7 +25,7 @@ perf-y += builtin-data.o
perf-y += builtin-version.o
perf-y += builtin-c2c.o
-perf-$(CONFIG_AUDIT) += builtin-trace.o
+perf-$(CONFIG_TRACE) += builtin-trace.o
perf-$(CONFIG_LIBELF) += builtin-probe.o
perf-y += bench/
@@ -50,6 +50,6 @@ libperf-y += util/
libperf-y += arch/
libperf-y += ui/
libperf-y += scripts/
-libperf-$(CONFIG_AUDIT) += trace/beauty/
+libperf-$(CONFIG_TRACE) += trace/beauty/
gtk-y += ui/gtk/
diff --git a/tools/perf/Documentation/perf-buildid-cache.txt b/tools/perf/Documentation/perf-buildid-cache.txt
index 84681007f80f..73c2650bd0db 100644
--- a/tools/perf/Documentation/perf-buildid-cache.txt
+++ b/tools/perf/Documentation/perf-buildid-cache.txt
@@ -24,6 +24,9 @@ OPTIONS
-a::
--add=::
Add specified file to the cache.
+-f::
+--force::
+ Don't complain, do it.
-k::
--kcore::
Add specified kcore file to the cache. For the current host that is
diff --git a/tools/perf/Documentation/perf-evlist.txt b/tools/perf/Documentation/perf-evlist.txt
index 6f7200fb85cf..c0a66400a960 100644
--- a/tools/perf/Documentation/perf-evlist.txt
+++ b/tools/perf/Documentation/perf-evlist.txt
@@ -20,6 +20,10 @@ OPTIONS
--input=::
Input file name. (default: perf.data unless stdin is a fifo)
+-f::
+--force::
+ Don't complain, do it.
+
-F::
--freq=::
Show just the sample frequency used for each event.
diff --git a/tools/perf/Documentation/perf-inject.txt b/tools/perf/Documentation/perf-inject.txt
index 87b2588d1cbd..a64d6588470e 100644
--- a/tools/perf/Documentation/perf-inject.txt
+++ b/tools/perf/Documentation/perf-inject.txt
@@ -60,6 +60,10 @@ include::itrace.txt[]
found in the jitdumps files captured in the input perf.data file. Use this option
if you are monitoring environment using JIT runtimes, such as Java, DART or V8.
+-f::
+--force::
+ Don't complain, do it.
+
SEE ALSO
--------
linkperf:perf-record[1], linkperf:perf-report[1], linkperf:perf-archive[1]
diff --git a/tools/perf/Documentation/perf-lock.txt b/tools/perf/Documentation/perf-lock.txt
index ab25be28c9dc..74d774592196 100644
--- a/tools/perf/Documentation/perf-lock.txt
+++ b/tools/perf/Documentation/perf-lock.txt
@@ -42,6 +42,10 @@ COMMON OPTIONS
--dump-raw-trace::
Dump raw trace in ASCII.
+-f::
+--force::
+ Don't complain, do it.
+
REPORT OPTIONS
--------------
diff --git a/tools/perf/Documentation/perf-probe.txt b/tools/perf/Documentation/perf-probe.txt
index d7e4869905f1..b6866a05edd2 100644
--- a/tools/perf/Documentation/perf-probe.txt
+++ b/tools/perf/Documentation/perf-probe.txt
Probe points are defined by the following syntax.
or,
sdt_PROVIDER:SDTEVENT
-'EVENT' specifies the name of new event, if omitted, it will be set the name of the probed function. You can also specify a group name by 'GROUP', if omitted, set 'probe' is used for kprobe and 'probe_<bin>' is used for uprobe.
+'EVENT' specifies the name of the new event; if omitted, it is set to the name of the probed function, and for return probes a "\_\_return" suffix is automatically added to the function name. You can also specify a group name with 'GROUP'; if omitted, 'probe' is used for kprobes and 'probe_<bin>' for uprobes.
Note that using existing group name can conflict with other events. Especially, using the group name reserved for kernel modules can hide embedded events in the
modules.
'FUNC' specifies a probed function name, and it may have one of the following options; '+OFFS' is the offset from function entry address in bytes, ':RLN' is the relative-line number from function entry line, and '%return' means that it probes function return. And ';PTN' means lazy matching pattern (see LAZY MATCHING). Note that ';PTN' must be the end of the probe point definition. In addition, '@SRC' specifies a source file which has that function.
@@ -182,6 +182,14 @@ Note that before using the SDT event, the target binary (on which SDT events are
For details of the SDT, see below.
https://sourceware.org/gdb/onlinedocs/gdb/Static-Probe-Points.html
+ESCAPED CHARACTER
+-----------------
+
+In the probe syntax, '=', '@', '+', ':' and ';' are treated as special characters. You can use a backslash ('\') to escape them.
+This is useful if you need to probe a specific versioned symbol, such as one with an @GLIBC_... suffix, or to specify a source file whose name includes the special characters.
+Note that a single backslash is usually consumed by the shell, so you may need to pass a double backslash (\\) or wrap the argument in single quotes ('AAA\@BBB').
+See EXAMPLES for how this is used.
+
PROBE ARGUMENT
--------------
Each probe argument follows below syntax.
@@ -277,6 +285,14 @@ Add a USDT probe to a target process running in a different mount namespace
./perf probe --target-ns <target pid> -x /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.121-0.b13.el7_3.x86_64/jre/lib/amd64/server/libjvm.so %sdt_hotspot:thread__sleep__end
+Add a probe on specific versioned symbol by backslash escape
+
+ ./perf probe -x /lib64/libc-2.25.so 'malloc_get_state\@GLIBC_2.2.5'
+
+Add a probe in a source file using special characters by backslash escape
+
+ ./perf probe -x /opt/test/a.out 'foo\+bar.c:4'
+
SEE ALSO
--------
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 5a626ef666c2..3eea6de35a38 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -430,6 +430,9 @@ Configure all used events to run in user space.
--timestamp-filename
Append timestamp to output file name.
+--timestamp-boundary::
+Record timestamp boundary (time of first/last samples).
+
--switch-output[=mode]::
Generate multiple perf.data files, timestamp prefixed, switching to a new one
based on 'mode' value:
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index ddde2b54af57..907e505b6309 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -402,6 +402,26 @@ OPTIONS
stop time is not given (i.e, time string is 'x.y,') then analysis goes
to end of file.
+ Also supports time percentages with multiple time ranges. The time string is
+ 'a%/n,b%/m,...' or 'a%-b%,c%-d%,...'.
+
+ For example:
+ Select the second 10% time slice:
+
+ perf report --time 10%/2
+
+ Select from 0% to 10% time slice:
+
+ perf report --time 0%-10%
+
+ Select the first and second 10% time slices:
+
+ perf report --time 10%/1,10%/2
+
+ Select from 0% to 10% and 30% to 40% slices:
+
+ perf report --time 0%-10%,30%-40%
+
--itrace::
Options for decoding instruction tracing data. The options are:
@@ -437,8 +457,23 @@ include::itrace.txt[]
will be printed. Each entry is function name or file/line. Enabled by
default, disable with --no-inline.
+--mmaps::
+ Show --tasks output plus mmap information in a format similar to
+ /proc/<PID>/maps.
+
+ Please note that not all mmaps are stored; options such as
+ 'perf record --data' affect which ones are.
+
+--stats::
+ Display overall events statistics without any further processing.
+ (like the one at the end of the perf report -D command)
+
+--tasks::
+ Display monitored tasks stored in perf data. Displays pid/tid/ppid
+ plus the command string aligned to distinguish parent and child tasks.
+
include::callchain-overhead-calculation.txt[]
SEE ALSO
--------
-linkperf:perf-stat[1], linkperf:perf-annotate[1]
+linkperf:perf-stat[1], linkperf:perf-annotate[1], linkperf:perf-record[1]
diff --git a/tools/perf/Documentation/perf-sched.txt b/tools/perf/Documentation/perf-sched.txt
index 55b67338548e..c7e50f263887 100644
--- a/tools/perf/Documentation/perf-sched.txt
+++ b/tools/perf/Documentation/perf-sched.txt
@@ -74,6 +74,10 @@ OPTIONS
--dump-raw-trace=::
Display verbose dump of the sched data.
+-f::
+--force::
+ Don't complain, do it.
+
OPTIONS for 'perf sched map'
----------------------------
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index 2811fcf684cb..7730c1d2b5d3 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -117,7 +117,7 @@ OPTIONS
Comma separated list of fields to print. Options are:
comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff,
srcline, period, iregs, uregs, brstack, brstacksym, flags, bpf-output, brstackinsn,
- brstackoff, callindent, insn, insnlen, synth, phys_addr.
+ brstackoff, callindent, insn, insnlen, synth, phys_addr, metric, misc.
Field list can be prepended with the type, trace, sw or hw,
to indicate to which event type the field list applies.
e.g., -F sw:comm,tid,time,ip,sym and -F trace:time,cpu,trace
@@ -217,6 +217,32 @@ OPTIONS
The brstackoff field will print an offset into a specific dso/binary.
+ With the metric option perf script can compute metrics for
+ sampling periods, similar to perf stat. This requires
+ specifying a group with multiple metrics with the :S option
+ for perf record. perf will sample on the first event, and
+ compute metrics for all the events in the group. Please note
+ that the metric computed is averaged over the whole sampling
+ period, not just for the sample point.
+
+ For sample events it's possible to display the misc field with the -F +misc option;
+ the following letters are displayed for each bit:
+
+ PERF_RECORD_MISC_KERNEL K
+ PERF_RECORD_MISC_USER U
+ PERF_RECORD_MISC_HYPERVISOR H
+ PERF_RECORD_MISC_GUEST_KERNEL G
+ PERF_RECORD_MISC_GUEST_USER g
+ PERF_RECORD_MISC_MMAP_DATA* M
+ PERF_RECORD_MISC_COMM_EXEC E
+ PERF_RECORD_MISC_SWITCH_OUT S
+
+ $ perf script -F +misc ...
+ sched-messaging 1414 K 28690.636582: 4590 cycles ...
+ sched-messaging 1407 U 28690.636600: 325620 cycles ...
+ sched-messaging 1414 K 28690.636608: 19473 cycles ...
+ misc field ___________/
+
-k::
--vmlinux=<file>::
vmlinux pathname
@@ -274,6 +300,9 @@ OPTIONS
Display context switch events i.e. events of type PERF_RECORD_SWITCH or
PERF_RECORD_SWITCH_CPU_WIDE.
+--show-lost-events
+ Display lost events i.e. events of type PERF_RECORD_LOST.
+
--demangle::
Demangle symbol names to human readable form. It's enabled by default,
disable with --no-demangle.
@@ -321,6 +350,22 @@ include::itrace.txt[]
stop time is not given (i.e, time string is 'x.y,') then analysis goes
to end of file.
+ Also supports time percentages with multiple time ranges. The time string is
+ 'a%/n,b%/m,...' or 'a%-b%,c%-d%,...'.
+
+ For example:
+ Select the second 10% time slice:
+ perf script --time 10%/2
+
+ Select from 0% to 10% time slice:
+ perf script --time 0%-10%
+
+ Select the first and second 10% time slices:
+ perf script --time 10%/1,10%/2
+
+ Select from 0% to 10% and 30% to 40% slices:
+ perf script --time 0%-10%,30%-40%
+
--max-blocks::
Set the maximum number of program blocks to print with brstackasm for
each sample.
diff --git a/tools/perf/Documentation/perf-timechart.txt b/tools/perf/Documentation/perf-timechart.txt
index df98d1c82688..ef0c7565bd5c 100644
--- a/tools/perf/Documentation/perf-timechart.txt
+++ b/tools/perf/Documentation/perf-timechart.txt
@@ -50,7 +50,9 @@ TIMECHART OPTIONS
-p::
--process::
Select the processes to display, by name or PID
-
+-f::
+--force::
+ Don't complain, do it.
--symfs=<directory>::
Look for files with symbols relative to this directory.
-n::
diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt
index 4353262bc462..8a32cc77bead 100644
--- a/tools/perf/Documentation/perf-top.txt
+++ b/tools/perf/Documentation/perf-top.txt
@@ -268,6 +268,12 @@ INTERACTIVE PROMPTING KEYS
[S]::
Stop annotation, return to full profile display.
+[K]::
+ Hide kernel symbols.
+
+[U]::
+ Hide user symbols.
+
[z]::
Toggle event count zeroing across display updates.
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
index d53bea6bd571..33a88e984e66 100644
--- a/tools/perf/Documentation/perf-trace.txt
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -86,18 +86,18 @@ comma-separated list with no space: 0,1. Ranges of CPUs are specified with -: 0-
In per-thread mode with inheritance mode on (default), events are captured only when
the thread executes on the designated CPUs. Default is to monitor all CPUs.
---duration:
+--duration::
Show only events that had a duration greater than N.M ms.
---sched:
+--sched::
Accrue thread runtime and provide a summary at the end of the session.
--i
---input
+-i::
+--input::
Process events from a given perf data file.
--T
---time
+-T::
+--time::
Print full timestamp rather than time relative to first sample.
--comm::
@@ -117,6 +117,10 @@ the thread executes on the designated CPUs. Default is to monitor all CPUs.
Show tool stats such as number of times fd->pathname was discovered thru
hooking the open syscall return + vfs_getname or via reading /proc/pid/fd, etc.
+-f::
+--force::
+ Don't complain, do it.
+
-F=[all|min|maj]::
--pf=[all|min|maj]::
Trace pagefaults. Optionally, you can specify whether you want minor,
@@ -159,6 +163,10 @@ the thread executes on the designated CPUs. Default is to monitor all CPUs.
Implies '--call-graph dwarf' when --call-graph not present on the
command line, on systems where DWARF unwinding was built in.
+--print-sample::
+ Print the PERF_RECORD_SAMPLE PERF_SAMPLE_ info for the
+ raw_syscalls:sys_{enter,exit} tracepoints, for debugging.
+
--proc-map-timeout::
When processing pre-existing threads /proc/XXX/mmap, it may take a long time,
because the file may be huge. A time out is needed in such cases.
diff --git a/tools/perf/Documentation/perf.data-file-format.txt b/tools/perf/Documentation/perf.data-file-format.txt
index e90c59c6d815..f7d85e89a98a 100644
--- a/tools/perf/Documentation/perf.data-file-format.txt
+++ b/tools/perf/Documentation/perf.data-file-format.txt
@@ -238,6 +238,33 @@ struct auxtrace_index {
struct auxtrace_index_entry entries[PERF_AUXTRACE_INDEX_ENTRY_COUNT];
};
+ HEADER_STAT = 19,
+
+This is merely a flag signifying that the data section contains data
+recorded from perf stat record.
+
+ HEADER_CACHE = 20,
+
+Description of the cache hierarchy. Based on the Linux sysfs format
+in /sys/devices/system/cpu/cpu*/cache/
+
+ u32 version Currently always 1
+ u32 number_of_cache_levels
+
+struct {
+ u32 level;
+ u32 line_size;
+ u32 sets;
+ u32 ways;
+ struct perf_header_string type;
+ struct perf_header_string size;
+ struct perf_header_string map;
+}[number_of_cache_levels];
+
+ HEADER_SAMPLE_TIME = 21,
+
+Two uint64_t for the time of first sample and the time of last sample.
+
other bits are reserved and should be ignored for now
HEADER_FEAT_BITS = 256,
diff --git a/tools/perf/Documentation/tips.txt b/tools/perf/Documentation/tips.txt
index db0ca3063eae..849599f39c5e 100644
--- a/tools/perf/Documentation/tips.txt
+++ b/tools/perf/Documentation/tips.txt
@@ -32,3 +32,5 @@ Order by the overhead of source file name and line number: perf report -s srclin
System-wide collection from all CPUs: perf record -a
Show current config key-value pairs: perf config --list
Show user configuration overrides: perf config --user --list
+To add Node.js USDT(User-Level Statically Defined Tracing): perf buildid-cache --add `which node`
+To report cacheline events from previous recording: perf c2c report
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index 0294bfb6c5f8..0dfdaa9fa81e 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -22,6 +22,7 @@ include $(srctree)/tools/scripts/Makefile.arch
$(call detected_var,SRCARCH)
NO_PERF_REGS := 1
+NO_SYSCALL_TABLE := 1
# Additional ARCH settings for ppc
ifeq ($(SRCARCH),powerpc)
@@ -33,7 +34,8 @@ endif
ifeq ($(SRCARCH),x86)
$(call detected,CONFIG_X86)
ifeq (${IS_64_BIT}, 1)
- CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT -DHAVE_SYSCALL_TABLE -I$(OUTPUT)arch/x86/include/generated
+ NO_SYSCALL_TABLE := 0
+ CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT -I$(OUTPUT)arch/x86/include/generated
ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memset_64.S
LIBUNWIND_LIBS = -lunwind-x86_64 -lunwind -llzma
$(call detected,CONFIG_X86_64)
@@ -55,12 +57,18 @@ endif
ifeq ($(ARCH),s390)
NO_PERF_REGS := 0
+ NO_SYSCALL_TABLE := 0
+ CFLAGS += -fPIC -I$(OUTPUT)arch/s390/include/generated
endif
ifeq ($(NO_PERF_REGS),0)
$(call detected,CONFIG_PERF_REGS)
endif
+ifneq ($(NO_SYSCALL_TABLE),1)
+ CFLAGS += -DHAVE_SYSCALL_TABLE
+endif
+
# So far there's only x86 and arm libdw unwind support merged in perf.
# Disable it on all other architectures in case libdw unwind
# support is detected in system. Add supported architectures
@@ -97,6 +105,16 @@ FEATURE_CHECK_LDFLAGS-libunwind = $(LIBUNWIND_LDFLAGS) $(LIBUNWIND_LIBS)
FEATURE_CHECK_CFLAGS-libunwind-debug-frame = $(LIBUNWIND_CFLAGS)
FEATURE_CHECK_LDFLAGS-libunwind-debug-frame = $(LIBUNWIND_LDFLAGS) $(LIBUNWIND_LIBS)
+ifdef CSINCLUDES
+ LIBOPENCSD_CFLAGS := -I$(CSINCLUDES)
+endif
+OPENCSDLIBS := -lopencsd_c_api -lopencsd
+ifdef CSLIBS
+ LIBOPENCSD_LDFLAGS := -L$(CSLIBS)
+endif
+FEATURE_CHECK_CFLAGS-libopencsd := $(LIBOPENCSD_CFLAGS)
+FEATURE_CHECK_LDFLAGS-libopencsd := $(LIBOPENCSD_LDFLAGS) $(OPENCSDLIBS)
+
ifeq ($(NO_PERF_REGS),0)
CFLAGS += -DHAVE_PERF_REGS_SUPPORT
endif
@@ -265,6 +283,10 @@ ifeq ($(feature-pthread-attr-setaffinity-np), 1)
CFLAGS += -DHAVE_PTHREAD_ATTR_SETAFFINITY_NP
endif
+ifeq ($(feature-pthread-barrier), 1)
+ CFLAGS += -DHAVE_PTHREAD_BARRIER
+endif
+
ifndef NO_BIONIC
$(call feature_check,bionic)
ifeq ($(feature-bionic), 1)
@@ -341,6 +363,21 @@ ifeq ($(feature-setns), 1)
$(call detected,CONFIG_SETNS)
endif
+ifndef NO_CORESIGHT
+ ifeq ($(feature-libopencsd), 1)
+ CFLAGS += -DHAVE_CSTRACE_SUPPORT $(LIBOPENCSD_CFLAGS)
+ LDFLAGS += $(LIBOPENCSD_LDFLAGS)
+ EXTLIBS += $(OPENCSDLIBS)
+ $(call detected,CONFIG_LIBOPENCSD)
+ ifdef CSTRACE_RAW
+ CFLAGS += -DCS_DEBUG_RAW
+ ifeq (${CSTRACE_RAW}, packed)
+ CFLAGS += -DCS_RAW_PACKED
+ endif
+ endif
+ endif
+endif
+
ifndef NO_LIBELF
CFLAGS += -DHAVE_LIBELF_SUPPORT
EXTLIBS += -lelf
@@ -519,14 +556,18 @@ ifndef NO_LIBUNWIND
EXTLIBS += $(EXTLIBS_LIBUNWIND)
endif
-ifndef NO_LIBAUDIT
- ifneq ($(feature-libaudit), 1)
- msg := $(warning No libaudit.h found, disables 'trace' tool, please install audit-libs-devel or libaudit-dev);
- NO_LIBAUDIT := 1
- else
- CFLAGS += -DHAVE_LIBAUDIT_SUPPORT
- EXTLIBS += -laudit
- $(call detected,CONFIG_AUDIT)
+ifeq ($(NO_SYSCALL_TABLE),0)
+ $(call detected,CONFIG_TRACE)
+else
+ ifndef NO_LIBAUDIT
+ ifneq ($(feature-libaudit), 1)
+ msg := $(warning No libaudit.h found, disables 'trace' tool, please install audit-libs-devel or libaudit-dev);
+ NO_LIBAUDIT := 1
+ else
+ CFLAGS += -DHAVE_LIBAUDIT_SUPPORT
+ EXTLIBS += -laudit
+ $(call detected,CONFIG_TRACE)
+ endif
endif
endif
@@ -768,7 +809,7 @@ else
NO_PERF_READ_VDSOX32 := 1
endif
-ifdef LIBBABELTRACE
+ifndef NO_LIBBABELTRACE
$(call feature_check,libbabeltrace)
ifeq ($(feature-libbabeltrace), 1)
CFLAGS += -DHAVE_LIBBABELTRACE_SUPPORT $(LIBBABELTRACE_CFLAGS)
@@ -935,6 +976,10 @@ define print_var_code
endef
ifeq ($(VF),1)
+ # Display EXTRA features which are detected manually
+ # from here with feature_check calls and thus cannot
+ # be part of the global state output.
+ $(foreach feat,$(FEATURE_TESTS_EXTRA),$(call feature_print_status,$(feat),))
$(call print_var,prefix)
$(call print_var,bindir)
$(call print_var,libdir)
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 68cf1360a3f3..9b0351d3ce34 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -77,7 +77,7 @@ include ../scripts/utilities.mak
#
# Define NO_ZLIB if you do not want to support compressed kernel modules
#
-# Define LIBBABELTRACE if you DO want libbabeltrace support
+# Define NO_LIBBABELTRACE if you do not want libbabeltrace support
# for CTF data format.
#
# Define NO_LZMA if you do not want to support compressed (xz) kernel modules
@@ -98,6 +98,8 @@ include ../scripts/utilities.mak
# When selected, pass LLVM_CONFIG=/path/to/llvm-config to `make' if
# llvm-config is not in $PATH.
+# Define NO_CORESIGHT if you do not want support for CoreSight trace decoding.
+
# As per kernel Makefile, avoid funny character set dependencies
unexport LC_ALL
LC_COLLATE=C
@@ -462,6 +464,13 @@ prctl_option_tbl := $(srctree)/tools/perf/trace/beauty/prctl_option.sh
$(prctl_option_array): $(prctl_hdr_dir)/prctl.h $(prctl_option_tbl)
$(Q)$(SHELL) '$(prctl_option_tbl)' $(prctl_hdr_dir) > $@
+arch_errno_name_array := $(beauty_outdir)/arch_errno_name_array.c
+arch_errno_hdr_dir := $(srctree)/tools
+arch_errno_tbl := $(srctree)/tools/perf/trace/beauty/arch_errno_names.sh
+
+$(arch_errno_name_array): $(arch_errno_tbl)
+ $(Q)$(SHELL) '$(arch_errno_tbl)' $(CC) $(arch_errno_hdr_dir) > $@
+
all: shell_compatibility_test $(ALL_PROGRAMS) $(LANG_BINDINGS) $(OTHER_PROGRAMS)
$(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS) $(LIBTRACEEVENT_DYNAMIC_LIST)
@@ -565,7 +574,8 @@ prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h archheaders $(drm_ioc
$(vhost_virtio_ioctl_array) \
$(madvise_behavior_array) \
$(perf_ioctl_array) \
- $(prctl_option_array)
+ $(prctl_option_array) \
+ $(arch_errno_name_array)
$(OUTPUT)%.o: %.c prepare FORCE
$(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@
@@ -847,7 +857,8 @@ clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clea
$(OUTPUT)$(kcmp_type_array) \
$(OUTPUT)$(vhost_virtio_ioctl_array) \
$(OUTPUT)$(perf_ioctl_array) \
- $(OUTPUT)$(prctl_option_array)
+ $(OUTPUT)$(prctl_option_array) \
+ $(OUTPUT)$(arch_errno_name_array)
$(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) clean
#
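
The arch_errno_name_array rule above generates per-architecture errno tables for 'perf trace'. A hypothetical, standalone sketch of the kind of lookup such a table enables (the names and shape are illustrative, not the generated file's exact contents):

#include <stdio.h>

/* Hypothetical sketch of what such a table buys 'perf trace':
 * mapping a syscall's raw negative return value back to a symbolic
 * errno name. The real generated file is per-architecture; entries
 * here are the common asm-generic values, abridged. */
static const char *errno_names[] = {
	[1] = "EPERM",
	[2] = "ENOENT",
	[3] = "ESRCH",
};

static const char *errno_name(unsigned int err)
{
	if (err < sizeof(errno_names) / sizeof(errno_names[0]) &&
	    errno_names[err])
		return errno_names[err];
	return "?";
}

int main(void)
{
	printf("syscall returned -2 -> -%s\n", errno_name(2));
	return 0;
}
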
diff --git a/tools/perf/arch/arm/util/auxtrace.c b/tools/perf/arch/arm/util/auxtrace.c
index 8edf2cb71564..2323581b157d 100644
--- a/tools/perf/arch/arm/util/auxtrace.c
+++ b/tools/perf/arch/arm/util/auxtrace.c
@@ -22,6 +22,42 @@
#include "../../util/evlist.h"
#include "../../util/pmu.h"
#include "cs-etm.h"
+#include "arm-spe.h"
+
+static struct perf_pmu **find_all_arm_spe_pmus(int *nr_spes, int *err)
+{
+ struct perf_pmu **arm_spe_pmus = NULL;
+ int ret, i, nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
+ /* arm_spe_xxxxxxxxx\0 */
+ char arm_spe_pmu_name[sizeof(ARM_SPE_PMU_NAME) + 10];
+
+ arm_spe_pmus = zalloc(sizeof(struct perf_pmu *) * nr_cpus);
+ if (!arm_spe_pmus) {
+ pr_err("spes alloc failed\n");
+ *err = -ENOMEM;
+ return NULL;
+ }
+
+ for (i = 0; i < nr_cpus; i++) {
+ ret = sprintf(arm_spe_pmu_name, "%s%d", ARM_SPE_PMU_NAME, i);
+ if (ret < 0) {
+ pr_err("sprintf failed\n");
+ *err = -ENOMEM;
+ return NULL;
+ }
+
+ arm_spe_pmus[*nr_spes] = perf_pmu__find(arm_spe_pmu_name);
+ if (arm_spe_pmus[*nr_spes]) {
+ pr_debug2("%s %d: arm_spe_pmu %d type %d name %s\n",
+ __func__, __LINE__, *nr_spes,
+ arm_spe_pmus[*nr_spes]->type,
+ arm_spe_pmus[*nr_spes]->name);
+ (*nr_spes)++;
+ }
+ }
+
+ return arm_spe_pmus;
+}
struct auxtrace_record
*auxtrace_record__init(struct perf_evlist *evlist, int *err)
@@ -29,22 +65,51 @@ struct auxtrace_record
struct perf_pmu *cs_etm_pmu;
struct perf_evsel *evsel;
bool found_etm = false;
+ bool found_spe = false;
+ static struct perf_pmu **arm_spe_pmus = NULL;
+ static int nr_spes = 0;
+ int i;
+
+ if (!evlist)
+ return NULL;
cs_etm_pmu = perf_pmu__find(CORESIGHT_ETM_PMU_NAME);
- if (evlist) {
- evlist__for_each_entry(evlist, evsel) {
- if (cs_etm_pmu &&
- evsel->attr.type == cs_etm_pmu->type)
- found_etm = true;
+ if (!arm_spe_pmus)
+ arm_spe_pmus = find_all_arm_spe_pmus(&nr_spes, err);
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (cs_etm_pmu &&
+ evsel->attr.type == cs_etm_pmu->type)
+ found_etm = true;
+
+ if (!nr_spes)
+ continue;
+
+ for (i = 0; i < nr_spes; i++) {
+ if (evsel->attr.type == arm_spe_pmus[i]->type) {
+ found_spe = true;
+ break;
+ }
}
}
+ if (found_etm && found_spe) {
+ pr_err("Concurrent ARM Coresight ETM and SPE operation not currently supported\n");
+ *err = -EOPNOTSUPP;
+ return NULL;
+ }
+
if (found_etm)
return cs_etm_record_init(err);
+#if defined(__aarch64__)
+ if (found_spe)
+ return arm_spe_recording_init(err, arm_spe_pmus[i]);
+#endif
+
/*
- * Clear 'err' even if we haven't found a cs_etm event - that way perf
+ * Clear 'err' even if we haven't found an event - that way perf
* record can still be used even if tracers aren't present. The NULL
* return value will take care of telling the infrastructure HW tracing
* isn't available.
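
SPE exposes one PMU instance per CPU rather than a single system-wide device, which is why find_all_arm_spe_pmus() above probes every per-CPU name. A minimal standalone sketch of that naming scheme, assuming ARM_SPE_PMU_NAME is the "arm_spe_" prefix used by the perf sources:

#include <stdio.h>
#include <unistd.h>

/* Minimal standalone sketch of the probing loop above: SPE is one
 * PMU per CPU, so candidate names are "arm_spe_0".."arm_spe_<N-1>".
 * ARM_SPE_PMU_NAME is assumed to be the "arm_spe_" prefix. */
#define ARM_SPE_PMU_NAME "arm_spe_"

int main(void)
{
	long nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
	/* prefix + up to 10 digits + NUL, as sized in the code above */
	char name[sizeof(ARM_SPE_PMU_NAME) + 10];

	for (long i = 0; i < nr_cpus; i++) {
		snprintf(name, sizeof(name), "%s%ld", ARM_SPE_PMU_NAME, i);
		/* perf would now look this name up under
		 * /sys/bus/event_source/devices/ */
		printf("%s\n", name);
	}
	return 0;
}
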
diff --git a/tools/perf/arch/arm/util/pmu.c b/tools/perf/arch/arm/util/pmu.c
index 98d67399a0d6..ac4dffc807b8 100644
--- a/tools/perf/arch/arm/util/pmu.c
+++ b/tools/perf/arch/arm/util/pmu.c
@@ -20,6 +20,7 @@
#include <linux/perf_event.h>
#include "cs-etm.h"
+#include "arm-spe.h"
#include "../../util/pmu.h"
struct perf_event_attr
@@ -30,7 +31,12 @@ struct perf_event_attr
/* add ETM default config here */
pmu->selectable = true;
pmu->set_drv_config = cs_etm_set_drv_config;
+#if defined(__aarch64__)
+ } else if (strstarts(pmu->name, ARM_SPE_PMU_NAME)) {
+ return arm_spe_pmu_default_config(pmu);
+#endif
}
+
#endif
return NULL;
}
diff --git a/tools/perf/arch/arm64/util/Build b/tools/perf/arch/arm64/util/Build
index cef6fb38d17e..c0b8dfef98ba 100644
--- a/tools/perf/arch/arm64/util/Build
+++ b/tools/perf/arch/arm64/util/Build
@@ -1,6 +1,9 @@
+libperf-y += header.o
+libperf-y += sym-handling.o
libperf-$(CONFIG_DWARF) += dwarf-regs.o
libperf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
libperf-$(CONFIG_AUXTRACE) += ../../arm/util/pmu.o \
../../arm/util/auxtrace.o \
- ../../arm/util/cs-etm.o
+ ../../arm/util/cs-etm.o \
+ arm-spe.o
diff --git a/tools/perf/arch/arm64/util/arm-spe.c b/tools/perf/arch/arm64/util/arm-spe.c
new file mode 100644
index 000000000000..1120e39c1b00
--- /dev/null
+++ b/tools/perf/arch/arm64/util/arm-spe.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Arm Statistical Profiling Extensions (SPE) support
+ * Copyright (c) 2017-2018, Arm Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/log2.h>
+#include <time.h>
+
+#include "../../util/cpumap.h"
+#include "../../util/evsel.h"
+#include "../../util/evlist.h"
+#include "../../util/session.h"
+#include "../../util/util.h"
+#include "../../util/pmu.h"
+#include "../../util/debug.h"
+#include "../../util/auxtrace.h"
+#include "../../util/arm-spe.h"
+
+#define KiB(x) ((x) * 1024)
+#define MiB(x) ((x) * 1024 * 1024)
+
+struct arm_spe_recording {
+ struct auxtrace_record itr;
+ struct perf_pmu *arm_spe_pmu;
+ struct perf_evlist *evlist;
+};
+
+static size_t
+arm_spe_info_priv_size(struct auxtrace_record *itr __maybe_unused,
+ struct perf_evlist *evlist __maybe_unused)
+{
+ return ARM_SPE_AUXTRACE_PRIV_SIZE;
+}
+
+static int arm_spe_info_fill(struct auxtrace_record *itr,
+ struct perf_session *session,
+ struct auxtrace_info_event *auxtrace_info,
+ size_t priv_size)
+{
+ struct arm_spe_recording *sper =
+ container_of(itr, struct arm_spe_recording, itr);
+ struct perf_pmu *arm_spe_pmu = sper->arm_spe_pmu;
+
+ if (priv_size != ARM_SPE_AUXTRACE_PRIV_SIZE)
+ return -EINVAL;
+
+ if (!session->evlist->nr_mmaps)
+ return -EINVAL;
+
+ auxtrace_info->type = PERF_AUXTRACE_ARM_SPE;
+ auxtrace_info->priv[ARM_SPE_PMU_TYPE] = arm_spe_pmu->type;
+
+ return 0;
+}
+
+static int arm_spe_recording_options(struct auxtrace_record *itr,
+ struct perf_evlist *evlist,
+ struct record_opts *opts)
+{
+ struct arm_spe_recording *sper =
+ container_of(itr, struct arm_spe_recording, itr);
+ struct perf_pmu *arm_spe_pmu = sper->arm_spe_pmu;
+ struct perf_evsel *evsel, *arm_spe_evsel = NULL;
+ bool privileged = geteuid() == 0 || perf_event_paranoid() < 0;
+ struct perf_evsel *tracking_evsel;
+ int err;
+
+ sper->evlist = evlist;
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (evsel->attr.type == arm_spe_pmu->type) {
+ if (arm_spe_evsel) {
+ pr_err("There may be only one " ARM_SPE_PMU_NAME "x event\n");
+ return -EINVAL;
+ }
+ evsel->attr.freq = 0;
+ evsel->attr.sample_period = 1;
+ arm_spe_evsel = evsel;
+ opts->full_auxtrace = true;
+ }
+ }
+
+ if (!opts->full_auxtrace)
+ return 0;
+
+ /* We are in full trace mode but '-m,xyz' wasn't specified */
+ if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
+ if (privileged) {
+ opts->auxtrace_mmap_pages = MiB(4) / page_size;
+ } else {
+ opts->auxtrace_mmap_pages = KiB(128) / page_size;
+ if (opts->mmap_pages == UINT_MAX)
+ opts->mmap_pages = KiB(256) / page_size;
+ }
+ }
+
+ /* Validate auxtrace_mmap_pages */
+ if (opts->auxtrace_mmap_pages) {
+ size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;
+ size_t min_sz = KiB(8);
+
+ if (sz < min_sz || !is_power_of_2(sz)) {
+ pr_err("Invalid mmap size for ARM SPE: must be at least %zuKiB and a power of 2\n",
+ min_sz / 1024);
+ return -EINVAL;
+ }
+ }
+
+
+ /*
+ * To obtain the auxtrace buffer file descriptor, the auxtrace event
+ * must come first.
+ */
+ perf_evlist__to_front(evlist, arm_spe_evsel);
+
+ perf_evsel__set_sample_bit(arm_spe_evsel, CPU);
+ perf_evsel__set_sample_bit(arm_spe_evsel, TIME);
+ perf_evsel__set_sample_bit(arm_spe_evsel, TID);
+
+ /* Add dummy event to keep tracking */
+ err = parse_events(evlist, "dummy:u", NULL);
+ if (err)
+ return err;
+
+ tracking_evsel = perf_evlist__last(evlist);
+ perf_evlist__set_tracking_event(evlist, tracking_evsel);
+
+ tracking_evsel->attr.freq = 0;
+ tracking_evsel->attr.sample_period = 1;
+ perf_evsel__set_sample_bit(tracking_evsel, TIME);
+ perf_evsel__set_sample_bit(tracking_evsel, CPU);
+ perf_evsel__reset_sample_bit(tracking_evsel, BRANCH_STACK);
+
+ return 0;
+}
+
+static u64 arm_spe_reference(struct auxtrace_record *itr __maybe_unused)
+{
+ struct timespec ts;
+
+ clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
+
+ return ts.tv_sec ^ ts.tv_nsec;
+}
+
+static void arm_spe_recording_free(struct auxtrace_record *itr)
+{
+ struct arm_spe_recording *sper =
+ container_of(itr, struct arm_spe_recording, itr);
+
+ free(sper);
+}
+
+static int arm_spe_read_finish(struct auxtrace_record *itr, int idx)
+{
+ struct arm_spe_recording *sper =
+ container_of(itr, struct arm_spe_recording, itr);
+ struct perf_evsel *evsel;
+
+ evlist__for_each_entry(sper->evlist, evsel) {
+ if (evsel->attr.type == sper->arm_spe_pmu->type)
+ return perf_evlist__enable_event_idx(sper->evlist,
+ evsel, idx);
+ }
+ return -EINVAL;
+}
+
+struct auxtrace_record *arm_spe_recording_init(int *err,
+ struct perf_pmu *arm_spe_pmu)
+{
+ struct arm_spe_recording *sper;
+
+ if (!arm_spe_pmu) {
+ *err = -ENODEV;
+ return NULL;
+ }
+
+ sper = zalloc(sizeof(struct arm_spe_recording));
+ if (!sper) {
+ *err = -ENOMEM;
+ return NULL;
+ }
+
+ sper->arm_spe_pmu = arm_spe_pmu;
+ sper->itr.recording_options = arm_spe_recording_options;
+ sper->itr.info_priv_size = arm_spe_info_priv_size;
+ sper->itr.info_fill = arm_spe_info_fill;
+ sper->itr.free = arm_spe_recording_free;
+ sper->itr.reference = arm_spe_reference;
+ sper->itr.read_finish = arm_spe_read_finish;
+ sper->itr.alignment = 0;
+
+ return &sper->itr;
+}
+
+struct perf_event_attr
+*arm_spe_pmu_default_config(struct perf_pmu *arm_spe_pmu)
+{
+ struct perf_event_attr *attr;
+
+ attr = zalloc(sizeof(struct perf_event_attr));
+ if (!attr) {
+ pr_err("arm_spe default config cannot allocate a perf_event_attr\n");
+ return NULL;
+ }
+
+ /*
+ * If kernel driver doesn't advertise a minimum,
+ * use max allowable by PMSIDR_EL1.INTERVAL
+ */
+ if (perf_pmu__scan_file(arm_spe_pmu, "caps/min_interval", "%llu",
+ &attr->sample_period) != 1) {
+ pr_debug("arm_spe driver doesn't advertise a min. interval. Using 4096\n");
+ attr->sample_period = 4096;
+ }
+
+ arm_spe_pmu->selectable = true;
+ arm_spe_pmu->is_uncore = false;
+
+ return attr;
+}
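
arm_spe_pmu_default_config() above prefers the interval advertised by the driver and falls back to 4096. A standalone sketch of the same probe via sysfs (the exact path is an assumption based on the usual PMU caps layout):

#include <stdio.h>

/* Standalone sketch of the fallback above: read the driver's
 * advertised minimum sampling interval from sysfs, defaulting to
 * 4096 when the file is missing or unreadable. The path is an
 * assumption, not taken from the patch. */
static unsigned long long spe_min_interval(void)
{
	const char *path =
		"/sys/bus/event_source/devices/arm_spe_0/caps/min_interval";
	unsigned long long interval = 4096;	/* fallback used above */
	FILE *f = fopen(path, "r");

	if (f) {
		if (fscanf(f, "%llu", &interval) != 1)
			interval = 4096;
		fclose(f);
	}
	return interval;
}

int main(void)
{
	printf("sample_period = %llu\n", spe_min_interval());
	return 0;
}
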
diff --git a/tools/perf/arch/arm64/util/header.c b/tools/perf/arch/arm64/util/header.c
new file mode 100644
index 000000000000..534cd2507d83
--- /dev/null
+++ b/tools/perf/arch/arm64/util/header.c
@@ -0,0 +1,65 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <api/fs/fs.h>
+#include "header.h"
+
+#define MIDR "/regs/identification/midr_el1"
+#define MIDR_SIZE 19
+#define MIDR_REVISION_MASK 0xf
+#define MIDR_VARIANT_SHIFT 20
+#define MIDR_VARIANT_MASK (0xf << MIDR_VARIANT_SHIFT)
+
+char *get_cpuid_str(struct perf_pmu *pmu)
+{
+ char *buf = NULL;
+ char path[PATH_MAX];
+ const char *sysfs = sysfs__mountpoint();
+ int cpu;
+ u64 midr = 0;
+ struct cpu_map *cpus;
+ FILE *file;
+
+ if (!sysfs || !pmu || !pmu->cpus)
+ return NULL;
+
+ buf = malloc(MIDR_SIZE);
+ if (!buf)
+ return NULL;
+
+ /* read midr from list of cpus mapped to this pmu */
+ cpus = cpu_map__get(pmu->cpus);
+ for (cpu = 0; cpu < cpus->nr; cpu++) {
+ scnprintf(path, PATH_MAX, "%s/devices/system/cpu/cpu%d"MIDR,
+ sysfs, cpus->map[cpu]);
+
+ file = fopen(path, "r");
+ if (!file) {
+ pr_debug("fopen failed for file %s\n", path);
+ continue;
+ }
+
+ if (!fgets(buf, MIDR_SIZE, file)) {
+ fclose(file);
+ continue;
+ }
+ fclose(file);
+
+ /* Ignore/clear Variant[23:20] and
+ * Revision[3:0] of MIDR
+ */
+ midr = strtoul(buf, NULL, 16);
+ midr &= (~(MIDR_VARIANT_MASK | MIDR_REVISION_MASK));
+ scnprintf(buf, MIDR_SIZE, "0x%016lx", midr);
+ /* Got the MIDR; stop scanning the remaining CPUs */
+ break;
+ }
+
+ if (!midr) {
+ pr_err("failed to get cpuid string for PMU %s\n", pmu->name);
+ free(buf);
+ buf = NULL;
+ }
+
+ cpu_map__put(cpus);
+ return buf;
+}
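
get_cpuid_str() masks out the Variant and Revision fields so the resulting cpuid string stays stable across silicon revisions of the same part. A worked standalone example of that masking (the input MIDR value is made up):

#include <stdio.h>
#include <stdint.h>

#define MIDR_REVISION_MASK	0xf
#define MIDR_VARIANT_SHIFT	20
#define MIDR_VARIANT_MASK	(0xf << MIDR_VARIANT_SHIFT)

int main(void)
{
	/* hypothetical r1p3 part; only Variant/Revision get cleared */
	uint64_t midr = 0x411fd073ULL;

	midr &= ~(uint64_t)(MIDR_VARIANT_MASK | MIDR_REVISION_MASK);
	printf("0x%016llx\n", (unsigned long long)midr);
	/* prints 0x00000000410fd070 */
	return 0;
}
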
diff --git a/tools/perf/arch/arm64/util/sym-handling.c b/tools/perf/arch/arm64/util/sym-handling.c
new file mode 100644
index 000000000000..0051b1ee8450
--- /dev/null
+++ b/tools/perf/arch/arm64/util/sym-handling.c
@@ -0,0 +1,22 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * Copyright (C) 2015 Naveen N. Rao, IBM Corporation
+ */
+
+#include "debug.h"
+#include "symbol.h"
+#include "map.h"
+#include "probe-event.h"
+#include "probe-file.h"
+
+#ifdef HAVE_LIBELF_SUPPORT
+bool elf__needs_adjust_symbols(GElf_Ehdr ehdr)
+{
+ return ehdr.e_type == ET_EXEC ||
+ ehdr.e_type == ET_REL ||
+ ehdr.e_type == ET_DYN;
+}
+#endif
diff --git a/tools/perf/arch/common.c b/tools/perf/arch/common.c
index 8c0cfeb55f8e..c6f373508a4f 100644
--- a/tools/perf/arch/common.c
+++ b/tools/perf/arch/common.c
@@ -1,12 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
-#include <sys/utsname.h>
#include "common.h"
+#include "../util/env.h"
#include "../util/util.h"
#include "../util/debug.h"
-#include "sane_ctype.h"
-
const char *const arm_triplets[] = {
"arm-eabi-",
"arm-linux-androideabi-",
@@ -120,55 +118,19 @@ static int lookup_triplets(const char *const *triplets, const char *name)
return -1;
}
-/*
- * Return architecture name in a normalized form.
- * The conversion logic comes from the Makefile.
- */
-const char *normalize_arch(char *arch)
-{
- if (!strcmp(arch, "x86_64"))
- return "x86";
- if (arch[0] == 'i' && arch[2] == '8' && arch[3] == '6')
- return "x86";
- if (!strcmp(arch, "sun4u") || !strncmp(arch, "sparc", 5))
- return "sparc";
- if (!strcmp(arch, "aarch64") || !strcmp(arch, "arm64"))
- return "arm64";
- if (!strncmp(arch, "arm", 3) || !strcmp(arch, "sa110"))
- return "arm";
- if (!strncmp(arch, "s390", 4))
- return "s390";
- if (!strncmp(arch, "parisc", 6))
- return "parisc";
- if (!strncmp(arch, "powerpc", 7) || !strncmp(arch, "ppc", 3))
- return "powerpc";
- if (!strncmp(arch, "mips", 4))
- return "mips";
- if (!strncmp(arch, "sh", 2) && isdigit(arch[2]))
- return "sh";
-
- return arch;
-}
-
static int perf_env__lookup_binutils_path(struct perf_env *env,
const char *name, const char **path)
{
int idx;
- const char *arch, *cross_env;
- struct utsname uts;
+ const char *arch = perf_env__arch(env), *cross_env;
const char *const *path_list;
char *buf = NULL;
- arch = normalize_arch(env->arch);
-
- if (uname(&uts) < 0)
- goto out;
-
/*
* We don't need to try to find objdump path for native system.
* Just use default binutils path (e.g.: "objdump").
*/
- if (!strcmp(normalize_arch(uts.machine), arch))
+ if (!strcmp(perf_env__arch(NULL), arch))
goto out;
cross_env = getenv("CROSS_COMPILE");
diff --git a/tools/perf/arch/common.h b/tools/perf/arch/common.h
index a1546509ad24..2d875baa92e6 100644
--- a/tools/perf/arch/common.h
+++ b/tools/perf/arch/common.h
@@ -7,6 +7,5 @@
extern const char *objdump_path;
int perf_env__lookup_objdump(struct perf_env *env);
-const char *normalize_arch(char *arch);
#endif /* ARCH_PERF_COMMON_H */
diff --git a/tools/perf/arch/powerpc/util/header.c b/tools/perf/arch/powerpc/util/header.c
index 7a4cf80c207a..0b242664f5ea 100644
--- a/tools/perf/arch/powerpc/util/header.c
+++ b/tools/perf/arch/powerpc/util/header.c
@@ -35,7 +35,7 @@ get_cpuid(char *buffer, size_t sz)
}
char *
-get_cpuid_str(void)
+get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
{
char *bufp;
diff --git a/tools/perf/arch/powerpc/util/sym-handling.c b/tools/perf/arch/powerpc/util/sym-handling.c
index 9c4e23d8c8ce..53d83d7e6a09 100644
--- a/tools/perf/arch/powerpc/util/sym-handling.c
+++ b/tools/perf/arch/powerpc/util/sym-handling.c
@@ -64,6 +64,14 @@ int arch__compare_symbol_names_n(const char *namea, const char *nameb,
return strncmp(namea, nameb, n);
}
+
+const char *arch__normalize_symbol_name(const char *name)
+{
+ /* Skip over initial dot */
+ if (name && *name == '.')
+ name++;
+ return name;
+}
#endif
#if defined(_CALL_ELF) && _CALL_ELF == 2
diff --git a/tools/perf/arch/s390/Makefile b/tools/perf/arch/s390/Makefile
index 09ba923debe8..48228de415d0 100644
--- a/tools/perf/arch/s390/Makefile
+++ b/tools/perf/arch/s390/Makefile
@@ -3,3 +3,24 @@ PERF_HAVE_DWARF_REGS := 1
endif
HAVE_KVM_STAT_SUPPORT := 1
PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
+
+#
+# Syscall table generation for perf
+#
+
+out := $(OUTPUT)arch/s390/include/generated/asm
+header := $(out)/syscalls_64.c
+sysdef := $(srctree)/tools/arch/s390/include/uapi/asm/unistd.h
+sysprf := $(srctree)/tools/perf/arch/s390/entry/syscalls/
+systbl := $(sysprf)/mksyscalltbl
+
+# Create output directory if not already present
+_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)')
+
+$(header): $(sysdef) $(systbl)
+ $(Q)$(SHELL) '$(systbl)' '$(CC)' $(sysdef) > $@
+
+clean::
+ $(call QUIET_CLEAN, s390) $(RM) $(header)
+
+archheaders: $(header)
diff --git a/tools/perf/arch/s390/annotate/instructions.c b/tools/perf/arch/s390/annotate/instructions.c
index e0e466c650df..8c72b44444cb 100644
--- a/tools/perf/arch/s390/annotate/instructions.c
+++ b/tools/perf/arch/s390/annotate/instructions.c
@@ -18,7 +18,8 @@ static struct ins_ops *s390__associate_ins_ops(struct arch *arch, const char *na
if (!strcmp(name, "br"))
ops = &ret_ops;
- arch__associate_ins_ops(arch, name, ops);
+ if (ops)
+ arch__associate_ins_ops(arch, name, ops);
return ops;
}
diff --git a/tools/perf/arch/s390/entry/syscalls/mksyscalltbl b/tools/perf/arch/s390/entry/syscalls/mksyscalltbl
new file mode 100755
index 000000000000..7fa0d0abd419
--- /dev/null
+++ b/tools/perf/arch/s390/entry/syscalls/mksyscalltbl
@@ -0,0 +1,36 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# Generate system call table for perf
+#
+#
+# Copyright IBM Corp. 2017
+# Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+#
+
+gcc=$1
+input=$2
+
+if ! test -r $input; then
+ echo "Could not read input file" >&2
+ exit 1
+fi
+
+create_table()
+{
+ local max_nr
+
+ echo 'static const char *syscalltbl_s390_64[] = {'
+ while read sc nr; do
+ printf '\t[%d] = "%s",\n' $nr $sc
+ max_nr=$nr
+ done
+ echo '};'
+ echo "#define SYSCALLTBL_S390_64_MAX_ID $max_nr"
+}
+
+
+$gcc -m64 -E -dM -x c $input \
+ |sed -ne 's/^#define __NR_//p' \
+ |sort -t' ' -k2 -nu \
+ |create_table
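
For reference, the table the script emits is plain C. An abridged, illustrative sketch of its output shape (only a handful of classic entries shown):

#include <stdio.h>

/* Illustrative, abridged shape of the file the script emits: a
 * syscall-number -> name table plus the highest number seen. */
static const char *syscalltbl_s390_64[] = {
	[1] = "exit",
	[2] = "fork",
	[3] = "read",
	[4] = "write",
};
#define SYSCALLTBL_S390_64_MAX_ID 4

int main(void)
{
	for (int nr = 0; nr <= SYSCALLTBL_S390_64_MAX_ID; nr++)
		if (syscalltbl_s390_64[nr])
			printf("%d\t%s\n", nr, syscalltbl_s390_64[nr]);
	return 0;
}
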
diff --git a/tools/perf/arch/x86/tests/perf-time-to-tsc.c b/tools/perf/arch/x86/tests/perf-time-to-tsc.c
index b59678e8c1e2..06abe8108b33 100644
--- a/tools/perf/arch/x86/tests/perf-time-to-tsc.c
+++ b/tools/perf/arch/x86/tests/perf-time-to-tsc.c
@@ -84,7 +84,7 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe
CHECK__(perf_evlist__open(evlist));
- CHECK__(perf_evlist__mmap(evlist, UINT_MAX, false));
+ CHECK__(perf_evlist__mmap(evlist, UINT_MAX));
pc = evlist->mmap[0].base;
ret = perf_read_tsc_conversion(pc, &tc);
diff --git a/tools/perf/arch/x86/util/header.c b/tools/perf/arch/x86/util/header.c
index 33027c5e6f92..fb0d71afee8b 100644
--- a/tools/perf/arch/x86/util/header.c
+++ b/tools/perf/arch/x86/util/header.c
@@ -66,11 +66,11 @@ get_cpuid(char *buffer, size_t sz)
}
char *
-get_cpuid_str(void)
+get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
{
char *buf = malloc(128);
- if (__get_cpuid(buf, 128, "%s-%u-%X$") < 0) {
+ if (buf && __get_cpuid(buf, 128, "%s-%u-%X$") < 0) {
free(buf);
return NULL;
}
diff --git a/tools/perf/arch/x86/util/unwind-libunwind.c b/tools/perf/arch/x86/util/unwind-libunwind.c
index 9c917f80c906..05920e3edf7a 100644
--- a/tools/perf/arch/x86/util/unwind-libunwind.c
+++ b/tools/perf/arch/x86/util/unwind-libunwind.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
-#ifndef REMOTE_UNWIND_LIBUNWIND
#include <errno.h>
+#ifndef REMOTE_UNWIND_LIBUNWIND
#include <libunwind.h>
#include "perf_regs.h"
#include "../../util/unwind.h"
diff --git a/tools/perf/bench/futex-hash.c b/tools/perf/bench/futex-hash.c
index 58ae6ed8f38b..9aa3a674829b 100644
--- a/tools/perf/bench/futex-hash.c
+++ b/tools/perf/bench/futex-hash.c
@@ -24,9 +24,9 @@
#include <subcmd/parse-options.h>
#include "bench.h"
#include "futex.h"
+#include "cpumap.h"
#include <err.h>
-#include <sys/time.h>
static unsigned int nthreads = 0;
static unsigned int nsecs = 10;
@@ -118,11 +118,12 @@ static void print_summary(void)
int bench_futex_hash(int argc, const char **argv)
{
int ret = 0;
- cpu_set_t cpu;
+ cpu_set_t cpuset;
struct sigaction act;
- unsigned int i, ncpus;
+ unsigned int i;
pthread_attr_t thread_attr;
struct worker *worker = NULL;
+ struct cpu_map *cpu;
argc = parse_options(argc, argv, options, bench_futex_hash_usage, 0);
if (argc) {
@@ -130,14 +131,16 @@ int bench_futex_hash(int argc, const char **argv)
exit(EXIT_FAILURE);
}
- ncpus = sysconf(_SC_NPROCESSORS_ONLN);
+ cpu = cpu_map__new(NULL);
+ if (!cpu)
+ goto errmem;
sigfillset(&act.sa_mask);
act.sa_sigaction = toggle_done;
sigaction(SIGINT, &act, NULL);
if (!nthreads) /* default to the number of CPUs */
- nthreads = ncpus;
+ nthreads = cpu->nr;
worker = calloc(nthreads, sizeof(*worker));
if (!worker)
@@ -163,10 +166,10 @@ int bench_futex_hash(int argc, const char **argv)
if (!worker[i].futex)
goto errmem;
- CPU_ZERO(&cpu);
- CPU_SET(i % ncpus, &cpu);
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpu->map[i % cpu->nr], &cpuset);
- ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpu);
+ ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset);
if (ret)
err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
@@ -217,6 +220,7 @@ int bench_futex_hash(int argc, const char **argv)
print_summary();
free(worker);
+ free(cpu);
return ret;
errmem:
err(EXIT_FAILURE, "calloc");
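
The switch from sysconf(_SC_NPROCESSORS_ONLN) to a cpu_map matters on systems with sparse or offline CPU numbering: worker i must be pinned to the i-th usable CPU id, not to the raw index i. A minimal standalone sketch of that pattern (the map contents are hypothetical):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical online CPU ids with a hole (CPU 1 offline) */
	int map[] = { 0, 2, 3 };
	int nr = sizeof(map) / sizeof(map[0]);

	for (int i = 0; i < 5; i++) {	/* 5 workers over 3 CPUs */
		cpu_set_t cpuset;

		CPU_ZERO(&cpuset);
		CPU_SET(map[i % nr], &cpuset);
		/* a real worker would now apply cpuset via
		 * pthread_attr_setaffinity_np() as in the code above */
		printf("worker %d -> cpu %d\n", i, map[i % nr]);
	}
	return 0;
}
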
diff --git a/tools/perf/bench/futex-lock-pi.c b/tools/perf/bench/futex-lock-pi.c
index 08653ae8a8c4..8e9c4753e304 100644
--- a/tools/perf/bench/futex-lock-pi.c
+++ b/tools/perf/bench/futex-lock-pi.c
@@ -15,6 +15,7 @@
#include <errno.h>
#include "bench.h"
#include "futex.h"
+#include "cpumap.h"
#include <err.h>
#include <stdlib.h>
@@ -32,7 +33,7 @@ static struct worker *worker;
static unsigned int nsecs = 10;
static bool silent = false, multi = false;
static bool done = false, fshared = false;
-static unsigned int ncpus, nthreads = 0;
+static unsigned int nthreads = 0;
static int futex_flag = 0;
struct timeval start, end, runtime;
static pthread_mutex_t thread_lock;
@@ -113,9 +114,10 @@ static void *workerfn(void *arg)
return NULL;
}
-static void create_threads(struct worker *w, pthread_attr_t thread_attr)
+static void create_threads(struct worker *w, pthread_attr_t thread_attr,
+ struct cpu_map *cpu)
{
- cpu_set_t cpu;
+ cpu_set_t cpuset;
unsigned int i;
threads_starting = nthreads;
@@ -130,10 +132,10 @@ static void create_threads(struct worker *w, pthread_attr_t thread_attr)
} else
worker[i].futex = &global_futex;
- CPU_ZERO(&cpu);
- CPU_SET(i % ncpus, &cpu);
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpu->map[i % cpu->nr], &cpuset);
- if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpu))
+ if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset))
err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
if (pthread_create(&w[i].thread, &thread_attr, workerfn, &worker[i]))
@@ -147,19 +149,22 @@ int bench_futex_lock_pi(int argc, const char **argv)
unsigned int i;
struct sigaction act;
pthread_attr_t thread_attr;
+ struct cpu_map *cpu;
argc = parse_options(argc, argv, options, bench_futex_lock_pi_usage, 0);
if (argc)
goto err;
- ncpus = sysconf(_SC_NPROCESSORS_ONLN);
+ cpu = cpu_map__new(NULL);
+ if (!cpu)
+ err(EXIT_FAILURE, "calloc");
sigfillset(&act.sa_mask);
act.sa_sigaction = toggle_done;
sigaction(SIGINT, &act, NULL);
if (!nthreads)
- nthreads = ncpus;
+ nthreads = cpu->nr;
worker = calloc(nthreads, sizeof(*worker));
if (!worker)
@@ -180,7 +185,7 @@ int bench_futex_lock_pi(int argc, const char **argv)
pthread_attr_init(&thread_attr);
gettimeofday(&start, NULL);
- create_threads(worker, thread_attr);
+ create_threads(worker, thread_attr, cpu);
pthread_attr_destroy(&thread_attr);
pthread_mutex_lock(&thread_lock);
diff --git a/tools/perf/bench/futex-requeue.c b/tools/perf/bench/futex-requeue.c
index 1058c194608a..fc692efa0c05 100644
--- a/tools/perf/bench/futex-requeue.c
+++ b/tools/perf/bench/futex-requeue.c
@@ -22,6 +22,7 @@
#include <errno.h>
#include "bench.h"
#include "futex.h"
+#include "cpumap.h"
#include <err.h>
#include <stdlib.h>
@@ -40,7 +41,7 @@ static bool done = false, silent = false, fshared = false;
static pthread_mutex_t thread_lock;
static pthread_cond_t thread_parent, thread_worker;
static struct stats requeuetime_stats, requeued_stats;
-static unsigned int ncpus, threads_starting, nthreads = 0;
+static unsigned int threads_starting, nthreads = 0;
static int futex_flag = 0;
static const struct option options[] = {
@@ -83,19 +84,19 @@ static void *workerfn(void *arg __maybe_unused)
}
static void block_threads(pthread_t *w,
- pthread_attr_t thread_attr)
+ pthread_attr_t thread_attr, struct cpu_map *cpu)
{
- cpu_set_t cpu;
+ cpu_set_t cpuset;
unsigned int i;
threads_starting = nthreads;
/* create and block all threads */
for (i = 0; i < nthreads; i++) {
- CPU_ZERO(&cpu);
- CPU_SET(i % ncpus, &cpu);
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpu->map[i % cpu->nr], &cpuset);
- if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpu))
+ if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset))
err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
if (pthread_create(&w[i], &thread_attr, workerfn, NULL))
@@ -116,19 +117,22 @@ int bench_futex_requeue(int argc, const char **argv)
unsigned int i, j;
struct sigaction act;
pthread_attr_t thread_attr;
+ struct cpu_map *cpu;
argc = parse_options(argc, argv, options, bench_futex_requeue_usage, 0);
if (argc)
goto err;
- ncpus = sysconf(_SC_NPROCESSORS_ONLN);
+ cpu = cpu_map__new(NULL);
+ if (!cpu)
+ err(EXIT_FAILURE, "cpu_map__new");
sigfillset(&act.sa_mask);
act.sa_sigaction = toggle_done;
sigaction(SIGINT, &act, NULL);
if (!nthreads)
- nthreads = ncpus;
+ nthreads = cpu->nr;
worker = calloc(nthreads, sizeof(*worker));
if (!worker)
@@ -156,7 +160,7 @@ int bench_futex_requeue(int argc, const char **argv)
struct timeval start, end, runtime;
/* create, launch & block all threads */
- block_threads(worker, thread_attr);
+ block_threads(worker, thread_attr, cpu);
/* make sure all threads are already blocked */
pthread_mutex_lock(&thread_lock);
diff --git a/tools/perf/bench/futex-wake-parallel.c b/tools/perf/bench/futex-wake-parallel.c
index b4732dad9f89..69d8fdc87315 100644
--- a/tools/perf/bench/futex-wake-parallel.c
+++ b/tools/perf/bench/futex-wake-parallel.c
@@ -7,7 +7,17 @@
* for each individual thread to service its share of work. Ultimately
* it can be used to measure futex_wake() changes.
*/
+#include "bench.h"
+#include <linux/compiler.h>
+#include "../util/debug.h"
+#ifndef HAVE_PTHREAD_BARRIER
+int bench_futex_wake_parallel(int argc __maybe_unused, const char **argv __maybe_unused)
+{
+ pr_err("%s: pthread_barrier_t unavailable, disabling this test...\n", __func__);
+ return 0;
+}
+#else /* HAVE_PTHREAD_BARRIER */
/* For the CLR_() macros */
#include <string.h>
#include <pthread.h>
@@ -15,12 +25,11 @@
#include <signal.h>
#include "../util/stat.h"
#include <subcmd/parse-options.h>
-#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/time64.h>
#include <errno.h>
-#include "bench.h"
#include "futex.h"
+#include "cpumap.h"
#include <err.h>
#include <stdlib.h>
@@ -42,8 +51,9 @@ static bool done = false, silent = false, fshared = false;
static unsigned int nblocked_threads = 0, nwaking_threads = 0;
static pthread_mutex_t thread_lock;
static pthread_cond_t thread_parent, thread_worker;
+static pthread_barrier_t barrier;
static struct stats waketime_stats, wakeup_stats;
-static unsigned int ncpus, threads_starting;
+static unsigned int threads_starting;
static int futex_flag = 0;
static const struct option options[] = {
@@ -64,6 +74,8 @@ static void *waking_workerfn(void *arg)
struct thread_data *waker = (struct thread_data *) arg;
struct timeval start, end;
+ pthread_barrier_wait(&barrier);
+
gettimeofday(&start, NULL);
waker->nwoken = futex_wake(&futex, nwakes, futex_flag);
@@ -84,6 +96,8 @@ static void wakeup_threads(struct thread_data *td, pthread_attr_t thread_attr)
pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
+ pthread_barrier_init(&barrier, NULL, nwaking_threads + 1);
+
/* create and block all threads */
for (i = 0; i < nwaking_threads; i++) {
/*
@@ -96,9 +110,13 @@ static void wakeup_threads(struct thread_data *td, pthread_attr_t thread_attr)
err(EXIT_FAILURE, "pthread_create");
}
+ pthread_barrier_wait(&barrier);
+
for (i = 0; i < nwaking_threads; i++)
if (pthread_join(td[i].worker, NULL))
err(EXIT_FAILURE, "pthread_join");
+
+ pthread_barrier_destroy(&barrier);
}
static void *blocked_workerfn(void *arg __maybe_unused)
@@ -119,19 +137,20 @@ static void *blocked_workerfn(void *arg __maybe_unused)
return NULL;
}
-static void block_threads(pthread_t *w, pthread_attr_t thread_attr)
+static void block_threads(pthread_t *w, pthread_attr_t thread_attr,
+ struct cpu_map *cpu)
{
- cpu_set_t cpu;
+ cpu_set_t cpuset;
unsigned int i;
threads_starting = nblocked_threads;
/* create and block all threads */
for (i = 0; i < nblocked_threads; i++) {
- CPU_ZERO(&cpu);
- CPU_SET(i % ncpus, &cpu);
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpu->map[i % cpu->nr], &cpuset);
- if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpu))
+ if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset))
err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
if (pthread_create(&w[i], &thread_attr, blocked_workerfn, NULL))
@@ -205,6 +224,7 @@ int bench_futex_wake_parallel(int argc, const char **argv)
struct sigaction act;
pthread_attr_t thread_attr;
struct thread_data *waking_worker;
+ struct cpu_map *cpu;
argc = parse_options(argc, argv, options,
bench_futex_wake_parallel_usage, 0);
@@ -217,9 +237,12 @@ int bench_futex_wake_parallel(int argc, const char **argv)
act.sa_sigaction = toggle_done;
sigaction(SIGINT, &act, NULL);
- ncpus = sysconf(_SC_NPROCESSORS_ONLN);
+ cpu = cpu_map__new(NULL);
+ if (!cpu)
+ err(EXIT_FAILURE, "calloc");
+
if (!nblocked_threads)
- nblocked_threads = ncpus;
+ nblocked_threads = cpu->nr;
/* some sanity checks */
if (nwaking_threads > nblocked_threads || !nwaking_threads)
@@ -259,7 +282,7 @@ int bench_futex_wake_parallel(int argc, const char **argv)
err(EXIT_FAILURE, "calloc");
/* create, launch & block all threads */
- block_threads(blocked_worker, thread_attr);
+ block_threads(blocked_worker, thread_attr, cpu);
/* make sure all threads are already blocked */
pthread_mutex_lock(&thread_lock);
@@ -297,3 +320,4 @@ int bench_futex_wake_parallel(int argc, const char **argv)
free(blocked_worker);
return ret;
}
+#endif /* HAVE_PTHREAD_BARRIER */
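
The barrier added above makes all waker threads start their timed futex_wake() calls together instead of staggering as they are created. A standalone sketch of the same synchronization pattern (thread count arbitrary):

#include <pthread.h>
#include <stdio.h>

#define NWAKERS 4			/* arbitrary for the sketch */
static pthread_barrier_t barrier;

static void *waker(void *arg)
{
	pthread_barrier_wait(&barrier);	/* released with all peers */
	printf("waker %ld running\n", (long)arg);
	return NULL;
}

int main(void)
{
	pthread_t t[NWAKERS];

	/* +1: the parent also waits, so nobody runs until all exist */
	pthread_barrier_init(&barrier, NULL, NWAKERS + 1);
	for (long i = 0; i < NWAKERS; i++)
		pthread_create(&t[i], NULL, waker, (void *)i);
	pthread_barrier_wait(&barrier);
	for (int i = 0; i < NWAKERS; i++)
		pthread_join(t[i], NULL);
	pthread_barrier_destroy(&barrier);
	return 0;			/* build with -pthread */
}
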
diff --git a/tools/perf/bench/futex-wake.c b/tools/perf/bench/futex-wake.c
index 8c5c0b6b5c97..e8181ad7d088 100644
--- a/tools/perf/bench/futex-wake.c
+++ b/tools/perf/bench/futex-wake.c
@@ -22,6 +22,7 @@
#include <errno.h>
#include "bench.h"
#include "futex.h"
+#include "cpumap.h"
#include <err.h>
#include <stdlib.h>
@@ -89,19 +90,19 @@ static void print_summary(void)
}
static void block_threads(pthread_t *w,
- pthread_attr_t thread_attr)
+ pthread_attr_t thread_attr, struct cpu_map *cpu)
{
- cpu_set_t cpu;
+ cpu_set_t cpuset;
unsigned int i;
threads_starting = nthreads;
/* create and block all threads */
for (i = 0; i < nthreads; i++) {
- CPU_ZERO(&cpu);
- CPU_SET(i % ncpus, &cpu);
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpu->map[i % cpu->nr], &cpuset);
- if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpu))
+ if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset))
err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
if (pthread_create(&w[i], &thread_attr, workerfn, NULL))
@@ -122,6 +123,7 @@ int bench_futex_wake(int argc, const char **argv)
unsigned int i, j;
struct sigaction act;
pthread_attr_t thread_attr;
+ struct cpu_map *cpu;
argc = parse_options(argc, argv, options, bench_futex_wake_usage, 0);
if (argc) {
@@ -129,7 +131,9 @@ int bench_futex_wake(int argc, const char **argv)
exit(EXIT_FAILURE);
}
- ncpus = sysconf(_SC_NPROCESSORS_ONLN);
+ cpu = cpu_map__new(NULL);
+ if (!cpu)
+ err(EXIT_FAILURE, "calloc");
sigfillset(&act.sa_mask);
act.sa_sigaction = toggle_done;
@@ -161,7 +165,7 @@ int bench_futex_wake(int argc, const char **argv)
struct timeval start, end, runtime;
/* create, launch & block all threads */
- block_threads(worker, thread_attr);
+ block_threads(worker, thread_attr, cpu);
/* make sure all threads are already blocked */
pthread_mutex_lock(&thread_lock);
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index 3d354ba6e9c5..41db2cba77eb 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -325,8 +325,8 @@ int cmd_buildid_cache(int argc, const char **argv)
"file", "kcore file to add"),
OPT_STRING('r', "remove", &remove_name_list_str, "file list",
"file(s) to remove"),
- OPT_STRING('p', "purge", &purge_name_list_str, "path list",
- "path(s) to remove (remove old caches too)"),
+ OPT_STRING('p', "purge", &purge_name_list_str, "file list",
+ "file(s) to remove (remove old caches too)"),
OPT_STRING('M', "missing", &missing_filename, "file",
"to find missing build ids in the cache"),
OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
index 17855c4626a0..c0815a37fdb5 100644
--- a/tools/perf/builtin-c2c.c
+++ b/tools/perf/builtin-c2c.c
@@ -27,13 +27,10 @@
#include "sort.h"
#include "tool.h"
#include "data.h"
-#include "sort.h"
#include "event.h"
#include "evlist.h"
#include "evsel.h"
-#include <asm/bug.h>
#include "ui/browsers/hists.h"
-#include "evlist.h"
#include "thread.h"
struct c2c_hists {
@@ -2224,9 +2221,9 @@ static int perf_c2c__browse_cacheline(struct hist_entry *he)
struct hist_browser *browser;
int key = -1;
const char help[] =
- " ENTER Togle callchains (if present) \n"
- " n Togle Node details info \n"
- " s Togle full lenght of symbol and source line columns \n"
+ " ENTER Toggle callchains (if present) \n"
+ " n Toggle Node details info \n"
+ " s Toggle full length of symbol and source line columns \n"
" q Return back to cacheline list \n";
/* Display compact version first. */
@@ -2303,7 +2300,7 @@ static int perf_c2c__hists_browse(struct hists *hists)
int key = -1;
const char help[] =
" d Display cacheline details \n"
- " ENTER Togle callchains (if present) \n"
+ " ENTER Toggle callchains (if present) \n"
" q Quit \n";
browser = perf_c2c_browser__new(hists);
@@ -2393,9 +2390,10 @@ static int setup_callchain(struct perf_evlist *evlist)
enum perf_call_graph_mode mode = CALLCHAIN_NONE;
if ((sample_type & PERF_SAMPLE_REGS_USER) &&
- (sample_type & PERF_SAMPLE_STACK_USER))
+ (sample_type & PERF_SAMPLE_STACK_USER)) {
mode = CALLCHAIN_DWARF;
- else if (sample_type & PERF_SAMPLE_BRANCH_STACK)
+ dwarf_callchain_users = true;
+ } else if (sample_type & PERF_SAMPLE_BRANCH_STACK)
mode = CALLCHAIN_LBR;
else if (sample_type & PERF_SAMPLE_CALLCHAIN)
mode = CALLCHAIN_FP;
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
index a0f7ed2b869b..4aca13f23b9d 100644
--- a/tools/perf/builtin-help.c
+++ b/tools/perf/builtin-help.c
@@ -439,7 +439,7 @@ int cmd_help(int argc, const char **argv)
#ifdef HAVE_LIBELF_SUPPORT
"probe",
#endif
-#ifdef HAVE_LIBAUDIT_SUPPORT
+#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE)
"trace",
#endif
NULL };
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 16a28547ca86..40fe919bbcf3 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -536,8 +536,7 @@ found:
sample_sw.period = sample->period;
sample_sw.time = sample->time;
perf_event__synthesize_sample(event_sw, evsel->attr.sample_type,
- evsel->attr.read_format, &sample_sw,
- false);
+ evsel->attr.read_format, &sample_sw);
build_id__mark_dso_hit(tool, event_sw, &sample_sw, evsel, machine);
return perf_event__repipe(tool, event_sw, &sample_sw, machine);
}
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 0c36f2ac6a0e..55d919dc5bc6 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -26,6 +26,9 @@
#include <sys/timerfd.h>
#endif
#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
#include <linux/kernel.h>
#include <linux/time64.h>
@@ -741,20 +744,20 @@ static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
u64 *mmap_time)
{
union perf_event *event;
- struct perf_sample sample;
+ u64 timestamp;
s64 n = 0;
int err;
*mmap_time = ULLONG_MAX;
while ((event = perf_evlist__mmap_read(kvm->evlist, idx)) != NULL) {
- err = perf_evlist__parse_sample(kvm->evlist, event, &sample);
+ err = perf_evlist__parse_sample_timestamp(kvm->evlist, event, &timestamp);
if (err) {
perf_evlist__mmap_consume(kvm->evlist, idx);
pr_err("Failed to parse sample\n");
return -1;
}
- err = perf_session__queue_event(kvm->session, event, &sample, 0);
+ err = perf_session__queue_event(kvm->session, event, timestamp, 0);
/*
* FIXME: Here we can't consume the event, as perf_session__queue_event will
* point to it, and it'll get possibly overwritten by the kernel.
@@ -768,7 +771,7 @@ static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
/* save time stamp of our first sample for this mmap */
if (n == 0)
- *mmap_time = sample.time;
+ *mmap_time = timestamp;
/* limit events per mmap handled all at once */
n++;
@@ -1044,7 +1047,7 @@ static int kvm_live_open_events(struct perf_kvm_stat *kvm)
goto out;
}
- if (perf_evlist__mmap(evlist, kvm->opts.mmap_pages, false) < 0) {
+ if (perf_evlist__mmap(evlist, kvm->opts.mmap_pages) < 0) {
ui__error("Failed to mmap the events: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
perf_evlist__close(evlist);
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 003255910c05..65681a1a292a 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -51,7 +51,6 @@
#include <signal.h>
#include <sys/mman.h>
#include <sys/wait.h>
-#include <asm/bug.h>
#include <linux/time64.h>
struct switch_output {
@@ -79,6 +78,7 @@ struct record {
bool no_buildid_cache_set;
bool buildid_all;
bool timestamp_filename;
+ bool timestamp_boundary;
struct switch_output switch_output;
unsigned long long samples;
};
@@ -301,7 +301,7 @@ static int record__mmap_evlist(struct record *rec,
struct record_opts *opts = &rec->opts;
char msg[512];
- if (perf_evlist__mmap_ex(evlist, opts->mmap_pages, false,
+ if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
opts->auxtrace_mmap_pages,
opts->auxtrace_snapshot_mode) < 0) {
if (errno == EPERM) {
@@ -372,6 +372,8 @@ try_again:
ui__error("%s\n", msg);
goto out;
}
+
+ pos->supported = true;
}
if (perf_evlist__apply_filters(evlist, &pos)) {
@@ -408,8 +410,15 @@ static int process_sample_event(struct perf_tool *tool,
{
struct record *rec = container_of(tool, struct record, tool);
- rec->samples++;
+ if (rec->evlist->first_sample_time == 0)
+ rec->evlist->first_sample_time = sample->time;
+
+ rec->evlist->last_sample_time = sample->time;
+ if (rec->buildid_all)
+ return 0;
+
+ rec->samples++;
return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
}
@@ -434,9 +443,11 @@ static int process_buildids(struct record *rec)
/*
* If --buildid-all is given, it marks all DSO regardless of hits,
- * so no need to process samples.
+ * so no need to process samples. But if timestamp_boundary is enabled,
+ * it still needs to walk all samples to get the timestamps of
+ * the first and last samples.
*/
- if (rec->buildid_all)
+ if (rec->buildid_all && !rec->timestamp_boundary)
rec->tool.sample = NULL;
return perf_session__process_events(session);
@@ -477,7 +488,7 @@ static struct perf_event_header finished_round_event = {
};
static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
- bool backward)
+ bool overwrite)
{
u64 bytes_written = rec->bytes_written;
int i;
@@ -487,18 +498,18 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
if (!evlist)
return 0;
- maps = backward ? evlist->backward_mmap : evlist->mmap;
+ maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
if (!maps)
return 0;
- if (backward && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
+ if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
return 0;
for (i = 0; i < evlist->nr_mmaps; i++) {
struct auxtrace_mmap *mm = &maps[i].auxtrace_mmap;
if (maps[i].base) {
- if (perf_mmap__push(&maps[i], evlist->overwrite, backward, rec, record__pushfn) != 0) {
+ if (perf_mmap__push(&maps[i], overwrite, rec, record__pushfn) != 0) {
rc = -1;
goto out;
}
@@ -518,7 +529,7 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
if (bytes_written != rec->bytes_written)
rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));
- if (backward)
+ if (overwrite)
perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
out:
return rc;
@@ -690,8 +701,8 @@ perf_evlist__pick_pc(struct perf_evlist *evlist)
if (evlist) {
if (evlist->mmap && evlist->mmap[0].base)
return evlist->mmap[0].base;
- if (evlist->backward_mmap && evlist->backward_mmap[0].base)
- return evlist->backward_mmap[0].base;
+ if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].base)
+ return evlist->overwrite_mmap[0].base;
}
return NULL;
}
@@ -784,6 +795,28 @@ static int record__synthesize(struct record *rec, bool tail)
perf_event__synthesize_guest_os, tool);
}
+ err = perf_event__synthesize_extra_attr(&rec->tool,
+ rec->evlist,
+ process_synthesized_event,
+ data->is_pipe);
+ if (err)
+ goto out;
+
+ err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->threads,
+ process_synthesized_event,
+ NULL);
+ if (err < 0) {
+ pr_err("Couldn't synthesize thread map.\n");
+ return err;
+ }
+
+ err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->cpus,
+ process_synthesized_event, NULL);
+ if (err < 0) {
+ pr_err("Couldn't synthesize cpu map.\n");
+ return err;
+ }
+
err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
process_synthesized_event, opts->sample_address,
opts->proc_map_timeout, 1);
@@ -1598,6 +1631,8 @@ static struct option __record_options[] = {
"Record build-id of all DSOs regardless of hits"),
OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
"append timestamp to output filename"),
+ OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
+ "Record timestamp boundary (time of first/last samples)"),
OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
&record.switch_output.set, "signal,size,time",
"Switch output when receive SIGUSR2 or cross size,time threshold",
@@ -1781,8 +1816,8 @@ int cmd_record(int argc, const char **argv)
goto out;
}
- /* Enable ignoring missing threads when -u option is defined. */
- rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX;
+ /* Enable ignoring missing threads when -u/-p option is defined. */
+ rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;
err = -ENOMEM;
if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
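
The --timestamp-boundary bookkeeping above reduces to remembering the first and most recent sample time seen. A toy standalone sketch of that logic with fabricated timestamps:

#include <stdio.h>
#include <stdint.h>

/* Toy sketch of the bookkeeping above: remember the first and most
 * recent sample time seen in a stream. */
struct bounds { uint64_t first, last; };

static void update(struct bounds *b, uint64_t t)
{
	if (b->first == 0)	/* 0 doubles as "unset", as above */
		b->first = t;
	b->last = t;
}

int main(void)
{
	struct bounds b = { 0, 0 };
	uint64_t samples[] = { 1000, 1500, 4200 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		update(&b, samples[i]);
	printf("first=%llu last=%llu\n",
	       (unsigned long long)b.first, (unsigned long long)b.last);
	return 0;
}
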
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index af5dd038195e..42a52dcc41cd 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -15,6 +15,7 @@
#include "util/color.h"
#include <linux/list.h>
#include <linux/rbtree.h>
+#include <linux/err.h>
#include "util/symbol.h"
#include "util/callchain.h"
#include "util/values.h"
@@ -51,6 +52,7 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
+#include <linux/mman.h>
struct report {
struct perf_tool tool;
@@ -60,6 +62,9 @@ struct report {
bool show_threads;
bool inverted_callchain;
bool mem_mode;
+ bool stats_mode;
+ bool tasks_mode;
+ bool mmaps_mode;
bool header;
bool header_only;
bool nonany_branch_mode;
@@ -69,7 +74,9 @@ struct report {
const char *cpu_list;
const char *symbol_filter_str;
const char *time_str;
- struct perf_time_interval ptime;
+ struct perf_time_interval *ptime_range;
+ int range_size;
+ int range_num;
float min_percent;
u64 nr_entries;
u64 queue_size;
@@ -162,12 +169,28 @@ static int hist_iter__branch_callback(struct hist_entry_iter *iter,
struct hist_entry *he = iter->he;
struct report *rep = arg;
struct branch_info *bi;
+ struct perf_sample *sample = iter->sample;
+ struct perf_evsel *evsel = iter->evsel;
+ int err;
+
+ if (!ui__has_annotation())
+ return 0;
+
+ hist__account_cycles(sample->branch_stack, al, sample,
+ rep->nonany_branch_mode);
bi = he->branch_info;
+ err = addr_map_symbol__inc_samples(&bi->from, sample, evsel->idx);
+ if (err)
+ goto out;
+
+ err = addr_map_symbol__inc_samples(&bi->to, sample, evsel->idx);
+
branch_type_count(&rep->brtype_stat, &bi->flags,
bi->from.addr, bi->to.addr);
- return 0;
+out:
+ return err;
}
static int process_sample_event(struct perf_tool *tool,
@@ -186,8 +209,10 @@ static int process_sample_event(struct perf_tool *tool,
};
int ret = 0;
- if (perf_time__skip_sample(&rep->ptime, sample->time))
+ if (perf_time__ranges_skip_sample(rep->ptime_range, rep->range_num,
+ sample->time)) {
return 0;
+ }
if (machine__resolve(machine, &al, sample) < 0) {
pr_debug("problem processing %d event, skipping it.\n",
@@ -312,9 +337,10 @@ static int report__setup_sample_type(struct report *rep)
if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain) {
if ((sample_type & PERF_SAMPLE_REGS_USER) &&
- (sample_type & PERF_SAMPLE_STACK_USER))
+ (sample_type & PERF_SAMPLE_STACK_USER)) {
callchain_param.record_mode = CALLCHAIN_DWARF;
- else if (sample_type & PERF_SAMPLE_BRANCH_STACK)
+ dwarf_callchain_users = true;
+ } else if (sample_type & PERF_SAMPLE_BRANCH_STACK)
callchain_param.record_mode = CALLCHAIN_LBR;
else
callchain_param.record_mode = CALLCHAIN_FP;
@@ -377,6 +403,9 @@ static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report
if (evname != NULL)
ret += fprintf(fp, " of event '%s'", evname);
+ if (rep->time_str)
+ ret += fprintf(fp, " (time slices: %s)", rep->time_str);
+
if (symbol_conf.show_ref_callgraph &&
strstr(evname, "call-graph=no")) {
ret += fprintf(fp, ", show reference callgraph");
@@ -567,6 +596,174 @@ static void report__output_resort(struct report *rep)
ui_progress__finish();
}
+static void stats_setup(struct report *rep)
+{
+ memset(&rep->tool, 0, sizeof(rep->tool));
+ rep->tool.no_warn = true;
+}
+
+static int stats_print(struct report *rep)
+{
+ struct perf_session *session = rep->session;
+
+ perf_session__fprintf_nr_events(session, stdout);
+ return 0;
+}
+
+static void tasks_setup(struct report *rep)
+{
+ memset(&rep->tool, 0, sizeof(rep->tool));
+ if (rep->mmaps_mode) {
+ rep->tool.mmap = perf_event__process_mmap;
+ rep->tool.mmap2 = perf_event__process_mmap2;
+ }
+ rep->tool.comm = perf_event__process_comm;
+ rep->tool.exit = perf_event__process_exit;
+ rep->tool.fork = perf_event__process_fork;
+ rep->tool.no_warn = true;
+}
+
+struct task {
+ struct thread *thread;
+ struct list_head list;
+ struct list_head children;
+};
+
+static struct task *tasks_list(struct task *task, struct machine *machine)
+{
+ struct thread *parent_thread, *thread = task->thread;
+ struct task *parent_task;
+
+ /* Already listed. */
+ if (!list_empty(&task->list))
+ return NULL;
+
+ /* Last one in the chain. */
+ if (thread->ppid == -1)
+ return task;
+
+ parent_thread = machine__find_thread(machine, -1, thread->ppid);
+ if (!parent_thread)
+ return ERR_PTR(-ENOENT);
+
+ parent_task = thread__priv(parent_thread);
+ list_add_tail(&task->list, &parent_task->children);
+ return tasks_list(parent_task, machine);
+}
+
+static size_t maps__fprintf_task(struct maps *maps, int indent, FILE *fp)
+{
+ size_t printed = 0;
+ struct rb_node *nd;
+
+ for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
+ struct map *map = rb_entry(nd, struct map, rb_node);
+
+ printed += fprintf(fp, "%*s %" PRIx64 "-%" PRIx64 " %c%c%c%c %08" PRIx64 " %" PRIu64 " %s\n",
+ indent, "", map->start, map->end,
+ map->prot & PROT_READ ? 'r' : '-',
+ map->prot & PROT_WRITE ? 'w' : '-',
+ map->prot & PROT_EXEC ? 'x' : '-',
+ map->flags & MAP_SHARED ? 's' : 'p',
+ map->pgoff,
+ map->ino, map->dso->name);
+ }
+
+ return printed;
+}
+
+static int map_groups__fprintf_task(struct map_groups *mg, int indent, FILE *fp)
+{
+ int printed = 0, i;
+ for (i = 0; i < MAP__NR_TYPES; ++i)
+ printed += maps__fprintf_task(&mg->maps[i], indent, fp);
+ return printed;
+}
+
+static void task__print_level(struct task *task, FILE *fp, int level)
+{
+ struct thread *thread = task->thread;
+ struct task *child;
+ int comm_indent = fprintf(fp, " %8d %8d %8d |%*s",
+ thread->pid_, thread->tid, thread->ppid,
+ level, "");
+
+ fprintf(fp, "%s\n", thread__comm_str(thread));
+
+ map_groups__fprintf_task(thread->mg, comm_indent, fp);
+
+ if (!list_empty(&task->children)) {
+ list_for_each_entry(child, &task->children, list)
+ task__print_level(child, fp, level + 1);
+ }
+}
+
+static int tasks_print(struct report *rep, FILE *fp)
+{
+ struct perf_session *session = rep->session;
+ struct machine *machine = &session->machines.host;
+ struct task *tasks, *task;
+ unsigned int nr = 0, itask = 0, i;
+ struct rb_node *nd;
+ LIST_HEAD(list);
+
+ /*
+ * No locking needed while accessing machine->threads,
+ * because --tasks is a single-threaded command.
+ */
+
+ /* Count all the threads. */
+ for (i = 0; i < THREADS__TABLE_SIZE; i++)
+ nr += machine->threads[i].nr;
+
+ tasks = malloc(sizeof(*tasks) * nr);
+ if (!tasks)
+ return -ENOMEM;
+
+ for (i = 0; i < THREADS__TABLE_SIZE; i++) {
+ struct threads *threads = &machine->threads[i];
+
+ for (nd = rb_first(&threads->entries); nd; nd = rb_next(nd)) {
+ task = tasks + itask++;
+
+ task->thread = rb_entry(nd, struct thread, rb_node);
+ INIT_LIST_HEAD(&task->children);
+ INIT_LIST_HEAD(&task->list);
+ thread__set_priv(task->thread, task);
+ }
+ }
+
+ /*
+ * Iterate every task down to the unprocessed parent
+ * and link all in task children list. Task with no
+ * parent is added into 'list'.
+ */
+ for (itask = 0; itask < nr; itask++) {
+ task = tasks + itask;
+
+ if (!list_empty(&task->list))
+ continue;
+
+ task = tasks_list(task, machine);
+ if (IS_ERR(task)) {
+ pr_err("Error: failed to process tasks\n");
+ free(tasks);
+ return PTR_ERR(task);
+ }
+
+ if (task)
+ list_add_tail(&task->list, &list);
+ }
+
+ fprintf(fp, "# %8s %8s %8s %s\n", "pid", "tid", "ppid", "comm");
+
+ list_for_each_entry(task, &list, list)
+ task__print_level(task, fp, 0);
+
+ free(tasks);
+ return 0;
+}
+
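
tasks_list() above links every thread under its parent and leaves only ppid == -1 roots on the top-level list, which task__print_level() then walks with one indent column per depth. A toy standalone version of that tree walk (pids and comms are made up):

#include <stdio.h>

/* Toy standalone version of the tree walk above: children hang off
 * their parent, roots have ppid == -1, and printing indents one
 * column per level. */
struct task_ent {
	int pid, ppid;
	const char *comm;
};

static const struct task_ent tasks[] = {
	{ 1,  -1, "init"     },
	{ 10,  1, "sshd"     },
	{ 11, 10, "bash"     },
	{ 2,  -1, "kthreadd" },
};
#define NTASKS (int)(sizeof(tasks) / sizeof(tasks[0]))

static void print_level(int ppid, int level)
{
	for (int i = 0; i < NTASKS; i++) {
		if (tasks[i].ppid != ppid)
			continue;
		printf(" %8d |%*s%s\n", tasks[i].pid, level, "", tasks[i].comm);
		print_level(tasks[i].pid, level + 1);
	}
}

int main(void)
{
	printf("# %8s  %s\n", "pid", "comm");
	print_level(-1, 0);
	return 0;
}
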
static int __cmd_report(struct report *rep)
{
int ret;
@@ -598,12 +795,24 @@ static int __cmd_report(struct report *rep)
return ret;
}
+ if (rep->stats_mode)
+ stats_setup(rep);
+
+ if (rep->tasks_mode)
+ tasks_setup(rep);
+
ret = perf_session__process_events(session);
if (ret) {
ui__error("failed to process sample\n");
return ret;
}
+ if (rep->stats_mode)
+ return stats_print(rep);
+
+ if (rep->tasks_mode)
+ return tasks_print(rep, stdout);
+
report__warn_kptr_restrict(rep);
evlist__for_each_entry(session->evlist, pos)
@@ -760,6 +969,9 @@ int cmd_report(int argc, const char **argv)
OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any message"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
+ OPT_BOOLEAN(0, "stats", &report.stats_mode, "Display event stats"),
+ OPT_BOOLEAN(0, "tasks", &report.tasks_mode, "Display recorded tasks"),
+ OPT_BOOLEAN(0, "mmaps", &report.mmaps_mode, "Display recorded tasks memory maps"),
OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
"file", "vmlinux pathname"),
OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
@@ -907,6 +1119,9 @@ int cmd_report(int argc, const char **argv)
report.symbol_filter_str = argv[0];
}
+ if (report.mmaps_mode)
+ report.tasks_mode = true;
+
if (quiet)
perf_quiet_option();
@@ -921,13 +1136,6 @@ int cmd_report(int argc, const char **argv)
return -EINVAL;
}
- if (report.use_stdio)
- use_browser = 0;
- else if (report.use_tui)
- use_browser = 1;
- else if (report.use_gtk)
- use_browser = 2;
-
if (report.inverted_callchain)
callchain_param.order = ORDER_CALLER;
if (symbol_conf.cumulate_callchain && !callchain_param.order_set)
@@ -1014,6 +1222,13 @@ repeat:
perf_hpp_list.need_collapse = true;
}
+ if (report.use_stdio)
+ use_browser = 0;
+ else if (report.use_tui)
+ use_browser = 1;
+ else if (report.use_gtk)
+ use_browser = 2;
+
/* Force tty output for header output and per-thread stat. */
if (report.header || report.header_only || report.show_threads)
use_browser = 0;
@@ -1021,6 +1236,12 @@ repeat:
report.tool.show_feat_hdr = SHOW_FEAT_HEADER;
if (report.show_full_info)
report.tool.show_feat_hdr = SHOW_FEAT_HEADER_FULL_INFO;
+ if (report.stats_mode || report.tasks_mode)
+ use_browser = 0;
+ if (report.stats_mode && report.tasks_mode) {
+ pr_err("Error: --tasks and --mmaps can't be used together with --stats\n");
+ goto error;
+ }
if (strcmp(input_name, "-") != 0)
setup_browser(true);
@@ -1043,7 +1264,8 @@ repeat:
ret = 0;
goto error;
}
- } else if (use_browser == 0 && !quiet) {
+ } else if (use_browser == 0 && !quiet &&
+ !report.stats_mode && !report.tasks_mode) {
fputs("# To display the perf.data header info, please use --header/--header-only options.\n#\n",
stdout);
}
@@ -1077,9 +1299,36 @@ repeat:
if (symbol__init(&session->header.env) < 0)
goto error;
- if (perf_time__parse_str(&report.ptime, report.time_str) != 0) {
- pr_err("Invalid time string\n");
- return -EINVAL;
+ report.ptime_range = perf_time__range_alloc(report.time_str,
+ &report.range_size);
+ if (!report.ptime_range) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ if (perf_time__parse_str(report.ptime_range, report.time_str) != 0) {
+ if (session->evlist->first_sample_time == 0 &&
+ session->evlist->last_sample_time == 0) {
+ pr_err("HINT: no first/last sample time found in perf data.\n"
+ "Please use latest perf binary to execute 'perf record'\n"
+ "(if '--buildid-all' is enabled, please set '--timestamp-boundary').\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ report.range_num = perf_time__percent_parse_str(
+ report.ptime_range, report.range_size,
+ report.time_str,
+ session->evlist->first_sample_time,
+ session->evlist->last_sample_time);
+
+ if (report.range_num < 0) {
+ pr_err("Invalid time string\n");
+ ret = -EINVAL;
+ goto error;
+ }
+ } else {
+ report.range_num = 1;
}
sort__setup_elide(stdout);
@@ -1092,6 +1341,8 @@ repeat:
ret = 0;
error:
+ zfree(&report.ptime_range);
+
perf_session__delete(session);
return ret;
}
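
The report path now filters against a set of time slices instead of a single interval. A standalone sketch of the multi-range skip test (the interval struct below is an assumption for illustration, not perf's perf_time_interval):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Keep a sample if its time falls inside any selected slice. */
struct interval { uint64_t start, end; };

static bool skip_sample(const struct interval *r, int n, uint64_t t)
{
	for (int i = 0; i < n; i++)
		if (t >= r[i].start && t <= r[i].end)
			return false;	/* inside a slice: keep it */
	return true;			/* outside every slice: skip */
}

int main(void)
{
	struct interval slices[] = { { 100, 200 }, { 500, 600 } };

	printf("%d %d %d\n",
	       skip_sample(slices, 2, 150),	/* 0: kept */
	       skip_sample(slices, 2, 300),	/* 1: skipped */
	       skip_sample(slices, 2, 550));	/* 0: kept */
	return 0;
}
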
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 9b43bda45a41..ab19a6ee4093 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -22,9 +22,11 @@
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/stat.h"
+#include "util/color.h"
#include "util/string2.h"
#include "util/thread-stack.h"
#include "util/time-utils.h"
+#include "util/path.h"
#include "print_binary.h"
#include <linux/bitmap.h>
#include <linux/kernel.h>
@@ -40,6 +42,7 @@
#include <sys/param.h>
#include <sys/types.h>
#include <sys/stat.h>
+#include <fcntl.h>
#include <unistd.h>
#include "sane_ctype.h"
@@ -90,6 +93,8 @@ enum perf_output_field {
PERF_OUTPUT_SYNTH = 1U << 25,
PERF_OUTPUT_PHYS_ADDR = 1U << 26,
PERF_OUTPUT_UREGS = 1U << 27,
+ PERF_OUTPUT_METRIC = 1U << 28,
+ PERF_OUTPUT_MISC = 1U << 29,
};
struct output_option {
@@ -124,6 +129,8 @@ struct output_option {
{.str = "brstackoff", .field = PERF_OUTPUT_BRSTACKOFF},
{.str = "synth", .field = PERF_OUTPUT_SYNTH},
{.str = "phys_addr", .field = PERF_OUTPUT_PHYS_ADDR},
+ {.str = "metric", .field = PERF_OUTPUT_METRIC},
+ {.str = "misc", .field = PERF_OUTPUT_MISC},
};
enum {
@@ -215,12 +222,20 @@ struct perf_evsel_script {
char *filename;
FILE *fp;
u64 samples;
+ /* For metric output */
+ u64 val;
+ int gnum;
};
+static inline struct perf_evsel_script *evsel_script(struct perf_evsel *evsel)
+{
+ return (struct perf_evsel_script *)evsel->priv;
+}
+
static struct perf_evsel_script *perf_evsel_script__new(struct perf_evsel *evsel,
struct perf_data *data)
{
- struct perf_evsel_script *es = malloc(sizeof(*es));
+ struct perf_evsel_script *es = zalloc(sizeof(*es));
if (es != NULL) {
if (asprintf(&es->filename, "%s.%s.dump", data->file.path, perf_evsel__name(evsel)) < 0)
@@ -228,7 +243,6 @@ static struct perf_evsel_script *perf_evsel_script__new(struct perf_evsel *evsel
es->fp = fopen(es->filename, "w");
if (es->fp == NULL)
goto out_free_filename;
- es->samples = 0;
}
return es;
@@ -423,11 +437,6 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel,
PERF_OUTPUT_CPU, allow_user_set))
return -EINVAL;
- if (PRINT_FIELD(PERIOD) &&
- perf_evsel__check_stype(evsel, PERF_SAMPLE_PERIOD, "PERIOD",
- PERF_OUTPUT_PERIOD))
- return -EINVAL;
-
if (PRINT_FIELD(IREGS) &&
perf_evsel__check_stype(evsel, PERF_SAMPLE_REGS_INTR, "IREGS",
PERF_OUTPUT_IREGS))
@@ -588,7 +597,8 @@ static int perf_sample__fprintf_uregs(struct perf_sample *sample,
static int perf_sample__fprintf_start(struct perf_sample *sample,
struct thread *thread,
- struct perf_evsel *evsel, FILE *fp)
+ struct perf_evsel *evsel,
+ u32 type, FILE *fp)
{
struct perf_event_attr *attr = &evsel->attr;
unsigned long secs;
@@ -618,6 +628,47 @@ static int perf_sample__fprintf_start(struct perf_sample *sample,
printed += fprintf(fp, "[%03d] ", sample->cpu);
}
+ if (PRINT_FIELD(MISC)) {
+ int ret = 0;
+
+ #define has(m) \
+ (sample->misc & PERF_RECORD_MISC_##m) == PERF_RECORD_MISC_##m
+
+ if (has(KERNEL))
+ ret += fprintf(fp, "K");
+ if (has(USER))
+ ret += fprintf(fp, "U");
+ if (has(HYPERVISOR))
+ ret += fprintf(fp, "H");
+ if (has(GUEST_KERNEL))
+ ret += fprintf(fp, "G");
+ if (has(GUEST_USER))
+ ret += fprintf(fp, "g");
+
+ switch (type) {
+ case PERF_RECORD_MMAP:
+ case PERF_RECORD_MMAP2:
+ if (has(MMAP_DATA))
+ ret += fprintf(fp, "M");
+ break;
+ case PERF_RECORD_COMM:
+ if (has(COMM_EXEC))
+ ret += fprintf(fp, "E");
+ break;
+ case PERF_RECORD_SWITCH:
+ case PERF_RECORD_SWITCH_CPU_WIDE:
+ if (has(SWITCH_OUT))
+ ret += fprintf(fp, "S");
+ default:
+ break;
+ }
+
+ #undef has
+
+ ret += fprintf(fp, "%*s", 6 - ret, " ");
+ printed += ret;
+ }
+
if (PRINT_FIELD(TIME)) {
nsecs = sample->time;
secs = nsecs / NSEC_PER_SEC;
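
The "misc" field added above condenses sample->misc into one-letter flags: K/U/H/G/g for the cpumode (kernel, user, hypervisor, guest kernel, guest user), plus M, E and S for MMAP_DATA, COMM_EXEC and SWITCH_OUT on the matching record types, padded to a fixed width. The patch tests each bit pattern with mask-equality via has(); below is a standalone sketch of just the cpumode part, written against the uapi constants and using the CPUMODE mask instead:

    #include <linux/perf_event.h>
    #include <stdio.h>

    /* Sketch: decode the cpumode bits the way 'perf script -F misc'
     * renders them; record-type letters (M/E/S) omitted. */
    static void print_misc(unsigned int misc)
    {
            switch (misc & PERF_RECORD_MISC_CPUMODE_MASK) {
            case PERF_RECORD_MISC_KERNEL:       putchar('K'); break;
            case PERF_RECORD_MISC_USER:         putchar('U'); break;
            case PERF_RECORD_MISC_HYPERVISOR:   putchar('H'); break;
            case PERF_RECORD_MISC_GUEST_KERNEL: putchar('G'); break;
            case PERF_RECORD_MISC_GUEST_USER:   putchar('g'); break;
            default: break;
            }
            putchar('\n');
    }

    int main(void)
    {
            print_misc(PERF_RECORD_MISC_KERNEL); /* prints "K" */
            return 0;
    }
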
@@ -1437,13 +1488,16 @@ struct perf_script {
bool show_mmap_events;
bool show_switch_events;
bool show_namespace_events;
+ bool show_lost_events;
bool allocated;
bool per_event_dump;
struct cpu_map *cpus;
struct thread_map *threads;
int name_width;
const char *time_str;
- struct perf_time_interval ptime;
+ struct perf_time_interval *ptime_range;
+ int range_size;
+ int range_num;
};
static int perf_evlist__max_name_len(struct perf_evlist *evlist)
@@ -1477,6 +1531,88 @@ static int data_src__fprintf(u64 data_src, FILE *fp)
return fprintf(fp, "%-*s", maxlen, out);
}
+struct metric_ctx {
+ struct perf_sample *sample;
+ struct thread *thread;
+ struct perf_evsel *evsel;
+ FILE *fp;
+};
+
+static void script_print_metric(void *ctx, const char *color,
+ const char *fmt,
+ const char *unit, double val)
+{
+ struct metric_ctx *mctx = ctx;
+
+ if (!fmt)
+ return;
+ perf_sample__fprintf_start(mctx->sample, mctx->thread, mctx->evsel,
+ PERF_RECORD_SAMPLE, mctx->fp);
+ fputs("\tmetric: ", mctx->fp);
+ if (color)
+ color_fprintf(mctx->fp, color, fmt, val);
+ else
+ fprintf(mctx->fp, fmt, val); /* keep metric output on the per-event FILE, not stdout */
+ fprintf(mctx->fp, " %s\n", unit);
+}
+
+static void script_new_line(void *ctx)
+{
+ struct metric_ctx *mctx = ctx;
+
+ perf_sample__fprintf_start(mctx->sample, mctx->thread, mctx->evsel,
+ PERF_RECORD_SAMPLE, mctx->fp);
+ fputs("\tmetric: ", mctx->fp);
+}
+
+static void perf_sample__fprint_metric(struct perf_script *script,
+ struct thread *thread,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ FILE *fp)
+{
+ struct perf_stat_output_ctx ctx = {
+ .print_metric = script_print_metric,
+ .new_line = script_new_line,
+ .ctx = &(struct metric_ctx) {
+ .sample = sample,
+ .thread = thread,
+ .evsel = evsel,
+ .fp = fp,
+ },
+ .force_header = false,
+ };
+ struct perf_evsel *ev2;
+ static bool init;
+ u64 val;
+
+ if (!init) {
+ perf_stat__init_shadow_stats();
+ init = true;
+ }
+ if (!evsel->stats)
+ perf_evlist__alloc_stats(script->session->evlist, false);
+ if (evsel_script(evsel->leader)->gnum++ == 0)
+ perf_stat__reset_shadow_stats();
+ val = sample->period * evsel->scale;
+ perf_stat__update_shadow_stats(evsel,
+ val,
+ sample->cpu,
+ &rt_stat);
+ evsel_script(evsel)->val = val;
+ if (evsel_script(evsel->leader)->gnum == evsel->leader->nr_members) {
+ for_each_group_member (ev2, evsel->leader) {
+ perf_stat__print_shadow_stats(ev2,
+ evsel_script(ev2)->val,
+ sample->cpu,
+ &ctx,
+ NULL,
+ &rt_stat);
+ }
+ evsel_script(evsel->leader)->gnum = 0;
+ }
+}
+
static void process_event(struct perf_script *script,
struct perf_sample *sample, struct perf_evsel *evsel,
struct addr_location *al,
@@ -1493,7 +1629,8 @@ static void process_event(struct perf_script *script,
++es->samples;
- perf_sample__fprintf_start(sample, thread, evsel, fp);
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_SAMPLE, fp);
if (PRINT_FIELD(PERIOD))
fprintf(fp, "%10" PRIu64 " ", sample->period);
@@ -1564,6 +1701,9 @@ static void process_event(struct perf_script *script,
if (PRINT_FIELD(PHYS_ADDR))
fprintf(fp, "%16" PRIx64, sample->phys_addr);
fprintf(fp, "\n");
+
+ if (PRINT_FIELD(METRIC))
+ perf_sample__fprint_metric(script, thread, evsel, sample, fp);
}
static struct scripting_ops *scripting_ops;
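
perf_sample__fprint_metric() above implements a gather-then-emit scheme: each sample contributes its scaled period to the shadow stats, the leader's gnum counts how many group members have reported, and only once the whole group has been seen are the 'perf stat'-style metrics printed and the counter reset. The same pattern in isolation, with hypothetical names:

    #include <stdio.h>

    /* Hypothetical gather-then-emit: buffer one value per group member,
     * emit a derived metric once the group is complete, then reset. */
    struct group {
            int nr_members;
            int seen;
            double vals[8];
    };

    static void group_add(struct group *g, int member, double val)
    {
            g->vals[member] = val;
            if (++g->seen < g->nr_members)
                    return;

            if (g->vals[1] != 0.0) /* e.g. instructions / cycles */
                    printf("metric: %.2f\n", g->vals[0] / g->vals[1]);
            g->seen = 0;
    }

    int main(void)
    {
            struct group g = { .nr_members = 2 };

            group_add(&g, 0, 300.0);
            group_add(&g, 1, 100.0); /* prints "metric: 3.00" */
            return 0;
    }
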
@@ -1643,8 +1783,10 @@ static int process_sample_event(struct perf_tool *tool,
struct perf_script *scr = container_of(tool, struct perf_script, tool);
struct addr_location al;
- if (perf_time__skip_sample(&scr->ptime, sample->time))
+ if (perf_time__ranges_skip_sample(scr->ptime_range, scr->range_num,
+ sample->time)) {
return 0;
+ }
if (debug_mode) {
if (sample->time < last_timestamp) {
@@ -1737,7 +1879,8 @@ static int process_comm_event(struct perf_tool *tool,
sample->tid = event->comm.tid;
sample->pid = event->comm.pid;
}
- perf_sample__fprintf_start(sample, thread, evsel, stdout);
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_COMM, stdout);
perf_event__fprintf(event, stdout);
ret = 0;
out:
@@ -1772,7 +1915,8 @@ static int process_namespaces_event(struct perf_tool *tool,
sample->tid = event->namespaces.tid;
sample->pid = event->namespaces.pid;
}
- perf_sample__fprintf_start(sample, thread, evsel, stdout);
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_NAMESPACES, stdout);
perf_event__fprintf(event, stdout);
ret = 0;
out:
@@ -1805,7 +1949,8 @@ static int process_fork_event(struct perf_tool *tool,
sample->tid = event->fork.tid;
sample->pid = event->fork.pid;
}
- perf_sample__fprintf_start(sample, thread, evsel, stdout);
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_FORK, stdout);
perf_event__fprintf(event, stdout);
thread__put(thread);
@@ -1834,7 +1979,8 @@ static int process_exit_event(struct perf_tool *tool,
sample->tid = event->fork.tid;
sample->pid = event->fork.pid;
}
- perf_sample__fprintf_start(sample, thread, evsel, stdout);
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_EXIT, stdout);
perf_event__fprintf(event, stdout);
if (perf_event__process_exit(tool, event, sample, machine) < 0)
@@ -1869,7 +2015,8 @@ static int process_mmap_event(struct perf_tool *tool,
sample->tid = event->mmap.tid;
sample->pid = event->mmap.pid;
}
- perf_sample__fprintf_start(sample, thread, evsel, stdout);
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_MMAP, stdout);
perf_event__fprintf(event, stdout);
thread__put(thread);
return 0;
@@ -1900,7 +2047,8 @@ static int process_mmap2_event(struct perf_tool *tool,
sample->tid = event->mmap2.tid;
sample->pid = event->mmap2.pid;
}
- perf_sample__fprintf_start(sample, thread, evsel, stdout);
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_MMAP2, stdout);
perf_event__fprintf(event, stdout);
thread__put(thread);
return 0;
@@ -1926,7 +2074,31 @@ static int process_switch_event(struct perf_tool *tool,
return -1;
}
- perf_sample__fprintf_start(sample, thread, evsel, stdout);
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_SWITCH, stdout);
+ perf_event__fprintf(event, stdout);
+ thread__put(thread);
+ return 0;
+}
+
+static int
+process_lost_event(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct perf_script *script = container_of(tool, struct perf_script, tool);
+ struct perf_session *session = script->session;
+ struct perf_evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
+ struct thread *thread;
+
+ thread = machine__findnew_thread(machine, sample->pid,
+ sample->tid);
+ if (thread == NULL)
+ return -1;
+
+ perf_sample__fprintf_start(sample, thread, evsel,
+ PERF_RECORD_LOST, stdout);
perf_event__fprintf(event, stdout);
thread__put(thread);
return 0;
@@ -2026,6 +2198,8 @@ static int __cmd_script(struct perf_script *script)
script->tool.context_switch = process_switch_event;
if (script->show_namespace_events)
script->tool.namespaces = process_namespaces_event;
+ if (script->show_lost_events)
+ script->tool.lost = process_lost_event;
if (perf_script__setup_per_event_dump(script)) {
pr_err("Couldn't create the per event dump files\n");
@@ -2311,19 +2485,6 @@ out:
return rc;
}
-/* Helper function for filesystems that return a dent->d_type DT_UNKNOWN */
-static int is_directory(const char *base_path, const struct dirent *dent)
-{
- char path[PATH_MAX];
- struct stat st;
-
- sprintf(path, "%s/%s", base_path, dent->d_name);
- if (stat(path, &st))
- return 0;
-
- return S_ISDIR(st.st_mode);
-}
-
#define for_each_lang(scripts_path, scripts_dir, lang_dirent) \
while ((lang_dirent = readdir(scripts_dir)) != NULL) \
if ((lang_dirent->d_type == DT_DIR || \
@@ -2758,9 +2919,10 @@ static void script__setup_sample_type(struct perf_script *script)
if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain) {
if ((sample_type & PERF_SAMPLE_REGS_USER) &&
- (sample_type & PERF_SAMPLE_STACK_USER))
+ (sample_type & PERF_SAMPLE_STACK_USER)) {
callchain_param.record_mode = CALLCHAIN_DWARF;
- else if (sample_type & PERF_SAMPLE_BRANCH_STACK)
+ dwarf_callchain_users = true;
+ } else if (sample_type & PERF_SAMPLE_BRANCH_STACK)
callchain_param.record_mode = CALLCHAIN_LBR;
else
callchain_param.record_mode = CALLCHAIN_FP;
@@ -2975,6 +3137,8 @@ int cmd_script(int argc, const char **argv)
"Show context switch events (if recorded)"),
OPT_BOOLEAN('\0', "show-namespace-events", &script.show_namespace_events,
"Show namespace events (if recorded)"),
+ OPT_BOOLEAN('\0', "show-lost-events", &script.show_lost_events,
+ "Show lost events (if recorded)"),
OPT_BOOLEAN('\0', "per-event-dump", &script.per_event_dump,
"Dump trace output to files named by the monitored events"),
OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"),
@@ -3281,18 +3445,46 @@ int cmd_script(int argc, const char **argv)
if (err < 0)
goto out_delete;
- /* needs to be parsed after looking up reference time */
- if (perf_time__parse_str(&script.ptime, script.time_str) != 0) {
- pr_err("Invalid time string\n");
- err = -EINVAL;
+ script.ptime_range = perf_time__range_alloc(script.time_str,
+ &script.range_size);
+ if (!script.ptime_range) {
+ err = -ENOMEM;
goto out_delete;
}
+ /* needs to be parsed after looking up reference time */
+ if (perf_time__parse_str(script.ptime_range, script.time_str) != 0) {
+ if (session->evlist->first_sample_time == 0 &&
+ session->evlist->last_sample_time == 0) {
+ pr_err("HINT: no first/last sample time found in perf data.\n"
+ "Please use latest perf binary to execute 'perf record'\n"
+ "(if '--buildid-all' is enabled, please set '--timestamp-boundary').\n");
+ err = -EINVAL;
+ goto out_delete;
+ }
+
+ script.range_num = perf_time__percent_parse_str(
+ script.ptime_range, script.range_size,
+ script.time_str,
+ session->evlist->first_sample_time,
+ session->evlist->last_sample_time);
+
+ if (script.range_num < 0) {
+ pr_err("Invalid time string\n");
+ err = -EINVAL;
+ goto out_delete;
+ }
+ } else {
+ script.range_num = 1;
+ }
+
err = __cmd_script(&script);
flush_scripting();
out_delete:
+ zfree(&script.ptime_range);
+
perf_evlist__free_stats(session->evlist);
perf_session__delete(session);
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 59af5a8419e2..98bf9d32f222 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -63,7 +63,6 @@
#include "util/group.h"
#include "util/session.h"
#include "util/tool.h"
-#include "util/group.h"
#include "util/string2.h"
#include "util/metricgroup.h"
#include "asm/bug.h"
@@ -214,8 +213,13 @@ static inline void diff_timespec(struct timespec *r, struct timespec *a,
static void perf_stat__reset_stats(void)
{
+ int i;
+
perf_evlist__reset_stats(evsel_list);
perf_stat__reset_shadow_stats();
+
+ for (i = 0; i < stat_config.stats_num; i++)
+ perf_stat__reset_shadow_per_stat(&stat_config.stats[i]);
}
static int create_perf_stat_counter(struct perf_evsel *evsel)
@@ -272,7 +276,7 @@ static int create_perf_stat_counter(struct perf_evsel *evsel)
attr->enable_on_exec = 1;
}
- if (target__has_cpu(&target))
+ if (target__has_cpu(&target) && !target__has_per_thread(&target))
return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel));
return perf_evsel__open_per_thread(evsel, evsel_list->threads);
@@ -335,7 +339,7 @@ static int read_counter(struct perf_evsel *counter)
int nthreads = thread_map__nr(evsel_list->threads);
int ncpus, cpu, thread;
- if (target__has_cpu(&target))
+ if (target__has_cpu(&target) && !target__has_per_thread(&target))
ncpus = perf_evsel__nr_cpus(counter);
else
ncpus = 1;
@@ -458,19 +462,8 @@ static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *inf
workload_exec_errno = info->si_value.sival_int;
}
-static bool has_unit(struct perf_evsel *counter)
-{
- return counter->unit && *counter->unit;
-}
-
-static bool has_scale(struct perf_evsel *counter)
-{
- return counter->scale != 1;
-}
-
static int perf_stat_synthesize_config(bool is_pipe)
{
- struct perf_evsel *counter;
int err;
if (is_pipe) {
@@ -482,53 +475,10 @@ static int perf_stat_synthesize_config(bool is_pipe)
}
}
- /*
- * Synthesize other events stuff not carried within
- * attr event - unit, scale, name
- */
- evlist__for_each_entry(evsel_list, counter) {
- if (!counter->supported)
- continue;
-
- /*
- * Synthesize unit and scale only if it's defined.
- */
- if (has_unit(counter)) {
- err = perf_event__synthesize_event_update_unit(NULL, counter, process_synthesized_event);
- if (err < 0) {
- pr_err("Couldn't synthesize evsel unit.\n");
- return err;
- }
- }
-
- if (has_scale(counter)) {
- err = perf_event__synthesize_event_update_scale(NULL, counter, process_synthesized_event);
- if (err < 0) {
- pr_err("Couldn't synthesize evsel scale.\n");
- return err;
- }
- }
-
- if (counter->own_cpus) {
- err = perf_event__synthesize_event_update_cpus(NULL, counter, process_synthesized_event);
- if (err < 0) {
- pr_err("Couldn't synthesize evsel scale.\n");
- return err;
- }
- }
-
- /*
- * Name is needed only for pipe output,
- * perf.data carries event names.
- */
- if (is_pipe) {
- err = perf_event__synthesize_event_update_name(NULL, counter, process_synthesized_event);
- if (err < 0) {
- pr_err("Couldn't synthesize evsel name.\n");
- return err;
- }
- }
- }
+ err = perf_event__synthesize_extra_attr(NULL,
+ evsel_list,
+ process_synthesized_event,
+ is_pipe);
err = perf_event__synthesize_thread_map2(NULL, evsel_list->threads,
process_synthesized_event,
@@ -1151,7 +1101,8 @@ static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg)
}
static void printout(int id, int nr, struct perf_evsel *counter, double uval,
- char *prefix, u64 run, u64 ena, double noise)
+ char *prefix, u64 run, u64 ena, double noise,
+ struct runtime_stat *st)
{
struct perf_stat_output_ctx out;
struct outstate os = {
@@ -1244,7 +1195,7 @@ static void printout(int id, int nr, struct perf_evsel *counter, double uval,
perf_stat__print_shadow_stats(counter, uval,
first_shadow_cpu(counter, id),
- &out, &metric_events);
+ &out, &metric_events, st);
if (!csv_output && !metric_only) {
print_noise(counter, noise);
print_running(run, ena);
@@ -1268,7 +1219,8 @@ static void aggr_update_shadow(void)
val += perf_counts(counter->counts, cpu, 0)->val;
}
perf_stat__update_shadow_stats(counter, val,
- first_shadow_cpu(counter, id));
+ first_shadow_cpu(counter, id),
+ &rt_stat);
}
}
}
@@ -1388,7 +1340,8 @@ static void print_aggr(char *prefix)
fprintf(output, "%s", prefix);
uval = val * counter->scale;
- printout(id, nr, counter, uval, prefix, run, ena, 1.0);
+ printout(id, nr, counter, uval, prefix, run, ena, 1.0,
+ &rt_stat);
if (!metric_only)
fputc('\n', output);
}
@@ -1397,13 +1350,24 @@ static void print_aggr(char *prefix)
}
}
-static void print_aggr_thread(struct perf_evsel *counter, char *prefix)
+static int cmp_val(const void *a, const void *b)
{
- FILE *output = stat_config.output;
- int nthreads = thread_map__nr(counter->threads);
- int ncpus = cpu_map__nr(counter->cpus);
- int cpu, thread;
+ return ((struct perf_aggr_thread_value *)b)->val -
+ ((struct perf_aggr_thread_value *)a)->val;
+}
+
+static struct perf_aggr_thread_value *sort_aggr_thread(
+ struct perf_evsel *counter,
+ int nthreads, int ncpus,
+ int *ret)
+{
+ int cpu, thread, i = 0;
double uval;
+ struct perf_aggr_thread_value *buf;
+
+ buf = calloc(nthreads, sizeof(struct perf_aggr_thread_value));
+ if (!buf)
+ return NULL;
for (thread = 0; thread < nthreads; thread++) {
u64 ena = 0, run = 0, val = 0;
@@ -1414,13 +1378,63 @@ static void print_aggr_thread(struct perf_evsel *counter, char *prefix)
run += perf_counts(counter->counts, cpu, thread)->run;
}
+ uval = val * counter->scale;
+
+ /*
+ * Skip value 0 when enabling --per-thread globally,
+ * otherwise too many 0 output.
+ */
+ if (uval == 0.0 && target__has_per_thread(&target))
+ continue;
+
+ buf[i].counter = counter;
+ buf[i].id = thread;
+ buf[i].uval = uval;
+ buf[i].val = val;
+ buf[i].run = run;
+ buf[i].ena = ena;
+ i++;
+ }
+
+ qsort(buf, i, sizeof(struct perf_aggr_thread_value), cmp_val);
+
+ if (ret)
+ *ret = i;
+
+ return buf;
+}
+
+static void print_aggr_thread(struct perf_evsel *counter, char *prefix)
+{
+ FILE *output = stat_config.output;
+ int nthreads = thread_map__nr(counter->threads);
+ int ncpus = cpu_map__nr(counter->cpus);
+ int thread, sorted_threads, id;
+ struct perf_aggr_thread_value *buf;
+
+ buf = sort_aggr_thread(counter, nthreads, ncpus, &sorted_threads);
+ if (!buf) {
+ perror("cannot sort aggr thread");
+ return;
+ }
+
+ for (thread = 0; thread < sorted_threads; thread++) {
if (prefix)
fprintf(output, "%s", prefix);
- uval = val * counter->scale;
- printout(thread, 0, counter, uval, prefix, run, ena, 1.0);
+ id = buf[thread].id;
+ if (stat_config.stats)
+ printout(id, 0, buf[thread].counter, buf[thread].uval,
+ prefix, buf[thread].run, buf[thread].ena, 1.0,
+ &stat_config.stats[id]);
+ else
+ printout(id, 0, buf[thread].counter, buf[thread].uval,
+ prefix, buf[thread].run, buf[thread].ena, 1.0,
+ &rt_stat);
fputc('\n', output);
}
+
+ free(buf);
}
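
One caveat with cmp_val() above: it returns the difference of two u64 counter values truncated to int, which orders descending for small counts but can yield the wrong sign once the values differ by more than INT_MAX. A sketch of a comparator that stays exact at any magnitude:

    #include <stdint.h>
    #include <stdlib.h>

    struct value { uint64_t val; };

    /* Descending order without subtraction: the result is exact
     * regardless of how far apart the two values are. */
    static int cmp_val_safe(const void *a, const void *b)
    {
            uint64_t va = ((const struct value *)a)->val;
            uint64_t vb = ((const struct value *)b)->val;

            return (vb > va) - (vb < va);
    }

    int main(void)
    {
            struct value v[] = { {1}, {UINT64_MAX}, {42} };

            qsort(v, 3, sizeof(v[0]), cmp_val_safe);
            /* v is now { UINT64_MAX, 42, 1 } */
            return 0;
    }
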
struct caggr_data {
@@ -1455,7 +1469,8 @@ static void print_counter_aggr(struct perf_evsel *counter, char *prefix)
fprintf(output, "%s", prefix);
uval = cd.avg * counter->scale;
- printout(-1, 0, counter, uval, prefix, cd.avg_running, cd.avg_enabled, cd.avg);
+ printout(-1, 0, counter, uval, prefix, cd.avg_running, cd.avg_enabled,
+ cd.avg, &rt_stat);
if (!metric_only)
fprintf(output, "\n");
}
@@ -1494,7 +1509,8 @@ static void print_counter(struct perf_evsel *counter, char *prefix)
fprintf(output, "%s", prefix);
uval = val * counter->scale;
- printout(cpu, 0, counter, uval, prefix, run, ena, 1.0);
+ printout(cpu, 0, counter, uval, prefix, run, ena, 1.0,
+ &rt_stat);
fputc('\n', output);
}
@@ -1526,7 +1542,8 @@ static void print_no_aggr_metric(char *prefix)
run = perf_counts(counter->counts, cpu, 0)->run;
uval = val * counter->scale;
- printout(cpu, 0, counter, uval, prefix, run, ena, 1.0);
+ printout(cpu, 0, counter, uval, prefix, run, ena, 1.0,
+ &rt_stat);
}
fputc('\n', stat_config.output);
}
@@ -1582,7 +1599,8 @@ static void print_metric_headers(const char *prefix, bool no_indent)
perf_stat__print_shadow_stats(counter, 0,
0,
&out,
- &metric_events);
+ &metric_events,
+ &rt_stat);
}
fputc('\n', stat_config.output);
}
@@ -2541,6 +2559,35 @@ int process_cpu_map_event(struct perf_tool *tool,
return set_maps(st);
}
+static int runtime_stat_new(struct perf_stat_config *config, int nthreads)
+{
+ int i;
+
+ config->stats = calloc(nthreads, sizeof(struct runtime_stat));
+ if (!config->stats)
+ return -1;
+
+ config->stats_num = nthreads;
+
+ for (i = 0; i < nthreads; i++)
+ runtime_stat__init(&config->stats[i]);
+
+ return 0;
+}
+
+static void runtime_stat_delete(struct perf_stat_config *config)
+{
+ int i;
+
+ if (!config->stats)
+ return;
+
+ for (i = 0; i < config->stats_num; i++)
+ runtime_stat__exit(&config->stats[i]);
+
+ free(config->stats);
+}
+
static const char * const stat_report_usage[] = {
"perf stat report [<options>]",
NULL,
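
With --per-thread on a system-wide target, stat now keeps one runtime_stat per thread-map entry (runtime_stat_new() above) and print_aggr_thread() indexes it by thread id, falling back to the global rt_stat accumulator when no per-thread array was allocated. That selection logic, sketched generically:

    #include <stdio.h>
    #include <stdlib.h>

    struct runtime_stat { double sum; };

    static struct runtime_stat rt_stat; /* global accumulator */

    /* Hypothetical selector mirroring print_aggr_thread(): use the
     * per-thread slot when an array was allocated, else the global one. */
    static struct runtime_stat *stat_for_thread(struct runtime_stat *stats,
                                                int nr, int id)
    {
            return (stats && id < nr) ? &stats[id] : &rt_stat;
    }

    int main(void)
    {
            struct runtime_stat *per_thread = calloc(4, sizeof(*per_thread));

            if (!per_thread)
                    return 1;
            stat_for_thread(per_thread, 4, 2)->sum += 1.0;
            stat_for_thread(NULL, 0, 2)->sum += 1.0; /* falls back to rt_stat */
            printf("%.0f %.0f\n", per_thread[2].sum, rt_stat.sum); /* 1 1 */
            free(per_thread);
            return 0;
    }
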
@@ -2750,12 +2797,16 @@ int cmd_stat(int argc, const char **argv)
run_count = 1;
}
- if ((stat_config.aggr_mode == AGGR_THREAD) && !target__has_task(&target)) {
- fprintf(stderr, "The --per-thread option is only available "
- "when monitoring via -p -t options.\n");
- parse_options_usage(NULL, stat_options, "p", 1);
- parse_options_usage(NULL, stat_options, "t", 1);
- goto out;
+ if ((stat_config.aggr_mode == AGGR_THREAD) &&
+ !target__has_task(&target)) {
+ if (!target.system_wide || target.cpu_list) {
+ fprintf(stderr, "The --per-thread option is only "
+ "available when monitoring via -p -t -a "
+ "options or only --per-thread.\n");
+ parse_options_usage(NULL, stat_options, "p", 1);
+ parse_options_usage(NULL, stat_options, "t", 1);
+ goto out;
+ }
}
/*
@@ -2779,6 +2830,9 @@ int cmd_stat(int argc, const char **argv)
target__validate(&target);
+ if ((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide))
+ target.per_thread = true;
+
if (perf_evlist__create_maps(evsel_list, &target) < 0) {
if (target__has_task(&target)) {
pr_err("Problems finding threads of monitor\n");
@@ -2796,8 +2850,15 @@ int cmd_stat(int argc, const char **argv)
* Initialize thread_map with comm names,
* so we could print it out on output.
*/
- if (stat_config.aggr_mode == AGGR_THREAD)
+ if (stat_config.aggr_mode == AGGR_THREAD) {
thread_map__read_comms(evsel_list->threads);
+ if (target.system_wide) {
+ if (runtime_stat_new(&stat_config,
+ thread_map__nr(evsel_list->threads))) {
+ goto out;
+ }
+ }
+ }
if (interval && interval < 100) {
if (interval < 10) {
@@ -2887,5 +2948,8 @@ out:
sysfs__write_int(FREEZE_ON_SMI_PATH, 0);
perf_evlist__delete(evsel_list);
+
+ runtime_stat_delete(&stat_config);
+
return status;
}
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 9e0d2645ae13..c6ccda52117d 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -99,6 +99,7 @@ static void perf_top__resize(struct perf_top *top)
static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
{
+ struct perf_evsel *evsel = hists_to_evsel(he->hists);
struct symbol *sym;
struct annotation *notes;
struct map *map;
@@ -137,7 +138,7 @@ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
return err;
}
- err = symbol__disassemble(sym, map, NULL, 0, NULL, NULL);
+ err = symbol__annotate(sym, map, evsel, 0, NULL);
if (err == 0) {
out_assign:
top->sym_filter_entry = he;
@@ -229,6 +230,7 @@ static void perf_top__record_precise_ip(struct perf_top *top,
static void perf_top__show_details(struct perf_top *top)
{
struct hist_entry *he = top->sym_filter_entry;
+ struct perf_evsel *evsel = hists_to_evsel(he->hists);
struct annotation *notes;
struct symbol *symbol;
int more;
@@ -241,6 +243,8 @@ static void perf_top__show_details(struct perf_top *top)
pthread_mutex_lock(&notes->lock);
+ symbol__calc_percent(symbol, evsel);
+
if (notes->src == NULL)
goto out_unlock;
@@ -412,7 +416,7 @@ static void perf_top__print_mapped_keys(struct perf_top *top)
fprintf(stdout, "\t[S] stop annotation.\n");
fprintf(stdout,
- "\t[K] hide kernel_symbols symbols. \t(%s)\n",
+ "\t[K] hide kernel symbols. \t(%s)\n",
top->hide_kernel_symbols ? "yes" : "no");
fprintf(stdout,
"\t[U] hide user symbols. \t(%s)\n",
@@ -903,7 +907,7 @@ try_again:
}
}
- if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
+ if (perf_evlist__mmap(evlist, opts->mmap_pages) < 0) {
ui__error("Failed to mmap with %d (%s)\n",
errno, str_error_r(errno, msg, sizeof(msg)));
goto out_err;
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 84debdbad327..17d11deeb88d 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -21,6 +21,7 @@
#include "builtin.h"
#include "util/color.h"
#include "util/debug.h"
+#include "util/env.h"
#include "util/event.h"
#include "util/evlist.h"
#include <subcmd/exec-cmd.h>
@@ -45,18 +46,17 @@
#include <errno.h>
#include <inttypes.h>
-#include <libaudit.h> /* FIXME: Still needed for audit_errno_to_name */
#include <poll.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <linux/err.h>
#include <linux/filter.h>
-#include <linux/audit.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/stringify.h>
#include <linux/time64.h>
+#include <fcntl.h>
#include "sane_ctype.h"
@@ -111,6 +111,7 @@ struct trace {
bool summary;
bool summary_only;
bool show_comm;
+ bool print_sample;
bool show_tool_stats;
bool trace_syscalls;
bool kernel_syscallchains;
@@ -545,9 +546,10 @@ static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
{ .scnprintf = SCA_STRARRAY, \
.parm = &strarray__##array, }
+#include "trace/beauty/arch_errno_names.c"
#include "trace/beauty/eventfd.c"
-#include "trace/beauty/flock.c"
#include "trace/beauty/futex_op.c"
+#include "trace/beauty/futex_val3.c"
#include "trace/beauty/mmap.c"
#include "trace/beauty/mode_t.c"
#include "trace/beauty/msg_flags.c"
@@ -610,7 +612,8 @@ static struct syscall_fmt {
{ .name = "fstat", .alias = "newfstat", },
{ .name = "fstatat", .alias = "newfstatat", },
{ .name = "futex",
- .arg = { [1] = { .scnprintf = SCA_FUTEX_OP, /* op */ }, }, },
+ .arg = { [1] = { .scnprintf = SCA_FUTEX_OP, /* op */ },
+ [5] = { .scnprintf = SCA_FUTEX_VAL3, /* val3 */ }, }, },
{ .name = "futimesat",
.arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
{ .name = "getitimer",
@@ -622,6 +625,7 @@ static struct syscall_fmt {
.arg = { [2] = { .scnprintf = SCA_GETRANDOM_FLAGS, /* flags */ }, }, },
{ .name = "getrlimit",
.arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
+ { .name = "gettid", .errpid = true, },
{ .name = "ioctl",
.arg = {
#if defined(__i386__) || defined(__x86_64__)
@@ -819,7 +823,7 @@ static size_t fprintf_duration(unsigned long t, bool calculated, FILE *fp)
size_t printed = fprintf(fp, "(");
if (!calculated)
- printed += fprintf(fp, " ? ");
+ printed += fprintf(fp, " ");
else if (duration >= 1.0)
printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
else if (duration >= 0.01)
@@ -1554,10 +1558,9 @@ static void thread__update_stats(struct thread_trace *ttrace,
update_stats(stats, duration);
}
-static int trace__printf_interrupted_entry(struct trace *trace, struct perf_sample *sample)
+static int trace__printf_interrupted_entry(struct trace *trace)
{
struct thread_trace *ttrace;
- u64 duration;
size_t printed;
if (trace->current == NULL)
@@ -1568,15 +1571,30 @@ static int trace__printf_interrupted_entry(struct trace *trace, struct perf_samp
if (!ttrace->entry_pending)
return 0;
- duration = sample->time - ttrace->entry_time;
-
- printed = trace__fprintf_entry_head(trace, trace->current, duration, true, ttrace->entry_time, trace->output);
+ printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output);
printed += fprintf(trace->output, "%-70s) ...\n", ttrace->entry_str);
ttrace->entry_pending = false;
return printed;
}
+static int trace__fprintf_sample(struct trace *trace, struct perf_evsel *evsel,
+ struct perf_sample *sample, struct thread *thread)
+{
+ int printed = 0;
+
+ if (trace->print_sample) {
+ double ts = (double)sample->time / NSEC_PER_MSEC;
+
+ printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n",
+ perf_evsel__name(evsel), ts,
+ thread__comm_str(thread),
+ sample->pid, sample->tid, sample->cpu);
+ }
+
+ return printed;
+}
+
static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
union perf_event *event __maybe_unused,
struct perf_sample *sample)
@@ -1597,6 +1615,8 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
if (ttrace == NULL)
goto out_put;
+ trace__fprintf_sample(trace, evsel, sample, thread);
+
args = perf_evsel__sc_tp_ptr(evsel, args, sample);
if (ttrace->entry_str == NULL) {
@@ -1606,7 +1626,7 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
}
if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
- trace__printf_interrupted_entry(trace, sample);
+ trace__printf_interrupted_entry(trace);
ttrace->entry_time = sample->time;
msg = ttrace->entry_str;
@@ -1643,7 +1663,7 @@ static int trace__resolve_callchain(struct trace *trace, struct perf_evsel *evse
struct addr_location al;
if (machine__resolve(trace->host, &al, sample) < 0 ||
- thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, trace->max_stack))
+ thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, evsel->attr.sample_max_stack))
return -1;
return 0;
@@ -1659,6 +1679,14 @@ static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sam
return sample__fprintf_callchain(sample, 38, print_opts, &callchain_cursor, trace->output);
}
+static const char *errno_to_name(struct perf_evsel *evsel, int err)
+{
+ struct perf_env *env = perf_evsel__env(evsel);
+ const char *arch_name = perf_env__arch(env);
+
+ return arch_syscalls__strerrno(arch_name, err);
+}
+
static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
union perf_event *event __maybe_unused,
struct perf_sample *sample)
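
errno_to_name() above drops the libaudit dependency: instead of audit_errno_to_name(), the errno-to-name tables are generated from each architecture's uapi errno headers (the same headers added to check-headers.sh below), and arch_syscalls__strerrno() picks the table matching the architecture recorded in the perf.data environment. A sketch of that lookup shape, with a hypothetical single table:

    #include <stdio.h>

    /* Hypothetical table; the real ones are generated per architecture
     * from the uapi errno headers. */
    static const char *errno_names[] = {
            [1] = "EPERM", [2] = "ENOENT", [3] = "ESRCH",
    };

    #define NR_NAMES (sizeof(errno_names) / sizeof(errno_names[0]))

    static const char *strerrno_sketch(const char *arch, int err)
    {
            (void)arch; /* a real dispatcher would select a per-arch table */
            if (err <= 0 || (unsigned int)err >= NR_NAMES || !errno_names[err])
                    return "(unknown)";
            return errno_names[err];
    }

    int main(void)
    {
            printf("%s\n", strerrno_sketch("x86", 2)); /* ENOENT */
            return 0;
    }
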
@@ -1679,6 +1707,8 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
if (ttrace == NULL)
goto out_put;
+ trace__fprintf_sample(trace, evsel, sample, thread);
+
if (trace->summary)
thread__update_stats(ttrace, id, sample);
@@ -1729,7 +1759,7 @@ signed_print:
errno_print: {
char bf[STRERR_BUFSIZE];
const char *emsg = str_error_r(-ret, bf, sizeof(bf)),
- *e = audit_errno_to_name(-ret);
+ *e = errno_to_name(evsel, -ret);
fprintf(trace->output, ") = -1 %s %s", e, emsg);
}
@@ -1910,7 +1940,7 @@ static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel,
}
}
- trace__printf_interrupted_entry(trace, sample);
+ trace__printf_interrupted_entry(trace);
trace__fprintf_tstamp(trace, sample->time, trace->output);
if (trace->trace_syscalls)
@@ -2221,6 +2251,9 @@ static int trace__add_syscall_newtp(struct trace *trace)
if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret))
goto out_delete_sys_exit;
+ perf_evsel__config_callchain(sys_enter, &trace->opts, &callchain_param);
+ perf_evsel__config_callchain(sys_exit, &trace->opts, &callchain_param);
+
perf_evlist__add(evlist, sys_enter);
perf_evlist__add(evlist, sys_exit);
@@ -2317,6 +2350,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
pgfault_maj = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ);
if (pgfault_maj == NULL)
goto out_error_mem;
+ perf_evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
perf_evlist__add(evlist, pgfault_maj);
}
@@ -2324,6 +2358,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
pgfault_min = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN);
if (pgfault_min == NULL)
goto out_error_mem;
+ perf_evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
perf_evlist__add(evlist, pgfault_min);
}
@@ -2344,45 +2379,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
goto out_delete_evlist;
}
- perf_evlist__config(evlist, &trace->opts, NULL);
-
- if (callchain_param.enabled) {
- bool use_identifier = false;
-
- if (trace->syscalls.events.sys_exit) {
- perf_evsel__config_callchain(trace->syscalls.events.sys_exit,
- &trace->opts, &callchain_param);
- use_identifier = true;
- }
-
- if (pgfault_maj) {
- perf_evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
- use_identifier = true;
- }
-
- if (pgfault_min) {
- perf_evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
- use_identifier = true;
- }
-
- if (use_identifier) {
- /*
- * Now we have evsels with different sample_ids, use
- * PERF_SAMPLE_IDENTIFIER to map from sample to evsel
- * from a fixed position in each ring buffer record.
- *
- * As of this the changeset introducing this comment, this
- * isn't strictly needed, as the fields that can come before
- * PERF_SAMPLE_ID are all used, but we'll probably disable
- * some of those for things like copying the payload of
- * pointer syscall arguments, and for vfs_getname we don't
- * need PERF_SAMPLE_ADDR and PERF_SAMPLE_IP, so do this
- * here as a warning we need to use PERF_SAMPLE_IDENTIFIER.
- */
- perf_evlist__set_sample_bit(evlist, IDENTIFIER);
- perf_evlist__reset_sample_bit(evlist, ID);
- }
- }
+ perf_evlist__config(evlist, &trace->opts, &callchain_param);
signal(SIGCHLD, sig_handler);
signal(SIGINT, sig_handler);
@@ -2437,7 +2434,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
if (err < 0)
goto out_error_apply_filters;
- err = perf_evlist__mmap(evlist, trace->opts.mmap_pages, false);
+ err = perf_evlist__mmap(evlist, trace->opts.mmap_pages);
if (err < 0)
goto out_error_mmap;
@@ -2455,6 +2452,18 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
trace->multiple_threads = thread_map__pid(evlist->threads, 0) == -1 ||
evlist->threads->nr > 1 ||
perf_evlist__first(evlist)->attr.inherit;
+
+ /*
+ * Now that we already used evsel->attr to ask the kernel to setup the
+ * events, lets reuse evsel->attr.sample_max_stack as the limit in
+ * trace__resolve_callchain(), allowing per-event max-stack settings
+ * to override an explicitly set --max-stack global setting.
+ */
+ evlist__for_each_entry(evlist, evsel) {
+ if ((evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN) &&
+ evsel->attr.sample_max_stack == 0)
+ evsel->attr.sample_max_stack = trace->max_stack;
+ }
again:
before = trace->nr_events;
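
The loop above makes attr.sample_max_stack the single source of truth for callchain depth: events that requested callchains but set no per-event max-stack inherit the global --max-stack value, so a per-event setting always wins. The precedence rule restated in isolation:

    #include <stdio.h>

    /* Hypothetical restatement: a non-zero per-event max-stack
     * overrides the global --max-stack. */
    static unsigned int effective_max_stack(unsigned int per_event,
                                            unsigned int global)
    {
            return per_event ? per_event : global;
    }

    int main(void)
    {
            printf("%u\n", effective_max_stack(0, 127));  /* 127: inherits global */
            printf("%u\n", effective_max_stack(16, 127)); /* 16: per-event wins */
            return 0;
    }
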
@@ -3046,6 +3055,8 @@ int cmd_trace(int argc, const char **argv)
"Set the maximum stack depth when parsing the callchain, "
"anything beyond the specified depth will be ignored. "
"Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
+ OPT_BOOLEAN(0, "print-sample", &trace.print_sample,
+ "print the PERF_RECORD_SAMPLE PERF_SAMPLE_ info, for debugging"),
OPT_UINTEGER(0, "proc-map-timeout", &trace.opts.proc_map_timeout,
"per thread proc mmap processing timeout in ms"),
OPT_UINTEGER('D', "delay", &trace.opts.initial_delay,
@@ -3097,8 +3108,9 @@ int cmd_trace(int argc, const char **argv)
}
#ifdef HAVE_DWARF_UNWIND_SUPPORT
- if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled && trace.trace_syscalls)
+ if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) {
record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
+ }
#endif
if (callchain_param.enabled) {
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index 3e64f10b6d66..51abdb0a4047 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -33,21 +33,30 @@ arch/s390/include/uapi/asm/kvm.h
arch/s390/include/uapi/asm/kvm_perf.h
arch/s390/include/uapi/asm/ptrace.h
arch/s390/include/uapi/asm/sie.h
+arch/s390/include/uapi/asm/unistd.h
arch/arm/include/uapi/asm/kvm.h
arch/arm64/include/uapi/asm/kvm.h
+arch/alpha/include/uapi/asm/errno.h
+arch/mips/include/asm/errno.h
+arch/mips/include/uapi/asm/errno.h
+arch/parisc/include/uapi/asm/errno.h
+arch/powerpc/include/uapi/asm/errno.h
+arch/sparc/include/uapi/asm/errno.h
+arch/x86/include/uapi/asm/errno.h
include/asm-generic/bitops/arch_hweight.h
include/asm-generic/bitops/const_hweight.h
include/asm-generic/bitops/__fls.h
include/asm-generic/bitops/fls.h
include/asm-generic/bitops/fls64.h
include/linux/coresight-pmu.h
+include/uapi/asm-generic/errno.h
+include/uapi/asm-generic/errno-base.h
include/uapi/asm-generic/ioctls.h
include/uapi/asm-generic/mman-common.h
'
check () {
file=$1
- opts="--ignore-blank-lines --ignore-space-change"
shift
while [ -n "$*" ]; do
diff --git a/tools/perf/perf-completion.sh b/tools/perf/perf-completion.sh
index 345f5d6e9ed5..fdf75d45efff 100644
--- a/tools/perf/perf-completion.sh
+++ b/tools/perf/perf-completion.sh
@@ -162,8 +162,37 @@ __perf_main ()
# List possible events for -e option
elif [[ $prev == @("-e"|"--event") &&
$prev_skip_opts == @(record|stat|top) ]]; then
- evts=$($cmd list --raw-dump)
- __perfcomp_colon "$evts" "$cur"
+
+ local cur1=${COMP_WORDS[COMP_CWORD]}
+ local raw_evts=$($cmd list --raw-dump)
+ local arr s tmp result
+
+ if [[ "$cur1" == */* && ${cur1#*/} =~ ^[A-Z] ]]; then
+ OLD_IFS="$IFS"
+ IFS=" "
+ arr=($raw_evts)
+ IFS="$OLD_IFS"
+
+ for s in ${arr[@]}
+ do
+ if [[ "$s" == *cpu/* ]]; then
+ tmp=${s#*cpu/}
+ result=$result" ""cpu/"${tmp^^}
+ else
+ result=$result" "$s
+ fi
+ done
+
+ evts=${result}" "$(ls /sys/bus/event_source/devices/cpu/events)
+ else
+ evts=${raw_evts}" "$(ls /sys/bus/event_source/devices/cpu/events)
+ fi
+
+ if [[ "$cur1" == , ]]; then
+ __perfcomp_colon "$evts" ""
+ else
+ __perfcomp_colon "$evts" "$cur1"
+ fi
else
# List subcommands for perf commands
if [[ $prev_skip_opts == @(kvm|kmem|mem|lock|sched|
@@ -246,11 +275,21 @@ fi
type perf &>/dev/null &&
_perf()
{
+ if [[ "$COMP_WORDBREAKS" != *,* ]]; then
+ COMP_WORDBREAKS="${COMP_WORDBREAKS},"
+ export COMP_WORDBREAKS
+ fi
+
+ if [[ "$COMP_WORDBREAKS" == *:* ]]; then
+ COMP_WORDBREAKS="${COMP_WORDBREAKS/:/}"
+ export COMP_WORDBREAKS
+ fi
+
local cur words cword prev
if [ $preload_get_comp_words_by_ref = "true" ]; then
- _get_comp_words_by_ref -n =: cur words cword prev
+ _get_comp_words_by_ref -n =:, cur words cword prev
else
- __perf_get_comp_words_by_ref -n =: cur words cword prev
+ __perf_get_comp_words_by_ref -n =:, cur words cword prev
fi
__perf_main
} &&
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 62b13518bc6e..1b3fc8ec0fa2 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -73,7 +73,7 @@ static struct cmd_struct commands[] = {
{ "lock", cmd_lock, 0 },
{ "kvm", cmd_kvm, 0 },
{ "test", cmd_test, 0 },
-#ifdef HAVE_LIBAUDIT_SUPPORT
+#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE)
{ "trace", cmd_trace, 0 },
#endif
{ "inject", cmd_inject, 0 },
@@ -485,7 +485,7 @@ int main(int argc, const char **argv)
argv[0] = cmd;
}
if (strstarts(cmd, "trace")) {
-#ifdef HAVE_LIBAUDIT_SUPPORT
+#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE)
setup_path();
argv[0] = "trace";
return cmd_trace(argc, argv);
diff --git a/tools/perf/pmu-events/arch/arm64/cavium/thunderx2-imp-def.json b/tools/perf/pmu-events/arch/arm64/cavium/thunderx2-imp-def.json
new file mode 100644
index 000000000000..2db45c40ebc7
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/cavium/thunderx2-imp-def.json
@@ -0,0 +1,62 @@
+[
+ {
+ "PublicDescription": "Attributable Level 1 data cache access, read",
+ "EventCode": "0x40",
+ "EventName": "l1d_cache_rd",
+ "BriefDescription": "L1D cache read",
+ },
+ {
+ "PublicDescription": "Attributable Level 1 data cache access, write ",
+ "EventCode": "0x41",
+ "EventName": "l1d_cache_wr",
+ "BriefDescription": "L1D cache write",
+ },
+ {
+ "PublicDescription": "Attributable Level 1 data cache refill, read",
+ "EventCode": "0x42",
+ "EventName": "l1d_cache_refill_rd",
+ "BriefDescription": "L1D cache refill read",
+ },
+ {
+ "PublicDescription": "Attributable Level 1 data cache refill, write",
+ "EventCode": "0x43",
+ "EventName": "l1d_cache_refill_wr",
+ "BriefDescription": "L1D refill write",
+ },
+ {
+ "PublicDescription": "Attributable Level 1 data TLB refill, read",
+ "EventCode": "0x4C",
+ "EventName": "l1d_tlb_refill_rd",
+ "BriefDescription": "L1D tlb refill read",
+ },
+ {
+ "PublicDescription": "Attributable Level 1 data TLB refill, write",
+ "EventCode": "0x4D",
+ "EventName": "l1d_tlb_refill_wr",
+ "BriefDescription": "L1D tlb refill write",
+ },
+ {
+ "PublicDescription": "Attributable Level 1 data or unified TLB access, read",
+ "EventCode": "0x4E",
+ "EventName": "l1d_tlb_rd",
+ "BriefDescription": "L1D tlb read",
+ },
+ {
+ "PublicDescription": "Attributable Level 1 data or unified TLB access, write",
+ "EventCode": "0x4F",
+ "EventName": "l1d_tlb_wr",
+ "BriefDescription": "L1D tlb write",
+ },
+ {
+ "PublicDescription": "Bus access read",
+ "EventCode": "0x60",
+ "EventName": "bus_access_rd",
+ "BriefDescription": "Bus access read",
+ },
+ {
+ "PublicDescription": "Bus access write",
+ "EventCode": "0x61",
+ "EventName": "bus_access_wr",
+ "BriefDescription": "Bus access write",
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/mapfile.csv b/tools/perf/pmu-events/arch/arm64/mapfile.csv
new file mode 100644
index 000000000000..219d6756134e
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/mapfile.csv
@@ -0,0 +1,15 @@
+# Format:
+# MIDR,Version,JSON/file/pathname,Type
+#
+# where
+# MIDR Processor version
+# Variant[23:20] and Revision [3:0] should be zero.
+# Version could be used to track the version of the JSON file
+# but currently unused.
+# JSON/file/pathname is the path to JSON file, relative
+# to tools/perf/pmu-events/arch/arm64/.
+# Type is core, uncore, etc.
+#
+#
+#Family-model,Version,Filename,EventType
+0x00000000420f5160,v1,cavium,core
diff --git a/tools/perf/pmu-events/arch/powerpc/mapfile.csv b/tools/perf/pmu-events/arch/powerpc/mapfile.csv
index a0f3a11ca19f..229150e7ab7d 100644
--- a/tools/perf/pmu-events/arch/powerpc/mapfile.csv
+++ b/tools/perf/pmu-events/arch/powerpc/mapfile.csv
@@ -13,13 +13,5 @@
#
# Power8 entries
-004b0000,1,power8,core
-004b0201,1,power8,core
-004c0000,1,power8,core
-004d0000,1,power8,core
-004d0100,1,power8,core
-004d0200,1,power8,core
-004c0100,1,power8,core
-004e0100,1,power9,core
-004e0200,1,power9,core
-004e1200,1,power9,core
+004[bcd][[:xdigit:]]{4},1,power8,core
+004e[[:xdigit:]]{4},1,power9,core
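
The powerpc mapfile above collapses ten per-PVR rows into two POSIX extended regexes; the map lookup treats the first CSV column as a pattern to match against the cpuid string from the perf.data header. A sketch of that matching, assuming regcomp()/regexec() semantics (anchoring elided):

    #include <regex.h>
    #include <stdio.h>

    /* Sketch of the mapfile lookup: compile the first CSV column as an
     * extended regex and test it against a cpuid string. */
    static int cpuid_matches(const char *pattern, const char *cpuid)
    {
            regex_t re;
            int hit;

            if (regcomp(&re, pattern, REG_EXTENDED | REG_NOSUB))
                    return 0;
            hit = regexec(&re, cpuid, 0, NULL, 0) == 0;
            regfree(&re);
            return hit;
    }

    int main(void)
    {
            printf("%d\n", cpuid_matches("004[bcd][[:xdigit:]]{4}", "004d0200")); /* 1 */
            printf("%d\n", cpuid_matches("004e[[:xdigit:]]{4}", "004d0200"));     /* 0 */
            return 0;
    }
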
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/cache.json b/tools/perf/pmu-events/arch/powerpc/power9/cache.json
index 18f6645f2897..7945c5196c43 100644
--- a/tools/perf/pmu-events/arch/powerpc/power9/cache.json
+++ b/tools/perf/pmu-events/arch/powerpc/power9/cache.json
@@ -125,11 +125,6 @@
"BriefDescription": "Finish stall because the NTF instruction was a larx waiting to be satisfied"
},
{,
- "EventCode": "0x3006C",
- "EventName": "PM_RUN_CYC_SMT2_MODE",
- "BriefDescription": "Cycles in which this thread's run latch is set and the core is in SMT2 mode"
- },
- {,
"EventCode": "0x1C058",
"EventName": "PM_DTLB_MISS_16G",
"BriefDescription": "Data TLB Miss page size 16G"
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/frontend.json b/tools/perf/pmu-events/arch/powerpc/power9/frontend.json
index c63a919eda98..bd8361b5fd6a 100644
--- a/tools/perf/pmu-events/arch/powerpc/power9/frontend.json
+++ b/tools/perf/pmu-events/arch/powerpc/power9/frontend.json
@@ -1,10 +1,5 @@
[
{,
- "EventCode": "0x3E15C",
- "EventName": "PM_MRK_L2_TM_ST_ABORT_SISTER",
- "BriefDescription": "TM marked store abort for this thread"
- },
- {,
"EventCode": "0x25044",
"EventName": "PM_IPTEG_FROM_L31_MOD",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's L3 on the same chip due to a instruction side request"
@@ -369,4 +364,4 @@
"EventName": "PM_IPTEG_FROM_L31_ECO_MOD",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's ECO L3 on the same chip due to a instruction side request"
}
-]
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/marked.json b/tools/perf/pmu-events/arch/powerpc/power9/marked.json
index b9df54fb37e3..22f9f32060a8 100644
--- a/tools/perf/pmu-events/arch/powerpc/power9/marked.json
+++ b/tools/perf/pmu-events/arch/powerpc/power9/marked.json
@@ -1,10 +1,5 @@
[
{,
- "EventCode": "0x3C052",
- "EventName": "PM_DATA_SYS_PUMP_MPRED",
- "BriefDescription": "Final Pump Scope (system) mispredicted. Either the original scope was too small (Chip/Group) or the original scope was System and it should have been smaller. Counts for a demand load"
- },
- {,
"EventCode": "0x3013E",
"EventName": "PM_MRK_STALL_CMPLU_CYC",
"BriefDescription": "Number of cycles the marked instruction is experiencing a stall while it is next to complete (NTC)"
@@ -255,6 +250,11 @@
"BriefDescription": "A Page Directory Entry was reloaded to a level 1 page walk cache from the core's L3 data cache"
},
{,
+ "EventCode": "0x3C052",
+ "EventName": "PM_DATA_SYS_PUMP_MPRED",
+ "BriefDescription": "Final Pump Scope (system) mispredicted. Either the original scope was too small (Chip/Group) or the original scope was System and it should have been smaller. Counts for a demand load"
+ },
+ {,
"EventCode": "0x4D142",
"EventName": "PM_MRK_DATA_FROM_L3",
"BriefDescription": "The processor's data cache was reloaded from local core's L3 due to a marked load"
@@ -435,21 +435,6 @@
"BriefDescription": "ITLB Reloaded. Counts 1 per ITLB miss for HPT but multiple for radix depending on number of levels traveresed"
},
{,
- "EventCode": "0x2D024",
- "EventName": "PM_RADIX_PWC_L2_HIT",
- "BriefDescription": "A radix translation attempt missed in the TLB but hit on both the first and second levels of page walk cache."
- },
- {,
- "EventCode": "0x3F056",
- "EventName": "PM_RADIX_PWC_L3_HIT",
- "BriefDescription": "A radix translation attempt missed in the TLB but hit on the first, second, and third levels of page walk cache."
- },
- {,
- "EventCode": "0x4E014",
- "EventName": "PM_TM_TX_PASS_RUN_INST",
- "BriefDescription": "Run instructions spent in successful transactions"
- },
- {,
"EventCode": "0x1E044",
"EventName": "PM_DPTEG_FROM_L3_NO_CONFLICT",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 without conflict due to a data side request. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
@@ -644,4 +629,4 @@
"EventName": "PM_MRK_BR_MPRED_CMPL",
"BriefDescription": "Marked Branch Mispredicted"
}
-]
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/other.json b/tools/perf/pmu-events/arch/powerpc/power9/other.json
index 54cc3be00fc2..5ce312973f1e 100644
--- a/tools/perf/pmu-events/arch/powerpc/power9/other.json
+++ b/tools/perf/pmu-events/arch/powerpc/power9/other.json
@@ -80,6 +80,11 @@
"BriefDescription": "A radix translation attempt missed in the TLB and all levels of page walk cache."
},
{,
+ "EventCode": "0x26882",
+ "EventName": "PM_L2_DC_INV",
+ "BriefDescription": "D-cache invalidates sent over the reload bus to the core"
+ },
+ {,
"EventCode": "0x24048",
"EventName": "PM_INST_FROM_LMEM",
"BriefDescription": "The processor's Instruction cache was reloaded from the local chip's Memory due to an instruction fetch (not prefetch)"
@@ -95,11 +100,6 @@
"BriefDescription": "Number of TM transactions that passed"
},
{,
- "EventCode": "0xD1A0",
- "EventName": "PM_MRK_LSU_FLUSH_LHS",
- "BriefDescription": "Effective Address alias flush : no EA match but Real Address match. If the data has not yet been returned for this load, the instruction will just be rejected, but if it has returned data, it will be flushed"
- },
- {,
"EventCode": "0xF088",
"EventName": "PM_LSU0_STORE_REJECT",
"BriefDescription": "All internal store rejects cause the instruction to go back to the SRQ and go to sleep until woken up to try again after the condition has been met"
@@ -127,7 +127,7 @@
{,
"EventCode": "0xD08C",
"EventName": "PM_LSU2_LDMX_FIN",
- "BriefDescription": "New P9 instruction LDMX. The definition of this new PMU event is (from the ldmx RFC02491): The thread has executed an ldmx instruction that accessed a doubleword that contains an effective address within an enabled section of the Load Monitored region. This event, therefore, should not occur if the FSCR has disabled the load monitored facility (FSCR[52]) or disabled the EBB facility (FSCR[56])"
+ "BriefDescription": "New P9 instruction LDMX. The definition of this new PMU event is (from the ldmx RFC02491): The thread has executed an ldmx instruction that accessed a doubleword that contains an effective address within an enabled section of the Load Monitored region. This event, therefore, should not occur if the FSCR has disabled the load monitored facility (FSCR[52]) or disabled the EBB facility (FSCR[56])."
},
{,
"EventCode": "0x300F8",
@@ -205,11 +205,6 @@
"BriefDescription": "Duration in cycles to reload with Modified (M) data from another core's ECO L3 on the same chip due to a marked load"
},
{,
- "EventCode": "0xF0B4",
- "EventName": "PM_DC_PREF_CONS_ALLOC",
- "BriefDescription": "Prefetch stream allocated in the conservative phase by either the hardware prefetch mechanism or software prefetch"
- },
- {,
"EventCode": "0xF894",
"EventName": "PM_LSU3_L1_CAM_CANCEL",
"BriefDescription": "ls3 l1 tm cam cancel"
@@ -220,21 +215,11 @@
"BriefDescription": "Dispatch Flush: TLBIE"
},
{,
- "EventCode": "0xD1A4",
- "EventName": "PM_MRK_LSU_FLUSH_SAO",
- "BriefDescription": "A load-hit-load condition with Strong Address Ordering will have address compare disabled and flush"
- },
- {,
"EventCode": "0x4E11E",
"EventName": "PM_MRK_DATA_FROM_DMEM_CYC",
"BriefDescription": "Duration in cycles to reload from another chip's memory on the same Node or Group (Distant) due to a marked load"
},
{,
- "EventCode": "0x5894",
- "EventName": "PM_LWSYNC",
- "BriefDescription": "Lwsync instruction decoded and transferred"
- },
- {,
"EventCode": "0x14156",
"EventName": "PM_MRK_DATA_FROM_L2_CYC",
"BriefDescription": "Duration in cycles to reload from local core's L2 due to a marked load"
@@ -245,11 +230,6 @@
"BriefDescription": "Read clearing SC"
},
{,
- "EventCode": "0x50A0",
- "EventName": "PM_HWSYNC",
- "BriefDescription": "Hwsync instruction decoded and transferred"
- },
- {,
"EventCode": "0x168B0",
"EventName": "PM_L3_P1_NODE_PUMP",
"BriefDescription": "L3 PF sent with nodal scope port 1, counts even retried requests"
@@ -265,6 +245,11 @@
"BriefDescription": "The processor's data cache was reloaded from local core's L2 with load hit store conflict due to a marked load"
},
{,
+ "EventCode": "0x468AE",
+ "EventName": "PM_L3_P3_CO_RTY",
+ "BriefDescription": "L3 CO received retry port 3 (memory only), every retry counted"
+ },
+ {,
"EventCode": "0x460A8",
"EventName": "PM_SN_HIT",
"BriefDescription": "Any port snooper hit L3. Up to 4 can happen in a cycle but we only count 1"
@@ -280,11 +265,6 @@
"BriefDescription": "Prefetch stream allocated by the hardware prefetch mechanism"
},
{,
- "EventCode": "0xF0BC",
- "EventName": "PM_LS2_UNALIGNED_ST",
- "BriefDescription": "Store instructions whose data crosses a double-word boundary, which causes it to require an additional slice than than what normally would be required of the Store of that size. If the Store wraps from slice 3 to slice 0, thee is an additional 3-cycle penalty"
- },
- {,
"EventCode": "0xD0AC",
"EventName": "PM_SRQ_SYNC_CYC",
"BriefDescription": "A sync is in the S2Q (edge detect to count)"
@@ -380,26 +360,11 @@
"BriefDescription": "Cycles in which this thread's run latch is set and the core is in SMT4 mode"
},
{,
- "EventCode": "0x5088",
- "EventName": "PM_DECODE_FUSION_OP_PRESERV",
- "BriefDescription": "Destructive op operand preservation"
- },
- {,
"EventCode": "0x1D14E",
"EventName": "PM_MRK_DATA_FROM_OFF_CHIP_CACHE_CYC",
"BriefDescription": "Duration in cycles to reload either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to a marked load"
},
{,
- "EventCode": "0x509C",
- "EventName": "PM_FORCED_NOP",
- "BriefDescription": "Instruction was forced to execute as a nop because it was found to behave like a nop (have no effect) at decode time"
- },
- {,
- "EventCode": "0xC098",
- "EventName": "PM_LS2_UNALIGNED_LD",
- "BriefDescription": "Load instructions whose data crosses a double-word boundary, which causes it to require an additional slice than than what normally would be required of the load of that size. If the load wraps from slice 3 to slice 0, thee is an additional 3-cycle penalty"
- },
- {,
"EventCode": "0x20058",
"EventName": "PM_DARQ1_10_12_ENTRIES",
"BriefDescription": "Cycles in which 10 or more DARQ1 entries (out of 12) are in use"
@@ -435,11 +400,6 @@
"BriefDescription": "All internal store rejects cause the instruction to go back to the SRQ and go to sleep until woken up to try again after the condition has been met"
},
{,
- "EventCode": "0x4505E",
- "EventName": "PM_FLOP_CMPL",
- "BriefDescription": "Floating Point Operation Finished"
- },
- {,
"EventCode": "0x1D144",
"EventName": "PM_MRK_DATA_FROM_L3_DISP_CONFLICT",
"BriefDescription": "The processor's data cache was reloaded from local core's L3 with dispatch conflict due to a marked load"
@@ -480,14 +440,9 @@
"BriefDescription": "XL-form branch was mispredicted due to the predicted target address missing from EAT. The EAT forces a mispredict in this case since there is no predicated target to validate. This is a rare case that may occur when the EAT is full and a branch is issued"
},
{,
- "EventCode": "0xC094",
- "EventName": "PM_LS0_UNALIGNED_LD",
- "BriefDescription": "Load instructions whose data crosses a double-word boundary, which causes it to require an additional slice than than what normally would be required of the load of that size. If the load wraps from slice 3 to slice 0, thee is an additional 3-cycle penalty"
- },
- {,
- "EventCode": "0xF8BC",
- "EventName": "PM_LS3_UNALIGNED_ST",
- "BriefDescription": "Store instructions whose data crosses a double-word boundary, which causes it to require an additional slice than than what normally would be required of the Store of that size. If the Store wraps from slice 3 to slice 0, thee is an additional 3-cycle penalty"
+ "EventCode": "0x460AE",
+ "EventName": "PM_L3_P2_CO_RTY",
+ "BriefDescription": "L3 CO received retry port 2 (memory only), every retry counted"
},
{,
"EventCode": "0x58B0",
@@ -505,11 +460,6 @@
"BriefDescription": "TM Store (fav or non-fav) ran into conflict (failed)"
},
{,
- "EventCode": "0xD998",
- "EventName": "PM_MRK_LSU_FLUSH_EMSH",
- "BriefDescription": "An ERAT miss was detected after a set-p hit. Erat tracker indicates fail due to tlbmiss and the instruction gets flushed because the instruction was working on the wrong address"
- },
- {,
"EventCode": "0xF8A0",
"EventName": "PM_NON_DATA_STORE",
"BriefDescription": "All ops that drain from s2q to L2 and contain no data"
@@ -525,11 +475,6 @@
"BriefDescription": "Unconditional Branch Completed. HW branch prediction was not used for this branch. This can be an I-form branch, a B-form branch with BO-field set to branch always, or a B-form branch which was covenrted to a Resolve."
},
{,
- "EventCode": "0x1F056",
- "EventName": "PM_RADIX_PWC_L1_HIT",
- "BriefDescription": "A radix translation attempt missed in the TLB and only the first level page walk cache was a hit."
- },
- {,
"EventCode": "0xF8A8",
"EventName": "PM_DC_PREF_FUZZY_CONF",
"BriefDescription": "A demand load referenced a line in an active fuzzy prefetch stream. The stream could have been allocated through the hardware prefetch mechanism or through software.Fuzzy stream confirm (out of order effects, or pf cant keep up)"
@@ -545,6 +490,11 @@
"BriefDescription": "Load tm L1 miss"
},
{,
+ "EventCode": "0xC880",
+ "EventName": "PM_LS1_LD_VECTOR_FIN",
+ "BriefDescription": ""
+ },
+ {,
"EventCode": "0x2894",
"EventName": "PM_TM_OUTER_TEND",
"BriefDescription": "Completion time outer tend"
@@ -565,21 +515,11 @@
"BriefDescription": "Marked derat reload (miss) for any page size"
},
{,
- "EventCode": "0x160A0",
- "EventName": "PM_L3_PF_MISS_L3",
- "BriefDescription": "L3 PF missed in L3"
- },
- {,
"EventCode": "0x1C04A",
"EventName": "PM_DATA_FROM_RL2L3_SHR",
"BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a demand load"
},
{,
- "EventCode": "0xD99C",
- "EventName": "PM_MRK_LSU_FLUSH_UE",
- "BriefDescription": "Correctable ECC error on reload data, reported at critical data forward time"
- },
- {,
"EventCode": "0x268B0",
"EventName": "PM_L3_P1_GRP_PUMP",
"BriefDescription": "L3 PF sent with grp scope port 1, counts even retried requests"
@@ -630,11 +570,6 @@
"BriefDescription": "addrs only req to L2 only on the first one,Indication that Load footprint is not expanding"
},
{,
- "EventCode": "0x5884",
- "EventName": "PM_DECODE_LANES_NOT_AVAIL",
- "BriefDescription": "Decode has something to transmit but dispatch lanes are not available"
- },
- {,
"EventCode": "0x3C042",
"EventName": "PM_DATA_FROM_L3_DISP_CONFLICT",
"BriefDescription": "The processor's data cache was reloaded from local core's L3 with dispatch conflict due to a demand load"
@@ -690,9 +625,9 @@
"BriefDescription": "False LHS match detected"
},
{,
- "EventCode": "0xD9A4",
- "EventName": "PM_MRK_LSU_FLUSH_LARX_STCX",
- "BriefDescription": "A larx is flushed because an older larx has an LMQ reservation for the same thread. A stcx is flushed because an older stcx is in the LMQ. The flush happens when the older larx/stcx relaunches"
+ "EventCode": "0xF0B0",
+ "EventName": "PM_L3_LD_PREF",
+ "BriefDescription": "L3 load prefetch, sourced from a hardware or software stream, was sent to the nest"
},
{,
"EventCode": "0x4D012",
@@ -715,9 +650,9 @@
"BriefDescription": "All successful Ld/St dispatches for this thread that were an L2 miss (excludes i_l2mru_tch_reqs)"
},
{,
- "EventCode": "0xF8B8",
- "EventName": "PM_LS1_UNALIGNED_ST",
- "BriefDescription": "Store instructions whose data crosses a double-word boundary, which causes it to require an additional slice than than what normally would be required of the Store of that size. If the Store wraps from slice 3 to slice 0, thee is an additional 3-cycle penalty"
+ "EventCode": "0x160A0",
+ "EventName": "PM_L3_PF_MISS_L3",
+ "BriefDescription": "L3 PF missed in L3"
},
{,
"EventCode": "0x408C",
@@ -765,11 +700,6 @@
"BriefDescription": "Completion time nested tend"
},
{,
- "EventCode": "0x36084",
- "EventName": "PM_L2_RCST_DISP",
- "BriefDescription": "All D-side store dispatch attempts for this thread"
- },
- {,
"EventCode": "0x368A0",
"EventName": "PM_L3_PF_OFF_CHIP_CACHE",
"BriefDescription": "L3 PF from Off chip cache"
@@ -830,11 +760,6 @@
"BriefDescription": "Rotating sample of 16 snoop valids"
},
{,
- "EventCode": "0x16084",
- "EventName": "PM_L2_RCLD_DISP",
- "BriefDescription": "All I-or-D side load dispatch attempts for this thread (excludes i_l2mru_tch_reqs)"
- },
- {,
"EventCode": "0x1608C",
"EventName": "PM_RC0_BUSY",
"BriefDescription": "RC mach 0 Busy. Used by PMU to sample ave RC lifetime (mach0 used as sample point)"
@@ -842,7 +767,7 @@
{,
"EventCode": "0x36082",
"EventName": "PM_L2_LD_DISP",
- "BriefDescription": "All successful I-or-D side load dispatches for this thread (excludes i_l2mru_tch_reqs)."
+ "BriefDescription": "All successful I-or-D side load dispatches for this thread (excludes i_l2mru_tch_reqs)"
},
{,
"EventCode": "0xF8B0",
@@ -905,11 +830,6 @@
"BriefDescription": "Instruction prefetch requests"
},
{,
- "EventCode": "0xC898",
- "EventName": "PM_LS3_UNALIGNED_LD",
- "BriefDescription": "Load instructions whose data crosses a double-word boundary, which causes it to require an additional slice than than what normally would be required of the load of that size. If the load wraps from slice 3 to slice 0, thee is an additional 3-cycle penalty"
- },
- {,
"EventCode": "0x488C",
"EventName": "PM_IC_PREF_WRITE",
"BriefDescription": "Instruction prefetch written into IL1"
@@ -1017,7 +937,7 @@
{,
"EventCode": "0x3E05E",
"EventName": "PM_L3_CO_MEPF",
- "BriefDescription": "L3 castouts in Mepf state for this thread"
+ "BriefDescription": "L3 CO of line in Mep state (includes casthrough to memory). The Mepf state indicates that a line was brought in to satisfy an L3 prefetch request"
},
{,
"EventCode": "0x460A2",
@@ -1205,11 +1125,6 @@
"BriefDescription": "Non transactional conflict from LSU, gets reported to TEXASR"
},
{,
- "EventCode": "0xD198",
- "EventName": "PM_MRK_LSU_FLUSH_ATOMIC",
- "BriefDescription": "Quad-word loads (lq) are considered atomic because they always span at least 2 slices. If a snoop or store from another thread changes the data the load is accessing between the 2 or 3 pieces of the lq instruction, the lq will be flushed"
- },
- {,
"EventCode": "0x201E0",
"EventName": "PM_MRK_DATA_FROM_MEMORY",
"BriefDescription": "The processor's data cache was reloaded from a memory location including L4 from local remote or distant due to a marked load"
@@ -1295,11 +1210,6 @@
"BriefDescription": "Ict empty for this thread due to dispatch holds because the History Buffer was full. Could be GPR/VSR/VMR/FPR/CR/XVF; CR; XVF (XER/VSCR/FPSCR)"
},
{,
- "EventCode": "0xC894",
- "EventName": "PM_LS1_UNALIGNED_LD",
- "BriefDescription": "Load instructions whose data crosses a double-word boundary, which causes it to require an additional slice than than what normally would be required of the load of that size. If the load wraps from slice 3 to slice 0, thee is an additional 3-cycle penalty"
- },
- {,
"EventCode": "0x360A2",
"EventName": "PM_L3_L2_CO_HIT",
"BriefDescription": "L2 CO hits"
@@ -1325,11 +1235,6 @@
"BriefDescription": "L2 Castouts - Shared (Tx,Sx)"
},
{,
- "EventCode": "0xD884",
- "EventName": "PM_LSU3_SET_MPRED",
- "BriefDescription": "Set prediction(set-p) miss. The entry was not found in the Set prediction table"
- },
- {,
"EventCode": "0x26092",
"EventName": "PM_L2_LD_MISS_64B",
"BriefDescription": "All successful D-side load dispatches that were an L2 miss (NOT Sx,Tx,Mx) for this thread and the RC calculated the request should be for 64B(i.e., M=1)"
@@ -1362,12 +1267,12 @@
{,
"EventCode": "0xD8A8",
"EventName": "PM_ISLB_MISS",
- "BriefDescription": "Instruction SLB miss - Total of all segment sizes"
+ "BriefDescription": "Instruction SLB Miss - Total of all segment sizes"
},
{,
- "EventCode": "0xD19C",
- "EventName": "PM_MRK_LSU_FLUSH_RELAUNCH_MISS",
- "BriefDescription": "If a load that has already returned data and has to relaunch for any reason then gets a miss (erat, setp, data cache), it will often be flushed at relaunch time because the data might be inconsistent"
+ "EventCode": "0x368AE",
+ "EventName": "PM_L3_P1_CO_RTY",
+ "BriefDescription": "L3 CO received retry port 1 (memory only), every retry counted"
},
{,
"EventCode": "0x260A2",
@@ -1385,6 +1290,11 @@
"BriefDescription": "Completion stall because the ISU is updating the TEXASR to keep track of the nested tbegin. This is a short delay, and it includes ROT"
},
{,
+ "EventCode": "0xC084",
+ "EventName": "PM_LS2_LD_VECTOR_FIN",
+ "BriefDescription": ""
+ },
+ {,
"EventCode": "0x1608E",
"EventName": "PM_ST_CAUSED_FAIL",
"BriefDescription": "Non-TM Store caused any thread to fail"
@@ -1410,11 +1320,6 @@
"BriefDescription": "Continuous 16 cycle (2to1) window where this signals rotates thru sampling each CO machine busy. PMU uses this wave to then do 16 cyc count to sample total number of machs running"
},
{,
- "EventCode": "0xD084",
- "EventName": "PM_LSU2_SET_MPRED",
- "BriefDescription": "Set prediction(set-p) miss. The entry was not found in the Set prediction table"
- },
- {,
"EventCode": "0x48B8",
"EventName": "PM_BR_MPRED_TAKEN_TA",
"BriefDescription": "Conditional Branch Completed that was Mispredicted due to the Target Address Prediction from the Count Cache or Link Stack. Only XL-form branches that resolved Taken set this event."
@@ -1450,29 +1355,24 @@
"BriefDescription": "A demand load referenced a line in an active strided prefetch stream. The stream could have been allocated through the hardware prefetch mechanism or through software."
},
{,
+ "EventCode": "0x36084",
+ "EventName": "PM_L2_RCST_DISP",
+ "BriefDescription": "All D-side store dispatch attempts for this thread"
+ },
+ {,
"EventCode": "0x45054",
"EventName": "PM_FMA_CMPL",
"BriefDescription": "two flops operation completed (fmadd, fnmadd, fmsub, fnmsub) Scalar instructions only. "
},
{,
- "EventCode": "0x5090",
- "EventName": "PM_SHL_ST_DISABLE",
- "BriefDescription": "Store-Hit-Load Table Read Hit with entry Disabled (entry was disabled due to the entry shown to not prevent the flush)"
- },
- {,
"EventCode": "0x201E8",
"EventName": "PM_THRESH_EXC_512",
"BriefDescription": "Threshold counter exceeded a value of 512"
},
{,
- "EventCode": "0x5084",
- "EventName": "PM_DECODE_FUSION_EXT_ADD",
- "BriefDescription": "32-bit extended addition"
- },
- {,
"EventCode": "0x36080",
"EventName": "PM_L2_INST",
- "BriefDescription": "All successful I-side dispatches for this thread (excludes i_l2mru_tch reqs)."
+ "BriefDescription": "All successful I-side dispatches for this thread (excludes i_l2mru_tch reqs)"
},
{,
"EventCode": "0x3504C",
@@ -1555,21 +1455,11 @@
"BriefDescription": "Memory Read With Intent to Modify for this thread"
},
{,
- "EventCode": "0x26882",
- "EventName": "PM_L2_DC_INV",
- "BriefDescription": "D-cache invalidates sent over the reload bus to the core"
- },
- {,
"EventCode": "0xC090",
"EventName": "PM_LSU_STCX",
"BriefDescription": "STCX sent to nest, i.e. total"
},
{,
- "EventCode": "0xD080",
- "EventName": "PM_LSU0_SET_MPRED",
- "BriefDescription": "Set prediction(set-p) miss. The entry was not found in the Set prediction table"
- },
- {,
"EventCode": "0x2C120",
"EventName": "PM_MRK_DATA_FROM_L2_NO_CONFLICT",
"BriefDescription": "The processor's data cache was reloaded from local core's L2 without conflict due to a marked load"
@@ -1610,11 +1500,6 @@
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 without conflict due to a instruction side request"
},
{,
- "EventCode": "0xD9A0",
- "EventName": "PM_MRK_LSU_FLUSH_LHL_SHL",
- "BriefDescription": "The instruction was flushed because of a sequential load/store consistency. If a load or store hits on an older load that has either been snooped (for loads) or has stale data (for stores)."
- },
- {,
"EventCode": "0x35042",
"EventName": "PM_IPTEG_FROM_L3_DISP_CONFLICT",
"BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 with dispatch conflict due to a instruction side request"
@@ -1692,7 +1577,7 @@
{,
"EventCode": "0x2001A",
"EventName": "PM_NTC_ALL_FIN",
- "BriefDescription": "Cycles after all instructions have finished to group completed"
+ "BriefDescription": "Cycles after instruction finished to instruction completed."
},
{,
"EventCode": "0x3005A",
@@ -1710,6 +1595,11 @@
"BriefDescription": "ls1 l1 tm cam cancel"
},
{,
+ "EventCode": "0x268AE",
+ "EventName": "PM_L3_P3_PF_RTY",
+ "BriefDescription": "L3 PF received retry port 3, every retry counted"
+ },
+ {,
"EventCode": "0xE884",
"EventName": "PM_LS1_ERAT_MISS_PREF",
"BriefDescription": "LS1 Erat miss due to prefetch"
@@ -1742,7 +1632,7 @@
{,
"EventCode": "0x160B6",
"EventName": "PM_L3_WI0_BUSY",
- "BriefDescription": "Rotating sample of 8 WI valid"
+ "BriefDescription": "Rotating sample of 8 WI valid (duplicate)"
},
{,
"EventCode": "0x368AC",
@@ -1790,9 +1680,9 @@
"BriefDescription": "L2 guess system (VGS or RNS) and guess was correct (ie data beyond-group)"
},
{,
- "EventCode": "0x589C",
- "EventName": "PM_PTESYNC",
- "BriefDescription": "ptesync instruction counted when the instruction is decoded and transmitted"
+ "EventCode": "0x260AE",
+ "EventName": "PM_L3_P2_PF_RTY",
+ "BriefDescription": "L3 PF received retry port 2, every retry counted"
},
{,
"EventCode": "0x26086",
@@ -1825,6 +1715,11 @@
"BriefDescription": "Store-Hit-Load Table Read Hit with entry Enabled"
},
{,
+ "EventCode": "0x46882",
+ "EventName": "PM_L2_ST_HIT",
+ "BriefDescription": "All successful D-side store dispatches for this thread that were L2 hits"
+ },
+ {,
"EventCode": "0x360AC",
"EventName": "PM_L3_SN0_BUSY",
"BriefDescription": "Lifetime, sample of snooper machine 0 valid"
@@ -1845,11 +1740,6 @@
"BriefDescription": "All successful D-Side Store dispatches that were an L2 miss for this thread"
},
{,
- "EventCode": "0xF8B4",
- "EventName": "PM_DC_PREF_XCONS_ALLOC",
- "BriefDescription": "Prefetch stream allocated in the Ultra conservative phase by either the hardware prefetch mechanism or software prefetch"
- },
- {,
"EventCode": "0x35048",
"EventName": "PM_IPTEG_FROM_DL2L3_SHR",
"BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a instruction side request"
@@ -1970,11 +1860,6 @@
"BriefDescription": "Cycles thread running at priority level 2 or 3"
},
{,
- "EventCode": "0x10134",
- "EventName": "PM_MRK_ST_DONE_L2",
- "BriefDescription": "marked store completed in L2 ( RC machine done)"
- },
- {,
"EventCode": "0x368B2",
"EventName": "PM_L3_GRP_GUESS_WRONG_HIGH",
"BriefDescription": "Initial scope=group (GS or NNS) but data from local node. Prediction too high"
@@ -2005,11 +1890,6 @@
"BriefDescription": "L2 guess grp (GS or NNS) and guess was not correct (ie data on-chip OR beyond-group)"
},
{,
- "EventCode": "0x368AE",
- "EventName": "PM_L3_P1_CO_RTY",
- "BriefDescription": "L3 CO received retry port 1 (memory only), every retry counted"
- },
- {,
"EventCode": "0xC0AC",
"EventName": "PM_LSU_FLUSH_EMSH",
"BriefDescription": "An ERAT miss was detected after a set-p hit. Erat tracker indicates fail due to tlbmiss and the instruction gets flushed because the instruction was working on the wrong address"
@@ -2035,11 +1915,6 @@
"BriefDescription": "RC requests that were on group (aka nodel) pump attempts"
},
{,
- "EventCode": "0xF0B0",
- "EventName": "PM_L3_LD_PREF",
- "BriefDescription": "L3 load prefetch, sourced from a hardware or software stream, was sent to the nest"
- },
- {,
"EventCode": "0x16080",
"EventName": "PM_L2_LD",
"BriefDescription": "All successful D-side Load dispatches for this thread (L2 miss + L2 hits)"
@@ -2050,6 +1925,11 @@
"BriefDescription": "Math flop instruction completed"
},
{,
+ "EventCode": "0xC080",
+ "EventName": "PM_LS0_LD_VECTOR_FIN",
+ "BriefDescription": ""
+ },
+ {,
"EventCode": "0x368B0",
"EventName": "PM_L3_P1_SYS_PUMP",
"BriefDescription": "L3 PF sent with sys scope port 1, counts even retried requests"
@@ -2120,11 +2000,6 @@
"BriefDescription": "Conditional Branch Completed in which the HW correctly predicted the direction as taken. Counted at completion time"
},
{,
- "EventCode": "0xF0B8",
- "EventName": "PM_LS0_UNALIGNED_ST",
- "BriefDescription": "Store instructions whose data crosses a double-word boundary, which causes it to require an additional slice than than what normally would be required of the Store of that size. If the Store wraps from slice 3 to slice 0, thee is an additional 3-cycle penalty"
- },
- {,
"EventCode": "0x20132",
"EventName": "PM_MRK_DFU_FIN",
"BriefDescription": "Decimal Unit marked Instruction Finish"
@@ -2140,6 +2015,11 @@
"BriefDescription": "Effective Address alias flush : no EA match but Real Address match. If the data has not yet been returned for this load, the instruction will just be rejected, but if it has returned data, it will be flushed"
},
{,
+ "EventCode": "0x16084",
+ "EventName": "PM_L2_RCLD_DISP",
+ "BriefDescription": "All I-or-D side load dispatch attempts for this thread (excludes i_l2mru_tch_reqs)"
+ },
+ {,
"EventCode": "0x3F150",
"EventName": "PM_MRK_ST_DRAIN_TO_L2DISP_CYC",
"BriefDescription": "cycles to drain st from core to L2"
@@ -2225,11 +2105,6 @@
"BriefDescription": "Prefetch Canceled due to page boundary"
},
{,
- "EventCode": "0xF09C",
- "EventName": "PM_SLB_TABLEWALK_CYC",
- "BriefDescription": "Cycles when a tablewalk is pending on this thread on the SLB table"
- },
- {,
"EventCode": "0x460AA",
"EventName": "PM_L3_P0_CO_L31",
"BriefDescription": "L3 CO to L3.1 (LCO) port 0 with or without data"
@@ -2247,10 +2122,10 @@
{,
"EventCode": "0x46082",
"EventName": "PM_L2_ST_DISP",
- "BriefDescription": "All successful D-side store dispatches for this thread "
+ "BriefDescription": "All successful D-side store dispatches for this thread (L2 miss + L2 hits)"
},
{,
- "EventCode": "0x4609E",
+ "EventCode": "0x36880",
"EventName": "PM_L2_INST_MISS",
"BriefDescription": "All successful I-side dispatches that were an L2 miss for this thread (excludes i_l2mru_tch reqs)"
},
@@ -2340,9 +2215,9 @@
"BriefDescription": "All ISU rejects"
},
{,
- "EventCode": "0x46882",
- "EventName": "PM_L2_ST_HIT",
- "BriefDescription": "All successful D-side store dispatches for this thread that were L2 hits"
+ "EventCode": "0xC884",
+ "EventName": "PM_LS3_LD_VECTOR_FIN",
+ "BriefDescription": ""
},
{,
"EventCode": "0x360A8",
@@ -2360,11 +2235,6 @@
"BriefDescription": "Asserts when a i=1 store op is sent to the nest. No record of issue pipe (LS0/LS1) is maintained so this is for both pipes. Probably don't need separate LS0 and LS1"
},
{,
- "EventCode": "0xD880",
- "EventName": "PM_LSU1_SET_MPRED",
- "BriefDescription": "Set prediction(set-p) miss. The entry was not found in the Set prediction table"
- },
- {,
"EventCode": "0xD0B8",
"EventName": "PM_LSU_LMQ_FULL_CYC",
"BriefDescription": "Counts the number of cycles the LMQ is full"
@@ -2389,4 +2259,4 @@
"EventName": "PM_L3_PF_USAGE",
"BriefDescription": "Rotating sample of 32 PF actives"
}
-]
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/pipeline.json b/tools/perf/pmu-events/arch/powerpc/power9/pipeline.json
index bc2db636dabf..5af1abbe82c4 100644
--- a/tools/perf/pmu-events/arch/powerpc/power9/pipeline.json
+++ b/tools/perf/pmu-events/arch/powerpc/power9/pipeline.json
@@ -125,6 +125,11 @@
"BriefDescription": "Overflow from counter 5"
},
{,
+ "EventCode": "0x4505E",
+ "EventName": "PM_FLOP_CMPL",
+ "BriefDescription": "Floating Point Operation Finished"
+ },
+ {,
"EventCode": "0x2C018",
"EventName": "PM_CMPLU_STALL_DMISS_L21_L31",
"BriefDescription": "Completion stall by Dcache miss which resolved on chip ( excluding local L2/L3)"
@@ -390,11 +395,6 @@
"BriefDescription": "Ict empty for this thread due to branch mispred"
},
{,
- "EventCode": "0x3405E",
- "EventName": "PM_IFETCH_THROTTLE",
- "BriefDescription": "Cycles in which Instruction fetch throttle was active."
- },
- {,
"EventCode": "0x1F148",
"EventName": "PM_MRK_DPTEG_FROM_ON_CHIP_CACHE",
"BriefDescription": "A Page Table Entry was loaded into the TLB either shared or modified data from another core's L2/L3 on the same chip due to a marked data side request.. When using Radix Page Translation, this count excludes PDE reloads. Only PTE reloads are included"
@@ -422,7 +422,7 @@
{,
"EventCode": "0xD0A8",
"EventName": "PM_DSLB_MISS",
- "BriefDescription": "Data SLB Miss - Total of all segment sizes"
+ "BriefDescription": "gate_and(sd_pc_c0_comp_valid AND sd_pc_c0_comp_thread(0:1)=tid,sd_pc_c0_comp_ppc_count(0:3)) + gate_and(sd_pc_c1_comp_valid AND sd_pc_c1_comp_thread(0:1)=tid,sd_pc_c1_comp_ppc_count(0:3))"
},
{,
"EventCode": "0x4C058",
@@ -549,4 +549,4 @@
"EventName": "PM_MRK_DATA_FROM_L21_SHR_CYC",
"BriefDescription": "Duration in cycles to reload with Shared (S) data from another core's L2 on the same chip due to a marked load"
}
-]
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/pmc.json b/tools/perf/pmu-events/arch/powerpc/power9/pmc.json
index 3ef8a10aac86..d0b89f930567 100644
--- a/tools/perf/pmu-events/arch/powerpc/power9/pmc.json
+++ b/tools/perf/pmu-events/arch/powerpc/power9/pmc.json
@@ -119,4 +119,4 @@
"EventName": "PM_1FLOP_CMPL",
"BriefDescription": "one flop (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg) operation completed"
}
-]
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/translation.json b/tools/perf/pmu-events/arch/powerpc/power9/translation.json
index 8c0f12024afa..bc8e03d7a6b0 100644
--- a/tools/perf/pmu-events/arch/powerpc/power9/translation.json
+++ b/tools/perf/pmu-events/arch/powerpc/power9/translation.json
@@ -90,11 +90,6 @@
"BriefDescription": "stcx failed"
},
{,
- "EventCode": "0x20112",
- "EventName": "PM_MRK_NTF_FIN",
- "BriefDescription": "Marked next to finish instruction finished"
- },
- {,
"EventCode": "0x300F0",
"EventName": "PM_ST_MISS_L1",
"BriefDescription": "Store Missed L1"
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/cache.json b/tools/perf/pmu-events/arch/x86/broadwell/cache.json
index 73688a9dab2a..bba3152ec54a 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/cache.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/cache.json
@@ -10,13 +10,30 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of demand Data Read requests that hit L2 cache. Only not rejected loads are counted.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "UMask": "0x22",
+ "EventName": "L2_RQSTS.RFO_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "BriefDescription": "RFO requests that miss L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache misses when fetching instructions.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x27",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand requests that miss L2 cache.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -30,6 +47,43 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3f",
+ "EventName": "L2_RQSTS.MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "All requests that miss L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of demand Data Read requests that hit L2 cache. Only not rejected loads are counted.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x42",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests that hit L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x44",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts the number of requests from the L2 hardware prefetchers that hit L2 cache. L3 prefetch new types.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
@@ -70,6 +124,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe7",
+ "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand requests to L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts the total number of requests from the L2 hardware prefetchers.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
@@ -80,6 +143,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xff",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "All L2 requests.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts the number of WB requests that hit L2 cache.",
"EventCode": "0x27",
"Counter": "0,1,2,3",
@@ -131,6 +203,27 @@
"CounterHTOff": "2"
},
{
+ "EventCode": "0x48",
+ "Counter": "2",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "CounterMask": "1",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x48",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
"EventCode": "0x51",
"Counter": "0,1,2,3",
@@ -152,7 +245,30 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The \"Offcore outstanding\" state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "PublicDescription": "This event counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "BDM76",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "BDM76",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The Offcore outstanding state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
"UMask": "0x2",
@@ -174,26 +290,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "PublicDescription": "This event counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The Offcore outstanding state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x8",
+ "UMask": "0x4",
"Errata": "BDM76",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
"SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
+ "PublicDescription": "This event counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "UMask": "0x8",
"Errata": "BDM76",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
- "CounterMask": "1",
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -209,18 +325,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The \"Offcore outstanding\" state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "Errata": "BDM76",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"PublicDescription": "This event counts the number of cycles when the L1D is locked. It is a superset of the 0x1 mask (BUS_LOCK_CLOCKS.BUS_LOCK_DURATION).",
"EventCode": "0x63",
"Counter": "0,1,2,3",
@@ -261,7 +365,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the demand and prefetch data reads. All Core Data Reads include cacheable \"Demands\" and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
+ "PublicDescription": "This event counts the demand and prefetch data reads. All Core Data Reads include cacheable Demands and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
"EventCode": "0xB0",
"Counter": "0,1,2,3",
"UMask": "0x8",
@@ -281,152 +385,161 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xB7, 0xBB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x11",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that miss the STLB.",
+ "BriefDescription": "Retired load uops that miss the STLB. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts store uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts store uops true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x12",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that miss the STLB.",
+ "BriefDescription": "Retired store uops that miss the STLB. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1",
"L1_Hit_Indication": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts load uops with locked access retired to the architected path.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops with locked access retired to the architected path.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x21",
"Errata": "BDM35",
"EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired load uops with locked access.",
+ "BriefDescription": "Retired load uops with locked access. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x41",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired load uops that split across a cacheline boundary.(Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x42",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1",
"L1_Hit_Indication": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This event counts load uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement. This event also counts SW prefetches.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event ?ounts AVX-256bit load/store double-pump memory uops as a single uop at retirement. This event also counts SW prefetches.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x81",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
"SampleAfterValue": "2000003",
- "BriefDescription": "All retired load uops.",
+ "BriefDescription": "All retired load uops. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This event counts store uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts store uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event ?ounts AVX-256bit load/store double-pump memory uops as a single uop at retirement.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x82",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"SampleAfterValue": "2000003",
- "BriefDescription": "All retired store uops.",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1",
"L1_Hit_Indication": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the nearest-level (L1) cache.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load. This event also counts SW prefetches independent of the actual data source.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data source were hits in the nearest-level (L1) cache.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load. This event also counts SW prefetches independent of the actual data source.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
"SampleAfterValue": "2000003",
- "BriefDescription": "Retired load uops with L1 cache hits as data sources.",
+ "BriefDescription": "Retired load uops with L1 cache hits as data sources. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the mid-level (L2) cache.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were hits in the mid-level (L2) cache.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x2",
"Errata": "BDM35",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
+ "BriefDescription": "Retired load uops with L2 cache hits as data sources. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were data hits in the last-level (L3) cache without snoops required.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were data hits in the last-level (L3) cache without snoops required.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x4",
"Errata": "BDM100",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
"SampleAfterValue": "50021",
- "BriefDescription": "Retired load uops which data sources were data hits in L3 without snoops required.",
+ "BriefDescription": "Hit in last-level (L3) cache. Excludes Unknown data-source. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were misses in the nearest-level (L1) cache. Counting excludes unknown and UC data source.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were misses in the nearest-level (L1) cache. Counting excludes unknown and UC data source.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x8",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops misses in L1 cache as data sources.",
+ "BriefDescription": "Retired load uops misses in L1 cache as data sources. Uses PEBS.",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were misses in the mid-level (L2) cache. Counting excludes unknown and UC data source.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were misses in the mid-level (L2) cache. Counting excludes unknown and UC data source.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x10",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
"SampleAfterValue": "50021",
- "BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
+ "BriefDescription": "Retired load uops with L2 cache misses as data sources. Uses PEBS.",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
@@ -438,84 +551,83 @@
"Errata": "BDM100, BDE70",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_MISS",
"SampleAfterValue": "100007",
- "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
+ "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source. (Precise Event - PEBS).",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x40",
"EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
+ "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x1",
"Errata": "BDM100",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS",
"SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were L3 hit and a cross-core snoop hit in the on-pkg core cache.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were L3 hit and a cross-core snoop hit in the on-pkg core cache.",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x2",
"Errata": "BDM100",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT",
"SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were HitM responses from a core on same socket (shared L3).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were HitM responses from a core on same socket (shared L3).",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x4",
"Errata": "BDM100",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM",
"SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x8",
"Errata": "BDM100",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_NONE",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required.",
+ "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uop whose Data Source was: local DRAM either Snoop not needed or Snoop Miss (RspI).",
+ "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches. This is a precise event.",
"EventCode": "0xD3",
"Counter": "0,1,2,3",
"UMask": "0x1",
"Errata": "BDE70, BDM100",
"EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
"SampleAfterValue": "100007",
- "BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
@@ -659,119 +771,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x42",
- "EventName": "L2_RQSTS.RFO_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that hit L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x22",
- "EventName": "L2_RQSTS.RFO_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that miss L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x44",
- "EventName": "L2_RQSTS.CODE_RD_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x24",
- "EventName": "L2_RQSTS.CODE_RD_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 cache misses when fetching instructions.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x27",
- "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand requests that miss L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xe7",
- "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand requests to L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x3f",
- "EventName": "L2_RQSTS.MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "All requests that miss L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xff",
- "EventName": "L2_RQSTS.REFERENCES",
- "SampleAfterValue": "200003",
- "BriefDescription": "All L2 requests.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE",
- "SampleAfterValue": "100003",
- "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "BDM76",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x48",
- "Counter": "2",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
- "CounterMask": "1",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0x48",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
+ "PublicDescription": "Counts demand data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010001 ",
"Counter": "0,1,2,3",
@@ -784,6 +784,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020001 ",
"Counter": "0,1,2,3",
@@ -796,6 +797,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020001 ",
"Counter": "0,1,2,3",
@@ -808,6 +810,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020001 ",
"Counter": "0,1,2,3",
@@ -820,6 +823,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020001 ",
"Counter": "0,1,2,3",
@@ -832,6 +836,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020001 ",
"Counter": "0,1,2,3",
@@ -844,6 +849,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020001 ",
"Counter": "0,1,2,3",
@@ -856,6 +862,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0001 ",
"Counter": "0,1,2,3",
@@ -868,6 +875,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0001 ",
"Counter": "0,1,2,3",
@@ -880,6 +888,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0001 ",
"Counter": "0,1,2,3",
@@ -892,6 +901,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0001 ",
"Counter": "0,1,2,3",
@@ -904,6 +914,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0001 ",
"Counter": "0,1,2,3",
@@ -916,6 +927,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0001 ",
"Counter": "0,1,2,3",
@@ -928,6 +940,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand data writes (RFOs) that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010002 ",
"Counter": "0,1,2,3",
@@ -940,6 +953,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0002 ",
"Counter": "0,1,2,3",
@@ -952,6 +966,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0002 ",
"Counter": "0,1,2,3",
@@ -964,6 +979,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0002 ",
"Counter": "0,1,2,3",
@@ -976,6 +992,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0002 ",
"Counter": "0,1,2,3",
@@ -988,6 +1005,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0002 ",
"Counter": "0,1,2,3",
@@ -1000,6 +1018,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0002 ",
"Counter": "0,1,2,3",
@@ -1012,6 +1031,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand code reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010004 ",
"Counter": "0,1,2,3",
@@ -1024,6 +1044,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020004 ",
"Counter": "0,1,2,3",
@@ -1036,6 +1057,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020004 ",
"Counter": "0,1,2,3",
@@ -1048,6 +1070,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020004 ",
"Counter": "0,1,2,3",
@@ -1060,6 +1083,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020004 ",
"Counter": "0,1,2,3",
@@ -1072,6 +1096,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020004 ",
"Counter": "0,1,2,3",
@@ -1084,6 +1109,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020004 ",
"Counter": "0,1,2,3",
@@ -1096,6 +1122,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand code reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0004 ",
"Counter": "0,1,2,3",
@@ -1108,6 +1135,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0004 ",
"Counter": "0,1,2,3",
@@ -1120,6 +1148,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand code reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0004 ",
"Counter": "0,1,2,3",
@@ -1132,6 +1161,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0004 ",
"Counter": "0,1,2,3",
@@ -1144,6 +1174,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0004 ",
"Counter": "0,1,2,3",
@@ -1156,6 +1187,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand code reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0004 ",
"Counter": "0,1,2,3",
@@ -1168,6 +1200,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts writebacks (modified to exclusive) that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010008 ",
"Counter": "0,1,2,3",
@@ -1180,6 +1213,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020008 ",
"Counter": "0,1,2,3",
@@ -1192,6 +1226,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020008 ",
"Counter": "0,1,2,3",
@@ -1204,6 +1239,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020008 ",
"Counter": "0,1,2,3",
@@ -1216,6 +1252,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020008 ",
"Counter": "0,1,2,3",
@@ -1228,6 +1265,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020008 ",
"Counter": "0,1,2,3",
@@ -1240,6 +1278,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020008 ",
"Counter": "0,1,2,3",
@@ -1252,6 +1291,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0008 ",
"Counter": "0,1,2,3",
@@ -1264,6 +1304,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0008 ",
"Counter": "0,1,2,3",
@@ -1276,6 +1317,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0008 ",
"Counter": "0,1,2,3",
@@ -1288,6 +1330,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0008 ",
"Counter": "0,1,2,3",
@@ -1300,6 +1343,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0008 ",
"Counter": "0,1,2,3",
@@ -1312,6 +1356,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0008 ",
"Counter": "0,1,2,3",
@@ -1324,6 +1369,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch data reads (that bring data to L2) that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010010 ",
"Counter": "0,1,2,3",
@@ -1336,6 +1382,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020010 ",
"Counter": "0,1,2,3",
@@ -1348,6 +1395,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020010 ",
"Counter": "0,1,2,3",
@@ -1360,6 +1408,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020010 ",
"Counter": "0,1,2,3",
@@ -1372,6 +1421,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020010 ",
"Counter": "0,1,2,3",
@@ -1384,6 +1434,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020010 ",
"Counter": "0,1,2,3",
@@ -1396,6 +1447,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020010 ",
"Counter": "0,1,2,3",
@@ -1408,6 +1460,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch data reads (that bring data to L2) that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0010 ",
"Counter": "0,1,2,3",
@@ -1420,6 +1473,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch data reads (that bring data to L2) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0010 ",
"Counter": "0,1,2,3",
@@ -1432,6 +1486,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch data reads (that bring data to L2) that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0010 ",
"Counter": "0,1,2,3",
@@ -1444,6 +1499,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch data reads (that bring data to L2) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0010 ",
"Counter": "0,1,2,3",
@@ -1456,6 +1512,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0010 ",
"Counter": "0,1,2,3",
@@ -1468,6 +1525,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch data reads (that bring data to L2) that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0010 ",
"Counter": "0,1,2,3",
@@ -1480,6 +1538,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch RFOs (that bring data to L2) that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010020 ",
"Counter": "0,1,2,3",
@@ -1492,6 +1551,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020020 ",
"Counter": "0,1,2,3",
@@ -1504,6 +1564,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020020 ",
"Counter": "0,1,2,3",
@@ -1516,6 +1577,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020020 ",
"Counter": "0,1,2,3",
@@ -1528,6 +1590,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020020 ",
"Counter": "0,1,2,3",
@@ -1540,6 +1603,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020020 ",
"Counter": "0,1,2,3",
@@ -1552,6 +1616,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020020 ",
"Counter": "0,1,2,3",
@@ -1564,6 +1629,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch RFOs (that bring data to L2) that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0020 ",
"Counter": "0,1,2,3",
@@ -1576,6 +1642,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch RFOs (that bring data to L2) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0020 ",
"Counter": "0,1,2,3",
@@ -1588,6 +1655,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch RFOs (that bring data to L2) that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0020 ",
"Counter": "0,1,2,3",
@@ -1600,6 +1668,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch RFOs (that bring data to L2) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0020 ",
"Counter": "0,1,2,3",
@@ -1612,6 +1681,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0020 ",
"Counter": "0,1,2,3",
@@ -1624,6 +1694,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch RFOs (that bring data to L2) that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0020 ",
"Counter": "0,1,2,3",
@@ -1636,6 +1707,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch code reads (that bring data to LLC only) that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010040 ",
"Counter": "0,1,2,3",
@@ -1648,6 +1720,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020040 ",
"Counter": "0,1,2,3",
@@ -1660,6 +1733,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020040 ",
"Counter": "0,1,2,3",
@@ -1672,6 +1746,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020040 ",
"Counter": "0,1,2,3",
@@ -1684,6 +1759,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020040 ",
"Counter": "0,1,2,3",
@@ -1696,6 +1772,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020040 ",
"Counter": "0,1,2,3",
@@ -1708,6 +1785,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020040 ",
"Counter": "0,1,2,3",
@@ -1720,6 +1798,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch code reads (that bring data to LLC only) that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0040 ",
"Counter": "0,1,2,3",
@@ -1732,6 +1811,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch code reads (that bring data to LLC only) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0040 ",
"Counter": "0,1,2,3",
@@ -1744,6 +1824,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch code reads (that bring data to LLC only) that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0040 ",
"Counter": "0,1,2,3",
@@ -1756,6 +1837,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch code reads (that bring data to LLC only) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0040 ",
"Counter": "0,1,2,3",
@@ -1768,6 +1850,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0040 ",
"Counter": "0,1,2,3",
@@ -1780,6 +1863,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch code reads (that bring data to LLC only) that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0040 ",
"Counter": "0,1,2,3",
@@ -1792,6 +1876,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch data reads (that bring data to LLC only) that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010080 ",
"Counter": "0,1,2,3",
@@ -1804,6 +1889,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020080 ",
"Counter": "0,1,2,3",
@@ -1816,6 +1902,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020080 ",
"Counter": "0,1,2,3",
@@ -1828,6 +1915,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020080 ",
"Counter": "0,1,2,3",
@@ -1840,6 +1928,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020080 ",
"Counter": "0,1,2,3",
@@ -1852,6 +1941,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020080 ",
"Counter": "0,1,2,3",
@@ -1864,6 +1954,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020080 ",
"Counter": "0,1,2,3",
@@ -1876,6 +1967,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch data reads (that bring data to LLC only) that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0080 ",
"Counter": "0,1,2,3",
@@ -1888,6 +1980,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch data reads (that bring data to LLC only) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0080 ",
"Counter": "0,1,2,3",
@@ -1900,6 +1993,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch data reads (that bring data to LLC only) that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0080 ",
"Counter": "0,1,2,3",
@@ -1912,6 +2006,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch data reads (that bring data to LLC only) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0080 ",
"Counter": "0,1,2,3",
@@ -1924,6 +2019,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0080 ",
"Counter": "0,1,2,3",
@@ -1936,6 +2032,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch data reads (that bring data to LLC only) that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0080 ",
"Counter": "0,1,2,3",
@@ -1948,6 +2045,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch RFOs (that bring data to LLC only) that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010100 ",
"Counter": "0,1,2,3",
@@ -1960,6 +2058,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020100 ",
"Counter": "0,1,2,3",
@@ -1972,6 +2071,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020100 ",
"Counter": "0,1,2,3",
@@ -1984,6 +2084,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020100 ",
"Counter": "0,1,2,3",
@@ -1996,6 +2097,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020100 ",
"Counter": "0,1,2,3",
@@ -2008,6 +2110,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020100 ",
"Counter": "0,1,2,3",
@@ -2020,6 +2123,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020100 ",
"Counter": "0,1,2,3",
@@ -2032,6 +2136,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch RFOs (that bring data to LLC only) that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0100 ",
"Counter": "0,1,2,3",
@@ -2044,6 +2149,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch RFOs (that bring data to LLC only) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0100 ",
"Counter": "0,1,2,3",
@@ -2056,6 +2162,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch RFOs (that bring data to LLC only) that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0100 ",
"Counter": "0,1,2,3",
@@ -2068,6 +2175,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch RFOs (that bring data to LLC only) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0100 ",
"Counter": "0,1,2,3",
@@ -2080,6 +2188,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0100 ",
"Counter": "0,1,2,3",
@@ -2092,6 +2201,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch RFOs (that bring data to LLC only) that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0100 ",
"Counter": "0,1,2,3",
@@ -2104,6 +2214,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch code reads (that bring data to LLC only) that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010200 ",
"Counter": "0,1,2,3",
@@ -2116,6 +2227,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020200 ",
"Counter": "0,1,2,3",
@@ -2128,6 +2240,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020200 ",
"Counter": "0,1,2,3",
@@ -2140,6 +2253,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020200 ",
"Counter": "0,1,2,3",
@@ -2152,6 +2266,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020200 ",
"Counter": "0,1,2,3",
@@ -2164,6 +2279,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020200 ",
"Counter": "0,1,2,3",
@@ -2176,6 +2292,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020200 ",
"Counter": "0,1,2,3",
@@ -2188,6 +2305,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch code reads (that bring data to LLC only) that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0200 ",
"Counter": "0,1,2,3",
@@ -2200,6 +2318,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch code reads (that bring data to LLC only) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0200 ",
"Counter": "0,1,2,3",
@@ -2212,6 +2331,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch code reads (that bring data to LLC only) that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0200 ",
"Counter": "0,1,2,3",
@@ -2224,6 +2344,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch code reads (that bring data to LLC only) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0200 ",
"Counter": "0,1,2,3",
@@ -2236,6 +2357,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0200 ",
"Counter": "0,1,2,3",
@@ -2248,6 +2370,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch code reads (that bring data to LLC only) that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0200 ",
"Counter": "0,1,2,3",
@@ -2260,6 +2383,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts any other requests that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000018000 ",
"Counter": "0,1,2,3",
@@ -2272,6 +2396,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080028000 ",
"Counter": "0,1,2,3",
@@ -2284,6 +2409,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100028000 ",
"Counter": "0,1,2,3",
@@ -2296,6 +2422,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200028000 ",
"Counter": "0,1,2,3",
@@ -2308,6 +2435,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400028000 ",
"Counter": "0,1,2,3",
@@ -2320,6 +2448,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000028000 ",
"Counter": "0,1,2,3",
@@ -2332,6 +2461,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80028000 ",
"Counter": "0,1,2,3",
@@ -2344,6 +2474,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts any other requests that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c8000 ",
"Counter": "0,1,2,3",
@@ -2356,6 +2487,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts any other requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c8000 ",
"Counter": "0,1,2,3",
@@ -2368,6 +2500,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts any other requests that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c8000 ",
"Counter": "0,1,2,3",
@@ -2380,6 +2513,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts any other requests that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c8000 ",
"Counter": "0,1,2,3",
@@ -2392,6 +2526,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c8000 ",
"Counter": "0,1,2,3",
@@ -2404,6 +2539,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts any other requests that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c8000 ",
"Counter": "0,1,2,3",
@@ -2416,6 +2552,7 @@
"CounterHTOff": "0,1,2,3"
},
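The composite request masks that follow (0x0090, 0x0120) are ORs of the single-request bits used by the earlier entries, and the upper bytes select the supplier and snoop responses; the same field values recur throughout the file (0x010000 for any response, 0x3c0000 for an L3 hit, 0x00800000 through 0x10000000 for the individual snoop outcomes, 0x3f800000 for any snoop). The sketch below reconstructs one of the values under those inferred bit positions; the macro names are illustrative rather than taken from this file, and the Intel SDM remains the authoritative reference for the MSR layout.

#include <stdint.h>
#include <stdio.h>

/* Bit positions inferred from the MSRValues visible in this file. */
#define REQ_PF_L2_DATA_RD       (1ULL << 4)     /* 0x0010 */
#define REQ_PF_LLC_DATA_RD      (1ULL << 7)     /* 0x0080 */
#define RSP_ANY                 (1ULL << 16)    /* 0x010000 */
#define SUPP_LLC_HIT            (0xfULL << 18)  /* 0x3c0000 */
#define SNP_MISS                (1ULL << 25)    /* 0x02000000 */
#define SNP_ANY                 (0x7fULL << 23) /* 0x3f800000 */

int main(void)
{
        /* 0x0090 in the entries below: L2 and LLC data-read prefetches */
        uint64_t req = REQ_PF_L2_DATA_RD | REQ_PF_LLC_DATA_RD;
        uint64_t msr = SNP_MISS | SUPP_LLC_HIT | req;

        /* prints 0x02003c0090, matching the snoop-miss entry below */
        printf("MSRValue: 0x%010llx\n", (unsigned long long)msr);
        return 0;
}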
{
+ "PublicDescription": "Counts all prefetch data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010090 ",
"Counter": "0,1,2,3",
@@ -2428,6 +2565,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020090 ",
"Counter": "0,1,2,3",
@@ -2440,6 +2578,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020090 ",
"Counter": "0,1,2,3",
@@ -2452,6 +2591,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020090 ",
"Counter": "0,1,2,3",
@@ -2464,6 +2604,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020090 ",
"Counter": "0,1,2,3",
@@ -2476,6 +2617,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020090 ",
"Counter": "0,1,2,3",
@@ -2488,6 +2630,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020090 ",
"Counter": "0,1,2,3",
@@ -2500,6 +2643,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch data reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0090 ",
"Counter": "0,1,2,3",
@@ -2512,6 +2656,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0090 ",
"Counter": "0,1,2,3",
@@ -2524,6 +2669,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch data reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0090 ",
"Counter": "0,1,2,3",
@@ -2536,6 +2682,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0090 ",
"Counter": "0,1,2,3",
@@ -2548,6 +2695,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0090 ",
"Counter": "0,1,2,3",
@@ -2560,6 +2708,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0090 ",
"Counter": "0,1,2,3",
@@ -2572,6 +2721,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010120 ",
"Counter": "0,1,2,3",
@@ -2584,6 +2734,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020120 ",
"Counter": "0,1,2,3",
@@ -2596,6 +2747,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020120 ",
"Counter": "0,1,2,3",
@@ -2608,6 +2760,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020120 ",
"Counter": "0,1,2,3",
@@ -2620,6 +2773,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020120 ",
"Counter": "0,1,2,3",
@@ -2632,6 +2786,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020120 ",
"Counter": "0,1,2,3",
@@ -2644,6 +2799,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020120 ",
"Counter": "0,1,2,3",
@@ -2656,6 +2812,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch RFOs that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0120 ",
"Counter": "0,1,2,3",
@@ -2668,6 +2825,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0120 ",
"Counter": "0,1,2,3",
@@ -2680,6 +2838,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch RFOs that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0120 ",
"Counter": "0,1,2,3",
@@ -2692,6 +2851,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0120 ",
"Counter": "0,1,2,3",
@@ -2704,6 +2864,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0120 ",
"Counter": "0,1,2,3",
@@ -2716,6 +2877,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0120 ",
"Counter": "0,1,2,3",
@@ -2728,6 +2890,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch code reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010240 ",
"Counter": "0,1,2,3",
@@ -2740,6 +2903,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020240 ",
"Counter": "0,1,2,3",
@@ -2752,6 +2916,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020240 ",
"Counter": "0,1,2,3",
@@ -2764,6 +2929,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020240 ",
"Counter": "0,1,2,3",
@@ -2776,6 +2942,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020240 ",
"Counter": "0,1,2,3",
@@ -2788,6 +2955,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020240 ",
"Counter": "0,1,2,3",
@@ -2800,6 +2968,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020240 ",
"Counter": "0,1,2,3",
@@ -2812,6 +2981,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch code reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0240 ",
"Counter": "0,1,2,3",
@@ -2824,6 +2994,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0240 ",
"Counter": "0,1,2,3",
@@ -2836,6 +3007,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch code reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0240 ",
"Counter": "0,1,2,3",
@@ -2848,6 +3020,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0240 ",
"Counter": "0,1,2,3",
@@ -2860,6 +3033,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0240 ",
"Counter": "0,1,2,3",
@@ -2872,6 +3046,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch code reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0240 ",
"Counter": "0,1,2,3",
@@ -2884,6 +3059,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010091 ",
"Counter": "0,1,2,3",
@@ -2896,6 +3072,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020091 ",
"Counter": "0,1,2,3",
@@ -2908,6 +3085,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020091 ",
"Counter": "0,1,2,3",
@@ -2920,6 +3098,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020091 ",
"Counter": "0,1,2,3",
@@ -2932,6 +3111,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020091 ",
"Counter": "0,1,2,3",
@@ -2944,6 +3124,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020091 ",
"Counter": "0,1,2,3",
@@ -2956,6 +3137,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020091 ",
"Counter": "0,1,2,3",
@@ -2968,6 +3150,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0091 ",
"Counter": "0,1,2,3",
@@ -2980,6 +3163,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0091 ",
"Counter": "0,1,2,3",
@@ -2992,6 +3176,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0091 ",
"Counter": "0,1,2,3",
@@ -3004,6 +3189,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0091 ",
"Counter": "0,1,2,3",
@@ -3016,6 +3202,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0091 ",
"Counter": "0,1,2,3",
@@ -3028,6 +3215,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0091 ",
"Counter": "0,1,2,3",
@@ -3040,6 +3228,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010122 ",
"Counter": "0,1,2,3",
@@ -3052,6 +3241,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020122 ",
"Counter": "0,1,2,3",
@@ -3064,6 +3254,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020122 ",
"Counter": "0,1,2,3",
@@ -3076,6 +3267,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020122 ",
"Counter": "0,1,2,3",
@@ -3088,6 +3280,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020122 ",
"Counter": "0,1,2,3",
@@ -3100,6 +3293,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020122 ",
"Counter": "0,1,2,3",
@@ -3112,6 +3306,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f80020122 ",
"Counter": "0,1,2,3",
@@ -3124,6 +3319,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00803c0122 ",
"Counter": "0,1,2,3",
@@ -3136,6 +3332,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01003c0122 ",
"Counter": "0,1,2,3",
@@ -3148,6 +3345,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02003c0122 ",
"Counter": "0,1,2,3",
@@ -3160,6 +3358,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0122 ",
"Counter": "0,1,2,3",
@@ -3172,6 +3371,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0122 ",
"Counter": "0,1,2,3",
@@ -3184,6 +3384,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0122 ",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/floating-point.json b/tools/perf/pmu-events/arch/x86/broadwell/floating-point.json
index 102bfb808199..689d478dae93 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/floating-point.json
@@ -1,6 +1,6 @@
[
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of transitions from AVX-256 to legacy SSE when penalty is applicable.",
+ "PublicDescription": "This event counts the number of transitions from AVX-256 to legacy SSE when penalty is applicable.",
"EventCode": "0xC1",
"Counter": "0,1,2,3",
"UMask": "0x8",
@@ -11,7 +11,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of transitions from legacy SSE to AVX-256 when penalty is applicable.",
+ "PublicDescription": "This event counts the number of transitions from legacy SSE to AVX-256 when penalty is applicable.",
"EventCode": "0xC1",
"Counter": "0,1,2,3",
"UMask": "0x10",
@@ -22,7 +22,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
"EventCode": "0xC7",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -32,7 +31,6 @@
"CounterHTOff": "0,1,2,3"
},
{
- "PEBS": "1",
"EventCode": "0xC7",
"Counter": "0,1,2,3",
"UMask": "0x2",
@@ -42,7 +40,15 @@
"CounterHTOff": "0,1,2,3"
},
{
- "PEBS": "1",
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0xC7",
"Counter": "0,1,2,3",
"UMask": "0x4",
@@ -52,7 +58,6 @@
"CounterHTOff": "0,1,2,3"
},
{
- "PEBS": "1",
"EventCode": "0xC7",
"Counter": "0,1,2,3",
"UMask": "0x8",
@@ -62,7 +67,6 @@
"CounterHTOff": "0,1,2,3"
},
{
- "PEBS": "1",
"EventCode": "0xC7",
"Counter": "0,1,2,3",
"UMask": "0x10",
@@ -72,7 +76,43 @@
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.",
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x15",
+ "EventName": "FP_ARITH_INST_RETIRED.DOUBLE",
+ "SampleAfterValue": "2000006",
+ "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xc7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired. Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2a",
+ "EventName": "FP_ARITH_INST_RETIRED.SINGLE",
+ "SampleAfterValue": "2000005",
+ "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3c",
+ "EventName": "FP_ARITH_INST_RETIRED.PACKED",
+ "SampleAfterValue": "2000004",
+ "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired. Applies to SSE* and AVX*, packed, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "This event counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x2",
@@ -82,7 +122,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts x87 floating point (FP) micro-code assist (invalid operation, denormal operand, SNaN operand) when the input value (one of the source operands to an FP instruction) is invalid.",
+ "PublicDescription": "This event counts x87 floating point (FP) micro-code assist (invalid operation, denormal operand, SNaN operand) when the input value (one of the source operands to an FP instruction) is invalid.",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x4",
@@ -92,7 +132,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.",
+ "PublicDescription": "This event counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x8",
@@ -102,7 +142,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts any input SSE* FP assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.",
+ "PublicDescription": "This event counts any input SSE* FP assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x10",
@@ -121,51 +161,5 @@
"BriefDescription": "Cycles with any input/output SSE or FP assist",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3"
- },
- {
- "PEBS": "1",
- "EventCode": "0xc7",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired. Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC7",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "EventName": "FP_ARITH_INST_RETIRED.SCALAR",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC7",
- "Counter": "0,1,2,3",
- "UMask": "0x3c",
- "EventName": "FP_ARITH_INST_RETIRED.PACKED",
- "SampleAfterValue": "2000004",
- "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired. Applies to SSE* and AVX*, packed, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC7",
- "Counter": "0,1,2,3",
- "UMask": "0x2a",
- "EventName": "FP_ARITH_INST_RETIRED.SINGLE",
- "SampleAfterValue": "2000005",
- "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC7",
- "Counter": "0,1,2,3",
- "UMask": "0x15",
- "EventName": "FP_ARITH_INST_RETIRED.DOUBLE",
- "SampleAfterValue": "2000006",
- "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
- "CounterHTOff": "0,1,2,3"
}
] \ No newline at end of file
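The FP_ARITH_INST_RETIRED.SCALAR/DOUBLE/SINGLE/PACKED entries that this hunk moves into umask order are not separate hardware events: each umask is the OR of the per-width sub-events defined earlier in the same file. A quick Python check (the names for umasks 0x1-0x10 are assumed from the usual FP_ARITH naming; only 256B_PACKED_SINGLE = 0x20 is spelled out in the hunk above):

# The aggregate umasks added above are ORs of the per-width umasks.
SCALAR_DOUBLE      = 0x01  # assumed name
SCALAR_SINGLE      = 0x02  # assumed name
PACKED_128B_DOUBLE = 0x04  # assumed name
PACKED_128B_SINGLE = 0x08  # assumed name
PACKED_256B_DOUBLE = 0x10  # assumed name
PACKED_256B_SINGLE = 0x20  # FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE above

assert SCALAR_DOUBLE | SCALAR_SINGLE == 0x03                            # .SCALAR
assert SCALAR_DOUBLE | PACKED_128B_DOUBLE | PACKED_256B_DOUBLE == 0x15  # .DOUBLE
assert SCALAR_SINGLE | PACKED_128B_SINGLE | PACKED_256B_SINGLE == 0x2a  # .SINGLE
assert (PACKED_128B_DOUBLE | PACKED_128B_SINGLE |
        PACKED_256B_DOUBLE | PACKED_256B_SINGLE) == 0x3c                # .PACKED

These sub-events are also what FLOP-counting tools weight; e.g. each 256-bit packed single count represents 8 computations, per the BriefDescription above.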
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/frontend.json b/tools/perf/pmu-events/arch/x86/broadwell/frontend.json
index b0cdf1f097a0..7142c76d7f11 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/frontend.json
@@ -10,7 +10,7 @@
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"EventCode": "0x79",
"Counter": "0,1,2,3",
"UMask": "0x4",
@@ -20,80 +20,49 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the number of uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "IDQ.MS_DSB_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "IDQ.MS_MITE_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the total number of uops delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may \"bypass\" the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_UOPS",
+ "UMask": "0x4",
+ "EventName": "IDQ.MITE_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may \"bypass\" the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_CYCLES",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterMask": "1",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "IDQ.MITE_CYCLES",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts the number of uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_CYCLES",
+ "UMask": "0x10",
+ "EventName": "IDQ.MS_DSB_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
- "CounterMask": "1",
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
"UMask": "0x10",
@@ -104,7 +73,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while the Microcode Sequencer (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts the number of deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
"UMask": "0x10",
@@ -116,7 +85,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
"UMask": "0x18",
@@ -127,7 +96,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
"UMask": "0x18",
@@ -138,7 +107,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "This event counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may bypass the IDQ.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"EventCode": "0x79",
"Counter": "0,1,2,3",
"UMask": "0x24",
@@ -149,7 +128,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"EventCode": "0x79",
"Counter": "0,1,2,3",
"UMask": "0x24",
@@ -160,7 +139,39 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "This event counts the total number of uops delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may bypass the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may bypass the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EdgeDetect": "1",
+ "EventName": "IDQ.MS_SWITCHES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"EventCode": "0x79",
"Counter": "0,1,2,3",
"UMask": "0x3c",
@@ -200,7 +211,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding ?4 ? x? when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when:\n a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread;\n b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions); \n c. Instruction Decode Queue (IDQ) delivers four uops.",
+ "PublicDescription": "This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4 x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when:\n a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread;\n b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions); \n c. Instruction Decode Queue (IDQ) delivers four uops.",
"EventCode": "0x9C",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -263,7 +274,7 @@
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 0?2 cycles.",
+ "PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.",
"EventCode": "0xAB",
"Counter": "0,1,2,3",
"UMask": "0x2",
@@ -271,16 +282,5 @@
"SampleAfterValue": "2000003",
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EdgeDetect": "1",
- "EventName": "IDQ.MS_SWITCHES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
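IDQ.MS_SWITCHES, relocated by the hunk above, shows how the extra JSON fields compose: CounterMask plus EdgeDetect turn the level MS-busy condition (umask 0x30) into a count of DSB/MITE-to-MS transitions. A minimal sketch of turning such an entry into a perf raw-event descriptor (the field names mirror perf's cpu// format terms; handling a single EventCode only is my simplification, since some entries above list two codes):

def to_perf_descriptor(event):
    """Build a perf cpu// descriptor string from one pmu-events JSON entry."""
    parts = ["event=" + event["EventCode"], "umask=" + event["UMask"]]
    if event.get("EdgeDetect") == "1":
        parts.append("edge=1")
    if "CounterMask" in event:
        parts.append("cmask=" + event["CounterMask"])
    return "cpu/" + ",".join(parts) + "/"

ms_switches = {"EventCode": "0x79", "UMask": "0x30",
               "EdgeDetect": "1", "CounterMask": "1",
               "EventName": "IDQ.MS_SWITCHES"}
print(to_perf_descriptor(ms_switches))  # cpu/event=0x79,umask=0x30,edge=1,cmask=1/

Something like perf stat -e 'cpu/event=0x79,umask=0x30,edge=1,cmask=1/' should then count microcode-sequencer switches directly, without waiting for this alias to land.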
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/memory.json b/tools/perf/pmu-events/arch/x86/broadwell/memory.json
index ff5416d29d0d..c9154cebbdf0 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/memory.json
@@ -90,7 +90,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Unfriendly TSX abort triggered by a flowmarker.",
"EventCode": "0x5d",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -170,13 +169,13 @@
},
{
"PEBS": "1",
- "PublicDescription": "Number of times HLE abort was triggered.",
+ "PublicDescription": "Number of times HLE abort was triggered (PEBS).",
"EventCode": "0xc8",
"Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "HLE_RETIRED.ABORTED",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of times HLE abort was triggered",
+ "BriefDescription": "Number of times HLE abort was triggered (PEBS)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -251,13 +250,13 @@
},
{
"PEBS": "1",
- "PublicDescription": "Number of times RTM abort was triggered .",
+ "PublicDescription": "Number of times RTM abort was triggered (PEBS).",
"EventCode": "0xc9",
"Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "RTM_RETIRED.ABORTED",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of times RTM abort was triggered",
+ "BriefDescription": "Number of times RTM abort was triggered (PEBS)",
"CounterHTOff": "0,1,2,3"
},
{
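The two hunks above add a (PEBS) marker to the HLE_RETIRED.ABORTED and RTM_RETIRED.ABORTED descriptions, matching their "PEBS": "1" fields: both events support precise sampling. As a rough sketch of what a consumer of these files might do with that flag (always requesting the :pp precision level is an assumption of mine, not something the patch prescribes):

def sampling_event(event):
    """Append a precise-sampling modifier when the JSON entry is PEBS-capable."""
    desc = "cpu/event=" + event["EventCode"] + ",umask=" + event["UMask"] + "/"
    return desc + ("pp" if event.get("PEBS") == "1" else "")

rtm_aborted = {"EventCode": "0xc9", "UMask": "0x4",
               "EventName": "RTM_RETIRED.ABORTED", "PEBS": "1"}
print(sampling_event(rtm_aborted))  # cpu/event=0xc9,umask=0x4/pp

e.g. perf record -e 'cpu/event=0xc9,umask=0x4/pp' -- <workload> to sample RTM aborts precisely.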
@@ -431,6 +430,7 @@
"CounterHTOff": "3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020001 ",
"Counter": "0,1,2,3",
@@ -443,6 +443,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0001 ",
"Counter": "0,1,2,3",
@@ -455,6 +456,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000001 ",
"Counter": "0,1,2,3",
@@ -467,6 +469,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000001 ",
"Counter": "0,1,2,3",
@@ -479,6 +482,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000001 ",
"Counter": "0,1,2,3",
@@ -491,6 +495,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000001 ",
"Counter": "0,1,2,3",
@@ -503,6 +508,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000001 ",
"Counter": "0,1,2,3",
@@ -515,6 +521,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000001 ",
"Counter": "0,1,2,3",
@@ -527,6 +534,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000001 ",
"Counter": "0,1,2,3",
@@ -539,6 +547,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000001 ",
"Counter": "0,1,2,3",
@@ -551,6 +560,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000001 ",
"Counter": "0,1,2,3",
@@ -563,6 +573,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000001 ",
"Counter": "0,1,2,3",
@@ -575,6 +586,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000001 ",
"Counter": "0,1,2,3",
@@ -587,6 +599,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0002 ",
"Counter": "0,1,2,3",
@@ -599,6 +612,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000002 ",
"Counter": "0,1,2,3",
@@ -611,6 +625,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000002 ",
"Counter": "0,1,2,3",
@@ -623,6 +638,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000002 ",
"Counter": "0,1,2,3",
@@ -635,6 +651,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000002 ",
"Counter": "0,1,2,3",
@@ -647,6 +664,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000002 ",
"Counter": "0,1,2,3",
@@ -659,6 +677,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020004 ",
"Counter": "0,1,2,3",
@@ -671,6 +690,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand code reads that hit in the L3 and the target was a non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0004 ",
"Counter": "0,1,2,3",
@@ -683,6 +703,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000004 ",
"Counter": "0,1,2,3",
@@ -695,6 +716,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000004 ",
"Counter": "0,1,2,3",
@@ -707,6 +729,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000004 ",
"Counter": "0,1,2,3",
@@ -719,6 +742,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000004 ",
"Counter": "0,1,2,3",
@@ -731,6 +755,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000004 ",
"Counter": "0,1,2,3",
@@ -743,6 +768,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000004 ",
"Counter": "0,1,2,3",
@@ -755,6 +781,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000004 ",
"Counter": "0,1,2,3",
@@ -767,6 +794,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand code reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000004 ",
"Counter": "0,1,2,3",
@@ -779,6 +807,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000004 ",
"Counter": "0,1,2,3",
@@ -791,6 +820,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand code reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000004 ",
"Counter": "0,1,2,3",
@@ -803,6 +833,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000004 ",
"Counter": "0,1,2,3",
@@ -815,6 +846,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020008 ",
"Counter": "0,1,2,3",
@@ -827,6 +859,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts writebacks (modified to exclusive) that hit in the L3 and the target was a non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0008 ",
"Counter": "0,1,2,3",
@@ -839,6 +872,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000008 ",
"Counter": "0,1,2,3",
@@ -851,6 +885,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000008 ",
"Counter": "0,1,2,3",
@@ -863,6 +898,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000008 ",
"Counter": "0,1,2,3",
@@ -875,6 +911,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000008 ",
"Counter": "0,1,2,3",
@@ -887,6 +924,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000008 ",
"Counter": "0,1,2,3",
@@ -899,6 +937,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000008 ",
"Counter": "0,1,2,3",
@@ -911,6 +950,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000008 ",
"Counter": "0,1,2,3",
@@ -923,6 +963,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts writebacks (modified to exclusive) that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000008 ",
"Counter": "0,1,2,3",
@@ -935,6 +976,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000008 ",
"Counter": "0,1,2,3",
@@ -947,6 +989,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts writebacks (modified to exclusive) that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000008 ",
"Counter": "0,1,2,3",
@@ -959,6 +1002,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000008 ",
"Counter": "0,1,2,3",
@@ -971,6 +1015,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020010 ",
"Counter": "0,1,2,3",
@@ -983,6 +1028,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the target was a non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0010 ",
"Counter": "0,1,2,3",
@@ -995,6 +1041,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000010 ",
"Counter": "0,1,2,3",
@@ -1007,6 +1054,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000010 ",
"Counter": "0,1,2,3",
@@ -1019,6 +1067,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000010 ",
"Counter": "0,1,2,3",
@@ -1031,6 +1080,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000010 ",
"Counter": "0,1,2,3",
@@ -1043,6 +1093,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000010 ",
"Counter": "0,1,2,3",
@@ -1055,6 +1106,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000010 ",
"Counter": "0,1,2,3",
@@ -1067,6 +1119,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000010 ",
"Counter": "0,1,2,3",
@@ -1079,6 +1132,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000010 ",
"Counter": "0,1,2,3",
@@ -1091,6 +1145,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000010 ",
"Counter": "0,1,2,3",
@@ -1103,6 +1158,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000010 ",
"Counter": "0,1,2,3",
@@ -1115,6 +1171,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000010 ",
"Counter": "0,1,2,3",
@@ -1127,6 +1184,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020020 ",
"Counter": "0,1,2,3",
@@ -1139,6 +1197,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the target was a non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0020 ",
"Counter": "0,1,2,3",
@@ -1151,6 +1210,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000020 ",
"Counter": "0,1,2,3",
@@ -1163,6 +1223,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000020 ",
"Counter": "0,1,2,3",
@@ -1175,6 +1236,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000020 ",
"Counter": "0,1,2,3",
@@ -1187,6 +1249,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000020 ",
"Counter": "0,1,2,3",
@@ -1199,6 +1262,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000020 ",
"Counter": "0,1,2,3",
@@ -1211,6 +1275,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000020 ",
"Counter": "0,1,2,3",
@@ -1223,6 +1288,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000020 ",
"Counter": "0,1,2,3",
@@ -1235,6 +1301,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000020 ",
"Counter": "0,1,2,3",
@@ -1247,6 +1314,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000020 ",
"Counter": "0,1,2,3",
@@ -1259,6 +1327,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000020 ",
"Counter": "0,1,2,3",
@@ -1271,6 +1340,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000020 ",
"Counter": "0,1,2,3",
@@ -1283,6 +1353,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020040 ",
"Counter": "0,1,2,3",
@@ -1295,6 +1366,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 and the target was a non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0040 ",
"Counter": "0,1,2,3",
@@ -1307,6 +1379,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000040 ",
"Counter": "0,1,2,3",
@@ -1319,6 +1392,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000040 ",
"Counter": "0,1,2,3",
@@ -1331,6 +1405,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000040 ",
"Counter": "0,1,2,3",
@@ -1343,6 +1418,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000040 ",
"Counter": "0,1,2,3",
@@ -1355,6 +1431,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000040 ",
"Counter": "0,1,2,3",
@@ -1367,6 +1444,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000040 ",
"Counter": "0,1,2,3",
@@ -1379,6 +1457,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000040 ",
"Counter": "0,1,2,3",
@@ -1391,6 +1470,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000040 ",
"Counter": "0,1,2,3",
@@ -1403,6 +1483,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000040 ",
"Counter": "0,1,2,3",
@@ -1415,6 +1496,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000040 ",
"Counter": "0,1,2,3",
@@ -1427,6 +1509,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000040 ",
"Counter": "0,1,2,3",
@@ -1439,6 +1522,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020080 ",
"Counter": "0,1,2,3",
@@ -1451,6 +1535,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the target was a non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0080 ",
"Counter": "0,1,2,3",
@@ -1463,6 +1548,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000080 ",
"Counter": "0,1,2,3",
@@ -1475,6 +1561,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000080 ",
"Counter": "0,1,2,3",
@@ -1487,6 +1574,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000080 ",
"Counter": "0,1,2,3",
@@ -1499,6 +1587,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000080 ",
"Counter": "0,1,2,3",
@@ -1511,6 +1600,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000080 ",
"Counter": "0,1,2,3",
@@ -1523,6 +1613,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000080 ",
"Counter": "0,1,2,3",
@@ -1535,6 +1626,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000080 ",
"Counter": "0,1,2,3",
@@ -1547,6 +1639,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000080 ",
"Counter": "0,1,2,3",
@@ -1559,6 +1652,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000080 ",
"Counter": "0,1,2,3",
@@ -1571,6 +1665,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000080 ",
"Counter": "0,1,2,3",
@@ -1583,6 +1678,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000080 ",
"Counter": "0,1,2,3",
@@ -1595,6 +1691,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020100 ",
"Counter": "0,1,2,3",
@@ -1607,6 +1704,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the target was a non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0100 ",
"Counter": "0,1,2,3",
@@ -1619,6 +1717,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000100 ",
"Counter": "0,1,2,3",
@@ -1631,6 +1730,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000100 ",
"Counter": "0,1,2,3",
@@ -1643,6 +1743,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000100 ",
"Counter": "0,1,2,3",
@@ -1655,6 +1756,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000100 ",
"Counter": "0,1,2,3",
@@ -1667,6 +1769,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000100 ",
"Counter": "0,1,2,3",
@@ -1679,6 +1782,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000100 ",
"Counter": "0,1,2,3",
@@ -1691,6 +1795,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000100 ",
"Counter": "0,1,2,3",
@@ -1703,6 +1808,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000100 ",
"Counter": "0,1,2,3",
@@ -1715,6 +1821,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000100 ",
"Counter": "0,1,2,3",
@@ -1727,6 +1834,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000100 ",
"Counter": "0,1,2,3",
@@ -1739,6 +1847,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000100 ",
"Counter": "0,1,2,3",
@@ -1751,6 +1860,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020200 ",
"Counter": "0,1,2,3",
@@ -1763,6 +1873,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 and the target was a non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0200 ",
"Counter": "0,1,2,3",
@@ -1775,6 +1886,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000200 ",
"Counter": "0,1,2,3",
@@ -1787,6 +1899,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000200 ",
"Counter": "0,1,2,3",
@@ -1799,6 +1912,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000200 ",
"Counter": "0,1,2,3",
@@ -1811,6 +1925,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000200 ",
"Counter": "0,1,2,3",
@@ -1823,6 +1938,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000200 ",
"Counter": "0,1,2,3",
@@ -1835,6 +1951,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000200 ",
"Counter": "0,1,2,3",
@@ -1847,6 +1964,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000200 ",
"Counter": "0,1,2,3",
@@ -1859,6 +1977,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000200 ",
"Counter": "0,1,2,3",
@@ -1871,6 +1990,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000200 ",
"Counter": "0,1,2,3",
@@ -1883,6 +2003,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000200 ",
"Counter": "0,1,2,3",
@@ -1895,6 +2016,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000200 ",
"Counter": "0,1,2,3",
@@ -1907,6 +2029,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000028000 ",
"Counter": "0,1,2,3",
@@ -1919,6 +2042,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts any other requests that hit in the L3 and the target was a non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c8000 ",
"Counter": "0,1,2,3",
@@ -1931,6 +2055,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084008000 ",
"Counter": "0,1,2,3",
@@ -1943,6 +2068,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104008000 ",
"Counter": "0,1,2,3",
@@ -1955,6 +2081,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204008000 ",
"Counter": "0,1,2,3",
@@ -1967,6 +2094,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404008000 ",
"Counter": "0,1,2,3",
@@ -1979,6 +2107,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004008000 ",
"Counter": "0,1,2,3",
@@ -1991,6 +2120,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004008000 ",
"Counter": "0,1,2,3",
@@ -2003,6 +2133,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84008000 ",
"Counter": "0,1,2,3",
@@ -2015,6 +2146,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts any other requests that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc008000 ",
"Counter": "0,1,2,3",
@@ -2027,6 +2159,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c008000 ",
"Counter": "0,1,2,3",
@@ -2039,6 +2172,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts any other requests that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c008000 ",
"Counter": "0,1,2,3",
@@ -2051,6 +2185,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c008000 ",
"Counter": "0,1,2,3",
@@ -2063,6 +2198,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020090 ",
"Counter": "0,1,2,3",
@@ -2075,6 +2211,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch data reads that hit in the L3 and the target was a non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0090 ",
"Counter": "0,1,2,3",
@@ -2087,6 +2224,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000090 ",
"Counter": "0,1,2,3",
@@ -2099,6 +2237,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000090 ",
"Counter": "0,1,2,3",
@@ -2111,6 +2250,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000090 ",
"Counter": "0,1,2,3",
@@ -2123,6 +2263,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000090 ",
"Counter": "0,1,2,3",
@@ -2135,6 +2276,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000090 ",
"Counter": "0,1,2,3",
@@ -2147,6 +2289,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000090 ",
"Counter": "0,1,2,3",
@@ -2159,6 +2302,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000090 ",
"Counter": "0,1,2,3",
@@ -2171,6 +2315,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch data reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000090 ",
"Counter": "0,1,2,3",
@@ -2183,6 +2328,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000090 ",
"Counter": "0,1,2,3",
@@ -2195,6 +2341,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch data reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000090 ",
"Counter": "0,1,2,3",
@@ -2207,6 +2354,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000090 ",
"Counter": "0,1,2,3",
@@ -2219,6 +2367,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020120 ",
"Counter": "0,1,2,3",
@@ -2231,6 +2380,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch RFOs that hit in the L3 and the target was a non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0120 ",
"Counter": "0,1,2,3",
@@ -2243,6 +2393,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000120 ",
"Counter": "0,1,2,3",
@@ -2255,6 +2406,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000120 ",
"Counter": "0,1,2,3",
@@ -2267,6 +2419,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000120 ",
"Counter": "0,1,2,3",
@@ -2279,6 +2432,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000120 ",
"Counter": "0,1,2,3",
@@ -2291,6 +2445,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000120 ",
"Counter": "0,1,2,3",
@@ -2303,6 +2458,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000120 ",
"Counter": "0,1,2,3",
@@ -2315,6 +2471,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000120 ",
"Counter": "0,1,2,3",
@@ -2327,6 +2484,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch RFOs that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000120 ",
"Counter": "0,1,2,3",
@@ -2339,6 +2497,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000120 ",
"Counter": "0,1,2,3",
@@ -2351,6 +2510,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch RFOs that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000120 ",
"Counter": "0,1,2,3",
@@ -2363,6 +2523,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000120 ",
"Counter": "0,1,2,3",
@@ -2375,6 +2536,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020240 ",
"Counter": "0,1,2,3",
@@ -2387,6 +2549,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch code reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0240 ",
"Counter": "0,1,2,3",
@@ -2399,6 +2562,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000240 ",
"Counter": "0,1,2,3",
@@ -2411,6 +2575,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000240 ",
"Counter": "0,1,2,3",
@@ -2423,6 +2588,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000240 ",
"Counter": "0,1,2,3",
@@ -2435,6 +2601,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000240 ",
"Counter": "0,1,2,3",
@@ -2447,6 +2614,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000240 ",
"Counter": "0,1,2,3",
@@ -2459,6 +2627,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000240 ",
"Counter": "0,1,2,3",
@@ -2471,6 +2640,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000240 ",
"Counter": "0,1,2,3",
@@ -2483,6 +2653,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch code reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000240 ",
"Counter": "0,1,2,3",
@@ -2495,6 +2666,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000240 ",
"Counter": "0,1,2,3",
@@ -2507,6 +2679,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch code reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000240 ",
"Counter": "0,1,2,3",
@@ -2519,6 +2692,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000240 ",
"Counter": "0,1,2,3",
@@ -2531,6 +2705,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020091 ",
"Counter": "0,1,2,3",
@@ -2543,6 +2718,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0091 ",
"Counter": "0,1,2,3",
@@ -2555,6 +2731,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000091 ",
"Counter": "0,1,2,3",
@@ -2567,6 +2744,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000091 ",
"Counter": "0,1,2,3",
@@ -2579,6 +2757,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000091 ",
"Counter": "0,1,2,3",
@@ -2591,6 +2770,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000091 ",
"Counter": "0,1,2,3",
@@ -2603,6 +2783,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000091 ",
"Counter": "0,1,2,3",
@@ -2615,6 +2796,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000091 ",
"Counter": "0,1,2,3",
@@ -2627,6 +2809,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000091 ",
"Counter": "0,1,2,3",
@@ -2639,6 +2822,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000091 ",
"Counter": "0,1,2,3",
@@ -2651,6 +2835,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000091 ",
"Counter": "0,1,2,3",
@@ -2663,6 +2848,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000091 ",
"Counter": "0,1,2,3",
@@ -2675,6 +2861,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000091 ",
"Counter": "0,1,2,3",
@@ -2687,6 +2874,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2000020122 ",
"Counter": "0,1,2,3",
@@ -2699,6 +2887,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the target was non-DRAM system address. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x20003c0122 ",
"Counter": "0,1,2,3",
@@ -2711,6 +2900,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000122 ",
"Counter": "0,1,2,3",
@@ -2723,6 +2913,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000122 ",
"Counter": "0,1,2,3",
@@ -2735,6 +2926,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000122 ",
"Counter": "0,1,2,3",
@@ -2747,6 +2939,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000122 ",
"Counter": "0,1,2,3",
@@ -2759,6 +2952,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000122 ",
"Counter": "0,1,2,3",
@@ -2771,6 +2965,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x2004000122 ",
"Counter": "0,1,2,3",
@@ -2783,6 +2978,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f84000122 ",
"Counter": "0,1,2,3",
@@ -2795,6 +2991,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 with no details on snoop-related information. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000122 ",
"Counter": "0,1,2,3",
@@ -2807,6 +3004,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000122 ",
"Counter": "0,1,2,3",
@@ -2819,6 +3017,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 with a snoop miss response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000122 ",
"Counter": "0,1,2,3",
@@ -2831,6 +3030,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000122 ",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/other.json b/tools/perf/pmu-events/arch/x86/broadwell/other.json
index edf14f0d0eaf..4f829c5febbe 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/other.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/other.json
@@ -10,16 +10,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts unhalted core cycles during which the thread is in rings 1, 2, or 3.",
- "EventCode": "0x5C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPL_CYCLES.RING123",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"PublicDescription": "This event counts when there is a transition from ring 1,2 or 3 to ring0.",
"EventCode": "0x5C",
"Counter": "0,1,2,3",
@@ -32,6 +22,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "This event counts unhalted core cycles during which the thread is in rings 1, 2, or 3.",
+ "EventCode": "0x5C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPL_CYCLES.RING123",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts cycles in which the L1 and L2 are locked due to a UC lock or split lock. A lock is asserted in case of locked memory access, due to noncacheable memory, locked operation that spans two cache lines, or a page walk from the noncacheable page table. L1D and L2 locks have a very high performance penalty and it is highly recommended to avoid such access.",
"EventCode": "0x63",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json b/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json
index 78913ae87703..97c5d0784c6c 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json
@@ -2,32 +2,42 @@
{
"PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. \nNotes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. \nCounting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
"EventCode": "0x00",
- "Counter": "Fixed counter 1",
+ "Counter": "Fixed counter 0",
"UMask": "0x1",
"EventName": "INST_RETIRED.ANY",
"SampleAfterValue": "2000003",
"BriefDescription": "Instructions retired from execution.",
- "CounterHTOff": "Fixed counter 1"
+ "CounterHTOff": "Fixed counter 0"
},
{
"PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"EventCode": "0x00",
- "Counter": "Fixed counter 2",
+ "Counter": "Fixed counter 1",
"UMask": "0x2",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"SampleAfterValue": "2000003",
"BriefDescription": "Core cycles when the thread is not in halt state",
- "CounterHTOff": "Fixed counter 2"
+ "CounterHTOff": "Fixed counter 1"
+ },
+ {
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 1",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "CounterHTOff": "Fixed counter 1"
},
{
"PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. \nNote: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. This event is clocked by base clock (100 Mhz) on Sandy Bridge. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
"EventCode": "0x00",
- "Counter": "Fixed counter 3",
+ "Counter": "Fixed counter 2",
"UMask": "0x3",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"SampleAfterValue": "2000003",
"BriefDescription": "Reference cycles when the core is not in halt state.",
- "CounterHTOff": "Fixed counter 3"
+ "CounterHTOff": "Fixed counter 2"
},
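[Editor's note, not part of the patch: the fixed-counter renumbering above never surfaces in user space; these fields only steer the perf tool's counter scheduling. A minimal sketch, assuming the usual Broadwell mapping of the generalized hardware events onto the fixed counters named above:

/* Sketch (assumption: the kernel schedules these onto fixed counters 0-2). */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>

static int open_hw_counter(unsigned long long config)
{
    struct perf_event_attr attr;

    memset(&attr, 0, sizeof(attr));
    attr.size = sizeof(attr);
    attr.type = PERF_TYPE_HARDWARE;
    attr.config = config;
    return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}

int main(void)
{
    int instr = open_hw_counter(PERF_COUNT_HW_INSTRUCTIONS);    /* INST_RETIRED.ANY */
    int cycles = open_hw_counter(PERF_COUNT_HW_CPU_CYCLES);     /* CPU_CLK_UNHALTED.THREAD */
    int ref = open_hw_counter(PERF_COUNT_HW_REF_CPU_CYCLES);    /* CPU_CLK_UNHALTED.REF_TSC */
    return (instr < 0 || cycles < 0 || ref < 0);
}
]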
{
"PublicDescription": "This event counts how many times the load operation got the true Block-on-Store blocking code preventing store forwarding. This includes cases when:\n - preceding store conflicts with the load (incomplete overlap);\n - store forwarding is impossible due to u-arch limitations;\n - preceding lock RMW operations are not forwarded;\n - store has the no-forward bit set (uncacheable/page-split/masked stores);\n - all-blocking stores are used (mostly, fences and port I/O);\nand others.\nThe most common case is a load blocked due to its address range overlapping with a preceding smaller uncompleted store. Note: This event does not take into account cases of out-of-SW-control (for example, SbTailHit), unknown physical STA, and cases of blocking loads on store due to being non-WB memory type or a lock. These cases are covered by other events.\nSee the table of not supported store forwards in the Optimization Guide.",
@@ -59,27 +69,38 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
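[Editor's note, not part of the patch: a hedged illustration of the most common blocking case listed in the description above, a wide load that partially overlaps a preceding narrower store. The function name is hypothetical.

/* Demo: a 4-byte load overlapping a preceding 1-byte store cannot be
 * satisfied by store forwarding (incomplete overlap), so the load stalls
 * until the store retires. buf must point at >= 4 valid bytes; compile
 * with optimization low enough that the store/load pair survives. */
#include <stdint.h>
#include <string.h>

uint32_t store_forward_block(uint8_t *buf)
{
    uint32_t wide;

    buf[1] = 0xab;                     /* 1-byte store into the middle ... */
    memcpy(&wide, buf, sizeof(wide));  /* ... 4-byte load overlapping it */
    return wide;
}
]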
{
- "PublicDescription": "This event counts the number of cycles during which Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the current thread. This also includes the cycles during which the Allocator is serving another thread.",
+ "PublicDescription": "Cycles checkpoints in Resource Allocation Table (RAT) are recovering from JEClear or machine clear.",
"EventCode": "0x0D",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "INT_MISC.RAT_STALL_CYCLES",
+ "UMask": "0x3",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles checkpoints in Resource Allocation Table (RAT) are recovering from JEClear or machine clear.",
"EventCode": "0x0D",
"Counter": "0,1,2,3",
"UMask": "0x3",
- "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "AnyThread": "1",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...)",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "This event counts the number of cycles during which Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the current thread. This also includes the cycles during which the Allocator is serving another thread.",
+ "EventCode": "0x0D",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "INT_MISC.RAT_STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts the number of Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS).",
"EventCode": "0x0E",
"Counter": "0,1,2,3",
@@ -90,6 +111,18 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "This event counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
+ "EventCode": "0x0E",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
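[Editor's note, not part of the patch: the Invert/CounterMask pair above is what turns the uop count into a stall-cycle count. A sketch of how these JSON fields land in the standard x86 raw perf encoding (event[7:0], umask[15:8], edge at bit 18, inv at bit 23, cmask in bits 31:24); plugged into attr.config with PERF_TYPE_RAW, this behaves like the entry above.

#include <stdint.h>

/* Standard x86 raw encoding used by the perf tool. */
static uint64_t x86_raw_config(uint64_t event, uint64_t umask,
                               uint64_t edge, uint64_t inv, uint64_t cmask)
{
    return event | (umask << 8) | (edge << 18) | (inv << 23) | (cmask << 24);
}

/* EventCode 0x0E, UMask 0x1, Invert 1, CounterMask 1 => cycles in which
 * zero uops were issued, i.e. the stall-cycle view of UOPS_ISSUED.ANY. */
static const uint64_t UOPS_ISSUED_STALL_CYCLES_CONFIG =
    0x0e | (0x1 << 8) | (1ULL << 23) | (1ULL << 24);
]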
+ {
"PublicDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive\n added by GSR u-arch.",
"EventCode": "0x0E",
"Counter": "0,1,2,3",
@@ -118,18 +151,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
- "EventCode": "0x0E",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_ISSUED.STALL_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
"PublicDescription": "This event counts the number of the divide operations executed. Uses edge-detect and a cmask value of 1 on ARITH.FPU_DIV_ACTIVE to get the number of the divide operations executed.",
"EventCode": "0x14",
"Counter": "0,1,2,3",
@@ -140,6 +161,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
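[Editor's note, not part of the patch: the edge-detect recipe described above, in the same raw-encoding convention. A sketch only; the umask value is assumed for illustration.

#include <stdint.h>

/* EventCode 0x14 with edge-detect and cmask 1 counts 0->1 transitions of
 * the FPU-divider-busy signal, i.e. one count per divide operation rather
 * than one per busy cycle (umask 0x1 assumed here). */
static const uint64_t ARITH_FPU_DIV_CONFIG =
    0x14 | (0x1 << 8) | (1ULL << 18) /* edge */ | (1ULL << 24) /* cmask=1 */;
]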
{
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This is a fixed-frequency event programmed to general counters. It counts when the core is unhalted at 100 Mhz.",
"EventCode": "0x3C",
"Counter": "0,1,2,3",
@@ -150,6 +191,36 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x3c",
"Counter": "0,1,2,3",
"UMask": "0x2",
@@ -159,6 +230,15 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by asm inspection of the nearby instructions.",
"EventCode": "0x4c",
"Counter": "0,1,2,3",
@@ -225,6 +305,18 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x5E",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts stalls occured due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
"EventCode": "0x87",
"Counter": "0,1,2,3",
@@ -405,6 +497,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0xa0",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts both taken and not taken speculative and retired mispredicted macro conditional branch instructions.",
"EventCode": "0x89",
"Counter": "0,1,2,3",
@@ -435,6 +536,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "This event counts the number of micro-operations cancelled after they were dispatched from the scheduler to the execution units when the total number of physical register read ports across all dispatch ports exceeds the read bandwidth of the physical register file. The SIMD_PRF subevent applies to the following instructions: VDPPS, DPPS, VPCMPESTRI, PCMPESTRI, VPCMPESTRM, PCMPESTRM, VFMADD*, VFMADDSUB*, VFMSUB*, VMSUBADD*, VFNMADD*, VFNMSUB*. See the Broadwell Optimization Guide for more information.",
+ "EventCode": "0xA0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "UOP_DISPATCHES_CANCELLED.SIMD_PRF",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Micro-op dispatches cancelled due to insufficient SIMD physical register file read ports",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -445,6 +556,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 0.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -455,6 +586,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 1.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -465,6 +616,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -475,6 +646,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -485,6 +676,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 4.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -495,6 +706,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 5.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -505,6 +736,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 6.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -515,6 +766,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts resource-related stall cycles. Reasons for stalls can be as follows:\n - *any* u-arch structure got full (LB, SB, RS, ROB, BOB, LM, Physical Register Reclaim Table (PRRT), or Physical History Table (PHT) slots)\n - *any* u-arch structure got empty (like INT/SIMD FreeLists)\n - FPU control word (FPCW), MXCSR\nand others. This counts cycles that the pipeline backend blocked uop delivery from the front end.",
"EventCode": "0xA2",
"Counter": "0,1,2,3",
@@ -566,15 +837,14 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Counts number of cycles the CPU has at least one pending demand load request missing the L1 data cache.",
"EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0x8",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "CounterMask": "8",
- "CounterHTOff": "2"
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PublicDescription": "Counts number of cycles the CPU has at least one pending demand load request (that is cycles with non-completed load waiting for its data from memory subsystem).",
@@ -588,17 +858,37 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"PublicDescription": "Counts number of cycles nothing is executed on any execution port.",
"EventCode": "0xA3",
"Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Total execution stalls",
+ "BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
"CounterMask": "4",
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Total execution stalls.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand* load request missing the L2 cache.(as a footprint) * includes also L1 HW prefetch requests that may or may not be required by demands.",
"EventCode": "0xA3",
"Counter": "0,1,2,3",
@@ -610,6 +900,16 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x5",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "CounterMask": "5",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand load request.",
"EventCode": "0xA3",
"Counter": "0,1,2,3",
@@ -621,6 +921,37 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x6",
+ "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts number of cycles the CPU has at least one pending demand load request missing the L1 data cache.",
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0x8",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "8",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0x8",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "8",
+ "CounterHTOff": "2"
+ },
+ {
"PublicDescription": "Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand load request missing the L1 data cache.",
"EventCode": "0xA3",
"Counter": "2",
@@ -632,7 +963,16 @@
"CounterHTOff": "2"
},
{
- "PublicDescription": "Number of Uops delivered by the LSD. ",
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0xc",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CounterMask": "12",
+ "CounterHTOff": "2"
+ },
+ {
"EventCode": "0xA8",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -642,6 +982,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_4_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Number of uops to be executed per-thread each cycle.",
"EventCode": "0xB1",
"Counter": "0,1,2,3",
@@ -652,6 +1012,58 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "This event counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"PublicDescription": "Number of uops executed from any thread.",
"EventCode": "0xB1",
"Counter": "0,1,2,3",
@@ -662,36 +1074,64 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
- "EventCode": "0xB1",
- "Invert": "1",
+ "EventCode": "0xb1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
- "EventCode": "0xC0",
+ "EventCode": "0xb1",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "Errata": "BDM61",
- "EventName": "INST_RETIRED.ANY_P",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "CounterMask": "2",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
- "EventCode": "0xC0",
+ "EventCode": "0xb1",
"Counter": "0,1,2,3",
"UMask": "0x2",
- "EventName": "INST_RETIRED.X87",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
"SampleAfterValue": "2000003",
- "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions:",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
+ "EventCode": "0xC0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "Errata": "BDM61",
+ "EventName": "INST_RETIRED.ANY_P",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -707,6 +1147,16 @@
"CounterHTOff": "1"
},
{
+ "PublicDescription": "This event counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
+ "EventCode": "0xC0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "INST_RETIRED.X87",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions:",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xC1",
"Counter": "0,1,2,3",
"UMask": "0x40",
@@ -717,29 +1167,18 @@
},
{
"PEBS": "1",
- "PublicDescription": "This event counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.",
"EventCode": "0xC2",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "UOPS_RETIRED.ALL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Actually retired uops.",
+ "BriefDescription": "Actually retired uops. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3,4,5,6,7",
"Data_LA": "1"
},
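[Editor's note, not part of the patch: several entries in this hunk are now flagged as precise ("uses PEBS"). A minimal sketch of requesting PEBS from user space via perf_event_attr.precise_ip, the syscall-level equivalent of the perf tool's :p/:pp event suffixes; the raw config mirrors the UOPS_RETIRED.ALL entry above.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>

static int open_pebs_uops_retired(void)
{
    struct perf_event_attr attr;

    memset(&attr, 0, sizeof(attr));
    attr.size = sizeof(attr);
    attr.type = PERF_TYPE_RAW;
    attr.config = 0x01c2;          /* UOPS_RETIRED.ALL: event 0xC2, umask 0x1 */
    attr.sample_period = 2000003;  /* SampleAfterValue above */
    attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR; /* Data_LA => data address */
    attr.precise_ip = 2;           /* request PEBS (':pp') */
    return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}
]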
{
- "PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of retirement slots used.",
- "EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Retirement slots used.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts cycles without actually retired uops.",
+ "PublicDescription": "This event counts cycles without actually retired uops.",
"EventCode": "0xC2",
"Invert": "1",
"Counter": "0,1,2,3",
@@ -763,6 +1202,17 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts the number of retirement slots used.",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Retirement slots used. (Precise Event - PEBS)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts both thread-specific (TS) and all-thread (AT) nukes.",
"EventCode": "0xC3",
"Counter": "0,1,2,3",
@@ -773,6 +1223,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts self-modifying code (SMC) detected, which causes a machine clear.",
"EventCode": "0xC3",
"Counter": "0,1,2,3",
@@ -793,50 +1254,73 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "This event counts all (macro) branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts conditional branch instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts conditional branch instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
"SampleAfterValue": "400009",
- "BriefDescription": "Conditional branch instructions retired.",
+ "BriefDescription": "Conditional branch instructions retired. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts both direct and indirect near call instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts both direct and indirect near call instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x2",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
"SampleAfterValue": "100007",
- "BriefDescription": "Direct and indirect near call instructions retired.",
+ "BriefDescription": "Direct and indirect near call instructions retired. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts all (macro) branch instructions retired.",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts both direct and indirect macro near call instructions retired (captured in ring 3).",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired.",
+ "UMask": "0x2",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3). (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PEBS": "2",
+ "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "Errata": "BDW98",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS)",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts return instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts return instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x8",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
"SampleAfterValue": "100007",
- "BriefDescription": "Return instructions retired.",
+ "BriefDescription": "Return instructions retired. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts not taken branch instructions retired.",
+ "PublicDescription": "This event counts not taken branch instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x10",
@@ -847,17 +1331,17 @@
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts taken branch instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts taken branch instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x20",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
"SampleAfterValue": "400009",
- "BriefDescription": "Taken branch instructions retired.",
+ "BriefDescription": "Taken branch instructions retired. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts far branch instructions retired.",
+ "PublicDescription": "This event counts far branch instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x40",
@@ -868,29 +1352,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "2",
- "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "Errata": "BDW98",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
- "SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS)",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted conditional branch instructions retired.",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BR_MISP_RETIRED.CONDITIONAL",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted conditional branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"PublicDescription": "This event counts all mispredicted macro branch instructions retired.",
"EventCode": "0xC5",
"Counter": "0,1,2,3",
@@ -902,13 +1363,13 @@
},
{
"PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts mispredicted conditional branch instructions retired.",
"EventCode": "0xC5",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "BR_MISP_RETIRED.RET",
- "SampleAfterValue": "100007",
- "BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "UMask": "0x1",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Mispredicted conditional branch instructions retired. (Precise Event - PEBS)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -923,164 +1384,36 @@
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "This event counts cases of saving new LBR records by hardware. This assumes proper enabling of LBRs and takes into account LBR filtering done by the LBR_SELECT register.",
- "EventCode": "0xCC",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Count cases of saving new LBR",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Thread cycles when thread is not in halt state",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0xa0",
- "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are exectuted in port 0.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are exectuted in port 1.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts mispredicted return instructions retired.",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
"UMask": "0x8",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are exectuted in port 4.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are exectuted in port 5.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are exectuted in port 6.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
+ "EventName": "BR_MISP_RETIRED.RET",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "This event counts the number of mispredicted ret instructions retired.(Precise Event)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PEBS": "1",
- "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken. (Precise Event - PEBS).",
"EventCode": "0xC5",
"Counter": "0,1,2,3",
"UMask": "0x20",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
"SampleAfterValue": "400009",
- "BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
+ "BriefDescription": "number of near branch instructions retired that were mispredicted and taken. (Precise Event - PEBS).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 1 uop was executed per-thread.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 2 uops were executed per-thread.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 3 uops were executed per-thread.",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB1",
+ "PublicDescription": "This event counts cases of saving new LBR records by hardware. This assumes proper enabling of LBRs and takes into account LBR filtering done by the LBR_SELECT register.",
+ "EventCode": "0xCC",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "UMask": "0x20",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Count cases of saving new LBR",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xe6",
@@ -1090,328 +1423,5 @@
"SampleAfterValue": "100003",
"BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0x8",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "CounterMask": "8",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Total execution stalls.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0xc",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
- "CounterMask": "12",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x5",
- "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
- "CounterMask": "5",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x6",
- "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "MACHINE_CLEARS.COUNT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of machine clears (nukes) of any type.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_4_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x5E",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "RS_EVENTS.EMPTY_END",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_ACTIVE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_0",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 0",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED_PORT.PORT_1",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "UOPS_EXECUTED_PORT.PORT_2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "UOPS_EXECUTED_PORT.PORT_3",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "UOPS_EXECUTED_PORT.PORT_4",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "UOPS_EXECUTED_PORT.PORT_5",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 5",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "UOPS_EXECUTED_PORT.PORT_6",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "UOPS_EXECUTED_PORT.PORT_7",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 7",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the number of micro-operations cancelled after they were dispatched from the scheduler to the execution units when the total number of physical register read ports across all dispatch ports exceeds the read bandwidth of the physical register file. The SIMD_PRF subevent applies to the following instructions: VDPPS, DPPS, VPCMPESTRI, PCMPESTRI, VPCMPESTRM, PCMPESTRM, VFMADD*, VFMADDSUB*, VFMSUB*, VMSUBADD*, VFNMADD*, VFNMSUB*. See the Broadwell Optimization Guide for more information.",
- "EventCode": "0xA0",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "EventName": "UOP_DISPATCHES_CANCELLED.SIMD_PRF",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Micro-op dispatches cancelled due to insufficient SIMD physical register file read ports",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0x00",
- "Counter": "Fixed counter 2",
- "UMask": "0x2",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "CounterHTOff": "Fixed counter 2"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x0D",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "AnyThread": "1",
- "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
]
\ No newline at end of file
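Each entry above maps onto a raw hardware counter encoding: for these Intel core events the PERF_TYPE_RAW config is (UMask << 8) | EventCode, with Invert, EdgeDetect and CounterMask occupying higher bits of IA32_PERFEVTSELx. A minimal user-space sketch (not part of this patch; the event choice is illustrative) that counts UOPS_RETIRED.RETIRE_SLOTS (EventCode 0xC2, UMask 0x2) via perf_event_open(2):

#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
    struct perf_event_attr attr;
    uint64_t count;
    int fd;

    memset(&attr, 0, sizeof(attr));
    attr.size = sizeof(attr);
    attr.type = PERF_TYPE_RAW;
    attr.config = (0x2ULL << 8) | 0xC2;  /* UMask << 8 | EventCode */
    attr.disabled = 1;
    attr.exclude_kernel = 1;

    /* perf_event_open has no glibc wrapper; measure this thread, any CPU */
    fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
    if (fd < 0) {
        perror("perf_event_open");
        return 1;
    }

    ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
    for (volatile long i = 0; i < 1000000; i++)
        ;  /* workload under measurement */
    ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

    if (read(fd, &count, sizeof(count)) == sizeof(count))
        printf("UOPS_RETIRED.RETIRE_SLOTS: %llu\n",
               (unsigned long long)count);
    close(fd);
    return 0;
}

Once these JSON tables are compiled into perf, the same counter is reachable by name, e.g. perf stat -e uops_retired.retire_slots, or directly as a raw event with perf stat -e r02c2.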
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/virtual-memory.json b/tools/perf/pmu-events/arch/x86/broadwell/virtual-memory.json
index 4301e6fbc5eb..2a015e4c7e21 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/virtual-memory.json
@@ -44,6 +44,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe",
+ "Errata": "BDM69",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts the number of cycles while PMH is busy with the page walk.",
"EventCode": "0x08",
"Counter": "0,1,2,3",
@@ -73,6 +83,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x60",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts store misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).",
"EventCode": "0x49",
"Counter": "0,1,2,3",
@@ -117,6 +136,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe",
+ "Errata": "BDM69",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts the number of cycles while PMH is busy with the page walk.",
"EventCode": "0x49",
"Counter": "0,1,2,3",
@@ -146,6 +175,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x60",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts cycles for an extended page table walk. The Extended Page directory cache differs from standard TLB caches by the operating system that use it. Virtual machine operating systems use the extended page directory cache, while guest operating systems use the standard TLB caches.",
"EventCode": "0x4F",
"Counter": "0,1,2,3",
@@ -200,6 +238,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe",
+ "Errata": "BDM69",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts the number of cycles while PMH is busy with the page walk.",
"EventCode": "0x85",
"Counter": "0,1,2,3",
@@ -229,6 +277,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x60",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts the number of flushes of the big or small ITLB pages. Counting include both TLB Flush (covering all sets) and TLB Set Clear (set-specific).",
"EventCode": "0xAE",
"Counter": "0,1,2,3",
@@ -251,61 +308,61 @@
{
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x21",
+ "UMask": "0x12",
"Errata": "BDM69, BDM98",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of ITLB page walker hits in the L1+FB.",
+ "BriefDescription": "Number of DTLB page walker hits in the L2.",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x12",
+ "UMask": "0x14",
"Errata": "BDM69, BDM98",
- "EventName": "PAGE_WALKER_LOADS.DTLB_L2",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of DTLB page walker hits in the L2.",
+ "BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP.",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x22",
+ "UMask": "0x18",
"Errata": "BDM69, BDM98",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L2",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of ITLB page walker hits in the L2.",
+ "BriefDescription": "Number of DTLB page walker hits in Memory.",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x14",
+ "UMask": "0x21",
"Errata": "BDM69, BDM98",
- "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP.",
+ "BriefDescription": "Number of ITLB page walker hits in the L1+FB.",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x24",
+ "UMask": "0x22",
"Errata": "BDM69, BDM98",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP.",
+ "BriefDescription": "Number of ITLB page walker hits in the L2.",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x18",
+ "UMask": "0x24",
"Errata": "BDM69, BDM98",
- "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of DTLB page walker hits in Memory.",
+ "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP.",
"CounterHTOff": "0,1,2,3"
},
{
@@ -327,62 +384,5 @@
"SampleAfterValue": "100007",
"BriefDescription": "STLB flush attempts",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0xe",
- "Errata": "BDM69",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x60",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0xe",
- "Errata": "BDM69",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
- "SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x60",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0xe",
- "Errata": "BDM69",
- "EventName": "ITLB_MISSES.WALK_COMPLETED",
- "SampleAfterValue": "100003",
- "BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x60",
- "EventName": "ITLB_MISSES.STLB_HIT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
]
\ No newline at end of file
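A note on the combined UMask values introduced above: going by the per-page-size sub-entries elsewhere in this file, UMask 0xe of the WALK_COMPLETED events is the OR of the 4K (0x2), 2M/4M (0x4) and 1G (0x8) masks, and UMask 0x60 of the STLB_HIT events ORs the 4K (0x20) and 2M (0x40) masks, so a single counter covers all page sizes. A small sketch (not part of this patch) composing the corresponding raw perf event strings:

#include <stdint.h>
#include <stdio.h>

/* Event select in bits 0-7 of the raw config, unit mask in bits 8-15. */
static uint64_t raw_config(uint8_t event_code, uint8_t umask)
{
    return ((uint64_t)umask << 8) | event_code;
}

int main(void)
{
    /* DTLB_LOAD_MISSES.WALK_COMPLETED: UMask 0xe = 4K | 2M/4M | 1G */
    uint8_t walk_completed = 0x2 | 0x4 | 0x8;
    /* DTLB_LOAD_MISSES.STLB_HIT: UMask 0x60 = STLB_HIT_4K | STLB_HIT_2M */
    uint8_t stlb_hit = 0x20 | 0x40;

    printf("perf stat -e r%04llx   # WALK_COMPLETED\n",
           (unsigned long long)raw_config(0x08, walk_completed));
    printf("perf stat -e r%04llx   # STLB_HIT\n",
           (unsigned long long)raw_config(0x08, stlb_hit));
    return 0;
}

Note that the WALK_COMPLETED encodings carry the BDM69 erratum tag above, so their counts should be read with that erratum in mind.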
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/cache.json b/tools/perf/pmu-events/arch/x86/broadwellde/cache.json
index 36fe398029b9..bf243fe2a0ec 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/cache.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/cache.json
@@ -11,11 +11,28 @@
},
{
"EventCode": "0x24",
- "UMask": "0x41",
- "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "UMask": "0x22",
+ "BriefDescription": "RFO requests that miss L2 cache.",
"Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
- "PublicDescription": "This event counts the number of demand Data Read requests that hit L2 cache. Only not rejected loads are counted.",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x24",
+ "BriefDescription": "L2 cache misses when fetching instructions.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x27",
+ "BriefDescription": "Demand requests that miss L2 cache.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
"SampleAfterValue": "200003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -31,6 +48,43 @@
},
{
"EventCode": "0x24",
+ "UMask": "0x3f",
+ "BriefDescription": "All requests that miss L2 cache.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.MISS",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x41",
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "PublicDescription": "This event counts the number of demand Data Read requests that hit L2 cache. Only not rejected loads are counted.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x42",
+ "BriefDescription": "RFO requests that hit L2 cache.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x44",
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
"UMask": "0x50",
"BriefDescription": "L2 prefetch requests that hit L2 cache",
"Counter": "0,1,2,3",
@@ -71,6 +125,15 @@
},
{
"EventCode": "0x24",
+ "UMask": "0xe7",
+ "BriefDescription": "Demand requests to L2 cache.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
"UMask": "0xf8",
"BriefDescription": "Requests from L2 hardware prefetchers",
"Counter": "0,1,2,3",
@@ -80,6 +143,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x24",
+ "UMask": "0xff",
+ "BriefDescription": "All L2 requests.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x27",
"UMask": "0x50",
"BriefDescription": "Not rejected writebacks that hit L2 cache",
@@ -131,6 +203,27 @@
"CounterHTOff": "2"
},
{
+ "EventCode": "0x48",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "Counter": "2",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "AnyThread": "1",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x48",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
+ "Counter": "0,1,2,3",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x51",
"UMask": "0x1",
"BriefDescription": "L1D data line replacements",
@@ -153,12 +246,35 @@
},
{
"EventCode": "0x60",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "CounterMask": "1",
+ "Errata": "BDM76",
+ "PublicDescription": "This event counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "CounterMask": "6",
+ "Errata": "BDM76",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
"UMask": "0x2",
"BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
"Errata": "BDM76",
- "PublicDescription": "This event counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The \"Offcore outstanding\" state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "PublicDescription": "This event counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The Offcore outstanding state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -175,24 +291,24 @@
},
{
"EventCode": "0x60",
- "UMask": "0x8",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "UMask": "0x4",
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "CounterMask": "1",
"Errata": "BDM76",
- "PublicDescription": "This event counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "PublicDescription": "This event counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The Offcore outstanding state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x60",
- "UMask": "0x1",
- "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "UMask": "0x8",
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
- "CounterMask": "1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
"Errata": "BDM76",
- "PublicDescription": "This event counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
+ "PublicDescription": "This event counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -209,18 +325,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x60",
- "UMask": "0x4",
- "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
- "CounterMask": "1",
- "Errata": "BDM76",
- "PublicDescription": "This event counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The \"Offcore outstanding\" state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0x63",
"UMask": "0x2",
"BriefDescription": "Cycles when L1D is locked",
@@ -266,7 +370,7 @@
"BriefDescription": "Demand and prefetch data reads",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
- "PublicDescription": "This event counts the demand and prefetch data reads. All Core Data Reads include cacheable \"Demands\" and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
+ "PublicDescription": "This event counts the demand and prefetch data reads. All Core Data Reads include cacheable Demands and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -281,26 +385,35 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0xD0",
"UMask": "0x11",
- "BriefDescription": "Retired load uops that miss the STLB.",
+ "BriefDescription": "Retired load uops that miss the STLB. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x12",
- "BriefDescription": "Retired store uops that miss the STLB.",
+ "BriefDescription": "Retired store uops that miss the STLB. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts store uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts store uops true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
"SampleAfterValue": "100003",
"L1_Hit_Indication": "1",
"CounterHTOff": "0,1,2,3"
@@ -308,37 +421,37 @@
{
"EventCode": "0xD0",
"UMask": "0x21",
- "BriefDescription": "Retired load uops with locked access.",
+ "BriefDescription": "Retired load uops with locked access. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"Errata": "BDM35",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts load uops with locked access retired to the architected path.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops with locked access retired to the architected path.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x41",
- "BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired load uops that split across a cacheline boundary.(Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x42",
- "BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"SampleAfterValue": "100003",
"L1_Hit_Indication": "1",
"CounterHTOff": "0,1,2,3"
@@ -346,24 +459,24 @@
{
"EventCode": "0xD0",
"UMask": "0x81",
- "BriefDescription": "All retired load uops.",
+ "BriefDescription": "All retired load uops. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
- "PublicDescription": "This event counts load uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement. This event also counts SW prefetches.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event ?ounts AVX-256bit load/store double-pump memory uops as a single uop at retirement. This event also counts SW prefetches.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x82",
- "BriefDescription": "All retired store uops.",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
- "PublicDescription": "This event counts store uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts store uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event ?ounts AVX-256bit load/store double-pump memory uops as a single uop at retirement.",
"SampleAfterValue": "2000003",
"L1_Hit_Indication": "1",
"CounterHTOff": "0,1,2,3"
@@ -371,69 +484,69 @@
{
"EventCode": "0xD1",
"UMask": "0x1",
- "BriefDescription": "Retired load uops with L1 cache hits as data sources.",
+ "BriefDescription": "Retired load uops with L1 cache hits as data sources. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the nearest-level (L1) cache.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load. This event also counts SW prefetches independent of the actual data source.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data source were hits in the nearest-level (L1) cache.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load. This event also counts SW prefetches independent of the actual data source.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x2",
- "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
+ "BriefDescription": "Retired load uops with L2 cache hits as data sources. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
"Errata": "BDM35",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the mid-level (L2) cache.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were hits in the mid-level (L2) cache.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x4",
- "BriefDescription": "Retired load uops which data sources were data hits in L3 without snoops required.",
+ "BriefDescription": "Hit in last-level (L3) cache. Excludes Unknown data-source. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
"Errata": "BDM100",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were data hits in the last-level (L3) cache without snoops required.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were data hits in the last-level (L3) cache without snoops required.",
"SampleAfterValue": "50021",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x8",
- "BriefDescription": "Retired load uops misses in L1 cache as data sources.",
+ "BriefDescription": "Retired load uops misses in L1 cache as data sources. Uses PEBS.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were misses in the nearest-level (L1) cache. Counting excludes unknown and UC data source.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were misses in the nearest-level (L1) cache. Counting excludes unknown and UC data source.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x10",
- "BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
+ "BriefDescription": "Retired load uops with L2 cache misses as data sources. Uses PEBS.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were misses in the mid-level (L2) cache. Counting excludes unknown and UC data source.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were misses in the mid-level (L2) cache. Counting excludes unknown and UC data source.",
"SampleAfterValue": "50021",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x20",
- "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
+ "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source. (Precise Event - PEBS).",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -445,77 +558,112 @@
{
"EventCode": "0xD1",
"UMask": "0x40",
- "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
+ "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD2",
"UMask": "0x1",
- "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS",
"Errata": "BDM100",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.",
"SampleAfterValue": "20011",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD2",
"UMask": "0x2",
- "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT",
"Errata": "BDM100",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were L3 hit and a cross-core snoop hit in the on-pkg core cache.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were L3 hit and a cross-core snoop hit in the on-pkg core cache.",
"SampleAfterValue": "20011",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD2",
"UMask": "0x4",
- "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM",
"Errata": "BDM100",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were HitM responses from a core on same socket (shared L3).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were HitM responses from a core on same socket (shared L3).",
"SampleAfterValue": "20011",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD2",
"UMask": "0x8",
- "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required.",
+ "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_NONE",
"Errata": "BDM100",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD3",
"UMask": "0x1",
- "BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
"Errata": "BDE70, BDM100",
- "PublicDescription": "Retired load uop whose Data Source was: local DRAM either Snoop not needed or Snoop Miss (RspI).",
+ "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches. This is a precise event.",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD3",
+ "UMask": "0x4",
+ "BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI) (Precise Event)",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM",
+ "Errata": "BDE70",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD3",
+ "UMask": "0x10",
+ "BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM (Precise Event)",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM",
+ "Errata": "BDE70",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD3",
+ "UMask": "0x20",
+ "BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache (Precise Event)",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD",
+ "Errata": "BDE70",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
@@ -657,118 +805,5 @@
"PublicDescription": "This event counts the number of split locks in the super queue.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x42",
- "BriefDescription": "RFO requests that hit L2 cache.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.RFO_HIT",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x22",
- "BriefDescription": "RFO requests that miss L2 cache.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.RFO_MISS",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x44",
- "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.CODE_RD_HIT",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x24",
- "BriefDescription": "L2 cache misses when fetching instructions.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.CODE_RD_MISS",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x27",
- "BriefDescription": "Demand requests that miss L2 cache.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0xe7",
- "BriefDescription": "Demand requests to L2 cache.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x3f",
- "BriefDescription": "All requests that miss L2 cache.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.MISS",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0xff",
- "BriefDescription": "All L2 requests.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.REFERENCES",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0x60",
- "UMask": "0x1",
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
- "CounterMask": "6",
- "Errata": "BDM76",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x48",
- "UMask": "0x1",
- "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
- "Counter": "2",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
- "AnyThread": "1",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0x48",
- "UMask": "0x2",
- "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
- "Counter": "0,1,2,3",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
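The MEM_LOAD_UOPS_* entries above show how these JSON fields map onto the hardware: EventCode and UMask form the raw event selector, and "PEBS": "1" marks events that support precise sampling. As a minimal sketch (not part of this patch, and assuming the standard x86 raw encoding of event in config bits 0-7 and umask in bits 8-15), the following C program counts MEM_LOAD_UOPS_RETIRED.L2_MISS (EventCode 0xD1, UMask 0x10 above) through perf_event_open(2):

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <string.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            struct perf_event_attr attr;
            uint64_t count;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_RAW;
            attr.config = 0xd1 | (0x10 << 8);  /* EventCode | (UMask << 8) */
            attr.disabled = 1;
            attr.exclude_kernel = 1;

            fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
            if (fd < 0) {
                    perror("perf_event_open");
                    return 1;
            }
            ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
            /* ... workload under measurement ... */
            ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
            read(fd, &count, sizeof(count));
            printf("L2-miss load uops retired: %llu\n", (unsigned long long)count);
            close(fd);
            return 0;
    }

The perf tool accepts the same encoding directly (perf stat -e cpu/event=0xd1,umask=0x10/), and for sampling a nonzero precise_ip in perf_event_attr requests PEBS on events flagged "PEBS": "1".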
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/floating-point.json b/tools/perf/pmu-events/arch/x86/broadwellde/floating-point.json
index 4ae1ea24f22f..d7b9d9c9c518 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/floating-point.json
@@ -6,7 +6,7 @@
"Counter": "0,1,2,3",
"EventName": "OTHER_ASSISTS.AVX_TO_SSE",
"Errata": "BDM30",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of transitions from AVX-256 to legacy SSE when penalty is applicable.",
+ "PublicDescription": "This event counts the number of transitions from AVX-256 to legacy SSE when penalty is applicable.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -17,7 +17,7 @@
"Counter": "0,1,2,3",
"EventName": "OTHER_ASSISTS.SSE_TO_AVX",
"Errata": "BDM30",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of transitions from legacy SSE to AVX-256 when penalty is applicable.",
+ "PublicDescription": "This event counts the number of transitions from legacy SSE to AVX-256 when penalty is applicable.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -25,7 +25,6 @@
"EventCode": "0xC7",
"UMask": "0x1",
"BriefDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired. Each count represents 1 computation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
"SampleAfterValue": "2000003",
@@ -35,7 +34,6 @@
"EventCode": "0xC7",
"UMask": "0x2",
"BriefDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired. Each count represents 1 computation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
"SampleAfterValue": "2000003",
@@ -43,9 +41,17 @@
},
{
"EventCode": "0xC7",
+ "UMask": "0x3",
+ "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
"UMask": "0x4",
"BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired. Each count represents 2 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
"SampleAfterValue": "2000003",
@@ -55,7 +61,6 @@
"EventCode": "0xC7",
"UMask": "0x8",
"BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
"SampleAfterValue": "2000003",
@@ -65,19 +70,54 @@
"EventCode": "0xC7",
"UMask": "0x10",
"BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0xC7",
+ "UMask": "0x15",
+ "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.DOUBLE",
+ "SampleAfterValue": "2000006",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xc7",
+ "UMask": "0x20",
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired. Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
+ "UMask": "0x2a",
+ "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.SINGLE",
+ "SampleAfterValue": "2000005",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
+ "UMask": "0x3c",
+ "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired. Applies to SSE* and AVX*, packed, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.PACKED",
+ "SampleAfterValue": "2000004",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0xCA",
"UMask": "0x2",
"BriefDescription": "Number of X87 assists due to output value.",
"Counter": "0,1,2,3",
"EventName": "FP_ASSIST.X87_OUTPUT",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.",
+ "PublicDescription": "This event counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -87,7 +127,7 @@
"BriefDescription": "Number of X87 assists due to input value.",
"Counter": "0,1,2,3",
"EventName": "FP_ASSIST.X87_INPUT",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts x87 floating point (FP) micro-code assist (invalid operation, denormal operand, SNaN operand) when the input value (one of the source operands to an FP instruction) is invalid.",
+ "PublicDescription": "This event counts x87 floating point (FP) micro-code assist (invalid operation, denormal operand, SNaN operand) when the input value (one of the source operands to an FP instruction) is invalid.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -97,7 +137,7 @@
"BriefDescription": "Number of SIMD FP assists due to Output values",
"Counter": "0,1,2,3",
"EventName": "FP_ASSIST.SIMD_OUTPUT",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.",
+ "PublicDescription": "This event counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -107,7 +147,7 @@
"BriefDescription": "Number of SIMD FP assists due to input values",
"Counter": "0,1,2,3",
"EventName": "FP_ASSIST.SIMD_INPUT",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts any input SSE* FP assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.",
+ "PublicDescription": "This event counts any input SSE* FP assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -121,51 +161,5 @@
"PublicDescription": "This event counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle the event increments by 1.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xc7",
- "UMask": "0x20",
- "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired. Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC7",
- "UMask": "0x3",
- "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.SCALAR",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC7",
- "UMask": "0x3c",
- "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired. Applies to SSE* and AVX*, packed, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.PACKED",
- "SampleAfterValue": "2000004",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC7",
- "UMask": "0x2a",
- "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.SINGLE",
- "SampleAfterValue": "2000005",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC7",
- "UMask": "0x15",
- "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.DOUBLE",
- "SampleAfterValue": "2000006",
- "CounterHTOff": "0,1,2,3"
}
] \ No newline at end of file
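The "Each count represents N computations" notes in the FP_ARITH_INST_RETIRED descriptions above determine the usual weights for estimating retired floating-point operations; FM(N)ADD/SUB instructions need no extra factor because, as the descriptions state, they already count twice. A hedged reading of those weights:

    FLOPs ~= FP_ARITH_INST_RETIRED.SCALAR_SINGLE
           + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE
           + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE
           + 4 * FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE
           + 4 * FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE
           + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE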
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/frontend.json b/tools/perf/pmu-events/arch/x86/broadwellde/frontend.json
index 06bf0a40e568..72781e1e3362 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/frontend.json
@@ -15,80 +15,49 @@
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
"Counter": "0,1,2,3",
"EventName": "IDQ.MITE_UOPS",
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
- "UMask": "0x8",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.DSB_UOPS",
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x10",
- "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_DSB_UOPS",
- "PublicDescription": "This event counts the number of uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x20",
- "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_MITE_UOPS",
- "PublicDescription": "This event counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
"Counter": "0,1,2,3",
- "EventName": "IDQ.MS_UOPS",
- "PublicDescription": "This event counts the total number of uops delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may \"bypass\" the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "EventName": "IDQ.MITE_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "UMask": "0x8",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
"Counter": "0,1,2,3",
- "EventName": "IDQ.MS_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may \"bypass\" the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "EventName": "IDQ.DSB_UOPS",
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
- "UMask": "0x4",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
"Counter": "0,1,2,3",
- "EventName": "IDQ.MITE_CYCLES",
+ "EventName": "IDQ.DSB_CYCLES",
"CounterMask": "1",
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
- "UMask": "0x8",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
+ "UMask": "0x10",
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"Counter": "0,1,2,3",
- "EventName": "IDQ.DSB_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "EventName": "IDQ.MS_DSB_UOPS",
+ "PublicDescription": "This event counts the number of uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -99,7 +68,7 @@
"Counter": "0,1,2,3",
"EventName": "IDQ.MS_DSB_CYCLES",
"CounterMask": "1",
- "PublicDescription": "This event counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -111,7 +80,7 @@
"Counter": "0,1,2,3",
"EventName": "IDQ.MS_DSB_OCCUR",
"CounterMask": "1",
- "PublicDescription": "This event counts the number of deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while the Microcode Sequencer (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts the number of deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -122,7 +91,7 @@
"Counter": "0,1,2,3",
"EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
"CounterMask": "4",
- "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -133,7 +102,17 @@
"Counter": "0,1,2,3",
"EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
"CounterMask": "1",
- "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x20",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "PublicDescription": "This event counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -144,7 +123,7 @@
"Counter": "0,1,2,3",
"EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
"CounterMask": "4",
- "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -155,7 +134,39 @@
"Counter": "0,1,2,3",
"EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
"CounterMask": "1",
- "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x30",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_UOPS",
+ "PublicDescription": "This event counts the total number of uops delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may bypass the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x30",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may bypass the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "UMask": "0x30",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_SWITCHES",
+ "CounterMask": "1",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -165,7 +176,7 @@
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
"Counter": "0,1,2,3",
"EventName": "IDQ.MITE_ALL_UOPS",
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -205,7 +216,7 @@
"BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
"Counter": "0,1,2,3",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
- "PublicDescription": "This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding ?4 ? x? when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when:\n a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread;\n b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions); \n c. Instruction Decode Queue (IDQ) delivers four uops.",
+ "PublicDescription": "This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4 x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when:\n a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread;\n b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions); \n c. Instruction Decode Queue (IDQ) delivers four uops.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
@@ -268,18 +279,7 @@
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
"Counter": "0,1,2,3",
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
- "PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 0?2 cycles.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EdgeDetect": "1",
- "EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_SWITCHES",
- "CounterMask": "1",
+ "PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
}
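The new IDQ.MS_SWITCHES entry above combines EdgeDetect with a CounterMask, turning a cycles event (IDQ.MS_CYCLES) into an occurrence count. A small sketch, assuming the IA32_PERFEVTSELx bit layout that perf's x86 core PMU format exposes (umask at bit 8, edge at bit 18, inv at bit 23, cmask at bits 24-31), shows how those JSON fields combine into one raw config value:

    #include <stdint.h>
    #include <stdio.h>

    /* Assemble a raw perf config from the JSON fields of an event entry. */
    static uint64_t raw_config(uint64_t event, uint64_t umask,
                               uint64_t cmask, int edge, int inv)
    {
            return event | (umask << 8) | ((uint64_t)edge << 18) |
                   ((uint64_t)inv << 23) | (cmask << 24);
    }

    int main(void)
    {
            /* IDQ.MS_SWITCHES: EventCode 0x79, UMask 0x30, CounterMask 1,
             * EdgeDetect 1 -- equivalent to
             * perf stat -e cpu/event=0x79,umask=0x30,edge,cmask=1/ */
            printf("IDQ.MS_SWITCHES raw config: 0x%llx\n",
                   (unsigned long long)raw_config(0x79, 0x30, 1, 1, 0));
            return 0;
    }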
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/memory.json b/tools/perf/pmu-events/arch/x86/broadwellde/memory.json
index cfa1e5876ec3..e44f73c24ac8 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/memory.json
@@ -95,7 +95,6 @@
"BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed. Since this is the count of execution, it may not always cause a transactional abort.",
"Counter": "0,1,2,3",
"EventName": "TX_EXEC.MISC1",
- "PublicDescription": "Unfriendly TSX abort triggered by a flowmarker.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -171,11 +170,11 @@
{
"EventCode": "0xc8",
"UMask": "0x4",
- "BriefDescription": "Number of times HLE abort was triggered",
+ "BriefDescription": "Number of times HLE abort was triggered (PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "HLE_RETIRED.ABORTED",
- "PublicDescription": "Number of times HLE abort was triggered.",
+ "PublicDescription": "Number of times HLE abort was triggered (PEBS).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -252,11 +251,11 @@
{
"EventCode": "0xc9",
"UMask": "0x4",
- "BriefDescription": "Number of times RTM abort was triggered",
+ "BriefDescription": "Number of times RTM abort was triggered (PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "RTM_RETIRED.ABORTED",
- "PublicDescription": "Number of times RTM abort was triggered .",
+ "PublicDescription": "Number of times RTM abort was triggered (PEBS).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/other.json b/tools/perf/pmu-events/arch/x86/broadwellde/other.json
index 718fcb1db2ee..4475249ea9da 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/other.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/other.json
@@ -10,16 +10,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x5C",
- "UMask": "0x2",
- "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
- "Counter": "0,1,2,3",
- "EventName": "CPL_CYCLES.RING123",
- "PublicDescription": "This event counts unhalted core cycles during which the thread is in rings 1, 2, or 3.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EdgeDetect": "1",
"EventCode": "0x5C",
"UMask": "0x1",
@@ -32,6 +22,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x5C",
+ "UMask": "0x2",
+ "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
+ "Counter": "0,1,2,3",
+ "EventName": "CPL_CYCLES.RING123",
+ "PublicDescription": "This event counts unhalted core cycles during which the thread is in rings 1, 2, or 3.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x63",
"UMask": "0x1",
"BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json b/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json
index 02b4e1035f2d..920c89da9111 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json
@@ -3,31 +3,41 @@
"EventCode": "0x00",
"UMask": "0x1",
"BriefDescription": "Instructions retired from execution.",
- "Counter": "Fixed counter 1",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. \nNotes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. \nCounting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 1"
+ "CounterHTOff": "Fixed counter 0"
},
{
"EventCode": "0x00",
"UMask": "0x2",
"BriefDescription": "Core cycles when the thread is not in halt state",
- "Counter": "Fixed counter 2",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 2"
+ "CounterHTOff": "Fixed counter 1"
+ },
+ {
+ "EventCode": "0x00",
+ "UMask": "0x2",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "Fixed counter 1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "Fixed counter 1"
},
{
"EventCode": "0x00",
"UMask": "0x3",
"BriefDescription": "Reference cycles when the core is not in halt state.",
- "Counter": "Fixed counter 3",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. \nNote: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. This event is clocked by base clock (100 Mhz) on Sandy Bridge. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 3"
+ "CounterHTOff": "Fixed counter 2"
},
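The renumbering above moves the three architectural events onto fixed counters 0-2, and their descriptions note that each is counted on a dedicated fixed counter, leaving the programmable counters free. As a sketch, and assuming the kernel's usual mapping of these events to the generic PERF_COUNT_HW_* identifiers (an assumption, not something this patch states), all three can be read together as one event group:

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <string.h>
    #include <stdint.h>
    #include <stdio.h>

    static int open_hw(uint64_t config, int group_fd)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_HARDWARE;
            attr.config = config;
            attr.exclude_kernel = 1;
            if (group_fd == -1) {           /* group leader */
                    attr.disabled = 1;
                    attr.read_format = PERF_FORMAT_GROUP;
            }
            return syscall(__NR_perf_event_open, &attr, 0, -1, group_fd, 0);
    }

    int main(void)
    {
            struct { uint64_t nr, val[3]; } buf;  /* PERF_FORMAT_GROUP layout */
            int lead = open_hw(PERF_COUNT_HW_INSTRUCTIONS, -1);  /* INST_RETIRED.ANY */

            if (lead < 0)
                    return 1;
            open_hw(PERF_COUNT_HW_CPU_CYCLES, lead);      /* CPU_CLK_UNHALTED.THREAD */
            open_hw(PERF_COUNT_HW_REF_CPU_CYCLES, lead);  /* CPU_CLK_UNHALTED.REF_TSC */

            ioctl(lead, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
            /* ... workload ... */
            ioctl(lead, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);

            read(lead, &buf, sizeof(buf));
            printf("IPC: %.2f\n", (double)buf.val[0] / (double)buf.val[1]);
            return 0;
    }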
{
"EventCode": "0x03",
@@ -60,22 +70,33 @@
},
{
"EventCode": "0x0D",
- "UMask": "0x8",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread",
+ "UMask": "0x3",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
"Counter": "0,1,2,3",
- "EventName": "INT_MISC.RAT_STALL_CYCLES",
- "PublicDescription": "This event counts the number of cycles during which Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the current thread. This also includes the cycles during which the Allocator is serving another thread.",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "Cycles checkpoints in Resource Allocation Table (RAT) are recovering from JEClear or machine clear.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x0D",
"UMask": "0x3",
- "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...)",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
"Counter": "0,1,2,3",
- "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+ "AnyThread": "1",
"CounterMask": "1",
- "PublicDescription": "Cycles checkpoints in Resource Allocation Table (RAT) are recovering from JEClear or machine clear.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0D",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread",
+ "Counter": "0,1,2,3",
+ "EventName": "INT_MISC.RAT_STALL_CYCLES",
+ "PublicDescription": "This event counts the number of cycles during which Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the current thread. This also includes the cycles during which the Allocator is serving another thread.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -90,6 +111,18 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "Invert": "1",
+ "EventCode": "0x0E",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0x0E",
"UMask": "0x10",
"BriefDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive; added by GSR u-arch.",
@@ -118,18 +151,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "Invert": "1",
- "EventCode": "0x0E",
- "UMask": "0x1",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_ISSUED.STALL_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "This event counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
"EventCode": "0x14",
"UMask": "0x1",
"BriefDescription": "Cycles when divider is busy executing divide operations",
@@ -141,6 +162,26 @@
},
{
"EventCode": "0x3C",
+ "UMask": "0x0",
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x0",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
"UMask": "0x1",
"BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
"Counter": "0,1,2,3",
@@ -150,6 +191,36 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x3c",
"UMask": "0x2",
"BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
@@ -159,6 +230,15 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0x3C",
+ "UMask": "0x2",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x4c",
"UMask": "0x1",
"BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch",
@@ -225,6 +305,18 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EdgeDetect": "1",
+ "Invert": "1",
+ "EventCode": "0x5E",
+ "UMask": "0x1",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "Counter": "0,1,2,3",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "CounterMask": "1",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x87",
"UMask": "0x1",
"BriefDescription": "Stalls caused by changing prefix length of the instruction.",
@@ -406,6 +498,15 @@
},
{
"EventCode": "0x89",
+ "UMask": "0xa0",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
"UMask": "0xc1",
"BriefDescription": "Speculative and retired mispredicted macro conditional branches",
"Counter": "0,1,2,3",
@@ -435,6 +536,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA0",
+ "UMask": "0x3",
+ "BriefDescription": "Micro-op dispatches cancelled due to insufficient SIMD physical register file read ports",
+ "Counter": "0,1,2,3",
+ "EventName": "UOP_DISPATCHES_CANCELLED.SIMD_PRF",
+ "PublicDescription": "This event counts the number of micro-operations cancelled after they were dispatched from the scheduler to the execution units when the total number of physical register read ports across all dispatch ports exceeds the read bandwidth of the physical register file. The SIMD_PRF subevent applies to the following instructions: VDPPS, DPPS, VPCMPESTRI, PCMPESTRI, VPCMPESTRM, PCMPESTRM, VFMADD*, VFMADDSUB*, VFMSUB*, VMSUBADD*, VFNMADD*, VFNMSUB*. See the Broadwell Optimization Guide for more information.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0xA1",
"UMask": "0x1",
"BriefDescription": "Cycles per thread when uops are executed in port 0",
@@ -446,6 +557,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 0.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x2",
"BriefDescription": "Cycles per thread when uops are executed in port 1",
"Counter": "0,1,2,3",
@@ -456,6 +587,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 1.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x4",
"BriefDescription": "Cycles per thread when uops are executed in port 2",
"Counter": "0,1,2,3",
@@ -466,6 +617,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x8",
"BriefDescription": "Cycles per thread when uops are executed in port 3",
"Counter": "0,1,2,3",
@@ -476,6 +647,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x10",
"BriefDescription": "Cycles per thread when uops are executed in port 4",
"Counter": "0,1,2,3",
@@ -486,6 +677,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 4.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x20",
"BriefDescription": "Cycles per thread when uops are executed in port 5",
"Counter": "0,1,2,3",
@@ -496,6 +707,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x20",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 5.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x20",
+ "BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x40",
"BriefDescription": "Cycles per thread when uops are executed in port 6",
"Counter": "0,1,2,3",
@@ -506,6 +737,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x40",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 6.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x40",
+ "BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x80",
"BriefDescription": "Cycles per thread when uops are executed in port 7",
"Counter": "0,1,2,3",
@@ -515,6 +766,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "UMask": "0x80",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x80",
+ "BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xA2",
"UMask": "0x1",
"BriefDescription": "Resource-related stall cycles",
@@ -567,14 +838,13 @@
},
{
"EventCode": "0xA3",
- "UMask": "0x8",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "Counter": "2",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
- "CounterMask": "8",
- "PublicDescription": "Counts number of cycles the CPU has at least one pending demand load request missing the L1 data cache.",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
+ "CounterMask": "1",
"SampleAfterValue": "2000003",
- "CounterHTOff": "2"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xA3",
@@ -589,8 +859,18 @@
},
{
"EventCode": "0xA3",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "CounterMask": "2",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
"UMask": "0x4",
- "BriefDescription": "Total execution stalls",
+ "BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
"Counter": "0,1,2,3",
"EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
"CounterMask": "4",
@@ -600,6 +880,16 @@
},
{
"EventCode": "0xA3",
+ "UMask": "0x4",
+ "BriefDescription": "Total execution stalls.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "CounterMask": "4",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
"UMask": "0x5",
"BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
"Counter": "0,1,2,3",
@@ -611,6 +901,16 @@
},
{
"EventCode": "0xA3",
+ "UMask": "0x5",
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "CounterMask": "5",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
"UMask": "0x6",
"BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
"Counter": "0,1,2,3",
@@ -622,6 +922,37 @@
},
{
"EventCode": "0xA3",
+ "UMask": "0x6",
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
+ "CounterMask": "6",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "CounterMask": "8",
+ "PublicDescription": "Counts number of cycles the CPU has at least one pending demand load request missing the L1 data cache.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "CounterMask": "8",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
"UMask": "0xc",
"BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
"Counter": "2",
@@ -632,12 +963,41 @@
"CounterHTOff": "2"
},
{
+ "EventCode": "0xA3",
+ "UMask": "0xc",
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "CounterMask": "12",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
"EventCode": "0xA8",
"UMask": "0x1",
"BriefDescription": "Number of Uops delivered by the LSD.",
"Counter": "0,1,2,3",
"EventName": "LSD.UOPS",
- "PublicDescription": "Number of Uops delivered by the LSD. ",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
+ "EventName": "LSD.CYCLES_4_UOPS",
+ "CounterMask": "4",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "CounterMask": "1",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -652,6 +1012,58 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "Invert": "1",
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "CounterMask": "2",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "CounterMask": "3",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "CounterMask": "4",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0xB1",
"UMask": "0x2",
"BriefDescription": "Number of uops executed on the core.",
@@ -662,35 +1074,63 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "Invert": "1",
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
"CounterMask": "1",
- "PublicDescription": "This event counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC0",
- "UMask": "0x0",
- "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
"Counter": "0,1,2,3",
- "EventName": "INST_RETIRED.ANY_P",
- "Errata": "BDM61",
- "PublicDescription": "This event counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "CounterMask": "2",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC0",
+ "EventCode": "0xb1",
"UMask": "0x2",
- "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions:",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
"Counter": "0,1,2,3",
- "EventName": "INST_RETIRED.X87",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "CounterMask": "3",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "CounterMask": "4",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "Invert": "1",
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC0",
+ "UMask": "0x0",
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Counter": "0,1,2,3",
+ "EventName": "INST_RETIRED.ANY_P",
+ "Errata": "BDM61",
+ "PublicDescription": "This event counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -707,6 +1147,16 @@
"CounterHTOff": "1"
},
{
+ "EventCode": "0xC0",
+ "UMask": "0x2",
+ "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions:",
+ "Counter": "0,1,2,3",
+ "EventName": "INST_RETIRED.X87",
+ "PublicDescription": "This event counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xC1",
"UMask": "0x40",
"BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
@@ -718,23 +1168,12 @@
{
"EventCode": "0xC2",
"UMask": "0x1",
- "BriefDescription": "Actually retired uops.",
+ "BriefDescription": "Actually retired uops. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "UOPS_RETIRED.ALL",
- "PublicDescription": "This event counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC2",
- "UMask": "0x2",
- "BriefDescription": "Retirement slots used.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of retirement slots used.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -746,7 +1185,7 @@
"Counter": "0,1,2,3",
"EventName": "UOPS_RETIRED.STALL_CYCLES",
"CounterMask": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts cycles without actually retired uops.",
+ "PublicDescription": "This event counts cycles without actually retired uops.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
@@ -763,6 +1202,17 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0xC2",
+ "UMask": "0x2",
+ "BriefDescription": "Retirement slots used. (Precise Event - PEBS)",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts the number of retirement slots used.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xC3",
"UMask": "0x1",
"BriefDescription": "Cycles there was a Nuke. Account for both thread-specific and All Thread Nukes.",
@@ -773,6 +1223,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EdgeDetect": "1",
+ "EventCode": "0xC3",
+ "UMask": "0x1",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "Counter": "0,1,2,3",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "CounterMask": "1",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xC3",
"UMask": "0x4",
"BriefDescription": "Self-modifying code (SMC) detected.",
@@ -794,44 +1255,67 @@
},
{
"EventCode": "0xC4",
+ "UMask": "0x0",
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "This event counts all (macro) branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
"UMask": "0x1",
- "BriefDescription": "Conditional branch instructions retired.",
+ "BriefDescription": "Conditional branch instructions retired. (Precise Event - PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts conditional branch instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts conditional branch instructions retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC4",
"UMask": "0x2",
- "BriefDescription": "Direct and indirect near call instructions retired.",
+ "BriefDescription": "Direct and indirect near call instructions retired. (Precise Event - PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts both direct and indirect near call instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts both direct and indirect near call instructions retired.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC4",
- "UMask": "0x0",
- "BriefDescription": "All (macro) branch instructions retired.",
+ "UMask": "0x2",
+ "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3). (Precise Event - PEBS)",
+ "PEBS": "1",
"Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "PublicDescription": "This event counts all (macro) branch instructions retired.",
- "SampleAfterValue": "400009",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts both direct and indirect macro near call instructions retired (captured in ring 3).",
+ "SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC4",
+ "UMask": "0x4",
+ "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS)",
+ "PEBS": "2",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "Errata": "BDW98",
+ "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC4",
"UMask": "0x8",
- "BriefDescription": "Return instructions retired.",
+ "BriefDescription": "Return instructions retired. (Precise Event - PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts return instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts return instructions retired.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -841,18 +1325,18 @@
"BriefDescription": "Not taken branch instructions retired.",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NOT_TAKEN",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts not taken branch instructions retired.",
+ "PublicDescription": "This event counts not taken branch instructions retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC4",
"UMask": "0x20",
- "BriefDescription": "Taken branch instructions retired.",
+ "BriefDescription": "Taken branch instructions retired. (Precise Event - PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts taken branch instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts taken branch instructions retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -863,34 +1347,11 @@
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"Errata": "BDW98",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts far branch instructions retired.",
+ "PublicDescription": "This event counts far branch instructions retired.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC4",
- "UMask": "0x4",
- "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS)",
- "PEBS": "2",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
- "Errata": "BDW98",
- "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC5",
- "UMask": "0x1",
- "BriefDescription": "Mispredicted conditional branch instructions retired.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.CONDITIONAL",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted conditional branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0xC5",
"UMask": "0x0",
"BriefDescription": "All mispredicted macro branch instructions retired.",
@@ -902,13 +1363,13 @@
},
{
"EventCode": "0xC5",
- "UMask": "0x8",
- "BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "UMask": "0x1",
+ "BriefDescription": "Mispredicted conditional branch instructions retired. (Precise Event - PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.RET",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired.",
- "SampleAfterValue": "100007",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts mispredicted conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -923,164 +1384,36 @@
"CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xCC",
- "UMask": "0x20",
- "BriefDescription": "Count cases of saving new LBR",
- "Counter": "0,1,2,3",
- "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
- "PublicDescription": "This event counts cases of saving new LBR records by hardware. This assumes proper enabling of LBRs and takes into account LBR filtering done by the LBR_SELECT register.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x0",
- "BriefDescription": "Thread cycles when thread is not in halt state",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P",
- "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x89",
- "UMask": "0xa0",
- "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x1",
- "BriefDescription": "Cycles per core when uops are exectuted in port 0.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x2",
- "BriefDescription": "Cycles per core when uops are exectuted in port 1.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x4",
- "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
+ "EventCode": "0xC5",
"UMask": "0x8",
- "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x10",
- "BriefDescription": "Cycles per core when uops are exectuted in port 4.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x20",
- "BriefDescription": "Cycles per core when uops are exectuted in port 5.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x40",
- "BriefDescription": "Cycles per core when uops are exectuted in port 6.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x80",
- "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
+ "BriefDescription": "This event counts the number of mispredicted ret instructions retired.(Precise Event)",
+ "PEBS": "1",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
+ "EventName": "BR_MISP_RETIRED.RET",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts mispredicted return instructions retired.",
+ "SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC5",
"UMask": "0x20",
- "BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
+ "BriefDescription": "number of near branch instructions retired that were mispredicted and taken. (Precise Event - PEBS).",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
- "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken. (Precise Event - PEBS).",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 1 uop was executed per-thread.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 2 uops were executed per-thread.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
- "CounterMask": "2",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 3 uops were executed per-thread.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
- "CounterMask": "3",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "EventCode": "0xCC",
+ "UMask": "0x20",
+ "BriefDescription": "Count cases of saving new LBR",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
- "CounterMask": "4",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "PublicDescription": "This event counts cases of saving new LBR records by hardware. This assumes proper enabling of LBRs and takes into account LBR filtering done by the LBR_SELECT register.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xe6",
@@ -1090,328 +1423,5 @@
"EventName": "BACLEARS.ANY",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0x8",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "Counter": "2",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
- "CounterMask": "8",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0x1",
- "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0x2",
- "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
- "CounterMask": "2",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0x4",
- "BriefDescription": "Total execution stalls.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
- "CounterMask": "4",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0xc",
- "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
- "Counter": "2",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
- "CounterMask": "12",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0x5",
- "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
- "CounterMask": "5",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0x6",
- "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
- "CounterMask": "6",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EdgeDetect": "1",
- "EventCode": "0xC3",
- "UMask": "0x1",
- "BriefDescription": "Number of machine clears (nukes) of any type.",
- "Counter": "0,1,2,3",
- "EventName": "MACHINE_CLEARS.COUNT",
- "CounterMask": "1",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA8",
- "UMask": "0x1",
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
- "Counter": "0,1,2,3",
- "EventName": "LSD.CYCLES_4_UOPS",
- "CounterMask": "4",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EdgeDetect": "1",
- "Invert": "1",
- "EventCode": "0x5E",
- "UMask": "0x1",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
- "Counter": "0,1,2,3",
- "EventName": "RS_EVENTS.EMPTY_END",
- "CounterMask": "1",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA8",
- "UMask": "0x1",
- "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
- "Counter": "0,1,2,3",
- "EventName": "LSD.CYCLES_ACTIVE",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x1",
- "BriefDescription": "Cycles per thread when uops are executed in port 0",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_0",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x2",
- "BriefDescription": "Cycles per thread when uops are executed in port 1",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_1",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x4",
- "BriefDescription": "Cycles per thread when uops are executed in port 2",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_2",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x8",
- "BriefDescription": "Cycles per thread when uops are executed in port 3",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_3",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x10",
- "BriefDescription": "Cycles per thread when uops are executed in port 4",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_4",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x20",
- "BriefDescription": "Cycles per thread when uops are executed in port 5",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_5",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x40",
- "BriefDescription": "Cycles per thread when uops are executed in port 6",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_6",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x80",
- "BriefDescription": "Cycles per thread when uops are executed in port 7",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_7",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA0",
- "UMask": "0x3",
- "BriefDescription": "Micro-op dispatches cancelled due to insufficient SIMD physical register file read ports",
- "Counter": "0,1,2,3",
- "EventName": "UOP_DISPATCHES_CANCELLED.SIMD_PRF",
- "PublicDescription": "This event counts the number of micro-operations cancelled after they were dispatched from the scheduler to the execution units when the total number of physical register read ports across all dispatch ports exceeds the read bandwidth of the physical register file. The SIMD_PRF subevent applies to the following instructions: VDPPS, DPPS, VPCMPESTRI, PCMPESTRI, VPCMPESTRM, PCMPESTRM, VFMADD*, VFMADDSUB*, VFMSUB*, VMSUBADD*, VFNMADD*, VFNMSUB*. See the Broadwell Optimization Guide for more information.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0x00",
- "UMask": "0x2",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "Counter": "Fixed counter 2",
- "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 2"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x0",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x0D",
- "UMask": "0x3",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
- "Counter": "0,1,2,3",
- "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
- "AnyThread": "1",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
- "CounterMask": "2",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
- "CounterMask": "3",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
- "CounterMask": "4",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "Invert": "1",
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
- "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x2",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
]
\ No newline at end of file
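The entries added to pipeline.json above map one-to-one onto perf's raw PMU term syntax (event=, umask=, cmask=, inv=, edge=, any=, as exposed under /sys/bus/event_source/devices/cpu/format). A minimal sketch of that mapping follows, assuming a single hex EventCode per entry; the helper name and the zero-skipping policy are illustrative, not part of this patch.

def to_perf_event(entry):
    """Turn one pmu-events JSON entry into a raw 'perf stat -e' string (sketch)."""
    terms = {"EventCode": "event", "UMask": "umask", "CounterMask": "cmask",
             "Invert": "inv", "EdgeDetect": "edge", "AnyThread": "any"}
    parts = []
    for field, term in terms.items():
        raw = entry.get(field)
        if raw is None or (field != "EventCode" and raw in ("0", "0x0")):
            continue  # always keep the event code; skip unset/zero modifiers
        val = raw if str(raw).lower().startswith("0x") else hex(int(raw))
        parts.append(f"{term}={val}")
    return "cpu/" + ",".join(parts) + "/"

entry = {"EventCode": "0xA3", "UMask": "0x6", "CounterMask": "6",
         "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY"}
print(entry["EventName"], "->", to_perf_event(entry))
# prints: CYCLE_ACTIVITY.STALLS_MEM_ANY -> cpu/event=0xA3,umask=0x6,cmask=0x6/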
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/virtual-memory.json b/tools/perf/pmu-events/arch/x86/broadwellde/virtual-memory.json
index 5ce8b67ba076..7d79c707c6d1 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/virtual-memory.json
@@ -45,6 +45,16 @@
},
{
"EventCode": "0x08",
+ "UMask": "0xe",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "Errata": "BDM69",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
"UMask": "0x10",
"BriefDescription": "Cycles when PMH is busy with page walks",
"Counter": "0,1,2,3",
@@ -73,6 +83,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x08",
+ "UMask": "0x60",
+ "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x49",
"UMask": "0x1",
"BriefDescription": "Store misses in all DTLB levels that cause page walks",
@@ -118,6 +137,16 @@
},
{
"EventCode": "0x49",
+ "UMask": "0xe",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "Errata": "BDM69",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
"UMask": "0x10",
"BriefDescription": "Cycles when PMH is busy with page walks",
"Counter": "0,1,2,3",
@@ -146,6 +175,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x49",
+ "UMask": "0x60",
+ "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x4F",
"UMask": "0x10",
"BriefDescription": "Cycle count for an Extended Page table walk.",
@@ -201,6 +239,16 @@
},
{
"EventCode": "0x85",
+ "UMask": "0xe",
+ "BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "Errata": "BDM69",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
"UMask": "0x10",
"BriefDescription": "Cycles when PMH is busy with page walks",
"Counter": "0,1,2,3",
@@ -229,6 +277,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x85",
+ "UMask": "0x60",
+ "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xAE",
"UMask": "0x1",
"BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
@@ -250,60 +307,60 @@
},
{
"EventCode": "0xBC",
- "UMask": "0x21",
- "BriefDescription": "Number of ITLB page walker hits in the L1+FB.",
+ "UMask": "0x12",
+ "BriefDescription": "Number of DTLB page walker hits in the L2.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L2",
"Errata": "BDM69, BDM98",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x12",
- "BriefDescription": "Number of DTLB page walker hits in the L2.",
+ "UMask": "0x14",
+ "BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.DTLB_L2",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
"Errata": "BDM69, BDM98",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x22",
- "BriefDescription": "Number of ITLB page walker hits in the L2.",
+ "UMask": "0x18",
+ "BriefDescription": "Number of DTLB page walker hits in Memory.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L2",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
"Errata": "BDM69, BDM98",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x14",
- "BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP.",
+ "UMask": "0x21",
+ "BriefDescription": "Number of ITLB page walker hits in the L1+FB.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
"Errata": "BDM69, BDM98",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x24",
- "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP.",
+ "UMask": "0x22",
+ "BriefDescription": "Number of ITLB page walker hits in the L2.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L2",
"Errata": "BDM69, BDM98",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x18",
- "BriefDescription": "Number of DTLB page walker hits in Memory.",
+ "UMask": "0x24",
+ "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
"Errata": "BDM69, BDM98",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
@@ -327,62 +384,5 @@
"PublicDescription": "This event counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, and so on).",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x08",
- "UMask": "0xe",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
- "Errata": "BDM69",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x08",
- "UMask": "0x60",
- "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x49",
- "UMask": "0xe",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
- "Errata": "BDM69",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x49",
- "UMask": "0x60",
- "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x85",
- "UMask": "0xe",
- "BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_COMPLETED",
- "Errata": "BDM69",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x85",
- "UMask": "0x60",
- "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.STLB_HIT",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
]
\ No newline at end of file
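Most hunks in these event files move entries rather than change them: the regenerated JSON appears to be kept sorted by numeric (EventCode, UMask), which is why blocks such as the PAGE_WALKER_LOADS entries above are reshuffled wholesale. A small sketch of that inferred ordering check follows; the handling of multi-code entries such as "0xB7, 0xBB" is an assumption.

import json

def sort_key(entry):
    code = int(entry["EventCode"].split(",")[0], 16)  # key on the first listed code
    return (code, int(entry.get("UMask", "0x0"), 16))

with open("tools/perf/pmu-events/arch/x86/broadwellde/virtual-memory.json") as f:
    events = json.load(f)

keys = [sort_key(e) for e in events]
print("sorted by (EventCode, UMask):", keys == sorted(keys))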
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/cache.json b/tools/perf/pmu-events/arch/x86/broadwellx/cache.json
index d1d043829b95..bf0c51272068 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/cache.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/cache.json
@@ -11,11 +11,28 @@
},
{
"EventCode": "0x24",
- "UMask": "0x41",
- "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "UMask": "0x22",
+ "BriefDescription": "RFO requests that miss L2 cache.",
"Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
- "PublicDescription": "This event counts the number of demand Data Read requests that hit L2 cache. Only not rejected loads are counted.",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x24",
+ "BriefDescription": "L2 cache misses when fetching instructions.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x27",
+ "BriefDescription": "Demand requests that miss L2 cache.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
"SampleAfterValue": "200003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -31,6 +48,43 @@
},
{
"EventCode": "0x24",
+ "UMask": "0x3f",
+ "BriefDescription": "All requests that miss L2 cache.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.MISS",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x41",
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "PublicDescription": "This event counts the number of demand Data Read requests that hit L2 cache. Only not rejected loads are counted.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x42",
+ "BriefDescription": "RFO requests that hit L2 cache.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x44",
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
"UMask": "0x50",
"BriefDescription": "L2 prefetch requests that hit L2 cache",
"Counter": "0,1,2,3",
@@ -71,6 +125,15 @@
},
{
"EventCode": "0x24",
+ "UMask": "0xe7",
+ "BriefDescription": "Demand requests to L2 cache.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
"UMask": "0xf8",
"BriefDescription": "Requests from L2 hardware prefetchers",
"Counter": "0,1,2,3",
@@ -80,6 +143,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x24",
+ "UMask": "0xff",
+ "BriefDescription": "All L2 requests.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x27",
"UMask": "0x50",
"BriefDescription": "Not rejected writebacks that hit L2 cache",
@@ -131,6 +203,27 @@
"CounterHTOff": "2"
},
{
+ "EventCode": "0x48",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "Counter": "2",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "AnyThread": "1",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x48",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
+ "Counter": "0,1,2,3",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x51",
"UMask": "0x1",
"BriefDescription": "L1D data line replacements",
@@ -153,12 +246,35 @@
},
{
"EventCode": "0x60",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "CounterMask": "1",
+ "Errata": "BDM76",
+ "PublicDescription": "This event counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "CounterMask": "6",
+ "Errata": "BDM76",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
"UMask": "0x2",
"BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
"Errata": "BDM76",
- "PublicDescription": "This event counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The \"Offcore outstanding\" state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "PublicDescription": "This event counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The Offcore outstanding state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -175,24 +291,24 @@
},
{
"EventCode": "0x60",
- "UMask": "0x8",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "UMask": "0x4",
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "CounterMask": "1",
"Errata": "BDM76",
- "PublicDescription": "This event counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "PublicDescription": "This event counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The Offcore outstanding state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x60",
- "UMask": "0x1",
- "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "UMask": "0x8",
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
- "CounterMask": "1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
"Errata": "BDM76",
- "PublicDescription": "This event counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
+ "PublicDescription": "This event counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -209,18 +325,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x60",
- "UMask": "0x4",
- "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
- "CounterMask": "1",
- "Errata": "BDM76",
- "PublicDescription": "This event counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The \"Offcore outstanding\" state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0x63",
"UMask": "0x2",
"BriefDescription": "Cycles when L1D is locked",
@@ -266,7 +370,7 @@
"BriefDescription": "Demand and prefetch data reads",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
- "PublicDescription": "This event counts the demand and prefetch data reads. All Core Data Reads include cacheable \"Demands\" and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
+ "PublicDescription": "This event counts the demand and prefetch data reads. All Core Data Reads include cacheable Demands and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -281,26 +385,35 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0xD0",
"UMask": "0x11",
- "BriefDescription": "Retired load uops that miss the STLB.",
+ "BriefDescription": "Retired load uops that miss the STLB. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x12",
- "BriefDescription": "Retired store uops that miss the STLB.",
+ "BriefDescription": "Retired store uops that miss the STLB. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts store uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts store uops true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
"SampleAfterValue": "100003",
"L1_Hit_Indication": "1",
"CounterHTOff": "0,1,2,3"
@@ -308,37 +421,37 @@
{
"EventCode": "0xD0",
"UMask": "0x21",
- "BriefDescription": "Retired load uops with locked access.",
+ "BriefDescription": "Retired load uops with locked access. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"Errata": "BDM35",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts load uops with locked access retired to the architected path.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops with locked access retired to the architected path.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x41",
- "BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired load uops that split across a cacheline boundary.(Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x42",
- "BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"SampleAfterValue": "100003",
"L1_Hit_Indication": "1",
"CounterHTOff": "0,1,2,3"
@@ -346,24 +459,24 @@
{
"EventCode": "0xD0",
"UMask": "0x81",
- "BriefDescription": "All retired load uops.",
+ "BriefDescription": "All retired load uops. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
- "PublicDescription": "This event counts load uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement. This event also counts SW prefetches.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event ?ounts AVX-256bit load/store double-pump memory uops as a single uop at retirement. This event also counts SW prefetches.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x82",
- "BriefDescription": "All retired store uops.",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
- "PublicDescription": "This event counts store uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts store uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event ?ounts AVX-256bit load/store double-pump memory uops as a single uop at retirement.",
"SampleAfterValue": "2000003",
"L1_Hit_Indication": "1",
"CounterHTOff": "0,1,2,3"
@@ -371,69 +484,69 @@
{
"EventCode": "0xD1",
"UMask": "0x1",
- "BriefDescription": "Retired load uops with L1 cache hits as data sources.",
+ "BriefDescription": "Retired load uops with L1 cache hits as data sources. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the nearest-level (L1) cache.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load. This event also counts SW prefetches independent of the actual data source.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data source were hits in the nearest-level (L1) cache.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load. This event also counts SW prefetches independent of the actual data source.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x2",
- "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
+ "BriefDescription": "Retired load uops with L2 cache hits as data sources. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
"Errata": "BDM35",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the mid-level (L2) cache.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were hits in the mid-level (L2) cache.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x4",
- "BriefDescription": "Retired load uops which data sources were data hits in L3 without snoops required.",
+ "BriefDescription": "Hit in last-level (L3) cache. Excludes Unknown data-source. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
"Errata": "BDM100",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were data hits in the last-level (L3) cache without snoops required.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were data hits in the last-level (L3) cache without snoops required.",
"SampleAfterValue": "50021",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x8",
- "BriefDescription": "Retired load uops misses in L1 cache as data sources.",
+ "BriefDescription": "Retired load uops misses in L1 cache as data sources. Uses PEBS.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were misses in the nearest-level (L1) cache. Counting excludes unknown and UC data source.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were misses in the nearest-level (L1) cache. Counting excludes unknown and UC data source.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x10",
- "BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
+ "BriefDescription": "Retired load uops with L2 cache misses as data sources. Uses PEBS.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were misses in the mid-level (L2) cache. Counting excludes unknown and UC data source.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were misses in the mid-level (L2) cache. Counting excludes unknown and UC data source.",
"SampleAfterValue": "50021",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x20",
- "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
+ "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source. (Precise Event - PEBS).",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -445,84 +558,83 @@
{
"EventCode": "0xD1",
"UMask": "0x40",
- "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
+ "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD2",
"UMask": "0x1",
- "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS",
"Errata": "BDM100",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.",
"SampleAfterValue": "20011",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD2",
"UMask": "0x2",
- "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT",
"Errata": "BDM100",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were L3 hit and a cross-core snoop hit in the on-pkg core cache.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were L3 hit and a cross-core snoop hit in the on-pkg core cache.",
"SampleAfterValue": "20011",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD2",
"UMask": "0x4",
- "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM",
"Errata": "BDM100",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were HitM responses from a core on same socket (shared L3).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were HitM responses from a core on same socket (shared L3).",
"SampleAfterValue": "20011",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD2",
"UMask": "0x8",
- "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required.",
+ "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_NONE",
"Errata": "BDM100",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD3",
"UMask": "0x1",
- "BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
"Errata": "BDE70, BDM100",
- "PublicDescription": "Retired load uop whose Data Source was: local DRAM either Snoop not needed or Snoop Miss (RspI).",
+ "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches. This is a precise event.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD3",
"UMask": "0x4",
- "BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI)",
+ "BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI) (Precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -534,7 +646,7 @@
{
"EventCode": "0xD3",
"UMask": "0x10",
- "BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM",
+ "BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM (Precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -546,7 +658,7 @@
{
"EventCode": "0xD3",
"UMask": "0x20",
- "BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache",
+ "BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache (Precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -695,119 +807,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x24",
- "UMask": "0x42",
- "BriefDescription": "RFO requests that hit L2 cache.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.RFO_HIT",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x22",
- "BriefDescription": "RFO requests that miss L2 cache.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.RFO_MISS",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x44",
- "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.CODE_RD_HIT",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x24",
- "BriefDescription": "L2 cache misses when fetching instructions.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.CODE_RD_MISS",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x27",
- "BriefDescription": "Demand requests that miss L2 cache.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0xe7",
- "BriefDescription": "Demand requests to L2 cache.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x3f",
- "BriefDescription": "All requests that miss L2 cache.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.MISS",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0xff",
- "BriefDescription": "All L2 requests.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.REFERENCES",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0x60",
- "UMask": "0x1",
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
- "CounterMask": "6",
- "Errata": "BDM76",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x48",
- "UMask": "0x1",
- "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
- "Counter": "2",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
- "AnyThread": "1",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0x48",
- "UMask": "0x2",
- "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
- "Counter": "0,1,2,3",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
@@ -816,6 +815,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all requests that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -828,6 +828,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -840,6 +841,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -852,6 +854,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -864,6 +867,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -876,6 +880,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -888,6 +893,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -900,6 +906,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -912,6 +919,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -924,6 +932,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -936,6 +945,20 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3",
+ "MSRValue": "0x3f803c0002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
}
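
Aside (not part of the patch): a minimal Python sketch of how the JSON fields used throughout these event tables (EventCode, UMask, CounterMask, EdgeDetect, AnyThread) map onto a raw IA32_PERFEVTSELx encoding and onto the equivalent perf event string. The field layout assumed here is the standard one from the Intel SDM: event select in bits 0-7, unit mask in bits 8-15, edge detect in bit 18, any-thread in bit 21, counter mask (cmask) in bits 24-31. OFFCORE_RESPONSE events additionally need MSRValue written to the MSR named by MSRIndex, which this sketch leaves out.

    def encode(event):
        # Raw PERFEVTSEL config value for one JSON event entry.
        code = int(event["EventCode"].split(",")[0], 16)  # first code if several are listed
        umask = int(event["UMask"], 16)
        cmask = int(event.get("CounterMask", "0"))
        edge = int(event.get("EdgeDetect", "0"))
        any_thread = int(event.get("AnyThread", "0"))
        return (code | (umask << 8) | (edge << 18) |
                (any_thread << 21) | (cmask << 24))

    def perf_string(event):
        # Same entry expressed in perf's cpu/.../ event syntax.
        s = "cpu/event=%s,umask=%s" % (event["EventCode"].split(",")[0], event["UMask"])
        if "CounterMask" in event:
            s += ",cmask=%s" % event["CounterMask"]
        if event.get("EdgeDetect") == "1":
            s += ",edge=1"
        if event.get("AnyThread") == "1":
            s += ",any=1"
        return s + "/"

    # OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD from above:
    entry = {"EventCode": "0x60", "UMask": "0x1", "CounterMask": "1"}
    print(hex(encode(entry)))    # 0x1000160
    print(perf_string(entry))    # cpu/event=0x60,umask=0x1,cmask=1/
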
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json b/tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json
index 4ae1ea24f22f..d7b9d9c9c518 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json
@@ -6,7 +6,7 @@
"Counter": "0,1,2,3",
"EventName": "OTHER_ASSISTS.AVX_TO_SSE",
"Errata": "BDM30",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of transitions from AVX-256 to legacy SSE when penalty is applicable.",
+ "PublicDescription": "This event counts the number of transitions from AVX-256 to legacy SSE when penalty is applicable.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -17,7 +17,7 @@
"Counter": "0,1,2,3",
"EventName": "OTHER_ASSISTS.SSE_TO_AVX",
"Errata": "BDM30",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of transitions from legacy SSE to AVX-256 when penalty is applicable.",
+ "PublicDescription": "This event counts the number of transitions from legacy SSE to AVX-256 when penalty is applicable.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -25,7 +25,6 @@
"EventCode": "0xC7",
"UMask": "0x1",
"BriefDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired. Each count represents 1 computation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
"SampleAfterValue": "2000003",
@@ -35,7 +34,6 @@
"EventCode": "0xC7",
"UMask": "0x2",
"BriefDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired. Each count represents 1 computation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
"SampleAfterValue": "2000003",
@@ -43,9 +41,17 @@
},
{
"EventCode": "0xC7",
+ "UMask": "0x3",
+ "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
"UMask": "0x4",
"BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired. Each count represents 2 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
"SampleAfterValue": "2000003",
@@ -55,7 +61,6 @@
"EventCode": "0xC7",
"UMask": "0x8",
"BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
"SampleAfterValue": "2000003",
@@ -65,19 +70,54 @@
"EventCode": "0xC7",
"UMask": "0x10",
"BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0xC7",
+ "UMask": "0x15",
+ "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.DOUBLE",
+ "SampleAfterValue": "2000006",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xc7",
+ "UMask": "0x20",
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired. Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
+ "UMask": "0x2a",
+ "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.SINGLE",
+ "SampleAfterValue": "2000005",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
+ "UMask": "0x3c",
+ "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired. Applies to SSE* and AVX*, packed, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.PACKED",
+ "SampleAfterValue": "2000004",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0xCA",
"UMask": "0x2",
"BriefDescription": "Number of X87 assists due to output value.",
"Counter": "0,1,2,3",
"EventName": "FP_ASSIST.X87_OUTPUT",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.",
+ "PublicDescription": "This event counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -87,7 +127,7 @@
"BriefDescription": "Number of X87 assists due to input value.",
"Counter": "0,1,2,3",
"EventName": "FP_ASSIST.X87_INPUT",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts x87 floating point (FP) micro-code assist (invalid operation, denormal operand, SNaN operand) when the input value (one of the source operands to an FP instruction) is invalid.",
+ "PublicDescription": "This event counts x87 floating point (FP) micro-code assist (invalid operation, denormal operand, SNaN operand) when the input value (one of the source operands to an FP instruction) is invalid.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -97,7 +137,7 @@
"BriefDescription": "Number of SIMD FP assists due to Output values",
"Counter": "0,1,2,3",
"EventName": "FP_ASSIST.SIMD_OUTPUT",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.",
+ "PublicDescription": "This event counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -107,7 +147,7 @@
"BriefDescription": "Number of SIMD FP assists due to input values",
"Counter": "0,1,2,3",
"EventName": "FP_ASSIST.SIMD_INPUT",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts any input SSE* FP assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.",
+ "PublicDescription": "This event counts any input SSE* FP assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -121,51 +161,5 @@
"PublicDescription": "This event counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle the event increments by 1.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xc7",
- "UMask": "0x20",
- "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired. Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC7",
- "UMask": "0x3",
- "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.SCALAR",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC7",
- "UMask": "0x3c",
- "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired. Applies to SSE* and AVX*, packed, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.PACKED",
- "SampleAfterValue": "2000004",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC7",
- "UMask": "0x2a",
- "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.SINGLE",
- "SampleAfterValue": "2000005",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC7",
- "UMask": "0x15",
- "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.DOUBLE",
- "SampleAfterValue": "2000006",
- "CounterHTOff": "0,1,2,3"
}
] \ No newline at end of file
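
Aside (not part of the patch): a minimal Python sketch that combines the FP_ARITH_INST_RETIRED.* counts defined above into a floating-point operation total, using the "each count represents N computations" factors stated in the event descriptions (1 for the scalar events, 2 for 128-bit packed double, 4 for 128-bit packed single and 256-bit packed double, 8 for 256-bit packed single). Per those descriptions the events already count FM(N)ADD/SUB twice, so no extra FMA factor is applied here. The counter readings in the example are made-up placeholders.

    FLOPS_PER_COUNT = {
        "FP_ARITH_INST_RETIRED.SCALAR_SINGLE": 1,
        "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE": 1,
        "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE": 2,
        "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE": 4,
        "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE": 4,
        "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE": 8,
    }

    def total_flops(counts):
        # Weighted sum of raw event counts over one measurement interval.
        return sum(FLOPS_PER_COUNT[name] * value for name, value in counts.items())

    sample = {  # hypothetical readings
        "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE": 1000000,
        "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE": 2500000,
    }
    print(total_flops(sample))  # 1000000*1 + 2500000*4 = 11000000
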
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/frontend.json b/tools/perf/pmu-events/arch/x86/broadwellx/frontend.json
index 06bf0a40e568..72781e1e3362 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/frontend.json
@@ -15,80 +15,49 @@
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
"Counter": "0,1,2,3",
"EventName": "IDQ.MITE_UOPS",
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
- "UMask": "0x8",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.DSB_UOPS",
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x10",
- "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_DSB_UOPS",
- "PublicDescription": "This event counts the number of uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x20",
- "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_MITE_UOPS",
- "PublicDescription": "This event counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
"Counter": "0,1,2,3",
- "EventName": "IDQ.MS_UOPS",
- "PublicDescription": "This event counts the total number of uops delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may \"bypass\" the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "EventName": "IDQ.MITE_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "UMask": "0x8",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
"Counter": "0,1,2,3",
- "EventName": "IDQ.MS_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may \"bypass\" the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "EventName": "IDQ.DSB_UOPS",
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
- "UMask": "0x4",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
"Counter": "0,1,2,3",
- "EventName": "IDQ.MITE_CYCLES",
+ "EventName": "IDQ.DSB_CYCLES",
"CounterMask": "1",
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
- "UMask": "0x8",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
+ "UMask": "0x10",
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"Counter": "0,1,2,3",
- "EventName": "IDQ.DSB_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "EventName": "IDQ.MS_DSB_UOPS",
+ "PublicDescription": "This event counts the number of uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -99,7 +68,7 @@
"Counter": "0,1,2,3",
"EventName": "IDQ.MS_DSB_CYCLES",
"CounterMask": "1",
- "PublicDescription": "This event counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -111,7 +80,7 @@
"Counter": "0,1,2,3",
"EventName": "IDQ.MS_DSB_OCCUR",
"CounterMask": "1",
- "PublicDescription": "This event counts the number of deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while the Microcode Sequencer (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts the number of deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -122,7 +91,7 @@
"Counter": "0,1,2,3",
"EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
"CounterMask": "4",
- "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -133,7 +102,17 @@
"Counter": "0,1,2,3",
"EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
"CounterMask": "1",
- "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x20",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "PublicDescription": "This event counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -144,7 +123,7 @@
"Counter": "0,1,2,3",
"EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
"CounterMask": "4",
- "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -155,7 +134,39 @@
"Counter": "0,1,2,3",
"EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
"CounterMask": "1",
- "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x30",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_UOPS",
+ "PublicDescription": "This event counts the total number of uops delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may bypass the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x30",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may bypass the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "UMask": "0x30",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_SWITCHES",
+ "CounterMask": "1",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -165,7 +176,7 @@
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
"Counter": "0,1,2,3",
"EventName": "IDQ.MITE_ALL_UOPS",
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -205,7 +216,7 @@
"BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
"Counter": "0,1,2,3",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
- "PublicDescription": "This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding ?4 ? x? when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when:\n a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread;\n b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions); \n c. Instruction Decode Queue (IDQ) delivers four uops.",
+ "PublicDescription": "This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4 x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when:\n a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread;\n b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions); \n c. Instruction Decode Queue (IDQ) delivers four uops.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
@@ -268,18 +279,7 @@
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
"Counter": "0,1,2,3",
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
- "PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 0?2 cycles.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EdgeDetect": "1",
- "EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_SWITCHES",
- "CounterMask": "1",
+ "PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
}
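The fields in these entries (EventCode, UMask, CounterMask, EdgeDetect, Invert, AnyThread) map onto the architectural IA32_PERFEVTSELx register layout that perf programs for raw events. A minimal sketch of that mapping, assuming the bit positions documented in the Intel SDM (the helper below is illustrative and not part of the perf tooling):

    # Compose a raw PERFEVTSEL config value from the JSON fields above.
    # Assumed architectural bit positions: event select [7:0], umask [15:8],
    # edge 18, any-thread 21, invert 23, cmask [31:24].
    def perfevtsel(event_code, umask, cmask=0, edge=0, inv=0, any_thread=0):
        return (event_code
                | (umask << 8)
                | (edge << 18)
                | (any_thread << 21)
                | (inv << 23)
                | (cmask << 24))

    # IDQ.MS_SWITCHES above: EventCode 0x79, UMask 0x30,
    # CounterMask 1, EdgeDetect 1.
    print(hex(perfevtsel(0x79, 0x30, cmask=1, edge=1)))  # 0x1043079
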
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/memory.json b/tools/perf/pmu-events/arch/x86/broadwellx/memory.json
index 1204ea8ff30d..d79a5cfea44b 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/memory.json
@@ -95,7 +95,6 @@
"BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed. Since this is the count of execution, it may not always cause a transactional abort.",
"Counter": "0,1,2,3",
"EventName": "TX_EXEC.MISC1",
- "PublicDescription": "Unfriendly TSX abort triggered by a flowmarker.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -171,11 +170,11 @@
{
"EventCode": "0xc8",
"UMask": "0x4",
- "BriefDescription": "Number of times HLE abort was triggered",
+ "BriefDescription": "Number of times HLE abort was triggered (PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "HLE_RETIRED.ABORTED",
- "PublicDescription": "Number of times HLE abort was triggered.",
+ "PublicDescription": "Number of times HLE abort was triggered (PEBS).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -252,11 +251,11 @@
{
"EventCode": "0xc9",
"UMask": "0x4",
- "BriefDescription": "Number of times RTM abort was triggered",
+ "BriefDescription": "Number of times RTM abort was triggered (PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "RTM_RETIRED.ABORTED",
- "PublicDescription": "Number of times RTM abort was triggered .",
+ "PublicDescription": "Number of times RTM abort was triggered (PEBS).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
@@ -439,6 +438,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all requests that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -451,6 +451,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and clean or shared data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -463,6 +464,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -475,6 +477,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from remote dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -487,6 +490,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -499,6 +503,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -511,6 +516,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch code reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -523,6 +529,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -535,6 +542,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -547,6 +555,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -559,6 +568,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -571,6 +581,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -583,6 +594,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from remote dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -595,6 +607,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -607,6 +620,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -619,6 +633,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -631,6 +646,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -643,6 +659,20 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) that miss in the L3",
+ "MSRValue": "0x3fbfc00002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
}
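The offcore response entries above all note that the event must be programmed with a specific event select (0xB7 or 0xBB) plus an MSRValue written to the dedicated MSR at MSRIndex 0x1a6/0x1a7. perf exposes that MSR value through the offcore_rsp event parameter, so an entry such as OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.ANY_RESPONSE (MSRValue 0x3fbfc00002) can also be requested as a raw event. A sketch, assuming the documented perf event syntax and an arbitrary workload:

    import subprocess

    # Raw form of OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.ANY_RESPONSE:
    # event select 0xb7, umask 0x1, with the MSRValue from the JSON entry
    # passed via the offcore_rsp format attribute.
    event = "cpu/event=0xb7,umask=0x1,offcore_rsp=0x3fbfc00002/"
    subprocess.run(["perf", "stat", "-e", event, "--", "sleep", "1"])
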
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/other.json b/tools/perf/pmu-events/arch/x86/broadwellx/other.json
index 718fcb1db2ee..4475249ea9da 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/other.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/other.json
@@ -10,16 +10,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x5C",
- "UMask": "0x2",
- "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
- "Counter": "0,1,2,3",
- "EventName": "CPL_CYCLES.RING123",
- "PublicDescription": "This event counts unhalted core cycles during which the thread is in rings 1, 2, or 3.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EdgeDetect": "1",
"EventCode": "0x5C",
"UMask": "0x1",
@@ -32,6 +22,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x5C",
+ "UMask": "0x2",
+ "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
+ "Counter": "0,1,2,3",
+ "EventName": "CPL_CYCLES.RING123",
+ "PublicDescription": "This event counts unhalted core cycles during which the thread is in rings 1, 2, or 3.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x63",
"UMask": "0x1",
"BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json b/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json
index 02b4e1035f2d..920c89da9111 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json
@@ -3,31 +3,41 @@
"EventCode": "0x00",
"UMask": "0x1",
"BriefDescription": "Instructions retired from execution.",
- "Counter": "Fixed counter 1",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. \nNotes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. \nCounting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 1"
+ "CounterHTOff": "Fixed counter 0"
},
{
"EventCode": "0x00",
"UMask": "0x2",
"BriefDescription": "Core cycles when the thread is not in halt state",
- "Counter": "Fixed counter 2",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 2"
+ "CounterHTOff": "Fixed counter 1"
+ },
+ {
+ "EventCode": "0x00",
+ "UMask": "0x2",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "Fixed counter 1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "Fixed counter 1"
},
{
"EventCode": "0x00",
"UMask": "0x3",
"BriefDescription": "Reference cycles when the core is not in halt state.",
- "Counter": "Fixed counter 3",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. \nNote: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. This event is clocked by base clock (100 Mhz) on Sandy Bridge. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 3"
+ "CounterHTOff": "Fixed counter 2"
},
{
"EventCode": "0x03",
@@ -60,22 +70,33 @@
},
{
"EventCode": "0x0D",
- "UMask": "0x8",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread",
+ "UMask": "0x3",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
"Counter": "0,1,2,3",
- "EventName": "INT_MISC.RAT_STALL_CYCLES",
- "PublicDescription": "This event counts the number of cycles during which Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the current thread. This also includes the cycles during which the Allocator is serving another thread.",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "Cycles checkpoints in Resource Allocation Table (RAT) are recovering from JEClear or machine clear.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x0D",
"UMask": "0x3",
- "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...)",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
"Counter": "0,1,2,3",
- "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+ "AnyThread": "1",
"CounterMask": "1",
- "PublicDescription": "Cycles checkpoints in Resource Allocation Table (RAT) are recovering from JEClear or machine clear.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0D",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread",
+ "Counter": "0,1,2,3",
+ "EventName": "INT_MISC.RAT_STALL_CYCLES",
+ "PublicDescription": "This event counts the number of cycles during which Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the current thread. This also includes the cycles during which the Allocator is serving another thread.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -90,6 +111,18 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "Invert": "1",
+ "EventCode": "0x0E",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0x0E",
"UMask": "0x10",
"BriefDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive; added by GSR u-arch.",
@@ -118,18 +151,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "Invert": "1",
- "EventCode": "0x0E",
- "UMask": "0x1",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_ISSUED.STALL_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "This event counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
"EventCode": "0x14",
"UMask": "0x1",
"BriefDescription": "Cycles when divider is busy executing divide operations",
@@ -141,6 +162,26 @@
},
{
"EventCode": "0x3C",
+ "UMask": "0x0",
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x0",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
"UMask": "0x1",
"BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
"Counter": "0,1,2,3",
@@ -150,6 +191,36 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x3c",
"UMask": "0x2",
"BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
@@ -159,6 +230,15 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0x3C",
+ "UMask": "0x2",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x4c",
"UMask": "0x1",
"BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch",
@@ -225,6 +305,18 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EdgeDetect": "1",
+ "Invert": "1",
+ "EventCode": "0x5E",
+ "UMask": "0x1",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "Counter": "0,1,2,3",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "CounterMask": "1",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x87",
"UMask": "0x1",
"BriefDescription": "Stalls caused by changing prefix length of the instruction.",
@@ -406,6 +498,15 @@
},
{
"EventCode": "0x89",
+ "UMask": "0xa0",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
"UMask": "0xc1",
"BriefDescription": "Speculative and retired mispredicted macro conditional branches",
"Counter": "0,1,2,3",
@@ -435,6 +536,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA0",
+ "UMask": "0x3",
+ "BriefDescription": "Micro-op dispatches cancelled due to insufficient SIMD physical register file read ports",
+ "Counter": "0,1,2,3",
+ "EventName": "UOP_DISPATCHES_CANCELLED.SIMD_PRF",
+ "PublicDescription": "This event counts the number of micro-operations cancelled after they were dispatched from the scheduler to the execution units when the total number of physical register read ports across all dispatch ports exceeds the read bandwidth of the physical register file. The SIMD_PRF subevent applies to the following instructions: VDPPS, DPPS, VPCMPESTRI, PCMPESTRI, VPCMPESTRM, PCMPESTRM, VFMADD*, VFMADDSUB*, VFMSUB*, VMSUBADD*, VFNMADD*, VFNMSUB*. See the Broadwell Optimization Guide for more information.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0xA1",
"UMask": "0x1",
"BriefDescription": "Cycles per thread when uops are executed in port 0",
@@ -446,6 +557,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 0.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x2",
"BriefDescription": "Cycles per thread when uops are executed in port 1",
"Counter": "0,1,2,3",
@@ -456,6 +587,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 1.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x4",
"BriefDescription": "Cycles per thread when uops are executed in port 2",
"Counter": "0,1,2,3",
@@ -466,6 +617,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x8",
"BriefDescription": "Cycles per thread when uops are executed in port 3",
"Counter": "0,1,2,3",
@@ -476,6 +647,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x10",
"BriefDescription": "Cycles per thread when uops are executed in port 4",
"Counter": "0,1,2,3",
@@ -486,6 +677,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 4.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x20",
"BriefDescription": "Cycles per thread when uops are executed in port 5",
"Counter": "0,1,2,3",
@@ -496,6 +707,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x20",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 5.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x20",
+ "BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x40",
"BriefDescription": "Cycles per thread when uops are executed in port 6",
"Counter": "0,1,2,3",
@@ -506,6 +737,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x40",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 6.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x40",
+ "BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x80",
"BriefDescription": "Cycles per thread when uops are executed in port 7",
"Counter": "0,1,2,3",
@@ -515,6 +766,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "UMask": "0x80",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x80",
+ "BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xA2",
"UMask": "0x1",
"BriefDescription": "Resource-related stall cycles",
@@ -567,14 +838,13 @@
},
{
"EventCode": "0xA3",
- "UMask": "0x8",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "Counter": "2",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
- "CounterMask": "8",
- "PublicDescription": "Counts number of cycles the CPU has at least one pending demand load request missing the L1 data cache.",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
+ "CounterMask": "1",
"SampleAfterValue": "2000003",
- "CounterHTOff": "2"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xA3",
@@ -589,8 +859,18 @@
},
{
"EventCode": "0xA3",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "CounterMask": "2",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
"UMask": "0x4",
- "BriefDescription": "Total execution stalls",
+ "BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
"Counter": "0,1,2,3",
"EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
"CounterMask": "4",
@@ -600,6 +880,16 @@
},
{
"EventCode": "0xA3",
+ "UMask": "0x4",
+ "BriefDescription": "Total execution stalls.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "CounterMask": "4",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
"UMask": "0x5",
"BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
"Counter": "0,1,2,3",
@@ -611,6 +901,16 @@
},
{
"EventCode": "0xA3",
+ "UMask": "0x5",
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "CounterMask": "5",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
"UMask": "0x6",
"BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
"Counter": "0,1,2,3",
@@ -622,6 +922,37 @@
},
{
"EventCode": "0xA3",
+ "UMask": "0x6",
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
+ "CounterMask": "6",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "CounterMask": "8",
+ "PublicDescription": "Counts number of cycles the CPU has at least one pending demand load request missing the L1 data cache.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "CounterMask": "8",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
"UMask": "0xc",
"BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
"Counter": "2",
@@ -632,12 +963,41 @@
"CounterHTOff": "2"
},
{
+ "EventCode": "0xA3",
+ "UMask": "0xc",
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "CounterMask": "12",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
"EventCode": "0xA8",
"UMask": "0x1",
"BriefDescription": "Number of Uops delivered by the LSD.",
"Counter": "0,1,2,3",
"EventName": "LSD.UOPS",
- "PublicDescription": "Number of Uops delivered by the LSD. ",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
+ "EventName": "LSD.CYCLES_4_UOPS",
+ "CounterMask": "4",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "CounterMask": "1",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -652,6 +1012,58 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "Invert": "1",
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "CounterMask": "2",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "CounterMask": "3",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "CounterMask": "4",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0xB1",
"UMask": "0x2",
"BriefDescription": "Number of uops executed on the core.",
@@ -662,35 +1074,63 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "Invert": "1",
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
"CounterMask": "1",
- "PublicDescription": "This event counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC0",
- "UMask": "0x0",
- "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
"Counter": "0,1,2,3",
- "EventName": "INST_RETIRED.ANY_P",
- "Errata": "BDM61",
- "PublicDescription": "This event counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "CounterMask": "2",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC0",
+ "EventCode": "0xb1",
"UMask": "0x2",
- "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions:",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
"Counter": "0,1,2,3",
- "EventName": "INST_RETIRED.X87",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "CounterMask": "3",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "CounterMask": "4",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "Invert": "1",
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC0",
+ "UMask": "0x0",
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Counter": "0,1,2,3",
+ "EventName": "INST_RETIRED.ANY_P",
+ "Errata": "BDM61",
+ "PublicDescription": "This event counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -707,6 +1147,16 @@
"CounterHTOff": "1"
},
{
+ "EventCode": "0xC0",
+ "UMask": "0x2",
+ "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions:",
+ "Counter": "0,1,2,3",
+ "EventName": "INST_RETIRED.X87",
+ "PublicDescription": "This event counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xC1",
"UMask": "0x40",
"BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
@@ -718,23 +1168,12 @@
{
"EventCode": "0xC2",
"UMask": "0x1",
- "BriefDescription": "Actually retired uops.",
+ "BriefDescription": "Actually retired uops. (Precise Event - PEBS)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "UOPS_RETIRED.ALL",
- "PublicDescription": "This event counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC2",
- "UMask": "0x2",
- "BriefDescription": "Retirement slots used.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of retirement slots used.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -746,7 +1185,7 @@
"Counter": "0,1,2,3",
"EventName": "UOPS_RETIRED.STALL_CYCLES",
"CounterMask": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts cycles without actually retired uops.",
+ "PublicDescription": "This event counts cycles without actually retired uops.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
@@ -763,6 +1202,17 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0xC2",
+ "UMask": "0x2",
+ "BriefDescription": "Retirement slots used. (Precise Event - PEBS)",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts the number of retirement slots used.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xC3",
"UMask": "0x1",
"BriefDescription": "Cycles there was a Nuke. Account for both thread-specific and All Thread Nukes.",
@@ -773,6 +1223,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EdgeDetect": "1",
+ "EventCode": "0xC3",
+ "UMask": "0x1",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "Counter": "0,1,2,3",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "CounterMask": "1",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xC3",
"UMask": "0x4",
"BriefDescription": "Self-modifying code (SMC) detected.",
@@ -794,44 +1255,67 @@
},
{
"EventCode": "0xC4",
+ "UMask": "0x0",
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "This event counts all (macro) branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
"UMask": "0x1",
- "BriefDescription": "Conditional branch instructions retired.",
+ "BriefDescription": "Conditional branch instructions retired. (Precise Event - PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts conditional branch instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts conditional branch instructions retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC4",
"UMask": "0x2",
- "BriefDescription": "Direct and indirect near call instructions retired.",
+ "BriefDescription": "Direct and indirect near call instructions retired. (Precise Event - PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts both direct and indirect near call instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts both direct and indirect near call instructions retired.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC4",
- "UMask": "0x0",
- "BriefDescription": "All (macro) branch instructions retired.",
+ "UMask": "0x2",
+ "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3). (Precise Event - PEBS)",
+ "PEBS": "1",
"Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "PublicDescription": "This event counts all (macro) branch instructions retired.",
- "SampleAfterValue": "400009",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts both direct and indirect macro near call instructions retired (captured in ring 3).",
+ "SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC4",
+ "UMask": "0x4",
+ "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS)",
+ "PEBS": "2",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "Errata": "BDW98",
+ "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC4",
"UMask": "0x8",
- "BriefDescription": "Return instructions retired.",
+ "BriefDescription": "Return instructions retired. (Precise Event - PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts return instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts return instructions retired.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -841,18 +1325,18 @@
"BriefDescription": "Not taken branch instructions retired.",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NOT_TAKEN",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts not taken branch instructions retired.",
+ "PublicDescription": "This event counts not taken branch instructions retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC4",
"UMask": "0x20",
- "BriefDescription": "Taken branch instructions retired.",
+ "BriefDescription": "Taken branch instructions retired. (Precise Event - PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts taken branch instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts taken branch instructions retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -863,34 +1347,11 @@
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"Errata": "BDW98",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts far branch instructions retired.",
+ "PublicDescription": "This event counts far branch instructions retired.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC4",
- "UMask": "0x4",
- "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS)",
- "PEBS": "2",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
- "Errata": "BDW98",
- "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC5",
- "UMask": "0x1",
- "BriefDescription": "Mispredicted conditional branch instructions retired.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.CONDITIONAL",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted conditional branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0xC5",
"UMask": "0x0",
"BriefDescription": "All mispredicted macro branch instructions retired.",
@@ -902,13 +1363,13 @@
},
{
"EventCode": "0xC5",
- "UMask": "0x8",
- "BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "UMask": "0x1",
+ "BriefDescription": "Mispredicted conditional branch instructions retired. (Precise Event - PEBS)",
"PEBS": "1",
"Counter": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.RET",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired.",
- "SampleAfterValue": "100007",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts mispredicted conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -923,164 +1384,36 @@
"CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xCC",
- "UMask": "0x20",
- "BriefDescription": "Count cases of saving new LBR",
- "Counter": "0,1,2,3",
- "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
- "PublicDescription": "This event counts cases of saving new LBR records by hardware. This assumes proper enabling of LBRs and takes into account LBR filtering done by the LBR_SELECT register.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x0",
- "BriefDescription": "Thread cycles when thread is not in halt state",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P",
- "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x89",
- "UMask": "0xa0",
- "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x1",
- "BriefDescription": "Cycles per core when uops are exectuted in port 0.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x2",
- "BriefDescription": "Cycles per core when uops are exectuted in port 1.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x4",
- "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
+ "EventCode": "0xC5",
"UMask": "0x8",
- "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x10",
- "BriefDescription": "Cycles per core when uops are exectuted in port 4.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x20",
- "BriefDescription": "Cycles per core when uops are exectuted in port 5.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x40",
- "BriefDescription": "Cycles per core when uops are exectuted in port 6.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x80",
- "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
+ "BriefDescription": "This event counts the number of mispredicted ret instructions retired.(Precise Event)",
+ "PEBS": "1",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
+ "EventName": "BR_MISP_RETIRED.RET",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts mispredicted return instructions retired.",
+ "SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC5",
"UMask": "0x20",
- "BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
+ "BriefDescription": "number of near branch instructions retired that were mispredicted and taken. (Precise Event - PEBS).",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
- "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken. (Precise Event - PEBS).",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 1 uop was executed per-thread.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 2 uops were executed per-thread.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
- "CounterMask": "2",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 3 uops were executed per-thread.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
- "CounterMask": "3",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "EventCode": "0xCC",
+ "UMask": "0x20",
+ "BriefDescription": "Count cases of saving new LBR",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
- "CounterMask": "4",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "PublicDescription": "This event counts cases of saving new LBR records by hardware. This assumes proper enabling of LBRs and takes into account LBR filtering done by the LBR_SELECT register.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xe6",
@@ -1090,328 +1423,5 @@
"EventName": "BACLEARS.ANY",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0x8",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "Counter": "2",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
- "CounterMask": "8",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0x1",
- "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0x2",
- "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
- "CounterMask": "2",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0x4",
- "BriefDescription": "Total execution stalls.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
- "CounterMask": "4",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0xc",
- "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
- "Counter": "2",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
- "CounterMask": "12",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0x5",
- "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
- "CounterMask": "5",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0x6",
- "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
- "CounterMask": "6",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EdgeDetect": "1",
- "EventCode": "0xC3",
- "UMask": "0x1",
- "BriefDescription": "Number of machine clears (nukes) of any type.",
- "Counter": "0,1,2,3",
- "EventName": "MACHINE_CLEARS.COUNT",
- "CounterMask": "1",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA8",
- "UMask": "0x1",
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
- "Counter": "0,1,2,3",
- "EventName": "LSD.CYCLES_4_UOPS",
- "CounterMask": "4",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EdgeDetect": "1",
- "Invert": "1",
- "EventCode": "0x5E",
- "UMask": "0x1",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
- "Counter": "0,1,2,3",
- "EventName": "RS_EVENTS.EMPTY_END",
- "CounterMask": "1",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA8",
- "UMask": "0x1",
- "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
- "Counter": "0,1,2,3",
- "EventName": "LSD.CYCLES_ACTIVE",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x1",
- "BriefDescription": "Cycles per thread when uops are executed in port 0",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_0",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x2",
- "BriefDescription": "Cycles per thread when uops are executed in port 1",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_1",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x4",
- "BriefDescription": "Cycles per thread when uops are executed in port 2",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_2",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x8",
- "BriefDescription": "Cycles per thread when uops are executed in port 3",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_3",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x10",
- "BriefDescription": "Cycles per thread when uops are executed in port 4",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_4",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x20",
- "BriefDescription": "Cycles per thread when uops are executed in port 5",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_5",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x40",
- "BriefDescription": "Cycles per thread when uops are executed in port 6",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_6",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x80",
- "BriefDescription": "Cycles per thread when uops are executed in port 7",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_7",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA0",
- "UMask": "0x3",
- "BriefDescription": "Micro-op dispatches cancelled due to insufficient SIMD physical register file read ports",
- "Counter": "0,1,2,3",
- "EventName": "UOP_DISPATCHES_CANCELLED.SIMD_PRF",
- "PublicDescription": "This event counts the number of micro-operations cancelled after they were dispatched from the scheduler to the execution units when the total number of physical register read ports across all dispatch ports exceeds the read bandwidth of the physical register file. The SIMD_PRF subevent applies to the following instructions: VDPPS, DPPS, VPCMPESTRI, PCMPESTRI, VPCMPESTRM, PCMPESTRM, VFMADD*, VFMADDSUB*, VFMSUB*, VMSUBADD*, VFNMADD*, VFNMSUB*. See the Broadwell Optimization Guide for more information.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0x00",
- "UMask": "0x2",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "Counter": "Fixed counter 2",
- "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 2"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x0",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x0D",
- "UMask": "0x3",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
- "Counter": "0,1,2,3",
- "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
- "AnyThread": "1",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
- "CounterMask": "2",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
- "CounterMask": "3",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
- "CounterMask": "4",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "Invert": "1",
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
- "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x2",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
]
\ No newline at end of file
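
Every record in the pipeline tables above maps directly onto perf's raw event syntax. The sketch below is a minimal, hypothetical helper (not part of this patch or the perf tree) showing how the EventCode/UMask/CounterMask/Invert/EdgeDetect/AnyThread keys translate into the event=/umask=/cmask=/inv=/edge=/any= format terms that Intel core PMUs expose under /sys/bus/event_source/devices/cpu/format; to_perf_event() is an invented name.

    #!/usr/bin/env python3
    import json

    def to_perf_event(entry):
        # Map the JSON keys used in these files onto perf's format terms.
        terms = ["event=" + entry["EventCode"].lower(),
                 "umask=" + entry["UMask"].lower()]
        if "CounterMask" in entry:
            terms.append("cmask=" + entry["CounterMask"])
        if entry.get("Invert") == "1":
            terms.append("inv=1")
        if entry.get("EdgeDetect") == "1":
            terms.append("edge=1")
        if entry.get("AnyThread") == "1":
            terms.append("any=1")
        return "cpu/" + ",".join(terms) + "/"

    entry = json.loads('{"EventCode": "0xC5", "UMask": "0x20", '
                       '"EventName": "BR_MISP_RETIRED.NEAR_TAKEN"}')
    print(to_perf_event(entry))   # -> cpu/event=0xc5,umask=0x20/

The printed string can be handed to perf stat -e as-is.
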
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/virtual-memory.json b/tools/perf/pmu-events/arch/x86/broadwellx/virtual-memory.json
index 5ce8b67ba076..7d79c707c6d1 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/virtual-memory.json
@@ -45,6 +45,16 @@
},
{
"EventCode": "0x08",
+ "UMask": "0xe",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "Errata": "BDM69",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
"UMask": "0x10",
"BriefDescription": "Cycles when PMH is busy with page walks",
"Counter": "0,1,2,3",
@@ -73,6 +83,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x08",
+ "UMask": "0x60",
+ "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x49",
"UMask": "0x1",
"BriefDescription": "Store misses in all DTLB levels that cause page walks",
@@ -118,6 +137,16 @@
},
{
"EventCode": "0x49",
+ "UMask": "0xe",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "Errata": "BDM69",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
"UMask": "0x10",
"BriefDescription": "Cycles when PMH is busy with page walks",
"Counter": "0,1,2,3",
@@ -146,6 +175,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x49",
+ "UMask": "0x60",
+ "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x4F",
"UMask": "0x10",
"BriefDescription": "Cycle count for an Extended Page table walk.",
@@ -201,6 +239,16 @@
},
{
"EventCode": "0x85",
+ "UMask": "0xe",
+ "BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "Errata": "BDM69",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
"UMask": "0x10",
"BriefDescription": "Cycles when PMH is busy with page walks",
"Counter": "0,1,2,3",
@@ -229,6 +277,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x85",
+ "UMask": "0x60",
+ "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xAE",
"UMask": "0x1",
"BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
@@ -250,60 +307,60 @@
},
{
"EventCode": "0xBC",
- "UMask": "0x21",
- "BriefDescription": "Number of ITLB page walker hits in the L1+FB.",
+ "UMask": "0x12",
+ "BriefDescription": "Number of DTLB page walker hits in the L2.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L2",
"Errata": "BDM69, BDM98",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x12",
- "BriefDescription": "Number of DTLB page walker hits in the L2.",
+ "UMask": "0x14",
+ "BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.DTLB_L2",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
"Errata": "BDM69, BDM98",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x22",
- "BriefDescription": "Number of ITLB page walker hits in the L2.",
+ "UMask": "0x18",
+ "BriefDescription": "Number of DTLB page walker hits in Memory.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L2",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
"Errata": "BDM69, BDM98",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x14",
- "BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP.",
+ "UMask": "0x21",
+ "BriefDescription": "Number of ITLB page walker hits in the L1+FB.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
"Errata": "BDM69, BDM98",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x24",
- "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP.",
+ "UMask": "0x22",
+ "BriefDescription": "Number of ITLB page walker hits in the L2.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L2",
"Errata": "BDM69, BDM98",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x18",
- "BriefDescription": "Number of DTLB page walker hits in Memory.",
+ "UMask": "0x24",
+ "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
"Errata": "BDM69, BDM98",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
@@ -327,62 +384,5 @@
"PublicDescription": "This event counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, and so on).",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x08",
- "UMask": "0xe",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
- "Errata": "BDM69",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x08",
- "UMask": "0x60",
- "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x49",
- "UMask": "0xe",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
- "Errata": "BDM69",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x49",
- "UMask": "0x60",
- "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x85",
- "UMask": "0xe",
- "BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_COMPLETED",
- "Errata": "BDM69",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x85",
- "UMask": "0x60",
- "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.STLB_HIT",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
]
\ No newline at end of file
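
The goldmont cache.json hunks that follow add OFFCORE_RESPONSE matrix events, which carry an MSRValue to be programmed into MSR_OFFCORE_RESP0/1 (the MSRIndex 0x1a6/0x1a7 seen below). Continuing the sketch above with another hypothetical helper, assuming the offcore_rsp format term perf exposes for these PMUs, such an entry becomes:

    def offcore_event(entry):
        # MSRValue (programmed into MSR_OFFCORE_RESP0/1) rides along as the
        # offcore_rsp term; .strip() drops the stray trailing spaces visible
        # in some MSRValue strings below.
        return "cpu/event=%s,umask=%s,offcore_rsp=%s/" % (
            entry["EventCode"].lower(),
            entry["UMask"].lower(),
            entry["MSRValue"].strip())

    entry = {"EventCode": "0xB7", "UMask": "0x1",
             "MSRValue": "0x0000013091",
             "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.ANY_RESPONSE"}
    print(offcore_event(entry))
    # -> cpu/event=0xb7,umask=0x1,offcore_rsp=0x0000013091/
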
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/cache.json b/tools/perf/pmu-events/arch/x86/goldmont/cache.json
index 4e02e1e5e70d..f8bbe087b0f8 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/cache.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/cache.json
@@ -1,6 +1,26 @@
[
{
"CollectPEBSRecord": "1",
+ "PublicDescription": "Counts memory requests originating from the core that miss in the L2 cache.",
+ "EventCode": "0x2E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache request misses"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts memory requests originating from the core that reference a cache line in the L2 cache.",
+ "EventCode": "0x2E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4f",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache requests"
+ },
+ {
+ "CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of demand and prefetch transactions that the L2 XQ rejects due to a full or near full condition which likely indicates back pressure from the intra-die interconnect (IDI) fabric. The XQ may reject transactions from the L2Q (non-cacheable requests), L2 misses and L2 write-back victims.",
"EventCode": "0x30",
"Counter": "0,1,2,3",
@@ -11,120 +31,119 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of demand and L1 prefetcher requests rejected by the L2Q due to a full or nearly full condition which likely indicates back pressure from L2Q. It also counts requests that would have gone directly to the XQ, but are rejected due to a full or nearly full condition, indicating back pressure from the IDI link. The L2Q may also reject transactions from a core to insure fairness between cores, or to delay a core's dirty eviction when the address conflicts with incoming external snoops.",
+ "PublicDescription": "Counts the number of demand and L1 prefetcher requests rejected by the L2Q due to a full or nearly full condition which likely indicates back pressure from L2Q. It also counts requests that would have gone directly to the XQ, but are rejected due to a full or nearly full condition, indicating back pressure from the IDI link. The L2Q may also reject transactions from a core to ensure fairness between cores, or to delay a core's dirty eviction when the address conflicts with incoming external snoops.",
"EventCode": "0x31",
"Counter": "0,1,2,3",
"UMask": "0x0",
"EventName": "CORE_REJECT_L2Q.ALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Requests rejected by the L2Q "
+ "BriefDescription": "Requests rejected by the L2Q"
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts memory requests originating from the core that reference a cache line in the L2 cache.",
- "EventCode": "0x2E",
+ "PublicDescription": "Counts when a modified (dirty) cache line is evicted from the data L1 cache and needs to be written back to memory. No count will occur if the evicted line is clean, and hence does not require a writeback.",
+ "EventCode": "0x51",
"Counter": "0,1,2,3",
- "UMask": "0x4f",
- "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "UMask": "0x1",
+ "EventName": "DL1.DIRTY_EVICTION",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 cache requests"
+ "BriefDescription": "L1 Cache evictions for dirty data"
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts memory requests originating from the core that miss in the L2 cache.",
- "EventCode": "0x2E",
+ "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ICache miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ICache miss. Note: this event is not the same as the total number of cycles spent retrieving instruction cache lines from the memory hierarchy.",
+ "EventCode": "0x86",
"Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "LONGEST_LAT_CACHE.MISS",
+ "UMask": "0x2",
+ "EventName": "FETCH_STALL.ICACHE_FILL_PENDING_CYCLES",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 cache request misses"
+ "BriefDescription": "Cycles code-fetch stalled due to an outstanding ICache miss."
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts cycles that an ICache miss is outstanding, and instruction fetch is stalled. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes, while an Icache miss outstanding. Note this event is not the same as cycles to retrieve an instruction due to an Icache miss. Rather, it is the part of the Instruction Cache (ICache) miss time where no bytes are available for the decoder.",
- "EventCode": "0x86",
+ "EventCode": "0xB7",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "FETCH_STALL.ICACHE_FILL_PENDING_CYCLES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Cycles where code-fetch is stalled and an ICache miss is outstanding. This is not the same as an ICache Miss."
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of load uops retired.",
+ "PublicDescription": "Counts locked memory uops retired. This includes regular locks and bus locks. (To specifically count bus locks only, see the Offcore response event.) A locked access is one with a lock prefix, or an exchange to memory. See the SDM for a complete description of which memory load accesses are locks.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
+ "UMask": "0x21",
+ "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired (Precise event capable)"
+ "BriefDescription": "Locked load uops retired (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of store uops retired.",
+ "PublicDescription": "Counts load uops retired where the data requested spans a 64 byte cache line boundary.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x82",
- "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
+ "UMask": "0x41",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"SampleAfterValue": "200003",
- "BriefDescription": "Store uops retired (Precise event capable)"
+ "BriefDescription": "Load uops retired that split a cache-line (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of memory uops retired that is either a loads or a store or both.",
+ "PublicDescription": "Counts store uops retired where the data requested spans a 64 byte cache line boundary.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x83",
- "EventName": "MEM_UOPS_RETIRED.ALL",
+ "UMask": "0x42",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"SampleAfterValue": "200003",
- "BriefDescription": "Memory uops retired (Precise event capable)"
+ "BriefDescription": "Stores uops retired that split a cache-line (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts locked memory uops retired. This includes \"regular\" locks and bus locks. (To specifically count bus locks only, see the Offcore response event.) A locked access is one with a lock prefix, or an exchange to memory. See the SDM for a complete description of which memory load accesses are locks.",
+ "PublicDescription": "Counts memory uops retired where the data requested spans a 64 byte cache line boundary.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x21",
- "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
+ "UMask": "0x43",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT",
"SampleAfterValue": "200003",
- "BriefDescription": "Locked load uops retired (Precise event capable)"
+ "BriefDescription": "Memory uops retired that split a cache-line (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts load uops retired where the data requested spans a 64 byte cache line boundary.",
+ "PublicDescription": "Counts the number of load uops retired.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
+ "UMask": "0x81",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that split a cache-line (Precise event capable)"
+ "BriefDescription": "Load uops retired (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts store uops retired where the data requested spans a 64 byte cache line boundary.",
+ "PublicDescription": "Counts the number of store uops retired.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x42",
- "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
+ "UMask": "0x82",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"SampleAfterValue": "200003",
- "BriefDescription": "Stores uops retired that split a cache-line (Precise event capable)"
+ "BriefDescription": "Store uops retired (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts memory uops retired where the data requested spans a 64 byte cache line boundary.",
+ "PublicDescription": "Counts the number of memory uops retired that is either a loads or a store or both.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x43",
- "EventName": "MEM_UOPS_RETIRED.SPLIT",
+ "UMask": "0x83",
+ "EventName": "MEM_UOPS_RETIRED.ALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Memory uops retired that split a cache-line (Precise event capable)"
+ "BriefDescription": "Memory uops retired (Precise event capable)"
},
{
"PEBS": "2",
@@ -140,24 +159,24 @@
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts load uops retired that miss the L1 data cache.",
+ "PublicDescription": "Counts load uops retired that hit in the L2 cache.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
+ "UMask": "0x2",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that missed L1 data cache (Precise event capable)"
+ "BriefDescription": "Load uops retired that hit L2 (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts load uops retired that hit in the L2 cache.",
+ "PublicDescription": "Counts load uops retired that miss the L1 data cache.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
+ "UMask": "0x8",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that hit L2 (Precise event capable)"
+ "BriefDescription": "Load uops retired that missed L1 data cache (Precise event capable)"
},
{
"PEBS": "2",
@@ -205,24 +224,20 @@
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts when a modified (dirty) cache line is evicted from the data L1 cache and needs to be written back to memory. No count will occur if the evicted line is clean, and hence does not require a writeback.",
- "EventCode": "0x51",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "DL1.DIRTY_EVICTION",
- "SampleAfterValue": "200003",
- "BriefDescription": "L1 Cache evictions for dirty data"
- },
- {
- "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
+ "MSRValue": "0x40000032b7 ",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.OUTSTANDING",
+ "MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)"
+ "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x36000032b7 ",
"Counter": "0,1,2,3",
@@ -234,6 +249,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x10000032b7 ",
"Counter": "0,1,2,3",
@@ -245,6 +262,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x04000032b7 ",
"Counter": "0,1,2,3",
@@ -256,6 +275,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x02000032b7 ",
"Counter": "0,1,2,3",
@@ -267,6 +288,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x00000432b7 ",
"Counter": "0,1,2,3",
@@ -278,6 +301,34 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x00000132b7 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000022 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600000022 ",
"Counter": "0,1,2,3",
@@ -289,6 +340,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000000022 ",
"Counter": "0,1,2,3",
@@ -300,6 +353,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400000022 ",
"Counter": "0,1,2,3",
@@ -311,6 +366,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200000022 ",
"Counter": "0,1,2,3",
@@ -322,6 +379,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000040022 ",
"Counter": "0,1,2,3",
@@ -333,6 +392,34 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010022 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000003091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data reads (demand & prefetch) that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads (demand & prefetch) that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600003091",
"Counter": "0,1,2,3",
@@ -344,6 +431,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000003091",
"Counter": "0,1,2,3",
@@ -355,6 +444,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400003091",
"Counter": "0,1,2,3",
@@ -366,6 +457,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200003091",
"Counter": "0,1,2,3",
@@ -377,6 +470,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads (demand & prefetch) that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000043091",
"Counter": "0,1,2,3",
@@ -388,6 +483,34 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads (demand & prefetch) that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000013091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data reads (demand & prefetch) that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000003010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600003010 ",
"Counter": "0,1,2,3",
@@ -399,6 +522,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000003010 ",
"Counter": "0,1,2,3",
@@ -410,6 +535,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400003010 ",
"Counter": "0,1,2,3",
@@ -421,6 +548,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200003010 ",
"Counter": "0,1,2,3",
@@ -432,6 +561,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000043010 ",
"Counter": "0,1,2,3",
@@ -443,6 +574,47 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000013010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts requests to the uncore subsystem that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000008000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts requests to the uncore subsystem that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts requests to the uncore subsystem that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x3600008000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts requests to the uncore subsystem that miss the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts requests to the uncore subsystem that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000008000 ",
"Counter": "0,1,2,3",
@@ -454,6 +626,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts requests to the uncore subsystem that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400008000 ",
"Counter": "0,1,2,3",
@@ -465,6 +639,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts requests to the uncore subsystem that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200008000 ",
"Counter": "0,1,2,3",
@@ -476,6 +652,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts requests to the uncore subsystem that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000048000 ",
"Counter": "0,1,2,3",
@@ -487,6 +665,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts requests to the uncore subsystem that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000018000 ",
"Counter": "0,1,2,3",
@@ -498,6 +678,21 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000004800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600004800 ",
"Counter": "0,1,2,3",
@@ -509,6 +704,47 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000004800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0400004800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200004800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000044800 ",
"Counter": "0,1,2,3",
@@ -520,6 +756,34 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000014800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000004000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600004000 ",
"Counter": "0,1,2,3",
@@ -531,6 +795,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000004000 ",
"Counter": "0,1,2,3",
@@ -542,6 +808,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400004000 ",
"Counter": "0,1,2,3",
@@ -553,6 +821,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200004000 ",
"Counter": "0,1,2,3",
@@ -564,6 +834,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000044000 ",
"Counter": "0,1,2,3",
@@ -575,6 +847,34 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000014000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000002000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600002000 ",
"Counter": "0,1,2,3",
@@ -586,6 +886,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000002000 ",
"Counter": "0,1,2,3",
@@ -597,6 +899,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400002000 ",
"Counter": "0,1,2,3",
@@ -608,6 +912,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200002000 ",
"Counter": "0,1,2,3",
@@ -619,6 +925,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000042000 ",
"Counter": "0,1,2,3",
@@ -630,6 +938,34 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000012000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache lines requests by software prefetch instructions that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000001000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cache lines requests by software prefetch instructions that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600001000 ",
"Counter": "0,1,2,3",
@@ -641,6 +977,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000001000 ",
"Counter": "0,1,2,3",
@@ -652,6 +990,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400001000 ",
"Counter": "0,1,2,3",
@@ -663,6 +1003,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache lines requests by software prefetch instructions that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200001000 ",
"Counter": "0,1,2,3",
@@ -674,6 +1016,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache lines requests by software prefetch instructions that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000041000 ",
"Counter": "0,1,2,3",
@@ -685,6 +1029,34 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cache lines requests by software prefetch instructions that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000011000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cache lines requests by software prefetch instructions that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600000800 ",
"Counter": "0,1,2,3",
@@ -696,6 +1068,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000000800 ",
"Counter": "0,1,2,3",
@@ -707,6 +1081,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400000800 ",
"Counter": "0,1,2,3",
@@ -718,6 +1094,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200000800 ",
"Counter": "0,1,2,3",
@@ -729,6 +1107,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000040800 ",
"Counter": "0,1,2,3",
@@ -740,6 +1120,99 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts bus lock and split lock requests that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000400 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts bus lock and split lock requests that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts bus lock and split lock requests that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x3600000400 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts bus lock and split lock requests that miss the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts bus lock and split lock requests that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000000400 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts bus lock and split lock requests that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts bus lock and split lock requests that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0400000400 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts bus lock and split lock requests that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts bus lock and split lock requests that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200000400 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts bus lock and split lock requests that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts bus lock and split lock requests that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000040400 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts bus lock and split lock requests that hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts bus lock and split lock requests that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000010400 ",
"Counter": "0,1,2,3",
@@ -751,6 +1224,112 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts code reads in uncacheable (UC) memory region that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts code reads in uncacheable (UC) memory region that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x3600000200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000000200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0400000200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts code reads in uncacheable (UC) memory region that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts code reads in uncacheable (UC) memory region that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200000200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts code reads in uncacheable (UC) memory region that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts code reads in uncacheable (UC) memory region that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000040200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts code reads in uncacheable (UC) memory region that hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts code reads in uncacheable (UC) memory region that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts code reads in uncacheable (UC) memory region that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600000100 ",
"Counter": "0,1,2,3",
@@ -762,6 +1341,86 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0400000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000040100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600000080 ",
"Counter": "0,1,2,3",
@@ -773,6 +1432,86 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0400000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000040080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000020 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600000020 ",
"Counter": "0,1,2,3",
@@ -784,6 +1523,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000000020 ",
"Counter": "0,1,2,3",
@@ -795,6 +1536,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400000020 ",
"Counter": "0,1,2,3",
@@ -806,6 +1549,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200000020 ",
"Counter": "0,1,2,3",
@@ -817,6 +1562,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000040020 ",
"Counter": "0,1,2,3",
@@ -828,6 +1575,34 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010020 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600000010 ",
"Counter": "0,1,2,3",
@@ -839,6 +1614,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000000010 ",
"Counter": "0,1,2,3",
@@ -850,6 +1627,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400000010 ",
"Counter": "0,1,2,3",
@@ -861,6 +1640,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200000010 ",
"Counter": "0,1,2,3",
@@ -872,6 +1653,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000040010 ",
"Counter": "0,1,2,3",
@@ -883,6 +1666,34 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000008 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600000008 ",
"Counter": "0,1,2,3",
@@ -894,6 +1705,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000000008 ",
"Counter": "0,1,2,3",
@@ -905,6 +1718,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400000008 ",
"Counter": "0,1,2,3",
@@ -916,6 +1731,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200000008 ",
"Counter": "0,1,2,3",
@@ -927,6 +1744,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000040008 ",
"Counter": "0,1,2,3",
@@ -938,6 +1757,21 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010008 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x4000000004 ",
"Counter": "0,1,2,3",
@@ -949,6 +1783,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600000004 ",
"Counter": "0,1,2,3",
@@ -960,6 +1796,21 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000000004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400000004 ",
"Counter": "0,1,2,3",
@@ -971,6 +1822,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200000004 ",
"Counter": "0,1,2,3",
@@ -982,6 +1835,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000040004 ",
"Counter": "0,1,2,3",
@@ -993,6 +1848,21 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x4000000002 ",
"Counter": "0,1,2,3",
@@ -1004,6 +1874,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600000002 ",
"Counter": "0,1,2,3",
@@ -1015,6 +1887,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000000002 ",
"Counter": "0,1,2,3",
@@ -1026,6 +1900,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400000002 ",
"Counter": "0,1,2,3",
@@ -1037,6 +1913,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200000002 ",
"Counter": "0,1,2,3",
@@ -1048,6 +1926,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000040002 ",
"Counter": "0,1,2,3",
@@ -1059,6 +1939,21 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand cacheable data reads of full cache lines that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x4000000001 ",
"Counter": "0,1,2,3",
@@ -1070,6 +1965,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x3600000001 ",
"Counter": "0,1,2,3",
@@ -1081,6 +1978,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x1000000001 ",
"Counter": "0,1,2,3",
@@ -1092,6 +1991,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0400000001 ",
"Counter": "0,1,2,3",
@@ -1103,6 +2004,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand cacheable data reads of full cache lines that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0200000001 ",
"Counter": "0,1,2,3",
@@ -1114,6 +2017,8 @@
"Offcore": "1"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand cacheable data reads of full cache lines that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"EventCode": "0xB7",
"MSRValue": "0x0000040001 ",
"Counter": "0,1,2,3",
@@ -1123,5 +2028,18 @@
"SampleAfterValue": "100007",
"BriefDescription": "Counts demand cacheable data reads of full cache lines that hit the L2 cache.",
"Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts demand cacheable data reads of full cache lines that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand cacheable data reads of full cache lines that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
}
] \ No newline at end of file
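For context on the OFFCORE_RESPONSE entries above: perf resolves the symbolic EventName into the EventCode/UMask pair and programs the MSRValue into the off-core response MSR named by MSRIndex. Below is a minimal sketch of the equivalent raw setup through perf_event_open(), assuming a Goldmont system and the OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE encoding from this table; on x86, attr.config1 conventionally carries the off-core response value, but verify that against your kernel before relying on it:

  #include <linux/perf_event.h>
  #include <sys/syscall.h>
  #include <unistd.h>
  #include <stdint.h>
  #include <string.h>
  #include <stdio.h>

  int main(void)
  {
          struct perf_event_attr attr;

          memset(&attr, 0, sizeof(attr));
          attr.size = sizeof(attr);
          attr.type = PERF_TYPE_RAW;
          attr.config = 0xB7 | (0x1 << 8);   /* EventCode 0xB7, UMask 0x1 */
          attr.config1 = 0x0000010001ULL;    /* MSRValue: DEMAND_DATA_RD, ANY_RESPONSE */

          int fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
          if (fd < 0) {
                  perror("perf_event_open");
                  return 1;
          }
          /* ... run the workload of interest ... */
          uint64_t count;
          if (read(fd, &count, sizeof(count)) == sizeof(count))
                  printf("offcore responses: %llu\n", (unsigned long long)count);
          close(fd);
          return 0;
  }

In everyday use the symbolic event name is enough; perf performs this programming itself from these tables.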
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/memory.json b/tools/perf/pmu-events/arch/x86/goldmont/memory.json
index ac8b0d365a19..690cebd12a94 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/memory.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/memory.json
@@ -1,15 +1,5 @@
[
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts machine clears due to memory ordering issues. This occurs when a snoop request happens and the machine is uncertain if memory ordering will be preserved - as another core is in the process of modifying the data.",
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
- "SampleAfterValue": "200003",
- "BriefDescription": "Machine clears due to memory ordering issue"
- },
- {
"PEBS": "2",
"CollectPEBSRecord": "2",
"PublicDescription": "Counts when a memory load of a uop spans a page boundary (a split) is retired.",
@@ -30,5 +20,275 @@
"EventName": "MISALIGN_MEM_REF.STORE_PAGE_SPLIT",
"SampleAfterValue": "200003",
"BriefDescription": "Store uops that split a page (Precise event capable)"
+ },
+ {
+ "CollectPEBSRecord": "1",
+    "PublicDescription": "Counts machine clears due to memory ordering issues. This occurs when a snoop request happens and the machine is uncertain if memory ordering will be preserved, as another core is in the process of modifying the data.",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Machine clears due to memory ordering issue"
+ },
+ {
+ "CollectPEBSRecord": "1",
+    "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache and target a non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x20000032b7 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+    "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache and target a non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+    "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache and target a non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000000022 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+    "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache and target a non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+    "PublicDescription": "Counts data reads (demand & prefetch) that miss the L2 cache and target a non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000003091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+    "BriefDescription": "Counts data reads (demand & prefetch) that miss the L2 cache and target a non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+    "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache and target a non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000003010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+    "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache and target a non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+    "PublicDescription": "Counts requests to the uncore subsystem that miss the L2 cache and target a non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000008000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+    "BriefDescription": "Counts requests to the uncore subsystem that miss the L2 cache and target a non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+    "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory regions that miss the L2 cache and target a non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000004800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+    "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory regions that miss the L2 cache and target a non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+    "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory regions that miss the L2 cache and target a non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000004000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+    "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory regions that miss the L2 cache and target a non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+    "PublicDescription": "Counts data cache line reads generated by the hardware L1 data cache prefetcher that miss the L2 cache and target a non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000002000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+    "BriefDescription": "Counts data cache line reads generated by the hardware L1 data cache prefetcher that miss the L2 cache and target a non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+    "PublicDescription": "Counts data cache line requests by software prefetch instructions that miss the L2 cache and target a non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000001000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+    "BriefDescription": "Counts data cache line requests by software prefetch instructions that miss the L2 cache and target a non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+    "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory regions and full cache-line non-temporal writes that miss the L2 cache and target a non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000000800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+    "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory regions and full cache-line non-temporal writes that miss the L2 cache and target a non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+    "PublicDescription": "Counts bus lock and split lock requests that miss the L2 cache and target a non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000000400 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+    "BriefDescription": "Counts bus lock and split lock requests that miss the L2 cache and target a non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+    "PublicDescription": "Counts code reads in uncacheable (UC) memory regions that miss the L2 cache and target a non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000000200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_RD.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+    "BriefDescription": "Counts code reads in uncacheable (UC) memory regions that miss the L2 cache and target a non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+    "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to a partial data cache line, including writes to uncacheable (UC), write-through (WT), and write-protected (WP) memory types, that miss the L2 cache and target a non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+    "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to a partial data cache line, including writes to uncacheable (UC), write-through (WT), and write-protected (WP) memory types, that miss the L2 cache and target a non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+    "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types, that miss the L2 cache and target a non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+    "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types, that miss the L2 cache and target a non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+    "PublicDescription": "Counts reads for ownership (RFO) requests generated by the L2 prefetcher that miss the L2 cache and target a non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000000020 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+    "BriefDescription": "Counts reads for ownership (RFO) requests generated by the L2 prefetcher that miss the L2 cache and target a non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+    "PublicDescription": "Counts data cacheline reads generated by the hardware L2 cache prefetcher that miss the L2 cache and target a non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000000010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+    "BriefDescription": "Counts data cacheline reads generated by the hardware L2 cache prefetcher that miss the L2 cache and target a non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+    "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache and target a non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000000008 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+    "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache and target a non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+    "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache and the L2 cache and target a non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000000004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+    "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache and the L2 cache and target a non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+    "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to a full data cache line that miss the L2 cache and target a non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000000002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+    "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to a full data cache line that miss the L2 cache and target a non-DRAM system address.",
+ "Offcore": "1"
+ },
+ {
+ "CollectPEBSRecord": "1",
+    "PublicDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache and target a non-DRAM system address. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000000001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+    "BriefDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache and target a non-DRAM system address.",
+ "Offcore": "1"
}
] \ No newline at end of file
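A note on the MSRValue layout used throughout these entries: the low word selects the request type and the upper bits select the response, so compound events are simply ORed bit masks. A small self-check in C, with bit positions inferred from the values in this file (the macro names are illustrative; confirm the positions against the Intel SDM before reusing them):

  #include <assert.h>
  #include <stdint.h>

  /* Bit positions inferred from the MSRValues above. */
  #define REQ_DEMAND_DATA_RD  (1ULL << 0)   /* 0x0000000001 */
  #define REQ_DEMAND_RFO      (1ULL << 1)   /* 0x0000000002 */
  #define RESP_ANY            (1ULL << 16)  /* 0x0000010000 */
  #define RESP_NON_DRAM       (1ULL << 37)  /* 0x2000000000 */

  int main(void)
  {
          /* OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.NON_DRAM */
          assert((REQ_DEMAND_DATA_RD | RESP_NON_DRAM) == 0x2000000001ULL);
          /* OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.NON_DRAM */
          assert((REQ_DEMAND_RFO | RESP_NON_DRAM) == 0x2000000002ULL);
          return 0;
  }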
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/other.json b/tools/perf/pmu-events/arch/x86/goldmont/other.json
index df25ca9542f1..959cadd7cb0e 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/other.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/other.json
@@ -1,23 +1,23 @@
[
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed because of a full resource in the backend. Including but not limited to resources such as the Re-order Buffer (ROB), reservation stations (RS), load/store buffers, physical registers, or any other needed machine resource that is currently unavailable. Note that uops must be available for consumption in order for this event to fire. If a uop is not available (Instruction Queue is empty), this event will not count.",
- "EventCode": "0xCA",
+    "PublicDescription": "Counts cycles that fetch is stalled for any reason. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes. This includes cycles due to an ITLB miss, an ICache miss, and other events.",
+ "EventCode": "0x86",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ISSUE_SLOTS_NOT_CONSUMED.RESOURCE_FULL",
+ "UMask": "0x0",
+ "EventName": "FETCH_STALL.ALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Unfilled issue slots per cycle because of a full resource in the backend"
+ "BriefDescription": "Cycles code-fetch stalled due to any reason."
},
{
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed by the backend because allocation is stalled waiting for a mispredicted jump to retire or other branch-like conditions (e.g. the event is relevant during certain microcode flows). Counts all issue slots blocked while within this window including slots where uops were not available in the Instruction Queue.",
- "EventCode": "0xCA",
+ "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ITLB miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ITLB miss. Note: this event is not the same as page walk cycles to retrieve an instruction translation.",
+ "EventCode": "0x86",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "ISSUE_SLOTS_NOT_CONSUMED.RECOVERY",
+ "UMask": "0x1",
+ "EventName": "FETCH_STALL.ITLB_FILL_PENDING_CYCLES",
"SampleAfterValue": "200003",
- "BriefDescription": "Unfilled issue slots per cycle to recover"
+ "BriefDescription": "Cycles code-fetch stalled due to an outstanding ITLB miss."
},
{
"CollectPEBSRecord": "1",
@@ -30,14 +30,44 @@
"BriefDescription": "Unfilled issue slots per cycle"
},
{
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed because of a full resource in the backend. Including but not limited to resources such as the Re-order Buffer (ROB), reservation stations (RS), load/store buffers, physical registers, or any other needed machine resource that is currently unavailable. Note that uops must be available for consumption in order for this event to fire. If a uop is not available (Instruction Queue is empty), this event will not count.",
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ISSUE_SLOTS_NOT_CONSUMED.RESOURCE_FULL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Unfilled issue slots per cycle because of a full resource in the backend"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed by the backend because allocation is stalled waiting for a mispredicted jump to retire or other branch-like conditions (e.g. the event is relevant during certain microcode flows). Counts all issue slots blocked while within this window including slots where uops were not available in the Instruction Queue.",
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "ISSUE_SLOTS_NOT_CONSUMED.RECOVERY",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Unfilled issue slots per cycle to recover"
+ },
+ {
"CollectPEBSRecord": "2",
"PublicDescription": "Counts hardware interrupts received by the processor.",
"EventCode": "0xCB",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "HW_INTERRUPTS.RECEIVED",
+ "SampleAfterValue": "203",
+ "BriefDescription": "Hardware interrupts received"
+ },
+ {
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts the number of core cycles during which interrupts are masked (disabled). Increments by 1 each core cycle that EFLAGS.IF is 0, regardless of whether interrupts are pending or not.",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "HW_INTERRUPTS.MASKED",
"SampleAfterValue": "200003",
- "BriefDescription": "Hardware interrupts received (Precise event capable)"
+ "BriefDescription": "Cycles hardware interrupts are masked"
},
{
"CollectPEBSRecord": "2",
@@ -47,6 +77,6 @@
"UMask": "0x4",
"EventName": "HW_INTERRUPTS.PENDING_AND_MASKED",
"SampleAfterValue": "200003",
- "BriefDescription": "Cycles pending interrupts are masked (Precise event capable)"
+ "BriefDescription": "Cycles pending interrupts are masked"
}
] \ No newline at end of file
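One way the new HW_INTERRUPTS.MASKED event might be used: the share of time spent with interrupts disabled is the masked-cycle count divided by unhalted core cycles over the same interval. A sketch with made-up counter readings, not measured data:

  #include <stdio.h>
  #include <stdint.h>

  int main(void)
  {
          /* Hypothetical readings collected over one interval */
          uint64_t masked   = 1200000;     /* HW_INTERRUPTS.MASKED  */
          uint64_t unhalted = 96000000;    /* CPU_CLK_UNHALTED.CORE */

          printf("interrupts masked %.2f%% of unhalted cycles\n",
                 100.0 * (double)masked / (double)unhalted);   /* 1.25% */
          return 0;
  }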
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json b/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json
index 07f00041f56f..254788af8ab6 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json
@@ -1,168 +1,136 @@
[
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts branch instructions retired for all branch types. This is an architectural performance event.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Retired branch instructions (Precise event capable)"
- },
- {
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired, including both when the branch was taken and when it was not taken.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x7e",
- "EventName": "BR_INST_RETIRED.JCC",
- "SampleAfterValue": "200003",
- "BriefDescription": "Retired conditional branch instructions (Precise event capable)"
+    "PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The counter continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0. You cannot collect a PEBS record for this event.",
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 0",
+ "UMask": "0x1",
+ "EventName": "INST_RETIRED.ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Instructions retired (Fixed event)"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired that were taken and does not count when the Jcc branch instruction were not taken.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0xfe",
- "EventName": "BR_INST_RETIRED.TAKEN_JCC",
- "SampleAfterValue": "200003",
- "BriefDescription": "Retired conditional branch instructions that were taken (Precise event capable)"
+    "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1. You cannot collect a PEBS record for this event.",
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 1",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_UNHALTED.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when core is not halted (Fixed event)"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts near CALL branch instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0xf9",
- "EventName": "BR_INST_RETIRED.CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Retired near call instructions (Precise event capable)"
+    "PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time to time. This event is not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. This event uses fixed counter 2. You cannot collect a PEBS record for this event.",
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 2",
+ "UMask": "0x3",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when core is not halted (Fixed event)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts near relative CALL branch instructions retired.",
- "EventCode": "0xC4",
+    "PublicDescription": "Counts a load blocked from using a store forward because the store data was not available at the right time. The forward might occur subsequently when the data is available.",
+ "EventCode": "0x03",
"Counter": "0,1,2,3",
- "UMask": "0xfd",
- "EventName": "BR_INST_RETIRED.REL_CALL",
+ "UMask": "0x1",
+ "EventName": "LD_BLOCKS.DATA_UNKNOWN",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired near relative call instructions (Precise event capable)"
+ "BriefDescription": "Loads blocked due to store data not ready (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts near indirect CALL branch instructions retired.",
- "EventCode": "0xC4",
+    "PublicDescription": "Counts a load blocked from using a store forward because of an address/size mismatch; only one of the loads blocked by each store will be counted.",
+ "EventCode": "0x03",
"Counter": "0,1,2,3",
- "UMask": "0xfb",
- "EventName": "BR_INST_RETIRED.IND_CALL",
+ "UMask": "0x2",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired near indirect call instructions (Precise event capable)"
+ "BriefDescription": "Loads blocked due to store forward restriction (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts near return branch instructions retired.",
- "EventCode": "0xC4",
+ "PublicDescription": "Counts loads that block because their address modulo 4K matches a pending store.",
+ "EventCode": "0x03",
"Counter": "0,1,2,3",
- "UMask": "0xf7",
- "EventName": "BR_INST_RETIRED.RETURN",
+ "UMask": "0x4",
+ "EventName": "LD_BLOCKS.4K_ALIAS",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired near return instructions (Precise event capable)"
+ "BriefDescription": "Loads blocked because address has 4k partial address false dependence (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts near indirect call or near indirect jmp branch instructions retired.",
- "EventCode": "0xC4",
+ "PublicDescription": "Counts loads blocked because they are unable to find their physical address in the micro TLB (UTLB).",
+ "EventCode": "0x03",
"Counter": "0,1,2,3",
- "UMask": "0xeb",
- "EventName": "BR_INST_RETIRED.NON_RETURN_IND",
+ "UMask": "0x8",
+ "EventName": "LD_BLOCKS.UTLB_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired instructions of near indirect Jmp or call (Precise event capable)"
+    "BriefDescription": "Loads blocked because the address is not in the UTLB (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts far branch instructions retired. This includes far jump, far call and return, and Interrupt call and return.",
- "EventCode": "0xC4",
+ "PublicDescription": "Counts anytime a load that retires is blocked for any reason.",
+ "EventCode": "0x03",
"Counter": "0,1,2,3",
- "UMask": "0xbf",
- "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "UMask": "0x10",
+ "EventName": "LD_BLOCKS.ALL_BLOCK",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired far branch instructions (Precise event capable)"
+ "BriefDescription": "Loads blocked (Precise event capable)"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts mispredicted branch instructions retired including all branch types.",
- "EventCode": "0xC5",
+ "CollectPEBSRecord": "1",
+    "PublicDescription": "Counts uops issued by the front end and allocated into the back end of the machine. This event counts uops that retire as well as uops that were speculatively executed but didn't retire. The sort of speculative uops that might be counted includes, but is not limited to, those uops issued in the shadow of a mispredicted branch, those uops that are inserted during an assist (such as for a denormal floating point result), and (previously allocated) uops that might be canceled during a machine clear.",
+ "EventCode": "0x0E",
"Counter": "0,1,2,3",
"UMask": "0x0",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Retired mispredicted branch instructions (Precise event capable)"
- },
- {
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts mispredicted retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired, including both when the branch was supposed to be taken and when it was not supposed to be taken (but the processor predicted the opposite condition).",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x7e",
- "EventName": "BR_MISP_RETIRED.JCC",
+ "EventName": "UOPS_ISSUED.ANY",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired mispredicted conditional branch instructions (Precise event capable)"
+ "BriefDescription": "Uops issued to the back end per cycle"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts mispredicted retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired that were supposed to be taken but the processor predicted that it would not be taken.",
- "EventCode": "0xC5",
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Core cycles when core is not halted. This event uses a (_P)rogrammable general purpose performance counter.",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0xfe",
- "EventName": "BR_MISP_RETIRED.TAKEN_JCC",
- "SampleAfterValue": "200003",
- "BriefDescription": "Retired mispredicted conditional branch instructions that were taken (Precise event capable)"
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.CORE_P",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when core is not halted"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts mispredicted near indirect CALL branch instructions retired, where the target address taken was not what the processor predicted.",
- "EventCode": "0xC5",
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Reference cycles when core is not halted. This event uses a programmable general purpose performance counter.",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0xfb",
- "EventName": "BR_MISP_RETIRED.IND_CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Retired mispredicted near indirect call instructions (Precise event capable)"
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_UNHALTED.REF",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when core is not halted"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts mispredicted near RET branch instructions retired, where the return address taken was not what the processor predicted.",
- "EventCode": "0xC5",
+ "CollectPEBSRecord": "1",
+    "PublicDescription": "This event is used to measure front-end inefficiencies, i.e. when the front-end of the machine is not delivering uops to the back-end and the back-end is not stalled. This event can be used to identify if the machine is truly front-end bound. When this event occurs, it is an indication that the front-end of the machine is operating at less than its theoretical peak performance. Background: We can think of the processor pipeline as being divided into 2 broader parts: Front-end and Back-end. The front-end is responsible for fetching the instruction, decoding it into uops in machine understandable format and putting them into a uop queue to be consumed by the back end. The back-end then takes these uops and allocates the required resources. When all resources are ready, uops are executed. If the back-end is not ready to accept uops from the front-end, then we do not want to count these as front-end bottlenecks. However, whenever we have bottlenecks in the back-end, we will have allocation unit stalls, eventually forcing the front-end to wait until the back-end is ready to receive more uops. This event counts only when the back-end is requesting more uops and the front-end is not able to provide them. When 3 uops are requested and no uops are delivered, the event counts 3. When 3 are requested, and only 1 is delivered, the event counts 2. When only 2 are delivered, the event counts 1. Alternatively stated, the event will not count if 3 uops are delivered, or if the back end is stalled and not requesting any uops at all. Counts indicate missed opportunities for the front-end to deliver a uop to the back end. Some examples of conditions that cause front-end inefficiencies are: ICache misses, ITLB misses, and decoder restrictions that limit the front-end bandwidth. Known Issues: Some uops require multiple allocation slots. These uops will not be charged as a front end 'not delivered' opportunity, and will be regarded as a back end problem. For example, the INC instruction has one uop that requires 2 issue slots. A stream of INC instructions will not count as UOPS_NOT_DELIVERED, even though only one instruction can be issued per clock. The low uop issue rate for a stream of INC instructions is considered to be a back end issue.",
+ "EventCode": "0x9C",
"Counter": "0,1,2,3",
- "UMask": "0xf7",
- "EventName": "BR_MISP_RETIRED.RETURN",
+ "UMask": "0x0",
+ "EventName": "UOPS_NOT_DELIVERED.ANY",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired mispredicted near return instructions (Precise event capable)"
+ "BriefDescription": "Uops requested but not-delivered to the back-end per cycle"
},
{
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts mispredicted branch instructions retired that were near indirect call or near indirect jmp, where the target address taken was not what the processor predicted.",
- "EventCode": "0xC5",
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The event continues counting during hardware interrupts, traps, and inside interrupt handlers. This is an architectural performance event. This event uses a (_P)rogrammable general purpose performance counter. *This event is Precise Event capable: The EventingRIP field in the PEBS record is precise to the address of the instruction which caused the event. Note: Because PEBS records can be collected only on IA32_PMC0, only one event can use the PEBS facility at a time.",
+ "EventCode": "0xC0",
"Counter": "0,1,2,3",
- "UMask": "0xeb",
- "EventName": "BR_MISP_RETIRED.NON_RETURN_IND",
- "SampleAfterValue": "200003",
- "BriefDescription": "Retired mispredicted instructions of near indirect Jmp or near indirect call. (Precise event capable)"
+ "UMask": "0x0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Instructions retired (Precise event capable)"
},
{
"PEBS": "2",
@@ -187,8 +155,40 @@
"BriefDescription": "MS uops retired (Precise event capable)"
},
{
+ "PEBS": "2",
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of floating point divide uops retired.",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "UOPS_RETIRED.FPDIV",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Floating point divide uops retired. (Precise Event Capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of integer divide uops retired.",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "UOPS_RETIRED.IDIV",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Integer divide uops retired. (Precise Event Capable)"
+ },
+ {
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of times that the processor detects that a program is writing to a code section and has to perform a machine clear because of that modification. Self-modifying code (SMC) causes a severe penalty in all Intel? architecture processors.",
+ "PublicDescription": "Counts machine clears for any reason.",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "MACHINE_CLEARS.ALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "All machine clears"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of times that the processor detects that a program is writing to a code section and has to perform a machine clear because of that modification. Self-modifying code (SMC) causes a severe penalty in all Intel architecture processors.",
"EventCode": "0xC3",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -217,217 +217,239 @@
"BriefDescription": "Machine clears due to memory disambiguation"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts machine clears for any reason.",
- "EventCode": "0xC3",
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts branch instructions retired for all branch types. This is an architectural performance event.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x0",
- "EventName": "MACHINE_CLEARS.ALL",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"SampleAfterValue": "200003",
- "BriefDescription": "All machine clears"
+ "BriefDescription": "Retired branch instructions (Precise event capable)"
},
{
"PEBS": "2",
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The event continues counting during hardware interrupts, traps, and inside interrupt handlers. This is an architectural performance event. This event uses a (_P)rogrammable general purpose performance counter. *This event is Precise Event capable: The EventingRIP field in the PEBS record is precise to the address of the instruction which caused the event. Note: Because PEBS records can be collected only on IA32_PMC0, only one event can use the PEBS facility at a time.",
- "EventCode": "0xC0",
+ "CollectPEBSRecord": "2",
+    "PublicDescription": "Counts Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired, including both when the branch was taken and when it was not taken.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "INST_RETIRED.ANY_P",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Instructions retired (Precise event capable)"
+ "UMask": "0x7e",
+ "EventName": "BR_INST_RETIRED.JCC",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired conditional branch instructions (Precise event capable)"
},
{
+ "PEBS": "2",
"CollectPEBSRecord": "1",
- "PublicDescription": "This event used to measure front-end inefficiencies. I.e. when front-end of the machine is not delivering uops to the back-end and the back-end has is not stalled. This event can be used to identify if the machine is truly front-end bound. When this event occurs, it is an indication that the front-end of the machine is operating at less than its theoretical peak performance. Background: We can think of the processor pipeline as being divided into 2 broader parts: Front-end and Back-end. Front-end is responsible for fetching the instruction, decoding into uops in machine understandable format and putting them into a uop queue to be consumed by back end. The back-end then takes these uops, allocates the required resources. When all resources are ready, uops are executed. If the back-end is not ready to accept uops from the front-end, then we do not want to count these as front-end bottlenecks. However, whenever we have bottlenecks in the back-end, we will have allocation unit stalls and eventually forcing the front-end to wait until the back-end is ready to receive more uops. This event counts only when back-end is requesting more uops and front-end is not able to provide them. When 3 uops are requested and no uops are delivered, the event counts 3. When 3 are requested, and only 1 is delivered, the event counts 2. When only 2 are delivered, the event counts 1. Alternatively stated, the event will not count if 3 uops are delivered, or if the back end is stalled and not requesting any uops at all. Counts indicate missed opportunities for the front-end to deliver a uop to the back end. Some examples of conditions that cause front-end efficiencies are: ICache misses, ITLB misses, and decoder restrictions that limit the front-end bandwidth. Known Issues: Some uops require multiple allocation slots. These uops will not be charged as a front end 'not delivered' opportunity, and will be regarded as a back end problem. For example, the INC instruction has one uop that requires 2 issue slots. A stream of INC instructions will not count as UOPS_NOT_DELIVERED, even though only one instruction can be issued per clock. The low uop issue rate for a stream of INC instructions is considered to be a back end issue.",
- "EventCode": "0x9C",
+ "PublicDescription": "Counts the number of taken branch instructions retired.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "UOPS_NOT_DELIVERED.ANY",
+ "UMask": "0x80",
+ "EventName": "BR_INST_RETIRED.ALL_TAKEN_BRANCHES",
"SampleAfterValue": "200003",
- "BriefDescription": "Uops requested but not-delivered to the back-end per cycle"
+ "BriefDescription": "Retired taken branch instructions (Precise event capable)"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts uops issued by the front end and allocated into the back end of the machine. This event counts uops that retire as well as uops that were speculatively executed but didn't retire. The sort of speculative uops that might be counted includes, but is not limited to those uops issued in the shadow of a miss-predicted branch, those uops that are inserted during an assist (such as for a denormal floating point result), and (previously allocated) uops that might be canceled during a machine clear.",
- "EventCode": "0x0E",
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts far branch instructions retired. This includes far jump, far call and return, and Interrupt call and return.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "UOPS_ISSUED.ANY",
+ "UMask": "0xbf",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
"SampleAfterValue": "200003",
- "BriefDescription": "Uops issued to the back end per cycle"
+ "BriefDescription": "Retired far branch instructions (Precise event capable)"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts core cycles if either divide unit is busy.",
- "EventCode": "0xCD",
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts near indirect call or near indirect jmp branch instructions retired.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "CYCLES_DIV_BUSY.ALL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles a divider is busy"
+ "UMask": "0xeb",
+ "EventName": "BR_INST_RETIRED.NON_RETURN_IND",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired instructions of near indirect Jmp or call (Precise event capable)"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts core cycles the integer divide unit is busy.",
- "EventCode": "0xCD",
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts near return branch instructions retired.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CYCLES_DIV_BUSY.IDIV",
+ "UMask": "0xf7",
+ "EventName": "BR_INST_RETIRED.RETURN",
"SampleAfterValue": "200003",
- "BriefDescription": "Cycles the integer divide unit is busy"
+ "BriefDescription": "Retired near return instructions (Precise event capable)"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts core cycles the floating point divide unit is busy.",
- "EventCode": "0xCD",
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts near CALL branch instructions retired.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CYCLES_DIV_BUSY.FPDIV",
+ "UMask": "0xf9",
+ "EventName": "BR_INST_RETIRED.CALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Cycles the FP divide unit is busy"
+ "BriefDescription": "Retired near call instructions (Precise event capable)"
},
{
- "PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The counter continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0. You cannot collect a PEBs record for this event.",
- "EventCode": "0x00",
- "Counter": "Fixed counter 1",
- "UMask": "0x1",
- "EventName": "INST_RETIRED.ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Instructions retired (Fixed event)"
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts near indirect CALL branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0xfb",
+ "EventName": "BR_INST_RETIRED.IND_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired near indirect call instructions (Precise event capable)"
},
{
- "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1. You cannot collect a PEBs record for this event.",
- "EventCode": "0x00",
- "Counter": "Fixed counter 2",
- "UMask": "0x2",
- "EventName": "CPU_CLK_UNHALTED.CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when core is not halted (Fixed event)"
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts near relative CALL branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0xfd",
+ "EventName": "BR_INST_RETIRED.REL_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired near relative call instructions (Precise event capable)"
},
{
- "PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time. This event is not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. This event uses fixed counter 2. You cannot collect a PEBs record for this event.",
- "EventCode": "0x00",
- "Counter": "Fixed counter 3",
- "UMask": "0x3",
- "EventName": "CPU_CLK_UNHALTED.REF_TSC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when core is not halted (Fixed event)"
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+    "PublicDescription": "Counts Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired that were taken and does not count when the Jcc branch instruction was not taken.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0xfe",
+ "EventName": "BR_INST_RETIRED.TAKEN_JCC",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired conditional branch instructions that were taken (Precise event capable)"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Core cycles when core is not halted. This event uses a (_P)rogrammable general purpose performance counter.",
- "EventCode": "0x3C",
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts mispredicted branch instructions retired including all branch types.",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
"UMask": "0x0",
- "EventName": "CPU_CLK_UNHALTED.CORE_P",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when core is not halted"
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired mispredicted branch instructions (Precise event capable)"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Reference cycles when core is not halted. This event uses a (_P)rogrammable general purpose performance counter.",
- "EventCode": "0x3C",
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+    "PublicDescription": "Counts mispredicted Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired, including both when the branch was supposed to be taken and when it was not supposed to be taken (but the processor predicted the opposite condition).",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CPU_CLK_UNHALTED.REF",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when core is not halted"
+ "UMask": "0x7e",
+ "EventName": "BR_MISP_RETIRED.JCC",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired mispredicted conditional branch instructions (Precise event capable)"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of times a BACLEAR is signaled for any reason, including, but not limited to indirect branch/call, Jcc (Jump on Conditional Code/Jump if Condition is Met) branch, unconditional branch/call, and returns.",
- "EventCode": "0xE6",
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts mispredicted branch instructions retired that were near indirect call or near indirect jmp, where the target address taken was not what the processor predicted.",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BACLEARS.ALL",
+ "UMask": "0xeb",
+ "EventName": "BR_MISP_RETIRED.NON_RETURN_IND",
"SampleAfterValue": "200003",
- "BriefDescription": "BACLEARs asserted for any branch type"
+ "BriefDescription": "Retired mispredicted instructions of near indirect Jmp or near indirect call. (Precise event capable)"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts BACLEARS on return instructions.",
- "EventCode": "0xE6",
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts mispredicted near RET branch instructions retired, where the return address taken was not what the processor predicted.",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "BACLEARS.RETURN",
+ "UMask": "0xf7",
+ "EventName": "BR_MISP_RETIRED.RETURN",
"SampleAfterValue": "200003",
- "BriefDescription": "BACLEARs asserted for return branch"
+ "BriefDescription": "Retired mispredicted near return instructions (Precise event capable)"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts BACLEARS on Jcc (Jump on Conditional Code/Jump if Condition is Met) branches.",
- "EventCode": "0xE6",
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts mispredicted near indirect CALL branch instructions retired, where the target address taken was not what the processor predicted.",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "BACLEARS.COND",
+ "UMask": "0xfb",
+ "EventName": "BR_MISP_RETIRED.IND_CALL",
"SampleAfterValue": "200003",
- "BriefDescription": "BACLEARs asserted for conditional branch"
+ "BriefDescription": "Retired mispredicted near indirect call instructions (Precise event capable)"
},
{
"PEBS": "2",
"CollectPEBSRecord": "2",
- "PublicDescription": "Counts anytime a load that retires is blocked for any reason.",
- "EventCode": "0x03",
+    "PublicDescription": "Counts mispredicted Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired that were supposed to be taken but the processor predicted that they would not be taken.",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "LD_BLOCKS.ALL_BLOCK",
+ "UMask": "0xfe",
+ "EventName": "BR_MISP_RETIRED.TAKEN_JCC",
"SampleAfterValue": "200003",
- "BriefDescription": "Loads blocked (Precise event capable)"
+ "BriefDescription": "Retired mispredicted conditional branch instructions that were taken (Precise event capable)"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts loads blocked because they are unable to find their physical address in the micro TLB (UTLB).",
- "EventCode": "0x03",
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts core cycles if either divide unit is busy.",
+ "EventCode": "0xCD",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "LD_BLOCKS.UTLB_MISS",
+ "UMask": "0x0",
+ "EventName": "CYCLES_DIV_BUSY.ALL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles a divider is busy"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts core cycles the integer divide unit is busy.",
+ "EventCode": "0xCD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CYCLES_DIV_BUSY.IDIV",
"SampleAfterValue": "200003",
- "BriefDescription": "Loads blocked because address in not in the UTLB (Precise event capable)"
+ "BriefDescription": "Cycles the integer divide unit is busy"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts a load blocked from using a store forward because of an address/size mismatch, only one of the loads blocked from each store will be counted.",
- "EventCode": "0x03",
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts core cycles the floating point divide unit is busy.",
+ "EventCode": "0xCD",
"Counter": "0,1,2,3",
"UMask": "0x2",
- "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "EventName": "CYCLES_DIV_BUSY.FPDIV",
"SampleAfterValue": "200003",
- "BriefDescription": "Loads blocked due to store forward restriction (Precise event capable)"
+ "BriefDescription": "Cycles the FP divide unit is busy"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts a load blocked from using a store forward, but did not occur because the store data was not available at the right time. The forward might occur subsequently when the data is available.",
- "EventCode": "0x03",
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of times a BACLEAR is signaled for any reason, including, but not limited to indirect branch/call, Jcc (Jump on Conditional Code/Jump if Condition is Met) branch, unconditional branch/call, and returns.",
+ "EventCode": "0xE6",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "LD_BLOCKS.DATA_UNKNOWN",
+ "EventName": "BACLEARS.ALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Loads blocked due to store data not ready (Precise event capable)"
+ "BriefDescription": "BACLEARs asserted for any branch type"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts loads that block because their address modulo 4K matches a pending store.",
- "EventCode": "0x03",
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts BACLEARS on return instructions.",
+ "EventCode": "0xE6",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "LD_BLOCKS.4K_ALIAS",
+ "UMask": "0x8",
+ "EventName": "BACLEARS.RETURN",
"SampleAfterValue": "200003",
- "BriefDescription": "Loads blocked because address has 4k partial address false dependence (Precise event capable)"
+ "BriefDescription": "BACLEARs asserted for return branch"
},
{
- "PEBS": "2",
"CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of taken branch instructions retired.",
- "EventCode": "0xC4",
+ "PublicDescription": "Counts BACLEARS on Jcc (Jump on Conditional Code/Jump if Condition is Met) branches.",
+ "EventCode": "0xE6",
"Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "BR_INST_RETIRED.ALL_TAKEN_BRANCHES",
+ "UMask": "0x10",
+ "EventName": "BACLEARS.COND",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired taken branch instructions (Precise event capable)"
+ "BriefDescription": "BACLEARs asserted for conditional branch"
}
] \ No newline at end of file
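Note: the Goldmont entries above pair an EventCode with a UMask, which perf folds into a single raw hardware-event config. Below is a minimal sketch of reading BACLEARS.ALL (EventCode 0xE6, UMask 0x1) through perf_event_open(2), assuming the standard x86 encoding config = (umask << 8) | event; the program is illustrative only, with error handling trimmed.

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        struct perf_event_attr attr;
        long long count = 0;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_RAW;
        attr.config = (0x1 << 8) | 0xE6;  /* BACLEARS.ALL: UMask 0x1, EventCode 0xE6 */
        attr.disabled = 1;
        attr.exclude_kernel = 1;

        fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0)
                return 1;
        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        /* ... run the workload of interest here ... */
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
        read(fd, &count, sizeof(count));
        printf("BACLEARS.ALL: %lld\n", count);
        return 0;
}

With these JSON files installed, the same counter should also be reachable by alias, e.g. perf stat -e baclears.all.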
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json b/tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json
index 3202c4478836..9805198d3f5f 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json
@@ -1,6 +1,36 @@
[
{
"CollectPEBSRecord": "1",
+ "PublicDescription": "Counts every core cycle when a Data-side (walks due to a data operation) page walk is in progress.",
+ "EventCode": "0x05",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "PAGE_WALKS.D_SIDE_CYCLES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Duration of D-side page-walks in cycles"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts every core cycle when a Instruction-side (walks due to an instruction fetch) page walk is in progress.",
+ "EventCode": "0x05",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "PAGE_WALKS.I_SIDE_CYCLES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Duration of I-side pagewalks in cycles"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts every core cycle a page-walk is in progress due to either a data memory operation or an instruction fetch.",
+ "EventCode": "0x05",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "PAGE_WALKS.CYCLES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Duration of page-walks in cycles"
+ },
+ {
+ "CollectPEBSRecord": "1",
"PublicDescription": "Counts the number of times the machine was unable to find a translation in the Instruction Translation Lookaside Buffer (ITLB) for a linear address of an instruction fetch. It counts when new translation are filled into the ITLB. The event is speculative in nature, but will not count translations (page walks) that are begun and not finished, or translations that are finished but not filled into the ITLB.",
"EventCode": "0x81",
"Counter": "0,1,2,3",
@@ -41,35 +71,5 @@
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS",
"SampleAfterValue": "200003",
"BriefDescription": "Memory uops retired that missed the DTLB (Precise event capable)"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts every core cycle when a Data-side (walks due to a data operation) page walk is in progress.",
- "EventCode": "0x05",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "PAGE_WALKS.D_SIDE_CYCLES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Duration of D-side page-walks in cycles"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts every core cycle when a Instruction-side (walks due to an instruction fetch) page walk is in progress.",
- "EventCode": "0x05",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "PAGE_WALKS.I_SIDE_CYCLES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Duration of I-side pagewalks in cycles"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts every core cycle a page-walk is in progress due to either a data memory operation or an instruction fetch.",
- "EventCode": "0x05",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "EventName": "PAGE_WALKS.CYCLES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Duration of page-walks in cycles"
}
] \ No newline at end of file
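Note: the relocated PAGE_WALKS entries illustrate how UMasks compose on one EventCode: 0x3 is the OR of the D-side (0x1) and I-side (0x2) masks, so PAGE_WALKS.CYCLES covers both walk types. A small sketch of the resulting raw encodings, assuming the usual (umask << 8) | event layout (raw_config is a hypothetical helper):

#include <stdio.h>

static unsigned long raw_config(unsigned event, unsigned umask)
{
        /* event in bits 0-7, umask in bits 8-15 */
        return ((unsigned long)umask << 8) | event;
}

int main(void)
{
        printf("PAGE_WALKS.D_SIDE_CYCLES: r%04lx\n", raw_config(0x05, 0x1));
        printf("PAGE_WALKS.I_SIDE_CYCLES: r%04lx\n", raw_config(0x05, 0x2));
        printf("PAGE_WALKS.CYCLES:        r%04lx\n", raw_config(0x05, 0x3)); /* 0x3 = 0x1 | 0x2 */
        return 0;
}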
diff --git a/tools/perf/pmu-events/arch/x86/haswell/cache.json b/tools/perf/pmu-events/arch/x86/haswell/cache.json
index bfb5ebf48c54..da4d6ddd4f92 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/cache.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/cache.json
@@ -11,14 +11,34 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Demand data read requests that hit L2 cache.",
+ "PublicDescription": "Counts the number of store RFO requests that miss the L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x41",
+ "UMask": "0x22",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests that miss L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of instruction fetches that missed the L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache misses when fetching instructions",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Demand requests that miss L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x27",
"Errata": "HSD78",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "BriefDescription": "Demand requests that miss L2 cache",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -32,6 +52,48 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "All requests that missed L2.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3f",
+ "Errata": "HSD78",
+ "EventName": "L2_RQSTS.MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "All requests that miss L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Demand data read requests that hit L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "Errata": "HSD78",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of store RFO requests that hit the L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x42",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests that hit L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of instruction fetches that hit the L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x44",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts all L2 HW prefetcher requests that hit L2.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
@@ -73,6 +135,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Demand requests to L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe7",
+ "Errata": "HSD78",
+ "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand requests to L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts all L2 HW prefetcher requests.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
@@ -83,6 +156,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "All requests to L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xff",
+ "Errata": "HSD78",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "All L2 requests",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Not rejected writebacks that hit L2 cache.",
"EventCode": "0x27",
"Counter": "0,1,2,3",
@@ -124,6 +208,27 @@
},
{
"EventCode": "0x48",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "CounterMask": "1",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x48",
+ "Counter": "2",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "CounterMask": "1",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x48",
"Counter": "0,1,2,3",
"UMask": "0x2",
"EventName": "L1D_PEND_MISS.REQUEST_FB_FULL",
@@ -133,13 +238,13 @@
},
{
"EventCode": "0x48",
- "Counter": "2",
- "UMask": "0x1",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
"CounterMask": "1",
- "CounterHTOff": "2"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PublicDescription": "This event counts when new data lines are brought into the L1 Data cache, which cause other lines to be evicted from the cache.",
@@ -163,6 +268,28 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "HSD78, HSD62, HSD61",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "HSD78, HSD62, HSD61",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Offcore outstanding Demand code Read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
@@ -185,46 +312,35 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Offcore outstanding cacheable data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x8",
+ "UMask": "0x4",
"Errata": "HSD62, HSD61",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "HSD78, HSD62, HSD61",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Offcore outstanding cacheable data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
"UMask": "0x8",
"Errata": "HSD62, HSD61",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
- "CounterMask": "1",
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x4",
+ "UMask": "0x8",
"Errata": "HSD62, HSD61",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -289,6 +405,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xB7, 0xBB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"PEBS": "1",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
@@ -296,7 +421,7 @@
"Errata": "HSD29, HSM30",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that miss the STLB.",
+ "BriefDescription": "Retired load uops that miss the STLB. (precise Event)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
@@ -308,7 +433,7 @@
"Errata": "HSD29, HSM30",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that miss the STLB.",
+ "BriefDescription": "Retired store uops that miss the STLB. (precise Event)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1",
"L1_Hit_Indication": "1"
@@ -321,31 +446,33 @@
"Errata": "HSD76, HSD29, HSM30",
"EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops with locked access.",
+ "BriefDescription": "Retired load uops with locked access. (precise Event)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
+ "PublicDescription": "This event counts load uops retired which had memory addresses spilt across 2 cache lines. A line split is across 64B cache-lines which may include a page split (4K). This is a precise event.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x41",
"Errata": "HSD29, HSM30",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired load uops that split across a cacheline boundary. (precise Event)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
+ "PublicDescription": "This event counts store uops retired which had memory addresses spilt across 2 cache lines. A line split is across 64B cache-lines which may include a page split (4K). This is a precise event.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x42",
"Errata": "HSD29, HSM30",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary. (precise Event)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1",
"L1_Hit_Indication": "1"
@@ -358,19 +485,20 @@
"Errata": "HSD29, HSM30",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
"SampleAfterValue": "2000003",
- "BriefDescription": "All retired load uops.",
+ "BriefDescription": "All retired load uops. (precise Event)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
+ "PublicDescription": "This event counts all store uops retired. This is a precise event.",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x82",
"Errata": "HSD29, HSM30",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"SampleAfterValue": "2000003",
- "BriefDescription": "All retired store uops.",
+ "BriefDescription": "All retired store uops. (precise Event)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1",
"L1_Hit_Indication": "1"
@@ -401,20 +529,20 @@
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops with L3 cache hits as data sources.",
+ "PublicDescription": "This event counts retired load uops in which data sources were data hits in the L3 cache without snoops required. This does not include hardware prefetches. This is a precise event.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x4",
"Errata": "HSD74, HSD29, HSD25, HSM26, HSM30",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
"SampleAfterValue": "50021",
- "BriefDescription": "Retired load uops which data sources were data hits in L3 without snoops required.",
+ "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops missed L1 cache as data sources.",
+ "PublicDescription": "This event counts retired load uops in which data sources missed in the L1 cache. This does not include hardware prefetches. This is a precise event.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x8",
@@ -427,20 +555,18 @@
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops missed L2. Unknown data source excluded.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x10",
"Errata": "HSD29, HSM30",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
"SampleAfterValue": "50021",
- "BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
+ "BriefDescription": "Retired load uops with L2 cache misses as data sources.",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops missed L3. Excludes unknown data source .",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x20",
@@ -477,25 +603,27 @@
},
{
"PEBS": "1",
+ "PublicDescription": "This event counts retired load uops that hit in the L3 cache, but required a cross-core snoop which resulted in a HIT in an on-pkg core cache. This does not include hardware prefetches. This is a precise event.",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x2",
"Errata": "HSD29, HSD25, HSM26, HSM30",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT",
"SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache. ",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
{
"PEBS": "1",
+ "PublicDescription": "This event counts retired load uops that hit in the L3 cache, but required a cross-core snoop which resulted in a HITM (hit modified) in an on-pkg core cache. This does not include hardware prefetches. This is a precise event.",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x4",
"Errata": "HSD29, HSD25, HSM26, HSM30",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM",
"SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3. ",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
@@ -513,14 +641,13 @@
},
{
"PEBS": "1",
- "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches.",
+ "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches. This is a precise event.",
"EventCode": "0xD3",
"Counter": "0,1,2,3",
"UMask": "0x1",
"Errata": "HSD74, HSD29, HSD25, HSM30",
"EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
"SampleAfterValue": "100003",
- "BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
"CounterHTOff": "0,1,2,3",
"Data_LA": "1"
},
@@ -665,6 +792,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "",
"EventCode": "0xf4",
"Counter": "0,1,2,3",
"UMask": "0x10",
@@ -674,131 +802,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Counts the number of store RFO requests that hit the L2 cache.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x42",
- "EventName": "L2_RQSTS.RFO_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that hit L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts the number of store RFO requests that miss the L2 cache.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x22",
- "EventName": "L2_RQSTS.RFO_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that miss L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of instruction fetches that hit the L2 cache.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x44",
- "EventName": "L2_RQSTS.CODE_RD_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of instruction fetches that missed the L2 cache.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x24",
- "EventName": "L2_RQSTS.CODE_RD_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 cache misses when fetching instructions",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Demand requests that miss L2 cache.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x27",
- "Errata": "HSD78",
- "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand requests that miss L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Demand requests to L2 cache.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xe7",
- "Errata": "HSD78",
- "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand requests to L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "All requests that missed L2.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x3f",
- "Errata": "HSD78",
- "EventName": "L2_RQSTS.MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "All requests that miss L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "All requests to L2 cache.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xff",
- "Errata": "HSD78",
- "EventName": "L2_RQSTS.REFERENCES",
- "SampleAfterValue": "200003",
- "BriefDescription": "All L2 requests",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE",
- "SampleAfterValue": "100003",
- "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "HSD78, HSD62, HSD61",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x48",
- "Counter": "2",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
- "CounterMask": "1",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0x48",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
+ "PublicDescription": "Counts all requests that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c8fff",
"Counter": "0,1,2,3",
@@ -811,6 +815,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c07f7",
"Counter": "0,1,2,3",
@@ -823,6 +828,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c07f7",
"Counter": "0,1,2,3",
@@ -835,6 +841,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0244",
"Counter": "0,1,2,3",
@@ -847,6 +854,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0122",
"Counter": "0,1,2,3",
@@ -859,6 +867,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0122",
"Counter": "0,1,2,3",
@@ -871,6 +880,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0091",
"Counter": "0,1,2,3",
@@ -883,6 +893,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0091",
"Counter": "0,1,2,3",
@@ -895,6 +906,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0200",
"Counter": "0,1,2,3",
@@ -907,6 +919,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0100",
"Counter": "0,1,2,3",
@@ -919,6 +932,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0080",
"Counter": "0,1,2,3",
@@ -931,6 +945,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0040",
"Counter": "0,1,2,3",
@@ -943,6 +958,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0020",
"Counter": "0,1,2,3",
@@ -955,6 +971,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0010",
"Counter": "0,1,2,3",
@@ -967,6 +984,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0004",
"Counter": "0,1,2,3",
@@ -979,6 +997,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0004",
"Counter": "0,1,2,3",
@@ -991,6 +1010,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0002",
"Counter": "0,1,2,3",
@@ -1003,6 +1023,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0002",
"Counter": "0,1,2,3",
@@ -1015,6 +1036,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10003c0001",
"Counter": "0,1,2,3",
@@ -1027,6 +1049,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04003c0001",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/haswell/floating-point.json b/tools/perf/pmu-events/arch/x86/haswell/floating-point.json
index 1732fa49c6d2..f9843e5a9b42 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/floating-point.json
@@ -20,6 +20,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Note that a whole rep string only counts AVX_INST.ALL once.",
+ "EventCode": "0xC6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7",
+ "EventName": "AVX_INSTS.ALL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Approximate counts of AVX & AVX2 256-bit instructions, including non-arithmetic instructions, loads, and stores. May count non-AVX instructions that employ 256-bit operations, including (but not necessarily limited to) rep string instructions that use 256-bit loads and stores for optimized performance, XSAVE* and XRSTOR*, and operations that transition the x87 FPU data registers between x87 and MMX.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Number of X87 FP assists due to output values.",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
@@ -69,15 +79,5 @@
"BriefDescription": "Cycles with any input/output SSE or FP assist",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3"
- },
- {
- "PublicDescription": "Note that a whole rep string only counts AVX_INST.ALL once.",
- "EventCode": "0xC6",
- "Counter": "0,1,2,3",
- "UMask": "0x7",
- "EventName": "AVX_INSTS.ALL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Approximate counts of AVX & AVX2 256-bit instructions, including non-arithmetic instructions, loads, and stores. May count non-AVX instructions that employ 256-bit operations, including (but not necessarily limited to) rep string instructions that use 256-bit loads and stores for optimized performance, XSAVE* and XRSTOR*, and operations that transition the x87 FPU data registers between x87 and MMX.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
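Note: AVX_INSTS.ALL is documented above as approximate and can include non-AVX 256-bit operations, so it is best treated as a coarse AVX-usage indicator rather than an exact count. For illustration, its raw encoding under the standard (umask << 8) | event layout:

#include <stdio.h>

int main(void)
{
        /* AVX_INSTS.ALL: EventCode 0xC6, UMask 0x7 */
        unsigned int config = (0x7 << 8) | 0xC6;

        printf("AVX_INSTS.ALL raw config: r%04x\n", config); /* r07c6 */
        return 0;
}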
diff --git a/tools/perf/pmu-events/arch/x86/haswell/frontend.json b/tools/perf/pmu-events/arch/x86/haswell/frontend.json
index 57a1ce46971f..c0a5bedcc15c 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/frontend.json
@@ -21,74 +21,43 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Increment each cycle. # of uops delivered to IDQ from DSB path. Set Cmask = 1 to count cycles.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of delivery.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "IDQ.MS_DSB_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "IDQ.MS_MITE_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts uops delivered by the Front-end with the assistance of the microcode sequencer. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_UOPS",
+ "UMask": "0x4",
+ "EventName": "IDQ.MITE_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which the microcode sequencer assisted the Front-end in delivering uops. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance.",
+ "PublicDescription": "Increment each cycle. # of uops delivered to IDQ from DSB path. Set Cmask = 1 to count cycles.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_CYCLES",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterMask": "1",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "IDQ.MITE_CYCLES",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of delivery.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_CYCLES",
+ "UMask": "0x10",
+ "EventName": "IDQ.MS_DSB_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
- "CounterMask": "1",
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -135,6 +104,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts cycles MITE is delivered four uops. Set Cmask = 4.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
@@ -157,6 +136,38 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "This event counts uops delivered by the Front-end with the assistance of the microcode sequencer. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts cycles during which the microcode sequencer assisted the Front-end in delivering uops. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EdgeDetect": "1",
+ "EventName": "IDQ.MS_SWITCHES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Number of uops delivered to IDQ from any path.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
@@ -195,6 +206,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "ICACHE.IFDATA_STALL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction-cache miss.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event count the number of undelivered (unallocated) uops from the Front-end to the Resource Allocation Table (RAT) while the Back-end of the processor is not stalled. The Front-end can allocate up to 4 uops per cycle so this event can increment 0-4 times per cycle depending on the number of unallocated uops. This event is counted on a per-core basis.",
"EventCode": "0x9C",
"Counter": "0,1,2,3",
@@ -270,25 +290,5 @@
"SampleAfterValue": "2000003",
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EdgeDetect": "1",
- "EventName": "IDQ.MS_SWITCHES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "ICACHE.IFDATA_STALL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction-cache miss.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
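Note: the relocated IDQ.MS_SWITCHES entry combines CounterMask=1 with EdgeDetect=1, i.e. it counts 0-to-1 transitions of the "MS busy" condition rather than busy cycles. A sketch of how those fields map into a full PERFEVTSEL-style raw config, assuming the usual x86 bit layout (event bits 0-7, umask 8-15, edge bit 18, cmask bits 24-31; raw_config is a hypothetical helper):

#include <stdio.h>

static unsigned long long raw_config(unsigned event, unsigned umask,
                                     unsigned edge, unsigned cmask)
{
        return (unsigned long long)event |
               ((unsigned long long)umask << 8) |
               ((unsigned long long)edge  << 18) |
               ((unsigned long long)cmask << 24);
}

int main(void)
{
        /* Expected: 0x1043079, usable as perf stat -e r1043079 */
        printf("IDQ.MS_SWITCHES: r%llx\n", raw_config(0x79, 0x30, 1, 1));
        return 0;
}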
diff --git a/tools/perf/pmu-events/arch/x86/haswell/memory.json b/tools/perf/pmu-events/arch/x86/haswell/memory.json
index aab981b42339..e5f9fa6655b3 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/memory.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/memory.json
@@ -401,6 +401,7 @@
"CounterHTOff": "3"
},
{
+ "PublicDescription": "Counts all requests that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc08fff",
"Counter": "0,1,2,3",
@@ -413,6 +414,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01004007f7",
"Counter": "0,1,2,3",
@@ -425,6 +427,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc007f7",
"Counter": "0,1,2,3",
@@ -437,6 +440,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch code reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100400244",
"Counter": "0,1,2,3",
@@ -449,6 +453,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc00244",
"Counter": "0,1,2,3",
@@ -461,6 +466,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100400122",
"Counter": "0,1,2,3",
@@ -473,6 +479,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc00122",
"Counter": "0,1,2,3",
@@ -485,6 +492,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100400091",
"Counter": "0,1,2,3",
@@ -497,6 +505,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand & prefetch data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc00091",
"Counter": "0,1,2,3",
@@ -509,6 +518,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc00200",
"Counter": "0,1,2,3",
@@ -521,6 +531,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc00100",
"Counter": "0,1,2,3",
@@ -533,6 +544,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc00080",
"Counter": "0,1,2,3",
@@ -545,6 +557,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc00040",
"Counter": "0,1,2,3",
@@ -557,6 +570,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc00020",
"Counter": "0,1,2,3",
@@ -569,6 +583,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc00010",
"Counter": "0,1,2,3",
@@ -581,6 +596,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100400004",
"Counter": "0,1,2,3",
@@ -593,6 +609,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc00004",
"Counter": "0,1,2,3",
@@ -605,6 +622,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100400002",
"Counter": "0,1,2,3",
@@ -617,6 +635,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc00002",
"Counter": "0,1,2,3",
@@ -629,6 +648,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100400001",
"Counter": "0,1,2,3",
@@ -641,6 +661,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc00001",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/haswell/other.json b/tools/perf/pmu-events/arch/x86/haswell/other.json
index 85d6a14baf9d..8a4d898d76c1 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/other.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/other.json
@@ -10,16 +10,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Unhalted core cycles when the thread is not in ring 0.",
- "EventCode": "0x5C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPL_CYCLES.RING123",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0x5C",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -31,6 +21,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Unhalted core cycles when the thread is not in ring 0.",
+ "EventCode": "0x5C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPL_CYCLES.RING123",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Cycles in which the L1D and L2 are locked, due to a UC lock or split lock.",
"EventCode": "0x63",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/haswell/pipeline.json b/tools/perf/pmu-events/arch/x86/haswell/pipeline.json
index 0099848607ad..a4dcfce4a512 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/pipeline.json
@@ -2,33 +2,43 @@
{
"PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. INST_RETIRED.ANY is counted by a designated fixed counter, leaving the programmable counters available for other events. Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
"EventCode": "0x00",
- "Counter": "Fixed counter 1",
+ "Counter": "Fixed counter 0",
"UMask": "0x1",
"Errata": "HSD140, HSD143",
"EventName": "INST_RETIRED.ANY",
"SampleAfterValue": "2000003",
"BriefDescription": "Instructions retired from execution.",
- "CounterHTOff": "Fixed counter 1"
+ "CounterHTOff": "Fixed counter 0"
},
{
"PublicDescription": "This event counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
"EventCode": "0x00",
- "Counter": "Fixed counter 2",
+ "Counter": "Fixed counter 1",
"UMask": "0x2",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"SampleAfterValue": "2000003",
"BriefDescription": "Core cycles when the thread is not in halt state.",
- "CounterHTOff": "Fixed counter 2"
+ "CounterHTOff": "Fixed counter 1"
+ },
+ {
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 1",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "CounterHTOff": "Fixed counter 1"
},
{
"PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state.",
"EventCode": "0x00",
- "Counter": "Fixed counter 3",
+ "Counter": "Fixed counter 2",
"UMask": "0x3",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"SampleAfterValue": "2000003",
"BriefDescription": "Reference cycles when the core is not in halt state.",
- "CounterHTOff": "Fixed counter 3"
+ "CounterHTOff": "Fixed counter 2"
},
{
"PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceding smaller uncompleted store. The penalty for blocked store forwarding is that the load must wait for the store to write its value to the cache before it can be issued.",
@@ -67,7 +77,19 @@
"UMask": "0x3",
"EventName": "INT_MISC.RECOVERY_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...)",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "EventCode": "0x0D",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "AnyThread": "1",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke)",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
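The INT_MISC.RECOVERY_CYCLES_ANY entry just added combines several of the JSON fields that modify an event: UMask, AnyThread and CounterMask. On Intel these map onto bit fields of the IA32_PERFEVTSELx register, which perf exposes through the raw config word; a sketch of the encoding, assuming the usual x86 format layout (event 0-7, umask 8-15, edge 18, any 21, inv 23, cmask 24-31):

/* Build a raw perf config word from the JSON fields of an event entry.
 * Bit positions follow perf's x86 format layout; treat as illustrative. */
#include <stdio.h>
#include <stdint.h>

static uint64_t raw_config(uint8_t event, uint8_t umask, int edge,
                           int any, int inv, uint8_t cmask)
{
    return (uint64_t)event
         | (uint64_t)umask << 8
         | (uint64_t)(edge != 0) << 18
         | (uint64_t)(any  != 0) << 21
         | (uint64_t)(inv  != 0) << 23
         | (uint64_t)cmask << 24;
}

int main(void)
{
    /* INT_MISC.RECOVERY_CYCLES_ANY: EventCode 0x0D, UMask 0x3,
     * AnyThread 1, CounterMask 1 -> expect 0x120030d. */
    printf("config = 0x%llx\n",
           (unsigned long long)raw_config(0x0d, 0x03, 0, 1, 0, 1));
    return 0;
}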
@@ -82,6 +104,29 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x0E",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x0E",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"PublicDescription": "Number of flags-merge uops allocated. Such uops add delay.",
"EventCode": "0x0E",
"Counter": "0,1,2,3",
@@ -112,35 +157,32 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x0E",
- "Invert": "1",
+ "EventCode": "0x14",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "UMask": "0x2",
+ "EventName": "ARITH.DIVIDER_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Any uop executed by the Divider. (This includes all divide uops, sqrt, ...)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x0E",
- "Invert": "1",
+ "PublicDescription": "Counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x14",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "ARITH.DIVIDER_UOPS",
+ "UMask": "0x0",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Any uop executed by the Divider. (This includes all divide uops, sqrt, ...)",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -154,6 +196,38 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Reference cycles when the thread is unhalted. (counts at 100 MHz rate)",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x3c",
"Counter": "0,1,2,3",
"UMask": "0x2",
@@ -163,6 +237,15 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for S/W prefetch.",
"EventCode": "0x4c",
"Counter": "0,1,2,3",
@@ -233,6 +316,18 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x5E",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts cycles where the decoder is stalled on an instruction with a length changing prefix (LCP).",
"EventCode": "0x87",
"Counter": "0,1,2,3",
@@ -409,6 +504,15 @@
{
"EventCode": "0x89",
"Counter": "0,1,2,3",
+ "UMask": "0xa0",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
"UMask": "0xc1",
"EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
"SampleAfterValue": "200003",
@@ -445,6 +549,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles per core when uops are exectuted in port 0.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are executed in port 0.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 0.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Cycles which a uop is dispatched on port 1 in this thread.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -455,6 +579,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles per core when uops are exectuted in port 1.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are executed in port 1.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Cycles which a uop is dispatched on port 2 in this thread.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -465,6 +609,25 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 2.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Cycles which a uop is dispatched on port 3 in this thread.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -475,6 +638,25 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 3.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Cycles which a uop is dispatched on port 4 in this thread.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -485,6 +667,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles per core when uops are exectuted in port 4.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are executed in port 4.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 4.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Cycles which a uop is dispatched on port 5 in this thread.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -495,6 +697,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles per core when uops are exectuted in port 5.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are executed in port 5.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 5.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Cycles which a uop is dispatched on port 6 in this thread.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -505,6 +727,26 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles per core when uops are exectuted in port 6.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are executed in port 6.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 6.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Cycles which a uop is dispatched on port 7 in this thread.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
@@ -515,6 +757,25 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 7.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Cycles allocation is stalled due to resource related reason.",
"EventCode": "0xA2",
"Counter": "0,1,2,3",
@@ -566,17 +827,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles with pending L1 data cache miss loads. Set Cmask=8 to count cycle.",
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0x8",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with pending L1 cache miss loads.",
- "CounterMask": "8",
- "CounterHTOff": "2"
- },
- {
"PublicDescription": "Cycles with pending memory loads. Set Cmask=2 to count cycle.",
"EventCode": "0xA3",
"Counter": "0,1,2,3",
@@ -594,7 +844,7 @@
"UMask": "0x4",
"EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Total execution stalls",
+ "BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
"CounterMask": "4",
"CounterHTOff": "0,1,2,3"
},
@@ -621,6 +871,17 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Cycles with pending L1 data cache miss loads. Set Cmask=8 to count cycle.",
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0x8",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with pending L1 cache miss loads.",
+ "CounterMask": "8",
+ "CounterHTOff": "2"
+ },
+ {
"PublicDescription": "Execution stalls due to L1 data cache miss loads. Set Cmask=0CH.",
"EventCode": "0xA3",
"Counter": "2",
@@ -642,14 +903,23 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Counts total number of uops to be executed per-core each cycle.",
- "EventCode": "0xB1",
+ "EventCode": "0xA8",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "Errata": "HSD30, HSM31",
- "EventName": "UOPS_EXECUTED.CORE",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_ACTIVE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of uops executed on the core.",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_4_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -665,24 +935,127 @@
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Number of instructions at retirement.",
- "EventCode": "0xC0",
+ "PublicDescription": "This events counts the cycles where at least one uop was executed. It is counted per thread.",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "Errata": "HSD11, HSD140",
- "EventName": "INST_RETIRED.ANY_P",
+ "UMask": "0x1",
+ "Errata": "HSD144, HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "This events counts the cycles where at least two uop were executed. It is counted per thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "HSD144, HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "This events counts the cycles where at least three uop were executed. It is counted per thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "HSD144, HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "HSD144, HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts total number of uops to be executed per-core each cycle.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "Errata": "HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of uops executed on the core.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
- "EventCode": "0xC0",
+ "EventCode": "0xb1",
"Counter": "0,1,2,3",
"UMask": "0x2",
- "EventName": "INST_RETIRED.X87",
+ "Errata": "HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
"SampleAfterValue": "2000003",
- "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions: Counts also flows that have several X87 or flows that use X87 uops in the exception handling.",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "Errata": "HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "Errata": "HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "Errata": "HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "Errata": "HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of instructions at retirement.",
+ "EventCode": "0xC0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "Errata": "HSD11, HSD140",
+ "EventName": "INST_RETIRED.ANY_P",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -698,6 +1071,16 @@
"CounterHTOff": "1"
},
{
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
+ "EventCode": "0xC0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "INST_RETIRED.X87",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions: Counts also flows that have several X87 or flows that use X87 uops in the exception handling.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Number of microcode assists invoked by HW upon uop writeback.",
"EventCode": "0xC1",
"Counter": "0,1,2,3",
@@ -709,7 +1092,6 @@
},
{
"PEBS": "1",
- "PublicDescription": "Counts the number of micro-ops retired. Use Cmask=1 and invert to count active cycles or stalled cycles.",
"EventCode": "0xC2",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -720,17 +1102,6 @@
"Data_LA": "1"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts the number of retirement slots used each cycle. There are potentially 4 slots that can be used each cycle - meaning, 4 uops or 4 instructions could retire each cycle.",
- "EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Retirement slots used.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0xC2",
"Invert": "1",
"Counter": "0,1,2,3",
@@ -765,6 +1136,16 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PEBS": "1",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Retirement slots used.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xC3",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -774,6 +1155,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event is incremented when self-modifying code (SMC) is detected, which causes a machine clear. Machine clears can have a significant performance impact if they are happening frequently.",
"EventCode": "0xC3",
"Counter": "0,1,2,3",
@@ -793,8 +1185,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Branch instructions at retirement.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PEBS": "1",
- "PublicDescription": "Counts the number of conditional branch instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -814,18 +1215,27 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Branch instructions at retirement.",
+ "PEBS": "1",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "UMask": "0x2",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
"SampleAfterValue": "400009",
"BriefDescription": "All (macro) branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Counts the number of near return instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x8",
@@ -846,7 +1256,6 @@
},
{
"PEBS": "1",
- "PublicDescription": "Number of near taken branches retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x20",
@@ -866,14 +1275,14 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "2",
- "EventCode": "0xC4",
+ "PublicDescription": "Mispredicted branch instructions at retirement.",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "UMask": "0x0",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired.",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "All mispredicted macro branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PEBS": "1",
@@ -886,16 +1295,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Mispredicted branch instructions at retirement.",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "400009",
- "BriefDescription": "All mispredicted macro branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"PEBS": "2",
"PublicDescription": "This event counts all mispredicted branch instructions retired. This is a precise event.",
"EventCode": "0xC5",
@@ -903,121 +1302,11 @@
"UMask": "0x4",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
"SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted macro branch instructions retired. ",
+ "BriefDescription": "Mispredicted macro branch instructions retired.",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Count cases of saving new LBR records by hardware.",
- "EventCode": "0xCC",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Count cases of saving new LBR",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Thread cycles when thread is not in halt state",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0xa0",
- "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are exectuted in port 0.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are exectuted in port 1.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are exectuted in port 4.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are exectuted in port 5.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are exectuted in port 6.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"PEBS": "1",
- "PublicDescription": "Number of near branch instructions retired that were taken but mispredicted.",
"EventCode": "0xC5",
"Counter": "0,1,2,3",
"UMask": "0x20",
@@ -1027,51 +1316,14 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This events counts the cycles where at least one uop was executed. It is counted per thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "HSD144, HSD30, HSM31",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PublicDescription": "This events counts the cycles where at least two uop were executed. It is counted per thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "HSD144, HSD30, HSM31",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PublicDescription": "This events counts the cycles where at least three uop were executed. It is counted per thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "HSD144, HSD30, HSM31",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB1",
+ "PublicDescription": "Count cases of saving new LBR records by hardware.",
+ "EventCode": "0xCC",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "HSD144, HSD30, HSM31",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "UMask": "0x20",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Count cases of saving new LBR",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PublicDescription": "Number of front end re-steers due to BPU misprediction.",
@@ -1082,248 +1334,5 @@
"SampleAfterValue": "100003",
"BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "MACHINE_CLEARS.COUNT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of machine clears (nukes) of any type.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_ACTIVE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_4_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x5E",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "RS_EVENTS.EMPTY_END",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 0.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 1.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 2.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 3.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 4.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 5.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 6.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 7.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x00",
- "Counter": "Fixed counter 2",
- "UMask": "0x2",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "CounterHTOff": "Fixed counter 2"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
- "EventCode": "0x0D",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "AnyThread": "1",
- "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke)",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "Errata": "HSD30, HSM31",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "Errata": "HSD30, HSM31",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "Errata": "HSD30, HSM31",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "Errata": "HSD30, HSM31",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "Errata": "HSD30, HSM31",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Reference cycles when the thread is unhalted. (counts at 100 MHz rate)",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
]
\ No newline at end of file
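The UOPS_EXECUTED.CORE_CYCLES_GE_1..4 ladder added in this pipeline file reuses one event/umask with cmask 1 through 4, so the four counts form a cumulative distribution of core-wide execution width. Cycles with exactly k uops executed fall out by subtraction; a sketch with placeholder counts standing in for perf reads:

/* Turn the GE_1..GE_4 cmask ladder into an "exactly k uops" histogram.
 * ge[k] = cycles with >= k uops executed core-wide; placeholder values. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t ge[5] = { 0, 900000, 700000, 400000, 150000 };
    for (int k = 1; k <= 3; k++)
        printf("cycles with exactly %d uop(s): %llu\n",
               k, (unsigned long long)(ge[k] - ge[k + 1]));
    printf("cycles with >= 4 uops: %llu\n", (unsigned long long)ge[4]);
    return 0;
}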
diff --git a/tools/perf/pmu-events/arch/x86/haswell/virtual-memory.json b/tools/perf/pmu-events/arch/x86/haswell/virtual-memory.json
index ce80a08d0f08..777b500a5c9f 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/virtual-memory.json
@@ -39,6 +39,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Completed page walks in any TLB of any page size due to demand load misses.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by DTLB load misses.",
"EventCode": "0x08",
"Counter": "0,1,2,3",
@@ -69,6 +79,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Number of cache load STLB hits. No page walk.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x60",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "DTLB demand load misses with low part of linear-to-physical address translation missed.",
"EventCode": "0x08",
"Counter": "0,1,2,3",
@@ -118,6 +138,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Completed page walks due to store miss in any TLB levels of any page size (4K/2M/4M/1G).",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by DTLB store misses.",
"EventCode": "0x49",
"Counter": "0,1,2,3",
@@ -148,6 +178,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x60",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "DTLB store misses with low part of linear-to-physical address translation missed.",
"EventCode": "0x49",
"Counter": "0,1,2,3",
@@ -206,6 +246,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Completed page walks in ITLB of any page size.",
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Misses in all ITLB levels that cause completed page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by ITLB misses.",
"EventCode": "0x85",
"Counter": "0,1,2,3",
@@ -236,6 +286,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "ITLB misses that hit STLB. No page walk.",
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x60",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts the number of ITLB flushes, includes 4k/2M/4M pages.",
"EventCode": "0xae",
"Counter": "0,1,2,3",
@@ -256,41 +316,45 @@
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Number of ITLB page walker loads that hit in the L1+FB.",
+ "PublicDescription": "Number of DTLB page walker loads that hit in the L2.",
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x21",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
+ "UMask": "0x12",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of ITLB page walker hits in the L1+FB",
+ "BriefDescription": "Number of DTLB page walker hits in the L2",
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Number of DTLB page walker loads that hit in the L3.",
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L1",
+ "UMask": "0x14",
+ "Errata": "HSD25",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L1 and FB.",
+ "BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP",
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Number of DTLB page walker loads from memory.",
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L1",
+ "UMask": "0x18",
+ "Errata": "HSD25",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L1 and FB.",
+ "BriefDescription": "Number of DTLB page walker hits in Memory",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Number of DTLB page walker loads that hit in the L2.",
+ "PublicDescription": "Number of ITLB page walker loads that hit in the L1+FB.",
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x12",
- "EventName": "PAGE_WALKER_LOADS.DTLB_L2",
+ "UMask": "0x21",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of DTLB page walker hits in the L2",
+ "BriefDescription": "Number of ITLB page walker hits in the L1+FB",
"CounterHTOff": "0,1,2,3"
},
{
@@ -304,43 +368,43 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Number of ITLB page walker loads that hit in the L3.",
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x42",
- "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L2",
+ "UMask": "0x24",
+ "Errata": "HSD25",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L2.",
+ "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP",
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Number of ITLB page walker loads from memory.",
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x82",
- "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L2",
+ "UMask": "0x28",
+ "Errata": "HSD25",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_MEMORY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L2.",
+ "BriefDescription": "Number of ITLB page walker hits in Memory",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Number of DTLB page walker loads that hit in the L3.",
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x14",
- "Errata": "HSD25",
- "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
+ "UMask": "0x41",
+ "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L1 and FB.",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Number of ITLB page walker loads that hit in the L3.",
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x24",
- "Errata": "HSD25",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
+ "UMask": "0x42",
+ "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L2.",
"CounterHTOff": "0,1,2,3"
},
{
@@ -355,41 +419,37 @@
{
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x84",
- "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L3",
+ "UMask": "0x48",
+ "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_MEMORY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L2.",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in memory.",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Number of DTLB page walker loads from memory.",
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x18",
- "Errata": "HSD25",
- "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
+ "UMask": "0x81",
+ "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of DTLB page walker hits in Memory",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L1 and FB.",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Number of ITLB page walker loads from memory.",
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x28",
- "Errata": "HSD25",
- "EventName": "PAGE_WALKER_LOADS.ITLB_MEMORY",
+ "UMask": "0x82",
+ "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of ITLB page walker hits in Memory",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L2.",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
"Counter": "0,1,2,3",
- "UMask": "0x48",
- "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_MEMORY",
+ "UMask": "0x84",
+ "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L3",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in memory.",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L2.",
"CounterHTOff": "0,1,2,3"
},
{
@@ -420,65 +480,5 @@
"SampleAfterValue": "100003",
"BriefDescription": "STLB flush attempts",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Completed page walks in any TLB of any page size due to demand load misses.",
- "EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0xe",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of cache load STLB hits. No page walk.",
- "EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x60",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Completed page walks due to store miss in any TLB levels of any page size (4K/2M/4M/1G).",
- "EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0xe",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
- "SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
- "EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x60",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Completed page walks in ITLB of any page size.",
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0xe",
- "EventName": "ITLB_MISSES.WALK_COMPLETED",
- "SampleAfterValue": "100003",
- "BriefDescription": "Misses in all ITLB levels that cause completed page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "ITLB misses that hit STLB. No page walk.",
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x60",
- "EventName": "ITLB_MISSES.STLB_HIT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
]
\ No newline at end of file
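The WALK_COMPLETED entries added in this file (umask 0xe, any page size) pair naturally with the companion walk-duration events described alongside them, the ones counting cycles the page miss handler spends servicing walks: duration divided by completed walks approximates the average page-walk latency. A sketch with placeholder values standing in for the two perf reads:

/* Estimate average page-walk latency from a duration/completed event pair,
 * e.g. the DTLB load-miss walk events above. Counts are placeholders. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t walk_cycles    = 2400000;  /* PMH busy cycles while walking  */
    uint64_t walks_complete = 60000;    /* completed walks, any page size */
    if (walks_complete)
        printf("avg walk latency ~ %.1f cycles\n",
               (double)walk_cycles / (double)walks_complete);
    return 0;
}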
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/cache.json b/tools/perf/pmu-events/arch/x86/haswellx/cache.json
index f1bae0817a6f..b2fbd617306a 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/cache.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/cache.json
@@ -12,12 +12,32 @@
},
{
"EventCode": "0x24",
- "UMask": "0x41",
- "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "UMask": "0x22",
+ "BriefDescription": "RFO requests that miss L2 cache",
"Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "PublicDescription": "Counts the number of store RFO requests that miss the L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x24",
+ "BriefDescription": "L2 cache misses when fetching instructions",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "PublicDescription": "Number of instruction fetches that missed the L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x27",
+ "BriefDescription": "Demand requests that miss L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
"Errata": "HSD78",
- "PublicDescription": "Demand data read requests that hit L2 cache.",
+ "PublicDescription": "Demand requests that miss L2 cache.",
"SampleAfterValue": "200003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -33,6 +53,48 @@
},
{
"EventCode": "0x24",
+ "UMask": "0x3f",
+ "BriefDescription": "All requests that miss L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.MISS",
+ "Errata": "HSD78",
+ "PublicDescription": "All requests that missed L2.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x41",
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "Errata": "HSD78",
+ "PublicDescription": "Demand data read requests that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x42",
+ "BriefDescription": "RFO requests that hit L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "PublicDescription": "Counts the number of store RFO requests that hit the L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x44",
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "PublicDescription": "Number of instruction fetches that hit the L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
"UMask": "0x50",
"BriefDescription": "L2 prefetch requests that hit L2 cache",
"Counter": "0,1,2,3",
@@ -74,6 +136,17 @@
},
{
"EventCode": "0x24",
+ "UMask": "0xe7",
+ "BriefDescription": "Demand requests to L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
+ "Errata": "HSD78",
+ "PublicDescription": "Demand requests to L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
"UMask": "0xf8",
"BriefDescription": "Requests from L2 hardware prefetchers",
"Counter": "0,1,2,3",
@@ -83,6 +156,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x24",
+ "UMask": "0xff",
+ "BriefDescription": "All L2 requests",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "Errata": "HSD78",
+ "PublicDescription": "All requests to L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x27",
"UMask": "0x50",
"BriefDescription": "Not rejected writebacks that hit L2 cache",
@@ -124,6 +208,27 @@
},
{
"EventCode": "0x48",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "Counter": "2",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x48",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "Counter": "2",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "AnyThread": "1",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x48",
"UMask": "0x2",
"BriefDescription": "Number of times a request needed a FB entry but there was no entry available for it. That is the FB unavailability was dominant reason for blocking the request. A request includes cacheable/uncacheable demands that is load, store or SW prefetch. HWP are e.",
"Counter": "0,1,2,3",
@@ -133,13 +238,13 @@
},
{
"EventCode": "0x48",
- "UMask": "0x1",
- "BriefDescription": "Cycles with L1D load Misses outstanding.",
- "Counter": "2",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
+ "Counter": "0,1,2,3",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
"CounterMask": "1",
"SampleAfterValue": "2000003",
- "CounterHTOff": "2"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x51",
@@ -164,6 +269,28 @@
},
{
"EventCode": "0x60",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "CounterMask": "1",
+ "Errata": "HSD78, HSD62, HSD61",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "CounterMask": "6",
+ "Errata": "HSD78, HSD62, HSD61",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
"UMask": "0x2",
"BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
"Counter": "0,1,2,3",
@@ -186,23 +313,23 @@
},
{
"EventCode": "0x60",
- "UMask": "0x8",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "UMask": "0x4",
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "CounterMask": "1",
"Errata": "HSD62, HSD61",
- "PublicDescription": "Offcore outstanding cacheable data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x60",
- "UMask": "0x1",
- "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "UMask": "0x8",
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
"Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
- "CounterMask": "1",
- "Errata": "HSD78, HSD62, HSD61",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "Errata": "HSD62, HSD61",
+ "PublicDescription": "Offcore outstanding cacheable data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -218,17 +345,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x60",
- "UMask": "0x4",
- "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
- "CounterMask": "1",
- "Errata": "HSD62, HSD61",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0x63",
"UMask": "0x2",
"BriefDescription": "Cycles when L1D is locked",
@@ -289,9 +405,18 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0xD0",
"UMask": "0x11",
- "BriefDescription": "Retired load uops that miss the STLB.",
+ "BriefDescription": "Retired load uops that miss the STLB. (precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -303,20 +428,20 @@
{
"EventCode": "0xD0",
"UMask": "0x12",
- "BriefDescription": "Retired store uops that miss the STLB.",
+ "BriefDescription": "Retired store uops that miss the STLB. (precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
"Errata": "HSD29, HSM30",
- "SampleAfterValue": "100003",
"L1_Hit_Indication": "1",
+ "SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x21",
- "BriefDescription": "Retired load uops with locked access.",
+ "BriefDescription": "Retired load uops with locked access. (precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -328,32 +453,34 @@
{
"EventCode": "0xD0",
"UMask": "0x41",
- "BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired load uops that split across a cacheline boundary. (precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"Errata": "HSD29, HSM30",
+ "PublicDescription": "This event counts load uops retired which had memory addresses spilt across 2 cache lines. A line split is across 64B cache-lines which may include a page split (4K). This is a precise event.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x42",
- "BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary. (precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"Errata": "HSD29, HSM30",
- "SampleAfterValue": "100003",
"L1_Hit_Indication": "1",
+ "PublicDescription": "This event counts store uops retired which had memory addresses spilt across 2 cache lines. A line split is across 64B cache-lines which may include a page split (4K). This is a precise event.",
+ "SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x81",
- "BriefDescription": "All retired load uops.",
+ "BriefDescription": "All retired load uops. (precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -365,14 +492,15 @@
{
"EventCode": "0xD0",
"UMask": "0x82",
- "BriefDescription": "All retired store uops.",
+ "BriefDescription": "All retired store uops. (precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"Errata": "HSD29, HSM30",
- "SampleAfterValue": "2000003",
"L1_Hit_Indication": "1",
+ "PublicDescription": "This event counts all store uops retired. This is a precise event.",
+ "SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
@@ -402,13 +530,13 @@
{
"EventCode": "0xD1",
"UMask": "0x4",
- "BriefDescription": "Retired load uops which data sources were data hits in L3 without snoops required.",
+ "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
"Errata": "HSD74, HSD29, HSD25, HSM26, HSM30",
- "PublicDescription": "Retired load uops with L3 cache hits as data sources.",
+ "PublicDescription": "This event counts retired load uops in which data sources were data hits in the L3 cache without snoops required. This does not include hardware prefetches. This is a precise event.",
"SampleAfterValue": "50021",
"CounterHTOff": "0,1,2,3"
},
@@ -421,20 +549,19 @@
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
"Errata": "HSM30",
- "PublicDescription": "Retired load uops missed L1 cache as data sources.",
+ "PublicDescription": "This event counts retired load uops in which data sources missed in the L1 cache. This does not include hardware prefetches. This is a precise event.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD1",
"UMask": "0x10",
- "BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
+ "BriefDescription": "Retired load uops with L2 cache misses as data sources.",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
"Errata": "HSD29, HSM30",
- "PublicDescription": "Retired load uops missed L2. Unknown data source excluded.",
"SampleAfterValue": "50021",
"CounterHTOff": "0,1,2,3"
},
@@ -447,7 +574,6 @@
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_MISS",
"Errata": "HSD74, HSD29, HSD25, HSM26, HSM30",
- "PublicDescription": "Retired load uops missed L3. Excludes unknown data source .",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -478,24 +604,26 @@
{
"EventCode": "0xD2",
"UMask": "0x2",
- "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache. ",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT",
"Errata": "HSD29, HSD25, HSM26, HSM30",
+ "PublicDescription": "This event counts retired load uops that hit in the L3 cache, but required a cross-core snoop which resulted in a HIT in an on-pkg core cache. This does not include hardware prefetches. This is a precise event.",
"SampleAfterValue": "20011",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD2",
"UMask": "0x4",
- "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3. ",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM",
"Errata": "HSD29, HSD25, HSM26, HSM30",
+ "PublicDescription": "This event counts retired load uops that hit in the L3 cache, but required a cross-core snoop which resulted in a HITM (hit modified) in an on-pkg core cache. This does not include hardware prefetches. This is a precise event.",
"SampleAfterValue": "20011",
"CounterHTOff": "0,1,2,3"
},
@@ -514,20 +642,19 @@
{
"EventCode": "0xD3",
"UMask": "0x1",
- "BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
"Errata": "HSD74, HSD29, HSD25, HSM30",
- "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches.",
+ "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches. This is a precise event.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD3",
"UMask": "0x4",
- "BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI)",
+ "BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI) (Precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -539,7 +666,7 @@
{
"EventCode": "0xD3",
"UMask": "0x10",
- "BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM",
+ "BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM (Precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -551,7 +678,7 @@
{
"EventCode": "0xD3",
"UMask": "0x20",
- "BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache",
+ "BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache (Precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -706,135 +833,11 @@
"BriefDescription": "Split locks in SQ",
"Counter": "0,1,2,3",
"EventName": "SQ_MISC.SPLIT_LOCK",
+ "PublicDescription": "",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x24",
- "UMask": "0x42",
- "BriefDescription": "RFO requests that hit L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.RFO_HIT",
- "PublicDescription": "Counts the number of store RFO requests that hit the L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x22",
- "BriefDescription": "RFO requests that miss L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.RFO_MISS",
- "PublicDescription": "Counts the number of store RFO requests that miss the L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x44",
- "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.CODE_RD_HIT",
- "PublicDescription": "Number of instruction fetches that hit the L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x24",
- "BriefDescription": "L2 cache misses when fetching instructions",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.CODE_RD_MISS",
- "PublicDescription": "Number of instruction fetches that missed the L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x27",
- "BriefDescription": "Demand requests that miss L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
- "Errata": "HSD78",
- "PublicDescription": "Demand requests that miss L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0xe7",
- "BriefDescription": "Demand requests to L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
- "Errata": "HSD78",
- "PublicDescription": "Demand requests to L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0x3f",
- "BriefDescription": "All requests that miss L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.MISS",
- "Errata": "HSD78",
- "PublicDescription": "All requests that missed L2.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x24",
- "UMask": "0xff",
- "BriefDescription": "All L2 requests",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.REFERENCES",
- "Errata": "HSD78",
- "PublicDescription": "All requests to L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0x60",
- "UMask": "0x1",
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
- "CounterMask": "6",
- "Errata": "HSD78, HSD62, HSD61",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x48",
- "UMask": "0x1",
- "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
- "Counter": "2",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
- "AnyThread": "1",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0x48",
- "UMask": "0x2",
- "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
- "Counter": "0,1,2,3",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"Offcore": "1",
"EventCode": "0xB7, 0xBB",
"UMask": "0x1",
@@ -843,6 +846,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -855,6 +859,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -867,6 +872,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -879,6 +885,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -891,6 +898,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -903,6 +911,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -915,6 +924,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -927,6 +937,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -939,6 +950,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -951,6 +963,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -963,6 +976,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -975,6 +989,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -987,6 +1002,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -999,6 +1015,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1011,6 +1028,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1023,6 +1041,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1035,6 +1054,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1047,6 +1067,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1059,6 +1080,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1071,6 +1093,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all requests that hit in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
}
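The cache.json changes above mostly reorder entries by UMask and fill in PublicDescription text; the OFFCORE_RESPONSE.* entries are matrix events whose filter value goes into the MSR pair named by MSRIndex (0x1a6/0x1a7). Once a perf build carries these aliases, plain named events such as the L2_RQSTS.MISS/REFERENCES pair can be read directly. A hedged usage sketch in Python follows; event-name case and the exact CSV column layout of `perf stat -x,` vary across perf versions, so the parser simply scans each row for the names:

```python
#!/usr/bin/env python3
"""Sketch: derive an L2 miss ratio from two events defined above."""
import subprocess
import sys

EVENTS = ["l2_rqsts.miss", "l2_rqsts.references"]  # aliases from cache.json

def counts(workload):
    # perf stat prints its CSV (-x,) report on stderr.
    res = subprocess.run(["perf", "stat", "-x,", "-e", ",".join(EVENTS)]
                         + workload, capture_output=True, text=True)
    got = {}
    for line in res.stderr.splitlines():
        cols = line.split(",")
        for ev in EVENTS:
            if ev in cols:
                try:
                    got[ev] = int(cols[0])
                except ValueError:
                    pass  # "<not counted>" / "<not supported>"
    return got

c = counts(sys.argv[1:] or ["sleep", "1"])
if c.get("l2_rqsts.references"):
    ratio = c["l2_rqsts.miss"] / c["l2_rqsts.references"]
    print(f"L2 miss ratio: {100 * ratio:.2f}%")
```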
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/floating-point.json b/tools/perf/pmu-events/arch/x86/haswellx/floating-point.json
index 6282aed6e090..bc08cc1f2f7e 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/floating-point.json
@@ -20,6 +20,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xC6",
+ "UMask": "0x7",
+ "BriefDescription": "Approximate counts of AVX & AVX2 256-bit instructions, including non-arithmetic instructions, loads, and stores. May count non-AVX instructions that employ 256-bit operations, including (but not necessarily limited to) rep string instructions that use 256-bit loads and stores for optimized performance, XSAVE* and XRSTOR*, and operations that transition the x87 FPU data registers between x87 and MMX.",
+ "Counter": "0,1,2,3",
+ "EventName": "AVX_INSTS.ALL",
+ "PublicDescription": "Note that a whole rep string only counts AVX_INST.ALL once.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xCA",
"UMask": "0x2",
"BriefDescription": "Number of X87 assists due to output value.",
@@ -69,15 +79,5 @@
"PublicDescription": "Cycles with any input/output SSE* or FP assists.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xC6",
- "UMask": "0x7",
- "BriefDescription": "Approximate counts of AVX & AVX2 256-bit instructions, including non-arithmetic instructions, loads, and stores. May count non-AVX instructions that employ 256-bit operations, including (but not necessarily limited to) rep string instructions that use 256-bit loads and stores for optimized performance, XSAVE* and XRSTOR*, and operations that transition the x87 FPU data registers between x87 and MMX.",
- "Counter": "0,1,2,3",
- "EventName": "AVX_INSTS.ALL",
- "PublicDescription": "Note that a whole rep string only counts AVX_INST.ALL once.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/frontend.json b/tools/perf/pmu-events/arch/x86/haswellx/frontend.json
index 2d0c7aac1e61..a4d9f1fcf940 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/frontend.json
@@ -22,72 +22,41 @@
},
{
"EventCode": "0x79",
- "UMask": "0x8",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.DSB_UOPS",
- "PublicDescription": "Increment each cycle. # of uops delivered to IDQ from DSB path. Set Cmask = 1 to count cycles.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x10",
- "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_DSB_UOPS",
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of delivery.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x20",
- "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_MITE_UOPS",
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
"Counter": "0,1,2,3",
- "EventName": "IDQ.MS_UOPS",
- "PublicDescription": "This event counts uops delivered by the Front-end with the assistance of the microcode sequencer. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance.",
+ "EventName": "IDQ.MITE_CYCLES",
+ "CounterMask": "1",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "UMask": "0x8",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
"Counter": "0,1,2,3",
- "EventName": "IDQ.MS_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "This event counts cycles during which the microcode sequencer assisted the Front-end in delivering uops. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance.",
+ "EventName": "IDQ.DSB_UOPS",
+ "PublicDescription": "Increment each cycle. # of uops delivered to IDQ from DSB path. Set Cmask = 1 to count cycles.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
- "UMask": "0x4",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
"Counter": "0,1,2,3",
- "EventName": "IDQ.MITE_CYCLES",
+ "EventName": "IDQ.DSB_CYCLES",
"CounterMask": "1",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x79",
- "UMask": "0x8",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
+ "UMask": "0x10",
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"Counter": "0,1,2,3",
- "EventName": "IDQ.DSB_CYCLES",
- "CounterMask": "1",
+ "EventName": "IDQ.MS_DSB_UOPS",
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of delivery.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -136,6 +105,16 @@
},
{
"EventCode": "0x79",
+ "UMask": "0x20",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
"UMask": "0x24",
"BriefDescription": "Cycles MITE is delivering 4 Uops",
"Counter": "0,1,2,3",
@@ -158,6 +137,38 @@
},
{
"EventCode": "0x79",
+ "UMask": "0x30",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_UOPS",
+ "PublicDescription": "This event counts uops delivered by the Front-end with the assistance of the microcode sequencer. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x30",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles during which the microcode sequencer assisted the Front-end in delivering uops. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "UMask": "0x30",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_SWITCHES",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
"UMask": "0x3c",
"BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
"Counter": "0,1,2,3",
@@ -195,6 +206,15 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x80",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction-cache miss.",
+ "Counter": "0,1,2,3",
+ "EventName": "ICACHE.IFDATA_STALL",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x9C",
"UMask": "0x1",
"BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
@@ -270,25 +290,5 @@
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EdgeDetect": "1",
- "EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_SWITCHES",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x80",
- "UMask": "0x4",
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction-cache miss.",
- "Counter": "0,1,2,3",
- "EventName": "ICACHE.IFDATA_STALL",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
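Several of the frontend entries moved above differ only in modifier fields: IDQ.MS_CYCLES is IDQ.MS_UOPS (event 0x79, umask 0x30) counted with CounterMask=1, and IDQ.MS_SWITCHES additionally sets EdgeDetect to count 0-to-1 transitions. These JSON fields correspond to bits of the x86 IA32_PERFEVTSELx register; a small illustrative encoder, with bit positions per the Intel SDM layout:

```python
def perfevtsel(event, umask, cmask=0, edge=False, inv=False, any_thread=False):
    """Compose a raw IA32_PERFEVTSELx-style config from pmu-events fields.

    Layout: [7:0] event select, [15:8] umask, bit 18 edge detect,
    bit 21 AnyThread, bit 23 invert, [31:24] counter mask (cmask).
    """
    return (event | (umask << 8) | (int(edge) << 18)
            | (int(any_thread) << 21) | (int(inv) << 23) | (cmask << 24))

# IDQ.MS_SWITCHES from the hunk above: event 0x79, umask 0x30, cmask=1, edge.
print(hex(perfevtsel(0x79, 0x30, cmask=1, edge=True)))  # -> 0x1043079
```

Where the named alias is unavailable, a value composed this way should be usable through perf's cpu/config=0x.../ event syntax.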
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/memory.json b/tools/perf/pmu-events/arch/x86/haswellx/memory.json
index 0886cc000d22..56b0f24b8029 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/memory.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/memory.json
@@ -409,6 +409,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts demand data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -421,6 +422,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -433,6 +435,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -445,6 +448,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -457,6 +461,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -469,6 +474,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -481,6 +487,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -493,6 +500,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -505,6 +513,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -517,6 +526,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -529,6 +539,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -541,6 +552,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -553,6 +565,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -565,6 +578,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -577,6 +591,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -589,6 +604,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from remote dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -601,6 +617,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -613,6 +630,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -625,6 +643,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -637,6 +656,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -649,6 +669,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch code reads that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -661,6 +682,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all demand & prefetch code reads that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -673,6 +695,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -685,6 +708,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from local dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -697,6 +721,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from remote dram Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -709,6 +734,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the modified data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -721,6 +747,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and clean or shared data is transferred from remote cache Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -733,6 +760,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts all requests that miss in the L3 Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
}
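Most hunks in memory.json, like the rest of this commit, move entries and append PublicDescription text rather than change counter programming, which makes the textual diff noisy. A reviewer-side sketch that compares two revisions of one of these files keyed by EventName instead of by position (the file names are placeholders):

```python
import json

def by_name(path):
    with open(path) as f:
        # Last entry wins if a name repeats; good enough for a review aid.
        return {ev["EventName"]: ev for ev in json.load(f)}

old = by_name("memory.json.orig")  # hypothetical pre-patch copy
new = by_name("memory.json")
print("added:  ", sorted(new.keys() - old.keys()))
print("removed:", sorted(old.keys() - new.keys()))
print("changed:", sorted(n for n in old.keys() & new.keys() if old[n] != new[n]))
```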
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/other.json b/tools/perf/pmu-events/arch/x86/haswellx/other.json
index 4e1b6ce96ca3..800e65df31bc 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/other.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/other.json
@@ -10,16 +10,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x5C",
- "UMask": "0x2",
- "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
- "Counter": "0,1,2,3",
- "EventName": "CPL_CYCLES.RING123",
- "PublicDescription": "Unhalted core cycles when the thread is not in ring 0.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EdgeDetect": "1",
"EventCode": "0x5C",
"UMask": "0x1",
@@ -31,6 +21,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x5C",
+ "UMask": "0x2",
+ "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
+ "Counter": "0,1,2,3",
+ "EventName": "CPL_CYCLES.RING123",
+ "PublicDescription": "Unhalted core cycles when the thread is not in ring 0.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x63",
"UMask": "0x1",
"BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock",
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/pipeline.json b/tools/perf/pmu-events/arch/x86/haswellx/pipeline.json
index c3a163d34bd7..8a18bfe9e3e4 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/pipeline.json
@@ -3,32 +3,42 @@
"EventCode": "0x00",
"UMask": "0x1",
"BriefDescription": "Instructions retired from execution.",
- "Counter": "Fixed counter 1",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"Errata": "HSD140, HSD143",
"PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. INST_RETIRED.ANY is counted by a designated fixed counter, leaving the programmable counters available for other events. Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 1"
+ "CounterHTOff": "Fixed counter 0"
},
{
"EventCode": "0x00",
"UMask": "0x2",
"BriefDescription": "Core cycles when the thread is not in halt state.",
- "Counter": "Fixed counter 2",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"PublicDescription": "This event counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 2"
+ "CounterHTOff": "Fixed counter 1"
+ },
+ {
+ "EventCode": "0x00",
+ "UMask": "0x2",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "Fixed counter 1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "Fixed counter 1"
},
{
"EventCode": "0x00",
"UMask": "0x3",
"BriefDescription": "Reference cycles when the core is not in halt state.",
- "Counter": "Fixed counter 3",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 3"
+ "CounterHTOff": "Fixed counter 2"
},
{
"EventCode": "0x03",
@@ -63,7 +73,7 @@
{
"EventCode": "0x0D",
"UMask": "0x3",
- "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...)",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
"Counter": "0,1,2,3",
"EventName": "INT_MISC.RECOVERY_CYCLES",
"CounterMask": "1",
@@ -72,6 +82,18 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x0D",
+ "UMask": "0x3",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke)",
+ "Counter": "0,1,2,3",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+ "AnyThread": "1",
+ "CounterMask": "1",
+ "PublicDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x0E",
"UMask": "0x1",
"BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
@@ -82,6 +104,29 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "Invert": "1",
+ "EventCode": "0x0E",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Invert": "1",
+ "EventCode": "0x0E",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
+ "AnyThread": "1",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"EventCode": "0x0E",
"UMask": "0x10",
"BriefDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive; added by GSR u-arch.",
@@ -112,34 +157,31 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "Invert": "1",
- "EventCode": "0x0E",
- "UMask": "0x1",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
+ "EventCode": "0x14",
+ "UMask": "0x2",
+ "BriefDescription": "Any uop executed by the Divider. (This includes all divide uops, sqrt, ...)",
"Counter": "0,1,2,3",
- "EventName": "UOPS_ISSUED.STALL_CYCLES",
- "CounterMask": "1",
+ "EventName": "ARITH.DIVIDER_UOPS",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "Invert": "1",
- "EventCode": "0x0E",
- "UMask": "0x1",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
+ "EventCode": "0x3C",
+ "UMask": "0x0",
+ "BriefDescription": "Thread cycles when thread is not in halt state",
"Counter": "0,1,2,3",
- "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
- "AnyThread": "1",
- "CounterMask": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PublicDescription": "Counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x14",
- "UMask": "0x2",
- "BriefDescription": "Any uop executed by the Divider. (This includes all divide uops, sqrt, ...)",
+ "EventCode": "0x3C",
+ "UMask": "0x0",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
"Counter": "0,1,2,3",
- "EventName": "ARITH.DIVIDER_UOPS",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "AnyThread": "1",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -154,6 +196,38 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "AnyThread": "1",
+ "PublicDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "PublicDescription": "Reference cycles when the thread is unhalted. (counts at 100 MHz rate)",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "AnyThread": "1",
+ "PublicDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x3c",
"UMask": "0x2",
"BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
@@ -163,6 +237,15 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0x3C",
+ "UMask": "0x2",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x4c",
"UMask": "0x1",
"BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch",
@@ -233,6 +316,18 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EdgeDetect": "1",
+ "Invert": "1",
+ "EventCode": "0x5E",
+ "UMask": "0x1",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "Counter": "0,1,2,3",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "CounterMask": "1",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x87",
"UMask": "0x1",
"BriefDescription": "Stalls caused by changing prefix length of the instruction.",
@@ -408,6 +503,15 @@
},
{
"EventCode": "0x89",
+ "UMask": "0xa0",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
"UMask": "0xc1",
"BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
"Counter": "0,1,2,3",
@@ -446,6 +550,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles per core when uops are executed in port 0.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
+ "AnyThread": "1",
+ "PublicDescription": "Cycles per core when uops are exectuted in port 0.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles per thread when uops are executed in port 0.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x2",
"BriefDescription": "Cycles per thread when uops are executed in port 1",
"Counter": "0,1,2,3",
@@ -456,6 +580,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles per core when uops are executed in port 1.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
+ "AnyThread": "1",
+ "PublicDescription": "Cycles per core when uops are exectuted in port 1.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x4",
"BriefDescription": "Cycles per thread when uops are executed in port 2",
"Counter": "0,1,2,3",
@@ -466,6 +610,25 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles per thread when uops are executed in port 2.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x8",
"BriefDescription": "Cycles per thread when uops are executed in port 3",
"Counter": "0,1,2,3",
@@ -476,6 +639,25 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles per thread when uops are executed in port 3.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x10",
"BriefDescription": "Cycles per thread when uops are executed in port 4",
"Counter": "0,1,2,3",
@@ -486,6 +668,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles per core when uops are executed in port 4.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
+ "AnyThread": "1",
+ "PublicDescription": "Cycles per core when uops are exectuted in port 4.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles per thread when uops are executed in port 4.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x20",
"BriefDescription": "Cycles per thread when uops are executed in port 5",
"Counter": "0,1,2,3",
@@ -496,6 +698,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x20",
+ "BriefDescription": "Cycles per core when uops are executed in port 5.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
+ "AnyThread": "1",
+ "PublicDescription": "Cycles per core when uops are exectuted in port 5.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x20",
+ "BriefDescription": "Cycles per thread when uops are executed in port 5.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x40",
"BriefDescription": "Cycles per thread when uops are executed in port 6",
"Counter": "0,1,2,3",
@@ -506,6 +728,26 @@
},
{
"EventCode": "0xA1",
+ "UMask": "0x40",
+ "BriefDescription": "Cycles per core when uops are executed in port 6.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
+ "AnyThread": "1",
+ "PublicDescription": "Cycles per core when uops are exectuted in port 6.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x40",
+ "BriefDescription": "Cycles per thread when uops are executed in port 6.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
"UMask": "0x80",
"BriefDescription": "Cycles per thread when uops are executed in port 7",
"Counter": "0,1,2,3",
@@ -515,6 +757,25 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xA1",
+ "UMask": "0x80",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x80",
+ "BriefDescription": "Cycles per thread when uops are executed in port 7.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xA2",
"UMask": "0x1",
"BriefDescription": "Resource-related stall cycles",
@@ -567,17 +828,6 @@
},
{
"EventCode": "0xA3",
- "UMask": "0x8",
- "BriefDescription": "Cycles with pending L1 cache miss loads.",
- "Counter": "2",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
- "CounterMask": "8",
- "PublicDescription": "Cycles with pending L1 data cache miss loads. Set Cmask=8 to count cycle.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0xA3",
"UMask": "0x2",
"BriefDescription": "Cycles with pending memory loads.",
"Counter": "0,1,2,3",
@@ -590,7 +840,7 @@
{
"EventCode": "0xA3",
"UMask": "0x4",
- "BriefDescription": "Total execution stalls",
+ "BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
"Counter": "0,1,2,3",
"EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
"CounterMask": "4",
@@ -622,6 +872,17 @@
},
{
"EventCode": "0xA3",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles with pending L1 cache miss loads.",
+ "Counter": "2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "CounterMask": "8",
+ "PublicDescription": "Cycles with pending L1 data cache miss loads. Set Cmask=8 to count cycle.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
"UMask": "0xc",
"BriefDescription": "Execution stalls due to L1 data cache misses",
"Counter": "2",
@@ -642,13 +903,22 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
- "UMask": "0x2",
- "BriefDescription": "Number of uops executed on the core.",
+ "EventCode": "0xA8",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE",
- "Errata": "HSD30, HSM31",
- "PublicDescription": "Counts total number of uops to be executed per-core each cycle.",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
+ "EventName": "LSD.CYCLES_4_UOPS",
+ "CounterMask": "4",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -665,23 +935,126 @@
"CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xC0",
- "UMask": "0x0",
- "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
"Counter": "0,1,2,3",
- "EventName": "INST_RETIRED.ANY_P",
- "Errata": "HSD11, HSD140",
- "PublicDescription": "Number of instructions at retirement.",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "CounterMask": "1",
+ "Errata": "HSD144, HSD30, HSM31",
+ "PublicDescription": "This events counts the cycles where at least one uop was executed. It is counted per thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "CounterMask": "2",
+ "Errata": "HSD144, HSD30, HSM31",
+ "PublicDescription": "This events counts the cycles where at least two uop were executed. It is counted per thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "CounterMask": "3",
+ "Errata": "HSD144, HSD30, HSM31",
+ "PublicDescription": "This events counts the cycles where at least three uop were executed. It is counted per thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "CounterMask": "4",
+ "Errata": "HSD144, HSD30, HSM31",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x2",
+ "BriefDescription": "Number of uops executed on the core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE",
+ "Errata": "HSD30, HSM31",
+ "PublicDescription": "Counts total number of uops to be executed per-core each cycle.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC0",
+ "EventCode": "0xb1",
"UMask": "0x2",
- "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions: Counts also flows that have several X87 or flows that use X87 uops in the exception handling.",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
"Counter": "0,1,2,3",
- "EventName": "INST_RETIRED.X87",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "CounterMask": "1",
+ "Errata": "HSD30, HSM31",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "CounterMask": "2",
+ "Errata": "HSD30, HSM31",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "CounterMask": "3",
+ "Errata": "HSD30, HSM31",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "CounterMask": "4",
+ "Errata": "HSD30, HSM31",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "Invert": "1",
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "Errata": "HSD30, HSM31",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC0",
+ "UMask": "0x0",
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Counter": "0,1,2,3",
+ "EventName": "INST_RETIRED.ANY_P",
+ "Errata": "HSD11, HSD140",
+ "PublicDescription": "Number of instructions at retirement.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -698,6 +1071,16 @@
"CounterHTOff": "1"
},
{
+ "EventCode": "0xC0",
+ "UMask": "0x2",
+ "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions: Counts also flows that have several X87 or flows that use X87 uops in the exception handling.",
+ "Counter": "0,1,2,3",
+ "EventName": "INST_RETIRED.X87",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xC1",
"UMask": "0x40",
"BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
@@ -715,18 +1098,6 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "UOPS_RETIRED.ALL",
- "PublicDescription": "Counts the number of micro-ops retired. Use Cmask=1 and invert to count active cycles or stalled cycles.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC2",
- "UMask": "0x2",
- "BriefDescription": "Retirement slots used.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
- "PublicDescription": "This event counts the number of retirement slots used each cycle. There are potentially 4 slots that can be used each cycle - meaning, 4 uops or 4 instructions could retire each cycle.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -765,6 +1136,16 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0xC2",
+ "UMask": "0x2",
+ "BriefDescription": "Retirement slots used.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xC3",
"UMask": "0x1",
"BriefDescription": "Cycles there was a Nuke. Account for both thread-specific and All Thread Nukes.",
@@ -774,6 +1155,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EdgeDetect": "1",
+ "EventCode": "0xC3",
+ "UMask": "0x1",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "Counter": "0,1,2,3",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "CounterMask": "1",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xC3",
"UMask": "0x4",
"BriefDescription": "Self-modifying code (SMC) detected.",
@@ -794,12 +1186,21 @@
},
{
"EventCode": "0xC4",
+ "UMask": "0x0",
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "Branch instructions at retirement.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
"UMask": "0x1",
"BriefDescription": "Conditional branch instructions retired.",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
- "PublicDescription": "Counts the number of conditional branch instructions retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -815,13 +1216,23 @@
},
{
"EventCode": "0xC4",
- "UMask": "0x0",
+ "UMask": "0x2",
+ "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3).",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
+ "UMask": "0x4",
"BriefDescription": "All (macro) branch instructions retired.",
+ "PEBS": "2",
"Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "PublicDescription": "Branch instructions at retirement.",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
"SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xC4",
@@ -830,7 +1241,6 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "PublicDescription": "Counts the number of near return instructions retired.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -851,7 +1261,6 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
- "PublicDescription": "Number of near taken branches retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -866,14 +1275,14 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC4",
- "UMask": "0x4",
- "BriefDescription": "All (macro) branch instructions retired.",
- "PEBS": "2",
+ "EventCode": "0xC5",
+ "UMask": "0x0",
+ "BriefDescription": "All mispredicted macro branch instructions retired.",
"Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "Mispredicted branch instructions at retirement.",
"SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC5",
@@ -887,18 +1296,8 @@
},
{
"EventCode": "0xC5",
- "UMask": "0x0",
- "BriefDescription": "All mispredicted macro branch instructions retired.",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "PublicDescription": "Mispredicted branch instructions at retirement.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC5",
"UMask": "0x4",
- "BriefDescription": "Mispredicted macro branch instructions retired. ",
+ "BriefDescription": "Mispredicted macro branch instructions retired.",
"PEBS": "2",
"Counter": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
@@ -907,171 +1306,24 @@
"CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xCC",
- "UMask": "0x20",
- "BriefDescription": "Count cases of saving new LBR",
- "Counter": "0,1,2,3",
- "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
- "PublicDescription": "Count cases of saving new LBR records by hardware.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x0",
- "BriefDescription": "Thread cycles when thread is not in halt state",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P",
- "PublicDescription": "Counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x89",
- "UMask": "0xa0",
- "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x1",
- "BriefDescription": "Cycles per core when uops are exectuted in port 0.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x2",
- "BriefDescription": "Cycles per core when uops are exectuted in port 1.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x4",
- "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x8",
- "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x10",
- "BriefDescription": "Cycles per core when uops are exectuted in port 4.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x20",
- "BriefDescription": "Cycles per core when uops are exectuted in port 5.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x40",
- "BriefDescription": "Cycles per core when uops are exectuted in port 6.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x80",
- "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0xC5",
"UMask": "0x20",
"BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
- "PublicDescription": "Number of near branch instructions retired that were taken but mispredicted.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
- "CounterMask": "1",
- "Errata": "HSD144, HSD30, HSM31",
- "PublicDescription": "This events counts the cycles where at least one uop was executed. It is counted per thread.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
- "CounterMask": "2",
- "Errata": "HSD144, HSD30, HSM31",
- "PublicDescription": "This events counts the cycles where at least two uop were executed. It is counted per thread.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
- "CounterMask": "3",
- "Errata": "HSD144, HSD30, HSM31",
- "PublicDescription": "This events counts the cycles where at least three uop were executed. It is counted per thread.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "EventCode": "0xCC",
+ "UMask": "0x20",
+ "BriefDescription": "Count cases of saving new LBR",
"Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
- "CounterMask": "4",
- "Errata": "HSD144, HSD30, HSM31",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "PublicDescription": "Count cases of saving new LBR records by hardware.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xe6",
@@ -1082,248 +1334,5 @@
"PublicDescription": "Number of front end re-steers due to BPU misprediction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EdgeDetect": "1",
- "EventCode": "0xC3",
- "UMask": "0x1",
- "BriefDescription": "Number of machine clears (nukes) of any type.",
- "Counter": "0,1,2,3",
- "EventName": "MACHINE_CLEARS.COUNT",
- "CounterMask": "1",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA8",
- "UMask": "0x1",
- "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
- "Counter": "0,1,2,3",
- "EventName": "LSD.CYCLES_ACTIVE",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA8",
- "UMask": "0x1",
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
- "Counter": "0,1,2,3",
- "EventName": "LSD.CYCLES_4_UOPS",
- "CounterMask": "4",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EdgeDetect": "1",
- "Invert": "1",
- "EventCode": "0x5E",
- "UMask": "0x1",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
- "Counter": "0,1,2,3",
- "EventName": "RS_EVENTS.EMPTY_END",
- "CounterMask": "1",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x1",
- "BriefDescription": "Cycles per thread when uops are executed in port 0.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x2",
- "BriefDescription": "Cycles per thread when uops are executed in port 1.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x4",
- "BriefDescription": "Cycles per thread when uops are executed in port 2.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x8",
- "BriefDescription": "Cycles per thread when uops are executed in port 3.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x10",
- "BriefDescription": "Cycles per thread when uops are executed in port 4.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x20",
- "BriefDescription": "Cycles per thread when uops are executed in port 5.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x40",
- "BriefDescription": "Cycles per thread when uops are executed in port 6.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA1",
- "UMask": "0x80",
- "BriefDescription": "Cycles per thread when uops are executed in port 7.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x00",
- "UMask": "0x2",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "Counter": "Fixed counter 2",
- "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 2"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x0",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate)",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
- "AnyThread": "1",
- "PublicDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x0D",
- "UMask": "0x3",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke)",
- "Counter": "0,1,2,3",
- "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
- "AnyThread": "1",
- "CounterMask": "1",
- "PublicDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
- "CounterMask": "1",
- "Errata": "HSD30, HSM31",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
- "CounterMask": "2",
- "Errata": "HSD30, HSM31",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
- "CounterMask": "3",
- "Errata": "HSD30, HSM31",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
- "CounterMask": "4",
- "Errata": "HSD30, HSM31",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "Invert": "1",
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
- "Errata": "HSD30, HSM31",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
- "PublicDescription": "Reference cycles when the thread is unhalted. (counts at 100 MHz rate)",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate)",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
- "AnyThread": "1",
- "PublicDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x2",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
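The UOPS_EXECUTED.CYCLES_GE_N and UOPS_EXECUTED.CORE_CYCLES_GE_N entries added above all reuse event 0xB1 and encode the "at least N uops per cycle" threshold in "CounterMask". A minimal sketch, assuming the path below, that cross-checks the N in each event name against its mask:

  import json
  import re

  # Hypothetical path -- adjust to your checkout.
  PATH = "tools/perf/pmu-events/arch/x86/haswellx/pipeline.json"

  with open(PATH) as f:
      events = json.load(f)

  for ev in events:
      m = re.search(r"GE_(\d+)", ev.get("EventName", ""))
      if m:
          # The cmask threshold should match the N in the event name.
          assert ev.get("CounterMask") == m.group(1), ev["EventName"]
  print("all *_GE_N events carry a matching CounterMask")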
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/virtual-memory.json b/tools/perf/pmu-events/arch/x86/haswellx/virtual-memory.json
index 9c00f8ef6a07..168df552b1a8 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/virtual-memory.json
@@ -40,6 +40,16 @@
},
{
"EventCode": "0x08",
+ "UMask": "0xe",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Completed page walks in any TLB of any page size due to demand load misses.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
"UMask": "0x10",
"BriefDescription": "Cycles when PMH is busy with page walks",
"Counter": "0,1,2,3",
@@ -70,6 +80,16 @@
},
{
"EventCode": "0x08",
+ "UMask": "0x60",
+ "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "PublicDescription": "Number of cache load STLB hits. No page walk.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
"UMask": "0x80",
"BriefDescription": "DTLB demand load misses with low part of linear-to-physical address translation missed",
"Counter": "0,1,2,3",
@@ -119,6 +139,16 @@
},
{
"EventCode": "0x49",
+ "UMask": "0xe",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Completed page walks due to store miss in any TLB levels of any page size (4K/2M/4M/1G).",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
"UMask": "0x10",
"BriefDescription": "Cycles when PMH is busy with page walks",
"Counter": "0,1,2,3",
@@ -149,6 +179,16 @@
},
{
"EventCode": "0x49",
+ "UMask": "0x60",
+ "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "PublicDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
"UMask": "0x80",
"BriefDescription": "DTLB store misses with low part of linear-to-physical address translation missed",
"Counter": "0,1,2,3",
@@ -207,6 +247,16 @@
},
{
"EventCode": "0x85",
+ "UMask": "0xe",
+ "BriefDescription": "Misses in all ITLB levels that cause completed page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Completed page walks in ITLB of any page size.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
"UMask": "0x10",
"BriefDescription": "Cycles when PMH is busy with page walks",
"Counter": "0,1,2,3",
@@ -236,6 +286,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x85",
+ "UMask": "0x60",
+ "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "PublicDescription": "ITLB misses that hit STLB. No page walk.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xae",
"UMask": "0x1",
"BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
@@ -257,39 +317,43 @@
},
{
"EventCode": "0xBC",
- "UMask": "0x21",
- "BriefDescription": "Number of ITLB page walker hits in the L1+FB",
+ "UMask": "0x12",
+ "BriefDescription": "Number of DTLB page walker hits in the L2",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
- "PublicDescription": "Number of ITLB page walker loads that hit in the L1+FB.",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L2",
+ "PublicDescription": "Number of DTLB page walker loads that hit in the L2.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x41",
- "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L1 and FB.",
+ "UMask": "0x14",
+ "BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L1",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
+ "Errata": "HSD25",
+ "PublicDescription": "Number of DTLB page walker loads that hit in the L3.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x81",
- "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L1 and FB.",
+ "UMask": "0x18",
+ "BriefDescription": "Number of DTLB page walker hits in Memory",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L1",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
+ "Errata": "HSD25",
+ "PublicDescription": "Number of DTLB page walker loads from memory.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x12",
- "BriefDescription": "Number of DTLB page walker hits in the L2",
+ "UMask": "0x21",
+ "BriefDescription": "Number of ITLB page walker hits in the L1+FB",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.DTLB_L2",
- "PublicDescription": "Number of DTLB page walker loads that hit in the L2.",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
+ "PublicDescription": "Number of ITLB page walker loads that hit in the L1+FB.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
@@ -305,41 +369,41 @@
},
{
"EventCode": "0xBC",
- "UMask": "0x42",
- "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L2.",
+ "UMask": "0x24",
+ "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L2",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
+ "Errata": "HSD25",
+ "PublicDescription": "Number of ITLB page walker loads that hit in the L3.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x82",
- "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L2.",
+ "UMask": "0x28",
+ "BriefDescription": "Number of ITLB page walker hits in Memory",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L2",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_MEMORY",
+ "Errata": "HSD25",
+ "PublicDescription": "Number of ITLB page walker loads from memory.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x14",
- "BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP",
+ "UMask": "0x41",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L1 and FB.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
- "Errata": "HSD25",
- "PublicDescription": "Number of DTLB page walker loads that hit in the L3.",
+ "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L1",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x24",
- "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP",
+ "UMask": "0x42",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L2.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
- "Errata": "HSD25",
- "PublicDescription": "Number of ITLB page walker loads that hit in the L3.",
+ "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L2",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
@@ -354,41 +418,37 @@
},
{
"EventCode": "0xBC",
- "UMask": "0x84",
- "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L2.",
+ "UMask": "0x48",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in memory.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L3",
+ "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_MEMORY",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x18",
- "BriefDescription": "Number of DTLB page walker hits in Memory",
+ "UMask": "0x81",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L1 and FB.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
- "Errata": "HSD25",
- "PublicDescription": "Number of DTLB page walker loads from memory.",
+ "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L1",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x28",
- "BriefDescription": "Number of ITLB page walker hits in Memory",
+ "UMask": "0x82",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L2.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.ITLB_MEMORY",
- "Errata": "HSD25",
- "PublicDescription": "Number of ITLB page walker loads from memory.",
+ "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L2",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xBC",
- "UMask": "0x48",
- "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in memory.",
+ "UMask": "0x84",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L2.",
"Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_MEMORY",
+ "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L3",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
@@ -420,65 +480,5 @@
"PublicDescription": "Count number of STLB flush attempts.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x08",
- "UMask": "0xe",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
- "PublicDescription": "Completed page walks in any TLB of any page size due to demand load misses.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x08",
- "UMask": "0x60",
- "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
- "PublicDescription": "Number of cache load STLB hits. No page walk.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x49",
- "UMask": "0xe",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
- "PublicDescription": "Completed page walks due to store miss in any TLB levels of any page size (4K/2M/4M/1G).",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x49",
- "UMask": "0x60",
- "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT",
- "PublicDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x85",
- "UMask": "0xe",
- "BriefDescription": "Misses in all ITLB levels that cause completed page walks",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_COMPLETED",
- "PublicDescription": "Completed page walks in ITLB of any page size.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x85",
- "UMask": "0x60",
- "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.STLB_HIT",
- "PublicDescription": "ITLB misses that hit STLB. No page walk.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
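The entries added above introduce combined umasks (0xe for the WALK_COMPLETED events, 0x60 for the STLB_HIT events) alongside the existing per-size variants under the same event codes. A minimal sketch, assuming the path below, that groups the TLB events by EventCode so the umask layout is easy to inspect:

  import json
  from collections import defaultdict

  # Hypothetical path -- adjust to your checkout.
  PATH = "tools/perf/pmu-events/arch/x86/haswellx/virtual-memory.json"

  with open(PATH) as f:
      events = json.load(f)

  by_code = defaultdict(list)
  for ev in events:
      by_code[ev["EventCode"]].append((int(ev["UMask"], 16), ev["EventName"]))

  for code in ("0x08", "0x49", "0x85"):  # DTLB load, DTLB store, ITLB misses
      for umask, name in sorted(by_code[code]):
          print(code, hex(umask), name)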
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/cache.json b/tools/perf/pmu-events/arch/x86/ivybridge/cache.json
index f1ee6d4853c5..999a01bc6467 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/cache.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/cache.json
@@ -10,6 +10,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Counts any demand and L1 HW prefetch data load requests to L2.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand Data Read requests",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "RFO requests that hit L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
@@ -30,6 +40,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Counts all L2 store RFO requests.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc",
+ "EventName": "L2_RQSTS.ALL_RFO",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests to L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Number of instruction fetches that hit the L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
@@ -50,6 +70,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Counts all L2 code requests.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 code requests",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts all L2 HW prefetcher requests that hit L2.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
@@ -70,36 +100,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Counts any demand and L1 HW prefetch data load requests to L2.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts all L2 store RFO requests.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xc",
- "EventName": "L2_RQSTS.ALL_RFO",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFO requests to L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts all L2 code requests.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "L2_RQSTS.ALL_CODE_RD",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 code requests",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"PublicDescription": "Counts all L2 HW prefetcher requests.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
@@ -219,6 +219,29 @@
"CounterHTOff": "2"
},
{
+ "PublicDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "EventCode": "0x48",
+ "Counter": "2",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core",
+ "CounterMask": "1",
+ "CounterHTOff": "2"
+ },
+ {
+ "PublicDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
+ "EventCode": "0x48",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts the number of lines brought into the L1 data cache.",
"EventCode": "0x51",
"Counter": "0,1,2,3",
@@ -239,76 +262,87 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Offcore outstanding Demand Code Read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "PublicDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Offcore outstanding RFO store transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "PublicDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
"SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue",
+ "CounterMask": "6",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Offcore outstanding cacheable data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "PublicDescription": "Offcore outstanding Demand Code Read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "PublicDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "PublicDescription": "Offcore outstanding RFO store transactions in SQ to uncore. Set Cmask=1 to count cycles.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore",
- "CounterMask": "1",
+ "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
+ "PublicDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
"SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
+ "PublicDescription": "Offcore outstanding cacheable data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -379,7 +413,7 @@
"UMask": "0x11",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that miss the STLB.",
+ "BriefDescription": "Retired load uops that miss the STLB. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
@@ -389,7 +423,7 @@
"UMask": "0x12",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that miss the STLB.",
+ "BriefDescription": "Retired store uops that miss the STLB. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
@@ -399,7 +433,7 @@
"UMask": "0x21",
"EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired load uops with locked access.",
+ "BriefDescription": "Retired load uops with locked access. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
@@ -409,7 +443,7 @@
"UMask": "0x41",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired load uops that split across a cacheline boundary. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
@@ -419,7 +453,7 @@
"UMask": "0x42",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
@@ -429,7 +463,7 @@
"UMask": "0x81",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
"SampleAfterValue": "2000003",
- "BriefDescription": "All retired load uops.",
+ "BriefDescription": "All retired load uops. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
@@ -439,67 +473,61 @@
"UMask": "0x82",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"SampleAfterValue": "2000003",
- "BriefDescription": "All retired store uops.",
+ "BriefDescription": "All retired store uops. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops with L1 cache hits as data sources.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
"SampleAfterValue": "2000003",
- "BriefDescription": "Retired load uops with L1 cache hits as data sources. ",
+ "BriefDescription": "Retired load uops with L1 cache hits as data sources.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops with L2 cache hits as data sources.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x2",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops with L2 cache hits as data sources. ",
+ "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source was LLC hit with no snoop required.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "MEM_LOAD_UOPS_RETIRED.LLC_HIT",
"SampleAfterValue": "50021",
- "BriefDescription": "Retired load uops which data sources were data hits in LLC without snoops required. ",
+ "BriefDescription": "Retired load uops which data sources were data hits in LLC without snoops required.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source followed an L1 miss.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x8",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources following L1 data-cache miss",
+ "BriefDescription": "Retired load uops which data sources following L1 data-cache miss.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops that missed L2, excluding unknown sources.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x10",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
"SampleAfterValue": "50021",
- "BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
+ "BriefDescription": "Retired load uops with L2 cache misses as data sources.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source is LLC miss.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x20",
@@ -510,61 +538,56 @@
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x40",
"EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready. ",
+ "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source was an on-package core cache LLC hit and cross-core snoop missed.",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS",
"SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were LLC hit and cross-core snoop missed in on-pkg core cache. ",
+ "BriefDescription": "Retired load uops which data sources were LLC hit and cross-core snoop missed in on-pkg core cache.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source was an on-package LLC hit and cross-core snoop hits.",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x2",
"EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT",
"SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were LLC and cross-core snoop hits in on-pkg core cache. ",
+ "BriefDescription": "Retired load uops which data sources were LLC and cross-core snoop hits in on-pkg core cache.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source was an on-package core cache with HitM responses.",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM",
"SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were HitM responses from shared LLC. ",
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared LLC.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source was LLC hit with no snoop required.",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x8",
"EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources were hits in LLC without snoops required. ",
+ "BriefDescription": "Retired load uops which data sources were hits in LLC without snoops required.",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Retired load uop whose Data Source was: local DRAM either Snoop not needed or Snoop Miss (RspI)",
+ "PublicDescription": "Retired load uops whose data source was local memory (cross-socket snoop not needed or missed).",
"EventCode": "0xD3",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -753,50 +776,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Retired load uops whose data source was local memory (cross-socket snoop not needed or missed).",
- "EventCode": "0xD3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired load uops which data sources missed LLC but serviced from local dram.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PublicDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
- "EventCode": "0x48",
- "Counter": "2",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core",
- "CounterMask": "1",
- "CounterHTOff": "2"
- },
- {
- "PublicDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
- "EventCode": "0x48",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3f803c0244",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/frontend.json b/tools/perf/pmu-events/arch/x86/ivybridge/frontend.json
index de72b84b3536..efaa949ead31 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/frontend.json
@@ -20,76 +20,45 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Increment each cycle. # of uops delivered to IDQ from DSB path. Set Cmask = 1 to count cycles.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of delivery.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "IDQ.MS_DSB_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "IDQ.MS_MITE_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ from MS by either DSB or MITE. Set Cmask = 1 to count cycles.",
+ "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_UOPS",
+ "UMask": "0x4",
+ "EventName": "IDQ.MITE_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "PublicDescription": "Increment each cycle. # of uops delivered to IDQ from DSB path. Set Cmask = 1 to count cycles.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_CYCLES",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterMask": "1",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "IDQ.MITE_CYCLES",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of delivery.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_CYCLES",
+ "UMask": "0x10",
+ "EventName": "IDQ.MS_DSB_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
- "CounterMask": "1",
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -138,6 +107,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts cycles MITE is delivered four uops. Set Cmask = 4.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
@@ -160,6 +139,39 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ from MS by either DSB or MITE. Set Cmask = 1 to count cycles.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EdgeDetect": "1",
+ "EventName": "IDQ.MS_SWITCHES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Number of uops delivered to IDQ from any path.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
@@ -206,7 +218,7 @@
"UMask": "0x1",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled ",
+ "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
"CounterHTOff": "0,1,2,3"
},
{
@@ -289,17 +301,5 @@
"SampleAfterValue": "2000003",
"BriefDescription": "Cycles when Decode Stream Buffer (DSB) fill encounter more than 3 Decode Stream Buffer (DSB) lines",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EdgeDetect": "1",
- "EventName": "IDQ.MS_SWITCHES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
]
\ No newline at end of file
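A sketch, not part of the patch: the IDQ.MS_SWITCHES entry added above also carries "EdgeDetect" and "CounterMask", which on x86 occupy higher bits of the same raw config word (edge detect at bit 18, invert at bit 23, counter mask in bits 24-31 of PERFEVTSEL). Illustration below, assuming that standard layout; raw_config() is a hypothetical helper, not a perf or kernel API.

/* Build the raw config for IDQ.MS_SWITCHES (EventCode 0x79, UMask 0x30,
 * EdgeDetect 1, CounterMask 1). Equivalent to
 * "perf stat -e cpu/event=0x79,umask=0x30,edge,cmask=1/".
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t raw_config(uint64_t event, uint64_t umask,
			   int edge, int inv, uint64_t cmask)
{
	return event | (umask << 8) |
	       ((uint64_t)edge << 18) | ((uint64_t)inv << 23) | (cmask << 24);
}

int main(void)
{
	printf("IDQ.MS_SWITCHES raw config: %#llx\n",
	       (unsigned long long)raw_config(0x79, 0x30, 1, 0, 1));
	return 0;
}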
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/memory.json b/tools/perf/pmu-events/arch/x86/ivybridge/memory.json
index e1c6a1d4a4d5..a74d54f56192 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/memory.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/memory.json
@@ -39,18 +39,6 @@
},
{
"PEBS": "2",
- "EventCode": "0xCD",
- "Counter": "3",
- "UMask": "0x2",
- "EventName": "MEM_TRANS_RETIRED.PRECISE_STORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Sample stores and collect precise store operation via PEBS record. PMC3 only.",
- "PRECISE_STORE": "1",
- "TakenAlone": "1",
- "CounterHTOff": "3"
- },
- {
- "PEBS": "2",
"PublicDescription": "Loads with latency value being above 4.",
"EventCode": "0xCD",
"MSRValue": "0x4",
@@ -162,6 +150,18 @@
"CounterHTOff": "3"
},
{
+ "PEBS": "2",
+ "EventCode": "0xCD",
+ "Counter": "3",
+ "UMask": "0x2",
+ "EventName": "MEM_TRANS_RETIRED.PRECISE_STORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Sample stores and collect precise store operation via PEBS record. PMC3 only.",
+ "PRECISE_STORE": "1",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x300400244",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/other.json b/tools/perf/pmu-events/arch/x86/ivybridge/other.json
index 9c2dd0511a32..4eb83ee40412 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/other.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/other.json
@@ -10,16 +10,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Unhalted core cycles when the thread is not in ring 0.",
- "EventCode": "0x5C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPL_CYCLES.RING123",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"PublicDescription": "Number of intervals between processor halts while thread is in ring 0.",
"EventCode": "0x5C",
"Counter": "0,1,2,3",
@@ -32,6 +22,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Unhalted core cycles when the thread is not in ring 0.",
+ "EventCode": "0x5C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPL_CYCLES.RING123",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Cycles in which the L1D and L2 are locked, due to a UC lock or split lock.",
"EventCode": "0x63",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json b/tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json
index 2145c28193f7..0afbfd95ea30 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json
@@ -1,30 +1,41 @@
[
{
"EventCode": "0x00",
- "Counter": "Fixed counter 1",
+ "Counter": "Fixed counter 0",
"UMask": "0x1",
"EventName": "INST_RETIRED.ANY",
"SampleAfterValue": "2000003",
"BriefDescription": "Instructions retired from execution.",
- "CounterHTOff": "Fixed counter 1"
+ "CounterHTOff": "Fixed counter 0"
},
{
"EventCode": "0x00",
- "Counter": "Fixed counter 2",
+ "Counter": "Fixed counter 1",
"UMask": "0x2",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"SampleAfterValue": "2000003",
"BriefDescription": "Core cycles when the thread is not in halt state.",
- "CounterHTOff": "Fixed counter 2"
+ "CounterHTOff": "Fixed counter 1"
},
{
+ "PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
"EventCode": "0x00",
- "Counter": "Fixed counter 3",
+ "Counter": "Fixed counter 1",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state",
+ "CounterHTOff": "Fixed counter 1"
+ },
+ {
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 2",
"UMask": "0x3",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"SampleAfterValue": "2000003",
"BriefDescription": "Reference cycles when the core is not in halt state.",
- "CounterHTOff": "Fixed counter 3"
+ "CounterHTOff": "Fixed counter 2"
},
{
"PublicDescription": "Loads blocked by overlapping with store buffer that cannot be forwarded.",
@@ -78,6 +89,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x0D",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "AnyThread": "1",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Increments each cycle the # of Uops issued by the RAT to RS. Set Cmask = 1, Inv = 1, Any= 1to count stalled cycles of this core.",
"EventCode": "0x0E",
"Counter": "0,1,2,3",
@@ -175,6 +197,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Increments at the frequency of XCLK (100 MHz) when not halted.",
"EventCode": "0x3C",
"Counter": "0,1,2,3",
@@ -187,6 +220,36 @@
{
"EventCode": "0x3C",
"Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted. (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Reference cycles when the thread is unhalted. (counts at 100 MHz rate)",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted. (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
"UMask": "0x2",
"EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "2000003",
@@ -194,6 +257,15 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for S/W prefetch.",
"EventCode": "0x4C",
"Counter": "0,1,2,3",
@@ -216,37 +288,37 @@
{
"EventCode": "0x58",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MOVE_ELIMINATION.INT_NOT_ELIMINATED",
+ "UMask": "0x1",
+ "EventName": "MOVE_ELIMINATION.INT_ELIMINATED",
"SampleAfterValue": "1000003",
- "BriefDescription": "Number of integer Move Elimination candidate uops that were not eliminated.",
+ "BriefDescription": "Number of integer Move Elimination candidate uops that were eliminated.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x58",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "MOVE_ELIMINATION.SIMD_NOT_ELIMINATED",
+ "UMask": "0x2",
+ "EventName": "MOVE_ELIMINATION.SIMD_ELIMINATED",
"SampleAfterValue": "1000003",
- "BriefDescription": "Number of SIMD Move Elimination candidate uops that were not eliminated.",
+ "BriefDescription": "Number of SIMD Move Elimination candidate uops that were eliminated.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x58",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MOVE_ELIMINATION.INT_ELIMINATED",
+ "UMask": "0x4",
+ "EventName": "MOVE_ELIMINATION.INT_NOT_ELIMINATED",
"SampleAfterValue": "1000003",
- "BriefDescription": "Number of integer Move Elimination candidate uops that were eliminated.",
+ "BriefDescription": "Number of integer Move Elimination candidate uops that were not eliminated.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x58",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MOVE_ELIMINATION.SIMD_ELIMINATED",
+ "UMask": "0x8",
+ "EventName": "MOVE_ELIMINATION.SIMD_NOT_ELIMINATED",
"SampleAfterValue": "1000003",
- "BriefDescription": "Number of SIMD Move Elimination candidate uops that were eliminated.",
+ "BriefDescription": "Number of SIMD Move Elimination candidate uops that were not eliminated.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -260,6 +332,18 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x5E",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x87",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -498,118 +582,118 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 1.",
+ "PublicDescription": "Cycles per core when uops are dispatched to port 0.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 1",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 0",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 4.",
+ "PublicDescription": "Cycles which a Uop is dispatched on port 1.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "UMask": "0x2",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 4",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 5.",
+ "PublicDescription": "Cycles per core when uops are dispatched to port 1.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 5",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles per core when uops are dispatched to port 0.",
+ "PublicDescription": "Cycles which a Uop is dispatched on port 2.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_0_CORE",
+ "UMask": "0xc",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 0",
+ "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 2",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles per core when uops are dispatched to port 1.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "UMask": "0xc",
"AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1_CORE",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 1",
+ "BriefDescription": "Uops dispatched to port 2, loads and stores per core (speculative and retired).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles per core when uops are dispatched to port 4.",
+ "PublicDescription": "Cycles which a Uop is dispatched on port 3.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x40",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4_CORE",
+ "UMask": "0x30",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 4",
+ "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 3",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles per core when uops are dispatched to port 5.",
+ "PublicDescription": "Cycles per core when load or STA uops are dispatched to port 3.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x80",
+ "UMask": "0x30",
"AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5_CORE",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 5",
+ "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 3",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 2.",
+ "PublicDescription": "Cycles which a Uop is dispatched on port 4.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0xc",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "UMask": "0x40",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 2",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 3.",
+ "PublicDescription": "Cycles per core when uops are dispatched to port 4.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "UMask": "0x40",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 3",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles which a Uop is dispatched on port 5.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0xc",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2_CORE",
+ "UMask": "0x80",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops dispatched to port 2, loads and stores per core (speculative and retired).",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 5",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles per core when load or STA uops are dispatched to port 3.",
+ "PublicDescription": "Cycles per core when uops are dispatched to port 5.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x30",
+ "UMask": "0x80",
"AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3_CORE",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 3",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 5",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -662,15 +746,14 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles with pending L1 cache miss loads. Set AnyThread to count per core.",
"EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0x8",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with pending L1 cache miss loads.",
- "CounterMask": "8",
- "CounterHTOff": "2"
+ "BriefDescription": "Cycles while L2 cache miss load* is outstanding.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PublicDescription": "Cycles with pending memory loads. Set AnyThread to count per core.",
@@ -684,13 +767,33 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"PublicDescription": "Total execution stalls.",
"EventCode": "0xA3",
"Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Total execution stalls",
+ "BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Total execution stalls.",
"CounterMask": "4",
"CounterHTOff": "0,1,2,3"
},
@@ -708,6 +811,16 @@
{
"EventCode": "0xA3",
"Counter": "0,1,2,3",
+ "UMask": "0x5",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while L2 cache miss load* is outstanding.",
+ "CounterMask": "5",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
"UMask": "0x6",
"EventName": "CYCLE_ACTIVITY.STALLS_LDM_PENDING",
"SampleAfterValue": "2000003",
@@ -716,6 +829,37 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x6",
+ "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Cycles with pending L1 cache miss loads. Set AnyThread to count per core.",
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0x8",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with pending L1 cache miss loads.",
+ "CounterMask": "8",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0x8",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "8",
+ "CounterHTOff": "2"
+ },
+ {
"PublicDescription": "Execution stalls due to L1 data cache miss loads. Set Cmask=0CH.",
"EventCode": "0xA3",
"Counter": "2",
@@ -727,6 +871,16 @@
"CounterHTOff": "2"
},
{
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0xc",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CounterMask": "12",
+ "CounterHTOff": "2"
+ },
+ {
"EventCode": "0xA8",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -747,6 +901,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "EventCode": "0xA8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_4_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts total number of uops to be executed per-thread each cycle. Set Cmask = 1, INV =1 to count stall cycles.",
"EventCode": "0xB1",
"Counter": "0,1,2,3",
@@ -757,6 +922,61 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts total number of uops to be executed per-core each cycle.",
"EventCode": "0xB1",
"Counter": "0,1,2,3",
@@ -767,15 +987,59 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
"EventCode": "0xB1",
- "Invert": "1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PublicDescription": "Number of instructions at retirement.",
@@ -809,24 +1073,12 @@
},
{
"PEBS": "1",
- "PublicDescription": "Counts the number of micro-ops retired, Use cmask=1 and invert to count active cycles or stalled cycles.",
"EventCode": "0xC2",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "UOPS_RETIRED.ALL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Actually retired uops. ",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Counts the number of retirement slots used each cycle.",
- "EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Retirement slots used. ",
+ "BriefDescription": "Retired uops.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -864,6 +1116,27 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PEBS": "1",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Retirement slots used.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Number of self-modifying-code machine clears detected.",
"EventCode": "0xC3",
"Counter": "0,1,2,3",
@@ -880,50 +1153,67 @@
"UMask": "0x20",
"EventName": "MACHINE_CLEARS.MASKMOV",
"SampleAfterValue": "100003",
- "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0. ",
+ "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Branch instructions at retirement.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All (macro) branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PEBS": "1",
- "PublicDescription": "Counts the number of conditional branch instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
"SampleAfterValue": "400009",
- "BriefDescription": "Conditional branch instructions retired. ",
+ "BriefDescription": "Conditional branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PEBS": "1",
- "PublicDescription": "Direct and indirect near call instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x2",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
"SampleAfterValue": "100007",
- "BriefDescription": "Direct and indirect near call instructions retired. ",
+ "BriefDescription": "Direct and indirect near call instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Branch instructions at retirement.",
+ "PEBS": "1",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "UMask": "0x2",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
"SampleAfterValue": "400009",
"BriefDescription": "All (macro) branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Counts the number of near return instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x8",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
"SampleAfterValue": "100007",
- "BriefDescription": "Return instructions retired. ",
+ "BriefDescription": "Return instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -933,18 +1223,17 @@
"UMask": "0x10",
"EventName": "BR_INST_RETIRED.NOT_TAKEN",
"SampleAfterValue": "400009",
- "BriefDescription": "Not taken branch instructions retired. ",
+ "BriefDescription": "Not taken branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PEBS": "1",
- "PublicDescription": "Number of near taken branches retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x20",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
"SampleAfterValue": "400009",
- "BriefDescription": "Taken branch instructions retired. ",
+ "BriefDescription": "Taken branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -954,28 +1243,7 @@
"UMask": "0x40",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"SampleAfterValue": "100007",
- "BriefDescription": "Far branch instructions retired. ",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PEBS": "2",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
- "SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Mispredicted conditional branch instructions retired.",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BR_MISP_RETIRED.CONDITIONAL",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted conditional branch instructions retired. ",
+ "BriefDescription": "Far branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -990,13 +1258,12 @@
},
{
"PEBS": "1",
- "PublicDescription": "Mispredicted taken branch instructions retired.",
"EventCode": "0xC5",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "UMask": "0x1",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
"SampleAfterValue": "400009",
- "BriefDescription": "number of near branch instructions retired that were mispredicted and taken. ",
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -1010,6 +1277,16 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Count cases of saving new LBR records by hardware.",
"EventCode": "0xCC",
"Counter": "0,1,2,3",
@@ -1028,280 +1305,5 @@
"SampleAfterValue": "100003",
"BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x5E",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "RS_EVENTS.EMPTY_END",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "MACHINE_CLEARS.COUNT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of machine clears (nukes) of any type.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
- "EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_4_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0x8",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "CounterMask": "8",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L2 cache miss load* is outstanding.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Total execution stalls.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0xc",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
- "CounterMask": "12",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x5",
- "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L2 cache miss load* is outstanding.",
- "CounterMask": "5",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x6",
- "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "EventCode": "0x00",
- "Counter": "Fixed counter 2",
- "UMask": "0x2",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state",
- "CounterHTOff": "Fixed counter 2"
- },
- {
- "PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted. (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x0D",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "AnyThread": "1",
- "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles with no micro-ops executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Reference cycles when the thread is unhalted. (counts at 100 MHz rate)",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted. (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
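
The branch events rearranged above pair naturally: BR_MISP_RETIRED.ALL_BRANCHES over BR_INST_RETIRED.ALL_BRANCHES gives the misprediction rate. A minimal sketch of that arithmetic, using perf's generic "branches"/"branch-misses" aliases rather than the raw JSON names; the CSV parsing and the example command are illustrative assumptions, not part of this patch:

import subprocess

def branch_miss_rate(cmd):
    # "perf stat -x," writes one CSV line per event to stderr:
    # value,unit,event,run-time,percentage,...
    res = subprocess.run(
        ["perf", "stat", "-x", ",", "-e", "branches,branch-misses", "--"] + cmd,
        capture_output=True, text=True)
    counts = {}
    for line in res.stderr.splitlines():
        fields = line.split(",")
        if len(fields) >= 3 and fields[0].isdigit():
            counts[fields[2]] = int(fields[0])
    return counts.get("branch-misses", 0) / max(counts.get("branches", 1), 1)

print("branch miss rate: %.4f" % branch_miss_rate(["ls", "/"]))
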
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/virtual-memory.json b/tools/perf/pmu-events/arch/x86/ivybridge/virtual-memory.json
index f036f5398906..f243551b4d12 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/virtual-memory.json
@@ -1,5 +1,35 @@
[
{
+ "PublicDescription": "Misses in all TLB levels that cause a page walk of any page size from demand loads.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x81",
+ "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes an page walk of any page size.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Misses in all TLB levels that caused page walk completed of any size by demand loads.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x82",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycle PMH is busy with a walk due to demand loads.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x84",
+ "EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Demand load cycles page miss handler (PMH) is busy with this walk.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x88",
@@ -146,35 +176,5 @@
"SampleAfterValue": "100007",
"BriefDescription": "STLB flush attempts",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Misses in all TLB levels that cause a page walk of any page size from demand loads.",
- "EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes an page walk of any page size.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Misses in all TLB levels that caused page walk completed of any size by demand loads.",
- "EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x82",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycle PMH is busy with a walk due to demand loads.",
- "EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x84",
- "EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Demand load cycles page miss handler (PMH) is busy with this walk.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
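
DTLB_LOAD_MISSES.WALK_DURATION, moved to the top of this file, counts cycles the page miss handler (PMH) is busy, so dividing it by unhalted core cycles approximates the share of time spent in demand-load page walks. A minimal sketch of that derived metric, assuming the two raw counts have already been collected (SampleAfterValue only sets a sampling period and does not change what is counted):

def page_walk_fraction(walk_duration_cycles, unhalted_cycles):
    """Approximate share of core time spent walking page tables for loads.

    walk_duration_cycles: DTLB_LOAD_MISSES.WALK_DURATION (event 0x08, umask 0x84)
    unhalted_cycles:      CPU_CLK_UNHALTED.THREAD
    """
    if unhalted_cycles == 0:
        return 0.0
    return walk_duration_cycles / unhalted_cycles

# e.g. 12M walk cycles out of 2.4G total cycles -> 0.5% of time in page walks
assert abs(page_walk_fraction(12_000_000, 2_400_000_000) - 0.005) < 1e-9
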
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/cache.json b/tools/perf/pmu-events/arch/x86/ivytown/cache.json
index ff27a620edd8..6dad3ad6b102 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/cache.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/cache.json
@@ -10,6 +10,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Counts any demand and L1 HW prefetch data load requests to L2.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand Data Read requests",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "RFO requests that hit L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
@@ -30,6 +40,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Counts all L2 store RFO requests.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc",
+ "EventName": "L2_RQSTS.ALL_RFO",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests to L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Number of instruction fetches that hit the L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
@@ -50,6 +70,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Counts all L2 code requests.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 code requests",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts all L2 HW prefetcher requests that hit L2.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
@@ -70,36 +100,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Counts any demand and L1 HW prefetch data load requests to L2.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts all L2 store RFO requests.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xc",
- "EventName": "L2_RQSTS.ALL_RFO",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFO requests to L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts all L2 code requests.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "L2_RQSTS.ALL_CODE_RD",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 code requests",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"PublicDescription": "Counts all L2 HW prefetcher requests.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
@@ -219,6 +219,29 @@
"CounterHTOff": "2"
},
{
+ "PublicDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "EventCode": "0x48",
+ "Counter": "2",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core",
+ "CounterMask": "1",
+ "CounterHTOff": "2"
+ },
+ {
+ "PublicDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
+ "EventCode": "0x48",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts the number of lines brought into the L1 data cache.",
"EventCode": "0x51",
"Counter": "0,1,2,3",
@@ -239,76 +262,87 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Offcore outstanding Demand Code Read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "PublicDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Offcore outstanding RFO store transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "PublicDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
"SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue",
+ "CounterMask": "6",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Offcore outstanding cacheable data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "PublicDescription": "Offcore outstanding Demand Code Read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "PublicDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "PublicDescription": "Offcore outstanding RFO store transactions in SQ to uncore. Set Cmask=1 to count cycles.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore",
- "CounterMask": "1",
+ "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
+ "PublicDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
"SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
+ "PublicDescription": "Offcore outstanding cacheable data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
"EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -379,7 +413,7 @@
"UMask": "0x11",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that miss the STLB.",
+ "BriefDescription": "Retired load uops that miss the STLB. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
@@ -389,7 +423,7 @@
"UMask": "0x12",
"EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that miss the STLB.",
+ "BriefDescription": "Retired store uops that miss the STLB. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
@@ -399,7 +433,7 @@
"UMask": "0x21",
"EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired load uops with locked access.",
+ "BriefDescription": "Retired load uops with locked access. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
@@ -409,7 +443,7 @@
"UMask": "0x41",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired load uops that split across a cacheline boundary. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
@@ -419,7 +453,7 @@
"UMask": "0x42",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
@@ -429,7 +463,7 @@
"UMask": "0x81",
"EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
"SampleAfterValue": "2000003",
- "BriefDescription": "All retired load uops.",
+ "BriefDescription": "All retired load uops. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
@@ -439,67 +473,61 @@
"UMask": "0x82",
"EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"SampleAfterValue": "2000003",
- "BriefDescription": "All retired store uops.",
+ "BriefDescription": "All retired store uops. (Precise Event)",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops with L1 cache hits as data sources.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
"SampleAfterValue": "2000003",
- "BriefDescription": "Retired load uops with L1 cache hits as data sources. ",
+ "BriefDescription": "Retired load uops with L1 cache hits as data sources.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops with L2 cache hits as data sources.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x2",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops with L2 cache hits as data sources. ",
+ "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source was LLC hit with no snoop required.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "MEM_LOAD_UOPS_RETIRED.LLC_HIT",
"SampleAfterValue": "50021",
- "BriefDescription": "Retired load uops which data sources were data hits in LLC without snoops required. ",
+ "BriefDescription": "Retired load uops which data sources were data hits in LLC without snoops required.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source followed an L1 miss.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x8",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources following L1 data-cache miss",
+ "BriefDescription": "Retired load uops which data sources following L1 data-cache miss.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops that missed L2, excluding unknown sources.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x10",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
"SampleAfterValue": "50021",
- "BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
+ "BriefDescription": "Retired load uops with L2 cache misses as data sources.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source is LLC miss.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x20",
@@ -510,67 +538,61 @@
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
"EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x40",
"EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready. ",
+ "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source was an on-package core cache LLC hit and cross-core snoop missed.",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS",
"SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were LLC hit and cross-core snoop missed in on-pkg core cache. ",
+ "BriefDescription": "Retired load uops which data sources were LLC hit and cross-core snoop missed in on-pkg core cache.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source was an on-package LLC hit and cross-core snoop hits.",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x2",
"EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT",
"SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were LLC and cross-core snoop hits in on-pkg core cache. ",
+ "BriefDescription": "Retired load uops which data sources were LLC and cross-core snoop hits in on-pkg core cache.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source was an on-package core cache with HitM responses.",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM",
"SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were HitM responses from shared LLC. ",
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared LLC.",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Retired load uops whose data source was LLC hit with no snoop required.",
"EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x8",
"EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources were hits in LLC without snoops required. ",
+ "BriefDescription": "Retired load uops which data sources were hits in LLC without snoops required.",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Retired load uop whose Data Source was: local DRAM either Snoop not needed or Snoop Miss (RspI)",
"EventCode": "0xD3",
"Counter": "0,1,2,3",
- "UMask": "0x1",
+ "UMask": "0x3",
"EventName": "MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired load uops which data sources missed LLC but serviced from local dram.",
+ "BriefDescription": "Retired load uops whose data source was local DRAM (Snoop not needed, Snoop Miss, or Snoop Hit data not forwarded).",
"CounterHTOff": "0,1,2,3"
},
{
@@ -780,40 +802,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
- "EventCode": "0x48",
- "Counter": "2",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core",
- "CounterMask": "1",
- "CounterHTOff": "2"
- },
- {
- "PublicDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
- "EventCode": "0x48",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x4003c0091",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/frontend.json b/tools/perf/pmu-events/arch/x86/ivytown/frontend.json
index de72b84b3536..efaa949ead31 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/frontend.json
@@ -20,76 +20,45 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Increment each cycle. # of uops delivered to IDQ from DSB path. Set Cmask = 1 to count cycles.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of delivery.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "IDQ.MS_DSB_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "IDQ.MS_MITE_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ from MS by either DSB or MITE. Set Cmask = 1 to count cycles.",
+ "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_UOPS",
+ "UMask": "0x4",
+ "EventName": "IDQ.MITE_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "PublicDescription": "Increment each cycle. # of uops delivered to IDQ from DSB path. Set Cmask = 1 to count cycles.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_CYCLES",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterMask": "1",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "IDQ.MITE_CYCLES",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of delivery.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_CYCLES",
+ "UMask": "0x10",
+ "EventName": "IDQ.MS_DSB_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
- "CounterMask": "1",
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -138,6 +107,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts cycles MITE is delivered four uops. Set Cmask = 4.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
@@ -160,6 +139,39 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ from MS by either DSB or MITE. Set Cmask = 1 to count cycles.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EdgeDetect": "1",
+ "EventName": "IDQ.MS_SWITCHES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Number of uops delivered to IDQ from any path.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
@@ -206,7 +218,7 @@
"UMask": "0x1",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled ",
+ "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
"CounterHTOff": "0,1,2,3"
},
{
@@ -289,17 +301,5 @@
"SampleAfterValue": "2000003",
"BriefDescription": "Cycles when Decode Stream Buffer (DSB) fill encounter more than 3 Decode Stream Buffer (DSB) lines",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EdgeDetect": "1",
- "EventName": "IDQ.MS_SWITCHES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
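
IDQ.MS_SWITCHES, added above, is IDQ.MS_CYCLES (UMask 0x30, CounterMask 1) with EdgeDetect set, so it counts idle-to-busy transitions of the Microcode Sequencer rather than busy cycles. The same edge-detect semantics in software form, over an illustrative uop trace:

# EdgeDetect in software form; the per-cycle trace is invented.
ms_uops_per_cycle = [0, 0, 4, 4, 0, 2, 2, 2, 0, 3]

busy_cycles = sum(1 for n in ms_uops_per_cycle if n >= 1)   # IDQ.MS_CYCLES
switches = sum(
    1 for prev, cur in zip([0] + ms_uops_per_cycle, ms_uops_per_cycle)
    if prev == 0 and cur >= 1                               # rising edge
)                                                           # IDQ.MS_SWITCHES

print(busy_cycles, switches)  # 6 3
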
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/memory.json b/tools/perf/pmu-events/arch/x86/ivytown/memory.json
index 437d98f3e344..3a7b86af8816 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/memory.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/memory.json
@@ -30,18 +30,6 @@
},
{
"PEBS": "2",
- "EventCode": "0xCD",
- "Counter": "3",
- "UMask": "0x2",
- "EventName": "MEM_TRANS_RETIRED.PRECISE_STORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Sample stores and collect precise store operation via PEBS record. PMC3 only.",
- "PRECISE_STORE": "1",
- "TakenAlone": "1",
- "CounterHTOff": "3"
- },
- {
- "PEBS": "2",
"PublicDescription": "Loads with latency value being above 4.",
"EventCode": "0xCD",
"MSRValue": "0x4",
@@ -153,6 +141,18 @@
"CounterHTOff": "3"
},
{
+ "PEBS": "2",
+ "EventCode": "0xCD",
+ "Counter": "3",
+ "UMask": "0x2",
+ "EventName": "MEM_TRANS_RETIRED.PRECISE_STORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Sample stores and collect precise store operation via PEBS record. PMC3 only.",
+ "PRECISE_STORE": "1",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fffc00244",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/other.json b/tools/perf/pmu-events/arch/x86/ivytown/other.json
index 9c2dd0511a32..4eb83ee40412 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/other.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/other.json
@@ -10,16 +10,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Unhalted core cycles when the thread is not in ring 0.",
- "EventCode": "0x5C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPL_CYCLES.RING123",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"PublicDescription": "Number of intervals between processor halts while thread is in ring 0.",
"EventCode": "0x5C",
"Counter": "0,1,2,3",
@@ -32,6 +22,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Unhalted core cycles when the thread is not in ring 0.",
+ "EventCode": "0x5C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPL_CYCLES.RING123",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Cycles in which the L1D and L2 are locked, due to a UC lock or split lock.",
"EventCode": "0x63",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/pipeline.json b/tools/perf/pmu-events/arch/x86/ivytown/pipeline.json
index 2145c28193f7..0afbfd95ea30 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/pipeline.json
@@ -1,30 +1,41 @@
[
{
"EventCode": "0x00",
- "Counter": "Fixed counter 1",
+ "Counter": "Fixed counter 0",
"UMask": "0x1",
"EventName": "INST_RETIRED.ANY",
"SampleAfterValue": "2000003",
"BriefDescription": "Instructions retired from execution.",
- "CounterHTOff": "Fixed counter 1"
+ "CounterHTOff": "Fixed counter 0"
},
{
"EventCode": "0x00",
- "Counter": "Fixed counter 2",
+ "Counter": "Fixed counter 1",
"UMask": "0x2",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"SampleAfterValue": "2000003",
"BriefDescription": "Core cycles when the thread is not in halt state.",
- "CounterHTOff": "Fixed counter 2"
+ "CounterHTOff": "Fixed counter 1"
},
{
+ "PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
"EventCode": "0x00",
- "Counter": "Fixed counter 3",
+ "Counter": "Fixed counter 1",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state",
+ "CounterHTOff": "Fixed counter 1"
+ },
+ {
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 2",
"UMask": "0x3",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"SampleAfterValue": "2000003",
"BriefDescription": "Reference cycles when the core is not in halt state.",
- "CounterHTOff": "Fixed counter 3"
+ "CounterHTOff": "Fixed counter 2"
},
{
"PublicDescription": "Loads blocked by overlapping with store buffer that cannot be forwarded.",
@@ -78,6 +89,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x0D",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "AnyThread": "1",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Increments each cycle the # of Uops issued by the RAT to RS. Set Cmask = 1, Inv = 1, Any= 1to count stalled cycles of this core.",
"EventCode": "0x0E",
"Counter": "0,1,2,3",
@@ -175,6 +197,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Increments at the frequency of XCLK (100 MHz) when not halted.",
"EventCode": "0x3C",
"Counter": "0,1,2,3",
@@ -187,6 +220,36 @@
{
"EventCode": "0x3C",
"Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted. (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Reference cycles when the thread is unhalted. (counts at 100 MHz rate)",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted. (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
"UMask": "0x2",
"EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "2000003",
@@ -194,6 +257,15 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for S/W prefetch.",
"EventCode": "0x4C",
"Counter": "0,1,2,3",
@@ -216,37 +288,37 @@
{
"EventCode": "0x58",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MOVE_ELIMINATION.INT_NOT_ELIMINATED",
+ "UMask": "0x1",
+ "EventName": "MOVE_ELIMINATION.INT_ELIMINATED",
"SampleAfterValue": "1000003",
- "BriefDescription": "Number of integer Move Elimination candidate uops that were not eliminated.",
+ "BriefDescription": "Number of integer Move Elimination candidate uops that were eliminated.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x58",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "MOVE_ELIMINATION.SIMD_NOT_ELIMINATED",
+ "UMask": "0x2",
+ "EventName": "MOVE_ELIMINATION.SIMD_ELIMINATED",
"SampleAfterValue": "1000003",
- "BriefDescription": "Number of SIMD Move Elimination candidate uops that were not eliminated.",
+ "BriefDescription": "Number of SIMD Move Elimination candidate uops that were eliminated.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x58",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MOVE_ELIMINATION.INT_ELIMINATED",
+ "UMask": "0x4",
+ "EventName": "MOVE_ELIMINATION.INT_NOT_ELIMINATED",
"SampleAfterValue": "1000003",
- "BriefDescription": "Number of integer Move Elimination candidate uops that were eliminated.",
+ "BriefDescription": "Number of integer Move Elimination candidate uops that were not eliminated.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x58",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MOVE_ELIMINATION.SIMD_ELIMINATED",
+ "UMask": "0x8",
+ "EventName": "MOVE_ELIMINATION.SIMD_NOT_ELIMINATED",
"SampleAfterValue": "1000003",
- "BriefDescription": "Number of SIMD Move Elimination candidate uops that were eliminated.",
+ "BriefDescription": "Number of SIMD Move Elimination candidate uops that were not eliminated.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -260,6 +332,18 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x5E",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x87",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -498,118 +582,118 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 1.",
+ "PublicDescription": "Cycles per core when uops are dispatched to port 0.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 1",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 0",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 4.",
+ "PublicDescription": "Cycles which a Uop is dispatched on port 1.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "UMask": "0x2",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 4",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 5.",
+ "PublicDescription": "Cycles per core when uops are dispatched to port 1.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 5",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles per core when uops are dispatched to port 0.",
+ "PublicDescription": "Cycles which a Uop is dispatched on port 2.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_0_CORE",
+ "UMask": "0xc",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 0",
+ "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 2",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles per core when uops are dispatched to port 1.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x2",
+ "UMask": "0xc",
"AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1_CORE",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 1",
+ "BriefDescription": "Uops dispatched to port 2, loads and stores per core (speculative and retired).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles per core when uops are dispatched to port 4.",
+ "PublicDescription": "Cycles which a Uop is dispatched on port 3.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x40",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4_CORE",
+ "UMask": "0x30",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 4",
+ "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 3",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles per core when uops are dispatched to port 5.",
+ "PublicDescription": "Cycles per core when load or STA uops are dispatched to port 3.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x80",
+ "UMask": "0x30",
"AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5_CORE",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 5",
+ "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 3",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 2.",
+ "PublicDescription": "Cycles which a Uop is dispatched on port 4.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0xc",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "UMask": "0x40",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 2",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 3.",
+ "PublicDescription": "Cycles per core when uops are dispatched to port 4.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "UMask": "0x40",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 3",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles which a Uop is dispatched on port 5.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0xc",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2_CORE",
+ "UMask": "0x80",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops dispatched to port 2, loads and stores per core (speculative and retired).",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 5",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles per core when load or STA uops are dispatched to port 3.",
+ "PublicDescription": "Cycles per core when uops are dispatched to port 5.",
"EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x30",
+ "UMask": "0x80",
"AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3_CORE",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 3",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 5",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -662,15 +746,14 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles with pending L1 cache miss loads. Set AnyThread to count per core.",
"EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0x8",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with pending L1 cache miss loads.",
- "CounterMask": "8",
- "CounterHTOff": "2"
+ "BriefDescription": "Cycles while L2 cache miss load* is outstanding.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PublicDescription": "Cycles with pending memory loads. Set AnyThread to count per core.",
@@ -684,13 +767,33 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
"PublicDescription": "Total execution stalls.",
"EventCode": "0xA3",
"Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Total execution stalls",
+ "BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Total execution stalls.",
"CounterMask": "4",
"CounterHTOff": "0,1,2,3"
},
@@ -708,6 +811,16 @@
{
"EventCode": "0xA3",
"Counter": "0,1,2,3",
+ "UMask": "0x5",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while L2 cache miss load* is outstanding.",
+ "CounterMask": "5",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
"UMask": "0x6",
"EventName": "CYCLE_ACTIVITY.STALLS_LDM_PENDING",
"SampleAfterValue": "2000003",
@@ -716,6 +829,37 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x6",
+ "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Cycles with pending L1 cache miss loads. Set AnyThread to count per core.",
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0x8",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with pending L1 cache miss loads.",
+ "CounterMask": "8",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0x8",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "8",
+ "CounterHTOff": "2"
+ },
+ {
"PublicDescription": "Execution stalls due to L1 data cache miss loads. Set Cmask=0CH.",
"EventCode": "0xA3",
"Counter": "2",
@@ -727,6 +871,16 @@
"CounterHTOff": "2"
},
{
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0xc",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CounterMask": "12",
+ "CounterHTOff": "2"
+ },
+ {
"EventCode": "0xA8",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -747,6 +901,17 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "EventCode": "0xA8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_4_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts total number of uops to be executed per-thread each cycle. Set Cmask = 1, INV =1 to count stall cycles.",
"EventCode": "0xB1",
"Counter": "0,1,2,3",
@@ -757,6 +922,61 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Counts total number of uops to be executed per-core each cycle.",
"EventCode": "0xB1",
"Counter": "0,1,2,3",
@@ -767,15 +987,59 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
"EventCode": "0xB1",
- "Invert": "1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PublicDescription": "Number of instructions at retirement.",
@@ -809,24 +1073,12 @@
},
{
"PEBS": "1",
- "PublicDescription": "Counts the number of micro-ops retired, Use cmask=1 and invert to count active cycles or stalled cycles.",
"EventCode": "0xC2",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "UOPS_RETIRED.ALL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Actually retired uops. ",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Counts the number of retirement slots used each cycle.",
- "EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Retirement slots used. ",
+ "BriefDescription": "Retired uops.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -864,6 +1116,27 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PEBS": "1",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Retirement slots used.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Number of self-modifying-code machine clears detected.",
"EventCode": "0xC3",
"Counter": "0,1,2,3",
@@ -880,50 +1153,67 @@
"UMask": "0x20",
"EventName": "MACHINE_CLEARS.MASKMOV",
"SampleAfterValue": "100003",
- "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0. ",
+ "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Branch instructions at retirement.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All (macro) branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PEBS": "1",
- "PublicDescription": "Counts the number of conditional branch instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
"SampleAfterValue": "400009",
- "BriefDescription": "Conditional branch instructions retired. ",
+ "BriefDescription": "Conditional branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PEBS": "1",
- "PublicDescription": "Direct and indirect near call instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x2",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
"SampleAfterValue": "100007",
- "BriefDescription": "Direct and indirect near call instructions retired. ",
+ "BriefDescription": "Direct and indirect near call instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Branch instructions at retirement.",
+ "PEBS": "1",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "UMask": "0x2",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
"SampleAfterValue": "400009",
"BriefDescription": "All (macro) branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
- "PublicDescription": "Counts the number of near return instructions retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x8",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
"SampleAfterValue": "100007",
- "BriefDescription": "Return instructions retired. ",
+ "BriefDescription": "Return instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -933,18 +1223,17 @@
"UMask": "0x10",
"EventName": "BR_INST_RETIRED.NOT_TAKEN",
"SampleAfterValue": "400009",
- "BriefDescription": "Not taken branch instructions retired. ",
+ "BriefDescription": "Not taken branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"PEBS": "1",
- "PublicDescription": "Number of near taken branches retired.",
"EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x20",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
"SampleAfterValue": "400009",
- "BriefDescription": "Taken branch instructions retired. ",
+ "BriefDescription": "Taken branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -954,28 +1243,7 @@
"UMask": "0x40",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"SampleAfterValue": "100007",
- "BriefDescription": "Far branch instructions retired. ",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PEBS": "2",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
- "SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Mispredicted conditional branch instructions retired.",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BR_MISP_RETIRED.CONDITIONAL",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted conditional branch instructions retired. ",
+ "BriefDescription": "Far branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -990,13 +1258,12 @@
},
{
"PEBS": "1",
- "PublicDescription": "Mispredicted taken branch instructions retired.",
"EventCode": "0xC5",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "UMask": "0x1",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
"SampleAfterValue": "400009",
- "BriefDescription": "number of near branch instructions retired that were mispredicted and taken. ",
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -1010,6 +1277,16 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"PublicDescription": "Count cases of saving new LBR records by hardware.",
"EventCode": "0xCC",
"Counter": "0,1,2,3",
@@ -1028,280 +1305,5 @@
"SampleAfterValue": "100003",
"BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x5E",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "RS_EVENTS.EMPTY_END",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "MACHINE_CLEARS.COUNT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of machine clears (nukes) of any type.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
- "EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_4_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0x8",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "CounterMask": "8",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L2 cache miss load* is outstanding.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Total execution stalls.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0xc",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
- "CounterMask": "12",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x5",
- "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L2 cache miss load* is outstanding.",
- "CounterMask": "5",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x6",
- "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "EventCode": "0x00",
- "Counter": "Fixed counter 2",
- "UMask": "0x2",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state",
- "CounterHTOff": "Fixed counter 2"
- },
- {
- "PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted. (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x0D",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "AnyThread": "1",
- "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles with no micro-ops executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Reference cycles when the thread is unhalted. (counts at 100 MHz rate)",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted. (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
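The CYCLE_ACTIVITY entries rearranged above follow a simple composition rule: a combined stall event's UMask is the bitwise OR of its component umasks, and its CounterMask equals the numeric value of that UMask (e.g. STALLS_L2_MISS = STALLS_TOTAL 0x4 | CYCLES_L2_MISS 0x1, with CounterMask 5). A minimal Python sketch that checks this invariant against the JSON; the file path is an assumption, and the check covers only the events named here:

    import json

    # Load the event list (path assumed relative to a kernel tree).
    with open("tools/perf/pmu-events/arch/x86/ivytown/pipeline.json") as f:
        by_name = {e["EventName"]: e for e in json.load(f)}

    def umask(name):
        return int(by_name[name]["UMask"], 16)

    # Combined stall umask == OR of its components; CounterMask == UMask value.
    assert umask("CYCLE_ACTIVITY.STALLS_L2_MISS") == (
        umask("CYCLE_ACTIVITY.STALLS_TOTAL") | umask("CYCLE_ACTIVITY.CYCLES_L2_MISS"))
    assert int(by_name["CYCLE_ACTIVITY.STALLS_L2_MISS"]["CounterMask"]) == 0x5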
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/virtual-memory.json b/tools/perf/pmu-events/arch/x86/ivytown/virtual-memory.json
index c8de548b78fa..4645e9d3f460 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/virtual-memory.json
@@ -1,5 +1,15 @@
[
{
+ "PublicDescription": "Misses in all TLB levels that cause a page walk of any page size from demand loads.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x81",
+ "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes an page walk of any page size.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x82",
@@ -9,6 +19,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Misses in all TLB levels that caused page walk completed of any size by demand loads.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x82",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x84",
@@ -18,6 +38,16 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycle PMH is busy with a walk due to demand loads.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x84",
+ "EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Demand load cycles page miss handler (PMH) is busy with this walk.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x88",
@@ -164,35 +194,5 @@
"SampleAfterValue": "100007",
"BriefDescription": "STLB flush attempts",
"CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Misses in all TLB levels that cause a page walk of any page size from demand loads.",
- "EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes an page walk of any page size.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Misses in all TLB levels that caused page walk completed of any size by demand loads.",
- "EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x82",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycle PMH is busy with a walk due to demand loads.",
- "EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x84",
- "EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Demand load cycles page miss handler (PMH) is busy with this walk.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
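Each of these DTLB_LOAD_MISSES entries maps directly onto perf's raw-event syntax, where the low byte of the config is the event code and the next byte the umask. A small sketch of that encoding (the helper function is illustrative, not part of the perf tooling):

    def raw_event(event_code: int, umask: int) -> str:
        """Build perf's rUUEE raw-event token: umask in bits 15:8, event in bits 7:0."""
        return "r{:04x}".format((umask << 8) | event_code)

    # DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK: EventCode 0x08, UMask 0x81.
    assert raw_event(0x08, 0x81) == "r8108"   # usable as: perf stat -e r8108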
diff --git a/tools/perf/pmu-events/arch/x86/mapfile.csv b/tools/perf/pmu-events/arch/x86/mapfile.csv
index fe1a2c47cabf..93656f2fd53a 100644
--- a/tools/perf/pmu-events/arch/x86/mapfile.csv
+++ b/tools/perf/pmu-events/arch/x86/mapfile.csv
@@ -23,10 +23,7 @@ GenuineIntel-6-1E,v2,nehalemep,core
GenuineIntel-6-1F,v2,nehalemep,core
GenuineIntel-6-1A,v2,nehalemep,core
GenuineIntel-6-2E,v2,nehalemex,core
-GenuineIntel-6-4E,v24,skylake,core
-GenuineIntel-6-5E,v24,skylake,core
-GenuineIntel-6-8E,v24,skylake,core
-GenuineIntel-6-9E,v24,skylake,core
+GenuineIntel-6-[4589]E,v24,skylake,core
GenuineIntel-6-37,v13,silvermont,core
GenuineIntel-6-4D,v13,silvermont,core
GenuineIntel-6-4C,v13,silvermont,core
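The mapfile change above folds the four Skylake client rows into one regular expression; perf treats the first CSV column as a regex when matching the running CPU's family-model string. A quick check that the bracket expression covers exactly the models that were removed:

    import re

    pattern = re.compile(r"GenuineIntel-6-[4589]E")
    assert all(pattern.fullmatch(f"GenuineIntel-6-{m}E") for m in "4589")
    assert not pattern.fullmatch("GenuineIntel-6-7E")   # other models stay unmatched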
diff --git a/tools/perf/pmu-events/arch/x86/silvermont/cache.json b/tools/perf/pmu-events/arch/x86/silvermont/cache.json
index 0bd1bc5302de..82be7d1b8b81 100644
--- a/tools/perf/pmu-events/arch/x86/silvermont/cache.json
+++ b/tools/perf/pmu-events/arch/x86/silvermont/cache.json
@@ -36,12 +36,13 @@
"BriefDescription": "L2 cache request misses"
},
{
+ "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ICache miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ICache miss. Note: this event is not the same as the total number of cycles spent retrieving instruction cache lines from the memory hierarchy.\r\nCounts cycles that fetch is stalled due to any reason. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes. This will include cycles due to an ITLB miss, ICache miss and other events. \r\n",
"EventCode": "0x86",
"Counter": "0,1",
"UMask": "0x4",
"EventName": "FETCH_STALL.ICACHE_FILL_PENDING_CYCLES",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of cycles the NIP stalls because of an icache miss. This is a cumulative count of cycles the NIP stalled for all icache misses."
+ "BriefDescription": "Cycles code-fetch stalled due to an outstanding ICache miss."
},
{
"PEBS": "1",
diff --git a/tools/perf/pmu-events/arch/x86/skylake/cache.json b/tools/perf/pmu-events/arch/x86/skylake/cache.json
index 0551a9ba865d..54bfe9e4045c 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/cache.json
@@ -1,442 +1,6 @@
[
{
- "PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x11",
- "EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load instructions that miss the STLB.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x12",
- "EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired store instructions that miss the STLB.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1",
- "L1_Hit_Indication": "1"
- },
- {
- "PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x21",
- "EventName": "MEM_INST_RETIRED.LOCK_LOADS",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired load instructions with locked access.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load instructions that split across a cacheline boundary.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x42",
- "EventName": "MEM_INST_RETIRED.SPLIT_STORES",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired store instructions that split across a cacheline boundary.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1",
- "L1_Hit_Indication": "1"
- },
- {
- "PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "MEM_INST_RETIRED.ALL_LOADS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "All retired load instructions.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x82",
- "EventName": "MEM_INST_RETIRED.ALL_STORES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "All retired store instructions.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1",
- "L1_Hit_Indication": "1"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Retired load instructions with L1 cache hits as data sources.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MEM_LOAD_RETIRED.L1_HIT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Retired load instructions with L1 cache hits as data sources",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Retired load instructions with L2 cache hits as data sources.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MEM_LOAD_RETIRED.L2_HIT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load instructions with L2 cache hits as data sources",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Retired load instructions with L3 cache hits as data sources.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MEM_LOAD_RETIRED.L3_HIT",
- "SampleAfterValue": "50021",
- "BriefDescription": "Retired load instructions with L3 cache hits as data sources",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Retired load instructions missed L1 cache as data sources.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "MEM_LOAD_RETIRED.L1_MISS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load instructions missed L1 cache as data sources",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Retired load instructions missed L2 cache as data sources.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "MEM_LOAD_RETIRED.L2_MISS",
- "SampleAfterValue": "50021",
- "BriefDescription": "Retired load instructions missed L2 cache as data sources",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Retired load instructions missed L3 cache as data sources.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "MEM_LOAD_RETIRED.L3_MISS",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired load instructions missed L3 cache as data sources",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Retired load instructions which data sources were load missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "MEM_LOAD_RETIRED.FB_HIT",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired load instructions which data sources were load missed L1 but hit FB due to preceding miss to the same cache line with data not ready",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load instructions which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Retired load instructions which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load instructions which data sources were L3 and cross-core snoop hits in on-pkg core cache",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Retired load instructions which data sources were HitM responses from shared L3.",
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load instructions which data sources were HitM responses from shared L3",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Retired load instructions which data sources were hits in L3 without snoops required.",
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load instructions which data sources were hits in L3 without snoops required",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "EventCode": "0xD4",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MEM_LOAD_MISC_RETIRED.UC",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions with at least 1 uncacheable load or lock.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
- "PublicDescription": "This event counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
- "EventCode": "0x51",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L1D.REPLACEMENT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "L1D data line replacements",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand\n from the demand Hit FB, if it is allocated by hardware or software prefetch.\nNote: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
- "EventCode": "0x48",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L1D_PEND_MISS.PENDING",
- "SampleAfterValue": "2000003",
- "BriefDescription": "L1D miss outstandings duration in cycles",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x48",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times a request needed a FB entry but there was no entry available for it. That is the FB unavailability was dominant reason for blocking the request. A request includes cacheable/uncacheable demands that is load, store or SW prefetch.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts duration of L1D miss outstanding in cycles.",
- "EventCode": "0x48",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with L1D load Misses outstanding.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand Data Read requests sent to uncore",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts both cacheable and noncachaeble code read requests.",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Cacheable and noncachaeble code read requests",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand and prefetch data reads",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts memory transactions reached the super queue including requests initiated by the core, all L3 prefetches, page walks, and so on.",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Any memory transaction that reached the SQ.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the number of offcore outstanding Demand Data Read transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor. See the corresponding Umask under OFFCORE_REQUESTS.\nNote: A prefetch promoted to Demand is counted from the promotion point.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore, every cycle. ",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the number of offcore outstanding RFO (store) transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the number of cases when the offcore requests buffer cannot take more entries for the core. This can happen when the superqueue does not contain eligible entries, or when L1D writeback pending FIFO requests is full.\nNote: Writeback pending FIFO has six entries.",
- "EventCode": "0xB2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore requests buffer cannot take more entries for this thread core.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts L2 writebacks that access L2 cache.",
- "EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "L2_TRANS.L2_WB",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 writebacks that access L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts core-originated cacheable demand requests that miss the last level cache (LLC). Demand requests include loads, RFOs, and hardware prefetches from L1D, and instruction fetches from IFU.",
- "EventCode": "0x2E",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "Errata": "SKL057",
- "EventName": "LONGEST_LAT_CACHE.MISS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Core-originated cacheable demand requests missed L3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts core-originated cacheable demand requests that refer to the last level cache (LLC). Demand requests include loads, RFOs, and hardware prefetches from L1D, and instruction fetches from IFU.",
- "EventCode": "0x2E",
- "Counter": "0,1,2,3",
- "UMask": "0x4f",
- "Errata": "SKL057",
- "EventName": "LONGEST_LAT_CACHE.REFERENCE",
- "SampleAfterValue": "100003",
- "BriefDescription": "Core-originated cacheable demand requests that refer to L3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the number of cache line split locks sent to the uncore.",
- "EventCode": "0xF4",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "SQ_MISC.SPLIT_LOCK",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of cache line split locks sent to uncore.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "EventCode": "0xB7, 0xBB",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE",
- "SampleAfterValue": "100003",
- "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PublicDescription": "This event counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
+ "PublicDescription": "Counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
"UMask": "0x21",
@@ -446,122 +10,123 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of demand Data Read requests that hit L2 cache. Only not rejected loads are counted.",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "UMask": "0x22",
+ "EventName": "L2_RQSTS.RFO_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "BriefDescription": "RFO requests that miss L2 cache",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
+ "PublicDescription": "Counts L2 cache misses when fetching instructions.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0xe1",
- "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "UMask": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests",
+ "BriefDescription": "L2 cache misses when fetching instructions",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
+ "PublicDescription": "Demand requests that miss L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0xe2",
- "EventName": "L2_RQSTS.ALL_RFO",
+ "UMask": "0x27",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "RFO requests to L2 cache",
+ "BriefDescription": "Demand requests that miss L2 cache",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the total number of L2 code requests.",
+ "PublicDescription": "Counts requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that miss L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0xe4",
- "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "UMask": "0x38",
+ "EventName": "L2_RQSTS.PF_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 code requests",
+ "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that miss L2 cache",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the total number of requests from the L2 hardware prefetchers.",
+ "PublicDescription": "All requests that miss L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0xf8",
- "EventName": "L2_RQSTS.ALL_PF",
+ "UMask": "0x3f",
+ "EventName": "L2_RQSTS.MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches",
+ "BriefDescription": "All requests that miss L2 cache",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that miss L2 cache.",
+ "PublicDescription": "Counts the number of demand Data Read requests that hit L2 cache. Only non rejected loads are counted.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x38",
- "EventName": "L2_RQSTS.PF_MISS",
+ "UMask": "0x41",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
"SampleAfterValue": "200003",
- "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that miss L2 cache",
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that hit L2 cache.",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0xd8",
- "EventName": "L2_RQSTS.PF_HIT",
+ "UMask": "0x42",
+ "EventName": "L2_RQSTS.RFO_HIT",
"SampleAfterValue": "200003",
- "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that hit L2 cache",
+ "BriefDescription": "RFO requests that hit L2 cache",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "RFO requests that hit L2 cache.",
+ "PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x42",
- "EventName": "L2_RQSTS.RFO_HIT",
+ "UMask": "0x44",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
"SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that hit L2 cache",
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "RFO requests that miss L2 cache.",
+ "PublicDescription": "Counts requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that hit L2 cache.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x22",
- "EventName": "L2_RQSTS.RFO_MISS",
+ "UMask": "0xd8",
+ "EventName": "L2_RQSTS.PF_HIT",
"SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that miss L2 cache",
+ "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that hit L2 cache",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x44",
- "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "UMask": "0xe1",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "BriefDescription": "Demand Data Read requests",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "L2 cache misses when fetching instructions.",
+ "PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x24",
- "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "UMask": "0xe2",
+ "EventName": "L2_RQSTS.ALL_RFO",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 cache misses when fetching instructions",
+ "BriefDescription": "RFO requests to L2 cache",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Demand requests that miss L2 cache.",
+ "PublicDescription": "Counts the total number of L2 code requests.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x27",
- "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
+ "UMask": "0xe4",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
"SampleAfterValue": "200003",
- "BriefDescription": "Demand requests that miss L2 cache",
+ "BriefDescription": "L2 code requests",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -575,13 +140,13 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "All requests that miss L2 cache.",
+ "PublicDescription": "Counts the total number of requests from the L2 hardware prefetchers.",
"EventCode": "0x24",
"Counter": "0,1,2,3",
- "UMask": "0x3f",
- "EventName": "L2_RQSTS.MISS",
+ "UMask": "0xf8",
+ "EventName": "L2_RQSTS.ALL_PF",
"SampleAfterValue": "200003",
- "BriefDescription": "All requests that miss L2 cache",
+ "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -595,62 +160,45 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L2_LINES_OUT.SILENT",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared or Exclusive state. A non-threaded event.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L2_LINES_OUT.NON_SILENT",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines are in Modified state. Modified lines are written back to L3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache.",
- "EventCode": "0xF2",
+ "PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches from L1 and L2. It does not include all misses to the L3.",
+ "EventCode": "0x2E",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_LINES_OUT.USELESS_PREF",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache",
+ "UMask": "0x41",
+ "Errata": "SKL057",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Core-originated cacheable demand requests missed L3",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
- "EventCode": "0xF1",
+ "PublicDescription": "Counts core-originated cacheable requests to the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches from L1 and L2. It does not include all accesses to the L3.",
+ "EventCode": "0x2E",
"Counter": "0,1,2,3",
- "UMask": "0x1f",
- "EventName": "L2_LINES_IN.ALL",
+ "UMask": "0x4f",
+ "Errata": "SKL057",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
"SampleAfterValue": "100003",
- "BriefDescription": "L2 cache lines filling L2",
+ "BriefDescription": "Core-originated cacheable demand requests that refer to L3",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
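The two 0x2E events above are meant to be read as a pair: MISS (UMask 0x41) over REFERENCE (UMask 0x4f) gives the L3 miss ratio for core-originated cacheable requests. A minimal sketch of the derived metric, assuming the two raw counts have already been collected as in the previous example (the variable names are illustrative):

#include <stdint.h>

/* L3 miss ratio from LONGEST_LAT_CACHE.MISS / LONGEST_LAT_CACHE.REFERENCE. */
static double l3_miss_ratio(uint64_t l3_miss, uint64_t l3_ref)
{
    /* Guard against a zero denominator on an idle interval. */
    return l3_ref ? (double)l3_miss / (double)l3_ref : 0.0;
}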
{
- "PublicDescription": "This event counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
+ "PublicDescription": "Counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch.Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
+ "EventCode": "0x48",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
+ "UMask": "0x1",
+ "EventName": "L1D_PEND_MISS.PENDING",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore.",
- "CounterMask": "1",
+ "BriefDescription": "L1D miss outstandings duration in cycles",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
+ "PublicDescription": "Counts duration of L1D miss outstanding in cycles.",
+ "EventCode": "0x48",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "UMask": "0x1",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore.",
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
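PENDING and PENDING_CYCLES above share EventCode 0x48 and UMask 0x1; the only difference is that PENDING_CYCLES adds CounterMask 1, so it counts cycles in which at least one fill buffer is outstanding rather than summing the occupancy itself. Dividing the two therefore yields the average number of outstanding L1D misses per miss cycle. A sketch, assuming the usual Intel PERFEVTSELx layout where the counter mask occupies bits 24-31 of the raw config (perf's cmask term):

#include <stdint.h>

/* Raw config for an Intel core event: event in bits 0-7,
 * umask in bits 8-15, cmask in bits 24-31. */
static uint64_t raw_config(uint8_t event, uint8_t umask, uint8_t cmask)
{
    return (uint64_t)event | ((uint64_t)umask << 8) | ((uint64_t)cmask << 24);
}

/* L1D_PEND_MISS.PENDING and its CounterMask=1 variant PENDING_CYCLES. */
#define L1D_PEND_MISS_PENDING        raw_config(0x48, 0x1, 0)
#define L1D_PEND_MISS_PENDING_CYCLES raw_config(0x48, 0x1, 1)

/* Average outstanding L1D misses per cycle that had any miss pending. */
static double avg_outstanding(uint64_t pending, uint64_t pending_cycles)
{
    return pending_cycles ? (double)pending / (double)pending_cycles : 0.0;
}

The JSON field CounterMask maps directly onto perf's cmask event term, e.g. cpu/event=0x48,umask=0x1,cmask=1/.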
@@ -666,3121 +214,483 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x60",
+ "PublicDescription": "Number of times a request needed a FB (Fill Buffer) entry but there was no entry available for it. A request includes cacheable/uncacheable demands that are load, store or SW prefetch instructions.",
+ "EventCode": "0x48",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "UMask": "0x2",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_LINES_OUT.USELESS_HWPF",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache",
+ "BriefDescription": "Number of times a request needed a FB entry but there was no entry available for it. That is the FB unavailability was dominant reason for blocking the request. A request includes cacheable/uncacheable demands that is load, store or SW prefetch.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0408000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L4_HIT_LOCAL_L4 & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000408000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L4_HIT_LOCAL_L4 & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400408000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L4_HIT_LOCAL_L4 & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200408000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L4_HIT_LOCAL_L4 & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100408000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L4_HIT_LOCAL_L4 & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080408000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L4_HIT_LOCAL_L4 & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040408000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L4_HIT_LOCAL_L4 & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc01c8000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10001c8000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04001c8000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02001c8000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests that hit in the L3 and the snoops sent to sibling cores return clean response.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01001c8000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00801c8000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00401c8000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0108000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_S & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000108000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_S & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400108000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_S & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200108000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_S & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100108000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_S & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080108000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_S & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040108000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_S & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0088000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_E & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000088000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_E & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400088000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_E & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200088000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_E & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100088000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_E & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080088000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_E & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040088000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_E & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0048000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_M & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000048000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_M & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400048000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_M & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200048000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_M & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100048000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_M & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080048000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_M & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040048000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_M & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0028000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & SUPPLIER_NONE & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000028000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400028000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200028000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100028000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080028000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040028000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & SUPPLIER_NONE & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000018000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests that have any response type.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0400800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L4_HIT_LOCAL_L4.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L4_HIT_LOCAL_L4 & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000400800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L4_HIT_LOCAL_L4.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L4_HIT_LOCAL_L4 & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400400800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L4_HIT_LOCAL_L4 & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200400800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L4_HIT_LOCAL_L4.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L4_HIT_LOCAL_L4 & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100400800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L4_HIT_LOCAL_L4 & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080400800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L4_HIT_LOCAL_L4.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L4_HIT_LOCAL_L4 & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040400800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L4_HIT_LOCAL_L4.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L4_HIT_LOCAL_L4 & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc01c0800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10001c0800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04001c0800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts streaming stores that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02001c0800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts streaming stores that hit in the L3 and the snoops sent to sibling cores return clean response.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01001c0800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts streaming stores that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00801c0800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00401c0800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0100800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_S.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_S & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000100800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_S.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_S & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400100800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_S.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_S & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200100800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_S.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_S & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100100800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_S.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_S & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080100800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_S.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_S & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040100800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_S.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_S & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0080800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_E.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_E & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000080800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_E.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_E & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400080800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_E.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_E & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200080800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_E.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_E & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100080800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_E.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_E & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080080800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_E.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_E & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040080800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_E.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_E & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0040800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_M.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_M & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000040800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_M.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_M & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400040800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_M.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_M & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200040800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_M.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_M & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100040800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_M.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_M & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080040800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_M.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_M & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040040800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_M.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_M & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0020800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & SUPPLIER_NONE & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & SUPPLIER_NONE & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & SUPPLIER_NONE & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & SUPPLIER_NONE & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040020800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.SUPPLIER_NONE.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & SUPPLIER_NONE & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts streaming stores that have any response type.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0400100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L4_HIT_LOCAL_L4.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L4_HIT_LOCAL_L4 & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000400100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L4_HIT_LOCAL_L4.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L4_HIT_LOCAL_L4 & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400400100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L4_HIT_LOCAL_L4 & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200400100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L4_HIT_LOCAL_L4.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L4_HIT_LOCAL_L4 & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100400100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L4_HIT_LOCAL_L4 & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080400100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L4_HIT_LOCAL_L4.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L4_HIT_LOCAL_L4 & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040400100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L4_HIT_LOCAL_L4.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L4_HIT_LOCAL_L4 & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc01c0100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10001c0100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04001c0100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02001c0100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoops sent to sibling cores return clean response.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01001c0100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00801c0100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00401c0100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0100100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_S & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000100100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_S & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400100100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_S & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200100100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_S & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100100100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_S & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080100100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_S & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040100100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_S & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0080100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_E & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000080100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_E & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400080100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_E & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200080100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_E & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100080100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_E & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080080100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_E & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040080100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_E & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0040100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_M & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000040100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_M & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400040100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_M & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200040100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_M & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100040100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_M & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080040100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_M & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040040100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_M & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0020100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040020100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that have any response type.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0400080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L4_HIT_LOCAL_L4.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L4_HIT_LOCAL_L4 & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000400080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400400080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200400080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100400080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080400080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040400080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L4_HIT_LOCAL_L4.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L4_HIT_LOCAL_L4 & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc01c0080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10001c0080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04001c0080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02001c0080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoops sent to sibling cores return clean response.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01001c0080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00801c0080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00401c0080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0100080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_S & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000100080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_S & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400100080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_S & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200100080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_S & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100100080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_S & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080100080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_S & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040100080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_S & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0080080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_E & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000080080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_E & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400080080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_E & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200080080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_E & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100080080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_E & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080080080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_E & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040080080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_E & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0040080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_M & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000040080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_M & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400040080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_M & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200040080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_M & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100040080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_M & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080040080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_M & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040040080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_M & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0020080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040020080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that have any response type.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0400004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L4_HIT_LOCAL_L4 & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000400004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L4_HIT_LOCAL_L4 & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400400004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L4_HIT_LOCAL_L4 & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200400004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L4_HIT_LOCAL_L4 & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100400004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L4_HIT_LOCAL_L4 & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080400004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L4_HIT_LOCAL_L4 & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040400004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L4_HIT_LOCAL_L4 & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc01c0004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10001c0004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04001c0004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02001c0004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoops sent to sibling cores return clean response.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01001c0004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00801c0004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00401c0004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0100004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_S & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000100004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_S & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400100004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_S & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200100004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_S & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100100004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_S & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080100004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_S & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040100004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_S & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0080004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_E & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000080004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_E & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400080004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_E & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200080004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_E & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100080004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_E & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080080004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_E & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040080004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_E & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0040004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_M & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000040004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_M & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400040004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_M & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200040004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_M & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100040004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_M & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080040004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_M & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040040004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_M & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0020004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040020004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that have any response type.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0400002 ",
+ "PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
+ "EventCode": "0x51",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L4_HIT_LOCAL_L4 & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "EventName": "L1D.REPLACEMENT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "L1D data line replacements",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000400002 ",
+ "PublicDescription": "Counts the number of offcore outstanding Demand Data Read transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor. See the corresponding Umask under OFFCORE_REQUESTS.Note: A prefetch promoted to Demand is counted from the promotion point.",
+ "EventCode": "0x60",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L4_HIT_LOCAL_L4 & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400400002 ",
+ "PublicDescription": "Counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
+ "EventCode": "0x60",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L4_HIT_LOCAL_L4 & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200400002 ",
+ "EventCode": "0x60",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L4_HIT_LOCAL_L4 & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100400002 ",
+ "PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L4_HIT_LOCAL_L4 & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore, every cycle.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080400002 ",
+ "PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L4_HIT_LOCAL_L4 & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040400002 ",
+ "PublicDescription": "Counts the number of offcore outstanding RFO (store) transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L4_HIT_LOCAL_L4 & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc01c0002 ",
+ "PublicDescription": "Counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10001c0002 ",
+ "PublicDescription": "Counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04001c0002 ",
+ "PublicDescription": "Counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "EventCode": "0x60",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02001c0002 ",
+ "PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
+ "EventCode": "0xB0",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops sent to sibling cores return clean response.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Demand Data Read requests sent to uncore",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01001c0002 ",
+ "PublicDescription": "Counts both cacheable and non-cacheable code read requests.",
+ "EventCode": "0xB0",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Cacheable and noncachaeble code read requests",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00801c0002 ",
+ "PublicDescription": "Counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
+ "EventCode": "0xB0",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00401c0002 ",
+ "PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
+ "EventCode": "0xB0",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Demand and prefetch data reads",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0100002 ",
+ "PublicDescription": "Counts memory transactions reached the super queue including requests initiated by the core, all L3 prefetches, page walks, etc..",
+ "EventCode": "0xB0",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "UMask": "0x80",
+ "EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_S & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Any memory transaction that reached the SQ.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000100002 ",
+ "PublicDescription": "Counts the number of cases when the offcore requests buffer cannot take more entries for the core. This can happen when the superqueue does not contain eligible entries, or when L1D writeback pending FIFO requests is full.Note: Writeback pending FIFO has six entries.",
+ "EventCode": "0xB2",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_S & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore requests buffer cannot take more entries for this thread core.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400100002 ",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_S & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
+ "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction",
"CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200100002 ",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions that miss the STLB.",
+ "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
+ "UMask": "0x11",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_S & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Retired load instructions that miss the STLB. (Precise Event)",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100100002 ",
+ "PEBS": "1",
+ "PublicDescription": "Retired store instructions that miss the STLB.",
+ "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "UMask": "0x12",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_S & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Retired store instructions that miss the STLB. (Precise Event)",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1",
+ "L1_Hit_Indication": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080100002 ",
+ "PEBS": "1",
+ "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_S & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x21",
+ "EventName": "MEM_INST_RETIRED.LOCK_LOADS",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired load instructions with locked access. (Precise Event)",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040100002 ",
+ "PEBS": "1",
+ "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
+ "UMask": "0x41",
+ "EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_S & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Retired load instructions that split across a cacheline boundary. (Precise Event)",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0080002 ",
+ "PEBS": "1",
+ "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
+ "UMask": "0x42",
+ "EventName": "MEM_INST_RETIRED.SPLIT_STORES",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_E & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Retired store instructions that split across a cacheline boundary. (Precise Event)",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1",
+ "L1_Hit_Indication": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000080002 ",
+ "PEBS": "1",
+ "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_E & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x81",
+ "EventName": "MEM_INST_RETIRED.ALL_LOADS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "All retired load instructions. (Precise Event)",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400080002 ",
+ "PEBS": "1",
+ "PublicDescription": "All retired store instructions.",
+ "EventCode": "0xD0",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_E & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x82",
+ "EventName": "MEM_INST_RETIRED.ALL_STORES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "All retired store instructions. (Precise Event)",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1",
+ "L1_Hit_Indication": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200080002 ",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.\r\n",
+ "EventCode": "0xD1",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_E & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "EventName": "MEM_LOAD_RETIRED.L1_HIT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Retired load instructions with L1 cache hits as data sources",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100080002 ",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions with L2 cache hits as data sources.",
+ "EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "UMask": "0x2",
+ "EventName": "MEM_LOAD_RETIRED.L2_HIT",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_E & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Retired load instructions with L2 cache hits as data sources",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080080002 ",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions with L3 cache hits as data sources.",
+ "EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_E & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4",
+ "EventName": "MEM_LOAD_RETIRED.L3_HIT",
+ "SampleAfterValue": "50021",
+ "BriefDescription": "Retired load instructions with L3 cache hits as data sources",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040080002 ",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache.",
+ "EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
+ "UMask": "0x8",
+ "EventName": "MEM_LOAD_RETIRED.L1_MISS",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_E & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Retired load instructions missed L1 cache as data sources",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0040002 ",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions missed L2 cache as data sources.",
+ "EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_M & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x10",
+ "EventName": "MEM_LOAD_RETIRED.L2_MISS",
+ "SampleAfterValue": "50021",
+ "BriefDescription": "Retired load instructions missed L2 cache as data sources",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000040002 ",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions missed L3 cache as data sources.",
+ "EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_M & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x20",
+ "EventName": "MEM_LOAD_RETIRED.L3_MISS",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired load instructions missed L3 cache as data sources",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400040002 ",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready. \r\n",
+ "EventCode": "0xD1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_M & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x40",
+ "EventName": "MEM_LOAD_RETIRED.FB_HIT",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired load instructions which data sources were load missed L1 but hit FB due to preceding miss to the same cache line with data not ready",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200040002 ",
+ "PEBS": "1",
+ "EventCode": "0xD2",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_M & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Retired load instructions which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100040002 ",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "EventCode": "0xD2",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_M & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Retired load instructions which data sources were L3 and cross-core snoop hits in on-pkg core cache",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080040002 ",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions which data sources were HitM responses from shared L3.",
+ "EventCode": "0xD2",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_M & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Retired load instructions which data sources were HitM responses from shared L3",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040040002 ",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions which data sources were hits in L3 without snoops required.",
+ "EventCode": "0xD2",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
+ "UMask": "0x8",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_M & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Retired load instructions which data sources were hits in L3 without snoops required",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0020002 ",
+ "PEBS": "1",
+ "EventCode": "0xD4",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & SUPPLIER_NONE & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4",
+ "EventName": "MEM_LOAD_MISC_RETIRED.UC",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired instructions with at least 1 uncacheable load or lock.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020002 ",
+ "PublicDescription": "Counts L2 writebacks that access L2 cache.",
+ "EventCode": "0xF0",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & SUPPLIER_NONE & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x40",
+ "EventName": "L2_TRANS.L2_WB",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 writebacks that access L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020002 ",
+ "PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
+ "EventCode": "0xF1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "UMask": "0x1f",
+ "EventName": "L2_LINES_IN.ALL",
"SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "L2 cache lines filling L2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020002 ",
+ "EventCode": "0xF2",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & SUPPLIER_NONE & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "EventName": "L2_LINES_OUT.SILENT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared or Exclusive state. A non-threaded event.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020002 ",
+ "EventCode": "0xF2",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2",
+ "EventName": "L2_LINES_OUT.NON_SILENT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines are in Modified state. Modified lines are written back to L3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020002 ",
+ "PublicDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache.",
+ "EventCode": "0xF2",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & SUPPLIER_NONE & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4",
+ "EventName": "L2_LINES_OUT.USELESS_PREF",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040020002 ",
+ "EventCode": "0xF2",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & SUPPLIER_NONE & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4",
+ "EventName": "L2_LINES_OUT.USELESS_HWPF",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010002 ",
+ "PublicDescription": "Counts the number of cache line split locks sent to the uncore.",
+ "EventCode": "0xF4",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "UMask": "0x10",
+ "EventName": "SQ_MISC.SPLIT_LOCK",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that have any response type.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Number of cache line split locks sent to uncore.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fc0400001 ",
"Counter": "0,1,2,3",
@@ -3793,6 +703,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000400001 ",
"Counter": "0,1,2,3",
@@ -3805,6 +716,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400400001 ",
"Counter": "0,1,2,3",
@@ -3817,6 +729,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200400001 ",
"Counter": "0,1,2,3",
@@ -3829,6 +742,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100400001 ",
"Counter": "0,1,2,3",
@@ -3841,6 +755,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080400001 ",
"Counter": "0,1,2,3",
@@ -3853,18 +768,7 @@
"CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040400001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L4_HIT_LOCAL_L4 & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fc01c0001 ",
"Counter": "0,1,2,3",
@@ -3877,6 +781,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x10001c0001 ",
"Counter": "0,1,2,3",
@@ -3889,6 +794,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x04001c0001 ",
"Counter": "0,1,2,3",
@@ -3901,6 +807,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that hit in the L3 and the snoops sent to sibling cores return clean response. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x02001c0001 ",
"Counter": "0,1,2,3",
@@ -3913,6 +820,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts demand data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x01001c0001 ",
"Counter": "0,1,2,3",
@@ -3925,6 +833,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00801c0001 ",
"Counter": "0,1,2,3",
@@ -3937,270 +846,7 @@
"CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00401c0001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0100001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_S & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000100001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_S & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400100001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_S & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200100001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_S & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100100001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_S & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080100001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_S & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040100001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_S & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0080001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_E & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000080001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_E & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400080001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_E & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200080001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_E & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100080001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_E & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080080001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_E & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040080001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_E & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc0040001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_M & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000040001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_M & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400040001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_M & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200040001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_M & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100040001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_M & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080040001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_M & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040040001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_M & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fc0020001 ",
"Counter": "0,1,2,3",
@@ -4213,6 +859,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1000020001 ",
"Counter": "0,1,2,3",
@@ -4225,6 +872,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0400020001 ",
"Counter": "0,1,2,3",
@@ -4237,6 +885,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0200020001 ",
"Counter": "0,1,2,3",
@@ -4249,6 +898,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0100020001 ",
"Counter": "0,1,2,3",
@@ -4261,6 +911,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0080020001 ",
"Counter": "0,1,2,3",
@@ -4273,18 +924,7 @@
"CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040020001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
+ "PublicDescription": "Counts demand data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0000010001 ",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/skylake/floating-point.json b/tools/perf/pmu-events/arch/x86/skylake/floating-point.json
index 3c6b59af5d54..213dd6230cf2 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/floating-point.json
@@ -27,13 +27,12 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
"EventCode": "0xC7",
"Counter": "0,1,2,3",
"UMask": "0x8",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ",
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -55,7 +54,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle the event increments by 1.",
+ "PublicDescription": "Counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle the event increments by 1.",
"EventCode": "0xCA",
"Counter": "0,1,2,3",
"UMask": "0x1e",
diff --git a/tools/perf/pmu-events/arch/x86/skylake/frontend.json b/tools/perf/pmu-events/arch/x86/skylake/frontend.json
index e697dbd63e6e..578dff5bd823 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/frontend.json
@@ -1,62 +1,81 @@
[
{
- "EventCode": "0x80",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "EventCode": "0x79",
"Counter": "0,1,2,3",
"UMask": "0x4",
- "EventName": "ICACHE_16B.IFDATA_STALL",
+ "EventName": "IDQ.MITE_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x83",
+ "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ.",
+ "EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ICACHE_64B.IFTAG_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "Instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "UMask": "0x4",
+ "EventName": "IDQ.MITE_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x83",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
+ "EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "ICACHE_64B.IFTAG_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "Instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x83",
+ "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
+ "EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "ICACHE_64B.IFTAG_STALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "PublicDescription": "Counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "IDQ.MITE_UOPS",
+ "UMask": "0x10",
+ "EventName": "IDQ.MS_DSB_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
+ "PublicDescription": "Counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_UOPS",
+ "UMask": "0x18",
+ "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
+ "CounterMask": "4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
+ "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x18",
+ "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
"UMask": "0x20",
@@ -66,95 +85,99 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may 'bypass' the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "PublicDescription": "Counts the number of cycles 4 uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_CYCLES",
+ "UMask": "0x24",
+ "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterMask": "1",
+ "BriefDescription": "Cycles MITE is delivering 4 Uops",
+ "CounterMask": "4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ.",
+ "PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "IDQ.MITE_CYCLES",
+ "UMask": "0x24",
+ "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "BriefDescription": "Cycles MITE is delivering any Uop",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
+ "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_CYCLES",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
+ "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "IDQ.MS_DSB_CYCLES",
+ "UMask": "0x30",
+ "EdgeDetect": "1",
+ "EventName": "IDQ.MS_SWITCHES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
+ "PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS). Any instruction over 4 uops will be delivered by the MS. Some instructions such as transcendentals may additionally generate uops from the MS.",
"EventCode": "0x79",
"Counter": "0,1,2,3",
- "UMask": "0x18",
- "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
- "CounterMask": "4",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
- "EventCode": "0x79",
+ "PublicDescription": "Cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity.",
+ "EventCode": "0x80",
"Counter": "0,1,2,3",
- "UMask": "0x18",
- "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
+ "UMask": "0x4",
+ "EventName": "ICACHE_16B.IFDATA_STALL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
- "CounterMask": "1",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
- "EventCode": "0x79",
+ "EventCode": "0x83",
"Counter": "0,1,2,3",
- "UMask": "0x24",
- "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles MITE is delivering 4 Uops",
- "CounterMask": "4",
+ "UMask": "0x1",
+ "EventName": "ICACHE_64B.IFTAG_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
- "EventCode": "0x79",
+ "EventCode": "0x83",
"Counter": "0,1,2,3",
- "UMask": "0x24",
- "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles MITE is delivering any Uop",
- "CounterMask": "1",
+ "UMask": "0x2",
+ "EventName": "ICACHE_64B.IFTAG_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x83",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "ICACHE_64B.IFTAG_STALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding ?4 ? x? when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when:\n a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread\n\n b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions)\n \n c. Instruction Decode Queue (IDQ) delivers four uops.",
+ "PublicDescription": "Counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4 x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread. b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions). c. Instruction Decode Queue (IDQ) delivers four uops.",
"EventCode": "0x9C",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -164,7 +187,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles when no uops are delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core =4.",
+ "PublicDescription": "Counts, on the per-thread basis, cycles when no uops are delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core =4.",
"EventCode": "0x9C",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -175,7 +198,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles when less than 1 uop is delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core >=3.",
+ "PublicDescription": "Counts, on the per-thread basis, cycles when less than 1 uop is delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core >= 3.",
"EventCode": "0x9C",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -186,6 +209,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles with less than 2 uops delivered by the front-end.",
"EventCode": "0x9C",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -196,6 +220,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles with less than 3 uops delivered by the front-end.",
"EventCode": "0x9C",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -217,7 +242,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 0?2 cycles.",
+ "PublicDescription": "Counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. MM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.Penalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.",
"EventCode": "0xAB",
"Counter": "0,1,2,3",
"UMask": "0x2",
@@ -228,6 +253,7 @@
},
{
"PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. \r\n",
"EventCode": "0xC6",
"MSRValue": "0x11",
"Counter": "0,1,2,3",
@@ -235,7 +261,7 @@
"EventName": "FRONTEND_RETIRED.DSB_MISS",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired Instructions who experienced decode stream buffer (DSB - the decoded instruction-cache) miss.",
+ "BriefDescription": "Retired Instructions who experienced decode stream buffer (DSB - the decoded instruction-cache) miss. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
@@ -248,7 +274,7 @@
"EventName": "FRONTEND_RETIRED.L1I_MISS",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
+ "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
@@ -261,12 +287,13 @@
"EventName": "FRONTEND_RETIRED.L2_MISS",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
+ "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
"EventCode": "0xC6",
"MSRValue": "0x14",
"Counter": "0,1,2,3",
@@ -274,12 +301,13 @@
"EventName": "FRONTEND_RETIRED.ITLB_MISS",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired Instructions who experienced iTLB true miss.",
+ "BriefDescription": "Retired Instructions who experienced iTLB true miss. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
"EventCode": "0xC6",
"MSRValue": "0x15",
"Counter": "0,1,2,3",
@@ -287,7 +315,7 @@
"EventName": "FRONTEND_RETIRED.STLB_MISS",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
+ "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
@@ -300,7 +328,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
@@ -313,7 +341,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_2",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 2 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 2 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
@@ -326,34 +354,13 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EdgeDetect": "1",
- "EventName": "IDQ.MS_SWITCHES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the total number of uops delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may 'bypass' the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops. \r\n",
"EventCode": "0xC6",
"MSRValue": "0x400806",
"Counter": "0,1,2,3",
@@ -367,6 +374,7 @@
},
{
"PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.\r\n",
"EventCode": "0xC6",
"MSRValue": "0x401006",
"Counter": "0,1,2,3",
@@ -374,12 +382,13 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.\r\n",
"EventCode": "0xC6",
"MSRValue": "0x402006",
"Counter": "0,1,2,3",
@@ -387,7 +396,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
@@ -400,7 +409,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
@@ -413,7 +422,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
@@ -426,7 +435,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
@@ -439,12 +448,13 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
{
"PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.\r\n",
"EventCode": "0xC6",
"MSRValue": "0x100206",
"Counter": "0,1,2,3",
@@ -452,7 +462,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
},
@@ -465,7 +475,7 @@
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_3",
"MSRIndex": "0x3F7",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 3 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 3 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
"TakenAlone": "1",
"CounterHTOff": "0,1,2,3"
}
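
A common use of the IDQ.DSB_UOPS, IDQ.MITE_UOPS and IDQ.MS_UOPS events defined above is a front-end "DSB coverage" ratio. The small C helper below is a hedged sketch; the metric definition (DSB uops over all IDQ-delivered uops) is the conventional one and is not taken from this patch.

#include <stdint.h>

/* Fraction of IDQ uops delivered from the Decode Stream Buffer, given
 * raw counts of the IDQ.DSB_UOPS, IDQ.MITE_UOPS and IDQ.MS_UOPS events.
 */
static double dsb_coverage(uint64_t dsb_uops, uint64_t mite_uops,
			   uint64_t ms_uops)
{
	uint64_t total = dsb_uops + mite_uops + ms_uops;

	return total ? (double)dsb_uops / (double)total : 0.0;
}
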
diff --git a/tools/perf/pmu-events/arch/x86/skylake/memory.json b/tools/perf/pmu-events/arch/x86/skylake/memory.json
index d7fd5b06825b..3bd8b712c889 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/memory.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/memory.json
@@ -1,6 +1,74 @@
[
{
- "PublicDescription": "Unfriendly TSX abort triggered by a flowmarker.",
+ "PublicDescription": "Number of times a TSX line had a cache conflict.",
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "TX_MEM.ABORT_CONFLICT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "TX_MEM.ABORT_CAPACITY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times a transactional abort was signaled due to a data capacity limitation for transactional reads or writes.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock.",
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times a HLE transactional region aborted due to a non XRELEASE prefixed instruction writing to an elided lock in the elision buffer",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty.",
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to NoAllocatedElisionBuffer being non-zero.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch.",
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to XRELEASE lock not satisfying the address and value requirements in the elision buffer",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer.",
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to an unsupported read alignment from the elision buffer.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times we could not allocate Lock Buffer.",
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times HLE lock could not be elided due to ElisionBufferAvailable being zero.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0x5d",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -10,7 +78,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Unfriendly TSX abort triggered by a vzeroupper instruction.",
+ "PublicDescription": "Unfriendly TSX abort triggered by a vzeroupper instruction.",
"EventCode": "0x5d",
"Counter": "0,1,2,3",
"UMask": "0x2",
@@ -50,7 +118,77 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Number of times we entered an HLE region\n does not count nested transactions.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts number of Offcore outstanding Demand Data Read requests that miss L3 cache in the superQ every cycle.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with at least 1 Demand Data Read requests who miss L3 cache in the superQ.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD_GE_6",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with at least 6 Demand Data Read requests that miss L3 cache in the superQ.",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L3_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles while L3 cache miss demand load is outstanding.",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x6",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Demand Data Read requests who miss L3 cache.",
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand Data Read requests who miss L3 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from one of the following:a. memory disambiguation,b. external snoop, orc. cross SMT-HW-thread snoop (stores) hitting load buffer.",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "Errata": "SKL089",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times we entered an HLE region. Does not count nested transactions.",
"EventCode": "0xC8",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -71,7 +209,7 @@
},
{
"PEBS": "1",
- "PublicDescription": "Number of times HLE abort was triggered.",
+ "PublicDescription": "Number of times HLE abort was triggered. (PEBS)",
"EventCode": "0xC8",
"Counter": "0,1,2,3",
"UMask": "0x4",
@@ -99,13 +237,12 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
"EventCode": "0xC8",
"Counter": "0,1,2,3",
"UMask": "0x20",
"EventName": "HLE_RETIRED.ABORTED_UNFRIENDLY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.). ",
+ "BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -128,7 +265,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Number of times we entered an RTM region\n does not count nested transactions.",
+ "PublicDescription": "Number of times we entered an RTM region. Does not count nested transactions.",
"EventCode": "0xC9",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -149,7 +286,7 @@
},
{
"PEBS": "1",
- "PublicDescription": "Number of times RTM abort was triggered.",
+ "PublicDescription": "Number of times RTM abort was triggered. (PEBS)",
"EventCode": "0xC9",
"Counter": "0,1,2,3",
"UMask": "0x4",
@@ -208,17 +345,6 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from one of the following:\n1. memory disambiguation,\n2. external snoop, or\n3. cross SMT-HW-thread snoop (stores) hitting load buffer.",
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "Errata": "SKL089",
- "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"PEBS": "2",
"PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
"EventCode": "0xCD",
@@ -331,1718 +457,7 @@
"CounterHTOff": "0,1,2,3"
},
{
- "PublicDescription": "Number of times a TSX line had a cache conflict.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "TX_MEM.ABORT_CONFLICT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "TX_MEM.ABORT_CAPACITY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times a transactional abort was signaled due to a data capacity limitation for transactional reads or writes.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times a HLE transactional region aborted due to a non XRELEASE prefixed instruction writing to an elided lock in the elision buffer",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE transactional execution aborted due to NoAllocatedElisionBuffer being non-zero.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE transactional execution aborted due to XRELEASE lock not satisfying the address and value requirements in the elision buffer",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE transactional execution aborted due to an unsupported read alignment from the elision buffer.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of times we could not allocate Lock Buffer.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times HLE lock could not be elided due to ElisionBufferAvailable being zero.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Demand Data Read requests who miss L3 cache.",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand Data Read requests who miss L3 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts number of Offcore outstanding Demand Data Read requests that miss L3 cache in the superQ every cycle.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L3_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L3 cache miss demand load is outstanding.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x6",
- "EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with at least 1 Demand Data Read requests who miss L3 cache in the superQ.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD_GE_6",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with at least 6 Demand Data Read requests that miss L3 cache in the superQ.",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3ffc008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x203c008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x103c008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x007c008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc4008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0044008000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000408000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L4_HIT_LOCAL_L4 & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20001c8000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000108000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_S & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000088000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_E & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000048000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & L3_HIT_M & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000028000 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3ffc000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x203c000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x103c000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x007c000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc4000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0044000800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS_LOCAL_DRAM.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_MISS_LOCAL_DRAM & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000400800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L4_HIT_LOCAL_L4 & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20001c0800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000100800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_S.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_S & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000080800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_E.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_E & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000040800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_M.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & L3_HIT_M & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020800 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "STREAMING_STORES & SUPPLIER_NONE & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3ffc000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x203c000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x103c000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x007c000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc4000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0044000100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000400100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L4_HIT_LOCAL_L4 & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20001c0100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000100100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_S & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000080100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_E & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000040100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & L3_HIT_M & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020100 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3ffc000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x203c000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x103c000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x007c000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc4000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0044000080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000400080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20001c0080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000100080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_S & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000080080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_E & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000040080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & L3_HIT_M & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020080 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3ffc000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x203c000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x103c000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x007c000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc4000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0044000004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000400004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L4_HIT_LOCAL_L4 & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20001c0004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000100004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_S & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000080004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_E & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000040004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & L3_HIT_M & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020004 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3ffc000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x203c000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x103c000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043c000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023c000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013c000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00bc000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x007c000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fc4000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0044000002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_MISS_LOCAL_DRAM & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000400002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L4_HIT_LOCAL_L4 & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20001c0002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000100002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_S & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000080002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_E & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000040002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & L3_HIT_M & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020002 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_RFO & SUPPLIER_NONE & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3ffc000001 ",
"Counter": "0,1,2,3",
@@ -2055,18 +470,7 @@
"CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x203c000001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x103c000001 ",
"Counter": "0,1,2,3",
@@ -2079,6 +483,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x043c000001 ",
"Counter": "0,1,2,3",
@@ -2091,6 +496,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x023c000001 ",
"Counter": "0,1,2,3",
@@ -2103,6 +509,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x013c000001 ",
"Counter": "0,1,2,3",
@@ -2115,6 +522,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x00bc000001 ",
"Counter": "0,1,2,3",
@@ -2127,18 +535,7 @@
"CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x007c000001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x3fc4000001 ",
"Counter": "0,1,2,3",
@@ -2151,18 +548,7 @@
"CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x1004000001 ",
"Counter": "0,1,2,3",
@@ -2175,6 +561,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0404000001 ",
"Counter": "0,1,2,3",
@@ -2187,6 +574,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0204000001 ",
"Counter": "0,1,2,3",
@@ -2199,6 +587,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0104000001 ",
"Counter": "0,1,2,3",
@@ -2211,6 +600,7 @@
"CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"EventCode": "0xB7, 0xBB",
"MSRValue": "0x0084000001 ",
"Counter": "0,1,2,3",
@@ -2221,89 +611,5 @@
"BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
"Offcore": "1",
"CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0044000001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SPL_HIT",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SPL_HIT",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000400001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20001c0001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000100001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_S & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000080001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_E & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000040001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & L3_HIT_M & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020001 ",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
}
]
\ No newline at end of file
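
The block of OFFCORE_RESPONSE entries above all share the same programming model: event select 0xB7 or 0xBB with UMask 0x1 selects the offcore response facility, and the per-event MSRValue is written to the paired MSR 0x1a6/0x1a7 (the MSRIndex field) to choose the request/supplier/snoop attributes. As a minimal sketch (not part of this patch), such an event can be requested through perf_event_open(2) by putting the event select and umask in attr.config and the MSR value in attr.config1; the MSRValue 0x3fc4000001 below is taken from one of the entries above:

/*
 * Minimal sketch, not part of this patch: count one OFFCORE_RESPONSE
 * event via a raw perf event.  Event select 0xB7 with UMask 0x1 goes
 * in attr.config; the MSRValue from the table above goes in
 * attr.config1, which the kernel writes to MSR 0x1a6 (MSRIndex).
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0x01b7;		/* UMask 0x1, event select 0xB7 */
	attr.config1 = 0x3fc4000001ULL;	/* MSRValue from an entry above */
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement runs here ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	read(fd, &count, sizeof(count));
	printf("offcore responses: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}
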
diff --git a/tools/perf/pmu-events/arch/x86/skylake/other.json b/tools/perf/pmu-events/arch/x86/skylake/other.json
index cfdc323acc82..84a316d380ac 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/other.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/other.json
@@ -1,11 +1,47 @@
[
{
- "PublicDescription": "This event counts the number of hardware interruptions received by the processor.",
+ "EventCode": "0x32",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "SW_PREFETCH_ACCESS.NTA",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of PREFETCHNTA instructions executed.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x32",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "SW_PREFETCH_ACCESS.T0",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of PREFETCHT0 instructions executed.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x32",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "SW_PREFETCH_ACCESS.T1_T2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x32",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of PREFETCHW instructions executed.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of hardware interruptions received by the processor.",
"EventCode": "0xCB",
"Counter": "0,1,2,3",
"UMask": "0x1",
"EventName": "HW_INTERRUPTS.RECEIVED",
- "SampleAfterValue": "100003",
+ "SampleAfterValue": "203",
"BriefDescription": "Number of hardware interrupts received by the processor.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
}
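
For reference, the JSON fields used throughout these files map directly onto the x86 PERFEVTSELx layout that perf exposes as raw events: EventCode occupies bits 0-7, UMask bits 8-15, EdgeDetect bit 18, AnyThread bit 21, Invert bit 23 and CounterMask bits 24-31. Below is a small illustrative helper (an assumption for illustration, not code from this patch) that packs those fields, so that SW_PREFETCH_ACCESS.NTA above (EventCode 0x32, UMask 0x1) comes out as raw event r0132:

/*
 * Illustrative helper, not code from this patch: pack the JSON fields
 * into the x86 PERFEVTSELx layout used by perf raw events.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t x86_raw_config(uint8_t event, uint8_t umask,
			       uint8_t cmask, int inv, int edge, int any)
{
	return (uint64_t)event |
	       ((uint64_t)umask << 8)  |
	       ((uint64_t)edge  << 18) |	/* EdgeDetect  */
	       ((uint64_t)any   << 21) |	/* AnyThread   */
	       ((uint64_t)inv   << 23) |	/* Invert      */
	       ((uint64_t)cmask << 24);		/* CounterMask */
}

int main(void)
{
	/* SW_PREFETCH_ACCESS.NTA: EventCode 0x32, UMask 0x1 */
	printf("sw_prefetch_access.nta -> r%04llx\n",
	       (unsigned long long)x86_raw_config(0x32, 0x1, 0, 0, 0, 0));
	/* UOPS_ISSUED.STALL_CYCLES: EventCode 0x0E, UMask 0x1,
	 * Invert 1, CounterMask 1 (see pipeline.json below) */
	printf("uops_issued.stall_cycles -> r%llx\n",
	       (unsigned long long)x86_raw_config(0x0e, 0x1, 1, 1, 0, 0));
	return 0;
}
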
diff --git a/tools/perf/pmu-events/arch/x86/skylake/pipeline.json b/tools/perf/pmu-events/arch/x86/skylake/pipeline.json
index 0f7adb809be3..bc6d2afbcd8a 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/pipeline.json
@@ -1,80 +1,92 @@
[
{
- "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. \nNotes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. \nCounting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
+ "PublicDescription": "Counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, Counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. Counting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
"EventCode": "0x00",
- "Counter": "Fixed counter 1",
+ "Counter": "Fixed counter 0",
"UMask": "0x1",
"EventName": "INST_RETIRED.ANY",
"SampleAfterValue": "2000003",
"BriefDescription": "Instructions retired from execution.",
- "CounterHTOff": "Fixed counter 1"
+ "CounterHTOff": "Fixed counter 0"
},
{
- "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
+ "PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"EventCode": "0x00",
- "Counter": "Fixed counter 2",
+ "Counter": "Fixed counter 1",
"UMask": "0x2",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"SampleAfterValue": "2000003",
"BriefDescription": "Core cycles when the thread is not in halt state",
- "CounterHTOff": "Fixed counter 2"
+ "CounterHTOff": "Fixed counter 1"
+ },
+ {
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 1",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "CounterHTOff": "Fixed counter 1"
},
{
- "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
+ "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
"EventCode": "0x00",
- "Counter": "Fixed counter 3",
+ "Counter": "Fixed counter 2",
"UMask": "0x3",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"SampleAfterValue": "2000003",
"BriefDescription": "Reference cycles when the core is not in halt state.",
- "CounterHTOff": "Fixed counter 3"
+ "CounterHTOff": "Fixed counter 2"
},
{
- "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
- "EventCode": "0x3C",
+ "PublicDescription": "Counts how many times the load operation got the true Block-on-Store blocking code preventing store forwarding. This includes cases when:a. preceding store conflicts with the load (incomplete overlap),b. store forwarding is impossible due to u-arch limitations,c. preceding lock RMW operations are not forwarded,d. store has the no-forward bit set (uncacheable/page-split/masked stores),e. all-blocking stores are used (mostly, fences and port I/O), and others.The most common case is a load blocked due to its address range overlapping with a preceding smaller uncompleted store. Note: This event does not take into account cases of out-of-SW-control (for example, SbTailHit), unknown physical STA, and cases of blocking loads on store due to being non-WB memory type or a lock. These cases are covered by other events. See the table of not supported store forwards in the Optimization Guide.",
+ "EventCode": "0x03",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Thread cycles when thread is not in halt state",
+ "UMask": "0x2",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Loads blocked by overlapping with store buffer that cannot be forwarded .",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xE6",
+ "PublicDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "EventCode": "0x03",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BACLEARS.ANY",
+ "UMask": "0x8",
+ "EventName": "LD_BLOCKS.NO_SR",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA8",
+ "PublicDescription": "Counts false dependencies in MOB when the partial comparison upon loose net check and dependency was resolved by the Enhanced Loose net mechanism. This may not result in high performance penalties. Loose net checks can fail when loads and stores are 4k aliased.",
+ "EventCode": "0x07",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "LSD.UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of Uops delivered by the LSD.",
+ "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "False dependencies in MOB due to partial compare on address.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts stalls occured due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
- "EventCode": "0x87",
+ "PublicDescription": "Core cycles the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
+ "EventCode": "0x0D",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "ILD_STALL.LCP",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles checkpoints in Resource Allocation Table (RAT) are recovering from JEClear or machine clear.",
"EventCode": "0x0D",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "AnyThread": "1",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
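
The fixed-counter renumbering above matters mainly for documentation; consumers normally reach these counters through perf's generic events, since INST_RETIRED.ANY backs PERF_COUNT_HW_INSTRUCTIONS and CPU_CLK_UNHALTED.THREAD backs PERF_COUNT_HW_CPU_CYCLES. A hedged sketch (assumed mapping, standard perf API) of an event group that reads both and derives IPC:

/*
 * Hedged sketch, standard perf API: measure IPC from the two events
 * that the fixed counters above typically back.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int open_hw(uint64_t config, int group_fd)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = config;
	attr.disabled = (group_fd == -1);	/* only the leader starts disabled */
	attr.read_format = PERF_FORMAT_GROUP;
	return syscall(__NR_perf_event_open, &attr, 0, -1, group_fd, 0);
}

int main(void)
{
	uint64_t val[3];	/* val[0] = nr, then one count per member */
	int cycles, insns;

	cycles = open_hw(PERF_COUNT_HW_CPU_CYCLES, -1);	      /* fixed ctr 1 */
	insns  = open_hw(PERF_COUNT_HW_INSTRUCTIONS, cycles); /* fixed ctr 0 */
	if (cycles < 0 || insns < 0) {
		perror("perf_event_open");
		return 1;
	}
	ioctl(cycles, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
	/* ... workload under measurement runs here ... */
	ioctl(cycles, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
	read(cycles, val, sizeof(val));
	printf("cycles=%llu insns=%llu ipc=%.2f\n",
	       (unsigned long long)val[1], (unsigned long long)val[2],
	       (double)val[2] / (double)val[1]);
	return 0;
}
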
@@ -87,33 +99,35 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts resource-related stall cycles. Reasons for stalls can be as follows:\n - *any* u-arch structure got full (LB, SB, RS, ROB, BOB, LM, Physical Register Reclaim Table (PRRT), or Physical History Table (PHT) slots)\n - *any* u-arch structure got empty (like INT/SIMD FreeLists)\n - FPU control word (FPCW), MXCSR\nand others. This counts cycles that the pipeline backend blocked uop delivery from the front end.",
- "EventCode": "0xA2",
+ "PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
+ "EventCode": "0x0E",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "RESOURCE_STALLS.ANY",
+ "EventName": "UOPS_ISSUED.ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Resource-related stall cycles",
+ "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts stall cycles caused by the store buffer (SB) overflow (excluding draining from synch). This counts cycles that the pipeline backend blocked uop delivery from the front end.",
- "EventCode": "0xA2",
+ "PublicDescription": "Counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
+ "EventCode": "0x0E",
+ "Invert": "1",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "RESOURCE_STALLS.SB",
+ "UMask": "0x1",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS).",
+ "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to Mixing Intel AVX and Intel SSE Code section of the Optimization Guide.",
"EventCode": "0x0E",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_ISSUED.ANY",
+ "UMask": "0x2",
+ "EventName": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
+ "BriefDescription": "Uops inserted at issue-stage in order to preserve upper bits of vector registers.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
@@ -126,361 +140,318 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
- "EventCode": "0x0E",
- "Invert": "1",
+ "EventCode": "0x14",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "EventName": "ARITH.DIVIDER_ACTIVE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "BriefDescription": "Cycles when divide unit is busy executing divide or square root operations. Accounts for integer and floating-point operations.",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which the reservation station (RS) is empty for the thread.\nNote: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.",
- "EventCode": "0x5E",
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "BriefDescription": "Thread cycles when thread is not in halt state",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x5E",
- "Invert": "1",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "RS_EVENTS.EMPTY_END",
+ "UMask": "0x0",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
- "CounterMask": "1",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR enable via IA32_DEBUGCTL MSR and branch type selection via MSR_LBR_SELECT.",
- "EventCode": "0xCC",
+ "PublicDescription": "Counts when the Current Privilege Level (CPL) transitions from ring 1, 2 or 3 to ring 0 (Kernel).",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Increments whenever there is an update to the LBR array.",
+ "UMask": "0x0",
+ "EdgeDetect": "1",
+ "EventName": "CPU_CLK_UNHALTED.RING0_TRANS",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts when there is a transition from ring 1, 2 or 3 to ring 0.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Number of machine clears (nukes) of any type.",
- "EventCode": "0xC3",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "MACHINE_CLEARS.COUNT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of machine clears (nukes) of any type. ",
- "CounterMask": "1",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "2503",
+ "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts self-modifying code (SMC) detected, which causes a machine clear.",
- "EventCode": "0xC3",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MACHINE_CLEARS.SMC",
- "SampleAfterValue": "100003",
- "BriefDescription": "Self-modifying code (SMC) detected.",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2503",
+ "BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
- "EventCode": "0xC0",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "Errata": "SKL091, SKL044",
- "EventName": "INST_RETIRED.ANY_P",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "2503",
+ "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "2",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts instructions retired.",
- "EventCode": "0xC0",
- "Counter": "1",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
"UMask": "0x1",
- "Errata": "SKL091, SKL044",
- "EventName": "INST_RETIRED.PREC_DIST",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
- "CounterHTOff": "1"
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2503",
+ "BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of retirement slots used.",
- "EventCode": "0xC2",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
"UMask": "0x2",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Retirement slots used.",
+ "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts cycles without actually retired uops.",
- "EventCode": "0xC2",
- "Invert": "1",
+ "EventCode": "0x3C",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_RETIRED.STALL_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles without actually retired uops.",
- "CounterMask": "1",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2503",
+ "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.",
- "EventCode": "0xC2",
- "Invert": "1",
+ "PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
+ "EventCode": "0x4C",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with less than 10 actually retired uops.",
- "CounterMask": "10",
+ "EventName": "LOAD_HIT_PRE.SW_PF",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts conditional branch instructions retired.",
- "EventCode": "0xC4",
+ "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for the thread.; Note: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.",
+ "EventCode": "0x5E",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "Errata": "SKL091",
- "EventName": "BR_INST_RETIRED.CONDITIONAL",
- "SampleAfterValue": "400009",
- "BriefDescription": "Conditional branch instructions retired.",
+ "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts both direct and indirect near call instructions retired.",
- "EventCode": "0xC4",
+ "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate front-end Latency Bound issues.",
+ "EventCode": "0x5E",
+ "Invert": "1",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "Errata": "SKL091",
- "EventName": "BR_INST_RETIRED.NEAR_CALL",
- "SampleAfterValue": "100007",
- "BriefDescription": "Direct and indirect near call instructions retired.",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts all (macro) branch instructions retired.",
- "EventCode": "0xC4",
+ "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.",
+ "EventCode": "0x87",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "Errata": "SKL091",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired.",
+ "UMask": "0x1",
+ "EventName": "ILD_STALL.LCP",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts return instructions retired.",
- "EventCode": "0xC4",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 0.",
+ "EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "Errata": "SKL091",
- "EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "SampleAfterValue": "100007",
- "BriefDescription": "Return instructions retired.",
+ "UMask": "0x1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 0",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts not taken branch instructions retired.",
- "EventCode": "0xC4",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 1.",
+ "EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "Errata": "SKL091",
- "EventName": "BR_INST_RETIRED.NOT_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Not taken branch instructions retired.",
+ "UMask": "0x2",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts taken branch instructions retired.",
- "EventCode": "0xC4",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 2.",
+ "EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "Errata": "SKL091",
- "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Taken branch instructions retired.",
+ "UMask": "0x4",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 2",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts far branch instructions retired.",
- "EventCode": "0xC4",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 3.",
+ "EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x40",
- "Errata": "SKL091",
- "EventName": "BR_INST_RETIRED.FAR_BRANCH",
- "SampleAfterValue": "100007",
- "BriefDescription": "Far branch instructions retired.",
+ "UMask": "0x8",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 3",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "2",
- "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "Errata": "SKL091",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
- "SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired. ",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PEBS": "1",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted conditional branch instructions retired.",
- "EventCode": "0xC5",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 4.",
+ "EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BR_MISP_RETIRED.CONDITIONAL",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "UMask": "0x10",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts both taken and not taken retired mispredicted direct and indirect near calls, including both register and memory indirect.",
- "EventCode": "0xC5",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 5.",
+ "EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "BR_MISP_RETIRED.NEAR_CALL",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted direct and indirect near call instructions retired.",
+ "UMask": "0x20",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 5",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts all mispredicted macro branch instructions retired.",
- "EventCode": "0xC5",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 6.",
+ "EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "400009",
- "BriefDescription": "All mispredicted macro branch instructions retired.",
+ "UMask": "0x40",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 6",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "1",
- "EventCode": "0xC5",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 7.",
+ "EventCode": "0xA1",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "UMask": "0x80",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 7",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PEBS": "2",
- "PublicDescription": "This is a precise version of BR_MISP_RETIRED.ALL_BRANCHES that counts all mispredicted macro branch instructions retired.",
- "EventCode": "0xC5",
+ "PublicDescription": "Counts resource-related stall cycles. Reasons for stalls can be as follows:a. *any* u-arch structure got full (LB, SB, RS, ROB, BOB, LM, Physical Register Reclaim Table (PRRT), or Physical History Table (PHT) slots).b. *any* u-arch structure got empty (like INT/SIMD FreeLists).c. FPU control word (FPCW), MXCSR.and others. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
+ "EventCode": "0xA2",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted macro branch instructions retired. ",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1",
+ "EventName": "RESOURCE_STALLS.ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Resource-related stall cycles",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Number of uops to be executed per-thread each cycle.",
- "EventCode": "0xB1",
+ "PublicDescription": "Counts allocation stall cycles caused by the store buffer (SB) being full. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
+ "EventCode": "0xA2",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.THREAD",
+ "UMask": "0x8",
+ "EventName": "RESOURCE_STALLS.SB",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Number of uops executed from any thread.",
- "EventCode": "0xB1",
+ "EventCode": "0xA3",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE",
+ "UMask": "0x1",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of uops executed on the core.",
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
+ "EventCode": "0xA3",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "UOPS_EXECUTED.X87",
+ "UMask": "0x4",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of x87 uops dispatched.",
+ "BriefDescription": "Total execution stalls.",
+ "CounterMask": "4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
- "EventCode": "0xB1",
- "Invert": "1",
+ "EventCode": "0xA3",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "UMask": "0x5",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
- "CounterMask": "1",
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "CounterMask": "5",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
- "EventCode": "0xB1",
+ "EventCode": "0xA3",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "UMask": "0x8",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
- "CounterMask": "1",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "8",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
- "EventCode": "0xB1",
+ "EventCode": "0xA3",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "UMask": "0xc",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
- "CounterMask": "2",
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CounterMask": "12",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
- "EventCode": "0xB1",
+ "EventCode": "0xA3",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "UMask": "0x10",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
- "CounterMask": "3",
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "CounterMask": "16",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
- "EventCode": "0xB1",
+ "EventCode": "0xA3",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "UMask": "0x14",
+ "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "CounterMask": "20",
+ "CounterHTOff": "0,1,2,3"
},
{
+ "PublicDescription": "Counts cycles during which no uops were executed on all ports and Reservation Station (RS) was not empty.",
"EventCode": "0xA6",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -490,6 +461,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
"EventCode": "0xA6",
"Counter": "0,1,2,3",
"UMask": "0x2",
@@ -499,6 +471,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
"EventCode": "0xA6",
"Counter": "0,1,2,3",
"UMask": "0x4",
@@ -508,6 +481,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
"EventCode": "0xA6",
"Counter": "0,1,2,3",
"UMask": "0x8",
@@ -517,6 +491,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.",
"EventCode": "0xA6",
"Counter": "0,1,2,3",
"UMask": "0x10",
@@ -535,212 +510,196 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
- "EventCode": "0xA1",
+ "PublicDescription": "Number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
+ "EventCode": "0xA8",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+ "EventName": "LSD.UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "BriefDescription": "Number of Uops delivered by the LSD.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
- "EventCode": "0xA1",
+ "PublicDescription": "Counts the cycles when at least one uop is delivered by the LSD (Loop-stream detector).",
+ "EventCode": "0xA8",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_ACTIVE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
- "EventCode": "0xA1",
+ "PublicDescription": "Counts the cycles when 4 uops are delivered by the LSD (Loop-stream detector).",
+ "EventCode": "0xA8",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_4_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
- "EventCode": "0xA1",
+ "PublicDescription": "Number of uops to be executed per-thread each cycle.",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.THREAD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
- "EventCode": "0xA1",
+ "PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "EventCode": "0xB1",
+ "Invert": "1",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
- "EventCode": "0xA1",
+ "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
- "EventCode": "0xA1",
+ "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "CounterMask": "2",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
- "EventCode": "0xA1",
+ "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "CounterMask": "3",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA3",
+ "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
"SampleAfterValue": "2000003",
- "BriefDescription": "Total execution stalls.",
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
"CounterMask": "4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA3",
+ "PublicDescription": "Number of uops executed from any thread.",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "CounterMask": "8",
+ "BriefDescription": "Number of uops executed on the core.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA3",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0xc",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
- "CounterMask": "12",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by asm inspection of the nearby instructions.",
- "EventCode": "0x4C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LOAD_HIT_PRE.SW_PF",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts how many times the load operation got the true Block-on-Store blocking code preventing store forwarding. This includes cases when:\n - preceding store conflicts with the load (incomplete overlap)\n\n - store forwarding is impossible due to u-arch limitations\n\n - preceding lock RMW operations are not forwarded\n\n - store has the no-forward bit set (uncacheable/page-split/masked stores)\n\n - all-blocking stores are used (mostly, fences and port I/O)\n\nand others.\nThe most common case is a load blocked due to its address range overlapping with a preceding smaller uncompleted store. Note: This event does not take into account cases of out-of-SW-control (for example, SbTailHit), unknown physical STA, and cases of blocking loads on store due to being non-WB memory type or a lock. These cases are covered by other events.\nSee the table of not supported store forwards in the Optimization Guide.",
- "EventCode": "0x03",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
"UMask": "0x2",
- "EventName": "LD_BLOCKS.STORE_FORWARD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Loads blocked by overlapping with store buffer that cannot be forwarded .",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
- "EventCode": "0x03",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "LD_BLOCKS.NO_SR",
- "SampleAfterValue": "100003",
- "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "CounterMask": "2",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts false dependencies in MOB when the partial comparison upon loose net check and dependency was resolved by the Enhanced Loose net mechanism. This may not result in high performance penalties. Loose net checks can fail when loads and stores are 4k aliased.",
- "EventCode": "0x07",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
- "SampleAfterValue": "100003",
- "BriefDescription": "False dependencies in MOB due to partial compare on address.",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "CounterMask": "3",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA3",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
- "CounterMask": "1",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "CounterMask": "4",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA3",
+ "EventCode": "0xB1",
+ "Invert": "1",
"Counter": "0,1,2,3",
- "UMask": "0x5",
- "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
- "CounterMask": "5",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA3",
+ "PublicDescription": "Counts the number of x87 uops executed.",
+ "EventCode": "0xB1",
"Counter": "0,1,2,3",
"UMask": "0x10",
- "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "EventName": "UOPS_EXECUTED.X87",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
- "CounterMask": "16",
+ "BriefDescription": "Counts the number of x87 uops dispatched.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA3",
+ "PublicDescription": "Counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
+ "EventCode": "0xC0",
"Counter": "0,1,2,3",
- "UMask": "0x14",
- "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
+ "UMask": "0x0",
+ "Errata": "SKL091, SKL044",
+ "EventName": "INST_RETIRED.ANY_P",
"SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
- "CounterMask": "20",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
- "SampleAfterValue": "2503",
- "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
+ "PEBS": "2",
+ "PublicDescription": "A version of INST_RETIRED that allows for a more unbiased distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR) feature to mitigate some bias in how retired instructions get sampled.",
+ "EventCode": "0xC0",
+ "Counter": "1",
+ "UMask": "0x1",
+ "Errata": "SKL091, SKL044",
+ "EventName": "INST_RETIRED.PREC_DIST",
"SampleAfterValue": "2000003",
- "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
+ "CounterHTOff": "1"
},
{
"PEBS": "2",
@@ -757,183 +716,235 @@
"CounterHTOff": "0,2,3"
},
{
- "EventCode": "0x14",
+ "EventCode": "0xC1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ARITH.DIVIDER_ACTIVE",
+ "UMask": "0x3f",
+ "EventName": "OTHER_ASSISTS.ANY",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of times a microcode assist is invoked by HW other than FP-assist. Examples include AD (page Access Dirty) and AVX* related assists.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the retirement slots used.",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when divide unit is busy executing divide or square root operations. Accounts for integer and floating-point operations.",
- "CounterMask": "1",
+ "BriefDescription": "Retirement slots used.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA8",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts cycles without actually retired uops.",
+ "EventCode": "0xC2",
+ "Invert": "1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_ACTIVE",
+ "UMask": "0x2",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "BriefDescription": "Cycles without actually retired uops.",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xA8",
+ "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.",
+ "EventCode": "0xC2",
+ "Invert": "1",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_4_UOPS",
+ "UMask": "0x2",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
- "CounterMask": "4",
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "CounterMask": "10",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xC1",
+ "EventCode": "0xC3",
"Counter": "0,1,2,3",
- "UMask": "0x3f",
- "EventName": "OTHER_ASSISTS.ANY",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "MACHINE_CLEARS.COUNT",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of times a microcode assist is invoked by HW other than FP-assist. Examples include AD (page Access Dirty) and AVX* related assists.",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register.\r\nFor more information, refer to ?Mixing Intel AVX and Intel SSE Code? section of the Optimization Guide.",
- "EventCode": "0x0E",
+ "PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
+ "EventCode": "0xC3",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops inserted at issue-stage in order to preserve upper bits of vector registers.",
+ "UMask": "0x4",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Self-modifying code (SMC) detected.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x00",
- "Counter": "Fixed counter 2",
- "UMask": "0x2",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "CounterHTOff": "Fixed counter 2"
- },
- {
- "EventCode": "0x3C",
+ "PublicDescription": "Counts all (macro) branch instructions retired.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x0",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Errata": "SKL091",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All (macro) branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x3C",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts conditional branch instructions retired.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2503",
- "BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
+ "Errata": "SKL091",
+ "EventName": "BR_INST_RETIRED.CONDITIONAL",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Conditional branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x0D",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts both direct and indirect near call instructions retired.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "UMask": "0x2",
+ "Errata": "SKL091",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Direct and indirect near call instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
+ "PEBS": "2",
+ "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4",
+ "Errata": "SKL091",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "CounterHTOff": "0,1,2,3"
},
{
- "EventCode": "0xB1",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts return instructions retired.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "CounterMask": "2",
+ "UMask": "0x8",
+ "Errata": "SKL091",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Return instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts not taken branch instructions retired.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "CounterMask": "3",
+ "UMask": "0x10",
+ "Errata": "SKL091",
+ "EventName": "BR_INST_RETIRED.NOT_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Not taken branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts taken branch instructions retired.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "CounterMask": "4",
+ "UMask": "0x20",
+ "Errata": "SKL091",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Taken branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0xB1",
- "Invert": "1",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts far branch instructions retired.",
+ "EventCode": "0xC4",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
- "CounterMask": "1",
+ "UMask": "0x40",
+ "Errata": "SKL091",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of far branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts when the Current Privilege Level (CPL) transitions from ring 1, 2 or 3 to ring 0 (Kernel).",
- "EventCode": "0x3C",
+ "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
"UMask": "0x0",
- "EdgeDetect": "1",
- "EventName": "CPU_CLK_UNHALTED.RING0_TRANS",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts when there is a transition from ring 1, 2 or 3 to ring 0.",
- "CounterMask": "1",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All mispredicted macro branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x3C",
+ "PEBS": "1",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts mispredicted conditional branch instructions retired.",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
"UMask": "0x1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
- "SampleAfterValue": "2503",
- "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x3C",
+ "PEBS": "1",
+ "PublicDescription": "This event counts both taken and not taken retired mispredicted direct and indirect near calls, including both register and memory indirect.",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2503",
- "BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
+ "UMask": "0x2",
+ "EventName": "BR_MISP_RETIRED.NEAR_CALL",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Mispredicted direct and indirect near call instructions retired.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x3C",
+ "PEBS": "2",
+ "PublicDescription": "This is a precise version of BR_MISP_RETIRED.ALL_BRANCHES that counts all mispredicted macro branch instructions retired.",
+ "EventCode": "0xC5",
"Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2503",
- "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
+ "UMask": "0x4",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Mispredicted macro branch instructions retired.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken. ",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR enable via IA32_DEBUGCTL MSR and branch type selection via MSR_LBR_SELECT.",
+ "EventCode": "0xCC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Increments whenever there is an update to the LBR array.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of times the front-end is resteered when it finds a branch instruction in a fetch line. This occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
+ "EventCode": "0xE6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BACLEARS.ANY",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/skylake/virtual-memory.json b/tools/perf/pmu-events/arch/x86/skylake/virtual-memory.json
index 02f32cbf6789..2bcba7daca14 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/virtual-memory.json
@@ -1,83 +1,6 @@
[
{
- "PublicDescription": "This event counts the number of flushes of the big or small ITLB pages. Counting include both TLB Flush (covering all sets) and TLB Set Clear (set-specific).",
- "EventCode": "0xAE",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ITLB.ITLB_FLUSH",
- "SampleAfterValue": "100007",
- "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x4F",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "EPT.WALK_PENDING",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a EPT (Extended Page Table) walk for any request type.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts store misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).",
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
- "SampleAfterValue": "100003",
- "BriefDescription": "Misses at all ITLB levels that cause page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.",
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
- "SampleAfterValue": "100003",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
- "SampleAfterValue": "100003",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
- "SampleAfterValue": "100003",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (1G)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "ITLB_MISSES.WALK_PENDING",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake. ",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "ITLB_MISSES.STLB_HIT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts load misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).",
+ "PublicDescription": "Counts demand data loads that caused a page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels, but the walk need not have completed.",
"EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -87,45 +10,68 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts load misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.",
+ "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
"EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x2",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
"SampleAfterValue": "2000003",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (4K).",
+ "BriefDescription": "Page walk completed due to a demand data load to a 4K page",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts load misses in all DTLB levels that cause a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
+ "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 2M/4M pages. The page walks can end with or without a page fault.",
"EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
"SampleAfterValue": "2000003",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (2M/4M).",
+ "BriefDescription": "Page walk completed due to a demand data load to a 2M/4M page",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts load misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
+ "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
"EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x8",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
"SampleAfterValue": "2000003",
- "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (1G)",
+ "BriefDescription": "Page walk completed due to a demand data load to a 1G page",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Counts demand data loads that caused a completed page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels. The page walk can end with or without a fault.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake microarchitecture.",
"EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x10",
"EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake. ",
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a load.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a load. EPT page walk duration are excluded in Skylake.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
"EventCode": "0x08",
"Counter": "0,1,2,3",
"UMask": "0x20",
@@ -135,7 +81,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts store misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).",
+ "PublicDescription": "Counts demand data stores that caused a page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels, but the walk need not have completed.",
"EventCode": "0x49",
"Counter": "0,1,2,3",
"UMask": "0x1",
@@ -145,45 +91,68 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.",
+ "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
"EventCode": "0x49",
"Counter": "0,1,2,3",
"UMask": "0x2",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
"SampleAfterValue": "100003",
- "BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (4K)",
+ "BriefDescription": "Page walk completed due to a demand data store to a 4K page",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
+ "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 2M/4M pages. The page walks can end with or without a page fault.",
"EventCode": "0x49",
"Counter": "0,1,2,3",
"UMask": "0x4",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
"SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks (2M/4M)",
+ "BriefDescription": "Page walk completed due to a demand data store to a 2M/4M page",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
+ "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 1G pages. The page walks can end with or without a page fault.",
"EventCode": "0x49",
"Counter": "0,1,2,3",
"UMask": "0x8",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
"SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks (1G)",
+ "BriefDescription": "Page walk completed due to a demand data store to a 1G page",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts demand data stores that caused a completed page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels. The page walk can end with or without a fault.",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake microarchitecture.",
"EventCode": "0x49",
"Counter": "0,1,2,3",
"UMask": "0x10",
"EventName": "DTLB_STORE_MISSES.WALK_PENDING",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake. ",
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a store.",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store. EPT page walk duration are excluded in Skylake.",
+ "CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
"EventCode": "0x49",
"Counter": "0,1,2,3",
"UMask": "0x20",
@@ -193,73 +162,77 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of DTLB flush attempts of the thread-specific entries.",
- "EventCode": "0xBD",
+ "PublicDescription": "Counts cycles for each PMH (Page Miss Handler) that is busy with an EPT (Extended Page Table) walk for any request type.",
+ "EventCode": "0x4F",
"Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "TLB_FLUSH.DTLB_THREAD",
- "SampleAfterValue": "100007",
- "BriefDescription": "DTLB flush attempts of the thread-specific entries",
+ "UMask": "0x10",
+ "EventName": "EPT.WALK_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a EPT (Extended Page Table) walk for any request type.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, and so on).",
- "EventCode": "0xBD",
+ "PublicDescription": "Counts page walks of any page size (4K/2M/4M/1G) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB, but the walk need not have completed.",
+ "EventCode": "0x85",
"Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "TLB_FLUSH.STLB_ANY",
- "SampleAfterValue": "100007",
- "BriefDescription": "STLB flush attempts",
+ "UMask": "0x1",
+ "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Misses at all ITLB levels that cause page walks",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Counts completed page walks (4K page size) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB. The page walk can end with or without a fault.",
"EventCode": "0x85",
"Counter": "0,1,2,3",
- "UMask": "0xe",
- "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "UMask": "0x2",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
"SampleAfterValue": "100003",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x08",
+ "PublicDescription": "Counts code misses in all ITLB levels that caused a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
+ "EventCode": "0x85",
"Counter": "0,1,2,3",
- "UMask": "0xe",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "UMask": "0x4",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
"SampleAfterValue": "100003",
- "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x49",
+ "PublicDescription": "Counts store misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
+ "EventCode": "0x85",
"Counter": "0,1,2,3",
- "UMask": "0xe",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "UMask": "0x8",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
"SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (1G)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x49",
+ "PublicDescription": "Counts completed page walks (2M and 4M page sizes) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB. The page walk can end with or without a fault.",
+ "EventCode": "0x85",
"Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
+ "UMask": "0xe",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
"SampleAfterValue": "100003",
- "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store. EPT page walk duration are excluded in Skylake. ",
- "CounterMask": "1",
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "EventCode": "0x08",
+ "PublicDescription": "Counts 1 per cycle for each PMH (Page Miss Handler) that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake michroarchitecture.",
+ "EventCode": "0x85",
"Counter": "0,1,2,3",
"UMask": "0x10",
- "EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
+ "EventName": "ITLB_MISSES.WALK_PENDING",
"SampleAfterValue": "100003",
- "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a load. EPT page walk duration are excluded in Skylake. ",
- "CounterMask": "1",
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake.",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "PublicDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request. EPT page walk duration are excluded in Skylake microarchitecture.",
"EventCode": "0x85",
"Counter": "0,1,2,3",
"UMask": "0x10",
@@ -268,5 +241,44 @@
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request. EPT page walk duration are excluded in Skylake.",
"CounterMask": "1",
"CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of flushes of the big or small ITLB pages. Counting include both TLB Flush (covering all sets) and TLB Set Clear (set-specific).",
+ "EventCode": "0xAE",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ITLB.ITLB_FLUSH",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of DTLB flush attempts of the thread-specific entries.",
+ "EventCode": "0xBD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "TLB_FLUSH.DTLB_THREAD",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "DTLB flush attempts of the thread-specific entries",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, etc.).",
+ "EventCode": "0xBD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "TLB_FLUSH.STLB_ANY",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "STLB flush attempts",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
}
] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/cache.json b/tools/perf/pmu-events/arch/x86/skylakex/cache.json
index b5bc742b6fbc..5c9940866acd 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/cache.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/cache.json
@@ -265,7 +265,7 @@
{
"EventCode": "0x60",
"UMask": "0x2",
- "BriefDescription": "Offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore, every cycle. ",
+ "BriefDescription": "Offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore, every cycle.",
"Counter": "0,1,2,3",
"EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
"PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
@@ -398,22 +398,24 @@
{
"EventCode": "0xD0",
"UMask": "0x11",
- "BriefDescription": "Retired load instructions that miss the STLB.",
+ "BriefDescription": "Retired load instructions that miss the STLB. (Precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
+ "PublicDescription": "Retired load instructions that miss the STLB.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x12",
- "BriefDescription": "Retired store instructions that miss the STLB.",
+ "BriefDescription": "Retired store instructions that miss the STLB. (Precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
+ "PublicDescription": "Retired store instructions that miss the STLB.",
"SampleAfterValue": "100003",
"L1_Hit_Indication": "1",
"CounterHTOff": "0,1,2,3"
@@ -421,7 +423,7 @@
{
"EventCode": "0xD0",
"UMask": "0x21",
- "BriefDescription": "Retired load instructions with locked access.",
+ "BriefDescription": "Retired load instructions with locked access. (Precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -432,24 +434,22 @@
{
"EventCode": "0xD0",
"UMask": "0x41",
- "BriefDescription": "Retired load instructions that split across a cacheline boundary.",
+ "BriefDescription": "Retired load instructions that split across a cacheline boundary. (Precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
- "PublicDescription": "Counts retired load instructions that split across a cacheline boundary.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
{
"EventCode": "0xD0",
"UMask": "0x42",
- "BriefDescription": "Retired store instructions that split across a cacheline boundary.",
+ "BriefDescription": "Retired store instructions that split across a cacheline boundary. (Precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_INST_RETIRED.SPLIT_STORES",
- "PublicDescription": "Counts retired store instructions that split across a cacheline boundary.",
"SampleAfterValue": "100003",
"L1_Hit_Indication": "1",
"CounterHTOff": "0,1,2,3"
@@ -457,7 +457,7 @@
{
"EventCode": "0xD0",
"UMask": "0x81",
- "BriefDescription": "All retired load instructions.",
+ "BriefDescription": "All retired load instructions. (Precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
@@ -468,11 +468,12 @@
{
"EventCode": "0xD0",
"UMask": "0x82",
- "BriefDescription": "All retired store instructions.",
+ "BriefDescription": "All retired store instructions. (Precise Event)",
"Data_LA": "1",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_INST_RETIRED.ALL_STORES",
+ "PublicDescription": "All retired store instructions.",
"SampleAfterValue": "2000003",
"L1_Hit_Indication": "1",
"CounterHTOff": "0,1,2,3"
@@ -485,7 +486,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_RETIRED.L1_HIT",
- "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.\r\n",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3"
},
@@ -509,7 +510,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_RETIRED.L3_HIT",
- "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache. ",
+ "PublicDescription": "Retired load instructions with L3 cache hits as data sources.",
"SampleAfterValue": "50021",
"CounterHTOff": "0,1,2,3"
},
@@ -545,7 +546,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_RETIRED.L3_MISS",
- "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache. ",
+ "PublicDescription": "Retired load instructions missed L3 cache as data sources.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
@@ -557,7 +558,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_RETIRED.FB_HIT",
- "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready. ",
+ "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready. \r\n",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
@@ -616,7 +617,6 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
- "PublicDescription": "Retired load instructions which data sources missed L3 but serviced from local DRAM.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
@@ -639,7 +639,6 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM",
- "PublicDescription": "Retired load instructions whose data sources was remote HITM.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
@@ -648,9 +647,9 @@
"UMask": "0x8",
"BriefDescription": "Retired load instructions whose data sources was forwarded from a remote cache",
"Data_LA": "1",
+ "PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD",
- "PublicDescription": "Retired load instructions whose data sources was forwarded from a remote cache.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
},
@@ -697,7 +696,7 @@
{
"EventCode": "0xF2",
"UMask": "0x2",
- "BriefDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines can be either in modified state or clean state. Modified lines may either be written back to L3 or directly written to memory and not allocated in L3. Clean lines may either be allocated in L3 or dropped ",
+ "BriefDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines can be either in modified state or clean state. Modified lines may either be written back to L3 or directly written to memory and not allocated in L3. Clean lines may either be allocated in L3 or dropped",
"Counter": "0,1,2,3",
"EventName": "L2_LINES_OUT.NON_SILENT",
"PublicDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines can be either in modified state or clean state. Modified lines may either be written back to L3 or directly written to memory and not allocated in L3. Clean lines may either be allocated in L3 or dropped.",
@@ -742,7 +741,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that have any response type.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -755,7 +754,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -768,7 +767,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -781,7 +780,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -794,7 +793,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -807,7 +806,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that hit in the L3.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts demand data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -820,7 +819,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that have any response type.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -833,7 +832,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -846,7 +845,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -859,7 +858,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -872,7 +871,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -885,7 +884,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand data writes (RFOs) that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -898,7 +897,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that have any response type.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -911,7 +910,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -924,7 +923,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -937,7 +936,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -950,7 +949,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -963,7 +962,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that hit in the L3.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand code reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -976,7 +975,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that have any response type.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -989,7 +988,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1002,7 +1001,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1015,7 +1014,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1028,7 +1027,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1041,7 +1040,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1054,7 +1053,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that have any response type.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1067,7 +1066,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1080,7 +1079,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1093,7 +1092,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1106,7 +1105,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1119,7 +1118,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1132,7 +1131,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that have any response type.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1145,7 +1144,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1158,7 +1157,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1171,7 +1170,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1184,7 +1183,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1197,7 +1196,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1210,7 +1209,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that have any response type.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1223,7 +1222,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1236,7 +1235,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1249,7 +1248,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1262,7 +1261,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1275,7 +1274,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1288,7 +1287,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that have any response type.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1301,7 +1300,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1314,7 +1313,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1327,7 +1326,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1340,7 +1339,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1353,7 +1352,85 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests that have any response type.",
+ "MSRValue": "0x0000018000 ",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.OTHER.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts any other requests that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "MSRValue": "0x01003c8000 ",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts any other requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "MSRValue": "0x04003c8000 ",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts any other requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "OTHER & L3_HIT & SNOOP_HIT_WITH_FWD",
+ "MSRValue": "0x08003c8000 ",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "MSRValue": "0x10003c8000 ",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts any other requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests that hit in the L3.",
+ "MSRValue": "0x3f803c8000 ",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts any other requests that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1366,7 +1443,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that have any response type.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1379,7 +1456,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1392,7 +1469,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1405,7 +1482,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1418,7 +1495,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1431,7 +1508,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that hit in the L3.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all prefetch data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1444,7 +1521,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that have any response type.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1457,7 +1534,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1470,7 +1547,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1483,7 +1560,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1496,7 +1573,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1509,7 +1586,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that hit in the L3.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts prefetch RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1522,7 +1599,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that have any response type.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1535,7 +1612,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1548,7 +1625,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1561,7 +1638,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1574,7 +1651,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1587,7 +1664,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch data reads that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1600,7 +1677,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that have any response type.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs that have any response type. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1613,7 +1690,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1626,7 +1703,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1639,7 +1716,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "tbd; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "tbd Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1652,7 +1729,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1665,7 +1742,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3.; Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "PublicDescription": "Counts all demand & prefetch RFOs that hit in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
}
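
Every description in this file repeats that offcore response events need a dedicated attribute MSR alongside the event select and counter MSR. A minimal sketch of how that pairing surfaces through perf_event_open(2), assuming the usual raw encoding of (umask << 8) | event in attr.config and the conventional mapping of the offcore-response mask onto attr.config1; the values follow the OTHER.ANY_RESPONSE entry above.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0x01b7;		/* event 0xB7, umask 0x1 */
	attr.config1 = 0x0000018000ULL;	/* offcore response mask for MSR 0x1a6 */
	attr.disabled = 1;

	/* Count for the calling thread on any CPU. */
	fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	/* Enable with PERF_EVENT_IOC_ENABLE, run the workload, read(fd, ...). */
	close(fd);
	return 0;
}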
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/floating-point.json b/tools/perf/pmu-events/arch/x86/skylakex/floating-point.json
index 1c09a328df36..286ed1a37ec9 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/floating-point.json
@@ -29,10 +29,9 @@
{
"EventCode": "0xC7",
"UMask": "0x8",
- "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ",
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
"Counter": "0,1,2,3",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
- "PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
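
The FP_ARITH_INST_RETIRED description above states the computations-per-count multiplier explicitly: 4 for 128-bit packed single precision. A small sketch of turning such counts into a FLOP estimate; the scalar, 256-bit, and 512-bit multipliers (1, 8, 16) follow the same lane arithmetic and are an assumption here, and since DPP and FM(N)ADD/SUB count twice, the result is only an estimate.

#include <stdint.h>

/*
 * Single-precision FLOP estimate from FP_ARITH_INST_RETIRED counts.
 * Multipliers are computations per count: 1 (scalar), 4 (128-bit packed,
 * from the description above), 8 (256-bit) and 16 (512-bit, by analogy).
 */
static uint64_t sp_flops_estimate(uint64_t scalar, uint64_t packed128,
				  uint64_t packed256, uint64_t packed512)
{
	return scalar + 4 * packed128 + 8 * packed256 + 16 * packed512;
}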
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/frontend.json b/tools/perf/pmu-events/arch/x86/skylakex/frontend.json
index 40abc0852cd6..403a4f89e9b2 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/frontend.json
@@ -182,7 +182,7 @@
"BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
"Counter": "0,1,2,3",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
- "PublicDescription": "Counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding \u201c4 \u2013 x\u201d when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread. b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions). c. Instruction Decode Queue (IDQ) delivers four uops.",
+ "PublicDescription": "Counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4 x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread. b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions). c. Instruction Decode Queue (IDQ) delivers four uops.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
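
The "4 - x" accounting above means IDQ_UOPS_NOT_DELIVERED.CORE counts unused issue slots, which is exactly what top-down analysis calls the Frontend Bound fraction. A hedged sketch of the derived metric, assuming CPU_CLK_UNHALTED.THREAD as the cycle counter and four issue slots per cycle as stated in the description:

    /* frontend_bound.c - sketch: top-down Frontend Bound fraction from two
     * counters. Counter values are placeholders fed from perf.
     */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t idq_uops_not_delivered_core = 0; /* IDQ_UOPS_NOT_DELIVERED.CORE */
        uint64_t cpu_clk_unhalted_thread = 1;     /* CPU_CLK_UNHALTED.THREAD (nonzero) */

        double frontend_bound = (double)idq_uops_not_delivered_core /
                                (4.0 * (double)cpu_clk_unhalted_thread);
        printf("Frontend Bound: %.1f%% of issue slots\n", 100.0 * frontend_bound);
        return 0;
    }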
@@ -247,20 +247,20 @@
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
"Counter": "0,1,2,3",
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
- "PublicDescription": "Counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. MM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.Penalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 0\u20132 cycles.",
+ "PublicDescription": "Counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. MM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.Penalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired Instructions who experienced decode stream buffer (DSB - the decoded instruction-cache) miss.",
+ "BriefDescription": "Retired Instructions who experienced decode stream buffer (DSB - the decoded instruction-cache) miss. Precise Event.",
"PEBS": "1",
"MSRValue": "0x11",
"Counter": "0,1,2,3",
"EventName": "FRONTEND_RETIRED.DSB_MISS",
"MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. ",
+ "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. \r\n",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
@@ -268,7 +268,7 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
+ "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss. Precise Event.",
"PEBS": "1",
"MSRValue": "0x12",
"Counter": "0,1,2,3",
@@ -281,7 +281,7 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
+ "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss. Precise Event.",
"PEBS": "1",
"MSRValue": "0x13",
"Counter": "0,1,2,3",
@@ -294,7 +294,7 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired Instructions who experienced iTLB true miss.",
+ "BriefDescription": "Retired Instructions who experienced iTLB true miss. Precise Event.",
"PEBS": "1",
"MSRValue": "0x14",
"Counter": "0,1,2,3",
@@ -308,13 +308,13 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
+ "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss. Precise Event.",
"PEBS": "1",
"MSRValue": "0x15",
"Counter": "0,1,2,3",
"EventName": "FRONTEND_RETIRED.STLB_MISS",
"MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss. ",
+ "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
@@ -322,7 +322,7 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
"MSRValue": "0x400206",
"Counter": "0,1,2,3",
@@ -335,7 +335,7 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 2 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 2 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
"MSRValue": "0x200206",
"Counter": "0,1,2,3",
@@ -348,7 +348,7 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
"MSRValue": "0x400406",
"Counter": "0,1,2,3",
@@ -367,7 +367,7 @@
"Counter": "0,1,2,3",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
"MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops. \r\n",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
@@ -375,13 +375,13 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
"MSRValue": "0x401006",
"Counter": "0,1,2,3",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
"MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.\r\n",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
@@ -389,13 +389,13 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
"MSRValue": "0x402006",
"Counter": "0,1,2,3",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
"MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.\r\n",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
@@ -403,7 +403,7 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
"MSRValue": "0x404006",
"Counter": "0,1,2,3",
@@ -416,7 +416,7 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
"MSRValue": "0x408006",
"Counter": "0,1,2,3",
@@ -429,7 +429,7 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
"MSRValue": "0x410006",
"Counter": "0,1,2,3",
@@ -442,7 +442,7 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
"MSRValue": "0x420006",
"Counter": "0,1,2,3",
@@ -455,13 +455,13 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
"MSRValue": "0x100206",
"Counter": "0,1,2,3",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
"MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.\r\n",
"TakenAlone": "1",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3"
@@ -469,7 +469,7 @@
{
"EventCode": "0xC6",
"UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 3 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 3 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
"PEBS": "1",
"MSRValue": "0x300206",
"Counter": "0,1,2,3",
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/memory.json b/tools/perf/pmu-events/arch/x86/skylakex/memory.json
index ca22a22c1abd..e7f1aa31226d 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/memory.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/memory.json
@@ -214,7 +214,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "HLE_RETIRED.ABORTED",
- "PublicDescription": "Number of times HLE abort was triggered.",
+ "PublicDescription": "Number of times HLE abort was triggered. (PEBS)",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -239,10 +239,9 @@
{
"EventCode": "0xC8",
"UMask": "0x20",
- "BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.). ",
+ "BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
"Counter": "0,1,2,3",
"EventName": "HLE_RETIRED.ABORTED_UNFRIENDLY",
- "PublicDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -292,7 +291,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "RTM_RETIRED.ABORTED",
- "PublicDescription": "Number of times RTM abort was triggered.",
+ "PublicDescription": "Number of times RTM abort was triggered. (PEBS)",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
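
HLE_RETIRED.ABORTED and RTM_RETIRED.ABORTED carry "PEBS": "1", i.e. the aborting instruction can be sampled precisely. A hedged perf_event_open sketch requesting PEBS via precise_ip; the 0xC8/0x04 raw encoding for HLE_RETIRED.ABORTED is an assumption, since the hunk elides the EventCode/UMask lines:

    /* hle_abort_sample.c - sketch: PEBS-sample HLE aborts. sample_period
     * mirrors the SampleAfterValue above; consuming samples from the perf
     * mmap ring buffer is left out for brevity.
     */
    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        struct perf_event_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_RAW;
        attr.config = 0x04c8;         /* assumed: event 0xC8, umask 0x4 (ABORTED) */
        attr.sample_period = 2000003; /* SampleAfterValue from the entry above */
        attr.sample_type = PERF_SAMPLE_IP;
        attr.precise_ip = 2;          /* request PEBS ("Precise Event") */
        attr.disabled = 1;
        attr.exclude_kernel = 1;

        fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
            perror("perf_event_open");
            return 1;
        }
        /* samples would be read from the perf mmap ring buffer here */
        close(fd);
        return 0;
    }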
@@ -466,7 +465,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that miss in the L3. ",
+ "PublicDescription": "Counts demand data reads that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -479,7 +478,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that miss the L3 and clean or shared data is transferred from remote cache. ",
+ "PublicDescription": "Counts demand data reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -492,7 +491,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that miss the L3 and the modified data is transferred from remote cache. ",
+ "PublicDescription": "Counts demand data reads that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -505,7 +504,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from local or remote dram. ",
+ "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -518,7 +517,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from remote dram. ",
+ "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -531,7 +530,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from local dram. ",
+ "PublicDescription": "Counts demand data reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -544,7 +543,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that miss in the L3. ",
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -557,7 +556,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and clean or shared data is transferred from remote cache. ",
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -570,7 +569,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the modified data is transferred from remote cache. ",
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -583,7 +582,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local or remote dram. ",
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -596,7 +595,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from remote dram. ",
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -609,7 +608,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local dram. ",
+ "PublicDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -622,7 +621,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that miss in the L3. ",
+ "PublicDescription": "Counts all demand code reads that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -635,7 +634,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that miss the L3 and clean or shared data is transferred from remote cache. ",
+ "PublicDescription": "Counts all demand code reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -648,7 +647,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that miss the L3 and the modified data is transferred from remote cache. ",
+ "PublicDescription": "Counts all demand code reads that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -661,7 +660,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from local or remote dram. ",
+ "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -674,7 +673,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from remote dram. ",
+ "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -687,7 +686,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from local dram. ",
+ "PublicDescription": "Counts all demand code reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -700,7 +699,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss in the L3. ",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -713,7 +712,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and clean or shared data is transferred from remote cache. ",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -726,7 +725,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the modified data is transferred from remote cache. ",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -739,7 +738,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from local or remote dram. ",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -752,7 +751,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from remote dram. ",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -765,7 +764,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from local dram. ",
+ "PublicDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -778,7 +777,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss in the L3. ",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -791,7 +790,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and clean or shared data is transferred from remote cache. ",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -804,7 +803,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the modified data is transferred from remote cache. ",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -817,7 +816,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from local or remote dram. ",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -830,7 +829,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from remote dram. ",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -843,7 +842,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from local dram. ",
+ "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -856,7 +855,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss in the L3. ",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -869,7 +868,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and clean or shared data is transferred from remote cache. ",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -882,7 +881,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the modified data is transferred from remote cache. ",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -895,7 +894,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from local or remote dram. ",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -908,7 +907,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from remote dram. ",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -921,7 +920,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from local dram. ",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -934,7 +933,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3. ",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -947,7 +946,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and clean or shared data is transferred from remote cache. ",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -960,7 +959,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the modified data is transferred from remote cache. ",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -973,7 +972,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from local or remote dram. ",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -986,7 +985,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from remote dram. ",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -999,7 +998,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from local dram. ",
+ "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1012,7 +1011,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss in the L3. ",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1025,7 +1024,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and clean or shared data is transferred from remote cache. ",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1038,7 +1037,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the modified data is transferred from remote cache. ",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1051,7 +1050,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from local or remote dram. ",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1064,7 +1063,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from remote dram. ",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1077,7 +1076,85 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from local dram. ",
+ "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests that miss in the L3.",
+ "MSRValue": "0x3fbc008000 ",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts any other requests that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests that miss the L3 and clean or shared data is transferred from remote cache.",
+ "MSRValue": "0x083fc08000 ",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts any other requests that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests that miss the L3 and the modified data is transferred from remote cache.",
+ "MSRValue": "0x103fc08000 ",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts any other requests that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests that miss the L3 and the data is returned from local or remote dram.",
+ "MSRValue": "0x063fc08000 ",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts any other requests that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests that miss the L3 and the data is returned from remote dram.",
+ "MSRValue": "0x063b808000 ",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts any other requests that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests that miss the L3 and the data is returned from local dram.",
+ "MSRValue": "0x0604008000 ",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Counts any other requests that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
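
The six new OFFCORE_RESPONSE.OTHER.L3_MISS* entries above differ only in their MSRValue, and the values share a common shape: the low 16 bits carry the request-type mask (bit 15, 0x8000, is the "other" request class), while the bits above it select the supplier and snoop-response filters that distinguish ANY_SNOOP from the REMOTE_HITM and DRAM variants. A sketch that splits each value at bit 16, the request-type boundary of the off-core response MSRs; the meaning of each individual upper bit is not spelled out in this file:

    /* offcore_msrvals.c - sketch: split the OTHER.L3_MISS* MSRValues into the
     * request-type field and the supplier/snoop bits above it.
     */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        const uint64_t vals[] = {
            0x3fbc008000ULL, /* L3_MISS.ANY_SNOOP */
            0x083fc08000ULL, /* L3_MISS.REMOTE_HIT_FORWARD */
            0x103fc08000ULL, /* L3_MISS.REMOTE_HITM */
            0x063fc08000ULL, /* L3_MISS.SNOOP_MISS_OR_NO_FWD */
            0x063b808000ULL, /* L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD */
            0x0604008000ULL, /* L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD */
        };
        for (size_t i = 0; i < sizeof(vals) / sizeof(vals[0]); i++)
            printf("req=0x%04llx supplier/snoop=0x%09llx\n",
                   (unsigned long long)(vals[i] & 0xffff),
                   (unsigned long long)(vals[i] >> 16));
        return 0;
    }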
@@ -1090,7 +1167,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that miss in the L3. ",
+ "PublicDescription": "Counts all prefetch data reads that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1103,7 +1180,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache. ",
+ "PublicDescription": "Counts all prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1116,7 +1193,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that miss the L3 and the modified data is transferred from remote cache. ",
+ "PublicDescription": "Counts all prefetch data reads that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1129,7 +1206,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from local or remote dram. ",
+ "PublicDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1142,7 +1219,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from remote dram. ",
+ "PublicDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1155,7 +1232,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from local dram. ",
+ "PublicDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1168,7 +1245,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that miss in the L3. ",
+ "PublicDescription": "Counts prefetch RFOs that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1181,7 +1258,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that miss the L3 and clean or shared data is transferred from remote cache. ",
+ "PublicDescription": "Counts prefetch RFOs that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1194,7 +1271,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that miss the L3 and the modified data is transferred from remote cache. ",
+ "PublicDescription": "Counts prefetch RFOs that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1207,7 +1284,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from local or remote dram. ",
+ "PublicDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1220,7 +1297,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from remote dram. ",
+ "PublicDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1233,7 +1310,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from local dram. ",
+ "PublicDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1246,7 +1323,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss in the L3. ",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1259,7 +1336,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache. ",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1272,7 +1349,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the modified data is transferred from remote cache. ",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1285,7 +1362,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local or remote dram. ",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1298,7 +1375,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from remote dram. ",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1311,7 +1388,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram. ",
+ "PublicDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1324,7 +1401,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that miss in the L3. ",
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss in the L3. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1337,7 +1414,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and clean or shared data is transferred from remote cache. ",
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and clean or shared data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1350,7 +1427,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the modified data is transferred from remote cache. ",
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the modified data is transferred from remote cache. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1363,7 +1440,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local or remote dram. ",
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local or remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1376,7 +1453,7 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from remote dram. ",
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from remote dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -1389,8 +1466,8 @@
"Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram.",
+ "PublicDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram. Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
}
-]
+]
\ No newline at end of file
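The sentence appended to each description above refers to the offcore-response facility: the event select only picks one of the two dedicated OFFCORE_RESPONSE counters, and the request/supplier/snoop attributes of the transaction are programmed into a companion MSR, which is why every entry carries the "0x1a6,0x1a7" MSRIndex. A minimal sketch of that pairing, assuming the conventional Skylake-SP encodings from the SDM (they are not spelled out in this patch):

/* Illustration of the event-select/MSR pairing the descriptions
 * above refer to. The encodings are an assumption based on the SDM,
 * not something this patch defines. */
struct offcore_pair {
	unsigned int event;	/* event select written to IA32_PERFEVTSELx */
	unsigned int umask;
	unsigned int msr;	/* dedicated MSR holding the attribute bits */
};

static const struct offcore_pair offcore_rsp[] = {
	{ .event = 0xB7, .umask = 0x01, .msr = 0x1A6 },	/* OFFCORE_RESPONSE_0 */
	{ .event = 0xBB, .umask = 0x01, .msr = 0x1A7 },	/* OFFCORE_RESPONSE_1 */
};

perf resolves the pairing automatically when an event is requested by its JSON name; the table only illustrates what the repeated sentence means.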
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/other.json b/tools/perf/pmu-events/arch/x86/skylakex/other.json
index 70243b0b0586..778a541463eb 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/other.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/other.json
@@ -40,6 +40,42 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0x32",
+ "UMask": "0x1",
+ "BriefDescription": "Number of PREFETCHNTA instructions executed.",
+ "Counter": "0,1,2,3",
+ "EventName": "SW_PREFETCH_ACCESS.NTA",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x32",
+ "UMask": "0x2",
+ "BriefDescription": "Number of PREFETCHT0 instructions executed.",
+ "Counter": "0,1,2,3",
+ "EventName": "SW_PREFETCH_ACCESS.T0",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x32",
+ "UMask": "0x4",
+ "BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "Counter": "0,1,2,3",
+ "EventName": "SW_PREFETCH_ACCESS.T1_T2",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x32",
+ "UMask": "0x8",
+ "BriefDescription": "Number of PREFETCHW instructions executed.",
+ "Counter": "0,1,2,3",
+ "EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xCB",
"UMask": "0x1",
"BriefDescription": "Number of hardware interrupts received by the processor.",
@@ -50,6 +86,62 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
+ "EventCode": "0xEF",
+ "UMask": "0x1",
+ "Counter": "0,1,2,3",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_IHITI",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xEF",
+ "UMask": "0x2",
+ "Counter": "0,1,2,3",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_IHITFSE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xEF",
+ "UMask": "0x4",
+ "Counter": "0,1,2,3",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_SHITFSE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xEF",
+ "UMask": "0x8",
+ "Counter": "0,1,2,3",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_SFWDM",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xEF",
+ "UMask": "0x10",
+ "Counter": "0,1,2,3",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_IFWDM",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xEF",
+ "UMask": "0x20",
+ "Counter": "0,1,2,3",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_IFWDFE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xEF",
+ "UMask": "0x40",
+ "Counter": "0,1,2,3",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_SFWDFE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
"EventCode": "0xFE",
"UMask": "0x2",
"BriefDescription": "Counts number of cache lines that are allocated and written back to L3 with the intention that they are more likely to be reused shortly",
@@ -69,4 +161,4 @@
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
}
-]
+]
\ No newline at end of file
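The SW_PREFETCH_ACCESS.* and CORE_SNOOP_RESPONSE.* entries added above are ordinary event-select/umask events, so they can also be counted through the raw interface on a perf build that does not yet carry this JSON. A hedged sketch, assuming the usual x86 raw encoding of umask in bits 15:8 and event code in bits 7:0:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <unistd.h>

/* Count SW_PREFETCH_ACCESS.NTA (event 0x32, umask 0x01) by raw encoding.
 * The umask<<8 | event packing is the standard x86 layout; verify it
 * against your perf version before relying on it. */
static int open_sw_prefetch_nta(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = (0x01 << 8) | 0x32;	/* umask 0x1, event 0x32 */
	attr.disabled = 1;

	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}

With a perf build that does have the JSON, perf stat -e sw_prefetch_access.nta resolves the same encoding by name.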
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json b/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json
index 0895d1e52a4a..f99f7ae27820 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json
@@ -3,41 +3,41 @@
"EventCode": "0x00",
"UMask": "0x1",
"BriefDescription": "Instructions retired from execution.",
- "Counter": "Fixed counter 1",
+ "Counter": "Fixed counter 0",
"EventName": "INST_RETIRED.ANY",
"PublicDescription": "Counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, Counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. Counting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 1"
+ "CounterHTOff": "Fixed counter 0"
},
{
"EventCode": "0x00",
"UMask": "0x2",
"BriefDescription": "Core cycles when the thread is not in halt state",
- "Counter": "Fixed counter 2",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD",
"PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 2"
+ "CounterHTOff": "Fixed counter 1"
},
{
"EventCode": "0x00",
"UMask": "0x2",
"BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "Counter": "Fixed counter 2",
+ "Counter": "Fixed counter 1",
"EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
"AnyThread": "1",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 2"
+ "CounterHTOff": "Fixed counter 1"
},
{
"EventCode": "0x00",
"UMask": "0x3",
"BriefDescription": "Reference cycles when the core is not in halt state.",
- "Counter": "Fixed counter 3",
+ "Counter": "Fixed counter 2",
"EventName": "CPU_CLK_UNHALTED.REF_TSC",
"PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 3"
+ "CounterHTOff": "Fixed counter 2"
},
{
"EventCode": "0x03",
@@ -126,7 +126,7 @@
"BriefDescription": "Uops inserted at issue-stage in order to preserve upper bits of vector registers.",
"Counter": "0,1,2,3",
"EventName": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH",
- "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to \u201cMixing Intel AVX and Intel SSE Code\u201d section of the Optimization Guide.",
+ "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to Mixing Intel AVX and Intel SSE Code section of the Optimization Guide.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -762,11 +762,10 @@
"EdgeDetect": "1",
"EventCode": "0xC3",
"UMask": "0x1",
- "BriefDescription": "Number of machine clears (nukes) of any type. ",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
"Counter": "0,1,2,3",
"EventName": "MACHINE_CLEARS.COUNT",
"CounterMask": "1",
- "PublicDescription": "Number of machine clears (nukes) of any type.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -799,7 +798,7 @@
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
"Errata": "SKL091",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts conditional branch instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts conditional branch instructions retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -811,14 +810,14 @@
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
"Errata": "SKL091",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts both direct and indirect near call instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts both direct and indirect near call instructions retired.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC4",
"UMask": "0x4",
- "BriefDescription": "All (macro) branch instructions retired. ",
+ "BriefDescription": "All (macro) branch instructions retired.",
"PEBS": "2",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
@@ -835,7 +834,7 @@
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_RETURN",
"Errata": "SKL091",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts return instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts return instructions retired.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -858,19 +857,19 @@
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.NEAR_TAKEN",
"Errata": "SKL091",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts taken branch instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts taken branch instructions retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC4",
"UMask": "0x40",
- "BriefDescription": "Far branch instructions retired.",
+ "BriefDescription": "Counts the number of far branch instructions retired.",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_INST_RETIRED.FAR_BRANCH",
"Errata": "SKL091",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts far branch instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts far branch instructions retired.",
"SampleAfterValue": "100007",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -891,7 +890,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.CONDITIONAL",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted conditional branch instructions retired.",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts mispredicted conditional branch instructions retired.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -902,14 +901,14 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.NEAR_CALL",
- "PublicDescription": "Counts both taken and not taken retired mispredicted direct and indirect near calls, including both register and memory indirect.",
+ "PublicDescription": "This event counts both taken and not taken retired mispredicted direct and indirect near calls, including both register and memory indirect.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0xC5",
"UMask": "0x4",
- "BriefDescription": "Mispredicted macro branch instructions retired. ",
+ "BriefDescription": "Mispredicted macro branch instructions retired.",
"PEBS": "2",
"Counter": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
@@ -920,10 +919,11 @@
{
"EventCode": "0xC5",
"UMask": "0x20",
- "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken. ",
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken.",
"SampleAfterValue": "400009",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
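Two things happen in this file besides whitespace cleanup: the fixed counters are renumbered from 1-based labels to the 0-based names that match the architectural IA32_FIXED_CTRn registers, and several retired-branch descriptions flip from "non-precise" to "precise (uses PEBS)" to agree with the PEBS flag already present on those entries. The counter mapping the renumbering encodes, with MSR addresses stated as an assumption from the SDM rather than taken from this patch:

/* Zero-based fixed counter layout the renumbering above encodes.
 * MSR addresses are the architectural IA32_FIXED_CTR0..2; check the
 * SDM for your part, this is only an illustration. */
enum skx_fixed_counter {
	FIXED_CTR_INST_RETIRED_ANY = 0,		/* IA32_FIXED_CTR0, MSR 0x309 */
	FIXED_CTR_CPU_CLK_UNHALTED = 1,		/* IA32_FIXED_CTR1, MSR 0x30A */
	FIXED_CTR_REF_TSC          = 2,		/* IA32_FIXED_CTR2, MSR 0x30B */
};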
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/virtual-memory.json b/tools/perf/pmu-events/arch/x86/skylakex/virtual-memory.json
index 70750dab7ead..7f466c97e485 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/virtual-memory.json
@@ -12,30 +12,30 @@
{
"EventCode": "0x08",
"UMask": "0x2",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (4K).",
+ "BriefDescription": "Page walk completed due to a demand data load to a 4K page",
"Counter": "0,1,2,3",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
- "PublicDescription": "Counts demand data loads that caused a completed page walk (4K page size). This implies it missed in all TLB levels. The page walk can end with or without a fault.",
+ "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x08",
"UMask": "0x4",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (2M/4M).",
+ "BriefDescription": "Page walk completed due to a demand data load to a 2M/4M page",
"Counter": "0,1,2,3",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
- "PublicDescription": "Counts demand data loads that caused a completed page walk (2M and 4M page sizes). This implies it missed in all TLB levels. The page walk can end with or without a fault.",
+ "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 2M/4M pages. The page walks can end with or without a page fault.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x08",
"UMask": "0x8",
- "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (1G)",
+ "BriefDescription": "Page walk completed due to a demand data load to a 1G page",
"Counter": "0,1,2,3",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
- "PublicDescription": "Counts load misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
+ "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -52,17 +52,17 @@
{
"EventCode": "0x08",
"UMask": "0x10",
- "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake. ",
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake.",
"Counter": "0,1,2,3",
"EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
- "PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake microarchitecture. ",
+ "PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake microarchitecture.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x08",
"UMask": "0x10",
- "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a load. EPT page walk duration are excluded in Skylake. ",
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a load. EPT page walk duration are excluded in Skylake.",
"Counter": "0,1,2,3",
"EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
"CounterMask": "1",
@@ -93,30 +93,30 @@
{
"EventCode": "0x49",
"UMask": "0x2",
- "BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (4K)",
+ "BriefDescription": "Page walk completed due to a demand data store to a 4K page",
"Counter": "0,1,2,3",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
- "PublicDescription": "Counts demand data stores that caused a completed page walk (4K page size). This implies it missed in all TLB levels. The page walk can end with or without a fault.",
+ "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x49",
"UMask": "0x4",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks (2M/4M)",
+ "BriefDescription": "Page walk completed due to a demand data store to a 2M/4M page",
"Counter": "0,1,2,3",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
- "PublicDescription": "Counts demand data stores that caused a completed page walk (2M and 4M page sizes). This implies it missed in all TLB levels. The page walk can end with or without a fault.",
+ "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 2M/4M pages. The page walks can end with or without a page fault.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x49",
"UMask": "0x8",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks (1G)",
+ "BriefDescription": "Page walk completed due to a demand data store to a 1G page",
"Counter": "0,1,2,3",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
- "PublicDescription": "Counts store misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
+ "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 1G pages. The page walks can end with or without a page fault.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -133,17 +133,17 @@
{
"EventCode": "0x49",
"UMask": "0x10",
- "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake. ",
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake.",
"Counter": "0,1,2,3",
"EventName": "DTLB_STORE_MISSES.WALK_PENDING",
- "PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake microarchitecture. ",
+ "PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake microarchitecture.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
"EventCode": "0x49",
"UMask": "0x10",
- "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store. EPT page walk duration are excluded in Skylake. ",
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store. EPT page walk duration are excluded in Skylake.",
"Counter": "0,1,2,3",
"EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
"CounterMask": "1",
@@ -197,7 +197,7 @@
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
"Counter": "0,1,2,3",
"EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
- "PublicDescription": "Counts completed page walks of any page size (4K/2M/4M/1G) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB. The page walk can end with or without a fault.",
+ "PublicDescription": "Counts code misses in all ITLB levels that caused a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
@@ -224,10 +224,10 @@
{
"EventCode": "0x85",
"UMask": "0x10",
- "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake. ",
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake.",
"Counter": "0,1,2,3",
"EventName": "ITLB_MISSES.WALK_PENDING",
- "PublicDescription": "Counts 1 per cycle for each PMH (Page Miss Handler) that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake michroarchitecture. ",
+ "PublicDescription": "Counts 1 per cycle for each PMH (Page Miss Handler) that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake michroarchitecture.",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
index 9eb7047bafe4..b578aa26e375 100644
--- a/tools/perf/pmu-events/jevents.c
+++ b/tools/perf/pmu-events/jevents.c
@@ -116,6 +116,43 @@ static void fixdesc(char *s)
*e = 0;
}
+/* Add escapes for '\' so they are proper C strings. */
+static char *fixregex(char *s)
+{
+ int len = 0;
+ int esc_count = 0;
+ char *fixed = NULL;
+ char *p, *q;
+
+ /* Count the number of '\' in string */
+ for (p = s; *p; p++) {
+ ++len;
+ if (*p == '\\')
+ ++esc_count;
+ }
+
+ if (esc_count == 0)
+ return s;
+
+ /* allocate space for a new string */
+ fixed = (char *) malloc(len + 1);
+ if (!fixed)
+ return NULL;
+
+ /* copy over the characters */
+ q = fixed;
+ for (p = s; *p; p++) {
+ if (*p == '\\') {
+ *q = '\\';
+ ++q;
+ }
+ *q = *p;
+ ++q;
+ }
+ *q = '\0';
+ return fixed;
+}
+
static struct msrmap {
const char *num;
const char *pname;
@@ -648,7 +685,7 @@ static int process_mapfile(FILE *outfp, char *fpath)
}
line[strlen(line)-1] = '\0';
- cpuid = strtok_r(p, ",", &save);
+ cpuid = fixregex(strtok_r(p, ",", &save));
version = strtok_r(NULL, ",", &save);
fname = strtok_r(NULL, ",", &save);
type = strtok_r(NULL, ",", &save);
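fixregex() exists because the cpuid column of a mapfile may hold a regular expression, and process_mapfile() emits that string into a generated C source file, where a lone '\' would start an escape sequence; doubling each backslash keeps the compiled string identical to the mapfile text. A hypothetical demonstration (fixregex() is static to jevents.c, so this would have to sit in the same file):

/* Hypothetical check of fixregex(); the cpuid pattern below is
 * illustrative, not from any real mapfile. */
#include <stdio.h>

static void fixregex_demo(void)
{
	char cpuid[] = "AMD-\\d+-.*";		/* mapfile text: AMD-\d+-.* */
	char *fixed = fixregex(cpuid);		/* yields:       AMD-\\d+-.* */

	if (fixed)
		printf("%s\n", fixed);		/* prints AMD-\\d+-.* */
}

When the string contains no backslash, fixregex() returns its argument unchanged, so the common case allocates nothing.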
diff --git a/tools/perf/scripts/python/bin/mem-phys-addr-record b/tools/perf/scripts/python/bin/mem-phys-addr-record
new file mode 100644
index 000000000000..5a875122a904
--- /dev/null
+++ b/tools/perf/scripts/python/bin/mem-phys-addr-record
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+#
+# Profiling physical memory by all retired load instructions/uops event
+# MEM_INST_RETIRED.ALL_LOADS or MEM_UOPS_RETIRED.ALL_LOADS
+#
+
+load=`perf list | grep mem_inst_retired.all_loads`
+if [ -z "$load" ]; then
+ load=`perf list | grep mem_uops_retired.all_loads`
+fi
+if [ -z "$load" ]; then
+ echo "There is no event to count all retired load instructions/uops."
+ exit 1
+fi
+
+arg=$(echo $load | tr -d ' ')
+arg="$arg:P"
+perf record --phys-data -e $arg $@
diff --git a/tools/perf/scripts/python/bin/mem-phys-addr-report b/tools/perf/scripts/python/bin/mem-phys-addr-report
new file mode 100644
index 000000000000..3f2b847e2eab
--- /dev/null
+++ b/tools/perf/scripts/python/bin/mem-phys-addr-report
@@ -0,0 +1,3 @@
+#!/bin/bash
+# description: resolve physical address samples
+perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/mem-phys-addr.py
diff --git a/tools/perf/scripts/python/mem-phys-addr.py b/tools/perf/scripts/python/mem-phys-addr.py
new file mode 100644
index 000000000000..ebee2c5ae496
--- /dev/null
+++ b/tools/perf/scripts/python/mem-phys-addr.py
@@ -0,0 +1,95 @@
+# mem-phys-addr.py: Resolve physical address samples
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (c) 2018, Intel Corporation.
+
+from __future__ import division
+import os
+import sys
+import struct
+import re
+import bisect
+import collections
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+#physical address ranges for System RAM
+system_ram = []
+#physical address ranges for Persistent Memory
+pmem = []
+#file object for proc iomem
+f = None
+#Count for each type of memory
+load_mem_type_cnt = collections.Counter()
+#perf event name
+event_name = None
+
+def parse_iomem():
+ global f
+ f = open('/proc/iomem', 'r')
+ for i, j in enumerate(f):
+ m = re.split('-|:',j,2)
+ if m[2].strip() == 'System RAM':
+ system_ram.append(long(m[0], 16))
+ system_ram.append(long(m[1], 16))
+ if m[2].strip() == 'Persistent Memory':
+ pmem.append(long(m[0], 16))
+ pmem.append(long(m[1], 16))
+
+def print_memory_type():
+ print "Event: %s" % (event_name)
+ print "%-40s %10s %10s\n" % ("Memory type", "count", "percentage"),
+ print "%-40s %10s %10s\n" % ("----------------------------------------", \
+ "-----------", "-----------"),
+ total = sum(load_mem_type_cnt.values())
+ for mem_type, count in sorted(load_mem_type_cnt.most_common(), \
+ key = lambda(k, v): (v, k), reverse = True):
+ print "%-40s %10d %10.1f%%\n" % (mem_type, count, 100 * count / total),
+
+def trace_begin():
+ parse_iomem()
+
+def trace_end():
+ print_memory_type()
+ f.close()
+
+def is_system_ram(phys_addr):
+ #/proc/iomem is sorted
+ position = bisect.bisect(system_ram, phys_addr)
+ if position % 2 == 0:
+ return False
+ return True
+
+def is_persistent_mem(phys_addr):
+ position = bisect.bisect(pmem, phys_addr)
+ if position % 2 == 0:
+ return False
+ return True
+
+def find_memory_type(phys_addr):
+ if phys_addr == 0:
+ return "N/A"
+ if is_system_ram(phys_addr):
+ return "System RAM"
+
+ if is_persistent_mem(phys_addr):
+ return "Persistent Memory"
+
+ #slow path, search all
+ f.seek(0, 0)
+ for j in f:
+ m = re.split('-|:',j,2)
+ if long(m[0], 16) <= phys_addr <= long(m[1], 16):
+ return m[2]
+ return "N/A"
+
+def process_event(param_dict):
+ name = param_dict["ev_name"]
+ sample = param_dict["sample"]
+ phys_addr = sample["phys_addr"]
+
+ global event_name
+ if event_name == None:
+ event_name = name
+ load_mem_type_cnt[find_memory_type(phys_addr)] += 1
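Note that the script targets Python 2 (print statements, long(), tuple-unpacking lambdas), matching perf's embedded scripting interpreter at this point. Its central trick is storing the /proc/iomem ranges as one flat sorted list of start,end boundaries, so bisect returns an odd insertion point exactly when an address falls inside a range. The same idea in C, as an illustrative sketch with made-up names:

/* Illustrative C version of the even/odd bisect trick used by
 * is_system_ram()/is_persistent_mem(): boundaries holds sorted
 * start,end pairs flattened into one array, so an address is inside
 * some range iff the count of boundaries <= addr is odd. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool addr_in_ranges(const uint64_t *boundaries, size_t n, uint64_t addr)
{
	size_t lo = 0, hi = n;

	while (lo < hi) {		/* upper-bound search, like bisect.bisect() */
		size_t mid = lo + (hi - lo) / 2;

		if (boundaries[mid] <= addr)
			lo = mid + 1;
		else
			hi = mid;
	}
	return lo & 1;			/* odd insertion point => inside a range */
}

For example, with boundaries {0x1000, 0x1fff, 0x100000, 0x1fffff}, addr 0x1800 gives insertion point 1 (odd, inside) and addr 0x2000 gives 2 (even, outside).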
diff --git a/tools/perf/tests/attr.c b/tools/perf/tests/attr.c
index 0e1367f90af5..97f64ad7fa08 100644
--- a/tools/perf/tests/attr.c
+++ b/tools/perf/tests/attr.c
@@ -124,6 +124,12 @@ static int store_event(struct perf_event_attr *attr, pid_t pid, int cpu,
WRITE_ASS(exclude_guest, "d");
WRITE_ASS(exclude_callchain_kernel, "d");
WRITE_ASS(exclude_callchain_user, "d");
+ WRITE_ASS(mmap2, "d");
+ WRITE_ASS(comm_exec, "d");
+ WRITE_ASS(context_switch, "d");
+ WRITE_ASS(write_backward, "d");
+ WRITE_ASS(namespaces, "d");
+ WRITE_ASS(use_clockid, "d");
WRITE_ASS(wakeup_events, PRIu32);
WRITE_ASS(bp_type, PRIu32);
WRITE_ASS(config1, "llu");
diff --git a/tools/perf/tests/backward-ring-buffer.c b/tools/perf/tests/backward-ring-buffer.c
index 71b9a0b613d2..4035d43523c3 100644
--- a/tools/perf/tests/backward-ring-buffer.c
+++ b/tools/perf/tests/backward-ring-buffer.c
@@ -33,8 +33,8 @@ static int count_samples(struct perf_evlist *evlist, int *sample_count,
for (i = 0; i < evlist->nr_mmaps; i++) {
union perf_event *event;
- perf_mmap__read_catchup(&evlist->backward_mmap[i]);
- while ((event = perf_mmap__read_backward(&evlist->backward_mmap[i])) != NULL) {
+ perf_mmap__read_catchup(&evlist->overwrite_mmap[i]);
+ while ((event = perf_mmap__read_backward(&evlist->overwrite_mmap[i])) != NULL) {
const u32 type = event->header.type;
switch (type) {
@@ -59,7 +59,7 @@ static int do_test(struct perf_evlist *evlist, int mmap_pages,
int err;
char sbuf[STRERR_BUFSIZE];
- err = perf_evlist__mmap(evlist, mmap_pages, true);
+ err = perf_evlist__mmap(evlist, mmap_pages);
if (err < 0) {
pr_debug("perf_evlist__mmap: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
diff --git a/tools/perf/tests/bp_signal.c b/tools/perf/tests/bp_signal.c
index 335b695f4970..a467615c5a0e 100644
--- a/tools/perf/tests/bp_signal.c
+++ b/tools/perf/tests/bp_signal.c
@@ -296,7 +296,7 @@ bool test__bp_signal_is_supported(void)
* instruction breakpoint using the perf event interface.
* Once it's there we can release this.
*/
-#ifdef __powerpc__
+#if defined(__powerpc__) || defined(__s390x__)
return false;
#else
return true;
diff --git a/tools/perf/tests/bpf-script-example.c b/tools/perf/tests/bpf-script-example.c
index 268e5f8e4aa2..e4123c1b0e88 100644
--- a/tools/perf/tests/bpf-script-example.c
+++ b/tools/perf/tests/bpf-script-example.c
@@ -31,8 +31,8 @@ struct bpf_map_def SEC("maps") flip_table = {
.max_entries = 1,
};
-SEC("func=SyS_epoll_wait")
-int bpf_func__SyS_epoll_wait(void *ctx)
+SEC("func=SyS_epoll_pwait")
+int bpf_func__SyS_epoll_pwait(void *ctx)
{
int ind =0;
int *flag = bpf_map_lookup_elem(&flip_table, &ind);
diff --git a/tools/perf/tests/bpf.c b/tools/perf/tests/bpf.c
index 34c22cdf4d5d..e8399beca62b 100644
--- a/tools/perf/tests/bpf.c
+++ b/tools/perf/tests/bpf.c
@@ -3,6 +3,7 @@
#include <sys/epoll.h>
#include <sys/types.h>
#include <sys/stat.h>
+#include <fcntl.h>
#include <util/util.h>
#include <util/bpf-loader.h>
#include <util/evlist.h>
@@ -19,13 +20,13 @@
#ifdef HAVE_LIBBPF_SUPPORT
-static int epoll_wait_loop(void)
+static int epoll_pwait_loop(void)
{
int i;
/* Should fail NR_ITERS times */
for (i = 0; i < NR_ITERS; i++)
- epoll_wait(-(i + 1), NULL, 0, 0);
+ epoll_pwait(-(i + 1), NULL, 0, 0, NULL);
return 0;
}
@@ -63,46 +64,41 @@ static struct {
bool pin;
} bpf_testcase_table[] = {
{
- LLVM_TESTCASE_BASE,
- "Basic BPF filtering",
- "[basic_bpf_test]",
- "fix 'perf test LLVM' first",
- "load bpf object failed",
- &epoll_wait_loop,
- (NR_ITERS + 1) / 2,
- false,
+ .prog_id = LLVM_TESTCASE_BASE,
+ .desc = "Basic BPF filtering",
+ .name = "[basic_bpf_test]",
+ .msg_compile_fail = "fix 'perf test LLVM' first",
+ .msg_load_fail = "load bpf object failed",
+ .target_func = &epoll_pwait_loop,
+ .expect_result = (NR_ITERS + 1) / 2,
},
{
- LLVM_TESTCASE_BASE,
- "BPF pinning",
- "[bpf_pinning]",
- "fix kbuild first",
- "check your vmlinux setting?",
- &epoll_wait_loop,
- (NR_ITERS + 1) / 2,
- true,
+ .prog_id = LLVM_TESTCASE_BASE,
+ .desc = "BPF pinning",
+ .name = "[bpf_pinning]",
+ .msg_compile_fail = "fix kbuild first",
+ .msg_load_fail = "check your vmlinux setting?",
+ .target_func = &epoll_pwait_loop,
+ .expect_result = (NR_ITERS + 1) / 2,
+ .pin = true,
},
#ifdef HAVE_BPF_PROLOGUE
{
- LLVM_TESTCASE_BPF_PROLOGUE,
- "BPF prologue generation",
- "[bpf_prologue_test]",
- "fix kbuild first",
- "check your vmlinux setting?",
- &llseek_loop,
- (NR_ITERS + 1) / 4,
- false,
+ .prog_id = LLVM_TESTCASE_BPF_PROLOGUE,
+ .desc = "BPF prologue generation",
+ .name = "[bpf_prologue_test]",
+ .msg_compile_fail = "fix kbuild first",
+ .msg_load_fail = "check your vmlinux setting?",
+ .target_func = &llseek_loop,
+ .expect_result = (NR_ITERS + 1) / 4,
},
#endif
{
- LLVM_TESTCASE_BPF_RELOCATION,
- "BPF relocation checker",
- "[bpf_relocation_test]",
- "fix 'perf test LLVM' first",
- "libbpf error when dealing with relocation",
- NULL,
- 0,
- false,
+ .prog_id = LLVM_TESTCASE_BPF_RELOCATION,
+ .desc = "BPF relocation checker",
+ .name = "[bpf_relocation_test]",
+ .msg_compile_fail = "fix 'perf test LLVM' first",
+ .msg_load_fail = "libbpf error when dealing with relocation",
},
};
@@ -167,7 +163,7 @@ static int do_test(struct bpf_object *obj, int (*func)(void),
goto out_delete_evlist;
}
- err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
+ err = perf_evlist__mmap(evlist, opts.mmap_pages);
if (err < 0) {
pr_debug("perf_evlist__mmap: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
@@ -190,7 +186,7 @@ static int do_test(struct bpf_object *obj, int (*func)(void),
}
if (count != expect) {
- pr_debug("BPF filter result incorrect\n");
+ pr_debug("BPF filter result incorrect, expected %d, got %d samples\n", expect, count);
goto out_delete_evlist;
}
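The conversion of bpf_testcase_table to designated initializers is more than tidying: members omitted from a designated initializer are zero-initialized in C, which is why the explicit NULL, 0 and false entries could simply be dropped. A standalone illustration of that guarantee:

/* Standalone illustration of the zero-initialization rule the
 * bpf_testcase_table conversion relies on (C99 6.7.8p21). */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct testcase {
	const char *desc;
	int (*target_func)(void);
	int expect_result;
	bool pin;
};

int main(void)
{
	struct testcase t = { .desc = "BPF relocation checker" };

	assert(t.target_func == NULL);	/* omitted members are zeroed */
	assert(t.expect_result == 0);
	assert(!t.pin);
	return 0;
}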
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index 766573e236e4..fafa014240cd 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -411,9 +411,9 @@ static const char *shell_test__description(char *description, size_t size,
return description ? trim(description + 1) : NULL;
}
-#define for_each_shell_test(dir, ent) \
+#define for_each_shell_test(dir, base, ent) \
while ((ent = readdir(dir)) != NULL) \
- if (ent->d_type == DT_REG && ent->d_name[0] != '.')
+ if (!is_directory(base, ent))
static const char *shell_tests__dir(char *path, size_t size)
{
@@ -452,7 +452,7 @@ static int shell_tests__max_desc_width(void)
if (!dir)
return -1;
- for_each_shell_test(dir, ent) {
+ for_each_shell_test(dir, path, ent) {
char bf[256];
const char *desc = shell_test__description(bf, sizeof(bf), path, ent->d_name);
@@ -504,7 +504,7 @@ static int run_shell_tests(int argc, const char *argv[], int i, int width)
if (!dir)
return -1;
- for_each_shell_test(dir, ent) {
+ for_each_shell_test(dir, st.dir, ent) {
int curr = i++;
char desc[256];
struct test test = {
@@ -614,7 +614,7 @@ static int perf_test__list_shell(int argc, const char **argv, int i)
if (!dir)
return -1;
- for_each_shell_test(dir, ent) {
+ for_each_shell_test(dir, path, ent) {
int curr = i++;
char bf[256];
struct test t = {
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index fcc8984bc329..3bf7b145b826 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -639,7 +639,7 @@ static int do_test_code_reading(bool try_kcore)
break;
}
- ret = perf_evlist__mmap(evlist, UINT_MAX, false);
+ ret = perf_evlist__mmap(evlist, UINT_MAX);
if (ret < 0) {
pr_debug("perf_evlist__mmap failed\n");
goto out_put;
diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c
index ac40e05bcab4..260418969120 100644
--- a/tools/perf/tests/dwarf-unwind.c
+++ b/tools/perf/tests/dwarf-unwind.c
@@ -173,6 +173,7 @@ int test__dwarf_unwind(struct test *test __maybe_unused, int subtest __maybe_unu
}
callchain_param.record_mode = CALLCHAIN_DWARF;
+ dwarf_callchain_users = true;
if (init_live_machine(machine)) {
pr_err("Could not init machine\n");
diff --git a/tools/perf/tests/keep-tracking.c b/tools/perf/tests/keep-tracking.c
index 842d33637a18..c46530918938 100644
--- a/tools/perf/tests/keep-tracking.c
+++ b/tools/perf/tests/keep-tracking.c
@@ -95,7 +95,7 @@ int test__keep_tracking(struct test *test __maybe_unused, int subtest __maybe_un
goto out_err;
}
- CHECK__(perf_evlist__mmap(evlist, UINT_MAX, false));
+ CHECK__(perf_evlist__mmap(evlist, UINT_MAX));
/*
* First, test that a 'comm' event can be found when the event is
diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c
index 5a8bf318f8a7..c0e971da965c 100644
--- a/tools/perf/tests/mmap-basic.c
+++ b/tools/perf/tests/mmap-basic.c
@@ -94,7 +94,7 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse
expected_nr_events[i] = 1 + rand() % 127;
}
- if (perf_evlist__mmap(evlist, 128, true) < 0) {
+ if (perf_evlist__mmap(evlist, 128) < 0) {
pr_debug("failed to mmap events: %d (%s)\n", errno,
str_error_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist;
diff --git a/tools/perf/tests/openat-syscall-tp-fields.c b/tools/perf/tests/openat-syscall-tp-fields.c
index d9619d265314..43519267b93b 100644
--- a/tools/perf/tests/openat-syscall-tp-fields.c
+++ b/tools/perf/tests/openat-syscall-tp-fields.c
@@ -1,5 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
#include "perf.h"
#include "evlist.h"
#include "evsel.h"
@@ -64,7 +67,7 @@ int test__syscall_openat_tp_fields(struct test *test __maybe_unused, int subtest
goto out_delete_evlist;
}
- err = perf_evlist__mmap(evlist, UINT_MAX, false);
+ err = perf_evlist__mmap(evlist, UINT_MAX);
if (err < 0) {
pr_debug("perf_evlist__mmap: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index f0679613bd18..18b06444f230 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -13,7 +13,6 @@
#include <unistd.h>
#include <linux/kernel.h>
#include <linux/hw_breakpoint.h>
-#include <api/fs/fs.h>
#include <api/fs/tracing_path.h>
#define PERF_TP_SAMPLE_TYPE (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | \
diff --git a/tools/perf/tests/perf-record.c b/tools/perf/tests/perf-record.c
index c34904d37705..0afafab85238 100644
--- a/tools/perf/tests/perf-record.c
+++ b/tools/perf/tests/perf-record.c
@@ -141,7 +141,7 @@ int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unus
* fds in the same CPU to be injected in the same mmap ring buffer
* (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)).
*/
- err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
+ err = perf_evlist__mmap(evlist, opts.mmap_pages);
if (err < 0) {
pr_debug("perf_evlist__mmap: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
diff --git a/tools/perf/tests/sample-parsing.c b/tools/perf/tests/sample-parsing.c
index 3ec6302b6498..0e2d00d69e6e 100644
--- a/tools/perf/tests/sample-parsing.c
+++ b/tools/perf/tests/sample-parsing.c
@@ -248,7 +248,7 @@ static int do_test(u64 sample_type, u64 sample_regs, u64 read_format)
event->header.size = sz;
err = perf_event__synthesize_sample(event, sample_type, read_format,
- &sample, false);
+ &sample);
if (err) {
pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
"perf_event__synthesize_sample", sample_type, err);
diff --git a/tools/perf/tests/shell/trace+probe_vfs_getname.sh b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
index 2a9ef080efd0..55ad9793d544 100755
--- a/tools/perf/tests/shell/trace+probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
@@ -17,10 +17,9 @@ skip_if_no_perf_probe || exit 2
file=$(mktemp /tmp/temporary_file.XXXXX)
trace_open_vfs_getname() {
- test "$(uname -m)" = s390x && { svc="openat"; txt="dfd: +CWD, +"; }
-
- perf trace -e ${svc:-open} touch $file 2>&1 | \
- egrep " +[0-9]+\.[0-9]+ +\( +[0-9]+\.[0-9]+ ms\): +touch\/[0-9]+ ${svc:-open}\(${txt}filename: +${file}, +flags: CREAT\|NOCTTY\|NONBLOCK\|WRONLY, +mode: +IRUGO\|IWUGO\) += +[0-9]+$"
+ evts=$(echo $(perf list syscalls:sys_enter_open* |& egrep 'open(at)? ' | sed -r 's/.*sys_enter_([a-z]+) +\[.*$/\1/') | sed 's/ /,/')
+ perf trace -e $evts touch $file 2>&1 | \
+ egrep " +[0-9]+\.[0-9]+ +\( +[0-9]+\.[0-9]+ ms\): +touch\/[0-9]+ open(at)?\((dfd: +CWD, +)?filename: +${file}, +flags: CREAT\|NOCTTY\|NONBLOCK\|WRONLY, +mode: +IRUGO\|IWUGO\) += +[0-9]+$"
}
diff --git a/tools/perf/tests/sw-clock.c b/tools/perf/tests/sw-clock.c
index 725a196991a8..f6c72f915d48 100644
--- a/tools/perf/tests/sw-clock.c
+++ b/tools/perf/tests/sw-clock.c
@@ -78,7 +78,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
goto out_delete_evlist;
}
- err = perf_evlist__mmap(evlist, 128, true);
+ err = perf_evlist__mmap(evlist, 128);
if (err < 0) {
pr_debug("failed to mmap event: %d (%s)\n", errno,
str_error_r(errno, sbuf, sizeof(sbuf)));
diff --git a/tools/perf/tests/switch-tracking.c b/tools/perf/tests/switch-tracking.c
index 7d3f4bf9534f..33e00295a972 100644
--- a/tools/perf/tests/switch-tracking.c
+++ b/tools/perf/tests/switch-tracking.c
@@ -449,7 +449,7 @@ int test__switch_tracking(struct test *test __maybe_unused, int subtest __maybe_
goto out;
}
- err = perf_evlist__mmap(evlist, UINT_MAX, false);
+ err = perf_evlist__mmap(evlist, UINT_MAX);
if (err) {
pr_debug("perf_evlist__mmap failed!\n");
goto out_err;
diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c
index 89c8e1604ca7..01b62b81751b 100644
--- a/tools/perf/tests/task-exit.c
+++ b/tools/perf/tests/task-exit.c
@@ -101,7 +101,7 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused
goto out_delete_evlist;
}
- if (perf_evlist__mmap(evlist, 128, true) < 0) {
+ if (perf_evlist__mmap(evlist, 128) < 0) {
pr_debug("failed to mmap events: %d (%s)\n", errno,
str_error_r(errno, sbuf, sizeof(sbuf)));
goto out_delete_evlist;
diff --git a/tools/perf/tests/thread-map.c b/tools/perf/tests/thread-map.c
index dbcb6a19b375..4de1939b58ba 100644
--- a/tools/perf/tests/thread-map.c
+++ b/tools/perf/tests/thread-map.c
@@ -105,7 +105,7 @@ int test__thread_map_remove(struct test *test __maybe_unused, int subtest __mayb
TEST_ASSERT_VAL("failed to allocate map string",
asprintf(&str, "%d,%d", getpid(), getppid()) >= 0);
- threads = thread_map__new_str(str, NULL, 0);
+ threads = thread_map__new_str(str, NULL, 0, false);
TEST_ASSERT_VAL("failed to allocate thread_map",
threads);
diff --git a/tools/perf/trace/beauty/Build b/tools/perf/trace/beauty/Build
index 066bbf0f4a74..66330d4b739b 100644
--- a/tools/perf/trace/beauty/Build
+++ b/tools/perf/trace/beauty/Build
@@ -1,5 +1,6 @@
libperf-y += clone.o
libperf-y += fcntl.o
+libperf-y += flock.o
ifeq ($(SRCARCH),$(filter $(SRCARCH),x86))
libperf-y += ioctl.o
endif
diff --git a/tools/perf/trace/beauty/arch_errno_names.c b/tools/perf/trace/beauty/arch_errno_names.c
new file mode 100644
index 000000000000..ede031c3a9e0
--- /dev/null
+++ b/tools/perf/trace/beauty/arch_errno_names.c
@@ -0,0 +1 @@
+#include "trace/beauty/generated/arch_errno_name_array.c"
diff --git a/tools/perf/trace/beauty/arch_errno_names.sh b/tools/perf/trace/beauty/arch_errno_names.sh
new file mode 100755
index 000000000000..22c9fc900c84
--- /dev/null
+++ b/tools/perf/trace/beauty/arch_errno_names.sh
@@ -0,0 +1,100 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# Generate C file mapping errno codes to errno names.
+#
+# Copyright IBM Corp. 2018
+# Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+
+gcc="$1"
+toolsdir="$2"
+include_path="-I$toolsdir/include/uapi"
+
+arch_string()
+{
+ echo "$1" |sed -e 'y/- /__/' |tr '[[:upper:]]' '[[:lower:]]'
+}
+
+asm_errno_file()
+{
+ local arch="$1"
+ local header
+
+ header="$toolsdir/arch/$arch/include/uapi/asm/errno.h"
+ if test -r "$header"; then
+ echo "$header"
+ else
+ echo "$toolsdir/include/uapi/asm-generic/errno.h"
+ fi
+}
+
+create_errno_lookup_func()
+{
+ local arch=$(arch_string "$1")
+ local nr name
+
+ cat <<EoFuncBegin
+static const char *errno_to_name__$arch(int err)
+{
+ switch (err) {
+EoFuncBegin
+
+ while read name nr; do
+ printf '\tcase %d: return "%s";\n' $nr $name
+ done
+
+ cat <<EoFuncEnd
+ default:
+ return "(unknown)";
+ }
+}
+
+EoFuncEnd
+}
+
+process_arch()
+{
+ local arch="$1"
+ local asm_errno=$(asm_errno_file "$arch")
+
+ $gcc $include_path -E -dM -x c $asm_errno \
+ |grep -hE '^#define[[:blank:]]+(E[^[:blank:]]+)[[:blank:]]+([[:digit:]]+).*' \
+ |awk '{ print $2","$3; }' \
+ |sort -t, -k2 -nu \
+ |IFS=, create_errno_lookup_func "$arch"
+}
+
+create_arch_errno_table_func()
+{
+ local archlist="$1"
+ local default="$2"
+ local arch
+
+ printf 'const char *arch_syscalls__strerrno(const char *arch, int err)\n'
+ printf '{\n'
+ for arch in $archlist; do
+ printf '\tif (!strcmp(arch, "%s"))\n' $(arch_string "$arch")
+ printf '\t\treturn errno_to_name__%s(err);\n' $(arch_string "$arch")
+ done
+ printf '\treturn errno_to_name__%s(err);\n' $(arch_string "$default")
+ printf '}\n'
+}
+
+cat <<EoHEADER
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <string.h>
+
+EoHEADER
+
+# Create the list of architectures, skipping those that do not appear
+# in tools/perf/arch.
+archlist=""
+for arch in $(find $toolsdir/arch -maxdepth 1 -mindepth 1 -type d -printf "%f\n" | grep -v x86 | sort); do
+ test -d arch/$arch && archlist="$archlist $arch"
+done
+
+for arch in x86 $archlist generic; do
+ process_arch "$arch"
+done
+create_arch_errno_table_func "x86 $archlist" "generic"
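For context, process_arch() above reduces each "#define EPERM 1"-style line from the preprocessed asm/errno.h to a "NAME,number" pair, and create_errno_lookup_func() turns those pairs into switch cases. The generated arch_errno_name_array.c therefore looks roughly like this sketch (illustrative only; the exact case list depends on which errno.h is preprocessed, and the errno_to_name__* names follow the printf templates in the script):

/* SPDX-License-Identifier: GPL-2.0 */

#include <string.h>

static const char *errno_to_name__x86(int err)
{
	switch (err) {
	case 1: return "EPERM";
	case 2: return "ENOENT";
	/* ...one case per E* constant found in the header... */
	default:
		return "(unknown)";
	}
}

/* ...one lookup function per architecture, then the dispatcher: */
const char *arch_syscalls__strerrno(const char *arch, int err)
{
	if (!strcmp(arch, "x86"))
		return errno_to_name__x86(err);
	/* ...one strcmp per architecture in the list... */
	return errno_to_name__generic(err);
}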
diff --git a/tools/perf/trace/beauty/beauty.h b/tools/perf/trace/beauty/beauty.h
index a6dfd04beaee..984a504d335c 100644
--- a/tools/perf/trace/beauty/beauty.h
+++ b/tools/perf/trace/beauty/beauty.h
@@ -79,6 +79,9 @@ size_t syscall_arg__scnprintf_fcntl_cmd(char *bf, size_t size, struct syscall_ar
size_t syscall_arg__scnprintf_fcntl_arg(char *bf, size_t size, struct syscall_arg *arg);
#define SCA_FCNTL_ARG syscall_arg__scnprintf_fcntl_arg
+size_t syscall_arg__scnprintf_flock(char *bf, size_t size, struct syscall_arg *arg);
+#define SCA_FLOCK syscall_arg__scnprintf_flock
+
size_t syscall_arg__scnprintf_ioctl_cmd(char *bf, size_t size, struct syscall_arg *arg);
#define SCA_IOCTL_CMD syscall_arg__scnprintf_ioctl_cmd
@@ -114,4 +117,6 @@ size_t open__scnprintf_flags(unsigned long flags, char *bf, size_t size);
void syscall_arg__set_ret_scnprintf(struct syscall_arg *arg,
size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg));
+const char *arch_syscalls__strerrno(const char *arch, int err);
+
#endif /* _PERF_TRACE_BEAUTY_H */
diff --git a/tools/perf/trace/beauty/flock.c b/tools/perf/trace/beauty/flock.c
index f9707f57566c..c4ff6ad30b06 100644
--- a/tools/perf/trace/beauty/flock.c
+++ b/tools/perf/trace/beauty/flock.c
@@ -1,5 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-#include <fcntl.h>
+
+#include "trace/beauty/beauty.h"
+#include <linux/kernel.h>
+#include <uapi/linux/fcntl.h>
#ifndef LOCK_MAND
#define LOCK_MAND 32
@@ -17,8 +20,7 @@
#define LOCK_RW 192
#endif
-static size_t syscall_arg__scnprintf_flock(char *bf, size_t size,
- struct syscall_arg *arg)
+size_t syscall_arg__scnprintf_flock(char *bf, size_t size, struct syscall_arg *arg)
{
int printed = 0, op = arg->val;
@@ -45,5 +47,3 @@ static size_t syscall_arg__scnprintf_flock(char *bf, size_t size,
return printed;
}
-
-#define SCA_FLOCK syscall_arg__scnprintf_flock
diff --git a/tools/perf/trace/beauty/futex_val3.c b/tools/perf/trace/beauty/futex_val3.c
new file mode 100644
index 000000000000..26f6b3253511
--- /dev/null
+++ b/tools/perf/trace/beauty/futex_val3.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/futex.h>
+
+#ifndef FUTEX_BITSET_MATCH_ANY
+#define FUTEX_BITSET_MATCH_ANY 0xffffffff
+#endif
+
+static size_t syscall_arg__scnprintf_futex_val3(char *bf, size_t size, struct syscall_arg *arg)
+{
+ unsigned int bitset = arg->val;
+
+ if (bitset == FUTEX_BITSET_MATCH_ANY)
+ return scnprintf(bf, size, "MATCH_ANY");
+
+ return scnprintf(bf, size, "%#xd", bitset);
+}
+
+#define SCA_FUTEX_VAL3 syscall_arg__scnprintf_futex_val3
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index 8f7f59d1a2b5..286427975112 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -25,16 +25,10 @@ struct disasm_line_samples {
#define IPC_WIDTH 6
#define CYCLES_WIDTH 6
-struct browser_disasm_line {
- struct rb_node rb_node;
- u32 idx;
- int idx_asm;
- int jump_sources;
- /*
- * actual length of this array is saved on the nr_events field
- * of the struct annotate_browser
- */
- struct disasm_line_samples samples[1];
+struct browser_line {
+ u32 idx;
+ int idx_asm;
+ int jump_sources;
};
static struct annotate_browser_opt {
@@ -53,39 +47,43 @@ static struct annotate_browser_opt {
struct arch;
struct annotate_browser {
- struct ui_browser b;
- struct rb_root entries;
- struct rb_node *curr_hot;
- struct disasm_line *selection;
- struct disasm_line **offsets;
- struct arch *arch;
- int nr_events;
- u64 start;
- int nr_asm_entries;
- int nr_entries;
- int max_jump_sources;
- int nr_jumps;
- bool searching_backwards;
- bool have_cycles;
- u8 addr_width;
- u8 jumps_width;
- u8 target_width;
- u8 min_addr_width;
- u8 max_addr_width;
- char search_bf[128];
+ struct ui_browser b;
+ struct rb_root entries;
+ struct rb_node *curr_hot;
+ struct annotation_line *selection;
+ struct annotation_line **offsets;
+ struct arch *arch;
+ int nr_events;
+ u64 start;
+ int nr_asm_entries;
+ int nr_entries;
+ int max_jump_sources;
+ int nr_jumps;
+ bool searching_backwards;
+ bool have_cycles;
+ u8 addr_width;
+ u8 jumps_width;
+ u8 target_width;
+ u8 min_addr_width;
+ u8 max_addr_width;
+ char search_bf[128];
};
-static inline struct browser_disasm_line *disasm_line__browser(struct disasm_line *dl)
+static inline struct browser_line *browser_line(struct annotation_line *al)
{
- return (struct browser_disasm_line *)(dl + 1);
+ void *ptr = al;
+
+ ptr = container_of(al, struct disasm_line, al);
+ return ptr - sizeof(struct browser_line);
}
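browser_line() depends on the allocation layout set up later in this patch: symbol__tui_annotate() passes sizeof(struct browser_line) as the privsize argument to symbol__annotate(), so each line's memory looks like the sketch below (derived from the disasm_line__new() comment in the annotate.c hunk further down). container_of() steps back from the embedded annotation_line to the enclosing disasm_line, and subtracting sizeof(struct browser_line) lands on the browser's private data in front of it.

/*
 * ---------------------------------------------------------------
 * struct browser_line | struct disasm_line | struct annotation_line
 * ---------------------------------------------------------------
 *                                           ^-- 'al' points here
 */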
static bool disasm_line__filter(struct ui_browser *browser __maybe_unused,
void *entry)
{
if (annotate_browser__opts.hide_src_code) {
- struct disasm_line *dl = list_entry(entry, struct disasm_line, node);
- return dl->offset == -1;
+ struct annotation_line *al = list_entry(entry, struct annotation_line, node);
+
+ return al->offset == -1;
}
return false;
@@ -120,11 +118,37 @@ static int annotate_browser__cycles_width(struct annotate_browser *ab)
return ab->have_cycles ? IPC_WIDTH + CYCLES_WIDTH : 0;
}
+static void disasm_line__write(struct disasm_line *dl, struct ui_browser *browser,
+ char *bf, size_t size)
+{
+ if (dl->ins.ops && dl->ins.ops->scnprintf) {
+ if (ins__is_jump(&dl->ins)) {
+ bool fwd = dl->ops.target.offset > dl->al.offset;
+
+ ui_browser__write_graph(browser, fwd ? SLSMG_DARROW_CHAR :
+ SLSMG_UARROW_CHAR);
+ SLsmg_write_char(' ');
+ } else if (ins__is_call(&dl->ins)) {
+ ui_browser__write_graph(browser, SLSMG_RARROW_CHAR);
+ SLsmg_write_char(' ');
+ } else if (ins__is_ret(&dl->ins)) {
+ ui_browser__write_graph(browser, SLSMG_LARROW_CHAR);
+ SLsmg_write_char(' ');
+ } else {
+ ui_browser__write_nstring(browser, " ", 2);
+ }
+ } else {
+ ui_browser__write_nstring(browser, " ", 2);
+ }
+
+ disasm_line__scnprintf(dl, bf, size, !annotate_browser__opts.use_offset);
+}
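The glyph logic hoisted into disasm_line__write() maps the instruction type to a two-column marker; summarizing the branches above:

/*
 * jump forward  -> SLSMG_DARROW_CHAR (down arrow, target is below)
 * jump backward -> SLSMG_UARROW_CHAR (up arrow, target is above)
 * call          -> SLSMG_RARROW_CHAR (right arrow)
 * ret           -> SLSMG_LARROW_CHAR (left arrow)
 * anything else -> two spaces
 */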
+
static void annotate_browser__write(struct ui_browser *browser, void *entry, int row)
{
struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
- struct disasm_line *dl = list_entry(entry, struct disasm_line, node);
- struct browser_disasm_line *bdl = disasm_line__browser(dl);
+ struct annotation_line *al = list_entry(entry, struct annotation_line, node);
+ struct browser_line *bl = browser_line(al);
bool current_entry = ui_browser__is_current_entry(browser, row);
bool change_color = (!annotate_browser__opts.hide_src_code &&
(!current_entry || (browser->use_navkeypressed &&
@@ -137,32 +161,32 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
bool show_title = false;
for (i = 0; i < ab->nr_events; i++) {
- if (bdl->samples[i].percent > percent_max)
- percent_max = bdl->samples[i].percent;
+ if (al->samples[i].percent > percent_max)
+ percent_max = al->samples[i].percent;
}
- if ((row == 0) && (dl->offset == -1 || percent_max == 0.0)) {
+ if ((row == 0) && (al->offset == -1 || percent_max == 0.0)) {
if (ab->have_cycles) {
- if (dl->ipc == 0.0 && dl->cycles == 0)
+ if (al->ipc == 0.0 && al->cycles == 0)
show_title = true;
} else
show_title = true;
}
- if (dl->offset != -1 && percent_max != 0.0) {
+ if (al->offset != -1 && percent_max != 0.0) {
for (i = 0; i < ab->nr_events; i++) {
ui_browser__set_percent_color(browser,
- bdl->samples[i].percent,
+ al->samples[i].percent,
current_entry);
if (annotate_browser__opts.show_total_period) {
ui_browser__printf(browser, "%11" PRIu64 " ",
- bdl->samples[i].he.period);
+ al->samples[i].he.period);
} else if (annotate_browser__opts.show_nr_samples) {
ui_browser__printf(browser, "%6" PRIu64 " ",
- bdl->samples[i].he.nr_samples);
+ al->samples[i].he.nr_samples);
} else {
ui_browser__printf(browser, "%6.2f ",
- bdl->samples[i].percent);
+ al->samples[i].percent);
}
}
} else {
@@ -177,16 +201,16 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
}
}
if (ab->have_cycles) {
- if (dl->ipc)
- ui_browser__printf(browser, "%*.2f ", IPC_WIDTH - 1, dl->ipc);
+ if (al->ipc)
+ ui_browser__printf(browser, "%*.2f ", IPC_WIDTH - 1, al->ipc);
else if (!show_title)
ui_browser__write_nstring(browser, " ", IPC_WIDTH);
else
ui_browser__printf(browser, "%*s ", IPC_WIDTH - 1, "IPC");
- if (dl->cycles)
+ if (al->cycles)
ui_browser__printf(browser, "%*" PRIu64 " ",
- CYCLES_WIDTH - 1, dl->cycles);
+ CYCLES_WIDTH - 1, al->cycles);
else if (!show_title)
ui_browser__write_nstring(browser, " ", CYCLES_WIDTH);
else
@@ -199,19 +223,19 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
if (!browser->navkeypressed)
width += 1;
- if (!*dl->line)
+ if (!*al->line)
ui_browser__write_nstring(browser, " ", width - pcnt_width - cycles_width);
- else if (dl->offset == -1) {
- if (dl->line_nr && annotate_browser__opts.show_linenr)
+ else if (al->offset == -1) {
+ if (al->line_nr && annotate_browser__opts.show_linenr)
printed = scnprintf(bf, sizeof(bf), "%-*d ",
- ab->addr_width + 1, dl->line_nr);
+ ab->addr_width + 1, al->line_nr);
else
printed = scnprintf(bf, sizeof(bf), "%*s ",
ab->addr_width, " ");
ui_browser__write_nstring(browser, bf, printed);
- ui_browser__write_nstring(browser, dl->line, width - printed - pcnt_width - cycles_width + 1);
+ ui_browser__write_nstring(browser, al->line, width - printed - pcnt_width - cycles_width + 1);
} else {
- u64 addr = dl->offset;
+ u64 addr = al->offset;
int color = -1;
if (!annotate_browser__opts.use_offset)
@@ -220,13 +244,13 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
if (!annotate_browser__opts.use_offset) {
printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr);
} else {
- if (bdl->jump_sources) {
+ if (bl->jump_sources) {
if (annotate_browser__opts.show_nr_jumps) {
int prev;
printed = scnprintf(bf, sizeof(bf), "%*d ",
ab->jumps_width,
- bdl->jump_sources);
- prev = annotate_browser__set_jumps_percent_color(ab, bdl->jump_sources,
+ bl->jump_sources);
+ prev = annotate_browser__set_jumps_percent_color(ab, bl->jump_sources,
current_entry);
ui_browser__write_nstring(browser, bf, printed);
ui_browser__set_color(browser, prev);
@@ -245,32 +269,14 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
ui_browser__write_nstring(browser, bf, printed);
if (change_color)
ui_browser__set_color(browser, color);
- if (dl->ins.ops && dl->ins.ops->scnprintf) {
- if (ins__is_jump(&dl->ins)) {
- bool fwd = dl->ops.target.offset > dl->offset;
-
- ui_browser__write_graph(browser, fwd ? SLSMG_DARROW_CHAR :
- SLSMG_UARROW_CHAR);
- SLsmg_write_char(' ');
- } else if (ins__is_call(&dl->ins)) {
- ui_browser__write_graph(browser, SLSMG_RARROW_CHAR);
- SLsmg_write_char(' ');
- } else if (ins__is_ret(&dl->ins)) {
- ui_browser__write_graph(browser, SLSMG_LARROW_CHAR);
- SLsmg_write_char(' ');
- } else {
- ui_browser__write_nstring(browser, " ", 2);
- }
- } else {
- ui_browser__write_nstring(browser, " ", 2);
- }
- disasm_line__scnprintf(dl, bf, sizeof(bf), !annotate_browser__opts.use_offset);
+ disasm_line__write(disasm_line(al), browser, bf, sizeof(bf));
+
ui_browser__write_nstring(browser, bf, width - pcnt_width - cycles_width - 3 - printed);
}
if (current_entry)
- ab->selection = dl;
+ ab->selection = al;
}
static bool disasm_line__is_valid_jump(struct disasm_line *dl, struct symbol *sym)
@@ -286,7 +292,7 @@ static bool disasm_line__is_valid_jump(struct disasm_line *dl, struct symbol *sy
static bool is_fused(struct annotate_browser *ab, struct disasm_line *cursor)
{
- struct disasm_line *pos = list_prev_entry(cursor, node);
+ struct disasm_line *pos = list_prev_entry(cursor, al.node);
const char *name;
if (!pos)
@@ -306,8 +312,9 @@ static bool is_fused(struct annotate_browser *ab, struct disasm_line *cursor)
static void annotate_browser__draw_current_jump(struct ui_browser *browser)
{
struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
- struct disasm_line *cursor = ab->selection, *target;
- struct browser_disasm_line *btarget, *bcursor;
+ struct disasm_line *cursor = disasm_line(ab->selection);
+ struct annotation_line *target;
+ struct browser_line *btarget, *bcursor;
unsigned int from, to;
struct map_symbol *ms = ab->b.priv;
struct symbol *sym = ms->sym;
@@ -321,11 +328,9 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
return;
target = ab->offsets[cursor->ops.target.offset];
- if (!target)
- return;
- bcursor = disasm_line__browser(cursor);
- btarget = disasm_line__browser(target);
+ bcursor = browser_line(&cursor->al);
+ btarget = browser_line(target);
if (annotate_browser__opts.hide_src_code) {
from = bcursor->idx_asm;
@@ -361,12 +366,11 @@ static unsigned int annotate_browser__refresh(struct ui_browser *browser)
return ret;
}
-static int disasm__cmp(struct browser_disasm_line *a,
- struct browser_disasm_line *b, int nr_pcnt)
+static int disasm__cmp(struct annotation_line *a, struct annotation_line *b)
{
int i;
- for (i = 0; i < nr_pcnt; i++) {
+ for (i = 0; i < a->samples_nr; i++) {
if (a->samples[i].percent == b->samples[i].percent)
continue;
return a->samples[i].percent < b->samples[i].percent;
@@ -374,28 +378,27 @@ static int disasm__cmp(struct browser_disasm_line *a,
return 0;
}
-static void disasm_rb_tree__insert(struct rb_root *root, struct browser_disasm_line *bdl,
- int nr_events)
+static void disasm_rb_tree__insert(struct rb_root *root, struct annotation_line *al)
{
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
- struct browser_disasm_line *l;
+ struct annotation_line *l;
while (*p != NULL) {
parent = *p;
- l = rb_entry(parent, struct browser_disasm_line, rb_node);
+ l = rb_entry(parent, struct annotation_line, rb_node);
- if (disasm__cmp(bdl, l, nr_events))
+ if (disasm__cmp(al, l))
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
- rb_link_node(&bdl->rb_node, parent, p);
- rb_insert_color(&bdl->rb_node, root);
+ rb_link_node(&al->rb_node, parent, p);
+ rb_insert_color(&al->rb_node, root);
}
static void annotate_browser__set_top(struct annotate_browser *browser,
- struct disasm_line *pos, u32 idx)
+ struct annotation_line *pos, u32 idx)
{
unsigned back;
@@ -404,7 +407,7 @@ static void annotate_browser__set_top(struct annotate_browser *browser,
browser->b.top_idx = browser->b.index = idx;
while (browser->b.top_idx != 0 && back != 0) {
- pos = list_entry(pos->node.prev, struct disasm_line, node);
+ pos = list_entry(pos->node.prev, struct annotation_line, node);
if (disasm_line__filter(&browser->b, &pos->node))
continue;
@@ -420,12 +423,13 @@ static void annotate_browser__set_top(struct annotate_browser *browser,
static void annotate_browser__set_rb_top(struct annotate_browser *browser,
struct rb_node *nd)
{
- struct browser_disasm_line *bpos;
- struct disasm_line *pos;
+ struct browser_line *bpos;
+ struct annotation_line *pos;
u32 idx;
- bpos = rb_entry(nd, struct browser_disasm_line, rb_node);
- pos = ((struct disasm_line *)bpos) - 1;
+ pos = rb_entry(nd, struct annotation_line, rb_node);
+ bpos = browser_line(pos);
+
idx = bpos->idx;
if (annotate_browser__opts.hide_src_code)
idx = bpos->idx_asm;
@@ -439,46 +443,35 @@ static void annotate_browser__calc_percent(struct annotate_browser *browser,
struct map_symbol *ms = browser->b.priv;
struct symbol *sym = ms->sym;
struct annotation *notes = symbol__annotation(sym);
- struct disasm_line *pos, *next;
- s64 len = symbol__size(sym);
+ struct disasm_line *pos;
browser->entries = RB_ROOT;
pthread_mutex_lock(&notes->lock);
- list_for_each_entry(pos, &notes->src->source, node) {
- struct browser_disasm_line *bpos = disasm_line__browser(pos);
- const char *path = NULL;
+ symbol__calc_percent(sym, evsel);
+
+ list_for_each_entry(pos, &notes->src->source, al.node) {
double max_percent = 0.0;
int i;
- if (pos->offset == -1) {
- RB_CLEAR_NODE(&bpos->rb_node);
+ if (pos->al.offset == -1) {
+ RB_CLEAR_NODE(&pos->al.rb_node);
continue;
}
- next = disasm__get_next_ip_line(&notes->src->source, pos);
-
- for (i = 0; i < browser->nr_events; i++) {
- struct sym_hist_entry sample;
-
- bpos->samples[i].percent = disasm__calc_percent(notes,
- evsel->idx + i,
- pos->offset,
- next ? next->offset : len,
- &path, &sample);
- bpos->samples[i].he = sample;
+ for (i = 0; i < pos->al.samples_nr; i++) {
+ struct annotation_data *sample = &pos->al.samples[i];
- if (max_percent < bpos->samples[i].percent)
- max_percent = bpos->samples[i].percent;
+ if (max_percent < sample->percent)
+ max_percent = sample->percent;
}
- if (max_percent < 0.01 && pos->ipc == 0) {
- RB_CLEAR_NODE(&bpos->rb_node);
+ if (max_percent < 0.01 && pos->al.ipc == 0) {
+ RB_CLEAR_NODE(&pos->al.rb_node);
continue;
}
- disasm_rb_tree__insert(&browser->entries, bpos,
- browser->nr_events);
+ disasm_rb_tree__insert(&browser->entries, &pos->al);
}
pthread_mutex_unlock(&notes->lock);
@@ -487,38 +480,38 @@ static void annotate_browser__calc_percent(struct annotate_browser *browser,
static bool annotate_browser__toggle_source(struct annotate_browser *browser)
{
- struct disasm_line *dl;
- struct browser_disasm_line *bdl;
+ struct annotation_line *al;
+ struct browser_line *bl;
off_t offset = browser->b.index - browser->b.top_idx;
browser->b.seek(&browser->b, offset, SEEK_CUR);
- dl = list_entry(browser->b.top, struct disasm_line, node);
- bdl = disasm_line__browser(dl);
+ al = list_entry(browser->b.top, struct annotation_line, node);
+ bl = browser_line(al);
if (annotate_browser__opts.hide_src_code) {
- if (bdl->idx_asm < offset)
- offset = bdl->idx;
+ if (bl->idx_asm < offset)
+ offset = bl->idx;
browser->b.nr_entries = browser->nr_entries;
annotate_browser__opts.hide_src_code = false;
browser->b.seek(&browser->b, -offset, SEEK_CUR);
- browser->b.top_idx = bdl->idx - offset;
- browser->b.index = bdl->idx;
+ browser->b.top_idx = bl->idx - offset;
+ browser->b.index = bl->idx;
} else {
- if (bdl->idx_asm < 0) {
+ if (bl->idx_asm < 0) {
ui_helpline__puts("Only available for assembly lines.");
browser->b.seek(&browser->b, -offset, SEEK_CUR);
return false;
}
- if (bdl->idx_asm < offset)
- offset = bdl->idx_asm;
+ if (bl->idx_asm < offset)
+ offset = bl->idx_asm;
browser->b.nr_entries = browser->nr_asm_entries;
annotate_browser__opts.hide_src_code = true;
browser->b.seek(&browser->b, -offset, SEEK_CUR);
- browser->b.top_idx = bdl->idx_asm - offset;
- browser->b.index = bdl->idx_asm;
+ browser->b.top_idx = bl->idx_asm - offset;
+ browser->b.index = bl->idx_asm;
}
return true;
@@ -543,7 +536,7 @@ static bool annotate_browser__callq(struct annotate_browser *browser,
struct hist_browser_timer *hbt)
{
struct map_symbol *ms = browser->b.priv;
- struct disasm_line *dl = browser->selection;
+ struct disasm_line *dl = disasm_line(browser->selection);
struct annotation *notes;
struct addr_map_symbol target = {
.map = ms->map,
@@ -589,10 +582,10 @@ struct disasm_line *annotate_browser__find_offset(struct annotate_browser *brows
struct disasm_line *pos;
*idx = 0;
- list_for_each_entry(pos, &notes->src->source, node) {
- if (pos->offset == offset)
+ list_for_each_entry(pos, &notes->src->source, al.node) {
+ if (pos->al.offset == offset)
return pos;
- if (!disasm_line__filter(&browser->b, &pos->node))
+ if (!disasm_line__filter(&browser->b, &pos->al.node))
++*idx;
}
@@ -601,7 +594,7 @@ struct disasm_line *annotate_browser__find_offset(struct annotate_browser *brows
static bool annotate_browser__jump(struct annotate_browser *browser)
{
- struct disasm_line *dl = browser->selection;
+ struct disasm_line *dl = disasm_line(browser->selection);
u64 offset;
s64 idx;
@@ -615,29 +608,29 @@ static bool annotate_browser__jump(struct annotate_browser *browser)
return true;
}
- annotate_browser__set_top(browser, dl, idx);
+ annotate_browser__set_top(browser, &dl->al, idx);
return true;
}
static
-struct disasm_line *annotate_browser__find_string(struct annotate_browser *browser,
+struct annotation_line *annotate_browser__find_string(struct annotate_browser *browser,
char *s, s64 *idx)
{
struct map_symbol *ms = browser->b.priv;
struct symbol *sym = ms->sym;
struct annotation *notes = symbol__annotation(sym);
- struct disasm_line *pos = browser->selection;
+ struct annotation_line *al = browser->selection;
*idx = browser->b.index;
- list_for_each_entry_continue(pos, &notes->src->source, node) {
- if (disasm_line__filter(&browser->b, &pos->node))
+ list_for_each_entry_continue(al, &notes->src->source, node) {
+ if (disasm_line__filter(&browser->b, &al->node))
continue;
++*idx;
- if (pos->line && strstr(pos->line, s) != NULL)
- return pos;
+ if (al->line && strstr(al->line, s) != NULL)
+ return al;
}
return NULL;
@@ -645,38 +638,38 @@ struct disasm_line *annotate_browser__find_string(struct annotate_browser *brows
static bool __annotate_browser__search(struct annotate_browser *browser)
{
- struct disasm_line *dl;
+ struct annotation_line *al;
s64 idx;
- dl = annotate_browser__find_string(browser, browser->search_bf, &idx);
- if (dl == NULL) {
+ al = annotate_browser__find_string(browser, browser->search_bf, &idx);
+ if (al == NULL) {
ui_helpline__puts("String not found!");
return false;
}
- annotate_browser__set_top(browser, dl, idx);
+ annotate_browser__set_top(browser, al, idx);
browser->searching_backwards = false;
return true;
}
static
-struct disasm_line *annotate_browser__find_string_reverse(struct annotate_browser *browser,
+struct annotation_line *annotate_browser__find_string_reverse(struct annotate_browser *browser,
char *s, s64 *idx)
{
struct map_symbol *ms = browser->b.priv;
struct symbol *sym = ms->sym;
struct annotation *notes = symbol__annotation(sym);
- struct disasm_line *pos = browser->selection;
+ struct annotation_line *al = browser->selection;
*idx = browser->b.index;
- list_for_each_entry_continue_reverse(pos, &notes->src->source, node) {
- if (disasm_line__filter(&browser->b, &pos->node))
+ list_for_each_entry_continue_reverse(al, &notes->src->source, node) {
+ if (disasm_line__filter(&browser->b, &al->node))
continue;
--*idx;
- if (pos->line && strstr(pos->line, s) != NULL)
- return pos;
+ if (al->line && strstr(al->line, s) != NULL)
+ return al;
}
return NULL;
@@ -684,16 +677,16 @@ struct disasm_line *annotate_browser__find_string_reverse(struct annotate_browse
static bool __annotate_browser__search_reverse(struct annotate_browser *browser)
{
- struct disasm_line *dl;
+ struct annotation_line *al;
s64 idx;
- dl = annotate_browser__find_string_reverse(browser, browser->search_bf, &idx);
- if (dl == NULL) {
+ al = annotate_browser__find_string_reverse(browser, browser->search_bf, &idx);
+ if (al == NULL) {
ui_helpline__puts("String not found!");
return false;
}
- annotate_browser__set_top(browser, dl, idx);
+ annotate_browser__set_top(browser, al, idx);
browser->searching_backwards = true;
return true;
}
@@ -899,13 +892,16 @@ show_help:
continue;
case K_ENTER:
case K_RIGHT:
+ {
+ struct disasm_line *dl = disasm_line(browser->selection);
+
if (browser->selection == NULL)
ui_helpline__puts("Huh? No selection. Report to linux-kernel@vger.kernel.org");
else if (browser->selection->offset == -1)
ui_helpline__puts("Actions are only available for assembly lines.");
- else if (!browser->selection->ins.ops)
+ else if (!dl->ins.ops)
goto show_sup_ins;
- else if (ins__is_ret(&browser->selection->ins))
+ else if (ins__is_ret(&dl->ins))
goto out;
else if (!(annotate_browser__jump(browser) ||
annotate_browser__callq(browser, evsel, hbt))) {
@@ -913,6 +909,7 @@ show_sup_ins:
ui_helpline__puts("Actions are only available for function call/return & jump/branch instructions.");
}
continue;
+ }
case 't':
if (annotate_browser__opts.show_total_period) {
annotate_browser__opts.show_total_period = false;
@@ -990,10 +987,10 @@ static void count_and_fill(struct annotate_browser *browser, u64 start, u64 end,
return;
for (offset = start; offset <= end; offset++) {
- struct disasm_line *dl = browser->offsets[offset];
+ struct annotation_line *al = browser->offsets[offset];
- if (dl)
- dl->ipc = ipc;
+ if (al)
+ al->ipc = ipc;
}
}
}
@@ -1018,13 +1015,13 @@ static void annotate__compute_ipc(struct annotate_browser *browser, size_t size,
ch = &notes->src->cycles_hist[offset];
if (ch && ch->cycles) {
- struct disasm_line *dl;
+ struct annotation_line *al;
if (ch->have_start)
count_and_fill(browser, ch->start, offset, ch);
- dl = browser->offsets[offset];
- if (dl && ch->num_aggr)
- dl->cycles = ch->cycles_aggr / ch->num_aggr;
+ al = browser->offsets[offset];
+ if (al && ch->num_aggr)
+ al->cycles = ch->cycles_aggr / ch->num_aggr;
browser->have_cycles = true;
}
}
@@ -1043,23 +1040,27 @@ static void annotate_browser__mark_jump_targets(struct annotate_browser *browser
return;
for (offset = 0; offset < size; ++offset) {
- struct disasm_line *dl = browser->offsets[offset], *dlt;
- struct browser_disasm_line *bdlt;
+ struct annotation_line *al = browser->offsets[offset];
+ struct disasm_line *dl;
+ struct browser_line *blt;
+
+ dl = disasm_line(al);
if (!disasm_line__is_valid_jump(dl, sym))
continue;
- dlt = browser->offsets[dl->ops.target.offset];
+ al = browser->offsets[dl->ops.target.offset];
+
/*
* FIXME: Oops, no jump target? Buggy disassembler? Or do we
* have to adjust to the previous offset?
*/
- if (dlt == NULL)
+ if (al == NULL)
continue;
- bdlt = disasm_line__browser(dlt);
- if (++bdlt->jump_sources > browser->max_jump_sources)
- browser->max_jump_sources = bdlt->jump_sources;
+ blt = browser_line(al);
+ if (++blt->jump_sources > browser->max_jump_sources)
+ browser->max_jump_sources = blt->jump_sources;
++browser->nr_jumps;
}
@@ -1078,7 +1079,7 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map,
struct perf_evsel *evsel,
struct hist_browser_timer *hbt)
{
- struct disasm_line *pos, *n;
+ struct annotation_line *al;
struct annotation *notes;
size_t size;
struct map_symbol ms = {
@@ -1097,7 +1098,6 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map,
};
int ret = -1, err;
int nr_pcnt = 1;
- size_t sizeof_bdl = sizeof(struct browser_disasm_line);
if (sym == NULL)
return -1;
@@ -1107,21 +1107,16 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map,
if (map->dso->annotate_warned)
return -1;
- browser.offsets = zalloc(size * sizeof(struct disasm_line *));
+ browser.offsets = zalloc(size * sizeof(struct annotation_line *));
if (browser.offsets == NULL) {
ui__error("Not enough memory!");
return -1;
}
- if (perf_evsel__is_group_event(evsel)) {
+ if (perf_evsel__is_group_event(evsel))
nr_pcnt = evsel->nr_members;
- sizeof_bdl += sizeof(struct disasm_line_samples) *
- (nr_pcnt - 1);
- }
- err = symbol__disassemble(sym, map, perf_evsel__env_arch(evsel),
- sizeof_bdl, &browser.arch,
- perf_evsel__env_cpuid(evsel));
+ err = symbol__annotate(sym, map, evsel, sizeof(struct browser_line), &browser.arch);
if (err) {
char msg[BUFSIZ];
symbol__strerror_disassemble(sym, map, err, msg, sizeof(msg));
@@ -1129,20 +1124,22 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map,
goto out_free_offsets;
}
+ symbol__calc_percent(sym, evsel);
+
ui_helpline__push("Press ESC to exit");
notes = symbol__annotation(sym);
browser.start = map__rip_2objdump(map, sym->start);
- list_for_each_entry(pos, &notes->src->source, node) {
- struct browser_disasm_line *bpos;
- size_t line_len = strlen(pos->line);
+ list_for_each_entry(al, &notes->src->source, node) {
+ struct browser_line *bpos;
+ size_t line_len = strlen(al->line);
if (browser.b.width < line_len)
browser.b.width = line_len;
- bpos = disasm_line__browser(pos);
+ bpos = browser_line(al);
bpos->idx = browser.nr_entries++;
- if (pos->offset != -1) {
+ if (al->offset != -1) {
bpos->idx_asm = browser.nr_asm_entries++;
/*
* FIXME: short term bandaid to cope with assembly
@@ -1151,8 +1148,8 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map,
*
* E.g. copy_user_generic_unrolled
*/
- if (pos->offset < (s64)size)
- browser.offsets[pos->offset] = pos;
+ if (al->offset < (s64)size)
+ browser.offsets[al->offset] = al;
} else
bpos->idx_asm = -1;
}
@@ -1174,10 +1171,8 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map,
annotate_browser__update_addr_width(&browser);
ret = annotate_browser__run(&browser, evsel, hbt);
- list_for_each_entry_safe(pos, n, &notes->src->source, node) {
- list_del(&pos->node);
- disasm_line__free(pos);
- }
+
+ annotated_source__purge(notes->src);
out_free_offsets:
free(browser.offsets);
diff --git a/tools/perf/ui/gtk/annotate.c b/tools/perf/ui/gtk/annotate.c
index fc7a2e105bfd..aeeaf15029f0 100644
--- a/tools/perf/ui/gtk/annotate.c
+++ b/tools/perf/ui/gtk/annotate.c
@@ -31,14 +31,14 @@ static int perf_gtk__get_percent(char *buf, size_t size, struct symbol *sym,
strcpy(buf, "");
- if (dl->offset == (s64) -1)
+ if (dl->al.offset == (s64) -1)
return 0;
symhist = annotation__histogram(symbol__annotation(sym), evidx);
- if (!symbol_conf.event_group && !symhist->addr[dl->offset].nr_samples)
+ if (!symbol_conf.event_group && !symhist->addr[dl->al.offset].nr_samples)
return 0;
- percent = 100.0 * symhist->addr[dl->offset].nr_samples / symhist->nr_samples;
+ percent = 100.0 * symhist->addr[dl->al.offset].nr_samples / symhist->nr_samples;
markup = perf_gtk__get_percent_color(percent);
if (markup)
@@ -57,16 +57,16 @@ static int perf_gtk__get_offset(char *buf, size_t size, struct symbol *sym,
strcpy(buf, "");
- if (dl->offset == (s64) -1)
+ if (dl->al.offset == (s64) -1)
return 0;
- return scnprintf(buf, size, "%"PRIx64, start + dl->offset);
+ return scnprintf(buf, size, "%"PRIx64, start + dl->al.offset);
}
static int perf_gtk__get_line(char *buf, size_t size, struct disasm_line *dl)
{
int ret = 0;
- char *line = g_markup_escape_text(dl->line, -1);
+ char *line = g_markup_escape_text(dl->al.line, -1);
const char *markup = "<span fgcolor='gray'>";
strcpy(buf, "");
@@ -74,7 +74,7 @@ static int perf_gtk__get_line(char *buf, size_t size, struct disasm_line *dl)
if (!line)
return 0;
- if (dl->offset != (s64) -1)
+ if (dl->al.offset != (s64) -1)
markup = NULL;
if (markup)
@@ -119,7 +119,7 @@ static int perf_gtk__annotate_symbol(GtkWidget *window, struct symbol *sym,
gtk_tree_view_set_model(GTK_TREE_VIEW(view), GTK_TREE_MODEL(store));
g_object_unref(GTK_TREE_MODEL(store));
- list_for_each_entry(pos, &notes->src->source, node) {
+ list_for_each_entry(pos, &notes->src->source, al.node) {
GtkTreeIter iter;
int ret = 0;
@@ -148,8 +148,8 @@ static int perf_gtk__annotate_symbol(GtkWidget *window, struct symbol *sym,
gtk_container_add(GTK_CONTAINER(window), view);
- list_for_each_entry_safe(pos, n, &notes->src->source, node) {
- list_del(&pos->node);
+ list_for_each_entry_safe(pos, n, &notes->src->source, al.node) {
+ list_del(&pos->al.node);
disasm_line__free(pos);
}
@@ -169,8 +169,7 @@ static int symbol__gtk_annotate(struct symbol *sym, struct map *map,
if (map->dso->annotate_warned)
return -1;
- err = symbol__disassemble(sym, map, perf_evsel__env_arch(evsel),
- 0, NULL, NULL);
+ err = symbol__annotate(sym, map, evsel, 0, NULL);
if (err) {
char msg[BUFSIZ];
symbol__strerror_disassemble(sym, map, err, msg, sizeof(msg));
@@ -178,6 +177,8 @@ static int symbol__gtk_annotate(struct symbol *sym, struct map *map,
return -1;
}
+ symbol__calc_percent(sym, evsel);
+
if (perf_gtk__is_active_context(pgctx)) {
window = pgctx->main_window;
notebook = pgctx->notebook;
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index a3de7916fe63..ea0a452550b0 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -44,7 +44,7 @@ libperf-y += machine.o
libperf-y += map.o
libperf-y += pstack.o
libperf-y += session.o
-libperf-$(CONFIG_AUDIT) += syscalltbl.o
+libperf-$(CONFIG_TRACE) += syscalltbl.o
libperf-y += ordered-events.o
libperf-y += namespaces.o
libperf-y += comm.o
@@ -86,6 +86,14 @@ libperf-$(CONFIG_AUXTRACE) += auxtrace.o
libperf-$(CONFIG_AUXTRACE) += intel-pt-decoder/
libperf-$(CONFIG_AUXTRACE) += intel-pt.o
libperf-$(CONFIG_AUXTRACE) += intel-bts.o
+libperf-$(CONFIG_AUXTRACE) += arm-spe.o
+libperf-$(CONFIG_AUXTRACE) += arm-spe-pkt-decoder.o
+
+ifdef CONFIG_LIBOPENCSD
+libperf-$(CONFIG_AUXTRACE) += cs-etm.o
+libperf-$(CONFIG_AUXTRACE) += cs-etm-decoder/
+endif
+
libperf-y += parse-branch-options.o
libperf-y += dump-insn.o
libperf-y += parse-regs-options.o
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 3369c7830260..28b233c3dcbe 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -26,7 +26,6 @@
#include <pthread.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
-#include <sys/utsname.h>
#include "sane_ctype.h"
@@ -322,6 +321,8 @@ static int comment__symbol(char *raw, char *comment, u64 *addrp, char **namep)
return 0;
*addrp = strtoull(comment, &endptr, 16);
+ if (endptr == comment)
+ return 0;
name = strchr(endptr, '<');
if (name == NULL)
return -1;
@@ -435,8 +436,8 @@ static int mov__parse(struct arch *arch, struct ins_operands *ops, struct map *m
return 0;
comment = ltrim(comment);
- comment__symbol(ops->source.raw, comment, &ops->source.addr, &ops->source.name);
- comment__symbol(ops->target.raw, comment, &ops->target.addr, &ops->target.name);
+ comment__symbol(ops->source.raw, comment + 1, &ops->source.addr, &ops->source.name);
+ comment__symbol(ops->target.raw, comment + 1, &ops->target.addr, &ops->target.name);
return 0;
@@ -480,7 +481,7 @@ static int dec__parse(struct arch *arch __maybe_unused, struct ins_operands *ops
return 0;
comment = ltrim(comment);
- comment__symbol(ops->target.raw, comment, &ops->target.addr, &ops->target.name);
+ comment__symbol(ops->target.raw, comment + 1, &ops->target.addr, &ops->target.name);
return 0;
}
@@ -878,32 +879,99 @@ out_free_name:
return -1;
}
-static struct disasm_line *disasm_line__new(s64 offset, char *line,
- size_t privsize, int line_nr,
- struct arch *arch,
- struct map *map)
+struct annotate_args {
+ size_t privsize;
+ struct arch *arch;
+ struct map *map;
+ struct perf_evsel *evsel;
+ s64 offset;
+ char *line;
+ int line_nr;
+};
+
+static void annotation_line__delete(struct annotation_line *al)
{
- struct disasm_line *dl = zalloc(sizeof(*dl) + privsize);
+ void *ptr = (void *) al - al->privsize;
+
+ free_srcline(al->path);
+ zfree(&al->line);
+ free(ptr);
+}
+
+/*
+ * Allocate the annotation line data with the following
+ * structure:
+ *
+ * --------------------------------------
+ * private space | struct annotation_line
+ * --------------------------------------
+ *
+ * The size of the private space is stored in 'struct annotation_line'.
+ */
+static struct annotation_line *
+annotation_line__new(struct annotate_args *args, size_t privsize)
+{
+ struct annotation_line *al;
+ struct perf_evsel *evsel = args->evsel;
+ size_t size = privsize + sizeof(*al);
+ int nr = 1;
+
+ if (perf_evsel__is_group_event(evsel))
+ nr = evsel->nr_members;
+
+ size += sizeof(al->samples[0]) * nr;
+
+ al = zalloc(size);
+ if (al) {
+ al = (void *) al + privsize;
+ al->privsize = privsize;
+ al->offset = args->offset;
+ al->line = strdup(args->line);
+ al->line_nr = args->line_nr;
+ al->samples_nr = nr;
+ }
+
+ return al;
+}
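The offset-allocation trick above is easier to see in isolation. Below is a minimal standalone sketch of the same pattern using a hypothetical struct line (not part of the patch); the void-pointer arithmetic is a GNU C extension, used the same way in the patch itself:

#include <stdlib.h>
#include <string.h>

struct line {
	size_t privsize;
	char *text;
};

static struct line *line__new(size_t privsize, const char *text)
{
	struct line *l = calloc(1, privsize + sizeof(*l));

	if (l) {
		l = (void *)l + privsize;	/* skip the private area */
		l->privsize = privsize;
		l->text = strdup(text);
	}
	return l;
}

static void line__delete(struct line *l)
{
	void *base = (void *)l - l->privsize;	/* recover the calloc'd pointer */

	free(l->text);
	free(base);
}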
+
+/*
+ * Allocate the disasm annotation line data with the
+ * following structure:
+ *
+ * ------------------------------------------------------------
+ * privsize space | struct disasm_line | struct annotation_line
+ * ------------------------------------------------------------
+ *
+ * 'struct annotation_line' is the last member of 'struct disasm_line'
+ * so that each one can easily be reached from the other.
+ */
+static struct disasm_line *disasm_line__new(struct annotate_args *args)
+{
+ struct disasm_line *dl = NULL;
+ struct annotation_line *al;
+ size_t privsize = args->privsize + offsetof(struct disasm_line, al);
+
+ al = annotation_line__new(args, privsize);
+ if (al != NULL) {
+ dl = disasm_line(al);
- if (dl != NULL) {
- dl->offset = offset;
- dl->line = strdup(line);
- dl->line_nr = line_nr;
- if (dl->line == NULL)
+ if (dl->al.line == NULL)
goto out_delete;
- if (offset != -1) {
- if (disasm_line__parse(dl->line, &dl->ins.name, &dl->ops.raw) < 0)
+ if (args->offset != -1) {
+ if (disasm_line__parse(dl->al.line, &dl->ins.name, &dl->ops.raw) < 0)
goto out_free_line;
- disasm_line__init_ins(dl, arch, map);
+ disasm_line__init_ins(dl, args->arch, args->map);
}
}
return dl;
out_free_line:
- zfree(&dl->line);
+ zfree(&dl->al.line);
out_delete:
free(dl);
return NULL;
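The disasm_line() accessor used here and throughout the TUI hunks is not visible in this diff context; given the layout comment above and the container_of() call in browser_line(), its assumed shape (presumably a static inline in annotate.h) is:

static inline struct disasm_line *disasm_line(struct annotation_line *al)
{
	return container_of(al, struct disasm_line, al);
}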
@@ -911,14 +979,13 @@ out_delete:
void disasm_line__free(struct disasm_line *dl)
{
- zfree(&dl->line);
if (dl->ins.ops && dl->ins.ops->free)
dl->ins.ops->free(&dl->ops);
else
ins__delete(&dl->ops);
free((void *)dl->ins.name);
dl->ins.name = NULL;
- free(dl);
+ annotation_line__delete(&dl->al);
}
int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw)
@@ -929,12 +996,13 @@ int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool r
return ins__scnprintf(&dl->ins, bf, size, &dl->ops);
}
-static void disasm__add(struct list_head *head, struct disasm_line *line)
+static void annotation_line__add(struct annotation_line *al, struct list_head *head)
{
- list_add_tail(&line->node, head);
+ list_add_tail(&al->node, head);
}
-struct disasm_line *disasm__get_next_ip_line(struct list_head *head, struct disasm_line *pos)
+struct annotation_line *
+annotation_line__next(struct annotation_line *pos, struct list_head *head)
{
list_for_each_entry_continue(pos, head, node)
if (pos->offset >= 0)
@@ -943,50 +1011,6 @@ struct disasm_line *disasm__get_next_ip_line(struct list_head *head, struct disa
return NULL;
}
-double disasm__calc_percent(struct annotation *notes, int evidx, s64 offset,
- s64 end, const char **path, struct sym_hist_entry *sample)
-{
- struct source_line *src_line = notes->src->lines;
- double percent = 0.0;
-
- sample->nr_samples = sample->period = 0;
-
- if (src_line) {
- size_t sizeof_src_line = sizeof(*src_line) +
- sizeof(src_line->samples) * (src_line->nr_pcnt - 1);
-
- while (offset < end) {
- src_line = (void *)notes->src->lines +
- (sizeof_src_line * offset);
-
- if (*path == NULL)
- *path = src_line->path;
-
- percent += src_line->samples[evidx].percent;
- sample->nr_samples += src_line->samples[evidx].nr;
- offset++;
- }
- } else {
- struct sym_hist *h = annotation__histogram(notes, evidx);
- unsigned int hits = 0;
- u64 period = 0;
-
- while (offset < end) {
- hits += h->addr[offset].nr_samples;
- period += h->addr[offset].period;
- ++offset;
- }
-
- if (h->nr_samples) {
- sample->period = period;
- sample->nr_samples = hits;
- percent = 100.0 * hits / h->nr_samples;
- }
- }
-
- return percent;
-}
-
static const char *annotate__address_color(struct block_range *br)
{
double cov = block_range__coverage(br);
@@ -1069,50 +1093,39 @@ static void annotate__branch_printf(struct block_range *br, u64 addr)
}
}
+static int disasm_line__print(struct disasm_line *dl, u64 start, int addr_fmt_width)
+{
+ s64 offset = dl->al.offset;
+ const u64 addr = start + offset;
+ struct block_range *br;
+
+ br = block_range__find(addr);
+ color_fprintf(stdout, annotate__address_color(br), " %*" PRIx64 ":", addr_fmt_width, addr);
+ color_fprintf(stdout, annotate__asm_color(br), "%s", dl->al.line);
+ annotate__branch_printf(br, addr);
+ return 0;
+}
-static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 start,
- struct perf_evsel *evsel, u64 len, int min_pcnt, int printed,
- int max_lines, struct disasm_line *queue)
+static int
+annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start,
+ struct perf_evsel *evsel, u64 len, int min_pcnt, int printed,
+ int max_lines, struct annotation_line *queue, int addr_fmt_width)
{
+ struct disasm_line *dl = container_of(al, struct disasm_line, al);
static const char *prev_line;
static const char *prev_color;
- if (dl->offset != -1) {
- const char *path = NULL;
- double percent, max_percent = 0.0;
- double *ppercents = &percent;
- struct sym_hist_entry sample;
- struct sym_hist_entry *psamples = &sample;
+ if (al->offset != -1) {
+ double max_percent = 0.0;
int i, nr_percent = 1;
const char *color;
struct annotation *notes = symbol__annotation(sym);
- s64 offset = dl->offset;
- const u64 addr = start + offset;
- struct disasm_line *next;
- struct block_range *br;
-
- next = disasm__get_next_ip_line(&notes->src->source, dl);
-
- if (perf_evsel__is_group_event(evsel)) {
- nr_percent = evsel->nr_members;
- ppercents = calloc(nr_percent, sizeof(double));
- psamples = calloc(nr_percent, sizeof(struct sym_hist_entry));
- if (ppercents == NULL || psamples == NULL) {
- return -1;
- }
- }
- for (i = 0; i < nr_percent; i++) {
- percent = disasm__calc_percent(notes,
- notes->src->lines ? i : evsel->idx + i,
- offset,
- next ? next->offset : (s64) len,
- &path, &sample);
-
- ppercents[i] = percent;
- psamples[i] = sample;
- if (percent > max_percent)
- max_percent = percent;
+ for (i = 0; i < al->samples_nr; i++) {
+ struct annotation_data *sample = &al->samples[i];
+
+ if (sample->percent > max_percent)
+ max_percent = sample->percent;
}
if (max_percent < min_pcnt)
@@ -1123,10 +1136,10 @@ static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 st
if (queue != NULL) {
list_for_each_entry_from(queue, &notes->src->source, node) {
- if (queue == dl)
+ if (queue == al)
break;
- disasm_line__print(queue, sym, start, evsel, len,
- 0, 0, 1, NULL);
+ annotation_line__print(queue, sym, start, evsel, len,
+ 0, 0, 1, NULL, addr_fmt_width);
}
}
@@ -1137,44 +1150,34 @@ static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 st
* the same color than the percentage. Don't print it
* twice for close colored addr with the same filename:line
*/
- if (path) {
- if (!prev_line || strcmp(prev_line, path)
+ if (al->path) {
+ if (!prev_line || strcmp(prev_line, al->path)
|| color != prev_color) {
- color_fprintf(stdout, color, " %s", path);
- prev_line = path;
+ color_fprintf(stdout, color, " %s", al->path);
+ prev_line = al->path;
prev_color = color;
}
}
for (i = 0; i < nr_percent; i++) {
- percent = ppercents[i];
- sample = psamples[i];
- color = get_percent_color(percent);
+ struct annotation_data *sample = &al->samples[i];
+
+ color = get_percent_color(sample->percent);
if (symbol_conf.show_total_period)
color_fprintf(stdout, color, " %11" PRIu64,
- sample.period);
+ sample->he.period);
else if (symbol_conf.show_nr_samples)
color_fprintf(stdout, color, " %7" PRIu64,
- sample.nr_samples);
+ sample->he.nr_samples);
else
- color_fprintf(stdout, color, " %7.2f", percent);
+ color_fprintf(stdout, color, " %7.2f", sample->percent);
}
- printf(" : ");
+ printf(" : ");
- br = block_range__find(addr);
- color_fprintf(stdout, annotate__address_color(br), " %" PRIx64 ":", addr);
- color_fprintf(stdout, annotate__asm_color(br), "%s", dl->line);
- annotate__branch_printf(br, addr);
+ disasm_line__print(dl, start, addr_fmt_width);
printf("\n");
-
- if (ppercents != &percent)
- free(ppercents);
-
- if (psamples != &sample)
- free(psamples);
-
} else if (max_lines && printed >= max_lines)
return 1;
else {
@@ -1186,10 +1189,10 @@ static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 st
if (perf_evsel__is_group_event(evsel))
width *= evsel->nr_members;
- if (!*dl->line)
+ if (!*al->line)
printf(" %*s:\n", width, " ");
else
- printf(" %*s: %s\n", width, " ", dl->line);
+ printf(" %*s: %*s %s\n", width, " ", addr_fmt_width, " ", al->line);
}
return 0;
@@ -1215,11 +1218,11 @@ static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 st
* means that it's not a disassembly line so should be treated differently.
* The ops.raw part will be parsed further according to type of the instruction.
*/
-static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
- struct arch *arch,
- FILE *file, size_t privsize,
+static int symbol__parse_objdump_line(struct symbol *sym, FILE *file,
+ struct annotate_args *args,
int *line_nr)
{
+ struct map *map = args->map;
struct annotation *notes = symbol__annotation(sym);
struct disasm_line *dl;
char *line = NULL, *parsed_line, *tmp, *tmp2;
@@ -1263,7 +1266,11 @@ static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
parsed_line = tmp2 + 1;
}
- dl = disasm_line__new(offset, parsed_line, privsize, *line_nr, arch, map);
+ args->offset = offset;
+ args->line = parsed_line;
+ args->line_nr = *line_nr;
+
+ dl = disasm_line__new(args);
free(line);
(*line_nr)++;
@@ -1288,7 +1295,7 @@ static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
dl->ops.target.name = strdup(target.sym->name);
}
- disasm__add(&notes->src->source, dl);
+ annotation_line__add(&dl->al, &notes->src->source);
return 0;
}
@@ -1305,19 +1312,19 @@ static void delete_last_nop(struct symbol *sym)
struct disasm_line *dl;
while (!list_empty(list)) {
- dl = list_entry(list->prev, struct disasm_line, node);
+ dl = list_entry(list->prev, struct disasm_line, al.node);
if (dl->ins.ops) {
if (dl->ins.ops != &nop_ops)
return;
} else {
- if (!strstr(dl->line, " nop ") &&
- !strstr(dl->line, " nopl ") &&
- !strstr(dl->line, " nopw "))
+ if (!strstr(dl->al.line, " nop ") &&
+ !strstr(dl->al.line, " nopl ") &&
+ !strstr(dl->al.line, " nopw "))
return;
}
- list_del(&dl->node);
+ list_del(&dl->al.node);
disasm_line__free(dl);
}
}
@@ -1412,25 +1419,11 @@ fallback:
return 0;
}
-static const char *annotate__norm_arch(const char *arch_name)
-{
- struct utsname uts;
-
- if (!arch_name) { /* Assume we are annotating locally. */
- if (uname(&uts) < 0)
- return NULL;
- arch_name = uts.machine;
- }
- return normalize_arch((char *)arch_name);
-}
-
-int symbol__disassemble(struct symbol *sym, struct map *map,
- const char *arch_name, size_t privsize,
- struct arch **parch, char *cpuid)
+static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
{
+ struct map *map = args->map;
struct dso *dso = map->dso;
char command[PATH_MAX * 2];
- struct arch *arch = NULL;
FILE *file;
char symfs_filename[PATH_MAX];
struct kcore_extract kce;
@@ -1444,25 +1437,6 @@ int symbol__disassemble(struct symbol *sym, struct map *map,
if (err)
return err;
- arch_name = annotate__norm_arch(arch_name);
- if (!arch_name)
- return -1;
-
- arch = arch__find(arch_name);
- if (arch == NULL)
- return -ENOTSUP;
-
- if (parch)
- *parch = arch;
-
- if (arch->init) {
- err = arch->init(arch, cpuid);
- if (err) {
- pr_err("%s: failed to initialize %s arch priv area\n", __func__, arch->name);
- return err;
- }
- }
-
pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__,
symfs_filename, sym->name, map->unmap_ip(map, sym->start),
map->unmap_ip(map, sym->end));
@@ -1546,8 +1520,7 @@ int symbol__disassemble(struct symbol *sym, struct map *map,
* can associate it with the instructions till the next one.
* See disasm_line__new() and struct disasm_line::line_nr.
*/
- if (symbol__parse_objdump_line(sym, map, arch, file, privsize,
- &lineno) < 0)
+ if (symbol__parse_objdump_line(sym, file, args, &lineno) < 0)
break;
nline++;
}
@@ -1580,21 +1553,110 @@ out_close_stdout:
goto out_remove_tmp;
}
-static void insert_source_line(struct rb_root *root, struct source_line *src_line)
+static void calc_percent(struct sym_hist *hist,
+ struct annotation_data *sample,
+ s64 offset, s64 end)
+{
+ unsigned int hits = 0;
+ u64 period = 0;
+
+ while (offset < end) {
+ hits += hist->addr[offset].nr_samples;
+ period += hist->addr[offset].period;
+ ++offset;
+ }
+
+ if (hist->nr_samples) {
+ sample->he.period = period;
+ sample->he.nr_samples = hits;
+ sample->percent = 100.0 * hits / hist->nr_samples;
+ }
+}
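As a worked example with illustrative numbers only: if the [offset, end) range accumulates hits = 25 while hist->nr_samples = 200, the line gets sample->percent = 100.0 * 25 / 200 = 12.5.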
+
+static void annotation__calc_percent(struct annotation *notes,
+ struct perf_evsel *evsel, s64 len)
+{
+ struct annotation_line *al, *next;
+
+ list_for_each_entry(al, &notes->src->source, node) {
+ s64 end;
+ int i;
+
+ if (al->offset == -1)
+ continue;
+
+ next = annotation_line__next(al, &notes->src->source);
+ end = next ? next->offset : len;
+
+ for (i = 0; i < al->samples_nr; i++) {
+ struct annotation_data *sample;
+ struct sym_hist *hist;
+
+ hist = annotation__histogram(notes, evsel->idx + i);
+ sample = &al->samples[i];
+
+ calc_percent(hist, sample, al->offset, end);
+ }
+ }
+}
+
+void symbol__calc_percent(struct symbol *sym, struct perf_evsel *evsel)
+{
+ struct annotation *notes = symbol__annotation(sym);
+
+ annotation__calc_percent(notes, evsel, symbol__size(sym));
+}
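With disassembly and percentage computation now split, callers follow the two-step sequence visible in the TUI/GTK hunks earlier in this patch; schematically:

	err = symbol__annotate(sym, map, evsel, privsize, &arch);
	if (err)
		return err;	/* report via symbol__strerror_disassemble() */
	symbol__calc_percent(sym, evsel);	/* fills annotation_line::samples[] */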
+
+int symbol__annotate(struct symbol *sym, struct map *map,
+ struct perf_evsel *evsel, size_t privsize,
+ struct arch **parch)
{
- struct source_line *iter;
+ struct annotate_args args = {
+ .privsize = privsize,
+ .map = map,
+ .evsel = evsel,
+ };
+ struct perf_env *env = perf_evsel__env(evsel);
+ const char *arch_name = perf_env__arch(env);
+ struct arch *arch;
+ int err;
+
+ if (!arch_name)
+ return -1;
+
+ args.arch = arch = arch__find(arch_name);
+ if (arch == NULL)
+ return -ENOTSUP;
+
+ if (parch)
+ *parch = arch;
+
+ if (arch->init) {
+ err = arch->init(arch, env ? env->cpuid : NULL);
+ if (err) {
+ pr_err("%s: failed to initialize %s arch priv area\n", __func__, arch->name);
+ return err;
+ }
+ }
+
+ return symbol__disassemble(sym, &args);
+}
+
+static void insert_source_line(struct rb_root *root, struct annotation_line *al)
+{
+ struct annotation_line *iter;
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
int i, ret;
while (*p != NULL) {
parent = *p;
- iter = rb_entry(parent, struct source_line, node);
+ iter = rb_entry(parent, struct annotation_line, rb_node);
- ret = strcmp(iter->path, src_line->path);
+ ret = strcmp(iter->path, al->path);
if (ret == 0) {
- for (i = 0; i < src_line->nr_pcnt; i++)
- iter->samples[i].percent_sum += src_line->samples[i].percent;
+ for (i = 0; i < al->samples_nr; i++)
+ iter->samples[i].percent_sum += al->samples[i].percent;
return;
}
@@ -1604,18 +1666,18 @@ static void insert_source_line(struct rb_root *root, struct source_line *src_lin
p = &(*p)->rb_right;
}
- for (i = 0; i < src_line->nr_pcnt; i++)
- src_line->samples[i].percent_sum = src_line->samples[i].percent;
+ for (i = 0; i < al->samples_nr; i++)
+ al->samples[i].percent_sum = al->samples[i].percent;
- rb_link_node(&src_line->node, parent, p);
- rb_insert_color(&src_line->node, root);
+ rb_link_node(&al->rb_node, parent, p);
+ rb_insert_color(&al->rb_node, root);
}
-static int cmp_source_line(struct source_line *a, struct source_line *b)
+static int cmp_source_line(struct annotation_line *a, struct annotation_line *b)
{
int i;
- for (i = 0; i < a->nr_pcnt; i++) {
+ for (i = 0; i < a->samples_nr; i++) {
if (a->samples[i].percent_sum == b->samples[i].percent_sum)
continue;
return a->samples[i].percent_sum > b->samples[i].percent_sum;
@@ -1624,135 +1686,47 @@ static int cmp_source_line(struct source_line *a, struct source_line *b)
return 0;
}
-static void __resort_source_line(struct rb_root *root, struct source_line *src_line)
+static void __resort_source_line(struct rb_root *root, struct annotation_line *al)
{
- struct source_line *iter;
+ struct annotation_line *iter;
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
while (*p != NULL) {
parent = *p;
- iter = rb_entry(parent, struct source_line, node);
+ iter = rb_entry(parent, struct annotation_line, rb_node);
- if (cmp_source_line(src_line, iter))
+ if (cmp_source_line(al, iter))
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
- rb_link_node(&src_line->node, parent, p);
- rb_insert_color(&src_line->node, root);
+ rb_link_node(&al->rb_node, parent, p);
+ rb_insert_color(&al->rb_node, root);
}
static void resort_source_line(struct rb_root *dest_root, struct rb_root *src_root)
{
- struct source_line *src_line;
+ struct annotation_line *al;
struct rb_node *node;
node = rb_first(src_root);
while (node) {
struct rb_node *next;
- src_line = rb_entry(node, struct source_line, node);
+ al = rb_entry(node, struct annotation_line, rb_node);
next = rb_next(node);
rb_erase(node, src_root);
- __resort_source_line(dest_root, src_line);
+ __resort_source_line(dest_root, al);
node = next;
}
}
-static void symbol__free_source_line(struct symbol *sym, int len)
-{
- struct annotation *notes = symbol__annotation(sym);
- struct source_line *src_line = notes->src->lines;
- size_t sizeof_src_line;
- int i;
-
- sizeof_src_line = sizeof(*src_line) +
- (sizeof(src_line->samples) * (src_line->nr_pcnt - 1));
-
- for (i = 0; i < len; i++) {
- free_srcline(src_line->path);
- src_line = (void *)src_line + sizeof_src_line;
- }
-
- zfree(&notes->src->lines);
-}
-
-/* Get the filename:line for the colored entries */
-static int symbol__get_source_line(struct symbol *sym, struct map *map,
- struct perf_evsel *evsel,
- struct rb_root *root, int len)
-{
- u64 start;
- int i, k;
- int evidx = evsel->idx;
- struct source_line *src_line;
- struct annotation *notes = symbol__annotation(sym);
- struct sym_hist *h = annotation__histogram(notes, evidx);
- struct rb_root tmp_root = RB_ROOT;
- int nr_pcnt = 1;
- u64 nr_samples = h->nr_samples;
- size_t sizeof_src_line = sizeof(struct source_line);
-
- if (perf_evsel__is_group_event(evsel)) {
- for (i = 1; i < evsel->nr_members; i++) {
- h = annotation__histogram(notes, evidx + i);
- nr_samples += h->nr_samples;
- }
- nr_pcnt = evsel->nr_members;
- sizeof_src_line += (nr_pcnt - 1) * sizeof(src_line->samples);
- }
-
- if (!nr_samples)
- return 0;
-
- src_line = notes->src->lines = calloc(len, sizeof_src_line);
- if (!notes->src->lines)
- return -1;
-
- start = map__rip_2objdump(map, sym->start);
-
- for (i = 0; i < len; i++) {
- u64 offset;
- double percent_max = 0.0;
-
- src_line->nr_pcnt = nr_pcnt;
-
- for (k = 0; k < nr_pcnt; k++) {
- double percent = 0.0;
-
- h = annotation__histogram(notes, evidx + k);
- nr_samples = h->addr[i].nr_samples;
- if (h->nr_samples)
- percent = 100.0 * nr_samples / h->nr_samples;
-
- if (percent > percent_max)
- percent_max = percent;
- src_line->samples[k].percent = percent;
- src_line->samples[k].nr = nr_samples;
- }
-
- if (percent_max <= 0.5)
- goto next;
-
- offset = start + i;
- src_line->path = get_srcline(map->dso, offset, NULL,
- false, true);
- insert_source_line(&tmp_root, src_line);
-
- next:
- src_line = (void *)src_line + sizeof_src_line;
- }
-
- resort_source_line(root, &tmp_root);
- return 0;
-}
-
static void print_summary(struct rb_root *root, const char *filename)
{
- struct source_line *src_line;
+ struct annotation_line *al;
struct rb_node *node;
printf("\nSorted summary for file %s\n", filename);
@@ -1770,9 +1744,9 @@ static void print_summary(struct rb_root *root, const char *filename)
char *path;
int i;
- src_line = rb_entry(node, struct source_line, node);
- for (i = 0; i < src_line->nr_pcnt; i++) {
- percent = src_line->samples[i].percent_sum;
+ al = rb_entry(node, struct annotation_line, rb_node);
+ for (i = 0; i < al->samples_nr; i++) {
+ percent = al->samples[i].percent_sum;
color = get_percent_color(percent);
color_fprintf(stdout, color, " %7.2f", percent);
@@ -1780,7 +1754,7 @@ static void print_summary(struct rb_root *root, const char *filename)
percent_max = percent;
}
- path = src_line->path;
+ path = al->path;
color = get_percent_color(percent_max);
color_fprintf(stdout, color, " %s\n", path);
@@ -1801,6 +1775,19 @@ static void symbol__annotate_hits(struct symbol *sym, struct perf_evsel *evsel)
printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->nr_samples", h->nr_samples);
}
+static int annotated_source__addr_fmt_width(struct list_head *lines, u64 start)
+{
+ char bf[32];
+ struct annotation_line *line;
+
+ list_for_each_entry_reverse(line, lines, node) {
+ if (line->offset != -1)
+ return scnprintf(bf, sizeof(bf), "%" PRIx64, start + line->offset);
+ }
+
+ return 0;
+}
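For illustration (hypothetical values): with start = 0x4004d0 and the last real instruction at offset 0x1f2, scnprintf() renders "4006c2" and the function returns 6, so every address column below is padded to six digits.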
+
int symbol__annotate_printf(struct symbol *sym, struct map *map,
struct perf_evsel *evsel, bool full_paths,
int min_pcnt, int max_lines, int context)
@@ -1811,9 +1798,9 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map,
const char *evsel_name = perf_evsel__name(evsel);
struct annotation *notes = symbol__annotation(sym);
struct sym_hist *h = annotation__histogram(notes, evsel->idx);
- struct disasm_line *pos, *queue = NULL;
+ struct annotation_line *pos, *queue = NULL;
u64 start = map__rip_2objdump(map, sym->start);
- int printed = 2, queue_len = 0;
+ int printed = 2, queue_len = 0, addr_fmt_width;
int more = 0;
u64 len;
int width = symbol_conf.show_total_period ? 12 : 8;
@@ -1844,15 +1831,21 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map,
if (verbose > 0)
symbol__annotate_hits(sym, evsel);
+ addr_fmt_width = annotated_source__addr_fmt_width(&notes->src->source, start);
+
list_for_each_entry(pos, &notes->src->source, node) {
+ int err;
+
if (context && queue == NULL) {
queue = pos;
queue_len = 0;
}
- switch (disasm_line__print(pos, sym, start, evsel, len,
- min_pcnt, printed, max_lines,
- queue)) {
+ err = annotation_line__print(pos, sym, start, evsel, len,
+ min_pcnt, printed, max_lines,
+ queue, addr_fmt_width);
+
+ switch (err) {
case 0:
++printed;
if (context) {
@@ -1907,13 +1900,13 @@ void symbol__annotate_decay_histogram(struct symbol *sym, int evidx)
}
}
-void disasm__purge(struct list_head *head)
+void annotated_source__purge(struct annotated_source *as)
{
- struct disasm_line *pos, *n;
+ struct annotation_line *al, *n;
- list_for_each_entry_safe(pos, n, head, node) {
- list_del(&pos->node);
- disasm_line__free(pos);
+ list_for_each_entry_safe(al, n, &as->source, node) {
+ list_del(&al->node);
+ disasm_line__free(disasm_line(al));
}
}
@@ -1921,10 +1914,10 @@ static size_t disasm_line__fprintf(struct disasm_line *dl, FILE *fp)
{
size_t printed;
- if (dl->offset == -1)
- return fprintf(fp, "%s\n", dl->line);
+ if (dl->al.offset == -1)
+ return fprintf(fp, "%s\n", dl->al.line);
- printed = fprintf(fp, "%#" PRIx64 " %s", dl->offset, dl->ins.name);
+ printed = fprintf(fp, "%#" PRIx64 " %s", dl->al.offset, dl->ins.name);
if (dl->ops.raw[0] != '\0') {
printed += fprintf(fp, "%.*s %s\n", 6 - (int)printed, " ",
@@ -1939,38 +1932,73 @@ size_t disasm__fprintf(struct list_head *head, FILE *fp)
struct disasm_line *pos;
size_t printed = 0;
- list_for_each_entry(pos, head, node)
+ list_for_each_entry(pos, head, al.node)
printed += disasm_line__fprintf(pos, fp);
return printed;
}
+static void annotation__calc_lines(struct annotation *notes, struct map *map,
+ struct rb_root *root, u64 start)
+{
+ struct annotation_line *al;
+ struct rb_root tmp_root = RB_ROOT;
+
+ list_for_each_entry(al, &notes->src->source, node) {
+ double percent_max = 0.0;
+ int i;
+
+ for (i = 0; i < al->samples_nr; i++) {
+ struct annotation_data *sample;
+
+ sample = &al->samples[i];
+
+ if (sample->percent > percent_max)
+ percent_max = sample->percent;
+ }
+
+ if (percent_max <= 0.5)
+ continue;
+
+ al->path = get_srcline(map->dso, start + al->offset, NULL,
+ false, true, start + al->offset);
+ insert_source_line(&tmp_root, al);
+ }
+
+ resort_source_line(root, &tmp_root);
+}
+
+static void symbol__calc_lines(struct symbol *sym, struct map *map,
+ struct rb_root *root)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ u64 start = map__rip_2objdump(map, sym->start);
+
+ annotation__calc_lines(notes, map, root, start);
+}
+
int symbol__tty_annotate(struct symbol *sym, struct map *map,
struct perf_evsel *evsel, bool print_lines,
bool full_paths, int min_pcnt, int max_lines)
{
struct dso *dso = map->dso;
struct rb_root source_line = RB_ROOT;
- u64 len;
- if (symbol__disassemble(sym, map, perf_evsel__env_arch(evsel),
- 0, NULL, NULL) < 0)
+ if (symbol__annotate(sym, map, evsel, 0, NULL) < 0)
return -1;
- len = symbol__size(sym);
+ symbol__calc_percent(sym, evsel);
if (print_lines) {
srcline_full_filename = full_paths;
- symbol__get_source_line(sym, map, evsel, &source_line, len);
+ symbol__calc_lines(sym, map, &source_line);
print_summary(&source_line, dso->long_name);
}
symbol__annotate_printf(sym, map, evsel, full_paths,
min_pcnt, max_lines, 0);
- if (print_lines)
- symbol__free_source_line(sym, len);
- disasm__purge(&symbol__annotation(sym)->src->source);
+ annotated_source__purge(symbol__annotation(sym)->src);
return 0;
}
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index f6ba3560de5e..ce427445671f 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -59,33 +59,55 @@ bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2);
struct annotation;
+struct sym_hist_entry {
+ u64 nr_samples;
+ u64 period;
+};
+
+struct annotation_data {
+ double percent;
+ double percent_sum;
+ struct sym_hist_entry he;
+};
+
+struct annotation_line {
+ struct list_head node;
+ struct rb_node rb_node;
+ s64 offset;
+ char *line;
+ int line_nr;
+ float ipc;
+ u64 cycles;
+ size_t privsize;
+ char *path;
+ int samples_nr;
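+	/* flexible array, must stay last: sized by samples_nr at allocation time */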
+ struct annotation_data samples[0];
+};
+
struct disasm_line {
- struct list_head node;
- s64 offset;
- char *line;
- struct ins ins;
- int line_nr;
- float ipc;
- u64 cycles;
- struct ins_operands ops;
+ struct ins ins;
+ struct ins_operands ops;
+
+ /* This needs to be at the end. */
+ struct annotation_line al;
};
+static inline struct disasm_line *disasm_line(struct annotation_line *al)
+{
+ return al ? container_of(al, struct disasm_line, al) : NULL;
+}
+
static inline bool disasm_line__has_offset(const struct disasm_line *dl)
{
return dl->ops.target.offset_avail;
}
-struct sym_hist_entry {
- u64 nr_samples;
- u64 period;
-};
-
void disasm_line__free(struct disasm_line *dl);
-struct disasm_line *disasm__get_next_ip_line(struct list_head *head, struct disasm_line *pos);
+struct annotation_line *
+annotation_line__next(struct annotation_line *pos, struct list_head *head);
int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw);
size_t disasm__fprintf(struct list_head *head, FILE *fp);
-double disasm__calc_percent(struct annotation *notes, int evidx, s64 offset,
- s64 end, const char **path, struct sym_hist_entry *sample);
+void symbol__calc_percent(struct symbol *sym, struct perf_evsel *evsel);
struct sym_hist {
u64 nr_samples;
@@ -104,19 +126,6 @@ struct cyc_hist {
u16 reset;
};
-struct source_line_samples {
- double percent;
- double percent_sum;
- u64 nr;
-};
-
-struct source_line {
- struct rb_node node;
- char *path;
- int nr_pcnt;
- struct source_line_samples samples[1];
-};
-
/** struct annotated_source - symbols with hits have this attached as in sannotation
*
* @histogram: Array of addr hit histograms per event being monitored
@@ -132,7 +141,6 @@ struct source_line {
*/
struct annotated_source {
struct list_head source;
- struct source_line *lines;
int nr_histograms;
size_t sizeof_sym_hist;
struct cyc_hist *cycles_hist;
@@ -169,9 +177,9 @@ int hist_entry__inc_addr_samples(struct hist_entry *he, struct perf_sample *samp
int symbol__alloc_hist(struct symbol *sym);
void symbol__annotate_zero_histograms(struct symbol *sym);
-int symbol__disassemble(struct symbol *sym, struct map *map,
- const char *arch_name, size_t privsize,
- struct arch **parch, char *cpuid);
+int symbol__annotate(struct symbol *sym, struct map *map,
+ struct perf_evsel *evsel, size_t privsize,
+ struct arch **parch);
enum symbol_disassemble_errno {
SYMBOL_ANNOTATE_ERRNO__SUCCESS = 0,
@@ -198,7 +206,7 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map,
int min_pcnt, int max_lines, int context);
void symbol__annotate_zero_histogram(struct symbol *sym, int evidx);
void symbol__annotate_decay_histogram(struct symbol *sym, int evidx);
-void disasm__purge(struct list_head *head);
+void annotated_source__purge(struct annotated_source *as);
bool ui__has_annotation(void);
diff --git a/tools/perf/util/arm-spe-pkt-decoder.c b/tools/perf/util/arm-spe-pkt-decoder.c
new file mode 100644
index 000000000000..b94001b756c7
--- /dev/null
+++ b/tools/perf/util/arm-spe-pkt-decoder.c
@@ -0,0 +1,462 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Arm Statistical Profiling Extensions (SPE) support
+ * Copyright (c) 2017-2018, Arm Ltd.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <endian.h>
+#include <byteswap.h>
+
+#include "arm-spe-pkt-decoder.h"
+
+#define BIT(n) (1ULL << (n))
+
+#define NS_FLAG BIT(63)
+#define EL_FLAG (BIT(62) | BIT(61))
+
+#define SPE_HEADER0_PAD 0x0
+#define SPE_HEADER0_END 0x1
+#define SPE_HEADER0_ADDRESS 0x30 /* address packet (short) */
+#define SPE_HEADER0_ADDRESS_MASK 0x38
+#define SPE_HEADER0_COUNTER 0x18 /* counter packet (short) */
+#define SPE_HEADER0_COUNTER_MASK 0x38
+#define SPE_HEADER0_TIMESTAMP		0x71
+#define SPE_HEADER0_EVENTS 0x2
+#define SPE_HEADER0_EVENTS_MASK 0xf
+#define SPE_HEADER0_SOURCE 0x3
+#define SPE_HEADER0_SOURCE_MASK 0xf
+#define SPE_HEADER0_CONTEXT 0x24
+#define SPE_HEADER0_CONTEXT_MASK 0x3c
+#define SPE_HEADER0_OP_TYPE 0x8
+#define SPE_HEADER0_OP_TYPE_MASK 0x3c
+#define SPE_HEADER1_ALIGNMENT 0x0
+#define SPE_HEADER1_ADDRESS 0xb0 /* address packet (extended) */
+#define SPE_HEADER1_ADDRESS_MASK 0xf8
+#define SPE_HEADER1_COUNTER 0x98 /* counter packet (extended) */
+#define SPE_HEADER1_COUNTER_MASK 0xf8
+
+#if __BYTE_ORDER == __BIG_ENDIAN
+#define le16_to_cpu bswap_16
+#define le32_to_cpu bswap_32
+#define le64_to_cpu bswap_64
+#define memcpy_le64(d, s, n) do { \
+ memcpy((d), (s), (n)); \
+ *(d) = le64_to_cpu(*(d)); \
+} while (0)
+#else
+#define le16_to_cpu
+#define le32_to_cpu
+#define le64_to_cpu
+#define memcpy_le64 memcpy
+#endif
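+/*
+ * SPE payloads are recorded little-endian: on big-endian hosts the
+ * helpers above byte-swap them, on little-endian hosts they expand
+ * to nothing.
+ */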
+
+static const char * const arm_spe_packet_name[] = {
+ [ARM_SPE_PAD] = "PAD",
+ [ARM_SPE_END] = "END",
+ [ARM_SPE_TIMESTAMP] = "TS",
+ [ARM_SPE_ADDRESS] = "ADDR",
+ [ARM_SPE_COUNTER] = "LAT",
+ [ARM_SPE_CONTEXT] = "CONTEXT",
+ [ARM_SPE_OP_TYPE] = "OP-TYPE",
+ [ARM_SPE_EVENTS] = "EVENTS",
+ [ARM_SPE_DATA_SOURCE] = "DATA-SOURCE",
+};
+
+const char *arm_spe_pkt_name(enum arm_spe_pkt_type type)
+{
+ return arm_spe_packet_name[type];
+}
+
+/*
+ * Return the ARM SPE payload size from its encoding, which is in
+ * bits 5:4 of the byte:
+ * 00 : byte (1)
+ * 01 : halfword (2)
+ * 10 : word (4)
+ * 11 : doubleword (8)
+ */
+static int payloadlen(unsigned char byte)
+{
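+	/* e.g. 0x99 has bits 5:4 = 0b01, so the payload is 1 << 1 = 2 bytes */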
+ return 1 << ((byte & 0x30) >> 4);
+}
+
+static int arm_spe_get_payload(const unsigned char *buf, size_t len,
+ struct arm_spe_pkt *packet)
+{
+ size_t payload_len = payloadlen(buf[0]);
+
+ if (len < 1 + payload_len)
+ return ARM_SPE_NEED_MORE_BYTES;
+
+ buf++;
+
+ switch (payload_len) {
+ case 1: packet->payload = *(uint8_t *)buf; break;
+ case 2: packet->payload = le16_to_cpu(*(uint16_t *)buf); break;
+ case 4: packet->payload = le32_to_cpu(*(uint32_t *)buf); break;
+ case 8: packet->payload = le64_to_cpu(*(uint64_t *)buf); break;
+ default: return ARM_SPE_BAD_PACKET;
+ }
+
+ return 1 + payload_len;
+}
+
+static int arm_spe_get_pad(struct arm_spe_pkt *packet)
+{
+ packet->type = ARM_SPE_PAD;
+ return 1;
+}
+
+static int arm_spe_get_alignment(const unsigned char *buf, size_t len,
+ struct arm_spe_pkt *packet)
+{
+ unsigned int alignment = 1 << ((buf[0] & 0xf) + 1);
+
+ if (len < alignment)
+ return ARM_SPE_NEED_MORE_BYTES;
+
+ packet->type = ARM_SPE_PAD;
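+	/* skip everything up to the next alignment boundary as padding */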
+ return alignment - (((uintptr_t)buf) & (alignment - 1));
+}
+
+static int arm_spe_get_end(struct arm_spe_pkt *packet)
+{
+ packet->type = ARM_SPE_END;
+ return 1;
+}
+
+static int arm_spe_get_timestamp(const unsigned char *buf, size_t len,
+ struct arm_spe_pkt *packet)
+{
+ packet->type = ARM_SPE_TIMESTAMP;
+ return arm_spe_get_payload(buf, len, packet);
+}
+
+static int arm_spe_get_events(const unsigned char *buf, size_t len,
+ struct arm_spe_pkt *packet)
+{
+ int ret = arm_spe_get_payload(buf, len, packet);
+
+ packet->type = ARM_SPE_EVENTS;
+
+	/*
+	 * We use the index to identify Events with fewer comparisons in
+	 * arm_spe_pkt_desc(): e.g., the LLC-ACCESS, LLC-REFILL, and
+	 * REMOTE-ACCESS events are identified iff index > 1.
+	 */
+ packet->index = ret - 1;
+
+ return ret;
+}
+
+static int arm_spe_get_data_source(const unsigned char *buf, size_t len,
+ struct arm_spe_pkt *packet)
+{
+ packet->type = ARM_SPE_DATA_SOURCE;
+ return arm_spe_get_payload(buf, len, packet);
+}
+
+static int arm_spe_get_context(const unsigned char *buf, size_t len,
+ struct arm_spe_pkt *packet)
+{
+ packet->type = ARM_SPE_CONTEXT;
+ packet->index = buf[0] & 0x3;
+
+ return arm_spe_get_payload(buf, len, packet);
+}
+
+static int arm_spe_get_op_type(const unsigned char *buf, size_t len,
+ struct arm_spe_pkt *packet)
+{
+ packet->type = ARM_SPE_OP_TYPE;
+ packet->index = buf[0] & 0x3;
+ return arm_spe_get_payload(buf, len, packet);
+}
+
+static int arm_spe_get_counter(const unsigned char *buf, size_t len,
+ const unsigned char ext_hdr, struct arm_spe_pkt *packet)
+{
+ if (len < 2)
+ return ARM_SPE_NEED_MORE_BYTES;
+
+ packet->type = ARM_SPE_COUNTER;
+ if (ext_hdr)
+ packet->index = ((buf[0] & 0x3) << 3) | (buf[1] & 0x7);
+ else
+ packet->index = buf[0] & 0x7;
+
+	packet->payload = le16_to_cpu(*(uint16_t *)(buf + ext_hdr + 1));
+
+ return 1 + ext_hdr + 2;
+}
+
+static int arm_spe_get_addr(const unsigned char *buf, size_t len,
+ const unsigned char ext_hdr, struct arm_spe_pkt *packet)
+{
+ if (len < 8)
+ return ARM_SPE_NEED_MORE_BYTES;
+
+ packet->type = ARM_SPE_ADDRESS;
+ if (ext_hdr)
+ packet->index = ((buf[0] & 0x3) << 3) | (buf[1] & 0x7);
+ else
+ packet->index = buf[0] & 0x7;
+
+	memcpy_le64(&packet->payload, buf + ext_hdr + 1, 8);
+
+ return 1 + ext_hdr + 8;
+}
+
+static int arm_spe_do_get_packet(const unsigned char *buf, size_t len,
+ struct arm_spe_pkt *packet)
+{
+ unsigned int byte;
+
+ memset(packet, 0, sizeof(struct arm_spe_pkt));
+
+ if (!len)
+ return ARM_SPE_NEED_MORE_BYTES;
+
+ byte = buf[0];
+ if (byte == SPE_HEADER0_PAD)
+ return arm_spe_get_pad(packet);
+ else if (byte == SPE_HEADER0_END) /* no timestamp at end of record */
+ return arm_spe_get_end(packet);
+ else if (byte & 0xc0 /* 0y11xxxxxx */) {
+ if (byte & 0x80) {
+ if ((byte & SPE_HEADER0_ADDRESS_MASK) == SPE_HEADER0_ADDRESS)
+ return arm_spe_get_addr(buf, len, 0, packet);
+ if ((byte & SPE_HEADER0_COUNTER_MASK) == SPE_HEADER0_COUNTER)
+ return arm_spe_get_counter(buf, len, 0, packet);
+ } else
+ if (byte == SPE_HEADER0_TIMESTAMP)
+ return arm_spe_get_timestamp(buf, len, packet);
+ else if ((byte & SPE_HEADER0_EVENTS_MASK) == SPE_HEADER0_EVENTS)
+ return arm_spe_get_events(buf, len, packet);
+ else if ((byte & SPE_HEADER0_SOURCE_MASK) == SPE_HEADER0_SOURCE)
+ return arm_spe_get_data_source(buf, len, packet);
+ else if ((byte & SPE_HEADER0_CONTEXT_MASK) == SPE_HEADER0_CONTEXT)
+ return arm_spe_get_context(buf, len, packet);
+ else if ((byte & SPE_HEADER0_OP_TYPE_MASK) == SPE_HEADER0_OP_TYPE)
+ return arm_spe_get_op_type(buf, len, packet);
+ } else if ((byte & 0xe0) == 0x20 /* 0y001xxxxx */) {
+ /* 16-bit header */
+ byte = buf[1];
+ if (byte == SPE_HEADER1_ALIGNMENT)
+ return arm_spe_get_alignment(buf, len, packet);
+ else if ((byte & SPE_HEADER1_ADDRESS_MASK) == SPE_HEADER1_ADDRESS)
+ return arm_spe_get_addr(buf, len, 1, packet);
+ else if ((byte & SPE_HEADER1_COUNTER_MASK) == SPE_HEADER1_COUNTER)
+ return arm_spe_get_counter(buf, len, 1, packet);
+ }
+
+ return ARM_SPE_BAD_PACKET;
+}
+
+int arm_spe_get_packet(const unsigned char *buf, size_t len,
+ struct arm_spe_pkt *packet)
+{
+ int ret;
+
+ ret = arm_spe_do_get_packet(buf, len, packet);
+	/*
+	 * Put multiple consecutive PADs on the same line, up to
+	 * the fixed-width output format of 16 bytes per line.
+	 */
+ if (ret > 0 && packet->type == ARM_SPE_PAD) {
+ while (ret < 16 && len > (size_t)ret && !buf[ret])
+ ret += 1;
+ }
+ return ret;
+}
+
+int arm_spe_pkt_desc(const struct arm_spe_pkt *packet, char *buf,
+ size_t buf_len)
+{
+ int ret, ns, el, idx = packet->index;
+ unsigned long long payload = packet->payload;
+ const char *name = arm_spe_pkt_name(packet->type);
+
+ switch (packet->type) {
+ case ARM_SPE_BAD:
+ case ARM_SPE_PAD:
+ case ARM_SPE_END:
+ return snprintf(buf, buf_len, "%s", name);
+	case ARM_SPE_EVENTS: {
+		size_t blen = buf_len;
+
+		ret = snprintf(buf, blen, "EV");
+		buf += ret;
+		blen -= ret;
+		if (payload & 0x1) {
+			ret = snprintf(buf, blen, " EXCEPTION-GEN");
+			buf += ret;
+			blen -= ret;
+		}
+		if (payload & 0x2) {
+			ret = snprintf(buf, blen, " RETIRED");
+			buf += ret;
+			blen -= ret;
+		}
+		if (payload & 0x4) {
+			ret = snprintf(buf, blen, " L1D-ACCESS");
+			buf += ret;
+			blen -= ret;
+		}
+		if (payload & 0x8) {
+			ret = snprintf(buf, blen, " L1D-REFILL");
+			buf += ret;
+			blen -= ret;
+		}
+		if (payload & 0x10) {
+			ret = snprintf(buf, blen, " TLB-ACCESS");
+			buf += ret;
+			blen -= ret;
+		}
+		if (payload & 0x20) {
+			ret = snprintf(buf, blen, " TLB-REFILL");
+			buf += ret;
+			blen -= ret;
+		}
+		if (payload & 0x40) {
+			ret = snprintf(buf, blen, " NOT-TAKEN");
+			buf += ret;
+			blen -= ret;
+		}
+		if (payload & 0x80) {
+			ret = snprintf(buf, blen, " MISPRED");
+			buf += ret;
+			blen -= ret;
+		}
+		if (idx > 1) {
+			if (payload & 0x100) {
+				ret = snprintf(buf, blen, " LLC-ACCESS");
+				buf += ret;
+				blen -= ret;
+			}
+			if (payload & 0x200) {
+				ret = snprintf(buf, blen, " LLC-REFILL");
+				buf += ret;
+				blen -= ret;
+			}
+			if (payload & 0x400) {
+				ret = snprintf(buf, blen, " REMOTE-ACCESS");
+				buf += ret;
+				blen -= ret;
+			}
+		}
+		if (ret < 0)
+			return ret;
+		return buf_len - blen;
+	}
+ case ARM_SPE_OP_TYPE:
+ switch (idx) {
+ case 0: return snprintf(buf, buf_len, "%s", payload & 0x1 ?
+ "COND-SELECT" : "INSN-OTHER");
+	case 1:	{
+		size_t blen = buf_len;
+
+		if (payload & 0x1)
+			ret = snprintf(buf, blen, "ST");
+		else
+			ret = snprintf(buf, blen, "LD");
+		buf += ret;
+		blen -= ret;
+		if (payload & 0x2) {
+			if (payload & 0x4) {
+				ret = snprintf(buf, blen, " AT");
+				buf += ret;
+				blen -= ret;
+			}
+			if (payload & 0x8) {
+				ret = snprintf(buf, blen, " EXCL");
+				buf += ret;
+				blen -= ret;
+			}
+			if (payload & 0x10) {
+				ret = snprintf(buf, blen, " AR");
+				buf += ret;
+				blen -= ret;
+			}
+		} else if (payload & 0x4) {
+			ret = snprintf(buf, blen, " SIMD-FP");
+			buf += ret;
+			blen -= ret;
+		}
+		if (ret < 0)
+			return ret;
+		return buf_len - blen;
+	}
+	case 2:	{
+		size_t blen = buf_len;
+
+		ret = snprintf(buf, blen, "B");
+		buf += ret;
+		blen -= ret;
+		if (payload & 0x1) {
+			ret = snprintf(buf, blen, " COND");
+			buf += ret;
+			blen -= ret;
+		}
+		if (payload & 0x2) {
+			ret = snprintf(buf, blen, " IND");
+			buf += ret;
+			blen -= ret;
+		}
+		if (ret < 0)
+			return ret;
+		return buf_len - blen;
+	}
+ default: return 0;
+ }
+ case ARM_SPE_DATA_SOURCE:
+ case ARM_SPE_TIMESTAMP:
+ return snprintf(buf, buf_len, "%s %lld", name, payload);
+ case ARM_SPE_ADDRESS:
+ switch (idx) {
+ case 0:
+ case 1: ns = !!(packet->payload & NS_FLAG);
+ el = (packet->payload & EL_FLAG) >> 61;
+ payload &= ~(0xffULL << 56);
+ return snprintf(buf, buf_len, "%s 0x%llx el%d ns=%d",
+ (idx == 1) ? "TGT" : "PC", payload, el, ns);
+ case 2: return snprintf(buf, buf_len, "VA 0x%llx", payload);
+ case 3: ns = !!(packet->payload & NS_FLAG);
+ payload &= ~(0xffULL << 56);
+ return snprintf(buf, buf_len, "PA 0x%llx ns=%d",
+ payload, ns);
+ default: return 0;
+ }
+ case ARM_SPE_CONTEXT:
+ return snprintf(buf, buf_len, "%s 0x%lx el%d", name,
+ (unsigned long)payload, idx + 1);
+	case ARM_SPE_COUNTER: {
+		size_t blen = buf_len;
+
+		ret = snprintf(buf, blen, "%s %d ", name,
+			       (unsigned short)payload);
+		buf += ret;
+		blen -= ret;
+		switch (idx) {
+		case 0:	ret = snprintf(buf, blen, "TOT"); break;
+		case 1:	ret = snprintf(buf, blen, "ISSUE"); break;
+		case 2:	ret = snprintf(buf, blen, "XLAT"); break;
+		default: ret = 0;
+		}
+		if (ret < 0)
+			return ret;
+		blen -= ret;
+		return buf_len - blen;
+	}
+ default:
+ break;
+ }
+
+ return snprintf(buf, buf_len, "%s 0x%llx (%d)",
+ name, payload, packet->index);
+}
diff --git a/tools/perf/util/arm-spe-pkt-decoder.h b/tools/perf/util/arm-spe-pkt-decoder.h
new file mode 100644
index 000000000000..d786ef65113f
--- /dev/null
+++ b/tools/perf/util/arm-spe-pkt-decoder.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Arm Statistical Profiling Extensions (SPE) support
+ * Copyright (c) 2017-2018, Arm Ltd.
+ */
+
+#ifndef INCLUDE__ARM_SPE_PKT_DECODER_H__
+#define INCLUDE__ARM_SPE_PKT_DECODER_H__
+
+#include <stddef.h>
+#include <stdint.h>
+
+#define ARM_SPE_PKT_DESC_MAX 256
+
+#define ARM_SPE_NEED_MORE_BYTES -1
+#define ARM_SPE_BAD_PACKET -2
+
+enum arm_spe_pkt_type {
+ ARM_SPE_BAD,
+ ARM_SPE_PAD,
+ ARM_SPE_END,
+ ARM_SPE_TIMESTAMP,
+ ARM_SPE_ADDRESS,
+ ARM_SPE_COUNTER,
+ ARM_SPE_CONTEXT,
+ ARM_SPE_OP_TYPE,
+ ARM_SPE_EVENTS,
+ ARM_SPE_DATA_SOURCE,
+};
+
+struct arm_spe_pkt {
+ enum arm_spe_pkt_type type;
+ unsigned char index;
+ uint64_t payload;
+};
+
+const char *arm_spe_pkt_name(enum arm_spe_pkt_type);
+
+int arm_spe_get_packet(const unsigned char *buf, size_t len,
+ struct arm_spe_pkt *packet);
+
+int arm_spe_pkt_desc(const struct arm_spe_pkt *packet, char *buf, size_t len);
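+
+/*
+ * A minimal usage sketch (see arm_spe_dump() in arm-spe.c for the full
+ * version): arm_spe_get_packet() returns the number of bytes consumed
+ * (> 0) or one of the negative codes above, and arm_spe_pkt_desc()
+ * renders the decoded packet as text:
+ *
+ *	struct arm_spe_pkt packet;
+ *	char desc[ARM_SPE_PKT_DESC_MAX];
+ *	int ret = arm_spe_get_packet(buf, len, &packet);
+ *
+ *	if (ret > 0 && arm_spe_pkt_desc(&packet, desc, sizeof(desc)) > 0)
+ *		printf("%s\n", desc);
+ */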
+#endif
diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c
new file mode 100644
index 000000000000..6067267cc76c
--- /dev/null
+++ b/tools/perf/util/arm-spe.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Arm Statistical Profiling Extensions (SPE) support
+ * Copyright (c) 2017-2018, Arm Ltd.
+ */
+
+#include <endian.h>
+#include <errno.h>
+#include <byteswap.h>
+#include <inttypes.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/log2.h>
+
+#include "cpumap.h"
+#include "color.h"
+#include "evsel.h"
+#include "evlist.h"
+#include "machine.h"
+#include "session.h"
+#include "util.h"
+#include "thread.h"
+#include "debug.h"
+#include "auxtrace.h"
+#include "arm-spe.h"
+#include "arm-spe-pkt-decoder.h"
+
+struct arm_spe {
+ struct auxtrace auxtrace;
+ struct auxtrace_queues queues;
+ struct auxtrace_heap heap;
+ u32 auxtrace_type;
+ struct perf_session *session;
+ struct machine *machine;
+ u32 pmu_type;
+};
+
+struct arm_spe_queue {
+ struct arm_spe *spe;
+ unsigned int queue_nr;
+ struct auxtrace_buffer *buffer;
+ bool on_heap;
+ bool done;
+ pid_t pid;
+ pid_t tid;
+ int cpu;
+};
+
+static void arm_spe_dump(struct arm_spe *spe __maybe_unused,
+ unsigned char *buf, size_t len)
+{
+ struct arm_spe_pkt packet;
+ size_t pos = 0;
+ int ret, pkt_len, i;
+ char desc[ARM_SPE_PKT_DESC_MAX];
+ const char *color = PERF_COLOR_BLUE;
+
+ color_fprintf(stdout, color,
+ ". ... ARM SPE data: size %zu bytes\n",
+ len);
+
+ while (len) {
+ ret = arm_spe_get_packet(buf, len, &packet);
+ if (ret > 0)
+ pkt_len = ret;
+ else
+ pkt_len = 1;
+ printf(".");
+		color_fprintf(stdout, color, "  %08zx: ", pos);
+ for (i = 0; i < pkt_len; i++)
+ color_fprintf(stdout, color, " %02x", buf[i]);
+ for (; i < 16; i++)
+ color_fprintf(stdout, color, " ");
+ if (ret > 0) {
+ ret = arm_spe_pkt_desc(&packet, desc,
+ ARM_SPE_PKT_DESC_MAX);
+ if (ret > 0)
+ color_fprintf(stdout, color, " %s\n", desc);
+ } else {
+ color_fprintf(stdout, color, " Bad packet!\n");
+ }
+ pos += pkt_len;
+ buf += pkt_len;
+ len -= pkt_len;
+ }
+}
+
+static void arm_spe_dump_event(struct arm_spe *spe, unsigned char *buf,
+ size_t len)
+{
+ printf(".\n");
+ arm_spe_dump(spe, buf, len);
+}
+
+static int arm_spe_process_event(struct perf_session *session __maybe_unused,
+ union perf_event *event __maybe_unused,
+ struct perf_sample *sample __maybe_unused,
+ struct perf_tool *tool __maybe_unused)
+{
+ return 0;
+}
+
+static int arm_spe_process_auxtrace_event(struct perf_session *session,
+ union perf_event *event,
+ struct perf_tool *tool __maybe_unused)
+{
+ struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,
+ auxtrace);
+ struct auxtrace_buffer *buffer;
+ off_t data_offset;
+ int fd = perf_data__fd(session->data);
+ int err;
+
+ if (perf_data__is_pipe(session->data)) {
+ data_offset = 0;
+ } else {
+ data_offset = lseek(fd, 0, SEEK_CUR);
+ if (data_offset == -1)
+ return -errno;
+ }
+
+ err = auxtrace_queues__add_event(&spe->queues, session, event,
+ data_offset, &buffer);
+ if (err)
+ return err;
+
+	/* Dump here now that we have copied a piped trace out of the pipe */
+ if (dump_trace) {
+ if (auxtrace_buffer__get_data(buffer, fd)) {
+ arm_spe_dump_event(spe, buffer->data,
+ buffer->size);
+ auxtrace_buffer__put_data(buffer);
+ }
+ }
+
+ return 0;
+}
+
+static int arm_spe_flush(struct perf_session *session __maybe_unused,
+ struct perf_tool *tool __maybe_unused)
+{
+ return 0;
+}
+
+static void arm_spe_free_queue(void *priv)
+{
+ struct arm_spe_queue *speq = priv;
+
+ if (!speq)
+ return;
+ free(speq);
+}
+
+static void arm_spe_free_events(struct perf_session *session)
+{
+ struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,
+ auxtrace);
+ struct auxtrace_queues *queues = &spe->queues;
+ unsigned int i;
+
+ for (i = 0; i < queues->nr_queues; i++) {
+ arm_spe_free_queue(queues->queue_array[i].priv);
+ queues->queue_array[i].priv = NULL;
+ }
+ auxtrace_queues__free(queues);
+}
+
+static void arm_spe_free(struct perf_session *session)
+{
+ struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,
+ auxtrace);
+
+ auxtrace_heap__free(&spe->heap);
+ arm_spe_free_events(session);
+ session->auxtrace = NULL;
+ free(spe);
+}
+
+static const char * const arm_spe_info_fmts[] = {
+ [ARM_SPE_PMU_TYPE] = " PMU Type %"PRId64"\n",
+};
+
+static void arm_spe_print_info(u64 *arr)
+{
+ if (!dump_trace)
+ return;
+
+ fprintf(stdout, arm_spe_info_fmts[ARM_SPE_PMU_TYPE], arr[ARM_SPE_PMU_TYPE]);
+}
+
+int arm_spe_process_auxtrace_info(union perf_event *event,
+ struct perf_session *session)
+{
+ struct auxtrace_info_event *auxtrace_info = &event->auxtrace_info;
+ size_t min_sz = sizeof(u64) * ARM_SPE_PMU_TYPE;
+ struct arm_spe *spe;
+ int err;
+
+ if (auxtrace_info->header.size < sizeof(struct auxtrace_info_event) +
+ min_sz)
+ return -EINVAL;
+
+ spe = zalloc(sizeof(struct arm_spe));
+ if (!spe)
+ return -ENOMEM;
+
+ err = auxtrace_queues__init(&spe->queues);
+ if (err)
+ goto err_free;
+
+ spe->session = session;
+ spe->machine = &session->machines.host; /* No kvm support */
+ spe->auxtrace_type = auxtrace_info->type;
+ spe->pmu_type = auxtrace_info->priv[ARM_SPE_PMU_TYPE];
+
+ spe->auxtrace.process_event = arm_spe_process_event;
+ spe->auxtrace.process_auxtrace_event = arm_spe_process_auxtrace_event;
+ spe->auxtrace.flush_events = arm_spe_flush;
+ spe->auxtrace.free_events = arm_spe_free_events;
+ spe->auxtrace.free = arm_spe_free;
+ session->auxtrace = &spe->auxtrace;
+
+ arm_spe_print_info(&auxtrace_info->priv[0]);
+
+ return 0;
+
+err_free:
+ free(spe);
+ return err;
+}
diff --git a/tools/perf/util/arm-spe.h b/tools/perf/util/arm-spe.h
new file mode 100644
index 000000000000..98d3235781c3
--- /dev/null
+++ b/tools/perf/util/arm-spe.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Arm Statistical Profiling Extensions (SPE) support
+ * Copyright (c) 2017-2018, Arm Ltd.
+ */
+
+#ifndef INCLUDE__PERF_ARM_SPE_H__
+#define INCLUDE__PERF_ARM_SPE_H__
+
+#define ARM_SPE_PMU_NAME "arm_spe_"
+
+enum {
+ ARM_SPE_PMU_TYPE,
+ ARM_SPE_PER_CPU_MMAPS,
+ ARM_SPE_AUXTRACE_PRIV_MAX,
+};
+
+#define ARM_SPE_AUXTRACE_PRIV_SIZE (ARM_SPE_AUXTRACE_PRIV_MAX * sizeof(u64))
+
+union perf_event;
+struct perf_session;
+struct perf_pmu;
+
+struct auxtrace_record *arm_spe_recording_init(int *err,
+ struct perf_pmu *arm_spe_pmu);
+
+int arm_spe_process_auxtrace_info(union perf_event *event,
+ struct perf_session *session);
+
+struct perf_event_attr *arm_spe_pmu_default_config(struct perf_pmu *arm_spe_pmu);
+#endif
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index a33491416400..9faf3b5367db 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -31,9 +31,6 @@
#include <sys/param.h>
#include <stdlib.h>
#include <stdio.h>
-#include <string.h>
-#include <limits.h>
-#include <errno.h>
#include <linux/list.h>
#include "../perf.h"
@@ -55,8 +52,10 @@
#include "debug.h"
#include <subcmd/parse-options.h>
+#include "cs-etm.h"
#include "intel-pt.h"
#include "intel-bts.h"
+#include "arm-spe.h"
#include "sane_ctype.h"
#include "symbol/kallsyms.h"
@@ -913,7 +912,10 @@ int perf_event__process_auxtrace_info(struct perf_tool *tool __maybe_unused,
return intel_pt_process_auxtrace_info(event, session);
case PERF_AUXTRACE_INTEL_BTS:
return intel_bts_process_auxtrace_info(event, session);
+ case PERF_AUXTRACE_ARM_SPE:
+ return arm_spe_process_auxtrace_info(event, session);
case PERF_AUXTRACE_CS_ETM:
+ return cs_etm__process_auxtrace_info(event, session);
case PERF_AUXTRACE_UNKNOWN:
default:
return -EINVAL;
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index d19e11b68de7..453c148d2158 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -43,6 +43,7 @@ enum auxtrace_type {
PERF_AUXTRACE_INTEL_PT,
PERF_AUXTRACE_INTEL_BTS,
PERF_AUXTRACE_CS_ETM,
+ PERF_AUXTRACE_ARM_SPE,
};
enum itrace_period_type {
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index 72c107fcbc5a..af7ad814b2c3 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -94,7 +94,7 @@ struct bpf_object *bpf__prepare_load(const char *filename, bool source)
err = perf_clang__compile_bpf(filename, &obj_buf, &obj_buf_sz);
perf_clang__cleanup();
if (err) {
- pr_warning("bpf: builtin compilation failed: %d, try external compiler\n", err);
+ pr_debug("bpf: builtin compilation failed: %d, try external compiler\n", err);
err = llvm__compile_bpf(filename, &obj_buf, &obj_buf_sz);
if (err)
return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
@@ -1533,7 +1533,7 @@ int bpf__apply_obj_config(void)
(strcmp("__bpf_stdout__", \
bpf_map__name(pos)) == 0))
-int bpf__setup_stdout(struct perf_evlist *evlist __maybe_unused)
+int bpf__setup_stdout(struct perf_evlist *evlist)
{
struct bpf_map_priv *tmpl_priv = NULL;
struct bpf_object *obj, *tmp;
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 082505d08d72..32ef7bdca1cf 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -37,6 +37,15 @@ struct callchain_param callchain_param = {
CALLCHAIN_PARAM_DEFAULT
};
+/*
+ * Are there any events using DWARF callchains?
+ *
+ * I.e.
+ *
+ * -e cycles/call-graph=dwarf/
+ */
+bool dwarf_callchain_users;
+
struct callchain_param callchain_param_default = {
CALLCHAIN_PARAM_DEFAULT
};
@@ -265,6 +274,7 @@ int parse_callchain_record(const char *arg, struct callchain_param *param)
ret = 0;
param->record_mode = CALLCHAIN_DWARF;
param->dump_size = default_stack_dump_size;
+ dwarf_callchain_users = true;
tok = strtok_r(NULL, ",", &saveptr);
if (tok) {
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index b79ef2478a57..154560b1eb65 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -89,6 +89,8 @@ enum chain_value {
CCVAL_COUNT,
};
+extern bool dwarf_callchain_users;
+
struct callchain_param {
bool enabled;
enum perf_call_graph_mode record_mode;
diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c
index d9ffc1e6eb39..984f69144f87 100644
--- a/tools/perf/util/cgroup.c
+++ b/tools/perf/util/cgroup.c
@@ -6,6 +6,9 @@
#include "cgroup.h"
#include "evlist.h"
#include <linux/stringify.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
int nr_cgroups;
diff --git a/tools/perf/util/cs-etm-decoder/Build b/tools/perf/util/cs-etm-decoder/Build
new file mode 100644
index 000000000000..bc22c39c727f
--- /dev/null
+++ b/tools/perf/util/cs-etm-decoder/Build
@@ -0,0 +1 @@
+libperf-$(CONFIG_AUXTRACE) += cs-etm-decoder.o
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
new file mode 100644
index 000000000000..1fb01849f1c7
--- /dev/null
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
@@ -0,0 +1,513 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright(C) 2015-2018 Linaro Limited.
+ *
+ * Author: Tor Jeremiassen <tor@ti.com>
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ */
+
+#include <linux/err.h>
+#include <linux/list.h>
+#include <stdlib.h>
+#include <opencsd/c_api/opencsd_c_api.h>
+#include <opencsd/etmv4/trc_pkt_types_etmv4.h>
+#include <opencsd/ocsd_if_types.h>
+
+#include "cs-etm.h"
+#include "cs-etm-decoder.h"
+#include "intlist.h"
+#include "util.h"
+
+#define MAX_BUFFER 1024
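+/* MAX_BUFFER must be a power of two: head/tail wrap with &(MAX_BUFFER - 1) */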
+
+/* use raw logging */
+#ifdef CS_DEBUG_RAW
+#define CS_LOG_RAW_FRAMES
+#ifdef CS_RAW_PACKED
+#define CS_RAW_DEBUG_FLAGS (OCSD_DFRMTR_UNPACKED_RAW_OUT | \
+ OCSD_DFRMTR_PACKED_RAW_OUT)
+#else
+#define CS_RAW_DEBUG_FLAGS (OCSD_DFRMTR_UNPACKED_RAW_OUT)
+#endif
+#endif
+
+struct cs_etm_decoder {
+ void *data;
+ void (*packet_printer)(const char *msg);
+ bool trace_on;
+ dcd_tree_handle_t dcd_tree;
+ cs_etm_mem_cb_type mem_access;
+ ocsd_datapath_resp_t prev_return;
+ u32 packet_count;
+ u32 head;
+ u32 tail;
+ struct cs_etm_packet packet_buffer[MAX_BUFFER];
+};
+
+static u32
+cs_etm_decoder__mem_access(const void *context,
+ const ocsd_vaddr_t address,
+ const ocsd_mem_space_acc_t mem_space __maybe_unused,
+ const u32 req_size,
+ u8 *buffer)
+{
+ struct cs_etm_decoder *decoder = (struct cs_etm_decoder *) context;
+
+ return decoder->mem_access(decoder->data,
+ address,
+ req_size,
+ buffer);
+}
+
+int cs_etm_decoder__add_mem_access_cb(struct cs_etm_decoder *decoder,
+ u64 start, u64 end,
+ cs_etm_mem_cb_type cb_func)
+{
+ decoder->mem_access = cb_func;
+
+ if (ocsd_dt_add_callback_mem_acc(decoder->dcd_tree, start, end,
+ OCSD_MEM_SPACE_ANY,
+ cs_etm_decoder__mem_access, decoder))
+ return -1;
+
+ return 0;
+}
+
+int cs_etm_decoder__reset(struct cs_etm_decoder *decoder)
+{
+ ocsd_datapath_resp_t dp_ret;
+
+ dp_ret = ocsd_dt_process_data(decoder->dcd_tree, OCSD_OP_RESET,
+ 0, 0, NULL, NULL);
+ if (OCSD_DATA_RESP_IS_FATAL(dp_ret))
+ return -1;
+
+ return 0;
+}
+
+int cs_etm_decoder__get_packet(struct cs_etm_decoder *decoder,
+ struct cs_etm_packet *packet)
+{
+ if (!decoder || !packet)
+ return -EINVAL;
+
+ /* Nothing to do, might as well just return */
+ if (decoder->packet_count == 0)
+ return 0;
+
+ *packet = decoder->packet_buffer[decoder->head];
+
+ decoder->head = (decoder->head + 1) & (MAX_BUFFER - 1);
+
+ decoder->packet_count--;
+
+ return 1;
+}
+
+static void cs_etm_decoder__gen_etmv4_config(struct cs_etm_trace_params *params,
+ ocsd_etmv4_cfg *config)
+{
+ config->reg_configr = params->etmv4.reg_configr;
+ config->reg_traceidr = params->etmv4.reg_traceidr;
+ config->reg_idr0 = params->etmv4.reg_idr0;
+ config->reg_idr1 = params->etmv4.reg_idr1;
+ config->reg_idr2 = params->etmv4.reg_idr2;
+ config->reg_idr8 = params->etmv4.reg_idr8;
+ config->reg_idr9 = 0;
+ config->reg_idr10 = 0;
+ config->reg_idr11 = 0;
+ config->reg_idr12 = 0;
+ config->reg_idr13 = 0;
+ config->arch_ver = ARCH_V8;
+ config->core_prof = profile_CortexA;
+}
+
+static void cs_etm_decoder__print_str_cb(const void *p_context,
+ const char *msg,
+ const int str_len)
+{
+ if (p_context && str_len)
+ ((struct cs_etm_decoder *)p_context)->packet_printer(msg);
+}
+
+static int
+cs_etm_decoder__init_def_logger_printing(struct cs_etm_decoder_params *d_params,
+ struct cs_etm_decoder *decoder)
+{
+ int ret = 0;
+
+ if (d_params->packet_printer == NULL)
+ return -1;
+
+ decoder->packet_printer = d_params->packet_printer;
+
+ /*
+ * Set up a library default logger to process any printers
+ * (packet/raw frame) we add later.
+ */
+ ret = ocsd_def_errlog_init(OCSD_ERR_SEV_ERROR, 1);
+ if (ret != 0)
+ return -1;
+
+ /* no stdout / err / file output */
+ ret = ocsd_def_errlog_config_output(C_API_MSGLOGOUT_FLG_NONE, NULL);
+ if (ret != 0)
+ return -1;
+
+ /*
+ * Set the string CB for the default logger, passes strings to
+ * perf print logger.
+ */
+ ret = ocsd_def_errlog_set_strprint_cb(decoder->dcd_tree,
+ (void *)decoder,
+ cs_etm_decoder__print_str_cb);
+	if (ret != 0)
+		return -1;
+
+	return 0;
+}
+
+#ifdef CS_LOG_RAW_FRAMES
+static void
+cs_etm_decoder__init_raw_frame_logging(struct cs_etm_decoder_params *d_params,
+ struct cs_etm_decoder *decoder)
+{
+ /* Only log these during a --dump operation */
+ if (d_params->operation == CS_ETM_OPERATION_PRINT) {
+ /* set up a library default logger to process the
+ * raw frame printer we add later
+ */
+ ocsd_def_errlog_init(OCSD_ERR_SEV_ERROR, 1);
+
+ /* no stdout / err / file output */
+ ocsd_def_errlog_config_output(C_API_MSGLOGOUT_FLG_NONE, NULL);
+
+ /* set the string CB for the default logger,
+ * passes strings to perf print logger.
+ */
+ ocsd_def_errlog_set_strprint_cb(decoder->dcd_tree,
+ (void *)decoder,
+ cs_etm_decoder__print_str_cb);
+
+ /* use the built in library printer for the raw frames */
+ ocsd_dt_set_raw_frame_printer(decoder->dcd_tree,
+ CS_RAW_DEBUG_FLAGS);
+ }
+}
+#else
+static void
+cs_etm_decoder__init_raw_frame_logging(
+ struct cs_etm_decoder_params *d_params __maybe_unused,
+ struct cs_etm_decoder *decoder __maybe_unused)
+{
+}
+#endif
+
+static int cs_etm_decoder__create_packet_printer(struct cs_etm_decoder *decoder,
+ const char *decoder_name,
+ void *trace_config)
+{
+ u8 csid;
+
+ if (ocsd_dt_create_decoder(decoder->dcd_tree, decoder_name,
+ OCSD_CREATE_FLG_PACKET_PROC,
+ trace_config, &csid))
+ return -1;
+
+ if (ocsd_dt_set_pkt_protocol_printer(decoder->dcd_tree, csid, 0))
+ return -1;
+
+ return 0;
+}
+
+static int
+cs_etm_decoder__create_etm_packet_printer(struct cs_etm_trace_params *t_params,
+ struct cs_etm_decoder *decoder)
+{
+ const char *decoder_name;
+ ocsd_etmv4_cfg trace_config_etmv4;
+ void *trace_config;
+
+ switch (t_params->protocol) {
+ case CS_ETM_PROTO_ETMV4i:
+ cs_etm_decoder__gen_etmv4_config(t_params, &trace_config_etmv4);
+ decoder_name = OCSD_BUILTIN_DCD_ETMV4I;
+ trace_config = &trace_config_etmv4;
+ break;
+ default:
+ return -1;
+ }
+
+ return cs_etm_decoder__create_packet_printer(decoder,
+ decoder_name,
+ trace_config);
+}
+
+static void cs_etm_decoder__clear_buffer(struct cs_etm_decoder *decoder)
+{
+ int i;
+
+ decoder->head = 0;
+ decoder->tail = 0;
+ decoder->packet_count = 0;
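+	/* poison entries so stale packets are easy to spot while debugging */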
+ for (i = 0; i < MAX_BUFFER; i++) {
+ decoder->packet_buffer[i].start_addr = 0xdeadbeefdeadbeefUL;
+ decoder->packet_buffer[i].end_addr = 0xdeadbeefdeadbeefUL;
+ decoder->packet_buffer[i].exc = false;
+ decoder->packet_buffer[i].exc_ret = false;
+ decoder->packet_buffer[i].cpu = INT_MIN;
+ }
+}
+
+static ocsd_datapath_resp_t
+cs_etm_decoder__buffer_packet(struct cs_etm_decoder *decoder,
+ const ocsd_generic_trace_elem *elem,
+ const u8 trace_chan_id,
+ enum cs_etm_sample_type sample_type)
+{
+ u32 et = 0;
+ struct int_node *inode = NULL;
+
+ if (decoder->packet_count >= MAX_BUFFER - 1)
+ return OCSD_RESP_FATAL_SYS_ERR;
+
+ /* Search the RB tree for the cpu associated with this traceID */
+ inode = intlist__find(traceid_list, trace_chan_id);
+ if (!inode)
+ return OCSD_RESP_FATAL_SYS_ERR;
+
+ et = decoder->tail;
+ decoder->packet_buffer[et].sample_type = sample_type;
+ decoder->packet_buffer[et].start_addr = elem->st_addr;
+ decoder->packet_buffer[et].end_addr = elem->en_addr;
+ decoder->packet_buffer[et].exc = false;
+ decoder->packet_buffer[et].exc_ret = false;
+ decoder->packet_buffer[et].cpu = *((int *)inode->priv);
+
+ /* Wrap around if need be */
+ et = (et + 1) & (MAX_BUFFER - 1);
+
+ decoder->tail = et;
+ decoder->packet_count++;
+
+ if (decoder->packet_count == MAX_BUFFER - 1)
+ return OCSD_RESP_WAIT;
+
+ return OCSD_RESP_CONT;
+}
+
+static ocsd_datapath_resp_t cs_etm_decoder__gen_trace_elem_printer(
+ const void *context,
+ const ocsd_trc_index_t indx __maybe_unused,
+ const u8 trace_chan_id __maybe_unused,
+ const ocsd_generic_trace_elem *elem)
+{
+ ocsd_datapath_resp_t resp = OCSD_RESP_CONT;
+ struct cs_etm_decoder *decoder = (struct cs_etm_decoder *) context;
+
+ switch (elem->elem_type) {
+ case OCSD_GEN_TRC_ELEM_UNKNOWN:
+ break;
+ case OCSD_GEN_TRC_ELEM_NO_SYNC:
+ decoder->trace_on = false;
+ break;
+ case OCSD_GEN_TRC_ELEM_TRACE_ON:
+ decoder->trace_on = true;
+ break;
+ case OCSD_GEN_TRC_ELEM_INSTR_RANGE:
+ resp = cs_etm_decoder__buffer_packet(decoder, elem,
+ trace_chan_id,
+ CS_ETM_RANGE);
+ break;
+ case OCSD_GEN_TRC_ELEM_EXCEPTION:
+ decoder->packet_buffer[decoder->tail].exc = true;
+ break;
+ case OCSD_GEN_TRC_ELEM_EXCEPTION_RET:
+ decoder->packet_buffer[decoder->tail].exc_ret = true;
+ break;
+ case OCSD_GEN_TRC_ELEM_PE_CONTEXT:
+ case OCSD_GEN_TRC_ELEM_EO_TRACE:
+ case OCSD_GEN_TRC_ELEM_ADDR_NACC:
+ case OCSD_GEN_TRC_ELEM_TIMESTAMP:
+ case OCSD_GEN_TRC_ELEM_CYCLE_COUNT:
+ case OCSD_GEN_TRC_ELEM_ADDR_UNKNOWN:
+ case OCSD_GEN_TRC_ELEM_EVENT:
+ case OCSD_GEN_TRC_ELEM_SWTRACE:
+ case OCSD_GEN_TRC_ELEM_CUSTOM:
+ default:
+ break;
+ }
+
+ return resp;
+}
+
+static int cs_etm_decoder__create_etm_packet_decoder(
+ struct cs_etm_trace_params *t_params,
+ struct cs_etm_decoder *decoder)
+{
+ const char *decoder_name;
+ ocsd_etmv4_cfg trace_config_etmv4;
+ void *trace_config;
+ u8 csid;
+
+ switch (t_params->protocol) {
+ case CS_ETM_PROTO_ETMV4i:
+ cs_etm_decoder__gen_etmv4_config(t_params, &trace_config_etmv4);
+ decoder_name = OCSD_BUILTIN_DCD_ETMV4I;
+ trace_config = &trace_config_etmv4;
+ break;
+ default:
+ return -1;
+ }
+
+ if (ocsd_dt_create_decoder(decoder->dcd_tree,
+ decoder_name,
+ OCSD_CREATE_FLG_FULL_DECODER,
+ trace_config, &csid))
+ return -1;
+
+ if (ocsd_dt_set_gen_elem_outfn(decoder->dcd_tree,
+ cs_etm_decoder__gen_trace_elem_printer,
+ decoder))
+ return -1;
+
+ return 0;
+}
+
+static int
+cs_etm_decoder__create_etm_decoder(struct cs_etm_decoder_params *d_params,
+ struct cs_etm_trace_params *t_params,
+ struct cs_etm_decoder *decoder)
+{
+ if (d_params->operation == CS_ETM_OPERATION_PRINT)
+ return cs_etm_decoder__create_etm_packet_printer(t_params,
+ decoder);
+ else if (d_params->operation == CS_ETM_OPERATION_DECODE)
+ return cs_etm_decoder__create_etm_packet_decoder(t_params,
+ decoder);
+
+ return -1;
+}
+
+struct cs_etm_decoder *
+cs_etm_decoder__new(int num_cpu, struct cs_etm_decoder_params *d_params,
+ struct cs_etm_trace_params t_params[])
+{
+ struct cs_etm_decoder *decoder;
+ ocsd_dcd_tree_src_t format;
+ u32 flags;
+ int i, ret;
+
+	if (!t_params || !d_params)
+ return NULL;
+
+ decoder = zalloc(sizeof(*decoder));
+
+ if (!decoder)
+ return NULL;
+
+ decoder->data = d_params->data;
+ decoder->prev_return = OCSD_RESP_CONT;
+ cs_etm_decoder__clear_buffer(decoder);
+ format = (d_params->formatted ? OCSD_TRC_SRC_FRAME_FORMATTED :
+ OCSD_TRC_SRC_SINGLE);
+ flags = 0;
+ flags |= (d_params->fsyncs ? OCSD_DFRMTR_HAS_FSYNCS : 0);
+ flags |= (d_params->hsyncs ? OCSD_DFRMTR_HAS_HSYNCS : 0);
+ flags |= (d_params->frame_aligned ? OCSD_DFRMTR_FRAME_MEM_ALIGN : 0);
+
+ /*
+	 * Drivers may add barrier frames when used with perf; set up to
+	 * handle this. Barriers consist of a FSYNC packet repeated 4 times.
+ */
+ flags |= OCSD_DFRMTR_RESET_ON_4X_FSYNC;
+
+ /* Create decode tree for the data source */
+ decoder->dcd_tree = ocsd_create_dcd_tree(format, flags);
+
+ if (decoder->dcd_tree == 0)
+ goto err_free_decoder;
+
+ /* init library print logging support */
+ ret = cs_etm_decoder__init_def_logger_printing(d_params, decoder);
+ if (ret != 0)
+ goto err_free_decoder_tree;
+
+ /* init raw frame logging if required */
+ cs_etm_decoder__init_raw_frame_logging(d_params, decoder);
+
+ for (i = 0; i < num_cpu; i++) {
+ ret = cs_etm_decoder__create_etm_decoder(d_params,
+ &t_params[i],
+ decoder);
+ if (ret != 0)
+ goto err_free_decoder_tree;
+ }
+
+ return decoder;
+
+err_free_decoder_tree:
+ ocsd_destroy_dcd_tree(decoder->dcd_tree);
+err_free_decoder:
+ free(decoder);
+ return NULL;
+}
+
+int cs_etm_decoder__process_data_block(struct cs_etm_decoder *decoder,
+ u64 indx, const u8 *buf,
+ size_t len, size_t *consumed)
+{
+ int ret = 0;
+ ocsd_datapath_resp_t cur = OCSD_RESP_CONT;
+ ocsd_datapath_resp_t prev_return = decoder->prev_return;
+ size_t processed = 0;
+ u32 count;
+
+ while (processed < len) {
+ if (OCSD_DATA_RESP_IS_WAIT(prev_return)) {
+ cur = ocsd_dt_process_data(decoder->dcd_tree,
+ OCSD_OP_FLUSH,
+ 0,
+ 0,
+ NULL,
+ NULL);
+ } else if (OCSD_DATA_RESP_IS_CONT(prev_return)) {
+ cur = ocsd_dt_process_data(decoder->dcd_tree,
+ OCSD_OP_DATA,
+ indx + processed,
+ len - processed,
+ &buf[processed],
+ &count);
+ processed += count;
+ } else {
+ ret = -EINVAL;
+ break;
+ }
+
+ /*
+ * Return to the input code if the packet buffer is full.
+ * Flushing will get done once the packet buffer has been
+ * processed.
+ */
+ if (OCSD_DATA_RESP_IS_WAIT(cur))
+ break;
+
+ prev_return = cur;
+ }
+
+ decoder->prev_return = cur;
+ *consumed = processed;
+
+ return ret;
+}
+
+void cs_etm_decoder__free(struct cs_etm_decoder *decoder)
+{
+ if (!decoder)
+ return;
+
+ ocsd_destroy_dcd_tree(decoder->dcd_tree);
+ decoder->dcd_tree = NULL;
+ free(decoder);
+}
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h
new file mode 100644
index 000000000000..3d2e6205d186
--- /dev/null
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h
@@ -0,0 +1,105 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright(C) 2015-2018 Linaro Limited.
+ *
+ * Author: Tor Jeremiassen <tor@ti.com>
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ */
+
+#ifndef INCLUDE__CS_ETM_DECODER_H__
+#define INCLUDE__CS_ETM_DECODER_H__
+
+#include <linux/types.h>
+#include <stdio.h>
+
+struct cs_etm_decoder;
+
+struct cs_etm_buffer {
+ const unsigned char *buf;
+ size_t len;
+ u64 offset;
+ u64 ref_timestamp;
+};
+
+enum cs_etm_sample_type {
+ CS_ETM_RANGE = 1 << 0,
+};
+
+struct cs_etm_packet {
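+	/*
+	 * A CS_ETM_RANGE packet describes the instructions executed
+	 * between a branch target (start_addr) and the next taken
+	 * branch (end_addr).
+	 */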
+ enum cs_etm_sample_type sample_type;
+ u64 start_addr;
+ u64 end_addr;
+ u8 exc;
+ u8 exc_ret;
+ int cpu;
+};
+
+struct cs_etm_queue;
+
+typedef u32 (*cs_etm_mem_cb_type)(struct cs_etm_queue *, u64,
+ size_t, u8 *);
+
+struct cs_etmv4_trace_params {
+ u32 reg_idr0;
+ u32 reg_idr1;
+ u32 reg_idr2;
+ u32 reg_idr8;
+ u32 reg_configr;
+ u32 reg_traceidr;
+};
+
+struct cs_etm_trace_params {
+ int protocol;
+ union {
+ struct cs_etmv4_trace_params etmv4;
+ };
+};
+
+struct cs_etm_decoder_params {
+ int operation;
+ void (*packet_printer)(const char *msg);
+ cs_etm_mem_cb_type mem_acc_cb;
+ u8 formatted;
+ u8 fsyncs;
+ u8 hsyncs;
+ u8 frame_aligned;
+ void *data;
+};
+
+/*
+ * The following enums are indexed starting with 1 to align with the
+ * open source CoreSight trace decoder library (OpenCSD).
+ */
+enum {
+ CS_ETM_PROTO_ETMV3 = 1,
+ CS_ETM_PROTO_ETMV4i,
+ CS_ETM_PROTO_ETMV4d,
+};
+
+enum {
+ CS_ETM_OPERATION_PRINT = 1,
+ CS_ETM_OPERATION_DECODE,
+};
+
+int cs_etm_decoder__process_data_block(struct cs_etm_decoder *decoder,
+ u64 indx, const u8 *buf,
+ size_t len, size_t *consumed);
+
+struct cs_etm_decoder *
+cs_etm_decoder__new(int num_cpu,
+ struct cs_etm_decoder_params *d_params,
+ struct cs_etm_trace_params t_params[]);
+
+void cs_etm_decoder__free(struct cs_etm_decoder *decoder);
+
+int cs_etm_decoder__add_mem_access_cb(struct cs_etm_decoder *decoder,
+ u64 start, u64 end,
+ cs_etm_mem_cb_type cb_func);
+
+int cs_etm_decoder__get_packet(struct cs_etm_decoder *decoder,
+ struct cs_etm_packet *packet);
+
+int cs_etm_decoder__reset(struct cs_etm_decoder *decoder);
+
+#endif /* INCLUDE__CS_ETM_DECODER_H__ */
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
new file mode 100644
index 000000000000..b9f0a53dfa65
--- /dev/null
+++ b/tools/perf/util/cs-etm.c
@@ -0,0 +1,1023 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright(C) 2015-2018 Linaro Limited.
+ *
+ * Author: Tor Jeremiassen <tor@ti.com>
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ */
+
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+#include <linux/types.h>
+
+#include <stdlib.h>
+
+#include "auxtrace.h"
+#include "color.h"
+#include "cs-etm.h"
+#include "cs-etm-decoder/cs-etm-decoder.h"
+#include "debug.h"
+#include "evlist.h"
+#include "intlist.h"
+#include "machine.h"
+#include "map.h"
+#include "perf.h"
+#include "thread.h"
+#include "thread_map.h"
+#include "thread-stack.h"
+#include "util.h"
+
+#define MAX_TIMESTAMP (~0ULL)
+
+struct cs_etm_auxtrace {
+ struct auxtrace auxtrace;
+ struct auxtrace_queues queues;
+ struct auxtrace_heap heap;
+ struct itrace_synth_opts synth_opts;
+ struct perf_session *session;
+ struct machine *machine;
+ struct thread *unknown_thread;
+
+ u8 timeless_decoding;
+ u8 snapshot_mode;
+ u8 data_queued;
+ u8 sample_branches;
+
+ int num_cpu;
+ u32 auxtrace_type;
+ u64 branches_sample_type;
+ u64 branches_id;
+ u64 **metadata;
+ u64 kernel_start;
+ unsigned int pmu_type;
+};
+
+struct cs_etm_queue {
+ struct cs_etm_auxtrace *etm;
+ struct thread *thread;
+ struct cs_etm_decoder *decoder;
+ struct auxtrace_buffer *buffer;
+ const struct cs_etm_state *state;
+ union perf_event *event_buf;
+ unsigned int queue_nr;
+ pid_t pid, tid;
+ int cpu;
+ u64 time;
+ u64 timestamp;
+ u64 offset;
+};
+
+static int cs_etm__update_queues(struct cs_etm_auxtrace *etm);
+static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
+ pid_t tid, u64 time_);
+
+static void cs_etm__packet_dump(const char *pkt_string)
+{
+ const char *color = PERF_COLOR_BLUE;
+ int len = strlen(pkt_string);
+
+ if (len && (pkt_string[len-1] == '\n'))
+ color_fprintf(stdout, color, " %s", pkt_string);
+ else
+ color_fprintf(stdout, color, " %s\n", pkt_string);
+
+ fflush(stdout);
+}
+
+static void cs_etm__dump_event(struct cs_etm_auxtrace *etm,
+ struct auxtrace_buffer *buffer)
+{
+ int i, ret;
+ const char *color = PERF_COLOR_BLUE;
+ struct cs_etm_decoder_params d_params;
+ struct cs_etm_trace_params *t_params;
+ struct cs_etm_decoder *decoder;
+ size_t buffer_used = 0;
+
+ fprintf(stdout, "\n");
+ color_fprintf(stdout, color,
+ ". ... CoreSight ETM Trace data: size %zu bytes\n",
+ buffer->size);
+
+ /* Use metadata to fill in trace parameters for trace decoder */
+	t_params = zalloc(sizeof(*t_params) * etm->num_cpu);
+	if (!t_params)
+		return;
+ for (i = 0; i < etm->num_cpu; i++) {
+ t_params[i].protocol = CS_ETM_PROTO_ETMV4i;
+ t_params[i].etmv4.reg_idr0 = etm->metadata[i][CS_ETMV4_TRCIDR0];
+ t_params[i].etmv4.reg_idr1 = etm->metadata[i][CS_ETMV4_TRCIDR1];
+ t_params[i].etmv4.reg_idr2 = etm->metadata[i][CS_ETMV4_TRCIDR2];
+ t_params[i].etmv4.reg_idr8 = etm->metadata[i][CS_ETMV4_TRCIDR8];
+ t_params[i].etmv4.reg_configr =
+ etm->metadata[i][CS_ETMV4_TRCCONFIGR];
+ t_params[i].etmv4.reg_traceidr =
+ etm->metadata[i][CS_ETMV4_TRCTRACEIDR];
+ }
+
+ /* Set decoder parameters to simply print the trace packets */
+ d_params.packet_printer = cs_etm__packet_dump;
+ d_params.operation = CS_ETM_OPERATION_PRINT;
+ d_params.formatted = true;
+ d_params.fsyncs = false;
+ d_params.hsyncs = false;
+ d_params.frame_aligned = true;
+
+ decoder = cs_etm_decoder__new(etm->num_cpu, &d_params, t_params);
+
+ zfree(&t_params);
+
+ if (!decoder)
+ return;
+ do {
+ size_t consumed;
+
+ ret = cs_etm_decoder__process_data_block(
+ decoder, buffer->offset,
+ &((u8 *)buffer->data)[buffer_used],
+ buffer->size - buffer_used, &consumed);
+ if (ret)
+ break;
+
+ buffer_used += consumed;
+ } while (buffer_used < buffer->size);
+
+ cs_etm_decoder__free(decoder);
+}
+
+static int cs_etm__flush_events(struct perf_session *session,
+ struct perf_tool *tool)
+{
+ int ret;
+ struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
+ struct cs_etm_auxtrace,
+ auxtrace);
+ if (dump_trace)
+ return 0;
+
+ if (!tool->ordered_events)
+ return -EINVAL;
+
+ if (!etm->timeless_decoding)
+ return -EINVAL;
+
+ ret = cs_etm__update_queues(etm);
+
+ if (ret < 0)
+ return ret;
+
+ return cs_etm__process_timeless_queues(etm, -1, MAX_TIMESTAMP - 1);
+}
+
+static void cs_etm__free_queue(void *priv)
+{
+ struct cs_etm_queue *etmq = priv;
+
+ free(etmq);
+}
+
+static void cs_etm__free_events(struct perf_session *session)
+{
+ unsigned int i;
+ struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
+ struct cs_etm_auxtrace,
+ auxtrace);
+ struct auxtrace_queues *queues = &aux->queues;
+
+ for (i = 0; i < queues->nr_queues; i++) {
+ cs_etm__free_queue(queues->queue_array[i].priv);
+ queues->queue_array[i].priv = NULL;
+ }
+
+ auxtrace_queues__free(queues);
+}
+
+static void cs_etm__free(struct perf_session *session)
+{
+ int i;
+ struct int_node *inode, *tmp;
+ struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
+ struct cs_etm_auxtrace,
+ auxtrace);
+ cs_etm__free_events(session);
+ session->auxtrace = NULL;
+
+ /* First remove all traceID/CPU# nodes for the RB tree */
+ intlist__for_each_entry_safe(inode, tmp, traceid_list)
+ intlist__remove(traceid_list, inode);
+ /* Then the RB tree itself */
+ intlist__delete(traceid_list);
+
+ for (i = 0; i < aux->num_cpu; i++)
+ zfree(&aux->metadata[i]);
+
+ zfree(&aux->metadata);
+ zfree(&aux);
+}
+
+static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u64 address,
+ size_t size, u8 *buffer)
+{
+ u8 cpumode;
+ u64 offset;
+ int len;
+ struct thread *thread;
+ struct machine *machine;
+ struct addr_location al;
+
+ if (!etmq)
+ return -1;
+
+ machine = etmq->etm->machine;
+ if (address >= etmq->etm->kernel_start)
+ cpumode = PERF_RECORD_MISC_KERNEL;
+ else
+ cpumode = PERF_RECORD_MISC_USER;
+
+ thread = etmq->thread;
+ if (!thread) {
+ if (cpumode != PERF_RECORD_MISC_KERNEL)
+ return -EINVAL;
+ thread = etmq->etm->unknown_thread;
+ }
+
+ thread__find_addr_map(thread, cpumode, MAP__FUNCTION, address, &al);
+
+ if (!al.map || !al.map->dso)
+ return 0;
+
+ if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
+ dso__data_status_seen(al.map->dso, DSO_DATA_STATUS_SEEN_ITRACE))
+ return 0;
+
+ offset = al.map->map_ip(al.map, address);
+
+ map__load(al.map);
+
+ len = dso__data_read_offset(al.map->dso, machine, offset, buffer, size);
+
+ if (len <= 0)
+ return 0;
+
+ return len;
+}
+
+static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
+ unsigned int queue_nr)
+{
+ int i;
+ struct cs_etm_decoder_params d_params;
+ struct cs_etm_trace_params *t_params;
+ struct cs_etm_queue *etmq;
+
+ etmq = zalloc(sizeof(*etmq));
+ if (!etmq)
+ return NULL;
+
+ etmq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
+ if (!etmq->event_buf)
+ goto out_free;
+
+ etmq->etm = etm;
+ etmq->queue_nr = queue_nr;
+ etmq->pid = -1;
+ etmq->tid = -1;
+ etmq->cpu = -1;
+
+ /* Use metadata to fill in trace parameters for trace decoder */
+ t_params = zalloc(sizeof(*t_params) * etm->num_cpu);
+
+ if (!t_params)
+ goto out_free;
+
+ for (i = 0; i < etm->num_cpu; i++) {
+ t_params[i].protocol = CS_ETM_PROTO_ETMV4i;
+ t_params[i].etmv4.reg_idr0 = etm->metadata[i][CS_ETMV4_TRCIDR0];
+ t_params[i].etmv4.reg_idr1 = etm->metadata[i][CS_ETMV4_TRCIDR1];
+ t_params[i].etmv4.reg_idr2 = etm->metadata[i][CS_ETMV4_TRCIDR2];
+ t_params[i].etmv4.reg_idr8 = etm->metadata[i][CS_ETMV4_TRCIDR8];
+ t_params[i].etmv4.reg_configr =
+ etm->metadata[i][CS_ETMV4_TRCCONFIGR];
+ t_params[i].etmv4.reg_traceidr =
+ etm->metadata[i][CS_ETMV4_TRCTRACEIDR];
+ }
+
+	/* Set decoder parameters to decode the trace packets */
+ d_params.packet_printer = cs_etm__packet_dump;
+ d_params.operation = CS_ETM_OPERATION_DECODE;
+ d_params.formatted = true;
+ d_params.fsyncs = false;
+ d_params.hsyncs = false;
+ d_params.frame_aligned = true;
+ d_params.data = etmq;
+
+ etmq->decoder = cs_etm_decoder__new(etm->num_cpu, &d_params, t_params);
+
+ zfree(&t_params);
+
+ if (!etmq->decoder)
+ goto out_free;
+
+ /*
+ * Register a function to handle all memory accesses required by
+ * the trace decoder library.
+ */
+ if (cs_etm_decoder__add_mem_access_cb(etmq->decoder,
+ 0x0L, ((u64) -1L),
+ cs_etm__mem_access))
+ goto out_free_decoder;
+
+ etmq->offset = 0;
+
+ return etmq;
+
+out_free_decoder:
+ cs_etm_decoder__free(etmq->decoder);
+out_free:
+ zfree(&etmq->event_buf);
+ free(etmq);
+
+ return NULL;
+}
+
+static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
+ struct auxtrace_queue *queue,
+ unsigned int queue_nr)
+{
+ struct cs_etm_queue *etmq = queue->priv;
+
+ if (list_empty(&queue->head) || etmq)
+ return 0;
+
+ etmq = cs_etm__alloc_queue(etm, queue_nr);
+
+ if (!etmq)
+ return -ENOMEM;
+
+ queue->priv = etmq;
+
+ if (queue->cpu != -1)
+ etmq->cpu = queue->cpu;
+
+ etmq->tid = queue->tid;
+
+ return 0;
+}
+
+static int cs_etm__setup_queues(struct cs_etm_auxtrace *etm)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < etm->queues.nr_queues; i++) {
+ ret = cs_etm__setup_queue(etm, &etm->queues.queue_array[i], i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cs_etm__update_queues(struct cs_etm_auxtrace *etm)
+{
+ if (etm->queues.new_data) {
+ etm->queues.new_data = false;
+ return cs_etm__setup_queues(etm);
+ }
+
+ return 0;
+}
+
+static int
+cs_etm__get_trace(struct cs_etm_buffer *buff, struct cs_etm_queue *etmq)
+{
+ struct auxtrace_buffer *aux_buffer = etmq->buffer;
+ struct auxtrace_buffer *old_buffer = aux_buffer;
+ struct auxtrace_queue *queue;
+
+ queue = &etmq->etm->queues.queue_array[etmq->queue_nr];
+
+ aux_buffer = auxtrace_buffer__next(queue, aux_buffer);
+
+ /* If no more data, drop the previous auxtrace_buffer and return */
+ if (!aux_buffer) {
+ if (old_buffer)
+ auxtrace_buffer__drop_data(old_buffer);
+ buff->len = 0;
+ return 0;
+ }
+
+ etmq->buffer = aux_buffer;
+
+ /* If the aux_buffer doesn't have data associated, try to load it */
+ if (!aux_buffer->data) {
+ /* get the file desc associated with the perf data file */
+ int fd = perf_data__fd(etmq->etm->session->data);
+
+ aux_buffer->data = auxtrace_buffer__get_data(aux_buffer, fd);
+ if (!aux_buffer->data)
+ return -ENOMEM;
+ }
+
+ /* If valid, drop the previous buffer */
+ if (old_buffer)
+ auxtrace_buffer__drop_data(old_buffer);
+
+ buff->offset = aux_buffer->offset;
+ buff->len = aux_buffer->size;
+ buff->buf = aux_buffer->data;
+
+ buff->ref_timestamp = aux_buffer->reference;
+
+ return buff->len;
+}
+
+static void cs_etm__set_pid_tid_cpu(struct cs_etm_auxtrace *etm,
+ struct auxtrace_queue *queue)
+{
+ struct cs_etm_queue *etmq = queue->priv;
+
+ /* CPU-wide tracing isn't supported yet */
+ if (queue->tid == -1)
+ return;
+
+ if ((!etmq->thread) && (etmq->tid != -1))
+ etmq->thread = machine__find_thread(etm->machine, -1,
+ etmq->tid);
+
+ if (etmq->thread) {
+ etmq->pid = etmq->thread->pid_;
+ if (queue->cpu == -1)
+ etmq->cpu = etmq->thread->cpu;
+ }
+}
+
+/*
+ * A CS ETM range packet encodes the instruction range between a branch
+ * target and the next taken branch. Generate a branch sample accordingly.
+ */
+static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq,
+ struct cs_etm_packet *packet)
+{
+ int ret = 0;
+ struct cs_etm_auxtrace *etm = etmq->etm;
+ struct perf_sample sample = {.ip = 0,};
+ union perf_event *event = etmq->event_buf;
+ u64 start_addr = packet->start_addr;
+ u64 end_addr = packet->end_addr;
+
+ event->sample.header.type = PERF_RECORD_SAMPLE;
+ event->sample.header.misc = PERF_RECORD_MISC_USER;
+ event->sample.header.size = sizeof(struct perf_event_header);
+
+ sample.ip = start_addr;
+ sample.pid = etmq->pid;
+ sample.tid = etmq->tid;
+ sample.addr = end_addr;
+ sample.id = etmq->etm->branches_id;
+ sample.stream_id = etmq->etm->branches_id;
+ sample.period = 1;
+ sample.cpu = packet->cpu;
+ sample.flags = 0;
+ sample.cpumode = PERF_RECORD_MISC_USER;
+
+ ret = perf_session__deliver_synth_event(etm->session, event, &sample);
+
+ if (ret)
+ pr_err(
+ "CS ETM Trace: failed to deliver instruction event, error %d\n",
+ ret);
+
+ return ret;
+}
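
A worked illustration with hypothetical addresses: a range packet covering the instructions from a branch target at 0x400100 to the next taken branch at 0x400140 produces exactly one synthetic sample:

struct cs_etm_packet packet = {
	.sample_type = CS_ETM_RANGE,
	.start_addr  = 0x400100,	/* branch target */
	.end_addr    = 0x400140,	/* next taken branch */
};
/*
 * cs_etm__synth_branch_sample() emits a PERF_RECORD_SAMPLE with
 * sample.ip = 0x400100, sample.addr = 0x400140 and period = 1.
 */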
+
+struct cs_etm_synth {
+ struct perf_tool dummy_tool;
+ struct perf_session *session;
+};
+
+static int cs_etm__event_synth(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ struct cs_etm_synth *cs_etm_synth =
+ container_of(tool, struct cs_etm_synth, dummy_tool);
+
+ return perf_session__deliver_synth_event(cs_etm_synth->session,
+ event, NULL);
+}
+
+static int cs_etm__synth_event(struct perf_session *session,
+ struct perf_event_attr *attr, u64 id)
+{
+ struct cs_etm_synth cs_etm_synth;
+
+ memset(&cs_etm_synth, 0, sizeof(struct cs_etm_synth));
+ cs_etm_synth.session = session;
+
+ return perf_event__synthesize_attr(&cs_etm_synth.dummy_tool, attr, 1,
+ &id, cs_etm__event_synth);
+}
+
+static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
+ struct perf_session *session)
+{
+ struct perf_evlist *evlist = session->evlist;
+ struct perf_evsel *evsel;
+ struct perf_event_attr attr;
+ bool found = false;
+ u64 id;
+ int err;
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (evsel->attr.type == etm->pmu_type) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ pr_debug("No selected events with CoreSight Trace data\n");
+ return 0;
+ }
+
+ memset(&attr, 0, sizeof(struct perf_event_attr));
+ attr.size = sizeof(struct perf_event_attr);
+ attr.type = PERF_TYPE_HARDWARE;
+ attr.sample_type = evsel->attr.sample_type & PERF_SAMPLE_MASK;
+ attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
+ PERF_SAMPLE_PERIOD;
+ if (etm->timeless_decoding)
+ attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
+ else
+ attr.sample_type |= PERF_SAMPLE_TIME;
+
+ attr.exclude_user = evsel->attr.exclude_user;
+ attr.exclude_kernel = evsel->attr.exclude_kernel;
+ attr.exclude_hv = evsel->attr.exclude_hv;
+ attr.exclude_host = evsel->attr.exclude_host;
+ attr.exclude_guest = evsel->attr.exclude_guest;
+ attr.sample_id_all = evsel->attr.sample_id_all;
+ attr.read_format = evsel->attr.read_format;
+
+	/* Create a new id value at a fixed offset from the evsel id */
+ id = evsel->id[0] + 1000000000;
+
+ if (!id)
+ id = 1;
+
+ if (etm->synth_opts.branches) {
+ attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
+ attr.sample_period = 1;
+ attr.sample_type |= PERF_SAMPLE_ADDR;
+ err = cs_etm__synth_event(session, &attr, id);
+ if (err)
+ return err;
+ etm->sample_branches = true;
+ etm->branches_sample_type = attr.sample_type;
+ etm->branches_id = id;
+ }
+
+ return 0;
+}
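
The 1000000000 offset only needs to yield an id that cannot collide with the ids the kernel assigned to the recorded events; for instance, with a hypothetical evsel id of 42:

u64 id = 42 + 1000000000;	/* synthetic id == 1000000042 */
/* the subsequent check remaps a wrapped-to-0 id onto 1 */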
+
+static int cs_etm__sample(struct cs_etm_queue *etmq)
+{
+ int ret;
+ struct cs_etm_packet packet;
+
+ while (1) {
+ ret = cs_etm_decoder__get_packet(etmq->decoder, &packet);
+ if (ret <= 0)
+ return ret;
+
+ /*
+ * If the packet contains an instruction range, generate an
+ * instruction sequence event.
+ */
+ if (packet.sample_type & CS_ETM_RANGE)
+ cs_etm__synth_branch_sample(etmq, &packet);
+ }
+
+ return 0;
+}
+
+static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
+{
+ struct cs_etm_auxtrace *etm = etmq->etm;
+ struct cs_etm_buffer buffer;
+ size_t buffer_used, processed;
+ int err = 0;
+
+ if (!etm->kernel_start)
+ etm->kernel_start = machine__kernel_start(etm->machine);
+
+ /* Go through each buffer in the queue and decode them one by one */
+more:
+ buffer_used = 0;
+ memset(&buffer, 0, sizeof(buffer));
+ err = cs_etm__get_trace(&buffer, etmq);
+ if (err <= 0)
+ return err;
+ /*
+	 * We cannot assume consecutive blocks in the data file are contiguous;
+	 * reset the decoder to force re-synchronisation.
+ */
+ err = cs_etm_decoder__reset(etmq->decoder);
+ if (err != 0)
+ return err;
+
+ /* Run trace decoder until buffer consumed or end of trace */
+ do {
+ processed = 0;
+
+ err = cs_etm_decoder__process_data_block(
+ etmq->decoder,
+ etmq->offset,
+ &buffer.buf[buffer_used],
+ buffer.len - buffer_used,
+ &processed);
+
+ if (err)
+ return err;
+
+ etmq->offset += processed;
+ buffer_used += processed;
+
+ /*
+		 * An error here is not fatal; move on and hope the next
+		 * chunk decodes cleanly.
+ */
+ err = cs_etm__sample(etmq);
+ } while (buffer.len > buffer_used);
+
+	goto more;
+
+ return err;
+}
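
The inner do/while relies on cs_etm_decoder__process_data_block() reporting partial progress through &processed. The same streaming pattern, reduced to a self-contained sketch against a hypothetical decoder API:

struct toy_decoder;			/* opaque, hypothetical */
int toy_decoder__process(struct toy_decoder *dec, const u8 *buf,
			 size_t len, size_t *processed);

static int toy_decode_stream(struct toy_decoder *dec,
			     const u8 *buf, size_t len)
{
	size_t used = 0, processed;
	int err;

	while (used < len) {
		processed = 0;
		err = toy_decoder__process(dec, buf + used,
					   len - used, &processed);
		if (err)
			return err;
		used += processed;	/* advance by what was consumed */
	}
	return 0;
}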
+
+static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
+ pid_t tid, u64 time_)
+{
+ unsigned int i;
+ struct auxtrace_queues *queues = &etm->queues;
+
+ for (i = 0; i < queues->nr_queues; i++) {
+ struct auxtrace_queue *queue = &etm->queues.queue_array[i];
+ struct cs_etm_queue *etmq = queue->priv;
+
+ if (etmq && ((tid == -1) || (etmq->tid == tid))) {
+ etmq->time = time_;
+ cs_etm__set_pid_tid_cpu(etm, queue);
+ cs_etm__run_decoder(etmq);
+ }
+ }
+
+ return 0;
+}
+
+static int cs_etm__process_event(struct perf_session *session,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct perf_tool *tool)
+{
+ int err = 0;
+ u64 timestamp;
+ struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
+ struct cs_etm_auxtrace,
+ auxtrace);
+
+ if (dump_trace)
+ return 0;
+
+ if (!tool->ordered_events) {
+ pr_err("CoreSight ETM Trace requires ordered events\n");
+ return -EINVAL;
+ }
+
+ if (!etm->timeless_decoding)
+ return -EINVAL;
+
+ if (sample->time && (sample->time != (u64) -1))
+ timestamp = sample->time;
+ else
+ timestamp = 0;
+
+ if (timestamp || etm->timeless_decoding) {
+ err = cs_etm__update_queues(etm);
+ if (err)
+ return err;
+ }
+
+ if (event->header.type == PERF_RECORD_EXIT)
+ return cs_etm__process_timeless_queues(etm,
+ event->fork.tid,
+ sample->time);
+
+ return 0;
+}
+
+static int cs_etm__process_auxtrace_event(struct perf_session *session,
+ union perf_event *event,
+ struct perf_tool *tool __maybe_unused)
+{
+ struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
+ struct cs_etm_auxtrace,
+ auxtrace);
+ if (!etm->data_queued) {
+ struct auxtrace_buffer *buffer;
+ off_t data_offset;
+ int fd = perf_data__fd(session->data);
+ bool is_pipe = perf_data__is_pipe(session->data);
+ int err;
+
+ if (is_pipe)
+ data_offset = 0;
+ else {
+ data_offset = lseek(fd, 0, SEEK_CUR);
+ if (data_offset == -1)
+ return -errno;
+ }
+
+ err = auxtrace_queues__add_event(&etm->queues, session,
+ event, data_offset, &buffer);
+ if (err)
+ return err;
+
+ if (dump_trace)
+ if (auxtrace_buffer__get_data(buffer, fd)) {
+ cs_etm__dump_event(etm, buffer);
+ auxtrace_buffer__put_data(buffer);
+ }
+ }
+
+ return 0;
+}
+
+static bool cs_etm__is_timeless_decoding(struct cs_etm_auxtrace *etm)
+{
+ struct perf_evsel *evsel;
+ struct perf_evlist *evlist = etm->session->evlist;
+ bool timeless_decoding = true;
+
+ /*
+	 * Loop through the list of events and disable timeless decoding
+	 * if any of them have the time bit set.
+ */
+ evlist__for_each_entry(evlist, evsel) {
+ if ((evsel->attr.sample_type & PERF_SAMPLE_TIME))
+ timeless_decoding = false;
+ }
+
+ return timeless_decoding;
+}
+
+static const char * const cs_etm_global_header_fmts[] = {
+ [CS_HEADER_VERSION_0] = " Header version %llx\n",
+ [CS_PMU_TYPE_CPUS] = " PMU type/num cpus %llx\n",
+ [CS_ETM_SNAPSHOT] = " Snapshot %llx\n",
+};
+
+static const char * const cs_etm_priv_fmts[] = {
+ [CS_ETM_MAGIC] = " Magic number %llx\n",
+ [CS_ETM_CPU] = " CPU %lld\n",
+ [CS_ETM_ETMCR] = " ETMCR %llx\n",
+ [CS_ETM_ETMTRACEIDR] = " ETMTRACEIDR %llx\n",
+ [CS_ETM_ETMCCER] = " ETMCCER %llx\n",
+ [CS_ETM_ETMIDR] = " ETMIDR %llx\n",
+};
+
+static const char * const cs_etmv4_priv_fmts[] = {
+ [CS_ETM_MAGIC] = " Magic number %llx\n",
+ [CS_ETM_CPU] = " CPU %lld\n",
+ [CS_ETMV4_TRCCONFIGR] = " TRCCONFIGR %llx\n",
+ [CS_ETMV4_TRCTRACEIDR] = " TRCTRACEIDR %llx\n",
+ [CS_ETMV4_TRCIDR0] = " TRCIDR0 %llx\n",
+ [CS_ETMV4_TRCIDR1] = " TRCIDR1 %llx\n",
+ [CS_ETMV4_TRCIDR2] = " TRCIDR2 %llx\n",
+ [CS_ETMV4_TRCIDR8] = " TRCIDR8 %llx\n",
+ [CS_ETMV4_TRCAUTHSTATUS] = " TRCAUTHSTATUS %llx\n",
+};
+
+static void cs_etm__print_auxtrace_info(u64 *val, int num)
+{
+ int i, j, cpu = 0;
+
+ for (i = 0; i < CS_HEADER_VERSION_0_MAX; i++)
+ fprintf(stdout, cs_etm_global_header_fmts[i], val[i]);
+
+ for (i = CS_HEADER_VERSION_0_MAX; cpu < num; cpu++) {
+ if (val[i] == __perf_cs_etmv3_magic)
+ for (j = 0; j < CS_ETM_PRIV_MAX; j++, i++)
+ fprintf(stdout, cs_etm_priv_fmts[j], val[i]);
+ else if (val[i] == __perf_cs_etmv4_magic)
+ for (j = 0; j < CS_ETMV4_PRIV_MAX; j++, i++)
+ fprintf(stdout, cs_etmv4_priv_fmts[j], val[i]);
+ else
+			/* failure: unknown magic number, return */
+ return;
+ }
+}
+
+int cs_etm__process_auxtrace_info(union perf_event *event,
+ struct perf_session *session)
+{
+ struct auxtrace_info_event *auxtrace_info = &event->auxtrace_info;
+ struct cs_etm_auxtrace *etm = NULL;
+ struct int_node *inode;
+ unsigned int pmu_type;
+ int event_header_size = sizeof(struct perf_event_header);
+ int info_header_size;
+ int total_size = auxtrace_info->header.size;
+ int priv_size = 0;
+ int num_cpu;
+ int err = 0, idx = -1;
+ int i, j, k;
+ u64 *ptr, *hdr = NULL;
+ u64 **metadata = NULL;
+
+ /*
+ * sizeof(auxtrace_info_event::type) +
+ * sizeof(auxtrace_info_event::reserved) == 8
+ */
+ info_header_size = 8;
+
+ if (total_size < (event_header_size + info_header_size))
+ return -EINVAL;
+
+ priv_size = total_size - event_header_size - info_header_size;
+
+ /* First the global part */
+ ptr = (u64 *) auxtrace_info->priv;
+
+ /* Look for version '0' of the header */
+ if (ptr[0] != 0)
+ return -EINVAL;
+
+ hdr = zalloc(sizeof(*hdr) * CS_HEADER_VERSION_0_MAX);
+ if (!hdr)
+ return -ENOMEM;
+
+ /* Extract header information - see cs-etm.h for format */
+ for (i = 0; i < CS_HEADER_VERSION_0_MAX; i++)
+ hdr[i] = ptr[i];
+ num_cpu = hdr[CS_PMU_TYPE_CPUS] & 0xffffffff;
+ pmu_type = (unsigned int) ((hdr[CS_PMU_TYPE_CPUS] >> 32) &
+ 0xffffffff);
+
+ /*
+	 * Create an RB tree for the traceID-CPU# tuples. Since the conversion
+	 * has to be made for every packet that gets decoded, anything faster
+	 * than a sequential array scan is worth the effort.
+ */
+ traceid_list = intlist__new(NULL);
+ if (!traceid_list) {
+ err = -ENOMEM;
+ goto err_free_hdr;
+ }
+
+ metadata = zalloc(sizeof(*metadata) * num_cpu);
+ if (!metadata) {
+ err = -ENOMEM;
+ goto err_free_traceid_list;
+ }
+
+ /*
+ * The metadata is stored in the auxtrace_info section and encodes
+ * the configuration of the ARM embedded trace macrocell which is
+ * required by the trace decoder to properly decode the trace due
+ * to its highly compressed nature.
+ */
+ for (j = 0; j < num_cpu; j++) {
+ if (ptr[i] == __perf_cs_etmv3_magic) {
+ metadata[j] = zalloc(sizeof(*metadata[j]) *
+ CS_ETM_PRIV_MAX);
+ if (!metadata[j]) {
+ err = -ENOMEM;
+ goto err_free_metadata;
+ }
+ for (k = 0; k < CS_ETM_PRIV_MAX; k++)
+ metadata[j][k] = ptr[i + k];
+
+ /* The traceID is our handle */
+ idx = metadata[j][CS_ETM_ETMTRACEIDR];
+ i += CS_ETM_PRIV_MAX;
+ } else if (ptr[i] == __perf_cs_etmv4_magic) {
+ metadata[j] = zalloc(sizeof(*metadata[j]) *
+ CS_ETMV4_PRIV_MAX);
+ if (!metadata[j]) {
+ err = -ENOMEM;
+ goto err_free_metadata;
+ }
+ for (k = 0; k < CS_ETMV4_PRIV_MAX; k++)
+ metadata[j][k] = ptr[i + k];
+
+ /* The traceID is our handle */
+ idx = metadata[j][CS_ETMV4_TRCTRACEIDR];
+ i += CS_ETMV4_PRIV_MAX;
+ }
+
+ /* Get an RB node for this CPU */
+ inode = intlist__findnew(traceid_list, idx);
+
+ /* Something went wrong, no need to continue */
+ if (!inode) {
+ err = PTR_ERR(inode);
+ goto err_free_metadata;
+ }
+
+ /*
+		 * The node for that CPU should not already be taken;
+		 * back out if it is.
+ */
+ if (inode->priv) {
+ err = -EINVAL;
+ goto err_free_metadata;
+ }
+ /* All good, associate the traceID with the CPU# */
+ inode->priv = &metadata[j][CS_ETM_CPU];
+ }
+
+ /*
+ * Each of CS_HEADER_VERSION_0_MAX, CS_ETM_PRIV_MAX and
+ * CS_ETMV4_PRIV_MAX mark how many double words are in the
+ * global metadata, and each cpu's metadata respectively.
+ * The following tests if the correct number of double words was
+ * present in the auxtrace info section.
+ */
+ if (i * 8 != priv_size) {
+ err = -EINVAL;
+ goto err_free_metadata;
+ }
+
+ etm = zalloc(sizeof(*etm));
+
+ if (!etm) {
+ err = -ENOMEM;
+ goto err_free_metadata;
+ }
+
+ err = auxtrace_queues__init(&etm->queues);
+ if (err)
+ goto err_free_etm;
+
+ etm->session = session;
+ etm->machine = &session->machines.host;
+
+ etm->num_cpu = num_cpu;
+ etm->pmu_type = pmu_type;
+ etm->snapshot_mode = (hdr[CS_ETM_SNAPSHOT] != 0);
+ etm->metadata = metadata;
+ etm->auxtrace_type = auxtrace_info->type;
+ etm->timeless_decoding = cs_etm__is_timeless_decoding(etm);
+
+ etm->auxtrace.process_event = cs_etm__process_event;
+ etm->auxtrace.process_auxtrace_event = cs_etm__process_auxtrace_event;
+ etm->auxtrace.flush_events = cs_etm__flush_events;
+ etm->auxtrace.free_events = cs_etm__free_events;
+ etm->auxtrace.free = cs_etm__free;
+ session->auxtrace = &etm->auxtrace;
+
+ if (dump_trace) {
+ cs_etm__print_auxtrace_info(auxtrace_info->priv, num_cpu);
+ return 0;
+ }
+
+ if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
+ etm->synth_opts = *session->itrace_synth_opts;
+ } else {
+ itrace_synth_opts__set_default(&etm->synth_opts);
+ etm->synth_opts.callchain = false;
+ }
+
+ err = cs_etm__synth_events(etm, session);
+ if (err)
+ goto err_free_queues;
+
+ err = auxtrace_queues__process_index(&etm->queues, session);
+ if (err)
+ goto err_free_queues;
+
+ etm->data_queued = etm->queues.populated;
+
+ return 0;
+
+err_free_queues:
+ auxtrace_queues__free(&etm->queues);
+ session->auxtrace = NULL;
+err_free_etm:
+ zfree(&etm);
+err_free_metadata:
+ /* No need to check @metadata[j], free(NULL) is supported */
+ for (j = 0; j < num_cpu; j++)
+ free(metadata[j]);
+ zfree(&metadata);
+err_free_traceid_list:
+ intlist__delete(traceid_list);
+err_free_hdr:
+ zfree(&hdr);
+
+ return -EINVAL;
+}
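
The final i * 8 == priv_size test can be checked by hand. Taking the sizes visible in the format arrays above (3 global header words, 9 words per ETMv4 CPU), a file recorded on two ETMv4 CPUs must satisfy:

int i = 3 + 2 * 9;		/* 21 double words read so far */
/* priv_size must equal 21 * 8 == 168 bytes, else -EINVAL */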
diff --git a/tools/perf/util/cs-etm.h b/tools/perf/util/cs-etm.h
index 3cc6bc3263fe..5864d5dca616 100644
--- a/tools/perf/util/cs-etm.h
+++ b/tools/perf/util/cs-etm.h
@@ -18,6 +18,9 @@
#ifndef INCLUDE__UTIL_PERF_CS_ETM_H__
#define INCLUDE__UTIL_PERF_CS_ETM_H__
+#include "util/event.h"
+#include "util/session.h"
+
 /* Versioning header in case things need to change in the future. That way
  * decoding of old snapshots is still possible.
*/
@@ -61,6 +64,9 @@ enum {
CS_ETMV4_PRIV_MAX,
};
+/* RB tree for quick conversion between traceID and CPUs */
+struct intlist *traceid_list;
+
#define KiB(x) ((x) * 1024)
#define MiB(x) ((x) * 1024 * 1024)
@@ -71,4 +77,16 @@ static const u64 __perf_cs_etmv4_magic = 0x4040404040404040ULL;
#define CS_ETMV3_PRIV_SIZE (CS_ETM_PRIV_MAX * sizeof(u64))
#define CS_ETMV4_PRIV_SIZE (CS_ETMV4_PRIV_MAX * sizeof(u64))
+#ifdef HAVE_CSTRACE_SUPPORT
+int cs_etm__process_auxtrace_info(union perf_event *event,
+ struct perf_session *session);
+#else
+static inline int
+cs_etm__process_auxtrace_info(union perf_event *event __maybe_unused,
+ struct perf_session *session __maybe_unused)
+{
+ return -1;
+}
+#endif
+
#endif
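
Once populated, traceid_list gives logarithmic traceID-to-CPU resolution at decode time. A lookup sketch, assuming the list was filled by cs_etm__process_auxtrace_info() as above:

static int traceid_to_cpu(int trace_id)
{
	struct int_node *inode = intlist__find(traceid_list, trace_id);

	if (!inode || !inode->priv)
		return -EINVAL;

	/* inode->priv points at metadata[cpu][CS_ETM_CPU] */
	return (int)(*(u64 *)inode->priv);
}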
diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
index 48094fde0a68..d8cfc19ddb10 100644
--- a/tools/perf/util/data.c
+++ b/tools/perf/util/data.c
@@ -12,16 +12,6 @@
#include "util.h"
#include "debug.h"
-#ifndef O_CLOEXEC
-#ifdef __sparc__
-#define O_CLOEXEC 0x400000
-#elif defined(__alpha__) || defined(__hppa__)
-#define O_CLOEXEC 010000000
-#else
-#define O_CLOEXEC 02000000
-#endif
-#endif
-
static bool check_pipe(struct perf_data *data)
{
struct stat st;
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index d5b6f7f5baff..36ef45b2e89d 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -446,7 +446,7 @@ static int do_open(char *name)
char sbuf[STRERR_BUFSIZE];
do {
- fd = open(name, O_RDONLY);
+ fd = open(name, O_RDONLY|O_CLOEXEC);
if (fd >= 0)
return fd;
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index 6276b340f893..6d311868d850 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -1,8 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
#include "cpumap.h"
#include "env.h"
+#include "sane_ctype.h"
#include "util.h"
#include <errno.h>
+#include <sys/utsname.h>
struct perf_env perf_env;
@@ -93,3 +95,48 @@ void cpu_cache_level__free(struct cpu_cache_level *cache)
free(cache->map);
free(cache->size);
}
+
+/*
+ * Return architecture name in a normalized form.
+ * The conversion logic comes from the Makefile.
+ */
+static const char *normalize_arch(char *arch)
+{
+ if (!strcmp(arch, "x86_64"))
+ return "x86";
+ if (arch[0] == 'i' && arch[2] == '8' && arch[3] == '6')
+ return "x86";
+ if (!strcmp(arch, "sun4u") || !strncmp(arch, "sparc", 5))
+ return "sparc";
+ if (!strcmp(arch, "aarch64") || !strcmp(arch, "arm64"))
+ return "arm64";
+ if (!strncmp(arch, "arm", 3) || !strcmp(arch, "sa110"))
+ return "arm";
+ if (!strncmp(arch, "s390", 4))
+ return "s390";
+ if (!strncmp(arch, "parisc", 6))
+ return "parisc";
+ if (!strncmp(arch, "powerpc", 7) || !strncmp(arch, "ppc", 3))
+ return "powerpc";
+ if (!strncmp(arch, "mips", 4))
+ return "mips";
+ if (!strncmp(arch, "sh", 2) && isdigit(arch[2]))
+ return "sh";
+
+ return arch;
+}
+
+const char *perf_env__arch(struct perf_env *env)
+{
+ struct utsname uts;
+ char *arch_name;
+
+ if (!env) { /* Assume local operation */
+ if (uname(&uts) < 0)
+ return NULL;
+ arch_name = uts.machine;
+ } else
+ arch_name = env->arch;
+
+ return normalize_arch(arch_name);
+}
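
Passing NULL asks uname(2) about the local machine; a non-NULL env uses the architecture string recorded in the perf.data header. Both go through the same normalization:

const char *arch = perf_env__arch(NULL);	/* local machine */
/*
 * "x86_64" and "i686" both normalize to "x86";
 * "aarch64" and "arm64" both normalize to "arm64".
 */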
diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
index 1eb35b190b34..bf970f57dce0 100644
--- a/tools/perf/util/env.h
+++ b/tools/perf/util/env.h
@@ -65,4 +65,6 @@ int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[]);
int perf_env__read_cpu_topology_map(struct perf_env *env);
void cpu_cache_level__free(struct cpu_cache_level *cache);
+
+const char *perf_env__arch(struct perf_env *env);
#endif /* __PERF_ENV_H */
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 97a8ef9980db..44e603c27944 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -1435,6 +1435,11 @@ size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp)
event->context_switch.next_prev_tid);
}
+static size_t perf_event__fprintf_lost(union perf_event *event, FILE *fp)
+{
+ return fprintf(fp, " lost %" PRIu64 "\n", event->lost.lost);
+}
+
size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
size_t ret = fprintf(fp, "PERF_RECORD_%s",
@@ -1467,6 +1472,9 @@ size_t perf_event__fprintf(union perf_event *event, FILE *fp)
case PERF_RECORD_SWITCH_CPU_WIDE:
ret += perf_event__fprintf_switch(event, fp);
break;
+ case PERF_RECORD_LOST:
+ ret += perf_event__fprintf_lost(event, fp);
+ break;
default:
ret += fprintf(fp, "\n");
}
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 1ae95efbfb95..0f794744919c 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -205,6 +205,7 @@ struct perf_sample {
u32 flags;
u16 insn_len;
u8 cpumode;
+ u16 misc;
char insn[MAX_INSN];
void *raw_data;
struct ip_callchain *callchain;
@@ -774,8 +775,7 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
u64 read_format);
int perf_event__synthesize_sample(union perf_event *event, u64 type,
u64 read_format,
- const struct perf_sample *sample,
- bool swapped);
+ const struct perf_sample *sample);
pid_t perf_event__synthesize_comm(struct perf_tool *tool,
union perf_event *event, pid_t pid,
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index b62e523a7035..ac35cd214feb 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -25,6 +25,7 @@
#include "parse-events.h"
#include <subcmd/parse-options.h>
+#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
@@ -125,7 +126,7 @@ static void perf_evlist__purge(struct perf_evlist *evlist)
void perf_evlist__exit(struct perf_evlist *evlist)
{
zfree(&evlist->mmap);
- zfree(&evlist->backward_mmap);
+ zfree(&evlist->overwrite_mmap);
fdarray__exit(&evlist->pollfd);
}
@@ -675,11 +676,11 @@ static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value)
{
int i;
- if (!evlist->backward_mmap)
+ if (!evlist->overwrite_mmap)
return 0;
for (i = 0; i < evlist->nr_mmaps; i++) {
- int fd = evlist->backward_mmap[i].fd;
+ int fd = evlist->overwrite_mmap[i].fd;
int err;
if (fd < 0)
@@ -711,7 +712,7 @@ union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int
 * No need for a read-write ring buffer: the kernel stops outputting when
 * it hits md->prev (perf_mmap__consume()).
*/
- return perf_mmap__read_forward(md, evlist->overwrite);
+ return perf_mmap__read_forward(md);
}
union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int idx)
@@ -738,7 +739,7 @@ void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
{
- perf_mmap__consume(&evlist->mmap[idx], evlist->overwrite);
+ perf_mmap__consume(&evlist->mmap[idx], false);
}
static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
@@ -749,16 +750,16 @@ static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
for (i = 0; i < evlist->nr_mmaps; i++)
perf_mmap__munmap(&evlist->mmap[i]);
- if (evlist->backward_mmap)
+ if (evlist->overwrite_mmap)
for (i = 0; i < evlist->nr_mmaps; i++)
- perf_mmap__munmap(&evlist->backward_mmap[i]);
+ perf_mmap__munmap(&evlist->overwrite_mmap[i]);
}
void perf_evlist__munmap(struct perf_evlist *evlist)
{
perf_evlist__munmap_nofree(evlist);
zfree(&evlist->mmap);
- zfree(&evlist->backward_mmap);
+ zfree(&evlist->overwrite_mmap);
}
static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist)
@@ -800,7 +801,7 @@ perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused,
static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
struct mmap_params *mp, int cpu_idx,
- int thread, int *_output, int *_output_backward)
+ int thread, int *_output, int *_output_overwrite)
{
struct perf_evsel *evsel;
int revent;
@@ -812,18 +813,20 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
int fd;
int cpu;
+ mp->prot = PROT_READ | PROT_WRITE;
if (evsel->attr.write_backward) {
- output = _output_backward;
- maps = evlist->backward_mmap;
+ output = _output_overwrite;
+ maps = evlist->overwrite_mmap;
if (!maps) {
maps = perf_evlist__alloc_mmap(evlist);
if (!maps)
return -1;
- evlist->backward_mmap = maps;
+ evlist->overwrite_mmap = maps;
if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
}
+ mp->prot &= ~PROT_WRITE;
}
if (evsel->system_wide && thread)
@@ -884,14 +887,14 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
pr_debug2("perf event ring buffer mmapped per cpu\n");
for (cpu = 0; cpu < nr_cpus; cpu++) {
int output = -1;
- int output_backward = -1;
+ int output_overwrite = -1;
auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
true);
for (thread = 0; thread < nr_threads; thread++) {
if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
- thread, &output, &output_backward))
+ thread, &output, &output_overwrite))
goto out_unmap;
}
}
@@ -912,13 +915,13 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
pr_debug2("perf event ring buffer mmapped per thread\n");
for (thread = 0; thread < nr_threads; thread++) {
int output = -1;
- int output_backward = -1;
+ int output_overwrite = -1;
auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
false);
if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
- &output, &output_backward))
+ &output, &output_overwrite))
goto out_unmap;
}
@@ -1052,15 +1055,18 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
* Return: %0 on success, negative error code otherwise.
*/
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
- bool overwrite, unsigned int auxtrace_pages,
+ unsigned int auxtrace_pages,
bool auxtrace_overwrite)
{
struct perf_evsel *evsel;
const struct cpu_map *cpus = evlist->cpus;
const struct thread_map *threads = evlist->threads;
- struct mmap_params mp = {
- .prot = PROT_READ | (overwrite ? 0 : PROT_WRITE),
- };
+ /*
+	 * Delay setting mp.prot: it is set just before each perf_mmap__mmap()
+	 * call, since its value depends on the evsel's write_backward flag.
+	 * Hence &mp must not be passed through a const pointer.
+ */
+ struct mmap_params mp;
if (!evlist->mmap)
evlist->mmap = perf_evlist__alloc_mmap(evlist);
@@ -1070,7 +1076,6 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
return -ENOMEM;
- evlist->overwrite = overwrite;
evlist->mmap_len = perf_evlist__mmap_size(pages);
pr_debug("mmap size %zuB\n", evlist->mmap_len);
mp.mask = evlist->mmap_len - page_size - 1;
@@ -1091,10 +1096,9 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
return perf_evlist__mmap_per_cpu(evlist, &mp);
}
-int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
- bool overwrite)
+int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages)
{
- return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false);
+ return perf_evlist__mmap_ex(evlist, pages, 0, false);
}
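
The per-evsel protection logic a few hunks up is what replaces the old evlist-wide 'overwrite' flag: a forward ring buffer is mapped read-write so user space can advance data_tail, while a write_backward buffer is mapped read-only, leaving the kernel free to overwrite it. In outline (a sketch, not the exact code):

int prot = PROT_READ | PROT_WRITE;	/* forward ring: tail is updated */

if (evsel->attr.write_backward)
	prot &= ~PROT_WRITE;		/* overwrite ring: map read-only */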
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
@@ -1102,7 +1106,8 @@ int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
struct cpu_map *cpus;
struct thread_map *threads;
- threads = thread_map__new_str(target->pid, target->tid, target->uid);
+ threads = thread_map__new_str(target->pid, target->tid, target->uid,
+ target->per_thread);
if (!threads)
return -1;
@@ -1582,6 +1587,17 @@ int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *even
return perf_evsel__parse_sample(evsel, event, sample);
}
+int perf_evlist__parse_sample_timestamp(struct perf_evlist *evlist,
+ union perf_event *event,
+ u64 *timestamp)
+{
+ struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
+
+ if (!evsel)
+ return -EFAULT;
+ return perf_evsel__parse_sample_timestamp(evsel, event, timestamp);
+}
+
size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
struct perf_evsel *evsel;
@@ -1739,13 +1755,13 @@ void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist,
RESUME,
} action = NONE;
- if (!evlist->backward_mmap)
+ if (!evlist->overwrite_mmap)
return;
switch (old_state) {
case BKW_MMAP_NOTREADY: {
if (state != BKW_MMAP_RUNNING)
- goto state_err;;
+ goto state_err;
break;
}
case BKW_MMAP_RUNNING: {
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 491f69542920..75f8e0ad5d76 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -7,7 +7,6 @@
#include <linux/refcount.h>
#include <linux/list.h>
#include <api/fd/array.h>
-#include <fcntl.h>
#include <stdio.h>
#include "../perf.h"
#include "event.h"
@@ -31,7 +30,6 @@ struct perf_evlist {
int nr_entries;
int nr_groups;
int nr_mmaps;
- bool overwrite;
bool enabled;
bool has_user_cpus;
size_t mmap_len;
@@ -45,12 +43,14 @@ struct perf_evlist {
} workload;
struct fdarray pollfd;
struct perf_mmap *mmap;
- struct perf_mmap *backward_mmap;
+ struct perf_mmap *overwrite_mmap;
struct thread_map *threads;
struct cpu_map *cpus;
struct perf_evsel *selected;
struct events_stats stats;
struct perf_env *env;
+ u64 first_sample_time;
+ u64 last_sample_time;
};
struct perf_evsel_str_handler {
@@ -169,10 +169,9 @@ int perf_evlist__parse_mmap_pages(const struct option *opt,
unsigned long perf_event_mlock_kb_in_pages(void);
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
- bool overwrite, unsigned int auxtrace_pages,
+ unsigned int auxtrace_pages,
bool auxtrace_overwrite);
-int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
- bool overwrite);
+int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages);
void perf_evlist__munmap(struct perf_evlist *evlist);
size_t perf_evlist__mmap_size(unsigned long pages);
@@ -205,6 +204,10 @@ u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist);
int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
struct perf_sample *sample);
+int perf_evlist__parse_sample_timestamp(struct perf_evlist *evlist,
+ union perf_event *event,
+ u64 *timestamp);
+
bool perf_evlist__valid_sample_type(struct perf_evlist *evlist);
bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist);
bool perf_evlist__valid_read_format(struct perf_evlist *evlist);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index d5fbcf8c7aa7..66fa45198a11 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -36,6 +36,7 @@
#include "debug.h"
#include "trace-event.h"
#include "stat.h"
+#include "memswap.h"
#include "util/parse-branch-options.h"
#include "sane_ctype.h"
@@ -650,9 +651,9 @@ int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
return ret;
}
-void perf_evsel__config_callchain(struct perf_evsel *evsel,
- struct record_opts *opts,
- struct callchain_param *param)
+static void __perf_evsel__config_callchain(struct perf_evsel *evsel,
+ struct record_opts *opts,
+ struct callchain_param *param)
{
bool function = perf_evsel__is_function_event(evsel);
struct perf_event_attr *attr = &evsel->attr;
@@ -698,6 +699,14 @@ void perf_evsel__config_callchain(struct perf_evsel *evsel,
}
}
+void perf_evsel__config_callchain(struct perf_evsel *evsel,
+ struct record_opts *opts,
+ struct callchain_param *param)
+{
+ if (param->enabled)
+ return __perf_evsel__config_callchain(evsel, opts, param);
+}
+
static void
perf_evsel__reset_callgraph(struct perf_evsel *evsel,
struct callchain_param *param)
@@ -717,19 +726,19 @@ perf_evsel__reset_callgraph(struct perf_evsel *evsel,
}
static void apply_config_terms(struct perf_evsel *evsel,
- struct record_opts *opts)
+ struct record_opts *opts, bool track)
{
struct perf_evsel_config_term *term;
struct list_head *config_terms = &evsel->config_terms;
struct perf_event_attr *attr = &evsel->attr;
- struct callchain_param param;
+ /* callgraph default */
+ struct callchain_param param = {
+ .record_mode = callchain_param.record_mode,
+ };
u32 dump_size = 0;
int max_stack = 0;
const char *callgraph_buf = NULL;
- /* callgraph default */
- param.record_mode = callchain_param.record_mode;
-
list_for_each_entry(term, config_terms, list) {
switch (term->type) {
case PERF_EVSEL__CONFIG_TERM_PERIOD:
@@ -779,6 +788,8 @@ static void apply_config_terms(struct perf_evsel *evsel,
case PERF_EVSEL__CONFIG_TERM_OVERWRITE:
attr->write_backward = term->val.overwrite ? 1 : 0;
break;
+ case PERF_EVSEL__CONFIG_TERM_DRV_CFG:
+ break;
default:
break;
}
@@ -786,6 +797,8 @@ static void apply_config_terms(struct perf_evsel *evsel,
/* User explicitly set per-event callgraph, clear the old setting and reset. */
if ((callgraph_buf != NULL) || (dump_size > 0) || max_stack) {
+ bool sample_address = false;
+
if (max_stack) {
param.max_stack = max_stack;
if (callgraph_buf == NULL)
@@ -805,6 +818,8 @@ static void apply_config_terms(struct perf_evsel *evsel,
evsel->name);
return;
}
+ if (param.record_mode == CALLCHAIN_DWARF)
+ sample_address = true;
}
}
if (dump_size > 0) {
@@ -817,8 +832,14 @@ static void apply_config_terms(struct perf_evsel *evsel,
perf_evsel__reset_callgraph(evsel, &callchain_param);
/* set perf-event callgraph */
- if (param.enabled)
+ if (param.enabled) {
+ if (sample_address) {
+ perf_evsel__set_sample_bit(evsel, ADDR);
+ perf_evsel__set_sample_bit(evsel, DATA_SRC);
+ evsel->attr.mmap_data = track;
+ }
perf_evsel__config_callchain(evsel, opts, &param);
+ }
}
}
@@ -1049,7 +1070,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
* Apply event specific term settings,
* it overloads any global configuration.
*/
- apply_config_terms(evsel, opts);
+ apply_config_terms(evsel, opts, track);
evsel->ignore_missing_thread = opts->ignore_missing_thread;
}
@@ -1574,6 +1595,7 @@ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
PRINT_ATTRf(use_clockid, p_unsigned);
PRINT_ATTRf(context_switch, p_unsigned);
PRINT_ATTRf(write_backward, p_unsigned);
+ PRINT_ATTRf(namespaces, p_unsigned);
PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
PRINT_ATTRf(bp_type, p_unsigned);
@@ -1596,10 +1618,46 @@ static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
return fprintf(fp, " %-32s %s\n", name, val);
}
+static void perf_evsel__remove_fd(struct perf_evsel *pos,
+ int nr_cpus, int nr_threads,
+ int thread_idx)
+{
+ for (int cpu = 0; cpu < nr_cpus; cpu++)
+ for (int thread = thread_idx; thread < nr_threads - 1; thread++)
+ FD(pos, cpu, thread) = FD(pos, cpu, thread + 1);
+}
+
+static int update_fds(struct perf_evsel *evsel,
+ int nr_cpus, int cpu_idx,
+ int nr_threads, int thread_idx)
+{
+ struct perf_evsel *pos;
+
+ if (cpu_idx >= nr_cpus || thread_idx >= nr_threads)
+ return -EINVAL;
+
+ evlist__for_each_entry(evsel->evlist, pos) {
+ nr_cpus = pos != evsel ? nr_cpus : cpu_idx;
+
+ perf_evsel__remove_fd(pos, nr_cpus, nr_threads, thread_idx);
+
+ /*
+		 * Since the fds for the next evsel have not been created yet,
+		 * there is no need to iterate over the whole event list.
+ */
+ if (pos == evsel)
+ break;
+ }
+ return 0;
+}
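
perf_evsel__remove_fd() closes the gap left by a vanished thread by shifting every later fd one slot to the left. The same move on a single per-cpu row, as a toy model:

#include <string.h>

static void toy_remove_fd(int *fds, int nr_threads, int thread_idx)
{
	/* drop slot 'thread_idx', sliding the tail of the row left */
	memmove(&fds[thread_idx], &fds[thread_idx + 1],
		(nr_threads - 1 - thread_idx) * sizeof(*fds));
}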
+
static bool ignore_missing_thread(struct perf_evsel *evsel,
+ int nr_cpus, int cpu,
struct thread_map *threads,
int thread, int err)
{
+ pid_t ignore_pid = thread_map__pid(threads, thread);
+
if (!evsel->ignore_missing_thread)
return false;
@@ -1615,11 +1673,18 @@ static bool ignore_missing_thread(struct perf_evsel *evsel,
if (threads->nr == 1)
return false;
+ /*
+	 * Remove the fds for the missing thread first, because
+	 * thread_map__remove() will decrease threads->nr.
+ */
+ if (update_fds(evsel, nr_cpus, cpu, threads->nr, thread))
+ return false;
+
if (thread_map__remove(threads, thread))
return false;
pr_warning("WARNING: Ignored open failure for pid %d\n",
- thread_map__pid(threads, thread));
+ ignore_pid);
return true;
}
@@ -1724,7 +1789,7 @@ retry_open:
if (fd < 0) {
err = -errno;
- if (ignore_missing_thread(evsel, threads, thread, err)) {
+ if (ignore_missing_thread(evsel, cpus->nr, cpu, threads, thread, err)) {
/*
* We just removed 1 thread, so take a step
* back on thread index and lower the upper
@@ -1960,6 +2025,20 @@ static inline bool overflow(const void *endp, u16 max_size, const void *offset,
#define OVERFLOW_CHECK_u64(offset) \
OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))
+static int
+perf_event__check_size(union perf_event *event, unsigned int sample_size)
+{
+ /*
+ * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
+ * up to PERF_SAMPLE_PERIOD. After that overflow() must be used to
+ * check the format does not go past the end of the event.
+ */
+ if (sample_size + sizeof(event->header) > event->header.size)
+ return -EFAULT;
+
+ return 0;
+}
+
int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
struct perf_sample *data)
{
@@ -1981,6 +2060,9 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
data->stream_id = data->id = data->time = -1ULL;
data->period = evsel->attr.sample_period;
data->cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+ data->misc = event->header.misc;
+ data->id = -1ULL;
+ data->data_src = PERF_MEM_DATA_SRC_NONE;
if (event->header.type != PERF_RECORD_SAMPLE) {
if (!evsel->attr.sample_id_all)
@@ -1990,15 +2072,9 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
array = event->sample.array;
- /*
- * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
- * up to PERF_SAMPLE_PERIOD. After that overflow() must be used to
- * check the format does not go past the end of the event.
- */
- if (evsel->sample_size + sizeof(event->header) > event->header.size)
+ if (perf_event__check_size(event, evsel->sample_size))
return -EFAULT;
- data->id = -1ULL;
if (type & PERF_SAMPLE_IDENTIFIER) {
data->id = *array;
array++;
@@ -2028,7 +2104,6 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
array++;
}
- data->addr = 0;
if (type & PERF_SAMPLE_ADDR) {
data->addr = *array;
array++;
@@ -2120,14 +2195,27 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
if (type & PERF_SAMPLE_RAW) {
OVERFLOW_CHECK_u64(array);
u.val64 = *array;
- if (WARN_ONCE(swapped,
- "Endianness of raw data not corrected!\n")) {
- /* undo swap of u64, then swap on individual u32s */
+
+ /*
+ * Undo swap of u64, then swap on individual u32s,
+ * get the size of the raw area and undo all of the
+			 * swap. The pevent interface handles endianness by
+			 * itself.
+ */
+ if (swapped) {
u.val64 = bswap_64(u.val64);
u.val32[0] = bswap_32(u.val32[0]);
u.val32[1] = bswap_32(u.val32[1]);
}
data->raw_size = u.val32[0];
+
+ /*
+ * The raw data is aligned on 64bits including the
+ * u32 size, so it's safe to use mem_bswap_64.
+ */
+ if (swapped)
+ mem_bswap_64((void *) array, data->raw_size);
+
array = (void *)array + sizeof(u32);
OVERFLOW_CHECK(array, data->raw_size, max_size);
@@ -2192,14 +2280,12 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
array++;
}
- data->data_src = PERF_MEM_DATA_SRC_NONE;
if (type & PERF_SAMPLE_DATA_SRC) {
OVERFLOW_CHECK_u64(array);
data->data_src = *array;
array++;
}
- data->transaction = 0;
if (type & PERF_SAMPLE_TRANSACTION) {
OVERFLOW_CHECK_u64(array);
data->transaction = *array;
@@ -2232,6 +2318,50 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
return 0;
}
+int perf_evsel__parse_sample_timestamp(struct perf_evsel *evsel,
+ union perf_event *event,
+ u64 *timestamp)
+{
+ u64 type = evsel->attr.sample_type;
+ const u64 *array;
+
+ if (!(type & PERF_SAMPLE_TIME))
+ return -1;
+
+ if (event->header.type != PERF_RECORD_SAMPLE) {
+ struct perf_sample data = {
+ .time = -1ULL,
+ };
+
+ if (!evsel->attr.sample_id_all)
+ return -1;
+ if (perf_evsel__parse_id_sample(evsel, event, &data))
+ return -1;
+
+ *timestamp = data.time;
+ return 0;
+ }
+
+ array = event->sample.array;
+
+ if (perf_event__check_size(event, evsel->sample_size))
+ return -EFAULT;
+
+ if (type & PERF_SAMPLE_IDENTIFIER)
+ array++;
+
+ if (type & PERF_SAMPLE_IP)
+ array++;
+
+ if (type & PERF_SAMPLE_TID)
+ array++;
+
+ if (type & PERF_SAMPLE_TIME)
+ *timestamp = *array;
+
+ return 0;
+}
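
This fast path works because the fixed-size PERF_SAMPLE_* fields always sit in the same order at the front of a sample, so the timestamp is reached by skipping a known number of u64s. For example, with sample_type = IP | TID | TIME:

static u64 toy_sample_time(const u64 *array)
{
	array++;		/* PERF_SAMPLE_IP */
	array++;		/* PERF_SAMPLE_TID: pid and tid in one u64 */
	return *array;		/* PERF_SAMPLE_TIME */
}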
+
size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
u64 read_format)
{
@@ -2342,8 +2472,7 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
int perf_event__synthesize_sample(union perf_event *event, u64 type,
u64 read_format,
- const struct perf_sample *sample,
- bool swapped)
+ const struct perf_sample *sample)
{
u64 *array;
size_t sz;
@@ -2368,15 +2497,6 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
if (type & PERF_SAMPLE_TID) {
u.val32[0] = sample->pid;
u.val32[1] = sample->tid;
- if (swapped) {
- /*
- * Inverse of what is done in perf_evsel__parse_sample
- */
- u.val32[0] = bswap_32(u.val32[0]);
- u.val32[1] = bswap_32(u.val32[1]);
- u.val64 = bswap_64(u.val64);
- }
-
*array = u.val64;
array++;
}
@@ -2403,13 +2523,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
if (type & PERF_SAMPLE_CPU) {
u.val32[0] = sample->cpu;
- if (swapped) {
- /*
- * Inverse of what is done in perf_evsel__parse_sample
- */
- u.val32[0] = bswap_32(u.val32[0]);
- u.val64 = bswap_64(u.val64);
- }
+ u.val32[1] = 0;
*array = u.val64;
array++;
}
@@ -2456,15 +2570,6 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
if (type & PERF_SAMPLE_RAW) {
u.val32[0] = sample->raw_size;
- if (WARN_ONCE(swapped,
- "Endianness of raw data not corrected!\n")) {
- /*
- * Inverse of what is done in perf_evsel__parse_sample
- */
- u.val32[0] = bswap_32(u.val32[0]);
- u.val32[1] = bswap_32(u.val32[1]);
- u.val64 = bswap_64(u.val64);
- }
*array = u.val64;
array = (void *)array + sizeof(u32);
@@ -2743,8 +2848,9 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
break;
case EOPNOTSUPP:
if (evsel->attr.sample_period != 0)
- return scnprintf(msg, size, "%s",
- "PMU Hardware doesn't support sampling/overflow-interrupts.");
+ return scnprintf(msg, size,
+ "%s: PMU Hardware doesn't support sampling/overflow-interrupts. Try 'perf stat'",
+ perf_evsel__name(evsel));
if (evsel->attr.precise_ip)
return scnprintf(msg, size, "%s",
"\'precise\' request may not be supported. Try removing 'p' modifier.");
@@ -2781,16 +2887,9 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
perf_evsel__name(evsel));
}
-char *perf_evsel__env_arch(struct perf_evsel *evsel)
-{
- if (evsel && evsel->evlist && evsel->evlist->env)
- return evsel->evlist->env->arch;
- return NULL;
-}
-
-char *perf_evsel__env_cpuid(struct perf_evsel *evsel)
+struct perf_env *perf_evsel__env(struct perf_evsel *evsel)
{
- if (evsel && evsel->evlist && evsel->evlist->env)
- return evsel->evlist->env->cpuid;
+ if (evsel && evsel->evlist)
+ return evsel->evlist->env;
return NULL;
}
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 157f49e8a772..846e41644525 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -38,7 +38,7 @@ struct cgroup_sel;
* It is allocated within event parsing and attached to
* perf_evsel::config_terms list head.
*/
-enum {
+enum term_type {
PERF_EVSEL__CONFIG_TERM_PERIOD,
PERF_EVSEL__CONFIG_TERM_FREQ,
PERF_EVSEL__CONFIG_TERM_TIME,
@@ -49,12 +49,11 @@ enum {
PERF_EVSEL__CONFIG_TERM_OVERWRITE,
PERF_EVSEL__CONFIG_TERM_DRV_CFG,
PERF_EVSEL__CONFIG_TERM_BRANCH,
- PERF_EVSEL__CONFIG_TERM_MAX,
};
struct perf_evsel_config_term {
struct list_head list;
- int type;
+ enum term_type type;
union {
u64 period;
u64 freq;
@@ -339,6 +338,10 @@ static inline int perf_evsel__read_on_cpu_scaled(struct perf_evsel *evsel,
int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
struct perf_sample *sample);
+int perf_evsel__parse_sample_timestamp(struct perf_evsel *evsel,
+ union perf_event *event,
+ u64 *timestamp);
+
static inline struct perf_evsel *perf_evsel__next(struct perf_evsel *evsel)
{
return list_entry(evsel->node.next, struct perf_evsel, node);
@@ -443,7 +446,6 @@ typedef int (*attr__fprintf_f)(FILE *, const char *, const char *, void *);
int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
attr__fprintf_f attr__fprintf, void *priv);
-char *perf_evsel__env_arch(struct perf_evsel *evsel);
-char *perf_evsel__env_cpuid(struct perf_evsel *evsel);
+struct perf_env *perf_evsel__env(struct perf_evsel *evsel);
#endif /* __PERF_EVSEL_H */
diff --git a/tools/perf/util/generate-cmdlist.sh b/tools/perf/util/generate-cmdlist.sh
index 9bbcec4e3365..ff17920a5ebc 100755
--- a/tools/perf/util/generate-cmdlist.sh
+++ b/tools/perf/util/generate-cmdlist.sh
@@ -38,7 +38,7 @@ do
done
echo "#endif /* HAVE_LIBELF_SUPPORT */"
-echo "#ifdef HAVE_LIBAUDIT_SUPPORT"
+echo "#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE)"
sed -n -e 's/^perf-\([^ ]*\)[ ].* audit*/\1/p' command-list.txt |
sort |
while read cmd
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 7c0e9d587bfa..a326e0d8b5b6 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -15,9 +15,8 @@
#include <linux/bitops.h>
#include <linux/stringify.h>
#include <sys/stat.h>
-#include <sys/types.h>
#include <sys/utsname.h>
-#include <unistd.h>
+#include <linux/time64.h>
#include "evlist.h"
#include "evsel.h"
@@ -37,6 +36,7 @@
#include <api/fs/fs.h>
#include "asm/bug.h"
#include "tool.h"
+#include "time-utils.h"
#include "sane_ctype.h"
@@ -1182,6 +1182,20 @@ static int write_stat(struct feat_fd *ff __maybe_unused,
return 0;
}
+static int write_sample_time(struct feat_fd *ff,
+ struct perf_evlist *evlist)
+{
+ int ret;
+
+ ret = do_write(ff, &evlist->first_sample_time,
+ sizeof(evlist->first_sample_time));
+ if (ret < 0)
+ return ret;
+
+ return do_write(ff, &evlist->last_sample_time,
+ sizeof(evlist->last_sample_time));
+}
+
static void print_hostname(struct feat_fd *ff, FILE *fp)
{
fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname);
@@ -1507,6 +1521,28 @@ static void print_group_desc(struct feat_fd *ff, FILE *fp)
}
}
+static void print_sample_time(struct feat_fd *ff, FILE *fp)
+{
+ struct perf_session *session;
+ char time_buf[32];
+ double d;
+
+ session = container_of(ff->ph, struct perf_session, header);
+
+ timestamp__scnprintf_usec(session->evlist->first_sample_time,
+ time_buf, sizeof(time_buf));
+ fprintf(fp, "# time of first sample : %s\n", time_buf);
+
+ timestamp__scnprintf_usec(session->evlist->last_sample_time,
+ time_buf, sizeof(time_buf));
+ fprintf(fp, "# time of last sample : %s\n", time_buf);
+
+ d = (double)(session->evlist->last_sample_time -
+ session->evlist->first_sample_time) / NSEC_PER_MSEC;
+
+ fprintf(fp, "# sample duration : %10.3f ms\n", d);
+}
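
Both timestamps are in nanoseconds, so the duration is a single division by NSEC_PER_MSEC (1000000). A worked example with hypothetical values:

u64 first = 1000000000ULL;	/* first sample, ns (hypothetical) */
u64 last  = 1250000000ULL;	/* last sample, ns (hypothetical) */
double d  = (double)(last - first) / NSEC_PER_MSEC;	/* 250.000 ms */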
+
static int __event_process_build_id(struct build_id_event *bev,
char *filename,
struct perf_session *session)
@@ -2148,6 +2184,27 @@ out_free_caches:
return -1;
}
+static int process_sample_time(struct feat_fd *ff, void *data __maybe_unused)
+{
+ struct perf_session *session;
+ u64 first_sample_time, last_sample_time;
+ int ret;
+
+ session = container_of(ff->ph, struct perf_session, header);
+
+ ret = do_read_u64(ff, &first_sample_time);
+ if (ret)
+ return -1;
+
+ ret = do_read_u64(ff, &last_sample_time);
+ if (ret)
+ return -1;
+
+ session->evlist->first_sample_time = first_sample_time;
+ session->evlist->last_sample_time = last_sample_time;
+ return 0;
+}
+
struct feature_ops {
int (*write)(struct feat_fd *ff, struct perf_evlist *evlist);
void (*print)(struct feat_fd *ff, FILE *fp);
@@ -2205,6 +2262,7 @@ static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
FEAT_OPN(AUXTRACE, auxtrace, false),
FEAT_OPN(STAT, stat, false),
FEAT_OPN(CACHE, cache, true),
+ FEAT_OPR(SAMPLE_TIME, sample_time, false),
};
struct header_print_data {
@@ -3258,6 +3316,74 @@ int perf_event__synthesize_attrs(struct perf_tool *tool,
return err;
}
+static bool has_unit(struct perf_evsel *counter)
+{
+ return counter->unit && *counter->unit;
+}
+
+static bool has_scale(struct perf_evsel *counter)
+{
+ return counter->scale != 1;
+}
+
+int perf_event__synthesize_extra_attr(struct perf_tool *tool,
+ struct perf_evlist *evsel_list,
+ perf_event__handler_t process,
+ bool is_pipe)
+{
+ struct perf_evsel *counter;
+ int err;
+
+ /*
+	 * Synthesize the extra event information not carried within the
+	 * attr event: unit, scale and name.
+ */
+ evlist__for_each_entry(evsel_list, counter) {
+ if (!counter->supported)
+ continue;
+
+ /*
+ * Synthesize unit and scale only if it's defined.
+ */
+ if (has_unit(counter)) {
+ err = perf_event__synthesize_event_update_unit(tool, counter, process);
+ if (err < 0) {
+ pr_err("Couldn't synthesize evsel unit.\n");
+ return err;
+ }
+ }
+
+ if (has_scale(counter)) {
+ err = perf_event__synthesize_event_update_scale(tool, counter, process);
+ if (err < 0) {
+ pr_err("Couldn't synthesize evsel counter.\n");
+ return err;
+ }
+ }
+
+ if (counter->own_cpus) {
+ err = perf_event__synthesize_event_update_cpus(tool, counter, process);
+ if (err < 0) {
+ pr_err("Couldn't synthesize evsel cpus.\n");
+ return err;
+ }
+ }
+
+ /*
+ * Name is needed only for pipe output,
+ * perf.data carries event names.
+ */
+ if (is_pipe) {
+ err = perf_event__synthesize_event_update_name(tool, counter, process);
+ if (err < 0) {
+ pr_err("Couldn't synthesize evsel name.\n");
+ return err;
+ }
+ }
+ }
+ return 0;
+}
+
int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_evlist **pevlist)
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 29ccbfdf8724..f28aaaa3a440 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -9,6 +9,7 @@
#include <linux/types.h>
#include "event.h"
#include "env.h"
+#include "pmu.h"
enum {
HEADER_RESERVED = 0, /* always cleared */
@@ -34,6 +35,7 @@ enum {
HEADER_AUXTRACE,
HEADER_STAT,
HEADER_CACHE,
+ HEADER_SAMPLE_TIME,
HEADER_LAST_FEATURE,
HEADER_FEAT_BITS = 256,
};
@@ -107,6 +109,11 @@ int perf_event__synthesize_features(struct perf_tool *tool,
struct perf_evlist *evlist,
perf_event__handler_t process);
+int perf_event__synthesize_extra_attr(struct perf_tool *tool,
+ struct perf_evlist *evsel_list,
+ perf_event__handler_t process,
+ bool is_pipe);
+
int perf_event__process_feature(struct perf_tool *tool,
union perf_event *event,
struct perf_session *session);
@@ -166,5 +173,5 @@ int write_padded(struct feat_fd *fd, const void *bf,
*/
int get_cpuid(char *buffer, size_t sz);
-char *get_cpuid_str(void);
+char *get_cpuid_str(struct perf_pmu *pmu __maybe_unused);
#endif /* __PERF_HEADER_H */
diff --git a/tools/perf/util/intel-bts.c b/tools/perf/util/intel-bts.c
index 5325e65f9711..72db2744876d 100644
--- a/tools/perf/util/intel-bts.c
+++ b/tools/perf/util/intel-bts.c
@@ -67,7 +67,6 @@ struct intel_bts {
u64 branches_sample_type;
u64 branches_id;
size_t branches_event_size;
- bool synth_needs_swap;
unsigned long num_events;
};
@@ -303,8 +302,7 @@ static int intel_bts_synth_branch_sample(struct intel_bts_queue *btsq,
event.sample.header.size = bts->branches_event_size;
ret = perf_event__synthesize_sample(&event,
bts->branches_sample_type,
- 0, &sample,
- bts->synth_needs_swap);
+ 0, &sample);
if (ret)
return ret;
}
@@ -841,8 +839,6 @@ static int intel_bts_synth_events(struct intel_bts *bts,
__perf_evsel__sample_size(attr.sample_type);
}
- bts->synth_needs_swap = evsel->needs_swap;
-
return 0;
}
diff --git a/tools/perf/util/intel-pt-decoder/Build b/tools/perf/util/intel-pt-decoder/Build
index 10e0814bb8d2..1b704fbea9de 100644
--- a/tools/perf/util/intel-pt-decoder/Build
+++ b/tools/perf/util/intel-pt-decoder/Build
@@ -11,15 +11,21 @@ $(OUTPUT)util/intel-pt-decoder/inat-tables.c: $(inat_tables_script) $(inat_table
$(OUTPUT)util/intel-pt-decoder/intel-pt-insn-decoder.o: util/intel-pt-decoder/intel-pt-insn-decoder.c util/intel-pt-decoder/inat.c $(OUTPUT)util/intel-pt-decoder/inat-tables.c
@(diff -I 2>&1 | grep -q 'option requires an argument' && \
- test -d ../../kernel -a -d ../../tools -a -d ../perf && (( \
- diff -B -I'^#include' util/intel-pt-decoder/insn.c ../../arch/x86/lib/insn.c >/dev/null && \
- diff -B -I'^#include' util/intel-pt-decoder/inat.c ../../arch/x86/lib/inat.c >/dev/null && \
- diff -B util/intel-pt-decoder/x86-opcode-map.txt ../../arch/x86/lib/x86-opcode-map.txt >/dev/null && \
- diff -B util/intel-pt-decoder/gen-insn-attr-x86.awk ../../arch/x86/tools/gen-insn-attr-x86.awk >/dev/null && \
- diff -B -I'^#include' util/intel-pt-decoder/insn.h ../../arch/x86/include/asm/insn.h >/dev/null && \
- diff -B -I'^#include' util/intel-pt-decoder/inat.h ../../arch/x86/include/asm/inat.h >/dev/null && \
- diff -B -I'^#include' util/intel-pt-decoder/inat_types.h ../../arch/x86/include/asm/inat_types.h >/dev/null) \
- || echo "Warning: Intel PT: x86 instruction decoder differs from kernel" >&2 )) || true
+ test -d ../../kernel -a -d ../../tools -a -d ../perf && ( \
+ ((diff -B -I'^#include' util/intel-pt-decoder/insn.c ../../arch/x86/lib/insn.c >/dev/null) || \
+ (echo "Warning: Intel PT: x86 instruction decoder C file at 'tools/perf/util/intel-pt-decoder/insn.c' differs from latest version at 'arch/x86/lib/insn.c'" >&2)) && \
+ ((diff -B -I'^#include' util/intel-pt-decoder/inat.c ../../arch/x86/lib/inat.c >/dev/null) || \
+ (echo "Warning: Intel PT: x86 instruction decoder C file at 'tools/perf/util/intel-pt-decoder/inat.c' differs from latest version at 'arch/x86/lib/inat.c'" >&2)) && \
+ ((diff -B util/intel-pt-decoder/x86-opcode-map.txt ../../arch/x86/lib/x86-opcode-map.txt >/dev/null) || \
+ (echo "Warning: Intel PT: x86 instruction decoder map file at 'tools/perf/util/intel-pt-decoder/x86-opcode-map.txt' differs from latest version at 'arch/x86/lib/x86-opcode-map.txt'" >&2)) && \
+ ((diff -B util/intel-pt-decoder/gen-insn-attr-x86.awk ../../arch/x86/tools/gen-insn-attr-x86.awk >/dev/null) || \
+ (echo "Warning: Intel PT: x86 instruction decoder script at 'tools/perf/util/intel-pt-decoder/gen-insn-attr-x86.awk' differs from latest version at 'arch/x86/tools/gen-insn-attr-x86.awk'" >&2)) && \
+ ((diff -B -I'^#include' util/intel-pt-decoder/insn.h ../../arch/x86/include/asm/insn.h >/dev/null) || \
+ (echo "Warning: Intel PT: x86 instruction decoder header at 'tools/perf/util/intel-pt-decoder/insn.h' differs from latest version at 'arch/x86/include/asm/insn.h'" >&2)) && \
+ ((diff -B -I'^#include' util/intel-pt-decoder/inat.h ../../arch/x86/include/asm/inat.h >/dev/null) || \
+ (echo "Warning: Intel PT: x86 instruction decoder header at 'tools/perf/util/intel-pt-decoder/inat.h' differs from latest version at 'arch/x86/include/asm/inat.h'" >&2)) && \
+ ((diff -B -I'^#include' util/intel-pt-decoder/inat_types.h ../../arch/x86/include/asm/inat_types.h >/dev/null) || \
+ (echo "Warning: Intel PT: x86 instruction decoder header at 'tools/perf/util/intel-pt-decoder/inat_types.h' differs from latest version at 'arch/x86/include/asm/inat_types.h'" >&2)))) || true
$(call rule_mkdir)
$(call if_changed_dep,cc_o_c)
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 23f9ba676df0..3773d9c54f45 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -104,8 +104,6 @@ struct intel_pt {
u64 pwrx_id;
u64 cbr_id;
- bool synth_needs_swap;
-
u64 tsc_bit;
u64 mtc_bit;
u64 mtc_freq_bits;
@@ -1101,11 +1099,10 @@ static void intel_pt_prep_b_sample(struct intel_pt *pt,
}
static int intel_pt_inject_event(union perf_event *event,
- struct perf_sample *sample, u64 type,
- bool swapped)
+ struct perf_sample *sample, u64 type)
{
event->header.size = perf_event__sample_event_size(sample, type, 0);
- return perf_event__synthesize_sample(event, type, 0, sample, swapped);
+ return perf_event__synthesize_sample(event, type, 0, sample);
}
static inline int intel_pt_opt_inject(struct intel_pt *pt,
@@ -1115,7 +1112,7 @@ static inline int intel_pt_opt_inject(struct intel_pt *pt,
if (!pt->synth_opts.inject)
return 0;
- return intel_pt_inject_event(event, sample, type, pt->synth_needs_swap);
+ return intel_pt_inject_event(event, sample, type);
}
static int intel_pt_deliver_synth_b_event(struct intel_pt *pt,
@@ -2329,8 +2326,6 @@ static int intel_pt_synth_events(struct intel_pt *pt,
id += 1;
}
- pt->synth_needs_swap = evsel->needs_swap;
-
return 0;
}
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 270f3223c6df..b05a67464c03 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -1726,7 +1726,7 @@ static char *callchain_srcline(struct map *map, struct symbol *sym, u64 ip)
bool show_addr = callchain_param.key == CCKEY_ADDRESS;
srcline = get_srcline(map->dso, map__rip_2objdump(map, ip),
- sym, show_sym, show_addr);
+ sym, show_sym, show_addr, ip);
srcline__tree_insert(&map->dso->srclines, ip, srcline);
}
@@ -2204,7 +2204,7 @@ int thread__resolve_callchain(struct thread *thread,
{
int ret = 0;
- callchain_cursor_reset(&callchain_cursor);
+ callchain_cursor_reset(cursor);
if (callchain_param.order == ORDER_CALLEE) {
ret = thread__resolve_callchain_sample(thread, cursor,
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 6d40efd74402..8fe57031e1a8 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -419,7 +419,7 @@ int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
if (map && map->dso) {
srcline = get_srcline(map->dso,
map__rip_2objdump(map, addr), NULL,
- true, true);
+ true, true, addr);
if (srcline != SRCLINE_UNKNOWN)
ret = fprintf(fp, "%s%s", prefix, srcline);
free_srcline(srcline);
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index 0ddd9c199227..1ddc3d1d0147 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -20,12 +20,10 @@
#include "pmu.h"
#include "expr.h"
#include "rblist.h"
-#include "pmu.h"
#include <string.h>
#include <stdbool.h>
#include <errno.h>
#include "pmu-events/pmu-events.h"
-#include "strbuf.h"
#include "strlist.h"
#include <assert.h>
#include <ctype.h>
@@ -38,6 +36,10 @@ struct metric_event *metricgroup__lookup(struct rblist *metric_events,
struct metric_event me = {
.evsel = evsel
};
+
+ if (!metric_events)
+ return NULL;
+
nd = rblist__find(metric_events, &me);
if (nd)
return container_of(nd, struct metric_event, nd);
@@ -270,7 +272,7 @@ static void metricgroup__print_strlist(struct strlist *metrics, bool raw)
void metricgroup__print(bool metrics, bool metricgroups, char *filter,
bool raw)
{
- struct pmu_events_map *map = perf_pmu__find_map();
+ struct pmu_events_map *map = perf_pmu__find_map(NULL);
struct pmu_event *pe;
int i;
struct rblist groups;
@@ -368,7 +370,7 @@ void metricgroup__print(bool metrics, bool metricgroups, char *filter,
static int metricgroup__add_metric(const char *metric, struct strbuf *events,
struct list_head *group_list)
{
- struct pmu_events_map *map = perf_pmu__find_map();
+ struct pmu_events_map *map = perf_pmu__find_map(NULL);
struct pmu_event *pe;
int ret = -EINVAL;
int i, j;
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index 9fe5f9c7d577..05076e683938 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -21,33 +21,13 @@ size_t perf_mmap__mmap_len(struct perf_mmap *map)
}
/* 'end' must point to a good entry */
-static union perf_event *perf_mmap__read(struct perf_mmap *map, bool check_messup,
+static union perf_event *perf_mmap__read(struct perf_mmap *map,
u64 start, u64 end, u64 *prev)
{
unsigned char *data = map->base + page_size;
union perf_event *event = NULL;
int diff = end - start;
- if (check_messup) {
- /*
- * If we're further behind than half the buffer, there's a chance
- * the writer will bite our tail and mess up the samples under us.
- *
- * If we somehow ended up ahead of the 'end', we got messed up.
- *
- * In either case, truncate and restart at 'end'.
- */
- if (diff > map->mask / 2 || diff < 0) {
- fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
-
- /*
- * 'end' points to a known good entry, start there.
- */
- start = end;
- diff = 0;
- }
- }
-
if (diff >= (int)sizeof(event->header)) {
size_t size;
@@ -89,7 +69,7 @@ broken_event:
return event;
}
-union perf_event *perf_mmap__read_forward(struct perf_mmap *map, bool check_messup)
+union perf_event *perf_mmap__read_forward(struct perf_mmap *map)
{
u64 head;
u64 old = map->prev;
@@ -102,7 +82,7 @@ union perf_event *perf_mmap__read_forward(struct perf_mmap *map, bool check_mess
head = perf_mmap__read_head(map);
- return perf_mmap__read(map, check_messup, old, head, &map->prev);
+ return perf_mmap__read(map, old, head, &map->prev);
}
union perf_event *perf_mmap__read_backward(struct perf_mmap *map)
@@ -138,7 +118,7 @@ union perf_event *perf_mmap__read_backward(struct perf_mmap *map)
else
end = head + map->mask + 1;
- return perf_mmap__read(map, false, start, end, &map->prev);
+ return perf_mmap__read(map, start, end, &map->prev);
}
void perf_mmap__read_catchup(struct perf_mmap *map)
@@ -254,18 +234,18 @@ int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd)
return 0;
}
-static int backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
+static int overwrite_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
{
struct perf_event_header *pheader;
u64 evt_head = head;
int size = mask + 1;
- pr_debug2("backward_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);
+ pr_debug2("overwrite_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);
pheader = (struct perf_event_header *)(buf + (head & mask));
*start = head;
while (true) {
if (evt_head - head >= (unsigned int)size) {
- pr_debug("Finished reading backward ring buffer: rewind\n");
+ pr_debug("Finished reading overwrite ring buffer: rewind\n");
if (evt_head - head > (unsigned int)size)
evt_head -= pheader->size;
*end = evt_head;
@@ -275,7 +255,7 @@ static int backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64
pheader = (struct perf_event_header *)(buf + (evt_head & mask));
if (pheader->size == 0) {
- pr_debug("Finished reading backward ring buffer: get start\n");
+ pr_debug("Finished reading overwrite ring buffer: get start\n");
*end = evt_head;
return 0;
}
@@ -287,19 +267,7 @@ static int backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64
return -1;
}
-static int rb_find_range(void *data, int mask, u64 head, u64 old,
- u64 *start, u64 *end, bool backward)
-{
- if (!backward) {
- *start = old;
- *end = head;
- return 0;
- }
-
- return backward_rb_find_range(data, mask, head, start, end);
-}
-
-int perf_mmap__push(struct perf_mmap *md, bool overwrite, bool backward,
+int perf_mmap__push(struct perf_mmap *md, bool overwrite,
void *to, int push(void *to, void *buf, size_t size))
{
u64 head = perf_mmap__read_head(md);
@@ -310,19 +278,28 @@ int perf_mmap__push(struct perf_mmap *md, bool overwrite, bool backward,
void *buf;
int rc = 0;
- if (rb_find_range(data, md->mask, head, old, &start, &end, backward))
- return -1;
+ start = overwrite ? head : old;
+ end = overwrite ? old : head;
if (start == end)
return 0;
size = end - start;
if (size > (unsigned long)(md->mask) + 1) {
- WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
+ if (!overwrite) {
+ WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
- md->prev = head;
- perf_mmap__consume(md, overwrite || backward);
- return 0;
+ md->prev = head;
+ perf_mmap__consume(md, overwrite);
+ return 0;
+ }
+
+ /*
+ * The backward ring buffer is full. We still have a chance to read
+ * most of the data from it.
+ */
+ if (overwrite_rb_find_range(data, md->mask, head, &start, &end))
+ return -1;
}
if ((start & md->mask) + size != (end & md->mask)) {
@@ -346,7 +323,7 @@ int perf_mmap__push(struct perf_mmap *md, bool overwrite, bool backward,
}
md->prev = head;
- perf_mmap__consume(md, overwrite || backward);
+ perf_mmap__consume(md, overwrite);
out:
return rc;
}
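
With rb_find_range() gone, the range selection in perf_mmap__push() reduces to a pair of conditionals: a forward buffer reads [old, head), an overwrite buffer reads [head, old). A minimal standalone sketch of that selection, using toy types rather than the real struct perf_mmap:

#include <stdio.h>
#include <inttypes.h>
#include <stdbool.h>

/* Toy stand-in for the ring-buffer bookkeeping in struct perf_mmap. */
struct toy_rb {
	uint64_t head;	/* kernel-side write position */
	uint64_t prev;	/* last position consumed by user space ("old") */
};

/*
 * Range selection as in perf_mmap__push() after this change: forward
 * buffers read [old, head), overwrite buffers read [head, old).  The
 * real code additionally rewinds via overwrite_rb_find_range() when an
 * overwrite buffer has wrapped; that part is omitted here.
 */
static void select_range(const struct toy_rb *rb, bool overwrite,
			 uint64_t *start, uint64_t *end)
{
	*start = overwrite ? rb->head : rb->prev;
	*end   = overwrite ? rb->prev : rb->head;
}

int main(void)
{
	struct toy_rb rb = { .head = 4096, .prev = 1024 };
	uint64_t start, end;

	select_range(&rb, false, &start, &end);
	printf("forward:   [%" PRIu64 ", %" PRIu64 ")\n", start, end);
	select_range(&rb, true, &start, &end);
	printf("overwrite: [%" PRIu64 ", %" PRIu64 ")\n", start, end);
	return 0;
}
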
diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
index 3a5cb5a6e94a..e43d7b55a55f 100644
--- a/tools/perf/util/mmap.h
+++ b/tools/perf/util/mmap.h
@@ -86,10 +86,10 @@ static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
pc->data_tail = tail;
}
-union perf_event *perf_mmap__read_forward(struct perf_mmap *map, bool check_messup);
+union perf_event *perf_mmap__read_forward(struct perf_mmap *map);
union perf_event *perf_mmap__read_backward(struct perf_mmap *map);
-int perf_mmap__push(struct perf_mmap *md, bool overwrite, bool backward,
+int perf_mmap__push(struct perf_mmap *md, bool overwrite,
void *to, int push(void *to, void *buf, size_t size));
size_t perf_mmap__mmap_len(struct perf_mmap *map);
diff --git a/tools/perf/util/ordered-events.c b/tools/perf/util/ordered-events.c
index 8e09fd2d842f..bad9e0296e9a 100644
--- a/tools/perf/util/ordered-events.c
+++ b/tools/perf/util/ordered-events.c
@@ -157,9 +157,8 @@ void ordered_events__delete(struct ordered_events *oe, struct ordered_event *eve
}
int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
- struct perf_sample *sample, u64 file_offset)
+ u64 timestamp, u64 file_offset)
{
- u64 timestamp = sample->time;
struct ordered_event *oevent;
if (!timestamp || timestamp == ~0ULL)
diff --git a/tools/perf/util/ordered-events.h b/tools/perf/util/ordered-events.h
index 96e5292d88e2..8c7a2948593e 100644
--- a/tools/perf/util/ordered-events.h
+++ b/tools/perf/util/ordered-events.h
@@ -45,7 +45,7 @@ struct ordered_events {
};
int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
- struct perf_sample *sample, u64 file_offset);
+ u64 timestamp, u64 file_offset);
void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event);
int ordered_events__flush(struct ordered_events *oe, enum oe_flush how);
void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver);
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 170316795a18..34589c427e52 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -4,6 +4,9 @@
#include <dirent.h>
#include <errno.h>
#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
#include <sys/param.h>
#include "term.h"
#include "../perf.h"
diff --git a/tools/perf/util/path.c b/tools/perf/util/path.c
index 933f5c6bffb4..ca56ba2dd3da 100644
--- a/tools/perf/util/path.c
+++ b/tools/perf/util/path.c
@@ -18,6 +18,7 @@
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
+#include <dirent.h>
#include <unistd.h>
static char bad_path[] = "/bad-path/";
@@ -77,3 +78,16 @@ bool is_regular_file(const char *file)
return S_ISREG(st.st_mode);
}
+
+/* Helper function for filesystems that return dent->d_type as DT_UNKNOWN */
+bool is_directory(const char *base_path, const struct dirent *dent)
+{
+ char path[PATH_MAX];
+ struct stat st;
+
+ sprintf(path, "%s/%s", base_path, dent->d_name);
+ if (stat(path, &st))
+ return false;
+
+ return S_ISDIR(st.st_mode);
+}
diff --git a/tools/perf/util/path.h b/tools/perf/util/path.h
index 14a254ada7eb..f014f905df50 100644
--- a/tools/perf/util/path.h
+++ b/tools/perf/util/path.h
@@ -2,9 +2,12 @@
#ifndef _PERF_PATH_H
#define _PERF_PATH_H
+struct dirent;
+
int path__join(char *bf, size_t size, const char *path1, const char *path2);
int path__join3(char *bf, size_t size, const char *path1, const char *path2, const char *path3);
bool is_regular_file(const char *file);
+bool is_directory(const char *base_path, const struct dirent *dent);
#endif /* _PERF_PATH_H */
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 80fb1593913a..57e38fdf0b34 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -12,6 +12,7 @@
#include <dirent.h>
#include <api/fs/fs.h>
#include <locale.h>
+#include <regex.h>
#include "util.h"
#include "pmu.h"
#include "parse-events.h"
@@ -537,17 +538,45 @@ static bool pmu_is_uncore(const char *name)
}
/*
+ * Core PMU devices may have a name other than "cpu" in sysfs on some
+ * platforms. Look for known sysfs files to identify a core device.
+ */
+static int is_pmu_core(const char *name)
+{
+ struct stat st;
+ char path[PATH_MAX];
+ const char *sysfs = sysfs__mountpoint();
+
+ if (!sysfs)
+ return 0;
+
+ /* Look for cpu sysfs (x86 and others) */
+ scnprintf(path, PATH_MAX, "%s/bus/event_source/devices/cpu", sysfs);
+ if ((stat(path, &st) == 0) &&
+ (strncmp(name, "cpu", strlen("cpu")) == 0))
+ return 1;
+
+ /* Look for cpu sysfs (specific to arm) */
+ scnprintf(path, PATH_MAX, "%s/bus/event_source/devices/%s/cpus",
+ sysfs, name);
+ if (stat(path, &st) == 0)
+ return 1;
+
+ return 0;
+}
+
+/*
* Return the CPU id as a raw string.
*
* Each architecture should provide a more precise id string that
* can be used to match the architecture's "mapfile".
*/
-char * __weak get_cpuid_str(void)
+char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
{
return NULL;
}
-static char *perf_pmu__getcpuid(void)
+static char *perf_pmu__getcpuid(struct perf_pmu *pmu)
{
char *cpuid;
static bool printed;
@@ -556,7 +585,7 @@ static char *perf_pmu__getcpuid(void)
if (cpuid)
cpuid = strdup(cpuid);
if (!cpuid)
- cpuid = get_cpuid_str();
+ cpuid = get_cpuid_str(pmu);
if (!cpuid)
return NULL;
@@ -567,22 +596,45 @@ static char *perf_pmu__getcpuid(void)
return cpuid;
}
-struct pmu_events_map *perf_pmu__find_map(void)
+struct pmu_events_map *perf_pmu__find_map(struct perf_pmu *pmu)
{
struct pmu_events_map *map;
- char *cpuid = perf_pmu__getcpuid();
+ char *cpuid = perf_pmu__getcpuid(pmu);
int i;
+ /* On some platforms that use a cpus map, cpuid can be NULL for
+ * PMUs other than core PMUs.
+ */
+ if (!cpuid)
+ return NULL;
+
i = 0;
for (;;) {
+ regex_t re;
+ regmatch_t pmatch[1];
+ int match;
+
map = &pmu_events_map[i++];
if (!map->table) {
map = NULL;
break;
}
- if (!strcmp(map->cpuid, cpuid))
+ if (regcomp(&re, map->cpuid, REG_EXTENDED) != 0) {
+ /* Warn that this cpuid pattern could not be compiled. */
+ pr_info("Invalid regular expression %s\n", map->cpuid);
break;
+ }
+
+ match = !regexec(&re, cpuid, 1, pmatch, 0);
+ regfree(&re);
+ if (match) {
+ size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so);
+
+ /* Verify the entire string matched. */
+ if (match_len == strlen(cpuid))
+ break;
+ }
}
free(cpuid);
return map;
@@ -593,13 +645,14 @@ struct pmu_events_map *perf_pmu__find_map(void)
* to the current running CPU. Then, add all PMU events from that table
* as aliases.
*/
-static void pmu_add_cpu_aliases(struct list_head *head, const char *name)
+static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
{
int i;
struct pmu_events_map *map;
struct pmu_event *pe;
+ const char *name = pmu->name;
- map = perf_pmu__find_map();
+ map = perf_pmu__find_map(pmu);
if (!map)
return;
@@ -608,7 +661,6 @@ static void pmu_add_cpu_aliases(struct list_head *head, const char *name)
*/
i = 0;
while (1) {
- const char *pname;
pe = &map->table[i++];
if (!pe->name) {
@@ -617,9 +669,13 @@ static void pmu_add_cpu_aliases(struct list_head *head, const char *name)
break;
}
- pname = pe->pmu ? pe->pmu : "cpu";
- if (strncmp(pname, name, strlen(pname)))
- continue;
+ if (!is_pmu_core(name)) {
+ /* check for uncore devices */
+ if (pe->pmu == NULL)
+ continue;
+ if (strncmp(pe->pmu, name, strlen(pe->pmu)))
+ continue;
+ }
/* need type casts to override 'const' */
__perf_pmu__new_alias(head, NULL, (char *)pe->name,
@@ -661,21 +717,20 @@ static struct perf_pmu *pmu_lookup(const char *name)
if (pmu_aliases(name, &aliases))
return NULL;
- pmu_add_cpu_aliases(&aliases, name);
pmu = zalloc(sizeof(*pmu));
if (!pmu)
return NULL;
pmu->cpus = pmu_cpumask(name);
-
+ pmu->name = strdup(name);
+ pmu->type = type;
pmu->is_uncore = pmu_is_uncore(name);
+ pmu_add_cpu_aliases(&aliases, pmu);
INIT_LIST_HEAD(&pmu->format);
INIT_LIST_HEAD(&pmu->aliases);
list_splice(&format, &pmu->format);
list_splice(&aliases, &pmu->aliases);
- pmu->name = strdup(name);
- pmu->type = type;
list_add_tail(&pmu->list, &pmus);
pmu->default_config = perf_pmu__get_default_config(pmu);
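
perf_pmu__find_map() now treats each mapfile cpuid entry as a POSIX extended regular expression and accepts it only when the match spans the whole cpuid string. A self-contained sketch of that match-entire-string check; the pattern and cpuid below are made up for illustration:

#include <regex.h>
#include <stdio.h>
#include <string.h>

/* Returns 1 if 're_str' (POSIX ERE) matches all of 'cpuid', else 0. */
static int cpuid_matches(const char *re_str, const char *cpuid)
{
	regex_t re;
	regmatch_t pmatch[1];
	int match = 0;

	if (regcomp(&re, re_str, REG_EXTENDED) != 0)
		return 0;	/* invalid pattern: treat as no match */

	if (!regexec(&re, cpuid, 1, pmatch, 0)) {
		size_t len = pmatch[0].rm_eo - pmatch[0].rm_so;

		/* Accept only if the entire string matched. */
		match = (pmatch[0].rm_so == 0 && len == strlen(cpuid));
	}
	regfree(&re);
	return match;
}

int main(void)
{
	/* Made-up mapfile entry and cpuid, for illustration only. */
	printf("%d\n", cpuid_matches("0x00000000420f51[[:xdigit:]]{2}$",
				     "0x00000000420f5160"));
	return 0;
}
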
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index 27c75e635866..76fecec7b3f9 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -92,6 +92,6 @@ int perf_pmu__test(void);
struct perf_event_attr *perf_pmu__get_default_config(struct perf_pmu *pmu);
-struct pmu_events_map *perf_pmu__find_map(void);
+struct pmu_events_map *perf_pmu__find_map(struct perf_pmu *pmu);
#endif /* __PMU_H */
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index b7aaf9b2294d..e1dbc9821617 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -1325,27 +1325,30 @@ static int parse_perf_probe_event_name(char **arg, struct perf_probe_event *pev)
{
char *ptr;
- ptr = strchr(*arg, ':');
+ ptr = strpbrk_esc(*arg, ":");
if (ptr) {
*ptr = '\0';
if (!pev->sdt && !is_c_func_name(*arg))
goto ng_name;
- pev->group = strdup(*arg);
+ pev->group = strdup_esc(*arg);
if (!pev->group)
return -ENOMEM;
*arg = ptr + 1;
} else
pev->group = NULL;
- if (!pev->sdt && !is_c_func_name(*arg)) {
+
+ pev->event = strdup_esc(*arg);
+ if (pev->event == NULL)
+ return -ENOMEM;
+
+ if (!pev->sdt && !is_c_func_name(pev->event)) {
+ zfree(&pev->event);
ng_name:
+ zfree(&pev->group);
semantic_error("%s is bad for event name -it must "
"follow C symbol-naming rule.\n", *arg);
return -EINVAL;
}
- pev->event = strdup(*arg);
- if (pev->event == NULL)
- return -ENOMEM;
-
return 0;
}
@@ -1373,7 +1376,7 @@ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
arg++;
}
- ptr = strpbrk(arg, ";=@+%");
+ ptr = strpbrk_esc(arg, ";=@+%");
if (pev->sdt) {
if (ptr) {
if (*ptr != '@') {
@@ -1387,7 +1390,7 @@ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
pev->target = build_id_cache__origname(tmp);
free(tmp);
} else
- pev->target = strdup(ptr + 1);
+ pev->target = strdup_esc(ptr + 1);
if (!pev->target)
return -ENOMEM;
*ptr = '\0';
@@ -1421,13 +1424,14 @@ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
*
* Otherwise, we consider arg to be a function specification.
*/
- if (!strpbrk(arg, "+@%") && (ptr = strpbrk(arg, ";:")) != NULL) {
+ if (!strpbrk_esc(arg, "+@%")) {
+ ptr = strpbrk_esc(arg, ";:");
/* This is a file spec if it includes a '.' before ; or : */
- if (memchr(arg, '.', ptr - arg))
+ if (ptr && memchr(arg, '.', ptr - arg))
file_spec = true;
}
- ptr = strpbrk(arg, ";:+@%");
+ ptr = strpbrk_esc(arg, ";:+@%");
if (ptr) {
nc = *ptr;
*ptr++ = '\0';
@@ -1436,7 +1440,7 @@ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
if (arg[0] == '\0')
tmp = NULL;
else {
- tmp = strdup(arg);
+ tmp = strdup_esc(arg);
if (tmp == NULL)
return -ENOMEM;
}
@@ -1469,12 +1473,12 @@ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
arg = ptr;
c = nc;
if (c == ';') { /* Lazy pattern must be the last part */
- pp->lazy_line = strdup(arg);
+ pp->lazy_line = strdup(arg); /* leave the escapes as-is */
if (pp->lazy_line == NULL)
return -ENOMEM;
break;
}
- ptr = strpbrk(arg, ";:+@%");
+ ptr = strpbrk_esc(arg, ";:+@%");
if (ptr) {
nc = *ptr;
*ptr++ = '\0';
@@ -1501,7 +1505,7 @@ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
semantic_error("SRC@SRC is not allowed.\n");
return -EINVAL;
}
- pp->file = strdup(arg);
+ pp->file = strdup_esc(arg);
if (pp->file == NULL)
return -ENOMEM;
break;
@@ -2573,7 +2577,8 @@ int show_perf_probe_events(struct strfilter *filter)
}
static int get_new_event_name(char *buf, size_t len, const char *base,
- struct strlist *namelist, bool allow_suffix)
+ struct strlist *namelist, bool ret_event,
+ bool allow_suffix)
{
int i, ret;
char *p, *nbase;
@@ -2584,13 +2589,13 @@ static int get_new_event_name(char *buf, size_t len, const char *base,
if (!nbase)
return -ENOMEM;
- /* Cut off the dot suffixes (e.g. .const, .isra)*/
- p = strchr(nbase, '.');
+ /* Cut off the dot suffixes (e.g. .const, .isra) and version suffixes */
+ p = strpbrk(nbase, ".@");
if (p && p != nbase)
*p = '\0';
/* Try no suffix number */
- ret = e_snprintf(buf, len, "%s", nbase);
+ ret = e_snprintf(buf, len, "%s%s", nbase, ret_event ? "__return" : "");
if (ret < 0) {
pr_debug("snprintf() failed: %d\n", ret);
goto out;
@@ -2625,6 +2630,14 @@ static int get_new_event_name(char *buf, size_t len, const char *base,
out:
free(nbase);
+
+ /* Final validation */
+ if (ret >= 0 && !is_c_func_name(buf)) {
+ pr_warning("Internal error: \"%s\" is an invalid event name.\n",
+ buf);
+ ret = -EINVAL;
+ }
+
return ret;
}
@@ -2681,8 +2694,8 @@ static int probe_trace_event__set_name(struct probe_trace_event *tev,
group = PERFPROBE_GROUP;
/* Get an unused new event name */
- ret = get_new_event_name(buf, 64, event,
- namelist, allow_suffix);
+ ret = get_new_event_name(buf, 64, event, namelist,
+ tev->point.retprobe, allow_suffix);
if (ret < 0)
return ret;
@@ -2792,16 +2805,40 @@ static int find_probe_functions(struct map *map, char *name,
int found = 0;
struct symbol *sym;
struct rb_node *tmp;
+ const char *norm, *ver;
+ char *buf = NULL;
+ bool cut_version = true;
if (map__load(map) < 0)
return 0;
+ /* If the user gives a version, don't cut off the version from symbols */
+ if (strchr(name, '@'))
+ cut_version = false;
+
map__for_each_symbol(map, sym, tmp) {
- if (strglobmatch(sym->name, name)) {
+ norm = arch__normalize_symbol_name(sym->name);
+ if (!norm)
+ continue;
+
+ if (cut_version) {
+ /* We don't care whether it is the default symbol or not */
+ ver = strchr(norm, '@');
+ if (ver) {
+ buf = strndup(norm, ver - norm);
+ if (!buf)
+ return -ENOMEM;
+ norm = buf;
+ }
+ }
+
+ if (strglobmatch(norm, name)) {
found++;
if (syms && found < probe_conf.max_probes)
syms[found - 1] = sym;
}
+ if (buf)
+ zfree(&buf);
}
return found;
@@ -2847,7 +2884,7 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
* same name but different addresses, this lists all the symbols.
*/
num_matched_functions = find_probe_functions(map, pp->function, syms);
- if (num_matched_functions == 0) {
+ if (num_matched_functions <= 0) {
pr_err("Failed to find symbol %s in %s\n", pp->function,
pev->target ? : "kernel");
ret = -ENOENT;
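
get_new_event_name() now strips both dot suffixes and @-version suffixes, and appends __return for return probes. A simplified standalone sketch of that naming, with no suffix numbering or namelist checks; the symbol below is made up:

#include <stdio.h>
#include <string.h>

/*
 * Build a probe event name roughly as get_new_event_name() now does:
 * cut ".isra"/".constprop"-style and "@..." version suffixes, then
 * append "__return" for return probes.
 */
static void build_event_name(char *buf, size_t len, const char *base,
			     int retprobe)
{
	char nbase[128];
	char *p;

	snprintf(nbase, sizeof(nbase), "%s", base);
	p = strpbrk(nbase, ".@");
	if (p && p != nbase)
		*p = '\0';
	snprintf(buf, len, "%s%s", nbase, retprobe ? "__return" : "");
}

int main(void)
{
	char buf[144];

	build_event_name(buf, sizeof(buf), "malloc@GLIBC_2.2.5", 1);
	printf("%s\n", buf);	/* -> malloc__return */
	return 0;
}
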
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
index b4f2f06722a7..7aa0ea64544e 100644
--- a/tools/perf/util/python-ext-sources
+++ b/tools/perf/util/python-ext-sources
@@ -10,6 +10,7 @@ util/ctype.c
util/evlist.c
util/evsel.c
util/cpumap.c
+util/memswap.c
util/mmap.c
util/namespaces.c
../lib/bitmap.c
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 8e49d9cafcfc..b1e999bd21ef 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -864,7 +864,7 @@ static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist,
&pages, &overwrite))
return NULL;
- if (perf_evlist__mmap(evlist, pages, overwrite) < 0) {
+ if (perf_evlist__mmap(evlist, pages) < 0) {
PyErr_SetFromErrno(PyExc_OSError);
return NULL;
}
diff --git a/tools/perf/util/rblist.c b/tools/perf/util/rblist.c
index 0dfe27d99458..0efc3258c648 100644
--- a/tools/perf/util/rblist.c
+++ b/tools/perf/util/rblist.c
@@ -101,16 +101,21 @@ void rblist__init(struct rblist *rblist)
return;
}
+void rblist__exit(struct rblist *rblist)
+{
+ struct rb_node *pos, *next = rb_first(&rblist->entries);
+
+ while (next) {
+ pos = next;
+ next = rb_next(pos);
+ rblist__remove_node(rblist, pos);
+ }
+}
+
void rblist__delete(struct rblist *rblist)
{
if (rblist != NULL) {
- struct rb_node *pos, *next = rb_first(&rblist->entries);
-
- while (next) {
- pos = next;
- next = rb_next(pos);
- rblist__remove_node(rblist, pos);
- }
+ rblist__exit(rblist);
free(rblist);
}
}
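
The split gives callers an exit variant for rblists embedded in larger objects (such as the runtime_stat added later in this series) and keeps delete for heap-allocated lists. A generic sketch of the same exit/delete pattern on a toy list, not the real rblist API:

#include <stdlib.h>

struct node { struct node *next; };
struct list { struct node *head; };

/* exit-style teardown: empty the list but leave the container alone,
 * usable when the list is embedded in a larger object */
static void list_exit(struct list *l)
{
	struct node *pos = l->head, *next;

	while (pos) {
		next = pos->next;
		free(pos);
		pos = next;
	}
	l->head = NULL;
}

/* delete-style teardown: exit plus freeing a heap-allocated container */
static void list_delete(struct list *l)
{
	if (l) {
		list_exit(l);
		free(l);
	}
}

int main(void)
{
	struct list embedded = { .head = calloc(1, sizeof(struct node)) };
	struct list *heap = calloc(1, sizeof(*heap));

	list_exit(&embedded);	/* embedded: container is not freed */
	list_delete(heap);	/* heap-allocated: free everything */
	return 0;
}
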
diff --git a/tools/perf/util/rblist.h b/tools/perf/util/rblist.h
index 4c8638a22571..76df15c27f5f 100644
--- a/tools/perf/util/rblist.h
+++ b/tools/perf/util/rblist.h
@@ -29,6 +29,7 @@ struct rblist {
};
void rblist__init(struct rblist *rblist);
+void rblist__exit(struct rblist *rblist);
void rblist__delete(struct rblist *rblist);
int rblist__add_node(struct rblist *rblist, const void *new_entry);
void rblist__remove_node(struct rblist *rblist, struct rb_node *rb_node);
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index c7187f067d31..ea070883c593 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -43,7 +43,6 @@
#include "../db-export.h"
#include "../thread-stack.h"
#include "../trace-event.h"
-#include "../machine.h"
#include "../call-path.h"
#include "thread_map.h"
#include "cpumap.h"
@@ -500,6 +499,8 @@ static PyObject *get_perf_sample_dict(struct perf_sample *sample,
PyLong_FromUnsignedLongLong(sample->time));
pydict_set_item_string_decref(dict_sample, "period",
PyLong_FromUnsignedLongLong(sample->period));
+ pydict_set_item_string_decref(dict_sample, "phys_addr",
+ PyLong_FromUnsignedLongLong(sample->phys_addr));
set_sample_read_in_dict(dict_sample, sample, evsel);
pydict_set_item_string_decref(dict, "sample", dict_sample);
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 5c412310f266..c71ced7db152 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -27,7 +27,6 @@
static int perf_session__deliver_event(struct perf_session *session,
union perf_event *event,
- struct perf_sample *sample,
struct perf_tool *tool,
u64 file_offset);
@@ -107,17 +106,10 @@ static void perf_session__set_comm_exec(struct perf_session *session)
static int ordered_events__deliver_event(struct ordered_events *oe,
struct ordered_event *event)
{
- struct perf_sample sample;
struct perf_session *session = container_of(oe, struct perf_session,
ordered_events);
- int ret = perf_evlist__parse_sample(session->evlist, event->event, &sample);
-
- if (ret) {
- pr_err("Can't parse sample, err = %d\n", ret);
- return ret;
- }
- return perf_session__deliver_event(session, event->event, &sample,
+ return perf_session__deliver_event(session, event->event,
session->tool, event->file_offset);
}
@@ -873,9 +865,9 @@ static int process_finished_round(struct perf_tool *tool __maybe_unused,
}
int perf_session__queue_event(struct perf_session *s, union perf_event *event,
- struct perf_sample *sample, u64 file_offset)
+ u64 timestamp, u64 file_offset)
{
- return ordered_events__queue(&s->ordered_events, event, sample, file_offset);
+ return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset);
}
static void callchain__lbr_callstack_printf(struct perf_sample *sample)
@@ -1328,20 +1320,26 @@ static int machines__deliver_event(struct machines *machines,
static int perf_session__deliver_event(struct perf_session *session,
union perf_event *event,
- struct perf_sample *sample,
struct perf_tool *tool,
u64 file_offset)
{
+ struct perf_sample sample;
int ret;
- ret = auxtrace__process_event(session, event, sample, tool);
+ ret = perf_evlist__parse_sample(session->evlist, event, &sample);
+ if (ret) {
+ pr_err("Can't parse sample, err = %d\n", ret);
+ return ret;
+ }
+
+ ret = auxtrace__process_event(session, event, &sample, tool);
if (ret < 0)
return ret;
if (ret > 0)
return 0;
return machines__deliver_event(&session->machines, session->evlist,
- event, sample, tool, file_offset);
+ event, &sample, tool, file_offset);
}
static s64 perf_session__process_user_event(struct perf_session *session,
@@ -1350,10 +1348,11 @@ static s64 perf_session__process_user_event(struct perf_session *session,
{
struct ordered_events *oe = &session->ordered_events;
struct perf_tool *tool = session->tool;
+ struct perf_sample sample = { .time = 0, };
int fd = perf_data__fd(session->data);
int err;
- dump_event(session->evlist, event, file_offset, NULL);
+ dump_event(session->evlist, event, file_offset, &sample);
/* These events are processed right away */
switch (event->header.type) {
@@ -1495,7 +1494,6 @@ static s64 perf_session__process_event(struct perf_session *session,
{
struct perf_evlist *evlist = session->evlist;
struct perf_tool *tool = session->tool;
- struct perf_sample sample;
int ret;
if (session->header.needs_swap)
@@ -1509,21 +1507,19 @@ static s64 perf_session__process_event(struct perf_session *session,
if (event->header.type >= PERF_RECORD_USER_TYPE_START)
return perf_session__process_user_event(session, event, file_offset);
- /*
- * For all kernel events we get the sample data
- */
- ret = perf_evlist__parse_sample(evlist, event, &sample);
- if (ret)
- return ret;
-
if (tool->ordered_events) {
- ret = perf_session__queue_event(session, event, &sample, file_offset);
+ u64 timestamp = -1ULL;
+
+ ret = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp);
+ if (ret && ret != -1)
+ return ret;
+
+ ret = perf_session__queue_event(session, event, timestamp, file_offset);
if (ret != -ETIME)
return ret;
}
- return perf_session__deliver_event(session, event, &sample, tool,
- file_offset);
+ return perf_session__deliver_event(session, event, tool, file_offset);
}
void perf_event_header__bswap(struct perf_event_header *hdr)
@@ -1777,7 +1773,8 @@ done:
err = perf_session__flush_thread_stacks(session);
out_err:
free(buf);
- perf_session__warn_about_errors(session);
+ if (!tool->no_warn)
+ perf_session__warn_about_errors(session);
ordered_events__free(&session->ordered_events);
auxtrace__free_events(session);
return err;
@@ -1933,7 +1930,8 @@ out:
err = perf_session__flush_thread_stacks(session);
out_err:
ui_progress__finish();
- perf_session__warn_about_errors(session);
+ if (!tool->no_warn)
+ perf_session__warn_about_errors(session);
/*
* We may be switching perf.data output; make ordered_events
* reusable.
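
The net effect in session.c is a cheaper queueing path: only the timestamp is extracted when an event is queued for ordering, and the full sample is parsed once, at delivery. A toy sketch of that split, with made-up types standing in for perf_event and perf_sample:

#include <stdio.h>
#include <stdint.h>

/* Toy event: a timestamp plus an expensive-to-parse payload. */
struct toy_event {
	uint64_t time;
	const char *payload;
};

/* Queue-time: extract only the timestamp (cheap). */
static uint64_t parse_timestamp(const struct toy_event *ev)
{
	return ev->time;
}

/* Delivery-time: do the full parse exactly once. */
static void deliver(const struct toy_event *ev)
{
	printf("delivering payload '%s' @%llu\n", ev->payload,
	       (unsigned long long)ev->time);
}

int main(void)
{
	struct toy_event ev = { .time = 42, .payload = "sample" };
	uint64_t ts = parse_timestamp(&ev);	/* used only for ordering */

	if (ts != (uint64_t)-1)
		deliver(&ev);			/* full parse happens here */
	return 0;
}
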
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index da1434a7c120..da40b4b380ca 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -53,7 +53,7 @@ int perf_session__peek_event(struct perf_session *session, off_t file_offset,
int perf_session__process_events(struct perf_session *session);
int perf_session__queue_event(struct perf_session *s, union perf_event *event,
- struct perf_sample *sample, u64 file_offset);
+ u64 timestamp, u64 file_offset);
void perf_tool__fill_defaults(struct perf_tool *tool);
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index a00eacdf02ed..2da4d0456a03 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -336,7 +336,7 @@ char *hist_entry__get_srcline(struct hist_entry *he)
return SRCLINE_UNKNOWN;
return get_srcline(map->dso, map__rip_2objdump(map, he->ip),
- he->ms.sym, true, true);
+ he->ms.sym, true, true, he->ip);
}
static int64_t
@@ -380,7 +380,8 @@ sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
map__rip_2objdump(map,
left->branch_info->from.al_addr),
left->branch_info->from.sym,
- true, true);
+ true, true,
+ left->branch_info->from.al_addr);
}
if (!right->branch_info->srcline_from) {
struct map *map = right->branch_info->from.map;
@@ -391,7 +392,8 @@ sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
map__rip_2objdump(map,
right->branch_info->from.al_addr),
right->branch_info->from.sym,
- true, true);
+ true, true,
+ right->branch_info->from.al_addr);
}
return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
}
@@ -423,7 +425,8 @@ sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
map__rip_2objdump(map,
left->branch_info->to.al_addr),
left->branch_info->from.sym,
- true, true);
+ true, true,
+ left->branch_info->to.al_addr);
}
if (!right->branch_info->srcline_to) {
struct map *map = right->branch_info->to.map;
@@ -434,7 +437,8 @@ sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
map__rip_2objdump(map,
right->branch_info->to.al_addr),
right->branch_info->to.sym,
- true, true);
+ true, true,
+ right->branch_info->to.al_addr);
}
return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
}
@@ -465,7 +469,7 @@ static char *hist_entry__get_srcfile(struct hist_entry *e)
return no_srcfile;
sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip),
- e->ms.sym, false, true, true);
+ e->ms.sym, false, true, true, e->ip);
if (!strcmp(sf, SRCLINE_UNKNOWN))
return no_srcfile;
p = strchr(sf, ':');
@@ -2883,10 +2887,10 @@ static int setup_output_list(struct perf_hpp_list *list, char *str)
tok; tok = strtok_r(NULL, ", ", &tmp)) {
ret = output_field_add(list, tok);
if (ret == -EINVAL) {
- pr_err("Invalid --fields key: `%s'", tok);
+ ui__error("Invalid --fields key: `%s'", tok);
break;
} else if (ret == -ESRCH) {
- pr_err("Unknown --fields key: `%s'", tok);
+ ui__error("Unknown --fields key: `%s'", tok);
break;
}
}
diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c
index d19f05c56de6..3c21fd059b64 100644
--- a/tools/perf/util/srcline.c
+++ b/tools/perf/util/srcline.c
@@ -496,7 +496,8 @@ out:
#define A2L_FAIL_LIMIT 123
char *__get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
- bool show_sym, bool show_addr, bool unwind_inlines)
+ bool show_sym, bool show_addr, bool unwind_inlines,
+ u64 ip)
{
char *file = NULL;
unsigned line = 0;
@@ -536,7 +537,7 @@ out:
if (sym) {
if (asprintf(&srcline, "%s+%" PRIu64, show_sym ? sym->name : "",
- addr - sym->start) < 0)
+ ip - sym->start) < 0)
return SRCLINE_UNKNOWN;
} else if (asprintf(&srcline, "%s[%" PRIx64 "]", dso->short_name, addr) < 0)
return SRCLINE_UNKNOWN;
@@ -550,9 +551,9 @@ void free_srcline(char *srcline)
}
char *get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
- bool show_sym, bool show_addr)
+ bool show_sym, bool show_addr, u64 ip)
{
- return __get_srcline(dso, addr, sym, show_sym, show_addr, false);
+ return __get_srcline(dso, addr, sym, show_sym, show_addr, false, ip);
}
struct srcline_node {
diff --git a/tools/perf/util/srcline.h b/tools/perf/util/srcline.h
index 847b7086182c..b2bb5502fd62 100644
--- a/tools/perf/util/srcline.h
+++ b/tools/perf/util/srcline.h
@@ -11,9 +11,10 @@ struct symbol;
extern bool srcline_full_filename;
char *get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
- bool show_sym, bool show_addr);
+ bool show_sym, bool show_addr, u64 ip);
char *__get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
- bool show_sym, bool show_addr, bool unwind_inlines);
+ bool show_sym, bool show_addr, bool unwind_inlines,
+ u64 ip);
void free_srcline(char *srcline);
/* insert the srcline into the DSO, which will take ownership */
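
The extra ip argument lets the sym+offset fallback be computed from the original ip, while addr keeps the objdump-relocated address used for the addr2line lookup. A tiny sketch of the fallback formatting, with illustrative names and values:

#include <stdio.h>
#include <inttypes.h>

/*
 * Fallback formatting as in __get_srcline() after this change: the
 * symbol offset comes from the original ip, not the objdump address.
 */
static void print_fallback(const char *sym_name, uint64_t sym_start,
			   uint64_t ip)
{
	printf("%s+%" PRIu64 "\n", sym_name, ip - sym_start);
}

int main(void)
{
	print_fallback("main", 0x400500, 0x400520);	/* -> main+32 */
	return 0;
}
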
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 855e35cbb1dc..594d14a02b67 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -9,17 +9,6 @@
#include "expr.h"
#include "metricgroup.h"
-enum {
- CTX_BIT_USER = 1 << 0,
- CTX_BIT_KERNEL = 1 << 1,
- CTX_BIT_HV = 1 << 2,
- CTX_BIT_HOST = 1 << 3,
- CTX_BIT_IDLE = 1 << 4,
- CTX_BIT_MAX = 1 << 5,
-};
-
-#define NUM_CTX CTX_BIT_MAX
-
/*
* AGGR_GLOBAL: Use CPU 0
* AGGR_SOCKET: Use first CPU of socket
@@ -27,36 +16,18 @@ enum {
* AGGR_NONE: Use matching CPU
* AGGR_THREAD: Not supported?
*/
-static struct stats runtime_nsecs_stats[MAX_NR_CPUS];
-static struct stats runtime_cycles_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_stalled_cycles_front_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_stalled_cycles_back_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_branches_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_cacherefs_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_l1_dcache_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_l1_icache_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_ll_cache_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_itlb_cache_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_dtlb_cache_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_cycles_in_tx_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_transaction_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_elision_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_topdown_total_slots[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_topdown_slots_issued[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_topdown_slots_retired[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_topdown_fetch_bubbles[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_topdown_recovery_bubbles[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_smi_num_stats[NUM_CTX][MAX_NR_CPUS];
-static struct stats runtime_aperf_stats[NUM_CTX][MAX_NR_CPUS];
-static struct rblist runtime_saved_values;
static bool have_frontend_stalled;
+struct runtime_stat rt_stat;
struct stats walltime_nsecs_stats;
struct saved_value {
struct rb_node rb_node;
struct perf_evsel *evsel;
+ enum stat_type type;
+ int ctx;
int cpu;
+ struct runtime_stat *stat;
struct stats stats;
};
@@ -69,6 +40,30 @@ static int saved_value_cmp(struct rb_node *rb_node, const void *entry)
if (a->cpu != b->cpu)
return a->cpu - b->cpu;
+
+ /*
+ * Previously the rbtree was used to link generic metrics.
+ * The keys were evsel/cpu. Now the rbtree is extended to support
+ * per-thread shadow stats. In the shadow stats case, the keys
+ * are cpu/type/ctx/stat (evsel is NULL). In the generic metrics
+ * case, the keys are still evsel/cpu (type/ctx/stat are 0 or NULL).
+ */
+ if (a->type != b->type)
+ return a->type - b->type;
+
+ if (a->ctx != b->ctx)
+ return a->ctx - b->ctx;
+
+ if (a->evsel == NULL && b->evsel == NULL) {
+ if (a->stat == b->stat)
+ return 0;
+
+ if ((char *)a->stat < (char *)b->stat)
+ return -1;
+
+ return 1;
+ }
+
if (a->evsel == b->evsel)
return 0;
if ((char *)a->evsel < (char *)b->evsel)
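
The comparator now forms a composite key, so one rbtree can hold both generic-metric values (keyed by evsel/cpu) and per-thread shadow stats (keyed by cpu/type/ctx/stat). A standalone sketch of the same compare-in-stages pattern; the field names are toys, not the real struct saved_value:

#include <stdio.h>

/* Composite lookup key in the spirit of the extended saved_value_cmp():
 * compare cpu, then type, then ctx, then fall back to pointer identity. */
struct toy_key {
	int cpu;
	int type;
	int ctx;
	const void *owner;	/* evsel pointer, or NULL for shadow stats */
};

static int toy_key_cmp(const struct toy_key *a, const struct toy_key *b)
{
	if (a->cpu != b->cpu)
		return a->cpu - b->cpu;
	if (a->type != b->type)
		return a->type - b->type;
	if (a->ctx != b->ctx)
		return a->ctx - b->ctx;
	if (a->owner == b->owner)
		return 0;
	return (const char *)a->owner < (const char *)b->owner ? -1 : 1;
}

int main(void)
{
	struct toy_key a = { .cpu = 0, .type = 1, .ctx = 0, .owner = NULL };
	struct toy_key b = { .cpu = 0, .type = 2, .ctx = 0, .owner = NULL };

	printf("%d\n", toy_key_cmp(&a, &b));	/* -> -1: ordered by type */
	return 0;
}
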
@@ -87,34 +82,66 @@ static struct rb_node *saved_value_new(struct rblist *rblist __maybe_unused,
return &nd->rb_node;
}
+static void saved_value_delete(struct rblist *rblist __maybe_unused,
+ struct rb_node *rb_node)
+{
+ struct saved_value *v;
+
+ BUG_ON(!rb_node);
+ v = container_of(rb_node, struct saved_value, rb_node);
+ free(v);
+}
+
static struct saved_value *saved_value_lookup(struct perf_evsel *evsel,
int cpu,
- bool create)
+ bool create,
+ enum stat_type type,
+ int ctx,
+ struct runtime_stat *st)
{
+ struct rblist *rblist;
struct rb_node *nd;
struct saved_value dm = {
.cpu = cpu,
.evsel = evsel,
+ .type = type,
+ .ctx = ctx,
+ .stat = st,
};
- nd = rblist__find(&runtime_saved_values, &dm);
+
+ rblist = &st->value_list;
+
+ nd = rblist__find(rblist, &dm);
if (nd)
return container_of(nd, struct saved_value, rb_node);
if (create) {
- rblist__add_node(&runtime_saved_values, &dm);
- nd = rblist__find(&runtime_saved_values, &dm);
+ rblist__add_node(rblist, &dm);
+ nd = rblist__find(rblist, &dm);
if (nd)
return container_of(nd, struct saved_value, rb_node);
}
return NULL;
}
+void runtime_stat__init(struct runtime_stat *st)
+{
+ struct rblist *rblist = &st->value_list;
+
+ rblist__init(rblist);
+ rblist->node_cmp = saved_value_cmp;
+ rblist->node_new = saved_value_new;
+ rblist->node_delete = saved_value_delete;
+}
+
+void runtime_stat__exit(struct runtime_stat *st)
+{
+ rblist__exit(&st->value_list);
+}
+
void perf_stat__init_shadow_stats(void)
{
have_frontend_stalled = pmu_have_event("cpu", "stalled-cycles-frontend");
- rblist__init(&runtime_saved_values);
- runtime_saved_values.node_cmp = saved_value_cmp;
- runtime_saved_values.node_new = saved_value_new;
- /* No delete for now */
+ runtime_stat__init(&rt_stat);
}
static int evsel_context(struct perf_evsel *evsel)
@@ -135,36 +162,13 @@ static int evsel_context(struct perf_evsel *evsel)
return ctx;
}
-void perf_stat__reset_shadow_stats(void)
+static void reset_stat(struct runtime_stat *st)
{
+ struct rblist *rblist;
struct rb_node *pos, *next;
- memset(runtime_nsecs_stats, 0, sizeof(runtime_nsecs_stats));
- memset(runtime_cycles_stats, 0, sizeof(runtime_cycles_stats));
- memset(runtime_stalled_cycles_front_stats, 0, sizeof(runtime_stalled_cycles_front_stats));
- memset(runtime_stalled_cycles_back_stats, 0, sizeof(runtime_stalled_cycles_back_stats));
- memset(runtime_branches_stats, 0, sizeof(runtime_branches_stats));
- memset(runtime_cacherefs_stats, 0, sizeof(runtime_cacherefs_stats));
- memset(runtime_l1_dcache_stats, 0, sizeof(runtime_l1_dcache_stats));
- memset(runtime_l1_icache_stats, 0, sizeof(runtime_l1_icache_stats));
- memset(runtime_ll_cache_stats, 0, sizeof(runtime_ll_cache_stats));
- memset(runtime_itlb_cache_stats, 0, sizeof(runtime_itlb_cache_stats));
- memset(runtime_dtlb_cache_stats, 0, sizeof(runtime_dtlb_cache_stats));
- memset(runtime_cycles_in_tx_stats, 0,
- sizeof(runtime_cycles_in_tx_stats));
- memset(runtime_transaction_stats, 0,
- sizeof(runtime_transaction_stats));
- memset(runtime_elision_stats, 0, sizeof(runtime_elision_stats));
- memset(&walltime_nsecs_stats, 0, sizeof(walltime_nsecs_stats));
- memset(runtime_topdown_total_slots, 0, sizeof(runtime_topdown_total_slots));
- memset(runtime_topdown_slots_retired, 0, sizeof(runtime_topdown_slots_retired));
- memset(runtime_topdown_slots_issued, 0, sizeof(runtime_topdown_slots_issued));
- memset(runtime_topdown_fetch_bubbles, 0, sizeof(runtime_topdown_fetch_bubbles));
- memset(runtime_topdown_recovery_bubbles, 0, sizeof(runtime_topdown_recovery_bubbles));
- memset(runtime_smi_num_stats, 0, sizeof(runtime_smi_num_stats));
- memset(runtime_aperf_stats, 0, sizeof(runtime_aperf_stats));
-
- next = rb_first(&runtime_saved_values.entries);
+ rblist = &st->value_list;
+ next = rb_first(&rblist->entries);
while (next) {
pos = next;
next = rb_next(pos);
@@ -174,13 +178,35 @@ void perf_stat__reset_shadow_stats(void)
}
}
+void perf_stat__reset_shadow_stats(void)
+{
+ reset_stat(&rt_stat);
+ memset(&walltime_nsecs_stats, 0, sizeof(walltime_nsecs_stats));
+}
+
+void perf_stat__reset_shadow_per_stat(struct runtime_stat *st)
+{
+ reset_stat(st);
+}
+
+static void update_runtime_stat(struct runtime_stat *st,
+ enum stat_type type,
+ int ctx, int cpu, u64 count)
+{
+ struct saved_value *v = saved_value_lookup(NULL, cpu, true,
+ type, ctx, st);
+
+ if (v)
+ update_stats(&v->stats, count);
+}
+
/*
* Update various tracking values we maintain to print
* more semantic information such as miss/hit ratios,
* instruction rates, etc:
*/
void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 count,
- int cpu)
+ int cpu, struct runtime_stat *st)
{
int ctx = evsel_context(counter);
@@ -188,50 +214,58 @@ void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 count,
if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK) ||
perf_evsel__match(counter, SOFTWARE, SW_CPU_CLOCK))
- update_stats(&runtime_nsecs_stats[cpu], count);
+ update_runtime_stat(st, STAT_NSECS, 0, cpu, count);
else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
- update_stats(&runtime_cycles_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_CYCLES, ctx, cpu, count);
else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
- update_stats(&runtime_cycles_in_tx_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_CYCLES_IN_TX, ctx, cpu, count);
else if (perf_stat_evsel__is(counter, TRANSACTION_START))
- update_stats(&runtime_transaction_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_TRANSACTION, ctx, cpu, count);
else if (perf_stat_evsel__is(counter, ELISION_START))
- update_stats(&runtime_elision_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_ELISION, ctx, cpu, count);
else if (perf_stat_evsel__is(counter, TOPDOWN_TOTAL_SLOTS))
- update_stats(&runtime_topdown_total_slots[ctx][cpu], count);
+ update_runtime_stat(st, STAT_TOPDOWN_TOTAL_SLOTS,
+ ctx, cpu, count);
else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_ISSUED))
- update_stats(&runtime_topdown_slots_issued[ctx][cpu], count);
+ update_runtime_stat(st, STAT_TOPDOWN_SLOTS_ISSUED,
+ ctx, cpu, count);
else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_RETIRED))
- update_stats(&runtime_topdown_slots_retired[ctx][cpu], count);
+ update_runtime_stat(st, STAT_TOPDOWN_SLOTS_RETIRED,
+ ctx, cpu, count);
else if (perf_stat_evsel__is(counter, TOPDOWN_FETCH_BUBBLES))
- update_stats(&runtime_topdown_fetch_bubbles[ctx][cpu], count);
+ update_runtime_stat(st, STAT_TOPDOWN_FETCH_BUBBLES,
+ ctx, cpu, count);
else if (perf_stat_evsel__is(counter, TOPDOWN_RECOVERY_BUBBLES))
- update_stats(&runtime_topdown_recovery_bubbles[ctx][cpu], count);
+ update_runtime_stat(st, STAT_TOPDOWN_RECOVERY_BUBBLES,
+ ctx, cpu, count);
else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
- update_stats(&runtime_stalled_cycles_front_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_STALLED_CYCLES_FRONT,
+ ctx, cpu, count);
else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
- update_stats(&runtime_stalled_cycles_back_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_STALLED_CYCLES_BACK,
+ ctx, cpu, count);
else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
- update_stats(&runtime_branches_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_BRANCHES, ctx, cpu, count);
else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
- update_stats(&runtime_cacherefs_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_CACHEREFS, ctx, cpu, count);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
- update_stats(&runtime_l1_dcache_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_L1_DCACHE, ctx, cpu, count);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1I))
- update_stats(&runtime_ll_cache_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_L1_ICACHE, ctx, cpu, count);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_LL))
- update_stats(&runtime_ll_cache_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_LL_CACHE, ctx, cpu, count);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_DTLB))
- update_stats(&runtime_dtlb_cache_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_DTLB_CACHE, ctx, cpu, count);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
- update_stats(&runtime_itlb_cache_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_ITLB_CACHE, ctx, cpu, count);
else if (perf_stat_evsel__is(counter, SMI_NUM))
- update_stats(&runtime_smi_num_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_SMI_NUM, ctx, cpu, count);
else if (perf_stat_evsel__is(counter, APERF))
- update_stats(&runtime_aperf_stats[ctx][cpu], count);
+ update_runtime_stat(st, STAT_APERF, ctx, cpu, count);
if (counter->collect_stat) {
- struct saved_value *v = saved_value_lookup(counter, cpu, true);
+ struct saved_value *v = saved_value_lookup(counter, cpu, true,
+ STAT_NONE, 0, st);
update_stats(&v->stats, count);
}
}
@@ -352,15 +386,40 @@ void perf_stat__collect_metric_expr(struct perf_evlist *evsel_list)
}
}
+static double runtime_stat_avg(struct runtime_stat *st,
+ enum stat_type type, int ctx, int cpu)
+{
+ struct saved_value *v;
+
+ v = saved_value_lookup(NULL, cpu, false, type, ctx, st);
+ if (!v)
+ return 0.0;
+
+ return avg_stats(&v->stats);
+}
+
+static double runtime_stat_n(struct runtime_stat *st,
+ enum stat_type type, int ctx, int cpu)
+{
+ struct saved_value *v;
+
+ v = saved_value_lookup(NULL, cpu, false, type, ctx, st);
+ if (!v)
+ return 0.0;
+
+ return v->stats.n;
+}
+
static void print_stalled_cycles_frontend(int cpu,
struct perf_evsel *evsel, double avg,
- struct perf_stat_output_ctx *out)
+ struct perf_stat_output_ctx *out,
+ struct runtime_stat *st)
{
double total, ratio = 0.0;
const char *color;
int ctx = evsel_context(evsel);
- total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
if (total)
ratio = avg / total * 100.0;
@@ -376,13 +435,14 @@ static void print_stalled_cycles_frontend(int cpu,
static void print_stalled_cycles_backend(int cpu,
struct perf_evsel *evsel, double avg,
- struct perf_stat_output_ctx *out)
+ struct perf_stat_output_ctx *out,
+ struct runtime_stat *st)
{
double total, ratio = 0.0;
const char *color;
int ctx = evsel_context(evsel);
- total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
if (total)
ratio = avg / total * 100.0;
@@ -395,13 +455,14 @@ static void print_stalled_cycles_backend(int cpu,
static void print_branch_misses(int cpu,
struct perf_evsel *evsel,
double avg,
- struct perf_stat_output_ctx *out)
+ struct perf_stat_output_ctx *out,
+ struct runtime_stat *st)
{
double total, ratio = 0.0;
const char *color;
int ctx = evsel_context(evsel);
- total = avg_stats(&runtime_branches_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_BRANCHES, ctx, cpu);
if (total)
ratio = avg / total * 100.0;
@@ -414,13 +475,15 @@ static void print_branch_misses(int cpu,
static void print_l1_dcache_misses(int cpu,
struct perf_evsel *evsel,
double avg,
- struct perf_stat_output_ctx *out)
+ struct perf_stat_output_ctx *out,
+ struct runtime_stat *st)
+
{
double total, ratio = 0.0;
const char *color;
int ctx = evsel_context(evsel);
- total = avg_stats(&runtime_l1_dcache_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_L1_DCACHE, ctx, cpu);
if (total)
ratio = avg / total * 100.0;
@@ -433,13 +496,15 @@ static void print_l1_dcache_misses(int cpu,
static void print_l1_icache_misses(int cpu,
struct perf_evsel *evsel,
double avg,
- struct perf_stat_output_ctx *out)
+ struct perf_stat_output_ctx *out,
+ struct runtime_stat *st)
+
{
double total, ratio = 0.0;
const char *color;
int ctx = evsel_context(evsel);
- total = avg_stats(&runtime_l1_icache_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_L1_ICACHE, ctx, cpu);
if (total)
ratio = avg / total * 100.0;
@@ -451,13 +516,14 @@ static void print_l1_icache_misses(int cpu,
static void print_dtlb_cache_misses(int cpu,
struct perf_evsel *evsel,
double avg,
- struct perf_stat_output_ctx *out)
+ struct perf_stat_output_ctx *out,
+ struct runtime_stat *st)
{
double total, ratio = 0.0;
const char *color;
int ctx = evsel_context(evsel);
- total = avg_stats(&runtime_dtlb_cache_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_DTLB_CACHE, ctx, cpu);
if (total)
ratio = avg / total * 100.0;
@@ -469,13 +535,14 @@ static void print_dtlb_cache_misses(int cpu,
static void print_itlb_cache_misses(int cpu,
struct perf_evsel *evsel,
double avg,
- struct perf_stat_output_ctx *out)
+ struct perf_stat_output_ctx *out,
+ struct runtime_stat *st)
{
double total, ratio = 0.0;
const char *color;
int ctx = evsel_context(evsel);
- total = avg_stats(&runtime_itlb_cache_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_ITLB_CACHE, ctx, cpu);
if (total)
ratio = avg / total * 100.0;
@@ -487,13 +554,14 @@ static void print_itlb_cache_misses(int cpu,
static void print_ll_cache_misses(int cpu,
struct perf_evsel *evsel,
double avg,
- struct perf_stat_output_ctx *out)
+ struct perf_stat_output_ctx *out,
+ struct runtime_stat *st)
{
double total, ratio = 0.0;
const char *color;
int ctx = evsel_context(evsel);
- total = avg_stats(&runtime_ll_cache_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_LL_CACHE, ctx, cpu);
if (total)
ratio = avg / total * 100.0;
@@ -551,68 +619,72 @@ static double sanitize_val(double x)
return x;
}
-static double td_total_slots(int ctx, int cpu)
+static double td_total_slots(int ctx, int cpu, struct runtime_stat *st)
{
- return avg_stats(&runtime_topdown_total_slots[ctx][cpu]);
+ return runtime_stat_avg(st, STAT_TOPDOWN_TOTAL_SLOTS, ctx, cpu);
}
-static double td_bad_spec(int ctx, int cpu)
+static double td_bad_spec(int ctx, int cpu, struct runtime_stat *st)
{
double bad_spec = 0;
double total_slots;
double total;
- total = avg_stats(&runtime_topdown_slots_issued[ctx][cpu]) -
- avg_stats(&runtime_topdown_slots_retired[ctx][cpu]) +
- avg_stats(&runtime_topdown_recovery_bubbles[ctx][cpu]);
- total_slots = td_total_slots(ctx, cpu);
+ total = runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_ISSUED, ctx, cpu) -
+ runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED, ctx, cpu) +
+ runtime_stat_avg(st, STAT_TOPDOWN_RECOVERY_BUBBLES, ctx, cpu);
+
+ total_slots = td_total_slots(ctx, cpu, st);
if (total_slots)
bad_spec = total / total_slots;
return sanitize_val(bad_spec);
}
-static double td_retiring(int ctx, int cpu)
+static double td_retiring(int ctx, int cpu, struct runtime_stat *st)
{
double retiring = 0;
- double total_slots = td_total_slots(ctx, cpu);
- double ret_slots = avg_stats(&runtime_topdown_slots_retired[ctx][cpu]);
+ double total_slots = td_total_slots(ctx, cpu, st);
+ double ret_slots = runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED,
+ ctx, cpu);
if (total_slots)
retiring = ret_slots / total_slots;
return retiring;
}
-static double td_fe_bound(int ctx, int cpu)
+static double td_fe_bound(int ctx, int cpu, struct runtime_stat *st)
{
double fe_bound = 0;
- double total_slots = td_total_slots(ctx, cpu);
- double fetch_bub = avg_stats(&runtime_topdown_fetch_bubbles[ctx][cpu]);
+ double total_slots = td_total_slots(ctx, cpu, st);
+ double fetch_bub = runtime_stat_avg(st, STAT_TOPDOWN_FETCH_BUBBLES,
+ ctx, cpu);
if (total_slots)
fe_bound = fetch_bub / total_slots;
return fe_bound;
}
-static double td_be_bound(int ctx, int cpu)
+static double td_be_bound(int ctx, int cpu, struct runtime_stat *st)
{
- double sum = (td_fe_bound(ctx, cpu) +
- td_bad_spec(ctx, cpu) +
- td_retiring(ctx, cpu));
+ double sum = (td_fe_bound(ctx, cpu, st) +
+ td_bad_spec(ctx, cpu, st) +
+ td_retiring(ctx, cpu, st));
if (sum == 0)
return 0;
return sanitize_val(1.0 - sum);
}
static void print_smi_cost(int cpu, struct perf_evsel *evsel,
- struct perf_stat_output_ctx *out)
+ struct perf_stat_output_ctx *out,
+ struct runtime_stat *st)
{
double smi_num, aperf, cycles, cost = 0.0;
int ctx = evsel_context(evsel);
const char *color = NULL;
- smi_num = avg_stats(&runtime_smi_num_stats[ctx][cpu]);
- aperf = avg_stats(&runtime_aperf_stats[ctx][cpu]);
- cycles = avg_stats(&runtime_cycles_stats[ctx][cpu]);
+ smi_num = runtime_stat_avg(st, STAT_SMI_NUM, ctx, cpu);
+ aperf = runtime_stat_avg(st, STAT_APERF, ctx, cpu);
+ cycles = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
if ((cycles == 0) || (aperf == 0))
return;
@@ -632,7 +704,8 @@ static void generic_metric(const char *metric_expr,
const char *metric_name,
double avg,
int cpu,
- struct perf_stat_output_ctx *out)
+ struct perf_stat_output_ctx *out,
+ struct runtime_stat *st)
{
print_metric_t print_metric = out->print_metric;
struct parse_ctx pctx;
@@ -651,7 +724,8 @@ static void generic_metric(const char *metric_expr,
stats = &walltime_nsecs_stats;
scale = 1e-9;
} else {
- v = saved_value_lookup(metric_events[i], cpu, false);
+ v = saved_value_lookup(metric_events[i], cpu, false,
+ STAT_NONE, 0, st);
if (!v)
break;
stats = &v->stats;
@@ -679,7 +753,8 @@ static void generic_metric(const char *metric_expr,
void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
double avg, int cpu,
struct perf_stat_output_ctx *out,
- struct rblist *metric_events)
+ struct rblist *metric_events,
+ struct runtime_stat *st)
{
void *ctxp = out->ctx;
print_metric_t print_metric = out->print_metric;
@@ -690,7 +765,8 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
int num = 1;
if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
- total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
+
if (total) {
ratio = avg / total;
print_metric(ctxp, NULL, "%7.2f ",
@@ -698,8 +774,13 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
} else {
print_metric(ctxp, NULL, NULL, "insn per cycle", 0);
}
- total = avg_stats(&runtime_stalled_cycles_front_stats[ctx][cpu]);
- total = max(total, avg_stats(&runtime_stalled_cycles_back_stats[ctx][cpu]));
+
+ total = runtime_stat_avg(st, STAT_STALLED_CYCLES_FRONT,
+ ctx, cpu);
+
+ total = max(total, runtime_stat_avg(st,
+ STAT_STALLED_CYCLES_BACK,
+ ctx, cpu));
if (total && avg) {
out->new_line(ctxp);
@@ -712,8 +793,8 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
"stalled cycles per insn", 0);
}
} else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES)) {
- if (runtime_branches_stats[ctx][cpu].n != 0)
- print_branch_misses(cpu, evsel, avg, out);
+ if (runtime_stat_n(st, STAT_BRANCHES, ctx, cpu) != 0)
+ print_branch_misses(cpu, evsel, avg, out, st);
else
print_metric(ctxp, NULL, NULL, "of all branches", 0);
} else if (
@@ -721,8 +802,9 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1D |
((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
- if (runtime_l1_dcache_stats[ctx][cpu].n != 0)
- print_l1_dcache_misses(cpu, evsel, avg, out);
+
+ if (runtime_stat_n(st, STAT_L1_DCACHE, ctx, cpu) != 0)
+ print_l1_dcache_misses(cpu, evsel, avg, out, st);
else
print_metric(ctxp, NULL, NULL, "of all L1-dcache hits", 0);
} else if (
@@ -730,8 +812,9 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1I |
((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
- if (runtime_l1_icache_stats[ctx][cpu].n != 0)
- print_l1_icache_misses(cpu, evsel, avg, out);
+
+ if (runtime_stat_n(st, STAT_L1_ICACHE, ctx, cpu) != 0)
+ print_l1_icache_misses(cpu, evsel, avg, out, st);
else
print_metric(ctxp, NULL, NULL, "of all L1-icache hits", 0);
} else if (
@@ -739,8 +822,9 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
evsel->attr.config == ( PERF_COUNT_HW_CACHE_DTLB |
((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
- if (runtime_dtlb_cache_stats[ctx][cpu].n != 0)
- print_dtlb_cache_misses(cpu, evsel, avg, out);
+
+ if (runtime_stat_n(st, STAT_DTLB_CACHE, ctx, cpu) != 0)
+ print_dtlb_cache_misses(cpu, evsel, avg, out, st);
else
print_metric(ctxp, NULL, NULL, "of all dTLB cache hits", 0);
} else if (
@@ -748,8 +832,9 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
evsel->attr.config == ( PERF_COUNT_HW_CACHE_ITLB |
((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
- if (runtime_itlb_cache_stats[ctx][cpu].n != 0)
- print_itlb_cache_misses(cpu, evsel, avg, out);
+
+ if (runtime_stat_n(st, STAT_ITLB_CACHE, ctx, cpu) != 0)
+ print_itlb_cache_misses(cpu, evsel, avg, out, st);
else
print_metric(ctxp, NULL, NULL, "of all iTLB cache hits", 0);
} else if (
@@ -757,27 +842,28 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
evsel->attr.config == ( PERF_COUNT_HW_CACHE_LL |
((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
- if (runtime_ll_cache_stats[ctx][cpu].n != 0)
- print_ll_cache_misses(cpu, evsel, avg, out);
+
+ if (runtime_stat_n(st, STAT_LL_CACHE, ctx, cpu) != 0)
+ print_ll_cache_misses(cpu, evsel, avg, out, st);
else
print_metric(ctxp, NULL, NULL, "of all LL-cache hits", 0);
} else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES)) {
- total = avg_stats(&runtime_cacherefs_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_CACHEREFS, ctx, cpu);
if (total)
ratio = avg * 100 / total;
- if (runtime_cacherefs_stats[ctx][cpu].n != 0)
+ if (runtime_stat_n(st, STAT_CACHEREFS, ctx, cpu) != 0)
print_metric(ctxp, NULL, "%8.3f %%",
"of all cache refs", ratio);
else
print_metric(ctxp, NULL, NULL, "of all cache refs", 0);
} else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) {
- print_stalled_cycles_frontend(cpu, evsel, avg, out);
+ print_stalled_cycles_frontend(cpu, evsel, avg, out, st);
} else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) {
- print_stalled_cycles_backend(cpu, evsel, avg, out);
+ print_stalled_cycles_backend(cpu, evsel, avg, out, st);
} else if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
- total = avg_stats(&runtime_nsecs_stats[cpu]);
+ total = runtime_stat_avg(st, STAT_NSECS, 0, cpu);
if (total) {
ratio = avg / total;
@@ -786,7 +872,8 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
print_metric(ctxp, NULL, NULL, "Ghz", 0);
}
} else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX)) {
- total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
+
if (total)
print_metric(ctxp, NULL,
"%7.2f%%", "transactional cycles",
@@ -795,8 +882,9 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
print_metric(ctxp, NULL, NULL, "transactional cycles",
0);
} else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX_CP)) {
- total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
- total2 = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
+ total2 = runtime_stat_avg(st, STAT_CYCLES_IN_TX, ctx, cpu);
+
if (total2 < avg)
total2 = avg;
if (total)
@@ -805,19 +893,21 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
else
print_metric(ctxp, NULL, NULL, "aborted cycles", 0);
} else if (perf_stat_evsel__is(evsel, TRANSACTION_START)) {
- total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_CYCLES_IN_TX,
+ ctx, cpu);
if (avg)
ratio = total / avg;
- if (runtime_cycles_in_tx_stats[ctx][cpu].n != 0)
+ if (runtime_stat_n(st, STAT_CYCLES_IN_TX, ctx, cpu) != 0)
print_metric(ctxp, NULL, "%8.0f",
"cycles / transaction", ratio);
else
print_metric(ctxp, NULL, NULL, "cycles / transaction",
- 0);
+ 0);
} else if (perf_stat_evsel__is(evsel, ELISION_START)) {
- total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
+ total = runtime_stat_avg(st, STAT_CYCLES_IN_TX,
+ ctx, cpu);
if (avg)
ratio = total / avg;
@@ -831,28 +921,28 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
else
print_metric(ctxp, NULL, NULL, "CPUs utilized", 0);
} else if (perf_stat_evsel__is(evsel, TOPDOWN_FETCH_BUBBLES)) {
- double fe_bound = td_fe_bound(ctx, cpu);
+ double fe_bound = td_fe_bound(ctx, cpu, st);
if (fe_bound > 0.2)
color = PERF_COLOR_RED;
print_metric(ctxp, color, "%8.1f%%", "frontend bound",
fe_bound * 100.);
} else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_RETIRED)) {
- double retiring = td_retiring(ctx, cpu);
+ double retiring = td_retiring(ctx, cpu, st);
if (retiring > 0.7)
color = PERF_COLOR_GREEN;
print_metric(ctxp, color, "%8.1f%%", "retiring",
retiring * 100.);
} else if (perf_stat_evsel__is(evsel, TOPDOWN_RECOVERY_BUBBLES)) {
- double bad_spec = td_bad_spec(ctx, cpu);
+ double bad_spec = td_bad_spec(ctx, cpu, st);
if (bad_spec > 0.1)
color = PERF_COLOR_RED;
print_metric(ctxp, color, "%8.1f%%", "bad speculation",
bad_spec * 100.);
} else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_ISSUED)) {
- double be_bound = td_be_bound(ctx, cpu);
+ double be_bound = td_be_bound(ctx, cpu, st);
const char *name = "backend bound";
static int have_recovery_bubbles = -1;
@@ -865,19 +955,19 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
if (be_bound > 0.2)
color = PERF_COLOR_RED;
- if (td_total_slots(ctx, cpu) > 0)
+ if (td_total_slots(ctx, cpu, st) > 0)
print_metric(ctxp, color, "%8.1f%%", name,
be_bound * 100.);
else
print_metric(ctxp, NULL, NULL, name, 0);
} else if (evsel->metric_expr) {
generic_metric(evsel->metric_expr, evsel->metric_events, evsel->name,
- evsel->metric_name, avg, cpu, out);
- } else if (runtime_nsecs_stats[cpu].n != 0) {
+ evsel->metric_name, avg, cpu, out, st);
+ } else if (runtime_stat_n(st, STAT_NSECS, 0, cpu) != 0) {
char unit = 'M';
char unit_buf[10];
- total = avg_stats(&runtime_nsecs_stats[cpu]);
+ total = runtime_stat_avg(st, STAT_NSECS, 0, cpu);
if (total)
ratio = 1000.0 * avg / total;
@@ -888,7 +978,7 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
snprintf(unit_buf, sizeof(unit_buf), "%c/sec", unit);
print_metric(ctxp, NULL, "%8.3f", unit_buf, ratio);
} else if (perf_stat_evsel__is(evsel, SMI_NUM)) {
- print_smi_cost(cpu, evsel, out);
+ print_smi_cost(cpu, evsel, out, st);
} else {
num = 0;
}
@@ -901,7 +991,7 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
out->new_line(ctxp);
generic_metric(mexp->metric_expr, mexp->metric_events,
evsel->name, mexp->metric_name,
- avg, cpu, out);
+ avg, cpu, out, st);
}
}
if (num == 0)
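One pattern runs through the whole hunk above: every consumer of the shadow counters switches from file-scope arrays indexed by [ctx][cpu] to a lookup in a caller-supplied struct runtime_stat. A condensed before/after sketch (array bounds illustrative):

	/* before: a single global copy of each shadow counter */
	static struct stats runtime_cycles_stats[NUM_CTX][MAX_NR_CPUS];
	total = avg_stats(&runtime_cycles_stats[ctx][cpu]);

	/* after: the caller picks which runtime_stat to read, so
	 * per-thread output can use private state while the default
	 * path keeps using the global rt_stat */
	total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);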
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index 151e9efd7286..32235657c1ac 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -278,9 +278,16 @@ process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel
perf_evsel__compute_deltas(evsel, cpu, thread, count);
perf_counts_values__scale(count, config->scale, NULL);
if (config->aggr_mode == AGGR_NONE)
- perf_stat__update_shadow_stats(evsel, count->val, cpu);
- if (config->aggr_mode == AGGR_THREAD)
- perf_stat__update_shadow_stats(evsel, count->val, 0);
+ perf_stat__update_shadow_stats(evsel, count->val, cpu,
+ &rt_stat);
+ if (config->aggr_mode == AGGR_THREAD) {
+ if (config->stats)
+ perf_stat__update_shadow_stats(evsel,
+ count->val, 0, &config->stats[thread]);
+ else
+ perf_stat__update_shadow_stats(evsel,
+ count->val, 0, &rt_stat);
+ }
break;
case AGGR_GLOBAL:
aggr->val += count->val;
@@ -362,7 +369,7 @@ int perf_stat_process_counter(struct perf_stat_config *config,
/*
* Save the full runtime - to allow normalization during printout:
*/
- perf_stat__update_shadow_stats(counter, *count, 0);
+ perf_stat__update_shadow_stats(counter, *count, 0, &rt_stat);
return 0;
}
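For AGGR_THREAD, the process_counter_values() hunk above selects a per-thread runtime_stat when config->stats has been allocated and falls back to the shared rt_stat otherwise. The same dispatch, condensed:

	/* sketch of the AGGR_THREAD selection from the hunk above */
	struct runtime_stat *st = config->stats ? &config->stats[thread]
						: &rt_stat;
	perf_stat__update_shadow_stats(evsel, count->val, 0, st);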
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
index eefca5c981fd..dbc6f7134f61 100644
--- a/tools/perf/util/stat.h
+++ b/tools/perf/util/stat.h
@@ -5,6 +5,7 @@
#include <linux/types.h>
#include <stdio.h>
#include "xyarray.h"
+#include "rblist.h"
struct stats
{
@@ -43,11 +44,54 @@ enum aggr_mode {
AGGR_UNSET,
};
+enum {
+ CTX_BIT_USER = 1 << 0,
+ CTX_BIT_KERNEL = 1 << 1,
+ CTX_BIT_HV = 1 << 2,
+ CTX_BIT_HOST = 1 << 3,
+ CTX_BIT_IDLE = 1 << 4,
+ CTX_BIT_MAX = 1 << 5,
+};
+
+#define NUM_CTX CTX_BIT_MAX
+
+enum stat_type {
+ STAT_NONE = 0,
+ STAT_NSECS,
+ STAT_CYCLES,
+ STAT_STALLED_CYCLES_FRONT,
+ STAT_STALLED_CYCLES_BACK,
+ STAT_BRANCHES,
+ STAT_CACHEREFS,
+ STAT_L1_DCACHE,
+ STAT_L1_ICACHE,
+ STAT_LL_CACHE,
+ STAT_ITLB_CACHE,
+ STAT_DTLB_CACHE,
+ STAT_CYCLES_IN_TX,
+ STAT_TRANSACTION,
+ STAT_ELISION,
+ STAT_TOPDOWN_TOTAL_SLOTS,
+ STAT_TOPDOWN_SLOTS_ISSUED,
+ STAT_TOPDOWN_SLOTS_RETIRED,
+ STAT_TOPDOWN_FETCH_BUBBLES,
+ STAT_TOPDOWN_RECOVERY_BUBBLES,
+ STAT_SMI_NUM,
+ STAT_APERF,
+ STAT_MAX
+};
+
+struct runtime_stat {
+ struct rblist value_list;
+};
+
struct perf_stat_config {
enum aggr_mode aggr_mode;
bool scale;
FILE *output;
unsigned int interval;
+ struct runtime_stat *stats;
+ int stats_num;
};
void update_stats(struct stats *stats, u64 val);
@@ -67,6 +111,15 @@ static inline void init_stats(struct stats *stats)
struct perf_evsel;
struct perf_evlist;
+struct perf_aggr_thread_value {
+ struct perf_evsel *counter;
+ int id;
+ double uval;
+ u64 val;
+ u64 run;
+ u64 ena;
+};
+
bool __perf_evsel_stat__is(struct perf_evsel *evsel,
enum perf_stat_evsel_id id);
@@ -75,16 +128,20 @@ bool __perf_evsel_stat__is(struct perf_evsel *evsel,
void perf_stat_evsel_id_init(struct perf_evsel *evsel);
+extern struct runtime_stat rt_stat;
extern struct stats walltime_nsecs_stats;
typedef void (*print_metric_t)(void *ctx, const char *color, const char *unit,
const char *fmt, double val);
typedef void (*new_line_t )(void *ctx);
+void runtime_stat__init(struct runtime_stat *st);
+void runtime_stat__exit(struct runtime_stat *st);
void perf_stat__init_shadow_stats(void);
void perf_stat__reset_shadow_stats(void);
+void perf_stat__reset_shadow_per_stat(struct runtime_stat *st);
void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 count,
- int cpu);
+ int cpu, struct runtime_stat *st);
struct perf_stat_output_ctx {
void *ctx;
print_metric_t print_metric;
@@ -92,11 +149,11 @@ struct perf_stat_output_ctx {
bool force_header;
};
-struct rblist;
void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
double avg, int cpu,
struct perf_stat_output_ctx *out,
- struct rblist *metric_events);
+ struct rblist *metric_events,
+ struct runtime_stat *st);
void perf_stat__collect_metric_expr(struct perf_evlist *);
int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw);
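Each CTX_BIT_* value above is a distinct bit, so a context index is an OR of flags, and NUM_CTX (== CTX_BIT_MAX == 1 << 5 == 32) bounds any per-context array. A minimal illustration of the composition; how perf derives the booleans from an event's exclude_* attributes is not part of this patch, so treat these names as placeholders:

	/* placeholder booleans; the real mapping lives outside this patch */
	int ctx = 0;

	if (exclude_user)
		ctx |= CTX_BIT_USER;	/* 1 */
	if (exclude_kernel)
		ctx |= CTX_BIT_KERNEL;	/* 2 */
	if (exclude_hv)
		ctx |= CTX_BIT_HV;	/* 4 */
	/* 0 <= ctx < NUM_CTX always holds: every bit is below 1 << 5 */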
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
index aaa08ee8c717..d8bfd0c4d2cb 100644
--- a/tools/perf/util/string.c
+++ b/tools/perf/util/string.c
@@ -396,3 +396,49 @@ out_err_overflow:
free(expr);
return NULL;
}
+
+/* Like strpbrk(), but does not stop at a character that immediately follows a backslash (i.e., is escaped) */
+char *strpbrk_esc(char *str, const char *stopset)
+{
+ char *ptr;
+
+ do {
+ ptr = strpbrk(str, stopset);
+ if (ptr == str ||
+ (ptr == str + 1 && *(ptr - 1) != '\\'))
+ break;
+ str = ptr + 1;
+ } while (ptr && *(ptr - 1) == '\\' && *(ptr - 2) != '\\');
+
+ return ptr;
+}
+
+/* Like strdup(), but do not copy the escaping (single) backslashes */
+char *strdup_esc(const char *str)
+{
+ char *s, *d, *p, *ret = strdup(str);
+
+ if (!ret)
+ return NULL;
+
+ d = strchr(ret, '\\');
+ if (!d)
+ return ret;
+
+ s = d + 1;
+ do {
+ if (*s == '\0') {
+ *d = '\0';
+ break;
+ }
+ p = strchr(s + 1, '\\');
+ if (p) {
+ memmove(d, s, p - s);
+ d += p - s;
+ s = p + 1;
+ } else
+ memmove(d, s, strlen(s) + 1);
+ } while (p);
+
+ return ret;
+}
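A hedged usage sketch of the two escape-aware helpers (standalone program, not part of the patch; assumes it is linked with the functions above and that string2.h is on the include path):

	#include <stdio.h>
	#include <stdlib.h>
	#include "string2.h"	/* strpbrk_esc(), strdup_esc() */

	int main(void)
	{
		char buf[] = "probe\\:point:arg";	/* contents: probe\:point:arg */
		char *s;

		/* the escaped ':' is skipped; the unescaped one is found */
		printf("%s\n", strpbrk_esc(buf, ":"));	/* -> ":arg" */

		/* the single escaping backslash is dropped on copy */
		s = strdup_esc("probe\\:point");
		printf("%s\n", s);			/* -> "probe:point" */
		free(s);
		return 0;
	}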
diff --git a/tools/perf/util/string2.h b/tools/perf/util/string2.h
index ee14ca5451ab..4c68a09b97e8 100644
--- a/tools/perf/util/string2.h
+++ b/tools/perf/util/string2.h
@@ -39,5 +39,7 @@ static inline char *asprintf_expr_not_in_ints(const char *var, size_t nints, int
return asprintf_expr_inout_ints(var, false, nints, ints);
}
+char *strpbrk_esc(char *str, const char *stopset);
+char *strdup_esc(const char *str);
#endif /* PERF_STRING_H */
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 1b67a8639dfe..cc065d4bfafc 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -94,6 +94,11 @@ static int prefix_underscores_count(const char *str)
return tail - str;
}
+const char * __weak arch__normalize_symbol_name(const char *name)
+{
+ return name;
+}
+
int __weak arch__compare_symbol_names(const char *namea, const char *nameb)
{
return strcmp(namea, nameb);
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index a4f0075b4e5c..0563f33c1eb3 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -349,6 +349,7 @@ bool elf__needs_adjust_symbols(GElf_Ehdr ehdr);
void arch__sym_update(struct symbol *s, GElf_Sym *sym);
#endif
+const char *arch__normalize_symbol_name(const char *name);
#define SYMBOL_A 0
#define SYMBOL_B 1
diff --git a/tools/perf/util/syscalltbl.c b/tools/perf/util/syscalltbl.c
index 6eea7cff3d4e..303bdb84ab5a 100644
--- a/tools/perf/util/syscalltbl.c
+++ b/tools/perf/util/syscalltbl.c
@@ -26,6 +26,10 @@
#include <asm/syscalls_64.c>
const int syscalltbl_native_max_id = SYSCALLTBL_x86_64_MAX_ID;
static const char **syscalltbl_native = syscalltbl_x86_64;
+#elif defined(__s390x__)
+#include <asm/syscalls_64.c>
+const int syscalltbl_native_max_id = SYSCALLTBL_S390_64_MAX_ID;
+static const char **syscalltbl_native = syscalltbl_s390_64;
#endif
struct syscall {
diff --git a/tools/perf/util/target.h b/tools/perf/util/target.h
index 446aa7a56f25..6ef01a83b24e 100644
--- a/tools/perf/util/target.h
+++ b/tools/perf/util/target.h
@@ -64,6 +64,11 @@ static inline bool target__none(struct target *target)
return !target__has_task(target) && !target__has_cpu(target);
}
+static inline bool target__has_per_thread(struct target *target)
+{
+ return target->system_wide && target->per_thread;
+}
+
static inline bool target__uses_dummy_map(struct target *target)
{
bool use_dummy = false;
@@ -73,6 +78,8 @@ static inline bool target__uses_dummy_map(struct target *target)
else if (target__has_task(target) ||
(!target__has_cpu(target) && !target->uses_mmap))
use_dummy = true;
+ else if (target__has_per_thread(target))
+ use_dummy = true;
return use_dummy;
}
diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c
index be0d5a736dea..3e1038f6491c 100644
--- a/tools/perf/util/thread_map.c
+++ b/tools/perf/util/thread_map.c
@@ -92,7 +92,7 @@ struct thread_map *thread_map__new_by_tid(pid_t tid)
return threads;
}
-struct thread_map *thread_map__new_by_uid(uid_t uid)
+static struct thread_map *__thread_map__new_all_cpus(uid_t uid)
{
DIR *proc;
int max_threads = 32, items, i;
@@ -113,7 +113,6 @@ struct thread_map *thread_map__new_by_uid(uid_t uid)
while ((dirent = readdir(proc)) != NULL) {
char *end;
bool grow = false;
- struct stat st;
pid_t pid = strtol(dirent->d_name, &end, 10);
if (*end) /* only interested in proper numerical dirents */
@@ -121,11 +120,12 @@ struct thread_map *thread_map__new_by_uid(uid_t uid)
snprintf(path, sizeof(path), "/proc/%s", dirent->d_name);
- if (stat(path, &st) != 0)
- continue;
+ if (uid != UINT_MAX) {
+ struct stat st;
- if (st.st_uid != uid)
- continue;
+ if (stat(path, &st) != 0 || st.st_uid != uid)
+ continue;
+ }
snprintf(path, sizeof(path), "/proc/%d/task", pid);
items = scandir(path, &namelist, filter, NULL);
@@ -178,6 +178,16 @@ out_free_closedir:
goto out_closedir;
}
+struct thread_map *thread_map__new_all_cpus(void)
+{
+ return __thread_map__new_all_cpus(UINT_MAX);
+}
+
+struct thread_map *thread_map__new_by_uid(uid_t uid)
+{
+ return __thread_map__new_all_cpus(uid);
+}
+
struct thread_map *thread_map__new(pid_t pid, pid_t tid, uid_t uid)
{
if (pid != -1)
@@ -313,7 +323,7 @@ out_free_threads:
}
struct thread_map *thread_map__new_str(const char *pid, const char *tid,
- uid_t uid)
+ uid_t uid, bool per_thread)
{
if (pid)
return thread_map__new_by_pid_str(pid);
@@ -321,6 +331,9 @@ struct thread_map *thread_map__new_str(const char *pid, const char *tid,
if (!tid && uid != UINT_MAX)
return thread_map__new_by_uid(uid);
+ if (per_thread)
+ return thread_map__new_all_cpus();
+
return thread_map__new_by_tid_str(tid);
}
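Both public constructors above now funnel into one /proc walker, with UINT_MAX as the "no uid filter" sentinel, the same sentinel thread_map__new_str() already tests. Assumed call sites, for illustration only:

	#include <unistd.h>
	#include "thread_map.h"	/* assumed include path */

	static void example(void)
	{
		/* every task on the system (per-thread system-wide mode) */
		struct thread_map *all = thread_map__new_all_cpus();

		/* only tasks owned by the current user */
		struct thread_map *mine = thread_map__new_by_uid(getuid());

		thread_map__put(all);
		thread_map__put(mine);
	}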
diff --git a/tools/perf/util/thread_map.h b/tools/perf/util/thread_map.h
index f15803985435..0a806b99e73c 100644
--- a/tools/perf/util/thread_map.h
+++ b/tools/perf/util/thread_map.h
@@ -23,6 +23,7 @@ struct thread_map *thread_map__new_dummy(void);
struct thread_map *thread_map__new_by_pid(pid_t pid);
struct thread_map *thread_map__new_by_tid(pid_t tid);
struct thread_map *thread_map__new_by_uid(uid_t uid);
+struct thread_map *thread_map__new_all_cpus(void);
struct thread_map *thread_map__new(pid_t pid, pid_t tid, uid_t uid);
struct thread_map *thread_map__new_event(struct thread_map_event *event);
@@ -30,7 +31,7 @@ struct thread_map *thread_map__get(struct thread_map *map);
void thread_map__put(struct thread_map *map);
struct thread_map *thread_map__new_str(const char *pid,
- const char *tid, uid_t uid);
+ const char *tid, uid_t uid, bool per_thread);
struct thread_map *thread_map__new_by_tid_str(const char *tid_str);
diff --git a/tools/perf/util/time-utils.c b/tools/perf/util/time-utils.c
index 81927d027417..6193b46050a5 100644
--- a/tools/perf/util/time-utils.c
+++ b/tools/perf/util/time-utils.c
@@ -6,6 +6,7 @@
#include <time.h>
#include <errno.h>
#include <inttypes.h>
+#include <math.h>
#include "perf.h"
#include "debug.h"
@@ -60,11 +61,10 @@ static int parse_timestr_sec_nsec(struct perf_time_interval *ptime,
return 0;
}
-int perf_time__parse_str(struct perf_time_interval *ptime, const char *ostr)
+static int split_start_end(char **start, char **end, const char *ostr, char ch)
{
char *start_str, *end_str;
char *d, *str;
- int rc = 0;
if (ostr == NULL || *ostr == '\0')
return 0;
@@ -74,25 +74,35 @@ int perf_time__parse_str(struct perf_time_interval *ptime, const char *ostr)
if (str == NULL)
return -ENOMEM;
- ptime->start = 0;
- ptime->end = 0;
-
- /* str has the format: <start>,<stop>
- * variations: <start>,
- * ,<stop>
- * ,
- */
start_str = str;
- d = strchr(start_str, ',');
+ d = strchr(start_str, ch);
if (d) {
*d = '\0';
++d;
}
end_str = d;
+ *start = start_str;
+ *end = end_str;
+
+ return 0;
+}
+
+int perf_time__parse_str(struct perf_time_interval *ptime, const char *ostr)
+{
+ char *start_str = NULL, *end_str;
+ int rc;
+
+ rc = split_start_end(&start_str, &end_str, ostr, ',');
+ if (rc || !start_str)
+ return rc;
+
+ ptime->start = 0;
+ ptime->end = 0;
+
rc = parse_timestr_sec_nsec(ptime, start_str, end_str);
- free(str);
+ free(start_str);
/* make sure end time is after start time if it was given */
if (rc == 0 && ptime->end && ptime->end < ptime->start)
@@ -104,6 +114,245 @@ int perf_time__parse_str(struct perf_time_interval *ptime, const char *ostr)
return rc;
}
+static int parse_percent(double *pcnt, char *str)
+{
+ char *c, *endptr;
+ double d;
+
+ c = strchr(str, '%');
+ if (c)
+ *c = '\0';
+ else
+ return -1;
+
+ d = strtod(str, &endptr);
+ if (endptr != str + strlen(str))
+ return -1;
+
+ *pcnt = d / 100.0;
+ return 0;
+}
+
+static int percent_slash_split(char *str, struct perf_time_interval *ptime,
+ u64 start, u64 end)
+{
+ char *p, *end_str;
+ double pcnt, start_pcnt, end_pcnt;
+ u64 total = end - start;
+ int i;
+
+ /*
+ * Example:
+ * 10%/2: select the second 10% slice and the third 10% slice
+	 * 10%/2: select the second 10% slice
+
+ /* We can modify this string since the original one is copied */
+ p = strchr(str, '/');
+ if (!p)
+ return -1;
+
+ *p = '\0';
+ if (parse_percent(&pcnt, str) < 0)
+ return -1;
+
+ p++;
+ i = (int)strtol(p, &end_str, 10);
+ if (*end_str)
+ return -1;
+
+ if (pcnt <= 0.0)
+ return -1;
+
+ start_pcnt = pcnt * (i - 1);
+ end_pcnt = pcnt * i;
+
+ if (start_pcnt < 0.0 || start_pcnt > 1.0 ||
+ end_pcnt < 0.0 || end_pcnt > 1.0) {
+ return -1;
+ }
+
+ ptime->start = start + round(start_pcnt * total);
+ ptime->end = start + round(end_pcnt * total);
+
+ return 0;
+}
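Worked numbers for the "A%/n" form above: the string selects the n-th slice of width A%. For "10%/2" over a session spanning start = 0, end = 1000000 ns, pcnt = 0.10 and i = 2, so start_pcnt = 0.10 and end_pcnt = 0.20, giving the window [100000, 200000]. A standalone check with these hypothetical values (compile with -lm):

	#include <math.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned long long start = 0, end = 1000000, total = end - start;
		double pcnt = 0.10;	/* "10%" */
		int i = 2;		/* "/2"  */

		unsigned long long lo = start + (unsigned long long)round(pcnt * (i - 1) * total);
		unsigned long long hi = start + (unsigned long long)round(pcnt * i * total);

		printf("[%llu, %llu]\n", lo, hi);	/* [100000, 200000] */
		return 0;
	}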
+
+static int percent_dash_split(char *str, struct perf_time_interval *ptime,
+ u64 start, u64 end)
+{
+ char *start_str = NULL, *end_str;
+ double start_pcnt, end_pcnt;
+ u64 total = end - start;
+ int ret;
+
+ /*
+ * Example: 0%-10%
+ */
+
+ ret = split_start_end(&start_str, &end_str, str, '-');
+ if (ret || !start_str)
+ return ret;
+
+ if ((parse_percent(&start_pcnt, start_str) != 0) ||
+ (parse_percent(&end_pcnt, end_str) != 0)) {
+ free(start_str);
+ return -1;
+ }
+
+ free(start_str);
+
+ if (start_pcnt < 0.0 || start_pcnt > 1.0 ||
+ end_pcnt < 0.0 || end_pcnt > 1.0 ||
+ start_pcnt > end_pcnt) {
+ return -1;
+ }
+
+ ptime->start = start + round(start_pcnt * total);
+ ptime->end = start + round(end_pcnt * total);
+
+ return 0;
+}
+
+typedef int (*time_percent_split)(char *, struct perf_time_interval *,
+ u64 start, u64 end);
+
+static int percent_comma_split(struct perf_time_interval *ptime_buf, int num,
+ const char *ostr, u64 start, u64 end,
+			       time_percent_split func)
+{
+ char *str, *p1, *p2;
+ int len, ret, i = 0;
+
+ str = strdup(ostr);
+ if (str == NULL)
+ return -ENOMEM;
+
+ len = strlen(str);
+ p1 = str;
+
+ while (p1 < str + len) {
+ if (i >= num) {
+ free(str);
+ return -1;
+ }
+
+ p2 = strchr(p1, ',');
+ if (p2)
+ *p2 = '\0';
+
+ ret = (func)(p1, &ptime_buf[i], start, end);
+ if (ret < 0) {
+ free(str);
+ return -1;
+ }
+
+ pr_debug("start time %d: %" PRIu64 ", ", i, ptime_buf[i].start);
+ pr_debug("end time %d: %" PRIu64 "\n", i, ptime_buf[i].end);
+
+ i++;
+
+ if (p2)
+ p1 = p2 + 1;
+ else
+ break;
+ }
+
+ free(str);
+ return i;
+}
+
+static int one_percent_convert(struct perf_time_interval *ptime_buf,
+ const char *ostr, u64 start, u64 end, char *c)
+{
+ char *str;
+ int len = strlen(ostr), ret;
+
+ /*
+ * c points to '%'.
+ * '%' should be the last character
+ */
+ if (ostr + len - 1 != c)
+ return -1;
+
+ /*
+ * Construct a string like "xx%/1"
+ */
+ str = malloc(len + 3);
+ if (str == NULL)
+ return -ENOMEM;
+
+ memcpy(str, ostr, len);
+ strcpy(str + len, "/1");
+
+ ret = percent_slash_split(str, ptime_buf, start, end);
+ if (ret == 0)
+ ret = 1;
+
+ free(str);
+ return ret;
+}
+
+int perf_time__percent_parse_str(struct perf_time_interval *ptime_buf, int num,
+ const char *ostr, u64 start, u64 end)
+{
+ char *c;
+
+ /*
+ * ostr example:
+ * 10%/2,10%/3: select the second 10% slice and the third 10% slice
+ * 0%-10%,30%-40%: multiple time range
+ * 50%: just one percent
+ */
+
+ memset(ptime_buf, 0, sizeof(*ptime_buf) * num);
+
+ c = strchr(ostr, '/');
+ if (c) {
+ return percent_comma_split(ptime_buf, num, ostr, start,
+ end, percent_slash_split);
+ }
+
+ c = strchr(ostr, '-');
+ if (c) {
+ return percent_comma_split(ptime_buf, num, ostr, start,
+ end, percent_dash_split);
+ }
+
+ c = strchr(ostr, '%');
+ if (c)
+ return one_percent_convert(ptime_buf, ostr, start, end, c);
+
+ return -1;
+}
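The dispatcher above tries the three grammars in delimiter order: '/' ("A%/n", comma-separable), '-' ("A%-B%", comma-separable), then a bare trailing '%' ("A%", rewritten internally to "A%/1"). Assumed example calls over a session spanning [0, 1000000]:

	struct perf_time_interval ptime[2];
	int n;

	n = perf_time__percent_parse_str(ptime, 2, "10%/2,10%/3", 0, 1000000);
	/* n == 2: ptime[0] = [100000, 200000], ptime[1] = [200000, 300000] */

	n = perf_time__percent_parse_str(ptime, 2, "0%-10%", 0, 1000000);
	/* n == 1: ptime[0] = [0, 100000] */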
+
+struct perf_time_interval *perf_time__range_alloc(const char *ostr, int *size)
+{
+ const char *p1, *p2;
+ int i = 1;
+ struct perf_time_interval *ptime;
+
+ /*
+ * At least allocate one time range.
+ */
+ if (!ostr)
+ goto alloc;
+
+ p1 = ostr;
+ while (p1 < ostr + strlen(ostr)) {
+ p2 = strchr(p1, ',');
+ if (!p2)
+ break;
+
+ p1 = p2 + 1;
+ i++;
+ }
+
+alloc:
+ *size = i;
+ ptime = calloc(i, sizeof(*ptime));
+ return ptime;
+}
+
bool perf_time__skip_sample(struct perf_time_interval *ptime, u64 timestamp)
{
/* if time is not set don't drop sample */
@@ -119,6 +368,34 @@ bool perf_time__skip_sample(struct perf_time_interval *ptime, u64 timestamp)
return false;
}
+bool perf_time__ranges_skip_sample(struct perf_time_interval *ptime_buf,
+ int num, u64 timestamp)
+{
+ struct perf_time_interval *ptime;
+ int i;
+
+ if ((timestamp == 0) || (num == 0))
+ return false;
+
+ if (num == 1)
+ return perf_time__skip_sample(&ptime_buf[0], timestamp);
+
+ /*
+ * start/end of multiple time ranges must be valid.
+ */
+ for (i = 0; i < num; i++) {
+ ptime = &ptime_buf[i];
+
+ if (timestamp >= ptime->start &&
+ ((timestamp < ptime->end && i < num - 1) ||
+ (timestamp <= ptime->end && i == num - 1))) {
+ break;
+ }
+ }
+
+ return (i == num) ? true : false;
+}
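Note the boundary convention in the loop above: every range but the last is treated half-open, start <= t < end, while the final range is closed, start <= t <= end. A sample landing exactly on an interior boundary therefore belongs to the following range, and the very last end timestamp is not dropped. A hypothetical check with two ranges over [0, 200]:

	struct perf_time_interval r[2] = { { .start = 0,   .end = 100 },
					   { .start = 100, .end = 200 } };

	/* perf_time__ranges_skip_sample(r, 2, 100) -> false (second range)   */
	/* perf_time__ranges_skip_sample(r, 2, 200) -> false (last, inclusive) */
	/* perf_time__ranges_skip_sample(r, 2, 201) -> true  (outside)         */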
+
int timestamp__scnprintf_usec(u64 timestamp, char *buf, size_t sz)
{
u64 sec = timestamp / NSEC_PER_SEC;
diff --git a/tools/perf/util/time-utils.h b/tools/perf/util/time-utils.h
index 15b475c50ccf..70b177d2b98c 100644
--- a/tools/perf/util/time-utils.h
+++ b/tools/perf/util/time-utils.h
@@ -13,8 +13,16 @@ int parse_nsec_time(const char *str, u64 *ptime);
int perf_time__parse_str(struct perf_time_interval *ptime, const char *ostr);
+int perf_time__percent_parse_str(struct perf_time_interval *ptime_buf, int num,
+ const char *ostr, u64 start, u64 end);
+
+struct perf_time_interval *perf_time__range_alloc(const char *ostr, int *size);
+
bool perf_time__skip_sample(struct perf_time_interval *ptime, u64 timestamp);
+bool perf_time__ranges_skip_sample(struct perf_time_interval *ptime_buf,
+ int num, u64 timestamp);
+
int timestamp__scnprintf_usec(u64 timestamp, char *buf, size_t sz);
int fetch_current_timestamp(char *buf, size_t sz);
diff --git a/tools/perf/util/tool.h b/tools/perf/util/tool.h
index 2532b558099b..183c91453522 100644
--- a/tools/perf/util/tool.h
+++ b/tools/perf/util/tool.h
@@ -76,6 +76,7 @@ struct perf_tool {
bool ordered_events;
bool ordering_requires_timestamps;
bool namespace_events;
+ bool no_warn;
enum show_feature_header show_feat_hdr;
};
diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
index 7a42f703e858..af873044d33a 100644
--- a/tools/perf/util/unwind-libunwind-local.c
+++ b/tools/perf/util/unwind-libunwind-local.c
@@ -631,9 +631,8 @@ static unw_accessors_t accessors = {
static int _unwind__prepare_access(struct thread *thread)
{
- if (callchain_param.record_mode != CALLCHAIN_DWARF)
+ if (!dwarf_callchain_users)
return 0;
-
thread->addr_space = unw_create_addr_space(&accessors, 0);
if (!thread->addr_space) {
pr_err("unwind: Can't create unwind address space.\n");
@@ -646,17 +645,15 @@ static int _unwind__prepare_access(struct thread *thread)
static void _unwind__flush_access(struct thread *thread)
{
- if (callchain_param.record_mode != CALLCHAIN_DWARF)
+ if (!dwarf_callchain_users)
return;
-
unw_flush_cache(thread->addr_space, 0, 0);
}
static void _unwind__finish_access(struct thread *thread)
{
- if (callchain_param.record_mode != CALLCHAIN_DWARF)
+ if (!dwarf_callchain_users)
return;
-
unw_destroy_addr_space(thread->addr_space);
}
diff --git a/tools/perf/util/unwind-libunwind.c b/tools/perf/util/unwind-libunwind.c
index 647a1e6b4c7b..b029a5e9ae49 100644
--- a/tools/perf/util/unwind-libunwind.c
+++ b/tools/perf/util/unwind-libunwind.c
@@ -3,7 +3,7 @@
#include "thread.h"
#include "session.h"
#include "debug.h"
-#include "arch/common.h"
+#include "env.h"
struct unwind_libunwind_ops __weak *local_unwind_libunwind_ops;
struct unwind_libunwind_ops __weak *x86_32_unwind_libunwind_ops;
@@ -39,7 +39,7 @@ int unwind__prepare_access(struct thread *thread, struct map *map,
if (dso_type == DSO__TYPE_UNKNOWN)
return 0;
- arch = normalize_arch(thread->mg->machine->env->arch);
+ arch = perf_env__arch(thread->mg->machine->env);
if (!strcmp(arch, "x86")) {
if (dso_type != DSO__TYPE_64BIT)
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index a789f952b3e9..443892dabedb 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -210,7 +210,7 @@ static int copyfile_offset(int ifd, loff_t off_in, int ofd, loff_t off_out, u64
size -= ret;
off_in += ret;
- off_out -= ret;
+ off_out += ret;
}
munmap(ptr, off_in + size);
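The one-character fix above matters: the output offset was moving backwards after each chunk, so any copy needing more than one iteration scribbled earlier chunks over themselves; only single-pass copies worked. The corrected loop shape, condensed (do_copy is a hypothetical stand-in for the mmap/write step, error handling elided):

	/* condensed shape of the fixed loop: both offsets advance */
	while (size > 0) {
		ssize_t ret = do_copy(ifd, off_in, ofd, off_out, size); /* hypothetical */
		if (ret <= 0)
			break;
		size    -= ret;
		off_in  += ret;
		off_out += ret;	/* previously decremented -- the bug */
	}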
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 01434509c2e9..9496365da3d7 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -68,4 +68,14 @@ extern bool perf_singlethreaded;
void perf_set_singlethreaded(void);
void perf_set_multithreaded(void);
+#ifndef O_CLOEXEC
+#ifdef __sparc__
+#define O_CLOEXEC 0x400000
+#elif defined(__alpha__) || defined(__hppa__)
+#define O_CLOEXEC 010000000
+#else
+#define O_CLOEXEC 02000000
+#endif
+#endif
+
#endif /* GIT_COMPAT_UTIL_H */
diff --git a/tools/power/acpi/tools/acpidump/apmain.c b/tools/power/acpi/tools/acpidump/apmain.c
index 22c3b4ee1617..be418fba9441 100644
--- a/tools/power/acpi/tools/acpidump/apmain.c
+++ b/tools/power/acpi/tools/acpidump/apmain.c
@@ -79,7 +79,7 @@ struct ap_dump_action action_table[AP_MAX_ACTIONS];
u32 current_action = 0;
#define AP_UTILITY_NAME "ACPI Binary Table Dump Utility"
-#define AP_SUPPORTED_OPTIONS "?a:bc:f:hn:o:r:svxz"
+#define AP_SUPPORTED_OPTIONS "?a:bc:f:hn:o:r:sv^xz"
/******************************************************************************
*
@@ -100,6 +100,7 @@ static void ap_display_usage(void)
ACPI_OPTION("-r <Address>", "Dump tables from specified RSDP");
ACPI_OPTION("-s", "Print table summaries only");
ACPI_OPTION("-v", "Display version information");
+ ACPI_OPTION("-vd", "Display build date and time");
ACPI_OPTION("-z", "Verbose mode");
ACPI_USAGE_TEXT("\nTable Options:\n");
@@ -231,10 +232,29 @@ static int ap_do_options(int argc, char **argv)
}
continue;
- case 'v': /* Revision/version */
+ case 'v': /* -v: (Version): signon already emitted, just exit */
- acpi_os_printf(ACPI_COMMON_SIGNON(AP_UTILITY_NAME));
- return (1);
+ switch (acpi_gbl_optarg[0]) {
+ case '^': /* -v: (Version) */
+
+ fprintf(stderr,
+ ACPI_COMMON_SIGNON(AP_UTILITY_NAME));
+ return (1);
+
+ case 'd':
+
+ fprintf(stderr,
+ ACPI_COMMON_SIGNON(AP_UTILITY_NAME));
+ printf(ACPI_COMMON_BUILD_TIME);
+ return (1);
+
+ default:
+
+ printf("Unknown option: -v%s\n",
+ acpi_gbl_optarg);
+ return (-1);
+ }
+ break;
case 'z': /* Verbose mode */
diff --git a/tools/power/cpupower/lib/cpufreq.h b/tools/power/cpupower/lib/cpufreq.h
index 3b005c39f068..60beaf5ed2ea 100644
--- a/tools/power/cpupower/lib/cpufreq.h
+++ b/tools/power/cpupower/lib/cpufreq.h
@@ -11,10 +11,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __CPUPOWER_CPUFREQ_H__
diff --git a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
index 0b24dd9d01ff..29f50d4cfea0 100755
--- a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
+++ b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
@@ -411,6 +411,16 @@ def set_trace_buffer_size():
print('IO error setting trace buffer size ')
quit()
+def free_trace_buffer():
+ """ Free the trace buffer memory """
+
+    try:
+        open('/sys/kernel/debug/tracing/buffer_size_kb', 'w').write("1")
+    except:
+        print('IO error freeing trace buffer ')
+        quit()
+
def read_trace_data(filename):
""" Read and parse trace data """
@@ -583,4 +593,9 @@ for root, dirs, files in os.walk('.'):
for f in files:
fix_ownership(f)
+clear_trace_file()
+# Free the memory
+if interval:
+ free_trace_buffer()
+
os.chdir('../../')
diff --git a/tools/testing/selftests/ptp/testptp.c b/tools/testing/selftests/ptp/testptp.c
index 5d2eae16f7ee..a5d8f0ab0da0 100644
--- a/tools/testing/selftests/ptp/testptp.c
+++ b/tools/testing/selftests/ptp/testptp.c
@@ -60,9 +60,7 @@ static int clock_adjtime(clockid_t id, struct timex *tx)
static clockid_t get_clockid(int fd)
{
#define CLOCKFD 3
-#define FD_TO_CLOCKID(fd) ((~(clockid_t) (fd) << 3) | CLOCKFD)
-
- return FD_TO_CLOCKID(fd);
+ return (((unsigned int) ~fd) << 3) | CLOCKFD;
}
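The rewrite keeps the standard dynamic-POSIX-clock encoding, clockid = (~fd << 3) | CLOCKFD, but performs the shift on an unsigned value, avoiding the implementation-defined result of left-shifting a negative clockid_t. For fd = 3: ~3u = 0xfffffffc, shifted left 3 gives 0xffffffe0, OR CLOCKFD (3) yields 0xffffffe3, which is negative as a clockid, and a negative clockid is how dynamic clocks are told apart from the static ones. A quick check:

	#include <stdio.h>
	#define CLOCKFD 3

	int main(void)
	{
		int fd = 3;
		int clkid = (int)((((unsigned int) ~fd) << 3) | CLOCKFD);

		printf("%#x (%d)\n", (unsigned int)clkid, clkid); /* 0xffffffe3 (-29) */
		return 0;
	}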
static void handle_alarm(int s)
diff --git a/tools/testing/selftests/rcutorture/bin/config2frag.sh b/tools/testing/selftests/rcutorture/bin/config2frag.sh
deleted file mode 100755
index 56f51ae13d73..000000000000
--- a/tools/testing/selftests/rcutorture/bin/config2frag.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-# Usage: config2frag.sh < .config > configfrag
-#
-# Converts the "# CONFIG_XXX is not set" to "CONFIG_XXX=n" so that the
-# resulting file becomes a legitimate Kconfig fragment.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
-# Copyright (C) IBM Corporation, 2013
-#
-# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
-
-LANG=C sed -e 's/^# CONFIG_\([a-zA-Z0-9_]*\) is not set$/CONFIG_\1=n/'
diff --git a/tools/testing/selftests/rcutorture/bin/configinit.sh b/tools/testing/selftests/rcutorture/bin/configinit.sh
index 51f66a7ce876..c15f270e121d 100755
--- a/tools/testing/selftests/rcutorture/bin/configinit.sh
+++ b/tools/testing/selftests/rcutorture/bin/configinit.sh
@@ -51,7 +51,7 @@ then
mkdir $builddir
fi
else
- echo Bad build directory: \"$builddir\"
+ echo Bad build directory: \"$buildloc\"
exit 2
fi
fi
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-build.sh b/tools/testing/selftests/rcutorture/bin/kvm-build.sh
index fb66d0173638..34d126734cde 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-build.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-build.sh
@@ -29,11 +29,6 @@ then
exit 1
fi
builddir=${2}
-if test -z "$builddir" -o ! -d "$builddir" -o ! -w "$builddir"
-then
- echo "kvm-build.sh :$builddir: Not a writable directory, cannot build into it"
- exit 1
-fi
T=${TMPDIR-/tmp}/test-linux.sh.$$
trap 'rm -rf $T' 0
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh
index 43f764098e50..2de92f43ee8c 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh
@@ -23,7 +23,7 @@
# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
i="$1"
-if test -d $i
+if test -d "$i" -a -r "$i"
then
:
else
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
index 559e01ac86be..c2e1bb6d0cba 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
@@ -23,14 +23,14 @@
# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
i="$1"
-if test -d $i
+if test -d "$i" -a -r "$i"
then
:
else
echo Unreadable results directory: $i
exit 1
fi
-. tools/testing/selftests/rcutorture/bin/functions.sh
+. functions.sh
configfile=`echo $i | sed -e 's/^.*\///'`
ngps=`grep ver: $i/console.log 2> /dev/null | tail -1 | sed -e 's/^.* ver: //' -e 's/ .*$//'`
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh
index f79b0e9e84fc..963f71289d22 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh
@@ -26,7 +26,7 @@
# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
i="$1"
-. tools/testing/selftests/rcutorture/bin/functions.sh
+. functions.sh
if test "`grep -c 'rcu_exp_grace_period.*start' < $i/console.log`" -lt 100
then
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh
index 8f3121afc716..ccebf772fa1e 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh
@@ -23,7 +23,7 @@
# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
i="$1"
-if test -d $i
+if test -d "$i" -a -r "$i"
then
:
else
@@ -31,7 +31,7 @@ else
exit 1
fi
PATH=`pwd`/tools/testing/selftests/rcutorture/bin:$PATH; export PATH
-. tools/testing/selftests/rcutorture/bin/functions.sh
+. functions.sh
if kvm-recheck-rcuperf-ftrace.sh $i
then
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
index f659346d3358..f7e988f369dd 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
@@ -25,7 +25,7 @@
# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
PATH=`pwd`/tools/testing/selftests/rcutorture/bin:$PATH; export PATH
-. tools/testing/selftests/rcutorture/bin/functions.sh
+. functions.sh
for rd in "$@"
do
firsttime=1
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
index ab14b97c942c..1b78a12740e5 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
@@ -42,7 +42,7 @@ T=${TMPDIR-/tmp}/kvm-test-1-run.sh.$$
trap 'rm -rf $T' 0
mkdir $T
-. $KVM/bin/functions.sh
+. functions.sh
. $CONFIGFRAG/ver_functions.sh
config_template=${1}
@@ -154,9 +154,7 @@ cpu_count=`configfrag_boot_cpus "$boot_args" "$config_template" "$cpu_count"`
vcpus=`identify_qemu_vcpus`
if test $cpu_count -gt $vcpus
then
- echo CPU count limited from $cpu_count to $vcpus
- touch $resdir/Warnings
- echo CPU count limited from $cpu_count to $vcpus >> $resdir/Warnings
+ echo CPU count limited from $cpu_count to $vcpus | tee -a $resdir/Warnings
cpu_count=$vcpus
fi
qemu_args="`specify_qemu_cpus "$QEMU" "$qemu_args" "$cpu_count"`"
diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh
index ccd49e958fd2..7d1f607f0f76 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm.sh
@@ -1,8 +1,7 @@
#!/bin/bash
#
# Run a series of 14 tests under KVM. These are not particularly
-# well-selected or well-tuned, but are the current set. Run from the
-# top level of the source tree.
+# well-selected or well-tuned, but are the current set.
#
# Edit the definitions below to set the locations of the various directories,
# as well as the test duration.
@@ -34,6 +33,8 @@ T=${TMPDIR-/tmp}/kvm.sh.$$
trap 'rm -rf $T' 0
mkdir $T
+cd `dirname $scriptname`/../../../../../
+
dur=$((30*60))
dryrun=""
KVM="`pwd`/tools/testing/selftests/rcutorture"; export KVM
@@ -70,7 +71,7 @@ usage () {
echo " --kmake-arg kernel-make-arguments"
echo " --mac nn:nn:nn:nn:nn:nn"
echo " --no-initrd"
- echo " --qemu-args qemu-system-..."
+ echo " --qemu-args qemu-arguments"
echo " --qemu-cmd qemu-system-..."
echo " --results absolute-pathname"
echo " --torture rcu"
@@ -150,7 +151,7 @@ do
TORTURE_INITRD=""; export TORTURE_INITRD
;;
--qemu-args|--qemu-arg)
- checkarg --qemu-args "-qemu args" $# "$2" '^-' '^error'
+ checkarg --qemu-args "(qemu arguments)" $# "$2" '^-' '^error'
TORTURE_QEMU_ARG="$2"
shift
;;
@@ -238,7 +239,6 @@ BEGIN {
}
END {
- alldone = 0;
batch = 0;
nc = -1;
@@ -331,8 +331,7 @@ awk < $T/cfgcpu.pack \
# Dump out the scripting required to run one test batch.
function dump(first, pastlast, batchnum)
{
- print "echo ----Start batch " batchnum ": `date`";
- print "echo ----Start batch " batchnum ": `date` >> " rd "/log";
+ print "echo ----Start batch " batchnum ": `date` | tee -a " rd "log";
print "needqemurun="
jn=1
for (j = first; j < pastlast; j++) {
@@ -349,21 +348,18 @@ function dump(first, pastlast, batchnum)
ovf = "-ovf";
else
ovf = "";
- print "echo ", cfr[jn], cpusr[jn] ovf ": Starting build. `date`";
- print "echo ", cfr[jn], cpusr[jn] ovf ": Starting build. `date` >> " rd "/log";
+ print "echo ", cfr[jn], cpusr[jn] ovf ": Starting build. `date` | tee -a " rd "log";
print "rm -f " builddir ".*";
print "touch " builddir ".wait";
print "mkdir " builddir " > /dev/null 2>&1 || :";
print "mkdir " rd cfr[jn] " || :";
print "kvm-test-1-run.sh " CONFIGDIR cf[j], builddir, rd cfr[jn], dur " \"" TORTURE_QEMU_ARG "\" \"" TORTURE_BOOTARGS "\" > " rd cfr[jn] "/kvm-test-1-run.sh.out 2>&1 &"
- print "echo ", cfr[jn], cpusr[jn] ovf ": Waiting for build to complete. `date`";
- print "echo ", cfr[jn], cpusr[jn] ovf ": Waiting for build to complete. `date` >> " rd "/log";
+ print "echo ", cfr[jn], cpusr[jn] ovf ": Waiting for build to complete. `date` | tee -a " rd "log";
print "while test -f " builddir ".wait"
print "do"
print "\tsleep 1"
print "done"
- print "echo ", cfr[jn], cpusr[jn] ovf ": Build complete. `date`";
- print "echo ", cfr[jn], cpusr[jn] ovf ": Build complete. `date` >> " rd "/log";
+ print "echo ", cfr[jn], cpusr[jn] ovf ": Build complete. `date` | tee -a " rd "log";
jn++;
}
for (j = 1; j < jn; j++) {
@@ -371,8 +367,7 @@ function dump(first, pastlast, batchnum)
print "rm -f " builddir ".ready"
print "if test -f \"" rd cfr[j] "/builtkernel\""
print "then"
- print "\techo ----", cfr[j], cpusr[j] ovf ": Kernel present. `date`";
- print "\techo ----", cfr[j], cpusr[j] ovf ": Kernel present. `date` >> " rd "/log";
+ print "\techo ----", cfr[j], cpusr[j] ovf ": Kernel present. `date` | tee -a " rd "log";
print "\tneedqemurun=1"
print "fi"
}
@@ -386,31 +381,26 @@ function dump(first, pastlast, batchnum)
njitter = ja[1];
if (TORTURE_BUILDONLY && njitter != 0) {
njitter = 0;
- print "echo Build-only run, so suppressing jitter >> " rd "/log"
+ print "echo Build-only run, so suppressing jitter | tee -a " rd "log"
}
if (TORTURE_BUILDONLY) {
print "needqemurun="
}
print "if test -n \"$needqemurun\""
print "then"
- print "\techo ---- Starting kernels. `date`";
- print "\techo ---- Starting kernels. `date` >> " rd "/log";
+ print "\techo ---- Starting kernels. `date` | tee -a " rd "log";
for (j = 0; j < njitter; j++)
print "\tjitter.sh " j " " dur " " ja[2] " " ja[3] "&"
print "\twait"
- print "\techo ---- All kernel runs complete. `date`";
- print "\techo ---- All kernel runs complete. `date` >> " rd "/log";
+ print "\techo ---- All kernel runs complete. `date` | tee -a " rd "log";
print "else"
print "\twait"
- print "\techo ---- No kernel runs. `date`";
- print "\techo ---- No kernel runs. `date` >> " rd "/log";
+ print "\techo ---- No kernel runs. `date` | tee -a " rd "log";
print "fi"
for (j = 1; j < jn; j++) {
builddir=KVM "/b" j
- print "echo ----", cfr[j], cpusr[j] ovf ": Build/run results:";
- print "echo ----", cfr[j], cpusr[j] ovf ": Build/run results: >> " rd "/log";
- print "cat " rd cfr[j] "/kvm-test-1-run.sh.out";
- print "cat " rd cfr[j] "/kvm-test-1-run.sh.out >> " rd "/log";
+ print "echo ----", cfr[j], cpusr[j] ovf ": Build/run results: | tee -a " rd "log";
+ print "cat " rd cfr[j] "/kvm-test-1-run.sh.out | tee -a " rd "log";
}
}
diff --git a/tools/testing/selftests/rcutorture/bin/parse-torture.sh b/tools/testing/selftests/rcutorture/bin/parse-torture.sh
index f12c38909b00..5987e50cfeb4 100755
--- a/tools/testing/selftests/rcutorture/bin/parse-torture.sh
+++ b/tools/testing/selftests/rcutorture/bin/parse-torture.sh
@@ -55,7 +55,7 @@ then
exit
fi
-grep --binary-files=text 'torture:.*ver:' $file | grep --binary-files=text -v '(null)' | sed -e 's/^(initramfs)[^]]*] //' -e 's/^\[[^]]*] //' |
+grep --binary-files=text 'torture:.*ver:' $file | egrep --binary-files=text -v '\(null\)|rtc: 000000000* ' | sed -e 's/^(initramfs)[^]]*] //' -e 's/^\[[^]]*] //' |
awk '
BEGIN {
ver = 0;
diff --git a/tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh
index 252aae618984..80eb646e1319 100644
--- a/tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh
+++ b/tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh
@@ -38,6 +38,5 @@ per_version_boot_params () {
echo $1 `locktorture_param_onoff "$1" "$2"` \
locktorture.stat_interval=15 \
locktorture.shutdown_secs=$3 \
- locktorture.torture_runnable=1 \
locktorture.verbose=1
}
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh
index ffb85ed786fa..24ec91041957 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh
+++ b/tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh
@@ -51,7 +51,6 @@ per_version_boot_params () {
`rcutorture_param_n_barrier_cbs "$1"` \
rcutorture.stat_interval=15 \
rcutorture.shutdown_secs=$3 \
- rcutorture.torture_runnable=1 \
rcutorture.test_no_idle_hz=1 \
rcutorture.verbose=1
}
diff --git a/tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh
index 34f2a1b35ee5..b9603115d7c7 100644
--- a/tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh
+++ b/tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh
@@ -46,7 +46,6 @@ rcuperf_param_nwriters () {
per_version_boot_params () {
echo $1 `rcuperf_param_nreaders "$1"` \
`rcuperf_param_nwriters "$1"` \
- rcuperf.perf_runnable=1 \
rcuperf.shutdown=1 \
rcuperf.verbose=1
}
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 2e43f9d42bd5..08464b2fba1d 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -53,8 +53,8 @@
__asm__(".arch_extension virt");
#endif
+DEFINE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
-static kvm_cpu_context_t __percpu *kvm_host_cpu_state;
/* Per-CPU variable containing the currently running vcpu. */
static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
@@ -354,7 +354,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
}
vcpu->cpu = cpu;
- vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
+ vcpu->arch.host_cpu_context = this_cpu_ptr(&kvm_host_cpu_state);
kvm_arm_set_running_vcpu(vcpu);
kvm_vgic_load(vcpu);
@@ -509,7 +509,7 @@ static void update_vttbr(struct kvm *kvm)
pgd_phys = virt_to_phys(kvm->arch.pgd);
BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
- kvm->arch.vttbr = pgd_phys | vmid;
+ kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid;
spin_unlock(&kvm_vmid_lock);
}
@@ -704,9 +704,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
*/
trace_kvm_entry(*vcpu_pc(vcpu));
guest_enter_irqoff();
+ if (has_vhe())
+ kvm_arm_vhe_guest_enter();
ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
+ if (has_vhe())
+ kvm_arm_vhe_guest_exit();
vcpu->mode = OUTSIDE_GUEST_MODE;
vcpu->stat.exits++;
/*
@@ -759,6 +763,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
guest_exit();
trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
+ /* Exit types that need handling before we can be preempted */
+ handle_exit_early(vcpu, run, ret);
+
preempt_enable();
ret = handle_exit(vcpu, run, ret);
@@ -1158,7 +1165,7 @@ static void cpu_init_hyp_mode(void *dummy)
pgd_ptr = kvm_mmu_get_httbr();
stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
hyp_stack_ptr = stack_page + PAGE_SIZE;
- vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector);
+ vector_ptr = (unsigned long)kvm_get_hyp_vector();
__cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
__cpu_init_stage2();
@@ -1272,19 +1279,8 @@ static inline void hyp_cpu_pm_exit(void)
}
#endif
-static void teardown_common_resources(void)
-{
- free_percpu(kvm_host_cpu_state);
-}
-
static int init_common_resources(void)
{
- kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t);
- if (!kvm_host_cpu_state) {
- kvm_err("Cannot allocate host CPU state\n");
- return -ENOMEM;
- }
-
/* set size of VMID supported by CPU */
kvm_vmid_bits = kvm_get_vmid_bits();
kvm_info("%d-bit VMID\n", kvm_vmid_bits);
@@ -1403,6 +1399,12 @@ static int init_hyp_mode(void)
goto out_err;
}
+ err = kvm_map_vectors();
+ if (err) {
+ kvm_err("Cannot map vectors\n");
+ goto out_err;
+ }
+
/*
* Map the Hyp stack pages
*/
@@ -1420,7 +1422,7 @@ static int init_hyp_mode(void)
for_each_possible_cpu(cpu) {
kvm_cpu_context_t *cpu_ctxt;
- cpu_ctxt = per_cpu_ptr(kvm_host_cpu_state, cpu);
+ cpu_ctxt = per_cpu_ptr(&kvm_host_cpu_state, cpu);
err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1, PAGE_HYP);
if (err) {
@@ -1544,7 +1546,6 @@ out_hyp:
if (!in_hyp_mode)
teardown_hyp_mode();
out_err:
- teardown_common_resources();
return err;
}
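Several arm.c hunks above are one conversion: kvm_host_cpu_state moves from a dynamically allocated percpu pointer to a statically defined percpu variable. That removes the allocation-failure path (and with it teardown_common_resources()) and changes every accessor from passing the pointer to passing the variable's address:

	/* before: dynamic percpu allocation, may fail, must be freed */
	kvm_cpu_context_t __percpu *state = alloc_percpu(kvm_cpu_context_t);
	ctx = this_cpu_ptr(state);

	/* after: storage reserved at build time, lives for the kernel's lifetime */
	DEFINE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
	ctx = this_cpu_ptr(&kvm_host_cpu_state);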
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 9dea96380339..f8eaf86b740a 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -621,7 +621,7 @@ static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
return 0;
}
-static int __create_hyp_mappings(pgd_t *pgdp,
+static int __create_hyp_mappings(pgd_t *pgdp, unsigned long ptrs_per_pgd,
unsigned long start, unsigned long end,
unsigned long pfn, pgprot_t prot)
{
@@ -634,7 +634,7 @@ static int __create_hyp_mappings(pgd_t *pgdp,
addr = start & PAGE_MASK;
end = PAGE_ALIGN(end);
do {
- pgd = pgdp + pgd_index(addr);
+ pgd = pgdp + ((addr >> PGDIR_SHIFT) & (ptrs_per_pgd - 1));
if (pgd_none(*pgd)) {
pud = pud_alloc_one(NULL, addr);
@@ -697,8 +697,8 @@ int create_hyp_mappings(void *from, void *to, pgprot_t prot)
int err;
phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
- err = __create_hyp_mappings(hyp_pgd, virt_addr,
- virt_addr + PAGE_SIZE,
+ err = __create_hyp_mappings(hyp_pgd, PTRS_PER_PGD,
+ virt_addr, virt_addr + PAGE_SIZE,
__phys_to_pfn(phys_addr),
prot);
if (err)
@@ -729,7 +729,7 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))
return -EINVAL;
- return __create_hyp_mappings(hyp_pgd, start, end,
+ return __create_hyp_mappings(hyp_pgd, PTRS_PER_PGD, start, end,
__phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
}
@@ -1735,7 +1735,7 @@ static int kvm_map_idmap_text(pgd_t *pgd)
int err;
/* Create the idmap in the boot page tables */
- err = __create_hyp_mappings(pgd,
+ err = __create_hyp_mappings(pgd, __kvm_idmap_ptrs_per_pgd(),
hyp_idmap_start, hyp_idmap_end,
__phys_to_pfn(hyp_idmap_start),
PAGE_HYP_EXEC);
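Open-coding the index as (addr >> PGDIR_SHIFT) & (ptrs_per_pgd - 1) lets __create_hyp_mappings() walk a pgd whose geometry differs from the kernel's own: the idmap tables report their size via __kvm_idmap_ptrs_per_pgd() while regular hyp mappings keep PTRS_PER_PGD. With illustrative numbers (PGDIR_SHIFT = 30 and ptrs_per_pgd = 512 are assumptions; both are configuration-dependent), address 0x80000000 lands in slot 2:

	/* illustrative index math; shift and table size vary by config */
	unsigned long addr = 0x80000000UL;
	unsigned long ptrs_per_pgd = 512;
	unsigned int shift = 30;

	unsigned long idx = (addr >> shift) & (ptrs_per_pgd - 1);	/* == 2 */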
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index f2ac53ab8243..a334399fafec 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -188,7 +188,7 @@ irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
struct kvm_kernel_irqfd *irqfd =
container_of(wait, struct kvm_kernel_irqfd, wait);
- unsigned long flags = (unsigned long)key;
+ __poll_t flags = key_to_poll(key);
struct kvm_kernel_irq_routing_entry irq;
struct kvm *kvm = irqfd->kvm;
unsigned seq;
@@ -287,7 +287,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
struct fd f;
struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
int ret;
- unsigned int events;
+ __poll_t events;
int idx;
if (!kvm_arch_intc_initialized(kvm))
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 210bf820385a..d6b9370806f8 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1322,17 +1322,6 @@ unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *w
return gfn_to_hva_memslot_prot(slot, gfn, writable);
}
-static int get_user_page_nowait(unsigned long start, int write,
- struct page **page)
-{
- int flags = FOLL_NOWAIT | FOLL_HWPOISON;
-
- if (write)
- flags |= FOLL_WRITE;
-
- return get_user_pages(start, 1, flags, page, NULL);
-}
-
static inline int check_user_page_hwpoison(unsigned long addr)
{
int rc, flags = FOLL_HWPOISON | FOLL_WRITE;
@@ -1381,7 +1370,8 @@ static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async,
static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
bool *writable, kvm_pfn_t *pfn)
{
- struct page *page[1];
+ unsigned int flags = FOLL_HWPOISON;
+ struct page *page;
int npages = 0;
might_sleep();
@@ -1389,35 +1379,26 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
if (writable)
*writable = write_fault;
- if (async) {
- down_read(&current->mm->mmap_sem);
- npages = get_user_page_nowait(addr, write_fault, page);
- up_read(&current->mm->mmap_sem);
- } else {
- unsigned int flags = FOLL_HWPOISON;
-
- if (write_fault)
- flags |= FOLL_WRITE;
+ if (write_fault)
+ flags |= FOLL_WRITE;
+ if (async)
+ flags |= FOLL_NOWAIT;
- npages = get_user_pages_unlocked(addr, 1, page, flags);
- }
+ npages = get_user_pages_unlocked(addr, 1, &page, flags);
if (npages != 1)
return npages;
/* map read fault as writable if possible */
if (unlikely(!write_fault) && writable) {
- struct page *wpage[1];
+ struct page *wpage;
- npages = __get_user_pages_fast(addr, 1, 1, wpage);
- if (npages == 1) {
+ if (__get_user_pages_fast(addr, 1, 1, &wpage) == 1) {
*writable = true;
- put_page(page[0]);
- page[0] = wpage[0];
+ put_page(page);
+ page = wpage;
}
-
- npages = 1;
}
- *pfn = page_to_pfn(page[0]);
+ *pfn = page_to_pfn(page);
return npages;
}
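The kvm_main.c hunks above collapse the async and sync slow paths into a single get_user_pages_unlocked() call: rather than a dedicated get_user_page_nowait() helper that took mmap_sem by hand, the async case is now expressed entirely through gup flags. The flag composition, isolated from the surrounding function:

	/* flag composition now carried by the single gup call above */
	unsigned int flags = FOLL_HWPOISON;

	if (write_fault)
		flags |= FOLL_WRITE;	/* fault must be writable */
	if (async)
		flags |= FOLL_NOWAIT;	/* do not sleep; caller retries asynchronously */

	npages = get_user_pages_unlocked(addr, 1, &page, flags);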